diff --git a/.idea/.gitignore b/.idea/.gitignore deleted file mode 100644 index e7e9d11..0000000 --- a/.idea/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# Default ignored files -/workspace.xml diff --git a/.idea/SI_Projekt.iml b/.idea/SI_Projekt.iml deleted file mode 100644 index 0e4e9fa..0000000 --- a/.idea/SI_Projekt.iml +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - \ No newline at end of file diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml deleted file mode 100644 index 105ce2d..0000000 --- a/.idea/inspectionProfiles/profiles_settings.xml +++ /dev/null @@ -1,6 +0,0 @@ - - - - \ No newline at end of file diff --git a/.idea/misc.xml b/.idea/misc.xml deleted file mode 100644 index 6649a8c..0000000 --- a/.idea/misc.xml +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml deleted file mode 100644 index 48744ac..0000000 --- a/.idea/modules.xml +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - - - \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml deleted file mode 100644 index 288b36b..0000000 --- a/.idea/vcs.xml +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - - \ No newline at end of file diff --git a/venv/bin/activate b/venv/bin/activate deleted file mode 100644 index 678ec96..0000000 --- a/venv/bin/activate +++ /dev/null @@ -1,76 +0,0 @@ -# This file must be used with "source bin/activate" *from bash* -# you cannot run it directly - -deactivate () { - # reset old environment variables - if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then - PATH="${_OLD_VIRTUAL_PATH:-}" - export PATH - unset _OLD_VIRTUAL_PATH - fi - if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then - PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}" - export PYTHONHOME - unset _OLD_VIRTUAL_PYTHONHOME - fi - - # This should detect bash and zsh, which have a hash command that must - # be called to get it to forget past commands. 
Without forgetting - # past commands the $PATH changes we made may not be respected - if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then - hash -r - fi - - if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then - PS1="${_OLD_VIRTUAL_PS1:-}" - export PS1 - unset _OLD_VIRTUAL_PS1 - fi - - unset VIRTUAL_ENV - if [ ! "$1" = "nondestructive" ] ; then - # Self destruct! - unset -f deactivate - fi -} - -# unset irrelevant variables -deactivate nondestructive - -VIRTUAL_ENV="/home/marcin/PycharmProjects/SI_Projekt/venv" -export VIRTUAL_ENV - -_OLD_VIRTUAL_PATH="$PATH" -PATH="$VIRTUAL_ENV/bin:$PATH" -export PATH - -# unset PYTHONHOME if set -# this will fail if PYTHONHOME is set to the empty string (which is bad anyway) -# could use `if (set -u; : $PYTHONHOME) ;` in bash -if [ -n "${PYTHONHOME:-}" ] ; then - _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}" - unset PYTHONHOME -fi - -if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then - _OLD_VIRTUAL_PS1="${PS1:-}" - if [ "x(venv) " != x ] ; then - PS1="(venv) ${PS1:-}" - else - if [ "`basename \"$VIRTUAL_ENV\"`" = "__" ] ; then - # special case for Aspen magic directories - # see http://www.zetadev.com/software/aspen/ - PS1="[`basename \`dirname \"$VIRTUAL_ENV\"\``] $PS1" - else - PS1="(`basename \"$VIRTUAL_ENV\"`)$PS1" - fi - fi - export PS1 -fi - -# This should detect bash and zsh, which have a hash command that must -# be called to get it to forget past commands. Without forgetting -# past commands the $PATH changes we made may not be respected -if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then - hash -r -fi diff --git a/venv/bin/activate.csh b/venv/bin/activate.csh deleted file mode 100644 index 76ed6b5..0000000 --- a/venv/bin/activate.csh +++ /dev/null @@ -1,37 +0,0 @@ -# This file must be used with "source bin/activate.csh" *from csh*. -# You cannot run it directly. -# Created by Davide Di Blasi . 
-# Ported to Python 3.3 venv by Andrew Svetlov - -alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate' - -# Unset irrelevant variables. -deactivate nondestructive - -setenv VIRTUAL_ENV "/home/marcin/PycharmProjects/SI_Projekt/venv" - -set _OLD_VIRTUAL_PATH="$PATH" -setenv PATH "$VIRTUAL_ENV/bin:$PATH" - - -set _OLD_VIRTUAL_PROMPT="$prompt" - -if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then - if ("venv" != "") then - set env_name = "venv" - else - if (`basename "VIRTUAL_ENV"` == "__") then - # special case for Aspen magic directories - # see http://www.zetadev.com/software/aspen/ - set env_name = `basename \`dirname "$VIRTUAL_ENV"\`` - else - set env_name = `basename "$VIRTUAL_ENV"` - endif - endif - set prompt = "[$env_name] $prompt" - unset env_name -endif - -alias pydoc python -m pydoc - -rehash diff --git a/venv/bin/activate.fish b/venv/bin/activate.fish deleted file mode 100644 index 5c0713d..0000000 --- a/venv/bin/activate.fish +++ /dev/null @@ -1,75 +0,0 @@ -# This file must be used with ". bin/activate.fish" *from fish* (http://fishshell.org) -# you cannot run it directly - -function deactivate -d "Exit virtualenv and return to normal shell environment" - # reset old environment variables - if test -n "$_OLD_VIRTUAL_PATH" - set -gx PATH $_OLD_VIRTUAL_PATH - set -e _OLD_VIRTUAL_PATH - end - if test -n "$_OLD_VIRTUAL_PYTHONHOME" - set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME - set -e _OLD_VIRTUAL_PYTHONHOME - end - - if test -n "$_OLD_FISH_PROMPT_OVERRIDE" - functions -e fish_prompt - set -e _OLD_FISH_PROMPT_OVERRIDE - functions -c _old_fish_prompt fish_prompt - functions -e _old_fish_prompt - end - - set -e VIRTUAL_ENV - if test "$argv[1]" != "nondestructive" - # Self destruct! 
- functions -e deactivate - end -end - -# unset irrelevant variables -deactivate nondestructive - -set -gx VIRTUAL_ENV "/home/marcin/PycharmProjects/SI_Projekt/venv" - -set -gx _OLD_VIRTUAL_PATH $PATH -set -gx PATH "$VIRTUAL_ENV/bin" $PATH - -# unset PYTHONHOME if set -if set -q PYTHONHOME - set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME - set -e PYTHONHOME -end - -if test -z "$VIRTUAL_ENV_DISABLE_PROMPT" - # fish uses a function instead of an env var to generate the prompt. - - # save the current fish_prompt function as the function _old_fish_prompt - functions -c fish_prompt _old_fish_prompt - - # with the original prompt function renamed, we can override with our own. - function fish_prompt - # Save the return status of the last command - set -l old_status $status - - # Prompt override? - if test -n "(venv) " - printf "%s%s" "(venv) " (set_color normal) - else - # ...Otherwise, prepend env - set -l _checkbase (basename "$VIRTUAL_ENV") - if test $_checkbase = "__" - # special case for Aspen magic directories - # see http://www.zetadev.com/software/aspen/ - printf "%s[%s]%s " (set_color -b blue white) (basename (dirname "$VIRTUAL_ENV")) (set_color normal) - else - printf "%s(%s)%s" (set_color -b blue white) (basename "$VIRTUAL_ENV") (set_color normal) - end - end - - # Restore the return status of the previous command. - echo "exit $old_status" | . 
- _old_fish_prompt - end - - set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV" -end diff --git a/venv/bin/easy_install b/venv/bin/easy_install deleted file mode 100755 index 4824897..0000000 --- a/venv/bin/easy_install +++ /dev/null @@ -1,12 +0,0 @@ -#!/home/marcin/PycharmProjects/SI_Projekt/venv/bin/python -# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install' -__requires__ = 'setuptools==40.8.0' -import re -import sys -from pkg_resources import load_entry_point - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit( - load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')() - ) diff --git a/venv/bin/easy_install-3.7 b/venv/bin/easy_install-3.7 deleted file mode 100755 index fc582e5..0000000 --- a/venv/bin/easy_install-3.7 +++ /dev/null @@ -1,12 +0,0 @@ -#!/home/marcin/PycharmProjects/SI_Projekt/venv/bin/python -# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7' -__requires__ = 'setuptools==40.8.0' -import re -import sys -from pkg_resources import load_entry_point - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit( - load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')() - ) diff --git a/venv/bin/f2py b/venv/bin/f2py deleted file mode 100755 index d6993d6..0000000 --- a/venv/bin/f2py +++ /dev/null @@ -1,10 +0,0 @@ -#!/home/marcin/PycharmProjects/SI_Projekt/venv/bin/python -# -*- coding: utf-8 -*- -import re -import sys - -from numpy.f2py.f2py2e import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/bin/f2py3 b/venv/bin/f2py3 deleted file mode 100755 index d6993d6..0000000 --- a/venv/bin/f2py3 +++ /dev/null @@ -1,10 +0,0 @@ -#!/home/marcin/PycharmProjects/SI_Projekt/venv/bin/python -# -*- coding: utf-8 -*- -import re -import sys - -from numpy.f2py.f2py2e 
import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/bin/f2py3.7 b/venv/bin/f2py3.7 deleted file mode 100755 index d6993d6..0000000 --- a/venv/bin/f2py3.7 +++ /dev/null @@ -1,10 +0,0 @@ -#!/home/marcin/PycharmProjects/SI_Projekt/venv/bin/python -# -*- coding: utf-8 -*- -import re -import sys - -from numpy.f2py.f2py2e import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/bin/pip b/venv/bin/pip deleted file mode 100755 index 5dcb028..0000000 --- a/venv/bin/pip +++ /dev/null @@ -1,12 +0,0 @@ -#!/home/marcin/PycharmProjects/SI_Projekt/venv/bin/python -# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip' -__requires__ = 'pip==19.0.3' -import re -import sys -from pkg_resources import load_entry_point - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit( - load_entry_point('pip==19.0.3', 'console_scripts', 'pip')() - ) diff --git a/venv/bin/pip3 b/venv/bin/pip3 deleted file mode 100755 index 9177fc0..0000000 --- a/venv/bin/pip3 +++ /dev/null @@ -1,12 +0,0 @@ -#!/home/marcin/PycharmProjects/SI_Projekt/venv/bin/python -# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3' -__requires__ = 'pip==19.0.3' -import re -import sys -from pkg_resources import load_entry_point - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit( - load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')() - ) diff --git a/venv/bin/pip3.7 b/venv/bin/pip3.7 deleted file mode 100755 index 0643220..0000000 --- a/venv/bin/pip3.7 +++ /dev/null @@ -1,12 +0,0 @@ -#!/home/marcin/PycharmProjects/SI_Projekt/venv/bin/python -# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7' -__requires__ = 'pip==19.0.3' -import re -import sys -from pkg_resources import load_entry_point 
- -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit( - load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')() - ) diff --git a/venv/bin/python b/venv/bin/python deleted file mode 120000 index 940bee3..0000000 --- a/venv/bin/python +++ /dev/null @@ -1 +0,0 @@ -python3.7 \ No newline at end of file diff --git a/venv/bin/python3 b/venv/bin/python3 deleted file mode 120000 index 940bee3..0000000 --- a/venv/bin/python3 +++ /dev/null @@ -1 +0,0 @@ -python3.7 \ No newline at end of file diff --git a/venv/bin/python3.7 b/venv/bin/python3.7 deleted file mode 120000 index f097b0e..0000000 --- a/venv/bin/python3.7 +++ /dev/null @@ -1 +0,0 @@ -/usr/bin/python3.7 \ No newline at end of file diff --git a/venv/include/site/python3.7/pygame/_camera.h b/venv/include/site/python3.7/pygame/_camera.h deleted file mode 100644 index 68ae989..0000000 --- a/venv/include/site/python3.7/pygame/_camera.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - pygame - Python Game Library - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; either - version 2 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Library General Public License for more details. 
- - You should have received a copy of the GNU Library General Public - License along with this library; if not, write to the Free - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -*/ - -#ifndef _CAMERA_H -#define _CAMERA_H - -#include "_pygame.h" -#include "camera.h" - -#endif - diff --git a/venv/include/site/python3.7/pygame/_pygame.h b/venv/include/site/python3.7/pygame/_pygame.h deleted file mode 100644 index 68962fc..0000000 --- a/venv/include/site/python3.7/pygame/_pygame.h +++ /dev/null @@ -1,864 +0,0 @@ -/* - pygame - Python Game Library - Copyright (C) 2000-2001 Pete Shinners - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; either - version 2 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Library General Public License for more details. - - You should have received a copy of the GNU Library General Public - License along with this library; if not, write to the Free - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - - Pete Shinners - pete@shinners.org -*/ - -#ifndef _PYGAME_H -#define _PYGAME_H - -/** This header file includes all the definitions for the - ** base pygame extensions. This header only requires - ** SDL and Python includes. The reason for functions - ** prototyped with #define's is to allow for maximum - ** python portability. It also uses python as the - ** runtime linker, which allows for late binding. For more - ** information on this style of development, read the Python - ** docs on this subject. 
- ** http://www.python.org/doc/current/ext/using-cobjects.html - ** - ** If using this to build your own derived extensions, - ** you'll see that the functions available here are mainly - ** used to help convert between python objects and SDL objects. - ** Since this library doesn't add a lot of functionality to - ** the SDL libarary, it doesn't need to offer a lot either. - ** - ** When initializing your extension module, you must manually - ** import the modules you want to use. (this is the part about - ** using python as the runtime linker). Each module has its - ** own import_xxx() routine. You need to perform this import - ** after you have initialized your own module, and before - ** you call any routines from that module. Since every module - ** in pygame does this, there are plenty of examples. - ** - ** The base module does include some useful conversion routines - ** that you are free to use in your own extension. - ** - ** When making changes, it is very important to keep the - ** FIRSTSLOT and NUMSLOT constants up to date for each - ** section. Also be sure not to overlap any of the slots. 
- ** When you do make a mistake with this, it will result - ** is a dereferenced NULL pointer that is easier to diagnose - ** than it could be :] - **/ -#if defined(HAVE_SNPRINTF) /* defined in python.h (pyerrors.h) and SDL.h \ - (SDL_config.h) */ -#undef HAVE_SNPRINTF /* remove GCC redefine warning */ -#endif - -// This must be before all else -#if defined(__SYMBIAN32__) && defined(OPENC) -#include - -#if defined(__WINS__) -void * -_alloca(size_t size); -#define alloca _alloca -#endif -#endif - -#define PG_STRINGIZE_HELPER(x) #x -#define PG_STRINGIZE(x) PG_STRINGIZE_HELPER(x) -#define PG_WARN(desc) message(__FILE__ "(" PG_STRINGIZE(__LINE__) "): WARNING: " #desc) - -/* This is unconditionally defined in Python.h */ -#if defined(_POSIX_C_SOURCE) -#undef _POSIX_C_SOURCE -#endif - -#include - -/* the version macros are defined since version 1.9.5 */ -#define PG_MAJOR_VERSION 1 -#define PG_MINOR_VERSION 9 -#define PG_PATCH_VERSION 6 -#define PG_VERSIONNUM(MAJOR, MINOR, PATCH) (1000*(MAJOR) + 100*(MINOR) + (PATCH)) -#define PG_VERSION_ATLEAST(MAJOR, MINOR, PATCH) \ - (PG_VERSIONNUM(PG_MAJOR_VERSION, PG_MINOR_VERSION, PG_PATCH_VERSION) >= \ - PG_VERSIONNUM(MAJOR, MINOR, PATCH)) - -/* Cobjects vanish in Python 3.2; so we will code as though we use capsules */ -#if defined(Py_CAPSULE_H) -#define PG_HAVE_CAPSULE 1 -#else -#define PG_HAVE_CAPSULE 0 -#endif -#if defined(Py_COBJECT_H) -#define PG_HAVE_COBJECT 1 -#else -#define PG_HAVE_COBJECT 0 -#endif -#if !PG_HAVE_CAPSULE -#define PyCapsule_New(ptr, n, dfn) PyCObject_FromVoidPtr(ptr, dfn) -#define PyCapsule_GetPointer(obj, n) PyCObject_AsVoidPtr(obj) -#define PyCapsule_CheckExact(obj) PyCObject_Check(obj) -#endif - -/* Pygame uses Py_buffer (PEP 3118) to exchange array information internally; - * define here as needed. 
- */ -#if !defined(PyBUF_SIMPLE) -typedef struct bufferinfo { - void *buf; - PyObject *obj; - Py_ssize_t len; - Py_ssize_t itemsize; - int readonly; - int ndim; - char *format; - Py_ssize_t *shape; - Py_ssize_t *strides; - Py_ssize_t *suboffsets; - void *internal; -} Py_buffer; - -/* Flags for getting buffers */ -#define PyBUF_SIMPLE 0 -#define PyBUF_WRITABLE 0x0001 -/* we used to include an E, backwards compatible alias */ -#define PyBUF_WRITEABLE PyBUF_WRITABLE -#define PyBUF_FORMAT 0x0004 -#define PyBUF_ND 0x0008 -#define PyBUF_STRIDES (0x0010 | PyBUF_ND) -#define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) -#define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) -#define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) -#define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) - -#define PyBUF_CONTIG (PyBUF_ND | PyBUF_WRITABLE) -#define PyBUF_CONTIG_RO (PyBUF_ND) - -#define PyBUF_STRIDED (PyBUF_STRIDES | PyBUF_WRITABLE) -#define PyBUF_STRIDED_RO (PyBUF_STRIDES) - -#define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_WRITABLE | PyBUF_FORMAT) -#define PyBUF_RECORDS_RO (PyBUF_STRIDES | PyBUF_FORMAT) - -#define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_WRITABLE | PyBUF_FORMAT) -#define PyBUF_FULL_RO (PyBUF_INDIRECT | PyBUF_FORMAT) - -#define PyBUF_READ 0x100 -#define PyBUF_WRITE 0x200 -#define PyBUF_SHADOW 0x400 - -typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); -typedef void (*releasebufferproc)(Py_buffer *); -#endif /* #if !defined(PyBUF_SIMPLE) */ - -/* Flag indicating a pg_buffer; used for assertions within callbacks */ -#ifndef NDEBUG -#define PyBUF_PYGAME 0x4000 -#endif - -#define PyBUF_HAS_FLAG(f, F) (((f) & (F)) == (F)) - -/* Array information exchange struct C type; inherits from Py_buffer - * - * Pygame uses its own Py_buffer derived C struct as an internal representation - * of an imported array buffer. 
The extended Py_buffer allows for a - * per-instance release callback, - */ -typedef void (*pybuffer_releaseproc)(Py_buffer *); - -typedef struct pg_bufferinfo_s { - Py_buffer view; - PyObject *consumer; /* Input: Borrowed reference */ - pybuffer_releaseproc release_buffer; -} pg_buffer; - -/* Operating system specific adjustments - */ -// No signal() -#if defined(__SYMBIAN32__) && defined(HAVE_SIGNAL_H) -#undef HAVE_SIGNAL_H -#endif - -#if defined(HAVE_SNPRINTF) -#undef HAVE_SNPRINTF -#endif - -#ifdef MS_WIN32 /*Python gives us MS_WIN32, SDL needs just WIN32*/ -#ifndef WIN32 -#define WIN32 -#endif -#endif - -/// Prefix when initializing module -#define MODPREFIX "" -/// Prefix when importing module -#define IMPPREFIX "pygame." - -#ifdef __SYMBIAN32__ -#undef MODPREFIX -#undef IMPPREFIX -// On Symbian there is no pygame package. The extensions are built-in or in -// sys\bin. -#define MODPREFIX "pygame_" -#define IMPPREFIX "pygame_" -#endif - -#include - -/* Pygame's SDL version macros: - * IS_SDLv1 is 1 if SDL 1.x.x, 0 otherwise - * IS_SDLv2 is 1 if at least SDL 2.0.0, 0 otherwise - */ -#if (SDL_VERSION_ATLEAST(2, 0, 0)) -#define IS_SDLv1 0 -#define IS_SDLv2 1 -#else -#define IS_SDLv1 1 -#define IS_SDLv2 0 -#endif - -/*#if IS_SDLv1 && PG_MAJOR_VERSION >= 2 -#error pygame 2 requires SDL 2 -#endif*/ - -#if IS_SDLv2 -/* SDL 1.2 constants removed from SDL 2 */ -typedef enum { - SDL_HWSURFACE = 0, - SDL_RESIZABLE = SDL_WINDOW_RESIZABLE, - SDL_ASYNCBLIT = 0, - SDL_OPENGL = SDL_WINDOW_OPENGL, - SDL_OPENGLBLIT = 0, - SDL_ANYFORMAT = 0, - SDL_HWPALETTE = 0, - SDL_DOUBLEBUF = 0, - SDL_FULLSCREEN = SDL_WINDOW_FULLSCREEN, - SDL_HWACCEL = 0, - SDL_SRCCOLORKEY = 0, - SDL_RLEACCELOK = 0, - SDL_SRCALPHA = 0, - SDL_NOFRAME = SDL_WINDOW_BORDERLESS, - SDL_GL_SWAP_CONTROL = 0, - TIMER_RESOLUTION = 0 -} PygameVideoFlags; - -/* the wheel button constants were removed from SDL 2 */ -typedef enum { - PGM_BUTTON_LEFT = SDL_BUTTON_LEFT, - PGM_BUTTON_RIGHT = SDL_BUTTON_RIGHT, - 
PGM_BUTTON_MIDDLE = SDL_BUTTON_MIDDLE, - PGM_BUTTON_WHEELUP = 4, - PGM_BUTTON_WHEELDOWN = 5, - PGM_BUTTON_X1 = SDL_BUTTON_X1 + 2, - PGM_BUTTON_X2 = SDL_BUTTON_X2 + 2, - PGM_BUTTON_KEEP = 0x80 -} PygameMouseFlags; - -typedef enum { - SDL_NOEVENT = 0, - /* SDL 1.2 allowed for 8 user defined events. */ - SDL_NUMEVENTS = SDL_USEREVENT + 8, - SDL_ACTIVEEVENT = SDL_NUMEVENTS, - PGE_EVENTBEGIN = SDL_NUMEVENTS, - SDL_VIDEORESIZE, - SDL_VIDEOEXPOSE, - PGE_KEYREPEAT, - PGE_EVENTEND -} PygameEventCode; - -#define PGE_NUMEVENTS (PGE_EVENTEND - PGE_EVENTBEGIN) - -typedef enum { - SDL_APPFOCUSMOUSE, - SDL_APPINPUTFOCUS, - SDL_APPACTIVE -} PygameAppCode; - -/* Surface flags: based on SDL 1.2 flags */ -typedef enum { - PGS_SWSURFACE = 0x00000000, - PGS_HWSURFACE = 0x00000001, - PGS_ASYNCBLIT = 0x00000004, - - PGS_ANYFORMAT = 0x10000000, - PGS_HWPALETTE = 0x20000000, - PGS_DOUBLEBUF = 0x40000000, - PGS_FULLSCREEN = 0x80000000, - PGS_OPENGL = 0x00000002, - PGS_OPENGLBLIT = 0x0000000A, - PGS_RESIZABLE = 0x00000010, - PGS_NOFRAME = 0x00000020, - PGS_SHOWN = 0x00000040, /* Added from SDL 2 */ - PGS_HIDDEN = 0x00000080, /* Added from SDL 2 */ - - PGS_HWACCEL = 0x00000100, - PGS_SRCCOLORKEY = 0x00001000, - PGS_RLEACCELOK = 0x00002000, - PGS_RLEACCEL = 0x00004000, - PGS_SRCALPHA = 0x00010000, - PGS_PREALLOC = 0x01000000 -} PygameSurfaceFlags; - -typedef struct { - Uint32 hw_available:1; - Uint32 wm_available:1; - Uint32 blit_hw:1; - Uint32 blit_hw_CC:1; - Uint32 blit_hw_A:1; - Uint32 blit_sw:1; - Uint32 blit_sw_CC:1; - Uint32 blit_sw_A:1; - Uint32 blit_fill:1; - Uint32 video_mem; - SDL_PixelFormat *vfmt; - SDL_PixelFormat vfmt_data; - int current_w; - int current_h; -} pg_VideoInfo; - -#endif /* IS_SDLv2 */ -/* macros used throughout the source */ -#define RAISE(x, y) (PyErr_SetString((x), (y)), (PyObject *)NULL) - -#ifdef WITH_THREAD -#define PG_CHECK_THREADS() (1) -#else /* ~WITH_THREAD */ -#define PG_CHECK_THREADS() \ - (RAISE(PyExc_NotImplementedError, \ - "Python built without thread 
support")) -#endif /* ~WITH_THREAD */ - -#define PyType_Init(x) (((x).ob_type) = &PyType_Type) -#define PYGAMEAPI_LOCAL_ENTRY "_PYGAME_C_API" - -#ifndef MIN -#define MIN(a, b) ((a) < (b) ? (a) : (b)) -#endif - -#ifndef MAX -#define MAX(a, b) ((a) > (b) ? (a) : (b)) -#endif - -#ifndef ABS -#define ABS(a) (((a) < 0) ? -(a) : (a)) -#endif - -/* test sdl initializations */ -#define VIDEO_INIT_CHECK() \ - if (!SDL_WasInit(SDL_INIT_VIDEO)) \ - return RAISE(pgExc_SDLError, "video system not initialized") - -#define CDROM_INIT_CHECK() \ - if (!SDL_WasInit(SDL_INIT_CDROM)) \ - return RAISE(pgExc_SDLError, "cdrom system not initialized") - -#define JOYSTICK_INIT_CHECK() \ - if (!SDL_WasInit(SDL_INIT_JOYSTICK)) \ - return RAISE(pgExc_SDLError, "joystick system not initialized") - -/* BASE */ -#define VIEW_CONTIGUOUS 1 -#define VIEW_C_ORDER 2 -#define VIEW_F_ORDER 4 - -#define PYGAMEAPI_BASE_FIRSTSLOT 0 -#if IS_SDLv1 -#define PYGAMEAPI_BASE_NUMSLOTS 19 -#else /* IS_SDLv2 */ -#define PYGAMEAPI_BASE_NUMSLOTS 23 -#endif /* IS_SDLv2 */ -#ifndef PYGAMEAPI_BASE_INTERNAL -#define pgExc_SDLError ((PyObject *)PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT]) - -#define pg_RegisterQuit \ - (*(void (*)(void (*)(void)))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 1]) - -#define pg_IntFromObj \ - (*(int (*)(PyObject *, int *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 2]) - -#define pg_IntFromObjIndex \ - (*(int (*)(PyObject *, int, \ - int *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 3]) - -#define pg_TwoIntsFromObj \ - (*(int (*)(PyObject *, int *, \ - int *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 4]) - -#define pg_FloatFromObj \ - (*(int (*)(PyObject *, float *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 5]) - -#define pg_FloatFromObjIndex \ - (*(int (*)(PyObject *, int, \ - float *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 6]) - -#define pg_TwoFloatsFromObj \ - (*(int (*)(PyObject *, float *, \ - float *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 7]) - -#define pg_UintFromObj \ - (*(int (*)(PyObject *, \ - 
Uint32 *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 8]) - -#define pg_UintFromObjIndex \ - (*(int (*)(PyObject *, int, \ - Uint32 *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 9]) - -#define pgVideo_AutoQuit \ - (*(void (*)(void))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 10]) - -#define pgVideo_AutoInit \ - (*(int (*)(void))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 11]) - -#define pg_RGBAFromObj \ - (*(int (*)(PyObject *, \ - Uint8 *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 12]) - -#define pgBuffer_AsArrayInterface \ - (*(PyObject * (*)(Py_buffer *)) \ - PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 13]) - -#define pgBuffer_AsArrayStruct \ - (*(PyObject * (*)(Py_buffer *)) \ - PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 14]) - -#define pgObject_GetBuffer \ - (*(int (*)(PyObject *, pg_buffer *, \ - int))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 15]) - -#define pgBuffer_Release \ - (*(void (*)(pg_buffer *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 16]) - -#define pgDict_AsBuffer \ - (*(int (*)(pg_buffer *, PyObject *, \ - int))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 17]) - -#define pgExc_BufferError \ - ((PyObject *)PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 18]) - -#if IS_SDLv2 -#define pg_GetDefaultWindow \ - (*(SDL_Window * (*)(void)) PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 19]) - -#define pg_SetDefaultWindow \ - (*(void (*)(SDL_Window *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 20]) - -#define pg_GetDefaultWindowSurface \ - (*(PyObject * (*)(void)) PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 21]) - -#define pg_SetDefaultWindowSurface \ - (*(void (*)(PyObject *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 22]) - -#endif /* IS_SDLv2 */ - -#define import_pygame_base() IMPORT_PYGAME_MODULE(base, BASE) -#endif - -/* RECT */ -#define PYGAMEAPI_RECT_FIRSTSLOT \ - (PYGAMEAPI_BASE_FIRSTSLOT + PYGAMEAPI_BASE_NUMSLOTS) -#define PYGAMEAPI_RECT_NUMSLOTS 4 - -#if IS_SDLv1 -typedef struct { - int x, y; - int w, h; -} GAME_Rect; -#else -typedef SDL_Rect GAME_Rect; -#endif - -typedef struct { - PyObject_HEAD 
GAME_Rect r; - PyObject *weakreflist; -} pgRectObject; - -#define pgRect_AsRect(x) (((pgRectObject *)x)->r) -#ifndef PYGAMEAPI_RECT_INTERNAL -#define pgRect_Check(x) \ - ((x)->ob_type == \ - (PyTypeObject *)PyGAME_C_API[PYGAMEAPI_RECT_FIRSTSLOT + 0]) -#define pgRect_Type \ - (*(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_RECT_FIRSTSLOT + 0]) -#define pgRect_New \ - (*(PyObject * (*)(SDL_Rect *)) PyGAME_C_API[PYGAMEAPI_RECT_FIRSTSLOT + 1]) -#define pgRect_New4 \ - (*(PyObject * (*)(int, int, int, int)) \ - PyGAME_C_API[PYGAMEAPI_RECT_FIRSTSLOT + 2]) -#define pgRect_FromObject \ - (*(GAME_Rect * (*)(PyObject *, GAME_Rect *)) \ - PyGAME_C_API[PYGAMEAPI_RECT_FIRSTSLOT + 3]) - -#define import_pygame_rect() IMPORT_PYGAME_MODULE(rect, RECT) -#endif - -/* CDROM */ -#define PYGAMEAPI_CDROM_FIRSTSLOT \ - (PYGAMEAPI_RECT_FIRSTSLOT + PYGAMEAPI_RECT_NUMSLOTS) -#define PYGAMEAPI_CDROM_NUMSLOTS 2 - -typedef struct { - PyObject_HEAD int id; -} pgCDObject; - -#define pgCD_AsID(x) (((pgCDObject *)x)->id) -#ifndef PYGAMEAPI_CDROM_INTERNAL -#define pgCD_Check(x) \ - ((x)->ob_type == \ - (PyTypeObject *)PyGAME_C_API[PYGAMEAPI_CDROM_FIRSTSLOT + 0]) -#define pgCD_Type \ - (*(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_CDROM_FIRSTSLOT + 0]) -#define pgCD_New \ - (*(PyObject * (*)(int)) PyGAME_C_API[PYGAMEAPI_CDROM_FIRSTSLOT + 1]) - -#define import_pygame_cd() IMPORT_PYGAME_MODULE(cdrom, CDROM) -#endif - -/* JOYSTICK */ -#define PYGAMEAPI_JOYSTICK_FIRSTSLOT \ - (PYGAMEAPI_CDROM_FIRSTSLOT + PYGAMEAPI_CDROM_NUMSLOTS) -#define PYGAMEAPI_JOYSTICK_NUMSLOTS 2 - -typedef struct { - PyObject_HEAD int id; -} pgJoystickObject; - -#define pgJoystick_AsID(x) (((pgJoystickObject *)x)->id) - -#ifndef PYGAMEAPI_JOYSTICK_INTERNAL -#define pgJoystick_Check(x) \ - ((x)->ob_type == \ - (PyTypeObject *)PyGAME_C_API[PYGAMEAPI_JOYSTICK_FIRSTSLOT + 0]) - -#define pgJoystick_Type \ - (*(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_JOYSTICK_FIRSTSLOT + 0]) -#define pgJoystick_New \ - (*(PyObject * (*)(int)) 
PyGAME_C_API[PYGAMEAPI_JOYSTICK_FIRSTSLOT + 1]) - -#define import_pygame_joystick() IMPORT_PYGAME_MODULE(joystick, JOYSTICK) -#endif - -/* DISPLAY */ -#define PYGAMEAPI_DISPLAY_FIRSTSLOT \ - (PYGAMEAPI_JOYSTICK_FIRSTSLOT + PYGAMEAPI_JOYSTICK_NUMSLOTS) -#define PYGAMEAPI_DISPLAY_NUMSLOTS 2 - -typedef struct { -#if IS_SDLv1 - PyObject_HEAD SDL_VideoInfo info; -#else - PyObject_HEAD pg_VideoInfo info; -#endif -} pgVidInfoObject; - -#define pgVidInfo_AsVidInfo(x) (((pgVidInfoObject *)x)->info) -#ifndef PYGAMEAPI_DISPLAY_INTERNAL -#define pgVidInfo_Check(x) \ - ((x)->ob_type == \ - (PyTypeObject *)PyGAME_C_API[PYGAMEAPI_DISPLAY_FIRSTSLOT + 0]) - -#define pgVidInfo_Type \ - (*(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_DISPLAY_FIRSTSLOT + 0]) - -#if IS_SDLv1 -#define pgVidInfo_New \ - (*(PyObject * (*)(SDL_VideoInfo *)) \ - PyGAME_C_API[PYGAMEAPI_DISPLAY_FIRSTSLOT + 1]) -#else -#define pgVidInfo_New \ - (*(PyObject * (*)(pg_VideoInfo *)) \ - PyGAME_C_API[PYGAMEAPI_DISPLAY_FIRSTSLOT + 1]) -#endif - -#define import_pygame_display() IMPORT_PYGAME_MODULE(display, DISPLAY) -#endif - -/* SURFACE */ -#define PYGAMEAPI_SURFACE_FIRSTSLOT \ - (PYGAMEAPI_DISPLAY_FIRSTSLOT + PYGAMEAPI_DISPLAY_NUMSLOTS) -#define PYGAMEAPI_SURFACE_NUMSLOTS 3 -typedef struct { - PyObject_HEAD SDL_Surface *surf; -#if IS_SDLv2 - int owner; -#endif /* IS_SDLv2 */ - struct pgSubSurface_Data *subsurface; /*ptr to subsurface data (if a - * subsurface)*/ - PyObject *weakreflist; - PyObject *locklist; - PyObject *dependency; -} pgSurfaceObject; -#define pgSurface_AsSurface(x) (((pgSurfaceObject *)x)->surf) -#ifndef PYGAMEAPI_SURFACE_INTERNAL -#define pgSurface_Check(x) \ - (PyObject_IsInstance((x), \ - (PyObject *)PyGAME_C_API[PYGAMEAPI_SURFACE_FIRSTSLOT + 0])) -#define pgSurface_Type \ - (*(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_SURFACE_FIRSTSLOT + 0]) -#if IS_SDLv1 -#define pgSurface_New \ - (*(PyObject * (*)(SDL_Surface *)) \ - PyGAME_C_API[PYGAMEAPI_SURFACE_FIRSTSLOT + 1]) -#else /* IS_SDLv2 */ -#define 
pgSurface_New2 \ - (*(PyObject * (*)(SDL_Surface *, int)) \ - PyGAME_C_API[PYGAMEAPI_SURFACE_FIRSTSLOT + 1]) -#endif /* IS_SDLv2 */ -#define pgSurface_Blit \ - (*(int (*)(PyObject *, PyObject *, SDL_Rect *, SDL_Rect *, \ - int))PyGAME_C_API[PYGAMEAPI_SURFACE_FIRSTSLOT + 2]) - -#define import_pygame_surface() \ - do { \ - IMPORT_PYGAME_MODULE(surface, SURFACE); \ - if (PyErr_Occurred() != NULL) \ - break; \ - IMPORT_PYGAME_MODULE(surflock, SURFLOCK); \ - } while (0) - -#if IS_SDLv2 -#define pgSurface_New(surface) pgSurface_New2((surface), 1) -#define pgSurface_NewNoOwn(surface) pgSurface_New2((surface), 0) -#endif /* IS_SDLv2 */ - -#endif - -/* SURFLOCK */ /*auto import/init by surface*/ -#define PYGAMEAPI_SURFLOCK_FIRSTSLOT \ - (PYGAMEAPI_SURFACE_FIRSTSLOT + PYGAMEAPI_SURFACE_NUMSLOTS) -#define PYGAMEAPI_SURFLOCK_NUMSLOTS 8 -struct pgSubSurface_Data { - PyObject *owner; - int pixeloffset; - int offsetx, offsety; -}; - -typedef struct { - PyObject_HEAD PyObject *surface; - PyObject *lockobj; - PyObject *weakrefs; -} pgLifetimeLockObject; - -#ifndef PYGAMEAPI_SURFLOCK_INTERNAL -#define pgLifetimeLock_Check(x) \ - ((x)->ob_type == \ - (PyTypeObject *)PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 0]) -#define pgSurface_Prep(x) \ - if (((pgSurfaceObject *)x)->subsurface) \ - (*(*(void (*)( \ - PyObject *))PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 1]))(x) - -#define pgSurface_Unprep(x) \ - if (((pgSurfaceObject *)x)->subsurface) \ - (*(*(void (*)( \ - PyObject *))PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 2]))(x) - -#define pgSurface_Lock \ - (*(int (*)(PyObject *))PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 3]) -#define pgSurface_Unlock \ - (*(int (*)(PyObject *))PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 4]) -#define pgSurface_LockBy \ - (*(int (*)(PyObject *, \ - PyObject *))PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 5]) -#define pgSurface_UnlockBy \ - (*(int (*)(PyObject *, \ - PyObject *))PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 6]) -#define 
pgSurface_LockLifetime \ - (*(PyObject * (*)(PyObject *, PyObject *)) \ - PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 7]) -#endif - -/* EVENT */ -#define PYGAMEAPI_EVENT_FIRSTSLOT \ - (PYGAMEAPI_SURFLOCK_FIRSTSLOT + PYGAMEAPI_SURFLOCK_NUMSLOTS) -#if IS_SDLv1 -#define PYGAMEAPI_EVENT_NUMSLOTS 4 -#else /* IS_SDLv2 */ -#define PYGAMEAPI_EVENT_NUMSLOTS 6 -#endif /* IS_SDLv2 */ - -typedef struct { - PyObject_HEAD int type; - PyObject *dict; -} pgEventObject; - -#ifndef PYGAMEAPI_EVENT_INTERNAL -#define pgEvent_Check(x) \ - ((x)->ob_type == \ - (PyTypeObject *)PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 0]) -#define pgEvent_Type \ - (*(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 0]) -#define pgEvent_New \ - (*(PyObject * (*)(SDL_Event *)) \ - PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 1]) -#define pgEvent_New2 \ - (*(PyObject * (*)(int, PyObject *)) \ - PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 2]) -#define pgEvent_FillUserEvent \ - (*(int (*)(pgEventObject *, \ - SDL_Event *))PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 3]) -#if IS_SDLv2 -#define pg_EnableKeyRepeat \ - (*(int (*)(int, int))PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 4]) -#define pg_GetKeyRepeat \ - (*(void (*)(int *, int *))PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 5]) -#endif /* IS_SDLv2 */ -#define import_pygame_event() IMPORT_PYGAME_MODULE(event, EVENT) -#endif - -/* RWOBJECT */ -/*the rwobject are only needed for C side work, not accessable from python*/ -#define PYGAMEAPI_RWOBJECT_FIRSTSLOT \ - (PYGAMEAPI_EVENT_FIRSTSLOT + PYGAMEAPI_EVENT_NUMSLOTS) -#define PYGAMEAPI_RWOBJECT_NUMSLOTS 6 -#ifndef PYGAMEAPI_RWOBJECT_INTERNAL -#define pgRWops_FromObject \ - (*(SDL_RWops * (*)(PyObject *)) \ - PyGAME_C_API[PYGAMEAPI_RWOBJECT_FIRSTSLOT + 0]) -#define pgRWops_IsFileObject \ - (*(int (*)(SDL_RWops *))PyGAME_C_API[PYGAMEAPI_RWOBJECT_FIRSTSLOT + 1]) -#define pg_EncodeFilePath \ - (*(PyObject * (*)(PyObject *, PyObject *)) \ - PyGAME_C_API[PYGAMEAPI_RWOBJECT_FIRSTSLOT + 2]) -#define pg_EncodeString \ - 
(*(PyObject * (*)(PyObject *, const char *, const char *, PyObject *)) \ - PyGAME_C_API[PYGAMEAPI_RWOBJECT_FIRSTSLOT + 3]) -#define pgRWops_FromFileObject \ - (*(SDL_RWops * (*)(PyObject *)) \ - PyGAME_C_API[PYGAMEAPI_RWOBJECT_FIRSTSLOT + 4]) -#define pgRWops_ReleaseObject \ - (*(int (*)(SDL_RWops *)) \ - PyGAME_C_API[PYGAMEAPI_RWOBJECT_FIRSTSLOT + 5]) -#define import_pygame_rwobject() IMPORT_PYGAME_MODULE(rwobject, RWOBJECT) - -#endif - -/* PixelArray */ -#define PYGAMEAPI_PIXELARRAY_FIRSTSLOT \ - (PYGAMEAPI_RWOBJECT_FIRSTSLOT + PYGAMEAPI_RWOBJECT_NUMSLOTS) -#define PYGAMEAPI_PIXELARRAY_NUMSLOTS 2 -#ifndef PYGAMEAPI_PIXELARRAY_INTERNAL -#define PyPixelArray_Check(x) \ - ((x)->ob_type == \ - (PyTypeObject *)PyGAME_C_API[PYGAMEAPI_PIXELARRAY_FIRSTSLOT + 0]) -#define PyPixelArray_New \ - (*(PyObject * (*)) PyGAME_C_API[PYGAMEAPI_PIXELARRAY_FIRSTSLOT + 1]) -#define import_pygame_pixelarray() IMPORT_PYGAME_MODULE(pixelarray, PIXELARRAY) -#endif /* PYGAMEAPI_PIXELARRAY_INTERNAL */ - -/* Color */ -#define PYGAMEAPI_COLOR_FIRSTSLOT \ - (PYGAMEAPI_PIXELARRAY_FIRSTSLOT + PYGAMEAPI_PIXELARRAY_NUMSLOTS) -#define PYGAMEAPI_COLOR_NUMSLOTS 4 -#ifndef PYGAMEAPI_COLOR_INTERNAL -#define pgColor_Check(x) \ - ((x)->ob_type == \ - (PyTypeObject *)PyGAME_C_API[PYGAMEAPI_COLOR_FIRSTSLOT + 0]) -#define pgColor_Type (*(PyObject *)PyGAME_C_API[PYGAMEAPI_COLOR_FIRSTSLOT]) -#define pgColor_New \ - (*(PyObject * (*)(Uint8 *)) PyGAME_C_API[PYGAMEAPI_COLOR_FIRSTSLOT + 1]) -#define pgColor_NewLength \ - (*(PyObject * (*)(Uint8 *, Uint8)) \ - PyGAME_C_API[PYGAMEAPI_COLOR_FIRSTSLOT + 3]) - -#define pg_RGBAFromColorObj \ - (*(int (*)(PyObject *, \ - Uint8 *))PyGAME_C_API[PYGAMEAPI_COLOR_FIRSTSLOT + 2]) -#define import_pygame_color() IMPORT_PYGAME_MODULE(color, COLOR) -#endif /* PYGAMEAPI_COLOR_INTERNAL */ - -/* Math */ -#define PYGAMEAPI_MATH_FIRSTSLOT \ - (PYGAMEAPI_COLOR_FIRSTSLOT + PYGAMEAPI_COLOR_NUMSLOTS) -#define PYGAMEAPI_MATH_NUMSLOTS 2 -#ifndef PYGAMEAPI_MATH_INTERNAL -#define 
pgVector2_Check(x) \ - ((x)->ob_type == \ - (PyTypeObject *)PyGAME_C_API[PYGAMEAPI_MATH_FIRSTSLOT + 0]) -#define pgVector3_Check(x) \ - ((x)->ob_type == \ - (PyTypeObject *)PyGAME_C_API[PYGAMEAPI_MATH_FIRSTSLOT + 1]) -/* -#define pgVector2_New \ - (*(PyObject*(*)) PyGAME_C_API[PYGAMEAPI_MATH_FIRSTSLOT + 1]) -*/ -#define import_pygame_math() IMPORT_PYGAME_MODULE(math, MATH) -#endif /* PYGAMEAPI_MATH_INTERNAL */ - -#define PG_CAPSULE_NAME(m) (IMPPREFIX m "." PYGAMEAPI_LOCAL_ENTRY) - -#define _IMPORT_PYGAME_MODULE(module, MODULE, api_root) \ - { \ - PyObject *_module = PyImport_ImportModule(IMPPREFIX #module); \ - \ - if (_module != NULL) { \ - PyObject *_c_api = \ - PyObject_GetAttrString(_module, PYGAMEAPI_LOCAL_ENTRY); \ - \ - Py_DECREF(_module); \ - if (_c_api != NULL && PyCapsule_CheckExact(_c_api)) { \ - void **localptr = (void **)PyCapsule_GetPointer( \ - _c_api, PG_CAPSULE_NAME(#module)); \ - \ - if (localptr != NULL) { \ - memcpy(api_root + PYGAMEAPI_##MODULE##_FIRSTSLOT, \ - localptr, \ - sizeof(void **) * PYGAMEAPI_##MODULE##_NUMSLOTS); \ - } \ - } \ - Py_XDECREF(_c_api); \ - } \ - } - -#ifndef NO_PYGAME_C_API -#define IMPORT_PYGAME_MODULE(module, MODULE) \ - _IMPORT_PYGAME_MODULE(module, MODULE, PyGAME_C_API) -#define PYGAMEAPI_TOTALSLOTS \ - (PYGAMEAPI_MATH_FIRSTSLOT + PYGAMEAPI_MATH_NUMSLOTS) - -#ifdef PYGAME_H -void *PyGAME_C_API[PYGAMEAPI_TOTALSLOTS] = {NULL}; -#else -extern void *PyGAME_C_API[PYGAMEAPI_TOTALSLOTS]; -#endif -#endif - -#if PG_HAVE_CAPSULE -#define encapsulate_api(ptr, module) \ - PyCapsule_New(ptr, PG_CAPSULE_NAME(module), NULL) -#else -#define encapsulate_api(ptr, module) PyCObject_FromVoidPtr(ptr, NULL) -#endif - -#ifndef PG_INLINE -#if defined(__clang__) -#define PG_INLINE __inline__ __attribute__((__unused__)) -#elif defined(__GNUC__) -#define PG_INLINE __inline__ -#elif defined(_MSC_VER) -#define PG_INLINE __inline -#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L -#define PG_INLINE inline -#else -#define PG_INLINE 
-#endif -#endif - -/*last platform compiler stuff*/ -#if defined(macintosh) && defined(__MWERKS__) || defined(__SYMBIAN32__) -#define PYGAME_EXPORT __declspec(export) -#else -#define PYGAME_EXPORT -#endif - - -#endif /* PYGAME_H */ diff --git a/venv/include/site/python3.7/pygame/_surface.h b/venv/include/site/python3.7/pygame/_surface.h deleted file mode 100644 index 016aac0..0000000 --- a/venv/include/site/python3.7/pygame/_surface.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - pygame - Python Game Library - Copyright (C) 2000-2001 Pete Shinners - Copyright (C) 2007 Marcus von Appen - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; either - version 2 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Library General Public License for more details. - - You should have received a copy of the GNU Library General Public - License along with this library; if not, write to the Free - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - - Pete Shinners - pete@shinners.org -*/ - -#ifndef _SURFACE_H -#define _SURFACE_H - -#include "_pygame.h" -#include "surface.h" - -#endif - diff --git a/venv/include/site/python3.7/pygame/bitmask.h b/venv/include/site/python3.7/pygame/bitmask.h deleted file mode 100644 index 1230497..0000000 --- a/venv/include/site/python3.7/pygame/bitmask.h +++ /dev/null @@ -1,146 +0,0 @@ -/* - Bitmask 1.7 - A pixel-perfect collision detection library. - - Copyright (C) 2002-2005 Ulf Ekstrom except for the bitcount - function which is copyright (C) Donald W. Gillies, 1992. 
- - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; either - version 2 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Library General Public License for more details. - - You should have received a copy of the GNU Library General Public - License along with this library; if not, write to the Free - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ -#ifndef BITMASK_H -#define BITMASK_H - -#ifdef __cplusplus -extern "C" { -#endif - -#include -/* Define INLINE for different compilers. If your compiler does not - support inlining then there might be a performance hit in - bitmask_overlap_area(). -*/ -#ifndef INLINE -# ifdef __GNUC__ -# define INLINE inline -# else -# ifdef _MSC_VER -# define INLINE __inline -# else -# define INLINE -# endif -# endif -#endif - -#define BITMASK_W unsigned long int -#define BITMASK_W_LEN (sizeof(BITMASK_W)*CHAR_BIT) -#define BITMASK_W_MASK (BITMASK_W_LEN - 1) -#define BITMASK_N(n) ((BITMASK_W)1 << (n)) - -typedef struct bitmask -{ - int w,h; - BITMASK_W bits[1]; -} bitmask_t; - -/* Creates a bitmask of width w and height h, where - w and h must both be greater than or equal to 0. - The mask is automatically cleared when created. - */ -bitmask_t *bitmask_create(int w, int h); - -/* Frees all the memory allocated by bitmask_create for m. 
*/ -void bitmask_free(bitmask_t *m); - -/* Clears all bits in the mask */ -void bitmask_clear(bitmask_t *m); - -/* Sets all bits in the mask */ -void bitmask_fill(bitmask_t *m); - -/* Flips all bits in the mask */ -void bitmask_invert(bitmask_t *m); - -/* Counts the bits in the mask */ -unsigned int bitmask_count(bitmask_t *m); - -/* Returns nonzero if the bit at (x,y) is set. Coordinates start at - (0,0) */ -static INLINE int bitmask_getbit(const bitmask_t *m, int x, int y) -{ - return (m->bits[x/BITMASK_W_LEN*m->h + y] & BITMASK_N(x & BITMASK_W_MASK)) != 0; -} - -/* Sets the bit at (x,y) */ -static INLINE void bitmask_setbit(bitmask_t *m, int x, int y) -{ - m->bits[x/BITMASK_W_LEN*m->h + y] |= BITMASK_N(x & BITMASK_W_MASK); -} - -/* Clears the bit at (x,y) */ -static INLINE void bitmask_clearbit(bitmask_t *m, int x, int y) -{ - m->bits[x/BITMASK_W_LEN*m->h + y] &= ~BITMASK_N(x & BITMASK_W_MASK); -} - -/* Returns nonzero if the masks overlap with the given offset. - The overlap tests uses the following offsets (which may be negative): - - +----+----------.. - |A | yoffset - | +-+----------.. - +--|B - |xoffset - | | - : : -*/ -int bitmask_overlap(const bitmask_t *a, const bitmask_t *b, int xoffset, int yoffset); - -/* Like bitmask_overlap(), but will also give a point of intersection. - x and y are given in the coordinates of mask a, and are untouched - if there is no overlap. */ -int bitmask_overlap_pos(const bitmask_t *a, const bitmask_t *b, - int xoffset, int yoffset, int *x, int *y); - -/* Returns the number of overlapping 'pixels' */ -int bitmask_overlap_area(const bitmask_t *a, const bitmask_t *b, int xoffset, int yoffset); - -/* Fills a mask with the overlap of two other masks. A bitwise AND. */ -void bitmask_overlap_mask (const bitmask_t *a, const bitmask_t *b, bitmask_t *c, int xoffset, int yoffset); - -/* Draws mask b onto mask a (bitwise OR). Can be used to compose large - (game background?) mask from several submasks, which may speed up - the testing. 
*/ - -void bitmask_draw(bitmask_t *a, const bitmask_t *b, int xoffset, int yoffset); - -void bitmask_erase(bitmask_t *a, const bitmask_t *b, int xoffset, int yoffset); - -/* Return a new scaled bitmask, with dimensions w*h. The quality of the - scaling may not be perfect for all circumstances, but it should - be reasonable. If either w or h is 0 a clear 1x1 mask is returned. */ -bitmask_t *bitmask_scale(const bitmask_t *m, int w, int h); - -/* Convolve b into a, drawing the output into o, shifted by offset. If offset - * is 0, then the (x,y) bit will be set if and only if - * bitmask_overlap(a, b, x - b->w - 1, y - b->h - 1) returns true. - * - * Modifies bits o[xoffset ... xoffset + a->w + b->w - 1) - * [yoffset ... yoffset + a->h + b->h - 1). */ -void bitmask_convolve(const bitmask_t *a, const bitmask_t *b, bitmask_t *o, int xoffset, int yoffset); - -#ifdef __cplusplus -} /* End of extern "C" { */ -#endif - -#endif diff --git a/venv/include/site/python3.7/pygame/camera.h b/venv/include/site/python3.7/pygame/camera.h deleted file mode 100644 index 46d2beb..0000000 --- a/venv/include/site/python3.7/pygame/camera.h +++ /dev/null @@ -1,201 +0,0 @@ -/* - pygame - Python Game Library - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; either - version 2 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Library General Public License for more details. 
- - You should have received a copy of the GNU Library General Public - License along with this library; if not, write to the Free - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -*/ - -#include "pygame.h" -#include "doc/camera_doc.h" - -#if defined(__unix__) - #include - #include - #include - #include - #include - - #include /* low-level i/o */ - #include - #include - #include - #include - #include - #include - #include - - /* on freebsd there is no asm/types */ - #ifdef linux - #include /* for videodev2.h */ - #endif - - #include -#elif defined(__APPLE__) - #include - /* We support OSX 10.6 and below. */ - #if __MAC_OS_X_VERSION_MAX_ALLOWED <= 1060 - #define PYGAME_MAC_CAMERA_OLD 1 - #endif -#endif - -#if defined(PYGAME_MAC_CAMERA_OLD) - #include - #include - #include -#endif - -/* some constants used which are not defined on non-v4l machines. */ -#ifndef V4L2_PIX_FMT_RGB24 - #define V4L2_PIX_FMT_RGB24 'RGB3' -#endif -#ifndef V4L2_PIX_FMT_RGB444 - #define V4L2_PIX_FMT_RGB444 'R444' -#endif -#ifndef V4L2_PIX_FMT_YUYV - #define V4L2_PIX_FMT_YUYV 'YUYV' -#endif - -#define CLEAR(x) memset (&(x), 0, sizeof (x)) -#define SAT(c) if (c & (~255)) { if (c < 0) c = 0; else c = 255; } -#define SAT2(c) ((c) & (~255) ? ((c) < 0 ? 0 : 255) : (c)) -#define DEFAULT_WIDTH 640 -#define DEFAULT_HEIGHT 480 -#define RGB_OUT 1 -#define YUV_OUT 2 -#define HSV_OUT 4 -#define CAM_V4L 1 /* deprecated. 
the incomplete support in pygame was removed */ -#define CAM_V4L2 2 - -struct buffer { - void * start; - size_t length; -}; - -#if defined(__unix__) -typedef struct pgCameraObject { - PyObject_HEAD - char* device_name; - int camera_type; - unsigned long pixelformat; - unsigned int color_out; - struct buffer* buffers; - unsigned int n_buffers; - int width; - int height; - int size; - int hflip; - int vflip; - int brightness; - int fd; -} pgCameraObject; -#elif defined(PYGAME_MAC_CAMERA_OLD) -typedef struct pgCameraObject { - PyObject_HEAD - char* device_name; /* unieke name of the device */ - OSType pixelformat; - unsigned int color_out; - SeqGrabComponent component; /* A type used by the Sequence Grabber API */ - SGChannel channel; /* Channel of the Sequence Grabber */ - GWorldPtr gworld; /* Pointer to the struct that holds the data of the captured image */ - Rect boundsRect; /* bounds of the image frame */ - long size; /* size of the image in our buffer to draw */ - int hflip; - int vflip; - short depth; - struct buffer pixels; - //struct buffer tmp_pixels /* place where the flipped image in temporarly stored if hflip or vflip is true.*/ -} pgCameraObject; - -#else -/* generic definition. 
-*/ - -typedef struct pgCameraObject { - PyObject_HEAD - char* device_name; - int camera_type; - unsigned long pixelformat; - unsigned int color_out; - struct buffer* buffers; - unsigned int n_buffers; - int width; - int height; - int size; - int hflip; - int vflip; - int brightness; - int fd; -} pgCameraObject; -#endif - -/* internal functions for colorspace conversion */ -void colorspace (SDL_Surface *src, SDL_Surface *dst, int cspace); -void rgb24_to_rgb (const void* src, void* dst, int length, SDL_PixelFormat* format); -void rgb444_to_rgb (const void* src, void* dst, int length, SDL_PixelFormat* format); -void rgb_to_yuv (const void* src, void* dst, int length, - unsigned long source, SDL_PixelFormat* format); -void rgb_to_hsv (const void* src, void* dst, int length, - unsigned long source, SDL_PixelFormat* format); -void yuyv_to_rgb (const void* src, void* dst, int length, SDL_PixelFormat* format); -void yuyv_to_yuv (const void* src, void* dst, int length, SDL_PixelFormat* format); -void uyvy_to_rgb (const void* src, void* dst, int length, SDL_PixelFormat* format); -void uyvy_to_yuv (const void* src, void* dst, int length, SDL_PixelFormat* format); -void sbggr8_to_rgb (const void* src, void* dst, int width, int height, - SDL_PixelFormat* format); -void yuv420_to_rgb (const void* src, void* dst, int width, int height, - SDL_PixelFormat* format); -void yuv420_to_yuv (const void* src, void* dst, int width, int height, - SDL_PixelFormat* format); - -#if defined(__unix__) -/* internal functions specific to v4l2 */ -char** v4l2_list_cameras (int* num_devices); -int v4l2_get_control (int fd, int id, int *value); -int v4l2_set_control (int fd, int id, int value); -PyObject* v4l2_read_raw (pgCameraObject* self); -int v4l2_xioctl (int fd, int request, void *arg); -int v4l2_process_image (pgCameraObject* self, const void *image, - unsigned int buffer_size, SDL_Surface* surf); -int v4l2_query_buffer (pgCameraObject* self); -int v4l2_read_frame (pgCameraObject* self, 
SDL_Surface* surf); -int v4l2_stop_capturing (pgCameraObject* self); -int v4l2_start_capturing (pgCameraObject* self); -int v4l2_uninit_device (pgCameraObject* self); -int v4l2_init_mmap (pgCameraObject* self); -int v4l2_init_device (pgCameraObject* self); -int v4l2_close_device (pgCameraObject* self); -int v4l2_open_device (pgCameraObject* self); - -#elif defined(PYGAME_MAC_CAMERA_OLD) -/* internal functions specific to mac */ -char** mac_list_cameras(int* num_devices); -int mac_open_device (pgCameraObject* self); -int mac_init_device(pgCameraObject* self); -int mac_close_device (pgCameraObject* self); -int mac_start_capturing(pgCameraObject* self); -int mac_stop_capturing (pgCameraObject* self); - -int mac_get_control(pgCameraObject* self, int id, int* value); -int mac_set_control(pgCameraObject* self, int id, int value); - -PyObject* mac_read_raw(pgCameraObject *self); -int mac_read_frame(pgCameraObject* self, SDL_Surface* surf); -int mac_camera_idle(pgCameraObject* self); -int mac_copy_gworld_to_surface(pgCameraObject* self, SDL_Surface* surf); - -void flip_image(const void* image, void* flipped_image, int width, int height, - short depth, int hflip, int vflip); - -#endif diff --git a/venv/include/site/python3.7/pygame/fastevents.h b/venv/include/site/python3.7/pygame/fastevents.h deleted file mode 100644 index 04098c3..0000000 --- a/venv/include/site/python3.7/pygame/fastevents.h +++ /dev/null @@ -1,48 +0,0 @@ -#ifndef _FASTEVENTS_H_ -#define _FASTEVENTS_H_ -/* - NET2 is a threaded, event based, network IO library for SDL. - Copyright (C) 2002 Bob Pendleton - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public License - as published by the Free Software Foundation; either version 2.1 - of the License, or (at your option) any later version. 
- - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free - Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA - 02111-1307 USA - - If you do not wish to comply with the terms of the LGPL please - contact the author as other terms are available for a fee. - - Bob Pendleton - Bob@Pendleton.com -*/ - -#include "SDL.h" - -#ifdef __cplusplus -extern "C" { -#endif - - int FE_Init(void); // Initialize FE - void FE_Quit(void); // shutdown FE - - void FE_PumpEvents(void); // replacement for SDL_PumpEvents - int FE_PollEvent(SDL_Event *event); // replacement for SDL_PollEvent - int FE_WaitEvent(SDL_Event *event); // replacement for SDL_WaitEvent - int FE_PushEvent(SDL_Event *event); // replacement for SDL_PushEvent - - char *FE_GetError(void); // get the last error -#ifdef __cplusplus -} -#endif - -#endif diff --git a/venv/include/site/python3.7/pygame/font.h b/venv/include/site/python3.7/pygame/font.h deleted file mode 100644 index b861a29..0000000 --- a/venv/include/site/python3.7/pygame/font.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - pygame - Python Game Library - Copyright (C) 2000-2001 Pete Shinners - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; either - version 2 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Library General Public License for more details. 
- - You should have received a copy of the GNU Library General Public - License along with this library; if not, write to the Free - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - - Pete Shinners - pete@shinners.org -*/ - -#include -#if defined(HAVE_SNPRINTF) /* also defined in SDL_ttf (SDL.h) */ -#undef HAVE_SNPRINTF /* remove GCC macro redefine warning */ -#endif -#include - - -/* test font initialization */ -#define FONT_INIT_CHECK() \ - if(!(*(int*)PyFONT_C_API[2])) \ - return RAISE(pgExc_SDLError, "font system not initialized") - - - -#define PYGAMEAPI_FONT_FIRSTSLOT 0 -#define PYGAMEAPI_FONT_NUMSLOTS 3 -typedef struct { - PyObject_HEAD - TTF_Font* font; - PyObject* weakreflist; -} PyFontObject; -#define PyFont_AsFont(x) (((PyFontObject*)x)->font) - -#ifndef PYGAMEAPI_FONT_INTERNAL -#define PyFont_Check(x) ((x)->ob_type == (PyTypeObject*)PyFONT_C_API[0]) -#define PyFont_Type (*(PyTypeObject*)PyFONT_C_API[0]) -#define PyFont_New (*(PyObject*(*)(TTF_Font*))PyFONT_C_API[1]) -/*slot 2 taken by FONT_INIT_CHECK*/ - -#define import_pygame_font() \ - _IMPORT_PYGAME_MODULE(font, FONT, PyFONT_C_API) - -static void* PyFONT_C_API[PYGAMEAPI_FONT_NUMSLOTS] = {NULL}; -#endif - diff --git a/venv/include/site/python3.7/pygame/freetype.h b/venv/include/site/python3.7/pygame/freetype.h deleted file mode 100644 index fda7226..0000000 --- a/venv/include/site/python3.7/pygame/freetype.h +++ /dev/null @@ -1,137 +0,0 @@ -/* - pygame - Python Game Library - Copyright (C) 2009 Vicent Marti - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; either - version 2 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - Library General Public License for more details. - - You should have received a copy of the GNU Library General Public - License along with this library; if not, write to the Free - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -*/ -#ifndef _PYGAME_FREETYPE_H_ -#define _PYGAME_FREETYPE_H_ - -#define PGFT_PYGAME1_COMPAT -#define HAVE_PYGAME_SDL_VIDEO -#define HAVE_PYGAME_SDL_RWOPS - -#include "pygame.h" -#include "pgcompat.h" - -#if PY3 -# define IS_PYTHON_3 -#endif - -#include -#include FT_FREETYPE_H -#include FT_CACHE_H -#include FT_XFREE86_H -#include FT_TRIGONOMETRY_H - -/********************************************************** - * Global module constants - **********************************************************/ - -/* Render styles */ -#define FT_STYLE_NORMAL 0x00 -#define FT_STYLE_STRONG 0x01 -#define FT_STYLE_OBLIQUE 0x02 -#define FT_STYLE_UNDERLINE 0x04 -#define FT_STYLE_WIDE 0x08 -#define FT_STYLE_DEFAULT 0xFF - -/* Bounding box modes */ -#define FT_BBOX_EXACT FT_GLYPH_BBOX_SUBPIXELS -#define FT_BBOX_EXACT_GRIDFIT FT_GLYPH_BBOX_GRIDFIT -#define FT_BBOX_PIXEL FT_GLYPH_BBOX_TRUNCATE -#define FT_BBOX_PIXEL_GRIDFIT FT_GLYPH_BBOX_PIXELS - -/* Rendering flags */ -#define FT_RFLAG_NONE (0) -#define FT_RFLAG_ANTIALIAS (1 << 0) -#define FT_RFLAG_AUTOHINT (1 << 1) -#define FT_RFLAG_VERTICAL (1 << 2) -#define FT_RFLAG_HINTED (1 << 3) -#define FT_RFLAG_KERNING (1 << 4) -#define FT_RFLAG_TRANSFORM (1 << 5) -#define FT_RFLAG_PAD (1 << 6) -#define FT_RFLAG_ORIGIN (1 << 7) -#define FT_RFLAG_UCS4 (1 << 8) -#define FT_RFLAG_USE_BITMAP_STRIKES (1 << 9) -#define FT_RFLAG_DEFAULTS (FT_RFLAG_HINTED | \ - FT_RFLAG_USE_BITMAP_STRIKES | \ - FT_RFLAG_ANTIALIAS) - - -#define FT_RENDER_NEWBYTEARRAY 0x0 -#define FT_RENDER_NEWSURFACE 0x1 -#define FT_RENDER_EXISTINGSURFACE 0x2 - -/********************************************************** - * Global module types - **********************************************************/ - -typedef struct 
_scale_s { - FT_UInt x, y; -} Scale_t; -typedef FT_Angle Angle_t; - -struct fontinternals_; -struct freetypeinstance_; - -typedef struct { - FT_Long font_index; - FT_Open_Args open_args; -} pgFontId; - -typedef struct { - PyObject_HEAD - pgFontId id; - PyObject *path; - int is_scalable; - - Scale_t face_size; - FT_Int16 style; - FT_Int16 render_flags; - double strength; - double underline_adjustment; - FT_UInt resolution; - Angle_t rotation; - FT_Matrix transform; - FT_Byte fgcolor[4]; - - struct freetypeinstance_ *freetype; /* Personal reference */ - struct fontinternals_ *_internals; -} pgFontObject; - -#define pgFont_IS_ALIVE(o) \ - (((pgFontObject *)(o))->_internals != 0) - -/********************************************************** - * Module declaration - **********************************************************/ -#define PYGAMEAPI_FREETYPE_FIRSTSLOT 0 -#define PYGAMEAPI_FREETYPE_NUMSLOTS 2 - -#ifndef PYGAME_FREETYPE_INTERNAL - -#define pgFont_Check(x) ((x)->ob_type == (PyTypeObject*)PgFREETYPE_C_API[0]) -#define pgFont_Type (*(PyTypeObject*)PgFREETYPE_C_API[1]) -#define pgFont_New (*(PyObject*(*)(const char*, long))PgFREETYPE_C_API[1]) - -#define import_pygame_freetype() \ - _IMPORT_PYGAME_MODULE(freetype, FREETYPE, PgFREETYPE_C_API) - -static void *PgFREETYPE_C_API[PYGAMEAPI_FREETYPE_NUMSLOTS] = {0}; -#endif /* PYGAME_FREETYPE_INTERNAL */ - -#endif /* _PYGAME_FREETYPE_H_ */ diff --git a/venv/include/site/python3.7/pygame/mask.h b/venv/include/site/python3.7/pygame/mask.h deleted file mode 100644 index b151dd4..0000000 --- a/venv/include/site/python3.7/pygame/mask.h +++ /dev/null @@ -1,25 +0,0 @@ -#include -#include "bitmask.h" - -#define PYGAMEAPI_MASK_FIRSTSLOT 0 -#define PYGAMEAPI_MASK_NUMSLOTS 1 -#define PYGAMEAPI_LOCAL_ENTRY "_PYGAME_C_API" - -typedef struct { - PyObject_HEAD - bitmask_t *mask; -} pgMaskObject; - -#define pgMask_AsBitmap(x) (((pgMaskObject*)x)->mask) - -#ifndef PYGAMEAPI_MASK_INTERNAL - -#define pgMask_Type 
(*(PyTypeObject*)PyMASK_C_API[0]) -#define pgMask_Check(x) ((x)->ob_type == &pgMask_Type) - -#define import_pygame_mask() \ - _IMPORT_PYGAME_MODULE(mask, MASK, PyMASK_C_API) - -static void* PyMASK_C_API[PYGAMEAPI_MASK_NUMSLOTS] = {NULL}; -#endif /* #ifndef PYGAMEAPI_MASK_INTERNAL */ - diff --git a/venv/include/site/python3.7/pygame/mixer.h b/venv/include/site/python3.7/pygame/mixer.h deleted file mode 100644 index 36d57f3..0000000 --- a/venv/include/site/python3.7/pygame/mixer.h +++ /dev/null @@ -1,65 +0,0 @@ -/* - pygame - Python Game Library - Copyright (C) 2000-2001 Pete Shinners - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; either - version 2 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Library General Public License for more details. 
- - You should have received a copy of the GNU Library General Public - License along with this library; if not, write to the Free - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - - Pete Shinners - pete@shinners.org -*/ - -#include -#include -#include - - -/* test mixer initializations */ -#define MIXER_INIT_CHECK() \ - if(!SDL_WasInit(SDL_INIT_AUDIO)) \ - return RAISE(pgExc_SDLError, "mixer not initialized") - - -#define PYGAMEAPI_MIXER_FIRSTSLOT 0 -#define PYGAMEAPI_MIXER_NUMSLOTS 7 -typedef struct { - PyObject_HEAD - Mix_Chunk *chunk; - Uint8 *mem; - PyObject *weakreflist; -} pgSoundObject; -typedef struct { - PyObject_HEAD - int chan; -} pgChannelObject; -#define pgSound_AsChunk(x) (((pgSoundObject*)x)->chunk) -#define pgChannel_AsInt(x) (((pgChannelObject*)x)->chan) - -#ifndef PYGAMEAPI_MIXER_INTERNAL -#define pgSound_Check(x) ((x)->ob_type == (PyTypeObject*)pgMIXER_C_API[0]) -#define pgSound_Type (*(PyTypeObject*)pgMIXER_C_API[0]) -#define pgSound_New (*(PyObject*(*)(Mix_Chunk*))pgMIXER_C_API[1]) -#define pgSound_Play (*(PyObject*(*)(PyObject*, PyObject*))pgMIXER_C_API[2]) -#define pgChannel_Check(x) ((x)->ob_type == (PyTypeObject*)pgMIXER_C_API[3]) -#define pgChannel_Type (*(PyTypeObject*)pgMIXER_C_API[3]) -#define pgChannel_New (*(PyObject*(*)(int))pgMIXER_C_API[4]) -#define pgMixer_AutoInit (*(PyObject*(*)(PyObject*, PyObject*))pgMIXER_C_API[5]) -#define pgMixer_AutoQuit (*(void(*)(void))pgMIXER_C_API[6]) - -#define import_pygame_mixer() \ - _IMPORT_PYGAME_MODULE(mixer, MIXER, pgMIXER_C_API) - -static void* pgMIXER_C_API[PYGAMEAPI_MIXER_NUMSLOTS] = {NULL}; -#endif - diff --git a/venv/include/site/python3.7/pygame/palette.h b/venv/include/site/python3.7/pygame/palette.h deleted file mode 100644 index 1ae4cf6..0000000 --- a/venv/include/site/python3.7/pygame/palette.h +++ /dev/null @@ -1,123 +0,0 @@ -/* - pygame - Python Game Library - Copyright (C) 2000-2001 Pete Shinners - - This library is free software; you can redistribute it 
and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; either - version 2 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Library General Public License for more details. - - You should have received a copy of the GNU Library General Public - License along with this library; if not, write to the Free - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - - Pete Shinners - pete@shinners.org -*/ - -#ifndef PALETTE_H -#define PALETTE_H - -#include - -/* SDL 2 does not assign a default palette color scheme to a new 8 bit - * surface. Instead, the palette is set all white. This defines the SDL 1.2 - * default palette. - */ -static const SDL_Color default_palette_colors[] = { - {0, 0, 0, 255}, {0, 0, 85, 255}, {0, 0, 170, 255}, - {0, 0, 255, 255}, {0, 36, 0, 255}, {0, 36, 85, 255}, - {0, 36, 170, 255}, {0, 36, 255, 255}, {0, 73, 0, 255}, - {0, 73, 85, 255}, {0, 73, 170, 255}, {0, 73, 255, 255}, - {0, 109, 0, 255}, {0, 109, 85, 255}, {0, 109, 170, 255}, - {0, 109, 255, 255}, {0, 146, 0, 255}, {0, 146, 85, 255}, - {0, 146, 170, 255}, {0, 146, 255, 255}, {0, 182, 0, 255}, - {0, 182, 85, 255}, {0, 182, 170, 255}, {0, 182, 255, 255}, - {0, 219, 0, 255}, {0, 219, 85, 255}, {0, 219, 170, 255}, - {0, 219, 255, 255}, {0, 255, 0, 255}, {0, 255, 85, 255}, - {0, 255, 170, 255}, {0, 255, 255, 255}, {85, 0, 0, 255}, - {85, 0, 85, 255}, {85, 0, 170, 255}, {85, 0, 255, 255}, - {85, 36, 0, 255}, {85, 36, 85, 255}, {85, 36, 170, 255}, - {85, 36, 255, 255}, {85, 73, 0, 255}, {85, 73, 85, 255}, - {85, 73, 170, 255}, {85, 73, 255, 255}, {85, 109, 0, 255}, - {85, 109, 85, 255}, {85, 109, 170, 255}, {85, 109, 255, 255}, - {85, 146, 0, 255}, {85, 146, 85, 255}, {85, 146, 170, 255}, - {85, 146, 255, 
255}, {85, 182, 0, 255}, {85, 182, 85, 255}, - {85, 182, 170, 255}, {85, 182, 255, 255}, {85, 219, 0, 255}, - {85, 219, 85, 255}, {85, 219, 170, 255}, {85, 219, 255, 255}, - {85, 255, 0, 255}, {85, 255, 85, 255}, {85, 255, 170, 255}, - {85, 255, 255, 255}, {170, 0, 0, 255}, {170, 0, 85, 255}, - {170, 0, 170, 255}, {170, 0, 255, 255}, {170, 36, 0, 255}, - {170, 36, 85, 255}, {170, 36, 170, 255}, {170, 36, 255, 255}, - {170, 73, 0, 255}, {170, 73, 85, 255}, {170, 73, 170, 255}, - {170, 73, 255, 255}, {170, 109, 0, 255}, {170, 109, 85, 255}, - {170, 109, 170, 255}, {170, 109, 255, 255}, {170, 146, 0, 255}, - {170, 146, 85, 255}, {170, 146, 170, 255}, {170, 146, 255, 255}, - {170, 182, 0, 255}, {170, 182, 85, 255}, {170, 182, 170, 255}, - {170, 182, 255, 255}, {170, 219, 0, 255}, {170, 219, 85, 255}, - {170, 219, 170, 255}, {170, 219, 255, 255}, {170, 255, 0, 255}, - {170, 255, 85, 255}, {170, 255, 170, 255}, {170, 255, 255, 255}, - {255, 0, 0, 255}, {255, 0, 85, 255}, {255, 0, 170, 255}, - {255, 0, 255, 255}, {255, 36, 0, 255}, {255, 36, 85, 255}, - {255, 36, 170, 255}, {255, 36, 255, 255}, {255, 73, 0, 255}, - {255, 73, 85, 255}, {255, 73, 170, 255}, {255, 73, 255, 255}, - {255, 109, 0, 255}, {255, 109, 85, 255}, {255, 109, 170, 255}, - {255, 109, 255, 255}, {255, 146, 0, 255}, {255, 146, 85, 255}, - {255, 146, 170, 255}, {255, 146, 255, 255}, {255, 182, 0, 255}, - {255, 182, 85, 255}, {255, 182, 170, 255}, {255, 182, 255, 255}, - {255, 219, 0, 255}, {255, 219, 85, 255}, {255, 219, 170, 255}, - {255, 219, 255, 255}, {255, 255, 0, 255}, {255, 255, 85, 255}, - {255, 255, 170, 255}, {255, 255, 255, 255}, {0, 0, 0, 255}, - {0, 0, 85, 255}, {0, 0, 170, 255}, {0, 0, 255, 255}, - {0, 36, 0, 255}, {0, 36, 85, 255}, {0, 36, 170, 255}, - {0, 36, 255, 255}, {0, 73, 0, 255}, {0, 73, 85, 255}, - {0, 73, 170, 255}, {0, 73, 255, 255}, {0, 109, 0, 255}, - {0, 109, 85, 255}, {0, 109, 170, 255}, {0, 109, 255, 255}, - {0, 146, 0, 255}, {0, 146, 85, 255}, {0, 146, 170, 255}, - {0, 146, 
255, 255}, {0, 182, 0, 255}, {0, 182, 85, 255}, - {0, 182, 170, 255}, {0, 182, 255, 255}, {0, 219, 0, 255}, - {0, 219, 85, 255}, {0, 219, 170, 255}, {0, 219, 255, 255}, - {0, 255, 0, 255}, {0, 255, 85, 255}, {0, 255, 170, 255}, - {0, 255, 255, 255}, {85, 0, 0, 255}, {85, 0, 85, 255}, - {85, 0, 170, 255}, {85, 0, 255, 255}, {85, 36, 0, 255}, - {85, 36, 85, 255}, {85, 36, 170, 255}, {85, 36, 255, 255}, - {85, 73, 0, 255}, {85, 73, 85, 255}, {85, 73, 170, 255}, - {85, 73, 255, 255}, {85, 109, 0, 255}, {85, 109, 85, 255}, - {85, 109, 170, 255}, {85, 109, 255, 255}, {85, 146, 0, 255}, - {85, 146, 85, 255}, {85, 146, 170, 255}, {85, 146, 255, 255}, - {85, 182, 0, 255}, {85, 182, 85, 255}, {85, 182, 170, 255}, - {85, 182, 255, 255}, {85, 219, 0, 255}, {85, 219, 85, 255}, - {85, 219, 170, 255}, {85, 219, 255, 255}, {85, 255, 0, 255}, - {85, 255, 85, 255}, {85, 255, 170, 255}, {85, 255, 255, 255}, - {170, 0, 0, 255}, {170, 0, 85, 255}, {170, 0, 170, 255}, - {170, 0, 255, 255}, {170, 36, 0, 255}, {170, 36, 85, 255}, - {170, 36, 170, 255}, {170, 36, 255, 255}, {170, 73, 0, 255}, - {170, 73, 85, 255}, {170, 73, 170, 255}, {170, 73, 255, 255}, - {170, 109, 0, 255}, {170, 109, 85, 255}, {170, 109, 170, 255}, - {170, 109, 255, 255}, {170, 146, 0, 255}, {170, 146, 85, 255}, - {170, 146, 170, 255}, {170, 146, 255, 255}, {170, 182, 0, 255}, - {170, 182, 85, 255}, {170, 182, 170, 255}, {170, 182, 255, 255}, - {170, 219, 0, 255}, {170, 219, 85, 255}, {170, 219, 170, 255}, - {170, 219, 255, 255}, {170, 255, 0, 255}, {170, 255, 85, 255}, - {170, 255, 170, 255}, {170, 255, 255, 255}, {255, 0, 0, 255}, - {255, 0, 85, 255}, {255, 0, 170, 255}, {255, 0, 255, 255}, - {255, 36, 0, 255}, {255, 36, 85, 255}, {255, 36, 170, 255}, - {255, 36, 255, 255}, {255, 73, 0, 255}, {255, 73, 85, 255}, - {255, 73, 170, 255}, {255, 73, 255, 255}, {255, 109, 0, 255}, - {255, 109, 85, 255}, {255, 109, 170, 255}, {255, 109, 255, 255}, - {255, 146, 0, 255}, {255, 146, 85, 255}, {255, 146, 170, 255}, - {255, 146, 
255, 255}, {255, 182, 0, 255}, {255, 182, 85, 255}, - {255, 182, 170, 255}, {255, 182, 255, 255}, {255, 219, 0, 255}, - {255, 219, 85, 255}, {255, 219, 170, 255}, {255, 219, 255, 255}, - {255, 255, 0, 255}, {255, 255, 85, 255}, {255, 255, 170, 255}, - {255, 255, 255, 255}}; - -static const int default_palette_size = - (int)(sizeof(default_palette_colors) / sizeof(SDL_Color)); - -#endif diff --git a/venv/include/site/python3.7/pygame/pgarrinter.h b/venv/include/site/python3.7/pygame/pgarrinter.h deleted file mode 100644 index 5ba096b..0000000 --- a/venv/include/site/python3.7/pygame/pgarrinter.h +++ /dev/null @@ -1,26 +0,0 @@ -/* array structure interface version 3 declarations */ - -#if !defined(PG_ARRAYINTER_HEADER) -#define PG_ARRAYINTER_HEADER - -static const int PAI_CONTIGUOUS = 0x01; -static const int PAI_FORTRAN = 0x02; -static const int PAI_ALIGNED = 0x100; -static const int PAI_NOTSWAPPED = 0x200; -static const int PAI_WRITEABLE = 0x400; -static const int PAI_ARR_HAS_DESCR = 0x800; - -typedef struct { - int two; /* contains the integer 2 -- simple sanity check */ - int nd; /* number of dimensions */ - char typekind; /* kind in array -- character code of typestr */ - int itemsize; /* size of each element */ - int flags; /* flags indicating how the data should be */ - /* interpreted */ - Py_intptr_t *shape; /* A length-nd array of shape information */ - Py_intptr_t *strides; /* A length-nd array of stride information */ - void *data; /* A pointer to the first element of the array */ - PyObject *descr; /* NULL or a data-description */ -} PyArrayInterface; - -#endif diff --git a/venv/include/site/python3.7/pygame/pgbufferproxy.h b/venv/include/site/python3.7/pygame/pgbufferproxy.h deleted file mode 100644 index 92dc2f0..0000000 --- a/venv/include/site/python3.7/pygame/pgbufferproxy.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - pygame - Python Game Library - Copyright (C) 2000-2001 Pete Shinners - Copyright (C) 2007 Rene Dudfield, Richard Goedeken - - This library is 
free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; either - version 2 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Library General Public License for more details. - - You should have received a copy of the GNU Library General Public - License along with this library; if not, write to the Free - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - - Pete Shinners - pete@shinners.org -*/ - -/* Bufferproxy module C api. - Depends on pygame.h being included first. - */ -#if !defined(PG_BUFPROXY_HEADER) - -#define PYGAMEAPI_BUFPROXY_NUMSLOTS 4 -#define PYGAMEAPI_BUFPROXY_FIRSTSLOT 0 - -#if !(defined(PYGAMEAPI_BUFPROXY_INTERNAL) || defined(NO_PYGAME_C_API)) -static void *PgBUFPROXY_C_API[PYGAMEAPI_BUFPROXY_NUMSLOTS]; - -typedef PyObject *(*_pgbufproxy_new_t)(PyObject *, getbufferproc); -typedef PyObject *(*_pgbufproxy_get_obj_t)(PyObject *); -typedef int (*_pgbufproxy_trip_t)(PyObject *); - -#define pgBufproxy_Type (*(PyTypeObject*)PgBUFPROXY_C_API[0]) -#define pgBufproxy_New (*(_pgbufproxy_new_t)PgBUFPROXY_C_API[1]) -#define pgBufproxy_GetParent \ - (*(_pgbufproxy_get_obj_t)PgBUFPROXY_C_API[2]) -#define pgBufproxy_Trip (*(_pgbufproxy_trip_t)PgBUFPROXY_C_API[3]) -#define pgBufproxy_Check(x) ((x)->ob_type == (pgBufproxy_Type)) -#define import_pygame_bufferproxy() \ - _IMPORT_PYGAME_MODULE(bufferproxy, BUFPROXY, PgBUFPROXY_C_API) - -#endif /* #if !(defined(PYGAMEAPI_BUFPROXY_INTERNAL) || ... 
*/ - -#define PG_BUFPROXY_HEADER - -#endif /* #if !defined(PG_BUFPROXY_HEADER) */ diff --git a/venv/include/site/python3.7/pygame/pgcompat.h b/venv/include/site/python3.7/pygame/pgcompat.h deleted file mode 100644 index 9eb1b88..0000000 --- a/venv/include/site/python3.7/pygame/pgcompat.h +++ /dev/null @@ -1,195 +0,0 @@ -/* Python 2.x/3.x compitibility tools - */ - -#if !defined(PGCOMPAT_H) -#define PGCOMPAT_H - -#if PY_MAJOR_VERSION >= 3 - -#define PY3 1 - -/* Define some aliases for the removed PyInt_* functions */ -#define PyInt_Check(op) PyLong_Check(op) -#define PyInt_FromString PyLong_FromString -#define PyInt_FromUnicode PyLong_FromUnicode -#define PyInt_FromLong PyLong_FromLong -#define PyInt_FromSize_t PyLong_FromSize_t -#define PyInt_FromSsize_t PyLong_FromSsize_t -#define PyInt_AsLong PyLong_AsLong -#define PyInt_AsSsize_t PyLong_AsSsize_t -#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask -#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask -#define PyInt_AS_LONG PyLong_AS_LONG -#define PyNumber_Int PyNumber_Long - -/* Weakrefs flags changed in 3.x */ -#define Py_TPFLAGS_HAVE_WEAKREFS 0 - -/* Module init function returns new module instance. */ -#define MODINIT_RETURN(x) return x -#define MODINIT_DEFINE(mod_name) PyMODINIT_FUNC PyInit_##mod_name (void) -#define DECREF_MOD(mod) Py_DECREF (mod) - -/* Type header differs. */ -#define TYPE_HEAD(x,y) PyVarObject_HEAD_INIT(x,y) - -/* Text interface. Use unicode strings. 
*/ -#define Text_Type PyUnicode_Type -#define Text_Check PyUnicode_Check - -#ifndef PYPY_VERSION -#define Text_FromLocale(s) PyUnicode_DecodeLocale((s), "strict") -#else /* PYPY_VERSION */ -/* workaround: missing function for pypy */ -#define Text_FromLocale PyUnicode_FromString -#endif /* PYPY_VERSION */ - -#define Text_FromUTF8 PyUnicode_FromString -#define Text_FromUTF8AndSize PyUnicode_FromStringAndSize -#define Text_FromFormat PyUnicode_FromFormat -#define Text_GetSize PyUnicode_GetSize -#define Text_GET_SIZE PyUnicode_GET_SIZE - -/* Binary interface. Use bytes. */ -#define Bytes_Type PyBytes_Type -#define Bytes_Check PyBytes_Check -#define Bytes_Size PyBytes_Size -#define Bytes_AsString PyBytes_AsString -#define Bytes_AsStringAndSize PyBytes_AsStringAndSize -#define Bytes_FromStringAndSize PyBytes_FromStringAndSize -#define Bytes_FromFormat PyBytes_FromFormat -#define Bytes_AS_STRING PyBytes_AS_STRING -#define Bytes_GET_SIZE PyBytes_GET_SIZE -#define Bytes_AsDecodeObject PyBytes_AsDecodedObject - -#define Object_Unicode PyObject_Str - -#define IsTextObj(x) (PyUnicode_Check(x) || PyBytes_Check(x)) - -/* Renamed builtins */ -#define BUILTINS_MODULE "builtins" -#define BUILTINS_UNICODE "str" -#define BUILTINS_UNICHR "chr" - -/* Defaults for unicode file path encoding */ -#define UNICODE_DEF_FS_CODEC Py_FileSystemDefaultEncoding -#if defined(MS_WIN32) -#define UNICODE_DEF_FS_ERROR "replace" -#else -#define UNICODE_DEF_FS_ERROR "surrogateescape" -#endif - -#else /* #if PY_MAJOR_VERSION >= 3 */ - -#define PY3 0 - -/* Module init function returns nothing. */ -#define MODINIT_RETURN(x) return -#define MODINIT_DEFINE(mod_name) PyMODINIT_FUNC init##mod_name (void) -#define DECREF_MOD(mod) - -/* Type header differs. */ -#define TYPE_HEAD(x,y) \ - PyObject_HEAD_INIT(x) \ - 0, - -/* Text interface. Use ascii strings. 
*/ -#define Text_Type PyString_Type -#define Text_Check PyString_Check -#define Text_FromLocale PyString_FromString -#define Text_FromUTF8 PyString_FromString -#define Text_FromUTF8AndSize PyString_FromStringAndSize -#define Text_FromFormat PyString_FromFormat -#define Text_GetSize PyString_GetSize -#define Text_GET_SIZE PyString_GET_SIZE - -/* Binary interface. Use ascii strings. */ -#define Bytes_Type PyString_Type -#define Bytes_Check PyString_Check -#define Bytes_Size PyString_Size -#define Bytes_AsString PyString_AsString -#define Bytes_AsStringAndSize PyString_AsStringAndSize -#define Bytes_FromStringAndSize PyString_FromStringAndSize -#define Bytes_FromFormat PyString_FromFormat -#define Bytes_AS_STRING PyString_AS_STRING -#define Bytes_GET_SIZE PyString_GET_SIZE -#define Bytes_AsDecodedObject PyString_AsDecodedObject - -#define Object_Unicode PyObject_Unicode - -/* Renamed builtins */ -#define BUILTINS_MODULE "__builtin__" -#define BUILTINS_UNICODE "unicode" -#define BUILTINS_UNICHR "unichr" - -/* Defaults for unicode file path encoding */ -#define UNICODE_DEF_FS_CODEC Py_FileSystemDefaultEncoding -#define UNICODE_DEF_FS_ERROR "strict" - -#endif /* #if PY_MAJOR_VERSION >= 3 */ - -#define PY2 (!PY3) - -#define MODINIT_ERROR MODINIT_RETURN (NULL) - -/* Module state. These macros are used to define per-module macros. - * v - global state variable (Python 2.x) - * s - global state structure (Python 3.x) - */ -#define PY2_GETSTATE(v) (&(v)) -#define PY3_GETSTATE(s, m) ((struct s *) PyModule_GetState (m)) - -/* Pep 3123: Making PyObject_HEAD conform to standard C */ -#if !defined(Py_TYPE) -#define Py_TYPE(o) (((PyObject *)(o))->ob_type) -#define Py_REFCNT(o) (((PyObject *)(o))->ob_refcnt) -#define Py_SIZE(o) (((PyVarObject *)(o))->ob_size) -#endif - -/* Encode a unicode file path */ -#define Unicode_AsEncodedPath(u) \ - PyUnicode_AsEncodedString ((u), UNICODE_DEF_FS_CODEC, UNICODE_DEF_FS_ERROR) - -#define RELATIVE_MODULE(m) ("." 
m) - -#define HAVE_OLD_BUFPROTO PY2 - -#if !defined(PG_ENABLE_OLDBUF) /* allow for command line override */ -#if HAVE_OLD_BUFPROTO -#define PG_ENABLE_OLDBUF 1 -#else -#define PG_ENABLE_OLDBUF 0 -#endif -#endif - -#ifndef Py_TPFLAGS_HAVE_NEWBUFFER -#define Py_TPFLAGS_HAVE_NEWBUFFER 0 -#endif - -#ifndef Py_TPFLAGS_HAVE_CLASS -#define Py_TPFLAGS_HAVE_CLASS 0 -#endif - -#ifndef Py_TPFLAGS_CHECKTYPES -#define Py_TPFLAGS_CHECKTYPES 0 -#endif - -#if PY_VERSION_HEX >= 0x03020000 -#define Slice_GET_INDICES_EX(slice, length, start, stop, step, slicelength) \ - PySlice_GetIndicesEx(slice, length, start, stop, step, slicelength) -#else -#define Slice_GET_INDICES_EX(slice, length, start, stop, step, slicelength) \ - PySlice_GetIndicesEx((PySliceObject *)(slice), length, \ - start, stop, step, slicelength) -#endif - -/* Support new buffer protocol? */ -#if !defined(PG_ENABLE_NEWBUF) /* allow for command line override */ -#if !defined(PYPY_VERSION) -#define PG_ENABLE_NEWBUF 1 -#else -#define PG_ENABLE_NEWBUF 0 -#endif -#endif - -#endif /* #if !defined(PGCOMPAT_H) */ diff --git a/venv/include/site/python3.7/pygame/pgopengl.h b/venv/include/site/python3.7/pygame/pgopengl.h deleted file mode 100644 index 3c80dca..0000000 --- a/venv/include/site/python3.7/pygame/pgopengl.h +++ /dev/null @@ -1,16 +0,0 @@ -#if !defined(PGOPENGL_H) -#define PGOPENGL_H - -/** This header includes definitions of Opengl functions as pointer types for - ** use with the SDL function SDL_GL_GetProcAddress. 
- **/ - -#if defined(_WIN32) -#define GL_APIENTRY __stdcall -#else -#define GL_APIENTRY -#endif - -typedef void (GL_APIENTRY *GL_glReadPixels_Func)(int, int, int, int, unsigned int, unsigned int, void*); - -#endif diff --git a/venv/include/site/python3.7/pygame/pygame.h b/venv/include/site/python3.7/pygame/pygame.h deleted file mode 100644 index bcbf1d9..0000000 --- a/venv/include/site/python3.7/pygame/pygame.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - pygame - Python Game Library - Copyright (C) 2000-2001 Pete Shinners - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; either - version 2 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Library General Public License for more details. - - You should have received a copy of the GNU Library General Public - License along with this library; if not, write to the Free - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - - Pete Shinners - pete@shinners.org -*/ - -/* To allow the Pygame C api to be globally shared by all code within an - * extension module built from multiple C files, only include the pygame.h - * header within the top level C file, the one which calls the - * 'import_pygame_*' macros. All other C source files of the module should - * include _pygame.h instead. 
- */ -#ifndef PYGAME_H -#define PYGAME_H - -#include "_pygame.h" - -#endif diff --git a/venv/include/site/python3.7/pygame/scrap.h b/venv/include/site/python3.7/pygame/scrap.h deleted file mode 100644 index b1b3856..0000000 --- a/venv/include/site/python3.7/pygame/scrap.h +++ /dev/null @@ -1,143 +0,0 @@ -/* - pygame - Python Game Library - Copyright (C) 2006, 2007 Rene Dudfield, Marcus von Appen - - Originally put in the public domain by Sam Lantinga. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; either - version 2 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Library General Public License for more details. - - You should have received a copy of the GNU Library General Public - License along with this library; if not, write to the Free - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ - -/* This is unconditionally defined in Python.h */ -#if defined(_POSIX_C_SOURCE) -#undef _POSIX_C_SOURCE -#endif - -#include - -/* Handle clipboard text and data in arbitrary formats */ - -/** - * Predefined supported pygame scrap types. - */ -#define PYGAME_SCRAP_TEXT "text/plain" -#define PYGAME_SCRAP_BMP "image/bmp" -#define PYGAME_SCRAP_PPM "image/ppm" -#define PYGAME_SCRAP_PBM "image/pbm" - -/** - * The supported scrap clipboard types. - * - * This is only relevant in a X11 environment, which supports mouse - * selections as well. For Win32 and MacOS environments the default - * clipboard is used, no matter what value is passed. - */ -typedef enum -{ - SCRAP_CLIPBOARD, - SCRAP_SELECTION /* only supported in X11 environments. */ -} ScrapClipType; - -/** - * Macro for initialization checks. 
- */ -#define PYGAME_SCRAP_INIT_CHECK() \ - if(!pygame_scrap_initialized()) \ - return (PyErr_SetString (pgExc_SDLError, \ - "scrap system not initialized."), NULL) - -/** - * \brief Checks, whether the pygame scrap module was initialized. - * - * \return 1 if the modules was initialized, 0 otherwise. - */ -extern int -pygame_scrap_initialized (void); - -/** - * \brief Initializes the pygame scrap module internals. Call this before any - * other method. - * - * \return 1 on successful initialization, 0 otherwise. - */ -extern int -pygame_scrap_init (void); - -/** - * \brief Checks, whether the pygame window lost the clipboard focus or not. - * - * \return 1 if the window lost the focus, 0 otherwise. - */ -extern int -pygame_scrap_lost (void); - -/** - * \brief Places content of a specific type into the clipboard. - * - * \note For X11 the following notes are important: The following types - * are reserved for internal usage and thus will throw an error on - * setting them: "TIMESTAMP", "TARGETS", "SDL_SELECTION". - * Setting PYGAME_SCRAP_TEXT ("text/plain") will also automatically - * set the X11 types "STRING" (XA_STRING), "TEXT" and "UTF8_STRING". - * - * For Win32 the following notes are important: Setting - * PYGAME_SCRAP_TEXT ("text/plain") will also automatically set - * the Win32 type "TEXT" (CF_TEXT). - * - * For QNX the following notes are important: Setting - * PYGAME_SCRAP_TEXT ("text/plain") will also automatically set - * the QNX type "TEXT" (Ph_CL_TEXT). - * - * \param type The type of the content. - * \param srclen The length of the content. - * \param src The NULL terminated content. - * \return 1, if the content could be successfully pasted into the clipboard, - * 0 otherwise. - */ -extern int -pygame_scrap_put (char *type, int srclen, char *src); - -/** - * \brief Gets the current content from the clipboard. - * - * \note The received content does not need to be the content previously - * placed in the clipboard using pygame_put_scrap(). 
See the - * pygame_put_scrap() notes for more details. - * - * \param type The type of the content to receive. - * \param count The size of the returned content. - * \return The content or NULL in case of an error or if no content of the - * specified type was available. - */ -extern char* -pygame_scrap_get (char *type, unsigned long *count); - -/** - * \brief Gets the currently available content types from the clipboard. - * - * \return The different available content types or NULL in case of an - * error or if no content type is available. - */ -extern char** -pygame_scrap_get_types (void); - -/** - * \brief Checks whether content for the specified scrap type is currently - * available in the clipboard. - * - * \param type The type to check for. - * \return 1, if there is content and 0 otherwise. - */ -extern int -pygame_scrap_contains (char *type); diff --git a/venv/include/site/python3.7/pygame/surface.h b/venv/include/site/python3.7/pygame/surface.h deleted file mode 100644 index cc5f071..0000000 --- a/venv/include/site/python3.7/pygame/surface.h +++ /dev/null @@ -1,383 +0,0 @@ -/* - pygame - Python Game Library - Copyright (C) 2000-2001 Pete Shinners - Copyright (C) 2007 Marcus von Appen - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; either - version 2 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Library General Public License for more details. 
- - You should have received a copy of the GNU Library General Public - License along with this library; if not, write to the Free - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - - Pete Shinners - pete@shinners.org -*/ - -#ifndef SURFACE_H -#define SURFACE_H - -/* This is defined in SDL.h */ -#if defined(_POSIX_C_SOURCE) -#undef _POSIX_C_SOURCE -#endif - -#include -#include "pygame.h" - -/* Blend modes */ -#define PYGAME_BLEND_ADD 0x1 -#define PYGAME_BLEND_SUB 0x2 -#define PYGAME_BLEND_MULT 0x3 -#define PYGAME_BLEND_MIN 0x4 -#define PYGAME_BLEND_MAX 0x5 - -#define PYGAME_BLEND_RGB_ADD 0x1 -#define PYGAME_BLEND_RGB_SUB 0x2 -#define PYGAME_BLEND_RGB_MULT 0x3 -#define PYGAME_BLEND_RGB_MIN 0x4 -#define PYGAME_BLEND_RGB_MAX 0x5 - -#define PYGAME_BLEND_RGBA_ADD 0x6 -#define PYGAME_BLEND_RGBA_SUB 0x7 -#define PYGAME_BLEND_RGBA_MULT 0x8 -#define PYGAME_BLEND_RGBA_MIN 0x9 -#define PYGAME_BLEND_RGBA_MAX 0x10 -#define PYGAME_BLEND_PREMULTIPLIED 0x11 - - - - - -#if SDL_BYTEORDER == SDL_LIL_ENDIAN -#define GET_PIXEL_24(b) (b[0] + (b[1] << 8) + (b[2] << 16)) -#else -#define GET_PIXEL_24(b) (b[2] + (b[1] << 8) + (b[0] << 16)) -#endif - -#define GET_PIXEL(pxl, bpp, source) \ - switch (bpp) \ - { \ - case 2: \ - pxl = *((Uint16 *) (source)); \ - break; \ - case 4: \ - pxl = *((Uint32 *) (source)); \ - break; \ - default: \ - { \ - Uint8 *b = (Uint8 *) source; \ - pxl = GET_PIXEL_24(b); \ - } \ - break; \ - } - -#if IS_SDLv1 -#define GET_PIXELVALS(_sR, _sG, _sB, _sA, px, fmt, ppa) \ - _sR = ((px & fmt->Rmask) >> fmt->Rshift); \ - _sR = (_sR << fmt->Rloss) + (_sR >> (8 - (fmt->Rloss << 1))); \ - _sG = ((px & fmt->Gmask) >> fmt->Gshift); \ - _sG = (_sG << fmt->Gloss) + (_sG >> (8 - (fmt->Gloss << 1))); \ - _sB = ((px & fmt->Bmask) >> fmt->Bshift); \ - _sB = (_sB << fmt->Bloss) + (_sB >> (8 - (fmt->Bloss << 1))); \ - if (ppa) \ - { \ - _sA = ((px & fmt->Amask) >> fmt->Ashift); \ - _sA = (_sA << fmt->Aloss) + (_sA >> (8 - (fmt->Aloss << 1))); \ - } \ - else \ 
- { \ - _sA = 255; \ - } - -#define GET_PIXELVALS_1(sr, sg, sb, sa, _src, _fmt) \ - sr = _fmt->palette->colors[*((Uint8 *) (_src))].r; \ - sg = _fmt->palette->colors[*((Uint8 *) (_src))].g; \ - sb = _fmt->palette->colors[*((Uint8 *) (_src))].b; \ - sa = 255; - -/* For 1 byte palette pixels */ -#define SET_PIXELVAL(px, fmt, _dR, _dG, _dB, _dA) \ - *(px) = (Uint8) SDL_MapRGB(fmt, _dR, _dG, _dB) -#else /* IS_SDLv2 */ -#define GET_PIXELVALS(_sR, _sG, _sB, _sA, px, fmt, ppa) \ - SDL_GetRGBA(px, fmt, &(_sR), &(_sG), &(_sB), &(_sA)); \ - if (!ppa) { \ - _sA = 255; \ - } - -#define GET_PIXELVALS_1(sr, sg, sb, sa, _src, _fmt) \ - sr = _fmt->palette->colors[*((Uint8 *) (_src))].r; \ - sg = _fmt->palette->colors[*((Uint8 *) (_src))].g; \ - sb = _fmt->palette->colors[*((Uint8 *) (_src))].b; \ - sa = 255; - -/* For 1 byte palette pixels */ -#define SET_PIXELVAL(px, fmt, _dR, _dG, _dB, _dA) \ - *(px) = (Uint8) SDL_MapRGBA(fmt, _dR, _dG, _dB, _dA) -#endif /* IS_SDLv2 */ - - - - - - - - -#if SDL_BYTEORDER == SDL_LIL_ENDIAN -#define SET_OFFSETS_24(or, og, ob, fmt) \ - { \ - or = (fmt->Rshift == 0 ? 0 : \ - fmt->Rshift == 8 ? 1 : \ - 2 ); \ - og = (fmt->Gshift == 0 ? 0 : \ - fmt->Gshift == 8 ? 1 : \ - 2 ); \ - ob = (fmt->Bshift == 0 ? 0 : \ - fmt->Bshift == 8 ? 1 : \ - 2 ); \ - } - -#define SET_OFFSETS_32(or, og, ob, fmt) \ - { \ - or = (fmt->Rshift == 0 ? 0 : \ - fmt->Rshift == 8 ? 1 : \ - fmt->Rshift == 16 ? 2 : \ - 3 ); \ - og = (fmt->Gshift == 0 ? 0 : \ - fmt->Gshift == 8 ? 1 : \ - fmt->Gshift == 16 ? 2 : \ - 3 ); \ - ob = (fmt->Bshift == 0 ? 0 : \ - fmt->Bshift == 8 ? 1 : \ - fmt->Bshift == 16 ? 2 : \ - 3 ); \ - } -#else -#define SET_OFFSETS_24(or, og, ob, fmt) \ - { \ - or = (fmt->Rshift == 0 ? 2 : \ - fmt->Rshift == 8 ? 1 : \ - 0 ); \ - og = (fmt->Gshift == 0 ? 2 : \ - fmt->Gshift == 8 ? 1 : \ - 0 ); \ - ob = (fmt->Bshift == 0 ? 2 : \ - fmt->Bshift == 8 ? 1 : \ - 0 ); \ - } - -#define SET_OFFSETS_32(or, og, ob, fmt) \ - { \ - or = (fmt->Rshift == 0 ? 
3 : \ - fmt->Rshift == 8 ? 2 : \ - fmt->Rshift == 16 ? 1 : \ - 0 ); \ - og = (fmt->Gshift == 0 ? 3 : \ - fmt->Gshift == 8 ? 2 : \ - fmt->Gshift == 16 ? 1 : \ - 0 ); \ - ob = (fmt->Bshift == 0 ? 3 : \ - fmt->Bshift == 8 ? 2 : \ - fmt->Bshift == 16 ? 1 : \ - 0 ); \ - } -#endif - - -#define CREATE_PIXEL(buf, r, g, b, a, bp, ft) \ - switch (bp) \ - { \ - case 2: \ - *((Uint16 *) (buf)) = \ - ((r >> ft->Rloss) << ft->Rshift) | \ - ((g >> ft->Gloss) << ft->Gshift) | \ - ((b >> ft->Bloss) << ft->Bshift) | \ - ((a >> ft->Aloss) << ft->Ashift); \ - break; \ - case 4: \ - *((Uint32 *) (buf)) = \ - ((r >> ft->Rloss) << ft->Rshift) | \ - ((g >> ft->Gloss) << ft->Gshift) | \ - ((b >> ft->Bloss) << ft->Bshift) | \ - ((a >> ft->Aloss) << ft->Ashift); \ - break; \ - } - -/* Pretty good idea from Tom Duff :-). */ -#define LOOP_UNROLLED4(code, n, width) \ - n = (width + 3) / 4; \ - switch (width & 3) \ - { \ - case 0: do { code; \ - case 3: code; \ - case 2: code; \ - case 1: code; \ - } while (--n > 0); \ - } - -/* Used in the srcbpp == dstbpp == 1 blend functions */ -#define REPEAT_3(code) \ - code; \ - code; \ - code; - -#define REPEAT_4(code) \ - code; \ - code; \ - code; \ - code; - - -#define BLEND_ADD(tmp, sR, sG, sB, sA, dR, dG, dB, dA) \ - tmp = dR + sR; dR = (tmp <= 255 ? tmp : 255); \ - tmp = dG + sG; dG = (tmp <= 255 ? tmp : 255); \ - tmp = dB + sB; dB = (tmp <= 255 ? tmp : 255); - -#define BLEND_SUB(tmp, sR, sG, sB, sA, dR, dG, dB, dA) \ - tmp = dR - sR; dR = (tmp >= 0 ? tmp : 0); \ - tmp = dG - sG; dG = (tmp >= 0 ? tmp : 0); \ - tmp = dB - sB; dB = (tmp >= 0 ? tmp : 0); - -#define BLEND_MULT(sR, sG, sB, sA, dR, dG, dB, dA) \ - dR = (dR && sR) ? (dR * sR) >> 8 : 0; \ - dG = (dG && sG) ? (dG * sG) >> 8 : 0; \ - dB = (dB && sB) ? 
(dB * sB) >> 8 : 0; - -#define BLEND_MIN(sR, sG, sB, sA, dR, dG, dB, dA) \ - if(sR < dR) { dR = sR; } \ - if(sG < dG) { dG = sG; } \ - if(sB < dB) { dB = sB; } - -#define BLEND_MAX(sR, sG, sB, sA, dR, dG, dB, dA) \ - if(sR > dR) { dR = sR; } \ - if(sG > dG) { dG = sG; } \ - if(sB > dB) { dB = sB; } - - - - - - -#define BLEND_RGBA_ADD(tmp, sR, sG, sB, sA, dR, dG, dB, dA) \ - tmp = dR + sR; dR = (tmp <= 255 ? tmp : 255); \ - tmp = dG + sG; dG = (tmp <= 255 ? tmp : 255); \ - tmp = dB + sB; dB = (tmp <= 255 ? tmp : 255); \ - tmp = dA + sA; dA = (tmp <= 255 ? tmp : 255); - -#define BLEND_RGBA_SUB(tmp, sR, sG, sB, sA, dR, dG, dB, dA) \ - tmp = dR - sR; dR = (tmp >= 0 ? tmp : 0); \ - tmp = dG - sG; dG = (tmp >= 0 ? tmp : 0); \ - tmp = dB - sB; dB = (tmp >= 0 ? tmp : 0); \ - tmp = dA - sA; dA = (tmp >= 0 ? tmp : 0); - -#define BLEND_RGBA_MULT(sR, sG, sB, sA, dR, dG, dB, dA) \ - dR = (dR && sR) ? (dR * sR) >> 8 : 0; \ - dG = (dG && sG) ? (dG * sG) >> 8 : 0; \ - dB = (dB && sB) ? (dB * sB) >> 8 : 0; \ - dA = (dA && sA) ? (dA * sA) >> 8 : 0; - -#define BLEND_RGBA_MIN(sR, sG, sB, sA, dR, dG, dB, dA) \ - if(sR < dR) { dR = sR; } \ - if(sG < dG) { dG = sG; } \ - if(sB < dB) { dB = sB; } \ - if(sA < dA) { dA = sA; } - -#define BLEND_RGBA_MAX(sR, sG, sB, sA, dR, dG, dB, dA) \ - if(sR > dR) { dR = sR; } \ - if(sG > dG) { dG = sG; } \ - if(sB > dB) { dB = sB; } \ - if(sA > dA) { dA = sA; } - - - - - - - - - - - -#if 1 -/* Choose an alpha blend equation. If the sign is preserved on a right shift - * then use a specialized, faster, equation. Otherwise a more general form, - * where all additions are done before the shift, is needed. 
-*/ -#if (-1 >> 1) < 0 -#define ALPHA_BLEND_COMP(sC, dC, sA) ((((sC - dC) * sA + sC) >> 8) + dC) -#else -#define ALPHA_BLEND_COMP(sC, dC, sA) (((dC << 8) + (sC - dC) * sA + sC) >> 8) -#endif - -#define ALPHA_BLEND(sR, sG, sB, sA, dR, dG, dB, dA) \ - do { \ - if (dA) \ - { \ - dR = ALPHA_BLEND_COMP(sR, dR, sA); \ - dG = ALPHA_BLEND_COMP(sG, dG, sA); \ - dB = ALPHA_BLEND_COMP(sB, dB, sA); \ - dA = sA + dA - ((sA * dA) / 255); \ - } \ - else \ - { \ - dR = sR; \ - dG = sG; \ - dB = sB; \ - dA = sA; \ - } \ - } while(0) - -#define ALPHA_BLEND_PREMULTIPLIED_COMP(sC, dC, sA) (sC + dC - ((dC * sA) >> 8)) - -#define ALPHA_BLEND_PREMULTIPLIED(tmp, sR, sG, sB, sA, dR, dG, dB, dA) \ - do { \ - tmp = ALPHA_BLEND_PREMULTIPLIED_COMP(sR, dR, sA); dR = (tmp > 255 ? 255 : tmp); \ - tmp = ALPHA_BLEND_PREMULTIPLIED_COMP(sG, dG, sA); dG = (tmp > 255 ? 255 : tmp); \ - tmp = ALPHA_BLEND_PREMULTIPLIED_COMP(sB, dB, sA); dB = (tmp > 255 ? 255 : tmp); \ - dA = sA + dA - ((sA * dA) / 255); \ - } while(0) -#elif 0 - -#define ALPHA_BLEND(sR, sG, sB, sA, dR, dG, dB, dA) \ - do { \ - if(sA){ \ - if(dA && sA < 255){ \ - int dContrib = dA*(255 - sA)/255; \ - dA = sA+dA - ((sA*dA)/255); \ - dR = (dR*dContrib + sR*sA)/dA; \ - dG = (dG*dContrib + sG*sA)/dA; \ - dB = (dB*dContrib + sB*sA)/dA; \ - }else{ \ - dR = sR; \ - dG = sG; \ - dB = sB; \ - dA = sA; \ - } \ - } \ - } while(0) -#endif - -int -surface_fill_blend (SDL_Surface *surface, SDL_Rect *rect, Uint32 color, - int blendargs); - -void -surface_respect_clip_rect (SDL_Surface *surface, SDL_Rect *rect); - -int -pygame_AlphaBlit (SDL_Surface * src, SDL_Rect * srcrect, - SDL_Surface * dst, SDL_Rect * dstrect, int the_args); - -int -pygame_Blit (SDL_Surface * src, SDL_Rect * srcrect, - SDL_Surface * dst, SDL_Rect * dstrect, int the_args); - -#endif /* SURFACE_H */ diff --git a/venv/lib/python3.7/site-packages/easy-install.pth b/venv/lib/python3.7/site-packages/easy-install.pth deleted file mode 100644 index b74fe2e..0000000 --- 
a/venv/lib/python3.7/site-packages/easy-install.pth +++ /dev/null @@ -1,2 +0,0 @@ -./setuptools-40.8.0-py3.7.egg -./pip-19.0.3-py3.7.egg diff --git a/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/INSTALLER b/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/METADATA b/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/METADATA deleted file mode 100644 index d87023b..0000000 --- a/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/METADATA +++ /dev/null @@ -1,56 +0,0 @@ -Metadata-Version: 2.1 -Name: numpy -Version: 1.18.2 -Summary: NumPy is the fundamental package for array computing with Python. -Home-page: https://www.numpy.org -Author: Travis E. Oliphant et al. -Maintainer: NumPy Developers -Maintainer-email: numpy-discussion@python.org -License: BSD -Download-URL: https://pypi.python.org/pypi/numpy -Project-URL: Bug Tracker, https://github.com/numpy/numpy/issues -Project-URL: Documentation, https://docs.scipy.org/doc/numpy/ -Project-URL: Source Code, https://github.com/numpy/numpy -Platform: Windows -Platform: Linux -Platform: Solaris -Platform: Mac OS-X -Platform: Unix -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Science/Research -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved -Classifier: Programming Language :: C -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Programming Language :: Python :: 3.8 -Classifier: Programming Language :: Python :: 3 :: Only -Classifier: Programming Language :: Python :: Implementation :: 
CPython -Classifier: Topic :: Software Development -Classifier: Topic :: Scientific/Engineering -Classifier: Operating System :: Microsoft :: Windows -Classifier: Operating System :: POSIX -Classifier: Operating System :: Unix -Classifier: Operating System :: MacOS -Requires-Python: >=3.5 - -It provides: - -- a powerful N-dimensional array object -- sophisticated (broadcasting) functions -- tools for integrating C/C++ and Fortran code -- useful linear algebra, Fourier transform, and random number capabilities -- and much more - -Besides its obvious scientific uses, NumPy can also be used as an efficient -multi-dimensional container of generic data. Arbitrary data-types can be -defined. This allows NumPy to seamlessly and speedily integrate with a wide -variety of databases. - -All NumPy wheels distributed on PyPI are BSD licensed. - - - diff --git a/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/RECORD b/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/RECORD deleted file mode 100644 index 636a01b..0000000 --- a/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/RECORD +++ /dev/null @@ -1,852 +0,0 @@ -../../../bin/f2py,sha256=-BAA-2fWt2Kcos0jeHtabPXud3qyCZ-1dOc8lU3tJJI,258 -../../../bin/f2py3,sha256=-BAA-2fWt2Kcos0jeHtabPXud3qyCZ-1dOc8lU3tJJI,258 -../../../bin/f2py3.7,sha256=-BAA-2fWt2Kcos0jeHtabPXud3qyCZ-1dOc8lU3tJJI,258 -numpy-1.18.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -numpy-1.18.2.dist-info/METADATA,sha256=TuIVILC5R4ELDt_vDZ9g3IIi-6phvFDaLXXa_jKh1oM,2057 -numpy-1.18.2.dist-info/RECORD,, -numpy-1.18.2.dist-info/WHEEL,sha256=AhV6RMqZ2IDfreRJKo44QWYxYeP-0Jr0bezzBLQ1eog,109 -numpy-1.18.2.dist-info/entry_points.txt,sha256=MA6o_IjpQrpZlNNxq1yxwYV0u_I689RuoWedrJLsZnk,113 -numpy-1.18.2.dist-info/top_level.txt,sha256=4J9lbBMLnAiyxatxh8iRKV5Entd_6-oqbO7pzJjMsPw,6 -numpy/.libs/libgfortran-ed201abd.so.3.0.0,sha256=-wq9A9a6iPJfgojsh9Fi4vj6Br_EwUqr7W5Pc4giOYg,1023960 
-numpy/.libs/libopenblasp-r0-34a18dc3.3.7.so,sha256=yHuhchYklHB9dvBnMyw8DDkIvR3ApKIE_LPaeGklZw4,29724672 -numpy/LICENSE.txt,sha256=kL0gtRLFMt0qE0tusWLm-rVSSW0Uy3UA-f0l8ZEVikk,45692 -numpy/__config__.py,sha256=l-kYBVT3VpoLPbr8_dilDgG-Z1l-VOLtHHFd2vCF8fw,1646 -numpy/__init__.py,sha256=Ited5sCQ_GQpr_n6rXbUxiF6PsLBQHuBs6VZuTdX9iY,8858 -numpy/__pycache__/__config__.cpython-37.pyc,, -numpy/__pycache__/__init__.cpython-37.pyc,, -numpy/__pycache__/_distributor_init.cpython-37.pyc,, -numpy/__pycache__/_globals.cpython-37.pyc,, -numpy/__pycache__/_pytesttester.cpython-37.pyc,, -numpy/__pycache__/conftest.cpython-37.pyc,, -numpy/__pycache__/ctypeslib.cpython-37.pyc,, -numpy/__pycache__/dual.cpython-37.pyc,, -numpy/__pycache__/matlib.cpython-37.pyc,, -numpy/__pycache__/setup.cpython-37.pyc,, -numpy/__pycache__/version.cpython-37.pyc,, -numpy/_distributor_init.py,sha256=IgPkSK3H9bgjFeUfWuXhjKrgetQl5ztUW-rTyjGHK3c,331 -numpy/_globals.py,sha256=p8xxERZsxjGPUWV9pMY3jz75NZxDLppGeKaHbYGCDqM,2379 -numpy/_pytesttester.py,sha256=JQAw-aDSd7hl9dPpeIvD7eRbrMppI9sFeYQEgqpTqx8,6980 -numpy/compat/__init__.py,sha256=MHle4gJcrXh1w4SNv0mz5rbUTAjAzHnyO3rtbSW3AUo,498 -numpy/compat/__pycache__/__init__.cpython-37.pyc,, -numpy/compat/__pycache__/_inspect.cpython-37.pyc,, -numpy/compat/__pycache__/py3k.cpython-37.pyc,, -numpy/compat/__pycache__/setup.cpython-37.pyc,, -numpy/compat/_inspect.py,sha256=xEImUFhm4VAzT2LJj2Va_yDAHJsdy0RwSi1JwOOhykU,7513 -numpy/compat/py3k.py,sha256=EWeA4IONUTXhTcTJ7wEh2xoECE5knqPI1VzEfSTyY_8,7097 -numpy/compat/setup.py,sha256=REJcwNU7EbfwBFS1FHazGJcUhh50_5gYttr3BSczCiM,382 -numpy/compat/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/compat/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/compat/tests/__pycache__/test_compat.cpython-37.pyc,, -numpy/compat/tests/test_compat.py,sha256=KtCVafV8yN5g90tIIe7T9f5ruAs5Y0DNa64d040Rx5s,542 -numpy/conftest.py,sha256=HHIMNsYUUp2eensC63LtRYy_NZC1su1tbtN26rnrg5E,2749 
-numpy/core/__init__.py,sha256=MM3QX8fvUwztExd4zaHTdgvXxE8yr4ZMkr4SlcGD7QI,4925 -numpy/core/__pycache__/__init__.cpython-37.pyc,, -numpy/core/__pycache__/_add_newdocs.cpython-37.pyc,, -numpy/core/__pycache__/_asarray.cpython-37.pyc,, -numpy/core/__pycache__/_dtype.cpython-37.pyc,, -numpy/core/__pycache__/_dtype_ctypes.cpython-37.pyc,, -numpy/core/__pycache__/_exceptions.cpython-37.pyc,, -numpy/core/__pycache__/_internal.cpython-37.pyc,, -numpy/core/__pycache__/_methods.cpython-37.pyc,, -numpy/core/__pycache__/_string_helpers.cpython-37.pyc,, -numpy/core/__pycache__/_type_aliases.cpython-37.pyc,, -numpy/core/__pycache__/_ufunc_config.cpython-37.pyc,, -numpy/core/__pycache__/arrayprint.cpython-37.pyc,, -numpy/core/__pycache__/cversions.cpython-37.pyc,, -numpy/core/__pycache__/defchararray.cpython-37.pyc,, -numpy/core/__pycache__/einsumfunc.cpython-37.pyc,, -numpy/core/__pycache__/fromnumeric.cpython-37.pyc,, -numpy/core/__pycache__/function_base.cpython-37.pyc,, -numpy/core/__pycache__/generate_numpy_api.cpython-37.pyc,, -numpy/core/__pycache__/getlimits.cpython-37.pyc,, -numpy/core/__pycache__/machar.cpython-37.pyc,, -numpy/core/__pycache__/memmap.cpython-37.pyc,, -numpy/core/__pycache__/multiarray.cpython-37.pyc,, -numpy/core/__pycache__/numeric.cpython-37.pyc,, -numpy/core/__pycache__/numerictypes.cpython-37.pyc,, -numpy/core/__pycache__/overrides.cpython-37.pyc,, -numpy/core/__pycache__/records.cpython-37.pyc,, -numpy/core/__pycache__/setup.cpython-37.pyc,, -numpy/core/__pycache__/setup_common.cpython-37.pyc,, -numpy/core/__pycache__/shape_base.cpython-37.pyc,, -numpy/core/__pycache__/umath.cpython-37.pyc,, -numpy/core/__pycache__/umath_tests.cpython-37.pyc,, -numpy/core/_add_newdocs.py,sha256=LqccpEMz9ETDG4jXOTrBnol3wUO0hTw0I1JDSOUsUE8,202937 -numpy/core/_asarray.py,sha256=NH0SPZr_pBMKOJgyy6dsfmKOQPy3r31hlzFG5bP1yYA,9940 -numpy/core/_dtype.py,sha256=lhiLEajO4UQ0wGSY52T4KtLdylFfCaAQs-YV6Ru-hNM,10053 
-numpy/core/_dtype_ctypes.py,sha256=EiTjqVsDSibpbS8pkvzres86E9er1aFaflsss9N3Uao,3448 -numpy/core/_exceptions.py,sha256=MbGfp_yuOifOpZRppfk-DA9dL07AVv7blO0i63OX8lU,6259 -numpy/core/_internal.py,sha256=pwHot3zvS_5qcO_INVPk7gpM1YkNK1A5K8M1NyF1ghc,26469 -numpy/core/_methods.py,sha256=g8AnOnA3CdC4qe7s7N_pG3OcaW-YKhXmRz8FmLNnpG0,8399 -numpy/core/_multiarray_tests.cpython-37m-x86_64-linux-gnu.so,sha256=9Ewrq9nU6CKSUR5MXAqcCz_HcxI9Y4v_UsJsW7zNSsY,580203 -numpy/core/_multiarray_umath.cpython-37m-x86_64-linux-gnu.so,sha256=2wzZ2EtGMJjDaycOULGHZqZFUr_KZwApuza_yjReE1o,21507704 -numpy/core/_operand_flag_tests.cpython-37m-x86_64-linux-gnu.so,sha256=kawkN-3Gn6UQNAFv5B_M3JmCr4yeL8RSI8-a7Xz6gz8,31319 -numpy/core/_rational_tests.cpython-37m-x86_64-linux-gnu.so,sha256=UrPyPujhte6FfTbtswWq8Bei_xGz8A3CqDf6PCxg0Ls,270173 -numpy/core/_string_helpers.py,sha256=NGGGhaFdU5eGiUAj3GTIBoOgWs4r9aTNlsE2r9NgX6Q,2855 -numpy/core/_struct_ufunc_tests.cpython-37m-x86_64-linux-gnu.so,sha256=a6SlGjJLfa6wyV5Bs14o_ZnVN_txdltect3Ffk7x5HE,34727 -numpy/core/_type_aliases.py,sha256=FA2Pz5OKqcLl1QKLJNu-ETHIzQ1ii3LH5pSdHhZkfZA,9181 -numpy/core/_ufunc_config.py,sha256=yQ9RSST7_TagO8EYDZG5g23gz7loX76a0ajCU5HfYRI,14219 -numpy/core/_umath_tests.cpython-37m-x86_64-linux-gnu.so,sha256=l8pu1J2kNgM6hlXTbfbQEze7-fonaZMzxS0jj8RpW3Q,85900 -numpy/core/arrayprint.py,sha256=WuIViYKXL-qr000rKTQhss9swe3nsKlG2Jc0mfuiS10,59774 -numpy/core/cversions.py,sha256=ukYNpkei0Coi7DOcbroXuDoXc6kl5odxmcy_39pszA0,413 -numpy/core/defchararray.py,sha256=HJU2o-dQbiwglIwIv8MRSEDB6p4p2PE9Aq67IQ47aEQ,70980 -numpy/core/einsumfunc.py,sha256=94J-3sQQWoCzYGwUlsEIHD6B3Qjv481XUD2jd0KClGY,51271 -numpy/core/fromnumeric.py,sha256=_d9szuykDMfWhYjBl5tIcD81G7KNz9l4PMyvfxyzO64,117694 -numpy/core/function_base.py,sha256=jgKa0iHIzpUUy8T9XXlIEbI8XO0xeh1olG409kdM2qo,18344 -numpy/core/generate_numpy_api.py,sha256=0JBYTvekUeJyhp7QMKtWJSK-L6lVNhev16y0F2qX2pU,7470 -numpy/core/getlimits.py,sha256=X26A-6nrzC1FH1wtCggX-faIw0WMYYkPH1_983h4hCE,18914 
-numpy/core/include/numpy/__multiarray_api.h,sha256=SQEcRelzaunap6-uUl3E21qUanrFOBcC1PiQITpVU0Y,61920 -numpy/core/include/numpy/__ufunc_api.h,sha256=fWkLh84HH3fN99gOJoZ10bZEpaO3VGT9aNpTu-2zblI,12179 -numpy/core/include/numpy/_neighborhood_iterator_imp.h,sha256=hNiUJ3gmJRxdjByk5R5jmLeBKpNfaP_29KLHFuTrSIA,1861 -numpy/core/include/numpy/_numpyconfig.h,sha256=bDiTLQ972ZWQBEpx6OM8riS64nSAelKa2kIimnXm_Ss,1010 -numpy/core/include/numpy/arrayobject.h,sha256=SXj-2avTHV8mNWvv7sOYHLKkRKcafDG7_HNpQNot1GE,164 -numpy/core/include/numpy/arrayscalars.h,sha256=vC7QCznlT8vkyvxbIh4QNwi1LR7UkP7GJ1j_0ZiJa1E,3509 -numpy/core/include/numpy/halffloat.h,sha256=ohvyl3Kz3mB1hW3MRzxwPDH-0L9WWM_eKhvYLjtT_2w,1878 -numpy/core/include/numpy/multiarray_api.txt,sha256=qG593ym4jzzsPHIkFfKSTxK1XrrICKTLb9qGIto1fxc,56884 -numpy/core/include/numpy/ndarrayobject.h,sha256=E737J_1YQI-igbXcbA3kdbwsMqTv1aXcy6bp5aE0P_0,11496 -numpy/core/include/numpy/ndarraytypes.h,sha256=Lelck68SVrCPhxTAGURh_AyOth5txewU6xp2f556lLg,65105 -numpy/core/include/numpy/noprefix.h,sha256=YE-lWegAdZKI5lf44AW5jiWbnmO6hircWzj_WMFrLT4,6786 -numpy/core/include/numpy/npy_1_7_deprecated_api.h,sha256=LLeZKLuJADU3RDfT04pu5FCxCBU5cEzY5Q9phR_HL78,4715 -numpy/core/include/numpy/npy_3kcompat.h,sha256=exFgMT6slmo2Zg3bFsY3mKLUrrkg3KU_66gUmu5IYKk,14666 -numpy/core/include/numpy/npy_common.h,sha256=R-LMbpQDZJ4XXKDeXvI58WFKgkEiljDDgDMl6Yk_KTI,37943 -numpy/core/include/numpy/npy_cpu.h,sha256=3frXChwN0Cxca-sAeTTOJCiZ6_2q1EuggUwqEotdXLg,3879 -numpy/core/include/numpy/npy_endian.h,sha256=HHanBydLvLC2anJJySvy6wZ_lYaC_xI6GNwT8cJ78rE,2596 -numpy/core/include/numpy/npy_interrupt.h,sha256=Eyddk806h30jxgymbr44b7eIZKrHXtNzXpPtUPp2Ng8,3439 -numpy/core/include/numpy/npy_math.h,sha256=VFv-sN9Dnm3wmnZoHoGJO5lFyJECbQfipzJgJj1p5vA,23139 -numpy/core/include/numpy/npy_no_deprecated_api.h,sha256=X-wRYdpuwIuerTnBblKjR7Dqsv8rqxn01RFLVWUHvi8,567 -numpy/core/include/numpy/npy_os.h,sha256=cEvEvpD92EeFjsjRelw1dXJaHYL-0yPJDuz3VeSJs4E,817 
-numpy/core/include/numpy/numpyconfig.h,sha256=mHTx0sXeXNcaq0wWcP-8hGFUWvoG_2AHFKub59KJGm4,1327 -numpy/core/include/numpy/old_defines.h,sha256=7eiZoi7JrdVT9LXKCoeta5AoIncGa98GcVlWqDrLjwk,6306 -numpy/core/include/numpy/oldnumeric.h,sha256=Yo-LiSzVfDK2YyhlH41ff4gS0m-lv8XjI4JcAzpdy94,708 -numpy/core/include/numpy/random/bitgen.h,sha256=Gfrwd0M0odkpRJXw7QXJgVxb5XCw3iDXacWE_h-F_uM,389 -numpy/core/include/numpy/random/distributions.h,sha256=nbbdQ6X-lsdyzo7bmss4i3kg354GnkYQGGfYld_x6HM,9633 -numpy/core/include/numpy/ufunc_api.txt,sha256=RTz9blLHbWMCWMaiPeJyqt9d93nHJXJT7RiTf-bbMO4,6937 -numpy/core/include/numpy/ufuncobject.h,sha256=GpAJZKRnE08xRy5IOJD8r8i6Xz1nltg-iEMl3Frqsyk,12746 -numpy/core/include/numpy/utils.h,sha256=KqJzngAvarYV3oZQu5fY0ARPVihUP7FsZjdljysaSUk,729 -numpy/core/lib/libnpymath.a,sha256=aWHXyaoHHxnrPzhrK9HtatrDwlmjZKQHfT7278_T7tk,355952 -numpy/core/lib/npy-pkg-config/mlib.ini,sha256=_LsWV1eStNqwhdiYPa2538GL46dnfVwT4MrI1zbsoFw,147 -numpy/core/lib/npy-pkg-config/npymath.ini,sha256=kamUNrYKAmXqQa8BcNv7D5sLqHh6bnChM0_5rZCsTfY,360 -numpy/core/machar.py,sha256=P8Ae9aOzoTUMWWiAXgE0Uf5Vk837DTODV5ndQLvm5zU,10860 -numpy/core/memmap.py,sha256=RVD10EyH-4jgzrTy3Xc_mXsJrvt-QMGGLmY7Aoqmy7I,11590 -numpy/core/multiarray.py,sha256=7yvhC6SVcF-MGwX5PwsSmV7jMfObe4gldkNI6lqsyvY,53002 -numpy/core/numeric.py,sha256=xV7Lo8i9bcILM4GGrryguiQAWzCuJJdM99CKkLndcQE,71955 -numpy/core/numerictypes.py,sha256=fCQuWSy6vshZHh4YP4oz9n3ysSHl-HSaGMjEzmVVQdY,17918 -numpy/core/overrides.py,sha256=_OoaYi35e6xJ9QCOeMuJlZmuU0efF47pJAXmTgWeHrU,7481 -numpy/core/records.py,sha256=xOCgmcTtTLjBaOYtjae9t-DtvpqFjFJwg_c5ZgHZ0xs,30928 -numpy/core/setup.py,sha256=eVqe4s7YjhH8bSgsGSjXKBF2BZVj5vOeiexbh_M3ibE,42069 -numpy/core/setup_common.py,sha256=z3oR0UKy8sbt0rHq7TEjzwkitQNsfKw7T69LD18qTbY,19365 -numpy/core/shape_base.py,sha256=VXd2RUcUoxp4mcLQWxNszD-ygubCS8xp9ZOHYhnxddY,28964 -numpy/core/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
-numpy/core/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/core/tests/__pycache__/_locales.cpython-37.pyc,, -numpy/core/tests/__pycache__/test__exceptions.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_abc.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_api.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_arrayprint.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_datetime.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_defchararray.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_deprecations.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_dtype.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_einsum.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_errstate.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_extint128.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_function_base.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_getlimits.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_half.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_indexerrors.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_indexing.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_issue14735.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_item_selection.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_longdouble.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_machar.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_mem_overlap.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_memmap.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_multiarray.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_nditer.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_numeric.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_numerictypes.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_overrides.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_print.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_records.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_regression.cpython-37.pyc,, 
-numpy/core/tests/__pycache__/test_scalar_ctors.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_scalar_methods.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_scalarbuffer.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_scalarinherit.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_scalarmath.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_scalarprint.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_shape_base.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_ufunc.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_umath.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_umath_accuracy.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_umath_complex.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_unicode.cpython-37.pyc,, -numpy/core/tests/_locales.py,sha256=GQro3bha8c5msgQyvNzmDUrNwqS2cGkKKuN4gg4c6tI,2266 -numpy/core/tests/data/astype_copy.pkl,sha256=lWSzCcvzRB_wpuRGj92spGIw-rNPFcd9hwJaRVvfWdk,716 -numpy/core/tests/data/recarray_from_file.fits,sha256=NA0kliz31FlLnYxv3ppzeruONqNYkuEvts5wzXEeIc4,8640 -numpy/core/tests/data/umath-validation-set-README,sha256=-1JRNN1zx8S1x9l4D0786USSRMNt3Dk0nsOMg6O7CiM,959 -numpy/core/tests/data/umath-validation-set-cos,sha256=qIka8hARvhXZOu9XR3CnGiPnOdrkAaxEgFgEEqus06s,24703 -numpy/core/tests/data/umath-validation-set-exp,sha256=GZn7cZRKAjskJ4l6tcvDF53I3e9zegQH--GPzYib9_g,4703 -numpy/core/tests/data/umath-validation-set-log,sha256=gDbicMaonc26BmtHPoyvunUvXrSFLV9BY8L1QVoH5Dw,4088 -numpy/core/tests/data/umath-validation-set-sin,sha256=fMEynY6dZz18jtuRdpfOJT9KnpRSWd9ilcz0oXMwgCQ,24690 -numpy/core/tests/test__exceptions.py,sha256=8XVPAkXmYh9dHiN5XhQk4D_r_l71cYpejg_ueTscrRI,1495 -numpy/core/tests/test_abc.py,sha256=cpIqt3VFBZLHbuNpO4NuyCGgd--k1zij5aasu7FV77I,2402 -numpy/core/tests/test_api.py,sha256=RIlRUqB_lRM0xcrEAdLRdDRWWk-0O7bUcEJfPCHyNl4,19224 -numpy/core/tests/test_arrayprint.py,sha256=zoNxYH3h7VnMFtU1vt67ujPuRCAQkQ1VmXKhTo0Juqw,34400 
-numpy/core/tests/test_datetime.py,sha256=LT_KGIp6AyqAryB289cKW4_xTQ44Egb6JriGNHiB_g8,108148 -numpy/core/tests/test_defchararray.py,sha256=L5EoOBTZVrRU1Vju5IhY8BSUlBOGPzEViKJwyQSlpXo,25481 -numpy/core/tests/test_deprecations.py,sha256=vcbHCQUx7_Um0pPofOLY-3u4AaF1ABIVmZsJBCXnjWw,22466 -numpy/core/tests/test_dtype.py,sha256=gkDXeJFWFcYHu5Sw5b6Wbyl_xbkkssOYdx5EdjLhEHA,49663 -numpy/core/tests/test_einsum.py,sha256=gMWQQ9yfSdEUlY0db4e-I2seD7n99xToiN-g6tB3TBE,44736 -numpy/core/tests/test_errstate.py,sha256=84S9ragkp2xqJ5s8uNEt1-4SGs99t3pkPVMHYc4QL-s,1505 -numpy/core/tests/test_extint128.py,sha256=-0zEInkai1qRhXI0bdHCguU_meD3s6Td4vUIBwirYQI,5709 -numpy/core/tests/test_function_base.py,sha256=r45sHfslz-e8qgn10PT8elVEBjeXEGk7xsaW-s4tjvY,13268 -numpy/core/tests/test_getlimits.py,sha256=2fBK7Slo67kP6bThcN9bOKmeX9gGPQVUE17jGVydoXk,4427 -numpy/core/tests/test_half.py,sha256=83O_R-Frt8mx2-7WEbmoVXLWJ5Dc5SH9n0vyPJ9Wp_I,22301 -numpy/core/tests/test_indexerrors.py,sha256=0Ku3Sy5jcaE3D2KsyDrFTvgQzMv2dyWja3hc4t5-n_k,4857 -numpy/core/tests/test_indexing.py,sha256=0-I5M5NCgDgHM58Myxp1vpOaulm7_s3n4K82_BeDihk,51366 -numpy/core/tests/test_issue14735.py,sha256=JADt-FhIdq6MaVAfVI_ACI9EpfpqylFdDrZ3A95NW1w,728 -numpy/core/tests/test_item_selection.py,sha256=0Ocg_RzeQjNqwIaPhb_Zk0ZlmqSjIBY0lHeef_H9l9U,3579 -numpy/core/tests/test_longdouble.py,sha256=C-Uaz8ho6YfvNFf5hy1HbbIfZ4mMsw0zdH1bZ60shV0,12321 -numpy/core/tests/test_machar.py,sha256=FrKeGhC7j-z9tApS_uI1E0DUkzieKIdUHMQPfCSM0t8,1141 -numpy/core/tests/test_mem_overlap.py,sha256=AyBz4pm7HhTDdlW2pq9FR1AO0E5QAYdKpBoWbOdSrco,29505 -numpy/core/tests/test_memmap.py,sha256=sFJ6uaf6ior1Hzjg7Y-VYzYPHnuZOYmNczOBa-_GgSY,7607 -numpy/core/tests/test_multiarray.py,sha256=SDfgwGmfH4lAKkCEafEsfX1ERP7tVs4jELXOInzwihI,315998 -numpy/core/tests/test_nditer.py,sha256=VYOj7XD87yjArRSxPThhMeF-Kz5tC3hmav9glLbPkKM,112098 -numpy/core/tests/test_numeric.py,sha256=0SLdicNj0ODq6bh8FpO89FZAHPTs3XpJuI3jrNxMRNs,117625 
-numpy/core/tests/test_numerictypes.py,sha256=8C-_WrUHnnRcXyDjAHLevt6FZ8LO51ZVPY-ohP0FVQA,19635 -numpy/core/tests/test_overrides.py,sha256=rkP2O-8MYssKR4y6gKkNxz2LyaYvnCuHn6eOEYtJzsc,14619 -numpy/core/tests/test_print.py,sha256=Q53dqbjQQIlCzRp_1ZY0A-ptP7FlbBZVPeMeMLX0cVg,6876 -numpy/core/tests/test_records.py,sha256=CJu2VaBWsNQrYpCSS0HAV2aKv8Ow0Zfc5taegRslVW0,19651 -numpy/core/tests/test_regression.py,sha256=S8IS6iH19hsT41Ms33Bj1btMAkd2iVz2sXXHS98qcq8,88558 -numpy/core/tests/test_scalar_ctors.py,sha256=kjyYllJHyhMQGT49Xbjjc2tuFHXcQIM-PAZExMWczq8,2294 -numpy/core/tests/test_scalar_methods.py,sha256=n3eNfQ-NS6ODGfJFrww-RSKVm9QzRKeDRp0ae4TzQJ8,4220 -numpy/core/tests/test_scalarbuffer.py,sha256=M-xSWyn2ts_O4d69kWAuEEzupY6AZ6YpLI31Gxlvjn4,3556 -numpy/core/tests/test_scalarinherit.py,sha256=vIZqnyg99o3BsEQQHsiYxzedXIf4wSr9qVwqur_C-VU,1807 -numpy/core/tests/test_scalarmath.py,sha256=U-h1wclwyDaFpoASPrRq6qW2YJ1nAUW__XF6fNUzbjs,28807 -numpy/core/tests/test_scalarprint.py,sha256=SPTkscqlnApyqaOUZ5cgC2rDgGED6hPBtdRkWXxXlbE,15470 -numpy/core/tests/test_shape_base.py,sha256=B4869KCdnSxSTcTmqFhOPU2eRjmzOuG0fwVa3jrGyg8,24993 -numpy/core/tests/test_ufunc.py,sha256=LHGt9_It2-GP79B5dnEE4WhZQjTOxz99gmiVCndcHmA,81054 -numpy/core/tests/test_umath.py,sha256=Yb3SHIavyTSAJoQrNbpW9obBnSkbmosbvOa0b86DYpY,117248 -numpy/core/tests/test_umath_accuracy.py,sha256=GCvLPNmGeVCJcDpYst4Q21_0IkJGygdjMD8mBVlH_H8,2647 -numpy/core/tests/test_umath_complex.py,sha256=zvjC9COuHSZ_6BL3lz2iP7UppkNWL8ThP04fj0eulUQ,19413 -numpy/core/tests/test_unicode.py,sha256=PvWt5NLjgwulCgXakHEKMJ2pSpTLbUWgz9dZExEcSJ8,13656 -numpy/core/umath.py,sha256=KAWy8e3HN7CMF6bPfQ_MCL36bDuU7UeS39tlxaFAeto,1905 -numpy/core/umath_tests.py,sha256=Sr6VQTbH-sOMlXy-tg1-Unht7MKaaV4wtAYR6mQYNbU,455 -numpy/ctypeslib.py,sha256=_y3WO60jLJaHAaDbVj2PNF4jZ4X8EOqih14fvJffOVI,17443 -numpy/distutils/__config__.py,sha256=l-kYBVT3VpoLPbr8_dilDgG-Z1l-VOLtHHFd2vCF8fw,1646 -numpy/distutils/__init__.py,sha256=gsPLMHtEHdGbVbA9_LBfVAjnwo9n0j29aqEkCmehE7Y,1625 
-numpy/distutils/__pycache__/__config__.cpython-37.pyc,, -numpy/distutils/__pycache__/__init__.cpython-37.pyc,, -numpy/distutils/__pycache__/_shell_utils.cpython-37.pyc,, -numpy/distutils/__pycache__/ccompiler.cpython-37.pyc,, -numpy/distutils/__pycache__/compat.cpython-37.pyc,, -numpy/distutils/__pycache__/conv_template.cpython-37.pyc,, -numpy/distutils/__pycache__/core.cpython-37.pyc,, -numpy/distutils/__pycache__/cpuinfo.cpython-37.pyc,, -numpy/distutils/__pycache__/exec_command.cpython-37.pyc,, -numpy/distutils/__pycache__/extension.cpython-37.pyc,, -numpy/distutils/__pycache__/from_template.cpython-37.pyc,, -numpy/distutils/__pycache__/intelccompiler.cpython-37.pyc,, -numpy/distutils/__pycache__/lib2def.cpython-37.pyc,, -numpy/distutils/__pycache__/line_endings.cpython-37.pyc,, -numpy/distutils/__pycache__/log.cpython-37.pyc,, -numpy/distutils/__pycache__/mingw32ccompiler.cpython-37.pyc,, -numpy/distutils/__pycache__/misc_util.cpython-37.pyc,, -numpy/distutils/__pycache__/msvc9compiler.cpython-37.pyc,, -numpy/distutils/__pycache__/msvccompiler.cpython-37.pyc,, -numpy/distutils/__pycache__/npy_pkg_config.cpython-37.pyc,, -numpy/distutils/__pycache__/numpy_distribution.cpython-37.pyc,, -numpy/distutils/__pycache__/pathccompiler.cpython-37.pyc,, -numpy/distutils/__pycache__/setup.cpython-37.pyc,, -numpy/distutils/__pycache__/system_info.cpython-37.pyc,, -numpy/distutils/__pycache__/unixccompiler.cpython-37.pyc,, -numpy/distutils/_shell_utils.py,sha256=kMLOIoimB7PdFRgoVxCIyCFsIl1pP3d0hkm_s3E9XdA,2613 -numpy/distutils/ccompiler.py,sha256=qlwbbVN_0Qsw4gpx8tCyMAy_9a146XHHkJCFRNKKvP8,27660 -numpy/distutils/command/__init__.py,sha256=l5r9aYwIEq1D-JJc8WFUxABk6Ip28FpRK_ok7wSLRZE,1098 -numpy/distutils/command/__pycache__/__init__.cpython-37.pyc,, -numpy/distutils/command/__pycache__/autodist.cpython-37.pyc,, -numpy/distutils/command/__pycache__/bdist_rpm.cpython-37.pyc,, -numpy/distutils/command/__pycache__/build.cpython-37.pyc,, 
-numpy/distutils/command/__pycache__/build_clib.cpython-37.pyc,, -numpy/distutils/command/__pycache__/build_ext.cpython-37.pyc,, -numpy/distutils/command/__pycache__/build_py.cpython-37.pyc,, -numpy/distutils/command/__pycache__/build_scripts.cpython-37.pyc,, -numpy/distutils/command/__pycache__/build_src.cpython-37.pyc,, -numpy/distutils/command/__pycache__/config.cpython-37.pyc,, -numpy/distutils/command/__pycache__/config_compiler.cpython-37.pyc,, -numpy/distutils/command/__pycache__/develop.cpython-37.pyc,, -numpy/distutils/command/__pycache__/egg_info.cpython-37.pyc,, -numpy/distutils/command/__pycache__/install.cpython-37.pyc,, -numpy/distutils/command/__pycache__/install_clib.cpython-37.pyc,, -numpy/distutils/command/__pycache__/install_data.cpython-37.pyc,, -numpy/distutils/command/__pycache__/install_headers.cpython-37.pyc,, -numpy/distutils/command/__pycache__/sdist.cpython-37.pyc,, -numpy/distutils/command/autodist.py,sha256=m5BGbaBPrBjbp3U_lGD35BS_yUxjarB9S9wAwTxgGvw,3041 -numpy/distutils/command/bdist_rpm.py,sha256=rhhIyFzkd5NGi6lZaft44EBPZB3zZFRDc75klJYnbw8,775 -numpy/distutils/command/build.py,sha256=0sB5J4vmeEL6CBpvCo8EVVRx9CnM3HYR1fddv7uQIh0,1448 -numpy/distutils/command/build_clib.py,sha256=YaWxa26hf_D7qI2rv-utAPQWFf99UEBfe9uJxT_YT2c,13800 -numpy/distutils/command/build_ext.py,sha256=fiTsl8O8dBODimXtG-RAVHMA764ea_aNo3gvQ_6Nv-4,26434 -numpy/distutils/command/build_py.py,sha256=7TBGLz0va0PW6sEX-aUjsXdzvhuSbJGgIrMim1JTwu4,1210 -numpy/distutils/command/build_scripts.py,sha256=ze19jHBhC3JggKLbL9wgs9I3mG7ls-V2NbykvleNwgQ,1731 -numpy/distutils/command/build_src.py,sha256=4lOovmHAoo_vDC7RkuxZccEyQUjmelxW-J8KL2wEadk,31246 -numpy/distutils/command/config.py,sha256=ZziDEAnaHskht8MYCHA0BSEcHny-byOiDPx_P8YfhZ0,20473 -numpy/distutils/command/config_compiler.py,sha256=SKJTEk_Y_Da-dVYOHAdf4c3yXxjlE1dsr-hJxY0m0PU,4435 -numpy/distutils/command/develop.py,sha256=nYM5yjhKtGKh_3wZwrvEQBLYHKldz64aU-0iSycSkXA,641 
-numpy/distutils/command/egg_info.py,sha256=pdiCFQiQuIpf_xmVk9Njl7iowY9CxGn9KRbU-A9eBfg,987 -numpy/distutils/command/install.py,sha256=-y7bHvwoQdDCMGdLONawqnOWKtwQzjp5v-vSpZ7PdYU,3144 -numpy/distutils/command/install_clib.py,sha256=rGCajxbqAZjsYWg3l5B7ZRgcHJzFtYAiUHZH-DO64eU,1465 -numpy/distutils/command/install_data.py,sha256=7iWTw93ty2sBPwHwg_EEhgQhZSZe6SsKdfTS9RbUR9A,914 -numpy/distutils/command/install_headers.py,sha256=NbZwt-Joo80z_1TfxA-mIWXm2L9Mmh4ZLht7HAuveoo,985 -numpy/distutils/command/sdist.py,sha256=tHmlb0RzD8x04dswPXEua9H_b6GuHWY1V3hYkwJDKvA,799 -numpy/distutils/compat.py,sha256=xzkW8JgJgGTmye34QCYTIkLfsXBvmPu4tvgCwXNdiU0,218 -numpy/distutils/conv_template.py,sha256=0BFDE5IToW3sMVMzSRjmgENs2PAKyt7Wnvm2gyFrKnU,9750 -numpy/distutils/core.py,sha256=9GNNyWDTCqfnD7Jp2tzp9vOBVyeJmF8lsgv_xdlt59g,8230 -numpy/distutils/cpuinfo.py,sha256=onN3xteqf2G5IgKwRCYDG0VucoQY8sCTMUJ0nhc5QT0,23013 -numpy/distutils/exec_command.py,sha256=PKHgZ-hESpsBM8vnUhPknPRioAc6hLvsJzcOQoey-zo,10918 -numpy/distutils/extension.py,sha256=hXpEH2aP6ItaqNms1RW6TA1tSi0z37abrFpnyKXcjcA,3495 -numpy/distutils/fcompiler/__init__.py,sha256=-9uYUvrMwdxy0jetB-T-QHSwmWcobNRL5u0Bbj0Sm4w,40157 -numpy/distutils/fcompiler/__pycache__/__init__.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/absoft.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/compaq.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/environment.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/g95.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/gnu.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/hpux.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/ibm.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/intel.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/lahey.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/mips.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/nag.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/none.cpython-37.pyc,, 
-numpy/distutils/fcompiler/__pycache__/pathf95.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/pg.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/sun.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/vast.cpython-37.pyc,, -numpy/distutils/fcompiler/absoft.py,sha256=AKbj5uGr8dpGDLzRIJbdUnXXAtF_5k4JqnqwTWvy-tQ,5565 -numpy/distutils/fcompiler/compaq.py,sha256=SlIcqV82SrmOSVMZCYdSyhtglSl3doAoxDCcjq1hbkE,4109 -numpy/distutils/fcompiler/environment.py,sha256=1AziWo5qkxOFClEnChTFnUMIShtNCwHQa2xidjorjKk,3078 -numpy/distutils/fcompiler/g95.py,sha256=K68RRAvOvyKoh-jsD9J4ZDsHltrGnJ_AllxULhy6iOE,1396 -numpy/distutils/fcompiler/gnu.py,sha256=oHipJDyfisSK9_Kdkv1Av8hDHY3UbLALgWfBO7cXkPA,20804 -numpy/distutils/fcompiler/hpux.py,sha256=xpNfy7vCKWPnJ5M3JPnjMAewKBAfKN5hFX3hvEL2zaM,1419 -numpy/distutils/fcompiler/ibm.py,sha256=3q-AZ3TC3VjRxNyvkeIGN81SDWtHDH9iddfd8hqk4x4,3607 -numpy/distutils/fcompiler/intel.py,sha256=WlsBtvZnLpFke7oTpMCDYFlccNSUWWkB2p422iwQURU,6861 -numpy/distutils/fcompiler/lahey.py,sha256=pJ0-xgtYwyYXgt8JlN8PFeYYEWB3vOmFkNx6UUFXzuM,1393 -numpy/distutils/fcompiler/mips.py,sha256=IxLojWR1oi0VW93PxPpHQXRwZcYffD1dunllQW2w19A,1780 -numpy/distutils/fcompiler/nag.py,sha256=eiTvBopdCgVh5-HDTryVbRrYvf4r_Sqse1mruTt5Blo,2608 -numpy/distutils/fcompiler/none.py,sha256=N6adoFAf8inIQfCDEBzK5cGI3hLIWWpHmQXux8iJDfA,824 -numpy/distutils/fcompiler/pathf95.py,sha256=Xf1JMB30PDSoNpA1Y-vKPRBeNO0XfSi0dvVQvvdjfUQ,1127 -numpy/distutils/fcompiler/pg.py,sha256=G0uNPfedmbkYWfChg1UbxBKqo25RenzSVJN1BUtRDw0,4232 -numpy/distutils/fcompiler/sun.py,sha256=21DQ6Rprr9rEp4pp7Np8kCwOc0Xfqdxa1iX0O-yPJPM,1643 -numpy/distutils/fcompiler/vast.py,sha256=LJ21-WIJsiquLtjdDaNsJqblwN5wuM2FZsYl1R40vN8,1733 -numpy/distutils/from_template.py,sha256=k5PrP9If_X8J5Fsh9vR2h0Tcj2JsZC9EsC2h8fGfaXs,8027 -numpy/distutils/intelccompiler.py,sha256=1qzr6PMxi0UkR0NUY3rt3gqww9GwJ-Gbe91yxQKlieU,4291 -numpy/distutils/lib2def.py,sha256=YyVORDcNVb-Wzn_ibQXIqeQlAdXQQsLY3XfwtvhnLnE,3710 
-numpy/distutils/line_endings.py,sha256=jrYG8SnOyMN0lvQim4Kf6ChoHdtaWO0egeTUUHtPoQA,2085 -numpy/distutils/log.py,sha256=6wgjYylV3BPEYc0NV8V3MIeKHxmlj0cP5UsDjTe6YS4,2796 -numpy/distutils/mingw/gfortran_vs2003_hack.c,sha256=cbsN3Lk9Hkwzr9c-yOP2xEBg1_ml1X7nwAMDWxGjzc8,77 -numpy/distutils/mingw32ccompiler.py,sha256=k-2SpajodL5Ey8ZbmiKQpXPhABe7UD0PJilEWbh8gH4,25411 -numpy/distutils/misc_util.py,sha256=DK1mEpnYeSsF70lgCuF7H3a5z3cgVWACAiJqz-dIzrM,84707 -numpy/distutils/msvc9compiler.py,sha256=TuPYjPFp3nYQSIG1goNxuOly7o3VMx-H35POMpycB3k,2258 -numpy/distutils/msvccompiler.py,sha256=7EUlHbgdKBBJG3AzgE94AQeUFnj0HcD6M7_YPN7vdCs,1994 -numpy/distutils/npy_pkg_config.py,sha256=RQZnr78rmA-dMIxOnibBMBMsGqsZUBK3Hnx-J8UQl8I,13152 -numpy/distutils/numpy_distribution.py,sha256=lbnEW1OxWxC_1n2sKd0Q3fC5QnNdFuAkNAlvXF99zIQ,700 -numpy/distutils/pathccompiler.py,sha256=FjNouOTL8u4gLMbJW7GdT0RlsD2nXV1_SEBNZj9QdpQ,779 -numpy/distutils/setup.py,sha256=q3DcCZNkK_jHsC0imocewd4uCKQWWXjkzd4nkBmkMFI,611 -numpy/distutils/system_info.py,sha256=IcYgQX1CzFSspCUMq8yttCa2gPqsk09JhR_QWnpdDys,104759 -numpy/distutils/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/distutils/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/distutils/tests/__pycache__/test_exec_command.cpython-37.pyc,, -numpy/distutils/tests/__pycache__/test_fcompiler.cpython-37.pyc,, -numpy/distutils/tests/__pycache__/test_fcompiler_gnu.cpython-37.pyc,, -numpy/distutils/tests/__pycache__/test_fcompiler_intel.cpython-37.pyc,, -numpy/distutils/tests/__pycache__/test_fcompiler_nagfor.cpython-37.pyc,, -numpy/distutils/tests/__pycache__/test_from_template.cpython-37.pyc,, -numpy/distutils/tests/__pycache__/test_mingw32ccompiler.cpython-37.pyc,, -numpy/distutils/tests/__pycache__/test_misc_util.cpython-37.pyc,, -numpy/distutils/tests/__pycache__/test_npy_pkg_config.cpython-37.pyc,, -numpy/distutils/tests/__pycache__/test_shell_utils.cpython-37.pyc,, 
-numpy/distutils/tests/__pycache__/test_system_info.cpython-37.pyc,, -numpy/distutils/tests/test_exec_command.py,sha256=U__8FXVF4WwYdf6ucgNzgYHGgUOIKhFWG9qoCr2GxGo,7483 -numpy/distutils/tests/test_fcompiler.py,sha256=5-wYZnqXW3RRegDmnQ_dKGIjHWXURz93wxLvGnoT-AQ,1377 -numpy/distutils/tests/test_fcompiler_gnu.py,sha256=O57uCEHeQIS0XF8GloEas3OlaOfmIHDWEtgYS_q3x48,2218 -numpy/distutils/tests/test_fcompiler_intel.py,sha256=fOjd_jv0Od6bZyzFf4YpZMcnFva0OZK7yJV_4Hebb6A,1140 -numpy/distutils/tests/test_fcompiler_nagfor.py,sha256=5-Num0A3cN7_NS3BlAgYt174S-OGOWRLL9rXtv-h_fA,1176 -numpy/distutils/tests/test_from_template.py,sha256=SDYoe0XUpAayyEQDq7ZhrvEEz7U9upJDLYzhcdoVifc,1103 -numpy/distutils/tests/test_mingw32ccompiler.py,sha256=rMC8-IyBOiuZVfAoklV_KnD9qVeB_hFVvb5dStxfk08,1609 -numpy/distutils/tests/test_misc_util.py,sha256=8LIm12X83HmvgmpvJJ9inaU7FlGt287VwDM-rMKCOv4,3316 -numpy/distutils/tests/test_npy_pkg_config.py,sha256=wa0QMQ9JAye87t2gDbFaBHp0HGpNFgwxJrJ30ZrHvNk,2639 -numpy/distutils/tests/test_shell_utils.py,sha256=we9P8AvjCQky1NRDP3sXAJnNUek7rDmMR4Ar9cg9iSk,2030 -numpy/distutils/tests/test_system_info.py,sha256=gb99F0iX4pbKhjxCcdiby0bvFMzPwuUGlSj_VXnfpWk,8548 -numpy/distutils/unixccompiler.py,sha256=M7Hn3ANMo8iP-sZtSAebI3RCLp0ViRYxawAbck0hlQM,5177 -numpy/doc/__init__.py,sha256=BDpxTM0iw2F4thjBkYqjIXX57F5KfIaH8xMd67N6Jh0,574 -numpy/doc/__pycache__/__init__.cpython-37.pyc,, -numpy/doc/__pycache__/basics.cpython-37.pyc,, -numpy/doc/__pycache__/broadcasting.cpython-37.pyc,, -numpy/doc/__pycache__/byteswapping.cpython-37.pyc,, -numpy/doc/__pycache__/constants.cpython-37.pyc,, -numpy/doc/__pycache__/creation.cpython-37.pyc,, -numpy/doc/__pycache__/dispatch.cpython-37.pyc,, -numpy/doc/__pycache__/glossary.cpython-37.pyc,, -numpy/doc/__pycache__/indexing.cpython-37.pyc,, -numpy/doc/__pycache__/internals.cpython-37.pyc,, -numpy/doc/__pycache__/misc.cpython-37.pyc,, -numpy/doc/__pycache__/structured_arrays.cpython-37.pyc,, -numpy/doc/__pycache__/subclassing.cpython-37.pyc,, 
-numpy/doc/__pycache__/ufuncs.cpython-37.pyc,, -numpy/doc/basics.py,sha256=bWasRQIE2QkLs-1MEhr_l1TQC_ZDZ4vnUUdxYkgz8wc,11252 -numpy/doc/broadcasting.py,sha256=eh6Gs3wGnc4Qpuw59qAa1wH-oIl6YtIjPEutyLsfIPQ,5595 -numpy/doc/byteswapping.py,sha256=OaEr35v3R__QWWETIlYKfqIyf_qtUm_qxityFIQ0Zrc,5375 -numpy/doc/constants.py,sha256=_n8_OUw7ZKKod6Ho7jtC_J-tSg1pZOBfMO2avPIz_88,9291 -numpy/doc/creation.py,sha256=6FUALDWgqPWObcW-ZHDQMAnfo42I60rRR9pDpwb4-YE,5496 -numpy/doc/dispatch.py,sha256=wLLHuxD4g552N3ot5M6uucEatFUaw3WmYVUa7Sdv-sI,10012 -numpy/doc/glossary.py,sha256=sj5-0X9pjaQEmaTCHAzsqIcVJL_T201E1Ex8v90QiAc,14777 -numpy/doc/indexing.py,sha256=gF3w0dZp7tCx0vKkOSELIBdNGfL1gPZqfiW3T_vj_4Q,16119 -numpy/doc/internals.py,sha256=xYp6lv4yyV0ZIo_qCvLCAWxDa0rhu7FNrTmpXY1isO4,9669 -numpy/doc/misc.py,sha256=JWJqyiYL2qoSMVAb0QC8w_Pm5l7ZLxx2Z9D5ilgU4Uo,6191 -numpy/doc/structured_arrays.py,sha256=28B7iMDrJvM1vjEHou73gXjRcldI5MAz7r4CaEouxmk,26509 -numpy/doc/subclassing.py,sha256=Ha0H-lWMEDWGBWEeP3ZAy_SYfXaImvoUhoDr6f-hYW8,28624 -numpy/doc/ufuncs.py,sha256=xYcK2hwnAUwVgOAmVouIOKXpZuG0LHRd5CYXzNBbv84,5425 -numpy/dual.py,sha256=q17Lo5-3Y4_wNOkg7c7eqno9EdTTtvnz4XpF75HK2fw,1877 -numpy/f2py/__init__.py,sha256=jpo2CzWHgtnMcy0VWSlXR0ucIB_ZVE0ATInpDOReWFE,3138 -numpy/f2py/__main__.py,sha256=mnksAcMyLdK0So_DseQn0zalhnA7LflS7hHvo7QCVjU,134 -numpy/f2py/__pycache__/__init__.cpython-37.pyc,, -numpy/f2py/__pycache__/__main__.cpython-37.pyc,, -numpy/f2py/__pycache__/__version__.cpython-37.pyc,, -numpy/f2py/__pycache__/auxfuncs.cpython-37.pyc,, -numpy/f2py/__pycache__/capi_maps.cpython-37.pyc,, -numpy/f2py/__pycache__/cb_rules.cpython-37.pyc,, -numpy/f2py/__pycache__/cfuncs.cpython-37.pyc,, -numpy/f2py/__pycache__/common_rules.cpython-37.pyc,, -numpy/f2py/__pycache__/crackfortran.cpython-37.pyc,, -numpy/f2py/__pycache__/diagnose.cpython-37.pyc,, -numpy/f2py/__pycache__/f2py2e.cpython-37.pyc,, -numpy/f2py/__pycache__/f2py_testing.cpython-37.pyc,, -numpy/f2py/__pycache__/f90mod_rules.cpython-37.pyc,, 
-numpy/f2py/__pycache__/func2subr.cpython-37.pyc,, -numpy/f2py/__pycache__/rules.cpython-37.pyc,, -numpy/f2py/__pycache__/setup.cpython-37.pyc,, -numpy/f2py/__pycache__/use_rules.cpython-37.pyc,, -numpy/f2py/__version__.py,sha256=rEHB9hlWmpryhNa0EmMnlAlDCGI4GXILC9CZUEV3Wew,254 -numpy/f2py/auxfuncs.py,sha256=mDvaBo3Y8tYpXLZfq8DCv6UZ3-2JqWc_iNBZRxGesb0,21826 -numpy/f2py/capi_maps.py,sha256=buQRyA-zNXc5Azt6GLxqHTDw74gQb68BDStb7kYLs4A,31676 -numpy/f2py/cb_rules.py,sha256=un1xn8goj4jFL8FzxRwWSAzpr0CVcvwObVUKdIGJyaA,22946 -numpy/f2py/cfuncs.py,sha256=QqWwxZwW9Xk23673dI-RC6mfKVME34DCccHx4EAigTQ,45459 -numpy/f2py/common_rules.py,sha256=N2XFecZU_9iHjuL4Ehs0p92vJUcGBTSvAG4zi4zTwNE,5032 -numpy/f2py/crackfortran.py,sha256=onGQnPhpE8DyP4L4XinwHbdPwhXavetgPbKS3SG-REQ,128945 -numpy/f2py/diagnose.py,sha256=VNuNTGnQaXn9Fn2jlueYt47634CvLQSaAWJWy_Nxwnw,5295 -numpy/f2py/f2py2e.py,sha256=F9gKsZ1fI8h4lsNaBs_iqC92znNlZQMU6VjVC-AyZkA,24415 -numpy/f2py/f2py_testing.py,sha256=8rkBjUsNhBavpoBgi_bqDS8H8tBdd5BR8hrE6ENsIAo,1523 -numpy/f2py/f90mod_rules.py,sha256=YFK4MPkGHBxshAInbcapnumX3qlu0h6ya6GQpS8zWLk,9850 -numpy/f2py/func2subr.py,sha256=Oy12rqUa1vcXvzR6g8yx8jSYDwfKt5Jqiebf1QaWX1o,9224 -numpy/f2py/rules.py,sha256=sBUGQuWBmhEgCfcqCZuUmc-p433gVAbWim2wXl6z950,59120 -numpy/f2py/setup.py,sha256=bE-1KTXhPIAoAt4HXHW92chzNQc691AMpki3DQCQYAI,2434 -numpy/f2py/src/fortranobject.c,sha256=aoRy0d0vzgC6wJOAOYEadH5jExZKtTSMUeOO5HXirpA,36256 -numpy/f2py/src/fortranobject.h,sha256=ltMxueNeETQtEYSA_E7bpRtF8Jj1xuOBS-YNhjBMfOw,5227 -numpy/f2py/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/f2py/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_array_from_pyobj.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_assumed_shape.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_block_docstring.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_callback.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_common.cpython-37.pyc,, 
-numpy/f2py/tests/__pycache__/test_compile_function.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_kind.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_mixed.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_parameter.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_quoted_character.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_regression.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_return_character.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_return_complex.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_return_integer.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_return_logical.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_return_real.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_semicolon_split.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_size.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_string.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/util.cpython-37.pyc,, -numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c,sha256=8x5-BYpwiT0fYXwMpwyvu8IaESE1ABIWJNXOkk81QMk,7768 -numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap,sha256=But9r9m4iL7EGq_haMW8IiQ4VivH0TgUozxX4pPvdpE,29 -numpy/f2py/tests/src/assumed_shape/foo_free.f90,sha256=oBwbGSlbr9MkFyhVO2aldjc01dr9GHrMrSiRQek8U64,460 -numpy/f2py/tests/src/assumed_shape/foo_mod.f90,sha256=rfzw3QdI-eaDSl-hslCgGpd5tHftJOVhXvb21Y9Gf6M,499 -numpy/f2py/tests/src/assumed_shape/foo_use.f90,sha256=rmT9k4jP9Ru1PLcGqepw9Jc6P9XNXM0axY7o4hi9lUw,269 -numpy/f2py/tests/src/assumed_shape/precision.f90,sha256=r08JeTVmTTExA-hYZ6HzaxVwBn1GMbPAuuwBhBDtJUk,130 -numpy/f2py/tests/src/common/block.f,sha256=GQ0Pd-VMX3H3a-__f2SuosSdwNXHpBqoGnQDjf8aG9g,224 -numpy/f2py/tests/src/kind/foo.f90,sha256=zIHpw1KdkWbTzbXb73hPbCg4N2Htj3XL8DIwM7seXpo,347 -numpy/f2py/tests/src/mixed/foo.f,sha256=90zmbSHloY1XQYcPb8B5d9bv9mCZx8Z8AMTtgDwJDz8,85 -numpy/f2py/tests/src/mixed/foo_fixed.f90,sha256=pxKuPzxF3Kn5khyFq9ayCsQiolxB3SaNtcWaK5j6Rv4,179 
-numpy/f2py/tests/src/mixed/foo_free.f90,sha256=fIQ71wrBc00JUAVUj_r3QF9SdeNniBiMw6Ly7CGgPWU,139 -numpy/f2py/tests/src/parameter/constant_both.f90,sha256=-bBf2eqHb-uFxgo6Q7iAtVUUQzrGFqzhHDNaxwSICfQ,1939 -numpy/f2py/tests/src/parameter/constant_compound.f90,sha256=re7pfzcuaquiOia53UT7qNNrTYu2euGKOF4IhoLmT6g,469 -numpy/f2py/tests/src/parameter/constant_integer.f90,sha256=nEmMLitKoSAG7gBBEQLWumogN-KS3DBZOAZJWcSDnFw,612 -numpy/f2py/tests/src/parameter/constant_non_compound.f90,sha256=IcxESVLKJUZ1k9uYKoSb8Hfm9-O_4rVnlkiUU2diy8Q,609 -numpy/f2py/tests/src/parameter/constant_real.f90,sha256=quNbDsM1Ts2rN4WtPO67S9Xi_8l2cXabWRO00CPQSSQ,610 -numpy/f2py/tests/src/regression/inout.f90,sha256=CpHpgMrf0bqA1W3Ozo3vInDz0RP904S7LkpdAH6ODck,277 -numpy/f2py/tests/src/size/foo.f90,sha256=IlFAQazwBRr3zyT7v36-tV0-fXtB1d7WFp6S1JVMstg,815 -numpy/f2py/tests/src/string/char.f90,sha256=ihr_BH9lY7eXcQpHHDQhFoKcbu7VMOX5QP2Tlr7xlaM,618 -numpy/f2py/tests/test_array_from_pyobj.py,sha256=gLSX9JuF_8NNboUQRzRF3IYC7pWJ06Mw8m6sy2wQvCQ,22083 -numpy/f2py/tests/test_assumed_shape.py,sha256=zS_LgeakxCOrb4t5m74pX86eBbBo9GhgF4Pnh2lXDig,1650 -numpy/f2py/tests/test_block_docstring.py,sha256=ld1G4pBEi8F4GrkYDpNBJKJdlfDANNI6tiKfBQS9I6w,647 -numpy/f2py/tests/test_callback.py,sha256=iRV0nslbJKovMmXPZed-w9QhNJYZfEo07p_8qneDDbU,3986 -numpy/f2py/tests/test_common.py,sha256=tLmi1JrfwFdTcBlUInxTn04f6Hf8eSB00sWRoKJvHrM,868 -numpy/f2py/tests/test_compile_function.py,sha256=WvOcUNqmRhf4KjplgcP-5s5a03020qhgfcjrhoGeaUk,4500 -numpy/f2py/tests/test_kind.py,sha256=G6u6EWjVHenmPju3RQCa9bSeCJGDul3VyXFgp2_Yc7w,1078 -numpy/f2py/tests/test_mixed.py,sha256=jojC-g_G21G-ACCqlYFuOxZokx8iHikBcmxQWEdWSSc,902 -numpy/f2py/tests/test_parameter.py,sha256=_wX-gM-XGxA_mfDBM8np9NLjYiCF6LJbglwKf09JbdM,3976 -numpy/f2py/tests/test_quoted_character.py,sha256=Q0oDtl3STQqzSap5VYPpfzJJ72NtQchm6Vg-bwuoBl4,1029 -numpy/f2py/tests/test_regression.py,sha256=lPQUKx5RrVtGhyIvIcWS5GgA_CgQypabuuna-Q1z3hs,764 
-numpy/f2py/tests/test_return_character.py,sha256=4a_JeEtY1AkT-Q-01iaZyqWLDGmZGW17d88JNFZoXTc,3864 -numpy/f2py/tests/test_return_complex.py,sha256=FO4oflCncNIft36R3Fe9uiyDtryiB-_d2PLMH3x64I4,4779 -numpy/f2py/tests/test_return_integer.py,sha256=cyyAbyHUepwYeyXlgIa2FD4B7A2dHnpp2jwx8ZDQiZQ,4749 -numpy/f2py/tests/test_return_logical.py,sha256=u3dazkOU1oz9kZKYXBd2GWaEr02MYfjGdLrb7kT8MiY,4974 -numpy/f2py/tests/test_return_real.py,sha256=QVRKzeO44ZuIlV8EycmtXaHT_i0rnX2bi3rOh7py4GM,5619 -numpy/f2py/tests/test_semicolon_split.py,sha256=v7YFx-oTbXUZZ4qjdblCYeVVtkD1YYa4CbuEf2LTOLs,1580 -numpy/f2py/tests/test_size.py,sha256=GV7S4tl8FhK60T_EpX86yVQo_bMVTdyOTB8fGVIQ24o,1352 -numpy/f2py/tests/test_string.py,sha256=LTQC9AFVsUAuJVFuH3Wltl-NfFIilVl0KvBNnEgdnmo,676 -numpy/f2py/tests/util.py,sha256=Wa3lwxZYuwByUkuWYq8phvikYypQehRzKOXd_0vYPPg,9764 -numpy/f2py/use_rules.py,sha256=L6nTSJnxougQ2PVAzR7s-1spidcfDp9tzLIFAJe3gUI,3652 -numpy/fft/__init__.py,sha256=zhieVvDXjjfIEHlZo_ta3OH6qFANuy_Wl1Arh1crX28,7587 -numpy/fft/__pycache__/__init__.cpython-37.pyc,, -numpy/fft/__pycache__/_pocketfft.cpython-37.pyc,, -numpy/fft/__pycache__/helper.cpython-37.pyc,, -numpy/fft/__pycache__/setup.cpython-37.pyc,, -numpy/fft/_pocketfft.py,sha256=TRYWW7fZB_ubxOwmRYE-Ok14N-ryllJh1W3gMzd1Ha0,47832 -numpy/fft/_pocketfft_internal.cpython-37m-x86_64-linux-gnu.so,sha256=zUEBGzvj-_s8JWAW_3c2lQGWBoIcffG50tQ9L0ax6lI,386852 -numpy/fft/helper.py,sha256=vrKPnvFngxaag3nQA-OWzB9qsQctBk6vXaKsuQVMU0k,6271 -numpy/fft/setup.py,sha256=XT8tvC_P5KUDyBgP5S6KWc63-Fmu_L86c2u-KDLWqxo,542 -numpy/fft/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/fft/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/fft/tests/__pycache__/test_helper.cpython-37.pyc,, -numpy/fft/tests/__pycache__/test_pocketfft.cpython-37.pyc,, -numpy/fft/tests/test_helper.py,sha256=Stwrak0FqjR3Wn41keelozyF_M45PL3jdhF3PjZVyIA,6326 -numpy/fft/tests/test_pocketfft.py,sha256=3rWWfY23nJyv7X_CUc8JWAGxTtug1_97scsjbFaujEg,9789 
-numpy/lib/__init__.py,sha256=OcdEAprMAoTSp8psgeWH9jmZnh1QbkT29uY7Z4qcFzQ,1899 -numpy/lib/__pycache__/__init__.cpython-37.pyc,, -numpy/lib/__pycache__/_datasource.cpython-37.pyc,, -numpy/lib/__pycache__/_iotools.cpython-37.pyc,, -numpy/lib/__pycache__/_version.cpython-37.pyc,, -numpy/lib/__pycache__/arraypad.cpython-37.pyc,, -numpy/lib/__pycache__/arraysetops.cpython-37.pyc,, -numpy/lib/__pycache__/arrayterator.cpython-37.pyc,, -numpy/lib/__pycache__/financial.cpython-37.pyc,, -numpy/lib/__pycache__/format.cpython-37.pyc,, -numpy/lib/__pycache__/function_base.cpython-37.pyc,, -numpy/lib/__pycache__/histograms.cpython-37.pyc,, -numpy/lib/__pycache__/index_tricks.cpython-37.pyc,, -numpy/lib/__pycache__/mixins.cpython-37.pyc,, -numpy/lib/__pycache__/nanfunctions.cpython-37.pyc,, -numpy/lib/__pycache__/npyio.cpython-37.pyc,, -numpy/lib/__pycache__/polynomial.cpython-37.pyc,, -numpy/lib/__pycache__/recfunctions.cpython-37.pyc,, -numpy/lib/__pycache__/scimath.cpython-37.pyc,, -numpy/lib/__pycache__/setup.cpython-37.pyc,, -numpy/lib/__pycache__/shape_base.cpython-37.pyc,, -numpy/lib/__pycache__/stride_tricks.cpython-37.pyc,, -numpy/lib/__pycache__/twodim_base.cpython-37.pyc,, -numpy/lib/__pycache__/type_check.cpython-37.pyc,, -numpy/lib/__pycache__/ufunclike.cpython-37.pyc,, -numpy/lib/__pycache__/user_array.cpython-37.pyc,, -numpy/lib/__pycache__/utils.cpython-37.pyc,, -numpy/lib/_datasource.py,sha256=jYNwX7pKyn-N9KzpSmrfKWbT5dXci7-VtDk4pL-vCDs,25521 -numpy/lib/_iotools.py,sha256=Nkv-GMaSyzHfkZvLSJLLQ-8uyMRsdyy6seM-Mn0gqCs,32738 -numpy/lib/_version.py,sha256=BIGo2hWBan0Qxt5C3JoPi4TXLPUv0T-FU9366Qu_5XY,4972 -numpy/lib/arraypad.py,sha256=VNvHoD3NvnxbQ1rzujmVDWRGMt4bX-4-87g0wDaVvxA,31386 -numpy/lib/arraysetops.py,sha256=7iWnvYY9aUmr0J4aVqFf3hHH1G9gC-kUClD5KZbGmo8,24231 -numpy/lib/arrayterator.py,sha256=FTXwwzs5xzPxpUbZmE3J0ChjgesJD9TiqBA_bCI05SI,7207 -numpy/lib/financial.py,sha256=YfHWv9em4_ZQg4m-AWSKJPcT43lilBQWzcX52c_q0j8,31590 
-numpy/lib/format.py,sha256=QzW9kEcjjmDw8mPmEQk8_2NlcCxfb_lljy8ro_KxGf4,31632 -numpy/lib/function_base.py,sha256=5FwWTpP_ShwjjdgXQQOzeq5I04WvYUyow3YgcS5qXRY,156177 -numpy/lib/histograms.py,sha256=zSYkRkTfX_3PsDIdzarTimVChFxKooPxV0LYOkldY6g,39967 -numpy/lib/index_tricks.py,sha256=dW4TEm_KcPtBYB9EQWCFKogVai3kXkPOgeVVIeBRlJo,29706 -numpy/lib/mixins.py,sha256=6huDUGjzCFoeKrCS2pGnMPoQxpgWyoriIJ3xVwoqugQ,7233 -numpy/lib/nanfunctions.py,sha256=QPtwAIWQDv1IEilpyaKlpVSlqikn0djbMeXAhFJsc0E,58955 -numpy/lib/npyio.py,sha256=6Cwwet8pQusDj1msyv5qjI6lxLzgD5E2Iuvtlu6Zj0s,88031 -numpy/lib/polynomial.py,sha256=urWjdZ8dAvkFDKR-vkSImJIskhTXe9XlVCly0aCX7vM,40755 -numpy/lib/recfunctions.py,sha256=2hsE8JD4RI-HHL7dPG7ku6c9zFBeSJ2-7Z17Q3NiodI,56875 -numpy/lib/scimath.py,sha256=hulwijLlO0q230XOrD5SRjlTY-9O7c1u68CeNjTgNl8,14789 -numpy/lib/setup.py,sha256=os9eV9wSzwTQlfxeoQ33gYQ4wOj1_6EvqcROc8PyGbE,379 -numpy/lib/shape_base.py,sha256=2G5a_-b-8iRG9liNMc4yabCPKHniN9QHQC0HgATA4QE,38204 -numpy/lib/stride_tricks.py,sha256=rwTBZ3o0AS2KxwOLGLDmk_5w6EVUi-X1P9sDXpM7yqM,9291 -numpy/lib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/lib/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test__datasource.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test__iotools.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test__version.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_arraypad.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_arraysetops.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_arrayterator.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_financial.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_format.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_function_base.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_histograms.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_index_tricks.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_io.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_mixins.cpython-37.pyc,, 
-numpy/lib/tests/__pycache__/test_nanfunctions.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_packbits.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_polynomial.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_recfunctions.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_regression.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_shape_base.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_stride_tricks.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_twodim_base.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_type_check.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_ufunclike.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_utils.cpython-37.pyc,, -numpy/lib/tests/data/py2-objarr.npy,sha256=F4cyUC-_TB9QSFLAo2c7c44rC6NUYIgrfGx9PqWPSKk,258 -numpy/lib/tests/data/py2-objarr.npz,sha256=xo13HBT0FbFZ2qvZz0LWGDb3SuQASSaXh7rKfVcJjx4,366 -numpy/lib/tests/data/py3-objarr.npy,sha256=pTTVh8ezp-lwAK3fkgvdKU8Arp5NMKznVD-M6Ex_uA0,341 -numpy/lib/tests/data/py3-objarr.npz,sha256=qQR0gS57e9ta16d_vCQjaaKM74gPdlwCPkp55P-qrdw,449 -numpy/lib/tests/data/python3.npy,sha256=X0ad3hAaLGXig9LtSHAo-BgOvLlFfPYMnZuVIxRmj-0,96 -numpy/lib/tests/data/win64python2.npy,sha256=agOcgHVYFJrV-nrRJDbGnUnF4ZTPYXuSeF-Mtg7GMpc,96 -numpy/lib/tests/test__datasource.py,sha256=5LwfmvIysaLHlCYkmsj46S7YRF2zRG4BmKSjjJr6fdE,11463 -numpy/lib/tests/test__iotools.py,sha256=P0FnwqfgYV4Nj9oEnwGm-vXYTS0A_5FRZNxFzvsL2qg,13885 -numpy/lib/tests/test__version.py,sha256=eCeeSqb8G3WNtCgkM3XGz9Zszyye-KFDlNQ7EY2J_UY,2055 -numpy/lib/tests/test_arraypad.py,sha256=5MNlIBrm3iLnJz0YPMvfmtTdG4utCBiNu_k0klKDgBA,54140 -numpy/lib/tests/test_arraysetops.py,sha256=M-pzWVCkCuFi0a8OpUOoXYz7OxMLud5dLPLRmo7cMyk,22367 -numpy/lib/tests/test_arrayterator.py,sha256=run7iWWbvoHGGsDv_uB6G8QENFzOCSgUIxAMVp7ZMu4,1357 -numpy/lib/tests/test_financial.py,sha256=NZ3Q_wXZr6YBBkK2uElV0Q7ko9GQdN6TEvScQTuXWpc,18390 -numpy/lib/tests/test_format.py,sha256=xd-EyPq4B2sL6wNNK1MnaSD6SefZuV6AtDHELri5pe8,38984 
-numpy/lib/tests/test_function_base.py,sha256=0Jnax_jByCwTG2tLP35i2-2gwSuhUx0tYAVicUOBxg0,123208 -numpy/lib/tests/test_histograms.py,sha256=zljzM6vpMhE7pskptqxeC_sYMGlUW5k2GUJ2AZyY5oo,33761 -numpy/lib/tests/test_index_tricks.py,sha256=sVyE_b2FyXEe_FFUVcw2zCjb_d5F8DBvWvm8g3qpLOs,18454 -numpy/lib/tests/test_io.py,sha256=gn5UPy6466E8lVsWFhEGVIHPocVtAc_5OR_1H4VzzJ0,100409 -numpy/lib/tests/test_mixins.py,sha256=YNIKF716Jz7V8FJ8Zzww_F6laTD8j3A6SBxCXqt6rAQ,7233 -numpy/lib/tests/test_nanfunctions.py,sha256=qJAl3wlw4hrRmBwsIn-9iAfsVyXngGJ-P6tvpFKXaF4,38207 -numpy/lib/tests/test_packbits.py,sha256=D0lwihTICKvUm9LTIIs7R16kVK-yZddeCAGnJk6TkEM,17612 -numpy/lib/tests/test_polynomial.py,sha256=NhCF2nGmc43KraPfR6LCBD8M-i-xZKwIsLYPFXNi0WE,10087 -numpy/lib/tests/test_recfunctions.py,sha256=K65UOmcZNUtLGgvI_8gzktZn2Q_B6mC6oA6c7ZG2Ztc,41335 -numpy/lib/tests/test_regression.py,sha256=JeWbMHmGCoVeFtMvd30SVZCpXD9sxnRaI1Dy2wpr5iA,8483 -numpy/lib/tests/test_shape_base.py,sha256=3iwnWAGnerQp4B5Bx-_vTx00E7ZVzjMw6_eqj6H7wdY,24513 -numpy/lib/tests/test_stride_tricks.py,sha256=KCC5XRbKzOXvWo3Pboj9oJ9b0Fw3dCh7bY0HLAOP0_8,17110 -numpy/lib/tests/test_twodim_base.py,sha256=gcrJ43TvAKVqTdWGDx9Dcs79oZtiT6lswS3FVcpt3QQ,18504 -numpy/lib/tests/test_type_check.py,sha256=c9RaZtw85vqRVzsOV1lAgdmFm9V5VgRRfpn-X8Fcv3E,15398 -numpy/lib/tests/test_ufunclike.py,sha256=DdOvBcFD33OFUMsxhnGso7q18M1NAlG-2Zn1gWlu3XM,3352 -numpy/lib/tests/test_utils.py,sha256=4v1ZRTeBbdje3MpnRCVNtRJLEUgpT2qJblUMVB1C89A,3456 -numpy/lib/twodim_base.py,sha256=UIeJOwE6p-EjgUS0L9kJa1aZAQIZqUkmZtqArE7h5WY,27642 -numpy/lib/type_check.py,sha256=fYWhY6IsmBebOIk2XlJZ7ZfhyVO98Q8LtqYlFKIrNDI,19776 -numpy/lib/ufunclike.py,sha256=CB_OBC_pbhtNbuheM-21DIxMArdXIhiyaaSOMN42ZvA,7294 -numpy/lib/user_array.py,sha256=7nJPlDfP-04Lcq8iH_cqBbSEsx5cHCcj-2Py-oh-5t0,7817 -numpy/lib/utils.py,sha256=0yugAVeRUsElIahjKs53RkAxNEAGVCtf7ohKHS41tKA,34082 -numpy/linalg/__init__.py,sha256=qD8UCWbi9l_ik7PQIqw9ChnXo1_3CSZre18px1wIA-s,1825 
-numpy/linalg/__pycache__/__init__.cpython-37.pyc,, -numpy/linalg/__pycache__/linalg.cpython-37.pyc,, -numpy/linalg/__pycache__/setup.cpython-37.pyc,, -numpy/linalg/_umath_linalg.cpython-37m-x86_64-linux-gnu.so,sha256=JyTtpoRAptApG5VgzIEl76P3oRSLvMUD8du2v7Vpb30,880560 -numpy/linalg/lapack_lite.cpython-37m-x86_64-linux-gnu.so,sha256=7N_I6kaqWZ6I23cWzrVMZX9gz1PZb_qENRdXbSR74dA,112928 -numpy/linalg/linalg.py,sha256=QbOcm4NDesoEAl7LpPXo23orid-lY2_fITxD3MCj1RI,86274 -numpy/linalg/setup.py,sha256=vTut50wTnLpnWl6i-P1BY2EjikVHrnhwOgpNAF-Lgig,2003 -numpy/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/linalg/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/linalg/tests/__pycache__/test_build.cpython-37.pyc,, -numpy/linalg/tests/__pycache__/test_deprecations.cpython-37.pyc,, -numpy/linalg/tests/__pycache__/test_linalg.cpython-37.pyc,, -numpy/linalg/tests/__pycache__/test_regression.cpython-37.pyc,, -numpy/linalg/tests/test_build.py,sha256=xKcJ8JmGk-zTqxxMhDX5GFsw-ptn8uwOUOcxaTUuPHc,1704 -numpy/linalg/tests/test_deprecations.py,sha256=eGYDVF3rmGQyDEMGOc-p_zc84Cx1I3jQPyaJe7xOvEc,706 -numpy/linalg/tests/test_linalg.py,sha256=jhwNPXFJN9PLeRmoZwGZ9SBGEkXDvm60pXJJYCLJNFc,72621 -numpy/linalg/tests/test_regression.py,sha256=zz7lprqDg7yU-z1d6AOdCDH3Tjqgw82QGiaPM7peixY,5671 -numpy/ma/__init__.py,sha256=fcmMCElT3MmCkjIGVhXyEAbjuWe_j1NVUiE65eAMvy0,1470 -numpy/ma/__pycache__/__init__.cpython-37.pyc,, -numpy/ma/__pycache__/bench.cpython-37.pyc,, -numpy/ma/__pycache__/core.cpython-37.pyc,, -numpy/ma/__pycache__/extras.cpython-37.pyc,, -numpy/ma/__pycache__/mrecords.cpython-37.pyc,, -numpy/ma/__pycache__/setup.cpython-37.pyc,, -numpy/ma/__pycache__/testutils.cpython-37.pyc,, -numpy/ma/__pycache__/timer_comparison.cpython-37.pyc,, -numpy/ma/bench.py,sha256=q3y_e1wpHVEdg0iIxrBshWVt2LOFfYi6q-eIJ3RSVrU,4942 -numpy/ma/core.py,sha256=ljE2IcaC0KvnBp6M_F1pxPJfCCuLkdIk2RVXUxgZvHk,260311 
-numpy/ma/extras.py,sha256=-egPiF1vXSRRb3m5sbLG-tU0c8sVV2ODdxj3p1Ws8Bk,58651 -numpy/ma/mrecords.py,sha256=0kbmSJKEbyHQEjqWiFZy64PaUfstRERbewwnWdyW8e8,26822 -numpy/ma/setup.py,sha256=zkieH8BeiGVXl3Wlt_WeP9kciZlyAZY20DDu4SGk4b4,429 -numpy/ma/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/ma/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/ma/tests/__pycache__/test_core.cpython-37.pyc,, -numpy/ma/tests/__pycache__/test_deprecations.cpython-37.pyc,, -numpy/ma/tests/__pycache__/test_extras.cpython-37.pyc,, -numpy/ma/tests/__pycache__/test_mrecords.cpython-37.pyc,, -numpy/ma/tests/__pycache__/test_old_ma.cpython-37.pyc,, -numpy/ma/tests/__pycache__/test_regression.cpython-37.pyc,, -numpy/ma/tests/__pycache__/test_subclassing.cpython-37.pyc,, -numpy/ma/tests/test_core.py,sha256=5tiE3vmxdFBV4SXK9cPftUwfPlj8hEhNZ4ydq6EatqM,196581 -numpy/ma/tests/test_deprecations.py,sha256=StN-maPV6dwIPn1LmJ_Fd9l_Ysrbzvl8BZy6zYeUru8,2340 -numpy/ma/tests/test_extras.py,sha256=tw6htO0iACppdtcQ5Hc6fLVNlXWcxO72nCp7QKjUjn0,66087 -numpy/ma/tests/test_mrecords.py,sha256=G46t_9Kzo7wNv1N_Lb3zG4s6LMuXVir1NtMKDaKVdn8,19960 -numpy/ma/tests/test_old_ma.py,sha256=5Wned1evtBm2k1yFjcAnrKTvDjIL2Vatma1cH7ks1Tg,32373 -numpy/ma/tests/test_regression.py,sha256=Kq1OAjXuAyTv0J7UcWmQNd-nk8aFcU-5Vu84HPPK2Fs,3156 -numpy/ma/tests/test_subclassing.py,sha256=l4srPFjFT0jR51e9hbumLCawR9sqQ4cdH4QwY1t6Xek,12966 -numpy/ma/testutils.py,sha256=meyy8_0sx4g2sebsVO1PrFSc6ogLzEU7vjOuu2VjY1U,10365 -numpy/ma/timer_comparison.py,sha256=BCWzBW_z6M3k3Mfe-7ThiPEBF4a12J4ZXGIxFxXkY9c,15548 -numpy/matlib.py,sha256=CgnA_dNYnxFMqfwycoimMgGzjICJC1u6XRpwPEyPvXI,9757 -numpy/matrixlib/__init__.py,sha256=W-2bi7zuMWQY5U1ikwfaBPubrcYkbxzPzzIeYz3RYPA,284 -numpy/matrixlib/__pycache__/__init__.cpython-37.pyc,, -numpy/matrixlib/__pycache__/defmatrix.cpython-37.pyc,, -numpy/matrixlib/__pycache__/setup.cpython-37.pyc,, -numpy/matrixlib/defmatrix.py,sha256=r_rYp4ODTS9Rdw8EBIa0wS7NJ99ygDCzzGUPnI2ziMY,30713 
-numpy/matrixlib/setup.py,sha256=7DS-rWnyWlLTuOj31UuhkyW8QhLQ7KD5wirtWT_DUhc,437 -numpy/matrixlib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/matrixlib/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/matrixlib/tests/__pycache__/test_defmatrix.cpython-37.pyc,, -numpy/matrixlib/tests/__pycache__/test_interaction.cpython-37.pyc,, -numpy/matrixlib/tests/__pycache__/test_masked_matrix.cpython-37.pyc,, -numpy/matrixlib/tests/__pycache__/test_matrix_linalg.cpython-37.pyc,, -numpy/matrixlib/tests/__pycache__/test_multiarray.cpython-37.pyc,, -numpy/matrixlib/tests/__pycache__/test_numeric.cpython-37.pyc,, -numpy/matrixlib/tests/__pycache__/test_regression.cpython-37.pyc,, -numpy/matrixlib/tests/test_defmatrix.py,sha256=FRkFPpDpgUEzEAgShORCVhPOuqclxBftHyEW5z2oV4o,15315 -numpy/matrixlib/tests/test_interaction.py,sha256=y0ldcMIKCeT_tRo_uON6Cvxuff-M4MxmqnzA0kDFHYU,12179 -numpy/matrixlib/tests/test_masked_matrix.py,sha256=jbmuf5BQjsae6kXZtH8XJ8TI5JJYDIZ0PZhGKBbxnmY,8925 -numpy/matrixlib/tests/test_matrix_linalg.py,sha256=XYsAcC02YgvlfqAQOLY2hOuggeRlRhkztNsLYWGb4QQ,2125 -numpy/matrixlib/tests/test_multiarray.py,sha256=jM-cFU_ktanoyJ0ScRYv5xwohhE3pKpVhBBtd31b-IQ,628 -numpy/matrixlib/tests/test_numeric.py,sha256=YPq5f11MUAV6WcLQbl8xKWcm17lMj9SJ09mamqGCpxA,515 -numpy/matrixlib/tests/test_regression.py,sha256=ou1TP5bFNpjRaL2-zQxzS11ChwvAkCVp3k71SBtOO9M,1001 -numpy/polynomial/__init__.py,sha256=boBgsbz2Rr49pBTyGNT3TnLRTPSauyjBNeCVGek7oUM,1134 -numpy/polynomial/__pycache__/__init__.cpython-37.pyc,, -numpy/polynomial/__pycache__/_polybase.cpython-37.pyc,, -numpy/polynomial/__pycache__/chebyshev.cpython-37.pyc,, -numpy/polynomial/__pycache__/hermite.cpython-37.pyc,, -numpy/polynomial/__pycache__/hermite_e.cpython-37.pyc,, -numpy/polynomial/__pycache__/laguerre.cpython-37.pyc,, -numpy/polynomial/__pycache__/legendre.cpython-37.pyc,, -numpy/polynomial/__pycache__/polynomial.cpython-37.pyc,, -numpy/polynomial/__pycache__/polyutils.cpython-37.pyc,, 
-numpy/polynomial/__pycache__/setup.cpython-37.pyc,, -numpy/polynomial/_polybase.py,sha256=HOIXM-w5L_TVFdWR72K_RtidpR8zHqNARoeVwf6gor8,33093 -numpy/polynomial/chebyshev.py,sha256=5pr-j0wWlKnNki-vaM2gV7Sni9FXtaomVMhYH01pw_I,63287 -numpy/polynomial/hermite.py,sha256=jTv8jCvVA5_bQ6AqLo5yF8n1-8mWpT_M1vET2BlKSdY,52671 -numpy/polynomial/hermite_e.py,sha256=03sKE5Osr1DIVUL3eMKmzKU0GGKUk7lEJM5K2_LRXG0,52853 -numpy/polynomial/laguerre.py,sha256=CSbhTmnKKIYGMLoahlQbFpPIvAvXQ8aQ6lQzy9ySmic,51106 -numpy/polynomial/legendre.py,sha256=4TjHkvFH8gPA2P_ncR0GyBYjp4YF5nYWVjmkkWa6DyE,52507 -numpy/polynomial/polynomial.py,sha256=_A6i4ZQKeOVy_g4Wui6f8ubbWbd0tPDpNS5VCbvqtEs,48706 -numpy/polynomial/polyutils.py,sha256=gvkAyz9vYqVAqu-X9NIVmXnZ3Lap0wGkWUHdHue3ktI,23243 -numpy/polynomial/setup.py,sha256=PKIUV6Jh7_0jBboPp3IHPmp6LWVs4tbIkdu_FtmI_5U,385 -numpy/polynomial/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/polynomial/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/polynomial/tests/__pycache__/test_chebyshev.cpython-37.pyc,, -numpy/polynomial/tests/__pycache__/test_classes.cpython-37.pyc,, -numpy/polynomial/tests/__pycache__/test_hermite.cpython-37.pyc,, -numpy/polynomial/tests/__pycache__/test_hermite_e.cpython-37.pyc,, -numpy/polynomial/tests/__pycache__/test_laguerre.cpython-37.pyc,, -numpy/polynomial/tests/__pycache__/test_legendre.cpython-37.pyc,, -numpy/polynomial/tests/__pycache__/test_polynomial.cpython-37.pyc,, -numpy/polynomial/tests/__pycache__/test_polyutils.cpython-37.pyc,, -numpy/polynomial/tests/__pycache__/test_printing.cpython-37.pyc,, -numpy/polynomial/tests/test_chebyshev.py,sha256=Vda4vCJtdIAPs0tsbXexnw4kaaou30FjZ0gQxNxOcz8,20716 -numpy/polynomial/tests/test_classes.py,sha256=18hEEMQHB3o1roK4nlPrawv9pFif2gur6lkEBoxZAFg,20370 -numpy/polynomial/tests/test_hermite.py,sha256=3zU7T69fuFvn5gDOG34SCnyDm_pVvTVlcpUMlhoU2V0,18755 -numpy/polynomial/tests/test_hermite_e.py,sha256=06gCjnh0s-1h7jWpmJyjQdfzAK_4kywto7hHuQ7NmJQ,19089 
-numpy/polynomial/tests/test_laguerre.py,sha256=O5zxZQ5GIOZrx4b0ttCUoDxmb3ifhwDRcq--hYyt3zU,17689 -numpy/polynomial/tests/test_legendre.py,sha256=2y8xF4PdU-uS7OjuIzMC6DAeVc9mlW83xj_N4NSGhSY,18453 -numpy/polynomial/tests/test_polynomial.py,sha256=MD4xxU3yWSbMK9B5wpYLQOeWZj0mH7g9p9ifMVhPQF4,20080 -numpy/polynomial/tests/test_polyutils.py,sha256=GzRz3leypd2UrWE-EwuIWL0lbbj6ks6Mjli3tozDN9U,3081 -numpy/polynomial/tests/test_printing.py,sha256=_7O-05q3JEjdxmuzBdWxligQVdC6qGygKmbhfiYW9KQ,2067 -numpy/random/__init__.pxd,sha256=-E4OlHPfdF_aLa7hXIZzBBBkTIK86tR9qXnKMeUnhcg,432 -numpy/random/__init__.py,sha256=yX9S3EpGEUAnSiwoBrccxFZngr5pLmbEx6dgLPH1r5s,7527 -numpy/random/__pycache__/__init__.cpython-37.pyc,, -numpy/random/__pycache__/_pickle.cpython-37.pyc,, -numpy/random/__pycache__/setup.cpython-37.pyc,, -numpy/random/_bit_generator.cpython-37m-x86_64-linux-gnu.so,sha256=bo3-lJOD40NhsqNIdaWdkOmw_x1WlTdfsyYCA1QDqqg,839767 -numpy/random/_bit_generator.pxd,sha256=nZRRH1h_FhR-YTE_Y0kJ5n_JyuFxFHA4II_K0sqNH3k,1005 -numpy/random/_bounded_integers.cpython-37m-x86_64-linux-gnu.so,sha256=U3RpwORvqwAOjiKCPKKiFXPfjIr_Rp4OAg9BAdV6fQU,2071041 -numpy/random/_bounded_integers.pxd,sha256=hcoucPH5hkFEM2nm12zYO-5O_Rt8RujEXT5YWuAzl1Q,1669 -numpy/random/_common.cpython-37m-x86_64-linux-gnu.so,sha256=yVwyV6I9ArJ16xL7RU78bGT5W6ix1QxQDpi6eF8c-Sg,1336140 -numpy/random/_common.pxd,sha256=jJSsc_MpqkizibG03OLe7gRN3DMfwGMjDkbG-utvDKM,4690 -numpy/random/_examples/cffi/__pycache__/extending.cpython-37.pyc,, -numpy/random/_examples/cffi/__pycache__/parse.cpython-37.pyc,, -numpy/random/_examples/cffi/extending.py,sha256=xSla3zWqxi6Hj48EvnYfD3WHfE189VvC4XsKu4_T_Iw,880 -numpy/random/_examples/cffi/parse.py,sha256=v0eB67u_SgfqSflvuB31YqHUZWh6XscNcLKaCn7fCaw,1515 -numpy/random/_examples/cython/__pycache__/setup.cpython-37.pyc,, -numpy/random/_examples/cython/extending.pyx,sha256=8nSM_iELliQYfp0Hj9VzD2XZAfaRdo7iJTNP5gLRn-k,2292 
-numpy/random/_examples/cython/extending_distributions.pyx,sha256=o6Pd8XP7jvMaZeLaJZTN9Vp0_5rm4M_xF16GmJE-6yw,2332 -numpy/random/_examples/cython/setup.py,sha256=68K-GEXqTLGxXyMOttMH6nwMN6zcvLjY-lWrVml2jPk,1042 -numpy/random/_examples/numba/__pycache__/extending.cpython-37.pyc,, -numpy/random/_examples/numba/__pycache__/extending_distributions.cpython-37.pyc,, -numpy/random/_examples/numba/extending.py,sha256=L-ELWpGbqBC2WSiWHFatfTnRxu2a66x7vKIoU2zDx_U,1977 -numpy/random/_examples/numba/extending_distributions.py,sha256=Jnr9aWkHyIWygNbdae32GVURK-5T9BTGhuExRpvve98,2034 -numpy/random/_generator.cpython-37m-x86_64-linux-gnu.so,sha256=Pchb0c-AAKAi_x5bCmnDfP_Y8tYF8zyqVKC0kN1MyN0,3186082 -numpy/random/_mt19937.cpython-37m-x86_64-linux-gnu.so,sha256=_LqkYcQTdEIjyRLCpps_OBFmUqTCVAbWa4nMGol-yBw,441605 -numpy/random/_pcg64.cpython-37m-x86_64-linux-gnu.so,sha256=bzbnVd3lkge4S81m14zEkUCRYkuiquFw2OQ4OOd3Il4,313867 -numpy/random/_philox.cpython-37m-x86_64-linux-gnu.so,sha256=Mbz6bfYfW8F_4maVprTXhKva0_f6P9yrQEFuXKmiODw,378664 -numpy/random/_pickle.py,sha256=QJRCkyDVi7xJEx-XMcYlMoLwi2dPoz8jD_6NFo1nU-4,2247 -numpy/random/_sfc64.cpython-37m-x86_64-linux-gnu.so,sha256=6LnbG0QZQDufnGpL-IfiBKlVLMmwI379lsdY_XHJMlI,226830 -numpy/random/mtrand.cpython-37m-x86_64-linux-gnu.so,sha256=2W2kth8pl-ZvaTeL4AnUZ7ukUIvGTYm_NbgP6BX1PtA,2359706 -numpy/random/setup.py,sha256=OvadBHJDLR-VmfF0Ls598MMpP9kMfzkdtrei-sEpK4Q,5715 -numpy/random/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/random/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/random/tests/__pycache__/test_direct.cpython-37.pyc,, -numpy/random/tests/__pycache__/test_extending.cpython-37.pyc,, -numpy/random/tests/__pycache__/test_generator_mt19937.cpython-37.pyc,, -numpy/random/tests/__pycache__/test_generator_mt19937_regressions.cpython-37.pyc,, -numpy/random/tests/__pycache__/test_random.cpython-37.pyc,, -numpy/random/tests/__pycache__/test_randomstate.cpython-37.pyc,, 
-numpy/random/tests/__pycache__/test_randomstate_regression.cpython-37.pyc,, -numpy/random/tests/__pycache__/test_regression.cpython-37.pyc,, -numpy/random/tests/__pycache__/test_seed_sequence.cpython-37.pyc,, -numpy/random/tests/__pycache__/test_smoke.cpython-37.pyc,, -numpy/random/tests/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/random/tests/data/__pycache__/__init__.cpython-37.pyc,, -numpy/random/tests/data/mt19937-testset-1.csv,sha256=Xkef402AVB-eZgYQkVtoxERHkxffCA9Jyt_oMbtJGwY,15844 -numpy/random/tests/data/mt19937-testset-2.csv,sha256=nsBEQNnff-aFjHYK4thjvUK4xSXDSfv5aTbcE59pOkE,15825 -numpy/random/tests/data/pcg64-testset-1.csv,sha256=xB00DpknGUTTCxDr9L6aNo9Hs-sfzEMbUSS4t11TTfE,23839 -numpy/random/tests/data/pcg64-testset-2.csv,sha256=NTdzTKvG2U7_WyU_IoQUtMzU3kEvDH39CgnR6VzhTkw,23845 -numpy/random/tests/data/philox-testset-1.csv,sha256=SedRaIy5zFadmk71nKrGxCFZ6BwKz8g1A9-OZp3IkkY,23852 -numpy/random/tests/data/philox-testset-2.csv,sha256=dWECt-sbfvaSiK8-Ygp5AqyjoN5i26VEOrXqg01rk3g,23838 -numpy/random/tests/data/sfc64-testset-1.csv,sha256=iHs6iX6KR8bxGwKk-3tedAdMPz6ZW8slDSUECkAqC8Q,23840 -numpy/random/tests/data/sfc64-testset-2.csv,sha256=FIDIDFCaPZfWUSxsJMAe58hPNmMrU27kCd9FhCEYt_k,23833 -numpy/random/tests/test_direct.py,sha256=RHMSKQifz7vqhjn0z5rpJl_AlDLVSli-ldC6jKcwJP0,14435 -numpy/random/tests/test_extending.py,sha256=22-9bT9yMONuqb4r_5G-jV7QS_V1nN_rddEAs3X2aq4,1822 -numpy/random/tests/test_generator_mt19937.py,sha256=nmoG3KGeHyP_MO6Egr99DdEJFKCab8O98cEVKngj0ZE,94406 -numpy/random/tests/test_generator_mt19937_regressions.py,sha256=ldeCEO3N6dCAGA1g8YnqEwRTQAiv6tBuY9xuAELJNCQ,5834 -numpy/random/tests/test_random.py,sha256=6h_kDOT55P1Vq2tf8JUM4wJTqkEdftg9XlmUgYroAAc,66842 -numpy/random/tests/test_randomstate.py,sha256=P8ZLRb3EswHcZ3jTZ0tn6z33LiBiwlufTR9b6TPLUz4,78067 -numpy/random/tests/test_randomstate_regression.py,sha256=6nW_U3uLq3JbiIaNX0PstGgqHk8fhDiblDkmOvF2Huc,7707 
-numpy/random/tests/test_regression.py,sha256=_M-We4kY74tXPonJjWN7rMXF5SoxHMapl1zM08-6p0w,5683 -numpy/random/tests/test_seed_sequence.py,sha256=-fvOA-gzi_hOugmzJfXxL0GNmfAvuAbiwDCuLggqrNY,2379 -numpy/random/tests/test_smoke.py,sha256=VOCrUBqDsJFu9yQ02DArd-NV5p3eTphY-NX3WwnyewU,27891 -numpy/setup.py,sha256=lsyhnRXfo0ybq63nVUX8HnYhQ1mI0bSic-mk-lK3wnc,920 -numpy/testing/__init__.py,sha256=MHRK5eimwrC9RE723HlOcOQGxu5HAmQ-qwlcVX1sZ1k,632 -numpy/testing/__pycache__/__init__.cpython-37.pyc,, -numpy/testing/__pycache__/print_coercion_tables.cpython-37.pyc,, -numpy/testing/__pycache__/setup.cpython-37.pyc,, -numpy/testing/__pycache__/utils.cpython-37.pyc,, -numpy/testing/_private/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/testing/_private/__pycache__/__init__.cpython-37.pyc,, -numpy/testing/_private/__pycache__/decorators.cpython-37.pyc,, -numpy/testing/_private/__pycache__/noseclasses.cpython-37.pyc,, -numpy/testing/_private/__pycache__/nosetester.cpython-37.pyc,, -numpy/testing/_private/__pycache__/parameterized.cpython-37.pyc,, -numpy/testing/_private/__pycache__/utils.cpython-37.pyc,, -numpy/testing/_private/decorators.py,sha256=JSIBsQH4t1rdMcr1-Cf2jBJ6CXzIGEFyZoWxUJuXI7M,9015 -numpy/testing/_private/noseclasses.py,sha256=nYtV16KcoqAcHswfYO-u6bRIrDBvCvpqjCNfl7zk-SA,14601 -numpy/testing/_private/nosetester.py,sha256=S1nEtDBvNT87Zrt8XmuSVIBWpanJwjtD1YiRlcf7eoA,20515 -numpy/testing/_private/parameterized.py,sha256=PQnCG1Ul0aE9MBTDL9lJ-DOMgsahDfpMn5Xhqld1KWk,18285 -numpy/testing/_private/utils.py,sha256=_na6o-vYzN8eDMww86X49m8ciCa3G_lZlDH7IEQLdyQ,84689 -numpy/testing/print_coercion_tables.py,sha256=qIIxBkc4f2aCKiUY6EsShxQzRrBkFEb4TB7KaQuTl58,2809 -numpy/testing/setup.py,sha256=9PnlgcejccUBzaGPi9Po-ElhmuQMAmWCBRdvCDwiKYw,676 -numpy/testing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/testing/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/testing/tests/__pycache__/test_decorators.cpython-37.pyc,, 
-numpy/testing/tests/__pycache__/test_doctesting.cpython-37.pyc,, -numpy/testing/tests/__pycache__/test_utils.cpython-37.pyc,, -numpy/testing/tests/test_decorators.py,sha256=mkMCPSPJdrKxQl93u0QlIEdp5JS0tCzgLHXuoYDDvzs,6001 -numpy/testing/tests/test_doctesting.py,sha256=sKBXwuRZwMFSiem3R9egBzzSUB81kkpw9y-Y07iqU2M,1413 -numpy/testing/tests/test_utils.py,sha256=sB8vinI9-74VO9il6mf3a7k4OXh0HFp3dSVQk6br5JM,54774 -numpy/testing/utils.py,sha256=5-ntGTS7ux_T1sowuhRT5bwerhsCmgUfkMB-JJqPOOM,1298 -numpy/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/tests/__pycache__/test_ctypeslib.cpython-37.pyc,, -numpy/tests/__pycache__/test_matlib.cpython-37.pyc,, -numpy/tests/__pycache__/test_numpy_version.cpython-37.pyc,, -numpy/tests/__pycache__/test_public_api.cpython-37.pyc,, -numpy/tests/__pycache__/test_reloading.cpython-37.pyc,, -numpy/tests/__pycache__/test_scripts.cpython-37.pyc,, -numpy/tests/__pycache__/test_warnings.cpython-37.pyc,, -numpy/tests/test_ctypeslib.py,sha256=Fy_dBd80RrBufyeXISkBu6kS3X700qOD5ob0pDjRssg,12276 -numpy/tests/test_matlib.py,sha256=WKILeEOe3NdKP_XAy-uCs4VEi7r_ghQ7NUhIgH1LzoM,2158 -numpy/tests/test_numpy_version.py,sha256=VtTTZAPnsJ8xtKLy1qYqIwrpcjTtqJ9xP9qP5-p8DbU,647 -numpy/tests/test_public_api.py,sha256=Cfv9zpw_M9XElubxmNs_d1lwgo3ErVdGI1ttHpjHHEM,15532 -numpy/tests/test_reloading.py,sha256=k_J-pWB1mO4XoSAqOZ-qgpsn5It6yXgcRvNs1wxbcoY,1298 -numpy/tests/test_scripts.py,sha256=SxlQPb8EttfP4V5iGJyXMBtDWTS3EcYVBN-JWDTtSy4,1637 -numpy/tests/test_warnings.py,sha256=38bAtHc0P2uZ8c2Y9TQse3k6KBtPnvix8Q7OlF3WgZw,2594 -numpy/version.py,sha256=yEnGmiF7H8pwqnezXt9q8Sc7b1bD2kI-p7hhywdWKMA,294 diff --git a/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/WHEEL b/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/WHEEL deleted file mode 100644 index 697e432..0000000 --- a/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ 
-Wheel-Version: 1.0 -Generator: bdist_wheel (0.31.1) -Root-Is-Purelib: false -Tag: cp37-cp37m-manylinux1_x86_64 - diff --git a/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/entry_points.txt b/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/entry_points.txt deleted file mode 100644 index b6bb53a..0000000 --- a/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/entry_points.txt +++ /dev/null @@ -1,5 +0,0 @@ -[console_scripts] -f2py = numpy.f2py.f2py2e:main -f2py3 = numpy.f2py.f2py2e:main -f2py3.7 = numpy.f2py.f2py2e:main - diff --git a/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/top_level.txt b/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/top_level.txt deleted file mode 100644 index 24ce15a..0000000 --- a/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -numpy diff --git a/venv/lib/python3.7/site-packages/numpy/.libs/libgfortran-ed201abd.so.3.0.0 b/venv/lib/python3.7/site-packages/numpy/.libs/libgfortran-ed201abd.so.3.0.0 deleted file mode 100755 index eb7ac25..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/.libs/libgfortran-ed201abd.so.3.0.0 and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/.libs/libopenblasp-r0-34a18dc3.3.7.so b/venv/lib/python3.7/site-packages/numpy/.libs/libopenblasp-r0-34a18dc3.3.7.so deleted file mode 100755 index 757de41..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/.libs/libopenblasp-r0-34a18dc3.3.7.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/LICENSE.txt b/venv/lib/python3.7/site-packages/numpy/LICENSE.txt deleted file mode 100644 index 3f1733f..0000000 --- a/venv/lib/python3.7/site-packages/numpy/LICENSE.txt +++ /dev/null @@ -1,910 +0,0 @@ -Copyright (c) 2005-2019, NumPy Developers. -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - * Neither the name of the NumPy Developers nor the names of any - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----- - -This binary distribution of NumPy also bundles the following software: - - -Name: OpenBLAS -Files: .libs/libopenb*.so -Description: bundled as a dynamically linked library -Availability: https://github.com/xianyi/OpenBLAS/ -License: 3-clause BSD - Copyright (c) 2011-2014, The OpenBLAS Project - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - 1. 
Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - 3. Neither the name of the OpenBLAS project nor the names of - its contributors may be used to endorse or promote products - derived from this software without specific prior written - permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -Name: LAPACK -Files: .libs/libopenb*.so -Description: bundled in OpenBLAS -Availability: https://github.com/xianyi/OpenBLAS/ -License 3-clause BSD - Copyright (c) 1992-2013 The University of Tennessee and The University - of Tennessee Research Foundation. All rights - reserved. - Copyright (c) 2000-2013 The University of California Berkeley. All - rights reserved. - Copyright (c) 2006-2013 The University of Colorado Denver. All rights - reserved. 
- - $COPYRIGHT$ - - Additional copyrights may follow - - $HEADER$ - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer listed - in this license in the documentation and/or other materials - provided with the distribution. - - - Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - The copyright holders provide no reassurances that the source code - provided does not infringe any patent, copyright, or any other - intellectual property rights of third parties. The copyright holders - disclaim any liability to any recipient for claims brought against - recipient by any third party for infringement of that parties - intellectual property rights. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -Name: GCC runtime library -Files: .libs/libgfortran*.so -Description: dynamically linked to files compiled with gcc -Availability: https://gcc.gnu.org/viewcvs/gcc/ -License: GPLv3 + runtime exception - Copyright (C) 2002-2017 Free Software Foundation, Inc. - - Libgfortran is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgfortran is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . - ----- - -Full text of license texts referred to above follows (that they are -listed below does not necessarily imply the conditions apply to the -present binary release): - ----- - -GCC RUNTIME LIBRARY EXCEPTION - -Version 3.1, 31 March 2009 - -Copyright (C) 2009 Free Software Foundation, Inc. - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -This GCC Runtime Library Exception ("Exception") is an additional -permission under section 7 of the GNU General Public License, version -3 ("GPLv3"). It applies to a given file (the "Runtime Library") that -bears a notice placed by the copyright holder of the file stating that -the file is governed by GPLv3 along with this Exception. 
- -When you use GCC to compile a program, GCC may combine portions of -certain GCC header files and runtime libraries with the compiled -program. The purpose of this Exception is to allow compilation of -non-GPL (including proprietary) programs to use, in this way, the -header files and runtime libraries covered by this Exception. - -0. Definitions. - -A file is an "Independent Module" if it either requires the Runtime -Library for execution after a Compilation Process, or makes use of an -interface provided by the Runtime Library, but is not otherwise based -on the Runtime Library. - -"GCC" means a version of the GNU Compiler Collection, with or without -modifications, governed by version 3 (or a specified later version) of -the GNU General Public License (GPL) with the option of using any -subsequent versions published by the FSF. - -"GPL-compatible Software" is software whose conditions of propagation, -modification and use would permit combination with GCC in accord with -the license of GCC. - -"Target Code" refers to output from any compiler for a real or virtual -target processor architecture, in executable form or suitable for -input to an assembler, loader, linker and/or execution -phase. Notwithstanding that, Target Code does not include data in any -format that is used as a compiler intermediate representation, or used -for producing a compiler intermediate representation. - -The "Compilation Process" transforms code entirely represented in -non-intermediate languages designed for human-written code, and/or in -Java Virtual Machine byte code, into Target Code. Thus, for example, -use of source code generators and preprocessors need not be considered -part of the Compilation Process, since the Compilation Process can be -understood as starting with the output of the generators or -preprocessors. 
- -A Compilation Process is "Eligible" if it is done using GCC, alone or -with other GPL-compatible software, or if it is done without using any -work based on GCC. For example, using non-GPL-compatible Software to -optimize any GCC intermediate representations would not qualify as an -Eligible Compilation Process. - -1. Grant of Additional Permission. - -You have permission to propagate a work of Target Code formed by -combining the Runtime Library with Independent Modules, even if such -propagation would otherwise violate the terms of GPLv3, provided that -all Target Code was generated by Eligible Compilation Processes. You -may then convey such a combination under terms of your choice, -consistent with the licensing of the Independent Modules. - -2. No Weakening of GCC Copyleft. - -The availability of this Exception does not imply any general -presumption that third-party software is unaffected by the copyleft -requirements of the license of GCC. - ----- - - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. 
Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. 
If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. 
Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. 
- - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. 
Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. 
- - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. 
- - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. 
If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. 
- - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. 
- - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the 
material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. 
- - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. 
If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. 
- - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. 
You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. 
The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. 
- - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. 
diff --git a/venv/lib/python3.7/site-packages/numpy/__config__.py b/venv/lib/python3.7/site-packages/numpy/__config__.py deleted file mode 100644 index db2e454..0000000 --- a/venv/lib/python3.7/site-packages/numpy/__config__.py +++ /dev/null @@ -1,40 +0,0 @@ -# This file is generated by numpy's setup.py -# It contains system_info results at the time of building this package. -__all__ = ["get_info","show"] - - -import os -import sys - -extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs') - -if sys.platform == 'win32' and os.path.isdir(extra_dll_dir): - if sys.version_info >= (3, 8): - os.add_dll_directory(extra_dll_dir) - else: - os.environ.setdefault('PATH', '') - os.environ['PATH'] += os.pathsep + extra_dll_dir - -blas_mkl_info={} -blis_info={} -openblas_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]} -blas_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]} -lapack_mkl_info={} -openblas_lapack_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]} -lapack_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]} - -def get_info(name): - g = globals() - return g.get(name, g.get(name + "_info", {})) - -def show(): - for name,info_dict in globals().items(): - if name[0] == "_" or type(info_dict) is not type({}): continue - print(name + ":") - if not info_dict: - print(" NOT AVAILABLE") - for k,v in info_dict.items(): - v = str(v) - if k == "sources" and len(v) > 200: - v = v[:60] + " ...\n... 
" + v[-60:] - print(" %s = %s" % (k,v)) diff --git a/venv/lib/python3.7/site-packages/numpy/__init__.py b/venv/lib/python3.7/site-packages/numpy/__init__.py deleted file mode 100644 index 349914b..0000000 --- a/venv/lib/python3.7/site-packages/numpy/__init__.py +++ /dev/null @@ -1,260 +0,0 @@ -""" -NumPy -===== - -Provides - 1. An array object of arbitrary homogeneous items - 2. Fast mathematical operations over arrays - 3. Linear Algebra, Fourier Transforms, Random Number Generation - -How to use the documentation ----------------------------- -Documentation is available in two forms: docstrings provided -with the code, and a loose standing reference guide, available from -`the NumPy homepage `_. - -We recommend exploring the docstrings using -`IPython `_, an advanced Python shell with -TAB-completion and introspection capabilities. See below for further -instructions. - -The docstring examples assume that `numpy` has been imported as `np`:: - - >>> import numpy as np - -Code snippets are indicated by three greater-than signs:: - - >>> x = 42 - >>> x = x + 1 - -Use the built-in ``help`` function to view a function's docstring:: - - >>> help(np.sort) - ... # doctest: +SKIP - -For some objects, ``np.info(obj)`` may provide additional help. This is -particularly true if you see the line "Help on ufunc object:" at the top -of the help() page. Ufuncs are implemented in C, not Python, for speed. -The native Python help() does not know how to view their help, but our -np.info() function does. - -To search for documents containing a keyword, do:: - - >>> np.lookfor('keyword') - ... # doctest: +SKIP - -General-purpose documents like a glossary and help on the basic concepts -of numpy are available under the ``doc`` sub-module:: - - >>> from numpy import doc - >>> help(doc) - ... # doctest: +SKIP - -Available subpackages ---------------------- -doc - Topical documentation on broadcasting, indexing, etc. -lib - Basic functions used by several sub-packages. 
-random - Core Random Tools -linalg - Core Linear Algebra Tools -fft - Core FFT routines -polynomial - Polynomial tools -testing - NumPy testing tools -f2py - Fortran to Python Interface Generator. -distutils - Enhancements to distutils with support for - Fortran compilers support and more. - -Utilities ---------- -test - Run numpy unittests -show_config - Show numpy build configuration -dual - Overwrite certain functions with high-performance Scipy tools -matlib - Make everything matrices. -__version__ - NumPy version string - -Viewing documentation using IPython ------------------------------------ -Start IPython with the NumPy profile (``ipython -p numpy``), which will -import `numpy` under the alias `np`. Then, use the ``cpaste`` command to -paste examples into the shell. To see which functions are available in -`numpy`, type ``np.`` (where ```` refers to the TAB key), or use -``np.*cos*?`` (where ```` refers to the ENTER key) to narrow -down the list. To view the docstring for a function, use -``np.cos?`` (to view the docstring) and ``np.cos??`` (to view -the source code). - -Copies vs. in-place operation ------------------------------ -Most of the functions in `numpy` return a copy of the array argument -(e.g., `np.sort`). In-place versions of these functions are often -available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``. -Exceptions to this rule are documented. - -""" -from __future__ import division, absolute_import, print_function - -import sys -import warnings - -from ._globals import ModuleDeprecationWarning, VisibleDeprecationWarning -from ._globals import _NoValue - -# We first need to detect if we're being called as part of the numpy setup -# procedure itself in a reliable manner. 
-try: - __NUMPY_SETUP__ -except NameError: - __NUMPY_SETUP__ = False - -if __NUMPY_SETUP__: - sys.stderr.write('Running from numpy source directory.\n') -else: - try: - from numpy.__config__ import show as show_config - except ImportError: - msg = """Error importing numpy: you should not try to import numpy from - its source directory; please exit the numpy source tree, and relaunch - your python interpreter from there.""" - raise ImportError(msg) - - from .version import git_revision as __git_revision__ - from .version import version as __version__ - - __all__ = ['ModuleDeprecationWarning', - 'VisibleDeprecationWarning'] - - # Allow distributors to run custom init code - from . import _distributor_init - - from . import core - from .core import * - from . import compat - from . import lib - # FIXME: why have numpy.lib if everything is imported here?? - from .lib import * - - from . import linalg - from . import fft - from . import polynomial - from . import random - from . import ctypeslib - from . import ma - from . import matrixlib as _mat - from .matrixlib import * - from .compat import long - - # Make these accessible from numpy name-space - # but not imported in from numpy import * - # TODO[gh-6103]: Deprecate these - if sys.version_info[0] >= 3: - from builtins import bool, int, float, complex, object, str - unicode = str - else: - from __builtin__ import bool, int, float, complex, object, unicode, str - - from .core import round, abs, max, min - # now that numpy modules are imported, can initialize limits - core.getlimits._register_known_types() - - __all__.extend(['__version__', 'show_config']) - __all__.extend(core.__all__) - __all__.extend(_mat.__all__) - __all__.extend(lib.__all__) - __all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma']) - - # These are added by `from .core import *` and `core.__all__`, but we - # overwrite them above with builtins we do _not_ want to export. 
- __all__.remove('long') - __all__.remove('unicode') - - # Remove things that are in the numpy.lib but not in the numpy namespace - # Note that there is a test (numpy/tests/test_public_api.py:test_numpy_namespace) - # that prevents adding more things to the main namespace by accident. - # The list below will grow until the `from .lib import *` fixme above is - # taken care of - __all__.remove('Arrayterator') - del Arrayterator - - # Filter out Cython harmless warnings - warnings.filterwarnings("ignore", message="numpy.dtype size changed") - warnings.filterwarnings("ignore", message="numpy.ufunc size changed") - warnings.filterwarnings("ignore", message="numpy.ndarray size changed") - - # oldnumeric and numarray were removed in 1.9. In case some packages import - # but do not use them, we define them here for backward compatibility. - oldnumeric = 'removed' - numarray = 'removed' - - if sys.version_info[:2] >= (3, 7): - # Importing Tester requires importing all of UnitTest which is not a - # cheap import Since it is mainly used in test suits, we lazy import it - # here to save on the order of 10 ms of import time for most users - # - # The previous way Tester was imported also had a side effect of adding - # the full `numpy.testing` namespace - # - # module level getattr is only supported in 3.7 onwards - # https://www.python.org/dev/peps/pep-0562/ - def __getattr__(attr): - if attr == 'testing': - import numpy.testing as testing - return testing - elif attr == 'Tester': - from .testing import Tester - return Tester - else: - raise AttributeError("module {!r} has no attribute " - "{!r}".format(__name__, attr)) - - def __dir__(): - return list(globals().keys()) + ['Tester', 'testing'] - - else: - # We don't actually use this ourselves anymore, but I'm not 100% sure that - # no-one else in the world is using it (though I hope not) - from .testing import Tester - - # Pytest testing - from numpy._pytesttester import PytestTester - test = PytestTester(__name__) - del 
PytestTester - - - def _sanity_check(): - """ - Quick sanity checks for common bugs caused by environment. - There are some cases e.g. with wrong BLAS ABI that cause wrong - results under specific runtime conditions that are not necessarily - achieved during test suite runs, and it is useful to catch those early. - - See https://github.com/numpy/numpy/issues/8577 and other - similar bug reports. - - """ - try: - x = ones(2, dtype=float32) - if not abs(x.dot(x) - 2.0) < 1e-5: - raise AssertionError() - except AssertionError: - msg = ("The current Numpy installation ({!r}) fails to " - "pass simple sanity checks. This can be caused for example " - "by incorrect BLAS library being linked in, or by mixing " - "package managers (pip, conda, apt, ...). Search closed " - "numpy issues for similar problems.") - raise RuntimeError(msg.format(__file__)) - - _sanity_check() - del _sanity_check diff --git a/venv/lib/python3.7/site-packages/numpy/_distributor_init.py b/venv/lib/python3.7/site-packages/numpy/_distributor_init.py deleted file mode 100644 index d893ba3..0000000 --- a/venv/lib/python3.7/site-packages/numpy/_distributor_init.py +++ /dev/null @@ -1,10 +0,0 @@ -""" Distributor init file - -Distributors: you can add custom code here to support particular distributions -of numpy. - -For example, this is a good place to put any checks for hardware requirements. - -The numpy standard source distribution will not put code in this file, so you -can safely replace this file with your own version. -""" diff --git a/venv/lib/python3.7/site-packages/numpy/_globals.py b/venv/lib/python3.7/site-packages/numpy/_globals.py deleted file mode 100644 index f5c0761..0000000 --- a/venv/lib/python3.7/site-packages/numpy/_globals.py +++ /dev/null @@ -1,81 +0,0 @@ -""" -Module defining global singleton classes. - -This module raises a RuntimeError if an attempt to reload it is made. 
In that -way the identities of the classes defined here are fixed and will remain so -even if numpy itself is reloaded. In particular, a function like the following -will still work correctly after numpy is reloaded:: - - def foo(arg=np._NoValue): - if arg is np._NoValue: - ... - -That was not the case when the singleton classes were defined in the numpy -``__init__.py`` file. See gh-7844 for a discussion of the reload problem that -motivated this module. - -""" -from __future__ import division, absolute_import, print_function - -__ALL__ = [ - 'ModuleDeprecationWarning', 'VisibleDeprecationWarning', '_NoValue' - ] - - -# Disallow reloading this module so as to preserve the identities of the -# classes defined here. -if '_is_loaded' in globals(): - raise RuntimeError('Reloading numpy._globals is not allowed') -_is_loaded = True - - -class ModuleDeprecationWarning(DeprecationWarning): - """Module deprecation warning. - - The nose tester turns ordinary Deprecation warnings into test failures. - That makes it hard to deprecate whole modules, because they get - imported by default. So this is a special Deprecation warning that the - nose tester will let pass without making tests fail. - - """ - - -ModuleDeprecationWarning.__module__ = 'numpy' - - -class VisibleDeprecationWarning(UserWarning): - """Visible deprecation warning. - - By default, python will not show deprecation warnings, so this class - can be used when a very visible warning is helpful, for example because - the usage is most likely a user bug. - - """ - - -VisibleDeprecationWarning.__module__ = 'numpy' - - -class _NoValueType(object): - """Special keyword value. - - The instance of this class may be used as the default value assigned to a - deprecated keyword in order to check if it has been given a user defined - value. 
- """ - __instance = None - def __new__(cls): - # ensure that only one instance exists - if not cls.__instance: - cls.__instance = super(_NoValueType, cls).__new__(cls) - return cls.__instance - - # needed for python 2 to preserve identity through a pickle - def __reduce__(self): - return (self.__class__, ()) - - def __repr__(self): - return "" - - -_NoValue = _NoValueType() diff --git a/venv/lib/python3.7/site-packages/numpy/_pytesttester.py b/venv/lib/python3.7/site-packages/numpy/_pytesttester.py deleted file mode 100644 index b25224c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/_pytesttester.py +++ /dev/null @@ -1,214 +0,0 @@ -""" -Pytest test running. - -This module implements the ``test()`` function for NumPy modules. The usual -boiler plate for doing that is to put the following in the module -``__init__.py`` file:: - - from numpy._pytesttester import PytestTester - test = PytestTester(__name__).test - del PytestTester - - -Warnings filtering and other runtime settings should be dealt with in the -``pytest.ini`` file in the numpy repo root. The behavior of the test depends on -whether or not that file is found as follows: - -* ``pytest.ini`` is present (develop mode) - All warnings except those explicily filtered out are raised as error. -* ``pytest.ini`` is absent (release mode) - DeprecationWarnings and PendingDeprecationWarnings are ignored, other - warnings are passed through. - -In practice, tests run from the numpy repo are run in develop mode. That -includes the standard ``python runtests.py`` invocation. - -This module is imported by every numpy subpackage, so lies at the top level to -simplify circular import issues. For the same reason, it contains no numpy -imports at module scope, instead importing numpy within function calls. 
-""" -from __future__ import division, absolute_import, print_function - -import sys -import os - -__all__ = ['PytestTester'] - - - -def _show_numpy_info(): - import numpy as np - - print("NumPy version %s" % np.__version__) - relaxed_strides = np.ones((10, 1), order="C").flags.f_contiguous - print("NumPy relaxed strides checking option:", relaxed_strides) - - -class PytestTester(object): - """ - Pytest test runner. - - A test function is typically added to a package's __init__.py like so:: - - from numpy._pytesttester import PytestTester - test = PytestTester(__name__).test - del PytestTester - - Calling this test function finds and runs all tests associated with the - module and all its sub-modules. - - Attributes - ---------- - module_name : str - Full path to the package to test. - - Parameters - ---------- - module_name : module name - The name of the module to test. - - Notes - ----- - Unlike the previous ``nose``-based implementation, this class is not - publicly exposed as it performs some ``numpy``-specific warning - suppression. - - """ - def __init__(self, module_name): - self.module_name = module_name - - def __call__(self, label='fast', verbose=1, extra_argv=None, - doctests=False, coverage=False, durations=-1, tests=None): - """ - Run tests for module using pytest. - - Parameters - ---------- - label : {'fast', 'full'}, optional - Identifies the tests to run. When set to 'fast', tests decorated - with `pytest.mark.slow` are skipped, when 'full', the slow marker - is ignored. - verbose : int, optional - Verbosity value for test outputs, in the range 1-3. Default is 1. - extra_argv : list, optional - List with any extra arguments to pass to pytests. - doctests : bool, optional - .. note:: Not supported - coverage : bool, optional - If True, report coverage of NumPy code. Default is False. - Requires installation of (pip) pytest-cov. 
- durations : int, optional - If < 0, do nothing, If 0, report time of all tests, if > 0, - report the time of the slowest `timer` tests. Default is -1. - tests : test or list of tests - Tests to be executed with pytest '--pyargs' - - Returns - ------- - result : bool - Return True on success, false otherwise. - - Notes - ----- - Each NumPy module exposes `test` in its namespace to run all tests for - it. For example, to run all tests for numpy.lib: - - >>> np.lib.test() #doctest: +SKIP - - Examples - -------- - >>> result = np.lib.test() #doctest: +SKIP - ... - 1023 passed, 2 skipped, 6 deselected, 1 xfailed in 10.39 seconds - >>> result - True - - """ - import pytest - import warnings - - #FIXME This is no longer needed? Assume it was for use in tests. - # cap verbosity at 3, which is equivalent to the pytest '-vv' option - #from . import utils - #verbose = min(int(verbose), 3) - #utils.verbose = verbose - # - - module = sys.modules[self.module_name] - module_path = os.path.abspath(module.__path__[0]) - - # setup the pytest arguments - pytest_args = ["-l"] - - # offset verbosity. The "-q" cancels a "-v". - pytest_args += ["-q"] - - # Filter out distutils cpu warnings (could be localized to - # distutils tests). ASV has problems with top level import, - # so fetch module for suppression here. - with warnings.catch_warnings(): - warnings.simplefilter("always") - from numpy.distutils import cpuinfo - - # Filter out annoying import messages. Want these in both develop and - # release mode. 
- pytest_args += [ - "-W ignore:Not importing directory", - "-W ignore:numpy.dtype size changed", - "-W ignore:numpy.ufunc size changed", - "-W ignore::UserWarning:cpuinfo", - ] - - # When testing matrices, ignore their PendingDeprecationWarnings - pytest_args += [ - "-W ignore:the matrix subclass is not", - ] - - # Ignore python2.7 -3 warnings - pytest_args += [ - r"-W ignore:sys\.exc_clear\(\) not supported in 3\.x:DeprecationWarning", - r"-W ignore:in 3\.x, __setslice__:DeprecationWarning", - r"-W ignore:in 3\.x, __getslice__:DeprecationWarning", - r"-W ignore:buffer\(\) not supported in 3\.x:DeprecationWarning", - r"-W ignore:CObject type is not supported in 3\.x:DeprecationWarning", - r"-W ignore:comparing unequal types not supported in 3\.x:DeprecationWarning", - r"-W ignore:the commands module has been removed in Python 3\.0:DeprecationWarning", - r"-W ignore:The 'new' module has been removed in Python 3\.0:DeprecationWarning", - ] - - - if doctests: - raise ValueError("Doctests not supported") - - if extra_argv: - pytest_args += list(extra_argv) - - if verbose > 1: - pytest_args += ["-" + "v"*(verbose - 1)] - - if coverage: - pytest_args += ["--cov=" + module_path] - - if label == "fast": - pytest_args += ["-m", "not slow"] - elif label != "full": - pytest_args += ["-m", label] - - if durations >= 0: - pytest_args += ["--durations=%s" % durations] - - if tests is None: - tests = [self.module_name] - - pytest_args += ["--pyargs"] + list(tests) - - - # run tests. - _show_numpy_info() - - try: - code = pytest.main(pytest_args) - except SystemExit as exc: - code = exc.code - - return code == 0 diff --git a/venv/lib/python3.7/site-packages/numpy/compat/__init__.py b/venv/lib/python3.7/site-packages/numpy/compat/__init__.py deleted file mode 100644 index 5b371f5..0000000 --- a/venv/lib/python3.7/site-packages/numpy/compat/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -""" -Compatibility module. 
- -This module contains duplicated code from Python itself or 3rd party -extensions, which may be included for the following reasons: - - * compatibility - * we may only need a small subset of the copied library/module - -""" -from __future__ import division, absolute_import, print_function - -from . import _inspect -from . import py3k -from ._inspect import getargspec, formatargspec -from .py3k import * - -__all__ = [] -__all__.extend(_inspect.__all__) -__all__.extend(py3k.__all__) diff --git a/venv/lib/python3.7/site-packages/numpy/compat/_inspect.py b/venv/lib/python3.7/site-packages/numpy/compat/_inspect.py deleted file mode 100644 index 439d0d2..0000000 --- a/venv/lib/python3.7/site-packages/numpy/compat/_inspect.py +++ /dev/null @@ -1,193 +0,0 @@ -"""Subset of inspect module from upstream python - -We use this instead of upstream because upstream inspect is slow to import, and -significantly contributes to numpy import times. Importing this copy has almost -no overhead. - -""" -from __future__ import division, absolute_import, print_function - -import types - -__all__ = ['getargspec', 'formatargspec'] - -# ----------------------------------------------------------- type-checking -def ismethod(object): - """Return true if the object is an instance method. - - Instance method objects provide these attributes: - __doc__ documentation string - __name__ name with which this method was defined - im_class class object in which this method belongs - im_func function object containing implementation of method - im_self instance to which this method is bound, or None - - """ - return isinstance(object, types.MethodType) - -def isfunction(object): - """Return true if the object is a user-defined function. 
- - Function objects provide these attributes: - __doc__ documentation string - __name__ name with which this function was defined - func_code code object containing compiled function bytecode - func_defaults tuple of any default values for arguments - func_doc (same as __doc__) - func_globals global namespace in which this function was defined - func_name (same as __name__) - - """ - return isinstance(object, types.FunctionType) - -def iscode(object): - """Return true if the object is a code object. - - Code objects provide these attributes: - co_argcount number of arguments (not including * or ** args) - co_code string of raw compiled bytecode - co_consts tuple of constants used in the bytecode - co_filename name of file in which this code object was created - co_firstlineno number of first line in Python source code - co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg - co_lnotab encoded mapping of line numbers to bytecode indices - co_name name with which this code object was defined - co_names tuple of names of local variables - co_nlocals number of local variables - co_stacksize virtual machine stack space required - co_varnames tuple of names of arguments and local variables - - """ - return isinstance(object, types.CodeType) - -# ------------------------------------------------ argument list extraction -# These constants are from Python's compile.h. -CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8 - -def getargs(co): - """Get information about the arguments accepted by a code object. - - Three things are returned: (args, varargs, varkw), where 'args' is - a list of argument names (possibly containing nested lists), and - 'varargs' and 'varkw' are the names of the * and ** arguments or None. - - """ - - if not iscode(co): - raise TypeError('arg is not a code object') - - nargs = co.co_argcount - names = co.co_varnames - args = list(names[:nargs]) - - # The following acrobatics are for anonymous (tuple) arguments. 
- # Which we do not need to support, so remove to avoid importing - # the dis module. - for i in range(nargs): - if args[i][:1] in ['', '.']: - raise TypeError("tuple function arguments are not supported") - varargs = None - if co.co_flags & CO_VARARGS: - varargs = co.co_varnames[nargs] - nargs = nargs + 1 - varkw = None - if co.co_flags & CO_VARKEYWORDS: - varkw = co.co_varnames[nargs] - return args, varargs, varkw - -def getargspec(func): - """Get the names and default values of a function's arguments. - - A tuple of four things is returned: (args, varargs, varkw, defaults). - 'args' is a list of the argument names (it may contain nested lists). - 'varargs' and 'varkw' are the names of the * and ** arguments or None. - 'defaults' is an n-tuple of the default values of the last n arguments. - - """ - - if ismethod(func): - func = func.__func__ - if not isfunction(func): - raise TypeError('arg is not a Python function') - args, varargs, varkw = getargs(func.__code__) - return args, varargs, varkw, func.__defaults__ - -def getargvalues(frame): - """Get information about arguments passed into a particular frame. - - A tuple of four things is returned: (args, varargs, varkw, locals). - 'args' is a list of the argument names (it may contain nested lists). - 'varargs' and 'varkw' are the names of the * and ** arguments or None. - 'locals' is the locals dictionary of the given frame. - - """ - args, varargs, varkw = getargs(frame.f_code) - return args, varargs, varkw, frame.f_locals - -def joinseq(seq): - if len(seq) == 1: - return '(' + seq[0] + ',)' - else: - return '(' + ', '.join(seq) + ')' - -def strseq(object, convert, join=joinseq): - """Recursively walk a sequence, stringifying each element. 
- - """ - if type(object) in [list, tuple]: - return join([strseq(_o, convert, join) for _o in object]) - else: - return convert(object) - -def formatargspec(args, varargs=None, varkw=None, defaults=None, - formatarg=str, - formatvarargs=lambda name: '*' + name, - formatvarkw=lambda name: '**' + name, - formatvalue=lambda value: '=' + repr(value), - join=joinseq): - """Format an argument spec from the 4 values returned by getargspec. - - The first four arguments are (args, varargs, varkw, defaults). The - other four arguments are the corresponding optional formatting functions - that are called to turn names and values into strings. The ninth - argument is an optional function to format the sequence of arguments. - - """ - specs = [] - if defaults: - firstdefault = len(args) - len(defaults) - for i in range(len(args)): - spec = strseq(args[i], formatarg, join) - if defaults and i >= firstdefault: - spec = spec + formatvalue(defaults[i - firstdefault]) - specs.append(spec) - if varargs is not None: - specs.append(formatvarargs(varargs)) - if varkw is not None: - specs.append(formatvarkw(varkw)) - return '(' + ', '.join(specs) + ')' - -def formatargvalues(args, varargs, varkw, locals, - formatarg=str, - formatvarargs=lambda name: '*' + name, - formatvarkw=lambda name: '**' + name, - formatvalue=lambda value: '=' + repr(value), - join=joinseq): - """Format an argument spec from the 4 values returned by getargvalues. - - The first four arguments are (args, varargs, varkw, locals). The - next four arguments are the corresponding optional formatting functions - that are called to turn names and values into strings. The ninth - argument is an optional function to format the sequence of arguments. 
- - """ - def convert(name, locals=locals, - formatarg=formatarg, formatvalue=formatvalue): - return formatarg(name) + formatvalue(locals[name]) - specs = [strseq(arg, convert, join) for arg in args] - - if varargs: - specs.append(formatvarargs(varargs) + formatvalue(locals[varargs])) - if varkw: - specs.append(formatvarkw(varkw) + formatvalue(locals[varkw])) - return '(' + ', '.join(specs) + ')' diff --git a/venv/lib/python3.7/site-packages/numpy/compat/py3k.py b/venv/lib/python3.7/site-packages/numpy/compat/py3k.py deleted file mode 100644 index 90e17d6..0000000 --- a/venv/lib/python3.7/site-packages/numpy/compat/py3k.py +++ /dev/null @@ -1,253 +0,0 @@ -""" -Python 3.X compatibility tools. - -While this file was originally intented for Python 2 -> 3 transition, -it is now used to create a compatibility layer between different -minor versions of Python 3. - -While the active version of numpy may not support a given version of python, we -allow downstream libraries to continue to use these shims for forward -compatibility with numpy while they transition their code to newer versions of -Python. 
-""" -__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar', - 'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested', - 'asstr', 'open_latin1', 'long', 'basestring', 'sixu', - 'integer_types', 'is_pathlib_path', 'npy_load_module', 'Path', - 'pickle', 'contextlib_nullcontext', 'os_fspath', 'os_PathLike'] - -import sys -import os -try: - from pathlib import Path, PurePath -except ImportError: - Path = PurePath = None - -if sys.version_info[0] >= 3: - import io - - try: - import pickle5 as pickle - except ImportError: - import pickle - - long = int - integer_types = (int,) - basestring = str - unicode = str - bytes = bytes - - def asunicode(s): - if isinstance(s, bytes): - return s.decode('latin1') - return str(s) - - def asbytes(s): - if isinstance(s, bytes): - return s - return str(s).encode('latin1') - - def asstr(s): - if isinstance(s, bytes): - return s.decode('latin1') - return str(s) - - def isfileobj(f): - return isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)) - - def open_latin1(filename, mode='r'): - return open(filename, mode=mode, encoding='iso-8859-1') - - def sixu(s): - return s - - strchar = 'U' - -else: - import cpickle as pickle - - bytes = str - long = long - basestring = basestring - unicode = unicode - integer_types = (int, long) - asbytes = str - asstr = str - strchar = 'S' - - def isfileobj(f): - return isinstance(f, file) - - def asunicode(s): - if isinstance(s, unicode): - return s - return str(s).decode('ascii') - - def open_latin1(filename, mode='r'): - return open(filename, mode=mode) - - def sixu(s): - return unicode(s, 'unicode_escape') - -def getexception(): - return sys.exc_info()[1] - -def asbytes_nested(x): - if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): - return [asbytes_nested(y) for y in x] - else: - return asbytes(x) - -def asunicode_nested(x): - if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): - return [asunicode_nested(y) for y in x] - else: - return 
asunicode(x) - -def is_pathlib_path(obj): - """ - Check whether obj is a pathlib.Path object. - - Prefer using `isinstance(obj, os_PathLike)` instead of this function. - """ - return Path is not None and isinstance(obj, Path) - -# from Python 3.7 -class contextlib_nullcontext(object): - """Context manager that does no additional processing. - - Used as a stand-in for a normal context manager, when a particular - block of code is only sometimes used with a normal context manager: - - cm = optional_cm if condition else nullcontext() - with cm: - # Perform operation, using optional_cm if condition is True - """ - - def __init__(self, enter_result=None): - self.enter_result = enter_result - - def __enter__(self): - return self.enter_result - - def __exit__(self, *excinfo): - pass - - -if sys.version_info[0] >= 3 and sys.version_info[1] >= 4: - def npy_load_module(name, fn, info=None): - """ - Load a module. - - .. versionadded:: 1.11.2 - - Parameters - ---------- - name : str - Full module name. - fn : str - Path to module file. - info : tuple, optional - Only here for backward compatibility with Python 2.*. - - Returns - ------- - mod : module - - """ - import importlib.machinery - return importlib.machinery.SourceFileLoader(name, fn).load_module() -else: - def npy_load_module(name, fn, info=None): - """ - Load a module. - - .. versionadded:: 1.11.2 - - Parameters - ---------- - name : str - Full module name. - fn : str - Path to module file. - info : tuple, optional - Information as returned by `imp.find_module` - (suffix, mode, type). 
- - Returns - ------- - mod : module - - """ - import imp - if info is None: - path = os.path.dirname(fn) - fo, fn, info = imp.find_module(name, [path]) - else: - fo = open(fn, info[1]) - try: - mod = imp.load_module(name, fo, fn, info) - finally: - fo.close() - return mod - -# backport abc.ABC -import abc -if sys.version_info[:2] >= (3, 4): - abc_ABC = abc.ABC -else: - abc_ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()}) - - -# Backport os.fs_path, os.PathLike, and PurePath.__fspath__ -if sys.version_info[:2] >= (3, 6): - os_fspath = os.fspath - os_PathLike = os.PathLike -else: - def _PurePath__fspath__(self): - return str(self) - - class os_PathLike(abc_ABC): - """Abstract base class for implementing the file system path protocol.""" - - @abc.abstractmethod - def __fspath__(self): - """Return the file system path representation of the object.""" - raise NotImplementedError - - @classmethod - def __subclasshook__(cls, subclass): - if PurePath is not None and issubclass(subclass, PurePath): - return True - return hasattr(subclass, '__fspath__') - - - def os_fspath(path): - """Return the path representation of a path-like object. - If str or bytes is passed in, it is returned unchanged. Otherwise the - os.PathLike interface is used to get the path representation. If the - path representation is not str or bytes, TypeError is raised. If the - provided path is not str, bytes, or os.PathLike, TypeError is raised. - """ - if isinstance(path, (str, bytes)): - return path - - # Work from the object's type to match method resolution of other magic - # methods. 
- path_type = type(path) - try: - path_repr = path_type.__fspath__(path) - except AttributeError: - if hasattr(path_type, '__fspath__'): - raise - elif PurePath is not None and issubclass(path_type, PurePath): - return _PurePath__fspath__(path) - else: - raise TypeError("expected str, bytes or os.PathLike object, " - "not " + path_type.__name__) - if isinstance(path_repr, (str, bytes)): - return path_repr - else: - raise TypeError("expected {}.__fspath__() to return str or bytes, " - "not {}".format(path_type.__name__, - type(path_repr).__name__)) diff --git a/venv/lib/python3.7/site-packages/numpy/compat/setup.py b/venv/lib/python3.7/site-packages/numpy/compat/setup.py deleted file mode 100644 index 8828574..0000000 --- a/venv/lib/python3.7/site-packages/numpy/compat/setup.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import division, print_function - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration('compat', parent_package, top_path) - config.add_data_dir('tests') - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/venv/lib/python3.7/site-packages/numpy/compat/tests/__init__.py b/venv/lib/python3.7/site-packages/numpy/compat/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/numpy/compat/tests/test_compat.py b/venv/lib/python3.7/site-packages/numpy/compat/tests/test_compat.py deleted file mode 100644 index 1543aaf..0000000 --- a/venv/lib/python3.7/site-packages/numpy/compat/tests/test_compat.py +++ /dev/null @@ -1,21 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from os.path import join - -from numpy.compat import isfileobj -from numpy.testing import assert_ -from numpy.testing import tempdir - - -def test_isfileobj(): - with tempdir(prefix="numpy_test_compat_") as folder: - filename = join(folder, 
'a.bin') - - with open(filename, 'wb') as f: - assert_(isfileobj(f)) - - with open(filename, 'ab') as f: - assert_(isfileobj(f)) - - with open(filename, 'rb') as f: - assert_(isfileobj(f)) diff --git a/venv/lib/python3.7/site-packages/numpy/conftest.py b/venv/lib/python3.7/site-packages/numpy/conftest.py deleted file mode 100644 index 1baf4ad..0000000 --- a/venv/lib/python3.7/site-packages/numpy/conftest.py +++ /dev/null @@ -1,87 +0,0 @@ -""" -Pytest configuration and fixtures for the Numpy test suite. -""" -from __future__ import division, absolute_import, print_function - -import os - -import pytest -import numpy - -from numpy.core._multiarray_tests import get_fpu_mode - - -_old_fpu_mode = None -_collect_results = {} - - -def pytest_configure(config): - config.addinivalue_line("markers", - "valgrind_error: Tests that are known to error under valgrind.") - config.addinivalue_line("markers", - "leaks_references: Tests that are known to leak references.") - config.addinivalue_line("markers", - "slow: Tests that are very slow.") - - -def pytest_addoption(parser): - parser.addoption("--available-memory", action="store", default=None, - help=("Set amount of memory available for running the " - "test suite. This can result to tests requiring " - "especially large amounts of memory to be skipped. " - "Equivalent to setting environment variable " - "NPY_AVAILABLE_MEM. Default: determined" - "automatically.")) - - -def pytest_sessionstart(session): - available_mem = session.config.getoption('available_memory') - if available_mem is not None: - os.environ['NPY_AVAILABLE_MEM'] = available_mem - - -#FIXME when yield tests are gone. -@pytest.hookimpl() -def pytest_itemcollected(item): - """ - Check FPU precision mode was not changed during test collection. - - The clumsy way we do it here is mainly necessary because numpy - still uses yield tests, which can execute code at test collection - time. 
- """ - global _old_fpu_mode - - mode = get_fpu_mode() - - if _old_fpu_mode is None: - _old_fpu_mode = mode - elif mode != _old_fpu_mode: - _collect_results[item] = (_old_fpu_mode, mode) - _old_fpu_mode = mode - - -@pytest.fixture(scope="function", autouse=True) -def check_fpu_mode(request): - """ - Check FPU precision mode was not changed during the test. - """ - old_mode = get_fpu_mode() - yield - new_mode = get_fpu_mode() - - if old_mode != new_mode: - raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}" - " during the test".format(old_mode, new_mode)) - - collect_result = _collect_results.get(request.node) - if collect_result is not None: - old_mode, new_mode = collect_result - raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}" - " when collecting the test".format(old_mode, - new_mode)) - - -@pytest.fixture(autouse=True) -def add_np(doctest_namespace): - doctest_namespace['np'] = numpy diff --git a/venv/lib/python3.7/site-packages/numpy/core/__init__.py b/venv/lib/python3.7/site-packages/numpy/core/__init__.py deleted file mode 100644 index c3b3f03..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/__init__.py +++ /dev/null @@ -1,154 +0,0 @@ -""" -Contains the core of NumPy: ndarray, ufuncs, dtypes, etc. - -Please note that this module is private. All functions and objects -are available in the main ``numpy`` namespace - use that instead. - -""" - -from __future__ import division, absolute_import, print_function - -from numpy.version import version as __version__ - -import os - -# disables OpenBLAS affinity setting of the main thread that limits -# python threads or processes to one core -env_added = [] -for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']: - if envkey not in os.environ: - os.environ[envkey] = '1' - env_added.append(envkey) - -try: - from . import multiarray -except ImportError as exc: - import sys - msg = """ - -IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE! 
- -Importing the numpy c-extensions failed. -- Try uninstalling and reinstalling numpy. -- If you have already done that, then: - 1. Check that you expected to use Python%d.%d from "%s", - and that you have no directories in your PATH or PYTHONPATH that can - interfere with the Python and numpy version "%s" you're trying to use. - 2. If (1) looks fine, you can open a new issue at - https://github.com/numpy/numpy/issues. Please include details on: - - how you installed Python - - how you installed numpy - - your operating system - - whether or not you have multiple versions of Python installed - - if you built from source, your compiler versions and ideally a build log - -- If you're working with a numpy git repository, try `git clean -xdf` - (removes all files not under version control) and rebuild numpy. - -Note: this error has many possible causes, so please don't comment on -an existing issue about this - open a new one instead. - -Original error was: %s -""" % (sys.version_info[0], sys.version_info[1], sys.executable, - __version__, exc) - raise ImportError(msg) -finally: - for envkey in env_added: - del os.environ[envkey] -del envkey -del env_added -del os - -from . import umath - -# Check that multiarray,umath are pure python modules wrapping -# _multiarray_umath and not either of the old c-extension modules -if not (hasattr(multiarray, '_multiarray_umath') and - hasattr(umath, '_multiarray_umath')): - import sys - path = sys.modules['numpy'].__path__ - msg = ("Something is wrong with the numpy installation. " - "While importing we detected an older version of " - "numpy in {}. One method of fixing this is to repeatedly uninstall " - "numpy until none is found, then reinstall this version.") - raise ImportError(msg.format(path)) - -from . import numerictypes as nt -multiarray.set_typeDict(nt.sctypeDict) -from . import numeric -from .numeric import * -from . import fromnumeric -from .fromnumeric import * -from . import defchararray as char -from . 
import records as rec -from .records import * -from .memmap import * -from .defchararray import chararray -from . import function_base -from .function_base import * -from . import machar -from .machar import * -from . import getlimits -from .getlimits import * -from . import shape_base -from .shape_base import * -from . import einsumfunc -from .einsumfunc import * -del nt - -from .fromnumeric import amax as max, amin as min, round_ as round -from .numeric import absolute as abs - -# do this after everything else, to minimize the chance of this misleadingly -# appearing in an import-time traceback -from . import _add_newdocs -# add these for module-freeze analysis (like PyInstaller) -from . import _dtype_ctypes -from . import _internal -from . import _dtype -from . import _methods - -__all__ = ['char', 'rec', 'memmap'] -__all__ += numeric.__all__ -__all__ += fromnumeric.__all__ -__all__ += rec.__all__ -__all__ += ['chararray'] -__all__ += function_base.__all__ -__all__ += machar.__all__ -__all__ += getlimits.__all__ -__all__ += shape_base.__all__ -__all__ += einsumfunc.__all__ - -# Make it possible so that ufuncs can be pickled -# Here are the loading and unloading functions -# The name numpy.core._ufunc_reconstruct must be -# available for unpickling to work. -def _ufunc_reconstruct(module, name): - # The `fromlist` kwarg is required to ensure that `mod` points to the - # inner-most module rather than the parent package when module name is - # nested. This makes it possible to pickle non-toplevel ufuncs such as - # scipy.special.expit for instance. 
- mod = __import__(module, fromlist=[name]) - return getattr(mod, name) - -def _ufunc_reduce(func): - from pickle import whichmodule - name = func.__name__ - return _ufunc_reconstruct, (whichmodule(func, name), name) - - -import sys -if sys.version_info[0] >= 3: - import copyreg -else: - import copy_reg as copyreg - -copyreg.pickle(ufunc, _ufunc_reduce, _ufunc_reconstruct) -# Unclutter namespace (must keep _ufunc_reconstruct for unpickling) -del copyreg -del sys -del _ufunc_reduce - -from numpy._pytesttester import PytestTester -test = PytestTester(__name__) -del PytestTester diff --git a/venv/lib/python3.7/site-packages/numpy/core/_add_newdocs.py b/venv/lib/python3.7/site-packages/numpy/core/_add_newdocs.py deleted file mode 100644 index 2f12739..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/_add_newdocs.py +++ /dev/null @@ -1,6874 +0,0 @@ -""" -This is only meant to add docs to objects defined in C-extension modules. -The purpose is to allow easier editing of the docstrings without -requiring a re-compile. - -NOTE: Many of the methods of ndarray have corresponding functions. - If you update these docstrings, please keep also the ones in - core/fromnumeric.py, core/defmatrix.py up-to-date. - -""" -from __future__ import division, absolute_import, print_function - -import sys - -from numpy.core import numerictypes as _numerictypes -from numpy.core import dtype -from numpy.core.function_base import add_newdoc - -############################################################################### -# -# flatiter -# -# flatiter needs a toplevel description -# -############################################################################### - -add_newdoc('numpy.core', 'flatiter', - """ - Flat iterator object to iterate over arrays. - - A `flatiter` iterator is returned by ``x.flat`` for any array `x`. - It allows iterating over the array as if it were a 1-D array, - either in a for-loop or by calling its `next` method. 
- - Iteration is done in row-major, C-style order (the last - index varying the fastest). The iterator can also be indexed using - basic slicing or advanced indexing. - - See Also - -------- - ndarray.flat : Return a flat iterator over an array. - ndarray.flatten : Returns a flattened copy of an array. - - Notes - ----- - A `flatiter` iterator can not be constructed directly from Python code - by calling the `flatiter` constructor. - - Examples - -------- - >>> x = np.arange(6).reshape(2, 3) - >>> fl = x.flat - >>> type(fl) - - >>> for item in fl: - ... print(item) - ... - 0 - 1 - 2 - 3 - 4 - 5 - - >>> fl[2:4] - array([2, 3]) - - """) - -# flatiter attributes - -add_newdoc('numpy.core', 'flatiter', ('base', - """ - A reference to the array that is iterated over. - - Examples - -------- - >>> x = np.arange(5) - >>> fl = x.flat - >>> fl.base is x - True - - """)) - - - -add_newdoc('numpy.core', 'flatiter', ('coords', - """ - An N-dimensional tuple of current coordinates. - - Examples - -------- - >>> x = np.arange(6).reshape(2, 3) - >>> fl = x.flat - >>> fl.coords - (0, 0) - >>> next(fl) - 0 - >>> fl.coords - (0, 1) - - """)) - - - -add_newdoc('numpy.core', 'flatiter', ('index', - """ - Current flat index into the array. - - Examples - -------- - >>> x = np.arange(6).reshape(2, 3) - >>> fl = x.flat - >>> fl.index - 0 - >>> next(fl) - 0 - >>> fl.index - 1 - - """)) - -# flatiter functions - -add_newdoc('numpy.core', 'flatiter', ('__array__', - """__array__(type=None) Get array from iterator - - """)) - - -add_newdoc('numpy.core', 'flatiter', ('copy', - """ - copy() - - Get a copy of the iterator as a 1-D array. 
- - Examples - -------- - >>> x = np.arange(6).reshape(2, 3) - >>> x - array([[0, 1, 2], - [3, 4, 5]]) - >>> fl = x.flat - >>> fl.copy() - array([0, 1, 2, 3, 4, 5]) - - """)) - - -############################################################################### -# -# nditer -# -############################################################################### - -add_newdoc('numpy.core', 'nditer', - """ - Efficient multi-dimensional iterator object to iterate over arrays. - To get started using this object, see the - :ref:`introductory guide to array iteration `. - - Parameters - ---------- - op : ndarray or sequence of array_like - The array(s) to iterate over. - - flags : sequence of str, optional - Flags to control the behavior of the iterator. - - * ``buffered`` enables buffering when required. - * ``c_index`` causes a C-order index to be tracked. - * ``f_index`` causes a Fortran-order index to be tracked. - * ``multi_index`` causes a multi-index, or a tuple of indices - with one per iteration dimension, to be tracked. - * ``common_dtype`` causes all the operands to be converted to - a common data type, with copying or buffering as necessary. - * ``copy_if_overlap`` causes the iterator to determine if read - operands have overlap with write operands, and make temporary - copies as necessary to avoid overlap. False positives (needless - copying) are possible in some cases. - * ``delay_bufalloc`` delays allocation of the buffers until - a reset() call is made. Allows ``allocate`` operands to - be initialized before their values are copied into the buffers. - * ``external_loop`` causes the ``values`` given to be - one-dimensional arrays with multiple values instead of - zero-dimensional arrays. - * ``grow_inner`` allows the ``value`` array sizes to be made - larger than the buffer size when both ``buffered`` and - ``external_loop`` is used. - * ``ranged`` allows the iterator to be restricted to a sub-range - of the iterindex values. 
- * ``refs_ok`` enables iteration of reference types, such as - object arrays. - * ``reduce_ok`` enables iteration of ``readwrite`` operands - which are broadcasted, also known as reduction operands. - * ``zerosize_ok`` allows `itersize` to be zero. - op_flags : list of list of str, optional - This is a list of flags for each operand. At minimum, one of - ``readonly``, ``readwrite``, or ``writeonly`` must be specified. - - * ``readonly`` indicates the operand will only be read from. - * ``readwrite`` indicates the operand will be read from and written to. - * ``writeonly`` indicates the operand will only be written to. - * ``no_broadcast`` prevents the operand from being broadcasted. - * ``contig`` forces the operand data to be contiguous. - * ``aligned`` forces the operand data to be aligned. - * ``nbo`` forces the operand data to be in native byte order. - * ``copy`` allows a temporary read-only copy if required. - * ``updateifcopy`` allows a temporary read-write copy if required. - * ``allocate`` causes the array to be allocated if it is None - in the ``op`` parameter. - * ``no_subtype`` prevents an ``allocate`` operand from using a subtype. - * ``arraymask`` indicates that this operand is the mask to use - for selecting elements when writing to operands with the - 'writemasked' flag set. The iterator does not enforce this, - but when writing from a buffer back to the array, it only - copies those elements indicated by this mask. - * ``writemasked`` indicates that only elements where the chosen - ``arraymask`` operand is True will be written to. - * ``overlap_assume_elementwise`` can be used to mark operands that are - accessed only in the iterator order, to allow less conservative - copying when ``copy_if_overlap`` is present. - op_dtypes : dtype or tuple of dtype(s), optional - The required data type(s) of the operands. If copying or buffering - is enabled, the data will be converted to/from their original types. 
- order : {'C', 'F', 'A', 'K'}, optional - Controls the iteration order. 'C' means C order, 'F' means - Fortran order, 'A' means 'F' order if all the arrays are Fortran - contiguous, 'C' order otherwise, and 'K' means as close to the - order the array elements appear in memory as possible. This also - affects the element memory order of ``allocate`` operands, as they - are allocated to be compatible with iteration order. - Default is 'K'. - casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional - Controls what kind of data casting may occur when making a copy - or buffering. Setting this to 'unsafe' is not recommended, - as it can adversely affect accumulations. - - * 'no' means the data types should not be cast at all. - * 'equiv' means only byte-order changes are allowed. - * 'safe' means only casts which can preserve values are allowed. - * 'same_kind' means only safe casts or casts within a kind, - like float64 to float32, are allowed. - * 'unsafe' means any data conversions may be done. - op_axes : list of list of ints, optional - If provided, is a list of ints or None for each operands. - The list of axes for an operand is a mapping from the dimensions - of the iterator to the dimensions of the operand. A value of - -1 can be placed for entries, causing that dimension to be - treated as `newaxis`. - itershape : tuple of ints, optional - The desired shape of the iterator. This allows ``allocate`` operands - with a dimension mapped by op_axes not corresponding to a dimension - of a different operand to get a value not equal to 1 for that - dimension. - buffersize : int, optional - When buffering is enabled, controls the size of the temporary - buffers. Set to 0 for the default value. - - Attributes - ---------- - dtypes : tuple of dtype(s) - The data types of the values provided in `value`. This may be - different from the operand data types if buffering is enabled. - Valid only before the iterator is closed. 
- finished : bool - Whether the iteration over the operands is finished or not. - has_delayed_bufalloc : bool - If True, the iterator was created with the ``delay_bufalloc`` flag, - and no reset() function was called on it yet. - has_index : bool - If True, the iterator was created with either the ``c_index`` or - the ``f_index`` flag, and the property `index` can be used to - retrieve it. - has_multi_index : bool - If True, the iterator was created with the ``multi_index`` flag, - and the property `multi_index` can be used to retrieve it. - index - When the ``c_index`` or ``f_index`` flag was used, this property - provides access to the index. Raises a ValueError if accessed - and ``has_index`` is False. - iterationneedsapi : bool - Whether iteration requires access to the Python API, for example - if one of the operands is an object array. - iterindex : int - An index which matches the order of iteration. - itersize : int - Size of the iterator. - itviews - Structured view(s) of `operands` in memory, matching the reordered - and optimized iterator access pattern. Valid only before the iterator - is closed. - multi_index - When the ``multi_index`` flag was used, this property - provides access to the index. Raises a ValueError if accessed - accessed and ``has_multi_index`` is False. - ndim : int - The dimensions of the iterator. - nop : int - The number of iterator operands. - operands : tuple of operand(s) - The array(s) to be iterated over. Valid only before the iterator is - closed. - shape : tuple of ints - Shape tuple, the shape of the iterator. - value - Value of ``operands`` at current iteration. Normally, this is a - tuple of array scalars, but if the flag ``external_loop`` is used, - it is a tuple of one dimensional arrays. - - Notes - ----- - `nditer` supersedes `flatiter`. The iterator implementation behind - `nditer` is also exposed by the NumPy C API. 
- - The Python exposure supplies two iteration interfaces, one which follows - the Python iterator protocol, and another which mirrors the C-style - do-while pattern. The native Python approach is better in most cases, but - if you need the coordinates or index of an iterator, use the C-style pattern. - - Examples - -------- - Here is how we might write an ``iter_add`` function, using the - Python iterator protocol: - - >>> def iter_add_py(x, y, out=None): - ... addop = np.add - ... it = np.nditer([x, y, out], [], - ... [['readonly'], ['readonly'], ['writeonly','allocate']]) - ... with it: - ... for (a, b, c) in it: - ... addop(a, b, out=c) - ... return it.operands[2] - - Here is the same function, but following the C-style pattern: - - >>> def iter_add(x, y, out=None): - ... addop = np.add - ... it = np.nditer([x, y, out], [], - ... [['readonly'], ['readonly'], ['writeonly','allocate']]) - ... with it: - ... while not it.finished: - ... addop(it[0], it[1], out=it[2]) - ... it.iternext() - ... return it.operands[2] - - Here is an example outer product function: - - >>> def outer_it(x, y, out=None): - ... mulop = np.multiply - ... it = np.nditer([x, y, out], ['external_loop'], - ... [['readonly'], ['readonly'], ['writeonly', 'allocate']], - ... op_axes=[list(range(x.ndim)) + [-1] * y.ndim, - ... [-1] * x.ndim + list(range(y.ndim)), - ... None]) - ... with it: - ... for (a, b, c) in it: - ... mulop(a, b, out=c) - ... return it.operands[2] - - >>> a = np.arange(2)+1 - >>> b = np.arange(3)+1 - >>> outer_it(a,b) - array([[1, 2, 3], - [2, 4, 6]]) - - Here is an example function which operates like a "lambda" ufunc: - - >>> def luf(lamdaexpr, *args, **kwargs): - ... '''luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)''' - ... nargs = len(args) - ... op = (kwargs.get('out',None),) + args - ... it = np.nditer(op, ['buffered','external_loop'], - ... [['writeonly','allocate','no_broadcast']] + - ... [['readonly','nbo','aligned']]*nargs, - ... 
order=kwargs.get('order','K'), - ... casting=kwargs.get('casting','safe'), - ... buffersize=kwargs.get('buffersize',0)) - ... while not it.finished: - ... it[0] = lamdaexpr(*it[1:]) - ... it.iternext() - ... return it.operands[0] - - >>> a = np.arange(5) - >>> b = np.ones(5) - >>> luf(lambda i,j:i*i + j/2, a, b) - array([ 0.5, 1.5, 4.5, 9.5, 16.5]) - - If operand flags `"writeonly"` or `"readwrite"` are used the - operands may be views into the original data with the - `WRITEBACKIFCOPY` flag. In this case `nditer` must be used as a - context manager or the `nditer.close` method must be called before - using the result. The temporary data will be written back to the - original data when the `__exit__` function is called but not before: - - >>> a = np.arange(6, dtype='i4')[::-2] - >>> with np.nditer(a, [], - ... [['writeonly', 'updateifcopy']], - ... casting='unsafe', - ... op_dtypes=[np.dtype('f4')]) as i: - ... x = i.operands[0] - ... x[:] = [-1, -2, -3] - ... # a still unchanged here - >>> a, x - (array([-1, -2, -3], dtype=int32), array([-1., -2., -3.], dtype=float32)) - - It is important to note that once the iterator is exited, dangling - references (like `x` in the example) may or may not share data with - the original data `a`. If writeback semantics were active, i.e. if - `x.base.flags.writebackifcopy` is `True`, then exiting the iterator - will sever the connection between `x` and `a`, writing to `x` will - no longer write to `a`. If writeback semantics are not active, then - `x.data` will still point at some part of `a.data`, and writing to - one will affect the other. - - Context management and the `close` method appeared in version 1.15.0. - - """) - -# nditer methods - -add_newdoc('numpy.core', 'nditer', ('copy', - """ - copy() - - Get a copy of the iterator in its current state. 
- - Examples - -------- - >>> x = np.arange(10) - >>> y = x + 1 - >>> it = np.nditer([x, y]) - >>> next(it) - (array(0), array(1)) - >>> it2 = it.copy() - >>> next(it2) - (array(1), array(2)) - - """)) - -add_newdoc('numpy.core', 'nditer', ('operands', - """ - operands[`Slice`] - - The array(s) to be iterated over. Valid only before the iterator is closed. - """)) - -add_newdoc('numpy.core', 'nditer', ('debug_print', - """ - debug_print() - - Print the current state of the `nditer` instance and debug info to stdout. - - """)) - -add_newdoc('numpy.core', 'nditer', ('enable_external_loop', - """ - enable_external_loop() - - When the "external_loop" was not used during construction, but - is desired, this modifies the iterator to behave as if the flag - was specified. - - """)) - -add_newdoc('numpy.core', 'nditer', ('iternext', - """ - iternext() - - Check whether iterations are left, and perform a single internal iteration - without returning the result. Used in the C-style pattern do-while - pattern. For an example, see `nditer`. - - Returns - ------- - iternext : bool - Whether or not there are iterations left. - - """)) - -add_newdoc('numpy.core', 'nditer', ('remove_axis', - """ - remove_axis(i) - - Removes axis `i` from the iterator. Requires that the flag "multi_index" - be enabled. - - """)) - -add_newdoc('numpy.core', 'nditer', ('remove_multi_index', - """ - remove_multi_index() - - When the "multi_index" flag was specified, this removes it, allowing - the internal iteration structure to be optimized further. - - """)) - -add_newdoc('numpy.core', 'nditer', ('reset', - """ - reset() - - Reset the iterator to its initial state. - - """)) - -add_newdoc('numpy.core', 'nested_iters', - """ - Create nditers for use in nested loops - - Create a tuple of `nditer` objects which iterate in nested loops over - different axes of the op argument. The first iterator is used in the - outermost loop, the last in the innermost loop. 
Advancing one will change - the subsequent iterators to point at its new element. - - Parameters - ---------- - op : ndarray or sequence of array_like - The array(s) to iterate over. - - axes : list of list of int - Each item is used as an "op_axes" argument to an nditer - - flags, op_flags, op_dtypes, order, casting, buffersize (optional) - See `nditer` parameters of the same name - - Returns - ------- - iters : tuple of nditer - An nditer for each item in `axes`, outermost first - - See Also - -------- - nditer - - Examples - -------- - - Basic usage. Note how y is the "flattened" version of - [a[:, 0, :], a[:, 1, 0], a[:, 2, :]] since we specified - the first iter's axes as [1] - - >>> a = np.arange(12).reshape(2, 3, 2) - >>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"]) - >>> for x in i: - ... print(i.multi_index) - ... for y in j: - ... print('', j.multi_index, y) - (0,) - (0, 0) 0 - (0, 1) 1 - (1, 0) 6 - (1, 1) 7 - (1,) - (0, 0) 2 - (0, 1) 3 - (1, 0) 8 - (1, 1) 9 - (2,) - (0, 0) 4 - (0, 1) 5 - (1, 0) 10 - (1, 1) 11 - - """) - -add_newdoc('numpy.core', 'nditer', ('close', - """ - close() - - Resolve all writeback semantics in writeable operands. - - .. versionadded:: 1.15.0 - - See Also - -------- - - :ref:`nditer-context-manager` - - """)) - - -############################################################################### -# -# broadcast -# -############################################################################### - -add_newdoc('numpy.core', 'broadcast', - """ - Produce an object that mimics broadcasting. - - Parameters - ---------- - in1, in2, ... : array_like - Input parameters. - - Returns - ------- - b : broadcast object - Broadcast the input parameters against one another, and - return an object that encapsulates the result. - Amongst others, it has ``shape`` and ``nd`` properties, and - may be used as an iterator. 
- - See Also - -------- - broadcast_arrays - broadcast_to - - Examples - -------- - - Manually adding two vectors, using broadcasting: - - >>> x = np.array([[1], [2], [3]]) - >>> y = np.array([4, 5, 6]) - >>> b = np.broadcast(x, y) - - >>> out = np.empty(b.shape) - >>> out.flat = [u+v for (u,v) in b] - >>> out - array([[5., 6., 7.], - [6., 7., 8.], - [7., 8., 9.]]) - - Compare against built-in broadcasting: - - >>> x + y - array([[5, 6, 7], - [6, 7, 8], - [7, 8, 9]]) - - """) - -# attributes - -add_newdoc('numpy.core', 'broadcast', ('index', - """ - current index in broadcasted result - - Examples - -------- - >>> x = np.array([[1], [2], [3]]) - >>> y = np.array([4, 5, 6]) - >>> b = np.broadcast(x, y) - >>> b.index - 0 - >>> next(b), next(b), next(b) - ((1, 4), (1, 5), (1, 6)) - >>> b.index - 3 - - """)) - -add_newdoc('numpy.core', 'broadcast', ('iters', - """ - tuple of iterators along ``self``'s "components." - - Returns a tuple of `numpy.flatiter` objects, one for each "component" - of ``self``. - - See Also - -------- - numpy.flatiter - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]]) - >>> b = np.broadcast(x, y) - >>> row, col = b.iters - >>> next(row), next(col) - (1, 4) - - """)) - -add_newdoc('numpy.core', 'broadcast', ('ndim', - """ - Number of dimensions of broadcasted result. Alias for `nd`. - - .. versionadded:: 1.12.0 - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]]) - >>> b = np.broadcast(x, y) - >>> b.ndim - 2 - - """)) - -add_newdoc('numpy.core', 'broadcast', ('nd', - """ - Number of dimensions of broadcasted result. For code intended for NumPy - 1.12.0 and later the more consistent `ndim` is preferred. - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]]) - >>> b = np.broadcast(x, y) - >>> b.nd - 2 - - """)) - -add_newdoc('numpy.core', 'broadcast', ('numiter', - """ - Number of iterators possessed by the broadcasted result. 
- - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]]) - >>> b = np.broadcast(x, y) - >>> b.numiter - 2 - - """)) - -add_newdoc('numpy.core', 'broadcast', ('shape', - """ - Shape of broadcasted result. - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]]) - >>> b = np.broadcast(x, y) - >>> b.shape - (3, 3) - - """)) - -add_newdoc('numpy.core', 'broadcast', ('size', - """ - Total size of broadcasted result. - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]]) - >>> b = np.broadcast(x, y) - >>> b.size - 9 - - """)) - -add_newdoc('numpy.core', 'broadcast', ('reset', - """ - reset() - - Reset the broadcasted result's iterator(s). - - Parameters - ---------- - None - - Returns - ------- - None - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]]) - >>> b = np.broadcast(x, y) - >>> b.index - 0 - >>> next(b), next(b), next(b) - ((1, 4), (2, 4), (3, 4)) - >>> b.index - 3 - >>> b.reset() - >>> b.index - 0 - - """)) - -############################################################################### -# -# numpy functions -# -############################################################################### - -add_newdoc('numpy.core.multiarray', 'array', - """ - array(object, dtype=None, copy=True, order='K', subok=False, ndmin=0) - - Create an array. - - Parameters - ---------- - object : array_like - An array, any object exposing the array interface, an object whose - __array__ method returns an array, or any (nested) sequence. - dtype : data-type, optional - The desired data-type for the array. If not given, then the type will - be determined as the minimum type required to hold the objects in the - sequence. - copy : bool, optional - If true (default), then the object is copied. 
Otherwise, a copy will - only be made if __array__ returns a copy, if obj is a nested sequence, - or if a copy is needed to satisfy any of the other requirements - (`dtype`, `order`, etc.). - order : {'K', 'A', 'C', 'F'}, optional - Specify the memory layout of the array. If object is not an array, the - newly created array will be in C order (row major) unless 'F' is - specified, in which case it will be in Fortran order (column major). - If object is an array the following holds. - - ===== ========= =================================================== - order no copy copy=True - ===== ========= =================================================== - 'K' unchanged F & C order preserved, otherwise most similar order - 'A' unchanged F order if input is F and not C, otherwise C order - 'C' C order C order - 'F' F order F order - ===== ========= =================================================== - - When ``copy=False`` and a copy is made for other reasons, the result is - the same as if ``copy=True``, with some exceptions for `A`, see the - Notes section. The default order is 'K'. - subok : bool, optional - If True, then sub-classes will be passed-through, otherwise - the returned array will be forced to be a base-class array (default). - ndmin : int, optional - Specifies the minimum number of dimensions that the resulting - array should have. Ones will be pre-pended to the shape as - needed to meet this requirement. - - Returns - ------- - out : ndarray - An array object satisfying the specified requirements. - - See Also - -------- - empty_like : Return an empty array with shape and type of input. - ones_like : Return an array of ones with shape and type of input. - zeros_like : Return an array of zeros with shape and type of input. - full_like : Return a new array with shape of input filled with value. - empty : Return a new uninitialized array. - ones : Return a new array setting values to one. - zeros : Return a new array setting values to zero. 
- full : Return a new array of given shape filled with value. - - - Notes - ----- - When order is 'A' and `object` is an array in neither 'C' nor 'F' order, - and a copy is forced by a change in dtype, then the order of the result is - not necessarily 'C' as expected. This is likely a bug. - - Examples - -------- - >>> np.array([1, 2, 3]) - array([1, 2, 3]) - - Upcasting: - - >>> np.array([1, 2, 3.0]) - array([ 1., 2., 3.]) - - More than one dimension: - - >>> np.array([[1, 2], [3, 4]]) - array([[1, 2], - [3, 4]]) - - Minimum dimensions 2: - - >>> np.array([1, 2, 3], ndmin=2) - array([[1, 2, 3]]) - - Type provided: - - >>> np.array([1, 2, 3], dtype=complex) - array([ 1.+0.j, 2.+0.j, 3.+0.j]) - - Data-type consisting of more than one element: - - >>> x = np.array([(1,2),(3,4)],dtype=[('a','>> x['a'] - array([1, 3]) - - Creating an array from sub-classes: - - >>> np.array(np.mat('1 2; 3 4')) - array([[1, 2], - [3, 4]]) - - >>> np.array(np.mat('1 2; 3 4'), subok=True) - matrix([[1, 2], - [3, 4]]) - - """) - -add_newdoc('numpy.core.multiarray', 'empty', - """ - empty(shape, dtype=float, order='C') - - Return a new array of given shape and type, without initializing entries. - - Parameters - ---------- - shape : int or tuple of int - Shape of the empty array, e.g., ``(2, 3)`` or ``2``. - dtype : data-type, optional - Desired output data-type for the array, e.g, `numpy.int8`. Default is - `numpy.float64`. - order : {'C', 'F'}, optional, default: 'C' - Whether to store multi-dimensional data in row-major - (C-style) or column-major (Fortran-style) order in - memory. - - Returns - ------- - out : ndarray - Array of uninitialized (arbitrary) data of the given shape, dtype, and - order. Object arrays will be initialized to None. - - See Also - -------- - empty_like : Return an empty array with shape and type of input. - ones : Return a new array setting values to one. - zeros : Return a new array setting values to zero. 
- full : Return a new array of given shape filled with value. - - - Notes - ----- - `empty`, unlike `zeros`, does not set the array values to zero, - and may therefore be marginally faster. On the other hand, it requires - the user to manually set all the values in the array, and should be - used with caution. - - Examples - -------- - >>> np.empty([2, 2]) - array([[ -9.74499359e+001, 6.69583040e-309], - [ 2.13182611e-314, 3.06959433e-309]]) #uninitialized - - >>> np.empty([2, 2], dtype=int) - array([[-1073741821, -1067949133], - [ 496041986, 19249760]]) #uninitialized - - """) - -add_newdoc('numpy.core.multiarray', 'scalar', - """ - scalar(dtype, obj) - - Return a new scalar array of the given type initialized with obj. - - This function is meant mainly for pickle support. `dtype` must be a - valid data-type descriptor. If `dtype` corresponds to an object - descriptor, then `obj` can be any object, otherwise `obj` must be a - string. If `obj` is not given, it will be interpreted as None for object - type and as zeros for all other types. - - """) - -add_newdoc('numpy.core.multiarray', 'zeros', - """ - zeros(shape, dtype=float, order='C') - - Return a new array of given shape and type, filled with zeros. - - Parameters - ---------- - shape : int or tuple of ints - Shape of the new array, e.g., ``(2, 3)`` or ``2``. - dtype : data-type, optional - The desired data-type for the array, e.g., `numpy.int8`. Default is - `numpy.float64`. - order : {'C', 'F'}, optional, default: 'C' - Whether to store multi-dimensional data in row-major - (C-style) or column-major (Fortran-style) order in - memory. - - Returns - ------- - out : ndarray - Array of zeros with the given shape, dtype, and order. - - See Also - -------- - zeros_like : Return an array of zeros with shape and type of input. - empty : Return a new uninitialized array. - ones : Return a new array setting values to one. - full : Return a new array of given shape filled with value. 
- - Examples - -------- - >>> np.zeros(5) - array([ 0., 0., 0., 0., 0.]) - - >>> np.zeros((5,), dtype=int) - array([0, 0, 0, 0, 0]) - - >>> np.zeros((2, 1)) - array([[ 0.], - [ 0.]]) - - >>> s = (2,2) - >>> np.zeros(s) - array([[ 0., 0.], - [ 0., 0.]]) - - >>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype - array([(0, 0), (0, 0)], - dtype=[('x', '>> np.fromstring('1 2', dtype=int, sep=' ') - array([1, 2]) - >>> np.fromstring('1, 2', dtype=int, sep=',') - array([1, 2]) - - """) - -add_newdoc('numpy.core.multiarray', 'compare_chararrays', - """ - compare_chararrays(a, b, cmp_op, rstrip) - - Performs element-wise comparison of two string arrays using the - comparison operator specified by `cmp_op`. - - Parameters - ---------- - a, b : array_like - Arrays to be compared. - cmp_op : {"<", "<=", "==", ">=", ">", "!="} - Type of comparison. - rstrip : Boolean - If True, the spaces at the end of Strings are removed before the comparison. - - Returns - ------- - out : ndarray - The output array of type Boolean with the same shape as a and b. - - Raises - ------ - ValueError - If `cmp_op` is not valid. - TypeError - If at least one of `a` or `b` is a non-string array - - Examples - -------- - >>> a = np.array(["a", "b", "cde"]) - >>> b = np.array(["a", "a", "dec"]) - >>> np.compare_chararrays(a, b, ">", True) - array([False, True, False]) - - """) - -add_newdoc('numpy.core.multiarray', 'fromiter', - """ - fromiter(iterable, dtype, count=-1) - - Create a new 1-dimensional array from an iterable object. - - Parameters - ---------- - iterable : iterable object - An iterable object providing data for the array. - dtype : data-type - The data-type of the returned array. - count : int, optional - The number of items to read from *iterable*. The default is -1, - which means all data is read. - - Returns - ------- - out : ndarray - The output array. - - Notes - ----- - Specify `count` to improve performance. 
It allows ``fromiter`` to - pre-allocate the output array, instead of resizing it on demand. - - Examples - -------- - >>> iterable = (x*x for x in range(5)) - >>> np.fromiter(iterable, float) - array([ 0., 1., 4., 9., 16.]) - - """) - -add_newdoc('numpy.core.multiarray', 'fromfile', - """ - fromfile(file, dtype=float, count=-1, sep='', offset=0) - - Construct an array from data in a text or binary file. - - A highly efficient way of reading binary data with a known data-type, - as well as parsing simply formatted text files. Data written using the - `tofile` method can be read using this function. - - Parameters - ---------- - file : file or str or Path - Open file object or filename. - - .. versionchanged:: 1.17.0 - `pathlib.Path` objects are now accepted. - - dtype : data-type - Data type of the returned array. - For binary files, it is used to determine the size and byte-order - of the items in the file. - Most builtin numeric types are supported and extension types may be supported. - - .. versionadded:: 1.18.0 - Complex dtypes. - - count : int - Number of items to read. ``-1`` means all items (i.e., the complete - file). - sep : str - Separator between items if file is a text file. - Empty ("") separator means the file should be treated as binary. - Spaces (" ") in the separator match zero or more whitespace characters. - A separator consisting only of spaces must match at least one - whitespace. - offset : int - The offset (in bytes) from the file's current position. Defaults to 0. - Only permitted for binary files. - - .. versionadded:: 1.17.0 - - See also - -------- - load, save - ndarray.tofile - loadtxt : More flexible way of loading data from a text file. - - Notes - ----- - Do not rely on the combination of `tofile` and `fromfile` for - data storage, as the binary files generated are not platform - independent. In particular, no byte-order or data-type information is - saved. 
Data can be stored in the platform independent ``.npy`` format - using `save` and `load` instead. - - Examples - -------- - Construct an ndarray: - - >>> dt = np.dtype([('time', [('min', np.int64), ('sec', np.int64)]), - ... ('temp', float)]) - >>> x = np.zeros((1,), dtype=dt) - >>> x['time']['min'] = 10; x['temp'] = 98.25 - >>> x - array([((10, 0), 98.25)], - dtype=[('time', [('min', '>> import tempfile - >>> fname = tempfile.mkstemp()[1] - >>> x.tofile(fname) - - Read the raw data from disk: - - >>> np.fromfile(fname, dtype=dt) - array([((10, 0), 98.25)], - dtype=[('time', [('min', '>> np.save(fname, x) - >>> np.load(fname + '.npy') - array([((10, 0), 98.25)], - dtype=[('time', [('min', '>> dt = np.dtype(int) - >>> dt = dt.newbyteorder('>') - >>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP - - The data of the resulting array will not be byteswapped, but will be - interpreted correctly. - - Examples - -------- - >>> s = b'hello world' - >>> np.frombuffer(s, dtype='S1', count=5, offset=6) - array([b'w', b'o', b'r', b'l', b'd'], dtype='|S1') - - >>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8) - array([1, 2], dtype=uint8) - >>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3) - array([1, 2, 3], dtype=uint8) - - """) - -add_newdoc('numpy.core', 'fastCopyAndTranspose', - """_fastCopyAndTranspose(a)""") - -add_newdoc('numpy.core.multiarray', 'correlate', - """cross_correlate(a,v, mode=0)""") - -add_newdoc('numpy.core.multiarray', 'arange', - """ - arange([start,] stop[, step,], dtype=None) - - Return evenly spaced values within a given interval. - - Values are generated within the half-open interval ``[start, stop)`` - (in other words, the interval including `start` but excluding `stop`). - For integer arguments the function is equivalent to the Python built-in - `range` function, but returns an ndarray rather than a list. - - When using a non-integer step, such as 0.1, the results will often not - be consistent. 
It is better to use `numpy.linspace` for these cases. - - Parameters - ---------- - start : number, optional - Start of interval. The interval includes this value. The default - start value is 0. - stop : number - End of interval. The interval does not include this value, except - in some cases where `step` is not an integer and floating point - round-off affects the length of `out`. - step : number, optional - Spacing between values. For any output `out`, this is the distance - between two adjacent values, ``out[i+1] - out[i]``. The default - step size is 1. If `step` is specified as a position argument, - `start` must also be given. - dtype : dtype - The type of the output array. If `dtype` is not given, infer the data - type from the other input arguments. - - Returns - ------- - arange : ndarray - Array of evenly spaced values. - - For floating point arguments, the length of the result is - ``ceil((stop - start)/step)``. Because of floating point overflow, - this rule may result in the last element of `out` being greater - than `stop`. - - See Also - -------- - numpy.linspace : Evenly spaced numbers with careful handling of endpoints. - numpy.ogrid: Arrays of evenly spaced numbers in N-dimensions. - numpy.mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions. - - Examples - -------- - >>> np.arange(3) - array([0, 1, 2]) - >>> np.arange(3.0) - array([ 0., 1., 2.]) - >>> np.arange(3,7) - array([3, 4, 5, 6]) - >>> np.arange(3,7,2) - array([3, 5]) - - """) - -add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version', - """_get_ndarray_c_version() - - Return the compile time NPY_VERSION (formerly called NDARRAY_VERSION) number. - - """) - -add_newdoc('numpy.core.multiarray', '_reconstruct', - """_reconstruct(subtype, shape, dtype) - - Construct an empty array. Used by Pickles. 
- - """) - - -add_newdoc('numpy.core.multiarray', 'set_string_function', - """ - set_string_function(f, repr=1) - - Internal method to set a function to be used when pretty printing arrays. - - """) - -add_newdoc('numpy.core.multiarray', 'set_numeric_ops', - """ - set_numeric_ops(op1=func1, op2=func2, ...) - - Set numerical operators for array objects. - - .. deprecated:: 1.16 - - For the general case, use :c:func:`PyUFunc_ReplaceLoopBySignature`. - For ndarray subclasses, define the ``__array_ufunc__`` method and - override the relevant ufunc. - - Parameters - ---------- - op1, op2, ... : callable - Each ``op = func`` pair describes an operator to be replaced. - For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace - addition by modulus 5 addition. - - Returns - ------- - saved_ops : list of callables - A list of all operators, stored before making replacements. - - Notes - ----- - .. WARNING:: - Use with care! Incorrect usage may lead to memory errors. - - A function replacing an operator cannot make use of that operator. - For example, when replacing add, you may not use ``+``. Instead, - directly call ufuncs. - - Examples - -------- - >>> def add_mod5(x, y): - ... return np.add(x, y) % 5 - ... - >>> old_funcs = np.set_numeric_ops(add=add_mod5) - - >>> x = np.arange(12).reshape((3, 4)) - >>> x + x - array([[0, 2, 4, 1], - [3, 0, 2, 4], - [1, 3, 0, 2]]) - - >>> ignore = np.set_numeric_ops(**old_funcs) # restore operators - - """) - -add_newdoc('numpy.core.multiarray', 'promote_types', - """ - promote_types(type1, type2) - - Returns the data type with the smallest size and smallest scalar - kind to which both ``type1`` and ``type2`` may be safely cast. - The returned data type is always in native byte order. - - This function is symmetric, but rarely associative. - - Parameters - ---------- - type1 : dtype or dtype specifier - First data type. - type2 : dtype or dtype specifier - Second data type. 
- - Returns - ------- - out : dtype - The promoted data type. - - Notes - ----- - .. versionadded:: 1.6.0 - - Starting in NumPy 1.9, promote_types function now returns a valid string - length when given an integer or float dtype as one argument and a string - dtype as another argument. Previously it always returned the input string - dtype, even if it wasn't long enough to store the max integer/float value - converted to a string. - - See Also - -------- - result_type, dtype, can_cast - - Examples - -------- - >>> np.promote_types('f4', 'f8') - dtype('float64') - - >>> np.promote_types('i8', 'f4') - dtype('float64') - - >>> np.promote_types('>i8', '>> np.promote_types('i4', 'S8') - dtype('S11') - - An example of a non-associative case: - - >>> p = np.promote_types - >>> p('S', p('i1', 'u1')) - dtype('S6') - >>> p(p('S', 'i1'), 'u1') - dtype('S4') - - """) - -if sys.version_info.major < 3: - add_newdoc('numpy.core.multiarray', 'newbuffer', - """ - newbuffer(size) - - Return a new uninitialized buffer object. - - Parameters - ---------- - size : int - Size in bytes of returned buffer object. - - Returns - ------- - newbuffer : buffer object - Returned, uninitialized buffer object of `size` bytes. - - """) - - add_newdoc('numpy.core.multiarray', 'getbuffer', - """ - getbuffer(obj [,offset[, size]]) - - Create a buffer object from the given object referencing a slice of - length size starting at offset. - - Default is the entire buffer. A read-write buffer is attempted followed - by a read-only buffer. 
- - Parameters - ---------- - obj : object - - offset : int, optional - - size : int, optional - - Returns - ------- - buffer_obj : buffer - - Examples - -------- - >>> buf = np.getbuffer(np.ones(5), 1, 3) - >>> len(buf) - 3 - >>> buf[0] - '\\x00' - >>> buf - - - """) - -add_newdoc('numpy.core.multiarray', 'c_einsum', - """ - c_einsum(subscripts, *operands, out=None, dtype=None, order='K', - casting='safe') - - *This documentation shadows that of the native python implementation of the `einsum` function, - except all references and examples related to the `optimize` argument (v 0.12.0) have been removed.* - - Evaluates the Einstein summation convention on the operands. - - Using the Einstein summation convention, many common multi-dimensional, - linear algebraic array operations can be represented in a simple fashion. - In *implicit* mode `einsum` computes these values. - - In *explicit* mode, `einsum` provides further flexibility to compute - other array operations that might not be considered classical Einstein - summation operations, by disabling, or forcing summation over specified - subscript labels. - - See the notes and examples for clarification. - - Parameters - ---------- - subscripts : str - Specifies the subscripts for summation as comma separated list of - subscript labels. An implicit (classical Einstein summation) - calculation is performed unless the explicit indicator '->' is - included as well as subscript labels of the precise output form. - operands : list of array_like - These are the arrays for the operation. - out : ndarray, optional - If provided, the calculation is done into this array. - dtype : {data-type, None}, optional - If provided, forces the calculation to use the data type specified. - Note that you may have to also give a more liberal `casting` - parameter to allow the conversions. Default is None. - order : {'C', 'F', 'A', 'K'}, optional - Controls the memory layout of the output. 'C' means it should - be C contiguous. 
'F' means it should be Fortran contiguous, - 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise. - 'K' means it should be as close to the layout as the inputs as - is possible, including arbitrarily permuted axes. - Default is 'K'. - casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional - Controls what kind of data casting may occur. Setting this to - 'unsafe' is not recommended, as it can adversely affect accumulations. - - * 'no' means the data types should not be cast at all. - * 'equiv' means only byte-order changes are allowed. - * 'safe' means only casts which can preserve values are allowed. - * 'same_kind' means only safe casts or casts within a kind, - like float64 to float32, are allowed. - * 'unsafe' means any data conversions may be done. - - Default is 'safe'. - optimize : {False, True, 'greedy', 'optimal'}, optional - Controls if intermediate optimization should occur. No optimization - will occur if False and True will default to the 'greedy' algorithm. - Also accepts an explicit contraction list from the ``np.einsum_path`` - function. See ``np.einsum_path`` for more details. Defaults to False. - - Returns - ------- - output : ndarray - The calculation based on the Einstein summation convention. - - See Also - -------- - einsum_path, dot, inner, outer, tensordot, linalg.multi_dot - - Notes - ----- - .. versionadded:: 1.6.0 - - The Einstein summation convention can be used to compute - many multi-dimensional, linear algebraic array operations. `einsum` - provides a succinct way of representing these. - - A non-exhaustive list of these operations, - which can be computed by `einsum`, is shown below along with examples: - - * Trace of an array, :py:func:`numpy.trace`. - * Return a diagonal, :py:func:`numpy.diag`. - * Array axis summations, :py:func:`numpy.sum`. - * Transpositions and permutations, :py:func:`numpy.transpose`. - * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`. 
- * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`. - * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`. - * Tensor contractions, :py:func:`numpy.tensordot`. - * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`. - - The subscripts string is a comma-separated list of subscript labels, - where each label refers to a dimension of the corresponding operand. - Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)`` - is equivalent to :py:func:`np.inner(a,b) `. If a label - appears only once, it is not summed, so ``np.einsum('i', a)`` produces a - view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)`` - describes traditional matrix multiplication and is equivalent to - :py:func:`np.matmul(a,b) `. Repeated subscript labels in one - operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent - to :py:func:`np.trace(a) `. - - In *implicit mode*, the chosen subscripts are important - since the axes of the output are reordered alphabetically. This - means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while - ``np.einsum('ji', a)`` takes its transpose. Additionally, - ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while, - ``np.einsum('ij,jh', a, b)`` returns the transpose of the - multiplication since subscript 'h' precedes subscript 'i'. - - In *explicit mode* the output can be directly controlled by - specifying output subscript labels. This requires the - identifier '->' as well as the list of output subscript labels. - This feature increases the flexibility of the function since - summing can be disabled or forced when required. The call - ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) `, - and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) `. - The difference is that `einsum` does not allow broadcasting by default. 
- Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the - order of the output subscript labels and therefore returns matrix - multiplication, unlike the example above in implicit mode. - - To enable and control broadcasting, use an ellipsis. Default - NumPy-style broadcasting is done by adding an ellipsis - to the left of each term, like ``np.einsum('...ii->...i', a)``. - To take the trace along the first and last axes, - you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix - product with the left-most indices instead of rightmost, one can do - ``np.einsum('ij...,jk...->ik...', a, b)``. - - When there is only one operand, no axes are summed, and no output - parameter is provided, a view into the operand is returned instead - of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)`` - produces a view (changed in version 1.10.0). - - `einsum` also provides an alternative way to provide the subscripts - and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. - If the output shape is not provided in this format `einsum` will be - calculated in implicit mode, otherwise it will be performed explicitly. - The examples below have corresponding `einsum` calls with the two - parameter methods. - - .. versionadded:: 1.10.0 - - Views returned from einsum are now writeable whenever the input array - is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now - have the same effect as :py:func:`np.swapaxes(a, 0, 2) ` - and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal - of a 2D array. 
- - Examples - -------- - >>> a = np.arange(25).reshape(5,5) - >>> b = np.arange(5) - >>> c = np.arange(6).reshape(2,3) - - Trace of a matrix: - - >>> np.einsum('ii', a) - 60 - >>> np.einsum(a, [0,0]) - 60 - >>> np.trace(a) - 60 - - Extract the diagonal (requires explicit form): - - >>> np.einsum('ii->i', a) - array([ 0, 6, 12, 18, 24]) - >>> np.einsum(a, [0,0], [0]) - array([ 0, 6, 12, 18, 24]) - >>> np.diag(a) - array([ 0, 6, 12, 18, 24]) - - Sum over an axis (requires explicit form): - - >>> np.einsum('ij->i', a) - array([ 10, 35, 60, 85, 110]) - >>> np.einsum(a, [0,1], [0]) - array([ 10, 35, 60, 85, 110]) - >>> np.sum(a, axis=1) - array([ 10, 35, 60, 85, 110]) - - For higher dimensional arrays summing a single axis can be done with ellipsis: - - >>> np.einsum('...j->...', a) - array([ 10, 35, 60, 85, 110]) - >>> np.einsum(a, [Ellipsis,1], [Ellipsis]) - array([ 10, 35, 60, 85, 110]) - - Compute a matrix transpose, or reorder any number of axes: - - >>> np.einsum('ji', c) - array([[0, 3], - [1, 4], - [2, 5]]) - >>> np.einsum('ij->ji', c) - array([[0, 3], - [1, 4], - [2, 5]]) - >>> np.einsum(c, [1,0]) - array([[0, 3], - [1, 4], - [2, 5]]) - >>> np.transpose(c) - array([[0, 3], - [1, 4], - [2, 5]]) - - Vector inner products: - - >>> np.einsum('i,i', b, b) - 30 - >>> np.einsum(b, [0], b, [0]) - 30 - >>> np.inner(b,b) - 30 - - Matrix vector multiplication: - - >>> np.einsum('ij,j', a, b) - array([ 30, 80, 130, 180, 230]) - >>> np.einsum(a, [0,1], b, [1]) - array([ 30, 80, 130, 180, 230]) - >>> np.dot(a, b) - array([ 30, 80, 130, 180, 230]) - >>> np.einsum('...j,j', a, b) - array([ 30, 80, 130, 180, 230]) - - Broadcasting and scalar multiplication: - - >>> np.einsum('..., ...', 3, c) - array([[ 0, 3, 6], - [ 9, 12, 15]]) - >>> np.einsum(',ij', 3, c) - array([[ 0, 3, 6], - [ 9, 12, 15]]) - >>> np.einsum(3, [Ellipsis], c, [Ellipsis]) - array([[ 0, 3, 6], - [ 9, 12, 15]]) - >>> np.multiply(3, c) - array([[ 0, 3, 6], - [ 9, 12, 15]]) - - Vector outer product: - - >>> 
np.einsum('i,j', np.arange(2)+1, b) - array([[0, 1, 2, 3, 4], - [0, 2, 4, 6, 8]]) - >>> np.einsum(np.arange(2)+1, [0], b, [1]) - array([[0, 1, 2, 3, 4], - [0, 2, 4, 6, 8]]) - >>> np.outer(np.arange(2)+1, b) - array([[0, 1, 2, 3, 4], - [0, 2, 4, 6, 8]]) - - Tensor contraction: - - >>> a = np.arange(60.).reshape(3,4,5) - >>> b = np.arange(24.).reshape(4,3,2) - >>> np.einsum('ijk,jil->kl', a, b) - array([[ 4400., 4730.], - [ 4532., 4874.], - [ 4664., 5018.], - [ 4796., 5162.], - [ 4928., 5306.]]) - >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3]) - array([[ 4400., 4730.], - [ 4532., 4874.], - [ 4664., 5018.], - [ 4796., 5162.], - [ 4928., 5306.]]) - >>> np.tensordot(a,b, axes=([1,0],[0,1])) - array([[ 4400., 4730.], - [ 4532., 4874.], - [ 4664., 5018.], - [ 4796., 5162.], - [ 4928., 5306.]]) - - Writeable returned arrays (since version 1.10.0): - - >>> a = np.zeros((3, 3)) - >>> np.einsum('ii->i', a)[:] = 1 - >>> a - array([[ 1., 0., 0.], - [ 0., 1., 0.], - [ 0., 0., 1.]]) - - Example of ellipsis use: - - >>> a = np.arange(6).reshape((3,2)) - >>> b = np.arange(12).reshape((4,3)) - >>> np.einsum('ki,jk->ij', a, b) - array([[10, 28, 46, 64], - [13, 40, 67, 94]]) - >>> np.einsum('ki,...k->i...', a, b) - array([[10, 28, 46, 64], - [13, 40, 67, 94]]) - >>> np.einsum('k...,jk', a, b) - array([[10, 28, 46, 64], - [13, 40, 67, 94]]) - - """) - - -############################################################################## -# -# Documentation for ndarray attributes and methods -# -############################################################################## - - -############################################################################## -# -# ndarray object -# -############################################################################## - - -add_newdoc('numpy.core.multiarray', 'ndarray', - """ - ndarray(shape, dtype=float, buffer=None, offset=0, - strides=None, order=None) - - An array object represents a multidimensional, homogeneous array - of fixed-size items. 
An associated data-type object describes the - format of each element in the array (its byte-order, how many bytes it - occupies in memory, whether it is an integer, a floating point number, - or something else, etc.) - - Arrays should be constructed using `array`, `zeros` or `empty` (refer - to the See Also section below). The parameters given here refer to - a low-level method (`ndarray(...)`) for instantiating an array. - - For more information, refer to the `numpy` module and examine the - methods and attributes of an array. - - Parameters - ---------- - (for the __new__ method; see Notes below) - - shape : tuple of ints - Shape of created array. - dtype : data-type, optional - Any object that can be interpreted as a numpy data type. - buffer : object exposing buffer interface, optional - Used to fill the array with data. - offset : int, optional - Offset of array data in buffer. - strides : tuple of ints, optional - Strides of data in memory. - order : {'C', 'F'}, optional - Row-major (C-style) or column-major (Fortran-style) order. - - Attributes - ---------- - T : ndarray - Transpose of the array. - data : buffer - The array's elements, in memory. - dtype : dtype object - Describes the format of the elements in the array. - flags : dict - Dictionary containing information related to memory use, e.g., - 'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc. - flat : numpy.flatiter object - Flattened version of the array as an iterator. The iterator - allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for - assignment examples; TODO). - imag : ndarray - Imaginary part of the array. - real : ndarray - Real part of the array. - size : int - Number of elements in the array. - itemsize : int - The memory use of each array element in bytes. - nbytes : int - The total number of bytes required to store the array data, - i.e., ``itemsize * size``. - ndim : int - The array's number of dimensions. - shape : tuple of ints - Shape of the array. 
- strides : tuple of ints - The step-size required to move from one element to the next in - memory. For example, a contiguous ``(3, 4)`` array of type - ``int16`` in C-order has strides ``(8, 2)``. This implies that - to move from element to element in memory requires jumps of 2 bytes. - To move from row-to-row, one needs to jump 8 bytes at a time - (``2 * 4``). - ctypes : ctypes object - Class containing properties of the array needed for interaction - with ctypes. - base : ndarray - If the array is a view into another array, that array is its `base` - (unless that array is also a view). The `base` array is where the - array data is actually stored. - - See Also - -------- - array : Construct an array. - zeros : Create an array, each element of which is zero. - empty : Create an array, but leave its allocated memory unchanged (i.e., - it contains "garbage"). - dtype : Create a data-type. - - Notes - ----- - There are two modes of creating an array using ``__new__``: - - 1. If `buffer` is None, then only `shape`, `dtype`, and `order` - are used. - 2. If `buffer` is an object exposing the buffer interface, then - all keywords are interpreted. - - No ``__init__`` method is needed because the array is fully initialized - after the ``__new__`` method. - - Examples - -------- - These examples illustrate the low-level `ndarray` constructor. Refer - to the `See Also` section above for easier ways of constructing an - ndarray. - - First mode, `buffer` is None: - - >>> np.ndarray(shape=(2,2), dtype=float, order='F') - array([[0.0e+000, 0.0e+000], # random - [ nan, 2.5e-323]]) - - Second mode: - - >>> np.ndarray((2,), buffer=np.array([1,2,3]), - ... offset=np.int_().itemsize, - ... dtype=int) # offset = 1*itemsize, i.e. 
skip first element - array([2, 3]) - - """) - - -############################################################################## -# -# ndarray attributes -# -############################################################################## - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__', - """Array protocol: Python side.""")) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__', - """None.""")) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__', - """Array priority.""")) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__', - """Array protocol: C-struct side.""")) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('base', - """ - Base object if memory is from some other object. - - Examples - -------- - The base of an array that owns its memory is None: - - >>> x = np.array([1,2,3,4]) - >>> x.base is None - True - - Slicing creates a view, whose memory is shared with x: - - >>> y = x[2:] - >>> y.base is x - True - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes', - """ - An object to simplify the interaction of the array with the ctypes - module. - - This attribute creates an object that makes it easier to use arrays - when calling shared libraries with the ctypes module. The returned - object has, among others, data, shape, and strides attributes (see - Notes below) which themselves return ctypes objects that can be used - as arguments to a shared library. - - Parameters - ---------- - None - - Returns - ------- - c : Python object - Possessing attributes data, shape, strides, etc. - - See Also - -------- - numpy.ctypeslib - - Notes - ----- - Below are the public attributes of this object which were documented - in "Guide to NumPy" (we have omitted undocumented public attributes, - as well as documented private attributes): - - .. autoattribute:: numpy.core._internal._ctypes.data - :noindex: - - .. 
autoattribute:: numpy.core._internal._ctypes.shape - :noindex: - - .. autoattribute:: numpy.core._internal._ctypes.strides - :noindex: - - .. automethod:: numpy.core._internal._ctypes.data_as - :noindex: - - .. automethod:: numpy.core._internal._ctypes.shape_as - :noindex: - - .. automethod:: numpy.core._internal._ctypes.strides_as - :noindex: - - If the ctypes module is not available, then the ctypes attribute - of array objects still returns something useful, but ctypes objects - are not returned and errors may be raised instead. In particular, - the object will still have the ``as_parameter`` attribute which will - return an integer equal to the data attribute. - - Examples - -------- - >>> import ctypes - >>> x - array([[0, 1], - [2, 3]]) - >>> x.ctypes.data - 30439712 - >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)) - - >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)).contents - c_long(0) - >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_longlong)).contents - c_longlong(4294967296L) - >>> x.ctypes.shape - - >>> x.ctypes.shape_as(ctypes.c_long) - - >>> x.ctypes.strides - - >>> x.ctypes.strides_as(ctypes.c_longlong) - - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('data', - """Python buffer object pointing to the start of the array's data.""")) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype', - """ - Data-type of the array's elements. - - Parameters - ---------- - None - - Returns - ------- - d : numpy dtype object - - See Also - -------- - numpy.dtype - - Examples - -------- - >>> x - array([[0, 1], - [2, 3]]) - >>> x.dtype - dtype('int32') - >>> type(x.dtype) - - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('imag', - """ - The imaginary part of the array. - - Examples - -------- - >>> x = np.sqrt([1+0j, 0+1j]) - >>> x.imag - array([ 0. , 0.70710678]) - >>> x.imag.dtype - dtype('float64') - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize', - """ - Length of one array element in bytes. 
- - Examples - -------- - >>> x = np.array([1,2,3], dtype=np.float64) - >>> x.itemsize - 8 - >>> x = np.array([1,2,3], dtype=np.complex128) - >>> x.itemsize - 16 - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('flags', - """ - Information about the memory layout of the array. - - Attributes - ---------- - C_CONTIGUOUS (C) - The data is in a single, C-style contiguous segment. - F_CONTIGUOUS (F) - The data is in a single, Fortran-style contiguous segment. - OWNDATA (O) - The array owns the memory it uses or borrows it from another object. - WRITEABLE (W) - The data area can be written to. Setting this to False locks - the data, making it read-only. A view (slice, etc.) inherits WRITEABLE - from its base array at creation time, but a view of a writeable - array may be subsequently locked while the base array remains writeable. - (The opposite is not true, in that a view of a locked array may not - be made writeable. However, currently, locking a base object does not - lock any views that already reference it, so under that circumstance it - is possible to alter the contents of a locked array via a previously - created writeable view onto it.) Attempting to change a non-writeable - array raises a RuntimeError exception. - ALIGNED (A) - The data and all elements are aligned appropriately for the hardware. - WRITEBACKIFCOPY (X) - This array is a copy of some other array. The C-API function - PyArray_ResolveWritebackIfCopy must be called before deallocating - to the base array will be updated with the contents of this array. - UPDATEIFCOPY (U) - (Deprecated, use WRITEBACKIFCOPY) This array is a copy of some other array. - When this array is - deallocated, the base array will be updated with the contents of - this array. - FNC - F_CONTIGUOUS and not C_CONTIGUOUS. - FORC - F_CONTIGUOUS or C_CONTIGUOUS (one-segment test). - BEHAVED (B) - ALIGNED and WRITEABLE. - CARRAY (CA) - BEHAVED and C_CONTIGUOUS. 
- FARRAY (FA) - BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS. - - Notes - ----- - The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``), - or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag - names are only supported in dictionary access. - - Only the WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be - changed by the user, via direct assignment to the attribute or dictionary - entry, or by calling `ndarray.setflags`. - - The array flags cannot be set arbitrarily: - - - UPDATEIFCOPY can only be set ``False``. - - WRITEBACKIFCOPY can only be set ``False``. - - ALIGNED can only be set ``True`` if the data is truly aligned. - - WRITEABLE can only be set ``True`` if the array owns its own memory - or the ultimate owner of the memory exposes a writeable buffer - interface or is a string. - - Arrays can be both C-style and Fortran-style contiguous simultaneously. - This is clear for 1-dimensional arrays, but can also be true for higher - dimensional arrays. - - Even for contiguous arrays a stride for a given dimension - ``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1`` - or the array has no elements. - It does *not* generally hold that ``self.strides[-1] == self.itemsize`` - for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for - Fortran-style contiguous arrays is true. - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('flat', - """ - A 1-D iterator over the array. - - This is a `numpy.flatiter` instance, which acts similarly to, but is not - a subclass of, Python's built-in iterator object. - - See Also - -------- - flatten : Return a copy of the array collapsed into one dimension. 
- - flatiter - - Examples - -------- - >>> x = np.arange(1, 7).reshape(2, 3) - >>> x - array([[1, 2, 3], - [4, 5, 6]]) - >>> x.flat[3] - 4 - >>> x.T - array([[1, 4], - [2, 5], - [3, 6]]) - >>> x.T.flat[3] - 5 - >>> type(x.flat) - - - An assignment example: - - >>> x.flat = 3; x - array([[3, 3, 3], - [3, 3, 3]]) - >>> x.flat[[1,4]] = 1; x - array([[3, 1, 3], - [3, 1, 3]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes', - """ - Total bytes consumed by the elements of the array. - - Notes - ----- - Does not include memory consumed by non-element attributes of the - array object. - - Examples - -------- - >>> x = np.zeros((3,5,2), dtype=np.complex128) - >>> x.nbytes - 480 - >>> np.prod(x.shape) * x.itemsize - 480 - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim', - """ - Number of array dimensions. - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> x.ndim - 1 - >>> y = np.zeros((2, 3, 4)) - >>> y.ndim - 3 - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('real', - """ - The real part of the array. - - Examples - -------- - >>> x = np.sqrt([1+0j, 0+1j]) - >>> x.real - array([ 1. , 0.70710678]) - >>> x.real.dtype - dtype('float64') - - See Also - -------- - numpy.real : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('shape', - """ - Tuple of array dimensions. - - The shape property is usually used to get the current shape of an array, - but may also be used to reshape the array in-place by assigning a tuple of - array dimensions to it. As with `numpy.reshape`, one of the new shape - dimensions can be -1, in which case its value is inferred from the size of - the array and the remaining dimensions. Reshaping an array in-place will - fail if a copy is required. 
- - Examples - -------- - >>> x = np.array([1, 2, 3, 4]) - >>> x.shape - (4,) - >>> y = np.zeros((2, 3, 4)) - >>> y.shape - (2, 3, 4) - >>> y.shape = (3, 8) - >>> y - array([[ 0., 0., 0., 0., 0., 0., 0., 0.], - [ 0., 0., 0., 0., 0., 0., 0., 0.], - [ 0., 0., 0., 0., 0., 0., 0., 0.]]) - >>> y.shape = (3, 6) - Traceback (most recent call last): - File "", line 1, in - ValueError: total size of new array must be unchanged - >>> np.zeros((4,2))[::2].shape = (-1,) - Traceback (most recent call last): - File "", line 1, in - AttributeError: incompatible shape for a non-contiguous array - - See Also - -------- - numpy.reshape : similar function - ndarray.reshape : similar method - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('size', - """ - Number of elements in the array. - - Equal to ``np.prod(a.shape)``, i.e., the product of the array's - dimensions. - - Notes - ----- - `a.size` returns a standard arbitrary precision Python integer. This - may not be the case with other methods of obtaining the same value - (like the suggested ``np.prod(a.shape)``, which returns an instance - of ``np.int_``), and may be relevant if the value is used further in - calculations that may overflow a fixed size integer type. - - Examples - -------- - >>> x = np.zeros((3, 5, 2), dtype=np.complex128) - >>> x.size - 30 - >>> np.prod(x.shape) - 30 - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('strides', - """ - Tuple of bytes to step in each dimension when traversing an array. - - The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a` - is:: - - offset = sum(np.array(i) * a.strides) - - A more detailed explanation of strides can be found in the - "ndarray.rst" file in the NumPy reference guide. - - Notes - ----- - Imagine an array of 32-bit integers (each 4 bytes):: - - x = np.array([[0, 1, 2, 3, 4], - [5, 6, 7, 8, 9]], dtype=np.int32) - - This array is stored in memory as 40 bytes, one after the other - (known as a contiguous block of memory). 
The strides of an array tell - us how many bytes we have to skip in memory to move to the next position - along a certain axis. For example, we have to skip 4 bytes (1 value) to - move to the next column, but 20 bytes (5 values) to get to the same - position in the next row. As such, the strides for the array `x` will be - ``(20, 4)``. - - See Also - -------- - numpy.lib.stride_tricks.as_strided - - Examples - -------- - >>> y = np.reshape(np.arange(2*3*4), (2,3,4)) - >>> y - array([[[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]], - [[12, 13, 14, 15], - [16, 17, 18, 19], - [20, 21, 22, 23]]]) - >>> y.strides - (48, 16, 4) - >>> y[1,1,1] - 17 - >>> offset=sum(y.strides * np.array((1,1,1))) - >>> offset/y.itemsize - 17 - - >>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0) - >>> x.strides - (32, 4, 224, 1344) - >>> i = np.array([3,5,2,2]) - >>> offset = sum(i * x.strides) - >>> x[3,5,2,2] - 813 - >>> offset / x.itemsize - 813 - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('T', - """ - The transposed array. - - Same as ``self.transpose()``. - - Examples - -------- - >>> x = np.array([[1.,2.],[3.,4.]]) - >>> x - array([[ 1., 2.], - [ 3., 4.]]) - >>> x.T - array([[ 1., 3.], - [ 2., 4.]]) - >>> x = np.array([1.,2.,3.,4.]) - >>> x - array([ 1., 2., 3., 4.]) - >>> x.T - array([ 1., 2., 3., 4.]) - - See Also - -------- - transpose - - """)) - - -############################################################################## -# -# ndarray methods -# -############################################################################## - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__', - """ a.__array__(|dtype) -> reference if type unchanged, copy otherwise. - - Returns either a new reference to self if dtype is not given or a new array - of provided data type if dtype is different from the current dtype of the - array. 
- - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__', - """a.__array_prepare__(obj) -> Object of same type as ndarray object obj. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__', - """a.__array_wrap__(obj) -> Object of same type as ndarray object a. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__', - """a.__copy__() - - Used if :func:`copy.copy` is called on an array. Returns a copy of the array. - - Equivalent to ``a.copy(order='K')``. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__', - """a.__deepcopy__(memo, /) -> Deep copy of array. - - Used if :func:`copy.deepcopy` is called on an array. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__', - """a.__reduce__() - - For pickling. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__', - """a.__setstate__(state, /) - - For unpickling. - - The `state` argument must be a sequence that contains the following - elements: - - Parameters - ---------- - version : int - optional pickle version. If omitted defaults to 0. - shape : tuple - dtype : data-type - isFortran : bool - rawdata : string or list - a binary string with the data (or a list if 'a' is an object array) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('all', - """ - a.all(axis=None, out=None, keepdims=False) - - Returns True if all elements evaluate to True. - - Refer to `numpy.all` for full documentation. - - See Also - -------- - numpy.all : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('any', - """ - a.any(axis=None, out=None, keepdims=False) - - Returns True if any of the elements of `a` evaluate to True. - - Refer to `numpy.any` for full documentation. 
- - See Also - -------- - numpy.any : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax', - """ - a.argmax(axis=None, out=None) - - Return indices of the maximum values along the given axis. - - Refer to `numpy.argmax` for full documentation. - - See Also - -------- - numpy.argmax : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin', - """ - a.argmin(axis=None, out=None) - - Return indices of the minimum values along the given axis of `a`. - - Refer to `numpy.argmin` for detailed documentation. - - See Also - -------- - numpy.argmin : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort', - """ - a.argsort(axis=-1, kind=None, order=None) - - Returns the indices that would sort this array. - - Refer to `numpy.argsort` for full documentation. - - See Also - -------- - numpy.argsort : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition', - """ - a.argpartition(kth, axis=-1, kind='introselect', order=None) - - Returns the indices that would partition this array. - - Refer to `numpy.argpartition` for full documentation. - - .. versionadded:: 1.8.0 - - See Also - -------- - numpy.argpartition : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('astype', - """ - a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True) - - Copy of the array, cast to a specified type. - - Parameters - ---------- - dtype : str or dtype - Typecode or data-type to which the array is cast. - order : {'C', 'F', 'A', 'K'}, optional - Controls the memory layout order of the result. - 'C' means C order, 'F' means Fortran order, 'A' - means 'F' order if all the arrays are Fortran contiguous, - 'C' order otherwise, and 'K' means as close to the - order the array elements appear in memory as possible. - Default is 'K'. 
- casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional - Controls what kind of data casting may occur. Defaults to 'unsafe' - for backwards compatibility. - - * 'no' means the data types should not be cast at all. - * 'equiv' means only byte-order changes are allowed. - * 'safe' means only casts which can preserve values are allowed. - * 'same_kind' means only safe casts or casts within a kind, - like float64 to float32, are allowed. - * 'unsafe' means any data conversions may be done. - subok : bool, optional - If True, then sub-classes will be passed-through (default), otherwise - the returned array will be forced to be a base-class array. - copy : bool, optional - By default, astype always returns a newly allocated array. If this - is set to false, and the `dtype`, `order`, and `subok` - requirements are satisfied, the input array is returned instead - of a copy. - - Returns - ------- - arr_t : ndarray - Unless `copy` is False and the other conditions for returning the input - array are satisfied (see description for `copy` input parameter), `arr_t` - is a new array of the same shape as the input array, with dtype, order - given by `dtype`, `order`. - - Notes - ----- - .. versionchanged:: 1.17.0 - Casting between a simple data type and a structured one is possible only - for "unsafe" casting. Casting to multiple fields is allowed, but - casting from multiple fields is not. - - .. versionchanged:: 1.9.0 - Casting from numeric to string types in 'safe' casting mode requires - that the string dtype length is long enough to store the max - integer/float value converted. - - Raises - ------ - ComplexWarning - When casting from complex to float or int. To avoid this, - one should use ``a.real.astype(t)``. - - Examples - -------- - >>> x = np.array([1, 2, 2.5]) - >>> x - array([1. , 2. 
, 2.5]) - - >>> x.astype(int) - array([1, 2, 2]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap', - """ - a.byteswap(inplace=False) - - Swap the bytes of the array elements - - Toggle between low-endian and big-endian data representation by - returning a byteswapped array, optionally swapped in-place. - Arrays of byte-strings are not swapped. The real and imaginary - parts of a complex number are swapped individually. - - Parameters - ---------- - inplace : bool, optional - If ``True``, swap bytes in-place, default is ``False``. - - Returns - ------- - out : ndarray - The byteswapped array. If `inplace` is ``True``, this is - a view to self. - - Examples - -------- - >>> A = np.array([1, 256, 8755], dtype=np.int16) - >>> list(map(hex, A)) - ['0x1', '0x100', '0x2233'] - >>> A.byteswap(inplace=True) - array([ 256, 1, 13090], dtype=int16) - >>> list(map(hex, A)) - ['0x100', '0x1', '0x3322'] - - Arrays of byte-strings are not swapped - - >>> A = np.array([b'ceg', b'fac']) - >>> A.byteswap() - array([b'ceg', b'fac'], dtype='|S3') - - ``A.newbyteorder().byteswap()`` produces an array with the same values - but different representation in memory - - >>> A = np.array([1, 2, 3]) - >>> A.view(np.uint8) - array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, - 0, 0], dtype=uint8) - >>> A.newbyteorder().byteswap(inplace=True) - array([1, 2, 3]) - >>> A.view(np.uint8) - array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, - 0, 3], dtype=uint8) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('choose', - """ - a.choose(choices, out=None, mode='raise') - - Use an index array to construct a new array from a set of choices. - - Refer to `numpy.choose` for full documentation. 
- - See Also - -------- - numpy.choose : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('clip', - """ - a.clip(min=None, max=None, out=None, **kwargs) - - Return an array whose values are limited to ``[min, max]``. - One of max or min must be given. - - Refer to `numpy.clip` for full documentation. - - See Also - -------- - numpy.clip : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('compress', - """ - a.compress(condition, axis=None, out=None) - - Return selected slices of this array along given axis. - - Refer to `numpy.compress` for full documentation. - - See Also - -------- - numpy.compress : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('conj', - """ - a.conj() - - Complex-conjugate all elements. - - Refer to `numpy.conjugate` for full documentation. - - See Also - -------- - numpy.conjugate : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate', - """ - a.conjugate() - - Return the complex conjugate, element-wise. - - Refer to `numpy.conjugate` for full documentation. - - See Also - -------- - numpy.conjugate : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('copy', - """ - a.copy(order='C') - - Return a copy of the array. - - Parameters - ---------- - order : {'C', 'F', 'A', 'K'}, optional - Controls the memory layout of the copy. 'C' means C-order, - 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, - 'C' otherwise. 'K' means match the layout of `a` as closely - as possible. (Note that this function and :func:`numpy.copy` are very - similar, but have different default values for their order= - arguments.) 
- - See also - -------- - numpy.copy - numpy.copyto - - Examples - -------- - >>> x = np.array([[1,2,3],[4,5,6]], order='F') - - >>> y = x.copy() - - >>> x.fill(0) - - >>> x - array([[0, 0, 0], - [0, 0, 0]]) - - >>> y - array([[1, 2, 3], - [4, 5, 6]]) - - >>> y.flags['C_CONTIGUOUS'] - True - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod', - """ - a.cumprod(axis=None, dtype=None, out=None) - - Return the cumulative product of the elements along the given axis. - - Refer to `numpy.cumprod` for full documentation. - - See Also - -------- - numpy.cumprod : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum', - """ - a.cumsum(axis=None, dtype=None, out=None) - - Return the cumulative sum of the elements along the given axis. - - Refer to `numpy.cumsum` for full documentation. - - See Also - -------- - numpy.cumsum : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal', - """ - a.diagonal(offset=0, axis1=0, axis2=1) - - Return specified diagonals. In NumPy 1.9 the returned array is a - read-only view instead of a copy as in previous NumPy versions. In - a future version the read-only restriction will be removed. - - Refer to :func:`numpy.diagonal` for full documentation. - - See Also - -------- - numpy.diagonal : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('dot', - """ - a.dot(b, out=None) - - Dot product of two arrays. - - Refer to `numpy.dot` for full documentation. - - See Also - -------- - numpy.dot : equivalent function - - Examples - -------- - >>> a = np.eye(2) - >>> b = np.ones((2, 2)) * 2 - >>> a.dot(b) - array([[2., 2.], - [2., 2.]]) - - This array method can be conveniently chained: - - >>> a.dot(b).dot(b) - array([[8., 8.], - [8., 8.]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('dump', - """a.dump(file) - - Dump a pickle of the array to the specified file. 
- The array can be read back with pickle.load or numpy.load. - - Parameters - ---------- - file : str or Path - A string naming the dump file. - - .. versionchanged:: 1.17.0 - `pathlib.Path` objects are now accepted. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps', - """ - a.dumps() - - Returns the pickle of the array as a string. - pickle.loads or numpy.loads will convert the string back to an array. - - Parameters - ---------- - None - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('fill', - """ - a.fill(value) - - Fill the array with a scalar value. - - Parameters - ---------- - value : scalar - All elements of `a` will be assigned this value. - - Examples - -------- - >>> a = np.array([1, 2]) - >>> a.fill(0) - >>> a - array([0, 0]) - >>> a = np.empty(2) - >>> a.fill(1) - >>> a - array([1., 1.]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten', - """ - a.flatten(order='C') - - Return a copy of the array collapsed into one dimension. - - Parameters - ---------- - order : {'C', 'F', 'A', 'K'}, optional - 'C' means to flatten in row-major (C-style) order. - 'F' means to flatten in column-major (Fortran- - style) order. 'A' means to flatten in column-major - order if `a` is Fortran *contiguous* in memory, - row-major order otherwise. 'K' means to flatten - `a` in the order the elements occur in memory. - The default is 'C'. - - Returns - ------- - y : ndarray - A copy of the input array, flattened to one dimension. - - See Also - -------- - ravel : Return a flattened array. - flat : A 1-D flat iterator over the array. - - Examples - -------- - >>> a = np.array([[1,2], [3,4]]) - >>> a.flatten() - array([1, 2, 3, 4]) - >>> a.flatten('F') - array([1, 3, 2, 4]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield', - """ - a.getfield(dtype, offset=0) - - Returns a field of the given array as a certain type. - - A field is a view of the array data with a given data-type. 
The values in - the view are determined by the given type and the offset into the current - array in bytes. The offset needs to be such that the view dtype fits in the - array dtype; for example an array of dtype complex128 has 16-byte elements. - If taking a view with a 32-bit integer (4 bytes), the offset needs to be - between 0 and 12 bytes. - - Parameters - ---------- - dtype : str or dtype - The data type of the view. The dtype size of the view can not be larger - than that of the array itself. - offset : int - Number of bytes to skip before beginning the element view. - - Examples - -------- - >>> x = np.diag([1.+1.j]*2) - >>> x[1, 1] = 2 + 4.j - >>> x - array([[1.+1.j, 0.+0.j], - [0.+0.j, 2.+4.j]]) - >>> x.getfield(np.float64) - array([[1., 0.], - [0., 2.]]) - - By choosing an offset of 8 bytes we can select the complex part of the - array for our view: - - >>> x.getfield(np.float64, offset=8) - array([[1., 0.], - [0., 4.]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('item', - """ - a.item(*args) - - Copy an element of an array to a standard Python scalar and return it. - - Parameters - ---------- - \\*args : Arguments (variable number and type) - - * none: in this case, the method only works for arrays - with one element (`a.size == 1`), which element is - copied into a standard Python scalar object and returned. - - * int_type: this argument is interpreted as a flat index into - the array, specifying which element to copy and return. - - * tuple of int_types: functions as does a single int_type argument, - except that the argument is interpreted as an nd-index into the - array. - - Returns - ------- - z : Standard Python scalar object - A copy of the specified element of the array as a suitable - Python scalar - - Notes - ----- - When the data type of `a` is longdouble or clongdouble, item() returns - a scalar array object because there is no available Python scalar that - would not lose information. 
Void arrays return a buffer object for item(), - unless fields are defined, in which case a tuple is returned. - - `item` is very similar to a[args], except, instead of an array scalar, - a standard Python scalar is returned. This can be useful for speeding up - access to elements of the array and doing arithmetic on elements of the - array using Python's optimized math. - - Examples - -------- - >>> np.random.seed(123) - >>> x = np.random.randint(9, size=(3, 3)) - >>> x - array([[2, 2, 6], - [1, 3, 6], - [1, 0, 1]]) - >>> x.item(3) - 1 - >>> x.item(7) - 0 - >>> x.item((0, 1)) - 2 - >>> x.item((2, 2)) - 1 - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset', - """ - a.itemset(*args) - - Insert scalar into an array (scalar is cast to array's dtype, if possible) - - There must be at least 1 argument, and define the last argument - as *item*. Then, ``a.itemset(*args)`` is equivalent to but faster - than ``a[args] = item``. The item should be a scalar value and `args` - must select a single item in the array `a`. - - Parameters - ---------- - \\*args : Arguments - If one argument: a scalar, only used in case `a` is of size 1. - If two arguments: the last argument is the value to be set - and must be a scalar, the first argument specifies a single array - element location. It is either an int or a tuple. - - Notes - ----- - Compared to indexing syntax, `itemset` provides some speed increase - for placing a scalar into a particular location in an `ndarray`, - if you must do this. However, generally this is discouraged: - among other problems, it complicates the appearance of the code. - Also, when using `itemset` (and `item`) inside a loop, be sure - to assign the methods to a local variable to avoid the attribute - look-up at each loop iteration. 
- - Examples - -------- - >>> np.random.seed(123) - >>> x = np.random.randint(9, size=(3, 3)) - >>> x - array([[2, 2, 6], - [1, 3, 6], - [1, 0, 1]]) - >>> x.itemset(4, 0) - >>> x.itemset((2, 2), 9) - >>> x - array([[2, 2, 6], - [1, 0, 6], - [1, 0, 9]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('max', - """ - a.max(axis=None, out=None, keepdims=False, initial=, where=True) - - Return the maximum along a given axis. - - Refer to `numpy.amax` for full documentation. - - See Also - -------- - numpy.amax : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('mean', - """ - a.mean(axis=None, dtype=None, out=None, keepdims=False) - - Returns the average of the array elements along given axis. - - Refer to `numpy.mean` for full documentation. - - See Also - -------- - numpy.mean : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('min', - """ - a.min(axis=None, out=None, keepdims=False, initial=, where=True) - - Return the minimum along a given axis. - - Refer to `numpy.amin` for full documentation. - - See Also - -------- - numpy.amin : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder', - """ - arr.newbyteorder(new_order='S') - - Return the array with the same data viewed with a different byte order. - - Equivalent to:: - - arr.view(arr.dtype.newbytorder(new_order)) - - Changes are also made in all fields and sub-arrays of the array data - type. - - - - Parameters - ---------- - new_order : string, optional - Byte order to force; a value from the byte order specifications - below. `new_order` codes can be any of: - - * 'S' - swap dtype from current to opposite endian - * {'<', 'L'} - little endian - * {'>', 'B'} - big endian - * {'=', 'N'} - native order - * {'|', 'I'} - ignore (no change to byte order) - - The default value ('S') results in swapping the current - byte order. 
The code does a case-insensitive check on the first - letter of `new_order` for the alternatives above. For example, - any of 'B' or 'b' or 'biggish' are valid to specify big-endian. - - - Returns - ------- - new_arr : array - New array object with the dtype reflecting given change to the - byte order. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero', - """ - a.nonzero() - - Return the indices of the elements that are non-zero. - - Refer to `numpy.nonzero` for full documentation. - - See Also - -------- - numpy.nonzero : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('prod', - """ - a.prod(axis=None, dtype=None, out=None, keepdims=False, initial=1, where=True) - - Return the product of the array elements over the given axis - - Refer to `numpy.prod` for full documentation. - - See Also - -------- - numpy.prod : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp', - """ - a.ptp(axis=None, out=None, keepdims=False) - - Peak to peak (maximum - minimum) value along a given axis. - - Refer to `numpy.ptp` for full documentation. - - See Also - -------- - numpy.ptp : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('put', - """ - a.put(indices, values, mode='raise') - - Set ``a.flat[n] = values[n]`` for all `n` in indices. - - Refer to `numpy.put` for full documentation. - - See Also - -------- - numpy.put : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel', - """ - a.ravel([order]) - - Return a flattened array. - - Refer to `numpy.ravel` for full documentation. - - See Also - -------- - numpy.ravel : equivalent function - - ndarray.flat : a flat iterator on the array. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat', - """ - a.repeat(repeats, axis=None) - - Repeat elements of an array. - - Refer to `numpy.repeat` for full documentation. 
- - See Also - -------- - numpy.repeat : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape', - """ - a.reshape(shape, order='C') - - Returns an array containing the same data with a new shape. - - Refer to `numpy.reshape` for full documentation. - - See Also - -------- - numpy.reshape : equivalent function - - Notes - ----- - Unlike the free function `numpy.reshape`, this method on `ndarray` allows - the elements of the shape parameter to be passed in as separate arguments. - For example, ``a.reshape(10, 11)`` is equivalent to - ``a.reshape((10, 11))``. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('resize', - """ - a.resize(new_shape, refcheck=True) - - Change shape and size of array in-place. - - Parameters - ---------- - new_shape : tuple of ints, or `n` ints - Shape of resized array. - refcheck : bool, optional - If False, reference count will not be checked. Default is True. - - Returns - ------- - None - - Raises - ------ - ValueError - If `a` does not own its own data or references or views to it exist, - and the data memory must be changed. - PyPy only: will always raise if the data memory must be changed, since - there is no reliable way to determine if references or views to it - exist. - - SystemError - If the `order` keyword argument is specified. This behaviour is a - bug in NumPy. - - See Also - -------- - resize : Return a new array with the specified shape. - - Notes - ----- - This reallocates space for the data area if necessary. - - Only contiguous arrays (data elements consecutive in memory) can be - resized. - - The purpose of the reference count check is to make sure you - do not use this array as a buffer for another Python object and then - reallocate the memory. However, reference counts can increase in - other ways so if you are sure that you have not shared the memory - for this array with another Python object, then you may safely set - `refcheck` to False. 
- - Examples - -------- - Shrinking an array: array is flattened (in the order that the data are - stored in memory), resized, and reshaped: - - >>> a = np.array([[0, 1], [2, 3]], order='C') - >>> a.resize((2, 1)) - >>> a - array([[0], - [1]]) - - >>> a = np.array([[0, 1], [2, 3]], order='F') - >>> a.resize((2, 1)) - >>> a - array([[0], - [2]]) - - Enlarging an array: as above, but missing entries are filled with zeros: - - >>> b = np.array([[0, 1], [2, 3]]) - >>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple - >>> b - array([[0, 1, 2], - [3, 0, 0]]) - - Referencing an array prevents resizing... - - >>> c = a - >>> a.resize((1, 1)) - Traceback (most recent call last): - ... - ValueError: cannot resize an array that references or is referenced ... - - Unless `refcheck` is False: - - >>> a.resize((1, 1), refcheck=False) - >>> a - array([[0]]) - >>> c - array([[0]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('round', - """ - a.round(decimals=0, out=None) - - Return `a` with each element rounded to the given number of decimals. - - Refer to `numpy.around` for full documentation. - - See Also - -------- - numpy.around : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted', - """ - a.searchsorted(v, side='left', sorter=None) - - Find indices where elements of v should be inserted in a to maintain order. - - For full documentation, see `numpy.searchsorted` - - See Also - -------- - numpy.searchsorted : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield', - """ - a.setfield(val, dtype, offset=0) - - Put a value into a specified place in a field defined by a data-type. - - Place `val` into `a`'s field defined by `dtype` and beginning `offset` - bytes into the field. - - Parameters - ---------- - val : object - Value to be placed in field. - dtype : dtype object - Data-type of the field in which to place `val`. 
- offset : int, optional - The number of bytes into the field at which to place `val`. - - Returns - ------- - None - - See Also - -------- - getfield - - Examples - -------- - >>> x = np.eye(3) - >>> x.getfield(np.float64) - array([[1., 0., 0.], - [0., 1., 0.], - [0., 0., 1.]]) - >>> x.setfield(3, np.int32) - >>> x.getfield(np.int32) - array([[3, 3, 3], - [3, 3, 3], - [3, 3, 3]], dtype=int32) - >>> x - array([[1.0e+000, 1.5e-323, 1.5e-323], - [1.5e-323, 1.0e+000, 1.5e-323], - [1.5e-323, 1.5e-323, 1.0e+000]]) - >>> x.setfield(np.eye(3), np.int32) - >>> x - array([[1., 0., 0.], - [0., 1., 0.], - [0., 0., 1.]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags', - """ - a.setflags(write=None, align=None, uic=None) - - Set array flags WRITEABLE, ALIGNED, (WRITEBACKIFCOPY and UPDATEIFCOPY), - respectively. - - These Boolean-valued flags affect how numpy interprets the memory - area used by `a` (see Notes below). The ALIGNED flag can only - be set to True if the data is actually aligned according to the type. - The WRITEBACKIFCOPY and (deprecated) UPDATEIFCOPY flags can never be set - to True. The flag WRITEABLE can only be set to True if the array owns its - own memory, or the ultimate owner of the memory exposes a writeable buffer - interface, or is a string. (The exception for string is made so that - unpickling can be done without copying memory.) - - Parameters - ---------- - write : bool, optional - Describes whether or not `a` can be written to. - align : bool, optional - Describes whether or not `a` is aligned properly for its type. - uic : bool, optional - Describes whether or not `a` is a copy of another "base" array. - - Notes - ----- - Array flags provide information about how the memory area used - for the array is to be interpreted. There are 7 Boolean flags - in use, only four of which can be changed by the user: - WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED. 
- - WRITEABLE (W) the data area can be written to; - - ALIGNED (A) the data and strides are aligned appropriately for the hardware - (as determined by the compiler); - - UPDATEIFCOPY (U) (deprecated), replaced by WRITEBACKIFCOPY; - - WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced - by .base). When the C-API function PyArray_ResolveWritebackIfCopy is - called, the base array will be updated with the contents of this array. - - All flags can be accessed using the single (upper case) letter as well - as the full name. - - Examples - -------- - >>> y = np.array([[3, 1, 7], - ... [2, 0, 0], - ... [8, 5, 9]]) - >>> y - array([[3, 1, 7], - [2, 0, 0], - [8, 5, 9]]) - >>> y.flags - C_CONTIGUOUS : True - F_CONTIGUOUS : False - OWNDATA : True - WRITEABLE : True - ALIGNED : True - WRITEBACKIFCOPY : False - UPDATEIFCOPY : False - >>> y.setflags(write=0, align=0) - >>> y.flags - C_CONTIGUOUS : True - F_CONTIGUOUS : False - OWNDATA : True - WRITEABLE : False - ALIGNED : False - WRITEBACKIFCOPY : False - UPDATEIFCOPY : False - >>> y.setflags(uic=1) - Traceback (most recent call last): - File "", line 1, in - ValueError: cannot set WRITEBACKIFCOPY flag to True - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('sort', - """ - a.sort(axis=-1, kind=None, order=None) - - Sort an array in-place. Refer to `numpy.sort` for full documentation. - - Parameters - ---------- - axis : int, optional - Axis along which to sort. Default is -1, which means sort along the - last axis. - kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional - Sorting algorithm. The default is 'quicksort'. Note that both 'stable' - and 'mergesort' use timsort under the covers and, in general, the - actual implementation will vary with datatype. The 'mergesort' option - is retained for backwards compatibility. - - .. versionchanged:: 1.15.0. - The 'stable' option was added. 
- - order : str or list of str, optional - When `a` is an array with fields defined, this argument specifies - which fields to compare first, second, etc. A single field can - be specified as a string, and not all fields need be specified, - but unspecified fields will still be used, in the order in which - they come up in the dtype, to break ties. - - See Also - -------- - numpy.sort : Return a sorted copy of an array. - numpy.argsort : Indirect sort. - numpy.lexsort : Indirect stable sort on multiple keys. - numpy.searchsorted : Find elements in sorted array. - numpy.partition: Partial sort. - - Notes - ----- - See `numpy.sort` for notes on the different sorting algorithms. - - Examples - -------- - >>> a = np.array([[1,4], [3,1]]) - >>> a.sort(axis=1) - >>> a - array([[1, 4], - [1, 3]]) - >>> a.sort(axis=0) - >>> a - array([[1, 3], - [1, 4]]) - - Use the `order` keyword to specify a field to use when sorting a - structured array: - - >>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)]) - >>> a.sort(order='y') - >>> a - array([(b'c', 1), (b'a', 2)], - dtype=[('x', 'S1'), ('y', '>> a = np.array([3, 4, 2, 1]) - >>> a.partition(3) - >>> a - array([2, 1, 3, 4]) - - >>> a.partition((1, 3)) - >>> a - array([1, 2, 3, 4]) - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze', - """ - a.squeeze(axis=None) - - Remove single-dimensional entries from the shape of `a`. - - Refer to `numpy.squeeze` for full documentation. - - See Also - -------- - numpy.squeeze : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('std', - """ - a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False) - - Returns the standard deviation of the array elements along given axis. - - Refer to `numpy.std` for full documentation. 
- - See Also - -------- - numpy.std : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('sum', - """ - a.sum(axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True) - - Return the sum of the array elements over the given axis. - - Refer to `numpy.sum` for full documentation. - - See Also - -------- - numpy.sum : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes', - """ - a.swapaxes(axis1, axis2) - - Return a view of the array with `axis1` and `axis2` interchanged. - - Refer to `numpy.swapaxes` for full documentation. - - See Also - -------- - numpy.swapaxes : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('take', - """ - a.take(indices, axis=None, out=None, mode='raise') - - Return an array formed from the elements of `a` at the given indices. - - Refer to `numpy.take` for full documentation. - - See Also - -------- - numpy.take : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile', - """ - a.tofile(fid, sep="", format="%s") - - Write array to a file as text or binary (default). - - Data is always written in 'C' order, independent of the order of `a`. - The data produced by this method can be recovered using the function - fromfile(). - - Parameters - ---------- - fid : file or str or Path - An open file object, or a string containing a filename. - - .. versionchanged:: 1.17.0 - `pathlib.Path` objects are now accepted. - - sep : str - Separator between array items for text output. - If "" (empty), a binary file is written, equivalent to - ``file.write(a.tobytes())``. - format : str - Format string for text file output. - Each entry in the array is formatted to text by first converting - it to the closest Python type, and then using "format" % item. - - Notes - ----- - This is a convenience function for quick storage of array data. 
- Information on endianness and precision is lost, so this method is not a - good choice for files intended to archive data or transport data between - machines with different endianness. Some of these problems can be overcome - by outputting the data as text files, at the expense of speed and file - size. - - When fid is a file object, array contents are directly written to the - file, bypassing the file object's ``write`` method. As a result, tofile - cannot be used with files objects supporting compression (e.g., GzipFile) - or file-like objects that do not support ``fileno()`` (e.g., BytesIO). - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist', - """ - a.tolist() - - Return the array as an ``a.ndim``-levels deep nested list of Python scalars. - - Return a copy of the array data as a (nested) Python list. - Data items are converted to the nearest compatible builtin Python type, via - the `~numpy.ndarray.item` function. - - If ``a.ndim`` is 0, then since the depth of the nested list is 0, it will - not be a list at all, but a simple Python scalar. - - Parameters - ---------- - none - - Returns - ------- - y : object, or list of object, or list of list of object, or ... - The possibly nested list of array elements. - - Notes - ----- - The array may be recreated via ``a = np.array(a.tolist())``, although this - may sometimes lose precision. 
- - Examples - -------- - For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``, - except that ``tolist`` changes numpy scalars to Python scalars: - - >>> a = np.uint32([1, 2]) - >>> a_list = list(a) - >>> a_list - [1, 2] - >>> type(a_list[0]) - - >>> a_tolist = a.tolist() - >>> a_tolist - [1, 2] - >>> type(a_tolist[0]) - - - Additionally, for a 2D array, ``tolist`` applies recursively: - - >>> a = np.array([[1, 2], [3, 4]]) - >>> list(a) - [array([1, 2]), array([3, 4])] - >>> a.tolist() - [[1, 2], [3, 4]] - - The base case for this recursion is a 0D array: - - >>> a = np.array(1) - >>> list(a) - Traceback (most recent call last): - ... - TypeError: iteration over a 0-d array - >>> a.tolist() - 1 - """)) - - -tobytesdoc = """ - a.{name}(order='C') - - Construct Python bytes containing the raw data bytes in the array. - - Constructs Python bytes showing a copy of the raw contents of - data memory. The bytes object can be produced in either 'C' or 'Fortran', - or 'Any' order (the default is 'C'-order). 'Any' order means C-order - unless the F_CONTIGUOUS flag in the array is set, in which case it - means 'Fortran' order. - - {deprecated} - - Parameters - ---------- - order : {{'C', 'F', None}}, optional - Order of the data for multidimensional arrays: - C, Fortran, or the same as for the original array. - - Returns - ------- - s : bytes - Python bytes exhibiting a copy of `a`'s raw data. - - Examples - -------- - >>> x = np.array([[0, 1], [2, 3]], dtype='>> x.tobytes() - b'\\x00\\x00\\x01\\x00\\x02\\x00\\x03\\x00' - >>> x.tobytes('C') == x.tobytes() - True - >>> x.tobytes('F') - b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00' - - """ - -add_newdoc('numpy.core.multiarray', 'ndarray', - ('tostring', tobytesdoc.format(name='tostring', - deprecated= - 'This function is a compatibility ' - 'alias for tobytes. 
Despite its ' - 'name it returns bytes not ' - 'strings.'))) -add_newdoc('numpy.core.multiarray', 'ndarray', - ('tobytes', tobytesdoc.format(name='tobytes', - deprecated='.. versionadded:: 1.9.0'))) - -add_newdoc('numpy.core.multiarray', 'ndarray', ('trace', - """ - a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None) - - Return the sum along diagonals of the array. - - Refer to `numpy.trace` for full documentation. - - See Also - -------- - numpy.trace : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose', - """ - a.transpose(*axes) - - Returns a view of the array with axes transposed. - - For a 1-D array this has no effect, as a transposed vector is simply the - same vector. To convert a 1-D array into a 2D column vector, an additional - dimension must be added. `np.atleast2d(a).T` achieves this, as does - `a[:, np.newaxis]`. - For a 2-D array, this is a standard matrix transpose. - For an n-D array, if axes are given, their order indicates how the - axes are permuted (see Examples). If axes are not provided and - ``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then - ``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``. - - Parameters - ---------- - axes : None, tuple of ints, or `n` ints - - * None or no argument: reverses the order of the axes. - - * tuple of ints: `i` in the `j`-th place in the tuple means `a`'s - `i`-th axis becomes `a.transpose()`'s `j`-th axis. - - * `n` ints: same as an n-tuple of the same ints (this form is - intended simply as a "convenience" alternative to the tuple form) - - Returns - ------- - out : ndarray - View of `a`, with axes suitably permuted. - - See Also - -------- - ndarray.T : Array property returning the array transposed. - ndarray.reshape : Give a new shape to an array without changing its data. 
- - Examples - -------- - >>> a = np.array([[1, 2], [3, 4]]) - >>> a - array([[1, 2], - [3, 4]]) - >>> a.transpose() - array([[1, 3], - [2, 4]]) - >>> a.transpose((1, 0)) - array([[1, 3], - [2, 4]]) - >>> a.transpose(1, 0) - array([[1, 3], - [2, 4]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('var', - """ - a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False) - - Returns the variance of the array elements, along given axis. - - Refer to `numpy.var` for full documentation. - - See Also - -------- - numpy.var : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('view', - """ - a.view(dtype=None, type=None) - - New view of array with the same data. - - Parameters - ---------- - dtype : data-type or ndarray sub-class, optional - Data-type descriptor of the returned view, e.g., float32 or int16. The - default, None, results in the view having the same data-type as `a`. - This argument can also be specified as an ndarray sub-class, which - then specifies the type of the returned object (this is equivalent to - setting the ``type`` parameter). - type : Python type, optional - Type of the returned view, e.g., ndarray or matrix. Again, the - default None results in type preservation. - - Notes - ----- - ``a.view()`` is used two different ways: - - ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view - of the array's memory with a different data-type. This can cause a - reinterpretation of the bytes of memory. - - ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just - returns an instance of `ndarray_subclass` that looks at the same array - (same shape, dtype, etc.) This does not cause a reinterpretation of the - memory. 
- - For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of - bytes per entry than the previous dtype (for example, converting a - regular array to a structured array), then the behavior of the view - cannot be predicted just from the superficial appearance of ``a`` (shown - by ``print(a)``). It also depends on exactly how ``a`` is stored in - memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus - defined as a slice or transpose, etc., the view may give different - results. - - - Examples - -------- - >>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) - - Viewing array data using a different type and dtype: - - >>> y = x.view(dtype=np.int16, type=np.matrix) - >>> y - matrix([[513]], dtype=int16) - >>> print(type(y)) - - - Creating a view on a structured array so it can be used in calculations - - >>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)]) - >>> xv = x.view(dtype=np.int8).reshape(-1,2) - >>> xv - array([[1, 2], - [3, 4]], dtype=int8) - >>> xv.mean(0) - array([2., 3.]) - - Making changes to the view changes the underlying array - - >>> xv[0,1] = 20 - >>> x - array([(1, 20), (3, 4)], dtype=[('a', 'i1'), ('b', 'i1')]) - - Using a view to convert an array to a recarray: - - >>> z = x.view(np.recarray) - >>> z.a - array([1, 3], dtype=int8) - - Views share data: - - >>> x[0] = (9, 10) - >>> z[0] - (9, 10) - - Views that change the dtype size (bytes per entry) should normally be - avoided on arrays defined by slices, transposes, fortran-ordering, etc.: - - >>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16) - >>> y = x[:, 0:2] - >>> y - array([[1, 2], - [4, 5]], dtype=int16) - >>> y.view(dtype=[('width', np.int16), ('length', np.int16)]) - Traceback (most recent call last): - ... 
- ValueError: To change to a dtype of a different size, the array must be C-contiguous - >>> z = y.copy() - >>> z.view(dtype=[('width', np.int16), ('length', np.int16)]) - array([[(1, 2)], - [(4, 5)]], dtype=[('width', '>> oct_array = np.frompyfunc(oct, 1, 1) - >>> oct_array(np.array((10, 30, 100))) - array(['0o12', '0o36', '0o144'], dtype=object) - >>> np.array((oct(10), oct(30), oct(100))) # for comparison - array(['0o12', '0o36', '0o144'], dtype='>> np.geterrobj() # first get the defaults - [8192, 521, None] - - >>> def err_handler(type, flag): - ... print("Floating point error (%s), with flag %s" % (type, flag)) - ... - >>> old_bufsize = np.setbufsize(20000) - >>> old_err = np.seterr(divide='raise') - >>> old_handler = np.seterrcall(err_handler) - >>> np.geterrobj() - [8192, 521, ] - - >>> old_err = np.seterr(all='ignore') - >>> np.base_repr(np.geterrobj()[1], 8) - '0' - >>> old_err = np.seterr(divide='warn', over='log', under='call', - ... invalid='print') - >>> np.base_repr(np.geterrobj()[1], 8) - '4351' - - """) - -add_newdoc('numpy.core.umath', 'seterrobj', - """ - seterrobj(errobj) - - Set the object that defines floating-point error handling. - - The error object contains all information that defines the error handling - behavior in NumPy. `seterrobj` is used internally by the other - functions that set error handling behavior (`seterr`, `seterrcall`). - - Parameters - ---------- - errobj : list - The error object, a list containing three elements: - [internal numpy buffer size, error mask, error callback function]. - - The error mask is a single integer that holds the treatment information - on all four floating point errors. The information for each error type - is contained in three bits of the integer. If we print it in base 8, we - can see what treatment is set for "invalid", "under", "over", and - "divide" (in that order). 
The printed string can be interpreted with - - * 0 : 'ignore' - * 1 : 'warn' - * 2 : 'raise' - * 3 : 'call' - * 4 : 'print' - * 5 : 'log' - - See Also - -------- - geterrobj, seterr, geterr, seterrcall, geterrcall - getbufsize, setbufsize - - Notes - ----- - For complete documentation of the types of floating-point exceptions and - treatment options, see `seterr`. - - Examples - -------- - >>> old_errobj = np.geterrobj() # first get the defaults - >>> old_errobj - [8192, 521, None] - - >>> def err_handler(type, flag): - ... print("Floating point error (%s), with flag %s" % (type, flag)) - ... - >>> new_errobj = [20000, 12, err_handler] - >>> np.seterrobj(new_errobj) - >>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn') - '14' - >>> np.geterr() - {'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'} - >>> np.geterrcall() is err_handler - True - - """) - - -############################################################################## -# -# compiled_base functions -# -############################################################################## - -add_newdoc('numpy.core.multiarray', 'add_docstring', - """ - add_docstring(obj, docstring) - - Add a docstring to a built-in obj if possible. - If the obj already has a docstring raise a RuntimeError - If this routine does not know how to add a docstring to the object - raise a TypeError - """) - -add_newdoc('numpy.core.umath', '_add_newdoc_ufunc', - """ - add_ufunc_docstring(ufunc, new_docstring) - - Replace the docstring for a ufunc with new_docstring. - This method will only work if the current docstring for - the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.) - - Parameters - ---------- - ufunc : numpy.ufunc - A ufunc whose current doc is NULL. - new_docstring : string - The new docstring for the ufunc. - - Notes - ----- - This method allocates memory for new_docstring on - the heap. 
Technically this creates a mempory leak, since this - memory will not be reclaimed until the end of the program - even if the ufunc itself is removed. However this will only - be a problem if the user is repeatedly creating ufuncs with - no documentation, adding documentation via add_newdoc_ufunc, - and then throwing away the ufunc. - """) - - -add_newdoc('numpy.core._multiarray_tests', 'format_float_OSprintf_g', - """ - format_float_OSprintf_g(val, precision) - - Print a floating point scalar using the system's printf function, - equivalent to: - - printf("%.*g", precision, val); - - for half/float/double, or replacing 'g' by 'Lg' for longdouble. This - method is designed to help cross-validate the format_float_* methods. - - Parameters - ---------- - val : python float or numpy floating scalar - Value to format. - - precision : non-negative integer, optional - Precision given to printf. - - Returns - ------- - rep : string - The string representation of the floating point value - - See Also - -------- - format_float_scientific - format_float_positional - """) - - -############################################################################## -# -# Documentation for ufunc attributes and methods -# -############################################################################## - - -############################################################################## -# -# ufunc object -# -############################################################################## - -add_newdoc('numpy.core', 'ufunc', - """ - Functions that operate element by element on whole arrays. - - To see the documentation for a specific ufunc, use `info`. For - example, ``np.info(np.sin)``. Because ufuncs are written in C - (for speed) and linked into Python with NumPy's ufunc facility, - Python's help() function finds this page whenever help() is called - on a ufunc. - - A detailed explanation of ufuncs can be found in the docs for :ref:`ufuncs`. 
- - Calling ufuncs: - =============== - - op(*x[, out], where=True, **kwargs) - Apply `op` to the arguments `*x` elementwise, broadcasting the arguments. - - The broadcasting rules are: - - * Dimensions of length 1 may be prepended to either array. - * Arrays may be repeated along dimensions of length 1. - - Parameters - ---------- - *x : array_like - Input arrays. - out : ndarray, None, or tuple of ndarray and None, optional - Alternate array object(s) in which to put the result; if provided, it - must have a shape that the inputs broadcast to. A tuple of arrays - (possible only as a keyword argument) must have length equal to the - number of outputs; use None for uninitialized outputs to be - allocated by the ufunc. - where : array_like, optional - This condition is broadcast over the input. At locations where the - condition is True, the `out` array will be set to the ufunc result. - Elsewhere, the `out` array will retain its original value. - Note that if an uninitialized `out` array is created via the default - ``out=None``, locations within it where the condition is False will - remain uninitialized. - **kwargs - For other keyword-only arguments, see the :ref:`ufunc docs `. - - Returns - ------- - r : ndarray or tuple of ndarray - `r` will have the shape that the arrays in `x` broadcast to; if `out` is - provided, it will be returned. If not, `r` will be allocated and - may contain uninitialized values. If the function has more than one - output, then the result will be a tuple of arrays. - - """) - - -############################################################################## -# -# ufunc attributes -# -############################################################################## - -add_newdoc('numpy.core', 'ufunc', ('identity', - """ - The identity value. - - Data attribute containing the identity element for the ufunc, if it has one. - If it does not, the attribute value is None. 
- - Examples - -------- - >>> np.add.identity - 0 - >>> np.multiply.identity - 1 - >>> np.power.identity - 1 - >>> print(np.exp.identity) - None - """)) - -add_newdoc('numpy.core', 'ufunc', ('nargs', - """ - The number of arguments. - - Data attribute containing the number of arguments the ufunc takes, including - optional ones. - - Notes - ----- - Typically this value will be one more than what you might expect because all - ufuncs take the optional "out" argument. - - Examples - -------- - >>> np.add.nargs - 3 - >>> np.multiply.nargs - 3 - >>> np.power.nargs - 3 - >>> np.exp.nargs - 2 - """)) - -add_newdoc('numpy.core', 'ufunc', ('nin', - """ - The number of inputs. - - Data attribute containing the number of arguments the ufunc treats as input. - - Examples - -------- - >>> np.add.nin - 2 - >>> np.multiply.nin - 2 - >>> np.power.nin - 2 - >>> np.exp.nin - 1 - """)) - -add_newdoc('numpy.core', 'ufunc', ('nout', - """ - The number of outputs. - - Data attribute containing the number of arguments the ufunc treats as output. - - Notes - ----- - Since all ufuncs can take output arguments, this will always be (at least) 1. - - Examples - -------- - >>> np.add.nout - 1 - >>> np.multiply.nout - 1 - >>> np.power.nout - 1 - >>> np.exp.nout - 1 - - """)) - -add_newdoc('numpy.core', 'ufunc', ('ntypes', - """ - The number of types. - - The number of numerical NumPy types - of which there are 18 total - on which - the ufunc can operate. - - See Also - -------- - numpy.ufunc.types - - Examples - -------- - >>> np.add.ntypes - 18 - >>> np.multiply.ntypes - 18 - >>> np.power.ntypes - 17 - >>> np.exp.ntypes - 7 - >>> np.remainder.ntypes - 14 - - """)) - -add_newdoc('numpy.core', 'ufunc', ('types', - """ - Returns a list with types grouped input->output. - - Data attribute listing the data-type "Domain-Range" groupings the ufunc can - deliver. The data-types are given using the character codes. 
- - See Also - -------- - numpy.ufunc.ntypes - - Examples - -------- - >>> np.add.types - ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', - 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', - 'GG->G', 'OO->O'] - - >>> np.multiply.types - ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', - 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', - 'GG->G', 'OO->O'] - - >>> np.power.types - ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', - 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G', - 'OO->O'] - - >>> np.exp.types - ['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O'] - - >>> np.remainder.types - ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', - 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O'] - - """)) - -add_newdoc('numpy.core', 'ufunc', ('signature', - """ - Definition of the core elements a generalized ufunc operates on. - - The signature determines how the dimensions of each input/output array - are split into core and loop dimensions: - - 1. Each dimension in the signature is matched to a dimension of the - corresponding passed-in array, starting from the end of the shape tuple. - 2. Core dimensions assigned to the same label in the signature must have - exactly matching sizes, no broadcasting is performed. - 3. The core dimensions are removed from all inputs and the remaining - dimensions are broadcast together, defining the loop dimensions. - - Notes - ----- - Generalized ufuncs are used internally in many linalg functions, and in - the testing suite; the examples below are taken from these. - For ufuncs that operate on scalars, the signature is None, which is - equivalent to '()' for every argument. 
- - Examples - -------- - >>> np.core.umath_tests.matrix_multiply.signature - '(m,n),(n,p)->(m,p)' - >>> np.linalg._umath_linalg.det.signature - '(m,m)->()' - >>> np.add.signature is None - True # equivalent to '(),()->()' - """)) - -############################################################################## -# -# ufunc methods -# -############################################################################## - -add_newdoc('numpy.core', 'ufunc', ('reduce', - """ - reduce(a, axis=0, dtype=None, out=None, keepdims=False, initial=, where=True) - - Reduces `a`'s dimension by one, by applying ufunc along one axis. - - Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then - :math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` = - the result of iterating `j` over :math:`range(N_i)`, cumulatively applying - ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`. - For a one-dimensional array, reduce produces results equivalent to: - :: - - r = op.identity # op = ufunc - for i in range(len(A)): - r = op(r, A[i]) - return r - - For example, add.reduce() is equivalent to sum(). - - Parameters - ---------- - a : array_like - The array to act on. - axis : None or int or tuple of ints, optional - Axis or axes along which a reduction is performed. - The default (`axis` = 0) is perform a reduction over the first - dimension of the input array. `axis` may be negative, in - which case it counts from the last to the first axis. - - .. versionadded:: 1.7.0 - - If this is None, a reduction is performed over all the axes. - If this is a tuple of ints, a reduction is performed on multiple - axes, instead of a single axis or all the axes as before. - - For operations which are either not commutative or not associative, - doing a reduction over multiple axes is not well-defined. The - ufuncs do not currently raise an exception in this case, but will - likely do so in the future. 
- dtype : data-type code, optional - The type used to represent the intermediate results. Defaults - to the data-type of the output array if this is provided, or - the data-type of the input array if no output array is provided. - out : ndarray, None, or tuple of ndarray and None, optional - A location into which the result is stored. If not provided or None, - a freshly-allocated array is returned. For consistency with - ``ufunc.__call__``, if given as a keyword, this may be wrapped in a - 1-element tuple. - - .. versionchanged:: 1.13.0 - Tuples are allowed for keyword argument. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `arr`. - - .. versionadded:: 1.7.0 - initial : scalar, optional - The value with which to start the reduction. - If the ufunc has no identity or the dtype is object, this defaults - to None - otherwise it defaults to ufunc.identity. - If ``None`` is given, the first element of the reduction is used, - and an error is thrown if the reduction is empty. - - .. versionadded:: 1.15.0 - - where : array_like of bool, optional - A boolean array which is broadcasted to match the dimensions - of `a`, and selects elements to include in the reduction. Note - that for ufuncs like ``minimum`` that do not have an identity - defined, one has to pass in also ``initial``. - - .. versionadded:: 1.17.0 - - Returns - ------- - r : ndarray - The reduced array. If `out` was supplied, `r` is a reference to it. 
- - Examples - -------- - >>> np.multiply.reduce([2,3,5]) - 30 - - A multi-dimensional array example: - - >>> X = np.arange(8).reshape((2,2,2)) - >>> X - array([[[0, 1], - [2, 3]], - [[4, 5], - [6, 7]]]) - >>> np.add.reduce(X, 0) - array([[ 4, 6], - [ 8, 10]]) - >>> np.add.reduce(X) # confirm: default axis value is 0 - array([[ 4, 6], - [ 8, 10]]) - >>> np.add.reduce(X, 1) - array([[ 2, 4], - [10, 12]]) - >>> np.add.reduce(X, 2) - array([[ 1, 5], - [ 9, 13]]) - - You can use the ``initial`` keyword argument to initialize the reduction - with a different value, and ``where`` to select specific elements to include: - - >>> np.add.reduce([10], initial=5) - 15 - >>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10) - array([14., 14.]) - >>> a = np.array([10., np.nan, 10]) - >>> np.add.reduce(a, where=~np.isnan(a)) - 20.0 - - Allows reductions of empty arrays where they would normally fail, i.e. - for ufuncs without an identity. - - >>> np.minimum.reduce([], initial=np.inf) - inf - >>> np.minimum.reduce([[1., 2.], [3., 4.]], initial=10., where=[True, False]) - array([ 1., 10.]) - >>> np.minimum.reduce([]) - Traceback (most recent call last): - ... - ValueError: zero-size array to reduction operation minimum which has no identity - """)) - -add_newdoc('numpy.core', 'ufunc', ('accumulate', - """ - accumulate(array, axis=0, dtype=None, out=None) - - Accumulate the result of applying the operator to all elements. - - For a one-dimensional array, accumulate produces results equivalent to:: - - r = np.empty(len(A)) - t = op.identity # op = the ufunc being applied to A's elements - for i in range(len(A)): - t = op(t, A[i]) - r[i] = t - return r - - For example, add.accumulate() is equivalent to np.cumsum(). - - For a multi-dimensional array, accumulate is applied along only one - axis (axis zero by default; see Examples below) so repeated use is - necessary if one wants to accumulate over multiple axes. 
- - Parameters - ---------- - array : array_like - The array to act on. - axis : int, optional - The axis along which to apply the accumulation; default is zero. - dtype : data-type code, optional - The data-type used to represent the intermediate results. Defaults - to the data-type of the output array if such is provided, or the - the data-type of the input array if no output array is provided. - out : ndarray, None, or tuple of ndarray and None, optional - A location into which the result is stored. If not provided or None, - a freshly-allocated array is returned. For consistency with - ``ufunc.__call__``, if given as a keyword, this may be wrapped in a - 1-element tuple. - - .. versionchanged:: 1.13.0 - Tuples are allowed for keyword argument. - - Returns - ------- - r : ndarray - The accumulated values. If `out` was supplied, `r` is a reference to - `out`. - - Examples - -------- - 1-D array examples: - - >>> np.add.accumulate([2, 3, 5]) - array([ 2, 5, 10]) - >>> np.multiply.accumulate([2, 3, 5]) - array([ 2, 6, 30]) - - 2-D array examples: - - >>> I = np.eye(2) - >>> I - array([[1., 0.], - [0., 1.]]) - - Accumulate along axis 0 (rows), down columns: - - >>> np.add.accumulate(I, 0) - array([[1., 0.], - [1., 1.]]) - >>> np.add.accumulate(I) # no axis specified = axis zero - array([[1., 0.], - [1., 1.]]) - - Accumulate along axis 1 (columns), through rows: - - >>> np.add.accumulate(I, 1) - array([[1., 1.], - [0., 1.]]) - - """)) - -add_newdoc('numpy.core', 'ufunc', ('reduceat', - """ - reduceat(a, indices, axis=0, dtype=None, out=None) - - Performs a (local) reduce with specified slices over a single axis. - - For i in ``range(len(indices))``, `reduceat` computes - ``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th - generalized "row" parallel to `axis` in the final result (i.e., in a - 2-D array, for example, if `axis = 0`, it becomes the i-th row, but if - `axis = 1`, it becomes the i-th column). 
There are three exceptions to this: - - * when ``i = len(indices) - 1`` (so for the last index), - ``indices[i+1] = a.shape[axis]``. - * if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is - simply ``a[indices[i]]``. - * if ``indices[i] >= len(a)`` or ``indices[i] < 0``, an error is raised. - - The shape of the output depends on the size of `indices`, and may be - larger than `a` (this happens if ``len(indices) > a.shape[axis]``). - - Parameters - ---------- - a : array_like - The array to act on. - indices : array_like - Paired indices, comma separated (not colon), specifying slices to - reduce. - axis : int, optional - The axis along which to apply the reduceat. - dtype : data-type code, optional - The type used to represent the intermediate results. Defaults - to the data type of the output array if this is provided, or - the data type of the input array if no output array is provided. - out : ndarray, None, or tuple of ndarray and None, optional - A location into which the result is stored. If not provided or None, - a freshly-allocated array is returned. For consistency with - ``ufunc.__call__``, if given as a keyword, this may be wrapped in a - 1-element tuple. - - .. versionchanged:: 1.13.0 - Tuples are allowed for keyword argument. - - Returns - ------- - r : ndarray - The reduced values. If `out` was supplied, `r` is a reference to - `out`. - - Notes - ----- - A descriptive example: - - If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as - ``ufunc.reduceat(a, indices)[::2]`` where `indices` is - ``range(len(array) - 1)`` with a zero placed - in every other element: - ``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``. - - Don't be fooled by this attribute's name: `reduceat(a)` is not - necessarily smaller than `a`. 
- - Examples - -------- - To take the running sum of four successive values: - - >>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2] - array([ 6, 10, 14, 18]) - - A 2-D example: - - >>> x = np.linspace(0, 15, 16).reshape(4,4) - >>> x - array([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.], - [12., 13., 14., 15.]]) - - :: - - # reduce such that the result has the following five rows: - # [row1 + row2 + row3] - # [row4] - # [row2] - # [row3] - # [row1 + row2 + row3 + row4] - - >>> np.add.reduceat(x, [0, 3, 1, 2, 0]) - array([[12., 15., 18., 21.], - [12., 13., 14., 15.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.], - [24., 28., 32., 36.]]) - - :: - - # reduce such that result has the following two columns: - # [col1 * col2 * col3, col4] - - >>> np.multiply.reduceat(x, [0, 3], 1) - array([[ 0., 3.], - [ 120., 7.], - [ 720., 11.], - [2184., 15.]]) - - """)) - -add_newdoc('numpy.core', 'ufunc', ('outer', - """ - outer(A, B, **kwargs) - - Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`. - - Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of - ``op.outer(A, B)`` is an array of dimension M + N such that: - - .. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] = - op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}]) - - For `A` and `B` one-dimensional, this is equivalent to:: - - r = empty(len(A),len(B)) - for i in range(len(A)): - for j in range(len(B)): - r[i,j] = op(A[i], B[j]) # op = ufunc in question - - Parameters - ---------- - A : array_like - First array - B : array_like - Second array - kwargs : any - Arguments to pass on to the ufunc. Typically `dtype` or `out`. 
- - Returns - ------- - r : ndarray - Output array - - See Also - -------- - numpy.outer - - Examples - -------- - >>> np.multiply.outer([1, 2, 3], [4, 5, 6]) - array([[ 4, 5, 6], - [ 8, 10, 12], - [12, 15, 18]]) - - A multi-dimensional example: - - >>> A = np.array([[1, 2, 3], [4, 5, 6]]) - >>> A.shape - (2, 3) - >>> B = np.array([[1, 2, 3, 4]]) - >>> B.shape - (1, 4) - >>> C = np.multiply.outer(A, B) - >>> C.shape; C - (2, 3, 1, 4) - array([[[[ 1, 2, 3, 4]], - [[ 2, 4, 6, 8]], - [[ 3, 6, 9, 12]]], - [[[ 4, 8, 12, 16]], - [[ 5, 10, 15, 20]], - [[ 6, 12, 18, 24]]]]) - - """)) - -add_newdoc('numpy.core', 'ufunc', ('at', - """ - at(a, indices, b=None) - - Performs unbuffered in place operation on operand 'a' for elements - specified by 'indices'. For addition ufunc, this method is equivalent to - ``a[indices] += b``, except that results are accumulated for elements that - are indexed more than once. For example, ``a[[0,0]] += 1`` will only - increment the first element once because of buffering, whereas - ``add.at(a, [0,0], 1)`` will increment the first element twice. - - .. versionadded:: 1.8.0 - - Parameters - ---------- - a : array_like - The array to perform in place operation on. - indices : array_like or tuple - Array like index object or slice object for indexing into first - operand. If first operand has multiple dimensions, indices can be a - tuple of array like index objects or slice objects. - b : array_like - Second operand for ufuncs requiring two operands. Operand must be - broadcastable over first operand after indexing or slicing. 
- - Examples - -------- - Set items 0 and 1 to their negative values: - - >>> a = np.array([1, 2, 3, 4]) - >>> np.negative.at(a, [0, 1]) - >>> a - array([-1, -2, 3, 4]) - - Increment items 0 and 1, and increment item 2 twice: - - >>> a = np.array([1, 2, 3, 4]) - >>> np.add.at(a, [0, 1, 2, 2], 1) - >>> a - array([2, 3, 5, 4]) - - Add items 0 and 1 in first array to second array, - and store results in first array: - - >>> a = np.array([1, 2, 3, 4]) - >>> b = np.array([1, 2]) - >>> np.add.at(a, [0, 1], b) - >>> a - array([2, 4, 3, 4]) - - """)) - -############################################################################## -# -# Documentation for dtype attributes and methods -# -############################################################################## - -############################################################################## -# -# dtype object -# -############################################################################## - -add_newdoc('numpy.core.multiarray', 'dtype', - """ - dtype(obj, align=False, copy=False) - - Create a data type object. - - A numpy array is homogeneous, and contains elements described by a - dtype object. A dtype object can be constructed from different - combinations of fundamental numeric types. - - Parameters - ---------- - obj - Object to be converted to a data type object. - align : bool, optional - Add padding to the fields to match what a C compiler would output - for a similar C-struct. Can be ``True`` only if `obj` is a dictionary - or a comma-separated string. If a struct dtype is being created, - this also sets a sticky alignment flag ``isalignedstruct``. - copy : bool, optional - Make a new copy of the data-type object. If ``False``, the result - may just be a reference to a built-in data-type object. 
- - See also - -------- - result_type - - Examples - -------- - Using array-scalar type: - - >>> np.dtype(np.int16) - dtype('int16') - - Structured type, one field name 'f1', containing int16: - - >>> np.dtype([('f1', np.int16)]) - dtype([('f1', '>> np.dtype([('f1', [('f1', np.int16)])]) - dtype([('f1', [('f1', '>> np.dtype([('f1', np.uint64), ('f2', np.int32)]) - dtype([('f1', '>> np.dtype([('a','f8'),('b','S10')]) - dtype([('a', '>> np.dtype("i4, (2,3)f8") - dtype([('f0', '>> np.dtype([('hello',(np.int64,3)),('world',np.void,10)]) - dtype([('hello', '>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)})) - dtype((numpy.int16, [('x', 'i1'), ('y', 'i1')])) - - Using dictionaries. Two fields named 'gender' and 'age': - - >>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]}) - dtype([('gender', 'S1'), ('age', 'u1')]) - - Offsets in bytes, here 0 and 25: - - >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)}) - dtype([('surname', 'S25'), ('age', 'u1')]) - - """) - -############################################################################## -# -# dtype attributes -# -############################################################################## - -add_newdoc('numpy.core.multiarray', 'dtype', ('alignment', - """ - The required alignment (bytes) of this data-type according to the compiler. - - More information is available in the C-API section of the manual. - - Examples - -------- - - >>> x = np.dtype('i4') - >>> x.alignment - 4 - - >>> x = np.dtype(float) - >>> x.alignment - 8 - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder', - """ - A character indicating the byte-order of this data-type object. - - One of: - - === ============== - '=' native - '<' little-endian - '>' big-endian - '|' not applicable - === ============== - - All built-in data-type objects have byteorder either '=' or '|'. 
- - Examples - -------- - - >>> dt = np.dtype('i2') - >>> dt.byteorder - '=' - >>> # endian is not relevant for 8 bit numbers - >>> np.dtype('i1').byteorder - '|' - >>> # or ASCII strings - >>> np.dtype('S2').byteorder - '|' - >>> # Even if specific code is given, and it is native - >>> # '=' is the byteorder - >>> import sys - >>> sys_is_le = sys.byteorder == 'little' - >>> native_code = sys_is_le and '<' or '>' - >>> swapped_code = sys_is_le and '>' or '<' - >>> dt = np.dtype(native_code + 'i2') - >>> dt.byteorder - '=' - >>> # Swapped code shows up as itself - >>> dt = np.dtype(swapped_code + 'i2') - >>> dt.byteorder == swapped_code - True - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('char', - """A unique character code for each of the 21 different built-in types. - - Examples - -------- - - >>> x = np.dtype(float) - >>> x.char - 'd' - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('descr', - """ - `__array_interface__` description of the data-type. - - The format is that required by the 'descr' key in the - `__array_interface__` attribute. - - Warning: This attribute exists specifically for `__array_interface__`, - and passing it directly to `np.dtype` will not accurately reconstruct - some dtypes (e.g., scalar and subarray dtypes). - - Examples - -------- - - >>> x = np.dtype(float) - >>> x.descr - [('', '>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) - >>> dt.descr - [('name', '>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) - >>> print(dt.fields) - {'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)} - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('flags', - """ - Bit-flags describing how this data type is to be interpreted. - - Bit-masks are in `numpy.core.multiarray` as the constants - `ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`, - `NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. 
A full explanation - of these flags is in C-API documentation; they are largely useful - for user-defined data-types. - - The following example demonstrates that operations on this particular - dtype requires Python C-API. - - Examples - -------- - - >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)]) - >>> x.flags - 16 - >>> np.core.multiarray.NEEDS_PYAPI - 16 - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject', - """ - Boolean indicating whether this dtype contains any reference-counted - objects in any fields or sub-dtypes. - - Recall that what is actually in the ndarray memory representing - the Python object is the memory address of that object (a pointer). - Special handling may be required, and this attribute is useful for - distinguishing data types that may contain arbitrary Python objects - and data-types that won't. - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin', - """ - Integer indicating how this dtype relates to the built-in dtypes. - - Read-only. - - = ======================================================================== - 0 if this is a structured array type, with fields - 1 if this is a dtype compiled into numpy (such as ints, floats etc) - 2 if the dtype is for a user-defined numpy type - A user-defined type uses the numpy C-API machinery to extend - numpy to handle a new array type. See - :ref:`user.user-defined-data-types` in the NumPy manual. - = ======================================================================== - - Examples - -------- - >>> dt = np.dtype('i2') - >>> dt.isbuiltin - 1 - >>> dt = np.dtype('f8') - >>> dt.isbuiltin - 1 - >>> dt = np.dtype([('field1', 'f8')]) - >>> dt.isbuiltin - 0 - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('isnative', - """ - Boolean indicating whether the byte order of this dtype is native - to the platform. 
- - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct', - """ - Boolean indicating whether the dtype is a struct which maintains - field alignment. This flag is sticky, so when combining multiple - structs together, it is preserved and produces new dtypes which - are also aligned. - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize', - """ - The element size of this data-type object. - - For 18 of the 21 types this number is fixed by the data-type. - For the flexible data-types, this number can be anything. - - Examples - -------- - - >>> arr = np.array([[1, 2], [3, 4]]) - >>> arr.dtype - dtype('int64') - >>> arr.itemsize - 8 - - >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) - >>> dt.itemsize - 80 - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('kind', - """ - A character code (one of 'biufcmMOSUV') identifying the general kind of data. - - = ====================== - b boolean - i signed integer - u unsigned integer - f floating-point - c complex floating-point - m timedelta - M datetime - O object - S (byte-)string - U Unicode - V void - = ====================== - - Examples - -------- - - >>> dt = np.dtype('i4') - >>> dt.kind - 'i' - >>> dt = np.dtype('f8') - >>> dt.kind - 'f' - >>> dt = np.dtype([('field1', 'f8')]) - >>> dt.kind - 'V' - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('name', - """ - A bit-width name for this data-type. - - Un-sized flexible data-type objects do not have this attribute. - - Examples - -------- - - >>> x = np.dtype(float) - >>> x.name - 'float64' - >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)]) - >>> x.name - 'void640' - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('names', - """ - Ordered list of field names, or ``None`` if there are no fields. - - The names are ordered according to increasing byte offset. This can be - used, for example, to walk through all of the named fields in offset order. 
- - Examples - -------- - >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) - >>> dt.names - ('name', 'grades') - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('num', - """ - A unique number for each of the 21 different built-in types. - - These are roughly ordered from least-to-most precision. - - Examples - -------- - - >>> dt = np.dtype(str) - >>> dt.num - 19 - - >>> dt = np.dtype(float) - >>> dt.num - 12 - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('shape', - """ - Shape tuple of the sub-array if this data type describes a sub-array, - and ``()`` otherwise. - - Examples - -------- - - >>> dt = np.dtype(('i4', 4)) - >>> dt.shape - (4,) - - >>> dt = np.dtype(('i4', (2, 3))) - >>> dt.shape - (2, 3) - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('ndim', - """ - Number of dimensions of the sub-array if this data type describes a - sub-array, and ``0`` otherwise. - - .. versionadded:: 1.13.0 - - Examples - -------- - >>> x = np.dtype(float) - >>> x.ndim - 0 - - >>> x = np.dtype((float, 8)) - >>> x.ndim - 1 - - >>> x = np.dtype(('i4', (3, 4))) - >>> x.ndim - 2 - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('str', - """The array-protocol typestring of this data-type object.""")) - -add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype', - """ - Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and - None otherwise. - - The *shape* is the fixed shape of the sub-array described by this - data type, and *item_dtype* the data type of the array. - - If a field whose dtype object has this attribute is retrieved, - then the extra dimensions implied by *shape* are tacked on to - the end of the retrieved array. 
- - See Also - -------- - dtype.base - - Examples - -------- - >>> x = numpy.dtype('8f') - >>> x.subdtype - (dtype('float32'), (8,)) - - >>> x = numpy.dtype('i2') - >>> x.subdtype - >>> - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('base', - """ - Returns dtype for the base element of the subarrays, - regardless of their dimension or shape. - - See Also - -------- - dtype.subdtype - - Examples - -------- - >>> x = numpy.dtype('8f') - >>> x.base - dtype('float32') - - >>> x = numpy.dtype('i2') - >>> x.base - dtype('int16') - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('type', - """The type object used to instantiate a scalar of this data-type.""")) - -############################################################################## -# -# dtype methods -# -############################################################################## - -add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder', - """ - newbyteorder(new_order='S') - - Return a new dtype with a different byte order. - - Changes are also made in all fields and sub-arrays of the data type. - - Parameters - ---------- - new_order : string, optional - Byte order to force; a value from the byte order specifications - below. The default value ('S') results in swapping the current - byte order. `new_order` codes can be any of: - - * 'S' - swap dtype from current to opposite endian - * {'<', 'L'} - little endian - * {'>', 'B'} - big endian - * {'=', 'N'} - native order - * {'|', 'I'} - ignore (no change to byte order) - - The code does a case-insensitive check on the first letter of - `new_order` for these alternatives. For example, any of '>' - or 'B' or 'b' or 'brian' are valid to specify big-endian. - - Returns - ------- - new_dtype : dtype - New dtype object with the given change to the byte order. - - Notes - ----- - Changes are also made in all fields and sub-arrays of the data type. 
- - Examples - -------- - >>> import sys - >>> sys_is_le = sys.byteorder == 'little' - >>> native_code = sys_is_le and '<' or '>' - >>> swapped_code = sys_is_le and '>' or '<' - >>> native_dt = np.dtype(native_code+'i2') - >>> swapped_dt = np.dtype(swapped_code+'i2') - >>> native_dt.newbyteorder('S') == swapped_dt - True - >>> native_dt.newbyteorder() == swapped_dt - True - >>> native_dt == swapped_dt.newbyteorder('S') - True - >>> native_dt == swapped_dt.newbyteorder('=') - True - >>> native_dt == swapped_dt.newbyteorder('N') - True - >>> native_dt == native_dt.newbyteorder('|') - True - >>> np.dtype('>> np.dtype('>> np.dtype('>i2') == native_dt.newbyteorder('>') - True - >>> np.dtype('>i2') == native_dt.newbyteorder('B') - True - - """)) - - -############################################################################## -# -# Datetime-related Methods -# -############################################################################## - -add_newdoc('numpy.core.multiarray', 'busdaycalendar', - """ - busdaycalendar(weekmask='1111100', holidays=None) - - A business day calendar object that efficiently stores information - defining valid days for the busday family of functions. - - The default valid days are Monday through Friday ("business days"). - A busdaycalendar object can be specified with any set of weekly - valid days, plus an optional "holiday" dates that always will be invalid. - - Once a busdaycalendar object is created, the weekmask and holidays - cannot be modified. - - .. versionadded:: 1.7.0 - - Parameters - ---------- - weekmask : str or array_like of bool, optional - A seven-element array indicating which of Monday through Sunday are - valid days. May be specified as a length-seven list or array, like - [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string - like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for - weekdays, optionally separated by white space. 
Valid abbreviations - are: Mon Tue Wed Thu Fri Sat Sun - holidays : array_like of datetime64[D], optional - An array of dates to consider as invalid dates, no matter which - weekday they fall upon. Holiday dates may be specified in any - order, and NaT (not-a-time) dates are ignored. This list is - saved in a normalized form that is suited for fast calculations - of valid days. - - Returns - ------- - out : busdaycalendar - A business day calendar object containing the specified - weekmask and holidays values. - - See Also - -------- - is_busday : Returns a boolean array indicating valid days. - busday_offset : Applies an offset counted in valid days. - busday_count : Counts how many valid days are in a half-open date range. - - Attributes - ---------- - Note: once a busdaycalendar object is created, you cannot modify the - weekmask or holidays. The attributes return copies of internal data. - weekmask : (copy) seven-element array of bool - holidays : (copy) sorted array of datetime64[D] - - Examples - -------- - >>> # Some important days in July - ... bdd = np.busdaycalendar( - ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) - >>> # Default is Monday to Friday weekdays - ... bdd.weekmask - array([ True, True, True, True, True, False, False]) - >>> # Any holidays already on the weekend are removed - ... bdd.holidays - array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]') - """) - -add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask', - """A copy of the seven-element boolean mask indicating valid days.""")) - -add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays', - """A copy of the holiday array indicating additional invalid days.""")) - -add_newdoc('numpy.core.multiarray', 'normalize_axis_index', - """ - normalize_axis_index(axis, ndim, msg_prefix=None) - - Normalizes an axis index, `axis`, such that is a valid positive index into - the shape of array with `ndim` dimensions. 
Raises an AxisError with an - appropriate message if this is not possible. - - Used internally by all axis-checking logic. - - .. versionadded:: 1.13.0 - - Parameters - ---------- - axis : int - The un-normalized index of the axis. Can be negative - ndim : int - The number of dimensions of the array that `axis` should be normalized - against - msg_prefix : str - A prefix to put before the message, typically the name of the argument - - Returns - ------- - normalized_axis : int - The normalized axis index, such that `0 <= normalized_axis < ndim` - - Raises - ------ - AxisError - If the axis index is invalid, when `-ndim <= axis < ndim` is false. - - Examples - -------- - >>> normalize_axis_index(0, ndim=3) - 0 - >>> normalize_axis_index(1, ndim=3) - 1 - >>> normalize_axis_index(-1, ndim=3) - 2 - - >>> normalize_axis_index(3, ndim=3) - Traceback (most recent call last): - ... - AxisError: axis 3 is out of bounds for array of dimension 3 - >>> normalize_axis_index(-4, ndim=3, msg_prefix='axes_arg') - Traceback (most recent call last): - ... - AxisError: axes_arg: axis -4 is out of bounds for array of dimension 3 - """) - -add_newdoc('numpy.core.multiarray', 'datetime_data', - """ - datetime_data(dtype, /) - - Get information about the step size of a date or time type. - - The returned tuple can be passed as the second argument of `numpy.datetime64` and - `numpy.timedelta64`. - - Parameters - ---------- - dtype : dtype - The dtype object, which must be a `datetime64` or `timedelta64` type. - - Returns - ------- - unit : str - The :ref:`datetime unit ` on which this dtype - is based. - count : int - The number of base units in a step. 
- - Examples - -------- - >>> dt_25s = np.dtype('timedelta64[25s]') - >>> np.datetime_data(dt_25s) - ('s', 25) - >>> np.array(10, dt_25s).astype('timedelta64[s]') - array(250, dtype='timedelta64[s]') - - The result can be used to construct a datetime that uses the same units - as a timedelta - - >>> np.datetime64('2010', np.datetime_data(dt_25s)) - numpy.datetime64('2010-01-01T00:00:00','25s') - """) - - -############################################################################## -# -# Documentation for `generic` attributes and methods -# -############################################################################## - -add_newdoc('numpy.core.numerictypes', 'generic', - """ - Base class for numpy scalar types. - - Class from which most (all?) numpy scalar types are derived. For - consistency, exposes the same API as `ndarray`, despite many - consequent attributes being either "get-only," or completely irrelevant. - This is the class from which it is strongly suggested users should derive - custom scalar types. - - """) - -# Attributes - -add_newdoc('numpy.core.numerictypes', 'generic', ('T', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class so as to - provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('base', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class so as to - a uniform API. - - See also the corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('data', - """Pointer to start of data.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('dtype', - """Get array data-descriptor.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('flags', - """The integer value of flags.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('flat', - """A 1-D view of the scalar.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('imag', - """The imaginary part of the scalar.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize', - """The length of one element in bytes.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes', - """The length of the scalar in bytes.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('ndim', - """The number of array dimensions.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('real', - """The real part of the scalar.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('shape', - """Tuple of array dimensions.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('size', - """The number of elements in the gentype.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('strides', - """Tuple of bytes steps in each dimension.""")) - -# Methods - -add_newdoc('numpy.core.numerictypes', 'generic', ('all', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('any', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('argmax', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('argmin', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('argsort', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('astype', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class so as to - provide a uniform API. - - See also the corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('choose', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('clip', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('compress', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('copy', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('dump', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('dumps', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('fill', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('flatten', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('getfield', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('item', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('itemset', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('max', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('mean', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('min', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder', - """ - newbyteorder(new_order='S') - - Return a new `dtype` with a different byte order. - - Changes are also made in all fields and sub-arrays of the data type. - - The `new_order` code can be any from the following: - - * 'S' - swap dtype from current to opposite endian - * {'<', 'L'} - little endian - * {'>', 'B'} - big endian - * {'=', 'N'} - native order - * {'|', 'I'} - ignore (no change to byte order) - - Parameters - ---------- - new_order : str, optional - Byte order to force; a value from the byte order specifications - above. The default value ('S') results in swapping the current - byte order. The code does a case-insensitive check on the first - letter of `new_order` for the alternatives above. For example, - any of 'B' or 'b' or 'biggish' are valid to specify big-endian. 
- - - Returns - ------- - new_dtype : dtype - New `dtype` object with the given change to the byte order. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('prod', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('ptp', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('put', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('ravel', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('repeat', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('reshape', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('resize', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('round', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('setfield', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('setflags', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class so as to - provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('sort', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('std', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('sum', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('take', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('tofile', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('tolist', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('tostring', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('trace', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('transpose', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('var', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('view', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. 
- - """)) - - -############################################################################## -# -# Documentation for scalar type abstract base classes in type hierarchy -# -############################################################################## - - -add_newdoc('numpy.core.numerictypes', 'number', - """ - Abstract base class of all numeric scalar types. - - """) - -add_newdoc('numpy.core.numerictypes', 'integer', - """ - Abstract base class of all integer scalar types. - - """) - -add_newdoc('numpy.core.numerictypes', 'signedinteger', - """ - Abstract base class of all signed integer scalar types. - - """) - -add_newdoc('numpy.core.numerictypes', 'unsignedinteger', - """ - Abstract base class of all unsigned integer scalar types. - - """) - -add_newdoc('numpy.core.numerictypes', 'inexact', - """ - Abstract base class of all numeric scalar types with a (potentially) - inexact representation of the values in its range, such as - floating-point numbers. - - """) - -add_newdoc('numpy.core.numerictypes', 'floating', - """ - Abstract base class of all floating-point scalar types. - - """) - -add_newdoc('numpy.core.numerictypes', 'complexfloating', - """ - Abstract base class of all complex number scalar types that are made up of - floating-point numbers. - - """) - -add_newdoc('numpy.core.numerictypes', 'flexible', - """ - Abstract base class of all scalar types without predefined length. - The actual size of these types depends on the specific `np.dtype` - instantiation. - - """) - -add_newdoc('numpy.core.numerictypes', 'character', - """ - Abstract base class of all character string scalar types. 
- - """) - - -############################################################################## -# -# Documentation for concrete scalar classes -# -############################################################################## - -def numeric_type_aliases(aliases): - def type_aliases_gen(): - for alias, doc in aliases: - try: - alias_type = getattr(_numerictypes, alias) - except AttributeError: - # The set of aliases that actually exist varies between platforms - pass - else: - yield (alias_type, alias, doc) - return list(type_aliases_gen()) - - -possible_aliases = numeric_type_aliases([ - ('int8', '8-bit signed integer (-128 to 127)'), - ('int16', '16-bit signed integer (-32768 to 32767)'), - ('int32', '32-bit signed integer (-2147483648 to 2147483647)'), - ('int64', '64-bit signed integer (-9223372036854775808 to 9223372036854775807)'), - ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'), - ('uint8', '8-bit unsigned integer (0 to 255)'), - ('uint16', '16-bit unsigned integer (0 to 65535)'), - ('uint32', '32-bit unsigned integer (0 to 4294967295)'), - ('uint64', '64-bit unsigned integer (0 to 18446744073709551615)'), - ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'), - ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'), - ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'), - ('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'), - ('float96', '96-bit extended-precision floating-point number type'), - ('float128', '128-bit extended-precision floating-point number type'), - ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'), - ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'), - ('complex192', 'Complex number type composed of 2 96-bit extended-precision 
floating-point numbers'), - ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'), - ]) - - -def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): - o = getattr(_numerictypes, obj) - - character_code = dtype(o).char - canonical_name_doc = "" if obj == o.__name__ else "Canonical name: ``np.{}``.\n ".format(obj) - alias_doc = ''.join("Alias: ``np.{}``.\n ".format(alias) for alias in fixed_aliases) - alias_doc += ''.join("Alias *on this platform*: ``np.{}``: {}.\n ".format(alias, doc) - for (alias_type, alias, doc) in possible_aliases if alias_type is o) - - docstring = """ - {doc} - Character code: ``'{character_code}'``. - {canonical_name_doc}{alias_doc} - """.format(doc=doc.strip(), character_code=character_code, - canonical_name_doc=canonical_name_doc, alias_doc=alias_doc) - - add_newdoc('numpy.core.numerictypes', obj, docstring) - - -add_newdoc_for_scalar_type('bool_', ['bool8'], - """ - Boolean type (True or False), stored as a byte. - """) - -add_newdoc_for_scalar_type('byte', [], - """ - Signed integer type, compatible with C ``char``. - """) - -add_newdoc_for_scalar_type('short', [], - """ - Signed integer type, compatible with C ``short``. - """) - -add_newdoc_for_scalar_type('intc', [], - """ - Signed integer type, compatible with C ``int``. - """) - -add_newdoc_for_scalar_type('int_', [], - """ - Signed integer type, compatible with Python `int` anc C ``long``. - """) - -add_newdoc_for_scalar_type('longlong', [], - """ - Signed integer type, compatible with C ``long long``. - """) - -add_newdoc_for_scalar_type('ubyte', [], - """ - Unsigned integer type, compatible with C ``unsigned char``. - """) - -add_newdoc_for_scalar_type('ushort', [], - """ - Unsigned integer type, compatible with C ``unsigned short``. - """) - -add_newdoc_for_scalar_type('uintc', [], - """ - Unsigned integer type, compatible with C ``unsigned int``. 
- """) - -add_newdoc_for_scalar_type('uint', [], - """ - Unsigned integer type, compatible with C ``unsigned long``. - """) - -add_newdoc_for_scalar_type('ulonglong', [], - """ - Signed integer type, compatible with C ``unsigned long long``. - """) - -add_newdoc_for_scalar_type('half', [], - """ - Half-precision floating-point number type. - """) - -add_newdoc_for_scalar_type('single', [], - """ - Single-precision floating-point number type, compatible with C ``float``. - """) - -add_newdoc_for_scalar_type('double', ['float_'], - """ - Double-precision floating-point number type, compatible with Python `float` - and C ``double``. - """) - -add_newdoc_for_scalar_type('longdouble', ['longfloat'], - """ - Extended-precision floating-point number type, compatible with C - ``long double`` but not necessarily with IEEE 754 quadruple-precision. - """) - -add_newdoc_for_scalar_type('csingle', ['singlecomplex'], - """ - Complex number type composed of two single-precision floating-point - numbers. - """) - -add_newdoc_for_scalar_type('cdouble', ['cfloat', 'complex_'], - """ - Complex number type composed of two double-precision floating-point - numbers, compatible with Python `complex`. - """) - -add_newdoc_for_scalar_type('clongdouble', ['clongfloat', 'longcomplex'], - """ - Complex number type composed of two extended-precision floating-point - numbers. - """) - -add_newdoc_for_scalar_type('object_', [], - """ - Any Python object. - """) - -# TODO: work out how to put this on the base class, np.floating -for float_name in ('half', 'single', 'double', 'longdouble'): - add_newdoc('numpy.core.numerictypes', float_name, ('as_integer_ratio', - """ - {ftype}.as_integer_ratio() -> (int, int) - - Return a pair of integers, whose ratio is exactly equal to the original - floating point number, and with a positive denominator. - Raise OverflowError on infinities and a ValueError on NaNs. 
- - >>> np.{ftype}(10.0).as_integer_ratio() - (10, 1) - >>> np.{ftype}(0.0).as_integer_ratio() - (0, 1) - >>> np.{ftype}(-.25).as_integer_ratio() - (-1, 4) - """.format(ftype=float_name))) - diff --git a/venv/lib/python3.7/site-packages/numpy/core/_asarray.py b/venv/lib/python3.7/site-packages/numpy/core/_asarray.py deleted file mode 100644 index 0ad4161..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/_asarray.py +++ /dev/null @@ -1,324 +0,0 @@ -""" -Functions in the ``as*array`` family that promote array-likes into arrays. - -`require` fits this category despite its name not matching this pattern. -""" -from __future__ import division, absolute_import, print_function - -from .overrides import set_module -from .multiarray import array - - -__all__ = [ - "asarray", "asanyarray", "ascontiguousarray", "asfortranarray", "require", -] - -@set_module('numpy') -def asarray(a, dtype=None, order=None): - """Convert the input to an array. - - Parameters - ---------- - a : array_like - Input data, in any form that can be converted to an array. This - includes lists, lists of tuples, tuples, tuples of tuples, tuples - of lists and ndarrays. - dtype : data-type, optional - By default, the data-type is inferred from the input data. - order : {'C', 'F'}, optional - Whether to use row-major (C-style) or - column-major (Fortran-style) memory representation. - Defaults to 'C'. - - Returns - ------- - out : ndarray - Array interpretation of `a`. No copy is performed if the input - is already an ndarray with matching dtype and order. If `a` is a - subclass of ndarray, a base class ndarray is returned. - - See Also - -------- - asanyarray : Similar function which passes through subclasses. - ascontiguousarray : Convert input to a contiguous array. - asfarray : Convert input to a floating point ndarray. - asfortranarray : Convert input to an ndarray with column-major - memory order. - asarray_chkfinite : Similar function which checks input for NaNs and Infs. 
- fromiter : Create an array from an iterator. - fromfunction : Construct an array by executing a function on grid - positions. - - Examples - -------- - Convert a list into an array: - - >>> a = [1, 2] - >>> np.asarray(a) - array([1, 2]) - - Existing arrays are not copied: - - >>> a = np.array([1, 2]) - >>> np.asarray(a) is a - True - - If `dtype` is set, array is copied only if dtype does not match: - - >>> a = np.array([1, 2], dtype=np.float32) - >>> np.asarray(a, dtype=np.float32) is a - True - >>> np.asarray(a, dtype=np.float64) is a - False - - Contrary to `asanyarray`, ndarray subclasses are not passed through: - - >>> issubclass(np.recarray, np.ndarray) - True - >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray) - >>> np.asarray(a) is a - False - >>> np.asanyarray(a) is a - True - - """ - return array(a, dtype, copy=False, order=order) - - -@set_module('numpy') -def asanyarray(a, dtype=None, order=None): - """Convert the input to an ndarray, but pass ndarray subclasses through. - - Parameters - ---------- - a : array_like - Input data, in any form that can be converted to an array. This - includes scalars, lists, lists of tuples, tuples, tuples of tuples, - tuples of lists, and ndarrays. - dtype : data-type, optional - By default, the data-type is inferred from the input data. - order : {'C', 'F'}, optional - Whether to use row-major (C-style) or column-major - (Fortran-style) memory representation. Defaults to 'C'. - - Returns - ------- - out : ndarray or an ndarray subclass - Array interpretation of `a`. If `a` is an ndarray or a subclass - of ndarray, it is returned as-is and no copy is performed. - - See Also - -------- - asarray : Similar function which always returns ndarrays. - ascontiguousarray : Convert input to a contiguous array. - asfarray : Convert input to a floating point ndarray. - asfortranarray : Convert input to an ndarray with column-major - memory order. 
- asarray_chkfinite : Similar function which checks input for NaNs and - Infs. - fromiter : Create an array from an iterator. - fromfunction : Construct an array by executing a function on grid - positions. - - Examples - -------- - Convert a list into an array: - - >>> a = [1, 2] - >>> np.asanyarray(a) - array([1, 2]) - - Instances of `ndarray` subclasses are passed through as-is: - - >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray) - >>> np.asanyarray(a) is a - True - - """ - return array(a, dtype, copy=False, order=order, subok=True) - - -@set_module('numpy') -def ascontiguousarray(a, dtype=None): - """ - Return a contiguous array (ndim >= 1) in memory (C order). - - Parameters - ---------- - a : array_like - Input array. - dtype : str or dtype object, optional - Data-type of returned array. - - Returns - ------- - out : ndarray - Contiguous array of same shape and content as `a`, with type `dtype` - if specified. - - See Also - -------- - asfortranarray : Convert input to an ndarray with column-major - memory order. - require : Return an ndarray that satisfies requirements. - ndarray.flags : Information about the memory layout of the array. - - Examples - -------- - >>> x = np.arange(6).reshape(2,3) - >>> np.ascontiguousarray(x, dtype=np.float32) - array([[0., 1., 2.], - [3., 4., 5.]], dtype=float32) - >>> x.flags['C_CONTIGUOUS'] - True - - Note: This function returns an array with at least one-dimension (1-d) - so it will not preserve 0-d arrays. - - """ - return array(a, dtype, copy=False, order='C', ndmin=1) - - -@set_module('numpy') -def asfortranarray(a, dtype=None): - """ - Return an array (ndim >= 1) laid out in Fortran order in memory. - - Parameters - ---------- - a : array_like - Input array. - dtype : str or dtype object, optional - By default, the data-type is inferred from the input data. - - Returns - ------- - out : ndarray - The input `a` in Fortran, or column-major, order. 
- - See Also - -------- - ascontiguousarray : Convert input to a contiguous (C order) array. - asanyarray : Convert input to an ndarray with either row or - column-major memory order. - require : Return an ndarray that satisfies requirements. - ndarray.flags : Information about the memory layout of the array. - - Examples - -------- - >>> x = np.arange(6).reshape(2,3) - >>> y = np.asfortranarray(x) - >>> x.flags['F_CONTIGUOUS'] - False - >>> y.flags['F_CONTIGUOUS'] - True - - Note: This function returns an array with at least one-dimension (1-d) - so it will not preserve 0-d arrays. - - """ - return array(a, dtype, copy=False, order='F', ndmin=1) - - -@set_module('numpy') -def require(a, dtype=None, requirements=None): - """ - Return an ndarray of the provided type that satisfies requirements. - - This function is useful to be sure that an array with the correct flags - is returned for passing to compiled code (perhaps through ctypes). - - Parameters - ---------- - a : array_like - The object to be converted to a type-and-requirement-satisfying array. - dtype : data-type - The required data-type. If None preserve the current dtype. If your - application requires the data to be in native byteorder, include - a byteorder specification as a part of the dtype specification. - requirements : str or list of str - The requirements list can be any of the following - - * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array - * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array - * 'ALIGNED' ('A') - ensure a data-type aligned array - * 'WRITEABLE' ('W') - ensure a writable array - * 'OWNDATA' ('O') - ensure an array that owns its own data - * 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass - - Returns - ------- - out : ndarray - Array with specified requirements and type if given. - - See Also - -------- - asarray : Convert input to an ndarray. - asanyarray : Convert to an ndarray, but pass through ndarray subclasses. 
- ascontiguousarray : Convert input to a contiguous array. - asfortranarray : Convert input to an ndarray with column-major - memory order. - ndarray.flags : Information about the memory layout of the array. - - Notes - ----- - The returned array will be guaranteed to have the listed requirements - by making a copy if needed. - - Examples - -------- - >>> x = np.arange(6).reshape(2,3) - >>> x.flags - C_CONTIGUOUS : True - F_CONTIGUOUS : False - OWNDATA : False - WRITEABLE : True - ALIGNED : True - WRITEBACKIFCOPY : False - UPDATEIFCOPY : False - - >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F']) - >>> y.flags - C_CONTIGUOUS : False - F_CONTIGUOUS : True - OWNDATA : True - WRITEABLE : True - ALIGNED : True - WRITEBACKIFCOPY : False - UPDATEIFCOPY : False - - """ - possible_flags = {'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C', - 'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F', - 'A': 'A', 'ALIGNED': 'A', - 'W': 'W', 'WRITEABLE': 'W', - 'O': 'O', 'OWNDATA': 'O', - 'E': 'E', 'ENSUREARRAY': 'E'} - if not requirements: - return asanyarray(a, dtype=dtype) - else: - requirements = {possible_flags[x.upper()] for x in requirements} - - if 'E' in requirements: - requirements.remove('E') - subok = False - else: - subok = True - - order = 'A' - if requirements >= {'C', 'F'}: - raise ValueError('Cannot specify both "C" and "F" order') - elif 'F' in requirements: - order = 'F' - requirements.remove('F') - elif 'C' in requirements: - order = 'C' - requirements.remove('C') - - arr = array(a, dtype=dtype, order=order, copy=False, subok=subok) - - for prop in requirements: - if not arr.flags[prop]: - arr = arr.copy(order) - break - return arr diff --git a/venv/lib/python3.7/site-packages/numpy/core/_dtype.py b/venv/lib/python3.7/site-packages/numpy/core/_dtype.py deleted file mode 100644 index df1ff18..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/_dtype.py +++ /dev/null @@ -1,354 +0,0 @@ -""" -A place for code to be called from the 
implementation of np.dtype - -String handling is much easier to do correctly in python. -""" -from __future__ import division, absolute_import, print_function - -import sys - -import numpy as np - - -_kind_to_stem = { - 'u': 'uint', - 'i': 'int', - 'c': 'complex', - 'f': 'float', - 'b': 'bool', - 'V': 'void', - 'O': 'object', - 'M': 'datetime', - 'm': 'timedelta' -} -if sys.version_info[0] >= 3: - _kind_to_stem.update({ - 'S': 'bytes', - 'U': 'str' - }) -else: - _kind_to_stem.update({ - 'S': 'string', - 'U': 'unicode' - }) - - -def _kind_name(dtype): - try: - return _kind_to_stem[dtype.kind] - except KeyError: - raise RuntimeError( - "internal dtype error, unknown kind {!r}" - .format(dtype.kind) - ) - - -def __str__(dtype): - if dtype.fields is not None: - return _struct_str(dtype, include_align=True) - elif dtype.subdtype: - return _subarray_str(dtype) - elif issubclass(dtype.type, np.flexible) or not dtype.isnative: - return dtype.str - else: - return dtype.name - - -def __repr__(dtype): - arg_str = _construction_repr(dtype, include_align=False) - if dtype.isalignedstruct: - arg_str = arg_str + ", align=True" - return "dtype({})".format(arg_str) - - -def _unpack_field(dtype, offset, title=None): - """ - Helper function to normalize the items in dtype.fields. - - Call as: - - dtype, offset, title = _unpack_field(*dtype.fields[name]) - """ - return dtype, offset, title - - -def _isunsized(dtype): - # PyDataType_ISUNSIZED - return dtype.itemsize == 0 - - -def _construction_repr(dtype, include_align=False, short=False): - """ - Creates a string repr of the dtype, excluding the 'dtype()' part - surrounding the object. This object may be a string, a list, or - a dict depending on the nature of the dtype. This - is the object passed as the first parameter to the dtype - constructor, and if no additional constructor parameters are - given, will reproduce the exact memory layout. 
- - Parameters - ---------- - short : bool - If true, this creates a shorter repr using 'kind' and 'itemsize', instead - of the longer type name. - - include_align : bool - If true, this includes the 'align=True' parameter - inside the struct dtype construction dict when needed. Use this flag - if you want a proper repr string without the 'dtype()' part around it. - - If false, this does not preserve the - 'align=True' parameter or sticky NPY_ALIGNED_STRUCT flag for - struct arrays like the regular repr does, because the 'align' - flag is not part of first dtype constructor parameter. This - mode is intended for a full 'repr', where the 'align=True' is - provided as the second parameter. - """ - if dtype.fields is not None: - return _struct_str(dtype, include_align=include_align) - elif dtype.subdtype: - return _subarray_str(dtype) - else: - return _scalar_str(dtype, short=short) - - -def _scalar_str(dtype, short): - byteorder = _byte_order_str(dtype) - - if dtype.type == np.bool_: - if short: - return "'?'" - else: - return "'bool'" - - elif dtype.type == np.object_: - # The object reference may be different sizes on different - # platforms, so it should never include the itemsize here. 
- return "'O'" - - elif dtype.type == np.string_: - if _isunsized(dtype): - return "'S'" - else: - return "'S%d'" % dtype.itemsize - - elif dtype.type == np.unicode_: - if _isunsized(dtype): - return "'%sU'" % byteorder - else: - return "'%sU%d'" % (byteorder, dtype.itemsize / 4) - - # unlike the other types, subclasses of void are preserved - but - # historically the repr does not actually reveal the subclass - elif issubclass(dtype.type, np.void): - if _isunsized(dtype): - return "'V'" - else: - return "'V%d'" % dtype.itemsize - - elif dtype.type == np.datetime64: - return "'%sM8%s'" % (byteorder, _datetime_metadata_str(dtype)) - - elif dtype.type == np.timedelta64: - return "'%sm8%s'" % (byteorder, _datetime_metadata_str(dtype)) - - elif np.issubdtype(dtype, np.number): - # Short repr with endianness, like '' """ - # hack to obtain the native and swapped byte order characters - swapped = np.dtype(int).newbyteorder('s') - native = swapped.newbyteorder('s') - - byteorder = dtype.byteorder - if byteorder == '=': - return native.byteorder - if byteorder == 's': - # TODO: this path can never be reached - return swapped.byteorder - elif byteorder == '|': - return '' - else: - return byteorder - - -def _datetime_metadata_str(dtype): - # TODO: this duplicates the C append_metastr_to_string - unit, count = np.datetime_data(dtype) - if unit == 'generic': - return '' - elif count == 1: - return '[{}]'.format(unit) - else: - return '[{}{}]'.format(count, unit) - - -def _struct_dict_str(dtype, includealignedflag): - # unpack the fields dictionary into ls - names = dtype.names - fld_dtypes = [] - offsets = [] - titles = [] - for name in names: - fld_dtype, offset, title = _unpack_field(*dtype.fields[name]) - fld_dtypes.append(fld_dtype) - offsets.append(offset) - titles.append(title) - - # Build up a string to make the dictionary - - # First, the names - ret = "{'names':[" - ret += ",".join(repr(name) for name in names) - - # Second, the formats - ret += "], 'formats':[" - 
ret += ",".join( - _construction_repr(fld_dtype, short=True) for fld_dtype in fld_dtypes) - - # Third, the offsets - ret += "], 'offsets':[" - ret += ",".join("%d" % offset for offset in offsets) - - # Fourth, the titles - if any(title is not None for title in titles): - ret += "], 'titles':[" - ret += ",".join(repr(title) for title in titles) - - # Fifth, the itemsize - ret += "], 'itemsize':%d" % dtype.itemsize - - if (includealignedflag and dtype.isalignedstruct): - # Finally, the aligned flag - ret += ", 'aligned':True}" - else: - ret += "}" - - return ret - - -def _is_packed(dtype): - """ - Checks whether the structured data type in 'dtype' - has a simple layout, where all the fields are in order, - and follow each other with no alignment padding. - - When this returns true, the dtype can be reconstructed - from a list of the field names and dtypes with no additional - dtype parameters. - - Duplicates the C `is_dtype_struct_simple_unaligned_layout` function. - """ - total_offset = 0 - for name in dtype.names: - fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name]) - if fld_offset != total_offset: - return False - total_offset += fld_dtype.itemsize - if total_offset != dtype.itemsize: - return False - return True - - -def _struct_list_str(dtype): - items = [] - for name in dtype.names: - fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name]) - - item = "(" - if title is not None: - item += "({!r}, {!r}), ".format(title, name) - else: - item += "{!r}, ".format(name) - # Special case subarray handling here - if fld_dtype.subdtype is not None: - base, shape = fld_dtype.subdtype - item += "{}, {}".format( - _construction_repr(base, short=True), - shape - ) - else: - item += _construction_repr(fld_dtype, short=True) - - item += ")" - items.append(item) - - return "[" + ", ".join(items) + "]" - - -def _struct_str(dtype, include_align): - # The list str representation can't include the 'align=' flag, - # so if it is requested and the struct has 
the aligned flag set, - # we must use the dict str instead. - if not (include_align and dtype.isalignedstruct) and _is_packed(dtype): - sub = _struct_list_str(dtype) - - else: - sub = _struct_dict_str(dtype, include_align) - - # If the data type isn't the default, void, show it - if dtype.type != np.void: - return "({t.__module__}.{t.__name__}, {f})".format(t=dtype.type, f=sub) - else: - return sub - - -def _subarray_str(dtype): - base, shape = dtype.subdtype - return "({}, {})".format( - _construction_repr(base, short=True), - shape - ) - - -def _name_includes_bit_suffix(dtype): - if dtype.type == np.object_: - # pointer size varies by system, best to omit it - return False - elif dtype.type == np.bool_: - # implied - return False - elif np.issubdtype(dtype, np.flexible) and _isunsized(dtype): - # unspecified - return False - else: - return True - - -def _name_get(dtype): - # provides dtype.name.__get__, documented as returning a "bit name" - - if dtype.isbuiltin == 2: - # user dtypes don't promise to do anything special - return dtype.type.__name__ - - if issubclass(dtype.type, np.void): - # historically, void subclasses preserve their name, eg `record64` - name = dtype.type.__name__ - else: - name = _kind_name(dtype) - - # append bit counts - if _name_includes_bit_suffix(dtype): - name += "{}".format(dtype.itemsize * 8) - - # append metadata to datetimes - if dtype.type in (np.datetime64, np.timedelta64): - name += _datetime_metadata_str(dtype) - - return name diff --git a/venv/lib/python3.7/site-packages/numpy/core/_dtype_ctypes.py b/venv/lib/python3.7/site-packages/numpy/core/_dtype_ctypes.py deleted file mode 100644 index 7082412..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/_dtype_ctypes.py +++ /dev/null @@ -1,113 +0,0 @@ -""" -Conversion from ctypes to dtype. 
- -In an ideal world, we could achieve this through the PEP3118 buffer protocol, -something like:: - - def dtype_from_ctypes_type(t): - # needed to ensure that the shape of `t` is within memoryview.format - class DummyStruct(ctypes.Structure): - _fields_ = [('a', t)] - - # empty to avoid memory allocation - ctype_0 = (DummyStruct * 0)() - mv = memoryview(ctype_0) - - # convert the struct, and slice back out the field - return _dtype_from_pep3118(mv.format)['a'] - -Unfortunately, this fails because: - -* ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782) -* PEP3118 cannot represent unions, but both numpy and ctypes can -* ctypes cannot handle big-endian structs with PEP3118 (bpo-32780) -""" -import _ctypes -import ctypes - -import numpy as np - - -def _from_ctypes_array(t): - return np.dtype((dtype_from_ctypes_type(t._type_), (t._length_,))) - - -def _from_ctypes_structure(t): - for item in t._fields_: - if len(item) > 2: - raise TypeError( - "ctypes bitfields have no dtype equivalent") - - if hasattr(t, "_pack_"): - formats = [] - offsets = [] - names = [] - current_offset = 0 - for fname, ftyp in t._fields_: - names.append(fname) - formats.append(dtype_from_ctypes_type(ftyp)) - # Each type has a default offset, this is platform dependent for some types. 
- effective_pack = min(t._pack_, ctypes.alignment(ftyp)) - current_offset = ((current_offset + effective_pack - 1) // effective_pack) * effective_pack - offsets.append(current_offset) - current_offset += ctypes.sizeof(ftyp) - - return np.dtype(dict( - formats=formats, - offsets=offsets, - names=names, - itemsize=ctypes.sizeof(t))) - else: - fields = [] - for fname, ftyp in t._fields_: - fields.append((fname, dtype_from_ctypes_type(ftyp))) - - # by default, ctypes structs are aligned - return np.dtype(fields, align=True) - - -def _from_ctypes_scalar(t): - """ - Return the dtype type with endianness included if it's the case - """ - if getattr(t, '__ctype_be__', None) is t: - return np.dtype('>' + t._type_) - elif getattr(t, '__ctype_le__', None) is t: - return np.dtype('<' + t._type_) - else: - return np.dtype(t._type_) - - -def _from_ctypes_union(t): - formats = [] - offsets = [] - names = [] - for fname, ftyp in t._fields_: - names.append(fname) - formats.append(dtype_from_ctypes_type(ftyp)) - offsets.append(0) # Union fields are offset to 0 - - return np.dtype(dict( - formats=formats, - offsets=offsets, - names=names, - itemsize=ctypes.sizeof(t))) - - -def dtype_from_ctypes_type(t): - """ - Construct a dtype object from a ctypes type - """ - if issubclass(t, _ctypes.Array): - return _from_ctypes_array(t) - elif issubclass(t, _ctypes._Pointer): - raise TypeError("ctypes pointers have no dtype equivalent") - elif issubclass(t, _ctypes.Structure): - return _from_ctypes_structure(t) - elif issubclass(t, _ctypes.Union): - return _from_ctypes_union(t) - elif isinstance(getattr(t, '_type_', None), str): - return _from_ctypes_scalar(t) - else: - raise NotImplementedError( - "Unknown ctypes type {}".format(t.__name__)) diff --git a/venv/lib/python3.7/site-packages/numpy/core/_exceptions.py b/venv/lib/python3.7/site-packages/numpy/core/_exceptions.py deleted file mode 100644 index 88a4556..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/_exceptions.py +++ 
/dev/null @@ -1,200 +0,0 @@ -""" -Various richly-typed exceptions, that also help us deal with string formatting -in python where it's easier. - -By putting the formatting in `__str__`, we also avoid paying the cost for -users who silence the exceptions. -""" -from numpy.core.overrides import set_module - -def _unpack_tuple(tup): - if len(tup) == 1: - return tup[0] - else: - return tup - - -def _display_as_base(cls): - """ - A decorator that makes an exception class look like its base. - - We use this to hide subclasses that are implementation details - the user - should catch the base type, which is what the traceback will show them. - - Classes decorated with this decorator are subject to removal without a - deprecation warning. - """ - assert issubclass(cls, Exception) - cls.__name__ = cls.__base__.__name__ - cls.__qualname__ = cls.__base__.__qualname__ - set_module(cls.__base__.__module__)(cls) - return cls - - -class UFuncTypeError(TypeError): - """ Base class for all ufunc exceptions """ - def __init__(self, ufunc): - self.ufunc = ufunc - - -@_display_as_base -class _UFuncBinaryResolutionError(UFuncTypeError): - """ Thrown when a binary resolution fails """ - def __init__(self, ufunc, dtypes): - super().__init__(ufunc) - self.dtypes = tuple(dtypes) - assert len(self.dtypes) == 2 - - def __str__(self): - return ( - "ufunc {!r} cannot use operands with types {!r} and {!r}" - ).format( - self.ufunc.__name__, *self.dtypes - ) - - -@_display_as_base -class _UFuncNoLoopError(UFuncTypeError): - """ Thrown when a ufunc loop cannot be found """ - def __init__(self, ufunc, dtypes): - super().__init__(ufunc) - self.dtypes = tuple(dtypes) - - def __str__(self): - return ( - "ufunc {!r} did not contain a loop with signature matching types " - "{!r} -> {!r}" - ).format( - self.ufunc.__name__, - _unpack_tuple(self.dtypes[:self.ufunc.nin]), - _unpack_tuple(self.dtypes[self.ufunc.nin:]) - ) - - -@_display_as_base -class _UFuncCastingError(UFuncTypeError): - def __init__(self, 
ufunc, casting, from_, to): - super().__init__(ufunc) - self.casting = casting - self.from_ = from_ - self.to = to - - -@_display_as_base -class _UFuncInputCastingError(_UFuncCastingError): - """ Thrown when a ufunc input cannot be casted """ - def __init__(self, ufunc, casting, from_, to, i): - super().__init__(ufunc, casting, from_, to) - self.in_i = i - - def __str__(self): - # only show the number if more than one input exists - i_str = "{} ".format(self.in_i) if self.ufunc.nin != 1 else "" - return ( - "Cannot cast ufunc {!r} input {}from {!r} to {!r} with casting " - "rule {!r}" - ).format( - self.ufunc.__name__, i_str, self.from_, self.to, self.casting - ) - - -@_display_as_base -class _UFuncOutputCastingError(_UFuncCastingError): - """ Thrown when a ufunc output cannot be casted """ - def __init__(self, ufunc, casting, from_, to, i): - super().__init__(ufunc, casting, from_, to) - self.out_i = i - - def __str__(self): - # only show the number if more than one output exists - i_str = "{} ".format(self.out_i) if self.ufunc.nout != 1 else "" - return ( - "Cannot cast ufunc {!r} output {}from {!r} to {!r} with casting " - "rule {!r}" - ).format( - self.ufunc.__name__, i_str, self.from_, self.to, self.casting - ) - - -# Exception used in shares_memory() -@set_module('numpy') -class TooHardError(RuntimeError): - pass - - -@set_module('numpy') -class AxisError(ValueError, IndexError): - """ Axis supplied was invalid. 
""" - def __init__(self, axis, ndim=None, msg_prefix=None): - # single-argument form just delegates to base class - if ndim is None and msg_prefix is None: - msg = axis - - # do the string formatting here, to save work in the C code - else: - msg = ("axis {} is out of bounds for array of dimension {}" - .format(axis, ndim)) - if msg_prefix is not None: - msg = "{}: {}".format(msg_prefix, msg) - - super(AxisError, self).__init__(msg) - - -@_display_as_base -class _ArrayMemoryError(MemoryError): - """ Thrown when an array cannot be allocated""" - def __init__(self, shape, dtype): - self.shape = shape - self.dtype = dtype - - @property - def _total_size(self): - num_bytes = self.dtype.itemsize - for dim in self.shape: - num_bytes *= dim - return num_bytes - - @staticmethod - def _size_to_string(num_bytes): - """ Convert a number of bytes into a binary size string """ - import math - - # https://en.wikipedia.org/wiki/Binary_prefix - LOG2_STEP = 10 - STEP = 1024 - units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB'] - - unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP - unit_val = 1 << (unit_i * LOG2_STEP) - n_units = num_bytes / unit_val - del unit_val - - # ensure we pick a unit that is correct after rounding - if round(n_units) == STEP: - unit_i += 1 - n_units /= STEP - - # deal with sizes so large that we don't have units for them - if unit_i >= len(units): - new_unit_i = len(units) - 1 - n_units *= 1 << ((unit_i - new_unit_i) * LOG2_STEP) - unit_i = new_unit_i - - unit_name = units[unit_i] - # format with a sensible number of digits - if unit_i == 0: - # no decimal point on bytes - return '{:.0f} {}'.format(n_units, unit_name) - elif round(n_units) < 1000: - # 3 significant figures, if none are dropped to the left of the . 
- return '{:#.3g} {}'.format(n_units, unit_name) - else: - # just give all the digits otherwise - return '{:#.0f} {}'.format(n_units, unit_name) - - def __str__(self): - size_str = self._size_to_string(self._total_size) - return ( - "Unable to allocate {} for an array with shape {} and data type {}" - .format(size_str, self.shape, self.dtype) - ) diff --git a/venv/lib/python3.7/site-packages/numpy/core/_internal.py b/venv/lib/python3.7/site-packages/numpy/core/_internal.py deleted file mode 100644 index 05e401e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/_internal.py +++ /dev/null @@ -1,877 +0,0 @@ -""" -A place for internal code - -Some things are more easily handled Python. - -""" -from __future__ import division, absolute_import, print_function - -import re -import sys -import platform - -from numpy.compat import unicode -from .multiarray import dtype, array, ndarray -try: - import ctypes -except ImportError: - ctypes = None - -IS_PYPY = platform.python_implementation() == 'PyPy' - -if (sys.byteorder == 'little'): - _nbo = b'<' -else: - _nbo = b'>' - -def _makenames_list(adict, align): - allfields = [] - fnames = list(adict.keys()) - for fname in fnames: - obj = adict[fname] - n = len(obj) - if not isinstance(obj, tuple) or n not in [2, 3]: - raise ValueError("entry not a 2- or 3- tuple") - if (n > 2) and (obj[2] == fname): - continue - num = int(obj[1]) - if (num < 0): - raise ValueError("invalid offset.") - format = dtype(obj[0], align=align) - if (n > 2): - title = obj[2] - else: - title = None - allfields.append((fname, format, num, title)) - # sort by offsets - allfields.sort(key=lambda x: x[2]) - names = [x[0] for x in allfields] - formats = [x[1] for x in allfields] - offsets = [x[2] for x in allfields] - titles = [x[3] for x in allfields] - - return names, formats, offsets, titles - -# Called in PyArray_DescrConverter function when -# a dictionary without "names" and "formats" -# fields is used as a data-type descriptor. 
-def _usefields(adict, align): - try: - names = adict[-1] - except KeyError: - names = None - if names is None: - names, formats, offsets, titles = _makenames_list(adict, align) - else: - formats = [] - offsets = [] - titles = [] - for name in names: - res = adict[name] - formats.append(res[0]) - offsets.append(res[1]) - if (len(res) > 2): - titles.append(res[2]) - else: - titles.append(None) - - return dtype({"names": names, - "formats": formats, - "offsets": offsets, - "titles": titles}, align) - - -# construct an array_protocol descriptor list -# from the fields attribute of a descriptor -# This calls itself recursively but should eventually hit -# a descriptor that has no fields and then return -# a simple typestring - -def _array_descr(descriptor): - fields = descriptor.fields - if fields is None: - subdtype = descriptor.subdtype - if subdtype is None: - if descriptor.metadata is None: - return descriptor.str - else: - new = descriptor.metadata.copy() - if new: - return (descriptor.str, new) - else: - return descriptor.str - else: - return (_array_descr(subdtype[0]), subdtype[1]) - - names = descriptor.names - ordered_fields = [fields[x] + (x,) for x in names] - result = [] - offset = 0 - for field in ordered_fields: - if field[1] > offset: - num = field[1] - offset - result.append(('', '|V%d' % num)) - offset += num - elif field[1] < offset: - raise ValueError( - "dtype.descr is not defined for types with overlapping or " - "out-of-order fields") - if len(field) > 3: - name = (field[2], field[3]) - else: - name = field[2] - if field[0].subdtype: - tup = (name, _array_descr(field[0].subdtype[0]), - field[0].subdtype[1]) - else: - tup = (name, _array_descr(field[0])) - offset += field[0].itemsize - result.append(tup) - - if descriptor.itemsize > offset: - num = descriptor.itemsize - offset - result.append(('', '|V%d' % num)) - - return result - -# Build a new array from the information in a pickle. 
-# Note that the name numpy.core._internal._reconstruct is embedded in -# pickles of ndarrays made with NumPy before release 1.0 -# so don't remove the name here, or you'll -# break backward compatibility. -def _reconstruct(subtype, shape, dtype): - return ndarray.__new__(subtype, shape, dtype) - - -# format_re was originally from numarray by J. Todd Miller - -format_re = re.compile(br'(?P[<>|=]?)' - br'(?P *[(]?[ ,0-9]*[)]? *)' - br'(?P[<>|=]?)' - br'(?P[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)') -sep_re = re.compile(br'\s*,\s*') -space_re = re.compile(br'\s+$') - -# astr is a string (perhaps comma separated) - -_convorder = {b'=': _nbo} - -def _commastring(astr): - startindex = 0 - result = [] - while startindex < len(astr): - mo = format_re.match(astr, pos=startindex) - try: - (order1, repeats, order2, dtype) = mo.groups() - except (TypeError, AttributeError): - raise ValueError('format number %d of "%s" is not recognized' % - (len(result)+1, astr)) - startindex = mo.end() - # Separator or ending padding - if startindex < len(astr): - if space_re.match(astr, pos=startindex): - startindex = len(astr) - else: - mo = sep_re.match(astr, pos=startindex) - if not mo: - raise ValueError( - 'format number %d of "%s" is not recognized' % - (len(result)+1, astr)) - startindex = mo.end() - - if order2 == b'': - order = order1 - elif order1 == b'': - order = order2 - else: - order1 = _convorder.get(order1, order1) - order2 = _convorder.get(order2, order2) - if (order1 != order2): - raise ValueError( - 'inconsistent byte-order specification %s and %s' % - (order1, order2)) - order = order1 - - if order in [b'|', b'=', _nbo]: - order = b'' - dtype = order + dtype - if (repeats == b''): - newitem = dtype - else: - newitem = (dtype, eval(repeats)) - result.append(newitem) - - return result - -class dummy_ctype(object): - def __init__(self, cls): - self._cls = cls - def __mul__(self, other): - return self - def __call__(self, *other): - return self._cls(other) - def __eq__(self, 
other): - return self._cls == other._cls - def __ne__(self, other): - return self._cls != other._cls - -def _getintp_ctype(): - val = _getintp_ctype.cache - if val is not None: - return val - if ctypes is None: - import numpy as np - val = dummy_ctype(np.intp) - else: - char = dtype('p').char - if (char == 'i'): - val = ctypes.c_int - elif char == 'l': - val = ctypes.c_long - elif char == 'q': - val = ctypes.c_longlong - else: - val = ctypes.c_long - _getintp_ctype.cache = val - return val -_getintp_ctype.cache = None - -# Used for .ctypes attribute of ndarray - -class _missing_ctypes(object): - def cast(self, num, obj): - return num.value - - class c_void_p(object): - def __init__(self, ptr): - self.value = ptr - - -class _ctypes(object): - def __init__(self, array, ptr=None): - self._arr = array - - if ctypes: - self._ctypes = ctypes - self._data = self._ctypes.c_void_p(ptr) - else: - # fake a pointer-like object that holds onto the reference - self._ctypes = _missing_ctypes() - self._data = self._ctypes.c_void_p(ptr) - self._data._objects = array - - if self._arr.ndim == 0: - self._zerod = True - else: - self._zerod = False - - def data_as(self, obj): - """ - Return the data pointer cast to a particular c-types object. - For example, calling ``self._as_parameter_`` is equivalent to - ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use the data as a - pointer to a ctypes array of floating-point data: - ``self.data_as(ctypes.POINTER(ctypes.c_double))``. - - The returned pointer will keep a reference to the array. - """ - # _ctypes.cast function causes a circular reference of self._data in - # self._data._objects. Attributes of self._data cannot be released - # until gc.collect is called. Make a copy of the pointer first then let - # it hold the array reference. 
This is a workaround to circumvent the - # CPython bug https://bugs.python.org/issue12836 - ptr = self._ctypes.cast(self._data, obj) - ptr._arr = self._arr - return ptr - - def shape_as(self, obj): - """ - Return the shape tuple as an array of some other c-types - type. For example: ``self.shape_as(ctypes.c_short)``. - """ - if self._zerod: - return None - return (obj*self._arr.ndim)(*self._arr.shape) - - def strides_as(self, obj): - """ - Return the strides tuple as an array of some other - c-types type. For example: ``self.strides_as(ctypes.c_longlong)``. - """ - if self._zerod: - return None - return (obj*self._arr.ndim)(*self._arr.strides) - - @property - def data(self): - """ - A pointer to the memory area of the array as a Python integer. - This memory area may contain data that is not aligned, or not in correct - byte-order. The memory area may not even be writeable. The array - flags and data-type of this array should be respected when passing this - attribute to arbitrary C-code to avoid trouble that can include Python - crashing. User Beware! The value of this attribute is exactly the same - as ``self._array_interface_['data'][0]``. - - Note that unlike ``data_as``, a reference will not be kept to the array: - code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a - pointer to a deallocated array, and should be spelt - ``(a + b).ctypes.data_as(ctypes.c_void_p)`` - """ - return self._data.value - - @property - def shape(self): - """ - (c_intp*self.ndim): A ctypes array of length self.ndim where - the basetype is the C-integer corresponding to ``dtype('p')`` on this - platform. This base-type could be `ctypes.c_int`, `ctypes.c_long`, or - `ctypes.c_longlong` depending on the platform. - The c_intp type is defined accordingly in `numpy.ctypeslib`. - The ctypes array contains the shape of the underlying array. 
- """ - return self.shape_as(_getintp_ctype()) - - @property - def strides(self): - """ - (c_intp*self.ndim): A ctypes array of length self.ndim where - the basetype is the same as for the shape attribute. This ctypes array - contains the strides information from the underlying array. This strides - information is important for showing how many bytes must be jumped to - get to the next element in the array. - """ - return self.strides_as(_getintp_ctype()) - - @property - def _as_parameter_(self): - """ - Overrides the ctypes semi-magic method - - Enables `c_func(some_array.ctypes)` - """ - return self.data_as(ctypes.c_void_p) - - # kept for compatibility - get_data = data.fget - get_shape = shape.fget - get_strides = strides.fget - get_as_parameter = _as_parameter_.fget - - -def _newnames(datatype, order): - """ - Given a datatype and an order object, return a new names tuple, with the - order indicated - """ - oldnames = datatype.names - nameslist = list(oldnames) - if isinstance(order, (str, unicode)): - order = [order] - seen = set() - if isinstance(order, (list, tuple)): - for name in order: - try: - nameslist.remove(name) - except ValueError: - if name in seen: - raise ValueError("duplicate field name: %s" % (name,)) - else: - raise ValueError("unknown field name: %s" % (name,)) - seen.add(name) - return tuple(list(order) + nameslist) - raise ValueError("unsupported order value: %s" % (order,)) - -def _copy_fields(ary): - """Return copy of structured array with padding between fields removed. - - Parameters - ---------- - ary : ndarray - Structured array from which to remove padding bytes - - Returns - ------- - ary_copy : ndarray - Copy of ary with padding bytes removed - """ - dt = ary.dtype - copy_dtype = {'names': dt.names, - 'formats': [dt.fields[name][0] for name in dt.names]} - return array(ary, dtype=copy_dtype, copy=True) - -def _getfield_is_safe(oldtype, newtype, offset): - """ Checks safety of getfield for object arrays. 
- - As in _view_is_safe, we need to check that memory containing objects is not - reinterpreted as a non-object datatype and vice versa. - - Parameters - ---------- - oldtype : data-type - Data type of the original ndarray. - newtype : data-type - Data type of the field being accessed by ndarray.getfield - offset : int - Offset of the field being accessed by ndarray.getfield - - Raises - ------ - TypeError - If the field access is invalid - - """ - if newtype.hasobject or oldtype.hasobject: - if offset == 0 and newtype == oldtype: - return - if oldtype.names is not None: - for name in oldtype.names: - if (oldtype.fields[name][1] == offset and - oldtype.fields[name][0] == newtype): - return - raise TypeError("Cannot get/set field of an object array") - return - -def _view_is_safe(oldtype, newtype): - """ Checks safety of a view involving object arrays, for example when - doing:: - - np.zeros(10, dtype=oldtype).view(newtype) - - Parameters - ---------- - oldtype : data-type - Data type of original ndarray - newtype : data-type - Data type of the view - - Raises - ------ - TypeError - If the new type is incompatible with the old type. - - """ - - # if the types are equivalent, there is no problem. 
- # for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4')) - if oldtype == newtype: - return - - if newtype.hasobject or oldtype.hasobject: - raise TypeError("Cannot change data-type for object array.") - return - -# Given a string containing a PEP 3118 format specifier, -# construct a NumPy dtype - -_pep3118_native_map = { - '?': '?', - 'c': 'S1', - 'b': 'b', - 'B': 'B', - 'h': 'h', - 'H': 'H', - 'i': 'i', - 'I': 'I', - 'l': 'l', - 'L': 'L', - 'q': 'q', - 'Q': 'Q', - 'e': 'e', - 'f': 'f', - 'd': 'd', - 'g': 'g', - 'Zf': 'F', - 'Zd': 'D', - 'Zg': 'G', - 's': 'S', - 'w': 'U', - 'O': 'O', - 'x': 'V', # padding -} -_pep3118_native_typechars = ''.join(_pep3118_native_map.keys()) - -_pep3118_standard_map = { - '?': '?', - 'c': 'S1', - 'b': 'b', - 'B': 'B', - 'h': 'i2', - 'H': 'u2', - 'i': 'i4', - 'I': 'u4', - 'l': 'i4', - 'L': 'u4', - 'q': 'i8', - 'Q': 'u8', - 'e': 'f2', - 'f': 'f', - 'd': 'd', - 'Zf': 'F', - 'Zd': 'D', - 's': 'S', - 'w': 'U', - 'O': 'O', - 'x': 'V', # padding -} -_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys()) - -_pep3118_unsupported_map = { - 'u': 'UCS-2 strings', - '&': 'pointers', - 't': 'bitfields', - 'X': 'function pointers', -} - -class _Stream(object): - def __init__(self, s): - self.s = s - self.byteorder = '@' - - def advance(self, n): - res = self.s[:n] - self.s = self.s[n:] - return res - - def consume(self, c): - if self.s[:len(c)] == c: - self.advance(len(c)) - return True - return False - - def consume_until(self, c): - if callable(c): - i = 0 - while i < len(self.s) and not c(self.s[i]): - i = i + 1 - return self.advance(i) - else: - i = self.s.index(c) - res = self.advance(i) - self.advance(len(c)) - return res - - @property - def next(self): - return self.s[0] - - def __bool__(self): - return bool(self.s) - __nonzero__ = __bool__ - - -def _dtype_from_pep3118(spec): - stream = _Stream(spec) - dtype, align = __dtype_from_pep3118(stream, is_subdtype=False) - return dtype - -def 
__dtype_from_pep3118(stream, is_subdtype): - field_spec = dict( - names=[], - formats=[], - offsets=[], - itemsize=0 - ) - offset = 0 - common_alignment = 1 - is_padding = False - - # Parse spec - while stream: - value = None - - # End of structure, bail out to upper level - if stream.consume('}'): - break - - # Sub-arrays (1) - shape = None - if stream.consume('('): - shape = stream.consume_until(')') - shape = tuple(map(int, shape.split(','))) - - # Byte order - if stream.next in ('@', '=', '<', '>', '^', '!'): - byteorder = stream.advance(1) - if byteorder == '!': - byteorder = '>' - stream.byteorder = byteorder - - # Byte order characters also control native vs. standard type sizes - if stream.byteorder in ('@', '^'): - type_map = _pep3118_native_map - type_map_chars = _pep3118_native_typechars - else: - type_map = _pep3118_standard_map - type_map_chars = _pep3118_standard_typechars - - # Item sizes - itemsize_str = stream.consume_until(lambda c: not c.isdigit()) - if itemsize_str: - itemsize = int(itemsize_str) - else: - itemsize = 1 - - # Data types - is_padding = False - - if stream.consume('T{'): - value, align = __dtype_from_pep3118( - stream, is_subdtype=True) - elif stream.next in type_map_chars: - if stream.next == 'Z': - typechar = stream.advance(2) - else: - typechar = stream.advance(1) - - is_padding = (typechar == 'x') - dtypechar = type_map[typechar] - if dtypechar in 'USV': - dtypechar += '%d' % itemsize - itemsize = 1 - numpy_byteorder = {'@': '=', '^': '='}.get( - stream.byteorder, stream.byteorder) - value = dtype(numpy_byteorder + dtypechar) - align = value.alignment - elif stream.next in _pep3118_unsupported_map: - desc = _pep3118_unsupported_map[stream.next] - raise NotImplementedError( - "Unrepresentable PEP 3118 data type {!r} ({})" - .format(stream.next, desc)) - else: - raise ValueError("Unknown PEP 3118 data type specifier %r" % stream.s) - - # - # Native alignment may require padding - # - # Here we assume that the presence of a '@' 
character implicitly implies - # that the start of the array is *already* aligned. - # - extra_offset = 0 - if stream.byteorder == '@': - start_padding = (-offset) % align - intra_padding = (-value.itemsize) % align - - offset += start_padding - - if intra_padding != 0: - if itemsize > 1 or (shape is not None and _prod(shape) > 1): - # Inject internal padding to the end of the sub-item - value = _add_trailing_padding(value, intra_padding) - else: - # We can postpone the injection of internal padding, - # as the item appears at most once - extra_offset += intra_padding - - # Update common alignment - common_alignment = _lcm(align, common_alignment) - - # Convert itemsize to sub-array - if itemsize != 1: - value = dtype((value, (itemsize,))) - - # Sub-arrays (2) - if shape is not None: - value = dtype((value, shape)) - - # Field name - if stream.consume(':'): - name = stream.consume_until(':') - else: - name = None - - if not (is_padding and name is None): - if name is not None and name in field_spec['names']: - raise RuntimeError("Duplicate field name '%s' in PEP3118 format" - % name) - field_spec['names'].append(name) - field_spec['formats'].append(value) - field_spec['offsets'].append(offset) - - offset += value.itemsize - offset += extra_offset - - field_spec['itemsize'] = offset - - # extra final padding for aligned types - if stream.byteorder == '@': - field_spec['itemsize'] += (-offset) % common_alignment - - # Check if this was a simple 1-item type, and unwrap it - if (field_spec['names'] == [None] - and field_spec['offsets'][0] == 0 - and field_spec['itemsize'] == field_spec['formats'][0].itemsize - and not is_subdtype): - ret = field_spec['formats'][0] - else: - _fix_names(field_spec) - ret = dtype(field_spec) - - # Finished - return ret, common_alignment - -def _fix_names(field_spec): - """ Replace names which are None with the next unused f%d name """ - names = field_spec['names'] - for i, name in enumerate(names): - if name is not None: - continue - - j 
= 0 - while True: - name = 'f{}'.format(j) - if name not in names: - break - j = j + 1 - names[i] = name - -def _add_trailing_padding(value, padding): - """Inject the specified number of padding bytes at the end of a dtype""" - if value.fields is None: - field_spec = dict( - names=['f0'], - formats=[value], - offsets=[0], - itemsize=value.itemsize - ) - else: - fields = value.fields - names = value.names - field_spec = dict( - names=names, - formats=[fields[name][0] for name in names], - offsets=[fields[name][1] for name in names], - itemsize=value.itemsize - ) - - field_spec['itemsize'] += padding - return dtype(field_spec) - -def _prod(a): - p = 1 - for x in a: - p *= x - return p - -def _gcd(a, b): - """Calculate the greatest common divisor of a and b""" - while b: - a, b = b, a % b - return a - -def _lcm(a, b): - return a // _gcd(a, b) * b - -def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs): - """ Format the error message for when __array_ufunc__ gives up. """ - args_string = ', '.join(['{!r}'.format(arg) for arg in inputs] + - ['{}={!r}'.format(k, v) - for k, v in kwargs.items()]) - args = inputs + kwargs.get('out', ()) - types_string = ', '.join(repr(type(arg).__name__) for arg in args) - return ('operand type(s) all returned NotImplemented from ' - '__array_ufunc__({!r}, {!r}, {}): {}' - .format(ufunc, method, args_string, types_string)) - - -def array_function_errmsg_formatter(public_api, types): - """ Format the error message for when __array_ufunc__ gives up. 
""" - func_name = '{}.{}'.format(public_api.__module__, public_api.__name__) - return ("no implementation found for '{}' on types that implement " - '__array_function__: {}'.format(func_name, list(types))) - - -def _ufunc_doc_signature_formatter(ufunc): - """ - Builds a signature string which resembles PEP 457 - - This is used to construct the first line of the docstring - """ - - # input arguments are simple - if ufunc.nin == 1: - in_args = 'x' - else: - in_args = ', '.join('x{}'.format(i+1) for i in range(ufunc.nin)) - - # output arguments are both keyword or positional - if ufunc.nout == 0: - out_args = ', /, out=()' - elif ufunc.nout == 1: - out_args = ', /, out=None' - else: - out_args = '[, {positional}], / [, out={default}]'.format( - positional=', '.join( - 'out{}'.format(i+1) for i in range(ufunc.nout)), - default=repr((None,)*ufunc.nout) - ) - - # keyword only args depend on whether this is a gufunc - kwargs = ( - ", casting='same_kind'" - ", order='K'" - ", dtype=None" - ", subok=True" - "[, signature" - ", extobj]" - ) - if ufunc.signature is None: - kwargs = ", where=True" + kwargs - - # join all the parts together - return '{name}({in_args}{out_args}, *{kwargs})'.format( - name=ufunc.__name__, - in_args=in_args, - out_args=out_args, - kwargs=kwargs - ) - - -def npy_ctypes_check(cls): - # determine if a class comes from ctypes, in order to work around - # a bug in the buffer protocol for those objects, bpo-10746 - try: - # ctypes class are new-style, so have an __mro__. This probably fails - # for ctypes classes with multiple inheritance. - if IS_PYPY: - # (..., _ctypes.basics._CData, Bufferable, object) - ctype_base = cls.__mro__[-3] - else: - # # (..., _ctypes._CData, object) - ctype_base = cls.__mro__[-2] - # right now, they're part of the _ctypes module - return 'ctypes' in ctype_base.__module__ - except Exception: - return False - - -class recursive(object): - ''' - A decorator class for recursive nested functions. 
- Naive recursive nested functions hold a reference to themselves: - - def outer(*args): - def stringify_leaky(arg0, *arg1): - if len(arg1) > 0: - return stringify_leaky(*arg1) # <- HERE - return str(arg0) - stringify_leaky(*args) - - This design pattern creates a reference cycle that is difficult for a - garbage collector to resolve. The decorator class prevents the - cycle by passing the nested function in as an argument `self`: - - def outer(*args): - @recursive - def stringify(self, arg0, *arg1): - if len(arg1) > 0: - return self(*arg1) - return str(arg0) - stringify(*args) - - ''' - def __init__(self, func): - self.func = func - def __call__(self, *args, **kwargs): - return self.func(self, *args, **kwargs) - diff --git a/venv/lib/python3.7/site-packages/numpy/core/_methods.py b/venv/lib/python3.7/site-packages/numpy/core/_methods.py deleted file mode 100644 index 269e509..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/_methods.py +++ /dev/null @@ -1,244 +0,0 @@ -""" -Array methods which are called by both the C-code for the method -and the Python code for the NumPy-namespace function - -""" -from __future__ import division, absolute_import, print_function - -import warnings - -from numpy.core import multiarray as mu -from numpy.core import umath as um -from numpy.core._asarray import asanyarray -from numpy.core import numerictypes as nt -from numpy.core import _exceptions -from numpy._globals import _NoValue -from numpy.compat import pickle, os_fspath, contextlib_nullcontext - -# save those O(100) nanoseconds! 
-umr_maximum = um.maximum.reduce -umr_minimum = um.minimum.reduce -umr_sum = um.add.reduce -umr_prod = um.multiply.reduce -umr_any = um.logical_or.reduce -umr_all = um.logical_and.reduce - -# avoid keyword arguments to speed up parsing, saves about 15%-20% for very -# small reductions -def _amax(a, axis=None, out=None, keepdims=False, - initial=_NoValue, where=True): - return umr_maximum(a, axis, None, out, keepdims, initial, where) - -def _amin(a, axis=None, out=None, keepdims=False, - initial=_NoValue, where=True): - return umr_minimum(a, axis, None, out, keepdims, initial, where) - -def _sum(a, axis=None, dtype=None, out=None, keepdims=False, - initial=_NoValue, where=True): - return umr_sum(a, axis, dtype, out, keepdims, initial, where) - -def _prod(a, axis=None, dtype=None, out=None, keepdims=False, - initial=_NoValue, where=True): - return umr_prod(a, axis, dtype, out, keepdims, initial, where) - -def _any(a, axis=None, dtype=None, out=None, keepdims=False): - return umr_any(a, axis, dtype, out, keepdims) - -def _all(a, axis=None, dtype=None, out=None, keepdims=False): - return umr_all(a, axis, dtype, out, keepdims) - -def _count_reduce_items(arr, axis): - if axis is None: - axis = tuple(range(arr.ndim)) - if not isinstance(axis, tuple): - axis = (axis,) - items = 1 - for ax in axis: - items *= arr.shape[ax] - return items - -# Numpy 1.17.0, 2019-02-24 -# Various clip behavior deprecations, marked with _clip_dep as a prefix. 
- -def _clip_dep_is_scalar_nan(a): - # guarded to protect circular imports - from numpy.core.fromnumeric import ndim - if ndim(a) != 0: - return False - try: - return um.isnan(a) - except TypeError: - return False - -def _clip_dep_is_byte_swapped(a): - if isinstance(a, mu.ndarray): - return not a.dtype.isnative - return False - -def _clip_dep_invoke_with_casting(ufunc, *args, out=None, casting=None, **kwargs): - # normal path - if casting is not None: - return ufunc(*args, out=out, casting=casting, **kwargs) - - # try to deal with broken casting rules - try: - return ufunc(*args, out=out, **kwargs) - except _exceptions._UFuncOutputCastingError as e: - # Numpy 1.17.0, 2019-02-24 - warnings.warn( - "Converting the output of clip from {!r} to {!r} is deprecated. " - "Pass `casting=\"unsafe\"` explicitly to silence this warning, or " - "correct the type of the variables.".format(e.from_, e.to), - DeprecationWarning, - stacklevel=2 - ) - return ufunc(*args, out=out, casting="unsafe", **kwargs) - -def _clip(a, min=None, max=None, out=None, *, casting=None, **kwargs): - if min is None and max is None: - raise ValueError("One of max or min must be given") - - # Numpy 1.17.0, 2019-02-24 - # This deprecation probably incurs a substantial slowdown for small arrays, - # it will be good to get rid of it. - if not _clip_dep_is_byte_swapped(a) and not _clip_dep_is_byte_swapped(out): - using_deprecated_nan = False - if _clip_dep_is_scalar_nan(min): - min = -float('inf') - using_deprecated_nan = True - if _clip_dep_is_scalar_nan(max): - max = float('inf') - using_deprecated_nan = True - if using_deprecated_nan: - warnings.warn( - "Passing `np.nan` to mean no clipping in np.clip has always " - "been unreliable, and is now deprecated. " - "In future, this will always return nan, like it already does " - "when min or max are arrays that contain nan. 
" - "To skip a bound, pass either None or an np.inf of an " - "appropriate sign.", - DeprecationWarning, - stacklevel=2 - ) - - if min is None: - return _clip_dep_invoke_with_casting( - um.minimum, a, max, out=out, casting=casting, **kwargs) - elif max is None: - return _clip_dep_invoke_with_casting( - um.maximum, a, min, out=out, casting=casting, **kwargs) - else: - return _clip_dep_invoke_with_casting( - um.clip, a, min, max, out=out, casting=casting, **kwargs) - -def _mean(a, axis=None, dtype=None, out=None, keepdims=False): - arr = asanyarray(a) - - is_float16_result = False - rcount = _count_reduce_items(arr, axis) - # Make this warning show up first - if rcount == 0: - warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2) - - # Cast bool, unsigned int, and int to float64 by default - if dtype is None: - if issubclass(arr.dtype.type, (nt.integer, nt.bool_)): - dtype = mu.dtype('f8') - elif issubclass(arr.dtype.type, nt.float16): - dtype = mu.dtype('f4') - is_float16_result = True - - ret = umr_sum(arr, axis, dtype, out, keepdims) - if isinstance(ret, mu.ndarray): - ret = um.true_divide( - ret, rcount, out=ret, casting='unsafe', subok=False) - if is_float16_result and out is None: - ret = arr.dtype.type(ret) - elif hasattr(ret, 'dtype'): - if is_float16_result: - ret = arr.dtype.type(ret / rcount) - else: - ret = ret.dtype.type(ret / rcount) - else: - ret = ret / rcount - - return ret - -def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): - arr = asanyarray(a) - - rcount = _count_reduce_items(arr, axis) - # Make this warning show up on top. - if ddof >= rcount: - warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning, - stacklevel=2) - - # Cast bool, unsigned int, and int to float64 by default - if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)): - dtype = mu.dtype('f8') - - # Compute the mean. - # Note that if dtype is not of inexact type then arraymean will - # not be either. 
- arrmean = umr_sum(arr, axis, dtype, keepdims=True) - if isinstance(arrmean, mu.ndarray): - arrmean = um.true_divide( - arrmean, rcount, out=arrmean, casting='unsafe', subok=False) - else: - arrmean = arrmean.dtype.type(arrmean / rcount) - - # Compute sum of squared deviations from mean - # Note that x may not be inexact and that we need it to be an array, - # not a scalar. - x = asanyarray(arr - arrmean) - if issubclass(arr.dtype.type, (nt.floating, nt.integer)): - x = um.multiply(x, x, out=x) - else: - x = um.multiply(x, um.conjugate(x), out=x).real - - ret = umr_sum(x, axis, dtype, out, keepdims) - - # Compute degrees of freedom and make sure it is not negative. - rcount = max([rcount - ddof, 0]) - - # divide by degrees of freedom - if isinstance(ret, mu.ndarray): - ret = um.true_divide( - ret, rcount, out=ret, casting='unsafe', subok=False) - elif hasattr(ret, 'dtype'): - ret = ret.dtype.type(ret / rcount) - else: - ret = ret / rcount - - return ret - -def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): - ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, - keepdims=keepdims) - - if isinstance(ret, mu.ndarray): - ret = um.sqrt(ret, out=ret) - elif hasattr(ret, 'dtype'): - ret = ret.dtype.type(um.sqrt(ret)) - else: - ret = um.sqrt(ret) - - return ret - -def _ptp(a, axis=None, out=None, keepdims=False): - return um.subtract( - umr_maximum(a, axis, None, out, keepdims), - umr_minimum(a, axis, None, None, keepdims), - out - ) - -def _dump(self, file, protocol=2): - if hasattr(file, 'write'): - ctx = contextlib_nullcontext(file) - else: - ctx = open(os_fspath(file), "wb") - with ctx as f: - pickle.dump(self, f, protocol=protocol) - -def _dumps(self, protocol=2): - return pickle.dumps(self, protocol=protocol) diff --git a/venv/lib/python3.7/site-packages/numpy/core/_multiarray_tests.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/numpy/core/_multiarray_tests.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 
737083c..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/core/_multiarray_tests.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/core/_multiarray_umath.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/numpy/core/_multiarray_umath.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 606d590..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/core/_multiarray_umath.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/core/_operand_flag_tests.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/numpy/core/_operand_flag_tests.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 341093e..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/core/_operand_flag_tests.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/core/_rational_tests.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/numpy/core/_rational_tests.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index c62e625..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/core/_rational_tests.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/core/_string_helpers.py b/venv/lib/python3.7/site-packages/numpy/core/_string_helpers.py deleted file mode 100644 index 45e6a73..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/_string_helpers.py +++ /dev/null @@ -1,100 +0,0 @@ -""" -String-handling utilities to avoid locale-dependence. - -Used primarily to generate type name aliases. -""" -# "import string" is costly to import! 
-# Construct the translation tables directly -# "A" = chr(65), "a" = chr(97) -_all_chars = [chr(_m) for _m in range(256)] -_ascii_upper = _all_chars[65:65+26] -_ascii_lower = _all_chars[97:97+26] -LOWER_TABLE = "".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:]) -UPPER_TABLE = "".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:]) - - -def english_lower(s): - """ Apply English case rules to convert ASCII strings to all lower case. - - This is an internal utility function to replace calls to str.lower() such - that we can avoid changing behavior with changing locales. In particular, - Turkish has distinct dotted and dotless variants of the Latin letter "I" in - both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale. - - Parameters - ---------- - s : str - - Returns - ------- - lowered : str - - Examples - -------- - >>> from numpy.core.numerictypes import english_lower - >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') - 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_' - >>> english_lower('') - '' - """ - lowered = s.translate(LOWER_TABLE) - return lowered - - -def english_upper(s): - """ Apply English case rules to convert ASCII strings to all upper case. - - This is an internal utility function to replace calls to str.upper() such - that we can avoid changing behavior with changing locales. In particular, - Turkish has distinct dotted and dotless variants of the Latin letter "I" in - both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale. 
- - Parameters - ---------- - s : str - - Returns - ------- - uppered : str - - Examples - -------- - >>> from numpy.core.numerictypes import english_upper - >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') - 'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_' - >>> english_upper('') - '' - """ - uppered = s.translate(UPPER_TABLE) - return uppered - - -def english_capitalize(s): - """ Apply English case rules to convert the first character of an ASCII - string to upper case. - - This is an internal utility function to replace calls to str.capitalize() - such that we can avoid changing behavior with changing locales. - - Parameters - ---------- - s : str - - Returns - ------- - capitalized : str - - Examples - -------- - >>> from numpy.core.numerictypes import english_capitalize - >>> english_capitalize('int8') - 'Int8' - >>> english_capitalize('Int8') - 'Int8' - >>> english_capitalize('') - '' - """ - if s: - return english_upper(s[0]) + s[1:] - else: - return s diff --git a/venv/lib/python3.7/site-packages/numpy/core/_struct_ufunc_tests.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/numpy/core/_struct_ufunc_tests.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index b48d370..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/core/_struct_ufunc_tests.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/core/_type_aliases.py b/venv/lib/python3.7/site-packages/numpy/core/_type_aliases.py deleted file mode 100644 index d6e1a1f..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/_type_aliases.py +++ /dev/null @@ -1,282 +0,0 @@ -""" -Due to compatibility, numpy has a very large number of different naming -conventions for the scalar types (those subclassing from `numpy.generic`). -This file produces a convoluted set of dictionaries mapping names to types, -and sometimes other mappings too. - -.. 
data:: allTypes - A dictionary of names to types that will be exposed as attributes through - ``np.core.numerictypes.*`` - -.. data:: sctypeDict - Similar to `allTypes`, but maps a broader set of aliases to their types. - -.. data:: sctypeNA - NumArray-compatible names for the scalar types. Contains not only - ``name: type`` mappings, but ``char: name`` mappings too. - - .. deprecated:: 1.16 - -.. data:: sctypes - A dictionary keyed by a "type group" string, providing a list of types - under that group. - -""" -import warnings -import sys - -from numpy.compat import unicode -from numpy._globals import VisibleDeprecationWarning -from numpy.core._string_helpers import english_lower, english_capitalize -from numpy.core.multiarray import typeinfo, dtype -from numpy.core._dtype import _kind_name - - -sctypeDict = {} # Contains all leaf-node scalar types with aliases -class TypeNADict(dict): - def __getitem__(self, key): - # 2018-06-24, 1.16 - warnings.warn('sctypeNA and typeNA will be removed in v1.18 ' - 'of numpy', VisibleDeprecationWarning, stacklevel=2) - return dict.__getitem__(self, key) - def get(self, key, default=None): - # 2018-06-24, 1.16 - warnings.warn('sctypeNA and typeNA will be removed in v1.18 ' - 'of numpy', VisibleDeprecationWarning, stacklevel=2) - return dict.get(self, key, default) - -sctypeNA = TypeNADict() # Contails all leaf-node types -> numarray type equivalences -allTypes = {} # Collect the types we will add to the module - - -# separate the actual type info from the abstract base classes -_abstract_types = {} -_concrete_typeinfo = {} -for k, v in typeinfo.items(): - # make all the keys lowercase too - k = english_lower(k) - if isinstance(v, type): - _abstract_types[k] = v - else: - _concrete_typeinfo[k] = v - -_concrete_types = {v.type for k, v in _concrete_typeinfo.items()} - - -def _bits_of(obj): - try: - info = next(v for v in _concrete_typeinfo.values() if v.type is obj) - except StopIteration: - if obj in _abstract_types.values(): - 
raise ValueError("Cannot count the bits of an abstract type") - - # some third-party type - make a best-guess - return dtype(obj).itemsize * 8 - else: - return info.bits - - -def bitname(obj): - """Return a bit-width name for a given type object""" - bits = _bits_of(obj) - dt = dtype(obj) - char = dt.kind - base = _kind_name(dt) - - if base == 'object': - bits = 0 - - if bits != 0: - char = "%s%d" % (char, bits // 8) - - return base, bits, char - - -def _add_types(): - for name, info in _concrete_typeinfo.items(): - # define C-name and insert typenum and typechar references also - allTypes[name] = info.type - sctypeDict[name] = info.type - sctypeDict[info.char] = info.type - sctypeDict[info.num] = info.type - - for name, cls in _abstract_types.items(): - allTypes[name] = cls -_add_types() - -# This is the priority order used to assign the bit-sized NPY_INTxx names, which -# must match the order in npy_common.h in order for NPY_INTxx and np.intxx to be -# consistent. -# If two C types have the same size, then the earliest one in this list is used -# as the sized name. 
-_int_ctypes = ['long', 'longlong', 'int', 'short', 'byte'] -_uint_ctypes = list('u' + t for t in _int_ctypes) - -def _add_aliases(): - for name, info in _concrete_typeinfo.items(): - # these are handled by _add_integer_aliases - if name in _int_ctypes or name in _uint_ctypes: - continue - - # insert bit-width version for this class (if relevant) - base, bit, char = bitname(info.type) - - myname = "%s%d" % (base, bit) - - # ensure that (c)longdouble does not overwrite the aliases assigned to - # (c)double - if name in ('longdouble', 'clongdouble') and myname in allTypes: - continue - - base_capitalize = english_capitalize(base) - if base == 'complex': - na_name = '%s%d' % (base_capitalize, bit//2) - elif base == 'bool': - na_name = base_capitalize - else: - na_name = "%s%d" % (base_capitalize, bit) - - allTypes[myname] = info.type - - # add mapping for both the bit name and the numarray name - sctypeDict[myname] = info.type - sctypeDict[na_name] = info.type - - # add forward, reverse, and string mapping to numarray - sctypeNA[na_name] = info.type - sctypeNA[info.type] = na_name - sctypeNA[info.char] = na_name - - sctypeDict[char] = info.type - sctypeNA[char] = na_name -_add_aliases() - -def _add_integer_aliases(): - seen_bits = set() - for i_ctype, u_ctype in zip(_int_ctypes, _uint_ctypes): - i_info = _concrete_typeinfo[i_ctype] - u_info = _concrete_typeinfo[u_ctype] - bits = i_info.bits # same for both - - for info, charname, intname, Intname in [ - (i_info,'i%d' % (bits//8,), 'int%d' % bits, 'Int%d' % bits), - (u_info,'u%d' % (bits//8,), 'uint%d' % bits, 'UInt%d' % bits)]: - if bits not in seen_bits: - # sometimes two different types have the same number of bits - # if so, the one iterated over first takes precedence - allTypes[intname] = info.type - sctypeDict[intname] = info.type - sctypeDict[Intname] = info.type - sctypeDict[charname] = info.type - sctypeNA[Intname] = info.type - sctypeNA[charname] = info.type - sctypeNA[info.type] = Intname - 
sctypeNA[info.char] = Intname - - seen_bits.add(bits) - -_add_integer_aliases() - -# We use these later -void = allTypes['void'] - -# -# Rework the Python names (so that float and complex and int are consistent -# with Python usage) -# -def _set_up_aliases(): - type_pairs = [('complex_', 'cdouble'), - ('int0', 'intp'), - ('uint0', 'uintp'), - ('single', 'float'), - ('csingle', 'cfloat'), - ('singlecomplex', 'cfloat'), - ('float_', 'double'), - ('intc', 'int'), - ('uintc', 'uint'), - ('int_', 'long'), - ('uint', 'ulong'), - ('cfloat', 'cdouble'), - ('longfloat', 'longdouble'), - ('clongfloat', 'clongdouble'), - ('longcomplex', 'clongdouble'), - ('bool_', 'bool'), - ('bytes_', 'string'), - ('string_', 'string'), - ('unicode_', 'unicode'), - ('object_', 'object')] - if sys.version_info[0] >= 3: - type_pairs.extend([('str_', 'unicode')]) - else: - type_pairs.extend([('str_', 'string')]) - for alias, t in type_pairs: - allTypes[alias] = allTypes[t] - sctypeDict[alias] = sctypeDict[t] - # Remove aliases overriding python types and modules - to_remove = ['ulong', 'object', 'int', 'float', - 'complex', 'bool', 'string', 'datetime', 'timedelta'] - if sys.version_info[0] >= 3: - to_remove.extend(['bytes', 'str']) - else: - to_remove.extend(['unicode', 'long']) - - for t in to_remove: - try: - del allTypes[t] - del sctypeDict[t] - except KeyError: - pass -_set_up_aliases() - - -sctypes = {'int': [], - 'uint':[], - 'float':[], - 'complex':[], - 'others':[bool, object, bytes, unicode, void]} - -def _add_array_type(typename, bits): - try: - t = allTypes['%s%d' % (typename, bits)] - except KeyError: - pass - else: - sctypes[typename].append(t) - -def _set_array_types(): - ibytes = [1, 2, 4, 8, 16, 32, 64] - fbytes = [2, 4, 8, 10, 12, 16, 32, 64] - for bytes in ibytes: - bits = 8*bytes - _add_array_type('int', bits) - _add_array_type('uint', bits) - for bytes in fbytes: - bits = 8*bytes - _add_array_type('float', bits) - _add_array_type('complex', 2*bits) - _gi = dtype('p') - if 
_gi.type not in sctypes['int']: - indx = 0 - sz = _gi.itemsize - _lst = sctypes['int'] - while (indx < len(_lst) and sz >= _lst[indx](0).itemsize): - indx += 1 - sctypes['int'].insert(indx, _gi.type) - sctypes['uint'].insert(indx, dtype('P').type) -_set_array_types() - - -# Add additional strings to the sctypeDict -_toadd = ['int', 'float', 'complex', 'bool', 'object'] -if sys.version_info[0] >= 3: - _toadd.extend(['str', 'bytes', ('a', 'bytes_')]) -else: - _toadd.extend(['string', ('str', 'string_'), 'unicode', ('a', 'string_')]) - -for name in _toadd: - if isinstance(name, tuple): - sctypeDict[name[0]] = allTypes[name[1]] - else: - sctypeDict[name] = allTypes['%s_' % name] - -del _toadd, name diff --git a/venv/lib/python3.7/site-packages/numpy/core/_ufunc_config.py b/venv/lib/python3.7/site-packages/numpy/core/_ufunc_config.py deleted file mode 100644 index c3951cc..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/_ufunc_config.py +++ /dev/null @@ -1,458 +0,0 @@ -""" -Functions for changing global ufunc configuration - -This provides helpers which wrap `umath.geterrobj` and `umath.seterrobj` -""" -from __future__ import division, absolute_import, print_function - -try: - # Accessing collections abstract classes from collections - # has been deprecated since Python 3.3 - import collections.abc as collections_abc -except ImportError: - import collections as collections_abc -import contextlib - -from .overrides import set_module -from .umath import ( - UFUNC_BUFSIZE_DEFAULT, - ERR_IGNORE, ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT, ERR_LOG, ERR_DEFAULT, - SHIFT_DIVIDEBYZERO, SHIFT_OVERFLOW, SHIFT_UNDERFLOW, SHIFT_INVALID, -) -from . 
import umath - -__all__ = [ - "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall", - "errstate", -] - -_errdict = {"ignore": ERR_IGNORE, - "warn": ERR_WARN, - "raise": ERR_RAISE, - "call": ERR_CALL, - "print": ERR_PRINT, - "log": ERR_LOG} - -_errdict_rev = {value: key for key, value in _errdict.items()} - - -@set_module('numpy') -def seterr(all=None, divide=None, over=None, under=None, invalid=None): - """ - Set how floating-point errors are handled. - - Note that operations on integer scalar types (such as `int16`) are - handled like floating point, and are affected by these settings. - - Parameters - ---------- - all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional - Set treatment for all types of floating-point errors at once: - - - ignore: Take no action when the exception occurs. - - warn: Print a `RuntimeWarning` (via the Python `warnings` module). - - raise: Raise a `FloatingPointError`. - - call: Call a function specified using the `seterrcall` function. - - print: Print a warning directly to ``stdout``. - - log: Record error in a Log object specified by `seterrcall`. - - The default is not to change the current behavior. - divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional - Treatment for division by zero. - over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional - Treatment for floating-point overflow. - under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional - Treatment for floating-point underflow. - invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional - Treatment for invalid floating-point operation. - - Returns - ------- - old_settings : dict - Dictionary containing the old settings. - - See also - -------- - seterrcall : Set a callback function for the 'call' mode. 
- geterr, geterrcall, errstate - - Notes - ----- - The floating-point exceptions are defined in the IEEE 754 standard [1]_: - - - Division by zero: infinite result obtained from finite numbers. - - Overflow: result too large to be expressed. - - Underflow: result so close to zero that some precision - was lost. - - Invalid operation: result is not an expressible number, typically - indicates that a NaN was produced. - - .. [1] https://en.wikipedia.org/wiki/IEEE_754 - - Examples - -------- - >>> old_settings = np.seterr(all='ignore') #seterr to known value - >>> np.seterr(over='raise') - {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'} - >>> np.seterr(**old_settings) # reset to default - {'divide': 'ignore', 'over': 'raise', 'under': 'ignore', 'invalid': 'ignore'} - - >>> np.int16(32000) * np.int16(3) - 30464 - >>> old_settings = np.seterr(all='warn', over='raise') - >>> np.int16(32000) * np.int16(3) - Traceback (most recent call last): - File "", line 1, in - FloatingPointError: overflow encountered in short_scalars - - >>> from collections import OrderedDict - >>> old_settings = np.seterr(all='print') - >>> OrderedDict(np.geterr()) - OrderedDict([('divide', 'print'), ('over', 'print'), ('under', 'print'), ('invalid', 'print')]) - >>> np.int16(32000) * np.int16(3) - 30464 - - """ - - pyvals = umath.geterrobj() - old = geterr() - - if divide is None: - divide = all or old['divide'] - if over is None: - over = all or old['over'] - if under is None: - under = all or old['under'] - if invalid is None: - invalid = all or old['invalid'] - - maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) + - (_errdict[over] << SHIFT_OVERFLOW) + - (_errdict[under] << SHIFT_UNDERFLOW) + - (_errdict[invalid] << SHIFT_INVALID)) - - pyvals[1] = maskvalue - umath.seterrobj(pyvals) - return old - - -@set_module('numpy') -def geterr(): - """ - Get the current way of handling floating-point errors. 
- - Returns - ------- - res : dict - A dictionary with keys "divide", "over", "under", and "invalid", - whose values are from the strings "ignore", "print", "log", "warn", - "raise", and "call". The keys represent possible floating-point - exceptions, and the values define how these exceptions are handled. - - See Also - -------- - geterrcall, seterr, seterrcall - - Notes - ----- - For complete documentation of the types of floating-point exceptions and - treatment options, see `seterr`. - - Examples - -------- - >>> from collections import OrderedDict - >>> sorted(np.geterr().items()) - [('divide', 'warn'), ('invalid', 'warn'), ('over', 'warn'), ('under', 'ignore')] - >>> np.arange(3.) / np.arange(3.) - array([nan, 1., 1.]) - - >>> oldsettings = np.seterr(all='warn', over='raise') - >>> OrderedDict(sorted(np.geterr().items())) - OrderedDict([('divide', 'warn'), ('invalid', 'warn'), ('over', 'raise'), ('under', 'warn')]) - >>> np.arange(3.) / np.arange(3.) - array([nan, 1., 1.]) - - """ - maskvalue = umath.geterrobj()[1] - mask = 7 - res = {} - val = (maskvalue >> SHIFT_DIVIDEBYZERO) & mask - res['divide'] = _errdict_rev[val] - val = (maskvalue >> SHIFT_OVERFLOW) & mask - res['over'] = _errdict_rev[val] - val = (maskvalue >> SHIFT_UNDERFLOW) & mask - res['under'] = _errdict_rev[val] - val = (maskvalue >> SHIFT_INVALID) & mask - res['invalid'] = _errdict_rev[val] - return res - - -@set_module('numpy') -def setbufsize(size): - """ - Set the size of the buffer used in ufuncs. - - Parameters - ---------- - size : int - Size of buffer. - - """ - if size > 10e6: - raise ValueError("Buffer size, %s, is too big." % size) - if size < 5: - raise ValueError("Buffer size, %s, is too small." % size) - if size % 16 != 0: - raise ValueError("Buffer size, %s, is not a multiple of 16." 
% size) - - pyvals = umath.geterrobj() - old = getbufsize() - pyvals[0] = size - umath.seterrobj(pyvals) - return old - - -@set_module('numpy') -def getbufsize(): - """ - Return the size of the buffer used in ufuncs. - - Returns - ------- - getbufsize : int - Size of ufunc buffer in bytes. - - """ - return umath.geterrobj()[0] - - -@set_module('numpy') -def seterrcall(func): - """ - Set the floating-point error callback function or log object. - - There are two ways to capture floating-point error messages. The first - is to set the error-handler to 'call', using `seterr`. Then, set - the function to call using this function. - - The second is to set the error-handler to 'log', using `seterr`. - Floating-point errors then trigger a call to the 'write' method of - the provided object. - - Parameters - ---------- - func : callable f(err, flag) or object with write method - Function to call upon floating-point errors ('call'-mode) or - object whose 'write' method is used to log such message ('log'-mode). - - The call function takes two arguments. The first is a string describing - the type of error (such as "divide by zero", "overflow", "underflow", - or "invalid value"), and the second is the status flag. The flag is a - byte, whose four least-significant bits indicate the type of error, one - of "divide", "over", "under", "invalid":: - - [0 0 0 0 divide over under invalid] - - In other words, ``flags = divide + 2*over + 4*under + 8*invalid``. - - If an object is provided, its write method should take one argument, - a string. - - Returns - ------- - h : callable, log instance or None - The old error handler. - - See Also - -------- - seterr, geterr, geterrcall - - Examples - -------- - Callback upon error: - - >>> def err_handler(type, flag): - ... print("Floating point error (%s), with flag %s" % (type, flag)) - ... 
- - >>> saved_handler = np.seterrcall(err_handler) - >>> save_err = np.seterr(all='call') - >>> from collections import OrderedDict - - >>> np.array([1, 2, 3]) / 0.0 - Floating point error (divide by zero), with flag 1 - array([inf, inf, inf]) - - >>> np.seterrcall(saved_handler) - - >>> OrderedDict(sorted(np.seterr(**save_err).items())) - OrderedDict([('divide', 'call'), ('invalid', 'call'), ('over', 'call'), ('under', 'call')]) - - Log error message: - - >>> class Log(object): - ... def write(self, msg): - ... print("LOG: %s" % msg) - ... - - >>> log = Log() - >>> saved_handler = np.seterrcall(log) - >>> save_err = np.seterr(all='log') - - >>> np.array([1, 2, 3]) / 0.0 - LOG: Warning: divide by zero encountered in true_divide - array([inf, inf, inf]) - - >>> np.seterrcall(saved_handler) - - >>> OrderedDict(sorted(np.seterr(**save_err).items())) - OrderedDict([('divide', 'log'), ('invalid', 'log'), ('over', 'log'), ('under', 'log')]) - - """ - if func is not None and not isinstance(func, collections_abc.Callable): - if not hasattr(func, 'write') or not isinstance(func.write, collections_abc.Callable): - raise ValueError("Only callable can be used as callback") - pyvals = umath.geterrobj() - old = geterrcall() - pyvals[2] = func - umath.seterrobj(pyvals) - return old - - -@set_module('numpy') -def geterrcall(): - """ - Return the current callback function used on floating-point errors. - - When the error handling for a floating-point error (one of "divide", - "over", "under", or "invalid") is set to 'call' or 'log', the function - that is called or the log instance that is written to is returned by - `geterrcall`. This function or log instance has been set with - `seterrcall`. - - Returns - ------- - errobj : callable, log instance or None - The current error handler. If no handler was set through `seterrcall`, - ``None`` is returned. 
- - See Also - -------- - seterrcall, seterr, geterr - - Notes - ----- - For complete documentation of the types of floating-point exceptions and - treatment options, see `seterr`. - - Examples - -------- - >>> np.geterrcall() # we did not yet set a handler, returns None - - >>> oldsettings = np.seterr(all='call') - >>> def err_handler(type, flag): - ... print("Floating point error (%s), with flag %s" % (type, flag)) - >>> oldhandler = np.seterrcall(err_handler) - >>> np.array([1, 2, 3]) / 0.0 - Floating point error (divide by zero), with flag 1 - array([inf, inf, inf]) - - >>> cur_handler = np.geterrcall() - >>> cur_handler is err_handler - True - - """ - return umath.geterrobj()[2] - - -class _unspecified(object): - pass - - -_Unspecified = _unspecified() - - -@set_module('numpy') -class errstate(contextlib.ContextDecorator): - """ - errstate(**kwargs) - - Context manager for floating-point error handling. - - Using an instance of `errstate` as a context manager allows statements in - that context to execute with a known error handling behavior. Upon entering - the context the error handling is set with `seterr` and `seterrcall`, and - upon exiting it is reset to what it was before. - - .. versionchanged:: 1.17.0 - `errstate` is also usable as a function decorator, saving - a level of indentation if an entire function is wrapped. - See :py:class:`contextlib.ContextDecorator` for more information. - - Parameters - ---------- - kwargs : {divide, over, under, invalid} - Keyword arguments. The valid keywords are the possible floating-point - exceptions. Each keyword should have a string value that defines the - treatment for the particular error. Possible values are - {'ignore', 'warn', 'raise', 'call', 'print', 'log'}. - - See Also - -------- - seterr, geterr, seterrcall, geterrcall - - Notes - ----- - For complete documentation of the types of floating-point exceptions and - treatment options, see `seterr`. 
- - Examples - -------- - >>> from collections import OrderedDict - >>> olderr = np.seterr(all='ignore') # Set error handling to known state. - - >>> np.arange(3) / 0. - array([nan, inf, inf]) - >>> with np.errstate(divide='warn'): - ... np.arange(3) / 0. - array([nan, inf, inf]) - - >>> np.sqrt(-1) - nan - >>> with np.errstate(invalid='raise'): - ... np.sqrt(-1) - Traceback (most recent call last): - File "", line 2, in - FloatingPointError: invalid value encountered in sqrt - - Outside the context the error handling behavior has not changed: - - >>> OrderedDict(sorted(np.geterr().items())) - OrderedDict([('divide', 'ignore'), ('invalid', 'ignore'), ('over', 'ignore'), ('under', 'ignore')]) - - """ - # Note that we don't want to run the above doctests because they will fail - # without a from __future__ import with_statement - - def __init__(self, **kwargs): - self.call = kwargs.pop('call', _Unspecified) - self.kwargs = kwargs - - def __enter__(self): - self.oldstate = seterr(**self.kwargs) - if self.call is not _Unspecified: - self.oldcall = seterrcall(self.call) - - def __exit__(self, *exc_info): - seterr(**self.oldstate) - if self.call is not _Unspecified: - seterrcall(self.oldcall) - - -def _setdef(): - defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT, None] - umath.seterrobj(defval) - - -# set the default values -_setdef() diff --git a/venv/lib/python3.7/site-packages/numpy/core/_umath_tests.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/numpy/core/_umath_tests.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 5be1293..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/core/_umath_tests.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/core/arrayprint.py b/venv/lib/python3.7/site-packages/numpy/core/arrayprint.py deleted file mode 100644 index 4010180..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/arrayprint.py +++ /dev/null @@ -1,1622 +0,0 @@ -"""Array 
printing function - -$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $ - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ["array2string", "array_str", "array_repr", "set_string_function", - "set_printoptions", "get_printoptions", "printoptions", - "format_float_positional", "format_float_scientific"] -__docformat__ = 'restructuredtext' - -# -# Written by Konrad Hinsen -# last revision: 1996-3-13 -# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details) -# and by Perry Greenfield 2000-4-1 for numarray -# and by Travis Oliphant 2005-8-22 for numpy - - -# Note: Both scalartypes.c.src and arrayprint.py implement strs for numpy -# scalars but for different purposes. scalartypes.c.src has str/reprs for when -# the scalar is printed on its own, while arrayprint.py has strs for when -# scalars are printed inside an ndarray. Only the latter strs are currently -# user-customizable. - -import sys -import functools -import numbers -if sys.version_info[0] >= 3: - try: - from _thread import get_ident - except ImportError: - from _dummy_thread import get_ident -else: - try: - from thread import get_ident - except ImportError: - from dummy_thread import get_ident - -import numpy as np -from . import numerictypes as _nt -from .umath import absolute, not_equal, isnan, isinf, isfinite, isnat -from . 
import multiarray -from .multiarray import (array, dragon4_positional, dragon4_scientific, - datetime_as_string, datetime_data, ndarray, - set_legacy_print_mode) -from .fromnumeric import ravel, any -from .numeric import concatenate, asarray, errstate -from .numerictypes import (longlong, intc, int_, float_, complex_, bool_, - flexible) -from .overrides import array_function_dispatch, set_module -import warnings -import contextlib - -_format_options = { - 'edgeitems': 3, # repr N leading and trailing items of each dimension - 'threshold': 1000, # total items > triggers array summarization - 'floatmode': 'maxprec', - 'precision': 8, # precision of floating point representations - 'suppress': False, # suppress printing small floating values in exp format - 'linewidth': 75, - 'nanstr': 'nan', - 'infstr': 'inf', - 'sign': '-', - 'formatter': None, - 'legacy': False} - -def _make_options_dict(precision=None, threshold=None, edgeitems=None, - linewidth=None, suppress=None, nanstr=None, infstr=None, - sign=None, formatter=None, floatmode=None, legacy=None): - """ make a dictionary out of the non-None arguments, plus sanity checks """ - - options = {k: v for k, v in locals().items() if v is not None} - - if suppress is not None: - options['suppress'] = bool(suppress) - - modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal'] - if floatmode not in modes + [None]: - raise ValueError("floatmode option must be one of " + - ", ".join('"{}"'.format(m) for m in modes)) - - if sign not in [None, '-', '+', ' ']: - raise ValueError("sign option must be one of ' ', '+', or '-'") - - if legacy not in [None, False, '1.13']: - warnings.warn("legacy printing option can currently only be '1.13' or " - "`False`", stacklevel=3) - if threshold is not None: - # forbid the bad threshold arg suggested by stack overflow, gh-12351 - if not isinstance(threshold, numbers.Number): - raise TypeError("threshold must be numeric") - if np.isnan(threshold): - raise ValueError("threshold must be non-NAN, 
try " - "sys.maxsize for untruncated representation") - return options - - -@set_module('numpy') -def set_printoptions(precision=None, threshold=None, edgeitems=None, - linewidth=None, suppress=None, nanstr=None, infstr=None, - formatter=None, sign=None, floatmode=None, **kwarg): - """ - Set printing options. - - These options determine the way floating point numbers, arrays and - other NumPy objects are displayed. - - Parameters - ---------- - precision : int or None, optional - Number of digits of precision for floating point output (default 8). - May be None if `floatmode` is not `fixed`, to print as many digits as - necessary to uniquely specify the value. - threshold : int, optional - Total number of array elements which trigger summarization - rather than full repr (default 1000). - To always use the full repr without summarization, pass `sys.maxsize`. - edgeitems : int, optional - Number of array items in summary at beginning and end of - each dimension (default 3). - linewidth : int, optional - The number of characters per line for the purpose of inserting - line breaks (default 75). - suppress : bool, optional - If True, always print floating point numbers using fixed point - notation, in which case numbers equal to zero in the current precision - will print as zero. If False, then scientific notation is used when - absolute value of the smallest number is < 1e-4 or the ratio of the - maximum absolute value to the minimum is > 1e3. The default is False. - nanstr : str, optional - String representation of floating point not-a-number (default nan). - infstr : str, optional - String representation of floating point infinity (default inf). - sign : string, either '-', '+', or ' ', optional - Controls printing of the sign of floating-point types. If '+', always - print the sign of positive values. If ' ', always prints a space - (whitespace character) in the sign position of positive values. If - '-', omit the sign character of positive values. 
(default '-') - formatter : dict of callables, optional - If not None, the keys should indicate the type(s) that the respective - formatting function applies to. Callables should return a string. - Types that are not specified (by their corresponding keys) are handled - by the default formatters. Individual types for which a formatter - can be set are: - - - 'bool' - - 'int' - - 'timedelta' : a `numpy.timedelta64` - - 'datetime' : a `numpy.datetime64` - - 'float' - - 'longfloat' : 128-bit floats - - 'complexfloat' - - 'longcomplexfloat' : composed of two 128-bit floats - - 'numpystr' : types `numpy.string_` and `numpy.unicode_` - - 'object' : `np.object_` arrays - - 'str' : all other strings - - Other keys that can be used to set a group of types at once are: - - - 'all' : sets all types - - 'int_kind' : sets 'int' - - 'float_kind' : sets 'float' and 'longfloat' - - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' - - 'str_kind' : sets 'str' and 'numpystr' - floatmode : str, optional - Controls the interpretation of the `precision` option for - floating-point types. Can take the following values - (default maxprec_equal): - - * 'fixed': Always print exactly `precision` fractional digits, - even if this would print more or fewer digits than - necessary to specify the value uniquely. - * 'unique': Print the minimum number of fractional digits necessary - to represent each value uniquely. Different elements may - have a different number of digits. The value of the - `precision` option is ignored. - * 'maxprec': Print at most `precision` fractional digits, but if - an element can be uniquely represented with fewer digits - only print it with that many. - * 'maxprec_equal': Print at most `precision` fractional digits, - but if every element in the array can be uniquely - represented with an equal number of fewer digits, use that - many digits for all elements. 
- legacy : string or `False`, optional - If set to the string `'1.13'` enables 1.13 legacy printing mode. This - approximates numpy 1.13 print output by including a space in the sign - position of floats and different behavior for 0d arrays. If set to - `False`, disables legacy mode. Unrecognized strings will be ignored - with a warning for forward compatibility. - - .. versionadded:: 1.14.0 - - See Also - -------- - get_printoptions, printoptions, set_string_function, array2string - - Notes - ----- - `formatter` is always reset with a call to `set_printoptions`. - - Use `printoptions` as a context manager to set the values temporarily. - - Examples - -------- - Floating point precision can be set: - - >>> np.set_printoptions(precision=4) - >>> np.array([1.123456789]) - [1.1235] - - Long arrays can be summarised: - - >>> np.set_printoptions(threshold=5) - >>> np.arange(10) - array([0, 1, 2, ..., 7, 8, 9]) - - Small results can be suppressed: - - >>> eps = np.finfo(float).eps - >>> x = np.arange(4.) - >>> x**2 - (x + eps)**2 - array([-4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00]) - >>> np.set_printoptions(suppress=True) - >>> x**2 - (x + eps)**2 - array([-0., -0., 0., 0.]) - - A custom formatter can be used to display array elements as desired: - - >>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)}) - >>> x = np.arange(3) - >>> x - array([int: 0, int: -1, int: -2]) - >>> np.set_printoptions() # formatter gets reset - >>> x - array([0, 1, 2]) - - To put back the default options, you can use: - - >>> np.set_printoptions(edgeitems=3, infstr='inf', - ... linewidth=75, nanstr='nan', precision=8, - ... suppress=False, threshold=1000, formatter=None) - - Also to temporarily override options, use `printoptions` as a context manager: - - >>> with np.printoptions(precision=2, suppress=True, threshold=5): - ... np.linspace(0, 10, 10) - array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. 
]) - - """ - legacy = kwarg.pop('legacy', None) - if kwarg: - msg = "set_printoptions() got unexpected keyword argument '{}'" - raise TypeError(msg.format(kwarg.popitem()[0])) - - opt = _make_options_dict(precision, threshold, edgeitems, linewidth, - suppress, nanstr, infstr, sign, formatter, - floatmode, legacy) - # formatter is always reset - opt['formatter'] = formatter - _format_options.update(opt) - - # set the C variable for legacy mode - if _format_options['legacy'] == '1.13': - set_legacy_print_mode(113) - # reset the sign option in legacy mode to avoid confusion - _format_options['sign'] = '-' - elif _format_options['legacy'] is False: - set_legacy_print_mode(0) - - -@set_module('numpy') -def get_printoptions(): - """ - Return the current print options. - - Returns - ------- - print_opts : dict - Dictionary of current print options with keys - - - precision : int - - threshold : int - - edgeitems : int - - linewidth : int - - suppress : bool - - nanstr : str - - infstr : str - - formatter : dict of callables - - sign : str - - For a full description of these options, see `set_printoptions`. - - See Also - -------- - set_printoptions, printoptions, set_string_function - - """ - return _format_options.copy() - - -@set_module('numpy') -@contextlib.contextmanager -def printoptions(*args, **kwargs): - """Context manager for setting print options. - - Set print options for the scope of the `with` block, and restore the old - options at the end. See `set_printoptions` for the full description of - available options. - - Examples - -------- - - >>> from numpy.testing import assert_equal - >>> with np.printoptions(precision=2): - ... np.array([2.0]) / 3 - array([0.67]) - - The `as`-clause of the `with`-statement gives the current print options: - - >>> with np.printoptions(precision=2) as opts: - ... 
assert_equal(opts, np.get_printoptions()) - - See Also - -------- - set_printoptions, get_printoptions - - """ - opts = np.get_printoptions() - try: - np.set_printoptions(*args, **kwargs) - yield np.get_printoptions() - finally: - np.set_printoptions(**opts) - - -def _leading_trailing(a, edgeitems, index=()): - """ - Keep only the N-D corners (leading and trailing edges) of an array. - - Should be passed a base-class ndarray, since it makes no guarantees about - preserving subclasses. - """ - axis = len(index) - if axis == a.ndim: - return a[index] - - if a.shape[axis] > 2*edgeitems: - return concatenate(( - _leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]), - _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:]) - ), axis=axis) - else: - return _leading_trailing(a, edgeitems, index + np.index_exp[:]) - - -def _object_format(o): - """ Object arrays containing lists should be printed unambiguously """ - if type(o) is list: - fmt = 'list({!r})' - else: - fmt = '{!r}' - return fmt.format(o) - -def repr_format(x): - return repr(x) - -def str_format(x): - return str(x) - -def _get_formatdict(data, **opt): - prec, fmode = opt['precision'], opt['floatmode'] - supp, sign = opt['suppress'], opt['sign'] - legacy = opt['legacy'] - - # wrapped in lambdas to avoid taking a code path with the wrong type of data - formatdict = { - 'bool': lambda: BoolFormat(data), - 'int': lambda: IntegerFormat(data), - 'float': lambda: - FloatingFormat(data, prec, fmode, supp, sign, legacy=legacy), - 'longfloat': lambda: - FloatingFormat(data, prec, fmode, supp, sign, legacy=legacy), - 'complexfloat': lambda: - ComplexFloatingFormat(data, prec, fmode, supp, sign, legacy=legacy), - 'longcomplexfloat': lambda: - ComplexFloatingFormat(data, prec, fmode, supp, sign, legacy=legacy), - 'datetime': lambda: DatetimeFormat(data, legacy=legacy), - 'timedelta': lambda: TimedeltaFormat(data), - 'object': lambda: _object_format, - 'void': lambda: str_format, - 'numpystr': 
lambda: repr_format, - 'str': lambda: str} - - # we need to wrap values in `formatter` in a lambda, so that the interface - # is the same as the above values. - def indirect(x): - return lambda: x - - formatter = opt['formatter'] - if formatter is not None: - fkeys = [k for k in formatter.keys() if formatter[k] is not None] - if 'all' in fkeys: - for key in formatdict.keys(): - formatdict[key] = indirect(formatter['all']) - if 'int_kind' in fkeys: - for key in ['int']: - formatdict[key] = indirect(formatter['int_kind']) - if 'float_kind' in fkeys: - for key in ['float', 'longfloat']: - formatdict[key] = indirect(formatter['float_kind']) - if 'complex_kind' in fkeys: - for key in ['complexfloat', 'longcomplexfloat']: - formatdict[key] = indirect(formatter['complex_kind']) - if 'str_kind' in fkeys: - for key in ['numpystr', 'str']: - formatdict[key] = indirect(formatter['str_kind']) - for key in formatdict.keys(): - if key in fkeys: - formatdict[key] = indirect(formatter[key]) - - return formatdict - -def _get_format_function(data, **options): - """ - find the right formatting function for the dtype_ - """ - dtype_ = data.dtype - dtypeobj = dtype_.type - formatdict = _get_formatdict(data, **options) - if issubclass(dtypeobj, _nt.bool_): - return formatdict['bool']() - elif issubclass(dtypeobj, _nt.integer): - if issubclass(dtypeobj, _nt.timedelta64): - return formatdict['timedelta']() - else: - return formatdict['int']() - elif issubclass(dtypeobj, _nt.floating): - if issubclass(dtypeobj, _nt.longfloat): - return formatdict['longfloat']() - else: - return formatdict['float']() - elif issubclass(dtypeobj, _nt.complexfloating): - if issubclass(dtypeobj, _nt.clongfloat): - return formatdict['longcomplexfloat']() - else: - return formatdict['complexfloat']() - elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)): - return formatdict['numpystr']() - elif issubclass(dtypeobj, _nt.datetime64): - return formatdict['datetime']() - elif issubclass(dtypeobj, _nt.object_): - 
return formatdict['object']() - elif issubclass(dtypeobj, _nt.void): - if dtype_.names is not None: - return StructuredVoidFormat.from_data(data, **options) - else: - return formatdict['void']() - else: - return formatdict['numpystr']() - - -def _recursive_guard(fillvalue='...'): - """ - Like the python 3.2 reprlib.recursive_repr, but forwards *args and **kwargs - - Decorates a function such that if it calls itself with the same first - argument, it returns `fillvalue` instead of recursing. - - Largely copied from reprlib.recursive_repr - """ - - def decorating_function(f): - repr_running = set() - - @functools.wraps(f) - def wrapper(self, *args, **kwargs): - key = id(self), get_ident() - if key in repr_running: - return fillvalue - repr_running.add(key) - try: - return f(self, *args, **kwargs) - finally: - repr_running.discard(key) - - return wrapper - - return decorating_function - - -# gracefully handle recursive calls, when object arrays contain themselves -@_recursive_guard() -def _array2string(a, options, separator=' ', prefix=""): - # The formatter __init__s in _get_format_function cannot deal with - # subclasses yet, and we also need to avoid recursion issues in - # _formatArray with subclasses which return 0d arrays in place of scalars - data = asarray(a) - if a.shape == (): - a = data - - if a.size > options['threshold']: - summary_insert = "..." 
- data = _leading_trailing(data, options['edgeitems']) - else: - summary_insert = "" - - # find the right formatting function for the array - format_function = _get_format_function(data, **options) - - # skip over "[" - next_line_prefix = " " - # skip over array( - next_line_prefix += " "*len(prefix) - - lst = _formatArray(a, format_function, options['linewidth'], - next_line_prefix, separator, options['edgeitems'], - summary_insert, options['legacy']) - return lst - - -def _array2string_dispatcher( - a, max_line_width=None, precision=None, - suppress_small=None, separator=None, prefix=None, - style=None, formatter=None, threshold=None, - edgeitems=None, sign=None, floatmode=None, suffix=None, - **kwarg): - return (a,) - - -@array_function_dispatch(_array2string_dispatcher, module='numpy') -def array2string(a, max_line_width=None, precision=None, - suppress_small=None, separator=' ', prefix="", - style=np._NoValue, formatter=None, threshold=None, - edgeitems=None, sign=None, floatmode=None, suffix="", - **kwarg): - """ - Return a string representation of an array. - - Parameters - ---------- - a : array_like - Input array. - max_line_width : int, optional - Inserts newlines if text is longer than `max_line_width`. - Defaults to ``numpy.get_printoptions()['linewidth']``. - precision : int or None, optional - Floating point precision. - Defaults to ``numpy.get_printoptions()['precision']``. - suppress_small : bool, optional - Represent numbers "very close" to zero as zero; default is False. - Very close is defined by precision: if the precision is 8, e.g., - numbers smaller (in absolute value) than 5e-9 are represented as - zero. - Defaults to ``numpy.get_printoptions()['suppress']``. - separator : str, optional - Inserted between elements. - prefix : str, optional - suffix: str, optional - The length of the prefix and suffix strings are used to respectively - align and wrap the output. 
An array is typically printed as:: - - prefix + array2string(a) + suffix - - The output is left-padded by the length of the prefix string, and - wrapping is forced at the column ``max_line_width - len(suffix)``. - It should be noted that the content of prefix and suffix strings are - not included in the output. - style : _NoValue, optional - Has no effect, do not use. - - .. deprecated:: 1.14.0 - formatter : dict of callables, optional - If not None, the keys should indicate the type(s) that the respective - formatting function applies to. Callables should return a string. - Types that are not specified (by their corresponding keys) are handled - by the default formatters. Individual types for which a formatter - can be set are: - - - 'bool' - - 'int' - - 'timedelta' : a `numpy.timedelta64` - - 'datetime' : a `numpy.datetime64` - - 'float' - - 'longfloat' : 128-bit floats - - 'complexfloat' - - 'longcomplexfloat' : composed of two 128-bit floats - - 'void' : type `numpy.void` - - 'numpystr' : types `numpy.string_` and `numpy.unicode_` - - 'str' : all other strings - - Other keys that can be used to set a group of types at once are: - - - 'all' : sets all types - - 'int_kind' : sets 'int' - - 'float_kind' : sets 'float' and 'longfloat' - - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' - - 'str_kind' : sets 'str' and 'numpystr' - threshold : int, optional - Total number of array elements which trigger summarization - rather than full repr. - Defaults to ``numpy.get_printoptions()['threshold']``. - edgeitems : int, optional - Number of array items in summary at beginning and end of - each dimension. - Defaults to ``numpy.get_printoptions()['edgeitems']``. - sign : string, either '-', '+', or ' ', optional - Controls printing of the sign of floating-point types. If '+', always - print the sign of positive values. If ' ', always prints a space - (whitespace character) in the sign position of positive values. 
If - '-', omit the sign character of positive values. - Defaults to ``numpy.get_printoptions()['sign']``. - floatmode : str, optional - Controls the interpretation of the `precision` option for - floating-point types. - Defaults to ``numpy.get_printoptions()['floatmode']``. - Can take the following values: - - - 'fixed': Always print exactly `precision` fractional digits, - even if this would print more or fewer digits than - necessary to specify the value uniquely. - - 'unique': Print the minimum number of fractional digits necessary - to represent each value uniquely. Different elements may - have a different number of digits. The value of the - `precision` option is ignored. - - 'maxprec': Print at most `precision` fractional digits, but if - an element can be uniquely represented with fewer digits - only print it with that many. - - 'maxprec_equal': Print at most `precision` fractional digits, - but if every element in the array can be uniquely - represented with an equal number of fewer digits, use that - many digits for all elements. - legacy : string or `False`, optional - If set to the string `'1.13'` enables 1.13 legacy printing mode. This - approximates numpy 1.13 print output by including a space in the sign - position of floats and different behavior for 0d arrays. If set to - `False`, disables legacy mode. Unrecognized strings will be ignored - with a warning for forward compatibility. - - .. versionadded:: 1.14.0 - - Returns - ------- - array_str : str - String representation of the array. - - Raises - ------ - TypeError - if a callable in `formatter` does not return a string. - - See Also - -------- - array_str, array_repr, set_printoptions, get_printoptions - - Notes - ----- - If a formatter is specified for a certain type, the `precision` keyword is - ignored for that type. 
- - This is a very flexible function; `array_repr` and `array_str` are using - `array2string` internally so keywords with the same name should work - identically in all three functions. - - Examples - -------- - >>> x = np.array([1e-16,1,2,3]) - >>> np.array2string(x, precision=2, separator=',', - ... suppress_small=True) - '[0.,1.,2.,3.]' - - >>> x = np.arange(3.) - >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) - '[0.00 1.00 2.00]' - - >>> x = np.arange(3) - >>> np.array2string(x, formatter={'int':lambda x: hex(x)}) - '[0x0 0x1 0x2]' - - """ - legacy = kwarg.pop('legacy', None) - if kwarg: - msg = "array2string() got unexpected keyword argument '{}'" - raise TypeError(msg.format(kwarg.popitem()[0])) - - overrides = _make_options_dict(precision, threshold, edgeitems, - max_line_width, suppress_small, None, None, - sign, formatter, floatmode, legacy) - options = _format_options.copy() - options.update(overrides) - - if options['legacy'] == '1.13': - if style is np._NoValue: - style = repr - - if a.shape == () and a.dtype.names is None: - return style(a.item()) - elif style is not np._NoValue: - # Deprecation 11-9-2017 v1.14 - warnings.warn("'style' argument is deprecated and no longer functional" - " except in 1.13 'legacy' mode", - DeprecationWarning, stacklevel=3) - - if options['legacy'] != '1.13': - options['linewidth'] -= len(suffix) - - # treat as a null array if any of shape elements == 0 - if a.size == 0: - return "[]" - - return _array2string(a, options, separator, prefix) - - -def _extendLine(s, line, word, line_width, next_line_prefix, legacy): - needs_wrap = len(line) + len(word) > line_width - if legacy != '1.13': - s# don't wrap lines if it won't help - if len(line) <= len(next_line_prefix): - needs_wrap = False - - if needs_wrap: - s += line.rstrip() + "\n" - line = next_line_prefix - line += word - return s, line - - -def _formatArray(a, format_function, line_width, next_line_prefix, - separator, edge_items, summary_insert, 
legacy): - """formatArray is designed for two modes of operation: - - 1. Full output - - 2. Summarized output - - """ - def recurser(index, hanging_indent, curr_width): - """ - By using this local function, we don't need to recurse with all the - arguments. Since this function is not created recursively, the cost is - not significant - """ - axis = len(index) - axes_left = a.ndim - axis - - if axes_left == 0: - return format_function(a[index]) - - # when recursing, add a space to align with the [ added, and reduce the - # length of the line by 1 - next_hanging_indent = hanging_indent + ' ' - if legacy == '1.13': - next_width = curr_width - else: - next_width = curr_width - len(']') - - a_len = a.shape[axis] - show_summary = summary_insert and 2*edge_items < a_len - if show_summary: - leading_items = edge_items - trailing_items = edge_items - else: - leading_items = 0 - trailing_items = a_len - - # stringify the array with the hanging indent on the first line too - s = '' - - # last axis (rows) - wrap elements if they would not fit on one line - if axes_left == 1: - # the length up until the beginning of the separator / bracket - if legacy == '1.13': - elem_width = curr_width - len(separator.rstrip()) - else: - elem_width = curr_width - max(len(separator.rstrip()), len(']')) - - line = hanging_indent - for i in range(leading_items): - word = recurser(index + (i,), next_hanging_indent, next_width) - s, line = _extendLine( - s, line, word, elem_width, hanging_indent, legacy) - line += separator - - if show_summary: - s, line = _extendLine( - s, line, summary_insert, elem_width, hanging_indent, legacy) - if legacy == '1.13': - line += ", " - else: - line += separator - - for i in range(trailing_items, 1, -1): - word = recurser(index + (-i,), next_hanging_indent, next_width) - s, line = _extendLine( - s, line, word, elem_width, hanging_indent, legacy) - line += separator - - if legacy == '1.13': - # width of the separator is not considered on 1.13 - elem_width = 
curr_width - word = recurser(index + (-1,), next_hanging_indent, next_width) - s, line = _extendLine( - s, line, word, elem_width, hanging_indent, legacy) - - s += line - - # other axes - insert newlines between rows - else: - s = '' - line_sep = separator.rstrip() + '\n'*(axes_left - 1) - - for i in range(leading_items): - nested = recurser(index + (i,), next_hanging_indent, next_width) - s += hanging_indent + nested + line_sep - - if show_summary: - if legacy == '1.13': - # trailing space, fixed nbr of newlines, and fixed separator - s += hanging_indent + summary_insert + ", \n" - else: - s += hanging_indent + summary_insert + line_sep - - for i in range(trailing_items, 1, -1): - nested = recurser(index + (-i,), next_hanging_indent, - next_width) - s += hanging_indent + nested + line_sep - - nested = recurser(index + (-1,), next_hanging_indent, next_width) - s += hanging_indent + nested - - # remove the hanging indent, and wrap in [] - s = '[' + s[len(hanging_indent):] + ']' - return s - - try: - # invoke the recursive part with an initial index and prefix - return recurser(index=(), - hanging_indent=next_line_prefix, - curr_width=line_width) - finally: - # recursive closures have a cyclic reference to themselves, which - # requires gc to collect (gh-10620). 
To avoid this problem, for - # performance and PyPy friendliness, we break the cycle: - recurser = None - -def _none_or_positive_arg(x, name): - if x is None: - return -1 - if x < 0: - raise ValueError("{} must be >= 0".format(name)) - return x - -class FloatingFormat(object): - """ Formatter for subtypes of np.floating """ - def __init__(self, data, precision, floatmode, suppress_small, sign=False, - **kwarg): - # for backcompatibility, accept bools - if isinstance(sign, bool): - sign = '+' if sign else '-' - - self._legacy = kwarg.get('legacy', False) - if self._legacy == '1.13': - # when not 0d, legacy does not support '-' - if data.shape != () and sign == '-': - sign = ' ' - - self.floatmode = floatmode - if floatmode == 'unique': - self.precision = None - else: - self.precision = precision - - self.precision = _none_or_positive_arg(self.precision, 'precision') - - self.suppress_small = suppress_small - self.sign = sign - self.exp_format = False - self.large_exponent = False - - self.fillFormat(data) - - def fillFormat(self, data): - # only the finite values are used to compute the number of digits - finite_vals = data[isfinite(data)] - - # choose exponential mode based on the non-zero finite values: - abs_non_zero = absolute(finite_vals[finite_vals != 0]) - if len(abs_non_zero) != 0: - max_val = np.max(abs_non_zero) - min_val = np.min(abs_non_zero) - with errstate(over='ignore'): # division can overflow - if max_val >= 1.e8 or (not self.suppress_small and - (min_val < 0.0001 or max_val/min_val > 1000.)): - self.exp_format = True - - # do a first pass of printing all the numbers, to determine sizes - if len(finite_vals) == 0: - self.pad_left = 0 - self.pad_right = 0 - self.trim = '.' 
- self.exp_size = -1 - self.unique = True - elif self.exp_format: - trim, unique = '.', True - if self.floatmode == 'fixed' or self._legacy == '1.13': - trim, unique = 'k', False - strs = (dragon4_scientific(x, precision=self.precision, - unique=unique, trim=trim, sign=self.sign == '+') - for x in finite_vals) - frac_strs, _, exp_strs = zip(*(s.partition('e') for s in strs)) - int_part, frac_part = zip(*(s.split('.') for s in frac_strs)) - self.exp_size = max(len(s) for s in exp_strs) - 1 - - self.trim = 'k' - self.precision = max(len(s) for s in frac_part) - - # for back-compat with np 1.13, use 2 spaces & sign and full prec - if self._legacy == '1.13': - self.pad_left = 3 - else: - # this should be only 1 or 2. Can be calculated from sign. - self.pad_left = max(len(s) for s in int_part) - # pad_right is only needed for nan length calculation - self.pad_right = self.exp_size + 2 + self.precision - - self.unique = False - else: - # first pass printing to determine sizes - trim, unique = '.', True - if self.floatmode == 'fixed': - trim, unique = 'k', False - strs = (dragon4_positional(x, precision=self.precision, - fractional=True, - unique=unique, trim=trim, - sign=self.sign == '+') - for x in finite_vals) - int_part, frac_part = zip(*(s.split('.') for s in strs)) - if self._legacy == '1.13': - self.pad_left = 1 + max(len(s.lstrip('-+')) for s in int_part) - else: - self.pad_left = max(len(s) for s in int_part) - self.pad_right = max(len(s) for s in frac_part) - self.exp_size = -1 - - if self.floatmode in ['fixed', 'maxprec_equal']: - self.precision = self.pad_right - self.unique = False - self.trim = 'k' - else: - self.unique = True - self.trim = '.' 
- - if self._legacy != '1.13': - # account for sign = ' ' by adding one to pad_left - if self.sign == ' ' and not any(np.signbit(finite_vals)): - self.pad_left += 1 - - # if there are non-finite values, may need to increase pad_left - if data.size != finite_vals.size: - neginf = self.sign != '-' or any(data[isinf(data)] < 0) - nanlen = len(_format_options['nanstr']) - inflen = len(_format_options['infstr']) + neginf - offset = self.pad_right + 1 # +1 for decimal pt - self.pad_left = max(self.pad_left, nanlen - offset, inflen - offset) - - def __call__(self, x): - if not np.isfinite(x): - with errstate(invalid='ignore'): - if np.isnan(x): - sign = '+' if self.sign == '+' else '' - ret = sign + _format_options['nanstr'] - else: # isinf - sign = '-' if x < 0 else '+' if self.sign == '+' else '' - ret = sign + _format_options['infstr'] - return ' '*(self.pad_left + self.pad_right + 1 - len(ret)) + ret - - if self.exp_format: - return dragon4_scientific(x, - precision=self.precision, - unique=self.unique, - trim=self.trim, - sign=self.sign == '+', - pad_left=self.pad_left, - exp_digits=self.exp_size) - else: - return dragon4_positional(x, - precision=self.precision, - unique=self.unique, - fractional=True, - trim=self.trim, - sign=self.sign == '+', - pad_left=self.pad_left, - pad_right=self.pad_right) - - -@set_module('numpy') -def format_float_scientific(x, precision=None, unique=True, trim='k', - sign=False, pad_left=None, exp_digits=None): - """ - Format a floating-point scalar as a decimal string in scientific notation. - - Provides control over rounding, trimming and padding. Uses and assumes - IEEE unbiased rounding. Uses the "Dragon4" algorithm. - - Parameters - ---------- - x : python float or numpy floating scalar - Value to format. - precision : non-negative integer or None, optional - Maximum number of digits to print. May be None if `unique` is - `True`, but must be an integer if unique is `False`. 
- unique : boolean, optional - If `True`, use a digit-generation strategy which gives the shortest - representation which uniquely identifies the floating-point number from - other values of the same type, by judicious rounding. If `precision` - was omitted, print all necessary digits, otherwise digit generation is - cut off after `precision` digits and the remaining value is rounded. - If `False`, digits are generated as if printing an infinite-precision - value and stopping after `precision` digits, rounding the remaining - value. - trim : one of 'k', '.', '0', '-', optional - Controls post-processing trimming of trailing digits, as follows: - - * 'k' : keep trailing zeros, keep decimal point (no trimming) - * '.' : trim all trailing zeros, leave decimal point - * '0' : trim all but the zero before the decimal point. Insert the - zero if it is missing. - * '-' : trim trailing zeros and any trailing decimal point - sign : boolean, optional - Whether to show the sign for positive values. - pad_left : non-negative integer, optional - Pad the left side of the string with whitespace until at least that - many characters are to the left of the decimal point. - exp_digits : non-negative integer, optional - Pad the exponent with zeros until it contains at least this many digits. - If omitted, the exponent will be at least 2 digits. 
- - Returns - ------- - rep : string - The string representation of the floating point value - - See Also - -------- - format_float_positional - - Examples - -------- - >>> np.format_float_scientific(np.float32(np.pi)) - '3.1415927e+00' - >>> s = np.float32(1.23e24) - >>> np.format_float_scientific(s, unique=False, precision=15) - '1.230000071797338e+24' - >>> np.format_float_scientific(s, exp_digits=4) - '1.23e+0024' - """ - precision = _none_or_positive_arg(precision, 'precision') - pad_left = _none_or_positive_arg(pad_left, 'pad_left') - exp_digits = _none_or_positive_arg(exp_digits, 'exp_digits') - return dragon4_scientific(x, precision=precision, unique=unique, - trim=trim, sign=sign, pad_left=pad_left, - exp_digits=exp_digits) - - -@set_module('numpy') -def format_float_positional(x, precision=None, unique=True, - fractional=True, trim='k', sign=False, - pad_left=None, pad_right=None): - """ - Format a floating-point scalar as a decimal string in positional notation. - - Provides control over rounding, trimming and padding. Uses and assumes - IEEE unbiased rounding. Uses the "Dragon4" algorithm. - - Parameters - ---------- - x : python float or numpy floating scalar - Value to format. - precision : non-negative integer or None, optional - Maximum number of digits to print. May be None if `unique` is - `True`, but must be an integer if unique is `False`. - unique : boolean, optional - If `True`, use a digit-generation strategy which gives the shortest - representation which uniquely identifies the floating-point number from - other values of the same type, by judicious rounding. If `precision` - was omitted, print out all necessary digits, otherwise digit generation - is cut off after `precision` digits and the remaining value is rounded. - If `False`, digits are generated as if printing an infinite-precision - value and stopping after `precision` digits, rounding the remaining - value. 
- fractional : boolean, optional - If `True`, the cutoff of `precision` digits refers to the total number - of digits after the decimal point, including leading zeros. - If `False`, `precision` refers to the total number of significant - digits, before or after the decimal point, ignoring leading zeros. - trim : one of 'k', '.', '0', '-', optional - Controls post-processing trimming of trailing digits, as follows: - - * 'k' : keep trailing zeros, keep decimal point (no trimming) - * '.' : trim all trailing zeros, leave decimal point - * '0' : trim all but the zero before the decimal point. Insert the - zero if it is missing. - * '-' : trim trailing zeros and any trailing decimal point - sign : boolean, optional - Whether to show the sign for positive values. - pad_left : non-negative integer, optional - Pad the left side of the string with whitespace until at least that - many characters are to the left of the decimal point. - pad_right : non-negative integer, optional - Pad the right side of the string with whitespace until at least that - many characters are to the right of the decimal point. 
- - Returns - ------- - rep : string - The string representation of the floating point value - - See Also - -------- - format_float_scientific - - Examples - -------- - >>> np.format_float_positional(np.float32(np.pi)) - '3.1415927' - >>> np.format_float_positional(np.float16(np.pi)) - '3.14' - >>> np.format_float_positional(np.float16(0.3)) - '0.3' - >>> np.format_float_positional(np.float16(0.3), unique=False, precision=10) - '0.3000488281' - """ - precision = _none_or_positive_arg(precision, 'precision') - pad_left = _none_or_positive_arg(pad_left, 'pad_left') - pad_right = _none_or_positive_arg(pad_right, 'pad_right') - return dragon4_positional(x, precision=precision, unique=unique, - fractional=fractional, trim=trim, - sign=sign, pad_left=pad_left, - pad_right=pad_right) - - -class IntegerFormat(object): - def __init__(self, data): - if data.size > 0: - max_str_len = max(len(str(np.max(data))), - len(str(np.min(data)))) - else: - max_str_len = 0 - self.format = '%{}d'.format(max_str_len) - - def __call__(self, x): - return self.format % x - - -class BoolFormat(object): - def __init__(self, data, **kwargs): - # add an extra space so " True" and "False" have the same length and - # array elements align nicely when printed, except in 0d arrays - self.truestr = ' True' if data.shape != () else 'True' - - def __call__(self, x): - return self.truestr if x else "False" - - -class ComplexFloatingFormat(object): - """ Formatter for subtypes of np.complexfloating """ - def __init__(self, x, precision, floatmode, suppress_small, - sign=False, **kwarg): - # for backcompatibility, accept bools - if isinstance(sign, bool): - sign = '+' if sign else '-' - - floatmode_real = floatmode_imag = floatmode - if kwarg.get('legacy', False) == '1.13': - floatmode_real = 'maxprec_equal' - floatmode_imag = 'maxprec' - - self.real_format = FloatingFormat(x.real, precision, floatmode_real, - suppress_small, sign=sign, **kwarg) - self.imag_format = FloatingFormat(x.imag, precision, 
floatmode_imag, - suppress_small, sign='+', **kwarg) - - def __call__(self, x): - r = self.real_format(x.real) - i = self.imag_format(x.imag) - - # add the 'j' before the terminal whitespace in i - sp = len(i.rstrip()) - i = i[:sp] + 'j' + i[sp:] - - return r + i - - -class _TimelikeFormat(object): - def __init__(self, data): - non_nat = data[~isnat(data)] - if len(non_nat) > 0: - # Max str length of non-NaT elements - max_str_len = max(len(self._format_non_nat(np.max(non_nat))), - len(self._format_non_nat(np.min(non_nat)))) - else: - max_str_len = 0 - if len(non_nat) < data.size: - # data contains a NaT - max_str_len = max(max_str_len, 5) - self._format = '%{}s'.format(max_str_len) - self._nat = "'NaT'".rjust(max_str_len) - - def _format_non_nat(self, x): - # override in subclass - raise NotImplementedError - - def __call__(self, x): - if isnat(x): - return self._nat - else: - return self._format % self._format_non_nat(x) - - -class DatetimeFormat(_TimelikeFormat): - def __init__(self, x, unit=None, timezone=None, casting='same_kind', - legacy=False): - # Get the unit from the dtype - if unit is None: - if x.dtype.kind == 'M': - unit = datetime_data(x.dtype)[0] - else: - unit = 's' - - if timezone is None: - timezone = 'naive' - self.timezone = timezone - self.unit = unit - self.casting = casting - self.legacy = legacy - - # must be called after the above are configured - super(DatetimeFormat, self).__init__(x) - - def __call__(self, x): - if self.legacy == '1.13': - return self._format_non_nat(x) - return super(DatetimeFormat, self).__call__(x) - - def _format_non_nat(self, x): - return "'%s'" % datetime_as_string(x, - unit=self.unit, - timezone=self.timezone, - casting=self.casting) - - -class TimedeltaFormat(_TimelikeFormat): - def _format_non_nat(self, x): - return str(x.astype('i8')) - - -class SubArrayFormat(object): - def __init__(self, format_function): - self.format_function = format_function - - def __call__(self, arr): - if arr.ndim <= 1: - return "[" + 
", ".join(self.format_function(a) for a in arr) + "]" - return "[" + ", ".join(self.__call__(a) for a in arr) + "]" - - -class StructuredVoidFormat(object): - """ - Formatter for structured np.void objects. - - This does not work on structured alias types like np.dtype(('i4', 'i2,i2')), - as alias scalars lose their field information, and the implementation - relies upon np.void.__getitem__. - """ - def __init__(self, format_functions): - self.format_functions = format_functions - - @classmethod - def from_data(cls, data, **options): - """ - This is a second way to initialize StructuredVoidFormat, using the raw data - as input. Added to avoid changing the signature of __init__. - """ - format_functions = [] - for field_name in data.dtype.names: - format_function = _get_format_function(data[field_name], **options) - if data.dtype[field_name].shape != (): - format_function = SubArrayFormat(format_function) - format_functions.append(format_function) - return cls(format_functions) - - def __call__(self, x): - str_fields = [ - format_function(field) - for field, format_function in zip(x, self.format_functions) - ] - if len(str_fields) == 1: - return "({},)".format(str_fields[0]) - else: - return "({})".format(", ".join(str_fields)) - - -def _void_scalar_repr(x): - """ - Implements the repr for structured-void scalars. It is called from the - scalartypes.c.src code, and is placed here because it uses the elementwise - formatters defined above. - """ - return StructuredVoidFormat.from_data(array(x), **_format_options)(x) - - -_typelessdata = [int_, float_, complex_, bool_] -if issubclass(intc, int): - _typelessdata.append(intc) -if issubclass(longlong, int): - _typelessdata.append(longlong) - - -def dtype_is_implied(dtype): - """ - Determine if the given dtype is implied by the representation of its values. - - Parameters - ---------- - dtype : dtype - Data type - - Returns - ------- - implied : bool - True if the dtype is implied by the representation of its values. 
- - Examples - -------- - >>> np.core.arrayprint.dtype_is_implied(int) - True - >>> np.array([1, 2, 3], int) - array([1, 2, 3]) - >>> np.core.arrayprint.dtype_is_implied(np.int8) - False - >>> np.array([1, 2, 3], np.int8) - array([1, 2, 3], dtype=int8) - """ - dtype = np.dtype(dtype) - if _format_options['legacy'] == '1.13' and dtype.type == bool_: - return False - - # not just void types can be structured, and names are not part of the repr - if dtype.names is not None: - return False - - return dtype.type in _typelessdata - - -def dtype_short_repr(dtype): - """ - Convert a dtype to a short form which evaluates to the same dtype. - - The intent is roughly that the following holds - - >>> from numpy import * - >>> dt = np.int64([1, 2]).dtype - >>> assert eval(dtype_short_repr(dt)) == dt - """ - if dtype.names is not None: - # structured dtypes give a list or tuple repr - return str(dtype) - elif issubclass(dtype.type, flexible): - # handle these separately so they don't give garbage like str256 - return "'%s'" % str(dtype) - - typename = dtype.name - # quote typenames which can't be represented as python variable names - if typename and not (typename[0].isalpha() and typename.isalnum()): - typename = repr(typename) - - return typename - - -def _array_repr_implementation( - arr, max_line_width=None, precision=None, suppress_small=None, - array2string=array2string): - """Internal version of array_repr() that allows overriding array2string.""" - if max_line_width is None: - max_line_width = _format_options['linewidth'] - - if type(arr) is not ndarray: - class_name = type(arr).__name__ - else: - class_name = "array" - - skipdtype = dtype_is_implied(arr.dtype) and arr.size > 0 - - prefix = class_name + "(" - suffix = ")" if skipdtype else "," - - if (_format_options['legacy'] == '1.13' and - arr.shape == () and not arr.dtype.names): - lst = repr(arr.item()) - elif arr.size > 0 or arr.shape == (0,): - lst = array2string(arr, max_line_width, precision, suppress_small, - 
', ', prefix, suffix=suffix) - else: # show zero-length shape unless it is (0,) - lst = "[], shape=%s" % (repr(arr.shape),) - - arr_str = prefix + lst + suffix - - if skipdtype: - return arr_str - - dtype_str = "dtype={})".format(dtype_short_repr(arr.dtype)) - - # compute whether we should put dtype on a new line: Do so if adding the - # dtype would extend the last line past max_line_width. - # Note: This line gives the correct result even when rfind returns -1. - last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1) - spacer = " " - if _format_options['legacy'] == '1.13': - if issubclass(arr.dtype.type, flexible): - spacer = '\n' + ' '*len(class_name + "(") - elif last_line_len + len(dtype_str) + 1 > max_line_width: - spacer = '\n' + ' '*len(class_name + "(") - - return arr_str + spacer + dtype_str - - -def _array_repr_dispatcher( - arr, max_line_width=None, precision=None, suppress_small=None): - return (arr,) - - -@array_function_dispatch(_array_repr_dispatcher, module='numpy') -def array_repr(arr, max_line_width=None, precision=None, suppress_small=None): - """ - Return the string representation of an array. - - Parameters - ---------- - arr : ndarray - Input array. - max_line_width : int, optional - Inserts newlines if text is longer than `max_line_width`. - Defaults to ``numpy.get_printoptions()['linewidth']``. - precision : int, optional - Floating point precision. - Defaults to ``numpy.get_printoptions()['precision']``. - suppress_small : bool, optional - Represent numbers "very close" to zero as zero; default is False. - Very close is defined by precision: if the precision is 8, e.g., - numbers smaller (in absolute value) than 5e-9 are represented as - zero. - Defaults to ``numpy.get_printoptions()['suppress']``. - - Returns - ------- - string : str - The string representation of an array. 
- - See Also - -------- - array_str, array2string, set_printoptions - - Examples - -------- - >>> np.array_repr(np.array([1,2])) - 'array([1, 2])' - >>> np.array_repr(np.ma.array([0.])) - 'MaskedArray([0.])' - >>> np.array_repr(np.array([], np.int32)) - 'array([], dtype=int32)' - - >>> x = np.array([1e-6, 4e-7, 2, 3]) - >>> np.array_repr(x, precision=6, suppress_small=True) - 'array([0.000001, 0. , 2. , 3. ])' - - """ - return _array_repr_implementation( - arr, max_line_width, precision, suppress_small) - - -@_recursive_guard() -def _guarded_repr_or_str(v): - if isinstance(v, bytes): - return repr(v) - return str(v) - - -def _array_str_implementation( - a, max_line_width=None, precision=None, suppress_small=None, - array2string=array2string): - """Internal version of array_str() that allows overriding array2string.""" - if (_format_options['legacy'] == '1.13' and - a.shape == () and not a.dtype.names): - return str(a.item()) - - # the str of 0d arrays is a special case: It should appear like a scalar, - # so floats are not truncated by `precision`, and strings are not wrapped - # in quotes. So we return the str of the scalar value. - if a.shape == (): - # obtain a scalar and call str on it, avoiding problems for subclasses - # for which indexing with () returns a 0d instead of a scalar by using - # ndarray's getindex. Also guard against recursive 0d object arrays. - return _guarded_repr_or_str(np.ndarray.__getitem__(a, ())) - - return array2string(a, max_line_width, precision, suppress_small, ' ', "") - - -def _array_str_dispatcher( - a, max_line_width=None, precision=None, suppress_small=None): - return (a,) - - -@array_function_dispatch(_array_str_dispatcher, module='numpy') -def array_str(a, max_line_width=None, precision=None, suppress_small=None): - """ - Return a string representation of the data in an array. - - The data in the array is returned as a single string. 
This function is - similar to `array_repr`, the difference being that `array_repr` also - returns information on the kind of array and its data type. - - Parameters - ---------- - a : ndarray - Input array. - max_line_width : int, optional - Inserts newlines if text is longer than `max_line_width`. - Defaults to ``numpy.get_printoptions()['linewidth']``. - precision : int, optional - Floating point precision. - Defaults to ``numpy.get_printoptions()['precision']``. - suppress_small : bool, optional - Represent numbers "very close" to zero as zero; default is False. - Very close is defined by precision: if the precision is 8, e.g., - numbers smaller (in absolute value) than 5e-9 are represented as - zero. - Defaults to ``numpy.get_printoptions()['suppress']``. - - See Also - -------- - array2string, array_repr, set_printoptions - - Examples - -------- - >>> np.array_str(np.arange(3)) - '[0 1 2]' - - """ - return _array_str_implementation( - a, max_line_width, precision, suppress_small) - - -# needed if __array_function__ is disabled -_array2string_impl = getattr(array2string, '__wrapped__', array2string) -_default_array_str = functools.partial(_array_str_implementation, - array2string=_array2string_impl) -_default_array_repr = functools.partial(_array_repr_implementation, - array2string=_array2string_impl) - - -def set_string_function(f, repr=True): - """ - Set a Python function to be used when pretty printing arrays. - - Parameters - ---------- - f : function or None - Function to be used to pretty print arrays. The function should expect - a single array argument and return a string of the representation of - the array. If None, the function is reset to the default NumPy function - to print arrays. - repr : bool, optional - If True (default), the function for pretty printing (``__repr__``) - is set, if False the function that returns the default string - representation (``__str__``) is set. 
- - See Also - -------- - set_printoptions, get_printoptions - - Examples - -------- - >>> def pprint(arr): - ... return 'HA! - What are you going to do now?' - ... - >>> np.set_string_function(pprint) - >>> a = np.arange(10) - >>> a - HA! - What are you going to do now? - >>> _ = a - >>> # [0 1 2 3 4 5 6 7 8 9] - - We can reset the function to the default: - - >>> np.set_string_function(None) - >>> a - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - - `repr` affects either pretty printing or normal string representation. - Note that ``__repr__`` is still affected by setting ``__str__`` - because the width of each array element in the returned string becomes - equal to the length of the result of ``__str__()``. - - >>> x = np.arange(4) - >>> np.set_string_function(lambda x:'random', repr=False) - >>> x.__str__() - 'random' - >>> x.__repr__() - 'array([0, 1, 2, 3])' - - """ - if f is None: - if repr: - return multiarray.set_string_function(_default_array_repr, 1) - else: - return multiarray.set_string_function(_default_array_str, 0) - else: - return multiarray.set_string_function(f, repr) - -set_string_function(_default_array_str, False) -set_string_function(_default_array_repr, True) diff --git a/venv/lib/python3.7/site-packages/numpy/core/cversions.py b/venv/lib/python3.7/site-packages/numpy/core/cversions.py deleted file mode 100644 index 7995dd9..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/cversions.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Simple script to compute the api hash of the current API. - -The API has is defined by numpy_api_order and ufunc_api_order. 
- -""" -from __future__ import division, absolute_import, print_function - -from os.path import dirname - -from code_generators.genapi import fullapi_hash -from code_generators.numpy_api import full_api - -if __name__ == '__main__': - curdir = dirname(__file__) - print(fullapi_hash(full_api)) diff --git a/venv/lib/python3.7/site-packages/numpy/core/defchararray.py b/venv/lib/python3.7/site-packages/numpy/core/defchararray.py deleted file mode 100644 index 2d89d6f..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/defchararray.py +++ /dev/null @@ -1,2819 +0,0 @@ -""" -This module contains a set of functions for vectorized string -operations and methods. - -.. note:: - The `chararray` class exists for backwards compatibility with - Numarray, it is not recommended for new development. Starting from numpy - 1.4, if one needs arrays of strings, it is recommended to use arrays of - `dtype` `object_`, `string_` or `unicode_`, and use the free functions - in the `numpy.char` module for fast vectorized string operations. - -Some methods will only be available if the corresponding string method is -available in your version of Python. - -The preferred alias for `defchararray` is `numpy.char`. 
- -""" -from __future__ import division, absolute_import, print_function - -import functools -import sys -from .numerictypes import string_, unicode_, integer, object_, bool_, character -from .numeric import ndarray, compare_chararrays -from .numeric import array as narray -from numpy.core.multiarray import _vec_string -from numpy.core.overrides import set_module -from numpy.core import overrides -from numpy.compat import asbytes, long -import numpy - -__all__ = [ - 'equal', 'not_equal', 'greater_equal', 'less_equal', - 'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize', - 'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs', - 'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace', - 'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition', - 'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit', - 'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase', - 'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal', - 'array', 'asarray' - ] - - -_globalvar = 0 -if sys.version_info[0] >= 3: - _unicode = str - _bytes = bytes -else: - _unicode = unicode - _bytes = str -_len = len - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy.char') - - -def _use_unicode(*args): - """ - Helper function for determining the output type of some string - operations. - - For an operation on two ndarrays, if at least one is unicode, the - result should be unicode. - """ - for x in args: - if (isinstance(x, _unicode) or - issubclass(numpy.asarray(x).dtype.type, unicode_)): - return unicode_ - return string_ - -def _to_string_or_unicode_array(result): - """ - Helper function to cast a result back into a string or unicode array - if an object array must be used as an intermediary. - """ - return numpy.asarray(result.tolist()) - -def _clean_args(*args): - """ - Helper function for delegating arguments to Python string - functions. 
- - Many of the Python string operations that have optional arguments - do not use 'None' to indicate a default value. In these cases, - we need to remove all None arguments, and those following them. - """ - newargs = [] - for chk in args: - if chk is None: - break - newargs.append(chk) - return newargs - -def _get_num_chars(a): - """ - Helper function that returns the number of characters per field in - a string or unicode array. This is to abstract out the fact that - for a unicode array this is itemsize / 4. - """ - if issubclass(a.dtype.type, unicode_): - return a.itemsize // 4 - return a.itemsize - - -def _binary_op_dispatcher(x1, x2): - return (x1, x2) - - -@array_function_dispatch(_binary_op_dispatcher) -def equal(x1, x2): - """ - Return (x1 == x2) element-wise. - - Unlike `numpy.equal`, this comparison is performed by first - stripping whitespace characters from the end of the string. This - behavior is provided for backward-compatibility with numarray. - - Parameters - ---------- - x1, x2 : array_like of str or unicode - Input arrays of the same shape. - - Returns - ------- - out : ndarray or bool - Output array of bools, or a single bool if x1 and x2 are scalars. - - See Also - -------- - not_equal, greater_equal, less_equal, greater, less - """ - return compare_chararrays(x1, x2, '==', True) - - -@array_function_dispatch(_binary_op_dispatcher) -def not_equal(x1, x2): - """ - Return (x1 != x2) element-wise. - - Unlike `numpy.not_equal`, this comparison is performed by first - stripping whitespace characters from the end of the string. This - behavior is provided for backward-compatibility with numarray. - - Parameters - ---------- - x1, x2 : array_like of str or unicode - Input arrays of the same shape. - - Returns - ------- - out : ndarray or bool - Output array of bools, or a single bool if x1 and x2 are scalars. 
- - See Also - -------- - equal, greater_equal, less_equal, greater, less - """ - return compare_chararrays(x1, x2, '!=', True) - - -@array_function_dispatch(_binary_op_dispatcher) -def greater_equal(x1, x2): - """ - Return (x1 >= x2) element-wise. - - Unlike `numpy.greater_equal`, this comparison is performed by - first stripping whitespace characters from the end of the string. - This behavior is provided for backward-compatibility with - numarray. - - Parameters - ---------- - x1, x2 : array_like of str or unicode - Input arrays of the same shape. - - Returns - ------- - out : ndarray or bool - Output array of bools, or a single bool if x1 and x2 are scalars. - - See Also - -------- - equal, not_equal, less_equal, greater, less - """ - return compare_chararrays(x1, x2, '>=', True) - - -@array_function_dispatch(_binary_op_dispatcher) -def less_equal(x1, x2): - """ - Return (x1 <= x2) element-wise. - - Unlike `numpy.less_equal`, this comparison is performed by first - stripping whitespace characters from the end of the string. This - behavior is provided for backward-compatibility with numarray. - - Parameters - ---------- - x1, x2 : array_like of str or unicode - Input arrays of the same shape. - - Returns - ------- - out : ndarray or bool - Output array of bools, or a single bool if x1 and x2 are scalars. - - See Also - -------- - equal, not_equal, greater_equal, greater, less - """ - return compare_chararrays(x1, x2, '<=', True) - - -@array_function_dispatch(_binary_op_dispatcher) -def greater(x1, x2): - """ - Return (x1 > x2) element-wise. - - Unlike `numpy.greater`, this comparison is performed by first - stripping whitespace characters from the end of the string. This - behavior is provided for backward-compatibility with numarray. - - Parameters - ---------- - x1, x2 : array_like of str or unicode - Input arrays of the same shape. - - Returns - ------- - out : ndarray or bool - Output array of bools, or a single bool if x1 and x2 are scalars. 
- - See Also - -------- - equal, not_equal, greater_equal, less_equal, less - """ - return compare_chararrays(x1, x2, '>', True) - - -@array_function_dispatch(_binary_op_dispatcher) -def less(x1, x2): - """ - Return (x1 < x2) element-wise. - - Unlike `numpy.greater`, this comparison is performed by first - stripping whitespace characters from the end of the string. This - behavior is provided for backward-compatibility with numarray. - - Parameters - ---------- - x1, x2 : array_like of str or unicode - Input arrays of the same shape. - - Returns - ------- - out : ndarray or bool - Output array of bools, or a single bool if x1 and x2 are scalars. - - See Also - -------- - equal, not_equal, greater_equal, less_equal, greater - """ - return compare_chararrays(x1, x2, '<', True) - - -def _unary_op_dispatcher(a): - return (a,) - - -@array_function_dispatch(_unary_op_dispatcher) -def str_len(a): - """ - Return len(a) element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of integers - - See also - -------- - __builtin__.len - """ - return _vec_string(a, integer, '__len__') - - -@array_function_dispatch(_binary_op_dispatcher) -def add(x1, x2): - """ - Return element-wise string concatenation for two arrays of str or unicode. - - Arrays `x1` and `x2` must have the same shape. - - Parameters - ---------- - x1 : array_like of str or unicode - Input array. - x2 : array_like of str or unicode - Input array. - - Returns - ------- - add : ndarray - Output array of `string_` or `unicode_`, depending on input types - of the same shape as `x1` and `x2`. 
- - """ - arr1 = numpy.asarray(x1) - arr2 = numpy.asarray(x2) - out_size = _get_num_chars(arr1) + _get_num_chars(arr2) - dtype = _use_unicode(arr1, arr2) - return _vec_string(arr1, (dtype, out_size), '__add__', (arr2,)) - - -def _multiply_dispatcher(a, i): - return (a,) - - -@array_function_dispatch(_multiply_dispatcher) -def multiply(a, i): - """ - Return (a * i), that is string multiple concatenation, - element-wise. - - Values in `i` of less than 0 are treated as 0 (which yields an - empty string). - - Parameters - ---------- - a : array_like of str or unicode - - i : array_like of ints - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input types - - """ - a_arr = numpy.asarray(a) - i_arr = numpy.asarray(i) - if not issubclass(i_arr.dtype.type, integer): - raise ValueError("Can only multiply by integers") - out_size = _get_num_chars(a_arr) * max(long(i_arr.max()), 0) - return _vec_string( - a_arr, (a_arr.dtype.type, out_size), '__mul__', (i_arr,)) - - -def _mod_dispatcher(a, values): - return (a, values) - - -@array_function_dispatch(_mod_dispatcher) -def mod(a, values): - """ - Return (a % i), that is pre-Python 2.6 string formatting - (iterpolation), element-wise for a pair of array_likes of str - or unicode. - - Parameters - ---------- - a : array_like of str or unicode - - values : array_like of values - These values will be element-wise interpolated into the string. - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input types - - See also - -------- - str.__mod__ - - """ - return _to_string_or_unicode_array( - _vec_string(a, object_, '__mod__', (values,))) - - -@array_function_dispatch(_unary_op_dispatcher) -def capitalize(a): - """ - Return a copy of `a` with only the first character of each element - capitalized. - - Calls `str.capitalize` element-wise. - - For 8-bit strings, this method is locale-dependent. 
- - Parameters - ---------- - a : array_like of str or unicode - Input array of strings to capitalize. - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input - types - - See also - -------- - str.capitalize - - Examples - -------- - >>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c - array(['a1b2', '1b2a', 'b2a1', '2a1b'], - dtype='|S4') - >>> np.char.capitalize(c) - array(['A1b2', '1b2a', 'B2a1', '2a1b'], - dtype='|S4') - - """ - a_arr = numpy.asarray(a) - return _vec_string(a_arr, a_arr.dtype, 'capitalize') - - -def _center_dispatcher(a, width, fillchar=None): - return (a,) - - -@array_function_dispatch(_center_dispatcher) -def center(a, width, fillchar=' '): - """ - Return a copy of `a` with its elements centered in a string of - length `width`. - - Calls `str.center` element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - width : int - The length of the resulting strings - fillchar : str or unicode, optional - The padding character to use (default is space). - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input - types - - See also - -------- - str.center - - """ - a_arr = numpy.asarray(a) - width_arr = numpy.asarray(width) - size = long(numpy.max(width_arr.flat)) - if numpy.issubdtype(a_arr.dtype, numpy.string_): - fillchar = asbytes(fillchar) - return _vec_string( - a_arr, (a_arr.dtype.type, size), 'center', (width_arr, fillchar)) - - -def _count_dispatcher(a, sub, start=None, end=None): - return (a,) - - -@array_function_dispatch(_count_dispatcher) -def count(a, sub, start=0, end=None): - """ - Returns an array with the number of non-overlapping occurrences of - substring `sub` in the range [`start`, `end`]. - - Calls `str.count` element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - sub : str or unicode - The substring to search for. 
- - start, end : int, optional - Optional arguments `start` and `end` are interpreted as slice - notation to specify the range in which to count. - - Returns - ------- - out : ndarray - Output array of ints. - - See also - -------- - str.count - - Examples - -------- - >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) - >>> c - array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> np.char.count(c, 'A') - array([3, 1, 1]) - >>> np.char.count(c, 'aA') - array([3, 1, 0]) - >>> np.char.count(c, 'A', start=1, end=4) - array([2, 1, 1]) - >>> np.char.count(c, 'A', start=1, end=3) - array([1, 0, 0]) - - """ - return _vec_string(a, integer, 'count', [sub, start] + _clean_args(end)) - - -def _code_dispatcher(a, encoding=None, errors=None): - return (a,) - - -@array_function_dispatch(_code_dispatcher) -def decode(a, encoding=None, errors=None): - """ - Calls `str.decode` element-wise. - - The set of available codecs comes from the Python standard library, - and may be extended at runtime. For more information, see the - :mod:`codecs` module. - - Parameters - ---------- - a : array_like of str or unicode - - encoding : str, optional - The name of an encoding - - errors : str, optional - Specifies how to handle encoding errors - - Returns - ------- - out : ndarray - - See also - -------- - str.decode - - Notes - ----- - The type of the result will depend on the encoding specified. - - Examples - -------- - >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) - >>> c - array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> np.char.encode(c, encoding='cp037') - array(['\\x81\\xc1\\x81\\xc1\\x81\\xc1', '@@\\x81\\xc1@@', - '\\x81\\x82\\xc2\\xc1\\xc2\\x82\\x81'], - dtype='|S7') - - """ - return _to_string_or_unicode_array( - _vec_string(a, object_, 'decode', _clean_args(encoding, errors))) - - -@array_function_dispatch(_code_dispatcher) -def encode(a, encoding=None, errors=None): - """ - Calls `str.encode` element-wise. 
- - The set of available codecs comes from the Python standard library, - and may be extended at runtime. For more information, see the codecs - module. - - Parameters - ---------- - a : array_like of str or unicode - - encoding : str, optional - The name of an encoding - - errors : str, optional - Specifies how to handle encoding errors - - Returns - ------- - out : ndarray - - See also - -------- - str.encode - - Notes - ----- - The type of the result will depend on the encoding specified. - - """ - return _to_string_or_unicode_array( - _vec_string(a, object_, 'encode', _clean_args(encoding, errors))) - - -def _endswith_dispatcher(a, suffix, start=None, end=None): - return (a,) - - -@array_function_dispatch(_endswith_dispatcher) -def endswith(a, suffix, start=0, end=None): - """ - Returns a boolean array which is `True` where the string element - in `a` ends with `suffix`, otherwise `False`. - - Calls `str.endswith` element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - suffix : str - - start, end : int, optional - With optional `start`, test beginning at that position. With - optional `end`, stop comparing at that position. - - Returns - ------- - out : ndarray - Outputs an array of bools. - - See also - -------- - str.endswith - - Examples - -------- - >>> s = np.array(['foo', 'bar']) - >>> s[0] = 'foo' - >>> s[1] = 'bar' - >>> s - array(['foo', 'bar'], dtype='>> np.char.endswith(s, 'ar') - array([False, True]) - >>> np.char.endswith(s, 'a', start=1, end=2) - array([False, True]) - - """ - return _vec_string( - a, bool_, 'endswith', [suffix, start] + _clean_args(end)) - - -def _expandtabs_dispatcher(a, tabsize=None): - return (a,) - - -@array_function_dispatch(_expandtabs_dispatcher) -def expandtabs(a, tabsize=8): - """ - Return a copy of each string element where all tab characters are - replaced by one or more spaces. - - Calls `str.expandtabs` element-wise. 
- - Return a copy of each string element where all tab characters are - replaced by one or more spaces, depending on the current column - and the given `tabsize`. The column number is reset to zero after - each newline occurring in the string. This doesn't understand other - non-printing characters or escape sequences. - - Parameters - ---------- - a : array_like of str or unicode - Input array - tabsize : int, optional - Replace tabs with `tabsize` number of spaces. If not given defaults - to 8 spaces. - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.expandtabs - - """ - return _to_string_or_unicode_array( - _vec_string(a, object_, 'expandtabs', (tabsize,))) - - -@array_function_dispatch(_count_dispatcher) -def find(a, sub, start=0, end=None): - """ - For each element, return the lowest index in the string where - substring `sub` is found. - - Calls `str.find` element-wise. - - For each element, return the lowest index in the string where - substring `sub` is found, such that `sub` is contained in the - range [`start`, `end`]. - - Parameters - ---------- - a : array_like of str or unicode - - sub : str or unicode - - start, end : int, optional - Optional arguments `start` and `end` are interpreted as in - slice notation. - - Returns - ------- - out : ndarray or int - Output array of ints. Returns -1 if `sub` is not found. - - See also - -------- - str.find - - """ - return _vec_string( - a, integer, 'find', [sub, start] + _clean_args(end)) - - -@array_function_dispatch(_count_dispatcher) -def index(a, sub, start=0, end=None): - """ - Like `find`, but raises `ValueError` when the substring is not found. - - Calls `str.index` element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - sub : str or unicode - - start, end : int, optional - - Returns - ------- - out : ndarray - Output array of ints. Returns -1 if `sub` is not found. 
- - See also - -------- - find, str.find - - """ - return _vec_string( - a, integer, 'index', [sub, start] + _clean_args(end)) - - -@array_function_dispatch(_unary_op_dispatcher) -def isalnum(a): - """ - Returns true for each element if all characters in the string are - alphanumeric and there is at least one character, false otherwise. - - Calls `str.isalnum` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.isalnum - """ - return _vec_string(a, bool_, 'isalnum') - - -@array_function_dispatch(_unary_op_dispatcher) -def isalpha(a): - """ - Returns true for each element if all characters in the string are - alphabetic and there is at least one character, false otherwise. - - Calls `str.isalpha` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of bools - - See also - -------- - str.isalpha - """ - return _vec_string(a, bool_, 'isalpha') - - -@array_function_dispatch(_unary_op_dispatcher) -def isdigit(a): - """ - Returns true for each element if all characters in the string are - digits and there is at least one character, false otherwise. - - Calls `str.isdigit` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of bools - - See also - -------- - str.isdigit - """ - return _vec_string(a, bool_, 'isdigit') - - -@array_function_dispatch(_unary_op_dispatcher) -def islower(a): - """ - Returns true for each element if all cased characters in the - string are lowercase and there is at least one cased character, - false otherwise. - - Calls `str.islower` element-wise. 
- - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of bools - - See also - -------- - str.islower - """ - return _vec_string(a, bool_, 'islower') - - -@array_function_dispatch(_unary_op_dispatcher) -def isspace(a): - """ - Returns true for each element if there are only whitespace - characters in the string and there is at least one character, - false otherwise. - - Calls `str.isspace` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of bools - - See also - -------- - str.isspace - """ - return _vec_string(a, bool_, 'isspace') - - -@array_function_dispatch(_unary_op_dispatcher) -def istitle(a): - """ - Returns true for each element if the element is a titlecased - string and there is at least one character, false otherwise. - - Call `str.istitle` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of bools - - See also - -------- - str.istitle - """ - return _vec_string(a, bool_, 'istitle') - - -@array_function_dispatch(_unary_op_dispatcher) -def isupper(a): - """ - Returns true for each element if all cased characters in the - string are uppercase and there is at least one character, false - otherwise. - - Call `str.isupper` element-wise. - - For 8-bit strings, this method is locale-dependent. 
- - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of bools - - See also - -------- - str.isupper - """ - return _vec_string(a, bool_, 'isupper') - - -def _join_dispatcher(sep, seq): - return (sep, seq) - - -@array_function_dispatch(_join_dispatcher) -def join(sep, seq): - """ - Return a string which is the concatenation of the strings in the - sequence `seq`. - - Calls `str.join` element-wise. - - Parameters - ---------- - sep : array_like of str or unicode - seq : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input types - - See also - -------- - str.join - """ - return _to_string_or_unicode_array( - _vec_string(sep, object_, 'join', (seq,))) - - - -def _just_dispatcher(a, width, fillchar=None): - return (a,) - - -@array_function_dispatch(_just_dispatcher) -def ljust(a, width, fillchar=' '): - """ - Return an array with the elements of `a` left-justified in a - string of length `width`. - - Calls `str.ljust` element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - width : int - The length of the resulting strings - fillchar : str or unicode, optional - The character to use for padding - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.ljust - - """ - a_arr = numpy.asarray(a) - width_arr = numpy.asarray(width) - size = long(numpy.max(width_arr.flat)) - if numpy.issubdtype(a_arr.dtype, numpy.string_): - fillchar = asbytes(fillchar) - return _vec_string( - a_arr, (a_arr.dtype.type, size), 'ljust', (width_arr, fillchar)) - - -@array_function_dispatch(_unary_op_dispatcher) -def lower(a): - """ - Return an array with the elements converted to lowercase. - - Call `str.lower` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array_like, {str, unicode} - Input array. 
- - Returns - ------- - out : ndarray, {str, unicode} - Output array of str or unicode, depending on input type - - See also - -------- - str.lower - - Examples - -------- - >>> c = np.array(['A1B C', '1BCA', 'BCA1']); c - array(['A1B C', '1BCA', 'BCA1'], dtype='>> np.char.lower(c) - array(['a1b c', '1bca', 'bca1'], dtype='>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) - >>> c - array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> np.char.lstrip(c, 'a') - array(['AaAaA', ' aA ', 'bBABba'], dtype='>> np.char.lstrip(c, 'A') # leaves c unchanged - array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, '')).all() - ... # XXX: is this a regression? This used to return True - ... # np.char.lstrip(c,'') does not modify c at all. - False - >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, None)).all() - True - - """ - a_arr = numpy.asarray(a) - return _vec_string(a_arr, a_arr.dtype, 'lstrip', (chars,)) - - -def _partition_dispatcher(a, sep): - return (a,) - - -@array_function_dispatch(_partition_dispatcher) -def partition(a, sep): - """ - Partition each element in `a` around `sep`. - - Calls `str.partition` element-wise. - - For each element in `a`, split the element as the first - occurrence of `sep`, and return 3 strings containing the part - before the separator, the separator itself, and the part after - the separator. If the separator is not found, return 3 strings - containing the string itself, followed by two empty strings. - - Parameters - ---------- - a : array_like, {str, unicode} - Input array - sep : {str, unicode} - Separator to split each string element in `a`. - - Returns - ------- - out : ndarray, {str, unicode} - Output array of str or unicode, depending on input type. - The output array will have an extra dimension with 3 - elements per input element. 
- - See also - -------- - str.partition - - """ - return _to_string_or_unicode_array( - _vec_string(a, object_, 'partition', (sep,))) - - -def _replace_dispatcher(a, old, new, count=None): - return (a,) - - -@array_function_dispatch(_replace_dispatcher) -def replace(a, old, new, count=None): - """ - For each element in `a`, return a copy of the string with all - occurrences of substring `old` replaced by `new`. - - Calls `str.replace` element-wise. - - Parameters - ---------- - a : array-like of str or unicode - - old, new : str or unicode - - count : int, optional - If the optional argument `count` is given, only the first - `count` occurrences are replaced. - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.replace - - """ - return _to_string_or_unicode_array( - _vec_string( - a, object_, 'replace', [old, new] + _clean_args(count))) - - -@array_function_dispatch(_count_dispatcher) -def rfind(a, sub, start=0, end=None): - """ - For each element in `a`, return the highest index in the string - where substring `sub` is found, such that `sub` is contained - within [`start`, `end`]. - - Calls `str.rfind` element-wise. - - Parameters - ---------- - a : array-like of str or unicode - - sub : str or unicode - - start, end : int, optional - Optional arguments `start` and `end` are interpreted as in - slice notation. - - Returns - ------- - out : ndarray - Output array of ints. Return -1 on failure. - - See also - -------- - str.rfind - - """ - return _vec_string( - a, integer, 'rfind', [sub, start] + _clean_args(end)) - - -@array_function_dispatch(_count_dispatcher) -def rindex(a, sub, start=0, end=None): - """ - Like `rfind`, but raises `ValueError` when the substring `sub` is - not found. - - Calls `str.rindex` element-wise. 
- - Parameters - ---------- - a : array-like of str or unicode - - sub : str or unicode - - start, end : int, optional - - Returns - ------- - out : ndarray - Output array of ints. - - See also - -------- - rfind, str.rindex - - """ - return _vec_string( - a, integer, 'rindex', [sub, start] + _clean_args(end)) - - -@array_function_dispatch(_just_dispatcher) -def rjust(a, width, fillchar=' '): - """ - Return an array with the elements of `a` right-justified in a - string of length `width`. - - Calls `str.rjust` element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - width : int - The length of the resulting strings - fillchar : str or unicode, optional - The character to use for padding - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.rjust - - """ - a_arr = numpy.asarray(a) - width_arr = numpy.asarray(width) - size = long(numpy.max(width_arr.flat)) - if numpy.issubdtype(a_arr.dtype, numpy.string_): - fillchar = asbytes(fillchar) - return _vec_string( - a_arr, (a_arr.dtype.type, size), 'rjust', (width_arr, fillchar)) - - -@array_function_dispatch(_partition_dispatcher) -def rpartition(a, sep): - """ - Partition (split) each element around the right-most separator. - - Calls `str.rpartition` element-wise. - - For each element in `a`, split the element as the last - occurrence of `sep`, and return 3 strings containing the part - before the separator, the separator itself, and the part after - the separator. If the separator is not found, return 3 strings - containing the string itself, followed by two empty strings. - - Parameters - ---------- - a : array_like of str or unicode - Input array - sep : str or unicode - Right-most separator to split each element in array. - - Returns - ------- - out : ndarray - Output array of string or unicode, depending on input - type. The output array will have an extra dimension with - 3 elements per input element. 
- - See also - -------- - str.rpartition - - """ - return _to_string_or_unicode_array( - _vec_string(a, object_, 'rpartition', (sep,))) - - -def _split_dispatcher(a, sep=None, maxsplit=None): - return (a,) - - -@array_function_dispatch(_split_dispatcher) -def rsplit(a, sep=None, maxsplit=None): - """ - For each element in `a`, return a list of the words in the - string, using `sep` as the delimiter string. - - Calls `str.rsplit` element-wise. - - Except for splitting from the right, `rsplit` - behaves like `split`. - - Parameters - ---------- - a : array_like of str or unicode - - sep : str or unicode, optional - If `sep` is not specified or None, any whitespace string - is a separator. - maxsplit : int, optional - If `maxsplit` is given, at most `maxsplit` splits are done, - the rightmost ones. - - Returns - ------- - out : ndarray - Array of list objects - - See also - -------- - str.rsplit, split - - """ - # This will return an array of lists of different sizes, so we - # leave it as an object array - return _vec_string( - a, object_, 'rsplit', [sep] + _clean_args(maxsplit)) - - -def _strip_dispatcher(a, chars=None): - return (a,) - - -@array_function_dispatch(_strip_dispatcher) -def rstrip(a, chars=None): - """ - For each element in `a`, return a copy with the trailing - characters removed. - - Calls `str.rstrip` element-wise. - - Parameters - ---------- - a : array-like of str or unicode - - chars : str or unicode, optional - The `chars` argument is a string specifying the set of - characters to be removed. If omitted or None, the `chars` - argument defaults to removing whitespace. The `chars` argument - is not a suffix; rather, all combinations of its values are - stripped. 
- - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.rstrip - - Examples - -------- - >>> c = np.array(['aAaAaA', 'abBABba'], dtype='S7'); c - array(['aAaAaA', 'abBABba'], - dtype='|S7') - >>> np.char.rstrip(c, b'a') - array(['aAaAaA', 'abBABb'], - dtype='|S7') - >>> np.char.rstrip(c, b'A') - array(['aAaAa', 'abBABba'], - dtype='|S7') - - """ - a_arr = numpy.asarray(a) - return _vec_string(a_arr, a_arr.dtype, 'rstrip', (chars,)) - - -@array_function_dispatch(_split_dispatcher) -def split(a, sep=None, maxsplit=None): - """ - For each element in `a`, return a list of the words in the - string, using `sep` as the delimiter string. - - Calls `str.split` element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - sep : str or unicode, optional - If `sep` is not specified or None, any whitespace string is a - separator. - - maxsplit : int, optional - If `maxsplit` is given, at most `maxsplit` splits are done. - - Returns - ------- - out : ndarray - Array of list objects - - See also - -------- - str.split, rsplit - - """ - # This will return an array of lists of different sizes, so we - # leave it as an object array - return _vec_string( - a, object_, 'split', [sep] + _clean_args(maxsplit)) - - -def _splitlines_dispatcher(a, keepends=None): - return (a,) - - -@array_function_dispatch(_splitlines_dispatcher) -def splitlines(a, keepends=None): - """ - For each element in `a`, return a list of the lines in the - element, breaking at line boundaries. - - Calls `str.splitlines` element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - keepends : bool, optional - Line breaks are not included in the resulting list unless - keepends is given and true. 
- - Returns - ------- - out : ndarray - Array of list objects - - See also - -------- - str.splitlines - - """ - return _vec_string( - a, object_, 'splitlines', _clean_args(keepends)) - - -def _startswith_dispatcher(a, prefix, start=None, end=None): - return (a,) - - -@array_function_dispatch(_startswith_dispatcher) -def startswith(a, prefix, start=0, end=None): - """ - Returns a boolean array which is `True` where the string element - in `a` starts with `prefix`, otherwise `False`. - - Calls `str.startswith` element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - prefix : str - - start, end : int, optional - With optional `start`, test beginning at that position. With - optional `end`, stop comparing at that position. - - Returns - ------- - out : ndarray - Array of booleans - - See also - -------- - str.startswith - - """ - return _vec_string( - a, bool_, 'startswith', [prefix, start] + _clean_args(end)) - - -@array_function_dispatch(_strip_dispatcher) -def strip(a, chars=None): - """ - For each element in `a`, return a copy with the leading and - trailing characters removed. - - Calls `str.strip` element-wise. - - Parameters - ---------- - a : array-like of str or unicode - - chars : str or unicode, optional - The `chars` argument is a string specifying the set of - characters to be removed. If omitted or None, the `chars` - argument defaults to removing whitespace. The `chars` argument - is not a prefix or suffix; rather, all combinations of its - values are stripped. 
- - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.strip - - Examples - -------- - >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) - >>> c - array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> np.char.strip(c) - array(['aAaAaA', 'aA', 'abBABba'], dtype='>> np.char.strip(c, 'a') # 'a' unstripped from c[1] because whitespace leads - array(['AaAaA', ' aA ', 'bBABb'], dtype='>> np.char.strip(c, 'A') # 'A' unstripped from c[1] because (unprinted) ws trails - array(['aAaAa', ' aA ', 'abBABba'], dtype='>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c - array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'], - dtype='|S5') - >>> np.char.swapcase(c) - array(['A1b C', '1B cA', 'B cA1', 'Ca1B'], - dtype='|S5') - - """ - a_arr = numpy.asarray(a) - return _vec_string(a_arr, a_arr.dtype, 'swapcase') - - -@array_function_dispatch(_unary_op_dispatcher) -def title(a): - """ - Return element-wise title cased version of string or unicode. - - Title case words start with uppercase characters, all remaining cased - characters are lowercase. - - Calls `str.title` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array_like, {str, unicode} - Input array. 
- - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.title - - Examples - -------- - >>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c - array(['a1b c', '1b ca', 'b ca1', 'ca1b'], - dtype='|S5') - >>> np.char.title(c) - array(['A1B C', '1B Ca', 'B Ca1', 'Ca1B'], - dtype='|S5') - - """ - a_arr = numpy.asarray(a) - return _vec_string(a_arr, a_arr.dtype, 'title') - - -def _translate_dispatcher(a, table, deletechars=None): - return (a,) - - -@array_function_dispatch(_translate_dispatcher) -def translate(a, table, deletechars=None): - """ - For each element in `a`, return a copy of the string where all - characters occurring in the optional argument `deletechars` are - removed, and the remaining characters have been mapped through the - given translation table. - - Calls `str.translate` element-wise. - - Parameters - ---------- - a : array-like of str or unicode - - table : str of length 256 - - deletechars : str - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.translate - - """ - a_arr = numpy.asarray(a) - if issubclass(a_arr.dtype.type, unicode_): - return _vec_string( - a_arr, a_arr.dtype, 'translate', (table,)) - else: - return _vec_string( - a_arr, a_arr.dtype, 'translate', [table] + _clean_args(deletechars)) - - -@array_function_dispatch(_unary_op_dispatcher) -def upper(a): - """ - Return an array with the elements converted to uppercase. - - Calls `str.upper` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array_like, {str, unicode} - Input array. 
- - Returns - ------- - out : ndarray, {str, unicode} - Output array of str or unicode, depending on input type - - See also - -------- - str.upper - - Examples - -------- - >>> c = np.array(['a1b c', '1bca', 'bca1']); c - array(['a1b c', '1bca', 'bca1'], dtype='>> np.char.upper(c) - array(['A1B C', '1BCA', 'BCA1'], dtype='= 2`` and ``order='F'``, in which case `strides` - is in "Fortran order". - - Methods - ------- - astype - argsort - copy - count - decode - dump - dumps - encode - endswith - expandtabs - fill - find - flatten - getfield - index - isalnum - isalpha - isdecimal - isdigit - islower - isnumeric - isspace - istitle - isupper - item - join - ljust - lower - lstrip - nonzero - put - ravel - repeat - replace - reshape - resize - rfind - rindex - rjust - rsplit - rstrip - searchsorted - setfield - setflags - sort - split - splitlines - squeeze - startswith - strip - swapaxes - swapcase - take - title - tofile - tolist - tostring - translate - transpose - upper - view - zfill - - Parameters - ---------- - shape : tuple - Shape of the array. - itemsize : int, optional - Length of each array element, in number of characters. Default is 1. - unicode : bool, optional - Are the array elements of type unicode (True) or string (False). - Default is False. - buffer : int, optional - Memory address of the start of the array data. Default is None, - in which case a new array is created. - offset : int, optional - Fixed stride displacement from the beginning of an axis? - Default is 0. Needs to be >=0. - strides : array_like of ints, optional - Strides for the array (see `ndarray.strides` for full description). - Default is None. - order : {'C', 'F'}, optional - The order in which the array data is stored in memory: 'C' -> - "row major" order (the default), 'F' -> "column major" - (Fortran) order. 
- - Examples - -------- - >>> charar = np.chararray((3, 3)) - >>> charar[:] = 'a' - >>> charar - chararray([[b'a', b'a', b'a'], - [b'a', b'a', b'a'], - [b'a', b'a', b'a']], dtype='|S1') - - >>> charar = np.chararray(charar.shape, itemsize=5) - >>> charar[:] = 'abc' - >>> charar - chararray([[b'abc', b'abc', b'abc'], - [b'abc', b'abc', b'abc'], - [b'abc', b'abc', b'abc']], dtype='|S5') - - """ - def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None, - offset=0, strides=None, order='C'): - global _globalvar - - if unicode: - dtype = unicode_ - else: - dtype = string_ - - # force itemsize to be a Python long, since using NumPy integer - # types results in itemsize.itemsize being used as the size of - # strings in the new array. - itemsize = long(itemsize) - - if sys.version_info[0] >= 3 and isinstance(buffer, _unicode): - # On Py3, unicode objects do not have the buffer interface - filler = buffer - buffer = None - else: - filler = None - - _globalvar = 1 - if buffer is None: - self = ndarray.__new__(subtype, shape, (dtype, itemsize), - order=order) - else: - self = ndarray.__new__(subtype, shape, (dtype, itemsize), - buffer=buffer, - offset=offset, strides=strides, - order=order) - if filler is not None: - self[...] = filler - _globalvar = 0 - return self - - def __array_finalize__(self, obj): - # The b is a special case because it is used for reconstructing. - if not _globalvar and self.dtype.char not in 'SUbc': - raise ValueError("Can only create a chararray from string data.") - - def __getitem__(self, obj): - val = ndarray.__getitem__(self, obj) - - if isinstance(val, character): - temp = val.rstrip() - if _len(temp) == 0: - val = '' - else: - val = temp - - return val - - # IMPLEMENTATION NOTE: Most of the methods of this class are - # direct delegations to the free functions in this module. - # However, those that return an array of strings should instead - # return a chararray, so some extra wrapping is required. 
- - def __eq__(self, other): - """ - Return (self == other) element-wise. - - See also - -------- - equal - """ - return equal(self, other) - - def __ne__(self, other): - """ - Return (self != other) element-wise. - - See also - -------- - not_equal - """ - return not_equal(self, other) - - def __ge__(self, other): - """ - Return (self >= other) element-wise. - - See also - -------- - greater_equal - """ - return greater_equal(self, other) - - def __le__(self, other): - """ - Return (self <= other) element-wise. - - See also - -------- - less_equal - """ - return less_equal(self, other) - - def __gt__(self, other): - """ - Return (self > other) element-wise. - - See also - -------- - greater - """ - return greater(self, other) - - def __lt__(self, other): - """ - Return (self < other) element-wise. - - See also - -------- - less - """ - return less(self, other) - - def __add__(self, other): - """ - Return (self + other), that is string concatenation, - element-wise for a pair of array_likes of str or unicode. - - See also - -------- - add - """ - return asarray(add(self, other)) - - def __radd__(self, other): - """ - Return (other + self), that is string concatenation, - element-wise for a pair of array_likes of `string_` or `unicode_`. - - See also - -------- - add - """ - return asarray(add(numpy.asarray(other), self)) - - def __mul__(self, i): - """ - Return (self * i), that is string multiple concatenation, - element-wise. - - See also - -------- - multiply - """ - return asarray(multiply(self, i)) - - def __rmul__(self, i): - """ - Return (self * i), that is string multiple concatenation, - element-wise. - - See also - -------- - multiply - """ - return asarray(multiply(self, i)) - - def __mod__(self, i): - """ - Return (self % i), that is pre-Python 2.6 string formatting - (iterpolation), element-wise for a pair of array_likes of `string_` - or `unicode_`. 
- - See also - -------- - mod - """ - return asarray(mod(self, i)) - - def __rmod__(self, other): - return NotImplemented - - def argsort(self, axis=-1, kind=None, order=None): - """ - Return the indices that sort the array lexicographically. - - For full documentation see `numpy.argsort`, for which this method is - in fact merely a "thin wrapper." - - Examples - -------- - >>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5') - >>> c = c.view(np.chararray); c - chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'], - dtype='|S5') - >>> c[c.argsort()] - chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'], - dtype='|S5') - - """ - return self.__array__().argsort(axis, kind, order) - argsort.__doc__ = ndarray.argsort.__doc__ - - def capitalize(self): - """ - Return a copy of `self` with only the first character of each element - capitalized. - - See also - -------- - char.capitalize - - """ - return asarray(capitalize(self)) - - def center(self, width, fillchar=' '): - """ - Return a copy of `self` with its elements centered in a - string of length `width`. - - See also - -------- - center - """ - return asarray(center(self, width, fillchar)) - - def count(self, sub, start=0, end=None): - """ - Returns an array with the number of non-overlapping occurrences of - substring `sub` in the range [`start`, `end`]. - - See also - -------- - char.count - - """ - return count(self, sub, start, end) - - def decode(self, encoding=None, errors=None): - """ - Calls `str.decode` element-wise. - - See also - -------- - char.decode - - """ - return decode(self, encoding, errors) - - def encode(self, encoding=None, errors=None): - """ - Calls `str.encode` element-wise. - - See also - -------- - char.encode - - """ - return encode(self, encoding, errors) - - def endswith(self, suffix, start=0, end=None): - """ - Returns a boolean array which is `True` where the string element - in `self` ends with `suffix`, otherwise `False`. 
- - See also - -------- - char.endswith - - """ - return endswith(self, suffix, start, end) - - def expandtabs(self, tabsize=8): - """ - Return a copy of each string element where all tab characters are - replaced by one or more spaces. - - See also - -------- - char.expandtabs - - """ - return asarray(expandtabs(self, tabsize)) - - def find(self, sub, start=0, end=None): - """ - For each element, return the lowest index in the string where - substring `sub` is found. - - See also - -------- - char.find - - """ - return find(self, sub, start, end) - - def index(self, sub, start=0, end=None): - """ - Like `find`, but raises `ValueError` when the substring is not found. - - See also - -------- - char.index - - """ - return index(self, sub, start, end) - - def isalnum(self): - """ - Returns true for each element if all characters in the string - are alphanumeric and there is at least one character, false - otherwise. - - See also - -------- - char.isalnum - - """ - return isalnum(self) - - def isalpha(self): - """ - Returns true for each element if all characters in the string - are alphabetic and there is at least one character, false - otherwise. - - See also - -------- - char.isalpha - - """ - return isalpha(self) - - def isdigit(self): - """ - Returns true for each element if all characters in the string are - digits and there is at least one character, false otherwise. - - See also - -------- - char.isdigit - - """ - return isdigit(self) - - def islower(self): - """ - Returns true for each element if all cased characters in the - string are lowercase and there is at least one cased character, - false otherwise. - - See also - -------- - char.islower - - """ - return islower(self) - - def isspace(self): - """ - Returns true for each element if there are only whitespace - characters in the string and there is at least one character, - false otherwise. 
- - See also - -------- - char.isspace - - """ - return isspace(self) - - def istitle(self): - """ - Returns true for each element if the element is a titlecased - string and there is at least one character, false otherwise. - - See also - -------- - char.istitle - - """ - return istitle(self) - - def isupper(self): - """ - Returns true for each element if all cased characters in the - string are uppercase and there is at least one character, false - otherwise. - - See also - -------- - char.isupper - - """ - return isupper(self) - - def join(self, seq): - """ - Return a string which is the concatenation of the strings in the - sequence `seq`. - - See also - -------- - char.join - - """ - return join(self, seq) - - def ljust(self, width, fillchar=' '): - """ - Return an array with the elements of `self` left-justified in a - string of length `width`. - - See also - -------- - char.ljust - - """ - return asarray(ljust(self, width, fillchar)) - - def lower(self): - """ - Return an array with the elements of `self` converted to - lowercase. - - See also - -------- - char.lower - - """ - return asarray(lower(self)) - - def lstrip(self, chars=None): - """ - For each element in `self`, return a copy with the leading characters - removed. - - See also - -------- - char.lstrip - - """ - return asarray(lstrip(self, chars)) - - def partition(self, sep): - """ - Partition each element in `self` around `sep`. - - See also - -------- - partition - """ - return asarray(partition(self, sep)) - - def replace(self, old, new, count=None): - """ - For each element in `self`, return a copy of the string with all - occurrences of substring `old` replaced by `new`. - - See also - -------- - char.replace - - """ - return asarray(replace(self, old, new, count)) - - def rfind(self, sub, start=0, end=None): - """ - For each element in `self`, return the highest index in the string - where substring `sub` is found, such that `sub` is contained - within [`start`, `end`]. 
- - See also - -------- - char.rfind - - """ - return rfind(self, sub, start, end) - - def rindex(self, sub, start=0, end=None): - """ - Like `rfind`, but raises `ValueError` when the substring `sub` is - not found. - - See also - -------- - char.rindex - - """ - return rindex(self, sub, start, end) - - def rjust(self, width, fillchar=' '): - """ - Return an array with the elements of `self` - right-justified in a string of length `width`. - - See also - -------- - char.rjust - - """ - return asarray(rjust(self, width, fillchar)) - - def rpartition(self, sep): - """ - Partition each element in `self` around `sep`. - - See also - -------- - rpartition - """ - return asarray(rpartition(self, sep)) - - def rsplit(self, sep=None, maxsplit=None): - """ - For each element in `self`, return a list of the words in - the string, using `sep` as the delimiter string. - - See also - -------- - char.rsplit - - """ - return rsplit(self, sep, maxsplit) - - def rstrip(self, chars=None): - """ - For each element in `self`, return a copy with the trailing - characters removed. - - See also - -------- - char.rstrip - - """ - return asarray(rstrip(self, chars)) - - def split(self, sep=None, maxsplit=None): - """ - For each element in `self`, return a list of the words in the - string, using `sep` as the delimiter string. - - See also - -------- - char.split - - """ - return split(self, sep, maxsplit) - - def splitlines(self, keepends=None): - """ - For each element in `self`, return a list of the lines in the - element, breaking at line boundaries. - - See also - -------- - char.splitlines - - """ - return splitlines(self, keepends) - - def startswith(self, prefix, start=0, end=None): - """ - Returns a boolean array which is `True` where the string element - in `self` starts with `prefix`, otherwise `False`. 
- - See also - -------- - char.startswith - - """ - return startswith(self, prefix, start, end) - - def strip(self, chars=None): - """ - For each element in `self`, return a copy with the leading and - trailing characters removed. - - See also - -------- - char.strip - - """ - return asarray(strip(self, chars)) - - def swapcase(self): - """ - For each element in `self`, return a copy of the string with - uppercase characters converted to lowercase and vice versa. - - See also - -------- - char.swapcase - - """ - return asarray(swapcase(self)) - - def title(self): - """ - For each element in `self`, return a titlecased version of the - string: words start with uppercase characters, all remaining cased - characters are lowercase. - - See also - -------- - char.title - - """ - return asarray(title(self)) - - def translate(self, table, deletechars=None): - """ - For each element in `self`, return a copy of the string where - all characters occurring in the optional argument - `deletechars` are removed, and the remaining characters have - been mapped through the given translation table. - - See also - -------- - char.translate - - """ - return asarray(translate(self, table, deletechars)) - - def upper(self): - """ - Return an array with the elements of `self` converted to - uppercase. - - See also - -------- - char.upper - - """ - return asarray(upper(self)) - - def zfill(self, width): - """ - Return the numeric string left-filled with zeros in a string of - length `width`. - - See also - -------- - char.zfill - - """ - return asarray(zfill(self, width)) - - def isnumeric(self): - """ - For each element in `self`, return True if there are only - numeric characters in the element. - - See also - -------- - char.isnumeric - - """ - return isnumeric(self) - - def isdecimal(self): - """ - For each element in `self`, return True if there are only - decimal characters in the element. 
- - See also - -------- - char.isdecimal - - """ - return isdecimal(self) - - -def array(obj, itemsize=None, copy=True, unicode=None, order=None): - """ - Create a `chararray`. - - .. note:: - This class is provided for numarray backward-compatibility. - New code (not concerned with numarray compatibility) should use - arrays of type `string_` or `unicode_` and use the free functions - in :mod:`numpy.char ` for fast - vectorized string operations instead. - - Versus a regular NumPy array of type `str` or `unicode`, this - class adds the following functionality: - - 1) values automatically have whitespace removed from the end - when indexed - - 2) comparison operators automatically remove whitespace from the - end when comparing values - - 3) vectorized string operations are provided as methods - (e.g. `str.endswith`) and infix operators (e.g. ``+, *, %``) - - Parameters - ---------- - obj : array of str or unicode-like - - itemsize : int, optional - `itemsize` is the number of characters per scalar in the - resulting array. If `itemsize` is None, and `obj` is an - object array or a Python list, the `itemsize` will be - automatically determined. If `itemsize` is provided and `obj` - is of type str or unicode, then the `obj` string will be - chunked into `itemsize` pieces. - - copy : bool, optional - If true (default), then the object is copied. Otherwise, a copy - will only be made if __array__ returns a copy, if obj is a - nested sequence, or if a copy is needed to satisfy any of the other - requirements (`itemsize`, unicode, `order`, etc.). - - unicode : bool, optional - When true, the resulting `chararray` can contain Unicode - characters, when false only 8-bit characters. If unicode is - None and `obj` is one of the following: - - - a `chararray`, - - an ndarray of type `str` or `unicode` - - a Python str or unicode object, - - then the unicode setting of the output array will be - automatically determined. 
- - order : {'C', 'F', 'A'}, optional - Specify the order of the array. If order is 'C' (default), then the - array will be in C-contiguous order (last-index varies the - fastest). If order is 'F', then the returned array - will be in Fortran-contiguous order (first-index varies the - fastest). If order is 'A', then the returned array may - be in any order (either C-, Fortran-contiguous, or even - discontiguous). - """ - if isinstance(obj, (_bytes, _unicode)): - if unicode is None: - if isinstance(obj, _unicode): - unicode = True - else: - unicode = False - - if itemsize is None: - itemsize = _len(obj) - shape = _len(obj) // itemsize - - if unicode: - if sys.maxunicode == 0xffff: - # On a narrow Python build, the buffer for Unicode - # strings is UCS2, which doesn't match the buffer for - # NumPy Unicode types, which is ALWAYS UCS4. - # Therefore, we need to convert the buffer. On Python - # 2.6 and later, we can use the utf_32 codec. Earlier - # versions don't have that codec, so we convert to a - # numerical array that matches the input buffer, and - # then use NumPy to convert it to UCS4. All of this - # should happen in native endianness. - obj = obj.encode('utf_32') - else: - obj = _unicode(obj) - else: - # Let the default Unicode -> string encoding (if any) take - # precedence. - obj = _bytes(obj) - - return chararray(shape, itemsize=itemsize, unicode=unicode, - buffer=obj, order=order) - - if isinstance(obj, (list, tuple)): - obj = numpy.asarray(obj) - - if isinstance(obj, ndarray) and issubclass(obj.dtype.type, character): - # If we just have a vanilla chararray, create a chararray - # view around it. 
- if not isinstance(obj, chararray): - obj = obj.view(chararray) - - if itemsize is None: - itemsize = obj.itemsize - # itemsize is in 8-bit chars, so for Unicode, we need - # to divide by the size of a single Unicode character, - # which for NumPy is always 4 - if issubclass(obj.dtype.type, unicode_): - itemsize //= 4 - - if unicode is None: - if issubclass(obj.dtype.type, unicode_): - unicode = True - else: - unicode = False - - if unicode: - dtype = unicode_ - else: - dtype = string_ - - if order is not None: - obj = numpy.asarray(obj, order=order) - if (copy or - (itemsize != obj.itemsize) or - (not unicode and isinstance(obj, unicode_)) or - (unicode and isinstance(obj, string_))): - obj = obj.astype((dtype, long(itemsize))) - return obj - - if isinstance(obj, ndarray) and issubclass(obj.dtype.type, object): - if itemsize is None: - # Since no itemsize was specified, convert the input array to - # a list so the ndarray constructor will automatically - # determine the itemsize for us. - obj = obj.tolist() - # Fall through to the default case - - if unicode: - dtype = unicode_ - else: - dtype = string_ - - if itemsize is None: - val = narray(obj, dtype=dtype, order=order, subok=True) - else: - val = narray(obj, dtype=(dtype, itemsize), order=order, subok=True) - return val.view(chararray) - - -def asarray(obj, itemsize=None, unicode=None, order=None): - """ - Convert the input to a `chararray`, copying the data only if - necessary. - - Versus a regular NumPy array of type `str` or `unicode`, this - class adds the following functionality: - - 1) values automatically have whitespace removed from the end - when indexed - - 2) comparison operators automatically remove whitespace from the - end when comparing values - - 3) vectorized string operations are provided as methods - (e.g. `str.endswith`) and infix operators (e.g. 
``+``, ``*``,``%``) - - Parameters - ---------- - obj : array of str or unicode-like - - itemsize : int, optional - `itemsize` is the number of characters per scalar in the - resulting array. If `itemsize` is None, and `obj` is an - object array or a Python list, the `itemsize` will be - automatically determined. If `itemsize` is provided and `obj` - is of type str or unicode, then the `obj` string will be - chunked into `itemsize` pieces. - - unicode : bool, optional - When true, the resulting `chararray` can contain Unicode - characters, when false only 8-bit characters. If unicode is - None and `obj` is one of the following: - - - a `chararray`, - - an ndarray of type `str` or 'unicode` - - a Python str or unicode object, - - then the unicode setting of the output array will be - automatically determined. - - order : {'C', 'F'}, optional - Specify the order of the array. If order is 'C' (default), then the - array will be in C-contiguous order (last-index varies the - fastest). If order is 'F', then the returned array - will be in Fortran-contiguous order (first-index varies the - fastest). - """ - return array(obj, itemsize, copy=False, - unicode=unicode, order=order) diff --git a/venv/lib/python3.7/site-packages/numpy/core/einsumfunc.py b/venv/lib/python3.7/site-packages/numpy/core/einsumfunc.py deleted file mode 100644 index 3412c3f..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/einsumfunc.py +++ /dev/null @@ -1,1432 +0,0 @@ -""" -Implementation of optimized einsum. 
- -""" -from __future__ import division, absolute_import, print_function - -import itertools - -from numpy.compat import basestring -from numpy.core.multiarray import c_einsum -from numpy.core.numeric import asanyarray, tensordot -from numpy.core.overrides import array_function_dispatch - -__all__ = ['einsum', 'einsum_path'] - -einsum_symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' -einsum_symbols_set = set(einsum_symbols) - - -def _flop_count(idx_contraction, inner, num_terms, size_dictionary): - """ - Computes the number of FLOPS in the contraction. - - Parameters - ---------- - idx_contraction : iterable - The indices involved in the contraction - inner : bool - Does this contraction require an inner product? - num_terms : int - The number of terms in a contraction - size_dictionary : dict - The size of each of the indices in idx_contraction - - Returns - ------- - flop_count : int - The total number of FLOPS required for the contraction. - - Examples - -------- - - >>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5}) - 30 - - >>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5}) - 60 - - """ - - overall_size = _compute_size_by_dict(idx_contraction, size_dictionary) - op_factor = max(1, num_terms - 1) - if inner: - op_factor += 1 - - return overall_size * op_factor - -def _compute_size_by_dict(indices, idx_dict): - """ - Computes the product of the elements in indices based on the dictionary - idx_dict. - - Parameters - ---------- - indices : iterable - Indices to base the product on. - idx_dict : dictionary - Dictionary of index sizes - - Returns - ------- - ret : int - The resulting product. - - Examples - -------- - >>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5}) - 90 - - """ - ret = 1 - for i in indices: - ret *= idx_dict[i] - return ret - - -def _find_contraction(positions, input_sets, output_set): - """ - Finds the contraction for a given set of input and output sets. 
- - Parameters - ---------- - positions : iterable - Integer positions of terms used in the contraction. - input_sets : list - List of sets that represent the lhs side of the einsum subscript - output_set : set - Set that represents the rhs side of the overall einsum subscript - - Returns - ------- - new_result : set - The indices of the resulting contraction - remaining : list - List of sets that have not been contracted, the new set is appended to - the end of this list - idx_removed : set - Indices removed from the entire contraction - idx_contraction : set - The indices used in the current contraction - - Examples - -------- - - # A simple dot product test case - >>> pos = (0, 1) - >>> isets = [set('ab'), set('bc')] - >>> oset = set('ac') - >>> _find_contraction(pos, isets, oset) - ({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'}) - - # A more complex case with additional terms in the contraction - >>> pos = (0, 2) - >>> isets = [set('abd'), set('ac'), set('bdc')] - >>> oset = set('ac') - >>> _find_contraction(pos, isets, oset) - ({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'}) - """ - - idx_contract = set() - idx_remain = output_set.copy() - remaining = [] - for ind, value in enumerate(input_sets): - if ind in positions: - idx_contract |= value - else: - remaining.append(value) - idx_remain |= value - - new_result = idx_remain & idx_contract - idx_removed = (idx_contract - new_result) - remaining.append(new_result) - - return (new_result, remaining, idx_removed, idx_contract) - - -def _optimal_path(input_sets, output_set, idx_dict, memory_limit): - """ - Computes all possible pair contractions, sieves the results based - on ``memory_limit`` and returns the lowest cost path. This algorithm - scales factorial with respect to the elements in the list ``input_sets``. 
- - Parameters - ---------- - input_sets : list - List of sets that represent the lhs side of the einsum subscript - output_set : set - Set that represents the rhs side of the overall einsum subscript - idx_dict : dictionary - Dictionary of index sizes - memory_limit : int - The maximum number of elements in a temporary array - - Returns - ------- - path : list - The optimal contraction order within the memory limit constraint. - - Examples - -------- - >>> isets = [set('abd'), set('ac'), set('bdc')] - >>> oset = set() - >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4} - >>> _optimal_path(isets, oset, idx_sizes, 5000) - [(0, 2), (0, 1)] - """ - - full_results = [(0, [], input_sets)] - for iteration in range(len(input_sets) - 1): - iter_results = [] - - # Compute all unique pairs - for curr in full_results: - cost, positions, remaining = curr - for con in itertools.combinations(range(len(input_sets) - iteration), 2): - - # Find the contraction - cont = _find_contraction(con, remaining, output_set) - new_result, new_input_sets, idx_removed, idx_contract = cont - - # Sieve the results based on memory_limit - new_size = _compute_size_by_dict(new_result, idx_dict) - if new_size > memory_limit: - continue - - # Build (total_cost, positions, indices_remaining) - total_cost = cost + _flop_count(idx_contract, idx_removed, len(con), idx_dict) - new_pos = positions + [con] - iter_results.append((total_cost, new_pos, new_input_sets)) - - # Update combinatorial list, if we did not find anything return best - # path + remaining contractions - if iter_results: - full_results = iter_results - else: - path = min(full_results, key=lambda x: x[0])[1] - path += [tuple(range(len(input_sets) - iteration))] - return path - - # If we have not found anything return single einsum contraction - if len(full_results) == 0: - return [tuple(range(len(input_sets)))] - - path = min(full_results, key=lambda x: x[0])[1] - return path - -def _parse_possible_contraction(positions, input_sets, output_set, 
idx_dict, memory_limit, path_cost, naive_cost): - """Compute the cost (removed size + flops) and resultant indices for - performing the contraction specified by ``positions``. - - Parameters - ---------- - positions : tuple of int - The locations of the proposed tensors to contract. - input_sets : list of sets - The indices found on each tensors. - output_set : set - The output indices of the expression. - idx_dict : dict - Mapping of each index to its size. - memory_limit : int - The total allowed size for an intermediary tensor. - path_cost : int - The contraction cost so far. - naive_cost : int - The cost of the unoptimized expression. - - Returns - ------- - cost : (int, int) - A tuple containing the size of any indices removed, and the flop cost. - positions : tuple of int - The locations of the proposed tensors to contract. - new_input_sets : list of sets - The resulting new list of indices if this proposed contraction is performed. - - """ - - # Find the contraction - contract = _find_contraction(positions, input_sets, output_set) - idx_result, new_input_sets, idx_removed, idx_contract = contract - - # Sieve the results based on memory_limit - new_size = _compute_size_by_dict(idx_result, idx_dict) - if new_size > memory_limit: - return None - - # Build sort tuple - old_sizes = (_compute_size_by_dict(input_sets[p], idx_dict) for p in positions) - removed_size = sum(old_sizes) - new_size - - # NB: removed_size used to be just the size of any removed indices i.e.: - # helpers.compute_size_by_dict(idx_removed, idx_dict) - cost = _flop_count(idx_contract, idx_removed, len(positions), idx_dict) - sort = (-removed_size, cost) - - # Sieve based on total cost as well - if (path_cost + cost) > naive_cost: - return None - - # Add contraction to possible choices - return [sort, positions, new_input_sets] - - -def _update_other_results(results, best): - """Update the positions and provisional input_sets of ``results`` based on - performing the contraction result 
``best``. Remove any involving the tensors - contracted. - - Parameters - ---------- - results : list - List of contraction results produced by ``_parse_possible_contraction``. - best : list - The best contraction of ``results`` i.e. the one that will be performed. - - Returns - ------- - mod_results : list - The list of modified results, updated with outcome of ``best`` contraction. - """ - - best_con = best[1] - bx, by = best_con - mod_results = [] - - for cost, (x, y), con_sets in results: - - # Ignore results involving tensors just contracted - if x in best_con or y in best_con: - continue - - # Update the input_sets - del con_sets[by - int(by > x) - int(by > y)] - del con_sets[bx - int(bx > x) - int(bx > y)] - con_sets.insert(-1, best[2][-1]) - - # Update the position indices - mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by) - mod_results.append((cost, mod_con, con_sets)) - - return mod_results - -def _greedy_path(input_sets, output_set, idx_dict, memory_limit): - """ - Finds the path by contracting the best pair until the input list is - exhausted. The best pair is found by minimizing the tuple - ``(-prod(indices_removed), cost)``. What this amounts to is prioritizing - matrix multiplication or inner product operations, then Hadamard like - operations, and finally outer operations. Outer products are limited by - ``memory_limit``. This algorithm scales cubically with respect to the - number of elements in the list ``input_sets``. - - Parameters - ---------- - input_sets : list - List of sets that represent the lhs side of the einsum subscript - output_set : set - Set that represents the rhs side of the overall einsum subscript - idx_dict : dictionary - Dictionary of index sizes - memory_limit_limit : int - The maximum number of elements in a temporary array - - Returns - ------- - path : list - The greedy contraction order within the memory limit constraint. 
- - Examples - -------- - >>> isets = [set('abd'), set('ac'), set('bdc')] - >>> oset = set() - >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4} - >>> _greedy_path(isets, oset, idx_sizes, 5000) - [(0, 2), (0, 1)] - """ - - # Handle trivial cases that leaked through - if len(input_sets) == 1: - return [(0,)] - elif len(input_sets) == 2: - return [(0, 1)] - - # Build up a naive cost - contract = _find_contraction(range(len(input_sets)), input_sets, output_set) - idx_result, new_input_sets, idx_removed, idx_contract = contract - naive_cost = _flop_count(idx_contract, idx_removed, len(input_sets), idx_dict) - - # Initially iterate over all pairs - comb_iter = itertools.combinations(range(len(input_sets)), 2) - known_contractions = [] - - path_cost = 0 - path = [] - - for iteration in range(len(input_sets) - 1): - - # Iterate over all pairs on first step, only previously found pairs on subsequent steps - for positions in comb_iter: - - # Always initially ignore outer products - if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]): - continue - - result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost, - naive_cost) - if result is not None: - known_contractions.append(result) - - # If we do not have a inner contraction, rescan pairs including outer products - if len(known_contractions) == 0: - - # Then check the outer products - for positions in itertools.combinations(range(len(input_sets)), 2): - result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, - path_cost, naive_cost) - if result is not None: - known_contractions.append(result) - - # If we still did not find any remaining contractions, default back to einsum like behavior - if len(known_contractions) == 0: - path.append(tuple(range(len(input_sets)))) - break - - # Sort based on first index - best = min(known_contractions, key=lambda x: x[0]) - - # Now propagate as many unused contractions as possible to next 
iteration - known_contractions = _update_other_results(known_contractions, best) - - # Next iteration only compute contractions with the new tensor - # All other contractions have been accounted for - input_sets = best[2] - new_tensor_pos = len(input_sets) - 1 - comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos)) - - # Update path and total cost - path.append(best[1]) - path_cost += best[0][1] - - return path - - -def _can_dot(inputs, result, idx_removed): - """ - Checks if we can use BLAS (np.tensordot) call and its beneficial to do so. - - Parameters - ---------- - inputs : list of str - Specifies the subscripts for summation. - result : str - Resulting summation. - idx_removed : set - Indices that are removed in the summation - - - Returns - ------- - type : bool - Returns true if BLAS should and can be used, else False - - Notes - ----- - If the operations is BLAS level 1 or 2 and is not already aligned - we default back to einsum as the memory movement to copy is more - costly than the operation itself. - - - Examples - -------- - - # Standard GEMM operation - >>> _can_dot(['ij', 'jk'], 'ik', set('j')) - True - - # Can use the standard BLAS, but requires odd data movement - >>> _can_dot(['ijj', 'jk'], 'ik', set('j')) - False - - # DDOT where the memory is not aligned - >>> _can_dot(['ijk', 'ikj'], '', set('ijk')) - False - - """ - - # All `dot` calls remove indices - if len(idx_removed) == 0: - return False - - # BLAS can only handle two operands - if len(inputs) != 2: - return False - - input_left, input_right = inputs - - for c in set(input_left + input_right): - # can't deal with repeated indices on same input or more than 2 total - nl, nr = input_left.count(c), input_right.count(c) - if (nl > 1) or (nr > 1) or (nl + nr > 2): - return False - - # can't do implicit summation or dimension collapse e.g. 
- # "ab,bc->c" (implicitly sum over 'a') - # "ab,ca->ca" (take diagonal of 'a') - if nl + nr - 1 == int(c in result): - return False - - # Build a few temporaries - set_left = set(input_left) - set_right = set(input_right) - keep_left = set_left - idx_removed - keep_right = set_right - idx_removed - rs = len(idx_removed) - - # At this point we are a DOT, GEMV, or GEMM operation - - # Handle inner products - - # DDOT with aligned data - if input_left == input_right: - return True - - # DDOT without aligned data (better to use einsum) - if set_left == set_right: - return False - - # Handle the 4 possible (aligned) GEMV or GEMM cases - - # GEMM or GEMV no transpose - if input_left[-rs:] == input_right[:rs]: - return True - - # GEMM or GEMV transpose both - if input_left[:rs] == input_right[-rs:]: - return True - - # GEMM or GEMV transpose right - if input_left[-rs:] == input_right[-rs:]: - return True - - # GEMM or GEMV transpose left - if input_left[:rs] == input_right[:rs]: - return True - - # Einsum is faster than GEMV if we have to copy data - if not keep_left or not keep_right: - return False - - # We are a matrix-matrix product, but we need to copy data - return True - - -def _parse_einsum_input(operands): - """ - A reproduction of einsum c side einsum parsing in python. 
- - Returns - ------- - input_strings : str - Parsed input strings - output_string : str - Parsed output string - operands : list of array_like - The operands to use in the numpy contraction - - Examples - -------- - The operand list is simplified to reduce printing: - - >>> np.random.seed(123) - >>> a = np.random.rand(4, 4) - >>> b = np.random.rand(4, 4, 4) - >>> _parse_einsum_input(('...a,...a->...', a, b)) - ('za,xza', 'xz', [a, b]) # may vary - - >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0])) - ('za,xza', 'xz', [a, b]) # may vary - """ - - if len(operands) == 0: - raise ValueError("No input operands") - - if isinstance(operands[0], basestring): - subscripts = operands[0].replace(" ", "") - operands = [asanyarray(v) for v in operands[1:]] - - # Ensure all characters are valid - for s in subscripts: - if s in '.,->': - continue - if s not in einsum_symbols: - raise ValueError("Character %s is not a valid symbol." % s) - - else: - tmp_operands = list(operands) - operand_list = [] - subscript_list = [] - for p in range(len(operands) // 2): - operand_list.append(tmp_operands.pop(0)) - subscript_list.append(tmp_operands.pop(0)) - - output_list = tmp_operands[-1] if len(tmp_operands) else None - operands = [asanyarray(v) for v in operand_list] - subscripts = "" - last = len(subscript_list) - 1 - for num, sub in enumerate(subscript_list): - for s in sub: - if s is Ellipsis: - subscripts += "..." - elif isinstance(s, int): - subscripts += einsum_symbols[s] - else: - raise TypeError("For this input type lists must contain " - "either int or Ellipsis") - if num != last: - subscripts += "," - - if output_list is not None: - subscripts += "->" - for s in output_list: - if s is Ellipsis: - subscripts += "..." 
- elif isinstance(s, int): - subscripts += einsum_symbols[s] - else: - raise TypeError("For this input type lists must contain " - "either int or Ellipsis") - # Check for proper "->" - if ("-" in subscripts) or (">" in subscripts): - invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1) - if invalid or (subscripts.count("->") != 1): - raise ValueError("Subscripts can only contain one '->'.") - - # Parse ellipses - if "." in subscripts: - used = subscripts.replace(".", "").replace(",", "").replace("->", "") - unused = list(einsum_symbols_set - set(used)) - ellipse_inds = "".join(unused) - longest = 0 - - if "->" in subscripts: - input_tmp, output_sub = subscripts.split("->") - split_subscripts = input_tmp.split(",") - out_sub = True - else: - split_subscripts = subscripts.split(',') - out_sub = False - - for num, sub in enumerate(split_subscripts): - if "." in sub: - if (sub.count(".") != 3) or (sub.count("...") != 1): - raise ValueError("Invalid Ellipses.") - - # Take into account numerical values - if operands[num].shape == (): - ellipse_count = 0 - else: - ellipse_count = max(operands[num].ndim, 1) - ellipse_count -= (len(sub) - 3) - - if ellipse_count > longest: - longest = ellipse_count - - if ellipse_count < 0: - raise ValueError("Ellipses lengths do not match.") - elif ellipse_count == 0: - split_subscripts[num] = sub.replace('...', '') - else: - rep_inds = ellipse_inds[-ellipse_count:] - split_subscripts[num] = sub.replace('...', rep_inds) - - subscripts = ",".join(split_subscripts) - if longest == 0: - out_ellipse = "" - else: - out_ellipse = ellipse_inds[-longest:] - - if out_sub: - subscripts += "->" + output_sub.replace("...", out_ellipse) - else: - # Special care for outputless ellipses - output_subscript = "" - tmp_subscripts = subscripts.replace(",", "") - for s in sorted(set(tmp_subscripts)): - if s not in (einsum_symbols): - raise ValueError("Character %s is not a valid symbol." 
% s) - if tmp_subscripts.count(s) == 1: - output_subscript += s - normal_inds = ''.join(sorted(set(output_subscript) - - set(out_ellipse))) - - subscripts += "->" + out_ellipse + normal_inds - - # Build output string if does not exist - if "->" in subscripts: - input_subscripts, output_subscript = subscripts.split("->") - else: - input_subscripts = subscripts - # Build output subscripts - tmp_subscripts = subscripts.replace(",", "") - output_subscript = "" - for s in sorted(set(tmp_subscripts)): - if s not in einsum_symbols: - raise ValueError("Character %s is not a valid symbol." % s) - if tmp_subscripts.count(s) == 1: - output_subscript += s - - # Make sure output subscripts are in the input - for char in output_subscript: - if char not in input_subscripts: - raise ValueError("Output character %s did not appear in the input" - % char) - - # Make sure number operands is equivalent to the number of terms - if len(input_subscripts.split(',')) != len(operands): - raise ValueError("Number of einsum subscripts must be equal to the " - "number of operands.") - - return (input_subscripts, output_subscript, operands) - - -def _einsum_path_dispatcher(*operands, **kwargs): - # NOTE: technically, we should only dispatch on array-like arguments, not - # subscripts (given as strings). But separating operands into - # arrays/subscripts is a little tricky/slow (given einsum's two supported - # signatures), so as a practical shortcut we dispatch on everything. - # Strings will be ignored for dispatching since they don't define - # __array_function__. - return operands - - -@array_function_dispatch(_einsum_path_dispatcher, module='numpy') -def einsum_path(*operands, **kwargs): - """ - einsum_path(subscripts, *operands, optimize='greedy') - - Evaluates the lowest cost contraction order for an einsum expression by - considering the creation of intermediate arrays. - - Parameters - ---------- - subscripts : str - Specifies the subscripts for summation. 
- *operands : list of array_like - These are the arrays for the operation. - optimize : {bool, list, tuple, 'greedy', 'optimal'} - Choose the type of path. If a tuple is provided, the second argument is - assumed to be the maximum intermediate size created. If only a single - argument is provided the largest input or output array size is used - as a maximum intermediate size. - - * if a list is given that starts with ``einsum_path``, uses this as the - contraction path - * if False no optimization is taken - * if True defaults to the 'greedy' algorithm - * 'optimal' An algorithm that combinatorially explores all possible - ways of contracting the listed tensors and choosest the least costly - path. Scales exponentially with the number of terms in the - contraction. - * 'greedy' An algorithm that chooses the best pair contraction - at each step. Effectively, this algorithm searches the largest inner, - Hadamard, and then outer products at each step. Scales cubically with - the number of terms in the contraction. Equivalent to the 'optimal' - path for most contractions. - - Default is 'greedy'. - - Returns - ------- - path : list of tuples - A list representation of the einsum path. - string_repr : str - A printable representation of the einsum path. - - Notes - ----- - The resulting path indicates which terms of the input contraction should be - contracted first, the result of this contraction is then appended to the - end of the contraction list. This list can then be iterated over until all - intermediate contractions are complete. - - See Also - -------- - einsum, linalg.multi_dot - - Examples - -------- - - We can begin with a chain dot example. In this case, it is optimal to - contract the ``b`` and ``c`` tensors first as represented by the first - element of the path ``(1, 2)``. The resulting tensor is added to the end - of the contraction and the remaining contraction ``(0, 1)`` is then - completed. 
- - >>> np.random.seed(123) - >>> a = np.random.rand(2, 2) - >>> b = np.random.rand(2, 5) - >>> c = np.random.rand(5, 2) - >>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy') - >>> print(path_info[0]) - ['einsum_path', (1, 2), (0, 1)] - >>> print(path_info[1]) - Complete contraction: ij,jk,kl->il # may vary - Naive scaling: 4 - Optimized scaling: 3 - Naive FLOP count: 1.600e+02 - Optimized FLOP count: 5.600e+01 - Theoretical speedup: 2.857 - Largest intermediate: 4.000e+00 elements - ------------------------------------------------------------------------- - scaling current remaining - ------------------------------------------------------------------------- - 3 kl,jk->jl ij,jl->il - 3 jl,ij->il il->il - - - A more complex index transformation example. - - >>> I = np.random.rand(10, 10, 10, 10) - >>> C = np.random.rand(10, 10) - >>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C, - ... optimize='greedy') - - >>> print(path_info[0]) - ['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)] - >>> print(path_info[1]) - Complete contraction: ea,fb,abcd,gc,hd->efgh # may vary - Naive scaling: 8 - Optimized scaling: 5 - Naive FLOP count: 8.000e+08 - Optimized FLOP count: 8.000e+05 - Theoretical speedup: 1000.000 - Largest intermediate: 1.000e+04 elements - -------------------------------------------------------------------------- - scaling current remaining - -------------------------------------------------------------------------- - 5 abcd,ea->bcde fb,gc,hd,bcde->efgh - 5 bcde,fb->cdef gc,hd,cdef->efgh - 5 cdef,gc->defg hd,defg->efgh - 5 defg,hd->efgh efgh->efgh - """ - - # Make sure all keywords are valid - valid_contract_kwargs = ['optimize', 'einsum_call'] - unknown_kwargs = [k for (k, v) in kwargs.items() if k - not in valid_contract_kwargs] - if len(unknown_kwargs): - raise TypeError("Did not understand the following kwargs:" - " %s" % unknown_kwargs) - - # Figure out what the path really is - path_type = kwargs.pop('optimize', 
True) - if path_type is True: - path_type = 'greedy' - if path_type is None: - path_type = False - - memory_limit = None - - # No optimization or a named path algorithm - if (path_type is False) or isinstance(path_type, basestring): - pass - - # Given an explicit path - elif len(path_type) and (path_type[0] == 'einsum_path'): - pass - - # Path tuple with memory limit - elif ((len(path_type) == 2) and isinstance(path_type[0], basestring) and - isinstance(path_type[1], (int, float))): - memory_limit = int(path_type[1]) - path_type = path_type[0] - - else: - raise TypeError("Did not understand the path: %s" % str(path_type)) - - # Hidden option, only einsum should call this - einsum_call_arg = kwargs.pop("einsum_call", False) - - # Python side parsing - input_subscripts, output_subscript, operands = _parse_einsum_input(operands) - - # Build a few useful list and sets - input_list = input_subscripts.split(',') - input_sets = [set(x) for x in input_list] - output_set = set(output_subscript) - indices = set(input_subscripts.replace(',', '')) - - # Get length of each unique dimension and ensure all dimensions are correct - dimension_dict = {} - broadcast_indices = [[] for x in range(len(input_list))] - for tnum, term in enumerate(input_list): - sh = operands[tnum].shape - if len(sh) != len(term): - raise ValueError("Einstein sum subscript %s does not contain the " - "correct number of indices for operand %d." - % (input_subscripts[tnum], tnum)) - for cnum, char in enumerate(term): - dim = sh[cnum] - - # Build out broadcast indices - if dim == 1: - broadcast_indices[tnum].append(char) - - if char in dimension_dict.keys(): - # For broadcasting cases we always want the largest dim size - if dimension_dict[char] == 1: - dimension_dict[char] = dim - elif dim not in (1, dimension_dict[char]): - raise ValueError("Size of label '%s' for operand %d (%d) " - "does not match previous terms (%d)." 
- % (char, tnum, dimension_dict[char], dim)) - else: - dimension_dict[char] = dim - - # Convert broadcast inds to sets - broadcast_indices = [set(x) for x in broadcast_indices] - - # Compute size of each input array plus the output array - size_list = [_compute_size_by_dict(term, dimension_dict) - for term in input_list + [output_subscript]] - max_size = max(size_list) - - if memory_limit is None: - memory_arg = max_size - else: - memory_arg = memory_limit - - # Compute naive cost - # This isn't quite right, need to look into exactly how einsum does this - inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0 - naive_cost = _flop_count(indices, inner_product, len(input_list), dimension_dict) - - # Compute the path - if (path_type is False) or (len(input_list) in [1, 2]) or (indices == output_set): - # Nothing to be optimized, leave it to einsum - path = [tuple(range(len(input_list)))] - elif path_type == "greedy": - path = _greedy_path(input_sets, output_set, dimension_dict, memory_arg) - elif path_type == "optimal": - path = _optimal_path(input_sets, output_set, dimension_dict, memory_arg) - elif path_type[0] == 'einsum_path': - path = path_type[1:] - else: - raise KeyError("Path name %s not found", path_type) - - cost_list, scale_list, size_list, contraction_list = [], [], [], [] - - # Build contraction tuple (positions, gemm, einsum_str, remaining) - for cnum, contract_inds in enumerate(path): - # Make sure we remove inds from right to left - contract_inds = tuple(sorted(list(contract_inds), reverse=True)) - - contract = _find_contraction(contract_inds, input_sets, output_set) - out_inds, input_sets, idx_removed, idx_contract = contract - - cost = _flop_count(idx_contract, idx_removed, len(contract_inds), dimension_dict) - cost_list.append(cost) - scale_list.append(len(idx_contract)) - size_list.append(_compute_size_by_dict(out_inds, dimension_dict)) - - bcast = set() - tmp_inputs = [] - for x in contract_inds: - 
tmp_inputs.append(input_list.pop(x)) - bcast |= broadcast_indices.pop(x) - - new_bcast_inds = bcast - idx_removed - - # If we're broadcasting, nix blas - if not len(idx_removed & bcast): - do_blas = _can_dot(tmp_inputs, out_inds, idx_removed) - else: - do_blas = False - - # Last contraction - if (cnum - len(path)) == -1: - idx_result = output_subscript - else: - sort_result = [(dimension_dict[ind], ind) for ind in out_inds] - idx_result = "".join([x[1] for x in sorted(sort_result)]) - - input_list.append(idx_result) - broadcast_indices.append(new_bcast_inds) - einsum_str = ",".join(tmp_inputs) + "->" + idx_result - - contraction = (contract_inds, idx_removed, einsum_str, input_list[:], do_blas) - contraction_list.append(contraction) - - opt_cost = sum(cost_list) + 1 - - if einsum_call_arg: - return (operands, contraction_list) - - # Return the path along with a nice string representation - overall_contraction = input_subscripts + "->" + output_subscript - header = ("scaling", "current", "remaining") - - speedup = naive_cost / opt_cost - max_i = max(size_list) - - path_print = " Complete contraction: %s\n" % overall_contraction - path_print += " Naive scaling: %d\n" % len(indices) - path_print += " Optimized scaling: %d\n" % max(scale_list) - path_print += " Naive FLOP count: %.3e\n" % naive_cost - path_print += " Optimized FLOP count: %.3e\n" % opt_cost - path_print += " Theoretical speedup: %3.3f\n" % speedup - path_print += " Largest intermediate: %.3e elements\n" % max_i - path_print += "-" * 74 + "\n" - path_print += "%6s %24s %40s\n" % header - path_print += "-" * 74 - - for n, contraction in enumerate(contraction_list): - inds, idx_rm, einsum_str, remaining, blas = contraction - remaining_str = ",".join(remaining) + "->" + output_subscript - path_run = (scale_list[n], einsum_str, remaining_str) - path_print += "\n%4d %24s %40s" % path_run - - path = ['einsum_path'] + path - return (path, path_print) - - -def _einsum_dispatcher(*operands, **kwargs): - # 
Arguably we dispatch on more arguments that we really should; see note in - # _einsum_path_dispatcher for why. - for op in operands: - yield op - yield kwargs.get('out') - - -# Rewrite einsum to handle different cases -@array_function_dispatch(_einsum_dispatcher, module='numpy') -def einsum(*operands, **kwargs): - """ - einsum(subscripts, *operands, out=None, dtype=None, order='K', - casting='safe', optimize=False) - - Evaluates the Einstein summation convention on the operands. - - Using the Einstein summation convention, many common multi-dimensional, - linear algebraic array operations can be represented in a simple fashion. - In *implicit* mode `einsum` computes these values. - - In *explicit* mode, `einsum` provides further flexibility to compute - other array operations that might not be considered classical Einstein - summation operations, by disabling, or forcing summation over specified - subscript labels. - - See the notes and examples for clarification. - - Parameters - ---------- - subscripts : str - Specifies the subscripts for summation as comma separated list of - subscript labels. An implicit (classical Einstein summation) - calculation is performed unless the explicit indicator '->' is - included as well as subscript labels of the precise output form. - operands : list of array_like - These are the arrays for the operation. - out : ndarray, optional - If provided, the calculation is done into this array. - dtype : {data-type, None}, optional - If provided, forces the calculation to use the data type specified. - Note that you may have to also give a more liberal `casting` - parameter to allow the conversions. Default is None. - order : {'C', 'F', 'A', 'K'}, optional - Controls the memory layout of the output. 'C' means it should - be C contiguous. 'F' means it should be Fortran contiguous, - 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise. 
- 'K' means it should be as close to the layout as the inputs as - is possible, including arbitrarily permuted axes. - Default is 'K'. - casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional - Controls what kind of data casting may occur. Setting this to - 'unsafe' is not recommended, as it can adversely affect accumulations. - - * 'no' means the data types should not be cast at all. - * 'equiv' means only byte-order changes are allowed. - * 'safe' means only casts which can preserve values are allowed. - * 'same_kind' means only safe casts or casts within a kind, - like float64 to float32, are allowed. - * 'unsafe' means any data conversions may be done. - - Default is 'safe'. - optimize : {False, True, 'greedy', 'optimal'}, optional - Controls if intermediate optimization should occur. No optimization - will occur if False and True will default to the 'greedy' algorithm. - Also accepts an explicit contraction list from the ``np.einsum_path`` - function. See ``np.einsum_path`` for more details. Defaults to False. - - Returns - ------- - output : ndarray - The calculation based on the Einstein summation convention. - - See Also - -------- - einsum_path, dot, inner, outer, tensordot, linalg.multi_dot - - Notes - ----- - .. versionadded:: 1.6.0 - - The Einstein summation convention can be used to compute - many multi-dimensional, linear algebraic array operations. `einsum` - provides a succinct way of representing these. - - A non-exhaustive list of these operations, - which can be computed by `einsum`, is shown below along with examples: - - * Trace of an array, :py:func:`numpy.trace`. - * Return a diagonal, :py:func:`numpy.diag`. - * Array axis summations, :py:func:`numpy.sum`. - * Transpositions and permutations, :py:func:`numpy.transpose`. - * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`. - * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`. 
- * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`. - * Tensor contractions, :py:func:`numpy.tensordot`. - * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`. - - The subscripts string is a comma-separated list of subscript labels, - where each label refers to a dimension of the corresponding operand. - Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)`` - is equivalent to :py:func:`np.inner(a,b) `. If a label - appears only once, it is not summed, so ``np.einsum('i', a)`` produces a - view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)`` - describes traditional matrix multiplication and is equivalent to - :py:func:`np.matmul(a,b) `. Repeated subscript labels in one - operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent - to :py:func:`np.trace(a) `. - - In *implicit mode*, the chosen subscripts are important - since the axes of the output are reordered alphabetically. This - means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while - ``np.einsum('ji', a)`` takes its transpose. Additionally, - ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while, - ``np.einsum('ij,jh', a, b)`` returns the transpose of the - multiplication since subscript 'h' precedes subscript 'i'. - - In *explicit mode* the output can be directly controlled by - specifying output subscript labels. This requires the - identifier '->' as well as the list of output subscript labels. - This feature increases the flexibility of the function since - summing can be disabled or forced when required. The call - ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) `, - and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) `. - The difference is that `einsum` does not allow broadcasting by default. 
- Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the - order of the output subscript labels and therefore returns matrix - multiplication, unlike the example above in implicit mode. - - To enable and control broadcasting, use an ellipsis. Default - NumPy-style broadcasting is done by adding an ellipsis - to the left of each term, like ``np.einsum('...ii->...i', a)``. - To take the trace along the first and last axes, - you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix - product with the left-most indices instead of rightmost, one can do - ``np.einsum('ij...,jk...->ik...', a, b)``. - - When there is only one operand, no axes are summed, and no output - parameter is provided, a view into the operand is returned instead - of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)`` - produces a view (changed in version 1.10.0). - - `einsum` also provides an alternative way to provide the subscripts - and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. - If the output shape is not provided in this format `einsum` will be - calculated in implicit mode, otherwise it will be performed explicitly. - The examples below have corresponding `einsum` calls with the two - parameter methods. - - .. versionadded:: 1.10.0 - - Views returned from einsum are now writeable whenever the input array - is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now - have the same effect as :py:func:`np.swapaxes(a, 0, 2) ` - and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal - of a 2D array. - - .. versionadded:: 1.12.0 - - Added the ``optimize`` argument which will optimize the contraction order - of an einsum expression. For a contraction with three or more operands this - can greatly increase the computational efficiency at the cost of a larger - memory footprint during computation. 
- - Typically a 'greedy' algorithm is applied which empirical tests have shown - returns the optimal path in the majority of cases. In some cases 'optimal' - will return the superlative path through a more expensive, exhaustive search. - For iterative calculations it may be advisable to calculate the optimal path - once and reuse that path by supplying it as an argument. An example is given - below. - - See :py:func:`numpy.einsum_path` for more details. - - Examples - -------- - >>> a = np.arange(25).reshape(5,5) - >>> b = np.arange(5) - >>> c = np.arange(6).reshape(2,3) - - Trace of a matrix: - - >>> np.einsum('ii', a) - 60 - >>> np.einsum(a, [0,0]) - 60 - >>> np.trace(a) - 60 - - Extract the diagonal (requires explicit form): - - >>> np.einsum('ii->i', a) - array([ 0, 6, 12, 18, 24]) - >>> np.einsum(a, [0,0], [0]) - array([ 0, 6, 12, 18, 24]) - >>> np.diag(a) - array([ 0, 6, 12, 18, 24]) - - Sum over an axis (requires explicit form): - - >>> np.einsum('ij->i', a) - array([ 10, 35, 60, 85, 110]) - >>> np.einsum(a, [0,1], [0]) - array([ 10, 35, 60, 85, 110]) - >>> np.sum(a, axis=1) - array([ 10, 35, 60, 85, 110]) - - For higher dimensional arrays summing a single axis can be done with ellipsis: - - >>> np.einsum('...j->...', a) - array([ 10, 35, 60, 85, 110]) - >>> np.einsum(a, [Ellipsis,1], [Ellipsis]) - array([ 10, 35, 60, 85, 110]) - - Compute a matrix transpose, or reorder any number of axes: - - >>> np.einsum('ji', c) - array([[0, 3], - [1, 4], - [2, 5]]) - >>> np.einsum('ij->ji', c) - array([[0, 3], - [1, 4], - [2, 5]]) - >>> np.einsum(c, [1,0]) - array([[0, 3], - [1, 4], - [2, 5]]) - >>> np.transpose(c) - array([[0, 3], - [1, 4], - [2, 5]]) - - Vector inner products: - - >>> np.einsum('i,i', b, b) - 30 - >>> np.einsum(b, [0], b, [0]) - 30 - >>> np.inner(b,b) - 30 - - Matrix vector multiplication: - - >>> np.einsum('ij,j', a, b) - array([ 30, 80, 130, 180, 230]) - >>> np.einsum(a, [0,1], b, [1]) - array([ 30, 80, 130, 180, 230]) - >>> np.dot(a, b) - array([ 
30, 80, 130, 180, 230]) - >>> np.einsum('...j,j', a, b) - array([ 30, 80, 130, 180, 230]) - - Broadcasting and scalar multiplication: - - >>> np.einsum('..., ...', 3, c) - array([[ 0, 3, 6], - [ 9, 12, 15]]) - >>> np.einsum(',ij', 3, c) - array([[ 0, 3, 6], - [ 9, 12, 15]]) - >>> np.einsum(3, [Ellipsis], c, [Ellipsis]) - array([[ 0, 3, 6], - [ 9, 12, 15]]) - >>> np.multiply(3, c) - array([[ 0, 3, 6], - [ 9, 12, 15]]) - - Vector outer product: - - >>> np.einsum('i,j', np.arange(2)+1, b) - array([[0, 1, 2, 3, 4], - [0, 2, 4, 6, 8]]) - >>> np.einsum(np.arange(2)+1, [0], b, [1]) - array([[0, 1, 2, 3, 4], - [0, 2, 4, 6, 8]]) - >>> np.outer(np.arange(2)+1, b) - array([[0, 1, 2, 3, 4], - [0, 2, 4, 6, 8]]) - - Tensor contraction: - - >>> a = np.arange(60.).reshape(3,4,5) - >>> b = np.arange(24.).reshape(4,3,2) - >>> np.einsum('ijk,jil->kl', a, b) - array([[4400., 4730.], - [4532., 4874.], - [4664., 5018.], - [4796., 5162.], - [4928., 5306.]]) - >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3]) - array([[4400., 4730.], - [4532., 4874.], - [4664., 5018.], - [4796., 5162.], - [4928., 5306.]]) - >>> np.tensordot(a,b, axes=([1,0],[0,1])) - array([[4400., 4730.], - [4532., 4874.], - [4664., 5018.], - [4796., 5162.], - [4928., 5306.]]) - - Writeable returned arrays (since version 1.10.0): - - >>> a = np.zeros((3, 3)) - >>> np.einsum('ii->i', a)[:] = 1 - >>> a - array([[1., 0., 0.], - [0., 1., 0.], - [0., 0., 1.]]) - - Example of ellipsis use: - - >>> a = np.arange(6).reshape((3,2)) - >>> b = np.arange(12).reshape((4,3)) - >>> np.einsum('ki,jk->ij', a, b) - array([[10, 28, 46, 64], - [13, 40, 67, 94]]) - >>> np.einsum('ki,...k->i...', a, b) - array([[10, 28, 46, 64], - [13, 40, 67, 94]]) - >>> np.einsum('k...,jk', a, b) - array([[10, 28, 46, 64], - [13, 40, 67, 94]]) - - Chained array operations. 
For more complicated contractions, speed ups - might be achieved by repeatedly computing a 'greedy' path or pre-computing the - 'optimal' path and repeatedly applying it, using an - `einsum_path` insertion (since version 1.12.0). Performance improvements can be - particularly significant with larger arrays: - - >>> a = np.ones(64).reshape(2,4,8) - - Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.) - - >>> for iteration in range(500): - ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a) - - Sub-optimal `einsum` (due to repeated path calculation time): ~330ms - - >>> for iteration in range(500): - ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal') - - Greedy `einsum` (faster optimal path approximation): ~160ms - - >>> for iteration in range(500): - ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy') - - Optimal `einsum` (best usage pattern in some use cases): ~110ms - - >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')[0] - >>> for iteration in range(500): - ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path) - - """ - - # Grab non-einsum kwargs; do not optimize by default. 
- optimize_arg = kwargs.pop('optimize', False) - - # If no optimization, run pure einsum - if optimize_arg is False: - return c_einsum(*operands, **kwargs) - - valid_einsum_kwargs = ['out', 'dtype', 'order', 'casting'] - einsum_kwargs = {k: v for (k, v) in kwargs.items() if - k in valid_einsum_kwargs} - - # Make sure all keywords are valid - valid_contract_kwargs = ['optimize'] + valid_einsum_kwargs - unknown_kwargs = [k for (k, v) in kwargs.items() if - k not in valid_contract_kwargs] - - if len(unknown_kwargs): - raise TypeError("Did not understand the following kwargs: %s" - % unknown_kwargs) - - # Special handeling if out is specified - specified_out = False - out_array = einsum_kwargs.pop('out', None) - if out_array is not None: - specified_out = True - - # Build the contraction list and operand - operands, contraction_list = einsum_path(*operands, optimize=optimize_arg, - einsum_call=True) - - handle_out = False - - # Start contraction loop - for num, contraction in enumerate(contraction_list): - inds, idx_rm, einsum_str, remaining, blas = contraction - tmp_operands = [operands.pop(x) for x in inds] - - # Do we need to deal with the output? - handle_out = specified_out and ((num + 1) == len(contraction_list)) - - # Call tensordot if still possible - if blas: - # Checks have already been handled - input_str, results_index = einsum_str.split('->') - input_left, input_right = input_str.split(',') - - tensor_result = input_left + input_right - for s in idx_rm: - tensor_result = tensor_result.replace(s, "") - - # Find indices to contract over - left_pos, right_pos = [], [] - for s in sorted(idx_rm): - left_pos.append(input_left.find(s)) - right_pos.append(input_right.find(s)) - - # Contract! 
- new_view = tensordot(*tmp_operands, axes=(tuple(left_pos), tuple(right_pos))) - - # Build a new view if needed - if (tensor_result != results_index) or handle_out: - if handle_out: - einsum_kwargs["out"] = out_array - new_view = c_einsum(tensor_result + '->' + results_index, new_view, **einsum_kwargs) - - # Call einsum - else: - # If out was specified - if handle_out: - einsum_kwargs["out"] = out_array - - # Do the contraction - new_view = c_einsum(einsum_str, *tmp_operands, **einsum_kwargs) - - # Append new items and dereference what we can - operands.append(new_view) - del tmp_operands, new_view - - if specified_out: - return out_array - else: - return operands[0] diff --git a/venv/lib/python3.7/site-packages/numpy/core/fromnumeric.py b/venv/lib/python3.7/site-packages/numpy/core/fromnumeric.py deleted file mode 100644 index d454480..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/fromnumeric.py +++ /dev/null @@ -1,3649 +0,0 @@ -"""Module containing non-deprecated functions borrowed from Numeric. - -""" -from __future__ import division, absolute_import, print_function - -import functools -import types -import warnings - -import numpy as np -from .. import VisibleDeprecationWarning -from . import multiarray as mu -from . import overrides -from . import umath as um -from . import numerictypes as nt -from ._asarray import asarray, array, asanyarray -from .multiarray import concatenate -from . 
import _methods - -_dt_ = nt.sctype2char - -# functions that are methods -__all__ = [ - 'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax', - 'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip', - 'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean', - 'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put', - 'ravel', 'repeat', 'reshape', 'resize', 'round_', - 'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze', - 'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var', -] - -_gentype = types.GeneratorType -# save away Python sum -_sum_ = sum - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - - -# functions that are now methods -def _wrapit(obj, method, *args, **kwds): - try: - wrap = obj.__array_wrap__ - except AttributeError: - wrap = None - result = getattr(asarray(obj), method)(*args, **kwds) - if wrap: - if not isinstance(result, mu.ndarray): - result = asarray(result) - result = wrap(result) - return result - - -def _wrapfunc(obj, method, *args, **kwds): - bound = getattr(obj, method, None) - if bound is None: - return _wrapit(obj, method, *args, **kwds) - - try: - return bound(*args, **kwds) - except TypeError: - # A TypeError occurs if the object does have such a method in its - # class, but its signature is not identical to that of NumPy's. This - # situation has occurred in the case of a downstream library like - # 'pandas'. - # - # Call _wrapit from within the except clause to ensure a potential - # exception has a traceback chain. - return _wrapit(obj, method, *args, **kwds) - - -def _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs): - passkwargs = {k: v for k, v in kwargs.items() - if v is not np._NoValue} - - if type(obj) is not mu.ndarray: - try: - reduction = getattr(obj, method) - except AttributeError: - pass - else: - # This branch is needed for reductions like any which don't - # support a dtype. 
- if dtype is not None: - return reduction(axis=axis, dtype=dtype, out=out, **passkwargs) - else: - return reduction(axis=axis, out=out, **passkwargs) - - return ufunc.reduce(obj, axis, dtype, out, **passkwargs) - - -def _take_dispatcher(a, indices, axis=None, out=None, mode=None): - return (a, out) - - -@array_function_dispatch(_take_dispatcher) -def take(a, indices, axis=None, out=None, mode='raise'): - """ - Take elements from an array along an axis. - - When axis is not None, this function does the same thing as "fancy" - indexing (indexing arrays using arrays); however, it can be easier to use - if you need elements along a given axis. A call such as - ``np.take(arr, indices, axis=3)`` is equivalent to - ``arr[:,:,:,indices,...]``. - - Explained without fancy indexing, this is equivalent to the following use - of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of - indices:: - - Ni, Nk = a.shape[:axis], a.shape[axis+1:] - Nj = indices.shape - for ii in ndindex(Ni): - for jj in ndindex(Nj): - for kk in ndindex(Nk): - out[ii + jj + kk] = a[ii + (indices[jj],) + kk] - - Parameters - ---------- - a : array_like (Ni..., M, Nk...) - The source array. - indices : array_like (Nj...) - The indices of the values to extract. - - .. versionadded:: 1.8.0 - - Also allow scalars for indices. - axis : int, optional - The axis over which to select values. By default, the flattened - input array is used. - out : ndarray, optional (Ni..., Nj..., Nk...) - If provided, the result will be placed in this array. It should - be of the appropriate shape and dtype. Note that `out` is always - buffered if `mode='raise'`; use other modes for better performance. - mode : {'raise', 'wrap', 'clip'}, optional - Specifies how out-of-bounds indices will behave. 
- - * 'raise' -- raise an error (default) - * 'wrap' -- wrap around - * 'clip' -- clip to the range - - 'clip' mode means that all indices that are too large are replaced - by the index that addresses the last element along that axis. Note - that this disables indexing with negative numbers. - - Returns - ------- - out : ndarray (Ni..., Nj..., Nk...) - The returned array has the same type as `a`. - - See Also - -------- - compress : Take elements using a boolean mask - ndarray.take : equivalent method - take_along_axis : Take elements by matching the array and the index arrays - - Notes - ----- - - By eliminating the inner loop in the description above, and using `s_` to - build simple slice objects, `take` can be expressed in terms of applying - fancy indexing to each 1-d slice:: - - Ni, Nk = a.shape[:axis], a.shape[axis+1:] - for ii in ndindex(Ni): - for kk in ndindex(Nj): - out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices] - - For this reason, it is equivalent to (but faster than) the following use - of `apply_along_axis`:: - - out = np.apply_along_axis(lambda a_1d: a_1d[indices], axis, a) - - Examples - -------- - >>> a = [4, 3, 5, 7, 6, 8] - >>> indices = [0, 1, 4] - >>> np.take(a, indices) - array([4, 3, 6]) - - In this example if `a` is an ndarray, "fancy" indexing can be used. - - >>> a = np.array(a) - >>> a[indices] - array([4, 3, 6]) - - If `indices` is not one dimensional, the output also has these dimensions. - - >>> np.take(a, [[0, 1], [2, 3]]) - array([[4, 3], - [5, 7]]) - """ - return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode) - - -def _reshape_dispatcher(a, newshape, order=None): - return (a,) - - -# not deprecated --- copy if necessary, view otherwise -@array_function_dispatch(_reshape_dispatcher) -def reshape(a, newshape, order='C'): - """ - Gives a new shape to an array without changing its data. - - Parameters - ---------- - a : array_like - Array to be reshaped. 
- newshape : int or tuple of ints - The new shape should be compatible with the original shape. If - an integer, then the result will be a 1-D array of that length. - One shape dimension can be -1. In this case, the value is - inferred from the length of the array and remaining dimensions. - order : {'C', 'F', 'A'}, optional - Read the elements of `a` using this index order, and place the - elements into the reshaped array using this index order. 'C' - means to read / write the elements using C-like index order, - with the last axis index changing fastest, back to the first - axis index changing slowest. 'F' means to read / write the - elements using Fortran-like index order, with the first index - changing fastest, and the last index changing slowest. Note that - the 'C' and 'F' options take no account of the memory layout of - the underlying array, and only refer to the order of indexing. - 'A' means to read / write the elements in Fortran-like index - order if `a` is Fortran *contiguous* in memory, C-like order - otherwise. - - Returns - ------- - reshaped_array : ndarray - This will be a new view object if possible; otherwise, it will - be a copy. Note there is no guarantee of the *memory layout* (C- or - Fortran- contiguous) of the returned array. - - See Also - -------- - ndarray.reshape : Equivalent method. - - Notes - ----- - It is not always possible to change the shape of an array without - copying the data. If you want an error to be raised when the data is copied, - you should assign the new shape to the shape attribute of the array:: - - >>> a = np.zeros((10, 2)) - - # A transpose makes the array non-contiguous - >>> b = a.T - - # Taking a view makes it possible to modify the shape without modifying - # the initial object. - >>> c = b.view() - >>> c.shape = (20) - Traceback (most recent call last): - ... 
- AttributeError: incompatible shape for a non-contiguous array - - The `order` keyword gives the index ordering both for *fetching* the values - from `a`, and then *placing* the values into the output array. - For example, let's say you have an array: - - >>> a = np.arange(6).reshape((3, 2)) - >>> a - array([[0, 1], - [2, 3], - [4, 5]]) - - You can think of reshaping as first raveling the array (using the given - index order), then inserting the elements from the raveled array into the - new array using the same kind of index ordering as was used for the - raveling. - - >>> np.reshape(a, (2, 3)) # C-like index ordering - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering - array([[0, 4, 3], - [2, 1, 5]]) - >>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F') - array([[0, 4, 3], - [2, 1, 5]]) - - Examples - -------- - >>> a = np.array([[1,2,3], [4,5,6]]) - >>> np.reshape(a, 6) - array([1, 2, 3, 4, 5, 6]) - >>> np.reshape(a, 6, order='F') - array([1, 4, 2, 5, 3, 6]) - - >>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2 - array([[1, 2], - [3, 4], - [5, 6]]) - """ - return _wrapfunc(a, 'reshape', newshape, order=order) - - -def _choose_dispatcher(a, choices, out=None, mode=None): - yield a - for c in choices: - yield c - yield out - - -@array_function_dispatch(_choose_dispatcher) -def choose(a, choices, out=None, mode='raise'): - """ - Construct an array from an index array and a set of arrays to choose from. - - First of all, if confused or uncertain, definitely look at the Examples - - in its full generality, this function is less simple than it might - seem from the following code description (below ndi = - `numpy.lib.index_tricks`): - - ``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``. - - But this omits some subtleties. 
Here is a fully general summary: - - Given an "index" array (`a`) of integers and a sequence of `n` arrays - (`choices`), `a` and each choice array are first broadcast, as necessary, - to arrays of a common shape; calling these *Ba* and *Bchoices[i], i = - 0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape`` - for each `i`. Then, a new array with shape ``Ba.shape`` is created as - follows: - - * if ``mode=raise`` (the default), then, first of all, each element of - `a` (and thus `Ba`) must be in the range `[0, n-1]`; now, suppose that - `i` (in that range) is the value at the `(j0, j1, ..., jm)` position - in `Ba` - then the value at the same position in the new array is the - value in `Bchoices[i]` at that same position; - - * if ``mode=wrap``, values in `a` (and thus `Ba`) may be any (signed) - integer; modular arithmetic is used to map integers outside the range - `[0, n-1]` back into that range; and then the new array is constructed - as above; - - * if ``mode=clip``, values in `a` (and thus `Ba`) may be any (signed) - integer; negative integers are mapped to 0; values greater than `n-1` - are mapped to `n-1`; and then the new array is constructed as above. - - Parameters - ---------- - a : int array - This array must contain integers in `[0, n-1]`, where `n` is the number - of choices, unless ``mode=wrap`` or ``mode=clip``, in which cases any - integers are permissible. - choices : sequence of arrays - Choice arrays. `a` and all of the choices must be broadcastable to the - same shape. If `choices` is itself an array (not recommended), then - its outermost dimension (i.e., the one corresponding to - ``choices.shape[0]``) is taken as defining the "sequence". - out : array, optional - If provided, the result will be inserted into this array. It should - be of the appropriate shape and dtype. Note that `out` is always - buffered if `mode='raise'`; use other modes for better performance. 
- mode : {'raise' (default), 'wrap', 'clip'}, optional - Specifies how indices outside `[0, n-1]` will be treated: - - * 'raise' : an exception is raised - * 'wrap' : value becomes value mod `n` - * 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1 - - Returns - ------- - merged_array : array - The merged result. - - Raises - ------ - ValueError: shape mismatch - If `a` and each choice array are not all broadcastable to the same - shape. - - See Also - -------- - ndarray.choose : equivalent method - numpy.take_along_axis : Preferable if `choices` is an array - - Notes - ----- - To reduce the chance of misinterpretation, even though the following - "abuse" is nominally supported, `choices` should neither be, nor be - thought of as, a single array, i.e., the outermost sequence-like container - should be either a list or a tuple. - - Examples - -------- - - >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], - ... [20, 21, 22, 23], [30, 31, 32, 33]] - >>> np.choose([2, 3, 1, 0], choices - ... # the first element of the result will be the first element of the - ... # third (2+1) "array" in choices, namely, 20; the second element - ... # will be the second element of the fourth (3+1) choice array, i.e., - ... # 31, etc. - ... 
) - array([20, 31, 12, 3]) - >>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1) - array([20, 31, 12, 3]) - >>> # because there are 4 choice arrays - >>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4) - array([20, 1, 12, 3]) - >>> # i.e., 0 - - A couple examples illustrating how choose broadcasts: - - >>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]] - >>> choices = [-10, 10] - >>> np.choose(a, choices) - array([[ 10, -10, 10], - [-10, 10, -10], - [ 10, -10, 10]]) - - >>> # With thanks to Anne Archibald - >>> a = np.array([0, 1]).reshape((2,1,1)) - >>> c1 = np.array([1, 2, 3]).reshape((1,3,1)) - >>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5)) - >>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2 - array([[[ 1, 1, 1, 1, 1], - [ 2, 2, 2, 2, 2], - [ 3, 3, 3, 3, 3]], - [[-1, -2, -3, -4, -5], - [-1, -2, -3, -4, -5], - [-1, -2, -3, -4, -5]]]) - - """ - return _wrapfunc(a, 'choose', choices, out=out, mode=mode) - - -def _repeat_dispatcher(a, repeats, axis=None): - return (a,) - - -@array_function_dispatch(_repeat_dispatcher) -def repeat(a, repeats, axis=None): - """ - Repeat elements of an array. - - Parameters - ---------- - a : array_like - Input array. - repeats : int or array of ints - The number of repetitions for each element. `repeats` is broadcasted - to fit the shape of the given axis. - axis : int, optional - The axis along which to repeat values. By default, use the - flattened input array, and return a flat output array. - - Returns - ------- - repeated_array : ndarray - Output array which has the same shape as `a`, except along - the given axis. - - See Also - -------- - tile : Tile an array. 
- - Examples - -------- - >>> np.repeat(3, 4) - array([3, 3, 3, 3]) - >>> x = np.array([[1,2],[3,4]]) - >>> np.repeat(x, 2) - array([1, 1, 2, 2, 3, 3, 4, 4]) - >>> np.repeat(x, 3, axis=1) - array([[1, 1, 1, 2, 2, 2], - [3, 3, 3, 4, 4, 4]]) - >>> np.repeat(x, [1, 2], axis=0) - array([[1, 2], - [3, 4], - [3, 4]]) - - """ - return _wrapfunc(a, 'repeat', repeats, axis=axis) - - -def _put_dispatcher(a, ind, v, mode=None): - return (a, ind, v) - - -@array_function_dispatch(_put_dispatcher) -def put(a, ind, v, mode='raise'): - """ - Replaces specified elements of an array with given values. - - The indexing works on the flattened target array. `put` is roughly - equivalent to: - - :: - - a.flat[ind] = v - - Parameters - ---------- - a : ndarray - Target array. - ind : array_like - Target indices, interpreted as integers. - v : array_like - Values to place in `a` at target indices. If `v` is shorter than - `ind` it will be repeated as necessary. - mode : {'raise', 'wrap', 'clip'}, optional - Specifies how out-of-bounds indices will behave. - - * 'raise' -- raise an error (default) - * 'wrap' -- wrap around - * 'clip' -- clip to the range - - 'clip' mode means that all indices that are too large are replaced - by the index that addresses the last element along that axis. Note - that this disables indexing with negative numbers. In 'raise' mode, - if an exception occurs the target array may still be modified. 
- - See Also - -------- - putmask, place - put_along_axis : Put elements by matching the array and the index arrays - - Examples - -------- - >>> a = np.arange(5) - >>> np.put(a, [0, 2], [-44, -55]) - >>> a - array([-44, 1, -55, 3, 4]) - - >>> a = np.arange(5) - >>> np.put(a, 22, -5, mode='clip') - >>> a - array([ 0, 1, 2, 3, -5]) - - """ - try: - put = a.put - except AttributeError: - raise TypeError("argument 1 must be numpy.ndarray, " - "not {name}".format(name=type(a).__name__)) - - return put(ind, v, mode=mode) - - -def _swapaxes_dispatcher(a, axis1, axis2): - return (a,) - - -@array_function_dispatch(_swapaxes_dispatcher) -def swapaxes(a, axis1, axis2): - """ - Interchange two axes of an array. - - Parameters - ---------- - a : array_like - Input array. - axis1 : int - First axis. - axis2 : int - Second axis. - - Returns - ------- - a_swapped : ndarray - For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a` is - returned; otherwise a new array is created. For earlier NumPy - versions a view of `a` is returned only if the order of the - axes is changed, otherwise the input array is returned. - - Examples - -------- - >>> x = np.array([[1,2,3]]) - >>> np.swapaxes(x,0,1) - array([[1], - [2], - [3]]) - - >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]]) - >>> x - array([[[0, 1], - [2, 3]], - [[4, 5], - [6, 7]]]) - - >>> np.swapaxes(x,0,2) - array([[[0, 4], - [2, 6]], - [[1, 5], - [3, 7]]]) - - """ - return _wrapfunc(a, 'swapaxes', axis1, axis2) - - -def _transpose_dispatcher(a, axes=None): - return (a,) - - -@array_function_dispatch(_transpose_dispatcher) -def transpose(a, axes=None): - """ - Permute the dimensions of an array. - - Parameters - ---------- - a : array_like - Input array. - axes : list of ints, optional - By default, reverse the dimensions, otherwise permute the axes - according to the values given. - - Returns - ------- - p : ndarray - `a` with its axes permuted. A view is returned whenever - possible. 
- - See Also - -------- - moveaxis - argsort - - Notes - ----- - Use `transpose(a, argsort(axes))` to invert the transposition of tensors - when using the `axes` keyword argument. - - Transposing a 1-D array returns an unchanged view of the original array. - - Examples - -------- - >>> x = np.arange(4).reshape((2,2)) - >>> x - array([[0, 1], - [2, 3]]) - - >>> np.transpose(x) - array([[0, 2], - [1, 3]]) - - >>> x = np.ones((1, 2, 3)) - >>> np.transpose(x, (1, 0, 2)).shape - (2, 1, 3) - - """ - return _wrapfunc(a, 'transpose', axes) - - -def _partition_dispatcher(a, kth, axis=None, kind=None, order=None): - return (a,) - - -@array_function_dispatch(_partition_dispatcher) -def partition(a, kth, axis=-1, kind='introselect', order=None): - """ - Return a partitioned copy of an array. - - Creates a copy of the array with its elements rearranged in such a - way that the value of the element in k-th position is in the - position it would be in a sorted array. All elements smaller than - the k-th element are moved before this element and all equal or - greater are moved behind it. The ordering of the elements in the two - partitions is undefined. - - .. versionadded:: 1.8.0 - - Parameters - ---------- - a : array_like - Array to be sorted. - kth : int or sequence of ints - Element index to partition by. The k-th value of the element - will be in its final sorted position and all smaller elements - will be moved before it and all equal or greater elements behind - it. The order of all elements in the partitions is undefined. If - provided with a sequence of k-th it will partition all elements - indexed by k-th of them into their sorted position at once. - axis : int or None, optional - Axis along which to sort. If None, the array is flattened before - sorting. The default is -1, which sorts along the last axis. - kind : {'introselect'}, optional - Selection algorithm. Default is 'introselect'. 
- order : str or list of str, optional - When `a` is an array with fields defined, this argument - specifies which fields to compare first, second, etc. A single - field can be specified as a string. Not all fields need be - specified, but unspecified fields will still be used, in the - order in which they come up in the dtype, to break ties. - - Returns - ------- - partitioned_array : ndarray - Array of the same type and shape as `a`. - - See Also - -------- - ndarray.partition : Method to sort an array in-place. - argpartition : Indirect partition. - sort : Full sorting - - Notes - ----- - The various selection algorithms are characterized by their average - speed, worst case performance, work space size, and whether they are - stable. A stable sort keeps items with the same key in the same - relative order. The available algorithms have the following - properties: - - ================= ======= ============= ============ ======= - kind speed worst case work space stable - ================= ======= ============= ============ ======= - 'introselect' 1 O(n) 0 no - ================= ======= ============= ============ ======= - - All the partition algorithms make temporary copies of the data when - partitioning along any but the last axis. Consequently, - partitioning along the last axis is faster and uses less space than - partitioning along any other axis. - - The sort order for complex numbers is lexicographic. If both the - real and imaginary parts are non-nan then the order is determined by - the real parts except when they are equal, in which case the order - is determined by the imaginary parts. 
- - Examples - -------- - >>> a = np.array([3, 4, 2, 1]) - >>> np.partition(a, 3) - array([2, 1, 3, 4]) - - >>> np.partition(a, (1, 3)) - array([1, 2, 3, 4]) - - """ - if axis is None: - # flatten returns (1, N) for np.matrix, so always use the last axis - a = asanyarray(a).flatten() - axis = -1 - else: - a = asanyarray(a).copy(order="K") - a.partition(kth, axis=axis, kind=kind, order=order) - return a - - -def _argpartition_dispatcher(a, kth, axis=None, kind=None, order=None): - return (a,) - - -@array_function_dispatch(_argpartition_dispatcher) -def argpartition(a, kth, axis=-1, kind='introselect', order=None): - """ - Perform an indirect partition along the given axis using the - algorithm specified by the `kind` keyword. It returns an array of - indices of the same shape as `a` that index data along the given - axis in partitioned order. - - .. versionadded:: 1.8.0 - - Parameters - ---------- - a : array_like - Array to sort. - kth : int or sequence of ints - Element index to partition by. The k-th element will be in its - final sorted position and all smaller elements will be moved - before it and all larger elements behind it. The order all - elements in the partitions is undefined. If provided with a - sequence of k-th it will partition all of them into their sorted - position at once. - axis : int or None, optional - Axis along which to sort. The default is -1 (the last axis). If - None, the flattened array is used. - kind : {'introselect'}, optional - Selection algorithm. Default is 'introselect' - order : str or list of str, optional - When `a` is an array with fields defined, this argument - specifies which fields to compare first, second, etc. A single - field can be specified as a string, and not all fields need be - specified, but unspecified fields will still be used, in the - order in which they come up in the dtype, to break ties. - - Returns - ------- - index_array : ndarray, int - Array of indices that partition `a` along the specified axis. 
- If `a` is one-dimensional, ``a[index_array]`` yields a partitioned `a`. - More generally, ``np.take_along_axis(a, index_array, axis=a)`` always - yields the partitioned `a`, irrespective of dimensionality. - - See Also - -------- - partition : Describes partition algorithms used. - ndarray.partition : Inplace partition. - argsort : Full indirect sort. - take_along_axis : Apply ``index_array`` from argpartition - to an array as if by calling partition. - - Notes - ----- - See `partition` for notes on the different selection algorithms. - - Examples - -------- - One dimensional array: - - >>> x = np.array([3, 4, 2, 1]) - >>> x[np.argpartition(x, 3)] - array([2, 1, 3, 4]) - >>> x[np.argpartition(x, (1, 3))] - array([1, 2, 3, 4]) - - >>> x = [3, 4, 2, 1] - >>> np.array(x)[np.argpartition(x, 3)] - array([2, 1, 3, 4]) - - Multi-dimensional array: - - >>> x = np.array([[3, 4, 2], [1, 3, 1]]) - >>> index_array = np.argpartition(x, kth=1, axis=-1) - >>> np.take_along_axis(x, index_array, axis=-1) # same as np.partition(x, kth=1) - array([[2, 3, 4], - [1, 1, 3]]) - - """ - return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order) - - -def _sort_dispatcher(a, axis=None, kind=None, order=None): - return (a,) - - -@array_function_dispatch(_sort_dispatcher) -def sort(a, axis=-1, kind=None, order=None): - """ - Return a sorted copy of an array. - - Parameters - ---------- - a : array_like - Array to be sorted. - axis : int or None, optional - Axis along which to sort. If None, the array is flattened before - sorting. The default is -1, which sorts along the last axis. - kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional - Sorting algorithm. The default is 'quicksort'. Note that both 'stable' - and 'mergesort' use timsort or radix sort under the covers and, in general, - the actual implementation will vary with data type. The 'mergesort' option - is retained for backwards compatibility. - - .. versionchanged:: 1.15.0. 
- The 'stable' option was added. - - order : str or list of str, optional - When `a` is an array with fields defined, this argument specifies - which fields to compare first, second, etc. A single field can - be specified as a string, and not all fields need be specified, - but unspecified fields will still be used, in the order in which - they come up in the dtype, to break ties. - - Returns - ------- - sorted_array : ndarray - Array of the same type and shape as `a`. - - See Also - -------- - ndarray.sort : Method to sort an array in-place. - argsort : Indirect sort. - lexsort : Indirect stable sort on multiple keys. - searchsorted : Find elements in a sorted array. - partition : Partial sort. - - Notes - ----- - The various sorting algorithms are characterized by their average speed, - worst case performance, work space size, and whether they are stable. A - stable sort keeps items with the same key in the same relative - order. The four algorithms implemented in NumPy have the following - properties: - - =========== ======= ============= ============ ======== - kind speed worst case work space stable - =========== ======= ============= ============ ======== - 'quicksort' 1 O(n^2) 0 no - 'heapsort' 3 O(n*log(n)) 0 no - 'mergesort' 2 O(n*log(n)) ~n/2 yes - 'timsort' 2 O(n*log(n)) ~n/2 yes - =========== ======= ============= ============ ======== - - .. note:: The datatype determines which of 'mergesort' or 'timsort' - is actually used, even if 'mergesort' is specified. User selection - at a finer scale is not currently available. - - All the sort algorithms make temporary copies of the data when - sorting along any but the last axis. Consequently, sorting along - the last axis is faster and uses less space than sorting along - any other axis. - - The sort order for complex numbers is lexicographic. 
If both the real - and imaginary parts are non-nan then the order is determined by the - real parts except when they are equal, in which case the order is - determined by the imaginary parts. - - Previous to numpy 1.4.0 sorting real and complex arrays containing nan - values led to undefined behaviour. In numpy versions >= 1.4.0 nan - values are sorted to the end. The extended sort order is: - - * Real: [R, nan] - * Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj] - - where R is a non-nan real value. Complex values with the same nan - placements are sorted according to the non-nan part if it exists. - Non-nan values are sorted as before. - - .. versionadded:: 1.12.0 - - quicksort has been changed to `introsort `_. - When sorting does not make enough progress it switches to - `heapsort `_. - This implementation makes quicksort O(n*log(n)) in the worst case. - - 'stable' automatically chooses the best stable sorting algorithm - for the data type being sorted. - It, along with 'mergesort' is currently mapped to - `timsort `_ - or `radix sort `_ - depending on the data type. - API forward compatibility currently limits the - ability to select the implementation and it is hardwired for the different - data types. - - .. versionadded:: 1.17.0 - - Timsort is added for better performance on already or nearly - sorted data. On random data timsort is almost identical to - mergesort. It is now used for stable sort while quicksort is still the - default sort if none is chosen. For timsort details, refer to - `CPython listsort.txt `_. - 'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an - O(n) sort instead of O(n log n). - - .. versionchanged:: 1.17.0 - - NaT now sorts to the end of arrays for consistency with NaN. 
- - Examples - -------- - >>> a = np.array([[1,4],[3,1]]) - >>> np.sort(a) # sort along the last axis - array([[1, 4], - [1, 3]]) - >>> np.sort(a, axis=None) # sort the flattened array - array([1, 1, 3, 4]) - >>> np.sort(a, axis=0) # sort along the first axis - array([[1, 1], - [3, 4]]) - - Use the `order` keyword to specify a field to use when sorting a - structured array: - - >>> dtype = [('name', 'S10'), ('height', float), ('age', int)] - >>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38), - ... ('Galahad', 1.7, 38)] - >>> a = np.array(values, dtype=dtype) # create a structured array - >>> np.sort(a, order='height') # doctest: +SKIP - array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), - ('Lancelot', 1.8999999999999999, 38)], - dtype=[('name', '|S10'), ('height', '>> np.sort(a, order=['age', 'height']) # doctest: +SKIP - array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38), - ('Arthur', 1.8, 41)], - dtype=[('name', '|S10'), ('height', '>> x = np.array([3, 1, 2]) - >>> np.argsort(x) - array([1, 2, 0]) - - Two-dimensional array: - - >>> x = np.array([[0, 3], [2, 2]]) - >>> x - array([[0, 3], - [2, 2]]) - - >>> ind = np.argsort(x, axis=0) # sorts along first axis (down) - >>> ind - array([[0, 1], - [1, 0]]) - >>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0) - array([[0, 2], - [2, 3]]) - - >>> ind = np.argsort(x, axis=1) # sorts along last axis (across) - >>> ind - array([[0, 1], - [0, 1]]) - >>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1) - array([[0, 3], - [2, 2]]) - - Indices of the sorted elements of a N-dimensional array: - - >>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape) - >>> ind - (array([0, 1, 1, 0]), array([0, 0, 1, 1])) - >>> x[ind] # same as np.sort(x, axis=None) - array([0, 2, 2, 3]) - - Sorting with keys: - - >>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '>> x - array([(1, 0), (0, 1)], - dtype=[('x', '>> np.argsort(x, order=('x','y')) - array([1, 0]) - - >>> np.argsort(x, 
order=('y','x')) - array([0, 1]) - - """ - return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order) - - -def _argmax_dispatcher(a, axis=None, out=None): - return (a, out) - - -@array_function_dispatch(_argmax_dispatcher) -def argmax(a, axis=None, out=None): - """ - Returns the indices of the maximum values along an axis. - - Parameters - ---------- - a : array_like - Input array. - axis : int, optional - By default, the index is into the flattened array, otherwise - along the specified axis. - out : array, optional - If provided, the result will be inserted into this array. It should - be of the appropriate shape and dtype. - - Returns - ------- - index_array : ndarray of ints - Array of indices into the array. It has the same shape as `a.shape` - with the dimension along `axis` removed. - - See Also - -------- - ndarray.argmax, argmin - amax : The maximum value along a given axis. - unravel_index : Convert a flat index into an index tuple. - take_along_axis : Apply ``np.expand_dims(index_array, axis)`` - from argmax to an array as if by calling max. - - Notes - ----- - In case of multiple occurrences of the maximum values, the indices - corresponding to the first occurrence are returned. - - Examples - -------- - >>> a = np.arange(6).reshape(2,3) + 10 - >>> a - array([[10, 11, 12], - [13, 14, 15]]) - >>> np.argmax(a) - 5 - >>> np.argmax(a, axis=0) - array([1, 1, 1]) - >>> np.argmax(a, axis=1) - array([2, 2]) - - Indexes of the maximal elements of a N-dimensional array: - - >>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape) - >>> ind - (1, 2) - >>> a[ind] - 15 - - >>> b = np.arange(6) - >>> b[1] = 5 - >>> b - array([0, 5, 2, 3, 4, 5]) - >>> np.argmax(b) # Only the first occurrence is returned. 
- 1 - - >>> x = np.array([[4,2,3], [1,0,3]]) - >>> index_array = np.argmax(x, axis=-1) - >>> # Same as np.max(x, axis=-1, keepdims=True) - >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1) - array([[4], - [3]]) - >>> # Same as np.max(x, axis=-1) - >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1) - array([4, 3]) - - """ - return _wrapfunc(a, 'argmax', axis=axis, out=out) - - -def _argmin_dispatcher(a, axis=None, out=None): - return (a, out) - - -@array_function_dispatch(_argmin_dispatcher) -def argmin(a, axis=None, out=None): - """ - Returns the indices of the minimum values along an axis. - - Parameters - ---------- - a : array_like - Input array. - axis : int, optional - By default, the index is into the flattened array, otherwise - along the specified axis. - out : array, optional - If provided, the result will be inserted into this array. It should - be of the appropriate shape and dtype. - - Returns - ------- - index_array : ndarray of ints - Array of indices into the array. It has the same shape as `a.shape` - with the dimension along `axis` removed. - - See Also - -------- - ndarray.argmin, argmax - amin : The minimum value along a given axis. - unravel_index : Convert a flat index into an index tuple. - take_along_axis : Apply ``np.expand_dims(index_array, axis)`` - from argmin to an array as if by calling min. - - Notes - ----- - In case of multiple occurrences of the minimum values, the indices - corresponding to the first occurrence are returned. 
- - Examples - -------- - >>> a = np.arange(6).reshape(2,3) + 10 - >>> a - array([[10, 11, 12], - [13, 14, 15]]) - >>> np.argmin(a) - 0 - >>> np.argmin(a, axis=0) - array([0, 0, 0]) - >>> np.argmin(a, axis=1) - array([0, 0]) - - Indices of the minimum elements of a N-dimensional array: - - >>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape) - >>> ind - (0, 0) - >>> a[ind] - 10 - - >>> b = np.arange(6) + 10 - >>> b[4] = 10 - >>> b - array([10, 11, 12, 13, 10, 15]) - >>> np.argmin(b) # Only the first occurrence is returned. - 0 - - >>> x = np.array([[4,2,3], [1,0,3]]) - >>> index_array = np.argmin(x, axis=-1) - >>> # Same as np.min(x, axis=-1, keepdims=True) - >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1) - array([[2], - [0]]) - >>> # Same as np.max(x, axis=-1) - >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1) - array([2, 0]) - - """ - return _wrapfunc(a, 'argmin', axis=axis, out=out) - - -def _searchsorted_dispatcher(a, v, side=None, sorter=None): - return (a, v, sorter) - - -@array_function_dispatch(_searchsorted_dispatcher) -def searchsorted(a, v, side='left', sorter=None): - """ - Find indices where elements should be inserted to maintain order. - - Find the indices into a sorted array `a` such that, if the - corresponding elements in `v` were inserted before the indices, the - order of `a` would be preserved. - - Assuming that `a` is sorted: - - ====== ============================ - `side` returned index `i` satisfies - ====== ============================ - left ``a[i-1] < v <= a[i]`` - right ``a[i-1] <= v < a[i]`` - ====== ============================ - - Parameters - ---------- - a : 1-D array_like - Input array. If `sorter` is None, then it must be sorted in - ascending order, otherwise `sorter` must be an array of indices - that sort it. - v : array_like - Values to insert into `a`. 
- side : {'left', 'right'}, optional - If 'left', the index of the first suitable location found is given. - If 'right', return the last such index. If there is no suitable - index, return either 0 or N (where N is the length of `a`). - sorter : 1-D array_like, optional - Optional array of integer indices that sort array a into ascending - order. They are typically the result of argsort. - - .. versionadded:: 1.7.0 - - Returns - ------- - indices : array of ints - Array of insertion points with the same shape as `v`. - - See Also - -------- - sort : Return a sorted copy of an array. - histogram : Produce histogram from 1-D data. - - Notes - ----- - Binary search is used to find the required insertion points. - - As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing - `nan` values. The enhanced sort order is documented in `sort`. - - This function uses the same algorithm as the builtin python `bisect.bisect_left` - (``side='left'``) and `bisect.bisect_right` (``side='right'``) functions, - which is also vectorized in the `v` argument. - - Examples - -------- - >>> np.searchsorted([1,2,3,4,5], 3) - 2 - >>> np.searchsorted([1,2,3,4,5], 3, side='right') - 3 - >>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3]) - array([0, 5, 1, 2]) - - """ - return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter) - - -def _resize_dispatcher(a, new_shape): - return (a,) - - -@array_function_dispatch(_resize_dispatcher) -def resize(a, new_shape): - """ - Return a new array with the specified shape. - - If the new array is larger than the original array, then the new - array is filled with repeated copies of `a`. Note that this behavior - is different from a.resize(new_shape) which fills with zeros instead - of repeated copies of `a`. - - Parameters - ---------- - a : array_like - Array to be resized. - - new_shape : int or tuple of int - Shape of resized array. 
- - Returns - ------- - reshaped_array : ndarray - The new array is formed from the data in the old array, repeated - if necessary to fill out the required number of elements. The - data are repeated in the order that they are stored in memory. - - See Also - -------- - ndarray.resize : resize an array in-place. - - Notes - ----- - Warning: This functionality does **not** consider axes separately, - i.e. it does not apply interpolation/extrapolation. - It fills the return array with the required number of elements, taken - from `a` as they are laid out in memory, disregarding strides and axes. - (This is in case the new shape is smaller. For larger, see above.) - This functionality is therefore not suitable to resize images, - or data where each axis represents a separate and distinct entity. - - Examples - -------- - >>> a=np.array([[0,1],[2,3]]) - >>> np.resize(a,(2,3)) - array([[0, 1, 2], - [3, 0, 1]]) - >>> np.resize(a,(1,4)) - array([[0, 1, 2, 3]]) - >>> np.resize(a,(2,4)) - array([[0, 1, 2, 3], - [0, 1, 2, 3]]) - - """ - if isinstance(new_shape, (int, nt.integer)): - new_shape = (new_shape,) - a = ravel(a) - Na = len(a) - total_size = um.multiply.reduce(new_shape) - if Na == 0 or total_size == 0: - return mu.zeros(new_shape, a.dtype) - - n_copies = int(total_size / Na) - extra = total_size % Na - - if extra != 0: - n_copies = n_copies + 1 - extra = Na - extra - - a = concatenate((a,) * n_copies) - if extra > 0: - a = a[:-extra] - - return reshape(a, new_shape) - - -def _squeeze_dispatcher(a, axis=None): - return (a,) - - -@array_function_dispatch(_squeeze_dispatcher) -def squeeze(a, axis=None): - """ - Remove single-dimensional entries from the shape of an array. - - Parameters - ---------- - a : array_like - Input data. - axis : None or int or tuple of ints, optional - .. versionadded:: 1.7.0 - - Selects a subset of the single-dimensional entries in the - shape. If an axis is selected with shape entry greater than - one, an error is raised. 
- - Returns - ------- - squeezed : ndarray - The input array, but with all or a subset of the - dimensions of length 1 removed. This is always `a` itself - or a view into `a`. - - Raises - ------ - ValueError - If `axis` is not None, and an axis being squeezed is not of length 1 - - See Also - -------- - expand_dims : The inverse operation, adding singleton dimensions - reshape : Insert, remove, and combine dimensions, and resize existing ones - - Examples - -------- - >>> x = np.array([[[0], [1], [2]]]) - >>> x.shape - (1, 3, 1) - >>> np.squeeze(x).shape - (3,) - >>> np.squeeze(x, axis=0).shape - (3, 1) - >>> np.squeeze(x, axis=1).shape - Traceback (most recent call last): - ... - ValueError: cannot select an axis to squeeze out which has size not equal to one - >>> np.squeeze(x, axis=2).shape - (1, 3) - - """ - try: - squeeze = a.squeeze - except AttributeError: - return _wrapit(a, 'squeeze', axis=axis) - if axis is None: - return squeeze() - else: - return squeeze(axis=axis) - - -def _diagonal_dispatcher(a, offset=None, axis1=None, axis2=None): - return (a,) - - -@array_function_dispatch(_diagonal_dispatcher) -def diagonal(a, offset=0, axis1=0, axis2=1): - """ - Return specified diagonals. - - If `a` is 2-D, returns the diagonal of `a` with the given offset, - i.e., the collection of elements of the form ``a[i, i+offset]``. If - `a` has more than two dimensions, then the axes specified by `axis1` - and `axis2` are used to determine the 2-D sub-array whose diagonal is - returned. The shape of the resulting array can be determined by - removing `axis1` and `axis2` and appending an index to the right equal - to the size of the resulting diagonals. - - In versions of NumPy prior to 1.7, this function always returned a new, - independent array containing a copy of the values in the diagonal. - - In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal, - but depending on this fact is deprecated. 
Writing to the resulting - array continues to work as it used to, but a FutureWarning is issued. - - Starting in NumPy 1.9 it returns a read-only view on the original array. - Attempting to write to the resulting array will produce an error. - - In some future release, it will return a read/write view and writing to - the returned array will alter your original array. The returned array - will have the same type as the input array. - - If you don't write to the array returned by this function, then you can - just ignore all of the above. - - If you depend on the current behavior, then we suggest copying the - returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead - of just ``np.diagonal(a)``. This will work with both past and future - versions of NumPy. - - Parameters - ---------- - a : array_like - Array from which the diagonals are taken. - offset : int, optional - Offset of the diagonal from the main diagonal. Can be positive or - negative. Defaults to main diagonal (0). - axis1 : int, optional - Axis to be used as the first axis of the 2-D sub-arrays from which - the diagonals should be taken. Defaults to first axis (0). - axis2 : int, optional - Axis to be used as the second axis of the 2-D sub-arrays from - which the diagonals should be taken. Defaults to second axis (1). - - Returns - ------- - array_of_diagonals : ndarray - If `a` is 2-D, then a 1-D array containing the diagonal and of the - same type as `a` is returned unless `a` is a `matrix`, in which case - a 1-D array rather than a (2-D) `matrix` is returned in order to - maintain backward compatibility. - - If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2` - are removed, and a new axis inserted at the end corresponding to the - diagonal. - - Raises - ------ - ValueError - If the dimension of `a` is less than 2. - - See Also - -------- - diag : MATLAB work-a-like for 1-D and 2-D arrays. - diagflat : Create diagonal arrays. - trace : Sum along diagonals. 
- - Examples - -------- - >>> a = np.arange(4).reshape(2,2) - >>> a - array([[0, 1], - [2, 3]]) - >>> a.diagonal() - array([0, 3]) - >>> a.diagonal(1) - array([1]) - - A 3-D example: - - >>> a = np.arange(8).reshape(2,2,2); a - array([[[0, 1], - [2, 3]], - [[4, 5], - [6, 7]]]) - >>> a.diagonal(0, # Main diagonals of two arrays created by skipping - ... 0, # across the outer(left)-most axis last and - ... 1) # the "middle" (row) axis first. - array([[0, 6], - [1, 7]]) - - The sub-arrays whose main diagonals we just obtained; note that each - corresponds to fixing the right-most (column) axis, and that the - diagonals are "packed" in rows. - - >>> a[:,:,0] # main diagonal is [0 6] - array([[0, 2], - [4, 6]]) - >>> a[:,:,1] # main diagonal is [1 7] - array([[1, 3], - [5, 7]]) - - The anti-diagonal can be obtained by reversing the order of elements - using either `numpy.flipud` or `numpy.fliplr`. - - >>> a = np.arange(9).reshape(3, 3) - >>> a - array([[0, 1, 2], - [3, 4, 5], - [6, 7, 8]]) - >>> np.fliplr(a).diagonal() # Horizontal flip - array([2, 4, 6]) - >>> np.flipud(a).diagonal() # Vertical flip - array([6, 4, 2]) - - Note that the order in which the diagonal is retrieved varies depending - on the flip function. - """ - if isinstance(a, np.matrix): - # Make diagonal of matrix 1-D to preserve backward compatibility. - return asarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2) - else: - return asanyarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2) - - -def _trace_dispatcher( - a, offset=None, axis1=None, axis2=None, dtype=None, out=None): - return (a, out) - - -@array_function_dispatch(_trace_dispatcher) -def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None): - """ - Return the sum along diagonals of the array. - - If `a` is 2-D, the sum along its diagonal with the given offset - is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i. 
- - If `a` has more than two dimensions, then the axes specified by axis1 and - axis2 are used to determine the 2-D sub-arrays whose traces are returned. - The shape of the resulting array is the same as that of `a` with `axis1` - and `axis2` removed. - - Parameters - ---------- - a : array_like - Input array, from which the diagonals are taken. - offset : int, optional - Offset of the diagonal from the main diagonal. Can be both positive - and negative. Defaults to 0. - axis1, axis2 : int, optional - Axes to be used as the first and second axis of the 2-D sub-arrays - from which the diagonals should be taken. Defaults are the first two - axes of `a`. - dtype : dtype, optional - Determines the data-type of the returned array and of the accumulator - where the elements are summed. If dtype has the value None and `a` is - of integer type of precision less than the default integer - precision, then the default integer precision is used. Otherwise, - the precision is the same as that of `a`. - out : ndarray, optional - Array into which the output is placed. Its type is preserved and - it must be of the right shape to hold the output. - - Returns - ------- - sum_along_diagonals : ndarray - If `a` is 2-D, the sum along the diagonal is returned. If `a` has - larger dimensions, then an array of sums along diagonals is returned. - - See Also - -------- - diag, diagonal, diagflat - - Examples - -------- - >>> np.trace(np.eye(3)) - 3.0 - >>> a = np.arange(8).reshape((2,2,2)) - >>> np.trace(a) - array([6, 8]) - - >>> a = np.arange(24).reshape((2,2,2,3)) - >>> np.trace(a).shape - (2, 3) - - """ - if isinstance(a, np.matrix): - # Get trace of matrix via an array to preserve backward compatibility. 
- return asarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out) - else: - return asanyarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out) - - -def _ravel_dispatcher(a, order=None): - return (a,) - - -@array_function_dispatch(_ravel_dispatcher) -def ravel(a, order='C'): - """Return a contiguous flattened array. - - A 1-D array, containing the elements of the input, is returned. A copy is - made only if needed. - - As of NumPy 1.10, the returned array will have the same type as the input - array. (for example, a masked array will be returned for a masked array - input) - - Parameters - ---------- - a : array_like - Input array. The elements in `a` are read in the order specified by - `order`, and packed as a 1-D array. - order : {'C','F', 'A', 'K'}, optional - - The elements of `a` are read using this index order. 'C' means - to index the elements in row-major, C-style order, - with the last axis index changing fastest, back to the first - axis index changing slowest. 'F' means to index the elements - in column-major, Fortran-style order, with the - first index changing fastest, and the last index changing - slowest. Note that the 'C' and 'F' options take no account of - the memory layout of the underlying array, and only refer to - the order of axis indexing. 'A' means to read the elements in - Fortran-like index order if `a` is Fortran *contiguous* in - memory, C-like order otherwise. 'K' means to read the - elements in the order they occur in memory, except for - reversing the data when strides are negative. By default, 'C' - index order is used. - - Returns - ------- - y : array_like - y is an array of the same subtype as `a`, with shape ``(a.size,)``. - Note that matrices are special cased for backward compatibility, if `a` - is a matrix, then y is a 1-D ndarray. - - See Also - -------- - ndarray.flat : 1-D iterator over an array. 
- ndarray.flatten : 1-D array copy of the elements of an array - in row-major order. - ndarray.reshape : Change the shape of an array without changing its data. - - Notes - ----- - In row-major, C-style order, in two dimensions, the row index - varies the slowest, and the column index the quickest. This can - be generalized to multiple dimensions, where row-major order - implies that the index along the first axis varies slowest, and - the index along the last quickest. The opposite holds for - column-major, Fortran-style index ordering. - - When a view is desired in as many cases as possible, ``arr.reshape(-1)`` - may be preferable. - - Examples - -------- - It is equivalent to ``reshape(-1, order=order)``. - - >>> x = np.array([[1, 2, 3], [4, 5, 6]]) - >>> np.ravel(x) - array([1, 2, 3, 4, 5, 6]) - - >>> x.reshape(-1) - array([1, 2, 3, 4, 5, 6]) - - >>> np.ravel(x, order='F') - array([1, 4, 2, 5, 3, 6]) - - When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering: - - >>> np.ravel(x.T) - array([1, 4, 2, 5, 3, 6]) - >>> np.ravel(x.T, order='A') - array([1, 2, 3, 4, 5, 6]) - - When ``order`` is 'K', it will preserve orderings that are neither 'C' - nor 'F', but won't reverse axes: - - >>> a = np.arange(3)[::-1]; a - array([2, 1, 0]) - >>> a.ravel(order='C') - array([2, 1, 0]) - >>> a.ravel(order='K') - array([2, 1, 0]) - - >>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a - array([[[ 0, 2, 4], - [ 1, 3, 5]], - [[ 6, 8, 10], - [ 7, 9, 11]]]) - >>> a.ravel(order='C') - array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11]) - >>> a.ravel(order='K') - array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) - - """ - if isinstance(a, np.matrix): - return asarray(a).ravel(order=order) - else: - return asanyarray(a).ravel(order=order) - - -def _nonzero_dispatcher(a): - return (a,) - - -@array_function_dispatch(_nonzero_dispatcher) -def nonzero(a): - """ - Return the indices of the elements that are non-zero. 
- - Returns a tuple of arrays, one for each dimension of `a`, - containing the indices of the non-zero elements in that - dimension. The values in `a` are always tested and returned in - row-major, C-style order. - - To group the indices by element, rather than dimension, use `argwhere`, - which returns a row for each non-zero element. - - .. note:: - - When called on a zero-d array or scalar, ``nonzero(a)`` is treated - as ``nonzero(atleast1d(a))``. - - .. deprecated:: 1.17.0 - - Use `atleast1d` explicitly if this behavior is deliberate. - - Parameters - ---------- - a : array_like - Input array. - - Returns - ------- - tuple_of_arrays : tuple - Indices of elements that are non-zero. - - See Also - -------- - flatnonzero : - Return indices that are non-zero in the flattened version of the input - array. - ndarray.nonzero : - Equivalent ndarray method. - count_nonzero : - Counts the number of non-zero elements in the input array. - - Notes - ----- - While the nonzero values can be obtained with ``a[nonzero(a)]``, it is - recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which - will correctly handle 0-d arrays. - - Examples - -------- - >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]]) - >>> x - array([[3, 0, 0], - [0, 4, 0], - [5, 6, 0]]) - >>> np.nonzero(x) - (array([0, 1, 2, 2]), array([0, 1, 0, 1])) - - >>> x[np.nonzero(x)] - array([3, 4, 5, 6]) - >>> np.transpose(np.nonzero(x)) - array([[0, 0], - [1, 1], - [2, 0], - [2, 1]]) - - A common use for ``nonzero`` is to find the indices of an array, where - a condition is True. Given an array `a`, the condition `a` > 3 is a - boolean array and since False is interpreted as 0, np.nonzero(a > 3) - yields the indices of the `a` where the condition is true. 
- - >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) - >>> a > 3 - array([[False, False, False], - [ True, True, True], - [ True, True, True]]) - >>> np.nonzero(a > 3) - (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) - - Using this result to index `a` is equivalent to using the mask directly: - - >>> a[np.nonzero(a > 3)] - array([4, 5, 6, 7, 8, 9]) - >>> a[a > 3] # prefer this spelling - array([4, 5, 6, 7, 8, 9]) - - ``nonzero`` can also be called as a method of the array. - - >>> (a > 3).nonzero() - (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) - - """ - return _wrapfunc(a, 'nonzero') - - -def _shape_dispatcher(a): - return (a,) - - -@array_function_dispatch(_shape_dispatcher) -def shape(a): - """ - Return the shape of an array. - - Parameters - ---------- - a : array_like - Input array. - - Returns - ------- - shape : tuple of ints - The elements of the shape tuple give the lengths of the - corresponding array dimensions. - - See Also - -------- - alen - ndarray.shape : Equivalent array method. - - Examples - -------- - >>> np.shape(np.eye(3)) - (3, 3) - >>> np.shape([[1, 2]]) - (1, 2) - >>> np.shape([0]) - (1,) - >>> np.shape(0) - () - - >>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) - >>> np.shape(a) - (2,) - >>> a.shape - (2,) - - """ - try: - result = a.shape - except AttributeError: - result = asarray(a).shape - return result - - -def _compress_dispatcher(condition, a, axis=None, out=None): - return (condition, a, out) - - -@array_function_dispatch(_compress_dispatcher) -def compress(condition, a, axis=None, out=None): - """ - Return selected slices of an array along given axis. - - When working along a given axis, a slice along that axis is returned in - `output` for each index where `condition` evaluates to True. When - working on a 1-D array, `compress` is equivalent to `extract`. - - Parameters - ---------- - condition : 1-D array of bools - Array that selects which entries to return. 
If len(condition) - is less than the size of `a` along the given axis, then output is - truncated to the length of the condition array. - a : array_like - Array from which to extract a part. - axis : int, optional - Axis along which to take slices. If None (default), work on the - flattened array. - out : ndarray, optional - Output array. Its type is preserved and it must be of the right - shape to hold the output. - - Returns - ------- - compressed_array : ndarray - A copy of `a` without the slices along axis for which `condition` - is false. - - See Also - -------- - take, choose, diag, diagonal, select - ndarray.compress : Equivalent method in ndarray - np.extract: Equivalent method when working on 1-D arrays - ufuncs-output-type - - Examples - -------- - >>> a = np.array([[1, 2], [3, 4], [5, 6]]) - >>> a - array([[1, 2], - [3, 4], - [5, 6]]) - >>> np.compress([0, 1], a, axis=0) - array([[3, 4]]) - >>> np.compress([False, True, True], a, axis=0) - array([[3, 4], - [5, 6]]) - >>> np.compress([False, True], a, axis=1) - array([[2], - [4], - [6]]) - - Working on the flattened array does not return slices along an axis but - selects elements. - - >>> np.compress([False, True], a) - array([2]) - - """ - return _wrapfunc(a, 'compress', condition, axis=axis, out=out) - - -def _clip_dispatcher(a, a_min, a_max, out=None, **kwargs): - return (a, a_min, a_max) - - -@array_function_dispatch(_clip_dispatcher) -def clip(a, a_min, a_max, out=None, **kwargs): - """ - Clip (limit) the values in an array. - - Given an interval, values outside the interval are clipped to - the interval edges. For example, if an interval of ``[0, 1]`` - is specified, values smaller than 0 become 0, and values larger - than 1 become 1. - - Equivalent to but faster than ``np.maximum(a_min, np.minimum(a, a_max))``. - No check is performed to ensure ``a_min < a_max``. - - Parameters - ---------- - a : array_like - Array containing elements to clip. 
- a_min : scalar or array_like or None - Minimum value. If None, clipping is not performed on lower - interval edge. Not more than one of `a_min` and `a_max` may be - None. - a_max : scalar or array_like or None - Maximum value. If None, clipping is not performed on upper - interval edge. Not more than one of `a_min` and `a_max` may be - None. If `a_min` or `a_max` are array_like, then the three - arrays will be broadcasted to match their shapes. - out : ndarray, optional - The results will be placed in this array. It may be the input - array for in-place clipping. `out` must be of the right shape - to hold the output. Its type is preserved. - **kwargs - For other keyword-only arguments, see the - :ref:`ufunc docs `. - - .. versionadded:: 1.17.0 - - Returns - ------- - clipped_array : ndarray - An array with the elements of `a`, but where values - < `a_min` are replaced with `a_min`, and those > `a_max` - with `a_max`. - - See Also - -------- - ufuncs-output-type - - Examples - -------- - >>> a = np.arange(10) - >>> np.clip(a, 1, 8) - array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8]) - >>> a - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - >>> np.clip(a, 3, 6, out=a) - array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6]) - >>> a = np.arange(10) - >>> a - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - >>> np.clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8) - array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8]) - - """ - return _wrapfunc(a, 'clip', a_min, a_max, out=out, **kwargs) - - -def _sum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, - initial=None, where=None): - return (a, out) - - -@array_function_dispatch(_sum_dispatcher) -def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, - initial=np._NoValue, where=np._NoValue): - """ - Sum of array elements over a given axis. - - Parameters - ---------- - a : array_like - Elements to sum. - axis : None or int or tuple of ints, optional - Axis or axes along which a sum is performed. 
The default, - axis=None, will sum all of the elements of the input array. If - axis is negative it counts from the last to the first axis. - - .. versionadded:: 1.7.0 - - If axis is a tuple of ints, a sum is performed on all of the axes - specified in the tuple instead of a single axis or all the axes as - before. - dtype : dtype, optional - The type of the returned array and of the accumulator in which the - elements are summed. The dtype of `a` is used by default unless `a` - has an integer dtype of less precision than the default platform - integer. In that case, if `a` is signed then the platform integer - is used while if `a` is unsigned then an unsigned integer of the - same precision as the platform integer is used. - out : ndarray, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output, but the type of the output - values will be cast if necessary. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the input array. - - If the default value is passed, then `keepdims` will not be - passed through to the `sum` method of sub-classes of - `ndarray`, however any non-default value will be. If the - sub-class' method does not implement `keepdims` any - exceptions will be raised. - initial : scalar, optional - Starting value for the sum. See `~numpy.ufunc.reduce` for details. - - .. versionadded:: 1.15.0 - - where : array_like of bool, optional - Elements to include in the sum. See `~numpy.ufunc.reduce` for details. - - .. versionadded:: 1.17.0 - - Returns - ------- - sum_along_axis : ndarray - An array with the same shape as `a`, with the specified - axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar - is returned. If an output array is specified, a reference to - `out` is returned. 
- - See Also - -------- - ndarray.sum : Equivalent method. - - add.reduce : Equivalent functionality of `add`. - - cumsum : Cumulative sum of array elements. - - trapz : Integration of array values using the composite trapezoidal rule. - - mean, average - - Notes - ----- - Arithmetic is modular when using integer types, and no error is - raised on overflow. - - The sum of an empty array is the neutral element 0: - - >>> np.sum([]) - 0.0 - - For floating point numbers the numerical precision of sum (and - ``np.add.reduce``) is in general limited by directly adding each number - individually to the result causing rounding errors in every step. - However, often numpy will use a numerically better approach (partial - pairwise summation) leading to improved precision in many use-cases. - This improved precision is always provided when no ``axis`` is given. - When ``axis`` is given, it will depend on which axis is summed. - Technically, to provide the best speed possible, the improved precision - is only used when the summation is along the fast axis in memory. - Note that the exact precision may vary depending on other parameters. - In contrast to NumPy, Python's ``math.fsum`` function uses a slower but - more precise approach to summation. - Especially when summing a large number of lower precision floating point - numbers, such as ``float32``, numerical errors can become significant. - In such cases it can be advisable to use `dtype="float64"` to use a higher - precision for the output. 
- - Examples - -------- - >>> np.sum([0.5, 1.5]) - 2.0 - >>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32) - 1 - >>> np.sum([[0, 1], [0, 5]]) - 6 - >>> np.sum([[0, 1], [0, 5]], axis=0) - array([0, 6]) - >>> np.sum([[0, 1], [0, 5]], axis=1) - array([1, 5]) - >>> np.sum([[0, 1], [np.nan, 5]], where=[False, True], axis=1) - array([1., 5.]) - - If the accumulator is too small, overflow occurs: - - >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8) - -128 - - You can also start the sum with a value other than zero: - - >>> np.sum([10], initial=5) - 15 - """ - if isinstance(a, _gentype): - # 2018-02-25, 1.15.0 - warnings.warn( - "Calling np.sum(generator) is deprecated, and in the future will give a different result. " - "Use np.sum(np.fromiter(generator)) or the python sum builtin instead.", - DeprecationWarning, stacklevel=3) - - res = _sum_(a) - if out is not None: - out[...] = res - return out - return res - - return _wrapreduction(a, np.add, 'sum', axis, dtype, out, keepdims=keepdims, - initial=initial, where=where) - - -def _any_dispatcher(a, axis=None, out=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_any_dispatcher) -def any(a, axis=None, out=None, keepdims=np._NoValue): - """ - Test whether any array element along a given axis evaluates to True. - - Returns single boolean unless `axis` is not ``None`` - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - axis : None or int or tuple of ints, optional - Axis or axes along which a logical OR reduction is performed. - The default (``axis=None``) is to perform a logical OR over all - the dimensions of the input array. `axis` may be negative, in - which case it counts from the last to the first axis. - - .. versionadded:: 1.7.0 - - If this is a tuple of ints, a reduction is performed on multiple - axes, instead of a single axis or all the axes as before. - out : ndarray, optional - Alternate output array in which to place the result. 
It must have - the same shape as the expected output and its type is preserved - (e.g., if it is of type float, then it will remain so, returning - 1.0 for True and 0.0 for False, regardless of the type of `a`). - See `ufuncs-output-type` for more details. - - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the input array. - - If the default value is passed, then `keepdims` will not be - passed through to the `any` method of sub-classes of - `ndarray`, however any non-default value will be. If the - sub-class' method does not implement `keepdims` any - exceptions will be raised. - - Returns - ------- - any : bool or ndarray - A new boolean or `ndarray` is returned unless `out` is specified, - in which case a reference to `out` is returned. - - See Also - -------- - ndarray.any : equivalent method - - all : Test whether all elements along a given axis evaluate to True. - - Notes - ----- - Not a Number (NaN), positive infinity and negative infinity evaluate - to `True` because these are not equal to zero. - - Examples - -------- - >>> np.any([[True, False], [True, True]]) - True - - >>> np.any([[True, False], [False, False]], axis=0) - array([ True, False]) - - >>> np.any([-1, 0, 5]) - True - - >>> np.any(np.nan) - True - - >>> o=np.array(False) - >>> z=np.any([-1, 4, 5], out=o) - >>> z, o - (array(True), array(True)) - >>> # Check now that z is a reference to o - >>> z is o - True - >>> id(z), id(o) # identity of z and o # doctest: +SKIP - (191614240, 191614240) - - """ - return _wrapreduction(a, np.logical_or, 'any', axis, None, out, keepdims=keepdims) - - -def _all_dispatcher(a, axis=None, out=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_all_dispatcher) -def all(a, axis=None, out=None, keepdims=np._NoValue): - """ - Test whether all array elements along a given axis evaluate to True. 
- - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - axis : None or int or tuple of ints, optional - Axis or axes along which a logical AND reduction is performed. - The default (``axis=None``) is to perform a logical AND over all - the dimensions of the input array. `axis` may be negative, in - which case it counts from the last to the first axis. - - .. versionadded:: 1.7.0 - - If this is a tuple of ints, a reduction is performed on multiple - axes, instead of a single axis or all the axes as before. - out : ndarray, optional - Alternate output array in which to place the result. - It must have the same shape as the expected output and its - type is preserved (e.g., if ``dtype(out)`` is float, the result - will consist of 0.0's and 1.0's). See `ufuncs-output-type` for more - details. - - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the input array. - - If the default value is passed, then `keepdims` will not be - passed through to the `all` method of sub-classes of - `ndarray`, however any non-default value will be. If the - sub-class' method does not implement `keepdims` any - exceptions will be raised. - - Returns - ------- - all : ndarray, bool - A new boolean or array is returned unless `out` is specified, - in which case a reference to `out` is returned. - - See Also - -------- - ndarray.all : equivalent method - - any : Test whether any element along a given axis evaluates to True. - - Notes - ----- - Not a Number (NaN), positive infinity and negative infinity - evaluate to `True` because these are not equal to zero. 
- - Examples - -------- - >>> np.all([[True,False],[True,True]]) - False - - >>> np.all([[True,False],[True,True]], axis=0) - array([ True, False]) - - >>> np.all([-1, 4, 5]) - True - - >>> np.all([1.0, np.nan]) - True - - >>> o=np.array(False) - >>> z=np.all([-1, 4, 5], out=o) - >>> id(z), id(o), z - (28293632, 28293632, array(True)) # may vary - - """ - return _wrapreduction(a, np.logical_and, 'all', axis, None, out, keepdims=keepdims) - - -def _cumsum_dispatcher(a, axis=None, dtype=None, out=None): - return (a, out) - - -@array_function_dispatch(_cumsum_dispatcher) -def cumsum(a, axis=None, dtype=None, out=None): - """ - Return the cumulative sum of the elements along a given axis. - - Parameters - ---------- - a : array_like - Input array. - axis : int, optional - Axis along which the cumulative sum is computed. The default - (None) is to compute the cumsum over the flattened array. - dtype : dtype, optional - Type of the returned array and of the accumulator in which the - elements are summed. If `dtype` is not specified, it defaults - to the dtype of `a`, unless `a` has an integer dtype with a - precision less than that of the default platform integer. In - that case, the default platform integer is used. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output - but the type will be cast if necessary. See `ufuncs-output-type` for - more details. - - Returns - ------- - cumsum_along_axis : ndarray. - A new array holding the result is returned unless `out` is - specified, in which case a reference to `out` is returned. The - result has the same size as `a`, and the same shape as `a` if - `axis` is not None or `a` is a 1-d array. - - - See Also - -------- - sum : Sum array elements. - - trapz : Integration of array values using the composite trapezoidal rule. - - diff : Calculate the n-th discrete difference along given axis. 
- - Notes - ----- - Arithmetic is modular when using integer types, and no error is - raised on overflow. - - Examples - -------- - >>> a = np.array([[1,2,3], [4,5,6]]) - >>> a - array([[1, 2, 3], - [4, 5, 6]]) - >>> np.cumsum(a) - array([ 1, 3, 6, 10, 15, 21]) - >>> np.cumsum(a, dtype=float) # specifies type of output value(s) - array([ 1., 3., 6., 10., 15., 21.]) - - >>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns - array([[1, 2, 3], - [5, 7, 9]]) - >>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows - array([[ 1, 3, 6], - [ 4, 9, 15]]) - - """ - return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out) - - -def _ptp_dispatcher(a, axis=None, out=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_ptp_dispatcher) -def ptp(a, axis=None, out=None, keepdims=np._NoValue): - """ - Range of values (maximum - minimum) along an axis. - - The name of the function comes from the acronym for 'peak to peak'. - - Parameters - ---------- - a : array_like - Input values. - axis : None or int or tuple of ints, optional - Axis along which to find the peaks. By default, flatten the - array. `axis` may be negative, in - which case it counts from the last to the first axis. - - .. versionadded:: 1.15.0 - - If this is a tuple of ints, a reduction is performed on multiple - axes, instead of a single axis or all the axes as before. - out : array_like - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output, - but the type of the output values will be cast if necessary. - - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the input array. - - If the default value is passed, then `keepdims` will not be - passed through to the `ptp` method of sub-classes of - `ndarray`, however any non-default value will be. 
If the - sub-class' method does not implement `keepdims` any - exceptions will be raised. - - Returns - ------- - ptp : ndarray - A new array holding the result, unless `out` was - specified, in which case a reference to `out` is returned. - - Examples - -------- - >>> x = np.arange(4).reshape((2,2)) - >>> x - array([[0, 1], - [2, 3]]) - - >>> np.ptp(x, axis=0) - array([2, 2]) - - >>> np.ptp(x, axis=1) - array([1, 1]) - - """ - kwargs = {} - if keepdims is not np._NoValue: - kwargs['keepdims'] = keepdims - if type(a) is not mu.ndarray: - try: - ptp = a.ptp - except AttributeError: - pass - else: - return ptp(axis=axis, out=out, **kwargs) - return _methods._ptp(a, axis=axis, out=out, **kwargs) - - -def _amax_dispatcher(a, axis=None, out=None, keepdims=None, initial=None, - where=None): - return (a, out) - - -@array_function_dispatch(_amax_dispatcher) -def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, - where=np._NoValue): - """ - Return the maximum of an array or maximum along an axis. - - Parameters - ---------- - a : array_like - Input data. - axis : None or int or tuple of ints, optional - Axis or axes along which to operate. By default, flattened input is - used. - - .. versionadded:: 1.7.0 - - If this is a tuple of ints, the maximum is selected over multiple axes, - instead of a single axis or all the axes as before. - out : ndarray, optional - Alternative output array in which to place the result. Must - be of the same shape and buffer length as the expected output. - See `ufuncs-output-type` for more details. - - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the input array. - - If the default value is passed, then `keepdims` will not be - passed through to the `amax` method of sub-classes of - `ndarray`, however any non-default value will be. 
If the - sub-class' method does not implement `keepdims` any - exceptions will be raised. - - initial : scalar, optional - The minimum value of an output element. Must be present to allow - computation on empty slice. See `~numpy.ufunc.reduce` for details. - - .. versionadded:: 1.15.0 - - where : array_like of bool, optional - Elements to compare for the maximum. See `~numpy.ufunc.reduce` - for details. - - .. versionadded:: 1.17.0 - - Returns - ------- - amax : ndarray or scalar - Maximum of `a`. If `axis` is None, the result is a scalar value. - If `axis` is given, the result is an array of dimension - ``a.ndim - 1``. - - See Also - -------- - amin : - The minimum value of an array along a given axis, propagating any NaNs. - nanmax : - The maximum value of an array along a given axis, ignoring any NaNs. - maximum : - Element-wise maximum of two arrays, propagating any NaNs. - fmax : - Element-wise maximum of two arrays, ignoring any NaNs. - argmax : - Return the indices of the maximum values. - - nanmin, minimum, fmin - - Notes - ----- - NaN values are propagated, that is if at least one item is NaN, the - corresponding max value will be NaN as well. To ignore NaN values - (MATLAB behavior), please use nanmax. - - Don't use `amax` for element-wise comparison of 2 arrays; when - ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than - ``amax(a, axis=0)``. 
- - Examples - -------- - >>> a = np.arange(4).reshape((2,2)) - >>> a - array([[0, 1], - [2, 3]]) - >>> np.amax(a) # Maximum of the flattened array - 3 - >>> np.amax(a, axis=0) # Maxima along the first axis - array([2, 3]) - >>> np.amax(a, axis=1) # Maxima along the second axis - array([1, 3]) - >>> np.amax(a, where=[False, True], initial=-1, axis=0) - array([-1, 3]) - >>> b = np.arange(5, dtype=float) - >>> b[2] = np.NaN - >>> np.amax(b) - nan - >>> np.amax(b, where=~np.isnan(b), initial=-1) - 4.0 - >>> np.nanmax(b) - 4.0 - - You can use an initial value to compute the maximum of an empty slice, or - to initialize it to a different value: - - >>> np.max([[-50], [10]], axis=-1, initial=0) - array([ 0, 10]) - - Notice that the initial value is used as one of the elements for which the - maximum is determined, unlike for the default argument Python's max - function, which is only used for empty iterables. - - >>> np.max([5], initial=6) - 6 - >>> max([5], default=6) - 5 - """ - return _wrapreduction(a, np.maximum, 'max', axis, None, out, - keepdims=keepdims, initial=initial, where=where) - - -def _amin_dispatcher(a, axis=None, out=None, keepdims=None, initial=None, - where=None): - return (a, out) - - -@array_function_dispatch(_amin_dispatcher) -def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, - where=np._NoValue): - """ - Return the minimum of an array or minimum along an axis. - - Parameters - ---------- - a : array_like - Input data. - axis : None or int or tuple of ints, optional - Axis or axes along which to operate. By default, flattened input is - used. - - .. versionadded:: 1.7.0 - - If this is a tuple of ints, the minimum is selected over multiple axes, - instead of a single axis or all the axes as before. - out : ndarray, optional - Alternative output array in which to place the result. Must - be of the same shape and buffer length as the expected output. - See `ufuncs-output-type` for more details. 
- - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the input array. - - If the default value is passed, then `keepdims` will not be - passed through to the `amin` method of sub-classes of - `ndarray`, however any non-default value will be. If the - sub-class' method does not implement `keepdims` any - exceptions will be raised. - - initial : scalar, optional - The maximum value of an output element. Must be present to allow - computation on empty slice. See `~numpy.ufunc.reduce` for details. - - .. versionadded:: 1.15.0 - - where : array_like of bool, optional - Elements to compare for the minimum. See `~numpy.ufunc.reduce` - for details. - - .. versionadded:: 1.17.0 - - Returns - ------- - amin : ndarray or scalar - Minimum of `a`. If `axis` is None, the result is a scalar value. - If `axis` is given, the result is an array of dimension - ``a.ndim - 1``. - - See Also - -------- - amax : - The maximum value of an array along a given axis, propagating any NaNs. - nanmin : - The minimum value of an array along a given axis, ignoring any NaNs. - minimum : - Element-wise minimum of two arrays, propagating any NaNs. - fmin : - Element-wise minimum of two arrays, ignoring any NaNs. - argmin : - Return the indices of the minimum values. - - nanmax, maximum, fmax - - Notes - ----- - NaN values are propagated, that is if at least one item is NaN, the - corresponding min value will be NaN as well. To ignore NaN values - (MATLAB behavior), please use nanmin. - - Don't use `amin` for element-wise comparison of 2 arrays; when - ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than - ``amin(a, axis=0)``. 
- - Examples - -------- - >>> a = np.arange(4).reshape((2,2)) - >>> a - array([[0, 1], - [2, 3]]) - >>> np.amin(a) # Minimum of the flattened array - 0 - >>> np.amin(a, axis=0) # Minima along the first axis - array([0, 1]) - >>> np.amin(a, axis=1) # Minima along the second axis - array([0, 2]) - >>> np.amin(a, where=[False, True], initial=10, axis=0) - array([10, 1]) - - >>> b = np.arange(5, dtype=float) - >>> b[2] = np.NaN - >>> np.amin(b) - nan - >>> np.amin(b, where=~np.isnan(b), initial=10) - 0.0 - >>> np.nanmin(b) - 0.0 - - >>> np.min([[-50], [10]], axis=-1, initial=0) - array([-50, 0]) - - Notice that the initial value is used as one of the elements for which the - minimum is determined, unlike for the default argument Python's max - function, which is only used for empty iterables. - - Notice that this isn't the same as Python's ``default`` argument. - - >>> np.min([6], initial=5) - 5 - >>> min([6], default=5) - 6 - """ - return _wrapreduction(a, np.minimum, 'min', axis, None, out, - keepdims=keepdims, initial=initial, where=where) - - -def _alen_dispathcer(a): - return (a,) - - -@array_function_dispatch(_alen_dispathcer) -def alen(a): - """ - Return the length of the first dimension of the input array. - - Parameters - ---------- - a : array_like - Input array. - - Returns - ------- - alen : int - Length of the first dimension of `a`. 
- - See Also - -------- - shape, size - - Examples - -------- - >>> a = np.zeros((7,4,5)) - >>> a.shape[0] - 7 - >>> np.alen(a) - 7 - - """ - # NumPy 1.18.0, 2019-08-02 - warnings.warn( - "`np.alen` is deprecated, use `len` instead", - DeprecationWarning, stacklevel=2) - try: - return len(a) - except TypeError: - return len(array(a, ndmin=1)) - - -def _prod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, - initial=None, where=None): - return (a, out) - - -@array_function_dispatch(_prod_dispatcher) -def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, - initial=np._NoValue, where=np._NoValue): - """ - Return the product of array elements over a given axis. - - Parameters - ---------- - a : array_like - Input data. - axis : None or int or tuple of ints, optional - Axis or axes along which a product is performed. The default, - axis=None, will calculate the product of all the elements in the - input array. If axis is negative it counts from the last to the - first axis. - - .. versionadded:: 1.7.0 - - If axis is a tuple of ints, a product is performed on all of the - axes specified in the tuple instead of a single axis or all the - axes as before. - dtype : dtype, optional - The type of the returned array, as well as of the accumulator in - which the elements are multiplied. The dtype of `a` is used by - default unless `a` has an integer dtype of less precision than the - default platform integer. In that case, if `a` is signed then the - platform integer is used while if `a` is unsigned then an unsigned - integer of the same precision as the platform integer is used. - out : ndarray, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output, but the type of the output - values will be cast if necessary. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left in the - result as dimensions with size one. 
With this option, the result - will broadcast correctly against the input array. - - If the default value is passed, then `keepdims` will not be - passed through to the `prod` method of sub-classes of - `ndarray`, however any non-default value will be. If the - sub-class' method does not implement `keepdims` any - exceptions will be raised. - initial : scalar, optional - The starting value for this product. See `~numpy.ufunc.reduce` for details. - - .. versionadded:: 1.15.0 - - where : array_like of bool, optional - Elements to include in the product. See `~numpy.ufunc.reduce` for details. - - .. versionadded:: 1.17.0 - - Returns - ------- - product_along_axis : ndarray, see `dtype` parameter above. - An array shaped as `a` but with the specified axis removed. - Returns a reference to `out` if specified. - - See Also - -------- - ndarray.prod : equivalent method - ufuncs-output-type - - Notes - ----- - Arithmetic is modular when using integer types, and no error is - raised on overflow. 
That means that, on a 32-bit platform: - - >>> x = np.array([536870910, 536870910, 536870910, 536870910]) - >>> np.prod(x) - 16 # may vary - - The product of an empty array is the neutral element 1: - - >>> np.prod([]) - 1.0 - - Examples - -------- - By default, calculate the product of all elements: - - >>> np.prod([1.,2.]) - 2.0 - - Even when the input array is two-dimensional: - - >>> np.prod([[1.,2.],[3.,4.]]) - 24.0 - - But we can also specify the axis over which to multiply: - - >>> np.prod([[1.,2.],[3.,4.]], axis=1) - array([ 2., 12.]) - - Or select specific elements to include: - - >>> np.prod([1., np.nan, 3.], where=[True, False, True]) - 3.0 - - If the type of `x` is unsigned, then the output type is - the unsigned platform integer: - - >>> x = np.array([1, 2, 3], dtype=np.uint8) - >>> np.prod(x).dtype == np.uint - True - - If `x` is of a signed integer type, then the output type - is the default platform integer: - - >>> x = np.array([1, 2, 3], dtype=np.int8) - >>> np.prod(x).dtype == int - True - - You can also start the product with a value other than one: - - >>> np.prod([1, 2], initial=5) - 10 - """ - return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out, - keepdims=keepdims, initial=initial, where=where) - - -def _cumprod_dispatcher(a, axis=None, dtype=None, out=None): - return (a, out) - - -@array_function_dispatch(_cumprod_dispatcher) -def cumprod(a, axis=None, dtype=None, out=None): - """ - Return the cumulative product of elements along a given axis. - - Parameters - ---------- - a : array_like - Input array. - axis : int, optional - Axis along which the cumulative product is computed. By default - the input is flattened. - dtype : dtype, optional - Type of the returned array, as well as of the accumulator in which - the elements are multiplied. If *dtype* is not specified, it - defaults to the dtype of `a`, unless `a` has an integer dtype with - a precision less than that of the default platform integer. 
In - that case, the default platform integer is used instead. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output - but the type of the resulting values will be cast if necessary. - - Returns - ------- - cumprod : ndarray - A new array holding the result is returned unless `out` is - specified, in which case a reference to out is returned. - - See Also - -------- - ufuncs-output-type - - Notes - ----- - Arithmetic is modular when using integer types, and no error is - raised on overflow. - - Examples - -------- - >>> a = np.array([1,2,3]) - >>> np.cumprod(a) # intermediate results 1, 1*2 - ... # total product 1*2*3 = 6 - array([1, 2, 6]) - >>> a = np.array([[1, 2, 3], [4, 5, 6]]) - >>> np.cumprod(a, dtype=float) # specify type of output - array([ 1., 2., 6., 24., 120., 720.]) - - The cumulative product for each column (i.e., over the rows) of `a`: - - >>> np.cumprod(a, axis=0) - array([[ 1, 2, 3], - [ 4, 10, 18]]) - - The cumulative product for each row (i.e. over the columns) of `a`: - - >>> np.cumprod(a,axis=1) - array([[ 1, 2, 6], - [ 4, 20, 120]]) - - """ - return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out) - - -def _ndim_dispatcher(a): - return (a,) - - -@array_function_dispatch(_ndim_dispatcher) -def ndim(a): - """ - Return the number of dimensions of an array. - - Parameters - ---------- - a : array_like - Input array. If it is not already an ndarray, a conversion is - attempted. - - Returns - ------- - number_of_dimensions : int - The number of dimensions in `a`. Scalars are zero-dimensional. 
- - See Also - -------- - ndarray.ndim : equivalent method - shape : dimensions of array - ndarray.shape : dimensions of array - - Examples - -------- - >>> np.ndim([[1,2,3],[4,5,6]]) - 2 - >>> np.ndim(np.array([[1,2,3],[4,5,6]])) - 2 - >>> np.ndim(1) - 0 - - """ - try: - return a.ndim - except AttributeError: - return asarray(a).ndim - - -def _size_dispatcher(a, axis=None): - return (a,) - - -@array_function_dispatch(_size_dispatcher) -def size(a, axis=None): - """ - Return the number of elements along a given axis. - - Parameters - ---------- - a : array_like - Input data. - axis : int, optional - Axis along which the elements are counted. By default, give - the total number of elements. - - Returns - ------- - element_count : int - Number of elements along the specified axis. - - See Also - -------- - shape : dimensions of array - ndarray.shape : dimensions of array - ndarray.size : number of elements in array - - Examples - -------- - >>> a = np.array([[1,2,3],[4,5,6]]) - >>> np.size(a) - 6 - >>> np.size(a,1) - 3 - >>> np.size(a,0) - 2 - - """ - if axis is None: - try: - return a.size - except AttributeError: - return asarray(a).size - else: - try: - return a.shape[axis] - except AttributeError: - return asarray(a).shape[axis] - - -def _around_dispatcher(a, decimals=None, out=None): - return (a, out) - - -@array_function_dispatch(_around_dispatcher) -def around(a, decimals=0, out=None): - """ - Evenly round to the given number of decimals. - - Parameters - ---------- - a : array_like - Input data. - decimals : int, optional - Number of decimal places to round to (default: 0). If - decimals is negative, it specifies the number of positions to - the left of the decimal point. - out : ndarray, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output, but the type of the output - values will be cast if necessary. See `ufuncs-output-type` for more - details. 
- - Returns - ------- - rounded_array : ndarray - An array of the same type as `a`, containing the rounded values. - Unless `out` was specified, a new array is created. A reference to - the result is returned. - - The real and imaginary parts of complex numbers are rounded - separately. The result of rounding a float is a float. - - See Also - -------- - ndarray.round : equivalent method - - ceil, fix, floor, rint, trunc - - - Notes - ----- - For values exactly halfway between rounded decimal values, NumPy - rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0, - -0.5 and 0.5 round to 0.0, etc. - - ``np.around`` uses a fast but sometimes inexact algorithm to round - floating-point datatypes. For positive `decimals` it is equivalent to - ``np.true_divide(np.rint(a * 10**decimals), 10**decimals)``, which has - error due to the inexact representation of decimal fractions in the IEEE - floating point standard [1]_ and errors introduced when scaling by powers - of ten. For instance, note the extra "1" in the following: - - >>> np.round(56294995342131.5, 3) - 56294995342131.51 - - If your goal is to print such values with a fixed number of decimals, it is - preferable to use numpy's float printing routines to limit the number of - printed decimals: - - >>> np.format_float_positional(56294995342131.5, precision=3) - '56294995342131.5' - - The float printing routines use an accurate but much more computationally - demanding algorithm to compute the number of digits after the decimal - point. - - Alternatively, Python's builtin `round` function uses a more accurate - but slower algorithm for 64-bit floating point values: - - >>> round(56294995342131.5, 3) - 56294995342131.5 - >>> np.round(16.055, 2), round(16.055, 2) # equals 16.0549999999999997 - (16.06, 16.05) - - - References - ---------- - .. [1] "Lecture Notes on the Status of IEEE 754", William Kahan, - https://people.eecs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF - .. 
[2] "How Futile are Mindless Assessments of - Roundoff in Floating-Point Computation?", William Kahan, - https://people.eecs.berkeley.edu/~wkahan/Mindless.pdf - - Examples - -------- - >>> np.around([0.37, 1.64]) - array([0., 2.]) - >>> np.around([0.37, 1.64], decimals=1) - array([0.4, 1.6]) - >>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value - array([0., 2., 2., 4., 4.]) - >>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned - array([ 1, 2, 3, 11]) - >>> np.around([1,2,3,11], decimals=-1) - array([ 0, 0, 0, 10]) - - """ - return _wrapfunc(a, 'round', decimals=decimals, out=out) - - -def _mean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_mean_dispatcher) -def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): - """ - Compute the arithmetic mean along the specified axis. - - Returns the average of the array elements. The average is taken over - the flattened array by default, otherwise over the specified axis. - `float64` intermediate and return values are used for integer inputs. - - Parameters - ---------- - a : array_like - Array containing numbers whose mean is desired. If `a` is not an - array, a conversion is attempted. - axis : None or int or tuple of ints, optional - Axis or axes along which the means are computed. The default is to - compute the mean of the flattened array. - - .. versionadded:: 1.7.0 - - If this is a tuple of ints, a mean is performed over multiple axes, - instead of a single axis or all the axes as before. - dtype : data-type, optional - Type to use in computing the mean. For integer inputs, the default - is `float64`; for floating point inputs, it is the same as the - input dtype. - out : ndarray, optional - Alternate output array in which to place the result. The default - is ``None``; if provided, it must have the same shape as the - expected output, but the type will be cast if necessary. 
- See `ufuncs-output-type` for more details. - - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the input array. - - If the default value is passed, then `keepdims` will not be - passed through to the `mean` method of sub-classes of - `ndarray`, however any non-default value will be. If the - sub-class' method does not implement `keepdims` any - exceptions will be raised. - - Returns - ------- - m : ndarray, see dtype parameter above - If `out=None`, returns a new array containing the mean values, - otherwise a reference to the output array is returned. - - See Also - -------- - average : Weighted average - std, var, nanmean, nanstd, nanvar - - Notes - ----- - The arithmetic mean is the sum of the elements along the axis divided - by the number of elements. - - Note that for floating-point input, the mean is computed using the - same precision the input has. Depending on the input data, this can - cause the results to be inaccurate, especially for `float32` (see - example below). Specifying a higher-precision accumulator using the - `dtype` keyword can alleviate this issue. - - By default, `float16` results are computed using `float32` intermediates - for extra precision. 
- - Examples - -------- - >>> a = np.array([[1, 2], [3, 4]]) - >>> np.mean(a) - 2.5 - >>> np.mean(a, axis=0) - array([2., 3.]) - >>> np.mean(a, axis=1) - array([1.5, 3.5]) - - In single precision, `mean` can be inaccurate: - - >>> a = np.zeros((2, 512*512), dtype=np.float32) - >>> a[0, :] = 1.0 - >>> a[1, :] = 0.1 - >>> np.mean(a) - 0.54999924 - - Computing the mean in float64 is more accurate: - - >>> np.mean(a, dtype=np.float64) - 0.55000000074505806 # may vary - - """ - kwargs = {} - if keepdims is not np._NoValue: - kwargs['keepdims'] = keepdims - if type(a) is not mu.ndarray: - try: - mean = a.mean - except AttributeError: - pass - else: - return mean(axis=axis, dtype=dtype, out=out, **kwargs) - - return _methods._mean(a, axis=axis, dtype=dtype, - out=out, **kwargs) - - -def _std_dispatcher( - a, axis=None, dtype=None, out=None, ddof=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_std_dispatcher) -def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): - """ - Compute the standard deviation along the specified axis. - - Returns the standard deviation, a measure of the spread of a distribution, - of the array elements. The standard deviation is computed for the - flattened array by default, otherwise over the specified axis. - - Parameters - ---------- - a : array_like - Calculate the standard deviation of these values. - axis : None or int or tuple of ints, optional - Axis or axes along which the standard deviation is computed. The - default is to compute the standard deviation of the flattened array. - - .. versionadded:: 1.7.0 - - If this is a tuple of ints, a standard deviation is performed over - multiple axes, instead of a single axis or all the axes as before. - dtype : dtype, optional - Type to use in computing the standard deviation. For arrays of - integer type the default is float64, for arrays of float types it is - the same as the array type. 
- out : ndarray, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output but the type (of the calculated - values) will be cast if necessary. - ddof : int, optional - Means Delta Degrees of Freedom. The divisor used in calculations - is ``N - ddof``, where ``N`` represents the number of elements. - By default `ddof` is zero. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the input array. - - If the default value is passed, then `keepdims` will not be - passed through to the `std` method of sub-classes of - `ndarray`, however any non-default value will be. If the - sub-class' method does not implement `keepdims` any - exceptions will be raised. - - Returns - ------- - standard_deviation : ndarray, see dtype parameter above. - If `out` is None, return a new array containing the standard deviation, - otherwise return a reference to the output array. - - See Also - -------- - var, mean, nanmean, nanstd, nanvar - ufuncs-output-type - - Notes - ----- - The standard deviation is the square root of the average of the squared - deviations from the mean, i.e., ``std = sqrt(mean(abs(x - x.mean())**2))``. - - The average squared deviation is normally calculated as - ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is specified, - the divisor ``N - ddof`` is used instead. In standard statistical - practice, ``ddof=1`` provides an unbiased estimator of the variance - of the infinite population. ``ddof=0`` provides a maximum likelihood - estimate of the variance for normally distributed variables. The - standard deviation computed in this function is the square root of - the estimated variance, so even with ``ddof=1``, it will not be an - unbiased estimate of the standard deviation per se. 
- - Note that, for complex numbers, `std` takes the absolute - value before squaring, so that the result is always real and nonnegative. - - For floating-point input, the *std* is computed using the same - precision the input has. Depending on the input data, this can cause - the results to be inaccurate, especially for float32 (see example below). - Specifying a higher-accuracy accumulator using the `dtype` keyword can - alleviate this issue. - - Examples - -------- - >>> a = np.array([[1, 2], [3, 4]]) - >>> np.std(a) - 1.1180339887498949 # may vary - >>> np.std(a, axis=0) - array([1., 1.]) - >>> np.std(a, axis=1) - array([0.5, 0.5]) - - In single precision, std() can be inaccurate: - - >>> a = np.zeros((2, 512*512), dtype=np.float32) - >>> a[0, :] = 1.0 - >>> a[1, :] = 0.1 - >>> np.std(a) - 0.45000005 - - Computing the standard deviation in float64 is more accurate: - - >>> np.std(a, dtype=np.float64) - 0.44999999925494177 # may vary - - """ - kwargs = {} - if keepdims is not np._NoValue: - kwargs['keepdims'] = keepdims - - if type(a) is not mu.ndarray: - try: - std = a.std - except AttributeError: - pass - else: - return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs) - - return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof, - **kwargs) - - -def _var_dispatcher( - a, axis=None, dtype=None, out=None, ddof=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_var_dispatcher) -def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): - """ - Compute the variance along the specified axis. - - Returns the variance of the array elements, a measure of the spread of a - distribution. The variance is computed for the flattened array by - default, otherwise over the specified axis. - - Parameters - ---------- - a : array_like - Array containing numbers whose variance is desired. If `a` is not an - array, a conversion is attempted. 
- axis : None or int or tuple of ints, optional - Axis or axes along which the variance is computed. The default is to - compute the variance of the flattened array. - - .. versionadded:: 1.7.0 - - If this is a tuple of ints, a variance is performed over multiple axes, - instead of a single axis or all the axes as before. - dtype : data-type, optional - Type to use in computing the variance. For arrays of integer type - the default is `float64`; for arrays of float types it is the same as - the array type. - out : ndarray, optional - Alternate output array in which to place the result. It must have - the same shape as the expected output, but the type is cast if - necessary. - ddof : int, optional - "Delta Degrees of Freedom": the divisor used in the calculation is - ``N - ddof``, where ``N`` represents the number of elements. By - default `ddof` is zero. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the input array. - - If the default value is passed, then `keepdims` will not be - passed through to the `var` method of sub-classes of - `ndarray`, however any non-default value will be. If the - sub-class' method does not implement `keepdims` any - exceptions will be raised. - - Returns - ------- - variance : ndarray, see dtype parameter above - If ``out=None``, returns a new array containing the variance; - otherwise, a reference to the output array is returned. - - See Also - -------- - std, mean, nanmean, nanstd, nanvar - ufuncs-output-type - - Notes - ----- - The variance is the average of the squared deviations from the mean, - i.e., ``var = mean(abs(x - x.mean())**2)``. - - The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``. - If, however, `ddof` is specified, the divisor ``N - ddof`` is used - instead. 
In standard statistical practice, ``ddof=1`` provides an - unbiased estimator of the variance of a hypothetical infinite population. - ``ddof=0`` provides a maximum likelihood estimate of the variance for - normally distributed variables. - - Note that for complex numbers, the absolute value is taken before - squaring, so that the result is always real and nonnegative. - - For floating-point input, the variance is computed using the same - precision the input has. Depending on the input data, this can cause - the results to be inaccurate, especially for `float32` (see example - below). Specifying a higher-accuracy accumulator using the ``dtype`` - keyword can alleviate this issue. - - Examples - -------- - >>> a = np.array([[1, 2], [3, 4]]) - >>> np.var(a) - 1.25 - >>> np.var(a, axis=0) - array([1., 1.]) - >>> np.var(a, axis=1) - array([0.25, 0.25]) - - In single precision, var() can be inaccurate: - - >>> a = np.zeros((2, 512*512), dtype=np.float32) - >>> a[0, :] = 1.0 - >>> a[1, :] = 0.1 - >>> np.var(a) - 0.20250003 - - Computing the variance in float64 is more accurate: - - >>> np.var(a, dtype=np.float64) - 0.20249999932944759 # may vary - >>> ((1-0.55)**2 + (0.1-0.55)**2)/2 - 0.2025 - - """ - kwargs = {} - if keepdims is not np._NoValue: - kwargs['keepdims'] = keepdims - - if type(a) is not mu.ndarray: - try: - var = a.var - - except AttributeError: - pass - else: - return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs) - - return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, - **kwargs) - - -# Aliases of other functions. These have their own definitions only so that -# they can have unique docstrings. - -@array_function_dispatch(_around_dispatcher) -def round_(a, decimals=0, out=None): - """ - Round an array to the given number of decimals. - - See Also - -------- - around : equivalent function; see for details. 
- """ - return around(a, decimals=decimals, out=out) - - -@array_function_dispatch(_prod_dispatcher, verify=False) -def product(*args, **kwargs): - """ - Return the product of array elements over a given axis. - - See Also - -------- - prod : equivalent function; see for details. - """ - return prod(*args, **kwargs) - - -@array_function_dispatch(_cumprod_dispatcher, verify=False) -def cumproduct(*args, **kwargs): - """ - Return the cumulative product over the given axis. - - See Also - -------- - cumprod : equivalent function; see for details. - """ - return cumprod(*args, **kwargs) - - -@array_function_dispatch(_any_dispatcher, verify=False) -def sometrue(*args, **kwargs): - """ - Check whether some values are true. - - Refer to `any` for full documentation. - - See Also - -------- - any : equivalent function; see for details. - """ - return any(*args, **kwargs) - - -@array_function_dispatch(_all_dispatcher, verify=False) -def alltrue(*args, **kwargs): - """ - Check if all elements of input array are true. - - See Also - -------- - numpy.all : Equivalent function; see for details. - """ - return all(*args, **kwargs) diff --git a/venv/lib/python3.7/site-packages/numpy/core/function_base.py b/venv/lib/python3.7/site-packages/numpy/core/function_base.py deleted file mode 100644 index 538ac8b..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/function_base.py +++ /dev/null @@ -1,514 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import functools -import warnings -import operator -import types - -from . 
import numeric as _nx -from .numeric import (result_type, NaN, shares_memory, MAY_SHARE_BOUNDS, - TooHardError, asanyarray, ndim) -from numpy.core.multiarray import add_docstring -from numpy.core import overrides - -__all__ = ['logspace', 'linspace', 'geomspace'] - - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - - -def _linspace_dispatcher(start, stop, num=None, endpoint=None, retstep=None, - dtype=None, axis=None): - return (start, stop) - - -@array_function_dispatch(_linspace_dispatcher) -def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, - axis=0): - """ - Return evenly spaced numbers over a specified interval. - - Returns `num` evenly spaced samples, calculated over the - interval [`start`, `stop`]. - - The endpoint of the interval can optionally be excluded. - - .. versionchanged:: 1.16.0 - Non-scalar `start` and `stop` are now supported. - - Parameters - ---------- - start : array_like - The starting value of the sequence. - stop : array_like - The end value of the sequence, unless `endpoint` is set to False. - In that case, the sequence consists of all but the last of ``num + 1`` - evenly spaced samples, so that `stop` is excluded. Note that the step - size changes when `endpoint` is False. - num : int, optional - Number of samples to generate. Default is 50. Must be non-negative. - endpoint : bool, optional - If True, `stop` is the last sample. Otherwise, it is not included. - Default is True. - retstep : bool, optional - If True, return (`samples`, `step`), where `step` is the spacing - between samples. - dtype : dtype, optional - The type of the output array. If `dtype` is not given, infer the data - type from the other input arguments. - - .. versionadded:: 1.9.0 - - axis : int, optional - The axis in the result to store the samples. Relevant only if start - or stop are array-like. By default (0), the samples will be along a - new axis inserted at the beginning. 
Use -1 to get an axis at the end. - - .. versionadded:: 1.16.0 - - Returns - ------- - samples : ndarray - There are `num` equally spaced samples in the closed interval - ``[start, stop]`` or the half-open interval ``[start, stop)`` - (depending on whether `endpoint` is True or False). - step : float, optional - Only returned if `retstep` is True - - Size of spacing between samples. - - - See Also - -------- - arange : Similar to `linspace`, but uses a step size (instead of the - number of samples). - geomspace : Similar to `linspace`, but with numbers spaced evenly on a log - scale (a geometric progression). - logspace : Similar to `geomspace`, but with the end points specified as - logarithms. - - Examples - -------- - >>> np.linspace(2.0, 3.0, num=5) - array([2. , 2.25, 2.5 , 2.75, 3. ]) - >>> np.linspace(2.0, 3.0, num=5, endpoint=False) - array([2. , 2.2, 2.4, 2.6, 2.8]) - >>> np.linspace(2.0, 3.0, num=5, retstep=True) - (array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25) - - Graphical illustration: - - >>> import matplotlib.pyplot as plt - >>> N = 8 - >>> y = np.zeros(N) - >>> x1 = np.linspace(0, 10, N, endpoint=True) - >>> x2 = np.linspace(0, 10, N, endpoint=False) - >>> plt.plot(x1, y, 'o') - [] - >>> plt.plot(x2, y + 0.5, 'o') - [] - >>> plt.ylim([-0.5, 1]) - (-0.5, 1) - >>> plt.show() - - """ - try: - num = operator.index(num) - except TypeError: - raise TypeError( - "object of type {} cannot be safely interpreted as an integer." - .format(type(num))) - - if num < 0: - raise ValueError("Number of samples, %s, must be non-negative." 
% num) - div = (num - 1) if endpoint else num - - # Convert float/complex array scalars to float, gh-3504 - # and make sure one can use variables that have an __array_interface__, gh-6634 - start = asanyarray(start) * 1.0 - stop = asanyarray(stop) * 1.0 - - dt = result_type(start, stop, float(num)) - if dtype is None: - dtype = dt - - delta = stop - start - y = _nx.arange(0, num, dtype=dt).reshape((-1,) + (1,) * ndim(delta)) - # In-place multiplication y *= delta/div is faster, but prevents the multiplicant - # from overriding what class is produced, and thus prevents, e.g. use of Quantities, - # see gh-7142. Hence, we multiply in place only for standard scalar types. - _mult_inplace = _nx.isscalar(delta) - if div > 0: - step = delta / div - if _nx.any(step == 0): - # Special handling for denormal numbers, gh-5437 - y /= div - if _mult_inplace: - y *= delta - else: - y = y * delta - else: - if _mult_inplace: - y *= step - else: - y = y * step - else: - # sequences with 0 items or 1 item with endpoint=True (i.e. div <= 0) - # have an undefined step - step = NaN - # Multiply with delta to allow possible override of output class. - y = y * delta - - y += start - - if endpoint and num > 1: - y[-1] = stop - - if axis != 0: - y = _nx.moveaxis(y, 0, axis) - - if retstep: - return y.astype(dtype, copy=False), step - else: - return y.astype(dtype, copy=False) - - -def _logspace_dispatcher(start, stop, num=None, endpoint=None, base=None, - dtype=None, axis=None): - return (start, stop) - - -@array_function_dispatch(_logspace_dispatcher) -def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, - axis=0): - """ - Return numbers spaced evenly on a log scale. - - In linear space, the sequence starts at ``base ** start`` - (`base` to the power of `start`) and ends with ``base ** stop`` - (see `endpoint` below). - - .. versionchanged:: 1.16.0 - Non-scalar `start` and `stop` are now supported. 
- - Parameters - ---------- - start : array_like - ``base ** start`` is the starting value of the sequence. - stop : array_like - ``base ** stop`` is the final value of the sequence, unless `endpoint` - is False. In that case, ``num + 1`` values are spaced over the - interval in log-space, of which all but the last (a sequence of - length `num`) are returned. - num : integer, optional - Number of samples to generate. Default is 50. - endpoint : boolean, optional - If true, `stop` is the last sample. Otherwise, it is not included. - Default is True. - base : float, optional - The base of the log space. The step size between the elements in - ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform. - Default is 10.0. - dtype : dtype - The type of the output array. If `dtype` is not given, infer the data - type from the other input arguments. - axis : int, optional - The axis in the result to store the samples. Relevant only if start - or stop are array-like. By default (0), the samples will be along a - new axis inserted at the beginning. Use -1 to get an axis at the end. - - .. versionadded:: 1.16.0 - - - Returns - ------- - samples : ndarray - `num` samples, equally spaced on a log scale. - - See Also - -------- - arange : Similar to linspace, with the step size specified instead of the - number of samples. Note that, when used with a float endpoint, the - endpoint may or may not be included. - linspace : Similar to logspace, but with the samples uniformly distributed - in linear space, instead of log space. - geomspace : Similar to logspace, but with endpoints specified directly. - - Notes - ----- - Logspace is equivalent to the code - - >>> y = np.linspace(start, stop, num=num, endpoint=endpoint) - ... # doctest: +SKIP - >>> power(base, y).astype(dtype) - ... # doctest: +SKIP - - Examples - -------- - >>> np.logspace(2.0, 3.0, num=4) - array([ 100. , 215.443469 , 464.15888336, 1000. ]) - >>> np.logspace(2.0, 3.0, num=4, endpoint=False) - array([100. 
, 177.827941 , 316.22776602, 562.34132519]) - >>> np.logspace(2.0, 3.0, num=4, base=2.0) - array([4. , 5.0396842 , 6.34960421, 8. ]) - - Graphical illustration: - - >>> import matplotlib.pyplot as plt - >>> N = 10 - >>> x1 = np.logspace(0.1, 1, N, endpoint=True) - >>> x2 = np.logspace(0.1, 1, N, endpoint=False) - >>> y = np.zeros(N) - >>> plt.plot(x1, y, 'o') - [] - >>> plt.plot(x2, y + 0.5, 'o') - [] - >>> plt.ylim([-0.5, 1]) - (-0.5, 1) - >>> plt.show() - - """ - y = linspace(start, stop, num=num, endpoint=endpoint, axis=axis) - if dtype is None: - return _nx.power(base, y) - return _nx.power(base, y).astype(dtype, copy=False) - - -def _geomspace_dispatcher(start, stop, num=None, endpoint=None, dtype=None, - axis=None): - return (start, stop) - - -@array_function_dispatch(_geomspace_dispatcher) -def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): - """ - Return numbers spaced evenly on a log scale (a geometric progression). - - This is similar to `logspace`, but with endpoints specified directly. - Each output sample is a constant multiple of the previous. - - .. versionchanged:: 1.16.0 - Non-scalar `start` and `stop` are now supported. - - Parameters - ---------- - start : array_like - The starting value of the sequence. - stop : array_like - The final value of the sequence, unless `endpoint` is False. - In that case, ``num + 1`` values are spaced over the - interval in log-space, of which all but the last (a sequence of - length `num`) are returned. - num : integer, optional - Number of samples to generate. Default is 50. - endpoint : boolean, optional - If true, `stop` is the last sample. Otherwise, it is not included. - Default is True. - dtype : dtype - The type of the output array. If `dtype` is not given, infer the data - type from the other input arguments. - axis : int, optional - The axis in the result to store the samples. Relevant only if start - or stop are array-like. 
By default (0), the samples will be along a - new axis inserted at the beginning. Use -1 to get an axis at the end. - - .. versionadded:: 1.16.0 - - Returns - ------- - samples : ndarray - `num` samples, equally spaced on a log scale. - - See Also - -------- - logspace : Similar to geomspace, but with endpoints specified using log - and base. - linspace : Similar to geomspace, but with arithmetic instead of geometric - progression. - arange : Similar to linspace, with the step size specified instead of the - number of samples. - - Notes - ----- - If the inputs or dtype are complex, the output will follow a logarithmic - spiral in the complex plane. (There are an infinite number of spirals - passing through two points; the output will follow the shortest such path.) - - Examples - -------- - >>> np.geomspace(1, 1000, num=4) - array([ 1., 10., 100., 1000.]) - >>> np.geomspace(1, 1000, num=3, endpoint=False) - array([ 1., 10., 100.]) - >>> np.geomspace(1, 1000, num=4, endpoint=False) - array([ 1. , 5.62341325, 31.6227766 , 177.827941 ]) - >>> np.geomspace(1, 256, num=9) - array([ 1., 2., 4., 8., 16., 32., 64., 128., 256.]) - - Note that the above may not produce exact integers: - - >>> np.geomspace(1, 256, num=9, dtype=int) - array([ 1, 2, 4, 7, 16, 32, 63, 127, 256]) - >>> np.around(np.geomspace(1, 256, num=9)).astype(int) - array([ 1, 2, 4, 8, 16, 32, 64, 128, 256]) - - Negative, decreasing, and complex inputs are allowed: - - >>> np.geomspace(1000, 1, num=4) - array([1000., 100., 10., 1.]) - >>> np.geomspace(-1000, -1, num=4) - array([-1000., -100., -10., -1.]) - >>> np.geomspace(1j, 1000j, num=4) # Straight line - array([0. +1.j, 0. +10.j, 0. 
+100.j, 0.+1000.j]) - >>> np.geomspace(-1+0j, 1+0j, num=5) # Circle - array([-1.00000000e+00+1.22464680e-16j, -7.07106781e-01+7.07106781e-01j, - 6.12323400e-17+1.00000000e+00j, 7.07106781e-01+7.07106781e-01j, - 1.00000000e+00+0.00000000e+00j]) - - Graphical illustration of ``endpoint`` parameter: - - >>> import matplotlib.pyplot as plt - >>> N = 10 - >>> y = np.zeros(N) - >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o') - [] - >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o') - [] - >>> plt.axis([0.5, 2000, 0, 3]) - [0.5, 2000, 0, 3] - >>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both') - >>> plt.show() - - """ - start = asanyarray(start) - stop = asanyarray(stop) - if _nx.any(start == 0) or _nx.any(stop == 0): - raise ValueError('Geometric sequence cannot include zero') - - dt = result_type(start, stop, float(num), _nx.zeros((), dtype)) - if dtype is None: - dtype = dt - else: - # complex to dtype('complex128'), for instance - dtype = _nx.dtype(dtype) - - # Promote both arguments to the same dtype in case, for instance, one is - # complex and another is negative and log would produce NaN otherwise. - # Copy since we may change things in-place further down. - start = start.astype(dt, copy=True) - stop = stop.astype(dt, copy=True) - - out_sign = _nx.ones(_nx.broadcast(start, stop).shape, dt) - # Avoid negligible real or imaginary parts in output by rotating to - # positive real, calculating, then undoing rotation - if _nx.issubdtype(dt, _nx.complexfloating): - all_imag = (start.real == 0.) & (stop.real == 0.) 
- if _nx.any(all_imag): - start[all_imag] = start[all_imag].imag - stop[all_imag] = stop[all_imag].imag - out_sign[all_imag] = 1j - - both_negative = (_nx.sign(start) == -1) & (_nx.sign(stop) == -1) - if _nx.any(both_negative): - _nx.negative(start, out=start, where=both_negative) - _nx.negative(stop, out=stop, where=both_negative) - _nx.negative(out_sign, out=out_sign, where=both_negative) - - log_start = _nx.log10(start) - log_stop = _nx.log10(stop) - result = out_sign * logspace(log_start, log_stop, num=num, - endpoint=endpoint, base=10.0, dtype=dtype) - if axis != 0: - result = _nx.moveaxis(result, 0, axis) - - return result.astype(dtype, copy=False) - - -def _needs_add_docstring(obj): - """ - Returns true if the only way to set the docstring of `obj` from python is - via add_docstring. - - This function errs on the side of being overly conservative. - """ - Py_TPFLAGS_HEAPTYPE = 1 << 9 - - if isinstance(obj, (types.FunctionType, types.MethodType, property)): - return False - - if isinstance(obj, type) and obj.__flags__ & Py_TPFLAGS_HEAPTYPE: - return False - - return True - - -def _add_docstring(obj, doc, warn_on_python): - if warn_on_python and not _needs_add_docstring(obj): - warnings.warn( - "add_newdoc was used on a pure-python object {}. " - "Prefer to attach it directly to the source." - .format(obj), - UserWarning, - stacklevel=3) - try: - add_docstring(obj, doc) - except Exception: - pass - - -def add_newdoc(place, obj, doc, warn_on_python=True): - """ - Add documentation to an existing object, typically one defined in C - - The purpose is to allow easier editing of the docstrings without requiring - a re-compile. This exists primarily for internal use within numpy itself. 
- - Parameters - ---------- - place : str - The absolute name of the module to import from - obj : str - The name of the object to add documentation to, typically a class or - function name - doc : {str, Tuple[str, str], List[Tuple[str, str]]} - If a string, the documentation to apply to `obj` - - If a tuple, then the first element is interpreted as an attribute of - `obj` and the second as the docstring to apply - ``(method, docstring)`` - - If a list, then each element of the list should be a tuple of length - two - ``[(method1, docstring1), (method2, docstring2), ...]`` - warn_on_python : bool - If True, the default, emit `UserWarning` if this is used to attach - documentation to a pure-python object. - - Notes - ----- - This routine never raises an error if the docstring can't be written, but - will raise an error if the object being documented does not exist. - - This routine cannot modify read-only docstrings, as appear - in new-style classes or built-in functions. Because this - routine never raises an error the caller must check manually - that the docstrings were changed. - - Since this function grabs the ``char *`` from a c-level str object and puts - it into the ``tp_doc`` slot of the type of `obj`, it violates a number of - C-API best-practices, by: - - - modifying a `PyTypeObject` after calling `PyType_Ready` - - calling `Py_INCREF` on the str and losing the reference, so the str - will never be released - - If possible it should be avoided. 
- """ - new = getattr(__import__(place, globals(), {}, [obj]), obj) - if isinstance(doc, str): - _add_docstring(new, doc.strip(), warn_on_python) - elif isinstance(doc, tuple): - attr, docstring = doc - _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python) - elif isinstance(doc, list): - for attr, docstring in doc: - _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python) diff --git a/venv/lib/python3.7/site-packages/numpy/core/generate_numpy_api.py b/venv/lib/python3.7/site-packages/numpy/core/generate_numpy_api.py deleted file mode 100644 index 5e04fb8..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/generate_numpy_api.py +++ /dev/null @@ -1,254 +0,0 @@ -from __future__ import division, print_function - -import os -import genapi - -from genapi import \ - TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi - -import numpy_api - -# use annotated api when running under cpychecker -h_template = r""" -#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE) - -typedef struct { - PyObject_HEAD - npy_bool obval; -} PyBoolScalarObject; - -extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type; -extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type; -extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; - -%s - -#else - -#if defined(PY_ARRAY_UNIQUE_SYMBOL) -#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL -#endif - -#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY) -extern void **PyArray_API; -#else -#if defined(PY_ARRAY_UNIQUE_SYMBOL) -void **PyArray_API; -#else -static void **PyArray_API=NULL; -#endif -#endif - -%s - -#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT) -static int -_import_array(void) -{ - int st; - PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); - PyObject *c_api = NULL; - - if (numpy == NULL) { - return -1; - } - c_api = PyObject_GetAttrString(numpy, "_ARRAY_API"); - Py_DECREF(numpy); - if (c_api == NULL) { - 
PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found"); - return -1; - } - -#if PY_VERSION_HEX >= 0x03000000 - if (!PyCapsule_CheckExact(c_api)) { - PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object"); - Py_DECREF(c_api); - return -1; - } - PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL); -#else - if (!PyCObject_Check(c_api)) { - PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCObject object"); - Py_DECREF(c_api); - return -1; - } - PyArray_API = (void **)PyCObject_AsVoidPtr(c_api); -#endif - Py_DECREF(c_api); - if (PyArray_API == NULL) { - PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer"); - return -1; - } - - /* Perform runtime check of C API version */ - if (NPY_VERSION != PyArray_GetNDArrayCVersion()) { - PyErr_Format(PyExc_RuntimeError, "module compiled against "\ - "ABI version 0x%%x but this version of numpy is 0x%%x", \ - (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion()); - return -1; - } - if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) { - PyErr_Format(PyExc_RuntimeError, "module compiled against "\ - "API version 0x%%x but this version of numpy is 0x%%x", \ - (int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion()); - return -1; - } - - /* - * Perform runtime check of endianness and check it matches the one set by - * the headers (npy_endian.h) as a safeguard - */ - st = PyArray_GetEndianness(); - if (st == NPY_CPU_UNKNOWN_ENDIAN) { - PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian"); - return -1; - } -#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN - if (st != NPY_CPU_BIG) { - PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ - "big endian, but detected different endianness at runtime"); - return -1; - } -#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN - if (st != NPY_CPU_LITTLE) { - PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ - "little endian, but detected different endianness at runtime"); - return -1; - } -#endif - 
- return 0; -} - -#if PY_VERSION_HEX >= 0x03000000 -#define NUMPY_IMPORT_ARRAY_RETVAL NULL -#else -#define NUMPY_IMPORT_ARRAY_RETVAL -#endif - -#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NUMPY_IMPORT_ARRAY_RETVAL; } } - -#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } } - -#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } } - -#endif - -#endif -""" - - -c_template = r""" -/* These pointers will be stored in the C-object for use in other - extension modules -*/ - -void *PyArray_API[] = { -%s -}; -""" - -c_api_header = """ -=========== -NumPy C-API -=========== -""" - -def generate_api(output_dir, force=False): - basename = 'multiarray_api' - - h_file = os.path.join(output_dir, '__%s.h' % basename) - c_file = os.path.join(output_dir, '__%s.c' % basename) - d_file = os.path.join(output_dir, '%s.txt' % basename) - targets = (h_file, c_file, d_file) - - sources = numpy_api.multiarray_api - - if (not force and not genapi.should_rebuild(targets, [numpy_api.__file__, __file__])): - return targets - else: - do_generate_api(targets, sources) - - return targets - -def do_generate_api(targets, sources): - header_file = targets[0] - c_file = targets[1] - doc_file = targets[2] - - global_vars = sources[0] - scalar_bool_values = sources[1] - types_api = sources[2] - multiarray_funcs = sources[3] - - multiarray_api = sources[:] - - module_list = [] - extension_list = [] - init_list = [] - - # Check multiarray api indexes - multiarray_api_index = genapi.merge_api_dicts(multiarray_api) - genapi.check_api_dict(multiarray_api_index) - - numpyapi_list = genapi.get_api_functions('NUMPY_API', - multiarray_funcs) - - # FIXME: ordered_funcs_api is unused - ordered_funcs_api = 
genapi.order_dict(multiarray_funcs) - - # Create dict name -> *Api instance - api_name = 'PyArray_API' - multiarray_api_dict = {} - for f in numpyapi_list: - name = f.name - index = multiarray_funcs[name][0] - annotations = multiarray_funcs[name][1:] - multiarray_api_dict[f.name] = FunctionApi(f.name, index, annotations, - f.return_type, - f.args, api_name) - - for name, val in global_vars.items(): - index, type = val - multiarray_api_dict[name] = GlobalVarApi(name, index, type, api_name) - - for name, val in scalar_bool_values.items(): - index = val[0] - multiarray_api_dict[name] = BoolValuesApi(name, index, api_name) - - for name, val in types_api.items(): - index = val[0] - multiarray_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name) - - if len(multiarray_api_dict) != len(multiarray_api_index): - keys_dict = set(multiarray_api_dict.keys()) - keys_index = set(multiarray_api_index.keys()) - raise AssertionError( - "Multiarray API size mismatch - " - "index has extra keys {}, dict has extra keys {}" - .format(keys_index - keys_dict, keys_dict - keys_index) - ) - - extension_list = [] - for name, index in genapi.order_dict(multiarray_api_index): - api_item = multiarray_api_dict[name] - extension_list.append(api_item.define_from_array_api_string()) - init_list.append(api_item.array_api_define()) - module_list.append(api_item.internal_define()) - - # Write to header - s = h_template % ('\n'.join(module_list), '\n'.join(extension_list)) - genapi.write_file(header_file, s) - - # Write to c-code - s = c_template % ',\n'.join(init_list) - genapi.write_file(c_file, s) - - # write to documentation - s = c_api_header - for func in numpyapi_list: - s += func.to_ReST() - s += '\n\n' - genapi.write_file(doc_file, s) - - return targets diff --git a/venv/lib/python3.7/site-packages/numpy/core/getlimits.py b/venv/lib/python3.7/site-packages/numpy/core/getlimits.py deleted file mode 100644 index 31fa6b9..0000000 --- 
a/venv/lib/python3.7/site-packages/numpy/core/getlimits.py +++ /dev/null @@ -1,548 +0,0 @@ -"""Machine limits for Float32 and Float64 and (long double) if available... - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['finfo', 'iinfo'] - -import warnings - -from .machar import MachAr -from .overrides import set_module -from . import numeric -from . import numerictypes as ntypes -from .numeric import array, inf -from .umath import log10, exp2 -from . import umath - - -def _fr0(a): - """fix rank-0 --> rank-1""" - if a.ndim == 0: - a = a.copy() - a.shape = (1,) - return a - - -def _fr1(a): - """fix rank > 0 --> rank-0""" - if a.size == 1: - a = a.copy() - a.shape = () - return a - -class MachArLike(object): - """ Object to simulate MachAr instance """ - - def __init__(self, - ftype, - **kwargs): - params = _MACHAR_PARAMS[ftype] - float_conv = lambda v: array([v], ftype) - float_to_float = lambda v : _fr1(float_conv(v)) - float_to_str = lambda v: (params['fmt'] % array(_fr0(v)[0], ftype)) - - self.title = params['title'] - # Parameter types same as for discovered MachAr object. 
- self.epsilon = self.eps = float_to_float(kwargs.pop('eps')) - self.epsneg = float_to_float(kwargs.pop('epsneg')) - self.xmax = self.huge = float_to_float(kwargs.pop('huge')) - self.xmin = self.tiny = float_to_float(kwargs.pop('tiny')) - self.ibeta = params['itype'](kwargs.pop('ibeta')) - self.__dict__.update(kwargs) - self.precision = int(-log10(self.eps)) - self.resolution = float_to_float(float_conv(10) ** (-self.precision)) - self._str_eps = float_to_str(self.eps) - self._str_epsneg = float_to_str(self.epsneg) - self._str_xmin = float_to_str(self.xmin) - self._str_xmax = float_to_str(self.xmax) - self._str_resolution = float_to_str(self.resolution) - -_convert_to_float = { - ntypes.csingle: ntypes.single, - ntypes.complex_: ntypes.float_, - ntypes.clongfloat: ntypes.longfloat - } - -# Parameters for creating MachAr / MachAr-like objects -_title_fmt = 'numpy {} precision floating point number' -_MACHAR_PARAMS = { - ntypes.double: dict( - itype = ntypes.int64, - fmt = '%24.16e', - title = _title_fmt.format('double')), - ntypes.single: dict( - itype = ntypes.int32, - fmt = '%15.7e', - title = _title_fmt.format('single')), - ntypes.longdouble: dict( - itype = ntypes.longlong, - fmt = '%s', - title = _title_fmt.format('long double')), - ntypes.half: dict( - itype = ntypes.int16, - fmt = '%12.5e', - title = _title_fmt.format('half'))} - -# Key to identify the floating point type. Key is result of -# ftype('-0.1').newbyteorder('<').tobytes() -# See: -# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure -_KNOWN_TYPES = {} -def _register_type(machar, bytepat): - _KNOWN_TYPES[bytepat] = machar -_float_ma = {} - -def _register_known_types(): - # Known parameters for float16 - # See docstring of MachAr class for description of parameters. 
- f16 = ntypes.float16 - float16_ma = MachArLike(f16, - machep=-10, - negep=-11, - minexp=-14, - maxexp=16, - it=10, - iexp=5, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(f16(-10)), - epsneg=exp2(f16(-11)), - huge=f16(65504), - tiny=f16(2 ** -14)) - _register_type(float16_ma, b'f\xae') - _float_ma[16] = float16_ma - - # Known parameters for float32 - f32 = ntypes.float32 - float32_ma = MachArLike(f32, - machep=-23, - negep=-24, - minexp=-126, - maxexp=128, - it=23, - iexp=8, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(f32(-23)), - epsneg=exp2(f32(-24)), - huge=f32((1 - 2 ** -24) * 2**128), - tiny=exp2(f32(-126))) - _register_type(float32_ma, b'\xcd\xcc\xcc\xbd') - _float_ma[32] = float32_ma - - # Known parameters for float64 - f64 = ntypes.float64 - epsneg_f64 = 2.0 ** -53.0 - tiny_f64 = 2.0 ** -1022.0 - float64_ma = MachArLike(f64, - machep=-52, - negep=-53, - minexp=-1022, - maxexp=1024, - it=52, - iexp=11, - ibeta=2, - irnd=5, - ngrd=0, - eps=2.0 ** -52.0, - epsneg=epsneg_f64, - huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4), - tiny=tiny_f64) - _register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf') - _float_ma[64] = float64_ma - - # Known parameters for IEEE 754 128-bit binary float - ld = ntypes.longdouble - epsneg_f128 = exp2(ld(-113)) - tiny_f128 = exp2(ld(-16382)) - # Ignore runtime error when this is not f128 - with numeric.errstate(all='ignore'): - huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4) - float128_ma = MachArLike(ld, - machep=-112, - negep=-113, - minexp=-16382, - maxexp=16384, - it=112, - iexp=15, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(ld(-112)), - epsneg=epsneg_f128, - huge=huge_f128, - tiny=tiny_f128) - # IEEE 754 128-bit binary float - _register_type(float128_ma, - b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf') - _register_type(float128_ma, - b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf') - _float_ma[128] = float128_ma - - # Known parameters for float80 (Intel 80-bit extended precision) - 
epsneg_f80 = exp2(ld(-64)) - tiny_f80 = exp2(ld(-16382)) - # Ignore runtime error when this is not f80 - with numeric.errstate(all='ignore'): - huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4) - float80_ma = MachArLike(ld, - machep=-63, - negep=-64, - minexp=-16382, - maxexp=16384, - it=63, - iexp=15, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(ld(-63)), - epsneg=epsneg_f80, - huge=huge_f80, - tiny=tiny_f80) - # float80, first 10 bytes containing actual storage - _register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf') - _float_ma[80] = float80_ma - - # Guessed / known parameters for double double; see: - # https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic - # These numbers have the same exponent range as float64, but extended number of - # digits in the significand. - huge_dd = (umath.nextafter(ld(inf), ld(0)) - if hasattr(umath, 'nextafter') # Missing on some platforms? - else float64_ma.huge) - float_dd_ma = MachArLike(ld, - machep=-105, - negep=-106, - minexp=-1022, - maxexp=1024, - it=105, - iexp=11, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(ld(-105)), - epsneg= exp2(ld(-106)), - huge=huge_dd, - tiny=exp2(ld(-1022))) - # double double; low, high order (e.g. PPC 64) - _register_type(float_dd_ma, - b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf') - # double double; high, low order (e.g. PPC 64 le) - _register_type(float_dd_ma, - b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<') - _float_ma['dd'] = float_dd_ma - - -def _get_machar(ftype): - """ Get MachAr instance or MachAr-like instance - - Get parameters for floating point type, by first trying signatures of - various known floating point types, then, if none match, attempting to - identify parameters by analysis. - - Parameters - ---------- - ftype : class - Numpy floating point type class (e.g. 
``np.float64``) - - Returns - ------- - ma_like : instance of :class:`MachAr` or :class:`MachArLike` - Object giving floating point parameters for `ftype`. - - Warns - ----- - UserWarning - If the binary signature of the float type is not in the dictionary of - known float types. - """ - params = _MACHAR_PARAMS.get(ftype) - if params is None: - raise ValueError(repr(ftype)) - # Detect known / suspected types - key = ftype('-0.1').newbyteorder('<').tobytes() - ma_like = _KNOWN_TYPES.get(key) - # Could be 80 bit == 10 byte extended precision, where last bytes can be - # random garbage. Try comparing first 10 bytes to pattern. - if ma_like is None and ftype == ntypes.longdouble: - ma_like = _KNOWN_TYPES.get(key[:10]) - if ma_like is not None: - return ma_like - # Fall back to parameter discovery - warnings.warn( - 'Signature {} for {} does not match any known type: ' - 'falling back to type probe function'.format(key, ftype), - UserWarning, stacklevel=2) - return _discovered_machar(ftype) - - -def _discovered_machar(ftype): - """ Create MachAr instance with found information on float types - """ - params = _MACHAR_PARAMS[ftype] - return MachAr(lambda v: array([v], ftype), - lambda v:_fr0(v.astype(params['itype']))[0], - lambda v:array(_fr0(v)[0], ftype), - lambda v: params['fmt'] % array(_fr0(v)[0], ftype), - params['title']) - - -@set_module('numpy') -class finfo(object): - """ - finfo(dtype) - - Machine limits for floating point types. - - Attributes - ---------- - bits : int - The number of bits occupied by the type. - eps : float - The smallest representable positive number such that - ``1.0 + eps != 1.0``. Type of `eps` is an appropriate floating - point type. - epsneg : floating point number of the appropriate type - The smallest representable positive number such that - ``1.0 - epsneg != 1.0``. - iexp : int - The number of bits in the exponent portion of the floating point - representation. 
- machar : MachAr - The object which calculated these parameters and holds more - detailed information. - machep : int - The exponent that yields `eps`. - max : floating point number of the appropriate type - The largest representable number. - maxexp : int - The smallest positive power of the base (2) that causes overflow. - min : floating point number of the appropriate type - The smallest representable number, typically ``-max``. - minexp : int - The most negative power of the base (2) consistent with there - being no leading 0's in the mantissa. - negep : int - The exponent that yields `epsneg`. - nexp : int - The number of bits in the exponent including its sign and bias. - nmant : int - The number of bits in the mantissa. - precision : int - The approximate number of decimal digits to which this kind of - float is precise. - resolution : floating point number of the appropriate type - The approximate decimal resolution of this type, i.e., - ``10**-precision``. - tiny : float - The smallest positive usable number. Type of `tiny` is an - appropriate floating point type. - - Parameters - ---------- - dtype : float, dtype, or instance - Kind of floating point data-type about which to get information. - - See Also - -------- - MachAr : The implementation of the tests that produce this information. - iinfo : The equivalent for integer data types. - - Notes - ----- - For developers of NumPy: do not instantiate this at the module level. - The initial calculation of these parameters is expensive and negatively - impacts import times. These objects are cached, so calling ``finfo()`` - repeatedly inside your functions is not a problem. 
- - """ - - _finfo_cache = {} - - def __new__(cls, dtype): - try: - dtype = numeric.dtype(dtype) - except TypeError: - # In case a float instance was given - dtype = numeric.dtype(type(dtype)) - - obj = cls._finfo_cache.get(dtype, None) - if obj is not None: - return obj - dtypes = [dtype] - newdtype = numeric.obj2sctype(dtype) - if newdtype is not dtype: - dtypes.append(newdtype) - dtype = newdtype - if not issubclass(dtype, numeric.inexact): - raise ValueError("data type %r not inexact" % (dtype)) - obj = cls._finfo_cache.get(dtype, None) - if obj is not None: - return obj - if not issubclass(dtype, numeric.floating): - newdtype = _convert_to_float[dtype] - if newdtype is not dtype: - dtypes.append(newdtype) - dtype = newdtype - obj = cls._finfo_cache.get(dtype, None) - if obj is not None: - return obj - obj = object.__new__(cls)._init(dtype) - for dt in dtypes: - cls._finfo_cache[dt] = obj - return obj - - def _init(self, dtype): - self.dtype = numeric.dtype(dtype) - machar = _get_machar(dtype) - - for word in ['precision', 'iexp', - 'maxexp', 'minexp', 'negep', - 'machep']: - setattr(self, word, getattr(machar, word)) - for word in ['tiny', 'resolution', 'epsneg']: - setattr(self, word, getattr(machar, word).flat[0]) - self.bits = self.dtype.itemsize * 8 - self.max = machar.huge.flat[0] - self.min = -self.max - self.eps = machar.eps.flat[0] - self.nexp = machar.iexp - self.nmant = machar.it - self.machar = machar - self._str_tiny = machar._str_xmin.strip() - self._str_max = machar._str_xmax.strip() - self._str_epsneg = machar._str_epsneg.strip() - self._str_eps = machar._str_eps.strip() - self._str_resolution = machar._str_resolution.strip() - return self - - def __str__(self): - fmt = ( - 'Machine parameters for %(dtype)s\n' - '---------------------------------------------------------------\n' - 'precision = %(precision)3s resolution = %(_str_resolution)s\n' - 'machep = %(machep)6s eps = %(_str_eps)s\n' - 'negep = %(negep)6s epsneg = %(_str_epsneg)s\n' - 
'minexp = %(minexp)6s tiny = %(_str_tiny)s\n' - 'maxexp = %(maxexp)6s max = %(_str_max)s\n' - 'nexp = %(nexp)6s min = -max\n' - '---------------------------------------------------------------\n' - ) - return fmt % self.__dict__ - - def __repr__(self): - c = self.__class__.__name__ - d = self.__dict__.copy() - d['klass'] = c - return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s," - " max=%(_str_max)s, dtype=%(dtype)s)") % d) - - -@set_module('numpy') -class iinfo(object): - """ - iinfo(type) - - Machine limits for integer types. - - Attributes - ---------- - bits : int - The number of bits occupied by the type. - min : int - The smallest integer expressible by the type. - max : int - The largest integer expressible by the type. - - Parameters - ---------- - int_type : integer type, dtype, or instance - The kind of integer data type to get information about. - - See Also - -------- - finfo : The equivalent for floating point data types. - - Examples - -------- - With types: - - >>> ii16 = np.iinfo(np.int16) - >>> ii16.min - -32768 - >>> ii16.max - 32767 - >>> ii32 = np.iinfo(np.int32) - >>> ii32.min - -2147483648 - >>> ii32.max - 2147483647 - - With instances: - - >>> ii32 = np.iinfo(np.int32(10)) - >>> ii32.min - -2147483648 - >>> ii32.max - 2147483647 - - """ - - _min_vals = {} - _max_vals = {} - - def __init__(self, int_type): - try: - self.dtype = numeric.dtype(int_type) - except TypeError: - self.dtype = numeric.dtype(type(int_type)) - self.kind = self.dtype.kind - self.bits = self.dtype.itemsize * 8 - self.key = "%s%d" % (self.kind, self.bits) - if self.kind not in 'iu': - raise ValueError("Invalid integer data type %r." 
% (self.kind,)) - - @property - def min(self): - """Minimum value of given dtype.""" - if self.kind == 'u': - return 0 - else: - try: - val = iinfo._min_vals[self.key] - except KeyError: - val = int(-(1 << (self.bits-1))) - iinfo._min_vals[self.key] = val - return val - - @property - def max(self): - """Maximum value of given dtype.""" - try: - val = iinfo._max_vals[self.key] - except KeyError: - if self.kind == 'u': - val = int((1 << self.bits) - 1) - else: - val = int((1 << (self.bits-1)) - 1) - iinfo._max_vals[self.key] = val - return val - - def __str__(self): - """String representation.""" - fmt = ( - 'Machine parameters for %(dtype)s\n' - '---------------------------------------------------------------\n' - 'min = %(min)s\n' - 'max = %(max)s\n' - '---------------------------------------------------------------\n' - ) - return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max} - - def __repr__(self): - return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__, - self.min, self.max, self.dtype) - diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/__multiarray_api.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/__multiarray_api.h deleted file mode 100644 index 01de270..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/__multiarray_api.h +++ /dev/null @@ -1,1554 +0,0 @@ - -#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE) - -typedef struct { - PyObject_HEAD - npy_bool obval; -} PyBoolScalarObject; - -extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type; -extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type; -extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; - -NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCVersion \ - (void); -extern NPY_NO_EXPORT PyTypeObject PyBigArray_Type; - -extern NPY_NO_EXPORT PyTypeObject PyArray_Type; - -extern NPY_NO_EXPORT PyTypeObject PyArrayDescr_Type; - -extern NPY_NO_EXPORT PyTypeObject 
PyArrayFlags_Type; - -extern NPY_NO_EXPORT PyTypeObject PyArrayIter_Type; - -extern NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type; - -extern NPY_NO_EXPORT int NPY_NUMUSERTYPES; - -extern NPY_NO_EXPORT PyTypeObject PyBoolArrType_Type; - -extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; - -extern NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyNumberArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyIntegerArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PySignedIntegerArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyUnsignedIntegerArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyInexactArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyFloatingArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyComplexFloatingArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyFlexibleArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyCharacterArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyByteArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyShortArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyIntArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyLongArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyLongLongArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyUByteArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyUShortArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyUIntArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyULongArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyULongLongArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyFloatArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyDoubleArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyLongDoubleArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyCFloatArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyCDoubleArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyCLongDoubleArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyObjectArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyStringArrType_Type; 
- -extern NPY_NO_EXPORT PyTypeObject PyUnicodeArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyVoidArrType_Type; - -NPY_NO_EXPORT int PyArray_SetNumericOps \ - (PyObject *); -NPY_NO_EXPORT PyObject * PyArray_GetNumericOps \ - (void); -NPY_NO_EXPORT int PyArray_INCREF \ - (PyArrayObject *); -NPY_NO_EXPORT int PyArray_XDECREF \ - (PyArrayObject *); -NPY_NO_EXPORT void PyArray_SetStringFunction \ - (PyObject *, int); -NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromType \ - (int); -NPY_NO_EXPORT PyObject * PyArray_TypeObjectFromType \ - (int); -NPY_NO_EXPORT char * PyArray_Zero \ - (PyArrayObject *); -NPY_NO_EXPORT char * PyArray_One \ - (PyArrayObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(2) PyObject * PyArray_CastToType \ - (PyArrayObject *, PyArray_Descr *, int); -NPY_NO_EXPORT int PyArray_CastTo \ - (PyArrayObject *, PyArrayObject *); -NPY_NO_EXPORT int PyArray_CastAnyTo \ - (PyArrayObject *, PyArrayObject *); -NPY_NO_EXPORT int PyArray_CanCastSafely \ - (int, int); -NPY_NO_EXPORT npy_bool PyArray_CanCastTo \ - (PyArray_Descr *, PyArray_Descr *); -NPY_NO_EXPORT int PyArray_ObjectType \ - (PyObject *, int); -NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromObject \ - (PyObject *, PyArray_Descr *); -NPY_NO_EXPORT PyArrayObject ** PyArray_ConvertToCommonType \ - (PyObject *, int *); -NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromScalar \ - (PyObject *); -NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromTypeObject \ - (PyObject *); -NPY_NO_EXPORT npy_intp PyArray_Size \ - (PyObject *); -NPY_NO_EXPORT PyObject * PyArray_Scalar \ - (void *, PyArray_Descr *, PyObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromScalar \ - (PyObject *, PyArray_Descr *); -NPY_NO_EXPORT void PyArray_ScalarAsCtype \ - (PyObject *, void *); -NPY_NO_EXPORT int PyArray_CastScalarToCtype \ - (PyObject *, void *, PyArray_Descr *); -NPY_NO_EXPORT int PyArray_CastScalarDirect \ - (PyObject *, PyArray_Descr *, void *, int); -NPY_NO_EXPORT PyObject * 
PyArray_ScalarFromObject \ - (PyObject *); -NPY_NO_EXPORT PyArray_VectorUnaryFunc * PyArray_GetCastFunc \ - (PyArray_Descr *, int); -NPY_NO_EXPORT PyObject * PyArray_FromDims \ - (int NPY_UNUSED(nd), int *NPY_UNUSED(d), int NPY_UNUSED(type)); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_FromDimsAndDataAndDescr \ - (int NPY_UNUSED(nd), int *NPY_UNUSED(d), PyArray_Descr *, char *NPY_UNUSED(data)); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromAny \ - (PyObject *, PyArray_Descr *, int, int, int, PyObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureArray \ - (PyObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureAnyArray \ - (PyObject *); -NPY_NO_EXPORT PyObject * PyArray_FromFile \ - (FILE *, PyArray_Descr *, npy_intp, char *); -NPY_NO_EXPORT PyObject * PyArray_FromString \ - (char *, npy_intp, PyArray_Descr *, npy_intp, char *); -NPY_NO_EXPORT PyObject * PyArray_FromBuffer \ - (PyObject *, PyArray_Descr *, npy_intp, npy_intp); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromIter \ - (PyObject *, PyArray_Descr *, npy_intp); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_Return \ - (PyArrayObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(2) PyObject * PyArray_GetField \ - (PyArrayObject *, PyArray_Descr *, int); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(2) int PyArray_SetField \ - (PyArrayObject *, PyArray_Descr *, int, PyObject *); -NPY_NO_EXPORT PyObject * PyArray_Byteswap \ - (PyArrayObject *, npy_bool); -NPY_NO_EXPORT PyObject * PyArray_Resize \ - (PyArrayObject *, PyArray_Dims *, int, NPY_ORDER NPY_UNUSED(order)); -NPY_NO_EXPORT int PyArray_MoveInto \ - (PyArrayObject *, PyArrayObject *); -NPY_NO_EXPORT int PyArray_CopyInto \ - (PyArrayObject *, PyArrayObject *); -NPY_NO_EXPORT int PyArray_CopyAnyInto \ - (PyArrayObject *, PyArrayObject *); -NPY_NO_EXPORT int PyArray_CopyObject \ - (PyArrayObject *, PyObject *); -NPY_NO_EXPORT 
NPY_GCC_NONNULL(1) PyObject * PyArray_NewCopy \ - (PyArrayObject *, NPY_ORDER); -NPY_NO_EXPORT PyObject * PyArray_ToList \ - (PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_ToString \ - (PyArrayObject *, NPY_ORDER); -NPY_NO_EXPORT int PyArray_ToFile \ - (PyArrayObject *, FILE *, char *, char *); -NPY_NO_EXPORT int PyArray_Dump \ - (PyObject *, PyObject *, int); -NPY_NO_EXPORT PyObject * PyArray_Dumps \ - (PyObject *, int); -NPY_NO_EXPORT int PyArray_ValidType \ - (int); -NPY_NO_EXPORT void PyArray_UpdateFlags \ - (PyArrayObject *, int); -NPY_NO_EXPORT NPY_GCC_NONNULL(1) PyObject * PyArray_New \ - (PyTypeObject *, int, npy_intp const *, int, npy_intp const *, void *, int, int, PyObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(1) NPY_GCC_NONNULL(2) PyObject * PyArray_NewFromDescr \ - (PyTypeObject *, PyArray_Descr *, int, npy_intp const *, npy_intp const *, void *, int, PyObject *); -NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNew \ - (PyArray_Descr *); -NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNewFromType \ - (int); -NPY_NO_EXPORT double PyArray_GetPriority \ - (PyObject *, double); -NPY_NO_EXPORT PyObject * PyArray_IterNew \ - (PyObject *); -NPY_NO_EXPORT PyObject* PyArray_MultiIterNew \ - (int, ...); -NPY_NO_EXPORT int PyArray_PyIntAsInt \ - (PyObject *); -NPY_NO_EXPORT npy_intp PyArray_PyIntAsIntp \ - (PyObject *); -NPY_NO_EXPORT int PyArray_Broadcast \ - (PyArrayMultiIterObject *); -NPY_NO_EXPORT void PyArray_FillObjectArray \ - (PyArrayObject *, PyObject *); -NPY_NO_EXPORT int PyArray_FillWithScalar \ - (PyArrayObject *, PyObject *); -NPY_NO_EXPORT npy_bool PyArray_CheckStrides \ - (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *); -NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNewByteorder \ - (PyArray_Descr *, char); -NPY_NO_EXPORT PyObject * PyArray_IterAllButAxis \ - (PyObject *, int *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_CheckFromAny \ - (PyObject *, PyArray_Descr *, int, int, int, PyObject *); 
-NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromArray \ - (PyArrayObject *, PyArray_Descr *, int); -NPY_NO_EXPORT PyObject * PyArray_FromInterface \ - (PyObject *); -NPY_NO_EXPORT PyObject * PyArray_FromStructInterface \ - (PyObject *); -NPY_NO_EXPORT PyObject * PyArray_FromArrayAttr \ - (PyObject *, PyArray_Descr *, PyObject *); -NPY_NO_EXPORT NPY_SCALARKIND PyArray_ScalarKind \ - (int, PyArrayObject **); -NPY_NO_EXPORT int PyArray_CanCoerceScalar \ - (int, int, NPY_SCALARKIND); -NPY_NO_EXPORT PyObject * PyArray_NewFlagsObject \ - (PyObject *); -NPY_NO_EXPORT npy_bool PyArray_CanCastScalar \ - (PyTypeObject *, PyTypeObject *); -NPY_NO_EXPORT int PyArray_CompareUCS4 \ - (npy_ucs4 *, npy_ucs4 *, size_t); -NPY_NO_EXPORT int PyArray_RemoveSmallest \ - (PyArrayMultiIterObject *); -NPY_NO_EXPORT int PyArray_ElementStrides \ - (PyObject *); -NPY_NO_EXPORT void PyArray_Item_INCREF \ - (char *, PyArray_Descr *); -NPY_NO_EXPORT void PyArray_Item_XDECREF \ - (char *, PyArray_Descr *); -NPY_NO_EXPORT PyObject * PyArray_FieldNames \ - (PyObject *); -NPY_NO_EXPORT PyObject * PyArray_Transpose \ - (PyArrayObject *, PyArray_Dims *); -NPY_NO_EXPORT PyObject * PyArray_TakeFrom \ - (PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE); -NPY_NO_EXPORT PyObject * PyArray_PutTo \ - (PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE); -NPY_NO_EXPORT PyObject * PyArray_PutMask \ - (PyArrayObject *, PyObject*, PyObject*); -NPY_NO_EXPORT PyObject * PyArray_Repeat \ - (PyArrayObject *, PyObject *, int); -NPY_NO_EXPORT PyObject * PyArray_Choose \ - (PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE); -NPY_NO_EXPORT int PyArray_Sort \ - (PyArrayObject *, int, NPY_SORTKIND); -NPY_NO_EXPORT PyObject * PyArray_ArgSort \ - (PyArrayObject *, int, NPY_SORTKIND); -NPY_NO_EXPORT PyObject * PyArray_SearchSorted \ - (PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *); -NPY_NO_EXPORT PyObject * PyArray_ArgMax \ - (PyArrayObject *, int, PyArrayObject *); 
-NPY_NO_EXPORT PyObject * PyArray_ArgMin \ - (PyArrayObject *, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Reshape \ - (PyArrayObject *, PyObject *); -NPY_NO_EXPORT PyObject * PyArray_Newshape \ - (PyArrayObject *, PyArray_Dims *, NPY_ORDER); -NPY_NO_EXPORT PyObject * PyArray_Squeeze \ - (PyArrayObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_View \ - (PyArrayObject *, PyArray_Descr *, PyTypeObject *); -NPY_NO_EXPORT PyObject * PyArray_SwapAxes \ - (PyArrayObject *, int, int); -NPY_NO_EXPORT PyObject * PyArray_Max \ - (PyArrayObject *, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Min \ - (PyArrayObject *, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Ptp \ - (PyArrayObject *, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Mean \ - (PyArrayObject *, int, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Trace \ - (PyArrayObject *, int, int, int, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Diagonal \ - (PyArrayObject *, int, int, int); -NPY_NO_EXPORT PyObject * PyArray_Clip \ - (PyArrayObject *, PyObject *, PyObject *, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Conjugate \ - (PyArrayObject *, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Nonzero \ - (PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Std \ - (PyArrayObject *, int, int, PyArrayObject *, int); -NPY_NO_EXPORT PyObject * PyArray_Sum \ - (PyArrayObject *, int, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_CumSum \ - (PyArrayObject *, int, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Prod \ - (PyArrayObject *, int, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_CumProd \ - (PyArrayObject *, int, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_All \ - (PyArrayObject *, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Any \ - (PyArrayObject *, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Compress \ - (PyArrayObject *, PyObject *, int, 
PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Flatten \ - (PyArrayObject *, NPY_ORDER); -NPY_NO_EXPORT PyObject * PyArray_Ravel \ - (PyArrayObject *, NPY_ORDER); -NPY_NO_EXPORT npy_intp PyArray_MultiplyList \ - (npy_intp const *, int); -NPY_NO_EXPORT int PyArray_MultiplyIntList \ - (int const *, int); -NPY_NO_EXPORT void * PyArray_GetPtr \ - (PyArrayObject *, npy_intp const*); -NPY_NO_EXPORT int PyArray_CompareLists \ - (npy_intp const *, npy_intp const *, int); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(5) int PyArray_AsCArray \ - (PyObject **, void *, npy_intp *, int, PyArray_Descr*); -NPY_NO_EXPORT int PyArray_As1D \ - (PyObject **NPY_UNUSED(op), char **NPY_UNUSED(ptr), int *NPY_UNUSED(d1), int NPY_UNUSED(typecode)); -NPY_NO_EXPORT int PyArray_As2D \ - (PyObject **NPY_UNUSED(op), char ***NPY_UNUSED(ptr), int *NPY_UNUSED(d1), int *NPY_UNUSED(d2), int NPY_UNUSED(typecode)); -NPY_NO_EXPORT int PyArray_Free \ - (PyObject *, void *); -NPY_NO_EXPORT int PyArray_Converter \ - (PyObject *, PyObject **); -NPY_NO_EXPORT int PyArray_IntpFromSequence \ - (PyObject *, npy_intp *, int); -NPY_NO_EXPORT PyObject * PyArray_Concatenate \ - (PyObject *, int); -NPY_NO_EXPORT PyObject * PyArray_InnerProduct \ - (PyObject *, PyObject *); -NPY_NO_EXPORT PyObject * PyArray_MatrixProduct \ - (PyObject *, PyObject *); -NPY_NO_EXPORT PyObject * PyArray_CopyAndTranspose \ - (PyObject *); -NPY_NO_EXPORT PyObject * PyArray_Correlate \ - (PyObject *, PyObject *, int); -NPY_NO_EXPORT int PyArray_TypestrConvert \ - (int, int); -NPY_NO_EXPORT int PyArray_DescrConverter \ - (PyObject *, PyArray_Descr **); -NPY_NO_EXPORT int PyArray_DescrConverter2 \ - (PyObject *, PyArray_Descr **); -NPY_NO_EXPORT int PyArray_IntpConverter \ - (PyObject *, PyArray_Dims *); -NPY_NO_EXPORT int PyArray_BufferConverter \ - (PyObject *, PyArray_Chunk *); -NPY_NO_EXPORT int PyArray_AxisConverter \ - (PyObject *, int *); -NPY_NO_EXPORT int PyArray_BoolConverter \ - (PyObject *, npy_bool *); -NPY_NO_EXPORT int 
PyArray_ByteorderConverter \ - (PyObject *, char *); -NPY_NO_EXPORT int PyArray_OrderConverter \ - (PyObject *, NPY_ORDER *); -NPY_NO_EXPORT unsigned char PyArray_EquivTypes \ - (PyArray_Descr *, PyArray_Descr *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Zeros \ - (int, npy_intp const *, PyArray_Descr *, int); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Empty \ - (int, npy_intp const *, PyArray_Descr *, int); -NPY_NO_EXPORT PyObject * PyArray_Where \ - (PyObject *, PyObject *, PyObject *); -NPY_NO_EXPORT PyObject * PyArray_Arange \ - (double, double, double, int); -NPY_NO_EXPORT PyObject * PyArray_ArangeObj \ - (PyObject *, PyObject *, PyObject *, PyArray_Descr *); -NPY_NO_EXPORT int PyArray_SortkindConverter \ - (PyObject *, NPY_SORTKIND *); -NPY_NO_EXPORT PyObject * PyArray_LexSort \ - (PyObject *, int); -NPY_NO_EXPORT PyObject * PyArray_Round \ - (PyArrayObject *, int, PyArrayObject *); -NPY_NO_EXPORT unsigned char PyArray_EquivTypenums \ - (int, int); -NPY_NO_EXPORT int PyArray_RegisterDataType \ - (PyArray_Descr *); -NPY_NO_EXPORT int PyArray_RegisterCastFunc \ - (PyArray_Descr *, int, PyArray_VectorUnaryFunc *); -NPY_NO_EXPORT int PyArray_RegisterCanCast \ - (PyArray_Descr *, int, NPY_SCALARKIND); -NPY_NO_EXPORT void PyArray_InitArrFuncs \ - (PyArray_ArrFuncs *); -NPY_NO_EXPORT PyObject * PyArray_IntTupleFromIntp \ - (int, npy_intp *); -NPY_NO_EXPORT int PyArray_TypeNumFromName \ - (char *); -NPY_NO_EXPORT int PyArray_ClipmodeConverter \ - (PyObject *, NPY_CLIPMODE *); -NPY_NO_EXPORT int PyArray_OutputConverter \ - (PyObject *, PyArrayObject **); -NPY_NO_EXPORT PyObject * PyArray_BroadcastToShape \ - (PyObject *, npy_intp *, int); -NPY_NO_EXPORT void _PyArray_SigintHandler \ - (int); -NPY_NO_EXPORT void* _PyArray_GetSigintBuf \ - (void); -NPY_NO_EXPORT int PyArray_DescrAlignConverter \ - (PyObject *, PyArray_Descr **); -NPY_NO_EXPORT int PyArray_DescrAlignConverter2 \ - (PyObject *, PyArray_Descr **); -NPY_NO_EXPORT int 
PyArray_SearchsideConverter \ - (PyObject *, void *); -NPY_NO_EXPORT PyObject * PyArray_CheckAxis \ - (PyArrayObject *, int *, int); -NPY_NO_EXPORT npy_intp PyArray_OverflowMultiplyList \ - (npy_intp *, int); -NPY_NO_EXPORT int PyArray_CompareString \ - (const char *, const char *, size_t); -NPY_NO_EXPORT PyObject* PyArray_MultiIterFromObjects \ - (PyObject **, int, int, ...); -NPY_NO_EXPORT int PyArray_GetEndianness \ - (void); -NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCFeatureVersion \ - (void); -NPY_NO_EXPORT PyObject * PyArray_Correlate2 \ - (PyObject *, PyObject *, int); -NPY_NO_EXPORT PyObject* PyArray_NeighborhoodIterNew \ - (PyArrayIterObject *, const npy_intp *, int, PyArrayObject*); -extern NPY_NO_EXPORT PyTypeObject PyTimeIntegerArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyDatetimeArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyTimedeltaArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyHalfArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject NpyIter_Type; - -NPY_NO_EXPORT void PyArray_SetDatetimeParseFunction \ - (PyObject *NPY_UNUSED(op)); -NPY_NO_EXPORT void PyArray_DatetimeToDatetimeStruct \ - (npy_datetime NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *); -NPY_NO_EXPORT void PyArray_TimedeltaToTimedeltaStruct \ - (npy_timedelta NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *); -NPY_NO_EXPORT npy_datetime PyArray_DatetimeStructToDatetime \ - (NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *NPY_UNUSED(d)); -NPY_NO_EXPORT npy_datetime PyArray_TimedeltaStructToTimedelta \ - (NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *NPY_UNUSED(d)); -NPY_NO_EXPORT NpyIter * NpyIter_New \ - (PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*); -NPY_NO_EXPORT NpyIter * NpyIter_MultiNew \ - (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **); -NPY_NO_EXPORT NpyIter * NpyIter_AdvancedNew \ - (int, PyArrayObject **, npy_uint32, NPY_ORDER, 
NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp); -NPY_NO_EXPORT NpyIter * NpyIter_Copy \ - (NpyIter *); -NPY_NO_EXPORT int NpyIter_Deallocate \ - (NpyIter *); -NPY_NO_EXPORT npy_bool NpyIter_HasDelayedBufAlloc \ - (NpyIter *); -NPY_NO_EXPORT npy_bool NpyIter_HasExternalLoop \ - (NpyIter *); -NPY_NO_EXPORT int NpyIter_EnableExternalLoop \ - (NpyIter *); -NPY_NO_EXPORT npy_intp * NpyIter_GetInnerStrideArray \ - (NpyIter *); -NPY_NO_EXPORT npy_intp * NpyIter_GetInnerLoopSizePtr \ - (NpyIter *); -NPY_NO_EXPORT int NpyIter_Reset \ - (NpyIter *, char **); -NPY_NO_EXPORT int NpyIter_ResetBasePointers \ - (NpyIter *, char **, char **); -NPY_NO_EXPORT int NpyIter_ResetToIterIndexRange \ - (NpyIter *, npy_intp, npy_intp, char **); -NPY_NO_EXPORT int NpyIter_GetNDim \ - (NpyIter *); -NPY_NO_EXPORT int NpyIter_GetNOp \ - (NpyIter *); -NPY_NO_EXPORT NpyIter_IterNextFunc * NpyIter_GetIterNext \ - (NpyIter *, char **); -NPY_NO_EXPORT npy_intp NpyIter_GetIterSize \ - (NpyIter *); -NPY_NO_EXPORT void NpyIter_GetIterIndexRange \ - (NpyIter *, npy_intp *, npy_intp *); -NPY_NO_EXPORT npy_intp NpyIter_GetIterIndex \ - (NpyIter *); -NPY_NO_EXPORT int NpyIter_GotoIterIndex \ - (NpyIter *, npy_intp); -NPY_NO_EXPORT npy_bool NpyIter_HasMultiIndex \ - (NpyIter *); -NPY_NO_EXPORT int NpyIter_GetShape \ - (NpyIter *, npy_intp *); -NPY_NO_EXPORT NpyIter_GetMultiIndexFunc * NpyIter_GetGetMultiIndex \ - (NpyIter *, char **); -NPY_NO_EXPORT int NpyIter_GotoMultiIndex \ - (NpyIter *, npy_intp const *); -NPY_NO_EXPORT int NpyIter_RemoveMultiIndex \ - (NpyIter *); -NPY_NO_EXPORT npy_bool NpyIter_HasIndex \ - (NpyIter *); -NPY_NO_EXPORT npy_bool NpyIter_IsBuffered \ - (NpyIter *); -NPY_NO_EXPORT npy_bool NpyIter_IsGrowInner \ - (NpyIter *); -NPY_NO_EXPORT npy_intp NpyIter_GetBufferSize \ - (NpyIter *); -NPY_NO_EXPORT npy_intp * NpyIter_GetIndexPtr \ - (NpyIter *); -NPY_NO_EXPORT int NpyIter_GotoIndex \ - (NpyIter *, npy_intp); -NPY_NO_EXPORT char ** 
NpyIter_GetDataPtrArray \ - (NpyIter *); -NPY_NO_EXPORT PyArray_Descr ** NpyIter_GetDescrArray \ - (NpyIter *); -NPY_NO_EXPORT PyArrayObject ** NpyIter_GetOperandArray \ - (NpyIter *); -NPY_NO_EXPORT PyArrayObject * NpyIter_GetIterView \ - (NpyIter *, npy_intp); -NPY_NO_EXPORT void NpyIter_GetReadFlags \ - (NpyIter *, char *); -NPY_NO_EXPORT void NpyIter_GetWriteFlags \ - (NpyIter *, char *); -NPY_NO_EXPORT void NpyIter_DebugPrint \ - (NpyIter *); -NPY_NO_EXPORT npy_bool NpyIter_IterationNeedsAPI \ - (NpyIter *); -NPY_NO_EXPORT void NpyIter_GetInnerFixedStrideArray \ - (NpyIter *, npy_intp *); -NPY_NO_EXPORT int NpyIter_RemoveAxis \ - (NpyIter *, int); -NPY_NO_EXPORT npy_intp * NpyIter_GetAxisStrideArray \ - (NpyIter *, int); -NPY_NO_EXPORT npy_bool NpyIter_RequiresBuffering \ - (NpyIter *); -NPY_NO_EXPORT char ** NpyIter_GetInitialDataPtrArray \ - (NpyIter *); -NPY_NO_EXPORT int NpyIter_CreateCompatibleStrides \ - (NpyIter *, npy_intp, npy_intp *); -NPY_NO_EXPORT int PyArray_CastingConverter \ - (PyObject *, NPY_CASTING *); -NPY_NO_EXPORT npy_intp PyArray_CountNonzero \ - (PyArrayObject *); -NPY_NO_EXPORT PyArray_Descr * PyArray_PromoteTypes \ - (PyArray_Descr *, PyArray_Descr *); -NPY_NO_EXPORT PyArray_Descr * PyArray_MinScalarType \ - (PyArrayObject *); -NPY_NO_EXPORT PyArray_Descr * PyArray_ResultType \ - (npy_intp, PyArrayObject **, npy_intp, PyArray_Descr **); -NPY_NO_EXPORT npy_bool PyArray_CanCastArrayTo \ - (PyArrayObject *, PyArray_Descr *, NPY_CASTING); -NPY_NO_EXPORT npy_bool PyArray_CanCastTypeTo \ - (PyArray_Descr *, PyArray_Descr *, NPY_CASTING); -NPY_NO_EXPORT PyArrayObject * PyArray_EinsteinSum \ - (char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) NPY_GCC_NONNULL(1) PyObject * PyArray_NewLikeArray \ - (PyArrayObject *, NPY_ORDER, PyArray_Descr *, int); -NPY_NO_EXPORT int PyArray_GetArrayParamsFromObject \ - (PyObject *, PyArray_Descr *, npy_bool, PyArray_Descr **, 
int *, npy_intp *, PyArrayObject **, PyObject *); -NPY_NO_EXPORT int PyArray_ConvertClipmodeSequence \ - (PyObject *, NPY_CLIPMODE *, int); -NPY_NO_EXPORT PyObject * PyArray_MatrixProduct2 \ - (PyObject *, PyObject *, PyArrayObject*); -NPY_NO_EXPORT npy_bool NpyIter_IsFirstVisit \ - (NpyIter *, int); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetBaseObject \ - (PyArrayObject *, PyObject *); -NPY_NO_EXPORT void PyArray_CreateSortedStridePerm \ - (int, npy_intp const *, npy_stride_sort_item *); -NPY_NO_EXPORT void PyArray_RemoveAxesInPlace \ - (PyArrayObject *, const npy_bool *); -NPY_NO_EXPORT void PyArray_DebugPrint \ - (PyArrayObject *); -NPY_NO_EXPORT int PyArray_FailUnlessWriteable \ - (PyArrayObject *, const char *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetUpdateIfCopyBase \ - (PyArrayObject *, PyArrayObject *); -NPY_NO_EXPORT void * PyDataMem_NEW \ - (size_t); -NPY_NO_EXPORT void PyDataMem_FREE \ - (void *); -NPY_NO_EXPORT void * PyDataMem_RENEW \ - (void *, size_t); -NPY_NO_EXPORT PyDataMem_EventHookFunc * PyDataMem_SetEventHook \ - (PyDataMem_EventHookFunc *, void *, void **); -extern NPY_NO_EXPORT NPY_CASTING NPY_DEFAULT_ASSIGN_CASTING; - -NPY_NO_EXPORT void PyArray_MapIterSwapAxes \ - (PyArrayMapIterObject *, PyArrayObject **, int); -NPY_NO_EXPORT PyObject * PyArray_MapIterArray \ - (PyArrayObject *, PyObject *); -NPY_NO_EXPORT void PyArray_MapIterNext \ - (PyArrayMapIterObject *); -NPY_NO_EXPORT int PyArray_Partition \ - (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND); -NPY_NO_EXPORT PyObject * PyArray_ArgPartition \ - (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND); -NPY_NO_EXPORT int PyArray_SelectkindConverter \ - (PyObject *, NPY_SELECTKIND *); -NPY_NO_EXPORT void * PyDataMem_NEW_ZEROED \ - (size_t, size_t); -NPY_NO_EXPORT NPY_GCC_NONNULL(1) int PyArray_CheckAnyScalarExact \ - (PyObject *); -NPY_NO_EXPORT PyObject * PyArray_MapIterArrayCopyIfOverlap \ - (PyArrayObject *, PyObject *, int, PyArrayObject *); 
-NPY_NO_EXPORT int PyArray_ResolveWritebackIfCopy \ - (PyArrayObject *); -NPY_NO_EXPORT int PyArray_SetWritebackIfCopyBase \ - (PyArrayObject *, PyArrayObject *); - -#else - -#if defined(PY_ARRAY_UNIQUE_SYMBOL) -#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL -#endif - -#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY) -extern void **PyArray_API; -#else -#if defined(PY_ARRAY_UNIQUE_SYMBOL) -void **PyArray_API; -#else -static void **PyArray_API=NULL; -#endif -#endif - -#define PyArray_GetNDArrayCVersion \ - (*(unsigned int (*)(void)) \ - PyArray_API[0]) -#define PyBigArray_Type (*(PyTypeObject *)PyArray_API[1]) -#define PyArray_Type (*(PyTypeObject *)PyArray_API[2]) -#define PyArrayDescr_Type (*(PyTypeObject *)PyArray_API[3]) -#define PyArrayFlags_Type (*(PyTypeObject *)PyArray_API[4]) -#define PyArrayIter_Type (*(PyTypeObject *)PyArray_API[5]) -#define PyArrayMultiIter_Type (*(PyTypeObject *)PyArray_API[6]) -#define NPY_NUMUSERTYPES (*(int *)PyArray_API[7]) -#define PyBoolArrType_Type (*(PyTypeObject *)PyArray_API[8]) -#define _PyArrayScalar_BoolValues ((PyBoolScalarObject *)PyArray_API[9]) -#define PyGenericArrType_Type (*(PyTypeObject *)PyArray_API[10]) -#define PyNumberArrType_Type (*(PyTypeObject *)PyArray_API[11]) -#define PyIntegerArrType_Type (*(PyTypeObject *)PyArray_API[12]) -#define PySignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[13]) -#define PyUnsignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[14]) -#define PyInexactArrType_Type (*(PyTypeObject *)PyArray_API[15]) -#define PyFloatingArrType_Type (*(PyTypeObject *)PyArray_API[16]) -#define PyComplexFloatingArrType_Type (*(PyTypeObject *)PyArray_API[17]) -#define PyFlexibleArrType_Type (*(PyTypeObject *)PyArray_API[18]) -#define PyCharacterArrType_Type (*(PyTypeObject *)PyArray_API[19]) -#define PyByteArrType_Type (*(PyTypeObject *)PyArray_API[20]) -#define PyShortArrType_Type (*(PyTypeObject *)PyArray_API[21]) -#define PyIntArrType_Type (*(PyTypeObject *)PyArray_API[22]) -#define 
PyLongArrType_Type (*(PyTypeObject *)PyArray_API[23]) -#define PyLongLongArrType_Type (*(PyTypeObject *)PyArray_API[24]) -#define PyUByteArrType_Type (*(PyTypeObject *)PyArray_API[25]) -#define PyUShortArrType_Type (*(PyTypeObject *)PyArray_API[26]) -#define PyUIntArrType_Type (*(PyTypeObject *)PyArray_API[27]) -#define PyULongArrType_Type (*(PyTypeObject *)PyArray_API[28]) -#define PyULongLongArrType_Type (*(PyTypeObject *)PyArray_API[29]) -#define PyFloatArrType_Type (*(PyTypeObject *)PyArray_API[30]) -#define PyDoubleArrType_Type (*(PyTypeObject *)PyArray_API[31]) -#define PyLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[32]) -#define PyCFloatArrType_Type (*(PyTypeObject *)PyArray_API[33]) -#define PyCDoubleArrType_Type (*(PyTypeObject *)PyArray_API[34]) -#define PyCLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[35]) -#define PyObjectArrType_Type (*(PyTypeObject *)PyArray_API[36]) -#define PyStringArrType_Type (*(PyTypeObject *)PyArray_API[37]) -#define PyUnicodeArrType_Type (*(PyTypeObject *)PyArray_API[38]) -#define PyVoidArrType_Type (*(PyTypeObject *)PyArray_API[39]) -#define PyArray_SetNumericOps \ - (*(int (*)(PyObject *)) \ - PyArray_API[40]) -#define PyArray_GetNumericOps \ - (*(PyObject * (*)(void)) \ - PyArray_API[41]) -#define PyArray_INCREF \ - (*(int (*)(PyArrayObject *)) \ - PyArray_API[42]) -#define PyArray_XDECREF \ - (*(int (*)(PyArrayObject *)) \ - PyArray_API[43]) -#define PyArray_SetStringFunction \ - (*(void (*)(PyObject *, int)) \ - PyArray_API[44]) -#define PyArray_DescrFromType \ - (*(PyArray_Descr * (*)(int)) \ - PyArray_API[45]) -#define PyArray_TypeObjectFromType \ - (*(PyObject * (*)(int)) \ - PyArray_API[46]) -#define PyArray_Zero \ - (*(char * (*)(PyArrayObject *)) \ - PyArray_API[47]) -#define PyArray_One \ - (*(char * (*)(PyArrayObject *)) \ - PyArray_API[48]) -#define PyArray_CastToType \ - (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ - PyArray_API[49]) -#define PyArray_CastTo \ - (*(int 
(*)(PyArrayObject *, PyArrayObject *)) \ - PyArray_API[50]) -#define PyArray_CastAnyTo \ - (*(int (*)(PyArrayObject *, PyArrayObject *)) \ - PyArray_API[51]) -#define PyArray_CanCastSafely \ - (*(int (*)(int, int)) \ - PyArray_API[52]) -#define PyArray_CanCastTo \ - (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *)) \ - PyArray_API[53]) -#define PyArray_ObjectType \ - (*(int (*)(PyObject *, int)) \ - PyArray_API[54]) -#define PyArray_DescrFromObject \ - (*(PyArray_Descr * (*)(PyObject *, PyArray_Descr *)) \ - PyArray_API[55]) -#define PyArray_ConvertToCommonType \ - (*(PyArrayObject ** (*)(PyObject *, int *)) \ - PyArray_API[56]) -#define PyArray_DescrFromScalar \ - (*(PyArray_Descr * (*)(PyObject *)) \ - PyArray_API[57]) -#define PyArray_DescrFromTypeObject \ - (*(PyArray_Descr * (*)(PyObject *)) \ - PyArray_API[58]) -#define PyArray_Size \ - (*(npy_intp (*)(PyObject *)) \ - PyArray_API[59]) -#define PyArray_Scalar \ - (*(PyObject * (*)(void *, PyArray_Descr *, PyObject *)) \ - PyArray_API[60]) -#define PyArray_FromScalar \ - (*(PyObject * (*)(PyObject *, PyArray_Descr *)) \ - PyArray_API[61]) -#define PyArray_ScalarAsCtype \ - (*(void (*)(PyObject *, void *)) \ - PyArray_API[62]) -#define PyArray_CastScalarToCtype \ - (*(int (*)(PyObject *, void *, PyArray_Descr *)) \ - PyArray_API[63]) -#define PyArray_CastScalarDirect \ - (*(int (*)(PyObject *, PyArray_Descr *, void *, int)) \ - PyArray_API[64]) -#define PyArray_ScalarFromObject \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[65]) -#define PyArray_GetCastFunc \ - (*(PyArray_VectorUnaryFunc * (*)(PyArray_Descr *, int)) \ - PyArray_API[66]) -#define PyArray_FromDims \ - (*(PyObject * (*)(int NPY_UNUSED(nd), int *NPY_UNUSED(d), int NPY_UNUSED(type))) \ - PyArray_API[67]) -#define PyArray_FromDimsAndDataAndDescr \ - (*(PyObject * (*)(int NPY_UNUSED(nd), int *NPY_UNUSED(d), PyArray_Descr *, char *NPY_UNUSED(data))) \ - PyArray_API[68]) -#define PyArray_FromAny \ - (*(PyObject * (*)(PyObject *, PyArray_Descr *, 
int, int, int, PyObject *)) \ - PyArray_API[69]) -#define PyArray_EnsureArray \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[70]) -#define PyArray_EnsureAnyArray \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[71]) -#define PyArray_FromFile \ - (*(PyObject * (*)(FILE *, PyArray_Descr *, npy_intp, char *)) \ - PyArray_API[72]) -#define PyArray_FromString \ - (*(PyObject * (*)(char *, npy_intp, PyArray_Descr *, npy_intp, char *)) \ - PyArray_API[73]) -#define PyArray_FromBuffer \ - (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp, npy_intp)) \ - PyArray_API[74]) -#define PyArray_FromIter \ - (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp)) \ - PyArray_API[75]) -#define PyArray_Return \ - (*(PyObject * (*)(PyArrayObject *)) \ - PyArray_API[76]) -#define PyArray_GetField \ - (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ - PyArray_API[77]) -#define PyArray_SetField \ - (*(int (*)(PyArrayObject *, PyArray_Descr *, int, PyObject *)) \ - PyArray_API[78]) -#define PyArray_Byteswap \ - (*(PyObject * (*)(PyArrayObject *, npy_bool)) \ - PyArray_API[79]) -#define PyArray_Resize \ - (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, int, NPY_ORDER NPY_UNUSED(order))) \ - PyArray_API[80]) -#define PyArray_MoveInto \ - (*(int (*)(PyArrayObject *, PyArrayObject *)) \ - PyArray_API[81]) -#define PyArray_CopyInto \ - (*(int (*)(PyArrayObject *, PyArrayObject *)) \ - PyArray_API[82]) -#define PyArray_CopyAnyInto \ - (*(int (*)(PyArrayObject *, PyArrayObject *)) \ - PyArray_API[83]) -#define PyArray_CopyObject \ - (*(int (*)(PyArrayObject *, PyObject *)) \ - PyArray_API[84]) -#define PyArray_NewCopy \ - (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ - PyArray_API[85]) -#define PyArray_ToList \ - (*(PyObject * (*)(PyArrayObject *)) \ - PyArray_API[86]) -#define PyArray_ToString \ - (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ - PyArray_API[87]) -#define PyArray_ToFile \ - (*(int (*)(PyArrayObject *, FILE *, char *, char *)) \ - PyArray_API[88]) 
-#define PyArray_Dump \ - (*(int (*)(PyObject *, PyObject *, int)) \ - PyArray_API[89]) -#define PyArray_Dumps \ - (*(PyObject * (*)(PyObject *, int)) \ - PyArray_API[90]) -#define PyArray_ValidType \ - (*(int (*)(int)) \ - PyArray_API[91]) -#define PyArray_UpdateFlags \ - (*(void (*)(PyArrayObject *, int)) \ - PyArray_API[92]) -#define PyArray_New \ - (*(PyObject * (*)(PyTypeObject *, int, npy_intp const *, int, npy_intp const *, void *, int, int, PyObject *)) \ - PyArray_API[93]) -#define PyArray_NewFromDescr \ - (*(PyObject * (*)(PyTypeObject *, PyArray_Descr *, int, npy_intp const *, npy_intp const *, void *, int, PyObject *)) \ - PyArray_API[94]) -#define PyArray_DescrNew \ - (*(PyArray_Descr * (*)(PyArray_Descr *)) \ - PyArray_API[95]) -#define PyArray_DescrNewFromType \ - (*(PyArray_Descr * (*)(int)) \ - PyArray_API[96]) -#define PyArray_GetPriority \ - (*(double (*)(PyObject *, double)) \ - PyArray_API[97]) -#define PyArray_IterNew \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[98]) -#define PyArray_MultiIterNew \ - (*(PyObject* (*)(int, ...)) \ - PyArray_API[99]) -#define PyArray_PyIntAsInt \ - (*(int (*)(PyObject *)) \ - PyArray_API[100]) -#define PyArray_PyIntAsIntp \ - (*(npy_intp (*)(PyObject *)) \ - PyArray_API[101]) -#define PyArray_Broadcast \ - (*(int (*)(PyArrayMultiIterObject *)) \ - PyArray_API[102]) -#define PyArray_FillObjectArray \ - (*(void (*)(PyArrayObject *, PyObject *)) \ - PyArray_API[103]) -#define PyArray_FillWithScalar \ - (*(int (*)(PyArrayObject *, PyObject *)) \ - PyArray_API[104]) -#define PyArray_CheckStrides \ - (*(npy_bool (*)(int, int, npy_intp, npy_intp, npy_intp *, npy_intp *)) \ - PyArray_API[105]) -#define PyArray_DescrNewByteorder \ - (*(PyArray_Descr * (*)(PyArray_Descr *, char)) \ - PyArray_API[106]) -#define PyArray_IterAllButAxis \ - (*(PyObject * (*)(PyObject *, int *)) \ - PyArray_API[107]) -#define PyArray_CheckFromAny \ - (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \ - 
PyArray_API[108]) -#define PyArray_FromArray \ - (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ - PyArray_API[109]) -#define PyArray_FromInterface \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[110]) -#define PyArray_FromStructInterface \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[111]) -#define PyArray_FromArrayAttr \ - (*(PyObject * (*)(PyObject *, PyArray_Descr *, PyObject *)) \ - PyArray_API[112]) -#define PyArray_ScalarKind \ - (*(NPY_SCALARKIND (*)(int, PyArrayObject **)) \ - PyArray_API[113]) -#define PyArray_CanCoerceScalar \ - (*(int (*)(int, int, NPY_SCALARKIND)) \ - PyArray_API[114]) -#define PyArray_NewFlagsObject \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[115]) -#define PyArray_CanCastScalar \ - (*(npy_bool (*)(PyTypeObject *, PyTypeObject *)) \ - PyArray_API[116]) -#define PyArray_CompareUCS4 \ - (*(int (*)(npy_ucs4 *, npy_ucs4 *, size_t)) \ - PyArray_API[117]) -#define PyArray_RemoveSmallest \ - (*(int (*)(PyArrayMultiIterObject *)) \ - PyArray_API[118]) -#define PyArray_ElementStrides \ - (*(int (*)(PyObject *)) \ - PyArray_API[119]) -#define PyArray_Item_INCREF \ - (*(void (*)(char *, PyArray_Descr *)) \ - PyArray_API[120]) -#define PyArray_Item_XDECREF \ - (*(void (*)(char *, PyArray_Descr *)) \ - PyArray_API[121]) -#define PyArray_FieldNames \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[122]) -#define PyArray_Transpose \ - (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *)) \ - PyArray_API[123]) -#define PyArray_TakeFrom \ - (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE)) \ - PyArray_API[124]) -#define PyArray_PutTo \ - (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE)) \ - PyArray_API[125]) -#define PyArray_PutMask \ - (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject*)) \ - PyArray_API[126]) -#define PyArray_Repeat \ - (*(PyObject * (*)(PyArrayObject *, PyObject *, int)) \ - PyArray_API[127]) -#define PyArray_Choose \ - (*(PyObject * 
(*)(PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE)) \ - PyArray_API[128]) -#define PyArray_Sort \ - (*(int (*)(PyArrayObject *, int, NPY_SORTKIND)) \ - PyArray_API[129]) -#define PyArray_ArgSort \ - (*(PyObject * (*)(PyArrayObject *, int, NPY_SORTKIND)) \ - PyArray_API[130]) -#define PyArray_SearchSorted \ - (*(PyObject * (*)(PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *)) \ - PyArray_API[131]) -#define PyArray_ArgMax \ - (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ - PyArray_API[132]) -#define PyArray_ArgMin \ - (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ - PyArray_API[133]) -#define PyArray_Reshape \ - (*(PyObject * (*)(PyArrayObject *, PyObject *)) \ - PyArray_API[134]) -#define PyArray_Newshape \ - (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, NPY_ORDER)) \ - PyArray_API[135]) -#define PyArray_Squeeze \ - (*(PyObject * (*)(PyArrayObject *)) \ - PyArray_API[136]) -#define PyArray_View \ - (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, PyTypeObject *)) \ - PyArray_API[137]) -#define PyArray_SwapAxes \ - (*(PyObject * (*)(PyArrayObject *, int, int)) \ - PyArray_API[138]) -#define PyArray_Max \ - (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ - PyArray_API[139]) -#define PyArray_Min \ - (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ - PyArray_API[140]) -#define PyArray_Ptp \ - (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ - PyArray_API[141]) -#define PyArray_Mean \ - (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ - PyArray_API[142]) -#define PyArray_Trace \ - (*(PyObject * (*)(PyArrayObject *, int, int, int, int, PyArrayObject *)) \ - PyArray_API[143]) -#define PyArray_Diagonal \ - (*(PyObject * (*)(PyArrayObject *, int, int, int)) \ - PyArray_API[144]) -#define PyArray_Clip \ - (*(PyObject * (*)(PyArrayObject *, PyObject *, PyObject *, PyArrayObject *)) \ - PyArray_API[145]) -#define PyArray_Conjugate \ - (*(PyObject * (*)(PyArrayObject *, 
PyArrayObject *)) \ - PyArray_API[146]) -#define PyArray_Nonzero \ - (*(PyObject * (*)(PyArrayObject *)) \ - PyArray_API[147]) -#define PyArray_Std \ - (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *, int)) \ - PyArray_API[148]) -#define PyArray_Sum \ - (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ - PyArray_API[149]) -#define PyArray_CumSum \ - (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ - PyArray_API[150]) -#define PyArray_Prod \ - (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ - PyArray_API[151]) -#define PyArray_CumProd \ - (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ - PyArray_API[152]) -#define PyArray_All \ - (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ - PyArray_API[153]) -#define PyArray_Any \ - (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ - PyArray_API[154]) -#define PyArray_Compress \ - (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *)) \ - PyArray_API[155]) -#define PyArray_Flatten \ - (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ - PyArray_API[156]) -#define PyArray_Ravel \ - (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ - PyArray_API[157]) -#define PyArray_MultiplyList \ - (*(npy_intp (*)(npy_intp const *, int)) \ - PyArray_API[158]) -#define PyArray_MultiplyIntList \ - (*(int (*)(int const *, int)) \ - PyArray_API[159]) -#define PyArray_GetPtr \ - (*(void * (*)(PyArrayObject *, npy_intp const*)) \ - PyArray_API[160]) -#define PyArray_CompareLists \ - (*(int (*)(npy_intp const *, npy_intp const *, int)) \ - PyArray_API[161]) -#define PyArray_AsCArray \ - (*(int (*)(PyObject **, void *, npy_intp *, int, PyArray_Descr*)) \ - PyArray_API[162]) -#define PyArray_As1D \ - (*(int (*)(PyObject **NPY_UNUSED(op), char **NPY_UNUSED(ptr), int *NPY_UNUSED(d1), int NPY_UNUSED(typecode))) \ - PyArray_API[163]) -#define PyArray_As2D \ - (*(int (*)(PyObject **NPY_UNUSED(op), char ***NPY_UNUSED(ptr), int *NPY_UNUSED(d1), int 
*NPY_UNUSED(d2), int NPY_UNUSED(typecode))) \ - PyArray_API[164]) -#define PyArray_Free \ - (*(int (*)(PyObject *, void *)) \ - PyArray_API[165]) -#define PyArray_Converter \ - (*(int (*)(PyObject *, PyObject **)) \ - PyArray_API[166]) -#define PyArray_IntpFromSequence \ - (*(int (*)(PyObject *, npy_intp *, int)) \ - PyArray_API[167]) -#define PyArray_Concatenate \ - (*(PyObject * (*)(PyObject *, int)) \ - PyArray_API[168]) -#define PyArray_InnerProduct \ - (*(PyObject * (*)(PyObject *, PyObject *)) \ - PyArray_API[169]) -#define PyArray_MatrixProduct \ - (*(PyObject * (*)(PyObject *, PyObject *)) \ - PyArray_API[170]) -#define PyArray_CopyAndTranspose \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[171]) -#define PyArray_Correlate \ - (*(PyObject * (*)(PyObject *, PyObject *, int)) \ - PyArray_API[172]) -#define PyArray_TypestrConvert \ - (*(int (*)(int, int)) \ - PyArray_API[173]) -#define PyArray_DescrConverter \ - (*(int (*)(PyObject *, PyArray_Descr **)) \ - PyArray_API[174]) -#define PyArray_DescrConverter2 \ - (*(int (*)(PyObject *, PyArray_Descr **)) \ - PyArray_API[175]) -#define PyArray_IntpConverter \ - (*(int (*)(PyObject *, PyArray_Dims *)) \ - PyArray_API[176]) -#define PyArray_BufferConverter \ - (*(int (*)(PyObject *, PyArray_Chunk *)) \ - PyArray_API[177]) -#define PyArray_AxisConverter \ - (*(int (*)(PyObject *, int *)) \ - PyArray_API[178]) -#define PyArray_BoolConverter \ - (*(int (*)(PyObject *, npy_bool *)) \ - PyArray_API[179]) -#define PyArray_ByteorderConverter \ - (*(int (*)(PyObject *, char *)) \ - PyArray_API[180]) -#define PyArray_OrderConverter \ - (*(int (*)(PyObject *, NPY_ORDER *)) \ - PyArray_API[181]) -#define PyArray_EquivTypes \ - (*(unsigned char (*)(PyArray_Descr *, PyArray_Descr *)) \ - PyArray_API[182]) -#define PyArray_Zeros \ - (*(PyObject * (*)(int, npy_intp const *, PyArray_Descr *, int)) \ - PyArray_API[183]) -#define PyArray_Empty \ - (*(PyObject * (*)(int, npy_intp const *, PyArray_Descr *, int)) \ - 
PyArray_API[184]) -#define PyArray_Where \ - (*(PyObject * (*)(PyObject *, PyObject *, PyObject *)) \ - PyArray_API[185]) -#define PyArray_Arange \ - (*(PyObject * (*)(double, double, double, int)) \ - PyArray_API[186]) -#define PyArray_ArangeObj \ - (*(PyObject * (*)(PyObject *, PyObject *, PyObject *, PyArray_Descr *)) \ - PyArray_API[187]) -#define PyArray_SortkindConverter \ - (*(int (*)(PyObject *, NPY_SORTKIND *)) \ - PyArray_API[188]) -#define PyArray_LexSort \ - (*(PyObject * (*)(PyObject *, int)) \ - PyArray_API[189]) -#define PyArray_Round \ - (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ - PyArray_API[190]) -#define PyArray_EquivTypenums \ - (*(unsigned char (*)(int, int)) \ - PyArray_API[191]) -#define PyArray_RegisterDataType \ - (*(int (*)(PyArray_Descr *)) \ - PyArray_API[192]) -#define PyArray_RegisterCastFunc \ - (*(int (*)(PyArray_Descr *, int, PyArray_VectorUnaryFunc *)) \ - PyArray_API[193]) -#define PyArray_RegisterCanCast \ - (*(int (*)(PyArray_Descr *, int, NPY_SCALARKIND)) \ - PyArray_API[194]) -#define PyArray_InitArrFuncs \ - (*(void (*)(PyArray_ArrFuncs *)) \ - PyArray_API[195]) -#define PyArray_IntTupleFromIntp \ - (*(PyObject * (*)(int, npy_intp *)) \ - PyArray_API[196]) -#define PyArray_TypeNumFromName \ - (*(int (*)(char *)) \ - PyArray_API[197]) -#define PyArray_ClipmodeConverter \ - (*(int (*)(PyObject *, NPY_CLIPMODE *)) \ - PyArray_API[198]) -#define PyArray_OutputConverter \ - (*(int (*)(PyObject *, PyArrayObject **)) \ - PyArray_API[199]) -#define PyArray_BroadcastToShape \ - (*(PyObject * (*)(PyObject *, npy_intp *, int)) \ - PyArray_API[200]) -#define _PyArray_SigintHandler \ - (*(void (*)(int)) \ - PyArray_API[201]) -#define _PyArray_GetSigintBuf \ - (*(void* (*)(void)) \ - PyArray_API[202]) -#define PyArray_DescrAlignConverter \ - (*(int (*)(PyObject *, PyArray_Descr **)) \ - PyArray_API[203]) -#define PyArray_DescrAlignConverter2 \ - (*(int (*)(PyObject *, PyArray_Descr **)) \ - PyArray_API[204]) -#define 
PyArray_SearchsideConverter \ - (*(int (*)(PyObject *, void *)) \ - PyArray_API[205]) -#define PyArray_CheckAxis \ - (*(PyObject * (*)(PyArrayObject *, int *, int)) \ - PyArray_API[206]) -#define PyArray_OverflowMultiplyList \ - (*(npy_intp (*)(npy_intp *, int)) \ - PyArray_API[207]) -#define PyArray_CompareString \ - (*(int (*)(const char *, const char *, size_t)) \ - PyArray_API[208]) -#define PyArray_MultiIterFromObjects \ - (*(PyObject* (*)(PyObject **, int, int, ...)) \ - PyArray_API[209]) -#define PyArray_GetEndianness \ - (*(int (*)(void)) \ - PyArray_API[210]) -#define PyArray_GetNDArrayCFeatureVersion \ - (*(unsigned int (*)(void)) \ - PyArray_API[211]) -#define PyArray_Correlate2 \ - (*(PyObject * (*)(PyObject *, PyObject *, int)) \ - PyArray_API[212]) -#define PyArray_NeighborhoodIterNew \ - (*(PyObject* (*)(PyArrayIterObject *, const npy_intp *, int, PyArrayObject*)) \ - PyArray_API[213]) -#define PyTimeIntegerArrType_Type (*(PyTypeObject *)PyArray_API[214]) -#define PyDatetimeArrType_Type (*(PyTypeObject *)PyArray_API[215]) -#define PyTimedeltaArrType_Type (*(PyTypeObject *)PyArray_API[216]) -#define PyHalfArrType_Type (*(PyTypeObject *)PyArray_API[217]) -#define NpyIter_Type (*(PyTypeObject *)PyArray_API[218]) -#define PyArray_SetDatetimeParseFunction \ - (*(void (*)(PyObject *NPY_UNUSED(op))) \ - PyArray_API[219]) -#define PyArray_DatetimeToDatetimeStruct \ - (*(void (*)(npy_datetime NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *)) \ - PyArray_API[220]) -#define PyArray_TimedeltaToTimedeltaStruct \ - (*(void (*)(npy_timedelta NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *)) \ - PyArray_API[221]) -#define PyArray_DatetimeStructToDatetime \ - (*(npy_datetime (*)(NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *NPY_UNUSED(d))) \ - PyArray_API[222]) -#define PyArray_TimedeltaStructToTimedelta \ - (*(npy_datetime (*)(NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *NPY_UNUSED(d))) \ - 
PyArray_API[223]) -#define NpyIter_New \ - (*(NpyIter * (*)(PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*)) \ - PyArray_API[224]) -#define NpyIter_MultiNew \ - (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **)) \ - PyArray_API[225]) -#define NpyIter_AdvancedNew \ - (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp)) \ - PyArray_API[226]) -#define NpyIter_Copy \ - (*(NpyIter * (*)(NpyIter *)) \ - PyArray_API[227]) -#define NpyIter_Deallocate \ - (*(int (*)(NpyIter *)) \ - PyArray_API[228]) -#define NpyIter_HasDelayedBufAlloc \ - (*(npy_bool (*)(NpyIter *)) \ - PyArray_API[229]) -#define NpyIter_HasExternalLoop \ - (*(npy_bool (*)(NpyIter *)) \ - PyArray_API[230]) -#define NpyIter_EnableExternalLoop \ - (*(int (*)(NpyIter *)) \ - PyArray_API[231]) -#define NpyIter_GetInnerStrideArray \ - (*(npy_intp * (*)(NpyIter *)) \ - PyArray_API[232]) -#define NpyIter_GetInnerLoopSizePtr \ - (*(npy_intp * (*)(NpyIter *)) \ - PyArray_API[233]) -#define NpyIter_Reset \ - (*(int (*)(NpyIter *, char **)) \ - PyArray_API[234]) -#define NpyIter_ResetBasePointers \ - (*(int (*)(NpyIter *, char **, char **)) \ - PyArray_API[235]) -#define NpyIter_ResetToIterIndexRange \ - (*(int (*)(NpyIter *, npy_intp, npy_intp, char **)) \ - PyArray_API[236]) -#define NpyIter_GetNDim \ - (*(int (*)(NpyIter *)) \ - PyArray_API[237]) -#define NpyIter_GetNOp \ - (*(int (*)(NpyIter *)) \ - PyArray_API[238]) -#define NpyIter_GetIterNext \ - (*(NpyIter_IterNextFunc * (*)(NpyIter *, char **)) \ - PyArray_API[239]) -#define NpyIter_GetIterSize \ - (*(npy_intp (*)(NpyIter *)) \ - PyArray_API[240]) -#define NpyIter_GetIterIndexRange \ - (*(void (*)(NpyIter *, npy_intp *, npy_intp *)) \ - PyArray_API[241]) -#define NpyIter_GetIterIndex \ - (*(npy_intp (*)(NpyIter *)) \ - PyArray_API[242]) -#define NpyIter_GotoIterIndex \ - (*(int (*)(NpyIter *, 
npy_intp)) \ - PyArray_API[243]) -#define NpyIter_HasMultiIndex \ - (*(npy_bool (*)(NpyIter *)) \ - PyArray_API[244]) -#define NpyIter_GetShape \ - (*(int (*)(NpyIter *, npy_intp *)) \ - PyArray_API[245]) -#define NpyIter_GetGetMultiIndex \ - (*(NpyIter_GetMultiIndexFunc * (*)(NpyIter *, char **)) \ - PyArray_API[246]) -#define NpyIter_GotoMultiIndex \ - (*(int (*)(NpyIter *, npy_intp const *)) \ - PyArray_API[247]) -#define NpyIter_RemoveMultiIndex \ - (*(int (*)(NpyIter *)) \ - PyArray_API[248]) -#define NpyIter_HasIndex \ - (*(npy_bool (*)(NpyIter *)) \ - PyArray_API[249]) -#define NpyIter_IsBuffered \ - (*(npy_bool (*)(NpyIter *)) \ - PyArray_API[250]) -#define NpyIter_IsGrowInner \ - (*(npy_bool (*)(NpyIter *)) \ - PyArray_API[251]) -#define NpyIter_GetBufferSize \ - (*(npy_intp (*)(NpyIter *)) \ - PyArray_API[252]) -#define NpyIter_GetIndexPtr \ - (*(npy_intp * (*)(NpyIter *)) \ - PyArray_API[253]) -#define NpyIter_GotoIndex \ - (*(int (*)(NpyIter *, npy_intp)) \ - PyArray_API[254]) -#define NpyIter_GetDataPtrArray \ - (*(char ** (*)(NpyIter *)) \ - PyArray_API[255]) -#define NpyIter_GetDescrArray \ - (*(PyArray_Descr ** (*)(NpyIter *)) \ - PyArray_API[256]) -#define NpyIter_GetOperandArray \ - (*(PyArrayObject ** (*)(NpyIter *)) \ - PyArray_API[257]) -#define NpyIter_GetIterView \ - (*(PyArrayObject * (*)(NpyIter *, npy_intp)) \ - PyArray_API[258]) -#define NpyIter_GetReadFlags \ - (*(void (*)(NpyIter *, char *)) \ - PyArray_API[259]) -#define NpyIter_GetWriteFlags \ - (*(void (*)(NpyIter *, char *)) \ - PyArray_API[260]) -#define NpyIter_DebugPrint \ - (*(void (*)(NpyIter *)) \ - PyArray_API[261]) -#define NpyIter_IterationNeedsAPI \ - (*(npy_bool (*)(NpyIter *)) \ - PyArray_API[262]) -#define NpyIter_GetInnerFixedStrideArray \ - (*(void (*)(NpyIter *, npy_intp *)) \ - PyArray_API[263]) -#define NpyIter_RemoveAxis \ - (*(int (*)(NpyIter *, int)) \ - PyArray_API[264]) -#define NpyIter_GetAxisStrideArray \ - (*(npy_intp * (*)(NpyIter *, int)) \ - 
PyArray_API[265]) -#define NpyIter_RequiresBuffering \ - (*(npy_bool (*)(NpyIter *)) \ - PyArray_API[266]) -#define NpyIter_GetInitialDataPtrArray \ - (*(char ** (*)(NpyIter *)) \ - PyArray_API[267]) -#define NpyIter_CreateCompatibleStrides \ - (*(int (*)(NpyIter *, npy_intp, npy_intp *)) \ - PyArray_API[268]) -#define PyArray_CastingConverter \ - (*(int (*)(PyObject *, NPY_CASTING *)) \ - PyArray_API[269]) -#define PyArray_CountNonzero \ - (*(npy_intp (*)(PyArrayObject *)) \ - PyArray_API[270]) -#define PyArray_PromoteTypes \ - (*(PyArray_Descr * (*)(PyArray_Descr *, PyArray_Descr *)) \ - PyArray_API[271]) -#define PyArray_MinScalarType \ - (*(PyArray_Descr * (*)(PyArrayObject *)) \ - PyArray_API[272]) -#define PyArray_ResultType \ - (*(PyArray_Descr * (*)(npy_intp, PyArrayObject **, npy_intp, PyArray_Descr **)) \ - PyArray_API[273]) -#define PyArray_CanCastArrayTo \ - (*(npy_bool (*)(PyArrayObject *, PyArray_Descr *, NPY_CASTING)) \ - PyArray_API[274]) -#define PyArray_CanCastTypeTo \ - (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *, NPY_CASTING)) \ - PyArray_API[275]) -#define PyArray_EinsteinSum \ - (*(PyArrayObject * (*)(char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *)) \ - PyArray_API[276]) -#define PyArray_NewLikeArray \ - (*(PyObject * (*)(PyArrayObject *, NPY_ORDER, PyArray_Descr *, int)) \ - PyArray_API[277]) -#define PyArray_GetArrayParamsFromObject \ - (*(int (*)(PyObject *, PyArray_Descr *, npy_bool, PyArray_Descr **, int *, npy_intp *, PyArrayObject **, PyObject *)) \ - PyArray_API[278]) -#define PyArray_ConvertClipmodeSequence \ - (*(int (*)(PyObject *, NPY_CLIPMODE *, int)) \ - PyArray_API[279]) -#define PyArray_MatrixProduct2 \ - (*(PyObject * (*)(PyObject *, PyObject *, PyArrayObject*)) \ - PyArray_API[280]) -#define NpyIter_IsFirstVisit \ - (*(npy_bool (*)(NpyIter *, int)) \ - PyArray_API[281]) -#define PyArray_SetBaseObject \ - (*(int (*)(PyArrayObject *, PyObject *)) \ - PyArray_API[282]) -#define 
PyArray_CreateSortedStridePerm \ - (*(void (*)(int, npy_intp const *, npy_stride_sort_item *)) \ - PyArray_API[283]) -#define PyArray_RemoveAxesInPlace \ - (*(void (*)(PyArrayObject *, const npy_bool *)) \ - PyArray_API[284]) -#define PyArray_DebugPrint \ - (*(void (*)(PyArrayObject *)) \ - PyArray_API[285]) -#define PyArray_FailUnlessWriteable \ - (*(int (*)(PyArrayObject *, const char *)) \ - PyArray_API[286]) -#define PyArray_SetUpdateIfCopyBase \ - (*(int (*)(PyArrayObject *, PyArrayObject *)) \ - PyArray_API[287]) -#define PyDataMem_NEW \ - (*(void * (*)(size_t)) \ - PyArray_API[288]) -#define PyDataMem_FREE \ - (*(void (*)(void *)) \ - PyArray_API[289]) -#define PyDataMem_RENEW \ - (*(void * (*)(void *, size_t)) \ - PyArray_API[290]) -#define PyDataMem_SetEventHook \ - (*(PyDataMem_EventHookFunc * (*)(PyDataMem_EventHookFunc *, void *, void **)) \ - PyArray_API[291]) -#define NPY_DEFAULT_ASSIGN_CASTING (*(NPY_CASTING *)PyArray_API[292]) -#define PyArray_MapIterSwapAxes \ - (*(void (*)(PyArrayMapIterObject *, PyArrayObject **, int)) \ - PyArray_API[293]) -#define PyArray_MapIterArray \ - (*(PyObject * (*)(PyArrayObject *, PyObject *)) \ - PyArray_API[294]) -#define PyArray_MapIterNext \ - (*(void (*)(PyArrayMapIterObject *)) \ - PyArray_API[295]) -#define PyArray_Partition \ - (*(int (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \ - PyArray_API[296]) -#define PyArray_ArgPartition \ - (*(PyObject * (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \ - PyArray_API[297]) -#define PyArray_SelectkindConverter \ - (*(int (*)(PyObject *, NPY_SELECTKIND *)) \ - PyArray_API[298]) -#define PyDataMem_NEW_ZEROED \ - (*(void * (*)(size_t, size_t)) \ - PyArray_API[299]) -#define PyArray_CheckAnyScalarExact \ - (*(int (*)(PyObject *)) \ - PyArray_API[300]) -#define PyArray_MapIterArrayCopyIfOverlap \ - (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *)) \ - PyArray_API[301]) -#define PyArray_ResolveWritebackIfCopy \ - (*(int 
(*)(PyArrayObject *)) \ - PyArray_API[302]) -#define PyArray_SetWritebackIfCopyBase \ - (*(int (*)(PyArrayObject *, PyArrayObject *)) \ - PyArray_API[303]) - -#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT) -static int -_import_array(void) -{ - int st; - PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); - PyObject *c_api = NULL; - - if (numpy == NULL) { - return -1; - } - c_api = PyObject_GetAttrString(numpy, "_ARRAY_API"); - Py_DECREF(numpy); - if (c_api == NULL) { - PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found"); - return -1; - } - -#if PY_VERSION_HEX >= 0x03000000 - if (!PyCapsule_CheckExact(c_api)) { - PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object"); - Py_DECREF(c_api); - return -1; - } - PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL); -#else - if (!PyCObject_Check(c_api)) { - PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCObject object"); - Py_DECREF(c_api); - return -1; - } - PyArray_API = (void **)PyCObject_AsVoidPtr(c_api); -#endif - Py_DECREF(c_api); - if (PyArray_API == NULL) { - PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer"); - return -1; - } - - /* Perform runtime check of C API version */ - if (NPY_VERSION != PyArray_GetNDArrayCVersion()) { - PyErr_Format(PyExc_RuntimeError, "module compiled against "\ - "ABI version 0x%x but this version of numpy is 0x%x", \ - (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion()); - return -1; - } - if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) { - PyErr_Format(PyExc_RuntimeError, "module compiled against "\ - "API version 0x%x but this version of numpy is 0x%x", \ - (int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion()); - return -1; - } - - /* - * Perform runtime check of endianness and check it matches the one set by - * the headers (npy_endian.h) as a safeguard - */ - st = PyArray_GetEndianness(); - if (st == NPY_CPU_UNKNOWN_ENDIAN) { - PyErr_Format(PyExc_RuntimeError, 
"FATAL: module compiled as unknown endian"); - return -1; - } -#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN - if (st != NPY_CPU_BIG) { - PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ - "big endian, but detected different endianness at runtime"); - return -1; - } -#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN - if (st != NPY_CPU_LITTLE) { - PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ - "little endian, but detected different endianness at runtime"); - return -1; - } -#endif - - return 0; -} - -#if PY_VERSION_HEX >= 0x03000000 -#define NUMPY_IMPORT_ARRAY_RETVAL NULL -#else -#define NUMPY_IMPORT_ARRAY_RETVAL -#endif - -#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NUMPY_IMPORT_ARRAY_RETVAL; } } - -#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } } - -#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } } - -#endif - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/__ufunc_api.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/__ufunc_api.h deleted file mode 100644 index d9385a6..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/__ufunc_api.h +++ /dev/null @@ -1,326 +0,0 @@ - -#ifdef _UMATHMODULE - -extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type; - -extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type; - -NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndData \ - (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int); -NPY_NO_EXPORT int PyUFunc_RegisterLoopForType \ - (PyUFuncObject *, int, PyUFuncGenericFunction, const int *, void *); -NPY_NO_EXPORT int PyUFunc_GenericFunction \ - (PyUFuncObject *, PyObject *, PyObject *, PyArrayObject **); -NPY_NO_EXPORT void 
PyUFunc_f_f_As_d_d \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_d_d \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_f_f \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_g_g \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_F_F_As_D_D \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_F_F \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_D_D \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_G_G \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_O_O \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_ff_f_As_dd_d \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_ff_f \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_dd_d \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_gg_g \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_FF_F_As_DD_D \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_DD_D \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_FF_F \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_GG_G \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_OO_O \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_O_O_method \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_OO_O_method \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_On_Om \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT int PyUFunc_GetPyValues \ - (char *, int *, int *, PyObject **); -NPY_NO_EXPORT int PyUFunc_checkfperr \ - (int, PyObject *, int *); -NPY_NO_EXPORT void PyUFunc_clearfperr \ - (void); -NPY_NO_EXPORT int PyUFunc_getfperr \ - (void); -NPY_NO_EXPORT int PyUFunc_handlefperr \ - (int, PyObject 
*, int, int *); -NPY_NO_EXPORT int PyUFunc_ReplaceLoopBySignature \ - (PyUFuncObject *, PyUFuncGenericFunction, const int *, PyUFuncGenericFunction *); -NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignature \ - (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *); -NPY_NO_EXPORT int PyUFunc_SetUsesArraysAsData \ - (void **, size_t); -NPY_NO_EXPORT void PyUFunc_e_e \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_e_e_As_f_f \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_e_e_As_d_d \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_ee_e \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_ee_e_As_ff_f \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_ee_e_As_dd_d \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT int PyUFunc_DefaultTypeResolver \ - (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **); -NPY_NO_EXPORT int PyUFunc_ValidateCasting \ - (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **); -NPY_NO_EXPORT int PyUFunc_RegisterLoopForDescr \ - (PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *); -NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignatureAndIdentity \ - (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, const int, const char *, PyObject *); - -#else - -#if defined(PY_UFUNC_UNIQUE_SYMBOL) -#define PyUFunc_API PY_UFUNC_UNIQUE_SYMBOL -#endif - -#if defined(NO_IMPORT) || defined(NO_IMPORT_UFUNC) -extern void **PyUFunc_API; -#else -#if defined(PY_UFUNC_UNIQUE_SYMBOL) -void **PyUFunc_API; -#else -static void **PyUFunc_API=NULL; -#endif -#endif - -#define PyUFunc_Type (*(PyTypeObject *)PyUFunc_API[0]) -#define PyUFunc_FromFuncAndData \ - (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const 
char *, int)) \ - PyUFunc_API[1]) -#define PyUFunc_RegisterLoopForType \ - (*(int (*)(PyUFuncObject *, int, PyUFuncGenericFunction, const int *, void *)) \ - PyUFunc_API[2]) -#define PyUFunc_GenericFunction \ - (*(int (*)(PyUFuncObject *, PyObject *, PyObject *, PyArrayObject **)) \ - PyUFunc_API[3]) -#define PyUFunc_f_f_As_d_d \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[4]) -#define PyUFunc_d_d \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[5]) -#define PyUFunc_f_f \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[6]) -#define PyUFunc_g_g \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[7]) -#define PyUFunc_F_F_As_D_D \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[8]) -#define PyUFunc_F_F \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[9]) -#define PyUFunc_D_D \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[10]) -#define PyUFunc_G_G \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[11]) -#define PyUFunc_O_O \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[12]) -#define PyUFunc_ff_f_As_dd_d \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[13]) -#define PyUFunc_ff_f \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[14]) -#define PyUFunc_dd_d \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[15]) -#define PyUFunc_gg_g \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[16]) -#define PyUFunc_FF_F_As_DD_D \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[17]) -#define PyUFunc_DD_D \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[18]) -#define PyUFunc_FF_F \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[19]) -#define PyUFunc_GG_G \ - (*(void (*)(char **, npy_intp *, 
npy_intp *, void *)) \ - PyUFunc_API[20]) -#define PyUFunc_OO_O \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[21]) -#define PyUFunc_O_O_method \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[22]) -#define PyUFunc_OO_O_method \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[23]) -#define PyUFunc_On_Om \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[24]) -#define PyUFunc_GetPyValues \ - (*(int (*)(char *, int *, int *, PyObject **)) \ - PyUFunc_API[25]) -#define PyUFunc_checkfperr \ - (*(int (*)(int, PyObject *, int *)) \ - PyUFunc_API[26]) -#define PyUFunc_clearfperr \ - (*(void (*)(void)) \ - PyUFunc_API[27]) -#define PyUFunc_getfperr \ - (*(int (*)(void)) \ - PyUFunc_API[28]) -#define PyUFunc_handlefperr \ - (*(int (*)(int, PyObject *, int, int *)) \ - PyUFunc_API[29]) -#define PyUFunc_ReplaceLoopBySignature \ - (*(int (*)(PyUFuncObject *, PyUFuncGenericFunction, const int *, PyUFuncGenericFunction *)) \ - PyUFunc_API[30]) -#define PyUFunc_FromFuncAndDataAndSignature \ - (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *)) \ - PyUFunc_API[31]) -#define PyUFunc_SetUsesArraysAsData \ - (*(int (*)(void **, size_t)) \ - PyUFunc_API[32]) -#define PyUFunc_e_e \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[33]) -#define PyUFunc_e_e_As_f_f \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[34]) -#define PyUFunc_e_e_As_d_d \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[35]) -#define PyUFunc_ee_e \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[36]) -#define PyUFunc_ee_e_As_ff_f \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[37]) -#define PyUFunc_ee_e_As_dd_d \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[38]) -#define 
PyUFunc_DefaultTypeResolver \ - (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **)) \ - PyUFunc_API[39]) -#define PyUFunc_ValidateCasting \ - (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **)) \ - PyUFunc_API[40]) -#define PyUFunc_RegisterLoopForDescr \ - (*(int (*)(PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *)) \ - PyUFunc_API[41]) -#define PyUFunc_FromFuncAndDataAndSignatureAndIdentity \ - (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, const int, const char *, PyObject *)) \ - PyUFunc_API[42]) - -static NPY_INLINE int -_import_umath(void) -{ - PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); - PyObject *c_api = NULL; - - if (numpy == NULL) { - PyErr_SetString(PyExc_ImportError, - "numpy.core._multiarray_umath failed to import"); - return -1; - } - c_api = PyObject_GetAttrString(numpy, "_UFUNC_API"); - Py_DECREF(numpy); - if (c_api == NULL) { - PyErr_SetString(PyExc_AttributeError, "_UFUNC_API not found"); - return -1; - } - -#if PY_VERSION_HEX >= 0x03000000 - if (!PyCapsule_CheckExact(c_api)) { - PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCapsule object"); - Py_DECREF(c_api); - return -1; - } - PyUFunc_API = (void **)PyCapsule_GetPointer(c_api, NULL); -#else - if (!PyCObject_Check(c_api)) { - PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCObject object"); - Py_DECREF(c_api); - return -1; - } - PyUFunc_API = (void **)PyCObject_AsVoidPtr(c_api); -#endif - Py_DECREF(c_api); - if (PyUFunc_API == NULL) { - PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is NULL pointer"); - return -1; - } - return 0; -} - -#if PY_VERSION_HEX >= 0x03000000 -#define NUMPY_IMPORT_UMATH_RETVAL NULL -#else -#define NUMPY_IMPORT_UMATH_RETVAL -#endif - -#define import_umath() \ - do {\ - UFUNC_NOFPE\ - if (_import_umath() < 0) {\ - PyErr_Print();\ - 
PyErr_SetString(PyExc_ImportError,\ - "numpy.core.umath failed to import");\ - return NUMPY_IMPORT_UMATH_RETVAL;\ - }\ - } while(0) - -#define import_umath1(ret) \ - do {\ - UFUNC_NOFPE\ - if (_import_umath() < 0) {\ - PyErr_Print();\ - PyErr_SetString(PyExc_ImportError,\ - "numpy.core.umath failed to import");\ - return ret;\ - }\ - } while(0) - -#define import_umath2(ret, msg) \ - do {\ - UFUNC_NOFPE\ - if (_import_umath() < 0) {\ - PyErr_Print();\ - PyErr_SetString(PyExc_ImportError, msg);\ - return ret;\ - }\ - } while(0) - -#define import_ufunc() \ - do {\ - UFUNC_NOFPE\ - if (_import_umath() < 0) {\ - PyErr_Print();\ - PyErr_SetString(PyExc_ImportError,\ - "numpy.core.umath failed to import");\ - }\ - } while(0) - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h deleted file mode 100644 index e8860cb..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h +++ /dev/null @@ -1,90 +0,0 @@ -#ifndef _NPY_INCLUDE_NEIGHBORHOOD_IMP -#error You should not include this header directly -#endif -/* - * Private API (here for inline) - */ -static NPY_INLINE int -_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter); - -/* - * Update to next item of the iterator - * - * Note: this simply increment the coordinates vector, last dimension - * incremented first , i.e, for dimension 3 - * ... - * -1, -1, -1 - * -1, -1, 0 - * -1, -1, 1 - * .... - * -1, 0, -1 - * -1, 0, 0 - * .... - * 0, -1, -1 - * 0, -1, 0 - * .... 
- */ -#define _UPDATE_COORD_ITER(c) \ - wb = iter->coordinates[c] < iter->bounds[c][1]; \ - if (wb) { \ - iter->coordinates[c] += 1; \ - return 0; \ - } \ - else { \ - iter->coordinates[c] = iter->bounds[c][0]; \ - } - -static NPY_INLINE int -_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter) -{ - npy_intp i, wb; - - for (i = iter->nd - 1; i >= 0; --i) { - _UPDATE_COORD_ITER(i) - } - - return 0; -} - -/* - * Version optimized for 2d arrays, manual loop unrolling - */ -static NPY_INLINE int -_PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter) -{ - npy_intp wb; - - _UPDATE_COORD_ITER(1) - _UPDATE_COORD_ITER(0) - - return 0; -} -#undef _UPDATE_COORD_ITER - -/* - * Advance to the next neighbour - */ -static NPY_INLINE int -PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter) -{ - _PyArrayNeighborhoodIter_IncrCoord (iter); - iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates); - - return 0; -} - -/* - * Reset functions - */ -static NPY_INLINE int -PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter) -{ - npy_intp i; - - for (i = 0; i < iter->nd; ++i) { - iter->coordinates[i] = iter->bounds[i][0]; - } - iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates); - - return 0; -} diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/_numpyconfig.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/_numpyconfig.h deleted file mode 100644 index edb7e37..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/_numpyconfig.h +++ /dev/null @@ -1,32 +0,0 @@ -#define NPY_HAVE_ENDIAN_H 1 -#define NPY_SIZEOF_SHORT SIZEOF_SHORT -#define NPY_SIZEOF_INT SIZEOF_INT -#define NPY_SIZEOF_LONG SIZEOF_LONG -#define NPY_SIZEOF_FLOAT 4 -#define NPY_SIZEOF_COMPLEX_FLOAT 8 -#define NPY_SIZEOF_DOUBLE 8 -#define NPY_SIZEOF_COMPLEX_DOUBLE 16 -#define NPY_SIZEOF_LONGDOUBLE 16 -#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32 -#define 
NPY_SIZEOF_PY_INTPTR_T 8 -#define NPY_SIZEOF_OFF_T 8 -#define NPY_SIZEOF_PY_LONG_LONG 8 -#define NPY_SIZEOF_LONGLONG 8 -#define NPY_NO_SMP 0 -#define NPY_HAVE_DECL_ISNAN -#define NPY_HAVE_DECL_ISINF -#define NPY_HAVE_DECL_ISFINITE -#define NPY_HAVE_DECL_SIGNBIT -#define NPY_USE_C99_COMPLEX 1 -#define NPY_HAVE_COMPLEX_DOUBLE 1 -#define NPY_HAVE_COMPLEX_FLOAT 1 -#define NPY_HAVE_COMPLEX_LONG_DOUBLE 1 -#define NPY_RELAXED_STRIDES_CHECKING 1 -#define NPY_USE_C99_FORMATS 1 -#define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden"))) -#define NPY_ABI_VERSION 0x01000009 -#define NPY_API_VERSION 0x0000000D - -#ifndef __STDC_FORMAT_MACROS -#define __STDC_FORMAT_MACROS 1 -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/arrayobject.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/arrayobject.h deleted file mode 100644 index 4f46d6b..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/arrayobject.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef Py_ARRAYOBJECT_H -#define Py_ARRAYOBJECT_H - -#include "ndarrayobject.h" -#include "npy_interrupt.h" - -#ifdef NPY_NO_PREFIX -#include "noprefix.h" -#endif - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/arrayscalars.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/arrayscalars.h deleted file mode 100644 index 64450e7..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/arrayscalars.h +++ /dev/null @@ -1,175 +0,0 @@ -#ifndef _NPY_ARRAYSCALARS_H_ -#define _NPY_ARRAYSCALARS_H_ - -#ifndef _MULTIARRAYMODULE -typedef struct { - PyObject_HEAD - npy_bool obval; -} PyBoolScalarObject; -#endif - - -typedef struct { - PyObject_HEAD - signed char obval; -} PyByteScalarObject; - - -typedef struct { - PyObject_HEAD - short obval; -} PyShortScalarObject; - - -typedef struct { - PyObject_HEAD - int obval; -} PyIntScalarObject; - - -typedef struct { - PyObject_HEAD - long obval; -} PyLongScalarObject; - - -typedef struct { - 
PyObject_HEAD - npy_longlong obval; -} PyLongLongScalarObject; - - -typedef struct { - PyObject_HEAD - unsigned char obval; -} PyUByteScalarObject; - - -typedef struct { - PyObject_HEAD - unsigned short obval; -} PyUShortScalarObject; - - -typedef struct { - PyObject_HEAD - unsigned int obval; -} PyUIntScalarObject; - - -typedef struct { - PyObject_HEAD - unsigned long obval; -} PyULongScalarObject; - - -typedef struct { - PyObject_HEAD - npy_ulonglong obval; -} PyULongLongScalarObject; - - -typedef struct { - PyObject_HEAD - npy_half obval; -} PyHalfScalarObject; - - -typedef struct { - PyObject_HEAD - float obval; -} PyFloatScalarObject; - - -typedef struct { - PyObject_HEAD - double obval; -} PyDoubleScalarObject; - - -typedef struct { - PyObject_HEAD - npy_longdouble obval; -} PyLongDoubleScalarObject; - - -typedef struct { - PyObject_HEAD - npy_cfloat obval; -} PyCFloatScalarObject; - - -typedef struct { - PyObject_HEAD - npy_cdouble obval; -} PyCDoubleScalarObject; - - -typedef struct { - PyObject_HEAD - npy_clongdouble obval; -} PyCLongDoubleScalarObject; - - -typedef struct { - PyObject_HEAD - PyObject * obval; -} PyObjectScalarObject; - -typedef struct { - PyObject_HEAD - npy_datetime obval; - PyArray_DatetimeMetaData obmeta; -} PyDatetimeScalarObject; - -typedef struct { - PyObject_HEAD - npy_timedelta obval; - PyArray_DatetimeMetaData obmeta; -} PyTimedeltaScalarObject; - - -typedef struct { - PyObject_HEAD - char obval; -} PyScalarObject; - -#define PyStringScalarObject PyStringObject -#define PyUnicodeScalarObject PyUnicodeObject - -typedef struct { - PyObject_VAR_HEAD - char *obval; - PyArray_Descr *descr; - int flags; - PyObject *base; -} PyVoidScalarObject; - -/* Macros - PyScalarObject - PyArrType_Type - are defined in ndarrayobject.h -*/ - -#define PyArrayScalar_False ((PyObject *)(&(_PyArrayScalar_BoolValues[0]))) -#define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1]))) -#define PyArrayScalar_FromLong(i) \ - ((PyObject 
*)(&(_PyArrayScalar_BoolValues[((i)!=0)]))) -#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \ - return Py_INCREF(PyArrayScalar_FromLong(i)), \ - PyArrayScalar_FromLong(i) -#define PyArrayScalar_RETURN_FALSE \ - return Py_INCREF(PyArrayScalar_False), \ - PyArrayScalar_False -#define PyArrayScalar_RETURN_TRUE \ - return Py_INCREF(PyArrayScalar_True), \ - PyArrayScalar_True - -#define PyArrayScalar_New(cls) \ - Py##cls##ArrType_Type.tp_alloc(&Py##cls##ArrType_Type, 0) -#define PyArrayScalar_VAL(obj, cls) \ - ((Py##cls##ScalarObject *)obj)->obval -#define PyArrayScalar_ASSIGN(obj, cls, val) \ - PyArrayScalar_VAL(obj, cls) = val - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/halffloat.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/halffloat.h deleted file mode 100644 index ab0d221..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/halffloat.h +++ /dev/null @@ -1,70 +0,0 @@ -#ifndef __NPY_HALFFLOAT_H__ -#define __NPY_HALFFLOAT_H__ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * Half-precision routines - */ - -/* Conversions */ -float npy_half_to_float(npy_half h); -double npy_half_to_double(npy_half h); -npy_half npy_float_to_half(float f); -npy_half npy_double_to_half(double d); -/* Comparisons */ -int npy_half_eq(npy_half h1, npy_half h2); -int npy_half_ne(npy_half h1, npy_half h2); -int npy_half_le(npy_half h1, npy_half h2); -int npy_half_lt(npy_half h1, npy_half h2); -int npy_half_ge(npy_half h1, npy_half h2); -int npy_half_gt(npy_half h1, npy_half h2); -/* faster *_nonan variants for when you know h1 and h2 are not NaN */ -int npy_half_eq_nonan(npy_half h1, npy_half h2); -int npy_half_lt_nonan(npy_half h1, npy_half h2); -int npy_half_le_nonan(npy_half h1, npy_half h2); -/* Miscellaneous functions */ -int npy_half_iszero(npy_half h); -int npy_half_isnan(npy_half h); -int npy_half_isinf(npy_half h); -int npy_half_isfinite(npy_half h); -int npy_half_signbit(npy_half h); 
-npy_half npy_half_copysign(npy_half x, npy_half y); -npy_half npy_half_spacing(npy_half h); -npy_half npy_half_nextafter(npy_half x, npy_half y); -npy_half npy_half_divmod(npy_half x, npy_half y, npy_half *modulus); - -/* - * Half-precision constants - */ - -#define NPY_HALF_ZERO (0x0000u) -#define NPY_HALF_PZERO (0x0000u) -#define NPY_HALF_NZERO (0x8000u) -#define NPY_HALF_ONE (0x3c00u) -#define NPY_HALF_NEGONE (0xbc00u) -#define NPY_HALF_PINF (0x7c00u) -#define NPY_HALF_NINF (0xfc00u) -#define NPY_HALF_NAN (0x7e00u) - -#define NPY_MAX_HALF (0x7bffu) - -/* - * Bit-level conversions - */ - -npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f); -npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d); -npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h); -npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/multiarray_api.txt b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/multiarray_api.txt deleted file mode 100644 index 7e06386..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/multiarray_api.txt +++ /dev/null @@ -1,2506 +0,0 @@ - -=========== -NumPy C-API -=========== -:: - - unsigned int - PyArray_GetNDArrayCVersion(void ) - - -Included at the very first so not auto-grabbed and thus not labeled. - -:: - - int - PyArray_SetNumericOps(PyObject *dict) - -Set internal structure with number functions that all arrays will use - -:: - - PyObject * - PyArray_GetNumericOps(void ) - -Get dictionary showing number functions that all arrays will use - -:: - - int - PyArray_INCREF(PyArrayObject *mp) - -For object arrays, increment all internal references. - -:: - - int - PyArray_XDECREF(PyArrayObject *mp) - -Decrement all internal references for object arrays. -(or arrays with object fields) - -:: - - void - PyArray_SetStringFunction(PyObject *op, int repr) - -Set the array print function to be a Python function. 
- -:: - - PyArray_Descr * - PyArray_DescrFromType(int type) - -Get the PyArray_Descr structure for a type. - -:: - - PyObject * - PyArray_TypeObjectFromType(int type) - -Get a typeobject from a type-number -- can return NULL. - -New reference - -:: - - char * - PyArray_Zero(PyArrayObject *arr) - -Get pointer to zero of correct type for array. - -:: - - char * - PyArray_One(PyArrayObject *arr) - -Get pointer to one of correct type for array - -:: - - PyObject * - PyArray_CastToType(PyArrayObject *arr, PyArray_Descr *dtype, int - is_f_order) - -For backward compatibility - -Cast an array using typecode structure. -steals reference to dtype --- cannot be NULL - -This function always makes a copy of arr, even if the dtype -doesn't change. - -:: - - int - PyArray_CastTo(PyArrayObject *out, PyArrayObject *mp) - -Cast to an already created array. - -:: - - int - PyArray_CastAnyTo(PyArrayObject *out, PyArrayObject *mp) - -Cast to an already created array. Arrays don't have to be "broadcastable" -Only requirement is they have the same number of elements. - -:: - - int - PyArray_CanCastSafely(int fromtype, int totype) - -Check the type coercion rules. - -:: - - npy_bool - PyArray_CanCastTo(PyArray_Descr *from, PyArray_Descr *to) - -leaves reference count alone --- cannot be NULL - -PyArray_CanCastTypeTo is equivalent to this, but adds a 'casting' -parameter. - -:: - - int - PyArray_ObjectType(PyObject *op, int minimum_type) - -Return the typecode of the array a Python object would be converted to - -Returns the type number the result should have, or NPY_NOTYPE on error. - -:: - - PyArray_Descr * - PyArray_DescrFromObject(PyObject *op, PyArray_Descr *mintype) - -new reference -- accepts NULL for mintype - -:: - - PyArrayObject ** - PyArray_ConvertToCommonType(PyObject *op, int *retn) - - -:: - - PyArray_Descr * - PyArray_DescrFromScalar(PyObject *sc) - -Return descr object from array scalar. 
- -New reference - -:: - - PyArray_Descr * - PyArray_DescrFromTypeObject(PyObject *type) - - -:: - - npy_intp - PyArray_Size(PyObject *op) - -Compute the size of an array (in number of items) - -:: - - PyObject * - PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base) - -Get scalar-equivalent to a region of memory described by a descriptor. - -:: - - PyObject * - PyArray_FromScalar(PyObject *scalar, PyArray_Descr *outcode) - -Get 0-dim array from scalar - -0-dim array from array-scalar object -always contains a copy of the data -unless outcode is NULL, it is of void type and the referrer does -not own it either. - -steals reference to outcode - -:: - - void - PyArray_ScalarAsCtype(PyObject *scalar, void *ctypeptr) - -Convert to c-type - -no error checking is performed -- ctypeptr must be same type as scalar -in case of flexible type, the data is not copied -into ctypeptr which is expected to be a pointer to pointer - -:: - - int - PyArray_CastScalarToCtype(PyObject *scalar, void - *ctypeptr, PyArray_Descr *outcode) - -Cast Scalar to c-type - -The output buffer must be large-enough to receive the value -Even for flexible types which is different from ScalarAsCtype -where only a reference for flexible types is returned - -This may not work right on narrow builds for NumPy unicode scalars. - -:: - - int - PyArray_CastScalarDirect(PyObject *scalar, PyArray_Descr - *indescr, void *ctypeptr, int outtype) - -Cast Scalar to c-type - -:: - - PyObject * - PyArray_ScalarFromObject(PyObject *object) - -Get an Array Scalar From a Python Object - -Returns NULL if unsuccessful but error is only set if another error occurred. -Currently only Numeric-like object supported. - -:: - - PyArray_VectorUnaryFunc * - PyArray_GetCastFunc(PyArray_Descr *descr, int type_num) - -Get a cast function to cast from the input descriptor to the -output type_number (must be a registered data-type). -Returns NULL if un-successful. 
- -:: - - PyObject * - PyArray_FromDims(int NPY_UNUSED(nd) , int *NPY_UNUSED(d) , int - NPY_UNUSED(type) ) - -Deprecated, use PyArray_SimpleNew instead. - -:: - - PyObject * - PyArray_FromDimsAndDataAndDescr(int NPY_UNUSED(nd) , int - *NPY_UNUSED(d) , PyArray_Descr - *descr, char *NPY_UNUSED(data) ) - -Deprecated, use PyArray_NewFromDescr instead. - -:: - - PyObject * - PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int - min_depth, int max_depth, int flags, PyObject - *context) - -Does not check for NPY_ARRAY_ENSURECOPY and NPY_ARRAY_NOTSWAPPED in flags -Steals a reference to newtype --- which can be NULL - -:: - - PyObject * - PyArray_EnsureArray(PyObject *op) - -This is a quick wrapper around -PyArray_FromAny(op, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL) -that special cases Arrays and PyArray_Scalars up front -It *steals a reference* to the object -It also guarantees that the result is PyArray_Type -Because it decrefs op if any conversion needs to take place -so it can be used like PyArray_EnsureArray(some_function(...)) - -:: - - PyObject * - PyArray_EnsureAnyArray(PyObject *op) - - -:: - - PyObject * - PyArray_FromFile(FILE *fp, PyArray_Descr *dtype, npy_intp num, char - *sep) - - -Given a ``FILE *`` pointer ``fp``, and a ``PyArray_Descr``, return an -array corresponding to the data encoded in that file. - -The reference to `dtype` is stolen (it is possible that the passed in -dtype is not held on to). - -The number of elements to read is given as ``num``; if it is < 0, then -then as many as possible are read. - -If ``sep`` is NULL or empty, then binary data is assumed, else -text data, with ``sep`` as the separator between elements. Whitespace in -the separator matches any length of whitespace in the text, and a match -for whitespace around the separator is added. - -For memory-mapped files, use the buffer interface. No more data than -necessary is read by this routine. 
- -:: - - PyObject * - PyArray_FromString(char *data, npy_intp slen, PyArray_Descr - *dtype, npy_intp num, char *sep) - - -Given a pointer to a string ``data``, a string length ``slen``, and -a ``PyArray_Descr``, return an array corresponding to the data -encoded in that string. - -If the dtype is NULL, the default array type is used (double). -If non-null, the reference is stolen. - -If ``slen`` is < 0, then the end of string is used for text data. -It is an error for ``slen`` to be < 0 for binary data (since embedded NULLs -would be the norm). - -The number of elements to read is given as ``num``; if it is < 0, then -then as many as possible are read. - -If ``sep`` is NULL or empty, then binary data is assumed, else -text data, with ``sep`` as the separator between elements. Whitespace in -the separator matches any length of whitespace in the text, and a match -for whitespace around the separator is added. - -:: - - PyObject * - PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type, npy_intp - count, npy_intp offset) - - -:: - - PyObject * - PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) - - -steals a reference to dtype (which cannot be NULL) - -:: - - PyObject * - PyArray_Return(PyArrayObject *mp) - - -Return either an array or the appropriate Python object if the array -is 0d and matches a Python type. 
-steals reference to mp - -:: - - PyObject * - PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int - offset) - -Get a subset of bytes from each element of the array -steals reference to typed, must not be NULL - -:: - - int - PyArray_SetField(PyArrayObject *self, PyArray_Descr *dtype, int - offset, PyObject *val) - -Set a subset of bytes from each element of the array -steals reference to dtype, must not be NULL - -:: - - PyObject * - PyArray_Byteswap(PyArrayObject *self, npy_bool inplace) - - -:: - - PyObject * - PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int - refcheck, NPY_ORDER NPY_UNUSED(order) ) - -Resize (reallocate data). Only works if nothing else is referencing this -array and it is contiguous. If refcheck is 0, then the reference count is -not checked and assumed to be 1. You still must own this data and have no -weak-references and no base object. - -:: - - int - PyArray_MoveInto(PyArrayObject *dst, PyArrayObject *src) - -Move the memory of one array into another, allowing for overlapping data. - -Returns 0 on success, negative on failure. - -:: - - int - PyArray_CopyInto(PyArrayObject *dst, PyArrayObject *src) - -Copy an Array into another array. -Broadcast to the destination shape if necessary. - -Returns 0 on success, -1 on failure. - -:: - - int - PyArray_CopyAnyInto(PyArrayObject *dst, PyArrayObject *src) - -Copy an Array into another array -- memory must not overlap -Does not require src and dest to have "broadcastable" shapes -(only the same number of elements). - -TODO: For NumPy 2.0, this could accept an order parameter which -only allows NPY_CORDER and NPY_FORDER. Could also rename -this to CopyAsFlat to make the name more intuitive. - -Returns 0 on success, -1 on error. - -:: - - int - PyArray_CopyObject(PyArrayObject *dest, PyObject *src_object) - - -:: - - PyObject * - PyArray_NewCopy(PyArrayObject *obj, NPY_ORDER order) - -Copy an array. 
- -:: - - PyObject * - PyArray_ToList(PyArrayObject *self) - -To List - -:: - - PyObject * - PyArray_ToString(PyArrayObject *self, NPY_ORDER order) - - -:: - - int - PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format) - -To File - -:: - - int - PyArray_Dump(PyObject *self, PyObject *file, int protocol) - - -:: - - PyObject * - PyArray_Dumps(PyObject *self, int protocol) - - -:: - - int - PyArray_ValidType(int type) - -Is the typenum valid? - -:: - - void - PyArray_UpdateFlags(PyArrayObject *ret, int flagmask) - -Update Several Flags at once. - -:: - - PyObject * - PyArray_New(PyTypeObject *subtype, int nd, npy_intp const *dims, int - type_num, npy_intp const *strides, void *data, int - itemsize, int flags, PyObject *obj) - -Generic new array creation routine. - -:: - - PyObject * - PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr, int - nd, npy_intp const *dims, npy_intp const - *strides, void *data, int flags, PyObject *obj) - -Generic new array creation routine. - -steals a reference to descr. On failure or when dtype->subarray is -true, dtype will be decrefed. - -:: - - PyArray_Descr * - PyArray_DescrNew(PyArray_Descr *base) - -base cannot be NULL - -:: - - PyArray_Descr * - PyArray_DescrNewFromType(int type_num) - - -:: - - double - PyArray_GetPriority(PyObject *obj, double default_) - -Get Priority from object - -:: - - PyObject * - PyArray_IterNew(PyObject *obj) - -Get Iterator. - -:: - - PyObject* - PyArray_MultiIterNew(int n, ... 
) - -Get MultiIterator, - -:: - - int - PyArray_PyIntAsInt(PyObject *o) - - -:: - - npy_intp - PyArray_PyIntAsIntp(PyObject *o) - - -:: - - int - PyArray_Broadcast(PyArrayMultiIterObject *mit) - - -:: - - void - PyArray_FillObjectArray(PyArrayObject *arr, PyObject *obj) - -Assumes contiguous - -:: - - int - PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj) - - -:: - - npy_bool - PyArray_CheckStrides(int elsize, int nd, npy_intp numbytes, npy_intp - offset, npy_intp *dims, npy_intp *newstrides) - - -:: - - PyArray_Descr * - PyArray_DescrNewByteorder(PyArray_Descr *self, char newendian) - - -returns a copy of the PyArray_Descr structure with the byteorder -altered: -no arguments: The byteorder is swapped (in all subfields as well) -single argument: The byteorder is forced to the given state -(in all subfields as well) - -Valid states: ('big', '>') or ('little' or '<') -('native', or '=') - -If a descr structure with | is encountered it's own -byte-order is not changed but any fields are: - - -Deep bytorder change of a data-type descriptor -Leaves reference count of self unchanged --- does not DECREF self *** - -:: - - PyObject * - PyArray_IterAllButAxis(PyObject *obj, int *inaxis) - -Get Iterator that iterates over all but one axis (don't use this with -PyArray_ITER_GOTO1D). The axis will be over-written if negative -with the axis having the smallest stride. - -:: - - PyObject * - PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int - min_depth, int max_depth, int requires, PyObject - *context) - -steals a reference to descr -- accepts NULL - -:: - - PyObject * - PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int - flags) - -steals reference to newtype --- acc. 
NULL - -:: - - PyObject * - PyArray_FromInterface(PyObject *origin) - - -:: - - PyObject * - PyArray_FromStructInterface(PyObject *input) - - -:: - - PyObject * - PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, PyObject - *context) - - -:: - - NPY_SCALARKIND - PyArray_ScalarKind(int typenum, PyArrayObject **arr) - -ScalarKind - -Returns the scalar kind of a type number, with an -optional tweak based on the scalar value itself. -If no scalar is provided, it returns INTPOS_SCALAR -for both signed and unsigned integers, otherwise -it checks the sign of any signed integer to choose -INTNEG_SCALAR when appropriate. - -:: - - int - PyArray_CanCoerceScalar(int thistype, int neededtype, NPY_SCALARKIND - scalar) - - -Determines whether the data type 'thistype', with -scalar kind 'scalar', can be coerced into 'neededtype'. - -:: - - PyObject * - PyArray_NewFlagsObject(PyObject *obj) - - -Get New ArrayFlagsObject - -:: - - npy_bool - PyArray_CanCastScalar(PyTypeObject *from, PyTypeObject *to) - -See if array scalars can be cast. - -TODO: For NumPy 2.0, add a NPY_CASTING parameter. - -:: - - int - PyArray_CompareUCS4(npy_ucs4 *s1, npy_ucs4 *s2, size_t len) - - -:: - - int - PyArray_RemoveSmallest(PyArrayMultiIterObject *multi) - -Adjusts previously broadcasted iterators so that the axis with -the smallest sum of iterator strides is not iterated over. -Returns dimension which is smallest in the range [0,multi->nd). -A -1 is returned if multi->nd == 0. - -don't use with PyArray_ITER_GOTO1D because factors are not adjusted - -:: - - int - PyArray_ElementStrides(PyObject *obj) - - -:: - - void - PyArray_Item_INCREF(char *data, PyArray_Descr *descr) - -XINCREF all objects in a single array item. This is complicated for -structured datatypes where the position of objects needs to be extracted. 
-The function is execute recursively for each nested field or subarrays dtype -such as as `np.dtype([("field1", "O"), ("field2", "f,O", (3,2))])` - -:: - - void - PyArray_Item_XDECREF(char *data, PyArray_Descr *descr) - - -XDECREF all objects in a single array item. This is complicated for -structured datatypes where the position of objects needs to be extracted. -The function is execute recursively for each nested field or subarrays dtype -such as as `np.dtype([("field1", "O"), ("field2", "f,O", (3,2))])` - -:: - - PyObject * - PyArray_FieldNames(PyObject *fields) - -Return the tuple of ordered field names from a dictionary. - -:: - - PyObject * - PyArray_Transpose(PyArrayObject *ap, PyArray_Dims *permute) - -Return Transpose. - -:: - - PyObject * - PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int - axis, PyArrayObject *out, NPY_CLIPMODE clipmode) - -Take - -:: - - PyObject * - PyArray_PutTo(PyArrayObject *self, PyObject*values0, PyObject - *indices0, NPY_CLIPMODE clipmode) - -Put values into an array - -:: - - PyObject * - PyArray_PutMask(PyArrayObject *self, PyObject*values0, PyObject*mask0) - -Put values into an array according to a mask. - -:: - - PyObject * - PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) - -Repeat the array. - -:: - - PyObject * - PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject - *out, NPY_CLIPMODE clipmode) - - -:: - - int - PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which) - -Sort an array in-place - -:: - - PyObject * - PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND which) - -ArgSort an array - -:: - - PyObject * - PyArray_SearchSorted(PyArrayObject *op1, PyObject *op2, NPY_SEARCHSIDE - side, PyObject *perm) - - -Search the sorted array op1 for the location of the items in op2. The -result is an array of indexes, one for each element in op2, such that if -the item were to be inserted in op1 just before that index the array -would still be in sorted order. 
- -Parameters ----------- -op1 : PyArrayObject * -Array to be searched, must be 1-D. -op2 : PyObject * -Array of items whose insertion indexes in op1 are wanted -side : {NPY_SEARCHLEFT, NPY_SEARCHRIGHT} -If NPY_SEARCHLEFT, return first valid insertion indexes -If NPY_SEARCHRIGHT, return last valid insertion indexes -perm : PyObject * -Permutation array that sorts op1 (optional) - -Returns -------- -ret : PyObject * -New reference to npy_intp array containing indexes where items in op2 -could be validly inserted into op1. NULL on error. - -Notes ------ -Binary search is used to find the indexes. - -:: - - PyObject * - PyArray_ArgMax(PyArrayObject *op, int axis, PyArrayObject *out) - -ArgMax - -:: - - PyObject * - PyArray_ArgMin(PyArrayObject *op, int axis, PyArrayObject *out) - -ArgMin - -:: - - PyObject * - PyArray_Reshape(PyArrayObject *self, PyObject *shape) - -Reshape - -:: - - PyObject * - PyArray_Newshape(PyArrayObject *self, PyArray_Dims *newdims, NPY_ORDER - order) - -New shape for an array - -:: - - PyObject * - PyArray_Squeeze(PyArrayObject *self) - - -return a new view of the array object with all of its unit-length -dimensions squeezed out if needed, otherwise -return the same array. 
- -:: - - PyObject * - PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject - *pytype) - -View -steals a reference to type -- accepts NULL - -:: - - PyObject * - PyArray_SwapAxes(PyArrayObject *ap, int a1, int a2) - -SwapAxes - -:: - - PyObject * - PyArray_Max(PyArrayObject *ap, int axis, PyArrayObject *out) - -Max - -:: - - PyObject * - PyArray_Min(PyArrayObject *ap, int axis, PyArrayObject *out) - -Min - -:: - - PyObject * - PyArray_Ptp(PyArrayObject *ap, int axis, PyArrayObject *out) - -Ptp - -:: - - PyObject * - PyArray_Mean(PyArrayObject *self, int axis, int rtype, PyArrayObject - *out) - -Mean - -:: - - PyObject * - PyArray_Trace(PyArrayObject *self, int offset, int axis1, int - axis2, int rtype, PyArrayObject *out) - -Trace - -:: - - PyObject * - PyArray_Diagonal(PyArrayObject *self, int offset, int axis1, int - axis2) - -Diagonal - -In NumPy versions prior to 1.7, this function always returned a copy of -the diagonal array. In 1.7, the code has been updated to compute a view -onto 'self', but it still copies this array before returning, as well as -setting the internal WARN_ON_WRITE flag. In a future version, it will -simply return a view onto self. - -:: - - PyObject * - PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject - *max, PyArrayObject *out) - -Clip - -:: - - PyObject * - PyArray_Conjugate(PyArrayObject *self, PyArrayObject *out) - -Conjugate - -:: - - PyObject * - PyArray_Nonzero(PyArrayObject *self) - -Nonzero - -TODO: In NumPy 2.0, should make the iteration order a parameter. 
- -:: - - PyObject * - PyArray_Std(PyArrayObject *self, int axis, int rtype, PyArrayObject - *out, int variance) - -Set variance to 1 to by-pass square-root calculation and return variance -Std - -:: - - PyObject * - PyArray_Sum(PyArrayObject *self, int axis, int rtype, PyArrayObject - *out) - -Sum - -:: - - PyObject * - PyArray_CumSum(PyArrayObject *self, int axis, int rtype, PyArrayObject - *out) - -CumSum - -:: - - PyObject * - PyArray_Prod(PyArrayObject *self, int axis, int rtype, PyArrayObject - *out) - -Prod - -:: - - PyObject * - PyArray_CumProd(PyArrayObject *self, int axis, int - rtype, PyArrayObject *out) - -CumProd - -:: - - PyObject * - PyArray_All(PyArrayObject *self, int axis, PyArrayObject *out) - -All - -:: - - PyObject * - PyArray_Any(PyArrayObject *self, int axis, PyArrayObject *out) - -Any - -:: - - PyObject * - PyArray_Compress(PyArrayObject *self, PyObject *condition, int - axis, PyArrayObject *out) - -Compress - -:: - - PyObject * - PyArray_Flatten(PyArrayObject *a, NPY_ORDER order) - -Flatten - -:: - - PyObject * - PyArray_Ravel(PyArrayObject *arr, NPY_ORDER order) - -Ravel -Returns a contiguous array - -:: - - npy_intp - PyArray_MultiplyList(npy_intp const *l1, int n) - -Multiply a List - -:: - - int - PyArray_MultiplyIntList(int const *l1, int n) - -Multiply a List of ints - -:: - - void * - PyArray_GetPtr(PyArrayObject *obj, npy_intp const*ind) - -Produce a pointer into array - -:: - - int - PyArray_CompareLists(npy_intp const *l1, npy_intp const *l2, int n) - -Compare Lists - -:: - - int - PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int - nd, PyArray_Descr*typedescr) - -Simulate a C-array -steals a reference to typedescr -- can be NULL - -:: - - int - PyArray_As1D(PyObject **NPY_UNUSED(op) , char **NPY_UNUSED(ptr) , int - *NPY_UNUSED(d1) , int NPY_UNUSED(typecode) ) - -Convert to a 1D C-array - -:: - - int - PyArray_As2D(PyObject **NPY_UNUSED(op) , char ***NPY_UNUSED(ptr) , int - *NPY_UNUSED(d1) , int *NPY_UNUSED(d2) , int 
- NPY_UNUSED(typecode) ) - -Convert to a 2D C-array - -:: - - int - PyArray_Free(PyObject *op, void *ptr) - -Free pointers created if As2D is called - -:: - - int - PyArray_Converter(PyObject *object, PyObject **address) - - -Useful to pass as converter function for O& processing in PyArgs_ParseTuple. - -This conversion function can be used with the "O&" argument for -PyArg_ParseTuple. It will immediately return an object of array type -or will convert to a NPY_ARRAY_CARRAY any other object. - -If you use PyArray_Converter, you must DECREF the array when finished -as you get a new reference to it. - -:: - - int - PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals) - -PyArray_IntpFromSequence -Returns the number of integers converted or -1 if an error occurred. -vals must be large enough to hold maxvals - -:: - - PyObject * - PyArray_Concatenate(PyObject *op, int axis) - -Concatenate - -Concatenate an arbitrary Python sequence into an array. -op is a python object supporting the sequence interface. -Its elements will be concatenated together to form a single -multidimensional array. If axis is NPY_MAXDIMS or bigger, then -each sequence object will be flattened before concatenation - -:: - - PyObject * - PyArray_InnerProduct(PyObject *op1, PyObject *op2) - -Numeric.innerproduct(a,v) - -:: - - PyObject * - PyArray_MatrixProduct(PyObject *op1, PyObject *op2) - -Numeric.matrixproduct(a,v) -just like inner product but does the swapaxes stuff on the fly - -:: - - PyObject * - PyArray_CopyAndTranspose(PyObject *op) - -Copy and Transpose - -Could deprecate this function, as there isn't a speed benefit over -calling Transpose and then Copy. 
- -:: - - PyObject * - PyArray_Correlate(PyObject *op1, PyObject *op2, int mode) - -Numeric.correlate(a1,a2,mode) - -:: - - int - PyArray_TypestrConvert(int itemsize, int gentype) - -Typestr converter - -:: - - int - PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at) - -Get typenum from an object -- None goes to NPY_DEFAULT_TYPE -This function takes a Python object representing a type and converts it -to a the correct PyArray_Descr * structure to describe the type. - -Many objects can be used to represent a data-type which in NumPy is -quite a flexible concept. - -This is the central code that converts Python objects to -Type-descriptor objects that are used throughout numpy. - -Returns a new reference in *at, but the returned should not be -modified as it may be one of the canonical immutable objects or -a reference to the input obj. - -:: - - int - PyArray_DescrConverter2(PyObject *obj, PyArray_Descr **at) - -Get typenum from an object -- None goes to NULL - -:: - - int - PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq) - -Get intp chunk from sequence - -This function takes a Python sequence object and allocates and -fills in an intp array with the converted values. - -Remember to free the pointer seq.ptr when done using -PyDimMem_FREE(seq.ptr)** - -:: - - int - PyArray_BufferConverter(PyObject *obj, PyArray_Chunk *buf) - -Get buffer chunk from object - -this function takes a Python object which exposes the (single-segment) -buffer interface and returns a pointer to the data segment - -You should increment the reference count by one of buf->base -if you will hang on to a reference - -You only get a borrowed reference to the object. Do not free the -memory... - -:: - - int - PyArray_AxisConverter(PyObject *obj, int *axis) - -Get axis from an object (possibly None) -- a converter function, - -See also PyArray_ConvertMultiAxis, which also handles a tuple of axes. 
- -:: - - int - PyArray_BoolConverter(PyObject *object, npy_bool *val) - -Convert an object to true / false - -:: - - int - PyArray_ByteorderConverter(PyObject *obj, char *endian) - -Convert object to endian - -:: - - int - PyArray_OrderConverter(PyObject *object, NPY_ORDER *val) - -Convert an object to FORTRAN / C / ANY / KEEP - -:: - - unsigned char - PyArray_EquivTypes(PyArray_Descr *type1, PyArray_Descr *type2) - - -This function returns true if the two typecodes are -equivalent (same basic kind and same itemsize). - -:: - - PyObject * - PyArray_Zeros(int nd, npy_intp const *dims, PyArray_Descr *type, int - is_f_order) - -Zeros - -steals a reference to type. On failure or when dtype->subarray is -true, dtype will be decrefed. -accepts NULL type - -:: - - PyObject * - PyArray_Empty(int nd, npy_intp const *dims, PyArray_Descr *type, int - is_f_order) - -Empty - -accepts NULL type -steals a reference to type - -:: - - PyObject * - PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) - -Where - -:: - - PyObject * - PyArray_Arange(double start, double stop, double step, int type_num) - -Arange, - -:: - - PyObject * - PyArray_ArangeObj(PyObject *start, PyObject *stop, PyObject - *step, PyArray_Descr *dtype) - - -ArangeObj, - -this doesn't change the references - -:: - - int - PyArray_SortkindConverter(PyObject *obj, NPY_SORTKIND *sortkind) - -Convert object to sort kind - -:: - - PyObject * - PyArray_LexSort(PyObject *sort_keys, int axis) - -LexSort an array providing indices that will sort a collection of arrays -lexicographically. The first key is sorted on first, followed by the second key --- requires that arg"merge"sort is available for each sort_key - -Returns an index array that shows the indexes for the lexicographic sort along -the given axis. 
- -:: - - PyObject * - PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out) - -Round - -:: - - unsigned char - PyArray_EquivTypenums(int typenum1, int typenum2) - - -:: - - int - PyArray_RegisterDataType(PyArray_Descr *descr) - -Register Data type -Does not change the reference count of descr - -:: - - int - PyArray_RegisterCastFunc(PyArray_Descr *descr, int - totype, PyArray_VectorUnaryFunc *castfunc) - -Register Casting Function -Replaces any function currently stored. - -:: - - int - PyArray_RegisterCanCast(PyArray_Descr *descr, int - totype, NPY_SCALARKIND scalar) - -Register a type number indicating that a descriptor can be cast -to it safely - -:: - - void - PyArray_InitArrFuncs(PyArray_ArrFuncs *f) - -Initialize arrfuncs to NULL - -:: - - PyObject * - PyArray_IntTupleFromIntp(int len, npy_intp *vals) - -PyArray_IntTupleFromIntp - -:: - - int - PyArray_TypeNumFromName(char *str) - - -:: - - int - PyArray_ClipmodeConverter(PyObject *object, NPY_CLIPMODE *val) - -Convert an object to NPY_RAISE / NPY_CLIP / NPY_WRAP - -:: - - int - PyArray_OutputConverter(PyObject *object, PyArrayObject **address) - -Useful to pass as converter function for O& processing in -PyArgs_ParseTuple for output arrays - -:: - - PyObject * - PyArray_BroadcastToShape(PyObject *obj, npy_intp *dims, int nd) - -Get Iterator broadcast to a particular shape - -:: - - void - _PyArray_SigintHandler(int signum) - - -:: - - void* - _PyArray_GetSigintBuf(void ) - - -:: - - int - PyArray_DescrAlignConverter(PyObject *obj, PyArray_Descr **at) - - -Get type-descriptor from an object forcing alignment if possible -None goes to DEFAULT type. - -any object with the .fields attribute and/or .itemsize attribute (if the -.fields attribute does not give the total size -- i.e. a partial record -naming). If itemsize is given it must be >= size computed from fields - -The .fields attribute must return a convertible dictionary if present. -Result inherits from NPY_VOID. 
- -:: - - int - PyArray_DescrAlignConverter2(PyObject *obj, PyArray_Descr **at) - - -Get type-descriptor from an object forcing alignment if possible -None goes to NULL. - -:: - - int - PyArray_SearchsideConverter(PyObject *obj, void *addr) - -Convert object to searchsorted side - -:: - - PyObject * - PyArray_CheckAxis(PyArrayObject *arr, int *axis, int flags) - -PyArray_CheckAxis - -check that axis is valid -convert 0-d arrays to 1-d arrays - -:: - - npy_intp - PyArray_OverflowMultiplyList(npy_intp *l1, int n) - -Multiply a List of Non-negative numbers with over-flow detection. - -:: - - int - PyArray_CompareString(const char *s1, const char *s2, size_t len) - - -:: - - PyObject* - PyArray_MultiIterFromObjects(PyObject **mps, int n, int nadd, ... ) - -Get MultiIterator from array of Python objects and any additional - -PyObject **mps - array of PyObjects -int n - number of PyObjects in the array -int nadd - number of additional arrays to include in the iterator. - -Returns a multi-iterator object. - -:: - - int - PyArray_GetEndianness(void ) - - -:: - - unsigned int - PyArray_GetNDArrayCFeatureVersion(void ) - -Returns the built-in (at compilation time) C API version - -:: - - PyObject * - PyArray_Correlate2(PyObject *op1, PyObject *op2, int mode) - -correlate(a1,a2,mode) - -This function computes the usual correlation (correlate(a1, a2) != -correlate(a2, a1), and conjugate the second argument for complex inputs - -:: - - PyObject* - PyArray_NeighborhoodIterNew(PyArrayIterObject *x, const npy_intp - *bounds, int mode, PyArrayObject*fill) - -A Neighborhood Iterator object. - -:: - - void - PyArray_SetDatetimeParseFunction(PyObject *NPY_UNUSED(op) ) - -This function is scheduled to be removed - -TO BE REMOVED - NOT USED INTERNALLY. - -:: - - void - PyArray_DatetimeToDatetimeStruct(npy_datetime NPY_UNUSED(val) - , NPY_DATETIMEUNIT NPY_UNUSED(fr) - , npy_datetimestruct *result) - -Fill the datetime struct from the value and resolution unit. 
- -TO BE REMOVED - NOT USED INTERNALLY. - -:: - - void - PyArray_TimedeltaToTimedeltaStruct(npy_timedelta NPY_UNUSED(val) - , NPY_DATETIMEUNIT NPY_UNUSED(fr) - , npy_timedeltastruct *result) - -Fill the timedelta struct from the timedelta value and resolution unit. - -TO BE REMOVED - NOT USED INTERNALLY. - -:: - - npy_datetime - PyArray_DatetimeStructToDatetime(NPY_DATETIMEUNIT NPY_UNUSED(fr) - , npy_datetimestruct *NPY_UNUSED(d) ) - -Create a datetime value from a filled datetime struct and resolution unit. - -TO BE REMOVED - NOT USED INTERNALLY. - -:: - - npy_datetime - PyArray_TimedeltaStructToTimedelta(NPY_DATETIMEUNIT NPY_UNUSED(fr) - , npy_timedeltastruct - *NPY_UNUSED(d) ) - -Create a timdelta value from a filled timedelta struct and resolution unit. - -TO BE REMOVED - NOT USED INTERNALLY. - -:: - - NpyIter * - NpyIter_New(PyArrayObject *op, npy_uint32 flags, NPY_ORDER - order, NPY_CASTING casting, PyArray_Descr*dtype) - -Allocate a new iterator for one array object. - -:: - - NpyIter * - NpyIter_MultiNew(int nop, PyArrayObject **op_in, npy_uint32 - flags, NPY_ORDER order, NPY_CASTING - casting, npy_uint32 *op_flags, PyArray_Descr - **op_request_dtypes) - -Allocate a new iterator for more than one array object, using -standard NumPy broadcasting rules and the default buffer size. - -:: - - NpyIter * - NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 - flags, NPY_ORDER order, NPY_CASTING - casting, npy_uint32 *op_flags, PyArray_Descr - **op_request_dtypes, int oa_ndim, int - **op_axes, npy_intp *itershape, npy_intp - buffersize) - -Allocate a new iterator for multiple array objects, and advanced -options for controlling the broadcasting, shape, and buffer size. 
- -:: - - NpyIter * - NpyIter_Copy(NpyIter *iter) - -Makes a copy of the iterator - -:: - - int - NpyIter_Deallocate(NpyIter *iter) - -Deallocate an iterator - -:: - - npy_bool - NpyIter_HasDelayedBufAlloc(NpyIter *iter) - -Whether the buffer allocation is being delayed - -:: - - npy_bool - NpyIter_HasExternalLoop(NpyIter *iter) - -Whether the iterator handles the inner loop - -:: - - int - NpyIter_EnableExternalLoop(NpyIter *iter) - -Removes the inner loop handling (so HasExternalLoop returns true) - -:: - - npy_intp * - NpyIter_GetInnerStrideArray(NpyIter *iter) - -Get the array of strides for the inner loop (when HasExternalLoop is true) - -This function may be safely called without holding the Python GIL. - -:: - - npy_intp * - NpyIter_GetInnerLoopSizePtr(NpyIter *iter) - -Get a pointer to the size of the inner loop (when HasExternalLoop is true) - -This function may be safely called without holding the Python GIL. - -:: - - int - NpyIter_Reset(NpyIter *iter, char **errmsg) - -Resets the iterator to its initial state - -If errmsg is non-NULL, it should point to a variable which will -receive the error message, and no Python exception will be set. -This is so that the function can be called from code not holding -the GIL. - -:: - - int - NpyIter_ResetBasePointers(NpyIter *iter, char **baseptrs, char - **errmsg) - -Resets the iterator to its initial state, with new base data pointers. -This function requires great caution. - -If errmsg is non-NULL, it should point to a variable which will -receive the error message, and no Python exception will be set. -This is so that the function can be called from code not holding -the GIL. - -:: - - int - NpyIter_ResetToIterIndexRange(NpyIter *iter, npy_intp istart, npy_intp - iend, char **errmsg) - -Resets the iterator to a new iterator index range - -If errmsg is non-NULL, it should point to a variable which will -receive the error message, and no Python exception will be set. 
-This is so that the function can be called from code not holding -the GIL. - -:: - - int - NpyIter_GetNDim(NpyIter *iter) - -Gets the number of dimensions being iterated - -:: - - int - NpyIter_GetNOp(NpyIter *iter) - -Gets the number of operands being iterated - -:: - - NpyIter_IterNextFunc * - NpyIter_GetIterNext(NpyIter *iter, char **errmsg) - -Compute the specialized iteration function for an iterator - -If errmsg is non-NULL, it should point to a variable which will -receive the error message, and no Python exception will be set. -This is so that the function can be called from code not holding -the GIL. - -:: - - npy_intp - NpyIter_GetIterSize(NpyIter *iter) - -Gets the number of elements being iterated - -:: - - void - NpyIter_GetIterIndexRange(NpyIter *iter, npy_intp *istart, npy_intp - *iend) - -Gets the range of iteration indices being iterated - -:: - - npy_intp - NpyIter_GetIterIndex(NpyIter *iter) - -Gets the current iteration index - -:: - - int - NpyIter_GotoIterIndex(NpyIter *iter, npy_intp iterindex) - -Sets the iterator position to the specified iterindex, -which matches the iteration order of the iterator. - -Returns NPY_SUCCEED on success, NPY_FAIL on failure. - -:: - - npy_bool - NpyIter_HasMultiIndex(NpyIter *iter) - -Whether the iterator is tracking a multi-index - -:: - - int - NpyIter_GetShape(NpyIter *iter, npy_intp *outshape) - -Gets the broadcast shape if a multi-index is being tracked by the iterator, -otherwise gets the shape of the iteration as Fortran-order -(fastest-changing index first). - -The reason Fortran-order is returned when a multi-index -is not enabled is that this is providing a direct view into how -the iterator traverses the n-dimensional space. The iterator organizes -its memory from fastest index to slowest index, and when -a multi-index is enabled, it uses a permutation to recover the original -order. - -Returns NPY_SUCCEED or NPY_FAIL. 
- -:: - - NpyIter_GetMultiIndexFunc * - NpyIter_GetGetMultiIndex(NpyIter *iter, char **errmsg) - -Compute a specialized get_multi_index function for the iterator - -If errmsg is non-NULL, it should point to a variable which will -receive the error message, and no Python exception will be set. -This is so that the function can be called from code not holding -the GIL. - -:: - - int - NpyIter_GotoMultiIndex(NpyIter *iter, npy_intp const *multi_index) - -Sets the iterator to the specified multi-index, which must have the -correct number of entries for 'ndim'. It is only valid -when NPY_ITER_MULTI_INDEX was passed to the constructor. This operation -fails if the multi-index is out of bounds. - -Returns NPY_SUCCEED on success, NPY_FAIL on failure. - -:: - - int - NpyIter_RemoveMultiIndex(NpyIter *iter) - -Removes multi-index support from an iterator. - -Returns NPY_SUCCEED or NPY_FAIL. - -:: - - npy_bool - NpyIter_HasIndex(NpyIter *iter) - -Whether the iterator is tracking an index - -:: - - npy_bool - NpyIter_IsBuffered(NpyIter *iter) - -Whether the iterator is buffered - -:: - - npy_bool - NpyIter_IsGrowInner(NpyIter *iter) - -Whether the inner loop can grow if buffering is unneeded - -:: - - npy_intp - NpyIter_GetBufferSize(NpyIter *iter) - -Gets the size of the buffer, or 0 if buffering is not enabled - -:: - - npy_intp * - NpyIter_GetIndexPtr(NpyIter *iter) - -Get a pointer to the index, if it is being tracked - -:: - - int - NpyIter_GotoIndex(NpyIter *iter, npy_intp flat_index) - -If the iterator is tracking an index, sets the iterator -to the specified index. - -Returns NPY_SUCCEED on success, NPY_FAIL on failure. - -:: - - char ** - NpyIter_GetDataPtrArray(NpyIter *iter) - -Get the array of data pointers (1 per object being iterated) - -This function may be safely called without holding the Python GIL. 
- -:: - - PyArray_Descr ** - NpyIter_GetDescrArray(NpyIter *iter) - -Get the array of data type pointers (1 per object being iterated) - -:: - - PyArrayObject ** - NpyIter_GetOperandArray(NpyIter *iter) - -Get the array of objects being iterated - -:: - - PyArrayObject * - NpyIter_GetIterView(NpyIter *iter, npy_intp i) - -Returns a view to the i-th object with the iterator's internal axes - -:: - - void - NpyIter_GetReadFlags(NpyIter *iter, char *outreadflags) - -Gets an array of read flags (1 per object being iterated) - -:: - - void - NpyIter_GetWriteFlags(NpyIter *iter, char *outwriteflags) - -Gets an array of write flags (1 per object being iterated) - -:: - - void - NpyIter_DebugPrint(NpyIter *iter) - -For debugging - -:: - - npy_bool - NpyIter_IterationNeedsAPI(NpyIter *iter) - -Whether the iteration loop, and in particular the iternext() -function, needs API access. If this is true, the GIL must -be retained while iterating. - -:: - - void - NpyIter_GetInnerFixedStrideArray(NpyIter *iter, npy_intp *out_strides) - -Get an array of strides which are fixed. Any strides which may -change during iteration receive the value NPY_MAX_INTP. Once -the iterator is ready to iterate, call this to get the strides -which will always be fixed in the inner loop, then choose optimized -inner loop functions which take advantage of those fixed strides. - -This function may be safely called without holding the Python GIL. - -:: - - int - NpyIter_RemoveAxis(NpyIter *iter, int axis) - -Removes an axis from iteration. This requires that NPY_ITER_MULTI_INDEX -was set for iterator creation, and does not work if buffering is -enabled. This function also resets the iterator to its initial state. - -Returns NPY_SUCCEED or NPY_FAIL. - -:: - - npy_intp * - NpyIter_GetAxisStrideArray(NpyIter *iter, int axis) - -Gets the array of strides for the specified axis. 
-If the iterator is tracking a multi-index, gets the strides -for the axis specified, otherwise gets the strides for -the iteration axis as Fortran order (fastest-changing axis first). - -Returns NULL if an error occurs. - -:: - - npy_bool - NpyIter_RequiresBuffering(NpyIter *iter) - -Whether the iteration could be done with no buffering. - -:: - - char ** - NpyIter_GetInitialDataPtrArray(NpyIter *iter) - -Get the array of data pointers (1 per object being iterated), -directly into the arrays (never pointing to a buffer), for starting -unbuffered iteration. This always returns the addresses for the -iterator position as reset to iterator index 0. - -These pointers are different from the pointers accepted by -NpyIter_ResetBasePointers, because the direction along some -axes may have been reversed, requiring base offsets. - -This function may be safely called without holding the Python GIL. - -:: - - int - NpyIter_CreateCompatibleStrides(NpyIter *iter, npy_intp - itemsize, npy_intp *outstrides) - -Builds a set of strides which are the same as the strides of an -output array created using the NPY_ITER_ALLOCATE flag, where NULL -was passed for op_axes. This is for data packed contiguously, -but not necessarily in C or Fortran order. This should be used -together with NpyIter_GetShape and NpyIter_GetNDim. - -A use case for this function is to match the shape and layout of -the iterator and tack on one or more dimensions. For example, -in order to generate a vector per input value for a numerical gradient, -you pass in ndim*itemsize for itemsize, then add another dimension to -the end with size ndim and stride itemsize. To do the Hessian matrix, -you do the same thing but add two dimensions, or take advantage of -the symmetry and pack it into 1 dimension with a particular encoding. - -This function may only be called if the iterator is tracking a multi-index -and if NPY_ITER_DONT_NEGATE_STRIDES was used to prevent an axis from -being iterated in reverse order. 
- -If an array is created with this method, simply adding 'itemsize' -for each iteration will traverse the new array matching the -iterator. - -Returns NPY_SUCCEED or NPY_FAIL. - -:: - - int - PyArray_CastingConverter(PyObject *obj, NPY_CASTING *casting) - -Convert any Python object, *obj*, to an NPY_CASTING enum. - -:: - - npy_intp - PyArray_CountNonzero(PyArrayObject *self) - -Counts the number of non-zero elements in the array. - -Returns -1 on error. - -:: - - PyArray_Descr * - PyArray_PromoteTypes(PyArray_Descr *type1, PyArray_Descr *type2) - -Produces the smallest size and lowest kind type to which both -input types can be cast. - -:: - - PyArray_Descr * - PyArray_MinScalarType(PyArrayObject *arr) - -If arr is a scalar (has 0 dimensions) with a built-in number data type, -finds the smallest type size/kind which can still represent its data. -Otherwise, returns the array's data type. - - -:: - - PyArray_Descr * - PyArray_ResultType(npy_intp narrs, PyArrayObject **arr, npy_intp - ndtypes, PyArray_Descr **dtypes) - -Produces the result type of a bunch of inputs, using the UFunc -type promotion rules. Use this function when you have a set of -input arrays, and need to determine an output array dtype. - -If all the inputs are scalars (have 0 dimensions) or the maximum "kind" -of the scalars is greater than the maximum "kind" of the arrays, does -a regular type promotion. - -Otherwise, does a type promotion on the MinScalarType -of all the inputs. Data types passed directly are treated as array -types. - - -:: - - npy_bool - PyArray_CanCastArrayTo(PyArrayObject *arr, PyArray_Descr - *to, NPY_CASTING casting) - -Returns 1 if the array object may be cast to the given data type using -the casting rule, 0 otherwise. This differs from PyArray_CanCastTo in -that it handles scalar arrays (0 dimensions) specially, by checking -their value. 
- -:: - - npy_bool - PyArray_CanCastTypeTo(PyArray_Descr *from, PyArray_Descr - *to, NPY_CASTING casting) - -Returns true if data of type 'from' may be cast to data of type -'to' according to the rule 'casting'. - -:: - - PyArrayObject * - PyArray_EinsteinSum(char *subscripts, npy_intp nop, PyArrayObject - **op_in, PyArray_Descr *dtype, NPY_ORDER - order, NPY_CASTING casting, PyArrayObject *out) - -This function provides summation of array elements according to -the Einstein summation convention. For example: -- trace(a) -> einsum("ii", a) -- transpose(a) -> einsum("ji", a) -- multiply(a,b) -> einsum(",", a, b) -- inner(a,b) -> einsum("i,i", a, b) -- outer(a,b) -> einsum("i,j", a, b) -- matvec(a,b) -> einsum("ij,j", a, b) -- matmat(a,b) -> einsum("ij,jk", a, b) - -subscripts: The string of subscripts for einstein summation. -nop: The number of operands -op_in: The array of operands -dtype: Either NULL, or the data type to force the calculation as. -order: The order for the calculation/the output axes. -casting: What kind of casts should be permitted. -out: Either NULL, or an array into which the output should be placed. - -By default, the labels get placed in alphabetical order -at the end of the output. So, if c = einsum("i,j", a, b) -then c[i,j] == a[i]*b[j], but if c = einsum("j,i", a, b) -then c[i,j] = a[j]*b[i]. - -Alternatively, you can control the output order or prevent -an axis from being summed/force an axis to be summed by providing -indices for the output. This allows us to turn 'trace' into -'diag', for example. -- diag(a) -> einsum("ii->i", a) -- sum(a, axis=0) -> einsum("i...->", a) - -Subscripts at the beginning and end may be specified by -putting an ellipsis "..." in the middle. For example, -the function einsum("i...i", a) takes the diagonal of -the first and last dimensions of the operand, and -einsum("ij...,jk...->ik...") takes the matrix product using -the first two indices of each operand instead of the last two. 
- -When there is only one operand, no axes being summed, and -no output parameter, this function returns a view -into the operand instead of making a copy. - -:: - - PyObject * - PyArray_NewLikeArray(PyArrayObject *prototype, NPY_ORDER - order, PyArray_Descr *dtype, int subok) - -Creates a new array with the same shape as the provided one, -with possible memory layout order and data type changes. - -prototype - The array the new one should be like. -order - NPY_CORDER - C-contiguous result. -NPY_FORTRANORDER - Fortran-contiguous result. -NPY_ANYORDER - Fortran if prototype is Fortran, C otherwise. -NPY_KEEPORDER - Keeps the axis ordering of prototype. -dtype - If not NULL, overrides the data type of the result. -subok - If 1, use the prototype's array subtype, otherwise -always create a base-class array. - -NOTE: If dtype is not NULL, steals the dtype reference. On failure or when -dtype->subarray is true, dtype will be decrefed. - -:: - - int - PyArray_GetArrayParamsFromObject(PyObject *op, PyArray_Descr - *requested_dtype, npy_bool - writeable, PyArray_Descr - **out_dtype, int *out_ndim, npy_intp - *out_dims, PyArrayObject - **out_arr, PyObject *context) - -Retrieves the array parameters for viewing/converting an arbitrary -PyObject* to a NumPy array. This allows the "innate type and shape" -of Python list-of-lists to be discovered without -actually converting to an array. - -In some cases, such as structured arrays and the __array__ interface, -a data type needs to be used to make sense of the object. When -this is needed, provide a Descr for 'requested_dtype', otherwise -provide NULL. This reference is not stolen. Also, if the requested -dtype doesn't modify the interpretation of the input, out_dtype will -still get the "innate" dtype of the object, not the dtype passed -in 'requested_dtype'. - -If writing to the value in 'op' is desired, set the boolean -'writeable' to 1. This raises an error when 'op' is a scalar, list -of lists, or other non-writeable 'op'. 
- -Result: When success (0 return value) is returned, either out_arr -is filled with a non-NULL PyArrayObject and -the rest of the parameters are untouched, or out_arr is -filled with NULL, and the rest of the parameters are -filled. - -Typical usage: - -PyArrayObject *arr = NULL; -PyArray_Descr *dtype = NULL; -int ndim = 0; -npy_intp dims[NPY_MAXDIMS]; - -if (PyArray_GetArrayParamsFromObject(op, NULL, 1, &dtype, -&ndim, dims, &arr, NULL) < 0) { -return NULL; -} -if (arr == NULL) { -... validate/change dtype, validate flags, ndim, etc ... -// Could make custom strides here too -arr = PyArray_NewFromDescr(&PyArray_Type, dtype, ndim, -dims, NULL, -is_f_order ? NPY_ARRAY_F_CONTIGUOUS : 0, -NULL); -if (arr == NULL) { -return NULL; -} -if (PyArray_CopyObject(arr, op) < 0) { -Py_DECREF(arr); -return NULL; -} -} -else { -... in this case the other parameters weren't filled, just -validate and possibly copy arr itself ... -} -... use arr ... - -:: - - int - PyArray_ConvertClipmodeSequence(PyObject *object, NPY_CLIPMODE - *modes, int n) - -Convert an object to an array of n NPY_CLIPMODE values. -This is intended to be used in functions where a different mode -could be applied to each axis, like in ravel_multi_index. - -:: - - PyObject * - PyArray_MatrixProduct2(PyObject *op1, PyObject - *op2, PyArrayObject*out) - -Numeric.matrixproduct2(a,v,out) -just like inner product but does the swapaxes stuff on the fly - -:: - - npy_bool - NpyIter_IsFirstVisit(NpyIter *iter, int iop) - -Checks to see whether this is the first time the elements -of the specified reduction operand which the iterator points at are -being seen for the first time. The function returns -a reasonable answer for reduction operands and when buffering is -disabled. The answer may be incorrect for buffered non-reduction -operands. - -This function is intended to be used in EXTERNAL_LOOP mode only, -and will produce some wrong answers when that mode is not enabled. 
- -If this function returns true, the caller should also -check the inner loop stride of the operand, because if -that stride is 0, then only the first element of the innermost -external loop is being visited for the first time. - -WARNING: For performance reasons, 'iop' is not bounds-checked, -it is not confirmed that 'iop' is actually a reduction -operand, and it is not confirmed that EXTERNAL_LOOP -mode is enabled. These checks are the responsibility of -the caller, and should be done outside of any inner loops. - -:: - - int - PyArray_SetBaseObject(PyArrayObject *arr, PyObject *obj) - -Sets the 'base' attribute of the array. This steals a reference -to 'obj'. - -Returns 0 on success, -1 on failure. - -:: - - void - PyArray_CreateSortedStridePerm(int ndim, npy_intp const - *strides, npy_stride_sort_item - *out_strideperm) - - -This function populates the first ndim elements -of strideperm with sorted descending by their absolute values. -For example, the stride array (4, -2, 12) becomes -[(2, 12), (0, 4), (1, -2)]. - -:: - - void - PyArray_RemoveAxesInPlace(PyArrayObject *arr, const npy_bool *flags) - - -Removes the axes flagged as True from the array, -modifying it in place. If an axis flagged for removal -has a shape entry bigger than one, this effectively selects -index zero for that axis. - -WARNING: If an axis flagged for removal has a shape equal to zero, -the array will point to invalid memory. The caller must -validate this! -If an axis flagged for removal has a shape larger than one, -the aligned flag (and in the future the contiguous flags), -may need explicit update. -(check also NPY_RELAXED_STRIDES_CHECKING) - -For example, this can be used to remove the reduction axes -from a reduction result once its computation is complete. - -:: - - void - PyArray_DebugPrint(PyArrayObject *obj) - -Prints the raw data of the ndarray in a form useful for debugging -low-level C issues. 
- -:: - - int - PyArray_FailUnlessWriteable(PyArrayObject *obj, const char *name) - - -This function does nothing if obj is writeable, and raises an exception -(and returns -1) if obj is not writeable. It may also do other -house-keeping, such as issuing warnings on arrays which are transitioning -to become views. Always call this function at some point before writing to -an array. - -'name' is a name for the array, used to give better error -messages. Something like "assignment destination", "output array", or even -just "array". - -:: - - int - PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base) - - -Precondition: 'arr' is a copy of 'base' (though possibly with different -strides, ordering, etc.). This function sets the UPDATEIFCOPY flag and the -->base pointer on 'arr', so that when 'arr' is destructed, it will copy any -changes back to 'base'. DEPRECATED, use PyArray_SetWritebackIfCopyBase - -Steals a reference to 'base'. - -Returns 0 on success, -1 on failure. - -:: - - void * - PyDataMem_NEW(size_t size) - -Allocates memory for array data. - -:: - - void - PyDataMem_FREE(void *ptr) - -Free memory for array data. - -:: - - void * - PyDataMem_RENEW(void *ptr, size_t size) - -Reallocate/resize memory for array data. - -:: - - PyDataMem_EventHookFunc * - PyDataMem_SetEventHook(PyDataMem_EventHookFunc *newhook, void - *user_data, void **old_data) - -Sets the allocation event hook for numpy array data. -Takes a PyDataMem_EventHookFunc *, which has the signature: -void hook(void *old, void *new, size_t size, void *user_data). -Also takes a void *user_data, and void **old_data. - -Returns a pointer to the previous hook or NULL. If old_data is -non-NULL, the previous user_data pointer will be copied to it. 
- -If not NULL, hook will be called at the end of each PyDataMem_NEW/FREE/RENEW: -result = PyDataMem_NEW(size) -> (*hook)(NULL, result, size, user_data) -PyDataMem_FREE(ptr) -> (*hook)(ptr, NULL, 0, user_data) -result = PyDataMem_RENEW(ptr, size) -> (*hook)(ptr, result, size, user_data) - -When the hook is called, the GIL will be held by the calling -thread. The hook should be written to be reentrant, if it performs -operations that might cause new allocation events (such as the -creation/destruction numpy objects, or creating/destroying Python -objects which might cause a gc) - -:: - - void - PyArray_MapIterSwapAxes(PyArrayMapIterObject *mit, PyArrayObject - **ret, int getmap) - - -:: - - PyObject * - PyArray_MapIterArray(PyArrayObject *a, PyObject *index) - - -Use advanced indexing to iterate an array. - -:: - - void - PyArray_MapIterNext(PyArrayMapIterObject *mit) - -This function needs to update the state of the map iterator -and point mit->dataptr to the memory-location of the next object - -Note that this function never handles an extra operand but provides -compatibility for an old (exposed) API. - -:: - - int - PyArray_Partition(PyArrayObject *op, PyArrayObject *ktharray, int - axis, NPY_SELECTKIND which) - -Partition an array in-place - -:: - - PyObject * - PyArray_ArgPartition(PyArrayObject *op, PyArrayObject *ktharray, int - axis, NPY_SELECTKIND which) - -ArgPartition an array - -:: - - int - PyArray_SelectkindConverter(PyObject *obj, NPY_SELECTKIND *selectkind) - -Convert object to select kind - -:: - - void * - PyDataMem_NEW_ZEROED(size_t size, size_t elsize) - -Allocates zeroed memory for array data. 
- -:: - - int - PyArray_CheckAnyScalarExact(PyObject *obj) - -return true an object is exactly a numpy scalar - -:: - - PyObject * - PyArray_MapIterArrayCopyIfOverlap(PyArrayObject *a, PyObject - *index, int - copy_if_overlap, PyArrayObject - *extra_op) - - -Same as PyArray_MapIterArray, but: - -If copy_if_overlap != 0, check if `a` has memory overlap with any of the -arrays in `index` and with `extra_op`. If yes, make copies as appropriate -to avoid problems if `a` is modified during the iteration. -`iter->array` may contain a copied array (UPDATEIFCOPY/WRITEBACKIFCOPY set). - -:: - - int - PyArray_ResolveWritebackIfCopy(PyArrayObject *self) - - -If WRITEBACKIFCOPY and self has data, reset the base WRITEABLE flag, -copy the local data to base, release the local data, and set flags -appropriately. Return 0 if not relevant, 1 if success, < 0 on failure - -:: - - int - PyArray_SetWritebackIfCopyBase(PyArrayObject *arr, PyArrayObject - *base) - - -Precondition: 'arr' is a copy of 'base' (though possibly with different -strides, ordering, etc.). This function sets the WRITEBACKIFCOPY flag and the -->base pointer on 'arr', call PyArray_ResolveWritebackIfCopy to copy any -changes back to 'base' before deallocating the array. - -Steals a reference to 'base'. - -Returns 0 on success, -1 on failure. - diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/ndarrayobject.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/ndarrayobject.h deleted file mode 100644 index 95e9cb0..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/ndarrayobject.h +++ /dev/null @@ -1,285 +0,0 @@ -/* - * DON'T INCLUDE THIS DIRECTLY. - */ - -#ifndef NPY_NDARRAYOBJECT_H -#define NPY_NDARRAYOBJECT_H -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include "ndarraytypes.h" - -/* Includes the "function" C-API -- these are all stored in a - list of pointers --- one for each file - The two lists are concatenated into one in multiarray. 
- - They are available as import_array() -*/ - -#include "__multiarray_api.h" - - -/* C-API that requires previous API to be defined */ - -#define PyArray_DescrCheck(op) PyObject_TypeCheck(op, &PyArrayDescr_Type) - -#define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type) -#define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type) - -#define PyArray_HasArrayInterfaceType(op, type, context, out) \ - ((((out)=PyArray_FromStructInterface(op)) != Py_NotImplemented) || \ - (((out)=PyArray_FromInterface(op)) != Py_NotImplemented) || \ - (((out)=PyArray_FromArrayAttr(op, type, context)) != \ - Py_NotImplemented)) - -#define PyArray_HasArrayInterface(op, out) \ - PyArray_HasArrayInterfaceType(op, NULL, NULL, out) - -#define PyArray_IsZeroDim(op) (PyArray_Check(op) && \ - (PyArray_NDIM((PyArrayObject *)op) == 0)) - -#define PyArray_IsScalar(obj, cls) \ - (PyObject_TypeCheck(obj, &Py##cls##ArrType_Type)) - -#define PyArray_CheckScalar(m) (PyArray_IsScalar(m, Generic) || \ - PyArray_IsZeroDim(m)) -#if PY_MAJOR_VERSION >= 3 -#define PyArray_IsPythonNumber(obj) \ - (PyFloat_Check(obj) || PyComplex_Check(obj) || \ - PyLong_Check(obj) || PyBool_Check(obj)) -#define PyArray_IsIntegerScalar(obj) (PyLong_Check(obj) \ - || PyArray_IsScalar((obj), Integer)) -#define PyArray_IsPythonScalar(obj) \ - (PyArray_IsPythonNumber(obj) || PyBytes_Check(obj) || \ - PyUnicode_Check(obj)) -#else -#define PyArray_IsPythonNumber(obj) \ - (PyInt_Check(obj) || PyFloat_Check(obj) || PyComplex_Check(obj) || \ - PyLong_Check(obj) || PyBool_Check(obj)) -#define PyArray_IsIntegerScalar(obj) (PyInt_Check(obj) \ - || PyLong_Check(obj) \ - || PyArray_IsScalar((obj), Integer)) -#define PyArray_IsPythonScalar(obj) \ - (PyArray_IsPythonNumber(obj) || PyString_Check(obj) || \ - PyUnicode_Check(obj)) -#endif - -#define PyArray_IsAnyScalar(obj) \ - (PyArray_IsScalar(obj, Generic) || PyArray_IsPythonScalar(obj)) - -#define PyArray_CheckAnyScalar(obj) (PyArray_IsPythonScalar(obj) || \ - 
PyArray_CheckScalar(obj)) - - -#define PyArray_GETCONTIGUOUS(m) (PyArray_ISCONTIGUOUS(m) ? \ - Py_INCREF(m), (m) : \ - (PyArrayObject *)(PyArray_Copy(m))) - -#define PyArray_SAMESHAPE(a1,a2) ((PyArray_NDIM(a1) == PyArray_NDIM(a2)) && \ - PyArray_CompareLists(PyArray_DIMS(a1), \ - PyArray_DIMS(a2), \ - PyArray_NDIM(a1))) - -#define PyArray_SIZE(m) PyArray_MultiplyList(PyArray_DIMS(m), PyArray_NDIM(m)) -#define PyArray_NBYTES(m) (PyArray_ITEMSIZE(m) * PyArray_SIZE(m)) -#define PyArray_FROM_O(m) PyArray_FromAny(m, NULL, 0, 0, 0, NULL) - -#define PyArray_FROM_OF(m,flags) PyArray_CheckFromAny(m, NULL, 0, 0, flags, \ - NULL) - -#define PyArray_FROM_OT(m,type) PyArray_FromAny(m, \ - PyArray_DescrFromType(type), 0, 0, 0, NULL) - -#define PyArray_FROM_OTF(m, type, flags) \ - PyArray_FromAny(m, PyArray_DescrFromType(type), 0, 0, \ - (((flags) & NPY_ARRAY_ENSURECOPY) ? \ - ((flags) | NPY_ARRAY_DEFAULT) : (flags)), NULL) - -#define PyArray_FROMANY(m, type, min, max, flags) \ - PyArray_FromAny(m, PyArray_DescrFromType(type), min, max, \ - (((flags) & NPY_ARRAY_ENSURECOPY) ? 
\ - (flags) | NPY_ARRAY_DEFAULT : (flags)), NULL) - -#define PyArray_ZEROS(m, dims, type, is_f_order) \ - PyArray_Zeros(m, dims, PyArray_DescrFromType(type), is_f_order) - -#define PyArray_EMPTY(m, dims, type, is_f_order) \ - PyArray_Empty(m, dims, PyArray_DescrFromType(type), is_f_order) - -#define PyArray_FILLWBYTE(obj, val) memset(PyArray_DATA(obj), val, \ - PyArray_NBYTES(obj)) -#ifndef PYPY_VERSION -#define PyArray_REFCOUNT(obj) (((PyObject *)(obj))->ob_refcnt) -#define NPY_REFCOUNT PyArray_REFCOUNT -#endif -#define NPY_MAX_ELSIZE (2 * NPY_SIZEOF_LONGDOUBLE) - -#define PyArray_ContiguousFromAny(op, type, min_depth, max_depth) \ - PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ - max_depth, NPY_ARRAY_DEFAULT, NULL) - -#define PyArray_EquivArrTypes(a1, a2) \ - PyArray_EquivTypes(PyArray_DESCR(a1), PyArray_DESCR(a2)) - -#define PyArray_EquivByteorders(b1, b2) \ - (((b1) == (b2)) || (PyArray_ISNBO(b1) == PyArray_ISNBO(b2))) - -#define PyArray_SimpleNew(nd, dims, typenum) \ - PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, NULL, 0, 0, NULL) - -#define PyArray_SimpleNewFromData(nd, dims, typenum, data) \ - PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, \ - data, 0, NPY_ARRAY_CARRAY, NULL) - -#define PyArray_SimpleNewFromDescr(nd, dims, descr) \ - PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims, \ - NULL, NULL, 0, NULL) - -#define PyArray_ToScalar(data, arr) \ - PyArray_Scalar(data, PyArray_DESCR(arr), (PyObject *)arr) - - -/* These might be faster without the dereferencing of obj - going on inside -- of course an optimizing compiler should - inline the constants inside a for loop making it a moot point -*/ - -#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDES(obj)[0])) - -#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDES(obj)[0] + \ - (j)*PyArray_STRIDES(obj)[1])) - -#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ - 
(i)*PyArray_STRIDES(obj)[0] + \ - (j)*PyArray_STRIDES(obj)[1] + \ - (k)*PyArray_STRIDES(obj)[2])) - -#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDES(obj)[0] + \ - (j)*PyArray_STRIDES(obj)[1] + \ - (k)*PyArray_STRIDES(obj)[2] + \ - (l)*PyArray_STRIDES(obj)[3])) - -/* Move to arrayobject.c once PyArray_XDECREF_ERR is removed */ -static NPY_INLINE void -PyArray_DiscardWritebackIfCopy(PyArrayObject *arr) -{ - PyArrayObject_fields *fa = (PyArrayObject_fields *)arr; - if (fa && fa->base) { - if ((fa->flags & NPY_ARRAY_UPDATEIFCOPY) || - (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY)) { - PyArray_ENABLEFLAGS((PyArrayObject*)fa->base, NPY_ARRAY_WRITEABLE); - Py_DECREF(fa->base); - fa->base = NULL; - PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY); - PyArray_CLEARFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY); - } - } -} - -#define PyArray_DESCR_REPLACE(descr) do { \ - PyArray_Descr *_new_; \ - _new_ = PyArray_DescrNew(descr); \ - Py_XDECREF(descr); \ - descr = _new_; \ - } while(0) - -/* Copy should always return contiguous array */ -#define PyArray_Copy(obj) PyArray_NewCopy(obj, NPY_CORDER) - -#define PyArray_FromObject(op, type, min_depth, max_depth) \ - PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ - max_depth, NPY_ARRAY_BEHAVED | \ - NPY_ARRAY_ENSUREARRAY, NULL) - -#define PyArray_ContiguousFromObject(op, type, min_depth, max_depth) \ - PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ - max_depth, NPY_ARRAY_DEFAULT | \ - NPY_ARRAY_ENSUREARRAY, NULL) - -#define PyArray_CopyFromObject(op, type, min_depth, max_depth) \ - PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ - max_depth, NPY_ARRAY_ENSURECOPY | \ - NPY_ARRAY_DEFAULT | \ - NPY_ARRAY_ENSUREARRAY, NULL) - -#define PyArray_Cast(mp, type_num) \ - PyArray_CastToType(mp, PyArray_DescrFromType(type_num), 0) - -#define PyArray_Take(ap, items, axis) \ - PyArray_TakeFrom(ap, items, axis, NULL, NPY_RAISE) - -#define PyArray_Put(ap, items, values) \ - 
PyArray_PutTo(ap, items, values, NPY_RAISE) - -/* Compatibility with old Numeric stuff -- don't use in new code */ - -#define PyArray_FromDimsAndData(nd, d, type, data) \ - PyArray_FromDimsAndDataAndDescr(nd, d, PyArray_DescrFromType(type), \ - data) - - -/* - Check to see if this key in the dictionary is the "title" - entry of the tuple (i.e. a duplicate dictionary entry in the fields - dict. -*/ - -static NPY_INLINE int -NPY_TITLE_KEY_check(PyObject *key, PyObject *value) -{ - PyObject *title; - if (PyTuple_Size(value) != 3) { - return 0; - } - title = PyTuple_GetItem(value, 2); - if (key == title) { - return 1; - } -#ifdef PYPY_VERSION - /* - * On PyPy, dictionary keys do not always preserve object identity. - * Fall back to comparison by value. - */ - if (PyUnicode_Check(title) && PyUnicode_Check(key)) { - return PyUnicode_Compare(title, key) == 0 ? 1 : 0; - } -#if PY_VERSION_HEX < 0x03000000 - if (PyString_Check(title) && PyString_Check(key)) { - return PyObject_Compare(title, key) == 0 ? 1 : 0; - } -#endif -#endif - return 0; -} - -/* Macro, for backward compat with "if NPY_TITLE_KEY(key, value) { ..." 
*/ -#define NPY_TITLE_KEY(key, value) (NPY_TITLE_KEY_check((key), (value))) - -#define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1) -#define DEPRECATE_FUTUREWARNING(msg) PyErr_WarnEx(PyExc_FutureWarning,msg,1) - -#if !defined(NPY_NO_DEPRECATED_API) || \ - (NPY_NO_DEPRECATED_API < NPY_1_14_API_VERSION) -static NPY_INLINE void -PyArray_XDECREF_ERR(PyArrayObject *arr) -{ - /* 2017-Nov-10 1.14 */ - DEPRECATE("PyArray_XDECREF_ERR is deprecated, call " - "PyArray_DiscardWritebackIfCopy then Py_XDECREF instead"); - PyArray_DiscardWritebackIfCopy(arr); - Py_XDECREF(arr); -} -#endif - - -#ifdef __cplusplus -} -#endif - - -#endif /* NPY_NDARRAYOBJECT_H */ diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/ndarraytypes.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/ndarraytypes.h deleted file mode 100644 index ad98d56..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/ndarraytypes.h +++ /dev/null @@ -1,1848 +0,0 @@ -#ifndef NDARRAYTYPES_H -#define NDARRAYTYPES_H - -#include "npy_common.h" -#include "npy_endian.h" -#include "npy_cpu.h" -#include "utils.h" - -#define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN - -/* Only use thread if configured in config and python supports it */ -#if defined WITH_THREAD && !NPY_NO_SMP - #define NPY_ALLOW_THREADS 1 -#else - #define NPY_ALLOW_THREADS 0 -#endif - -#ifndef __has_extension -#define __has_extension(x) 0 -#endif - -#if !defined(_NPY_NO_DEPRECATIONS) && \ - ((defined(__GNUC__)&& __GNUC__ >= 6) || \ - __has_extension(attribute_deprecated_with_message)) -#define NPY_ATTR_DEPRECATE(text) __attribute__ ((deprecated (text))) -#else -#define NPY_ATTR_DEPRECATE(text) -#endif - -/* - * There are several places in the code where an array of dimensions - * is allocated statically. This is the size of that static - * allocation. 
- * - * The array creation itself could have arbitrary dimensions but all - * the places where static allocation is used would need to be changed - * to dynamic (including inside of several structures) - */ - -#define NPY_MAXDIMS 32 -#define NPY_MAXARGS 32 - -/* Used for Converter Functions "O&" code in ParseTuple */ -#define NPY_FAIL 0 -#define NPY_SUCCEED 1 - -/* - * Binary compatibility version number. This number is increased - * whenever the C-API is changed such that binary compatibility is - * broken, i.e. whenever a recompile of extension modules is needed. - */ -#define NPY_VERSION NPY_ABI_VERSION - -/* - * Minor API version. This number is increased whenever a change is - * made to the C-API -- whether it breaks binary compatibility or not. - * Some changes, such as adding a function pointer to the end of the - * function table, can be made without breaking binary compatibility. - * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) - * would be increased. Whenever binary compatibility is broken, both - * NPY_VERSION and NPY_FEATURE_VERSION should be increased. - */ -#define NPY_FEATURE_VERSION NPY_API_VERSION - -enum NPY_TYPES { NPY_BOOL=0, - NPY_BYTE, NPY_UBYTE, - NPY_SHORT, NPY_USHORT, - NPY_INT, NPY_UINT, - NPY_LONG, NPY_ULONG, - NPY_LONGLONG, NPY_ULONGLONG, - NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, - NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, - NPY_OBJECT=17, - NPY_STRING, NPY_UNICODE, - NPY_VOID, - /* - * New 1.6 types appended, may be integrated - * into the above in 2.0. 
- */ - NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, - - NPY_NTYPES, - NPY_NOTYPE, - NPY_CHAR NPY_ATTR_DEPRECATE("Use NPY_STRING"), - NPY_USERDEF=256, /* leave room for characters */ - - /* The number of types not including the new 1.6 types */ - NPY_NTYPES_ABI_COMPATIBLE=21 -}; -#ifdef _MSC_VER -#pragma deprecated(NPY_CHAR) -#endif - -/* basetype array priority */ -#define NPY_PRIORITY 0.0 - -/* default subtype priority */ -#define NPY_SUBTYPE_PRIORITY 1.0 - -/* default scalar priority */ -#define NPY_SCALAR_PRIORITY -1000000.0 - -/* How many floating point types are there (excluding half) */ -#define NPY_NUM_FLOATTYPE 3 - -/* - * These characters correspond to the array type and the struct - * module - */ - -enum NPY_TYPECHAR { - NPY_BOOLLTR = '?', - NPY_BYTELTR = 'b', - NPY_UBYTELTR = 'B', - NPY_SHORTLTR = 'h', - NPY_USHORTLTR = 'H', - NPY_INTLTR = 'i', - NPY_UINTLTR = 'I', - NPY_LONGLTR = 'l', - NPY_ULONGLTR = 'L', - NPY_LONGLONGLTR = 'q', - NPY_ULONGLONGLTR = 'Q', - NPY_HALFLTR = 'e', - NPY_FLOATLTR = 'f', - NPY_DOUBLELTR = 'd', - NPY_LONGDOUBLELTR = 'g', - NPY_CFLOATLTR = 'F', - NPY_CDOUBLELTR = 'D', - NPY_CLONGDOUBLELTR = 'G', - NPY_OBJECTLTR = 'O', - NPY_STRINGLTR = 'S', - NPY_STRINGLTR2 = 'a', - NPY_UNICODELTR = 'U', - NPY_VOIDLTR = 'V', - NPY_DATETIMELTR = 'M', - NPY_TIMEDELTALTR = 'm', - NPY_CHARLTR = 'c', - - /* - * No Descriptor, just a define -- this let's - * Python users specify an array of integers - * large enough to hold a pointer on the - * platform - */ - NPY_INTPLTR = 'p', - NPY_UINTPLTR = 'P', - - /* - * These are for dtype 'kinds', not dtype 'typecodes' - * as the above are for. - */ - NPY_GENBOOLLTR ='b', - NPY_SIGNEDLTR = 'i', - NPY_UNSIGNEDLTR = 'u', - NPY_FLOATINGLTR = 'f', - NPY_COMPLEXLTR = 'c' -}; - -/* - * Changing this may break Numpy API compatibility - * due to changing offsets in PyArray_ArrFuncs, so be - * careful. 
Here we have reused the mergesort slot for - * any kind of stable sort, the actual implementation will - * depend on the data type. - */ -typedef enum { - NPY_QUICKSORT=0, - NPY_HEAPSORT=1, - NPY_MERGESORT=2, - NPY_STABLESORT=2, -} NPY_SORTKIND; -#define NPY_NSORTS (NPY_STABLESORT + 1) - - -typedef enum { - NPY_INTROSELECT=0 -} NPY_SELECTKIND; -#define NPY_NSELECTS (NPY_INTROSELECT + 1) - - -typedef enum { - NPY_SEARCHLEFT=0, - NPY_SEARCHRIGHT=1 -} NPY_SEARCHSIDE; -#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) - - -typedef enum { - NPY_NOSCALAR=-1, - NPY_BOOL_SCALAR, - NPY_INTPOS_SCALAR, - NPY_INTNEG_SCALAR, - NPY_FLOAT_SCALAR, - NPY_COMPLEX_SCALAR, - NPY_OBJECT_SCALAR -} NPY_SCALARKIND; -#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) - -/* For specifying array memory layout or iteration order */ -typedef enum { - /* Fortran order if inputs are all Fortran, C otherwise */ - NPY_ANYORDER=-1, - /* C order */ - NPY_CORDER=0, - /* Fortran order */ - NPY_FORTRANORDER=1, - /* An order as close to the inputs as possible */ - NPY_KEEPORDER=2 -} NPY_ORDER; - -/* For specifying allowed casting in operations which support it */ -typedef enum { - /* Only allow identical types */ - NPY_NO_CASTING=0, - /* Allow identical and byte swapped types */ - NPY_EQUIV_CASTING=1, - /* Only allow safe casts */ - NPY_SAFE_CASTING=2, - /* Allow safe casts or casts within the same kind */ - NPY_SAME_KIND_CASTING=3, - /* Allow any casts */ - NPY_UNSAFE_CASTING=4 -} NPY_CASTING; - -typedef enum { - NPY_CLIP=0, - NPY_WRAP=1, - NPY_RAISE=2 -} NPY_CLIPMODE; - -/* The special not-a-time (NaT) value */ -#define NPY_DATETIME_NAT NPY_MIN_INT64 - -/* - * Upper bound on the length of a DATETIME ISO 8601 string - * YEAR: 21 (64-bit year) - * MONTH: 3 - * DAY: 3 - * HOURS: 3 - * MINUTES: 3 - * SECONDS: 3 - * ATTOSECONDS: 1 + 3*6 - * TIMEZONE: 5 - * NULL TERMINATOR: 1 - */ -#define NPY_DATETIME_MAX_ISO8601_STRLEN (21 + 3*5 + 1 + 3*6 + 6 + 1) - -/* The FR in the unit names stands for frequency */ -typedef 
enum { - /* Force signed enum type, must be -1 for code compatibility */ - NPY_FR_ERROR = -1, /* error or undetermined */ - - /* Start of valid units */ - NPY_FR_Y = 0, /* Years */ - NPY_FR_M = 1, /* Months */ - NPY_FR_W = 2, /* Weeks */ - /* Gap where 1.6 NPY_FR_B (value 3) was */ - NPY_FR_D = 4, /* Days */ - NPY_FR_h = 5, /* hours */ - NPY_FR_m = 6, /* minutes */ - NPY_FR_s = 7, /* seconds */ - NPY_FR_ms = 8, /* milliseconds */ - NPY_FR_us = 9, /* microseconds */ - NPY_FR_ns = 10, /* nanoseconds */ - NPY_FR_ps = 11, /* picoseconds */ - NPY_FR_fs = 12, /* femtoseconds */ - NPY_FR_as = 13, /* attoseconds */ - NPY_FR_GENERIC = 14 /* unbound units, can convert to anything */ -} NPY_DATETIMEUNIT; - -/* - * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS - * is technically one more than the actual number of units. - */ -#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1) -#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC - -/* - * Business day conventions for mapping invalid business - * days to valid business days. - */ -typedef enum { - /* Go forward in time to the following business day. */ - NPY_BUSDAY_FORWARD, - NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD, - /* Go backward in time to the preceding business day. */ - NPY_BUSDAY_BACKWARD, - NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD, - /* - * Go forward in time to the following business day, unless it - * crosses a month boundary, in which case go backward - */ - NPY_BUSDAY_MODIFIEDFOLLOWING, - /* - * Go backward in time to the preceding business day, unless it - * crosses a month boundary, in which case go forward. - */ - NPY_BUSDAY_MODIFIEDPRECEDING, - /* Produce a NaT for non-business days. */ - NPY_BUSDAY_NAT, - /* Raise an exception for non-business days. */ - NPY_BUSDAY_RAISE -} NPY_BUSDAY_ROLL; - -/************************************************************ - * NumPy Auxiliary Data for inner loops, sort functions, etc. 
- ************************************************************/ - -/* - * When creating an auxiliary data struct, this should always appear - * as the first member, like this: - * - * typedef struct { - * NpyAuxData base; - * double constant; - * } constant_multiplier_aux_data; - */ -typedef struct NpyAuxData_tag NpyAuxData; - -/* Function pointers for freeing or cloning auxiliary data */ -typedef void (NpyAuxData_FreeFunc) (NpyAuxData *); -typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *); - -struct NpyAuxData_tag { - NpyAuxData_FreeFunc *free; - NpyAuxData_CloneFunc *clone; - /* To allow for a bit of expansion without breaking the ABI */ - void *reserved[2]; -}; - -/* Macros to use for freeing and cloning auxiliary data */ -#define NPY_AUXDATA_FREE(auxdata) \ - do { \ - if ((auxdata) != NULL) { \ - (auxdata)->free(auxdata); \ - } \ - } while(0) -#define NPY_AUXDATA_CLONE(auxdata) \ - ((auxdata)->clone(auxdata)) - -#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); -#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); - -#define NPY_STRINGIFY(x) #x -#define NPY_TOSTRING(x) NPY_STRINGIFY(x) - - /* - * Macros to define how array, and dimension/strides data is - * allocated. - */ - - /* Data buffer - PyDataMem_NEW/FREE/RENEW are in multiarraymodule.c */ - -#define NPY_USE_PYMEM 1 - -#if NPY_USE_PYMEM == 1 - /* numpy sometimes calls PyArray_malloc() with the GIL released. On Python - 3.3 and older, it was safe to call PyMem_Malloc() with the GIL released. - On Python 3.4 and newer, it's better to use PyMem_RawMalloc() to be able - to use tracemalloc. On Python 3.6, calling PyMem_Malloc() with the GIL - released is now a fatal error in debug mode. 
*/ -# if PY_VERSION_HEX >= 0x03040000 -# define PyArray_malloc PyMem_RawMalloc -# define PyArray_free PyMem_RawFree -# define PyArray_realloc PyMem_RawRealloc -# else -# define PyArray_malloc PyMem_Malloc -# define PyArray_free PyMem_Free -# define PyArray_realloc PyMem_Realloc -# endif -#else -#define PyArray_malloc malloc -#define PyArray_free free -#define PyArray_realloc realloc -#endif - -/* Dimensions and strides */ -#define PyDimMem_NEW(size) \ - ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) - -#define PyDimMem_FREE(ptr) PyArray_free(ptr) - -#define PyDimMem_RENEW(ptr,size) \ - ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) - -/* forward declaration */ -struct _PyArray_Descr; - -/* These must deal with unaligned and swapped data if necessary */ -typedef PyObject * (PyArray_GetItemFunc) (void *, void *); -typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); - -typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, - npy_intp, int, void *); - -typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); -typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); - - -/* - * These assume aligned and notswapped data -- a buffer will be used - * before or contiguous data will be obtained - */ - -typedef int (PyArray_CompareFunc)(const void *, const void *, void *); -typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); - -typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, - npy_intp, void *); - -typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, - void *); - -/* - * XXX the ignore argument should be removed next time the API version - * is bumped. It used to be the separator. 
- */ -typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, - char *ignore, struct _PyArray_Descr *); -typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, - struct _PyArray_Descr *); - -typedef int (PyArray_FillFunc)(void *, npy_intp, void *); - -typedef int (PyArray_SortFunc)(void *, npy_intp, void *); -typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); -typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp, - npy_intp *, npy_intp *, - void *); -typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp, - npy_intp *, npy_intp *, - void *); - -typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); - -typedef int (PyArray_ScalarKindFunc)(void *); - -typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min, - void *max, void *out); -typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in, - void *values, npy_intp nv); -typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray, - npy_intp nindarray, npy_intp n_outer, - npy_intp m_middle, npy_intp nelem, - NPY_CLIPMODE clipmode); - -typedef struct { - npy_intp *ptr; - int len; -} PyArray_Dims; - -typedef struct { - /* - * Functions to cast to most other standard types - * Can have some NULL entries. The types - * DATETIME, TIMEDELTA, and HALF go into the castdict - * even though they are built-in. - */ - PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE]; - - /* The next four functions *cannot* be NULL */ - - /* - * Functions to get and set items with standard Python types - * -- not array scalars - */ - PyArray_GetItemFunc *getitem; - PyArray_SetItemFunc *setitem; - - /* - * Copy and/or swap data. 
Memory areas may not overlap - * Use memmove first if they might - */ - PyArray_CopySwapNFunc *copyswapn; - PyArray_CopySwapFunc *copyswap; - - /* - * Function to compare items - * Can be NULL - */ - PyArray_CompareFunc *compare; - - /* - * Function to select largest - * Can be NULL - */ - PyArray_ArgFunc *argmax; - - /* - * Function to compute dot product - * Can be NULL - */ - PyArray_DotFunc *dotfunc; - - /* - * Function to scan an ASCII file and - * place a single value plus possible separator - * Can be NULL - */ - PyArray_ScanFunc *scanfunc; - - /* - * Function to read a single value from a string - * and adjust the pointer; Can be NULL - */ - PyArray_FromStrFunc *fromstr; - - /* - * Function to determine if data is zero or not - * If NULL a default version is - * used at Registration time. - */ - PyArray_NonzeroFunc *nonzero; - - /* - * Used for arange. Should return 0 on success - * and -1 on failure. - * Can be NULL. - */ - PyArray_FillFunc *fill; - - /* - * Function to fill arrays with scalar values - * Can be NULL - */ - PyArray_FillWithScalarFunc *fillwithscalar; - - /* - * Sorting functions - * Can be NULL - */ - PyArray_SortFunc *sort[NPY_NSORTS]; - PyArray_ArgSortFunc *argsort[NPY_NSORTS]; - - /* - * Dictionary of additional casting functions - * PyArray_VectorUnaryFuncs - * which can be populated to support casting - * to other registered types. Can be NULL - */ - PyObject *castdict; - - /* - * Functions useful for generalizing - * the casting rules. - * Can be NULL; - */ - PyArray_ScalarKindFunc *scalarkind; - int **cancastscalarkindto; - int *cancastto; - - PyArray_FastClipFunc *fastclip; - PyArray_FastPutmaskFunc *fastputmask; - PyArray_FastTakeFunc *fasttake; - - /* - * Function to select smallest - * Can be NULL - */ - PyArray_ArgFunc *argmin; - -} PyArray_ArrFuncs; - -/* The item must be reference counted when it is inserted or extracted. 
*/ -#define NPY_ITEM_REFCOUNT 0x01 -/* Same as needing REFCOUNT */ -#define NPY_ITEM_HASOBJECT 0x01 -/* Convert to list for pickling */ -#define NPY_LIST_PICKLE 0x02 -/* The item is a POINTER */ -#define NPY_ITEM_IS_POINTER 0x04 -/* memory needs to be initialized for this data-type */ -#define NPY_NEEDS_INIT 0x08 -/* operations need Python C-API so don't give-up thread. */ -#define NPY_NEEDS_PYAPI 0x10 -/* Use f.getitem when extracting elements of this data-type */ -#define NPY_USE_GETITEM 0x20 -/* Use f.setitem when setting creating 0-d array from this data-type.*/ -#define NPY_USE_SETITEM 0x40 -/* A sticky flag specifically for structured arrays */ -#define NPY_ALIGNED_STRUCT 0x80 - -/* - *These are inherited for global data-type if any data-types in the - * field have them - */ -#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ - NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) - -#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ - NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ - NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) - -#define PyDataType_FLAGCHK(dtype, flag) \ - (((dtype)->flags & (flag)) == (flag)) - -#define PyDataType_REFCHK(dtype) \ - PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) - -typedef struct _PyArray_Descr { - PyObject_HEAD - /* - * the type object representing an - * instance of this type -- should not - * be two type_numbers with the same type - * object. - */ - PyTypeObject *typeobj; - /* kind for this type */ - char kind; - /* unique-character representing this type */ - char type; - /* - * '>' (big), '<' (little), '|' - * (not-applicable), or '=' (native). 
- */ - char byteorder; - /* flags describing data type */ - char flags; - /* number representing this type */ - int type_num; - /* element size (itemsize) for this type */ - int elsize; - /* alignment needed for this type */ - int alignment; - /* - * Non-NULL if this type is - * is an array (C-contiguous) - * of some other type - */ - struct _arr_descr *subarray; - /* - * The fields dictionary for this type - * For statically defined descr this - * is always Py_None - */ - PyObject *fields; - /* - * An ordered tuple of field names or NULL - * if no fields are defined - */ - PyObject *names; - /* - * a table of functions specific for each - * basic data descriptor - */ - PyArray_ArrFuncs *f; - /* Metadata about this dtype */ - PyObject *metadata; - /* - * Metadata specific to the C implementation - * of the particular dtype. This was added - * for NumPy 1.7.0. - */ - NpyAuxData *c_metadata; - /* Cached hash value (-1 if not yet computed). - * This was added for NumPy 2.0.0. - */ - npy_hash_t hash; -} PyArray_Descr; - -typedef struct _arr_descr { - PyArray_Descr *base; - PyObject *shape; /* a tuple */ -} PyArray_ArrayDescr; - -/* - * The main array object structure. - * - * It has been recommended to use the inline functions defined below - * (PyArray_DATA and friends) to access fields here for a number of - * releases. Direct access to the members themselves is deprecated. - * To ensure that your code does not use deprecated access, - * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION - * (or NPY_1_8_API_VERSION or higher as required). 
- */ -/* This struct will be moved to a private header in a future release */ -typedef struct tagPyArrayObject_fields { - PyObject_HEAD - /* Pointer to the raw data buffer */ - char *data; - /* The number of dimensions, also called 'ndim' */ - int nd; - /* The size in each dimension, also called 'shape' */ - npy_intp *dimensions; - /* - * Number of bytes to jump to get to the - * next element in each dimension - */ - npy_intp *strides; - /* - * This object is decref'd upon - * deletion of array. Except in the - * case of WRITEBACKIFCOPY which has - * special handling. - * - * For views it points to the original - * array, collapsed so no chains of - * views occur. - * - * For creation from buffer object it - * points to an object that should be - * decref'd on deletion - * - * For WRITEBACKIFCOPY flag this is an - * array to-be-updated upon calling - * PyArray_ResolveWritebackIfCopy - */ - PyObject *base; - /* Pointer to type structure */ - PyArray_Descr *descr; - /* Flags describing array -- see below */ - int flags; - /* For weak references */ - PyObject *weakreflist; -} PyArrayObject_fields; - -/* - * To hide the implementation details, we only expose - * the Python struct HEAD. - */ -#if !defined(NPY_NO_DEPRECATED_API) || \ - (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) -/* - * Can't put this in npy_deprecated_api.h like the others. - * PyArrayObject field access is deprecated as of NumPy 1.7. 
- */ -typedef PyArrayObject_fields PyArrayObject; -#else -typedef struct tagPyArrayObject { - PyObject_HEAD -} PyArrayObject; -#endif - -#define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields)) - -/* Array Flags Object */ -typedef struct PyArrayFlagsObject { - PyObject_HEAD - PyObject *arr; - int flags; -} PyArrayFlagsObject; - -/* Mirrors buffer object to ptr */ - -typedef struct { - PyObject_HEAD - PyObject *base; - void *ptr; - npy_intp len; - int flags; -} PyArray_Chunk; - -typedef struct { - NPY_DATETIMEUNIT base; - int num; -} PyArray_DatetimeMetaData; - -typedef struct { - NpyAuxData base; - PyArray_DatetimeMetaData meta; -} PyArray_DatetimeDTypeMetaData; - -/* - * This structure contains an exploded view of a date-time value. - * NaT is represented by year == NPY_DATETIME_NAT. - */ -typedef struct { - npy_int64 year; - npy_int32 month, day, hour, min, sec, us, ps, as; -} npy_datetimestruct; - -/* This is not used internally. */ -typedef struct { - npy_int64 day; - npy_int32 sec, us, ps, as; -} npy_timedeltastruct; - -typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); - -/* - * Means c-style contiguous (last index varies the fastest). The data - * elements right after each other. - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_C_CONTIGUOUS 0x0001 - -/* - * Set if array is a contiguous Fortran array: the first index varies - * the fastest in memory (strides array is reverse of C-contiguous - * array) - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_F_CONTIGUOUS 0x0002 - -/* - * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a - * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with - * more then one dimension can be C_CONTIGUOUS and F_CONTIGUOUS - * at the same time if they have either zero or one element. 
- * If NPY_RELAXED_STRIDES_CHECKING is set, a higher dimensional - * array is always C_CONTIGUOUS and F_CONTIGUOUS if it has zero elements - * and the array is contiguous if ndarray.squeeze() is contiguous. - * I.e. dimensions for which `ndarray.shape[dimension] == 1` are - * ignored. - */ - -/* - * If set, the array owns the data: it will be free'd when the array - * is deleted. - * - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_OWNDATA 0x0004 - -/* - * An array never has the next four set; they're only used as parameter - * flags to the various FromAny functions - * - * This flag may be requested in constructor functions. - */ - -/* Cause a cast to occur regardless of whether or not it is safe. */ -#define NPY_ARRAY_FORCECAST 0x0010 - -/* - * Always copy the array. Returned arrays are always CONTIGUOUS, - * ALIGNED, and WRITEABLE. - * - * This flag may be requested in constructor functions. - */ -#define NPY_ARRAY_ENSURECOPY 0x0020 - -/* - * Make sure the returned array is a base-class ndarray - * - * This flag may be requested in constructor functions. - */ -#define NPY_ARRAY_ENSUREARRAY 0x0040 - -/* - * Make sure that the strides are in units of the element size Needed - * for some operations with record-arrays. - * - * This flag may be requested in constructor functions. - */ -#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 - -/* - * Array data is aligned on the appropriate memory address for the type - * stored according to how the compiler would align things (e.g., an - * array of integers (4 bytes each) starts on a memory address that's - * a multiple of 4) - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_ALIGNED 0x0100 - -/* - * Array data has the native endianness - * - * This flag may be requested in constructor functions. 
- */ -#define NPY_ARRAY_NOTSWAPPED 0x0200 - -/* - * Array data is writeable - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_WRITEABLE 0x0400 - -/* - * If this flag is set, then base contains a pointer to an array of - * the same size that should be updated with the current contents of - * this array when PyArray_ResolveWritebackIfCopy is called. - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_UPDATEIFCOPY 0x1000 /* Deprecated in 1.14 */ -#define NPY_ARRAY_WRITEBACKIFCOPY 0x2000 - -/* - * NOTE: there are also internal flags defined in multiarray/arrayobject.h, - * which start at bit 31 and work down. - */ - -#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE) -#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE | \ - NPY_ARRAY_NOTSWAPPED) -#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) -#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) -#define NPY_ARRAY_INOUT_ARRAY2 (NPY_ARRAY_CARRAY | \ - NPY_ARRAY_WRITEBACKIFCOPY) -#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) -#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) -#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) -#define NPY_ARRAY_INOUT_FARRAY2 (NPY_ARRAY_FARRAY | \ - NPY_ARRAY_WRITEBACKIFCOPY) - -#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) - -/* This flag is for the array 
interface, not PyArrayObject */ -#define NPY_ARR_HAS_DESCR 0x0800 - - - - -/* - * Size of internal buffers used for alignment Make BUFSIZE a multiple - * of sizeof(npy_cdouble) -- usually 16 so that ufunc buffers are aligned - */ -#define NPY_MIN_BUFSIZE ((int)sizeof(npy_cdouble)) -#define NPY_MAX_BUFSIZE (((int)sizeof(npy_cdouble))*1000000) -#define NPY_BUFSIZE 8192 -/* buffer stress test size: */ -/*#define NPY_BUFSIZE 17*/ - -#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) -#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) -#define PyArray_CLT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag) : \ - ((p).real < (q).real))) -#define PyArray_CGT(p,q) ((((p).real==(q).real) ? ((p).imag > (q).imag) : \ - ((p).real > (q).real))) -#define PyArray_CLE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag) : \ - ((p).real <= (q).real))) -#define PyArray_CGE(p,q) ((((p).real==(q).real) ? ((p).imag >= (q).imag) : \ - ((p).real >= (q).real))) -#define PyArray_CEQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag)) -#define PyArray_CNE(p,q) (((p).real!=(q).real) || ((p).imag != (q).imag)) - -/* - * C API: consists of Macros and functions. The MACROS are defined - * here. 
- */ - - -#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_C_CONTIGUOUS) -#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS((m), NPY_ARRAY_WRITEABLE) -#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS((m), NPY_ARRAY_ALIGNED) - -#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_C_CONTIGUOUS) -#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_F_CONTIGUOUS) - -/* the variable is used in some places, so always define it */ -#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL; -#if NPY_ALLOW_THREADS -#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS -#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS -#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0); -#define NPY_END_THREADS do { if (_save) \ - { PyEval_RestoreThread(_save); _save = NULL;} } while (0); -#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if ((loop_size) > 500) \ - { _save = PyEval_SaveThread();} } while (0); - -#define NPY_BEGIN_THREADS_DESCR(dtype) \ - do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \ - NPY_BEGIN_THREADS;} while (0); - -#define NPY_END_THREADS_DESCR(dtype) \ - do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \ - NPY_END_THREADS; } while (0); - -#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__; -#define NPY_ALLOW_C_API do {__save__ = PyGILState_Ensure();} while (0); -#define NPY_DISABLE_C_API do {PyGILState_Release(__save__);} while (0); -#else -#define NPY_BEGIN_ALLOW_THREADS -#define NPY_END_ALLOW_THREADS -#define NPY_BEGIN_THREADS -#define NPY_END_THREADS -#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) -#define NPY_BEGIN_THREADS_DESCR(dtype) -#define NPY_END_THREADS_DESCR(dtype) -#define NPY_ALLOW_C_API_DEF -#define NPY_ALLOW_C_API -#define NPY_DISABLE_C_API -#endif - -/********************************** - * The nditer object, added in 1.6 - **********************************/ - -/* The actual structure of the iterator is an internal detail */ -typedef struct NpyIter_InternalOnly 
NpyIter; - -/* Iterator function pointers that may be specialized */ -typedef int (NpyIter_IterNextFunc)(NpyIter *iter); -typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter, - npy_intp *outcoords); - -/*** Global flags that may be passed to the iterator constructors ***/ - -/* Track an index representing C order */ -#define NPY_ITER_C_INDEX 0x00000001 -/* Track an index representing Fortran order */ -#define NPY_ITER_F_INDEX 0x00000002 -/* Track a multi-index */ -#define NPY_ITER_MULTI_INDEX 0x00000004 -/* User code external to the iterator does the 1-dimensional innermost loop */ -#define NPY_ITER_EXTERNAL_LOOP 0x00000008 -/* Convert all the operands to a common data type */ -#define NPY_ITER_COMMON_DTYPE 0x00000010 -/* Operands may hold references, requiring API access during iteration */ -#define NPY_ITER_REFS_OK 0x00000020 -/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */ -#define NPY_ITER_ZEROSIZE_OK 0x00000040 -/* Permits reductions (size-0 stride with dimension size > 1) */ -#define NPY_ITER_REDUCE_OK 0x00000080 -/* Enables sub-range iteration */ -#define NPY_ITER_RANGED 0x00000100 -/* Enables buffering */ -#define NPY_ITER_BUFFERED 0x00000200 -/* When buffering is enabled, grows the inner loop if possible */ -#define NPY_ITER_GROWINNER 0x00000400 -/* Delay allocation of buffers until first Reset* call */ -#define NPY_ITER_DELAY_BUFALLOC 0x00000800 -/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */ -#define NPY_ITER_DONT_NEGATE_STRIDES 0x00001000 -/* - * If output operands overlap with other operands (based on heuristics that - * has false positives but no false negatives), make temporary copies to - * eliminate overlap. 
- */ -#define NPY_ITER_COPY_IF_OVERLAP 0x00002000 - -/*** Per-operand flags that may be passed to the iterator constructors ***/ - -/* The operand will be read from and written to */ -#define NPY_ITER_READWRITE 0x00010000 -/* The operand will only be read from */ -#define NPY_ITER_READONLY 0x00020000 -/* The operand will only be written to */ -#define NPY_ITER_WRITEONLY 0x00040000 -/* The operand's data must be in native byte order */ -#define NPY_ITER_NBO 0x00080000 -/* The operand's data must be aligned */ -#define NPY_ITER_ALIGNED 0x00100000 -/* The operand's data must be contiguous (within the inner loop) */ -#define NPY_ITER_CONTIG 0x00200000 -/* The operand may be copied to satisfy requirements */ -#define NPY_ITER_COPY 0x00400000 -/* The operand may be copied with WRITEBACKIFCOPY to satisfy requirements */ -#define NPY_ITER_UPDATEIFCOPY 0x00800000 -/* Allocate the operand if it is NULL */ -#define NPY_ITER_ALLOCATE 0x01000000 -/* If an operand is allocated, don't use any subtype */ -#define NPY_ITER_NO_SUBTYPE 0x02000000 -/* This is a virtual array slot, operand is NULL but temporary data is there */ -#define NPY_ITER_VIRTUAL 0x04000000 -/* Require that the dimension match the iterator dimensions exactly */ -#define NPY_ITER_NO_BROADCAST 0x08000000 -/* A mask is being used on this array, affects buffer -> array copy */ -#define NPY_ITER_WRITEMASKED 0x10000000 -/* This array is the mask for all WRITEMASKED operands */ -#define NPY_ITER_ARRAYMASK 0x20000000 -/* Assume iterator order data access for COPY_IF_OVERLAP */ -#define NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE 0x40000000 - -#define NPY_ITER_GLOBAL_FLAGS 0x0000ffff -#define NPY_ITER_PER_OP_FLAGS 0xffff0000 - - -/***************************** - * Basic iterator object - *****************************/ - -/* FWD declaration */ -typedef struct PyArrayIterObject_tag PyArrayIterObject; - -/* - * type of the function which translates a set of coordinates to a - * pointer to the data - */ -typedef char* 
(*npy_iter_get_dataptr_t)( - PyArrayIterObject* iter, const npy_intp*); - -struct PyArrayIterObject_tag { - PyObject_HEAD - int nd_m1; /* number of dimensions - 1 */ - npy_intp index, size; - npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ - npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ - npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ - npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ - npy_intp factors[NPY_MAXDIMS]; /* shape factors */ - PyArrayObject *ao; - char *dataptr; /* pointer to current item*/ - npy_bool contiguous; - - npy_intp bounds[NPY_MAXDIMS][2]; - npy_intp limits[NPY_MAXDIMS][2]; - npy_intp limits_sizes[NPY_MAXDIMS]; - npy_iter_get_dataptr_t translate; -} ; - - -/* Iterator API */ -#define PyArrayIter_Check(op) PyObject_TypeCheck((op), &PyArrayIter_Type) - -#define _PyAIT(it) ((PyArrayIterObject *)(it)) -#define PyArray_ITER_RESET(it) do { \ - _PyAIT(it)->index = 0; \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ - memset(_PyAIT(it)->coordinates, 0, \ - (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ -} while (0) - -#define _PyArray_ITER_NEXT1(it) do { \ - (it)->dataptr += _PyAIT(it)->strides[0]; \ - (it)->coordinates[0]++; \ -} while (0) - -#define _PyArray_ITER_NEXT2(it) do { \ - if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ - (it)->coordinates[1]++; \ - (it)->dataptr += (it)->strides[1]; \ - } \ - else { \ - (it)->coordinates[1] = 0; \ - (it)->coordinates[0]++; \ - (it)->dataptr += (it)->strides[0] - \ - (it)->backstrides[1]; \ - } \ -} while (0) - -#define PyArray_ITER_NEXT(it) do { \ - _PyAIT(it)->index++; \ - if (_PyAIT(it)->nd_m1 == 0) { \ - _PyArray_ITER_NEXT1(_PyAIT(it)); \ - } \ - else if (_PyAIT(it)->contiguous) \ - _PyAIT(it)->dataptr += PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ - else if (_PyAIT(it)->nd_m1 == 1) { \ - _PyArray_ITER_NEXT2(_PyAIT(it)); \ - } \ - else { \ - int __npy_i; \ - for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \ - if (_PyAIT(it)->coordinates[__npy_i] 
< \ - _PyAIT(it)->dims_m1[__npy_i]) { \ - _PyAIT(it)->coordinates[__npy_i]++; \ - _PyAIT(it)->dataptr += \ - _PyAIT(it)->strides[__npy_i]; \ - break; \ - } \ - else { \ - _PyAIT(it)->coordinates[__npy_i] = 0; \ - _PyAIT(it)->dataptr -= \ - _PyAIT(it)->backstrides[__npy_i]; \ - } \ - } \ - } \ -} while (0) - -#define PyArray_ITER_GOTO(it, destination) do { \ - int __npy_i; \ - _PyAIT(it)->index = 0; \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ - for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \ - if (destination[__npy_i] < 0) { \ - destination[__npy_i] += \ - _PyAIT(it)->dims_m1[__npy_i]+1; \ - } \ - _PyAIT(it)->dataptr += destination[__npy_i] * \ - _PyAIT(it)->strides[__npy_i]; \ - _PyAIT(it)->coordinates[__npy_i] = \ - destination[__npy_i]; \ - _PyAIT(it)->index += destination[__npy_i] * \ - ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \ - _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \ - } \ -} while (0) - -#define PyArray_ITER_GOTO1D(it, ind) do { \ - int __npy_i; \ - npy_intp __npy_ind = (npy_intp)(ind); \ - if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \ - _PyAIT(it)->index = __npy_ind; \ - if (_PyAIT(it)->nd_m1 == 0) { \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ - __npy_ind * _PyAIT(it)->strides[0]; \ - } \ - else if (_PyAIT(it)->contiguous) \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ - __npy_ind * PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ - else { \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ - for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ - __npy_i++) { \ - _PyAIT(it)->dataptr += \ - (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ - * _PyAIT(it)->strides[__npy_i]; \ - __npy_ind %= _PyAIT(it)->factors[__npy_i]; \ - } \ - } \ -} while (0) - -#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr)) - -#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size) - - -/* - * Any object passed to PyArray_Broadcast must be binary compatible - * with this structure. 
- */ - -typedef struct { - PyObject_HEAD - int numiter; /* number of iters */ - npy_intp size; /* broadcasted size */ - npy_intp index; /* current index */ - int nd; /* number of dims */ - npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ - PyArrayIterObject *iters[NPY_MAXARGS]; /* iterators */ -} PyArrayMultiIterObject; - -#define _PyMIT(m) ((PyArrayMultiIterObject *)(m)) -#define PyArray_MultiIter_RESET(multi) do { \ - int __npy_mi; \ - _PyMIT(multi)->index = 0; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ - } \ -} while (0) - -#define PyArray_MultiIter_NEXT(multi) do { \ - int __npy_mi; \ - _PyMIT(multi)->index++; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \ - } \ -} while (0) - -#define PyArray_MultiIter_GOTO(multi, dest) do { \ - int __npy_mi; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \ - } \ - _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ -} while (0) - -#define PyArray_MultiIter_GOTO1D(multi, ind) do { \ - int __npy_mi; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \ - } \ - _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ -} while (0) - -#define PyArray_MultiIter_DATA(multi, i) \ - ((void *)(_PyMIT(multi)->iters[i]->dataptr)) - -#define PyArray_MultiIter_NEXTi(multi, i) \ - PyArray_ITER_NEXT(_PyMIT(multi)->iters[i]) - -#define PyArray_MultiIter_NOTDONE(multi) \ - (_PyMIT(multi)->index < _PyMIT(multi)->size) - - -/* - * Store the information needed for fancy-indexing over an array. The - * fields are slightly unordered to keep consec, dataptr and subspace - * where they were originally. 
- */ -typedef struct { - PyObject_HEAD - /* - * Multi-iterator portion --- needs to be present in this - * order to work with PyArray_Broadcast - */ - - int numiter; /* number of index-array - iterators */ - npy_intp size; /* size of broadcasted - result */ - npy_intp index; /* current index */ - int nd; /* number of dims */ - npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ - NpyIter *outer; /* index objects - iterator */ - void *unused[NPY_MAXDIMS - 2]; - PyArrayObject *array; - /* Flat iterator for the indexed array. For compatibility solely. */ - PyArrayIterObject *ait; - - /* - * Subspace array. For binary compatibility (was an iterator, - * but only the check for NULL should be used). - */ - PyArrayObject *subspace; - - /* - * if subspace iteration, then this is the array of axes in - * the underlying array represented by the index objects - */ - int iteraxes[NPY_MAXDIMS]; - npy_intp fancy_strides[NPY_MAXDIMS]; - - /* pointer when all fancy indices are 0 */ - char *baseoffset; - - /* - * after binding consec denotes at which axis the fancy axes - * are inserted. - */ - int consec; - char *dataptr; - - int nd_fancy; - npy_intp fancy_dims[NPY_MAXDIMS]; - - /* Whether the iterator (any of the iterators) requires API */ - int needs_api; - - /* - * Extra op information. - */ - PyArrayObject *extra_op; - PyArray_Descr *extra_op_dtype; /* desired dtype */ - npy_uint32 *extra_op_flags; /* Iterator flags */ - - NpyIter *extra_op_iter; - NpyIter_IterNextFunc *extra_op_next; - char **extra_op_ptrs; - - /* - * Information about the iteration state. - */ - NpyIter_IterNextFunc *outer_next; - char **outer_ptrs; - npy_intp *outer_strides; - - /* - * Information about the subspace iterator. 
- */ - NpyIter *subspace_iter; - NpyIter_IterNextFunc *subspace_next; - char **subspace_ptrs; - npy_intp *subspace_strides; - - /* Count for the external loop (which ever it is) for API iteration */ - npy_intp iter_count; - -} PyArrayMapIterObject; - -enum { - NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, - NPY_NEIGHBORHOOD_ITER_ONE_PADDING, - NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING, - NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING, - NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING -}; - -typedef struct { - PyObject_HEAD - - /* - * PyArrayIterObject part: keep this in this exact order - */ - int nd_m1; /* number of dimensions - 1 */ - npy_intp index, size; - npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ - npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ - npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ - npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ - npy_intp factors[NPY_MAXDIMS]; /* shape factors */ - PyArrayObject *ao; - char *dataptr; /* pointer to current item*/ - npy_bool contiguous; - - npy_intp bounds[NPY_MAXDIMS][2]; - npy_intp limits[NPY_MAXDIMS][2]; - npy_intp limits_sizes[NPY_MAXDIMS]; - npy_iter_get_dataptr_t translate; - - /* - * New members - */ - npy_intp nd; - - /* Dimensions is the dimension of the array */ - npy_intp dimensions[NPY_MAXDIMS]; - - /* - * Neighborhood points coordinates are computed relatively to the - * point pointed by _internal_iter - */ - PyArrayIterObject* _internal_iter; - /* - * To keep a reference to the representation of the constant value - * for constant padding - */ - char* constant; - - int mode; -} PyArrayNeighborhoodIterObject; - -/* - * Neighborhood iterator API - */ - -/* General: those work for any mode */ -static NPY_INLINE int -PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter); -static NPY_INLINE int -PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter); -#if 0 -static NPY_INLINE int -PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter); -#endif - -/* - * 
Include inline implementations - functions defined there are not - * considered public API - */ -#define _NPY_INCLUDE_NEIGHBORHOOD_IMP -#include "_neighborhood_iterator_imp.h" -#undef _NPY_INCLUDE_NEIGHBORHOOD_IMP - -/* The default array type */ -#define NPY_DEFAULT_TYPE NPY_DOUBLE - -/* - * All sorts of useful ways to look into a PyArrayObject. It is recommended - * to use PyArrayObject * objects instead of always casting from PyObject *, - * for improved type checking. - * - * In many cases here the macro versions of the accessors are deprecated, - * but can't be immediately changed to inline functions because the - * preexisting macros accept PyObject * and do automatic casts. Inline - * functions accepting PyArrayObject * provides for some compile-time - * checking of correctness when working with these objects in C. - */ - -#define PyArray_ISONESEGMENT(m) (PyArray_NDIM(m) == 0 || \ - PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) || \ - PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS)) - -#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) && \ - (!PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS))) - -#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) ? \ - NPY_ARRAY_F_CONTIGUOUS : 0)) - -#if (defined(NPY_NO_DEPRECATED_API) && (NPY_1_7_API_VERSION <= NPY_NO_DEPRECATED_API)) -/* - * Changing access macros into functions, to allow for future hiding - * of the internal memory layout. This later hiding will allow the 2.x series - * to change the internal representation of arrays without affecting - * ABI compatibility. 
- */ - -static NPY_INLINE int -PyArray_NDIM(const PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->nd; -} - -static NPY_INLINE void * -PyArray_DATA(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->data; -} - -static NPY_INLINE char * -PyArray_BYTES(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->data; -} - -static NPY_INLINE npy_intp * -PyArray_DIMS(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->dimensions; -} - -static NPY_INLINE npy_intp * -PyArray_STRIDES(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->strides; -} - -static NPY_INLINE npy_intp -PyArray_DIM(const PyArrayObject *arr, int idim) -{ - return ((PyArrayObject_fields *)arr)->dimensions[idim]; -} - -static NPY_INLINE npy_intp -PyArray_STRIDE(const PyArrayObject *arr, int istride) -{ - return ((PyArrayObject_fields *)arr)->strides[istride]; -} - -static NPY_INLINE NPY_RETURNS_BORROWED_REF PyObject * -PyArray_BASE(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->base; -} - -static NPY_INLINE NPY_RETURNS_BORROWED_REF PyArray_Descr * -PyArray_DESCR(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->descr; -} - -static NPY_INLINE int -PyArray_FLAGS(const PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->flags; -} - -static NPY_INLINE npy_intp -PyArray_ITEMSIZE(const PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->descr->elsize; -} - -static NPY_INLINE int -PyArray_TYPE(const PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->descr->type_num; -} - -static NPY_INLINE int -PyArray_CHKFLAGS(const PyArrayObject *arr, int flags) -{ - return (PyArray_FLAGS(arr) & flags) == flags; -} - -static NPY_INLINE PyObject * -PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) -{ - return ((PyArrayObject_fields *)arr)->descr->f->getitem( - (void *)itemptr, (PyArrayObject *)arr); -} - -static NPY_INLINE int -PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) -{ - 
return ((PyArrayObject_fields *)arr)->descr->f->setitem( - v, itemptr, arr); -} - -#else - -/* These macros are deprecated as of NumPy 1.7. */ -#define PyArray_NDIM(obj) (((PyArrayObject_fields *)(obj))->nd) -#define PyArray_BYTES(obj) (((PyArrayObject_fields *)(obj))->data) -#define PyArray_DATA(obj) ((void *)((PyArrayObject_fields *)(obj))->data) -#define PyArray_DIMS(obj) (((PyArrayObject_fields *)(obj))->dimensions) -#define PyArray_STRIDES(obj) (((PyArrayObject_fields *)(obj))->strides) -#define PyArray_DIM(obj,n) (PyArray_DIMS(obj)[n]) -#define PyArray_STRIDE(obj,n) (PyArray_STRIDES(obj)[n]) -#define PyArray_BASE(obj) (((PyArrayObject_fields *)(obj))->base) -#define PyArray_DESCR(obj) (((PyArrayObject_fields *)(obj))->descr) -#define PyArray_FLAGS(obj) (((PyArrayObject_fields *)(obj))->flags) -#define PyArray_CHKFLAGS(m, FLAGS) \ - ((((PyArrayObject_fields *)(m))->flags & (FLAGS)) == (FLAGS)) -#define PyArray_ITEMSIZE(obj) \ - (((PyArrayObject_fields *)(obj))->descr->elsize) -#define PyArray_TYPE(obj) \ - (((PyArrayObject_fields *)(obj))->descr->type_num) -#define PyArray_GETITEM(obj,itemptr) \ - PyArray_DESCR(obj)->f->getitem((char *)(itemptr), \ - (PyArrayObject *)(obj)) - -#define PyArray_SETITEM(obj,itemptr,v) \ - PyArray_DESCR(obj)->f->setitem((PyObject *)(v), \ - (char *)(itemptr), \ - (PyArrayObject *)(obj)) -#endif - -static NPY_INLINE PyArray_Descr * -PyArray_DTYPE(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->descr; -} - -static NPY_INLINE npy_intp * -PyArray_SHAPE(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->dimensions; -} - -/* - * Enables the specified array flags. Does no checking, - * assumes you know what you're doing. - */ -static NPY_INLINE void -PyArray_ENABLEFLAGS(PyArrayObject *arr, int flags) -{ - ((PyArrayObject_fields *)arr)->flags |= flags; -} - -/* - * Clears the specified array flags. Does no checking, - * assumes you know what you're doing. 
- */ -static NPY_INLINE void -PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) -{ - ((PyArrayObject_fields *)arr)->flags &= ~flags; -} - -#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) - -#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \ - ((type) == NPY_USHORT) || \ - ((type) == NPY_UINT) || \ - ((type) == NPY_ULONG) || \ - ((type) == NPY_ULONGLONG)) - -#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) || \ - ((type) == NPY_SHORT) || \ - ((type) == NPY_INT) || \ - ((type) == NPY_LONG) || \ - ((type) == NPY_LONGLONG)) - -#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ - ((type) <= NPY_ULONGLONG)) - -#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ - ((type) <= NPY_LONGDOUBLE)) || \ - ((type) == NPY_HALF)) - -#define PyTypeNum_ISNUMBER(type) (((type) <= NPY_CLONGDOUBLE) || \ - ((type) == NPY_HALF)) - -#define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \ - ((type) == NPY_UNICODE)) - -#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ - ((type) <= NPY_CLONGDOUBLE)) - -#define PyTypeNum_ISPYTHON(type) (((type) == NPY_LONG) || \ - ((type) == NPY_DOUBLE) || \ - ((type) == NPY_CDOUBLE) || \ - ((type) == NPY_BOOL) || \ - ((type) == NPY_OBJECT )) - -#define PyTypeNum_ISFLEXIBLE(type) (((type) >=NPY_STRING) && \ - ((type) <=NPY_VOID)) - -#define PyTypeNum_ISDATETIME(type) (((type) >=NPY_DATETIME) && \ - ((type) <=NPY_TIMEDELTA)) - -#define PyTypeNum_ISUSERDEF(type) (((type) >= NPY_USERDEF) && \ - ((type) < NPY_USERDEF+ \ - NPY_NUMUSERTYPES)) - -#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) || \ - PyTypeNum_ISUSERDEF(type)) - -#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT) - - -#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISINTEGER(obj) 
PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num ) -#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISPYTHON(obj) PyTypeNum_ISPYTHON(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL) -#define PyDataType_HASSUBARRAY(dtype) ((dtype)->subarray != NULL) -#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0 && \ - !PyDataType_HASFIELDS(dtype)) -#define PyDataType_MAKEUNSIZED(dtype) ((dtype)->elsize = 0) - -#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj)) -#define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj)) -#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj)) -#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj)) -#define PyArray_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyArray_TYPE(obj)) -#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj)) -#define PyArray_ISSTRING(obj) PyTypeNum_ISSTRING(PyArray_TYPE(obj)) -#define PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj)) -#define PyArray_ISPYTHON(obj) PyTypeNum_ISPYTHON(PyArray_TYPE(obj)) -#define PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) -#define PyArray_ISDATETIME(obj) 
PyTypeNum_ISDATETIME(PyArray_TYPE(obj)) -#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj)) -#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj)) -#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj)) -#define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj)) - - /* - * FIXME: This should check for a flag on the data-type that - * states whether or not it is variable length. Because the - * ISFLEXIBLE check is hard-coded to the built-in data-types. - */ -#define PyArray_ISVARIABLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) - -#define PyArray_SAFEALIGNEDCOPY(obj) (PyArray_ISALIGNED(obj) && !PyArray_ISVARIABLE(obj)) - - -#define NPY_LITTLE '<' -#define NPY_BIG '>' -#define NPY_NATIVE '=' -#define NPY_SWAP 's' -#define NPY_IGNORE '|' - -#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN -#define NPY_NATBYTE NPY_BIG -#define NPY_OPPBYTE NPY_LITTLE -#else -#define NPY_NATBYTE NPY_LITTLE -#define NPY_OPPBYTE NPY_BIG -#endif - -#define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE) -#define PyArray_IsNativeByteOrder PyArray_ISNBO -#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder) -#define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m)) - -#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ - PyArray_ISNOTSWAPPED(m)) - -#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) -#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) -#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) -#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) -#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) -#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) - - -#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder) -#define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d)) - -/************************************************************ - * A struct used by 
PyArray_CreateSortedStridePerm, new in 1.7. - ************************************************************/ - -typedef struct { - npy_intp perm, stride; -} npy_stride_sort_item; - -/************************************************************ - * This is the form of the struct that's returned pointed by the - * PyCObject attribute of an array __array_struct__. See - * https://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full - * documentation. - ************************************************************/ -typedef struct { - int two; /* - * contains the integer 2 as a sanity - * check - */ - - int nd; /* number of dimensions */ - - char typekind; /* - * kind in array --- character code of - * typestr - */ - - int itemsize; /* size of each element */ - - int flags; /* - * how should be data interpreted. Valid - * flags are CONTIGUOUS (1), F_CONTIGUOUS (2), - * ALIGNED (0x100), NOTSWAPPED (0x200), and - * WRITEABLE (0x400). ARR_HAS_DESCR (0x800) - * states that arrdescr field is present in - * structure - */ - - npy_intp *shape; /* - * A length-nd array of shape - * information - */ - - npy_intp *strides; /* A length-nd array of stride information */ - - void *data; /* A pointer to the first element of the array */ - - PyObject *descr; /* - * A list of fields or NULL (ignored if flags - * does not have ARR_HAS_DESCR flag set) - */ -} PyArrayInterface; - -/* - * This is a function for hooking into the PyDataMem_NEW/FREE/RENEW functions. - * See the documentation for PyDataMem_SetEventHook. - */ -typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size, - void *user_data); - -/* - * Use the keyword NPY_DEPRECATED_INCLUDES to ensure that the header files - * npy_*_*_deprecated_api.h are only included from here and nowhere else. - */ -#ifdef NPY_DEPRECATED_INCLUDES -#error "Do not use the reserved keyword NPY_DEPRECATED_INCLUDES." 
-#endif -#define NPY_DEPRECATED_INCLUDES -#if !defined(NPY_NO_DEPRECATED_API) || \ - (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) -#include "npy_1_7_deprecated_api.h" -#endif -/* - * There is no file npy_1_8_deprecated_api.h since there are no additional - * deprecated API features in NumPy 1.8. - * - * Note to maintainers: insert code like the following in future NumPy - * versions. - * - * #if !defined(NPY_NO_DEPRECATED_API) || \ - * (NPY_NO_DEPRECATED_API < NPY_1_9_API_VERSION) - * #include "npy_1_9_deprecated_api.h" - * #endif - */ -#undef NPY_DEPRECATED_INCLUDES - -#endif /* NPY_ARRAYTYPES_H */ diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/noprefix.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/noprefix.h deleted file mode 100644 index 041f301..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/noprefix.h +++ /dev/null @@ -1,212 +0,0 @@ -#ifndef NPY_NOPREFIX_H -#define NPY_NOPREFIX_H - -/* - * You can directly include noprefix.h as a backward - * compatibility measure - */ -#ifndef NPY_NO_PREFIX -#include "ndarrayobject.h" -#include "npy_interrupt.h" -#endif - -#define SIGSETJMP NPY_SIGSETJMP -#define SIGLONGJMP NPY_SIGLONGJMP -#define SIGJMP_BUF NPY_SIGJMP_BUF - -#define MAX_DIMS NPY_MAXDIMS - -#define longlong npy_longlong -#define ulonglong npy_ulonglong -#define Bool npy_bool -#define longdouble npy_longdouble -#define byte npy_byte - -#ifndef _BSD_SOURCE -#define ushort npy_ushort -#define uint npy_uint -#define ulong npy_ulong -#endif - -#define ubyte npy_ubyte -#define ushort npy_ushort -#define uint npy_uint -#define ulong npy_ulong -#define cfloat npy_cfloat -#define cdouble npy_cdouble -#define clongdouble npy_clongdouble -#define Int8 npy_int8 -#define UInt8 npy_uint8 -#define Int16 npy_int16 -#define UInt16 npy_uint16 -#define Int32 npy_int32 -#define UInt32 npy_uint32 -#define Int64 npy_int64 -#define UInt64 npy_uint64 -#define Int128 npy_int128 -#define UInt128 npy_uint128 -#define 
Int256 npy_int256 -#define UInt256 npy_uint256 -#define Float16 npy_float16 -#define Complex32 npy_complex32 -#define Float32 npy_float32 -#define Complex64 npy_complex64 -#define Float64 npy_float64 -#define Complex128 npy_complex128 -#define Float80 npy_float80 -#define Complex160 npy_complex160 -#define Float96 npy_float96 -#define Complex192 npy_complex192 -#define Float128 npy_float128 -#define Complex256 npy_complex256 -#define intp npy_intp -#define uintp npy_uintp -#define datetime npy_datetime -#define timedelta npy_timedelta - -#define SIZEOF_LONGLONG NPY_SIZEOF_LONGLONG -#define SIZEOF_INTP NPY_SIZEOF_INTP -#define SIZEOF_UINTP NPY_SIZEOF_UINTP -#define SIZEOF_HALF NPY_SIZEOF_HALF -#define SIZEOF_LONGDOUBLE NPY_SIZEOF_LONGDOUBLE -#define SIZEOF_DATETIME NPY_SIZEOF_DATETIME -#define SIZEOF_TIMEDELTA NPY_SIZEOF_TIMEDELTA - -#define LONGLONG_FMT NPY_LONGLONG_FMT -#define ULONGLONG_FMT NPY_ULONGLONG_FMT -#define LONGLONG_SUFFIX NPY_LONGLONG_SUFFIX -#define ULONGLONG_SUFFIX NPY_ULONGLONG_SUFFIX - -#define MAX_INT8 127 -#define MIN_INT8 -128 -#define MAX_UINT8 255 -#define MAX_INT16 32767 -#define MIN_INT16 -32768 -#define MAX_UINT16 65535 -#define MAX_INT32 2147483647 -#define MIN_INT32 (-MAX_INT32 - 1) -#define MAX_UINT32 4294967295U -#define MAX_INT64 LONGLONG_SUFFIX(9223372036854775807) -#define MIN_INT64 (-MAX_INT64 - LONGLONG_SUFFIX(1)) -#define MAX_UINT64 ULONGLONG_SUFFIX(18446744073709551615) -#define MAX_INT128 LONGLONG_SUFFIX(85070591730234615865843651857942052864) -#define MIN_INT128 (-MAX_INT128 - LONGLONG_SUFFIX(1)) -#define MAX_UINT128 ULONGLONG_SUFFIX(170141183460469231731687303715884105728) -#define MAX_INT256 LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967) -#define MIN_INT256 (-MAX_INT256 - LONGLONG_SUFFIX(1)) -#define MAX_UINT256 ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935) - -#define MAX_BYTE NPY_MAX_BYTE -#define MIN_BYTE NPY_MIN_BYTE -#define 
MAX_UBYTE NPY_MAX_UBYTE -#define MAX_SHORT NPY_MAX_SHORT -#define MIN_SHORT NPY_MIN_SHORT -#define MAX_USHORT NPY_MAX_USHORT -#define MAX_INT NPY_MAX_INT -#define MIN_INT NPY_MIN_INT -#define MAX_UINT NPY_MAX_UINT -#define MAX_LONG NPY_MAX_LONG -#define MIN_LONG NPY_MIN_LONG -#define MAX_ULONG NPY_MAX_ULONG -#define MAX_LONGLONG NPY_MAX_LONGLONG -#define MIN_LONGLONG NPY_MIN_LONGLONG -#define MAX_ULONGLONG NPY_MAX_ULONGLONG -#define MIN_DATETIME NPY_MIN_DATETIME -#define MAX_DATETIME NPY_MAX_DATETIME -#define MIN_TIMEDELTA NPY_MIN_TIMEDELTA -#define MAX_TIMEDELTA NPY_MAX_TIMEDELTA - -#define BITSOF_BOOL NPY_BITSOF_BOOL -#define BITSOF_CHAR NPY_BITSOF_CHAR -#define BITSOF_SHORT NPY_BITSOF_SHORT -#define BITSOF_INT NPY_BITSOF_INT -#define BITSOF_LONG NPY_BITSOF_LONG -#define BITSOF_LONGLONG NPY_BITSOF_LONGLONG -#define BITSOF_HALF NPY_BITSOF_HALF -#define BITSOF_FLOAT NPY_BITSOF_FLOAT -#define BITSOF_DOUBLE NPY_BITSOF_DOUBLE -#define BITSOF_LONGDOUBLE NPY_BITSOF_LONGDOUBLE -#define BITSOF_DATETIME NPY_BITSOF_DATETIME -#define BITSOF_TIMEDELTA NPY_BITSOF_TIMEDELTA - -#define _pya_malloc PyArray_malloc -#define _pya_free PyArray_free -#define _pya_realloc PyArray_realloc - -#define BEGIN_THREADS_DEF NPY_BEGIN_THREADS_DEF -#define BEGIN_THREADS NPY_BEGIN_THREADS -#define END_THREADS NPY_END_THREADS -#define ALLOW_C_API_DEF NPY_ALLOW_C_API_DEF -#define ALLOW_C_API NPY_ALLOW_C_API -#define DISABLE_C_API NPY_DISABLE_C_API - -#define PY_FAIL NPY_FAIL -#define PY_SUCCEED NPY_SUCCEED - -#ifndef TRUE -#define TRUE NPY_TRUE -#endif - -#ifndef FALSE -#define FALSE NPY_FALSE -#endif - -#define LONGDOUBLE_FMT NPY_LONGDOUBLE_FMT - -#define CONTIGUOUS NPY_CONTIGUOUS -#define C_CONTIGUOUS NPY_C_CONTIGUOUS -#define FORTRAN NPY_FORTRAN -#define F_CONTIGUOUS NPY_F_CONTIGUOUS -#define OWNDATA NPY_OWNDATA -#define FORCECAST NPY_FORCECAST -#define ENSURECOPY NPY_ENSURECOPY -#define ENSUREARRAY NPY_ENSUREARRAY -#define ELEMENTSTRIDES NPY_ELEMENTSTRIDES -#define ALIGNED NPY_ALIGNED -#define 
NOTSWAPPED NPY_NOTSWAPPED -#define WRITEABLE NPY_WRITEABLE -#define UPDATEIFCOPY NPY_UPDATEIFCOPY -#define WRITEBACKIFCOPY NPY_ARRAY_WRITEBACKIFCOPY -#define ARR_HAS_DESCR NPY_ARR_HAS_DESCR -#define BEHAVED NPY_BEHAVED -#define BEHAVED_NS NPY_BEHAVED_NS -#define CARRAY NPY_CARRAY -#define CARRAY_RO NPY_CARRAY_RO -#define FARRAY NPY_FARRAY -#define FARRAY_RO NPY_FARRAY_RO -#define DEFAULT NPY_DEFAULT -#define IN_ARRAY NPY_IN_ARRAY -#define OUT_ARRAY NPY_OUT_ARRAY -#define INOUT_ARRAY NPY_INOUT_ARRAY -#define IN_FARRAY NPY_IN_FARRAY -#define OUT_FARRAY NPY_OUT_FARRAY -#define INOUT_FARRAY NPY_INOUT_FARRAY -#define UPDATE_ALL NPY_UPDATE_ALL - -#define OWN_DATA NPY_OWNDATA -#define BEHAVED_FLAGS NPY_BEHAVED -#define BEHAVED_FLAGS_NS NPY_BEHAVED_NS -#define CARRAY_FLAGS_RO NPY_CARRAY_RO -#define CARRAY_FLAGS NPY_CARRAY -#define FARRAY_FLAGS NPY_FARRAY -#define FARRAY_FLAGS_RO NPY_FARRAY_RO -#define DEFAULT_FLAGS NPY_DEFAULT -#define UPDATE_ALL_FLAGS NPY_UPDATE_ALL_FLAGS - -#ifndef MIN -#define MIN PyArray_MIN -#endif -#ifndef MAX -#define MAX PyArray_MAX -#endif -#define MAX_INTP NPY_MAX_INTP -#define MIN_INTP NPY_MIN_INTP -#define MAX_UINTP NPY_MAX_UINTP -#define INTP_FMT NPY_INTP_FMT - -#ifndef PYPY_VERSION -#define REFCOUNT PyArray_REFCOUNT -#define MAX_ELSIZE NPY_MAX_ELSIZE -#endif - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h deleted file mode 100644 index a6ee212..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h +++ /dev/null @@ -1,133 +0,0 @@ -#ifndef _NPY_1_7_DEPRECATED_API_H -#define _NPY_1_7_DEPRECATED_API_H - -#ifndef NPY_DEPRECATED_INCLUDES -#error "Should never include npy_*_*_deprecated_api directly." 
-#endif - -/* Emit a warning if the user did not specifically request the old API */ -#ifndef NPY_NO_DEPRECATED_API -#if defined(_WIN32) -#define _WARN___STR2__(x) #x -#define _WARN___STR1__(x) _WARN___STR2__(x) -#define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: " -#pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it with " \ - "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION") -#elif defined(__GNUC__) -#warning "Using deprecated NumPy API, disable it with " \ - "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION" -#endif -/* TODO: How to do this warning message for other compilers? */ -#endif - -/* - * This header exists to collect all dangerous/deprecated NumPy API - * as of NumPy 1.7. - * - * This is an attempt to remove bad API, the proliferation of macros, - * and namespace pollution currently produced by the NumPy headers. - */ - -/* These array flags are deprecated as of NumPy 1.7 */ -#define NPY_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS -#define NPY_FORTRAN NPY_ARRAY_F_CONTIGUOUS - -/* - * The consistent NPY_ARRAY_* names which don't pollute the NPY_* - * namespace were added in NumPy 1.7. - * - * These versions of the carray flags are deprecated, but - * probably should only be removed after two releases instead of one. 
- */ -#define NPY_C_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS -#define NPY_F_CONTIGUOUS NPY_ARRAY_F_CONTIGUOUS -#define NPY_OWNDATA NPY_ARRAY_OWNDATA -#define NPY_FORCECAST NPY_ARRAY_FORCECAST -#define NPY_ENSURECOPY NPY_ARRAY_ENSURECOPY -#define NPY_ENSUREARRAY NPY_ARRAY_ENSUREARRAY -#define NPY_ELEMENTSTRIDES NPY_ARRAY_ELEMENTSTRIDES -#define NPY_ALIGNED NPY_ARRAY_ALIGNED -#define NPY_NOTSWAPPED NPY_ARRAY_NOTSWAPPED -#define NPY_WRITEABLE NPY_ARRAY_WRITEABLE -#define NPY_UPDATEIFCOPY NPY_ARRAY_UPDATEIFCOPY -#define NPY_BEHAVED NPY_ARRAY_BEHAVED -#define NPY_BEHAVED_NS NPY_ARRAY_BEHAVED_NS -#define NPY_CARRAY NPY_ARRAY_CARRAY -#define NPY_CARRAY_RO NPY_ARRAY_CARRAY_RO -#define NPY_FARRAY NPY_ARRAY_FARRAY -#define NPY_FARRAY_RO NPY_ARRAY_FARRAY_RO -#define NPY_DEFAULT NPY_ARRAY_DEFAULT -#define NPY_IN_ARRAY NPY_ARRAY_IN_ARRAY -#define NPY_OUT_ARRAY NPY_ARRAY_OUT_ARRAY -#define NPY_INOUT_ARRAY NPY_ARRAY_INOUT_ARRAY -#define NPY_IN_FARRAY NPY_ARRAY_IN_FARRAY -#define NPY_OUT_FARRAY NPY_ARRAY_OUT_FARRAY -#define NPY_INOUT_FARRAY NPY_ARRAY_INOUT_FARRAY -#define NPY_UPDATE_ALL NPY_ARRAY_UPDATE_ALL - -/* This way of accessing the default type is deprecated as of NumPy 1.7 */ -#define PyArray_DEFAULT NPY_DEFAULT_TYPE - -/* These DATETIME bits aren't used internally */ -#if PY_VERSION_HEX >= 0x03000000 -#define PyDataType_GetDatetimeMetaData(descr) \ - ((descr->metadata == NULL) ? NULL : \ - ((PyArray_DatetimeMetaData *)(PyCapsule_GetPointer( \ - PyDict_GetItemString( \ - descr->metadata, NPY_METADATA_DTSTR), NULL)))) -#else -#define PyDataType_GetDatetimeMetaData(descr) \ - ((descr->metadata == NULL) ? NULL : \ - ((PyArray_DatetimeMetaData *)(PyCObject_AsVoidPtr( \ - PyDict_GetItemString(descr->metadata, NPY_METADATA_DTSTR))))) -#endif - -/* - * Deprecated as of NumPy 1.7, this kind of shortcut doesn't - * belong in the public API. - */ -#define NPY_AO PyArrayObject - -/* - * Deprecated as of NumPy 1.7, an all-lowercase macro doesn't - * belong in the public API. 
- */ -#define fortran fortran_ - -/* - * Deprecated as of NumPy 1.7, as it is a namespace-polluting - * macro. - */ -#define FORTRAN_IF PyArray_FORTRAN_IF - -/* Deprecated as of NumPy 1.7, datetime64 uses c_metadata instead */ -#define NPY_METADATA_DTSTR "__timeunit__" - -/* - * Deprecated as of NumPy 1.7. - * The reasoning: - * - These are for datetime, but there's no datetime "namespace". - * - They just turn NPY_STR_ into "", which is just - * making something simple be indirected. - */ -#define NPY_STR_Y "Y" -#define NPY_STR_M "M" -#define NPY_STR_W "W" -#define NPY_STR_D "D" -#define NPY_STR_h "h" -#define NPY_STR_m "m" -#define NPY_STR_s "s" -#define NPY_STR_ms "ms" -#define NPY_STR_us "us" -#define NPY_STR_ns "ns" -#define NPY_STR_ps "ps" -#define NPY_STR_fs "fs" -#define NPY_STR_as "as" - -/* - * The macros in old_defines.h are Deprecated as of NumPy 1.7 and will be - * removed in the next major release. - */ -#include "old_defines.h" - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_3kcompat.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_3kcompat.h deleted file mode 100644 index 832bc05..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_3kcompat.h +++ /dev/null @@ -1,577 +0,0 @@ -/* - * This is a convenience header file providing compatibility utilities - * for supporting Python 2 and Python 3 in the same code base. - * - * If you want to use this for your own projects, it's recommended to make a - * copy of it. Although the stuff below is unlikely to change, we don't provide - * strong backwards compatibility guarantees at the moment. 
- */ - -#ifndef _NPY_3KCOMPAT_H_ -#define _NPY_3KCOMPAT_H_ - -#include -#include - -#if PY_VERSION_HEX >= 0x03000000 -#ifndef NPY_PY3K -#define NPY_PY3K 1 -#endif -#endif - -#include "numpy/npy_common.h" -#include "numpy/ndarrayobject.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * PyInt -> PyLong - */ - -#if defined(NPY_PY3K) -/* Return True only if the long fits in a C long */ -static NPY_INLINE int PyInt_Check(PyObject *op) { - int overflow = 0; - if (!PyLong_Check(op)) { - return 0; - } - PyLong_AsLongAndOverflow(op, &overflow); - return (overflow == 0); -} - -#define PyInt_FromLong PyLong_FromLong -#define PyInt_AsLong PyLong_AsLong -#define PyInt_AS_LONG PyLong_AsLong -#define PyInt_AsSsize_t PyLong_AsSsize_t - -/* NOTE: - * - * Since the PyLong type is very different from the fixed-range PyInt, - * we don't define PyInt_Type -> PyLong_Type. - */ -#endif /* NPY_PY3K */ - -/* Py3 changes PySlice_GetIndicesEx' first argument's type to PyObject* */ -#ifdef NPY_PY3K -# define NpySlice_GetIndicesEx PySlice_GetIndicesEx -#else -# define NpySlice_GetIndicesEx(op, nop, start, end, step, slicelength) \ - PySlice_GetIndicesEx((PySliceObject *)op, nop, start, end, step, slicelength) -#endif - -/* <2.7.11 and <3.4.4 have the wrong argument type for Py_EnterRecursiveCall */ -#if (PY_VERSION_HEX < 0x02070B00) || \ - ((0x03000000 <= PY_VERSION_HEX) && (PY_VERSION_HEX < 0x03040400)) - #define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall((char *)(x)) -#else - #define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall(x) -#endif - -/* Py_SETREF was added in 3.5.2, and only if Py_LIMITED_API is absent */ -#if PY_VERSION_HEX < 0x03050200 - #define Py_SETREF(op, op2) \ - do { \ - PyObject *_py_tmp = (PyObject *)(op); \ - (op) = (op2); \ - Py_DECREF(_py_tmp); \ - } while (0) -#endif - -/* - * PyString -> PyBytes - */ - -#if defined(NPY_PY3K) - -#define PyString_Type PyBytes_Type -#define PyString_Check PyBytes_Check -#define PyStringObject PyBytesObject -#define 
PyString_FromString PyBytes_FromString -#define PyString_FromStringAndSize PyBytes_FromStringAndSize -#define PyString_AS_STRING PyBytes_AS_STRING -#define PyString_AsStringAndSize PyBytes_AsStringAndSize -#define PyString_FromFormat PyBytes_FromFormat -#define PyString_Concat PyBytes_Concat -#define PyString_ConcatAndDel PyBytes_ConcatAndDel -#define PyString_AsString PyBytes_AsString -#define PyString_GET_SIZE PyBytes_GET_SIZE -#define PyString_Size PyBytes_Size - -#define PyUString_Type PyUnicode_Type -#define PyUString_Check PyUnicode_Check -#define PyUStringObject PyUnicodeObject -#define PyUString_FromString PyUnicode_FromString -#define PyUString_FromStringAndSize PyUnicode_FromStringAndSize -#define PyUString_FromFormat PyUnicode_FromFormat -#define PyUString_Concat PyUnicode_Concat2 -#define PyUString_ConcatAndDel PyUnicode_ConcatAndDel -#define PyUString_GET_SIZE PyUnicode_GET_SIZE -#define PyUString_Size PyUnicode_Size -#define PyUString_InternFromString PyUnicode_InternFromString -#define PyUString_Format PyUnicode_Format - -#define PyBaseString_Check(obj) (PyUnicode_Check(obj)) - -#else - -#define PyBytes_Type PyString_Type -#define PyBytes_Check PyString_Check -#define PyBytesObject PyStringObject -#define PyBytes_FromString PyString_FromString -#define PyBytes_FromStringAndSize PyString_FromStringAndSize -#define PyBytes_AS_STRING PyString_AS_STRING -#define PyBytes_AsStringAndSize PyString_AsStringAndSize -#define PyBytes_FromFormat PyString_FromFormat -#define PyBytes_Concat PyString_Concat -#define PyBytes_ConcatAndDel PyString_ConcatAndDel -#define PyBytes_AsString PyString_AsString -#define PyBytes_GET_SIZE PyString_GET_SIZE -#define PyBytes_Size PyString_Size - -#define PyUString_Type PyString_Type -#define PyUString_Check PyString_Check -#define PyUStringObject PyStringObject -#define PyUString_FromString PyString_FromString -#define PyUString_FromStringAndSize PyString_FromStringAndSize -#define PyUString_FromFormat PyString_FromFormat 
-#define PyUString_Concat PyString_Concat -#define PyUString_ConcatAndDel PyString_ConcatAndDel -#define PyUString_GET_SIZE PyString_GET_SIZE -#define PyUString_Size PyString_Size -#define PyUString_InternFromString PyString_InternFromString -#define PyUString_Format PyString_Format - -#define PyBaseString_Check(obj) (PyBytes_Check(obj) || PyUnicode_Check(obj)) - -#endif /* NPY_PY3K */ - - -static NPY_INLINE void -PyUnicode_ConcatAndDel(PyObject **left, PyObject *right) -{ - Py_SETREF(*left, PyUnicode_Concat(*left, right)); - Py_DECREF(right); -} - -static NPY_INLINE void -PyUnicode_Concat2(PyObject **left, PyObject *right) -{ - Py_SETREF(*left, PyUnicode_Concat(*left, right)); -} - -/* - * PyFile_* compatibility - */ - -/* - * Get a FILE* handle to the file represented by the Python object - */ -static NPY_INLINE FILE* -npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos) -{ - int fd, fd2, unbuf; - PyObject *ret, *os, *io, *io_raw; - npy_off_t pos; - FILE *handle; - - /* For Python 2 PyFileObject, use PyFile_AsFile */ -#if !defined(NPY_PY3K) - if (PyFile_Check(file)) { - return PyFile_AsFile(file); - } -#endif - - /* Flush first to ensure things end up in the file in the correct order */ - ret = PyObject_CallMethod(file, "flush", ""); - if (ret == NULL) { - return NULL; - } - Py_DECREF(ret); - fd = PyObject_AsFileDescriptor(file); - if (fd == -1) { - return NULL; - } - - /* - * The handle needs to be dup'd because we have to call fclose - * at the end - */ - os = PyImport_ImportModule("os"); - if (os == NULL) { - return NULL; - } - ret = PyObject_CallMethod(os, "dup", "i", fd); - Py_DECREF(os); - if (ret == NULL) { - return NULL; - } - fd2 = PyNumber_AsSsize_t(ret, NULL); - Py_DECREF(ret); - - /* Convert to FILE* handle */ -#ifdef _WIN32 - handle = _fdopen(fd2, mode); -#else - handle = fdopen(fd2, mode); -#endif - if (handle == NULL) { - PyErr_SetString(PyExc_IOError, - "Getting a FILE* from a Python file object failed"); - return NULL; - } - - /* 
Record the original raw file handle position */ - *orig_pos = npy_ftell(handle); - if (*orig_pos == -1) { - /* The io module is needed to determine if buffering is used */ - io = PyImport_ImportModule("io"); - if (io == NULL) { - fclose(handle); - return NULL; - } - /* File object instances of RawIOBase are unbuffered */ - io_raw = PyObject_GetAttrString(io, "RawIOBase"); - Py_DECREF(io); - if (io_raw == NULL) { - fclose(handle); - return NULL; - } - unbuf = PyObject_IsInstance(file, io_raw); - Py_DECREF(io_raw); - if (unbuf == 1) { - /* Succeed if the IO is unbuffered */ - return handle; - } - else { - PyErr_SetString(PyExc_IOError, "obtaining file position failed"); - fclose(handle); - return NULL; - } - } - - /* Seek raw handle to the Python-side position */ - ret = PyObject_CallMethod(file, "tell", ""); - if (ret == NULL) { - fclose(handle); - return NULL; - } - pos = PyLong_AsLongLong(ret); - Py_DECREF(ret); - if (PyErr_Occurred()) { - fclose(handle); - return NULL; - } - if (npy_fseek(handle, pos, SEEK_SET) == -1) { - PyErr_SetString(PyExc_IOError, "seeking file failed"); - fclose(handle); - return NULL; - } - return handle; -} - -/* - * Close the dup-ed file handle, and seek the Python one to the current position - */ -static NPY_INLINE int -npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos) -{ - int fd, unbuf; - PyObject *ret, *io, *io_raw; - npy_off_t position; - - /* For Python 2 PyFileObject, do nothing */ -#if !defined(NPY_PY3K) - if (PyFile_Check(file)) { - return 0; - } -#endif - - position = npy_ftell(handle); - - /* Close the FILE* handle */ - fclose(handle); - - /* - * Restore original file handle position, in order to not confuse - * Python-side data structures - */ - fd = PyObject_AsFileDescriptor(file); - if (fd == -1) { - return -1; - } - - if (npy_lseek(fd, orig_pos, SEEK_SET) == -1) { - - /* The io module is needed to determine if buffering is used */ - io = PyImport_ImportModule("io"); - if (io == NULL) { - return -1; - } 
- /* File object instances of RawIOBase are unbuffered */ - io_raw = PyObject_GetAttrString(io, "RawIOBase"); - Py_DECREF(io); - if (io_raw == NULL) { - return -1; - } - unbuf = PyObject_IsInstance(file, io_raw); - Py_DECREF(io_raw); - if (unbuf == 1) { - /* Succeed if the IO is unbuffered */ - return 0; - } - else { - PyErr_SetString(PyExc_IOError, "seeking file failed"); - return -1; - } - } - - if (position == -1) { - PyErr_SetString(PyExc_IOError, "obtaining file position failed"); - return -1; - } - - /* Seek Python-side handle to the FILE* handle position */ - ret = PyObject_CallMethod(file, "seek", NPY_OFF_T_PYFMT "i", position, 0); - if (ret == NULL) { - return -1; - } - Py_DECREF(ret); - return 0; -} - -static NPY_INLINE int -npy_PyFile_Check(PyObject *file) -{ - int fd; - /* For Python 2, check if it is a PyFileObject */ -#if !defined(NPY_PY3K) - if (PyFile_Check(file)) { - return 1; - } -#endif - fd = PyObject_AsFileDescriptor(file); - if (fd == -1) { - PyErr_Clear(); - return 0; - } - return 1; -} - -static NPY_INLINE PyObject* -npy_PyFile_OpenFile(PyObject *filename, const char *mode) -{ - PyObject *open; - open = PyDict_GetItemString(PyEval_GetBuiltins(), "open"); - if (open == NULL) { - return NULL; - } - return PyObject_CallFunction(open, "Os", filename, mode); -} - -static NPY_INLINE int -npy_PyFile_CloseFile(PyObject *file) -{ - PyObject *ret; - - ret = PyObject_CallMethod(file, "close", NULL); - if (ret == NULL) { - return -1; - } - Py_DECREF(ret); - return 0; -} - - -/* This is a copy of _PyErr_ChainExceptions - */ -static NPY_INLINE void -npy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb) -{ - if (exc == NULL) - return; - - if (PyErr_Occurred()) { - /* only py3 supports this anyway */ - #ifdef NPY_PY3K - PyObject *exc2, *val2, *tb2; - PyErr_Fetch(&exc2, &val2, &tb2); - PyErr_NormalizeException(&exc, &val, &tb); - if (tb != NULL) { - PyException_SetTraceback(val, tb); - Py_DECREF(tb); - } - Py_DECREF(exc); - 
PyErr_NormalizeException(&exc2, &val2, &tb2); - PyException_SetContext(val2, val); - PyErr_Restore(exc2, val2, tb2); - #endif - } - else { - PyErr_Restore(exc, val, tb); - } -} - - -/* This is a copy of _PyErr_ChainExceptions, with: - * - a minimal implementation for python 2 - * - __cause__ used instead of __context__ - */ -static NPY_INLINE void -npy_PyErr_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject *tb) -{ - if (exc == NULL) - return; - - if (PyErr_Occurred()) { - /* only py3 supports this anyway */ - #ifdef NPY_PY3K - PyObject *exc2, *val2, *tb2; - PyErr_Fetch(&exc2, &val2, &tb2); - PyErr_NormalizeException(&exc, &val, &tb); - if (tb != NULL) { - PyException_SetTraceback(val, tb); - Py_DECREF(tb); - } - Py_DECREF(exc); - PyErr_NormalizeException(&exc2, &val2, &tb2); - PyException_SetCause(val2, val); - PyErr_Restore(exc2, val2, tb2); - #endif - } - else { - PyErr_Restore(exc, val, tb); - } -} - -/* - * PyObject_Cmp - */ -#if defined(NPY_PY3K) -static NPY_INLINE int -PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp) -{ - int v; - v = PyObject_RichCompareBool(i1, i2, Py_LT); - if (v == 1) { - *cmp = -1; - return 1; - } - else if (v == -1) { - return -1; - } - - v = PyObject_RichCompareBool(i1, i2, Py_GT); - if (v == 1) { - *cmp = 1; - return 1; - } - else if (v == -1) { - return -1; - } - - v = PyObject_RichCompareBool(i1, i2, Py_EQ); - if (v == 1) { - *cmp = 0; - return 1; - } - else { - *cmp = 0; - return -1; - } -} -#endif - -/* - * PyCObject functions adapted to PyCapsules. - * - * The main job here is to get rid of the improved error handling - * of PyCapsules. It's a shame... 
- */ -#if PY_VERSION_HEX >= 0x03000000 - -static NPY_INLINE PyObject * -NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)) -{ - PyObject *ret = PyCapsule_New(ptr, NULL, dtor); - if (ret == NULL) { - PyErr_Clear(); - } - return ret; -} - -static NPY_INLINE PyObject * -NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, void (*dtor)(PyObject *)) -{ - PyObject *ret = NpyCapsule_FromVoidPtr(ptr, dtor); - if (ret != NULL && PyCapsule_SetContext(ret, context) != 0) { - PyErr_Clear(); - Py_DECREF(ret); - ret = NULL; - } - return ret; -} - -static NPY_INLINE void * -NpyCapsule_AsVoidPtr(PyObject *obj) -{ - void *ret = PyCapsule_GetPointer(obj, NULL); - if (ret == NULL) { - PyErr_Clear(); - } - return ret; -} - -static NPY_INLINE void * -NpyCapsule_GetDesc(PyObject *obj) -{ - return PyCapsule_GetContext(obj); -} - -static NPY_INLINE int -NpyCapsule_Check(PyObject *ptr) -{ - return PyCapsule_CheckExact(ptr); -} - -#else - -static NPY_INLINE PyObject * -NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(void *)) -{ - return PyCObject_FromVoidPtr(ptr, dtor); -} - -static NPY_INLINE PyObject * -NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, - void (*dtor)(void *, void *)) -{ - return PyCObject_FromVoidPtrAndDesc(ptr, context, dtor); -} - -static NPY_INLINE void * -NpyCapsule_AsVoidPtr(PyObject *ptr) -{ - return PyCObject_AsVoidPtr(ptr); -} - -static NPY_INLINE void * -NpyCapsule_GetDesc(PyObject *obj) -{ - return PyCObject_GetDesc(obj); -} - -static NPY_INLINE int -NpyCapsule_Check(PyObject *ptr) -{ - return PyCObject_Check(ptr); -} - -#endif - -#ifdef __cplusplus -} -#endif - -#endif /* _NPY_3KCOMPAT_H_ */ diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_common.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_common.h deleted file mode 100644 index 27b83f7..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_common.h +++ /dev/null @@ -1,1104 +0,0 @@ -#ifndef _NPY_COMMON_H_ -#define 
_NPY_COMMON_H_ - -/* numpconfig.h is auto-generated */ -#include "numpyconfig.h" -#ifdef HAVE_NPY_CONFIG_H -#include -#endif - -/* need Python.h for npy_intp, npy_uintp */ -#include - -/* - * using static inline modifiers when defining npy_math functions - * allows the compiler to make optimizations when possible - */ -#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD -#ifndef NPY_INLINE_MATH -#define NPY_INLINE_MATH 1 -#endif -#endif - -/* - * gcc does not unroll even with -O3 - * use with care, unrolling on modern cpus rarely speeds things up - */ -#ifdef HAVE_ATTRIBUTE_OPTIMIZE_UNROLL_LOOPS -#define NPY_GCC_UNROLL_LOOPS \ - __attribute__((optimize("unroll-loops"))) -#else -#define NPY_GCC_UNROLL_LOOPS -#endif - -/* highest gcc optimization level, enabled autovectorizer */ -#ifdef HAVE_ATTRIBUTE_OPTIMIZE_OPT_3 -#define NPY_GCC_OPT_3 __attribute__((optimize("O3"))) -#else -#define NPY_GCC_OPT_3 -#endif - -/* compile target attributes */ -#if defined HAVE_ATTRIBUTE_TARGET_AVX && defined HAVE_LINK_AVX -#define NPY_GCC_TARGET_AVX __attribute__((target("avx"))) -#else -#define NPY_GCC_TARGET_AVX -#endif - -#if defined HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS -#define HAVE_ATTRIBUTE_TARGET_FMA -#define NPY_GCC_TARGET_FMA __attribute__((target("avx2,fma"))) -#endif - -#if defined HAVE_ATTRIBUTE_TARGET_AVX2 && defined HAVE_LINK_AVX2 -#define NPY_GCC_TARGET_AVX2 __attribute__((target("avx2"))) -#else -#define NPY_GCC_TARGET_AVX2 -#endif - -#if defined HAVE_ATTRIBUTE_TARGET_AVX512F && defined HAVE_LINK_AVX512F -#define NPY_GCC_TARGET_AVX512F __attribute__((target("avx512f"))) -#elif defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS -#define NPY_GCC_TARGET_AVX512F __attribute__((target("avx512f"))) -#else -#define NPY_GCC_TARGET_AVX512F -#endif - -/* - * mark an argument (starting from 1) that must not be NULL and is not checked - * DO NOT USE IF FUNCTION CHECKS FOR NULL!! 
the compiler will remove the check - */ -#ifdef HAVE_ATTRIBUTE_NONNULL -#define NPY_GCC_NONNULL(n) __attribute__((nonnull(n))) -#else -#define NPY_GCC_NONNULL(n) -#endif - -#if defined HAVE_XMMINTRIN_H && defined HAVE__MM_LOAD_PS -#define NPY_HAVE_SSE_INTRINSICS -#endif - -#if defined HAVE_EMMINTRIN_H && defined HAVE__MM_LOAD_PD -#define NPY_HAVE_SSE2_INTRINSICS -#endif - -#if defined HAVE_IMMINTRIN_H && defined HAVE_LINK_AVX2 -#define NPY_HAVE_AVX2_INTRINSICS -#endif - -#if defined HAVE_IMMINTRIN_H && defined HAVE_LINK_AVX512F -#define NPY_HAVE_AVX512F_INTRINSICS -#endif -/* - * give a hint to the compiler which branch is more likely or unlikely - * to occur, e.g. rare error cases: - * - * if (NPY_UNLIKELY(failure == 0)) - * return NULL; - * - * the double !! is to cast the expression (e.g. NULL) to a boolean required by - * the intrinsic - */ -#ifdef HAVE___BUILTIN_EXPECT -#define NPY_LIKELY(x) __builtin_expect(!!(x), 1) -#define NPY_UNLIKELY(x) __builtin_expect(!!(x), 0) -#else -#define NPY_LIKELY(x) (x) -#define NPY_UNLIKELY(x) (x) -#endif - -#ifdef HAVE___BUILTIN_PREFETCH -/* unlike _mm_prefetch also works on non-x86 */ -#define NPY_PREFETCH(x, rw, loc) __builtin_prefetch((x), (rw), (loc)) -#else -#ifdef HAVE__MM_PREFETCH -/* _MM_HINT_ET[01] (rw = 1) unsupported, only available in gcc >= 4.9 */ -#define NPY_PREFETCH(x, rw, loc) _mm_prefetch((x), loc == 0 ? _MM_HINT_NTA : \ - (loc == 1 ? _MM_HINT_T2 : \ - (loc == 2 ? _MM_HINT_T1 : \ - (loc == 3 ? 
_MM_HINT_T0 : -1)))) -#else -#define NPY_PREFETCH(x, rw,loc) -#endif -#endif - -#if defined(_MSC_VER) - #define NPY_INLINE __inline -#elif defined(__GNUC__) - #if defined(__STRICT_ANSI__) - #define NPY_INLINE __inline__ - #else - #define NPY_INLINE inline - #endif -#else - #define NPY_INLINE -#endif - -#ifdef HAVE___THREAD - #define NPY_TLS __thread -#else - #ifdef HAVE___DECLSPEC_THREAD_ - #define NPY_TLS __declspec(thread) - #else - #define NPY_TLS - #endif -#endif - -#ifdef WITH_CPYCHECKER_RETURNS_BORROWED_REF_ATTRIBUTE - #define NPY_RETURNS_BORROWED_REF \ - __attribute__((cpychecker_returns_borrowed_ref)) -#else - #define NPY_RETURNS_BORROWED_REF -#endif - -#ifdef WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE - #define NPY_STEALS_REF_TO_ARG(n) \ - __attribute__((cpychecker_steals_reference_to_arg(n))) -#else - #define NPY_STEALS_REF_TO_ARG(n) -#endif - -/* 64 bit file position support, also on win-amd64. Ticket #1660 */ -#if defined(_MSC_VER) && defined(_WIN64) && (_MSC_VER > 1400) || \ - defined(__MINGW32__) || defined(__MINGW64__) - #include - -/* mingw based on 3.4.5 has lseek but not ftell/fseek */ -#if defined(__MINGW32__) || defined(__MINGW64__) -extern int __cdecl _fseeki64(FILE *, long long, int); -extern long long __cdecl _ftelli64(FILE *); -#endif - - #define npy_fseek _fseeki64 - #define npy_ftell _ftelli64 - #define npy_lseek _lseeki64 - #define npy_off_t npy_int64 - - #if NPY_SIZEOF_INT == 8 - #define NPY_OFF_T_PYFMT "i" - #elif NPY_SIZEOF_LONG == 8 - #define NPY_OFF_T_PYFMT "l" - #elif NPY_SIZEOF_LONGLONG == 8 - #define NPY_OFF_T_PYFMT "L" - #else - #error Unsupported size for type off_t - #endif -#else -#ifdef HAVE_FSEEKO - #define npy_fseek fseeko -#else - #define npy_fseek fseek -#endif -#ifdef HAVE_FTELLO - #define npy_ftell ftello -#else - #define npy_ftell ftell -#endif - #include - #define npy_lseek lseek - #define npy_off_t off_t - - #if NPY_SIZEOF_OFF_T == NPY_SIZEOF_SHORT - #define NPY_OFF_T_PYFMT "h" - #elif NPY_SIZEOF_OFF_T == 
NPY_SIZEOF_INT - #define NPY_OFF_T_PYFMT "i" - #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONG - #define NPY_OFF_T_PYFMT "l" - #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONGLONG - #define NPY_OFF_T_PYFMT "L" - #else - #error Unsupported size for type off_t - #endif -#endif - -/* enums for detected endianness */ -enum { - NPY_CPU_UNKNOWN_ENDIAN, - NPY_CPU_LITTLE, - NPY_CPU_BIG -}; - -/* - * This is to typedef npy_intp to the appropriate pointer size for this - * platform. Py_intptr_t, Py_uintptr_t are defined in pyport.h. - */ -typedef Py_intptr_t npy_intp; -typedef Py_uintptr_t npy_uintp; - -/* - * Define sizes that were not defined in numpyconfig.h. - */ -#define NPY_SIZEOF_CHAR 1 -#define NPY_SIZEOF_BYTE 1 -#define NPY_SIZEOF_DATETIME 8 -#define NPY_SIZEOF_TIMEDELTA 8 -#define NPY_SIZEOF_INTP NPY_SIZEOF_PY_INTPTR_T -#define NPY_SIZEOF_UINTP NPY_SIZEOF_PY_INTPTR_T -#define NPY_SIZEOF_HALF 2 -#define NPY_SIZEOF_CFLOAT NPY_SIZEOF_COMPLEX_FLOAT -#define NPY_SIZEOF_CDOUBLE NPY_SIZEOF_COMPLEX_DOUBLE -#define NPY_SIZEOF_CLONGDOUBLE NPY_SIZEOF_COMPLEX_LONGDOUBLE - -#ifdef constchar -#undef constchar -#endif - -#define NPY_SSIZE_T_PYFMT "n" -#define constchar char - -/* NPY_INTP_FMT Note: - * Unlike the other NPY_*_FMT macros which are used with - * PyOS_snprintf, NPY_INTP_FMT is used with PyErr_Format and - * PyString_Format. These functions use different formatting - * codes which are portably specified according to the Python - * documentation. See ticket #1795. 
- */ -#if NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_INT - #define NPY_INTP NPY_INT - #define NPY_UINTP NPY_UINT - #define PyIntpArrType_Type PyIntArrType_Type - #define PyUIntpArrType_Type PyUIntArrType_Type - #define NPY_MAX_INTP NPY_MAX_INT - #define NPY_MIN_INTP NPY_MIN_INT - #define NPY_MAX_UINTP NPY_MAX_UINT - #define NPY_INTP_FMT "d" -#elif NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONG - #define NPY_INTP NPY_LONG - #define NPY_UINTP NPY_ULONG - #define PyIntpArrType_Type PyLongArrType_Type - #define PyUIntpArrType_Type PyULongArrType_Type - #define NPY_MAX_INTP NPY_MAX_LONG - #define NPY_MIN_INTP NPY_MIN_LONG - #define NPY_MAX_UINTP NPY_MAX_ULONG - #define NPY_INTP_FMT "ld" -#elif defined(PY_LONG_LONG) && (NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONGLONG) - #define NPY_INTP NPY_LONGLONG - #define NPY_UINTP NPY_ULONGLONG - #define PyIntpArrType_Type PyLongLongArrType_Type - #define PyUIntpArrType_Type PyULongLongArrType_Type - #define NPY_MAX_INTP NPY_MAX_LONGLONG - #define NPY_MIN_INTP NPY_MIN_LONGLONG - #define NPY_MAX_UINTP NPY_MAX_ULONGLONG - #define NPY_INTP_FMT "lld" -#endif - -/* - * We can only use C99 formats for npy_int_p if it is the same as - * intp_t, hence the condition on HAVE_UNITPTR_T - */ -#if (NPY_USE_C99_FORMATS) == 1 \ - && (defined HAVE_UINTPTR_T) \ - && (defined HAVE_INTTYPES_H) - #include - #undef NPY_INTP_FMT - #define NPY_INTP_FMT PRIdPTR -#endif - - -/* - * Some platforms don't define bool, long long, or long double. - * Handle that here. 
- */ -#define NPY_BYTE_FMT "hhd" -#define NPY_UBYTE_FMT "hhu" -#define NPY_SHORT_FMT "hd" -#define NPY_USHORT_FMT "hu" -#define NPY_INT_FMT "d" -#define NPY_UINT_FMT "u" -#define NPY_LONG_FMT "ld" -#define NPY_ULONG_FMT "lu" -#define NPY_HALF_FMT "g" -#define NPY_FLOAT_FMT "g" -#define NPY_DOUBLE_FMT "g" - - -#ifdef PY_LONG_LONG -typedef PY_LONG_LONG npy_longlong; -typedef unsigned PY_LONG_LONG npy_ulonglong; -# ifdef _MSC_VER -# define NPY_LONGLONG_FMT "I64d" -# define NPY_ULONGLONG_FMT "I64u" -# else -# define NPY_LONGLONG_FMT "lld" -# define NPY_ULONGLONG_FMT "llu" -# endif -# ifdef _MSC_VER -# define NPY_LONGLONG_SUFFIX(x) (x##i64) -# define NPY_ULONGLONG_SUFFIX(x) (x##Ui64) -# else -# define NPY_LONGLONG_SUFFIX(x) (x##LL) -# define NPY_ULONGLONG_SUFFIX(x) (x##ULL) -# endif -#else -typedef long npy_longlong; -typedef unsigned long npy_ulonglong; -# define NPY_LONGLONG_SUFFIX(x) (x##L) -# define NPY_ULONGLONG_SUFFIX(x) (x##UL) -#endif - - -typedef unsigned char npy_bool; -#define NPY_FALSE 0 -#define NPY_TRUE 1 - - -#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE - typedef double npy_longdouble; - #define NPY_LONGDOUBLE_FMT "g" -#else - typedef long double npy_longdouble; - #define NPY_LONGDOUBLE_FMT "Lg" -#endif - -#ifndef Py_USING_UNICODE -#error Must use Python with unicode enabled. -#endif - - -typedef signed char npy_byte; -typedef unsigned char npy_ubyte; -typedef unsigned short npy_ushort; -typedef unsigned int npy_uint; -typedef unsigned long npy_ulong; - -/* These are for completeness */ -typedef char npy_char; -typedef short npy_short; -typedef int npy_int; -typedef long npy_long; -typedef float npy_float; -typedef double npy_double; - -/* - * Hash value compatibility. - * As of Python 3.2 hash values are of type Py_hash_t. - * Previous versions use C long. 
- */ -#if PY_VERSION_HEX < 0x03020000 -typedef long npy_hash_t; -#define NPY_SIZEOF_HASH_T NPY_SIZEOF_LONG -#else -typedef Py_hash_t npy_hash_t; -#define NPY_SIZEOF_HASH_T NPY_SIZEOF_INTP -#endif - -/* - * Disabling C99 complex usage: a lot of C code in numpy/scipy rely on being - * able to do .real/.imag. Will have to convert code first. - */ -#if 0 -#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_DOUBLE) -typedef complex npy_cdouble; -#else -typedef struct { double real, imag; } npy_cdouble; -#endif - -#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_FLOAT) -typedef complex float npy_cfloat; -#else -typedef struct { float real, imag; } npy_cfloat; -#endif - -#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_LONG_DOUBLE) -typedef complex long double npy_clongdouble; -#else -typedef struct {npy_longdouble real, imag;} npy_clongdouble; -#endif -#endif -#if NPY_SIZEOF_COMPLEX_DOUBLE != 2 * NPY_SIZEOF_DOUBLE -#error npy_cdouble definition is not compatible with C99 complex definition ! \ - Please contact NumPy maintainers and give detailed information about your \ - compiler and platform -#endif -typedef struct { double real, imag; } npy_cdouble; - -#if NPY_SIZEOF_COMPLEX_FLOAT != 2 * NPY_SIZEOF_FLOAT -#error npy_cfloat definition is not compatible with C99 complex definition ! \ - Please contact NumPy maintainers and give detailed information about your \ - compiler and platform -#endif -typedef struct { float real, imag; } npy_cfloat; - -#if NPY_SIZEOF_COMPLEX_LONGDOUBLE != 2 * NPY_SIZEOF_LONGDOUBLE -#error npy_clongdouble definition is not compatible with C99 complex definition ! 
\ - Please contact NumPy maintainers and give detailed information about your \ - compiler and platform -#endif -typedef struct { npy_longdouble real, imag; } npy_clongdouble; - -/* - * numarray-style bit-width typedefs - */ -#define NPY_MAX_INT8 127 -#define NPY_MIN_INT8 -128 -#define NPY_MAX_UINT8 255 -#define NPY_MAX_INT16 32767 -#define NPY_MIN_INT16 -32768 -#define NPY_MAX_UINT16 65535 -#define NPY_MAX_INT32 2147483647 -#define NPY_MIN_INT32 (-NPY_MAX_INT32 - 1) -#define NPY_MAX_UINT32 4294967295U -#define NPY_MAX_INT64 NPY_LONGLONG_SUFFIX(9223372036854775807) -#define NPY_MIN_INT64 (-NPY_MAX_INT64 - NPY_LONGLONG_SUFFIX(1)) -#define NPY_MAX_UINT64 NPY_ULONGLONG_SUFFIX(18446744073709551615) -#define NPY_MAX_INT128 NPY_LONGLONG_SUFFIX(85070591730234615865843651857942052864) -#define NPY_MIN_INT128 (-NPY_MAX_INT128 - NPY_LONGLONG_SUFFIX(1)) -#define NPY_MAX_UINT128 NPY_ULONGLONG_SUFFIX(170141183460469231731687303715884105728) -#define NPY_MAX_INT256 NPY_LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967) -#define NPY_MIN_INT256 (-NPY_MAX_INT256 - NPY_LONGLONG_SUFFIX(1)) -#define NPY_MAX_UINT256 NPY_ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935) -#define NPY_MIN_DATETIME NPY_MIN_INT64 -#define NPY_MAX_DATETIME NPY_MAX_INT64 -#define NPY_MIN_TIMEDELTA NPY_MIN_INT64 -#define NPY_MAX_TIMEDELTA NPY_MAX_INT64 - - /* Need to find the number of bits for each type and - make definitions accordingly. - - C states that sizeof(char) == 1 by definition - - So, just using the sizeof keyword won't help. - - It also looks like Python itself uses sizeof(char) quite a - bit, which by definition should be 1 all the time. 
- - Idea: Make Use of CHAR_BIT which should tell us how many - BITS per CHARACTER - */ - - /* Include platform definitions -- These are in the C89/90 standard */ -#include -#define NPY_MAX_BYTE SCHAR_MAX -#define NPY_MIN_BYTE SCHAR_MIN -#define NPY_MAX_UBYTE UCHAR_MAX -#define NPY_MAX_SHORT SHRT_MAX -#define NPY_MIN_SHORT SHRT_MIN -#define NPY_MAX_USHORT USHRT_MAX -#define NPY_MAX_INT INT_MAX -#ifndef INT_MIN -#define INT_MIN (-INT_MAX - 1) -#endif -#define NPY_MIN_INT INT_MIN -#define NPY_MAX_UINT UINT_MAX -#define NPY_MAX_LONG LONG_MAX -#define NPY_MIN_LONG LONG_MIN -#define NPY_MAX_ULONG ULONG_MAX - -#define NPY_BITSOF_BOOL (sizeof(npy_bool) * CHAR_BIT) -#define NPY_BITSOF_CHAR CHAR_BIT -#define NPY_BITSOF_BYTE (NPY_SIZEOF_BYTE * CHAR_BIT) -#define NPY_BITSOF_SHORT (NPY_SIZEOF_SHORT * CHAR_BIT) -#define NPY_BITSOF_INT (NPY_SIZEOF_INT * CHAR_BIT) -#define NPY_BITSOF_LONG (NPY_SIZEOF_LONG * CHAR_BIT) -#define NPY_BITSOF_LONGLONG (NPY_SIZEOF_LONGLONG * CHAR_BIT) -#define NPY_BITSOF_INTP (NPY_SIZEOF_INTP * CHAR_BIT) -#define NPY_BITSOF_HALF (NPY_SIZEOF_HALF * CHAR_BIT) -#define NPY_BITSOF_FLOAT (NPY_SIZEOF_FLOAT * CHAR_BIT) -#define NPY_BITSOF_DOUBLE (NPY_SIZEOF_DOUBLE * CHAR_BIT) -#define NPY_BITSOF_LONGDOUBLE (NPY_SIZEOF_LONGDOUBLE * CHAR_BIT) -#define NPY_BITSOF_CFLOAT (NPY_SIZEOF_CFLOAT * CHAR_BIT) -#define NPY_BITSOF_CDOUBLE (NPY_SIZEOF_CDOUBLE * CHAR_BIT) -#define NPY_BITSOF_CLONGDOUBLE (NPY_SIZEOF_CLONGDOUBLE * CHAR_BIT) -#define NPY_BITSOF_DATETIME (NPY_SIZEOF_DATETIME * CHAR_BIT) -#define NPY_BITSOF_TIMEDELTA (NPY_SIZEOF_TIMEDELTA * CHAR_BIT) - -#if NPY_BITSOF_LONG == 8 -#define NPY_INT8 NPY_LONG -#define NPY_UINT8 NPY_ULONG - typedef long npy_int8; - typedef unsigned long npy_uint8; -#define PyInt8ScalarObject PyLongScalarObject -#define PyInt8ArrType_Type PyLongArrType_Type -#define PyUInt8ScalarObject PyULongScalarObject -#define PyUInt8ArrType_Type PyULongArrType_Type -#define NPY_INT8_FMT NPY_LONG_FMT -#define NPY_UINT8_FMT NPY_ULONG_FMT -#elif 
NPY_BITSOF_LONG == 16 -#define NPY_INT16 NPY_LONG -#define NPY_UINT16 NPY_ULONG - typedef long npy_int16; - typedef unsigned long npy_uint16; -#define PyInt16ScalarObject PyLongScalarObject -#define PyInt16ArrType_Type PyLongArrType_Type -#define PyUInt16ScalarObject PyULongScalarObject -#define PyUInt16ArrType_Type PyULongArrType_Type -#define NPY_INT16_FMT NPY_LONG_FMT -#define NPY_UINT16_FMT NPY_ULONG_FMT -#elif NPY_BITSOF_LONG == 32 -#define NPY_INT32 NPY_LONG -#define NPY_UINT32 NPY_ULONG - typedef long npy_int32; - typedef unsigned long npy_uint32; - typedef unsigned long npy_ucs4; -#define PyInt32ScalarObject PyLongScalarObject -#define PyInt32ArrType_Type PyLongArrType_Type -#define PyUInt32ScalarObject PyULongScalarObject -#define PyUInt32ArrType_Type PyULongArrType_Type -#define NPY_INT32_FMT NPY_LONG_FMT -#define NPY_UINT32_FMT NPY_ULONG_FMT -#elif NPY_BITSOF_LONG == 64 -#define NPY_INT64 NPY_LONG -#define NPY_UINT64 NPY_ULONG - typedef long npy_int64; - typedef unsigned long npy_uint64; -#define PyInt64ScalarObject PyLongScalarObject -#define PyInt64ArrType_Type PyLongArrType_Type -#define PyUInt64ScalarObject PyULongScalarObject -#define PyUInt64ArrType_Type PyULongArrType_Type -#define NPY_INT64_FMT NPY_LONG_FMT -#define NPY_UINT64_FMT NPY_ULONG_FMT -#define MyPyLong_FromInt64 PyLong_FromLong -#define MyPyLong_AsInt64 PyLong_AsLong -#elif NPY_BITSOF_LONG == 128 -#define NPY_INT128 NPY_LONG -#define NPY_UINT128 NPY_ULONG - typedef long npy_int128; - typedef unsigned long npy_uint128; -#define PyInt128ScalarObject PyLongScalarObject -#define PyInt128ArrType_Type PyLongArrType_Type -#define PyUInt128ScalarObject PyULongScalarObject -#define PyUInt128ArrType_Type PyULongArrType_Type -#define NPY_INT128_FMT NPY_LONG_FMT -#define NPY_UINT128_FMT NPY_ULONG_FMT -#endif - -#if NPY_BITSOF_LONGLONG == 8 -# ifndef NPY_INT8 -# define NPY_INT8 NPY_LONGLONG -# define NPY_UINT8 NPY_ULONGLONG - typedef npy_longlong npy_int8; - typedef npy_ulonglong npy_uint8; -# 
define PyInt8ScalarObject PyLongLongScalarObject -# define PyInt8ArrType_Type PyLongLongArrType_Type -# define PyUInt8ScalarObject PyULongLongScalarObject -# define PyUInt8ArrType_Type PyULongLongArrType_Type -#define NPY_INT8_FMT NPY_LONGLONG_FMT -#define NPY_UINT8_FMT NPY_ULONGLONG_FMT -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT8 -# define NPY_MIN_LONGLONG NPY_MIN_INT8 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT8 -#elif NPY_BITSOF_LONGLONG == 16 -# ifndef NPY_INT16 -# define NPY_INT16 NPY_LONGLONG -# define NPY_UINT16 NPY_ULONGLONG - typedef npy_longlong npy_int16; - typedef npy_ulonglong npy_uint16; -# define PyInt16ScalarObject PyLongLongScalarObject -# define PyInt16ArrType_Type PyLongLongArrType_Type -# define PyUInt16ScalarObject PyULongLongScalarObject -# define PyUInt16ArrType_Type PyULongLongArrType_Type -#define NPY_INT16_FMT NPY_LONGLONG_FMT -#define NPY_UINT16_FMT NPY_ULONGLONG_FMT -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT16 -# define NPY_MIN_LONGLONG NPY_MIN_INT16 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT16 -#elif NPY_BITSOF_LONGLONG == 32 -# ifndef NPY_INT32 -# define NPY_INT32 NPY_LONGLONG -# define NPY_UINT32 NPY_ULONGLONG - typedef npy_longlong npy_int32; - typedef npy_ulonglong npy_uint32; - typedef npy_ulonglong npy_ucs4; -# define PyInt32ScalarObject PyLongLongScalarObject -# define PyInt32ArrType_Type PyLongLongArrType_Type -# define PyUInt32ScalarObject PyULongLongScalarObject -# define PyUInt32ArrType_Type PyULongLongArrType_Type -#define NPY_INT32_FMT NPY_LONGLONG_FMT -#define NPY_UINT32_FMT NPY_ULONGLONG_FMT -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT32 -# define NPY_MIN_LONGLONG NPY_MIN_INT32 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT32 -#elif NPY_BITSOF_LONGLONG == 64 -# ifndef NPY_INT64 -# define NPY_INT64 NPY_LONGLONG -# define NPY_UINT64 NPY_ULONGLONG - typedef npy_longlong npy_int64; - typedef npy_ulonglong npy_uint64; -# define PyInt64ScalarObject PyLongLongScalarObject -# define PyInt64ArrType_Type PyLongLongArrType_Type -# 
define PyUInt64ScalarObject PyULongLongScalarObject -# define PyUInt64ArrType_Type PyULongLongArrType_Type -#define NPY_INT64_FMT NPY_LONGLONG_FMT -#define NPY_UINT64_FMT NPY_ULONGLONG_FMT -# define MyPyLong_FromInt64 PyLong_FromLongLong -# define MyPyLong_AsInt64 PyLong_AsLongLong -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT64 -# define NPY_MIN_LONGLONG NPY_MIN_INT64 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT64 -#elif NPY_BITSOF_LONGLONG == 128 -# ifndef NPY_INT128 -# define NPY_INT128 NPY_LONGLONG -# define NPY_UINT128 NPY_ULONGLONG - typedef npy_longlong npy_int128; - typedef npy_ulonglong npy_uint128; -# define PyInt128ScalarObject PyLongLongScalarObject -# define PyInt128ArrType_Type PyLongLongArrType_Type -# define PyUInt128ScalarObject PyULongLongScalarObject -# define PyUInt128ArrType_Type PyULongLongArrType_Type -#define NPY_INT128_FMT NPY_LONGLONG_FMT -#define NPY_UINT128_FMT NPY_ULONGLONG_FMT -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT128 -# define NPY_MIN_LONGLONG NPY_MIN_INT128 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT128 -#elif NPY_BITSOF_LONGLONG == 256 -# define NPY_INT256 NPY_LONGLONG -# define NPY_UINT256 NPY_ULONGLONG - typedef npy_longlong npy_int256; - typedef npy_ulonglong npy_uint256; -# define PyInt256ScalarObject PyLongLongScalarObject -# define PyInt256ArrType_Type PyLongLongArrType_Type -# define PyUInt256ScalarObject PyULongLongScalarObject -# define PyUInt256ArrType_Type PyULongLongArrType_Type -#define NPY_INT256_FMT NPY_LONGLONG_FMT -#define NPY_UINT256_FMT NPY_ULONGLONG_FMT -# define NPY_MAX_LONGLONG NPY_MAX_INT256 -# define NPY_MIN_LONGLONG NPY_MIN_INT256 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT256 -#endif - -#if NPY_BITSOF_INT == 8 -#ifndef NPY_INT8 -#define NPY_INT8 NPY_INT -#define NPY_UINT8 NPY_UINT - typedef int npy_int8; - typedef unsigned int npy_uint8; -# define PyInt8ScalarObject PyIntScalarObject -# define PyInt8ArrType_Type PyIntArrType_Type -# define PyUInt8ScalarObject PyUIntScalarObject -# define 
PyUInt8ArrType_Type PyUIntArrType_Type -#define NPY_INT8_FMT NPY_INT_FMT -#define NPY_UINT8_FMT NPY_UINT_FMT -#endif -#elif NPY_BITSOF_INT == 16 -#ifndef NPY_INT16 -#define NPY_INT16 NPY_INT -#define NPY_UINT16 NPY_UINT - typedef int npy_int16; - typedef unsigned int npy_uint16; -# define PyInt16ScalarObject PyIntScalarObject -# define PyInt16ArrType_Type PyIntArrType_Type -# define PyUInt16ScalarObject PyIntUScalarObject -# define PyUInt16ArrType_Type PyIntUArrType_Type -#define NPY_INT16_FMT NPY_INT_FMT -#define NPY_UINT16_FMT NPY_UINT_FMT -#endif -#elif NPY_BITSOF_INT == 32 -#ifndef NPY_INT32 -#define NPY_INT32 NPY_INT -#define NPY_UINT32 NPY_UINT - typedef int npy_int32; - typedef unsigned int npy_uint32; - typedef unsigned int npy_ucs4; -# define PyInt32ScalarObject PyIntScalarObject -# define PyInt32ArrType_Type PyIntArrType_Type -# define PyUInt32ScalarObject PyUIntScalarObject -# define PyUInt32ArrType_Type PyUIntArrType_Type -#define NPY_INT32_FMT NPY_INT_FMT -#define NPY_UINT32_FMT NPY_UINT_FMT -#endif -#elif NPY_BITSOF_INT == 64 -#ifndef NPY_INT64 -#define NPY_INT64 NPY_INT -#define NPY_UINT64 NPY_UINT - typedef int npy_int64; - typedef unsigned int npy_uint64; -# define PyInt64ScalarObject PyIntScalarObject -# define PyInt64ArrType_Type PyIntArrType_Type -# define PyUInt64ScalarObject PyUIntScalarObject -# define PyUInt64ArrType_Type PyUIntArrType_Type -#define NPY_INT64_FMT NPY_INT_FMT -#define NPY_UINT64_FMT NPY_UINT_FMT -# define MyPyLong_FromInt64 PyLong_FromLong -# define MyPyLong_AsInt64 PyLong_AsLong -#endif -#elif NPY_BITSOF_INT == 128 -#ifndef NPY_INT128 -#define NPY_INT128 NPY_INT -#define NPY_UINT128 NPY_UINT - typedef int npy_int128; - typedef unsigned int npy_uint128; -# define PyInt128ScalarObject PyIntScalarObject -# define PyInt128ArrType_Type PyIntArrType_Type -# define PyUInt128ScalarObject PyUIntScalarObject -# define PyUInt128ArrType_Type PyUIntArrType_Type -#define NPY_INT128_FMT NPY_INT_FMT -#define NPY_UINT128_FMT NPY_UINT_FMT 
-#endif -#endif - -#if NPY_BITSOF_SHORT == 8 -#ifndef NPY_INT8 -#define NPY_INT8 NPY_SHORT -#define NPY_UINT8 NPY_USHORT - typedef short npy_int8; - typedef unsigned short npy_uint8; -# define PyInt8ScalarObject PyShortScalarObject -# define PyInt8ArrType_Type PyShortArrType_Type -# define PyUInt8ScalarObject PyUShortScalarObject -# define PyUInt8ArrType_Type PyUShortArrType_Type -#define NPY_INT8_FMT NPY_SHORT_FMT -#define NPY_UINT8_FMT NPY_USHORT_FMT -#endif -#elif NPY_BITSOF_SHORT == 16 -#ifndef NPY_INT16 -#define NPY_INT16 NPY_SHORT -#define NPY_UINT16 NPY_USHORT - typedef short npy_int16; - typedef unsigned short npy_uint16; -# define PyInt16ScalarObject PyShortScalarObject -# define PyInt16ArrType_Type PyShortArrType_Type -# define PyUInt16ScalarObject PyUShortScalarObject -# define PyUInt16ArrType_Type PyUShortArrType_Type -#define NPY_INT16_FMT NPY_SHORT_FMT -#define NPY_UINT16_FMT NPY_USHORT_FMT -#endif -#elif NPY_BITSOF_SHORT == 32 -#ifndef NPY_INT32 -#define NPY_INT32 NPY_SHORT -#define NPY_UINT32 NPY_USHORT - typedef short npy_int32; - typedef unsigned short npy_uint32; - typedef unsigned short npy_ucs4; -# define PyInt32ScalarObject PyShortScalarObject -# define PyInt32ArrType_Type PyShortArrType_Type -# define PyUInt32ScalarObject PyUShortScalarObject -# define PyUInt32ArrType_Type PyUShortArrType_Type -#define NPY_INT32_FMT NPY_SHORT_FMT -#define NPY_UINT32_FMT NPY_USHORT_FMT -#endif -#elif NPY_BITSOF_SHORT == 64 -#ifndef NPY_INT64 -#define NPY_INT64 NPY_SHORT -#define NPY_UINT64 NPY_USHORT - typedef short npy_int64; - typedef unsigned short npy_uint64; -# define PyInt64ScalarObject PyShortScalarObject -# define PyInt64ArrType_Type PyShortArrType_Type -# define PyUInt64ScalarObject PyUShortScalarObject -# define PyUInt64ArrType_Type PyUShortArrType_Type -#define NPY_INT64_FMT NPY_SHORT_FMT -#define NPY_UINT64_FMT NPY_USHORT_FMT -# define MyPyLong_FromInt64 PyLong_FromLong -# define MyPyLong_AsInt64 PyLong_AsLong -#endif -#elif NPY_BITSOF_SHORT == 128 
-#ifndef NPY_INT128 -#define NPY_INT128 NPY_SHORT -#define NPY_UINT128 NPY_USHORT - typedef short npy_int128; - typedef unsigned short npy_uint128; -# define PyInt128ScalarObject PyShortScalarObject -# define PyInt128ArrType_Type PyShortArrType_Type -# define PyUInt128ScalarObject PyUShortScalarObject -# define PyUInt128ArrType_Type PyUShortArrType_Type -#define NPY_INT128_FMT NPY_SHORT_FMT -#define NPY_UINT128_FMT NPY_USHORT_FMT -#endif -#endif - - -#if NPY_BITSOF_CHAR == 8 -#ifndef NPY_INT8 -#define NPY_INT8 NPY_BYTE -#define NPY_UINT8 NPY_UBYTE - typedef signed char npy_int8; - typedef unsigned char npy_uint8; -# define PyInt8ScalarObject PyByteScalarObject -# define PyInt8ArrType_Type PyByteArrType_Type -# define PyUInt8ScalarObject PyUByteScalarObject -# define PyUInt8ArrType_Type PyUByteArrType_Type -#define NPY_INT8_FMT NPY_BYTE_FMT -#define NPY_UINT8_FMT NPY_UBYTE_FMT -#endif -#elif NPY_BITSOF_CHAR == 16 -#ifndef NPY_INT16 -#define NPY_INT16 NPY_BYTE -#define NPY_UINT16 NPY_UBYTE - typedef signed char npy_int16; - typedef unsigned char npy_uint16; -# define PyInt16ScalarObject PyByteScalarObject -# define PyInt16ArrType_Type PyByteArrType_Type -# define PyUInt16ScalarObject PyUByteScalarObject -# define PyUInt16ArrType_Type PyUByteArrType_Type -#define NPY_INT16_FMT NPY_BYTE_FMT -#define NPY_UINT16_FMT NPY_UBYTE_FMT -#endif -#elif NPY_BITSOF_CHAR == 32 -#ifndef NPY_INT32 -#define NPY_INT32 NPY_BYTE -#define NPY_UINT32 NPY_UBYTE - typedef signed char npy_int32; - typedef unsigned char npy_uint32; - typedef unsigned char npy_ucs4; -# define PyInt32ScalarObject PyByteScalarObject -# define PyInt32ArrType_Type PyByteArrType_Type -# define PyUInt32ScalarObject PyUByteScalarObject -# define PyUInt32ArrType_Type PyUByteArrType_Type -#define NPY_INT32_FMT NPY_BYTE_FMT -#define NPY_UINT32_FMT NPY_UBYTE_FMT -#endif -#elif NPY_BITSOF_CHAR == 64 -#ifndef NPY_INT64 -#define NPY_INT64 NPY_BYTE -#define NPY_UINT64 NPY_UBYTE - typedef signed char npy_int64; - typedef 
unsigned char npy_uint64; -# define PyInt64ScalarObject PyByteScalarObject -# define PyInt64ArrType_Type PyByteArrType_Type -# define PyUInt64ScalarObject PyUByteScalarObject -# define PyUInt64ArrType_Type PyUByteArrType_Type -#define NPY_INT64_FMT NPY_BYTE_FMT -#define NPY_UINT64_FMT NPY_UBYTE_FMT -# define MyPyLong_FromInt64 PyLong_FromLong -# define MyPyLong_AsInt64 PyLong_AsLong -#endif -#elif NPY_BITSOF_CHAR == 128 -#ifndef NPY_INT128 -#define NPY_INT128 NPY_BYTE -#define NPY_UINT128 NPY_UBYTE - typedef signed char npy_int128; - typedef unsigned char npy_uint128; -# define PyInt128ScalarObject PyByteScalarObject -# define PyInt128ArrType_Type PyByteArrType_Type -# define PyUInt128ScalarObject PyUByteScalarObject -# define PyUInt128ArrType_Type PyUByteArrType_Type -#define NPY_INT128_FMT NPY_BYTE_FMT -#define NPY_UINT128_FMT NPY_UBYTE_FMT -#endif -#endif - - - -#if NPY_BITSOF_DOUBLE == 32 -#ifndef NPY_FLOAT32 -#define NPY_FLOAT32 NPY_DOUBLE -#define NPY_COMPLEX64 NPY_CDOUBLE - typedef double npy_float32; - typedef npy_cdouble npy_complex64; -# define PyFloat32ScalarObject PyDoubleScalarObject -# define PyComplex64ScalarObject PyCDoubleScalarObject -# define PyFloat32ArrType_Type PyDoubleArrType_Type -# define PyComplex64ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT32_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX64_FMT NPY_CDOUBLE_FMT -#endif -#elif NPY_BITSOF_DOUBLE == 64 -#ifndef NPY_FLOAT64 -#define NPY_FLOAT64 NPY_DOUBLE -#define NPY_COMPLEX128 NPY_CDOUBLE - typedef double npy_float64; - typedef npy_cdouble npy_complex128; -# define PyFloat64ScalarObject PyDoubleScalarObject -# define PyComplex128ScalarObject PyCDoubleScalarObject -# define PyFloat64ArrType_Type PyDoubleArrType_Type -# define PyComplex128ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT64_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX128_FMT NPY_CDOUBLE_FMT -#endif -#elif NPY_BITSOF_DOUBLE == 80 -#ifndef NPY_FLOAT80 -#define NPY_FLOAT80 NPY_DOUBLE -#define NPY_COMPLEX160 NPY_CDOUBLE - typedef 
double npy_float80; - typedef npy_cdouble npy_complex160; -# define PyFloat80ScalarObject PyDoubleScalarObject -# define PyComplex160ScalarObject PyCDoubleScalarObject -# define PyFloat80ArrType_Type PyDoubleArrType_Type -# define PyComplex160ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT80_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX160_FMT NPY_CDOUBLE_FMT -#endif -#elif NPY_BITSOF_DOUBLE == 96 -#ifndef NPY_FLOAT96 -#define NPY_FLOAT96 NPY_DOUBLE -#define NPY_COMPLEX192 NPY_CDOUBLE - typedef double npy_float96; - typedef npy_cdouble npy_complex192; -# define PyFloat96ScalarObject PyDoubleScalarObject -# define PyComplex192ScalarObject PyCDoubleScalarObject -# define PyFloat96ArrType_Type PyDoubleArrType_Type -# define PyComplex192ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT96_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX192_FMT NPY_CDOUBLE_FMT -#endif -#elif NPY_BITSOF_DOUBLE == 128 -#ifndef NPY_FLOAT128 -#define NPY_FLOAT128 NPY_DOUBLE -#define NPY_COMPLEX256 NPY_CDOUBLE - typedef double npy_float128; - typedef npy_cdouble npy_complex256; -# define PyFloat128ScalarObject PyDoubleScalarObject -# define PyComplex256ScalarObject PyCDoubleScalarObject -# define PyFloat128ArrType_Type PyDoubleArrType_Type -# define PyComplex256ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT128_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX256_FMT NPY_CDOUBLE_FMT -#endif -#endif - - - -#if NPY_BITSOF_FLOAT == 32 -#ifndef NPY_FLOAT32 -#define NPY_FLOAT32 NPY_FLOAT -#define NPY_COMPLEX64 NPY_CFLOAT - typedef float npy_float32; - typedef npy_cfloat npy_complex64; -# define PyFloat32ScalarObject PyFloatScalarObject -# define PyComplex64ScalarObject PyCFloatScalarObject -# define PyFloat32ArrType_Type PyFloatArrType_Type -# define PyComplex64ArrType_Type PyCFloatArrType_Type -#define NPY_FLOAT32_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX64_FMT NPY_CFLOAT_FMT -#endif -#elif NPY_BITSOF_FLOAT == 64 -#ifndef NPY_FLOAT64 -#define NPY_FLOAT64 NPY_FLOAT -#define NPY_COMPLEX128 NPY_CFLOAT - typedef 
float npy_float64; - typedef npy_cfloat npy_complex128; -# define PyFloat64ScalarObject PyFloatScalarObject -# define PyComplex128ScalarObject PyCFloatScalarObject -# define PyFloat64ArrType_Type PyFloatArrType_Type -# define PyComplex128ArrType_Type PyCFloatArrType_Type -#define NPY_FLOAT64_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX128_FMT NPY_CFLOAT_FMT -#endif -#elif NPY_BITSOF_FLOAT == 80 -#ifndef NPY_FLOAT80 -#define NPY_FLOAT80 NPY_FLOAT -#define NPY_COMPLEX160 NPY_CFLOAT - typedef float npy_float80; - typedef npy_cfloat npy_complex160; -# define PyFloat80ScalarObject PyFloatScalarObject -# define PyComplex160ScalarObject PyCFloatScalarObject -# define PyFloat80ArrType_Type PyFloatArrType_Type -# define PyComplex160ArrType_Type PyCFloatArrType_Type -#define NPY_FLOAT80_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX160_FMT NPY_CFLOAT_FMT -#endif -#elif NPY_BITSOF_FLOAT == 96 -#ifndef NPY_FLOAT96 -#define NPY_FLOAT96 NPY_FLOAT -#define NPY_COMPLEX192 NPY_CFLOAT - typedef float npy_float96; - typedef npy_cfloat npy_complex192; -# define PyFloat96ScalarObject PyFloatScalarObject -# define PyComplex192ScalarObject PyCFloatScalarObject -# define PyFloat96ArrType_Type PyFloatArrType_Type -# define PyComplex192ArrType_Type PyCFloatArrType_Type -#define NPY_FLOAT96_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX192_FMT NPY_CFLOAT_FMT -#endif -#elif NPY_BITSOF_FLOAT == 128 -#ifndef NPY_FLOAT128 -#define NPY_FLOAT128 NPY_FLOAT -#define NPY_COMPLEX256 NPY_CFLOAT - typedef float npy_float128; - typedef npy_cfloat npy_complex256; -# define PyFloat128ScalarObject PyFloatScalarObject -# define PyComplex256ScalarObject PyCFloatScalarObject -# define PyFloat128ArrType_Type PyFloatArrType_Type -# define PyComplex256ArrType_Type PyCFloatArrType_Type -#define NPY_FLOAT128_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX256_FMT NPY_CFLOAT_FMT -#endif -#endif - -/* half/float16 isn't a floating-point type in C */ -#define NPY_FLOAT16 NPY_HALF -typedef npy_uint16 npy_half; -typedef npy_half npy_float16; - -#if 
NPY_BITSOF_LONGDOUBLE == 32 -#ifndef NPY_FLOAT32 -#define NPY_FLOAT32 NPY_LONGDOUBLE -#define NPY_COMPLEX64 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float32; - typedef npy_clongdouble npy_complex64; -# define PyFloat32ScalarObject PyLongDoubleScalarObject -# define PyComplex64ScalarObject PyCLongDoubleScalarObject -# define PyFloat32ArrType_Type PyLongDoubleArrType_Type -# define PyComplex64ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT32_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX64_FMT NPY_CLONGDOUBLE_FMT -#endif -#elif NPY_BITSOF_LONGDOUBLE == 64 -#ifndef NPY_FLOAT64 -#define NPY_FLOAT64 NPY_LONGDOUBLE -#define NPY_COMPLEX128 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float64; - typedef npy_clongdouble npy_complex128; -# define PyFloat64ScalarObject PyLongDoubleScalarObject -# define PyComplex128ScalarObject PyCLongDoubleScalarObject -# define PyFloat64ArrType_Type PyLongDoubleArrType_Type -# define PyComplex128ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT64_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX128_FMT NPY_CLONGDOUBLE_FMT -#endif -#elif NPY_BITSOF_LONGDOUBLE == 80 -#ifndef NPY_FLOAT80 -#define NPY_FLOAT80 NPY_LONGDOUBLE -#define NPY_COMPLEX160 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float80; - typedef npy_clongdouble npy_complex160; -# define PyFloat80ScalarObject PyLongDoubleScalarObject -# define PyComplex160ScalarObject PyCLongDoubleScalarObject -# define PyFloat80ArrType_Type PyLongDoubleArrType_Type -# define PyComplex160ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT80_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX160_FMT NPY_CLONGDOUBLE_FMT -#endif -#elif NPY_BITSOF_LONGDOUBLE == 96 -#ifndef NPY_FLOAT96 -#define NPY_FLOAT96 NPY_LONGDOUBLE -#define NPY_COMPLEX192 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float96; - typedef npy_clongdouble npy_complex192; -# define PyFloat96ScalarObject PyLongDoubleScalarObject -# define PyComplex192ScalarObject PyCLongDoubleScalarObject -# define PyFloat96ArrType_Type 
PyLongDoubleArrType_Type -# define PyComplex192ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT96_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX192_FMT NPY_CLONGDOUBLE_FMT -#endif -#elif NPY_BITSOF_LONGDOUBLE == 128 -#ifndef NPY_FLOAT128 -#define NPY_FLOAT128 NPY_LONGDOUBLE -#define NPY_COMPLEX256 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float128; - typedef npy_clongdouble npy_complex256; -# define PyFloat128ScalarObject PyLongDoubleScalarObject -# define PyComplex256ScalarObject PyCLongDoubleScalarObject -# define PyFloat128ArrType_Type PyLongDoubleArrType_Type -# define PyComplex256ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT128_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX256_FMT NPY_CLONGDOUBLE_FMT -#endif -#elif NPY_BITSOF_LONGDOUBLE == 256 -#define NPY_FLOAT256 NPY_LONGDOUBLE -#define NPY_COMPLEX512 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float256; - typedef npy_clongdouble npy_complex512; -# define PyFloat256ScalarObject PyLongDoubleScalarObject -# define PyComplex512ScalarObject PyCLongDoubleScalarObject -# define PyFloat256ArrType_Type PyLongDoubleArrType_Type -# define PyComplex512ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT256_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX512_FMT NPY_CLONGDOUBLE_FMT -#endif - -/* datetime typedefs */ -typedef npy_int64 npy_timedelta; -typedef npy_int64 npy_datetime; -#define NPY_DATETIME_FMT NPY_INT64_FMT -#define NPY_TIMEDELTA_FMT NPY_INT64_FMT - -/* End of typedefs for numarray style bit-width names */ - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_cpu.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_cpu.h deleted file mode 100644 index 5edd8f4..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_cpu.h +++ /dev/null @@ -1,118 +0,0 @@ -/* - * This set (target) cpu specific macros: - * - Possible values: - * NPY_CPU_X86 - * NPY_CPU_AMD64 - * NPY_CPU_PPC - * NPY_CPU_PPC64 - * NPY_CPU_PPC64LE - * NPY_CPU_SPARC 
- * NPY_CPU_S390 - * NPY_CPU_IA64 - * NPY_CPU_HPPA - * NPY_CPU_ALPHA - * NPY_CPU_ARMEL - * NPY_CPU_ARMEB - * NPY_CPU_SH_LE - * NPY_CPU_SH_BE - * NPY_CPU_ARCEL - * NPY_CPU_ARCEB - * NPY_CPU_RISCV64 - */ -#ifndef _NPY_CPUARCH_H_ -#define _NPY_CPUARCH_H_ - -#include "numpyconfig.h" -#include /* for memcpy */ - -#if defined( __i386__ ) || defined(i386) || defined(_M_IX86) - /* - * __i386__ is defined by gcc and Intel compiler on Linux, - * _M_IX86 by VS compiler, - * i386 by Sun compilers on opensolaris at least - */ - #define NPY_CPU_X86 -#elif defined(__x86_64__) || defined(__amd64__) || defined(__x86_64) || defined(_M_AMD64) - /* - * both __x86_64__ and __amd64__ are defined by gcc - * __x86_64 defined by sun compiler on opensolaris at least - * _M_AMD64 defined by MS compiler - */ - #define NPY_CPU_AMD64 -#elif defined(__powerpc64__) && defined(__LITTLE_ENDIAN__) - #define NPY_CPU_PPC64LE -#elif defined(__powerpc64__) && defined(__BIG_ENDIAN__) - #define NPY_CPU_PPC64 -#elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC) - /* - * __ppc__ is defined by gcc, I remember having seen __powerpc__ once, - * but can't find it ATM - * _ARCH_PPC is used by at least gcc on AIX - * As __powerpc__ and _ARCH_PPC are also defined by PPC64 check - * for those specifically first before defaulting to ppc - */ - #define NPY_CPU_PPC -#elif defined(__sparc__) || defined(__sparc) - /* __sparc__ is defined by gcc and Forte (e.g. 
Sun) compilers */ - #define NPY_CPU_SPARC -#elif defined(__s390__) - #define NPY_CPU_S390 -#elif defined(__ia64) - #define NPY_CPU_IA64 -#elif defined(__hppa) - #define NPY_CPU_HPPA -#elif defined(__alpha__) - #define NPY_CPU_ALPHA -#elif defined(__arm__) || defined(__aarch64__) - #if defined(__ARMEB__) || defined(__AARCH64EB__) - #if defined(__ARM_32BIT_STATE) - #define NPY_CPU_ARMEB_AARCH32 - #elif defined(__ARM_64BIT_STATE) - #define NPY_CPU_ARMEB_AARCH64 - #else - #define NPY_CPU_ARMEB - #endif - #elif defined(__ARMEL__) || defined(__AARCH64EL__) - #if defined(__ARM_32BIT_STATE) - #define NPY_CPU_ARMEL_AARCH32 - #elif defined(__ARM_64BIT_STATE) - #define NPY_CPU_ARMEL_AARCH64 - #else - #define NPY_CPU_ARMEL - #endif - #else - # error Unknown ARM CPU, please report this to numpy maintainers with \ - information about your platform (OS, CPU and compiler) - #endif -#elif defined(__sh__) && defined(__LITTLE_ENDIAN__) - #define NPY_CPU_SH_LE -#elif defined(__sh__) && defined(__BIG_ENDIAN__) - #define NPY_CPU_SH_BE -#elif defined(__MIPSEL__) - #define NPY_CPU_MIPSEL -#elif defined(__MIPSEB__) - #define NPY_CPU_MIPSEB -#elif defined(__or1k__) - #define NPY_CPU_OR1K -#elif defined(__mc68000__) - #define NPY_CPU_M68K -#elif defined(__arc__) && defined(__LITTLE_ENDIAN__) - #define NPY_CPU_ARCEL -#elif defined(__arc__) && defined(__BIG_ENDIAN__) - #define NPY_CPU_ARCEB -#elif defined(__riscv) && defined(__riscv_xlen) && __riscv_xlen == 64 - #define NPY_CPU_RISCV64 -#else - #error Unknown CPU, please report this to numpy maintainers with \ - information about your platform (OS, CPU and compiler) -#endif - -#define NPY_COPY_PYOBJECT_PTR(dst, src) memcpy(dst, src, sizeof(PyObject *)) - -#if (defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64)) -#define NPY_CPU_HAVE_UNALIGNED_ACCESS 1 -#else -#define NPY_CPU_HAVE_UNALIGNED_ACCESS 0 -#endif - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_endian.h 
b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_endian.h deleted file mode 100644 index 44cdffd..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_endian.h +++ /dev/null @@ -1,72 +0,0 @@ -#ifndef _NPY_ENDIAN_H_ -#define _NPY_ENDIAN_H_ - -/* - * NPY_BYTE_ORDER is set to the same value as BYTE_ORDER set by glibc in - * endian.h - */ - -#if defined(NPY_HAVE_ENDIAN_H) || defined(NPY_HAVE_SYS_ENDIAN_H) - /* Use endian.h if available */ - - #if defined(NPY_HAVE_ENDIAN_H) - #include - #elif defined(NPY_HAVE_SYS_ENDIAN_H) - #include - #endif - - #if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && defined(LITTLE_ENDIAN) - #define NPY_BYTE_ORDER BYTE_ORDER - #define NPY_LITTLE_ENDIAN LITTLE_ENDIAN - #define NPY_BIG_ENDIAN BIG_ENDIAN - #elif defined(_BYTE_ORDER) && defined(_BIG_ENDIAN) && defined(_LITTLE_ENDIAN) - #define NPY_BYTE_ORDER _BYTE_ORDER - #define NPY_LITTLE_ENDIAN _LITTLE_ENDIAN - #define NPY_BIG_ENDIAN _BIG_ENDIAN - #elif defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN) - #define NPY_BYTE_ORDER __BYTE_ORDER - #define NPY_LITTLE_ENDIAN __LITTLE_ENDIAN - #define NPY_BIG_ENDIAN __BIG_ENDIAN - #endif -#endif - -#ifndef NPY_BYTE_ORDER - /* Set endianness info using target CPU */ - #include "npy_cpu.h" - - #define NPY_LITTLE_ENDIAN 1234 - #define NPY_BIG_ENDIAN 4321 - - #if defined(NPY_CPU_X86) \ - || defined(NPY_CPU_AMD64) \ - || defined(NPY_CPU_IA64) \ - || defined(NPY_CPU_ALPHA) \ - || defined(NPY_CPU_ARMEL) \ - || defined(NPY_CPU_ARMEL_AARCH32) \ - || defined(NPY_CPU_ARMEL_AARCH64) \ - || defined(NPY_CPU_SH_LE) \ - || defined(NPY_CPU_MIPSEL) \ - || defined(NPY_CPU_PPC64LE) \ - || defined(NPY_CPU_ARCEL) \ - || defined(NPY_CPU_RISCV64) - #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN - #elif defined(NPY_CPU_PPC) \ - || defined(NPY_CPU_SPARC) \ - || defined(NPY_CPU_S390) \ - || defined(NPY_CPU_HPPA) \ - || defined(NPY_CPU_PPC64) \ - || defined(NPY_CPU_ARMEB) \ - || defined(NPY_CPU_ARMEB_AARCH32) \ - || 
defined(NPY_CPU_ARMEB_AARCH64) \ - || defined(NPY_CPU_SH_BE) \ - || defined(NPY_CPU_MIPSEB) \ - || defined(NPY_CPU_OR1K) \ - || defined(NPY_CPU_M68K) \ - || defined(NPY_CPU_ARCEB) - #define NPY_BYTE_ORDER NPY_BIG_ENDIAN - #else - #error Unknown CPU: can not set endianness - #endif -#endif - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_interrupt.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_interrupt.h deleted file mode 100644 index 40cb7ac..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_interrupt.h +++ /dev/null @@ -1,117 +0,0 @@ - -/* Signal handling: - -This header file defines macros that allow your code to handle -interrupts received during processing. Interrupts that -could reasonably be handled: - -SIGINT, SIGABRT, SIGALRM, SIGSEGV - -****Warning*************** - -Do not allow code that creates temporary memory or increases reference -counts of Python objects to be interrupted unless you handle it -differently. - -************************** - -The mechanism for handling interrupts is conceptually simple: - - - replace the signal handler with our own home-grown version - and store the old one. - - run the code to be interrupted -- if an interrupt occurs - the handler should basically just cause a return to the - calling function for finish work. - - restore the old signal handler - -Of course, every code that allows interrupts must account for -returning via the interrupt and handle clean-up correctly. But, -even still, the simple paradigm is complicated by at least three -factors. - - 1) platform portability (i.e. Microsoft says not to use longjmp - to return from signal handling. They have a __try and __except - extension to C instead but what about mingw?). - - 2) how to handle threads: apparently whether signals are delivered to - every thread of the process or the "invoking" thread is platform - dependent. --- we don't handle threads for now. 
- - 3) do we need to worry about re-entrance. For now, assume the - code will not call-back into itself. - -Ideas: - - 1) Start by implementing an approach that works on platforms that - can use setjmp and longjmp functionality and does nothing - on other platforms. - - 2) Ignore threads --- i.e. do not mix interrupt handling and threads - - 3) Add a default signal_handler function to the C-API but have the rest - use macros. - - -Simple Interface: - - -In your C-extension: around a block of code you want to be interruptible -with a SIGINT - -NPY_SIGINT_ON -[code] -NPY_SIGINT_OFF - -In order for this to work correctly, the -[code] block must not allocate any memory or alter the reference count of any -Python objects. In other words [code] must be interruptible so that continuation -after NPY_SIGINT_OFF will only be "missing some computations" - -Interrupt handling does not work well with threads. - -*/ - -/* Add signal handling macros - Make the global variable and signal handler part of the C-API -*/ - -#ifndef NPY_INTERRUPT_H -#define NPY_INTERRUPT_H - -#ifndef NPY_NO_SIGNAL - -#include -#include - -#ifndef sigsetjmp - -#define NPY_SIGSETJMP(arg1, arg2) setjmp(arg1) -#define NPY_SIGLONGJMP(arg1, arg2) longjmp(arg1, arg2) -#define NPY_SIGJMP_BUF jmp_buf - -#else - -#define NPY_SIGSETJMP(arg1, arg2) sigsetjmp(arg1, arg2) -#define NPY_SIGLONGJMP(arg1, arg2) siglongjmp(arg1, arg2) -#define NPY_SIGJMP_BUF sigjmp_buf - -#endif - -# define NPY_SIGINT_ON { \ - PyOS_sighandler_t _npy_sig_save; \ - _npy_sig_save = PyOS_setsig(SIGINT, _PyArray_SigintHandler); \ - if (NPY_SIGSETJMP(*((NPY_SIGJMP_BUF *)_PyArray_GetSigintBuf()), \ - 1) == 0) { \ - -# define NPY_SIGINT_OFF } \ - PyOS_setsig(SIGINT, _npy_sig_save); \ - } - -#else /* NPY_NO_SIGNAL */ - -#define NPY_SIGINT_ON -#define NPY_SIGINT_OFF - -#endif /* HAVE_SIGSETJMP */ - -#endif /* NPY_INTERRUPT_H */ diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_math.h 
b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_math.h deleted file mode 100644 index 69e690f..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_math.h +++ /dev/null @@ -1,646 +0,0 @@ -#ifndef __NPY_MATH_C99_H_ -#define __NPY_MATH_C99_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#ifdef __SUNPRO_CC -#include -#endif -#ifdef HAVE_NPY_CONFIG_H -#include -#endif -#include - -/* By adding static inline specifiers to npy_math function definitions when - appropriate, compiler is given the opportunity to optimize */ -#if NPY_INLINE_MATH -#define NPY_INPLACE NPY_INLINE static -#else -#define NPY_INPLACE -#endif - - -/* - * NAN and INFINITY like macros (same behavior as glibc for NAN, same as C99 - * for INFINITY) - * - * XXX: I should test whether INFINITY and NAN are available on the platform - */ -NPY_INLINE static float __npy_inff(void) -{ - const union { npy_uint32 __i; float __f;} __bint = {0x7f800000UL}; - return __bint.__f; -} - -NPY_INLINE static float __npy_nanf(void) -{ - const union { npy_uint32 __i; float __f;} __bint = {0x7fc00000UL}; - return __bint.__f; -} - -NPY_INLINE static float __npy_pzerof(void) -{ - const union { npy_uint32 __i; float __f;} __bint = {0x00000000UL}; - return __bint.__f; -} - -NPY_INLINE static float __npy_nzerof(void) -{ - const union { npy_uint32 __i; float __f;} __bint = {0x80000000UL}; - return __bint.__f; -} - -#define NPY_INFINITYF __npy_inff() -#define NPY_NANF __npy_nanf() -#define NPY_PZEROF __npy_pzerof() -#define NPY_NZEROF __npy_nzerof() - -#define NPY_INFINITY ((npy_double)NPY_INFINITYF) -#define NPY_NAN ((npy_double)NPY_NANF) -#define NPY_PZERO ((npy_double)NPY_PZEROF) -#define NPY_NZERO ((npy_double)NPY_NZEROF) - -#define NPY_INFINITYL ((npy_longdouble)NPY_INFINITYF) -#define NPY_NANL ((npy_longdouble)NPY_NANF) -#define NPY_PZEROL ((npy_longdouble)NPY_PZEROF) -#define NPY_NZEROL ((npy_longdouble)NPY_NZEROF) - -/* - * Useful constants - */ -#define NPY_E 
2.718281828459045235360287471352662498 /* e */ -#define NPY_LOG2E 1.442695040888963407359924681001892137 /* log_2 e */ -#define NPY_LOG10E 0.434294481903251827651128918916605082 /* log_10 e */ -#define NPY_LOGE2 0.693147180559945309417232121458176568 /* log_e 2 */ -#define NPY_LOGE10 2.302585092994045684017991454684364208 /* log_e 10 */ -#define NPY_PI 3.141592653589793238462643383279502884 /* pi */ -#define NPY_PI_2 1.570796326794896619231321691639751442 /* pi/2 */ -#define NPY_PI_4 0.785398163397448309615660845819875721 /* pi/4 */ -#define NPY_1_PI 0.318309886183790671537767526745028724 /* 1/pi */ -#define NPY_2_PI 0.636619772367581343075535053490057448 /* 2/pi */ -#define NPY_EULER 0.577215664901532860606512090082402431 /* Euler constant */ -#define NPY_SQRT2 1.414213562373095048801688724209698079 /* sqrt(2) */ -#define NPY_SQRT1_2 0.707106781186547524400844362104849039 /* 1/sqrt(2) */ - -#define NPY_Ef 2.718281828459045235360287471352662498F /* e */ -#define NPY_LOG2Ef 1.442695040888963407359924681001892137F /* log_2 e */ -#define NPY_LOG10Ef 0.434294481903251827651128918916605082F /* log_10 e */ -#define NPY_LOGE2f 0.693147180559945309417232121458176568F /* log_e 2 */ -#define NPY_LOGE10f 2.302585092994045684017991454684364208F /* log_e 10 */ -#define NPY_PIf 3.141592653589793238462643383279502884F /* pi */ -#define NPY_PI_2f 1.570796326794896619231321691639751442F /* pi/2 */ -#define NPY_PI_4f 0.785398163397448309615660845819875721F /* pi/4 */ -#define NPY_1_PIf 0.318309886183790671537767526745028724F /* 1/pi */ -#define NPY_2_PIf 0.636619772367581343075535053490057448F /* 2/pi */ -#define NPY_EULERf 0.577215664901532860606512090082402431F /* Euler constant */ -#define NPY_SQRT2f 1.414213562373095048801688724209698079F /* sqrt(2) */ -#define NPY_SQRT1_2f 0.707106781186547524400844362104849039F /* 1/sqrt(2) */ - -#define NPY_El 2.718281828459045235360287471352662498L /* e */ -#define NPY_LOG2El 1.442695040888963407359924681001892137L /* log_2 e */ -#define 
NPY_LOG10El 0.434294481903251827651128918916605082L /* log_10 e */ -#define NPY_LOGE2l 0.693147180559945309417232121458176568L /* log_e 2 */ -#define NPY_LOGE10l 2.302585092994045684017991454684364208L /* log_e 10 */ -#define NPY_PIl 3.141592653589793238462643383279502884L /* pi */ -#define NPY_PI_2l 1.570796326794896619231321691639751442L /* pi/2 */ -#define NPY_PI_4l 0.785398163397448309615660845819875721L /* pi/4 */ -#define NPY_1_PIl 0.318309886183790671537767526745028724L /* 1/pi */ -#define NPY_2_PIl 0.636619772367581343075535053490057448L /* 2/pi */ -#define NPY_EULERl 0.577215664901532860606512090082402431L /* Euler constant */ -#define NPY_SQRT2l 1.414213562373095048801688724209698079L /* sqrt(2) */ -#define NPY_SQRT1_2l 0.707106781186547524400844362104849039L /* 1/sqrt(2) */ - -/* - * Constants used in vector implementation of exp(x) - */ -#define NPY_RINT_CVT_MAGICf 0x1.800000p+23f -#define NPY_CODY_WAITE_LOGE_2_HIGHf -6.93145752e-1f -#define NPY_CODY_WAITE_LOGE_2_LOWf -1.42860677e-6f -#define NPY_COEFF_P0_EXPf 9.999999999980870924916e-01f -#define NPY_COEFF_P1_EXPf 7.257664613233124478488e-01f -#define NPY_COEFF_P2_EXPf 2.473615434895520810817e-01f -#define NPY_COEFF_P3_EXPf 5.114512081637298353406e-02f -#define NPY_COEFF_P4_EXPf 6.757896990527504603057e-03f -#define NPY_COEFF_P5_EXPf 5.082762527590693718096e-04f -#define NPY_COEFF_Q0_EXPf 1.000000000000000000000e+00f -#define NPY_COEFF_Q1_EXPf -2.742335390411667452936e-01f -#define NPY_COEFF_Q2_EXPf 2.159509375685829852307e-02f - -/* - * Constants used in vector implementation of log(x) - */ -#define NPY_COEFF_P0_LOGf 0.000000000000000000000e+00f -#define NPY_COEFF_P1_LOGf 9.999999999999998702752e-01f -#define NPY_COEFF_P2_LOGf 2.112677543073053063722e+00f -#define NPY_COEFF_P3_LOGf 1.480000633576506585156e+00f -#define NPY_COEFF_P4_LOGf 3.808837741388407920751e-01f -#define NPY_COEFF_P5_LOGf 2.589979117907922693523e-02f -#define NPY_COEFF_Q0_LOGf 1.000000000000000000000e+00f -#define NPY_COEFF_Q1_LOGf 
2.612677543073109236779e+00f -#define NPY_COEFF_Q2_LOGf 2.453006071784736363091e+00f -#define NPY_COEFF_Q3_LOGf 9.864942958519418960339e-01f -#define NPY_COEFF_Q4_LOGf 1.546476374983906719538e-01f -#define NPY_COEFF_Q5_LOGf 5.875095403124574342950e-03f -/* - * Constants used in vector implementation of sinf/cosf(x) - */ -#define NPY_TWO_O_PIf 0x1.45f306p-1f -#define NPY_CODY_WAITE_PI_O_2_HIGHf -0x1.921fb0p+00f -#define NPY_CODY_WAITE_PI_O_2_MEDf -0x1.5110b4p-22f -#define NPY_CODY_WAITE_PI_O_2_LOWf -0x1.846988p-48f -#define NPY_COEFF_INVF0_COSINEf 0x1.000000p+00f -#define NPY_COEFF_INVF2_COSINEf -0x1.000000p-01f -#define NPY_COEFF_INVF4_COSINEf 0x1.55553cp-05f -#define NPY_COEFF_INVF6_COSINEf -0x1.6c06dcp-10f -#define NPY_COEFF_INVF8_COSINEf 0x1.98e616p-16f -#define NPY_COEFF_INVF3_SINEf -0x1.555556p-03f -#define NPY_COEFF_INVF5_SINEf 0x1.11119ap-07f -#define NPY_COEFF_INVF7_SINEf -0x1.a06bbap-13f -#define NPY_COEFF_INVF9_SINEf 0x1.7d3bbcp-19f -/* - * Integer functions. - */ -NPY_INPLACE npy_uint npy_gcdu(npy_uint a, npy_uint b); -NPY_INPLACE npy_uint npy_lcmu(npy_uint a, npy_uint b); -NPY_INPLACE npy_ulong npy_gcdul(npy_ulong a, npy_ulong b); -NPY_INPLACE npy_ulong npy_lcmul(npy_ulong a, npy_ulong b); -NPY_INPLACE npy_ulonglong npy_gcdull(npy_ulonglong a, npy_ulonglong b); -NPY_INPLACE npy_ulonglong npy_lcmull(npy_ulonglong a, npy_ulonglong b); - -NPY_INPLACE npy_int npy_gcd(npy_int a, npy_int b); -NPY_INPLACE npy_int npy_lcm(npy_int a, npy_int b); -NPY_INPLACE npy_long npy_gcdl(npy_long a, npy_long b); -NPY_INPLACE npy_long npy_lcml(npy_long a, npy_long b); -NPY_INPLACE npy_longlong npy_gcdll(npy_longlong a, npy_longlong b); -NPY_INPLACE npy_longlong npy_lcmll(npy_longlong a, npy_longlong b); - -NPY_INPLACE npy_ubyte npy_rshiftuhh(npy_ubyte a, npy_ubyte b); -NPY_INPLACE npy_ubyte npy_lshiftuhh(npy_ubyte a, npy_ubyte b); -NPY_INPLACE npy_ushort npy_rshiftuh(npy_ushort a, npy_ushort b); -NPY_INPLACE npy_ushort npy_lshiftuh(npy_ushort a, npy_ushort b); -NPY_INPLACE 
npy_uint npy_rshiftu(npy_uint a, npy_uint b); -NPY_INPLACE npy_uint npy_lshiftu(npy_uint a, npy_uint b); -NPY_INPLACE npy_ulong npy_rshiftul(npy_ulong a, npy_ulong b); -NPY_INPLACE npy_ulong npy_lshiftul(npy_ulong a, npy_ulong b); -NPY_INPLACE npy_ulonglong npy_rshiftull(npy_ulonglong a, npy_ulonglong b); -NPY_INPLACE npy_ulonglong npy_lshiftull(npy_ulonglong a, npy_ulonglong b); - -NPY_INPLACE npy_byte npy_rshifthh(npy_byte a, npy_byte b); -NPY_INPLACE npy_byte npy_lshifthh(npy_byte a, npy_byte b); -NPY_INPLACE npy_short npy_rshifth(npy_short a, npy_short b); -NPY_INPLACE npy_short npy_lshifth(npy_short a, npy_short b); -NPY_INPLACE npy_int npy_rshift(npy_int a, npy_int b); -NPY_INPLACE npy_int npy_lshift(npy_int a, npy_int b); -NPY_INPLACE npy_long npy_rshiftl(npy_long a, npy_long b); -NPY_INPLACE npy_long npy_lshiftl(npy_long a, npy_long b); -NPY_INPLACE npy_longlong npy_rshiftll(npy_longlong a, npy_longlong b); -NPY_INPLACE npy_longlong npy_lshiftll(npy_longlong a, npy_longlong b); - -/* - * avx function has a common API for both sin & cos. 
This enum is used to - * distinguish between the two - */ -typedef enum { - npy_compute_sin, - npy_compute_cos -} NPY_TRIG_OP; - -/* - * C99 double math funcs - */ -NPY_INPLACE double npy_sin(double x); -NPY_INPLACE double npy_cos(double x); -NPY_INPLACE double npy_tan(double x); -NPY_INPLACE double npy_sinh(double x); -NPY_INPLACE double npy_cosh(double x); -NPY_INPLACE double npy_tanh(double x); - -NPY_INPLACE double npy_asin(double x); -NPY_INPLACE double npy_acos(double x); -NPY_INPLACE double npy_atan(double x); - -NPY_INPLACE double npy_log(double x); -NPY_INPLACE double npy_log10(double x); -NPY_INPLACE double npy_exp(double x); -NPY_INPLACE double npy_sqrt(double x); -NPY_INPLACE double npy_cbrt(double x); - -NPY_INPLACE double npy_fabs(double x); -NPY_INPLACE double npy_ceil(double x); -NPY_INPLACE double npy_fmod(double x, double y); -NPY_INPLACE double npy_floor(double x); - -NPY_INPLACE double npy_expm1(double x); -NPY_INPLACE double npy_log1p(double x); -NPY_INPLACE double npy_hypot(double x, double y); -NPY_INPLACE double npy_acosh(double x); -NPY_INPLACE double npy_asinh(double xx); -NPY_INPLACE double npy_atanh(double x); -NPY_INPLACE double npy_rint(double x); -NPY_INPLACE double npy_trunc(double x); -NPY_INPLACE double npy_exp2(double x); -NPY_INPLACE double npy_log2(double x); - -NPY_INPLACE double npy_atan2(double x, double y); -NPY_INPLACE double npy_pow(double x, double y); -NPY_INPLACE double npy_modf(double x, double* y); -NPY_INPLACE double npy_frexp(double x, int* y); -NPY_INPLACE double npy_ldexp(double n, int y); - -NPY_INPLACE double npy_copysign(double x, double y); -double npy_nextafter(double x, double y); -double npy_spacing(double x); - -/* - * IEEE 754 fpu handling. 
Those are guaranteed to be macros - */ - -/* use builtins to avoid function calls in tight loops - * only available if npy_config.h is available (= numpys own build) */ -#if HAVE___BUILTIN_ISNAN - #define npy_isnan(x) __builtin_isnan(x) -#else - #ifndef NPY_HAVE_DECL_ISNAN - #define npy_isnan(x) ((x) != (x)) - #else - #if defined(_MSC_VER) && (_MSC_VER < 1900) - #define npy_isnan(x) _isnan((x)) - #else - #define npy_isnan(x) isnan(x) - #endif - #endif -#endif - - -/* only available if npy_config.h is available (= numpys own build) */ -#if HAVE___BUILTIN_ISFINITE - #define npy_isfinite(x) __builtin_isfinite(x) -#else - #ifndef NPY_HAVE_DECL_ISFINITE - #ifdef _MSC_VER - #define npy_isfinite(x) _finite((x)) - #else - #define npy_isfinite(x) !npy_isnan((x) + (-x)) - #endif - #else - #define npy_isfinite(x) isfinite((x)) - #endif -#endif - -/* only available if npy_config.h is available (= numpys own build) */ -#if HAVE___BUILTIN_ISINF - #define npy_isinf(x) __builtin_isinf(x) -#else - #ifndef NPY_HAVE_DECL_ISINF - #define npy_isinf(x) (!npy_isfinite(x) && !npy_isnan(x)) - #else - #if defined(_MSC_VER) && (_MSC_VER < 1900) - #define npy_isinf(x) (!_finite((x)) && !_isnan((x))) - #else - #define npy_isinf(x) isinf((x)) - #endif - #endif -#endif - -#ifndef NPY_HAVE_DECL_SIGNBIT - int _npy_signbit_f(float x); - int _npy_signbit_d(double x); - int _npy_signbit_ld(long double x); - #define npy_signbit(x) \ - (sizeof (x) == sizeof (long double) ? _npy_signbit_ld (x) \ - : sizeof (x) == sizeof (double) ? 
_npy_signbit_d (x) \ - : _npy_signbit_f (x)) -#else - #define npy_signbit(x) signbit((x)) -#endif - -/* - * float C99 math functions - */ -NPY_INPLACE float npy_sinf(float x); -NPY_INPLACE float npy_cosf(float x); -NPY_INPLACE float npy_tanf(float x); -NPY_INPLACE float npy_sinhf(float x); -NPY_INPLACE float npy_coshf(float x); -NPY_INPLACE float npy_tanhf(float x); -NPY_INPLACE float npy_fabsf(float x); -NPY_INPLACE float npy_floorf(float x); -NPY_INPLACE float npy_ceilf(float x); -NPY_INPLACE float npy_rintf(float x); -NPY_INPLACE float npy_truncf(float x); -NPY_INPLACE float npy_sqrtf(float x); -NPY_INPLACE float npy_cbrtf(float x); -NPY_INPLACE float npy_log10f(float x); -NPY_INPLACE float npy_logf(float x); -NPY_INPLACE float npy_expf(float x); -NPY_INPLACE float npy_expm1f(float x); -NPY_INPLACE float npy_asinf(float x); -NPY_INPLACE float npy_acosf(float x); -NPY_INPLACE float npy_atanf(float x); -NPY_INPLACE float npy_asinhf(float x); -NPY_INPLACE float npy_acoshf(float x); -NPY_INPLACE float npy_atanhf(float x); -NPY_INPLACE float npy_log1pf(float x); -NPY_INPLACE float npy_exp2f(float x); -NPY_INPLACE float npy_log2f(float x); - -NPY_INPLACE float npy_atan2f(float x, float y); -NPY_INPLACE float npy_hypotf(float x, float y); -NPY_INPLACE float npy_powf(float x, float y); -NPY_INPLACE float npy_fmodf(float x, float y); - -NPY_INPLACE float npy_modff(float x, float* y); -NPY_INPLACE float npy_frexpf(float x, int* y); -NPY_INPLACE float npy_ldexpf(float x, int y); - -NPY_INPLACE float npy_copysignf(float x, float y); -float npy_nextafterf(float x, float y); -float npy_spacingf(float x); - -/* - * long double C99 math functions - */ -NPY_INPLACE npy_longdouble npy_sinl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_cosl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_tanl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_sinhl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_coshl(npy_longdouble x); -NPY_INPLACE npy_longdouble 
npy_tanhl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_fabsl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_floorl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_ceill(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_rintl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_truncl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_sqrtl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_cbrtl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_log10l(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_logl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_expl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_expm1l(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_asinl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_acosl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_atanl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_asinhl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_acoshl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_atanhl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_log1pl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_exp2l(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_log2l(npy_longdouble x); - -NPY_INPLACE npy_longdouble npy_atan2l(npy_longdouble x, npy_longdouble y); -NPY_INPLACE npy_longdouble npy_hypotl(npy_longdouble x, npy_longdouble y); -NPY_INPLACE npy_longdouble npy_powl(npy_longdouble x, npy_longdouble y); -NPY_INPLACE npy_longdouble npy_fmodl(npy_longdouble x, npy_longdouble y); - -NPY_INPLACE npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y); -NPY_INPLACE npy_longdouble npy_frexpl(npy_longdouble x, int* y); -NPY_INPLACE npy_longdouble npy_ldexpl(npy_longdouble x, int y); - -NPY_INPLACE npy_longdouble npy_copysignl(npy_longdouble x, npy_longdouble y); -npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y); -npy_longdouble npy_spacingl(npy_longdouble x); - -/* - * Non standard functions - */ -NPY_INPLACE double npy_deg2rad(double x); -NPY_INPLACE double 
npy_rad2deg(double x); -NPY_INPLACE double npy_logaddexp(double x, double y); -NPY_INPLACE double npy_logaddexp2(double x, double y); -NPY_INPLACE double npy_divmod(double x, double y, double *modulus); -NPY_INPLACE double npy_heaviside(double x, double h0); - -NPY_INPLACE float npy_deg2radf(float x); -NPY_INPLACE float npy_rad2degf(float x); -NPY_INPLACE float npy_logaddexpf(float x, float y); -NPY_INPLACE float npy_logaddexp2f(float x, float y); -NPY_INPLACE float npy_divmodf(float x, float y, float *modulus); -NPY_INPLACE float npy_heavisidef(float x, float h0); - -NPY_INPLACE npy_longdouble npy_deg2radl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_rad2degl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_logaddexpl(npy_longdouble x, npy_longdouble y); -NPY_INPLACE npy_longdouble npy_logaddexp2l(npy_longdouble x, npy_longdouble y); -NPY_INPLACE npy_longdouble npy_divmodl(npy_longdouble x, npy_longdouble y, - npy_longdouble *modulus); -NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0); - -#define npy_degrees npy_rad2deg -#define npy_degreesf npy_rad2degf -#define npy_degreesl npy_rad2degl - -#define npy_radians npy_deg2rad -#define npy_radiansf npy_deg2radf -#define npy_radiansl npy_deg2radl - -/* - * Complex declarations - */ - -/* - * C99 specifies that complex numbers have the same representation as - * an array of two elements, where the first element is the real part - * and the second element is the imaginary part. 
- */ -#define __NPY_CPACK_IMP(x, y, type, ctype) \ - union { \ - ctype z; \ - type a[2]; \ - } z1;; \ - \ - z1.a[0] = (x); \ - z1.a[1] = (y); \ - \ - return z1.z; - -static NPY_INLINE npy_cdouble npy_cpack(double x, double y) -{ - __NPY_CPACK_IMP(x, y, double, npy_cdouble); -} - -static NPY_INLINE npy_cfloat npy_cpackf(float x, float y) -{ - __NPY_CPACK_IMP(x, y, float, npy_cfloat); -} - -static NPY_INLINE npy_clongdouble npy_cpackl(npy_longdouble x, npy_longdouble y) -{ - __NPY_CPACK_IMP(x, y, npy_longdouble, npy_clongdouble); -} -#undef __NPY_CPACK_IMP - -/* - * Same remark as above, but in the other direction: extract first/second - * member of complex number, assuming a C99-compatible representation - * - * Those are defineds as static inline, and such as a reasonable compiler would - * most likely compile this to one or two instructions (on CISC at least) - */ -#define __NPY_CEXTRACT_IMP(z, index, type, ctype) \ - union { \ - ctype z; \ - type a[2]; \ - } __z_repr; \ - __z_repr.z = z; \ - \ - return __z_repr.a[index]; - -static NPY_INLINE double npy_creal(npy_cdouble z) -{ - __NPY_CEXTRACT_IMP(z, 0, double, npy_cdouble); -} - -static NPY_INLINE double npy_cimag(npy_cdouble z) -{ - __NPY_CEXTRACT_IMP(z, 1, double, npy_cdouble); -} - -static NPY_INLINE float npy_crealf(npy_cfloat z) -{ - __NPY_CEXTRACT_IMP(z, 0, float, npy_cfloat); -} - -static NPY_INLINE float npy_cimagf(npy_cfloat z) -{ - __NPY_CEXTRACT_IMP(z, 1, float, npy_cfloat); -} - -static NPY_INLINE npy_longdouble npy_creall(npy_clongdouble z) -{ - __NPY_CEXTRACT_IMP(z, 0, npy_longdouble, npy_clongdouble); -} - -static NPY_INLINE npy_longdouble npy_cimagl(npy_clongdouble z) -{ - __NPY_CEXTRACT_IMP(z, 1, npy_longdouble, npy_clongdouble); -} -#undef __NPY_CEXTRACT_IMP - -/* - * Double precision complex functions - */ -double npy_cabs(npy_cdouble z); -double npy_carg(npy_cdouble z); - -npy_cdouble npy_cexp(npy_cdouble z); -npy_cdouble npy_clog(npy_cdouble z); -npy_cdouble npy_cpow(npy_cdouble x, 
npy_cdouble y); - -npy_cdouble npy_csqrt(npy_cdouble z); - -npy_cdouble npy_ccos(npy_cdouble z); -npy_cdouble npy_csin(npy_cdouble z); -npy_cdouble npy_ctan(npy_cdouble z); - -npy_cdouble npy_ccosh(npy_cdouble z); -npy_cdouble npy_csinh(npy_cdouble z); -npy_cdouble npy_ctanh(npy_cdouble z); - -npy_cdouble npy_cacos(npy_cdouble z); -npy_cdouble npy_casin(npy_cdouble z); -npy_cdouble npy_catan(npy_cdouble z); - -npy_cdouble npy_cacosh(npy_cdouble z); -npy_cdouble npy_casinh(npy_cdouble z); -npy_cdouble npy_catanh(npy_cdouble z); - -/* - * Single precision complex functions - */ -float npy_cabsf(npy_cfloat z); -float npy_cargf(npy_cfloat z); - -npy_cfloat npy_cexpf(npy_cfloat z); -npy_cfloat npy_clogf(npy_cfloat z); -npy_cfloat npy_cpowf(npy_cfloat x, npy_cfloat y); - -npy_cfloat npy_csqrtf(npy_cfloat z); - -npy_cfloat npy_ccosf(npy_cfloat z); -npy_cfloat npy_csinf(npy_cfloat z); -npy_cfloat npy_ctanf(npy_cfloat z); - -npy_cfloat npy_ccoshf(npy_cfloat z); -npy_cfloat npy_csinhf(npy_cfloat z); -npy_cfloat npy_ctanhf(npy_cfloat z); - -npy_cfloat npy_cacosf(npy_cfloat z); -npy_cfloat npy_casinf(npy_cfloat z); -npy_cfloat npy_catanf(npy_cfloat z); - -npy_cfloat npy_cacoshf(npy_cfloat z); -npy_cfloat npy_casinhf(npy_cfloat z); -npy_cfloat npy_catanhf(npy_cfloat z); - - -/* - * Extended precision complex functions - */ -npy_longdouble npy_cabsl(npy_clongdouble z); -npy_longdouble npy_cargl(npy_clongdouble z); - -npy_clongdouble npy_cexpl(npy_clongdouble z); -npy_clongdouble npy_clogl(npy_clongdouble z); -npy_clongdouble npy_cpowl(npy_clongdouble x, npy_clongdouble y); - -npy_clongdouble npy_csqrtl(npy_clongdouble z); - -npy_clongdouble npy_ccosl(npy_clongdouble z); -npy_clongdouble npy_csinl(npy_clongdouble z); -npy_clongdouble npy_ctanl(npy_clongdouble z); - -npy_clongdouble npy_ccoshl(npy_clongdouble z); -npy_clongdouble npy_csinhl(npy_clongdouble z); -npy_clongdouble npy_ctanhl(npy_clongdouble z); - -npy_clongdouble npy_cacosl(npy_clongdouble z); -npy_clongdouble 
npy_casinl(npy_clongdouble z); -npy_clongdouble npy_catanl(npy_clongdouble z); - -npy_clongdouble npy_cacoshl(npy_clongdouble z); -npy_clongdouble npy_casinhl(npy_clongdouble z); -npy_clongdouble npy_catanhl(npy_clongdouble z); - - -/* - * Functions that set the floating point error - * status word. - */ - -/* - * platform-dependent code translates floating point - * status to an integer sum of these values - */ -#define NPY_FPE_DIVIDEBYZERO 1 -#define NPY_FPE_OVERFLOW 2 -#define NPY_FPE_UNDERFLOW 4 -#define NPY_FPE_INVALID 8 - -int npy_clear_floatstatus_barrier(char*); -int npy_get_floatstatus_barrier(char*); -/* - * use caution with these - clang and gcc8.1 are known to reorder calls - * to this form of the function which can defeat the check. The _barrier - * form of the call is preferable, where the argument is - * (char*)&local_variable - */ -int npy_clear_floatstatus(void); -int npy_get_floatstatus(void); - -void npy_set_floatstatus_divbyzero(void); -void npy_set_floatstatus_overflow(void); -void npy_set_floatstatus_underflow(void); -void npy_set_floatstatus_invalid(void); - -#ifdef __cplusplus -} -#endif - -#if NPY_INLINE_MATH -#include "npy_math_internal.h" -#endif - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h deleted file mode 100644 index 6183dc2..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * This include file is provided for inclusion in Cython *.pyd files where - * one would like to define the NPY_NO_DEPRECATED_API macro. It can be - * included by - * - * cdef extern from "npy_no_deprecated_api.h": pass - * - */ -#ifndef NPY_NO_DEPRECATED_API - -/* put this check here since there may be multiple includes in C extensions. 
*/ -#if defined(NDARRAYTYPES_H) || defined(_NPY_DEPRECATED_API_H) || \ - defined(OLD_DEFINES_H) -#error "npy_no_deprecated_api.h" must be first among numpy includes. -#else -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#endif - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_os.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_os.h deleted file mode 100644 index 9228c39..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_os.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef _NPY_OS_H_ -#define _NPY_OS_H_ - -#if defined(linux) || defined(__linux) || defined(__linux__) - #define NPY_OS_LINUX -#elif defined(__FreeBSD__) || defined(__NetBSD__) || \ - defined(__OpenBSD__) || defined(__DragonFly__) - #define NPY_OS_BSD - #ifdef __FreeBSD__ - #define NPY_OS_FREEBSD - #elif defined(__NetBSD__) - #define NPY_OS_NETBSD - #elif defined(__OpenBSD__) - #define NPY_OS_OPENBSD - #elif defined(__DragonFly__) - #define NPY_OS_DRAGONFLY - #endif -#elif defined(sun) || defined(__sun) - #define NPY_OS_SOLARIS -#elif defined(__CYGWIN__) - #define NPY_OS_CYGWIN -#elif defined(_WIN32) || defined(__WIN32__) || defined(WIN32) - #define NPY_OS_WIN32 -#elif defined(__APPLE__) - #define NPY_OS_DARWIN -#else - #define NPY_OS_UNKNOWN -#endif - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/numpyconfig.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/numpyconfig.h deleted file mode 100644 index 4bca82f..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/numpyconfig.h +++ /dev/null @@ -1,44 +0,0 @@ -#ifndef _NPY_NUMPYCONFIG_H_ -#define _NPY_NUMPYCONFIG_H_ - -#include "_numpyconfig.h" - -/* - * On Mac OS X, because there is only one configuration stage for all the archs - * in universal builds, any macro which depends on the arch needs to be - * hardcoded - */ -#ifdef __APPLE__ - #undef NPY_SIZEOF_LONG - #undef NPY_SIZEOF_PY_INTPTR_T - - #ifdef __LP64__ - #define 
NPY_SIZEOF_LONG 8 - #define NPY_SIZEOF_PY_INTPTR_T 8 - #else - #define NPY_SIZEOF_LONG 4 - #define NPY_SIZEOF_PY_INTPTR_T 4 - #endif -#endif - -/** - * To help with the NPY_NO_DEPRECATED_API macro, we include API version - * numbers for specific versions of NumPy. To exclude all API that was - * deprecated as of 1.7, add the following before #including any NumPy - * headers: - * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION - */ -#define NPY_1_7_API_VERSION 0x00000007 -#define NPY_1_8_API_VERSION 0x00000008 -#define NPY_1_9_API_VERSION 0x00000008 -#define NPY_1_10_API_VERSION 0x00000008 -#define NPY_1_11_API_VERSION 0x00000008 -#define NPY_1_12_API_VERSION 0x00000008 -#define NPY_1_13_API_VERSION 0x00000008 -#define NPY_1_14_API_VERSION 0x00000008 -#define NPY_1_15_API_VERSION 0x00000008 -#define NPY_1_16_API_VERSION 0x00000008 -#define NPY_1_17_API_VERSION 0x00000008 -#define NPY_1_18_API_VERSION 0x00000008 - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/old_defines.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/old_defines.h deleted file mode 100644 index abf8159..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/old_defines.h +++ /dev/null @@ -1,187 +0,0 @@ -/* This header is deprecated as of NumPy 1.7 */ -#ifndef OLD_DEFINES_H -#define OLD_DEFINES_H - -#if defined(NPY_NO_DEPRECATED_API) && NPY_NO_DEPRECATED_API >= NPY_1_7_API_VERSION -#error The header "old_defines.h" is deprecated as of NumPy 1.7. 
-#endif - -#define NDARRAY_VERSION NPY_VERSION - -#define PyArray_MIN_BUFSIZE NPY_MIN_BUFSIZE -#define PyArray_MAX_BUFSIZE NPY_MAX_BUFSIZE -#define PyArray_BUFSIZE NPY_BUFSIZE - -#define PyArray_PRIORITY NPY_PRIORITY -#define PyArray_SUBTYPE_PRIORITY NPY_PRIORITY -#define PyArray_NUM_FLOATTYPE NPY_NUM_FLOATTYPE - -#define NPY_MAX PyArray_MAX -#define NPY_MIN PyArray_MIN - -#define PyArray_TYPES NPY_TYPES -#define PyArray_BOOL NPY_BOOL -#define PyArray_BYTE NPY_BYTE -#define PyArray_UBYTE NPY_UBYTE -#define PyArray_SHORT NPY_SHORT -#define PyArray_USHORT NPY_USHORT -#define PyArray_INT NPY_INT -#define PyArray_UINT NPY_UINT -#define PyArray_LONG NPY_LONG -#define PyArray_ULONG NPY_ULONG -#define PyArray_LONGLONG NPY_LONGLONG -#define PyArray_ULONGLONG NPY_ULONGLONG -#define PyArray_HALF NPY_HALF -#define PyArray_FLOAT NPY_FLOAT -#define PyArray_DOUBLE NPY_DOUBLE -#define PyArray_LONGDOUBLE NPY_LONGDOUBLE -#define PyArray_CFLOAT NPY_CFLOAT -#define PyArray_CDOUBLE NPY_CDOUBLE -#define PyArray_CLONGDOUBLE NPY_CLONGDOUBLE -#define PyArray_OBJECT NPY_OBJECT -#define PyArray_STRING NPY_STRING -#define PyArray_UNICODE NPY_UNICODE -#define PyArray_VOID NPY_VOID -#define PyArray_DATETIME NPY_DATETIME -#define PyArray_TIMEDELTA NPY_TIMEDELTA -#define PyArray_NTYPES NPY_NTYPES -#define PyArray_NOTYPE NPY_NOTYPE -#define PyArray_CHAR NPY_CHAR -#define PyArray_USERDEF NPY_USERDEF -#define PyArray_NUMUSERTYPES NPY_NUMUSERTYPES - -#define PyArray_INTP NPY_INTP -#define PyArray_UINTP NPY_UINTP - -#define PyArray_INT8 NPY_INT8 -#define PyArray_UINT8 NPY_UINT8 -#define PyArray_INT16 NPY_INT16 -#define PyArray_UINT16 NPY_UINT16 -#define PyArray_INT32 NPY_INT32 -#define PyArray_UINT32 NPY_UINT32 - -#ifdef NPY_INT64 -#define PyArray_INT64 NPY_INT64 -#define PyArray_UINT64 NPY_UINT64 -#endif - -#ifdef NPY_INT128 -#define PyArray_INT128 NPY_INT128 -#define PyArray_UINT128 NPY_UINT128 -#endif - -#ifdef NPY_FLOAT16 -#define PyArray_FLOAT16 NPY_FLOAT16 -#define PyArray_COMPLEX32 
NPY_COMPLEX32 -#endif - -#ifdef NPY_FLOAT80 -#define PyArray_FLOAT80 NPY_FLOAT80 -#define PyArray_COMPLEX160 NPY_COMPLEX160 -#endif - -#ifdef NPY_FLOAT96 -#define PyArray_FLOAT96 NPY_FLOAT96 -#define PyArray_COMPLEX192 NPY_COMPLEX192 -#endif - -#ifdef NPY_FLOAT128 -#define PyArray_FLOAT128 NPY_FLOAT128 -#define PyArray_COMPLEX256 NPY_COMPLEX256 -#endif - -#define PyArray_FLOAT32 NPY_FLOAT32 -#define PyArray_COMPLEX64 NPY_COMPLEX64 -#define PyArray_FLOAT64 NPY_FLOAT64 -#define PyArray_COMPLEX128 NPY_COMPLEX128 - - -#define PyArray_TYPECHAR NPY_TYPECHAR -#define PyArray_BOOLLTR NPY_BOOLLTR -#define PyArray_BYTELTR NPY_BYTELTR -#define PyArray_UBYTELTR NPY_UBYTELTR -#define PyArray_SHORTLTR NPY_SHORTLTR -#define PyArray_USHORTLTR NPY_USHORTLTR -#define PyArray_INTLTR NPY_INTLTR -#define PyArray_UINTLTR NPY_UINTLTR -#define PyArray_LONGLTR NPY_LONGLTR -#define PyArray_ULONGLTR NPY_ULONGLTR -#define PyArray_LONGLONGLTR NPY_LONGLONGLTR -#define PyArray_ULONGLONGLTR NPY_ULONGLONGLTR -#define PyArray_HALFLTR NPY_HALFLTR -#define PyArray_FLOATLTR NPY_FLOATLTR -#define PyArray_DOUBLELTR NPY_DOUBLELTR -#define PyArray_LONGDOUBLELTR NPY_LONGDOUBLELTR -#define PyArray_CFLOATLTR NPY_CFLOATLTR -#define PyArray_CDOUBLELTR NPY_CDOUBLELTR -#define PyArray_CLONGDOUBLELTR NPY_CLONGDOUBLELTR -#define PyArray_OBJECTLTR NPY_OBJECTLTR -#define PyArray_STRINGLTR NPY_STRINGLTR -#define PyArray_STRINGLTR2 NPY_STRINGLTR2 -#define PyArray_UNICODELTR NPY_UNICODELTR -#define PyArray_VOIDLTR NPY_VOIDLTR -#define PyArray_DATETIMELTR NPY_DATETIMELTR -#define PyArray_TIMEDELTALTR NPY_TIMEDELTALTR -#define PyArray_CHARLTR NPY_CHARLTR -#define PyArray_INTPLTR NPY_INTPLTR -#define PyArray_UINTPLTR NPY_UINTPLTR -#define PyArray_GENBOOLLTR NPY_GENBOOLLTR -#define PyArray_SIGNEDLTR NPY_SIGNEDLTR -#define PyArray_UNSIGNEDLTR NPY_UNSIGNEDLTR -#define PyArray_FLOATINGLTR NPY_FLOATINGLTR -#define PyArray_COMPLEXLTR NPY_COMPLEXLTR - -#define PyArray_QUICKSORT NPY_QUICKSORT -#define PyArray_HEAPSORT 
NPY_HEAPSORT -#define PyArray_MERGESORT NPY_MERGESORT -#define PyArray_SORTKIND NPY_SORTKIND -#define PyArray_NSORTS NPY_NSORTS - -#define PyArray_NOSCALAR NPY_NOSCALAR -#define PyArray_BOOL_SCALAR NPY_BOOL_SCALAR -#define PyArray_INTPOS_SCALAR NPY_INTPOS_SCALAR -#define PyArray_INTNEG_SCALAR NPY_INTNEG_SCALAR -#define PyArray_FLOAT_SCALAR NPY_FLOAT_SCALAR -#define PyArray_COMPLEX_SCALAR NPY_COMPLEX_SCALAR -#define PyArray_OBJECT_SCALAR NPY_OBJECT_SCALAR -#define PyArray_SCALARKIND NPY_SCALARKIND -#define PyArray_NSCALARKINDS NPY_NSCALARKINDS - -#define PyArray_ANYORDER NPY_ANYORDER -#define PyArray_CORDER NPY_CORDER -#define PyArray_FORTRANORDER NPY_FORTRANORDER -#define PyArray_ORDER NPY_ORDER - -#define PyDescr_ISBOOL PyDataType_ISBOOL -#define PyDescr_ISUNSIGNED PyDataType_ISUNSIGNED -#define PyDescr_ISSIGNED PyDataType_ISSIGNED -#define PyDescr_ISINTEGER PyDataType_ISINTEGER -#define PyDescr_ISFLOAT PyDataType_ISFLOAT -#define PyDescr_ISNUMBER PyDataType_ISNUMBER -#define PyDescr_ISSTRING PyDataType_ISSTRING -#define PyDescr_ISCOMPLEX PyDataType_ISCOMPLEX -#define PyDescr_ISPYTHON PyDataType_ISPYTHON -#define PyDescr_ISFLEXIBLE PyDataType_ISFLEXIBLE -#define PyDescr_ISUSERDEF PyDataType_ISUSERDEF -#define PyDescr_ISEXTENDED PyDataType_ISEXTENDED -#define PyDescr_ISOBJECT PyDataType_ISOBJECT -#define PyDescr_HASFIELDS PyDataType_HASFIELDS - -#define PyArray_LITTLE NPY_LITTLE -#define PyArray_BIG NPY_BIG -#define PyArray_NATIVE NPY_NATIVE -#define PyArray_SWAP NPY_SWAP -#define PyArray_IGNORE NPY_IGNORE - -#define PyArray_NATBYTE NPY_NATBYTE -#define PyArray_OPPBYTE NPY_OPPBYTE - -#define PyArray_MAX_ELSIZE NPY_MAX_ELSIZE - -#define PyArray_USE_PYMEM NPY_USE_PYMEM - -#define PyArray_RemoveLargest PyArray_RemoveSmallest - -#define PyArray_UCS4 npy_ucs4 - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/oldnumeric.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/oldnumeric.h deleted file mode 100644 index 38530fa..0000000 
--- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/oldnumeric.h +++ /dev/null @@ -1,25 +0,0 @@ -#include "arrayobject.h" - -#ifndef PYPY_VERSION -#ifndef REFCOUNT -# define REFCOUNT NPY_REFCOUNT -# define MAX_ELSIZE 16 -#endif -#endif - -#define PyArray_UNSIGNED_TYPES -#define PyArray_SBYTE NPY_BYTE -#define PyArray_CopyArray PyArray_CopyInto -#define _PyArray_multiply_list PyArray_MultiplyIntList -#define PyArray_ISSPACESAVER(m) NPY_FALSE -#define PyScalarArray_Check PyArray_CheckScalar - -#define CONTIGUOUS NPY_CONTIGUOUS -#define OWN_DIMENSIONS 0 -#define OWN_STRIDES 0 -#define OWN_DATA NPY_OWNDATA -#define SAVESPACE 0 -#define SAVESPACEBIT 0 - -#undef import_array -#define import_array() { if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); } } diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/random/bitgen.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/random/bitgen.h deleted file mode 100644 index 83c2858..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/random/bitgen.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef _RANDOM_BITGEN_H -#define _RANDOM_BITGEN_H - -#pragma once -#include -#include -#include - -/* Must match the declaration in numpy/random/.pxd */ - -typedef struct bitgen { - void *state; - uint64_t (*next_uint64)(void *st); - uint32_t (*next_uint32)(void *st); - double (*next_double)(void *st); - uint64_t (*next_raw)(void *st); -} bitgen_t; - - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/random/distributions.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/random/distributions.h deleted file mode 100644 index c474c4d..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/random/distributions.h +++ /dev/null @@ -1,200 +0,0 @@ -#ifndef _RANDOMDGEN__DISTRIBUTIONS_H_ -#define _RANDOMDGEN__DISTRIBUTIONS_H_ - -#include "Python.h" -#include 
"numpy/npy_common.h" -#include -#include -#include - -#include "numpy/npy_math.h" -#include "numpy/random/bitgen.h" - -/* - * RAND_INT_TYPE is used to share integer generators with RandomState which - * used long in place of int64_t. If changing a distribution that uses - * RAND_INT_TYPE, then the original unmodified copy must be retained for - * use in RandomState by copying to the legacy distributions source file. - */ -#ifdef NP_RANDOM_LEGACY -#define RAND_INT_TYPE long -#define RAND_INT_MAX LONG_MAX -#else -#define RAND_INT_TYPE int64_t -#define RAND_INT_MAX INT64_MAX -#endif - -#ifdef _MSC_VER -#define DECLDIR __declspec(dllexport) -#else -#define DECLDIR extern -#endif - -#ifndef MIN -#define MIN(x, y) (((x) < (y)) ? x : y) -#define MAX(x, y) (((x) > (y)) ? x : y) -#endif - -#ifndef M_PI -#define M_PI 3.14159265358979323846264338328 -#endif - -typedef struct s_binomial_t { - int has_binomial; /* !=0: following parameters initialized for binomial */ - double psave; - RAND_INT_TYPE nsave; - double r; - double q; - double fm; - RAND_INT_TYPE m; - double p1; - double xm; - double xl; - double xr; - double c; - double laml; - double lamr; - double p2; - double p3; - double p4; -} binomial_t; - -DECLDIR float random_standard_uniform_f(bitgen_t *bitgen_state); -DECLDIR double random_standard_uniform(bitgen_t *bitgen_state); -DECLDIR void random_standard_uniform_fill(bitgen_t *, npy_intp, double *); -DECLDIR void random_standard_uniform_fill_f(bitgen_t *, npy_intp, float *); - -DECLDIR int64_t random_positive_int64(bitgen_t *bitgen_state); -DECLDIR int32_t random_positive_int32(bitgen_t *bitgen_state); -DECLDIR int64_t random_positive_int(bitgen_t *bitgen_state); -DECLDIR uint64_t random_uint(bitgen_t *bitgen_state); - -DECLDIR double random_standard_exponential(bitgen_t *bitgen_state); -DECLDIR float random_standard_exponential_f(bitgen_t *bitgen_state); -DECLDIR void random_standard_exponential_fill(bitgen_t *, npy_intp, double *); -DECLDIR void 
random_standard_exponential_fill_f(bitgen_t *, npy_intp, float *); -DECLDIR void random_standard_exponential_inv_fill(bitgen_t *, npy_intp, double *); -DECLDIR void random_standard_exponential_inv_fill_f(bitgen_t *, npy_intp, float *); - -DECLDIR double random_standard_normal(bitgen_t *bitgen_state); -DECLDIR float random_standard_normal_f(bitgen_t *bitgen_state); -DECLDIR void random_standard_normal_fill(bitgen_t *, npy_intp, double *); -DECLDIR void random_standard_normal_fill_f(bitgen_t *, npy_intp, float *); -DECLDIR double random_standard_gamma(bitgen_t *bitgen_state, double shape); -DECLDIR float random_standard_gamma_f(bitgen_t *bitgen_state, float shape); - -DECLDIR double random_normal(bitgen_t *bitgen_state, double loc, double scale); - -DECLDIR double random_gamma(bitgen_t *bitgen_state, double shape, double scale); -DECLDIR float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale); - -DECLDIR double random_exponential(bitgen_t *bitgen_state, double scale); -DECLDIR double random_uniform(bitgen_t *bitgen_state, double lower, double range); -DECLDIR double random_beta(bitgen_t *bitgen_state, double a, double b); -DECLDIR double random_chisquare(bitgen_t *bitgen_state, double df); -DECLDIR double random_f(bitgen_t *bitgen_state, double dfnum, double dfden); -DECLDIR double random_standard_cauchy(bitgen_t *bitgen_state); -DECLDIR double random_pareto(bitgen_t *bitgen_state, double a); -DECLDIR double random_weibull(bitgen_t *bitgen_state, double a); -DECLDIR double random_power(bitgen_t *bitgen_state, double a); -DECLDIR double random_laplace(bitgen_t *bitgen_state, double loc, double scale); -DECLDIR double random_gumbel(bitgen_t *bitgen_state, double loc, double scale); -DECLDIR double random_logistic(bitgen_t *bitgen_state, double loc, double scale); -DECLDIR double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma); -DECLDIR double random_rayleigh(bitgen_t *bitgen_state, double mode); -DECLDIR double 
random_standard_t(bitgen_t *bitgen_state, double df); -DECLDIR double random_noncentral_chisquare(bitgen_t *bitgen_state, double df, - double nonc); -DECLDIR double random_noncentral_f(bitgen_t *bitgen_state, double dfnum, - double dfden, double nonc); -DECLDIR double random_wald(bitgen_t *bitgen_state, double mean, double scale); -DECLDIR double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa); -DECLDIR double random_triangular(bitgen_t *bitgen_state, double left, double mode, - double right); - -DECLDIR RAND_INT_TYPE random_poisson(bitgen_t *bitgen_state, double lam); -DECLDIR RAND_INT_TYPE random_negative_binomial(bitgen_t *bitgen_state, double n, - double p); - -DECLDIR int64_t random_binomial(bitgen_t *bitgen_state, double p, - int64_t n, binomial_t *binomial); - -DECLDIR RAND_INT_TYPE random_logseries(bitgen_t *bitgen_state, double p); -DECLDIR RAND_INT_TYPE random_geometric(bitgen_t *bitgen_state, double p); -DECLDIR RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a); -DECLDIR int64_t random_hypergeometric(bitgen_t *bitgen_state, - int64_t good, int64_t bad, int64_t sample); -DECLDIR uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max); - -/* Generate random uint64 numbers in closed interval [off, off + rng]. */ -DECLDIR uint64_t random_bounded_uint64(bitgen_t *bitgen_state, uint64_t off, - uint64_t rng, uint64_t mask, - bool use_masked); - -/* Generate random uint32 numbers in closed interval [off, off + rng]. 
*/ -DECLDIR uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state, - uint32_t off, uint32_t rng, - uint32_t mask, bool use_masked, - int *bcnt, uint32_t *buf); -DECLDIR uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state, - uint16_t off, uint16_t rng, - uint16_t mask, bool use_masked, - int *bcnt, uint32_t *buf); -DECLDIR uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state, uint8_t off, - uint8_t rng, uint8_t mask, - bool use_masked, int *bcnt, - uint32_t *buf); -DECLDIR npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state, npy_bool off, - npy_bool rng, npy_bool mask, - bool use_masked, int *bcnt, - uint32_t *buf); - -DECLDIR void random_bounded_uint64_fill(bitgen_t *bitgen_state, uint64_t off, - uint64_t rng, npy_intp cnt, - bool use_masked, uint64_t *out); -DECLDIR void random_bounded_uint32_fill(bitgen_t *bitgen_state, uint32_t off, - uint32_t rng, npy_intp cnt, - bool use_masked, uint32_t *out); -DECLDIR void random_bounded_uint16_fill(bitgen_t *bitgen_state, uint16_t off, - uint16_t rng, npy_intp cnt, - bool use_masked, uint16_t *out); -DECLDIR void random_bounded_uint8_fill(bitgen_t *bitgen_state, uint8_t off, - uint8_t rng, npy_intp cnt, - bool use_masked, uint8_t *out); -DECLDIR void random_bounded_bool_fill(bitgen_t *bitgen_state, npy_bool off, - npy_bool rng, npy_intp cnt, - bool use_masked, npy_bool *out); - -DECLDIR void random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n, RAND_INT_TYPE *mnix, - double *pix, npy_intp d, binomial_t *binomial); - -/* multivariate hypergeometric, "count" method */ -DECLDIR int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state, - int64_t total, - size_t num_colors, int64_t *colors, - int64_t nsample, - size_t num_variates, int64_t *variates); - -/* multivariate hypergeometric, "marginals" method */ -DECLDIR void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state, - int64_t total, - size_t num_colors, int64_t *colors, - int64_t nsample, - size_t 
num_variates, int64_t *variates); - -/* Common to legacy-distributions.c and distributions.c but not exported */ - -RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state, - RAND_INT_TYPE n, - double p, - binomial_t *binomial); -RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state, - RAND_INT_TYPE n, - double p, - binomial_t *binomial); -double random_loggam(double x); -static NPY_INLINE double next_double(bitgen_t *bitgen_state) { - return bitgen_state->next_double(bitgen_state->state); -} - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/ufunc_api.txt b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/ufunc_api.txt deleted file mode 100644 index 58a2689..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/ufunc_api.txt +++ /dev/null @@ -1,338 +0,0 @@ - -================= -NumPy Ufunc C-API -================= -:: - - PyObject * - PyUFunc_FromFuncAndData(PyUFuncGenericFunction *func, void - **data, char *types, int ntypes, int nin, int - nout, int identity, const char *name, const - char *doc, int unused) - - -:: - - int - PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, int - usertype, PyUFuncGenericFunction - function, const int *arg_types, void - *data) - - -:: - - int - PyUFunc_GenericFunction(PyUFuncObject *ufunc, PyObject *args, PyObject - *kwds, PyArrayObject **op) - - -This generic function is called with the ufunc object, the arguments to it, -and an array of (pointers to) PyArrayObjects which are NULL. - -'op' is an array of at least NPY_MAXARGS PyArrayObject *. 
- -:: - - void - PyUFunc_f_f_As_d_d(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_d_d(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_f_f(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_g_g(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_F_F_As_D_D(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_F_F(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_D_D(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_G_G(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_O_O(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_ff_f_As_dd_d(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_ff_f(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_dd_d(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_gg_g(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_FF_F_As_DD_D(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_DD_D(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_FF_F(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_GG_G(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_OO_O(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_O_O_method(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_OO_O_method(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: 
- - void - PyUFunc_On_Om(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - int - PyUFunc_GetPyValues(char *name, int *bufsize, int *errmask, PyObject - **errobj) - - -On return, if errobj is populated with a non-NULL value, the caller -owns a new reference to errobj. - -:: - - int - PyUFunc_checkfperr(int errmask, PyObject *errobj, int *first) - - -:: - - void - PyUFunc_clearfperr() - - -:: - - int - PyUFunc_getfperr(void ) - - -:: - - int - PyUFunc_handlefperr(int errmask, PyObject *errobj, int retstatus, int - *first) - - -:: - - int - PyUFunc_ReplaceLoopBySignature(PyUFuncObject - *func, PyUFuncGenericFunction - newfunc, const int - *signature, PyUFuncGenericFunction - *oldfunc) - - -:: - - PyObject * - PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void - **data, char *types, int - ntypes, int nin, int nout, int - identity, const char *name, const - char *doc, int unused, const char - *signature) - - -:: - - int - PyUFunc_SetUsesArraysAsData(void **data, size_t i) - - -:: - - void - PyUFunc_e_e(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_e_e_As_f_f(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_e_e_As_d_d(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_ee_e(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_ee_e_As_ff_f(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_ee_e_As_dd_d(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - int - PyUFunc_DefaultTypeResolver(PyUFuncObject *ufunc, NPY_CASTING - casting, PyArrayObject - **operands, PyObject - *type_tup, PyArray_Descr **out_dtypes) - - -This function applies the default type resolution rules -for the provided ufunc. - -Returns 0 on success, -1 on error. 
- -:: - - int - PyUFunc_ValidateCasting(PyUFuncObject *ufunc, NPY_CASTING - casting, PyArrayObject - **operands, PyArray_Descr **dtypes) - - -Validates that the input operands can be cast to -the input types, and the output types can be cast to -the output operands where provided. - -Returns 0 on success, -1 (with exception raised) on validation failure. - -:: - - int - PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc, PyArray_Descr - *user_dtype, PyUFuncGenericFunction - function, PyArray_Descr - **arg_dtypes, void *data) - - -:: - - PyObject * - PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction - *func, void - **data, char - *types, int ntypes, int - nin, int nout, int - identity, const char - *name, const char - *doc, const int - unused, const char - *signature, PyObject - *identity_value) - - diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/ufuncobject.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/ufuncobject.h deleted file mode 100644 index 5ff4a00..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/ufuncobject.h +++ /dev/null @@ -1,369 +0,0 @@ -#ifndef Py_UFUNCOBJECT_H -#define Py_UFUNCOBJECT_H - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * The legacy generic inner loop for a standard element-wise or - * generalized ufunc. - */ -typedef void (*PyUFuncGenericFunction) - (char **args, - npy_intp *dimensions, - npy_intp *strides, - void *innerloopdata); - -/* - * The most generic one-dimensional inner loop for - * a masked standard element-wise ufunc. "Masked" here means that it skips - * doing calculations on any items for which the maskptr array has a true - * value. 
- */ -typedef void (PyUFunc_MaskedStridedInnerLoopFunc)( - char **dataptrs, npy_intp *strides, - char *maskptr, npy_intp mask_stride, - npy_intp count, - NpyAuxData *innerloopdata); - -/* Forward declaration for the type resolver and loop selector typedefs */ -struct _tagPyUFuncObject; - -/* - * Given the operands for calling a ufunc, should determine the - * calculation input and output data types and return an inner loop function. - * This function should validate that the casting rule is being followed, - * and fail if it is not. - * - * For backwards compatibility, the regular type resolution function does not - * support auxiliary data with object semantics. The type resolution call - * which returns a masked generic function returns a standard NpyAuxData - * object, for which the NPY_AUXDATA_FREE and NPY_AUXDATA_CLONE macros - * work. - * - * ufunc: The ufunc object. - * casting: The 'casting' parameter provided to the ufunc. - * operands: An array of length (ufunc->nin + ufunc->nout), - * with the output parameters possibly NULL. - * type_tup: Either NULL, or the type_tup passed to the ufunc. - * out_dtypes: An array which should be populated with new - * references to (ufunc->nin + ufunc->nout) new - * dtypes, one for each input and output. These - * dtypes should all be in native-endian format. - * - * Should return 0 on success, -1 on failure (with exception set), - * or -2 if Py_NotImplemented should be returned. - */ -typedef int (PyUFunc_TypeResolutionFunc)( - struct _tagPyUFuncObject *ufunc, - NPY_CASTING casting, - PyArrayObject **operands, - PyObject *type_tup, - PyArray_Descr **out_dtypes); - -/* - * Given an array of DTypes as returned by the PyUFunc_TypeResolutionFunc, - * and an array of fixed strides (the array will contain NPY_MAX_INTP for - * strides which are not necessarily fixed), returns an inner loop - * with associated auxiliary data. 
- * - * For backwards compatibility, there is a variant of the inner loop - * selection which returns an inner loop irrespective of the strides, - * and with a void* static auxiliary data instead of an NpyAuxData * - * dynamically allocatable auxiliary data. - * - * ufunc: The ufunc object. - * dtypes: An array which has been populated with dtypes, - * in most cases by the type resolution function - * for the same ufunc. - * fixed_strides: For each input/output, either the stride that - * will be used every time the function is called - * or NPY_MAX_INTP if the stride might change or - * is not known ahead of time. The loop selection - * function may use this stride to pick inner loops - * which are optimized for contiguous or 0-stride - * cases. - * out_innerloop: Should be populated with the correct ufunc inner - * loop for the given type. - * out_innerloopdata: Should be populated with the void* data to - * be passed into the out_innerloop function. - * out_needs_api: If the inner loop needs to use the Python API, - * should set the to 1, otherwise should leave - * this untouched. - */ -typedef int (PyUFunc_LegacyInnerLoopSelectionFunc)( - struct _tagPyUFuncObject *ufunc, - PyArray_Descr **dtypes, - PyUFuncGenericFunction *out_innerloop, - void **out_innerloopdata, - int *out_needs_api); -typedef int (PyUFunc_MaskedInnerLoopSelectionFunc)( - struct _tagPyUFuncObject *ufunc, - PyArray_Descr **dtypes, - PyArray_Descr *mask_dtype, - npy_intp *fixed_strides, - npy_intp fixed_mask_stride, - PyUFunc_MaskedStridedInnerLoopFunc **out_innerloop, - NpyAuxData **out_innerloopdata, - int *out_needs_api); - -typedef struct _tagPyUFuncObject { - PyObject_HEAD - /* - * nin: Number of inputs - * nout: Number of outputs - * nargs: Always nin + nout (Why is it stored?) - */ - int nin, nout, nargs; - - /* - * Identity for reduction, any of PyUFunc_One, PyUFunc_Zero - * PyUFunc_MinusOne, PyUFunc_None, PyUFunc_ReorderableNone, - * PyUFunc_IdentityValue. 
- */ - int identity; - - /* Array of one-dimensional core loops */ - PyUFuncGenericFunction *functions; - /* Array of funcdata that gets passed into the functions */ - void **data; - /* The number of elements in 'functions' and 'data' */ - int ntypes; - - /* Used to be unused field 'check_return' */ - int reserved1; - - /* The name of the ufunc */ - const char *name; - - /* Array of type numbers, of size ('nargs' * 'ntypes') */ - char *types; - - /* Documentation string */ - const char *doc; - - void *ptr; - PyObject *obj; - PyObject *userloops; - - /* generalized ufunc parameters */ - - /* 0 for scalar ufunc; 1 for generalized ufunc */ - int core_enabled; - /* number of distinct dimension names in signature */ - int core_num_dim_ix; - - /* - * dimension indices of input/output argument k are stored in - * core_dim_ixs[core_offsets[k]..core_offsets[k]+core_num_dims[k]-1] - */ - - /* numbers of core dimensions of each argument */ - int *core_num_dims; - /* - * dimension indices in a flatted form; indices - * are in the range of [0,core_num_dim_ix) - */ - int *core_dim_ixs; - /* - * positions of 1st core dimensions of each - * argument in core_dim_ixs, equivalent to cumsum(core_num_dims) - */ - int *core_offsets; - /* signature string for printing purpose */ - char *core_signature; - - /* - * A function which resolves the types and fills an array - * with the dtypes for the inputs and outputs. - */ - PyUFunc_TypeResolutionFunc *type_resolver; - /* - * A function which returns an inner loop written for - * NumPy 1.6 and earlier ufuncs. This is for backwards - * compatibility, and may be NULL if inner_loop_selector - * is specified. - */ - PyUFunc_LegacyInnerLoopSelectionFunc *legacy_inner_loop_selector; - /* - * This was blocked off to be the "new" inner loop selector in 1.7, - * but this was never implemented. (This is also why the above - * selector is called the "legacy" selector.) 
- */ - void *reserved2; - /* - * A function which returns a masked inner loop for the ufunc. - */ - PyUFunc_MaskedInnerLoopSelectionFunc *masked_inner_loop_selector; - - /* - * List of flags for each operand when ufunc is called by nditer object. - * These flags will be used in addition to the default flags for each - * operand set by nditer object. - */ - npy_uint32 *op_flags; - - /* - * List of global flags used when ufunc is called by nditer object. - * These flags will be used in addition to the default global flags - * set by nditer object. - */ - npy_uint32 iter_flags; - - /* New in NPY_API_VERSION 0x0000000D and above */ - - /* - * for each core_num_dim_ix distinct dimension names, - * the possible "frozen" size (-1 if not frozen). - */ - npy_intp *core_dim_sizes; - - /* - * for each distinct core dimension, a set of UFUNC_CORE_DIM* flags - */ - npy_uint32 *core_dim_flags; - - /* Identity for reduction, when identity == PyUFunc_IdentityValue */ - PyObject *identity_value; - -} PyUFuncObject; - -#include "arrayobject.h" -/* Generalized ufunc; 0x0001 reserved for possible use as CORE_ENABLED */ -/* the core dimension's size will be determined by the operands. 
*/ -#define UFUNC_CORE_DIM_SIZE_INFERRED 0x0002 -/* the core dimension may be absent */ -#define UFUNC_CORE_DIM_CAN_IGNORE 0x0004 -/* flags inferred during execution */ -#define UFUNC_CORE_DIM_MISSING 0x00040000 - -#define UFUNC_ERR_IGNORE 0 -#define UFUNC_ERR_WARN 1 -#define UFUNC_ERR_RAISE 2 -#define UFUNC_ERR_CALL 3 -#define UFUNC_ERR_PRINT 4 -#define UFUNC_ERR_LOG 5 - - /* Python side integer mask */ - -#define UFUNC_MASK_DIVIDEBYZERO 0x07 -#define UFUNC_MASK_OVERFLOW 0x3f -#define UFUNC_MASK_UNDERFLOW 0x1ff -#define UFUNC_MASK_INVALID 0xfff - -#define UFUNC_SHIFT_DIVIDEBYZERO 0 -#define UFUNC_SHIFT_OVERFLOW 3 -#define UFUNC_SHIFT_UNDERFLOW 6 -#define UFUNC_SHIFT_INVALID 9 - - -#define UFUNC_OBJ_ISOBJECT 1 -#define UFUNC_OBJ_NEEDS_API 2 - - /* Default user error mode */ -#define UFUNC_ERR_DEFAULT \ - (UFUNC_ERR_WARN << UFUNC_SHIFT_DIVIDEBYZERO) + \ - (UFUNC_ERR_WARN << UFUNC_SHIFT_OVERFLOW) + \ - (UFUNC_ERR_WARN << UFUNC_SHIFT_INVALID) - -#if NPY_ALLOW_THREADS -#define NPY_LOOP_BEGIN_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) _save = PyEval_SaveThread();} while (0); -#define NPY_LOOP_END_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) PyEval_RestoreThread(_save);} while (0); -#else -#define NPY_LOOP_BEGIN_THREADS -#define NPY_LOOP_END_THREADS -#endif - -/* - * UFunc has unit of 0, and the order of operations can be reordered - * This case allows reduction with multiple axes at once. - */ -#define PyUFunc_Zero 0 -/* - * UFunc has unit of 1, and the order of operations can be reordered - * This case allows reduction with multiple axes at once. - */ -#define PyUFunc_One 1 -/* - * UFunc has unit of -1, and the order of operations can be reordered - * This case allows reduction with multiple axes at once. Intended for - * bitwise_and reduction. - */ -#define PyUFunc_MinusOne 2 -/* - * UFunc has no unit, and the order of operations cannot be reordered. - * This case does not allow reduction with multiple axes at once. 
- */ -#define PyUFunc_None -1 -/* - * UFunc has no unit, and the order of operations can be reordered - * This case allows reduction with multiple axes at once. - */ -#define PyUFunc_ReorderableNone -2 -/* - * UFunc unit is an identity_value, and the order of operations can be reordered - * This case allows reduction with multiple axes at once. - */ -#define PyUFunc_IdentityValue -3 - - -#define UFUNC_REDUCE 0 -#define UFUNC_ACCUMULATE 1 -#define UFUNC_REDUCEAT 2 -#define UFUNC_OUTER 3 - - -typedef struct { - int nin; - int nout; - PyObject *callable; -} PyUFunc_PyFuncData; - -/* A linked-list of function information for - user-defined 1-d loops. - */ -typedef struct _loop1d_info { - PyUFuncGenericFunction func; - void *data; - int *arg_types; - struct _loop1d_info *next; - int nargs; - PyArray_Descr **arg_dtypes; -} PyUFunc_Loop1d; - - -#include "__ufunc_api.h" - -#define UFUNC_PYVALS_NAME "UFUNC_PYVALS" - -/* - * THESE MACROS ARE DEPRECATED. - * Use npy_set_floatstatus_* in the npymath library. 
- */ -#define UFUNC_FPE_DIVIDEBYZERO NPY_FPE_DIVIDEBYZERO -#define UFUNC_FPE_OVERFLOW NPY_FPE_OVERFLOW -#define UFUNC_FPE_UNDERFLOW NPY_FPE_UNDERFLOW -#define UFUNC_FPE_INVALID NPY_FPE_INVALID - -#define generate_divbyzero_error() npy_set_floatstatus_divbyzero() -#define generate_overflow_error() npy_set_floatstatus_overflow() - - /* Make sure it gets defined if it isn't already */ -#ifndef UFUNC_NOFPE -/* Clear the floating point exception default of Borland C++ */ -#if defined(__BORLANDC__) -#define UFUNC_NOFPE _control87(MCW_EM, MCW_EM); -#else -#define UFUNC_NOFPE -#endif -#endif - - -#ifdef __cplusplus -} -#endif -#endif /* !Py_UFUNCOBJECT_H */ diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/utils.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/utils.h deleted file mode 100644 index 32218b8..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/utils.h +++ /dev/null @@ -1,21 +0,0 @@ -#ifndef __NUMPY_UTILS_HEADER__ -#define __NUMPY_UTILS_HEADER__ - -#ifndef __COMP_NPY_UNUSED - #if defined(__GNUC__) - #define __COMP_NPY_UNUSED __attribute__ ((__unused__)) - # elif defined(__ICC) - #define __COMP_NPY_UNUSED __attribute__ ((__unused__)) - # elif defined(__clang__) - #define __COMP_NPY_UNUSED __attribute__ ((unused)) - #else - #define __COMP_NPY_UNUSED - #endif -#endif - -/* Use this to tag a variable as not used. 
It will remove unused variable - * warning on support platforms (see __COM_NPY_UNUSED) and mangle the variable - * to avoid accidental use */ -#define NPY_UNUSED(x) (__NPY_UNUSED_TAGGED ## x) __COMP_NPY_UNUSED - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/lib/libnpymath.a b/venv/lib/python3.7/site-packages/numpy/core/lib/libnpymath.a deleted file mode 100644 index 1273173..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/core/lib/libnpymath.a and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini b/venv/lib/python3.7/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini deleted file mode 100644 index 5840f5e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini +++ /dev/null @@ -1,12 +0,0 @@ -[meta] -Name = mlib -Description = Math library used with this version of numpy -Version = 1.0 - -[default] -Libs=-lm -Cflags= - -[msvc] -Libs=m.lib -Cflags= diff --git a/venv/lib/python3.7/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini b/venv/lib/python3.7/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini deleted file mode 100644 index 3e465ad..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini +++ /dev/null @@ -1,20 +0,0 @@ -[meta] -Name=npymath -Description=Portable, core math library implementing C99 standard -Version=0.1 - -[variables] -pkgname=numpy.core -prefix=${pkgdir} -libdir=${prefix}/lib -includedir=${prefix}/include - -[default] -Libs=-L${libdir} -lnpymath -Cflags=-I${includedir} -Requires=mlib - -[msvc] -Libs=/LIBPATH:${libdir} npymath.lib -Cflags=/INCLUDE:${includedir} -Requires=mlib diff --git a/venv/lib/python3.7/site-packages/numpy/core/machar.py b/venv/lib/python3.7/site-packages/numpy/core/machar.py deleted file mode 100644 index 202580b..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/machar.py +++ /dev/null @@ -1,344 +0,0 @@ -""" -Machine arithmetics - determine the parameters 
of the -floating-point arithmetic system - -Author: Pearu Peterson, September 2003 - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['MachAr'] - -from numpy.core.fromnumeric import any -from numpy.core._ufunc_config import errstate -from numpy.core.overrides import set_module - -# Need to speed this up...especially for longfloat - -@set_module('numpy') -class MachAr(object): - """ - Diagnosing machine parameters. - - Attributes - ---------- - ibeta : int - Radix in which numbers are represented. - it : int - Number of base-`ibeta` digits in the floating point mantissa M. - machep : int - Exponent of the smallest (most negative) power of `ibeta` that, - added to 1.0, gives something different from 1.0 - eps : float - Floating-point number ``beta**machep`` (floating point precision) - negep : int - Exponent of the smallest power of `ibeta` that, subtracted - from 1.0, gives something different from 1.0. - epsneg : float - Floating-point number ``beta**negep``. - iexp : int - Number of bits in the exponent (including its sign and bias). - minexp : int - Smallest (most negative) power of `ibeta` consistent with there - being no leading zeros in the mantissa. - xmin : float - Floating point number ``beta**minexp`` (the smallest [in - magnitude] usable floating value). - maxexp : int - Smallest (positive) power of `ibeta` that causes overflow. - xmax : float - ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude] - usable floating value). - irnd : int - In ``range(6)``, information on what kind of rounding is done - in addition, and on how underflow is handled. - ngrd : int - Number of 'guard digits' used when truncating the product - of two mantissas to fit the representation. - epsilon : float - Same as `eps`. - tiny : float - Same as `xmin`. - huge : float - Same as `xmax`. 
- precision : float - ``- int(-log10(eps))`` - resolution : float - ``- 10**(-precision)`` - - Parameters - ---------- - float_conv : function, optional - Function that converts an integer or integer array to a float - or float array. Default is `float`. - int_conv : function, optional - Function that converts a float or float array to an integer or - integer array. Default is `int`. - float_to_float : function, optional - Function that converts a float array to float. Default is `float`. - Note that this does not seem to do anything useful in the current - implementation. - float_to_str : function, optional - Function that converts a single float to a string. Default is - ``lambda v:'%24.16e' %v``. - title : str, optional - Title that is printed in the string representation of `MachAr`. - - See Also - -------- - finfo : Machine limits for floating point types. - iinfo : Machine limits for integer types. - - References - ---------- - .. [1] Press, Teukolsky, Vetterling and Flannery, - "Numerical Recipes in C++," 2nd ed, - Cambridge University Press, 2002, p. 31. - - """ - - def __init__(self, float_conv=float,int_conv=int, - float_to_float=float, - float_to_str=lambda v:'%24.16e' % v, - title='Python floating point number'): - """ - - float_conv - convert integer to float (array) - int_conv - convert float (array) to integer - float_to_float - convert float array to float - float_to_str - convert array float to str - title - description of used floating point numbers - - """ - # We ignore all errors here because we are purposely triggering - # underflow to detect the properties of the runninng arch. - with errstate(under='ignore'): - self._do_init(float_conv, int_conv, float_to_float, float_to_str, title) - - def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title): - max_iterN = 10000 - msg = "Did not converge after %d tries with %s" - one = float_conv(1) - two = one + one - zero = one - one - - # Do we really need to do this? 
Aren't they 2 and 2.0? - # Determine ibeta and beta - a = one - for _ in range(max_iterN): - a = a + a - temp = a + one - temp1 = temp - a - if any(temp1 - one != zero): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - b = one - for _ in range(max_iterN): - b = b + b - temp = a + b - itemp = int_conv(temp-a) - if any(itemp != 0): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - ibeta = itemp - beta = float_conv(ibeta) - - # Determine it and irnd - it = -1 - b = one - for _ in range(max_iterN): - it = it + 1 - b = b * beta - temp = b + one - temp1 = temp - b - if any(temp1 - one != zero): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - - betah = beta / two - a = one - for _ in range(max_iterN): - a = a + a - temp = a + one - temp1 = temp - a - if any(temp1 - one != zero): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - temp = a + betah - irnd = 0 - if any(temp-a != zero): - irnd = 1 - tempa = a + beta - temp = tempa + betah - if irnd == 0 and any(temp-tempa != zero): - irnd = 2 - - # Determine negep and epsneg - negep = it + 3 - betain = one / beta - a = one - for i in range(negep): - a = a * betain - b = a - for _ in range(max_iterN): - temp = one - a - if any(temp-one != zero): - break - a = a * beta - negep = negep - 1 - # Prevent infinite loop on PPC with gcc 4.0: - if negep < 0: - raise RuntimeError("could not determine machine tolerance " - "for 'negep', locals() -> %s" % (locals())) - else: - raise RuntimeError(msg % (_, one.dtype)) - negep = -negep - epsneg = a - - # Determine machep and eps - machep = - it - 3 - a = b - - for _ in range(max_iterN): - temp = one + a - if any(temp-one != zero): - break - a = a * beta - machep = machep + 1 - else: - raise RuntimeError(msg % (_, one.dtype)) - eps = a - - # Determine ngrd - ngrd = 0 - temp = one + eps - if irnd == 0 and any(temp*one - one != zero): - ngrd = 1 - - # Determine iexp - i = 0 - k = 1 - z = betain - t = one + eps - nxres = 0 - for _ in 
range(max_iterN): - y = z - z = y*y - a = z*one # Check here for underflow - temp = z*t - if any(a+a == zero) or any(abs(z) >= y): - break - temp1 = temp * betain - if any(temp1*beta == z): - break - i = i + 1 - k = k + k - else: - raise RuntimeError(msg % (_, one.dtype)) - if ibeta != 10: - iexp = i + 1 - mx = k + k - else: - iexp = 2 - iz = ibeta - while k >= iz: - iz = iz * ibeta - iexp = iexp + 1 - mx = iz + iz - 1 - - # Determine minexp and xmin - for _ in range(max_iterN): - xmin = y - y = y * betain - a = y * one - temp = y * t - if any((a + a) != zero) and any(abs(y) < xmin): - k = k + 1 - temp1 = temp * betain - if any(temp1*beta == y) and any(temp != y): - nxres = 3 - xmin = y - break - else: - break - else: - raise RuntimeError(msg % (_, one.dtype)) - minexp = -k - - # Determine maxexp, xmax - if mx <= k + k - 3 and ibeta != 10: - mx = mx + mx - iexp = iexp + 1 - maxexp = mx + minexp - irnd = irnd + nxres - if irnd >= 2: - maxexp = maxexp - 2 - i = maxexp + minexp - if ibeta == 2 and not i: - maxexp = maxexp - 1 - if i > 20: - maxexp = maxexp - 1 - if any(a != y): - maxexp = maxexp - 2 - xmax = one - epsneg - if any(xmax*one != xmax): - xmax = one - beta*epsneg - xmax = xmax / (xmin*beta*beta*beta) - i = maxexp + minexp + 3 - for j in range(i): - if ibeta == 2: - xmax = xmax + xmax - else: - xmax = xmax * beta - - self.ibeta = ibeta - self.it = it - self.negep = negep - self.epsneg = float_to_float(epsneg) - self._str_epsneg = float_to_str(epsneg) - self.machep = machep - self.eps = float_to_float(eps) - self._str_eps = float_to_str(eps) - self.ngrd = ngrd - self.iexp = iexp - self.minexp = minexp - self.xmin = float_to_float(xmin) - self._str_xmin = float_to_str(xmin) - self.maxexp = maxexp - self.xmax = float_to_float(xmax) - self._str_xmax = float_to_str(xmax) - self.irnd = irnd - - self.title = title - # Commonly used parameters - self.epsilon = self.eps - self.tiny = self.xmin - self.huge = self.xmax - - import math - self.precision = 
int(-math.log10(float_to_float(self.eps))) - ten = two + two + two + two + two - resolution = ten ** (-self.precision) - self.resolution = float_to_float(resolution) - self._str_resolution = float_to_str(resolution) - - def __str__(self): - fmt = ( - 'Machine parameters for %(title)s\n' - '---------------------------------------------------------------------\n' - 'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n' - 'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n' - 'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n' - 'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n' - 'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n' - '---------------------------------------------------------------------\n' - ) - return fmt % self.__dict__ - - -if __name__ == '__main__': - print(MachAr()) diff --git a/venv/lib/python3.7/site-packages/numpy/core/memmap.py b/venv/lib/python3.7/site-packages/numpy/core/memmap.py deleted file mode 100644 index 0626455..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/memmap.py +++ /dev/null @@ -1,334 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from .numeric import uint8, ndarray, dtype -from numpy.compat import ( - long, basestring, os_fspath, contextlib_nullcontext, is_pathlib_path -) -from numpy.core.overrides import set_module - -__all__ = ['memmap'] - -dtypedescr = dtype -valid_filemodes = ["r", "c", "r+", "w+"] -writeable_filemodes = ["r+", "w+"] - -mode_equivalents = { - "readonly":"r", - "copyonwrite":"c", - "readwrite":"r+", - "write":"w+" - } - - -@set_module('numpy') -class memmap(ndarray): - """Create a memory-map to an array stored in a *binary* file on disk. - - Memory-mapped files are used for accessing small segments of large files - on disk, without reading the entire file into memory. NumPy's - memmap's are array-like objects. 
This differs from Python's ``mmap`` - module, which uses file-like objects. - - This subclass of ndarray has some unpleasant interactions with - some operations, because it doesn't quite fit properly as a subclass. - An alternative to using this subclass is to create the ``mmap`` - object yourself, then create an ndarray with ndarray.__new__ directly, - passing the object created in its 'buffer=' parameter. - - This class may at some point be turned into a factory function - which returns a view into an mmap buffer. - - Delete the memmap instance to close the memmap file. - - - Parameters - ---------- - filename : str, file-like object, or pathlib.Path instance - The file name or file object to be used as the array data buffer. - dtype : data-type, optional - The data-type used to interpret the file contents. - Default is `uint8`. - mode : {'r+', 'r', 'w+', 'c'}, optional - The file is opened in this mode: - - +------+-------------------------------------------------------------+ - | 'r' | Open existing file for reading only. | - +------+-------------------------------------------------------------+ - | 'r+' | Open existing file for reading and writing. | - +------+-------------------------------------------------------------+ - | 'w+' | Create or overwrite existing file for reading and writing. | - +------+-------------------------------------------------------------+ - | 'c' | Copy-on-write: assignments affect data in memory, but | - | | changes are not saved to disk. The file on disk is | - | | read-only. | - +------+-------------------------------------------------------------+ - - Default is 'r+'. - offset : int, optional - In the file, array data starts at this offset. Since `offset` is - measured in bytes, it should normally be a multiple of the byte-size - of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of - file are valid; The file will be extended to accommodate the - additional data. 
By default, ``memmap`` will start at the beginning of - the file, even if ``filename`` is a file pointer ``fp`` and - ``fp.tell() != 0``. - shape : tuple, optional - The desired shape of the array. If ``mode == 'r'`` and the number - of remaining bytes after `offset` is not a multiple of the byte-size - of `dtype`, you must specify `shape`. By default, the returned array - will be 1-D with the number of elements determined by file size - and data-type. - order : {'C', 'F'}, optional - Specify the order of the ndarray memory layout: - :term:`row-major`, C-style or :term:`column-major`, - Fortran-style. This only has an effect if the shape is - greater than 1-D. The default order is 'C'. - - Attributes - ---------- - filename : str or pathlib.Path instance - Path to the mapped file. - offset : int - Offset position in the file. - mode : str - File mode. - - Methods - ------- - flush - Flush any changes in memory to file on disk. - When you delete a memmap object, flush is called first to write - changes to disk before removing the object. - - - See also - -------- - lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file. - - Notes - ----- - The memmap object can be used anywhere an ndarray is accepted. - Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns - ``True``. - - Memory-mapped files cannot be larger than 2GB on 32-bit systems. - - When a memmap causes a file to be created or extended beyond its - current size in the filesystem, the contents of the new part are - unspecified. On systems with POSIX filesystem semantics, the extended - part will be filled with zero bytes. - - Examples - -------- - >>> data = np.arange(12, dtype='float32') - >>> data.resize((3,4)) - - This example uses a temporary file so that doctest doesn't write - files to your directory. You would use a 'normal' filename. 
- - >>> from tempfile import mkdtemp - >>> import os.path as path - >>> filename = path.join(mkdtemp(), 'newfile.dat') - - Create a memmap with dtype and shape that matches our data: - - >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4)) - >>> fp - memmap([[0., 0., 0., 0.], - [0., 0., 0., 0.], - [0., 0., 0., 0.]], dtype=float32) - - Write data to memmap array: - - >>> fp[:] = data[:] - >>> fp - memmap([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.]], dtype=float32) - - >>> fp.filename == path.abspath(filename) - True - - Deletion flushes memory changes to disk before removing the object: - - >>> del fp - - Load the memmap and verify data was stored: - - >>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) - >>> newfp - memmap([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.]], dtype=float32) - - Read-only memmap: - - >>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) - >>> fpr.flags.writeable - False - - Copy-on-write memmap: - - >>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4)) - >>> fpc.flags.writeable - True - - It's possible to assign to copy-on-write array, but values are only - written into the memory copy of the array, and not written to disk: - - >>> fpc - memmap([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.]], dtype=float32) - >>> fpc[0,:] = 0 - >>> fpc - memmap([[ 0., 0., 0., 0.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.]], dtype=float32) - - File on disk is unchanged: - - >>> fpr - memmap([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.]], dtype=float32) - - Offset into a memmap: - - >>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16) - >>> fpo - memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32) - - """ - - __array_priority__ = -100.0 - - def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, - shape=None, order='C'): - # Import here to minimize 'import numpy' overhead - import mmap - 
import os.path - try: - mode = mode_equivalents[mode] - except KeyError: - if mode not in valid_filemodes: - raise ValueError("mode must be one of %s" % - (valid_filemodes + list(mode_equivalents.keys()))) - - if mode == 'w+' and shape is None: - raise ValueError("shape must be given") - - if hasattr(filename, 'read'): - f_ctx = contextlib_nullcontext(filename) - else: - f_ctx = open(os_fspath(filename), ('r' if mode == 'c' else mode)+'b') - - with f_ctx as fid: - fid.seek(0, 2) - flen = fid.tell() - descr = dtypedescr(dtype) - _dbytes = descr.itemsize - - if shape is None: - bytes = flen - offset - if bytes % _dbytes: - raise ValueError("Size of available data is not a " - "multiple of the data-type size.") - size = bytes // _dbytes - shape = (size,) - else: - if not isinstance(shape, tuple): - shape = (shape,) - size = np.intp(1) # avoid default choice of np.int_, which might overflow - for k in shape: - size *= k - - bytes = long(offset + size*_dbytes) - - if mode in ('w+', 'r+') and flen < bytes: - fid.seek(bytes - 1, 0) - fid.write(b'\0') - fid.flush() - - if mode == 'c': - acc = mmap.ACCESS_COPY - elif mode == 'r': - acc = mmap.ACCESS_READ - else: - acc = mmap.ACCESS_WRITE - - start = offset - offset % mmap.ALLOCATIONGRANULARITY - bytes -= start - array_offset = offset - start - mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start) - - self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm, - offset=array_offset, order=order) - self._mmap = mm - self.offset = offset - self.mode = mode - - if is_pathlib_path(filename): - # special case - if we were constructed with a pathlib.path, - # then filename is a path object, not a string - self.filename = filename.resolve() - elif hasattr(fid, "name") and isinstance(fid.name, basestring): - # py3 returns int for TemporaryFile().name - self.filename = os.path.abspath(fid.name) - # same as memmap copies (e.g. 
memmap + 1) - else: - self.filename = None - - return self - - def __array_finalize__(self, obj): - if hasattr(obj, '_mmap') and np.may_share_memory(self, obj): - self._mmap = obj._mmap - self.filename = obj.filename - self.offset = obj.offset - self.mode = obj.mode - else: - self._mmap = None - self.filename = None - self.offset = None - self.mode = None - - def flush(self): - """ - Write any changes in the array to the file on disk. - - For further information, see `memmap`. - - Parameters - ---------- - None - - See Also - -------- - memmap - - """ - if self.base is not None and hasattr(self.base, 'flush'): - self.base.flush() - - def __array_wrap__(self, arr, context=None): - arr = super(memmap, self).__array_wrap__(arr, context) - - # Return a memmap if a memmap was given as the output of the - # ufunc. Leave the arr class unchanged if self is not a memmap - # to keep original memmap subclasses behavior - if self is arr or type(self) is not memmap: - return arr - # Return scalar instead of 0d memmap, e.g. for np.sum with - # axis=None - if arr.shape == (): - return arr[()] - # Return ndarray otherwise - return arr.view(np.ndarray) - - def __getitem__(self, index): - res = super(memmap, self).__getitem__(index) - if type(res) is memmap and res._mmap is None: - return res.view(type=ndarray) - return res diff --git a/venv/lib/python3.7/site-packages/numpy/core/multiarray.py b/venv/lib/python3.7/site-packages/numpy/core/multiarray.py deleted file mode 100644 index c0fcc10..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/multiarray.py +++ /dev/null @@ -1,1631 +0,0 @@ -""" -Create the numpy.core.multiarray namespace for backward compatibility. In v1.16 -the multiarray and umath c-extension modules were merged into a single -_multiarray_umath extension module. So we replicate the old namespace -by importing from the extension module. - -""" - -import functools -import sys -import warnings -import sys - -from . import overrides -from . 
import _multiarray_umath -import numpy as np -from numpy.core._multiarray_umath import * -from numpy.core._multiarray_umath import ( - _fastCopyAndTranspose, _flagdict, _insert, _reconstruct, _vec_string, - _ARRAY_API, _monotonicity, _get_ndarray_c_version - ) - -__all__ = [ - '_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS', - 'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS', - 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI', - 'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP', '_fastCopyAndTranspose', - '_flagdict', '_insert', '_reconstruct', '_vec_string', '_monotonicity', - 'add_docstring', 'arange', 'array', 'bincount', 'broadcast', - 'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast', - 'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2', - 'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data', - 'digitize', 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype', - 'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat', - 'frombuffer', 'fromfile', 'fromiter', 'fromstring', 'inner', - 'int_asbuffer', 'interp', 'interp_complex', 'is_busday', 'lexsort', - 'matmul', 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer', - 'nested_iters', 'normalize_axis_index', 'packbits', - 'promote_types', 'putmask', 'ravel_multi_index', 'result_type', 'scalar', - 'set_datetimeparse_function', 'set_legacy_print_mode', 'set_numeric_ops', - 'set_string_function', 'set_typeDict', 'shares_memory', 'test_interrupt', - 'tracemalloc_domain', 'typeinfo', 'unpackbits', 'unravel_index', 'vdot', - 'where', 'zeros'] -if sys.version_info.major < 3: - __all__ += ['newbuffer', 'getbuffer'] - -# For backward compatibility, make sure pickle imports these functions from here -_reconstruct.__module__ = 'numpy.core.multiarray' -scalar.__module__ = 'numpy.core.multiarray' - - -arange.__module__ = 'numpy' -array.__module__ = 'numpy' -datetime_data.__module__ = 'numpy' 
-empty.__module__ = 'numpy' -frombuffer.__module__ = 'numpy' -fromfile.__module__ = 'numpy' -fromiter.__module__ = 'numpy' -frompyfunc.__module__ = 'numpy' -fromstring.__module__ = 'numpy' -geterrobj.__module__ = 'numpy' -may_share_memory.__module__ = 'numpy' -nested_iters.__module__ = 'numpy' -promote_types.__module__ = 'numpy' -set_numeric_ops.__module__ = 'numpy' -seterrobj.__module__ = 'numpy' -zeros.__module__ = 'numpy' - - -# We can't verify dispatcher signatures because NumPy's C functions don't -# support introspection. -array_function_from_c_func_and_dispatcher = functools.partial( - overrides.array_function_from_dispatcher, - module='numpy', docs_from_dispatcher=True, verify=False) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like) -def empty_like(prototype, dtype=None, order=None, subok=None, shape=None): - """ - empty_like(prototype, dtype=None, order='K', subok=True, shape=None) - - Return a new array with the same shape and type as a given array. - - Parameters - ---------- - prototype : array_like - The shape and data-type of `prototype` define these same attributes - of the returned array. - dtype : data-type, optional - Overrides the data type of the result. - - .. versionadded:: 1.6.0 - order : {'C', 'F', 'A', or 'K'}, optional - Overrides the memory layout of the result. 'C' means C-order, - 'F' means F-order, 'A' means 'F' if ``prototype`` is Fortran - contiguous, 'C' otherwise. 'K' means match the layout of ``prototype`` - as closely as possible. - - .. versionadded:: 1.6.0 - subok : bool, optional. - If True, then the newly created array will use the sub-class - type of 'a', otherwise it will be a base-class array. Defaults - to True. - shape : int or sequence of ints, optional. - Overrides the shape of the result. If order='K' and the number of - dimensions is unchanged, will try to keep order, otherwise, - order='C' is implied. - - .. 
versionadded:: 1.17.0 - - Returns - ------- - out : ndarray - Array of uninitialized (arbitrary) data with the same - shape and type as `prototype`. - - See Also - -------- - ones_like : Return an array of ones with shape and type of input. - zeros_like : Return an array of zeros with shape and type of input. - full_like : Return a new array with shape of input filled with value. - empty : Return a new uninitialized array. - - Notes - ----- - This function does *not* initialize the returned array; to do that use - `zeros_like` or `ones_like` instead. It may be marginally faster than - the functions that do set the array values. - - Examples - -------- - >>> a = ([1,2,3], [4,5,6]) # a is array-like - >>> np.empty_like(a) - array([[-1073741821, -1073741821, 3], # uninitialized - [ 0, 0, -1073741821]]) - >>> a = np.array([[1., 2., 3.],[4.,5.,6.]]) - >>> np.empty_like(a) - array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # uninitialized - [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]]) - - """ - return (prototype,) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate) -def concatenate(arrays, axis=None, out=None): - """ - concatenate((a1, a2, ...), axis=0, out=None) - - Join a sequence of arrays along an existing axis. - - Parameters - ---------- - a1, a2, ... : sequence of array_like - The arrays must have the same shape, except in the dimension - corresponding to `axis` (the first, by default). - axis : int, optional - The axis along which the arrays will be joined. If axis is None, - arrays are flattened before use. Default is 0. - out : ndarray, optional - If provided, the destination to place the result. The shape must be - correct, matching that of what concatenate would have returned if no - out argument were specified. - - Returns - ------- - res : ndarray - The concatenated array. - - See Also - -------- - ma.concatenate : Concatenate function that preserves input masks. 
- array_split : Split an array into multiple sub-arrays of equal or - near-equal size. - split : Split array into a list of multiple sub-arrays of equal size. - hsplit : Split array into multiple sub-arrays horizontally (column wise) - vsplit : Split array into multiple sub-arrays vertically (row wise) - dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). - stack : Stack a sequence of arrays along a new axis. - hstack : Stack arrays in sequence horizontally (column wise) - vstack : Stack arrays in sequence vertically (row wise) - dstack : Stack arrays in sequence depth wise (along third dimension) - block : Assemble arrays from blocks. - - Notes - ----- - When one or more of the arrays to be concatenated is a MaskedArray, - this function will return a MaskedArray object instead of an ndarray, - but the input masks are *not* preserved. In cases where a MaskedArray - is expected as input, use the ma.concatenate function from the masked - array module instead. - - Examples - -------- - >>> a = np.array([[1, 2], [3, 4]]) - >>> b = np.array([[5, 6]]) - >>> np.concatenate((a, b), axis=0) - array([[1, 2], - [3, 4], - [5, 6]]) - >>> np.concatenate((a, b.T), axis=1) - array([[1, 2, 5], - [3, 4, 6]]) - >>> np.concatenate((a, b), axis=None) - array([1, 2, 3, 4, 5, 6]) - - This function will not preserve masking of MaskedArray inputs. 
- - >>> a = np.ma.arange(3) - >>> a[1] = np.ma.masked - >>> b = np.arange(2, 5) - >>> a - masked_array(data=[0, --, 2], - mask=[False, True, False], - fill_value=999999) - >>> b - array([2, 3, 4]) - >>> np.concatenate([a, b]) - masked_array(data=[0, 1, 2, 2, 3, 4], - mask=False, - fill_value=999999) - >>> np.ma.concatenate([a, b]) - masked_array(data=[0, --, 2, 2, 3, 4], - mask=[False, True, False, False, False, False], - fill_value=999999) - - """ - if out is not None: - # optimize for the typical case where only arrays is provided - arrays = list(arrays) - arrays.append(out) - return arrays - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner) -def inner(a, b): - """ - inner(a, b) - - Inner product of two arrays. - - Ordinary inner product of vectors for 1-D arrays (without complex - conjugation), in higher dimensions a sum product over the last axes. - - Parameters - ---------- - a, b : array_like - If `a` and `b` are nonscalar, their last dimensions must match. - - Returns - ------- - out : ndarray - `out.shape = a.shape[:-1] + b.shape[:-1]` - - Raises - ------ - ValueError - If the last dimension of `a` and `b` has different size. - - See Also - -------- - tensordot : Sum products over arbitrary axes. - dot : Generalised matrix product, using second last dimension of `b`. - einsum : Einstein summation convention. 
- - Notes - ----- - For vectors (1-D arrays) it computes the ordinary inner-product:: - - np.inner(a, b) = sum(a[:]*b[:]) - - More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`:: - - np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1)) - - or explicitly:: - - np.inner(a, b)[i0,...,ir-1,j0,...,js-1] - = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:]) - - In addition `a` or `b` may be scalars, in which case:: - - np.inner(a,b) = a*b - - Examples - -------- - Ordinary inner product for vectors: - - >>> a = np.array([1,2,3]) - >>> b = np.array([0,1,0]) - >>> np.inner(a, b) - 2 - - A multidimensional example: - - >>> a = np.arange(24).reshape((2,3,4)) - >>> b = np.arange(4) - >>> np.inner(a, b) - array([[ 14, 38, 62], - [ 86, 110, 134]]) - - An example where `b` is a scalar: - - >>> np.inner(np.eye(2), 7) - array([[7., 0.], - [0., 7.]]) - - """ - return (a, b) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.where) -def where(condition, x=None, y=None): - """ - where(condition, [x, y]) - - Return elements chosen from `x` or `y` depending on `condition`. - - .. note:: - When only `condition` is provided, this function is a shorthand for - ``np.asarray(condition).nonzero()``. Using `nonzero` directly should be - preferred, as it behaves correctly for subclasses. The rest of this - documentation covers only the case where all three arguments are - provided. - - Parameters - ---------- - condition : array_like, bool - Where True, yield `x`, otherwise yield `y`. - x, y : array_like - Values from which to choose. `x`, `y` and `condition` need to be - broadcastable to some shape. - - Returns - ------- - out : ndarray - An array with elements from `x` where `condition` is True, and elements - from `y` elsewhere. 
- - See Also - -------- - choose - nonzero : The function that is called when x and y are omitted - - Notes - ----- - If all the arrays are 1-D, `where` is equivalent to:: - - [xv if c else yv - for c, xv, yv in zip(condition, x, y)] - - Examples - -------- - >>> a = np.arange(10) - >>> a - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - >>> np.where(a < 5, a, 10*a) - array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90]) - - This can be used on multidimensional arrays too: - - >>> np.where([[True, False], [True, True]], - ... [[1, 2], [3, 4]], - ... [[9, 8], [7, 6]]) - array([[1, 8], - [3, 4]]) - - The shapes of x, y, and the condition are broadcast together: - - >>> x, y = np.ogrid[:3, :4] - >>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast - array([[10, 0, 0, 0], - [10, 11, 1, 1], - [10, 11, 12, 2]]) - - >>> a = np.array([[0, 1, 2], - ... [0, 2, 4], - ... [0, 3, 6]]) - >>> np.where(a < 4, a, -1) # -1 is broadcast - array([[ 0, 1, 2], - [ 0, 2, -1], - [ 0, 3, -1]]) - """ - return (condition, x, y) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort) -def lexsort(keys, axis=None): - """ - lexsort(keys, axis=-1) - - Perform an indirect stable sort using a sequence of keys. - - Given multiple sorting keys, which can be interpreted as columns in a - spreadsheet, lexsort returns an array of integer indices that describes - the sort order by multiple columns. The last key in the sequence is used - for the primary sort order, the second-to-last key for the secondary sort - order, and so on. The keys argument must be a sequence of objects that - can be converted to arrays of the same shape. If a 2D array is provided - for the keys argument, it's rows are interpreted as the sorting keys and - sorting is according to the last row, second last row etc. - - Parameters - ---------- - keys : (k, N) array or tuple containing k (N,)-shaped sequences - The `k` different "columns" to be sorted. 
The last column (or row if - `keys` is a 2D array) is the primary sort key. - axis : int, optional - Axis to be indirectly sorted. By default, sort over the last axis. - - Returns - ------- - indices : (N,) ndarray of ints - Array of indices that sort the keys along the specified axis. - - See Also - -------- - argsort : Indirect sort. - ndarray.sort : In-place sort. - sort : Return a sorted copy of an array. - - Examples - -------- - Sort names: first by surname, then by name. - - >>> surnames = ('Hertz', 'Galilei', 'Hertz') - >>> first_names = ('Heinrich', 'Galileo', 'Gustav') - >>> ind = np.lexsort((first_names, surnames)) - >>> ind - array([1, 2, 0]) - - >>> [surnames[i] + ", " + first_names[i] for i in ind] - ['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich'] - - Sort two columns of numbers: - - >>> a = [1,5,1,4,3,4,4] # First column - >>> b = [9,4,0,4,0,2,1] # Second column - >>> ind = np.lexsort((b,a)) # Sort by a, then by b - >>> ind - array([2, 0, 4, 6, 5, 3, 1]) - - >>> [(a[i],b[i]) for i in ind] - [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)] - - Note that sorting is first according to the elements of ``a``. - Secondary sorting is according to the elements of ``b``. - - A normal ``argsort`` would have yielded: - - >>> [(a[i],b[i]) for i in np.argsort(a)] - [(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)] - - Structured arrays are sorted lexically by ``argsort``: - - >>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)], - ... dtype=np.dtype([('x', int), ('y', int)])) - - >>> np.argsort(x) # or np.argsort(x, order=('x', 'y')) - array([2, 0, 4, 6, 5, 3, 1]) - - """ - if isinstance(keys, tuple): - return keys - else: - return (keys,) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast) -def can_cast(from_, to, casting=None): - """ - can_cast(from_, to, casting='safe') - - Returns True if cast between data types can occur according to the - casting rule. 
If from is a scalar or array scalar, also returns - True if the scalar value can be cast without overflow or truncation - to an integer. - - Parameters - ---------- - from_ : dtype, dtype specifier, scalar, or array - Data type, scalar, or array to cast from. - to : dtype or dtype specifier - Data type to cast to. - casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional - Controls what kind of data casting may occur. - - * 'no' means the data types should not be cast at all. - * 'equiv' means only byte-order changes are allowed. - * 'safe' means only casts which can preserve values are allowed. - * 'same_kind' means only safe casts or casts within a kind, - like float64 to float32, are allowed. - * 'unsafe' means any data conversions may be done. - - Returns - ------- - out : bool - True if cast can occur according to the casting rule. - - Notes - ----- - .. versionchanged:: 1.17.0 - Casting between a simple data type and a structured one is possible only - for "unsafe" casting. Casting to multiple fields is allowed, but - casting from multiple fields is not. - - .. versionchanged:: 1.9.0 - Casting from numeric to string types in 'safe' casting mode requires - that the string dtype length is long enough to store the maximum - integer/float value converted. 
- - See also - -------- - dtype, result_type - - Examples - -------- - Basic examples - - >>> np.can_cast(np.int32, np.int64) - True - >>> np.can_cast(np.float64, complex) - True - >>> np.can_cast(complex, float) - False - - >>> np.can_cast('i8', 'f8') - True - >>> np.can_cast('i8', 'f4') - False - >>> np.can_cast('i4', 'S4') - False - - Casting scalars - - >>> np.can_cast(100, 'i1') - True - >>> np.can_cast(150, 'i1') - False - >>> np.can_cast(150, 'u1') - True - - >>> np.can_cast(3.5e100, np.float32) - False - >>> np.can_cast(1000.0, np.float32) - True - - Array scalar checks the value, array does not - - >>> np.can_cast(np.array(1000.0), np.float32) - True - >>> np.can_cast(np.array([1000.0]), np.float32) - False - - Using the casting rules - - >>> np.can_cast('i8', 'i8', 'no') - True - >>> np.can_cast('i8', 'no') - False - - >>> np.can_cast('i8', 'equiv') - True - >>> np.can_cast('i8', 'equiv') - False - - >>> np.can_cast('i8', 'safe') - True - >>> np.can_cast('i4', 'safe') - False - - >>> np.can_cast('i4', 'same_kind') - True - >>> np.can_cast('u4', 'same_kind') - False - - >>> np.can_cast('u4', 'unsafe') - True - - """ - return (from_,) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type) -def min_scalar_type(a): - """ - min_scalar_type(a) - - For scalar ``a``, returns the data type with the smallest size - and smallest scalar kind which can hold its value. For non-scalar - array ``a``, returns the vector's dtype unmodified. - - Floating point values are not demoted to integers, - and complex values are not demoted to floats. - - Parameters - ---------- - a : scalar or array_like - The value whose minimal data type is to be found. - - Returns - ------- - out : dtype - The minimal data type. - - Notes - ----- - .. 
versionadded:: 1.6.0 - - See Also - -------- - result_type, promote_types, dtype, can_cast - - Examples - -------- - >>> np.min_scalar_type(10) - dtype('uint8') - - >>> np.min_scalar_type(-260) - dtype('int16') - - >>> np.min_scalar_type(3.1) - dtype('float16') - - >>> np.min_scalar_type(1e50) - dtype('float64') - - >>> np.min_scalar_type(np.arange(4,dtype='f8')) - dtype('float64') - - """ - return (a,) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type) -def result_type(*arrays_and_dtypes): - """ - result_type(*arrays_and_dtypes) - - Returns the type that results from applying the NumPy - type promotion rules to the arguments. - - Type promotion in NumPy works similarly to the rules in languages - like C++, with some slight differences. When both scalars and - arrays are used, the array's type takes precedence and the actual value - of the scalar is taken into account. - - For example, calculating 3*a, where a is an array of 32-bit floats, - intuitively should result in a 32-bit float output. If the 3 is a - 32-bit integer, the NumPy rules indicate it can't convert losslessly - into a 32-bit float, so a 64-bit float should be the result type. - By examining the value of the constant, '3', we see that it fits in - an 8-bit integer, which can be cast losslessly into the 32-bit float. - - Parameters - ---------- - arrays_and_dtypes : list of arrays and dtypes - The operands of some operation whose result type is needed. - - Returns - ------- - out : dtype - The result type. - - See also - -------- - dtype, promote_types, min_scalar_type, can_cast - - Notes - ----- - .. versionadded:: 1.6.0 - - The specific algorithm used is as follows. - - Categories are determined by first checking which of boolean, - integer (int/uint), or floating point (float/complex) the maximum - kind of all the arrays and the scalars are. 
- - If there are only scalars or the maximum category of the scalars - is higher than the maximum category of the arrays, - the data types are combined with :func:`promote_types` - to produce the return value. - - Otherwise, `min_scalar_type` is called on each array, and - the resulting data types are all combined with :func:`promote_types` - to produce the return value. - - The set of int values is not a subset of the uint values for types - with the same number of bits, something not reflected in - :func:`min_scalar_type`, but handled as a special case in `result_type`. - - Examples - -------- - >>> np.result_type(3, np.arange(7, dtype='i1')) - dtype('int8') - - >>> np.result_type('i4', 'c8') - dtype('complex128') - - >>> np.result_type(3.0, -2) - dtype('float64') - - """ - return arrays_and_dtypes - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot) -def dot(a, b, out=None): - """ - dot(a, b, out=None) - - Dot product of two arrays. Specifically, - - - If both `a` and `b` are 1-D arrays, it is inner product of vectors - (without complex conjugation). - - - If both `a` and `b` are 2-D arrays, it is matrix multiplication, - but using :func:`matmul` or ``a @ b`` is preferred. - - - If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply` - and using ``numpy.multiply(a, b)`` or ``a * b`` is preferred. - - - If `a` is an N-D array and `b` is a 1-D array, it is a sum product over - the last axis of `a` and `b`. - - - If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a - sum product over the last axis of `a` and the second-to-last axis of `b`:: - - dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m]) - - Parameters - ---------- - a : array_like - First argument. - b : array_like - Second argument. - out : ndarray, optional - Output argument. This must have the exact kind that would be returned - if it was not used. 
In particular, it must have the right type, must be - C-contiguous, and its dtype must be the dtype that would be returned - for `dot(a,b)`. This is a performance feature. Therefore, if these - conditions are not met, an exception is raised, instead of attempting - to be flexible. - - Returns - ------- - output : ndarray - Returns the dot product of `a` and `b`. If `a` and `b` are both - scalars or both 1-D arrays then a scalar is returned; otherwise - an array is returned. - If `out` is given, then it is returned. - - Raises - ------ - ValueError - If the last dimension of `a` is not the same size as - the second-to-last dimension of `b`. - - See Also - -------- - vdot : Complex-conjugating dot product. - tensordot : Sum products over arbitrary axes. - einsum : Einstein summation convention. - matmul : '@' operator as method with out parameter. - - Examples - -------- - >>> np.dot(3, 4) - 12 - - Neither argument is complex-conjugated: - - >>> np.dot([2j, 3j], [2j, 3j]) - (-13+0j) - - For 2-D arrays it is the matrix product: - - >>> a = [[1, 0], [0, 1]] - >>> b = [[4, 1], [2, 2]] - >>> np.dot(a, b) - array([[4, 1], - [2, 2]]) - - >>> a = np.arange(3*4*5*6).reshape((3,4,5,6)) - >>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3)) - >>> np.dot(a, b)[2,3,2,1,2,2] - 499128 - >>> sum(a[2,3,2,:] * b[1,2,:,2]) - 499128 - - """ - return (a, b, out) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot) -def vdot(a, b): - """ - vdot(a, b) - - Return the dot product of two vectors. - - The vdot(`a`, `b`) function handles complex numbers differently than - dot(`a`, `b`). If the first argument is complex the complex conjugate - of the first argument is used for the calculation of the dot product. - - Note that `vdot` handles multidimensional arrays differently than `dot`: - it does *not* perform a matrix product, but flattens input arguments - to 1-D vectors first. Consequently, it should only be used for vectors. 
- - Parameters - ---------- - a : array_like - If `a` is complex the complex conjugate is taken before calculation - of the dot product. - b : array_like - Second argument to the dot product. - - Returns - ------- - output : ndarray - Dot product of `a` and `b`. Can be an int, float, or - complex depending on the types of `a` and `b`. - - See Also - -------- - dot : Return the dot product without using the complex conjugate of the - first argument. - - Examples - -------- - >>> a = np.array([1+2j,3+4j]) - >>> b = np.array([5+6j,7+8j]) - >>> np.vdot(a, b) - (70-8j) - >>> np.vdot(b, a) - (70+8j) - - Note that higher-dimensional arrays are flattened! - - >>> a = np.array([[1, 4], [5, 6]]) - >>> b = np.array([[4, 1], [2, 2]]) - >>> np.vdot(a, b) - 30 - >>> np.vdot(b, a) - 30 - >>> 1*4 + 4*1 + 5*2 + 6*2 - 30 - - """ - return (a, b) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount) -def bincount(x, weights=None, minlength=None): - """ - bincount(x, weights=None, minlength=0) - - Count number of occurrences of each value in array of non-negative ints. - - The number of bins (of size 1) is one larger than the largest value in - `x`. If `minlength` is specified, there will be at least this number - of bins in the output array (though it will be longer if necessary, - depending on the contents of `x`). - Each bin gives the number of occurrences of its index value in `x`. - If `weights` is specified the input array is weighted by it, i.e. if a - value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead - of ``out[n] += 1``. - - Parameters - ---------- - x : array_like, 1 dimension, nonnegative ints - Input array. - weights : array_like, optional - Weights, array of the same shape as `x`. - minlength : int, optional - A minimum number of bins for the output array. - - .. versionadded:: 1.6.0 - - Returns - ------- - out : ndarray of ints - The result of binning the input array. - The length of `out` is equal to ``np.amax(x)+1``. 
- - Raises - ------ - ValueError - If the input is not 1-dimensional, or contains elements with negative - values, or if `minlength` is negative. - TypeError - If the type of the input is float or complex. - - See Also - -------- - histogram, digitize, unique - - Examples - -------- - >>> np.bincount(np.arange(5)) - array([1, 1, 1, 1, 1]) - >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7])) - array([1, 3, 1, 1, 0, 0, 0, 1]) - - >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23]) - >>> np.bincount(x).size == np.amax(x)+1 - True - - The input array needs to be of integer dtype, otherwise a - TypeError is raised: - - >>> np.bincount(np.arange(5, dtype=float)) - Traceback (most recent call last): - File "", line 1, in - TypeError: array cannot be safely cast to required type - - A possible use of ``bincount`` is to perform sums over - variable-size chunks of an array, using the ``weights`` keyword. - - >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights - >>> x = np.array([0, 1, 1, 2, 2, 2]) - >>> np.bincount(x, weights=w) - array([ 0.3, 0.7, 1.1]) - - """ - return (x, weights) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index) -def ravel_multi_index(multi_index, dims, mode=None, order=None): - """ - ravel_multi_index(multi_index, dims, mode='raise', order='C') - - Converts a tuple of index arrays into an array of flat - indices, applying boundary modes to the multi-index. - - Parameters - ---------- - multi_index : tuple of array_like - A tuple of integer arrays, one array for each dimension. - dims : tuple of ints - The shape of array into which the indices from ``multi_index`` apply. - mode : {'raise', 'wrap', 'clip'}, optional - Specifies how out-of-bounds indices are handled. Can specify - either one mode or a tuple of modes, one mode per index. - - * 'raise' -- raise an error (default) - * 'wrap' -- wrap around - * 'clip' -- clip to the range - - In 'clip' mode, a negative index which would normally - wrap will clip to 0 instead. 
- order : {'C', 'F'}, optional - Determines whether the multi-index should be viewed as - indexing in row-major (C-style) or column-major - (Fortran-style) order. - - Returns - ------- - raveled_indices : ndarray - An array of indices into the flattened version of an array - of dimensions ``dims``. - - See Also - -------- - unravel_index - - Notes - ----- - .. versionadded:: 1.6.0 - - Examples - -------- - >>> arr = np.array([[3,6,6],[4,5,1]]) - >>> np.ravel_multi_index(arr, (7,6)) - array([22, 41, 37]) - >>> np.ravel_multi_index(arr, (7,6), order='F') - array([31, 41, 13]) - >>> np.ravel_multi_index(arr, (4,6), mode='clip') - array([22, 23, 19]) - >>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap')) - array([12, 13, 13]) - - >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9)) - 1621 - """ - return multi_index - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index) -def unravel_index(indices, shape=None, order=None, dims=None): - """ - unravel_index(indices, shape, order='C') - - Converts a flat index or array of flat indices into a tuple - of coordinate arrays. - - Parameters - ---------- - indices : array_like - An integer array whose elements are indices into the flattened - version of an array of dimensions ``shape``. Before version 1.6.0, - this function accepted just one index value. - shape : tuple of ints - The shape of the array to use for unraveling ``indices``. - - .. versionchanged:: 1.16.0 - Renamed from ``dims`` to ``shape``. - - order : {'C', 'F'}, optional - Determines whether the indices should be viewed as indexing in - row-major (C-style) or column-major (Fortran-style) order. - - .. versionadded:: 1.6.0 - - Returns - ------- - unraveled_coords : tuple of ndarray - Each array in the tuple has the same shape as the ``indices`` - array. 
- - See Also - -------- - ravel_multi_index - - Examples - -------- - >>> np.unravel_index([22, 41, 37], (7,6)) - (array([3, 6, 6]), array([4, 5, 1])) - >>> np.unravel_index([31, 41, 13], (7,6), order='F') - (array([3, 6, 6]), array([4, 5, 1])) - - >>> np.unravel_index(1621, (6,7,8,9)) - (3, 1, 4, 1) - - """ - if dims is not None: - warnings.warn("'shape' argument should be used instead of 'dims'", - DeprecationWarning, stacklevel=3) - return (indices,) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto) -def copyto(dst, src, casting=None, where=None): - """ - copyto(dst, src, casting='same_kind', where=True) - - Copies values from one array to another, broadcasting as necessary. - - Raises a TypeError if the `casting` rule is violated, and if - `where` is provided, it selects which elements to copy. - - .. versionadded:: 1.7.0 - - Parameters - ---------- - dst : ndarray - The array into which values are copied. - src : array_like - The array from which values are copied. - casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional - Controls what kind of data casting may occur when copying. - - * 'no' means the data types should not be cast at all. - * 'equiv' means only byte-order changes are allowed. - * 'safe' means only casts which can preserve values are allowed. - * 'same_kind' means only safe casts or casts within a kind, - like float64 to float32, are allowed. - * 'unsafe' means any data conversions may be done. - where : array_like of bool, optional - A boolean array which is broadcasted to match the dimensions - of `dst`, and selects elements to copy from `src` to `dst` - wherever it contains the value True. - """ - return (dst, src, where) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask) -def putmask(a, mask, values): - """ - putmask(a, mask, values) - - Changes elements of an array based on conditional and input values. - - Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``. 
- - If `values` is not the same size as `a` and `mask` then it will repeat. - This gives behavior different from ``a[mask] = values``. - - Parameters - ---------- - a : array_like - Target array. - mask : array_like - Boolean mask array. It has to be the same shape as `a`. - values : array_like - Values to put into `a` where `mask` is True. If `values` is smaller - than `a` it will be repeated. - - See Also - -------- - place, put, take, copyto - - Examples - -------- - >>> x = np.arange(6).reshape(2, 3) - >>> np.putmask(x, x>2, x**2) - >>> x - array([[ 0, 1, 2], - [ 9, 16, 25]]) - - If `values` is smaller than `a` it is repeated: - - >>> x = np.arange(5) - >>> np.putmask(x, x>1, [-33, -44]) - >>> x - array([ 0, 1, -33, -44, -33]) - - """ - return (a, mask, values) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits) -def packbits(a, axis=None, bitorder='big'): - """ - packbits(a, axis=None, bitorder='big') - - Packs the elements of a binary-valued array into bits in a uint8 array. - - The result is padded to full bytes by inserting zero bits at the end. - - Parameters - ---------- - a : array_like - An array of integers or booleans whose elements should be packed to - bits. - axis : int, optional - The dimension over which bit-packing is done. - ``None`` implies packing the flattened array. - bitorder : {'big', 'little'}, optional - The order of the input bits. 'big' will mimic bin(val), - ``[0, 0, 0, 0, 0, 0, 1, 1] => 3 = 0b00000011 => ``, 'little' will - reverse the order so ``[1, 1, 0, 0, 0, 0, 0, 0] => 3``. - Defaults to 'big'. - - .. versionadded:: 1.17.0 - - Returns - ------- - packed : ndarray - Array of type uint8 whose elements represent bits corresponding to the - logical (0 or nonzero) value of the input elements. The shape of - `packed` has the same number of dimensions as the input (unless `axis` - is None, in which case the output is 1-D). 
- - See Also - -------- - unpackbits: Unpacks elements of a uint8 array into a binary-valued output - array. - - Examples - -------- - >>> a = np.array([[[1,0,1], - ... [0,1,0]], - ... [[1,1,0], - ... [0,0,1]]]) - >>> b = np.packbits(a, axis=-1) - >>> b - array([[[160], - [ 64]], - [[192], - [ 32]]], dtype=uint8) - - Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000, - and 32 = 0010 0000. - - """ - return (a,) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits) -def unpackbits(a, axis=None, count=None, bitorder='big'): - """ - unpackbits(a, axis=None, count=None, bitorder='big') - - Unpacks elements of a uint8 array into a binary-valued output array. - - Each element of `a` represents a bit-field that should be unpacked - into a binary-valued output array. The shape of the output array is - either 1-D (if `axis` is ``None``) or the same shape as the input - array with unpacking done along the axis specified. - - Parameters - ---------- - a : ndarray, uint8 type - Input array. - axis : int, optional - The dimension over which bit-unpacking is done. - ``None`` implies unpacking the flattened array. - count : int or None, optional - The number of elements to unpack along `axis`, provided as a way - of undoing the effect of packing a size that is not a multiple - of eight. A non-negative number means to only unpack `count` - bits. A negative number means to trim off that many bits from - the end. ``None`` means to unpack the entire array (the - default). Counts larger than the available number of bits will - add zero padding to the output. Negative counts must not - exceed the available number of bits. - - .. versionadded:: 1.17.0 - - bitorder : {'big', 'little'}, optional - The order of the returned bits. 'big' will mimic bin(val), - ``3 = 0b00000011 => [0, 0, 0, 0, 0, 0, 1, 1]``, 'little' will reverse - the order to ``[1, 1, 0, 0, 0, 0, 0, 0]``. - Defaults to 'big'. - - .. 
versionadded:: 1.17.0 - - Returns - ------- - unpacked : ndarray, uint8 type - The elements are binary-valued (0 or 1). - - See Also - -------- - packbits : Packs the elements of a binary-valued array into bits in - a uint8 array. - - Examples - -------- - >>> a = np.array([[2], [7], [23]], dtype=np.uint8) - >>> a - array([[ 2], - [ 7], - [23]], dtype=uint8) - >>> b = np.unpackbits(a, axis=1) - >>> b - array([[0, 0, 0, 0, 0, 0, 1, 0], - [0, 0, 0, 0, 0, 1, 1, 1], - [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8) - >>> c = np.unpackbits(a, axis=1, count=-3) - >>> c - array([[0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 1, 0]], dtype=uint8) - - >>> p = np.packbits(b, axis=0) - >>> np.unpackbits(p, axis=0) - array([[0, 0, 0, 0, 0, 0, 1, 0], - [0, 0, 0, 0, 0, 1, 1, 1], - [0, 0, 0, 1, 0, 1, 1, 1], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8) - >>> np.array_equal(b, np.unpackbits(p, axis=0, count=b.shape[0])) - True - - """ - return (a,) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory) -def shares_memory(a, b, max_work=None): - """ - shares_memory(a, b, max_work=None) - - Determine if two arrays share memory - - Parameters - ---------- - a, b : ndarray - Input arrays - max_work : int, optional - Effort to spend on solving the overlap problem (maximum number - of candidate solutions to consider). The following special - values are recognized: - - max_work=MAY_SHARE_EXACT (default) - The problem is solved exactly. In this case, the function returns - True only if there is an element shared between the arrays. - max_work=MAY_SHARE_BOUNDS - Only the memory bounds of a and b are checked. - - Raises - ------ - numpy.TooHardError - Exceeded max_work. 
- - Returns - ------- - out : bool - - See Also - -------- - may_share_memory - - Examples - -------- - >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) - False - - """ - return (a, b) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory) -def may_share_memory(a, b, max_work=None): - """ - may_share_memory(a, b, max_work=None) - - Determine if two arrays might share memory - - A return of True does not necessarily mean that the two arrays - share any element. It just means that they *might*. - - Only the memory bounds of a and b are checked by default. - - Parameters - ---------- - a, b : ndarray - Input arrays - max_work : int, optional - Effort to spend on solving the overlap problem. See - `shares_memory` for details. Default for ``may_share_memory`` - is to do a bounds check. - - Returns - ------- - out : bool - - See Also - -------- - shares_memory - - Examples - -------- - >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) - False - >>> x = np.zeros([3, 4]) - >>> np.may_share_memory(x[:,0], x[:,1]) - True - - """ - return (a, b) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday) -def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None): - """ - is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None) - - Calculates which of the given dates are valid days, and which are not. - - .. versionadded:: 1.7.0 - - Parameters - ---------- - dates : array_like of datetime64[D] - The array of dates to process. - weekmask : str or array_like of bool, optional - A seven-element array indicating which of Monday through Sunday are - valid days. May be specified as a length-seven list or array, like - [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string - like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for - weekdays, optionally separated by white space. 
Valid abbreviations - are: Mon Tue Wed Thu Fri Sat Sun - holidays : array_like of datetime64[D], optional - An array of dates to consider as invalid dates. They may be - specified in any order, and NaT (not-a-time) dates are ignored. - This list is saved in a normalized form that is suited for - fast calculations of valid days. - busdaycal : busdaycalendar, optional - A `busdaycalendar` object which specifies the valid days. If this - parameter is provided, neither weekmask nor holidays may be - provided. - out : array of bool, optional - If provided, this array is filled with the result. - - Returns - ------- - out : array of bool - An array with the same shape as ``dates``, containing True for - each valid day, and False for each invalid day. - - See Also - -------- - busdaycalendar: An object that specifies a custom set of valid days. - busday_offset : Applies an offset counted in valid days. - busday_count : Counts how many valid days are in a half-open date range. - - Examples - -------- - >>> # The weekdays are Friday, Saturday, and Monday - ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'], - ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) - array([False, False, True]) - """ - return (dates, weekmask, holidays, out) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset) -def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, - busdaycal=None, out=None): - """ - busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None) - - First adjusts the date to fall on a valid day according to - the ``roll`` rule, then applies offsets to the given dates - counted in valid days. - - .. versionadded:: 1.7.0 - - Parameters - ---------- - dates : array_like of datetime64[D] - The array of dates to process. - offsets : array_like of int - The array of offsets, which is broadcast with ``dates``. 
- roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional - How to treat dates that do not fall on a valid day. The default - is 'raise'. - - * 'raise' means to raise an exception for an invalid day. - * 'nat' means to return a NaT (not-a-time) for an invalid day. - * 'forward' and 'following' mean to take the first valid day - later in time. - * 'backward' and 'preceding' mean to take the first valid day - earlier in time. - * 'modifiedfollowing' means to take the first valid day - later in time unless it is across a Month boundary, in which - case to take the first valid day earlier in time. - * 'modifiedpreceding' means to take the first valid day - earlier in time unless it is across a Month boundary, in which - case to take the first valid day later in time. - weekmask : str or array_like of bool, optional - A seven-element array indicating which of Monday through Sunday are - valid days. May be specified as a length-seven list or array, like - [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string - like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for - weekdays, optionally separated by white space. Valid abbreviations - are: Mon Tue Wed Thu Fri Sat Sun - holidays : array_like of datetime64[D], optional - An array of dates to consider as invalid dates. They may be - specified in any order, and NaT (not-a-time) dates are ignored. - This list is saved in a normalized form that is suited for - fast calculations of valid days. - busdaycal : busdaycalendar, optional - A `busdaycalendar` object which specifies the valid days. If this - parameter is provided, neither weekmask nor holidays may be - provided. - out : array of datetime64[D], optional - If provided, this array is filled with the result. 
- - Returns - ------- - out : array of datetime64[D] - An array with a shape from broadcasting ``dates`` and ``offsets`` - together, containing the dates with offsets applied. - - See Also - -------- - busdaycalendar: An object that specifies a custom set of valid days. - is_busday : Returns a boolean array indicating valid days. - busday_count : Counts how many valid days are in a half-open date range. - - Examples - -------- - >>> # First business day in October 2011 (not accounting for holidays) - ... np.busday_offset('2011-10', 0, roll='forward') - numpy.datetime64('2011-10-03') - >>> # Last business day in February 2012 (not accounting for holidays) - ... np.busday_offset('2012-03', -1, roll='forward') - numpy.datetime64('2012-02-29') - >>> # Third Wednesday in January 2011 - ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed') - numpy.datetime64('2011-01-19') - >>> # 2012 Mother's Day in Canada and the U.S. - ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun') - numpy.datetime64('2012-05-13') - - >>> # First business day on or after a date - ... np.busday_offset('2011-03-20', 0, roll='forward') - numpy.datetime64('2011-03-21') - >>> np.busday_offset('2011-03-22', 0, roll='forward') - numpy.datetime64('2011-03-22') - >>> # First business day after a date - ... np.busday_offset('2011-03-20', 1, roll='backward') - numpy.datetime64('2011-03-21') - >>> np.busday_offset('2011-03-22', 1, roll='backward') - numpy.datetime64('2011-03-23') - """ - return (dates, offsets, weekmask, holidays, out) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count) -def busday_count(begindates, enddates, weekmask=None, holidays=None, - busdaycal=None, out=None): - """ - busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None) - - Counts the number of valid days between `begindates` and - `enddates`, not including the day of `enddates`. 
- - If ``enddates`` specifies a date value that is earlier than the - corresponding ``begindates`` date value, the count will be negative. - - .. versionadded:: 1.7.0 - - Parameters - ---------- - begindates : array_like of datetime64[D] - The array of the first dates for counting. - enddates : array_like of datetime64[D] - The array of the end dates for counting, which are excluded - from the count themselves. - weekmask : str or array_like of bool, optional - A seven-element array indicating which of Monday through Sunday are - valid days. May be specified as a length-seven list or array, like - [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string - like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for - weekdays, optionally separated by white space. Valid abbreviations - are: Mon Tue Wed Thu Fri Sat Sun - holidays : array_like of datetime64[D], optional - An array of dates to consider as invalid dates. They may be - specified in any order, and NaT (not-a-time) dates are ignored. - This list is saved in a normalized form that is suited for - fast calculations of valid days. - busdaycal : busdaycalendar, optional - A `busdaycalendar` object which specifies the valid days. If this - parameter is provided, neither weekmask nor holidays may be - provided. - out : array of int, optional - If provided, this array is filled with the result. - - Returns - ------- - out : array of int - An array with a shape from broadcasting ``begindates`` and ``enddates`` - together, containing the number of valid days between - the begin and end dates. - - See Also - -------- - busdaycalendar: An object that specifies a custom set of valid days. - is_busday : Returns a boolean array indicating valid days. - busday_offset : Applies an offset counted in valid days. - - Examples - -------- - >>> # Number of weekdays in January 2011 - ... 
np.busday_count('2011-01', '2011-02') - 21 - >>> # Number of weekdays in 2011 - >>> np.busday_count('2011', '2012') - 260 - >>> # Number of Saturdays in 2011 - ... np.busday_count('2011', '2012', weekmask='Sat') - 53 - """ - return (begindates, enddates, weekmask, holidays, out) - - -@array_function_from_c_func_and_dispatcher( - _multiarray_umath.datetime_as_string) -def datetime_as_string(arr, unit=None, timezone=None, casting=None): - """ - datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind') - - Convert an array of datetimes into an array of strings. - - Parameters - ---------- - arr : array_like of datetime64 - The array of UTC timestamps to format. - unit : str - One of None, 'auto', or a :ref:`datetime unit `. - timezone : {'naive', 'UTC', 'local'} or tzinfo - Timezone information to use when displaying the datetime. If 'UTC', end - with a Z to indicate UTC time. If 'local', convert to the local timezone - first, and suffix with a +-#### timezone offset. If a tzinfo object, - then do as with 'local', but use the specified timezone. - casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'} - Casting to allow when changing between datetime units. - - Returns - ------- - str_arr : ndarray - An array of strings the same shape as `arr`. 
- - Examples - -------- - >>> import pytz - >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]') - >>> d - array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30', - '2002-10-27T07:30'], dtype='datetime64[m]') - - Setting the timezone to UTC shows the same information, but with a Z suffix - - >>> np.datetime_as_string(d, timezone='UTC') - array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z', - '2002-10-27T07:30Z'], dtype='>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern')) - array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400', - '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='>> np.datetime_as_string(d, unit='h') - array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'], - dtype='>> np.datetime_as_string(d, unit='s') - array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00', - '2002-10-27T07:30:00'], dtype='>> np.datetime_as_string(d, unit='h', casting='safe') - Traceback (most recent call last): - ... - TypeError: Cannot create a datetime string as units 'h' from a NumPy - datetime with units 'm' according to the rule 'safe' - """ - return (arr,) diff --git a/venv/lib/python3.7/site-packages/numpy/core/numeric.py b/venv/lib/python3.7/site-packages/numpy/core/numeric.py deleted file mode 100644 index 1e011e2..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/numeric.py +++ /dev/null @@ -1,2411 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import functools -import itertools -import operator -import sys -import warnings -import numbers -import contextlib - -import numpy as np -from numpy.compat import pickle, basestring -from . 
import multiarray -from .multiarray import ( - _fastCopyAndTranspose as fastCopyAndTranspose, ALLOW_THREADS, - BUFSIZE, CLIP, MAXDIMS, MAY_SHARE_BOUNDS, MAY_SHARE_EXACT, RAISE, - WRAP, arange, array, broadcast, can_cast, compare_chararrays, - concatenate, copyto, dot, dtype, empty, - empty_like, flatiter, frombuffer, fromfile, fromiter, fromstring, - inner, int_asbuffer, lexsort, matmul, may_share_memory, - min_scalar_type, ndarray, nditer, nested_iters, promote_types, - putmask, result_type, set_numeric_ops, shares_memory, vdot, where, - zeros, normalize_axis_index) -if sys.version_info[0] < 3: - from .multiarray import newbuffer, getbuffer - -from . import overrides -from . import umath -from . import shape_base -from .overrides import set_module -from .umath import (multiply, invert, sin, PINF, NAN) -from . import numerictypes -from .numerictypes import longlong, intc, int_, float_, complex_, bool_ -from ._exceptions import TooHardError, AxisError -from ._asarray import asarray, asanyarray -from ._ufunc_config import errstate - -bitwise_not = invert -ufunc = type(sin) -newaxis = None - -if sys.version_info[0] >= 3: - import builtins -else: - import __builtin__ as builtins - - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - - -__all__ = [ - 'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc', - 'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype', - 'fromstring', 'fromfile', 'frombuffer', 'int_asbuffer', 'where', - 'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose', 'lexsort', - 'set_numeric_ops', 'can_cast', 'promote_types', 'min_scalar_type', - 'result_type', 'isfortran', 'empty_like', 'zeros_like', 'ones_like', - 'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll', - 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian', - 'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction', - 'isclose', 'isscalar', 'binary_repr', 
'base_repr', 'ones', - 'identity', 'allclose', 'compare_chararrays', 'putmask', - 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', - 'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', - 'BUFSIZE', 'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like', - 'matmul', 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS', - 'MAY_SHARE_EXACT', 'TooHardError', 'AxisError'] - -if sys.version_info[0] < 3: - __all__.extend(['getbuffer', 'newbuffer']) - - -@set_module('numpy') -class ComplexWarning(RuntimeWarning): - """ - The warning raised when casting a complex dtype to a real dtype. - - As implemented, casting a complex number to a real discards its imaginary - part, but this behavior may not be what the user actually wants. - - """ - pass - - -def _zeros_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None): - return (a,) - - -@array_function_dispatch(_zeros_like_dispatcher) -def zeros_like(a, dtype=None, order='K', subok=True, shape=None): - """ - Return an array of zeros with the same shape and type as a given array. - - Parameters - ---------- - a : array_like - The shape and data-type of `a` define these same attributes of - the returned array. - dtype : data-type, optional - Overrides the data type of the result. - - .. versionadded:: 1.6.0 - order : {'C', 'F', 'A', or 'K'}, optional - Overrides the memory layout of the result. 'C' means C-order, - 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, - 'C' otherwise. 'K' means match the layout of `a` as closely - as possible. - - .. versionadded:: 1.6.0 - subok : bool, optional. - If True, then the newly created array will use the sub-class - type of 'a', otherwise it will be a base-class array. Defaults - to True. - shape : int or sequence of ints, optional. - Overrides the shape of the result. If order='K' and the number of - dimensions is unchanged, will try to keep order, otherwise, - order='C' is implied. - - .. 
versionadded:: 1.17.0 - - Returns - ------- - out : ndarray - Array of zeros with the same shape and type as `a`. - - See Also - -------- - empty_like : Return an empty array with shape and type of input. - ones_like : Return an array of ones with shape and type of input. - full_like : Return a new array with shape of input filled with value. - zeros : Return a new array setting values to zero. - - Examples - -------- - >>> x = np.arange(6) - >>> x = x.reshape((2, 3)) - >>> x - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.zeros_like(x) - array([[0, 0, 0], - [0, 0, 0]]) - - >>> y = np.arange(3, dtype=float) - >>> y - array([0., 1., 2.]) - >>> np.zeros_like(y) - array([0., 0., 0.]) - - """ - res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape) - # needed instead of a 0 to get same result as zeros for for string dtypes - z = zeros(1, dtype=res.dtype) - multiarray.copyto(res, z, casting='unsafe') - return res - - -@set_module('numpy') -def ones(shape, dtype=None, order='C'): - """ - Return a new array of given shape and type, filled with ones. - - Parameters - ---------- - shape : int or sequence of ints - Shape of the new array, e.g., ``(2, 3)`` or ``2``. - dtype : data-type, optional - The desired data-type for the array, e.g., `numpy.int8`. Default is - `numpy.float64`. - order : {'C', 'F'}, optional, default: C - Whether to store multi-dimensional data in row-major - (C-style) or column-major (Fortran-style) order in - memory. - - Returns - ------- - out : ndarray - Array of ones with the given shape, dtype, and order. - - See Also - -------- - ones_like : Return an array of ones with shape and type of input. - empty : Return a new uninitialized array. - zeros : Return a new array setting values to zero. - full : Return a new array of given shape filled with value. 
- - - Examples - -------- - >>> np.ones(5) - array([1., 1., 1., 1., 1.]) - - >>> np.ones((5,), dtype=int) - array([1, 1, 1, 1, 1]) - - >>> np.ones((2, 1)) - array([[1.], - [1.]]) - - >>> s = (2,2) - >>> np.ones(s) - array([[1., 1.], - [1., 1.]]) - - """ - a = empty(shape, dtype, order) - multiarray.copyto(a, 1, casting='unsafe') - return a - - -def _ones_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None): - return (a,) - - -@array_function_dispatch(_ones_like_dispatcher) -def ones_like(a, dtype=None, order='K', subok=True, shape=None): - """ - Return an array of ones with the same shape and type as a given array. - - Parameters - ---------- - a : array_like - The shape and data-type of `a` define these same attributes of - the returned array. - dtype : data-type, optional - Overrides the data type of the result. - - .. versionadded:: 1.6.0 - order : {'C', 'F', 'A', or 'K'}, optional - Overrides the memory layout of the result. 'C' means C-order, - 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, - 'C' otherwise. 'K' means match the layout of `a` as closely - as possible. - - .. versionadded:: 1.6.0 - subok : bool, optional. - If True, then the newly created array will use the sub-class - type of 'a', otherwise it will be a base-class array. Defaults - to True. - shape : int or sequence of ints, optional. - Overrides the shape of the result. If order='K' and the number of - dimensions is unchanged, will try to keep order, otherwise, - order='C' is implied. - - .. versionadded:: 1.17.0 - - Returns - ------- - out : ndarray - Array of ones with the same shape and type as `a`. - - See Also - -------- - empty_like : Return an empty array with shape and type of input. - zeros_like : Return an array of zeros with shape and type of input. - full_like : Return a new array with shape of input filled with value. - ones : Return a new array setting values to one. 
- - Examples - -------- - >>> x = np.arange(6) - >>> x = x.reshape((2, 3)) - >>> x - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.ones_like(x) - array([[1, 1, 1], - [1, 1, 1]]) - - >>> y = np.arange(3, dtype=float) - >>> y - array([0., 1., 2.]) - >>> np.ones_like(y) - array([1., 1., 1.]) - - """ - res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape) - multiarray.copyto(res, 1, casting='unsafe') - return res - - -@set_module('numpy') -def full(shape, fill_value, dtype=None, order='C'): - """ - Return a new array of given shape and type, filled with `fill_value`. - - Parameters - ---------- - shape : int or sequence of ints - Shape of the new array, e.g., ``(2, 3)`` or ``2``. - fill_value : scalar - Fill value. - dtype : data-type, optional - The desired data-type for the array The default, None, means - `np.array(fill_value).dtype`. - order : {'C', 'F'}, optional - Whether to store multidimensional data in C- or Fortran-contiguous - (row- or column-wise) order in memory. - - Returns - ------- - out : ndarray - Array of `fill_value` with the given shape, dtype, and order. - - See Also - -------- - full_like : Return a new array with shape of input filled with value. - empty : Return a new uninitialized array. - ones : Return a new array setting values to one. - zeros : Return a new array setting values to zero. - - Examples - -------- - >>> np.full((2, 2), np.inf) - array([[inf, inf], - [inf, inf]]) - >>> np.full((2, 2), 10) - array([[10, 10], - [10, 10]]) - - """ - if dtype is None: - dtype = array(fill_value).dtype - a = empty(shape, dtype, order) - multiarray.copyto(a, fill_value, casting='unsafe') - return a - - -def _full_like_dispatcher(a, fill_value, dtype=None, order=None, subok=None, shape=None): - return (a,) - - -@array_function_dispatch(_full_like_dispatcher) -def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None): - """ - Return a full array with the same shape and type as a given array. 
- - Parameters - ---------- - a : array_like - The shape and data-type of `a` define these same attributes of - the returned array. - fill_value : scalar - Fill value. - dtype : data-type, optional - Overrides the data type of the result. - order : {'C', 'F', 'A', or 'K'}, optional - Overrides the memory layout of the result. 'C' means C-order, - 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, - 'C' otherwise. 'K' means match the layout of `a` as closely - as possible. - subok : bool, optional. - If True, then the newly created array will use the sub-class - type of 'a', otherwise it will be a base-class array. Defaults - to True. - shape : int or sequence of ints, optional. - Overrides the shape of the result. If order='K' and the number of - dimensions is unchanged, will try to keep order, otherwise, - order='C' is implied. - - .. versionadded:: 1.17.0 - - Returns - ------- - out : ndarray - Array of `fill_value` with the same shape and type as `a`. - - See Also - -------- - empty_like : Return an empty array with shape and type of input. - ones_like : Return an array of ones with shape and type of input. - zeros_like : Return an array of zeros with shape and type of input. - full : Return a new array of given shape filled with value. 
- - Examples - -------- - >>> x = np.arange(6, dtype=int) - >>> np.full_like(x, 1) - array([1, 1, 1, 1, 1, 1]) - >>> np.full_like(x, 0.1) - array([0, 0, 0, 0, 0, 0]) - >>> np.full_like(x, 0.1, dtype=np.double) - array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) - >>> np.full_like(x, np.nan, dtype=np.double) - array([nan, nan, nan, nan, nan, nan]) - - >>> y = np.arange(6, dtype=np.double) - >>> np.full_like(y, 0.1) - array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) - - """ - res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape) - multiarray.copyto(res, fill_value, casting='unsafe') - return res - - -def _count_nonzero_dispatcher(a, axis=None): - return (a,) - - -@array_function_dispatch(_count_nonzero_dispatcher) -def count_nonzero(a, axis=None): - """ - Counts the number of non-zero values in the array ``a``. - - The word "non-zero" is in reference to the Python 2.x - built-in method ``__nonzero__()`` (renamed ``__bool__()`` - in Python 3.x) of Python objects that tests an object's - "truthfulness". For example, any number is considered - truthful if it is nonzero, whereas any string is considered - truthful if it is not the empty string. Thus, this function - (recursively) counts how many elements in ``a`` (and in - sub-arrays thereof) have their ``__nonzero__()`` or ``__bool__()`` - method evaluated to ``True``. - - Parameters - ---------- - a : array_like - The array for which to count non-zeros. - axis : int or tuple, optional - Axis or tuple of axes along which to count non-zeros. - Default is None, meaning that non-zeros will be counted - along a flattened version of ``a``. - - .. versionadded:: 1.12.0 - - Returns - ------- - count : int or array of int - Number of non-zero values in the array along a given axis. - Otherwise, the total number of non-zero values in the array - is returned. - - See Also - -------- - nonzero : Return the coordinates of all the non-zero values. 
- - Examples - -------- - >>> np.count_nonzero(np.eye(4)) - 4 - >>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]]) - 5 - >>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]], axis=0) - array([1, 1, 1, 1, 1]) - >>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]], axis=1) - array([2, 3]) - - """ - if axis is None: - return multiarray.count_nonzero(a) - - a = asanyarray(a) - - # TODO: this works around .astype(bool) not working properly (gh-9847) - if np.issubdtype(a.dtype, np.character): - a_bool = a != a.dtype.type() - else: - a_bool = a.astype(np.bool_, copy=False) - - return a_bool.sum(axis=axis, dtype=np.intp) - - -@set_module('numpy') -def isfortran(a): - """ - Check if the array is Fortran contiguous but *not* C contiguous. - - This function is obsolete and, because of changes due to relaxed stride - checking, its return value for the same array may differ for versions - of NumPy >= 1.10.0 and previous versions. If you only want to check if an - array is Fortran contiguous use ``a.flags.f_contiguous`` instead. - - Parameters - ---------- - a : ndarray - Input array. - - Returns - ------- - isfortran : bool - Returns True if the array is Fortran contiguous but *not* C contiguous. - - - Examples - -------- - - np.array allows to specify whether the array is written in C-contiguous - order (last index varies the fastest), or FORTRAN-contiguous order in - memory (first index varies the fastest). - - >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') - >>> a - array([[1, 2, 3], - [4, 5, 6]]) - >>> np.isfortran(a) - False - - >>> b = np.array([[1, 2, 3], [4, 5, 6]], order='F') - >>> b - array([[1, 2, 3], - [4, 5, 6]]) - >>> np.isfortran(b) - True - - - The transpose of a C-ordered array is a FORTRAN-ordered array. 
- - >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') - >>> a - array([[1, 2, 3], - [4, 5, 6]]) - >>> np.isfortran(a) - False - >>> b = a.T - >>> b - array([[1, 4], - [2, 5], - [3, 6]]) - >>> np.isfortran(b) - True - - C-ordered arrays evaluate as False even if they are also FORTRAN-ordered. - - >>> np.isfortran(np.array([1, 2], order='F')) - False - - """ - return a.flags.fnc - - -def _argwhere_dispatcher(a): - return (a,) - - -@array_function_dispatch(_argwhere_dispatcher) -def argwhere(a): - """ - Find the indices of array elements that are non-zero, grouped by element. - - Parameters - ---------- - a : array_like - Input data. - - Returns - ------- - index_array : (N, a.ndim) ndarray - Indices of elements that are non-zero. Indices are grouped by element. - This array will have shape ``(N, a.ndim)`` where ``N`` is the number of - non-zero items. - - See Also - -------- - where, nonzero - - Notes - ----- - ``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``, - but produces a result of the correct shape for a 0D array. - - The output of ``argwhere`` is not suitable for indexing arrays. - For this purpose use ``nonzero(a)`` instead. - - Examples - -------- - >>> x = np.arange(6).reshape(2,3) - >>> x - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.argwhere(x>1) - array([[0, 2], - [1, 0], - [1, 1], - [1, 2]]) - - """ - # nonzero does not behave well on 0d, so promote to 1d - if np.ndim(a) == 0: - a = shape_base.atleast_1d(a) - # then remove the added dimension - return argwhere(a)[:,:0] - return transpose(nonzero(a)) - - -def _flatnonzero_dispatcher(a): - return (a,) - - -@array_function_dispatch(_flatnonzero_dispatcher) -def flatnonzero(a): - """ - Return indices that are non-zero in the flattened version of a. - - This is equivalent to np.nonzero(np.ravel(a))[0]. - - Parameters - ---------- - a : array_like - Input data. 
- - Returns - ------- - res : ndarray - Output array, containing the indices of the elements of `a.ravel()` - that are non-zero. - - See Also - -------- - nonzero : Return the indices of the non-zero elements of the input array. - ravel : Return a 1-D array containing the elements of the input array. - - Examples - -------- - >>> x = np.arange(-2, 3) - >>> x - array([-2, -1, 0, 1, 2]) - >>> np.flatnonzero(x) - array([0, 1, 3, 4]) - - Use the indices of the non-zero elements as an index array to extract - these elements: - - >>> x.ravel()[np.flatnonzero(x)] - array([-2, -1, 1, 2]) - - """ - return np.nonzero(np.ravel(a))[0] - - -_mode_from_name_dict = {'v': 0, - 's': 1, - 'f': 2} - - -def _mode_from_name(mode): - if isinstance(mode, basestring): - return _mode_from_name_dict[mode.lower()[0]] - return mode - - -def _correlate_dispatcher(a, v, mode=None): - return (a, v) - - -@array_function_dispatch(_correlate_dispatcher) -def correlate(a, v, mode='valid'): - """ - Cross-correlation of two 1-dimensional sequences. - - This function computes the correlation as generally defined in signal - processing texts:: - - c_{av}[k] = sum_n a[n+k] * conj(v[n]) - - with a and v sequences being zero-padded where necessary and conj being - the conjugate. - - Parameters - ---------- - a, v : array_like - Input sequences. - mode : {'valid', 'same', 'full'}, optional - Refer to the `convolve` docstring. Note that the default - is 'valid', unlike `convolve`, which uses 'full'. - old_behavior : bool - `old_behavior` was removed in NumPy 1.10. If you need the old - behavior, use `multiarray.correlate`. - - Returns - ------- - out : ndarray - Discrete cross-correlation of `a` and `v`. - - See Also - -------- - convolve : Discrete, linear convolution of two one-dimensional sequences. - multiarray.correlate : Old, no conjugate, version of correlate. - - Notes - ----- - The definition of correlation above is not unique and sometimes correlation - may be defined differently. 
Another common definition is:: - - c'_{av}[k] = sum_n a[n] conj(v[n+k]) - - which is related to ``c_{av}[k]`` by ``c'_{av}[k] = c_{av}[-k]``. - - Examples - -------- - >>> np.correlate([1, 2, 3], [0, 1, 0.5]) - array([3.5]) - >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same") - array([2. , 3.5, 3. ]) - >>> np.correlate([1, 2, 3], [0, 1, 0.5], "full") - array([0.5, 2. , 3.5, 3. , 0. ]) - - Using complex sequences: - - >>> np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full') - array([ 0.5-0.5j, 1.0+0.j , 1.5-1.5j, 3.0-1.j , 0.0+0.j ]) - - Note that you get the time reversed, complex conjugated result - when the two input sequences change places, i.e., - ``c_{va}[k] = c^{*}_{av}[-k]``: - - >>> np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full') - array([ 0.0+0.j , 3.0+1.j , 1.5+1.5j, 1.0+0.j , 0.5+0.5j]) - - """ - mode = _mode_from_name(mode) - return multiarray.correlate2(a, v, mode) - - -def _convolve_dispatcher(a, v, mode=None): - return (a, v) - - -@array_function_dispatch(_convolve_dispatcher) -def convolve(a, v, mode='full'): - """ - Returns the discrete, linear convolution of two one-dimensional sequences. - - The convolution operator is often seen in signal processing, where it - models the effect of a linear time-invariant system on a signal [1]_. In - probability theory, the sum of two independent random variables is - distributed according to the convolution of their individual - distributions. - - If `v` is longer than `a`, the arrays are swapped before computation. - - Parameters - ---------- - a : (N,) array_like - First one-dimensional input array. - v : (M,) array_like - Second one-dimensional input array. - mode : {'full', 'valid', 'same'}, optional - 'full': - By default, mode is 'full'. This returns the convolution - at each point of overlap, with an output shape of (N+M-1,). At - the end-points of the convolution, the signals do not overlap - completely, and boundary effects may be seen. 
- - 'same': - Mode 'same' returns output of length ``max(M, N)``. Boundary - effects are still visible. - - 'valid': - Mode 'valid' returns output of length - ``max(M, N) - min(M, N) + 1``. The convolution product is only given - for points where the signals overlap completely. Values outside - the signal boundary have no effect. - - Returns - ------- - out : ndarray - Discrete, linear convolution of `a` and `v`. - - See Also - -------- - scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier - Transform. - scipy.linalg.toeplitz : Used to construct the convolution operator. - polymul : Polynomial multiplication. Same output as convolve, but also - accepts poly1d objects as input. - - Notes - ----- - The discrete convolution operation is defined as - - .. math:: (a * v)[n] = \\sum_{m = -\\infty}^{\\infty} a[m] v[n - m] - - It can be shown that a convolution :math:`x(t) * y(t)` in time/space - is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier - domain, after appropriate padding (padding is necessary to prevent - circular convolution). Since multiplication is more efficient (faster) - than convolution, the function `scipy.signal.fftconvolve` exploits the - FFT to calculate the convolution of large data-sets. - - References - ---------- - .. [1] Wikipedia, "Convolution", - https://en.wikipedia.org/wiki/Convolution - - Examples - -------- - Note how the convolution operator flips the second array - before "sliding" the two across one another: - - >>> np.convolve([1, 2, 3], [0, 1, 0.5]) - array([0. , 1. , 2.5, 4. , 1.5]) - - Only return the middle values of the convolution. - Contains boundary effects, where zeros are taken - into account: - - >>> np.convolve([1,2,3],[0,1,0.5], 'same') - array([1. , 2.5, 4. 
]) - - The two arrays are of the same length, so there - is only one position where they completely overlap: - - >>> np.convolve([1,2,3],[0,1,0.5], 'valid') - array([2.5]) - - """ - a, v = array(a, copy=False, ndmin=1), array(v, copy=False, ndmin=1) - if (len(v) > len(a)): - a, v = v, a - if len(a) == 0: - raise ValueError('a cannot be empty') - if len(v) == 0: - raise ValueError('v cannot be empty') - mode = _mode_from_name(mode) - return multiarray.correlate(a, v[::-1], mode) - - -def _outer_dispatcher(a, b, out=None): - return (a, b, out) - - -@array_function_dispatch(_outer_dispatcher) -def outer(a, b, out=None): - """ - Compute the outer product of two vectors. - - Given two vectors, ``a = [a0, a1, ..., aM]`` and - ``b = [b0, b1, ..., bN]``, - the outer product [1]_ is:: - - [[a0*b0 a0*b1 ... a0*bN ] - [a1*b0 . - [ ... . - [aM*b0 aM*bN ]] - - Parameters - ---------- - a : (M,) array_like - First input vector. Input is flattened if - not already 1-dimensional. - b : (N,) array_like - Second input vector. Input is flattened if - not already 1-dimensional. - out : (M, N) ndarray, optional - A location where the result is stored - - .. versionadded:: 1.9.0 - - Returns - ------- - out : (M, N) ndarray - ``out[i, j] = a[i] * b[j]`` - - See also - -------- - inner - einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent. - ufunc.outer : A generalization to N dimensions and other operations. - ``np.multiply.outer(a.ravel(), b.ravel())`` is the equivalent. - - References - ---------- - .. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd - ed., Baltimore, MD, Johns Hopkins University Press, 1996, - pg. 8. 
- - Examples - -------- - Make a (*very* coarse) grid for computing a Mandelbrot set: - - >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5)) - >>> rl - array([[-2., -1., 0., 1., 2.], - [-2., -1., 0., 1., 2.], - [-2., -1., 0., 1., 2.], - [-2., -1., 0., 1., 2.], - [-2., -1., 0., 1., 2.]]) - >>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,))) - >>> im - array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j], - [0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j], - [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], - [0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j], - [0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]]) - >>> grid = rl + im - >>> grid - array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j], - [-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j], - [-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j], - [-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j], - [-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]]) - - An example using a "vector" of letters: - - >>> x = np.array(['a', 'b', 'c'], dtype=object) - >>> np.outer(x, [1, 2, 3]) - array([['a', 'aa', 'aaa'], - ['b', 'bb', 'bbb'], - ['c', 'cc', 'ccc']], dtype=object) - - """ - a = asarray(a) - b = asarray(b) - return multiply(a.ravel()[:, newaxis], b.ravel()[newaxis, :], out) - - -def _tensordot_dispatcher(a, b, axes=None): - return (a, b) - - -@array_function_dispatch(_tensordot_dispatcher) -def tensordot(a, b, axes=2): - """ - Compute tensor dot product along specified axes. - - Given two tensors, `a` and `b`, and an array_like object containing - two array_like objects, ``(a_axes, b_axes)``, sum the products of - `a`'s and `b`'s elements (components) over the axes specified by - ``a_axes`` and ``b_axes``. The third argument can be a single non-negative - integer_like scalar, ``N``; if it is such, then the last ``N`` dimensions - of `a` and the first ``N`` dimensions of `b` are summed over. - - Parameters - ---------- - a, b : array_like - Tensors to "dot". 
- - axes : int or (2,) array_like - * integer_like - If an int N, sum over the last N axes of `a` and the first N axes - of `b` in order. The sizes of the corresponding axes must match. - * (2,) array_like - Or, a list of axes to be summed over, first sequence applying to `a`, - second to `b`. Both elements array_like must be of the same length. - - Returns - ------- - output : ndarray - The tensor dot product of the input. - - See Also - -------- - dot, einsum - - Notes - ----- - Three common use cases are: - * ``axes = 0`` : tensor product :math:`a\\otimes b` - * ``axes = 1`` : tensor dot product :math:`a\\cdot b` - * ``axes = 2`` : (default) tensor double contraction :math:`a:b` - - When `axes` is integer_like, the sequence for evaluation will be: first - the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and - Nth axis in `b` last. - - When there is more than one axis to sum over - and they are not the last - (first) axes of `a` (`b`) - the argument `axes` should consist of - two sequences of the same length, with the first axis to sum over given - first in both sequences, the second axis second, and so forth. - - The shape of the result consists of the non-contracted axes of the - first tensor, followed by the non-contracted axes of the second. - - Examples - -------- - A "traditional" example: - - >>> a = np.arange(60.).reshape(3,4,5) - >>> b = np.arange(24.).reshape(4,3,2) - >>> c = np.tensordot(a,b, axes=([1,0],[0,1])) - >>> c.shape - (5, 2) - >>> c - array([[4400., 4730.], - [4532., 4874.], - [4664., 5018.], - [4796., 5162.], - [4928., 5306.]]) - >>> # A slower but equivalent way of computing the same... - >>> d = np.zeros((5,2)) - >>> for i in range(5): - ... for j in range(2): - ... for k in range(3): - ... for n in range(4): - ... 
d[i,j] += a[k,n,i] * b[n,k,j] - >>> c == d - array([[ True, True], - [ True, True], - [ True, True], - [ True, True], - [ True, True]]) - - An extended example taking advantage of the overloading of + and \\*: - - >>> a = np.array(range(1, 9)) - >>> a.shape = (2, 2, 2) - >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object) - >>> A.shape = (2, 2) - >>> a; A - array([[[1, 2], - [3, 4]], - [[5, 6], - [7, 8]]]) - array([['a', 'b'], - ['c', 'd']], dtype=object) - - >>> np.tensordot(a, A) # third argument default is 2 for double-contraction - array(['abbcccdddd', 'aaaaabbbbbbcccccccdddddddd'], dtype=object) - - >>> np.tensordot(a, A, 1) - array([[['acc', 'bdd'], - ['aaacccc', 'bbbdddd']], - [['aaaaacccccc', 'bbbbbdddddd'], - ['aaaaaaacccccccc', 'bbbbbbbdddddddd']]], dtype=object) - - >>> np.tensordot(a, A, 0) # tensor product (result too long to incl.) - array([[[[['a', 'b'], - ['c', 'd']], - ... - - >>> np.tensordot(a, A, (0, 1)) - array([[['abbbbb', 'cddddd'], - ['aabbbbbb', 'ccdddddd']], - [['aaabbbbbbb', 'cccddddddd'], - ['aaaabbbbbbbb', 'ccccdddddddd']]], dtype=object) - - >>> np.tensordot(a, A, (2, 1)) - array([[['abb', 'cdd'], - ['aaabbbb', 'cccdddd']], - [['aaaaabbbbbb', 'cccccdddddd'], - ['aaaaaaabbbbbbbb', 'cccccccdddddddd']]], dtype=object) - - >>> np.tensordot(a, A, ((0, 1), (0, 1))) - array(['abbbcccccddddddd', 'aabbbbccccccdddddddd'], dtype=object) - - >>> np.tensordot(a, A, ((2, 1), (1, 0))) - array(['acccbbdddd', 'aaaaacccccccbbbbbbdddddddd'], dtype=object) - - """ - try: - iter(axes) - except Exception: - axes_a = list(range(-axes, 0)) - axes_b = list(range(0, axes)) - else: - axes_a, axes_b = axes - try: - na = len(axes_a) - axes_a = list(axes_a) - except TypeError: - axes_a = [axes_a] - na = 1 - try: - nb = len(axes_b) - axes_b = list(axes_b) - except TypeError: - axes_b = [axes_b] - nb = 1 - - a, b = asarray(a), asarray(b) - as_ = a.shape - nda = a.ndim - bs = b.shape - ndb = b.ndim - equal = True - if na != nb: - equal = False - else: - for k in 
range(na): - if as_[axes_a[k]] != bs[axes_b[k]]: - equal = False - break - if axes_a[k] < 0: - axes_a[k] += nda - if axes_b[k] < 0: - axes_b[k] += ndb - if not equal: - raise ValueError("shape-mismatch for sum") - - # Move the axes to sum over to the end of "a" - # and to the front of "b" - notin = [k for k in range(nda) if k not in axes_a] - newaxes_a = notin + axes_a - N2 = 1 - for axis in axes_a: - N2 *= as_[axis] - newshape_a = (int(multiply.reduce([as_[ax] for ax in notin])), N2) - olda = [as_[axis] for axis in notin] - - notin = [k for k in range(ndb) if k not in axes_b] - newaxes_b = axes_b + notin - N2 = 1 - for axis in axes_b: - N2 *= bs[axis] - newshape_b = (N2, int(multiply.reduce([bs[ax] for ax in notin]))) - oldb = [bs[axis] for axis in notin] - - at = a.transpose(newaxes_a).reshape(newshape_a) - bt = b.transpose(newaxes_b).reshape(newshape_b) - res = dot(at, bt) - return res.reshape(olda + oldb) - - -def _roll_dispatcher(a, shift, axis=None): - return (a,) - - -@array_function_dispatch(_roll_dispatcher) -def roll(a, shift, axis=None): - """ - Roll array elements along a given axis. - - Elements that roll beyond the last position are re-introduced at - the first. - - Parameters - ---------- - a : array_like - Input array. - shift : int or tuple of ints - The number of places by which elements are shifted. If a tuple, - then `axis` must be a tuple of the same size, and each of the - given axes is shifted by the corresponding number. If an int - while `axis` is a tuple of ints, then the same value is used for - all given axes. - axis : int or tuple of ints, optional - Axis or axes along which elements are shifted. By default, the - array is flattened before shifting, after which the original - shape is restored. - - Returns - ------- - res : ndarray - Output array, with the same shape as `a`. - - See Also - -------- - rollaxis : Roll the specified axis backwards, until it lies in a - given position. - - Notes - ----- - .. 
versionadded:: 1.12.0 - - Supports rolling over multiple dimensions simultaneously. - - Examples - -------- - >>> x = np.arange(10) - >>> np.roll(x, 2) - array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]) - >>> np.roll(x, -2) - array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1]) - - >>> x2 = np.reshape(x, (2,5)) - >>> x2 - array([[0, 1, 2, 3, 4], - [5, 6, 7, 8, 9]]) - >>> np.roll(x2, 1) - array([[9, 0, 1, 2, 3], - [4, 5, 6, 7, 8]]) - >>> np.roll(x2, -1) - array([[1, 2, 3, 4, 5], - [6, 7, 8, 9, 0]]) - >>> np.roll(x2, 1, axis=0) - array([[5, 6, 7, 8, 9], - [0, 1, 2, 3, 4]]) - >>> np.roll(x2, -1, axis=0) - array([[5, 6, 7, 8, 9], - [0, 1, 2, 3, 4]]) - >>> np.roll(x2, 1, axis=1) - array([[4, 0, 1, 2, 3], - [9, 5, 6, 7, 8]]) - >>> np.roll(x2, -1, axis=1) - array([[1, 2, 3, 4, 0], - [6, 7, 8, 9, 5]]) - - """ - a = asanyarray(a) - if axis is None: - return roll(a.ravel(), shift, 0).reshape(a.shape) - - else: - axis = normalize_axis_tuple(axis, a.ndim, allow_duplicate=True) - broadcasted = broadcast(shift, axis) - if broadcasted.ndim > 1: - raise ValueError( - "'shift' and 'axis' should be scalars or 1D sequences") - shifts = {ax: 0 for ax in range(a.ndim)} - for sh, ax in broadcasted: - shifts[ax] += sh - - rolls = [((slice(None), slice(None)),)] * a.ndim - for ax, offset in shifts.items(): - offset %= a.shape[ax] or 1 # If `a` is empty, nothing matters. - if offset: - # (original, result), (original, result) - rolls[ax] = ((slice(None, -offset), slice(offset, None)), - (slice(-offset, None), slice(None, offset))) - - result = empty_like(a) - for indices in itertools.product(*rolls): - arr_index, res_index = zip(*indices) - result[res_index] = a[arr_index] - - return result - - -def _rollaxis_dispatcher(a, axis, start=None): - return (a,) - - -@array_function_dispatch(_rollaxis_dispatcher) -def rollaxis(a, axis, start=0): - """ - Roll the specified axis backwards, until it lies in a given position. 
- - This function continues to be supported for backward compatibility, but you - should prefer `moveaxis`. The `moveaxis` function was added in NumPy - 1.11. - - Parameters - ---------- - a : ndarray - Input array. - axis : int - The axis to roll backwards. The positions of the other axes do not - change relative to one another. - start : int, optional - The axis is rolled until it lies before this position. The default, - 0, results in a "complete" roll. - - Returns - ------- - res : ndarray - For NumPy >= 1.10.0 a view of `a` is always returned. For earlier - NumPy versions a view of `a` is returned only if the order of the - axes is changed, otherwise the input array is returned. - - See Also - -------- - moveaxis : Move array axes to new positions. - roll : Roll the elements of an array by a number of positions along a - given axis. - - Examples - -------- - >>> a = np.ones((3,4,5,6)) - >>> np.rollaxis(a, 3, 1).shape - (3, 6, 4, 5) - >>> np.rollaxis(a, 2).shape - (5, 3, 4, 6) - >>> np.rollaxis(a, 1, 4).shape - (3, 5, 6, 4) - - """ - n = a.ndim - axis = normalize_axis_index(axis, n) - if start < 0: - start += n - msg = "'%s' arg requires %d <= %s < %d, but %d was passed in" - if not (0 <= start < n + 1): - raise AxisError(msg % ('start', -n, 'start', n + 1, start)) - if axis < start: - # it's been removed - start -= 1 - if axis == start: - return a[...] - axes = list(range(0, n)) - axes.remove(axis) - axes.insert(start, axis) - return a.transpose(axes) - - -def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False): - """ - Normalizes an axis argument into a tuple of non-negative integer axes. - - This handles shorthands such as ``1`` and converts them to ``(1,)``, - as well as performing the handling of negative indices covered by - `normalize_axis_index`. - - By default, this forbids axes from being specified multiple times. - - Used internally by multi-axis-checking logic. - - .. 
versionadded:: 1.13.0 - - Parameters - ---------- - axis : int, iterable of int - The un-normalized index or indices of the axis. - ndim : int - The number of dimensions of the array that `axis` should be normalized - against. - argname : str, optional - A prefix to put before the error message, typically the name of the - argument. - allow_duplicate : bool, optional - If False, the default, disallow an axis from being specified twice. - - Returns - ------- - normalized_axes : tuple of int - The normalized axis index, such that `0 <= normalized_axis < ndim` - - Raises - ------ - AxisError - If any axis provided is out of range - ValueError - If an axis is repeated - - See also - -------- - normalize_axis_index : normalizing a single scalar axis - """ - # Optimization to speed-up the most common cases. - if type(axis) not in (tuple, list): - try: - axis = [operator.index(axis)] - except TypeError: - pass - # Going via an iterator directly is slower than via list comprehension. - axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis]) - if not allow_duplicate and len(set(axis)) != len(axis): - if argname: - raise ValueError('repeated axis in `{}` argument'.format(argname)) - else: - raise ValueError('repeated axis') - return axis - - -def _moveaxis_dispatcher(a, source, destination): - return (a,) - - -@array_function_dispatch(_moveaxis_dispatcher) -def moveaxis(a, source, destination): - """ - Move axes of an array to new positions. - - Other axes remain in their original order. - - .. versionadded:: 1.11.0 - - Parameters - ---------- - a : np.ndarray - The array whose axes should be reordered. - source : int or sequence of int - Original positions of the axes to move. These must be unique. - destination : int or sequence of int - Destination positions for each of the original axes. These must also be - unique. - - Returns - ------- - result : np.ndarray - Array with moved axes. This array is a view of the input array. 
- - See Also - -------- - transpose: Permute the dimensions of an array. - swapaxes: Interchange two axes of an array. - - Examples - -------- - - >>> x = np.zeros((3, 4, 5)) - >>> np.moveaxis(x, 0, -1).shape - (4, 5, 3) - >>> np.moveaxis(x, -1, 0).shape - (5, 3, 4) - - These all achieve the same result: - - >>> np.transpose(x).shape - (5, 4, 3) - >>> np.swapaxes(x, 0, -1).shape - (5, 4, 3) - >>> np.moveaxis(x, [0, 1], [-1, -2]).shape - (5, 4, 3) - >>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape - (5, 4, 3) - - """ - try: - # allow duck-array types if they define transpose - transpose = a.transpose - except AttributeError: - a = asarray(a) - transpose = a.transpose - - source = normalize_axis_tuple(source, a.ndim, 'source') - destination = normalize_axis_tuple(destination, a.ndim, 'destination') - if len(source) != len(destination): - raise ValueError('`source` and `destination` arguments must have ' - 'the same number of elements') - - order = [n for n in range(a.ndim) if n not in source] - - for dest, src in sorted(zip(destination, source)): - order.insert(dest, src) - - result = transpose(order) - return result - - -# fix hack in scipy which imports this function -def _move_axis_to_0(a, axis): - return moveaxis(a, axis, 0) - - -def _cross_dispatcher(a, b, axisa=None, axisb=None, axisc=None, axis=None): - return (a, b) - - -@array_function_dispatch(_cross_dispatcher) -def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): - """ - Return the cross product of two (arrays of) vectors. - - The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular - to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors - are defined by the last axis of `a` and `b` by default, and these axes - can have dimensions 2 or 3. Where the dimension of either `a` or `b` is - 2, the third component of the input vector is assumed to be zero and the - cross product calculated accordingly. 
In cases where both input vectors - have dimension 2, the z-component of the cross product is returned. - - Parameters - ---------- - a : array_like - Components of the first vector(s). - b : array_like - Components of the second vector(s). - axisa : int, optional - Axis of `a` that defines the vector(s). By default, the last axis. - axisb : int, optional - Axis of `b` that defines the vector(s). By default, the last axis. - axisc : int, optional - Axis of `c` containing the cross product vector(s). Ignored if - both input vectors have dimension 2, as the return is scalar. - By default, the last axis. - axis : int, optional - If defined, the axis of `a`, `b` and `c` that defines the vector(s) - and cross product(s). Overrides `axisa`, `axisb` and `axisc`. - - Returns - ------- - c : ndarray - Vector cross product(s). - - Raises - ------ - ValueError - When the dimension of the vector(s) in `a` and/or `b` does not - equal 2 or 3. - - See Also - -------- - inner : Inner product - outer : Outer product. - ix_ : Construct index arrays. - - Notes - ----- - .. versionadded:: 1.9.0 - - Supports full broadcasting of the inputs. - - Examples - -------- - Vector cross-product. - - >>> x = [1, 2, 3] - >>> y = [4, 5, 6] - >>> np.cross(x, y) - array([-3, 6, -3]) - - One vector with dimension 2. - - >>> x = [1, 2] - >>> y = [4, 5, 6] - >>> np.cross(x, y) - array([12, -6, -3]) - - Equivalently: - - >>> x = [1, 2, 0] - >>> y = [4, 5, 6] - >>> np.cross(x, y) - array([12, -6, -3]) - - Both vectors with dimension 2. - - >>> x = [1,2] - >>> y = [4,5] - >>> np.cross(x, y) - array(-3) - - Multiple vector cross-products. Note that the direction of the cross - product vector is defined by the `right-hand rule`. - - >>> x = np.array([[1,2,3], [4,5,6]]) - >>> y = np.array([[4,5,6], [1,2,3]]) - >>> np.cross(x, y) - array([[-3, 6, -3], - [ 3, -6, 3]]) - - The orientation of `c` can be changed using the `axisc` keyword. 
- - >>> np.cross(x, y, axisc=0) - array([[-3, 3], - [ 6, -6], - [-3, 3]]) - - Change the vector definition of `x` and `y` using `axisa` and `axisb`. - - >>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]]) - >>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]]) - >>> np.cross(x, y) - array([[ -6, 12, -6], - [ 0, 0, 0], - [ 6, -12, 6]]) - >>> np.cross(x, y, axisa=0, axisb=0) - array([[-24, 48, -24], - [-30, 60, -30], - [-36, 72, -36]]) - - """ - if axis is not None: - axisa, axisb, axisc = (axis,) * 3 - a = asarray(a) - b = asarray(b) - # Check axisa and axisb are within bounds - axisa = normalize_axis_index(axisa, a.ndim, msg_prefix='axisa') - axisb = normalize_axis_index(axisb, b.ndim, msg_prefix='axisb') - - # Move working axis to the end of the shape - a = moveaxis(a, axisa, -1) - b = moveaxis(b, axisb, -1) - msg = ("incompatible dimensions for cross product\n" - "(dimension must be 2 or 3)") - if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3): - raise ValueError(msg) - - # Create the output array - shape = broadcast(a[..., 0], b[..., 0]).shape - if a.shape[-1] == 3 or b.shape[-1] == 3: - shape += (3,) - # Check axisc is within bounds - axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc') - dtype = promote_types(a.dtype, b.dtype) - cp = empty(shape, dtype) - - # create local aliases for readability - a0 = a[..., 0] - a1 = a[..., 1] - if a.shape[-1] == 3: - a2 = a[..., 2] - b0 = b[..., 0] - b1 = b[..., 1] - if b.shape[-1] == 3: - b2 = b[..., 2] - if cp.ndim != 0 and cp.shape[-1] == 3: - cp0 = cp[..., 0] - cp1 = cp[..., 1] - cp2 = cp[..., 2] - - if a.shape[-1] == 2: - if b.shape[-1] == 2: - # a0 * b1 - a1 * b0 - multiply(a0, b1, out=cp) - cp -= a1 * b0 - return cp - else: - assert b.shape[-1] == 3 - # cp0 = a1 * b2 - 0 (a2 = 0) - # cp1 = 0 - a0 * b2 (a2 = 0) - # cp2 = a0 * b1 - a1 * b0 - multiply(a1, b2, out=cp0) - multiply(a0, b2, out=cp1) - negative(cp1, out=cp1) - multiply(a0, b1, out=cp2) - cp2 -= a1 * b0 - else: - assert a.shape[-1] == 3 - if 
b.shape[-1] == 3: - # cp0 = a1 * b2 - a2 * b1 - # cp1 = a2 * b0 - a0 * b2 - # cp2 = a0 * b1 - a1 * b0 - multiply(a1, b2, out=cp0) - tmp = array(a2 * b1) - cp0 -= tmp - multiply(a2, b0, out=cp1) - multiply(a0, b2, out=tmp) - cp1 -= tmp - multiply(a0, b1, out=cp2) - multiply(a1, b0, out=tmp) - cp2 -= tmp - else: - assert b.shape[-1] == 2 - # cp0 = 0 - a2 * b1 (b2 = 0) - # cp1 = a2 * b0 - 0 (b2 = 0) - # cp2 = a0 * b1 - a1 * b0 - multiply(a2, b1, out=cp0) - negative(cp0, out=cp0) - multiply(a2, b0, out=cp1) - multiply(a0, b1, out=cp2) - cp2 -= a1 * b0 - - return moveaxis(cp, -1, axisc) - - -little_endian = (sys.byteorder == 'little') - - -@set_module('numpy') -def indices(dimensions, dtype=int, sparse=False): - """ - Return an array representing the indices of a grid. - - Compute an array where the subarrays contain index values 0, 1, ... - varying only along the corresponding axis. - - Parameters - ---------- - dimensions : sequence of ints - The shape of the grid. - dtype : dtype, optional - Data type of the result. - sparse : boolean, optional - Return a sparse representation of the grid instead of a dense - representation. Default is False. - - .. versionadded:: 1.17 - - Returns - ------- - grid : one ndarray or tuple of ndarrays - If sparse is False: - Returns one array of grid indices, - ``grid.shape = (len(dimensions),) + tuple(dimensions)``. - If sparse is True: - Returns a tuple of arrays, with - ``grid[i].shape = (1, ..., 1, dimensions[i], 1, ..., 1)`` with - dimensions[i] in the ith place - - See Also - -------- - mgrid, ogrid, meshgrid - - Notes - ----- - The output shape in the dense case is obtained by prepending the number - of dimensions in front of the tuple of dimensions, i.e. if `dimensions` - is a tuple ``(r0, ..., rN-1)`` of length ``N``, the output shape is - ``(N, r0, ..., rN-1)``. - - The subarrays ``grid[k]`` contains the N-D array of indices along the - ``k-th`` axis. 
Explicitly:: - - grid[k, i0, i1, ..., iN-1] = ik - - Examples - -------- - >>> grid = np.indices((2, 3)) - >>> grid.shape - (2, 2, 3) - >>> grid[0] # row indices - array([[0, 0, 0], - [1, 1, 1]]) - >>> grid[1] # column indices - array([[0, 1, 2], - [0, 1, 2]]) - - The indices can be used as an index into an array. - - >>> x = np.arange(20).reshape(5, 4) - >>> row, col = np.indices((2, 3)) - >>> x[row, col] - array([[0, 1, 2], - [4, 5, 6]]) - - Note that it would be more straightforward in the above example to - extract the required elements directly with ``x[:2, :3]``. - - If sparse is set to true, the grid will be returned in a sparse - representation. - - >>> i, j = np.indices((2, 3), sparse=True) - >>> i.shape - (2, 1) - >>> j.shape - (1, 3) - >>> i # row indices - array([[0], - [1]]) - >>> j # column indices - array([[0, 1, 2]]) - - """ - dimensions = tuple(dimensions) - N = len(dimensions) - shape = (1,)*N - if sparse: - res = tuple() - else: - res = empty((N,)+dimensions, dtype=dtype) - for i, dim in enumerate(dimensions): - idx = arange(dim, dtype=dtype).reshape( - shape[:i] + (dim,) + shape[i+1:] - ) - if sparse: - res = res + (idx,) - else: - res[i] = idx - return res - - -@set_module('numpy') -def fromfunction(function, shape, **kwargs): - """ - Construct an array by executing a function over each coordinate. - - The resulting array therefore has a value ``fn(x, y, z)`` at - coordinate ``(x, y, z)``. - - Parameters - ---------- - function : callable - The function is called with N parameters, where N is the rank of - `shape`. Each parameter represents the coordinates of the array - varying along a specific axis. For example, if `shape` - were ``(2, 2)``, then the parameters would be - ``array([[0, 0], [1, 1]])`` and ``array([[0, 1], [0, 1]])`` - shape : (N,) tuple of ints - Shape of the output array, which also determines the shape of - the coordinate arrays passed to `function`. 
- dtype : data-type, optional - Data-type of the coordinate arrays passed to `function`. - By default, `dtype` is float. - - Returns - ------- - fromfunction : any - The result of the call to `function` is passed back directly. - Therefore the shape of `fromfunction` is completely determined by - `function`. If `function` returns a scalar value, the shape of - `fromfunction` would not match the `shape` parameter. - - See Also - -------- - indices, meshgrid - - Notes - ----- - Keywords other than `dtype` are passed to `function`. - - Examples - -------- - >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int) - array([[ True, False, False], - [False, True, False], - [False, False, True]]) - - >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int) - array([[0, 1, 2], - [1, 2, 3], - [2, 3, 4]]) - - """ - dtype = kwargs.pop('dtype', float) - args = indices(shape, dtype=dtype) - return function(*args, **kwargs) - - -def _frombuffer(buf, dtype, shape, order): - return frombuffer(buf, dtype=dtype).reshape(shape, order=order) - - -@set_module('numpy') -def isscalar(element): - """ - Returns True if the type of `element` is a scalar type. - - Parameters - ---------- - element : any - Input argument, can be of any type and shape. - - Returns - ------- - val : bool - True if `element` is a scalar type, False if it is not. - - See Also - -------- - ndim : Get the number of dimensions of an array - - Notes - ----- - If you need a stricter way to identify a *numerical* scalar, use - ``isinstance(x, numbers.Number)``, as that returns ``False`` for most - non-numerical elements such as strings. - - In most cases ``np.ndim(x) == 0`` should be used instead of this function, - as that will also return true for 0d arrays. This is how numpy overloads - functions in the style of the ``dx`` arguments to `gradient` and the ``bins`` - argument to `histogram`. 
Some key differences: - - +--------------------------------------+---------------+-------------------+ - | x |``isscalar(x)``|``np.ndim(x) == 0``| - +======================================+===============+===================+ - | PEP 3141 numeric objects (including | ``True`` | ``True`` | - | builtins) | | | - +--------------------------------------+---------------+-------------------+ - | builtin string and buffer objects | ``True`` | ``True`` | - +--------------------------------------+---------------+-------------------+ - | other builtin objects, like | ``False`` | ``True`` | - | `pathlib.Path`, `Exception`, | | | - | the result of `re.compile` | | | - +--------------------------------------+---------------+-------------------+ - | third-party objects like | ``False`` | ``True`` | - | `matplotlib.figure.Figure` | | | - +--------------------------------------+---------------+-------------------+ - | zero-dimensional numpy arrays | ``False`` | ``True`` | - +--------------------------------------+---------------+-------------------+ - | other numpy arrays | ``False`` | ``False`` | - +--------------------------------------+---------------+-------------------+ - | `list`, `tuple`, and other sequence | ``False`` | ``False`` | - | objects | | | - +--------------------------------------+---------------+-------------------+ - - Examples - -------- - >>> np.isscalar(3.1) - True - >>> np.isscalar(np.array(3.1)) - False - >>> np.isscalar([3.1]) - False - >>> np.isscalar(False) - True - >>> np.isscalar('numpy') - True - - NumPy supports PEP 3141 numbers: - - >>> from fractions import Fraction - >>> np.isscalar(Fraction(5, 17)) - True - >>> from numbers import Number - >>> np.isscalar(Number()) - True - - """ - return (isinstance(element, generic) - or type(element) in ScalarType - or isinstance(element, numbers.Number)) - - -@set_module('numpy') -def binary_repr(num, width=None): - """ - Return the binary representation of the input number as a string. 
- - For negative numbers, if width is not given, a minus sign is added to the - front. If width is given, the two's complement of the number is - returned, with respect to that width. - - In a two's-complement system negative numbers are represented by the two's - complement of the absolute value. This is the most common method of - representing signed integers on computers [1]_. A N-bit two's-complement - system can represent every integer in the range - :math:`-2^{N-1}` to :math:`+2^{N-1}-1`. - - Parameters - ---------- - num : int - Only an integer decimal number can be used. - width : int, optional - The length of the returned string if `num` is positive, or the length - of the two's complement if `num` is negative, provided that `width` is - at least a sufficient number of bits for `num` to be represented in the - designated form. - - If the `width` value is insufficient, it will be ignored, and `num` will - be returned in binary (`num` > 0) or two's complement (`num` < 0) form - with its width equal to the minimum number of bits needed to represent - the number in the designated form. This behavior is deprecated and will - later raise an error. - - .. deprecated:: 1.12.0 - - Returns - ------- - bin : str - Binary representation of `num` or two's complement of `num`. - - See Also - -------- - base_repr: Return a string representation of a number in the given base - system. - bin: Python's built-in binary representation generator of an integer. - - Notes - ----- - `binary_repr` is equivalent to using `base_repr` with base 2, but about 25x - faster. - - References - ---------- - .. 
[1] Wikipedia, "Two's complement", - https://en.wikipedia.org/wiki/Two's_complement - - Examples - -------- - >>> np.binary_repr(3) - '11' - >>> np.binary_repr(-3) - '-11' - >>> np.binary_repr(3, width=4) - '0011' - - The two's complement is returned when the input number is negative and - width is specified: - - >>> np.binary_repr(-3, width=3) - '101' - >>> np.binary_repr(-3, width=5) - '11101' - - """ - def warn_if_insufficient(width, binwidth): - if width is not None and width < binwidth: - warnings.warn( - "Insufficient bit width provided. This behavior " - "will raise an error in the future.", DeprecationWarning, - stacklevel=3) - - # Ensure that num is a Python integer to avoid overflow or unwanted - # casts to floating point. - num = operator.index(num) - - if num == 0: - return '0' * (width or 1) - - elif num > 0: - binary = bin(num)[2:] - binwidth = len(binary) - outwidth = (binwidth if width is None - else max(binwidth, width)) - warn_if_insufficient(width, binwidth) - return binary.zfill(outwidth) - - else: - if width is None: - return '-' + bin(-num)[2:] - - else: - poswidth = len(bin(-num)[2:]) - - # See gh-8679: remove extra digit - # for numbers at boundaries. - if 2**(poswidth - 1) == -num: - poswidth -= 1 - - twocomp = 2**(poswidth + 1) + num - binary = bin(twocomp)[2:] - binwidth = len(binary) - - outwidth = max(binwidth, width) - warn_if_insufficient(width, binwidth) - return '1' * (outwidth - binwidth) + binary - - -@set_module('numpy') -def base_repr(number, base=2, padding=0): - """ - Return a string representation of a number in the given base system. - - Parameters - ---------- - number : int - The value to convert. Positive and negative values are handled. - base : int, optional - Convert `number` to the `base` number system. The valid range is 2-36, - the default value is 2. - padding : int, optional - Number of zeros padded on the left. Default is 0 (no padding). 
- - Returns - ------- - out : str - String representation of `number` in `base` system. - - See Also - -------- - binary_repr : Faster version of `base_repr` for base 2. - - Examples - -------- - >>> np.base_repr(5) - '101' - >>> np.base_repr(6, 5) - '11' - >>> np.base_repr(7, base=5, padding=3) - '00012' - - >>> np.base_repr(10, base=16) - 'A' - >>> np.base_repr(32, base=16) - '20' - - """ - digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' - if base > len(digits): - raise ValueError("Bases greater than 36 not handled in base_repr.") - elif base < 2: - raise ValueError("Bases less than 2 not handled in base_repr.") - - num = abs(number) - res = [] - while num: - res.append(digits[num % base]) - num //= base - if padding: - res.append('0' * padding) - if number < 0: - res.append('-') - return ''.join(reversed(res or '0')) - - -# These are all essentially abbreviations -# These might wind up in a special abbreviations module - - -def _maketup(descr, val): - dt = dtype(descr) - # Place val in all scalar tuples: - fields = dt.fields - if fields is None: - return val - else: - res = [_maketup(fields[name][0], val) for name in dt.names] - return tuple(res) - - -@set_module('numpy') -def identity(n, dtype=None): - """ - Return the identity array. - - The identity array is a square array with ones on - the main diagonal. - - Parameters - ---------- - n : int - Number of rows (and columns) in `n` x `n` output. - dtype : data-type, optional - Data-type of the output. Defaults to ``float``. - - Returns - ------- - out : ndarray - `n` x `n` array with its main diagonal set to one, - and all other elements 0. 
- - Examples - -------- - >>> np.identity(3) - array([[1., 0., 0.], - [0., 1., 0.], - [0., 0., 1.]]) - - """ - from numpy import eye - return eye(n, dtype=dtype) - - -def _allclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None): - return (a, b) - - -@array_function_dispatch(_allclose_dispatcher) -def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): - """ - Returns True if two arrays are element-wise equal within a tolerance. - - The tolerance values are positive, typically very small numbers. The - relative difference (`rtol` * abs(`b`)) and the absolute difference - `atol` are added together to compare against the absolute difference - between `a` and `b`. - - NaNs are treated as equal if they are in the same place and if - ``equal_nan=True``. Infs are treated as equal if they are in the same - place and of the same sign in both arrays. - - Parameters - ---------- - a, b : array_like - Input arrays to compare. - rtol : float - The relative tolerance parameter (see Notes). - atol : float - The absolute tolerance parameter (see Notes). - equal_nan : bool - Whether to compare NaN's as equal. If True, NaN's in `a` will be - considered equal to NaN's in `b` in the output array. - - .. versionadded:: 1.10.0 - - Returns - ------- - allclose : bool - Returns True if the two arrays are equal within the given - tolerance; False otherwise. - - See Also - -------- - isclose, all, any, equal - - Notes - ----- - If the following equation is element-wise True, then allclose returns - True. - - absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) - - The above equation is not symmetric in `a` and `b`, so that - ``allclose(a, b)`` might be different from ``allclose(b, a)`` in - some rare cases. - - The comparison of `a` and `b` uses standard broadcasting, which - means that `a` and `b` need not have the same shape in order for - ``allclose(a, b)`` to evaluate to True. The same is true for - `equal` but not `array_equal`. 
- - Examples - -------- - >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8]) - False - >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9]) - True - >>> np.allclose([1e10,1e-8], [1.0001e10,1e-9]) - False - >>> np.allclose([1.0, np.nan], [1.0, np.nan]) - False - >>> np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) - True - - """ - res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)) - return bool(res) - - -def _isclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None): - return (a, b) - - -@array_function_dispatch(_isclose_dispatcher) -def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): - """ - Returns a boolean array where two arrays are element-wise equal within a - tolerance. - - The tolerance values are positive, typically very small numbers. The - relative difference (`rtol` * abs(`b`)) and the absolute difference - `atol` are added together to compare against the absolute difference - between `a` and `b`. - - .. warning:: The default `atol` is not appropriate for comparing numbers - that are much smaller than one (see Notes). - - Parameters - ---------- - a, b : array_like - Input arrays to compare. - rtol : float - The relative tolerance parameter (see Notes). - atol : float - The absolute tolerance parameter (see Notes). - equal_nan : bool - Whether to compare NaN's as equal. If True, NaN's in `a` will be - considered equal to NaN's in `b` in the output array. - - Returns - ------- - y : array_like - Returns a boolean array of where `a` and `b` are equal within the - given tolerance. If both `a` and `b` are scalars, returns a single - boolean value. - - See Also - -------- - allclose - - Notes - ----- - .. versionadded:: 1.7.0 - - For finite values, isclose uses the following equation to test whether - two floating point values are equivalent. 
- - absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) - - Unlike the built-in `math.isclose`, the above equation is not symmetric - in `a` and `b` -- it assumes `b` is the reference value -- so that - `isclose(a, b)` might be different from `isclose(b, a)`. Furthermore, - the default value of atol is not zero, and is used to determine what - small values should be considered close to zero. The default value is - appropriate for expected values of order unity: if the expected values - are significantly smaller than one, it can result in false positives. - `atol` should be carefully selected for the use case at hand. A zero value - for `atol` will result in `False` if either `a` or `b` is zero. - - Examples - -------- - >>> np.isclose([1e10,1e-7], [1.00001e10,1e-8]) - array([ True, False]) - >>> np.isclose([1e10,1e-8], [1.00001e10,1e-9]) - array([ True, True]) - >>> np.isclose([1e10,1e-8], [1.0001e10,1e-9]) - array([False, True]) - >>> np.isclose([1.0, np.nan], [1.0, np.nan]) - array([ True, False]) - >>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) - array([ True, True]) - >>> np.isclose([1e-8, 1e-7], [0.0, 0.0]) - array([ True, False]) - >>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0) - array([False, False]) - >>> np.isclose([1e-10, 1e-10], [1e-20, 0.0]) - array([ True, True]) - >>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0) - array([False, True]) - """ - def within_tol(x, y, atol, rtol): - with errstate(invalid='ignore'): - return less_equal(abs(x-y), atol + rtol * abs(y)) - - x = asanyarray(a) - y = asanyarray(b) - - # Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT). - # This will cause casting of x later. Also, make sure to allow subclasses - # (e.g., for numpy.ma). - dt = multiarray.result_type(y, 1.) 
- y = array(y, dtype=dt, copy=False, subok=True) - - xfin = isfinite(x) - yfin = isfinite(y) - if all(xfin) and all(yfin): - return within_tol(x, y, atol, rtol) - else: - finite = xfin & yfin - cond = zeros_like(finite, subok=True) - # Because we're using boolean indexing, x & y must be the same shape. - # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in - # lib.stride_tricks, though, so we can't import it here. - x = x * ones_like(cond) - y = y * ones_like(cond) - # Avoid subtraction with infinite/nan values... - cond[finite] = within_tol(x[finite], y[finite], atol, rtol) - # Check for equality of infinite values... - cond[~finite] = (x[~finite] == y[~finite]) - if equal_nan: - # Make NaN == NaN - both_nan = isnan(x) & isnan(y) - - # Needed to treat masked arrays correctly. = True would not work. - cond[both_nan] = both_nan[both_nan] - - return cond[()] # Flatten 0d arrays to scalars - - -def _array_equal_dispatcher(a1, a2): - return (a1, a2) - - -@array_function_dispatch(_array_equal_dispatcher) -def array_equal(a1, a2): - """ - True if two arrays have the same shape and elements, False otherwise. - - Parameters - ---------- - a1, a2 : array_like - Input arrays. - - Returns - ------- - b : bool - Returns True if the arrays are equal. - - See Also - -------- - allclose: Returns True if two arrays are element-wise equal within a - tolerance. - array_equiv: Returns True if input arrays are shape consistent and all - elements equal. 
- - Examples - -------- - >>> np.array_equal([1, 2], [1, 2]) - True - >>> np.array_equal(np.array([1, 2]), np.array([1, 2])) - True - >>> np.array_equal([1, 2], [1, 2, 3]) - False - >>> np.array_equal([1, 2], [1, 4]) - False - - """ - try: - a1, a2 = asarray(a1), asarray(a2) - except Exception: - return False - if a1.shape != a2.shape: - return False - return bool(asarray(a1 == a2).all()) - - -def _array_equiv_dispatcher(a1, a2): - return (a1, a2) - - -@array_function_dispatch(_array_equiv_dispatcher) -def array_equiv(a1, a2): - """ - Returns True if input arrays are shape consistent and all elements equal. - - Shape consistent means they are either the same shape, or one input array - can be broadcasted to create the same shape as the other one. - - Parameters - ---------- - a1, a2 : array_like - Input arrays. - - Returns - ------- - out : bool - True if equivalent, False otherwise. - - Examples - -------- - >>> np.array_equiv([1, 2], [1, 2]) - True - >>> np.array_equiv([1, 2], [1, 3]) - False - - Showing the shape equivalence: - - >>> np.array_equiv([1, 2], [[1, 2], [1, 2]]) - True - >>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]]) - False - - >>> np.array_equiv([1, 2], [[1, 2], [1, 3]]) - False - - """ - try: - a1, a2 = asarray(a1), asarray(a2) - except Exception: - return False - try: - multiarray.broadcast(a1, a2) - except Exception: - return False - - return bool(asarray(a1 == a2).all()) - - -Inf = inf = infty = Infinity = PINF -nan = NaN = NAN -False_ = bool_(False) -True_ = bool_(True) - - -def extend_all(module): - existing = set(__all__) - mall = getattr(module, '__all__') - for a in mall: - if a not in existing: - __all__.append(a) - - -from .umath import * -from .numerictypes import * -from . import fromnumeric -from .fromnumeric import * -from . import arrayprint -from .arrayprint import * -from . import _asarray -from ._asarray import * -from . 
import _ufunc_config -from ._ufunc_config import * -extend_all(fromnumeric) -extend_all(umath) -extend_all(numerictypes) -extend_all(arrayprint) -extend_all(_asarray) -extend_all(_ufunc_config) diff --git a/venv/lib/python3.7/site-packages/numpy/core/numerictypes.py b/venv/lib/python3.7/site-packages/numpy/core/numerictypes.py deleted file mode 100644 index 761c708..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/numerictypes.py +++ /dev/null @@ -1,675 +0,0 @@ -""" -numerictypes: Define the numeric type objects - -This module is designed so "from numerictypes import \\*" is safe. -Exported symbols include: - - Dictionary with all registered number types (including aliases): - typeDict - - Type objects (not all will be available, depends on platform): - see variable sctypes for which ones you have - - Bit-width names - - int8 int16 int32 int64 int128 - uint8 uint16 uint32 uint64 uint128 - float16 float32 float64 float96 float128 float256 - complex32 complex64 complex128 complex192 complex256 complex512 - datetime64 timedelta64 - - c-based names - - bool_ - - object_ - - void, str_, unicode_ - - byte, ubyte, - short, ushort - intc, uintc, - intp, uintp, - int_, uint, - longlong, ulonglong, - - single, csingle, - float_, complex_, - longfloat, clongfloat, - - As part of the type-hierarchy: xx -- is bit-width - - generic - +-> bool_ (kind=b) - +-> number - | +-> integer - | | +-> signedinteger (intxx) (kind=i) - | | | byte - | | | short - | | | intc - | | | intp int0 - | | | int_ - | | | longlong - | | \\-> unsignedinteger (uintxx) (kind=u) - | | ubyte - | | ushort - | | uintc - | | uintp uint0 - | | uint_ - | | ulonglong - | +-> inexact - | +-> floating (floatxx) (kind=f) - | | half - | | single - | | float_ (double) - | | longfloat - | \\-> complexfloating (complexxx) (kind=c) - | csingle (singlecomplex) - | complex_ (cfloat, cdouble) - | clongfloat (longcomplex) - +-> flexible - | +-> character - | | str_ (string_, bytes_) (kind=S) [Python 2] - | | 
unicode_ (kind=U) [Python 2] - | | - | | bytes_ (string_) (kind=S) [Python 3] - | | str_ (unicode_) (kind=U) [Python 3] - | | - | \\-> void (kind=V) - \\-> object_ (not used much) (kind=O) - -""" -from __future__ import division, absolute_import, print_function - -import types as _types -import sys -import numbers -import warnings - -from numpy.compat import bytes, long -from numpy.core.multiarray import ( - typeinfo, ndarray, array, empty, dtype, datetime_data, - datetime_as_string, busday_offset, busday_count, is_busday, - busdaycalendar - ) -from numpy.core.overrides import set_module - -# we add more at the bottom -__all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes', - 'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char', - 'maximum_sctype', 'issctype', 'typecodes', 'find_common_type', - 'issubdtype', 'datetime_data', 'datetime_as_string', - 'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar', - ] - -# we don't need all these imports, but we need to keep them for compatibility -# for users using np.core.numerictypes.UPPER_TABLE -from ._string_helpers import ( - english_lower, english_upper, english_capitalize, LOWER_TABLE, UPPER_TABLE -) - -from ._type_aliases import ( - sctypeDict, - sctypeNA, - allTypes, - bitname, - sctypes, - _concrete_types, - _concrete_typeinfo, - _bits_of, -) -from ._dtype import _kind_name - -# we don't export these for import *, but we do want them accessible -# as numerictypes.bool, etc. 
-if sys.version_info[0] >= 3: - from builtins import bool, int, float, complex, object, str - unicode = str -else: - from __builtin__ import bool, int, float, complex, object, unicode, str - - -# We use this later -generic = allTypes['generic'] - -genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16', - 'int32', 'uint32', 'int64', 'uint64', 'int128', - 'uint128', 'float16', - 'float32', 'float64', 'float80', 'float96', 'float128', - 'float256', - 'complex32', 'complex64', 'complex128', 'complex160', - 'complex192', 'complex256', 'complex512', 'object'] - -@set_module('numpy') -def maximum_sctype(t): - """ - Return the scalar type of highest precision of the same kind as the input. - - Parameters - ---------- - t : dtype or dtype specifier - The input data type. This can be a `dtype` object or an object that - is convertible to a `dtype`. - - Returns - ------- - out : dtype - The highest precision data type of the same kind (`dtype.kind`) as `t`. - - See Also - -------- - obj2sctype, mintypecode, sctype2char - dtype - - Examples - -------- - >>> np.maximum_sctype(int) - - >>> np.maximum_sctype(np.uint8) - - >>> np.maximum_sctype(complex) - # may vary - - >>> np.maximum_sctype(str) - - - >>> np.maximum_sctype('i2') - - >>> np.maximum_sctype('f4') - # may vary - - """ - g = obj2sctype(t) - if g is None: - return t - t = g - base = _kind_name(dtype(t)) - if base in sctypes: - return sctypes[base][-1] - else: - return t - - -@set_module('numpy') -def issctype(rep): - """ - Determines whether the given object represents a scalar data-type. - - Parameters - ---------- - rep : any - If `rep` is an instance of a scalar dtype, True is returned. If not, - False is returned. - - Returns - ------- - out : bool - Boolean result of check whether `rep` is a scalar dtype. 
- - See Also - -------- - issubsctype, issubdtype, obj2sctype, sctype2char - - Examples - -------- - >>> np.issctype(np.int32) - True - >>> np.issctype(list) - False - >>> np.issctype(1.1) - False - - Strings are also a scalar type: - - >>> np.issctype(np.dtype('str')) - True - - """ - if not isinstance(rep, (type, dtype)): - return False - try: - res = obj2sctype(rep) - if res and res != object_: - return True - return False - except Exception: - return False - - -@set_module('numpy') -def obj2sctype(rep, default=None): - """ - Return the scalar dtype or NumPy equivalent of Python type of an object. - - Parameters - ---------- - rep : any - The object of which the type is returned. - default : any, optional - If given, this is returned for objects whose types can not be - determined. If not given, None is returned for those objects. - - Returns - ------- - dtype : dtype or Python type - The data type of `rep`. - - See Also - -------- - sctype2char, issctype, issubsctype, issubdtype, maximum_sctype - - Examples - -------- - >>> np.obj2sctype(np.int32) - - >>> np.obj2sctype(np.array([1., 2.])) - - >>> np.obj2sctype(np.array([1.j])) - - - >>> np.obj2sctype(dict) - - >>> np.obj2sctype('string') - - >>> np.obj2sctype(1, default=list) - - - """ - # prevent abstract classes being upcast - if isinstance(rep, type) and issubclass(rep, generic): - return rep - # extract dtype from arrays - if isinstance(rep, ndarray): - return rep.dtype.type - # fall back on dtype to convert - try: - res = dtype(rep) - except Exception: - return default - else: - return res.type - - -@set_module('numpy') -def issubclass_(arg1, arg2): - """ - Determine if a class is a subclass of a second class. - - `issubclass_` is equivalent to the Python built-in ``issubclass``, - except that it returns False instead of raising a TypeError if one - of the arguments is not a class. - - Parameters - ---------- - arg1 : class - Input class. True is returned if `arg1` is a subclass of `arg2`. 
- arg2 : class or tuple of classes. - Input class. If a tuple of classes, True is returned if `arg1` is a - subclass of any of the tuple elements. - - Returns - ------- - out : bool - Whether `arg1` is a subclass of `arg2` or not. - - See Also - -------- - issubsctype, issubdtype, issctype - - Examples - -------- - >>> np.issubclass_(np.int32, int) - False # True on Python 2.7 - >>> np.issubclass_(np.int32, float) - False - - """ - try: - return issubclass(arg1, arg2) - except TypeError: - return False - - -@set_module('numpy') -def issubsctype(arg1, arg2): - """ - Determine if the first argument is a subclass of the second argument. - - Parameters - ---------- - arg1, arg2 : dtype or dtype specifier - Data-types. - - Returns - ------- - out : bool - The result. - - See Also - -------- - issctype, issubdtype, obj2sctype - - Examples - -------- - >>> np.issubsctype('S8', str) - False - >>> np.issubsctype(np.array([1]), int) - True - >>> np.issubsctype(np.array([1]), float) - False - - """ - return issubclass(obj2sctype(arg1), obj2sctype(arg2)) - - -@set_module('numpy') -def issubdtype(arg1, arg2): - """ - Returns True if first argument is a typecode lower/equal in type hierarchy. - - Parameters - ---------- - arg1, arg2 : dtype_like - dtype or string representing a typecode. - - Returns - ------- - out : bool - - See Also - -------- - issubsctype, issubclass_ - numpy.core.numerictypes : Overview of numpy type hierarchy. 
- - Examples - -------- - >>> np.issubdtype('S1', np.string_) - True - >>> np.issubdtype(np.float64, np.float32) - False - - """ - if not issubclass_(arg1, generic): - arg1 = dtype(arg1).type - if not issubclass_(arg2, generic): - arg2_orig = arg2 - arg2 = dtype(arg2).type - if not isinstance(arg2_orig, dtype): - # weird deprecated behaviour, that tried to infer np.floating from - # float, and similar less obvious things, such as np.generic from - # basestring - mro = arg2.mro() - arg2 = mro[1] if len(mro) > 1 else mro[0] - - def type_repr(x): - """ Helper to produce clear error messages """ - if not isinstance(x, type): - return repr(x) - elif issubclass(x, generic): - return "np.{}".format(x.__name__) - else: - return x.__name__ - - # 1.14, 2017-08-01 - warnings.warn( - "Conversion of the second argument of issubdtype from `{raw}` " - "to `{abstract}` is deprecated. In future, it will be treated " - "as `{concrete} == np.dtype({raw}).type`.".format( - raw=type_repr(arg2_orig), - abstract=type_repr(arg2), - concrete=type_repr(dtype(arg2_orig).type) - ), - FutureWarning, stacklevel=2 - ) - - return issubclass(arg1, arg2) - - -# This dictionary allows look up based on any alias for an array data-type -class _typedict(dict): - """ - Base object for a dictionary for look-up with any alias for an array dtype. - - Instances of `_typedict` can not be used as dictionaries directly, - first they have to be populated. 
- - """ - - def __getitem__(self, obj): - return dict.__getitem__(self, obj2sctype(obj)) - -nbytes = _typedict() -_alignment = _typedict() -_maxvals = _typedict() -_minvals = _typedict() -def _construct_lookups(): - for name, info in _concrete_typeinfo.items(): - obj = info.type - nbytes[obj] = info.bits // 8 - _alignment[obj] = info.alignment - if len(info) > 5: - _maxvals[obj] = info.max - _minvals[obj] = info.min - else: - _maxvals[obj] = None - _minvals[obj] = None - -_construct_lookups() - - -@set_module('numpy') -def sctype2char(sctype): - """ - Return the string representation of a scalar dtype. - - Parameters - ---------- - sctype : scalar dtype or object - If a scalar dtype, the corresponding string character is - returned. If an object, `sctype2char` tries to infer its scalar type - and then return the corresponding string character. - - Returns - ------- - typechar : str - The string character corresponding to the scalar type. - - Raises - ------ - ValueError - If `sctype` is an object for which the type can not be inferred. - - See Also - -------- - obj2sctype, issctype, issubsctype, mintypecode - - Examples - -------- - >>> for sctype in [np.int32, np.double, np.complex_, np.string_, np.ndarray]: - ... 
print(np.sctype2char(sctype)) - l # may vary - d - D - S - O - - >>> x = np.array([1., 2-1.j]) - >>> np.sctype2char(x) - 'D' - >>> np.sctype2char(list) - 'O' - - """ - sctype = obj2sctype(sctype) - if sctype is None: - raise ValueError("unrecognized type") - if sctype not in _concrete_types: - # for compatibility - raise KeyError(sctype) - return dtype(sctype).char - -# Create dictionary of casting functions that wrap sequences -# indexed by type or type character -cast = _typedict() -for key in _concrete_types: - cast[key] = lambda x, k=key: array(x, copy=False).astype(k) - -try: - ScalarType = [_types.IntType, _types.FloatType, _types.ComplexType, - _types.LongType, _types.BooleanType, - _types.StringType, _types.UnicodeType, _types.BufferType] -except AttributeError: - # Py3K - ScalarType = [int, float, complex, int, bool, bytes, str, memoryview] - -ScalarType.extend(_concrete_types) -ScalarType = tuple(ScalarType) - - -# Now add the types we've determined to this module -for key in allTypes: - globals()[key] = allTypes[key] - __all__.append(key) - -del key - -typecodes = {'Character':'c', - 'Integer':'bhilqp', - 'UnsignedInteger':'BHILQP', - 'Float':'efdg', - 'Complex':'FDG', - 'AllInteger':'bBhHiIlLqQpP', - 'AllFloat':'efdgFDG', - 'Datetime': 'Mm', - 'All':'?bhilqpBHILQPefdgFDGSUVOMm'} - -# backwards compatibility --- deprecated name -typeDict = sctypeDict -typeNA = sctypeNA - -# b -> boolean -# u -> unsigned integer -# i -> signed integer -# f -> floating point -# c -> complex -# M -> datetime -# m -> timedelta -# S -> string -# U -> Unicode string -# V -> record -# O -> Python object -_kind_list = ['b', 'u', 'i', 'f', 'c', 'S', 'U', 'V', 'O', 'M', 'm'] - -__test_types = '?'+typecodes['AllInteger'][:-2]+typecodes['AllFloat']+'O' -__len_test_types = len(__test_types) - -# Keep incrementing until a common type both can be coerced to -# is found. 
Otherwise, return None -def _find_common_coerce(a, b): - if a > b: - return a - try: - thisind = __test_types.index(a.char) - except ValueError: - return None - return _can_coerce_all([a, b], start=thisind) - -# Find a data-type that all data-types in a list can be coerced to -def _can_coerce_all(dtypelist, start=0): - N = len(dtypelist) - if N == 0: - return None - if N == 1: - return dtypelist[0] - thisind = start - while thisind < __len_test_types: - newdtype = dtype(__test_types[thisind]) - numcoerce = len([x for x in dtypelist if newdtype >= x]) - if numcoerce == N: - return newdtype - thisind += 1 - return None - -def _register_types(): - numbers.Integral.register(integer) - numbers.Complex.register(inexact) - numbers.Real.register(floating) - numbers.Number.register(number) - -_register_types() - - -@set_module('numpy') -def find_common_type(array_types, scalar_types): - """ - Determine common type following standard coercion rules. - - Parameters - ---------- - array_types : sequence - A list of dtypes or dtype convertible objects representing arrays. - scalar_types : sequence - A list of dtypes or dtype convertible objects representing scalars. - - Returns - ------- - datatype : dtype - The common data type, which is the maximum of `array_types` ignoring - `scalar_types`, unless the maximum of `scalar_types` is of a - different kind (`dtype.kind`). If the kind is not understood, then - None is returned. - - See Also - -------- - dtype, common_type, can_cast, mintypecode - - Examples - -------- - >>> np.find_common_type([], [np.int64, np.float32, complex]) - dtype('complex128') - >>> np.find_common_type([np.int64, np.float32], []) - dtype('float64') - - The standard casting rules ensure that a scalar cannot up-cast an - array unless the scalar is of a fundamentally different kind of data - (i.e. 
under a different hierarchy in the data type hierarchy) then - the array: - - >>> np.find_common_type([np.float32], [np.int64, np.float64]) - dtype('float32') - - Complex is of a different type, so it up-casts the float in the - `array_types` argument: - - >>> np.find_common_type([np.float32], [complex]) - dtype('complex128') - - Type specifier strings are convertible to dtypes and can therefore - be used instead of dtypes: - - >>> np.find_common_type(['f4', 'f4', 'i4'], ['c8']) - dtype('complex128') - - """ - array_types = [dtype(x) for x in array_types] - scalar_types = [dtype(x) for x in scalar_types] - - maxa = _can_coerce_all(array_types) - maxsc = _can_coerce_all(scalar_types) - - if maxa is None: - return maxsc - - if maxsc is None: - return maxa - - try: - index_a = _kind_list.index(maxa.kind) - index_sc = _kind_list.index(maxsc.kind) - except ValueError: - return None - - if index_sc > index_a: - return _find_common_coerce(maxsc, maxa) - else: - return maxa diff --git a/venv/lib/python3.7/site-packages/numpy/core/overrides.py b/venv/lib/python3.7/site-packages/numpy/core/overrides.py deleted file mode 100644 index 55c7bd1..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/overrides.py +++ /dev/null @@ -1,210 +0,0 @@ -"""Implementation of __array_function__ overrides from NEP-18.""" -import collections -import functools -import os -import textwrap - -from numpy.core._multiarray_umath import ( - add_docstring, implement_array_function, _get_implementing_args) -from numpy.compat._inspect import getargspec - - -ARRAY_FUNCTION_ENABLED = bool( - int(os.environ.get('NUMPY_EXPERIMENTAL_ARRAY_FUNCTION', 1))) - - -add_docstring( - implement_array_function, - """ - Implement a function with checks for __array_function__ overrides. - - All arguments are required, and can only be passed by position. 
- - Arguments - --------- - implementation : function - Function that implements the operation on NumPy array without - overrides when called like ``implementation(*args, **kwargs)``. - public_api : function - Function exposed by NumPy's public API originally called like - ``public_api(*args, **kwargs)`` on which arguments are now being - checked. - relevant_args : iterable - Iterable of arguments to check for __array_function__ methods. - args : tuple - Arbitrary positional arguments originally passed into ``public_api``. - kwargs : dict - Arbitrary keyword arguments originally passed into ``public_api``. - - Returns - ------- - Result from calling ``implementation()`` or an ``__array_function__`` - method, as appropriate. - - Raises - ------ - TypeError : if no implementation is found. - """) - - -# exposed for testing purposes; used internally by implement_array_function -add_docstring( - _get_implementing_args, - """ - Collect arguments on which to call __array_function__. - - Parameters - ---------- - relevant_args : iterable of array-like - Iterable of possibly array-like arguments to check for - __array_function__ methods. - - Returns - ------- - Sequence of arguments with __array_function__ methods, in the order in - which they should be called. 
- """) - - -ArgSpec = collections.namedtuple('ArgSpec', 'args varargs keywords defaults') - - -def verify_matching_signatures(implementation, dispatcher): - """Verify that a dispatcher function has the right signature.""" - implementation_spec = ArgSpec(*getargspec(implementation)) - dispatcher_spec = ArgSpec(*getargspec(dispatcher)) - - if (implementation_spec.args != dispatcher_spec.args or - implementation_spec.varargs != dispatcher_spec.varargs or - implementation_spec.keywords != dispatcher_spec.keywords or - (bool(implementation_spec.defaults) != - bool(dispatcher_spec.defaults)) or - (implementation_spec.defaults is not None and - len(implementation_spec.defaults) != - len(dispatcher_spec.defaults))): - raise RuntimeError('implementation and dispatcher for %s have ' - 'different function signatures' % implementation) - - if implementation_spec.defaults is not None: - if dispatcher_spec.defaults != (None,) * len(dispatcher_spec.defaults): - raise RuntimeError('dispatcher functions can only use None for ' - 'default argument values') - - -def set_module(module): - """Decorator for overriding __module__ on a function or class. - - Example usage:: - - @set_module('numpy') - def example(): - pass - - assert example.__module__ == 'numpy' - """ - def decorator(func): - if module is not None: - func.__module__ = module - return func - return decorator - - - -# Call textwrap.dedent here instead of in the function so as to avoid -# calling dedent multiple times on the same text -_wrapped_func_source = textwrap.dedent(""" - @functools.wraps(implementation) - def {name}(*args, **kwargs): - relevant_args = dispatcher(*args, **kwargs) - return implement_array_function( - implementation, {name}, relevant_args, args, kwargs) - """) - - -def array_function_dispatch(dispatcher, module=None, verify=True, - docs_from_dispatcher=False): - """Decorator for adding dispatch with the __array_function__ protocol. - - See NEP-18 for example usage. 
- - Parameters - ---------- - dispatcher : callable - Function that when called like ``dispatcher(*args, **kwargs)`` with - arguments from the NumPy function call returns an iterable of - array-like arguments to check for ``__array_function__``. - module : str, optional - __module__ attribute to set on new function, e.g., ``module='numpy'``. - By default, module is copied from the decorated function. - verify : bool, optional - If True, verify the that the signature of the dispatcher and decorated - function signatures match exactly: all required and optional arguments - should appear in order with the same names, but the default values for - all optional arguments should be ``None``. Only disable verification - if the dispatcher's signature needs to deviate for some particular - reason, e.g., because the function has a signature like - ``func(*args, **kwargs)``. - docs_from_dispatcher : bool, optional - If True, copy docs from the dispatcher function onto the dispatched - function, rather than from the implementation. This is useful for - functions defined in C, which otherwise don't have docstrings. - - Returns - ------- - Function suitable for decorating the implementation of a NumPy function. - """ - - if not ARRAY_FUNCTION_ENABLED: - def decorator(implementation): - if docs_from_dispatcher: - add_docstring(implementation, dispatcher.__doc__) - if module is not None: - implementation.__module__ = module - return implementation - return decorator - - def decorator(implementation): - if verify: - verify_matching_signatures(implementation, dispatcher) - - if docs_from_dispatcher: - add_docstring(implementation, dispatcher.__doc__) - - # Equivalently, we could define this function directly instead of using - # exec. This version has the advantage of giving the helper function a - # more interpettable name. Otherwise, the original function does not - # show up at all in many cases, e.g., if it's written in C or if the - # dispatcher gets an invalid keyword argument. 
- source = _wrapped_func_source.format(name=implementation.__name__) - - source_object = compile( - source, filename='<__array_function__ internals>', mode='exec') - scope = { - 'implementation': implementation, - 'dispatcher': dispatcher, - 'functools': functools, - 'implement_array_function': implement_array_function, - } - exec(source_object, scope) - - public_api = scope[implementation.__name__] - - if module is not None: - public_api.__module__ = module - - public_api._implementation = implementation - - return public_api - - return decorator - - -def array_function_from_dispatcher( - implementation, module=None, verify=True, docs_from_dispatcher=True): - """Like array_function_dispatcher, but with function arguments flipped.""" - - def decorator(dispatcher): - return array_function_dispatch( - dispatcher, module, verify=verify, - docs_from_dispatcher=docs_from_dispatcher)(implementation) - return decorator diff --git a/venv/lib/python3.7/site-packages/numpy/core/records.py b/venv/lib/python3.7/site-packages/numpy/core/records.py deleted file mode 100644 index a1cad90..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/records.py +++ /dev/null @@ -1,886 +0,0 @@ -""" -Record Arrays -============= -Record arrays expose the fields of structured arrays as properties. - -Most commonly, ndarrays contain elements of a single type, e.g. floats, -integers, bools etc. 
However, it is possible for elements to be combinations -of these using structured types, such as:: - - >>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', np.int64), ('y', np.float64)]) - >>> a - array([(1, 2.), (1, 2.)], dtype=[('x', '>> a['x'] - array([1, 1]) - - >>> a['y'] - array([2., 2.]) - -Record arrays allow us to access fields as properties:: - - >>> ar = np.rec.array(a) - - >>> ar.x - array([1, 1]) - - >>> ar.y - array([2., 2.]) - -""" -from __future__ import division, absolute_import, print_function - -import sys -import os -import warnings -from collections import Counter, OrderedDict - -from . import numeric as sb -from . import numerictypes as nt -from numpy.compat import ( - isfileobj, bytes, long, unicode, os_fspath, contextlib_nullcontext -) -from numpy.core.overrides import set_module -from .arrayprint import get_printoptions - -# All of the functions allow formats to be a dtype -__all__ = ['record', 'recarray', 'format_parser'] - - -ndarray = sb.ndarray - -_byteorderconv = {'b':'>', - 'l':'<', - 'n':'=', - 'B':'>', - 'L':'<', - 'N':'=', - 'S':'s', - 's':'s', - '>':'>', - '<':'<', - '=':'=', - '|':'|', - 'I':'|', - 'i':'|'} - -# formats regular expression -# allows multidimension spec with a tuple syntax in front -# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 ' -# are equally allowed - -numfmt = nt.typeDict - -# taken from OrderedDict recipes in the Python documentation -# https://docs.python.org/3.3/library/collections.html#ordereddict-examples-and-recipes -class _OrderedCounter(Counter, OrderedDict): - """Counter that remembers the order elements are first encountered""" - - def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, OrderedDict(self)) - - def __reduce__(self): - return self.__class__, (OrderedDict(self),) - - -def find_duplicate(list): - """Find duplication in a list, return a list of duplicated elements""" - return [ - item - for item, counts in _OrderedCounter(list).items() - if counts > 1 - ] - - 
-@set_module('numpy') -class format_parser(object): - """ - Class to convert formats, names, titles description to a dtype. - - After constructing the format_parser object, the dtype attribute is - the converted data-type: - ``dtype = format_parser(formats, names, titles).dtype`` - - Attributes - ---------- - dtype : dtype - The converted data-type. - - Parameters - ---------- - formats : str or list of str - The format description, either specified as a string with - comma-separated format descriptions in the form ``'f8, i4, a5'``, or - a list of format description strings in the form - ``['f8', 'i4', 'a5']``. - names : str or list/tuple of str - The field names, either specified as a comma-separated string in the - form ``'col1, col2, col3'``, or as a list or tuple of strings in the - form ``['col1', 'col2', 'col3']``. - An empty list can be used, in that case default field names - ('f0', 'f1', ...) are used. - titles : sequence - Sequence of title strings. An empty list can be used to leave titles - out. - aligned : bool, optional - If True, align the fields by padding as the C-compiler would. - Default is False. - byteorder : str, optional - If specified, all the fields will be changed to the - provided byte-order. Otherwise, the default byte-order is - used. For all available string specifiers, see `dtype.newbyteorder`. - - See Also - -------- - dtype, typename, sctype2char - - Examples - -------- - >>> np.format_parser(['>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'], - ... 
[]).dtype - dtype([('col1', '>> np.format_parser([' len(titles)): - self._titles += [None] * (self._nfields - len(titles)) - - def _createdescr(self, byteorder): - descr = sb.dtype({'names':self._names, - 'formats':self._f_formats, - 'offsets':self._offsets, - 'titles':self._titles}) - if (byteorder is not None): - byteorder = _byteorderconv[byteorder[0]] - descr = descr.newbyteorder(byteorder) - - self._descr = descr - -class record(nt.void): - """A data-type scalar that allows field access as attribute lookup. - """ - - # manually set name and module so that this class's type shows up - # as numpy.record when printed - __name__ = 'record' - __module__ = 'numpy' - - def __repr__(self): - if get_printoptions()['legacy'] == '1.13': - return self.__str__() - return super(record, self).__repr__() - - def __str__(self): - if get_printoptions()['legacy'] == '1.13': - return str(self.item()) - return super(record, self).__str__() - - def __getattribute__(self, attr): - if attr in ['setfield', 'getfield', 'dtype']: - return nt.void.__getattribute__(self, attr) - try: - return nt.void.__getattribute__(self, attr) - except AttributeError: - pass - fielddict = nt.void.__getattribute__(self, 'dtype').fields - res = fielddict.get(attr, None) - if res: - obj = self.getfield(*res[:2]) - # if it has fields return a record, - # otherwise return the object - try: - dt = obj.dtype - except AttributeError: - #happens if field is Object type - return obj - if dt.names is not None: - return obj.view((self.__class__, obj.dtype)) - return obj - else: - raise AttributeError("'record' object has no " - "attribute '%s'" % attr) - - def __setattr__(self, attr, val): - if attr in ['setfield', 'getfield', 'dtype']: - raise AttributeError("Cannot set '%s' attribute" % attr) - fielddict = nt.void.__getattribute__(self, 'dtype').fields - res = fielddict.get(attr, None) - if res: - return self.setfield(val, *res[:2]) - else: - if getattr(self, attr, None): - return nt.void.__setattr__(self, attr, 
val) - else: - raise AttributeError("'record' object has no " - "attribute '%s'" % attr) - - def __getitem__(self, indx): - obj = nt.void.__getitem__(self, indx) - - # copy behavior of record.__getattribute__, - if isinstance(obj, nt.void) and obj.dtype.names is not None: - return obj.view((self.__class__, obj.dtype)) - else: - # return a single element - return obj - - def pprint(self): - """Pretty-print all fields.""" - # pretty-print all fields - names = self.dtype.names - maxlen = max(len(name) for name in names) - fmt = '%% %ds: %%s' % maxlen - rows = [fmt % (name, getattr(self, name)) for name in names] - return "\n".join(rows) - -# The recarray is almost identical to a standard array (which supports -# named fields already) The biggest difference is that it can use -# attribute-lookup to find the fields and it is constructed using -# a record. - -# If byteorder is given it forces a particular byteorder on all -# the fields (and any subfields) - -class recarray(ndarray): - """Construct an ndarray that allows field access using attributes. - - Arrays may have a data-types containing fields, analogous - to columns in a spread sheet. An example is ``[(x, int), (y, float)]``, - where each entry in the array is a pair of ``(int, float)``. Normally, - these attributes are accessed using dictionary lookups such as ``arr['x']`` - and ``arr['y']``. Record arrays allow the fields to be accessed as members - of the array, using ``arr.x`` and ``arr.y``. - - Parameters - ---------- - shape : tuple - Shape of output array. - dtype : data-type, optional - The desired data-type. By default, the data-type is determined - from `formats`, `names`, `titles`, `aligned` and `byteorder`. - formats : list of data-types, optional - A list containing the data-types for the different columns, e.g. - ``['i4', 'f8', 'i4']``. `formats` does *not* support the new - convention of using types directly, i.e. ``(int, float, int)``. - Note that `formats` must be a list, not a tuple. 
- Given that `formats` is somewhat limited, we recommend specifying - `dtype` instead. - names : tuple of str, optional - The name of each column, e.g. ``('x', 'y', 'z')``. - buf : buffer, optional - By default, a new array is created of the given shape and data-type. - If `buf` is specified and is an object exposing the buffer interface, - the array will use the memory from the existing buffer. In this case, - the `offset` and `strides` keywords are available. - - Other Parameters - ---------------- - titles : tuple of str, optional - Aliases for column names. For example, if `names` were - ``('x', 'y', 'z')`` and `titles` is - ``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then - ``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``. - byteorder : {'<', '>', '='}, optional - Byte-order for all fields. - aligned : bool, optional - Align the fields in memory as the C-compiler would. - strides : tuple of ints, optional - Buffer (`buf`) is interpreted according to these strides (strides - define how many bytes each array element, row, column, etc. - occupy in memory). - offset : int, optional - Start reading buffer (`buf`) from this offset onwards. - order : {'C', 'F'}, optional - Row-major (C-style) or column-major (Fortran-style) order. - - Returns - ------- - rec : recarray - Empty array of the given shape and type. - - See Also - -------- - rec.fromrecords : Construct a record array from data. - record : fundamental data-type for `recarray`. - format_parser : determine a data-type from formats, names, titles. - - Notes - ----- - This constructor can be compared to ``empty``: it creates a new record - array but does not fill it with data. To create a record array from data, - use one of the following methods: - - 1. Create a standard ndarray and convert it to a record array, - using ``arr.view(np.recarray)`` - 2. Use the `buf` keyword. - 3. Use `np.rec.fromrecords`. 
- - Examples - -------- - Create an array with two fields, ``x`` and ``y``: - - >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', '>> x - array([(1., 2), (3., 4)], dtype=[('x', '>> x['x'] - array([1., 3.]) - - View the array as a record array: - - >>> x = x.view(np.recarray) - - >>> x.x - array([1., 3.]) - - >>> x.y - array([2, 4]) - - Create a new, empty record array: - - >>> np.recarray((2,), - ... dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP - rec.array([(-1073741821, 1.2249118382103472e-301, 24547520), - (3471280, 1.2134086255804012e-316, 0)], - dtype=[('x', ' 0 or self.shape == (0,): - lst = sb.array2string( - self, separator=', ', prefix=prefix, suffix=',') - else: - # show zero-length shape unless it is (0,) - lst = "[], shape=%s" % (repr(self.shape),) - - lf = '\n'+' '*len(prefix) - if get_printoptions()['legacy'] == '1.13': - lf = ' ' + lf # trailing space - return fmt % (lst, lf, repr_dtype) - - def field(self, attr, val=None): - if isinstance(attr, int): - names = ndarray.__getattribute__(self, 'dtype').names - attr = names[attr] - - fielddict = ndarray.__getattribute__(self, 'dtype').fields - - res = fielddict[attr][:2] - - if val is None: - obj = self.getfield(*res) - if obj.dtype.names is not None: - return obj - return obj.view(ndarray) - else: - return self.setfield(val, *res) - - -def fromarrays(arrayList, dtype=None, shape=None, formats=None, - names=None, titles=None, aligned=False, byteorder=None): - """ create a record array from a (flat) list of arrays - - >>> x1=np.array([1,2,3,4]) - >>> x2=np.array(['a','dd','xyz','12']) - >>> x3=np.array([1.1,2,3,4]) - >>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c') - >>> print(r[1]) - (2, 'dd', 2.0) # may vary - >>> x1[1]=34 - >>> r.a - array([1, 2, 3, 4]) - """ - - arrayList = [sb.asarray(x) for x in arrayList] - - if shape is None or shape == 0: - shape = arrayList[0].shape - - if isinstance(shape, int): - shape = (shape,) - - if formats is None and dtype is None: - # go 
through each object in the list to see if it is an ndarray - # and determine the formats. - formats = [] - for obj in arrayList: - formats.append(obj.dtype) - - if dtype is not None: - descr = sb.dtype(dtype) - _names = descr.names - else: - parsed = format_parser(formats, names, titles, aligned, byteorder) - _names = parsed._names - descr = parsed._descr - - # Determine shape from data-type. - if len(descr) != len(arrayList): - raise ValueError("mismatch between the number of fields " - "and the number of arrays") - - d0 = descr[0].shape - nn = len(d0) - if nn > 0: - shape = shape[:-nn] - - for k, obj in enumerate(arrayList): - nn = descr[k].ndim - testshape = obj.shape[:obj.ndim - nn] - if testshape != shape: - raise ValueError("array-shape mismatch in array %d" % k) - - _array = recarray(shape, descr) - - # populate the record array (makes a copy) - for i in range(len(arrayList)): - _array[_names[i]] = arrayList[i] - - return _array - -def fromrecords(recList, dtype=None, shape=None, formats=None, names=None, - titles=None, aligned=False, byteorder=None): - """ create a recarray from a list of records in text form - - The data in the same field can be heterogeneous, they will be promoted - to the highest data type. This method is intended for creating - smaller record arrays. If used to create large array without formats - defined - - r=fromrecords([(2,3.,'abc')]*100000) - - it can be slow. - - If formats is None, then this will auto-detect formats. Use list of - tuples rather than list of lists for faster processing. - - >>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)], - ... 
names='col1,col2,col3') - >>> print(r[0]) - (456, 'dbe', 1.2) - >>> r.col1 - array([456, 2]) - >>> r.col2 - array(['dbe', 'de'], dtype='>> import pickle - >>> pickle.loads(pickle.dumps(r)) - rec.array([(456, 'dbe', 1.2), ( 2, 'de', 1.3)], - dtype=[('col1', ' 1: - raise ValueError("Can only deal with 1-d array.") - _array = recarray(shape, descr) - for k in range(_array.size): - _array[k] = tuple(recList[k]) - # list of lists instead of list of tuples ? - # 2018-02-07, 1.14.1 - warnings.warn( - "fromrecords expected a list of tuples, may have received a list " - "of lists instead. In the future that will raise an error", - FutureWarning, stacklevel=2) - return _array - else: - if shape is not None and retval.shape != shape: - retval.shape = shape - - res = retval.view(recarray) - - return res - - -def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None, - names=None, titles=None, aligned=False, byteorder=None): - """ create a (read-only) record array from binary data contained in - a string""" - - if dtype is None and formats is None: - raise TypeError("fromstring() needs a 'dtype' or 'formats' argument") - - if dtype is not None: - descr = sb.dtype(dtype) - else: - descr = format_parser(formats, names, titles, aligned, byteorder)._descr - - itemsize = descr.itemsize - if (shape is None or shape == 0 or shape == -1): - shape = (len(datastring) - offset) // itemsize - - _array = recarray(shape, descr, buf=datastring, offset=offset) - return _array - -def get_remaining_size(fd): - try: - fn = fd.fileno() - except AttributeError: - return os.path.getsize(fd.name) - fd.tell() - st = os.fstat(fn) - size = st.st_size - fd.tell() - return size - -def fromfile(fd, dtype=None, shape=None, offset=0, formats=None, - names=None, titles=None, aligned=False, byteorder=None): - """Create an array from binary file data - - If file is a string or a path-like object then that file is opened, - else it is assumed to be a file object. 
The file object must - support random access (i.e. it must have tell and seek methods). - - >>> from tempfile import TemporaryFile - >>> a = np.empty(10,dtype='f8,i4,a5') - >>> a[5] = (0.5,10,'abcde') - >>> - >>> fd=TemporaryFile() - >>> a = a.newbyteorder('<') - >>> a.tofile(fd) - >>> - >>> _ = fd.seek(0) - >>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10, - ... byteorder='<') - >>> print(r[5]) - (0.5, 10, 'abcde') - >>> r.shape - (10,) - """ - - if dtype is None and formats is None: - raise TypeError("fromfile() needs a 'dtype' or 'formats' argument") - - if (shape is None or shape == 0): - shape = (-1,) - elif isinstance(shape, (int, long)): - shape = (shape,) - - if isfileobj(fd): - # file already opened - ctx = contextlib_nullcontext(fd) - else: - # open file - ctx = open(os_fspath(fd), 'rb') - - with ctx as fd: - if (offset > 0): - fd.seek(offset, 1) - size = get_remaining_size(fd) - - if dtype is not None: - descr = sb.dtype(dtype) - else: - descr = format_parser(formats, names, titles, aligned, byteorder)._descr - - itemsize = descr.itemsize - - shapeprod = sb.array(shape).prod(dtype=nt.intp) - shapesize = shapeprod * itemsize - if shapesize < 0: - shape = list(shape) - shape[shape.index(-1)] = size // -shapesize - shape = tuple(shape) - shapeprod = sb.array(shape).prod(dtype=nt.intp) - - nbytes = shapeprod * itemsize - - if nbytes > size: - raise ValueError( - "Not enough bytes left in file for specified shape and type") - - # create the array - _array = recarray(shape, descr) - nbytesread = fd.readinto(_array.data) - if nbytesread != nbytes: - raise IOError("Didn't read as many bytes as expected") - - return _array - -def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None, - names=None, titles=None, aligned=False, byteorder=None, copy=True): - """Construct a record array from a wide-variety of objects. 
- """ - - if ((isinstance(obj, (type(None), str)) or isfileobj(obj)) and - (formats is None) and (dtype is None)): - raise ValueError("Must define formats (or dtype) if object is " - "None, string, or an open file") - - kwds = {} - if dtype is not None: - dtype = sb.dtype(dtype) - elif formats is not None: - dtype = format_parser(formats, names, titles, - aligned, byteorder)._descr - else: - kwds = {'formats': formats, - 'names': names, - 'titles': titles, - 'aligned': aligned, - 'byteorder': byteorder - } - - if obj is None: - if shape is None: - raise ValueError("Must define a shape if obj is None") - return recarray(shape, dtype, buf=obj, offset=offset, strides=strides) - - elif isinstance(obj, bytes): - return fromstring(obj, dtype, shape=shape, offset=offset, **kwds) - - elif isinstance(obj, (list, tuple)): - if isinstance(obj[0], (tuple, list)): - return fromrecords(obj, dtype=dtype, shape=shape, **kwds) - else: - return fromarrays(obj, dtype=dtype, shape=shape, **kwds) - - elif isinstance(obj, recarray): - if dtype is not None and (obj.dtype != dtype): - new = obj.view(dtype) - else: - new = obj - if copy: - new = new.copy() - return new - - elif isfileobj(obj): - return fromfile(obj, dtype=dtype, shape=shape, offset=offset) - - elif isinstance(obj, ndarray): - if dtype is not None and (obj.dtype != dtype): - new = obj.view(dtype) - else: - new = obj - if copy: - new = new.copy() - return new.view(recarray) - - else: - interface = getattr(obj, "__array_interface__", None) - if interface is None or not isinstance(interface, dict): - raise ValueError("Unknown input type") - obj = sb.array(obj) - if dtype is not None and (obj.dtype != dtype): - obj = obj.view(dtype) - return obj.view(recarray) diff --git a/venv/lib/python3.7/site-packages/numpy/core/setup.py b/venv/lib/python3.7/site-packages/numpy/core/setup.py deleted file mode 100644 index 974ec46..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/setup.py +++ /dev/null @@ -1,979 +0,0 @@ -from 
__future__ import division, print_function - -import os -import sys -import pickle -import copy -import warnings -import platform -import textwrap -from os.path import join - -from numpy.distutils import log -from distutils.dep_util import newer -from distutils.sysconfig import get_config_var -from numpy._build_utils.apple_accelerate import ( - uses_accelerate_framework, get_sgemv_fix - ) -from numpy.compat import npy_load_module -from setup_common import * - -# Set to True to enable relaxed strides checking. This (mostly) means -# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags. -NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0") - -# Put NPY_RELAXED_STRIDES_DEBUG=1 in the environment if you want numpy to use a -# bogus value for affected strides in order to help smoke out bad stride usage -# when relaxed stride checking is enabled. -NPY_RELAXED_STRIDES_DEBUG = (os.environ.get('NPY_RELAXED_STRIDES_DEBUG', "0") != "0") -NPY_RELAXED_STRIDES_DEBUG = NPY_RELAXED_STRIDES_DEBUG and NPY_RELAXED_STRIDES_CHECKING - -# XXX: ugly, we use a class to avoid calling twice some expensive functions in -# config.h/numpyconfig.h. I don't see a better way because distutils force -# config.h generation inside an Extension class, and as such sharing -# configuration information between extensions is not easy. -# Using a pickled-based memoize does not work because config_cmd is an instance -# method, which cPickle does not like. -# -# Use pickle in all cases, as cPickle is gone in python3 and the difference -# in time is only in build. 
-- Charles Harris, 2013-03-30 - -class CallOnceOnly(object): - def __init__(self): - self._check_types = None - self._check_ieee_macros = None - self._check_complex = None - - def check_types(self, *a, **kw): - if self._check_types is None: - out = check_types(*a, **kw) - self._check_types = pickle.dumps(out) - else: - out = copy.deepcopy(pickle.loads(self._check_types)) - return out - - def check_ieee_macros(self, *a, **kw): - if self._check_ieee_macros is None: - out = check_ieee_macros(*a, **kw) - self._check_ieee_macros = pickle.dumps(out) - else: - out = copy.deepcopy(pickle.loads(self._check_ieee_macros)) - return out - - def check_complex(self, *a, **kw): - if self._check_complex is None: - out = check_complex(*a, **kw) - self._check_complex = pickle.dumps(out) - else: - out = copy.deepcopy(pickle.loads(self._check_complex)) - return out - -def pythonlib_dir(): - """return path where libpython* is.""" - if sys.platform == 'win32': - return os.path.join(sys.prefix, "libs") - else: - return get_config_var('LIBDIR') - -def is_npy_no_signal(): - """Return True if the NPY_NO_SIGNAL symbol must be defined in configuration - header.""" - return sys.platform == 'win32' - -def is_npy_no_smp(): - """Return True if the NPY_NO_SMP symbol must be defined in public - header (when SMP support cannot be reliably enabled).""" - # Perhaps a fancier check is in order here. - # so that threads are only enabled if there - # are actually multiple CPUS? -- but - # threaded code can be nice even on a single - # CPU so that long-calculating code doesn't - # block. 
- return 'NPY_NOSMP' in os.environ - -def win32_checks(deflist): - from numpy.distutils.misc_util import get_build_architecture - a = get_build_architecture() - - # Distutils hack on AMD64 on windows - print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' % - (a, os.name, sys.platform)) - if a == 'AMD64': - deflist.append('DISTUTILS_USE_SDK') - - # On win32, force long double format string to be 'g', not - # 'Lg', since the MS runtime does not support long double whose - # size is > sizeof(double) - if a == "Intel" or a == "AMD64": - deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING') - -def check_math_capabilities(config, moredefs, mathlibs): - def check_func(func_name): - return config.check_func(func_name, libraries=mathlibs, - decl=True, call=True) - - def check_funcs_once(funcs_name): - decl = dict([(f, True) for f in funcs_name]) - st = config.check_funcs_once(funcs_name, libraries=mathlibs, - decl=decl, call=decl) - if st: - moredefs.extend([(fname2def(f), 1) for f in funcs_name]) - return st - - def check_funcs(funcs_name): - # Use check_funcs_once first, and if it does not work, test func per - # func. Return success only if all the functions are available - if not check_funcs_once(funcs_name): - # Global check failed, check func per func - for f in funcs_name: - if check_func(f): - moredefs.append((fname2def(f), 1)) - return 0 - else: - return 1 - - #use_msvc = config.check_decl("_MSC_VER") - - if not check_funcs_once(MANDATORY_FUNCS): - raise SystemError("One of the required function to build numpy is not" - " available (the list is %s)." % str(MANDATORY_FUNCS)) - - # Standard functions which may not be available and for which we have a - # replacement implementation. Note that some of these are C99 functions. - - # XXX: hack to circumvent cpp pollution from python: python put its - # config.h in the public namespace, so we have a clash for the common - # functions we test. 
We remove every function tested by python's - # autoconf, hoping their own test are correct - for f in OPTIONAL_STDFUNCS_MAYBE: - if config.check_decl(fname2def(f), - headers=["Python.h", "math.h"]): - OPTIONAL_STDFUNCS.remove(f) - - check_funcs(OPTIONAL_STDFUNCS) - - for h in OPTIONAL_HEADERS: - if config.check_func("", decl=False, call=False, headers=[h]): - h = h.replace(".", "_").replace(os.path.sep, "_") - moredefs.append((fname2def(h), 1)) - - for tup in OPTIONAL_INTRINSICS: - headers = None - if len(tup) == 2: - f, args, m = tup[0], tup[1], fname2def(tup[0]) - elif len(tup) == 3: - f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[0]) - else: - f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[3]) - if config.check_func(f, decl=False, call=True, call_args=args, - headers=headers): - moredefs.append((m, 1)) - - for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES: - if config.check_gcc_function_attribute(dec, fn): - moredefs.append((fname2def(fn), 1)) - - for dec, fn, code, header in OPTIONAL_FUNCTION_ATTRIBUTES_WITH_INTRINSICS: - if config.check_gcc_function_attribute_with_intrinsics(dec, fn, code, - header): - moredefs.append((fname2def(fn), 1)) - - for fn in OPTIONAL_VARIABLE_ATTRIBUTES: - if config.check_gcc_variable_attribute(fn): - m = fn.replace("(", "_").replace(")", "_") - moredefs.append((fname2def(m), 1)) - - # C99 functions: float and long double versions - check_funcs(C99_FUNCS_SINGLE) - check_funcs(C99_FUNCS_EXTENDED) - -def check_complex(config, mathlibs): - priv = [] - pub = [] - - try: - if os.uname()[0] == "Interix": - warnings.warn("Disabling broken complex support. See #1365", stacklevel=2) - return priv, pub - except Exception: - # os.uname not available on all platforms. 
blanket except ugly but safe - pass - - # Check for complex support - st = config.check_header('complex.h') - if st: - priv.append(('HAVE_COMPLEX_H', 1)) - pub.append(('NPY_USE_C99_COMPLEX', 1)) - - for t in C99_COMPLEX_TYPES: - st = config.check_type(t, headers=["complex.h"]) - if st: - pub.append(('NPY_HAVE_%s' % type2def(t), 1)) - - def check_prec(prec): - flist = [f + prec for f in C99_COMPLEX_FUNCS] - decl = dict([(f, True) for f in flist]) - if not config.check_funcs_once(flist, call=decl, decl=decl, - libraries=mathlibs): - for f in flist: - if config.check_func(f, call=True, decl=True, - libraries=mathlibs): - priv.append((fname2def(f), 1)) - else: - priv.extend([(fname2def(f), 1) for f in flist]) - - check_prec('') - check_prec('f') - check_prec('l') - - return priv, pub - -def check_ieee_macros(config): - priv = [] - pub = [] - - macros = [] - - def _add_decl(f): - priv.append(fname2def("decl_%s" % f)) - pub.append('NPY_%s' % fname2def("decl_%s" % f)) - - # XXX: hack to circumvent cpp pollution from python: python put its - # config.h in the public namespace, so we have a clash for the common - # functions we test. We remove every function tested by python's - # autoconf, hoping their own test are correct - _macros = ["isnan", "isinf", "signbit", "isfinite"] - for f in _macros: - py_symbol = fname2def("decl_%s" % f) - already_declared = config.check_decl(py_symbol, - headers=["Python.h", "math.h"]) - if already_declared: - if config.check_macro_true(py_symbol, - headers=["Python.h", "math.h"]): - pub.append('NPY_%s' % fname2def("decl_%s" % f)) - else: - macros.append(f) - # Normally, isnan and isinf are macro (C99), but some platforms only have - # func, or both func and macro version. Check for macro only, and define - # replacement ones if not found. 
- # Note: including Python.h is necessary because it modifies some math.h - # definitions - for f in macros: - st = config.check_decl(f, headers=["Python.h", "math.h"]) - if st: - _add_decl(f) - - return priv, pub - -def check_types(config_cmd, ext, build_dir): - private_defines = [] - public_defines = [] - - # Expected size (in number of bytes) for each type. This is an - # optimization: those are only hints, and an exhaustive search for the size - # is done if the hints are wrong. - expected = {'short': [2], 'int': [4], 'long': [8, 4], - 'float': [4], 'double': [8], 'long double': [16, 12, 8], - 'Py_intptr_t': [8, 4], 'PY_LONG_LONG': [8], 'long long': [8], - 'off_t': [8, 4]} - - # Check we have the python header (-dev* packages on Linux) - result = config_cmd.check_header('Python.h') - if not result: - python = 'python' - if '__pypy__' in sys.builtin_module_names: - python = 'pypy' - raise SystemError( - "Cannot compile 'Python.h'. Perhaps you need to " - "install {0}-dev|{0}-devel.".format(python)) - res = config_cmd.check_header("endian.h") - if res: - private_defines.append(('HAVE_ENDIAN_H', 1)) - public_defines.append(('NPY_HAVE_ENDIAN_H', 1)) - res = config_cmd.check_header("sys/endian.h") - if res: - private_defines.append(('HAVE_SYS_ENDIAN_H', 1)) - public_defines.append(('NPY_HAVE_SYS_ENDIAN_H', 1)) - - # Check basic types sizes - for type in ('short', 'int', 'long'): - res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers=["Python.h"]) - if res: - public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type))) - else: - res = config_cmd.check_type_size(type, expected=expected[type]) - if res >= 0: - public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res)) - else: - raise SystemError("Checking sizeof (%s) failed !" 
% type) - - for type in ('float', 'double', 'long double'): - already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), - headers=["Python.h"]) - res = config_cmd.check_type_size(type, expected=expected[type]) - if res >= 0: - public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res)) - if not already_declared and not type == 'long double': - private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) - else: - raise SystemError("Checking sizeof (%s) failed !" % type) - - # Compute size of corresponding complex type: used to check that our - # definition is binary compatible with C99 complex type (check done at - # build time in npy_common.h) - complex_def = "struct {%s __x; %s __y;}" % (type, type) - res = config_cmd.check_type_size(complex_def, - expected=[2 * x for x in expected[type]]) - if res >= 0: - public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res)) - else: - raise SystemError("Checking sizeof (%s) failed !" % complex_def) - - for type in ('Py_intptr_t', 'off_t'): - res = config_cmd.check_type_size(type, headers=["Python.h"], - library_dirs=[pythonlib_dir()], - expected=expected[type]) - - if res >= 0: - private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) - public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res)) - else: - raise SystemError("Checking sizeof (%s) failed !" % type) - - # We check declaration AND type because that's how distutils does it. - if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']): - res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'], - library_dirs=[pythonlib_dir()], - expected=expected['PY_LONG_LONG']) - if res >= 0: - private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res)) - public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res)) - else: - raise SystemError("Checking sizeof (%s) failed !" 
% 'PY_LONG_LONG') - - res = config_cmd.check_type_size('long long', - expected=expected['long long']) - if res >= 0: - #private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res)) - public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res)) - else: - raise SystemError("Checking sizeof (%s) failed !" % 'long long') - - if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']): - raise RuntimeError( - "Config wo CHAR_BIT is not supported" - ", please contact the maintainers") - - return private_defines, public_defines - -def check_mathlib(config_cmd): - # Testing the C math library - mathlibs = [] - mathlibs_choices = [[], ['m'], ['cpml']] - mathlib = os.environ.get('MATHLIB') - if mathlib: - mathlibs_choices.insert(0, mathlib.split(',')) - for libs in mathlibs_choices: - if config_cmd.check_func("exp", libraries=libs, decl=True, call=True): - mathlibs = libs - break - else: - raise EnvironmentError("math library missing; rerun " - "setup.py after setting the " - "MATHLIB env variable") - return mathlibs - -def visibility_define(config): - """Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty - string).""" - hide = '__attribute__((visibility("hidden")))' - if config.check_gcc_function_attribute(hide, 'hideme'): - return hide - else: - return '' - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration, dot_join - from numpy.distutils.system_info import get_info, dict_append - - config = Configuration('core', parent_package, top_path) - local_dir = config.local_path - codegen_dir = join(local_dir, 'code_generators') - - if is_released(config): - warnings.simplefilter('error', MismatchCAPIWarning) - - # Check whether we have a mismatch between the set C API VERSION and the - # actual C API VERSION - check_api_version(C_API_VERSION, codegen_dir) - - generate_umath_py = join(codegen_dir, 'generate_umath.py') - n = dot_join(config.name, 'generate_umath') - 
generate_umath = npy_load_module('_'.join(n.split('.')), - generate_umath_py, ('.py', 'U', 1)) - - header_dir = 'include/numpy' # this is relative to config.path_in_package - - cocache = CallOnceOnly() - - def generate_config_h(ext, build_dir): - target = join(build_dir, header_dir, 'config.h') - d = os.path.dirname(target) - if not os.path.exists(d): - os.makedirs(d) - - if newer(__file__, target): - config_cmd = config.get_config_cmd() - log.info('Generating %s', target) - - # Check sizeof - moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir) - - # Check math library and C99 math funcs availability - mathlibs = check_mathlib(config_cmd) - moredefs.append(('MATHLIB', ','.join(mathlibs))) - - check_math_capabilities(config_cmd, moredefs, mathlibs) - moredefs.extend(cocache.check_ieee_macros(config_cmd)[0]) - moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0]) - - # Signal check - if is_npy_no_signal(): - moredefs.append('__NPY_PRIVATE_NO_SIGNAL') - - # Windows checks - if sys.platform == 'win32' or os.name == 'nt': - win32_checks(moredefs) - - # C99 restrict keyword - moredefs.append(('NPY_RESTRICT', config_cmd.check_restrict())) - - # Inline check - inline = config_cmd.check_inline() - - # Use relaxed stride checking - if NPY_RELAXED_STRIDES_CHECKING: - moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1)) - - # Use bogus stride debug aid when relaxed strides are enabled - if NPY_RELAXED_STRIDES_DEBUG: - moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1)) - - # Get long double representation - rep = check_long_double_representation(config_cmd) - moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1)) - - if check_for_right_shift_internal_compiler_error(config_cmd): - moredefs.append('NPY_DO_NOT_OPTIMIZE_LONG_right_shift') - moredefs.append('NPY_DO_NOT_OPTIMIZE_ULONG_right_shift') - moredefs.append('NPY_DO_NOT_OPTIMIZE_LONGLONG_right_shift') - moredefs.append('NPY_DO_NOT_OPTIMIZE_ULONGLONG_right_shift') - - # Py3K check - if sys.version_info[0] >= 3: 
- moredefs.append(('NPY_PY3K', 1)) - - # Generate the config.h file from moredefs - with open(target, 'w') as target_f: - for d in moredefs: - if isinstance(d, str): - target_f.write('#define %s\n' % (d)) - else: - target_f.write('#define %s %s\n' % (d[0], d[1])) - - # define inline to our keyword, or nothing - target_f.write('#ifndef __cplusplus\n') - if inline == 'inline': - target_f.write('/* #undef inline */\n') - else: - target_f.write('#define inline %s\n' % inline) - target_f.write('#endif\n') - - # add the guard to make sure config.h is never included directly, - # but always through npy_config.h - target_f.write(textwrap.dedent(""" - #ifndef _NPY_NPY_CONFIG_H_ - #error config.h should never be included directly, include npy_config.h instead - #endif - """)) - - log.info('File: %s' % target) - with open(target) as target_f: - log.info(target_f.read()) - log.info('EOF') - else: - mathlibs = [] - with open(target) as target_f: - for line in target_f: - s = '#define MATHLIB' - if line.startswith(s): - value = line[len(s):].strip() - if value: - mathlibs.extend(value.split(',')) - - # Ugly: this can be called within a library and not an extension, - # in which case there is no libraries attributes (and none is - # needed). 
- if hasattr(ext, 'libraries'): - ext.libraries.extend(mathlibs) - - incl_dir = os.path.dirname(target) - if incl_dir not in config.numpy_include_dirs: - config.numpy_include_dirs.append(incl_dir) - - return target - - def generate_numpyconfig_h(ext, build_dir): - """Depends on config.h: generate_config_h has to be called before !""" - # put common include directory in build_dir on search path - # allows using code generation in headers headers - config.add_include_dirs(join(build_dir, "src", "common")) - config.add_include_dirs(join(build_dir, "src", "npymath")) - - target = join(build_dir, header_dir, '_numpyconfig.h') - d = os.path.dirname(target) - if not os.path.exists(d): - os.makedirs(d) - if newer(__file__, target): - config_cmd = config.get_config_cmd() - log.info('Generating %s', target) - - # Check sizeof - ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir) - - if is_npy_no_signal(): - moredefs.append(('NPY_NO_SIGNAL', 1)) - - if is_npy_no_smp(): - moredefs.append(('NPY_NO_SMP', 1)) - else: - moredefs.append(('NPY_NO_SMP', 0)) - - mathlibs = check_mathlib(config_cmd) - moredefs.extend(cocache.check_ieee_macros(config_cmd)[1]) - moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1]) - - if NPY_RELAXED_STRIDES_CHECKING: - moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1)) - - if NPY_RELAXED_STRIDES_DEBUG: - moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1)) - - # Check whether we can use inttypes (C99) formats - if config_cmd.check_decl('PRIdPTR', headers=['inttypes.h']): - moredefs.append(('NPY_USE_C99_FORMATS', 1)) - - # visibility check - hidden_visibility = visibility_define(config_cmd) - moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility)) - - # Add the C API/ABI versions - moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION)) - moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION)) - - # Add moredefs to header - with open(target, 'w') as target_f: - for d in moredefs: - if isinstance(d, str): - 
target_f.write('#define %s\n' % (d)) - else: - target_f.write('#define %s %s\n' % (d[0], d[1])) - - # Define __STDC_FORMAT_MACROS - target_f.write(textwrap.dedent(""" - #ifndef __STDC_FORMAT_MACROS - #define __STDC_FORMAT_MACROS 1 - #endif - """)) - - # Dump the numpyconfig.h header to stdout - log.info('File: %s' % target) - with open(target) as target_f: - log.info(target_f.read()) - log.info('EOF') - config.add_data_files((header_dir, target)) - return target - - def generate_api_func(module_name): - def generate_api(ext, build_dir): - script = join(codegen_dir, module_name + '.py') - sys.path.insert(0, codegen_dir) - try: - m = __import__(module_name) - log.info('executing %s', script) - h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir)) - finally: - del sys.path[0] - config.add_data_files((header_dir, h_file), - (header_dir, doc_file)) - return (h_file,) - return generate_api - - generate_numpy_api = generate_api_func('generate_numpy_api') - generate_ufunc_api = generate_api_func('generate_ufunc_api') - - config.add_include_dirs(join(local_dir, "src", "common")) - config.add_include_dirs(join(local_dir, "src")) - config.add_include_dirs(join(local_dir)) - - config.add_data_dir('include/numpy') - config.add_include_dirs(join('src', 'npymath')) - config.add_include_dirs(join('src', 'multiarray')) - config.add_include_dirs(join('src', 'umath')) - config.add_include_dirs(join('src', 'npysort')) - - config.add_define_macros([("NPY_INTERNAL_BUILD", "1")]) # this macro indicates that Numpy build is in process - config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")]) - if sys.platform[:3] == "aix": - config.add_define_macros([("_LARGE_FILES", None)]) - else: - config.add_define_macros([("_FILE_OFFSET_BITS", "64")]) - config.add_define_macros([('_LARGEFILE_SOURCE', '1')]) - config.add_define_macros([('_LARGEFILE64_SOURCE', '1')]) - - config.numpy_include_dirs.extend(config.paths('include')) - - deps = [join('src', 'npymath', '_signbit.c'), - 
join('include', 'numpy', '*object.h'), - join(codegen_dir, 'genapi.py'), - ] - - ####################################################################### - # npymath library # - ####################################################################### - - subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")]) - - def get_mathlib_info(*args): - # Another ugly hack: the mathlib info is known once build_src is run, - # but we cannot use add_installed_pkg_config here either, so we only - # update the substitution dictionary during npymath build - config_cmd = config.get_config_cmd() - - # Check that the toolchain works, to fail early if it doesn't - # (avoid late errors with MATHLIB which are confusing if the - # compiler does not work). - st = config_cmd.try_link('int main(void) { return 0;}') - if not st: - # rerun the failing command in verbose mode - config_cmd.compiler.verbose = True - config_cmd.try_link('int main(void) { return 0;}') - raise RuntimeError("Broken toolchain: cannot link a simple C program") - mlibs = check_mathlib(config_cmd) - - posix_mlib = ' '.join(['-l%s' % l for l in mlibs]) - msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs]) - subst_dict["posix_mathlib"] = posix_mlib - subst_dict["msvc_mathlib"] = msvc_mlib - - npymath_sources = [join('src', 'npymath', 'npy_math_internal.h.src'), - join('src', 'npymath', 'npy_math.c'), - join('src', 'npymath', 'ieee754.c.src'), - join('src', 'npymath', 'npy_math_complex.c.src'), - join('src', 'npymath', 'halffloat.c') - ] - - # Must be true for CRT compilers but not MinGW/cygwin. See gh-9977. 
- # Intel and Clang also don't seem happy with /GL - is_msvc = (platform.platform().startswith('Windows') and - platform.python_compiler().startswith('MS')) - config.add_installed_library('npymath', - sources=npymath_sources + [get_mathlib_info], - install_dir='lib', - build_info={ - 'include_dirs' : [], # empty list required for creating npy_math_internal.h - 'extra_compiler_args' : (['/GL-'] if is_msvc else []), - }) - config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config", - subst_dict) - config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config", - subst_dict) - - ####################################################################### - # npysort library # - ####################################################################### - - # This library is created for the build but it is not installed - npysort_sources = [join('src', 'common', 'npy_sort.h.src'), - join('src', 'npysort', 'quicksort.c.src'), - join('src', 'npysort', 'mergesort.c.src'), - join('src', 'npysort', 'timsort.c.src'), - join('src', 'npysort', 'heapsort.c.src'), - join('src', 'npysort', 'radixsort.c.src'), - join('src', 'common', 'npy_partition.h.src'), - join('src', 'npysort', 'selection.c.src'), - join('src', 'common', 'npy_binsearch.h.src'), - join('src', 'npysort', 'binsearch.c.src'), - ] - config.add_library('npysort', - sources=npysort_sources, - include_dirs=[]) - - ####################################################################### - # multiarray_tests module # - ####################################################################### - - config.add_extension('_multiarray_tests', - sources=[join('src', 'multiarray', '_multiarray_tests.c.src'), - join('src', 'common', 'mem_overlap.c')], - depends=[join('src', 'common', 'mem_overlap.h'), - join('src', 'common', 'npy_extint128.h')], - libraries=['npymath']) - - ####################################################################### - # _multiarray_umath module - common part # - 
####################################################################### - - common_deps = [ - join('src', 'common', 'array_assign.h'), - join('src', 'common', 'binop_override.h'), - join('src', 'common', 'cblasfuncs.h'), - join('src', 'common', 'lowlevel_strided_loops.h'), - join('src', 'common', 'mem_overlap.h'), - join('src', 'common', 'npy_cblas.h'), - join('src', 'common', 'npy_config.h'), - join('src', 'common', 'npy_ctypes.h'), - join('src', 'common', 'npy_extint128.h'), - join('src', 'common', 'npy_import.h'), - join('src', 'common', 'npy_longdouble.h'), - join('src', 'common', 'templ_common.h.src'), - join('src', 'common', 'ucsnarrow.h'), - join('src', 'common', 'ufunc_override.h'), - join('src', 'common', 'umathmodule.h'), - join('src', 'common', 'numpyos.h'), - ] - - common_src = [ - join('src', 'common', 'array_assign.c'), - join('src', 'common', 'mem_overlap.c'), - join('src', 'common', 'npy_longdouble.c'), - join('src', 'common', 'templ_common.h.src'), - join('src', 'common', 'ucsnarrow.c'), - join('src', 'common', 'ufunc_override.c'), - join('src', 'common', 'numpyos.c'), - ] - - if os.environ.get('NPY_USE_BLAS_ILP64', "0") != "0": - blas_info = get_info('blas_ilp64_opt', 2) - else: - blas_info = get_info('blas_opt', 0) - - have_blas = blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []) - - if have_blas: - extra_info = blas_info - # These files are also in MANIFEST.in so that they are always in - # the source distribution independently of HAVE_CBLAS. 
- common_src.extend([join('src', 'common', 'cblasfuncs.c'), - join('src', 'common', 'python_xerbla.c'), - ]) - if uses_accelerate_framework(blas_info): - common_src.extend(get_sgemv_fix()) - else: - extra_info = {} - - ####################################################################### - # _multiarray_umath module - multiarray part # - ####################################################################### - - multiarray_deps = [ - join('src', 'multiarray', 'arrayobject.h'), - join('src', 'multiarray', 'arraytypes.h'), - join('src', 'multiarray', 'arrayfunction_override.h'), - join('src', 'multiarray', 'npy_buffer.h'), - join('src', 'multiarray', 'calculation.h'), - join('src', 'multiarray', 'common.h'), - join('src', 'multiarray', 'convert_datatype.h'), - join('src', 'multiarray', 'convert.h'), - join('src', 'multiarray', 'conversion_utils.h'), - join('src', 'multiarray', 'ctors.h'), - join('src', 'multiarray', 'descriptor.h'), - join('src', 'multiarray', 'dragon4.h'), - join('src', 'multiarray', 'getset.h'), - join('src', 'multiarray', 'hashdescr.h'), - join('src', 'multiarray', 'iterators.h'), - join('src', 'multiarray', 'mapping.h'), - join('src', 'multiarray', 'methods.h'), - join('src', 'multiarray', 'multiarraymodule.h'), - join('src', 'multiarray', 'nditer_impl.h'), - join('src', 'multiarray', 'number.h'), - join('src', 'multiarray', 'refcount.h'), - join('src', 'multiarray', 'scalartypes.h'), - join('src', 'multiarray', 'sequence.h'), - join('src', 'multiarray', 'shape.h'), - join('src', 'multiarray', 'strfuncs.h'), - join('src', 'multiarray', 'typeinfo.h'), - join('src', 'multiarray', 'usertypes.h'), - join('src', 'multiarray', 'vdot.h'), - join('include', 'numpy', 'arrayobject.h'), - join('include', 'numpy', '_neighborhood_iterator_imp.h'), - join('include', 'numpy', 'npy_endian.h'), - join('include', 'numpy', 'arrayscalars.h'), - join('include', 'numpy', 'noprefix.h'), - join('include', 'numpy', 'npy_interrupt.h'), - join('include', 'numpy', 
'npy_3kcompat.h'), - join('include', 'numpy', 'npy_math.h'), - join('include', 'numpy', 'halffloat.h'), - join('include', 'numpy', 'npy_common.h'), - join('include', 'numpy', 'npy_os.h'), - join('include', 'numpy', 'utils.h'), - join('include', 'numpy', 'ndarrayobject.h'), - join('include', 'numpy', 'npy_cpu.h'), - join('include', 'numpy', 'numpyconfig.h'), - join('include', 'numpy', 'ndarraytypes.h'), - join('include', 'numpy', 'npy_1_7_deprecated_api.h'), - # add library sources as distuils does not consider libraries - # dependencies - ] + npysort_sources + npymath_sources - - multiarray_src = [ - join('src', 'multiarray', 'alloc.c'), - join('src', 'multiarray', 'arrayobject.c'), - join('src', 'multiarray', 'arraytypes.c.src'), - join('src', 'multiarray', 'array_assign_scalar.c'), - join('src', 'multiarray', 'array_assign_array.c'), - join('src', 'multiarray', 'arrayfunction_override.c'), - join('src', 'multiarray', 'buffer.c'), - join('src', 'multiarray', 'calculation.c'), - join('src', 'multiarray', 'compiled_base.c'), - join('src', 'multiarray', 'common.c'), - join('src', 'multiarray', 'convert.c'), - join('src', 'multiarray', 'convert_datatype.c'), - join('src', 'multiarray', 'conversion_utils.c'), - join('src', 'multiarray', 'ctors.c'), - join('src', 'multiarray', 'datetime.c'), - join('src', 'multiarray', 'datetime_strings.c'), - join('src', 'multiarray', 'datetime_busday.c'), - join('src', 'multiarray', 'datetime_busdaycal.c'), - join('src', 'multiarray', 'descriptor.c'), - join('src', 'multiarray', 'dragon4.c'), - join('src', 'multiarray', 'dtype_transfer.c'), - join('src', 'multiarray', 'einsum.c.src'), - join('src', 'multiarray', 'flagsobject.c'), - join('src', 'multiarray', 'getset.c'), - join('src', 'multiarray', 'hashdescr.c'), - join('src', 'multiarray', 'item_selection.c'), - join('src', 'multiarray', 'iterators.c'), - join('src', 'multiarray', 'lowlevel_strided_loops.c.src'), - join('src', 'multiarray', 'mapping.c'), - join('src', 'multiarray', 
'methods.c'), - join('src', 'multiarray', 'multiarraymodule.c'), - join('src', 'multiarray', 'nditer_templ.c.src'), - join('src', 'multiarray', 'nditer_api.c'), - join('src', 'multiarray', 'nditer_constr.c'), - join('src', 'multiarray', 'nditer_pywrap.c'), - join('src', 'multiarray', 'number.c'), - join('src', 'multiarray', 'refcount.c'), - join('src', 'multiarray', 'sequence.c'), - join('src', 'multiarray', 'shape.c'), - join('src', 'multiarray', 'scalarapi.c'), - join('src', 'multiarray', 'scalartypes.c.src'), - join('src', 'multiarray', 'strfuncs.c'), - join('src', 'multiarray', 'temp_elide.c'), - join('src', 'multiarray', 'typeinfo.c'), - join('src', 'multiarray', 'usertypes.c'), - join('src', 'multiarray', 'vdot.c'), - ] - - ####################################################################### - # _multiarray_umath module - umath part # - ####################################################################### - - def generate_umath_c(ext, build_dir): - target = join(build_dir, header_dir, '__umath_generated.c') - dir = os.path.dirname(target) - if not os.path.exists(dir): - os.makedirs(dir) - script = generate_umath_py - if newer(script, target): - with open(target, 'w') as f: - f.write(generate_umath.make_code(generate_umath.defdict, - generate_umath.__file__)) - return [] - - umath_src = [ - join('src', 'umath', 'umathmodule.c'), - join('src', 'umath', 'reduction.c'), - join('src', 'umath', 'funcs.inc.src'), - join('src', 'umath', 'simd.inc.src'), - join('src', 'umath', 'loops.h.src'), - join('src', 'umath', 'loops.c.src'), - join('src', 'umath', 'matmul.h.src'), - join('src', 'umath', 'matmul.c.src'), - join('src', 'umath', 'clip.h.src'), - join('src', 'umath', 'clip.c.src'), - join('src', 'umath', 'ufunc_object.c'), - join('src', 'umath', 'extobj.c'), - join('src', 'umath', 'cpuid.c'), - join('src', 'umath', 'scalarmath.c.src'), - join('src', 'umath', 'ufunc_type_resolution.c'), - join('src', 'umath', 'override.c'), - ] - - umath_deps = [ - 
generate_umath_py, - join('include', 'numpy', 'npy_math.h'), - join('include', 'numpy', 'halffloat.h'), - join('src', 'multiarray', 'common.h'), - join('src', 'multiarray', 'number.h'), - join('src', 'common', 'templ_common.h.src'), - join('src', 'umath', 'simd.inc.src'), - join('src', 'umath', 'override.h'), - join(codegen_dir, 'generate_ufunc_api.py'), - ] - - config.add_extension('_multiarray_umath', - sources=multiarray_src + umath_src + - npymath_sources + common_src + - [generate_config_h, - generate_numpyconfig_h, - generate_numpy_api, - join(codegen_dir, 'generate_numpy_api.py'), - join('*.py'), - generate_umath_c, - generate_ufunc_api, - ], - depends=deps + multiarray_deps + umath_deps + - common_deps, - libraries=['npymath', 'npysort'], - extra_info=extra_info) - - ####################################################################### - # umath_tests module # - ####################################################################### - - config.add_extension('_umath_tests', - sources=[join('src', 'umath', '_umath_tests.c.src')]) - - ####################################################################### - # custom rational dtype module # - ####################################################################### - - config.add_extension('_rational_tests', - sources=[join('src', 'umath', '_rational_tests.c.src')]) - - ####################################################################### - # struct_ufunc_test module # - ####################################################################### - - config.add_extension('_struct_ufunc_tests', - sources=[join('src', 'umath', '_struct_ufunc_tests.c.src')]) - - - ####################################################################### - # operand_flag_tests module # - ####################################################################### - - config.add_extension('_operand_flag_tests', - sources=[join('src', 'umath', '_operand_flag_tests.c.src')]) - - config.add_data_dir('tests') - config.add_data_dir('tests/data') - 
- config.make_svn_version_py() - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/venv/lib/python3.7/site-packages/numpy/core/setup_common.py b/venv/lib/python3.7/site-packages/numpy/core/setup_common.py deleted file mode 100644 index 6356f08..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/setup_common.py +++ /dev/null @@ -1,457 +0,0 @@ -from __future__ import division, absolute_import, print_function - -# Code common to build tools -import sys -import warnings -import copy -import binascii -import textwrap - -from numpy.distutils.misc_util import mingw32 - - -#------------------- -# Versioning support -#------------------- -# How to change C_API_VERSION ? -# - increase C_API_VERSION value -# - record the hash for the new C API with the cversions.py script -# and add the hash to cversions.txt -# The hash values are used to remind developers when the C API number was not -# updated - generates a MismatchCAPIWarning warning which is turned into an -# exception for released version. - -# Binary compatibility version number. This number is increased whenever the -# C-API is changed such that binary compatibility is broken, i.e. whenever a -# recompile of extension modules is needed. -C_ABI_VERSION = 0x01000009 - -# Minor API version. This number is increased whenever a change is made to the -# C-API -- whether it breaks binary compatibility or not. Some changes, such -# as adding a function pointer to the end of the function table, can be made -# without breaking binary compatibility. In this case, only the C_API_VERSION -# (*not* C_ABI_VERSION) would be increased. Whenever binary compatibility is -# broken, both C_API_VERSION and C_ABI_VERSION should be increased. 
-# -# 0x00000008 - 1.7.x -# 0x00000009 - 1.8.x -# 0x00000009 - 1.9.x -# 0x0000000a - 1.10.x -# 0x0000000a - 1.11.x -# 0x0000000a - 1.12.x -# 0x0000000b - 1.13.x -# 0x0000000c - 1.14.x -# 0x0000000c - 1.15.x -# 0x0000000d - 1.16.x -C_API_VERSION = 0x0000000d - -class MismatchCAPIWarning(Warning): - pass - -def is_released(config): - """Return True if a released version of numpy is detected.""" - from distutils.version import LooseVersion - - v = config.get_version('../version.py') - if v is None: - raise ValueError("Could not get version") - pv = LooseVersion(vstring=v).version - if len(pv) > 3: - return False - return True - -def get_api_versions(apiversion, codegen_dir): - """ - Return current C API checksum and the recorded checksum. - - Return current C API checksum and the recorded checksum for the given - version of the C API version. - - """ - # Compute the hash of the current API as defined in the .txt files in - # code_generators - sys.path.insert(0, codegen_dir) - try: - m = __import__('genapi') - numpy_api = __import__('numpy_api') - curapi_hash = m.fullapi_hash(numpy_api.full_api) - apis_hash = m.get_versions_hash() - finally: - del sys.path[0] - - return curapi_hash, apis_hash[apiversion] - -def check_api_version(apiversion, codegen_dir): - """Emits a MismatchCAPIWarning if the C API version needs updating.""" - curapi_hash, api_hash = get_api_versions(apiversion, codegen_dir) - - # If different hash, it means that the api .txt files in - # codegen_dir have been updated without the API version being - # updated. Any modification in those .txt files should be reflected - # in the api and eventually abi versions. - # To compute the checksum of the current API, use numpy/core/cversions.py - if not curapi_hash == api_hash: - msg = ("API mismatch detected, the C API version " - "numbers have to be updated. Current C api version is %d, " - "with checksum %s, but recorded checksum for C API version %d " - "in core/codegen_dir/cversions.txt is %s. 
If functions were " - "added in the C API, you have to update C_API_VERSION in %s." - ) - warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash, - __file__), - MismatchCAPIWarning, stacklevel=2) -# Mandatory functions: if not found, fail the build -MANDATORY_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", - "floor", "ceil", "sqrt", "log10", "log", "exp", "asin", - "acos", "atan", "fmod", 'modf', 'frexp', 'ldexp'] - -# Standard functions which may not be available and for which we have a -# replacement implementation. Note that some of these are C99 functions. -OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh", - "rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow", - "copysign", "nextafter", "ftello", "fseeko", - "strtoll", "strtoull", "cbrt", "strtold_l", "fallocate", - "backtrace", "madvise"] - - -OPTIONAL_HEADERS = [ -# sse headers only enabled automatically on amd64/x32 builds - "xmmintrin.h", # SSE - "emmintrin.h", # SSE2 - "immintrin.h", # AVX - "features.h", # for glibc version linux - "xlocale.h", # see GH#8367 - "dlfcn.h", # dladdr - "sys/mman.h", #madvise -] - -# optional gcc compiler builtins and their call arguments and optional a -# required header and definition name (HAVE_ prepended) -# call arguments are required as the compiler will do strict signature checking -OPTIONAL_INTRINSICS = [("__builtin_isnan", '5.'), - ("__builtin_isinf", '5.'), - ("__builtin_isfinite", '5.'), - ("__builtin_bswap32", '5u'), - ("__builtin_bswap64", '5u'), - ("__builtin_expect", '5, 0'), - ("__builtin_mul_overflow", '5, 5, (int*)5'), - # broken on OSX 10.11, make sure its not optimized away - ("volatile int r = __builtin_cpu_supports", '"sse"', - "stdio.h", "__BUILTIN_CPU_SUPPORTS"), - ("volatile int r = __builtin_cpu_supports", '"avx512f"', - "stdio.h", "__BUILTIN_CPU_SUPPORTS_AVX512F"), - # MMX only needed for icc, but some clangs don't have it - ("_m_from_int64", '0', "emmintrin.h"), - ("_mm_load_ps", '(float*)0', 
"xmmintrin.h"), # SSE - ("_mm_prefetch", '(float*)0, _MM_HINT_NTA', - "xmmintrin.h"), # SSE - ("_mm_load_pd", '(double*)0', "emmintrin.h"), # SSE2 - ("__builtin_prefetch", "(float*)0, 0, 3"), - # check that the linker can handle avx - ("__asm__ volatile", '"vpand %xmm1, %xmm2, %xmm3"', - "stdio.h", "LINK_AVX"), - ("__asm__ volatile", '"vpand %ymm1, %ymm2, %ymm3"', - "stdio.h", "LINK_AVX2"), - ("__asm__ volatile", '"vpaddd %zmm1, %zmm2, %zmm3"', - "stdio.h", "LINK_AVX512F"), - ("__asm__ volatile", '"xgetbv"', "stdio.h", "XGETBV"), - ] - -# function attributes -# tested via "int %s %s(void *);" % (attribute, name) -# function name will be converted to HAVE_ preprocessor macro -OPTIONAL_FUNCTION_ATTRIBUTES = [('__attribute__((optimize("unroll-loops")))', - 'attribute_optimize_unroll_loops'), - ('__attribute__((optimize("O3")))', - 'attribute_optimize_opt_3'), - ('__attribute__((nonnull (1)))', - 'attribute_nonnull'), - ('__attribute__((target ("avx")))', - 'attribute_target_avx'), - ('__attribute__((target ("avx2")))', - 'attribute_target_avx2'), - ('__attribute__((target ("avx512f")))', - 'attribute_target_avx512f'), - ] - -# function attributes with intrinsics -# To ensure your compiler can compile avx intrinsics with just the attributes -# gcc 4.8.4 support attributes but not with intrisics -# tested via "#include<%s> int %s %s(void *){code; return 0;};" % (header, attribute, name, code) -# function name will be converted to HAVE_ preprocessor macro -OPTIONAL_FUNCTION_ATTRIBUTES_WITH_INTRINSICS = [('__attribute__((target("avx2,fma")))', - 'attribute_target_avx2_with_intrinsics', - '__m256 temp = _mm256_set1_ps(1.0); temp = \ - _mm256_fmadd_ps(temp, temp, temp)', - 'immintrin.h'), - ('__attribute__((target("avx512f")))', - 'attribute_target_avx512f_with_intrinsics', - '__m512 temp = _mm512_set1_ps(1.0)', - 'immintrin.h'), - ] - -# variable attributes tested via "int %s a" % attribute -OPTIONAL_VARIABLE_ATTRIBUTES = ["__thread", "__declspec(thread)"] - -# Subset of 
OPTIONAL_STDFUNCS which may already have HAVE_* defined by Python.h -OPTIONAL_STDFUNCS_MAYBE = [ - "expm1", "log1p", "acosh", "atanh", "asinh", "hypot", "copysign", - "ftello", "fseeko" - ] - -# C99 functions: float and long double versions -C99_FUNCS = [ - "sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", "ceil", - "rint", "trunc", "sqrt", "log10", "log", "log1p", "exp", "expm1", - "asin", "acos", "atan", "asinh", "acosh", "atanh", "hypot", "atan2", - "pow", "fmod", "modf", 'frexp', 'ldexp', "exp2", "log2", "copysign", - "nextafter", "cbrt" - ] -C99_FUNCS_SINGLE = [f + 'f' for f in C99_FUNCS] -C99_FUNCS_EXTENDED = [f + 'l' for f in C99_FUNCS] -C99_COMPLEX_TYPES = [ - 'complex double', 'complex float', 'complex long double' - ] -C99_COMPLEX_FUNCS = [ - "cabs", "cacos", "cacosh", "carg", "casin", "casinh", "catan", - "catanh", "ccos", "ccosh", "cexp", "cimag", "clog", "conj", "cpow", - "cproj", "creal", "csin", "csinh", "csqrt", "ctan", "ctanh" - ] - -def fname2def(name): - return "HAVE_%s" % name.upper() - -def sym2def(symbol): - define = symbol.replace(' ', '') - return define.upper() - -def type2def(symbol): - define = symbol.replace(' ', '_') - return define.upper() - -# Code to detect long double representation taken from MPFR m4 macro -def check_long_double_representation(cmd): - cmd._check_compiler() - body = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'} - - # Disable whole program optimization (the default on vs2015, with python 3.5+) - # which generates intermediary object files and prevents checking the - # float representation. - if sys.platform == "win32" and not mingw32(): - try: - cmd.compiler.compile_options.remove("/GL") - except (AttributeError, ValueError): - pass - - # Disable multi-file interprocedural optimization in the Intel compiler on Linux - # which generates intermediary object files and prevents checking the - # float representation. 
- elif (sys.platform != "win32" - and cmd.compiler.compiler_type.startswith('intel') - and '-ipo' in cmd.compiler.cc_exe): - newcompiler = cmd.compiler.cc_exe.replace(' -ipo', '') - cmd.compiler.set_executables( - compiler=newcompiler, - compiler_so=newcompiler, - compiler_cxx=newcompiler, - linker_exe=newcompiler, - linker_so=newcompiler + ' -shared' - ) - - # We need to use _compile because we need the object filename - src, obj = cmd._compile(body, None, None, 'c') - try: - ltype = long_double_representation(pyod(obj)) - return ltype - except ValueError: - # try linking to support CC="gcc -flto" or icc -ipo - # struct needs to be volatile so it isn't optimized away - # additionally "clang -flto" requires the foo struct to be used - body = body.replace('struct', 'volatile struct') - body += "int main(void) { return foo.before[0]; }\n" - src, obj = cmd._compile(body, None, None, 'c') - cmd.temp_files.append("_configtest") - cmd.compiler.link_executable([obj], "_configtest") - ltype = long_double_representation(pyod("_configtest")) - return ltype - finally: - cmd._clean() - -LONG_DOUBLE_REPRESENTATION_SRC = r""" -/* "before" is 16 bytes to ensure there's no padding between it and "x". - * We're not expecting any "long double" bigger than 16 bytes or with - * alignment requirements stricter than 16 bytes. */ -typedef %(type)s test_type; - -struct { - char before[16]; - test_type x; - char after[8]; -} foo = { - { '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - '\001', '\043', '\105', '\147', '\211', '\253', '\315', '\357' }, - -123456789.0, - { '\376', '\334', '\272', '\230', '\166', '\124', '\062', '\020' } -}; -""" - -def pyod(filename): - """Python implementation of the od UNIX utility (od -b, more exactly). - - Parameters - ---------- - filename : str - name of the file to get the dump from. 
- - Returns - ------- - out : seq - list of lines of od output - - Note - ---- - We only implement enough to get the necessary information for long double - representation, this is not intended as a compatible replacement for od. - """ - def _pyod2(): - out = [] - - with open(filename, 'rb') as fid: - yo = [int(oct(int(binascii.b2a_hex(o), 16))) for o in fid.read()] - for i in range(0, len(yo), 16): - line = ['%07d' % int(oct(i))] - line.extend(['%03d' % c for c in yo[i:i+16]]) - out.append(" ".join(line)) - return out - - def _pyod3(): - out = [] - - with open(filename, 'rb') as fid: - yo2 = [oct(o)[2:] for o in fid.read()] - for i in range(0, len(yo2), 16): - line = ['%07d' % int(oct(i)[2:])] - line.extend(['%03d' % int(c) for c in yo2[i:i+16]]) - out.append(" ".join(line)) - return out - - if sys.version_info[0] < 3: - return _pyod2() - else: - return _pyod3() - -_BEFORE_SEQ = ['000', '000', '000', '000', '000', '000', '000', '000', - '001', '043', '105', '147', '211', '253', '315', '357'] -_AFTER_SEQ = ['376', '334', '272', '230', '166', '124', '062', '020'] - -_IEEE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000'] -_IEEE_DOUBLE_LE = _IEEE_DOUBLE_BE[::-1] -_INTEL_EXTENDED_12B = ['000', '000', '000', '000', '240', '242', '171', '353', - '031', '300', '000', '000'] -_INTEL_EXTENDED_16B = ['000', '000', '000', '000', '240', '242', '171', '353', - '031', '300', '000', '000', '000', '000', '000', '000'] -_MOTOROLA_EXTENDED_12B = ['300', '031', '000', '000', '353', '171', - '242', '240', '000', '000', '000', '000'] -_IEEE_QUAD_PREC_BE = ['300', '031', '326', '363', '105', '100', '000', '000', - '000', '000', '000', '000', '000', '000', '000', '000'] -_IEEE_QUAD_PREC_LE = _IEEE_QUAD_PREC_BE[::-1] -_IBM_DOUBLE_DOUBLE_BE = (['301', '235', '157', '064', '124', '000', '000', '000'] + - ['000'] * 8) -_IBM_DOUBLE_DOUBLE_LE = (['000', '000', '000', '124', '064', '157', '235', '301'] + - ['000'] * 8) - -def long_double_representation(lines): - """Given a 
binary dump as given by GNU od -b, look for long double - representation.""" - - # Read contains a list of 32 items, each item is a byte (in octal - # representation, as a string). We 'slide' over the output until read is of - # the form before_seq + content + after_sequence, where content is the long double - # representation: - # - content is 12 bytes: 80 bits Intel representation - # - content is 16 bytes: 80 bits Intel representation (64 bits) or quad precision - # - content is 8 bytes: same as double (not implemented yet) - read = [''] * 32 - saw = None - for line in lines: - # we skip the first word, as od -b output an index at the beginning of - # each line - for w in line.split()[1:]: - read.pop(0) - read.append(w) - - # If the end of read is equal to the after_sequence, read contains - # the long double - if read[-8:] == _AFTER_SEQ: - saw = copy.copy(read) - # if the content was 12 bytes, we only have 32 - 8 - 12 = 12 - # "before" bytes. In other words the first 4 "before" bytes went - # past the sliding window. - if read[:12] == _BEFORE_SEQ[4:]: - if read[12:-8] == _INTEL_EXTENDED_12B: - return 'INTEL_EXTENDED_12_BYTES_LE' - if read[12:-8] == _MOTOROLA_EXTENDED_12B: - return 'MOTOROLA_EXTENDED_12_BYTES_BE' - # if the content was 16 bytes, we are left with 32-8-16 = 16 - # "before" bytes, so 8 went past the sliding window. 
- elif read[:8] == _BEFORE_SEQ[8:]: - if read[8:-8] == _INTEL_EXTENDED_16B: - return 'INTEL_EXTENDED_16_BYTES_LE' - elif read[8:-8] == _IEEE_QUAD_PREC_BE: - return 'IEEE_QUAD_BE' - elif read[8:-8] == _IEEE_QUAD_PREC_LE: - return 'IEEE_QUAD_LE' - elif read[8:-8] == _IBM_DOUBLE_DOUBLE_LE: - return 'IBM_DOUBLE_DOUBLE_LE' - elif read[8:-8] == _IBM_DOUBLE_DOUBLE_BE: - return 'IBM_DOUBLE_DOUBLE_BE' - # if the content was 8 bytes, left with 32-8-8 = 16 bytes - elif read[:16] == _BEFORE_SEQ: - if read[16:-8] == _IEEE_DOUBLE_LE: - return 'IEEE_DOUBLE_LE' - elif read[16:-8] == _IEEE_DOUBLE_BE: - return 'IEEE_DOUBLE_BE' - - if saw is not None: - raise ValueError("Unrecognized format (%s)" % saw) - else: - # We never detected the after_sequence - raise ValueError("Could not lock sequences (%s)" % saw) - - -def check_for_right_shift_internal_compiler_error(cmd): - """ - On our arm CI, this fails with an internal compilation error - - The failure looks like the following, and can be reproduced on ARM64 GCC 5.4: - - : In function 'right_shift': - :4:20: internal compiler error: in expand_shift_1, at expmed.c:2349 - ip1[i] = ip1[i] >> in2; - ^ - Please submit a full bug report, - with preprocessed source if appropriate. - See for instructions. 
- Compiler returned: 1 - - This function returns True if this compiler bug is present, and we need to - turn off optimization for the function - """ - cmd._check_compiler() - has_optimize = cmd.try_compile(textwrap.dedent("""\ - __attribute__((optimize("O3"))) void right_shift() {} - """), None, None) - if not has_optimize: - return False - - no_err = cmd.try_compile(textwrap.dedent("""\ - typedef long the_type; /* fails also for unsigned and long long */ - __attribute__((optimize("O3"))) void right_shift(the_type in2, the_type *ip1, int n) { - for (int i = 0; i < n; i++) { - if (in2 < (the_type)sizeof(the_type) * 8) { - ip1[i] = ip1[i] >> in2; - } - } - } - """), None, None) - return not no_err diff --git a/venv/lib/python3.7/site-packages/numpy/core/shape_base.py b/venv/lib/python3.7/site-packages/numpy/core/shape_base.py deleted file mode 100644 index 31b1c20..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/shape_base.py +++ /dev/null @@ -1,906 +0,0 @@ -from __future__ import division, absolute_import, print_function - -__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack', - 'stack', 'vstack'] - -import functools -import operator -import warnings - -from . import numeric as _nx -from . import overrides -from ._asarray import array, asanyarray -from .multiarray import normalize_axis_index -from . import fromnumeric as _from_nx - - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - - -def _atleast_1d_dispatcher(*arys): - return arys - - -@array_function_dispatch(_atleast_1d_dispatcher) -def atleast_1d(*arys): - """ - Convert inputs to arrays with at least one dimension. - - Scalar inputs are converted to 1-dimensional arrays, whilst - higher-dimensional inputs are preserved. - - Parameters - ---------- - arys1, arys2, ... : array_like - One or more input arrays. - - Returns - ------- - ret : ndarray - An array, or list of arrays, each with ``a.ndim >= 1``. 
- Copies are made only if necessary. - - See Also - -------- - atleast_2d, atleast_3d - - Examples - -------- - >>> np.atleast_1d(1.0) - array([1.]) - - >>> x = np.arange(9.0).reshape(3,3) - >>> np.atleast_1d(x) - array([[0., 1., 2.], - [3., 4., 5.], - [6., 7., 8.]]) - >>> np.atleast_1d(x) is x - True - - >>> np.atleast_1d(1, [3, 4]) - [array([1]), array([3, 4])] - - """ - res = [] - for ary in arys: - ary = asanyarray(ary) - if ary.ndim == 0: - result = ary.reshape(1) - else: - result = ary - res.append(result) - if len(res) == 1: - return res[0] - else: - return res - - -def _atleast_2d_dispatcher(*arys): - return arys - - -@array_function_dispatch(_atleast_2d_dispatcher) -def atleast_2d(*arys): - """ - View inputs as arrays with at least two dimensions. - - Parameters - ---------- - arys1, arys2, ... : array_like - One or more array-like sequences. Non-array inputs are converted - to arrays. Arrays that already have two or more dimensions are - preserved. - - Returns - ------- - res, res2, ... : ndarray - An array, or list of arrays, each with ``a.ndim >= 2``. - Copies are avoided where possible, and views with two or more - dimensions are returned. - - See Also - -------- - atleast_1d, atleast_3d - - Examples - -------- - >>> np.atleast_2d(3.0) - array([[3.]]) - - >>> x = np.arange(3.0) - >>> np.atleast_2d(x) - array([[0., 1., 2.]]) - >>> np.atleast_2d(x).base is x - True - - >>> np.atleast_2d(1, [1, 2], [[1, 2]]) - [array([[1]]), array([[1, 2]]), array([[1, 2]])] - - """ - res = [] - for ary in arys: - ary = asanyarray(ary) - if ary.ndim == 0: - result = ary.reshape(1, 1) - elif ary.ndim == 1: - result = ary[_nx.newaxis, :] - else: - result = ary - res.append(result) - if len(res) == 1: - return res[0] - else: - return res - - -def _atleast_3d_dispatcher(*arys): - return arys - - -@array_function_dispatch(_atleast_3d_dispatcher) -def atleast_3d(*arys): - """ - View inputs as arrays with at least three dimensions. - - Parameters - ---------- - arys1, arys2, ... 
: array_like - One or more array-like sequences. Non-array inputs are converted to - arrays. Arrays that already have three or more dimensions are - preserved. - - Returns - ------- - res1, res2, ... : ndarray - An array, or list of arrays, each with ``a.ndim >= 3``. Copies are - avoided where possible, and views with three or more dimensions are - returned. For example, a 1-D array of shape ``(N,)`` becomes a view - of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a - view of shape ``(M, N, 1)``. - - See Also - -------- - atleast_1d, atleast_2d - - Examples - -------- - >>> np.atleast_3d(3.0) - array([[[3.]]]) - - >>> x = np.arange(3.0) - >>> np.atleast_3d(x).shape - (1, 3, 1) - - >>> x = np.arange(12.0).reshape(4,3) - >>> np.atleast_3d(x).shape - (4, 3, 1) - >>> np.atleast_3d(x).base is x.base # x is a reshape, so not base itself - True - - >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]): - ... print(arr, arr.shape) # doctest: +SKIP - ... - [[[1] - [2]]] (1, 2, 1) - [[[1] - [2]]] (1, 2, 1) - [[[1 2]]] (1, 1, 2) - - """ - res = [] - for ary in arys: - ary = asanyarray(ary) - if ary.ndim == 0: - result = ary.reshape(1, 1, 1) - elif ary.ndim == 1: - result = ary[_nx.newaxis, :, _nx.newaxis] - elif ary.ndim == 2: - result = ary[:, :, _nx.newaxis] - else: - result = ary - res.append(result) - if len(res) == 1: - return res[0] - else: - return res - - -def _arrays_for_stack_dispatcher(arrays, stacklevel=4): - if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'): - warnings.warn('arrays to stack must be passed as a "sequence" type ' - 'such as list or tuple. 
Support for non-sequence ' - 'iterables such as generators is deprecated as of ' - 'NumPy 1.16 and will raise an error in the future.', - FutureWarning, stacklevel=stacklevel) - return () - return arrays - - -def _vhstack_dispatcher(tup): - return _arrays_for_stack_dispatcher(tup) - - -@array_function_dispatch(_vhstack_dispatcher) -def vstack(tup): - """ - Stack arrays in sequence vertically (row wise). - - This is equivalent to concatenation along the first axis after 1-D arrays - of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by - `vsplit`. - - This function makes most sense for arrays with up to 3 dimensions. For - instance, for pixel-data with a height (first axis), width (second axis), - and r/g/b channels (third axis). The functions `concatenate`, `stack` and - `block` provide more general stacking and concatenation operations. - - Parameters - ---------- - tup : sequence of ndarrays - The arrays must have the same shape along all but the first axis. - 1-D arrays must have the same length. - - Returns - ------- - stacked : ndarray - The array formed by stacking the given arrays, will be at least 2-D. - - See Also - -------- - stack : Join a sequence of arrays along a new axis. - hstack : Stack arrays in sequence horizontally (column wise). - dstack : Stack arrays in sequence depth wise (along third dimension). - concatenate : Join a sequence of arrays along an existing axis. - vsplit : Split array into a list of multiple sub-arrays vertically. - block : Assemble arrays from blocks. 
- - Examples - -------- - >>> a = np.array([1, 2, 3]) - >>> b = np.array([2, 3, 4]) - >>> np.vstack((a,b)) - array([[1, 2, 3], - [2, 3, 4]]) - - >>> a = np.array([[1], [2], [3]]) - >>> b = np.array([[2], [3], [4]]) - >>> np.vstack((a,b)) - array([[1], - [2], - [3], - [2], - [3], - [4]]) - - """ - if not overrides.ARRAY_FUNCTION_ENABLED: - # raise warning if necessary - _arrays_for_stack_dispatcher(tup, stacklevel=2) - arrs = atleast_2d(*tup) - if not isinstance(arrs, list): - arrs = [arrs] - return _nx.concatenate(arrs, 0) - - -@array_function_dispatch(_vhstack_dispatcher) -def hstack(tup): - """ - Stack arrays in sequence horizontally (column wise). - - This is equivalent to concatenation along the second axis, except for 1-D - arrays where it concatenates along the first axis. Rebuilds arrays divided - by `hsplit`. - - This function makes most sense for arrays with up to 3 dimensions. For - instance, for pixel-data with a height (first axis), width (second axis), - and r/g/b channels (third axis). The functions `concatenate`, `stack` and - `block` provide more general stacking and concatenation operations. - - Parameters - ---------- - tup : sequence of ndarrays - The arrays must have the same shape along all but the second axis, - except 1-D arrays which can be any length. - - Returns - ------- - stacked : ndarray - The array formed by stacking the given arrays. - - See Also - -------- - stack : Join a sequence of arrays along a new axis. - vstack : Stack arrays in sequence vertically (row wise). - dstack : Stack arrays in sequence depth wise (along third axis). - concatenate : Join a sequence of arrays along an existing axis. - hsplit : Split array along second axis. - block : Assemble arrays from blocks. 
- - Examples - -------- - >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) - >>> np.hstack((a,b)) - array([1, 2, 3, 2, 3, 4]) - >>> a = np.array([[1],[2],[3]]) - >>> b = np.array([[2],[3],[4]]) - >>> np.hstack((a,b)) - array([[1, 2], - [2, 3], - [3, 4]]) - - """ - if not overrides.ARRAY_FUNCTION_ENABLED: - # raise warning if necessary - _arrays_for_stack_dispatcher(tup, stacklevel=2) - - arrs = atleast_1d(*tup) - if not isinstance(arrs, list): - arrs = [arrs] - # As a special case, dimension 0 of 1-dimensional arrays is "horizontal" - if arrs and arrs[0].ndim == 1: - return _nx.concatenate(arrs, 0) - else: - return _nx.concatenate(arrs, 1) - - -def _stack_dispatcher(arrays, axis=None, out=None): - arrays = _arrays_for_stack_dispatcher(arrays, stacklevel=6) - if out is not None: - # optimize for the typical case where only arrays is provided - arrays = list(arrays) - arrays.append(out) - return arrays - - -@array_function_dispatch(_stack_dispatcher) -def stack(arrays, axis=0, out=None): - """ - Join a sequence of arrays along a new axis. - - The ``axis`` parameter specifies the index of the new axis in the - dimensions of the result. For example, if ``axis=0`` it will be the first - dimension and if ``axis=-1`` it will be the last dimension. - - .. versionadded:: 1.10.0 - - Parameters - ---------- - arrays : sequence of array_like - Each array must have the same shape. - - axis : int, optional - The axis in the result array along which the input arrays are stacked. - - out : ndarray, optional - If provided, the destination to place the result. The shape must be - correct, matching that of what stack would have returned if no - out argument were specified. - - Returns - ------- - stacked : ndarray - The stacked array has one more dimension than the input arrays. - - See Also - -------- - concatenate : Join a sequence of arrays along an existing axis. - split : Split array into a list of multiple sub-arrays of equal size. - block : Assemble arrays from blocks. 
- - Examples - -------- - >>> arrays = [np.random.randn(3, 4) for _ in range(10)] - >>> np.stack(arrays, axis=0).shape - (10, 3, 4) - - >>> np.stack(arrays, axis=1).shape - (3, 10, 4) - - >>> np.stack(arrays, axis=2).shape - (3, 4, 10) - - >>> a = np.array([1, 2, 3]) - >>> b = np.array([2, 3, 4]) - >>> np.stack((a, b)) - array([[1, 2, 3], - [2, 3, 4]]) - - >>> np.stack((a, b), axis=-1) - array([[1, 2], - [2, 3], - [3, 4]]) - - """ - if not overrides.ARRAY_FUNCTION_ENABLED: - # raise warning if necessary - _arrays_for_stack_dispatcher(arrays, stacklevel=2) - - arrays = [asanyarray(arr) for arr in arrays] - if not arrays: - raise ValueError('need at least one array to stack') - - shapes = {arr.shape for arr in arrays} - if len(shapes) != 1: - raise ValueError('all input arrays must have the same shape') - - result_ndim = arrays[0].ndim + 1 - axis = normalize_axis_index(axis, result_ndim) - - sl = (slice(None),) * axis + (_nx.newaxis,) - expanded_arrays = [arr[sl] for arr in arrays] - return _nx.concatenate(expanded_arrays, axis=axis, out=out) - - -# Internal functions to eliminate the overhead of repeated dispatch in one of -# the two possible paths inside np.block. -# Use getattr to protect against __array_function__ being disabled. -_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size) -_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim) -_concatenate = getattr(_from_nx.concatenate, '__wrapped__', _from_nx.concatenate) - - -def _block_format_index(index): - """ - Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``. - """ - idx_str = ''.join('[{}]'.format(i) for i in index if i is not None) - return 'arrays' + idx_str - - -def _block_check_depths_match(arrays, parent_index=[]): - """ - Recursive function checking that the depths of nested lists in `arrays` - all match. Mismatch raises a ValueError as described in the block - docstring below. 
- - The entire index (rather than just the depth) needs to be calculated - for each innermost list, in case an error needs to be raised, so that - the index of the offending list can be printed as part of the error. - - Parameters - ---------- - arrays : nested list of arrays - The arrays to check - parent_index : list of int - The full index of `arrays` within the nested lists passed to - `_block_check_depths_match` at the top of the recursion. - - Returns - ------- - first_index : list of int - The full index of an element from the bottom of the nesting in - `arrays`. If any element at the bottom is an empty list, this will - refer to it, and the last index along the empty axis will be None. - max_arr_ndim : int - The maximum of the ndims of the arrays nested in `arrays`. - final_size: int - The number of elements in the final array. This is used the motivate - the choice of algorithm used using benchmarking wisdom. - - """ - if type(arrays) is tuple: - # not strictly necessary, but saves us from: - # - more than one way to do things - no point treating tuples like - # lists - # - horribly confusing behaviour that results when tuples are - # treated like ndarray - raise TypeError( - '{} is a tuple. ' - 'Only lists can be used to arrange blocks, and np.block does ' - 'not allow implicit conversion from tuple to ndarray.'.format( - _block_format_index(parent_index) - ) - ) - elif type(arrays) is list and len(arrays) > 0: - idxs_ndims = (_block_check_depths_match(arr, parent_index + [i]) - for i, arr in enumerate(arrays)) - - first_index, max_arr_ndim, final_size = next(idxs_ndims) - for index, ndim, size in idxs_ndims: - final_size += size - if ndim > max_arr_ndim: - max_arr_ndim = ndim - if len(index) != len(first_index): - raise ValueError( - "List depths are mismatched. 
First element was at depth " - "{}, but there is an element at depth {} ({})".format( - len(first_index), - len(index), - _block_format_index(index) - ) - ) - # propagate our flag that indicates an empty list at the bottom - if index[-1] is None: - first_index = index - - return first_index, max_arr_ndim, final_size - elif type(arrays) is list and len(arrays) == 0: - # We've 'bottomed out' on an empty list - return parent_index + [None], 0, 0 - else: - # We've 'bottomed out' - arrays is either a scalar or an array - size = _size(arrays) - return parent_index, _ndim(arrays), size - - -def _atleast_nd(a, ndim): - # Ensures `a` has at least `ndim` dimensions by prepending - # ones to `a.shape` as necessary - return array(a, ndmin=ndim, copy=False, subok=True) - - -def _accumulate(values): - # Helper function because Python 2.7 doesn't have - # itertools.accumulate - value = 0 - accumulated = [] - for v in values: - value += v - accumulated.append(value) - return accumulated - - -def _concatenate_shapes(shapes, axis): - """Given array shapes, return the resulting shape and slices prefixes. - - These help in nested concatation. - Returns - ------- - shape: tuple of int - This tuple satisfies: - ``` - shape, _ = _concatenate_shapes([arr.shape for shape in arrs], axis) - shape == concatenate(arrs, axis).shape - ``` - - slice_prefixes: tuple of (slice(start, end), ) - For a list of arrays being concatenated, this returns the slice - in the larger array at axis that needs to be sliced into. - - For example, the following holds: - ``` - ret = concatenate([a, b, c], axis) - _, (sl_a, sl_b, sl_c) = concatenate_slices([a, b, c], axis) - - ret[(slice(None),) * axis + sl_a] == a - ret[(slice(None),) * axis + sl_b] == b - ret[(slice(None),) * axis + sl_c] == c - ``` - - These are called slice prefixes since they are used in the recursive - blocking algorithm to compute the left-most slices during the - recursion. 
Therefore, they must be prepended to rest of the slice - that was computed deeper in the recursion. - - These are returned as tuples to ensure that they can quickly be added - to existing slice tuple without creating a new tuple every time. - - """ - # Cache a result that will be reused. - shape_at_axis = [shape[axis] for shape in shapes] - - # Take a shape, any shape - first_shape = shapes[0] - first_shape_pre = first_shape[:axis] - first_shape_post = first_shape[axis+1:] - - if any(shape[:axis] != first_shape_pre or - shape[axis+1:] != first_shape_post for shape in shapes): - raise ValueError( - 'Mismatched array shapes in block along axis {}.'.format(axis)) - - shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis+1:]) - - offsets_at_axis = _accumulate(shape_at_axis) - slice_prefixes = [(slice(start, end),) - for start, end in zip([0] + offsets_at_axis, - offsets_at_axis)] - return shape, slice_prefixes - - -def _block_info_recursion(arrays, max_depth, result_ndim, depth=0): - """ - Returns the shape of the final array, along with a list - of slices and a list of arrays that can be used for assignment inside the - new array - - Parameters - ---------- - arrays : nested list of arrays - The arrays to check - max_depth : list of int - The number of nested lists - result_ndim: int - The number of dimensions in thefinal array. - - Returns - ------- - shape : tuple of int - The shape that the final array will take on. - slices: list of tuple of slices - The slices into the full array required for assignment. These are - required to be prepended with ``(Ellipsis, )`` to obtain to correct - final index. 
- arrays: list of ndarray - The data to assign to each slice of the full array - - """ - if depth < max_depth: - shapes, slices, arrays = zip( - *[_block_info_recursion(arr, max_depth, result_ndim, depth+1) - for arr in arrays]) - - axis = result_ndim - max_depth + depth - shape, slice_prefixes = _concatenate_shapes(shapes, axis) - - # Prepend the slice prefix and flatten the slices - slices = [slice_prefix + the_slice - for slice_prefix, inner_slices in zip(slice_prefixes, slices) - for the_slice in inner_slices] - - # Flatten the array list - arrays = functools.reduce(operator.add, arrays) - - return shape, slices, arrays - else: - # We've 'bottomed out' - arrays is either a scalar or an array - # type(arrays) is not list - # Return the slice and the array inside a list to be consistent with - # the recursive case. - arr = _atleast_nd(arrays, result_ndim) - return arr.shape, [()], [arr] - - -def _block(arrays, max_depth, result_ndim, depth=0): - """ - Internal implementation of block based on repeated concatenation. - `arrays` is the argument passed to - block. `max_depth` is the depth of nested lists within `arrays` and - `result_ndim` is the greatest of the dimensions of the arrays in - `arrays` and the depth of the lists in `arrays` (see block docstring - for details). - """ - if depth < max_depth: - arrs = [_block(arr, max_depth, result_ndim, depth+1) - for arr in arrays] - return _concatenate(arrs, axis=-(max_depth-depth)) - else: - # We've 'bottomed out' - arrays is either a scalar or an array - # type(arrays) is not list - return _atleast_nd(arrays, result_ndim) - - -def _block_dispatcher(arrays): - # Use type(...) is list to match the behavior of np.block(), which special - # cases list specifically rather than allowing for generic iterables or - # tuple. Also, we know that list.__array_function__ will never exist. 
- if type(arrays) is list: - for subarrays in arrays: - for subarray in _block_dispatcher(subarrays): - yield subarray - else: - yield arrays - - -@array_function_dispatch(_block_dispatcher) -def block(arrays): - """ - Assemble an nd-array from nested lists of blocks. - - Blocks in the innermost lists are concatenated (see `concatenate`) along - the last dimension (-1), then these are concatenated along the - second-last dimension (-2), and so on until the outermost list is reached. - - Blocks can be of any dimension, but will not be broadcasted using the normal - rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim`` - the same for all blocks. This is primarily useful for working with scalars, - and means that code like ``np.block([v, 1])`` is valid, where - ``v.ndim == 1``. - - When the nested list is two levels deep, this allows block matrices to be - constructed from their components. - - .. versionadded:: 1.13.0 - - Parameters - ---------- - arrays : nested list of array_like or scalars (but not tuples) - If passed a single ndarray or scalar (a nested list of depth 0), this - is returned unmodified (and not copied). - - Elements shapes must match along the appropriate axes (without - broadcasting), but leading 1s will be prepended to the shape as - necessary to make the dimensions match. - - Returns - ------- - block_array : ndarray - The array assembled from the given blocks. - - The dimensionality of the output is equal to the greatest of: - * the dimensionality of all the inputs - * the depth to which the input list is nested - - Raises - ------ - ValueError - * If list depths are mismatched - for instance, ``[[a, b], c]`` is - illegal, and should be spelt ``[[a, b], [c]]`` - * If lists are empty - for instance, ``[[a, b], []]`` - - See Also - -------- - concatenate : Join a sequence of arrays together. - stack : Stack arrays in sequence along a new dimension. - hstack : Stack arrays in sequence horizontally (column wise). 
- vstack : Stack arrays in sequence vertically (row wise). - dstack : Stack arrays in sequence depth wise (along third dimension). - vsplit : Split array into a list of multiple sub-arrays vertically. - - Notes - ----- - - When called with only scalars, ``np.block`` is equivalent to an ndarray - call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to - ``np.array([[1, 2], [3, 4]])``. - - This function does not enforce that the blocks lie on a fixed grid. - ``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form:: - - AAAbb - AAAbb - cccDD - - But is also allowed to produce, for some ``a, b, c, d``:: - - AAAbb - AAAbb - cDDDD - - Since concatenation happens along the last axis first, `block` is _not_ - capable of producing the following directly:: - - AAAbb - cccbb - cccDD - - Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is - equivalent to ``np.block([[A, B, ...], [p, q, ...]])``. - - Examples - -------- - The most common use of this function is to build a block matrix - - >>> A = np.eye(2) * 2 - >>> B = np.eye(3) * 3 - >>> np.block([ - ... [A, np.zeros((2, 3))], - ... [np.ones((3, 2)), B ] - ... 
]) - array([[2., 0., 0., 0., 0.], - [0., 2., 0., 0., 0.], - [1., 1., 3., 0., 0.], - [1., 1., 0., 3., 0.], - [1., 1., 0., 0., 3.]]) - - With a list of depth 1, `block` can be used as `hstack` - - >>> np.block([1, 2, 3]) # hstack([1, 2, 3]) - array([1, 2, 3]) - - >>> a = np.array([1, 2, 3]) - >>> b = np.array([2, 3, 4]) - >>> np.block([a, b, 10]) # hstack([a, b, 10]) - array([ 1, 2, 3, 2, 3, 4, 10]) - - >>> A = np.ones((2, 2), int) - >>> B = 2 * A - >>> np.block([A, B]) # hstack([A, B]) - array([[1, 1, 2, 2], - [1, 1, 2, 2]]) - - With a list of depth 2, `block` can be used in place of `vstack`: - - >>> a = np.array([1, 2, 3]) - >>> b = np.array([2, 3, 4]) - >>> np.block([[a], [b]]) # vstack([a, b]) - array([[1, 2, 3], - [2, 3, 4]]) - - >>> A = np.ones((2, 2), int) - >>> B = 2 * A - >>> np.block([[A], [B]]) # vstack([A, B]) - array([[1, 1], - [1, 1], - [2, 2], - [2, 2]]) - - It can also be used in places of `atleast_1d` and `atleast_2d` - - >>> a = np.array(0) - >>> b = np.array([1]) - >>> np.block([a]) # atleast_1d(a) - array([0]) - >>> np.block([b]) # atleast_1d(b) - array([1]) - - >>> np.block([[a]]) # atleast_2d(a) - array([[0]]) - >>> np.block([[b]]) # atleast_2d(b) - array([[1]]) - - - """ - arrays, list_ndim, result_ndim, final_size = _block_setup(arrays) - - # It was found through benchmarking that making an array of final size - # around 256x256 was faster by straight concatenation on a - # i7-7700HQ processor and dual channel ram 2400MHz. - # It didn't seem to matter heavily on the dtype used. - # - # A 2D array using repeated concatenation requires 2 copies of the array. - # - # The fastest algorithm will depend on the ratio of CPU power to memory - # speed. - # One can monitor the results of the benchmark - # https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d - # to tune this parameter until a C version of the `_block_info_recursion` - # algorithm is implemented which would likely be faster than the python - # version. 
- if list_ndim * final_size > (2 * 512 * 512): - return _block_slicing(arrays, list_ndim, result_ndim) - else: - return _block_concatenate(arrays, list_ndim, result_ndim) - - -# These helper functions are mostly used for testing. -# They allow us to write tests that directly call `_block_slicing` -# or `_block_concatenate` without blocking large arrays to force the wisdom -# to trigger the desired path. -def _block_setup(arrays): - """ - Returns - (`arrays`, list_ndim, result_ndim, final_size) - """ - bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays) - list_ndim = len(bottom_index) - if bottom_index and bottom_index[-1] is None: - raise ValueError( - 'List at {} cannot be empty'.format( - _block_format_index(bottom_index) - ) - ) - result_ndim = max(arr_ndim, list_ndim) - return arrays, list_ndim, result_ndim, final_size - - -def _block_slicing(arrays, list_ndim, result_ndim): - shape, slices, arrays = _block_info_recursion( - arrays, list_ndim, result_ndim) - dtype = _nx.result_type(*[arr.dtype for arr in arrays]) - - # Test preferring F only in the case that all input arrays are F - F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays) - C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays) - order = 'F' if F_order and not C_order else 'C' - result = _nx.empty(shape=shape, dtype=dtype, order=order) - # Note: In a c implementation, the function - # PyArray_CreateMultiSortedStridePerm could be used for more advanced - # guessing of the desired order. - - for the_slice, arr in zip(slices, arrays): - result[(Ellipsis,) + the_slice] = arr - return result - - -def _block_concatenate(arrays, list_ndim, result_ndim): - result = _block(arrays, list_ndim, result_ndim) - if list_ndim == 0: - # Catch an edge case where _block returns a view because - # `arrays` is a single numpy array and not a list of numpy arrays. 
- # This might copy scalars or lists twice, but this isn't a likely - # usecase for those interested in performance - result = result.copy() - return result diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/__init__.py b/venv/lib/python3.7/site-packages/numpy/core/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/_locales.py b/venv/lib/python3.7/site-packages/numpy/core/tests/_locales.py deleted file mode 100644 index 52e4ff3..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/_locales.py +++ /dev/null @@ -1,76 +0,0 @@ -"""Provide class for testing in French locale - -""" -from __future__ import division, absolute_import, print_function - -import sys -import locale - -import pytest - -__ALL__ = ['CommaDecimalPointLocale'] - - -def find_comma_decimal_point_locale(): - """See if platform has a decimal point as comma locale. - - Find a locale that uses a comma instead of a period as the - decimal point. - - Returns - ------- - old_locale: str - Locale when the function was called. - new_locale: {str, None) - First French locale found, None if none found. - - """ - if sys.platform == 'win32': - locales = ['FRENCH'] - else: - locales = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8'] - - old_locale = locale.getlocale(locale.LC_NUMERIC) - new_locale = None - try: - for loc in locales: - try: - locale.setlocale(locale.LC_NUMERIC, loc) - new_locale = loc - break - except locale.Error: - pass - finally: - locale.setlocale(locale.LC_NUMERIC, locale=old_locale) - return old_locale, new_locale - - -class CommaDecimalPointLocale(object): - """Sets LC_NUMERIC to a locale with comma as decimal point. - - Classes derived from this class have setup and teardown methods that run - tests with locale.LC_NUMERIC set to a locale where commas (',') are used as - the decimal point instead of periods ('.'). On exit the locale is restored - to the initial locale. 
It also serves as context manager with the same - effect. If no such locale is available, the test is skipped. - - .. versionadded:: 1.15.0 - - """ - (cur_locale, tst_locale) = find_comma_decimal_point_locale() - - def setup(self): - if self.tst_locale is None: - pytest.skip("No French locale available") - locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale) - - def teardown(self): - locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale) - - def __enter__(self): - if self.tst_locale is None: - pytest.skip("No French locale available") - locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale) - - def __exit__(self, type, value, traceback): - locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/data/astype_copy.pkl b/venv/lib/python3.7/site-packages/numpy/core/tests/data/astype_copy.pkl deleted file mode 100644 index 7397c97..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/core/tests/data/astype_copy.pkl and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/data/recarray_from_file.fits b/venv/lib/python3.7/site-packages/numpy/core/tests/data/recarray_from_file.fits deleted file mode 100644 index ca48ee8..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/core/tests/data/recarray_from_file.fits and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-README b/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-README deleted file mode 100644 index 6561ca3..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-README +++ /dev/null @@ -1,15 +0,0 @@ -Steps to validate transcendental functions: -1) Add a file 'umath-validation-set-', where ufuncname is name of - the function in NumPy you want to validate -2) The file should contain 4 columns: dtype,input,expected output,ulperror - a. 
dtype: one of np.float16, np.float32, np.float64 - b. input: floating point input to ufunc in hex. Example: 0x414570a4 - represents 12.340000152587890625 - c. expected output: floating point output for the corresponding input in hex. - This should be computed using a high(er) precision library and then rounded to - same format as the input. - d. ulperror: expected maximum ulp error of the function. This - should be same across all rows of the same dtype. Otherwise, the function is - tested for the maximum ulp error among all entries of that dtype. -3) Add file umath-validation-set- to the test file test_umath_accuracy.py - which will then validate your ufunc. diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-cos b/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-cos deleted file mode 100644 index 360ebcd..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-cos +++ /dev/null @@ -1,707 +0,0 @@ -dtype,input,output,ulperrortol -## +ve denormals ## -np.float32,0x004b4716,0x3f800000,2 -np.float32,0x007b2490,0x3f800000,2 -np.float32,0x007c99fa,0x3f800000,2 -np.float32,0x00734a0c,0x3f800000,2 -np.float32,0x0070de24,0x3f800000,2 -np.float32,0x007fffff,0x3f800000,2 -np.float32,0x00000001,0x3f800000,2 -## -ve denormals ## -np.float32,0x80495d65,0x3f800000,2 -np.float32,0x806894f6,0x3f800000,2 -np.float32,0x80555a76,0x3f800000,2 -np.float32,0x804e1fb8,0x3f800000,2 -np.float32,0x80687de9,0x3f800000,2 -np.float32,0x807fffff,0x3f800000,2 -np.float32,0x80000001,0x3f800000,2 -## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ## -np.float32,0x00000000,0x3f800000,2 -np.float32,0x80000000,0x3f800000,2 -np.float32,0x00800000,0x3f800000,2 -np.float32,0x7f7fffff,0x3f5a5f96,2 -np.float32,0x80800000,0x3f800000,2 -np.float32,0xff7fffff,0x3f5a5f96,2 -## 1.00f + 0x00000001 ## -np.float32,0x3f800000,0x3f0a5140,2 -np.float32,0x3f800001,0x3f0a513f,2 -np.float32,0x3f800002,0x3f0a513d,2 
-np.float32,0xc090a8b0,0xbe4332ce,2 -np.float32,0x41ce3184,0x3f4d1de1,2 -np.float32,0xc1d85848,0xbeaa8980,2 -np.float32,0x402b8820,0xbf653aa3,2 -np.float32,0x42b4e454,0xbf4a338b,2 -np.float32,0x42a67a60,0x3c58202e,2 -np.float32,0x41d92388,0xbed987c7,2 -np.float32,0x422dd66c,0x3f5dcab3,2 -np.float32,0xc28f5be6,0xbf5688d8,2 -np.float32,0x41ab2674,0xbf53aa3b,2 -np.float32,0xd0102756,0x3f45d12d,2 -np.float32,0xcf99405e,0xbe9cf281,2 -np.float32,0xcfd83a12,0x3eaae4ca,2 -np.float32,0x4fb54db0,0xbf7b2894,2 -np.float32,0xcfcca29d,0x3f752e4e,2 -np.float32,0xceec2ac0,0xbf745303,2 -np.float32,0xcfdca97f,0x3ef554a7,2 -np.float32,0xcfe92b0a,0x3f4618f2,2 -np.float32,0x5014b0eb,0x3ee933e6,2 -np.float32,0xcfa7ee96,0xbeedeeb2,2 -np.float32,0x754c09a0,0xbef298de,2 -np.float32,0x77a731fb,0x3f24599f,2 -np.float32,0x76de2494,0x3f79576c,2 -np.float32,0xf74920dc,0xbf4d196e,2 -np.float32,0x7707a312,0xbeb5cb8e,2 -np.float32,0x75bf9790,0xbf7fd7fe,2 -np.float32,0xf4ca7c40,0xbe15107d,2 -np.float32,0x77e91899,0xbe8a968b,2 -np.float32,0xf74c9820,0xbf7f9677,2 -np.float32,0x7785ca29,0xbe6ef93b,2 -np.float32,0x3f490fdb,0x3f3504f3,2 -np.float32,0xbf490fdb,0x3f3504f3,2 -np.float32,0x3fc90fdb,0xb33bbd2e,2 -np.float32,0xbfc90fdb,0xb33bbd2e,2 -np.float32,0x40490fdb,0xbf800000,2 -np.float32,0xc0490fdb,0xbf800000,2 -np.float32,0x3fc90fdb,0xb33bbd2e,2 -np.float32,0xbfc90fdb,0xb33bbd2e,2 -np.float32,0x40490fdb,0xbf800000,2 -np.float32,0xc0490fdb,0xbf800000,2 -np.float32,0x40c90fdb,0x3f800000,2 -np.float32,0xc0c90fdb,0x3f800000,2 -np.float32,0x4016cbe4,0xbf3504f3,2 -np.float32,0xc016cbe4,0xbf3504f3,2 -np.float32,0x4096cbe4,0x324cde2e,2 -np.float32,0xc096cbe4,0x324cde2e,2 -np.float32,0x4116cbe4,0xbf800000,2 -np.float32,0xc116cbe4,0xbf800000,2 -np.float32,0x40490fdb,0xbf800000,2 -np.float32,0xc0490fdb,0xbf800000,2 -np.float32,0x40c90fdb,0x3f800000,2 -np.float32,0xc0c90fdb,0x3f800000,2 -np.float32,0x41490fdb,0x3f800000,2 -np.float32,0xc1490fdb,0x3f800000,2 -np.float32,0x407b53d2,0xbf3504f1,2 
-np.float32,0xc07b53d2,0xbf3504f1,2 -np.float32,0x40fb53d2,0xb4b5563d,2 -np.float32,0xc0fb53d2,0xb4b5563d,2 -np.float32,0x417b53d2,0xbf800000,2 -np.float32,0xc17b53d2,0xbf800000,2 -np.float32,0x4096cbe4,0x324cde2e,2 -np.float32,0xc096cbe4,0x324cde2e,2 -np.float32,0x4116cbe4,0xbf800000,2 -np.float32,0xc116cbe4,0xbf800000,2 -np.float32,0x4196cbe4,0x3f800000,2 -np.float32,0xc196cbe4,0x3f800000,2 -np.float32,0x40afede0,0x3f3504f7,2 -np.float32,0xc0afede0,0x3f3504f7,2 -np.float32,0x412fede0,0x353222c4,2 -np.float32,0xc12fede0,0x353222c4,2 -np.float32,0x41afede0,0xbf800000,2 -np.float32,0xc1afede0,0xbf800000,2 -np.float32,0x40c90fdb,0x3f800000,2 -np.float32,0xc0c90fdb,0x3f800000,2 -np.float32,0x41490fdb,0x3f800000,2 -np.float32,0xc1490fdb,0x3f800000,2 -np.float32,0x41c90fdb,0x3f800000,2 -np.float32,0xc1c90fdb,0x3f800000,2 -np.float32,0x40e231d6,0x3f3504f3,2 -np.float32,0xc0e231d6,0x3f3504f3,2 -np.float32,0x416231d6,0xb319a6a2,2 -np.float32,0xc16231d6,0xb319a6a2,2 -np.float32,0x41e231d6,0xbf800000,2 -np.float32,0xc1e231d6,0xbf800000,2 -np.float32,0x40fb53d2,0xb4b5563d,2 -np.float32,0xc0fb53d2,0xb4b5563d,2 -np.float32,0x417b53d2,0xbf800000,2 -np.float32,0xc17b53d2,0xbf800000,2 -np.float32,0x41fb53d2,0x3f800000,2 -np.float32,0xc1fb53d2,0x3f800000,2 -np.float32,0x410a3ae7,0xbf3504fb,2 -np.float32,0xc10a3ae7,0xbf3504fb,2 -np.float32,0x418a3ae7,0x35b08908,2 -np.float32,0xc18a3ae7,0x35b08908,2 -np.float32,0x420a3ae7,0xbf800000,2 -np.float32,0xc20a3ae7,0xbf800000,2 -np.float32,0x4116cbe4,0xbf800000,2 -np.float32,0xc116cbe4,0xbf800000,2 -np.float32,0x4196cbe4,0x3f800000,2 -np.float32,0xc196cbe4,0x3f800000,2 -np.float32,0x4216cbe4,0x3f800000,2 -np.float32,0xc216cbe4,0x3f800000,2 -np.float32,0x41235ce2,0xbf3504ef,2 -np.float32,0xc1235ce2,0xbf3504ef,2 -np.float32,0x41a35ce2,0xb53889b6,2 -np.float32,0xc1a35ce2,0xb53889b6,2 -np.float32,0x42235ce2,0xbf800000,2 -np.float32,0xc2235ce2,0xbf800000,2 -np.float32,0x412fede0,0x353222c4,2 -np.float32,0xc12fede0,0x353222c4,2 
-np.float32,0x41afede0,0xbf800000,2 -np.float32,0xc1afede0,0xbf800000,2 -np.float32,0x422fede0,0x3f800000,2 -np.float32,0xc22fede0,0x3f800000,2 -np.float32,0x413c7edd,0x3f3504f4,2 -np.float32,0xc13c7edd,0x3f3504f4,2 -np.float32,0x41bc7edd,0x33800add,2 -np.float32,0xc1bc7edd,0x33800add,2 -np.float32,0x423c7edd,0xbf800000,2 -np.float32,0xc23c7edd,0xbf800000,2 -np.float32,0x41490fdb,0x3f800000,2 -np.float32,0xc1490fdb,0x3f800000,2 -np.float32,0x41c90fdb,0x3f800000,2 -np.float32,0xc1c90fdb,0x3f800000,2 -np.float32,0x42490fdb,0x3f800000,2 -np.float32,0xc2490fdb,0x3f800000,2 -np.float32,0x4155a0d9,0x3f3504eb,2 -np.float32,0xc155a0d9,0x3f3504eb,2 -np.float32,0x41d5a0d9,0xb5b3bc81,2 -np.float32,0xc1d5a0d9,0xb5b3bc81,2 -np.float32,0x4255a0d9,0xbf800000,2 -np.float32,0xc255a0d9,0xbf800000,2 -np.float32,0x416231d6,0xb319a6a2,2 -np.float32,0xc16231d6,0xb319a6a2,2 -np.float32,0x41e231d6,0xbf800000,2 -np.float32,0xc1e231d6,0xbf800000,2 -np.float32,0x426231d6,0x3f800000,2 -np.float32,0xc26231d6,0x3f800000,2 -np.float32,0x416ec2d4,0xbf3504f7,2 -np.float32,0xc16ec2d4,0xbf3504f7,2 -np.float32,0x41eec2d4,0x353ef0a7,2 -np.float32,0xc1eec2d4,0x353ef0a7,2 -np.float32,0x426ec2d4,0xbf800000,2 -np.float32,0xc26ec2d4,0xbf800000,2 -np.float32,0x417b53d2,0xbf800000,2 -np.float32,0xc17b53d2,0xbf800000,2 -np.float32,0x41fb53d2,0x3f800000,2 -np.float32,0xc1fb53d2,0x3f800000,2 -np.float32,0x427b53d2,0x3f800000,2 -np.float32,0xc27b53d2,0x3f800000,2 -np.float32,0x4183f268,0xbf3504e7,2 -np.float32,0xc183f268,0xbf3504e7,2 -np.float32,0x4203f268,0xb6059a13,2 -np.float32,0xc203f268,0xb6059a13,2 -np.float32,0x4283f268,0xbf800000,2 -np.float32,0xc283f268,0xbf800000,2 -np.float32,0x418a3ae7,0x35b08908,2 -np.float32,0xc18a3ae7,0x35b08908,2 -np.float32,0x420a3ae7,0xbf800000,2 -np.float32,0xc20a3ae7,0xbf800000,2 -np.float32,0x428a3ae7,0x3f800000,2 -np.float32,0xc28a3ae7,0x3f800000,2 -np.float32,0x41908365,0x3f3504f0,2 -np.float32,0xc1908365,0x3f3504f0,2 -np.float32,0x42108365,0xb512200d,2 
-np.float32,0xc2108365,0xb512200d,2 -np.float32,0x42908365,0xbf800000,2 -np.float32,0xc2908365,0xbf800000,2 -np.float32,0x4196cbe4,0x3f800000,2 -np.float32,0xc196cbe4,0x3f800000,2 -np.float32,0x4216cbe4,0x3f800000,2 -np.float32,0xc216cbe4,0x3f800000,2 -np.float32,0x4296cbe4,0x3f800000,2 -np.float32,0xc296cbe4,0x3f800000,2 -np.float32,0x419d1463,0x3f3504ef,2 -np.float32,0xc19d1463,0x3f3504ef,2 -np.float32,0x421d1463,0xb5455799,2 -np.float32,0xc21d1463,0xb5455799,2 -np.float32,0x429d1463,0xbf800000,2 -np.float32,0xc29d1463,0xbf800000,2 -np.float32,0x41a35ce2,0xb53889b6,2 -np.float32,0xc1a35ce2,0xb53889b6,2 -np.float32,0x42235ce2,0xbf800000,2 -np.float32,0xc2235ce2,0xbf800000,2 -np.float32,0x42a35ce2,0x3f800000,2 -np.float32,0xc2a35ce2,0x3f800000,2 -np.float32,0x41a9a561,0xbf3504ff,2 -np.float32,0xc1a9a561,0xbf3504ff,2 -np.float32,0x4229a561,0x360733d0,2 -np.float32,0xc229a561,0x360733d0,2 -np.float32,0x42a9a561,0xbf800000,2 -np.float32,0xc2a9a561,0xbf800000,2 -np.float32,0x41afede0,0xbf800000,2 -np.float32,0xc1afede0,0xbf800000,2 -np.float32,0x422fede0,0x3f800000,2 -np.float32,0xc22fede0,0x3f800000,2 -np.float32,0x42afede0,0x3f800000,2 -np.float32,0xc2afede0,0x3f800000,2 -np.float32,0x41b6365e,0xbf3504f6,2 -np.float32,0xc1b6365e,0xbf3504f6,2 -np.float32,0x4236365e,0x350bb91c,2 -np.float32,0xc236365e,0x350bb91c,2 -np.float32,0x42b6365e,0xbf800000,2 -np.float32,0xc2b6365e,0xbf800000,2 -np.float32,0x41bc7edd,0x33800add,2 -np.float32,0xc1bc7edd,0x33800add,2 -np.float32,0x423c7edd,0xbf800000,2 -np.float32,0xc23c7edd,0xbf800000,2 -np.float32,0x42bc7edd,0x3f800000,2 -np.float32,0xc2bc7edd,0x3f800000,2 -np.float32,0x41c2c75c,0x3f3504f8,2 -np.float32,0xc1c2c75c,0x3f3504f8,2 -np.float32,0x4242c75c,0x354bbe8a,2 -np.float32,0xc242c75c,0x354bbe8a,2 -np.float32,0x42c2c75c,0xbf800000,2 -np.float32,0xc2c2c75c,0xbf800000,2 -np.float32,0x41c90fdb,0x3f800000,2 -np.float32,0xc1c90fdb,0x3f800000,2 -np.float32,0x42490fdb,0x3f800000,2 -np.float32,0xc2490fdb,0x3f800000,2 
-np.float32,0x42c90fdb,0x3f800000,2 -np.float32,0xc2c90fdb,0x3f800000,2 -np.float32,0x41cf585a,0x3f3504e7,2 -np.float32,0xc1cf585a,0x3f3504e7,2 -np.float32,0x424f585a,0xb608cd8c,2 -np.float32,0xc24f585a,0xb608cd8c,2 -np.float32,0x42cf585a,0xbf800000,2 -np.float32,0xc2cf585a,0xbf800000,2 -np.float32,0x41d5a0d9,0xb5b3bc81,2 -np.float32,0xc1d5a0d9,0xb5b3bc81,2 -np.float32,0x4255a0d9,0xbf800000,2 -np.float32,0xc255a0d9,0xbf800000,2 -np.float32,0x42d5a0d9,0x3f800000,2 -np.float32,0xc2d5a0d9,0x3f800000,2 -np.float32,0x41dbe958,0xbf350507,2 -np.float32,0xc1dbe958,0xbf350507,2 -np.float32,0x425be958,0x365eab75,2 -np.float32,0xc25be958,0x365eab75,2 -np.float32,0x42dbe958,0xbf800000,2 -np.float32,0xc2dbe958,0xbf800000,2 -np.float32,0x41e231d6,0xbf800000,2 -np.float32,0xc1e231d6,0xbf800000,2 -np.float32,0x426231d6,0x3f800000,2 -np.float32,0xc26231d6,0x3f800000,2 -np.float32,0x42e231d6,0x3f800000,2 -np.float32,0xc2e231d6,0x3f800000,2 -np.float32,0x41e87a55,0xbf3504ef,2 -np.float32,0xc1e87a55,0xbf3504ef,2 -np.float32,0x42687a55,0xb552257b,2 -np.float32,0xc2687a55,0xb552257b,2 -np.float32,0x42e87a55,0xbf800000,2 -np.float32,0xc2e87a55,0xbf800000,2 -np.float32,0x41eec2d4,0x353ef0a7,2 -np.float32,0xc1eec2d4,0x353ef0a7,2 -np.float32,0x426ec2d4,0xbf800000,2 -np.float32,0xc26ec2d4,0xbf800000,2 -np.float32,0x42eec2d4,0x3f800000,2 -np.float32,0xc2eec2d4,0x3f800000,2 -np.float32,0x41f50b53,0x3f3504ff,2 -np.float32,0xc1f50b53,0x3f3504ff,2 -np.float32,0x42750b53,0x360a6748,2 -np.float32,0xc2750b53,0x360a6748,2 -np.float32,0x42f50b53,0xbf800000,2 -np.float32,0xc2f50b53,0xbf800000,2 -np.float32,0x41fb53d2,0x3f800000,2 -np.float32,0xc1fb53d2,0x3f800000,2 -np.float32,0x427b53d2,0x3f800000,2 -np.float32,0xc27b53d2,0x3f800000,2 -np.float32,0x42fb53d2,0x3f800000,2 -np.float32,0xc2fb53d2,0x3f800000,2 -np.float32,0x4200ce28,0x3f3504f6,2 -np.float32,0xc200ce28,0x3f3504f6,2 -np.float32,0x4280ce28,0x34fdd672,2 -np.float32,0xc280ce28,0x34fdd672,2 -np.float32,0x4300ce28,0xbf800000,2 
-np.float32,0xc300ce28,0xbf800000,2 -np.float32,0x4203f268,0xb6059a13,2 -np.float32,0xc203f268,0xb6059a13,2 -np.float32,0x4283f268,0xbf800000,2 -np.float32,0xc283f268,0xbf800000,2 -np.float32,0x4303f268,0x3f800000,2 -np.float32,0xc303f268,0x3f800000,2 -np.float32,0x420716a7,0xbf3504f8,2 -np.float32,0xc20716a7,0xbf3504f8,2 -np.float32,0x428716a7,0x35588c6d,2 -np.float32,0xc28716a7,0x35588c6d,2 -np.float32,0x430716a7,0xbf800000,2 -np.float32,0xc30716a7,0xbf800000,2 -np.float32,0x420a3ae7,0xbf800000,2 -np.float32,0xc20a3ae7,0xbf800000,2 -np.float32,0x428a3ae7,0x3f800000,2 -np.float32,0xc28a3ae7,0x3f800000,2 -np.float32,0x430a3ae7,0x3f800000,2 -np.float32,0xc30a3ae7,0x3f800000,2 -np.float32,0x420d5f26,0xbf3504e7,2 -np.float32,0xc20d5f26,0xbf3504e7,2 -np.float32,0x428d5f26,0xb60c0105,2 -np.float32,0xc28d5f26,0xb60c0105,2 -np.float32,0x430d5f26,0xbf800000,2 -np.float32,0xc30d5f26,0xbf800000,2 -np.float32,0x42108365,0xb512200d,2 -np.float32,0xc2108365,0xb512200d,2 -np.float32,0x42908365,0xbf800000,2 -np.float32,0xc2908365,0xbf800000,2 -np.float32,0x43108365,0x3f800000,2 -np.float32,0xc3108365,0x3f800000,2 -np.float32,0x4213a7a5,0x3f350507,2 -np.float32,0xc213a7a5,0x3f350507,2 -np.float32,0x4293a7a5,0x3661deee,2 -np.float32,0xc293a7a5,0x3661deee,2 -np.float32,0x4313a7a5,0xbf800000,2 -np.float32,0xc313a7a5,0xbf800000,2 -np.float32,0x4216cbe4,0x3f800000,2 -np.float32,0xc216cbe4,0x3f800000,2 -np.float32,0x4296cbe4,0x3f800000,2 -np.float32,0xc296cbe4,0x3f800000,2 -np.float32,0x4316cbe4,0x3f800000,2 -np.float32,0xc316cbe4,0x3f800000,2 -np.float32,0x4219f024,0x3f3504d8,2 -np.float32,0xc219f024,0x3f3504d8,2 -np.float32,0x4299f024,0xb69bde6c,2 -np.float32,0xc299f024,0xb69bde6c,2 -np.float32,0x4319f024,0xbf800000,2 -np.float32,0xc319f024,0xbf800000,2 -np.float32,0x421d1463,0xb5455799,2 -np.float32,0xc21d1463,0xb5455799,2 -np.float32,0x429d1463,0xbf800000,2 -np.float32,0xc29d1463,0xbf800000,2 -np.float32,0x431d1463,0x3f800000,2 -np.float32,0xc31d1463,0x3f800000,2 
-np.float32,0x422038a3,0xbf350516,2 -np.float32,0xc22038a3,0xbf350516,2 -np.float32,0x42a038a3,0x36c6cd61,2 -np.float32,0xc2a038a3,0x36c6cd61,2 -np.float32,0x432038a3,0xbf800000,2 -np.float32,0xc32038a3,0xbf800000,2 -np.float32,0x42235ce2,0xbf800000,2 -np.float32,0xc2235ce2,0xbf800000,2 -np.float32,0x42a35ce2,0x3f800000,2 -np.float32,0xc2a35ce2,0x3f800000,2 -np.float32,0x43235ce2,0x3f800000,2 -np.float32,0xc3235ce2,0x3f800000,2 -np.float32,0x42268121,0xbf3504f6,2 -np.float32,0xc2268121,0xbf3504f6,2 -np.float32,0x42a68121,0x34e43aac,2 -np.float32,0xc2a68121,0x34e43aac,2 -np.float32,0x43268121,0xbf800000,2 -np.float32,0xc3268121,0xbf800000,2 -np.float32,0x4229a561,0x360733d0,2 -np.float32,0xc229a561,0x360733d0,2 -np.float32,0x42a9a561,0xbf800000,2 -np.float32,0xc2a9a561,0xbf800000,2 -np.float32,0x4329a561,0x3f800000,2 -np.float32,0xc329a561,0x3f800000,2 -np.float32,0x422cc9a0,0x3f3504f8,2 -np.float32,0xc22cc9a0,0x3f3504f8,2 -np.float32,0x42acc9a0,0x35655a50,2 -np.float32,0xc2acc9a0,0x35655a50,2 -np.float32,0x432cc9a0,0xbf800000,2 -np.float32,0xc32cc9a0,0xbf800000,2 -np.float32,0x422fede0,0x3f800000,2 -np.float32,0xc22fede0,0x3f800000,2 -np.float32,0x42afede0,0x3f800000,2 -np.float32,0xc2afede0,0x3f800000,2 -np.float32,0x432fede0,0x3f800000,2 -np.float32,0xc32fede0,0x3f800000,2 -np.float32,0x4233121f,0x3f3504e7,2 -np.float32,0xc233121f,0x3f3504e7,2 -np.float32,0x42b3121f,0xb60f347d,2 -np.float32,0xc2b3121f,0xb60f347d,2 -np.float32,0x4333121f,0xbf800000,2 -np.float32,0xc333121f,0xbf800000,2 -np.float32,0x4236365e,0x350bb91c,2 -np.float32,0xc236365e,0x350bb91c,2 -np.float32,0x42b6365e,0xbf800000,2 -np.float32,0xc2b6365e,0xbf800000,2 -np.float32,0x4336365e,0x3f800000,2 -np.float32,0xc336365e,0x3f800000,2 -np.float32,0x42395a9e,0xbf350507,2 -np.float32,0xc2395a9e,0xbf350507,2 -np.float32,0x42b95a9e,0x36651267,2 -np.float32,0xc2b95a9e,0x36651267,2 -np.float32,0x43395a9e,0xbf800000,2 -np.float32,0xc3395a9e,0xbf800000,2 -np.float32,0x423c7edd,0xbf800000,2 
-np.float32,0xc23c7edd,0xbf800000,2 -np.float32,0x42bc7edd,0x3f800000,2 -np.float32,0xc2bc7edd,0x3f800000,2 -np.float32,0x433c7edd,0x3f800000,2 -np.float32,0xc33c7edd,0x3f800000,2 -np.float32,0x423fa31d,0xbf3504d7,2 -np.float32,0xc23fa31d,0xbf3504d7,2 -np.float32,0x42bfa31d,0xb69d7828,2 -np.float32,0xc2bfa31d,0xb69d7828,2 -np.float32,0x433fa31d,0xbf800000,2 -np.float32,0xc33fa31d,0xbf800000,2 -np.float32,0x4242c75c,0x354bbe8a,2 -np.float32,0xc242c75c,0x354bbe8a,2 -np.float32,0x42c2c75c,0xbf800000,2 -np.float32,0xc2c2c75c,0xbf800000,2 -np.float32,0x4342c75c,0x3f800000,2 -np.float32,0xc342c75c,0x3f800000,2 -np.float32,0x4245eb9c,0x3f350517,2 -np.float32,0xc245eb9c,0x3f350517,2 -np.float32,0x42c5eb9c,0x36c8671d,2 -np.float32,0xc2c5eb9c,0x36c8671d,2 -np.float32,0x4345eb9c,0xbf800000,2 -np.float32,0xc345eb9c,0xbf800000,2 -np.float32,0x42490fdb,0x3f800000,2 -np.float32,0xc2490fdb,0x3f800000,2 -np.float32,0x42c90fdb,0x3f800000,2 -np.float32,0xc2c90fdb,0x3f800000,2 -np.float32,0x43490fdb,0x3f800000,2 -np.float32,0xc3490fdb,0x3f800000,2 -np.float32,0x424c341a,0x3f3504f5,2 -np.float32,0xc24c341a,0x3f3504f5,2 -np.float32,0x42cc341a,0x34ca9ee6,2 -np.float32,0xc2cc341a,0x34ca9ee6,2 -np.float32,0x434c341a,0xbf800000,2 -np.float32,0xc34c341a,0xbf800000,2 -np.float32,0x424f585a,0xb608cd8c,2 -np.float32,0xc24f585a,0xb608cd8c,2 -np.float32,0x42cf585a,0xbf800000,2 -np.float32,0xc2cf585a,0xbf800000,2 -np.float32,0x434f585a,0x3f800000,2 -np.float32,0xc34f585a,0x3f800000,2 -np.float32,0x42527c99,0xbf3504f9,2 -np.float32,0xc2527c99,0xbf3504f9,2 -np.float32,0x42d27c99,0x35722833,2 -np.float32,0xc2d27c99,0x35722833,2 -np.float32,0x43527c99,0xbf800000,2 -np.float32,0xc3527c99,0xbf800000,2 -np.float32,0x4255a0d9,0xbf800000,2 -np.float32,0xc255a0d9,0xbf800000,2 -np.float32,0x42d5a0d9,0x3f800000,2 -np.float32,0xc2d5a0d9,0x3f800000,2 -np.float32,0x4355a0d9,0x3f800000,2 -np.float32,0xc355a0d9,0x3f800000,2 -np.float32,0x4258c518,0xbf3504e6,2 -np.float32,0xc258c518,0xbf3504e6,2 
-np.float32,0x42d8c518,0xb61267f6,2 -np.float32,0xc2d8c518,0xb61267f6,2 -np.float32,0x4358c518,0xbf800000,2 -np.float32,0xc358c518,0xbf800000,2 -np.float32,0x425be958,0x365eab75,2 -np.float32,0xc25be958,0x365eab75,2 -np.float32,0x42dbe958,0xbf800000,2 -np.float32,0xc2dbe958,0xbf800000,2 -np.float32,0x435be958,0x3f800000,2 -np.float32,0xc35be958,0x3f800000,2 -np.float32,0x425f0d97,0x3f350508,2 -np.float32,0xc25f0d97,0x3f350508,2 -np.float32,0x42df0d97,0x366845e0,2 -np.float32,0xc2df0d97,0x366845e0,2 -np.float32,0x435f0d97,0xbf800000,2 -np.float32,0xc35f0d97,0xbf800000,2 -np.float32,0x426231d6,0x3f800000,2 -np.float32,0xc26231d6,0x3f800000,2 -np.float32,0x42e231d6,0x3f800000,2 -np.float32,0xc2e231d6,0x3f800000,2 -np.float32,0x436231d6,0x3f800000,2 -np.float32,0xc36231d6,0x3f800000,2 -np.float32,0x42655616,0x3f3504d7,2 -np.float32,0xc2655616,0x3f3504d7,2 -np.float32,0x42e55616,0xb69f11e5,2 -np.float32,0xc2e55616,0xb69f11e5,2 -np.float32,0x43655616,0xbf800000,2 -np.float32,0xc3655616,0xbf800000,2 -np.float32,0x42687a55,0xb552257b,2 -np.float32,0xc2687a55,0xb552257b,2 -np.float32,0x42e87a55,0xbf800000,2 -np.float32,0xc2e87a55,0xbf800000,2 -np.float32,0x43687a55,0x3f800000,2 -np.float32,0xc3687a55,0x3f800000,2 -np.float32,0x426b9e95,0xbf350517,2 -np.float32,0xc26b9e95,0xbf350517,2 -np.float32,0x42eb9e95,0x36ca00d9,2 -np.float32,0xc2eb9e95,0x36ca00d9,2 -np.float32,0x436b9e95,0xbf800000,2 -np.float32,0xc36b9e95,0xbf800000,2 -np.float32,0x426ec2d4,0xbf800000,2 -np.float32,0xc26ec2d4,0xbf800000,2 -np.float32,0x42eec2d4,0x3f800000,2 -np.float32,0xc2eec2d4,0x3f800000,2 -np.float32,0x436ec2d4,0x3f800000,2 -np.float32,0xc36ec2d4,0x3f800000,2 -np.float32,0x4271e713,0xbf3504f5,2 -np.float32,0xc271e713,0xbf3504f5,2 -np.float32,0x42f1e713,0x34b10321,2 -np.float32,0xc2f1e713,0x34b10321,2 -np.float32,0x4371e713,0xbf800000,2 -np.float32,0xc371e713,0xbf800000,2 -np.float32,0x42750b53,0x360a6748,2 -np.float32,0xc2750b53,0x360a6748,2 -np.float32,0x42f50b53,0xbf800000,2 
-np.float32,0xc2f50b53,0xbf800000,2 -np.float32,0x43750b53,0x3f800000,2 -np.float32,0xc3750b53,0x3f800000,2 -np.float32,0x42782f92,0x3f3504f9,2 -np.float32,0xc2782f92,0x3f3504f9,2 -np.float32,0x42f82f92,0x357ef616,2 -np.float32,0xc2f82f92,0x357ef616,2 -np.float32,0x43782f92,0xbf800000,2 -np.float32,0xc3782f92,0xbf800000,2 -np.float32,0x427b53d2,0x3f800000,2 -np.float32,0xc27b53d2,0x3f800000,2 -np.float32,0x42fb53d2,0x3f800000,2 -np.float32,0xc2fb53d2,0x3f800000,2 -np.float32,0x437b53d2,0x3f800000,2 -np.float32,0xc37b53d2,0x3f800000,2 -np.float32,0x427e7811,0x3f3504e6,2 -np.float32,0xc27e7811,0x3f3504e6,2 -np.float32,0x42fe7811,0xb6159b6f,2 -np.float32,0xc2fe7811,0xb6159b6f,2 -np.float32,0x437e7811,0xbf800000,2 -np.float32,0xc37e7811,0xbf800000,2 -np.float32,0x4280ce28,0x34fdd672,2 -np.float32,0xc280ce28,0x34fdd672,2 -np.float32,0x4300ce28,0xbf800000,2 -np.float32,0xc300ce28,0xbf800000,2 -np.float32,0x4380ce28,0x3f800000,2 -np.float32,0xc380ce28,0x3f800000,2 -np.float32,0x42826048,0xbf350508,2 -np.float32,0xc2826048,0xbf350508,2 -np.float32,0x43026048,0x366b7958,2 -np.float32,0xc3026048,0x366b7958,2 -np.float32,0x43826048,0xbf800000,2 -np.float32,0xc3826048,0xbf800000,2 -np.float32,0x4283f268,0xbf800000,2 -np.float32,0xc283f268,0xbf800000,2 -np.float32,0x4303f268,0x3f800000,2 -np.float32,0xc303f268,0x3f800000,2 -np.float32,0x4383f268,0x3f800000,2 -np.float32,0xc383f268,0x3f800000,2 -np.float32,0x42858487,0xbf350504,2 -np.float32,0xc2858487,0xbf350504,2 -np.float32,0x43058487,0x363ea8be,2 -np.float32,0xc3058487,0x363ea8be,2 -np.float32,0x43858487,0xbf800000,2 -np.float32,0xc3858487,0xbf800000,2 -np.float32,0x428716a7,0x35588c6d,2 -np.float32,0xc28716a7,0x35588c6d,2 -np.float32,0x430716a7,0xbf800000,2 -np.float32,0xc30716a7,0xbf800000,2 -np.float32,0x438716a7,0x3f800000,2 -np.float32,0xc38716a7,0x3f800000,2 -np.float32,0x4288a8c7,0x3f350517,2 -np.float32,0xc288a8c7,0x3f350517,2 -np.float32,0x4308a8c7,0x36cb9a96,2 -np.float32,0xc308a8c7,0x36cb9a96,2 
-np.float32,0x4388a8c7,0xbf800000,2 -np.float32,0xc388a8c7,0xbf800000,2 -np.float32,0x428a3ae7,0x3f800000,2 -np.float32,0xc28a3ae7,0x3f800000,2 -np.float32,0x430a3ae7,0x3f800000,2 -np.float32,0xc30a3ae7,0x3f800000,2 -np.float32,0x438a3ae7,0x3f800000,2 -np.float32,0xc38a3ae7,0x3f800000,2 -np.float32,0x428bcd06,0x3f3504f5,2 -np.float32,0xc28bcd06,0x3f3504f5,2 -np.float32,0x430bcd06,0x3497675b,2 -np.float32,0xc30bcd06,0x3497675b,2 -np.float32,0x438bcd06,0xbf800000,2 -np.float32,0xc38bcd06,0xbf800000,2 -np.float32,0x428d5f26,0xb60c0105,2 -np.float32,0xc28d5f26,0xb60c0105,2 -np.float32,0x430d5f26,0xbf800000,2 -np.float32,0xc30d5f26,0xbf800000,2 -np.float32,0x438d5f26,0x3f800000,2 -np.float32,0xc38d5f26,0x3f800000,2 -np.float32,0x428ef146,0xbf350526,2 -np.float32,0xc28ef146,0xbf350526,2 -np.float32,0x430ef146,0x3710bc40,2 -np.float32,0xc30ef146,0x3710bc40,2 -np.float32,0x438ef146,0xbf800000,2 -np.float32,0xc38ef146,0xbf800000,2 -np.float32,0x42908365,0xbf800000,2 -np.float32,0xc2908365,0xbf800000,2 -np.float32,0x43108365,0x3f800000,2 -np.float32,0xc3108365,0x3f800000,2 -np.float32,0x43908365,0x3f800000,2 -np.float32,0xc3908365,0x3f800000,2 -np.float32,0x42921585,0xbf3504e6,2 -np.float32,0xc2921585,0xbf3504e6,2 -np.float32,0x43121585,0xb618cee8,2 -np.float32,0xc3121585,0xb618cee8,2 -np.float32,0x43921585,0xbf800000,2 -np.float32,0xc3921585,0xbf800000,2 -np.float32,0x4293a7a5,0x3661deee,2 -np.float32,0xc293a7a5,0x3661deee,2 -np.float32,0x4313a7a5,0xbf800000,2 -np.float32,0xc313a7a5,0xbf800000,2 -np.float32,0x4393a7a5,0x3f800000,2 -np.float32,0xc393a7a5,0x3f800000,2 -np.float32,0x429539c5,0x3f350536,2 -np.float32,0xc29539c5,0x3f350536,2 -np.float32,0x431539c5,0x373bab34,2 -np.float32,0xc31539c5,0x373bab34,2 -np.float32,0x439539c5,0xbf800000,2 -np.float32,0xc39539c5,0xbf800000,2 -np.float32,0x4296cbe4,0x3f800000,2 -np.float32,0xc296cbe4,0x3f800000,2 -np.float32,0x4316cbe4,0x3f800000,2 -np.float32,0xc316cbe4,0x3f800000,2 -np.float32,0x4396cbe4,0x3f800000,2 
-np.float32,0xc396cbe4,0x3f800000,2 -np.float32,0x42985e04,0x3f3504d7,2 -np.float32,0xc2985e04,0x3f3504d7,2 -np.float32,0x43185e04,0xb6a2455d,2 -np.float32,0xc3185e04,0xb6a2455d,2 -np.float32,0x43985e04,0xbf800000,2 -np.float32,0xc3985e04,0xbf800000,2 -np.float32,0x4299f024,0xb69bde6c,2 -np.float32,0xc299f024,0xb69bde6c,2 -np.float32,0x4319f024,0xbf800000,2 -np.float32,0xc319f024,0xbf800000,2 -np.float32,0x4399f024,0x3f800000,2 -np.float32,0xc399f024,0x3f800000,2 -np.float32,0x429b8243,0xbf3504ea,2 -np.float32,0xc29b8243,0xbf3504ea,2 -np.float32,0x431b8243,0xb5cb2eb8,2 -np.float32,0xc31b8243,0xb5cb2eb8,2 -np.float32,0x439b8243,0xbf800000,2 -np.float32,0xc39b8243,0xbf800000,2 -np.float32,0x435b2047,0x3f3504c1,2 -np.float32,0x42a038a2,0xb5e4ca7e,2 -np.float32,0x432038a2,0xbf800000,2 -np.float32,0x4345eb9b,0xbf800000,2 -np.float32,0x42c5eb9b,0xb5de638c,2 -np.float32,0x42eb9e94,0xb5d7fc9b,2 -np.float32,0x4350ea79,0x3631dadb,2 -np.float32,0x42dbe957,0xbf800000,2 -np.float32,0x425be957,0xb505522a,2 -np.float32,0x435be957,0x3f800000,2 -np.float32,0x487fe5ab,0xba140185,2 -np.float32,0x497fe5ab,0x3f7fffd5,2 -np.float32,0x49ffe5ab,0x3f7fff55,2 -np.float32,0x49ffeb37,0x3b9382f5,2 -np.float32,0x497ff0c3,0x3b13049f,2 -np.float32,0x49fff0c3,0xbf7fff57,2 -np.float32,0x49fff64f,0xbb928618,2 -np.float32,0x497ffbdb,0xbf7fffd6,2 -np.float32,0x49fffbdb,0x3f7fff59,2 -np.float32,0x48fffbdb,0xba9207c6,2 -np.float32,0x4e736e56,0xbf800000,2 -np.float32,0x4d4da377,0xbf800000,2 -np.float32,0x4ece58c3,0xbf800000,2 -np.float32,0x4ee0db9c,0xbf800000,2 -np.float32,0x4dee7002,0x3f800000,2 -np.float32,0x4ee86afc,0x38857a23,2 -np.float32,0x4dca4f3f,0xbf800000,2 -np.float32,0x4ecb48af,0xb95d1e10,2 -np.float32,0x4e51e33f,0xbf800000,2 -np.float32,0x4ef5f421,0xbf800000,2 -np.float32,0x46027eb2,0x3e7d94c9,2 -np.float32,0x4477baed,0xbe7f1824,2 -np.float32,0x454b8024,0x3e7f5268,2 -np.float32,0x455d2c09,0x3e7f40cb,2 -np.float32,0x4768d3de,0xba14b4af,2 -np.float32,0x46c1e7cd,0x3e7fb102,2 
-np.float32,0x44a52949,0xbe7dc9d5,2 -np.float32,0x4454633a,0x3e7dbc7d,2 -np.float32,0x4689810b,0x3e7eb02b,2 -np.float32,0x473473cd,0xbe7eef6f,2 -np.float32,0x44a5193f,0x3e7e1b1f,2 -np.float32,0x46004b36,0x3e7dac59,2 -np.float32,0x467f604b,0x3d7ffd3a,2 -np.float32,0x45ea1805,0x3dffd2e0,2 -np.float32,0x457b6af3,0x3dff7831,2 -np.float32,0x44996159,0xbe7d85f4,2 -np.float32,0x47883553,0xbb80584e,2 -np.float32,0x44e19f0c,0xbdffcfe6,2 -np.float32,0x472b3bf6,0xbe7f7a82,2 -np.float32,0x4600bb4e,0x3a135e33,2 -np.float32,0x449f4556,0x3e7e42e5,2 -np.float32,0x474e9420,0x3dff77b2,2 -np.float32,0x45cbdb23,0x3dff7240,2 -np.float32,0x44222747,0x3dffb039,2 -np.float32,0x4772e419,0xbdff74b8,2 diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-exp b/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-exp deleted file mode 100644 index 1b2cc9c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-exp +++ /dev/null @@ -1,135 +0,0 @@ -dtype,input,output,ulperrortol -## +ve denormals ## -np.float32,0x004b4716,0x3f800000,3 -np.float32,0x007b2490,0x3f800000,3 -np.float32,0x007c99fa,0x3f800000,3 -np.float32,0x00734a0c,0x3f800000,3 -np.float32,0x0070de24,0x3f800000,3 -np.float32,0x00495d65,0x3f800000,3 -np.float32,0x006894f6,0x3f800000,3 -np.float32,0x00555a76,0x3f800000,3 -np.float32,0x004e1fb8,0x3f800000,3 -np.float32,0x00687de9,0x3f800000,3 -## -ve denormals ## -np.float32,0x805b59af,0x3f800000,3 -np.float32,0x807ed8ed,0x3f800000,3 -np.float32,0x807142ad,0x3f800000,3 -np.float32,0x80772002,0x3f800000,3 -np.float32,0x8062abcb,0x3f800000,3 -np.float32,0x8045e31c,0x3f800000,3 -np.float32,0x805f01c2,0x3f800000,3 -np.float32,0x80506432,0x3f800000,3 -np.float32,0x8060089d,0x3f800000,3 -np.float32,0x8071292f,0x3f800000,3 -## floats that output a denormal ## -np.float32,0xc2cf3fc1,0x00000001,3 -np.float32,0xc2c79726,0x00000021,3 -np.float32,0xc2cb295d,0x00000005,3 -np.float32,0xc2b49e6b,0x00068c4c,3 
-np.float32,0xc2ca8116,0x00000008,3 -np.float32,0xc2c23f82,0x000001d7,3 -np.float32,0xc2cb69c0,0x00000005,3 -np.float32,0xc2cc1f4d,0x00000003,3 -np.float32,0xc2ae094e,0x00affc4c,3 -np.float32,0xc2c86c44,0x00000015,3 -## random floats between -87.0f and 88.0f ## -np.float32,0x4030d7e0,0x417d9a05,3 -np.float32,0x426f60e8,0x6aa1be2c,3 -np.float32,0x41a1b220,0x4e0efc11,3 -np.float32,0xc20cc722,0x26159da7,3 -np.float32,0x41c492bc,0x512ec79d,3 -np.float32,0x40980210,0x42e73a0e,3 -np.float32,0xbf1f7b80,0x3f094de3,3 -np.float32,0x42a678a4,0x7b87a383,3 -np.float32,0xc20f3cfd,0x25a1c304,3 -np.float32,0x423ff34c,0x6216467f,3 -np.float32,0x00000000,0x3f800000,3 -## floats that cause an overflow ## -np.float32,0x7f06d8c1,0x7f800000,3 -np.float32,0x7f451912,0x7f800000,3 -np.float32,0x7ecceac3,0x7f800000,3 -np.float32,0x7f643b45,0x7f800000,3 -np.float32,0x7e910ea0,0x7f800000,3 -np.float32,0x7eb4756b,0x7f800000,3 -np.float32,0x7f4ec708,0x7f800000,3 -np.float32,0x7f6b4551,0x7f800000,3 -np.float32,0x7d8edbda,0x7f800000,3 -np.float32,0x7f730718,0x7f800000,3 -np.float32,0x42b17217,0x7f7fff84,3 -np.float32,0x42b17218,0x7f800000,3 -np.float32,0x42b17219,0x7f800000,3 -np.float32,0xfef2b0bc,0x00000000,3 -np.float32,0xff69f83e,0x00000000,3 -np.float32,0xff4ecb12,0x00000000,3 -np.float32,0xfeac6d86,0x00000000,3 -np.float32,0xfde0cdb8,0x00000000,3 -np.float32,0xff26aef4,0x00000000,3 -np.float32,0xff6f9277,0x00000000,3 -np.float32,0xff7adfc4,0x00000000,3 -np.float32,0xff0ad40e,0x00000000,3 -np.float32,0xff6fd8f3,0x00000000,3 -np.float32,0xc2cff1b4,0x00000001,3 -np.float32,0xc2cff1b5,0x00000000,3 -np.float32,0xc2cff1b6,0x00000000,3 -np.float32,0x7f800000,0x7f800000,3 -np.float32,0xff800000,0x00000000,3 -np.float32,0x4292f27c,0x7480000a,3 -np.float32,0x42a920be,0x7c7fff94,3 -np.float32,0x41c214c9,0x50ffffd9,3 -np.float32,0x41abe686,0x4effffd9,3 -np.float32,0x4287db5a,0x707fffd3,3 -np.float32,0x41902cbb,0x4c800078,3 -np.float32,0x42609466,0x67ffffeb,3 -np.float32,0x41a65af5,0x4e7fffd1,3 
-np.float32,0x417f13ff,0x4affffc9,3 -np.float32,0x426d0e6c,0x6a3504f2,3 -np.float32,0x41bc8934,0x507fff51,3 -np.float32,0x42a7bdde,0x7c0000d6,3 -np.float32,0x4120cf66,0x46b504f6,3 -np.float32,0x4244da8f,0x62ffff1a,3 -np.float32,0x41a0cf69,0x4e000034,3 -np.float32,0x41cd2bec,0x52000005,3 -np.float32,0x42893e41,0x7100009e,3 -np.float32,0x41b437e1,0x4fb50502,3 -np.float32,0x41d8430f,0x5300001d,3 -np.float32,0x4244da92,0x62ffffda,3 -np.float32,0x41a0cf63,0x4dffffa9,3 -np.float32,0x3eb17218,0x3fb504f3,3 -np.float32,0x428729e8,0x703504dc,3 -np.float32,0x41a0cf67,0x4e000014,3 -np.float32,0x4252b77d,0x65800011,3 -np.float32,0x41902cb9,0x4c800058,3 -np.float32,0x42a0cf67,0x79800052,3 -np.float32,0x4152b77b,0x48ffffe9,3 -np.float32,0x41265af3,0x46ffffc8,3 -np.float32,0x42187e0b,0x5affff9a,3 -np.float32,0xc0d2b77c,0x3ab504f6,3 -np.float32,0xc283b2ac,0x10000072,3 -np.float32,0xc1cff1b4,0x2cb504f5,3 -np.float32,0xc05dce9e,0x3d000000,3 -np.float32,0xc28ec9d2,0x0bfffea5,3 -np.float32,0xc23c893a,0x1d7fffde,3 -np.float32,0xc2a920c0,0x027fff6c,3 -np.float32,0xc1f9886f,0x2900002b,3 -np.float32,0xc2c42920,0x000000b5,3 -np.float32,0xc2893e41,0x0dfffec5,3 -np.float32,0xc2c4da93,0x00000080,3 -np.float32,0xc17f1401,0x3400000c,3 -np.float32,0xc1902cb6,0x327fffaf,3 -np.float32,0xc27c4e3b,0x11ffffc5,3 -np.float32,0xc268e5c5,0x157ffe9d,3 -np.float32,0xc2b4e953,0x0005a826,3 -np.float32,0xc287db5a,0x0e800016,3 -np.float32,0xc207db5a,0x2700000b,3 -np.float32,0xc2b2d4fe,0x000ffff1,3 -np.float32,0xc268e5c0,0x157fffdd,3 -np.float32,0xc22920bd,0x2100003b,3 -np.float32,0xc2902caf,0x0b80011e,3 -np.float32,0xc1902cba,0x327fff2f,3 -np.float32,0xc2ca6625,0x00000008,3 -np.float32,0xc280ece8,0x10fffeb5,3 -np.float32,0xc2918f94,0x0b0000ea,3 -np.float32,0xc29b43d5,0x077ffffc,3 -np.float32,0xc1e61ff7,0x2ab504f5,3 -np.float32,0xc2867878,0x0effff15,3 -np.float32,0xc2a2324a,0x04fffff4,3 diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-log 
b/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-log deleted file mode 100644 index a7bd984..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-log +++ /dev/null @@ -1,118 +0,0 @@ -dtype,input,output,ulperrortol -## +ve denormals ## -np.float32,0x004b4716,0xc2afbc1b,4 -np.float32,0x007b2490,0xc2aec01e,4 -np.float32,0x007c99fa,0xc2aeba17,4 -np.float32,0x00734a0c,0xc2aee1dc,4 -np.float32,0x0070de24,0xc2aeecba,4 -np.float32,0x007fffff,0xc2aeac50,4 -np.float32,0x00000001,0xc2ce8ed0,4 -## -ve denormals ## -np.float32,0x80495d65,0xffc00000,4 -np.float32,0x806894f6,0xffc00000,4 -np.float32,0x80555a76,0xffc00000,4 -np.float32,0x804e1fb8,0xffc00000,4 -np.float32,0x80687de9,0xffc00000,4 -np.float32,0x807fffff,0xffc00000,4 -np.float32,0x80000001,0xffc00000,4 -## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ## -np.float32,0x00000000,0xff800000,4 -np.float32,0x80000000,0xff800000,4 -np.float32,0x7f7fffff,0x42b17218,4 -np.float32,0x80800000,0xffc00000,4 -np.float32,0xff7fffff,0xffc00000,4 -## 1.00f + 0x00000001 ## -np.float32,0x3f800000,0x00000000,4 -np.float32,0x3f800001,0x33ffffff,4 -np.float32,0x3f800002,0x347ffffe,4 -np.float32,0x3f7fffff,0xb3800000,4 -np.float32,0x3f7ffffe,0xb4000000,4 -np.float32,0x3f7ffffd,0xb4400001,4 -np.float32,0x402df853,0x3f7ffffe,4 -np.float32,0x402df854,0x3f7fffff,4 -np.float32,0x402df855,0x3f800000,4 -np.float32,0x402df856,0x3f800001,4 -np.float32,0x3ebc5ab0,0xbf800001,4 -np.float32,0x3ebc5ab1,0xbf800000,4 -np.float32,0x3ebc5ab2,0xbf800000,4 -np.float32,0x3ebc5ab3,0xbf7ffffe,4 -np.float32,0x423ef575,0x407768ab,4 -np.float32,0x427b8c61,0x408485dd,4 -np.float32,0x4211e9ee,0x406630b0,4 -np.float32,0x424d5c41,0x407c0fed,4 -np.float32,0x42be722a,0x4091cc91,4 -np.float32,0x42b73d30,0x4090908b,4 -np.float32,0x427e48e2,0x4084de7f,4 -np.float32,0x428f759b,0x4088bba3,4 -np.float32,0x41629069,0x4029a0cc,4 -np.float32,0x4272c99d,0x40836379,4 -np.float32,0x4d1b7458,0x4197463d,4 
-np.float32,0x4f10c594,0x41ace2b2,4 -np.float32,0x4ea397c2,0x41a85171,4 -np.float32,0x4fefa9d1,0x41b6769c,4 -np.float32,0x4ebac6ab,0x41a960dc,4 -np.float32,0x4f6efb42,0x41b0e535,4 -np.float32,0x4e9ab8e7,0x41a7df44,4 -np.float32,0x4e81b5d1,0x41a67625,4 -np.float32,0x5014d9f2,0x41b832bd,4 -np.float32,0x4f02175c,0x41ac07b8,4 -np.float32,0x7f034f89,0x42b01c47,4 -np.float32,0x7f56d00e,0x42b11849,4 -np.float32,0x7f1cd5f6,0x42b0773a,4 -np.float32,0x7e979174,0x42af02d7,4 -np.float32,0x7f23369f,0x42b08ba2,4 -np.float32,0x7f0637ae,0x42b0277d,4 -np.float32,0x7efcb6e8,0x42b00897,4 -np.float32,0x7f7907c8,0x42b163f6,4 -np.float32,0x7e95c4c2,0x42aefcba,4 -np.float32,0x7f4577b2,0x42b0ed2d,4 -np.float32,0x3f49c92e,0xbe73ae84,4 -np.float32,0x3f4a23d1,0xbe71e2f8,4 -np.float32,0x3f4abb67,0xbe6ee430,4 -np.float32,0x3f48169a,0xbe7c5532,4 -np.float32,0x3f47f5fa,0xbe7cfc37,4 -np.float32,0x3f488309,0xbe7a2ad8,4 -np.float32,0x3f479df4,0xbe7ebf5f,4 -np.float32,0x3f47cfff,0xbe7dbec9,4 -np.float32,0x3f496704,0xbe75a125,4 -np.float32,0x3f478ee8,0xbe7f0c92,4 -np.float32,0x3f4a763b,0xbe7041ce,4 -np.float32,0x3f47a108,0xbe7eaf94,4 -np.float32,0x3f48136c,0xbe7c6578,4 -np.float32,0x3f481c17,0xbe7c391c,4 -np.float32,0x3f47cd28,0xbe7dcd56,4 -np.float32,0x3f478be8,0xbe7f1bf7,4 -np.float32,0x3f4c1f8e,0xbe67e367,4 -np.float32,0x3f489b0c,0xbe79b03f,4 -np.float32,0x3f4934cf,0xbe76a08a,4 -np.float32,0x3f4954df,0xbe75fd6a,4 -np.float32,0x3f47a3f5,0xbe7ea093,4 -np.float32,0x3f4ba4fc,0xbe6a4b02,4 -np.float32,0x3f47a0e1,0xbe7eb05c,4 -np.float32,0x3f48c30a,0xbe78e42f,4 -np.float32,0x3f48cab8,0xbe78bd05,4 -np.float32,0x3f4b0569,0xbe6d6ea4,4 -np.float32,0x3f47de32,0xbe7d7607,4 -np.float32,0x3f477328,0xbe7f9b00,4 -np.float32,0x3f496dab,0xbe757f52,4 -np.float32,0x3f47662c,0xbe7fddac,4 -np.float32,0x3f48ddd8,0xbe785b80,4 -np.float32,0x3f481866,0xbe7c4bff,4 -np.float32,0x3f48b119,0xbe793fb6,4 -np.float32,0x3f48c7e8,0xbe78cb5c,4 -np.float32,0x3f4985f6,0xbe7503da,4 -np.float32,0x3f483fdf,0xbe7b8212,4 
-np.float32,0x3f4b1c76,0xbe6cfa67,4 -np.float32,0x3f480b2e,0xbe7c8fa8,4 -np.float32,0x3f48745f,0xbe7a75bf,4 -np.float32,0x3f485bda,0xbe7af308,4 -np.float32,0x3f47a660,0xbe7e942c,4 -np.float32,0x3f47d4d5,0xbe7da600,4 -np.float32,0x3f4b0a26,0xbe6d56be,4 -np.float32,0x3f4a4883,0xbe712924,4 -np.float32,0x3f4769e7,0xbe7fca84,4 -np.float32,0x3f499702,0xbe74ad3f,4 -np.float32,0x3f494ab1,0xbe763131,4 -np.float32,0x3f476b69,0xbe7fc2c6,4 -np.float32,0x3f4884e8,0xbe7a214a,4 -np.float32,0x3f486945,0xbe7aae76,4 diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-sin b/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-sin deleted file mode 100644 index a562731..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-sin +++ /dev/null @@ -1,707 +0,0 @@ -dtype,input,output,ulperrortol -## +ve denormals ## -np.float32,0x004b4716,0x004b4716,2 -np.float32,0x007b2490,0x007b2490,2 -np.float32,0x007c99fa,0x007c99fa,2 -np.float32,0x00734a0c,0x00734a0c,2 -np.float32,0x0070de24,0x0070de24,2 -np.float32,0x007fffff,0x007fffff,2 -np.float32,0x00000001,0x00000001,2 -## -ve denormals ## -np.float32,0x80495d65,0x80495d65,2 -np.float32,0x806894f6,0x806894f6,2 -np.float32,0x80555a76,0x80555a76,2 -np.float32,0x804e1fb8,0x804e1fb8,2 -np.float32,0x80687de9,0x80687de9,2 -np.float32,0x807fffff,0x807fffff,2 -np.float32,0x80000001,0x80000001,2 -## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ## -np.float32,0x00000000,0x00000000,2 -np.float32,0x80000000,0x80000000,2 -np.float32,0x00800000,0x00800000,2 -np.float32,0x7f7fffff,0xbf0599b3,2 -np.float32,0x80800000,0x80800000,2 -np.float32,0xff7fffff,0x3f0599b3,2 -## 1.00f ## -np.float32,0x3f800000,0x3f576aa4,2 -np.float32,0x3f800001,0x3f576aa6,2 -np.float32,0x3f800002,0x3f576aa7,2 -np.float32,0xc090a8b0,0x3f7b4e48,2 -np.float32,0x41ce3184,0x3f192d43,2 -np.float32,0xc1d85848,0xbf7161cb,2 -np.float32,0x402b8820,0x3ee3f29f,2 -np.float32,0x42b4e454,0x3f1d0151,2 
-np.float32,0x42a67a60,0x3f7ffa4c,2 -np.float32,0x41d92388,0x3f67beef,2 -np.float32,0x422dd66c,0xbeffb0c1,2 -np.float32,0xc28f5be6,0xbf0bae79,2 -np.float32,0x41ab2674,0x3f0ffe2b,2 -np.float32,0xd0102756,0x3f227e8a,2 -np.float32,0xcf99405e,0x3f73ad00,2 -np.float32,0xcfd83a12,0xbf7151a7,2 -np.float32,0x4fb54db0,0xbe46354b,2 -np.float32,0xcfcca29d,0xbe9345e6,2 -np.float32,0xceec2ac0,0x3e98dc89,2 -np.float32,0xcfdca97f,0xbf60b2b4,2 -np.float32,0xcfe92b0a,0xbf222705,2 -np.float32,0x5014b0eb,0x3f63e75c,2 -np.float32,0xcfa7ee96,0x3f62ada4,2 -np.float32,0x754c09a0,0xbf617056,2 -np.float32,0x77a731fb,0x3f44472b,2 -np.float32,0x76de2494,0xbe680739,2 -np.float32,0xf74920dc,0xbf193338,2 -np.float32,0x7707a312,0xbf6f51b1,2 -np.float32,0x75bf9790,0xbd0f1a47,2 -np.float32,0xf4ca7c40,0xbf7d45e7,2 -np.float32,0x77e91899,0x3f767181,2 -np.float32,0xf74c9820,0xbd685b75,2 -np.float32,0x7785ca29,0x3f78ee61,2 -np.float32,0x3f490fdb,0x3f3504f3,2 -np.float32,0xbf490fdb,0xbf3504f3,2 -np.float32,0x3fc90fdb,0x3f800000,2 -np.float32,0xbfc90fdb,0xbf800000,2 -np.float32,0x40490fdb,0xb3bbbd2e,2 -np.float32,0xc0490fdb,0x33bbbd2e,2 -np.float32,0x3fc90fdb,0x3f800000,2 -np.float32,0xbfc90fdb,0xbf800000,2 -np.float32,0x40490fdb,0xb3bbbd2e,2 -np.float32,0xc0490fdb,0x33bbbd2e,2 -np.float32,0x40c90fdb,0x343bbd2e,2 -np.float32,0xc0c90fdb,0xb43bbd2e,2 -np.float32,0x4016cbe4,0x3f3504f3,2 -np.float32,0xc016cbe4,0xbf3504f3,2 -np.float32,0x4096cbe4,0xbf800000,2 -np.float32,0xc096cbe4,0x3f800000,2 -np.float32,0x4116cbe4,0xb2ccde2e,2 -np.float32,0xc116cbe4,0x32ccde2e,2 -np.float32,0x40490fdb,0xb3bbbd2e,2 -np.float32,0xc0490fdb,0x33bbbd2e,2 -np.float32,0x40c90fdb,0x343bbd2e,2 -np.float32,0xc0c90fdb,0xb43bbd2e,2 -np.float32,0x41490fdb,0x34bbbd2e,2 -np.float32,0xc1490fdb,0xb4bbbd2e,2 -np.float32,0x407b53d2,0xbf3504f5,2 -np.float32,0xc07b53d2,0x3f3504f5,2 -np.float32,0x40fb53d2,0x3f800000,2 -np.float32,0xc0fb53d2,0xbf800000,2 -np.float32,0x417b53d2,0xb535563d,2 -np.float32,0xc17b53d2,0x3535563d,2 
-np.float32,0x4096cbe4,0xbf800000,2 -np.float32,0xc096cbe4,0x3f800000,2 -np.float32,0x4116cbe4,0xb2ccde2e,2 -np.float32,0xc116cbe4,0x32ccde2e,2 -np.float32,0x4196cbe4,0x334cde2e,2 -np.float32,0xc196cbe4,0xb34cde2e,2 -np.float32,0x40afede0,0xbf3504ef,2 -np.float32,0xc0afede0,0x3f3504ef,2 -np.float32,0x412fede0,0xbf800000,2 -np.float32,0xc12fede0,0x3f800000,2 -np.float32,0x41afede0,0xb5b222c4,2 -np.float32,0xc1afede0,0x35b222c4,2 -np.float32,0x40c90fdb,0x343bbd2e,2 -np.float32,0xc0c90fdb,0xb43bbd2e,2 -np.float32,0x41490fdb,0x34bbbd2e,2 -np.float32,0xc1490fdb,0xb4bbbd2e,2 -np.float32,0x41c90fdb,0x353bbd2e,2 -np.float32,0xc1c90fdb,0xb53bbd2e,2 -np.float32,0x40e231d6,0x3f3504f3,2 -np.float32,0xc0e231d6,0xbf3504f3,2 -np.float32,0x416231d6,0x3f800000,2 -np.float32,0xc16231d6,0xbf800000,2 -np.float32,0x41e231d6,0xb399a6a2,2 -np.float32,0xc1e231d6,0x3399a6a2,2 -np.float32,0x40fb53d2,0x3f800000,2 -np.float32,0xc0fb53d2,0xbf800000,2 -np.float32,0x417b53d2,0xb535563d,2 -np.float32,0xc17b53d2,0x3535563d,2 -np.float32,0x41fb53d2,0x35b5563d,2 -np.float32,0xc1fb53d2,0xb5b5563d,2 -np.float32,0x410a3ae7,0x3f3504eb,2 -np.float32,0xc10a3ae7,0xbf3504eb,2 -np.float32,0x418a3ae7,0xbf800000,2 -np.float32,0xc18a3ae7,0x3f800000,2 -np.float32,0x420a3ae7,0xb6308908,2 -np.float32,0xc20a3ae7,0x36308908,2 -np.float32,0x4116cbe4,0xb2ccde2e,2 -np.float32,0xc116cbe4,0x32ccde2e,2 -np.float32,0x4196cbe4,0x334cde2e,2 -np.float32,0xc196cbe4,0xb34cde2e,2 -np.float32,0x4216cbe4,0x33ccde2e,2 -np.float32,0xc216cbe4,0xb3ccde2e,2 -np.float32,0x41235ce2,0xbf3504f7,2 -np.float32,0xc1235ce2,0x3f3504f7,2 -np.float32,0x41a35ce2,0x3f800000,2 -np.float32,0xc1a35ce2,0xbf800000,2 -np.float32,0x42235ce2,0xb5b889b6,2 -np.float32,0xc2235ce2,0x35b889b6,2 -np.float32,0x412fede0,0xbf800000,2 -np.float32,0xc12fede0,0x3f800000,2 -np.float32,0x41afede0,0xb5b222c4,2 -np.float32,0xc1afede0,0x35b222c4,2 -np.float32,0x422fede0,0x363222c4,2 -np.float32,0xc22fede0,0xb63222c4,2 -np.float32,0x413c7edd,0xbf3504f3,2 
-np.float32,0xc13c7edd,0x3f3504f3,2 -np.float32,0x41bc7edd,0xbf800000,2 -np.float32,0xc1bc7edd,0x3f800000,2 -np.float32,0x423c7edd,0xb4000add,2 -np.float32,0xc23c7edd,0x34000add,2 -np.float32,0x41490fdb,0x34bbbd2e,2 -np.float32,0xc1490fdb,0xb4bbbd2e,2 -np.float32,0x41c90fdb,0x353bbd2e,2 -np.float32,0xc1c90fdb,0xb53bbd2e,2 -np.float32,0x42490fdb,0x35bbbd2e,2 -np.float32,0xc2490fdb,0xb5bbbd2e,2 -np.float32,0x4155a0d9,0x3f3504fb,2 -np.float32,0xc155a0d9,0xbf3504fb,2 -np.float32,0x41d5a0d9,0x3f800000,2 -np.float32,0xc1d5a0d9,0xbf800000,2 -np.float32,0x4255a0d9,0xb633bc81,2 -np.float32,0xc255a0d9,0x3633bc81,2 -np.float32,0x416231d6,0x3f800000,2 -np.float32,0xc16231d6,0xbf800000,2 -np.float32,0x41e231d6,0xb399a6a2,2 -np.float32,0xc1e231d6,0x3399a6a2,2 -np.float32,0x426231d6,0x3419a6a2,2 -np.float32,0xc26231d6,0xb419a6a2,2 -np.float32,0x416ec2d4,0x3f3504ef,2 -np.float32,0xc16ec2d4,0xbf3504ef,2 -np.float32,0x41eec2d4,0xbf800000,2 -np.float32,0xc1eec2d4,0x3f800000,2 -np.float32,0x426ec2d4,0xb5bef0a7,2 -np.float32,0xc26ec2d4,0x35bef0a7,2 -np.float32,0x417b53d2,0xb535563d,2 -np.float32,0xc17b53d2,0x3535563d,2 -np.float32,0x41fb53d2,0x35b5563d,2 -np.float32,0xc1fb53d2,0xb5b5563d,2 -np.float32,0x427b53d2,0x3635563d,2 -np.float32,0xc27b53d2,0xb635563d,2 -np.float32,0x4183f268,0xbf3504ff,2 -np.float32,0xc183f268,0x3f3504ff,2 -np.float32,0x4203f268,0x3f800000,2 -np.float32,0xc203f268,0xbf800000,2 -np.float32,0x4283f268,0xb6859a13,2 -np.float32,0xc283f268,0x36859a13,2 -np.float32,0x418a3ae7,0xbf800000,2 -np.float32,0xc18a3ae7,0x3f800000,2 -np.float32,0x420a3ae7,0xb6308908,2 -np.float32,0xc20a3ae7,0x36308908,2 -np.float32,0x428a3ae7,0x36b08908,2 -np.float32,0xc28a3ae7,0xb6b08908,2 -np.float32,0x41908365,0xbf3504f6,2 -np.float32,0xc1908365,0x3f3504f6,2 -np.float32,0x42108365,0xbf800000,2 -np.float32,0xc2108365,0x3f800000,2 -np.float32,0x42908365,0x3592200d,2 -np.float32,0xc2908365,0xb592200d,2 -np.float32,0x4196cbe4,0x334cde2e,2 -np.float32,0xc196cbe4,0xb34cde2e,2 
-np.float32,0x4216cbe4,0x33ccde2e,2 -np.float32,0xc216cbe4,0xb3ccde2e,2 -np.float32,0x4296cbe4,0x344cde2e,2 -np.float32,0xc296cbe4,0xb44cde2e,2 -np.float32,0x419d1463,0x3f3504f8,2 -np.float32,0xc19d1463,0xbf3504f8,2 -np.float32,0x421d1463,0x3f800000,2 -np.float32,0xc21d1463,0xbf800000,2 -np.float32,0x429d1463,0xb5c55799,2 -np.float32,0xc29d1463,0x35c55799,2 -np.float32,0x41a35ce2,0x3f800000,2 -np.float32,0xc1a35ce2,0xbf800000,2 -np.float32,0x42235ce2,0xb5b889b6,2 -np.float32,0xc2235ce2,0x35b889b6,2 -np.float32,0x42a35ce2,0x363889b6,2 -np.float32,0xc2a35ce2,0xb63889b6,2 -np.float32,0x41a9a561,0x3f3504e7,2 -np.float32,0xc1a9a561,0xbf3504e7,2 -np.float32,0x4229a561,0xbf800000,2 -np.float32,0xc229a561,0x3f800000,2 -np.float32,0x42a9a561,0xb68733d0,2 -np.float32,0xc2a9a561,0x368733d0,2 -np.float32,0x41afede0,0xb5b222c4,2 -np.float32,0xc1afede0,0x35b222c4,2 -np.float32,0x422fede0,0x363222c4,2 -np.float32,0xc22fede0,0xb63222c4,2 -np.float32,0x42afede0,0x36b222c4,2 -np.float32,0xc2afede0,0xb6b222c4,2 -np.float32,0x41b6365e,0xbf3504f0,2 -np.float32,0xc1b6365e,0x3f3504f0,2 -np.float32,0x4236365e,0x3f800000,2 -np.float32,0xc236365e,0xbf800000,2 -np.float32,0x42b6365e,0x358bb91c,2 -np.float32,0xc2b6365e,0xb58bb91c,2 -np.float32,0x41bc7edd,0xbf800000,2 -np.float32,0xc1bc7edd,0x3f800000,2 -np.float32,0x423c7edd,0xb4000add,2 -np.float32,0xc23c7edd,0x34000add,2 -np.float32,0x42bc7edd,0x34800add,2 -np.float32,0xc2bc7edd,0xb4800add,2 -np.float32,0x41c2c75c,0xbf3504ef,2 -np.float32,0xc1c2c75c,0x3f3504ef,2 -np.float32,0x4242c75c,0xbf800000,2 -np.float32,0xc242c75c,0x3f800000,2 -np.float32,0x42c2c75c,0xb5cbbe8a,2 -np.float32,0xc2c2c75c,0x35cbbe8a,2 -np.float32,0x41c90fdb,0x353bbd2e,2 -np.float32,0xc1c90fdb,0xb53bbd2e,2 -np.float32,0x42490fdb,0x35bbbd2e,2 -np.float32,0xc2490fdb,0xb5bbbd2e,2 -np.float32,0x42c90fdb,0x363bbd2e,2 -np.float32,0xc2c90fdb,0xb63bbd2e,2 -np.float32,0x41cf585a,0x3f3504ff,2 -np.float32,0xc1cf585a,0xbf3504ff,2 -np.float32,0x424f585a,0x3f800000,2 
-np.float32,0xc24f585a,0xbf800000,2 -np.float32,0x42cf585a,0xb688cd8c,2 -np.float32,0xc2cf585a,0x3688cd8c,2 -np.float32,0x41d5a0d9,0x3f800000,2 -np.float32,0xc1d5a0d9,0xbf800000,2 -np.float32,0x4255a0d9,0xb633bc81,2 -np.float32,0xc255a0d9,0x3633bc81,2 -np.float32,0x42d5a0d9,0x36b3bc81,2 -np.float32,0xc2d5a0d9,0xb6b3bc81,2 -np.float32,0x41dbe958,0x3f3504e0,2 -np.float32,0xc1dbe958,0xbf3504e0,2 -np.float32,0x425be958,0xbf800000,2 -np.float32,0xc25be958,0x3f800000,2 -np.float32,0x42dbe958,0xb6deab75,2 -np.float32,0xc2dbe958,0x36deab75,2 -np.float32,0x41e231d6,0xb399a6a2,2 -np.float32,0xc1e231d6,0x3399a6a2,2 -np.float32,0x426231d6,0x3419a6a2,2 -np.float32,0xc26231d6,0xb419a6a2,2 -np.float32,0x42e231d6,0x3499a6a2,2 -np.float32,0xc2e231d6,0xb499a6a2,2 -np.float32,0x41e87a55,0xbf3504f8,2 -np.float32,0xc1e87a55,0x3f3504f8,2 -np.float32,0x42687a55,0x3f800000,2 -np.float32,0xc2687a55,0xbf800000,2 -np.float32,0x42e87a55,0xb5d2257b,2 -np.float32,0xc2e87a55,0x35d2257b,2 -np.float32,0x41eec2d4,0xbf800000,2 -np.float32,0xc1eec2d4,0x3f800000,2 -np.float32,0x426ec2d4,0xb5bef0a7,2 -np.float32,0xc26ec2d4,0x35bef0a7,2 -np.float32,0x42eec2d4,0x363ef0a7,2 -np.float32,0xc2eec2d4,0xb63ef0a7,2 -np.float32,0x41f50b53,0xbf3504e7,2 -np.float32,0xc1f50b53,0x3f3504e7,2 -np.float32,0x42750b53,0xbf800000,2 -np.float32,0xc2750b53,0x3f800000,2 -np.float32,0x42f50b53,0xb68a6748,2 -np.float32,0xc2f50b53,0x368a6748,2 -np.float32,0x41fb53d2,0x35b5563d,2 -np.float32,0xc1fb53d2,0xb5b5563d,2 -np.float32,0x427b53d2,0x3635563d,2 -np.float32,0xc27b53d2,0xb635563d,2 -np.float32,0x42fb53d2,0x36b5563d,2 -np.float32,0xc2fb53d2,0xb6b5563d,2 -np.float32,0x4200ce28,0x3f3504f0,2 -np.float32,0xc200ce28,0xbf3504f0,2 -np.float32,0x4280ce28,0x3f800000,2 -np.float32,0xc280ce28,0xbf800000,2 -np.float32,0x4300ce28,0x357dd672,2 -np.float32,0xc300ce28,0xb57dd672,2 -np.float32,0x4203f268,0x3f800000,2 -np.float32,0xc203f268,0xbf800000,2 -np.float32,0x4283f268,0xb6859a13,2 -np.float32,0xc283f268,0x36859a13,2 
-np.float32,0x4303f268,0x37059a13,2 -np.float32,0xc303f268,0xb7059a13,2 -np.float32,0x420716a7,0x3f3504ee,2 -np.float32,0xc20716a7,0xbf3504ee,2 -np.float32,0x428716a7,0xbf800000,2 -np.float32,0xc28716a7,0x3f800000,2 -np.float32,0x430716a7,0xb5d88c6d,2 -np.float32,0xc30716a7,0x35d88c6d,2 -np.float32,0x420a3ae7,0xb6308908,2 -np.float32,0xc20a3ae7,0x36308908,2 -np.float32,0x428a3ae7,0x36b08908,2 -np.float32,0xc28a3ae7,0xb6b08908,2 -np.float32,0x430a3ae7,0x37308908,2 -np.float32,0xc30a3ae7,0xb7308908,2 -np.float32,0x420d5f26,0xbf350500,2 -np.float32,0xc20d5f26,0x3f350500,2 -np.float32,0x428d5f26,0x3f800000,2 -np.float32,0xc28d5f26,0xbf800000,2 -np.float32,0x430d5f26,0xb68c0105,2 -np.float32,0xc30d5f26,0x368c0105,2 -np.float32,0x42108365,0xbf800000,2 -np.float32,0xc2108365,0x3f800000,2 -np.float32,0x42908365,0x3592200d,2 -np.float32,0xc2908365,0xb592200d,2 -np.float32,0x43108365,0xb612200d,2 -np.float32,0xc3108365,0x3612200d,2 -np.float32,0x4213a7a5,0xbf3504df,2 -np.float32,0xc213a7a5,0x3f3504df,2 -np.float32,0x4293a7a5,0xbf800000,2 -np.float32,0xc293a7a5,0x3f800000,2 -np.float32,0x4313a7a5,0xb6e1deee,2 -np.float32,0xc313a7a5,0x36e1deee,2 -np.float32,0x4216cbe4,0x33ccde2e,2 -np.float32,0xc216cbe4,0xb3ccde2e,2 -np.float32,0x4296cbe4,0x344cde2e,2 -np.float32,0xc296cbe4,0xb44cde2e,2 -np.float32,0x4316cbe4,0x34ccde2e,2 -np.float32,0xc316cbe4,0xb4ccde2e,2 -np.float32,0x4219f024,0x3f35050f,2 -np.float32,0xc219f024,0xbf35050f,2 -np.float32,0x4299f024,0x3f800000,2 -np.float32,0xc299f024,0xbf800000,2 -np.float32,0x4319f024,0xb71bde6c,2 -np.float32,0xc319f024,0x371bde6c,2 -np.float32,0x421d1463,0x3f800000,2 -np.float32,0xc21d1463,0xbf800000,2 -np.float32,0x429d1463,0xb5c55799,2 -np.float32,0xc29d1463,0x35c55799,2 -np.float32,0x431d1463,0x36455799,2 -np.float32,0xc31d1463,0xb6455799,2 -np.float32,0x422038a3,0x3f3504d0,2 -np.float32,0xc22038a3,0xbf3504d0,2 -np.float32,0x42a038a3,0xbf800000,2 -np.float32,0xc2a038a3,0x3f800000,2 -np.float32,0x432038a3,0xb746cd61,2 
-np.float32,0xc32038a3,0x3746cd61,2 -np.float32,0x42235ce2,0xb5b889b6,2 -np.float32,0xc2235ce2,0x35b889b6,2 -np.float32,0x42a35ce2,0x363889b6,2 -np.float32,0xc2a35ce2,0xb63889b6,2 -np.float32,0x43235ce2,0x36b889b6,2 -np.float32,0xc3235ce2,0xb6b889b6,2 -np.float32,0x42268121,0xbf3504f1,2 -np.float32,0xc2268121,0x3f3504f1,2 -np.float32,0x42a68121,0x3f800000,2 -np.float32,0xc2a68121,0xbf800000,2 -np.float32,0x43268121,0x35643aac,2 -np.float32,0xc3268121,0xb5643aac,2 -np.float32,0x4229a561,0xbf800000,2 -np.float32,0xc229a561,0x3f800000,2 -np.float32,0x42a9a561,0xb68733d0,2 -np.float32,0xc2a9a561,0x368733d0,2 -np.float32,0x4329a561,0x370733d0,2 -np.float32,0xc329a561,0xb70733d0,2 -np.float32,0x422cc9a0,0xbf3504ee,2 -np.float32,0xc22cc9a0,0x3f3504ee,2 -np.float32,0x42acc9a0,0xbf800000,2 -np.float32,0xc2acc9a0,0x3f800000,2 -np.float32,0x432cc9a0,0xb5e55a50,2 -np.float32,0xc32cc9a0,0x35e55a50,2 -np.float32,0x422fede0,0x363222c4,2 -np.float32,0xc22fede0,0xb63222c4,2 -np.float32,0x42afede0,0x36b222c4,2 -np.float32,0xc2afede0,0xb6b222c4,2 -np.float32,0x432fede0,0x373222c4,2 -np.float32,0xc32fede0,0xb73222c4,2 -np.float32,0x4233121f,0x3f350500,2 -np.float32,0xc233121f,0xbf350500,2 -np.float32,0x42b3121f,0x3f800000,2 -np.float32,0xc2b3121f,0xbf800000,2 -np.float32,0x4333121f,0xb68f347d,2 -np.float32,0xc333121f,0x368f347d,2 -np.float32,0x4236365e,0x3f800000,2 -np.float32,0xc236365e,0xbf800000,2 -np.float32,0x42b6365e,0x358bb91c,2 -np.float32,0xc2b6365e,0xb58bb91c,2 -np.float32,0x4336365e,0xb60bb91c,2 -np.float32,0xc336365e,0x360bb91c,2 -np.float32,0x42395a9e,0x3f3504df,2 -np.float32,0xc2395a9e,0xbf3504df,2 -np.float32,0x42b95a9e,0xbf800000,2 -np.float32,0xc2b95a9e,0x3f800000,2 -np.float32,0x43395a9e,0xb6e51267,2 -np.float32,0xc3395a9e,0x36e51267,2 -np.float32,0x423c7edd,0xb4000add,2 -np.float32,0xc23c7edd,0x34000add,2 -np.float32,0x42bc7edd,0x34800add,2 -np.float32,0xc2bc7edd,0xb4800add,2 -np.float32,0x433c7edd,0x35000add,2 -np.float32,0xc33c7edd,0xb5000add,2 
-np.float32,0x423fa31d,0xbf35050f,2 -np.float32,0xc23fa31d,0x3f35050f,2 -np.float32,0x42bfa31d,0x3f800000,2 -np.float32,0xc2bfa31d,0xbf800000,2 -np.float32,0x433fa31d,0xb71d7828,2 -np.float32,0xc33fa31d,0x371d7828,2 -np.float32,0x4242c75c,0xbf800000,2 -np.float32,0xc242c75c,0x3f800000,2 -np.float32,0x42c2c75c,0xb5cbbe8a,2 -np.float32,0xc2c2c75c,0x35cbbe8a,2 -np.float32,0x4342c75c,0x364bbe8a,2 -np.float32,0xc342c75c,0xb64bbe8a,2 -np.float32,0x4245eb9c,0xbf3504d0,2 -np.float32,0xc245eb9c,0x3f3504d0,2 -np.float32,0x42c5eb9c,0xbf800000,2 -np.float32,0xc2c5eb9c,0x3f800000,2 -np.float32,0x4345eb9c,0xb748671d,2 -np.float32,0xc345eb9c,0x3748671d,2 -np.float32,0x42490fdb,0x35bbbd2e,2 -np.float32,0xc2490fdb,0xb5bbbd2e,2 -np.float32,0x42c90fdb,0x363bbd2e,2 -np.float32,0xc2c90fdb,0xb63bbd2e,2 -np.float32,0x43490fdb,0x36bbbd2e,2 -np.float32,0xc3490fdb,0xb6bbbd2e,2 -np.float32,0x424c341a,0x3f3504f1,2 -np.float32,0xc24c341a,0xbf3504f1,2 -np.float32,0x42cc341a,0x3f800000,2 -np.float32,0xc2cc341a,0xbf800000,2 -np.float32,0x434c341a,0x354a9ee6,2 -np.float32,0xc34c341a,0xb54a9ee6,2 -np.float32,0x424f585a,0x3f800000,2 -np.float32,0xc24f585a,0xbf800000,2 -np.float32,0x42cf585a,0xb688cd8c,2 -np.float32,0xc2cf585a,0x3688cd8c,2 -np.float32,0x434f585a,0x3708cd8c,2 -np.float32,0xc34f585a,0xb708cd8c,2 -np.float32,0x42527c99,0x3f3504ee,2 -np.float32,0xc2527c99,0xbf3504ee,2 -np.float32,0x42d27c99,0xbf800000,2 -np.float32,0xc2d27c99,0x3f800000,2 -np.float32,0x43527c99,0xb5f22833,2 -np.float32,0xc3527c99,0x35f22833,2 -np.float32,0x4255a0d9,0xb633bc81,2 -np.float32,0xc255a0d9,0x3633bc81,2 -np.float32,0x42d5a0d9,0x36b3bc81,2 -np.float32,0xc2d5a0d9,0xb6b3bc81,2 -np.float32,0x4355a0d9,0x3733bc81,2 -np.float32,0xc355a0d9,0xb733bc81,2 -np.float32,0x4258c518,0xbf350500,2 -np.float32,0xc258c518,0x3f350500,2 -np.float32,0x42d8c518,0x3f800000,2 -np.float32,0xc2d8c518,0xbf800000,2 -np.float32,0x4358c518,0xb69267f6,2 -np.float32,0xc358c518,0x369267f6,2 -np.float32,0x425be958,0xbf800000,2 
-np.float32,0xc25be958,0x3f800000,2 -np.float32,0x42dbe958,0xb6deab75,2 -np.float32,0xc2dbe958,0x36deab75,2 -np.float32,0x435be958,0x375eab75,2 -np.float32,0xc35be958,0xb75eab75,2 -np.float32,0x425f0d97,0xbf3504df,2 -np.float32,0xc25f0d97,0x3f3504df,2 -np.float32,0x42df0d97,0xbf800000,2 -np.float32,0xc2df0d97,0x3f800000,2 -np.float32,0x435f0d97,0xb6e845e0,2 -np.float32,0xc35f0d97,0x36e845e0,2 -np.float32,0x426231d6,0x3419a6a2,2 -np.float32,0xc26231d6,0xb419a6a2,2 -np.float32,0x42e231d6,0x3499a6a2,2 -np.float32,0xc2e231d6,0xb499a6a2,2 -np.float32,0x436231d6,0x3519a6a2,2 -np.float32,0xc36231d6,0xb519a6a2,2 -np.float32,0x42655616,0x3f35050f,2 -np.float32,0xc2655616,0xbf35050f,2 -np.float32,0x42e55616,0x3f800000,2 -np.float32,0xc2e55616,0xbf800000,2 -np.float32,0x43655616,0xb71f11e5,2 -np.float32,0xc3655616,0x371f11e5,2 -np.float32,0x42687a55,0x3f800000,2 -np.float32,0xc2687a55,0xbf800000,2 -np.float32,0x42e87a55,0xb5d2257b,2 -np.float32,0xc2e87a55,0x35d2257b,2 -np.float32,0x43687a55,0x3652257b,2 -np.float32,0xc3687a55,0xb652257b,2 -np.float32,0x426b9e95,0x3f3504cf,2 -np.float32,0xc26b9e95,0xbf3504cf,2 -np.float32,0x42eb9e95,0xbf800000,2 -np.float32,0xc2eb9e95,0x3f800000,2 -np.float32,0x436b9e95,0xb74a00d9,2 -np.float32,0xc36b9e95,0x374a00d9,2 -np.float32,0x426ec2d4,0xb5bef0a7,2 -np.float32,0xc26ec2d4,0x35bef0a7,2 -np.float32,0x42eec2d4,0x363ef0a7,2 -np.float32,0xc2eec2d4,0xb63ef0a7,2 -np.float32,0x436ec2d4,0x36bef0a7,2 -np.float32,0xc36ec2d4,0xb6bef0a7,2 -np.float32,0x4271e713,0xbf3504f1,2 -np.float32,0xc271e713,0x3f3504f1,2 -np.float32,0x42f1e713,0x3f800000,2 -np.float32,0xc2f1e713,0xbf800000,2 -np.float32,0x4371e713,0x35310321,2 -np.float32,0xc371e713,0xb5310321,2 -np.float32,0x42750b53,0xbf800000,2 -np.float32,0xc2750b53,0x3f800000,2 -np.float32,0x42f50b53,0xb68a6748,2 -np.float32,0xc2f50b53,0x368a6748,2 -np.float32,0x43750b53,0x370a6748,2 -np.float32,0xc3750b53,0xb70a6748,2 -np.float32,0x42782f92,0xbf3504ee,2 -np.float32,0xc2782f92,0x3f3504ee,2 
-np.float32,0x42f82f92,0xbf800000,2 -np.float32,0xc2f82f92,0x3f800000,2 -np.float32,0x43782f92,0xb5fef616,2 -np.float32,0xc3782f92,0x35fef616,2 -np.float32,0x427b53d2,0x3635563d,2 -np.float32,0xc27b53d2,0xb635563d,2 -np.float32,0x42fb53d2,0x36b5563d,2 -np.float32,0xc2fb53d2,0xb6b5563d,2 -np.float32,0x437b53d2,0x3735563d,2 -np.float32,0xc37b53d2,0xb735563d,2 -np.float32,0x427e7811,0x3f350500,2 -np.float32,0xc27e7811,0xbf350500,2 -np.float32,0x42fe7811,0x3f800000,2 -np.float32,0xc2fe7811,0xbf800000,2 -np.float32,0x437e7811,0xb6959b6f,2 -np.float32,0xc37e7811,0x36959b6f,2 -np.float32,0x4280ce28,0x3f800000,2 -np.float32,0xc280ce28,0xbf800000,2 -np.float32,0x4300ce28,0x357dd672,2 -np.float32,0xc300ce28,0xb57dd672,2 -np.float32,0x4380ce28,0xb5fdd672,2 -np.float32,0xc380ce28,0x35fdd672,2 -np.float32,0x42826048,0x3f3504de,2 -np.float32,0xc2826048,0xbf3504de,2 -np.float32,0x43026048,0xbf800000,2 -np.float32,0xc3026048,0x3f800000,2 -np.float32,0x43826048,0xb6eb7958,2 -np.float32,0xc3826048,0x36eb7958,2 -np.float32,0x4283f268,0xb6859a13,2 -np.float32,0xc283f268,0x36859a13,2 -np.float32,0x4303f268,0x37059a13,2 -np.float32,0xc303f268,0xb7059a13,2 -np.float32,0x4383f268,0x37859a13,2 -np.float32,0xc383f268,0xb7859a13,2 -np.float32,0x42858487,0xbf3504e2,2 -np.float32,0xc2858487,0x3f3504e2,2 -np.float32,0x43058487,0x3f800000,2 -np.float32,0xc3058487,0xbf800000,2 -np.float32,0x43858487,0x36bea8be,2 -np.float32,0xc3858487,0xb6bea8be,2 -np.float32,0x428716a7,0xbf800000,2 -np.float32,0xc28716a7,0x3f800000,2 -np.float32,0x430716a7,0xb5d88c6d,2 -np.float32,0xc30716a7,0x35d88c6d,2 -np.float32,0x438716a7,0x36588c6d,2 -np.float32,0xc38716a7,0xb6588c6d,2 -np.float32,0x4288a8c7,0xbf3504cf,2 -np.float32,0xc288a8c7,0x3f3504cf,2 -np.float32,0x4308a8c7,0xbf800000,2 -np.float32,0xc308a8c7,0x3f800000,2 -np.float32,0x4388a8c7,0xb74b9a96,2 -np.float32,0xc388a8c7,0x374b9a96,2 -np.float32,0x428a3ae7,0x36b08908,2 -np.float32,0xc28a3ae7,0xb6b08908,2 -np.float32,0x430a3ae7,0x37308908,2 
-np.float32,0xc30a3ae7,0xb7308908,2 -np.float32,0x438a3ae7,0x37b08908,2 -np.float32,0xc38a3ae7,0xb7b08908,2 -np.float32,0x428bcd06,0x3f3504f2,2 -np.float32,0xc28bcd06,0xbf3504f2,2 -np.float32,0x430bcd06,0x3f800000,2 -np.float32,0xc30bcd06,0xbf800000,2 -np.float32,0x438bcd06,0x3517675b,2 -np.float32,0xc38bcd06,0xb517675b,2 -np.float32,0x428d5f26,0x3f800000,2 -np.float32,0xc28d5f26,0xbf800000,2 -np.float32,0x430d5f26,0xb68c0105,2 -np.float32,0xc30d5f26,0x368c0105,2 -np.float32,0x438d5f26,0x370c0105,2 -np.float32,0xc38d5f26,0xb70c0105,2 -np.float32,0x428ef146,0x3f3504c0,2 -np.float32,0xc28ef146,0xbf3504c0,2 -np.float32,0x430ef146,0xbf800000,2 -np.float32,0xc30ef146,0x3f800000,2 -np.float32,0x438ef146,0xb790bc40,2 -np.float32,0xc38ef146,0x3790bc40,2 -np.float32,0x42908365,0x3592200d,2 -np.float32,0xc2908365,0xb592200d,2 -np.float32,0x43108365,0xb612200d,2 -np.float32,0xc3108365,0x3612200d,2 -np.float32,0x43908365,0xb692200d,2 -np.float32,0xc3908365,0x3692200d,2 -np.float32,0x42921585,0xbf350501,2 -np.float32,0xc2921585,0x3f350501,2 -np.float32,0x43121585,0x3f800000,2 -np.float32,0xc3121585,0xbf800000,2 -np.float32,0x43921585,0xb698cee8,2 -np.float32,0xc3921585,0x3698cee8,2 -np.float32,0x4293a7a5,0xbf800000,2 -np.float32,0xc293a7a5,0x3f800000,2 -np.float32,0x4313a7a5,0xb6e1deee,2 -np.float32,0xc313a7a5,0x36e1deee,2 -np.float32,0x4393a7a5,0x3761deee,2 -np.float32,0xc393a7a5,0xb761deee,2 -np.float32,0x429539c5,0xbf3504b1,2 -np.float32,0xc29539c5,0x3f3504b1,2 -np.float32,0x431539c5,0xbf800000,2 -np.float32,0xc31539c5,0x3f800000,2 -np.float32,0x439539c5,0xb7bbab34,2 -np.float32,0xc39539c5,0x37bbab34,2 -np.float32,0x4296cbe4,0x344cde2e,2 -np.float32,0xc296cbe4,0xb44cde2e,2 -np.float32,0x4316cbe4,0x34ccde2e,2 -np.float32,0xc316cbe4,0xb4ccde2e,2 -np.float32,0x4396cbe4,0x354cde2e,2 -np.float32,0xc396cbe4,0xb54cde2e,2 -np.float32,0x42985e04,0x3f350510,2 -np.float32,0xc2985e04,0xbf350510,2 -np.float32,0x43185e04,0x3f800000,2 -np.float32,0xc3185e04,0xbf800000,2 
-np.float32,0x43985e04,0xb722455d,2 -np.float32,0xc3985e04,0x3722455d,2 -np.float32,0x4299f024,0x3f800000,2 -np.float32,0xc299f024,0xbf800000,2 -np.float32,0x4319f024,0xb71bde6c,2 -np.float32,0xc319f024,0x371bde6c,2 -np.float32,0x4399f024,0x379bde6c,2 -np.float32,0xc399f024,0xb79bde6c,2 -np.float32,0x429b8243,0x3f3504fc,2 -np.float32,0xc29b8243,0xbf3504fc,2 -np.float32,0x431b8243,0xbf800000,2 -np.float32,0xc31b8243,0x3f800000,2 -np.float32,0x439b8243,0x364b2eb8,2 -np.float32,0xc39b8243,0xb64b2eb8,2 -np.float32,0x435b2047,0xbf350525,2 -np.float32,0x42a038a2,0xbf800000,2 -np.float32,0x432038a2,0x3664ca7e,2 -np.float32,0x4345eb9b,0x365e638c,2 -np.float32,0x42c5eb9b,0xbf800000,2 -np.float32,0x42eb9e94,0xbf800000,2 -np.float32,0x4350ea79,0x3f800000,2 -np.float32,0x42dbe957,0x3585522a,2 -np.float32,0x425be957,0xbf800000,2 -np.float32,0x435be957,0xb605522a,2 -np.float32,0x487fe5ab,0xbf7ffffd,2 -np.float32,0x497fe5ab,0xbb14017d,2 -np.float32,0x49ffe5ab,0xbb940164,2 -np.float32,0x49ffeb37,0x3f7fff56,2 -np.float32,0x497ff0c3,0x3f7fffd6,2 -np.float32,0x49fff0c3,0x3b930487,2 -np.float32,0x49fff64f,0xbf7fff58,2 -np.float32,0x497ffbdb,0x3b1207c0,2 -np.float32,0x49fffbdb,0xbb9207a9,2 -np.float32,0x48fffbdb,0xbf7ffff6,2 -np.float32,0x4e736e56,0x397fa7f2,2 -np.float32,0x4d4da377,0xb57c64bc,2 -np.float32,0x4ece58c3,0xb80846c8,2 -np.float32,0x4ee0db9c,0x394c4786,2 -np.float32,0x4dee7002,0x381bce96,2 -np.float32,0x4ee86afc,0x3f800000,2 -np.float32,0x4dca4f3f,0xb8e25111,2 -np.float32,0x4ecb48af,0xbf800000,2 -np.float32,0x4e51e33f,0xb8a4fa6f,2 -np.float32,0x4ef5f421,0x387ca7df,2 -np.float32,0x476362a2,0xbd7ff911,2 -np.float32,0x464c99a4,0x3e7f4d41,2 -np.float32,0x4471f73d,0x3e7fe1b0,2 -np.float32,0x445a6752,0x3e7ef367,2 -np.float32,0x474fa400,0x3e7f9fcd,2 -np.float32,0x47c9e70e,0xbb4bba09,2 -np.float32,0x45c1e72f,0xbe7fc7af,2 -np.float32,0x4558c91d,0x3e7e9f31,2 -np.float32,0x43784f94,0xbdff6654,2 -np.float32,0x466e8500,0xbe7ea0a3,2 -np.float32,0x468e1c25,0x3e7e22fb,2 
-np.float32,0x47d28adc,0xbe7d5e6b,2 -np.float32,0x44ea6cfc,0x3dff70c3,2 -np.float32,0x4605126c,0x3e7f89ef,2 -np.float32,0x4788b3c6,0xbb87d853,2 -np.float32,0x4531b042,0x3dffd163,2 -np.float32,0x47e46c29,0xbe7def2b,2 -np.float32,0x47c10e07,0xbdff63d4,2 -np.float32,0x43f1f71d,0x3dfff387,2 -np.float32,0x47c3e38c,0x3e7f0b2f,2 -np.float32,0x462c3fa5,0xbd7fe13d,2 -np.float32,0x441c5354,0xbdff76b4,2 -np.float32,0x44908b69,0x3e7dcf0d,2 -np.float32,0x478813ad,0xbe7e9d80,2 -np.float32,0x441c4351,0x3dff937b,2 diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test__exceptions.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test__exceptions.py deleted file mode 100644 index 494b51f..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test__exceptions.py +++ /dev/null @@ -1,42 +0,0 @@ -""" -Tests of the ._exceptions module. Primarily for exercising the __str__ methods. -""" -import numpy as np - -_ArrayMemoryError = np.core._exceptions._ArrayMemoryError - -class TestArrayMemoryError: - def test_str(self): - e = _ArrayMemoryError((1023,), np.dtype(np.uint8)) - str(e) # not crashing is enough - - # testing these properties is easier than testing the full string repr - def test__size_to_string(self): - """ Test e._size_to_string """ - f = _ArrayMemoryError._size_to_string - Ki = 1024 - assert f(0) == '0 bytes' - assert f(1) == '1 bytes' - assert f(1023) == '1023 bytes' - assert f(Ki) == '1.00 KiB' - assert f(Ki+1) == '1.00 KiB' - assert f(10*Ki) == '10.0 KiB' - assert f(int(999.4*Ki)) == '999. KiB' - assert f(int(1023.4*Ki)) == '1023. KiB' - assert f(int(1023.5*Ki)) == '1.00 MiB' - assert f(Ki*Ki) == '1.00 MiB' - - # 1023.9999 Mib should round to 1 GiB - assert f(int(Ki*Ki*Ki*0.9999)) == '1.00 GiB' - assert f(Ki*Ki*Ki*Ki*Ki*Ki) == '1.00 EiB' - # larger than sys.maxsize, adding larger prefices isn't going to help - # anyway. - assert f(Ki*Ki*Ki*Ki*Ki*Ki*123456) == '123456. 
EiB' - - def test__total_size(self): - """ Test e._total_size """ - e = _ArrayMemoryError((1,), np.dtype(np.uint8)) - assert e._total_size == 1 - - e = _ArrayMemoryError((2, 4), np.dtype((np.uint64, 16))) - assert e._total_size == 1024 diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_abc.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_abc.py deleted file mode 100644 index d9c61b0..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_abc.py +++ /dev/null @@ -1,56 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.testing import assert_ - -import numbers - -import numpy as np -from numpy.core.numerictypes import sctypes - -class TestABC(object): - def test_abstract(self): - assert_(issubclass(np.number, numbers.Number)) - - assert_(issubclass(np.inexact, numbers.Complex)) - assert_(issubclass(np.complexfloating, numbers.Complex)) - assert_(issubclass(np.floating, numbers.Real)) - - assert_(issubclass(np.integer, numbers.Integral)) - assert_(issubclass(np.signedinteger, numbers.Integral)) - assert_(issubclass(np.unsignedinteger, numbers.Integral)) - - def test_floats(self): - for t in sctypes['float']: - assert_(isinstance(t(), numbers.Real), - "{0} is not instance of Real".format(t.__name__)) - assert_(issubclass(t, numbers.Real), - "{0} is not subclass of Real".format(t.__name__)) - assert_(not isinstance(t(), numbers.Rational), - "{0} is instance of Rational".format(t.__name__)) - assert_(not issubclass(t, numbers.Rational), - "{0} is subclass of Rational".format(t.__name__)) - - def test_complex(self): - for t in sctypes['complex']: - assert_(isinstance(t(), numbers.Complex), - "{0} is not instance of Complex".format(t.__name__)) - assert_(issubclass(t, numbers.Complex), - "{0} is not subclass of Complex".format(t.__name__)) - assert_(not isinstance(t(), numbers.Real), - "{0} is instance of Real".format(t.__name__)) - assert_(not issubclass(t, numbers.Real), - "{0} is subclass of 
Real".format(t.__name__)) - - def test_int(self): - for t in sctypes['int']: - assert_(isinstance(t(), numbers.Integral), - "{0} is not instance of Integral".format(t.__name__)) - assert_(issubclass(t, numbers.Integral), - "{0} is not subclass of Integral".format(t.__name__)) - - def test_uint(self): - for t in sctypes['uint']: - assert_(isinstance(t(), numbers.Integral), - "{0} is not instance of Integral".format(t.__name__)) - assert_(issubclass(t, numbers.Integral), - "{0} is not subclass of Integral".format(t.__name__)) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_api.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_api.py deleted file mode 100644 index 89fc2b0..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_api.py +++ /dev/null @@ -1,526 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys - -import numpy as np -import pytest -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises, assert_warns, - HAS_REFCOUNT - ) - -# Switch between new behaviour when NPY_RELAXED_STRIDES_CHECKING is set. 
-NPY_RELAXED_STRIDES_CHECKING = np.ones((10, 1), order='C').flags.f_contiguous - - -def test_array_array(): - tobj = type(object) - ones11 = np.ones((1, 1), np.float64) - tndarray = type(ones11) - # Test is_ndarray - assert_equal(np.array(ones11, dtype=np.float64), ones11) - if HAS_REFCOUNT: - old_refcount = sys.getrefcount(tndarray) - np.array(ones11) - assert_equal(old_refcount, sys.getrefcount(tndarray)) - - # test None - assert_equal(np.array(None, dtype=np.float64), - np.array(np.nan, dtype=np.float64)) - if HAS_REFCOUNT: - old_refcount = sys.getrefcount(tobj) - np.array(None, dtype=np.float64) - assert_equal(old_refcount, sys.getrefcount(tobj)) - - # test scalar - assert_equal(np.array(1.0, dtype=np.float64), - np.ones((), dtype=np.float64)) - if HAS_REFCOUNT: - old_refcount = sys.getrefcount(np.float64) - np.array(np.array(1.0, dtype=np.float64), dtype=np.float64) - assert_equal(old_refcount, sys.getrefcount(np.float64)) - - # test string - S2 = np.dtype((str, 2)) - S3 = np.dtype((str, 3)) - S5 = np.dtype((str, 5)) - assert_equal(np.array("1.0", dtype=np.float64), - np.ones((), dtype=np.float64)) - assert_equal(np.array("1.0").dtype, S3) - assert_equal(np.array("1.0", dtype=str).dtype, S3) - assert_equal(np.array("1.0", dtype=S2), np.array("1.")) - assert_equal(np.array("1", dtype=S5), np.ones((), dtype=S5)) - - # test unicode - _unicode = globals().get("unicode") - if _unicode: - U2 = np.dtype((_unicode, 2)) - U3 = np.dtype((_unicode, 3)) - U5 = np.dtype((_unicode, 5)) - assert_equal(np.array(_unicode("1.0"), dtype=np.float64), - np.ones((), dtype=np.float64)) - assert_equal(np.array(_unicode("1.0")).dtype, U3) - assert_equal(np.array(_unicode("1.0"), dtype=_unicode).dtype, U3) - assert_equal(np.array(_unicode("1.0"), dtype=U2), - np.array(_unicode("1."))) - assert_equal(np.array(_unicode("1"), dtype=U5), - np.ones((), dtype=U5)) - - builtins = getattr(__builtins__, '__dict__', __builtins__) - assert_(hasattr(builtins, 'get')) - - # test buffer - _buffer = 
builtins.get("buffer") - if _buffer and sys.version_info[:3] >= (2, 7, 5): - # This test fails for earlier versions of Python. - # Evidently a bug got fixed in 2.7.5. - dat = np.array(_buffer('1.0'), dtype=np.float64) - assert_equal(dat, [49.0, 46.0, 48.0]) - assert_(dat.dtype.type is np.float64) - - dat = np.array(_buffer(b'1.0')) - assert_equal(dat, [49, 46, 48]) - assert_(dat.dtype.type is np.uint8) - - # test memoryview, new version of buffer - _memoryview = builtins.get("memoryview") - if _memoryview: - dat = np.array(_memoryview(b'1.0'), dtype=np.float64) - assert_equal(dat, [49.0, 46.0, 48.0]) - assert_(dat.dtype.type is np.float64) - - dat = np.array(_memoryview(b'1.0')) - assert_equal(dat, [49, 46, 48]) - assert_(dat.dtype.type is np.uint8) - - # test array interface - a = np.array(100.0, dtype=np.float64) - o = type("o", (object,), - dict(__array_interface__=a.__array_interface__)) - assert_equal(np.array(o, dtype=np.float64), a) - - # test array_struct interface - a = np.array([(1, 4.0, 'Hello'), (2, 6.0, 'World')], - dtype=[('f0', int), ('f1', float), ('f2', str)]) - o = type("o", (object,), - dict(__array_struct__=a.__array_struct__)) - ## wasn't what I expected... is np.array(o) supposed to equal a ? - ## instead we get a array([...], dtype=">V18") - assert_equal(bytes(np.array(o).data), bytes(a.data)) - - # test array - o = type("o", (object,), - dict(__array__=lambda *x: np.array(100.0, dtype=np.float64)))() - assert_equal(np.array(o, dtype=np.float64), np.array(100.0, np.float64)) - - # test recursion - nested = 1.5 - for i in range(np.MAXDIMS): - nested = [nested] - - # no error - np.array(nested) - - # Exceeds recursion limit - assert_raises(ValueError, np.array, [nested], dtype=np.float64) - - # Try with lists... 
- assert_equal(np.array([None] * 10, dtype=np.float64), - np.full((10,), np.nan, dtype=np.float64)) - assert_equal(np.array([[None]] * 10, dtype=np.float64), - np.full((10, 1), np.nan, dtype=np.float64)) - assert_equal(np.array([[None] * 10], dtype=np.float64), - np.full((1, 10), np.nan, dtype=np.float64)) - assert_equal(np.array([[None] * 10] * 10, dtype=np.float64), - np.full((10, 10), np.nan, dtype=np.float64)) - - assert_equal(np.array([1.0] * 10, dtype=np.float64), - np.ones((10,), dtype=np.float64)) - assert_equal(np.array([[1.0]] * 10, dtype=np.float64), - np.ones((10, 1), dtype=np.float64)) - assert_equal(np.array([[1.0] * 10], dtype=np.float64), - np.ones((1, 10), dtype=np.float64)) - assert_equal(np.array([[1.0] * 10] * 10, dtype=np.float64), - np.ones((10, 10), dtype=np.float64)) - - # Try with tuples - assert_equal(np.array((None,) * 10, dtype=np.float64), - np.full((10,), np.nan, dtype=np.float64)) - assert_equal(np.array([(None,)] * 10, dtype=np.float64), - np.full((10, 1), np.nan, dtype=np.float64)) - assert_equal(np.array([(None,) * 10], dtype=np.float64), - np.full((1, 10), np.nan, dtype=np.float64)) - assert_equal(np.array([(None,) * 10] * 10, dtype=np.float64), - np.full((10, 10), np.nan, dtype=np.float64)) - - assert_equal(np.array((1.0,) * 10, dtype=np.float64), - np.ones((10,), dtype=np.float64)) - assert_equal(np.array([(1.0,)] * 10, dtype=np.float64), - np.ones((10, 1), dtype=np.float64)) - assert_equal(np.array([(1.0,) * 10], dtype=np.float64), - np.ones((1, 10), dtype=np.float64)) - assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64), - np.ones((10, 10), dtype=np.float64)) - - -def test_fastCopyAndTranspose(): - # 0D array - a = np.array(2) - b = np.fastCopyAndTranspose(a) - assert_equal(b, a.T) - assert_(b.flags.owndata) - - # 1D array - a = np.array([3, 2, 7, 0]) - b = np.fastCopyAndTranspose(a) - assert_equal(b, a.T) - assert_(b.flags.owndata) - - # 2D array - a = np.arange(6).reshape(2, 3) - b = np.fastCopyAndTranspose(a) - 
assert_equal(b, a.T) - assert_(b.flags.owndata) - -def test_array_astype(): - a = np.arange(6, dtype='f4').reshape(2, 3) - # Default behavior: allows unsafe casts, keeps memory layout, - # always copies. - b = a.astype('i4') - assert_equal(a, b) - assert_equal(b.dtype, np.dtype('i4')) - assert_equal(a.strides, b.strides) - b = a.T.astype('i4') - assert_equal(a.T, b) - assert_equal(b.dtype, np.dtype('i4')) - assert_equal(a.T.strides, b.strides) - b = a.astype('f4') - assert_equal(a, b) - assert_(not (a is b)) - - # copy=False parameter can sometimes skip a copy - b = a.astype('f4', copy=False) - assert_(a is b) - - # order parameter allows overriding of the memory layout, - # forcing a copy if the layout is wrong - b = a.astype('f4', order='F', copy=False) - assert_equal(a, b) - assert_(not (a is b)) - assert_(b.flags.f_contiguous) - - b = a.astype('f4', order='C', copy=False) - assert_equal(a, b) - assert_(a is b) - assert_(b.flags.c_contiguous) - - # casting parameter allows catching bad casts - b = a.astype('c8', casting='safe') - assert_equal(a, b) - assert_equal(b.dtype, np.dtype('c8')) - - assert_raises(TypeError, a.astype, 'i4', casting='safe') - - # subok=False passes through a non-subclassed array - b = a.astype('f4', subok=0, copy=False) - assert_(a is b) - - class MyNDArray(np.ndarray): - pass - - a = np.array([[0, 1, 2], [3, 4, 5]], dtype='f4').view(MyNDArray) - - # subok=True passes through a subclass - b = a.astype('f4', subok=True, copy=False) - assert_(a is b) - - # subok=True is default, and creates a subtype on a cast - b = a.astype('i4', copy=False) - assert_equal(a, b) - assert_equal(type(b), MyNDArray) - - # subok=False never returns a subclass - b = a.astype('f4', subok=False, copy=False) - assert_equal(a, b) - assert_(not (a is b)) - assert_(type(b) is not MyNDArray) - - # Make sure converting from string object to fixed length string - # does not truncate. 
- a = np.array([b'a'*100], dtype='O') - b = a.astype('S') - assert_equal(a, b) - assert_equal(b.dtype, np.dtype('S100')) - a = np.array([u'a'*100], dtype='O') - b = a.astype('U') - assert_equal(a, b) - assert_equal(b.dtype, np.dtype('U100')) - - # Same test as above but for strings shorter than 64 characters - a = np.array([b'a'*10], dtype='O') - b = a.astype('S') - assert_equal(a, b) - assert_equal(b.dtype, np.dtype('S10')) - a = np.array([u'a'*10], dtype='O') - b = a.astype('U') - assert_equal(a, b) - assert_equal(b.dtype, np.dtype('U10')) - - a = np.array(123456789012345678901234567890, dtype='O').astype('S') - assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) - a = np.array(123456789012345678901234567890, dtype='O').astype('U') - assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30')) - - a = np.array([123456789012345678901234567890], dtype='O').astype('S') - assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) - a = np.array([123456789012345678901234567890], dtype='O').astype('U') - assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30')) - - a = np.array(123456789012345678901234567890, dtype='S') - assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) - a = np.array(123456789012345678901234567890, dtype='U') - assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30')) - - a = np.array(u'a\u0140', dtype='U') - b = np.ndarray(buffer=a, dtype='uint32', shape=2) - assert_(b.size == 2) - - a = np.array([1000], dtype='i4') - assert_raises(TypeError, a.astype, 'S1', casting='safe') - - a = np.array(1000, dtype='i4') - assert_raises(TypeError, a.astype, 'U1', casting='safe') - -@pytest.mark.parametrize("t", - np.sctypes['uint'] + np.sctypes['int'] + np.sctypes['float'] -) -def test_array_astype_warning(t): - # test ComplexWarning when casting from complex to float or int - a = np.array(10, dtype=np.complex_) - assert_warns(np.ComplexWarning, a.astype, t) - -def test_copyto_fromscalar(): - a = np.arange(6, 
dtype='f4').reshape(2, 3) - - # Simple copy - np.copyto(a, 1.5) - assert_equal(a, 1.5) - np.copyto(a.T, 2.5) - assert_equal(a, 2.5) - - # Where-masked copy - mask = np.array([[0, 1, 0], [0, 0, 1]], dtype='?') - np.copyto(a, 3.5, where=mask) - assert_equal(a, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]]) - mask = np.array([[0, 1], [1, 1], [1, 0]], dtype='?') - np.copyto(a.T, 4.5, where=mask) - assert_equal(a, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]]) - -def test_copyto(): - a = np.arange(6, dtype='i4').reshape(2, 3) - - # Simple copy - np.copyto(a, [[3, 1, 5], [6, 2, 1]]) - assert_equal(a, [[3, 1, 5], [6, 2, 1]]) - - # Overlapping copy should work - np.copyto(a[:, :2], a[::-1, 1::-1]) - assert_equal(a, [[2, 6, 5], [1, 3, 1]]) - - # Defaults to 'same_kind' casting - assert_raises(TypeError, np.copyto, a, 1.5) - - # Force a copy with 'unsafe' casting, truncating 1.5 to 1 - np.copyto(a, 1.5, casting='unsafe') - assert_equal(a, 1) - - # Copying with a mask - np.copyto(a, 3, where=[True, False, True]) - assert_equal(a, [[3, 1, 3], [3, 1, 3]]) - - # Casting rule still applies with a mask - assert_raises(TypeError, np.copyto, a, 3.5, where=[True, False, True]) - - # Lists of integer 0's and 1's is ok too - np.copyto(a, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]]) - assert_equal(a, [[3, 4, 4], [4, 1, 3]]) - - # Overlapping copy with mask should work - np.copyto(a[:, :2], a[::-1, 1::-1], where=[[0, 1], [1, 1]]) - assert_equal(a, [[3, 4, 4], [4, 3, 3]]) - - # 'dst' must be an array - assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4]) - -def test_copyto_permut(): - # test explicit overflow case - pad = 500 - l = [True] * pad + [True, True, True, True] - r = np.zeros(len(l)-pad) - d = np.ones(len(l)-pad) - mask = np.array(l)[pad:] - np.copyto(r, d, where=mask[::-1]) - - # test all permutation of possible masks, 9 should be sufficient for - # current 4 byte unrolled code - power = 9 - d = np.ones(power) - for i in range(2**power): - r = np.zeros(power) - l = [(i & x) != 0 for x 
in range(power)] - mask = np.array(l) - np.copyto(r, d, where=mask) - assert_array_equal(r == 1, l) - assert_equal(r.sum(), sum(l)) - - r = np.zeros(power) - np.copyto(r, d, where=mask[::-1]) - assert_array_equal(r == 1, l[::-1]) - assert_equal(r.sum(), sum(l)) - - r = np.zeros(power) - np.copyto(r[::2], d[::2], where=mask[::2]) - assert_array_equal(r[::2] == 1, l[::2]) - assert_equal(r[::2].sum(), sum(l[::2])) - - r = np.zeros(power) - np.copyto(r[::2], d[::2], where=mask[::-2]) - assert_array_equal(r[::2] == 1, l[::-2]) - assert_equal(r[::2].sum(), sum(l[::-2])) - - for c in [0xFF, 0x7F, 0x02, 0x10]: - r = np.zeros(power) - mask = np.array(l) - imask = np.array(l).view(np.uint8) - imask[mask != 0] = c - np.copyto(r, d, where=mask) - assert_array_equal(r == 1, l) - assert_equal(r.sum(), sum(l)) - - r = np.zeros(power) - np.copyto(r, d, where=True) - assert_equal(r.sum(), r.size) - r = np.ones(power) - d = np.zeros(power) - np.copyto(r, d, where=False) - assert_equal(r.sum(), r.size) - -def test_copy_order(): - a = np.arange(24).reshape(2, 1, 3, 4) - b = a.copy(order='F') - c = np.arange(24).reshape(2, 1, 4, 3).swapaxes(2, 3) - - def check_copy_result(x, y, ccontig, fcontig, strides=False): - assert_(not (x is y)) - assert_equal(x, y) - assert_equal(res.flags.c_contiguous, ccontig) - assert_equal(res.flags.f_contiguous, fcontig) - # This check is impossible only because - # NPY_RELAXED_STRIDES_CHECKING changes the strides actively - if not NPY_RELAXED_STRIDES_CHECKING: - if strides: - assert_equal(x.strides, y.strides) - else: - assert_(x.strides != y.strides) - - # Validate the initial state of a, b, and c - assert_(a.flags.c_contiguous) - assert_(not a.flags.f_contiguous) - assert_(not b.flags.c_contiguous) - assert_(b.flags.f_contiguous) - assert_(not c.flags.c_contiguous) - assert_(not c.flags.f_contiguous) - - # Copy with order='C' - res = a.copy(order='C') - check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) - res = b.copy(order='C') - 
check_copy_result(res, b, ccontig=True, fcontig=False, strides=False) - res = c.copy(order='C') - check_copy_result(res, c, ccontig=True, fcontig=False, strides=False) - res = np.copy(a, order='C') - check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) - res = np.copy(b, order='C') - check_copy_result(res, b, ccontig=True, fcontig=False, strides=False) - res = np.copy(c, order='C') - check_copy_result(res, c, ccontig=True, fcontig=False, strides=False) - - # Copy with order='F' - res = a.copy(order='F') - check_copy_result(res, a, ccontig=False, fcontig=True, strides=False) - res = b.copy(order='F') - check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) - res = c.copy(order='F') - check_copy_result(res, c, ccontig=False, fcontig=True, strides=False) - res = np.copy(a, order='F') - check_copy_result(res, a, ccontig=False, fcontig=True, strides=False) - res = np.copy(b, order='F') - check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) - res = np.copy(c, order='F') - check_copy_result(res, c, ccontig=False, fcontig=True, strides=False) - - # Copy with order='K' - res = a.copy(order='K') - check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) - res = b.copy(order='K') - check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) - res = c.copy(order='K') - check_copy_result(res, c, ccontig=False, fcontig=False, strides=True) - res = np.copy(a, order='K') - check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) - res = np.copy(b, order='K') - check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) - res = np.copy(c, order='K') - check_copy_result(res, c, ccontig=False, fcontig=False, strides=True) - -def test_contiguous_flags(): - a = np.ones((4, 4, 1))[::2,:,:] - if NPY_RELAXED_STRIDES_CHECKING: - a.strides = a.strides[:2] + (-123,) - b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4) - - def check_contig(a, ccontig, fcontig): - assert_(a.flags.c_contiguous == ccontig) - 
assert_(a.flags.f_contiguous == fcontig) - - # Check if new arrays are correct: - check_contig(a, False, False) - check_contig(b, False, False) - if NPY_RELAXED_STRIDES_CHECKING: - check_contig(np.empty((2, 2, 0, 2, 2)), True, True) - check_contig(np.array([[[1], [2]]], order='F'), True, True) - else: - check_contig(np.empty((2, 2, 0, 2, 2)), True, False) - check_contig(np.array([[[1], [2]]], order='F'), False, True) - check_contig(np.empty((2, 2)), True, False) - check_contig(np.empty((2, 2), order='F'), False, True) - - # Check that np.array creates correct contiguous flags: - check_contig(np.array(a, copy=False), False, False) - check_contig(np.array(a, copy=False, order='C'), True, False) - check_contig(np.array(a, ndmin=4, copy=False, order='F'), False, True) - - if NPY_RELAXED_STRIDES_CHECKING: - # Check slicing update of flags and : - check_contig(a[0], True, True) - check_contig(a[None, ::4, ..., None], True, True) - check_contig(b[0, 0, ...], False, True) - check_contig(b[:,:, 0:0,:,:], True, True) - else: - # Check slicing update of flags: - check_contig(a[0], True, False) - # Would be nice if this was C-Contiguous: - check_contig(a[None, 0, ..., None], False, False) - check_contig(b[0, 0, 0, ...], False, True) - - # Test ravel and squeeze. 
- check_contig(a.ravel(), True, True) - check_contig(np.ones((1, 3, 1)).squeeze(), True, True) - -def test_broadcast_arrays(): - # Test user defined dtypes - a = np.array([(1, 2, 3)], dtype='u4,u4,u4') - b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4') - result = np.broadcast_arrays(a, b) - assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4')) - assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_arrayprint.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_arrayprint.py deleted file mode 100644 index 702e68e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_arrayprint.py +++ /dev/null @@ -1,888 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import division, absolute_import, print_function - -import sys -import gc -import pytest - -import numpy as np -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT, - assert_raises_regex, - ) -import textwrap - -class TestArrayRepr(object): - def test_nan_inf(self): - x = np.array([np.nan, np.inf]) - assert_equal(repr(x), 'array([nan, inf])') - - def test_subclass(self): - class sub(np.ndarray): pass - - # one dimensional - x1d = np.array([1, 2]).view(sub) - assert_equal(repr(x1d), 'sub([1, 2])') - - # two dimensional - x2d = np.array([[1, 2], [3, 4]]).view(sub) - assert_equal(repr(x2d), - 'sub([[1, 2],\n' - ' [3, 4]])') - - # two dimensional with flexible dtype - xstruct = np.ones((2,2), dtype=[('a', ' 1) - y = sub(None) - x[()] = y - y[()] = x - assert_equal(repr(x), - 'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)') - assert_equal(str(x), '...') - x[()] = 0 # resolve circular references for garbage collector - - # nested 0d-subclass-object - x = sub(None) - x[()] = sub(None) - assert_equal(repr(x), 'sub(sub(None, dtype=object), dtype=object)') - assert_equal(str(x), 'None') - - # 
gh-10663 - class DuckCounter(np.ndarray): - def __getitem__(self, item): - result = super(DuckCounter, self).__getitem__(item) - if not isinstance(result, DuckCounter): - result = result[...].view(DuckCounter) - return result - - def to_string(self): - return {0: 'zero', 1: 'one', 2: 'two'}.get(self.item(), 'many') - - def __str__(self): - if self.shape == (): - return self.to_string() - else: - fmt = {'all': lambda x: x.to_string()} - return np.array2string(self, formatter=fmt) - - dc = np.arange(5).view(DuckCounter) - assert_equal(str(dc), "[zero one two many many]") - assert_equal(str(dc[0]), "zero") - - def test_self_containing(self): - arr0d = np.array(None) - arr0d[()] = arr0d - assert_equal(repr(arr0d), - 'array(array(..., dtype=object), dtype=object)') - arr0d[()] = 0 # resolve recursion for garbage collector - - arr1d = np.array([None, None]) - arr1d[1] = arr1d - assert_equal(repr(arr1d), - 'array([None, array(..., dtype=object)], dtype=object)') - arr1d[1] = 0 # resolve recursion for garbage collector - - first = np.array(None) - second = np.array(None) - first[()] = second - second[()] = first - assert_equal(repr(first), - 'array(array(array(..., dtype=object), dtype=object), dtype=object)') - first[()] = 0 # resolve circular references for garbage collector - - def test_containing_list(self): - # printing square brackets directly would be ambiguuous - arr1d = np.array([None, None]) - arr1d[0] = [1, 2] - arr1d[1] = [3] - assert_equal(repr(arr1d), - 'array([list([1, 2]), list([3])], dtype=object)') - - def test_void_scalar_recursion(self): - # gh-9345 - repr(np.void(b'test')) # RecursionError ? 
- - def test_fieldless_structured(self): - # gh-10366 - no_fields = np.dtype([]) - arr_no_fields = np.empty(4, dtype=no_fields) - assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])') - - -class TestComplexArray(object): - def test_str(self): - rvals = [0, 1, -1, np.inf, -np.inf, np.nan] - cvals = [complex(rp, ip) for rp in rvals for ip in rvals] - dtypes = [np.complex64, np.cdouble, np.clongdouble] - actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes] - wanted = [ - '[0.+0.j]', '[0.+0.j]', '[0.+0.j]', - '[0.+1.j]', '[0.+1.j]', '[0.+1.j]', - '[0.-1.j]', '[0.-1.j]', '[0.-1.j]', - '[0.+infj]', '[0.+infj]', '[0.+infj]', - '[0.-infj]', '[0.-infj]', '[0.-infj]', - '[0.+nanj]', '[0.+nanj]', '[0.+nanj]', - '[1.+0.j]', '[1.+0.j]', '[1.+0.j]', - '[1.+1.j]', '[1.+1.j]', '[1.+1.j]', - '[1.-1.j]', '[1.-1.j]', '[1.-1.j]', - '[1.+infj]', '[1.+infj]', '[1.+infj]', - '[1.-infj]', '[1.-infj]', '[1.-infj]', - '[1.+nanj]', '[1.+nanj]', '[1.+nanj]', - '[-1.+0.j]', '[-1.+0.j]', '[-1.+0.j]', - '[-1.+1.j]', '[-1.+1.j]', '[-1.+1.j]', - '[-1.-1.j]', '[-1.-1.j]', '[-1.-1.j]', - '[-1.+infj]', '[-1.+infj]', '[-1.+infj]', - '[-1.-infj]', '[-1.-infj]', '[-1.-infj]', - '[-1.+nanj]', '[-1.+nanj]', '[-1.+nanj]', - '[inf+0.j]', '[inf+0.j]', '[inf+0.j]', - '[inf+1.j]', '[inf+1.j]', '[inf+1.j]', - '[inf-1.j]', '[inf-1.j]', '[inf-1.j]', - '[inf+infj]', '[inf+infj]', '[inf+infj]', - '[inf-infj]', '[inf-infj]', '[inf-infj]', - '[inf+nanj]', '[inf+nanj]', '[inf+nanj]', - '[-inf+0.j]', '[-inf+0.j]', '[-inf+0.j]', - '[-inf+1.j]', '[-inf+1.j]', '[-inf+1.j]', - '[-inf-1.j]', '[-inf-1.j]', '[-inf-1.j]', - '[-inf+infj]', '[-inf+infj]', '[-inf+infj]', - '[-inf-infj]', '[-inf-infj]', '[-inf-infj]', - '[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]', - '[nan+0.j]', '[nan+0.j]', '[nan+0.j]', - '[nan+1.j]', '[nan+1.j]', '[nan+1.j]', - '[nan-1.j]', '[nan-1.j]', '[nan-1.j]', - '[nan+infj]', '[nan+infj]', '[nan+infj]', - '[nan-infj]', '[nan-infj]', '[nan-infj]', - '[nan+nanj]', '[nan+nanj]', 
'[nan+nanj]'] - - for res, val in zip(actual, wanted): - assert_equal(res, val) - -class TestArray2String(object): - def test_basic(self): - """Basic test of array2string.""" - a = np.arange(3) - assert_(np.array2string(a) == '[0 1 2]') - assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]') - assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]') - - def test_unexpected_kwarg(self): - # ensure than an appropriate TypeError - # is raised when array2string receives - # an unexpected kwarg - - with assert_raises_regex(TypeError, 'nonsense'): - np.array2string(np.array([1, 2, 3]), - nonsense=None) - - def test_format_function(self): - """Test custom format function for each element in array.""" - def _format_function(x): - if np.abs(x) < 1: - return '.' - elif np.abs(x) < 2: - return 'o' - else: - return 'O' - - x = np.arange(3) - if sys.version_info[0] >= 3: - x_hex = "[0x0 0x1 0x2]" - x_oct = "[0o0 0o1 0o2]" - else: - x_hex = "[0x0L 0x1L 0x2L]" - x_oct = "[0L 01L 02L]" - assert_(np.array2string(x, formatter={'all':_format_function}) == - "[. o O]") - assert_(np.array2string(x, formatter={'int_kind':_format_function}) == - "[. o O]") - assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) == - "[0.0000 1.0000 2.0000]") - assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}), - x_hex) - assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}), - x_oct) - - x = np.arange(3.) 
- assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) == - "[0.00 1.00 2.00]") - assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) == - "[0.00 1.00 2.00]") - - s = np.array(['abc', 'def']) - assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) == - '[abcabc defdef]') - - - def test_structure_format(self): - dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) - x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) - assert_equal(np.array2string(x), - "[('Sarah', [8., 7.]) ('John', [6., 7.])]") - - np.set_printoptions(legacy='1.13') - try: - # for issue #5692 - A = np.zeros(shape=10, dtype=[("A", "M8[s]")]) - A[5:].fill(np.datetime64('NaT')) - assert_equal( - np.array2string(A), - textwrap.dedent("""\ - [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) - ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',) - ('NaT',) ('NaT',) ('NaT',)]""") - ) - finally: - np.set_printoptions(legacy=False) - - # same again, but with non-legacy behavior - assert_equal( - np.array2string(A), - textwrap.dedent("""\ - [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) - ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) - ('1970-01-01T00:00:00',) ( 'NaT',) - ( 'NaT',) ( 'NaT',) - ( 'NaT',) ( 'NaT',)]""") - ) - - # and again, with timedeltas - A = np.full(10, 123456, dtype=[("A", "m8[s]")]) - A[5:].fill(np.datetime64('NaT')) - assert_equal( - np.array2string(A), - textwrap.dedent("""\ - [(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',) - ( 'NaT',) ( 'NaT',) ( 'NaT',)]""") - ) - - # See #8160 - struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)]) - assert_equal(np.array2string(struct_int), - "[([ 1, -1],) ([123, 1],)]") - struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)], - dtype=[('B', 'i4', (2, 2))]) - assert_equal(np.array2string(struct_2dint), - "[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]") - - # See 
#8172 - array_scalar = np.array( - (1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8')) - assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)") - - def test_unstructured_void_repr(self): - a = np.array([27, 91, 50, 75, 7, 65, 10, 8, - 27, 91, 51, 49,109, 82,101,100], dtype='u1').view('V8') - assert_equal(repr(a[0]), r"void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')") - assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'") - assert_equal(repr(a), - r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," "\n" - r" b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')") - - assert_equal(eval(repr(a), vars(np)), a) - assert_equal(eval(repr(a[0]), vars(np)), a[0]) - - def test_edgeitems_kwarg(self): - # previously the global print options would be taken over the kwarg - arr = np.zeros(3, int) - assert_equal( - np.array2string(arr, edgeitems=1, threshold=0), - "[0 ... 0]" - ) - - def test_summarize_1d(self): - A = np.arange(1001) - strA = '[ 0 1 2 ... 998 999 1000]' - assert_equal(str(A), strA) - - reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])' - assert_equal(repr(A), reprA) - - def test_summarize_2d(self): - A = np.arange(1002).reshape(2, 501) - strA = '[[ 0 1 2 ... 498 499 500]\n' \ - ' [ 501 502 503 ... 
999 1000 1001]]' - assert_equal(str(A), strA) - - reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \ - ' [ 501, 502, 503, ..., 999, 1000, 1001]])' - assert_equal(repr(A), reprA) - - def test_linewidth(self): - a = np.full(6, 1) - - def make_str(a, width, **kw): - return np.array2string(a, separator="", max_line_width=width, **kw) - - assert_equal(make_str(a, 8, legacy='1.13'), '[111111]') - assert_equal(make_str(a, 7, legacy='1.13'), '[111111]') - assert_equal(make_str(a, 5, legacy='1.13'), '[1111\n' - ' 11]') - - assert_equal(make_str(a, 8), '[111111]') - assert_equal(make_str(a, 7), '[11111\n' - ' 1]') - assert_equal(make_str(a, 5), '[111\n' - ' 111]') - - b = a[None,None,:] - - assert_equal(make_str(b, 12, legacy='1.13'), '[[[111111]]]') - assert_equal(make_str(b, 9, legacy='1.13'), '[[[111111]]]') - assert_equal(make_str(b, 8, legacy='1.13'), '[[[11111\n' - ' 1]]]') - - assert_equal(make_str(b, 12), '[[[111111]]]') - assert_equal(make_str(b, 9), '[[[111\n' - ' 111]]]') - assert_equal(make_str(b, 8), '[[[11\n' - ' 11\n' - ' 11]]]') - - def test_wide_element(self): - a = np.array(['xxxxx']) - assert_equal( - np.array2string(a, max_line_width=5), - "['xxxxx']" - ) - assert_equal( - np.array2string(a, max_line_width=5, legacy='1.13'), - "[ 'xxxxx']" - ) - - @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") - def test_refcount(self): - # make sure we do not hold references to the array due to a recursive - # closure (gh-10620) - gc.disable() - a = np.arange(2) - r1 = sys.getrefcount(a) - np.array2string(a) - np.array2string(a) - r2 = sys.getrefcount(a) - gc.collect() - gc.enable() - assert_(r1 == r2) - -class TestPrintOptions(object): - """Test getting and setting global print options.""" - - def setup(self): - self.oldopts = np.get_printoptions() - - def teardown(self): - np.set_printoptions(**self.oldopts) - - def test_basic(self): - x = np.array([1.5, 0, 1.234567890]) - assert_equal(repr(x), "array([1.5 , 0. 
, 1.23456789])") - np.set_printoptions(precision=4) - assert_equal(repr(x), "array([1.5 , 0. , 1.2346])") - - def test_precision_zero(self): - np.set_printoptions(precision=0) - for values, string in ( - ([0.], "0."), ([.3], "0."), ([-.3], "-0."), ([.7], "1."), - ([1.5], "2."), ([-1.5], "-2."), ([-15.34], "-15."), - ([100.], "100."), ([.2, -1, 122.51], " 0., -1., 123."), - ([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], "0.-1.j")): - x = np.array(values) - assert_equal(repr(x), "array([%s])" % string) - - def test_formatter(self): - x = np.arange(3) - np.set_printoptions(formatter={'all':lambda x: str(x-1)}) - assert_equal(repr(x), "array([-1, 0, 1])") - - def test_formatter_reset(self): - x = np.arange(3) - np.set_printoptions(formatter={'all':lambda x: str(x-1)}) - assert_equal(repr(x), "array([-1, 0, 1])") - np.set_printoptions(formatter={'int':None}) - assert_equal(repr(x), "array([0, 1, 2])") - - np.set_printoptions(formatter={'all':lambda x: str(x-1)}) - assert_equal(repr(x), "array([-1, 0, 1])") - np.set_printoptions(formatter={'all':None}) - assert_equal(repr(x), "array([0, 1, 2])") - - np.set_printoptions(formatter={'int':lambda x: str(x-1)}) - assert_equal(repr(x), "array([-1, 0, 1])") - np.set_printoptions(formatter={'int_kind':None}) - assert_equal(repr(x), "array([0, 1, 2])") - - x = np.arange(3.) 
- np.set_printoptions(formatter={'float':lambda x: str(x-1)}) - assert_equal(repr(x), "array([-1.0, 0.0, 1.0])") - np.set_printoptions(formatter={'float_kind':None}) - assert_equal(repr(x), "array([0., 1., 2.])") - - def test_0d_arrays(self): - unicode = type(u'') - - assert_equal(unicode(np.array(u'café', '= 3: - assert_equal(repr(np.array('café', '= 3 else '|S4' - assert_equal(repr(np.ones(3, dtype=styp)), - "array(['1', '1', '1'], dtype='{}')".format(styp)) - assert_equal(repr(np.ones(12, dtype=styp)), textwrap.dedent("""\ - array(['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'], - dtype='{}')""".format(styp))) - - def test_linewidth_repr(self): - a = np.full(7, fill_value=2) - np.set_printoptions(linewidth=17) - assert_equal( - repr(a), - textwrap.dedent("""\ - array([2, 2, 2, - 2, 2, 2, - 2])""") - ) - np.set_printoptions(linewidth=17, legacy='1.13') - assert_equal( - repr(a), - textwrap.dedent("""\ - array([2, 2, 2, - 2, 2, 2, 2])""") - ) - - a = np.full(8, fill_value=2) - - np.set_printoptions(linewidth=18, legacy=False) - assert_equal( - repr(a), - textwrap.dedent("""\ - array([2, 2, 2, - 2, 2, 2, - 2, 2])""") - ) - - np.set_printoptions(linewidth=18, legacy='1.13') - assert_equal( - repr(a), - textwrap.dedent("""\ - array([2, 2, 2, 2, - 2, 2, 2, 2])""") - ) - - def test_linewidth_str(self): - a = np.full(18, fill_value=2) - np.set_printoptions(linewidth=18) - assert_equal( - str(a), - textwrap.dedent("""\ - [2 2 2 2 2 2 2 2 - 2 2 2 2 2 2 2 2 - 2 2]""") - ) - np.set_printoptions(linewidth=18, legacy='1.13') - assert_equal( - str(a), - textwrap.dedent("""\ - [2 2 2 2 2 2 2 2 2 - 2 2 2 2 2 2 2 2 2]""") - ) - - def test_edgeitems(self): - np.set_printoptions(edgeitems=1, threshold=1) - a = np.arange(27).reshape((3, 3, 3)) - assert_equal( - repr(a), - textwrap.dedent("""\ - array([[[ 0, ..., 2], - ..., - [ 6, ..., 8]], - - ..., - - [[18, ..., 20], - ..., - [24, ..., 26]]])""") - ) - - b = np.zeros((3, 3, 1, 1)) - assert_equal( - repr(b), - 
textwrap.dedent("""\ - array([[[[0.]], - - ..., - - [[0.]]], - - - ..., - - - [[[0.]], - - ..., - - [[0.]]]])""") - ) - - # 1.13 had extra trailing spaces, and was missing newlines - np.set_printoptions(legacy='1.13') - - assert_equal( - repr(a), - textwrap.dedent("""\ - array([[[ 0, ..., 2], - ..., - [ 6, ..., 8]], - - ..., - [[18, ..., 20], - ..., - [24, ..., 26]]])""") - ) - - assert_equal( - repr(b), - textwrap.dedent("""\ - array([[[[ 0.]], - - ..., - [[ 0.]]], - - - ..., - [[[ 0.]], - - ..., - [[ 0.]]]])""") - ) - - def test_bad_args(self): - assert_raises(ValueError, np.set_printoptions, threshold=float('nan')) - assert_raises(TypeError, np.set_printoptions, threshold='1') - assert_raises(TypeError, np.set_printoptions, threshold=b'1') - -def test_unicode_object_array(): - import sys - if sys.version_info[0] >= 3: - expected = "array(['é'], dtype=object)" - else: - expected = "array([u'\\xe9'], dtype=object)" - x = np.array([u'\xe9'], dtype=object) - assert_equal(repr(x), expected) - - -class TestContextManager(object): - def test_ctx_mgr(self): - # test that context manager actuall works - with np.printoptions(precision=2): - s = str(np.array([2.0]) / 3) - assert_equal(s, '[0.67]') - - def test_ctx_mgr_restores(self): - # test that print options are actually restrored - opts = np.get_printoptions() - with np.printoptions(precision=opts['precision'] - 1, - linewidth=opts['linewidth'] - 4): - pass - assert_equal(np.get_printoptions(), opts) - - def test_ctx_mgr_exceptions(self): - # test that print options are restored even if an exception is raised - opts = np.get_printoptions() - try: - with np.printoptions(precision=2, linewidth=11): - raise ValueError - except ValueError: - pass - assert_equal(np.get_printoptions(), opts) - - def test_ctx_mgr_as_smth(self): - opts = {"precision": 2} - with np.printoptions(**opts) as ctx: - saved_opts = ctx.copy() - assert_equal({k: saved_opts[k] for k in opts}, opts) diff --git 
a/venv/lib/python3.7/site-packages/numpy/core/tests/test_datetime.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_datetime.py deleted file mode 100644 index d38444e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_datetime.py +++ /dev/null @@ -1,2375 +0,0 @@ -from __future__ import division, absolute_import, print_function - - -import numpy -import numpy as np -import datetime -import pytest -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_warns, suppress_warnings, - assert_raises_regex, - ) -from numpy.compat import pickle - -# Use pytz to test out various time zones if available -try: - from pytz import timezone as tz - _has_pytz = True -except ImportError: - _has_pytz = False - -try: - RecursionError -except NameError: - RecursionError = RuntimeError # python < 3.5 - - -class TestDateTime(object): - def test_datetime_dtype_creation(self): - for unit in ['Y', 'M', 'W', 'D', - 'h', 'm', 's', 'ms', 'us', - 'ns', 'ps', 'fs', 'as']: - dt1 = np.dtype('M8[750%s]' % unit) - assert_(dt1 == np.dtype('datetime64[750%s]' % unit)) - dt2 = np.dtype('m8[%s]' % unit) - assert_(dt2 == np.dtype('timedelta64[%s]' % unit)) - - # Generic units shouldn't add [] to the end - assert_equal(str(np.dtype("M8")), "datetime64") - - # Should be possible to specify the endianness - assert_equal(np.dtype("=M8"), np.dtype("M8")) - assert_equal(np.dtype("=M8[s]"), np.dtype("M8[s]")) - assert_(np.dtype(">M8") == np.dtype("M8") or - np.dtype("M8[D]") == np.dtype("M8[D]") or - np.dtype("M8") != np.dtype("m8") == np.dtype("m8") or - np.dtype("m8[D]") == np.dtype("m8[D]") or - np.dtype("m8") != np.dtype(" Scalars - assert_equal(np.datetime64(b, '[s]'), np.datetime64('NaT', '[s]')) - assert_equal(np.datetime64(b, '[ms]'), np.datetime64('NaT', '[ms]')) - assert_equal(np.datetime64(b, '[M]'), np.datetime64('NaT', '[M]')) - assert_equal(np.datetime64(b, '[Y]'), np.datetime64('NaT', '[Y]')) - assert_equal(np.datetime64(b, '[W]'), 
np.datetime64('NaT', '[W]')) - - # Arrays -> Scalars - assert_equal(np.datetime64(a, '[s]'), np.datetime64('NaT', '[s]')) - assert_equal(np.datetime64(a, '[ms]'), np.datetime64('NaT', '[ms]')) - assert_equal(np.datetime64(a, '[M]'), np.datetime64('NaT', '[M]')) - assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]')) - assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]')) - - # NaN -> NaT - nan = np.array([np.nan] * 8) - fnan = nan.astype('f') - lnan = nan.astype('g') - cnan = nan.astype('D') - cfnan = nan.astype('F') - clnan = nan.astype('G') - - nat = np.array([np.datetime64('NaT')] * 8) - assert_equal(nan.astype('M8[ns]'), nat) - assert_equal(fnan.astype('M8[ns]'), nat) - assert_equal(lnan.astype('M8[ns]'), nat) - assert_equal(cnan.astype('M8[ns]'), nat) - assert_equal(cfnan.astype('M8[ns]'), nat) - assert_equal(clnan.astype('M8[ns]'), nat) - - nat = np.array([np.timedelta64('NaT')] * 8) - assert_equal(nan.astype('timedelta64[ns]'), nat) - assert_equal(fnan.astype('timedelta64[ns]'), nat) - assert_equal(lnan.astype('timedelta64[ns]'), nat) - assert_equal(cnan.astype('timedelta64[ns]'), nat) - assert_equal(cfnan.astype('timedelta64[ns]'), nat) - assert_equal(clnan.astype('timedelta64[ns]'), nat) - - def test_days_creation(self): - assert_equal(np.array('1599', dtype='M8[D]').astype('i8'), - (1600-1970)*365 - (1972-1600)/4 + 3 - 365) - assert_equal(np.array('1600', dtype='M8[D]').astype('i8'), - (1600-1970)*365 - (1972-1600)/4 + 3) - assert_equal(np.array('1601', dtype='M8[D]').astype('i8'), - (1600-1970)*365 - (1972-1600)/4 + 3 + 366) - assert_equal(np.array('1900', dtype='M8[D]').astype('i8'), - (1900-1970)*365 - (1970-1900)//4) - assert_equal(np.array('1901', dtype='M8[D]').astype('i8'), - (1900-1970)*365 - (1970-1900)//4 + 365) - assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3*365 - 1) - assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2*365 - 1) - assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), 
-1*365) - assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0*365) - assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1*365) - assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2*365) - assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3*365 + 1) - assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4*365 + 1) - assert_equal(np.array('2000', dtype='M8[D]').astype('i8'), - (2000 - 1970)*365 + (2000 - 1972)//4) - assert_equal(np.array('2001', dtype='M8[D]').astype('i8'), - (2000 - 1970)*365 + (2000 - 1972)//4 + 366) - assert_equal(np.array('2400', dtype='M8[D]').astype('i8'), - (2400 - 1970)*365 + (2400 - 1972)//4 - 3) - assert_equal(np.array('2401', dtype='M8[D]').astype('i8'), - (2400 - 1970)*365 + (2400 - 1972)//4 - 3 + 366) - - assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'), - (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 28) - assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'), - (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 29) - assert_equal(np.array('2000-02-29', dtype='M8[D]').astype('i8'), - (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 28) - assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'), - (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 29) - assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'), - (2000 - 1970)*365 + (2000 - 1972)//4 + 366 + 31 + 28 + 21) - - def test_days_to_pydate(self): - assert_equal(np.array('1599', dtype='M8[D]').astype('O'), - datetime.date(1599, 1, 1)) - assert_equal(np.array('1600', dtype='M8[D]').astype('O'), - datetime.date(1600, 1, 1)) - assert_equal(np.array('1601', dtype='M8[D]').astype('O'), - datetime.date(1601, 1, 1)) - assert_equal(np.array('1900', dtype='M8[D]').astype('O'), - datetime.date(1900, 1, 1)) - assert_equal(np.array('1901', dtype='M8[D]').astype('O'), - datetime.date(1901, 1, 1)) - assert_equal(np.array('2000', dtype='M8[D]').astype('O'), - datetime.date(2000, 1, 1)) - assert_equal(np.array('2001', 
dtype='M8[D]').astype('O'), - datetime.date(2001, 1, 1)) - assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('O'), - datetime.date(1600, 2, 29)) - assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('O'), - datetime.date(1600, 3, 1)) - assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('O'), - datetime.date(2001, 3, 22)) - - def test_dtype_comparison(self): - assert_(not (np.dtype('M8[us]') == np.dtype('M8[ms]'))) - assert_(np.dtype('M8[us]') != np.dtype('M8[ms]')) - assert_(np.dtype('M8[2D]') != np.dtype('M8[D]')) - assert_(np.dtype('M8[D]') != np.dtype('M8[2D]')) - - def test_pydatetime_creation(self): - a = np.array(['1960-03-12', datetime.date(1960, 3, 12)], dtype='M8[D]') - assert_equal(a[0], a[1]) - a = np.array(['1999-12-31', datetime.date(1999, 12, 31)], dtype='M8[D]') - assert_equal(a[0], a[1]) - a = np.array(['2000-01-01', datetime.date(2000, 1, 1)], dtype='M8[D]') - assert_equal(a[0], a[1]) - # Will fail if the date changes during the exact right moment - a = np.array(['today', datetime.date.today()], dtype='M8[D]') - assert_equal(a[0], a[1]) - # datetime.datetime.now() returns local time, not UTC - #a = np.array(['now', datetime.datetime.now()], dtype='M8[s]') - #assert_equal(a[0], a[1]) - - # we can give a datetime.date time units - assert_equal(np.array(datetime.date(1960, 3, 12), dtype='M8[s]'), - np.array(np.datetime64('1960-03-12T00:00:00'))) - - def test_datetime_string_conversion(self): - a = ['2011-03-16', '1920-01-01', '2013-05-19'] - str_a = np.array(a, dtype='S') - uni_a = np.array(a, dtype='U') - dt_a = np.array(a, dtype='M') - - # String to datetime - assert_equal(dt_a, str_a.astype('M')) - assert_equal(dt_a.dtype, str_a.astype('M').dtype) - dt_b = np.empty_like(dt_a) - dt_b[...] = str_a - assert_equal(dt_a, dt_b) - - # Datetime to string - assert_equal(str_a, dt_a.astype('S0')) - str_b = np.empty_like(str_a) - str_b[...] 
= dt_a - assert_equal(str_a, str_b) - - # Unicode to datetime - assert_equal(dt_a, uni_a.astype('M')) - assert_equal(dt_a.dtype, uni_a.astype('M').dtype) - dt_b = np.empty_like(dt_a) - dt_b[...] = uni_a - assert_equal(dt_a, dt_b) - - # Datetime to unicode - assert_equal(uni_a, dt_a.astype('U')) - uni_b = np.empty_like(uni_a) - uni_b[...] = dt_a - assert_equal(uni_a, uni_b) - - # Datetime to long string - gh-9712 - assert_equal(str_a, dt_a.astype((np.string_, 128))) - str_b = np.empty(str_a.shape, dtype=(np.string_, 128)) - str_b[...] = dt_a - assert_equal(str_a, str_b) - - def test_datetime_array_str(self): - a = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M') - assert_equal(str(a), "['2011-03-16' '1920-01-01' '2013-05-19']") - - a = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M') - assert_equal(np.array2string(a, separator=', ', - formatter={'datetime': lambda x: - "'%s'" % np.datetime_as_string(x, timezone='UTC')}), - "['2011-03-16T13:55Z', '1920-01-01T03:12Z']") - - # Check that one NaT doesn't corrupt subsequent entries - a = np.array(['2010', 'NaT', '2030']).astype('M') - assert_equal(str(a), "['2010' 'NaT' '2030']") - - def test_timedelta_array_str(self): - a = np.array([-1, 0, 100], dtype='m') - assert_equal(str(a), "[ -1 0 100]") - a = np.array(['NaT', 'NaT'], dtype='m') - assert_equal(str(a), "['NaT' 'NaT']") - # Check right-alignment with NaTs - a = np.array([-1, 'NaT', 0], dtype='m') - assert_equal(str(a), "[ -1 'NaT' 0]") - a = np.array([-1, 'NaT', 1234567], dtype='m') - assert_equal(str(a), "[ -1 'NaT' 1234567]") - - # Test with other byteorder: - a = np.array([-1, 'NaT', 1234567], dtype='>m') - assert_equal(str(a), "[ -1 'NaT' 1234567]") - a = np.array([-1, 'NaT', 1234567], dtype=''\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n" + \ - b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." 
- assert_equal(pickle.loads(pkl), np.dtype('>M8[us]')) - - def test_setstate(self): - "Verify that datetime dtype __setstate__ can handle bad arguments" - dt = np.dtype('>M8[us]') - assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1)) - assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) - assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx'))) - assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) - - def test_dtype_promotion(self): - # datetime datetime computes the metadata gcd - # timedelta timedelta computes the metadata gcd - for mM in ['m', 'M']: - assert_equal( - np.promote_types(np.dtype(mM+'8[2Y]'), np.dtype(mM+'8[2Y]')), - np.dtype(mM+'8[2Y]')) - assert_equal( - np.promote_types(np.dtype(mM+'8[12Y]'), np.dtype(mM+'8[15Y]')), - np.dtype(mM+'8[3Y]')) - assert_equal( - np.promote_types(np.dtype(mM+'8[62M]'), np.dtype(mM+'8[24M]')), - np.dtype(mM+'8[2M]')) - assert_equal( - np.promote_types(np.dtype(mM+'8[1W]'), np.dtype(mM+'8[2D]')), - np.dtype(mM+'8[1D]')) - assert_equal( - np.promote_types(np.dtype(mM+'8[W]'), np.dtype(mM+'8[13s]')), - np.dtype(mM+'8[s]')) - assert_equal( - np.promote_types(np.dtype(mM+'8[13W]'), np.dtype(mM+'8[49s]')), - np.dtype(mM+'8[7s]')) - # timedelta timedelta raises when there is no reasonable gcd - assert_raises(TypeError, np.promote_types, - np.dtype('m8[Y]'), np.dtype('m8[D]')) - assert_raises(TypeError, np.promote_types, - np.dtype('m8[M]'), np.dtype('m8[W]')) - # timedelta timedelta may overflow with big unit ranges - assert_raises(OverflowError, np.promote_types, - np.dtype('m8[W]'), np.dtype('m8[fs]')) - assert_raises(OverflowError, np.promote_types, - np.dtype('m8[s]'), np.dtype('m8[as]')) - - def test_cast_overflow(self): - # gh-4486 - def cast(): - numpy.datetime64("1971-01-01 00:00:00.000000000000000").astype("datetime64[%s]', - 'timedelta64[%s]']) - def test_isfinite_isinf_isnan_units(self, unit, dstr): - '''check 
isfinite, isinf, isnan for all units of M, m dtypes - ''' - arr_val = [123, -321, "NaT"] - arr = np.array(arr_val, dtype= dstr % unit) - pos = np.array([True, True, False]) - neg = np.array([False, False, True]) - false = np.array([False, False, False]) - assert_equal(np.isfinite(arr), pos) - assert_equal(np.isinf(arr), false) - assert_equal(np.isnan(arr), neg) - - def test_assert_equal(self): - assert_raises(AssertionError, assert_equal, - np.datetime64('nat'), np.timedelta64('nat')) - - def test_corecursive_input(self): - # construct a co-recursive list - a, b = [], [] - a.append(b) - b.append(a) - obj_arr = np.array([None]) - obj_arr[0] = a - - # gh-11154: This shouldn't cause a C stack overflow - assert_raises(RecursionError, obj_arr.astype, 'M8') - assert_raises(RecursionError, obj_arr.astype, 'm8') - - @pytest.mark.parametrize("time_unit", [ - "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as", - # compound units - "10D", "2M", - ]) - def test_limit_symmetry(self, time_unit): - """ - Dates should have symmetric limits around the unix epoch at +/-np.int64 - """ - epoch = np.datetime64(0, time_unit) - latest = np.datetime64(np.iinfo(np.int64).max, time_unit) - earliest = np.datetime64(-np.iinfo(np.int64).max, time_unit) - - # above should not have overflowed - assert earliest < epoch < latest - - @pytest.mark.parametrize("time_unit", [ - "Y", "M", - pytest.param("W", marks=pytest.mark.xfail(reason="gh-13197")), - "D", "h", "m", - "s", "ms", "us", "ns", "ps", "fs", "as", - pytest.param("10D", marks=pytest.mark.xfail(reason="similar to gh-13197")), - ]) - @pytest.mark.parametrize("sign", [-1, 1]) - def test_limit_str_roundtrip(self, time_unit, sign): - """ - Limits should roundtrip when converted to strings. - - This tests the conversion to and from npy_datetimestruct. - """ - # TODO: add absolute (gold standard) time span limit strings - limit = np.datetime64(np.iinfo(np.int64).max * sign, time_unit) - - # Convert to string and back. 
Explicit unit needed since the day and - # week reprs are not distinguishable. - limit_via_str = np.datetime64(str(limit), time_unit) - assert limit_via_str == limit - - -class TestDateTimeData(object): - - def test_basic(self): - a = np.array(['1980-03-23'], dtype=np.datetime64) - assert_equal(np.datetime_data(a.dtype), ('D', 1)) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_defchararray.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_defchararray.py deleted file mode 100644 index 7b0e6f8..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_defchararray.py +++ /dev/null @@ -1,692 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys - -import numpy as np -from numpy.core.multiarray import _vec_string -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises, - assert_raises_regex, suppress_warnings, - ) - -kw_unicode_true = {'unicode': True} # make 2to3 work properly -kw_unicode_false = {'unicode': False} - -class TestBasic(object): - def test_from_object_array(self): - A = np.array([['abc', 2], - ['long ', '0123456789']], dtype='O') - B = np.char.array(A) - assert_equal(B.dtype.itemsize, 10) - assert_array_equal(B, [[b'abc', b'2'], - [b'long', b'0123456789']]) - - def test_from_object_array_unicode(self): - A = np.array([['abc', u'Sigma \u03a3'], - ['long ', '0123456789']], dtype='O') - assert_raises(ValueError, np.char.array, (A,)) - B = np.char.array(A, **kw_unicode_true) - assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize) - assert_array_equal(B, [['abc', u'Sigma \u03a3'], - ['long', '0123456789']]) - - def test_from_string_array(self): - A = np.array([[b'abc', b'foo'], - [b'long ', b'0123456789']]) - assert_equal(A.dtype.type, np.string_) - B = np.char.array(A) - assert_array_equal(B, A) - assert_equal(B.dtype, A.dtype) - assert_equal(B.shape, A.shape) - B[0, 0] = 'changed' - assert_(B[0, 0] != A[0, 0]) - C = 
np.char.asarray(A) - assert_array_equal(C, A) - assert_equal(C.dtype, A.dtype) - C[0, 0] = 'changed again' - assert_(C[0, 0] != B[0, 0]) - assert_(C[0, 0] == A[0, 0]) - - def test_from_unicode_array(self): - A = np.array([['abc', u'Sigma \u03a3'], - ['long ', '0123456789']]) - assert_equal(A.dtype.type, np.unicode_) - B = np.char.array(A) - assert_array_equal(B, A) - assert_equal(B.dtype, A.dtype) - assert_equal(B.shape, A.shape) - B = np.char.array(A, **kw_unicode_true) - assert_array_equal(B, A) - assert_equal(B.dtype, A.dtype) - assert_equal(B.shape, A.shape) - - def fail(): - np.char.array(A, **kw_unicode_false) - - assert_raises(UnicodeEncodeError, fail) - - def test_unicode_upconvert(self): - A = np.char.array(['abc']) - B = np.char.array([u'\u03a3']) - assert_(issubclass((A + B).dtype.type, np.unicode_)) - - def test_from_string(self): - A = np.char.array(b'abc') - assert_equal(len(A), 1) - assert_equal(len(A[0]), 3) - assert_(issubclass(A.dtype.type, np.string_)) - - def test_from_unicode(self): - A = np.char.array(u'\u03a3') - assert_equal(len(A), 1) - assert_equal(len(A[0]), 1) - assert_equal(A.itemsize, 4) - assert_(issubclass(A.dtype.type, np.unicode_)) - -class TestVecString(object): - def test_non_existent_method(self): - - def fail(): - _vec_string('a', np.string_, 'bogus') - - assert_raises(AttributeError, fail) - - def test_non_string_array(self): - - def fail(): - _vec_string(1, np.string_, 'strip') - - assert_raises(TypeError, fail) - - def test_invalid_args_tuple(self): - - def fail(): - _vec_string(['a'], np.string_, 'strip', 1) - - assert_raises(TypeError, fail) - - def test_invalid_type_descr(self): - - def fail(): - _vec_string(['a'], 'BOGUS', 'strip') - - assert_raises(TypeError, fail) - - def test_invalid_function_args(self): - - def fail(): - _vec_string(['a'], np.string_, 'strip', (1,)) - - assert_raises(TypeError, fail) - - def test_invalid_result_type(self): - - def fail(): - _vec_string(['a'], np.integer, 'strip') - - 
assert_raises(TypeError, fail) - - def test_broadcast_error(self): - - def fail(): - _vec_string([['abc', 'def']], np.integer, 'find', (['a', 'd', 'j'],)) - - assert_raises(ValueError, fail) - - -class TestWhitespace(object): - def setup(self): - self.A = np.array([['abc ', '123 '], - ['789 ', 'xyz ']]).view(np.chararray) - self.B = np.array([['abc', '123'], - ['789', 'xyz']]).view(np.chararray) - - def test1(self): - assert_(np.all(self.A == self.B)) - assert_(np.all(self.A >= self.B)) - assert_(np.all(self.A <= self.B)) - assert_(not np.any(self.A > self.B)) - assert_(not np.any(self.A < self.B)) - assert_(not np.any(self.A != self.B)) - -class TestChar(object): - def setup(self): - self.A = np.array('abc1', dtype='c').view(np.chararray) - - def test_it(self): - assert_equal(self.A.shape, (4,)) - assert_equal(self.A.upper()[:2].tobytes(), b'AB') - -class TestComparisons(object): - def setup(self): - self.A = np.array([['abc', '123'], - ['789', 'xyz']]).view(np.chararray) - self.B = np.array([['efg', '123 '], - ['051', 'tuv']]).view(np.chararray) - - def test_not_equal(self): - assert_array_equal((self.A != self.B), [[True, False], [True, True]]) - - def test_equal(self): - assert_array_equal((self.A == self.B), [[False, True], [False, False]]) - - def test_greater_equal(self): - assert_array_equal((self.A >= self.B), [[False, True], [True, True]]) - - def test_less_equal(self): - assert_array_equal((self.A <= self.B), [[True, True], [False, False]]) - - def test_greater(self): - assert_array_equal((self.A > self.B), [[False, False], [True, True]]) - - def test_less(self): - assert_array_equal((self.A < self.B), [[True, False], [False, False]]) - -class TestComparisonsMixed1(TestComparisons): - """Ticket #1276""" - - def setup(self): - TestComparisons.setup(self) - self.B = np.array([['efg', '123 '], - ['051', 'tuv']], np.unicode_).view(np.chararray) - -class TestComparisonsMixed2(TestComparisons): - """Ticket #1276""" - - def setup(self): - 
TestComparisons.setup(self) - self.A = np.array([['abc', '123'], - ['789', 'xyz']], np.unicode_).view(np.chararray) - -class TestInformation(object): - def setup(self): - self.A = np.array([[' abc ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray) - self.B = np.array([[u' \u03a3 ', u''], - [u'12345', u'MixedCase'], - [u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray) - - def test_len(self): - assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer)) - assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]]) - assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]]) - - def test_count(self): - assert_(issubclass(self.A.count('').dtype.type, np.integer)) - assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]]) - assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]]) - # Python doesn't seem to like counting NULL characters - # assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]]) - assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]]) - assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]]) - assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]]) - # assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]]) - - def test_endswith(self): - assert_(issubclass(self.A.endswith('').dtype.type, np.bool_)) - assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]]) - assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]]) - - def fail(): - self.A.endswith('3', 'fdjk') - - assert_raises(TypeError, fail) - - def test_find(self): - assert_(issubclass(self.A.find('a').dtype.type, np.integer)) - assert_array_equal(self.A.find('a'), [[1, -1], [-1, 6], [-1, -1]]) - assert_array_equal(self.A.find('3'), [[-1, -1], [2, -1], [2, -1]]) - assert_array_equal(self.A.find('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) - assert_array_equal(self.A.find(['1', 'P']), [[-1, -1], [0, -1], [0, 1]]) - - def test_index(self): - - def 
fail(): - self.A.index('a') - - assert_raises(ValueError, fail) - assert_(np.char.index('abcba', 'b') == 1) - assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer)) - - def test_isalnum(self): - assert_(issubclass(self.A.isalnum().dtype.type, np.bool_)) - assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]]) - - def test_isalpha(self): - assert_(issubclass(self.A.isalpha().dtype.type, np.bool_)) - assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]]) - - def test_isdigit(self): - assert_(issubclass(self.A.isdigit().dtype.type, np.bool_)) - assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]]) - - def test_islower(self): - assert_(issubclass(self.A.islower().dtype.type, np.bool_)) - assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]]) - - def test_isspace(self): - assert_(issubclass(self.A.isspace().dtype.type, np.bool_)) - assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]]) - - def test_istitle(self): - assert_(issubclass(self.A.istitle().dtype.type, np.bool_)) - assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]]) - - def test_isupper(self): - assert_(issubclass(self.A.isupper().dtype.type, np.bool_)) - assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]]) - - def test_rfind(self): - assert_(issubclass(self.A.rfind('a').dtype.type, np.integer)) - assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]]) - assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]]) - assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) - assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]]) - - def test_rindex(self): - - def fail(): - self.A.rindex('a') - - assert_raises(ValueError, fail) - assert_(np.char.rindex('abcba', 'b') == 3) - assert_(issubclass(np.char.rindex('abcba', 
'b').dtype.type, np.integer)) - - def test_startswith(self): - assert_(issubclass(self.A.startswith('').dtype.type, np.bool_)) - assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]]) - assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]]) - - def fail(): - self.A.startswith('3', 'fdjk') - - assert_raises(TypeError, fail) - - -class TestMethods(object): - def setup(self): - self.A = np.array([[' abc ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']], - dtype='S').view(np.chararray) - self.B = np.array([[u' \u03a3 ', u''], - [u'12345', u'MixedCase'], - [u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray) - - def test_capitalize(self): - tgt = [[b' abc ', b''], - [b'12345', b'Mixedcase'], - [b'123 \t 345 \0 ', b'Upper']] - assert_(issubclass(self.A.capitalize().dtype.type, np.string_)) - assert_array_equal(self.A.capitalize(), tgt) - - tgt = [[u' \u03c3 ', ''], - ['12345', 'Mixedcase'], - ['123 \t 345 \0 ', 'Upper']] - assert_(issubclass(self.B.capitalize().dtype.type, np.unicode_)) - assert_array_equal(self.B.capitalize(), tgt) - - def test_center(self): - assert_(issubclass(self.A.center(10).dtype.type, np.string_)) - C = self.A.center([10, 20]) - assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) - - C = self.A.center(20, b'#') - assert_(np.all(C.startswith(b'#'))) - assert_(np.all(C.endswith(b'#'))) - - C = np.char.center(b'FOO', [[10, 20], [15, 8]]) - tgt = [[b' FOO ', b' FOO '], - [b' FOO ', b' FOO ']] - assert_(issubclass(C.dtype.type, np.string_)) - assert_array_equal(C, tgt) - - def test_decode(self): - if sys.version_info[0] >= 3: - A = np.char.array([b'\\u03a3']) - assert_(A.decode('unicode-escape')[0] == '\u03a3') - else: - with suppress_warnings() as sup: - if sys.py3kwarning: - sup.filter(DeprecationWarning, "'hex_codec'") - A = np.char.array(['736563726574206d657373616765']) - assert_(A.decode('hex_codec')[0] == 'secret message') - - def test_encode(self): - B = 
self.B.encode('unicode_escape') - assert_(B[0][0] == str(' \\u03a3 ').encode('latin1')) - - def test_expandtabs(self): - T = self.A.expandtabs() - assert_(T[2, 0] == b'123 345 \0') - - def test_join(self): - if sys.version_info[0] >= 3: - # NOTE: list(b'123') == [49, 50, 51] - # so that b','.join(b'123') results to an error on Py3 - A0 = self.A.decode('ascii') - else: - A0 = self.A - - A = np.char.join([',', '#'], A0) - if sys.version_info[0] >= 3: - assert_(issubclass(A.dtype.type, np.unicode_)) - else: - assert_(issubclass(A.dtype.type, np.string_)) - tgt = np.array([[' ,a,b,c, ', ''], - ['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'], - ['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']]) - assert_array_equal(np.char.join([',', '#'], A0), tgt) - - def test_ljust(self): - assert_(issubclass(self.A.ljust(10).dtype.type, np.string_)) - - C = self.A.ljust([10, 20]) - assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) - - C = self.A.ljust(20, b'#') - assert_array_equal(C.startswith(b'#'), [ - [False, True], [False, False], [False, False]]) - assert_(np.all(C.endswith(b'#'))) - - C = np.char.ljust(b'FOO', [[10, 20], [15, 8]]) - tgt = [[b'FOO ', b'FOO '], - [b'FOO ', b'FOO ']] - assert_(issubclass(C.dtype.type, np.string_)) - assert_array_equal(C, tgt) - - def test_lower(self): - tgt = [[b' abc ', b''], - [b'12345', b'mixedcase'], - [b'123 \t 345 \0 ', b'upper']] - assert_(issubclass(self.A.lower().dtype.type, np.string_)) - assert_array_equal(self.A.lower(), tgt) - - tgt = [[u' \u03c3 ', u''], - [u'12345', u'mixedcase'], - [u'123 \t 345 \0 ', u'upper']] - assert_(issubclass(self.B.lower().dtype.type, np.unicode_)) - assert_array_equal(self.B.lower(), tgt) - - def test_lstrip(self): - tgt = [[b'abc ', b''], - [b'12345', b'MixedCase'], - [b'123 \t 345 \0 ', b'UPPER']] - assert_(issubclass(self.A.lstrip().dtype.type, np.string_)) - assert_array_equal(self.A.lstrip(), tgt) - - tgt = [[b' abc', b''], - [b'2345', b'ixedCase'], - [b'23 \t 345 \x00', b'UPPER']] - 
assert_array_equal(self.A.lstrip([b'1', b'M']), tgt) - - tgt = [[u'\u03a3 ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']] - assert_(issubclass(self.B.lstrip().dtype.type, np.unicode_)) - assert_array_equal(self.B.lstrip(), tgt) - - def test_partition(self): - P = self.A.partition([b'3', b'M']) - tgt = [[(b' abc ', b'', b''), (b'', b'', b'')], - [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], - [(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]] - assert_(issubclass(P.dtype.type, np.string_)) - assert_array_equal(P, tgt) - - def test_replace(self): - R = self.A.replace([b'3', b'a'], - [b'##########', b'@']) - tgt = [[b' abc ', b''], - [b'12##########45', b'MixedC@se'], - [b'12########## \t ##########45 \x00', b'UPPER']] - assert_(issubclass(R.dtype.type, np.string_)) - assert_array_equal(R, tgt) - - if sys.version_info[0] < 3: - # NOTE: b'abc'.replace(b'a', 'b') is not allowed on Py3 - R = self.A.replace(b'a', u'\u03a3') - tgt = [[u' \u03a3bc ', ''], - ['12345', u'MixedC\u03a3se'], - ['123 \t 345 \x00', 'UPPER']] - assert_(issubclass(R.dtype.type, np.unicode_)) - assert_array_equal(R, tgt) - - def test_rjust(self): - assert_(issubclass(self.A.rjust(10).dtype.type, np.string_)) - - C = self.A.rjust([10, 20]) - assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) - - C = self.A.rjust(20, b'#') - assert_(np.all(C.startswith(b'#'))) - assert_array_equal(C.endswith(b'#'), - [[False, True], [False, False], [False, False]]) - - C = np.char.rjust(b'FOO', [[10, 20], [15, 8]]) - tgt = [[b' FOO', b' FOO'], - [b' FOO', b' FOO']] - assert_(issubclass(C.dtype.type, np.string_)) - assert_array_equal(C, tgt) - - def test_rpartition(self): - P = self.A.rpartition([b'3', b'M']) - tgt = [[(b'', b'', b' abc '), (b'', b'', b'')], - [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], - [(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]] - assert_(issubclass(P.dtype.type, np.string_)) - assert_array_equal(P, tgt) - - def test_rsplit(self): - A = 
self.A.rsplit(b'3') - tgt = [[[b' abc '], [b'']], - [[b'12', b'45'], [b'MixedCase']], - [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]] - assert_(issubclass(A.dtype.type, np.object_)) - assert_equal(A.tolist(), tgt) - - def test_rstrip(self): - assert_(issubclass(self.A.rstrip().dtype.type, np.string_)) - - tgt = [[b' abc', b''], - [b'12345', b'MixedCase'], - [b'123 \t 345', b'UPPER']] - assert_array_equal(self.A.rstrip(), tgt) - - tgt = [[b' abc ', b''], - [b'1234', b'MixedCase'], - [b'123 \t 345 \x00', b'UPP'] - ] - assert_array_equal(self.A.rstrip([b'5', b'ER']), tgt) - - tgt = [[u' \u03a3', ''], - ['12345', 'MixedCase'], - ['123 \t 345', 'UPPER']] - assert_(issubclass(self.B.rstrip().dtype.type, np.unicode_)) - assert_array_equal(self.B.rstrip(), tgt) - - def test_strip(self): - tgt = [[b'abc', b''], - [b'12345', b'MixedCase'], - [b'123 \t 345', b'UPPER']] - assert_(issubclass(self.A.strip().dtype.type, np.string_)) - assert_array_equal(self.A.strip(), tgt) - - tgt = [[b' abc ', b''], - [b'234', b'ixedCas'], - [b'23 \t 345 \x00', b'UPP']] - assert_array_equal(self.A.strip([b'15', b'EReM']), tgt) - - tgt = [[u'\u03a3', ''], - ['12345', 'MixedCase'], - ['123 \t 345', 'UPPER']] - assert_(issubclass(self.B.strip().dtype.type, np.unicode_)) - assert_array_equal(self.B.strip(), tgt) - - def test_split(self): - A = self.A.split(b'3') - tgt = [ - [[b' abc '], [b'']], - [[b'12', b'45'], [b'MixedCase']], - [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]] - assert_(issubclass(A.dtype.type, np.object_)) - assert_equal(A.tolist(), tgt) - - def test_splitlines(self): - A = np.char.array(['abc\nfds\nwer']).splitlines() - assert_(issubclass(A.dtype.type, np.object_)) - assert_(A.shape == (1,)) - assert_(len(A[0]) == 3) - - def test_swapcase(self): - tgt = [[b' ABC ', b''], - [b'12345', b'mIXEDcASE'], - [b'123 \t 345 \0 ', b'upper']] - assert_(issubclass(self.A.swapcase().dtype.type, np.string_)) - assert_array_equal(self.A.swapcase(), tgt) - - tgt = [[u' \u03c3 ', u''], - [u'12345', 
u'mIXEDcASE'], - [u'123 \t 345 \0 ', u'upper']] - assert_(issubclass(self.B.swapcase().dtype.type, np.unicode_)) - assert_array_equal(self.B.swapcase(), tgt) - - def test_title(self): - tgt = [[b' Abc ', b''], - [b'12345', b'Mixedcase'], - [b'123 \t 345 \0 ', b'Upper']] - assert_(issubclass(self.A.title().dtype.type, np.string_)) - assert_array_equal(self.A.title(), tgt) - - tgt = [[u' \u03a3 ', u''], - [u'12345', u'Mixedcase'], - [u'123 \t 345 \0 ', u'Upper']] - assert_(issubclass(self.B.title().dtype.type, np.unicode_)) - assert_array_equal(self.B.title(), tgt) - - def test_upper(self): - tgt = [[b' ABC ', b''], - [b'12345', b'MIXEDCASE'], - [b'123 \t 345 \0 ', b'UPPER']] - assert_(issubclass(self.A.upper().dtype.type, np.string_)) - assert_array_equal(self.A.upper(), tgt) - - tgt = [[u' \u03a3 ', u''], - [u'12345', u'MIXEDCASE'], - [u'123 \t 345 \0 ', u'UPPER']] - assert_(issubclass(self.B.upper().dtype.type, np.unicode_)) - assert_array_equal(self.B.upper(), tgt) - - def test_isnumeric(self): - - def fail(): - self.A.isnumeric() - - assert_raises(TypeError, fail) - assert_(issubclass(self.B.isnumeric().dtype.type, np.bool_)) - assert_array_equal(self.B.isnumeric(), [ - [False, False], [True, False], [False, False]]) - - def test_isdecimal(self): - - def fail(): - self.A.isdecimal() - - assert_raises(TypeError, fail) - assert_(issubclass(self.B.isdecimal().dtype.type, np.bool_)) - assert_array_equal(self.B.isdecimal(), [ - [False, False], [True, False], [False, False]]) - - -class TestOperations(object): - def setup(self): - self.A = np.array([['abc', '123'], - ['789', 'xyz']]).view(np.chararray) - self.B = np.array([['efg', '456'], - ['051', 'tuv']]).view(np.chararray) - - def test_add(self): - AB = np.array([['abcefg', '123456'], - ['789051', 'xyztuv']]).view(np.chararray) - assert_array_equal(AB, (self.A + self.B)) - assert_(len((self.A + self.B)[0][0]) == 6) - - def test_radd(self): - QA = np.array([['qabc', 'q123'], - ['q789', 'qxyz']]).view(np.chararray) - 
assert_array_equal(QA, ('q' + self.A)) - - def test_mul(self): - A = self.A - for r in (2, 3, 5, 7, 197): - Ar = np.array([[A[0, 0]*r, A[0, 1]*r], - [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray) - - assert_array_equal(Ar, (self.A * r)) - - for ob in [object(), 'qrs']: - with assert_raises_regex(ValueError, - 'Can only multiply by integers'): - A*ob - - def test_rmul(self): - A = self.A - for r in (2, 3, 5, 7, 197): - Ar = np.array([[A[0, 0]*r, A[0, 1]*r], - [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray) - assert_array_equal(Ar, (r * self.A)) - - for ob in [object(), 'qrs']: - with assert_raises_regex(ValueError, - 'Can only multiply by integers'): - ob * A - - def test_mod(self): - """Ticket #856""" - F = np.array([['%d', '%f'], ['%s', '%r']]).view(np.chararray) - C = np.array([[3, 7], [19, 1]]) - FC = np.array([['3', '7.000000'], - ['19', '1']]).view(np.chararray) - assert_array_equal(FC, F % C) - - A = np.array([['%.3f', '%d'], ['%s', '%r']]).view(np.chararray) - A1 = np.array([['1.000', '1'], ['1', '1']]).view(np.chararray) - assert_array_equal(A1, (A % 1)) - - A2 = np.array([['1.000', '2'], ['3', '4']]).view(np.chararray) - assert_array_equal(A2, (A % [[1, 2], [3, 4]])) - - def test_rmod(self): - assert_(("%s" % self.A) == str(self.A)) - assert_(("%r" % self.A) == repr(self.A)) - - for ob in [42, object()]: - with assert_raises_regex( - TypeError, "unsupported operand type.* and 'chararray'"): - ob % self.A - - def test_slice(self): - """Regression test for https://github.com/numpy/numpy/issues/5982""" - - arr = np.array([['abc ', 'def '], ['geh ', 'ijk ']], - dtype='S4').view(np.chararray) - sl1 = arr[:] - assert_array_equal(sl1, arr) - assert_(sl1.base is arr) - assert_(sl1.base.base is arr.base) - - sl2 = arr[:, :] - assert_array_equal(sl2, arr) - assert_(sl2.base is arr) - assert_(sl2.base.base is arr.base) - - assert_(arr[0, 0] == b'abc') - - -def test_empty_indexing(): - """Regression test for ticket 1948.""" - # Check that indexing a chararray with an empty 
list/array returns an - # empty chararray instead of a chararray with a single empty string in it. - s = np.chararray((4,)) - assert_(s[[]].size == 0) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_deprecations.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_deprecations.py deleted file mode 100644 index 363ff26..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_deprecations.py +++ /dev/null @@ -1,570 +0,0 @@ -""" -Tests related to deprecation warnings. Also a convenient place -to document how deprecations should eventually be turned into errors. - -""" -from __future__ import division, absolute_import, print_function - -import datetime -import sys -import operator -import warnings -import pytest -import shutil -import tempfile - -import numpy as np -from numpy.testing import ( - assert_raises, assert_warns, assert_, assert_array_equal - ) - -from numpy.core._multiarray_tests import fromstring_null_term_c_api - -try: - import pytz - _has_pytz = True -except ImportError: - _has_pytz = False - - -class _DeprecationTestCase(object): - # Just as warning: warnings uses re.match, so the start of this message - # must match. - message = '' - warning_cls = DeprecationWarning - - def setup(self): - self.warn_ctx = warnings.catch_warnings(record=True) - self.log = self.warn_ctx.__enter__() - - # Do *not* ignore other DeprecationWarnings. Ignoring warnings - # can give very confusing results because of - # https://bugs.python.org/issue4180 and it is probably simplest to - # try to keep the tests cleanly giving only the right warning type. 
- # (While checking them set to "error" those are ignored anyway) - # We still have them show up, because otherwise they would be raised - warnings.filterwarnings("always", category=self.warning_cls) - warnings.filterwarnings("always", message=self.message, - category=self.warning_cls) - - def teardown(self): - self.warn_ctx.__exit__() - - def assert_deprecated(self, function, num=1, ignore_others=False, - function_fails=False, - exceptions=np._NoValue, - args=(), kwargs={}): - """Test if DeprecationWarnings are given and raised. - - This first checks if the function when called gives `num` - DeprecationWarnings, after that it tries to raise these - DeprecationWarnings and compares them with `exceptions`. - The exceptions can be different for cases where this code path - is simply not anticipated and the exception is replaced. - - Parameters - ---------- - function : callable - The function to test - num : int - Number of DeprecationWarnings to expect. This should normally be 1. - ignore_others : bool - Whether warnings of the wrong type should be ignored (note that - the message is not checked) - function_fails : bool - If the function would normally fail, setting this will check for - warnings inside a try/except block. - exceptions : Exception or tuple of Exceptions - Exception to expect when turning the warnings into an error. - The default checks for DeprecationWarnings. If exceptions is - empty the function is expected to run successfully. 
- args : tuple - Arguments for `function` - kwargs : dict - Keyword arguments for `function` - """ - # reset the log - self.log[:] = [] - - if exceptions is np._NoValue: - exceptions = (self.warning_cls,) - - try: - function(*args, **kwargs) - except (Exception if function_fails else tuple()): - pass - - # just in case, clear the registry - num_found = 0 - for warning in self.log: - if warning.category is self.warning_cls: - num_found += 1 - elif not ignore_others: - raise AssertionError( - "expected %s but got: %s" % - (self.warning_cls.__name__, warning.category)) - if num is not None and num_found != num: - msg = "%i warnings found but %i expected." % (len(self.log), num) - lst = [str(w) for w in self.log] - raise AssertionError("\n".join([msg] + lst)) - - with warnings.catch_warnings(): - warnings.filterwarnings("error", message=self.message, - category=self.warning_cls) - try: - function(*args, **kwargs) - if exceptions != tuple(): - raise AssertionError( - "No error raised during function call") - except exceptions: - if exceptions == tuple(): - raise AssertionError( - "Error raised during function call") - - def assert_not_deprecated(self, function, args=(), kwargs={}): - """Test that warnings are not raised. 
- - This is just a shorthand for: - - self.assert_deprecated(function, num=0, ignore_others=True, - exceptions=tuple(), args=args, kwargs=kwargs) - """ - self.assert_deprecated(function, num=0, ignore_others=True, - exceptions=tuple(), args=args, kwargs=kwargs) - - -class _VisibleDeprecationTestCase(_DeprecationTestCase): - warning_cls = np.VisibleDeprecationWarning - - -class TestNonTupleNDIndexDeprecation(object): - def test_basic(self): - a = np.zeros((5, 5)) - with warnings.catch_warnings(): - warnings.filterwarnings('always') - assert_warns(FutureWarning, a.__getitem__, [[0, 1], [0, 1]]) - assert_warns(FutureWarning, a.__getitem__, [slice(None)]) - - warnings.filterwarnings('error') - assert_raises(FutureWarning, a.__getitem__, [[0, 1], [0, 1]]) - assert_raises(FutureWarning, a.__getitem__, [slice(None)]) - - # a a[[0, 1]] always was advanced indexing, so no error/warning - a[[0, 1]] - - -class TestComparisonDeprecations(_DeprecationTestCase): - """This tests the deprecation, for non-element-wise comparison logic. - This used to mean that when an error occurred during element-wise comparison - (i.e. broadcasting) NotImplemented was returned, but also in the comparison - itself, False was given instead of the error. - - Also test FutureWarning for the None comparison. 
- """ - - message = "elementwise.* comparison failed; .*" - - def test_normal_types(self): - for op in (operator.eq, operator.ne): - # Broadcasting errors: - self.assert_deprecated(op, args=(np.zeros(3), [])) - a = np.zeros(3, dtype='i,i') - # (warning is issued a couple of times here) - self.assert_deprecated(op, args=(a, a[:-1]), num=None) - - # ragged array comparison returns True/False - a = np.array([1, np.array([1,2,3])], dtype=object) - b = np.array([1, np.array([1,2,3])], dtype=object) - self.assert_deprecated(op, args=(a, b), num=None) - - def test_string(self): - # For two string arrays, strings always raised the broadcasting error: - a = np.array(['a', 'b']) - b = np.array(['a', 'b', 'c']) - assert_raises(ValueError, lambda x, y: x == y, a, b) - - # The empty list is not cast to string, and this used to pass due - # to dtype mismatch; now (2018-06-21) it correctly leads to a - # FutureWarning. - assert_warns(FutureWarning, lambda: a == []) - - def test_void_dtype_equality_failures(self): - class NotArray(object): - def __array__(self): - raise TypeError - - # Needed so Python 3 does not raise DeprecationWarning twice. - def __ne__(self, other): - return NotImplemented - - self.assert_deprecated(lambda: np.arange(2) == NotArray()) - self.assert_deprecated(lambda: np.arange(2) != NotArray()) - - struct1 = np.zeros(2, dtype="i4,i4") - struct2 = np.zeros(2, dtype="i4,i4,i4") - - assert_warns(FutureWarning, lambda: struct1 == 1) - assert_warns(FutureWarning, lambda: struct1 == struct2) - assert_warns(FutureWarning, lambda: struct1 != 1) - assert_warns(FutureWarning, lambda: struct1 != struct2) - - def test_array_richcompare_legacy_weirdness(self): - # It doesn't really work to use assert_deprecated here, b/c part of - # the point of assert_deprecated is to check that when warnings are - # set to "error" mode then the error is propagated -- which is good! 
- # But here we are testing a bunch of code that is deprecated *because* - # it has the habit of swallowing up errors and converting them into - # different warnings. So assert_warns will have to be sufficient. - assert_warns(FutureWarning, lambda: np.arange(2) == "a") - assert_warns(FutureWarning, lambda: np.arange(2) != "a") - # No warning for scalar comparisons - with warnings.catch_warnings(): - warnings.filterwarnings("error") - assert_(not (np.array(0) == "a")) - assert_(np.array(0) != "a") - assert_(not (np.int16(0) == "a")) - assert_(np.int16(0) != "a") - - for arg1 in [np.asarray(0), np.int16(0)]: - struct = np.zeros(2, dtype="i4,i4") - for arg2 in [struct, "a"]: - for f in [operator.lt, operator.le, operator.gt, operator.ge]: - if sys.version_info[0] >= 3: - # py3 - with warnings.catch_warnings() as l: - warnings.filterwarnings("always") - assert_raises(TypeError, f, arg1, arg2) - assert_(not l) - else: - # py2 - assert_warns(DeprecationWarning, f, arg1, arg2) - - -class TestDatetime64Timezone(_DeprecationTestCase): - """Parsing of datetime64 with timezones deprecated in 1.11.0, because - datetime64 is now timezone naive rather than UTC only. - - It will be quite a while before we can remove this, because, at the very - least, a lot of existing code uses the 'Z' modifier to avoid conversion - from local time to UTC, even if otherwise it handles time in a timezone - naive fashion. - """ - def test_string(self): - self.assert_deprecated(np.datetime64, args=('2000-01-01T00+01',)) - self.assert_deprecated(np.datetime64, args=('2000-01-01T00Z',)) - - @pytest.mark.skipif(not _has_pytz, - reason="The pytz module is not available.") - def test_datetime(self): - tz = pytz.timezone('US/Eastern') - dt = datetime.datetime(2000, 1, 1, 0, 0, tzinfo=tz) - self.assert_deprecated(np.datetime64, args=(dt,)) - - -class TestNonCContiguousViewDeprecation(_DeprecationTestCase): - """View of non-C-contiguous arrays deprecated in 1.11.0. 
- - The deprecation will not be raised for arrays that are both C and F - contiguous, as C contiguous is dominant. There are more such arrays - with relaxed stride checking than without so the deprecation is not - as visible with relaxed stride checking in force. - """ - - def test_fortran_contiguous(self): - self.assert_deprecated(np.ones((2,2)).T.view, args=(complex,)) - self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,)) - - -class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase): - """Assigning the 'data' attribute of an ndarray is unsafe as pointed - out in gh-7093. Eventually, such assignment should NOT be allowed, but - in the interests of maintaining backwards compatibility, only a Deprecation- - Warning will be raised instead for the time being to give developers time to - refactor relevant code. - """ - - def test_data_attr_assignment(self): - a = np.arange(10) - b = np.linspace(0, 1, 10) - - self.message = ("Assigning the 'data' attribute is an " - "inherently unsafe operation and will " - "be removed in the future.") - self.assert_deprecated(a.__setattr__, args=('data', b.data)) - - -class TestBinaryReprInsufficientWidthParameterForRepresentation(_DeprecationTestCase): - """ - If a 'width' parameter is passed into ``binary_repr`` that is insufficient to - represent the number in base 2 (positive) or 2's complement (negative) form, - the function used to silently ignore the parameter and return a representation - using the minimal number of bits needed for the form in question. Such behavior - is now considered unsafe from a user perspective and will raise an error in the future. - """ - - def test_insufficient_width_positive(self): - args = (10,) - kwargs = {'width': 2} - - self.message = ("Insufficient bit width provided. 
This behavior " - "will raise an error in the future.") - self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs) - - def test_insufficient_width_negative(self): - args = (-5,) - kwargs = {'width': 2} - - self.message = ("Insufficient bit width provided. This behavior " - "will raise an error in the future.") - self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs) - - -class TestNumericStyleTypecodes(_DeprecationTestCase): - """ - Deprecate the old numeric-style dtypes, which are especially - confusing for complex types, e.g. Complex32 -> complex64. When the - deprecation cycle is complete, the check for the strings should be - removed from PyArray_DescrConverter in descriptor.c, and the - deprecated keys should not be added as capitalized aliases in - _add_aliases in numerictypes.py. - """ - def test_all_dtypes(self): - deprecated_types = [ - 'Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64', - 'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64', - 'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0' - ] - if sys.version_info[0] < 3: - deprecated_types.extend(['Unicode0', 'String0']) - - for dt in deprecated_types: - self.assert_deprecated(np.dtype, exceptions=(TypeError,), - args=(dt,)) - - -class TestTestDeprecated(object): - def test_assert_deprecated(self): - test_case_instance = _DeprecationTestCase() - test_case_instance.setup() - assert_raises(AssertionError, - test_case_instance.assert_deprecated, - lambda: None) - - def foo(): - warnings.warn("foo", category=DeprecationWarning, stacklevel=2) - - test_case_instance.assert_deprecated(foo) - test_case_instance.teardown() - - -class TestClassicIntDivision(_DeprecationTestCase): - """ - See #7949. 
Deprecate the numeric-style dtypes with -3 flag in python 2 - if used for division - List of data types: https://docs.scipy.org/doc/numpy/user/basics.types.html - """ - def test_int_dtypes(self): - #scramble types and do some mix and match testing - deprecated_types = [ - 'bool_', 'int_', 'intc', 'uint8', 'int8', 'uint64', 'int32', 'uint16', - 'intp', 'int64', 'uint32', 'int16' - ] - if sys.version_info[0] < 3 and sys.py3kwarning: - import operator as op - dt2 = 'bool_' - for dt1 in deprecated_types: - a = np.array([1,2,3], dtype=dt1) - b = np.array([1,2,3], dtype=dt2) - self.assert_deprecated(op.div, args=(a,b)) - dt2 = dt1 - - -class TestNonNumericConjugate(_DeprecationTestCase): - """ - Deprecate no-op behavior of ndarray.conjugate on non-numeric dtypes, - which conflicts with the error behavior of np.conjugate. - """ - def test_conjugate(self): - for a in np.array(5), np.array(5j): - self.assert_not_deprecated(a.conjugate) - for a in (np.array('s'), np.array('2016', 'M'), - np.array((1, 2), [('a', int), ('b', int)])): - self.assert_deprecated(a.conjugate) - - -class TestNPY_CHAR(_DeprecationTestCase): - # 2017-05-03, 1.13.0 - def test_npy_char_deprecation(self): - from numpy.core._multiarray_tests import npy_char_deprecation - self.assert_deprecated(npy_char_deprecation) - assert_(npy_char_deprecation() == 'S1') - - -class TestPyArray_AS1D(_DeprecationTestCase): - def test_npy_pyarrayas1d_deprecation(self): - from numpy.core._multiarray_tests import npy_pyarrayas1d_deprecation - assert_raises(NotImplementedError, npy_pyarrayas1d_deprecation) - - -class TestPyArray_AS2D(_DeprecationTestCase): - def test_npy_pyarrayas2d_deprecation(self): - from numpy.core._multiarray_tests import npy_pyarrayas2d_deprecation - assert_raises(NotImplementedError, npy_pyarrayas2d_deprecation) - - -class Test_UPDATEIFCOPY(_DeprecationTestCase): - """ - v1.14 deprecates creating an array with the UPDATEIFCOPY flag, use - WRITEBACKIFCOPY instead - """ - def 
test_npy_updateifcopy_deprecation(self): - from numpy.core._multiarray_tests import npy_updateifcopy_deprecation - arr = np.arange(9).reshape(3, 3) - v = arr.T - self.assert_deprecated(npy_updateifcopy_deprecation, args=(v,)) - - -class TestDatetimeEvent(_DeprecationTestCase): - # 2017-08-11, 1.14.0 - def test_3_tuple(self): - for cls in (np.datetime64, np.timedelta64): - # two valid uses - (unit, num) and (unit, num, den, None) - self.assert_not_deprecated(cls, args=(1, ('ms', 2))) - self.assert_not_deprecated(cls, args=(1, ('ms', 2, 1, None))) - - # trying to use the event argument, removed in 1.7.0, is deprecated - # it used to be a uint8 - self.assert_deprecated(cls, args=(1, ('ms', 2, 'event'))) - self.assert_deprecated(cls, args=(1, ('ms', 2, 63))) - self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 'event'))) - self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63))) - - -class TestTruthTestingEmptyArrays(_DeprecationTestCase): - # 2017-09-25, 1.14.0 - message = '.*truth value of an empty array is ambiguous.*' - - def test_1d(self): - self.assert_deprecated(bool, args=(np.array([]),)) - - def test_2d(self): - self.assert_deprecated(bool, args=(np.zeros((1, 0)),)) - self.assert_deprecated(bool, args=(np.zeros((0, 1)),)) - self.assert_deprecated(bool, args=(np.zeros((0, 0)),)) - - -class TestBincount(_DeprecationTestCase): - # 2017-06-01, 1.14.0 - def test_bincount_minlength(self): - self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None)) - - -class TestAlen(_DeprecationTestCase): - # 2019-08-02, 1.18.0 - def test_alen(self): - self.assert_deprecated(lambda: np.alen(np.array([1, 2, 3]))) - - -class TestGeneratorSum(_DeprecationTestCase): - # 2018-02-25, 1.15.0 - def test_generator_sum(self): - self.assert_deprecated(np.sum, args=((i for i in range(5)),)) - - -class TestSctypeNA(_VisibleDeprecationTestCase): - # 2018-06-24, 1.16 - def test_sctypeNA(self): - self.assert_deprecated(lambda: np.sctypeNA['?']) - self.assert_deprecated(lambda: 
np.typeNA['?']) - self.assert_deprecated(lambda: np.typeNA.get('?')) - - -class TestPositiveOnNonNumerical(_DeprecationTestCase): - # 2018-06-28, 1.16.0 - def test_positive_on_non_number(self): - self.assert_deprecated(operator.pos, args=(np.array('foo'),)) - - -class TestFromstring(_DeprecationTestCase): - # 2017-10-19, 1.14 - def test_fromstring(self): - self.assert_deprecated(np.fromstring, args=('\x00'*80,)) - - -class TestFromStringAndFileInvalidData(_DeprecationTestCase): - # 2019-06-08, 1.17.0 - # Tests should be moved to real tests when deprecation is done. - message = "string or file could not be read to its end" - - @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"]) - def test_deprecate_unparsable_data_file(self, invalid_str): - x = np.array([1.51, 2, 3.51, 4], dtype=float) - - with tempfile.TemporaryFile(mode="w") as f: - x.tofile(f, sep=',', format='%.2f') - f.write(invalid_str) - - f.seek(0) - self.assert_deprecated(lambda: np.fromfile(f, sep=",")) - f.seek(0) - self.assert_deprecated(lambda: np.fromfile(f, sep=",", count=5)) - # Should not raise: - with warnings.catch_warnings(): - warnings.simplefilter("error", DeprecationWarning) - f.seek(0) - res = np.fromfile(f, sep=",", count=4) - assert_array_equal(res, x) - - @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"]) - def test_deprecate_unparsable_string(self, invalid_str): - x = np.array([1.51, 2, 3.51, 4], dtype=float) - x_str = "1.51,2,3.51,4{}".format(invalid_str) - - self.assert_deprecated(lambda: np.fromstring(x_str, sep=",")) - self.assert_deprecated(lambda: np.fromstring(x_str, sep=",", count=5)) - - # The C-level API can use not fixed size, but 0 terminated strings, - # so test that as well: - bytestr = x_str.encode("ascii") - self.assert_deprecated(lambda: fromstring_null_term_c_api(bytestr)) - - with assert_warns(DeprecationWarning): - # this is slightly strange, in that fromstring leaves data - # potentially uninitialized (would be good to 
error when all is - # read, but count is larger then actual data maybe). - res = np.fromstring(x_str, sep=",", count=5) - assert_array_equal(res[:-1], x) - - with warnings.catch_warnings(): - warnings.simplefilter("error", DeprecationWarning) - - # Should not raise: - res = np.fromstring(x_str, sep=",", count=4) - assert_array_equal(res, x) - - -class Test_GetSet_NumericOps(_DeprecationTestCase): - # 2018-09-20, 1.16.0 - def test_get_numeric_ops(self): - from numpy.core._multiarray_tests import getset_numericops - self.assert_deprecated(getset_numericops, num=2) - - # empty kwargs prevents any state actually changing which would break - # other tests. - self.assert_deprecated(np.set_numeric_ops, kwargs={}) - assert_raises(ValueError, np.set_numeric_ops, add='abc') - - -class TestShape1Fields(_DeprecationTestCase): - warning_cls = FutureWarning - - # 2019-05-20, 1.17.0 - def test_shape_1_fields(self): - self.assert_deprecated(np.dtype, args=([('a', int, 1)],)) - - -class TestNonZero(_DeprecationTestCase): - # 2019-05-26, 1.17.0 - def test_zerod(self): - self.assert_deprecated(lambda: np.nonzero(np.array(0))) - self.assert_deprecated(lambda: np.nonzero(np.array(1))) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_dtype.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_dtype.py deleted file mode 100644 index e18e66c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_dtype.py +++ /dev/null @@ -1,1300 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import operator -import pytest -import ctypes -import gc - -import numpy as np -from numpy.core._rational_tests import rational -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT) -from numpy.compat import pickle -from itertools import permutations - -def assert_dtype_equal(a, b): - assert_equal(a, b) - assert_equal(hash(a), hash(b), - "two equivalent types do not hash to the same value 
!") - -def assert_dtype_not_equal(a, b): - assert_(a != b) - assert_(hash(a) != hash(b), - "two different types hash to the same value !") - -class TestBuiltin(object): - @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object, - np.compat.unicode]) - def test_run(self, t): - """Only test hash runs at all.""" - dt = np.dtype(t) - hash(dt) - - @pytest.mark.parametrize('t', [int, float]) - def test_dtype(self, t): - # Make sure equivalent byte order char hash the same (e.g. < and = on - # little endian) - dt = np.dtype(t) - dt2 = dt.newbyteorder("<") - dt3 = dt.newbyteorder(">") - if dt == dt2: - assert_(dt.byteorder != dt2.byteorder, "bogus test") - assert_dtype_equal(dt, dt2) - else: - assert_(dt.byteorder != dt3.byteorder, "bogus test") - assert_dtype_equal(dt, dt3) - - def test_equivalent_dtype_hashing(self): - # Make sure equivalent dtypes with different type num hash equal - uintp = np.dtype(np.uintp) - if uintp.itemsize == 4: - left = uintp - right = np.dtype(np.uint32) - else: - left = uintp - right = np.dtype(np.ulonglong) - assert_(left == right) - assert_(hash(left) == hash(right)) - - def test_invalid_types(self): - # Make sure invalid type strings raise an error - - assert_raises(TypeError, np.dtype, 'O3') - assert_raises(TypeError, np.dtype, 'O5') - assert_raises(TypeError, np.dtype, 'O7') - assert_raises(TypeError, np.dtype, 'b3') - assert_raises(TypeError, np.dtype, 'h4') - assert_raises(TypeError, np.dtype, 'I5') - assert_raises(TypeError, np.dtype, 'e3') - assert_raises(TypeError, np.dtype, 'f5') - - if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16: - assert_raises(TypeError, np.dtype, 'g12') - elif np.dtype('g').itemsize == 12: - assert_raises(TypeError, np.dtype, 'g16') - - if np.dtype('l').itemsize == 8: - assert_raises(TypeError, np.dtype, 'l4') - assert_raises(TypeError, np.dtype, 'L4') - else: - assert_raises(TypeError, np.dtype, 'l8') - assert_raises(TypeError, np.dtype, 'L8') - - if np.dtype('q').itemsize == 
8: - assert_raises(TypeError, np.dtype, 'q4') - assert_raises(TypeError, np.dtype, 'Q4') - else: - assert_raises(TypeError, np.dtype, 'q8') - assert_raises(TypeError, np.dtype, 'Q8') - - @pytest.mark.parametrize( - 'value', - ['m8', 'M8', 'datetime64', 'timedelta64', - 'i4, (2,3)f8, f4', 'a3, 3u8, (3,4)a10', - '>f', 'f4', (64, 64)), (1,)), - ('rtile', '>f4', (64, 36))], (3,)), - ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), - ('bright', '>f4', (8, 36))])]) - assert_equal(str(dt), - "[('top', [('tiles', ('>f4', (64, 64)), (1,)), " - "('rtile', '>f4', (64, 36))], (3,)), " - "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), " - "('bright', '>f4', (8, 36))])]") - - # If the sticky aligned flag is set to True, it makes the - # str() function use a dict representation with an 'aligned' flag - dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), - ('rtile', '>f4', (64, 36))], - (3,)), - ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), - ('bright', '>f4', (8, 36))])], - align=True) - assert_equal(str(dt), - "{'names':['top','bottom'], " - "'formats':[([('tiles', ('>f4', (64, 64)), (1,)), " - "('rtile', '>f4', (64, 36))], (3,))," - "[('bleft', ('>f4', (8, 64)), (1,)), " - "('bright', '>f4', (8, 36))]], " - "'offsets':[0,76800], " - "'itemsize':80000, " - "'aligned':True}") - assert_equal(np.dtype(eval(str(dt))), dt) - - dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'], - 'offsets': [0, 1, 2], - 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']}) - assert_equal(str(dt), - "[(('Red pixel', 'r'), 'u1'), " - "(('Green pixel', 'g'), 'u1'), " - "(('Blue pixel', 'b'), 'u1')]") - - dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'], - 'formats': ['f4', (64, 64)), (1,)), - ('rtile', '>f4', (64, 36))], (3,)), - ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), - ('bright', '>f4', (8, 36))])]) - assert_equal(repr(dt), - "dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), " - "('rtile', '>f4', (64, 36))], (3,)), " - "('bottom', [('bleft', ('>f4', (8, 64)), 
(1,)), " - "('bright', '>f4', (8, 36))])])") - - dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'], - 'offsets': [0, 1, 2], - 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']}, - align=True) - assert_equal(repr(dt), - "dtype([(('Red pixel', 'r'), 'u1'), " - "(('Green pixel', 'g'), 'u1'), " - "(('Blue pixel', 'b'), 'u1')], align=True)") - - def test_repr_structured_not_packed(self): - dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'], - 'formats': ['= 3, reason="Python 2 only") - def test_dtype_str_with_long_in_shape(self): - # Pull request #376, should not error - np.dtype('(1L,)i4') - - def test_base_dtype_with_object_type(self): - # Issue gh-2798, should not error. - np.array(['a'], dtype="O").astype(("O", [("name", "O")])) - - def test_empty_string_to_object(self): - # Pull request #4722 - np.array(["", ""]).astype(object) - - def test_void_subclass_unsized(self): - dt = np.dtype(np.record) - assert_equal(repr(dt), "dtype('V')") - assert_equal(str(dt), '|V0') - assert_equal(dt.name, 'record') - - def test_void_subclass_sized(self): - dt = np.dtype((np.record, 2)) - assert_equal(repr(dt), "dtype('V2')") - assert_equal(str(dt), '|V2') - assert_equal(dt.name, 'record16') - - def test_void_subclass_fields(self): - dt = np.dtype((np.record, [('a', 'f4', (2, 1)), ('b', 'u4')]) - self.check(BigEndStruct, expected) - - def test_little_endian_structure_packed(self): - class LittleEndStruct(ctypes.LittleEndianStructure): - _fields_ = [ - ('one', ctypes.c_uint8), - ('two', ctypes.c_uint32) - ] - _pack_ = 1 - expected = np.dtype([('one', 'u1'), ('two', 'B'), - ('b', '>H') - ], align=True) - self.check(PaddedStruct, expected) - - def test_simple_endian_types(self): - self.check(ctypes.c_uint16.__ctype_le__, np.dtype('u2')) - self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1')) - self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1')) - - all_types = set(np.typecodes['All']) - all_pairs = permutations(all_types, 2) - - 
@pytest.mark.parametrize("pair", all_pairs) - def test_pairs(self, pair): - """ - Check that np.dtype('x,y') matches [np.dtype('x'), np.dtype('y')] - Example: np.dtype('d,I') -> dtype([('f0', '..j", [0, 0], optimize=do_opt) - assert_raises(ValueError, np.einsum, "j->.j...", [0, 0], optimize=do_opt) - - # invalid subscript character - assert_raises(ValueError, np.einsum, "i%...", [0, 0], optimize=do_opt) - assert_raises(ValueError, np.einsum, "...j$", [0, 0], optimize=do_opt) - assert_raises(ValueError, np.einsum, "i->&", [0, 0], optimize=do_opt) - - # output subscripts must appear in input - assert_raises(ValueError, np.einsum, "i->ij", [0, 0], optimize=do_opt) - - # output subscripts may only be specified once - assert_raises(ValueError, np.einsum, "ij->jij", [[0, 0], [0, 0]], - optimize=do_opt) - - # dimensions much match when being collapsed - assert_raises(ValueError, np.einsum, "ii", - np.arange(6).reshape(2, 3), optimize=do_opt) - assert_raises(ValueError, np.einsum, "ii->i", - np.arange(6).reshape(2, 3), optimize=do_opt) - - # broadcasting to new dimensions must be enabled explicitly - assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2, 3), - optimize=do_opt) - assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]], - out=np.arange(4).reshape(2, 2), optimize=do_opt) - with assert_raises_regex(ValueError, "'b'"): - # gh-11221 - 'c' erroneously appeared in the error message - a = np.ones((3, 3, 4, 5, 6)) - b = np.ones((3, 4, 5)) - np.einsum('aabcb,abc', a, b) - - def test_einsum_views(self): - # pass-through - for do_opt in [True, False]: - a = np.arange(6) - a.shape = (2, 3) - - b = np.einsum("...", a, optimize=do_opt) - assert_(b.base is a) - - b = np.einsum(a, [Ellipsis], optimize=do_opt) - assert_(b.base is a) - - b = np.einsum("ij", a, optimize=do_opt) - assert_(b.base is a) - assert_equal(b, a) - - b = np.einsum(a, [0, 1], optimize=do_opt) - assert_(b.base is a) - assert_equal(b, a) - - # output is writeable whenever input is 
writeable - b = np.einsum("...", a, optimize=do_opt) - assert_(b.flags['WRITEABLE']) - a.flags['WRITEABLE'] = False - b = np.einsum("...", a, optimize=do_opt) - assert_(not b.flags['WRITEABLE']) - - # transpose - a = np.arange(6) - a.shape = (2, 3) - - b = np.einsum("ji", a, optimize=do_opt) - assert_(b.base is a) - assert_equal(b, a.T) - - b = np.einsum(a, [1, 0], optimize=do_opt) - assert_(b.base is a) - assert_equal(b, a.T) - - # diagonal - a = np.arange(9) - a.shape = (3, 3) - - b = np.einsum("ii->i", a, optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [a[i, i] for i in range(3)]) - - b = np.einsum(a, [0, 0], [0], optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [a[i, i] for i in range(3)]) - - # diagonal with various ways of broadcasting an additional dimension - a = np.arange(27) - a.shape = (3, 3, 3) - - b = np.einsum("...ii->...i", a, optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) - - b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) - - b = np.einsum("ii...->...i", a, optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [[x[i, i] for i in range(3)] - for x in a.transpose(2, 0, 1)]) - - b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [[x[i, i] for i in range(3)] - for x in a.transpose(2, 0, 1)]) - - b = np.einsum("...ii->i...", a, optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [a[:, i, i] for i in range(3)]) - - b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [a[:, i, i] for i in range(3)]) - - b = np.einsum("jii->ij", a, optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [a[:, i, i] for i in range(3)]) - - b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [a[:, i, i] for i in range(3)]) - - b = 
np.einsum("ii...->i...", a, optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) - - b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) - - b = np.einsum("i...i->i...", a, optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) - - b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis], optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) - - b = np.einsum("i...i->...i", a, optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [[x[i, i] for i in range(3)] - for x in a.transpose(1, 0, 2)]) - - b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [[x[i, i] for i in range(3)] - for x in a.transpose(1, 0, 2)]) - - # triple diagonal - a = np.arange(27) - a.shape = (3, 3, 3) - - b = np.einsum("iii->i", a, optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [a[i, i, i] for i in range(3)]) - - b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [a[i, i, i] for i in range(3)]) - - # swap axes - a = np.arange(24) - a.shape = (2, 3, 4) - - b = np.einsum("ijk->jik", a, optimize=do_opt) - assert_(b.base is a) - assert_equal(b, a.swapaxes(0, 1)) - - b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt) - assert_(b.base is a) - assert_equal(b, a.swapaxes(0, 1)) - - def check_einsum_sums(self, dtype, do_opt=False): - # Check various sums. Does many sizes to exercise unrolled loops. 
- - # sum(a, axis=-1) - for n in range(1, 17): - a = np.arange(n, dtype=dtype) - assert_equal(np.einsum("i->", a, optimize=do_opt), - np.sum(a, axis=-1).astype(dtype)) - assert_equal(np.einsum(a, [0], [], optimize=do_opt), - np.sum(a, axis=-1).astype(dtype)) - - for n in range(1, 17): - a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) - assert_equal(np.einsum("...i->...", a, optimize=do_opt), - np.sum(a, axis=-1).astype(dtype)) - assert_equal(np.einsum(a, [Ellipsis, 0], [Ellipsis], optimize=do_opt), - np.sum(a, axis=-1).astype(dtype)) - - # sum(a, axis=0) - for n in range(1, 17): - a = np.arange(2*n, dtype=dtype).reshape(2, n) - assert_equal(np.einsum("i...->...", a, optimize=do_opt), - np.sum(a, axis=0).astype(dtype)) - assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), - np.sum(a, axis=0).astype(dtype)) - - for n in range(1, 17): - a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) - assert_equal(np.einsum("i...->...", a, optimize=do_opt), - np.sum(a, axis=0).astype(dtype)) - assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), - np.sum(a, axis=0).astype(dtype)) - - # trace(a) - for n in range(1, 17): - a = np.arange(n*n, dtype=dtype).reshape(n, n) - assert_equal(np.einsum("ii", a, optimize=do_opt), - np.trace(a).astype(dtype)) - assert_equal(np.einsum(a, [0, 0], optimize=do_opt), - np.trace(a).astype(dtype)) - - # multiply(a, b) - assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case - for n in range(1, 17): - a = np.arange(3 * n, dtype=dtype).reshape(3, n) - b = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) - assert_equal(np.einsum("..., ...", a, b, optimize=do_opt), - np.multiply(a, b)) - assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis], optimize=do_opt), - np.multiply(a, b)) - - # inner(a,b) - for n in range(1, 17): - a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) - b = np.arange(n, dtype=dtype) - assert_equal(np.einsum("...i, ...i", a, b, optimize=do_opt), np.inner(a, b)) - 
assert_equal(np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0], optimize=do_opt), - np.inner(a, b)) - - for n in range(1, 11): - a = np.arange(n * 3 * 2, dtype=dtype).reshape(n, 3, 2) - b = np.arange(n, dtype=dtype) - assert_equal(np.einsum("i..., i...", a, b, optimize=do_opt), - np.inner(a.T, b.T).T) - assert_equal(np.einsum(a, [0, Ellipsis], b, [0, Ellipsis], optimize=do_opt), - np.inner(a.T, b.T).T) - - # outer(a,b) - for n in range(1, 17): - a = np.arange(3, dtype=dtype)+1 - b = np.arange(n, dtype=dtype)+1 - assert_equal(np.einsum("i,j", a, b, optimize=do_opt), - np.outer(a, b)) - assert_equal(np.einsum(a, [0], b, [1], optimize=do_opt), - np.outer(a, b)) - - # Suppress the complex warnings for the 'as f8' tests - with suppress_warnings() as sup: - sup.filter(np.ComplexWarning) - - # matvec(a,b) / a.dot(b) where a is matrix, b is vector - for n in range(1, 17): - a = np.arange(4*n, dtype=dtype).reshape(4, n) - b = np.arange(n, dtype=dtype) - assert_equal(np.einsum("ij, j", a, b, optimize=do_opt), - np.dot(a, b)) - assert_equal(np.einsum(a, [0, 1], b, [1], optimize=do_opt), - np.dot(a, b)) - - c = np.arange(4, dtype=dtype) - np.einsum("ij,j", a, b, out=c, - dtype='f8', casting='unsafe', optimize=do_opt) - assert_equal(c, - np.dot(a.astype('f8'), - b.astype('f8')).astype(dtype)) - c[...] = 0 - np.einsum(a, [0, 1], b, [1], out=c, - dtype='f8', casting='unsafe', optimize=do_opt) - assert_equal(c, - np.dot(a.astype('f8'), - b.astype('f8')).astype(dtype)) - - for n in range(1, 17): - a = np.arange(4*n, dtype=dtype).reshape(4, n) - b = np.arange(n, dtype=dtype) - assert_equal(np.einsum("ji,j", a.T, b.T, optimize=do_opt), - np.dot(b.T, a.T)) - assert_equal(np.einsum(a.T, [1, 0], b.T, [1], optimize=do_opt), - np.dot(b.T, a.T)) - - c = np.arange(4, dtype=dtype) - np.einsum("ji,j", a.T, b.T, out=c, - dtype='f8', casting='unsafe', optimize=do_opt) - assert_equal(c, - np.dot(b.T.astype('f8'), - a.T.astype('f8')).astype(dtype)) - c[...] 
= 0 - np.einsum(a.T, [1, 0], b.T, [1], out=c, - dtype='f8', casting='unsafe', optimize=do_opt) - assert_equal(c, - np.dot(b.T.astype('f8'), - a.T.astype('f8')).astype(dtype)) - - # matmat(a,b) / a.dot(b) where a is matrix, b is matrix - for n in range(1, 17): - if n < 8 or dtype != 'f2': - a = np.arange(4*n, dtype=dtype).reshape(4, n) - b = np.arange(n*6, dtype=dtype).reshape(n, 6) - assert_equal(np.einsum("ij,jk", a, b, optimize=do_opt), - np.dot(a, b)) - assert_equal(np.einsum(a, [0, 1], b, [1, 2], optimize=do_opt), - np.dot(a, b)) - - for n in range(1, 17): - a = np.arange(4*n, dtype=dtype).reshape(4, n) - b = np.arange(n*6, dtype=dtype).reshape(n, 6) - c = np.arange(24, dtype=dtype).reshape(4, 6) - np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe', - optimize=do_opt) - assert_equal(c, - np.dot(a.astype('f8'), - b.astype('f8')).astype(dtype)) - c[...] = 0 - np.einsum(a, [0, 1], b, [1, 2], out=c, - dtype='f8', casting='unsafe', optimize=do_opt) - assert_equal(c, - np.dot(a.astype('f8'), - b.astype('f8')).astype(dtype)) - - # matrix triple product (note this is not currently an efficient - # way to multiply 3 matrices) - a = np.arange(12, dtype=dtype).reshape(3, 4) - b = np.arange(20, dtype=dtype).reshape(4, 5) - c = np.arange(30, dtype=dtype).reshape(5, 6) - if dtype != 'f2': - assert_equal(np.einsum("ij,jk,kl", a, b, c, optimize=do_opt), - a.dot(b).dot(c)) - assert_equal(np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], - optimize=do_opt), a.dot(b).dot(c)) - - d = np.arange(18, dtype=dtype).reshape(3, 6) - np.einsum("ij,jk,kl", a, b, c, out=d, - dtype='f8', casting='unsafe', optimize=do_opt) - tgt = a.astype('f8').dot(b.astype('f8')) - tgt = tgt.dot(c.astype('f8')).astype(dtype) - assert_equal(d, tgt) - - d[...] 
= 0 - np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], out=d, - dtype='f8', casting='unsafe', optimize=do_opt) - tgt = a.astype('f8').dot(b.astype('f8')) - tgt = tgt.dot(c.astype('f8')).astype(dtype) - assert_equal(d, tgt) - - # tensordot(a, b) - if np.dtype(dtype) != np.dtype('f2'): - a = np.arange(60, dtype=dtype).reshape(3, 4, 5) - b = np.arange(24, dtype=dtype).reshape(4, 3, 2) - assert_equal(np.einsum("ijk, jil -> kl", a, b), - np.tensordot(a, b, axes=([1, 0], [0, 1]))) - assert_equal(np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3]), - np.tensordot(a, b, axes=([1, 0], [0, 1]))) - - c = np.arange(10, dtype=dtype).reshape(5, 2) - np.einsum("ijk,jil->kl", a, b, out=c, - dtype='f8', casting='unsafe', optimize=do_opt) - assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), - axes=([1, 0], [0, 1])).astype(dtype)) - c[...] = 0 - np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3], out=c, - dtype='f8', casting='unsafe', optimize=do_opt) - assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), - axes=([1, 0], [0, 1])).astype(dtype)) - - # logical_and(logical_and(a!=0, b!=0), c!=0) - a = np.array([1, 3, -2, 0, 12, 13, 0, 1], dtype=dtype) - b = np.array([0, 3.5, 0., -2, 0, 1, 3, 12], dtype=dtype) - c = np.array([True, True, False, True, True, False, True, True]) - assert_equal(np.einsum("i,i,i->i", a, b, c, - dtype='?', casting='unsafe', optimize=do_opt), - np.logical_and(np.logical_and(a != 0, b != 0), c != 0)) - assert_equal(np.einsum(a, [0], b, [0], c, [0], [0], - dtype='?', casting='unsafe'), - np.logical_and(np.logical_and(a != 0, b != 0), c != 0)) - - a = np.arange(9, dtype=dtype) - assert_equal(np.einsum(",i->", 3, a), 3*np.sum(a)) - assert_equal(np.einsum(3, [], a, [0], []), 3*np.sum(a)) - assert_equal(np.einsum("i,->", a, 3), 3*np.sum(a)) - assert_equal(np.einsum(a, [0], 3, [], []), 3*np.sum(a)) - - # Various stride0, contiguous, and SSE aligned variants - for n in range(1, 25): - a = np.arange(n, dtype=dtype) - if np.dtype(dtype).itemsize > 1: - 
assert_equal(np.einsum("...,...", a, a, optimize=do_opt), - np.multiply(a, a)) - assert_equal(np.einsum("i,i", a, a, optimize=do_opt), np.dot(a, a)) - assert_equal(np.einsum("i,->i", a, 2, optimize=do_opt), 2*a) - assert_equal(np.einsum(",i->i", 2, a, optimize=do_opt), 2*a) - assert_equal(np.einsum("i,->", a, 2, optimize=do_opt), 2*np.sum(a)) - assert_equal(np.einsum(",i->", 2, a, optimize=do_opt), 2*np.sum(a)) - - assert_equal(np.einsum("...,...", a[1:], a[:-1], optimize=do_opt), - np.multiply(a[1:], a[:-1])) - assert_equal(np.einsum("i,i", a[1:], a[:-1], optimize=do_opt), - np.dot(a[1:], a[:-1])) - assert_equal(np.einsum("i,->i", a[1:], 2, optimize=do_opt), 2*a[1:]) - assert_equal(np.einsum(",i->i", 2, a[1:], optimize=do_opt), 2*a[1:]) - assert_equal(np.einsum("i,->", a[1:], 2, optimize=do_opt), - 2*np.sum(a[1:])) - assert_equal(np.einsum(",i->", 2, a[1:], optimize=do_opt), - 2*np.sum(a[1:])) - - # An object array, summed as the data type - a = np.arange(9, dtype=object) - - b = np.einsum("i->", a, dtype=dtype, casting='unsafe') - assert_equal(b, np.sum(a)) - assert_equal(b.dtype, np.dtype(dtype)) - - b = np.einsum(a, [0], [], dtype=dtype, casting='unsafe') - assert_equal(b, np.sum(a)) - assert_equal(b.dtype, np.dtype(dtype)) - - # A case which was failing (ticket #1885) - p = np.arange(2) + 1 - q = np.arange(4).reshape(2, 2) + 3 - r = np.arange(4).reshape(2, 2) + 7 - assert_equal(np.einsum('z,mz,zm->', p, q, r), 253) - - # singleton dimensions broadcast (gh-10343) - p = np.ones((10,2)) - q = np.ones((1,2)) - assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), - np.einsum('ij,ij->j', p, q, optimize=False)) - assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), - [10.] * 2) - - # a blas-compatible contraction broadcasting case which was failing - # for optimize=True (ticket #10930) - x = np.array([2., 3.]) - y = np.array([4.]) - assert_array_equal(np.einsum("i, i", x, y, optimize=False), 20.) 
- assert_array_equal(np.einsum("i, i", x, y, optimize=True), 20.) - - # all-ones array was bypassing bug (ticket #10930) - p = np.ones((1, 5)) / 2 - q = np.ones((5, 5)) / 2 - for optimize in (True, False): - assert_array_equal(np.einsum("...ij,...jk->...ik", p, p, - optimize=optimize), - np.einsum("...ij,...jk->...ik", p, q, - optimize=optimize)) - assert_array_equal(np.einsum("...ij,...jk->...ik", p, q, - optimize=optimize), - np.full((1, 5), 1.25)) - - # Cases which were failing (gh-10899) - x = np.eye(2, dtype=dtype) - y = np.ones(2, dtype=dtype) - assert_array_equal(np.einsum("ji,i->", x, y, optimize=optimize), - [2.]) # contig_contig_outstride0_two - assert_array_equal(np.einsum("i,ij->", y, x, optimize=optimize), - [2.]) # stride0_contig_outstride0_two - assert_array_equal(np.einsum("ij,i->", x, y, optimize=optimize), - [2.]) # contig_stride0_outstride0_two - - def test_einsum_sums_int8(self): - self.check_einsum_sums('i1') - - def test_einsum_sums_uint8(self): - self.check_einsum_sums('u1') - - def test_einsum_sums_int16(self): - self.check_einsum_sums('i2') - - def test_einsum_sums_uint16(self): - self.check_einsum_sums('u2') - - def test_einsum_sums_int32(self): - self.check_einsum_sums('i4') - self.check_einsum_sums('i4', True) - - def test_einsum_sums_uint32(self): - self.check_einsum_sums('u4') - self.check_einsum_sums('u4', True) - - def test_einsum_sums_int64(self): - self.check_einsum_sums('i8') - - def test_einsum_sums_uint64(self): - self.check_einsum_sums('u8') - - def test_einsum_sums_float16(self): - self.check_einsum_sums('f2') - - def test_einsum_sums_float32(self): - self.check_einsum_sums('f4') - - def test_einsum_sums_float64(self): - self.check_einsum_sums('f8') - self.check_einsum_sums('f8', True) - - def test_einsum_sums_longdouble(self): - self.check_einsum_sums(np.longdouble) - - def test_einsum_sums_cfloat64(self): - self.check_einsum_sums('c8') - self.check_einsum_sums('c8', True) - - def test_einsum_sums_cfloat128(self): - 
self.check_einsum_sums('c16') - - def test_einsum_sums_clongdouble(self): - self.check_einsum_sums(np.clongdouble) - - def test_einsum_misc(self): - # This call used to crash because of a bug in - # PyArray_AssignZero - a = np.ones((1, 2)) - b = np.ones((2, 2, 1)) - assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]]) - assert_equal(np.einsum('ij...,j...->i...', a, b, optimize=True), [[[2], [2]]]) - - # Regression test for issue #10369 (test unicode inputs with Python 2) - assert_equal(np.einsum(u'ij...,j...->i...', a, b), [[[2], [2]]]) - assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4]), 20) - assert_equal(np.einsum(u'...i,...i', [1, 2, 3], [2, 3, 4]), 20) - assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4], - optimize=u'greedy'), 20) - - # The iterator had an issue with buffering this reduction - a = np.ones((5, 12, 4, 2, 3), np.int64) - b = np.ones((5, 12, 11), np.int64) - assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b), - np.einsum('ijklm,ijn->', a, b)) - assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b, optimize=True), - np.einsum('ijklm,ijn->', a, b, optimize=True)) - - # Issue #2027, was a problem in the contiguous 3-argument - # inner loop implementation - a = np.arange(1, 3) - b = np.arange(1, 5).reshape(2, 2) - c = np.arange(1, 9).reshape(4, 2) - assert_equal(np.einsum('x,yx,zx->xzy', a, b, c), - [[[1, 3], [3, 9], [5, 15], [7, 21]], - [[8, 16], [16, 32], [24, 48], [32, 64]]]) - assert_equal(np.einsum('x,yx,zx->xzy', a, b, c, optimize=True), - [[[1, 3], [3, 9], [5, 15], [7, 21]], - [[8, 16], [16, 32], [24, 48], [32, 64]]]) - - def test_subscript_range(self): - # Issue #7741, make sure that all letters of Latin alphabet (both uppercase & lowercase) can be used - # when creating a subscript from arrays - a = np.ones((2, 3)) - b = np.ones((3, 4)) - np.einsum(a, [0, 20], b, [20, 2], [0, 2], optimize=False) - np.einsum(a, [0, 27], b, [27, 2], [0, 2], optimize=False) - np.einsum(a, [0, 51], b, [51, 2], [0, 2], optimize=False) - 
assert_raises(ValueError, lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False)) - assert_raises(ValueError, lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False)) - - def test_einsum_broadcast(self): - # Issue #2455 change in handling ellipsis - # remove the 'middle broadcast' error - # only use the 'RIGHT' iteration in prepare_op_axes - # adds auto broadcast on left where it belongs - # broadcast on right has to be explicit - # We need to test the optimized parsing as well - - A = np.arange(2 * 3 * 4).reshape(2, 3, 4) - B = np.arange(3) - ref = np.einsum('ijk,j->ijk', A, B, optimize=False) - for opt in [True, False]: - assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=opt), ref) - assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=opt), ref) - assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref) # used to raise error - - A = np.arange(12).reshape((4, 3)) - B = np.arange(6).reshape((3, 2)) - ref = np.einsum('ik,kj->ij', A, B, optimize=False) - for opt in [True, False]: - assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=opt), ref) - assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=opt), ref) - assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref) # used to raise error - assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref) # used to raise error - - dims = [2, 3, 4, 5] - a = np.arange(np.prod(dims)).reshape(dims) - v = np.arange(dims[2]) - ref = np.einsum('ijkl,k->ijl', a, v, optimize=False) - for opt in [True, False]: - assert_equal(np.einsum('ijkl,k', a, v, optimize=opt), ref) - assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref) # used to raise error - assert_equal(np.einsum('...kl,k...', a, v, optimize=opt), ref) - - J, K, M = 160, 160, 120 - A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M) - B = np.arange(J * K * M * 3).reshape(J, K, M, 3) - ref = np.einsum('...lmn,...lmno->...o', A, B, optimize=False) - for opt in [True, False]: - 
assert_equal(np.einsum('...lmn,lmno->...o', A, B, - optimize=opt), ref) # used to raise error - - def test_einsum_fixedstridebug(self): - # Issue #4485 obscure einsum bug - # This case revealed a bug in nditer where it reported a stride - # as 'fixed' (0) when it was in fact not fixed during processing - # (0 or 4). The reason for the bug was that the check for a fixed - # stride was using the information from the 2D inner loop reuse - # to restrict the iteration dimensions it had to validate to be - # the same, but that 2D inner loop reuse logic is only triggered - # during the buffer copying step, and hence it was invalid to - # rely on those values. The fix is to check all the dimensions - # of the stride in question, which in the test case reveals that - # the stride is not fixed. - # - # NOTE: This test is triggered by the fact that the default buffersize, - # used by einsum, is 8192, and 3*2731 = 8193, is larger than that - # and results in a mismatch between the buffering and the - # striding for operand A. - A = np.arange(2 * 3).reshape(2, 3).astype(np.float32) - B = np.arange(2 * 3 * 2731).reshape(2, 3, 2731).astype(np.int16) - es = np.einsum('cl, cpx->lpx', A, B) - tp = np.tensordot(A, B, axes=(0, 0)) - assert_equal(es, tp) - # The following is the original test case from the bug report, - # made repeatable by changing random arrays to aranges. - A = np.arange(3 * 3).reshape(3, 3).astype(np.float64) - B = np.arange(3 * 3 * 64 * 64).reshape(3, 3, 64, 64).astype(np.float32) - es = np.einsum('cl, cpxy->lpxy', A, B) - tp = np.tensordot(A, B, axes=(0, 0)) - assert_equal(es, tp) - - def test_einsum_fixed_collapsingbug(self): - # Issue #5147. - # The bug only occurred when output argument of einssum was used. 
- x = np.random.normal(0, 1, (5, 5, 5, 5)) - y1 = np.zeros((5, 5)) - np.einsum('aabb->ab', x, out=y1) - idx = np.arange(5) - y2 = x[idx[:, None], idx[:, None], idx, idx] - assert_equal(y1, y2) - - def test_einsum_failed_on_p9_and_s390x(self): - # Issues gh-14692 and gh-12689 - # Bug with signed vs unsigned char errored on power9 and s390x Linux - tensor = np.random.random_sample((10, 10, 10, 10)) - x = np.einsum('ijij->', tensor) - y = tensor.trace(axis1=0, axis2=2).trace() - assert_allclose(x, y) - - def test_einsum_all_contig_non_contig_output(self): - # Issue gh-5907, tests that the all contiguous special case - # actually checks the contiguity of the output - x = np.ones((5, 5)) - out = np.ones(10)[::2] - correct_base = np.ones(10) - correct_base[::2] = 5 - # Always worked (inner iteration is done with 0-stride): - np.einsum('mi,mi,mi->m', x, x, x, out=out) - assert_array_equal(out.base, correct_base) - # Example 1: - out = np.ones(10)[::2] - np.einsum('im,im,im->m', x, x, x, out=out) - assert_array_equal(out.base, correct_base) - # Example 2, buffering causes x to be contiguous but - # special cases do not catch the operation before: - out = np.ones((2, 2, 2))[..., 0] - correct_base = np.ones((2, 2, 2)) - correct_base[..., 0] = 2 - x = np.ones((2, 2), np.float32) - np.einsum('ij,jk->ik', x, x, out=out) - assert_array_equal(out.base, correct_base) - - def test_small_boolean_arrays(self): - # See gh-5946. - # Use array of True embedded in False. - a = np.zeros((16, 1, 1), dtype=np.bool_)[:2] - a[...] 
= True - out = np.zeros((16, 1, 1), dtype=np.bool_)[:2] - tgt = np.ones((2, 1, 1), dtype=np.bool_) - res = np.einsum('...ij,...jk->...ik', a, a, out=out) - assert_equal(res, tgt) - - def test_out_is_res(self): - a = np.arange(9).reshape(3, 3) - res = np.einsum('...ij,...jk->...ik', a, a, out=a) - assert res is a - - def optimize_compare(self, subscripts, operands=None): - # Tests all paths of the optimization function against - # conventional einsum - if operands is None: - args = [subscripts] - terms = subscripts.split('->')[0].split(',') - for term in terms: - dims = [global_size_dict[x] for x in term] - args.append(np.random.rand(*dims)) - else: - args = [subscripts] + operands - - noopt = np.einsum(*args, optimize=False) - opt = np.einsum(*args, optimize='greedy') - assert_almost_equal(opt, noopt) - opt = np.einsum(*args, optimize='optimal') - assert_almost_equal(opt, noopt) - - def test_hadamard_like_products(self): - # Hadamard outer products - self.optimize_compare('a,ab,abc->abc') - self.optimize_compare('a,b,ab->ab') - - def test_index_transformations(self): - # Simple index transformation cases - self.optimize_compare('ea,fb,gc,hd,abcd->efgh') - self.optimize_compare('ea,fb,abcd,gc,hd->efgh') - self.optimize_compare('abcd,ea,fb,gc,hd->efgh') - - def test_complex(self): - # Long test cases - self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac') - self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac') - self.optimize_compare('cd,bdhe,aidb,hgca,gc,hgibcd,hgac') - self.optimize_compare('abhe,hidj,jgba,hiab,gab') - self.optimize_compare('bde,cdh,agdb,hica,ibd,hgicd,hiac') - self.optimize_compare('chd,bde,agbc,hiad,hgc,hgi,hiad') - self.optimize_compare('chd,bde,agbc,hiad,bdi,cgh,agdb') - self.optimize_compare('bdhe,acad,hiab,agac,hibd') - - def test_collapse(self): - # Inner products - self.optimize_compare('ab,ab,c->') - self.optimize_compare('ab,ab,c->c') - self.optimize_compare('ab,ab,cd,cd->') - self.optimize_compare('ab,ab,cd,cd->ac') - 
self.optimize_compare('ab,ab,cd,cd->cd') - self.optimize_compare('ab,ab,cd,cd,ef,ef->') - - def test_expand(self): - # Outer products - self.optimize_compare('ab,cd,ef->abcdef') - self.optimize_compare('ab,cd,ef->acdf') - self.optimize_compare('ab,cd,de->abcde') - self.optimize_compare('ab,cd,de->be') - self.optimize_compare('ab,bcd,cd->abcd') - self.optimize_compare('ab,bcd,cd->abd') - - def test_edge_cases(self): - # Difficult edge cases for optimization - self.optimize_compare('eb,cb,fb->cef') - self.optimize_compare('dd,fb,be,cdb->cef') - self.optimize_compare('bca,cdb,dbf,afc->') - self.optimize_compare('dcc,fce,ea,dbf->ab') - self.optimize_compare('fdf,cdd,ccd,afe->ae') - self.optimize_compare('abcd,ad') - self.optimize_compare('ed,fcd,ff,bcf->be') - self.optimize_compare('baa,dcf,af,cde->be') - self.optimize_compare('bd,db,eac->ace') - self.optimize_compare('fff,fae,bef,def->abd') - self.optimize_compare('efc,dbc,acf,fd->abe') - self.optimize_compare('ba,ac,da->bcd') - - def test_inner_product(self): - # Inner products - self.optimize_compare('ab,ab') - self.optimize_compare('ab,ba') - self.optimize_compare('abc,abc') - self.optimize_compare('abc,bac') - self.optimize_compare('abc,cba') - - def test_random_cases(self): - # Randomly built test cases - self.optimize_compare('aab,fa,df,ecc->bde') - self.optimize_compare('ecb,fef,bad,ed->ac') - self.optimize_compare('bcf,bbb,fbf,fc->') - self.optimize_compare('bb,ff,be->e') - self.optimize_compare('bcb,bb,fc,fff->') - self.optimize_compare('fbb,dfd,fc,fc->') - self.optimize_compare('afd,ba,cc,dc->bf') - self.optimize_compare('adb,bc,fa,cfc->d') - self.optimize_compare('bbd,bda,fc,db->acf') - self.optimize_compare('dba,ead,cad->bce') - self.optimize_compare('aef,fbc,dca->bde') - - def test_combined_views_mapping(self): - # gh-10792 - a = np.arange(9).reshape(1, 1, 3, 1, 3) - b = np.einsum('bbcdc->d', a) - assert_equal(b, [12]) - - def test_broadcasting_dot_cases(self): - # Ensures broadcasting cases are not 
mistaken for GEMM - - a = np.random.rand(1, 5, 4) - b = np.random.rand(4, 6) - c = np.random.rand(5, 6) - d = np.random.rand(10) - - self.optimize_compare('ijk,kl,jl', operands=[a, b, c]) - self.optimize_compare('ijk,kl,jl,i->i', operands=[a, b, c, d]) - - e = np.random.rand(1, 1, 5, 4) - f = np.random.rand(7, 7) - self.optimize_compare('abjk,kl,jl', operands=[e, b, c]) - self.optimize_compare('abjk,kl,jl,ab->ab', operands=[e, b, c, f]) - - # Edge case found in gh-11308 - g = np.arange(64).reshape(2, 4, 8) - self.optimize_compare('obk,ijk->ioj', operands=[g, g]) - - -class TestEinsumPath(object): - def build_operands(self, string, size_dict=global_size_dict): - - # Builds views based off initial operands - operands = [string] - terms = string.split('->')[0].split(',') - for term in terms: - dims = [size_dict[x] for x in term] - operands.append(np.random.rand(*dims)) - - return operands - - def assert_path_equal(self, comp, benchmark): - # Checks if list of tuples are equivalent - ret = (len(comp) == len(benchmark)) - assert_(ret) - for pos in range(len(comp) - 1): - ret &= isinstance(comp[pos + 1], tuple) - ret &= (comp[pos + 1] == benchmark[pos + 1]) - assert_(ret) - - def test_memory_contraints(self): - # Ensure memory constraints are satisfied - - outer_test = self.build_operands('a,b,c->abc') - - path, path_str = np.einsum_path(*outer_test, optimize=('greedy', 0)) - self.assert_path_equal(path, ['einsum_path', (0, 1, 2)]) - - path, path_str = np.einsum_path(*outer_test, optimize=('optimal', 0)) - self.assert_path_equal(path, ['einsum_path', (0, 1, 2)]) - - long_test = self.build_operands('acdf,jbje,gihb,hfac') - path, path_str = np.einsum_path(*long_test, optimize=('greedy', 0)) - self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)]) - - path, path_str = np.einsum_path(*long_test, optimize=('optimal', 0)) - self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)]) - - def test_long_paths(self): - # Long complex cases - - # Long test 1 - long_test1 = 
self.build_operands('acdf,jbje,gihb,hfac,gfac,gifabc,hfac') - path, path_str = np.einsum_path(*long_test1, optimize='greedy') - self.assert_path_equal(path, ['einsum_path', - (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)]) - - path, path_str = np.einsum_path(*long_test1, optimize='optimal') - self.assert_path_equal(path, ['einsum_path', - (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)]) - - # Long test 2 - long_test2 = self.build_operands('chd,bde,agbc,hiad,bdi,cgh,agdb') - path, path_str = np.einsum_path(*long_test2, optimize='greedy') - print(path) - self.assert_path_equal(path, ['einsum_path', - (3, 4), (0, 3), (3, 4), (1, 3), (1, 2), (0, 1)]) - - path, path_str = np.einsum_path(*long_test2, optimize='optimal') - print(path) - self.assert_path_equal(path, ['einsum_path', - (0, 5), (1, 4), (3, 4), (1, 3), (1, 2), (0, 1)]) - - def test_edge_paths(self): - # Difficult edge cases - - # Edge test1 - edge_test1 = self.build_operands('eb,cb,fb->cef') - path, path_str = np.einsum_path(*edge_test1, optimize='greedy') - self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)]) - - path, path_str = np.einsum_path(*edge_test1, optimize='optimal') - self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)]) - - # Edge test2 - edge_test2 = self.build_operands('dd,fb,be,cdb->cef') - path, path_str = np.einsum_path(*edge_test2, optimize='greedy') - self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)]) - - path, path_str = np.einsum_path(*edge_test2, optimize='optimal') - self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)]) - - # Edge test3 - edge_test3 = self.build_operands('bca,cdb,dbf,afc->') - path, path_str = np.einsum_path(*edge_test3, optimize='greedy') - self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)]) - - path, path_str = np.einsum_path(*edge_test3, optimize='optimal') - self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)]) - - # Edge test4 - edge_test4 = 
self.build_operands('dcc,fce,ea,dbf->ab') - path, path_str = np.einsum_path(*edge_test4, optimize='greedy') - self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)]) - - path, path_str = np.einsum_path(*edge_test4, optimize='optimal') - self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)]) - - # Edge test5 - edge_test4 = self.build_operands('a,ac,ab,ad,cd,bd,bc->', - size_dict={"a": 20, "b": 20, "c": 20, "d": 20}) - path, path_str = np.einsum_path(*edge_test4, optimize='greedy') - self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)]) - - path, path_str = np.einsum_path(*edge_test4, optimize='optimal') - self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)]) - - def test_path_type_input(self): - # Test explicit path handeling - path_test = self.build_operands('dcc,fce,ea,dbf->ab') - - path, path_str = np.einsum_path(*path_test, optimize=False) - self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)]) - - path, path_str = np.einsum_path(*path_test, optimize=True) - self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)]) - - exp_path = ['einsum_path', (0, 2), (0, 2), (0, 1)] - path, path_str = np.einsum_path(*path_test, optimize=exp_path) - self.assert_path_equal(path, exp_path) - - # Double check einsum works on the input path - noopt = np.einsum(*path_test, optimize=False) - opt = np.einsum(*path_test, optimize=exp_path) - assert_almost_equal(noopt, opt) - - def test_spaces(self): - #gh-10794 - arr = np.array([[1]]) - for sp in itertools.product(['', ' '], repeat=4): - # no error for any spacing - np.einsum('{}...a{}->{}...a{}'.format(*sp), arr) - -def test_overlap(): - a = np.arange(9, dtype=int).reshape(3, 3) - b = np.arange(9, dtype=int).reshape(3, 3) - d = np.dot(a, b) - # sanity check - c = np.einsum('ij,jk->ik', a, b) - assert_equal(c, d) - #gh-10080, out overlaps one of the operands - c = np.einsum('ij,jk->ik', a, b, out=b) - assert_equal(c, d) diff --git 
a/venv/lib/python3.7/site-packages/numpy/core/tests/test_errstate.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_errstate.py deleted file mode 100644 index 0008c4c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_errstate.py +++ /dev/null @@ -1,49 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import platform -import pytest - -import numpy as np -from numpy.testing import assert_, assert_raises - - -class TestErrstate(object): - @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.") - def test_invalid(self): - with np.errstate(all='raise', under='ignore'): - a = -np.arange(3) - # This should work - with np.errstate(invalid='ignore'): - np.sqrt(a) - # While this should fail! - with assert_raises(FloatingPointError): - np.sqrt(a) - - def test_divide(self): - with np.errstate(all='raise', under='ignore'): - a = -np.arange(3) - # This should work - with np.errstate(divide='ignore'): - a // 0 - # While this should fail! 
- with assert_raises(FloatingPointError): - a // 0 - - def test_errcall(self): - def foo(*args): - print(args) - - olderrcall = np.geterrcall() - with np.errstate(call=foo): - assert_(np.geterrcall() is foo, 'call is not foo') - with np.errstate(call=None): - assert_(np.geterrcall() is None, 'call is not None') - assert_(np.geterrcall() is olderrcall, 'call is not olderrcall') - - def test_errstate_decorator(self): - @np.errstate(all='ignore') - def foo(): - a = -np.arange(3) - a // 0 - - foo() diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_extint128.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_extint128.py deleted file mode 100644 index 7c454a6..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_extint128.py +++ /dev/null @@ -1,221 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import itertools -import contextlib -import operator -import pytest - -import numpy as np -import numpy.core._multiarray_tests as mt - -from numpy.testing import assert_raises, assert_equal - - -INT64_MAX = np.iinfo(np.int64).max -INT64_MIN = np.iinfo(np.int64).min -INT64_MID = 2**32 - -# int128 is not two's complement, the sign bit is separate -INT128_MAX = 2**128 - 1 -INT128_MIN = -INT128_MAX -INT128_MID = 2**64 - -INT64_VALUES = ( - [INT64_MIN + j for j in range(20)] + - [INT64_MAX - j for j in range(20)] + - [INT64_MID + j for j in range(-20, 20)] + - [2*INT64_MID + j for j in range(-20, 20)] + - [INT64_MID//2 + j for j in range(-20, 20)] + - list(range(-70, 70)) -) - -INT128_VALUES = ( - [INT128_MIN + j for j in range(20)] + - [INT128_MAX - j for j in range(20)] + - [INT128_MID + j for j in range(-20, 20)] + - [2*INT128_MID + j for j in range(-20, 20)] + - [INT128_MID//2 + j for j in range(-20, 20)] + - list(range(-70, 70)) + - [False] # negative zero -) - -INT64_POS_VALUES = [x for x in INT64_VALUES if x > 0] - - -@contextlib.contextmanager -def exc_iter(*args): - """ - Iterate over Cartesian product 
of *args, and if an exception is raised, - add information of the current iterate. - """ - - value = [None] - - def iterate(): - for v in itertools.product(*args): - value[0] = v - yield v - - try: - yield iterate() - except Exception: - import traceback - msg = "At: %r\n%s" % (repr(value[0]), - traceback.format_exc()) - raise AssertionError(msg) - - -def test_safe_binop(): - # Test checked arithmetic routines - - ops = [ - (operator.add, 1), - (operator.sub, 2), - (operator.mul, 3) - ] - - with exc_iter(ops, INT64_VALUES, INT64_VALUES) as it: - for xop, a, b in it: - pyop, op = xop - c = pyop(a, b) - - if not (INT64_MIN <= c <= INT64_MAX): - assert_raises(OverflowError, mt.extint_safe_binop, a, b, op) - else: - d = mt.extint_safe_binop(a, b, op) - if c != d: - # assert_equal is slow - assert_equal(d, c) - - -def test_to_128(): - with exc_iter(INT64_VALUES) as it: - for a, in it: - b = mt.extint_to_128(a) - if a != b: - assert_equal(b, a) - - -def test_to_64(): - with exc_iter(INT128_VALUES) as it: - for a, in it: - if not (INT64_MIN <= a <= INT64_MAX): - assert_raises(OverflowError, mt.extint_to_64, a) - else: - b = mt.extint_to_64(a) - if a != b: - assert_equal(b, a) - - -def test_mul_64_64(): - with exc_iter(INT64_VALUES, INT64_VALUES) as it: - for a, b in it: - c = a * b - d = mt.extint_mul_64_64(a, b) - if c != d: - assert_equal(d, c) - - -def test_add_128(): - with exc_iter(INT128_VALUES, INT128_VALUES) as it: - for a, b in it: - c = a + b - if not (INT128_MIN <= c <= INT128_MAX): - assert_raises(OverflowError, mt.extint_add_128, a, b) - else: - d = mt.extint_add_128(a, b) - if c != d: - assert_equal(d, c) - - -def test_sub_128(): - with exc_iter(INT128_VALUES, INT128_VALUES) as it: - for a, b in it: - c = a - b - if not (INT128_MIN <= c <= INT128_MAX): - assert_raises(OverflowError, mt.extint_sub_128, a, b) - else: - d = mt.extint_sub_128(a, b) - if c != d: - assert_equal(d, c) - - -def test_neg_128(): - with exc_iter(INT128_VALUES) as it: - for a, in it: - 
b = -a - c = mt.extint_neg_128(a) - if b != c: - assert_equal(c, b) - - -def test_shl_128(): - with exc_iter(INT128_VALUES) as it: - for a, in it: - if a < 0: - b = -(((-a) << 1) & (2**128-1)) - else: - b = (a << 1) & (2**128-1) - c = mt.extint_shl_128(a) - if b != c: - assert_equal(c, b) - - -def test_shr_128(): - with exc_iter(INT128_VALUES) as it: - for a, in it: - if a < 0: - b = -((-a) >> 1) - else: - b = a >> 1 - c = mt.extint_shr_128(a) - if b != c: - assert_equal(c, b) - - -def test_gt_128(): - with exc_iter(INT128_VALUES, INT128_VALUES) as it: - for a, b in it: - c = a > b - d = mt.extint_gt_128(a, b) - if c != d: - assert_equal(d, c) - - -@pytest.mark.slow -def test_divmod_128_64(): - with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it: - for a, b in it: - if a >= 0: - c, cr = divmod(a, b) - else: - c, cr = divmod(-a, b) - c = -c - cr = -cr - - d, dr = mt.extint_divmod_128_64(a, b) - - if c != d or d != dr or b*d + dr != a: - assert_equal(d, c) - assert_equal(dr, cr) - assert_equal(b*d + dr, a) - - -def test_floordiv_128_64(): - with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it: - for a, b in it: - c = a // b - d = mt.extint_floordiv_128_64(a, b) - - if c != d: - assert_equal(d, c) - - -def test_ceildiv_128_64(): - with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it: - for a, b in it: - c = (a + b - 1) // b - d = mt.extint_ceildiv_128_64(a, b) - - if c != d: - assert_equal(d, c) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_function_base.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_function_base.py deleted file mode 100644 index c8a7cb6..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_function_base.py +++ /dev/null @@ -1,373 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy import ( - logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan, - ndarray, sqrt, nextafter, stack - ) -from numpy.testing import ( - assert_, assert_equal, 
assert_raises, assert_array_equal, assert_allclose, - suppress_warnings - ) - - -class PhysicalQuantity(float): - def __new__(cls, value): - return float.__new__(cls, value) - - def __add__(self, x): - assert_(isinstance(x, PhysicalQuantity)) - return PhysicalQuantity(float(x) + float(self)) - __radd__ = __add__ - - def __sub__(self, x): - assert_(isinstance(x, PhysicalQuantity)) - return PhysicalQuantity(float(self) - float(x)) - - def __rsub__(self, x): - assert_(isinstance(x, PhysicalQuantity)) - return PhysicalQuantity(float(x) - float(self)) - - def __mul__(self, x): - return PhysicalQuantity(float(x) * float(self)) - __rmul__ = __mul__ - - def __div__(self, x): - return PhysicalQuantity(float(self) / float(x)) - - def __rdiv__(self, x): - return PhysicalQuantity(float(x) / float(self)) - - -class PhysicalQuantity2(ndarray): - __array_priority__ = 10 - - -class TestLogspace(object): - - def test_basic(self): - y = logspace(0, 6) - assert_(len(y) == 50) - y = logspace(0, 6, num=100) - assert_(y[-1] == 10 ** 6) - y = logspace(0, 6, endpoint=False) - assert_(y[-1] < 10 ** 6) - y = logspace(0, 6, num=7) - assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6]) - - def test_start_stop_array(self): - start = array([0., 1.]) - stop = array([6., 7.]) - t1 = logspace(start, stop, 6) - t2 = stack([logspace(_start, _stop, 6) - for _start, _stop in zip(start, stop)], axis=1) - assert_equal(t1, t2) - t3 = logspace(start, stop[0], 6) - t4 = stack([logspace(_start, stop[0], 6) - for _start in start], axis=1) - assert_equal(t3, t4) - t5 = logspace(start, stop, 6, axis=-1) - assert_equal(t5, t2.T) - - def test_dtype(self): - y = logspace(0, 6, dtype='float32') - assert_equal(y.dtype, dtype('float32')) - y = logspace(0, 6, dtype='float64') - assert_equal(y.dtype, dtype('float64')) - y = logspace(0, 6, dtype='int32') - assert_equal(y.dtype, dtype('int32')) - - def test_physical_quantities(self): - a = PhysicalQuantity(1.0) - b = PhysicalQuantity(5.0) - assert_equal(logspace(a, 
b), logspace(1.0, 5.0)) - - def test_subclass(self): - a = array(1).view(PhysicalQuantity2) - b = array(7).view(PhysicalQuantity2) - ls = logspace(a, b) - assert type(ls) is PhysicalQuantity2 - assert_equal(ls, logspace(1.0, 7.0)) - ls = logspace(a, b, 1) - assert type(ls) is PhysicalQuantity2 - assert_equal(ls, logspace(1.0, 7.0, 1)) - - -class TestGeomspace(object): - - def test_basic(self): - y = geomspace(1, 1e6) - assert_(len(y) == 50) - y = geomspace(1, 1e6, num=100) - assert_(y[-1] == 10 ** 6) - y = geomspace(1, 1e6, endpoint=False) - assert_(y[-1] < 10 ** 6) - y = geomspace(1, 1e6, num=7) - assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6]) - - y = geomspace(8, 2, num=3) - assert_allclose(y, [8, 4, 2]) - assert_array_equal(y.imag, 0) - - y = geomspace(-1, -100, num=3) - assert_array_equal(y, [-1, -10, -100]) - assert_array_equal(y.imag, 0) - - y = geomspace(-100, -1, num=3) - assert_array_equal(y, [-100, -10, -1]) - assert_array_equal(y.imag, 0) - - def test_complex(self): - # Purely imaginary - y = geomspace(1j, 16j, num=5) - assert_allclose(y, [1j, 2j, 4j, 8j, 16j]) - assert_array_equal(y.real, 0) - - y = geomspace(-4j, -324j, num=5) - assert_allclose(y, [-4j, -12j, -36j, -108j, -324j]) - assert_array_equal(y.real, 0) - - y = geomspace(1+1j, 1000+1000j, num=4) - assert_allclose(y, [1+1j, 10+10j, 100+100j, 1000+1000j]) - - y = geomspace(-1+1j, -1000+1000j, num=4) - assert_allclose(y, [-1+1j, -10+10j, -100+100j, -1000+1000j]) - - # Logarithmic spirals - y = geomspace(-1, 1, num=3, dtype=complex) - assert_allclose(y, [-1, 1j, +1]) - - y = geomspace(0+3j, -3+0j, 3) - assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j]) - y = geomspace(0+3j, 3+0j, 3) - assert_allclose(y, [0+3j, 3/sqrt(2)+3j/sqrt(2), 3+0j]) - y = geomspace(-3+0j, 0-3j, 3) - assert_allclose(y, [-3+0j, -3/sqrt(2)-3j/sqrt(2), 0-3j]) - y = geomspace(0+3j, -3+0j, 3) - assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j]) - y = geomspace(-2-3j, 5+7j, 7) - assert_allclose(y, [-2-3j, 
-0.29058977-4.15771027j, - 2.08885354-4.34146838j, 4.58345529-3.16355218j, - 6.41401745-0.55233457j, 6.75707386+3.11795092j, - 5+7j]) - - # Type promotion should prevent the -5 from becoming a NaN - y = geomspace(3j, -5, 2) - assert_allclose(y, [3j, -5]) - y = geomspace(-5, 3j, 2) - assert_allclose(y, [-5, 3j]) - - def test_dtype(self): - y = geomspace(1, 1e6, dtype='float32') - assert_equal(y.dtype, dtype('float32')) - y = geomspace(1, 1e6, dtype='float64') - assert_equal(y.dtype, dtype('float64')) - y = geomspace(1, 1e6, dtype='int32') - assert_equal(y.dtype, dtype('int32')) - - # Native types - y = geomspace(1, 1e6, dtype=float) - assert_equal(y.dtype, dtype('float_')) - y = geomspace(1, 1e6, dtype=complex) - assert_equal(y.dtype, dtype('complex')) - - def test_start_stop_array_scalar(self): - lim1 = array([120, 100], dtype="int8") - lim2 = array([-120, -100], dtype="int8") - lim3 = array([1200, 1000], dtype="uint16") - t1 = geomspace(lim1[0], lim1[1], 5) - t2 = geomspace(lim2[0], lim2[1], 5) - t3 = geomspace(lim3[0], lim3[1], 5) - t4 = geomspace(120.0, 100.0, 5) - t5 = geomspace(-120.0, -100.0, 5) - t6 = geomspace(1200.0, 1000.0, 5) - - # t3 uses float32, t6 uses float64 - assert_allclose(t1, t4, rtol=1e-2) - assert_allclose(t2, t5, rtol=1e-2) - assert_allclose(t3, t6, rtol=1e-5) - - def test_start_stop_array(self): - # Try to use all special cases. 
- start = array([1.e0, 32., 1j, -4j, 1+1j, -1]) - stop = array([1.e4, 2., 16j, -324j, 10000+10000j, 1]) - t1 = geomspace(start, stop, 5) - t2 = stack([geomspace(_start, _stop, 5) - for _start, _stop in zip(start, stop)], axis=1) - assert_equal(t1, t2) - t3 = geomspace(start, stop[0], 5) - t4 = stack([geomspace(_start, stop[0], 5) - for _start in start], axis=1) - assert_equal(t3, t4) - t5 = geomspace(start, stop, 5, axis=-1) - assert_equal(t5, t2.T) - - def test_physical_quantities(self): - a = PhysicalQuantity(1.0) - b = PhysicalQuantity(5.0) - assert_equal(geomspace(a, b), geomspace(1.0, 5.0)) - - def test_subclass(self): - a = array(1).view(PhysicalQuantity2) - b = array(7).view(PhysicalQuantity2) - gs = geomspace(a, b) - assert type(gs) is PhysicalQuantity2 - assert_equal(gs, geomspace(1.0, 7.0)) - gs = geomspace(a, b, 1) - assert type(gs) is PhysicalQuantity2 - assert_equal(gs, geomspace(1.0, 7.0, 1)) - - def test_bounds(self): - assert_raises(ValueError, geomspace, 0, 10) - assert_raises(ValueError, geomspace, 10, 0) - assert_raises(ValueError, geomspace, 0, 0) - - -class TestLinspace(object): - - def test_basic(self): - y = linspace(0, 10) - assert_(len(y) == 50) - y = linspace(2, 10, num=100) - assert_(y[-1] == 10) - y = linspace(2, 10, endpoint=False) - assert_(y[-1] < 10) - assert_raises(ValueError, linspace, 0, 10, num=-1) - - def test_corner(self): - y = list(linspace(0, 1, 1)) - assert_(y == [0.0], y) - assert_raises(TypeError, linspace, 0, 1, num=2.5) - - def test_type(self): - t1 = linspace(0, 1, 0).dtype - t2 = linspace(0, 1, 1).dtype - t3 = linspace(0, 1, 2).dtype - assert_equal(t1, t2) - assert_equal(t2, t3) - - def test_dtype(self): - y = linspace(0, 6, dtype='float32') - assert_equal(y.dtype, dtype('float32')) - y = linspace(0, 6, dtype='float64') - assert_equal(y.dtype, dtype('float64')) - y = linspace(0, 6, dtype='int32') - assert_equal(y.dtype, dtype('int32')) - - def test_start_stop_array_scalar(self): - lim1 = array([-120, 100], 
dtype="int8") - lim2 = array([120, -100], dtype="int8") - lim3 = array([1200, 1000], dtype="uint16") - t1 = linspace(lim1[0], lim1[1], 5) - t2 = linspace(lim2[0], lim2[1], 5) - t3 = linspace(lim3[0], lim3[1], 5) - t4 = linspace(-120.0, 100.0, 5) - t5 = linspace(120.0, -100.0, 5) - t6 = linspace(1200.0, 1000.0, 5) - assert_equal(t1, t4) - assert_equal(t2, t5) - assert_equal(t3, t6) - - def test_start_stop_array(self): - start = array([-120, 120], dtype="int8") - stop = array([100, -100], dtype="int8") - t1 = linspace(start, stop, 5) - t2 = stack([linspace(_start, _stop, 5) - for _start, _stop in zip(start, stop)], axis=1) - assert_equal(t1, t2) - t3 = linspace(start, stop[0], 5) - t4 = stack([linspace(_start, stop[0], 5) - for _start in start], axis=1) - assert_equal(t3, t4) - t5 = linspace(start, stop, 5, axis=-1) - assert_equal(t5, t2.T) - - def test_complex(self): - lim1 = linspace(1 + 2j, 3 + 4j, 5) - t1 = array([1.0+2.j, 1.5+2.5j, 2.0+3j, 2.5+3.5j, 3.0+4j]) - lim2 = linspace(1j, 10, 5) - t2 = array([0.0+1.j, 2.5+0.75j, 5.0+0.5j, 7.5+0.25j, 10.0+0j]) - assert_equal(lim1, t1) - assert_equal(lim2, t2) - - def test_physical_quantities(self): - a = PhysicalQuantity(0.0) - b = PhysicalQuantity(1.0) - assert_equal(linspace(a, b), linspace(0.0, 1.0)) - - def test_subclass(self): - a = array(0).view(PhysicalQuantity2) - b = array(1).view(PhysicalQuantity2) - ls = linspace(a, b) - assert type(ls) is PhysicalQuantity2 - assert_equal(ls, linspace(0.0, 1.0)) - ls = linspace(a, b, 1) - assert type(ls) is PhysicalQuantity2 - assert_equal(ls, linspace(0.0, 1.0, 1)) - - def test_array_interface(self): - # Regression test for https://github.com/numpy/numpy/pull/6659 - # Ensure that start/stop can be objects that implement - # __array_interface__ and are convertible to numeric scalars - - class Arrayish(object): - """ - A generic object that supports the __array_interface__ and hence - can in principle be converted to a numeric scalar, but is not - otherwise recognized as 
numeric, but also happens to support - multiplication by floats. - - Data should be an object that implements the buffer interface, - and contains at least 4 bytes. - """ - - def __init__(self, data): - self._data = data - - @property - def __array_interface__(self): - return {'shape': (), 'typestr': ' 1) - assert_(info.minexp < -1) - assert_(info.maxexp > 1) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_half.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_half.py deleted file mode 100644 index 1e1e6d7..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_half.py +++ /dev/null @@ -1,518 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import platform -import pytest - -import numpy as np -from numpy import uint16, float16, float32, float64 -from numpy.testing import assert_, assert_equal - - -def assert_raises_fpe(strmatch, callable, *args, **kwargs): - try: - callable(*args, **kwargs) - except FloatingPointError as exc: - assert_(str(exc).find(strmatch) >= 0, - "Did not raise floating point %s error" % strmatch) - else: - assert_(False, - "Did not raise floating point %s error" % strmatch) - -class TestHalf(object): - def setup(self): - # An array of all possible float16 values - self.all_f16 = np.arange(0x10000, dtype=uint16) - self.all_f16.dtype = float16 - self.all_f32 = np.array(self.all_f16, dtype=float32) - self.all_f64 = np.array(self.all_f16, dtype=float64) - - # An array of all non-NaN float16 values, in sorted order - self.nonan_f16 = np.concatenate( - (np.arange(0xfc00, 0x7fff, -1, dtype=uint16), - np.arange(0x0000, 0x7c01, 1, dtype=uint16))) - self.nonan_f16.dtype = float16 - self.nonan_f32 = np.array(self.nonan_f16, dtype=float32) - self.nonan_f64 = np.array(self.nonan_f16, dtype=float64) - - # An array of all finite float16 values, in sorted order - self.finite_f16 = self.nonan_f16[1:-1] - self.finite_f32 = self.nonan_f32[1:-1] - self.finite_f64 = self.nonan_f64[1:-1] - - 
def test_half_conversions(self): - """Checks that all 16-bit values survive conversion - to/from 32-bit and 64-bit float""" - # Because the underlying routines preserve the NaN bits, every - # value is preserved when converting to/from other floats. - - # Convert from float32 back to float16 - b = np.array(self.all_f32, dtype=float16) - assert_equal(self.all_f16.view(dtype=uint16), - b.view(dtype=uint16)) - - # Convert from float64 back to float16 - b = np.array(self.all_f64, dtype=float16) - assert_equal(self.all_f16.view(dtype=uint16), - b.view(dtype=uint16)) - - # Convert float16 to longdouble and back - # This doesn't necessarily preserve the extra NaN bits, - # so exclude NaNs. - a_ld = np.array(self.nonan_f16, dtype=np.longdouble) - b = np.array(a_ld, dtype=float16) - assert_equal(self.nonan_f16.view(dtype=uint16), - b.view(dtype=uint16)) - - # Check the range for which all integers can be represented - i_int = np.arange(-2048, 2049) - i_f16 = np.array(i_int, dtype=float16) - j = np.array(i_f16, dtype=int) - assert_equal(i_int, j) - - @pytest.mark.parametrize("offset", [None, "up", "down"]) - @pytest.mark.parametrize("shift", [None, "up", "down"]) - @pytest.mark.parametrize("float_t", [np.float32, np.float64]) - def test_half_conversion_rounding(self, float_t, shift, offset): - # Assumes that round to even is used during casting. 
- max_pattern = np.float16(np.finfo(np.float16).max).view(np.uint16) - - # Test all (positive) finite numbers, denormals are most interesting - # however: - f16s_patterns = np.arange(0, max_pattern+1, dtype=np.uint16) - f16s_float = f16s_patterns.view(np.float16).astype(float_t) - - # Shift the values by half a bit up or a down (or do not shift), - if shift == "up": - f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[1:] - elif shift == "down": - f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[:-1] - else: - f16s_float = f16s_float[1:-1] - - # Increase the float by a minimal value: - if offset == "up": - f16s_float = np.nextafter(f16s_float, float_t(1e50)) - elif offset == "down": - f16s_float = np.nextafter(f16s_float, float_t(-1e50)) - - # Convert back to float16 and its bit pattern: - res_patterns = f16s_float.astype(np.float16).view(np.uint16) - - # The above calculations tries the original values, or the exact - # mid points between the float16 values. It then further offsets them - # by as little as possible. If no offset occurs, "round to even" - # logic will be necessary, an arbitrarily small offset should cause - # normal up/down rounding always. - - # Calculate the expected pattern: - cmp_patterns = f16s_patterns[1:-1].copy() - - if shift == "down" and offset != "up": - shift_pattern = -1 - elif shift == "up" and offset != "down": - shift_pattern = 1 - else: - # There cannot be a shift, either shift is None, so all rounding - # will go back to original, or shift is reduced by offset too much. - shift_pattern = 0 - - # If rounding occurs, is it normal rounding or round to even? - if offset is None: - # Round to even occurs, modify only non-even, cast to allow + (-1) - cmp_patterns[0::2].view(np.int16)[...] += shift_pattern - else: - cmp_patterns.view(np.int16)[...] 
+= shift_pattern - - assert_equal(res_patterns, cmp_patterns) - - @pytest.mark.parametrize(["float_t", "uint_t", "bits"], - [(np.float32, np.uint32, 23), - (np.float64, np.uint64, 52)]) - def test_half_conversion_denormal_round_even(self, float_t, uint_t, bits): - # Test specifically that all bits are considered when deciding - # whether round to even should occur (i.e. no bits are lost at the - # end. Compare also gh-12721. The most bits can get lost for the - # smallest denormal: - smallest_value = np.uint16(1).view(np.float16).astype(float_t) - assert smallest_value == 2**-24 - - # Will be rounded to zero based on round to even rule: - rounded_to_zero = smallest_value / float_t(2) - assert rounded_to_zero.astype(np.float16) == 0 - - # The significand will be all 0 for the float_t, test that we do not - # lose the lower ones of these: - for i in range(bits): - # slightly increasing the value should make it round up: - larger_pattern = rounded_to_zero.view(uint_t) | uint_t(1 << i) - larger_value = larger_pattern.view(float_t) - assert larger_value.astype(np.float16) == smallest_value - - def test_nans_infs(self): - with np.errstate(all='ignore'): - # Check some of the ufuncs - assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32)) - assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32)) - assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32)) - assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32)) - assert_equal(np.spacing(float16(65504)), np.inf) - - # Check comparisons of all values with NaN - nan = float16(np.nan) - - assert_(not (self.all_f16 == nan).any()) - assert_(not (nan == self.all_f16).any()) - - assert_((self.all_f16 != nan).all()) - assert_((nan != self.all_f16).all()) - - assert_(not (self.all_f16 < nan).any()) - assert_(not (nan < self.all_f16).any()) - - assert_(not (self.all_f16 <= nan).any()) - assert_(not (nan <= self.all_f16).any()) - - assert_(not (self.all_f16 > nan).any()) - assert_(not (nan > 
self.all_f16).any()) - - assert_(not (self.all_f16 >= nan).any()) - assert_(not (nan >= self.all_f16).any()) - - def test_half_values(self): - """Confirms a small number of known half values""" - a = np.array([1.0, -1.0, - 2.0, -2.0, - 0.0999755859375, 0.333251953125, # 1/10, 1/3 - 65504, -65504, # Maximum magnitude - 2.0**(-14), -2.0**(-14), # Minimum normal - 2.0**(-24), -2.0**(-24), # Minimum subnormal - 0, -1/1e1000, # Signed zeros - np.inf, -np.inf]) - b = np.array([0x3c00, 0xbc00, - 0x4000, 0xc000, - 0x2e66, 0x3555, - 0x7bff, 0xfbff, - 0x0400, 0x8400, - 0x0001, 0x8001, - 0x0000, 0x8000, - 0x7c00, 0xfc00], dtype=uint16) - b.dtype = float16 - assert_equal(a, b) - - def test_half_rounding(self): - """Checks that rounding when converting to half is correct""" - a = np.array([2.0**-25 + 2.0**-35, # Rounds to minimum subnormal - 2.0**-25, # Underflows to zero (nearest even mode) - 2.0**-26, # Underflows to zero - 1.0+2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10) - 1.0+2.0**-11, # rounds to 1.0 (nearest even mode) - 1.0+2.0**-12, # rounds to 1.0 - 65519, # rounds to 65504 - 65520], # rounds to inf - dtype=float64) - rounded = [2.0**-24, - 0.0, - 0.0, - 1.0+2.0**(-10), - 1.0, - 1.0, - 65504, - np.inf] - - # Check float64->float16 rounding - b = np.array(a, dtype=float16) - assert_equal(b, rounded) - - # Check float32->float16 rounding - a = np.array(a, dtype=float32) - b = np.array(a, dtype=float16) - assert_equal(b, rounded) - - def test_half_correctness(self): - """Take every finite float16, and check the casting functions with - a manual conversion.""" - - # Create an array of all finite float16s - a_bits = self.finite_f16.view(dtype=uint16) - - # Convert to 64-bit float manually - a_sgn = (-1.0)**((a_bits & 0x8000) >> 15) - a_exp = np.array((a_bits & 0x7c00) >> 10, dtype=np.int32) - 15 - a_man = (a_bits & 0x03ff) * 2.0**(-10) - # Implicit bit of normalized floats - a_man[a_exp != -15] += 1 - # Denormalized exponent is -14 - a_exp[a_exp == -15] = -14 - - a_manual 
= a_sgn * a_man * 2.0**a_exp - - a32_fail = np.nonzero(self.finite_f32 != a_manual)[0] - if len(a32_fail) != 0: - bad_index = a32_fail[0] - assert_equal(self.finite_f32, a_manual, - "First non-equal is half value %x -> %g != %g" % - (self.finite_f16[bad_index], - self.finite_f32[bad_index], - a_manual[bad_index])) - - a64_fail = np.nonzero(self.finite_f64 != a_manual)[0] - if len(a64_fail) != 0: - bad_index = a64_fail[0] - assert_equal(self.finite_f64, a_manual, - "First non-equal is half value %x -> %g != %g" % - (self.finite_f16[bad_index], - self.finite_f64[bad_index], - a_manual[bad_index])) - - def test_half_ordering(self): - """Make sure comparisons are working right""" - - # All non-NaN float16 values in reverse order - a = self.nonan_f16[::-1].copy() - - # 32-bit float copy - b = np.array(a, dtype=float32) - - # Should sort the same - a.sort() - b.sort() - assert_equal(a, b) - - # Comparisons should work - assert_((a[:-1] <= a[1:]).all()) - assert_(not (a[:-1] > a[1:]).any()) - assert_((a[1:] >= a[:-1]).all()) - assert_(not (a[1:] < a[:-1]).any()) - # All != except for +/-0 - assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size-2) - assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size-2) - - def test_half_funcs(self): - """Test the various ArrFuncs""" - - # fill - assert_equal(np.arange(10, dtype=float16), - np.arange(10, dtype=float32)) - - # fillwithscalar - a = np.zeros((5,), dtype=float16) - a.fill(1) - assert_equal(a, np.ones((5,), dtype=float16)) - - # nonzero and copyswap - a = np.array([0, 0, -1, -1/1e20, 0, 2.0**-24, 7.629e-6], dtype=float16) - assert_equal(a.nonzero()[0], - [2, 5, 6]) - a = a.byteswap().newbyteorder() - assert_equal(a.nonzero()[0], - [2, 5, 6]) - - # dot - a = np.arange(0, 10, 0.5, dtype=float16) - b = np.ones((20,), dtype=float16) - assert_equal(np.dot(a, b), - 95) - - # argmax - a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16) - assert_equal(a.argmax(), - 4) - a = np.array([0, -np.inf, -2, np.inf, 
12.55, np.nan, 2.1, 12.4], dtype=float16) - assert_equal(a.argmax(), - 5) - - # getitem - a = np.arange(10, dtype=float16) - for i in range(10): - assert_equal(a.item(i), i) - - def test_spacing_nextafter(self): - """Test np.spacing and np.nextafter""" - # All non-negative finite #'s - a = np.arange(0x7c00, dtype=uint16) - hinf = np.array((np.inf,), dtype=float16) - a_f16 = a.view(dtype=float16) - - assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1]) - - assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:]) - assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1]) - assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1]) - - # switch to negatives - a |= 0x8000 - - assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1])) - assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:]) - - assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1]) - assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1]) - assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:]) - - def test_half_ufuncs(self): - """Test the various ufuncs""" - - a = np.array([0, 1, 2, 4, 2], dtype=float16) - b = np.array([-2, 5, 1, 4, 3], dtype=float16) - c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16) - - assert_equal(np.add(a, b), [-2, 6, 3, 8, 5]) - assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1]) - assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6]) - assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625]) - - assert_equal(np.equal(a, b), [False, False, False, True, False]) - assert_equal(np.not_equal(a, b), [True, True, True, False, True]) - assert_equal(np.less(a, b), [False, True, False, False, True]) - assert_equal(np.less_equal(a, b), [False, True, False, True, True]) - assert_equal(np.greater(a, b), [True, False, True, False, False]) - assert_equal(np.greater_equal(a, b), [True, False, True, True, False]) - assert_equal(np.logical_and(a, b), [False, True, True, True, True]) - assert_equal(np.logical_or(a, b), [True, True, True, True, True]) - 
assert_equal(np.logical_xor(a, b), [True, False, False, False, False]) - assert_equal(np.logical_not(a), [True, False, False, False, False]) - - assert_equal(np.isnan(c), [False, False, False, True, False]) - assert_equal(np.isinf(c), [False, False, True, False, False]) - assert_equal(np.isfinite(c), [True, True, False, False, True]) - assert_equal(np.signbit(b), [True, False, False, False, False]) - - assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3]) - - assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3]) - - x = np.maximum(b, c) - assert_(np.isnan(x[3])) - x[3] = 0 - assert_equal(x, [0, 5, 1, 0, 6]) - - assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2]) - - x = np.minimum(b, c) - assert_(np.isnan(x[3])) - x[3] = 0 - assert_equal(x, [-2, -1, -np.inf, 0, 3]) - - assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3]) - assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6]) - assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2]) - assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3]) - - assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0]) - assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2]) - assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2])) - assert_equal(np.square(b), [4, 25, 1, 16, 9]) - assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125]) - assert_equal(np.ones_like(b), [1, 1, 1, 1, 1]) - assert_equal(np.conjugate(b), b) - assert_equal(np.absolute(b), [2, 5, 1, 4, 3]) - assert_equal(np.negative(b), [2, -5, -1, -4, -3]) - assert_equal(np.positive(b), b) - assert_equal(np.sign(b), [-1, 1, 1, 1, 1]) - assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b)) - assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2])) - assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12]) - - def test_half_coercion(self): - """Test that half gets coerced properly with the other types""" - a16 = np.array((1,), dtype=float16) - a32 = np.array((1,), dtype=float32) - b16 = float16(1) - b32 = float32(1) - - assert_equal(np.power(a16, 2).dtype, float16) - 
assert_equal(np.power(a16, 2.0).dtype, float16) - assert_equal(np.power(a16, b16).dtype, float16) - assert_equal(np.power(a16, b32).dtype, float16) - assert_equal(np.power(a16, a16).dtype, float16) - assert_equal(np.power(a16, a32).dtype, float32) - - assert_equal(np.power(b16, 2).dtype, float64) - assert_equal(np.power(b16, 2.0).dtype, float64) - assert_equal(np.power(b16, b16).dtype, float16) - assert_equal(np.power(b16, b32).dtype, float32) - assert_equal(np.power(b16, a16).dtype, float16) - assert_equal(np.power(b16, a32).dtype, float32) - - assert_equal(np.power(a32, a16).dtype, float32) - assert_equal(np.power(a32, b16).dtype, float32) - assert_equal(np.power(b32, a16).dtype, float16) - assert_equal(np.power(b32, b16).dtype, float32) - - @pytest.mark.skipif(platform.machine() == "armv5tel", - reason="See gh-413.") - def test_half_fpe(self): - with np.errstate(all='raise'): - sx16 = np.array((1e-4,), dtype=float16) - bx16 = np.array((1e4,), dtype=float16) - sy16 = float16(1e-4) - by16 = float16(1e4) - - # Underflow errors - assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sx16) - assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sy16) - assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sx16) - assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sy16) - assert_raises_fpe('underflow', lambda a, b:a/b, sx16, bx16) - assert_raises_fpe('underflow', lambda a, b:a/b, sx16, by16) - assert_raises_fpe('underflow', lambda a, b:a/b, sy16, bx16) - assert_raises_fpe('underflow', lambda a, b:a/b, sy16, by16) - assert_raises_fpe('underflow', lambda a, b:a/b, - float16(2.**-14), float16(2**11)) - assert_raises_fpe('underflow', lambda a, b:a/b, - float16(-2.**-14), float16(2**11)) - assert_raises_fpe('underflow', lambda a, b:a/b, - float16(2.**-14+2**-24), float16(2)) - assert_raises_fpe('underflow', lambda a, b:a/b, - float16(-2.**-14-2**-24), float16(2)) - assert_raises_fpe('underflow', lambda a, b:a/b, - float16(2.**-14+2**-23), float16(4)) - - # Overflow 
errors - assert_raises_fpe('overflow', lambda a, b:a*b, bx16, bx16) - assert_raises_fpe('overflow', lambda a, b:a*b, bx16, by16) - assert_raises_fpe('overflow', lambda a, b:a*b, by16, bx16) - assert_raises_fpe('overflow', lambda a, b:a*b, by16, by16) - assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sx16) - assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sy16) - assert_raises_fpe('overflow', lambda a, b:a/b, by16, sx16) - assert_raises_fpe('overflow', lambda a, b:a/b, by16, sy16) - assert_raises_fpe('overflow', lambda a, b:a+b, - float16(65504), float16(17)) - assert_raises_fpe('overflow', lambda a, b:a-b, - float16(-65504), float16(17)) - assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf)) - assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) - assert_raises_fpe('overflow', np.spacing, float16(65504)) - - # Invalid value errors - assert_raises_fpe('invalid', np.divide, float16(np.inf), float16(np.inf)) - assert_raises_fpe('invalid', np.spacing, float16(np.inf)) - assert_raises_fpe('invalid', np.spacing, float16(np.nan)) - assert_raises_fpe('invalid', np.nextafter, float16(np.inf), float16(0)) - assert_raises_fpe('invalid', np.nextafter, float16(-np.inf), float16(0)) - assert_raises_fpe('invalid', np.nextafter, float16(0), float16(np.nan)) - - # These should not raise - float16(65472)+float16(32) - float16(2**-13)/float16(2) - float16(2**-14)/float16(2**10) - np.spacing(float16(-65504)) - np.nextafter(float16(65504), float16(-np.inf)) - np.nextafter(float16(-65504), float16(np.inf)) - float16(2**-14)/float16(2**10) - float16(-2**-14)/float16(2**10) - float16(2**-14+2**-23)/float16(2) - float16(-2**-14-2**-23)/float16(2) - - def test_half_array_interface(self): - """Test that half is compatible with __array_interface__""" - class Dummy: - pass - - a = np.ones((1,), dtype=float16) - b = Dummy() - b.__array_interface__ = a.__array_interface__ - c = np.array(b) - assert_(c.dtype == float16) - assert_equal(a, 
c) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_indexerrors.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_indexerrors.py deleted file mode 100644 index 63b43c4..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_indexerrors.py +++ /dev/null @@ -1,123 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.testing import assert_raises - -class TestIndexErrors(object): - '''Tests to exercise indexerrors not covered by other tests.''' - - def test_arraytypes_fasttake(self): - 'take from a 0-length dimension' - x = np.empty((2, 3, 0, 4)) - assert_raises(IndexError, x.take, [0], axis=2) - assert_raises(IndexError, x.take, [1], axis=2) - assert_raises(IndexError, x.take, [0], axis=2, mode='wrap') - assert_raises(IndexError, x.take, [0], axis=2, mode='clip') - - def test_take_from_object(self): - # Check exception taking from object array - d = np.zeros(5, dtype=object) - assert_raises(IndexError, d.take, [6]) - - # Check exception taking from 0-d array - d = np.zeros((5, 0), dtype=object) - assert_raises(IndexError, d.take, [1], axis=1) - assert_raises(IndexError, d.take, [0], axis=1) - assert_raises(IndexError, d.take, [0]) - assert_raises(IndexError, d.take, [0], mode='wrap') - assert_raises(IndexError, d.take, [0], mode='clip') - - def test_multiindex_exceptions(self): - a = np.empty(5, dtype=object) - assert_raises(IndexError, a.item, 20) - a = np.empty((5, 0), dtype=object) - assert_raises(IndexError, a.item, (0, 0)) - - a = np.empty(5, dtype=object) - assert_raises(IndexError, a.itemset, 20, 0) - a = np.empty((5, 0), dtype=object) - assert_raises(IndexError, a.itemset, (0, 0), 0) - - def test_put_exceptions(self): - a = np.zeros((5, 5)) - assert_raises(IndexError, a.put, 100, 0) - a = np.zeros((5, 5), dtype=object) - assert_raises(IndexError, a.put, 100, 0) - a = np.zeros((5, 5, 0)) - assert_raises(IndexError, a.put, 100, 0) - a = np.zeros((5, 5, 0), 
dtype=object) - assert_raises(IndexError, a.put, 100, 0) - - def test_iterators_exceptions(self): - "cases in iterators.c" - def assign(obj, ind, val): - obj[ind] = val - - a = np.zeros([1, 2, 3]) - assert_raises(IndexError, lambda: a[0, 5, None, 2]) - assert_raises(IndexError, lambda: a[0, 5, 0, 2]) - assert_raises(IndexError, lambda: assign(a, (0, 5, None, 2), 1)) - assert_raises(IndexError, lambda: assign(a, (0, 5, 0, 2), 1)) - - a = np.zeros([1, 0, 3]) - assert_raises(IndexError, lambda: a[0, 0, None, 2]) - assert_raises(IndexError, lambda: assign(a, (0, 0, None, 2), 1)) - - a = np.zeros([1, 2, 3]) - assert_raises(IndexError, lambda: a.flat[10]) - assert_raises(IndexError, lambda: assign(a.flat, 10, 5)) - a = np.zeros([1, 0, 3]) - assert_raises(IndexError, lambda: a.flat[10]) - assert_raises(IndexError, lambda: assign(a.flat, 10, 5)) - - a = np.zeros([1, 2, 3]) - assert_raises(IndexError, lambda: a.flat[np.array(10)]) - assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5)) - a = np.zeros([1, 0, 3]) - assert_raises(IndexError, lambda: a.flat[np.array(10)]) - assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5)) - - a = np.zeros([1, 2, 3]) - assert_raises(IndexError, lambda: a.flat[np.array([10])]) - assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5)) - a = np.zeros([1, 0, 3]) - assert_raises(IndexError, lambda: a.flat[np.array([10])]) - assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5)) - - def test_mapping(self): - "cases from mapping.c" - - def assign(obj, ind, val): - obj[ind] = val - - a = np.zeros((0, 10)) - assert_raises(IndexError, lambda: a[12]) - - a = np.zeros((3, 5)) - assert_raises(IndexError, lambda: a[(10, 20)]) - assert_raises(IndexError, lambda: assign(a, (10, 20), 1)) - a = np.zeros((3, 0)) - assert_raises(IndexError, lambda: a[(1, 0)]) - assert_raises(IndexError, lambda: assign(a, (1, 0), 1)) - - a = np.zeros((10,)) - assert_raises(IndexError, lambda: assign(a, 10, 1)) - a = 
np.zeros((0,)) - assert_raises(IndexError, lambda: assign(a, 10, 1)) - - a = np.zeros((3, 5)) - assert_raises(IndexError, lambda: a[(1, [1, 20])]) - assert_raises(IndexError, lambda: assign(a, (1, [1, 20]), 1)) - a = np.zeros((3, 0)) - assert_raises(IndexError, lambda: a[(1, [0, 1])]) - assert_raises(IndexError, lambda: assign(a, (1, [0, 1]), 1)) - - def test_methods(self): - "cases from methods.c" - - a = np.zeros((3, 3)) - assert_raises(IndexError, lambda: a.item(100)) - assert_raises(IndexError, lambda: a.itemset(100, 1)) - a = np.zeros((0, 3)) - assert_raises(IndexError, lambda: a.item(100)) - assert_raises(IndexError, lambda: a.itemset(100, 1)) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_indexing.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_indexing.py deleted file mode 100644 index 70a5a24..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_indexing.py +++ /dev/null @@ -1,1347 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import warnings -import functools -import operator -import pytest - -import numpy as np -from numpy.core._multiarray_tests import array_indexing -from itertools import product -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_array_equal, assert_warns, - HAS_REFCOUNT, suppress_warnings, - ) - - -class TestIndexing(object): - def test_index_no_floats(self): - a = np.array([[[5]]]) - - assert_raises(IndexError, lambda: a[0.0]) - assert_raises(IndexError, lambda: a[0, 0.0]) - assert_raises(IndexError, lambda: a[0.0, 0]) - assert_raises(IndexError, lambda: a[0.0,:]) - assert_raises(IndexError, lambda: a[:, 0.0]) - assert_raises(IndexError, lambda: a[:, 0.0,:]) - assert_raises(IndexError, lambda: a[0.0,:,:]) - assert_raises(IndexError, lambda: a[0, 0, 0.0]) - assert_raises(IndexError, lambda: a[0.0, 0, 0]) - assert_raises(IndexError, lambda: a[0, 0.0, 0]) - assert_raises(IndexError, lambda: a[-1.4]) - 
assert_raises(IndexError, lambda: a[0, -1.4]) - assert_raises(IndexError, lambda: a[-1.4, 0]) - assert_raises(IndexError, lambda: a[-1.4,:]) - assert_raises(IndexError, lambda: a[:, -1.4]) - assert_raises(IndexError, lambda: a[:, -1.4,:]) - assert_raises(IndexError, lambda: a[-1.4,:,:]) - assert_raises(IndexError, lambda: a[0, 0, -1.4]) - assert_raises(IndexError, lambda: a[-1.4, 0, 0]) - assert_raises(IndexError, lambda: a[0, -1.4, 0]) - assert_raises(IndexError, lambda: a[0.0:, 0.0]) - assert_raises(IndexError, lambda: a[0.0:, 0.0,:]) - - def test_slicing_no_floats(self): - a = np.array([[5]]) - - # start as float. - assert_raises(TypeError, lambda: a[0.0:]) - assert_raises(TypeError, lambda: a[0:, 0.0:2]) - assert_raises(TypeError, lambda: a[0.0::2, :0]) - assert_raises(TypeError, lambda: a[0.0:1:2,:]) - assert_raises(TypeError, lambda: a[:, 0.0:]) - # stop as float. - assert_raises(TypeError, lambda: a[:0.0]) - assert_raises(TypeError, lambda: a[:0, 1:2.0]) - assert_raises(TypeError, lambda: a[:0.0:2, :0]) - assert_raises(TypeError, lambda: a[:0.0,:]) - assert_raises(TypeError, lambda: a[:, 0:4.0:2]) - # step as float. - assert_raises(TypeError, lambda: a[::1.0]) - assert_raises(TypeError, lambda: a[0:, :2:2.0]) - assert_raises(TypeError, lambda: a[1::4.0, :0]) - assert_raises(TypeError, lambda: a[::5.0,:]) - assert_raises(TypeError, lambda: a[:, 0:4:2.0]) - # mixed. - assert_raises(TypeError, lambda: a[1.0:2:2.0]) - assert_raises(TypeError, lambda: a[1.0::2.0]) - assert_raises(TypeError, lambda: a[0:, :2.0:2.0]) - assert_raises(TypeError, lambda: a[1.0:1:4.0, :0]) - assert_raises(TypeError, lambda: a[1.0:5.0:5.0,:]) - assert_raises(TypeError, lambda: a[:, 0.4:4.0:2.0]) - # should still get the DeprecationWarning if step = 0. - assert_raises(TypeError, lambda: a[::0.0]) - - def test_index_no_array_to_index(self): - # No non-scalar arrays. 
- a = np.array([[[1]]]) - - assert_raises(TypeError, lambda: a[a:a:a]) - - def test_none_index(self): - # `None` index adds newaxis - a = np.array([1, 2, 3]) - assert_equal(a[None], a[np.newaxis]) - assert_equal(a[None].ndim, a.ndim + 1) - - def test_empty_tuple_index(self): - # Empty tuple index creates a view - a = np.array([1, 2, 3]) - assert_equal(a[()], a) - assert_(a[()].base is a) - a = np.array(0) - assert_(isinstance(a[()], np.int_)) - - def test_void_scalar_empty_tuple(self): - s = np.zeros((), dtype='V4') - assert_equal(s[()].dtype, s.dtype) - assert_equal(s[()], s) - assert_equal(type(s[...]), np.ndarray) - - def test_same_kind_index_casting(self): - # Indexes should be cast with same-kind and not safe, even if that - # is somewhat unsafe. So test various different code paths. - index = np.arange(5) - u_index = index.astype(np.uintp) - arr = np.arange(10) - - assert_array_equal(arr[index], arr[u_index]) - arr[u_index] = np.arange(5) - assert_array_equal(arr, np.arange(10)) - - arr = np.arange(10).reshape(5, 2) - assert_array_equal(arr[index], arr[u_index]) - - arr[u_index] = np.arange(5)[:,None] - assert_array_equal(arr, np.arange(5)[:,None].repeat(2, axis=1)) - - arr = np.arange(25).reshape(5, 5) - assert_array_equal(arr[u_index, u_index], arr[index, index]) - - def test_empty_fancy_index(self): - # Empty list index creates an empty array - # with the same dtype (but with weird shape) - a = np.array([1, 2, 3]) - assert_equal(a[[]], []) - assert_equal(a[[]].dtype, a.dtype) - - b = np.array([], dtype=np.intp) - assert_equal(a[[]], []) - assert_equal(a[[]].dtype, a.dtype) - - b = np.array([]) - assert_raises(IndexError, a.__getitem__, b) - - def test_ellipsis_index(self): - a = np.array([[1, 2, 3], - [4, 5, 6], - [7, 8, 9]]) - assert_(a[...] is not a) - assert_equal(a[...], a) - # `a[...]` was `a` in numpy <1.9. 
- assert_(a[...].base is a) - - # Slicing with ellipsis can skip an - # arbitrary number of dimensions - assert_equal(a[0, ...], a[0]) - assert_equal(a[0, ...], a[0,:]) - assert_equal(a[..., 0], a[:, 0]) - - # Slicing with ellipsis always results - # in an array, not a scalar - assert_equal(a[0, ..., 1], np.array(2)) - - # Assignment with `(Ellipsis,)` on 0-d arrays - b = np.array(1) - b[(Ellipsis,)] = 2 - assert_equal(b, 2) - - def test_single_int_index(self): - # Single integer index selects one row - a = np.array([[1, 2, 3], - [4, 5, 6], - [7, 8, 9]]) - - assert_equal(a[0], [1, 2, 3]) - assert_equal(a[-1], [7, 8, 9]) - - # Index out of bounds produces IndexError - assert_raises(IndexError, a.__getitem__, 1 << 30) - # Index overflow produces IndexError - assert_raises(IndexError, a.__getitem__, 1 << 64) - - def test_single_bool_index(self): - # Single boolean index - a = np.array([[1, 2, 3], - [4, 5, 6], - [7, 8, 9]]) - - assert_equal(a[np.array(True)], a[None]) - assert_equal(a[np.array(False)], a[None][0:0]) - - def test_boolean_shape_mismatch(self): - arr = np.ones((5, 4, 3)) - - index = np.array([True]) - assert_raises(IndexError, arr.__getitem__, index) - - index = np.array([False] * 6) - assert_raises(IndexError, arr.__getitem__, index) - - index = np.zeros((4, 4), dtype=bool) - assert_raises(IndexError, arr.__getitem__, index) - - assert_raises(IndexError, arr.__getitem__, (slice(None), index)) - - def test_boolean_indexing_onedim(self): - # Indexing a 2-dimensional array with - # boolean array of length one - a = np.array([[ 0., 0., 0.]]) - b = np.array([ True], dtype=bool) - assert_equal(a[b], a) - # boolean assignment - a[b] = 1. - assert_equal(a, [[1., 1., 1.]]) - - def test_boolean_assignment_value_mismatch(self): - # A boolean assignment should fail when the shape of the values - # cannot be broadcast to the subscription. 
(see also gh-3458) - a = np.arange(4) - - def f(a, v): - a[a > -1] = v - - assert_raises(ValueError, f, a, []) - assert_raises(ValueError, f, a, [1, 2, 3]) - assert_raises(ValueError, f, a[:1], [1, 2, 3]) - - def test_boolean_assignment_needs_api(self): - # See also gh-7666 - # This caused a segfault on Python 2 due to the GIL not being - # held when the iterator does not need it, but the transfer function - # does - arr = np.zeros(1000) - indx = np.zeros(1000, dtype=bool) - indx[:100] = True - arr[indx] = np.ones(100, dtype=object) - - expected = np.zeros(1000) - expected[:100] = 1 - assert_array_equal(arr, expected) - - def test_boolean_indexing_twodim(self): - # Indexing a 2-dimensional array with - # 2-dimensional boolean array - a = np.array([[1, 2, 3], - [4, 5, 6], - [7, 8, 9]]) - b = np.array([[ True, False, True], - [False, True, False], - [ True, False, True]]) - assert_equal(a[b], [1, 3, 5, 7, 9]) - assert_equal(a[b[1]], [[4, 5, 6]]) - assert_equal(a[b[0]], a[b[2]]) - - # boolean assignment - a[b] = 0 - assert_equal(a, [[0, 2, 0], - [4, 0, 6], - [0, 8, 0]]) - - def test_boolean_indexing_list(self): - # Regression test for #13715. It's a use-after-free bug which the - # test won't directly catch, but it will show up in valgrind. - a = np.array([1, 2, 3]) - b = [True, False, True] - # Two variants of the test because the first takes a fast path - assert_equal(a[b], [1, 3]) - assert_equal(a[None, b], [[1, 3]]) - - def test_reverse_strides_and_subspace_bufferinit(self): - # This tests that the strides are not reversed for simple and - # subspace fancy indexing. - a = np.ones(5) - b = np.zeros(5, dtype=np.intp)[::-1] - c = np.arange(5)[::-1] - - a[b] = c - # If the strides are not reversed, the 0 in the arange comes last. 
- assert_equal(a[0], 0) - - # This also tests that the subspace buffer is initialized: - a = np.ones((5, 2)) - c = np.arange(10).reshape(5, 2)[::-1] - a[b, :] = c - assert_equal(a[0], [0, 1]) - - def test_reversed_strides_result_allocation(self): - # Test a bug when calculating the output strides for a result array - # when the subspace size was 1 (and test other cases as well) - a = np.arange(10)[:, None] - i = np.arange(10)[::-1] - assert_array_equal(a[i], a[i.copy('C')]) - - a = np.arange(20).reshape(-1, 2) - - def test_uncontiguous_subspace_assignment(self): - # During development there was a bug activating a skip logic - # based on ndim instead of size. - a = np.full((3, 4, 2), -1) - b = np.full((3, 4, 2), -1) - - a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T - b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy() - - assert_equal(a, b) - - def test_too_many_fancy_indices_special_case(self): - # Just documents behaviour, this is a small limitation. - a = np.ones((1,) * 32) # 32 is NPY_MAXDIMS - assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 32) - - def test_scalar_array_bool(self): - # NumPy bools can be used as boolean index (python ones as of yet not) - a = np.array(1) - assert_equal(a[np.bool_(True)], a[np.array(True)]) - assert_equal(a[np.bool_(False)], a[np.array(False)]) - - # After deprecating bools as integers: - #a = np.array([0,1,2]) - #assert_equal(a[True, :], a[None, :]) - #assert_equal(a[:, True], a[:, None]) - # - #assert_(not np.may_share_memory(a, a[True, :])) - - def test_everything_returns_views(self): - # Before `...` would return a itself. 
- a = np.arange(5) - - assert_(a is not a[()]) - assert_(a is not a[...]) - assert_(a is not a[:]) - - def test_broaderrors_indexing(self): - a = np.zeros((5, 5)) - assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2])) - assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0) - - def test_trivial_fancy_out_of_bounds(self): - a = np.zeros(5) - ind = np.ones(20, dtype=np.intp) - ind[-1] = 10 - assert_raises(IndexError, a.__getitem__, ind) - assert_raises(IndexError, a.__setitem__, ind, 0) - ind = np.ones(20, dtype=np.intp) - ind[0] = 11 - assert_raises(IndexError, a.__getitem__, ind) - assert_raises(IndexError, a.__setitem__, ind, 0) - - def test_trivial_fancy_not_possible(self): - # Test that the fast path for trivial assignment is not incorrectly - # used when the index is not contiguous or 1D, see also gh-11467. - a = np.arange(6) - idx = np.arange(6, dtype=np.intp).reshape(2, 1, 3)[:, :, 0] - assert_array_equal(a[idx], idx) - - # this case must not go into the fast path, note that idx is - # a non-contiuguous none 1D array here. - a[idx] = -1 - res = np.arange(6) - res[0] = -1 - res[3] = -1 - assert_array_equal(a, res) - - def test_nonbaseclass_values(self): - class SubClass(np.ndarray): - def __array_finalize__(self, old): - # Have array finalize do funny things - self.fill(99) - - a = np.zeros((5, 5)) - s = a.copy().view(type=SubClass) - s.fill(1) - - a[[0, 1, 2, 3, 4], :] = s - assert_((a == 1).all()) - - # Subspace is last, so transposing might want to finalize - a[:, [0, 1, 2, 3, 4]] = s - assert_((a == 1).all()) - - a.fill(0) - a[...] 
= s - assert_((a == 1).all()) - - def test_subclass_writeable(self): - d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)], - dtype=[('target', 'S20'), ('V_mag', '>f4')]) - ind = np.array([False, True, True], dtype=bool) - assert_(d[ind].flags.writeable) - ind = np.array([0, 1]) - assert_(d[ind].flags.writeable) - assert_(d[...].flags.writeable) - assert_(d[0].flags.writeable) - - def test_memory_order(self): - # This is not necessary to preserve. Memory layouts for - # more complex indices are not as simple. - a = np.arange(10) - b = np.arange(10).reshape(5,2).T - assert_(a[b].flags.f_contiguous) - - # Takes a different implementation branch: - a = a.reshape(-1, 1) - assert_(a[b, 0].flags.f_contiguous) - - def test_scalar_return_type(self): - # Full scalar indices should return scalars and object - # arrays should not call PyArray_Return on their items - class Zero(object): - # The most basic valid indexing - def __index__(self): - return 0 - - z = Zero() - - class ArrayLike(object): - # Simple array, should behave like the array - def __array__(self): - return np.array(0) - - a = np.zeros(()) - assert_(isinstance(a[()], np.float_)) - a = np.zeros(1) - assert_(isinstance(a[z], np.float_)) - a = np.zeros((1, 1)) - assert_(isinstance(a[z, np.array(0)], np.float_)) - assert_(isinstance(a[z, ArrayLike()], np.float_)) - - # And object arrays do not call it too often: - b = np.array(0) - a = np.array(0, dtype=object) - a[()] = b - assert_(isinstance(a[()], np.ndarray)) - a = np.array([b, None]) - assert_(isinstance(a[z], np.ndarray)) - a = np.array([[b, None]]) - assert_(isinstance(a[z, np.array(0)], np.ndarray)) - assert_(isinstance(a[z, ArrayLike()], np.ndarray)) - - def test_small_regressions(self): - # Reference count of intp for index checks - a = np.array([0]) - if HAS_REFCOUNT: - refcount = sys.getrefcount(np.dtype(np.intp)) - # item setting always checks indices in separate function: - a[np.array([0], dtype=np.intp)] = 1 - a[np.array([0], 
dtype=np.uint8)] = 1 - assert_raises(IndexError, a.__setitem__, - np.array([1], dtype=np.intp), 1) - assert_raises(IndexError, a.__setitem__, - np.array([1], dtype=np.uint8), 1) - - if HAS_REFCOUNT: - assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount) - - def test_unaligned(self): - v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7] - d = v.view(np.dtype("S8")) - # unaligned source - x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7] - x = x.view(np.dtype("S8")) - x[...] = np.array("b" * 8, dtype="S") - b = np.arange(d.size) - #trivial - assert_equal(d[b], d) - d[b] = x - # nontrivial - # unaligned index array - b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)] - b = b.view(np.intp)[:d.size] - b[...] = np.arange(d.size) - assert_equal(d[b.astype(np.int16)], d) - d[b.astype(np.int16)] = x - # boolean - d[b % 2 == 0] - d[b % 2 == 0] = x[::2] - - def test_tuple_subclass(self): - arr = np.ones((5, 5)) - - # A tuple subclass should also be an nd-index - class TupleSubclass(tuple): - pass - index = ([1], [1]) - index = TupleSubclass(index) - assert_(arr[index].shape == (1,)) - # Unlike the non nd-index: - assert_(arr[index,].shape != (1,)) - - def test_broken_sequence_not_nd_index(self): - # See gh-5063: - # If we have an object which claims to be a sequence, but fails - # on item getting, this should not be converted to an nd-index (tuple) - # If this object happens to be a valid index otherwise, it should work - # This object here is very dubious and probably bad though: - class SequenceLike(object): - def __index__(self): - return 0 - - def __len__(self): - return 1 - - def __getitem__(self, item): - raise IndexError('Not possible') - - arr = np.arange(10) - assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),]) - - # also test that field indexing does not segfault - # for a similar reason, by indexing a structured array - arr = np.zeros((1,), dtype=[('f1', 'i8'), ('f2', 'i8')]) - assert_array_equal(arr[SequenceLike()], 
arr[SequenceLike(),]) - - def test_indexing_array_weird_strides(self): - # See also gh-6221 - # the shapes used here come from the issue and create the correct - # size for the iterator buffering size. - x = np.ones(10) - x2 = np.ones((10, 2)) - ind = np.arange(10)[:, None, None, None] - ind = np.broadcast_to(ind, (10, 55, 4, 4)) - - # single advanced index case - assert_array_equal(x[ind], x[ind.copy()]) - # higher dimensional advanced index - zind = np.zeros(4, dtype=np.intp) - assert_array_equal(x2[ind, zind], x2[ind.copy(), zind]) - - def test_indexing_array_negative_strides(self): - # From gh-8264, - # core dumps if negative strides are used in iteration - arro = np.zeros((4, 4)) - arr = arro[::-1, ::-1] - - slices = (slice(None), [0, 1, 2, 3]) - arr[slices] = 10 - assert_array_equal(arr, 10.) - -class TestFieldIndexing(object): - def test_scalar_return_type(self): - # Field access on an array should return an array, even if it - # is 0-d. - a = np.zeros((), [('a','f8')]) - assert_(isinstance(a['a'], np.ndarray)) - assert_(isinstance(a[['a']], np.ndarray)) - - -class TestBroadcastedAssignments(object): - def assign(self, a, ind, val): - a[ind] = val - return a - - def test_prepending_ones(self): - a = np.zeros((3, 2)) - - a[...] = np.ones((1, 3, 2)) - # Fancy with subspace with and without transpose - a[[0, 1, 2], :] = np.ones((1, 3, 2)) - a[:, [0, 1]] = np.ones((1, 3, 2)) - # Fancy without subspace (with broadcasting) - a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2)) - - def test_prepend_not_one(self): - assign = self.assign - s_ = np.s_ - a = np.zeros(5) - - # Too large and not only ones. 
- assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1))) - assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1))) - assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2,2,1))) - - def test_simple_broadcasting_errors(self): - assign = self.assign - s_ = np.s_ - a = np.zeros((5, 1)) - - assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2))) - assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0))) - assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2))) - assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0))) - assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1))) - - def test_index_is_larger(self): - # Simple case of fancy index broadcasting of the index. - a = np.zeros((5, 5)) - a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4] - - assert_((a[:3, :3] == [2, 3, 4]).all()) - - def test_broadcast_subspace(self): - a = np.zeros((100, 100)) - v = np.arange(100)[:,None] - b = np.arange(100)[::-1] - a[b] = v - assert_((a[::-1] == v).all()) - - -class TestSubclasses(object): - def test_basic(self): - # Test that indexing in various ways produces SubClass instances, - # and that the base is set up correctly: the original subclass - # instance for views, and a new ndarray for advanced/boolean indexing - # where a copy was made (latter a regression test for gh-11983). 
- class SubClass(np.ndarray): - pass - - a = np.arange(5) - s = a.view(SubClass) - s_slice = s[:3] - assert_(type(s_slice) is SubClass) - assert_(s_slice.base is s) - assert_array_equal(s_slice, a[:3]) - - s_fancy = s[[0, 1, 2]] - assert_(type(s_fancy) is SubClass) - assert_(s_fancy.base is not s) - assert_(type(s_fancy.base) is np.ndarray) - assert_array_equal(s_fancy, a[[0, 1, 2]]) - assert_array_equal(s_fancy.base, a[[0, 1, 2]]) - - s_bool = s[s > 0] - assert_(type(s_bool) is SubClass) - assert_(s_bool.base is not s) - assert_(type(s_bool.base) is np.ndarray) - assert_array_equal(s_bool, a[a > 0]) - assert_array_equal(s_bool.base, a[a > 0]) - - def test_fancy_on_read_only(self): - # Test that fancy indexing on read-only SubClass does not make a - # read-only copy (gh-14132) - class SubClass(np.ndarray): - pass - - a = np.arange(5) - s = a.view(SubClass) - s.flags.writeable = False - s_fancy = s[[0, 1, 2]] - assert_(s_fancy.flags.writeable) - - - def test_finalize_gets_full_info(self): - # Array finalize should be called on the filled array. - class SubClass(np.ndarray): - def __array_finalize__(self, old): - self.finalize_status = np.array(self) - self.old = old - - s = np.arange(10).view(SubClass) - new_s = s[:3] - assert_array_equal(new_s.finalize_status, new_s) - assert_array_equal(new_s.old, s) - - new_s = s[[0,1,2,3]] - assert_array_equal(new_s.finalize_status, new_s) - assert_array_equal(new_s.old, s) - - new_s = s[s > 0] - assert_array_equal(new_s.finalize_status, new_s) - assert_array_equal(new_s.old, s) - - @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") - def test_slice_decref_getsetslice(self): - # See gh-10066, a temporary slice object should be discarted. - # This test is only really interesting on Python 2 since - # it goes through `__set/getslice__` here and can probably be - # removed. Use 0:7 to make sure it is never None:7. 
- class KeepIndexObject(np.ndarray): - def __getitem__(self, indx): - self.indx = indx - if indx == slice(0, 7): - raise ValueError - - def __setitem__(self, indx, val): - self.indx = indx - if indx == slice(0, 4): - raise ValueError - - k = np.array([1]).view(KeepIndexObject) - k[0:5] - assert_equal(k.indx, slice(0, 5)) - assert_equal(sys.getrefcount(k.indx), 2) - try: - k[0:7] - raise AssertionError - except ValueError: - # The exception holds a reference to the slice so clear on Py2 - if hasattr(sys, 'exc_clear'): - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - sys.exc_clear() - assert_equal(k.indx, slice(0, 7)) - assert_equal(sys.getrefcount(k.indx), 2) - - k[0:3] = 6 - assert_equal(k.indx, slice(0, 3)) - assert_equal(sys.getrefcount(k.indx), 2) - try: - k[0:4] = 2 - raise AssertionError - except ValueError: - # The exception holds a reference to the slice so clear on Py2 - if hasattr(sys, 'exc_clear'): - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - sys.exc_clear() - assert_equal(k.indx, slice(0, 4)) - assert_equal(sys.getrefcount(k.indx), 2) - - -class TestFancyIndexingCast(object): - def test_boolean_index_cast_assign(self): - # Setup the boolean index and float arrays. - shape = (8, 63) - bool_index = np.zeros(shape).astype(bool) - bool_index[0, 1] = True - zero_array = np.zeros(shape) - - # Assigning float is fine. - zero_array[bool_index] = np.array([1]) - assert_equal(zero_array[0, 1], 1) - - # Fancy indexing works, although we get a cast warning. - assert_warns(np.ComplexWarning, - zero_array.__setitem__, ([0], [1]), np.array([2 + 1j])) - assert_equal(zero_array[0, 1], 2) # No complex part - - # Cast complex to float, throwing away the imaginary portion. 
- assert_warns(np.ComplexWarning, - zero_array.__setitem__, bool_index, np.array([1j])) - assert_equal(zero_array[0, 1], 0) - -class TestFancyIndexingEquivalence(object): - def test_object_assign(self): - # Check that the field and object special case using copyto is active. - # The right hand side cannot be converted to an array here. - a = np.arange(5, dtype=object) - b = a.copy() - a[:3] = [1, (1,2), 3] - b[[0, 1, 2]] = [1, (1,2), 3] - assert_array_equal(a, b) - - # test same for subspace fancy indexing - b = np.arange(5, dtype=object)[None, :] - b[[0], :3] = [[1, (1,2), 3]] - assert_array_equal(a, b[0]) - - # Check that swapping of axes works. - # There was a bug that made the later assignment throw a ValueError - # do to an incorrectly transposed temporary right hand side (gh-5714) - b = b.T - b[:3, [0]] = [[1], [(1,2)], [3]] - assert_array_equal(a, b[:, 0]) - - # Another test for the memory order of the subspace - arr = np.ones((3, 4, 5), dtype=object) - # Equivalent slicing assignment for comparison - cmp_arr = arr.copy() - cmp_arr[:1, ...] = [[[1], [2], [3], [4]]] - arr[[0], ...] = [[[1], [2], [3], [4]]] - assert_array_equal(arr, cmp_arr) - arr = arr.copy('F') - arr[[0], ...] = [[[1], [2], [3], [4]]] - assert_array_equal(arr, cmp_arr) - - def test_cast_equivalence(self): - # Yes, normal slicing uses unsafe casting. - a = np.arange(5) - b = a.copy() - - a[:3] = np.array(['2', '-3', '-1']) - b[[0, 2, 1]] = np.array(['2', '-1', '-3']) - assert_array_equal(a, b) - - # test the same for subspace fancy indexing - b = np.arange(5)[None, :] - b[[0], :3] = np.array([['2', '-3', '-1']]) - assert_array_equal(a, b[0]) - - -class TestMultiIndexingAutomated(object): - """ - These tests use code to mimic the C-Code indexing for selection. - - NOTE: - - * This still lacks tests for complex item setting. - * If you change behavior of indexing, you might want to modify - these tests to try more combinations. - * Behavior was written to match numpy version 1.8. 
(though a - first version matched 1.7.) - * Only tuple indices are supported by the mimicking code. - (and tested as of writing this) - * Error types should match most of the time as long as there - is only one error. For multiple errors, what gets raised - will usually not be the same one. They are *not* tested. - - Update 2016-11-30: It is probably not worth maintaining this test - indefinitely and it can be dropped if maintenance becomes a burden. - - """ - - def setup(self): - self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6) - self.b = np.empty((3, 0, 5, 6)) - self.complex_indices = ['skip', Ellipsis, - 0, - # Boolean indices, up to 3-d for some special cases of eating up - # dimensions, also need to test all False - np.array([True, False, False]), - np.array([[True, False], [False, True]]), - np.array([[[False, False], [False, False]]]), - # Some slices: - slice(-5, 5, 2), - slice(1, 1, 100), - slice(4, -1, -2), - slice(None, None, -3), - # Some Fancy indexes: - np.empty((0, 1, 1), dtype=np.intp), # empty and can be broadcast - np.array([0, 1, -2]), - np.array([[2], [0], [1]]), - np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()), - np.array([2, -1], dtype=np.int8), - np.zeros([1]*31, dtype=int), # trigger too large array. - np.array([0., 1.])] # invalid datatype - # Some simpler indices that still cover a bit more - self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]), - 'skip'] - # Very simple ones to fill the rest: - self.fill_indices = [slice(None, None), 0] - - def _get_multi_index(self, arr, indices): - """Mimic multi dimensional indexing. - - Parameters - ---------- - arr : ndarray - Array to be indexed. - indices : tuple of index objects - - Returns - ------- - out : ndarray - An array equivalent to the indexing operation (but always a copy). - `arr[indices]` should be identical. - no_copy : bool - Whether the indexing operation requires a copy. 
If this is `True`, - `np.may_share_memory(arr, arr[indices])` should be `True` (with - some exceptions for scalars and possibly 0-d arrays). - - Notes - ----- - While the function may mostly match the errors of normal indexing this - is generally not the case. - """ - in_indices = list(indices) - indices = [] - # if False, this is a fancy or boolean index - no_copy = True - # number of fancy/scalar indexes that are not consecutive - num_fancy = 0 - # number of dimensions indexed by a "fancy" index - fancy_dim = 0 - # NOTE: This is a funny twist (and probably OK to change). - # The boolean array has illegal indexes, but this is - # allowed if the broadcast fancy-indices are 0-sized. - # This variable is to catch that case. - error_unless_broadcast_to_empty = False - - # We need to handle Ellipsis and make arrays from indices, also - # check if this is fancy indexing (set no_copy). - ndim = 0 - ellipsis_pos = None # define here mostly to replace all but first. - for i, indx in enumerate(in_indices): - if indx is None: - continue - if isinstance(indx, np.ndarray) and indx.dtype == bool: - no_copy = False - if indx.ndim == 0: - raise IndexError - # boolean indices can have higher dimensions - ndim += indx.ndim - fancy_dim += indx.ndim - continue - if indx is Ellipsis: - if ellipsis_pos is None: - ellipsis_pos = i - continue # do not increment ndim counter - raise IndexError - if isinstance(indx, slice): - ndim += 1 - continue - if not isinstance(indx, np.ndarray): - # This could be open for changes in numpy. - # numpy should maybe raise an error if casting to intp - # is not safe. It rejects np.array([1., 2.]) but not - # [1., 2.] as index (same for ie. np.take). 
- # (Note the importance of empty lists if changing this here) - try: - indx = np.array(indx, dtype=np.intp) - except ValueError: - raise IndexError - in_indices[i] = indx - elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i': - raise IndexError('arrays used as indices must be of ' - 'integer (or boolean) type') - if indx.ndim != 0: - no_copy = False - ndim += 1 - fancy_dim += 1 - - if arr.ndim - ndim < 0: - # we can't take more dimensions then we have, not even for 0-d - # arrays. since a[()] makes sense, but not a[(),]. We will - # raise an error later on, unless a broadcasting error occurs - # first. - raise IndexError - - if ndim == 0 and None not in in_indices: - # Well we have no indexes or one Ellipsis. This is legal. - return arr.copy(), no_copy - - if ellipsis_pos is not None: - in_indices[ellipsis_pos:ellipsis_pos+1] = ([slice(None, None)] * - (arr.ndim - ndim)) - - for ax, indx in enumerate(in_indices): - if isinstance(indx, slice): - # convert to an index array - indx = np.arange(*indx.indices(arr.shape[ax])) - indices.append(['s', indx]) - continue - elif indx is None: - # this is like taking a slice with one element from a new axis: - indices.append(['n', np.array([0], dtype=np.intp)]) - arr = arr.reshape((arr.shape[:ax] + (1,) + arr.shape[ax:])) - continue - if isinstance(indx, np.ndarray) and indx.dtype == bool: - if indx.shape != arr.shape[ax:ax+indx.ndim]: - raise IndexError - - try: - flat_indx = np.ravel_multi_index(np.nonzero(indx), - arr.shape[ax:ax+indx.ndim], mode='raise') - except Exception: - error_unless_broadcast_to_empty = True - # fill with 0s instead, and raise error later - flat_indx = np.array([0]*indx.sum(), dtype=np.intp) - # concatenate axis into a single one: - if indx.ndim != 0: - arr = arr.reshape((arr.shape[:ax] - + (np.prod(arr.shape[ax:ax+indx.ndim]),) - + arr.shape[ax+indx.ndim:])) - indx = flat_indx - else: - # This could be changed, a 0-d boolean index can - # make sense (even outside the 0-d indexed array case) - # 
Note that originally this is could be interpreted as - # integer in the full integer special case. - raise IndexError - else: - # If the index is a singleton, the bounds check is done - # before the broadcasting. This used to be different in <1.9 - if indx.ndim == 0: - if indx >= arr.shape[ax] or indx < -arr.shape[ax]: - raise IndexError - if indx.ndim == 0: - # The index is a scalar. This used to be two fold, but if - # fancy indexing was active, the check was done later, - # possibly after broadcasting it away (1.7. or earlier). - # Now it is always done. - if indx >= arr.shape[ax] or indx < - arr.shape[ax]: - raise IndexError - if (len(indices) > 0 and - indices[-1][0] == 'f' and - ax != ellipsis_pos): - # NOTE: There could still have been a 0-sized Ellipsis - # between them. Checked that with ellipsis_pos. - indices[-1].append(indx) - else: - # We have a fancy index that is not after an existing one. - # NOTE: A 0-d array triggers this as well, while one may - # expect it to not trigger it, since a scalar would not be - # considered fancy indexing. - num_fancy += 1 - indices.append(['f', indx]) - - if num_fancy > 1 and not no_copy: - # We have to flush the fancy indexes left - new_indices = indices[:] - axes = list(range(arr.ndim)) - fancy_axes = [] - new_indices.insert(0, ['f']) - ni = 0 - ai = 0 - for indx in indices: - ni += 1 - if indx[0] == 'f': - new_indices[0].extend(indx[1:]) - del new_indices[ni] - ni -= 1 - for ax in range(ai, ai + len(indx[1:])): - fancy_axes.append(ax) - axes.remove(ax) - ai += len(indx) - 1 # axis we are at - indices = new_indices - # and now we need to transpose arr: - arr = arr.transpose(*(fancy_axes + axes)) - - # We only have one 'f' index now and arr is transposed accordingly. - # Now handle newaxis by reshaping... 
- ax = 0 - for indx in indices: - if indx[0] == 'f': - if len(indx) == 1: - continue - # First of all, reshape arr to combine fancy axes into one: - orig_shape = arr.shape - orig_slice = orig_shape[ax:ax + len(indx[1:])] - arr = arr.reshape((arr.shape[:ax] - + (np.prod(orig_slice).astype(int),) - + arr.shape[ax + len(indx[1:]):])) - - # Check if broadcasting works - res = np.broadcast(*indx[1:]) - # unfortunately the indices might be out of bounds. So check - # that first, and use mode='wrap' then. However only if - # there are any indices... - if res.size != 0: - if error_unless_broadcast_to_empty: - raise IndexError - for _indx, _size in zip(indx[1:], orig_slice): - if _indx.size == 0: - continue - if np.any(_indx >= _size) or np.any(_indx < -_size): - raise IndexError - if len(indx[1:]) == len(orig_slice): - if np.product(orig_slice) == 0: - # Work around for a crash or IndexError with 'wrap' - # in some 0-sized cases. - try: - mi = np.ravel_multi_index(indx[1:], orig_slice, - mode='raise') - except Exception: - # This happens with 0-sized orig_slice (sometimes?) - # here it is a ValueError, but indexing gives a: - raise IndexError('invalid index into 0-sized') - else: - mi = np.ravel_multi_index(indx[1:], orig_slice, - mode='wrap') - else: - # Maybe never happens... - raise ValueError - arr = arr.take(mi.ravel(), axis=ax) - try: - arr = arr.reshape((arr.shape[:ax] - + mi.shape - + arr.shape[ax+1:])) - except ValueError: - # too many dimensions, probably - raise IndexError - ax += mi.ndim - continue - - # If we are here, we have a 1D array for take: - arr = arr.take(indx[1], axis=ax) - ax += 1 - - return arr, no_copy - - def _check_multi_index(self, arr, index): - """Check a multi index item getting and simple setting. - - Parameters - ---------- - arr : ndarray - Array to be indexed, must be a reshaped arange. - index : tuple of indexing objects - Index being tested. 
- """ - # Test item getting - try: - mimic_get, no_copy = self._get_multi_index(arr, index) - except Exception as e: - if HAS_REFCOUNT: - prev_refcount = sys.getrefcount(arr) - assert_raises(type(e), arr.__getitem__, index) - assert_raises(type(e), arr.__setitem__, index, 0) - if HAS_REFCOUNT: - assert_equal(prev_refcount, sys.getrefcount(arr)) - return - - self._compare_index_result(arr, index, mimic_get, no_copy) - - def _check_single_index(self, arr, index): - """Check a single index item getting and simple setting. - - Parameters - ---------- - arr : ndarray - Array to be indexed, must be an arange. - index : indexing object - Index being tested. Must be a single index and not a tuple - of indexing objects (see also `_check_multi_index`). - """ - try: - mimic_get, no_copy = self._get_multi_index(arr, (index,)) - except Exception as e: - if HAS_REFCOUNT: - prev_refcount = sys.getrefcount(arr) - assert_raises(type(e), arr.__getitem__, index) - assert_raises(type(e), arr.__setitem__, index, 0) - if HAS_REFCOUNT: - assert_equal(prev_refcount, sys.getrefcount(arr)) - return - - self._compare_index_result(arr, index, mimic_get, no_copy) - - def _compare_index_result(self, arr, index, mimic_get, no_copy): - """Compare mimicked result to indexing result. - """ - arr = arr.copy() - indexed_arr = arr[index] - assert_array_equal(indexed_arr, mimic_get) - # Check if we got a view, unless its a 0-sized or 0-d array. - # (then its not a view, and that does not matter) - if indexed_arr.size != 0 and indexed_arr.ndim != 0: - assert_(np.may_share_memory(indexed_arr, arr) == no_copy) - # Check reference count of the original array - if HAS_REFCOUNT: - if no_copy: - # refcount increases by one: - assert_equal(sys.getrefcount(arr), 3) - else: - assert_equal(sys.getrefcount(arr), 2) - - # Test non-broadcast setitem: - b = arr.copy() - b[index] = mimic_get + 1000 - if b.size == 0: - return # nothing to compare here... 
- if no_copy and indexed_arr.ndim != 0: - # change indexed_arr in-place to manipulate original: - indexed_arr += 1000 - assert_array_equal(arr, b) - return - # Use the fact that the array is originally an arange: - arr.flat[indexed_arr.ravel()] += 1000 - assert_array_equal(arr, b) - - def test_boolean(self): - a = np.array(5) - assert_equal(a[np.array(True)], 5) - a[np.array(True)] = 1 - assert_equal(a, 1) - # NOTE: This is different from normal broadcasting, as - # arr[boolean_array] works like in a multi index. Which means - # it is aligned to the left. This is probably correct for - # consistency with arr[boolean_array,] also no broadcasting - # is done at all - self._check_multi_index( - self.a, (np.zeros_like(self.a, dtype=bool),)) - self._check_multi_index( - self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],)) - self._check_multi_index( - self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],)) - - def test_multidim(self): - # Automatically test combinations with complex indexes on 2nd (or 1st) - # spot and the simple ones in one other spot. - with warnings.catch_warnings(): - # This is so that np.array(True) is not accepted in a full integer - # index, when running the file separately. 
- warnings.filterwarnings('error', '', DeprecationWarning) - warnings.filterwarnings('error', '', np.VisibleDeprecationWarning) - - def isskip(idx): - return isinstance(idx, str) and idx == "skip" - - for simple_pos in [0, 2, 3]: - tocheck = [self.fill_indices, self.complex_indices, - self.fill_indices, self.fill_indices] - tocheck[simple_pos] = self.simple_indices - for index in product(*tocheck): - index = tuple(i for i in index if not isskip(i)) - self._check_multi_index(self.a, index) - self._check_multi_index(self.b, index) - - # Check very simple item getting: - self._check_multi_index(self.a, (0, 0, 0, 0)) - self._check_multi_index(self.b, (0, 0, 0, 0)) - # Also check (simple cases of) too many indices: - assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0)) - assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0) - assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0)) - assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0) - - def test_1d(self): - a = np.arange(10) - for index in self.complex_indices: - self._check_single_index(a, index) - -class TestFloatNonIntegerArgument(object): - """ - These test that ``TypeError`` is raised when you try to use - non-integers as arguments to for indexing and slicing e.g. ``a[0.0:5]`` - and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``. - - """ - def test_valid_indexing(self): - # These should raise no errors. - a = np.array([[[5]]]) - - a[np.array([0])] - a[[0, 0]] - a[:, [0, 0]] - a[:, 0,:] - a[:,:,:] - - def test_valid_slicing(self): - # These should raise no errors. - a = np.array([[[5]]]) - - a[::] - a[0:] - a[:2] - a[0:2] - a[::2] - a[1::2] - a[:2:2] - a[1:2:2] - - def test_non_integer_argument_errors(self): - a = np.array([[5]]) - - assert_raises(TypeError, np.reshape, a, (1., 1., -1)) - assert_raises(TypeError, np.reshape, a, (np.array(1.), -1)) - assert_raises(TypeError, np.take, a, [0], 1.) 
- assert_raises(TypeError, np.take, a, [0], np.float64(1.)) - - def test_non_integer_sequence_multiplication(self): - # NumPy scalar sequence multiply should not work with non-integers - def mult(a, b): - return a * b - - assert_raises(TypeError, mult, [1], np.float_(3)) - # following should be OK - mult([1], np.int_(3)) - - def test_reduce_axis_float_index(self): - d = np.zeros((3,3,3)) - assert_raises(TypeError, np.min, d, 0.5) - assert_raises(TypeError, np.min, d, (0.5, 1)) - assert_raises(TypeError, np.min, d, (1, 2.2)) - assert_raises(TypeError, np.min, d, (.2, 1.2)) - - -class TestBooleanIndexing(object): - # Using a boolean as integer argument/indexing is an error. - def test_bool_as_int_argument_errors(self): - a = np.array([[[1]]]) - - assert_raises(TypeError, np.reshape, a, (True, -1)) - assert_raises(TypeError, np.reshape, a, (np.bool_(True), -1)) - # Note that operator.index(np.array(True)) does not work, a boolean - # array is thus also deprecated, but not with the same message: - assert_raises(TypeError, operator.index, np.array(True)) - assert_warns(DeprecationWarning, operator.index, np.True_) - assert_raises(TypeError, np.take, args=(a, [0], False)) - - def test_boolean_indexing_weirdness(self): - # Weird boolean indexing things - a = np.ones((2, 3, 4)) - a[False, True, ...].shape == (0, 2, 3, 4) - a[True, [0, 1], True, True, [1], [[2]]] == (1, 2) - assert_raises(IndexError, lambda: a[False, [0, 1], ...]) - - -class TestArrayToIndexDeprecation(object): - """Creating an an index from array not 0-D is an error. - - """ - def test_array_to_index_error(self): - # so no exception is expected. The raising is effectively tested above. - a = np.array([[[1]]]) - - assert_raises(TypeError, operator.index, np.array([1])) - assert_raises(TypeError, np.reshape, a, (a, -1)) - assert_raises(TypeError, np.take, a, [0], a) - - -class TestNonIntegerArrayLike(object): - """Tests that array_likes only valid if can safely cast to integer. 
- - For instance, lists give IndexError when they cannot be safely cast to - an integer. - - """ - def test_basic(self): - a = np.arange(10) - - assert_raises(IndexError, a.__getitem__, [0.5, 1.5]) - assert_raises(IndexError, a.__getitem__, (['1', '2'],)) - - # The following is valid - a.__getitem__([]) - - -class TestMultipleEllipsisError(object): - """An index can only have a single ellipsis. - - """ - def test_basic(self): - a = np.arange(10) - assert_raises(IndexError, lambda: a[..., ...]) - assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 2,)) - assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,)) - - -class TestCApiAccess(object): - def test_getitem(self): - subscript = functools.partial(array_indexing, 0) - - # 0-d arrays don't work: - assert_raises(IndexError, subscript, np.ones(()), 0) - # Out of bound values: - assert_raises(IndexError, subscript, np.ones(10), 11) - assert_raises(IndexError, subscript, np.ones(10), -11) - assert_raises(IndexError, subscript, np.ones((10, 10)), 11) - assert_raises(IndexError, subscript, np.ones((10, 10)), -11) - - a = np.arange(10) - assert_array_equal(a[4], subscript(a, 4)) - a = a.reshape(5, 2) - assert_array_equal(a[-4], subscript(a, -4)) - - def test_setitem(self): - assign = functools.partial(array_indexing, 1) - - # Deletion is impossible: - assert_raises(ValueError, assign, np.ones(10), 0) - # 0-d arrays don't work: - assert_raises(IndexError, assign, np.ones(()), 0, 0) - # Out of bound values: - assert_raises(IndexError, assign, np.ones(10), 11, 0) - assert_raises(IndexError, assign, np.ones(10), -11, 0) - assert_raises(IndexError, assign, np.ones((10, 10)), 11, 0) - assert_raises(IndexError, assign, np.ones((10, 10)), -11, 0) - - a = np.arange(10) - assign(a, 4, 10) - assert_(a[4] == 10) - - a = a.reshape(5, 2) - assign(a, 4, 10) - assert_array_equal(a[-1], [10, 10]) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_issue14735.py 
b/venv/lib/python3.7/site-packages/numpy/core/tests/test_issue14735.py deleted file mode 100644 index 6105c8e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_issue14735.py +++ /dev/null @@ -1,29 +0,0 @@ -import pytest -import warnings -import numpy as np - - -class Wrapper: - def __init__(self, array): - self.array = array - - def __len__(self): - return len(self.array) - - def __getitem__(self, item): - return type(self)(self.array[item]) - - def __getattr__(self, name): - if name.startswith("__array_"): - warnings.warn("object got converted", UserWarning, stacklevel=1) - - return getattr(self.array, name) - - def __repr__(self): - return "".format(self=self) - -@pytest.mark.filterwarnings("error") -def test_getattr_warning(): - array = Wrapper(np.arange(10)) - with pytest.raises(UserWarning, match="object got converted"): - np.asarray(array) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_item_selection.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_item_selection.py deleted file mode 100644 index 9bd2468..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_item_selection.py +++ /dev/null @@ -1,87 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys - -import numpy as np -from numpy.testing import ( - assert_, assert_raises, assert_array_equal, HAS_REFCOUNT - ) - - -class TestTake(object): - def test_simple(self): - a = [[1, 2], [3, 4]] - a_str = [[b'1', b'2'], [b'3', b'4']] - modes = ['raise', 'wrap', 'clip'] - indices = [-1, 4] - index_arrays = [np.empty(0, dtype=np.intp), - np.empty(tuple(), dtype=np.intp), - np.empty((1, 1), dtype=np.intp)] - real_indices = {'raise': {-1: 1, 4: IndexError}, - 'wrap': {-1: 1, 4: 0}, - 'clip': {-1: 0, 4: 1}} - # Currently all types but object, use the same function generation. - # So it should not be necessary to test all. However test also a non - # refcounted struct on top of object. 
- types = int, object, np.dtype([('', 'i', 2)]) - for t in types: - # ta works, even if the array may be odd if buffer interface is used - ta = np.array(a if np.issubdtype(t, np.number) else a_str, dtype=t) - tresult = list(ta.T.copy()) - for index_array in index_arrays: - if index_array.size != 0: - tresult[0].shape = (2,) + index_array.shape - tresult[1].shape = (2,) + index_array.shape - for mode in modes: - for index in indices: - real_index = real_indices[mode][index] - if real_index is IndexError and index_array.size != 0: - index_array.put(0, index) - assert_raises(IndexError, ta.take, index_array, - mode=mode, axis=1) - elif index_array.size != 0: - index_array.put(0, index) - res = ta.take(index_array, mode=mode, axis=1) - assert_array_equal(res, tresult[real_index]) - else: - res = ta.take(index_array, mode=mode, axis=1) - assert_(res.shape == (2,) + index_array.shape) - - def test_refcounting(self): - objects = [object() for i in range(10)] - for mode in ('raise', 'clip', 'wrap'): - a = np.array(objects) - b = np.array([2, 2, 4, 5, 3, 5]) - a.take(b, out=a[:6], mode=mode) - del a - if HAS_REFCOUNT: - assert_(all(sys.getrefcount(o) == 3 for o in objects)) - # not contiguous, example: - a = np.array(objects * 2)[::2] - a.take(b, out=a[:6], mode=mode) - del a - if HAS_REFCOUNT: - assert_(all(sys.getrefcount(o) == 3 for o in objects)) - - def test_unicode_mode(self): - d = np.arange(10) - k = b'\xc3\xa4'.decode("UTF8") - assert_raises(ValueError, d.take, 5, mode=k) - - def test_empty_partition(self): - # In reference to github issue #6530 - a_original = np.array([0, 2, 4, 6, 8, 10]) - a = a_original.copy() - - # An empty partition should be a successful no-op - a.partition(np.array([], dtype=np.int16)) - - assert_array_equal(a, a_original) - - def test_empty_argpartition(self): - # In reference to github issue #6530 - a = np.array([0, 2, 4, 6, 8, 10]) - a = a.argpartition(np.array([], dtype=np.int16)) - - b = np.array([0, 1, 2, 3, 4, 5]) - 
assert_array_equal(a, b) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_longdouble.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_longdouble.py deleted file mode 100644 index 2b6e1c5..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_longdouble.py +++ /dev/null @@ -1,357 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import warnings -import pytest - -import numpy as np -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_warns, assert_array_equal, - temppath, - ) -from numpy.core.tests._locales import CommaDecimalPointLocale - -LD_INFO = np.finfo(np.longdouble) -longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps) - - -_o = 1 + LD_INFO.eps -string_to_longdouble_inaccurate = (_o != np.longdouble(repr(_o))) -del _o - - -def test_scalar_extraction(): - """Confirm that extracting a value doesn't convert to python float""" - o = 1 + LD_INFO.eps - a = np.array([o, o, o]) - assert_equal(a[1], o) - - -# Conversions string -> long double - -# 0.1 not exactly representable in base 2 floating point. -repr_precision = len(repr(np.longdouble(0.1))) -# +2 from macro block starting around line 842 in scalartypes.c.src. -@pytest.mark.skipif(LD_INFO.precision + 2 >= repr_precision, - reason="repr precision not enough to show eps") -def test_repr_roundtrip(): - # We will only see eps in repr if within printing precision. 
- o = 1 + LD_INFO.eps - assert_equal(np.longdouble(repr(o)), o, "repr was %s" % repr(o)) - - -def test_unicode(): - np.longdouble(u"1.2") - - -def test_string(): - np.longdouble("1.2") - - -def test_bytes(): - np.longdouble(b"1.2") - - -@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") -def test_repr_roundtrip_bytes(): - o = 1 + LD_INFO.eps - assert_equal(np.longdouble(repr(o).encode("ascii")), o) - - -def test_bogus_string(): - assert_raises(ValueError, np.longdouble, "spam") - assert_raises(ValueError, np.longdouble, "1.0 flub") - - -@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") -def test_fromstring(): - o = 1 + LD_INFO.eps - s = (" " + repr(o))*5 - a = np.array([o]*5) - assert_equal(np.fromstring(s, sep=" ", dtype=np.longdouble), a, - err_msg="reading '%s'" % s) - - -def test_fromstring_complex(): - for ctype in ["complex", "cdouble", "cfloat"]: - # Check spacing between separator - assert_equal(np.fromstring("1, 2 , 3 ,4", sep=",", dtype=ctype), - np.array([1., 2., 3., 4.])) - # Real component not specified - assert_equal(np.fromstring("1j, -2j, 3j, 4e1j", sep=",", dtype=ctype), - np.array([1.j, -2.j, 3.j, 40.j])) - # Both components specified - assert_equal(np.fromstring("1+1j,2-2j, -3+3j, -4e1+4j", sep=",", dtype=ctype), - np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. 
+ 4j])) - # Spaces at wrong places - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1+2 j,3", dtype=ctype, sep=","), - np.array([1.])) - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1+ 2j,3", dtype=ctype, sep=","), - np.array([1.])) - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1 +2j,3", dtype=ctype, sep=","), - np.array([1.])) - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1+j", dtype=ctype, sep=","), - np.array([1.])) - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1+", dtype=ctype, sep=","), - np.array([1.])) - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1j+1", dtype=ctype, sep=","), - np.array([1j])) - - -def test_fromstring_bogus(): - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "), - np.array([1., 2., 3.])) - - -def test_fromstring_empty(): - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("xxxxx", sep="x"), - np.array([])) - - -def test_fromstring_missing(): - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1xx3x4x5x6", sep="x"), - np.array([1])) - - -class TestFileBased(object): - - ldbl = 1 + LD_INFO.eps - tgt = np.array([ldbl]*5) - out = ''.join([repr(t) + '\n' for t in tgt]) - - def test_fromfile_bogus(self): - with temppath() as path: - with open(path, 'wt') as f: - f.write("1. 2. 3. 
flop 4.\n") - - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=float, sep=" ") - assert_equal(res, np.array([1., 2., 3.])) - - def test_fromfile_complex(self): - for ctype in ["complex", "cdouble", "cfloat"]: - # Check spacing between separator and only real component specified - with temppath() as path: - with open(path, 'wt') as f: - f.write("1, 2 , 3 ,4\n") - - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1., 2., 3., 4.])) - - # Real component not specified - with temppath() as path: - with open(path, 'wt') as f: - f.write("1j, -2j, 3j, 4e1j\n") - - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.j, -2.j, 3.j, 40.j])) - - # Both components specified - with temppath() as path: - with open(path, 'wt') as f: - f.write("1+1j,2-2j, -3+3j, -4e1+4j\n") - - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j])) - - # Spaces at wrong places - with temppath() as path: - with open(path, 'wt') as f: - f.write("1+2 j,3\n") - - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.])) - - # Spaces at wrong places - with temppath() as path: - with open(path, 'wt') as f: - f.write("1+ 2j,3\n") - - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.])) - - # Spaces at wrong places - with temppath() as path: - with open(path, 'wt') as f: - f.write("1 +2j,3\n") - - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.])) - - # Spaces at wrong places - with temppath() as path: - with open(path, 'wt') as f: - f.write("1+j\n") - - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.])) - - # Spaces at wrong places - with temppath() as path: - with open(path, 'wt') as f: - 
f.write("1+\n") - - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.])) - - # Spaces at wrong places - with temppath() as path: - with open(path, 'wt') as f: - f.write("1j+1\n") - - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.j])) - - - - @pytest.mark.skipif(string_to_longdouble_inaccurate, - reason="Need strtold_l") - def test_fromfile(self): - with temppath() as path: - with open(path, 'wt') as f: - f.write(self.out) - res = np.fromfile(path, dtype=np.longdouble, sep="\n") - assert_equal(res, self.tgt) - - @pytest.mark.skipif(string_to_longdouble_inaccurate, - reason="Need strtold_l") - def test_genfromtxt(self): - with temppath() as path: - with open(path, 'wt') as f: - f.write(self.out) - res = np.genfromtxt(path, dtype=np.longdouble) - assert_equal(res, self.tgt) - - @pytest.mark.skipif(string_to_longdouble_inaccurate, - reason="Need strtold_l") - def test_loadtxt(self): - with temppath() as path: - with open(path, 'wt') as f: - f.write(self.out) - res = np.loadtxt(path, dtype=np.longdouble) - assert_equal(res, self.tgt) - - @pytest.mark.skipif(string_to_longdouble_inaccurate, - reason="Need strtold_l") - def test_tofile_roundtrip(self): - with temppath() as path: - self.tgt.tofile(path, sep=" ") - res = np.fromfile(path, dtype=np.longdouble, sep=" ") - assert_equal(res, self.tgt) - - -# Conversions long double -> string - - -def test_repr_exact(): - o = 1 + LD_INFO.eps - assert_(repr(o) != '1') - - -@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376") -@pytest.mark.skipif(string_to_longdouble_inaccurate, - reason="Need strtold_l") -def test_format(): - o = 1 + LD_INFO.eps - assert_("{0:.40g}".format(o) != '1') - - -@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376") -@pytest.mark.skipif(string_to_longdouble_inaccurate, - reason="Need strtold_l") -def test_percent(): - o = 1 + 
LD_INFO.eps - assert_("%.40g" % o != '1') - - -@pytest.mark.skipif(longdouble_longer_than_double, - reason="array repr problem") -@pytest.mark.skipif(string_to_longdouble_inaccurate, - reason="Need strtold_l") -def test_array_repr(): - o = 1 + LD_INFO.eps - a = np.array([o]) - b = np.array([1], dtype=np.longdouble) - if not np.all(a != b): - raise ValueError("precision loss creating arrays") - assert_(repr(a) != repr(b)) - -# -# Locale tests: scalar types formatting should be independent of the locale -# - -class TestCommaDecimalPointLocale(CommaDecimalPointLocale): - - def test_repr_roundtrip_foreign(self): - o = 1.5 - assert_equal(o, np.longdouble(repr(o))) - - def test_fromstring_foreign_repr(self): - f = 1.234 - a = np.fromstring(repr(f), dtype=float, sep=" ") - assert_equal(a[0], f) - - def test_fromstring_best_effort_float(self): - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1,234", dtype=float, sep=" "), - np.array([1.])) - - def test_fromstring_best_effort(self): - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "), - np.array([1.])) - - def test_fromstring_foreign(self): - s = "1.234" - a = np.fromstring(s, dtype=np.longdouble, sep=" ") - assert_equal(a[0], np.longdouble(s)) - - def test_fromstring_foreign_sep(self): - a = np.array([1, 2, 3, 4]) - b = np.fromstring("1,2,3,4,", dtype=np.longdouble, sep=",") - assert_array_equal(a, b) - - def test_fromstring_foreign_value(self): - with assert_warns(DeprecationWarning): - b = np.fromstring("1,234", dtype=np.longdouble, sep=" ") - assert_array_equal(b[0], 1) - - -@pytest.mark.parametrize("int_val", [ - # cases discussed in gh-10723 - # and gh-9968 - 2 ** 1024, 0]) -def test_longdouble_from_int(int_val): - # for issue gh-9968 - str_val = str(int_val) - # we'll expect a RuntimeWarning on platforms - # with np.longdouble equivalent to np.double - # for large integer input - with warnings.catch_warnings(record=True) as w: - 
warnings.filterwarnings('always', '', RuntimeWarning) - # can be inf==inf on some platforms - assert np.longdouble(int_val) == np.longdouble(str_val) - # we can't directly compare the int and - # max longdouble value on all platforms - if np.allclose(np.finfo(np.longdouble).max, - np.finfo(np.double).max) and w: - assert w[0].category is RuntimeWarning - -@pytest.mark.parametrize("bool_val", [ - True, False]) -def test_longdouble_from_bool(bool_val): - assert np.longdouble(bool_val) == np.longdouble(int(bool_val)) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_machar.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_machar.py deleted file mode 100644 index ab8800c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_machar.py +++ /dev/null @@ -1,32 +0,0 @@ -""" -Test machar. Given recent changes to hardcode type data, we might want to get -rid of both MachAr and this test at some point. - -""" -from __future__ import division, absolute_import, print_function - -from numpy.core.machar import MachAr -import numpy.core.numerictypes as ntypes -from numpy import errstate, array - - -class TestMachAr(object): - def _run_machar_highprec(self): - # Instantiate MachAr instance with high enough precision to cause - # underflow - try: - hiprec = ntypes.float96 - MachAr(lambda v:array([v], hiprec)) - except AttributeError: - # Fixme, this needs to raise a 'skip' exception. - "Skipping test: no ntypes.float96 available on this platform." - - def test_underlow(self): - # Regression test for #759: - # instantiating MachAr for dtype = np.float96 raises spurious warning. - with errstate(all='raise'): - try: - self._run_machar_highprec() - except FloatingPointError as e: - msg = "Caught %s exception, should not have been raised." 
% e - raise AssertionError(msg) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_mem_overlap.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_mem_overlap.py deleted file mode 100644 index 3c8e0e7..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_mem_overlap.py +++ /dev/null @@ -1,950 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import itertools -import pytest - -import numpy as np -from numpy.core._multiarray_tests import solve_diophantine, internal_overlap -from numpy.core import _umath_tests -from numpy.lib.stride_tricks import as_strided -from numpy.compat import long -from numpy.testing import ( - assert_, assert_raises, assert_equal, assert_array_equal - ) - -if sys.version_info[0] >= 3: - xrange = range - - -ndims = 2 -size = 10 -shape = tuple([size] * ndims) - -MAY_SHARE_BOUNDS = 0 -MAY_SHARE_EXACT = -1 - - -def _indices_for_nelems(nelems): - """Returns slices of length nelems, from start onwards, in direction sign.""" - - if nelems == 0: - return [size // 2] # int index - - res = [] - for step in (1, 2): - for sign in (-1, 1): - start = size // 2 - nelems * step * sign // 2 - stop = start + nelems * step * sign - res.append(slice(start, stop, step * sign)) - - return res - - -def _indices_for_axis(): - """Returns (src, dst) pairs of indices.""" - - res = [] - for nelems in (0, 2, 3): - ind = _indices_for_nelems(nelems) - - # no itertools.product available in Py2.4 - res.extend([(a, b) for a in ind for b in ind]) # all assignments of size "nelems" - - return res - - -def _indices(ndims): - """Returns ((axis0_src, axis0_dst), (axis1_src, axis1_dst), ... 
) index pairs.""" - - ind = _indices_for_axis() - - # no itertools.product available in Py2.4 - - res = [[]] - for i in range(ndims): - newres = [] - for elem in ind: - for others in res: - newres.append([elem] + others) - res = newres - - return res - - -def _check_assignment(srcidx, dstidx): - """Check assignment arr[dstidx] = arr[srcidx] works.""" - - arr = np.arange(np.product(shape)).reshape(shape) - - cpy = arr.copy() - - cpy[dstidx] = arr[srcidx] - arr[dstidx] = arr[srcidx] - - assert_(np.all(arr == cpy), - 'assigning arr[%s] = arr[%s]' % (dstidx, srcidx)) - - -def test_overlapping_assignments(): - # Test automatically generated assignments which overlap in memory. - - inds = _indices(ndims) - - for ind in inds: - srcidx = tuple([a[0] for a in ind]) - dstidx = tuple([a[1] for a in ind]) - - _check_assignment(srcidx, dstidx) - - -@pytest.mark.slow -def test_diophantine_fuzz(): - # Fuzz test the diophantine solver - rng = np.random.RandomState(1234) - - max_int = np.iinfo(np.intp).max - - for ndim in range(10): - feasible_count = 0 - infeasible_count = 0 - - min_count = 500//(ndim + 1) - - while min(feasible_count, infeasible_count) < min_count: - # Ensure big and small integer problems - A_max = 1 + rng.randint(0, 11, dtype=np.intp)**6 - U_max = rng.randint(0, 11, dtype=np.intp)**6 - - A_max = min(max_int, A_max) - U_max = min(max_int-1, U_max) - - A = tuple(int(rng.randint(1, A_max+1, dtype=np.intp)) - for j in range(ndim)) - U = tuple(int(rng.randint(0, U_max+2, dtype=np.intp)) - for j in range(ndim)) - - b_ub = min(max_int-2, sum(a*ub for a, ub in zip(A, U))) - b = rng.randint(-1, b_ub+2, dtype=np.intp) - - if ndim == 0 and feasible_count < min_count: - b = 0 - - X = solve_diophantine(A, U, b) - - if X is None: - # Check the simplified decision problem agrees - X_simplified = solve_diophantine(A, U, b, simplify=1) - assert_(X_simplified is None, (A, U, b, X_simplified)) - - # Check no solution exists (provided the problem is - # small enough so that brute 
force checking doesn't - # take too long) - try: - ranges = tuple(xrange(0, a*ub+1, a) for a, ub in zip(A, U)) - except OverflowError: - # xrange on 32-bit Python 2 may overflow - continue - - size = 1 - for r in ranges: - size *= len(r) - if size < 100000: - assert_(not any(sum(w) == b for w in itertools.product(*ranges))) - infeasible_count += 1 - else: - # Check the simplified decision problem agrees - X_simplified = solve_diophantine(A, U, b, simplify=1) - assert_(X_simplified is not None, (A, U, b, X_simplified)) - - # Check validity - assert_(sum(a*x for a, x in zip(A, X)) == b) - assert_(all(0 <= x <= ub for x, ub in zip(X, U))) - feasible_count += 1 - - -def test_diophantine_overflow(): - # Smoke test integer overflow detection - max_intp = np.iinfo(np.intp).max - max_int64 = np.iinfo(np.int64).max - - if max_int64 <= max_intp: - # Check that the algorithm works internally in 128-bit; - # solving this problem requires large intermediate numbers - A = (max_int64//2, max_int64//2 - 10) - U = (max_int64//2, max_int64//2 - 10) - b = 2*(max_int64//2) - 10 - - assert_equal(solve_diophantine(A, U, b), (1, 1)) - - -def check_may_share_memory_exact(a, b): - got = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT) - - assert_equal(np.may_share_memory(a, b), - np.may_share_memory(a, b, max_work=MAY_SHARE_BOUNDS)) - - a.fill(0) - b.fill(0) - a.fill(1) - exact = b.any() - - err_msg = "" - if got != exact: - err_msg = " " + "\n ".join([ - "base_a - base_b = %r" % (a.__array_interface__['data'][0] - b.__array_interface__['data'][0],), - "shape_a = %r" % (a.shape,), - "shape_b = %r" % (b.shape,), - "strides_a = %r" % (a.strides,), - "strides_b = %r" % (b.strides,), - "size_a = %r" % (a.size,), - "size_b = %r" % (b.size,) - ]) - - assert_equal(got, exact, err_msg=err_msg) - - -def test_may_share_memory_manual(): - # Manual test cases for may_share_memory - - # Base arrays - xs0 = [ - np.zeros([13, 21, 23, 22], dtype=np.int8), - np.zeros([13, 21, 23*2, 22], 
dtype=np.int8)[:,:,::2,:] - ] - - # Generate all negative stride combinations - xs = [] - for x in xs0: - for ss in itertools.product(*(([slice(None), slice(None, None, -1)],)*4)): - xp = x[ss] - xs.append(xp) - - for x in xs: - # The default is a simple extent check - assert_(np.may_share_memory(x[:,0,:], x[:,1,:])) - assert_(np.may_share_memory(x[:,0,:], x[:,1,:], max_work=None)) - - # Exact checks - check_may_share_memory_exact(x[:,0,:], x[:,1,:]) - check_may_share_memory_exact(x[:,::7], x[:,3::3]) - - try: - xp = x.ravel() - if xp.flags.owndata: - continue - xp = xp.view(np.int16) - except ValueError: - continue - - # 0-size arrays cannot overlap - check_may_share_memory_exact(x.ravel()[6:6], - xp.reshape(13, 21, 23, 11)[:,::7]) - - # Test itemsize is dealt with - check_may_share_memory_exact(x[:,::7], - xp.reshape(13, 21, 23, 11)) - check_may_share_memory_exact(x[:,::7], - xp.reshape(13, 21, 23, 11)[:,3::3]) - check_may_share_memory_exact(x.ravel()[6:7], - xp.reshape(13, 21, 23, 11)[:,::7]) - - # Check unit size - x = np.zeros([1], dtype=np.int8) - check_may_share_memory_exact(x, x) - check_may_share_memory_exact(x, x.copy()) - - -def iter_random_view_pairs(x, same_steps=True, equal_size=False): - rng = np.random.RandomState(1234) - - if equal_size and same_steps: - raise ValueError() - - def random_slice(n, step): - start = rng.randint(0, n+1, dtype=np.intp) - stop = rng.randint(start, n+1, dtype=np.intp) - if rng.randint(0, 2, dtype=np.intp) == 0: - stop, start = start, stop - step *= -1 - return slice(start, stop, step) - - def random_slice_fixed_size(n, step, size): - start = rng.randint(0, n+1 - size*step) - stop = start + (size-1)*step + 1 - if rng.randint(0, 2) == 0: - stop, start = start-1, stop-1 - if stop < 0: - stop = None - step *= -1 - return slice(start, stop, step) - - # First a few regular views - yield x, x - for j in range(1, 7, 3): - yield x[j:], x[:-j] - yield x[...,j:], x[...,:-j] - - # An array with zero stride internal overlap - strides 
= list(x.strides) - strides[0] = 0 - xp = as_strided(x, shape=x.shape, strides=strides) - yield x, xp - yield xp, xp - - # An array with non-zero stride internal overlap - strides = list(x.strides) - if strides[0] > 1: - strides[0] = 1 - xp = as_strided(x, shape=x.shape, strides=strides) - yield x, xp - yield xp, xp - - # Then discontiguous views - while True: - steps = tuple(rng.randint(1, 11, dtype=np.intp) - if rng.randint(0, 5, dtype=np.intp) == 0 else 1 - for j in range(x.ndim)) - s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps)) - - t1 = np.arange(x.ndim) - rng.shuffle(t1) - - if equal_size: - t2 = t1 - else: - t2 = np.arange(x.ndim) - rng.shuffle(t2) - - a = x[s1] - - if equal_size: - if a.size == 0: - continue - - steps2 = tuple(rng.randint(1, max(2, p//(1+pa))) - if rng.randint(0, 5) == 0 else 1 - for p, s, pa in zip(x.shape, s1, a.shape)) - s2 = tuple(random_slice_fixed_size(p, s, pa) - for p, s, pa in zip(x.shape, steps2, a.shape)) - elif same_steps: - steps2 = steps - else: - steps2 = tuple(rng.randint(1, 11, dtype=np.intp) - if rng.randint(0, 5, dtype=np.intp) == 0 else 1 - for j in range(x.ndim)) - - if not equal_size: - s2 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps2)) - - a = a.transpose(t1) - b = x[s2].transpose(t2) - - yield a, b - - -def check_may_share_memory_easy_fuzz(get_max_work, same_steps, min_count): - # Check that overlap problems with common strides are solved with - # little work. - x = np.zeros([17,34,71,97], dtype=np.int16) - - feasible = 0 - infeasible = 0 - - pair_iter = iter_random_view_pairs(x, same_steps) - - while min(feasible, infeasible) < min_count: - a, b = next(pair_iter) - - bounds_overlap = np.may_share_memory(a, b) - may_share_answer = np.may_share_memory(a, b) - easy_answer = np.may_share_memory(a, b, max_work=get_max_work(a, b)) - exact_answer = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT) - - if easy_answer != exact_answer: - # assert_equal is slow... 
- assert_equal(easy_answer, exact_answer) - - if may_share_answer != bounds_overlap: - assert_equal(may_share_answer, bounds_overlap) - - if bounds_overlap: - if exact_answer: - feasible += 1 - else: - infeasible += 1 - - -@pytest.mark.slow -def test_may_share_memory_easy_fuzz(): - # Check that overlap problems with common strides are always - # solved with little work. - - check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: 1, - same_steps=True, - min_count=2000) - - -@pytest.mark.slow -def test_may_share_memory_harder_fuzz(): - # Overlap problems with not necessarily common strides take more - # work. - # - # The work bound below can't be reduced much. Harder problems can - # also exist but not be detected here, as the set of problems - # comes from RNG. - - check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: max(a.size, b.size)//2, - same_steps=False, - min_count=2000) - - -def test_shares_memory_api(): - x = np.zeros([4, 5, 6], dtype=np.int8) - - assert_equal(np.shares_memory(x, x), True) - assert_equal(np.shares_memory(x, x.copy()), False) - - a = x[:,::2,::3] - b = x[:,::3,::2] - assert_equal(np.shares_memory(a, b), True) - assert_equal(np.shares_memory(a, b, max_work=None), True) - assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=1) - assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=long(1)) - - -def test_may_share_memory_bad_max_work(): - x = np.zeros([1]) - assert_raises(OverflowError, np.may_share_memory, x, x, max_work=10**100) - assert_raises(OverflowError, np.shares_memory, x, x, max_work=10**100) - - -def test_internal_overlap_diophantine(): - def check(A, U, exists=None): - X = solve_diophantine(A, U, 0, require_ub_nontrivial=1) - - if exists is None: - exists = (X is not None) - - if X is not None: - assert_(sum(a*x for a, x in zip(A, X)) == sum(a*u//2 for a, u in zip(A, U))) - assert_(all(0 <= x <= u for x, u in zip(X, U))) - assert_(any(x != u//2 for x, u in zip(X, U))) - - if exists: - assert_(X is not 
None, repr(X)) - else: - assert_(X is None, repr(X)) - - # Smoke tests - check((3, 2), (2*2, 3*2), exists=True) - check((3*2, 2), (15*2, (3-1)*2), exists=False) - - -def test_internal_overlap_slices(): - # Slicing an array never generates internal overlap - - x = np.zeros([17,34,71,97], dtype=np.int16) - - rng = np.random.RandomState(1234) - - def random_slice(n, step): - start = rng.randint(0, n+1, dtype=np.intp) - stop = rng.randint(start, n+1, dtype=np.intp) - if rng.randint(0, 2, dtype=np.intp) == 0: - stop, start = start, stop - step *= -1 - return slice(start, stop, step) - - cases = 0 - min_count = 5000 - - while cases < min_count: - steps = tuple(rng.randint(1, 11, dtype=np.intp) - if rng.randint(0, 5, dtype=np.intp) == 0 else 1 - for j in range(x.ndim)) - t1 = np.arange(x.ndim) - rng.shuffle(t1) - s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps)) - a = x[s1].transpose(t1) - - assert_(not internal_overlap(a)) - cases += 1 - - -def check_internal_overlap(a, manual_expected=None): - got = internal_overlap(a) - - # Brute-force check - m = set() - ranges = tuple(xrange(n) for n in a.shape) - for v in itertools.product(*ranges): - offset = sum(s*w for s, w in zip(a.strides, v)) - if offset in m: - expected = True - break - else: - m.add(offset) - else: - expected = False - - # Compare - if got != expected: - assert_equal(got, expected, err_msg=repr((a.strides, a.shape))) - if manual_expected is not None and expected != manual_expected: - assert_equal(expected, manual_expected) - return got - - -def test_internal_overlap_manual(): - # Stride tricks can construct arrays with internal overlap - - # We don't care about memory bounds, the array is not - # read/write accessed - x = np.arange(1).astype(np.int8) - - # Check low-dimensional special cases - - check_internal_overlap(x, False) # 1-dim - check_internal_overlap(x.reshape([]), False) # 0-dim - - a = as_strided(x, strides=(3, 4), shape=(4, 4)) - check_internal_overlap(a, False) - - a = 
as_strided(x, strides=(3, 4), shape=(5, 4)) - check_internal_overlap(a, True) - - a = as_strided(x, strides=(0,), shape=(0,)) - check_internal_overlap(a, False) - - a = as_strided(x, strides=(0,), shape=(1,)) - check_internal_overlap(a, False) - - a = as_strided(x, strides=(0,), shape=(2,)) - check_internal_overlap(a, True) - - a = as_strided(x, strides=(0, -9993), shape=(87, 22)) - check_internal_overlap(a, True) - - a = as_strided(x, strides=(0, -9993), shape=(1, 22)) - check_internal_overlap(a, False) - - a = as_strided(x, strides=(0, -9993), shape=(0, 22)) - check_internal_overlap(a, False) - - -def test_internal_overlap_fuzz(): - # Fuzz check; the brute-force check is fairly slow - - x = np.arange(1).astype(np.int8) - - overlap = 0 - no_overlap = 0 - min_count = 100 - - rng = np.random.RandomState(1234) - - while min(overlap, no_overlap) < min_count: - ndim = rng.randint(1, 4, dtype=np.intp) - - strides = tuple(rng.randint(-1000, 1000, dtype=np.intp) - for j in range(ndim)) - shape = tuple(rng.randint(1, 30, dtype=np.intp) - for j in range(ndim)) - - a = as_strided(x, strides=strides, shape=shape) - result = check_internal_overlap(a) - - if result: - overlap += 1 - else: - no_overlap += 1 - - -def test_non_ndarray_inputs(): - # Regression check for gh-5604 - - class MyArray(object): - def __init__(self, data): - self.data = data - - @property - def __array_interface__(self): - return self.data.__array_interface__ - - class MyArray2(object): - def __init__(self, data): - self.data = data - - def __array__(self): - return self.data - - for cls in [MyArray, MyArray2]: - x = np.arange(5) - - assert_(np.may_share_memory(cls(x[::2]), x[1::2])) - assert_(not np.shares_memory(cls(x[::2]), x[1::2])) - - assert_(np.shares_memory(cls(x[1::3]), x[::2])) - assert_(np.may_share_memory(cls(x[1::3]), x[::2])) - - -def view_element_first_byte(x): - """Construct an array viewing the first byte of each element of `x`""" - from numpy.lib.stride_tricks import DummyArray - 
interface = dict(x.__array_interface__) - interface['typestr'] = '|b1' - interface['descr'] = [('', '|b1')] - return np.asarray(DummyArray(interface, x)) - - -def assert_copy_equivalent(operation, args, out, **kwargs): - """ - Check that operation(*args, out=out) produces results - equivalent to out[...] = operation(*args, out=out.copy()) - """ - - kwargs['out'] = out - kwargs2 = dict(kwargs) - kwargs2['out'] = out.copy() - - out_orig = out.copy() - out[...] = operation(*args, **kwargs2) - expected = out.copy() - out[...] = out_orig - - got = operation(*args, **kwargs).copy() - - if (got != expected).any(): - assert_equal(got, expected) - - -class TestUFunc(object): - """ - Test ufunc call memory overlap handling - """ - - def check_unary_fuzz(self, operation, get_out_axis_size, dtype=np.int16, - count=5000): - shapes = [7, 13, 8, 21, 29, 32] - - rng = np.random.RandomState(1234) - - for ndim in range(1, 6): - x = rng.randint(0, 2**16, size=shapes[:ndim]).astype(dtype) - - it = iter_random_view_pairs(x, same_steps=False, equal_size=True) - - min_count = count // (ndim + 1)**2 - - overlapping = 0 - while overlapping < min_count: - a, b = next(it) - - a_orig = a.copy() - b_orig = b.copy() - - if get_out_axis_size is None: - assert_copy_equivalent(operation, [a], out=b) - - if np.shares_memory(a, b): - overlapping += 1 - else: - for axis in itertools.chain(range(ndim), [None]): - a[...] = a_orig - b[...] 
= b_orig - - # Determine size for reduction axis (None if scalar) - outsize, scalarize = get_out_axis_size(a, b, axis) - if outsize == 'skip': - continue - - # Slice b to get an output array of the correct size - sl = [slice(None)] * ndim - if axis is None: - if outsize is None: - sl = [slice(0, 1)] + [0]*(ndim - 1) - else: - sl = [slice(0, outsize)] + [0]*(ndim - 1) - else: - if outsize is None: - k = b.shape[axis]//2 - if ndim == 1: - sl[axis] = slice(k, k + 1) - else: - sl[axis] = k - else: - assert b.shape[axis] >= outsize - sl[axis] = slice(0, outsize) - b_out = b[tuple(sl)] - - if scalarize: - b_out = b_out.reshape([]) - - if np.shares_memory(a, b_out): - overlapping += 1 - - # Check result - assert_copy_equivalent(operation, [a], out=b_out, axis=axis) - - @pytest.mark.slow - def test_unary_ufunc_call_fuzz(self): - self.check_unary_fuzz(np.invert, None, np.int16) - - def test_binary_ufunc_accumulate_fuzz(self): - def get_out_axis_size(a, b, axis): - if axis is None: - if a.ndim == 1: - return a.size, False - else: - return 'skip', False # accumulate doesn't support this - else: - return a.shape[axis], False - - self.check_unary_fuzz(np.add.accumulate, get_out_axis_size, - dtype=np.int16, count=500) - - def test_binary_ufunc_reduce_fuzz(self): - def get_out_axis_size(a, b, axis): - return None, (axis is None or a.ndim == 1) - - self.check_unary_fuzz(np.add.reduce, get_out_axis_size, - dtype=np.int16, count=500) - - def test_binary_ufunc_reduceat_fuzz(self): - def get_out_axis_size(a, b, axis): - if axis is None: - if a.ndim == 1: - return a.size, False - else: - return 'skip', False # reduceat doesn't support this - else: - return a.shape[axis], False - - def do_reduceat(a, out, axis): - if axis is None: - size = len(a) - step = size//len(out) - else: - size = a.shape[axis] - step = a.shape[axis] // out.shape[axis] - idx = np.arange(0, size, step) - return np.add.reduceat(a, idx, out=out, axis=axis) - - self.check_unary_fuzz(do_reduceat, get_out_axis_size, - 
dtype=np.int16, count=500) - - def test_binary_ufunc_reduceat_manual(self): - def check(ufunc, a, ind, out): - c1 = ufunc.reduceat(a.copy(), ind.copy(), out=out.copy()) - c2 = ufunc.reduceat(a, ind, out=out) - assert_array_equal(c1, c2) - - # Exactly same input/output arrays - a = np.arange(10000, dtype=np.int16) - check(np.add, a, a[::-1].copy(), a) - - # Overlap with index - a = np.arange(10000, dtype=np.int16) - check(np.add, a, a[::-1], a) - - def test_unary_gufunc_fuzz(self): - shapes = [7, 13, 8, 21, 29, 32] - gufunc = _umath_tests.euclidean_pdist - - rng = np.random.RandomState(1234) - - for ndim in range(2, 6): - x = rng.rand(*shapes[:ndim]) - - it = iter_random_view_pairs(x, same_steps=False, equal_size=True) - - min_count = 500 // (ndim + 1)**2 - - overlapping = 0 - while overlapping < min_count: - a, b = next(it) - - if min(a.shape[-2:]) < 2 or min(b.shape[-2:]) < 2 or a.shape[-1] < 2: - continue - - # Ensure the shapes are so that euclidean_pdist is happy - if b.shape[-1] > b.shape[-2]: - b = b[...,0,:] - else: - b = b[...,:,0] - - n = a.shape[-2] - p = n * (n - 1) // 2 - if p <= b.shape[-1] and p > 0: - b = b[...,:p] - else: - n = max(2, int(np.sqrt(b.shape[-1]))//2) - p = n * (n - 1) // 2 - a = a[...,:n,:] - b = b[...,:p] - - # Call - if np.shares_memory(a, b): - overlapping += 1 - - with np.errstate(over='ignore', invalid='ignore'): - assert_copy_equivalent(gufunc, [a], out=b) - - def test_ufunc_at_manual(self): - def check(ufunc, a, ind, b=None): - a0 = a.copy() - if b is None: - ufunc.at(a0, ind.copy()) - c1 = a0.copy() - ufunc.at(a, ind) - c2 = a.copy() - else: - ufunc.at(a0, ind.copy(), b.copy()) - c1 = a0.copy() - ufunc.at(a, ind, b) - c2 = a.copy() - assert_array_equal(c1, c2) - - # Overlap with index - a = np.arange(10000, dtype=np.int16) - check(np.invert, a[::-1], a) - - # Overlap with second data array - a = np.arange(100, dtype=np.int16) - ind = np.arange(0, 100, 2, dtype=np.int16) - check(np.add, a, ind, a[25:75]) - - def 
test_unary_ufunc_1d_manual(self): - # Exercise branches in PyArray_EQUIVALENTLY_ITERABLE - - def check(a, b): - a_orig = a.copy() - b_orig = b.copy() - - b0 = b.copy() - c1 = ufunc(a, out=b0) - c2 = ufunc(a, out=b) - assert_array_equal(c1, c2) - - # Trigger "fancy ufunc loop" code path - mask = view_element_first_byte(b).view(np.bool_) - - a[...] = a_orig - b[...] = b_orig - c1 = ufunc(a, out=b.copy(), where=mask.copy()).copy() - - a[...] = a_orig - b[...] = b_orig - c2 = ufunc(a, out=b, where=mask.copy()).copy() - - # Also, mask overlapping with output - a[...] = a_orig - b[...] = b_orig - c3 = ufunc(a, out=b, where=mask).copy() - - assert_array_equal(c1, c2) - assert_array_equal(c1, c3) - - dtypes = [np.int8, np.int16, np.int32, np.int64, np.float32, - np.float64, np.complex64, np.complex128] - dtypes = [np.dtype(x) for x in dtypes] - - for dtype in dtypes: - if np.issubdtype(dtype, np.integer): - ufunc = np.invert - else: - ufunc = np.reciprocal - - n = 1000 - k = 10 - indices = [ - np.index_exp[:n], - np.index_exp[k:k+n], - np.index_exp[n-1::-1], - np.index_exp[k+n-1:k-1:-1], - np.index_exp[:2*n:2], - np.index_exp[k:k+2*n:2], - np.index_exp[2*n-1::-2], - np.index_exp[k+2*n-1:k-1:-2], - ] - - for xi, yi in itertools.product(indices, indices): - v = np.arange(1, 1 + n*2 + k, dtype=dtype) - x = v[xi] - y = v[yi] - - with np.errstate(all='ignore'): - check(x, y) - - # Scalar cases - check(x[:1], y) - check(x[-1:], y) - check(x[:1].reshape([]), y) - check(x[-1:].reshape([]), y) - - def test_unary_ufunc_where_same(self): - # Check behavior at wheremask overlap - ufunc = np.invert - - def check(a, out, mask): - c1 = ufunc(a, out=out.copy(), where=mask.copy()) - c2 = ufunc(a, out=out, where=mask) - assert_array_equal(c1, c2) - - # Check behavior with same input and output arrays - x = np.arange(100).astype(np.bool_) - check(x, x, x) - check(x, x.copy(), x) - check(x, x, x.copy()) - - @pytest.mark.slow - def test_binary_ufunc_1d_manual(self): - ufunc = np.add - - def 
check(a, b, c): - c0 = c.copy() - c1 = ufunc(a, b, out=c0) - c2 = ufunc(a, b, out=c) - assert_array_equal(c1, c2) - - for dtype in [np.int8, np.int16, np.int32, np.int64, - np.float32, np.float64, np.complex64, np.complex128]: - # Check different data dependency orders - - n = 1000 - k = 10 - - indices = [] - for p in [1, 2]: - indices.extend([ - np.index_exp[:p*n:p], - np.index_exp[k:k+p*n:p], - np.index_exp[p*n-1::-p], - np.index_exp[k+p*n-1:k-1:-p], - ]) - - for x, y, z in itertools.product(indices, indices, indices): - v = np.arange(6*n).astype(dtype) - x = v[x] - y = v[y] - z = v[z] - - check(x, y, z) - - # Scalar cases - check(x[:1], y, z) - check(x[-1:], y, z) - check(x[:1].reshape([]), y, z) - check(x[-1:].reshape([]), y, z) - check(x, y[:1], z) - check(x, y[-1:], z) - check(x, y[:1].reshape([]), z) - check(x, y[-1:].reshape([]), z) - - def test_inplace_op_simple_manual(self): - rng = np.random.RandomState(1234) - x = rng.rand(200, 200) # bigger than bufsize - - x += x.T - assert_array_equal(x - x.T, 0) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_memmap.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_memmap.py deleted file mode 100644 index d2ae564..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_memmap.py +++ /dev/null @@ -1,216 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import os -import shutil -import mmap -import pytest -from tempfile import NamedTemporaryFile, TemporaryFile, mktemp, mkdtemp - -from numpy import ( - memmap, sum, average, product, ndarray, isscalar, add, subtract, multiply) -from numpy.compat import Path - -from numpy import arange, allclose, asarray -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, suppress_warnings - ) - -class TestMemmap(object): - def setup(self): - self.tmpfp = NamedTemporaryFile(prefix='mmap') - self.tempdir = mkdtemp() - self.shape = (3, 4) - self.dtype = 'float32' - self.data = arange(12, 
dtype=self.dtype) - self.data.resize(self.shape) - - def teardown(self): - self.tmpfp.close() - shutil.rmtree(self.tempdir) - - def test_roundtrip(self): - # Write data to file - fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', - shape=self.shape) - fp[:] = self.data[:] - del fp # Test __del__ machinery, which handles cleanup - - # Read data back from file - newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r', - shape=self.shape) - assert_(allclose(self.data, newfp)) - assert_array_equal(self.data, newfp) - assert_equal(newfp.flags.writeable, False) - - def test_open_with_filename(self): - tmpname = mktemp('', 'mmap', dir=self.tempdir) - fp = memmap(tmpname, dtype=self.dtype, mode='w+', - shape=self.shape) - fp[:] = self.data[:] - del fp - - def test_unnamed_file(self): - with TemporaryFile() as f: - fp = memmap(f, dtype=self.dtype, shape=self.shape) - del fp - - def test_attributes(self): - offset = 1 - mode = "w+" - fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode, - shape=self.shape, offset=offset) - assert_equal(offset, fp.offset) - assert_equal(mode, fp.mode) - del fp - - def test_filename(self): - tmpname = mktemp('', 'mmap', dir=self.tempdir) - fp = memmap(tmpname, dtype=self.dtype, mode='w+', - shape=self.shape) - abspath = os.path.abspath(tmpname) - fp[:] = self.data[:] - assert_equal(abspath, fp.filename) - b = fp[:1] - assert_equal(abspath, b.filename) - del b - del fp - - @pytest.mark.skipif(Path is None, reason="No pathlib.Path") - def test_path(self): - tmpname = mktemp('', 'mmap', dir=self.tempdir) - fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+', - shape=self.shape) - # os.path.realpath does not resolve symlinks on Windows - # see: https://bugs.python.org/issue9949 - # use Path.resolve, just as memmap class does internally - abspath = str(Path(tmpname).resolve()) - fp[:] = self.data[:] - assert_equal(abspath, str(fp.filename.resolve())) - b = fp[:1] - assert_equal(abspath, str(b.filename.resolve())) - del b - del fp - - def 
test_filename_fileobj(self): - fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+", - shape=self.shape) - assert_equal(fp.filename, self.tmpfp.name) - - @pytest.mark.skipif(sys.platform == 'gnu0', - reason="Known to fail on hurd") - def test_flush(self): - fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', - shape=self.shape) - fp[:] = self.data[:] - assert_equal(fp[0], self.data[0]) - fp.flush() - - def test_del(self): - # Make sure a view does not delete the underlying mmap - fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+', - shape=self.shape) - fp_base[0] = 5 - fp_view = fp_base[0:1] - assert_equal(fp_view[0], 5) - del fp_view - # Should still be able to access and assign values after - # deleting the view - assert_equal(fp_base[0], 5) - fp_base[0] = 6 - assert_equal(fp_base[0], 6) - - def test_arithmetic_drops_references(self): - fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', - shape=self.shape) - tmp = (fp + 10) - if isinstance(tmp, memmap): - assert_(tmp._mmap is not fp._mmap) - - def test_indexing_drops_references(self): - fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', - shape=self.shape) - tmp = fp[(1, 2), (2, 3)] - if isinstance(tmp, memmap): - assert_(tmp._mmap is not fp._mmap) - - def test_slicing_keeps_references(self): - fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', - shape=self.shape) - assert_(fp[:2, :2]._mmap is fp._mmap) - - def test_view(self): - fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) - new1 = fp.view() - new2 = new1.view() - assert_(new1.base is fp) - assert_(new2.base is fp) - new_array = asarray(fp) - assert_(new_array.base is fp) - - def test_ufunc_return_ndarray(self): - fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) - fp[:] = self.data - - with suppress_warnings() as sup: - sup.filter(FutureWarning, "np.average currently does not preserve") - for unary_op in [sum, average, product]: - result = unary_op(fp) - assert_(isscalar(result)) - assert_(result.__class__ is self.data[0, 
0].__class__) - - assert_(unary_op(fp, axis=0).__class__ is ndarray) - assert_(unary_op(fp, axis=1).__class__ is ndarray) - - for binary_op in [add, subtract, multiply]: - assert_(binary_op(fp, self.data).__class__ is ndarray) - assert_(binary_op(self.data, fp).__class__ is ndarray) - assert_(binary_op(fp, fp).__class__ is ndarray) - - fp += 1 - assert(fp.__class__ is memmap) - add(fp, 1, out=fp) - assert(fp.__class__ is memmap) - - def test_getitem(self): - fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) - fp[:] = self.data - - assert_(fp[1:, :-1].__class__ is memmap) - # Fancy indexing returns a copy that is not memmapped - assert_(fp[[0, 1]].__class__ is ndarray) - - def test_memmap_subclass(self): - class MemmapSubClass(memmap): - pass - - fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape) - fp[:] = self.data - - # We keep previous behavior for subclasses of memmap, i.e. the - # ufunc and __getitem__ output is never turned into a ndarray - assert_(sum(fp, axis=0).__class__ is MemmapSubClass) - assert_(sum(fp).__class__ is MemmapSubClass) - assert_(fp[1:, :-1].__class__ is MemmapSubClass) - assert(fp[[0, 1]].__class__ is MemmapSubClass) - - def test_mmap_offset_greater_than_allocation_granularity(self): - size = 5 * mmap.ALLOCATIONGRANULARITY - offset = mmap.ALLOCATIONGRANULARITY + 1 - fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset) - assert_(fp.offset == offset) - - def test_no_shape(self): - self.tmpfp.write(b'a'*16) - mm = memmap(self.tmpfp, dtype='float64') - assert_equal(mm.shape, (2,)) - - def test_empty_array(self): - # gh-12653 - with pytest.raises(ValueError, match='empty file'): - memmap(self.tmpfp, shape=(0,4), mode='w+') - - self.tmpfp.write(b'\0') - - # ok now the file is not empty - memmap(self.tmpfp, shape=(0,4), mode='w+') diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_multiarray.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_multiarray.py deleted file mode 100644 index 
958b265..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_multiarray.py +++ /dev/null @@ -1,8431 +0,0 @@ -from __future__ import division, absolute_import, print_function - -try: - # Accessing collections abstract classes from collections - # has been deprecated since Python 3.3 - import collections.abc as collections_abc -except ImportError: - import collections as collections_abc -import tempfile -import sys -import shutil -import warnings -import operator -import io -import itertools -import functools -import ctypes -import os -import gc -import weakref -import pytest -from contextlib import contextmanager - -from numpy.compat import pickle - -try: - import pathlib -except ImportError: - try: - import pathlib2 as pathlib - except ImportError: - pathlib = None - -if sys.version_info[0] >= 3: - import builtins -else: - import __builtin__ as builtins -from decimal import Decimal - -import numpy as np -from numpy.compat import strchar, unicode -import numpy.core._multiarray_tests as _multiarray_tests -from numpy.testing import ( - assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal, - assert_array_equal, assert_raises_regex, assert_array_almost_equal, - assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring, - temppath, suppress_warnings, break_cycles, - ) -from numpy.testing._private.utils import _no_tracing -from numpy.core.tests._locales import CommaDecimalPointLocale - -# Need to test an object that does not fully implement math interface -from datetime import timedelta, datetime - - -if sys.version_info[:2] > (3, 2): - # In Python 3.3 the representation of empty shape, strides and sub-offsets - # is an empty tuple instead of None. - # https://docs.python.org/dev/whatsnew/3.3.html#api-changes - EMPTY = () -else: - EMPTY = None - - -def _aligned_zeros(shape, dtype=float, order="C", align=None): - """ - Allocate a new ndarray with aligned memory. 
- - The ndarray is guaranteed *not* aligned to twice the requested alignment. - Eg, if align=4, guarantees it is not aligned to 8. If align=None uses - dtype.alignment.""" - dtype = np.dtype(dtype) - if dtype == np.dtype(object): - # Can't do this, fall back to standard allocation (which - # should always be sufficiently aligned) - if align is not None: - raise ValueError("object array alignment not supported") - return np.zeros(shape, dtype=dtype, order=order) - if align is None: - align = dtype.alignment - if not hasattr(shape, '__len__'): - shape = (shape,) - size = functools.reduce(operator.mul, shape) * dtype.itemsize - buf = np.empty(size + 2*align + 1, np.uint8) - - ptr = buf.__array_interface__['data'][0] - offset = ptr % align - if offset != 0: - offset = align - offset - if (ptr % (2*align)) == 0: - offset += align - - # Note: slices producing 0-size arrays do not necessarily change - # data pointer --- so we use and allocate size+1 - buf = buf[offset:offset+size+1][:-1] - data = np.ndarray(shape, dtype, buf, order=order) - data.fill(0) - return data - - -class TestFlags(object): - def setup(self): - self.a = np.arange(10) - - def test_writeable(self): - mydict = locals() - self.a.flags.writeable = False - assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict) - assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict) - self.a.flags.writeable = True - self.a[0] = 5 - self.a[0] = 0 - - def test_writeable_any_base(self): - # Ensure that any base being writeable is sufficient to change flag; - # this is especially interesting for arrays from an array interface. - arr = np.arange(10) - - class subclass(np.ndarray): - pass - - # Create subclass so base will not be collapsed, this is OK to change - view1 = arr.view(subclass) - view2 = view1[...] - arr.flags.writeable = False - view2.flags.writeable = False - view2.flags.writeable = True # Can be set to True again. 
- - arr = np.arange(10) - - class frominterface: - def __init__(self, arr): - self.arr = arr - self.__array_interface__ = arr.__array_interface__ - - view1 = np.asarray(frominterface) - view2 = view1[...] - view2.flags.writeable = False - view2.flags.writeable = True - - view1.flags.writeable = False - view2.flags.writeable = False - with assert_raises(ValueError): - # Must assume not writeable, since only base is not: - view2.flags.writeable = True - - def test_writeable_from_readonly(self): - # gh-9440 - make sure fromstring, from buffer on readonly buffers - # set writeable False - data = b'\x00' * 100 - vals = np.frombuffer(data, 'B') - assert_raises(ValueError, vals.setflags, write=True) - types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] ) - values = np.core.records.fromstring(data, types) - vals = values['vals'] - assert_raises(ValueError, vals.setflags, write=True) - - def test_writeable_from_buffer(self): - data = bytearray(b'\x00' * 100) - vals = np.frombuffer(data, 'B') - assert_(vals.flags.writeable) - vals.setflags(write=False) - assert_(vals.flags.writeable is False) - vals.setflags(write=True) - assert_(vals.flags.writeable) - types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] ) - values = np.core.records.fromstring(data, types) - vals = values['vals'] - assert_(vals.flags.writeable) - vals.setflags(write=False) - assert_(vals.flags.writeable is False) - vals.setflags(write=True) - assert_(vals.flags.writeable) - - @pytest.mark.skipif(sys.version_info[0] < 3, reason="Python 2 always copies") - @pytest.mark.skipif(IS_PYPY, reason="PyPy always copies") - def test_writeable_pickle(self): - import pickle - # Small arrays will be copied without setting base. - # See condition for using PyArray_SetBaseObject in - # array_setstate. 
- a = np.arange(1000) - for v in range(pickle.HIGHEST_PROTOCOL): - vals = pickle.loads(pickle.dumps(a, v)) - assert_(vals.flags.writeable) - assert_(isinstance(vals.base, bytes)) - - def test_writeable_from_c_data(self): - # Test that the writeable flag can be changed for an array wrapping - # low level C-data, but not owning its data. - # Also see that this is deprecated to change from python. - from numpy.core._multiarray_tests import get_c_wrapping_array - - arr_writeable = get_c_wrapping_array(True) - assert not arr_writeable.flags.owndata - assert arr_writeable.flags.writeable - view = arr_writeable[...] - - # Toggling the writeable flag works on the view: - view.flags.writeable = False - assert not view.flags.writeable - view.flags.writeable = True - assert view.flags.writeable - # Flag can be unset on the arr_writeable: - arr_writeable.flags.writeable = False - - arr_readonly = get_c_wrapping_array(False) - assert not arr_readonly.flags.owndata - assert not arr_readonly.flags.writeable - - for arr in [arr_writeable, arr_readonly]: - view = arr[...] 
- view.flags.writeable = False # make sure it is readonly - arr.flags.writeable = False - assert not arr.flags.writeable - - with assert_raises(ValueError): - view.flags.writeable = True - - with warnings.catch_warnings(): - warnings.simplefilter("error", DeprecationWarning) - with assert_raises(DeprecationWarning): - arr.flags.writeable = True - - with assert_warns(DeprecationWarning): - arr.flags.writeable = True - - def test_warnonwrite(self): - a = np.arange(10) - a.flags._warn_on_write = True - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always') - a[1] = 10 - a[2] = 10 - # only warn once - assert_(len(w) == 1) - - def test_otherflags(self): - assert_equal(self.a.flags.carray, True) - assert_equal(self.a.flags['C'], True) - assert_equal(self.a.flags.farray, False) - assert_equal(self.a.flags.behaved, True) - assert_equal(self.a.flags.fnc, False) - assert_equal(self.a.flags.forc, True) - assert_equal(self.a.flags.owndata, True) - assert_equal(self.a.flags.writeable, True) - assert_equal(self.a.flags.aligned, True) - with assert_warns(DeprecationWarning): - assert_equal(self.a.flags.updateifcopy, False) - with assert_warns(DeprecationWarning): - assert_equal(self.a.flags['U'], False) - assert_equal(self.a.flags['UPDATEIFCOPY'], False) - assert_equal(self.a.flags.writebackifcopy, False) - assert_equal(self.a.flags['X'], False) - assert_equal(self.a.flags['WRITEBACKIFCOPY'], False) - - def test_string_align(self): - a = np.zeros(4, dtype=np.dtype('|S4')) - assert_(a.flags.aligned) - # not power of two are accessed byte-wise and thus considered aligned - a = np.zeros(5, dtype=np.dtype('|S4')) - assert_(a.flags.aligned) - - def test_void_align(self): - a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")])) - assert_(a.flags.aligned) - - -class TestHash(object): - # see #3793 - def test_int(self): - for st, ut, s in [(np.int8, np.uint8, 8), - (np.int16, np.uint16, 16), - (np.int32, np.uint32, 32), - (np.int64, np.uint64, 64)]: - 
for i in range(1, s): - assert_equal(hash(st(-2**i)), hash(-2**i), - err_msg="%r: -2**%d" % (st, i)) - assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)), - err_msg="%r: 2**%d" % (st, i - 1)) - assert_equal(hash(st(2**i - 1)), hash(2**i - 1), - err_msg="%r: 2**%d - 1" % (st, i)) - - i = max(i - 1, 1) - assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)), - err_msg="%r: 2**%d" % (ut, i - 1)) - assert_equal(hash(ut(2**i - 1)), hash(2**i - 1), - err_msg="%r: 2**%d - 1" % (ut, i)) - - -class TestAttributes(object): - def setup(self): - self.one = np.arange(10) - self.two = np.arange(20).reshape(4, 5) - self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6) - - def test_attributes(self): - assert_equal(self.one.shape, (10,)) - assert_equal(self.two.shape, (4, 5)) - assert_equal(self.three.shape, (2, 5, 6)) - self.three.shape = (10, 3, 2) - assert_equal(self.three.shape, (10, 3, 2)) - self.three.shape = (2, 5, 6) - assert_equal(self.one.strides, (self.one.itemsize,)) - num = self.two.itemsize - assert_equal(self.two.strides, (5*num, num)) - num = self.three.itemsize - assert_equal(self.three.strides, (30*num, 6*num, num)) - assert_equal(self.one.ndim, 1) - assert_equal(self.two.ndim, 2) - assert_equal(self.three.ndim, 3) - num = self.two.itemsize - assert_equal(self.two.size, 20) - assert_equal(self.two.nbytes, 20*num) - assert_equal(self.two.itemsize, self.two.dtype.itemsize) - assert_equal(self.two.base, np.arange(20)) - - def test_dtypeattr(self): - assert_equal(self.one.dtype, np.dtype(np.int_)) - assert_equal(self.three.dtype, np.dtype(np.float_)) - assert_equal(self.one.dtype.char, 'l') - assert_equal(self.three.dtype.char, 'd') - assert_(self.three.dtype.str[0] in '<>') - assert_equal(self.one.dtype.str[1], 'i') - assert_equal(self.three.dtype.str[1], 'f') - - def test_int_subclassing(self): - # Regression test for https://github.com/numpy/numpy/pull/3526 - - numpy_int = np.int_(0) - - if sys.version_info[0] >= 3: - # On Py3k int_ should not inherit from 
int, because it's not - # fixed-width anymore - assert_equal(isinstance(numpy_int, int), False) - else: - # Otherwise, it should inherit from int... - assert_equal(isinstance(numpy_int, int), True) - - # ... and fast-path checks on C-API level should also work - from numpy.core._multiarray_tests import test_int_subclass - assert_equal(test_int_subclass(numpy_int), True) - - def test_stridesattr(self): - x = self.one - - def make_array(size, offset, strides): - return np.ndarray(size, buffer=x, dtype=int, - offset=offset*x.itemsize, - strides=strides*x.itemsize) - - assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) - assert_raises(ValueError, make_array, 4, 4, -2) - assert_raises(ValueError, make_array, 4, 2, -1) - assert_raises(ValueError, make_array, 8, 3, 1) - assert_equal(make_array(8, 3, 0), np.array([3]*8)) - # Check behavior reported in gh-2503: - assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3])) - make_array(0, 0, 10) - - def test_set_stridesattr(self): - x = self.one - - def make_array(size, offset, strides): - try: - r = np.ndarray([size], dtype=int, buffer=x, - offset=offset*x.itemsize) - except Exception as e: - raise RuntimeError(e) - r.strides = strides = strides*x.itemsize - return r - - assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) - assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9])) - assert_raises(ValueError, make_array, 4, 4, -2) - assert_raises(ValueError, make_array, 4, 2, -1) - assert_raises(RuntimeError, make_array, 8, 3, 1) - # Check that the true extent of the array is used. - # Test relies on as_strided base not exposing a buffer. 
- x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0)) - - def set_strides(arr, strides): - arr.strides = strides - - assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize)) - - # Test for offset calculations: - x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], - shape=(10,), strides=(-1,)) - assert_raises(ValueError, set_strides, x[::-1], -1) - a = x[::-1] - a.strides = 1 - a[::2].strides = 2 - - def test_fill(self): - for t in "?bhilqpBHILQPfdgFDGO": - x = np.empty((3, 2, 1), t) - y = np.empty((3, 2, 1), t) - x.fill(1) - y[...] = 1 - assert_equal(x, y) - - def test_fill_max_uint64(self): - x = np.empty((3, 2, 1), dtype=np.uint64) - y = np.empty((3, 2, 1), dtype=np.uint64) - value = 2**64 - 1 - y[...] = value - x.fill(value) - assert_array_equal(x, y) - - def test_fill_struct_array(self): - # Filling from a scalar - x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8') - x.fill(x[0]) - assert_equal(x['f1'][1], x['f1'][0]) - # Filling from a tuple that can be converted - # to a scalar - x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')]) - x.fill((3.5, -2)) - assert_array_equal(x['a'], [3.5, 3.5]) - assert_array_equal(x['b'], [-2, -2]) - - -class TestArrayConstruction(object): - def test_array(self): - d = np.ones(6) - r = np.array([d, d]) - assert_equal(r, np.ones((2, 6))) - - d = np.ones(6) - tgt = np.ones((2, 6)) - r = np.array([d, d]) - assert_equal(r, tgt) - tgt[1] = 2 - r = np.array([d, d + 1]) - assert_equal(r, tgt) - - d = np.ones(6) - r = np.array([[d, d]]) - assert_equal(r, np.ones((1, 2, 6))) - - d = np.ones(6) - r = np.array([[d, d], [d, d]]) - assert_equal(r, np.ones((2, 2, 6))) - - d = np.ones((6, 6)) - r = np.array([d, d]) - assert_equal(r, np.ones((2, 6, 6))) - - d = np.ones((6, )) - r = np.array([[d, d + 1], d + 2]) - assert_equal(len(r), 2) - assert_equal(r[0], [d, d + 1]) - assert_equal(r[1], d + 2) - - tgt = np.ones((2, 3), dtype=bool) - tgt[0, 2] = False - tgt[1, 0:2] = False - r = np.array([[True, 
True, False], [False, False, True]]) - assert_equal(r, tgt) - r = np.array([[True, False], [True, False], [False, True]]) - assert_equal(r, tgt.T) - - def test_array_empty(self): - assert_raises(TypeError, np.array) - - def test_array_copy_false(self): - d = np.array([1, 2, 3]) - e = np.array(d, copy=False) - d[1] = 3 - assert_array_equal(e, [1, 3, 3]) - e = np.array(d, copy=False, order='F') - d[1] = 4 - assert_array_equal(e, [1, 4, 3]) - e[2] = 7 - assert_array_equal(d, [1, 4, 7]) - - def test_array_copy_true(self): - d = np.array([[1,2,3], [1, 2, 3]]) - e = np.array(d, copy=True) - d[0, 1] = 3 - e[0, 2] = -7 - assert_array_equal(e, [[1, 2, -7], [1, 2, 3]]) - assert_array_equal(d, [[1, 3, 3], [1, 2, 3]]) - e = np.array(d, copy=True, order='F') - d[0, 1] = 5 - e[0, 2] = 7 - assert_array_equal(e, [[1, 3, 7], [1, 2, 3]]) - assert_array_equal(d, [[1, 5, 3], [1,2,3]]) - - def test_array_cont(self): - d = np.ones(10)[::2] - assert_(np.ascontiguousarray(d).flags.c_contiguous) - assert_(np.ascontiguousarray(d).flags.f_contiguous) - assert_(np.asfortranarray(d).flags.c_contiguous) - assert_(np.asfortranarray(d).flags.f_contiguous) - d = np.ones((10, 10))[::2,::2] - assert_(np.ascontiguousarray(d).flags.c_contiguous) - assert_(np.asfortranarray(d).flags.f_contiguous) - - -class TestAssignment(object): - def test_assignment_broadcasting(self): - a = np.arange(6).reshape(2, 3) - - # Broadcasting the input to the output - a[...] = np.arange(3) - assert_equal(a, [[0, 1, 2], [0, 1, 2]]) - a[...] = np.arange(2).reshape(2, 1) - assert_equal(a, [[0, 0, 0], [1, 1, 1]]) - - # For compatibility with <= 1.5, a limited version of broadcasting - # the output to the input. - # - # This behavior is inconsistent with NumPy broadcasting - # in general, because it only uses one of the two broadcasting - # rules (adding a new "1" dimension to the left of the shape), - # applied to the output instead of an input. In NumPy 2.0, this kind - # of broadcasting assignment will likely be disallowed. 
- a[...] = np.arange(6)[::-1].reshape(1, 2, 3) - assert_equal(a, [[5, 4, 3], [2, 1, 0]]) - # The other type of broadcasting would require a reduction operation. - - def assign(a, b): - a[...] = b - - assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3)) - - def test_assignment_errors(self): - # Address issue #2276 - class C: - pass - a = np.zeros(1) - - def assign(v): - a[0] = v - - assert_raises((AttributeError, TypeError), assign, C()) - assert_raises(ValueError, assign, [1]) - - def test_unicode_assignment(self): - # gh-5049 - from numpy.core.numeric import set_string_function - - @contextmanager - def inject_str(s): - """ replace ndarray.__str__ temporarily """ - set_string_function(lambda x: s, repr=False) - try: - yield - finally: - set_string_function(None, repr=False) - - a1d = np.array([u'test']) - a0d = np.array(u'done') - with inject_str(u'bad'): - a1d[0] = a0d # previously this would invoke __str__ - assert_equal(a1d[0], u'done') - - # this would crash for the same reason - np.array([np.array(u'\xe5\xe4\xf6')]) - - def test_stringlike_empty_list(self): - # gh-8902 - u = np.array([u'done']) - b = np.array([b'done']) - - class bad_sequence(object): - def __getitem__(self): pass - def __len__(self): raise RuntimeError - - assert_raises(ValueError, operator.setitem, u, 0, []) - assert_raises(ValueError, operator.setitem, b, 0, []) - - assert_raises(ValueError, operator.setitem, u, 0, bad_sequence()) - assert_raises(ValueError, operator.setitem, b, 0, bad_sequence()) - - def test_longdouble_assignment(self): - # only relevant if longdouble is larger than float - # we're looking for loss of precision - - for dtype in (np.longdouble, np.longcomplex): - # gh-8902 - tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype) - tinya = np.nextafter(np.longdouble(0), -1).astype(dtype) - - # construction - tiny1d = np.array([tinya]) - assert_equal(tiny1d[0], tinya) - - # scalar = scalar - tiny1d[0] = tinyb - assert_equal(tiny1d[0], tinyb) - - # 0d = 
scalar - tiny1d[0, ...] = tinya - assert_equal(tiny1d[0], tinya) - - # 0d = 0d - tiny1d[0, ...] = tinyb[...] - assert_equal(tiny1d[0], tinyb) - - # scalar = 0d - tiny1d[0] = tinyb[...] - assert_equal(tiny1d[0], tinyb) - - arr = np.array([np.array(tinya)]) - assert_equal(arr[0], tinya) - - def test_cast_to_string(self): - # cast to str should do "str(scalar)", not "str(scalar.item())" - # Example: In python2, str(float) is truncated, so we want to avoid - # str(np.float64(...).item()) as this would incorrectly truncate. - a = np.zeros(1, dtype='S20') - a[:] = np.array(['1.12345678901234567890'], dtype='f8') - assert_equal(a[0], b"1.1234567890123457") - - -class TestDtypedescr(object): - def test_construction(self): - d1 = np.dtype('i4') - assert_equal(d1, np.dtype(np.int32)) - d2 = np.dtype('f8') - assert_equal(d2, np.dtype(np.float64)) - - def test_byteorders(self): - assert_(np.dtype('i4')) - assert_(np.dtype([('a', 'i4')])) - - def test_structured_non_void(self): - fields = [('a', '= 3, reason="Not Python 2") - def test_sequence_long(self): - assert_equal(np.array([long(4), long(4)]).dtype, long) - assert_equal(np.array([long(4), 2**80]).dtype, object) - assert_equal(np.array([long(4), 2**80, long(4)]).dtype, object) - assert_equal(np.array([2**80, long(4)]).dtype, object) - - def test_non_sequence_sequence(self): - """Should not segfault. - - Class Fail breaks the sequence protocol for new style classes, i.e., - those derived from object. Class Map is a mapping type indicated by - raising a ValueError. At some point we may raise a warning instead - of an error in the Fail case. 
- - """ - class Fail(object): - def __len__(self): - return 1 - - def __getitem__(self, index): - raise ValueError() - - class Map(object): - def __len__(self): - return 1 - - def __getitem__(self, index): - raise KeyError() - - a = np.array([Map()]) - assert_(a.shape == (1,)) - assert_(a.dtype == np.dtype(object)) - assert_raises(ValueError, np.array, [Fail()]) - - def test_no_len_object_type(self): - # gh-5100, want object array from iterable object without len() - class Point2: - def __init__(self): - pass - - def __getitem__(self, ind): - if ind in [0, 1]: - return ind - else: - raise IndexError() - d = np.array([Point2(), Point2(), Point2()]) - assert_equal(d.dtype, np.dtype(object)) - - def test_false_len_sequence(self): - # gh-7264, segfault for this example - class C: - def __getitem__(self, i): - raise IndexError - def __len__(self): - return 42 - - assert_raises(ValueError, np.array, C()) # segfault? - - def test_failed_len_sequence(self): - # gh-7393 - class A(object): - def __init__(self, data): - self._data = data - def __getitem__(self, item): - return type(self)(self._data[item]) - def __len__(self): - return len(self._data) - - # len(d) should give 3, but len(d[0]) will fail - d = A([1,2,3]) - assert_equal(len(np.array(d)), 3) - - def test_array_too_big(self): - # Test that array creation succeeds for arrays addressable by intp - # on the byte level and fails for too large arrays. 
- buf = np.zeros(100) - - max_bytes = np.iinfo(np.intp).max - for dtype in ["intp", "S20", "b"]: - dtype = np.dtype(dtype) - itemsize = dtype.itemsize - - np.ndarray(buffer=buf, strides=(0,), - shape=(max_bytes//itemsize,), dtype=dtype) - assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,), - shape=(max_bytes//itemsize + 1,), dtype=dtype) - - def test_jagged_ndim_object(self): - # Lists of mismatching depths are treated as object arrays - a = np.array([[1], 2, 3]) - assert_equal(a.shape, (3,)) - assert_equal(a.dtype, object) - - a = np.array([1, [2], 3]) - assert_equal(a.shape, (3,)) - assert_equal(a.dtype, object) - - a = np.array([1, 2, [3]]) - assert_equal(a.shape, (3,)) - assert_equal(a.dtype, object) - - def test_jagged_shape_object(self): - # The jagged dimension of a list is turned into an object array - a = np.array([[1, 1], [2], [3]]) - assert_equal(a.shape, (3,)) - assert_equal(a.dtype, object) - - a = np.array([[1], [2, 2], [3]]) - assert_equal(a.shape, (3,)) - assert_equal(a.dtype, object) - - a = np.array([[1], [2], [3, 3]]) - assert_equal(a.shape, (3,)) - assert_equal(a.dtype, object) - - -class TestStructured(object): - def test_subarray_field_access(self): - a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))]) - a['a'] = np.arange(60).reshape(3, 5, 2, 2) - - # Since the subarray is always in C-order, a transpose - # does not swap the subarray: - assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3)) - - # In Fortran order, the subarray gets appended - # like in all other cases, not prepended as a special case - b = a.copy(order='F') - assert_equal(a['a'].shape, b['a'].shape) - assert_equal(a.T['a'].shape, a.T.copy()['a'].shape) - - def test_subarray_comparison(self): - # Check that comparisons between record arrays with - # multi-dimensional field types work properly - a = np.rec.fromrecords( - [([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])], - dtype=[('a', ('f4', 3)), ('b', object), ('c', ('i4', (2, 2)))]) - b 
= a.copy() - assert_equal(a == b, [True, True]) - assert_equal(a != b, [False, False]) - b[1].b = 'c' - assert_equal(a == b, [True, False]) - assert_equal(a != b, [False, True]) - for i in range(3): - b[0].a = a[0].a - b[0].a[i] = 5 - assert_equal(a == b, [False, False]) - assert_equal(a != b, [True, True]) - for i in range(2): - for j in range(2): - b = a.copy() - b[0].c[i, j] = 10 - assert_equal(a == b, [False, True]) - assert_equal(a != b, [True, False]) - - # Check that broadcasting with a subarray works - a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')]) - b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')]) - assert_equal(a == b, [[True, True, False], [False, False, True]]) - assert_equal(b == a, [[True, True, False], [False, False, True]]) - a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))]) - b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))]) - assert_equal(a == b, [[True, True, False], [False, False, True]]) - assert_equal(b == a, [[True, True, False], [False, False, True]]) - a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))]) - b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))]) - assert_equal(a == b, [[True, False, False], [False, False, True]]) - assert_equal(b == a, [[True, False, False], [False, False, True]]) - - # Check that broadcasting Fortran-style arrays with a subarray work - a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F') - b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))]) - assert_equal(a == b, [[True, False, False], [False, False, True]]) - assert_equal(b == a, [[True, False, False], [False, False, True]]) - - # Check that incompatible sub-array shapes don't result to broadcasting - x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')]) - y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')]) - # This comparison invokes deprecated behaviour, and will probably - # start raising an error eventually. 
What we really care about in this - # test is just that it doesn't return True. - with suppress_warnings() as sup: - sup.filter(FutureWarning, "elementwise == comparison failed") - assert_equal(x == y, False) - - x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')]) - y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')]) - # This comparison invokes deprecated behaviour, and will probably - # start raising an error eventually. What we really care about in this - # test is just that it doesn't return True. - with suppress_warnings() as sup: - sup.filter(FutureWarning, "elementwise == comparison failed") - assert_equal(x == y, False) - - # Check that structured arrays that are different only in - # byte-order work - a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', 'f8')]) - assert_equal(a == b, [False, True]) - - def test_casting(self): - # Check that casting a structured array to change its byte order - # works - a = np.array([(1,)], dtype=[('a', 'i4')], casting='unsafe')) - b = a.astype([('a', '>i4')]) - assert_equal(b, a.byteswap().newbyteorder()) - assert_equal(a['a'][0], b['a'][0]) - - # Check that equality comparison works on structured arrays if - # they are 'equiv'-castable - a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', 'f8')]) - assert_(np.can_cast(a.dtype, b.dtype, casting='equiv')) - assert_equal(a == b, [True, True]) - - # Check that 'equiv' casting can change byte order - assert_(np.can_cast(a.dtype, b.dtype, casting='equiv')) - c = a.astype(b.dtype, casting='equiv') - assert_equal(a == c, [True, True]) - - # Check that 'safe' casting can change byte order and up-cast - # fields - t = [('a', 'f8')] - assert_(np.can_cast(a.dtype, t, casting='safe')) - c = a.astype(t, casting='safe') - assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)), - [True, True]) - - # Check that 'same_kind' casting can change byte order and - # change field widths within a "kind" - t = [('a', 'f4')] - assert_(np.can_cast(a.dtype, t, 
casting='same_kind')) - c = a.astype(t, casting='same_kind') - assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)), - [True, True]) - - # Check that casting fails if the casting rule should fail on - # any of the fields - t = [('a', '>i8'), ('b', 'i2'), ('b', 'i8'), ('b', 'i4')] - assert_(not np.can_cast(a.dtype, t, casting=casting)) - t = [('a', '>i4'), ('b', ' false - for n in range(3): - v = np.array(b'', (dtype, n)) - assert_equal(bool(v), False) - assert_equal(bool(v[()]), False) - assert_equal(v.astype(bool), False) - assert_(isinstance(v.astype(bool), np.ndarray)) - assert_(v[()].astype(bool) is np.False_) - - # anything else -> true - for n in range(1, 4): - for val in [b'a', b'0', b' ']: - v = np.array(val, (dtype, n)) - assert_equal(bool(v), True) - assert_equal(bool(v[()]), True) - assert_equal(v.astype(bool), True) - assert_(isinstance(v.astype(bool), np.ndarray)) - assert_(v[()].astype(bool) is np.True_) - - def test_cast_from_void(self): - self._test_cast_from_flexible(np.void) - - @pytest.mark.xfail(reason="See gh-9847") - def test_cast_from_unicode(self): - self._test_cast_from_flexible(np.unicode_) - - @pytest.mark.xfail(reason="See gh-9847") - def test_cast_from_bytes(self): - self._test_cast_from_flexible(np.bytes_) - - -class TestZeroSizeFlexible(object): - @staticmethod - def _zeros(shape, dtype=str): - dtype = np.dtype(dtype) - if dtype == np.void: - return np.zeros(shape, dtype=(dtype, 0)) - - # not constructable directly - dtype = np.dtype([('x', dtype, 0)]) - return np.zeros(shape, dtype=dtype)['x'] - - def test_create(self): - zs = self._zeros(10, bytes) - assert_equal(zs.itemsize, 0) - zs = self._zeros(10, np.void) - assert_equal(zs.itemsize, 0) - zs = self._zeros(10, unicode) - assert_equal(zs.itemsize, 0) - - def _test_sort_partition(self, name, kinds, **kwargs): - # Previously, these would all hang - for dt in [bytes, np.void, unicode]: - zs = self._zeros(10, dt) - sort_method = getattr(zs, name) - sort_func = getattr(np, name) - 
for kind in kinds: - sort_method(kind=kind, **kwargs) - sort_func(zs, kind=kind, **kwargs) - - def test_sort(self): - self._test_sort_partition('sort', kinds='qhs') - - def test_argsort(self): - self._test_sort_partition('argsort', kinds='qhs') - - def test_partition(self): - self._test_sort_partition('partition', kinds=['introselect'], kth=2) - - def test_argpartition(self): - self._test_sort_partition('argpartition', kinds=['introselect'], kth=2) - - def test_resize(self): - # previously an error - for dt in [bytes, np.void, unicode]: - zs = self._zeros(10, dt) - zs.resize(25) - zs.resize((10, 10)) - - def test_view(self): - for dt in [bytes, np.void, unicode]: - zs = self._zeros(10, dt) - - # viewing as itself should be allowed - assert_equal(zs.view(dt).dtype, np.dtype(dt)) - - # viewing as any non-empty type gives an empty result - assert_equal(zs.view((dt, 1)).shape, (0,)) - - def test_dumps(self): - zs = self._zeros(10, int) - assert_equal(zs, pickle.loads(zs.dumps())) - - def test_pickle(self): - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - for dt in [bytes, np.void, unicode]: - zs = self._zeros(10, dt) - p = pickle.dumps(zs, protocol=proto) - zs2 = pickle.loads(p) - - assert_equal(zs.dtype, zs2.dtype) - - @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, - reason="requires pickle protocol 5") - def test_pickle_with_buffercallback(self): - array = np.arange(10) - buffers = [] - bytes_string = pickle.dumps(array, buffer_callback=buffers.append, - protocol=5) - array_from_buffer = pickle.loads(bytes_string, buffers=buffers) - # when using pickle protocol 5 with buffer callbacks, - # array_from_buffer is reconstructed from a buffer holding a view - # to the initial array's data, so modifying an element in array - # should modify it in array_from_buffer too. 
- array[0] = -1 - assert array_from_buffer[0] == -1, array_from_buffer[0] - - -class TestMethods(object): - - sort_kinds = ['quicksort', 'heapsort', 'stable'] - - def test_compress(self): - tgt = [[5, 6, 7, 8, 9]] - arr = np.arange(10).reshape(2, 5) - out = arr.compress([0, 1], axis=0) - assert_equal(out, tgt) - - tgt = [[1, 3], [6, 8]] - out = arr.compress([0, 1, 0, 1, 0], axis=1) - assert_equal(out, tgt) - - tgt = [[1], [6]] - arr = np.arange(10).reshape(2, 5) - out = arr.compress([0, 1], axis=1) - assert_equal(out, tgt) - - arr = np.arange(10).reshape(2, 5) - out = arr.compress([0, 1]) - assert_equal(out, 1) - - def test_choose(self): - x = 2*np.ones((3,), dtype=int) - y = 3*np.ones((3,), dtype=int) - x2 = 2*np.ones((2, 3), dtype=int) - y2 = 3*np.ones((2, 3), dtype=int) - ind = np.array([0, 0, 1]) - - A = ind.choose((x, y)) - assert_equal(A, [2, 2, 3]) - - A = ind.choose((x2, y2)) - assert_equal(A, [[2, 2, 3], [2, 2, 3]]) - - A = ind.choose((x, y2)) - assert_equal(A, [[2, 2, 3], [2, 2, 3]]) - - oned = np.ones(1) - # gh-12031, caused SEGFAULT - assert_raises(TypeError, oned.choose,np.void(0), [oned]) - - # gh-6272 check overlap on out - x = np.arange(5) - y = np.choose([0,0,0], [x[:3], x[:3], x[:3]], out=x[1:4], mode='wrap') - assert_equal(y, np.array([0, 1, 2])) - - def test_prod(self): - ba = [1, 2, 10, 11, 6, 5, 4] - ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] - - for ctype in [np.int16, np.uint16, np.int32, np.uint32, - np.float32, np.float64, np.complex64, np.complex128]: - a = np.array(ba, ctype) - a2 = np.array(ba2, ctype) - if ctype in ['1', 'b']: - assert_raises(ArithmeticError, a.prod) - assert_raises(ArithmeticError, a2.prod, axis=1) - else: - assert_equal(a.prod(axis=0), 26400) - assert_array_equal(a2.prod(axis=0), - np.array([50, 36, 84, 180], ctype)) - assert_array_equal(a2.prod(axis=-1), - np.array([24, 1890, 600], ctype)) - - def test_repeat(self): - m = np.array([1, 2, 3, 4, 5, 6]) - m_rect = m.reshape((2, 3)) - - A = m.repeat([1, 3, 2, 1, 
1, 2]) - assert_equal(A, [1, 2, 2, 2, 3, - 3, 4, 5, 6, 6]) - - A = m.repeat(2) - assert_equal(A, [1, 1, 2, 2, 3, 3, - 4, 4, 5, 5, 6, 6]) - - A = m_rect.repeat([2, 1], axis=0) - assert_equal(A, [[1, 2, 3], - [1, 2, 3], - [4, 5, 6]]) - - A = m_rect.repeat([1, 3, 2], axis=1) - assert_equal(A, [[1, 2, 2, 2, 3, 3], - [4, 5, 5, 5, 6, 6]]) - - A = m_rect.repeat(2, axis=0) - assert_equal(A, [[1, 2, 3], - [1, 2, 3], - [4, 5, 6], - [4, 5, 6]]) - - A = m_rect.repeat(2, axis=1) - assert_equal(A, [[1, 1, 2, 2, 3, 3], - [4, 4, 5, 5, 6, 6]]) - - def test_reshape(self): - arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) - - tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]] - assert_equal(arr.reshape(2, 6), tgt) - - tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] - assert_equal(arr.reshape(3, 4), tgt) - - tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]] - assert_equal(arr.reshape((3, 4), order='F'), tgt) - - tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]] - assert_equal(arr.T.reshape((3, 4), order='C'), tgt) - - def test_round(self): - def check_round(arr, expected, *round_args): - assert_equal(arr.round(*round_args), expected) - # With output array - out = np.zeros_like(arr) - res = arr.round(*round_args, out=out) - assert_equal(out, expected) - assert_equal(out, res) - - check_round(np.array([1.2, 1.5]), [1, 2]) - check_round(np.array(1.5), 2) - check_round(np.array([12.2, 15.5]), [10, 20], -1) - check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1) - # Complex rounding - check_round(np.array([4.5 + 1.5j]), [4 + 2j]) - check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1) - - def test_squeeze(self): - a = np.array([[[1], [2], [3]]]) - assert_equal(a.squeeze(), [1, 2, 3]) - assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]]) - assert_raises(ValueError, a.squeeze, axis=(1,)) - assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]]) - - def test_transpose(self): - a = np.array([[1, 2], [3, 4]]) - assert_equal(a.transpose(), [[1, 3], [2, 4]]) - 
assert_raises(ValueError, lambda: a.transpose(0)) - assert_raises(ValueError, lambda: a.transpose(0, 0)) - assert_raises(ValueError, lambda: a.transpose(0, 1, 2)) - - def test_sort(self): - # test ordering for floats and complex containing nans. It is only - # necessary to check the less-than comparison, so sorts that - # only follow the insertion sort path are sufficient. We only - # test doubles and complex doubles as the logic is the same. - - # check doubles - msg = "Test real sort order with nans" - a = np.array([np.nan, 1, 0]) - b = np.sort(a) - assert_equal(b, a[::-1], msg) - # check complex - msg = "Test complex sort order with nans" - a = np.zeros(9, dtype=np.complex128) - a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0] - a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0] - b = np.sort(a) - assert_equal(b, a[::-1], msg) - - # all c scalar sorts use the same code with different types - # so it suffices to run a quick check with one type. The number - # of sorted items must be greater than ~50 to check the actual - # algorithm because quick and merge sort fall over to insertion - # sort for small arrays. 
- # Test unsigned dtypes and nonnegative numbers - for dtype in [np.uint8, np.uint16, np.uint32, np.uint64, np.float16, np.float32, np.float64, np.longdouble]: - a = np.arange(101, dtype=dtype) - b = a[::-1].copy() - for kind in self.sort_kinds: - msg = "scalar sort, kind=%s, dtype=%s" % (kind, dtype) - c = a.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - - # Test signed dtypes and negative numbers as well - for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64, np.longdouble]: - a = np.arange(-50, 51, dtype=dtype) - b = a[::-1].copy() - for kind in self.sort_kinds: - msg = "scalar sort, kind=%s, dtype=%s" % (kind, dtype) - c = a.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - - # test complex sorts. These use the same code as the scalars - # but the compare function differs. - ai = a*1j + 1 - bi = b*1j + 1 - for kind in self.sort_kinds: - msg = "complex sort, real part == 1, kind=%s" % kind - c = ai.copy() - c.sort(kind=kind) - assert_equal(c, ai, msg) - c = bi.copy() - c.sort(kind=kind) - assert_equal(c, ai, msg) - ai = a + 1j - bi = b + 1j - for kind in self.sort_kinds: - msg = "complex sort, imag part == 1, kind=%s" % kind - c = ai.copy() - c.sort(kind=kind) - assert_equal(c, ai, msg) - c = bi.copy() - c.sort(kind=kind) - assert_equal(c, ai, msg) - - # test sorting of complex arrays requiring byte-swapping, gh-5441 - for endianness in '<>': - for dt in np.typecodes['Complex']: - arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt) - c = arr.copy() - c.sort() - msg = 'byte-swapped complex sort, dtype={0}'.format(dt) - assert_equal(c, arr, msg) - - # test string sorts. 
- s = 'aaaaaaaa' - a = np.array([s + chr(i) for i in range(101)]) - b = a[::-1].copy() - for kind in self.sort_kinds: - msg = "string sort, kind=%s" % kind - c = a.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - - # test unicode sorts. - s = 'aaaaaaaa' - a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode_) - b = a[::-1].copy() - for kind in self.sort_kinds: - msg = "unicode sort, kind=%s" % kind - c = a.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - - # test object array sorts. - a = np.empty((101,), dtype=object) - a[:] = list(range(101)) - b = a[::-1] - for kind in ['q', 'h', 'm']: - msg = "object sort, kind=%s" % kind - c = a.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - - # test record array sorts. - dt = np.dtype([('f', float), ('i', int)]) - a = np.array([(i, i) for i in range(101)], dtype=dt) - b = a[::-1] - for kind in ['q', 'h', 'm']: - msg = "object sort, kind=%s" % kind - c = a.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - - # test datetime64 sorts. - a = np.arange(0, 101, dtype='datetime64[D]') - b = a[::-1] - for kind in ['q', 'h', 'm']: - msg = "datetime64 sort, kind=%s" % kind - c = a.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - - # test timedelta64 sorts. - a = np.arange(0, 101, dtype='timedelta64[D]') - b = a[::-1] - for kind in ['q', 'h', 'm']: - msg = "timedelta64 sort, kind=%s" % kind - c = a.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - - # check axis handling. 
This should be the same for all type - # specific sorts, so we only check it for one type and one kind - a = np.array([[3, 2], [1, 0]]) - b = np.array([[1, 0], [3, 2]]) - c = np.array([[2, 3], [0, 1]]) - d = a.copy() - d.sort(axis=0) - assert_equal(d, b, "test sort with axis=0") - d = a.copy() - d.sort(axis=1) - assert_equal(d, c, "test sort with axis=1") - d = a.copy() - d.sort() - assert_equal(d, c, "test sort with default axis") - - # check axis handling for multidimensional empty arrays - a = np.array([]) - a.shape = (3, 2, 1, 0) - for axis in range(-a.ndim, a.ndim): - msg = 'test empty array sort with axis={0}'.format(axis) - assert_equal(np.sort(a, axis=axis), a, msg) - msg = 'test empty array sort with axis=None' - assert_equal(np.sort(a, axis=None), a.ravel(), msg) - - # test generic class with bogus ordering, - # should not segfault. - class Boom(object): - def __lt__(self, other): - return True - - a = np.array([Boom()]*100, dtype=object) - for kind in self.sort_kinds: - msg = "bogus comparison object sort, kind=%s" % kind - c.sort(kind=kind) - - def test_void_sort(self): - # gh-8210 - previously segfaulted - for i in range(4): - rand = np.random.randint(256, size=4000, dtype=np.uint8) - arr = rand.view('V4') - arr[::-1].sort() - - dt = np.dtype([('val', 'i4', (1,))]) - for i in range(4): - rand = np.random.randint(256, size=4000, dtype=np.uint8) - arr = rand.view(dt) - arr[::-1].sort() - - def test_sort_raises(self): - #gh-9404 - arr = np.array([0, datetime.now(), 1], dtype=object) - for kind in self.sort_kinds: - assert_raises(TypeError, arr.sort, kind=kind) - #gh-3879 - class Raiser(object): - def raises_anything(*args, **kwargs): - raise TypeError("SOMETHING ERRORED") - __eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything - arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1) - np.random.shuffle(arr) - for kind in self.sort_kinds: - assert_raises(TypeError, arr.sort, kind=kind) - - def test_sort_degraded(self): - # test 
degraded dataset would take minutes to run with normal qsort - d = np.arange(1000000) - do = d.copy() - x = d - # create a median of 3 killer where each median is the sorted second - # last element of the quicksort partition - while x.size > 3: - mid = x.size // 2 - x[mid], x[-2] = x[-2], x[mid] - x = x[:-2] - - assert_equal(np.sort(d), do) - assert_equal(d[np.argsort(d)], do) - - def test_copy(self): - def assert_fortran(arr): - assert_(arr.flags.fortran) - assert_(arr.flags.f_contiguous) - assert_(not arr.flags.c_contiguous) - - def assert_c(arr): - assert_(not arr.flags.fortran) - assert_(not arr.flags.f_contiguous) - assert_(arr.flags.c_contiguous) - - a = np.empty((2, 2), order='F') - # Test copying a Fortran array - assert_c(a.copy()) - assert_c(a.copy('C')) - assert_fortran(a.copy('F')) - assert_fortran(a.copy('A')) - - # Now test starting with a C array. - a = np.empty((2, 2), order='C') - assert_c(a.copy()) - assert_c(a.copy('C')) - assert_fortran(a.copy('F')) - assert_c(a.copy('A')) - - def test_sort_order(self): - # Test sorting an array with fields - x1 = np.array([21, 32, 14]) - x2 = np.array(['my', 'first', 'name']) - x3 = np.array([3.1, 4.5, 6.2]) - r = np.rec.fromarrays([x1, x2, x3], names='id,word,number') - - r.sort(order=['id']) - assert_equal(r.id, np.array([14, 21, 32])) - assert_equal(r.word, np.array(['name', 'my', 'first'])) - assert_equal(r.number, np.array([6.2, 3.1, 4.5])) - - r.sort(order=['word']) - assert_equal(r.id, np.array([32, 21, 14])) - assert_equal(r.word, np.array(['first', 'my', 'name'])) - assert_equal(r.number, np.array([4.5, 3.1, 6.2])) - - r.sort(order=['number']) - assert_equal(r.id, np.array([21, 32, 14])) - assert_equal(r.word, np.array(['my', 'first', 'name'])) - assert_equal(r.number, np.array([3.1, 4.5, 6.2])) - - assert_raises_regex(ValueError, 'duplicate', - lambda: r.sort(order=['id', 'id'])) - - if sys.byteorder == 'little': - strtype = '>i2' - else: - strtype = '': - for dt in np.typecodes['Complex']: - arr = 
np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt) - msg = 'byte-swapped complex argsort, dtype={0}'.format(dt) - assert_equal(arr.argsort(), - np.arange(len(arr), dtype=np.intp), msg) - - # test string argsorts. - s = 'aaaaaaaa' - a = np.array([s + chr(i) for i in range(101)]) - b = a[::-1].copy() - r = np.arange(101) - rr = r[::-1] - for kind in self.sort_kinds: - msg = "string argsort, kind=%s" % kind - assert_equal(a.copy().argsort(kind=kind), r, msg) - assert_equal(b.copy().argsort(kind=kind), rr, msg) - - # test unicode argsorts. - s = 'aaaaaaaa' - a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode_) - b = a[::-1] - r = np.arange(101) - rr = r[::-1] - for kind in self.sort_kinds: - msg = "unicode argsort, kind=%s" % kind - assert_equal(a.copy().argsort(kind=kind), r, msg) - assert_equal(b.copy().argsort(kind=kind), rr, msg) - - # test object array argsorts. - a = np.empty((101,), dtype=object) - a[:] = list(range(101)) - b = a[::-1] - r = np.arange(101) - rr = r[::-1] - for kind in self.sort_kinds: - msg = "object argsort, kind=%s" % kind - assert_equal(a.copy().argsort(kind=kind), r, msg) - assert_equal(b.copy().argsort(kind=kind), rr, msg) - - # test structured array argsorts. - dt = np.dtype([('f', float), ('i', int)]) - a = np.array([(i, i) for i in range(101)], dtype=dt) - b = a[::-1] - r = np.arange(101) - rr = r[::-1] - for kind in self.sort_kinds: - msg = "structured array argsort, kind=%s" % kind - assert_equal(a.copy().argsort(kind=kind), r, msg) - assert_equal(b.copy().argsort(kind=kind), rr, msg) - - # test datetime64 argsorts. - a = np.arange(0, 101, dtype='datetime64[D]') - b = a[::-1] - r = np.arange(101) - rr = r[::-1] - for kind in ['q', 'h', 'm']: - msg = "datetime64 argsort, kind=%s" % kind - assert_equal(a.copy().argsort(kind=kind), r, msg) - assert_equal(b.copy().argsort(kind=kind), rr, msg) - - # test timedelta64 argsorts. 
- a = np.arange(0, 101, dtype='timedelta64[D]') - b = a[::-1] - r = np.arange(101) - rr = r[::-1] - for kind in ['q', 'h', 'm']: - msg = "timedelta64 argsort, kind=%s" % kind - assert_equal(a.copy().argsort(kind=kind), r, msg) - assert_equal(b.copy().argsort(kind=kind), rr, msg) - - # check axis handling. This should be the same for all type - # specific argsorts, so we only check it for one type and one kind - a = np.array([[3, 2], [1, 0]]) - b = np.array([[1, 1], [0, 0]]) - c = np.array([[1, 0], [1, 0]]) - assert_equal(a.copy().argsort(axis=0), b) - assert_equal(a.copy().argsort(axis=1), c) - assert_equal(a.copy().argsort(), c) - - # check axis handling for multidimensional empty arrays - a = np.array([]) - a.shape = (3, 2, 1, 0) - for axis in range(-a.ndim, a.ndim): - msg = 'test empty array argsort with axis={0}'.format(axis) - assert_equal(np.argsort(a, axis=axis), - np.zeros_like(a, dtype=np.intp), msg) - msg = 'test empty array argsort with axis=None' - assert_equal(np.argsort(a, axis=None), - np.zeros_like(a.ravel(), dtype=np.intp), msg) - - # check that stable argsorts are stable - r = np.arange(100) - # scalars - a = np.zeros(100) - assert_equal(a.argsort(kind='m'), r) - # complex - a = np.zeros(100, dtype=complex) - assert_equal(a.argsort(kind='m'), r) - # string - a = np.array(['aaaaaaaaa' for i in range(100)]) - assert_equal(a.argsort(kind='m'), r) - # unicode - a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode_) - assert_equal(a.argsort(kind='m'), r) - - def test_sort_unicode_kind(self): - d = np.arange(10) - k = b'\xc3\xa4'.decode("UTF8") - assert_raises(ValueError, d.sort, kind=k) - assert_raises(ValueError, d.argsort, kind=k) - - def test_searchsorted(self): - # test for floats and complex containing nans. The logic is the - # same for all float types so only test double types for now. - # The search sorted routines use the compare functions for the - # array type, so this checks if that is consistent with the sort - # order. 
- - # check double - a = np.array([0, 1, np.nan]) - msg = "Test real searchsorted with nans, side='l'" - b = a.searchsorted(a, side='l') - assert_equal(b, np.arange(3), msg) - msg = "Test real searchsorted with nans, side='r'" - b = a.searchsorted(a, side='r') - assert_equal(b, np.arange(1, 4), msg) - # check double complex - a = np.zeros(9, dtype=np.complex128) - a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan] - a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan] - msg = "Test complex searchsorted with nans, side='l'" - b = a.searchsorted(a, side='l') - assert_equal(b, np.arange(9), msg) - msg = "Test complex searchsorted with nans, side='r'" - b = a.searchsorted(a, side='r') - assert_equal(b, np.arange(1, 10), msg) - msg = "Test searchsorted with little endian, side='l'" - a = np.array([0, 128], dtype=' p[:, i]).all(), - msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T)) - aae(p, d1[np.arange(d1.shape[0])[:, None], - np.argpartition(d1, i, axis=1, kind=k)]) - - p = np.partition(d0, i, axis=0, kind=k) - aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt)) - # array_less does not seem to work right - at((p[:i, :] <= p[i, :]).all(), - msg="%d: %r <= %r" % (i, p[i, :], p[:i, :])) - at((p[i + 1:, :] > p[i, :]).all(), - msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:])) - aae(p, d0[np.argpartition(d0, i, axis=0, kind=k), - np.arange(d0.shape[1])[None, :]]) - - # check inplace - dc = d.copy() - dc.partition(i, kind=k) - assert_equal(dc, np.partition(d, i, kind=k)) - dc = d0.copy() - dc.partition(i, axis=0, kind=k) - assert_equal(dc, np.partition(d0, i, axis=0, kind=k)) - dc = d1.copy() - dc.partition(i, axis=1, kind=k) - assert_equal(dc, np.partition(d1, i, axis=1, kind=k)) - - def assert_partitioned(self, d, kth): - prev = 0 - for k in np.sort(kth): - assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k) - assert_((d[k:] >= d[k]).all(), - msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k])) - prev = k + 1 - - def test_partition_iterative(self): - d = 
np.arange(17) - kth = (0, 1, 2, 429, 231) - assert_raises(ValueError, d.partition, kth) - assert_raises(ValueError, d.argpartition, kth) - d = np.arange(10).reshape((2, 5)) - assert_raises(ValueError, d.partition, kth, axis=0) - assert_raises(ValueError, d.partition, kth, axis=1) - assert_raises(ValueError, np.partition, d, kth, axis=1) - assert_raises(ValueError, np.partition, d, kth, axis=None) - - d = np.array([3, 4, 2, 1]) - p = np.partition(d, (0, 3)) - self.assert_partitioned(p, (0, 3)) - self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3)) - - assert_array_equal(p, np.partition(d, (-3, -1))) - assert_array_equal(p, d[np.argpartition(d, (-3, -1))]) - - d = np.arange(17) - np.random.shuffle(d) - d.partition(range(d.size)) - assert_array_equal(np.arange(17), d) - np.random.shuffle(d) - assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))]) - - # test unsorted kth - d = np.arange(17) - np.random.shuffle(d) - keys = np.array([1, 3, 8, -2]) - np.random.shuffle(d) - p = np.partition(d, keys) - self.assert_partitioned(p, keys) - p = d[np.argpartition(d, keys)] - self.assert_partitioned(p, keys) - np.random.shuffle(keys) - assert_array_equal(np.partition(d, keys), p) - assert_array_equal(d[np.argpartition(d, keys)], p) - - # equal kth - d = np.arange(20)[::-1] - self.assert_partitioned(np.partition(d, [5]*4), [5]) - self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]), - [5]*4 + [6, 13]) - self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5]) - self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])], - [5]*4 + [6, 13]) - - d = np.arange(12) - np.random.shuffle(d) - d1 = np.tile(np.arange(12), (4, 1)) - map(np.random.shuffle, d1) - d0 = np.transpose(d1) - - kth = (1, 6, 7, -1) - p = np.partition(d1, kth, axis=1) - pa = d1[np.arange(d1.shape[0])[:, None], - d1.argpartition(kth, axis=1)] - assert_array_equal(p, pa) - for i in range(d1.shape[0]): - self.assert_partitioned(p[i,:], kth) - p = np.partition(d0, kth, axis=0) - pa = 
d0[np.argpartition(d0, kth, axis=0), - np.arange(d0.shape[1])[None,:]] - assert_array_equal(p, pa) - for i in range(d0.shape[1]): - self.assert_partitioned(p[:, i], kth) - - def test_partition_cdtype(self): - d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), - ('Lancelot', 1.9, 38)], - dtype=[('name', '|S10'), ('height', ' (numpy ufunc, has_in_place_version, preferred_dtype) - ops = { - 'add': (np.add, True, float), - 'sub': (np.subtract, True, float), - 'mul': (np.multiply, True, float), - 'truediv': (np.true_divide, True, float), - 'floordiv': (np.floor_divide, True, float), - 'mod': (np.remainder, True, float), - 'divmod': (np.divmod, False, float), - 'pow': (np.power, True, int), - 'lshift': (np.left_shift, True, int), - 'rshift': (np.right_shift, True, int), - 'and': (np.bitwise_and, True, int), - 'xor': (np.bitwise_xor, True, int), - 'or': (np.bitwise_or, True, int), - # 'ge': (np.less_equal, False), - # 'gt': (np.less, False), - # 'le': (np.greater_equal, False), - # 'lt': (np.greater, False), - # 'eq': (np.equal, False), - # 'ne': (np.not_equal, False), - } - if sys.version_info >= (3, 5): - ops['matmul'] = (np.matmul, False, float) - - class Coerced(Exception): - pass - - def array_impl(self): - raise Coerced - - def op_impl(self, other): - return "forward" - - def rop_impl(self, other): - return "reverse" - - def iop_impl(self, other): - return "in-place" - - def array_ufunc_impl(self, ufunc, method, *args, **kwargs): - return ("__array_ufunc__", ufunc, method, args, kwargs) - - # Create an object with the given base, in the given module, with a - # bunch of placeholder __op__ methods, and optionally a - # __array_ufunc__ and __array_priority__. 
- def make_obj(base, array_priority=False, array_ufunc=False, - alleged_module="__main__"): - class_namespace = {"__array__": array_impl} - if array_priority is not False: - class_namespace["__array_priority__"] = array_priority - for op in ops: - class_namespace["__{0}__".format(op)] = op_impl - class_namespace["__r{0}__".format(op)] = rop_impl - class_namespace["__i{0}__".format(op)] = iop_impl - if array_ufunc is not False: - class_namespace["__array_ufunc__"] = array_ufunc - eval_namespace = {"base": base, - "class_namespace": class_namespace, - "__name__": alleged_module, - } - MyType = eval("type('MyType', (base,), class_namespace)", - eval_namespace) - if issubclass(MyType, np.ndarray): - # Use this range to avoid special case weirdnesses around - # divide-by-0, pow(x, 2), overflow due to pow(big, big), etc. - return np.arange(3, 7).reshape(2, 2).view(MyType) - else: - return MyType() - - def check(obj, binop_override_expected, ufunc_override_expected, - inplace_override_expected, check_scalar=True): - for op, (ufunc, has_inplace, dtype) in ops.items(): - err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s' - % (op, ufunc, has_inplace, dtype)) - check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)] - if check_scalar: - check_objs.append(check_objs[0][0]) - for arr in check_objs: - arr_method = getattr(arr, "__{0}__".format(op)) - - def first_out_arg(result): - if op == "divmod": - assert_(isinstance(result, tuple)) - return result[0] - else: - return result - - # arr __op__ obj - if binop_override_expected: - assert_equal(arr_method(obj), NotImplemented, err_msg) - elif ufunc_override_expected: - assert_equal(arr_method(obj)[0], "__array_ufunc__", - err_msg) - else: - if (isinstance(obj, np.ndarray) and - (type(obj).__array_ufunc__ is - np.ndarray.__array_ufunc__)): - # __array__ gets ignored - res = first_out_arg(arr_method(obj)) - assert_(res.__class__ is obj.__class__, err_msg) - else: - assert_raises((TypeError, Coerced), - arr_method, obj, 
err_msg=err_msg) - # obj __op__ arr - arr_rmethod = getattr(arr, "__r{0}__".format(op)) - if ufunc_override_expected: - res = arr_rmethod(obj) - assert_equal(res[0], "__array_ufunc__", - err_msg=err_msg) - assert_equal(res[1], ufunc, err_msg=err_msg) - else: - if (isinstance(obj, np.ndarray) and - (type(obj).__array_ufunc__ is - np.ndarray.__array_ufunc__)): - # __array__ gets ignored - res = first_out_arg(arr_rmethod(obj)) - assert_(res.__class__ is obj.__class__, err_msg) - else: - # __array_ufunc__ = "asdf" creates a TypeError - assert_raises((TypeError, Coerced), - arr_rmethod, obj, err_msg=err_msg) - - # arr __iop__ obj - # array scalars don't have in-place operators - if has_inplace and isinstance(arr, np.ndarray): - arr_imethod = getattr(arr, "__i{0}__".format(op)) - if inplace_override_expected: - assert_equal(arr_method(obj), NotImplemented, - err_msg=err_msg) - elif ufunc_override_expected: - res = arr_imethod(obj) - assert_equal(res[0], "__array_ufunc__", err_msg) - assert_equal(res[1], ufunc, err_msg) - assert_(type(res[-1]["out"]) is tuple, err_msg) - assert_(res[-1]["out"][0] is arr, err_msg) - else: - if (isinstance(obj, np.ndarray) and - (type(obj).__array_ufunc__ is - np.ndarray.__array_ufunc__)): - # __array__ gets ignored - assert_(arr_imethod(obj) is arr, err_msg) - else: - assert_raises((TypeError, Coerced), - arr_imethod, obj, - err_msg=err_msg) - - op_fn = getattr(operator, op, None) - if op_fn is None: - op_fn = getattr(operator, op + "_", None) - if op_fn is None: - op_fn = getattr(builtins, op) - assert_equal(op_fn(obj, arr), "forward", err_msg) - if not isinstance(obj, np.ndarray): - if binop_override_expected: - assert_equal(op_fn(arr, obj), "reverse", err_msg) - elif ufunc_override_expected: - assert_equal(op_fn(arr, obj)[0], "__array_ufunc__", - err_msg) - if ufunc_override_expected: - assert_equal(ufunc(obj, arr)[0], "__array_ufunc__", - err_msg) - - # No array priority, no array_ufunc -> nothing called - check(make_obj(object), 
False, False, False) - # Negative array priority, no array_ufunc -> nothing called - # (has to be very negative, because scalar priority is -1000000.0) - check(make_obj(object, array_priority=-2**30), False, False, False) - # Positive array priority, no array_ufunc -> binops and iops only - check(make_obj(object, array_priority=1), True, False, True) - # ndarray ignores array_priority for ndarray subclasses - check(make_obj(np.ndarray, array_priority=1), False, False, False, - check_scalar=False) - # Positive array_priority and array_ufunc -> array_ufunc only - check(make_obj(object, array_priority=1, - array_ufunc=array_ufunc_impl), False, True, False) - check(make_obj(np.ndarray, array_priority=1, - array_ufunc=array_ufunc_impl), False, True, False) - # array_ufunc set to None -> defer binops only - check(make_obj(object, array_ufunc=None), True, False, False) - check(make_obj(np.ndarray, array_ufunc=None), True, False, False, - check_scalar=False) - - def test_ufunc_override_normalize_signature(self): - # gh-5674 - class SomeClass(object): - def __array_ufunc__(self, ufunc, method, *inputs, **kw): - return kw - - a = SomeClass() - kw = np.add(a, [1]) - assert_('sig' not in kw and 'signature' not in kw) - kw = np.add(a, [1], sig='ii->i') - assert_('sig' not in kw and 'signature' in kw) - assert_equal(kw['signature'], 'ii->i') - kw = np.add(a, [1], signature='ii->i') - assert_('sig' not in kw and 'signature' in kw) - assert_equal(kw['signature'], 'ii->i') - - def test_array_ufunc_index(self): - # Check that index is set appropriately, also if only an output - # is passed on (latter is another regression tests for github bug 4753) - # This also checks implicitly that 'out' is always a tuple. - class CheckIndex(object): - def __array_ufunc__(self, ufunc, method, *inputs, **kw): - for i, a in enumerate(inputs): - if a is self: - return i - # calls below mean we must be in an output. 
- for j, a in enumerate(kw['out']): - if a is self: - return (j,) - - a = CheckIndex() - dummy = np.arange(2.) - # 1 input, 1 output - assert_equal(np.sin(a), 0) - assert_equal(np.sin(dummy, a), (0,)) - assert_equal(np.sin(dummy, out=a), (0,)) - assert_equal(np.sin(dummy, out=(a,)), (0,)) - assert_equal(np.sin(a, a), 0) - assert_equal(np.sin(a, out=a), 0) - assert_equal(np.sin(a, out=(a,)), 0) - # 1 input, 2 outputs - assert_equal(np.modf(dummy, a), (0,)) - assert_equal(np.modf(dummy, None, a), (1,)) - assert_equal(np.modf(dummy, dummy, a), (1,)) - assert_equal(np.modf(dummy, out=(a, None)), (0,)) - assert_equal(np.modf(dummy, out=(a, dummy)), (0,)) - assert_equal(np.modf(dummy, out=(None, a)), (1,)) - assert_equal(np.modf(dummy, out=(dummy, a)), (1,)) - assert_equal(np.modf(a, out=(dummy, a)), 0) - with assert_raises(TypeError): - # Out argument must be tuple, since there are multiple outputs - np.modf(dummy, out=a) - - assert_raises(ValueError, np.modf, dummy, out=(a,)) - - # 2 inputs, 1 output - assert_equal(np.add(a, dummy), 0) - assert_equal(np.add(dummy, a), 1) - assert_equal(np.add(dummy, dummy, a), (0,)) - assert_equal(np.add(dummy, a, a), 1) - assert_equal(np.add(dummy, dummy, out=a), (0,)) - assert_equal(np.add(dummy, dummy, out=(a,)), (0,)) - assert_equal(np.add(a, dummy, out=a), 0) - - def test_out_override(self): - # regression test for github bug 4753 - class OutClass(np.ndarray): - def __array_ufunc__(self, ufunc, method, *inputs, **kw): - if 'out' in kw: - tmp_kw = kw.copy() - tmp_kw.pop('out') - func = getattr(ufunc, method) - kw['out'][0][...] 
= func(*inputs, **tmp_kw) - - A = np.array([0]).view(OutClass) - B = np.array([5]) - C = np.array([6]) - np.multiply(C, B, A) - assert_equal(A[0], 30) - assert_(isinstance(A, OutClass)) - A[0] = 0 - np.multiply(C, B, out=A) - assert_equal(A[0], 30) - assert_(isinstance(A, OutClass)) - - def test_pow_override_with_errors(self): - # regression test for gh-9112 - class PowerOnly(np.ndarray): - def __array_ufunc__(self, ufunc, method, *inputs, **kw): - if ufunc is not np.power: - raise NotImplementedError - return "POWER!" - # explicit cast to float, to ensure the fast power path is taken. - a = np.array(5., dtype=np.float64).view(PowerOnly) - assert_equal(a ** 2.5, "POWER!") - with assert_raises(NotImplementedError): - a ** 0.5 - with assert_raises(NotImplementedError): - a ** 0 - with assert_raises(NotImplementedError): - a ** 1 - with assert_raises(NotImplementedError): - a ** -1 - with assert_raises(NotImplementedError): - a ** 2 - - def test_pow_array_object_dtype(self): - # test pow on arrays of object dtype - class SomeClass(object): - def __init__(self, num=None): - self.num = num - - # want to ensure a fast pow path is not taken - def __mul__(self, other): - raise AssertionError('__mul__ should not be called') - - def __div__(self, other): - raise AssertionError('__div__ should not be called') - - def __pow__(self, exp): - return SomeClass(num=self.num ** exp) - - def __eq__(self, other): - if isinstance(other, SomeClass): - return self.num == other.num - - __rpow__ = __pow__ - - def pow_for(exp, arr): - return np.array([x ** exp for x in arr]) - - obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)]) - - assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr)) - assert_equal(obj_arr ** 0, pow_for(0, obj_arr)) - assert_equal(obj_arr ** 1, pow_for(1, obj_arr)) - assert_equal(obj_arr ** -1, pow_for(-1, obj_arr)) - assert_equal(obj_arr ** 2, pow_for(2, obj_arr)) - - def test_pos_array_ufunc_override(self): - class A(np.ndarray): - def __array_ufunc__(self, 
ufunc, method, *inputs, **kwargs): - return getattr(ufunc, method)(*[i.view(np.ndarray) for - i in inputs], **kwargs) - tst = np.array('foo').view(A) - with assert_raises(TypeError): - +tst - - -class TestTemporaryElide(object): - # elision is only triggered on relatively large arrays - - def test_extension_incref_elide(self): - # test extension (e.g. cython) calling PyNumber_* slots without - # increasing the reference counts - # - # def incref_elide(a): - # d = input.copy() # refcount 1 - # return d, d + d # PyNumber_Add without increasing refcount - from numpy.core._multiarray_tests import incref_elide - d = np.ones(100000) - orig, res = incref_elide(d) - d + d - # the return original should not be changed to an inplace operation - assert_array_equal(orig, d) - assert_array_equal(res, d + d) - - def test_extension_incref_elide_stack(self): - # scanning if the refcount == 1 object is on the python stack to check - # that we are called directly from python is flawed as object may still - # be above the stack pointer and we have no access to the top of it - # - # def incref_elide_l(d): - # return l[4] + l[4] # PyNumber_Add without increasing refcount - from numpy.core._multiarray_tests import incref_elide_l - # padding with 1 makes sure the object on the stack is not overwritten - l = [1, 1, 1, 1, np.ones(100000)] - res = incref_elide_l(l) - # the return original should not be changed to an inplace operation - assert_array_equal(l[4], np.ones(100000)) - assert_array_equal(res, l[4] + l[4]) - - def test_temporary_with_cast(self): - # check that we don't elide into a temporary which would need casting - d = np.ones(200000, dtype=np.int64) - assert_equal(((d + d) + 2**222).dtype, np.dtype('O')) - - r = ((d + d) / 2) - assert_equal(r.dtype, np.dtype('f8')) - - r = np.true_divide((d + d), 2) - assert_equal(r.dtype, np.dtype('f8')) - - r = ((d + d) / 2.) 
- assert_equal(r.dtype, np.dtype('f8')) - - r = ((d + d) // 2) - assert_equal(r.dtype, np.dtype(np.int64)) - - # commutative elision into the astype result - f = np.ones(100000, dtype=np.float32) - assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8')) - - # no elision into lower type - d = f.astype(np.float64) - assert_equal(((f + f) + d).dtype, d.dtype) - l = np.ones(100000, dtype=np.longdouble) - assert_equal(((d + d) + l).dtype, l.dtype) - - # test unary abs with different output dtype - for dt in (np.complex64, np.complex128, np.clongdouble): - c = np.ones(100000, dtype=dt) - r = abs(c * 2.0) - assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2))) - - def test_elide_broadcast(self): - # test no elision on broadcast to higher dimension - # only triggers elision code path in debug mode as triggering it in - # normal mode needs 256kb large matching dimension, so a lot of memory - d = np.ones((2000, 1), dtype=int) - b = np.ones((2000), dtype=bool) - r = (1 - d) + b - assert_equal(r, 1) - assert_equal(r.shape, (2000, 2000)) - - def test_elide_scalar(self): - # check inplace op does not create ndarray from scalars - a = np.bool_() - assert_(type(~(a & a)) is np.bool_) - - def test_elide_scalar_readonly(self): - # The imaginary part of a real array is readonly. This needs to go - # through fast_scalar_power which is only called for powers of - # +1, -1, 0, 0.5, and 2, so use 2. Also need valid refcount for - # elision which can be gotten for the imaginary part of a real - # array. Should not error. 
- a = np.empty(100000, dtype=np.float64) - a.imag ** 2 - - def test_elide_readonly(self): - # don't try to elide readonly temporaries - r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0 - assert_equal(r, 0) - - def test_elide_updateifcopy(self): - a = np.ones(2**20)[::2] - b = a.flat.__array__() + 1 - del b - assert_equal(a, 1) - - -class TestCAPI(object): - def test_IsPythonScalar(self): - from numpy.core._multiarray_tests import IsPythonScalar - assert_(IsPythonScalar(b'foobar')) - assert_(IsPythonScalar(1)) - assert_(IsPythonScalar(2**80)) - assert_(IsPythonScalar(2.)) - assert_(IsPythonScalar("a")) - - -class TestSubscripting(object): - def test_test_zero_rank(self): - x = np.array([1, 2, 3]) - assert_(isinstance(x[0], np.int_)) - if sys.version_info[0] < 3: - assert_(isinstance(x[0], int)) - assert_(type(x[0, ...]) is np.ndarray) - - -class TestPickling(object): - @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5, - reason=('this tests the error messages when trying to' - 'protocol 5 although it is not available')) - def test_correct_protocol5_error_message(self): - array = np.arange(10) - - if sys.version_info[:2] in ((3, 6), (3, 7)): - # For the specific case of python3.6 and 3.7, raise a clear import - # error about the pickle5 backport when trying to use protocol=5 - # without the pickle5 package - with pytest.raises(ImportError): - array.__reduce_ex__(5) - - elif sys.version_info[:2] < (3, 6): - # when calling __reduce_ex__ explicitly with protocol=5 on python - # raise a ValueError saying that protocol 5 is not available for - # this python version - with pytest.raises(ValueError): - array.__reduce_ex__(5) - - def test_record_array_with_object_dtype(self): - my_object = object() - - arr_with_object = np.array( - [(my_object, 1, 2.0)], - dtype=[('a', object), ('b', int), ('c', float)]) - arr_without_object = np.array( - [('xxx', 1, 2.0)], - dtype=[('a', str), ('b', int), ('c', float)]) - - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - 
depickled_arr_with_object = pickle.loads( - pickle.dumps(arr_with_object, protocol=proto)) - depickled_arr_without_object = pickle.loads( - pickle.dumps(arr_without_object, protocol=proto)) - - assert_equal(arr_with_object.dtype, - depickled_arr_with_object.dtype) - assert_equal(arr_without_object.dtype, - depickled_arr_without_object.dtype) - - @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, - reason="requires pickle protocol 5") - def test_f_contiguous_array(self): - f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F') - buffers = [] - - # When using pickle protocol 5, Fortran-contiguous arrays can be - # serialized using out-of-band buffers - bytes_string = pickle.dumps(f_contiguous_array, protocol=5, - buffer_callback=buffers.append) - - assert len(buffers) > 0 - - depickled_f_contiguous_array = pickle.loads(bytes_string, - buffers=buffers) - - assert_equal(f_contiguous_array, depickled_f_contiguous_array) - - def test_non_contiguous_array(self): - non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2] - assert not non_contiguous_array.flags.c_contiguous - assert not non_contiguous_array.flags.f_contiguous - - # make sure non-contiguous arrays can be pickled-depickled - # using any protocol - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - depickled_non_contiguous_array = pickle.loads( - pickle.dumps(non_contiguous_array, protocol=proto)) - - assert_equal(non_contiguous_array, depickled_non_contiguous_array) - - def test_roundtrip(self): - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - carray = np.array([[2, 9], [7, 0], [3, 8]]) - DATA = [ - carray, - np.transpose(carray), - np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int), - ('c', float)]) - ] - - refs = [weakref.ref(a) for a in DATA] - for a in DATA: - assert_equal( - a, pickle.loads(pickle.dumps(a, protocol=proto)), - err_msg="%r" % a) - del a, DATA, carray - break_cycles() - # check for reference leaks (gh-12793) - for ref in refs: - assert ref() is None - - def 
_loads(self, obj): - if sys.version_info[0] >= 3: - return pickle.loads(obj, encoding='latin1') - else: - return pickle.loads(obj) - - # version 0 pickles, using protocol=2 to pickle - # version 0 doesn't have a version field - def test_version0_int8(self): - s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.' - a = np.array([1, 2, 3, 4], dtype=np.int8) - p = self._loads(s) - assert_equal(a, p) - - def test_version0_float32(self): - s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) - assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) - assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) - - def test_mixed(self): - g1 = np.array(["spam", "spa", "spammer", "and eggs"]) - g2 = "spam" - assert_array_equal(g1 == g2, [x == g2 for x in g1]) - assert_array_equal(g1 != g2, [x != g2 for x in g1]) - assert_array_equal(g1 < g2, [x < g2 for x in g1]) - assert_array_equal(g1 > g2, [x > g2 for x in g1]) - assert_array_equal(g1 <= g2, [x <= g2 for x in g1]) - assert_array_equal(g1 >= g2, [x >= g2 for x in g1]) - - def test_unicode(self): - g1 = np.array([u"This", u"is", u"example"]) - g2 = np.array([u"This", u"was", u"example"]) - assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]]) - assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]]) - assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]]) - assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) - assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) - assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) - - -class TestArgmax(object): - - nan_arr = [ - ([0, 1, 2, 3, np.nan], 4), - ([0, 1, 2, np.nan, 
3], 3), - ([np.nan, 0, 1, 2, 3], 0), - ([np.nan, 0, np.nan, 2, 3], 0), - ([0, 1, 2, 3, complex(0, np.nan)], 4), - ([0, 1, 2, 3, complex(np.nan, 0)], 4), - ([0, 1, 2, complex(np.nan, 0), 3], 3), - ([0, 1, 2, complex(0, np.nan), 3], 3), - ([complex(0, np.nan), 0, 1, 2, 3], 0), - ([complex(np.nan, np.nan), 0, 1, 2, 3], 0), - ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0), - ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0), - ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0), - - ([complex(0, 0), complex(0, 2), complex(0, 1)], 1), - ([complex(1, 0), complex(0, 2), complex(0, 1)], 0), - ([complex(1, 0), complex(0, 2), complex(1, 1)], 2), - - ([np.datetime64('1923-04-14T12:43:12'), - np.datetime64('1994-06-21T14:43:15'), - np.datetime64('2001-10-15T04:10:32'), - np.datetime64('1995-11-25T16:02:16'), - np.datetime64('2005-01-04T03:14:12'), - np.datetime64('2041-12-03T14:05:03')], 5), - ([np.datetime64('1935-09-14T04:40:11'), - np.datetime64('1949-10-12T12:32:11'), - np.datetime64('2010-01-03T05:14:12'), - np.datetime64('2015-11-20T12:20:59'), - np.datetime64('1932-09-23T10:10:13'), - np.datetime64('2014-10-10T03:50:30')], 3), - # Assorted tests with NaTs - ([np.datetime64('NaT'), - np.datetime64('NaT'), - np.datetime64('2010-01-03T05:14:12'), - np.datetime64('NaT'), - np.datetime64('2015-09-23T10:10:13'), - np.datetime64('1932-10-10T03:50:30')], 0), - ([np.datetime64('2059-03-14T12:43:12'), - np.datetime64('1996-09-21T14:43:15'), - np.datetime64('NaT'), - np.datetime64('2022-12-25T16:02:16'), - np.datetime64('1963-10-04T03:14:12'), - np.datetime64('2013-05-08T18:15:23')], 2), - ([np.timedelta64(2, 's'), - np.timedelta64(1, 's'), - np.timedelta64('NaT', 's'), - np.timedelta64(3, 's')], 2), - ([np.timedelta64('NaT', 's')] * 3, 0), - - ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35), - timedelta(days=-1, seconds=23)], 0), - ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5), - 
timedelta(days=5, seconds=14)], 1), - ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5), - timedelta(days=10, seconds=43)], 2), - - ([False, False, False, False, True], 4), - ([False, False, False, True, False], 3), - ([True, False, False, False, False], 0), - ([True, False, True, False, False], 0), - ] - - def test_all(self): - a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) - for i in range(a.ndim): - amax = a.max(i) - aargmax = a.argmax(i) - axes = list(range(a.ndim)) - axes.remove(i) - assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes)))) - - def test_combinations(self): - for arr, pos in self.nan_arr: - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, - "invalid value encountered in reduce") - max_val = np.max(arr) - - assert_equal(np.argmax(arr), pos, err_msg="%r" % arr) - assert_equal(arr[np.argmax(arr)], max_val, err_msg="%r" % arr) - - def test_output_shape(self): - # see also gh-616 - a = np.ones((10, 5)) - # Check some simple shape mismatches - out = np.ones(11, dtype=np.int_) - assert_raises(ValueError, a.argmax, -1, out) - - out = np.ones((2, 5), dtype=np.int_) - assert_raises(ValueError, a.argmax, -1, out) - - # these could be relaxed possibly (used to allow even the previous) - out = np.ones((1, 10), dtype=np.int_) - assert_raises(ValueError, a.argmax, -1, out) - - out = np.ones(10, dtype=np.int_) - a.argmax(-1, out=out) - assert_equal(out, a.argmax(-1)) - - def test_argmax_unicode(self): - d = np.zeros(6031, dtype='= cmin)) - assert_(np.all(x <= cmax)) - - def _clip_type(self, type_group, array_max, - clip_min, clip_max, inplace=False, - expected_min=None, expected_max=None): - if expected_min is None: - expected_min = clip_min - if expected_max is None: - expected_max = clip_max - - for T in np.sctypes[type_group]: - if sys.byteorder == 'little': - byte_orders = ['=', '>'] - else: - byte_orders = ['<', '='] - - for byteorder in byte_orders: - dtype = np.dtype(T).newbyteorder(byteorder) - - x = (np.random.random(1000) 
* array_max).astype(dtype) - if inplace: - # The tests that call us pass clip_min and clip_max that - # might not fit in the destination dtype. They were written - # assuming the previous unsafe casting, which now must be - # passed explicitly to avoid a warning. - x.clip(clip_min, clip_max, x, casting='unsafe') - else: - x = x.clip(clip_min, clip_max) - byteorder = '=' - - if x.dtype.byteorder == '|': - byteorder = '|' - assert_equal(x.dtype.byteorder, byteorder) - self._check_range(x, expected_min, expected_max) - return x - - def test_basic(self): - for inplace in [False, True]: - self._clip_type( - 'float', 1024, -12.8, 100.2, inplace=inplace) - self._clip_type( - 'float', 1024, 0, 0, inplace=inplace) - - self._clip_type( - 'int', 1024, -120, 100, inplace=inplace) - self._clip_type( - 'int', 1024, 0, 0, inplace=inplace) - - self._clip_type( - 'uint', 1024, 0, 0, inplace=inplace) - self._clip_type( - 'uint', 1024, -120, 100, inplace=inplace, expected_min=0) - - def test_record_array(self): - rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], - dtype=[('x', '= 3)) - x = val.clip(min=3) - assert_(np.all(x >= 3)) - x = val.clip(max=4) - assert_(np.all(x <= 4)) - - def test_nan(self): - input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan]) - result = input_arr.clip(-1, 1) - expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan]) - assert_array_equal(result, expected) - - -class TestCompress(object): - def test_axis(self): - tgt = [[5, 6, 7, 8, 9]] - arr = np.arange(10).reshape(2, 5) - out = np.compress([0, 1], arr, axis=0) - assert_equal(out, tgt) - - tgt = [[1, 3], [6, 8]] - out = np.compress([0, 1, 0, 1, 0], arr, axis=1) - assert_equal(out, tgt) - - def test_truncate(self): - tgt = [[1], [6]] - arr = np.arange(10).reshape(2, 5) - out = np.compress([0, 1], arr, axis=1) - assert_equal(out, tgt) - - def test_flatten(self): - arr = np.arange(10).reshape(2, 5) - out = np.compress([0, 1], arr) - assert_equal(out, 1) - - -class TestPutmask(object): - def 
tst_basic(self, x, T, mask, val): - np.putmask(x, mask, val) - assert_equal(x[mask], T(val)) - assert_equal(x.dtype, T) - - def test_ip_types(self): - unchecked_types = [bytes, unicode, np.void, object] - - x = np.random.random(1000)*100 - mask = x < 40 - - for val in [-100, 0, 15]: - for types in np.sctypes.values(): - for T in types: - if T not in unchecked_types: - self.tst_basic(x.copy().astype(T), T, mask, val) - - def test_mask_size(self): - assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5) - - @pytest.mark.parametrize('dtype', ('>i4', 'f8'), ('z', 'i4', 'f8'), ('z', ' 16MB - d = np.zeros(4 * 1024 ** 2) - d.tofile(self.filename) - assert_equal(os.path.getsize(self.filename), d.nbytes) - assert_array_equal(d, np.fromfile(self.filename)) - # check offset - with open(self.filename, "r+b") as f: - f.seek(d.nbytes) - d.tofile(f) - assert_equal(os.path.getsize(self.filename), d.nbytes * 2) - # check append mode (gh-8329) - open(self.filename, "w").close() # delete file contents - with open(self.filename, "ab") as f: - d.tofile(f) - assert_array_equal(d, np.fromfile(self.filename)) - with open(self.filename, "ab") as f: - d.tofile(f) - assert_equal(os.path.getsize(self.filename), d.nbytes * 2) - - def test_io_open_buffered_fromfile(self): - # gh-6632 - self.x.tofile(self.filename) - with io.open(self.filename, 'rb', buffering=-1) as f: - y = np.fromfile(f, dtype=self.dtype) - assert_array_equal(y, self.x.flat) - - def test_file_position_after_fromfile(self): - # gh-4118 - sizes = [io.DEFAULT_BUFFER_SIZE//8, - io.DEFAULT_BUFFER_SIZE, - io.DEFAULT_BUFFER_SIZE*8] - - for size in sizes: - f = open(self.filename, 'wb') - f.seek(size-1) - f.write(b'\0') - f.close() - - for mode in ['rb', 'r+b']: - err_msg = "%d %s" % (size, mode) - - f = open(self.filename, mode) - f.read(2) - np.fromfile(f, dtype=np.float64, count=1) - pos = f.tell() - f.close() - assert_equal(pos, 10, err_msg=err_msg) - - def test_file_position_after_tofile(self): - # gh-4118 - sizes 
= [io.DEFAULT_BUFFER_SIZE//8, - io.DEFAULT_BUFFER_SIZE, - io.DEFAULT_BUFFER_SIZE*8] - - for size in sizes: - err_msg = "%d" % (size,) - - f = open(self.filename, 'wb') - f.seek(size-1) - f.write(b'\0') - f.seek(10) - f.write(b'12') - np.array([0], dtype=np.float64).tofile(f) - pos = f.tell() - f.close() - assert_equal(pos, 10 + 2 + 8, err_msg=err_msg) - - f = open(self.filename, 'r+b') - f.read(2) - f.seek(0, 1) # seek between read&write required by ANSI C - np.array([0], dtype=np.float64).tofile(f) - pos = f.tell() - f.close() - assert_equal(pos, 10, err_msg=err_msg) - - def test_load_object_array_fromfile(self): - # gh-12300 - with open(self.filename, 'w') as f: - # Ensure we have a file with consistent contents - pass - - with open(self.filename, 'rb') as f: - assert_raises_regex(ValueError, "Cannot read into object array", - np.fromfile, f, dtype=object) - - assert_raises_regex(ValueError, "Cannot read into object array", - np.fromfile, self.filename, dtype=object) - - def test_fromfile_offset(self): - with open(self.filename, 'wb') as f: - self.x.tofile(f) - - with open(self.filename, 'rb') as f: - y = np.fromfile(f, dtype=self.dtype, offset=0) - assert_array_equal(y, self.x.flat) - - with open(self.filename, 'rb') as f: - count_items = len(self.x.flat) // 8 - offset_items = len(self.x.flat) // 4 - offset_bytes = self.dtype.itemsize * offset_items - y = np.fromfile(f, dtype=self.dtype, count=count_items, offset=offset_bytes) - assert_array_equal(y, self.x.flat[offset_items:offset_items+count_items]) - - # subsequent seeks should stack - offset_bytes = self.dtype.itemsize - z = np.fromfile(f, dtype=self.dtype, offset=offset_bytes) - assert_array_equal(z, self.x.flat[offset_items+count_items+1:]) - - with open(self.filename, 'wb') as f: - self.x.tofile(f, sep=",") - - with open(self.filename, 'rb') as f: - assert_raises_regex( - TypeError, - "'offset' argument only permitted for binary files", - np.fromfile, self.filename, dtype=self.dtype, - sep=",", offset=1) 
- - def _check_from(self, s, value, **kw): - if 'sep' not in kw: - y = np.frombuffer(s, **kw) - else: - y = np.fromstring(s, **kw) - assert_array_equal(y, value) - - f = open(self.filename, 'wb') - f.write(s) - f.close() - y = np.fromfile(self.filename, **kw) - assert_array_equal(y, value) - - def test_nan(self): - self._check_from( - b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)", - [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], - sep=' ') - - def test_inf(self): - self._check_from( - b"inf +inf -inf infinity -Infinity iNfInItY -inF", - [np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf], - sep=' ') - - def test_numbers(self): - self._check_from(b"1.234 -1.234 .3 .3e55 -123133.1231e+133", - [1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ') - - def test_binary(self): - self._check_from(b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@', - np.array([1, 2, 3, 4]), - dtype=' 1 minute on mechanical hard drive - def test_big_binary(self): - """Test workarounds for 32-bit limited fwrite, fseek, and ftell - calls in windows. These normally would hang doing something like this. 
- See http://projects.scipy.org/numpy/ticket/1660""" - if sys.platform != 'win32': - return - try: - # before workarounds, only up to 2**32-1 worked - fourgbplus = 2**32 + 2**16 - testbytes = np.arange(8, dtype=np.int8) - n = len(testbytes) - flike = tempfile.NamedTemporaryFile() - f = flike.file - np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f) - flike.seek(0) - a = np.fromfile(f, dtype=np.int8) - flike.close() - assert_(len(a) == fourgbplus) - # check only start and end for speed: - assert_((a[:n] == testbytes).all()) - assert_((a[-n:] == testbytes).all()) - except (MemoryError, ValueError): - pass - - def test_string(self): - self._check_from(b'1,2,3,4', [1., 2., 3., 4.], sep=',') - - def test_counted_string(self): - self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=4, sep=',') - self._check_from(b'1,2,3,4', [1., 2., 3.], count=3, sep=',') - self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',') - - def test_string_with_ws(self): - self._check_from(b'1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ') - - def test_counted_string_with_ws(self): - self._check_from(b'1 2 3 4 ', [1, 2, 3], count=3, dtype=int, - sep=' ') - - def test_ascii(self): - self._check_from(b'1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',') - self._check_from(b'1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',') - - def test_malformed(self): - with assert_warns(DeprecationWarning): - self._check_from(b'1.234 1,234', [1.234, 1.], sep=' ') - - def test_long_sep(self): - self._check_from(b'1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_') - - def test_dtype(self): - v = np.array([1, 2, 3, 4], dtype=np.int_) - self._check_from(b'1,2,3,4', v, sep=',', dtype=np.int_) - - def test_dtype_bool(self): - # can't use _check_from because fromstring can't handle True/False - v = np.array([True, False, True, False], dtype=np.bool_) - s = b'1,0,-2.3,0' - f = open(self.filename, 'wb') - f.write(s) - f.close() - y = np.fromfile(self.filename, sep=',', dtype=np.bool_) - assert_(y.dtype == '?') - 
assert_array_equal(y, v) - - def test_tofile_sep(self): - x = np.array([1.51, 2, 3.51, 4], dtype=float) - f = open(self.filename, 'w') - x.tofile(f, sep=',') - f.close() - f = open(self.filename, 'r') - s = f.read() - f.close() - #assert_equal(s, '1.51,2.0,3.51,4.0') - y = np.array([float(p) for p in s.split(',')]) - assert_array_equal(x,y) - - def test_tofile_format(self): - x = np.array([1.51, 2, 3.51, 4], dtype=float) - f = open(self.filename, 'w') - x.tofile(f, sep=',', format='%.2f') - f.close() - f = open(self.filename, 'r') - s = f.read() - f.close() - assert_equal(s, '1.51,2.00,3.51,4.00') - - def test_locale(self): - with CommaDecimalPointLocale(): - self.test_numbers() - self.test_nan() - self.test_inf() - self.test_counted_string() - self.test_ascii() - self.test_malformed() - self.test_tofile_sep() - self.test_tofile_format() - - def test_fromfile_subarray_binary(self): - # Test subarray dtypes which are absorbed into the shape - x = np.arange(24, dtype="i4").reshape(2, 3, 4) - x.tofile(self.filename) - res = np.fromfile(self.filename, dtype="(3,4)i4") - assert_array_equal(x, res) - - x_str = x.tobytes() - with assert_warns(DeprecationWarning): - # binary fromstring is deprecated - res = np.fromstring(x_str, dtype="(3,4)i4") - assert_array_equal(x, res) - - -class TestFromBuffer(object): - @pytest.mark.parametrize('byteorder', ['<', '>']) - @pytest.mark.parametrize('dtype', [float, int, complex]) - def test_basic(self, byteorder, dtype): - dt = np.dtype(dtype).newbyteorder(byteorder) - x = (np.random.random((4, 7)) * 5).astype(dt) - buf = x.tobytes() - assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat) - - def test_empty(self): - assert_array_equal(np.frombuffer(b''), np.array([])) - - -class TestFlat(object): - def setup(self): - a0 = np.arange(20.0) - a = a0.reshape(4, 5) - a0.shape = (4, 5) - a.flags.writeable = False - self.a = a - self.b = a[::2, ::2] - self.a0 = a0 - self.b0 = a0[::2, ::2] - - def test_contiguous(self): - testpassed = False 
- try: - self.a.flat[12] = 100.0 - except ValueError: - testpassed = True - assert_(testpassed) - assert_(self.a.flat[12] == 12.0) - - def test_discontiguous(self): - testpassed = False - try: - self.b.flat[4] = 100.0 - except ValueError: - testpassed = True - assert_(testpassed) - assert_(self.b.flat[4] == 12.0) - - def test___array__(self): - c = self.a.flat.__array__() - d = self.b.flat.__array__() - e = self.a0.flat.__array__() - f = self.b0.flat.__array__() - - assert_(c.flags.writeable is False) - assert_(d.flags.writeable is False) - # for 1.14 all are set to non-writeable on the way to replacing the - # UPDATEIFCOPY array returned for non-contiguous arrays. - assert_(e.flags.writeable is True) - assert_(f.flags.writeable is False) - with assert_warns(DeprecationWarning): - assert_(c.flags.updateifcopy is False) - with assert_warns(DeprecationWarning): - assert_(d.flags.updateifcopy is False) - with assert_warns(DeprecationWarning): - assert_(e.flags.updateifcopy is False) - with assert_warns(DeprecationWarning): - # UPDATEIFCOPY is removed. 
- assert_(f.flags.updateifcopy is False) - assert_(c.flags.writebackifcopy is False) - assert_(d.flags.writebackifcopy is False) - assert_(e.flags.writebackifcopy is False) - assert_(f.flags.writebackifcopy is False) - - @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") - def test_refcount(self): - # includes regression test for reference count error gh-13165 - inds = [np.intp(0), np.array([True]*self.a.size), np.array([0]), None] - indtype = np.dtype(np.intp) - rc_indtype = sys.getrefcount(indtype) - for ind in inds: - rc_ind = sys.getrefcount(ind) - for _ in range(100): - try: - self.a.flat[ind] - except IndexError: - pass - assert_(abs(sys.getrefcount(ind) - rc_ind) < 50) - assert_(abs(sys.getrefcount(indtype) - rc_indtype) < 50) - - -class TestResize(object): - - @_no_tracing - def test_basic(self): - x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) - if IS_PYPY: - x.resize((5, 5), refcheck=False) - else: - x.resize((5, 5)) - assert_array_equal(x.flat[:9], - np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat) - assert_array_equal(x[9:].flat, 0) - - def test_check_reference(self): - x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) - y = x - assert_raises(ValueError, x.resize, (5, 1)) - del y # avoid pyflakes unused variable warning. 
- - @_no_tracing - def test_int_shape(self): - x = np.eye(3) - if IS_PYPY: - x.resize(3, refcheck=False) - else: - x.resize(3) - assert_array_equal(x, np.eye(3)[0,:]) - - def test_none_shape(self): - x = np.eye(3) - x.resize(None) - assert_array_equal(x, np.eye(3)) - x.resize() - assert_array_equal(x, np.eye(3)) - - def test_0d_shape(self): - # to it multiple times to test it does not break alloc cache gh-9216 - for i in range(10): - x = np.empty((1,)) - x.resize(()) - assert_equal(x.shape, ()) - assert_equal(x.size, 1) - x = np.empty(()) - x.resize((1,)) - assert_equal(x.shape, (1,)) - assert_equal(x.size, 1) - - def test_invalid_arguments(self): - assert_raises(TypeError, np.eye(3).resize, 'hi') - assert_raises(ValueError, np.eye(3).resize, -1) - assert_raises(TypeError, np.eye(3).resize, order=1) - assert_raises(TypeError, np.eye(3).resize, refcheck='hi') - - @_no_tracing - def test_freeform_shape(self): - x = np.eye(3) - if IS_PYPY: - x.resize(3, 2, 1, refcheck=False) - else: - x.resize(3, 2, 1) - assert_(x.shape == (3, 2, 1)) - - @_no_tracing - def test_zeros_appended(self): - x = np.eye(3) - if IS_PYPY: - x.resize(2, 3, 3, refcheck=False) - else: - x.resize(2, 3, 3) - assert_array_equal(x[0], np.eye(3)) - assert_array_equal(x[1], np.zeros((3, 3))) - - @_no_tracing - def test_obj_obj(self): - # check memory is initialized on resize, gh-4857 - a = np.ones(10, dtype=[('k', object, 2)]) - if IS_PYPY: - a.resize(15, refcheck=False) - else: - a.resize(15,) - assert_equal(a.shape, (15,)) - assert_array_equal(a['k'][-5:], 0) - assert_array_equal(a['k'][:-5], 1) - - def test_empty_view(self): - # check that sizes containing a zero don't trigger a reallocate for - # already empty arrays - x = np.zeros((10, 0), int) - x_view = x[...] 
- x_view.resize((0, 10)) - x_view.resize((0, 100)) - - def test_check_weakref(self): - x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) - xref = weakref.ref(x) - assert_raises(ValueError, x.resize, (5, 1)) - del xref # avoid pyflakes unused variable warning. - - -class TestRecord(object): - def test_field_rename(self): - dt = np.dtype([('f', float), ('i', int)]) - dt.names = ['p', 'q'] - assert_equal(dt.names, ['p', 'q']) - - def test_multiple_field_name_occurrence(self): - def test_dtype_init(): - np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")]) - - # Error raised when multiple fields have the same name - assert_raises(ValueError, test_dtype_init) - - @pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3") - def test_bytes_fields(self): - # Bytes are not allowed in field names and not recognized in titles - # on Py3 - assert_raises(TypeError, np.dtype, [(b'a', int)]) - assert_raises(TypeError, np.dtype, [(('b', b'a'), int)]) - - dt = np.dtype([((b'a', 'b'), int)]) - assert_raises(TypeError, dt.__getitem__, b'a') - - x = np.array([(1,), (2,), (3,)], dtype=dt) - assert_raises(IndexError, x.__getitem__, b'a') - - y = x[0] - assert_raises(IndexError, y.__getitem__, b'a') - - @pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3") - def test_multiple_field_name_unicode(self): - def test_dtype_unicode(): - np.dtype([("\u20B9", "f8"), ("B", "f8"), ("\u20B9", "f8")]) - - # Error raised when multiple fields have the same name(unicode included) - assert_raises(ValueError, test_dtype_unicode) - - @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2") - def test_unicode_field_titles(self): - # Unicode field titles are added to field dict on Py2 - title = u'b' - dt = np.dtype([((title, 'a'), int)]) - dt[title] - dt['a'] - x = np.array([(1,), (2,), (3,)], dtype=dt) - x[title] - x['a'] - y = x[0] - y[title] - y['a'] - - @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2") - def test_unicode_field_names(self): - # Unicode 
field names are converted to ascii on Python 2: - encodable_name = u'b' - assert_equal(np.dtype([(encodable_name, int)]).names[0], b'b') - assert_equal(np.dtype([(('a', encodable_name), int)]).names[0], b'b') - - # But raises UnicodeEncodeError if it can't be encoded: - nonencodable_name = u'\uc3bc' - assert_raises(UnicodeEncodeError, np.dtype, [(nonencodable_name, int)]) - assert_raises(UnicodeEncodeError, np.dtype, [(('a', nonencodable_name), int)]) - - def test_fromarrays_unicode(self): - # A single name string provided to fromarrays() is allowed to be unicode - # on both Python 2 and 3: - x = np.core.records.fromarrays([[0], [1]], names=u'a,b', formats=u'i4,i4') - assert_equal(x['a'][0], 0) - assert_equal(x['b'][0], 1) - - def test_unicode_order(self): - # Test that we can sort with order as a unicode field name in both Python 2 and - # 3: - name = u'b' - x = np.array([1, 3, 2], dtype=[(name, int)]) - x.sort(order=name) - assert_equal(x[u'b'], np.array([1, 2, 3])) - - def test_field_names(self): - # Test unicode and 8-bit / byte strings can be used - a = np.zeros((1,), dtype=[('f1', 'i4'), - ('f2', 'i4'), - ('f3', [('sf1', 'i4')])]) - is_py3 = sys.version_info[0] >= 3 - if is_py3: - funcs = (str,) - # byte string indexing fails gracefully - assert_raises(IndexError, a.__setitem__, b'f1', 1) - assert_raises(IndexError, a.__getitem__, b'f1') - assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1) - assert_raises(IndexError, a['f1'].__getitem__, b'sf1') - else: - funcs = (str, unicode) - for func in funcs: - b = a.copy() - fn1 = func('f1') - b[fn1] = 1 - assert_equal(b[fn1], 1) - fnn = func('not at all') - assert_raises(ValueError, b.__setitem__, fnn, 1) - assert_raises(ValueError, b.__getitem__, fnn) - b[0][fn1] = 2 - assert_equal(b[fn1], 2) - # Subfield - assert_raises(ValueError, b[0].__setitem__, fnn, 1) - assert_raises(ValueError, b[0].__getitem__, fnn) - # Subfield - fn3 = func('f3') - sfn1 = func('sf1') - b[fn3][sfn1] = 1 - assert_equal(b[fn3][sfn1], 1) 
- assert_raises(ValueError, b[fn3].__setitem__, fnn, 1) - assert_raises(ValueError, b[fn3].__getitem__, fnn) - # multiple subfields - fn2 = func('f2') - b[fn2] = 3 - - assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3)) - assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2)) - assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,))) - - # non-ascii unicode field indexing is well behaved - if not is_py3: - pytest.skip('non ascii unicode field indexing skipped; ' - 'raises segfault on python 2.x') - else: - assert_raises(ValueError, a.__setitem__, u'\u03e0', 1) - assert_raises(ValueError, a.__getitem__, u'\u03e0') - - def test_record_hash(self): - a = np.array([(1, 2), (1, 2)], dtype='i1,i2') - a.flags.writeable = False - b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')]) - b.flags.writeable = False - c = np.array([(1, 2), (3, 4)], dtype='i1,i2') - c.flags.writeable = False - assert_(hash(a[0]) == hash(a[1])) - assert_(hash(a[0]) == hash(b[0])) - assert_(hash(a[0]) != hash(b[1])) - assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0]) - - def test_record_no_hash(self): - a = np.array([(1, 2), (1, 2)], dtype='i1,i2') - assert_raises(TypeError, hash, a[0]) - - def test_empty_structure_creation(self): - # make sure these do not raise errors (gh-5631) - np.array([()], dtype={'names': [], 'formats': [], - 'offsets': [], 'itemsize': 12}) - np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [], - 'offsets': [], 'itemsize': 12}) - - def test_multifield_indexing_view(self): - a = np.ones(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u4')]) - v = a[['a', 'c']] - assert_(v.base is a) - assert_(v.dtype == np.dtype({'names': ['a', 'c'], - 'formats': ['i4', 'u4'], - 'offsets': [0, 8]})) - v[:] = (4,5) - assert_equal(a[0].item(), (4, 1, 5)) - -class TestView(object): - def test_basic(self): - x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], - dtype=[('r', np.int8), ('g', np.int8), - ('b', np.int8), ('a', np.int8)]) - # We must be specific about the endianness here: - y = 
x.view(dtype=' 0) - assert_(issubclass(w[0].category, RuntimeWarning)) - - def test_empty(self): - A = np.zeros((0, 3)) - for f in self.funcs: - for axis in [0, None]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_(np.isnan(f(A, axis=axis)).all()) - assert_(len(w) > 0) - assert_(issubclass(w[0].category, RuntimeWarning)) - for axis in [1]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_equal(f(A, axis=axis), np.zeros([])) - - def test_mean_values(self): - for mat in [self.rmat, self.cmat, self.omat]: - for axis in [0, 1]: - tgt = mat.sum(axis=axis) - res = _mean(mat, axis=axis) * mat.shape[axis] - assert_almost_equal(res, tgt) - for axis in [None]: - tgt = mat.sum(axis=axis) - res = _mean(mat, axis=axis) * np.prod(mat.shape) - assert_almost_equal(res, tgt) - - def test_mean_float16(self): - # This fail if the sum inside mean is done in float16 instead - # of float32. - assert_(_mean(np.ones(100000, dtype='float16')) == 1) - - def test_var_values(self): - for mat in [self.rmat, self.cmat, self.omat]: - for axis in [0, 1, None]: - msqr = _mean(mat * mat.conj(), axis=axis) - mean = _mean(mat, axis=axis) - tgt = msqr - mean * mean.conjugate() - res = _var(mat, axis=axis) - assert_almost_equal(res, tgt) - - def test_std_values(self): - for mat in [self.rmat, self.cmat, self.omat]: - for axis in [0, 1, None]: - tgt = np.sqrt(_var(mat, axis=axis)) - res = _std(mat, axis=axis) - assert_almost_equal(res, tgt) - - def test_subclass(self): - class TestArray(np.ndarray): - def __new__(cls, data, info): - result = np.array(data) - result = result.view(cls) - result.info = info - return result - - def __array_finalize__(self, obj): - self.info = getattr(obj, "info", '') - - dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba') - res = dat.mean(1) - assert_(res.info == dat.info) - res = dat.std(1) - assert_(res.info == dat.info) - res = dat.var(1) - assert_(res.info == dat.info) - -class 
TestVdot(object): - def test_basic(self): - dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger'] - dt_complex = np.typecodes['Complex'] - - # test real - a = np.eye(3) - for dt in dt_numeric + 'O': - b = a.astype(dt) - res = np.vdot(b, b) - assert_(np.isscalar(res)) - assert_equal(np.vdot(b, b), 3) - - # test complex - a = np.eye(3) * 1j - for dt in dt_complex + 'O': - b = a.astype(dt) - res = np.vdot(b, b) - assert_(np.isscalar(res)) - assert_equal(np.vdot(b, b), 3) - - # test boolean - b = np.eye(3, dtype=bool) - res = np.vdot(b, b) - assert_(np.isscalar(res)) - assert_equal(np.vdot(b, b), True) - - def test_vdot_array_order(self): - a = np.array([[1, 2], [3, 4]], order='C') - b = np.array([[1, 2], [3, 4]], order='F') - res = np.vdot(a, a) - - # integer arrays are exact - assert_equal(np.vdot(a, b), res) - assert_equal(np.vdot(b, a), res) - assert_equal(np.vdot(b, b), res) - - def test_vdot_uncontiguous(self): - for size in [2, 1000]: - # Different sizes match different branches in vdot. 
- a = np.zeros((size, 2, 2)) - b = np.zeros((size, 2, 2)) - a[:, 0, 0] = np.arange(size) - b[:, 0, 0] = np.arange(size) + 1 - # Make a and b uncontiguous: - a = a[..., 0] - b = b[..., 0] - - assert_equal(np.vdot(a, b), - np.vdot(a.flatten(), b.flatten())) - assert_equal(np.vdot(a, b.copy()), - np.vdot(a.flatten(), b.flatten())) - assert_equal(np.vdot(a.copy(), b), - np.vdot(a.flatten(), b.flatten())) - assert_equal(np.vdot(a.copy('F'), b), - np.vdot(a.flatten(), b.flatten())) - assert_equal(np.vdot(a, b.copy('F')), - np.vdot(a.flatten(), b.flatten())) - - -class TestDot(object): - def setup(self): - np.random.seed(128) - self.A = np.random.rand(4, 2) - self.b1 = np.random.rand(2, 1) - self.b2 = np.random.rand(2) - self.b3 = np.random.rand(1, 2) - self.b4 = np.random.rand(4) - self.N = 7 - - def test_dotmatmat(self): - A = self.A - res = np.dot(A.transpose(), A) - tgt = np.array([[1.45046013, 0.86323640], - [0.86323640, 0.84934569]]) - assert_almost_equal(res, tgt, decimal=self.N) - - def test_dotmatvec(self): - A, b1 = self.A, self.b1 - res = np.dot(A, b1) - tgt = np.array([[0.32114320], [0.04889721], - [0.15696029], [0.33612621]]) - assert_almost_equal(res, tgt, decimal=self.N) - - def test_dotmatvec2(self): - A, b2 = self.A, self.b2 - res = np.dot(A, b2) - tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293]) - assert_almost_equal(res, tgt, decimal=self.N) - - def test_dotvecmat(self): - A, b4 = self.A, self.b4 - res = np.dot(b4, A) - tgt = np.array([1.23495091, 1.12222648]) - assert_almost_equal(res, tgt, decimal=self.N) - - def test_dotvecmat2(self): - b3, A = self.b3, self.A - res = np.dot(b3, A.transpose()) - tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]]) - assert_almost_equal(res, tgt, decimal=self.N) - - def test_dotvecmat3(self): - A, b4 = self.A, self.b4 - res = np.dot(A.transpose(), b4) - tgt = np.array([1.23495091, 1.12222648]) - assert_almost_equal(res, tgt, decimal=self.N) - - def test_dotvecvecouter(self): - b1, b3 = 
self.b1, self.b3 - res = np.dot(b1, b3) - tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]]) - assert_almost_equal(res, tgt, decimal=self.N) - - def test_dotvecvecinner(self): - b1, b3 = self.b1, self.b3 - res = np.dot(b3, b1) - tgt = np.array([[ 0.23129668]]) - assert_almost_equal(res, tgt, decimal=self.N) - - def test_dotcolumnvect1(self): - b1 = np.ones((3, 1)) - b2 = [5.3] - res = np.dot(b1, b2) - tgt = np.array([5.3, 5.3, 5.3]) - assert_almost_equal(res, tgt, decimal=self.N) - - def test_dotcolumnvect2(self): - b1 = np.ones((3, 1)).transpose() - b2 = [6.2] - res = np.dot(b2, b1) - tgt = np.array([6.2, 6.2, 6.2]) - assert_almost_equal(res, tgt, decimal=self.N) - - def test_dotvecscalar(self): - np.random.seed(100) - b1 = np.random.rand(1, 1) - b2 = np.random.rand(1, 4) - res = np.dot(b1, b2) - tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]]) - assert_almost_equal(res, tgt, decimal=self.N) - - def test_dotvecscalar2(self): - np.random.seed(100) - b1 = np.random.rand(4, 1) - b2 = np.random.rand(1, 1) - res = np.dot(b1, b2) - tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]]) - assert_almost_equal(res, tgt, decimal=self.N) - - def test_all(self): - dims = [(), (1,), (1, 1)] - dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)] - for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)): - b1 = np.zeros(dim1) - b2 = np.zeros(dim2) - res = np.dot(b1, b2) - tgt = np.zeros(dim) - assert_(res.shape == tgt.shape) - assert_almost_equal(res, tgt, decimal=self.N) - - def test_vecobject(self): - class Vec(object): - def __init__(self, sequence=None): - if sequence is None: - sequence = [] - self.array = np.array(sequence) - - def __add__(self, other): - out = Vec() - out.array = self.array + other.array - return out - - def __sub__(self, other): - out = Vec() - out.array = self.array - other.array - return out - - def __mul__(self, other): # with scalar - out = Vec(self.array.copy()) - out.array *= 
other - return out - - def __rmul__(self, other): - return self*other - - U_non_cont = np.transpose([[1., 1.], [1., 2.]]) - U_cont = np.ascontiguousarray(U_non_cont) - x = np.array([Vec([1., 0.]), Vec([0., 1.])]) - zeros = np.array([Vec([0., 0.]), Vec([0., 0.])]) - zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x) - assert_equal(zeros[0].array, zeros_test[0].array) - assert_equal(zeros[1].array, zeros_test[1].array) - - def test_dot_2args(self): - from numpy.core.multiarray import dot - - a = np.array([[1, 2], [3, 4]], dtype=float) - b = np.array([[1, 0], [1, 1]], dtype=float) - c = np.array([[3, 2], [7, 4]], dtype=float) - - d = dot(a, b) - assert_allclose(c, d) - - def test_dot_3args(self): - from numpy.core.multiarray import dot - - np.random.seed(22) - f = np.random.random_sample((1024, 16)) - v = np.random.random_sample((16, 32)) - - r = np.empty((1024, 32)) - for i in range(12): - dot(f, v, r) - if HAS_REFCOUNT: - assert_equal(sys.getrefcount(r), 2) - r2 = dot(f, v, out=None) - assert_array_equal(r2, r) - assert_(r is dot(f, v, out=r)) - - v = v[:, 0].copy() # v.shape == (16,) - r = r[:, 0].copy() # r.shape == (1024,) - r2 = dot(f, v) - assert_(r is dot(f, v, r)) - assert_array_equal(r2, r) - - def test_dot_3args_errors(self): - from numpy.core.multiarray import dot - - np.random.seed(22) - f = np.random.random_sample((1024, 16)) - v = np.random.random_sample((16, 32)) - - r = np.empty((1024, 31)) - assert_raises(ValueError, dot, f, v, r) - - r = np.empty((1024,)) - assert_raises(ValueError, dot, f, v, r) - - r = np.empty((32,)) - assert_raises(ValueError, dot, f, v, r) - - r = np.empty((32, 1024)) - assert_raises(ValueError, dot, f, v, r) - assert_raises(ValueError, dot, f, v, r.T) - - r = np.empty((1024, 64)) - assert_raises(ValueError, dot, f, v, r[:, ::2]) - assert_raises(ValueError, dot, f, v, r[:, :32]) - - r = np.empty((1024, 32), dtype=np.float32) - assert_raises(ValueError, dot, f, v, r) - - r = np.empty((1024, 32), dtype=int) - 
assert_raises(ValueError, dot, f, v, r) - - def test_dot_array_order(self): - a = np.array([[1, 2], [3, 4]], order='C') - b = np.array([[1, 2], [3, 4]], order='F') - res = np.dot(a, a) - - # integer arrays are exact - assert_equal(np.dot(a, b), res) - assert_equal(np.dot(b, a), res) - assert_equal(np.dot(b, b), res) - - def test_accelerate_framework_sgemv_fix(self): - - def aligned_array(shape, align, dtype, order='C'): - d = dtype(0) - N = np.prod(shape) - tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8) - address = tmp.__array_interface__["data"][0] - for offset in range(align): - if (address + offset) % align == 0: - break - tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype) - return tmp.reshape(shape, order=order) - - def as_aligned(arr, align, dtype, order='C'): - aligned = aligned_array(arr.shape, align, dtype, order) - aligned[:] = arr[:] - return aligned - - def assert_dot_close(A, X, desired): - assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7) - - m = aligned_array(100, 15, np.float32) - s = aligned_array((100, 100), 15, np.float32) - np.dot(s, m) # this will always segfault if the bug is present - - testdata = itertools.product((15,32), (10000,), (200,89), ('C','F')) - for align, m, n, a_order in testdata: - # Calculation in double precision - A_d = np.random.rand(m, n) - X_d = np.random.rand(n) - desired = np.dot(A_d, X_d) - # Calculation with aligned single precision - A_f = as_aligned(A_d, align, np.float32, order=a_order) - X_f = as_aligned(X_d, align, np.float32) - assert_dot_close(A_f, X_f, desired) - # Strided A rows - A_d_2 = A_d[::2] - desired = np.dot(A_d_2, X_d) - A_f_2 = A_f[::2] - assert_dot_close(A_f_2, X_f, desired) - # Strided A columns, strided X vector - A_d_22 = A_d_2[:, ::2] - X_d_2 = X_d[::2] - desired = np.dot(A_d_22, X_d_2) - A_f_22 = A_f_2[:, ::2] - X_f_2 = X_f[::2] - assert_dot_close(A_f_22, X_f_2, desired) - # Check the strides are as expected - if a_order == 'F': - assert_equal(A_f_22.strides, (8, 8 * m)) - 
else: - assert_equal(A_f_22.strides, (8 * n, 8)) - assert_equal(X_f_2.strides, (8,)) - # Strides in A rows + cols only - X_f_2c = as_aligned(X_f_2, align, np.float32) - assert_dot_close(A_f_22, X_f_2c, desired) - # Strides just in A cols - A_d_12 = A_d[:, ::2] - desired = np.dot(A_d_12, X_d_2) - A_f_12 = A_f[:, ::2] - assert_dot_close(A_f_12, X_f_2c, desired) - # Strides in A cols and X - assert_dot_close(A_f_12, X_f_2, desired) - - -class MatmulCommon(object): - """Common tests for '@' operator and numpy.matmul. - - """ - # Should work with these types. Will want to add - # "O" at some point - types = "?bhilqBHILQefdgFDGO" - - def test_exceptions(self): - dims = [ - ((1,), (2,)), # mismatched vector vector - ((2, 1,), (2,)), # mismatched matrix vector - ((2,), (1, 2)), # mismatched vector matrix - ((1, 2), (3, 1)), # mismatched matrix matrix - ((1,), ()), # vector scalar - ((), (1)), # scalar vector - ((1, 1), ()), # matrix scalar - ((), (1, 1)), # scalar matrix - ((2, 2, 1), (3, 1, 2)), # cannot broadcast - ] - - for dt, (dm1, dm2) in itertools.product(self.types, dims): - a = np.ones(dm1, dtype=dt) - b = np.ones(dm2, dtype=dt) - assert_raises(ValueError, self.matmul, a, b) - - def test_shapes(self): - dims = [ - ((1, 1), (2, 1, 1)), # broadcast first argument - ((2, 1, 1), (1, 1)), # broadcast second argument - ((2, 1, 1), (2, 1, 1)), # matrix stack sizes match - ] - - for dt, (dm1, dm2) in itertools.product(self.types, dims): - a = np.ones(dm1, dtype=dt) - b = np.ones(dm2, dtype=dt) - res = self.matmul(a, b) - assert_(res.shape == (2, 1, 1)) - - # vector vector returns scalars. 
- for dt in self.types: - a = np.ones((2,), dtype=dt) - b = np.ones((2,), dtype=dt) - c = self.matmul(a, b) - assert_(np.array(c).shape == ()) - - def test_result_types(self): - mat = np.ones((1,1)) - vec = np.ones((1,)) - for dt in self.types: - m = mat.astype(dt) - v = vec.astype(dt) - for arg in [(m, v), (v, m), (m, m)]: - res = self.matmul(*arg) - assert_(res.dtype == dt) - - # vector vector returns scalars - if dt != "O": - res = self.matmul(v, v) - assert_(type(res) is np.dtype(dt).type) - - def test_scalar_output(self): - vec1 = np.array([2]) - vec2 = np.array([3, 4]).reshape(1, -1) - tgt = np.array([6, 8]) - for dt in self.types[1:]: - v1 = vec1.astype(dt) - v2 = vec2.astype(dt) - res = self.matmul(v1, v2) - assert_equal(res, tgt) - res = self.matmul(v2.T, v1) - assert_equal(res, tgt) - - # boolean type - vec = np.array([True, True], dtype='?').reshape(1, -1) - res = self.matmul(vec[:, 0], vec) - assert_equal(res, True) - - def test_vector_vector_values(self): - vec1 = np.array([1, 2]) - vec2 = np.array([3, 4]).reshape(-1, 1) - tgt1 = np.array([11]) - tgt2 = np.array([[3, 6], [4, 8]]) - for dt in self.types[1:]: - v1 = vec1.astype(dt) - v2 = vec2.astype(dt) - res = self.matmul(v1, v2) - assert_equal(res, tgt1) - # no broadcast, we must make v1 into a 2d ndarray - res = self.matmul(v2, v1.reshape(1, -1)) - assert_equal(res, tgt2) - - # boolean type - vec = np.array([True, True], dtype='?') - res = self.matmul(vec, vec) - assert_equal(res, True) - - def test_vector_matrix_values(self): - vec = np.array([1, 2]) - mat1 = np.array([[1, 2], [3, 4]]) - mat2 = np.stack([mat1]*2, axis=0) - tgt1 = np.array([7, 10]) - tgt2 = np.stack([tgt1]*2, axis=0) - for dt in self.types[1:]: - v = vec.astype(dt) - m1 = mat1.astype(dt) - m2 = mat2.astype(dt) - res = self.matmul(v, m1) - assert_equal(res, tgt1) - res = self.matmul(v, m2) - assert_equal(res, tgt2) - - # boolean type - vec = np.array([True, False]) - mat1 = np.array([[True, False], [False, True]]) - mat2 = 
np.stack([mat1]*2, axis=0) - tgt1 = np.array([True, False]) - tgt2 = np.stack([tgt1]*2, axis=0) - - res = self.matmul(vec, mat1) - assert_equal(res, tgt1) - res = self.matmul(vec, mat2) - assert_equal(res, tgt2) - - def test_matrix_vector_values(self): - vec = np.array([1, 2]) - mat1 = np.array([[1, 2], [3, 4]]) - mat2 = np.stack([mat1]*2, axis=0) - tgt1 = np.array([5, 11]) - tgt2 = np.stack([tgt1]*2, axis=0) - for dt in self.types[1:]: - v = vec.astype(dt) - m1 = mat1.astype(dt) - m2 = mat2.astype(dt) - res = self.matmul(m1, v) - assert_equal(res, tgt1) - res = self.matmul(m2, v) - assert_equal(res, tgt2) - - # boolean type - vec = np.array([True, False]) - mat1 = np.array([[True, False], [False, True]]) - mat2 = np.stack([mat1]*2, axis=0) - tgt1 = np.array([True, False]) - tgt2 = np.stack([tgt1]*2, axis=0) - - res = self.matmul(vec, mat1) - assert_equal(res, tgt1) - res = self.matmul(vec, mat2) - assert_equal(res, tgt2) - - def test_matrix_matrix_values(self): - mat1 = np.array([[1, 2], [3, 4]]) - mat2 = np.array([[1, 0], [1, 1]]) - mat12 = np.stack([mat1, mat2], axis=0) - mat21 = np.stack([mat2, mat1], axis=0) - tgt11 = np.array([[7, 10], [15, 22]]) - tgt12 = np.array([[3, 2], [7, 4]]) - tgt21 = np.array([[1, 2], [4, 6]]) - tgt12_21 = np.stack([tgt12, tgt21], axis=0) - tgt11_12 = np.stack((tgt11, tgt12), axis=0) - tgt11_21 = np.stack((tgt11, tgt21), axis=0) - for dt in self.types[1:]: - m1 = mat1.astype(dt) - m2 = mat2.astype(dt) - m12 = mat12.astype(dt) - m21 = mat21.astype(dt) - - # matrix @ matrix - res = self.matmul(m1, m2) - assert_equal(res, tgt12) - res = self.matmul(m2, m1) - assert_equal(res, tgt21) - - # stacked @ matrix - res = self.matmul(m12, m1) - assert_equal(res, tgt11_21) - - # matrix @ stacked - res = self.matmul(m1, m12) - assert_equal(res, tgt11_12) - - # stacked @ stacked - res = self.matmul(m12, m21) - assert_equal(res, tgt12_21) - - # boolean type - m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_) - m2 = np.array([[1, 0], [1, 1]], 
dtype=np.bool_) - m12 = np.stack([m1, m2], axis=0) - m21 = np.stack([m2, m1], axis=0) - tgt11 = m1 - tgt12 = m1 - tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_) - tgt12_21 = np.stack([tgt12, tgt21], axis=0) - tgt11_12 = np.stack((tgt11, tgt12), axis=0) - tgt11_21 = np.stack((tgt11, tgt21), axis=0) - - # matrix @ matrix - res = self.matmul(m1, m2) - assert_equal(res, tgt12) - res = self.matmul(m2, m1) - assert_equal(res, tgt21) - - # stacked @ matrix - res = self.matmul(m12, m1) - assert_equal(res, tgt11_21) - - # matrix @ stacked - res = self.matmul(m1, m12) - assert_equal(res, tgt11_12) - - # stacked @ stacked - res = self.matmul(m12, m21) - assert_equal(res, tgt12_21) - - -class TestMatmul(MatmulCommon): - matmul = np.matmul - - def test_out_arg(self): - a = np.ones((5, 2), dtype=float) - b = np.array([[1, 3], [5, 7]], dtype=float) - tgt = np.dot(a, b) - - # test as positional argument - msg = "out positional argument" - out = np.zeros((5, 2), dtype=float) - self.matmul(a, b, out) - assert_array_equal(out, tgt, err_msg=msg) - - # test as keyword argument - msg = "out keyword argument" - out = np.zeros((5, 2), dtype=float) - self.matmul(a, b, out=out) - assert_array_equal(out, tgt, err_msg=msg) - - # test out with not allowed type cast (safe casting) - msg = "Cannot cast ufunc .* output" - out = np.zeros((5, 2), dtype=np.int32) - assert_raises_regex(TypeError, msg, self.matmul, a, b, out=out) - - # test out with type upcast to complex - out = np.zeros((5, 2), dtype=np.complex128) - c = self.matmul(a, b, out=out) - assert_(c is out) - with suppress_warnings() as sup: - sup.filter(np.ComplexWarning, '') - c = c.astype(tgt.dtype) - assert_array_equal(c, tgt) - - def test_out_contiguous(self): - a = np.ones((5, 2), dtype=float) - b = np.array([[1, 3], [5, 7]], dtype=float) - v = np.array([1, 3], dtype=float) - tgt = np.dot(a, b) - tgt_mv = np.dot(a, v) - - # test out non-contiguous - out = np.ones((5, 2, 2), dtype=float) - c = self.matmul(a, b, out=out[..., 0]) - 
assert c.base is out - assert_array_equal(c, tgt) - c = self.matmul(a, v, out=out[:, 0, 0]) - assert_array_equal(c, tgt_mv) - c = self.matmul(v, a.T, out=out[:, 0, 0]) - assert_array_equal(c, tgt_mv) - - # test out contiguous in only last dim - out = np.ones((10, 2), dtype=float) - c = self.matmul(a, b, out=out[::2, :]) - assert_array_equal(c, tgt) - - # test transposes of out, args - out = np.ones((5, 2), dtype=float) - c = self.matmul(b.T, a.T, out=out.T) - assert_array_equal(out, tgt) - - m1 = np.arange(15.).reshape(5, 3) - m2 = np.arange(21.).reshape(3, 7) - m3 = np.arange(30.).reshape(5, 6)[:, ::2] # non-contiguous - vc = np.arange(10.) - vr = np.arange(6.) - m0 = np.zeros((3, 0)) - @pytest.mark.parametrize('args', ( - # matrix-matrix - (m1, m2), (m2.T, m1.T), (m2.T.copy(), m1.T), (m2.T, m1.T.copy()), - # matrix-matrix-transpose, contiguous and non - (m1, m1.T), (m1.T, m1), (m1, m3.T), (m3, m1.T), - (m3, m3.T), (m3.T, m3), - # matrix-matrix non-contiguous - (m3, m2), (m2.T, m3.T), (m2.T.copy(), m3.T), - # vector-matrix, matrix-vector, contiguous - (m1, vr[:3]), (vc[:5], m1), (m1.T, vc[:5]), (vr[:3], m1.T), - # vector-matrix, matrix-vector, vector non-contiguous - (m1, vr[::2]), (vc[::2], m1), (m1.T, vc[::2]), (vr[::2], m1.T), - # vector-matrix, matrix-vector, matrix non-contiguous - (m3, vr[:3]), (vc[:5], m3), (m3.T, vc[:5]), (vr[:3], m3.T), - # vector-matrix, matrix-vector, both non-contiguous - (m3, vr[::2]), (vc[::2], m3), (m3.T, vc[::2]), (vr[::2], m3.T), - # size == 0 - (m0, m0.T), (m0.T, m0), (m1, m0), (m0.T, m1.T), - )) - def test_dot_equivalent(self, args): - r1 = np.matmul(*args) - r2 = np.dot(*args) - assert_equal(r1, r2) - - r3 = np.matmul(args[0].copy(), args[1].copy()) - assert_equal(r1, r3) - - def test_matmul_object(self): - import fractions - - f = np.vectorize(fractions.Fraction) - def random_ints(): - return np.random.randint(1, 1000, size=(10, 3, 3)) - M1 = f(random_ints(), random_ints()) - M2 = f(random_ints(), random_ints()) - - M3 = 
self.matmul(M1, M2) - - [N1, N2, N3] = [a.astype(float) for a in [M1, M2, M3]] - - assert_allclose(N3, self.matmul(N1, N2)) - - def test_matmul_object_type_scalar(self): - from fractions import Fraction as F - v = np.array([F(2,3), F(5,7)]) - res = self.matmul(v, v) - assert_(type(res) is F) - - def test_matmul_empty(self): - a = np.empty((3, 0), dtype=object) - b = np.empty((0, 3), dtype=object) - c = np.zeros((3, 3)) - assert_array_equal(np.matmul(a, b), c) - - def test_matmul_exception_multiply(self): - # test that matmul fails if `__mul__` is missing - class add_not_multiply(): - def __add__(self, other): - return self - a = np.full((3,3), add_not_multiply()) - with assert_raises(TypeError): - b = np.matmul(a, a) - - def test_matmul_exception_add(self): - # test that matmul fails if `__add__` is missing - class multiply_not_add(): - def __mul__(self, other): - return self - a = np.full((3,3), multiply_not_add()) - with assert_raises(TypeError): - b = np.matmul(a, a) - - def test_matmul_bool(self): - # gh-14439 - a = np.array([[1, 0],[1, 1]], dtype=bool) - assert np.max(a.view(np.uint8)) == 1 - b = np.matmul(a, a) - # matmul with boolean output should always be 0, 1 - assert np.max(b.view(np.uint8)) == 1 - - rg = np.random.default_rng(np.random.PCG64(43)) - d = rg.integers(2, size=4*5, dtype=np.int8) - d = d.reshape(4, 5) > 0 - out1 = np.matmul(d, d.reshape(5, 4)) - out2 = np.dot(d, d.reshape(5, 4)) - assert_equal(out1, out2) - - c = np.matmul(np.zeros((2, 0), dtype=bool), np.zeros(0, dtype=bool)) - assert not np.any(c) - - -if sys.version_info[:2] >= (3, 5): - class TestMatmulOperator(MatmulCommon): - import operator - matmul = operator.matmul - - def test_array_priority_override(self): - - class A(object): - __array_priority__ = 1000 - - def __matmul__(self, other): - return "A" - - def __rmatmul__(self, other): - return "A" - - a = A() - b = np.ones(2) - assert_equal(self.matmul(a, b), "A") - assert_equal(self.matmul(b, a), "A") - - def 
test_matmul_raises(self): - assert_raises(TypeError, self.matmul, np.int8(5), np.int8(5)) - assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc')) - assert_raises(ValueError, self.matmul, np.arange(10), np.void(b'abc')) - - def test_matmul_inplace(): - # It would be nice to support in-place matmul eventually, but for now - # we don't have a working implementation, so better just to error out - # and nudge people to writing "a = a @ b". - a = np.eye(3) - b = np.eye(3) - assert_raises(TypeError, a.__imatmul__, b) - import operator - assert_raises(TypeError, operator.imatmul, a, b) - # we avoid writing the token `exec` so as not to crash python 2's - # parser - exec_ = getattr(builtins, "exec") - assert_raises(TypeError, exec_, "a @= b", globals(), locals()) - - def test_matmul_axes(): - a = np.arange(3*4*5).reshape(3, 4, 5) - c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)]) - assert c.shape == (3, 4, 4) - d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)]) - assert d.shape == (4, 4, 3) - e = np.swapaxes(d, 0, 2) - assert_array_equal(e, c) - f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)]) - assert f.shape == (4, 5) - - -class TestInner(object): - - def test_inner_type_mismatch(self): - c = 1. - A = np.array((1,1), dtype='i,i') - - assert_raises(TypeError, np.inner, c, A) - assert_raises(TypeError, np.inner, A, c) - - def test_inner_scalar_and_vector(self): - for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': - sca = np.array(3, dtype=dt)[()] - vec = np.array([1, 2], dtype=dt) - desired = np.array([3, 6], dtype=dt) - assert_equal(np.inner(vec, sca), desired) - assert_equal(np.inner(sca, vec), desired) - - def test_vecself(self): - # Ticket 844. 
- # Inner product of a vector with itself segfaults or give - # meaningless result - a = np.zeros(shape=(1, 80), dtype=np.float64) - p = np.inner(a, a) - assert_almost_equal(p, 0, decimal=14) - - def test_inner_product_with_various_contiguities(self): - # github issue 6532 - for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': - # check an inner product involving a matrix transpose - A = np.array([[1, 2], [3, 4]], dtype=dt) - B = np.array([[1, 3], [2, 4]], dtype=dt) - C = np.array([1, 1], dtype=dt) - desired = np.array([4, 6], dtype=dt) - assert_equal(np.inner(A.T, C), desired) - assert_equal(np.inner(C, A.T), desired) - assert_equal(np.inner(B, C), desired) - assert_equal(np.inner(C, B), desired) - # check a matrix product - desired = np.array([[7, 10], [15, 22]], dtype=dt) - assert_equal(np.inner(A, B), desired) - # check the syrk vs. gemm paths - desired = np.array([[5, 11], [11, 25]], dtype=dt) - assert_equal(np.inner(A, A), desired) - assert_equal(np.inner(A, A.copy()), desired) - # check an inner product involving an aliased and reversed view - a = np.arange(5).astype(dt) - b = a[::-1] - desired = np.array(10, dtype=dt).item() - assert_equal(np.inner(b, a), desired) - - def test_3d_tensor(self): - for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': - a = np.arange(24).reshape(2,3,4).astype(dt) - b = np.arange(24, 48).reshape(2,3,4).astype(dt) - desired = np.array( - [[[[ 158, 182, 206], - [ 230, 254, 278]], - - [[ 566, 654, 742], - [ 830, 918, 1006]], - - [[ 974, 1126, 1278], - [1430, 1582, 1734]]], - - [[[1382, 1598, 1814], - [2030, 2246, 2462]], - - [[1790, 2070, 2350], - [2630, 2910, 3190]], - - [[2198, 2542, 2886], - [3230, 3574, 3918]]]], - dtype=dt - ) - assert_equal(np.inner(a, b), desired) - assert_equal(np.inner(b, a).transpose(2,3,0,1), desired) - - -class TestAlen(object): - def test_basic(self): - with pytest.warns(DeprecationWarning): - m = np.array([1, 2, 3]) - assert_equal(np.alen(m), 3) - - m = 
np.array([[1, 2, 3], [4, 5, 7]]) - assert_equal(np.alen(m), 2) - - m = [1, 2, 3] - assert_equal(np.alen(m), 3) - - m = [[1, 2, 3], [4, 5, 7]] - assert_equal(np.alen(m), 2) - - def test_singleton(self): - with pytest.warns(DeprecationWarning): - assert_equal(np.alen(5), 1) - - -class TestChoose(object): - def setup(self): - self.x = 2*np.ones((3,), dtype=int) - self.y = 3*np.ones((3,), dtype=int) - self.x2 = 2*np.ones((2, 3), dtype=int) - self.y2 = 3*np.ones((2, 3), dtype=int) - self.ind = [0, 0, 1] - - def test_basic(self): - A = np.choose(self.ind, (self.x, self.y)) - assert_equal(A, [2, 2, 3]) - - def test_broadcast1(self): - A = np.choose(self.ind, (self.x2, self.y2)) - assert_equal(A, [[2, 2, 3], [2, 2, 3]]) - - def test_broadcast2(self): - A = np.choose(self.ind, (self.x, self.y2)) - assert_equal(A, [[2, 2, 3], [2, 2, 3]]) - - -class TestRepeat(object): - def setup(self): - self.m = np.array([1, 2, 3, 4, 5, 6]) - self.m_rect = self.m.reshape((2, 3)) - - def test_basic(self): - A = np.repeat(self.m, [1, 3, 2, 1, 1, 2]) - assert_equal(A, [1, 2, 2, 2, 3, - 3, 4, 5, 6, 6]) - - def test_broadcast1(self): - A = np.repeat(self.m, 2) - assert_equal(A, [1, 1, 2, 2, 3, 3, - 4, 4, 5, 5, 6, 6]) - - def test_axis_spec(self): - A = np.repeat(self.m_rect, [2, 1], axis=0) - assert_equal(A, [[1, 2, 3], - [1, 2, 3], - [4, 5, 6]]) - - A = np.repeat(self.m_rect, [1, 3, 2], axis=1) - assert_equal(A, [[1, 2, 2, 2, 3, 3], - [4, 5, 5, 5, 6, 6]]) - - def test_broadcast2(self): - A = np.repeat(self.m_rect, 2, axis=0) - assert_equal(A, [[1, 2, 3], - [1, 2, 3], - [4, 5, 6], - [4, 5, 6]]) - - A = np.repeat(self.m_rect, 2, axis=1) - assert_equal(A, [[1, 1, 2, 2, 3, 3], - [4, 4, 5, 5, 6, 6]]) - - -# TODO: test for multidimensional -NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4} - - -@pytest.mark.parametrize('dt', [float, Decimal], ids=['float', 'object']) -class TestNeighborhoodIter(object): - # Simple, 2d tests - def test_simple2d(self, dt): - # Test zero and 
one padding for simple data type - x = np.array([[0, 1], [2, 3]], dtype=dt) - r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt), - np.array([[0, 0, 0], [0, 1, 0]], dtype=dt), - np.array([[0, 0, 1], [0, 2, 3]], dtype=dt), - np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator( - x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero']) - assert_array_equal(l, r) - - r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt), - np.array([[1, 1, 1], [0, 1, 1]], dtype=dt), - np.array([[1, 0, 1], [1, 2, 3]], dtype=dt), - np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator( - x, [-1, 0, -1, 1], x[0], NEIGH_MODE['one']) - assert_array_equal(l, r) - - r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt), - np.array([[4, 4, 4], [0, 1, 4]], dtype=dt), - np.array([[4, 0, 1], [4, 2, 3]], dtype=dt), - np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator( - x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant']) - assert_array_equal(l, r) - - def test_mirror2d(self, dt): - x = np.array([[0, 1], [2, 3]], dtype=dt) - r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt), - np.array([[0, 1, 1], [0, 1, 1]], dtype=dt), - np.array([[0, 0, 1], [2, 2, 3]], dtype=dt), - np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator( - x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror']) - assert_array_equal(l, r) - - # Simple, 1d tests - def test_simple(self, dt): - # Test padding with constant values - x = np.linspace(1, 5, 5).astype(dt) - r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]] - l = _multiarray_tests.test_neighborhood_iterator( - x, [-1, 1], x[0], NEIGH_MODE['zero']) - assert_array_equal(l, r) - - r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]] - l = _multiarray_tests.test_neighborhood_iterator( - x, [-1, 1], x[0], NEIGH_MODE['one']) - assert_array_equal(l, r) - - r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]] - l = 
_multiarray_tests.test_neighborhood_iterator( - x, [-1, 1], x[4], NEIGH_MODE['constant']) - assert_array_equal(l, r) - - # Test mirror modes - def test_mirror(self, dt): - x = np.linspace(1, 5, 5).astype(dt) - r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5], - [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt) - l = _multiarray_tests.test_neighborhood_iterator( - x, [-2, 2], x[1], NEIGH_MODE['mirror']) - assert_([i.dtype == dt for i in l]) - assert_array_equal(l, r) - - # Circular mode - def test_circular(self, dt): - x = np.linspace(1, 5, 5).astype(dt) - r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5], - [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt) - l = _multiarray_tests.test_neighborhood_iterator( - x, [-2, 2], x[0], NEIGH_MODE['circular']) - assert_array_equal(l, r) - - -# Test stacking neighborhood iterators -class TestStackedNeighborhoodIter(object): - # Simple, 1d test: stacking 2 constant-padded neigh iterators - def test_simple_const(self): - dt = np.float64 - # Test zero and one padding for simple data type - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([0], dtype=dt), - np.array([0], dtype=dt), - np.array([1], dtype=dt), - np.array([2], dtype=dt), - np.array([3], dtype=dt), - np.array([0], dtype=dt), - np.array([0], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [-2, 4], NEIGH_MODE['zero'], [0, 0], NEIGH_MODE['zero']) - assert_array_equal(l, r) - - r = [np.array([1, 0, 1], dtype=dt), - np.array([0, 1, 2], dtype=dt), - np.array([1, 2, 3], dtype=dt), - np.array([2, 3, 0], dtype=dt), - np.array([3, 0, 1], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [-1, 3], NEIGH_MODE['zero'], [-1, 1], NEIGH_MODE['one']) - assert_array_equal(l, r) - - # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and - # mirror padding - def test_simple_mirror(self): - dt = np.float64 - # Stacking zero on top of mirror - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([0, 1, 1], 
dtype=dt), - np.array([1, 1, 2], dtype=dt), - np.array([1, 2, 3], dtype=dt), - np.array([2, 3, 3], dtype=dt), - np.array([3, 3, 0], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [-1, 3], NEIGH_MODE['mirror'], [-1, 1], NEIGH_MODE['zero']) - assert_array_equal(l, r) - - # Stacking mirror on top of zero - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([1, 0, 0], dtype=dt), - np.array([0, 0, 1], dtype=dt), - np.array([0, 1, 2], dtype=dt), - np.array([1, 2, 3], dtype=dt), - np.array([2, 3, 0], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['mirror']) - assert_array_equal(l, r) - - # Stacking mirror on top of zero: 2nd - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([0, 1, 2], dtype=dt), - np.array([1, 2, 3], dtype=dt), - np.array([2, 3, 0], dtype=dt), - np.array([3, 0, 0], dtype=dt), - np.array([0, 0, 3], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['mirror']) - assert_array_equal(l, r) - - # Stacking mirror on top of zero: 3rd - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([1, 0, 0, 1, 2], dtype=dt), - np.array([0, 0, 1, 2, 3], dtype=dt), - np.array([0, 1, 2, 3, 0], dtype=dt), - np.array([1, 2, 3, 0, 0], dtype=dt), - np.array([2, 3, 0, 0, 3], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['mirror']) - assert_array_equal(l, r) - - # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and - # circular padding - def test_simple_circular(self): - dt = np.float64 - # Stacking zero on top of mirror - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([0, 3, 1], dtype=dt), - np.array([3, 1, 2], dtype=dt), - np.array([1, 2, 3], dtype=dt), - np.array([2, 3, 1], dtype=dt), - np.array([3, 1, 0], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [-1, 3], NEIGH_MODE['circular'], [-1, 1], 
NEIGH_MODE['zero']) - assert_array_equal(l, r) - - # Stacking mirror on top of zero - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([3, 0, 0], dtype=dt), - np.array([0, 0, 1], dtype=dt), - np.array([0, 1, 2], dtype=dt), - np.array([1, 2, 3], dtype=dt), - np.array([2, 3, 0], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['circular']) - assert_array_equal(l, r) - - # Stacking mirror on top of zero: 2nd - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([0, 1, 2], dtype=dt), - np.array([1, 2, 3], dtype=dt), - np.array([2, 3, 0], dtype=dt), - np.array([3, 0, 0], dtype=dt), - np.array([0, 0, 1], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['circular']) - assert_array_equal(l, r) - - # Stacking mirror on top of zero: 3rd - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([3, 0, 0, 1, 2], dtype=dt), - np.array([0, 0, 1, 2, 3], dtype=dt), - np.array([0, 1, 2, 3, 0], dtype=dt), - np.array([1, 2, 3, 0, 0], dtype=dt), - np.array([2, 3, 0, 0, 1], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['circular']) - assert_array_equal(l, r) - - # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator - # being strictly within the array - def test_simple_strict_within(self): - dt = np.float64 - # Stacking zero on top of zero, first neighborhood strictly inside the - # array - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([1, 2, 3, 0], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['zero']) - assert_array_equal(l, r) - - # Stacking mirror on top of zero, first neighborhood strictly inside the - # array - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([1, 2, 3, 3], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [1, 1], NEIGH_MODE['zero'], [-1, 
2], NEIGH_MODE['mirror']) - assert_array_equal(l, r) - - # Stacking mirror on top of zero, first neighborhood strictly inside the - # array - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([1, 2, 3, 1], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['circular']) - assert_array_equal(l, r) - -class TestWarnings(object): - - def test_complex_warning(self): - x = np.array([1, 2]) - y = np.array([1-2j, 1+2j]) - - with warnings.catch_warnings(): - warnings.simplefilter("error", np.ComplexWarning) - assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y) - assert_equal(x, [1, 2]) - - -class TestMinScalarType(object): - - def test_usigned_shortshort(self): - dt = np.min_scalar_type(2**8-1) - wanted = np.dtype('uint8') - assert_equal(wanted, dt) - - def test_usigned_short(self): - dt = np.min_scalar_type(2**16-1) - wanted = np.dtype('uint16') - assert_equal(wanted, dt) - - def test_usigned_int(self): - dt = np.min_scalar_type(2**32-1) - wanted = np.dtype('uint32') - assert_equal(wanted, dt) - - def test_usigned_longlong(self): - dt = np.min_scalar_type(2**63-1) - wanted = np.dtype('uint64') - assert_equal(wanted, dt) - - def test_object(self): - dt = np.min_scalar_type(2**64) - wanted = np.dtype('O') - assert_equal(wanted, dt) - - -from numpy.core._internal import _dtype_from_pep3118 - - -class TestPEP3118Dtype(object): - def _check(self, spec, wanted): - dt = np.dtype(wanted) - actual = _dtype_from_pep3118(spec) - assert_equal(actual, dt, - err_msg="spec %r != dtype %r" % (spec, wanted)) - - def test_native_padding(self): - align = np.dtype('i').alignment - for j in range(8): - if j == 0: - s = 'bi' - else: - s = 'b%dxi' % j - self._check('@'+s, {'f0': ('i1', 0), - 'f1': ('i', align*(1 + j//align))}) - self._check('='+s, {'f0': ('i1', 0), - 'f1': ('i', 1+j)}) - - def test_native_padding_2(self): - # Native padding should work also for structs and sub-arrays - self._check('x3T{xi}', {'f0': 
(({'f0': ('i', 4)}, (3,)), 4)}) - self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)}) - - def test_trailing_padding(self): - # Trailing padding should be included, *and*, the item size - # should match the alignment if in aligned mode - align = np.dtype('i').alignment - size = np.dtype('i').itemsize - - def aligned(n): - return align*(1 + (n-1)//align) - - base = dict(formats=['i'], names=['f0']) - - self._check('ix', dict(itemsize=aligned(size + 1), **base)) - self._check('ixx', dict(itemsize=aligned(size + 2), **base)) - self._check('ixxx', dict(itemsize=aligned(size + 3), **base)) - self._check('ixxxx', dict(itemsize=aligned(size + 4), **base)) - self._check('i7x', dict(itemsize=aligned(size + 7), **base)) - - self._check('^ix', dict(itemsize=size + 1, **base)) - self._check('^ixx', dict(itemsize=size + 2, **base)) - self._check('^ixxx', dict(itemsize=size + 3, **base)) - self._check('^ixxxx', dict(itemsize=size + 4, **base)) - self._check('^i7x', dict(itemsize=size + 7, **base)) - - def test_native_padding_3(self): - dt = np.dtype( - [('a', 'b'), ('b', 'i'), - ('sub', np.dtype('b,i')), ('c', 'i')], - align=True) - self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt) - - dt = np.dtype( - [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'), - ('e', 'b'), ('sub', np.dtype('b,i', align=True))]) - self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt) - - def test_padding_with_array_inside_struct(self): - dt = np.dtype( - [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), - ('d', 'i')], - align=True) - self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt) - - def test_byteorder_inside_struct(self): - # The byte order after @T{=i} should be '=', not '@'. - # Check this by noting the absence of native alignment. 
- self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0), - 'f1': ('i', 5)}) - - def test_intra_padding(self): - # Natively aligned sub-arrays may require some internal padding - align = np.dtype('i').alignment - size = np.dtype('i').itemsize - - def aligned(n): - return (align*(1 + (n-1)//align)) - - self._check('(3)T{ix}', (dict( - names=['f0'], - formats=['i'], - offsets=[0], - itemsize=aligned(size + 1) - ), (3,))) - - def test_char_vs_string(self): - dt = np.dtype('c') - self._check('c', dt) - - dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')]) - self._check('4c4s', dt) - - def test_field_order(self): - # gh-9053 - previously, we relied on dictionary key order - self._check("(0)I:a:f:b:", [('a', 'I', (0,)), ('b', 'f')]) - self._check("(0)I:b:f:a:", [('b', 'I', (0,)), ('a', 'f')]) - - def test_unnamed_fields(self): - self._check('ii', [('f0', 'i'), ('f1', 'i')]) - self._check('ii:f0:', [('f1', 'i'), ('f0', 'i')]) - - self._check('i', 'i') - self._check('i:f0:', [('f0', 'i')]) - - -class TestNewBufferProtocol(object): - """ Test PEP3118 buffers """ - - def _check_roundtrip(self, obj): - obj = np.asarray(obj) - x = memoryview(obj) - y = np.asarray(x) - y2 = np.array(x) - assert_(not y.flags.owndata) - assert_(y2.flags.owndata) - - assert_equal(y.dtype, obj.dtype) - assert_equal(y.shape, obj.shape) - assert_array_equal(obj, y) - - assert_equal(y2.dtype, obj.dtype) - assert_equal(y2.shape, obj.shape) - assert_array_equal(obj, y2) - - def test_roundtrip(self): - x = np.array([1, 2, 3, 4, 5], dtype='i4') - self._check_roundtrip(x) - - x = np.array([[1, 2], [3, 4]], dtype=np.float64) - self._check_roundtrip(x) - - x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] - self._check_roundtrip(x) - - dt = [('a', 'b'), - ('b', 'h'), - ('c', 'i'), - ('d', 'l'), - ('dx', 'q'), - ('e', 'B'), - ('f', 'H'), - ('g', 'I'), - ('h', 'L'), - ('hx', 'Q'), - ('i', np.single), - ('j', np.double), - ('k', np.longdouble), - ('ix', np.csingle), - ('jx', np.cdouble), - ('kx', np.clongdouble), - 
('l', 'S4'), - ('m', 'U4'), - ('n', 'V3'), - ('o', '?'), - ('p', np.half), - ] - x = np.array( - [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - b'aaaa', 'bbbb', b'xxx', True, 1.0)], - dtype=dt) - self._check_roundtrip(x) - - x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))]) - self._check_roundtrip(x) - - x = np.array([1, 2, 3], dtype='>i2') - self._check_roundtrip(x) - - x = np.array([1, 2, 3], dtype='') - x = np.zeros(4, dtype=dt) - self._check_roundtrip(x) - - def test_roundtrip_scalar(self): - # Issue #4015. - self._check_roundtrip(0) - - def test_invalid_buffer_format(self): - # datetime64 cannot be used fully in a buffer yet - # Should be fixed in the next Numpy major release - dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')]) - a = np.empty(3, dt) - assert_raises((ValueError, BufferError), memoryview, a) - assert_raises((ValueError, BufferError), memoryview, np.array((3), 'M8[D]')) - - def test_export_simple_1d(self): - x = np.array([1, 2, 3, 4, 5], dtype='i') - y = memoryview(x) - assert_equal(y.format, 'i') - assert_equal(y.shape, (5,)) - assert_equal(y.ndim, 1) - assert_equal(y.strides, (4,)) - assert_equal(y.suboffsets, EMPTY) - assert_equal(y.itemsize, 4) - - def test_export_simple_nd(self): - x = np.array([[1, 2], [3, 4]], dtype=np.float64) - y = memoryview(x) - assert_equal(y.format, 'd') - assert_equal(y.shape, (2, 2)) - assert_equal(y.ndim, 2) - assert_equal(y.strides, (16, 8)) - assert_equal(y.suboffsets, EMPTY) - assert_equal(y.itemsize, 8) - - def test_export_discontiguous(self): - x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] - y = memoryview(x) - assert_equal(y.format, 'f') - assert_equal(y.shape, (3, 3)) - assert_equal(y.ndim, 2) - assert_equal(y.strides, (36, 4)) - assert_equal(y.suboffsets, EMPTY) - assert_equal(y.itemsize, 4) - - def test_export_record(self): - dt = [('a', 'b'), - ('b', 'h'), - ('c', 'i'), - ('d', 'l'), - ('dx', 'q'), - ('e', 'B'), - ('f', 'H'), - ('g', 'I'), - ('h', 'L'), - ('hx', 'Q'), - ('i', 
np.single), - ('j', np.double), - ('k', np.longdouble), - ('ix', np.csingle), - ('jx', np.cdouble), - ('kx', np.clongdouble), - ('l', 'S4'), - ('m', 'U4'), - ('n', 'V3'), - ('o', '?'), - ('p', np.half), - ] - x = np.array( - [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - b'aaaa', 'bbbb', b' ', True, 1.0)], - dtype=dt) - y = memoryview(x) - assert_equal(y.shape, (1,)) - assert_equal(y.ndim, 1) - assert_equal(y.suboffsets, EMPTY) - - sz = sum([np.dtype(b).itemsize for a, b in dt]) - if np.dtype('l').itemsize == 4: - assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') - else: - assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') - # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides - if not (np.ones(1).strides[0] == np.iinfo(np.intp).max): - assert_equal(y.strides, (sz,)) - assert_equal(y.itemsize, sz) - - def test_export_subarray(self): - x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))]) - y = memoryview(x) - assert_equal(y.format, 'T{(2,2)i:a:}') - assert_equal(y.shape, EMPTY) - assert_equal(y.ndim, 0) - assert_equal(y.strides, EMPTY) - assert_equal(y.suboffsets, EMPTY) - assert_equal(y.itemsize, 16) - - def test_export_endian(self): - x = np.array([1, 2, 3], dtype='>i') - y = memoryview(x) - if sys.byteorder == 'little': - assert_equal(y.format, '>i') - else: - assert_equal(y.format, 'i') - - x = np.array([1, 2, 3], dtype=' 2: - with assert_raises_regex( - NotImplementedError, - r"Unrepresentable .* 'u' \(UCS-2 strings\)" - ): - raise exc.__cause__ - - def test_ctypes_integer_via_memoryview(self): - # gh-11150, due to bpo-10746 - for c_integer in {ctypes.c_int, ctypes.c_long, ctypes.c_longlong}: - value = c_integer(42) - with warnings.catch_warnings(record=True): - warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning) - np.asarray(value) - - def 
test_ctypes_struct_via_memoryview(self): - # gh-10528 - class foo(ctypes.Structure): - _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint32)] - f = foo(a=1, b=2) - - with warnings.catch_warnings(record=True): - warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning) - arr = np.asarray(f) - - assert_equal(arr['a'], 1) - assert_equal(arr['b'], 2) - f.a = 3 - assert_equal(arr['a'], 3) - - -class TestArrayAttributeDeletion(object): - - def test_multiarray_writable_attributes_deletion(self): - # ticket #2046, should not seqfault, raise AttributeError - a = np.ones(2) - attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat'] - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "Assigning the 'data' attribute") - for s in attr: - assert_raises(AttributeError, delattr, a, s) - - def test_multiarray_not_writable_attributes_deletion(self): - a = np.ones(2) - attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base", - "ctypes", "T", "__array_interface__", "__array_struct__", - "__array_priority__", "__array_finalize__"] - for s in attr: - assert_raises(AttributeError, delattr, a, s) - - def test_multiarray_flags_writable_attribute_deletion(self): - a = np.ones(2).flags - attr = ['writebackifcopy', 'updateifcopy', 'aligned', 'writeable'] - for s in attr: - assert_raises(AttributeError, delattr, a, s) - - def test_multiarray_flags_not_writable_attribute_deletion(self): - a = np.ones(2).flags - attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran", - "owndata", "fnc", "forc", "behaved", "carray", "farray", - "num"] - for s in attr: - assert_raises(AttributeError, delattr, a, s) - - -class TestArrayInterface(): - class Foo(object): - def __init__(self, value): - self.value = value - self.iface = {'typestr': 'f8'} - - def __float__(self): - return float(self.value) - - @property - def __array_interface__(self): - return self.iface - - - f = Foo(0.5) - - @pytest.mark.parametrize('val, iface, expected', [ - (f, {}, 0.5), - 
([f], {}, [0.5]), - ([f, f], {}, [0.5, 0.5]), - (f, {'shape': ()}, 0.5), - (f, {'shape': None}, TypeError), - (f, {'shape': (1, 1)}, [[0.5]]), - (f, {'shape': (2,)}, ValueError), - (f, {'strides': ()}, 0.5), - (f, {'strides': (2,)}, ValueError), - (f, {'strides': 16}, TypeError), - ]) - def test_scalar_interface(self, val, iface, expected): - # Test scalar coercion within the array interface - self.f.iface = {'typestr': 'f8'} - self.f.iface.update(iface) - if HAS_REFCOUNT: - pre_cnt = sys.getrefcount(np.dtype('f8')) - if isinstance(expected, type): - assert_raises(expected, np.array, val) - else: - result = np.array(val) - assert_equal(np.array(val), expected) - assert result.dtype == 'f8' - del result - if HAS_REFCOUNT: - post_cnt = sys.getrefcount(np.dtype('f8')) - assert_equal(pre_cnt, post_cnt) - -def test_interface_no_shape(): - class ArrayLike(object): - array = np.array(1) - __array_interface__ = array.__array_interface__ - assert_equal(np.array(ArrayLike()), 1) - - -def test_array_interface_itemsize(): - # See gh-6361 - my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'], - 'offsets': [0, 8], 'itemsize': 16}) - a = np.ones(10, dtype=my_dtype) - descr_t = np.dtype(a.__array_interface__['descr']) - typestr_t = np.dtype(a.__array_interface__['typestr']) - assert_equal(descr_t.itemsize, typestr_t.itemsize) - - -def test_array_interface_empty_shape(): - # See gh-7994 - arr = np.array([1, 2, 3]) - interface1 = dict(arr.__array_interface__) - interface1['shape'] = () - - class DummyArray1(object): - __array_interface__ = interface1 - - # NOTE: Because Py2 str/Py3 bytes supports the buffer interface, setting - # the interface data to bytes would invoke the bug this tests for, that - # __array_interface__ with shape=() is not allowed if the data is an object - # exposing the buffer interface - interface2 = dict(interface1) - interface2['data'] = arr[0].tobytes() - - class DummyArray2(object): - __array_interface__ = interface2 - - arr1 = 
np.asarray(DummyArray1()) - arr2 = np.asarray(DummyArray2()) - arr3 = arr[:1].reshape(()) - assert_equal(arr1, arr2) - assert_equal(arr1, arr3) - -def test_array_interface_offset(): - arr = np.array([1, 2, 3], dtype='int32') - interface = dict(arr.__array_interface__) - interface['data'] = memoryview(arr) - interface['shape'] = (2,) - interface['offset'] = 4 - - - class DummyArray(object): - __array_interface__ = interface - - arr1 = np.asarray(DummyArray()) - assert_equal(arr1, arr[1:]) - -def test_flat_element_deletion(): - it = np.ones(3).flat - try: - del it[1] - del it[1:2] - except TypeError: - pass - except Exception: - raise AssertionError - - -def test_scalar_element_deletion(): - a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')]) - assert_raises(ValueError, a[0].__delitem__, 'x') - - -class TestMemEventHook(object): - def test_mem_seteventhook(self): - # The actual tests are within the C code in - # multiarray/_multiarray_tests.c.src - _multiarray_tests.test_pydatamem_seteventhook_start() - # force an allocation and free of a numpy array - # needs to be larger then limit of small memory cacher in ctors.c - a = np.zeros(1000) - del a - break_cycles() - _multiarray_tests.test_pydatamem_seteventhook_end() - -class TestMapIter(object): - def test_mapiter(self): - # The actual tests are within the C code in - # multiarray/_multiarray_tests.c.src - - a = np.arange(12).reshape((3, 4)).astype(float) - index = ([1, 1, 2, 0], - [0, 0, 2, 3]) - vals = [50, 50, 30, 16] - - _multiarray_tests.test_inplace_increment(a, index, vals) - assert_equal(a, [[0.00, 1., 2.0, 19.], - [104., 5., 6.0, 7.0], - [8.00, 9., 40., 11.]]) - - b = np.arange(6).astype(float) - index = (np.array([1, 2, 0]),) - vals = [50, 4, 100.1] - _multiarray_tests.test_inplace_increment(b, index, vals) - assert_equal(b, [100.1, 51., 6., 3., 4., 5.]) - - -class TestAsCArray(object): - def test_1darray(self): - array = np.arange(24, dtype=np.double) - from_c = _multiarray_tests.test_as_c_array(array, 3) - 
assert_equal(array[3], from_c) - - def test_2darray(self): - array = np.arange(24, dtype=np.double).reshape(3, 8) - from_c = _multiarray_tests.test_as_c_array(array, 2, 4) - assert_equal(array[2, 4], from_c) - - def test_3darray(self): - array = np.arange(24, dtype=np.double).reshape(2, 3, 4) - from_c = _multiarray_tests.test_as_c_array(array, 1, 2, 3) - assert_equal(array[1, 2, 3], from_c) - - -class TestConversion(object): - def test_array_scalar_relational_operation(self): - # All integer - for dt1 in np.typecodes['AllInteger']: - assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,)) - assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,)) - - for dt2 in np.typecodes['AllInteger']: - assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) - assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) - - # Unsigned integers - for dt1 in 'BHILQP': - assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,)) - assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,)) - assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,)) - - # Unsigned vs signed - for dt2 in 'bhilqp': - assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) - assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) - assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) - - # Signed integers and floats - for dt1 in 'bhlqp' + np.typecodes['Float']: - assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) - assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) - assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) - - for dt2 in 'bhlqp' + np.typecodes['Float']: - assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) - assert_(not np.array(1, dtype=dt1) < 
np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) - assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) - - def test_to_bool_scalar(self): - assert_equal(bool(np.array([False])), False) - assert_equal(bool(np.array([True])), True) - assert_equal(bool(np.array([[42]])), True) - assert_raises(ValueError, bool, np.array([1, 2])) - - class NotConvertible(object): - def __bool__(self): - raise NotImplementedError - __nonzero__ = __bool__ # python 2 - - assert_raises(NotImplementedError, bool, np.array(NotConvertible())) - assert_raises(NotImplementedError, bool, np.array([NotConvertible()])) - - self_containing = np.array([None]) - self_containing[0] = self_containing - try: - Error = RecursionError - except NameError: - Error = RuntimeError # python < 3.5 - assert_raises(Error, bool, self_containing) # previously stack overflow - self_containing[0] = None # resolve circular reference - - def test_to_int_scalar(self): - # gh-9972 means that these aren't always the same - int_funcs = (int, lambda x: x.__int__()) - for int_func in int_funcs: - assert_equal(int_func(np.array([1])), 1) - assert_equal(int_func(np.array([0])), 0) - assert_equal(int_func(np.array([[42]])), 42) - assert_raises(TypeError, int_func, np.array([1, 2])) - - # gh-9972 - assert_equal(4, int_func(np.array('4'))) - assert_equal(5, int_func(np.bytes_(b'5'))) - assert_equal(6, int_func(np.unicode_(u'6'))) - - class HasTrunc: - def __trunc__(self): - return 3 - assert_equal(3, int_func(np.array(HasTrunc()))) - assert_equal(3, int_func(np.array([HasTrunc()]))) - - class NotConvertible(object): - def __int__(self): - raise NotImplementedError - assert_raises(NotImplementedError, - int_func, np.array(NotConvertible())) - assert_raises(NotImplementedError, - int_func, np.array([NotConvertible()])) - - -class TestWhere(object): - def test_basic(self): - dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128, - np.longdouble, 
np.clongdouble] - for dt in dts: - c = np.ones(53, dtype=bool) - assert_equal(np.where( c, dt(0), dt(1)), dt(0)) - assert_equal(np.where(~c, dt(0), dt(1)), dt(1)) - assert_equal(np.where(True, dt(0), dt(1)), dt(0)) - assert_equal(np.where(False, dt(0), dt(1)), dt(1)) - d = np.ones_like(c).astype(dt) - e = np.zeros_like(d) - r = d.astype(dt) - c[7] = False - r[7] = e[7] - assert_equal(np.where(c, e, e), e) - assert_equal(np.where(c, d, e), r) - assert_equal(np.where(c, d, e[0]), r) - assert_equal(np.where(c, d[0], e), r) - assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2]) - assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2]) - assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3]) - assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3]) - assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2]) - assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3]) - assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3]) - - def test_exotic(self): - # object - assert_array_equal(np.where(True, None, None), np.array(None)) - # zero sized - m = np.array([], dtype=bool).reshape(0, 3) - b = np.array([], dtype=np.float64).reshape(0, 3) - assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3)) - - # object cast - d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313, - 0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013, - 1.267, 0.229, -1.39, 0.487]) - nan = float('NaN') - e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan, - 'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'], - dtype=object) - m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1, - 0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool) - - r = e[:] - r[np.where(m)] = d[np.where(m)] - assert_array_equal(np.where(m, d, e), r) - - r = e[:] - r[np.where(~m)] = d[np.where(~m)] - assert_array_equal(np.where(m, e, d), r) - - assert_array_equal(np.where(m, e, e), e) - - # minimal dtype result with NaN scalar (e.g required by pandas) - d = np.array([1., 2.], 
dtype=np.float32) - e = float('NaN') - assert_equal(np.where(True, d, e).dtype, np.float32) - e = float('Infinity') - assert_equal(np.where(True, d, e).dtype, np.float32) - e = float('-Infinity') - assert_equal(np.where(True, d, e).dtype, np.float32) - # also check upcast - e = float(1e150) - assert_equal(np.where(True, d, e).dtype, np.float64) - - def test_ndim(self): - c = [True, False] - a = np.zeros((2, 25)) - b = np.ones((2, 25)) - r = np.where(np.array(c)[:,np.newaxis], a, b) - assert_array_equal(r[0], a[0]) - assert_array_equal(r[1], b[0]) - - a = a.T - b = b.T - r = np.where(c, a, b) - assert_array_equal(r[:,0], a[:,0]) - assert_array_equal(r[:,1], b[:,0]) - - def test_dtype_mix(self): - c = np.array([False, True, False, False, False, False, True, False, - False, False, True, False]) - a = np.uint32(1) - b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], - dtype=np.float64) - r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], - dtype=np.float64) - assert_equal(np.where(c, a, b), r) - - a = a.astype(np.float32) - b = b.astype(np.int64) - assert_equal(np.where(c, a, b), r) - - # non bool mask - c = c.astype(int) - c[c != 0] = 34242324 - assert_equal(np.where(c, a, b), r) - # invert - tmpmask = c != 0 - c[c == 0] = 41247212 - c[tmpmask] = 0 - assert_equal(np.where(c, b, a), r) - - def test_foreign(self): - c = np.array([False, True, False, False, False, False, True, False, - False, False, True, False]) - r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], - dtype=np.float64) - a = np.ones(1, dtype='>i4') - b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], - dtype=np.float64) - assert_equal(np.where(c, a, b), r) - - b = b.astype('>f8') - assert_equal(np.where(c, a, b), r) - - a = a.astype('i4') - assert_equal(np.where(c, a, b), r) - - def test_error(self): - c = [True, True] - a = np.ones((4, 5)) - b = np.ones((5, 5)) - assert_raises(ValueError, np.where, c, a, a) - assert_raises(ValueError, 
np.where, c[0], a, b) - - def test_string(self): - # gh-4778 check strings are properly filled with nulls - a = np.array("abc") - b = np.array("x" * 753) - assert_equal(np.where(True, a, b), "abc") - assert_equal(np.where(False, b, a), "abc") - - # check native datatype sized strings - a = np.array("abcd") - b = np.array("x" * 8) - assert_equal(np.where(True, a, b), "abcd") - assert_equal(np.where(False, b, a), "abcd") - - def test_empty_result(self): - # pass empty where result through an assignment which reads the data of - # empty arrays, error detectable with valgrind, see gh-8922 - x = np.zeros((1, 1)) - ibad = np.vstack(np.where(x == 99.)) - assert_array_equal(ibad, - np.atleast_2d(np.array([[],[]], dtype=np.intp))) - - def test_largedim(self): - # invalid read regression gh-9304 - shape = [10, 2, 3, 4, 5, 6] - np.random.seed(2) - array = np.random.rand(*shape) - - for i in range(10): - benchmark = array.nonzero() - result = array.nonzero() - assert_array_equal(benchmark, result) - - -if not IS_PYPY: - # sys.getsizeof() is not valid on PyPy - class TestSizeOf(object): - - def test_empty_array(self): - x = np.array([]) - assert_(sys.getsizeof(x) > 0) - - def check_array(self, dtype): - elem_size = dtype(0).itemsize - - for length in [10, 50, 100, 500]: - x = np.arange(length, dtype=dtype) - assert_(sys.getsizeof(x) > length * elem_size) - - def test_array_int32(self): - self.check_array(np.int32) - - def test_array_int64(self): - self.check_array(np.int64) - - def test_array_float32(self): - self.check_array(np.float32) - - def test_array_float64(self): - self.check_array(np.float64) - - def test_view(self): - d = np.ones(100) - assert_(sys.getsizeof(d[...]) < sys.getsizeof(d)) - - def test_reshape(self): - d = np.ones(100) - assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy())) - - @_no_tracing - def test_resize(self): - d = np.ones(100) - old = sys.getsizeof(d) - d.resize(50) - assert_(old > sys.getsizeof(d)) - d.resize(150) - assert_(old < 
sys.getsizeof(d)) - - def test_error(self): - d = np.ones(100) - assert_raises(TypeError, d.__sizeof__, "a") - - -class TestHashing(object): - - def test_arrays_not_hashable(self): - x = np.ones(3) - assert_raises(TypeError, hash, x) - - def test_collections_hashable(self): - x = np.array([]) - assert_(not isinstance(x, collections_abc.Hashable)) - - -class TestArrayPriority(object): - # This will go away when __array_priority__ is settled, meanwhile - # it serves to check unintended changes. - op = operator - binary_ops = [ - op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod, - op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt, - op.ge, op.lt, op.le, op.ne, op.eq - ] - - # See #7949. Don't use "/" operator With -3 switch, since python reports it - # as a DeprecationWarning - if sys.version_info[0] < 3 and not sys.py3kwarning: - binary_ops.append(op.div) - - class Foo(np.ndarray): - __array_priority__ = 100. - - def __new__(cls, *args, **kwargs): - return np.array(*args, **kwargs).view(cls) - - class Bar(np.ndarray): - __array_priority__ = 101. - - def __new__(cls, *args, **kwargs): - return np.array(*args, **kwargs).view(cls) - - class Other(object): - __array_priority__ = 1000. 
- - def _all(self, other): - return self.__class__() - - __add__ = __radd__ = _all - __sub__ = __rsub__ = _all - __mul__ = __rmul__ = _all - __pow__ = __rpow__ = _all - __div__ = __rdiv__ = _all - __mod__ = __rmod__ = _all - __truediv__ = __rtruediv__ = _all - __floordiv__ = __rfloordiv__ = _all - __and__ = __rand__ = _all - __xor__ = __rxor__ = _all - __or__ = __ror__ = _all - __lshift__ = __rlshift__ = _all - __rshift__ = __rrshift__ = _all - __eq__ = _all - __ne__ = _all - __gt__ = _all - __ge__ = _all - __lt__ = _all - __le__ = _all - - def test_ndarray_subclass(self): - a = np.array([1, 2]) - b = self.Bar([1, 2]) - for f in self.binary_ops: - msg = repr(f) - assert_(isinstance(f(a, b), self.Bar), msg) - assert_(isinstance(f(b, a), self.Bar), msg) - - def test_ndarray_other(self): - a = np.array([1, 2]) - b = self.Other() - for f in self.binary_ops: - msg = repr(f) - assert_(isinstance(f(a, b), self.Other), msg) - assert_(isinstance(f(b, a), self.Other), msg) - - def test_subclass_subclass(self): - a = self.Foo([1, 2]) - b = self.Bar([1, 2]) - for f in self.binary_ops: - msg = repr(f) - assert_(isinstance(f(a, b), self.Bar), msg) - assert_(isinstance(f(b, a), self.Bar), msg) - - def test_subclass_other(self): - a = self.Foo([1, 2]) - b = self.Other() - for f in self.binary_ops: - msg = repr(f) - assert_(isinstance(f(a, b), self.Other), msg) - assert_(isinstance(f(b, a), self.Other), msg) - - -class TestBytestringArrayNonzero(object): - - def test_empty_bstring_array_is_falsey(self): - assert_(not np.array([''], dtype=str)) - - def test_whitespace_bstring_array_is_falsey(self): - a = np.array(['spam'], dtype=str) - a[0] = ' \0\0' - assert_(not a) - - def test_all_null_bstring_array_is_falsey(self): - a = np.array(['spam'], dtype=str) - a[0] = '\0\0\0\0' - assert_(not a) - - def test_null_inside_bstring_array_is_truthy(self): - a = np.array(['spam'], dtype=str) - a[0] = ' \0 \0' - assert_(a) - - -class TestUnicodeArrayNonzero(object): - - def 
test_empty_ustring_array_is_falsey(self): - assert_(not np.array([''], dtype=np.unicode_)) - - def test_whitespace_ustring_array_is_falsey(self): - a = np.array(['eggs'], dtype=np.unicode_) - a[0] = ' \0\0' - assert_(not a) - - def test_all_null_ustring_array_is_falsey(self): - a = np.array(['eggs'], dtype=np.unicode_) - a[0] = '\0\0\0\0' - assert_(not a) - - def test_null_inside_ustring_array_is_truthy(self): - a = np.array(['eggs'], dtype=np.unicode_) - a[0] = ' \0 \0' - assert_(a) - - -class TestFormat(object): - - def test_0d(self): - a = np.array(np.pi) - assert_equal('{:0.3g}'.format(a), '3.14') - assert_equal('{:0.3g}'.format(a[()]), '3.14') - - def test_1d_no_format(self): - a = np.array([np.pi]) - assert_equal('{}'.format(a), str(a)) - - def test_1d_format(self): - # until gh-5543, ensure that the behaviour matches what it used to be - a = np.array([np.pi]) - if sys.version_info[:2] >= (3, 4): - assert_raises(TypeError, '{:30}'.format, a) - else: - with suppress_warnings() as sup: - sup.filter(PendingDeprecationWarning) - res = '{:30}'.format(a) - dst = object.__format__(a, '30') - assert_equal(res, dst) - -from numpy.testing import IS_PYPY - -class TestCTypes(object): - - def test_ctypes_is_available(self): - test_arr = np.array([[1, 2, 3], [4, 5, 6]]) - - assert_equal(ctypes, test_arr.ctypes._ctypes) - assert_equal(tuple(test_arr.ctypes.shape), (2, 3)) - - def test_ctypes_is_not_available(self): - from numpy.core import _internal - _internal.ctypes = None - try: - test_arr = np.array([[1, 2, 3], [4, 5, 6]]) - - assert_(isinstance(test_arr.ctypes._ctypes, - _internal._missing_ctypes)) - assert_equal(tuple(test_arr.ctypes.shape), (2, 3)) - finally: - _internal.ctypes = ctypes - - def _make_readonly(x): - x.flags.writeable = False - return x - - @pytest.mark.parametrize('arr', [ - np.array([1, 2, 3]), - np.array([['one', 'two'], ['three', 'four']]), - np.array((1, 2), dtype='i4,i4'), - np.zeros((2,), dtype= - np.dtype(dict( - formats=['2, [44, 55]) - 
assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]])) - # hit one of the failing paths - assert_raises(ValueError, np.place, a, a>20, []) - - def test_put_noncontiguous(self): - a = np.arange(6).reshape(2,3).T # force non-c-contiguous - np.put(a, [0, 2], [44, 55]) - assert_equal(a, np.array([[44, 3], [55, 4], [2, 5]])) - - def test_putmask_noncontiguous(self): - a = np.arange(6).reshape(2,3).T # force non-c-contiguous - # uses arr_putmask - np.putmask(a, a>2, a**2) - assert_equal(a, np.array([[0, 9], [1, 16], [2, 25]])) - - def test_take_mode_raise(self): - a = np.arange(6, dtype='int') - out = np.empty(2, dtype='int') - np.take(a, [0, 2], out=out, mode='raise') - assert_equal(out, np.array([0, 2])) - - def test_choose_mod_raise(self): - a = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]]) - out = np.empty((3,3), dtype='int') - choices = [-10, 10] - np.choose(a, choices, out=out, mode='raise') - assert_equal(out, np.array([[ 10, -10, 10], - [-10, 10, -10], - [ 10, -10, 10]])) - - def test_flatiter__array__(self): - a = np.arange(9).reshape(3,3) - b = a.T.flat - c = b.__array__() - # triggers the WRITEBACKIFCOPY resolution, assuming refcount semantics - del c - - def test_dot_out(self): - # if HAVE_CBLAS, will use WRITEBACKIFCOPY - a = np.arange(9, dtype=float).reshape(3,3) - b = np.dot(a, a, out=a) - assert_equal(b, np.array([[15, 18, 21], [42, 54, 66], [69, 90, 111]])) - - def test_view_assign(self): - from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_resolve - - arr = np.arange(9).reshape(3, 3).T - arr_wb = npy_create_writebackifcopy(arr) - assert_(arr_wb.flags.writebackifcopy) - assert_(arr_wb.base is arr) - arr_wb[...] = -100 - npy_resolve(arr_wb) - # arr changes after resolve, even though we assigned to arr_wb - assert_equal(arr, -100) - # after resolve, the two arrays no longer reference each other - assert_(arr_wb.ctypes.data != 0) - assert_equal(arr_wb.base, None) - # assigning to arr_wb does not get transferred to arr - arr_wb[...] 
= 100 - assert_equal(arr, -100) - - @pytest.mark.leaks_references( - reason="increments self in dealloc; ignore since deprecated path.") - def test_dealloc_warning(self): - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - arr = np.arange(9).reshape(3, 3) - v = arr.T - _multiarray_tests.npy_abuse_writebackifcopy(v) - assert len(sup.log) == 1 - - def test_view_discard_refcount(self): - from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_discard - - arr = np.arange(9).reshape(3, 3).T - orig = arr.copy() - if HAS_REFCOUNT: - arr_cnt = sys.getrefcount(arr) - arr_wb = npy_create_writebackifcopy(arr) - assert_(arr_wb.flags.writebackifcopy) - assert_(arr_wb.base is arr) - arr_wb[...] = -100 - npy_discard(arr_wb) - # arr remains unchanged after discard - assert_equal(arr, orig) - # after discard, the two arrays no longer reference each other - assert_(arr_wb.ctypes.data != 0) - assert_equal(arr_wb.base, None) - if HAS_REFCOUNT: - assert_equal(arr_cnt, sys.getrefcount(arr)) - # assigning to arr_wb does not get transferred to arr - arr_wb[...] 
= 100 - assert_equal(arr, orig) - - -class TestArange(object): - def test_infinite(self): - assert_raises_regex( - ValueError, "size exceeded", - np.arange, 0, np.inf - ) - - def test_nan_step(self): - assert_raises_regex( - ValueError, "cannot compute length", - np.arange, 0, 1, np.nan - ) - - def test_zero_step(self): - assert_raises(ZeroDivisionError, np.arange, 0, 10, 0) - assert_raises(ZeroDivisionError, np.arange, 0.0, 10.0, 0.0) - - # empty range - assert_raises(ZeroDivisionError, np.arange, 0, 0, 0) - assert_raises(ZeroDivisionError, np.arange, 0.0, 0.0, 0.0) - - -class TestArrayFinalize(object): - """ Tests __array_finalize__ """ - - def test_receives_base(self): - # gh-11237 - class SavesBase(np.ndarray): - def __array_finalize__(self, obj): - self.saved_base = self.base - - a = np.array(1).view(SavesBase) - assert_(a.saved_base is a.base) - - def test_lifetime_on_error(self): - # gh-11237 - class RaisesInFinalize(np.ndarray): - def __array_finalize__(self, obj): - # crash, but keep this object alive - raise Exception(self) - - # a plain object can't be weakref'd - class Dummy(object): pass - - # get a weak reference to an object within an array - obj_arr = np.array(Dummy()) - obj_ref = weakref.ref(obj_arr[()]) - - # get an array that crashed in __array_finalize__ - with assert_raises(Exception) as e: - obj_arr.view(RaisesInFinalize) - if sys.version_info.major == 2: - # prevent an extra reference being kept - sys.exc_clear() - - obj_subarray = e.exception.args[0] - del e - assert_(isinstance(obj_subarray, RaisesInFinalize)) - - # reference should still be held by obj_arr - break_cycles() - assert_(obj_ref() is not None, "object should not already be dead") - - del obj_arr - break_cycles() - assert_(obj_ref() is not None, "obj_arr should not hold the last reference") - - del obj_subarray - break_cycles() - assert_(obj_ref() is None, "no references should remain") - - -def test_orderconverter_with_nonASCII_unicode_ordering(): - # gh-7475 - a = np.arange(5) 
- assert_raises(ValueError, a.flatten, order=u'\xe2') - - -def test_equal_override(): - # gh-9153: ndarray.__eq__ uses special logic for structured arrays, which - # did not respect overrides with __array_priority__ or __array_ufunc__. - # The PR fixed this for __array_priority__ and __array_ufunc__ = None. - class MyAlwaysEqual(object): - def __eq__(self, other): - return "eq" - - def __ne__(self, other): - return "ne" - - class MyAlwaysEqualOld(MyAlwaysEqual): - __array_priority__ = 10000 - - class MyAlwaysEqualNew(MyAlwaysEqual): - __array_ufunc__ = None - - array = np.array([(0, 1), (2, 3)], dtype='i4,i4') - for my_always_equal_cls in MyAlwaysEqualOld, MyAlwaysEqualNew: - my_always_equal = my_always_equal_cls() - assert_equal(my_always_equal == array, 'eq') - assert_equal(array == my_always_equal, 'eq') - assert_equal(my_always_equal != array, 'ne') - assert_equal(array != my_always_equal, 'ne') - - -def test_npymath_complex(): - # Smoketest npymath functions - from numpy.core._multiarray_tests import ( - npy_cabs, npy_carg) - - funcs = {npy_cabs: np.absolute, - npy_carg: np.angle} - vals = (1, np.inf, -np.inf, np.nan) - types = (np.complex64, np.complex128, np.clongdouble) - - for fun, npfun in funcs.items(): - for x, y in itertools.product(vals, vals): - for t in types: - z = t(complex(x, y)) - got = fun(z) - expected = npfun(z) - assert_allclose(got, expected) - - -def test_npymath_real(): - # Smoketest npymath functions - from numpy.core._multiarray_tests import ( - npy_log10, npy_cosh, npy_sinh, npy_tan, npy_tanh) - - funcs = {npy_log10: np.log10, - npy_cosh: np.cosh, - npy_sinh: np.sinh, - npy_tan: np.tan, - npy_tanh: np.tanh} - vals = (1, np.inf, -np.inf, np.nan) - types = (np.float32, np.float64, np.longdouble) - - with np.errstate(all='ignore'): - for fun, npfun in funcs.items(): - for x, t in itertools.product(vals, types): - z = t(x) - got = fun(z) - expected = npfun(z) - assert_allclose(got, expected) - -def test_uintalignment_and_alignment(): - # 
alignment code needs to satisfy these requrements: - # 1. numpy structs match C struct layout - # 2. ufuncs/casting is safe wrt to aligned access - # 3. copy code is safe wrt to "uint alidned" access - # - # Complex types are the main problem, whose alignment may not be the same - # as their "uint alignment". - # - # This test might only fail on certain platforms, where uint64 alignment is - # not equal to complex64 alignment. The second 2 tests will only fail - # for DEBUG=1. - - d1 = np.dtype('u1,c8', align=True) - d2 = np.dtype('u4,c8', align=True) - d3 = np.dtype({'names': ['a', 'b'], 'formats': ['u1', d1]}, align=True) - - assert_equal(np.zeros(1, dtype=d1)['f1'].flags['ALIGNED'], True) - assert_equal(np.zeros(1, dtype=d2)['f1'].flags['ALIGNED'], True) - assert_equal(np.zeros(1, dtype='u1,c8')['f1'].flags['ALIGNED'], False) - - # check that C struct matches numpy struct size - s = _multiarray_tests.get_struct_alignments() - for d, (alignment, size) in zip([d1,d2,d3], s): - assert_equal(d.alignment, alignment) - assert_equal(d.itemsize, size) - - # check that ufuncs don't complain in debug mode - # (this is probably OK if the aligned flag is true above) - src = np.zeros((2,2), dtype=d1)['f1'] # 4-byte aligned, often - np.exp(src) # assert fails? - - # check that copy code doesn't complain in debug mode - dst = np.zeros((2,2), dtype='c8') - dst[:,1] = src[:,1] # assert in lowlevel_strided_loops fails? - -class TestAlignment(object): - # adapted from scipy._lib.tests.test__util.test__aligned_zeros - # Checks that unusual memory alignments don't trip up numpy. 
- # In particular, check RELAXED_STRIDES don't trip alignment assertions in - # NDEBUG mode for size-0 arrays (gh-12503) - - def check(self, shape, dtype, order, align): - err_msg = repr((shape, dtype, order, align)) - x = _aligned_zeros(shape, dtype, order, align=align) - if align is None: - align = np.dtype(dtype).alignment - assert_equal(x.__array_interface__['data'][0] % align, 0) - if hasattr(shape, '__len__'): - assert_equal(x.shape, shape, err_msg) - else: - assert_equal(x.shape, (shape,), err_msg) - assert_equal(x.dtype, dtype) - if order == "C": - assert_(x.flags.c_contiguous, err_msg) - elif order == "F": - if x.size > 0: - assert_(x.flags.f_contiguous, err_msg) - elif order is None: - assert_(x.flags.c_contiguous, err_msg) - else: - raise ValueError() - - def test_various_alignments(self): - for align in [1, 2, 3, 4, 8, 12, 16, 32, 64, None]: - for n in [0, 1, 3, 11]: - for order in ["C", "F", None]: - for dtype in list(np.typecodes["All"]) + ['i4,i4,i4']: - if dtype == 'O': - # object dtype can't be misaligned - continue - for shape in [n, (1, 2, 3, n)]: - self.check(shape, np.dtype(dtype), order, align) - - def test_strided_loop_alignments(self): - # particularly test that complex64 and float128 use right alignment - # code-paths, since these are particularly problematic. It is useful to - # turn on USE_DEBUG for this test, so lowlevel-loop asserts are run. 
- for align in [1, 2, 4, 8, 12, 16, None]: - xf64 = _aligned_zeros(3, np.float64) - - xc64 = _aligned_zeros(3, np.complex64, align=align) - xf128 = _aligned_zeros(3, np.longdouble, align=align) - - # test casting, both to and from misaligned - with suppress_warnings() as sup: - sup.filter(np.ComplexWarning, "Casting complex values") - xc64.astype('f8') - xf64.astype(np.complex64) - test = xc64 + xf64 - - xf128.astype('f8') - xf64.astype(np.longdouble) - test = xf128 + xf64 - - test = xf128 + xc64 - - # test copy, both to and from misaligned - # contig copy - xf64[:] = xf64.copy() - xc64[:] = xc64.copy() - xf128[:] = xf128.copy() - # strided copy - xf64[::2] = xf64[::2].copy() - xc64[::2] = xc64[::2].copy() - xf128[::2] = xf128[::2].copy() - -def test_getfield(): - a = np.arange(32, dtype='uint16') - if sys.byteorder == 'little': - i = 0 - j = 1 - else: - i = 1 - j = 0 - b = a.getfield('int8', i) - assert_equal(b, a) - b = a.getfield('int8', j) - assert_equal(b, 0) - pytest.raises(ValueError, a.getfield, 'uint8', -1) - pytest.raises(ValueError, a.getfield, 'uint8', 16) - pytest.raises(ValueError, a.getfield, 'uint64', 0) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_nditer.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_nditer.py deleted file mode 100644 index daec9ce..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_nditer.py +++ /dev/null @@ -1,2861 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import pytest - -import numpy as np -import numpy.core._multiarray_tests as _multiarray_tests -from numpy import array, arange, nditer, all -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises, - HAS_REFCOUNT, suppress_warnings - ) - - -def iter_multi_index(i): - ret = [] - while not i.finished: - ret.append(i.multi_index) - i.iternext() - return ret - -def iter_indices(i): - ret = [] - while not i.finished: - ret.append(i.index) - i.iternext() 
- return ret - -def iter_iterindices(i): - ret = [] - while not i.finished: - ret.append(i.iterindex) - i.iternext() - return ret - -@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") -def test_iter_refcount(): - # Make sure the iterator doesn't leak - - # Basic - a = arange(6) - dt = np.dtype('f4').newbyteorder() - rc_a = sys.getrefcount(a) - rc_dt = sys.getrefcount(dt) - with nditer(a, [], - [['readwrite', 'updateifcopy']], - casting='unsafe', - op_dtypes=[dt]) as it: - assert_(not it.iterationneedsapi) - assert_(sys.getrefcount(a) > rc_a) - assert_(sys.getrefcount(dt) > rc_dt) - # del 'it' - it = None - assert_equal(sys.getrefcount(a), rc_a) - assert_equal(sys.getrefcount(dt), rc_dt) - - # With a copy - a = arange(6, dtype='f4') - dt = np.dtype('f4') - rc_a = sys.getrefcount(a) - rc_dt = sys.getrefcount(dt) - it = nditer(a, [], - [['readwrite']], - op_dtypes=[dt]) - rc2_a = sys.getrefcount(a) - rc2_dt = sys.getrefcount(dt) - it2 = it.copy() - assert_(sys.getrefcount(a) > rc2_a) - assert_(sys.getrefcount(dt) > rc2_dt) - it = None - assert_equal(sys.getrefcount(a), rc2_a) - assert_equal(sys.getrefcount(dt), rc2_dt) - it2 = None - assert_equal(sys.getrefcount(a), rc_a) - assert_equal(sys.getrefcount(dt), rc_dt) - - del it2 # avoid pyflakes unused variable warning - -def test_iter_best_order(): - # The iterator should always find the iteration order - # with increasing memory addresses - - # Test the ordering for 1-D to 5-D shapes - for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: - a = arange(np.prod(shape)) - # Test each combination of positive and negative strides - for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) - for bit in range(len(shape)): - if ((2**bit) & dirs): - dirs_index[bit] = slice(None, None, -1) - dirs_index = tuple(dirs_index) - - aview = a.reshape(shape)[dirs_index] - # C-order - i = nditer(aview, [], [['readonly']]) - assert_equal([x for x in i], a) - # Fortran-order - i = 
nditer(aview.T, [], [['readonly']]) - assert_equal([x for x in i], a) - # Other order - if len(shape) > 2: - i = nditer(aview.swapaxes(0, 1), [], [['readonly']]) - assert_equal([x for x in i], a) - -def test_iter_c_order(): - # Test forcing C order - - # Test the ordering for 1-D to 5-D shapes - for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: - a = arange(np.prod(shape)) - # Test each combination of positive and negative strides - for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) - for bit in range(len(shape)): - if ((2**bit) & dirs): - dirs_index[bit] = slice(None, None, -1) - dirs_index = tuple(dirs_index) - - aview = a.reshape(shape)[dirs_index] - # C-order - i = nditer(aview, order='C') - assert_equal([x for x in i], aview.ravel(order='C')) - # Fortran-order - i = nditer(aview.T, order='C') - assert_equal([x for x in i], aview.T.ravel(order='C')) - # Other order - if len(shape) > 2: - i = nditer(aview.swapaxes(0, 1), order='C') - assert_equal([x for x in i], - aview.swapaxes(0, 1).ravel(order='C')) - -def test_iter_f_order(): - # Test forcing F order - - # Test the ordering for 1-D to 5-D shapes - for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: - a = arange(np.prod(shape)) - # Test each combination of positive and negative strides - for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) - for bit in range(len(shape)): - if ((2**bit) & dirs): - dirs_index[bit] = slice(None, None, -1) - dirs_index = tuple(dirs_index) - - aview = a.reshape(shape)[dirs_index] - # C-order - i = nditer(aview, order='F') - assert_equal([x for x in i], aview.ravel(order='F')) - # Fortran-order - i = nditer(aview.T, order='F') - assert_equal([x for x in i], aview.T.ravel(order='F')) - # Other order - if len(shape) > 2: - i = nditer(aview.swapaxes(0, 1), order='F') - assert_equal([x for x in i], - aview.swapaxes(0, 1).ravel(order='F')) - -def test_iter_c_or_f_order(): - # Test forcing any contiguous 
(C or F) order - - # Test the ordering for 1-D to 5-D shapes - for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: - a = arange(np.prod(shape)) - # Test each combination of positive and negative strides - for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) - for bit in range(len(shape)): - if ((2**bit) & dirs): - dirs_index[bit] = slice(None, None, -1) - dirs_index = tuple(dirs_index) - - aview = a.reshape(shape)[dirs_index] - # C-order - i = nditer(aview, order='A') - assert_equal([x for x in i], aview.ravel(order='A')) - # Fortran-order - i = nditer(aview.T, order='A') - assert_equal([x for x in i], aview.T.ravel(order='A')) - # Other order - if len(shape) > 2: - i = nditer(aview.swapaxes(0, 1), order='A') - assert_equal([x for x in i], - aview.swapaxes(0, 1).ravel(order='A')) - -def test_iter_best_order_multi_index_1d(): - # The multi-indices should be correct with any reordering - - a = arange(4) - # 1D order - i = nditer(a, ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(0,), (1,), (2,), (3,)]) - # 1D reversed order - i = nditer(a[::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(3,), (2,), (1,), (0,)]) - -def test_iter_best_order_multi_index_2d(): - # The multi-indices should be correct with any reordering - - a = arange(6) - # 2D C-order - i = nditer(a.reshape(2, 3), ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]) - # 2D Fortran-order - i = nditer(a.reshape(2, 3).copy(order='F'), ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)]) - # 2D reversed C-order - i = nditer(a.reshape(2, 3)[::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(1, 0), (1, 1), (1, 2), (0, 0), (0, 1), (0, 2)]) - i = nditer(a.reshape(2, 3)[:, ::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(0, 2), (0, 1), (0, 
0), (1, 2), (1, 1), (1, 0)]) - i = nditer(a.reshape(2, 3)[::-1, ::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(1, 2), (1, 1), (1, 0), (0, 2), (0, 1), (0, 0)]) - # 2D reversed Fortran-order - i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(1, 0), (0, 0), (1, 1), (0, 1), (1, 2), (0, 2)]) - i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], - ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(0, 2), (1, 2), (0, 1), (1, 1), (0, 0), (1, 0)]) - i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], - ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(1, 2), (0, 2), (1, 1), (0, 1), (1, 0), (0, 0)]) - -def test_iter_best_order_multi_index_3d(): - # The multi-indices should be correct with any reordering - - a = arange(12) - # 3D C-order - i = nditer(a.reshape(2, 3, 2), ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1), - (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1)]) - # 3D Fortran-order - i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0), - (0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1)]) - # 3D reversed C-order - i = nditer(a.reshape(2, 3, 2)[::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1), - (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1)]) - i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 2, 0), (0, 2, 1), (0, 1, 0), (0, 1, 1), (0, 0, 0), (0, 0, 1), - (1, 2, 0), (1, 2, 1), (1, 1, 0), (1, 1, 1), (1, 0, 0), (1, 0, 1)]) - i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['multi_index'], [['readonly']]) 
- assert_equal(iter_multi_index(i), - [(0, 0, 1), (0, 0, 0), (0, 1, 1), (0, 1, 0), (0, 2, 1), (0, 2, 0), - (1, 0, 1), (1, 0, 0), (1, 1, 1), (1, 1, 0), (1, 2, 1), (1, 2, 0)]) - # 3D reversed Fortran-order - i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], - ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(1, 0, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (1, 2, 0), (0, 2, 0), - (1, 0, 1), (0, 0, 1), (1, 1, 1), (0, 1, 1), (1, 2, 1), (0, 2, 1)]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], - ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 2, 0), (1, 2, 0), (0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0), - (0, 2, 1), (1, 2, 1), (0, 1, 1), (1, 1, 1), (0, 0, 1), (1, 0, 1)]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], - ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1), - (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0)]) - -def test_iter_best_order_c_index_1d(): - # The C index should be correct with any reordering - - a = arange(4) - # 1D order - i = nditer(a, ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [0, 1, 2, 3]) - # 1D reversed order - i = nditer(a[::-1], ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [3, 2, 1, 0]) - -def test_iter_best_order_c_index_2d(): - # The C index should be correct with any reordering - - a = arange(6) - # 2D C-order - i = nditer(a.reshape(2, 3), ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) - # 2D Fortran-order - i = nditer(a.reshape(2, 3).copy(order='F'), - ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [0, 3, 1, 4, 2, 5]) - # 2D reversed C-order - i = nditer(a.reshape(2, 3)[::-1], ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [3, 4, 5, 0, 1, 2]) - i = nditer(a.reshape(2, 3)[:, ::-1], ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [2, 1, 0, 5, 4, 3]) - i = 
nditer(a.reshape(2, 3)[::-1, ::-1], ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) - # 2D reversed Fortran-order - i = nditer(a.reshape(2, 3).copy(order='F')[::-1], - ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [3, 0, 4, 1, 5, 2]) - i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], - ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [2, 5, 1, 4, 0, 3]) - i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], - ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [5, 2, 4, 1, 3, 0]) - -def test_iter_best_order_c_index_3d(): - # The C index should be correct with any reordering - - a = arange(12) - # 3D C-order - i = nditer(a.reshape(2, 3, 2), ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) - # 3D Fortran-order - i = nditer(a.reshape(2, 3, 2).copy(order='F'), - ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), - [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) - # 3D reversed C-order - i = nditer(a.reshape(2, 3, 2)[::-1], ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), - [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) - i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), - [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) - i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), - [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) - # 3D reversed Fortran-order - i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], - ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), - [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], - ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), - [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], - ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), - [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) - -def 
test_iter_best_order_f_index_1d(): - # The Fortran index should be correct with any reordering - - a = arange(4) - # 1D order - i = nditer(a, ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [0, 1, 2, 3]) - # 1D reversed order - i = nditer(a[::-1], ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [3, 2, 1, 0]) - -def test_iter_best_order_f_index_2d(): - # The Fortran index should be correct with any reordering - - a = arange(6) - # 2D C-order - i = nditer(a.reshape(2, 3), ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [0, 2, 4, 1, 3, 5]) - # 2D Fortran-order - i = nditer(a.reshape(2, 3).copy(order='F'), - ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) - # 2D reversed C-order - i = nditer(a.reshape(2, 3)[::-1], ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [1, 3, 5, 0, 2, 4]) - i = nditer(a.reshape(2, 3)[:, ::-1], ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [4, 2, 0, 5, 3, 1]) - i = nditer(a.reshape(2, 3)[::-1, ::-1], ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [5, 3, 1, 4, 2, 0]) - # 2D reversed Fortran-order - i = nditer(a.reshape(2, 3).copy(order='F')[::-1], - ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4]) - i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], - ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1]) - i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], - ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) - -def test_iter_best_order_f_index_3d(): - # The Fortran index should be correct with any reordering - - a = arange(12) - # 3D C-order - i = nditer(a.reshape(2, 3, 2), ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), - [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) - # 3D Fortran-order - i = nditer(a.reshape(2, 3, 2).copy(order='F'), - ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11]) - # 3D reversed C-order - i = nditer(a.reshape(2, 3, 2)[::-1], ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), - [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) - i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), - [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) - i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), - [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) - # 3D reversed Fortran-order - i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], - ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), - [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], - ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), - [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], - ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), - [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) - -def test_iter_no_inner_full_coalesce(): - # Check no_inner iterators which coalesce into a single inner loop - - for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: - size = np.prod(shape) - a = arange(size) - # Test each combination of forward and backwards indexing - for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) - for bit in range(len(shape)): - if ((2**bit) & dirs): - dirs_index[bit] = slice(None, None, -1) - dirs_index = tuple(dirs_index) - - aview = a.reshape(shape)[dirs_index] - # C-order - i = nditer(aview, ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 1) - assert_equal(i[0].shape, (size,)) - # Fortran-order - i = nditer(aview.T, ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 1) - assert_equal(i[0].shape, (size,)) - # Other order - if len(shape) > 2: - i = nditer(aview.swapaxes(0, 1), - ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 1) - assert_equal(i[0].shape, (size,)) - -def 
test_iter_no_inner_dim_coalescing(): - # Check no_inner iterators whose dimensions may not coalesce completely - - # Skipping the last element in a dimension prevents coalescing - # with the next-bigger dimension - a = arange(24).reshape(2, 3, 4)[:,:, :-1] - i = nditer(a, ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 2) - assert_equal(i[0].shape, (3,)) - a = arange(24).reshape(2, 3, 4)[:, :-1,:] - i = nditer(a, ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 2) - assert_equal(i[0].shape, (8,)) - a = arange(24).reshape(2, 3, 4)[:-1,:,:] - i = nditer(a, ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 1) - assert_equal(i[0].shape, (12,)) - - # Even with lots of 1-sized dimensions, should still coalesce - a = arange(24).reshape(1, 1, 2, 1, 1, 3, 1, 1, 4, 1, 1) - i = nditer(a, ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 1) - assert_equal(i[0].shape, (24,)) - -def test_iter_dim_coalescing(): - # Check that the correct number of dimensions are coalesced - - # Tracking a multi-index disables coalescing - a = arange(24).reshape(2, 3, 4) - i = nditer(a, ['multi_index'], [['readonly']]) - assert_equal(i.ndim, 3) - - # A tracked index can allow coalescing if it's compatible with the array - a3d = arange(24).reshape(2, 3, 4) - i = nditer(a3d, ['c_index'], [['readonly']]) - assert_equal(i.ndim, 1) - i = nditer(a3d.swapaxes(0, 1), ['c_index'], [['readonly']]) - assert_equal(i.ndim, 3) - i = nditer(a3d.T, ['c_index'], [['readonly']]) - assert_equal(i.ndim, 3) - i = nditer(a3d.T, ['f_index'], [['readonly']]) - assert_equal(i.ndim, 1) - i = nditer(a3d.T.swapaxes(0, 1), ['f_index'], [['readonly']]) - assert_equal(i.ndim, 3) - - # When C or F order is forced, coalescing may still occur - a3d = arange(24).reshape(2, 3, 4) - i = nditer(a3d, order='C') - assert_equal(i.ndim, 1) - i = nditer(a3d.T, order='C') - assert_equal(i.ndim, 3) - i = nditer(a3d, order='F') - assert_equal(i.ndim, 3) - i = nditer(a3d.T, order='F') - assert_equal(i.ndim, 
1) - i = nditer(a3d, order='A') - assert_equal(i.ndim, 1) - i = nditer(a3d.T, order='A') - assert_equal(i.ndim, 1) - -def test_iter_broadcasting(): - # Standard NumPy broadcasting rules - - # 1D with scalar - i = nditer([arange(6), np.int32(2)], ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 6) - assert_equal(i.shape, (6,)) - - # 2D with scalar - i = nditer([arange(6).reshape(2, 3), np.int32(2)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 6) - assert_equal(i.shape, (2, 3)) - # 2D with 1D - i = nditer([arange(6).reshape(2, 3), arange(3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 6) - assert_equal(i.shape, (2, 3)) - i = nditer([arange(2).reshape(2, 1), arange(3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 6) - assert_equal(i.shape, (2, 3)) - # 2D with 2D - i = nditer([arange(2).reshape(2, 1), arange(3).reshape(1, 3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 6) - assert_equal(i.shape, (2, 3)) - - # 3D with scalar - i = nditer([np.int32(2), arange(24).reshape(4, 2, 3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - # 3D with 1D - i = nditer([arange(3), arange(24).reshape(4, 2, 3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - i = nditer([arange(3), arange(8).reshape(4, 2, 1)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - # 3D with 2D - i = nditer([arange(6).reshape(2, 3), arange(24).reshape(4, 2, 3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - i = nditer([arange(2).reshape(2, 1), arange(24).reshape(4, 2, 3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - i = nditer([arange(3).reshape(1, 3), arange(8).reshape(4, 2, 1)], - ['multi_index'], [['readonly']]*2) - 
assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - # 3D with 3D - i = nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 1, 3), - arange(4).reshape(4, 1, 1)], - ['multi_index'], [['readonly']]*3) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - i = nditer([arange(6).reshape(1, 2, 3), arange(4).reshape(4, 1, 1)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - i = nditer([arange(24).reshape(4, 2, 3), arange(12).reshape(4, 1, 3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - -def test_iter_itershape(): - # Check that allocated outputs work with a specified shape - a = np.arange(6, dtype='i2').reshape(2, 3) - i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], - op_axes=[[0, 1, None], None], - itershape=(-1, -1, 4)) - assert_equal(i.operands[1].shape, (2, 3, 4)) - assert_equal(i.operands[1].strides, (24, 8, 2)) - - i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], - op_axes=[[0, 1, None], None], - itershape=(-1, -1, 4)) - assert_equal(i.operands[1].shape, (3, 2, 4)) - assert_equal(i.operands[1].strides, (8, 24, 2)) - - i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], - order='F', - op_axes=[[0, 1, None], None], - itershape=(-1, -1, 4)) - assert_equal(i.operands[1].shape, (3, 2, 4)) - assert_equal(i.operands[1].strides, (2, 6, 12)) - - # If we specify 1 in the itershape, it shouldn't allow broadcasting - # of that dimension to a bigger value - assert_raises(ValueError, nditer, [a, None], [], - [['readonly'], ['writeonly', 'allocate']], - op_axes=[[0, 1, None], None], - itershape=(-1, 1, 4)) - # Test bug that for no op_axes but itershape, they are NULLed correctly - i = np.nditer([np.ones(2), None, None], itershape=(2,)) - -def test_iter_broadcasting_errors(): - # Check that errors are thrown for bad broadcasting shapes - - # 1D with 1D - 
assert_raises(ValueError, nditer, [arange(2), arange(3)], - [], [['readonly']]*2) - # 2D with 1D - assert_raises(ValueError, nditer, - [arange(6).reshape(2, 3), arange(2)], - [], [['readonly']]*2) - # 2D with 2D - assert_raises(ValueError, nditer, - [arange(6).reshape(2, 3), arange(9).reshape(3, 3)], - [], [['readonly']]*2) - assert_raises(ValueError, nditer, - [arange(6).reshape(2, 3), arange(4).reshape(2, 2)], - [], [['readonly']]*2) - # 3D with 3D - assert_raises(ValueError, nditer, - [arange(36).reshape(3, 3, 4), arange(24).reshape(2, 3, 4)], - [], [['readonly']]*2) - assert_raises(ValueError, nditer, - [arange(8).reshape(2, 4, 1), arange(24).reshape(2, 3, 4)], - [], [['readonly']]*2) - - # Verify that the error message mentions the right shapes - try: - nditer([arange(2).reshape(1, 2, 1), - arange(3).reshape(1, 3), - arange(6).reshape(2, 3)], - [], - [['readonly'], ['readonly'], ['writeonly', 'no_broadcast']]) - raise AssertionError('Should have raised a broadcast error') - except ValueError as e: - msg = str(e) - # The message should contain the shape of the 3rd operand - assert_(msg.find('(2,3)') >= 0, - 'Message "%s" doesn\'t contain operand shape (2,3)' % msg) - # The message should contain the broadcast shape - assert_(msg.find('(1,2,3)') >= 0, - 'Message "%s" doesn\'t contain broadcast shape (1,2,3)' % msg) - - try: - nditer([arange(6).reshape(2, 3), arange(2)], - [], - [['readonly'], ['readonly']], - op_axes=[[0, 1], [0, np.newaxis]], - itershape=(4, 3)) - raise AssertionError('Should have raised a broadcast error') - except ValueError as e: - msg = str(e) - # The message should contain "shape->remappedshape" for each operand - assert_(msg.find('(2,3)->(2,3)') >= 0, - 'Message "%s" doesn\'t contain operand shape (2,3)->(2,3)' % msg) - assert_(msg.find('(2,)->(2,newaxis)') >= 0, - ('Message "%s" doesn\'t contain remapped operand shape' + - '(2,)->(2,newaxis)') % msg) - # The message should contain the itershape parameter - assert_(msg.find('(4,3)') >= 0, 
- 'Message "%s" doesn\'t contain itershape parameter (4,3)' % msg) - - try: - nditer([np.zeros((2, 1, 1)), np.zeros((2,))], - [], - [['writeonly', 'no_broadcast'], ['readonly']]) - raise AssertionError('Should have raised a broadcast error') - except ValueError as e: - msg = str(e) - # The message should contain the shape of the bad operand - assert_(msg.find('(2,1,1)') >= 0, - 'Message "%s" doesn\'t contain operand shape (2,1,1)' % msg) - # The message should contain the broadcast shape - assert_(msg.find('(2,1,2)') >= 0, - 'Message "%s" doesn\'t contain the broadcast shape (2,1,2)' % msg) - -def test_iter_flags_errors(): - # Check that bad combinations of flags produce errors - - a = arange(6) - - # Not enough operands - assert_raises(ValueError, nditer, [], [], []) - # Too many operands - assert_raises(ValueError, nditer, [a]*100, [], [['readonly']]*100) - # Bad global flag - assert_raises(ValueError, nditer, [a], ['bad flag'], [['readonly']]) - # Bad op flag - assert_raises(ValueError, nditer, [a], [], [['readonly', 'bad flag']]) - # Bad order parameter - assert_raises(ValueError, nditer, [a], [], [['readonly']], order='G') - # Bad casting parameter - assert_raises(ValueError, nditer, [a], [], [['readonly']], casting='noon') - # op_flags must match ops - assert_raises(ValueError, nditer, [a]*3, [], [['readonly']]*2) - # Cannot track both a C and an F index - assert_raises(ValueError, nditer, a, - ['c_index', 'f_index'], [['readonly']]) - # Inner iteration and multi-indices/indices are incompatible - assert_raises(ValueError, nditer, a, - ['external_loop', 'multi_index'], [['readonly']]) - assert_raises(ValueError, nditer, a, - ['external_loop', 'c_index'], [['readonly']]) - assert_raises(ValueError, nditer, a, - ['external_loop', 'f_index'], [['readonly']]) - # Must specify exactly one of readwrite/readonly/writeonly per operand - assert_raises(ValueError, nditer, a, [], [[]]) - assert_raises(ValueError, nditer, a, [], [['readonly', 'writeonly']]) - 
assert_raises(ValueError, nditer, a, [], [['readonly', 'readwrite']]) - assert_raises(ValueError, nditer, a, [], [['writeonly', 'readwrite']]) - assert_raises(ValueError, nditer, a, - [], [['readonly', 'writeonly', 'readwrite']]) - # Python scalars are always readonly - assert_raises(TypeError, nditer, 1.5, [], [['writeonly']]) - assert_raises(TypeError, nditer, 1.5, [], [['readwrite']]) - # Array scalars are always readonly - assert_raises(TypeError, nditer, np.int32(1), [], [['writeonly']]) - assert_raises(TypeError, nditer, np.int32(1), [], [['readwrite']]) - # Check readonly array - a.flags.writeable = False - assert_raises(ValueError, nditer, a, [], [['writeonly']]) - assert_raises(ValueError, nditer, a, [], [['readwrite']]) - a.flags.writeable = True - # Multi-indices available only with the multi_index flag - i = nditer(arange(6), [], [['readonly']]) - assert_raises(ValueError, lambda i:i.multi_index, i) - # Index available only with an index flag - assert_raises(ValueError, lambda i:i.index, i) - # GotoCoords and GotoIndex incompatible with buffering or no_inner - - def assign_multi_index(i): - i.multi_index = (0,) - - def assign_index(i): - i.index = 0 - - def assign_iterindex(i): - i.iterindex = 0 - - def assign_iterrange(i): - i.iterrange = (0, 1) - i = nditer(arange(6), ['external_loop']) - assert_raises(ValueError, assign_multi_index, i) - assert_raises(ValueError, assign_index, i) - assert_raises(ValueError, assign_iterindex, i) - assert_raises(ValueError, assign_iterrange, i) - i = nditer(arange(6), ['buffered']) - assert_raises(ValueError, assign_multi_index, i) - assert_raises(ValueError, assign_index, i) - assert_raises(ValueError, assign_iterrange, i) - # Can't iterate if size is zero - assert_raises(ValueError, nditer, np.array([])) - -def test_iter_slice(): - a, b, c = np.arange(3), np.arange(3), np.arange(3.) 
- i = nditer([a, b, c], [], ['readwrite']) - with i: - i[0:2] = (3, 3) - assert_equal(a, [3, 1, 2]) - assert_equal(b, [3, 1, 2]) - assert_equal(c, [0, 1, 2]) - i[1] = 12 - assert_equal(i[0:2], [3, 12]) - -def test_iter_assign_mapping(): - a = np.arange(24, dtype='f8').reshape(2, 3, 4).T - it = np.nditer(a, [], [['readwrite', 'updateifcopy']], - casting='same_kind', op_dtypes=[np.dtype('f4')]) - with it: - it.operands[0][...] = 3 - it.operands[0][...] = 14 - assert_equal(a, 14) - it = np.nditer(a, [], [['readwrite', 'updateifcopy']], - casting='same_kind', op_dtypes=[np.dtype('f4')]) - with it: - x = it.operands[0][-1:1] - x[...] = 14 - it.operands[0][...] = -1234 - assert_equal(a, -1234) - # check for no warnings on dealloc - x = None - it = None - -def test_iter_nbo_align_contig(): - # Check that byte order, alignment, and contig changes work - - # Byte order change by requesting a specific dtype - a = np.arange(6, dtype='f4') - au = a.byteswap().newbyteorder() - assert_(a.dtype.byteorder != au.dtype.byteorder) - i = nditer(au, [], [['readwrite', 'updateifcopy']], - casting='equiv', - op_dtypes=[np.dtype('f4')]) - with i: - # context manager triggers UPDATEIFCOPY on i at exit - assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder) - assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) - assert_equal(i.operands[0], a) - i.operands[0][:] = 2 - assert_equal(au, [2]*6) - del i # should not raise a warning - # Byte order change by requesting NBO - a = np.arange(6, dtype='f4') - au = a.byteswap().newbyteorder() - assert_(a.dtype.byteorder != au.dtype.byteorder) - with nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']], - casting='equiv') as i: - # context manager triggers UPDATEIFCOPY on i at exit - assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder) - assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) - assert_equal(i.operands[0], a) - i.operands[0][:] = 12345 - i.operands[0][:] = 2 - assert_equal(au, [2]*6) - - # Unaligned input - a = 
np.zeros((6*4+1,), dtype='i1')[1:] - a.dtype = 'f4' - a[:] = np.arange(6, dtype='f4') - assert_(not a.flags.aligned) - # Without 'aligned', shouldn't copy - i = nditer(a, [], [['readonly']]) - assert_(not i.operands[0].flags.aligned) - assert_equal(i.operands[0], a) - # With 'aligned', should make a copy - with nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']]) as i: - assert_(i.operands[0].flags.aligned) - # context manager triggers UPDATEIFCOPY on i at exit - assert_equal(i.operands[0], a) - i.operands[0][:] = 3 - assert_equal(a, [3]*6) - - # Discontiguous input - a = arange(12) - # If it is contiguous, shouldn't copy - i = nditer(a[:6], [], [['readonly']]) - assert_(i.operands[0].flags.contiguous) - assert_equal(i.operands[0], a[:6]) - # If it isn't contiguous, should buffer - i = nditer(a[::2], ['buffered', 'external_loop'], - [['readonly', 'contig']], - buffersize=10) - assert_(i[0].flags.contiguous) - assert_equal(i[0], a[::2]) - -def test_iter_array_cast(): - # Check that arrays are cast as requested - - # No cast 'f4' -> 'f4' - a = np.arange(6, dtype='f4').reshape(2, 3) - i = nditer(a, [], [['readwrite']], op_dtypes=[np.dtype('f4')]) - with i: - assert_equal(i.operands[0], a) - assert_equal(i.operands[0].dtype, np.dtype('f4')) - - # Byte-order cast ' '>f4' - a = np.arange(6, dtype='f4')]) as i: - assert_equal(i.operands[0], a) - assert_equal(i.operands[0].dtype, np.dtype('>f4')) - - # Safe case 'f4' -> 'f8' - a = np.arange(24, dtype='f4').reshape(2, 3, 4).swapaxes(1, 2) - i = nditer(a, [], [['readonly', 'copy']], - casting='safe', - op_dtypes=[np.dtype('f8')]) - assert_equal(i.operands[0], a) - assert_equal(i.operands[0].dtype, np.dtype('f8')) - # The memory layout of the temporary should match a (a is (48,4,16)) - # except negative strides get flipped to positive strides. 
- assert_equal(i.operands[0].strides, (96, 8, 32)) - a = a[::-1,:, ::-1] - i = nditer(a, [], [['readonly', 'copy']], - casting='safe', - op_dtypes=[np.dtype('f8')]) - assert_equal(i.operands[0], a) - assert_equal(i.operands[0].dtype, np.dtype('f8')) - assert_equal(i.operands[0].strides, (96, 8, 32)) - - # Same-kind cast 'f8' -> 'f4' -> 'f8' - a = np.arange(24, dtype='f8').reshape(2, 3, 4).T - with nditer(a, [], - [['readwrite', 'updateifcopy']], - casting='same_kind', - op_dtypes=[np.dtype('f4')]) as i: - assert_equal(i.operands[0], a) - assert_equal(i.operands[0].dtype, np.dtype('f4')) - assert_equal(i.operands[0].strides, (4, 16, 48)) - # Check that WRITEBACKIFCOPY is activated at exit - i.operands[0][2, 1, 1] = -12.5 - assert_(a[2, 1, 1] != -12.5) - assert_equal(a[2, 1, 1], -12.5) - - a = np.arange(6, dtype='i4')[::-2] - with nditer(a, [], - [['writeonly', 'updateifcopy']], - casting='unsafe', - op_dtypes=[np.dtype('f4')]) as i: - assert_equal(i.operands[0].dtype, np.dtype('f4')) - # Even though the stride was negative in 'a', it - # becomes positive in the temporary - assert_equal(i.operands[0].strides, (4,)) - i.operands[0][:] = [1, 2, 3] - assert_equal(a, [1, 2, 3]) - -def test_iter_array_cast_errors(): - # Check that invalid casts are caught - - # Need to enable copying for casts to occur - assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], - [['readonly']], op_dtypes=[np.dtype('f8')]) - # Also need to allow casting for casts to occur - assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], - [['readonly', 'copy']], casting='no', - op_dtypes=[np.dtype('f8')]) - assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], - [['readonly', 'copy']], casting='equiv', - op_dtypes=[np.dtype('f8')]) - assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], - [['writeonly', 'updateifcopy']], - casting='no', - op_dtypes=[np.dtype('f4')]) - assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], - [['writeonly', 'updateifcopy']], - 
casting='equiv', - op_dtypes=[np.dtype('f4')]) - # ' '>f4' should not work with casting='no' - assert_raises(TypeError, nditer, arange(2, dtype='f4')]) - # 'f4' -> 'f8' is a safe cast, but 'f8' -> 'f4' isn't - assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], - [['readwrite', 'updateifcopy']], - casting='safe', - op_dtypes=[np.dtype('f8')]) - assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], - [['readwrite', 'updateifcopy']], - casting='safe', - op_dtypes=[np.dtype('f4')]) - # 'f4' -> 'i4' is neither a safe nor a same-kind cast - assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], - [['readonly', 'copy']], - casting='same_kind', - op_dtypes=[np.dtype('i4')]) - assert_raises(TypeError, nditer, arange(2, dtype='i4'), [], - [['writeonly', 'updateifcopy']], - casting='same_kind', - op_dtypes=[np.dtype('f4')]) - -def test_iter_scalar_cast(): - # Check that scalars are cast as requested - - # No cast 'f4' -> 'f4' - i = nditer(np.float32(2.5), [], [['readonly']], - op_dtypes=[np.dtype('f4')]) - assert_equal(i.dtypes[0], np.dtype('f4')) - assert_equal(i.value.dtype, np.dtype('f4')) - assert_equal(i.value, 2.5) - # Safe cast 'f4' -> 'f8' - i = nditer(np.float32(2.5), [], - [['readonly', 'copy']], - casting='safe', - op_dtypes=[np.dtype('f8')]) - assert_equal(i.dtypes[0], np.dtype('f8')) - assert_equal(i.value.dtype, np.dtype('f8')) - assert_equal(i.value, 2.5) - # Same-kind cast 'f8' -> 'f4' - i = nditer(np.float64(2.5), [], - [['readonly', 'copy']], - casting='same_kind', - op_dtypes=[np.dtype('f4')]) - assert_equal(i.dtypes[0], np.dtype('f4')) - assert_equal(i.value.dtype, np.dtype('f4')) - assert_equal(i.value, 2.5) - # Unsafe cast 'f8' -> 'i4' - i = nditer(np.float64(3.0), [], - [['readonly', 'copy']], - casting='unsafe', - op_dtypes=[np.dtype('i4')]) - assert_equal(i.dtypes[0], np.dtype('i4')) - assert_equal(i.value.dtype, np.dtype('i4')) - assert_equal(i.value, 3) - # Readonly scalars may be cast even without setting COPY or BUFFERED - i = 
nditer(3, [], [['readonly']], op_dtypes=[np.dtype('f8')]) - assert_equal(i[0].dtype, np.dtype('f8')) - assert_equal(i[0], 3.) - -def test_iter_scalar_cast_errors(): - # Check that invalid casts are caught - - # Need to allow copying/buffering for write casts of scalars to occur - assert_raises(TypeError, nditer, np.float32(2), [], - [['readwrite']], op_dtypes=[np.dtype('f8')]) - assert_raises(TypeError, nditer, 2.5, [], - [['readwrite']], op_dtypes=[np.dtype('f4')]) - # 'f8' -> 'f4' isn't a safe cast if the value would overflow - assert_raises(TypeError, nditer, np.float64(1e60), [], - [['readonly']], - casting='safe', - op_dtypes=[np.dtype('f4')]) - # 'f4' -> 'i4' is neither a safe nor a same-kind cast - assert_raises(TypeError, nditer, np.float32(2), [], - [['readonly']], - casting='same_kind', - op_dtypes=[np.dtype('i4')]) - -def test_iter_object_arrays_basic(): - # Check that object arrays work - - obj = {'a':3,'b':'d'} - a = np.array([[1, 2, 3], None, obj, None], dtype='O') - if HAS_REFCOUNT: - rc = sys.getrefcount(obj) - - # Need to allow references for object arrays - assert_raises(TypeError, nditer, a) - if HAS_REFCOUNT: - assert_equal(sys.getrefcount(obj), rc) - - i = nditer(a, ['refs_ok'], ['readonly']) - vals = [x_[()] for x_ in i] - assert_equal(np.array(vals, dtype='O'), a) - vals, i, x = [None]*3 - if HAS_REFCOUNT: - assert_equal(sys.getrefcount(obj), rc) - - i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], - ['readonly'], order='C') - assert_(i.iterationneedsapi) - vals = [x_[()] for x_ in i] - assert_equal(np.array(vals, dtype='O'), a.reshape(2, 2).ravel(order='F')) - vals, i, x = [None]*3 - if HAS_REFCOUNT: - assert_equal(sys.getrefcount(obj), rc) - - i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], - ['readwrite'], order='C') - with i: - for x in i: - x[...] 
= None - vals, i, x = [None]*3 - if HAS_REFCOUNT: - assert_(sys.getrefcount(obj) == rc-1) - assert_equal(a, np.array([None]*4, dtype='O')) - -def test_iter_object_arrays_conversions(): - # Conversions to/from objects - a = np.arange(6, dtype='O') - i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], - casting='unsafe', op_dtypes='i4') - with i: - for x in i: - x[...] += 1 - assert_equal(a, np.arange(6)+1) - - a = np.arange(6, dtype='i4') - i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], - casting='unsafe', op_dtypes='O') - with i: - for x in i: - x[...] += 1 - assert_equal(a, np.arange(6)+1) - - # Non-contiguous object array - a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'O')]) - a = a['a'] - a[:] = np.arange(6) - i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], - casting='unsafe', op_dtypes='i4') - with i: - for x in i: - x[...] += 1 - assert_equal(a, np.arange(6)+1) - - #Non-contiguous value array - a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'i4')]) - a = a['a'] - a[:] = np.arange(6) + 98172488 - i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], - casting='unsafe', op_dtypes='O') - with i: - ob = i[0][()] - if HAS_REFCOUNT: - rc = sys.getrefcount(ob) - for x in i: - x[...] 
+= 1 - if HAS_REFCOUNT: - assert_(sys.getrefcount(ob) == rc-1) - assert_equal(a, np.arange(6)+98172489) - -def test_iter_common_dtype(): - # Check that the iterator finds a common data type correctly - - i = nditer([array([3], dtype='f4'), array([0], dtype='f8')], - ['common_dtype'], - [['readonly', 'copy']]*2, - casting='safe') - assert_equal(i.dtypes[0], np.dtype('f8')) - assert_equal(i.dtypes[1], np.dtype('f8')) - i = nditer([array([3], dtype='i4'), array([0], dtype='f4')], - ['common_dtype'], - [['readonly', 'copy']]*2, - casting='safe') - assert_equal(i.dtypes[0], np.dtype('f8')) - assert_equal(i.dtypes[1], np.dtype('f8')) - i = nditer([array([3], dtype='f4'), array(0, dtype='f8')], - ['common_dtype'], - [['readonly', 'copy']]*2, - casting='same_kind') - assert_equal(i.dtypes[0], np.dtype('f4')) - assert_equal(i.dtypes[1], np.dtype('f4')) - i = nditer([array([3], dtype='u4'), array(0, dtype='i4')], - ['common_dtype'], - [['readonly', 'copy']]*2, - casting='safe') - assert_equal(i.dtypes[0], np.dtype('u4')) - assert_equal(i.dtypes[1], np.dtype('u4')) - i = nditer([array([3], dtype='u4'), array(-12, dtype='i4')], - ['common_dtype'], - [['readonly', 'copy']]*2, - casting='safe') - assert_equal(i.dtypes[0], np.dtype('i8')) - assert_equal(i.dtypes[1], np.dtype('i8')) - i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), - array([2j], dtype='c8'), array([9], dtype='f8')], - ['common_dtype'], - [['readonly', 'copy']]*4, - casting='safe') - assert_equal(i.dtypes[0], np.dtype('c16')) - assert_equal(i.dtypes[1], np.dtype('c16')) - assert_equal(i.dtypes[2], np.dtype('c16')) - assert_equal(i.dtypes[3], np.dtype('c16')) - assert_equal(i.value, (3, -12, 2j, 9)) - - # When allocating outputs, other outputs aren't factored in - i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], [], - [['readonly', 'copy'], - ['writeonly', 'allocate'], - ['writeonly']], - casting='safe') - assert_equal(i.dtypes[0], np.dtype('i4')) - assert_equal(i.dtypes[1], 
np.dtype('i4')) - assert_equal(i.dtypes[2], np.dtype('c16')) - # But, if common data types are requested, they are - i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], - ['common_dtype'], - [['readonly', 'copy'], - ['writeonly', 'allocate'], - ['writeonly']], - casting='safe') - assert_equal(i.dtypes[0], np.dtype('c16')) - assert_equal(i.dtypes[1], np.dtype('c16')) - assert_equal(i.dtypes[2], np.dtype('c16')) - -def test_iter_copy_if_overlap(): - # Ensure the iterator makes copies on read/write overlap, if requested - - # Copy not needed, 1 op - for flag in ['readonly', 'writeonly', 'readwrite']: - a = arange(10) - i = nditer([a], ['copy_if_overlap'], [[flag]]) - with i: - assert_(i.operands[0] is a) - - # Copy needed, 2 ops, read-write overlap - x = arange(10) - a = x[1:] - b = x[:-1] - with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i: - assert_(not np.shares_memory(*i.operands)) - - # Copy not needed with elementwise, 2 ops, exactly same arrays - x = arange(10) - a = x - b = x - i = nditer([a, b], ['copy_if_overlap'], [['readonly', 'overlap_assume_elementwise'], - ['readwrite', 'overlap_assume_elementwise']]) - with i: - assert_(i.operands[0] is a and i.operands[1] is b) - with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i: - assert_(i.operands[0] is a and not np.shares_memory(i.operands[1], b)) - - # Copy not needed, 2 ops, no overlap - x = arange(10) - a = x[::2] - b = x[1::2] - i = nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) - assert_(i.operands[0] is a and i.operands[1] is b) - - # Copy needed, 2 ops, read-write overlap - x = arange(4, dtype=np.int8) - a = x[3:] - b = x.view(np.int32)[:1] - with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) as i: - assert_(not np.shares_memory(*i.operands)) - - # Copy needed, 3 ops, read-write overlap - for flag in ['writeonly', 'readwrite']: - x = np.ones([10, 10]) - a = x - b = x.T - c = x - with nditer([a, b, 
c], ['copy_if_overlap'], - [['readonly'], ['readonly'], [flag]]) as i: - a2, b2, c2 = i.operands - assert_(not np.shares_memory(a2, c2)) - assert_(not np.shares_memory(b2, c2)) - - # Copy not needed, 3 ops, read-only overlap - x = np.ones([10, 10]) - a = x - b = x.T - c = x - i = nditer([a, b, c], ['copy_if_overlap'], - [['readonly'], ['readonly'], ['readonly']]) - a2, b2, c2 = i.operands - assert_(a is a2) - assert_(b is b2) - assert_(c is c2) - - # Copy not needed, 3 ops, read-only overlap - x = np.ones([10, 10]) - a = x - b = np.ones([10, 10]) - c = x.T - i = nditer([a, b, c], ['copy_if_overlap'], - [['readonly'], ['writeonly'], ['readonly']]) - a2, b2, c2 = i.operands - assert_(a is a2) - assert_(b is b2) - assert_(c is c2) - - # Copy not needed, 3 ops, write-only overlap - x = np.arange(7) - a = x[:3] - b = x[3:6] - c = x[4:7] - i = nditer([a, b, c], ['copy_if_overlap'], - [['readonly'], ['writeonly'], ['writeonly']]) - a2, b2, c2 = i.operands - assert_(a is a2) - assert_(b is b2) - assert_(c is c2) - -def test_iter_op_axes(): - # Check that custom axes work - - # Reverse the axes - a = arange(6).reshape(2, 3) - i = nditer([a, a.T], [], [['readonly']]*2, op_axes=[[0, 1], [1, 0]]) - assert_(all([x == y for (x, y) in i])) - a = arange(24).reshape(2, 3, 4) - i = nditer([a.T, a], [], [['readonly']]*2, op_axes=[[2, 1, 0], None]) - assert_(all([x == y for (x, y) in i])) - - # Broadcast 1D to any dimension - a = arange(1, 31).reshape(2, 3, 5) - b = arange(1, 3) - i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [0, -1, -1]]) - assert_equal([x*y for (x, y) in i], (a*b.reshape(2, 1, 1)).ravel()) - b = arange(1, 4) - i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [-1, 0, -1]]) - assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 3, 1)).ravel()) - b = arange(1, 6) - i = nditer([a, b], [], [['readonly']]*2, - op_axes=[None, [np.newaxis, np.newaxis, 0]]) - assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 1, 5)).ravel()) - - # Inner product-style 
broadcasting - a = arange(24).reshape(2, 3, 4) - b = arange(40).reshape(5, 2, 4) - i = nditer([a, b], ['multi_index'], [['readonly']]*2, - op_axes=[[0, 1, -1, -1], [-1, -1, 0, 1]]) - assert_equal(i.shape, (2, 3, 5, 2)) - - # Matrix product-style broadcasting - a = arange(12).reshape(3, 4) - b = arange(20).reshape(4, 5) - i = nditer([a, b], ['multi_index'], [['readonly']]*2, - op_axes=[[0, -1], [-1, 1]]) - assert_equal(i.shape, (3, 5)) - -def test_iter_op_axes_errors(): - # Check that custom axes throws errors for bad inputs - - # Wrong number of items in op_axes - a = arange(6).reshape(2, 3) - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, - op_axes=[[0], [1], [0]]) - # Out of bounds items in op_axes - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, - op_axes=[[2, 1], [0, 1]]) - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, - op_axes=[[0, 1], [2, -1]]) - # Duplicate items in op_axes - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, - op_axes=[[0, 0], [0, 1]]) - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, - op_axes=[[0, 1], [1, 1]]) - - # Different sized arrays in op_axes - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, - op_axes=[[0, 1], [0, 1, 0]]) - - # Non-broadcastable dimensions in the result - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, - op_axes=[[0, 1], [1, 0]]) - -def test_iter_copy(): - # Check that copying the iterator works correctly - a = arange(24).reshape(2, 3, 4) - - # Simple iterator - i = nditer(a) - j = i.copy() - assert_equal([x[()] for x in i], [x[()] for x in j]) - - i.iterindex = 3 - j = i.copy() - assert_equal([x[()] for x in i], [x[()] for x in j]) - - # Buffered iterator - i = nditer(a, ['buffered', 'ranged'], order='F', buffersize=3) - j = i.copy() - assert_equal([x[()] for x in i], [x[()] for x in j]) - - i.iterindex = 3 - j = i.copy() - assert_equal([x[()] for x in i], [x[()] for x in j]) - - i.iterrange = (3, 9) - 
j = i.copy() - assert_equal([x[()] for x in i], [x[()] for x in j]) - - i.iterrange = (2, 18) - next(i) - next(i) - j = i.copy() - assert_equal([x[()] for x in i], [x[()] for x in j]) - - # Casting iterator - with nditer(a, ['buffered'], order='F', casting='unsafe', - op_dtypes='f8', buffersize=5) as i: - j = i.copy() - assert_equal([x[()] for x in j], a.ravel(order='F')) - - a = arange(24, dtype='cast->swap - - a = np.arange(10, dtype='f4').newbyteorder().byteswap() - i = nditer(a, ['buffered', 'external_loop'], - [['readwrite', 'nbo', 'aligned']], - casting='same_kind', - op_dtypes=[np.dtype('f8').newbyteorder()], - buffersize=3) - with i: - for v in i: - v[...] *= 2 - - assert_equal(a, 2*np.arange(10, dtype='f4')) - - with suppress_warnings() as sup: - sup.filter(np.ComplexWarning) - - a = np.arange(10, dtype='f8').newbyteorder().byteswap() - i = nditer(a, ['buffered', 'external_loop'], - [['readwrite', 'nbo', 'aligned']], - casting='unsafe', - op_dtypes=[np.dtype('c8').newbyteorder()], - buffersize=3) - with i: - for v in i: - v[...] *= 2 - - assert_equal(a, 2*np.arange(10, dtype='f8')) - -def test_iter_buffered_cast_byteswapped_complex(): - # Test that buffering can handle a cast which requires swap->cast->copy - - a = np.arange(10, dtype='c8').newbyteorder().byteswap() - a += 2j - i = nditer(a, ['buffered', 'external_loop'], - [['readwrite', 'nbo', 'aligned']], - casting='same_kind', - op_dtypes=[np.dtype('c16')], - buffersize=3) - with i: - for v in i: - v[...] *= 2 - assert_equal(a, 2*np.arange(10, dtype='c8') + 4j) - - a = np.arange(10, dtype='c8') - a += 2j - i = nditer(a, ['buffered', 'external_loop'], - [['readwrite', 'nbo', 'aligned']], - casting='same_kind', - op_dtypes=[np.dtype('c16').newbyteorder()], - buffersize=3) - with i: - for v in i: - v[...] 
*= 2 - assert_equal(a, 2*np.arange(10, dtype='c8') + 4j) - - a = np.arange(10, dtype=np.clongdouble).newbyteorder().byteswap() - a += 2j - i = nditer(a, ['buffered', 'external_loop'], - [['readwrite', 'nbo', 'aligned']], - casting='same_kind', - op_dtypes=[np.dtype('c16')], - buffersize=3) - with i: - for v in i: - v[...] *= 2 - assert_equal(a, 2*np.arange(10, dtype=np.clongdouble) + 4j) - - a = np.arange(10, dtype=np.longdouble).newbyteorder().byteswap() - i = nditer(a, ['buffered', 'external_loop'], - [['readwrite', 'nbo', 'aligned']], - casting='same_kind', - op_dtypes=[np.dtype('f4')], - buffersize=7) - with i: - for v in i: - v[...] *= 2 - assert_equal(a, 2*np.arange(10, dtype=np.longdouble)) - -def test_iter_buffered_cast_structured_type(): - # Tests buffering of structured types - - # simple -> struct type (duplicates the value) - sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] - a = np.arange(3, dtype='f4') + 0.5 - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt) - vals = [np.array(x) for x in i] - assert_equal(vals[0]['a'], 0.5) - assert_equal(vals[0]['b'], 0) - assert_equal(vals[0]['c'], [[(0.5)]*3]*2) - assert_equal(vals[0]['d'], 0.5) - assert_equal(vals[1]['a'], 1.5) - assert_equal(vals[1]['b'], 1) - assert_equal(vals[1]['c'], [[(1.5)]*3]*2) - assert_equal(vals[1]['d'], 1.5) - assert_equal(vals[0].dtype, np.dtype(sdt)) - - # object -> struct type - sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] - a = np.zeros((3,), dtype='O') - a[0] = (0.5, 0.5, [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], 0.5) - a[1] = (1.5, 1.5, [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5]], 1.5) - a[2] = (2.5, 2.5, [[2.5, 2.5, 2.5], [2.5, 2.5, 2.5]], 2.5) - if HAS_REFCOUNT: - rc = sys.getrefcount(a[0]) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt) - vals = [x.copy() for x in i] - assert_equal(vals[0]['a'], 0.5) - assert_equal(vals[0]['b'], 0) - assert_equal(vals[0]['c'], 
[[(0.5)]*3]*2) - assert_equal(vals[0]['d'], 0.5) - assert_equal(vals[1]['a'], 1.5) - assert_equal(vals[1]['b'], 1) - assert_equal(vals[1]['c'], [[(1.5)]*3]*2) - assert_equal(vals[1]['d'], 1.5) - assert_equal(vals[0].dtype, np.dtype(sdt)) - vals, i, x = [None]*3 - if HAS_REFCOUNT: - assert_equal(sys.getrefcount(a[0]), rc) - - # single-field struct type -> simple - sdt = [('a', 'f4')] - a = np.array([(5.5,), (8,)], dtype=sdt) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes='i4') - assert_equal([x_[()] for x_ in i], [5, 8]) - - # make sure multi-field struct type -> simple doesn't work - sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] - a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt) - assert_raises(TypeError, lambda: ( - nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes='i4'))) - - # struct type -> struct type (field-wise copy) - sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] - sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')] - a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - assert_equal([np.array(x_) for x_ in i], - [np.array((1, 2, 3), dtype=sdt2), - np.array((4, 5, 6), dtype=sdt2)]) - - # make sure struct type -> struct type with different - # number of fields fails - sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] - sdt2 = [('b', 'O'), ('a', 'f8')] - a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) - - assert_raises(ValueError, lambda : ( - nditer(a, ['buffered', 'refs_ok'], ['readwrite'], - casting='unsafe', - op_dtypes=sdt2))) - - -def test_iter_buffered_cast_subarray(): - # Tests buffering of subarrays - - # one element -> many (copies it to all) - sdt1 = [('a', 'f4')] - sdt2 = [('a', 'f8', (3, 2, 2))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - 
assert_equal(i[0].dtype, np.dtype(sdt2)) - for x, count in zip(i, list(range(6))): - assert_(np.all(x['a'] == count)) - - # one element -> many -> back (copies it to all) - sdt1 = [('a', 'O', (1, 1))] - sdt2 = [('a', 'O', (3, 2, 2))] - a = np.zeros((6,), dtype=sdt1) - a['a'][:, 0, 0] = np.arange(6) - i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], - casting='unsafe', - op_dtypes=sdt2) - with i: - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_(np.all(x['a'] == count)) - x['a'][0] += 2 - count += 1 - assert_equal(a['a'], np.arange(6).reshape(6, 1, 1)+2) - - # many -> one element -> back (copies just element 0) - sdt1 = [('a', 'O', (3, 2, 2))] - sdt2 = [('a', 'O', (1,))] - a = np.zeros((6,), dtype=sdt1) - a['a'][:, 0, 0, 0] = np.arange(6) - i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], - casting='unsafe', - op_dtypes=sdt2) - with i: - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'], count) - x['a'] += 2 - count += 1 - assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1)*np.ones((1, 3, 2, 2))+2) - - # many -> one element -> back (copies just element 0) - sdt1 = [('a', 'f8', (3, 2, 2))] - sdt2 = [('a', 'O', (1,))] - a = np.zeros((6,), dtype=sdt1) - a['a'][:, 0, 0, 0] = np.arange(6) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'], count) - count += 1 - - # many -> one element (copies just element 0) - sdt1 = [('a', 'O', (3, 2, 2))] - sdt2 = [('a', 'f4', (1,))] - a = np.zeros((6,), dtype=sdt1) - a['a'][:, 0, 0, 0] = np.arange(6) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'], count) - count += 1 - - # many -> matching shape (straightforward copy) - sdt1 = [('a', 'O', (3, 2, 2))] - sdt2 = [('a', 'f4', (3, 2, 2))] - a = 
np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*3*2*2).reshape(6, 3, 2, 2) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'], a[count]['a']) - count += 1 - - # vector -> smaller vector (truncates) - sdt1 = [('a', 'f8', (6,))] - sdt2 = [('a', 'f4', (2,))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*6).reshape(6, 6) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'], a[count]['a'][:2]) - count += 1 - - # vector -> bigger vector (pads with zeros) - sdt1 = [('a', 'f8', (2,))] - sdt2 = [('a', 'f4', (6,))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2).reshape(6, 2) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'][:2], a[count]['a']) - assert_equal(x['a'][2:], [0, 0, 0, 0]) - count += 1 - - # vector -> matrix (broadcasts) - sdt1 = [('a', 'f8', (2,))] - sdt2 = [('a', 'f4', (2, 2))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2).reshape(6, 2) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'][0], a[count]['a']) - assert_equal(x['a'][1], a[count]['a']) - count += 1 - - # vector -> matrix (broadcasts and zero-pads) - sdt1 = [('a', 'f8', (2, 1))] - sdt2 = [('a', 'f4', (3, 2))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2).reshape(6, 2, 1) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) - assert_equal(x['a'][:2, 1], a[count]['a'][:, 0]) - 
assert_equal(x['a'][2,:], [0, 0]) - count += 1 - - # matrix -> matrix (truncates and zero-pads) - sdt1 = [('a', 'f8', (2, 3))] - sdt2 = [('a', 'f4', (3, 2))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2*3).reshape(6, 2, 3) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) - assert_equal(x['a'][:2, 1], a[count]['a'][:, 1]) - assert_equal(x['a'][2,:], [0, 0]) - count += 1 - -def test_iter_buffering_badwriteback(): - # Writing back from a buffer cannot combine elements - - # a needs write buffering, but had a broadcast dimension - a = np.arange(6).reshape(2, 3, 1) - b = np.arange(12).reshape(2, 3, 2) - assert_raises(ValueError, nditer, [a, b], - ['buffered', 'external_loop'], - [['readwrite'], ['writeonly']], - order='C') - - # But if a is readonly, it's fine - nditer([a, b], ['buffered', 'external_loop'], - [['readonly'], ['writeonly']], - order='C') - - # If a has just one element, it's fine too (constant 0 stride, a reduction) - a = np.arange(1).reshape(1, 1, 1) - nditer([a, b], ['buffered', 'external_loop', 'reduce_ok'], - [['readwrite'], ['writeonly']], - order='C') - - # check that it fails on other dimensions too - a = np.arange(6).reshape(1, 3, 2) - assert_raises(ValueError, nditer, [a, b], - ['buffered', 'external_loop'], - [['readwrite'], ['writeonly']], - order='C') - a = np.arange(4).reshape(2, 1, 2) - assert_raises(ValueError, nditer, [a, b], - ['buffered', 'external_loop'], - [['readwrite'], ['writeonly']], - order='C') - -def test_iter_buffering_string(): - # Safe casting disallows shrinking strings - a = np.array(['abc', 'a', 'abcd'], dtype=np.bytes_) - assert_equal(a.dtype, np.dtype('S4')) - assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], - op_dtypes='S2') - i = nditer(a, ['buffered'], ['readonly'], op_dtypes='S6') - assert_equal(i[0], b'abc') - 
assert_equal(i[0].dtype, np.dtype('S6')) - - a = np.array(['abc', 'a', 'abcd'], dtype=np.unicode_) - assert_equal(a.dtype, np.dtype('U4')) - assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], - op_dtypes='U2') - i = nditer(a, ['buffered'], ['readonly'], op_dtypes='U6') - assert_equal(i[0], u'abc') - assert_equal(i[0].dtype, np.dtype('U6')) - -def test_iter_buffering_growinner(): - # Test that the inner loop grows when no buffering is needed - a = np.arange(30) - i = nditer(a, ['buffered', 'growinner', 'external_loop'], - buffersize=5) - # Should end up with just one inner loop here - assert_equal(i[0].size, a.size) - - -@pytest.mark.slow -def test_iter_buffered_reduce_reuse(): - # large enough array for all views, including negative strides. - a = np.arange(2*3**5)[3**5:3**5+1] - flags = ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok', 'refs_ok'] - op_flags = [('readonly',), ('readwrite', 'allocate')] - op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]] - # wrong dtype to force buffering - op_dtypes = [float, a.dtype] - - def get_params(): - for xs in range(-3**2, 3**2 + 1): - for ys in range(xs, 3**2 + 1): - for op_axes in op_axes_list: - # last stride is reduced and because of that not - # important for this test, as it is the inner stride. - strides = (xs * a.itemsize, ys * a.itemsize, a.itemsize) - arr = np.lib.stride_tricks.as_strided(a, (3, 3, 3), strides) - - for skip in [0, 1]: - yield arr, op_axes, skip - - for arr, op_axes, skip in get_params(): - nditer2 = np.nditer([arr.copy(), None], - op_axes=op_axes, flags=flags, op_flags=op_flags, - op_dtypes=op_dtypes) - with nditer2: - nditer2.operands[-1][...] 
= 0 - nditer2.reset() - nditer2.iterindex = skip - - for (a2_in, b2_in) in nditer2: - b2_in += a2_in.astype(np.int_) - - comp_res = nditer2.operands[-1] - - for bufsize in range(0, 3**3): - nditer1 = np.nditer([arr, None], - op_axes=op_axes, flags=flags, op_flags=op_flags, - buffersize=bufsize, op_dtypes=op_dtypes) - with nditer1: - nditer1.operands[-1][...] = 0 - nditer1.reset() - nditer1.iterindex = skip - - for (a1_in, b1_in) in nditer1: - b1_in += a1_in.astype(np.int_) - - res = nditer1.operands[-1] - assert_array_equal(res, comp_res) - - -def test_iter_no_broadcast(): - # Test that the no_broadcast flag works - a = np.arange(24).reshape(2, 3, 4) - b = np.arange(6).reshape(2, 3, 1) - c = np.arange(12).reshape(3, 4) - - nditer([a, b, c], [], - [['readonly', 'no_broadcast'], - ['readonly'], ['readonly']]) - assert_raises(ValueError, nditer, [a, b, c], [], - [['readonly'], ['readonly', 'no_broadcast'], ['readonly']]) - assert_raises(ValueError, nditer, [a, b, c], [], - [['readonly'], ['readonly'], ['readonly', 'no_broadcast']]) - - -class TestIterNested(object): - - def test_basic(self): - # Test nested iteration basic usage - a = arange(12).reshape(2, 3, 2) - - i, j = np.nested_iters(a, [[0], [1, 2]]) - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) - - i, j = np.nested_iters(a, [[0, 1], [2]]) - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) - - i, j = np.nested_iters(a, [[0, 2], [1]]) - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) - - def test_reorder(self): - # Test nested iteration basic usage - a = arange(12).reshape(2, 3, 2) - - # In 'K' order (default), it gets reordered - i, j = np.nested_iters(a, [[0], [2, 1]]) - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) - - i, j = np.nested_iters(a, [[1, 0], [2]]) - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 
1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) - - i, j = np.nested_iters(a, [[2, 0], [1]]) - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) - - # In 'C' order, it doesn't - i, j = np.nested_iters(a, [[0], [2, 1]], order='C') - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]]) - - i, j = np.nested_iters(a, [[1, 0], [2]], order='C') - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]]) - - i, j = np.nested_iters(a, [[2, 0], [1]], order='C') - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]]) - - def test_flip_axes(self): - # Test nested iteration with negative axes - a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1] - - # In 'K' order (default), the axes all get flipped - i, j = np.nested_iters(a, [[0], [1, 2]]) - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) - - i, j = np.nested_iters(a, [[0, 1], [2]]) - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) - - i, j = np.nested_iters(a, [[0, 2], [1]]) - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) - - # In 'C' order, flipping axes is disabled - i, j = np.nested_iters(a, [[0], [1, 2]], order='C') - vals = [list(j) for _ in i] - assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]]) - - i, j = np.nested_iters(a, [[0, 1], [2]], order='C') - vals = [list(j) for _ in i] - assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]]) - - i, j = np.nested_iters(a, [[0, 2], [1]], order='C') - vals = [list(j) for _ in i] - assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]]) - - def test_broadcast(self): - # Test nested iteration with broadcasting - a = arange(2).reshape(2, 1) - b = arange(3).reshape(1, 3) - - i, j = np.nested_iters([a, b], [[0], [1]]) - vals = 
[list(j) for _ in i] - assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]]) - - i, j = np.nested_iters([a, b], [[1], [0]]) - vals = [list(j) for _ in i] - assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]]) - - def test_dtype_copy(self): - # Test nested iteration with a copy to change dtype - - # copy - a = arange(6, dtype='i4').reshape(2, 3) - i, j = np.nested_iters(a, [[0], [1]], - op_flags=['readonly', 'copy'], - op_dtypes='f8') - assert_equal(j[0].dtype, np.dtype('f8')) - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 1, 2], [3, 4, 5]]) - vals = None - - # writebackifcopy - using context manager - a = arange(6, dtype='f4').reshape(2, 3) - i, j = np.nested_iters(a, [[0], [1]], - op_flags=['readwrite', 'updateifcopy'], - casting='same_kind', - op_dtypes='f8') - with i, j: - assert_equal(j[0].dtype, np.dtype('f8')) - for x in i: - for y in j: - y[...] += 1 - assert_equal(a, [[0, 1, 2], [3, 4, 5]]) - assert_equal(a, [[1, 2, 3], [4, 5, 6]]) - - # writebackifcopy - using close() - a = arange(6, dtype='f4').reshape(2, 3) - i, j = np.nested_iters(a, [[0], [1]], - op_flags=['readwrite', 'updateifcopy'], - casting='same_kind', - op_dtypes='f8') - assert_equal(j[0].dtype, np.dtype('f8')) - for x in i: - for y in j: - y[...] += 1 - assert_equal(a, [[0, 1, 2], [3, 4, 5]]) - i.close() - j.close() - assert_equal(a, [[1, 2, 3], [4, 5, 6]]) - - def test_dtype_buffered(self): - # Test nested iteration with buffering to change dtype - - a = arange(6, dtype='f4').reshape(2, 3) - i, j = np.nested_iters(a, [[0], [1]], - flags=['buffered'], - op_flags=['readwrite'], - casting='same_kind', - op_dtypes='f8') - assert_equal(j[0].dtype, np.dtype('f8')) - for x in i: - for y in j: - y[...] 
+= 1 - assert_equal(a, [[1, 2, 3], [4, 5, 6]]) - - def test_0d(self): - a = np.arange(12).reshape(2, 3, 2) - i, j = np.nested_iters(a, [[], [1, 0, 2]]) - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]) - - i, j = np.nested_iters(a, [[1, 0, 2], []]) - vals = [list(j) for _ in i] - assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]]) - - i, j, k = np.nested_iters(a, [[2, 0], [], [1]]) - vals = [] - for x in i: - for y in j: - vals.append([z for z in k]) - assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) - - def test_iter_nested_iters_dtype_buffered(self): - # Test nested iteration with buffering to change dtype - - a = arange(6, dtype='f4').reshape(2, 3) - i, j = np.nested_iters(a, [[0], [1]], - flags=['buffered'], - op_flags=['readwrite'], - casting='same_kind', - op_dtypes='f8') - with i, j: - assert_equal(j[0].dtype, np.dtype('f8')) - for x in i: - for y in j: - y[...] += 1 - assert_equal(a, [[1, 2, 3], [4, 5, 6]]) - -def test_iter_reduction_error(): - - a = np.arange(6) - assert_raises(ValueError, nditer, [a, None], [], - [['readonly'], ['readwrite', 'allocate']], - op_axes=[[0], [-1]]) - - a = np.arange(6).reshape(2, 3) - assert_raises(ValueError, nditer, [a, None], ['external_loop'], - [['readonly'], ['readwrite', 'allocate']], - op_axes=[[0, 1], [-1, -1]]) - -def test_iter_reduction(): - # Test doing reductions with the iterator - - a = np.arange(6) - i = nditer([a, None], ['reduce_ok'], - [['readonly'], ['readwrite', 'allocate']], - op_axes=[[0], [-1]]) - # Need to initialize the output operand to the addition unit - with i: - i.operands[1][...] = 0 - # Do the reduction - for x, y in i: - y[...] 
+= x - # Since no axes were specified, should have allocated a scalar - assert_equal(i.operands[1].ndim, 0) - assert_equal(i.operands[1], np.sum(a)) - - a = np.arange(6).reshape(2, 3) - i = nditer([a, None], ['reduce_ok', 'external_loop'], - [['readonly'], ['readwrite', 'allocate']], - op_axes=[[0, 1], [-1, -1]]) - # Need to initialize the output operand to the addition unit - with i: - i.operands[1][...] = 0 - # Reduction shape/strides for the output - assert_equal(i[1].shape, (6,)) - assert_equal(i[1].strides, (0,)) - # Do the reduction - for x, y in i: - # Use a for loop instead of ``y[...] += x`` - # (equivalent to ``y[...] = y[...].copy() + x``), - # because y has zero strides we use for the reduction - for j in range(len(y)): - y[j] += x[j] - # Since no axes were specified, should have allocated a scalar - assert_equal(i.operands[1].ndim, 0) - assert_equal(i.operands[1], np.sum(a)) - - # This is a tricky reduction case for the buffering double loop - # to handle - a = np.ones((2, 3, 5)) - it1 = nditer([a, None], ['reduce_ok', 'external_loop'], - [['readonly'], ['readwrite', 'allocate']], - op_axes=[None, [0, -1, 1]]) - it2 = nditer([a, None], ['reduce_ok', 'external_loop', - 'buffered', 'delay_bufalloc'], - [['readonly'], ['readwrite', 'allocate']], - op_axes=[None, [0, -1, 1]], buffersize=10) - with it1, it2: - it1.operands[1].fill(0) - it2.operands[1].fill(0) - it2.reset() - for x in it1: - x[1][...] += x[0] - for x in it2: - x[1][...] += x[0] - assert_equal(it1.operands[1], it2.operands[1]) - assert_equal(it2.operands[1].sum(), a.size) - -def test_iter_buffering_reduction(): - # Test doing buffered reductions with the iterator - - a = np.arange(6) - b = np.array(0., dtype='f8').byteswap().newbyteorder() - i = nditer([a, b], ['reduce_ok', 'buffered'], - [['readonly'], ['readwrite', 'nbo']], - op_axes=[[0], [-1]]) - with i: - assert_equal(i[1].dtype, np.dtype('f8')) - assert_(i[1].dtype != b.dtype) - # Do the reduction - for x, y in i: - y[...] 
+= x - # Since no axes were specified, should have allocated a scalar - assert_equal(b, np.sum(a)) - - a = np.arange(6).reshape(2, 3) - b = np.array([0, 0], dtype='f8').byteswap().newbyteorder() - i = nditer([a, b], ['reduce_ok', 'external_loop', 'buffered'], - [['readonly'], ['readwrite', 'nbo']], - op_axes=[[0, 1], [0, -1]]) - # Reduction shape/strides for the output - with i: - assert_equal(i[1].shape, (3,)) - assert_equal(i[1].strides, (0,)) - # Do the reduction - for x, y in i: - # Use a for loop instead of ``y[...] += x`` - # (equivalent to ``y[...] = y[...].copy() + x``), - # because y has zero strides we use for the reduction - for j in range(len(y)): - y[j] += x[j] - assert_equal(b, np.sum(a, axis=1)) - - # Iterator inner double loop was wrong on this one - p = np.arange(2) + 1 - it = np.nditer([p, None], - ['delay_bufalloc', 'reduce_ok', 'buffered', 'external_loop'], - [['readonly'], ['readwrite', 'allocate']], - op_axes=[[-1, 0], [-1, -1]], - itershape=(2, 2)) - with it: - it.operands[1].fill(0) - it.reset() - assert_equal(it[0], [1, 2, 1, 2]) - - # Iterator inner loop should take argument contiguity into account - x = np.ones((7, 13, 8), np.int8)[4:6,1:11:6,1:5].transpose(1, 2, 0) - x[...] = np.arange(x.size).reshape(x.shape) - y_base = np.arange(4*4, dtype=np.int8).reshape(4, 4) - y_base_copy = y_base.copy() - y = y_base[::2,:,None] - - it = np.nditer([y, x], - ['buffered', 'external_loop', 'reduce_ok'], - [['readwrite'], ['readonly']]) - with it: - for a, b in it: - a.fill(2) - - assert_equal(y_base[1::2], y_base_copy[1::2]) - assert_equal(y_base[::2], 2) - -def test_iter_buffering_reduction_reuse_reduce_loops(): - # There was a bug triggering reuse of the reduce loop inappropriately, - # which caused processing to happen in unnecessarily small chunks - # and overran the buffer. 
- - a = np.zeros((2, 7)) - b = np.zeros((1, 7)) - it = np.nditer([a, b], flags=['reduce_ok', 'external_loop', 'buffered'], - op_flags=[['readonly'], ['readwrite']], - buffersize=5) - - with it: - bufsizes = [x.shape[0] for x, y in it] - assert_equal(bufsizes, [5, 2, 5, 2]) - assert_equal(sum(bufsizes), a.size) - -def test_iter_writemasked_badinput(): - a = np.zeros((2, 3)) - b = np.zeros((3,)) - m = np.array([[True, True, False], [False, True, False]]) - m2 = np.array([True, True, False]) - m3 = np.array([0, 1, 1], dtype='u1') - mbad1 = np.array([0, 1, 1], dtype='i1') - mbad2 = np.array([0, 1, 1], dtype='f4') - - # Need an 'arraymask' if any operand is 'writemasked' - assert_raises(ValueError, nditer, [a, m], [], - [['readwrite', 'writemasked'], ['readonly']]) - - # A 'writemasked' operand must not be readonly - assert_raises(ValueError, nditer, [a, m], [], - [['readonly', 'writemasked'], ['readonly', 'arraymask']]) - - # 'writemasked' and 'arraymask' may not be used together - assert_raises(ValueError, nditer, [a, m], [], - [['readonly'], ['readwrite', 'arraymask', 'writemasked']]) - - # 'arraymask' may only be specified once - assert_raises(ValueError, nditer, [a, m, m2], [], - [['readwrite', 'writemasked'], - ['readonly', 'arraymask'], - ['readonly', 'arraymask']]) - - # An 'arraymask' with nothing 'writemasked' also doesn't make sense - assert_raises(ValueError, nditer, [a, m], [], - [['readwrite'], ['readonly', 'arraymask']]) - - # A writemasked reduction requires a similarly smaller mask - assert_raises(ValueError, nditer, [a, b, m], ['reduce_ok'], - [['readonly'], - ['readwrite', 'writemasked'], - ['readonly', 'arraymask']]) - # But this should work with a smaller/equal mask to the reduction operand - np.nditer([a, b, m2], ['reduce_ok'], - [['readonly'], - ['readwrite', 'writemasked'], - ['readonly', 'arraymask']]) - # The arraymask itself cannot be a reduction - assert_raises(ValueError, nditer, [a, b, m2], ['reduce_ok'], - [['readonly'], - ['readwrite', 
'writemasked'], - ['readwrite', 'arraymask']]) - - # A uint8 mask is ok too - np.nditer([a, m3], ['buffered'], - [['readwrite', 'writemasked'], - ['readonly', 'arraymask']], - op_dtypes=['f4', None], - casting='same_kind') - # An int8 mask isn't ok - assert_raises(TypeError, np.nditer, [a, mbad1], ['buffered'], - [['readwrite', 'writemasked'], - ['readonly', 'arraymask']], - op_dtypes=['f4', None], - casting='same_kind') - # A float32 mask isn't ok - assert_raises(TypeError, np.nditer, [a, mbad2], ['buffered'], - [['readwrite', 'writemasked'], - ['readonly', 'arraymask']], - op_dtypes=['f4', None], - casting='same_kind') - -def test_iter_writemasked(): - a = np.zeros((3,), dtype='f8') - msk = np.array([True, True, False]) - - # When buffering is unused, 'writemasked' effectively does nothing. - # It's up to the user of the iterator to obey the requested semantics. - it = np.nditer([a, msk], [], - [['readwrite', 'writemasked'], - ['readonly', 'arraymask']]) - with it: - for x, m in it: - x[...] = 1 - # Because we violated the semantics, all the values became 1 - assert_equal(a, [1, 1, 1]) - - # Even if buffering is enabled, we still may be accessing the array - # directly. - it = np.nditer([a, msk], ['buffered'], - [['readwrite', 'writemasked'], - ['readonly', 'arraymask']]) - with it: - for x, m in it: - x[...] = 2.5 - # Because we violated the semantics, all the values became 2.5 - assert_equal(a, [2.5, 2.5, 2.5]) - - # If buffering will definitely happening, for instance because of - # a cast, only the items selected by the mask will be copied back from - # the buffer. - it = np.nditer([a, msk], ['buffered'], - [['readwrite', 'writemasked'], - ['readonly', 'arraymask']], - op_dtypes=['i8', None], - casting='unsafe') - with it: - for x, m in it: - x[...] 
= 3 - # Even though we violated the semantics, only the selected values - # were copied back - assert_equal(a, [3, 3, 2.5]) - -def test_iter_non_writable_attribute_deletion(): - it = np.nditer(np.ones(2)) - attr = ["value", "shape", "operands", "itviews", "has_delayed_bufalloc", - "iterationneedsapi", "has_multi_index", "has_index", "dtypes", - "ndim", "nop", "itersize", "finished"] - - for s in attr: - assert_raises(AttributeError, delattr, it, s) - - -def test_iter_writable_attribute_deletion(): - it = np.nditer(np.ones(2)) - attr = [ "multi_index", "index", "iterrange", "iterindex"] - for s in attr: - assert_raises(AttributeError, delattr, it, s) - - -def test_iter_element_deletion(): - it = np.nditer(np.ones(3)) - try: - del it[1] - del it[1:2] - except TypeError: - pass - except Exception: - raise AssertionError - -def test_iter_allocated_array_dtypes(): - # If the dtype of an allocated output has a shape, the shape gets - # tacked onto the end of the result. - it = np.nditer(([1, 3, 20], None), op_dtypes=[None, ('i4', (2,))]) - for a, b in it: - b[0] = a - 1 - b[1] = a + 1 - assert_equal(it.operands[1], [[0, 2], [2, 4], [19, 21]]) - - # Make sure this works for scalars too - it = np.nditer((10, 2, None), op_dtypes=[None, None, ('i4', (2, 2))]) - for a, b, c in it: - c[0, 0] = a - b - c[0, 1] = a + b - c[1, 0] = a * b - c[1, 1] = a / b - assert_equal(it.operands[2], [[8, 12], [20, 5]]) - - -def test_0d_iter(): - # Basic test for iteration of 0-d arrays: - i = nditer([2, 3], ['multi_index'], [['readonly']]*2) - assert_equal(i.ndim, 0) - assert_equal(next(i), (2, 3)) - assert_equal(i.multi_index, ()) - assert_equal(i.iterindex, 0) - assert_raises(StopIteration, next, i) - # test reset: - i.reset() - assert_equal(next(i), (2, 3)) - assert_raises(StopIteration, next, i) - - # test forcing to 0-d - i = nditer(np.arange(5), ['multi_index'], [['readonly']], op_axes=[()]) - assert_equal(i.ndim, 0) - assert_equal(len(i), 1) - # note that itershape=(), still behaves 
like None due to the conversions - - # Test a more complex buffered casting case (same as another test above) - sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] - a = np.array(0.5, dtype='f4') - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', op_dtypes=sdt) - vals = next(i) - assert_equal(vals['a'], 0.5) - assert_equal(vals['b'], 0) - assert_equal(vals['c'], [[(0.5)]*3]*2) - assert_equal(vals['d'], 0.5) - - -def test_iter_too_large(): - # The total size of the iterator must not exceed the maximum intp due - # to broadcasting. Dividing by 1024 will keep it small enough to - # give a legal array. - size = np.iinfo(np.intp).max // 1024 - arr = np.lib.stride_tricks.as_strided(np.zeros(1), (size,), (0,)) - assert_raises(ValueError, nditer, (arr, arr[:, None])) - # test the same for multiindex. That may get more interesting when - # removing 0 dimensional axis is allowed (since an iterator can grow then) - assert_raises(ValueError, nditer, - (arr, arr[:, None]), flags=['multi_index']) - - -def test_iter_too_large_with_multiindex(): - # When a multi index is being tracked, the error is delayed this - # checks the delayed error messages and getting below that by - # removing an axis. - base_size = 2**10 - num = 1 - while base_size**num < np.iinfo(np.intp).max: - num += 1 - - shape_template = [1, 1] * num - arrays = [] - for i in range(num): - shape = shape_template[:] - shape[i * 2] = 2**10 - arrays.append(np.empty(shape)) - arrays = tuple(arrays) - - # arrays are now too large to be broadcast. The different modes test - # different nditer functionality with or without GIL. 
- for mode in range(6): - with assert_raises(ValueError): - _multiarray_tests.test_nditer_too_large(arrays, -1, mode) - # but if we do nothing with the nditer, it can be constructed: - _multiarray_tests.test_nditer_too_large(arrays, -1, 7) - - # When an axis is removed, things should work again (half the time): - for i in range(num): - for mode in range(6): - # an axis with size 1024 is removed: - _multiarray_tests.test_nditer_too_large(arrays, i*2, mode) - # an axis with size 1 is removed: - with assert_raises(ValueError): - _multiarray_tests.test_nditer_too_large(arrays, i*2 + 1, mode) - -def test_writebacks(): - a = np.arange(6, dtype='f4') - au = a.byteswap().newbyteorder() - assert_(a.dtype.byteorder != au.dtype.byteorder) - it = nditer(au, [], [['readwrite', 'updateifcopy']], - casting='equiv', op_dtypes=[np.dtype('f4')]) - with it: - it.operands[0][:] = 100 - assert_equal(au, 100) - # do it again, this time raise an error, - it = nditer(au, [], [['readwrite', 'updateifcopy']], - casting='equiv', op_dtypes=[np.dtype('f4')]) - try: - with it: - assert_equal(au.flags.writeable, False) - it.operands[0][:] = 0 - raise ValueError('exit context manager on exception') - except: - pass - assert_equal(au, 0) - assert_equal(au.flags.writeable, True) - # cannot reuse i outside context manager - assert_raises(ValueError, getattr, it, 'operands') - - it = nditer(au, [], [['readwrite', 'updateifcopy']], - casting='equiv', op_dtypes=[np.dtype('f4')]) - with it: - x = it.operands[0] - x[:] = 6 - assert_(x.flags.writebackifcopy) - assert_equal(au, 6) - assert_(not x.flags.writebackifcopy) - x[:] = 123 # x.data still valid - assert_equal(au, 6) # but not connected to au - - it = nditer(au, [], - [['readwrite', 'updateifcopy']], - casting='equiv', op_dtypes=[np.dtype('f4')]) - # reentering works - with it: - with it: - for x in it: - x[...] 
= 123 - - it = nditer(au, [], - [['readwrite', 'updateifcopy']], - casting='equiv', op_dtypes=[np.dtype('f4')]) - # make sure exiting the inner context manager closes the iterator - with it: - with it: - for x in it: - x[...] = 123 - assert_raises(ValueError, getattr, it, 'operands') - # do not crash if original data array is decrefed - it = nditer(au, [], - [['readwrite', 'updateifcopy']], - casting='equiv', op_dtypes=[np.dtype('f4')]) - del au - with it: - for x in it: - x[...] = 123 - # make sure we cannot reenter the closed iterator - enter = it.__enter__ - assert_raises(RuntimeError, enter) - -def test_close_equivalent(): - ''' using a context amanger and using nditer.close are equivalent - ''' - def add_close(x, y, out=None): - addop = np.add - it = np.nditer([x, y, out], [], - [['readonly'], ['readonly'], ['writeonly','allocate']]) - for (a, b, c) in it: - addop(a, b, out=c) - ret = it.operands[2] - it.close() - return ret - - def add_context(x, y, out=None): - addop = np.add - it = np.nditer([x, y, out], [], - [['readonly'], ['readonly'], ['writeonly','allocate']]) - with it: - for (a, b, c) in it: - addop(a, b, out=c) - return it.operands[2] - z = add_close(range(5), range(5)) - assert_equal(z, range(0, 10, 2)) - z = add_context(range(5), range(5)) - assert_equal(z, range(0, 10, 2)) - -def test_close_raises(): - it = np.nditer(np.arange(3)) - assert_equal (next(it), 0) - it.close() - assert_raises(StopIteration, next, it) - assert_raises(ValueError, getattr, it, 'operands') - -@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") -def test_warn_noclose(): - a = np.arange(6, dtype='f4') - au = a.byteswap().newbyteorder() - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - it = np.nditer(au, [], [['readwrite', 'updateifcopy']], - casting='equiv', op_dtypes=[np.dtype('f4')]) - del it - assert len(sup.log) == 1 diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_numeric.py 
b/venv/lib/python3.7/site-packages/numpy/core/tests/test_numeric.py deleted file mode 100644 index ffebdf6..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_numeric.py +++ /dev/null @@ -1,3117 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import warnings -import itertools -import platform -import pytest -from decimal import Decimal - -import numpy as np -from numpy.core import umath -from numpy.random import rand, randint, randn -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_raises_regex, - assert_array_equal, assert_almost_equal, assert_array_almost_equal, - assert_warns, HAS_REFCOUNT - ) - - -class TestResize(object): - def test_copies(self): - A = np.array([[1, 2], [3, 4]]) - Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]]) - assert_equal(np.resize(A, (2, 4)), Ar1) - - Ar2 = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) - assert_equal(np.resize(A, (4, 2)), Ar2) - - Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]]) - assert_equal(np.resize(A, (4, 3)), Ar3) - - def test_zeroresize(self): - A = np.array([[1, 2], [3, 4]]) - Ar = np.resize(A, (0,)) - assert_array_equal(Ar, np.array([])) - assert_equal(A.dtype, Ar.dtype) - - Ar = np.resize(A, (0, 2)) - assert_equal(Ar.shape, (0, 2)) - - Ar = np.resize(A, (2, 0)) - assert_equal(Ar.shape, (2, 0)) - - def test_reshape_from_zero(self): - # See also gh-6740 - A = np.zeros(0, dtype=[('a', np.float32)]) - Ar = np.resize(A, (2, 1)) - assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype)) - assert_equal(A.dtype, Ar.dtype) - - -class TestNonarrayArgs(object): - # check that non-array arguments to functions wrap them in arrays - def test_choose(self): - choices = [[0, 1, 2], - [3, 4, 5], - [5, 6, 7]] - tgt = [5, 1, 5] - a = [2, 0, 1] - - out = np.choose(a, choices) - assert_equal(out, tgt) - - def test_clip(self): - arr = [-1, 5, 2, 3, 10, -4, -9] - out = np.clip(arr, 2, 7) - tgt = [2, 5, 2, 3, 7, 2, 2] - assert_equal(out, tgt) - - def 
test_compress(self): - arr = [[0, 1, 2, 3, 4], - [5, 6, 7, 8, 9]] - tgt = [[5, 6, 7, 8, 9]] - out = np.compress([0, 1], arr, axis=0) - assert_equal(out, tgt) - - def test_count_nonzero(self): - arr = [[0, 1, 7, 0, 0], - [3, 0, 0, 2, 19]] - tgt = np.array([2, 3]) - out = np.count_nonzero(arr, axis=1) - assert_equal(out, tgt) - - def test_cumproduct(self): - A = [[1, 2, 3], [4, 5, 6]] - assert_(np.all(np.cumproduct(A) == np.array([1, 2, 6, 24, 120, 720]))) - - def test_diagonal(self): - a = [[0, 1, 2, 3], - [4, 5, 6, 7], - [8, 9, 10, 11]] - out = np.diagonal(a) - tgt = [0, 5, 10] - - assert_equal(out, tgt) - - def test_mean(self): - A = [[1, 2, 3], [4, 5, 6]] - assert_(np.mean(A) == 3.5) - assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5]))) - assert_(np.all(np.mean(A, 1) == np.array([2., 5.]))) - - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_(np.isnan(np.mean([]))) - assert_(w[0].category is RuntimeWarning) - - def test_ptp(self): - a = [3, 4, 5, 10, -3, -5, 6.0] - assert_equal(np.ptp(a, axis=0), 15.0) - - def test_prod(self): - arr = [[1, 2, 3, 4], - [5, 6, 7, 9], - [10, 3, 4, 5]] - tgt = [24, 1890, 600] - - assert_equal(np.prod(arr, axis=-1), tgt) - - def test_ravel(self): - a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]] - tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] - assert_equal(np.ravel(a), tgt) - - def test_repeat(self): - a = [1, 2, 3] - tgt = [1, 1, 2, 2, 3, 3] - - out = np.repeat(a, 2) - assert_equal(out, tgt) - - def test_reshape(self): - arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]] - tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]] - assert_equal(np.reshape(arr, (2, 6)), tgt) - - def test_round(self): - arr = [1.56, 72.54, 6.35, 3.25] - tgt = [1.6, 72.5, 6.4, 3.2] - assert_equal(np.around(arr, decimals=1), tgt) - - def test_searchsorted(self): - arr = [-8, -5, -1, 3, 6, 10] - out = np.searchsorted(arr, 0) - assert_equal(out, 3) - - def test_size(self): - A = [[1, 
2, 3], [4, 5, 6]] - assert_(np.size(A) == 6) - assert_(np.size(A, 0) == 2) - assert_(np.size(A, 1) == 3) - - def test_squeeze(self): - A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]] - assert_equal(np.squeeze(A).shape, (3, 3)) - assert_equal(np.squeeze(np.zeros((1, 3, 1))).shape, (3,)) - assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=0).shape, (3, 1)) - assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=-1).shape, (1, 3)) - assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=2).shape, (1, 3)) - assert_equal(np.squeeze([np.zeros((3, 1))]).shape, (3,)) - assert_equal(np.squeeze([np.zeros((3, 1))], axis=0).shape, (3, 1)) - assert_equal(np.squeeze([np.zeros((3, 1))], axis=2).shape, (1, 3)) - assert_equal(np.squeeze([np.zeros((3, 1))], axis=-1).shape, (1, 3)) - - def test_std(self): - A = [[1, 2, 3], [4, 5, 6]] - assert_almost_equal(np.std(A), 1.707825127659933) - assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5])) - assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658])) - - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_(np.isnan(np.std([]))) - assert_(w[0].category is RuntimeWarning) - - def test_swapaxes(self): - tgt = [[[0, 4], [2, 6]], [[1, 5], [3, 7]]] - a = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]] - out = np.swapaxes(a, 0, 2) - assert_equal(out, tgt) - - def test_sum(self): - m = [[1, 2, 3], - [4, 5, 6], - [7, 8, 9]] - tgt = [[6], [15], [24]] - out = np.sum(m, axis=1, keepdims=True) - - assert_equal(tgt, out) - - def test_take(self): - tgt = [2, 3, 5] - indices = [1, 2, 4] - a = [1, 2, 3, 4, 5] - - out = np.take(a, indices) - assert_equal(out, tgt) - - def test_trace(self): - c = [[1, 2], [3, 4], [5, 6]] - assert_equal(np.trace(c), 5) - - def test_transpose(self): - arr = [[1, 2], [3, 4], [5, 6]] - tgt = [[1, 3, 5], [2, 4, 6]] - assert_equal(np.transpose(arr, (1, 0)), tgt) - - def test_var(self): - A = [[1, 2, 3], [4, 5, 6]] - assert_almost_equal(np.var(A), 2.9166666666666665) - 
assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25])) - assert_almost_equal(np.var(A, 1), np.array([0.66666667, 0.66666667])) - - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_(np.isnan(np.var([]))) - assert_(w[0].category is RuntimeWarning) - - B = np.array([None, 0]) - B[0] = 1j - assert_almost_equal(np.var(B), 0.25) - -class TestIsscalar(object): - def test_isscalar(self): - assert_(np.isscalar(3.1)) - assert_(np.isscalar(np.int16(12345))) - assert_(np.isscalar(False)) - assert_(np.isscalar('numpy')) - assert_(not np.isscalar([3.1])) - assert_(not np.isscalar(None)) - - # PEP 3141 - from fractions import Fraction - assert_(np.isscalar(Fraction(5, 17))) - from numbers import Number - assert_(np.isscalar(Number())) - - -class TestBoolScalar(object): - def test_logical(self): - f = np.False_ - t = np.True_ - s = "xyz" - assert_((t and s) is s) - assert_((f and s) is f) - - def test_bitwise_or(self): - f = np.False_ - t = np.True_ - assert_((t | t) is t) - assert_((f | t) is t) - assert_((t | f) is t) - assert_((f | f) is f) - - def test_bitwise_and(self): - f = np.False_ - t = np.True_ - assert_((t & t) is t) - assert_((f & t) is f) - assert_((t & f) is f) - assert_((f & f) is f) - - def test_bitwise_xor(self): - f = np.False_ - t = np.True_ - assert_((t ^ t) is f) - assert_((f ^ t) is t) - assert_((t ^ f) is t) - assert_((f ^ f) is f) - - -class TestBoolArray(object): - def setup(self): - # offset for simd tests - self.t = np.array([True] * 41, dtype=bool)[1::] - self.f = np.array([False] * 41, dtype=bool)[1::] - self.o = np.array([False] * 42, dtype=bool)[2::] - self.nm = self.f.copy() - self.im = self.t.copy() - self.nm[3] = True - self.nm[-2] = True - self.im[3] = False - self.im[-2] = False - - def test_all_any(self): - assert_(self.t.all()) - assert_(self.t.any()) - assert_(not self.f.all()) - assert_(not self.f.any()) - assert_(self.nm.any()) - assert_(self.im.any()) - assert_(not 
self.nm.all()) - assert_(not self.im.all()) - # check bad element in all positions - for i in range(256 - 7): - d = np.array([False] * 256, dtype=bool)[7::] - d[i] = True - assert_(np.any(d)) - e = np.array([True] * 256, dtype=bool)[7::] - e[i] = False - assert_(not np.all(e)) - assert_array_equal(e, ~d) - # big array test for blocked libc loops - for i in list(range(9, 6000, 507)) + [7764, 90021, -10]: - d = np.array([False] * 100043, dtype=bool) - d[i] = True - assert_(np.any(d), msg="%r" % i) - e = np.array([True] * 100043, dtype=bool) - e[i] = False - assert_(not np.all(e), msg="%r" % i) - - def test_logical_not_abs(self): - assert_array_equal(~self.t, self.f) - assert_array_equal(np.abs(~self.t), self.f) - assert_array_equal(np.abs(~self.f), self.t) - assert_array_equal(np.abs(self.f), self.f) - assert_array_equal(~np.abs(self.f), self.t) - assert_array_equal(~np.abs(self.t), self.f) - assert_array_equal(np.abs(~self.nm), self.im) - np.logical_not(self.t, out=self.o) - assert_array_equal(self.o, self.f) - np.abs(self.t, out=self.o) - assert_array_equal(self.o, self.t) - - def test_logical_and_or_xor(self): - assert_array_equal(self.t | self.t, self.t) - assert_array_equal(self.f | self.f, self.f) - assert_array_equal(self.t | self.f, self.t) - assert_array_equal(self.f | self.t, self.t) - np.logical_or(self.t, self.t, out=self.o) - assert_array_equal(self.o, self.t) - assert_array_equal(self.t & self.t, self.t) - assert_array_equal(self.f & self.f, self.f) - assert_array_equal(self.t & self.f, self.f) - assert_array_equal(self.f & self.t, self.f) - np.logical_and(self.t, self.t, out=self.o) - assert_array_equal(self.o, self.t) - assert_array_equal(self.t ^ self.t, self.f) - assert_array_equal(self.f ^ self.f, self.f) - assert_array_equal(self.t ^ self.f, self.t) - assert_array_equal(self.f ^ self.t, self.t) - np.logical_xor(self.t, self.t, out=self.o) - assert_array_equal(self.o, self.f) - - assert_array_equal(self.nm & self.t, self.nm) - 
assert_array_equal(self.im & self.f, False) - assert_array_equal(self.nm & True, self.nm) - assert_array_equal(self.im & False, self.f) - assert_array_equal(self.nm | self.t, self.t) - assert_array_equal(self.im | self.f, self.im) - assert_array_equal(self.nm | True, self.t) - assert_array_equal(self.im | False, self.im) - assert_array_equal(self.nm ^ self.t, self.im) - assert_array_equal(self.im ^ self.f, self.im) - assert_array_equal(self.nm ^ True, self.im) - assert_array_equal(self.im ^ False, self.im) - - -class TestBoolCmp(object): - def setup(self): - self.f = np.ones(256, dtype=np.float32) - self.ef = np.ones(self.f.size, dtype=bool) - self.d = np.ones(128, dtype=np.float64) - self.ed = np.ones(self.d.size, dtype=bool) - # generate values for all permutation of 256bit simd vectors - s = 0 - for i in range(32): - self.f[s:s+8] = [i & 2**x for x in range(8)] - self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)] - s += 8 - s = 0 - for i in range(16): - self.d[s:s+4] = [i & 2**x for x in range(4)] - self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)] - s += 4 - - self.nf = self.f.copy() - self.nd = self.d.copy() - self.nf[self.ef] = np.nan - self.nd[self.ed] = np.nan - - self.inff = self.f.copy() - self.infd = self.d.copy() - self.inff[::3][self.ef[::3]] = np.inf - self.infd[::3][self.ed[::3]] = np.inf - self.inff[1::3][self.ef[1::3]] = -np.inf - self.infd[1::3][self.ed[1::3]] = -np.inf - self.inff[2::3][self.ef[2::3]] = np.nan - self.infd[2::3][self.ed[2::3]] = np.nan - self.efnonan = self.ef.copy() - self.efnonan[2::3] = False - self.ednonan = self.ed.copy() - self.ednonan[2::3] = False - - self.signf = self.f.copy() - self.signd = self.d.copy() - self.signf[self.ef] *= -1. - self.signd[self.ed] *= -1. - self.signf[1::6][self.ef[1::6]] = -np.inf - self.signd[1::6][self.ed[1::6]] = -np.inf - self.signf[3::6][self.ef[3::6]] = -np.nan - self.signd[3::6][self.ed[3::6]] = -np.nan - self.signf[4::6][self.ef[4::6]] = -0. - self.signd[4::6][self.ed[4::6]] = -0. 
- - def test_float(self): - # offset for alignment test - for i in range(4): - assert_array_equal(self.f[i:] > 0, self.ef[i:]) - assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:]) - assert_array_equal(self.f[i:] == 0, ~self.ef[i:]) - assert_array_equal(-self.f[i:] < 0, self.ef[i:]) - assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:]) - r = self.f[i:] != 0 - assert_array_equal(r, self.ef[i:]) - r2 = self.f[i:] != np.zeros_like(self.f[i:]) - r3 = 0 != self.f[i:] - assert_array_equal(r, r2) - assert_array_equal(r, r3) - # check bool == 0x1 - assert_array_equal(r.view(np.int8), r.astype(np.int8)) - assert_array_equal(r2.view(np.int8), r2.astype(np.int8)) - assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) - - # isnan on amd64 takes the same code path - assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:]) - assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:]) - assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:]) - assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:]) - assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:]) - - def test_double(self): - # offset for alignment test - for i in range(2): - assert_array_equal(self.d[i:] > 0, self.ed[i:]) - assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:]) - assert_array_equal(self.d[i:] == 0, ~self.ed[i:]) - assert_array_equal(-self.d[i:] < 0, self.ed[i:]) - assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:]) - r = self.d[i:] != 0 - assert_array_equal(r, self.ed[i:]) - r2 = self.d[i:] != np.zeros_like(self.d[i:]) - r3 = 0 != self.d[i:] - assert_array_equal(r, r2) - assert_array_equal(r, r3) - # check bool == 0x1 - assert_array_equal(r.view(np.int8), r.astype(np.int8)) - assert_array_equal(r2.view(np.int8), r2.astype(np.int8)) - assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) - - # isnan on amd64 takes the same code path - assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:]) - assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:]) - 
assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:]) - assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:]) - assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:]) - - -class TestSeterr(object): - def test_default(self): - err = np.geterr() - assert_equal(err, - dict(divide='warn', - invalid='warn', - over='warn', - under='ignore') - ) - - def test_set(self): - with np.errstate(): - err = np.seterr() - old = np.seterr(divide='print') - assert_(err == old) - new = np.seterr() - assert_(new['divide'] == 'print') - np.seterr(over='raise') - assert_(np.geterr()['over'] == 'raise') - assert_(new['divide'] == 'print') - np.seterr(**old) - assert_(np.geterr() == old) - - @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.") - def test_divide_err(self): - with np.errstate(divide='raise'): - with assert_raises(FloatingPointError): - np.array([1.]) / np.array([0.]) - - np.seterr(divide='ignore') - np.array([1.]) / np.array([0.]) - - def test_errobj(self): - olderrobj = np.geterrobj() - self.called = 0 - try: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - with np.errstate(divide='warn'): - np.seterrobj([20000, 1, None]) - np.array([1.]) / np.array([0.]) - assert_equal(len(w), 1) - - def log_err(*args): - self.called += 1 - extobj_err = args - assert_(len(extobj_err) == 2) - assert_("divide" in extobj_err[0]) - - with np.errstate(divide='ignore'): - np.seterrobj([20000, 3, log_err]) - np.array([1.]) / np.array([0.]) - assert_equal(self.called, 1) - - np.seterrobj(olderrobj) - with np.errstate(divide='ignore'): - np.divide(1., 0., extobj=[20000, 3, log_err]) - assert_equal(self.called, 2) - finally: - np.seterrobj(olderrobj) - del self.called - - def test_errobj_noerrmask(self): - # errmask = 0 has a special code path for the default - olderrobj = np.geterrobj() - try: - # set errobj to something non default - np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, - umath.ERR_DEFAULT + 1, None]) - # call a 
ufunc - np.isnan(np.array([6])) - # same with the default, lots of times to get rid of possible - # pre-existing stack in the code - for i in range(10000): - np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, umath.ERR_DEFAULT, - None]) - np.isnan(np.array([6])) - finally: - np.seterrobj(olderrobj) - - -class TestFloatExceptions(object): - def assert_raises_fpe(self, fpeerr, flop, x, y): - ftype = type(x) - try: - flop(x, y) - assert_(False, - "Type %s did not raise fpe error '%s'." % (ftype, fpeerr)) - except FloatingPointError as exc: - assert_(str(exc).find(fpeerr) >= 0, - "Type %s raised wrong fpe error '%s'." % (ftype, exc)) - - def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2): - # Check that fpe exception is raised. - # - # Given a floating operation `flop` and two scalar values, check that - # the operation raises the floating point exception specified by - # `fpeerr`. Tests all variants with 0-d array scalars as well. - - self.assert_raises_fpe(fpeerr, flop, sc1, sc2) - self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2) - self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()]) - self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()]) - - def test_floating_exceptions(self): - # Test basic arithmetic function errors - with np.errstate(all='raise'): - # Test for all real and complex float types - for typecode in np.typecodes['AllFloat']: - ftype = np.obj2sctype(typecode) - if np.dtype(ftype).kind == 'f': - # Get some extreme values for the type - fi = np.finfo(ftype) - ft_tiny = fi.tiny - ft_max = fi.max - ft_eps = fi.eps - underflow = 'underflow' - divbyzero = 'divide by zero' - else: - # 'c', complex, corresponding real dtype - rtype = type(ftype(0).real) - fi = np.finfo(rtype) - ft_tiny = ftype(fi.tiny) - ft_max = ftype(fi.max) - ft_eps = ftype(fi.eps) - # The complex types raise different exceptions - underflow = '' - divbyzero = '' - overflow = 'overflow' - invalid = 'invalid' - - self.assert_raises_fpe(underflow, - lambda a, b: a/b, ft_tiny, ft_max) - 
self.assert_raises_fpe(underflow, - lambda a, b: a*b, ft_tiny, ft_tiny) - self.assert_raises_fpe(overflow, - lambda a, b: a*b, ft_max, ftype(2)) - self.assert_raises_fpe(overflow, - lambda a, b: a/b, ft_max, ftype(0.5)) - self.assert_raises_fpe(overflow, - lambda a, b: a+b, ft_max, ft_max*ft_eps) - self.assert_raises_fpe(overflow, - lambda a, b: a-b, -ft_max, ft_max*ft_eps) - self.assert_raises_fpe(overflow, - np.power, ftype(2), ftype(2**fi.nexp)) - self.assert_raises_fpe(divbyzero, - lambda a, b: a/b, ftype(1), ftype(0)) - self.assert_raises_fpe(invalid, - lambda a, b: a/b, ftype(np.inf), ftype(np.inf)) - self.assert_raises_fpe(invalid, - lambda a, b: a/b, ftype(0), ftype(0)) - self.assert_raises_fpe(invalid, - lambda a, b: a-b, ftype(np.inf), ftype(np.inf)) - self.assert_raises_fpe(invalid, - lambda a, b: a+b, ftype(np.inf), ftype(-np.inf)) - self.assert_raises_fpe(invalid, - lambda a, b: a*b, ftype(0), ftype(np.inf)) - - def test_warnings(self): - # test warning code path - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - with np.errstate(all="warn"): - np.divide(1, 0.) - assert_equal(len(w), 1) - assert_("divide by zero" in str(w[0].message)) - np.array(1e300) * np.array(1e300) - assert_equal(len(w), 2) - assert_("overflow" in str(w[-1].message)) - np.array(np.inf) - np.array(np.inf) - assert_equal(len(w), 3) - assert_("invalid value" in str(w[-1].message)) - np.array(1e-300) * np.array(1e-300) - assert_equal(len(w), 4) - assert_("underflow" in str(w[-1].message)) - - -class TestTypes(object): - def check_promotion_cases(self, promote_func): - # tests that the scalars get coerced correctly. 
- b = np.bool_(0) - i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0) - u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0) - f32, f64, fld = np.float32(0), np.float64(0), np.longdouble(0) - c64, c128, cld = np.complex64(0), np.complex128(0), np.clongdouble(0) - - # coercion within the same kind - assert_equal(promote_func(i8, i16), np.dtype(np.int16)) - assert_equal(promote_func(i32, i8), np.dtype(np.int32)) - assert_equal(promote_func(i16, i64), np.dtype(np.int64)) - assert_equal(promote_func(u8, u32), np.dtype(np.uint32)) - assert_equal(promote_func(f32, f64), np.dtype(np.float64)) - assert_equal(promote_func(fld, f32), np.dtype(np.longdouble)) - assert_equal(promote_func(f64, fld), np.dtype(np.longdouble)) - assert_equal(promote_func(c128, c64), np.dtype(np.complex128)) - assert_equal(promote_func(cld, c128), np.dtype(np.clongdouble)) - assert_equal(promote_func(c64, fld), np.dtype(np.clongdouble)) - - # coercion between kinds - assert_equal(promote_func(b, i32), np.dtype(np.int32)) - assert_equal(promote_func(b, u8), np.dtype(np.uint8)) - assert_equal(promote_func(i8, u8), np.dtype(np.int16)) - assert_equal(promote_func(u8, i32), np.dtype(np.int32)) - assert_equal(promote_func(i64, u32), np.dtype(np.int64)) - assert_equal(promote_func(u64, i32), np.dtype(np.float64)) - assert_equal(promote_func(i32, f32), np.dtype(np.float64)) - assert_equal(promote_func(i64, f32), np.dtype(np.float64)) - assert_equal(promote_func(f32, i16), np.dtype(np.float32)) - assert_equal(promote_func(f32, u32), np.dtype(np.float64)) - assert_equal(promote_func(f32, c64), np.dtype(np.complex64)) - assert_equal(promote_func(c128, f32), np.dtype(np.complex128)) - assert_equal(promote_func(cld, f64), np.dtype(np.clongdouble)) - - # coercion between scalars and 1-D arrays - assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8)) - assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8)) - assert_equal(promote_func(np.array([b]), 
i32), np.dtype(np.int32)) - assert_equal(promote_func(np.array([b]), u32), np.dtype(np.uint32)) - assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int8)) - assert_equal(promote_func(u64, np.array([i32])), np.dtype(np.int32)) - assert_equal(promote_func(i64, np.array([u32])), np.dtype(np.uint32)) - assert_equal(promote_func(np.int32(-1), np.array([u64])), - np.dtype(np.float64)) - assert_equal(promote_func(f64, np.array([f32])), np.dtype(np.float32)) - assert_equal(promote_func(fld, np.array([f32])), np.dtype(np.float32)) - assert_equal(promote_func(np.array([f64]), fld), np.dtype(np.float64)) - assert_equal(promote_func(fld, np.array([c64])), - np.dtype(np.complex64)) - assert_equal(promote_func(c64, np.array([f64])), - np.dtype(np.complex128)) - assert_equal(promote_func(np.complex64(3j), np.array([f64])), - np.dtype(np.complex128)) - - # coercion between scalars and 1-D arrays, where - # the scalar has greater kind than the array - assert_equal(promote_func(np.array([b]), f64), np.dtype(np.float64)) - assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64)) - assert_equal(promote_func(np.array([b]), u64), np.dtype(np.uint64)) - assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64)) - assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64)) - - # uint and int are treated as the same "kind" for - # the purposes of array-scalar promotion. - assert_equal(promote_func(np.array([u16]), i32), np.dtype(np.uint16)) - - # float and complex are treated as the same "kind" for - # the purposes of array-scalar promotion, so that you can do - # (0j + float32array) to get a complex64 array instead of - # a complex128 array. 
- assert_equal(promote_func(np.array([f32]), c128), - np.dtype(np.complex64)) - - def test_coercion(self): - def res_type(a, b): - return np.add(a, b).dtype - - self.check_promotion_cases(res_type) - - # Use-case: float/complex scalar * bool/int8 array - # shouldn't narrow the float/complex type - for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]: - b = 1.234 * a - assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype) - b = np.longdouble(1.234) * a - assert_equal(b.dtype, np.dtype(np.longdouble), - "array type %s" % a.dtype) - b = np.float64(1.234) * a - assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype) - b = np.float32(1.234) * a - assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype) - b = np.float16(1.234) * a - assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype) - - b = 1.234j * a - assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype) - b = np.clongdouble(1.234j) * a - assert_equal(b.dtype, np.dtype(np.clongdouble), - "array type %s" % a.dtype) - b = np.complex128(1.234j) * a - assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype) - b = np.complex64(1.234j) * a - assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype) - - # The following use-case is problematic, and to resolve its - # tricky side-effects requires more changes. 
- # - # Use-case: (1-t)*a, where 't' is a boolean array and 'a' is - # a float32, shouldn't promote to float64 - # - # a = np.array([1.0, 1.5], dtype=np.float32) - # t = np.array([True, False]) - # b = t*a - # assert_equal(b, [1.0, 0.0]) - # assert_equal(b.dtype, np.dtype('f4')) - # b = (1-t)*a - # assert_equal(b, [0.0, 1.5]) - # assert_equal(b.dtype, np.dtype('f4')) - # - # Probably ~t (bitwise negation) is more proper to use here, - # but this is arguably less intuitive to understand at a glance, and - # would fail if 't' is actually an integer array instead of boolean: - # - # b = (~t)*a - # assert_equal(b, [0.0, 1.5]) - # assert_equal(b.dtype, np.dtype('f4')) - - def test_result_type(self): - self.check_promotion_cases(np.result_type) - assert_(np.result_type(None) == np.dtype(None)) - - def test_promote_types_endian(self): - # promote_types should always return native-endian types - assert_equal(np.promote_types('i8', '>i8'), np.dtype('i8')) - - assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21')) - assert_equal(np.promote_types('U16', '>i8'), np.dtype('U21')) - assert_equal(np.promote_types('S5', '>U8'), np.dtype('U8')) - assert_equal(np.promote_types('U8', '>S5'), np.dtype('U8')) - assert_equal(np.promote_types('U8', '>U5'), np.dtype('U8')) - - assert_equal(np.promote_types('M8', '>M8'), np.dtype('M8')) - assert_equal(np.promote_types('m8', '>m8'), np.dtype('m8')) - - def test_promote_types_strings(self): - assert_equal(np.promote_types('bool', 'S'), np.dtype('S5')) - assert_equal(np.promote_types('b', 'S'), np.dtype('S4')) - assert_equal(np.promote_types('u1', 'S'), np.dtype('S3')) - assert_equal(np.promote_types('u2', 'S'), np.dtype('S5')) - assert_equal(np.promote_types('u4', 'S'), np.dtype('S10')) - assert_equal(np.promote_types('u8', 'S'), np.dtype('S20')) - assert_equal(np.promote_types('i1', 'S'), np.dtype('S4')) - assert_equal(np.promote_types('i2', 'S'), np.dtype('S6')) - assert_equal(np.promote_types('i4', 'S'), np.dtype('S11')) - 
assert_equal(np.promote_types('i8', 'S'), np.dtype('S21')) - assert_equal(np.promote_types('bool', 'U'), np.dtype('U5')) - assert_equal(np.promote_types('b', 'U'), np.dtype('U4')) - assert_equal(np.promote_types('u1', 'U'), np.dtype('U3')) - assert_equal(np.promote_types('u2', 'U'), np.dtype('U5')) - assert_equal(np.promote_types('u4', 'U'), np.dtype('U10')) - assert_equal(np.promote_types('u8', 'U'), np.dtype('U20')) - assert_equal(np.promote_types('i1', 'U'), np.dtype('U4')) - assert_equal(np.promote_types('i2', 'U'), np.dtype('U6')) - assert_equal(np.promote_types('i4', 'U'), np.dtype('U11')) - assert_equal(np.promote_types('i8', 'U'), np.dtype('U21')) - assert_equal(np.promote_types('bool', 'S1'), np.dtype('S5')) - assert_equal(np.promote_types('bool', 'S30'), np.dtype('S30')) - assert_equal(np.promote_types('b', 'S1'), np.dtype('S4')) - assert_equal(np.promote_types('b', 'S30'), np.dtype('S30')) - assert_equal(np.promote_types('u1', 'S1'), np.dtype('S3')) - assert_equal(np.promote_types('u1', 'S30'), np.dtype('S30')) - assert_equal(np.promote_types('u2', 'S1'), np.dtype('S5')) - assert_equal(np.promote_types('u2', 'S30'), np.dtype('S30')) - assert_equal(np.promote_types('u4', 'S1'), np.dtype('S10')) - assert_equal(np.promote_types('u4', 'S30'), np.dtype('S30')) - assert_equal(np.promote_types('u8', 'S1'), np.dtype('S20')) - assert_equal(np.promote_types('u8', 'S30'), np.dtype('S30')) - - def test_can_cast(self): - assert_(np.can_cast(np.int32, np.int64)) - assert_(np.can_cast(np.float64, complex)) - assert_(not np.can_cast(complex, float)) - - assert_(np.can_cast('i8', 'f8')) - assert_(not np.can_cast('i8', 'f4')) - assert_(np.can_cast('i4', 'S11')) - - assert_(np.can_cast('i8', 'i8', 'no')) - assert_(not np.can_cast('i8', 'no')) - - assert_(np.can_cast('i8', 'equiv')) - assert_(not np.can_cast('i8', 'equiv')) - - assert_(np.can_cast('i8', 'safe')) - assert_(not np.can_cast('i4', 'safe')) - - assert_(np.can_cast('i4', 'same_kind')) - assert_(not 
np.can_cast('u4', 'same_kind')) - - assert_(np.can_cast('u4', 'unsafe')) - - assert_(np.can_cast('bool', 'S5')) - assert_(not np.can_cast('bool', 'S4')) - - assert_(np.can_cast('b', 'S4')) - assert_(not np.can_cast('b', 'S3')) - - assert_(np.can_cast('u1', 'S3')) - assert_(not np.can_cast('u1', 'S2')) - assert_(np.can_cast('u2', 'S5')) - assert_(not np.can_cast('u2', 'S4')) - assert_(np.can_cast('u4', 'S10')) - assert_(not np.can_cast('u4', 'S9')) - assert_(np.can_cast('u8', 'S20')) - assert_(not np.can_cast('u8', 'S19')) - - assert_(np.can_cast('i1', 'S4')) - assert_(not np.can_cast('i1', 'S3')) - assert_(np.can_cast('i2', 'S6')) - assert_(not np.can_cast('i2', 'S5')) - assert_(np.can_cast('i4', 'S11')) - assert_(not np.can_cast('i4', 'S10')) - assert_(np.can_cast('i8', 'S21')) - assert_(not np.can_cast('i8', 'S20')) - - assert_(np.can_cast('bool', 'S5')) - assert_(not np.can_cast('bool', 'S4')) - - assert_(np.can_cast('b', 'U4')) - assert_(not np.can_cast('b', 'U3')) - - assert_(np.can_cast('u1', 'U3')) - assert_(not np.can_cast('u1', 'U2')) - assert_(np.can_cast('u2', 'U5')) - assert_(not np.can_cast('u2', 'U4')) - assert_(np.can_cast('u4', 'U10')) - assert_(not np.can_cast('u4', 'U9')) - assert_(np.can_cast('u8', 'U20')) - assert_(not np.can_cast('u8', 'U19')) - - assert_(np.can_cast('i1', 'U4')) - assert_(not np.can_cast('i1', 'U3')) - assert_(np.can_cast('i2', 'U6')) - assert_(not np.can_cast('i2', 'U5')) - assert_(np.can_cast('i4', 'U11')) - assert_(not np.can_cast('i4', 'U10')) - assert_(np.can_cast('i8', 'U21')) - assert_(not np.can_cast('i8', 'U20')) - - assert_raises(TypeError, np.can_cast, 'i4', None) - assert_raises(TypeError, np.can_cast, None, 'i4') - - # Also test keyword arguments - assert_(np.can_cast(from_=np.int32, to=np.int64)) - - def test_can_cast_simple_to_structured(self): - # Non-structured can only be cast to structured in 'unsafe' mode. 
- assert_(not np.can_cast('i4', 'i4,i4')) - assert_(not np.can_cast('i4', 'i4,i2')) - assert_(np.can_cast('i4', 'i4,i4', casting='unsafe')) - assert_(np.can_cast('i4', 'i4,i2', casting='unsafe')) - # Even if there is just a single field which is OK. - assert_(not np.can_cast('i2', [('f1', 'i4')])) - assert_(not np.can_cast('i2', [('f1', 'i4')], casting='same_kind')) - assert_(np.can_cast('i2', [('f1', 'i4')], casting='unsafe')) - # It should be the same for recursive structured or subarrays. - assert_(not np.can_cast('i2', [('f1', 'i4,i4')])) - assert_(np.can_cast('i2', [('f1', 'i4,i4')], casting='unsafe')) - assert_(not np.can_cast('i2', [('f1', '(2,3)i4')])) - assert_(np.can_cast('i2', [('f1', '(2,3)i4')], casting='unsafe')) - - def test_can_cast_structured_to_simple(self): - # Need unsafe casting for structured to simple. - assert_(not np.can_cast([('f1', 'i4')], 'i4')) - assert_(np.can_cast([('f1', 'i4')], 'i4', casting='unsafe')) - assert_(np.can_cast([('f1', 'i4')], 'i2', casting='unsafe')) - # Since it is unclear what is being cast, multiple fields to - # single should not work even for unsafe casting. - assert_(not np.can_cast('i4,i4', 'i4', casting='unsafe')) - # But a single field inside a single field is OK. - assert_(not np.can_cast([('f1', [('x', 'i4')])], 'i4')) - assert_(np.can_cast([('f1', [('x', 'i4')])], 'i4', casting='unsafe')) - # And a subarray is fine too - it will just take the first element - # (arguably not very consistently; might also take the first field). - assert_(not np.can_cast([('f0', '(3,)i4')], 'i4')) - assert_(np.can_cast([('f0', '(3,)i4')], 'i4', casting='unsafe')) - # But a structured subarray with multiple fields should fail. 
- assert_(not np.can_cast([('f0', ('i4,i4'), (2,))], 'i4', - casting='unsafe')) - - def test_can_cast_values(self): - # gh-5917 - for dt in np.sctypes['int'] + np.sctypes['uint']: - ii = np.iinfo(dt) - assert_(np.can_cast(ii.min, dt)) - assert_(np.can_cast(ii.max, dt)) - assert_(not np.can_cast(ii.min - 1, dt)) - assert_(not np.can_cast(ii.max + 1, dt)) - - for dt in np.sctypes['float']: - fi = np.finfo(dt) - assert_(np.can_cast(fi.min, dt)) - assert_(np.can_cast(fi.max, dt)) - - -# Custom exception class to test exception propagation in fromiter -class NIterError(Exception): - pass - - -class TestFromiter(object): - def makegen(self): - for x in range(24): - yield x**2 - - def test_types(self): - ai32 = np.fromiter(self.makegen(), np.int32) - ai64 = np.fromiter(self.makegen(), np.int64) - af = np.fromiter(self.makegen(), float) - assert_(ai32.dtype == np.dtype(np.int32)) - assert_(ai64.dtype == np.dtype(np.int64)) - assert_(af.dtype == np.dtype(float)) - - def test_lengths(self): - expected = np.array(list(self.makegen())) - a = np.fromiter(self.makegen(), int) - a20 = np.fromiter(self.makegen(), int, 20) - assert_(len(a) == len(expected)) - assert_(len(a20) == 20) - assert_raises(ValueError, np.fromiter, - self.makegen(), int, len(expected) + 10) - - def test_values(self): - expected = np.array(list(self.makegen())) - a = np.fromiter(self.makegen(), int) - a20 = np.fromiter(self.makegen(), int, 20) - assert_(np.alltrue(a == expected, axis=0)) - assert_(np.alltrue(a20 == expected[:20], axis=0)) - - def load_data(self, n, eindex): - # Utility method for the issue 2592 tests. - # Raise an exception at the desired index in the iterator. - for e in range(n): - if e == eindex: - raise NIterError('error at index %s' % eindex) - yield e - - def test_2592(self): - # Test iteration exceptions are correctly raised. 
- count, eindex = 10, 5 - assert_raises(NIterError, np.fromiter, - self.load_data(count, eindex), dtype=int, count=count) - - def test_2592_edge(self): - # Test iter. exceptions, edge case (exception at end of iterator). - count = 10 - eindex = count-1 - assert_raises(NIterError, np.fromiter, - self.load_data(count, eindex), dtype=int, count=count) - - -class TestNonzero(object): - def test_nonzero_trivial(self): - assert_equal(np.count_nonzero(np.array([])), 0) - assert_equal(np.count_nonzero(np.array([], dtype='?')), 0) - assert_equal(np.nonzero(np.array([])), ([],)) - - assert_equal(np.count_nonzero(np.array([0])), 0) - assert_equal(np.count_nonzero(np.array([0], dtype='?')), 0) - assert_equal(np.nonzero(np.array([0])), ([],)) - - assert_equal(np.count_nonzero(np.array([1])), 1) - assert_equal(np.count_nonzero(np.array([1], dtype='?')), 1) - assert_equal(np.nonzero(np.array([1])), ([0],)) - - def test_nonzero_zerod(self): - assert_equal(np.count_nonzero(np.array(0)), 0) - assert_equal(np.count_nonzero(np.array(0, dtype='?')), 0) - with assert_warns(DeprecationWarning): - assert_equal(np.nonzero(np.array(0)), ([],)) - - assert_equal(np.count_nonzero(np.array(1)), 1) - assert_equal(np.count_nonzero(np.array(1, dtype='?')), 1) - with assert_warns(DeprecationWarning): - assert_equal(np.nonzero(np.array(1)), ([0],)) - - def test_nonzero_onedim(self): - x = np.array([1, 0, 2, -1, 0, 0, 8]) - assert_equal(np.count_nonzero(x), 4) - assert_equal(np.count_nonzero(x), 4) - assert_equal(np.nonzero(x), ([0, 2, 3, 6],)) - - x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)], - dtype=[('a', 'i4'), ('b', 'i2')]) - assert_equal(np.count_nonzero(x['a']), 3) - assert_equal(np.count_nonzero(x['b']), 4) - assert_equal(np.nonzero(x['a']), ([0, 2, 3],)) - assert_equal(np.nonzero(x['b']), ([0, 2, 3, 4],)) - - def test_nonzero_twodim(self): - x = np.array([[0, 1, 0], [2, 0, 3]]) - assert_equal(np.count_nonzero(x), 3) - assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2])) - - x = 
np.eye(3) - assert_equal(np.count_nonzero(x), 3) - assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2])) - - x = np.array([[(0, 1), (0, 0), (1, 11)], - [(1, 1), (1, 0), (0, 0)], - [(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')]) - assert_equal(np.count_nonzero(x['a']), 4) - assert_equal(np.count_nonzero(x['b']), 5) - assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1])) - assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2])) - - assert_(not x['a'].T.flags.aligned) - assert_equal(np.count_nonzero(x['a'].T), 4) - assert_equal(np.count_nonzero(x['b'].T), 5) - assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0])) - assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2])) - - def test_sparse(self): - # test special sparse condition boolean code path - for i in range(20): - c = np.zeros(200, dtype=bool) - c[i::20] = True - assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20)) - - c = np.zeros(400, dtype=bool) - c[10 + i:20 + i] = True - c[20 + i*2] = True - assert_equal(np.nonzero(c)[0], - np.concatenate((np.arange(10 + i, 20 + i), [20 + i*2]))) - - def test_return_type(self): - class C(np.ndarray): - pass - - for view in (C, np.ndarray): - for nd in range(1, 4): - shape = tuple(range(2, 2+nd)) - x = np.arange(np.prod(shape)).reshape(shape).view(view) - for nzx in (np.nonzero(x), x.nonzero()): - for nzx_i in nzx: - assert_(type(nzx_i) is np.ndarray) - assert_(nzx_i.flags.writeable) - - def test_count_nonzero_axis(self): - # Basic check of functionality - m = np.array([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]]) - - expected = np.array([1, 1, 1, 1, 1]) - assert_equal(np.count_nonzero(m, axis=0), expected) - - expected = np.array([2, 3]) - assert_equal(np.count_nonzero(m, axis=1), expected) - - assert_raises(ValueError, np.count_nonzero, m, axis=(1, 1)) - assert_raises(TypeError, np.count_nonzero, m, axis='foo') - assert_raises(np.AxisError, np.count_nonzero, m, axis=3) - assert_raises(TypeError, np.count_nonzero, - m, 
axis=np.array([[1], [2]])) - - def test_count_nonzero_axis_all_dtypes(self): - # More thorough test that the axis argument is respected - # for all dtypes and responds correctly when presented with - # either integer or tuple arguments for axis - msg = "Mismatch for dtype: %s" - - def assert_equal_w_dt(a, b, err_msg): - assert_equal(a.dtype, b.dtype, err_msg=err_msg) - assert_equal(a, b, err_msg=err_msg) - - for dt in np.typecodes['All']: - err_msg = msg % (np.dtype(dt).name,) - - if dt != 'V': - if dt != 'M': - m = np.zeros((3, 3), dtype=dt) - n = np.ones(1, dtype=dt) - - m[0, 0] = n[0] - m[1, 0] = n[0] - - else: # np.zeros doesn't work for np.datetime64 - m = np.array(['1970-01-01'] * 9) - m = m.reshape((3, 3)) - - m[0, 0] = '1970-01-12' - m[1, 0] = '1970-01-12' - m = m.astype(dt) - - expected = np.array([2, 0, 0], dtype=np.intp) - assert_equal_w_dt(np.count_nonzero(m, axis=0), - expected, err_msg=err_msg) - - expected = np.array([1, 1, 0], dtype=np.intp) - assert_equal_w_dt(np.count_nonzero(m, axis=1), - expected, err_msg=err_msg) - - expected = np.array(2) - assert_equal(np.count_nonzero(m, axis=(0, 1)), - expected, err_msg=err_msg) - assert_equal(np.count_nonzero(m, axis=None), - expected, err_msg=err_msg) - assert_equal(np.count_nonzero(m), - expected, err_msg=err_msg) - - if dt == 'V': - # There are no 'nonzero' objects for np.void, so the testing - # setup is slightly different for this dtype - m = np.array([np.void(1)] * 6).reshape((2, 3)) - - expected = np.array([0, 0, 0], dtype=np.intp) - assert_equal_w_dt(np.count_nonzero(m, axis=0), - expected, err_msg=err_msg) - - expected = np.array([0, 0], dtype=np.intp) - assert_equal_w_dt(np.count_nonzero(m, axis=1), - expected, err_msg=err_msg) - - expected = np.array(0) - assert_equal(np.count_nonzero(m, axis=(0, 1)), - expected, err_msg=err_msg) - assert_equal(np.count_nonzero(m, axis=None), - expected, err_msg=err_msg) - assert_equal(np.count_nonzero(m), - expected, err_msg=err_msg) - - def 
test_count_nonzero_axis_consistent(self): - # Check that the axis behaviour for valid axes in - # non-special cases is consistent (and therefore - # correct) by checking it against an integer array - # that is then casted to the generic object dtype - from itertools import combinations, permutations - - axis = (0, 1, 2, 3) - size = (5, 5, 5, 5) - msg = "Mismatch for axis: %s" - - rng = np.random.RandomState(1234) - m = rng.randint(-100, 100, size=size) - n = m.astype(object) - - for length in range(len(axis)): - for combo in combinations(axis, length): - for perm in permutations(combo): - assert_equal( - np.count_nonzero(m, axis=perm), - np.count_nonzero(n, axis=perm), - err_msg=msg % (perm,)) - - def test_countnonzero_axis_empty(self): - a = np.array([[0, 0, 1], [1, 0, 1]]) - assert_equal(np.count_nonzero(a, axis=()), a.astype(bool)) - - def test_array_method(self): - # Tests that the array method - # call to nonzero works - m = np.array([[1, 0, 0], [4, 0, 6]]) - tgt = [[0, 1, 1], [0, 0, 2]] - - assert_equal(m.nonzero(), tgt) - - def test_nonzero_invalid_object(self): - # gh-9295 - a = np.array([np.array([1, 2]), 3]) - assert_raises(ValueError, np.nonzero, a) - - class BoolErrors: - def __bool__(self): - raise ValueError("Not allowed") - def __nonzero__(self): - raise ValueError("Not allowed") - - assert_raises(ValueError, np.nonzero, np.array([BoolErrors()])) - - def test_nonzero_sideeffect_safety(self): - # gh-13631 - class FalseThenTrue: - _val = False - def __bool__(self): - try: - return self._val - finally: - self._val = True - - class TrueThenFalse: - _val = True - def __bool__(self): - try: - return self._val - finally: - self._val = False - - # result grows on the second pass - a = np.array([True, FalseThenTrue()]) - assert_raises(RuntimeError, np.nonzero, a) - - a = np.array([[True], [FalseThenTrue()]]) - assert_raises(RuntimeError, np.nonzero, a) - - # result shrinks on the second pass - a = np.array([False, TrueThenFalse()]) - 
assert_raises(RuntimeError, np.nonzero, a) - - a = np.array([[False], [TrueThenFalse()]]) - assert_raises(RuntimeError, np.nonzero, a) - - def test_nonzero_exception_safe(self): - # gh-13930 - - class ThrowsAfter: - def __init__(self, iters): - self.iters_left = iters - - def __bool__(self): - if self.iters_left == 0: - raise ValueError("called `iters` times") - - self.iters_left -= 1 - return True - - """ - Test that a ValueError is raised instead of a SystemError - - If the __bool__ function is called after the error state is set, - Python (cpython) will raise a SystemError. - """ - - # assert that an exception in first pass is handled correctly - a = np.array([ThrowsAfter(5)]*10) - assert_raises(ValueError, np.nonzero, a) - - # raise exception in second pass for 1-dimensional loop - a = np.array([ThrowsAfter(15)]*10) - assert_raises(ValueError, np.nonzero, a) - - # raise exception in second pass for n-dimensional loop - a = np.array([[ThrowsAfter(15)]]*10) - assert_raises(ValueError, np.nonzero, a) - - -class TestIndex(object): - def test_boolean(self): - a = rand(3, 5, 8) - V = rand(5, 8) - g1 = randint(0, 5, size=15) - g2 = randint(0, 8, size=15) - V[g1, g2] = -V[g1, g2] - assert_((np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all()) - - def test_boolean_edgecase(self): - a = np.array([], dtype='int32') - b = np.array([], dtype='bool') - c = a[b] - assert_equal(c, []) - assert_equal(c.dtype, np.dtype('int32')) - - -class TestBinaryRepr(object): - def test_zero(self): - assert_equal(np.binary_repr(0), '0') - - def test_positive(self): - assert_equal(np.binary_repr(10), '1010') - assert_equal(np.binary_repr(12522), - '11000011101010') - assert_equal(np.binary_repr(10736848), - '101000111101010011010000') - - def test_negative(self): - assert_equal(np.binary_repr(-1), '-1') - assert_equal(np.binary_repr(-10), '-1010') - assert_equal(np.binary_repr(-12522), - '-11000011101010') - assert_equal(np.binary_repr(-10736848), - 
'-101000111101010011010000') - - def test_sufficient_width(self): - assert_equal(np.binary_repr(0, width=5), '00000') - assert_equal(np.binary_repr(10, width=7), '0001010') - assert_equal(np.binary_repr(-5, width=7), '1111011') - - def test_neg_width_boundaries(self): - # see gh-8670 - - # Ensure that the example in the issue does not - # break before proceeding to a more thorough test. - assert_equal(np.binary_repr(-128, width=8), '10000000') - - for width in range(1, 11): - num = -2**(width - 1) - exp = '1' + (width - 1) * '0' - assert_equal(np.binary_repr(num, width=width), exp) - - def test_large_neg_int64(self): - # See gh-14289. - assert_equal(np.binary_repr(np.int64(-2**62), width=64), - '11' + '0'*62) - - -class TestBaseRepr(object): - def test_base3(self): - assert_equal(np.base_repr(3**5, 3), '100000') - - def test_positive(self): - assert_equal(np.base_repr(12, 10), '12') - assert_equal(np.base_repr(12, 10, 4), '000012') - assert_equal(np.base_repr(12, 4), '30') - assert_equal(np.base_repr(3731624803700888, 36), '10QR0ROFCEW') - - def test_negative(self): - assert_equal(np.base_repr(-12, 10), '-12') - assert_equal(np.base_repr(-12, 10, 4), '-000012') - assert_equal(np.base_repr(-12, 4), '-30') - - def test_base_range(self): - with assert_raises(ValueError): - np.base_repr(1, 1) - with assert_raises(ValueError): - np.base_repr(1, 37) - - -class TestArrayComparisons(object): - def test_array_equal(self): - res = np.array_equal(np.array([1, 2]), np.array([1, 2])) - assert_(res) - assert_(type(res) is bool) - res = np.array_equal(np.array([1, 2]), np.array([1, 2, 3])) - assert_(not res) - assert_(type(res) is bool) - res = np.array_equal(np.array([1, 2]), np.array([3, 4])) - assert_(not res) - assert_(type(res) is bool) - res = np.array_equal(np.array([1, 2]), np.array([1, 3])) - assert_(not res) - assert_(type(res) is bool) - res = np.array_equal(np.array(['a'], dtype='S1'), np.array(['a'], dtype='S1')) - assert_(res) - assert_(type(res) is bool) - res = 
np.array_equal(np.array([('a', 1)], dtype='S1,u4'), - np.array([('a', 1)], dtype='S1,u4')) - assert_(res) - assert_(type(res) is bool) - - def test_none_compares_elementwise(self): - a = np.array([None, 1, None], dtype=object) - assert_equal(a == None, [True, False, True]) - assert_equal(a != None, [False, True, False]) - - a = np.ones(3) - assert_equal(a == None, [False, False, False]) - assert_equal(a != None, [True, True, True]) - - def test_array_equiv(self): - res = np.array_equiv(np.array([1, 2]), np.array([1, 2])) - assert_(res) - assert_(type(res) is bool) - res = np.array_equiv(np.array([1, 2]), np.array([1, 2, 3])) - assert_(not res) - assert_(type(res) is bool) - res = np.array_equiv(np.array([1, 2]), np.array([3, 4])) - assert_(not res) - assert_(type(res) is bool) - res = np.array_equiv(np.array([1, 2]), np.array([1, 3])) - assert_(not res) - assert_(type(res) is bool) - - res = np.array_equiv(np.array([1, 1]), np.array([1])) - assert_(res) - assert_(type(res) is bool) - res = np.array_equiv(np.array([1, 1]), np.array([[1], [1]])) - assert_(res) - assert_(type(res) is bool) - res = np.array_equiv(np.array([1, 2]), np.array([2])) - assert_(not res) - assert_(type(res) is bool) - res = np.array_equiv(np.array([1, 2]), np.array([[1], [2]])) - assert_(not res) - assert_(type(res) is bool) - res = np.array_equiv(np.array([1, 2]), np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])) - assert_(not res) - assert_(type(res) is bool) - - -def assert_array_strict_equal(x, y): - assert_array_equal(x, y) - # Check flags, 32 bit arches typically don't provide 16 byte alignment - if ((x.dtype.alignment <= 8 or - np.intp().dtype.itemsize != 4) and - sys.platform != 'win32'): - assert_(x.flags == y.flags) - else: - assert_(x.flags.owndata == y.flags.owndata) - assert_(x.flags.writeable == y.flags.writeable) - assert_(x.flags.c_contiguous == y.flags.c_contiguous) - assert_(x.flags.f_contiguous == y.flags.f_contiguous) - assert_(x.flags.writebackifcopy == 
y.flags.writebackifcopy) - # check endianness - assert_(x.dtype.isnative == y.dtype.isnative) - - -class TestClip(object): - def setup(self): - self.nr = 5 - self.nc = 3 - - def fastclip(self, a, m, M, out=None, casting=None): - if out is None: - if casting is None: - return a.clip(m, M) - else: - return a.clip(m, M, casting=casting) - else: - if casting is None: - return a.clip(m, M, out) - else: - return a.clip(m, M, out, casting=casting) - - def clip(self, a, m, M, out=None): - # use slow-clip - selector = np.less(a, m) + 2*np.greater(a, M) - return selector.choose((a, m, M), out=out) - - # Handy functions - def _generate_data(self, n, m): - return randn(n, m) - - def _generate_data_complex(self, n, m): - return randn(n, m) + 1.j * rand(n, m) - - def _generate_flt_data(self, n, m): - return (randn(n, m)).astype(np.float32) - - def _neg_byteorder(self, a): - a = np.asarray(a) - if sys.byteorder == 'little': - a = a.astype(a.dtype.newbyteorder('>')) - else: - a = a.astype(a.dtype.newbyteorder('<')) - return a - - def _generate_non_native_data(self, n, m): - data = randn(n, m) - data = self._neg_byteorder(data) - assert_(not data.dtype.isnative) - return data - - def _generate_int_data(self, n, m): - return (10 * rand(n, m)).astype(np.int64) - - def _generate_int32_data(self, n, m): - return (10 * rand(n, m)).astype(np.int32) - - # Now the real test cases - - @pytest.mark.parametrize("dtype", '?bhilqpBHILQPefdgFDGO') - def test_ones_pathological(self, dtype): - # for preservation of behavior described in - # gh-12519; amin > amax behavior may still change - # in the future - arr = np.ones(10, dtype=dtype) - expected = np.zeros(10, dtype=dtype) - actual = np.clip(arr, 1, 0) - if dtype == 'O': - assert actual.tolist() == expected.tolist() - else: - assert_equal(actual, expected) - - def test_simple_double(self): - # Test native double input with scalar min/max. 
- a = self._generate_data(self.nr, self.nc) - m = 0.1 - M = 0.6 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_simple_int(self): - # Test native int input with scalar min/max. - a = self._generate_int_data(self.nr, self.nc) - a = a.astype(int) - m = -2 - M = 4 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_array_double(self): - # Test native double input with array min/max. - a = self._generate_data(self.nr, self.nc) - m = np.zeros(a.shape) - M = m + 0.5 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_simple_nonnative(self): - # Test non native double input with scalar min/max. - # Test native double input with non native double scalar min/max. - a = self._generate_non_native_data(self.nr, self.nc) - m = -0.5 - M = 0.6 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_equal(ac, act) - - # Test native double input with non native double scalar min/max. - a = self._generate_data(self.nr, self.nc) - m = -0.5 - M = self._neg_byteorder(0.6) - assert_(not M.dtype.isnative) - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_equal(ac, act) - - def test_simple_complex(self): - # Test native complex input with native double scalar min/max. - # Test native input with complex double scalar min/max. - a = 3 * self._generate_data_complex(self.nr, self.nc) - m = -0.5 - M = 1. - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - # Test native input with complex double scalar min/max. - a = 3 * self._generate_data(self.nr, self.nc) - m = -0.5 + 1.j - M = 1. 
+ 2.j - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_clip_complex(self): - # Address Issue gh-5354 for clipping complex arrays - # Test native complex input without explicit min/max - # ie, either min=None or max=None - a = np.ones(10, dtype=complex) - m = a.min() - M = a.max() - am = self.fastclip(a, m, None) - aM = self.fastclip(a, None, M) - assert_array_strict_equal(am, a) - assert_array_strict_equal(aM, a) - - def test_clip_non_contig(self): - # Test clip for non contiguous native input and native scalar min/max. - a = self._generate_data(self.nr * 2, self.nc * 3) - a = a[::2, ::3] - assert_(not a.flags['F_CONTIGUOUS']) - assert_(not a.flags['C_CONTIGUOUS']) - ac = self.fastclip(a, -1.6, 1.7) - act = self.clip(a, -1.6, 1.7) - assert_array_strict_equal(ac, act) - - def test_simple_out(self): - # Test native double input with scalar min/max. - a = self._generate_data(self.nr, self.nc) - m = -0.5 - M = 0.6 - ac = np.zeros(a.shape) - act = np.zeros(a.shape) - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - @pytest.mark.parametrize("casting", [None, "unsafe"]) - def test_simple_int32_inout(self, casting): - # Test native int32 input with double min/max and int32 out. - a = self._generate_int32_data(self.nr, self.nc) - m = np.float64(0) - M = np.float64(2) - ac = np.zeros(a.shape, dtype=np.int32) - act = ac.copy() - if casting is None: - with assert_warns(DeprecationWarning): - # NumPy 1.17.0, 2018-02-24 - casting is unsafe - self.fastclip(a, m, M, ac, casting=casting) - else: - # explicitly passing "unsafe" will silence warning - self.fastclip(a, m, M, ac, casting=casting) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_simple_int64_out(self): - # Test native int32 input with int32 scalar min/max and int64 out. 
- a = self._generate_int32_data(self.nr, self.nc) - m = np.int32(-1) - M = np.int32(1) - ac = np.zeros(a.shape, dtype=np.int64) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_simple_int64_inout(self): - # Test native int32 input with double array min/max and int32 out. - a = self._generate_int32_data(self.nr, self.nc) - m = np.zeros(a.shape, np.float64) - M = np.float64(1) - ac = np.zeros(a.shape, dtype=np.int32) - act = ac.copy() - with assert_warns(DeprecationWarning): - # NumPy 1.17.0, 2018-02-24 - casting is unsafe - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_simple_int32_out(self): - # Test native double input with scalar min/max and int out. - a = self._generate_data(self.nr, self.nc) - m = -1.0 - M = 2.0 - ac = np.zeros(a.shape, dtype=np.int32) - act = ac.copy() - with assert_warns(DeprecationWarning): - # NumPy 1.17.0, 2018-02-24 - casting is unsafe - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_simple_inplace_01(self): - # Test native double input with array min/max in-place. - a = self._generate_data(self.nr, self.nc) - ac = a.copy() - m = np.zeros(a.shape) - M = 1.0 - self.fastclip(a, m, M, a) - self.clip(a, m, M, ac) - assert_array_strict_equal(a, ac) - - def test_simple_inplace_02(self): - # Test native double input with scalar min/max in-place. - a = self._generate_data(self.nr, self.nc) - ac = a.copy() - m = -0.5 - M = 0.6 - self.fastclip(a, m, M, a) - self.clip(ac, m, M, ac) - assert_array_strict_equal(a, ac) - - def test_noncontig_inplace(self): - # Test non contiguous double input with double scalar min/max in-place. 
- a = self._generate_data(self.nr * 2, self.nc * 3) - a = a[::2, ::3] - assert_(not a.flags['F_CONTIGUOUS']) - assert_(not a.flags['C_CONTIGUOUS']) - ac = a.copy() - m = -0.5 - M = 0.6 - self.fastclip(a, m, M, a) - self.clip(ac, m, M, ac) - assert_array_equal(a, ac) - - def test_type_cast_01(self): - # Test native double input with scalar min/max. - a = self._generate_data(self.nr, self.nc) - m = -0.5 - M = 0.6 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_02(self): - # Test native int32 input with int32 scalar min/max. - a = self._generate_int_data(self.nr, self.nc) - a = a.astype(np.int32) - m = -2 - M = 4 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_03(self): - # Test native int32 input with float64 scalar min/max. - a = self._generate_int32_data(self.nr, self.nc) - m = -2 - M = 4 - ac = self.fastclip(a, np.float64(m), np.float64(M)) - act = self.clip(a, np.float64(m), np.float64(M)) - assert_array_strict_equal(ac, act) - - def test_type_cast_04(self): - # Test native int32 input with float32 scalar min/max. - a = self._generate_int32_data(self.nr, self.nc) - m = np.float32(-2) - M = np.float32(4) - act = self.fastclip(a, m, M) - ac = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_05(self): - # Test native int32 with double arrays min/max. - a = self._generate_int_data(self.nr, self.nc) - m = -0.5 - M = 1. - ac = self.fastclip(a, m * np.zeros(a.shape), M) - act = self.clip(a, m * np.zeros(a.shape), M) - assert_array_strict_equal(ac, act) - - def test_type_cast_06(self): - # Test native with NON native scalar min/max. - a = self._generate_data(self.nr, self.nc) - m = 0.5 - m_s = self._neg_byteorder(m) - M = 1. - act = self.clip(a, m_s, M) - ac = self.fastclip(a, m_s, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_07(self): - # Test NON native with native array min/max. 
- a = self._generate_data(self.nr, self.nc) - m = -0.5 * np.ones(a.shape) - M = 1. - a_s = self._neg_byteorder(a) - assert_(not a_s.dtype.isnative) - act = a_s.clip(m, M) - ac = self.fastclip(a_s, m, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_08(self): - # Test NON native with native scalar min/max. - a = self._generate_data(self.nr, self.nc) - m = -0.5 - M = 1. - a_s = self._neg_byteorder(a) - assert_(not a_s.dtype.isnative) - ac = self.fastclip(a_s, m, M) - act = a_s.clip(m, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_09(self): - # Test native with NON native array min/max. - a = self._generate_data(self.nr, self.nc) - m = -0.5 * np.ones(a.shape) - M = 1. - m_s = self._neg_byteorder(m) - assert_(not m_s.dtype.isnative) - ac = self.fastclip(a, m_s, M) - act = self.clip(a, m_s, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_10(self): - # Test native int32 with float min/max and float out for output argument. - a = self._generate_int_data(self.nr, self.nc) - b = np.zeros(a.shape, dtype=np.float32) - m = np.float32(-0.5) - M = np.float32(1) - act = self.clip(a, m, M, out=b) - ac = self.fastclip(a, m, M, out=b) - assert_array_strict_equal(ac, act) - - def test_type_cast_11(self): - # Test non native with native scalar, min/max, out non native - a = self._generate_non_native_data(self.nr, self.nc) - b = a.copy() - b = b.astype(b.dtype.newbyteorder('>')) - bt = b.copy() - m = -0.5 - M = 1. 
- self.fastclip(a, m, M, out=b) - self.clip(a, m, M, out=bt) - assert_array_strict_equal(b, bt) - - def test_type_cast_12(self): - # Test native int32 input and min/max and float out - a = self._generate_int_data(self.nr, self.nc) - b = np.zeros(a.shape, dtype=np.float32) - m = np.int32(0) - M = np.int32(1) - act = self.clip(a, m, M, out=b) - ac = self.fastclip(a, m, M, out=b) - assert_array_strict_equal(ac, act) - - def test_clip_with_out_simple(self): - # Test native double input with scalar min/max - a = self._generate_data(self.nr, self.nc) - m = -0.5 - M = 0.6 - ac = np.zeros(a.shape) - act = np.zeros(a.shape) - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_clip_with_out_simple2(self): - # Test native int32 input with double min/max and int32 out - a = self._generate_int32_data(self.nr, self.nc) - m = np.float64(0) - M = np.float64(2) - ac = np.zeros(a.shape, dtype=np.int32) - act = ac.copy() - with assert_warns(DeprecationWarning): - # NumPy 1.17.0, 2018-02-24 - casting is unsafe - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_clip_with_out_simple_int32(self): - # Test native int32 input with int32 scalar min/max and int64 out - a = self._generate_int32_data(self.nr, self.nc) - m = np.int32(-1) - M = np.int32(1) - ac = np.zeros(a.shape, dtype=np.int64) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_clip_with_out_array_int32(self): - # Test native int32 input with double array min/max and int32 out - a = self._generate_int32_data(self.nr, self.nc) - m = np.zeros(a.shape, np.float64) - M = np.float64(1) - ac = np.zeros(a.shape, dtype=np.int32) - act = ac.copy() - with assert_warns(DeprecationWarning): - # NumPy 1.17.0, 2018-02-24 - casting is unsafe - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def 
test_clip_with_out_array_outint32(self): - # Test native double input with scalar min/max and int out - a = self._generate_data(self.nr, self.nc) - m = -1.0 - M = 2.0 - ac = np.zeros(a.shape, dtype=np.int32) - act = ac.copy() - with assert_warns(DeprecationWarning): - # NumPy 1.17.0, 2018-02-24 - casting is unsafe - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_clip_with_out_transposed(self): - # Test that the out argument works when tranposed - a = np.arange(16).reshape(4, 4) - out = np.empty_like(a).T - a.clip(4, 10, out=out) - expected = self.clip(a, 4, 10) - assert_array_equal(out, expected) - - def test_clip_with_out_memory_overlap(self): - # Test that the out argument works when it has memory overlap - a = np.arange(16).reshape(4, 4) - ac = a.copy() - a[:-1].clip(4, 10, out=a[1:]) - expected = self.clip(ac[:-1], 4, 10) - assert_array_equal(a[1:], expected) - - def test_clip_inplace_array(self): - # Test native double input with array min/max - a = self._generate_data(self.nr, self.nc) - ac = a.copy() - m = np.zeros(a.shape) - M = 1.0 - self.fastclip(a, m, M, a) - self.clip(a, m, M, ac) - assert_array_strict_equal(a, ac) - - def test_clip_inplace_simple(self): - # Test native double input with scalar min/max - a = self._generate_data(self.nr, self.nc) - ac = a.copy() - m = -0.5 - M = 0.6 - self.fastclip(a, m, M, a) - self.clip(a, m, M, ac) - assert_array_strict_equal(a, ac) - - def test_clip_func_takes_out(self): - # Ensure that the clip() function takes an out=argument. - a = self._generate_data(self.nr, self.nc) - ac = a.copy() - m = -0.5 - M = 0.6 - a2 = np.clip(a, m, M, out=a) - self.clip(a, m, M, ac) - assert_array_strict_equal(a2, ac) - assert_(a2 is a) - - def test_clip_nan(self): - d = np.arange(7.) 
- with assert_warns(DeprecationWarning): - assert_equal(d.clip(min=np.nan), d) - with assert_warns(DeprecationWarning): - assert_equal(d.clip(max=np.nan), d) - with assert_warns(DeprecationWarning): - assert_equal(d.clip(min=np.nan, max=np.nan), d) - with assert_warns(DeprecationWarning): - assert_equal(d.clip(min=-2, max=np.nan), d) - with assert_warns(DeprecationWarning): - assert_equal(d.clip(min=np.nan, max=10), d) - - def test_object_clip(self): - a = np.arange(10, dtype=object) - actual = np.clip(a, 1, 5) - expected = np.array([1, 1, 2, 3, 4, 5, 5, 5, 5, 5]) - assert actual.tolist() == expected.tolist() - - def test_clip_all_none(self): - a = np.arange(10, dtype=object) - with assert_raises_regex(ValueError, 'max or min'): - np.clip(a, None, None) - - def test_clip_invalid_casting(self): - a = np.arange(10, dtype=object) - with assert_raises_regex(ValueError, - 'casting must be one of'): - self.fastclip(a, 1, 8, casting="garbage") - - @pytest.mark.parametrize("amin, amax", [ - # two scalars - (1, 0), - # mix scalar and array - (1, np.zeros(10)), - # two arrays - (np.ones(10), np.zeros(10)), - ]) - def test_clip_value_min_max_flip(self, amin, amax): - a = np.arange(10, dtype=np.int64) - # requirement from ufunc_docstrings.py - expected = np.minimum(np.maximum(a, amin), amax) - actual = np.clip(a, amin, amax) - assert_equal(actual, expected) - - @pytest.mark.parametrize("arr, amin, amax, exp", [ - # for a bug in npy_ObjectClip, based on a - # case produced by hypothesis - (np.zeros(10, dtype=np.int64), - 0, - -2**64+1, - np.full(10, -2**64+1, dtype=object)), - # for bugs in NPY_TIMEDELTA_MAX, based on a case - # produced by hypothesis - (np.zeros(10, dtype='m8') - 1, - 0, - 0, - np.zeros(10, dtype='m8')), - ]) - def test_clip_problem_cases(self, arr, amin, amax, exp): - actual = np.clip(arr, amin, amax) - assert_equal(actual, exp) - - @pytest.mark.xfail(reason="no scalar nan propagation yet") - @pytest.mark.parametrize("arr, amin, amax", [ - # problematic 
scalar nan case from hypothesis - (np.zeros(10, dtype=np.int64), - np.array(np.nan), - np.zeros(10, dtype=np.int32)), - ]) - def test_clip_scalar_nan_propagation(self, arr, amin, amax): - # enforcement of scalar nan propagation for comparisons - # called through clip() - expected = np.minimum(np.maximum(a, amin), amax) - with assert_warns(DeprecationWarning): - actual = np.clip(arr, amin, amax) - assert_equal(actual, expected) - - @pytest.mark.xfail(reason="propagation doesn't match spec") - @pytest.mark.parametrize("arr, amin, amax", [ - (np.array([1] * 10, dtype='m8'), - np.timedelta64('NaT'), - np.zeros(10, dtype=np.int32)), - ]) - def test_NaT_propagation(self, arr, amin, amax): - # NOTE: the expected function spec doesn't - # propagate NaT, but clip() now does - expected = np.minimum(np.maximum(a, amin), amax) - actual = np.clip(arr, amin, amax) - assert_equal(actual, expected) - - -class TestAllclose(object): - rtol = 1e-5 - atol = 1e-8 - - def setup(self): - self.olderr = np.seterr(invalid='ignore') - - def teardown(self): - np.seterr(**self.olderr) - - def tst_allclose(self, x, y): - assert_(np.allclose(x, y), "%s and %s not close" % (x, y)) - - def tst_not_allclose(self, x, y): - assert_(not np.allclose(x, y), "%s and %s shouldn't be close" % (x, y)) - - def test_ip_allclose(self): - # Parametric test factory. - arr = np.array([100, 1000]) - aran = np.arange(125).reshape((5, 5, 5)) - - atol = self.atol - rtol = self.rtol - - data = [([1, 0], [1, 0]), - ([atol], [0]), - ([1], [1+rtol+atol]), - (arr, arr + arr*rtol), - (arr, arr + arr*rtol + atol*2), - (aran, aran + aran*rtol), - (np.inf, np.inf), - (np.inf, [np.inf])] - - for (x, y) in data: - self.tst_allclose(x, y) - - def test_ip_not_allclose(self): - # Parametric test factory. 
- aran = np.arange(125).reshape((5, 5, 5)) - - atol = self.atol - rtol = self.rtol - - data = [([np.inf, 0], [1, np.inf]), - ([np.inf, 0], [1, 0]), - ([np.inf, np.inf], [1, np.inf]), - ([np.inf, np.inf], [1, 0]), - ([-np.inf, 0], [np.inf, 0]), - ([np.nan, 0], [np.nan, 0]), - ([atol*2], [0]), - ([1], [1+rtol+atol*2]), - (aran, aran + aran*atol + atol*2), - (np.array([np.inf, 1]), np.array([0, np.inf]))] - - for (x, y) in data: - self.tst_not_allclose(x, y) - - def test_no_parameter_modification(self): - x = np.array([np.inf, 1]) - y = np.array([0, np.inf]) - np.allclose(x, y) - assert_array_equal(x, np.array([np.inf, 1])) - assert_array_equal(y, np.array([0, np.inf])) - - def test_min_int(self): - # Could make problems because of abs(min_int) == min_int - min_int = np.iinfo(np.int_).min - a = np.array([min_int], dtype=np.int_) - assert_(np.allclose(a, a)) - - def test_equalnan(self): - x = np.array([1.0, np.nan]) - assert_(np.allclose(x, x, equal_nan=True)) - - def test_return_class_is_ndarray(self): - # Issue gh-6475 - # Check that allclose does not preserve subtypes - class Foo(np.ndarray): - def __new__(cls, *args, **kwargs): - return np.array(*args, **kwargs).view(cls) - - a = Foo([1]) - assert_(type(np.allclose(a, a)) is bool) - - -class TestIsclose(object): - rtol = 1e-5 - atol = 1e-8 - - def setup(self): - atol = self.atol - rtol = self.rtol - arr = np.array([100, 1000]) - aran = np.arange(125).reshape((5, 5, 5)) - - self.all_close_tests = [ - ([1, 0], [1, 0]), - ([atol], [0]), - ([1], [1 + rtol + atol]), - (arr, arr + arr*rtol), - (arr, arr + arr*rtol + atol), - (aran, aran + aran*rtol), - (np.inf, np.inf), - (np.inf, [np.inf]), - ([np.inf, -np.inf], [np.inf, -np.inf]), - ] - self.none_close_tests = [ - ([np.inf, 0], [1, np.inf]), - ([np.inf, -np.inf], [1, 0]), - ([np.inf, np.inf], [1, -np.inf]), - ([np.inf, np.inf], [1, 0]), - ([np.nan, 0], [np.nan, -np.inf]), - ([atol*2], [0]), - ([1], [1 + rtol + atol*2]), - (aran, aran + rtol*1.1*aran + atol*1.1), - 
(np.array([np.inf, 1]), np.array([0, np.inf])), - ] - self.some_close_tests = [ - ([np.inf, 0], [np.inf, atol*2]), - ([atol, 1, 1e6*(1 + 2*rtol) + atol], [0, np.nan, 1e6]), - (np.arange(3), [0, 1, 2.1]), - (np.nan, [np.nan, np.nan, np.nan]), - ([0], [atol, np.inf, -np.inf, np.nan]), - (0, [atol, np.inf, -np.inf, np.nan]), - ] - self.some_close_results = [ - [True, False], - [True, False, False], - [True, True, False], - [False, False, False], - [True, False, False, False], - [True, False, False, False], - ] - - def test_ip_isclose(self): - self.setup() - tests = self.some_close_tests - results = self.some_close_results - for (x, y), result in zip(tests, results): - assert_array_equal(np.isclose(x, y), result) - - def tst_all_isclose(self, x, y): - assert_(np.all(np.isclose(x, y)), "%s and %s not close" % (x, y)) - - def tst_none_isclose(self, x, y): - msg = "%s and %s shouldn't be close" - assert_(not np.any(np.isclose(x, y)), msg % (x, y)) - - def tst_isclose_allclose(self, x, y): - msg = "isclose.all() and allclose aren't same for %s and %s" - msg2 = "isclose and allclose aren't same for %s and %s" - if np.isscalar(x) and np.isscalar(y): - assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg2 % (x, y)) - else: - assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y)) - - def test_ip_all_isclose(self): - self.setup() - for (x, y) in self.all_close_tests: - self.tst_all_isclose(x, y) - - def test_ip_none_isclose(self): - self.setup() - for (x, y) in self.none_close_tests: - self.tst_none_isclose(x, y) - - def test_ip_isclose_allclose(self): - self.setup() - tests = (self.all_close_tests + self.none_close_tests + - self.some_close_tests) - for (x, y) in tests: - self.tst_isclose_allclose(x, y) - - def test_equal_nan(self): - assert_array_equal(np.isclose(np.nan, np.nan, equal_nan=True), [True]) - arr = np.array([1.0, np.nan]) - assert_array_equal(np.isclose(arr, arr, equal_nan=True), [True, True]) - - def test_masked_arrays(self): - # Make sure 
to test the output type when arguments are interchanged. - - x = np.ma.masked_where([True, True, False], np.arange(3)) - assert_(type(x) is type(np.isclose(2, x))) - assert_(type(x) is type(np.isclose(x, 2))) - - x = np.ma.masked_where([True, True, False], [np.nan, np.inf, np.nan]) - assert_(type(x) is type(np.isclose(np.inf, x))) - assert_(type(x) is type(np.isclose(x, np.inf))) - - x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan]) - y = np.isclose(np.nan, x, equal_nan=True) - assert_(type(x) is type(y)) - # Ensure that the mask isn't modified... - assert_array_equal([True, True, False], y.mask) - y = np.isclose(x, np.nan, equal_nan=True) - assert_(type(x) is type(y)) - # Ensure that the mask isn't modified... - assert_array_equal([True, True, False], y.mask) - - x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan]) - y = np.isclose(x, x, equal_nan=True) - assert_(type(x) is type(y)) - # Ensure that the mask isn't modified... - assert_array_equal([True, True, False], y.mask) - - def test_scalar_return(self): - assert_(np.isscalar(np.isclose(1, 1))) - - def test_no_parameter_modification(self): - x = np.array([np.inf, 1]) - y = np.array([0, np.inf]) - np.isclose(x, y) - assert_array_equal(x, np.array([np.inf, 1])) - assert_array_equal(y, np.array([0, np.inf])) - - def test_non_finite_scalar(self): - # GH7014, when two scalars are compared the output should also be a - # scalar - assert_(np.isclose(np.inf, -np.inf) is np.False_) - assert_(np.isclose(0, np.inf) is np.False_) - assert_(type(np.isclose(0, np.inf)) is np.bool_) - - -class TestStdVar(object): - def setup(self): - self.A = np.array([1, -1, 1, -1]) - self.real_var = 1 - - def test_basic(self): - assert_almost_equal(np.var(self.A), self.real_var) - assert_almost_equal(np.std(self.A)**2, self.real_var) - - def test_scalars(self): - assert_equal(np.var(1), 0) - assert_equal(np.std(1), 0) - - def test_ddof1(self): - assert_almost_equal(np.var(self.A, ddof=1), - 
self.real_var*len(self.A)/float(len(self.A)-1)) - assert_almost_equal(np.std(self.A, ddof=1)**2, - self.real_var*len(self.A)/float(len(self.A)-1)) - - def test_ddof2(self): - assert_almost_equal(np.var(self.A, ddof=2), - self.real_var*len(self.A)/float(len(self.A)-2)) - assert_almost_equal(np.std(self.A, ddof=2)**2, - self.real_var*len(self.A)/float(len(self.A)-2)) - - def test_out_scalar(self): - d = np.arange(10) - out = np.array(0.) - r = np.std(d, out=out) - assert_(r is out) - assert_array_equal(r, out) - r = np.var(d, out=out) - assert_(r is out) - assert_array_equal(r, out) - r = np.mean(d, out=out) - assert_(r is out) - assert_array_equal(r, out) - - -class TestStdVarComplex(object): - def test_basic(self): - A = np.array([1, 1.j, -1, -1.j]) - real_var = 1 - assert_almost_equal(np.var(A), real_var) - assert_almost_equal(np.std(A)**2, real_var) - - def test_scalars(self): - assert_equal(np.var(1j), 0) - assert_equal(np.std(1j), 0) - - -class TestCreationFuncs(object): - # Test ones, zeros, empty and full. 
- - def setup(self): - dtypes = {np.dtype(tp) for tp in itertools.chain(*np.sctypes.values())} - # void, bytes, str - variable_sized = {tp for tp in dtypes if tp.str.endswith('0')} - self.dtypes = sorted(dtypes - variable_sized | - {np.dtype(tp.str.replace("0", str(i))) - for tp in variable_sized for i in range(1, 10)}, - key=lambda dtype: dtype.str) - self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'} - self.ndims = 10 - - def check_function(self, func, fill_value=None): - par = ((0, 1, 2), - range(self.ndims), - self.orders, - self.dtypes) - fill_kwarg = {} - if fill_value is not None: - fill_kwarg = {'fill_value': fill_value} - - for size, ndims, order, dtype in itertools.product(*par): - shape = ndims * [size] - - # do not fill void type - if fill_kwarg and dtype.str.startswith('|V'): - continue - - arr = func(shape, order=order, dtype=dtype, - **fill_kwarg) - - assert_equal(arr.dtype, dtype) - assert_(getattr(arr.flags, self.orders[order])) - - if fill_value is not None: - if dtype.str.startswith('|S'): - val = str(fill_value) - else: - val = fill_value - assert_equal(arr, dtype.type(val)) - - def test_zeros(self): - self.check_function(np.zeros) - - def test_ones(self): - self.check_function(np.zeros) - - def test_empty(self): - self.check_function(np.empty) - - def test_full(self): - self.check_function(np.full, 0) - self.check_function(np.full, 1) - - @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") - def test_for_reference_leak(self): - # Make sure we have an object for reference - dim = 1 - beg = sys.getrefcount(dim) - np.zeros([dim]*10) - assert_(sys.getrefcount(dim) == beg) - np.ones([dim]*10) - assert_(sys.getrefcount(dim) == beg) - np.empty([dim]*10) - assert_(sys.getrefcount(dim) == beg) - np.full([dim]*10, 0) - assert_(sys.getrefcount(dim) == beg) - - -class TestLikeFuncs(object): - '''Test ones_like, zeros_like, empty_like and full_like''' - - def setup(self): - self.data = [ - # Array scalars - (np.array(3.), None), - 
(np.array(3), 'f8'), - # 1D arrays - (np.arange(6, dtype='f4'), None), - (np.arange(6), 'c16'), - # 2D C-layout arrays - (np.arange(6).reshape(2, 3), None), - (np.arange(6).reshape(3, 2), 'i1'), - # 2D F-layout arrays - (np.arange(6).reshape((2, 3), order='F'), None), - (np.arange(6).reshape((3, 2), order='F'), 'i1'), - # 3D C-layout arrays - (np.arange(24).reshape(2, 3, 4), None), - (np.arange(24).reshape(4, 3, 2), 'f4'), - # 3D F-layout arrays - (np.arange(24).reshape((2, 3, 4), order='F'), None), - (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'), - # 3D non-C/F-layout arrays - (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None), - (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'), - ] - self.shapes = [(5,), (5,6,), (5,6,7,)] - - def compare_array_value(self, dz, value, fill_value): - if value is not None: - if fill_value: - try: - z = dz.dtype.type(value) - except OverflowError: - pass - else: - assert_(np.all(dz == z)) - else: - assert_(np.all(dz == value)) - - def check_like_function(self, like_function, value, fill_value=False): - if fill_value: - fill_kwarg = {'fill_value': value} - else: - fill_kwarg = {} - for d, dtype in self.data: - # default (K) order, dtype - dz = like_function(d, dtype=dtype, **fill_kwarg) - assert_equal(dz.shape, d.shape) - assert_equal(np.array(dz.strides)*d.dtype.itemsize, - np.array(d.strides)*dz.dtype.itemsize) - assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous) - assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous) - if dtype is None: - assert_equal(dz.dtype, d.dtype) - else: - assert_equal(dz.dtype, np.dtype(dtype)) - self.compare_array_value(dz, value, fill_value) - - # C order, default dtype - dz = like_function(d, order='C', dtype=dtype, **fill_kwarg) - assert_equal(dz.shape, d.shape) - assert_(dz.flags.c_contiguous) - if dtype is None: - assert_equal(dz.dtype, d.dtype) - else: - assert_equal(dz.dtype, np.dtype(dtype)) - self.compare_array_value(dz, value, fill_value) - - # F order, default dtype - dz = 
like_function(d, order='F', dtype=dtype, **fill_kwarg) - assert_equal(dz.shape, d.shape) - assert_(dz.flags.f_contiguous) - if dtype is None: - assert_equal(dz.dtype, d.dtype) - else: - assert_equal(dz.dtype, np.dtype(dtype)) - self.compare_array_value(dz, value, fill_value) - - # A order - dz = like_function(d, order='A', dtype=dtype, **fill_kwarg) - assert_equal(dz.shape, d.shape) - if d.flags.f_contiguous: - assert_(dz.flags.f_contiguous) - else: - assert_(dz.flags.c_contiguous) - if dtype is None: - assert_equal(dz.dtype, d.dtype) - else: - assert_equal(dz.dtype, np.dtype(dtype)) - self.compare_array_value(dz, value, fill_value) - - # Test the 'shape' parameter - for s in self.shapes: - for o in 'CFA': - sz = like_function(d, dtype=dtype, shape=s, order=o, - **fill_kwarg) - assert_equal(sz.shape, s) - if dtype is None: - assert_equal(sz.dtype, d.dtype) - else: - assert_equal(sz.dtype, np.dtype(dtype)) - if o == 'C' or (o == 'A' and d.flags.c_contiguous): - assert_(sz.flags.c_contiguous) - elif o == 'F' or (o == 'A' and d.flags.f_contiguous): - assert_(sz.flags.f_contiguous) - self.compare_array_value(sz, value, fill_value) - - if (d.ndim != len(s)): - assert_equal(np.argsort(like_function(d, dtype=dtype, - shape=s, order='K', - **fill_kwarg).strides), - np.argsort(np.empty(s, dtype=dtype, - order='C').strides)) - else: - assert_equal(np.argsort(like_function(d, dtype=dtype, - shape=s, order='K', - **fill_kwarg).strides), - np.argsort(d.strides)) - - # Test the 'subok' parameter - class MyNDArray(np.ndarray): - pass - - a = np.array([[1, 2], [3, 4]]).view(MyNDArray) - - b = like_function(a, **fill_kwarg) - assert_(type(b) is MyNDArray) - - b = like_function(a, subok=False, **fill_kwarg) - assert_(type(b) is not MyNDArray) - - def test_ones_like(self): - self.check_like_function(np.ones_like, 1) - - def test_zeros_like(self): - self.check_like_function(np.zeros_like, 0) - - def test_empty_like(self): - self.check_like_function(np.empty_like, None) - - def 
test_filled_like(self): - self.check_like_function(np.full_like, 0, True) - self.check_like_function(np.full_like, 1, True) - self.check_like_function(np.full_like, 1000, True) - self.check_like_function(np.full_like, 123.456, True) - self.check_like_function(np.full_like, np.inf, True) - - -class TestCorrelate(object): - def _setup(self, dt): - self.x = np.array([1, 2, 3, 4, 5], dtype=dt) - self.xs = np.arange(1, 20)[::3] - self.y = np.array([-1, -2, -3], dtype=dt) - self.z1 = np.array([ -3., -8., -14., -20., -26., -14., -5.], dtype=dt) - self.z1_4 = np.array([-2., -5., -8., -11., -14., -5.], dtype=dt) - self.z1r = np.array([-15., -22., -22., -16., -10., -4., -1.], dtype=dt) - self.z2 = np.array([-5., -14., -26., -20., -14., -8., -3.], dtype=dt) - self.z2r = np.array([-1., -4., -10., -16., -22., -22., -15.], dtype=dt) - self.zs = np.array([-3., -14., -30., -48., -66., -84., - -102., -54., -19.], dtype=dt) - - def test_float(self): - self._setup(float) - z = np.correlate(self.x, self.y, 'full') - assert_array_almost_equal(z, self.z1) - z = np.correlate(self.x, self.y[:-1], 'full') - assert_array_almost_equal(z, self.z1_4) - z = np.correlate(self.y, self.x, 'full') - assert_array_almost_equal(z, self.z2) - z = np.correlate(self.x[::-1], self.y, 'full') - assert_array_almost_equal(z, self.z1r) - z = np.correlate(self.y, self.x[::-1], 'full') - assert_array_almost_equal(z, self.z2r) - z = np.correlate(self.xs, self.y, 'full') - assert_array_almost_equal(z, self.zs) - - def test_object(self): - self._setup(Decimal) - z = np.correlate(self.x, self.y, 'full') - assert_array_almost_equal(z, self.z1) - z = np.correlate(self.y, self.x, 'full') - assert_array_almost_equal(z, self.z2) - - def test_no_overwrite(self): - d = np.ones(100) - k = np.ones(3) - np.correlate(d, k) - assert_array_equal(d, np.ones(100)) - assert_array_equal(k, np.ones(3)) - - def test_complex(self): - x = np.array([1, 2, 3, 4+1j], dtype=complex) - y = np.array([-1, -2j, 3+1j], dtype=complex) - r_z = 
np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=complex) - r_z = r_z[::-1].conjugate() - z = np.correlate(y, x, mode='full') - assert_array_almost_equal(z, r_z) - - def test_zero_size(self): - with pytest.raises(ValueError): - np.correlate(np.array([]), np.ones(1000), mode='full') - with pytest.raises(ValueError): - np.correlate(np.ones(1000), np.array([]), mode='full') - -class TestConvolve(object): - def test_object(self): - d = [1.] * 100 - k = [1.] * 3 - assert_array_almost_equal(np.convolve(d, k)[2:-2], np.full(98, 3)) - - def test_no_overwrite(self): - d = np.ones(100) - k = np.ones(3) - np.convolve(d, k) - assert_array_equal(d, np.ones(100)) - assert_array_equal(k, np.ones(3)) - - -class TestArgwhere(object): - - @pytest.mark.parametrize('nd', [0, 1, 2]) - def test_nd(self, nd): - # get an nd array with multiple elements in every dimension - x = np.empty((2,)*nd, bool) - - # none - x[...] = False - assert_equal(np.argwhere(x).shape, (0, nd)) - - # only one - x[...] = False - x.flat[0] = True - assert_equal(np.argwhere(x).shape, (1, nd)) - - # all but one - x[...] = True - x.flat[0] = False - assert_equal(np.argwhere(x).shape, (x.size - 1, nd)) - - # all - x[...] 
= True - assert_equal(np.argwhere(x).shape, (x.size, nd)) - - def test_2D(self): - x = np.arange(6).reshape((2, 3)) - assert_array_equal(np.argwhere(x > 1), - [[0, 2], - [1, 0], - [1, 1], - [1, 2]]) - - def test_list(self): - assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]]) - - -class TestStringFunction(object): - - def test_set_string_function(self): - a = np.array([1]) - np.set_string_function(lambda x: "FOO", repr=True) - assert_equal(repr(a), "FOO") - np.set_string_function(None, repr=True) - assert_equal(repr(a), "array([1])") - - np.set_string_function(lambda x: "FOO", repr=False) - assert_equal(str(a), "FOO") - np.set_string_function(None, repr=False) - assert_equal(str(a), "[1]") - - -class TestRoll(object): - def test_roll1d(self): - x = np.arange(10) - xr = np.roll(x, 2) - assert_equal(xr, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])) - - def test_roll2d(self): - x2 = np.reshape(np.arange(10), (2, 5)) - x2r = np.roll(x2, 1) - assert_equal(x2r, np.array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]])) - - x2r = np.roll(x2, 1, axis=0) - assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]])) - - x2r = np.roll(x2, 1, axis=1) - assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) - - # Roll multiple axes at once. 
- x2r = np.roll(x2, 1, axis=(0, 1)) - assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]])) - - x2r = np.roll(x2, (1, 0), axis=(0, 1)) - assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]])) - - x2r = np.roll(x2, (-1, 0), axis=(0, 1)) - assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]])) - - x2r = np.roll(x2, (0, 1), axis=(0, 1)) - assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) - - x2r = np.roll(x2, (0, -1), axis=(0, 1)) - assert_equal(x2r, np.array([[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]])) - - x2r = np.roll(x2, (1, 1), axis=(0, 1)) - assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]])) - - x2r = np.roll(x2, (-1, -1), axis=(0, 1)) - assert_equal(x2r, np.array([[6, 7, 8, 9, 5], [1, 2, 3, 4, 0]])) - - # Roll the same axis multiple times. - x2r = np.roll(x2, 1, axis=(0, 0)) - assert_equal(x2r, np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])) - - x2r = np.roll(x2, 1, axis=(1, 1)) - assert_equal(x2r, np.array([[3, 4, 0, 1, 2], [8, 9, 5, 6, 7]])) - - # Roll more than one turn in either direction. 
- x2r = np.roll(x2, 6, axis=1) - assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) - - x2r = np.roll(x2, -4, axis=1) - assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) - - def test_roll_empty(self): - x = np.array([]) - assert_equal(np.roll(x, 1), np.array([])) - - -class TestRollaxis(object): - - # expected shape indexed by (axis, start) for array of - # shape (1, 2, 3, 4) - tgtshape = {(0, 0): (1, 2, 3, 4), (0, 1): (1, 2, 3, 4), - (0, 2): (2, 1, 3, 4), (0, 3): (2, 3, 1, 4), - (0, 4): (2, 3, 4, 1), - (1, 0): (2, 1, 3, 4), (1, 1): (1, 2, 3, 4), - (1, 2): (1, 2, 3, 4), (1, 3): (1, 3, 2, 4), - (1, 4): (1, 3, 4, 2), - (2, 0): (3, 1, 2, 4), (2, 1): (1, 3, 2, 4), - (2, 2): (1, 2, 3, 4), (2, 3): (1, 2, 3, 4), - (2, 4): (1, 2, 4, 3), - (3, 0): (4, 1, 2, 3), (3, 1): (1, 4, 2, 3), - (3, 2): (1, 2, 4, 3), (3, 3): (1, 2, 3, 4), - (3, 4): (1, 2, 3, 4)} - - def test_exceptions(self): - a = np.arange(1*2*3*4).reshape(1, 2, 3, 4) - assert_raises(np.AxisError, np.rollaxis, a, -5, 0) - assert_raises(np.AxisError, np.rollaxis, a, 0, -5) - assert_raises(np.AxisError, np.rollaxis, a, 4, 0) - assert_raises(np.AxisError, np.rollaxis, a, 0, 5) - - def test_results(self): - a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy() - aind = np.indices(a.shape) - assert_(a.flags['OWNDATA']) - for (i, j) in self.tgtshape: - # positive axis, positive start - res = np.rollaxis(a, axis=i, start=j) - i0, i1, i2, i3 = aind[np.array(res.shape) - 1] - assert_(np.all(res[i0, i1, i2, i3] == a)) - assert_(res.shape == self.tgtshape[(i, j)], str((i,j))) - assert_(not res.flags['OWNDATA']) - - # negative axis, positive start - ip = i + 1 - res = np.rollaxis(a, axis=-ip, start=j) - i0, i1, i2, i3 = aind[np.array(res.shape) - 1] - assert_(np.all(res[i0, i1, i2, i3] == a)) - assert_(res.shape == self.tgtshape[(4 - ip, j)]) - assert_(not res.flags['OWNDATA']) - - # positive axis, negative start - jp = j + 1 if j < 4 else j - res = np.rollaxis(a, axis=i, start=-jp) - i0, i1, i2, i3 = 
aind[np.array(res.shape) - 1] - assert_(np.all(res[i0, i1, i2, i3] == a)) - assert_(res.shape == self.tgtshape[(i, 4 - jp)]) - assert_(not res.flags['OWNDATA']) - - # negative axis, negative start - ip = i + 1 - jp = j + 1 if j < 4 else j - res = np.rollaxis(a, axis=-ip, start=-jp) - i0, i1, i2, i3 = aind[np.array(res.shape) - 1] - assert_(np.all(res[i0, i1, i2, i3] == a)) - assert_(res.shape == self.tgtshape[(4 - ip, 4 - jp)]) - assert_(not res.flags['OWNDATA']) - - -class TestMoveaxis(object): - def test_move_to_end(self): - x = np.random.randn(5, 6, 7) - for source, expected in [(0, (6, 7, 5)), - (1, (5, 7, 6)), - (2, (5, 6, 7)), - (-1, (5, 6, 7))]: - actual = np.moveaxis(x, source, -1).shape - assert_(actual, expected) - - def test_move_new_position(self): - x = np.random.randn(1, 2, 3, 4) - for source, destination, expected in [ - (0, 1, (2, 1, 3, 4)), - (1, 2, (1, 3, 2, 4)), - (1, -1, (1, 3, 4, 2)), - ]: - actual = np.moveaxis(x, source, destination).shape - assert_(actual, expected) - - def test_preserve_order(self): - x = np.zeros((1, 2, 3, 4)) - for source, destination in [ - (0, 0), - (3, -1), - (-1, 3), - ([0, -1], [0, -1]), - ([2, 0], [2, 0]), - (range(4), range(4)), - ]: - actual = np.moveaxis(x, source, destination).shape - assert_(actual, (1, 2, 3, 4)) - - def test_move_multiples(self): - x = np.zeros((0, 1, 2, 3)) - for source, destination, expected in [ - ([0, 1], [2, 3], (2, 3, 0, 1)), - ([2, 3], [0, 1], (2, 3, 0, 1)), - ([0, 1, 2], [2, 3, 0], (2, 3, 0, 1)), - ([3, 0], [1, 0], (0, 3, 1, 2)), - ([0, 3], [0, 1], (0, 3, 1, 2)), - ]: - actual = np.moveaxis(x, source, destination).shape - assert_(actual, expected) - - def test_errors(self): - x = np.random.randn(1, 2, 3) - assert_raises_regex(np.AxisError, 'source.*out of bounds', - np.moveaxis, x, 3, 0) - assert_raises_regex(np.AxisError, 'source.*out of bounds', - np.moveaxis, x, -4, 0) - assert_raises_regex(np.AxisError, 'destination.*out of bounds', - np.moveaxis, x, 0, 5) - 
assert_raises_regex(ValueError, 'repeated axis in `source`', - np.moveaxis, x, [0, 0], [0, 1]) - assert_raises_regex(ValueError, 'repeated axis in `destination`', - np.moveaxis, x, [0, 1], [1, 1]) - assert_raises_regex(ValueError, 'must have the same number', - np.moveaxis, x, 0, [0, 1]) - assert_raises_regex(ValueError, 'must have the same number', - np.moveaxis, x, [0, 1], [0]) - - def test_array_likes(self): - x = np.ma.zeros((1, 2, 3)) - result = np.moveaxis(x, 0, 0) - assert_(x.shape, result.shape) - assert_(isinstance(result, np.ma.MaskedArray)) - - x = [1, 2, 3] - result = np.moveaxis(x, 0, 0) - assert_(x, list(result)) - assert_(isinstance(result, np.ndarray)) - - -class TestCross(object): - def test_2x2(self): - u = [1, 2] - v = [3, 4] - z = -2 - cp = np.cross(u, v) - assert_equal(cp, z) - cp = np.cross(v, u) - assert_equal(cp, -z) - - def test_2x3(self): - u = [1, 2] - v = [3, 4, 5] - z = np.array([10, -5, -2]) - cp = np.cross(u, v) - assert_equal(cp, z) - cp = np.cross(v, u) - assert_equal(cp, -z) - - def test_3x3(self): - u = [1, 2, 3] - v = [4, 5, 6] - z = np.array([-3, 6, -3]) - cp = np.cross(u, v) - assert_equal(cp, z) - cp = np.cross(v, u) - assert_equal(cp, -z) - - def test_broadcasting(self): - # Ticket #2624 (Trac #2032) - u = np.tile([1, 2], (11, 1)) - v = np.tile([3, 4], (11, 1)) - z = -2 - assert_equal(np.cross(u, v), z) - assert_equal(np.cross(v, u), -z) - assert_equal(np.cross(u, u), 0) - - u = np.tile([1, 2], (11, 1)).T - v = np.tile([3, 4, 5], (11, 1)) - z = np.tile([10, -5, -2], (11, 1)) - assert_equal(np.cross(u, v, axisa=0), z) - assert_equal(np.cross(v, u.T), -z) - assert_equal(np.cross(v, v), 0) - - u = np.tile([1, 2, 3], (11, 1)).T - v = np.tile([3, 4], (11, 1)).T - z = np.tile([-12, 9, -2], (11, 1)) - assert_equal(np.cross(u, v, axisa=0, axisb=0), z) - assert_equal(np.cross(v.T, u.T), -z) - assert_equal(np.cross(u.T, u.T), 0) - - u = np.tile([1, 2, 3], (5, 1)) - v = np.tile([4, 5, 6], (5, 1)).T - z = np.tile([-3, 6, -3], (5, 1)) - 
assert_equal(np.cross(u, v, axisb=0), z) - assert_equal(np.cross(v.T, u), -z) - assert_equal(np.cross(u, u), 0) - - def test_broadcasting_shapes(self): - u = np.ones((2, 1, 3)) - v = np.ones((5, 3)) - assert_equal(np.cross(u, v).shape, (2, 5, 3)) - u = np.ones((10, 3, 5)) - v = np.ones((2, 5)) - assert_equal(np.cross(u, v, axisa=1, axisb=0).shape, (10, 5, 3)) - assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=2) - assert_raises(np.AxisError, np.cross, u, v, axisa=3, axisb=0) - u = np.ones((10, 3, 5, 7)) - v = np.ones((5, 7, 2)) - assert_equal(np.cross(u, v, axisa=1, axisc=2).shape, (10, 5, 3, 7)) - assert_raises(np.AxisError, np.cross, u, v, axisa=-5, axisb=2) - assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=-4) - # gh-5885 - u = np.ones((3, 4, 2)) - for axisc in range(-2, 2): - assert_equal(np.cross(u, u, axisc=axisc).shape, (3, 4)) - - -def test_outer_out_param(): - arr1 = np.ones((5,)) - arr2 = np.ones((2,)) - arr3 = np.linspace(-2, 2, 5) - out1 = np.ndarray(shape=(5,5)) - out2 = np.ndarray(shape=(2, 5)) - res1 = np.outer(arr1, arr3, out1) - assert_equal(res1, out1) - assert_equal(np.outer(arr2, arr3, out2), out2) - - -class TestIndices(object): - - def test_simple(self): - [x, y] = np.indices((4, 3)) - assert_array_equal(x, np.array([[0, 0, 0], - [1, 1, 1], - [2, 2, 2], - [3, 3, 3]])) - assert_array_equal(y, np.array([[0, 1, 2], - [0, 1, 2], - [0, 1, 2], - [0, 1, 2]])) - - def test_single_input(self): - [x] = np.indices((4,)) - assert_array_equal(x, np.array([0, 1, 2, 3])) - - [x] = np.indices((4,), sparse=True) - assert_array_equal(x, np.array([0, 1, 2, 3])) - - def test_scalar_input(self): - assert_array_equal([], np.indices(())) - assert_array_equal([], np.indices((), sparse=True)) - assert_array_equal([[]], np.indices((0,))) - assert_array_equal([[]], np.indices((0,), sparse=True)) - - def test_sparse(self): - [x, y] = np.indices((4,3), sparse=True) - assert_array_equal(x, np.array([[0], [1], [2], [3]])) - assert_array_equal(y, 
np.array([[0, 1, 2]])) - - @pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64]) - @pytest.mark.parametrize("dims", [(), (0,), (4, 3)]) - def test_return_type(self, dtype, dims): - inds = np.indices(dims, dtype=dtype) - assert_(inds.dtype == dtype) - - for arr in np.indices(dims, dtype=dtype, sparse=True): - assert_(arr.dtype == dtype) - - -class TestRequire(object): - flag_names = ['C', 'C_CONTIGUOUS', 'CONTIGUOUS', - 'F', 'F_CONTIGUOUS', 'FORTRAN', - 'A', 'ALIGNED', - 'W', 'WRITEABLE', - 'O', 'OWNDATA'] - - def generate_all_false(self, dtype): - arr = np.zeros((2, 2), [('junk', 'i1'), ('a', dtype)]) - arr.setflags(write=False) - a = arr['a'] - assert_(not a.flags['C']) - assert_(not a.flags['F']) - assert_(not a.flags['O']) - assert_(not a.flags['W']) - assert_(not a.flags['A']) - return a - - def set_and_check_flag(self, flag, dtype, arr): - if dtype is None: - dtype = arr.dtype - b = np.require(arr, dtype, [flag]) - assert_(b.flags[flag]) - assert_(b.dtype == dtype) - - # a further call to np.require ought to return the same array - # unless OWNDATA is specified. 
- c = np.require(b, None, [flag]) - if flag[0] != 'O': - assert_(c is b) - else: - assert_(c.flags[flag]) - - def test_require_each(self): - - id = ['f8', 'i4'] - fd = [None, 'f8', 'c16'] - for idtype, fdtype, flag in itertools.product(id, fd, self.flag_names): - a = self.generate_all_false(idtype) - self.set_and_check_flag(flag, fdtype, a) - - def test_unknown_requirement(self): - a = self.generate_all_false('f8') - assert_raises(KeyError, np.require, a, None, 'Q') - - def test_non_array_input(self): - a = np.require([1, 2, 3, 4], 'i4', ['C', 'A', 'O']) - assert_(a.flags['O']) - assert_(a.flags['C']) - assert_(a.flags['A']) - assert_(a.dtype == 'i4') - assert_equal(a, [1, 2, 3, 4]) - - def test_C_and_F_simul(self): - a = self.generate_all_false('f8') - assert_raises(ValueError, np.require, a, None, ['C', 'F']) - - def test_ensure_array(self): - class ArraySubclass(np.ndarray): - pass - - a = ArraySubclass((2, 2)) - b = np.require(a, None, ['E']) - assert_(type(b) is np.ndarray) - - def test_preserve_subtype(self): - class ArraySubclass(np.ndarray): - pass - - for flag in self.flag_names: - a = ArraySubclass((2, 2)) - self.set_and_check_flag(flag, None, a) - - -class TestBroadcast(object): - def test_broadcast_in_args(self): - # gh-5881 - arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)), - np.empty((5, 1, 7))] - mits = [np.broadcast(*arrs), - np.broadcast(np.broadcast(*arrs[:0]), np.broadcast(*arrs[0:])), - np.broadcast(np.broadcast(*arrs[:1]), np.broadcast(*arrs[1:])), - np.broadcast(np.broadcast(*arrs[:2]), np.broadcast(*arrs[2:])), - np.broadcast(arrs[0], np.broadcast(*arrs[1:-1]), arrs[-1])] - for mit in mits: - assert_equal(mit.shape, (5, 6, 7)) - assert_equal(mit.ndim, 3) - assert_equal(mit.nd, 3) - assert_equal(mit.numiter, 4) - for a, ia in zip(arrs, mit.iters): - assert_(a is ia.base) - - def test_broadcast_single_arg(self): - # gh-6899 - arrs = [np.empty((5, 6, 7))] - mit = np.broadcast(*arrs) - assert_equal(mit.shape, (5, 6, 7)) - 
assert_equal(mit.ndim, 3) - assert_equal(mit.nd, 3) - assert_equal(mit.numiter, 1) - assert_(arrs[0] is mit.iters[0].base) - - def test_number_of_arguments(self): - arr = np.empty((5,)) - for j in range(35): - arrs = [arr] * j - if j > 32: - assert_raises(ValueError, np.broadcast, *arrs) - else: - mit = np.broadcast(*arrs) - assert_equal(mit.numiter, j) - - def test_broadcast_error_kwargs(self): - #gh-13455 - arrs = [np.empty((5, 6, 7))] - mit = np.broadcast(*arrs) - mit2 = np.broadcast(*arrs, **{}) - assert_equal(mit.shape, mit2.shape) - assert_equal(mit.ndim, mit2.ndim) - assert_equal(mit.nd, mit2.nd) - assert_equal(mit.numiter, mit2.numiter) - assert_(mit.iters[0].base is mit2.iters[0].base) - - assert_raises(ValueError, np.broadcast, 1, **{'x': 1}) - -class TestKeepdims(object): - - class sub_array(np.ndarray): - def sum(self, axis=None, dtype=None, out=None): - return np.ndarray.sum(self, axis, dtype, out, keepdims=True) - - def test_raise(self): - sub_class = self.sub_array - x = np.arange(30).view(sub_class) - assert_raises(TypeError, np.sum, x, keepdims=True) - - -class TestTensordot(object): - - def test_zero_dimension(self): - # Test resolution to issue #5663 - a = np.ndarray((3,0)) - b = np.ndarray((0,4)) - td = np.tensordot(a, b, (1, 0)) - assert_array_equal(td, np.dot(a, b)) - assert_array_equal(td, np.einsum('ij,jk', a, b)) - - def test_zero_dimensional(self): - # gh-12130 - arr_0d = np.array(1) - ret = np.tensordot(arr_0d, arr_0d, ([], [])) # contracting no axes is well defined - assert_array_equal(ret, arr_0d) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_numerictypes.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_numerictypes.py deleted file mode 100644 index 387740e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_numerictypes.py +++ /dev/null @@ -1,529 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import itertools - -import pytest -import numpy as 
np -from numpy.testing import assert_, assert_equal, assert_raises, IS_PYPY - -# This is the structure of the table used for plain objects: -# -# +-+-+-+ -# |x|y|z| -# +-+-+-+ - -# Structure of a plain array description: -Pdescr = [ - ('x', 'i4', (2,)), - ('y', 'f8', (2, 2)), - ('z', 'u1')] - -# A plain list of tuples with values for testing: -PbufferT = [ - # x y z - ([3, 2], [[6., 4.], [6., 4.]], 8), - ([4, 3], [[7., 5.], [7., 5.]], 9), - ] - - -# This is the structure of the table used for nested objects (DON'T PANIC!): -# -# +-+---------------------------------+-----+----------+-+-+ -# |x|Info |color|info |y|z| -# | +-----+--+----------------+----+--+ +----+-----+ | | -# | |value|y2|Info2 |name|z2| |Name|Value| | | -# | | | +----+-----+--+--+ | | | | | | | -# | | | |name|value|y3|z3| | | | | | | | -# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+ -# - -# The corresponding nested array description: -Ndescr = [ - ('x', 'i4', (2,)), - ('Info', [ - ('value', 'c16'), - ('y2', 'f8'), - ('Info2', [ - ('name', 'S2'), - ('value', 'c16', (2,)), - ('y3', 'f8', (2,)), - ('z3', 'u4', (2,))]), - ('name', 'S2'), - ('z2', 'b1')]), - ('color', 'S2'), - ('info', [ - ('Name', 'U8'), - ('Value', 'c16')]), - ('y', 'f8', (2, 2)), - ('z', 'u1')] - -NbufferT = [ - # x Info color info y z - # value y2 Info2 name z2 Name Value - # name value y3 z3 - ([3, 2], (6j, 6., (b'nn', [6j, 4j], [6., 4.], [1, 2]), b'NN', True), b'cc', (u'NN', 6j), [[6., 4.], [6., 4.]], 8), - ([4, 3], (7j, 7., (b'oo', [7j, 5j], [7., 5.], [2, 1]), b'OO', False), b'dd', (u'OO', 7j), [[7., 5.], [7., 5.]], 9), - ] - - -byteorder = {'little':'<', 'big':'>'}[sys.byteorder] - -def normalize_descr(descr): - "Normalize a description adding the platform byteorder." 
- - out = [] - for item in descr: - dtype = item[1] - if isinstance(dtype, str): - if dtype[0] not in ['|', '<', '>']: - onebyte = dtype[1:] == "1" - if onebyte or dtype[0] in ['S', 'V', 'b']: - dtype = "|" + dtype - else: - dtype = byteorder + dtype - if len(item) > 2 and np.prod(item[2]) > 1: - nitem = (item[0], dtype, item[2]) - else: - nitem = (item[0], dtype) - out.append(nitem) - elif isinstance(dtype, list): - l = normalize_descr(dtype) - out.append((item[0], l)) - else: - raise ValueError("Expected a str or list and got %s" % - (type(item))) - return out - - -############################################################ -# Creation tests -############################################################ - -class CreateZeros(object): - """Check the creation of heterogeneous arrays zero-valued""" - - def test_zeros0D(self): - """Check creation of 0-dimensional objects""" - h = np.zeros((), dtype=self._descr) - assert_(normalize_descr(self._descr) == h.dtype.descr) - assert_(h.dtype.fields['x'][0].name[:4] == 'void') - assert_(h.dtype.fields['x'][0].char == 'V') - assert_(h.dtype.fields['x'][0].type == np.void) - # A small check that data is ok - assert_equal(h['z'], np.zeros((), dtype='u1')) - - def test_zerosSD(self): - """Check creation of single-dimensional objects""" - h = np.zeros((2,), dtype=self._descr) - assert_(normalize_descr(self._descr) == h.dtype.descr) - assert_(h.dtype['y'].name[:4] == 'void') - assert_(h.dtype['y'].char == 'V') - assert_(h.dtype['y'].type == np.void) - # A small check that data is ok - assert_equal(h['z'], np.zeros((2,), dtype='u1')) - - def test_zerosMD(self): - """Check creation of multi-dimensional objects""" - h = np.zeros((2, 3), dtype=self._descr) - assert_(normalize_descr(self._descr) == h.dtype.descr) - assert_(h.dtype['z'].name == 'uint8') - assert_(h.dtype['z'].char == 'B') - assert_(h.dtype['z'].type == np.uint8) - # A small check that data is ok - assert_equal(h['z'], np.zeros((2, 3), dtype='u1')) - - -class 
TestCreateZerosPlain(CreateZeros): - """Check the creation of heterogeneous arrays zero-valued (plain)""" - _descr = Pdescr - -class TestCreateZerosNested(CreateZeros): - """Check the creation of heterogeneous arrays zero-valued (nested)""" - _descr = Ndescr - - -class CreateValues(object): - """Check the creation of heterogeneous arrays with values""" - - def test_tuple(self): - """Check creation from tuples""" - h = np.array(self._buffer, dtype=self._descr) - assert_(normalize_descr(self._descr) == h.dtype.descr) - if self.multiple_rows: - assert_(h.shape == (2,)) - else: - assert_(h.shape == ()) - - def test_list_of_tuple(self): - """Check creation from list of tuples""" - h = np.array([self._buffer], dtype=self._descr) - assert_(normalize_descr(self._descr) == h.dtype.descr) - if self.multiple_rows: - assert_(h.shape == (1, 2)) - else: - assert_(h.shape == (1,)) - - def test_list_of_list_of_tuple(self): - """Check creation from list of list of tuples""" - h = np.array([[self._buffer]], dtype=self._descr) - assert_(normalize_descr(self._descr) == h.dtype.descr) - if self.multiple_rows: - assert_(h.shape == (1, 1, 2)) - else: - assert_(h.shape == (1, 1)) - - -class TestCreateValuesPlainSingle(CreateValues): - """Check the creation of heterogeneous arrays (plain, single row)""" - _descr = Pdescr - multiple_rows = 0 - _buffer = PbufferT[0] - -class TestCreateValuesPlainMultiple(CreateValues): - """Check the creation of heterogeneous arrays (plain, multiple rows)""" - _descr = Pdescr - multiple_rows = 1 - _buffer = PbufferT - -class TestCreateValuesNestedSingle(CreateValues): - """Check the creation of heterogeneous arrays (nested, single row)""" - _descr = Ndescr - multiple_rows = 0 - _buffer = NbufferT[0] - -class TestCreateValuesNestedMultiple(CreateValues): - """Check the creation of heterogeneous arrays (nested, multiple rows)""" - _descr = Ndescr - multiple_rows = 1 - _buffer = NbufferT - - -############################################################ -# 
Reading tests -############################################################ - -class ReadValuesPlain(object): - """Check the reading of values in heterogeneous arrays (plain)""" - - def test_access_fields(self): - h = np.array(self._buffer, dtype=self._descr) - if not self.multiple_rows: - assert_(h.shape == ()) - assert_equal(h['x'], np.array(self._buffer[0], dtype='i4')) - assert_equal(h['y'], np.array(self._buffer[1], dtype='f8')) - assert_equal(h['z'], np.array(self._buffer[2], dtype='u1')) - else: - assert_(len(h) == 2) - assert_equal(h['x'], np.array([self._buffer[0][0], - self._buffer[1][0]], dtype='i4')) - assert_equal(h['y'], np.array([self._buffer[0][1], - self._buffer[1][1]], dtype='f8')) - assert_equal(h['z'], np.array([self._buffer[0][2], - self._buffer[1][2]], dtype='u1')) - - -class TestReadValuesPlainSingle(ReadValuesPlain): - """Check the creation of heterogeneous arrays (plain, single row)""" - _descr = Pdescr - multiple_rows = 0 - _buffer = PbufferT[0] - -class TestReadValuesPlainMultiple(ReadValuesPlain): - """Check the values of heterogeneous arrays (plain, multiple rows)""" - _descr = Pdescr - multiple_rows = 1 - _buffer = PbufferT - -class ReadValuesNested(object): - """Check the reading of values in heterogeneous arrays (nested)""" - - def test_access_top_fields(self): - """Check reading the top fields of a nested array""" - h = np.array(self._buffer, dtype=self._descr) - if not self.multiple_rows: - assert_(h.shape == ()) - assert_equal(h['x'], np.array(self._buffer[0], dtype='i4')) - assert_equal(h['y'], np.array(self._buffer[4], dtype='f8')) - assert_equal(h['z'], np.array(self._buffer[5], dtype='u1')) - else: - assert_(len(h) == 2) - assert_equal(h['x'], np.array([self._buffer[0][0], - self._buffer[1][0]], dtype='i4')) - assert_equal(h['y'], np.array([self._buffer[0][4], - self._buffer[1][4]], dtype='f8')) - assert_equal(h['z'], np.array([self._buffer[0][5], - self._buffer[1][5]], dtype='u1')) - - def test_nested1_acessors(self): - 
"""Check reading the nested fields of a nested array (1st level)""" - h = np.array(self._buffer, dtype=self._descr) - if not self.multiple_rows: - assert_equal(h['Info']['value'], - np.array(self._buffer[1][0], dtype='c16')) - assert_equal(h['Info']['y2'], - np.array(self._buffer[1][1], dtype='f8')) - assert_equal(h['info']['Name'], - np.array(self._buffer[3][0], dtype='U2')) - assert_equal(h['info']['Value'], - np.array(self._buffer[3][1], dtype='c16')) - else: - assert_equal(h['Info']['value'], - np.array([self._buffer[0][1][0], - self._buffer[1][1][0]], - dtype='c16')) - assert_equal(h['Info']['y2'], - np.array([self._buffer[0][1][1], - self._buffer[1][1][1]], - dtype='f8')) - assert_equal(h['info']['Name'], - np.array([self._buffer[0][3][0], - self._buffer[1][3][0]], - dtype='U2')) - assert_equal(h['info']['Value'], - np.array([self._buffer[0][3][1], - self._buffer[1][3][1]], - dtype='c16')) - - def test_nested2_acessors(self): - """Check reading the nested fields of a nested array (2nd level)""" - h = np.array(self._buffer, dtype=self._descr) - if not self.multiple_rows: - assert_equal(h['Info']['Info2']['value'], - np.array(self._buffer[1][2][1], dtype='c16')) - assert_equal(h['Info']['Info2']['z3'], - np.array(self._buffer[1][2][3], dtype='u4')) - else: - assert_equal(h['Info']['Info2']['value'], - np.array([self._buffer[0][1][2][1], - self._buffer[1][1][2][1]], - dtype='c16')) - assert_equal(h['Info']['Info2']['z3'], - np.array([self._buffer[0][1][2][3], - self._buffer[1][1][2][3]], - dtype='u4')) - - def test_nested1_descriptor(self): - """Check access nested descriptors of a nested array (1st level)""" - h = np.array(self._buffer, dtype=self._descr) - assert_(h.dtype['Info']['value'].name == 'complex128') - assert_(h.dtype['Info']['y2'].name == 'float64') - if sys.version_info[0] >= 3: - assert_(h.dtype['info']['Name'].name == 'str256') - else: - assert_(h.dtype['info']['Name'].name == 'unicode256') - assert_(h.dtype['info']['Value'].name == 'complex128') 
- - def test_nested2_descriptor(self): - """Check access nested descriptors of a nested array (2nd level)""" - h = np.array(self._buffer, dtype=self._descr) - assert_(h.dtype['Info']['Info2']['value'].name == 'void256') - assert_(h.dtype['Info']['Info2']['z3'].name == 'void64') - - -class TestReadValuesNestedSingle(ReadValuesNested): - """Check the values of heterogeneous arrays (nested, single row)""" - _descr = Ndescr - multiple_rows = False - _buffer = NbufferT[0] - -class TestReadValuesNestedMultiple(ReadValuesNested): - """Check the values of heterogeneous arrays (nested, multiple rows)""" - _descr = Ndescr - multiple_rows = True - _buffer = NbufferT - -class TestEmptyField(object): - def test_assign(self): - a = np.arange(10, dtype=np.float32) - a.dtype = [("int", "<0i4"), ("float", "<2f4")] - assert_(a['int'].shape == (5, 0)) - assert_(a['float'].shape == (5, 2)) - -class TestCommonType(object): - def test_scalar_loses1(self): - res = np.find_common_type(['f4', 'f4', 'i2'], ['f8']) - assert_(res == 'f4') - - def test_scalar_loses2(self): - res = np.find_common_type(['f4', 'f4'], ['i8']) - assert_(res == 'f4') - - def test_scalar_wins(self): - res = np.find_common_type(['f4', 'f4', 'i2'], ['c8']) - assert_(res == 'c8') - - def test_scalar_wins2(self): - res = np.find_common_type(['u4', 'i4', 'i4'], ['f4']) - assert_(res == 'f8') - - def test_scalar_wins3(self): # doesn't go up to 'f16' on purpose - res = np.find_common_type(['u8', 'i8', 'i8'], ['f8']) - assert_(res == 'f8') - -class TestMultipleFields(object): - def setup(self): - self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8') - - def _bad_call(self): - return self.ary['f0', 'f1'] - - def test_no_tuple(self): - assert_raises(IndexError, self._bad_call) - - def test_return(self): - res = self.ary[['f0', 'f2']].tolist() - assert_(res == [(1, 3), (5, 7)]) - - -class TestIsSubDType(object): - # scalar types can be promoted into dtypes - wrappers = [np.dtype, lambda x: x] - - def 
test_both_abstract(self): - assert_(np.issubdtype(np.floating, np.inexact)) - assert_(not np.issubdtype(np.inexact, np.floating)) - - def test_same(self): - for cls in (np.float32, np.int32): - for w1, w2 in itertools.product(self.wrappers, repeat=2): - assert_(np.issubdtype(w1(cls), w2(cls))) - - def test_subclass(self): - # note we cannot promote floating to a dtype, as it would turn into a - # concrete type - for w in self.wrappers: - assert_(np.issubdtype(w(np.float32), np.floating)) - assert_(np.issubdtype(w(np.float64), np.floating)) - - def test_subclass_backwards(self): - for w in self.wrappers: - assert_(not np.issubdtype(np.floating, w(np.float32))) - assert_(not np.issubdtype(np.floating, w(np.float64))) - - def test_sibling_class(self): - for w1, w2 in itertools.product(self.wrappers, repeat=2): - assert_(not np.issubdtype(w1(np.float32), w2(np.float64))) - assert_(not np.issubdtype(w1(np.float64), w2(np.float32))) - - -class TestSctypeDict(object): - def test_longdouble(self): - assert_(np.sctypeDict['f8'] is not np.longdouble) - assert_(np.sctypeDict['c16'] is not np.clongdouble) - - -class TestBitName(object): - def test_abstract(self): - assert_raises(ValueError, np.core.numerictypes.bitname, np.floating) - - -class TestMaximumSctype(object): - - # note that parametrizing with sctype['int'] and similar would skip types - # with the same size (gh-11923) - - @pytest.mark.parametrize('t', [np.byte, np.short, np.intc, np.int_, np.longlong]) - def test_int(self, t): - assert_equal(np.maximum_sctype(t), np.sctypes['int'][-1]) - - @pytest.mark.parametrize('t', [np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong]) - def test_uint(self, t): - assert_equal(np.maximum_sctype(t), np.sctypes['uint'][-1]) - - @pytest.mark.parametrize('t', [np.half, np.single, np.double, np.longdouble]) - def test_float(self, t): - assert_equal(np.maximum_sctype(t), np.sctypes['float'][-1]) - - @pytest.mark.parametrize('t', [np.csingle, np.cdouble, np.clongdouble]) - def 
test_complex(self, t): - assert_equal(np.maximum_sctype(t), np.sctypes['complex'][-1]) - - @pytest.mark.parametrize('t', [np.bool_, np.object_, np.unicode_, np.bytes_, np.void]) - def test_other(self, t): - assert_equal(np.maximum_sctype(t), t) - - -class Test_sctype2char(object): - # This function is old enough that we're really just documenting the quirks - # at this point. - - def test_scalar_type(self): - assert_equal(np.sctype2char(np.double), 'd') - assert_equal(np.sctype2char(np.int_), 'l') - assert_equal(np.sctype2char(np.unicode_), 'U') - assert_equal(np.sctype2char(np.bytes_), 'S') - - def test_other_type(self): - assert_equal(np.sctype2char(float), 'd') - assert_equal(np.sctype2char(list), 'O') - assert_equal(np.sctype2char(np.ndarray), 'O') - - def test_third_party_scalar_type(self): - from numpy.core._rational_tests import rational - assert_raises(KeyError, np.sctype2char, rational) - assert_raises(KeyError, np.sctype2char, rational(1)) - - def test_array_instance(self): - assert_equal(np.sctype2char(np.array([1.0, 2.0])), 'd') - - def test_abstract_type(self): - assert_raises(KeyError, np.sctype2char, np.floating) - - def test_non_type(self): - assert_raises(ValueError, np.sctype2char, 1) - -@pytest.mark.parametrize("rep, expected", [ - (np.int32, True), - (list, False), - (1.1, False), - (str, True), - (np.dtype(np.float64), True), - (np.dtype((np.int16, (3, 4))), True), - (np.dtype([('a', np.int8)]), True), - ]) -def test_issctype(rep, expected): - # ensure proper identification of scalar - # data-types by issctype() - actual = np.issctype(rep) - assert_equal(actual, expected) - - -@pytest.mark.skipif(sys.flags.optimize > 1, - reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") -class TestDocStrings(object): - def test_platform_dependent_aliases(self): - if np.int64 is np.int_: - assert_('int64' in np.int_.__doc__) - elif np.int64 is np.longlong: - 
assert_('int64' in np.longlong.__doc__) - - -class TestScalarTypeNames: - # gh-9799 - - numeric_types = [ - np.byte, np.short, np.intc, np.int_, np.longlong, - np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong, - np.half, np.single, np.double, np.longdouble, - np.csingle, np.cdouble, np.clongdouble, - ] - - def test_names_are_unique(self): - # none of the above may be aliases for each other - assert len(set(self.numeric_types)) == len(self.numeric_types) - - # names must be unique - names = [t.__name__ for t in self.numeric_types] - assert len(set(names)) == len(names) - - @pytest.mark.parametrize('t', numeric_types) - def test_names_reflect_attributes(self, t): - """ Test that names correspond to where the type is under ``np.`` """ - assert getattr(np, t.__name__) is t - - @pytest.mark.parametrize('t', numeric_types) - def test_names_are_undersood_by_dtype(self, t): - """ Test the dtype constructor maps names back to the type """ - assert np.dtype(t.__name__).type is t diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_overrides.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_overrides.py deleted file mode 100644 index 63b0e45..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_overrides.py +++ /dev/null @@ -1,429 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import inspect -import sys -from unittest import mock - -import numpy as np -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_raises_regex) -from numpy.core.overrides import ( - _get_implementing_args, array_function_dispatch, - verify_matching_signatures, ARRAY_FUNCTION_ENABLED) -from numpy.compat import pickle -import pytest - - -requires_array_function = pytest.mark.skipif( - not ARRAY_FUNCTION_ENABLED, - reason="__array_function__ dispatch not enabled.") - - -def _return_not_implemented(self, *args, **kwargs): - return NotImplemented - - -# need to define this at the top level to test pickling 
-@array_function_dispatch(lambda array: (array,)) -def dispatched_one_arg(array): - """Docstring.""" - return 'original' - - -@array_function_dispatch(lambda array1, array2: (array1, array2)) -def dispatched_two_arg(array1, array2): - """Docstring.""" - return 'original' - - -class TestGetImplementingArgs(object): - - def test_ndarray(self): - array = np.array(1) - - args = _get_implementing_args([array]) - assert_equal(list(args), [array]) - - args = _get_implementing_args([array, array]) - assert_equal(list(args), [array]) - - args = _get_implementing_args([array, 1]) - assert_equal(list(args), [array]) - - args = _get_implementing_args([1, array]) - assert_equal(list(args), [array]) - - def test_ndarray_subclasses(self): - - class OverrideSub(np.ndarray): - __array_function__ = _return_not_implemented - - class NoOverrideSub(np.ndarray): - pass - - array = np.array(1).view(np.ndarray) - override_sub = np.array(1).view(OverrideSub) - no_override_sub = np.array(1).view(NoOverrideSub) - - args = _get_implementing_args([array, override_sub]) - assert_equal(list(args), [override_sub, array]) - - args = _get_implementing_args([array, no_override_sub]) - assert_equal(list(args), [no_override_sub, array]) - - args = _get_implementing_args( - [override_sub, no_override_sub]) - assert_equal(list(args), [override_sub, no_override_sub]) - - def test_ndarray_and_duck_array(self): - - class Other(object): - __array_function__ = _return_not_implemented - - array = np.array(1) - other = Other() - - args = _get_implementing_args([other, array]) - assert_equal(list(args), [other, array]) - - args = _get_implementing_args([array, other]) - assert_equal(list(args), [array, other]) - - def test_ndarray_subclass_and_duck_array(self): - - class OverrideSub(np.ndarray): - __array_function__ = _return_not_implemented - - class Other(object): - __array_function__ = _return_not_implemented - - array = np.array(1) - subarray = np.array(1).view(OverrideSub) - other = Other() - - 
assert_equal(_get_implementing_args([array, subarray, other]), - [subarray, array, other]) - assert_equal(_get_implementing_args([array, other, subarray]), - [subarray, array, other]) - - def test_many_duck_arrays(self): - - class A(object): - __array_function__ = _return_not_implemented - - class B(A): - __array_function__ = _return_not_implemented - - class C(A): - __array_function__ = _return_not_implemented - - class D(object): - __array_function__ = _return_not_implemented - - a = A() - b = B() - c = C() - d = D() - - assert_equal(_get_implementing_args([1]), []) - assert_equal(_get_implementing_args([a]), [a]) - assert_equal(_get_implementing_args([a, 1]), [a]) - assert_equal(_get_implementing_args([a, a, a]), [a]) - assert_equal(_get_implementing_args([a, d, a]), [a, d]) - assert_equal(_get_implementing_args([a, b]), [b, a]) - assert_equal(_get_implementing_args([b, a]), [b, a]) - assert_equal(_get_implementing_args([a, b, c]), [b, c, a]) - assert_equal(_get_implementing_args([a, c, b]), [c, b, a]) - - def test_too_many_duck_arrays(self): - namespace = dict(__array_function__=_return_not_implemented) - types = [type('A' + str(i), (object,), namespace) for i in range(33)] - relevant_args = [t() for t in types] - - actual = _get_implementing_args(relevant_args[:32]) - assert_equal(actual, relevant_args[:32]) - - with assert_raises_regex(TypeError, 'distinct argument types'): - _get_implementing_args(relevant_args) - - -class TestNDArrayArrayFunction(object): - - @requires_array_function - def test_method(self): - - class Other(object): - __array_function__ = _return_not_implemented - - class NoOverrideSub(np.ndarray): - pass - - class OverrideSub(np.ndarray): - __array_function__ = _return_not_implemented - - array = np.array([1]) - other = Other() - no_override_sub = array.view(NoOverrideSub) - override_sub = array.view(OverrideSub) - - result = array.__array_function__(func=dispatched_two_arg, - types=(np.ndarray,), - args=(array, 1.), kwargs={}) - 
assert_equal(result, 'original') - - result = array.__array_function__(func=dispatched_two_arg, - types=(np.ndarray, Other), - args=(array, other), kwargs={}) - assert_(result is NotImplemented) - - result = array.__array_function__(func=dispatched_two_arg, - types=(np.ndarray, NoOverrideSub), - args=(array, no_override_sub), - kwargs={}) - assert_equal(result, 'original') - - result = array.__array_function__(func=dispatched_two_arg, - types=(np.ndarray, OverrideSub), - args=(array, override_sub), - kwargs={}) - assert_equal(result, 'original') - - with assert_raises_regex(TypeError, 'no implementation found'): - np.concatenate((array, other)) - - expected = np.concatenate((array, array)) - result = np.concatenate((array, no_override_sub)) - assert_equal(result, expected.view(NoOverrideSub)) - result = np.concatenate((array, override_sub)) - assert_equal(result, expected.view(OverrideSub)) - - def test_no_wrapper(self): - # This shouldn't happen unless a user intentionally calls - # __array_function__ with invalid arguments, but check that we raise - # an appropriate error all the same. 
- array = np.array(1) - func = lambda x: x - with assert_raises_regex(AttributeError, '_implementation'): - array.__array_function__(func=func, types=(np.ndarray,), - args=(array,), kwargs={}) - - -@requires_array_function -class TestArrayFunctionDispatch(object): - - def test_pickle(self): - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - roundtripped = pickle.loads( - pickle.dumps(dispatched_one_arg, protocol=proto)) - assert_(roundtripped is dispatched_one_arg) - - def test_name_and_docstring(self): - assert_equal(dispatched_one_arg.__name__, 'dispatched_one_arg') - if sys.flags.optimize < 2: - assert_equal(dispatched_one_arg.__doc__, 'Docstring.') - - def test_interface(self): - - class MyArray(object): - def __array_function__(self, func, types, args, kwargs): - return (self, func, types, args, kwargs) - - original = MyArray() - (obj, func, types, args, kwargs) = dispatched_one_arg(original) - assert_(obj is original) - assert_(func is dispatched_one_arg) - assert_equal(set(types), {MyArray}) - # assert_equal uses the overloaded np.iscomplexobj() internally - assert_(args == (original,)) - assert_equal(kwargs, {}) - - def test_not_implemented(self): - - class MyArray(object): - def __array_function__(self, func, types, args, kwargs): - return NotImplemented - - array = MyArray() - with assert_raises_regex(TypeError, 'no implementation found'): - dispatched_one_arg(array) - - -@requires_array_function -class TestVerifyMatchingSignatures(object): - - def test_verify_matching_signatures(self): - - verify_matching_signatures(lambda x: 0, lambda x: 0) - verify_matching_signatures(lambda x=None: 0, lambda x=None: 0) - verify_matching_signatures(lambda x=1: 0, lambda x=None: 0) - - with assert_raises(RuntimeError): - verify_matching_signatures(lambda a: 0, lambda b: 0) - with assert_raises(RuntimeError): - verify_matching_signatures(lambda x: 0, lambda x=None: 0) - with assert_raises(RuntimeError): - verify_matching_signatures(lambda x=None: 0, lambda y=None: 
0) - with assert_raises(RuntimeError): - verify_matching_signatures(lambda x=1: 0, lambda y=1: 0) - - def test_array_function_dispatch(self): - - with assert_raises(RuntimeError): - @array_function_dispatch(lambda x: (x,)) - def f(y): - pass - - # should not raise - @array_function_dispatch(lambda x: (x,), verify=False) - def f(y): - pass - - -def _new_duck_type_and_implements(): - """Create a duck array type and implements functions.""" - HANDLED_FUNCTIONS = {} - - class MyArray(object): - def __array_function__(self, func, types, args, kwargs): - if func not in HANDLED_FUNCTIONS: - return NotImplemented - if not all(issubclass(t, MyArray) for t in types): - return NotImplemented - return HANDLED_FUNCTIONS[func](*args, **kwargs) - - def implements(numpy_function): - """Register an __array_function__ implementations.""" - def decorator(func): - HANDLED_FUNCTIONS[numpy_function] = func - return func - return decorator - - return (MyArray, implements) - - -@requires_array_function -class TestArrayFunctionImplementation(object): - - def test_one_arg(self): - MyArray, implements = _new_duck_type_and_implements() - - @implements(dispatched_one_arg) - def _(array): - return 'myarray' - - assert_equal(dispatched_one_arg(1), 'original') - assert_equal(dispatched_one_arg(MyArray()), 'myarray') - - def test_optional_args(self): - MyArray, implements = _new_duck_type_and_implements() - - @array_function_dispatch(lambda array, option=None: (array,)) - def func_with_option(array, option='default'): - return option - - @implements(func_with_option) - def my_array_func_with_option(array, new_option='myarray'): - return new_option - - # we don't need to implement every option on __array_function__ - # implementations - assert_equal(func_with_option(1), 'default') - assert_equal(func_with_option(1, option='extra'), 'extra') - assert_equal(func_with_option(MyArray()), 'myarray') - with assert_raises(TypeError): - func_with_option(MyArray(), option='extra') - - # but new options on 
implementations can't be used - result = my_array_func_with_option(MyArray(), new_option='yes') - assert_equal(result, 'yes') - with assert_raises(TypeError): - func_with_option(MyArray(), new_option='no') - - def test_not_implemented(self): - MyArray, implements = _new_duck_type_and_implements() - - @array_function_dispatch(lambda array: (array,), module='my') - def func(array): - return array - - array = np.array(1) - assert_(func(array) is array) - assert_equal(func.__module__, 'my') - - with assert_raises_regex( - TypeError, "no implementation found for 'my.func'"): - func(MyArray()) - - -class TestNDArrayMethods(object): - - def test_repr(self): - # gh-12162: should still be defined even if __array_function__ doesn't - # implement np.array_repr() - - class MyArray(np.ndarray): - def __array_function__(*args, **kwargs): - return NotImplemented - - array = np.array(1).view(MyArray) - assert_equal(repr(array), 'MyArray(1)') - assert_equal(str(array), '1') - - -class TestNumPyFunctions(object): - - def test_set_module(self): - assert_equal(np.sum.__module__, 'numpy') - assert_equal(np.char.equal.__module__, 'numpy.char') - assert_equal(np.fft.fft.__module__, 'numpy.fft') - assert_equal(np.linalg.solve.__module__, 'numpy.linalg') - - def test_inspect_sum(self): - signature = inspect.signature(np.sum) - assert_('axis' in signature.parameters) - - @requires_array_function - def test_override_sum(self): - MyArray, implements = _new_duck_type_and_implements() - - @implements(np.sum) - def _(array): - return 'yes' - - assert_equal(np.sum(MyArray()), 'yes') - - @requires_array_function - def test_sum_on_mock_array(self): - - # We need a proxy for mocks because __array_function__ is only looked - # up in the class dict - class ArrayProxy: - def __init__(self, value): - self.value = value - def __array_function__(self, *args, **kwargs): - return self.value.__array_function__(*args, **kwargs) - def __array__(self, *args, **kwargs): - return self.value.__array__(*args, 
**kwargs) - - proxy = ArrayProxy(mock.Mock(spec=ArrayProxy)) - proxy.value.__array_function__.return_value = 1 - result = np.sum(proxy) - assert_equal(result, 1) - proxy.value.__array_function__.assert_called_once_with( - np.sum, (ArrayProxy,), (proxy,), {}) - proxy.value.__array__.assert_not_called() - - @requires_array_function - def test_sum_forwarding_implementation(self): - - class MyArray(np.ndarray): - - def sum(self, axis, out): - return 'summed' - - def __array_function__(self, func, types, args, kwargs): - return super().__array_function__(func, types, args, kwargs) - - # note: the internal implementation of np.sum() calls the .sum() method - array = np.array(1).view(MyArray) - assert_equal(np.sum(array), 'summed') diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_print.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_print.py deleted file mode 100644 index c5c091e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_print.py +++ /dev/null @@ -1,205 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys - -import pytest - -import numpy as np -from numpy.testing import assert_, assert_equal -from numpy.core.tests._locales import CommaDecimalPointLocale - - -if sys.version_info[0] >= 3: - from io import StringIO -else: - from StringIO import StringIO - -_REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'} - - -@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) -def test_float_types(tp): - """ Check formatting. - - This is only for the str function, and only for simple types. - The precision of np.float32 and np.longdouble aren't the same as the - python float precision. 
- - """ - for x in [0, 1, -1, 1e20]: - assert_equal(str(tp(x)), str(float(x)), - err_msg='Failed str formatting for type %s' % tp) - - if tp(1e16).itemsize > 4: - assert_equal(str(tp(1e16)), str(float('1e16')), - err_msg='Failed str formatting for type %s' % tp) - else: - ref = '1e+16' - assert_equal(str(tp(1e16)), ref, - err_msg='Failed str formatting for type %s' % tp) - - -@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) -def test_nan_inf_float(tp): - """ Check formatting of nan & inf. - - This is only for the str function, and only for simple types. - The precision of np.float32 and np.longdouble aren't the same as the - python float precision. - - """ - for x in [np.inf, -np.inf, np.nan]: - assert_equal(str(tp(x)), _REF[x], - err_msg='Failed str formatting for type %s' % tp) - - -@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble]) -def test_complex_types(tp): - """Check formatting of complex types. - - This is only for the str function, and only for simple types. - The precision of np.float32 and np.longdouble aren't the same as the - python float precision. 
- - """ - for x in [0, 1, -1, 1e20]: - assert_equal(str(tp(x)), str(complex(x)), - err_msg='Failed str formatting for type %s' % tp) - assert_equal(str(tp(x*1j)), str(complex(x*1j)), - err_msg='Failed str formatting for type %s' % tp) - assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j)), - err_msg='Failed str formatting for type %s' % tp) - - if tp(1e16).itemsize > 8: - assert_equal(str(tp(1e16)), str(complex(1e16)), - err_msg='Failed str formatting for type %s' % tp) - else: - ref = '(1e+16+0j)' - assert_equal(str(tp(1e16)), ref, - err_msg='Failed str formatting for type %s' % tp) - - -@pytest.mark.parametrize('dtype', [np.complex64, np.cdouble, np.clongdouble]) -def test_complex_inf_nan(dtype): - """Check inf/nan formatting of complex types.""" - TESTS = { - complex(np.inf, 0): "(inf+0j)", - complex(0, np.inf): "infj", - complex(-np.inf, 0): "(-inf+0j)", - complex(0, -np.inf): "-infj", - complex(np.inf, 1): "(inf+1j)", - complex(1, np.inf): "(1+infj)", - complex(-np.inf, 1): "(-inf+1j)", - complex(1, -np.inf): "(1-infj)", - complex(np.nan, 0): "(nan+0j)", - complex(0, np.nan): "nanj", - complex(-np.nan, 0): "(nan+0j)", - complex(0, -np.nan): "nanj", - complex(np.nan, 1): "(nan+1j)", - complex(1, np.nan): "(1+nanj)", - complex(-np.nan, 1): "(nan+1j)", - complex(1, -np.nan): "(1+nanj)", - } - for c, s in TESTS.items(): - assert_equal(str(dtype(c)), s) - - -# print tests -def _test_redirected_print(x, tp, ref=None): - file = StringIO() - file_tp = StringIO() - stdout = sys.stdout - try: - sys.stdout = file_tp - print(tp(x)) - sys.stdout = file - if ref: - print(ref) - else: - print(x) - finally: - sys.stdout = stdout - - assert_equal(file.getvalue(), file_tp.getvalue(), - err_msg='print failed for type%s' % tp) - - -@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) -def test_float_type_print(tp): - """Check formatting when using print """ - for x in [0, 1, -1, 1e20]: - _test_redirected_print(float(x), tp) - - for x in [np.inf, -np.inf, 
np.nan]: - _test_redirected_print(float(x), tp, _REF[x]) - - if tp(1e16).itemsize > 4: - _test_redirected_print(float(1e16), tp) - else: - ref = '1e+16' - _test_redirected_print(float(1e16), tp, ref) - - -@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble]) -def test_complex_type_print(tp): - """Check formatting when using print """ - # We do not create complex with inf/nan directly because the feature is - # missing in python < 2.6 - for x in [0, 1, -1, 1e20]: - _test_redirected_print(complex(x), tp) - - if tp(1e16).itemsize > 8: - _test_redirected_print(complex(1e16), tp) - else: - ref = '(1e+16+0j)' - _test_redirected_print(complex(1e16), tp, ref) - - _test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)') - _test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)') - _test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)') - - -def test_scalar_format(): - """Test the str.format method with NumPy scalar types""" - tests = [('{0}', True, np.bool_), - ('{0}', False, np.bool_), - ('{0:d}', 130, np.uint8), - ('{0:d}', 50000, np.uint16), - ('{0:d}', 3000000000, np.uint32), - ('{0:d}', 15000000000000000000, np.uint64), - ('{0:d}', -120, np.int8), - ('{0:d}', -30000, np.int16), - ('{0:d}', -2000000000, np.int32), - ('{0:d}', -7000000000000000000, np.int64), - ('{0:g}', 1.5, np.float16), - ('{0:g}', 1.5, np.float32), - ('{0:g}', 1.5, np.float64), - ('{0:g}', 1.5, np.longdouble), - ('{0:g}', 1.5+0.5j, np.complex64), - ('{0:g}', 1.5+0.5j, np.complex128), - ('{0:g}', 1.5+0.5j, np.clongdouble)] - - for (fmat, val, valtype) in tests: - try: - assert_equal(fmat.format(val), fmat.format(valtype(val)), - "failed with val %s, type %s" % (val, valtype)) - except ValueError as e: - assert_(False, - "format raised exception (fmt='%s', val=%s, type=%s, exc='%s')" % - (fmat, repr(val), repr(valtype), str(e))) - - -# -# Locale tests: scalar types formatting should be independent of the locale -# - -class 
TestCommaDecimalPointLocale(CommaDecimalPointLocale): - - def test_locale_single(self): - assert_equal(str(np.float32(1.2)), str(float(1.2))) - - def test_locale_double(self): - assert_equal(str(np.double(1.2)), str(float(1.2))) - - def test_locale_longdouble(self): - assert_equal(str(np.longdouble('1.2')), str(float(1.2))) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_records.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_records.py deleted file mode 100644 index c1b7941..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_records.py +++ /dev/null @@ -1,501 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -try: - # Accessing collections abstract classes from collections - # has been deprecated since Python 3.3 - import collections.abc as collections_abc -except ImportError: - import collections as collections_abc -import textwrap -from os import path -import pytest - -import numpy as np -from numpy.compat import Path -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_array_almost_equal, - assert_raises, temppath - ) -from numpy.compat import pickle - - -class TestFromrecords(object): - def test_fromrecords(self): - r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]], - names='col1,col2,col3') - assert_equal(r[0].item(), (456, 'dbe', 1.2)) - assert_equal(r['col1'].dtype.kind, 'i') - if sys.version_info[0] >= 3: - assert_equal(r['col2'].dtype.kind, 'U') - assert_equal(r['col2'].dtype.itemsize, 12) - else: - assert_equal(r['col2'].dtype.kind, 'S') - assert_equal(r['col2'].dtype.itemsize, 3) - assert_equal(r['col3'].dtype.kind, 'f') - - def test_fromrecords_0len(self): - """ Verify fromrecords works with a 0-length input """ - dtype = [('a', float), ('b', float)] - r = np.rec.fromrecords([], dtype=dtype) - assert_equal(r.shape, (0,)) - - def test_fromrecords_2d(self): - data = [ - [(1, 2), (3, 4), (5, 6)], - [(6, 5), (4, 3), (2, 1)] - ] - 
expected_a = [[1, 3, 5], [6, 4, 2]] - expected_b = [[2, 4, 6], [5, 3, 1]] - - # try with dtype - r1 = np.rec.fromrecords(data, dtype=[('a', int), ('b', int)]) - assert_equal(r1['a'], expected_a) - assert_equal(r1['b'], expected_b) - - # try with names - r2 = np.rec.fromrecords(data, names=['a', 'b']) - assert_equal(r2['a'], expected_a) - assert_equal(r2['b'], expected_b) - - assert_equal(r1, r2) - - def test_method_array(self): - r = np.rec.array(b'abcdefg' * 100, formats='i2,a3,i4', shape=3, byteorder='big') - assert_equal(r[1].item(), (25444, b'efg', 1633837924)) - - def test_method_array2(self): - r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'), - (6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1') - assert_equal(r[1].item(), (2, 22.0, b'b')) - - def test_recarray_slices(self): - r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'), - (6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1') - assert_equal(r[1::2][1].item(), (4, 44.0, b'd')) - - def test_recarray_fromarrays(self): - x1 = np.array([1, 2, 3, 4]) - x2 = np.array(['a', 'dd', 'xyz', '12']) - x3 = np.array([1.1, 2, 3, 4]) - r = np.rec.fromarrays([x1, x2, x3], names='a,b,c') - assert_equal(r[1].item(), (2, 'dd', 2.0)) - x1[1] = 34 - assert_equal(r.a, np.array([1, 2, 3, 4])) - - def test_recarray_fromfile(self): - data_dir = path.join(path.dirname(__file__), 'data') - filename = path.join(data_dir, 'recarray_from_file.fits') - fd = open(filename, 'rb') - fd.seek(2880 * 2) - r1 = np.rec.fromfile(fd, formats='f8,i4,a5', shape=3, byteorder='big') - fd.seek(2880 * 2) - r2 = np.rec.array(fd, formats='f8,i4,a5', shape=3, byteorder='big') - fd.close() - assert_equal(r1, r2) - - def test_recarray_from_obj(self): - count = 10 - a = np.zeros(count, dtype='O') - b = np.zeros(count, dtype='f8') - c = np.zeros(count, dtype='f8') - for i in range(len(a)): - a[i] = list(range(1, 10)) - - mine = np.rec.fromarrays([a, b, c], names='date,data1,data2') - for i 
in range(len(a)): - assert_((mine.date[i] == list(range(1, 10)))) - assert_((mine.data1[i] == 0.0)) - assert_((mine.data2[i] == 0.0)) - - def test_recarray_repr(self): - a = np.array([(1, 0.1), (2, 0.2)], - dtype=[('foo', ' 2) & (a < 6)) - xb = np.where((b > 2) & (b < 6)) - ya = ((a > 2) & (a < 6)) - yb = ((b > 2) & (b < 6)) - assert_array_almost_equal(xa, ya.nonzero()) - assert_array_almost_equal(xb, yb.nonzero()) - assert_(np.all(a[ya] > 0.5)) - assert_(np.all(b[yb] > 0.5)) - - def test_endian_where(self): - # GitHub issue #369 - net = np.zeros(3, dtype='>f4') - net[1] = 0.00458849 - net[2] = 0.605202 - max_net = net.max() - test = np.where(net <= 0., max_net, net) - correct = np.array([ 0.60520202, 0.00458849, 0.60520202]) - assert_array_almost_equal(test, correct) - - def test_endian_recarray(self): - # Ticket #2185 - dt = np.dtype([ - ('head', '>u4'), - ('data', '>u4', 2), - ]) - buf = np.recarray(1, dtype=dt) - buf[0]['head'] = 1 - buf[0]['data'][:] = [1, 1] - - h = buf[0]['head'] - d = buf[0]['data'][0] - buf[0]['head'] = h - buf[0]['data'][0] = d - assert_(buf[0]['head'] == 1) - - def test_mem_dot(self): - # Ticket #106 - x = np.random.randn(0, 1) - y = np.random.randn(10, 1) - # Dummy array to detect bad memory access: - _z = np.ones(10) - _dummy = np.empty((0, 10)) - z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides) - np.dot(x, np.transpose(y), out=z) - assert_equal(_z, np.ones(10)) - # Do the same for the built-in dot: - np.core.multiarray.dot(x, np.transpose(y), out=z) - assert_equal(_z, np.ones(10)) - - def test_arange_endian(self): - # Ticket #111 - ref = np.arange(10) - x = np.arange(10, dtype='= (3, 4): - # encoding='bytes' was added in Py3.4 - for original, data in test_data: - result = pickle.loads(data, encoding='bytes') - assert_equal(result, original) - - if isinstance(result, np.ndarray) and result.dtype.names is not None: - for name in result.dtype.names: - assert_(isinstance(name, str)) - - def test_pickle_dtype(self): - 
# Ticket #251 - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - pickle.dumps(float, protocol=proto) - - def test_swap_real(self): - # Ticket #265 - assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0) - assert_equal(np.arange(4, dtype=' 1 and x['two'] > 2) - - def test_method_args(self): - # Make sure methods and functions have same default axis - # keyword and arguments - funcs1 = ['argmax', 'argmin', 'sum', ('product', 'prod'), - ('sometrue', 'any'), - ('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'), - 'ptp', 'cumprod', 'prod', 'std', 'var', 'mean', - 'round', 'min', 'max', 'argsort', 'sort'] - funcs2 = ['compress', 'take', 'repeat'] - - for func in funcs1: - arr = np.random.rand(8, 7) - arr2 = arr.copy() - if isinstance(func, tuple): - func_meth = func[1] - func = func[0] - else: - func_meth = func - res1 = getattr(arr, func_meth)() - res2 = getattr(np, func)(arr2) - if res1 is None: - res1 = arr - - if res1.dtype.kind in 'uib': - assert_((res1 == res2).all(), func) - else: - assert_(abs(res1-res2).max() < 1e-8, func) - - for func in funcs2: - arr1 = np.random.rand(8, 7) - arr2 = np.random.rand(8, 7) - res1 = None - if func == 'compress': - arr1 = arr1.ravel() - res1 = getattr(arr2, func)(arr1) - else: - arr2 = (15*arr2).astype(int).ravel() - if res1 is None: - res1 = getattr(arr1, func)(arr2) - res2 = getattr(np, func)(arr1, arr2) - assert_(abs(res1-res2).max() < 1e-8, func) - - def test_mem_lexsort_strings(self): - # Ticket #298 - lst = ['abc', 'cde', 'fgh'] - np.lexsort((lst,)) - - def test_fancy_index(self): - # Ticket #302 - x = np.array([1, 2])[np.array([0])] - assert_equal(x.shape, (1,)) - - def test_recarray_copy(self): - # Ticket #312 - dt = [('x', np.int16), ('y', np.float64)] - ra = np.array([(1, 2.3)], dtype=dt) - rb = np.rec.array(ra, dtype=dt) - rb['x'] = 2. 
- assert_(ra['x'] != rb['x']) - - def test_rec_fromarray(self): - # Ticket #322 - x1 = np.array([[1, 2], [3, 4], [5, 6]]) - x2 = np.array(['a', 'dd', 'xyz']) - x3 = np.array([1.1, 2, 3]) - np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8") - - def test_object_array_assign(self): - x = np.empty((2, 2), object) - x.flat[2] = (1, 2, 3) - assert_equal(x.flat[2], (1, 2, 3)) - - def test_ndmin_float64(self): - # Ticket #324 - x = np.array([1, 2, 3], dtype=np.float64) - assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2) - assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2) - - def test_ndmin_order(self): - # Issue #465 and related checks - assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous) - assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous) - assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous) - assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous) - - def test_mem_axis_minimization(self): - # Ticket #327 - data = np.arange(5) - data = np.add.outer(data, data) - - def test_mem_float_imag(self): - # Ticket #330 - np.float64(1.0).imag - - def test_dtype_tuple(self): - # Ticket #334 - assert_(np.dtype('i4') == np.dtype(('i4', ()))) - - def test_dtype_posttuple(self): - # Ticket #335 - np.dtype([('col1', '()i4')]) - - def test_numeric_carray_compare(self): - # Ticket #341 - assert_equal(np.array(['X'], 'c'), b'X') - - def test_string_array_size(self): - # Ticket #342 - assert_raises(ValueError, - np.array, [['X'], ['X', 'X', 'X']], '|S1') - - def test_dtype_repr(self): - # Ticket #344 - dt1 = np.dtype(('uint32', 2)) - dt2 = np.dtype(('uint32', (2,))) - assert_equal(dt1.__repr__(), dt2.__repr__()) - - def test_reshape_order(self): - # Make sure reshape order works. 
- a = np.arange(6).reshape(2, 3, order='F') - assert_equal(a, [[0, 2, 4], [1, 3, 5]]) - a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) - b = a[:, 1] - assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]]) - - def test_reshape_zero_strides(self): - # Issue #380, test reshaping of zero strided arrays - a = np.ones(1) - a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,)) - assert_(a.reshape(5, 1).strides[0] == 0) - - def test_reshape_zero_size(self): - # GitHub Issue #2700, setting shape failed for 0-sized arrays - a = np.ones((0, 2)) - a.shape = (-1, 2) - - # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides. - # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous. - @pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max, - reason="Using relaxed stride checking") - def test_reshape_trailing_ones_strides(self): - # GitHub issue gh-2949, bad strides for trailing ones of new shape - a = np.zeros(12, dtype=np.int32)[::2] # not contiguous - strides_c = (16, 8, 8, 8) - strides_f = (8, 24, 48, 48) - assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c) - assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f) - assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4)) - - def test_repeat_discont(self): - # Ticket #352 - a = np.arange(12).reshape(4, 3)[:, 2] - assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11]) - - def test_array_index(self): - # Make sure optimization is not called in this case. 
- a = np.array([1, 2, 3]) - a2 = np.array([[1, 2, 3]]) - assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)]) - - def test_object_argmax(self): - a = np.array([1, 2, 3], dtype=object) - assert_(a.argmax() == 2) - - def test_recarray_fields(self): - # Ticket #372 - dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')]) - dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')]) - for a in [np.array([(1, 2), (3, 4)], "i4,i4"), - np.rec.array([(1, 2), (3, 4)], "i4,i4"), - np.rec.array([(1, 2), (3, 4)]), - np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"), - np.rec.fromarrays([(1, 2), (3, 4)])]: - assert_(a.dtype in [dt0, dt1]) - - def test_random_shuffle(self): - # Ticket #374 - a = np.arange(5).reshape((5, 1)) - b = a.copy() - np.random.shuffle(b) - assert_equal(np.sort(b, axis=0), a) - - def test_refcount_vdot(self): - # Changeset #3443 - _assert_valid_refcount(np.vdot) - - def test_startswith(self): - ca = np.char.array(['Hi', 'There']) - assert_equal(ca.startswith('H'), [True, False]) - - def test_noncommutative_reduce_accumulate(self): - # Ticket #413 - tosubtract = np.arange(5) - todivide = np.array([2.0, 0.5, 0.25]) - assert_equal(np.subtract.reduce(tosubtract), -10) - assert_equal(np.divide.reduce(todivide), 16.0) - assert_array_equal(np.subtract.accumulate(tosubtract), - np.array([0, -1, -3, -6, -10])) - assert_array_equal(np.divide.accumulate(todivide), - np.array([2., 4., 16.])) - - def test_convolve_empty(self): - # Convolve should raise an error for empty input array. 
- assert_raises(ValueError, np.convolve, [], [1]) - assert_raises(ValueError, np.convolve, [1], []) - - def test_multidim_byteswap(self): - # Ticket #449 - r = np.array([(1, (0, 1, 2))], dtype="i2,3i2") - assert_array_equal(r.byteswap(), - np.array([(256, (0, 256, 512))], r.dtype)) - - def test_string_NULL(self): - # Changeset 3557 - assert_equal(np.array("a\x00\x0b\x0c\x00").item(), - 'a\x00\x0b\x0c') - - def test_junk_in_string_fields_of_recarray(self): - # Ticket #483 - r = np.array([[b'abc']], dtype=[('var1', '|S20')]) - assert_(asbytes(r['var1'][0][0]) == b'abc') - - def test_take_output(self): - # Ensure that 'take' honours output parameter. - x = np.arange(12).reshape((3, 4)) - a = np.take(x, [0, 2], axis=1) - b = np.zeros_like(a) - np.take(x, [0, 2], axis=1, out=b) - assert_array_equal(a, b) - - def test_take_object_fail(self): - # Issue gh-3001 - d = 123. - a = np.array([d, 1], dtype=object) - if HAS_REFCOUNT: - ref_d = sys.getrefcount(d) - try: - a.take([0, 100]) - except IndexError: - pass - if HAS_REFCOUNT: - assert_(ref_d == sys.getrefcount(d)) - - def test_array_str_64bit(self): - # Ticket #501 - s = np.array([1, np.nan], dtype=np.float64) - with np.errstate(all='raise'): - np.array_str(s) # Should succeed - - def test_frompyfunc_endian(self): - # Ticket #503 - from math import radians - uradians = np.frompyfunc(radians, 1, 1) - big_endian = np.array([83.4, 83.5], dtype='>f8') - little_endian = np.array([83.4, 83.5], dtype=' object - # casting succeeds - def rs(): - x = np.ones([484, 286]) - y = np.zeros([484, 286]) - x |= y - - assert_raises(TypeError, rs) - - def test_unicode_scalar(self): - # Ticket #600 - x = np.array(["DROND", "DROND1"], dtype="U6") - el = x[1] - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - new = pickle.loads(pickle.dumps(el, protocol=proto)) - assert_equal(new, el) - - def test_arange_non_native_dtype(self): - # Ticket #616 - for T in ('>f4', ' 0)] = v - - assert_raises(IndexError, ia, x, s, np.zeros(9, dtype=float)) - 
assert_raises(IndexError, ia, x, s, np.zeros(11, dtype=float)) - - # Old special case (different code path): - assert_raises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float)) - assert_raises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float)) - - def test_mem_scalar_indexing(self): - # Ticket #603 - x = np.array([0], dtype=float) - index = np.array(0, dtype=np.int32) - x[index] - - def test_binary_repr_0_width(self): - assert_equal(np.binary_repr(0, width=3), '000') - - def test_fromstring(self): - assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"), - [12, 9, 9]) - - def test_searchsorted_variable_length(self): - x = np.array(['a', 'aa', 'b']) - y = np.array(['d', 'e']) - assert_equal(x.searchsorted(y), [3, 3]) - - def test_string_argsort_with_zeros(self): - # Check argsort for strings containing zeros. - x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2") - assert_array_equal(x.argsort(kind='m'), np.array([1, 0])) - assert_array_equal(x.argsort(kind='q'), np.array([1, 0])) - - def test_string_sort_with_zeros(self): - # Check sort for strings containing zeros. - x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2") - y = np.frombuffer(b"\x00\x01\x00\x02", dtype="|S2") - assert_array_equal(np.sort(x, kind="q"), y) - - def test_copy_detection_zero_dim(self): - # Ticket #658 - np.indices((0, 3, 4)).T.reshape(-1, 3) - - def test_flat_byteorder(self): - # Ticket #657 - x = np.arange(10) - assert_array_equal(x.astype('>i4'), x.astype('i4').flat[:], x.astype('i4')): - x = np.array([-1, 0, 1], dtype=dt) - assert_equal(x.flat[0].dtype, x[0].dtype) - - def test_copy_detection_corner_case(self): - # Ticket #658 - np.indices((0, 3, 4)).T.reshape(-1, 3) - - # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides. - # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous, - # 0-sized reshape itself is tested elsewhere. 
- @pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max, - reason="Using relaxed stride checking") - def test_copy_detection_corner_case2(self): - # Ticket #771: strides are not set correctly when reshaping 0-sized - # arrays - b = np.indices((0, 3, 4)).T.reshape(-1, 3) - assert_equal(b.strides, (3 * b.itemsize, b.itemsize)) - - def test_object_array_refcounting(self): - # Ticket #633 - if not hasattr(sys, 'getrefcount'): - return - - # NB. this is probably CPython-specific - - cnt = sys.getrefcount - - a = object() - b = object() - c = object() - - cnt0_a = cnt(a) - cnt0_b = cnt(b) - cnt0_c = cnt(c) - - # -- 0d -> 1-d broadcast slice assignment - - arr = np.zeros(5, dtype=np.object_) - - arr[:] = a - assert_equal(cnt(a), cnt0_a + 5) - - arr[:] = b - assert_equal(cnt(a), cnt0_a) - assert_equal(cnt(b), cnt0_b + 5) - - arr[:2] = c - assert_equal(cnt(b), cnt0_b + 3) - assert_equal(cnt(c), cnt0_c + 2) - - del arr - - # -- 1-d -> 2-d broadcast slice assignment - - arr = np.zeros((5, 2), dtype=np.object_) - arr0 = np.zeros(2, dtype=np.object_) - - arr0[0] = a - assert_(cnt(a) == cnt0_a + 1) - arr0[1] = b - assert_(cnt(b) == cnt0_b + 1) - - arr[:, :] = arr0 - assert_(cnt(a) == cnt0_a + 6) - assert_(cnt(b) == cnt0_b + 6) - - arr[:, 0] = None - assert_(cnt(a) == cnt0_a + 1) - - del arr, arr0 - - # -- 2-d copying + flattening - - arr = np.zeros((5, 2), dtype=np.object_) - - arr[:, 0] = a - arr[:, 1] = b - assert_(cnt(a) == cnt0_a + 5) - assert_(cnt(b) == cnt0_b + 5) - - arr2 = arr.copy() - assert_(cnt(a) == cnt0_a + 10) - assert_(cnt(b) == cnt0_b + 10) - - arr2 = arr[:, 0].copy() - assert_(cnt(a) == cnt0_a + 10) - assert_(cnt(b) == cnt0_b + 5) - - arr2 = arr.flatten() - assert_(cnt(a) == cnt0_a + 10) - assert_(cnt(b) == cnt0_b + 10) - - del arr, arr2 - - # -- concatenate, repeat, take, choose - - arr1 = np.zeros((5, 1), dtype=np.object_) - arr2 = np.zeros((5, 1), dtype=np.object_) - - arr1[...] = a - arr2[...] 
= b - assert_(cnt(a) == cnt0_a + 5) - assert_(cnt(b) == cnt0_b + 5) - - tmp = np.concatenate((arr1, arr2)) - assert_(cnt(a) == cnt0_a + 5 + 5) - assert_(cnt(b) == cnt0_b + 5 + 5) - - tmp = arr1.repeat(3, axis=0) - assert_(cnt(a) == cnt0_a + 5 + 3*5) - - tmp = arr1.take([1, 2, 3], axis=0) - assert_(cnt(a) == cnt0_a + 5 + 3) - - x = np.array([[0], [1], [0], [1], [1]], int) - tmp = x.choose(arr1, arr2) - assert_(cnt(a) == cnt0_a + 5 + 2) - assert_(cnt(b) == cnt0_b + 5 + 3) - - del tmp # Avoid pyflakes unused variable warning - - def test_mem_custom_float_to_array(self): - # Ticket 702 - class MyFloat(object): - def __float__(self): - return 1.0 - - tmp = np.atleast_1d([MyFloat()]) - tmp.astype(float) # Should succeed - - def test_object_array_refcount_self_assign(self): - # Ticket #711 - class VictimObject(object): - deleted = False - - def __del__(self): - self.deleted = True - - d = VictimObject() - arr = np.zeros(5, dtype=np.object_) - arr[:] = d - del d - arr[:] = arr # refcount of 'd' might hit zero here - assert_(not arr[0].deleted) - arr[:] = arr # trying to induce a segfault by doing it again... - assert_(not arr[0].deleted) - - def test_mem_fromiter_invalid_dtype_string(self): - x = [1, 2, 3] - assert_raises(ValueError, - np.fromiter, [xi for xi in x], dtype='S') - - def test_reduce_big_object_array(self): - # Ticket #713 - oldsize = np.setbufsize(10*16) - a = np.array([None]*161, object) - assert_(not np.any(a)) - np.setbufsize(oldsize) - - def test_mem_0d_array_index(self): - # Ticket #714 - np.zeros(10)[np.array(0)] - - def test_nonnative_endian_fill(self): - # Non-native endian arrays were incorrectly filled with scalars - # before r5034. 
- if sys.byteorder == 'little': - dtype = np.dtype('>i4') - else: - dtype = np.dtype('= 3: - f = open(filename, 'rb') - xp = pickle.load(f, encoding='latin1') - f.close() - else: - f = open(filename) - xp = pickle.load(f) - f.close() - xpd = xp.astype(np.float64) - assert_((xp.__array_interface__['data'][0] != - xpd.__array_interface__['data'][0])) - - def test_compress_small_type(self): - # Ticket #789, changeset 5217. - # compress with out argument segfaulted if cannot cast safely - import numpy as np - a = np.array([[1, 2], [3, 4]]) - b = np.zeros((2, 1), dtype=np.single) - try: - a.compress([True, False], axis=1, out=b) - raise AssertionError("compress with an out which cannot be " - "safely casted should not return " - "successfully") - except TypeError: - pass - - def test_attributes(self): - # Ticket #791 - class TestArray(np.ndarray): - def __new__(cls, data, info): - result = np.array(data) - result = result.view(cls) - result.info = info - return result - - def __array_finalize__(self, obj): - self.info = getattr(obj, 'info', '') - - dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba') - assert_(dat.info == 'jubba') - dat.resize((4, 2)) - assert_(dat.info == 'jubba') - dat.sort() - assert_(dat.info == 'jubba') - dat.fill(2) - assert_(dat.info == 'jubba') - dat.put([2, 3, 4], [6, 3, 4]) - assert_(dat.info == 'jubba') - dat.setfield(4, np.int32, 0) - assert_(dat.info == 'jubba') - dat.setflags() - assert_(dat.info == 'jubba') - assert_(dat.all(1).info == 'jubba') - assert_(dat.any(1).info == 'jubba') - assert_(dat.argmax(1).info == 'jubba') - assert_(dat.argmin(1).info == 'jubba') - assert_(dat.argsort(1).info == 'jubba') - assert_(dat.astype(TestArray).info == 'jubba') - assert_(dat.byteswap().info == 'jubba') - assert_(dat.clip(2, 7).info == 'jubba') - assert_(dat.compress([0, 1, 1]).info == 'jubba') - assert_(dat.conj().info == 'jubba') - assert_(dat.conjugate().info == 'jubba') - assert_(dat.copy().info == 'jubba') - dat2 = TestArray([2, 3, 1, 0], 
'jubba') - choices = [[0, 1, 2, 3], [10, 11, 12, 13], - [20, 21, 22, 23], [30, 31, 32, 33]] - assert_(dat2.choose(choices).info == 'jubba') - assert_(dat.cumprod(1).info == 'jubba') - assert_(dat.cumsum(1).info == 'jubba') - assert_(dat.diagonal().info == 'jubba') - assert_(dat.flatten().info == 'jubba') - assert_(dat.getfield(np.int32, 0).info == 'jubba') - assert_(dat.imag.info == 'jubba') - assert_(dat.max(1).info == 'jubba') - assert_(dat.mean(1).info == 'jubba') - assert_(dat.min(1).info == 'jubba') - assert_(dat.newbyteorder().info == 'jubba') - assert_(dat.prod(1).info == 'jubba') - assert_(dat.ptp(1).info == 'jubba') - assert_(dat.ravel().info == 'jubba') - assert_(dat.real.info == 'jubba') - assert_(dat.repeat(2).info == 'jubba') - assert_(dat.reshape((2, 4)).info == 'jubba') - assert_(dat.round().info == 'jubba') - assert_(dat.squeeze().info == 'jubba') - assert_(dat.std(1).info == 'jubba') - assert_(dat.sum(1).info == 'jubba') - assert_(dat.swapaxes(0, 1).info == 'jubba') - assert_(dat.take([2, 3, 5]).info == 'jubba') - assert_(dat.transpose().info == 'jubba') - assert_(dat.T.info == 'jubba') - assert_(dat.var(1).info == 'jubba') - assert_(dat.view(TestArray).info == 'jubba') - # These methods do not preserve subclasses - assert_(type(dat.nonzero()[0]) is np.ndarray) - assert_(type(dat.nonzero()[1]) is np.ndarray) - - def test_recarray_tolist(self): - # Ticket #793, changeset r5215 - # Comparisons fail for NaN, so we can't use random memory - # for the test. 
- buf = np.zeros(40, dtype=np.int8) - a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf) - b = a.tolist() - assert_( a[0].tolist() == b[0]) - assert_( a[1].tolist() == b[1]) - - def test_nonscalar_item_method(self): - # Make sure that .item() fails graciously when it should - a = np.arange(5) - assert_raises(ValueError, a.item) - - def test_char_array_creation(self): - a = np.array('123', dtype='c') - b = np.array([b'1', b'2', b'3']) - assert_equal(a, b) - - def test_unaligned_unicode_access(self): - # Ticket #825 - for i in range(1, 9): - msg = 'unicode offset: %d chars' % i - t = np.dtype([('a', 'S%d' % i), ('b', 'U2')]) - x = np.array([(b'a', u'b')], dtype=t) - if sys.version_info[0] >= 3: - assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg) - else: - assert_equal(str(x), "[('a', u'b')]", err_msg=msg) - - def test_sign_for_complex_nan(self): - # Ticket 794. - with np.errstate(invalid='ignore'): - C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan]) - have = np.sign(C) - want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan]) - assert_equal(have, want) - - def test_for_equal_names(self): - # Ticket #674 - dt = np.dtype([('foo', float), ('bar', float)]) - a = np.zeros(10, dt) - b = list(a.dtype.names) - b[0] = "notfoo" - a.dtype.names = b - assert_(a.dtype.names[0] == "notfoo") - assert_(a.dtype.names[1] == "bar") - - def test_for_object_scalar_creation(self): - # Ticket #816 - a = np.object_() - b = np.object_(3) - b2 = np.object_(3.0) - c = np.object_([4, 5]) - d = np.object_([None, {}, []]) - assert_(a is None) - assert_(type(b) is int) - assert_(type(b2) is float) - assert_(type(c) is np.ndarray) - assert_(c.dtype == object) - assert_(d.dtype == object) - - def test_array_resize_method_system_error(self): - # Ticket #840 - order should be an invalid keyword. 
- x = np.array([[0, 1], [2, 3]]) - assert_raises(TypeError, x.resize, (2, 2), order='C') - - def test_for_zero_length_in_choose(self): - "Ticket #882" - a = np.array(1) - assert_raises(ValueError, lambda x: x.choose([]), a) - - def test_array_ndmin_overflow(self): - "Ticket #947." - assert_raises(ValueError, lambda: np.array([1], ndmin=33)) - - def test_void_scalar_with_titles(self): - # No ticket - data = [('john', 4), ('mary', 5)] - dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)] - arr = np.array(data, dtype=dtype1) - assert_(arr[0][0] == 'john') - assert_(arr[0][1] == 4) - - def test_void_scalar_constructor(self): - #Issue #1550 - - #Create test string data, construct void scalar from data and assert - #that void scalar contains original data. - test_string = np.array("test") - test_string_void_scalar = np.core.multiarray.scalar( - np.dtype(("V", test_string.dtype.itemsize)), test_string.tobytes()) - - assert_(test_string_void_scalar.view(test_string.dtype) == test_string) - - #Create record scalar, construct from data and assert that - #reconstructed scalar is correct. 
- test_record = np.ones((), "i,i") - test_record_void_scalar = np.core.multiarray.scalar( - test_record.dtype, test_record.tobytes()) - - assert_(test_record_void_scalar == test_record) - - # Test pickle and unpickle of void and record scalars - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - assert_(pickle.loads( - pickle.dumps(test_string, protocol=proto)) == test_string) - assert_(pickle.loads( - pickle.dumps(test_record, protocol=proto)) == test_record) - - @_no_tracing - def test_blasdot_uninitialized_memory(self): - # Ticket #950 - for m in [0, 1, 2]: - for n in [0, 1, 2]: - for k in range(3): - # Try to ensure that x->data contains non-zero floats - x = np.array([123456789e199], dtype=np.float64) - if IS_PYPY: - x.resize((m, 0), refcheck=False) - else: - x.resize((m, 0)) - y = np.array([123456789e199], dtype=np.float64) - if IS_PYPY: - y.resize((0, n), refcheck=False) - else: - y.resize((0, n)) - - # `dot` should just return zero (m, n) matrix - z = np.dot(x, y) - assert_(np.all(z == 0)) - assert_(z.shape == (m, n)) - - def test_zeros(self): - # Regression test for #1061. - # Set a size which cannot fit into a 64 bits signed integer - sz = 2 ** 64 - with assert_raises_regex(ValueError, - 'Maximum allowed dimension exceeded'): - np.empty(sz) - - def test_huge_arange(self): - # Regression test for #1062. - # Set a size which cannot fit into a 64 bits signed integer - sz = 2 ** 64 - with assert_raises_regex(ValueError, - 'Maximum allowed size exceeded'): - np.arange(sz) - assert_(np.size == sz) - - def test_fromiter_bytes(self): - # Ticket #1058 - a = np.fromiter(list(range(10)), dtype='b') - b = np.fromiter(list(range(10)), dtype='B') - assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) - assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) - - def test_array_from_sequence_scalar_array(self): - # Ticket #1078: segfaults when creating an array with a sequence of - # 0d arrays. 
- a = np.array((np.ones(2), np.array(2))) - assert_equal(a.shape, (2,)) - assert_equal(a.dtype, np.dtype(object)) - assert_equal(a[0], np.ones(2)) - assert_equal(a[1], np.array(2)) - - a = np.array(((1,), np.array(1))) - assert_equal(a.shape, (2,)) - assert_equal(a.dtype, np.dtype(object)) - assert_equal(a[0], (1,)) - assert_equal(a[1], np.array(1)) - - def test_array_from_sequence_scalar_array2(self): - # Ticket #1081: weird array with strange input... - t = np.array([np.array([]), np.array(0, object)]) - assert_equal(t.shape, (2,)) - assert_equal(t.dtype, np.dtype(object)) - - def test_array_too_big(self): - # Ticket #1080. - assert_raises(ValueError, np.zeros, [975]*7, np.int8) - assert_raises(ValueError, np.zeros, [26244]*5, np.int8) - - def test_dtype_keyerrors_(self): - # Ticket #1106. - dt = np.dtype([('f1', np.uint)]) - assert_raises(KeyError, dt.__getitem__, "f2") - assert_raises(IndexError, dt.__getitem__, 1) - assert_raises(TypeError, dt.__getitem__, 0.0) - - def test_lexsort_buffer_length(self): - # Ticket #1217, don't segfault. - a = np.ones(100, dtype=np.int8) - b = np.ones(100, dtype=np.int32) - i = np.lexsort((a[::-1], b)) - assert_equal(i, np.arange(100, dtype=int)) - - def test_object_array_to_fixed_string(self): - # Ticket #1235. - a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_) - b = np.array(a, dtype=(np.str_, 8)) - assert_equal(a, b) - c = np.array(a, dtype=(np.str_, 5)) - assert_equal(c, np.array(['abcde', 'ijklm'])) - d = np.array(a, dtype=(np.str_, 12)) - assert_equal(a, d) - e = np.empty((2, ), dtype=(np.str_, 8)) - e[:] = a[:] - assert_equal(a, e) - - def test_unicode_to_string_cast(self): - # Ticket #1240. 
- a = np.array([[u'abc', u'\u03a3'], - [u'asdf', u'erw']], - dtype='U') - assert_raises(UnicodeEncodeError, np.array, a, 'S4') - - def test_mixed_string_unicode_array_creation(self): - a = np.array(['1234', u'123']) - assert_(a.itemsize == 16) - a = np.array([u'123', '1234']) - assert_(a.itemsize == 16) - a = np.array(['1234', u'123', '12345']) - assert_(a.itemsize == 20) - a = np.array([u'123', '1234', u'12345']) - assert_(a.itemsize == 20) - a = np.array([u'123', '1234', u'1234']) - assert_(a.itemsize == 16) - - def test_misaligned_objects_segfault(self): - # Ticket #1198 and #1267 - a1 = np.zeros((10,), dtype='O,c') - a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10') - a1['f0'] = a2 - repr(a1) - np.argmax(a1['f0']) - a1['f0'][1] = "FOO" - a1['f0'] = "FOO" - np.array(a1['f0'], dtype='S') - np.nonzero(a1['f0']) - a1.sort() - copy.deepcopy(a1) - - def test_misaligned_scalars_segfault(self): - # Ticket #1267 - s1 = np.array(('a', 'Foo'), dtype='c,O') - s2 = np.array(('b', 'Bar'), dtype='c,O') - s1['f1'] = s2['f1'] - s1['f1'] = 'Baz' - - def test_misaligned_dot_product_objects(self): - # Ticket #1267 - # This didn't require a fix, but it's worth testing anyway, because - # it may fail if .dot stops enforcing the arrays to be BEHAVED - a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c') - b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c') - np.dot(a['f0'], b['f0']) - - def test_byteswap_complex_scalar(self): - # Ticket #1259 and gh-441 - for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]: - z = np.array([2.2-1.1j], dtype) - x = z[0] # always native-endian - y = x.byteswap() - if x.dtype.byteorder == z.dtype.byteorder: - # little-endian machine - assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype.newbyteorder())) - else: - # big-endian machine - assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype)) - # double check real and imaginary parts: - assert_equal(x.real, y.real.byteswap()) - 
assert_equal(x.imag, y.imag.byteswap()) - - def test_structured_arrays_with_objects1(self): - # Ticket #1299 - stra = 'aaaa' - strb = 'bbbb' - x = np.array([[(0, stra), (1, strb)]], 'i8,O') - x[x.nonzero()] = x.ravel()[:1] - assert_(x[0, 1] == x[0, 0]) - - @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") - def test_structured_arrays_with_objects2(self): - # Ticket #1299 second test - stra = 'aaaa' - strb = 'bbbb' - numb = sys.getrefcount(strb) - numa = sys.getrefcount(stra) - x = np.array([[(0, stra), (1, strb)]], 'i8,O') - x[x.nonzero()] = x.ravel()[:1] - assert_(sys.getrefcount(strb) == numb) - assert_(sys.getrefcount(stra) == numa + 2) - - def test_duplicate_title_and_name(self): - # Ticket #1254 - dtspec = [(('a', 'a'), 'i'), ('b', 'i')] - assert_raises(ValueError, np.dtype, dtspec) - - def test_signed_integer_division_overflow(self): - # Ticket #1317. - def test_type(t): - min = np.array([np.iinfo(t).min]) - min //= -1 - - with np.errstate(divide="ignore"): - for t in (np.int8, np.int16, np.int32, np.int64, int, np.compat.long): - test_type(t) - - def test_buffer_hashlib(self): - try: - from hashlib import md5 - except ImportError: - from md5 import new as md5 - - x = np.array([1, 2, 3], dtype=np.dtype('c') - - def test_log1p_compiler_shenanigans(self): - # Check if log1p is behaving on 32 bit intel systems. 
- assert_(np.isfinite(np.log1p(np.exp2(-53)))) - - def test_fromiter_comparison(self): - a = np.fromiter(list(range(10)), dtype='b') - b = np.fromiter(list(range(10)), dtype='B') - assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) - assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) - - def test_fromstring_crash(self): - # Ticket #1345: the following should not cause a crash - with assert_warns(DeprecationWarning): - np.fromstring(b'aa, aa, 1.0', sep=',') - - def test_ticket_1539(self): - dtypes = [x for x in np.typeDict.values() - if (issubclass(x, np.number) - and not issubclass(x, np.timedelta64))] - a = np.array([], np.bool_) # not x[0] because it is unordered - failures = [] - - for x in dtypes: - b = a.astype(x) - for y in dtypes: - c = a.astype(y) - try: - np.dot(b, c) - except TypeError: - failures.append((x, y)) - if failures: - raise AssertionError("Failures: %r" % failures) - - def test_ticket_1538(self): - x = np.finfo(np.float32) - for name in 'eps epsneg max min resolution tiny'.split(): - assert_equal(type(getattr(x, name)), np.float32, - err_msg=name) - - def test_ticket_1434(self): - # Check that the out= argument in var and std has an effect - data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9))) - out = np.zeros((3,)) - - ret = data.var(axis=1, out=out) - assert_(ret is out) - assert_array_equal(ret, data.var(axis=1)) - - ret = data.std(axis=1, out=out) - assert_(ret is out) - assert_array_equal(ret, data.std(axis=1)) - - def test_complex_nan_maximum(self): - cnan = complex(0, np.nan) - assert_equal(np.maximum(1, cnan), cnan) - - def test_subclass_int_tuple_assignment(self): - # ticket #1563 - class Subclass(np.ndarray): - def __new__(cls, i): - return np.ones((i,)).view(cls) - - x = Subclass(5) - x[(0,)] = 2 # shouldn't raise an exception - assert_equal(x[0], 2) - - def test_ufunc_no_unnecessary_views(self): - # ticket #1548 - class Subclass(np.ndarray): - pass - x = np.array([1, 2, 3]).view(Subclass) - y = 
np.add(x, x, x) - assert_equal(id(x), id(y)) - - @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") - def test_take_refcount(self): - # ticket #939 - a = np.arange(16, dtype=float) - a.shape = (4, 4) - lut = np.ones((5 + 3, 4), float) - rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype) - c1 = sys.getrefcount(rgba) - try: - lut.take(a, axis=0, mode='clip', out=rgba) - except TypeError: - pass - c2 = sys.getrefcount(rgba) - assert_equal(c1, c2) - - def test_fromfile_tofile_seeks(self): - # On Python 3, tofile/fromfile used to get (#1610) the Python - # file handle out of sync - f0 = tempfile.NamedTemporaryFile() - f = f0.file - f.write(np.arange(255, dtype='u1').tobytes()) - - f.seek(20) - ret = np.fromfile(f, count=4, dtype='u1') - assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1')) - assert_equal(f.tell(), 24) - - f.seek(40) - np.array([1, 2, 3], dtype='u1').tofile(f) - assert_equal(f.tell(), 43) - - f.seek(40) - data = f.read(3) - assert_equal(data, b"\x01\x02\x03") - - f.seek(80) - f.read(4) - data = np.fromfile(f, dtype='u1', count=4) - assert_equal(data, np.array([84, 85, 86, 87], dtype='u1')) - - f.close() - - def test_complex_scalar_warning(self): - for tp in [np.csingle, np.cdouble, np.clongdouble]: - x = tp(1+2j) - assert_warns(np.ComplexWarning, float, x) - with suppress_warnings() as sup: - sup.filter(np.ComplexWarning) - assert_equal(float(x), float(x.real)) - - def test_complex_scalar_complex_cast(self): - for tp in [np.csingle, np.cdouble, np.clongdouble]: - x = tp(1+2j) - assert_equal(complex(x), 1+2j) - - def test_complex_boolean_cast(self): - # Ticket #2218 - for tp in [np.csingle, np.cdouble, np.clongdouble]: - x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp) - assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool)) - assert_(np.any(x)) - assert_(np.all(x[1:])) - - def test_uint_int_conversion(self): - x = 2**64 - 1 - assert_equal(int(np.uint64(x)), x) - - def test_duplicate_field_names_assign(self): - ra = 
np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8') - ra.dtype.names = ('f1', 'f2') - repr(ra) # should not cause a segmentation fault - assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1')) - - def test_eq_string_and_object_array(self): - # From e-mail thread "__eq__ with str and object" (Keith Goodman) - a1 = np.array(['a', 'b'], dtype=object) - a2 = np.array(['a', 'c']) - assert_array_equal(a1 == a2, [True, False]) - assert_array_equal(a2 == a1, [True, False]) - - def test_nonzero_byteswap(self): - a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32) - a.dtype = np.float32 - assert_equal(a.nonzero()[0], [1]) - a = a.byteswap().newbyteorder() - assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap - - def test_find_common_type_boolean(self): - # Ticket #1695 - assert_(np.find_common_type([], ['?', '?']) == '?') - - def test_empty_mul(self): - a = np.array([1.]) - a[1:1] *= 2 - assert_equal(a, [1.]) - - def test_array_side_effect(self): - # The second use of itemsize was throwing an exception because in - # ctors.c, discover_itemsize was calling PyObject_Length without - # checking the return code. This failed to get the length of the - # number 2, and the exception hung around until something checked - # PyErr_Occurred() and returned an error. 
- assert_equal(np.dtype('S10').itemsize, 10) - np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_) - assert_equal(np.dtype('S10').itemsize, 10) - - def test_any_float(self): - # all and any for floats - a = np.array([0.1, 0.9]) - assert_(np.any(a)) - assert_(np.all(a)) - - def test_large_float_sum(self): - a = np.arange(10000, dtype='f') - assert_equal(a.sum(dtype='d'), a.astype('d').sum()) - - def test_ufunc_casting_out(self): - a = np.array(1.0, dtype=np.float32) - b = np.array(1.0, dtype=np.float64) - c = np.array(1.0, dtype=np.float32) - np.add(a, b, out=c) - assert_equal(c, 2.0) - - def test_array_scalar_contiguous(self): - # Array scalars are both C and Fortran contiguous - assert_(np.array(1.0).flags.c_contiguous) - assert_(np.array(1.0).flags.f_contiguous) - assert_(np.array(np.float32(1.0)).flags.c_contiguous) - assert_(np.array(np.float32(1.0)).flags.f_contiguous) - - def test_squeeze_contiguous(self): - # Similar to GitHub issue #387 - a = np.zeros((1, 2)).squeeze() - b = np.zeros((2, 2, 2), order='F')[:, :, ::2].squeeze() - assert_(a.flags.c_contiguous) - assert_(a.flags.f_contiguous) - assert_(b.flags.f_contiguous) - - def test_squeeze_axis_handling(self): - # Issue #10779 - # Ensure proper handling of objects - # that don't support axis specification - # when squeezing - - class OldSqueeze(np.ndarray): - - def __new__(cls, - input_array): - obj = np.asarray(input_array).view(cls) - return obj - - # it is perfectly reasonable that prior - # to numpy version 1.7.0 a subclass of ndarray - # might have been created that did not expect - # squeeze to have an axis argument - # NOTE: this example is somewhat artificial; - # it is designed to simulate an old API - # expectation to guard against regression - def squeeze(self): - return super(OldSqueeze, self).squeeze() - - oldsqueeze = OldSqueeze(np.array([[1],[2],[3]])) - - # if no axis argument is specified the old API - # expectation should give the correct result - 
assert_equal(np.squeeze(oldsqueeze), - np.array([1,2,3])) - - # likewise, axis=None should work perfectly well - # with the old API expectation - assert_equal(np.squeeze(oldsqueeze, axis=None), - np.array([1,2,3])) - - # however, specification of any particular axis - # should raise a TypeError in the context of the - # old API specification, even when using a valid - # axis specification like 1 for this array - with assert_raises(TypeError): - # this would silently succeed for array - # subclasses / objects that did not support - # squeeze axis argument handling before fixing - # Issue #10779 - np.squeeze(oldsqueeze, axis=1) - - # check for the same behavior when using an invalid - # axis specification -- in this case axis=0 does not - # have size 1, but the priority should be to raise - # a TypeError for the axis argument and NOT a - # ValueError for squeezing a non-empty dimension - with assert_raises(TypeError): - np.squeeze(oldsqueeze, axis=0) - - # the new API knows how to handle the axis - # argument and will return a ValueError if - # attempting to squeeze an axis that is not - # of length 1 - with assert_raises(ValueError): - np.squeeze(np.array([[1],[2],[3]]), axis=0) - - def test_reduce_contiguous(self): - # GitHub issue #387 - a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1)) - b = np.add.reduce(np.zeros((2, 1, 2)), 1) - assert_(a.flags.c_contiguous) - assert_(a.flags.f_contiguous) - assert_(b.flags.c_contiguous) - - def test_object_array_self_reference(self): - # Object arrays with references to themselves can cause problems - a = np.array(0, dtype=object) - a[()] = a - assert_raises(RecursionError, int, a) - assert_raises(RecursionError, long, a) - assert_raises(RecursionError, float, a) - if sys.version_info.major == 2: - # in python 3, this falls back on operator.index, which fails on - # on dtype=object - assert_raises(RecursionError, oct, a) - assert_raises(RecursionError, hex, a) - a[()] = None - - def test_object_array_circular_reference(self): - # 
Test the same for a circular reference. - a = np.array(0, dtype=object) - b = np.array(0, dtype=object) - a[()] = b - b[()] = a - assert_raises(RecursionError, int, a) - # NumPy has no tp_traverse currently, so circular references - # cannot be detected. So resolve it: - a[()] = None - - # This was causing a to become like the above - a = np.array(0, dtype=object) - a[...] += 1 - assert_equal(a, 1) - - def test_object_array_nested(self): - # but is fine with a reference to a different array - a = np.array(0, dtype=object) - b = np.array(0, dtype=object) - a[()] = b - assert_equal(int(a), int(0)) - assert_equal(long(a), long(0)) - assert_equal(float(a), float(0)) - if sys.version_info.major == 2: - # in python 3, this falls back on operator.index, which fails on - # on dtype=object - assert_equal(oct(a), oct(0)) - assert_equal(hex(a), hex(0)) - - def test_object_array_self_copy(self): - # An object array being copied into itself DECREF'ed before INCREF'ing - # causing segmentation faults (gh-3787) - a = np.array(object(), dtype=object) - np.copyto(a, a) - if HAS_REFCOUNT: - assert_(sys.getrefcount(a[()]) == 2) - a[()].__class__ # will segfault if object was deleted - - def test_zerosize_accumulate(self): - "Ticket #1733" - x = np.array([[42, 0]], dtype=np.uint32) - assert_equal(np.add.accumulate(x[:-1, 0]), []) - - def test_objectarray_setfield(self): - # Setfield should not overwrite Object fields with non-Object data - x = np.array([1, 2, 3], dtype=object) - assert_raises(TypeError, x.setfield, 4, np.int32, 0) - - def test_setting_rank0_string(self): - "Ticket #1736" - s1 = b"hello1" - s2 = b"hello2" - a = np.zeros((), dtype="S10") - a[()] = s1 - assert_equal(a, np.array(s1)) - a[()] = np.array(s2) - assert_equal(a, np.array(s2)) - - a = np.zeros((), dtype='f4') - a[()] = 3 - assert_equal(a, np.array(3)) - a[()] = np.array(4) - assert_equal(a, np.array(4)) - - def test_string_astype(self): - "Ticket #1748" - s1 = b'black' - s2 = b'white' - s3 = b'other' - a = 
np.array([[s1], [s2], [s3]]) - assert_equal(a.dtype, np.dtype('S5')) - b = a.astype(np.dtype('S0')) - assert_equal(b.dtype, np.dtype('S5')) - - def test_ticket_1756(self): - # Ticket #1756 - s = b'0123456789abcdef' - a = np.array([s]*5) - for i in range(1, 17): - a1 = np.array(a, "|S%d" % i) - a2 = np.array([s[:i]]*5) - assert_equal(a1, a2) - - def test_fields_strides(self): - "gh-2355" - r = np.frombuffer(b'abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2') - assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2]) - assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1']) - assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()]) - assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides) - - def test_alignment_update(self): - # Check that alignment flag is updated on stride setting - a = np.arange(10) - assert_(a.flags.aligned) - a.strides = 3 - assert_(not a.flags.aligned) - - def test_ticket_1770(self): - "Should not segfault on python 3k" - import numpy as np - try: - a = np.zeros((1,), dtype=[('f1', 'f')]) - a['f1'] = 1 - a['f2'] = 1 - except ValueError: - pass - except Exception: - raise AssertionError - - def test_ticket_1608(self): - "x.flat shouldn't modify data" - x = np.array([[1, 2], [3, 4]]).T - np.array(x.flat) - assert_equal(x, [[1, 3], [2, 4]]) - - def test_pickle_string_overwrite(self): - import re - - data = np.array([1], dtype='b') - blob = pickle.dumps(data, protocol=1) - data = pickle.loads(blob) - - # Check that loads does not clobber interned strings - s = re.sub("a(.)", "\x01\\1", "a_") - assert_equal(s[0], "\x01") - data[0] = 0xbb - s = re.sub("a(.)", "\x01\\1", "a_") - assert_equal(s[0], "\x01") - - def test_pickle_bytes_overwrite(self): - if sys.version_info[0] >= 3: - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - data = np.array([1], dtype='b') - data = pickle.loads(pickle.dumps(data, protocol=proto)) - data[0] = 0xdd - bytestring = "\x01 ".encode('ascii') - assert_equal(bytestring[0:1], '\x01'.encode('ascii')) - - def 
test_pickle_py2_array_latin1_hack(self): - # Check that unpickling hacks in Py3 that support - # encoding='latin1' work correctly. - - # Python2 output for pickle.dumps(numpy.array([129], dtype='b')) - data = (b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\n" - b"tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n" - b"I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n" - b"p13\ntp14\nb.") - if sys.version_info[0] >= 3: - # This should work: - result = pickle.loads(data, encoding='latin1') - assert_array_equal(result, np.array([129], dtype='b')) - # Should not segfault: - assert_raises(Exception, pickle.loads, data, encoding='koi8-r') - - def test_pickle_py2_scalar_latin1_hack(self): - # Check that scalar unpickling hack in Py3 that supports - # encoding='latin1' work correctly. - - # Python2 output for pickle.dumps(...) - datas = [ - # (original, python2_pickle, koi8r_validity) - (np.unicode_('\u6bd2'), - (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n" - b"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\n" - b"tp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n."), - 'invalid'), - - (np.float64(9e123), - (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\n" - b"p2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\n" - b"bS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n."), - 'invalid'), - - (np.bytes_(b'\x9c'), # different 8-bit code point in KOI8-R vs latin1 - (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\n" - b"I0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\n" - b"tp8\nRp9\n."), - 'different'), - ] - if sys.version_info[0] >= 3: - for original, data, koi8r_validity in datas: - result = pickle.loads(data, encoding='latin1') - assert_equal(result, original) - - # Decoding under non-latin1 encoding (e.g.) KOI8-R can - # produce bad results, but should not segfault. 
- if koi8r_validity == 'different': - # Unicode code points happen to lie within latin1, - # but are different in koi8-r, resulting to silent - # bogus results - result = pickle.loads(data, encoding='koi8-r') - assert_(result != original) - elif koi8r_validity == 'invalid': - # Unicode code points outside latin1, so results - # to an encoding exception - assert_raises(ValueError, pickle.loads, data, encoding='koi8-r') - else: - raise ValueError(koi8r_validity) - - def test_structured_type_to_object(self): - a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8') - a_obj = np.empty((2,), dtype=object) - a_obj[0] = (0, 1) - a_obj[1] = (3, 2) - # astype records -> object - assert_equal(a_rec.astype(object), a_obj) - # '=' records -> object - b = np.empty_like(a_obj) - b[...] = a_rec - assert_equal(b, a_obj) - # '=' object -> records - b = np.empty_like(a_rec) - b[...] = a_obj - assert_equal(b, a_rec) - - def test_assign_obj_listoflists(self): - # Ticket # 1870 - # The inner list should get assigned to the object elements - a = np.zeros(4, dtype=object) - b = a.copy() - a[0] = [1] - a[1] = [2] - a[2] = [3] - a[3] = [4] - b[...] = [[1], [2], [3], [4]] - assert_equal(a, b) - # The first dimension should get broadcast - a = np.zeros((2, 2), dtype=object) - a[...] = [[1, 2]] - assert_equal(a, [[1, 2], [1, 2]]) - - def test_memoryleak(self): - # Ticket #1917 - ensure that array data doesn't leak - for i in range(1000): - # 100MB times 1000 would give 100GB of memory usage if it leaks - a = np.empty((100000000,), dtype='i1') - del a - - @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") - def test_ufunc_reduce_memoryleak(self): - a = np.arange(6) - acnt = sys.getrefcount(a) - np.add.reduce(a) - assert_equal(sys.getrefcount(a), acnt) - - def test_search_sorted_invalid_arguments(self): - # Ticket #2021, should not segfault. 
- x = np.arange(0, 4, dtype='datetime64[D]') - assert_raises(TypeError, x.searchsorted, 1) - - def test_string_truncation(self): - # Ticket #1990 - Data can be truncated in creation of an array from a - # mixed sequence of numeric values and strings - for val in [True, 1234, 123.4, complex(1, 234)]: - for tostr in [asunicode, asbytes]: - b = np.array([val, tostr('xx')]) - assert_equal(tostr(b[0]), tostr(val)) - b = np.array([tostr('xx'), val]) - assert_equal(tostr(b[1]), tostr(val)) - - # test also with longer strings - b = np.array([val, tostr('xxxxxxxxxx')]) - assert_equal(tostr(b[0]), tostr(val)) - b = np.array([tostr('xxxxxxxxxx'), val]) - assert_equal(tostr(b[1]), tostr(val)) - - def test_string_truncation_ucs2(self): - # Ticket #2081. Python compiled with two byte unicode - # can lead to truncation if itemsize is not properly - # adjusted for NumPy's four byte unicode. - if sys.version_info[0] >= 3: - a = np.array(['abcd']) - else: - a = np.array([u'abcd']) - assert_equal(a.dtype.itemsize, 16) - - def test_unique_stable(self): - # Ticket #2063 must always choose stable sort for argsort to - # get consistent results - v = np.array(([0]*5 + [1]*6 + [2]*6)*4) - res = np.unique(v, return_index=True) - tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11])) - assert_equal(res, tgt) - - def test_unicode_alloc_dealloc_match(self): - # Ticket #1578, the mismatch only showed up when running - # python-debug for python versions >= 2.7, and then as - # a core dump and error message. - a = np.array(['abc'], dtype=np.unicode_)[0] - del a - - def test_refcount_error_in_clip(self): - # Ticket #1588 - a = np.zeros((2,), dtype='>i2').clip(min=0) - x = a + a - # This used to segfault: - y = str(x) - # Check the final string: - assert_(y == "[0 0]") - - def test_searchsorted_wrong_dtype(self): - # Ticket #2189, it used to segfault, so we check that it raises the - # proper exception. 
- a = np.array([('a', 1)], dtype='S1, int') - assert_raises(TypeError, np.searchsorted, a, 1.2) - # Ticket #2066, similar problem: - dtype = np.format_parser(['i4', 'i4'], [], []) - a = np.recarray((2, ), dtype) - assert_raises(TypeError, np.searchsorted, a, 1) - - def test_complex64_alignment(self): - # Issue gh-2668 (trac 2076), segfault on sparc due to misalignment - dtt = np.complex64 - arr = np.arange(10, dtype=dtt) - # 2D array - arr2 = np.reshape(arr, (2, 5)) - # Fortran write followed by (C or F) read caused bus error - data_str = arr2.tobytes('F') - data_back = np.ndarray(arr2.shape, - arr2.dtype, - buffer=data_str, - order='F') - assert_array_equal(arr2, data_back) - - def test_structured_count_nonzero(self): - arr = np.array([0, 1]).astype('i4, (2)i4')[:1] - count = np.count_nonzero(arr) - assert_equal(count, 0) - - def test_copymodule_preserves_f_contiguity(self): - a = np.empty((2, 2), order='F') - b = copy.copy(a) - c = copy.deepcopy(a) - assert_(b.flags.fortran) - assert_(b.flags.f_contiguous) - assert_(c.flags.fortran) - assert_(c.flags.f_contiguous) - - def test_fortran_order_buffer(self): - import numpy as np - a = np.array([['Hello', 'Foob']], dtype='U5', order='F') - arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a) - arr2 = np.array([[[u'H', u'e', u'l', u'l', u'o'], - [u'F', u'o', u'o', u'b', u'']]]) - assert_array_equal(arr, arr2) - - def test_assign_from_sequence_error(self): - # Ticket #4024. - arr = np.array([1, 2, 3]) - assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9]) - arr.__setitem__(slice(None), [9]) - assert_equal(arr, [9, 9, 9]) - - def test_format_on_flex_array_element(self): - # Ticket #4369. 
- dt = np.dtype([('date', '= 3: - assert_raises(TypeError, f, lhs, rhs) - elif not sys.py3kwarning: - # With -3 switch in python 2, DeprecationWarning is raised - # which we are not interested in - f(lhs, rhs) - assert_(not op.eq(lhs, rhs)) - assert_(op.ne(lhs, rhs)) - - def test_richcompare_scalar_and_subclass(self): - # gh-4709 - class Foo(np.ndarray): - def __eq__(self, other): - return "OK" - - x = np.array([1, 2, 3]).view(Foo) - assert_equal(10 == x, "OK") - assert_equal(np.int32(10) == x, "OK") - assert_equal(np.array([10]) == x, "OK") - - def test_pickle_empty_string(self): - # gh-3926 - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - test_string = np.string_('') - assert_equal(pickle.loads( - pickle.dumps(test_string, protocol=proto)), test_string) - - def test_frompyfunc_many_args(self): - # gh-5672 - - def passer(*args): - pass - - assert_raises(ValueError, np.frompyfunc, passer, 32, 1) - - def test_repeat_broadcasting(self): - # gh-5743 - a = np.arange(60).reshape(3, 4, 5) - for axis in chain(range(-a.ndim, a.ndim), [None]): - assert_equal(a.repeat(2, axis=axis), a.repeat([2], axis=axis)) - - def test_frompyfunc_nout_0(self): - # gh-2014 - - def f(x): - x[0], x[-1] = x[-1], x[0] - - uf = np.frompyfunc(f, 1, 0) - a = np.array([[1, 2, 3], [4, 5], [6, 7, 8, 9]]) - assert_equal(uf(a), ()) - assert_array_equal(a, [[3, 2, 1], [5, 4], [9, 7, 8, 6]]) - - @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") - def test_leak_in_structured_dtype_comparison(self): - # gh-6250 - recordtype = np.dtype([('a', np.float64), - ('b', np.int32), - ('d', (str, 5))]) - - # Simple case - a = np.zeros(2, dtype=recordtype) - for i in range(100): - a == a - assert_(sys.getrefcount(a) < 10) - - # The case in the bug report. 
- before = sys.getrefcount(a) - u, v = a[0], a[1] - u == v - del u, v - gc.collect() - after = sys.getrefcount(a) - assert_equal(before, after) - - def test_empty_percentile(self): - # gh-6530 / gh-6553 - assert_array_equal(np.percentile(np.arange(10), []), np.array([])) - - def test_void_compare_segfault(self): - # gh-6922. The following should not segfault - a = np.ones(3, dtype=[('object', 'O'), ('int', ' 0: - # unpickling ndarray goes through _frombuffer for protocol 5 - assert b'numpy.core.numeric' in s - else: - assert b'numpy.core.multiarray' in s - - def test_object_casting_errors(self): - # gh-11993 - arr = np.array(['AAAAA', 18465886.0, 18465886.0], dtype=object) - assert_raises(TypeError, arr.astype, 'c8') - - def test_eff1d_casting(self): - # gh-12711 - x = np.array([1, 2, 4, 7, 0], dtype=np.int16) - res = np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99])) - assert_equal(res, [-99, 1, 2, 3, -7, 88, 99]) - assert_raises(ValueError, np.ediff1d, x, to_begin=(1<<20)) - assert_raises(ValueError, np.ediff1d, x, to_end=(1<<20)) - - def test_pickle_datetime64_array(self): - # gh-12745 (would fail with pickle5 installed) - d = np.datetime64('2015-07-04 12:59:59.50', 'ns') - arr = np.array([d]) - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - dumped = pickle.dumps(arr, protocol=proto) - assert_equal(pickle.loads(dumped), arr) - - def test_bad_array_interface(self): - class T(object): - __array_interface__ = {} - - np.array([T()]) - - @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python') - @pytest.mark.skipif(sys.platform == 'win32' and sys.version_info[:2] < (3, 8), - reason='overflows on windows, fixed in bpo-16865') - def test_to_ctypes(self): - #gh-14214 - arr = np.zeros((2 ** 31 + 1,), 'b') - assert arr.size * arr.itemsize > 2 ** 31 - c_arr = np.ctypeslib.as_ctypes(arr) - assert_equal(c_arr._length_, arr.size) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalar_ctors.py 
b/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalar_ctors.py deleted file mode 100644 index b21bc9d..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalar_ctors.py +++ /dev/null @@ -1,65 +0,0 @@ -""" -Test the scalar constructors, which also do type-coercion -""" -from __future__ import division, absolute_import, print_function - -import sys -import platform -import pytest - -import numpy as np -from numpy.testing import ( - assert_equal, assert_almost_equal, assert_raises, assert_warns, - ) - -class TestFromString(object): - def test_floating(self): - # Ticket #640, floats from string - fsingle = np.single('1.234') - fdouble = np.double('1.234') - flongdouble = np.longdouble('1.234') - assert_almost_equal(fsingle, 1.234) - assert_almost_equal(fdouble, 1.234) - assert_almost_equal(flongdouble, 1.234) - - def test_floating_overflow(self): - """ Strings containing an unrepresentable float overflow """ - fhalf = np.half('1e10000') - assert_equal(fhalf, np.inf) - fsingle = np.single('1e10000') - assert_equal(fsingle, np.inf) - fdouble = np.double('1e10000') - assert_equal(fdouble, np.inf) - flongdouble = assert_warns(RuntimeWarning, np.longdouble, '1e10000') - assert_equal(flongdouble, np.inf) - - fhalf = np.half('-1e10000') - assert_equal(fhalf, -np.inf) - fsingle = np.single('-1e10000') - assert_equal(fsingle, -np.inf) - fdouble = np.double('-1e10000') - assert_equal(fdouble, -np.inf) - flongdouble = assert_warns(RuntimeWarning, np.longdouble, '-1e10000') - assert_equal(flongdouble, -np.inf) - - @pytest.mark.skipif((sys.version_info[0] >= 3) - or (sys.platform == "win32" - and platform.architecture()[0] == "64bit"), - reason="numpy.intp('0xff', 16) not supported on Py3 " - "or 64 bit Windows") - def test_intp(self): - # Ticket #99 - i_width = np.int_(0).nbytes*2 - 1 - np.intp('0x' + 'f'*i_width, 16) - assert_raises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16) - assert_raises(ValueError, np.intp, '0x1', 32) - assert_equal(255, 
np.intp('0xFF', 16)) - - -class TestFromInt(object): - def test_intp(self): - # Ticket #99 - assert_equal(1024, np.intp(1024)) - - def test_uint64_from_negative(self): - assert_equal(np.uint64(-2), np.uint64(18446744073709551614)) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalar_methods.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalar_methods.py deleted file mode 100644 index 93434dd..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalar_methods.py +++ /dev/null @@ -1,109 +0,0 @@ -""" -Test the scalar constructors, which also do type-coercion -""" -from __future__ import division, absolute_import, print_function - -import os -import fractions -import platform - -import pytest -import numpy as np - -from numpy.testing import ( - run_module_suite, - assert_equal, assert_almost_equal, assert_raises, assert_warns, - dec -) - -class TestAsIntegerRatio(object): - # derived in part from the cpython test "test_floatasratio" - - @pytest.mark.parametrize("ftype", [ - np.half, np.single, np.double, np.longdouble]) - @pytest.mark.parametrize("f, ratio", [ - (0.875, (7, 8)), - (-0.875, (-7, 8)), - (0.0, (0, 1)), - (11.5, (23, 2)), - ]) - def test_small(self, ftype, f, ratio): - assert_equal(ftype(f).as_integer_ratio(), ratio) - - @pytest.mark.parametrize("ftype", [ - np.half, np.single, np.double, np.longdouble]) - def test_simple_fractions(self, ftype): - R = fractions.Fraction - assert_equal(R(0, 1), - R(*ftype(0.0).as_integer_ratio())) - assert_equal(R(5, 2), - R(*ftype(2.5).as_integer_ratio())) - assert_equal(R(1, 2), - R(*ftype(0.5).as_integer_ratio())) - assert_equal(R(-2100, 1), - R(*ftype(-2100.0).as_integer_ratio())) - - @pytest.mark.parametrize("ftype", [ - np.half, np.single, np.double, np.longdouble]) - def test_errors(self, ftype): - assert_raises(OverflowError, ftype('inf').as_integer_ratio) - assert_raises(OverflowError, ftype('-inf').as_integer_ratio) - assert_raises(ValueError, 
ftype('nan').as_integer_ratio) - - def test_against_known_values(self): - R = fractions.Fraction - assert_equal(R(1075, 512), - R(*np.half(2.1).as_integer_ratio())) - assert_equal(R(-1075, 512), - R(*np.half(-2.1).as_integer_ratio())) - assert_equal(R(4404019, 2097152), - R(*np.single(2.1).as_integer_ratio())) - assert_equal(R(-4404019, 2097152), - R(*np.single(-2.1).as_integer_ratio())) - assert_equal(R(4728779608739021, 2251799813685248), - R(*np.double(2.1).as_integer_ratio())) - assert_equal(R(-4728779608739021, 2251799813685248), - R(*np.double(-2.1).as_integer_ratio())) - # longdouble is platform dependent - - @pytest.mark.parametrize("ftype, frac_vals, exp_vals", [ - # dtype test cases generated using hypothesis - # first five generated cases per dtype - (np.half, [0.0, 0.01154830649280303, 0.31082276347447274, - 0.527350517124794, 0.8308562335072596], - [0, 1, 0, -8, 12]), - (np.single, [0.0, 0.09248576989263226, 0.8160498218131407, - 0.17389442853722373, 0.7956044195067877], - [0, 12, 10, 17, -26]), - (np.double, [0.0, 0.031066908499895136, 0.5214135908877832, - 0.45780736035689296, 0.5906586745934036], - [0, -801, 51, 194, -653]), - pytest.param( - np.longdouble, - [0.0, 0.20492557202724854, 0.4277180662199366, 0.9888085019891495, - 0.9620175814461964], - [0, -7400, 14266, -7822, -8721], - marks=[ - pytest.mark.skipif( - np.finfo(np.double) == np.finfo(np.longdouble), - reason="long double is same as double"), - pytest.mark.skipif( - platform.machine().startswith("ppc"), - reason="IBM double double"), - ] - ) - ]) - def test_roundtrip(self, ftype, frac_vals, exp_vals): - for frac, exp in zip(frac_vals, exp_vals): - f = np.ldexp(frac, exp, dtype=ftype) - n, d = f.as_integer_ratio() - - try: - # workaround for gh-9968 - nf = np.longdouble(str(n)) - df = np.longdouble(str(d)) - except (OverflowError, RuntimeWarning): - # the values may not fit in any float type - pytest.skip("longdouble too small on this platform") - - assert_equal(nf / df, f, 
"{}/{}".format(n, d)) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalarbuffer.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalarbuffer.py deleted file mode 100644 index 3ded7ee..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalarbuffer.py +++ /dev/null @@ -1,105 +0,0 @@ -""" -Test scalar buffer interface adheres to PEP 3118 -""" -import sys -import numpy as np -import pytest - -from numpy.testing import assert_, assert_equal, assert_raises - -# PEP3118 format strings for native (standard alignment and byteorder) types -scalars_and_codes = [ - (np.bool_, '?'), - (np.byte, 'b'), - (np.short, 'h'), - (np.intc, 'i'), - (np.int_, 'l'), - (np.longlong, 'q'), - (np.ubyte, 'B'), - (np.ushort, 'H'), - (np.uintc, 'I'), - (np.uint, 'L'), - (np.ulonglong, 'Q'), - (np.half, 'e'), - (np.single, 'f'), - (np.double, 'd'), - (np.longdouble, 'g'), - (np.csingle, 'Zf'), - (np.cdouble, 'Zd'), - (np.clongdouble, 'Zg'), -] -scalars_only, codes_only = zip(*scalars_and_codes) - - -@pytest.mark.skipif(sys.version_info.major < 3, - reason="Python 2 scalars lack a buffer interface") -class TestScalarPEP3118(object): - - @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only) - def test_scalar_match_array(self, scalar): - x = scalar() - a = np.array([], dtype=np.dtype(scalar)) - mv_x = memoryview(x) - mv_a = memoryview(a) - assert_equal(mv_x.format, mv_a.format) - - @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only) - def test_scalar_dim(self, scalar): - x = scalar() - mv_x = memoryview(x) - assert_equal(mv_x.itemsize, np.dtype(scalar).itemsize) - assert_equal(mv_x.ndim, 0) - assert_equal(mv_x.shape, ()) - assert_equal(mv_x.strides, ()) - assert_equal(mv_x.suboffsets, ()) - - @pytest.mark.parametrize('scalar, code', scalars_and_codes, ids=codes_only) - def test_scalar_known_code(self, scalar, code): - x = scalar() - mv_x = memoryview(x) - assert_equal(mv_x.format, code) - - def 
test_void_scalar_structured_data(self): - dt = np.dtype([('name', np.unicode_, 16), ('grades', np.float64, (2,))]) - x = np.array(('ndarray_scalar', (1.2, 3.0)), dtype=dt)[()] - assert_(isinstance(x, np.void)) - mv_x = memoryview(x) - expected_size = 16 * np.dtype((np.unicode_, 1)).itemsize - expected_size += 2 * np.dtype(np.float64).itemsize - assert_equal(mv_x.itemsize, expected_size) - assert_equal(mv_x.ndim, 0) - assert_equal(mv_x.shape, ()) - assert_equal(mv_x.strides, ()) - assert_equal(mv_x.suboffsets, ()) - - # check scalar format string against ndarray format string - a = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) - assert_(isinstance(a, np.ndarray)) - mv_a = memoryview(a) - assert_equal(mv_x.itemsize, mv_a.itemsize) - assert_equal(mv_x.format, mv_a.format) - - def test_datetime_memoryview(self): - # gh-11656 - # Values verified with v1.13.3, shape is not () as in test_scalar_dim - def as_dict(m): - return dict(strides=m.strides, shape=m.shape, itemsize=m.itemsize, - ndim=m.ndim, format=m.format) - - dt1 = np.datetime64('2016-01-01') - dt2 = np.datetime64('2017-01-01') - expected = {'strides': (1,), 'itemsize': 1, 'ndim': 1, - 'shape': (8,), 'format': 'B'} - v = memoryview(dt1) - res = as_dict(v) - assert_equal(res, expected) - - v = memoryview(dt2 - dt1) - res = as_dict(v) - assert_equal(res, expected) - - dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')]) - a = np.empty(1, dt) - # Fails to create a PEP 3118 valid buffer - assert_raises((ValueError, BufferError), memoryview, a[0]) - diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalarinherit.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalarinherit.py deleted file mode 100644 index 6a5c4fd..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalarinherit.py +++ /dev/null @@ -1,74 +0,0 @@ -# -*- coding: utf-8 -*- -""" Test printing of scalar types. 
- -""" -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.testing import assert_ - - -class A(object): - pass -class B(A, np.float64): - pass - -class C(B): - pass -class D(C, B): - pass - -class B0(np.float64, A): - pass -class C0(B0): - pass - -class TestInherit(object): - def test_init(self): - x = B(1.0) - assert_(str(x) == '1.0') - y = C(2.0) - assert_(str(y) == '2.0') - z = D(3.0) - assert_(str(z) == '3.0') - - def test_init2(self): - x = B0(1.0) - assert_(str(x) == '1.0') - y = C0(2.0) - assert_(str(y) == '2.0') - - -class TestCharacter(object): - def test_char_radd(self): - # GH issue 9620, reached gentype_add and raise TypeError - np_s = np.string_('abc') - np_u = np.unicode_('abc') - s = b'def' - u = u'def' - assert_(np_s.__radd__(np_s) is NotImplemented) - assert_(np_s.__radd__(np_u) is NotImplemented) - assert_(np_s.__radd__(s) is NotImplemented) - assert_(np_s.__radd__(u) is NotImplemented) - assert_(np_u.__radd__(np_s) is NotImplemented) - assert_(np_u.__radd__(np_u) is NotImplemented) - assert_(np_u.__radd__(s) is NotImplemented) - assert_(np_u.__radd__(u) is NotImplemented) - assert_(s + np_s == b'defabc') - assert_(u + np_u == u'defabc') - - - class Mystr(str, np.generic): - # would segfault - pass - - ret = s + Mystr('abc') - assert_(type(ret) is type(s)) - - def test_char_repeat(self): - np_s = np.string_('abc') - np_u = np.unicode_('abc') - res_s = b'abc' * 5 - res_u = u'abc' * 5 - assert_(np_s * 5 == res_s) - assert_(np_u * 5 == res_u) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalarmath.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalarmath.py deleted file mode 100644 index c84380c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalarmath.py +++ /dev/null @@ -1,704 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import warnings -import itertools -import operator -import platform -import 
pytest - -import numpy as np -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_almost_equal, - assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data, - assert_warns, assert_raises_regex, - ) - -types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc, - np.int_, np.uint, np.longlong, np.ulonglong, - np.single, np.double, np.longdouble, np.csingle, - np.cdouble, np.clongdouble] - -floating_types = np.floating.__subclasses__() -complex_floating_types = np.complexfloating.__subclasses__() - - -# This compares scalarmath against ufuncs. - -class TestTypes(object): - def test_types(self): - for atype in types: - a = atype(1) - assert_(a == 1, "error with %r: got %r" % (atype, a)) - - def test_type_add(self): - # list of types - for k, atype in enumerate(types): - a_scalar = atype(3) - a_array = np.array([3], dtype=atype) - for l, btype in enumerate(types): - b_scalar = btype(1) - b_array = np.array([1], dtype=btype) - c_scalar = a_scalar + b_scalar - c_array = a_array + b_array - # It was comparing the type numbers, but the new ufunc - # function-finding mechanism finds the lowest function - # to which both inputs can be cast - which produces 'l' - # when you do 'q' + 'b'. The old function finding mechanism - # skipped ahead based on the first argument, but that - # does not produce properly symmetric results... 
- assert_equal(c_scalar.dtype, c_array.dtype, - "error with types (%d/'%c' + %d/'%c')" % - (k, np.dtype(atype).char, l, np.dtype(btype).char)) - - def test_type_create(self): - for k, atype in enumerate(types): - a = np.array([1, 2, 3], atype) - b = atype([1, 2, 3]) - assert_equal(a, b) - - def test_leak(self): - # test leak of scalar objects - # a leak would show up in valgrind as still-reachable of ~2.6MB - for i in range(200000): - np.add(1, 1) - - -class TestBaseMath(object): - def test_blocked(self): - # test alignments offsets for simd instructions - # alignments for vz + 2 * (vs - 1) + 1 - for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]: - for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt, - type='binary', - max_size=sz): - exp1 = np.ones_like(inp1) - inp1[...] = np.ones_like(inp1) - inp2[...] = np.zeros_like(inp2) - assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg) - assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg) - assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg) - - np.add(inp1, inp2, out=out) - assert_almost_equal(out, exp1, err_msg=msg) - - inp2[...] += np.arange(inp2.size, dtype=dt) + 1 - assert_almost_equal(np.square(inp2), - np.multiply(inp2, inp2), err_msg=msg) - # skip true divide for ints - if dt != np.int32 or (sys.version_info.major < 3 and not sys.py3kwarning): - assert_almost_equal(np.reciprocal(inp2), - np.divide(1, inp2), err_msg=msg) - - inp1[...] = np.ones_like(inp1) - np.add(inp1, 2, out=out) - assert_almost_equal(out, exp1 + 2, err_msg=msg) - inp2[...] 
= np.ones_like(inp2) - np.add(2, inp2, out=out) - assert_almost_equal(out, exp1 + 2, err_msg=msg) - - def test_lower_align(self): - # check data that is not aligned to element size - # i.e doubles are aligned to 4 bytes on i386 - d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) - o = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) - assert_almost_equal(d + d, d * 2) - np.add(d, d, out=o) - np.add(np.ones_like(d), d, out=o) - np.add(d, np.ones_like(d), out=o) - np.add(np.ones_like(d), d) - np.add(d, np.ones_like(d)) - - -class TestPower(object): - def test_small_types(self): - for t in [np.int8, np.int16, np.float16]: - a = t(3) - b = a ** 4 - assert_(b == 81, "error with %r: got %r" % (t, b)) - - def test_large_types(self): - for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]: - a = t(51) - b = a ** 4 - msg = "error with %r: got %r" % (t, b) - if np.issubdtype(t, np.integer): - assert_(b == 6765201, msg) - else: - assert_almost_equal(b, 6765201, err_msg=msg) - - def test_integers_to_negative_integer_power(self): - # Note that the combination of uint64 with a signed integer - # has common type np.float64. The other combinations should all - # raise a ValueError for integer ** negative integer. - exp = [np.array(-1, dt)[()] for dt in 'bhilq'] - - # 1 ** -1 possible special case - base = [np.array(1, dt)[()] for dt in 'bhilqBHILQ'] - for i1, i2 in itertools.product(base, exp): - if i1.dtype != np.uint64: - assert_raises(ValueError, operator.pow, i1, i2) - else: - res = operator.pow(i1, i2) - assert_(res.dtype.type is np.float64) - assert_almost_equal(res, 1.) - - # -1 ** -1 possible special case - base = [np.array(-1, dt)[()] for dt in 'bhilq'] - for i1, i2 in itertools.product(base, exp): - if i1.dtype != np.uint64: - assert_raises(ValueError, operator.pow, i1, i2) - else: - res = operator.pow(i1, i2) - assert_(res.dtype.type is np.float64) - assert_almost_equal(res, -1.) 
- - # 2 ** -1 perhaps generic - base = [np.array(2, dt)[()] for dt in 'bhilqBHILQ'] - for i1, i2 in itertools.product(base, exp): - if i1.dtype != np.uint64: - assert_raises(ValueError, operator.pow, i1, i2) - else: - res = operator.pow(i1, i2) - assert_(res.dtype.type is np.float64) - assert_almost_equal(res, .5) - - def test_mixed_types(self): - typelist = [np.int8, np.int16, np.float16, - np.float32, np.float64, np.int8, - np.int16, np.int32, np.int64] - for t1 in typelist: - for t2 in typelist: - a = t1(3) - b = t2(2) - result = a**b - msg = ("error with %r and %r:" - "got %r, expected %r") % (t1, t2, result, 9) - if np.issubdtype(np.dtype(result), np.integer): - assert_(result == 9, msg) - else: - assert_almost_equal(result, 9, err_msg=msg) - - def test_modular_power(self): - # modular power is not implemented, so ensure it errors - a = 5 - b = 4 - c = 10 - expected = pow(a, b, c) # noqa: F841 - for t in (np.int32, np.float32, np.complex64): - # note that 3-operand power only dispatches on the first argument - assert_raises(TypeError, operator.pow, t(a), b, c) - assert_raises(TypeError, operator.pow, np.array(t(a)), b, c) - - -def floordiv_and_mod(x, y): - return (x // y, x % y) - - -def _signs(dt): - if dt in np.typecodes['UnsignedInteger']: - return (+1,) - else: - return (+1, -1) - - -class TestModulus(object): - - def test_modulus_basic(self): - dt = np.typecodes['AllInteger'] + np.typecodes['Float'] - for op in [floordiv_and_mod, divmod]: - for dt1, dt2 in itertools.product(dt, dt): - for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)): - fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' - msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) - a = np.array(sg1*71, dtype=dt1)[()] - b = np.array(sg2*19, dtype=dt2)[()] - div, rem = op(a, b) - assert_equal(div*b + rem, a, err_msg=msg) - if sg2 == -1: - assert_(b < rem <= 0, msg) - else: - assert_(b > rem >= 0, msg) - - def test_float_modulus_exact(self): - # test that float results are exact for small 
integers. This also - # holds for the same integers scaled by powers of two. - nlst = list(range(-127, 0)) - plst = list(range(1, 128)) - dividend = nlst + [0] + plst - divisor = nlst + plst - arg = list(itertools.product(dividend, divisor)) - tgt = list(divmod(*t) for t in arg) - - a, b = np.array(arg, dtype=int).T - # convert exact integer results from Python to float so that - # signed zero can be used, it is checked. - tgtdiv, tgtrem = np.array(tgt, dtype=float).T - tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv) - tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem) - - for op in [floordiv_and_mod, divmod]: - for dt in np.typecodes['Float']: - msg = 'op: %s, dtype: %s' % (op.__name__, dt) - fa = a.astype(dt) - fb = b.astype(dt) - # use list comprehension so a_ and b_ are scalars - div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)]) - assert_equal(div, tgtdiv, err_msg=msg) - assert_equal(rem, tgtrem, err_msg=msg) - - def test_float_modulus_roundoff(self): - # gh-6127 - dt = np.typecodes['Float'] - for op in [floordiv_and_mod, divmod]: - for dt1, dt2 in itertools.product(dt, dt): - for sg1, sg2 in itertools.product((+1, -1), (+1, -1)): - fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' - msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) - a = np.array(sg1*78*6e-8, dtype=dt1)[()] - b = np.array(sg2*6e-8, dtype=dt2)[()] - div, rem = op(a, b) - # Equal assertion should hold when fmod is used - assert_equal(div*b + rem, a, err_msg=msg) - if sg2 == -1: - assert_(b < rem <= 0, msg) - else: - assert_(b > rem >= 0, msg) - - def test_float_modulus_corner_cases(self): - # Check remainder magnitude. 
- for dt in np.typecodes['Float']: - b = np.array(1.0, dtype=dt) - a = np.nextafter(np.array(0.0, dtype=dt), -b) - rem = operator.mod(a, b) - assert_(rem <= b, 'dt: %s' % dt) - rem = operator.mod(-a, -b) - assert_(rem >= -b, 'dt: %s' % dt) - - # Check nans, inf - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in remainder") - for dt in np.typecodes['Float']: - fone = np.array(1.0, dtype=dt) - fzer = np.array(0.0, dtype=dt) - finf = np.array(np.inf, dtype=dt) - fnan = np.array(np.nan, dtype=dt) - rem = operator.mod(fone, fzer) - assert_(np.isnan(rem), 'dt: %s' % dt) - # MSVC 2008 returns NaN here, so disable the check. - #rem = operator.mod(fone, finf) - #assert_(rem == fone, 'dt: %s' % dt) - rem = operator.mod(fone, fnan) - assert_(np.isnan(rem), 'dt: %s' % dt) - rem = operator.mod(finf, fone) - assert_(np.isnan(rem), 'dt: %s' % dt) - - def test_inplace_floordiv_handling(self): - # issue gh-12927 - # this only applies to in-place floordiv //=, because the output type - # promotes to float which does not fit - a = np.array([1, 2], np.int64) - b = np.array([1, 2], np.uint64) - pattern = 'could not be coerced to provided output parameter' - with assert_raises_regex(TypeError, pattern): - a //= b - - -class TestComplexDivision(object): - def test_zero_division(self): - with np.errstate(all="ignore"): - for t in [np.complex64, np.complex128]: - a = t(0.0) - b = t(1.0) - assert_(np.isinf(b/a)) - b = t(complex(np.inf, np.inf)) - assert_(np.isinf(b/a)) - b = t(complex(np.inf, np.nan)) - assert_(np.isinf(b/a)) - b = t(complex(np.nan, np.inf)) - assert_(np.isinf(b/a)) - b = t(complex(np.nan, np.nan)) - assert_(np.isnan(b/a)) - b = t(0.) 
- assert_(np.isnan(b/a)) - - def test_signed_zeros(self): - with np.errstate(all="ignore"): - for t in [np.complex64, np.complex128]: - # tupled (numerator, denominator, expected) - # for testing as expected == numerator/denominator - data = ( - (( 0.0,-1.0), ( 0.0, 1.0), (-1.0,-0.0)), - (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)), - (( 0.0,-1.0), (-0.0,-1.0), ( 1.0, 0.0)), - (( 0.0,-1.0), (-0.0, 1.0), (-1.0, 0.0)), - (( 0.0, 1.0), ( 0.0,-1.0), (-1.0, 0.0)), - (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)), - ((-0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)), - ((-0.0, 1.0), ( 0.0,-1.0), (-1.0,-0.0)) - ) - for cases in data: - n = cases[0] - d = cases[1] - ex = cases[2] - result = t(complex(n[0], n[1])) / t(complex(d[0], d[1])) - # check real and imag parts separately to avoid comparison - # in array context, which does not account for signed zeros - assert_equal(result.real, ex[0]) - assert_equal(result.imag, ex[1]) - - def test_branches(self): - with np.errstate(all="ignore"): - for t in [np.complex64, np.complex128]: - # tupled (numerator, denominator, expected) - # for testing as expected == numerator/denominator - data = list() - - # trigger branch: real(fabs(denom)) > imag(fabs(denom)) - # followed by else condition as neither are == 0 - data.append((( 2.0, 1.0), ( 2.0, 1.0), (1.0, 0.0))) - - # trigger branch: real(fabs(denom)) > imag(fabs(denom)) - # followed by if condition as both are == 0 - # is performed in test_zero_division(), so this is skipped - - # trigger else if branch: real(fabs(denom)) < imag(fabs(denom)) - data.append((( 1.0, 2.0), ( 1.0, 2.0), (1.0, 0.0))) - - for cases in data: - n = cases[0] - d = cases[1] - ex = cases[2] - result = t(complex(n[0], n[1])) / t(complex(d[0], d[1])) - # check real and imag parts separately to avoid comparison - # in array context, which does not account for signed zeros - assert_equal(result.real, ex[0]) - assert_equal(result.imag, ex[1]) - - -class TestConversion(object): - def test_int_from_long(self): - l = [1e6, 1e12, 1e18, 
-1e6, -1e12, -1e18] - li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18] - for T in [None, np.float64, np.int64]: - a = np.array(l, dtype=T) - assert_equal([int(_m) for _m in a], li) - - a = np.array(l[:3], dtype=np.uint64) - assert_equal([int(_m) for _m in a], li[:3]) - - def test_iinfo_long_values(self): - for code in 'bBhH': - res = np.array(np.iinfo(code).max + 1, dtype=code) - tgt = np.iinfo(code).min - assert_(res == tgt) - - for code in np.typecodes['AllInteger']: - res = np.array(np.iinfo(code).max, dtype=code) - tgt = np.iinfo(code).max - assert_(res == tgt) - - for code in np.typecodes['AllInteger']: - res = np.typeDict[code](np.iinfo(code).max) - tgt = np.iinfo(code).max - assert_(res == tgt) - - def test_int_raise_behaviour(self): - def overflow_error_func(dtype): - np.typeDict[dtype](np.iinfo(dtype).max + 1) - - for code in 'lLqQ': - assert_raises(OverflowError, overflow_error_func, code) - - def test_int_from_infinite_longdouble(self): - # gh-627 - x = np.longdouble(np.inf) - assert_raises(OverflowError, int, x) - with suppress_warnings() as sup: - sup.record(np.ComplexWarning) - x = np.clongdouble(np.inf) - assert_raises(OverflowError, int, x) - assert_equal(len(sup.log), 1) - - @pytest.mark.skipif(not IS_PYPY, reason="Test is PyPy only (gh-9972)") - def test_int_from_infinite_longdouble___int__(self): - x = np.longdouble(np.inf) - assert_raises(OverflowError, x.__int__) - with suppress_warnings() as sup: - sup.record(np.ComplexWarning) - x = np.clongdouble(np.inf) - assert_raises(OverflowError, x.__int__) - assert_equal(len(sup.log), 1) - - @pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), - reason="long double is same as double") - @pytest.mark.skipif(platform.machine().startswith("ppc"), - reason="IBM double double") - def test_int_from_huge_longdouble(self): - # Produce a longdouble that would overflow a double, - # use exponent that avoids bug in Darwin pow function. 
- exp = np.finfo(np.double).maxexp - 1 - huge_ld = 2 * 1234 * np.longdouble(2) ** exp - huge_i = 2 * 1234 * 2 ** exp - assert_(huge_ld != np.inf) - assert_equal(int(huge_ld), huge_i) - - def test_int_from_longdouble(self): - x = np.longdouble(1.5) - assert_equal(int(x), 1) - x = np.longdouble(-10.5) - assert_equal(int(x), -10) - - def test_numpy_scalar_relational_operators(self): - # All integer - for dt1 in np.typecodes['AllInteger']: - assert_(1 > np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,)) - assert_(not 1 < np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,)) - - for dt2 in np.typecodes['AllInteger']: - assert_(np.array(1, dtype=dt1)[()] > np.array(0, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) - assert_(not np.array(1, dtype=dt1)[()] < np.array(0, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) - - #Unsigned integers - for dt1 in 'BHILQP': - assert_(-1 < np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) - assert_(not -1 > np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) - assert_(-1 != np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) - - #unsigned vs signed - for dt2 in 'bhilqp': - assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) - assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) - assert_(np.array(1, dtype=dt1)[()] != np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) - - #Signed integers and floats - for dt1 in 'bhlqp' + np.typecodes['Float']: - assert_(1 > np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) - assert_(not 1 < np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) - assert_(-1 == np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) - - for dt2 in 'bhlqp' + np.typecodes['Float']: - assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) - assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, 
dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) - assert_(np.array(-1, dtype=dt1)[()] == np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) - - def test_scalar_comparison_to_none(self): - # Scalars should just return False and not give a warnings. - # The comparisons are flagged by pep8, ignore that. - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', FutureWarning) - assert_(not np.float32(1) == None) - assert_(not np.str_('test') == None) - # This is dubious (see below): - assert_(not np.datetime64('NaT') == None) - - assert_(np.float32(1) != None) - assert_(np.str_('test') != None) - # This is dubious (see below): - assert_(np.datetime64('NaT') != None) - assert_(len(w) == 0) - - # For documentation purposes, this is why the datetime is dubious. - # At the time of deprecation this was no behaviour change, but - # it has to be considered when the deprecations are done. - assert_(np.equal(np.datetime64('NaT'), None)) - - -#class TestRepr(object): -# def test_repr(self): -# for t in types: -# val = t(1197346475.0137341) -# val_repr = repr(val) -# val2 = eval(val_repr) -# assert_equal( val, val2 ) - - -class TestRepr(object): - def _test_type_repr(self, t): - finfo = np.finfo(t) - last_fraction_bit_idx = finfo.nexp + finfo.nmant - last_exponent_bit_idx = finfo.nexp - storage_bytes = np.dtype(t).itemsize*8 - # could add some more types to the list below - for which in ['small denorm', 'small norm']: - # Values from https://en.wikipedia.org/wiki/IEEE_754 - constr = np.array([0x00]*storage_bytes, dtype=np.uint8) - if which == 'small denorm': - byte = last_fraction_bit_idx // 8 - bytebit = 7-(last_fraction_bit_idx % 8) - constr[byte] = 1 << bytebit - elif which == 'small norm': - byte = last_exponent_bit_idx // 8 - bytebit = 7-(last_exponent_bit_idx % 8) - constr[byte] = 1 << bytebit - else: - raise ValueError('hmm') - val = constr.view(t)[0] - val_repr = repr(val) - val2 = t(eval(val_repr)) - if not (val2 
== 0 and val < 1e-100): - assert_equal(val, val2) - - def test_float_repr(self): - # long double test cannot work, because eval goes through a python - # float - for t in [np.float32, np.float64]: - self._test_type_repr(t) - - -if not IS_PYPY: - # sys.getsizeof() is not valid on PyPy - class TestSizeOf(object): - - def test_equal_nbytes(self): - for type in types: - x = type(0) - assert_(sys.getsizeof(x) > x.nbytes) - - def test_error(self): - d = np.float32() - assert_raises(TypeError, d.__sizeof__, "a") - - -class TestMultiply(object): - def test_seq_repeat(self): - # Test that basic sequences get repeated when multiplied with - # numpy integers. And errors are raised when multiplied with others. - # Some of this behaviour may be controversial and could be open for - # change. - accepted_types = set(np.typecodes["AllInteger"]) - deprecated_types = {'?'} - forbidden_types = ( - set(np.typecodes["All"]) - accepted_types - deprecated_types) - forbidden_types -= {'V'} # can't default-construct void scalars - - for seq_type in (list, tuple): - seq = seq_type([1, 2, 3]) - for numpy_type in accepted_types: - i = np.dtype(numpy_type).type(2) - assert_equal(seq * i, seq * int(i)) - assert_equal(i * seq, int(i) * seq) - - for numpy_type in deprecated_types: - i = np.dtype(numpy_type).type() - assert_equal( - assert_warns(DeprecationWarning, operator.mul, seq, i), - seq * int(i)) - assert_equal( - assert_warns(DeprecationWarning, operator.mul, i, seq), - int(i) * seq) - - for numpy_type in forbidden_types: - i = np.dtype(numpy_type).type() - assert_raises(TypeError, operator.mul, seq, i) - assert_raises(TypeError, operator.mul, i, seq) - - def test_no_seq_repeat_basic_array_like(self): - # Test that an array-like which does not know how to be multiplied - # does not attempt sequence repeat (raise TypeError). - # See also gh-7428. 
- class ArrayLike(object): - def __init__(self, arr): - self.arr = arr - def __array__(self): - return self.arr - - # Test for simple ArrayLike above and memoryviews (original report) - for arr_like in (ArrayLike(np.ones(3)), memoryview(np.ones(3))): - assert_array_equal(arr_like * np.float32(3.), np.full(3, 3.)) - assert_array_equal(np.float32(3.) * arr_like, np.full(3, 3.)) - assert_array_equal(arr_like * np.int_(3), np.full(3, 3)) - assert_array_equal(np.int_(3) * arr_like, np.full(3, 3)) - - -class TestNegative(object): - def test_exceptions(self): - a = np.ones((), dtype=np.bool_)[()] - assert_raises(TypeError, operator.neg, a) - - def test_result(self): - types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) - for dt in types: - a = np.ones((), dtype=dt)[()] - assert_equal(operator.neg(a) + a, 0) - - -class TestSubtract(object): - def test_exceptions(self): - a = np.ones((), dtype=np.bool_)[()] - assert_raises(TypeError, operator.sub, a, a) - - def test_result(self): - types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) - for dt in types: - a = np.ones((), dtype=dt)[()] - assert_equal(operator.sub(a, a), 0) - - -class TestAbs(object): - def _test_abs_func(self, absfunc): - for tp in floating_types + complex_floating_types: - x = tp(-1.5) - assert_equal(absfunc(x), 1.5) - x = tp(0.0) - res = absfunc(x) - # assert_equal() checks zero signedness - assert_equal(res, 0.0) - x = tp(-0.0) - res = absfunc(x) - assert_equal(res, 0.0) - - x = tp(np.finfo(tp).max) - assert_equal(absfunc(x), x.real) - - x = tp(np.finfo(tp).tiny) - assert_equal(absfunc(x), x.real) - - x = tp(np.finfo(tp).min) - assert_equal(absfunc(x), -x.real) - - def test_builtin_abs(self): - self._test_abs_func(abs) - - def test_numpy_abs(self): - self._test_abs_func(np.abs) - - -class TestBitShifts(object): - - @pytest.mark.parametrize('type_code', 
np.typecodes['AllInteger']) - @pytest.mark.parametrize('op', - [operator.rshift, operator.lshift], ids=['>>', '<<']) - def test_shift_all_bits(self, type_code, op): - """ Shifts where the shift amount is the width of the type or wider """ - # gh-2449 - dt = np.dtype(type_code) - nbits = dt.itemsize * 8 - for val in [5, -5]: - for shift in [nbits, nbits + 4]: - val_scl = dt.type(val) - shift_scl = dt.type(shift) - res_scl = op(val_scl, shift_scl) - if val_scl < 0 and op is operator.rshift: - # sign bit is preserved - assert_equal(res_scl, -1) - else: - assert_equal(res_scl, 0) - - # Result on scalars should be the same as on arrays - val_arr = np.array([val]*32, dtype=dt) - shift_arr = np.array([shift]*32, dtype=dt) - res_arr = op(val_arr, shift_arr) - assert_equal(res_arr, res_scl) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalarprint.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalarprint.py deleted file mode 100644 index 86b0ca1..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalarprint.py +++ /dev/null @@ -1,326 +0,0 @@ -# -*- coding: utf-8 -*- -""" Test printing of scalar types. 
- -""" -from __future__ import division, absolute_import, print_function - -import code, sys -import platform -import pytest - -from tempfile import TemporaryFile -import numpy as np -from numpy.testing import assert_, assert_equal, suppress_warnings - -class TestRealScalars(object): - def test_str(self): - svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan] - styps = [np.float16, np.float32, np.float64, np.longdouble] - wanted = [ - ['0.0', '0.0', '0.0', '0.0' ], - ['-0.0', '-0.0', '-0.0', '-0.0'], - ['1.0', '1.0', '1.0', '1.0' ], - ['-1.0', '-1.0', '-1.0', '-1.0'], - ['inf', 'inf', 'inf', 'inf' ], - ['-inf', '-inf', '-inf', '-inf'], - ['nan', 'nan', 'nan', 'nan']] - - for wants, val in zip(wanted, svals): - for want, styp in zip(wants, styps): - msg = 'for str({}({}))'.format(np.dtype(styp).name, repr(val)) - assert_equal(str(styp(val)), want, err_msg=msg) - - def test_scalar_cutoffs(self): - # test that both the str and repr of np.float64 behaves - # like python floats in python3. Note that in python2 - # the str has truncated digits, but we do not do this - def check(v): - # we compare str to repr, to avoid python2 truncation behavior - assert_equal(str(np.float64(v)), repr(v)) - assert_equal(repr(np.float64(v)), repr(v)) - - # check we use the same number of significant digits - check(1.12345678901234567890) - check(0.0112345678901234567890) - - # check switch from scientific output to positional and back - check(1e-5) - check(1e-4) - check(1e15) - check(1e16) - - def test_py2_float_print(self): - # gh-10753 - # In python2, the python float type implements an obsolete method - # tp_print, which overrides tp_repr and tp_str when using "print" to - # output to a "real file" (ie, not a StringIO). Make sure we don't - # inherit it. 
- x = np.double(0.1999999999999) - with TemporaryFile('r+t') as f: - print(x, file=f) - f.seek(0) - output = f.read() - assert_equal(output, str(x) + '\n') - # In python2 the value float('0.1999999999999') prints with reduced - # precision as '0.2', but we want numpy's np.double('0.1999999999999') - # to print the unique value, '0.1999999999999'. - - # gh-11031 - # Only in the python2 interactive shell and when stdout is a "real" - # file, the output of the last command is printed to stdout without - # Py_PRINT_RAW (unlike the print statement) so `>>> x` and `>>> print - # x` are potentially different. Make sure they are the same. The only - # way I found to get prompt-like output is using an actual prompt from - # the 'code' module. Again, must use tempfile to get a "real" file. - - # dummy user-input which enters one line and then ctrl-Ds. - def userinput(): - yield 'np.sqrt(2)' - raise EOFError - gen = userinput() - input_func = lambda prompt="": next(gen) - - with TemporaryFile('r+t') as fo, TemporaryFile('r+t') as fe: - orig_stdout, orig_stderr = sys.stdout, sys.stderr - sys.stdout, sys.stderr = fo, fe - - # py2 code.interact sends irrelevant internal DeprecationWarnings - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - code.interact(local={'np': np}, readfunc=input_func, banner='') - - sys.stdout, sys.stderr = orig_stdout, orig_stderr - - fo.seek(0) - capture = fo.read().strip() - - assert_equal(capture, repr(np.sqrt(2))) - - def test_dragon4(self): - # these tests are adapted from Ryan Juckett's dragon4 implementation, - # see dragon4.c for details. 
- - fpos32 = lambda x, **k: np.format_float_positional(np.float32(x), **k) - fsci32 = lambda x, **k: np.format_float_scientific(np.float32(x), **k) - fpos64 = lambda x, **k: np.format_float_positional(np.float64(x), **k) - fsci64 = lambda x, **k: np.format_float_scientific(np.float64(x), **k) - - preckwd = lambda prec: {'unique': False, 'precision': prec} - - assert_equal(fpos32('1.0'), "1.") - assert_equal(fsci32('1.0'), "1.e+00") - assert_equal(fpos32('10.234'), "10.234") - assert_equal(fpos32('-10.234'), "-10.234") - assert_equal(fsci32('10.234'), "1.0234e+01") - assert_equal(fsci32('-10.234'), "-1.0234e+01") - assert_equal(fpos32('1000.0'), "1000.") - assert_equal(fpos32('1.0', precision=0), "1.") - assert_equal(fsci32('1.0', precision=0), "1.e+00") - assert_equal(fpos32('10.234', precision=0), "10.") - assert_equal(fpos32('-10.234', precision=0), "-10.") - assert_equal(fsci32('10.234', precision=0), "1.e+01") - assert_equal(fsci32('-10.234', precision=0), "-1.e+01") - assert_equal(fpos32('10.234', precision=2), "10.23") - assert_equal(fsci32('-10.234', precision=2), "-1.02e+01") - assert_equal(fsci64('9.9999999999999995e-08', **preckwd(16)), - '9.9999999999999995e-08') - assert_equal(fsci64('9.8813129168249309e-324', **preckwd(16)), - '9.8813129168249309e-324') - assert_equal(fsci64('9.9999999999999694e-311', **preckwd(16)), - '9.9999999999999694e-311') - - - # test rounding - # 3.1415927410 is closest float32 to np.pi - assert_equal(fpos32('3.14159265358979323846', **preckwd(10)), - "3.1415927410") - assert_equal(fsci32('3.14159265358979323846', **preckwd(10)), - "3.1415927410e+00") - assert_equal(fpos64('3.14159265358979323846', **preckwd(10)), - "3.1415926536") - assert_equal(fsci64('3.14159265358979323846', **preckwd(10)), - "3.1415926536e+00") - # 299792448 is closest float32 to 299792458 - assert_equal(fpos32('299792458.0', **preckwd(5)), "299792448.00000") - assert_equal(fsci32('299792458.0', **preckwd(5)), "2.99792e+08") - 
assert_equal(fpos64('299792458.0', **preckwd(5)), "299792458.00000") - assert_equal(fsci64('299792458.0', **preckwd(5)), "2.99792e+08") - - assert_equal(fpos32('3.14159265358979323846', **preckwd(25)), - "3.1415927410125732421875000") - assert_equal(fpos64('3.14159265358979323846', **preckwd(50)), - "3.14159265358979311599796346854418516159057617187500") - assert_equal(fpos64('3.14159265358979323846'), "3.141592653589793") - - - # smallest numbers - assert_equal(fpos32(0.5**(126 + 23), unique=False, precision=149), - "0.00000000000000000000000000000000000000000000140129846432" - "4817070923729583289916131280261941876515771757068283889791" - "08268586060148663818836212158203125") - assert_equal(fpos64(0.5**(1022 + 52), unique=False, precision=1074), - "0.00000000000000000000000000000000000000000000000000000000" - "0000000000000000000000000000000000000000000000000000000000" - "0000000000000000000000000000000000000000000000000000000000" - "0000000000000000000000000000000000000000000000000000000000" - "0000000000000000000000000000000000000000000000000000000000" - "0000000000000000000000000000000000049406564584124654417656" - "8792868221372365059802614324764425585682500675507270208751" - "8652998363616359923797965646954457177309266567103559397963" - "9877479601078187812630071319031140452784581716784898210368" - "8718636056998730723050006387409153564984387312473397273169" - "6151400317153853980741262385655911710266585566867681870395" - "6031062493194527159149245532930545654440112748012970999954" - "1931989409080416563324524757147869014726780159355238611550" - "1348035264934720193790268107107491703332226844753335720832" - "4319360923828934583680601060115061698097530783422773183292" - "4790498252473077637592724787465608477820373446969953364701" - "7972677717585125660551199131504891101451037862738167250955" - "8373897335989936648099411642057026370902792427675445652290" - "87538682506419718265533447265625") - - # largest numbers - 
assert_equal(fpos32(np.finfo(np.float32).max, **preckwd(0)), - "340282346638528859811704183484516925440.") - assert_equal(fpos64(np.finfo(np.float64).max, **preckwd(0)), - "1797693134862315708145274237317043567980705675258449965989" - "1747680315726078002853876058955863276687817154045895351438" - "2464234321326889464182768467546703537516986049910576551282" - "0762454900903893289440758685084551339423045832369032229481" - "6580855933212334827479782620414472316873817718091929988125" - "0404026184124858368.") - # Warning: In unique mode only the integer digits necessary for - # uniqueness are computed, the rest are 0. Should we change this? - assert_equal(fpos32(np.finfo(np.float32).max, precision=0), - "340282350000000000000000000000000000000.") - - # test trailing zeros - assert_equal(fpos32('1.0', unique=False, precision=3), "1.000") - assert_equal(fpos64('1.0', unique=False, precision=3), "1.000") - assert_equal(fsci32('1.0', unique=False, precision=3), "1.000e+00") - assert_equal(fsci64('1.0', unique=False, precision=3), "1.000e+00") - assert_equal(fpos32('1.5', unique=False, precision=3), "1.500") - assert_equal(fpos64('1.5', unique=False, precision=3), "1.500") - assert_equal(fsci32('1.5', unique=False, precision=3), "1.500e+00") - assert_equal(fsci64('1.5', unique=False, precision=3), "1.500e+00") - # gh-10713 - assert_equal(fpos64('324', unique=False, precision=5, fractional=False), "324.00") - - def test_dragon4_interface(self): - tps = [np.float16, np.float32, np.float64] - if hasattr(np, 'float128'): - tps.append(np.float128) - - fpos = np.format_float_positional - fsci = np.format_float_scientific - - for tp in tps: - # test padding - assert_equal(fpos(tp('1.0'), pad_left=4, pad_right=4), " 1. ") - assert_equal(fpos(tp('-1.0'), pad_left=4, pad_right=4), " -1. 
") - assert_equal(fpos(tp('-10.2'), - pad_left=4, pad_right=4), " -10.2 ") - - # test exp_digits - assert_equal(fsci(tp('1.23e1'), exp_digits=5), "1.23e+00001") - - # test fixed (non-unique) mode - assert_equal(fpos(tp('1.0'), unique=False, precision=4), "1.0000") - assert_equal(fsci(tp('1.0'), unique=False, precision=4), - "1.0000e+00") - - # test trimming - # trim of 'k' or '.' only affects non-unique mode, since unique - # mode will not output trailing 0s. - assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='k'), - "1.0000") - - assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='.'), - "1.") - assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='.'), - "1.2" if tp != np.float16 else "1.2002") - - assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='0'), - "1.0") - assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='0'), - "1.2" if tp != np.float16 else "1.2002") - assert_equal(fpos(tp('1.'), trim='0'), "1.0") - - assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='-'), - "1") - assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'), - "1.2" if tp != np.float16 else "1.2002") - assert_equal(fpos(tp('1.'), trim='-'), "1") - - @pytest.mark.skipif(not platform.machine().startswith("ppc64"), - reason="only applies to ppc float128 values") - def test_ppc64_ibm_double_double128(self): - # check that the precision decreases once we get into the subnormal - # range. Unlike float64, this starts around 1e-292 instead of 1e-308, - # which happens when the first double is normal and the second is - # subnormal. 
- x = np.float128('2.123123123123123123123123123123123e-286') - got = [str(x/np.float128('2e' + str(i))) for i in range(0,40)] - expected = [ - "1.06156156156156156156156156156157e-286", - "1.06156156156156156156156156156158e-287", - "1.06156156156156156156156156156159e-288", - "1.0615615615615615615615615615616e-289", - "1.06156156156156156156156156156157e-290", - "1.06156156156156156156156156156156e-291", - "1.0615615615615615615615615615616e-292", - "1.0615615615615615615615615615615e-293", - "1.061561561561561561561561561562e-294", - "1.06156156156156156156156156155e-295", - "1.0615615615615615615615615616e-296", - "1.06156156156156156156156156e-297", - "1.06156156156156156156156157e-298", - "1.0615615615615615615615616e-299", - "1.06156156156156156156156e-300", - "1.06156156156156156156155e-301", - "1.0615615615615615615616e-302", - "1.061561561561561561562e-303", - "1.06156156156156156156e-304", - "1.0615615615615615618e-305", - "1.06156156156156156e-306", - "1.06156156156156157e-307", - "1.0615615615615616e-308", - "1.06156156156156e-309", - "1.06156156156157e-310", - "1.0615615615616e-311", - "1.06156156156e-312", - "1.06156156154e-313", - "1.0615615616e-314", - "1.06156156e-315", - "1.06156155e-316", - "1.061562e-317", - "1.06156e-318", - "1.06155e-319", - "1.0617e-320", - "1.06e-321", - "1.04e-322", - "1e-323", - "0.0", - "0.0"] - assert_equal(got, expected) - - # Note: we follow glibc behavior, but it (or gcc) might not be right. 
- # In particular we can get two values that print the same but are not - # equal: - a = np.float128('2')/np.float128('3') - b = np.float128(str(a)) - assert_equal(str(a), str(b)) - assert_(a != b) - - def float32_roundtrip(self): - # gh-9360 - x = np.float32(1024 - 2**-14) - y = np.float32(1024 - 2**-13) - assert_(repr(x) != repr(y)) - assert_equal(np.float32(repr(x)), x) - assert_equal(np.float32(repr(y)), y) - - def float64_vs_python(self): - # gh-2643, gh-6136, gh-6908 - assert_equal(repr(np.float64(0.1)), repr(0.1)) - assert_(repr(np.float64(0.20000000000000004)) != repr(0.2)) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_shape_base.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_shape_base.py deleted file mode 100644 index 53d272f..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_shape_base.py +++ /dev/null @@ -1,720 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import pytest -import sys -import numpy as np -from numpy.core import ( - array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack, - newaxis, concatenate, stack - ) -from numpy.core.shape_base import (_block_dispatcher, _block_setup, - _block_concatenate, _block_slicing) -from numpy.testing import ( - assert_, assert_raises, assert_array_equal, assert_equal, - assert_raises_regex, assert_warns - ) - -from numpy.compat import long - -class TestAtleast1d(object): - def test_0D_array(self): - a = array(1) - b = array(2) - res = [atleast_1d(a), atleast_1d(b)] - desired = [array([1]), array([2])] - assert_array_equal(res, desired) - - def test_1D_array(self): - a = array([1, 2]) - b = array([2, 3]) - res = [atleast_1d(a), atleast_1d(b)] - desired = [array([1, 2]), array([2, 3])] - assert_array_equal(res, desired) - - def test_2D_array(self): - a = array([[1, 2], [1, 2]]) - b = array([[2, 3], [2, 3]]) - res = [atleast_1d(a), atleast_1d(b)] - desired = [a, b] - assert_array_equal(res, desired) - - def 
test_3D_array(self): - a = array([[1, 2], [1, 2]]) - b = array([[2, 3], [2, 3]]) - a = array([a, a]) - b = array([b, b]) - res = [atleast_1d(a), atleast_1d(b)] - desired = [a, b] - assert_array_equal(res, desired) - - def test_r1array(self): - """ Test to make sure equivalent Travis O's r1array function - """ - assert_(atleast_1d(3).shape == (1,)) - assert_(atleast_1d(3j).shape == (1,)) - assert_(atleast_1d(long(3)).shape == (1,)) - assert_(atleast_1d(3.0).shape == (1,)) - assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2)) - - -class TestAtleast2d(object): - def test_0D_array(self): - a = array(1) - b = array(2) - res = [atleast_2d(a), atleast_2d(b)] - desired = [array([[1]]), array([[2]])] - assert_array_equal(res, desired) - - def test_1D_array(self): - a = array([1, 2]) - b = array([2, 3]) - res = [atleast_2d(a), atleast_2d(b)] - desired = [array([[1, 2]]), array([[2, 3]])] - assert_array_equal(res, desired) - - def test_2D_array(self): - a = array([[1, 2], [1, 2]]) - b = array([[2, 3], [2, 3]]) - res = [atleast_2d(a), atleast_2d(b)] - desired = [a, b] - assert_array_equal(res, desired) - - def test_3D_array(self): - a = array([[1, 2], [1, 2]]) - b = array([[2, 3], [2, 3]]) - a = array([a, a]) - b = array([b, b]) - res = [atleast_2d(a), atleast_2d(b)] - desired = [a, b] - assert_array_equal(res, desired) - - def test_r2array(self): - """ Test to make sure equivalent Travis O's r2array function - """ - assert_(atleast_2d(3).shape == (1, 1)) - assert_(atleast_2d([3j, 1]).shape == (1, 2)) - assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2)) - - -class TestAtleast3d(object): - def test_0D_array(self): - a = array(1) - b = array(2) - res = [atleast_3d(a), atleast_3d(b)] - desired = [array([[[1]]]), array([[[2]]])] - assert_array_equal(res, desired) - - def test_1D_array(self): - a = array([1, 2]) - b = array([2, 3]) - res = [atleast_3d(a), atleast_3d(b)] - desired = [array([[[1], [2]]]), array([[[2], [3]]])] - assert_array_equal(res, 
desired) - - def test_2D_array(self): - a = array([[1, 2], [1, 2]]) - b = array([[2, 3], [2, 3]]) - res = [atleast_3d(a), atleast_3d(b)] - desired = [a[:,:, newaxis], b[:,:, newaxis]] - assert_array_equal(res, desired) - - def test_3D_array(self): - a = array([[1, 2], [1, 2]]) - b = array([[2, 3], [2, 3]]) - a = array([a, a]) - b = array([b, b]) - res = [atleast_3d(a), atleast_3d(b)] - desired = [a, b] - assert_array_equal(res, desired) - - -class TestHstack(object): - def test_non_iterable(self): - assert_raises(TypeError, hstack, 1) - - def test_empty_input(self): - assert_raises(ValueError, hstack, ()) - - def test_0D_array(self): - a = array(1) - b = array(2) - res = hstack([a, b]) - desired = array([1, 2]) - assert_array_equal(res, desired) - - def test_1D_array(self): - a = array([1]) - b = array([2]) - res = hstack([a, b]) - desired = array([1, 2]) - assert_array_equal(res, desired) - - def test_2D_array(self): - a = array([[1], [2]]) - b = array([[1], [2]]) - res = hstack([a, b]) - desired = array([[1, 1], [2, 2]]) - assert_array_equal(res, desired) - - def test_generator(self): - with assert_warns(FutureWarning): - hstack((np.arange(3) for _ in range(2))) - if sys.version_info.major > 2: - # map returns a list on Python 2 - with assert_warns(FutureWarning): - hstack(map(lambda x: x, np.ones((3, 2)))) - - -class TestVstack(object): - def test_non_iterable(self): - assert_raises(TypeError, vstack, 1) - - def test_empty_input(self): - assert_raises(ValueError, vstack, ()) - - def test_0D_array(self): - a = array(1) - b = array(2) - res = vstack([a, b]) - desired = array([[1], [2]]) - assert_array_equal(res, desired) - - def test_1D_array(self): - a = array([1]) - b = array([2]) - res = vstack([a, b]) - desired = array([[1], [2]]) - assert_array_equal(res, desired) - - def test_2D_array(self): - a = array([[1], [2]]) - b = array([[1], [2]]) - res = vstack([a, b]) - desired = array([[1], [2], [1], [2]]) - assert_array_equal(res, desired) - - def 
test_2D_array2(self): - a = array([1, 2]) - b = array([1, 2]) - res = vstack([a, b]) - desired = array([[1, 2], [1, 2]]) - assert_array_equal(res, desired) - - def test_generator(self): - with assert_warns(FutureWarning): - vstack((np.arange(3) for _ in range(2))) - - -class TestConcatenate(object): - def test_returns_copy(self): - a = np.eye(3) - b = np.concatenate([a]) - b[0, 0] = 2 - assert b[0, 0] != a[0, 0] - - def test_exceptions(self): - # test axis must be in bounds - for ndim in [1, 2, 3]: - a = np.ones((1,)*ndim) - np.concatenate((a, a), axis=0) # OK - assert_raises(np.AxisError, np.concatenate, (a, a), axis=ndim) - assert_raises(np.AxisError, np.concatenate, (a, a), axis=-(ndim + 1)) - - # Scalars cannot be concatenated - assert_raises(ValueError, concatenate, (0,)) - assert_raises(ValueError, concatenate, (np.array(0),)) - - # dimensionality must match - assert_raises_regex( - ValueError, - r"all the input arrays must have same number of dimensions, but " - r"the array at index 0 has 1 dimension\(s\) and the array at " - r"index 1 has 2 dimension\(s\)", - np.concatenate, (np.zeros(1), np.zeros((1, 1)))) - - # test shapes must match except for concatenation axis - a = np.ones((1, 2, 3)) - b = np.ones((2, 2, 3)) - axis = list(range(3)) - for i in range(3): - np.concatenate((a, b), axis=axis[0]) # OK - assert_raises_regex( - ValueError, - "all the input array dimensions for the concatenation axis " - "must match exactly, but along dimension {}, the array at " - "index 0 has size 1 and the array at index 1 has size 2" - .format(i), - np.concatenate, (a, b), axis=axis[1]) - assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2]) - a = np.moveaxis(a, -1, 0) - b = np.moveaxis(b, -1, 0) - axis.append(axis.pop(0)) - - # No arrays to concatenate raises ValueError - assert_raises(ValueError, concatenate, ()) - - def test_concatenate_axis_None(self): - a = np.arange(4, dtype=np.float64).reshape((2, 2)) - b = list(range(3)) - c = ['x'] - r = 
np.concatenate((a, a), axis=None) - assert_equal(r.dtype, a.dtype) - assert_equal(r.ndim, 1) - r = np.concatenate((a, b), axis=None) - assert_equal(r.size, a.size + len(b)) - assert_equal(r.dtype, a.dtype) - r = np.concatenate((a, b, c), axis=None) - d = array(['0.0', '1.0', '2.0', '3.0', - '0', '1', '2', 'x']) - assert_array_equal(r, d) - - out = np.zeros(a.size + len(b)) - r = np.concatenate((a, b), axis=None) - rout = np.concatenate((a, b), axis=None, out=out) - assert_(out is rout) - assert_equal(r, rout) - - def test_large_concatenate_axis_None(self): - # When no axis is given, concatenate uses flattened versions. - # This also had a bug with many arrays (see gh-5979). - x = np.arange(1, 100) - r = np.concatenate(x, None) - assert_array_equal(x, r) - - # This should probably be deprecated: - r = np.concatenate(x, 100) # axis is >= MAXDIMS - assert_array_equal(x, r) - - def test_concatenate(self): - # Test concatenate function - # One sequence returns unmodified (but as array) - r4 = list(range(4)) - assert_array_equal(concatenate((r4,)), r4) - # Any sequence - assert_array_equal(concatenate((tuple(r4),)), r4) - assert_array_equal(concatenate((array(r4),)), r4) - # 1D default concatenation - r3 = list(range(3)) - assert_array_equal(concatenate((r4, r3)), r4 + r3) - # Mixed sequence types - assert_array_equal(concatenate((tuple(r4), r3)), r4 + r3) - assert_array_equal(concatenate((array(r4), r3)), r4 + r3) - # Explicit axis specification - assert_array_equal(concatenate((r4, r3), 0), r4 + r3) - # Including negative - assert_array_equal(concatenate((r4, r3), -1), r4 + r3) - # 2D - a23 = array([[10, 11, 12], [13, 14, 15]]) - a13 = array([[0, 1, 2]]) - res = array([[10, 11, 12], [13, 14, 15], [0, 1, 2]]) - assert_array_equal(concatenate((a23, a13)), res) - assert_array_equal(concatenate((a23, a13), 0), res) - assert_array_equal(concatenate((a23.T, a13.T), 1), res.T) - assert_array_equal(concatenate((a23.T, a13.T), -1), res.T) - # Arrays much match shape - 
assert_raises(ValueError, concatenate, (a23.T, a13.T), 0) - # 3D - res = arange(2 * 3 * 7).reshape((2, 3, 7)) - a0 = res[..., :4] - a1 = res[..., 4:6] - a2 = res[..., 6:] - assert_array_equal(concatenate((a0, a1, a2), 2), res) - assert_array_equal(concatenate((a0, a1, a2), -1), res) - assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T) - - out = res.copy() - rout = concatenate((a0, a1, a2), 2, out=out) - assert_(out is rout) - assert_equal(res, rout) - - def test_bad_out_shape(self): - a = array([1, 2]) - b = array([3, 4]) - - assert_raises(ValueError, concatenate, (a, b), out=np.empty(5)) - assert_raises(ValueError, concatenate, (a, b), out=np.empty((4,1))) - assert_raises(ValueError, concatenate, (a, b), out=np.empty((1,4))) - concatenate((a, b), out=np.empty(4)) - - def test_out_dtype(self): - out = np.empty(4, np.float32) - res = concatenate((array([1, 2]), array([3, 4])), out=out) - assert_(out is res) - - out = np.empty(4, np.complex64) - res = concatenate((array([0.1, 0.2]), array([0.3, 0.4])), out=out) - assert_(out is res) - - # invalid cast - out = np.empty(4, np.int32) - assert_raises(TypeError, concatenate, - (array([0.1, 0.2]), array([0.3, 0.4])), out=out) - - -def test_stack(): - # non-iterable input - assert_raises(TypeError, stack, 1) - - # 0d input - for input_ in [(1, 2, 3), - [np.int32(1), np.int32(2), np.int32(3)], - [np.array(1), np.array(2), np.array(3)]]: - assert_array_equal(stack(input_), [1, 2, 3]) - # 1d input examples - a = np.array([1, 2, 3]) - b = np.array([4, 5, 6]) - r1 = array([[1, 2, 3], [4, 5, 6]]) - assert_array_equal(np.stack((a, b)), r1) - assert_array_equal(np.stack((a, b), axis=1), r1.T) - # all input types - assert_array_equal(np.stack(list([a, b])), r1) - assert_array_equal(np.stack(array([a, b])), r1) - # all shapes for 1d input - arrays = [np.random.randn(3) for _ in range(10)] - axes = [0, 1, -1, -2] - expected_shapes = [(10, 3), (3, 10), (3, 10), (10, 3)] - for axis, expected_shape in zip(axes, 
expected_shapes): - assert_equal(np.stack(arrays, axis).shape, expected_shape) - assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=2) - assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=-3) - # all shapes for 2d input - arrays = [np.random.randn(3, 4) for _ in range(10)] - axes = [0, 1, 2, -1, -2, -3] - expected_shapes = [(10, 3, 4), (3, 10, 4), (3, 4, 10), - (3, 4, 10), (3, 10, 4), (10, 3, 4)] - for axis, expected_shape in zip(axes, expected_shapes): - assert_equal(np.stack(arrays, axis).shape, expected_shape) - # empty arrays - assert_(stack([[], [], []]).shape == (3, 0)) - assert_(stack([[], [], []], axis=1).shape == (0, 3)) - # out - out = np.zeros_like(r1) - np.stack((a, b), out=out) - assert_array_equal(out, r1) - # edge cases - assert_raises_regex(ValueError, 'need at least one array', stack, []) - assert_raises_regex(ValueError, 'must have the same shape', - stack, [1, np.arange(3)]) - assert_raises_regex(ValueError, 'must have the same shape', - stack, [np.arange(3), 1]) - assert_raises_regex(ValueError, 'must have the same shape', - stack, [np.arange(3), 1], axis=1) - assert_raises_regex(ValueError, 'must have the same shape', - stack, [np.zeros((3, 3)), np.zeros(3)], axis=1) - assert_raises_regex(ValueError, 'must have the same shape', - stack, [np.arange(2), np.arange(3)]) - # generator is deprecated - with assert_warns(FutureWarning): - result = stack((x for x in range(3))) - assert_array_equal(result, np.array([0, 1, 2])) - - -class TestBlock(object): - @pytest.fixture(params=['block', 'force_concatenate', 'force_slicing']) - def block(self, request): - # blocking small arrays and large arrays go through different paths. - # the algorithm is triggered depending on the number of element - # copies required. - # We define a test fixture that forces most tests to go through - # both code paths. - # Ultimately, this should be removed if a single algorithm is found - # to be faster for both small and large arrays. 
- def _block_force_concatenate(arrays): - arrays, list_ndim, result_ndim, _ = _block_setup(arrays) - return _block_concatenate(arrays, list_ndim, result_ndim) - - def _block_force_slicing(arrays): - arrays, list_ndim, result_ndim, _ = _block_setup(arrays) - return _block_slicing(arrays, list_ndim, result_ndim) - - if request.param == 'force_concatenate': - return _block_force_concatenate - elif request.param == 'force_slicing': - return _block_force_slicing - elif request.param == 'block': - return block - else: - raise ValueError('Unknown blocking request. There is a typo in the tests.') - - def test_returns_copy(self, block): - a = np.eye(3) - b = block(a) - b[0, 0] = 2 - assert b[0, 0] != a[0, 0] - - def test_block_total_size_estimate(self, block): - _, _, _, total_size = _block_setup([1]) - assert total_size == 1 - - _, _, _, total_size = _block_setup([[1]]) - assert total_size == 1 - - _, _, _, total_size = _block_setup([[1, 1]]) - assert total_size == 2 - - _, _, _, total_size = _block_setup([[1], [1]]) - assert total_size == 2 - - _, _, _, total_size = _block_setup([[1, 2], [3, 4]]) - assert total_size == 4 - - def test_block_simple_row_wise(self, block): - a_2d = np.ones((2, 2)) - b_2d = 2 * a_2d - desired = np.array([[1, 1, 2, 2], - [1, 1, 2, 2]]) - result = block([a_2d, b_2d]) - assert_equal(desired, result) - - def test_block_simple_column_wise(self, block): - a_2d = np.ones((2, 2)) - b_2d = 2 * a_2d - expected = np.array([[1, 1], - [1, 1], - [2, 2], - [2, 2]]) - result = block([[a_2d], [b_2d]]) - assert_equal(expected, result) - - def test_block_with_1d_arrays_row_wise(self, block): - # # # 1-D vectors are treated as row arrays - a = np.array([1, 2, 3]) - b = np.array([2, 3, 4]) - expected = np.array([1, 2, 3, 2, 3, 4]) - result = block([a, b]) - assert_equal(expected, result) - - def test_block_with_1d_arrays_multiple_rows(self, block): - a = np.array([1, 2, 3]) - b = np.array([2, 3, 4]) - expected = np.array([[1, 2, 3, 2, 3, 4], - [1, 2, 3, 2, 3, 4]]) 
- result = block([[a, b], [a, b]]) - assert_equal(expected, result) - - def test_block_with_1d_arrays_column_wise(self, block): - # # # 1-D vectors are treated as row arrays - a_1d = np.array([1, 2, 3]) - b_1d = np.array([2, 3, 4]) - expected = np.array([[1, 2, 3], - [2, 3, 4]]) - result = block([[a_1d], [b_1d]]) - assert_equal(expected, result) - - def test_block_mixed_1d_and_2d(self, block): - a_2d = np.ones((2, 2)) - b_1d = np.array([2, 2]) - result = block([[a_2d], [b_1d]]) - expected = np.array([[1, 1], - [1, 1], - [2, 2]]) - assert_equal(expected, result) - - def test_block_complicated(self, block): - # a bit more complicated - one_2d = np.array([[1, 1, 1]]) - two_2d = np.array([[2, 2, 2]]) - three_2d = np.array([[3, 3, 3, 3, 3, 3]]) - four_1d = np.array([4, 4, 4, 4, 4, 4]) - five_0d = np.array(5) - six_1d = np.array([6, 6, 6, 6, 6]) - zero_2d = np.zeros((2, 6)) - - expected = np.array([[1, 1, 1, 2, 2, 2], - [3, 3, 3, 3, 3, 3], - [4, 4, 4, 4, 4, 4], - [5, 6, 6, 6, 6, 6], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0]]) - - result = block([[one_2d, two_2d], - [three_2d], - [four_1d], - [five_0d, six_1d], - [zero_2d]]) - assert_equal(result, expected) - - def test_nested(self, block): - one = np.array([1, 1, 1]) - two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]]) - three = np.array([3, 3, 3]) - four = np.array([4, 4, 4]) - five = np.array(5) - six = np.array([6, 6, 6, 6, 6]) - zero = np.zeros((2, 6)) - - result = block([ - [ - block([ - [one], - [three], - [four] - ]), - two - ], - [five, six], - [zero] - ]) - expected = np.array([[1, 1, 1, 2, 2, 2], - [3, 3, 3, 2, 2, 2], - [4, 4, 4, 2, 2, 2], - [5, 6, 6, 6, 6, 6], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0]]) - - assert_equal(result, expected) - - def test_3d(self, block): - a000 = np.ones((2, 2, 2), int) * 1 - - a100 = np.ones((3, 2, 2), int) * 2 - a010 = np.ones((2, 3, 2), int) * 3 - a001 = np.ones((2, 2, 3), int) * 4 - - a011 = np.ones((2, 3, 3), int) * 5 - a101 = np.ones((3, 2, 3), int) * 6 - a110 = np.ones((3, 
3, 2), int) * 7 - - a111 = np.ones((3, 3, 3), int) * 8 - - result = block([ - [ - [a000, a001], - [a010, a011], - ], - [ - [a100, a101], - [a110, a111], - ] - ]) - expected = array([[[1, 1, 4, 4, 4], - [1, 1, 4, 4, 4], - [3, 3, 5, 5, 5], - [3, 3, 5, 5, 5], - [3, 3, 5, 5, 5]], - - [[1, 1, 4, 4, 4], - [1, 1, 4, 4, 4], - [3, 3, 5, 5, 5], - [3, 3, 5, 5, 5], - [3, 3, 5, 5, 5]], - - [[2, 2, 6, 6, 6], - [2, 2, 6, 6, 6], - [7, 7, 8, 8, 8], - [7, 7, 8, 8, 8], - [7, 7, 8, 8, 8]], - - [[2, 2, 6, 6, 6], - [2, 2, 6, 6, 6], - [7, 7, 8, 8, 8], - [7, 7, 8, 8, 8], - [7, 7, 8, 8, 8]], - - [[2, 2, 6, 6, 6], - [2, 2, 6, 6, 6], - [7, 7, 8, 8, 8], - [7, 7, 8, 8, 8], - [7, 7, 8, 8, 8]]]) - - assert_array_equal(result, expected) - - def test_block_with_mismatched_shape(self, block): - a = np.array([0, 0]) - b = np.eye(2) - assert_raises(ValueError, block, [a, b]) - assert_raises(ValueError, block, [b, a]) - - to_block = [[np.ones((2,3)), np.ones((2,2))], - [np.ones((2,2)), np.ones((2,2))]] - assert_raises(ValueError, block, to_block) - def test_no_lists(self, block): - assert_equal(block(1), np.array(1)) - assert_equal(block(np.eye(3)), np.eye(3)) - - def test_invalid_nesting(self, block): - msg = 'depths are mismatched' - assert_raises_regex(ValueError, msg, block, [1, [2]]) - assert_raises_regex(ValueError, msg, block, [1, []]) - assert_raises_regex(ValueError, msg, block, [[1], 2]) - assert_raises_regex(ValueError, msg, block, [[], 2]) - assert_raises_regex(ValueError, msg, block, [ - [[1], [2]], - [[3, 4]], - [5] # missing brackets - ]) - - def test_empty_lists(self, block): - assert_raises_regex(ValueError, 'empty', block, []) - assert_raises_regex(ValueError, 'empty', block, [[]]) - assert_raises_regex(ValueError, 'empty', block, [[1], []]) - - def test_tuple(self, block): - assert_raises_regex(TypeError, 'tuple', block, ([1, 2], [3, 4])) - assert_raises_regex(TypeError, 'tuple', block, [(1, 2), (3, 4)]) - - def test_different_ndims(self, block): - a = 1. 
- b = 2 * np.ones((1, 2)) - c = 3 * np.ones((1, 1, 3)) - - result = block([a, b, c]) - expected = np.array([[[1., 2., 2., 3., 3., 3.]]]) - - assert_equal(result, expected) - - def test_different_ndims_depths(self, block): - a = 1. - b = 2 * np.ones((1, 2)) - c = 3 * np.ones((1, 2, 3)) - - result = block([[a, b], [c]]) - expected = np.array([[[1., 2., 2.], - [3., 3., 3.], - [3., 3., 3.]]]) - - assert_equal(result, expected) - - def test_block_memory_order(self, block): - # 3D - arr_c = np.zeros((3,)*3, order='C') - arr_f = np.zeros((3,)*3, order='F') - - b_c = [[[arr_c, arr_c], - [arr_c, arr_c]], - [[arr_c, arr_c], - [arr_c, arr_c]]] - - b_f = [[[arr_f, arr_f], - [arr_f, arr_f]], - [[arr_f, arr_f], - [arr_f, arr_f]]] - - assert block(b_c).flags['C_CONTIGUOUS'] - assert block(b_f).flags['F_CONTIGUOUS'] - - arr_c = np.zeros((3, 3), order='C') - arr_f = np.zeros((3, 3), order='F') - # 2D - b_c = [[arr_c, arr_c], - [arr_c, arr_c]] - - b_f = [[arr_f, arr_f], - [arr_f, arr_f]] - - assert block(b_c).flags['C_CONTIGUOUS'] - assert block(b_f).flags['F_CONTIGUOUS'] - - -def test_block_dispatcher(): - class ArrayLike(object): - pass - a = ArrayLike() - b = ArrayLike() - c = ArrayLike() - assert_equal(list(_block_dispatcher(a)), [a]) - assert_equal(list(_block_dispatcher([a])), [a]) - assert_equal(list(_block_dispatcher([a, b])), [a, b]) - assert_equal(list(_block_dispatcher([[a], [b, [c]]])), [a, b, c]) - # don't recurse into non-lists - assert_equal(list(_block_dispatcher((a, b))), [(a, b)]) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_ufunc.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_ufunc.py deleted file mode 100644 index 526925e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_ufunc.py +++ /dev/null @@ -1,1948 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import warnings -import itertools - -import pytest - -import numpy as np -import numpy.core._umath_tests as umt -import 
numpy.linalg._umath_linalg as uml -import numpy.core._operand_flag_tests as opflag_tests -import numpy.core._rational_tests as _rational_tests -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_array_equal, - assert_almost_equal, assert_array_almost_equal, assert_no_warnings, - assert_allclose, - ) -from numpy.compat import pickle - - -class TestUfuncKwargs(object): - def test_kwarg_exact(self): - assert_raises(TypeError, np.add, 1, 2, castingx='safe') - assert_raises(TypeError, np.add, 1, 2, dtypex=int) - assert_raises(TypeError, np.add, 1, 2, extobjx=[4096]) - assert_raises(TypeError, np.add, 1, 2, outx=None) - assert_raises(TypeError, np.add, 1, 2, sigx='ii->i') - assert_raises(TypeError, np.add, 1, 2, signaturex='ii->i') - assert_raises(TypeError, np.add, 1, 2, subokx=False) - assert_raises(TypeError, np.add, 1, 2, wherex=[True]) - - def test_sig_signature(self): - assert_raises(ValueError, np.add, 1, 2, sig='ii->i', - signature='ii->i') - - def test_sig_dtype(self): - assert_raises(RuntimeError, np.add, 1, 2, sig='ii->i', - dtype=int) - assert_raises(RuntimeError, np.add, 1, 2, signature='ii->i', - dtype=int) - - def test_extobj_refcount(self): - # Should not segfault with USE_DEBUG. - assert_raises(TypeError, np.add, 1, 2, extobj=[4096], parrot=True) - - -class TestUfuncGenericLoops(object): - """Test generic loops. 
- - The loops to be tested are: - - PyUFunc_ff_f_As_dd_d - PyUFunc_ff_f - PyUFunc_dd_d - PyUFunc_gg_g - PyUFunc_FF_F_As_DD_D - PyUFunc_DD_D - PyUFunc_FF_F - PyUFunc_GG_G - PyUFunc_OO_O - PyUFunc_OO_O_method - PyUFunc_f_f_As_d_d - PyUFunc_d_d - PyUFunc_f_f - PyUFunc_g_g - PyUFunc_F_F_As_D_D - PyUFunc_F_F - PyUFunc_D_D - PyUFunc_G_G - PyUFunc_O_O - PyUFunc_O_O_method - PyUFunc_On_Om - - Where: - - f -- float - d -- double - g -- long double - F -- complex float - D -- complex double - G -- complex long double - O -- python object - - It is difficult to assure that each of these loops is entered from the - Python level as the special cased loops are a moving target and the - corresponding types are architecture dependent. We probably need to - define C level testing ufuncs to get at them. For the time being, I've - just looked at the signatures registered in the build directory to find - relevant functions. - - """ - np_dtypes = [ - (np.single, np.single), (np.single, np.double), - (np.csingle, np.csingle), (np.csingle, np.cdouble), - (np.double, np.double), (np.longdouble, np.longdouble), - (np.cdouble, np.cdouble), (np.clongdouble, np.clongdouble)] - - @pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes) - def test_unary_PyUFunc(self, input_dtype, output_dtype, f=np.exp, x=0, y=1): - xs = np.full(10, input_dtype(x), dtype=output_dtype) - ys = f(xs)[::2] - assert_allclose(ys, y) - assert_equal(ys.dtype, output_dtype) - - def f2(x, y): - return x**y - - @pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes) - def test_binary_PyUFunc(self, input_dtype, output_dtype, f=f2, x=0, y=1): - xs = np.full(10, input_dtype(x), dtype=output_dtype) - ys = f(xs, xs)[::2] - assert_allclose(ys, y) - assert_equal(ys.dtype, output_dtype) - - # class to use in testing object method loops - class foo(object): - def conjugate(self): - return np.bool_(1) - - def logical_xor(self, obj): - return np.bool_(1) - - def test_unary_PyUFunc_O_O(self): - x = np.ones(10, 
dtype=object) - assert_(np.all(np.abs(x) == 1)) - - def test_unary_PyUFunc_O_O_method(self, foo=foo): - x = np.full(10, foo(), dtype=object) - assert_(np.all(np.conjugate(x) == True)) - - def test_binary_PyUFunc_OO_O(self): - x = np.ones(10, dtype=object) - assert_(np.all(np.add(x, x) == 2)) - - def test_binary_PyUFunc_OO_O_method(self, foo=foo): - x = np.full(10, foo(), dtype=object) - assert_(np.all(np.logical_xor(x, x))) - - def test_binary_PyUFunc_On_Om_method(self, foo=foo): - x = np.full((10, 2, 3), foo(), dtype=object) - assert_(np.all(np.logical_xor(x, x))) - - -class TestUfunc(object): - def test_pickle(self): - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - assert_(pickle.loads(pickle.dumps(np.sin, - protocol=proto)) is np.sin) - - # Check that ufunc not defined in the top level numpy namespace - # such as numpy.core._rational_tests.test_add can also be pickled - res = pickle.loads(pickle.dumps(_rational_tests.test_add, - protocol=proto)) - assert_(res is _rational_tests.test_add) - - def test_pickle_withstring(self): - astring = (b"cnumpy.core\n_ufunc_reconstruct\np0\n" - b"(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.") - assert_(pickle.loads(astring) is np.cos) - - def test_reduceat_shifting_sum(self): - L = 6 - x = np.arange(L) - idx = np.array(list(zip(np.arange(L - 2), np.arange(L - 2) + 2))).ravel() - assert_array_equal(np.add.reduceat(x, idx)[::2], [1, 3, 5, 7]) - - def test_all_ufunc(self): - """Try to check presence and results of all ufuncs. 
- - The list of ufuncs comes from generate_umath.py and is as follows: - - ===== ==== ============= =============== ======================== - done args function types notes - ===== ==== ============= =============== ======================== - n 1 conjugate nums + O - n 1 absolute nums + O complex -> real - n 1 negative nums + O - n 1 sign nums + O -> int - n 1 invert bool + ints + O flts raise an error - n 1 degrees real + M cmplx raise an error - n 1 radians real + M cmplx raise an error - n 1 arccos flts + M - n 1 arccosh flts + M - n 1 arcsin flts + M - n 1 arcsinh flts + M - n 1 arctan flts + M - n 1 arctanh flts + M - n 1 cos flts + M - n 1 sin flts + M - n 1 tan flts + M - n 1 cosh flts + M - n 1 sinh flts + M - n 1 tanh flts + M - n 1 exp flts + M - n 1 expm1 flts + M - n 1 log flts + M - n 1 log10 flts + M - n 1 log1p flts + M - n 1 sqrt flts + M real x < 0 raises error - n 1 ceil real + M - n 1 trunc real + M - n 1 floor real + M - n 1 fabs real + M - n 1 rint flts + M - n 1 isnan flts -> bool - n 1 isinf flts -> bool - n 1 isfinite flts -> bool - n 1 signbit real -> bool - n 1 modf real -> (frac, int) - n 1 logical_not bool + nums + M -> bool - n 2 left_shift ints + O flts raise an error - n 2 right_shift ints + O flts raise an error - n 2 add bool + nums + O boolean + is || - n 2 subtract bool + nums + O boolean - is ^ - n 2 multiply bool + nums + O boolean * is & - n 2 divide nums + O - n 2 floor_divide nums + O - n 2 true_divide nums + O bBhH -> f, iIlLqQ -> d - n 2 fmod nums + M - n 2 power nums + O - n 2 greater bool + nums + O -> bool - n 2 greater_equal bool + nums + O -> bool - n 2 less bool + nums + O -> bool - n 2 less_equal bool + nums + O -> bool - n 2 equal bool + nums + O -> bool - n 2 not_equal bool + nums + O -> bool - n 2 logical_and bool + nums + M -> bool - n 2 logical_or bool + nums + M -> bool - n 2 logical_xor bool + nums + M -> bool - n 2 maximum bool + nums + O - n 2 minimum bool + nums + O - n 2 bitwise_and bool + ints + O flts 
raise an error - n 2 bitwise_or bool + ints + O flts raise an error - n 2 bitwise_xor bool + ints + O flts raise an error - n 2 arctan2 real + M - n 2 remainder ints + real + O - n 2 hypot real + M - ===== ==== ============= =============== ======================== - - Types other than those listed will be accepted, but they are cast to - the smallest compatible type for which the function is defined. The - casting rules are: - - bool -> int8 -> float32 - ints -> double - - """ - pass - - # from include/numpy/ufuncobject.h - size_inferred = 2 - can_ignore = 4 - def test_signature0(self): - # the arguments to test_signature are: nin, nout, core_signature - enabled, num_dims, ixs, flags, sizes = umt.test_signature( - 2, 1, "(i),(i)->()") - assert_equal(enabled, 1) - assert_equal(num_dims, (1, 1, 0)) - assert_equal(ixs, (0, 0)) - assert_equal(flags, (self.size_inferred,)) - assert_equal(sizes, (-1,)) - - def test_signature1(self): - # empty core signature; treat as plain ufunc (with trivial core) - enabled, num_dims, ixs, flags, sizes = umt.test_signature( - 2, 1, "(),()->()") - assert_equal(enabled, 0) - assert_equal(num_dims, (0, 0, 0)) - assert_equal(ixs, ()) - assert_equal(flags, ()) - assert_equal(sizes, ()) - - def test_signature2(self): - # more complicated names for variables - enabled, num_dims, ixs, flags, sizes = umt.test_signature( - 2, 1, "(i1,i2),(J_1)->(_kAB)") - assert_equal(enabled, 1) - assert_equal(num_dims, (2, 1, 1)) - assert_equal(ixs, (0, 1, 2, 3)) - assert_equal(flags, (self.size_inferred,)*4) - assert_equal(sizes, (-1, -1, -1, -1)) - - def test_signature3(self): - enabled, num_dims, ixs, flags, sizes = umt.test_signature( - 2, 1, u"(i1, i12), (J_1)->(i12, i2)") - assert_equal(enabled, 1) - assert_equal(num_dims, (2, 1, 2)) - assert_equal(ixs, (0, 1, 2, 1, 3)) - assert_equal(flags, (self.size_inferred,)*4) - assert_equal(sizes, (-1, -1, -1, -1)) - - def test_signature4(self): - # matrix_multiply signature from _umath_tests - enabled, num_dims, 
ixs, flags, sizes = umt.test_signature( - 2, 1, "(n,k),(k,m)->(n,m)") - assert_equal(enabled, 1) - assert_equal(num_dims, (2, 2, 2)) - assert_equal(ixs, (0, 1, 1, 2, 0, 2)) - assert_equal(flags, (self.size_inferred,)*3) - assert_equal(sizes, (-1, -1, -1)) - - def test_signature5(self): - # matmul signature from _umath_tests - enabled, num_dims, ixs, flags, sizes = umt.test_signature( - 2, 1, "(n?,k),(k,m?)->(n?,m?)") - assert_equal(enabled, 1) - assert_equal(num_dims, (2, 2, 2)) - assert_equal(ixs, (0, 1, 1, 2, 0, 2)) - assert_equal(flags, (self.size_inferred | self.can_ignore, - self.size_inferred, - self.size_inferred | self.can_ignore)) - assert_equal(sizes, (-1, -1, -1)) - - def test_signature6(self): - enabled, num_dims, ixs, flags, sizes = umt.test_signature( - 1, 1, "(3)->()") - assert_equal(enabled, 1) - assert_equal(num_dims, (1, 0)) - assert_equal(ixs, (0,)) - assert_equal(flags, (0,)) - assert_equal(sizes, (3,)) - - def test_signature7(self): - enabled, num_dims, ixs, flags, sizes = umt.test_signature( - 3, 1, "(3),(03,3),(n)->(9)") - assert_equal(enabled, 1) - assert_equal(num_dims, (1, 2, 1, 1)) - assert_equal(ixs, (0, 0, 0, 1, 2)) - assert_equal(flags, (0, self.size_inferred, 0)) - assert_equal(sizes, (3, -1, 9)) - - def test_signature8(self): - enabled, num_dims, ixs, flags, sizes = umt.test_signature( - 3, 1, "(3?),(3?,3?),(n)->(9)") - assert_equal(enabled, 1) - assert_equal(num_dims, (1, 2, 1, 1)) - assert_equal(ixs, (0, 0, 0, 1, 2)) - assert_equal(flags, (self.can_ignore, self.size_inferred, 0)) - assert_equal(sizes, (3, -1, 9)) - - def test_signature_failure_extra_parenthesis(self): - with assert_raises(ValueError): - umt.test_signature(2, 1, "((i)),(i)->()") - - def test_signature_failure_mismatching_parenthesis(self): - with assert_raises(ValueError): - umt.test_signature(2, 1, "(i),)i(->()") - - def test_signature_failure_signature_missing_input_arg(self): - with assert_raises(ValueError): - umt.test_signature(2, 1, "(i),->()") - - def 
test_signature_failure_signature_missing_output_arg(self): - with assert_raises(ValueError): - umt.test_signature(2, 2, "(i),(i)->()") - - def test_get_signature(self): - assert_equal(umt.inner1d.signature, "(i),(i)->()") - - def test_forced_sig(self): - a = 0.5*np.arange(3, dtype='f8') - assert_equal(np.add(a, 0.5), [0.5, 1, 1.5]) - assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1]) - assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1]) - assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'), [0, 0, 1]) - assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'), - casting='unsafe'), [0, 0, 1]) - - b = np.zeros((3,), dtype='f8') - np.add(a, 0.5, out=b) - assert_equal(b, [0.5, 1, 1.5]) - b[:] = 0 - np.add(a, 0.5, sig='i', out=b, casting='unsafe') - assert_equal(b, [0, 0, 1]) - b[:] = 0 - np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe') - assert_equal(b, [0, 0, 1]) - b[:] = 0 - np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe') - assert_equal(b, [0, 0, 1]) - b[:] = 0 - np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe') - assert_equal(b, [0, 0, 1]) - - def test_true_divide(self): - a = np.array(10) - b = np.array(20) - tgt = np.array(0.5) - - for tc in 'bhilqBHILQefdgFDG': - dt = np.dtype(tc) - aa = a.astype(dt) - bb = b.astype(dt) - - # Check result value and dtype. - for x, y in itertools.product([aa, -aa], [bb, -bb]): - - # Check with no output type specified - if tc in 'FDG': - tgt = complex(x)/complex(y) - else: - tgt = float(x)/float(y) - - res = np.true_divide(x, y) - rtol = max(np.finfo(res).resolution, 1e-15) - assert_allclose(res, tgt, rtol=rtol) - - if tc in 'bhilqBHILQ': - assert_(res.dtype.name == 'float64') - else: - assert_(res.dtype.name == dt.name ) - - # Check with output type specified. 
This also checks for the - # incorrect casts in issue gh-3484 because the unary '-' does - # not change types, even for unsigned types, Hence casts in the - # ufunc from signed to unsigned and vice versa will lead to - # errors in the values. - for tcout in 'bhilqBHILQ': - dtout = np.dtype(tcout) - assert_raises(TypeError, np.true_divide, x, y, dtype=dtout) - - for tcout in 'efdg': - dtout = np.dtype(tcout) - if tc in 'FDG': - # Casting complex to float is not allowed - assert_raises(TypeError, np.true_divide, x, y, dtype=dtout) - else: - tgt = float(x)/float(y) - rtol = max(np.finfo(dtout).resolution, 1e-15) - atol = max(np.finfo(dtout).tiny, 3e-308) - # Some test values result in invalid for float16. - with np.errstate(invalid='ignore'): - res = np.true_divide(x, y, dtype=dtout) - if not np.isfinite(res) and tcout == 'e': - continue - assert_allclose(res, tgt, rtol=rtol, atol=atol) - assert_(res.dtype.name == dtout.name) - - for tcout in 'FDG': - dtout = np.dtype(tcout) - tgt = complex(x)/complex(y) - rtol = max(np.finfo(dtout).resolution, 1e-15) - atol = max(np.finfo(dtout).tiny, 3e-308) - res = np.true_divide(x, y, dtype=dtout) - if not np.isfinite(res): - continue - assert_allclose(res, tgt, rtol=rtol, atol=atol) - assert_(res.dtype.name == dtout.name) - - # Check booleans - a = np.ones((), dtype=np.bool_) - res = np.true_divide(a, a) - assert_(res == 1.0) - assert_(res.dtype.name == 'float64') - res = np.true_divide(~a, a) - assert_(res == 0.0) - assert_(res.dtype.name == 'float64') - - def test_sum_stability(self): - a = np.ones(500, dtype=np.float32) - assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 4) - - a = np.ones(500, dtype=np.float64) - assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13) - - def test_sum(self): - for dt in (int, np.float16, np.float32, np.float64, np.longdouble): - for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, - 128, 1024, 1235): - tgt = dt(v * (v + 1) / 2) - d = np.arange(1, v + 1, dtype=dt) - - # warning if sum 
overflows, which it does in float16 - overflow = not np.isfinite(tgt) - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - assert_almost_equal(np.sum(d), tgt) - assert_equal(len(w), 1 * overflow) - - assert_almost_equal(np.sum(d[::-1]), tgt) - assert_equal(len(w), 2 * overflow) - - d = np.ones(500, dtype=dt) - assert_almost_equal(np.sum(d[::2]), 250.) - assert_almost_equal(np.sum(d[1::2]), 250.) - assert_almost_equal(np.sum(d[::3]), 167.) - assert_almost_equal(np.sum(d[1::3]), 167.) - assert_almost_equal(np.sum(d[::-2]), 250.) - assert_almost_equal(np.sum(d[-1::-2]), 250.) - assert_almost_equal(np.sum(d[::-3]), 167.) - assert_almost_equal(np.sum(d[-1::-3]), 167.) - # sum with first reduction entry != 0 - d = np.ones((1,), dtype=dt) - d += d - assert_almost_equal(d, 2.) - - def test_sum_complex(self): - for dt in (np.complex64, np.complex128, np.clongdouble): - for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, - 128, 1024, 1235): - tgt = dt(v * (v + 1) / 2) - dt((v * (v + 1) / 2) * 1j) - d = np.empty(v, dtype=dt) - d.real = np.arange(1, v + 1) - d.imag = -np.arange(1, v + 1) - assert_almost_equal(np.sum(d), tgt) - assert_almost_equal(np.sum(d[::-1]), tgt) - - d = np.ones(500, dtype=dt) + 1j - assert_almost_equal(np.sum(d[::2]), 250. + 250j) - assert_almost_equal(np.sum(d[1::2]), 250. + 250j) - assert_almost_equal(np.sum(d[::3]), 167. + 167j) - assert_almost_equal(np.sum(d[1::3]), 167. + 167j) - assert_almost_equal(np.sum(d[::-2]), 250. + 250j) - assert_almost_equal(np.sum(d[-1::-2]), 250. + 250j) - assert_almost_equal(np.sum(d[::-3]), 167. + 167j) - assert_almost_equal(np.sum(d[-1::-3]), 167. + 167j) - # sum with first reduction entry != 0 - d = np.ones((1,), dtype=dt) + 1j - d += d - assert_almost_equal(d, 2. 
+ 2j) - - def test_sum_initial(self): - # Integer, single axis - assert_equal(np.sum([3], initial=2), 5) - - # Floating point - assert_almost_equal(np.sum([0.2], initial=0.1), 0.3) - - # Multiple non-adjacent axes - assert_equal(np.sum(np.ones((2, 3, 5), dtype=np.int64), axis=(0, 2), initial=2), - [12, 12, 12]) - - def test_sum_where(self): - # More extensive tests done in test_reduction_with_where. - assert_equal(np.sum([[1., 2.], [3., 4.]], where=[True, False]), 4.) - assert_equal(np.sum([[1., 2.], [3., 4.]], axis=0, initial=5., - where=[True, False]), [9., 5.]) - - def test_inner1d(self): - a = np.arange(6).reshape((2, 3)) - assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1)) - a = np.arange(6) - assert_array_equal(umt.inner1d(a, a), np.sum(a*a)) - - def test_broadcast(self): - msg = "broadcast" - a = np.arange(4).reshape((2, 1, 2)) - b = np.arange(4).reshape((1, 2, 2)) - assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) - msg = "extend & broadcast loop dimensions" - b = np.arange(4).reshape((2, 2)) - assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) - # Broadcast in core dimensions should fail - a = np.arange(8).reshape((4, 2)) - b = np.arange(4).reshape((4, 1)) - assert_raises(ValueError, umt.inner1d, a, b) - # Extend core dimensions should fail - a = np.arange(8).reshape((4, 2)) - b = np.array(7) - assert_raises(ValueError, umt.inner1d, a, b) - # Broadcast should fail - a = np.arange(2).reshape((2, 1, 1)) - b = np.arange(3).reshape((3, 1, 1)) - assert_raises(ValueError, umt.inner1d, a, b) - - # Writing to a broadcasted array with overlap should warn, gh-2705 - a = np.arange(2) - b = np.arange(4).reshape((2, 2)) - u, v = np.broadcast_arrays(a, b) - assert_equal(u.strides[0], 0) - x = u + v - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - u += v - assert_equal(len(w), 1) - assert_(x[0,0] != u[0, 0]) - - def test_type_cast(self): - msg = "type cast" - a = np.arange(6, 
dtype='short').reshape((2, 3)) - assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), - err_msg=msg) - msg = "type cast on one argument" - a = np.arange(6).reshape((2, 3)) - b = a + 0.1 - assert_array_almost_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), - err_msg=msg) - - def test_endian(self): - msg = "big endian" - a = np.arange(6, dtype='>i4').reshape((2, 3)) - assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), - err_msg=msg) - msg = "little endian" - a = np.arange(6, dtype='()' - inner1d = umt.inner1d - a = np.arange(27.).reshape((3, 3, 3)) - b = np.arange(10., 19.).reshape((3, 1, 3)) - # basic tests on inputs (outputs tested below with matrix_multiply). - c = inner1d(a, b) - assert_array_equal(c, (a * b).sum(-1)) - # default - c = inner1d(a, b, axes=[(-1,), (-1,), ()]) - assert_array_equal(c, (a * b).sum(-1)) - # integers ok for single axis. - c = inner1d(a, b, axes=[-1, -1, ()]) - assert_array_equal(c, (a * b).sum(-1)) - # mix fine - c = inner1d(a, b, axes=[(-1,), -1, ()]) - assert_array_equal(c, (a * b).sum(-1)) - # can omit last axis. - c = inner1d(a, b, axes=[-1, -1]) - assert_array_equal(c, (a * b).sum(-1)) - # can pass in other types of integer (with __index__ protocol) - c = inner1d(a, b, axes=[np.int8(-1), np.array(-1, dtype=np.int32)]) - assert_array_equal(c, (a * b).sum(-1)) - # swap some axes - c = inner1d(a, b, axes=[0, 0]) - assert_array_equal(c, (a * b).sum(0)) - c = inner1d(a, b, axes=[0, 2]) - assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1)) - # Check errors for improperly constructed axes arguments. - # should have list. - assert_raises(TypeError, inner1d, a, b, axes=-1) - # needs enough elements - assert_raises(ValueError, inner1d, a, b, axes=[-1]) - # should pass in indices. 
- assert_raises(TypeError, inner1d, a, b, axes=[-1.0, -1.0]) - assert_raises(TypeError, inner1d, a, b, axes=[(-1.0,), -1]) - assert_raises(TypeError, inner1d, a, b, axes=[None, 1]) - # cannot pass an index unless there is only one dimension - # (output is wrong in this case) - assert_raises(TypeError, inner1d, a, b, axes=[-1, -1, -1]) - # or pass in generally the wrong number of axes - assert_raises(ValueError, inner1d, a, b, axes=[-1, -1, (-1,)]) - assert_raises(ValueError, inner1d, a, b, axes=[-1, (-2, -1), ()]) - # axes need to have same length. - assert_raises(ValueError, inner1d, a, b, axes=[0, 1]) - - # matrix_multiply signature: '(m,n),(n,p)->(m,p)' - mm = umt.matrix_multiply - a = np.arange(12).reshape((2, 3, 2)) - b = np.arange(8).reshape((2, 2, 2, 1)) + 1 - # Sanity check. - c = mm(a, b) - assert_array_equal(c, np.matmul(a, b)) - # Default axes. - c = mm(a, b, axes=[(-2, -1), (-2, -1), (-2, -1)]) - assert_array_equal(c, np.matmul(a, b)) - # Default with explicit axes. - c = mm(a, b, axes=[(1, 2), (2, 3), (2, 3)]) - assert_array_equal(c, np.matmul(a, b)) - # swap some axes. - c = mm(a, b, axes=[(0, -1), (1, 2), (-2, -1)]) - assert_array_equal(c, np.matmul(a.transpose(1, 0, 2), - b.transpose(0, 3, 1, 2))) - # Default with output array. - c = np.empty((2, 2, 3, 1)) - d = mm(a, b, out=c, axes=[(1, 2), (2, 3), (2, 3)]) - assert_(c is d) - assert_array_equal(c, np.matmul(a, b)) - # Transposed output array - c = np.empty((1, 2, 2, 3)) - d = mm(a, b, out=c, axes=[(-2, -1), (-2, -1), (3, 0)]) - assert_(c is d) - assert_array_equal(c, np.matmul(a, b).transpose(3, 0, 1, 2)) - # Check errors for improperly constructed axes arguments. 
- # wrong argument - assert_raises(TypeError, mm, a, b, axis=1) - # axes should be list - assert_raises(TypeError, mm, a, b, axes=1) - assert_raises(TypeError, mm, a, b, axes=((-2, -1), (-2, -1), (-2, -1))) - # list needs to have right length - assert_raises(ValueError, mm, a, b, axes=[]) - assert_raises(ValueError, mm, a, b, axes=[(-2, -1)]) - # list should contain tuples for multiple axes - assert_raises(TypeError, mm, a, b, axes=[-1, -1, -1]) - assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), -1]) - assert_raises(TypeError, - mm, a, b, axes=[[-2, -1], [-2, -1], [-2, -1]]) - assert_raises(TypeError, - mm, a, b, axes=[(-2, -1), (-2, -1), [-2, -1]]) - assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), None]) - # tuples should not have duplicated values - assert_raises(ValueError, mm, a, b, axes=[(-2, -1), (-2, -1), (-2, -2)]) - # arrays should have enough axes. - z = np.zeros((2, 2)) - assert_raises(ValueError, mm, z, z[0]) - assert_raises(ValueError, mm, z, z, out=z[:, 0]) - assert_raises(ValueError, mm, z[1], z, axes=[0, 1]) - assert_raises(ValueError, mm, z, z, out=z[0], axes=[0, 1]) - # Regular ufuncs should not accept axes. - assert_raises(TypeError, np.add, 1., 1., axes=[0]) - # should be able to deal with bad unrelated kwargs. - assert_raises(TypeError, mm, z, z, axes=[0, 1], parrot=True) - - def test_axis_argument(self): - # inner1d signature: '(i),(i)->()' - inner1d = umt.inner1d - a = np.arange(27.).reshape((3, 3, 3)) - b = np.arange(10., 19.).reshape((3, 1, 3)) - c = inner1d(a, b) - assert_array_equal(c, (a * b).sum(-1)) - c = inner1d(a, b, axis=-1) - assert_array_equal(c, (a * b).sum(-1)) - out = np.zeros_like(c) - d = inner1d(a, b, axis=-1, out=out) - assert_(d is out) - assert_array_equal(d, c) - c = inner1d(a, b, axis=0) - assert_array_equal(c, (a * b).sum(0)) - # Sanity checks on innerwt and cumsum. 
- a = np.arange(6).reshape((2, 3)) - b = np.arange(10, 16).reshape((2, 3)) - w = np.arange(20, 26).reshape((2, 3)) - assert_array_equal(umt.innerwt(a, b, w, axis=0), - np.sum(a * b * w, axis=0)) - assert_array_equal(umt.cumsum(a, axis=0), np.cumsum(a, axis=0)) - assert_array_equal(umt.cumsum(a, axis=-1), np.cumsum(a, axis=-1)) - out = np.empty_like(a) - b = umt.cumsum(a, out=out, axis=0) - assert_(out is b) - assert_array_equal(b, np.cumsum(a, axis=0)) - b = umt.cumsum(a, out=out, axis=1) - assert_(out is b) - assert_array_equal(b, np.cumsum(a, axis=-1)) - # Check errors. - # Cannot pass in both axis and axes. - assert_raises(TypeError, inner1d, a, b, axis=0, axes=[0, 0]) - # Not an integer. - assert_raises(TypeError, inner1d, a, b, axis=[0]) - # more than 1 core dimensions. - mm = umt.matrix_multiply - assert_raises(TypeError, mm, a, b, axis=1) - # Output wrong size in axis. - out = np.empty((1, 2, 3), dtype=a.dtype) - assert_raises(ValueError, umt.cumsum, a, out=out, axis=0) - # Regular ufuncs should not accept axis. - assert_raises(TypeError, np.add, 1., 1., axis=0) - - def test_keepdims_argument(self): - # inner1d signature: '(i),(i)->()' - inner1d = umt.inner1d - a = np.arange(27.).reshape((3, 3, 3)) - b = np.arange(10., 19.).reshape((3, 1, 3)) - c = inner1d(a, b) - assert_array_equal(c, (a * b).sum(-1)) - c = inner1d(a, b, keepdims=False) - assert_array_equal(c, (a * b).sum(-1)) - c = inner1d(a, b, keepdims=True) - assert_array_equal(c, (a * b).sum(-1, keepdims=True)) - out = np.zeros_like(c) - d = inner1d(a, b, keepdims=True, out=out) - assert_(d is out) - assert_array_equal(d, c) - # Now combined with axis and axes. 
- c = inner1d(a, b, axis=-1, keepdims=False) - assert_array_equal(c, (a * b).sum(-1, keepdims=False)) - c = inner1d(a, b, axis=-1, keepdims=True) - assert_array_equal(c, (a * b).sum(-1, keepdims=True)) - c = inner1d(a, b, axis=0, keepdims=False) - assert_array_equal(c, (a * b).sum(0, keepdims=False)) - c = inner1d(a, b, axis=0, keepdims=True) - assert_array_equal(c, (a * b).sum(0, keepdims=True)) - c = inner1d(a, b, axes=[(-1,), (-1,), ()], keepdims=False) - assert_array_equal(c, (a * b).sum(-1)) - c = inner1d(a, b, axes=[(-1,), (-1,), (-1,)], keepdims=True) - assert_array_equal(c, (a * b).sum(-1, keepdims=True)) - c = inner1d(a, b, axes=[0, 0], keepdims=False) - assert_array_equal(c, (a * b).sum(0)) - c = inner1d(a, b, axes=[0, 0, 0], keepdims=True) - assert_array_equal(c, (a * b).sum(0, keepdims=True)) - c = inner1d(a, b, axes=[0, 2], keepdims=False) - assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1)) - c = inner1d(a, b, axes=[0, 2], keepdims=True) - assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1, - keepdims=True)) - c = inner1d(a, b, axes=[0, 2, 2], keepdims=True) - assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1, - keepdims=True)) - c = inner1d(a, b, axes=[0, 2, 0], keepdims=True) - assert_array_equal(c, (a * b.transpose(2, 0, 1)).sum(0, keepdims=True)) - # Hardly useful, but should work. - c = inner1d(a, b, axes=[0, 2, 1], keepdims=True) - assert_array_equal(c, (a.transpose(1, 0, 2) * b.transpose(0, 2, 1)) - .sum(1, keepdims=True)) - # Check with two core dimensions. 
- a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis] - expected = uml.det(a) - c = uml.det(a, keepdims=False) - assert_array_equal(c, expected) - c = uml.det(a, keepdims=True) - assert_array_equal(c, expected[:, np.newaxis, np.newaxis]) - a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis] - expected_s, expected_l = uml.slogdet(a) - cs, cl = uml.slogdet(a, keepdims=False) - assert_array_equal(cs, expected_s) - assert_array_equal(cl, expected_l) - cs, cl = uml.slogdet(a, keepdims=True) - assert_array_equal(cs, expected_s[:, np.newaxis, np.newaxis]) - assert_array_equal(cl, expected_l[:, np.newaxis, np.newaxis]) - # Sanity check on innerwt. - a = np.arange(6).reshape((2, 3)) - b = np.arange(10, 16).reshape((2, 3)) - w = np.arange(20, 26).reshape((2, 3)) - assert_array_equal(umt.innerwt(a, b, w, keepdims=True), - np.sum(a * b * w, axis=-1, keepdims=True)) - assert_array_equal(umt.innerwt(a, b, w, axis=0, keepdims=True), - np.sum(a * b * w, axis=0, keepdims=True)) - # Check errors. - # Not a boolean - assert_raises(TypeError, inner1d, a, b, keepdims='true') - # More than 1 core dimension, and core output dimensions. - mm = umt.matrix_multiply - assert_raises(TypeError, mm, a, b, keepdims=True) - assert_raises(TypeError, mm, a, b, keepdims=False) - # Regular ufuncs should not accept keepdims. 
- assert_raises(TypeError, np.add, 1., 1., keepdims=False) - - def test_innerwt(self): - a = np.arange(6).reshape((2, 3)) - b = np.arange(10, 16).reshape((2, 3)) - w = np.arange(20, 26).reshape((2, 3)) - assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) - a = np.arange(100, 124).reshape((2, 3, 4)) - b = np.arange(200, 224).reshape((2, 3, 4)) - w = np.arange(300, 324).reshape((2, 3, 4)) - assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) - - def test_innerwt_empty(self): - """Test generalized ufunc with zero-sized operands""" - a = np.array([], dtype='f8') - b = np.array([], dtype='f8') - w = np.array([], dtype='f8') - assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) - - def test_cross1d(self): - """Test with fixed-sized signature.""" - a = np.eye(3) - assert_array_equal(umt.cross1d(a, a), np.zeros((3, 3))) - out = np.zeros((3, 3)) - result = umt.cross1d(a[0], a, out) - assert_(result is out) - assert_array_equal(result, np.vstack((np.zeros(3), a[2], -a[1]))) - assert_raises(ValueError, umt.cross1d, np.eye(4), np.eye(4)) - assert_raises(ValueError, umt.cross1d, a, np.arange(4.)) - assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros((3, 4))) - - def test_can_ignore_signature(self): - # Comparing the effects of ? in signature: - # matrix_multiply: (m,n),(n,p)->(m,p) # all must be there. - # matmul: (m?,n),(n,p?)->(m?,p?) # allow missing m, p. - mat = np.arange(12).reshape((2, 3, 2)) - single_vec = np.arange(2) - col_vec = single_vec[:, np.newaxis] - col_vec_array = np.arange(8).reshape((2, 2, 2, 1)) + 1 - # matrix @ single column vector with proper dimension - mm_col_vec = umt.matrix_multiply(mat, col_vec) - # matmul does the same thing - matmul_col_vec = umt.matmul(mat, col_vec) - assert_array_equal(matmul_col_vec, mm_col_vec) - # matrix @ vector without dimension making it a column vector. - # matrix multiply fails -> missing core dim. 
- assert_raises(ValueError, umt.matrix_multiply, mat, single_vec) - # matmul mimicker passes, and returns a vector. - matmul_col = umt.matmul(mat, single_vec) - assert_array_equal(matmul_col, mm_col_vec.squeeze()) - # Now with a column array: same as for column vector, - # broadcasting sensibly. - mm_col_vec = umt.matrix_multiply(mat, col_vec_array) - matmul_col_vec = umt.matmul(mat, col_vec_array) - assert_array_equal(matmul_col_vec, mm_col_vec) - # As above, but for row vector - single_vec = np.arange(3) - row_vec = single_vec[np.newaxis, :] - row_vec_array = np.arange(24).reshape((4, 2, 1, 1, 3)) + 1 - # row vector @ matrix - mm_row_vec = umt.matrix_multiply(row_vec, mat) - matmul_row_vec = umt.matmul(row_vec, mat) - assert_array_equal(matmul_row_vec, mm_row_vec) - # single row vector @ matrix - assert_raises(ValueError, umt.matrix_multiply, single_vec, mat) - matmul_row = umt.matmul(single_vec, mat) - assert_array_equal(matmul_row, mm_row_vec.squeeze()) - # row vector array @ matrix - mm_row_vec = umt.matrix_multiply(row_vec_array, mat) - matmul_row_vec = umt.matmul(row_vec_array, mat) - assert_array_equal(matmul_row_vec, mm_row_vec) - # Now for vector combinations - # row vector @ column vector - col_vec = row_vec.T - col_vec_array = row_vec_array.swapaxes(-2, -1) - mm_row_col_vec = umt.matrix_multiply(row_vec, col_vec) - matmul_row_col_vec = umt.matmul(row_vec, col_vec) - assert_array_equal(matmul_row_col_vec, mm_row_col_vec) - # single row vector @ single col vector - assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec) - matmul_row_col = umt.matmul(single_vec, single_vec) - assert_array_equal(matmul_row_col, mm_row_col_vec.squeeze()) - # row vector array @ matrix - mm_row_col_array = umt.matrix_multiply(row_vec_array, col_vec_array) - matmul_row_col_array = umt.matmul(row_vec_array, col_vec_array) - assert_array_equal(matmul_row_col_array, mm_row_col_array) - # Finally, check that things are *not* squeezed if one gives an - # output. 
- out = np.zeros_like(mm_row_col_array) - out = umt.matrix_multiply(row_vec_array, col_vec_array, out=out) - assert_array_equal(out, mm_row_col_array) - out[:] = 0 - out = umt.matmul(row_vec_array, col_vec_array, out=out) - assert_array_equal(out, mm_row_col_array) - # And check one cannot put missing dimensions back. - out = np.zeros_like(mm_row_col_vec) - assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec, - out) - # But fine for matmul, since it is just a broadcast. - out = umt.matmul(single_vec, single_vec, out) - assert_array_equal(out, mm_row_col_vec.squeeze()) - - def test_matrix_multiply(self): - self.compare_matrix_multiply_results(np.int64) - self.compare_matrix_multiply_results(np.double) - - def test_matrix_multiply_umath_empty(self): - res = umt.matrix_multiply(np.ones((0, 10)), np.ones((10, 0))) - assert_array_equal(res, np.zeros((0, 0))) - res = umt.matrix_multiply(np.ones((10, 0)), np.ones((0, 10))) - assert_array_equal(res, np.zeros((10, 10))) - - def compare_matrix_multiply_results(self, tp): - d1 = np.array(np.random.rand(2, 3, 4), dtype=tp) - d2 = np.array(np.random.rand(2, 3, 4), dtype=tp) - msg = "matrix multiply on type %s" % d1.dtype.name - - def permute_n(n): - if n == 1: - return ([0],) - ret = () - base = permute_n(n-1) - for perm in base: - for i in range(n): - new = perm + [n-1] - new[n-1] = new[i] - new[i] = n-1 - ret += (new,) - return ret - - def slice_n(n): - if n == 0: - return ((),) - ret = () - base = slice_n(n-1) - for sl in base: - ret += (sl+(slice(None),),) - ret += (sl+(slice(0, 1),),) - return ret - - def broadcastable(s1, s2): - return s1 == s2 or s1 == 1 or s2 == 1 - - permute_3 = permute_n(3) - slice_3 = slice_n(3) + ((slice(None, None, -1),)*3,) - - ref = True - for p1 in permute_3: - for p2 in permute_3: - for s1 in slice_3: - for s2 in slice_3: - a1 = d1.transpose(p1)[s1] - a2 = d2.transpose(p2)[s2] - ref = ref and a1.base is not None - ref = ref and a2.base is not None - if (a1.shape[-1] == 
a2.shape[-2] and - broadcastable(a1.shape[0], a2.shape[0])): - assert_array_almost_equal( - umt.matrix_multiply(a1, a2), - np.sum(a2[..., np.newaxis].swapaxes(-3, -1) * - a1[..., np.newaxis,:], axis=-1), - err_msg=msg + ' %s %s' % (str(a1.shape), - str(a2.shape))) - - assert_equal(ref, True, err_msg="reference check") - - def test_euclidean_pdist(self): - a = np.arange(12, dtype=float).reshape(4, 3) - out = np.empty((a.shape[0] * (a.shape[0] - 1) // 2,), dtype=a.dtype) - umt.euclidean_pdist(a, out) - b = np.sqrt(np.sum((a[:, None] - a)**2, axis=-1)) - b = b[~np.tri(a.shape[0], dtype=bool)] - assert_almost_equal(out, b) - # An output array is required to determine p with signature (n,d)->(p) - assert_raises(ValueError, umt.euclidean_pdist, a) - - def test_cumsum(self): - a = np.arange(10) - result = umt.cumsum(a) - assert_array_equal(result, a.cumsum()) - - def test_object_logical(self): - a = np.array([3, None, True, False, "test", ""], dtype=object) - assert_equal(np.logical_or(a, None), - np.array([x or None for x in a], dtype=object)) - assert_equal(np.logical_or(a, True), - np.array([x or True for x in a], dtype=object)) - assert_equal(np.logical_or(a, 12), - np.array([x or 12 for x in a], dtype=object)) - assert_equal(np.logical_or(a, "blah"), - np.array([x or "blah" for x in a], dtype=object)) - - assert_equal(np.logical_and(a, None), - np.array([x and None for x in a], dtype=object)) - assert_equal(np.logical_and(a, True), - np.array([x and True for x in a], dtype=object)) - assert_equal(np.logical_and(a, 12), - np.array([x and 12 for x in a], dtype=object)) - assert_equal(np.logical_and(a, "blah"), - np.array([x and "blah" for x in a], dtype=object)) - - assert_equal(np.logical_not(a), - np.array([not x for x in a], dtype=object)) - - assert_equal(np.logical_or.reduce(a), 3) - assert_equal(np.logical_and.reduce(a), None) - - def test_object_comparison(self): - class HasComparisons(object): - def __eq__(self, other): - return '==' - - arr0d = 
np.array(HasComparisons()) - assert_equal(arr0d == arr0d, True) - assert_equal(np.equal(arr0d, arr0d), True) # normal behavior is a cast - - arr1d = np.array([HasComparisons()]) - assert_equal(arr1d == arr1d, np.array([True])) - assert_equal(np.equal(arr1d, arr1d), np.array([True])) # normal behavior is a cast - assert_equal(np.equal(arr1d, arr1d, dtype=object), np.array(['=='])) - - def test_object_array_reduction(self): - # Reductions on object arrays - a = np.array(['a', 'b', 'c'], dtype=object) - assert_equal(np.sum(a), 'abc') - assert_equal(np.max(a), 'c') - assert_equal(np.min(a), 'a') - a = np.array([True, False, True], dtype=object) - assert_equal(np.sum(a), 2) - assert_equal(np.prod(a), 0) - assert_equal(np.any(a), True) - assert_equal(np.all(a), False) - assert_equal(np.max(a), True) - assert_equal(np.min(a), False) - assert_equal(np.array([[1]], dtype=object).sum(), 1) - assert_equal(np.array([[[1, 2]]], dtype=object).sum((0, 1)), [1, 2]) - assert_equal(np.array([1], dtype=object).sum(initial=1), 2) - assert_equal(np.array([[1], [2, 3]], dtype=object) - .sum(initial=[0], where=[False, True]), [0, 2, 3]) - - def test_object_array_accumulate_inplace(self): - # Checks that in-place accumulates work, see also gh-7402 - arr = np.ones(4, dtype=object) - arr[:] = [[1] for i in range(4)] - # Twice reproduced also for tuples: - np.add.accumulate(arr, out=arr) - np.add.accumulate(arr, out=arr) - assert_array_equal(arr, np.array([[1]*i for i in [1, 3, 6, 10]])) - - # And the same if the axis argument is used - arr = np.ones((2, 4), dtype=object) - arr[0, :] = [[2] for i in range(4)] - np.add.accumulate(arr, out=arr, axis=-1) - np.add.accumulate(arr, out=arr, axis=-1) - assert_array_equal(arr[0, :], np.array([[2]*i for i in [1, 3, 6, 10]])) - - def test_object_array_reduceat_inplace(self): - # Checks that in-place reduceats work, see also gh-7465 - arr = np.empty(4, dtype=object) - arr[:] = [[1] for i in range(4)] - out = np.empty(4, dtype=object) - out[:] = [[1] 
for i in range(4)] - np.add.reduceat(arr, np.arange(4), out=arr) - np.add.reduceat(arr, np.arange(4), out=arr) - assert_array_equal(arr, out) - - # And the same if the axis argument is used - arr = np.ones((2, 4), dtype=object) - arr[0, :] = [[2] for i in range(4)] - out = np.ones((2, 4), dtype=object) - out[0, :] = [[2] for i in range(4)] - np.add.reduceat(arr, np.arange(4), out=arr, axis=-1) - np.add.reduceat(arr, np.arange(4), out=arr, axis=-1) - assert_array_equal(arr, out) - - def test_zerosize_reduction(self): - # Test with default dtype and object dtype - for a in [[], np.array([], dtype=object)]: - assert_equal(np.sum(a), 0) - assert_equal(np.prod(a), 1) - assert_equal(np.any(a), False) - assert_equal(np.all(a), True) - assert_raises(ValueError, np.max, a) - assert_raises(ValueError, np.min, a) - - def test_axis_out_of_bounds(self): - a = np.array([False, False]) - assert_raises(np.AxisError, a.all, axis=1) - a = np.array([False, False]) - assert_raises(np.AxisError, a.all, axis=-2) - - a = np.array([False, False]) - assert_raises(np.AxisError, a.any, axis=1) - a = np.array([False, False]) - assert_raises(np.AxisError, a.any, axis=-2) - - def test_scalar_reduction(self): - # The functions 'sum', 'prod', etc allow specifying axis=0 - # even for scalars - assert_equal(np.sum(3, axis=0), 3) - assert_equal(np.prod(3.5, axis=0), 3.5) - assert_equal(np.any(True, axis=0), True) - assert_equal(np.all(False, axis=0), False) - assert_equal(np.max(3, axis=0), 3) - assert_equal(np.min(2.5, axis=0), 2.5) - - # Check scalar behaviour for ufuncs without an identity - assert_equal(np.power.reduce(3), 3) - - # Make sure that scalars are coming out from this operation - assert_(type(np.prod(np.float32(2.5), axis=0)) is np.float32) - assert_(type(np.sum(np.float32(2.5), axis=0)) is np.float32) - assert_(type(np.max(np.float32(2.5), axis=0)) is np.float32) - assert_(type(np.min(np.float32(2.5), axis=0)) is np.float32) - - # check if scalars/0-d arrays get cast - 
assert_(type(np.any(0, axis=0)) is np.bool_) - - # assert that 0-d arrays get wrapped - class MyArray(np.ndarray): - pass - a = np.array(1).view(MyArray) - assert_(type(np.any(a)) is MyArray) - - def test_casting_out_param(self): - # Test that it's possible to do casts on output - a = np.ones((200, 100), np.int64) - b = np.ones((200, 100), np.int64) - c = np.ones((200, 100), np.float64) - np.add(a, b, out=c) - assert_equal(c, 2) - - a = np.zeros(65536) - b = np.zeros(65536, dtype=np.float32) - np.subtract(a, 0, out=b) - assert_equal(b, 0) - - def test_where_param(self): - # Test that the where= ufunc parameter works with regular arrays - a = np.arange(7) - b = np.ones(7) - c = np.zeros(7) - np.add(a, b, out=c, where=(a % 2 == 1)) - assert_equal(c, [0, 2, 0, 4, 0, 6, 0]) - - a = np.arange(4).reshape(2, 2) + 2 - np.power(a, [2, 3], out=a, where=[[0, 1], [1, 0]]) - assert_equal(a, [[2, 27], [16, 5]]) - # Broadcasting the where= parameter - np.subtract(a, 2, out=a, where=[True, False]) - assert_equal(a, [[0, 27], [14, 5]]) - - def test_where_param_buffer_output(self): - # This test is temporarily skipped because it requires - # adding masking features to the nditer to work properly - - # With casting on output - a = np.ones(10, np.int64) - b = np.ones(10, np.int64) - c = 1.5 * np.ones(10, np.float64) - np.add(a, b, out=c, where=[1, 0, 0, 1, 0, 0, 1, 1, 1, 0]) - assert_equal(c, [2, 1.5, 1.5, 2, 1.5, 1.5, 2, 2, 2, 1.5]) - - def test_where_param_alloc(self): - # With casting and allocated output - a = np.array([1], dtype=np.int64) - m = np.array([True], dtype=bool) - assert_equal(np.sqrt(a, where=m), [1]) - - # No casting and allocated output - a = np.array([1], dtype=np.float64) - m = np.array([True], dtype=bool) - assert_equal(np.sqrt(a, where=m), [1]) - - def check_identityless_reduction(self, a): - # np.minimum.reduce is an identityless reduction - - # Verify that it sees the zero at various positions - a[...] 
= 1 - a[1, 0, 0] = 0 - assert_equal(np.minimum.reduce(a, axis=None), 0) - assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(1, 2)), [1, 0]) - assert_equal(np.minimum.reduce(a, axis=0), - [[0, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=1), - [[1, 1, 1, 1], [0, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=2), - [[1, 1, 1], [0, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=()), a) - - a[...] = 1 - a[0, 1, 0] = 0 - assert_equal(np.minimum.reduce(a, axis=None), 0) - assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(0, 2)), [1, 0, 1]) - assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1]) - assert_equal(np.minimum.reduce(a, axis=0), - [[1, 1, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=1), - [[0, 1, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=2), - [[1, 0, 1], [1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=()), a) - - a[...] 
= 1 - a[0, 0, 1] = 0 - assert_equal(np.minimum.reduce(a, axis=None), 0) - assert_equal(np.minimum.reduce(a, axis=(0, 1)), [1, 0, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1]) - assert_equal(np.minimum.reduce(a, axis=0), - [[1, 0, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=1), - [[1, 0, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=2), - [[0, 1, 1], [1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=()), a) - - def test_identityless_reduction_corder(self): - a = np.empty((2, 3, 4), order='C') - self.check_identityless_reduction(a) - - def test_identityless_reduction_forder(self): - a = np.empty((2, 3, 4), order='F') - self.check_identityless_reduction(a) - - def test_identityless_reduction_otherorder(self): - a = np.empty((2, 4, 3), order='C').swapaxes(1, 2) - self.check_identityless_reduction(a) - - def test_identityless_reduction_noncontig(self): - a = np.empty((3, 5, 4), order='C').swapaxes(1, 2) - a = a[1:, 1:, 1:] - self.check_identityless_reduction(a) - - def test_identityless_reduction_noncontig_unaligned(self): - a = np.empty((3*4*5*8 + 1,), dtype='i1') - a = a[1:].view(dtype='f8') - a.shape = (3, 4, 5) - a = a[1:, 1:, 1:] - self.check_identityless_reduction(a) - - def test_initial_reduction(self): - # np.minimum.reduce is an identityless reduction - - # For cases like np.maximum(np.abs(...), initial=0) - # More generally, a supremum over non-negative numbers. - assert_equal(np.maximum.reduce([], initial=0), 0) - - # For cases like reduction of an empty array over the reals. 
- assert_equal(np.minimum.reduce([], initial=np.inf), np.inf) - assert_equal(np.maximum.reduce([], initial=-np.inf), -np.inf) - - # Random tests - assert_equal(np.minimum.reduce([5], initial=4), 4) - assert_equal(np.maximum.reduce([4], initial=5), 5) - assert_equal(np.maximum.reduce([5], initial=4), 5) - assert_equal(np.minimum.reduce([4], initial=5), 4) - - # Check initial=None raises ValueError for both types of ufunc reductions - assert_raises(ValueError, np.minimum.reduce, [], initial=None) - assert_raises(ValueError, np.add.reduce, [], initial=None) - - # Check that np._NoValue gives default behavior. - assert_equal(np.add.reduce([], initial=np._NoValue), 0) - - # Check that initial kwarg behaves as intended for dtype=object - a = np.array([10], dtype=object) - res = np.add.reduce(a, initial=5) - assert_equal(res, 15) - - @pytest.mark.parametrize('axis', (0, 1, None)) - @pytest.mark.parametrize('where', (np.array([False, True, True]), - np.array([[True], [False], [True]]), - np.array([[True, False, False], - [False, True, False], - [False, True, True]]))) - def test_reduction_with_where(self, axis, where): - a = np.arange(9.).reshape(3, 3) - a_copy = a.copy() - a_check = np.zeros_like(a) - np.positive(a, out=a_check, where=where) - - res = np.add.reduce(a, axis=axis, where=where) - check = a_check.sum(axis) - assert_equal(res, check) - # Check we do not overwrite elements of a internally. 
- assert_array_equal(a, a_copy) - - @pytest.mark.parametrize(('axis', 'where'), - ((0, np.array([True, False, True])), - (1, [True, True, False]), - (None, True))) - @pytest.mark.parametrize('initial', (-np.inf, 5.)) - def test_reduction_with_where_and_initial(self, axis, where, initial): - a = np.arange(9.).reshape(3, 3) - a_copy = a.copy() - a_check = np.full(a.shape, -np.inf) - np.positive(a, out=a_check, where=where) - - res = np.maximum.reduce(a, axis=axis, where=where, initial=initial) - check = a_check.max(axis, initial=initial) - assert_equal(res, check) - - def test_reduction_where_initial_needed(self): - a = np.arange(9.).reshape(3, 3) - m = [False, True, False] - assert_raises(ValueError, np.maximum.reduce, a, where=m) - - def test_identityless_reduction_nonreorderable(self): - a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]]) - - res = np.divide.reduce(a, axis=0) - assert_equal(res, [8.0, 4.0, 8.0]) - - res = np.divide.reduce(a, axis=1) - assert_equal(res, [2.0, 8.0]) - - res = np.divide.reduce(a, axis=()) - assert_equal(res, a) - - assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1)) - - def test_reduce_zero_axis(self): - # If we have a n x m array and do a reduction with axis=1, then we are - # doing n reductions, and each reduction takes an m-element array. For - # a reduction operation without an identity, then: - # n > 0, m > 0: fine - # n = 0, m > 0: fine, doing 0 reductions of m-element arrays - # n > 0, m = 0: can't reduce a 0-element array, ValueError - # n = 0, m = 0: can't reduce a 0-element array, ValueError (for - # consistency with the above case) - # This test doesn't actually look at return values, it just checks to - # make sure that error we get an error in exactly those cases where we - # expect one, and assumes the calculations themselves are done - # correctly. 
- - def ok(f, *args, **kwargs): - f(*args, **kwargs) - - def err(f, *args, **kwargs): - assert_raises(ValueError, f, *args, **kwargs) - - def t(expect, func, n, m): - expect(func, np.zeros((n, m)), axis=1) - expect(func, np.zeros((m, n)), axis=0) - expect(func, np.zeros((n // 2, n // 2, m)), axis=2) - expect(func, np.zeros((n // 2, m, n // 2)), axis=1) - expect(func, np.zeros((n, m // 2, m // 2)), axis=(1, 2)) - expect(func, np.zeros((m // 2, n, m // 2)), axis=(0, 2)) - expect(func, np.zeros((m // 3, m // 3, m // 3, - n // 2, n // 2)), - axis=(0, 1, 2)) - # Check what happens if the inner (resp. outer) dimensions are a - # mix of zero and non-zero: - expect(func, np.zeros((10, m, n)), axis=(0, 1)) - expect(func, np.zeros((10, n, m)), axis=(0, 2)) - expect(func, np.zeros((m, 10, n)), axis=0) - expect(func, np.zeros((10, m, n)), axis=1) - expect(func, np.zeros((10, n, m)), axis=2) - - # np.maximum is just an arbitrary ufunc with no reduction identity - assert_equal(np.maximum.identity, None) - t(ok, np.maximum.reduce, 30, 30) - t(ok, np.maximum.reduce, 0, 30) - t(err, np.maximum.reduce, 30, 0) - t(err, np.maximum.reduce, 0, 0) - err(np.maximum.reduce, []) - np.maximum.reduce(np.zeros((0, 0)), axis=()) - - # all of the combinations are fine for a reduction that has an - # identity - t(ok, np.add.reduce, 30, 30) - t(ok, np.add.reduce, 0, 30) - t(ok, np.add.reduce, 30, 0) - t(ok, np.add.reduce, 0, 0) - np.add.reduce([]) - np.add.reduce(np.zeros((0, 0)), axis=()) - - # OTOH, accumulate always makes sense for any combination of n and m, - # because it maps an m-element array to an m-element array. These - # tests are simpler because accumulate doesn't accept multiple axes. 
- for uf in (np.maximum, np.add): - uf.accumulate(np.zeros((30, 0)), axis=0) - uf.accumulate(np.zeros((0, 30)), axis=0) - uf.accumulate(np.zeros((30, 30)), axis=0) - uf.accumulate(np.zeros((0, 0)), axis=0) - - def test_safe_casting(self): - # In old versions of numpy, in-place operations used the 'unsafe' - # casting rules. In versions >= 1.10, 'same_kind' is the - # default and an exception is raised instead of a warning. - # when 'same_kind' is not satisfied. - a = np.array([1, 2, 3], dtype=int) - # Non-in-place addition is fine - assert_array_equal(assert_no_warnings(np.add, a, 1.1), - [2.1, 3.1, 4.1]) - assert_raises(TypeError, np.add, a, 1.1, out=a) - - def add_inplace(a, b): - a += b - - assert_raises(TypeError, add_inplace, a, 1.1) - # Make sure that explicitly overriding the exception is allowed: - assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe") - assert_array_equal(a, [2, 3, 4]) - - def test_ufunc_custom_out(self): - # Test ufunc with built in input types and custom output type - - a = np.array([0, 1, 2], dtype='i8') - b = np.array([0, 1, 2], dtype='i8') - c = np.empty(3, dtype=_rational_tests.rational) - - # Output must be specified so numpy knows what - # ufunc signature to look for - result = _rational_tests.test_add(a, b, c) - target = np.array([0, 2, 4], dtype=_rational_tests.rational) - assert_equal(result, target) - - # no output type should raise TypeError - with assert_raises(TypeError): - _rational_tests.test_add(a, b) - - def test_operand_flags(self): - a = np.arange(16, dtype='l').reshape(4, 4) - b = np.arange(9, dtype='l').reshape(3, 3) - opflag_tests.inplace_add(a[:-1, :-1], b) - assert_equal(a, np.array([[0, 2, 4, 3], [7, 9, 11, 7], - [14, 16, 18, 11], [12, 13, 14, 15]], dtype='l')) - - a = np.array(0) - opflag_tests.inplace_add(a, 3) - assert_equal(a, 3) - opflag_tests.inplace_add(a, [3, 4]) - assert_equal(a, 10) - - def test_struct_ufunc(self): - import numpy.core._struct_ufunc_tests as struct_ufunc - - a = np.array([(1, 2, 
3)], dtype='u8,u8,u8') - b = np.array([(1, 2, 3)], dtype='u8,u8,u8') - - result = struct_ufunc.add_triplet(a, b) - assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8')) - assert_raises(RuntimeError, struct_ufunc.register_fail) - - def test_custom_ufunc(self): - a = np.array( - [_rational_tests.rational(1, 2), - _rational_tests.rational(1, 3), - _rational_tests.rational(1, 4)], - dtype=_rational_tests.rational) - b = np.array( - [_rational_tests.rational(1, 2), - _rational_tests.rational(1, 3), - _rational_tests.rational(1, 4)], - dtype=_rational_tests.rational) - - result = _rational_tests.test_add_rationals(a, b) - expected = np.array( - [_rational_tests.rational(1), - _rational_tests.rational(2, 3), - _rational_tests.rational(1, 2)], - dtype=_rational_tests.rational) - assert_equal(result, expected) - - def test_custom_ufunc_forced_sig(self): - # gh-9351 - looking for a non-first userloop would previously hang - with assert_raises(TypeError): - np.multiply(_rational_tests.rational(1), 1, - signature=(_rational_tests.rational, int, None)) - - def test_custom_array_like(self): - - class MyThing(object): - __array_priority__ = 1000 - - rmul_count = 0 - getitem_count = 0 - - def __init__(self, shape): - self.shape = shape - - def __len__(self): - return self.shape[0] - - def __getitem__(self, i): - MyThing.getitem_count += 1 - if not isinstance(i, tuple): - i = (i,) - if len(i) > self.ndim: - raise IndexError("boo") - - return MyThing(self.shape[len(i):]) - - def __rmul__(self, other): - MyThing.rmul_count += 1 - return self - - np.float64(5)*MyThing((3, 3)) - assert_(MyThing.rmul_count == 1, MyThing.rmul_count) - assert_(MyThing.getitem_count <= 2, MyThing.getitem_count) - - def test_inplace_fancy_indexing(self): - - a = np.arange(10) - np.add.at(a, [2, 5, 2], 1) - assert_equal(a, [0, 1, 4, 3, 4, 6, 6, 7, 8, 9]) - - a = np.arange(10) - b = np.array([100, 100, 100]) - np.add.at(a, [2, 5, 2], b) - assert_equal(a, [0, 1, 202, 3, 4, 105, 6, 7, 8, 9]) - - a = 
np.arange(9).reshape(3, 3) - b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]]) - np.add.at(a, (slice(None), [1, 2, 1]), b) - assert_equal(a, [[0, 201, 102], [3, 404, 205], [6, 607, 308]]) - - a = np.arange(27).reshape(3, 3, 3) - b = np.array([100, 200, 300]) - np.add.at(a, (slice(None), slice(None), [1, 2, 1]), b) - assert_equal(a, - [[[0, 401, 202], - [3, 404, 205], - [6, 407, 208]], - - [[9, 410, 211], - [12, 413, 214], - [15, 416, 217]], - - [[18, 419, 220], - [21, 422, 223], - [24, 425, 226]]]) - - a = np.arange(9).reshape(3, 3) - b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]]) - np.add.at(a, ([1, 2, 1], slice(None)), b) - assert_equal(a, [[0, 1, 2], [403, 404, 405], [206, 207, 208]]) - - a = np.arange(27).reshape(3, 3, 3) - b = np.array([100, 200, 300]) - np.add.at(a, (slice(None), [1, 2, 1], slice(None)), b) - assert_equal(a, - [[[0, 1, 2], - [203, 404, 605], - [106, 207, 308]], - - [[9, 10, 11], - [212, 413, 614], - [115, 216, 317]], - - [[18, 19, 20], - [221, 422, 623], - [124, 225, 326]]]) - - a = np.arange(9).reshape(3, 3) - b = np.array([100, 200, 300]) - np.add.at(a, (0, [1, 2, 1]), b) - assert_equal(a, [[0, 401, 202], [3, 4, 5], [6, 7, 8]]) - - a = np.arange(27).reshape(3, 3, 3) - b = np.array([100, 200, 300]) - np.add.at(a, ([1, 2, 1], 0, slice(None)), b) - assert_equal(a, - [[[0, 1, 2], - [3, 4, 5], - [6, 7, 8]], - - [[209, 410, 611], - [12, 13, 14], - [15, 16, 17]], - - [[118, 219, 320], - [21, 22, 23], - [24, 25, 26]]]) - - a = np.arange(27).reshape(3, 3, 3) - b = np.array([100, 200, 300]) - np.add.at(a, (slice(None), slice(None), slice(None)), b) - assert_equal(a, - [[[100, 201, 302], - [103, 204, 305], - [106, 207, 308]], - - [[109, 210, 311], - [112, 213, 314], - [115, 216, 317]], - - [[118, 219, 320], - [121, 222, 323], - [124, 225, 326]]]) - - a = np.arange(10) - np.negative.at(a, [2, 5, 2]) - assert_equal(a, [0, 1, 2, 3, 4, -5, 6, 7, 8, 9]) - - # Test 0-dim array - a = np.array(0) - np.add.at(a, (), 1) - 
assert_equal(a, 1) - - assert_raises(IndexError, np.add.at, a, 0, 1) - assert_raises(IndexError, np.add.at, a, [], 1) - - # Test mixed dtypes - a = np.arange(10) - np.power.at(a, [1, 2, 3, 2], 3.5) - assert_equal(a, np.array([0, 1, 4414, 46, 4, 5, 6, 7, 8, 9])) - - # Test boolean indexing and boolean ufuncs - a = np.arange(10) - index = a % 2 == 0 - np.equal.at(a, index, [0, 2, 4, 6, 8]) - assert_equal(a, [1, 1, 1, 3, 1, 5, 1, 7, 1, 9]) - - # Test unary operator - a = np.arange(10, dtype='u4') - np.invert.at(a, [2, 5, 2]) - assert_equal(a, [0, 1, 2, 3, 4, 5 ^ 0xffffffff, 6, 7, 8, 9]) - - # Test empty subspace - orig = np.arange(4) - a = orig[:, None][:, 0:0] - np.add.at(a, [0, 1], 3) - assert_array_equal(orig, np.arange(4)) - - # Test with swapped byte order - index = np.array([1, 2, 1], np.dtype('i').newbyteorder()) - values = np.array([1, 2, 3, 4], np.dtype('f').newbyteorder()) - np.add.at(values, index, 3) - assert_array_equal(values, [1, 8, 6, 4]) - - # Test exception thrown - values = np.array(['a', 1], dtype=object) - assert_raises(TypeError, np.add.at, values, [0, 1], 1) - assert_array_equal(values, np.array(['a', 1], dtype=object)) - - # Test multiple output ufuncs raise error, gh-5665 - assert_raises(ValueError, np.modf.at, np.arange(10), [1]) - - def test_reduce_arguments(self): - f = np.add.reduce - d = np.ones((5,2), dtype=int) - o = np.ones((2,), dtype=d.dtype) - r = o * 5 - assert_equal(f(d), r) - # a, axis=0, dtype=None, out=None, keepdims=False - assert_equal(f(d, axis=0), r) - assert_equal(f(d, 0), r) - assert_equal(f(d, 0, dtype=None), r) - assert_equal(f(d, 0, dtype='i'), r) - assert_equal(f(d, 0, 'i'), r) - assert_equal(f(d, 0, None), r) - assert_equal(f(d, 0, None, out=None), r) - assert_equal(f(d, 0, None, out=o), r) - assert_equal(f(d, 0, None, o), r) - assert_equal(f(d, 0, None, None), r) - assert_equal(f(d, 0, None, None, keepdims=False), r) - assert_equal(f(d, 0, None, None, True), r.reshape((1,) + r.shape)) - assert_equal(f(d, 0, None, 
None, False, 0), r) - assert_equal(f(d, 0, None, None, False, initial=0), r) - assert_equal(f(d, 0, None, None, False, 0, True), r) - assert_equal(f(d, 0, None, None, False, 0, where=True), r) - # multiple keywords - assert_equal(f(d, axis=0, dtype=None, out=None, keepdims=False), r) - assert_equal(f(d, 0, dtype=None, out=None, keepdims=False), r) - assert_equal(f(d, 0, None, out=None, keepdims=False), r) - assert_equal(f(d, 0, None, out=None, keepdims=False, initial=0, - where=True), r) - - # too little - assert_raises(TypeError, f) - # too much - assert_raises(TypeError, f, d, 0, None, None, False, 0, True, 1) - # invalid axis - assert_raises(TypeError, f, d, "invalid") - assert_raises(TypeError, f, d, axis="invalid") - assert_raises(TypeError, f, d, axis="invalid", dtype=None, - keepdims=True) - # invalid dtype - assert_raises(TypeError, f, d, 0, "invalid") - assert_raises(TypeError, f, d, dtype="invalid") - assert_raises(TypeError, f, d, dtype="invalid", out=None) - # invalid out - assert_raises(TypeError, f, d, 0, None, "invalid") - assert_raises(TypeError, f, d, out="invalid") - assert_raises(TypeError, f, d, out="invalid", dtype=None) - # keepdims boolean, no invalid value - # assert_raises(TypeError, f, d, 0, None, None, "invalid") - # assert_raises(TypeError, f, d, keepdims="invalid", axis=0, dtype=None) - # invalid mix - assert_raises(TypeError, f, d, 0, keepdims="invalid", dtype="invalid", - out=None) - - # invalid keyord - assert_raises(TypeError, f, d, axis=0, dtype=None, invalid=0) - assert_raises(TypeError, f, d, invalid=0) - assert_raises(TypeError, f, d, 0, keepdims=True, invalid="invalid", - out=None) - assert_raises(TypeError, f, d, axis=0, dtype=None, keepdims=True, - out=None, invalid=0) - assert_raises(TypeError, f, d, axis=0, dtype=None, - out=None, invalid=0) - - def test_structured_equal(self): - # https://github.com/numpy/numpy/issues/4855 - - class MyA(np.ndarray): - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - return 
getattr(ufunc, method)(*(input.view(np.ndarray) - for input in inputs), **kwargs) - a = np.arange(12.).reshape(4,3) - ra = a.view(dtype=('f8,f8,f8')).squeeze() - mra = ra.view(MyA) - - target = np.array([ True, False, False, False], dtype=bool) - assert_equal(np.all(target == (mra == ra[0])), True) - - def test_scalar_equal(self): - # Scalar comparisons should always work, without deprecation warnings. - # even when the ufunc fails. - a = np.array(0.) - b = np.array('a') - assert_(a != b) - assert_(b != a) - assert_(not (a == b)) - assert_(not (b == a)) - - def test_NotImplemented_not_returned(self): - # See gh-5964 and gh-2091. Some of these functions are not operator - # related and were fixed for other reasons in the past. - binary_funcs = [ - np.power, np.add, np.subtract, np.multiply, np.divide, - np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or, - np.bitwise_xor, np.left_shift, np.right_shift, np.fmax, - np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2, - np.logical_and, np.logical_or, np.logical_xor, np.maximum, - np.minimum, np.mod, - np.greater, np.greater_equal, np.less, np.less_equal, - np.equal, np.not_equal] - - a = np.array('1') - b = 1 - c = np.array([1., 2.]) - for f in binary_funcs: - assert_raises(TypeError, f, a, b) - assert_raises(TypeError, f, c, a) - - def test_reduce_noncontig_output(self): - # Check that reduction deals with non-contiguous output arrays - # appropriately. 
- # - # gh-8036 - - x = np.arange(7*13*8, dtype=np.int16).reshape(7, 13, 8) - x = x[4:6,1:11:6,1:5].transpose(1, 2, 0) - y_base = np.arange(4*4, dtype=np.int16).reshape(4, 4) - y = y_base[::2,:] - - y_base_copy = y_base.copy() - - r0 = np.add.reduce(x, out=y.copy(), axis=2) - r1 = np.add.reduce(x, out=y, axis=2) - - # The results should match, and y_base shouldn't get clobbered - assert_equal(r0, r1) - assert_equal(y_base[1,:], y_base_copy[1,:]) - assert_equal(y_base[3,:], y_base_copy[3,:]) - - def test_no_doc_string(self): - # gh-9337 - assert_('\n' not in umt.inner1d_no_doc.__doc__) - - def test_invalid_args(self): - # gh-7961 - exc = pytest.raises(TypeError, np.sqrt, None) - # minimally check the exception text - assert exc.match('loop of ufunc does not support') - - @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')]) - def test_nat_is_not_finite(self, nat): - try: - assert not np.isfinite(nat) - except TypeError: - pass # ok, just not implemented - - @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')]) - def test_nat_is_nan(self, nat): - try: - assert np.isnan(nat) - except TypeError: - pass # ok, just not implemented - - @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')]) - def test_nat_is_not_inf(self, nat): - try: - assert not np.isinf(nat) - except TypeError: - pass # ok, just not implemented - - -@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np) - if isinstance(getattr(np, x), np.ufunc)]) -def test_ufunc_types(ufunc): - ''' - Check all ufuncs that the correct type is returned. Avoid - object and boolean types since many operations are not defined for - for them. - - Choose the shape so even dot and matmul will succeed - ''' - for typ in ufunc.types: - # types is a list of strings like ii->i - if 'O' in typ or '?' 
in typ: - continue - inp, out = typ.split('->') - args = [np.ones((3, 3), t) for t in inp] - with warnings.catch_warnings(record=True): - warnings.filterwarnings("always") - res = ufunc(*args) - if isinstance(res, tuple): - outs = tuple(out) - assert len(res) == len(outs) - for r, t in zip(res, outs): - assert r.dtype == np.dtype(t) - else: - assert res.dtype == np.dtype(out) - -@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np) - if isinstance(getattr(np, x), np.ufunc)]) -def test_ufunc_noncontiguous(ufunc): - ''' - Check that contiguous and non-contiguous calls to ufuncs - have the same results for values in range(9) - ''' - for typ in ufunc.types: - # types is a list of strings like ii->i - if any(set('O?mM') & set(typ)): - # bool, object, datetime are too irregular for this simple test - continue - inp, out = typ.split('->') - args_c = [np.empty(6, t) for t in inp] - args_n = [np.empty(18, t)[::3] for t in inp] - for a in args_c: - a.flat = range(1,7) - for a in args_n: - a.flat = range(1,7) - with warnings.catch_warnings(record=True): - warnings.filterwarnings("always") - res_c = ufunc(*args_c) - res_n = ufunc(*args_n) - if len(out) == 1: - res_c = (res_c,) - res_n = (res_n,) - for c_ar, n_ar in zip(res_c, res_n): - dt = c_ar.dtype - if np.issubdtype(dt, np.floating): - # for floating point results allow a small fuss in comparisons - # since different algorithms (libm vs. 
intrinsics) can be used - # for different input strides - res_eps = np.finfo(dt).eps - tol = 2*res_eps - assert_allclose(res_c, res_n, atol=tol, rtol=tol) - else: - assert_equal(c_ar, n_ar) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_umath.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_umath.py deleted file mode 100644 index e892e81..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_umath.py +++ /dev/null @@ -1,3138 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import platform -import warnings -import fnmatch -import itertools -import pytest -from fractions import Fraction - -import numpy.core.umath as ncu -from numpy.core import _umath_tests as ncu_tests -import numpy as np -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_raises_regex, - assert_array_equal, assert_almost_equal, assert_array_almost_equal, - assert_array_max_ulp, assert_allclose, assert_no_warnings, suppress_warnings, - _gen_alignment_data, assert_array_almost_equal_nulp - ) - -def on_powerpc(): - """ True if we are running on a Power PC platform.""" - return platform.processor() == 'powerpc' or \ - platform.machine().startswith('ppc') - - -class _FilterInvalids(object): - def setup(self): - self.olderr = np.seterr(invalid='ignore') - - def teardown(self): - np.seterr(**self.olderr) - - -class TestConstants(object): - def test_pi(self): - assert_allclose(ncu.pi, 3.141592653589793, 1e-15) - - def test_e(self): - assert_allclose(ncu.e, 2.718281828459045, 1e-15) - - def test_euler_gamma(self): - assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15) - - -class TestOut(object): - def test_out_subok(self): - for subok in (True, False): - a = np.array(0.5) - o = np.empty(()) - - r = np.add(a, 2, o, subok=subok) - assert_(r is o) - r = np.add(a, 2, out=o, subok=subok) - assert_(r is o) - r = np.add(a, 2, out=(o,), subok=subok) - assert_(r is o) - - d = np.array(5.7) - o1 = np.empty(()) - 
o2 = np.empty((), dtype=np.int32) - - r1, r2 = np.frexp(d, o1, None, subok=subok) - assert_(r1 is o1) - r1, r2 = np.frexp(d, None, o2, subok=subok) - assert_(r2 is o2) - r1, r2 = np.frexp(d, o1, o2, subok=subok) - assert_(r1 is o1) - assert_(r2 is o2) - - r1, r2 = np.frexp(d, out=(o1, None), subok=subok) - assert_(r1 is o1) - r1, r2 = np.frexp(d, out=(None, o2), subok=subok) - assert_(r2 is o2) - r1, r2 = np.frexp(d, out=(o1, o2), subok=subok) - assert_(r1 is o1) - assert_(r2 is o2) - - with assert_raises(TypeError): - # Out argument must be tuple, since there are multiple outputs. - r1, r2 = np.frexp(d, out=o1, subok=subok) - - assert_raises(ValueError, np.add, a, 2, o, o, subok=subok) - assert_raises(ValueError, np.add, a, 2, o, out=o, subok=subok) - assert_raises(ValueError, np.add, a, 2, None, out=o, subok=subok) - assert_raises(ValueError, np.add, a, 2, out=(o, o), subok=subok) - assert_raises(ValueError, np.add, a, 2, out=(), subok=subok) - assert_raises(TypeError, np.add, a, 2, [], subok=subok) - assert_raises(TypeError, np.add, a, 2, out=[], subok=subok) - assert_raises(TypeError, np.add, a, 2, out=([],), subok=subok) - o.flags.writeable = False - assert_raises(ValueError, np.add, a, 2, o, subok=subok) - assert_raises(ValueError, np.add, a, 2, out=o, subok=subok) - assert_raises(ValueError, np.add, a, 2, out=(o,), subok=subok) - - def test_out_wrap_subok(self): - class ArrayWrap(np.ndarray): - __array_priority__ = 10 - - def __new__(cls, arr): - return np.asarray(arr).view(cls).copy() - - def __array_wrap__(self, arr, context): - return arr.view(type(self)) - - for subok in (True, False): - a = ArrayWrap([0.5]) - - r = np.add(a, 2, subok=subok) - if subok: - assert_(isinstance(r, ArrayWrap)) - else: - assert_(type(r) == np.ndarray) - - r = np.add(a, 2, None, subok=subok) - if subok: - assert_(isinstance(r, ArrayWrap)) - else: - assert_(type(r) == np.ndarray) - - r = np.add(a, 2, out=None, subok=subok) - if subok: - assert_(isinstance(r, ArrayWrap)) - else: 
- assert_(type(r) == np.ndarray) - - r = np.add(a, 2, out=(None,), subok=subok) - if subok: - assert_(isinstance(r, ArrayWrap)) - else: - assert_(type(r) == np.ndarray) - - d = ArrayWrap([5.7]) - o1 = np.empty((1,)) - o2 = np.empty((1,), dtype=np.int32) - - r1, r2 = np.frexp(d, o1, subok=subok) - if subok: - assert_(isinstance(r2, ArrayWrap)) - else: - assert_(type(r2) == np.ndarray) - - r1, r2 = np.frexp(d, o1, None, subok=subok) - if subok: - assert_(isinstance(r2, ArrayWrap)) - else: - assert_(type(r2) == np.ndarray) - - r1, r2 = np.frexp(d, None, o2, subok=subok) - if subok: - assert_(isinstance(r1, ArrayWrap)) - else: - assert_(type(r1) == np.ndarray) - - r1, r2 = np.frexp(d, out=(o1, None), subok=subok) - if subok: - assert_(isinstance(r2, ArrayWrap)) - else: - assert_(type(r2) == np.ndarray) - - r1, r2 = np.frexp(d, out=(None, o2), subok=subok) - if subok: - assert_(isinstance(r1, ArrayWrap)) - else: - assert_(type(r1) == np.ndarray) - - with assert_raises(TypeError): - # Out argument must be tuple, since there are multiple outputs. - r1, r2 = np.frexp(d, out=o1, subok=subok) - - -class TestComparisons(object): - def test_ignore_object_identity_in_equal(self): - # Check comparing identical objects whose comparison - # is not a simple boolean, e.g., arrays that are compared elementwise. - a = np.array([np.array([1, 2, 3]), None], dtype=object) - assert_raises(ValueError, np.equal, a, a) - - # Check error raised when comparing identical non-comparable objects. - class FunkyType(object): - def __eq__(self, other): - raise TypeError("I won't compare") - - a = np.array([FunkyType()]) - assert_raises(TypeError, np.equal, a, a) - - # Check identity doesn't override comparison mismatch. - a = np.array([np.nan], dtype=object) - assert_equal(np.equal(a, a), [False]) - - def test_ignore_object_identity_in_not_equal(self): - # Check comparing identical objects whose comparison - # is not a simple boolean, e.g., arrays that are compared elementwise. 
- a = np.array([np.array([1, 2, 3]), None], dtype=object) - assert_raises(ValueError, np.not_equal, a, a) - - # Check error raised when comparing identical non-comparable objects. - class FunkyType(object): - def __ne__(self, other): - raise TypeError("I won't compare") - - a = np.array([FunkyType()]) - assert_raises(TypeError, np.not_equal, a, a) - - # Check identity doesn't override comparison mismatch. - a = np.array([np.nan], dtype=object) - assert_equal(np.not_equal(a, a), [True]) - - -class TestAdd(object): - def test_reduce_alignment(self): - # gh-9876 - # make sure arrays with weird strides work with the optimizations in - # pairwise_sum_@TYPE@. On x86, the 'b' field will count as aligned at a - # 4 byte offset, even though its itemsize is 8. - a = np.zeros(2, dtype=[('a', np.int32), ('b', np.float64)]) - a['a'] = -1 - assert_equal(a['b'].sum(), 0) - - -class TestDivision(object): - def test_division_int(self): - # int division should follow Python - x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120]) - if 5 / 10 == 0.5: - assert_equal(x / 100, [0.05, 0.1, 0.9, 1, - -0.05, -0.1, -0.9, -1, -1.2]) - else: - assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2]) - assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2]) - assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80]) - - def test_division_complex(self): - # check that implementation is correct - msg = "Complex division implementation check" - x = np.array([1. + 1.*1j, 1. + .5*1j, 1. 
+ 2.*1j], dtype=np.complex128) - assert_almost_equal(x**2/x, x, err_msg=msg) - # check overflow, underflow - msg = "Complex division overflow/underflow check" - x = np.array([1.e+110, 1.e-110], dtype=np.complex128) - y = x**2/x - assert_almost_equal(y/x, [1, 1], err_msg=msg) - - def test_zero_division_complex(self): - with np.errstate(invalid="ignore", divide="ignore"): - x = np.array([0.0], dtype=np.complex128) - y = 1.0/x - assert_(np.isinf(y)[0]) - y = complex(np.inf, np.nan)/x - assert_(np.isinf(y)[0]) - y = complex(np.nan, np.inf)/x - assert_(np.isinf(y)[0]) - y = complex(np.inf, np.inf)/x - assert_(np.isinf(y)[0]) - y = 0.0/x - assert_(np.isnan(y)[0]) - - def test_floor_division_complex(self): - # check that implementation is correct - msg = "Complex floor division implementation check" - x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128) - y = np.array([0., -1., 0., 0.], dtype=np.complex128) - assert_equal(np.floor_divide(x**2, x), y, err_msg=msg) - # check overflow, underflow - msg = "Complex floor division overflow/underflow check" - x = np.array([1.e+110, 1.e-110], dtype=np.complex128) - y = np.floor_divide(x**2, x) - assert_equal(y, [1.e+110, 0], err_msg=msg) - - def test_floor_division_signed_zero(self): - # Check that the sign bit is correctly set when dividing positive and - # negative zero by one. 
- x = np.zeros(10) - assert_equal(np.signbit(x//1), 0) - assert_equal(np.signbit((-x)//1), 1) - -def floor_divide_and_remainder(x, y): - return (np.floor_divide(x, y), np.remainder(x, y)) - - -def _signs(dt): - if dt in np.typecodes['UnsignedInteger']: - return (+1,) - else: - return (+1, -1) - - -class TestRemainder(object): - - def test_remainder_basic(self): - dt = np.typecodes['AllInteger'] + np.typecodes['Float'] - for op in [floor_divide_and_remainder, np.divmod]: - for dt1, dt2 in itertools.product(dt, dt): - for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)): - fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' - msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) - a = np.array(sg1*71, dtype=dt1) - b = np.array(sg2*19, dtype=dt2) - div, rem = op(a, b) - assert_equal(div*b + rem, a, err_msg=msg) - if sg2 == -1: - assert_(b < rem <= 0, msg) - else: - assert_(b > rem >= 0, msg) - - def test_float_remainder_exact(self): - # test that float results are exact for small integers. This also - # holds for the same integers scaled by powers of two. - nlst = list(range(-127, 0)) - plst = list(range(1, 128)) - dividend = nlst + [0] + plst - divisor = nlst + plst - arg = list(itertools.product(dividend, divisor)) - tgt = list(divmod(*t) for t in arg) - - a, b = np.array(arg, dtype=int).T - # convert exact integer results from Python to float so that - # signed zero can be used, it is checked. 
- tgtdiv, tgtrem = np.array(tgt, dtype=float).T - tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv) - tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem) - - for op in [floor_divide_and_remainder, np.divmod]: - for dt in np.typecodes['Float']: - msg = 'op: %s, dtype: %s' % (op.__name__, dt) - fa = a.astype(dt) - fb = b.astype(dt) - div, rem = op(fa, fb) - assert_equal(div, tgtdiv, err_msg=msg) - assert_equal(rem, tgtrem, err_msg=msg) - - def test_float_remainder_roundoff(self): - # gh-6127 - dt = np.typecodes['Float'] - for op in [floor_divide_and_remainder, np.divmod]: - for dt1, dt2 in itertools.product(dt, dt): - for sg1, sg2 in itertools.product((+1, -1), (+1, -1)): - fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' - msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) - a = np.array(sg1*78*6e-8, dtype=dt1) - b = np.array(sg2*6e-8, dtype=dt2) - div, rem = op(a, b) - # Equal assertion should hold when fmod is used - assert_equal(div*b + rem, a, err_msg=msg) - if sg2 == -1: - assert_(b < rem <= 0, msg) - else: - assert_(b > rem >= 0, msg) - - def test_float_remainder_corner_cases(self): - # Check remainder magnitude. - for dt in np.typecodes['Float']: - b = np.array(1.0, dtype=dt) - a = np.nextafter(np.array(0.0, dtype=dt), -b) - rem = np.remainder(a, b) - assert_(rem <= b, 'dt: %s' % dt) - rem = np.remainder(-a, -b) - assert_(rem >= -b, 'dt: %s' % dt) - - # Check nans, inf - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in remainder") - for dt in np.typecodes['Float']: - fone = np.array(1.0, dtype=dt) - fzer = np.array(0.0, dtype=dt) - finf = np.array(np.inf, dtype=dt) - fnan = np.array(np.nan, dtype=dt) - rem = np.remainder(fone, fzer) - assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) - # MSVC 2008 returns NaN here, so disable the check. 
- #rem = np.remainder(fone, finf) - #assert_(rem == fone, 'dt: %s, rem: %s' % (dt, rem)) - rem = np.remainder(fone, fnan) - assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) - rem = np.remainder(finf, fone) - assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) - - -class TestCbrt(object): - def test_cbrt_scalar(self): - assert_almost_equal((np.cbrt(np.float32(-2.5)**3)), -2.5) - - def test_cbrt(self): - x = np.array([1., 2., -3., np.inf, -np.inf]) - assert_almost_equal(np.cbrt(x**3), x) - - assert_(np.isnan(np.cbrt(np.nan))) - assert_equal(np.cbrt(np.inf), np.inf) - assert_equal(np.cbrt(-np.inf), -np.inf) - - -class TestPower(object): - def test_power_float(self): - x = np.array([1., 2., 3.]) - assert_equal(x**0, [1., 1., 1.]) - assert_equal(x**1, x) - assert_equal(x**2, [1., 4., 9.]) - y = x.copy() - y **= 2 - assert_equal(y, [1., 4., 9.]) - assert_almost_equal(x**(-1), [1., 0.5, 1./3]) - assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)]) - - for out, inp, msg in _gen_alignment_data(dtype=np.float32, - type='unary', - max_size=11): - exp = [ncu.sqrt(i) for i in inp] - assert_almost_equal(inp**(0.5), exp, err_msg=msg) - np.sqrt(inp, out=out) - assert_equal(out, exp, err_msg=msg) - - for out, inp, msg in _gen_alignment_data(dtype=np.float64, - type='unary', - max_size=7): - exp = [ncu.sqrt(i) for i in inp] - assert_almost_equal(inp**(0.5), exp, err_msg=msg) - np.sqrt(inp, out=out) - assert_equal(out, exp, err_msg=msg) - - def test_power_complex(self): - x = np.array([1+2j, 2+3j, 3+4j]) - assert_equal(x**0, [1., 1., 1.]) - assert_equal(x**1, x) - assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j]) - assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3]) - assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4]) - assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)]) - assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2]) - assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197, - (-117-44j)/15625]) - 
assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j), - ncu.sqrt(3+4j)]) - norm = 1./((x**14)[0]) - assert_almost_equal(x**14 * norm, - [i * norm for i in [-76443+16124j, 23161315+58317492j, - 5583548873 + 2465133864j]]) - - # Ticket #836 - def assert_complex_equal(x, y): - assert_array_equal(x.real, y.real) - assert_array_equal(x.imag, y.imag) - - for z in [complex(0, np.inf), complex(1, np.inf)]: - z = np.array([z], dtype=np.complex_) - with np.errstate(invalid="ignore"): - assert_complex_equal(z**1, z) - assert_complex_equal(z**2, z*z) - assert_complex_equal(z**3, z*z*z) - - def test_power_zero(self): - # ticket #1271 - zero = np.array([0j]) - one = np.array([1+0j]) - cnan = np.array([complex(np.nan, np.nan)]) - # FIXME cinf not tested. - #cinf = np.array([complex(np.inf, 0)]) - - def assert_complex_equal(x, y): - x, y = np.asarray(x), np.asarray(y) - assert_array_equal(x.real, y.real) - assert_array_equal(x.imag, y.imag) - - # positive powers - for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]: - assert_complex_equal(np.power(zero, p), zero) - - # zero power - assert_complex_equal(np.power(zero, 0), one) - with np.errstate(invalid="ignore"): - assert_complex_equal(np.power(zero, 0+1j), cnan) - - # negative power - for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]: - assert_complex_equal(np.power(zero, -p), cnan) - assert_complex_equal(np.power(zero, -1+0.2j), cnan) - - def test_fast_power(self): - x = np.array([1, 2, 3], np.int16) - res = x**2.0 - assert_((x**2.00001).dtype is res.dtype) - assert_array_equal(res, [1, 4, 9]) - # check the inplace operation on the casted copy doesn't mess with x - assert_(not np.may_share_memory(res, x)) - assert_array_equal(x, [1, 2, 3]) - - # Check that the fast path ignores 1-element not 0-d arrays - res = x ** np.array([[[2]]]) - assert_equal(res.shape, (1, 1, 3)) - - def test_integer_power(self): - a = np.array([15, 15], 'i8') - b = np.power(a, a) - assert_equal(b, [437893890380859375, 437893890380859375]) - - def 
test_integer_power_with_integer_zero_exponent(self): - dtypes = np.typecodes['Integer'] - for dt in dtypes: - arr = np.arange(-10, 10, dtype=dt) - assert_equal(np.power(arr, 0), np.ones_like(arr)) - - dtypes = np.typecodes['UnsignedInteger'] - for dt in dtypes: - arr = np.arange(10, dtype=dt) - assert_equal(np.power(arr, 0), np.ones_like(arr)) - - def test_integer_power_of_1(self): - dtypes = np.typecodes['AllInteger'] - for dt in dtypes: - arr = np.arange(10, dtype=dt) - assert_equal(np.power(1, arr), np.ones_like(arr)) - - def test_integer_power_of_zero(self): - dtypes = np.typecodes['AllInteger'] - for dt in dtypes: - arr = np.arange(1, 10, dtype=dt) - assert_equal(np.power(0, arr), np.zeros_like(arr)) - - def test_integer_to_negative_power(self): - dtypes = np.typecodes['Integer'] - for dt in dtypes: - a = np.array([0, 1, 2, 3], dtype=dt) - b = np.array([0, 1, 2, -3], dtype=dt) - one = np.array(1, dtype=dt) - minusone = np.array(-1, dtype=dt) - assert_raises(ValueError, np.power, a, b) - assert_raises(ValueError, np.power, a, minusone) - assert_raises(ValueError, np.power, one, b) - assert_raises(ValueError, np.power, one, minusone) - - -class TestFloat_power(object): - def test_type_conversion(self): - arg_type = '?bhilBHILefdgFDG' - res_type = 'ddddddddddddgDDG' - for dtin, dtout in zip(arg_type, res_type): - msg = "dtin: %s, dtout: %s" % (dtin, dtout) - arg = np.ones(1, dtype=dtin) - res = np.float_power(arg, arg) - assert_(res.dtype.name == np.dtype(dtout).name, msg) - - -class TestLog2(object): - def test_log2_values(self): - x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] - y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - for dt in ['f', 'd', 'g']: - xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt) - assert_almost_equal(np.log2(xf), yf) - - def test_log2_ints(self): - # a good log2 implementation should provide this, - # might fail on OS with bad libm - for i in range(1, 65): - v = np.log2(2.**i) - assert_equal(v, float(i), err_msg='at exponent %d' % i) 
- - def test_log2_special(self): - assert_equal(np.log2(1.), 0.) - assert_equal(np.log2(np.inf), np.inf) - assert_(np.isnan(np.log2(np.nan))) - - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_(np.isnan(np.log2(-1.))) - assert_(np.isnan(np.log2(-np.inf))) - assert_equal(np.log2(0.), -np.inf) - assert_(w[0].category is RuntimeWarning) - assert_(w[1].category is RuntimeWarning) - assert_(w[2].category is RuntimeWarning) - - -class TestExp2(object): - def test_exp2_values(self): - x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] - y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - for dt in ['f', 'd', 'g']: - xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt) - assert_almost_equal(np.exp2(yf), xf) - - -class TestLogAddExp2(_FilterInvalids): - # Need test for intermediate precisions - def test_logaddexp2_values(self): - x = [1, 2, 3, 4, 5] - y = [5, 4, 3, 2, 1] - z = [6, 6, 6, 6, 6] - for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]): - xf = np.log2(np.array(x, dtype=dt)) - yf = np.log2(np.array(y, dtype=dt)) - zf = np.log2(np.array(z, dtype=dt)) - assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec_) - - def test_logaddexp2_range(self): - x = [1000000, -1000000, 1000200, -1000200] - y = [1000200, -1000200, 1000000, -1000000] - z = [1000200, -1000000, 1000200, -1000000] - for dt in ['f', 'd', 'g']: - logxf = np.array(x, dtype=dt) - logyf = np.array(y, dtype=dt) - logzf = np.array(z, dtype=dt) - assert_almost_equal(np.logaddexp2(logxf, logyf), logzf) - - def test_inf(self): - inf = np.inf - x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] - y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] - z = [inf, inf, inf, -inf, inf, inf, 1, 1] - with np.errstate(invalid='raise'): - for dt in ['f', 'd', 'g']: - logxf = np.array(x, dtype=dt) - logyf = np.array(y, dtype=dt) - logzf = np.array(z, dtype=dt) - assert_equal(np.logaddexp2(logxf, logyf), logzf) - - def test_nan(self): - assert_(np.isnan(np.logaddexp2(np.nan, 
np.inf))) - assert_(np.isnan(np.logaddexp2(np.inf, np.nan))) - assert_(np.isnan(np.logaddexp2(np.nan, 0))) - assert_(np.isnan(np.logaddexp2(0, np.nan))) - assert_(np.isnan(np.logaddexp2(np.nan, np.nan))) - - -class TestLog(object): - def test_log_values(self): - x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] - y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - for dt in ['f', 'd', 'g']: - log2_ = 0.69314718055994530943 - xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt)*log2_ - assert_almost_equal(np.log(xf), yf) - - -class TestExp(object): - def test_exp_values(self): - x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] - y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - for dt in ['f', 'd', 'g']: - log2_ = 0.69314718055994530943 - xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt)*log2_ - assert_almost_equal(np.exp(yf), xf) - -class TestSpecialFloats(object): - def test_exp_values(self): - x = [np.nan, np.nan, np.inf, 0.] - y = [np.nan, -np.nan, np.inf, -np.inf] - for dt in ['f', 'd', 'g']: - xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt) - assert_equal(np.exp(yf), xf) - - with np.errstate(over='raise'): - assert_raises(FloatingPointError, np.exp, np.float32(100.)) - assert_raises(FloatingPointError, np.exp, np.float32(1E19)) - - def test_log_values(self): - with np.errstate(all='ignore'): - x = [np.nan, np.nan, np.inf, np.nan, -np.inf, np.nan] - y = [np.nan, -np.nan, np.inf, -np.inf, 0., -1.0] - for dt in ['f', 'd', 'g']: - xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt) - assert_equal(np.log(yf), xf) - - with np.errstate(divide='raise'): - assert_raises(FloatingPointError, np.log, np.float32(0.)) - - with np.errstate(invalid='raise'): - assert_raises(FloatingPointError, np.log, np.float32(-np.inf)) - assert_raises(FloatingPointError, np.log, np.float32(-1.0)) - - def test_sincos_values(self): - with np.errstate(all='ignore'): - x = [np.nan, np.nan, np.nan, np.nan] - y = [np.nan, -np.nan, np.inf, -np.inf] - for dt in ['f', 'd', 'g']: - xf = 
np.array(x, dtype=dt) - yf = np.array(y, dtype=dt) - assert_equal(np.sin(yf), xf) - assert_equal(np.cos(yf), xf) - - with np.errstate(invalid='raise'): - assert_raises(FloatingPointError, np.sin, np.float32(-np.inf)) - assert_raises(FloatingPointError, np.sin, np.float32(np.inf)) - assert_raises(FloatingPointError, np.cos, np.float32(-np.inf)) - assert_raises(FloatingPointError, np.cos, np.float32(np.inf)) - - def test_sqrt_values(self): - with np.errstate(all='ignore'): - x = [np.nan, np.nan, np.inf, np.nan, 0.] - y = [np.nan, -np.nan, np.inf, -np.inf, 0.] - for dt in ['f', 'd', 'g']: - xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt) - assert_equal(np.sqrt(yf), xf) - - #with np.errstate(invalid='raise'): - # for dt in ['f', 'd', 'g']: - # assert_raises(FloatingPointError, np.sqrt, np.array(-100., dtype=dt)) - - def test_abs_values(self): - x = [np.nan, np.nan, np.inf, np.inf, 0., 0., 1.0, 1.0] - y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0., -1.0, 1.0] - for dt in ['f', 'd', 'g']: - xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt) - assert_equal(np.abs(yf), xf) - - def test_square_values(self): - x = [np.nan, np.nan, np.inf, np.inf] - y = [np.nan, -np.nan, np.inf, -np.inf] - with np.errstate(all='ignore'): - for dt in ['f', 'd', 'g']: - xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt) - assert_equal(np.square(yf), xf) - - with np.errstate(over='raise'): - assert_raises(FloatingPointError, np.square, np.array(1E32, dtype='f')) - assert_raises(FloatingPointError, np.square, np.array(1E200, dtype='d')) - - def test_reciprocal_values(self): - with np.errstate(all='ignore'): - x = [np.nan, np.nan, 0.0, -0.0, np.inf, -np.inf] - y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0.] 
- for dt in ['f', 'd', 'g']: - xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt) - assert_equal(np.reciprocal(yf), xf) - - with np.errstate(divide='raise'): - for dt in ['f', 'd', 'g']: - assert_raises(FloatingPointError, np.reciprocal, np.array(-0.0, dtype=dt)) - -# func : [maxulperror, low, high] -avx_ufuncs = {'sqrt' :[1, 0., 100.], - 'absolute' :[0, -100., 100.], - 'reciprocal' :[1, 1., 100.], - 'square' :[1, -100., 100.], - 'rint' :[0, -100., 100.], - 'floor' :[0, -100., 100.], - 'ceil' :[0, -100., 100.], - 'trunc' :[0, -100., 100.]} - -class TestAVXUfuncs(object): - def test_avx_based_ufunc(self): - strides = np.array([-4,-3,-2,-1,1,2,3,4]) - np.random.seed(42) - for func, prop in avx_ufuncs.items(): - maxulperr = prop[0] - minval = prop[1] - maxval = prop[2] - # various array sizes to ensure masking in AVX is tested - for size in range(1,32): - myfunc = getattr(np, func) - x_f32 = np.float32(np.random.uniform(low=minval, high=maxval, - size=size)) - x_f64 = np.float64(x_f32) - x_f128 = np.longdouble(x_f32) - y_true128 = myfunc(x_f128) - if maxulperr == 0: - assert_equal(myfunc(x_f32), np.float32(y_true128)) - assert_equal(myfunc(x_f64), np.float64(y_true128)) - else: - assert_array_max_ulp(myfunc(x_f32), np.float32(y_true128), - maxulp=maxulperr) - assert_array_max_ulp(myfunc(x_f64), np.float64(y_true128), - maxulp=maxulperr) - # various strides to test gather instruction - if size > 1: - y_true32 = myfunc(x_f32) - y_true64 = myfunc(x_f64) - for jj in strides: - assert_equal(myfunc(x_f64[::jj]), y_true64[::jj]) - assert_equal(myfunc(x_f32[::jj]), y_true32[::jj]) - -class TestAVXFloat32Transcendental(object): - def test_exp_float32(self): - np.random.seed(42) - x_f32 = np.float32(np.random.uniform(low=0.0,high=88.1,size=1000000)) - x_f64 = np.float64(x_f32) - assert_array_max_ulp(np.exp(x_f32), np.float32(np.exp(x_f64)), maxulp=3) - - def test_log_float32(self): - np.random.seed(42) - x_f32 = np.float32(np.random.uniform(low=0.0,high=1000,size=1000000)) 
- x_f64 = np.float64(x_f32) - assert_array_max_ulp(np.log(x_f32), np.float32(np.log(x_f64)), maxulp=4) - - def test_sincos_float32(self): - np.random.seed(42) - N = 1000000 - M = np.int_(N/20) - index = np.random.randint(low=0, high=N, size=M) - x_f32 = np.float32(np.random.uniform(low=-100.,high=100.,size=N)) - # test coverage for elements > 117435.992f for which glibc is used - x_f32[index] = np.float32(10E+10*np.random.rand(M)) - x_f64 = np.float64(x_f32) - assert_array_max_ulp(np.sin(x_f32), np.float32(np.sin(x_f64)), maxulp=2) - assert_array_max_ulp(np.cos(x_f32), np.float32(np.cos(x_f64)), maxulp=2) - - def test_strided_float32(self): - np.random.seed(42) - strides = np.array([-4,-3,-2,-1,1,2,3,4]) - sizes = np.arange(2,100) - for ii in sizes: - x_f32 = np.float32(np.random.uniform(low=0.01,high=88.1,size=ii)) - exp_true = np.exp(x_f32) - log_true = np.log(x_f32) - sin_true = np.sin(x_f32) - cos_true = np.cos(x_f32) - for jj in strides: - assert_array_almost_equal_nulp(np.exp(x_f32[::jj]), exp_true[::jj], nulp=2) - assert_array_almost_equal_nulp(np.log(x_f32[::jj]), log_true[::jj], nulp=2) - assert_array_almost_equal_nulp(np.sin(x_f32[::jj]), sin_true[::jj], nulp=2) - assert_array_almost_equal_nulp(np.cos(x_f32[::jj]), cos_true[::jj], nulp=2) - -class TestLogAddExp(_FilterInvalids): - def test_logaddexp_values(self): - x = [1, 2, 3, 4, 5] - y = [5, 4, 3, 2, 1] - z = [6, 6, 6, 6, 6] - for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]): - xf = np.log(np.array(x, dtype=dt)) - yf = np.log(np.array(y, dtype=dt)) - zf = np.log(np.array(z, dtype=dt)) - assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec_) - - def test_logaddexp_range(self): - x = [1000000, -1000000, 1000200, -1000200] - y = [1000200, -1000200, 1000000, -1000000] - z = [1000200, -1000000, 1000200, -1000000] - for dt in ['f', 'd', 'g']: - logxf = np.array(x, dtype=dt) - logyf = np.array(y, dtype=dt) - logzf = np.array(z, dtype=dt) - assert_almost_equal(np.logaddexp(logxf, logyf), logzf) - - def 
test_inf(self): - inf = np.inf - x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] - y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] - z = [inf, inf, inf, -inf, inf, inf, 1, 1] - with np.errstate(invalid='raise'): - for dt in ['f', 'd', 'g']: - logxf = np.array(x, dtype=dt) - logyf = np.array(y, dtype=dt) - logzf = np.array(z, dtype=dt) - assert_equal(np.logaddexp(logxf, logyf), logzf) - - def test_nan(self): - assert_(np.isnan(np.logaddexp(np.nan, np.inf))) - assert_(np.isnan(np.logaddexp(np.inf, np.nan))) - assert_(np.isnan(np.logaddexp(np.nan, 0))) - assert_(np.isnan(np.logaddexp(0, np.nan))) - assert_(np.isnan(np.logaddexp(np.nan, np.nan))) - - def test_reduce(self): - assert_equal(np.logaddexp.identity, -np.inf) - assert_equal(np.logaddexp.reduce([]), -np.inf) - - -class TestLog1p(object): - def test_log1p(self): - assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2)) - assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6)) - - def test_special(self): - with np.errstate(invalid="ignore", divide="ignore"): - assert_equal(ncu.log1p(np.nan), np.nan) - assert_equal(ncu.log1p(np.inf), np.inf) - assert_equal(ncu.log1p(-1.), -np.inf) - assert_equal(ncu.log1p(-2.), np.nan) - assert_equal(ncu.log1p(-np.inf), np.nan) - - -class TestExpm1(object): - def test_expm1(self): - assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1) - assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1) - - def test_special(self): - assert_equal(ncu.expm1(np.inf), np.inf) - assert_equal(ncu.expm1(0.), 0.) - assert_equal(ncu.expm1(-0.), -0.) - assert_equal(ncu.expm1(np.inf), np.inf) - assert_equal(ncu.expm1(-np.inf), -1.) 
- - -class TestHypot(object): - def test_simple(self): - assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2)) - assert_almost_equal(ncu.hypot(0, 0), 0) - - def test_reduce(self): - assert_almost_equal(ncu.hypot.reduce([3.0, 4.0]), 5.0) - assert_almost_equal(ncu.hypot.reduce([3.0, 4.0, 0]), 5.0) - assert_almost_equal(ncu.hypot.reduce([9.0, 12.0, 20.0]), 25.0) - assert_equal(ncu.hypot.reduce([]), 0.0) - - -def assert_hypot_isnan(x, y): - with np.errstate(invalid='ignore'): - assert_(np.isnan(ncu.hypot(x, y)), - "hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y))) - - -def assert_hypot_isinf(x, y): - with np.errstate(invalid='ignore'): - assert_(np.isinf(ncu.hypot(x, y)), - "hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y))) - - -class TestHypotSpecialValues(object): - def test_nan_outputs(self): - assert_hypot_isnan(np.nan, np.nan) - assert_hypot_isnan(np.nan, 1) - - def test_nan_outputs2(self): - assert_hypot_isinf(np.nan, np.inf) - assert_hypot_isinf(np.inf, np.nan) - assert_hypot_isinf(np.inf, 0) - assert_hypot_isinf(0, np.inf) - assert_hypot_isinf(np.inf, np.inf) - assert_hypot_isinf(np.inf, 23.0) - - def test_no_fpe(self): - assert_no_warnings(ncu.hypot, np.inf, 0) - - -def assert_arctan2_isnan(x, y): - assert_(np.isnan(ncu.arctan2(x, y)), "arctan(%s, %s) is %s, not nan" % (x, y, ncu.arctan2(x, y))) - - -def assert_arctan2_ispinf(x, y): - assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), "arctan(%s, %s) is %s, not +inf" % (x, y, ncu.arctan2(x, y))) - - -def assert_arctan2_isninf(x, y): - assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), "arctan(%s, %s) is %s, not -inf" % (x, y, ncu.arctan2(x, y))) - - -def assert_arctan2_ispzero(x, y): - assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not +0" % (x, y, ncu.arctan2(x, y))) - - -def assert_arctan2_isnzero(x, y): - assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not -0" % (x, y, 
ncu.arctan2(x, y))) - - -class TestArctan2SpecialValues(object): - def test_one_one(self): - # atan2(1, 1) returns pi/4. - assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi) - assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi) - assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi) - - def test_zero_nzero(self): - # atan2(+-0, -0) returns +-pi. - assert_almost_equal(ncu.arctan2(np.PZERO, np.NZERO), np.pi) - assert_almost_equal(ncu.arctan2(np.NZERO, np.NZERO), -np.pi) - - def test_zero_pzero(self): - # atan2(+-0, +0) returns +-0. - assert_arctan2_ispzero(np.PZERO, np.PZERO) - assert_arctan2_isnzero(np.NZERO, np.PZERO) - - def test_zero_negative(self): - # atan2(+-0, x) returns +-pi for x < 0. - assert_almost_equal(ncu.arctan2(np.PZERO, -1), np.pi) - assert_almost_equal(ncu.arctan2(np.NZERO, -1), -np.pi) - - def test_zero_positive(self): - # atan2(+-0, x) returns +-0 for x > 0. - assert_arctan2_ispzero(np.PZERO, 1) - assert_arctan2_isnzero(np.NZERO, 1) - - def test_positive_zero(self): - # atan2(y, +-0) returns +pi/2 for y > 0. - assert_almost_equal(ncu.arctan2(1, np.PZERO), 0.5 * np.pi) - assert_almost_equal(ncu.arctan2(1, np.NZERO), 0.5 * np.pi) - - def test_negative_zero(self): - # atan2(y, +-0) returns -pi/2 for y < 0. - assert_almost_equal(ncu.arctan2(-1, np.PZERO), -0.5 * np.pi) - assert_almost_equal(ncu.arctan2(-1, np.NZERO), -0.5 * np.pi) - - def test_any_ninf(self): - # atan2(+-y, -infinity) returns +-pi for finite y > 0. - assert_almost_equal(ncu.arctan2(1, np.NINF), np.pi) - assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi) - - def test_any_pinf(self): - # atan2(+-y, +infinity) returns +-0 for finite y > 0. - assert_arctan2_ispzero(1, np.inf) - assert_arctan2_isnzero(-1, np.inf) - - def test_inf_any(self): - # atan2(+-infinity, x) returns +-pi/2 for finite x. 
- assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi) - assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi) - - def test_inf_ninf(self): - # atan2(+-infinity, -infinity) returns +-3*pi/4. - assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi) - assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi) - - def test_inf_pinf(self): - # atan2(+-infinity, +infinity) returns +-pi/4. - assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi) - assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi) - - def test_nan_any(self): - # atan2(nan, x) returns nan for any x, including inf - assert_arctan2_isnan(np.nan, np.inf) - assert_arctan2_isnan(np.inf, np.nan) - assert_arctan2_isnan(np.nan, np.nan) - - -class TestLdexp(object): - def _check_ldexp(self, tp): - assert_almost_equal(ncu.ldexp(np.array(2., np.float32), - np.array(3, tp)), 16.) - assert_almost_equal(ncu.ldexp(np.array(2., np.float64), - np.array(3, tp)), 16.) - assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble), - np.array(3, tp)), 16.) - - def test_ldexp(self): - # The default Python int type should work - assert_almost_equal(ncu.ldexp(2., 3), 16.) 
- # The following int types should all be accepted - self._check_ldexp(np.int8) - self._check_ldexp(np.int16) - self._check_ldexp(np.int32) - self._check_ldexp('i') - self._check_ldexp('l') - - def test_ldexp_overflow(self): - # silence warning emitted on overflow - with np.errstate(over="ignore"): - imax = np.iinfo(np.dtype('l')).max - imin = np.iinfo(np.dtype('l')).min - assert_equal(ncu.ldexp(2., imax), np.inf) - assert_equal(ncu.ldexp(2., imin), 0) - - -class TestMaximum(_FilterInvalids): - def test_reduce(self): - dflt = np.typecodes['AllFloat'] - dint = np.typecodes['AllInteger'] - seq1 = np.arange(11) - seq2 = seq1[::-1] - func = np.maximum.reduce - for dt in dint: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 10) - assert_equal(func(tmp2), 10) - for dt in dflt: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 10) - assert_equal(func(tmp2), 10) - tmp1[::2] = np.nan - tmp2[::2] = np.nan - assert_equal(func(tmp1), np.nan) - assert_equal(func(tmp2), np.nan) - - def test_reduce_complex(self): - assert_equal(np.maximum.reduce([1, 2j]), 1) - assert_equal(np.maximum.reduce([1+3j, 2j]), 1+3j) - - def test_float_nans(self): - nan = np.nan - arg1 = np.array([0, nan, nan]) - arg2 = np.array([nan, 0, nan]) - out = np.array([nan, nan, nan]) - assert_equal(np.maximum(arg1, arg2), out) - - def test_object_nans(self): - # Multiple checks to give this a chance to - # fail if cmp is used instead of rich compare. - # Failure cannot be guaranteed. 
- for i in range(1): - x = np.array(float('nan'), object) - y = 1.0 - z = np.array(float('nan'), object) - assert_(np.maximum(x, y) == 1.0) - assert_(np.maximum(z, y) == 1.0) - - def test_complex_nans(self): - nan = np.nan - for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: - arg1 = np.array([0, cnan, cnan], dtype=complex) - arg2 = np.array([cnan, 0, cnan], dtype=complex) - out = np.array([nan, nan, nan], dtype=complex) - assert_equal(np.maximum(arg1, arg2), out) - - def test_object_array(self): - arg1 = np.arange(5, dtype=object) - arg2 = arg1 + 1 - assert_equal(np.maximum(arg1, arg2), arg2) - - -class TestMinimum(_FilterInvalids): - def test_reduce(self): - dflt = np.typecodes['AllFloat'] - dint = np.typecodes['AllInteger'] - seq1 = np.arange(11) - seq2 = seq1[::-1] - func = np.minimum.reduce - for dt in dint: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 0) - assert_equal(func(tmp2), 0) - for dt in dflt: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 0) - assert_equal(func(tmp2), 0) - tmp1[::2] = np.nan - tmp2[::2] = np.nan - assert_equal(func(tmp1), np.nan) - assert_equal(func(tmp2), np.nan) - - def test_reduce_complex(self): - assert_equal(np.minimum.reduce([1, 2j]), 2j) - assert_equal(np.minimum.reduce([1+3j, 2j]), 2j) - - def test_float_nans(self): - nan = np.nan - arg1 = np.array([0, nan, nan]) - arg2 = np.array([nan, 0, nan]) - out = np.array([nan, nan, nan]) - assert_equal(np.minimum(arg1, arg2), out) - - def test_object_nans(self): - # Multiple checks to give this a chance to - # fail if cmp is used instead of rich compare. - # Failure cannot be guaranteed. 
- for i in range(1): - x = np.array(float('nan'), object) - y = 1.0 - z = np.array(float('nan'), object) - assert_(np.minimum(x, y) == 1.0) - assert_(np.minimum(z, y) == 1.0) - - def test_complex_nans(self): - nan = np.nan - for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: - arg1 = np.array([0, cnan, cnan], dtype=complex) - arg2 = np.array([cnan, 0, cnan], dtype=complex) - out = np.array([nan, nan, nan], dtype=complex) - assert_equal(np.minimum(arg1, arg2), out) - - def test_object_array(self): - arg1 = np.arange(5, dtype=object) - arg2 = arg1 + 1 - assert_equal(np.minimum(arg1, arg2), arg1) - - -class TestFmax(_FilterInvalids): - def test_reduce(self): - dflt = np.typecodes['AllFloat'] - dint = np.typecodes['AllInteger'] - seq1 = np.arange(11) - seq2 = seq1[::-1] - func = np.fmax.reduce - for dt in dint: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 10) - assert_equal(func(tmp2), 10) - for dt in dflt: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 10) - assert_equal(func(tmp2), 10) - tmp1[::2] = np.nan - tmp2[::2] = np.nan - assert_equal(func(tmp1), 9) - assert_equal(func(tmp2), 9) - - def test_reduce_complex(self): - assert_equal(np.fmax.reduce([1, 2j]), 1) - assert_equal(np.fmax.reduce([1+3j, 2j]), 1+3j) - - def test_float_nans(self): - nan = np.nan - arg1 = np.array([0, nan, nan]) - arg2 = np.array([nan, 0, nan]) - out = np.array([0, 0, nan]) - assert_equal(np.fmax(arg1, arg2), out) - - def test_complex_nans(self): - nan = np.nan - for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: - arg1 = np.array([0, cnan, cnan], dtype=complex) - arg2 = np.array([cnan, 0, cnan], dtype=complex) - out = np.array([0, 0, nan], dtype=complex) - assert_equal(np.fmax(arg1, arg2), out) - - -class TestFmin(_FilterInvalids): - def test_reduce(self): - dflt = np.typecodes['AllFloat'] - dint = np.typecodes['AllInteger'] - seq1 = np.arange(11) - seq2 = seq1[::-1] - func = np.fmin.reduce - for 
dt in dint: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 0) - assert_equal(func(tmp2), 0) - for dt in dflt: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 0) - assert_equal(func(tmp2), 0) - tmp1[::2] = np.nan - tmp2[::2] = np.nan - assert_equal(func(tmp1), 1) - assert_equal(func(tmp2), 1) - - def test_reduce_complex(self): - assert_equal(np.fmin.reduce([1, 2j]), 2j) - assert_equal(np.fmin.reduce([1+3j, 2j]), 2j) - - def test_float_nans(self): - nan = np.nan - arg1 = np.array([0, nan, nan]) - arg2 = np.array([nan, 0, nan]) - out = np.array([0, 0, nan]) - assert_equal(np.fmin(arg1, arg2), out) - - def test_complex_nans(self): - nan = np.nan - for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: - arg1 = np.array([0, cnan, cnan], dtype=complex) - arg2 = np.array([cnan, 0, cnan], dtype=complex) - out = np.array([0, 0, nan], dtype=complex) - assert_equal(np.fmin(arg1, arg2), out) - - -class TestBool(object): - def test_exceptions(self): - a = np.ones(1, dtype=np.bool_) - assert_raises(TypeError, np.negative, a) - assert_raises(TypeError, np.positive, a) - assert_raises(TypeError, np.subtract, a, a) - - def test_truth_table_logical(self): - # 2, 3 and 4 serves as true values - input1 = [0, 0, 3, 2] - input2 = [0, 4, 0, 2] - - typecodes = (np.typecodes['AllFloat'] - + np.typecodes['AllInteger'] - + '?') # boolean - for dtype in map(np.dtype, typecodes): - arg1 = np.asarray(input1, dtype=dtype) - arg2 = np.asarray(input2, dtype=dtype) - - # OR - out = [False, True, True, True] - for func in (np.logical_or, np.maximum): - assert_equal(func(arg1, arg2).astype(bool), out) - # AND - out = [False, False, False, True] - for func in (np.logical_and, np.minimum): - assert_equal(func(arg1, arg2).astype(bool), out) - # XOR - out = [False, True, True, False] - for func in (np.logical_xor, np.not_equal): - assert_equal(func(arg1, arg2).astype(bool), out) - - def test_truth_table_bitwise(self): - arg1 = [False, 
False, True, True] - arg2 = [False, True, False, True] - - out = [False, True, True, True] - assert_equal(np.bitwise_or(arg1, arg2), out) - - out = [False, False, False, True] - assert_equal(np.bitwise_and(arg1, arg2), out) - - out = [False, True, True, False] - assert_equal(np.bitwise_xor(arg1, arg2), out) - - def test_reduce(self): - none = np.array([0, 0, 0, 0], bool) - some = np.array([1, 0, 1, 1], bool) - every = np.array([1, 1, 1, 1], bool) - empty = np.array([], bool) - - arrs = [none, some, every, empty] - - for arr in arrs: - assert_equal(np.logical_and.reduce(arr), all(arr)) - - for arr in arrs: - assert_equal(np.logical_or.reduce(arr), any(arr)) - - for arr in arrs: - assert_equal(np.logical_xor.reduce(arr), arr.sum() % 2 == 1) - - -class TestBitwiseUFuncs(object): - - bitwise_types = [np.dtype(c) for c in '?' + 'bBhHiIlLqQ' + 'O'] - - def test_values(self): - for dt in self.bitwise_types: - zeros = np.array([0], dtype=dt) - ones = np.array([-1], dtype=dt) - msg = "dt = '%s'" % dt.char - - assert_equal(np.bitwise_not(zeros), ones, err_msg=msg) - assert_equal(np.bitwise_not(ones), zeros, err_msg=msg) - - assert_equal(np.bitwise_or(zeros, zeros), zeros, err_msg=msg) - assert_equal(np.bitwise_or(zeros, ones), ones, err_msg=msg) - assert_equal(np.bitwise_or(ones, zeros), ones, err_msg=msg) - assert_equal(np.bitwise_or(ones, ones), ones, err_msg=msg) - - assert_equal(np.bitwise_xor(zeros, zeros), zeros, err_msg=msg) - assert_equal(np.bitwise_xor(zeros, ones), ones, err_msg=msg) - assert_equal(np.bitwise_xor(ones, zeros), ones, err_msg=msg) - assert_equal(np.bitwise_xor(ones, ones), zeros, err_msg=msg) - - assert_equal(np.bitwise_and(zeros, zeros), zeros, err_msg=msg) - assert_equal(np.bitwise_and(zeros, ones), zeros, err_msg=msg) - assert_equal(np.bitwise_and(ones, zeros), zeros, err_msg=msg) - assert_equal(np.bitwise_and(ones, ones), ones, err_msg=msg) - - def test_types(self): - for dt in self.bitwise_types: - zeros = np.array([0], dtype=dt) - ones = 
np.array([-1], dtype=dt) - msg = "dt = '%s'" % dt.char - - assert_(np.bitwise_not(zeros).dtype == dt, msg) - assert_(np.bitwise_or(zeros, zeros).dtype == dt, msg) - assert_(np.bitwise_xor(zeros, zeros).dtype == dt, msg) - assert_(np.bitwise_and(zeros, zeros).dtype == dt, msg) - - def test_identity(self): - assert_(np.bitwise_or.identity == 0, 'bitwise_or') - assert_(np.bitwise_xor.identity == 0, 'bitwise_xor') - assert_(np.bitwise_and.identity == -1, 'bitwise_and') - - def test_reduction(self): - binary_funcs = (np.bitwise_or, np.bitwise_xor, np.bitwise_and) - - for dt in self.bitwise_types: - zeros = np.array([0], dtype=dt) - ones = np.array([-1], dtype=dt) - for f in binary_funcs: - msg = "dt: '%s', f: '%s'" % (dt, f) - assert_equal(f.reduce(zeros), zeros, err_msg=msg) - assert_equal(f.reduce(ones), ones, err_msg=msg) - - # Test empty reduction, no object dtype - for dt in self.bitwise_types[:-1]: - # No object array types - empty = np.array([], dtype=dt) - for f in binary_funcs: - msg = "dt: '%s', f: '%s'" % (dt, f) - tgt = np.array(f.identity, dtype=dt) - res = f.reduce(empty) - assert_equal(res, tgt, err_msg=msg) - assert_(res.dtype == tgt.dtype, msg) - - # Empty object arrays use the identity. Note that the types may - # differ, the actual type used is determined by the assign_identity - # function and is not the same as the type returned by the identity - # method. 
- for f in binary_funcs: - msg = "dt: '%s'" % (f,) - empty = np.array([], dtype=object) - tgt = f.identity - res = f.reduce(empty) - assert_equal(res, tgt, err_msg=msg) - - # Non-empty object arrays do not use the identity - for f in binary_funcs: - msg = "dt: '%s'" % (f,) - btype = np.array([True], dtype=object) - assert_(type(f.reduce(btype)) is bool, msg) - - -class TestInt(object): - def test_logical_not(self): - x = np.ones(10, dtype=np.int16) - o = np.ones(10 * 2, dtype=bool) - tgt = o.copy() - tgt[::2] = False - os = o[::2] - assert_array_equal(np.logical_not(x, out=os), False) - assert_array_equal(o, tgt) - - -class TestFloatingPoint(object): - def test_floating_point(self): - assert_equal(ncu.FLOATING_POINT_SUPPORT, 1) - - -class TestDegrees(object): - def test_degrees(self): - assert_almost_equal(ncu.degrees(np.pi), 180.0) - assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0) - - -class TestRadians(object): - def test_radians(self): - assert_almost_equal(ncu.radians(180.0), np.pi) - assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi) - - -class TestHeavside(object): - def test_heaviside(self): - x = np.array([[-30.0, -0.1, 0.0, 0.2], [7.5, np.nan, np.inf, -np.inf]]) - expectedhalf = np.array([[0.0, 0.0, 0.5, 1.0], [1.0, np.nan, 1.0, 0.0]]) - expected1 = expectedhalf.copy() - expected1[0, 2] = 1 - - h = ncu.heaviside(x, 0.5) - assert_equal(h, expectedhalf) - - h = ncu.heaviside(x, 1.0) - assert_equal(h, expected1) - - x = x.astype(np.float32) - - h = ncu.heaviside(x, np.float32(0.5)) - assert_equal(h, expectedhalf.astype(np.float32)) - - h = ncu.heaviside(x, np.float32(1.0)) - assert_equal(h, expected1.astype(np.float32)) - - -class TestSign(object): - def test_sign(self): - a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0]) - out = np.zeros(a.shape) - tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0]) - - with np.errstate(invalid='ignore'): - res = ncu.sign(a) - assert_equal(res, tgt) - res = ncu.sign(a, out) - assert_equal(res, tgt) - 
assert_equal(out, tgt) - - def test_sign_dtype_object(self): - # In reference to github issue #6229 - - foo = np.array([-.1, 0, .1]) - a = np.sign(foo.astype(object)) - b = np.sign(foo) - - assert_array_equal(a, b) - - def test_sign_dtype_nan_object(self): - # In reference to github issue #6229 - def test_nan(): - foo = np.array([np.nan]) - # FIXME: a not used - a = np.sign(foo.astype(object)) - - assert_raises(TypeError, test_nan) - -class TestMinMax(object): - def test_minmax_blocked(self): - # simd tests on max/min, test all alignments, slow but important - # for 2 * vz + 2 * (vs - 1) + 1 (unrolled once) - for dt, sz in [(np.float32, 15), (np.float64, 7)]: - for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary', - max_size=sz): - for i in range(inp.size): - inp[:] = np.arange(inp.size, dtype=dt) - inp[i] = np.nan - emsg = lambda: '%r\n%s' % (inp, msg) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, - "invalid value encountered in reduce") - assert_(np.isnan(inp.max()), msg=emsg) - assert_(np.isnan(inp.min()), msg=emsg) - - inp[i] = 1e10 - assert_equal(inp.max(), 1e10, err_msg=msg) - inp[i] = -1e10 - assert_equal(inp.min(), -1e10, err_msg=msg) - - def test_lower_align(self): - # check data that is not aligned to element size - # i.e doubles are aligned to 4 bytes on i386 - d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) - assert_equal(d.max(), d[0]) - assert_equal(d.min(), d[0]) - - def test_reduce_reorder(self): - # gh 10370, 11029 Some compilers reorder the call to npy_getfloatstatus - # and put it before the call to an intrisic function that causes - # invalid status to be set. 
Also make sure warnings are not emitted - for n in (2, 4, 8, 16, 32): - for dt in (np.float32, np.float16, np.complex64): - for r in np.diagflat(np.array([np.nan] * n, dtype=dt)): - assert_equal(np.min(r), np.nan) - - def test_minimize_no_warns(self): - a = np.minimum(np.nan, 1) - assert_equal(a, np.nan) - - -class TestAbsoluteNegative(object): - def test_abs_neg_blocked(self): - # simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1 - for dt, sz in [(np.float32, 11), (np.float64, 5)]: - for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary', - max_size=sz): - tgt = [ncu.absolute(i) for i in inp] - np.absolute(inp, out=out) - assert_equal(out, tgt, err_msg=msg) - assert_((out >= 0).all()) - - tgt = [-1*(i) for i in inp] - np.negative(inp, out=out) - assert_equal(out, tgt, err_msg=msg) - - for v in [np.nan, -np.inf, np.inf]: - for i in range(inp.size): - d = np.arange(inp.size, dtype=dt) - inp[:] = -d - inp[i] = v - d[i] = -v if v == -np.inf else v - assert_array_equal(np.abs(inp), d, err_msg=msg) - np.abs(inp, out=out) - assert_array_equal(out, d, err_msg=msg) - - assert_array_equal(-inp, -1*inp, err_msg=msg) - d = -1 * inp - np.negative(inp, out=out) - assert_array_equal(out, d, err_msg=msg) - - def test_lower_align(self): - # check data that is not aligned to element size - # i.e doubles are aligned to 4 bytes on i386 - d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) - assert_equal(np.abs(d), d) - assert_equal(np.negative(d), -d) - np.negative(d, out=d) - np.negative(np.ones_like(d), out=d) - np.abs(d, out=d) - np.abs(np.ones_like(d), out=d) - - -class TestPositive(object): - def test_valid(self): - valid_dtypes = [int, float, complex, object] - for dtype in valid_dtypes: - x = np.arange(5, dtype=dtype) - result = np.positive(x) - assert_equal(x, result, err_msg=str(dtype)) - - def test_invalid(self): - with assert_raises(TypeError): - np.positive(True) - with assert_raises(TypeError): - np.positive(np.datetime64('2000-01-01')) - 
with assert_raises(TypeError): - np.positive(np.array(['foo'], dtype=str)) - with assert_raises(TypeError): - np.positive(np.array(['bar'], dtype=object)) - - -class TestSpecialMethods(object): - def test_wrap(self): - - class with_wrap(object): - def __array__(self): - return np.zeros(1) - - def __array_wrap__(self, arr, context): - r = with_wrap() - r.arr = arr - r.context = context - return r - - a = with_wrap() - x = ncu.minimum(a, a) - assert_equal(x.arr, np.zeros(1)) - func, args, i = x.context - assert_(func is ncu.minimum) - assert_equal(len(args), 2) - assert_equal(args[0], a) - assert_equal(args[1], a) - assert_equal(i, 0) - - def test_wrap_and_prepare_out(self): - # Calling convention for out should not affect how special methods are - # called - - class StoreArrayPrepareWrap(np.ndarray): - _wrap_args = None - _prepare_args = None - def __new__(cls): - return np.empty(()).view(cls) - def __array_wrap__(self, obj, context): - self._wrap_args = context[1] - return obj - def __array_prepare__(self, obj, context): - self._prepare_args = context[1] - return obj - @property - def args(self): - # We need to ensure these are fetched at the same time, before - # any other ufuncs are calld by the assertions - return (self._prepare_args, self._wrap_args) - def __repr__(self): - return "a" # for short test output - - def do_test(f_call, f_expected): - a = StoreArrayPrepareWrap() - f_call(a) - p, w = a.args - expected = f_expected(a) - try: - assert_equal(p, expected) - assert_equal(w, expected) - except AssertionError as e: - # assert_equal produces truly useless error messages - raise AssertionError("\n".join([ - "Bad arguments passed in ufunc call", - " expected: {}".format(expected), - " __array_prepare__ got: {}".format(p), - " __array_wrap__ got: {}".format(w) - ])) - - # method not on the out argument - do_test(lambda a: np.add(a, 0), lambda a: (a, 0)) - do_test(lambda a: np.add(a, 0, None), lambda a: (a, 0)) - do_test(lambda a: np.add(a, 0, out=None), lambda 
a: (a, 0)) - do_test(lambda a: np.add(a, 0, out=(None,)), lambda a: (a, 0)) - - # method on the out argument - do_test(lambda a: np.add(0, 0, a), lambda a: (0, 0, a)) - do_test(lambda a: np.add(0, 0, out=a), lambda a: (0, 0, a)) - do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a)) - - def test_wrap_with_iterable(self): - # test fix for bug #1026: - - class with_wrap(np.ndarray): - __array_priority__ = 10 - - def __new__(cls): - return np.asarray(1).view(cls).copy() - - def __array_wrap__(self, arr, context): - return arr.view(type(self)) - - a = with_wrap() - x = ncu.multiply(a, (1, 2, 3)) - assert_(isinstance(x, with_wrap)) - assert_array_equal(x, np.array((1, 2, 3))) - - def test_priority_with_scalar(self): - # test fix for bug #826: - - class A(np.ndarray): - __array_priority__ = 10 - - def __new__(cls): - return np.asarray(1.0, 'float64').view(cls).copy() - - a = A() - x = np.float64(1)*a - assert_(isinstance(x, A)) - assert_array_equal(x, np.array(1)) - - def test_old_wrap(self): - - class with_wrap(object): - def __array__(self): - return np.zeros(1) - - def __array_wrap__(self, arr): - r = with_wrap() - r.arr = arr - return r - - a = with_wrap() - x = ncu.minimum(a, a) - assert_equal(x.arr, np.zeros(1)) - - def test_priority(self): - - class A(object): - def __array__(self): - return np.zeros(1) - - def __array_wrap__(self, arr, context): - r = type(self)() - r.arr = arr - r.context = context - return r - - class B(A): - __array_priority__ = 20. - - class C(A): - __array_priority__ = 40. 
- - x = np.zeros(1) - a = A() - b = B() - c = C() - f = ncu.minimum - assert_(type(f(x, x)) is np.ndarray) - assert_(type(f(x, a)) is A) - assert_(type(f(x, b)) is B) - assert_(type(f(x, c)) is C) - assert_(type(f(a, x)) is A) - assert_(type(f(b, x)) is B) - assert_(type(f(c, x)) is C) - - assert_(type(f(a, a)) is A) - assert_(type(f(a, b)) is B) - assert_(type(f(b, a)) is B) - assert_(type(f(b, b)) is B) - assert_(type(f(b, c)) is C) - assert_(type(f(c, b)) is C) - assert_(type(f(c, c)) is C) - - assert_(type(ncu.exp(a) is A)) - assert_(type(ncu.exp(b) is B)) - assert_(type(ncu.exp(c) is C)) - - def test_failing_wrap(self): - - class A(object): - def __array__(self): - return np.zeros(2) - - def __array_wrap__(self, arr, context): - raise RuntimeError - - a = A() - assert_raises(RuntimeError, ncu.maximum, a, a) - assert_raises(RuntimeError, ncu.maximum.reduce, a) - - def test_failing_out_wrap(self): - - singleton = np.array([1.0]) - - class Ok(np.ndarray): - def __array_wrap__(self, obj): - return singleton - - class Bad(np.ndarray): - def __array_wrap__(self, obj): - raise RuntimeError - - ok = np.empty(1).view(Ok) - bad = np.empty(1).view(Bad) - # double-free (segfault) of "ok" if "bad" raises an exception - for i in range(10): - assert_raises(RuntimeError, ncu.frexp, 1, ok, bad) - - def test_none_wrap(self): - # Tests that issue #8507 is resolved. 
Previously, this would segfault - - class A(object): - def __array__(self): - return np.zeros(1) - - def __array_wrap__(self, arr, context=None): - return None - - a = A() - assert_equal(ncu.maximum(a, a), None) - - def test_default_prepare(self): - - class with_wrap(object): - __array_priority__ = 10 - - def __array__(self): - return np.zeros(1) - - def __array_wrap__(self, arr, context): - return arr - - a = with_wrap() - x = ncu.minimum(a, a) - assert_equal(x, np.zeros(1)) - assert_equal(type(x), np.ndarray) - - def test_prepare(self): - - class with_prepare(np.ndarray): - __array_priority__ = 10 - - def __array_prepare__(self, arr, context): - # make sure we can return a new - return np.array(arr).view(type=with_prepare) - - a = np.array(1).view(type=with_prepare) - x = np.add(a, a) - assert_equal(x, np.array(2)) - assert_equal(type(x), with_prepare) - - def test_prepare_out(self): - - class with_prepare(np.ndarray): - __array_priority__ = 10 - - def __array_prepare__(self, arr, context): - return np.array(arr).view(type=with_prepare) - - a = np.array([1]).view(type=with_prepare) - x = np.add(a, a, a) - # Returned array is new, because of the strange - # __array_prepare__ above - assert_(not np.shares_memory(x, a)) - assert_equal(x, np.array([2])) - assert_equal(type(x), with_prepare) - - def test_failing_prepare(self): - - class A(object): - def __array__(self): - return np.zeros(1) - - def __array_prepare__(self, arr, context=None): - raise RuntimeError - - a = A() - assert_raises(RuntimeError, ncu.maximum, a, a) - - def test_array_with_context(self): - - class A(object): - def __array__(self, dtype=None, context=None): - func, args, i = context - self.func = func - self.args = args - self.i = i - return np.zeros(1) - - class B(object): - def __array__(self, dtype=None): - return np.zeros(1, dtype) - - class C(object): - def __array__(self): - return np.zeros(1) - - a = A() - ncu.maximum(np.zeros(1), a) - assert_(a.func is ncu.maximum) - 
assert_equal(a.args[0], 0) - assert_(a.args[1] is a) - assert_(a.i == 1) - assert_equal(ncu.maximum(a, B()), 0) - assert_equal(ncu.maximum(a, C()), 0) - - def test_ufunc_override(self): - # check override works even with instance with high priority. - class A(object): - def __array_ufunc__(self, func, method, *inputs, **kwargs): - return self, func, method, inputs, kwargs - - class MyNDArray(np.ndarray): - __array_priority__ = 100 - - a = A() - b = np.array([1]).view(MyNDArray) - res0 = np.multiply(a, b) - res1 = np.multiply(b, b, out=a) - - # self - assert_equal(res0[0], a) - assert_equal(res1[0], a) - assert_equal(res0[1], np.multiply) - assert_equal(res1[1], np.multiply) - assert_equal(res0[2], '__call__') - assert_equal(res1[2], '__call__') - assert_equal(res0[3], (a, b)) - assert_equal(res1[3], (b, b)) - assert_equal(res0[4], {}) - assert_equal(res1[4], {'out': (a,)}) - - def test_ufunc_override_mro(self): - - # Some multi arg functions for testing. - def tres_mul(a, b, c): - return a * b * c - - def quatro_mul(a, b, c, d): - return a * b * c * d - - # Make these into ufuncs. 
- three_mul_ufunc = np.frompyfunc(tres_mul, 3, 1) - four_mul_ufunc = np.frompyfunc(quatro_mul, 4, 1) - - class A(object): - def __array_ufunc__(self, func, method, *inputs, **kwargs): - return "A" - - class ASub(A): - def __array_ufunc__(self, func, method, *inputs, **kwargs): - return "ASub" - - class B(object): - def __array_ufunc__(self, func, method, *inputs, **kwargs): - return "B" - - class C(object): - def __init__(self): - self.count = 0 - - def __array_ufunc__(self, func, method, *inputs, **kwargs): - self.count += 1 - return NotImplemented - - class CSub(C): - def __array_ufunc__(self, func, method, *inputs, **kwargs): - self.count += 1 - return NotImplemented - - a = A() - a_sub = ASub() - b = B() - c = C() - - # Standard - res = np.multiply(a, a_sub) - assert_equal(res, "ASub") - res = np.multiply(a_sub, b) - assert_equal(res, "ASub") - - # With 1 NotImplemented - res = np.multiply(c, a) - assert_equal(res, "A") - assert_equal(c.count, 1) - # Check our counter works, so we can trust tests below. - res = np.multiply(c, a) - assert_equal(c.count, 2) - - # Both NotImplemented. - c = C() - c_sub = CSub() - assert_raises(TypeError, np.multiply, c, c_sub) - assert_equal(c.count, 1) - assert_equal(c_sub.count, 1) - c.count = c_sub.count = 0 - assert_raises(TypeError, np.multiply, c_sub, c) - assert_equal(c.count, 1) - assert_equal(c_sub.count, 1) - c.count = 0 - assert_raises(TypeError, np.multiply, c, c) - assert_equal(c.count, 1) - c.count = 0 - assert_raises(TypeError, np.multiply, 2, c) - assert_equal(c.count, 1) - - # Ternary testing. 
- assert_equal(three_mul_ufunc(a, 1, 2), "A") - assert_equal(three_mul_ufunc(1, a, 2), "A") - assert_equal(three_mul_ufunc(1, 2, a), "A") - - assert_equal(three_mul_ufunc(a, a, 6), "A") - assert_equal(three_mul_ufunc(a, 2, a), "A") - assert_equal(three_mul_ufunc(a, 2, b), "A") - assert_equal(three_mul_ufunc(a, 2, a_sub), "ASub") - assert_equal(three_mul_ufunc(a, a_sub, 3), "ASub") - c.count = 0 - assert_equal(three_mul_ufunc(c, a_sub, 3), "ASub") - assert_equal(c.count, 1) - c.count = 0 - assert_equal(three_mul_ufunc(1, a_sub, c), "ASub") - assert_equal(c.count, 0) - - c.count = 0 - assert_equal(three_mul_ufunc(a, b, c), "A") - assert_equal(c.count, 0) - c_sub.count = 0 - assert_equal(three_mul_ufunc(a, b, c_sub), "A") - assert_equal(c_sub.count, 0) - assert_equal(three_mul_ufunc(1, 2, b), "B") - - assert_raises(TypeError, three_mul_ufunc, 1, 2, c) - assert_raises(TypeError, three_mul_ufunc, c_sub, 2, c) - assert_raises(TypeError, three_mul_ufunc, c_sub, 2, 3) - - # Quaternary testing. - assert_equal(four_mul_ufunc(a, 1, 2, 3), "A") - assert_equal(four_mul_ufunc(1, a, 2, 3), "A") - assert_equal(four_mul_ufunc(1, 1, a, 3), "A") - assert_equal(four_mul_ufunc(1, 1, 2, a), "A") - - assert_equal(four_mul_ufunc(a, b, 2, 3), "A") - assert_equal(four_mul_ufunc(1, a, 2, b), "A") - assert_equal(four_mul_ufunc(b, 1, a, 3), "B") - assert_equal(four_mul_ufunc(a_sub, 1, 2, a), "ASub") - assert_equal(four_mul_ufunc(a, 1, 2, a_sub), "ASub") - - c = C() - c_sub = CSub() - assert_raises(TypeError, four_mul_ufunc, 1, 2, 3, c) - assert_equal(c.count, 1) - c.count = 0 - assert_raises(TypeError, four_mul_ufunc, 1, 2, c_sub, c) - assert_equal(c_sub.count, 1) - assert_equal(c.count, 1) - c2 = C() - c.count = c_sub.count = 0 - assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c2) - assert_equal(c_sub.count, 1) - assert_equal(c.count, 1) - assert_equal(c2.count, 0) - c.count = c2.count = c_sub.count = 0 - assert_raises(TypeError, four_mul_ufunc, c2, c, c_sub, c) - 
assert_equal(c_sub.count, 1) - assert_equal(c.count, 0) - assert_equal(c2.count, 1) - - def test_ufunc_override_methods(self): - - class A(object): - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - return self, ufunc, method, inputs, kwargs - - # __call__ - a = A() - res = np.multiply.__call__(1, a, foo='bar', answer=42) - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], '__call__') - assert_equal(res[3], (1, a)) - assert_equal(res[4], {'foo': 'bar', 'answer': 42}) - - # __call__, wrong args - assert_raises(TypeError, np.multiply, a) - assert_raises(TypeError, np.multiply, a, a, a, a) - assert_raises(TypeError, np.multiply, a, a, sig='a', signature='a') - assert_raises(TypeError, ncu_tests.inner1d, a, a, axis=0, axes=[0, 0]) - - # reduce, positional args - res = np.multiply.reduce(a, 'axis0', 'dtype0', 'out0', 'keep0') - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], 'reduce') - assert_equal(res[3], (a,)) - assert_equal(res[4], {'dtype':'dtype0', - 'out': ('out0',), - 'keepdims': 'keep0', - 'axis': 'axis0'}) - - # reduce, kwargs - res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out='out0', - keepdims='keep0', initial='init0', - where='where0') - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], 'reduce') - assert_equal(res[3], (a,)) - assert_equal(res[4], {'dtype':'dtype0', - 'out': ('out0',), - 'keepdims': 'keep0', - 'axis': 'axis0', - 'initial': 'init0', - 'where': 'where0'}) - - # reduce, output equal to None removed, but not other explicit ones, - # even if they are at their default value. 
- res = np.multiply.reduce(a, 0, None, None, False) - assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False}) - res = np.multiply.reduce(a, out=None, axis=0, keepdims=True) - assert_equal(res[4], {'axis': 0, 'keepdims': True}) - res = np.multiply.reduce(a, None, out=(None,), dtype=None) - assert_equal(res[4], {'axis': None, 'dtype': None}) - res = np.multiply.reduce(a, 0, None, None, False, 2, True) - assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, - 'initial': 2, 'where': True}) - # np._NoValue ignored for initial - res = np.multiply.reduce(a, 0, None, None, False, - np._NoValue, True) - assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, - 'where': True}) - # None kept for initial, True for where. - res = np.multiply.reduce(a, 0, None, None, False, None, True) - assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, - 'initial': None, 'where': True}) - - # reduce, wrong args - assert_raises(ValueError, np.multiply.reduce, a, out=()) - assert_raises(ValueError, np.multiply.reduce, a, out=('out0', 'out1')) - assert_raises(TypeError, np.multiply.reduce, a, 'axis0', axis='axis0') - - # accumulate, pos args - res = np.multiply.accumulate(a, 'axis0', 'dtype0', 'out0') - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], 'accumulate') - assert_equal(res[3], (a,)) - assert_equal(res[4], {'dtype':'dtype0', - 'out': ('out0',), - 'axis': 'axis0'}) - - # accumulate, kwargs - res = np.multiply.accumulate(a, axis='axis0', dtype='dtype0', - out='out0') - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], 'accumulate') - assert_equal(res[3], (a,)) - assert_equal(res[4], {'dtype':'dtype0', - 'out': ('out0',), - 'axis': 'axis0'}) - - # accumulate, output equal to None removed. 
- res = np.multiply.accumulate(a, 0, None, None) - assert_equal(res[4], {'axis': 0, 'dtype': None}) - res = np.multiply.accumulate(a, out=None, axis=0, dtype='dtype1') - assert_equal(res[4], {'axis': 0, 'dtype': 'dtype1'}) - res = np.multiply.accumulate(a, None, out=(None,), dtype=None) - assert_equal(res[4], {'axis': None, 'dtype': None}) - - # accumulate, wrong args - assert_raises(ValueError, np.multiply.accumulate, a, out=()) - assert_raises(ValueError, np.multiply.accumulate, a, - out=('out0', 'out1')) - assert_raises(TypeError, np.multiply.accumulate, a, - 'axis0', axis='axis0') - - # reduceat, pos args - res = np.multiply.reduceat(a, [4, 2], 'axis0', 'dtype0', 'out0') - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], 'reduceat') - assert_equal(res[3], (a, [4, 2])) - assert_equal(res[4], {'dtype':'dtype0', - 'out': ('out0',), - 'axis': 'axis0'}) - - # reduceat, kwargs - res = np.multiply.reduceat(a, [4, 2], axis='axis0', dtype='dtype0', - out='out0') - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], 'reduceat') - assert_equal(res[3], (a, [4, 2])) - assert_equal(res[4], {'dtype':'dtype0', - 'out': ('out0',), - 'axis': 'axis0'}) - - # reduceat, output equal to None removed. 
- res = np.multiply.reduceat(a, [4, 2], 0, None, None) - assert_equal(res[4], {'axis': 0, 'dtype': None}) - res = np.multiply.reduceat(a, [4, 2], axis=None, out=None, dtype='dt') - assert_equal(res[4], {'axis': None, 'dtype': 'dt'}) - res = np.multiply.reduceat(a, [4, 2], None, None, out=(None,)) - assert_equal(res[4], {'axis': None, 'dtype': None}) - - # reduceat, wrong args - assert_raises(ValueError, np.multiply.reduce, a, [4, 2], out=()) - assert_raises(ValueError, np.multiply.reduce, a, [4, 2], - out=('out0', 'out1')) - assert_raises(TypeError, np.multiply.reduce, a, [4, 2], - 'axis0', axis='axis0') - - # outer - res = np.multiply.outer(a, 42) - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], 'outer') - assert_equal(res[3], (a, 42)) - assert_equal(res[4], {}) - - # outer, wrong args - assert_raises(TypeError, np.multiply.outer, a) - assert_raises(TypeError, np.multiply.outer, a, a, a, a) - assert_raises(TypeError, np.multiply.outer, a, a, sig='a', signature='a') - - # at - res = np.multiply.at(a, [4, 2], 'b0') - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], 'at') - assert_equal(res[3], (a, [4, 2], 'b0')) - - # at, wrong args - assert_raises(TypeError, np.multiply.at, a) - assert_raises(TypeError, np.multiply.at, a, a, a, a) - - def test_ufunc_override_out(self): - - class A(object): - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - return kwargs - - class B(object): - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - return kwargs - - a = A() - b = B() - res0 = np.multiply(a, b, 'out_arg') - res1 = np.multiply(a, b, out='out_arg') - res2 = np.multiply(2, b, 'out_arg') - res3 = np.multiply(3, b, out='out_arg') - res4 = np.multiply(a, 4, 'out_arg') - res5 = np.multiply(a, 5, out='out_arg') - - assert_equal(res0['out'][0], 'out_arg') - assert_equal(res1['out'][0], 'out_arg') - assert_equal(res2['out'][0], 'out_arg') - assert_equal(res3['out'][0], 'out_arg') - 
assert_equal(res4['out'][0], 'out_arg') - assert_equal(res5['out'][0], 'out_arg') - - # ufuncs with multiple output modf and frexp. - res6 = np.modf(a, 'out0', 'out1') - res7 = np.frexp(a, 'out0', 'out1') - assert_equal(res6['out'][0], 'out0') - assert_equal(res6['out'][1], 'out1') - assert_equal(res7['out'][0], 'out0') - assert_equal(res7['out'][1], 'out1') - - # While we're at it, check that default output is never passed on. - assert_(np.sin(a, None) == {}) - assert_(np.sin(a, out=None) == {}) - assert_(np.sin(a, out=(None,)) == {}) - assert_(np.modf(a, None) == {}) - assert_(np.modf(a, None, None) == {}) - assert_(np.modf(a, out=(None, None)) == {}) - with assert_raises(TypeError): - # Out argument must be tuple, since there are multiple outputs. - np.modf(a, out=None) - - # don't give positional and output argument, or too many arguments. - # wrong number of arguments in the tuple is an error too. - assert_raises(TypeError, np.multiply, a, b, 'one', out='two') - assert_raises(TypeError, np.multiply, a, b, 'one', 'two') - assert_raises(ValueError, np.multiply, a, b, out=('one', 'two')) - assert_raises(ValueError, np.multiply, a, out=()) - assert_raises(TypeError, np.modf, a, 'one', out=('two', 'three')) - assert_raises(TypeError, np.modf, a, 'one', 'two', 'three') - assert_raises(ValueError, np.modf, a, out=('one', 'two', 'three')) - assert_raises(ValueError, np.modf, a, out=('one',)) - - def test_ufunc_override_exception(self): - - class A(object): - def __array_ufunc__(self, *a, **kwargs): - raise ValueError("oops") - - a = A() - assert_raises(ValueError, np.negative, 1, out=a) - assert_raises(ValueError, np.negative, a) - assert_raises(ValueError, np.divide, 1., a) - - def test_ufunc_override_not_implemented(self): - - class A(object): - def __array_ufunc__(self, *args, **kwargs): - return NotImplemented - - msg = ("operand type(s) all returned NotImplemented from " - "__array_ufunc__(, '__call__', <*>): 'A'") - with assert_raises_regex(TypeError, 
fnmatch.translate(msg)): - np.negative(A()) - - msg = ("operand type(s) all returned NotImplemented from " - "__array_ufunc__(, '__call__', <*>, , " - "out=(1,)): 'A', 'object', 'int'") - with assert_raises_regex(TypeError, fnmatch.translate(msg)): - np.add(A(), object(), out=1) - - def test_ufunc_override_disabled(self): - - class OptOut(object): - __array_ufunc__ = None - - opt_out = OptOut() - - # ufuncs always raise - msg = "operand 'OptOut' does not support ufuncs" - with assert_raises_regex(TypeError, msg): - np.add(opt_out, 1) - with assert_raises_regex(TypeError, msg): - np.add(1, opt_out) - with assert_raises_regex(TypeError, msg): - np.negative(opt_out) - - # opt-outs still hold even when other arguments have pathological - # __array_ufunc__ implementations - - class GreedyArray(object): - def __array_ufunc__(self, *args, **kwargs): - return self - - greedy = GreedyArray() - assert_(np.negative(greedy) is greedy) - with assert_raises_regex(TypeError, msg): - np.add(greedy, opt_out) - with assert_raises_regex(TypeError, msg): - np.add(greedy, 1, out=opt_out) - - def test_gufunc_override(self): - # gufunc are just ufunc instances, but follow a different path, - # so check __array_ufunc__ overrides them properly. - class A(object): - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - return self, ufunc, method, inputs, kwargs - - inner1d = ncu_tests.inner1d - a = A() - res = inner1d(a, a) - assert_equal(res[0], a) - assert_equal(res[1], inner1d) - assert_equal(res[2], '__call__') - assert_equal(res[3], (a, a)) - assert_equal(res[4], {}) - - res = inner1d(1, 1, out=a) - assert_equal(res[0], a) - assert_equal(res[1], inner1d) - assert_equal(res[2], '__call__') - assert_equal(res[3], (1, 1)) - assert_equal(res[4], {'out': (a,)}) - - # wrong number of arguments in the tuple is an error too. 
- assert_raises(TypeError, inner1d, a, out='two') - assert_raises(TypeError, inner1d, a, a, 'one', out='two') - assert_raises(TypeError, inner1d, a, a, 'one', 'two') - assert_raises(ValueError, inner1d, a, a, out=('one', 'two')) - assert_raises(ValueError, inner1d, a, a, out=()) - - def test_ufunc_override_with_super(self): - # NOTE: this class is given as an example in doc/subclassing.py; - # if you make any changes here, do update it there too. - class A(np.ndarray): - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - args = [] - in_no = [] - for i, input_ in enumerate(inputs): - if isinstance(input_, A): - in_no.append(i) - args.append(input_.view(np.ndarray)) - else: - args.append(input_) - - outputs = kwargs.pop('out', None) - out_no = [] - if outputs: - out_args = [] - for j, output in enumerate(outputs): - if isinstance(output, A): - out_no.append(j) - out_args.append(output.view(np.ndarray)) - else: - out_args.append(output) - kwargs['out'] = tuple(out_args) - else: - outputs = (None,) * ufunc.nout - - info = {} - if in_no: - info['inputs'] = in_no - if out_no: - info['outputs'] = out_no - - results = super(A, self).__array_ufunc__(ufunc, method, - *args, **kwargs) - if results is NotImplemented: - return NotImplemented - - if method == 'at': - if isinstance(inputs[0], A): - inputs[0].info = info - return - - if ufunc.nout == 1: - results = (results,) - - results = tuple((np.asarray(result).view(A) - if output is None else output) - for result, output in zip(results, outputs)) - if results and isinstance(results[0], A): - results[0].info = info - - return results[0] if len(results) == 1 else results - - class B(object): - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - if any(isinstance(input_, A) for input_ in inputs): - return "A!" - else: - return NotImplemented - - d = np.arange(5.) 
- # 1 input, 1 output - a = np.arange(5.).view(A) - b = np.sin(a) - check = np.sin(d) - assert_(np.all(check == b)) - assert_equal(b.info, {'inputs': [0]}) - b = np.sin(d, out=(a,)) - assert_(np.all(check == b)) - assert_equal(b.info, {'outputs': [0]}) - assert_(b is a) - a = np.arange(5.).view(A) - b = np.sin(a, out=a) - assert_(np.all(check == b)) - assert_equal(b.info, {'inputs': [0], 'outputs': [0]}) - - # 1 input, 2 outputs - a = np.arange(5.).view(A) - b1, b2 = np.modf(a) - assert_equal(b1.info, {'inputs': [0]}) - b1, b2 = np.modf(d, out=(None, a)) - assert_(b2 is a) - assert_equal(b1.info, {'outputs': [1]}) - a = np.arange(5.).view(A) - b = np.arange(5.).view(A) - c1, c2 = np.modf(a, out=(a, b)) - assert_(c1 is a) - assert_(c2 is b) - assert_equal(c1.info, {'inputs': [0], 'outputs': [0, 1]}) - - # 2 input, 1 output - a = np.arange(5.).view(A) - b = np.arange(5.).view(A) - c = np.add(a, b, out=a) - assert_(c is a) - assert_equal(c.info, {'inputs': [0, 1], 'outputs': [0]}) - # some tests with a non-ndarray subclass - a = np.arange(5.) - b = B() - assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented) - assert_(b.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented) - assert_raises(TypeError, np.add, a, b) - a = a.view(A) - assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented) - assert_(b.__array_ufunc__(np.add, '__call__', a, b) == "A!") - assert_(np.add(a, b) == "A!") - # regression check for gh-9102 -- tests ufunc.reduce implicitly. 
- d = np.array([[1, 2, 3], [1, 2, 3]]) - a = d.view(A) - c = a.any() - check = d.any() - assert_equal(c, check) - assert_(c.info, {'inputs': [0]}) - c = a.max() - check = d.max() - assert_equal(c, check) - assert_(c.info, {'inputs': [0]}) - b = np.array(0).view(A) - c = a.max(out=b) - assert_equal(c, check) - assert_(c is b) - assert_(c.info, {'inputs': [0], 'outputs': [0]}) - check = a.max(axis=0) - b = np.zeros_like(check).view(A) - c = a.max(axis=0, out=b) - assert_equal(c, check) - assert_(c is b) - assert_(c.info, {'inputs': [0], 'outputs': [0]}) - # simple explicit tests of reduce, accumulate, reduceat - check = np.add.reduce(d, axis=1) - c = np.add.reduce(a, axis=1) - assert_equal(c, check) - assert_(c.info, {'inputs': [0]}) - b = np.zeros_like(c) - c = np.add.reduce(a, 1, None, b) - assert_equal(c, check) - assert_(c is b) - assert_(c.info, {'inputs': [0], 'outputs': [0]}) - check = np.add.accumulate(d, axis=0) - c = np.add.accumulate(a, axis=0) - assert_equal(c, check) - assert_(c.info, {'inputs': [0]}) - b = np.zeros_like(c) - c = np.add.accumulate(a, 0, None, b) - assert_equal(c, check) - assert_(c is b) - assert_(c.info, {'inputs': [0], 'outputs': [0]}) - indices = [0, 2, 1] - check = np.add.reduceat(d, indices, axis=1) - c = np.add.reduceat(a, indices, axis=1) - assert_equal(c, check) - assert_(c.info, {'inputs': [0]}) - b = np.zeros_like(c) - c = np.add.reduceat(a, indices, 1, None, b) - assert_equal(c, check) - assert_(c is b) - assert_(c.info, {'inputs': [0], 'outputs': [0]}) - # and a few tests for at - d = np.array([[1, 2, 3], [1, 2, 3]]) - check = d.copy() - a = d.copy().view(A) - np.add.at(check, ([0, 1], [0, 2]), 1.) - np.add.at(a, ([0, 1], [0, 2]), 1.) 
- assert_equal(a, check) - assert_(a.info, {'inputs': [0]}) - b = np.array(1.).view(A) - a = d.copy().view(A) - np.add.at(a, ([0, 1], [0, 2]), b) - assert_equal(a, check) - assert_(a.info, {'inputs': [0, 2]}) - - -class TestChoose(object): - def test_mixed(self): - c = np.array([True, True]) - a = np.array([True, True]) - assert_equal(np.choose(c, (a, 1)), np.array([1, 1])) - - -class TestRationalFunctions(object): - def test_lcm(self): - self._test_lcm_inner(np.int16) - self._test_lcm_inner(np.uint16) - - def test_lcm_object(self): - self._test_lcm_inner(np.object_) - - def test_gcd(self): - self._test_gcd_inner(np.int16) - self._test_lcm_inner(np.uint16) - - def test_gcd_object(self): - self._test_gcd_inner(np.object_) - - def _test_lcm_inner(self, dtype): - # basic use - a = np.array([12, 120], dtype=dtype) - b = np.array([20, 200], dtype=dtype) - assert_equal(np.lcm(a, b), [60, 600]) - - if not issubclass(dtype, np.unsignedinteger): - # negatives are ignored - a = np.array([12, -12, 12, -12], dtype=dtype) - b = np.array([20, 20, -20, -20], dtype=dtype) - assert_equal(np.lcm(a, b), [60]*4) - - # reduce - a = np.array([3, 12, 20], dtype=dtype) - assert_equal(np.lcm.reduce([3, 12, 20]), 60) - - # broadcasting, and a test including 0 - a = np.arange(6).astype(dtype) - b = 20 - assert_equal(np.lcm(a, b), [0, 20, 20, 60, 20, 20]) - - def _test_gcd_inner(self, dtype): - # basic use - a = np.array([12, 120], dtype=dtype) - b = np.array([20, 200], dtype=dtype) - assert_equal(np.gcd(a, b), [4, 40]) - - if not issubclass(dtype, np.unsignedinteger): - # negatives are ignored - a = np.array([12, -12, 12, -12], dtype=dtype) - b = np.array([20, 20, -20, -20], dtype=dtype) - assert_equal(np.gcd(a, b), [4]*4) - - # reduce - a = np.array([15, 25, 35], dtype=dtype) - assert_equal(np.gcd.reduce(a), 5) - - # broadcasting, and a test including 0 - a = np.arange(6).astype(dtype) - b = 20 - assert_equal(np.gcd(a, b), [20, 1, 2, 1, 4, 5]) - - def test_lcm_overflow(self): - # verify 
that we don't overflow when a*b does overflow - big = np.int32(np.iinfo(np.int32).max // 11) - a = 2*big - b = 5*big - assert_equal(np.lcm(a, b), 10*big) - - def test_gcd_overflow(self): - for dtype in (np.int32, np.int64): - # verify that we don't overflow when taking abs(x) - # not relevant for lcm, where the result is unrepresentable anyway - a = dtype(np.iinfo(dtype).min) # negative power of two - q = -(a // 4) - assert_equal(np.gcd(a, q*3), q) - assert_equal(np.gcd(a, -q*3), q) - - def test_decimal(self): - from decimal import Decimal - a = np.array([1, 1, -1, -1]) * Decimal('0.20') - b = np.array([1, -1, 1, -1]) * Decimal('0.12') - - assert_equal(np.gcd(a, b), 4*[Decimal('0.04')]) - assert_equal(np.lcm(a, b), 4*[Decimal('0.60')]) - - def test_float(self): - # not well-defined on float due to rounding errors - assert_raises(TypeError, np.gcd, 0.3, 0.4) - assert_raises(TypeError, np.lcm, 0.3, 0.4) - - def test_builtin_long(self): - # sanity check that array coercion is alright for builtin longs - assert_equal(np.array(2**200).item(), 2**200) - - # expressed as prime factors - a = np.array(2**100 * 3**5) - b = np.array([2**100 * 5**7, 2**50 * 3**10]) - assert_equal(np.gcd(a, b), [2**100, 2**50 * 3**5]) - assert_equal(np.lcm(a, b), [2**100 * 3**5 * 5**7, 2**100 * 3**10]) - - assert_equal(np.gcd(2**100, 3**100), 1) - - -class TestRoundingFunctions(object): - - def test_object_direct(self): - """ test direct implementation of these magic methods """ - class C: - def __floor__(self): - return 1 - def __ceil__(self): - return 2 - def __trunc__(self): - return 3 - - arr = np.array([C(), C()]) - assert_equal(np.floor(arr), [1, 1]) - assert_equal(np.ceil(arr), [2, 2]) - assert_equal(np.trunc(arr), [3, 3]) - - def test_object_indirect(self): - """ test implementations via __float__ """ - class C: - def __float__(self): - return -2.5 - - arr = np.array([C(), C()]) - assert_equal(np.floor(arr), [-3, -3]) - assert_equal(np.ceil(arr), [-2, -2]) - with 
pytest.raises(TypeError): - np.trunc(arr) # consistent with math.trunc - - def test_fraction(self): - f = Fraction(-4, 3) - assert_equal(np.floor(f), -2) - assert_equal(np.ceil(f), -1) - assert_equal(np.trunc(f), -1) - - -class TestComplexFunctions(object): - funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh, - np.arctanh, np.sin, np.cos, np.tan, np.exp, - np.exp2, np.log, np.sqrt, np.log10, np.log2, - np.log1p] - - def test_it(self): - for f in self.funcs: - if f is np.arccosh: - x = 1.5 - else: - x = .5 - fr = f(x) - fz = f(complex(x)) - assert_almost_equal(fz.real, fr, err_msg='real part %s' % f) - assert_almost_equal(fz.imag, 0., err_msg='imag part %s' % f) - - def test_precisions_consistent(self): - z = 1 + 1j - for f in self.funcs: - fcf = f(np.csingle(z)) - fcd = f(np.cdouble(z)) - fcl = f(np.clongdouble(z)) - assert_almost_equal(fcf, fcd, decimal=6, err_msg='fch-fcd %s' % f) - assert_almost_equal(fcl, fcd, decimal=15, err_msg='fch-fcl %s' % f) - - def test_branch_cuts(self): - # check branch cuts and continuity on them - _check_branch_cut(np.log, -0.5, 1j, 1, -1, True) - _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True) - _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True) - _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True) - _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True) - - _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True) - _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True) - _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True) - - _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True) - _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True) - _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True) - - # check against bogus branch cuts: assert continuity between quadrants - _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1) - _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1) - _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1) - - _check_branch_cut(np.arcsinh, [ -2, 
2, 0], [1j, 1j, 1], 1, 1) - _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1) - _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1) - - def test_branch_cuts_complex64(self): - # check branch cuts and continuity on them - _check_branch_cut(np.log, -0.5, 1j, 1, -1, True, np.complex64) - _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True, np.complex64) - _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True, np.complex64) - _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True, np.complex64) - _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True, np.complex64) - - _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) - _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) - _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64) - - _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64) - _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64) - _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) - - # check against bogus branch cuts: assert continuity between quadrants - _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64) - _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64) - _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64) - - _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64) - _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64) - _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64) - - def test_against_cmath(self): - import cmath - - points = [-1-1j, -1+1j, +1-1j, +1+1j] - name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan', - 'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'} - atol = 4*np.finfo(complex).eps - for func in self.funcs: - fname = func.__name__.split('.')[-1] - cname = name_map.get(fname, fname) - try: - cfunc = 
getattr(cmath, cname) - except AttributeError: - continue - for p in points: - a = complex(func(np.complex_(p))) - b = cfunc(p) - assert_(abs(a - b) < atol, "%s %s: %s; cmath: %s" % (fname, p, a, b)) - - @pytest.mark.parametrize('dtype', [np.complex64, np.complex_, np.longcomplex]) - def test_loss_of_precision(self, dtype): - """Check loss of precision in complex arc* functions""" - - # Check against known-good functions - - info = np.finfo(dtype) - real_dtype = dtype(0.).real.dtype - eps = info.eps - - def check(x, rtol): - x = x.astype(real_dtype) - - z = x.astype(dtype) - d = np.absolute(np.arcsinh(x)/np.arcsinh(z).real - 1) - assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), - 'arcsinh')) - - z = (1j*x).astype(dtype) - d = np.absolute(np.arcsinh(x)/np.arcsin(z).imag - 1) - assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), - 'arcsin')) - - z = x.astype(dtype) - d = np.absolute(np.arctanh(x)/np.arctanh(z).real - 1) - assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), - 'arctanh')) - - z = (1j*x).astype(dtype) - d = np.absolute(np.arctanh(x)/np.arctan(z).imag - 1) - assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), - 'arctan')) - - # The switchover was chosen as 1e-3; hence there can be up to - # ~eps/1e-3 of relative cancellation error before it - - x_series = np.logspace(-20, -3.001, 200) - x_basic = np.logspace(-2.999, 0, 10, endpoint=False) - - if dtype is np.longcomplex: - # It's not guaranteed that the system-provided arc functions - # are accurate down to a few epsilons. (Eg. on Linux 64-bit) - # So, give more leeway for long complex tests here: - # Can use 2.1 for > Ubuntu LTS Trusty (2014), glibc = 2.19. 
- check(x_series, 50.0*eps) - else: - check(x_series, 2.1*eps) - check(x_basic, 2.0*eps/1e-3) - - # Check a few points - - z = np.array([1e-5*(1+1j)], dtype=dtype) - p = 9.999999999333333333e-6 + 1.000000000066666666e-5j - d = np.absolute(1-np.arctanh(z)/p) - assert_(np.all(d < 1e-15)) - - p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j - d = np.absolute(1-np.arcsinh(z)/p) - assert_(np.all(d < 1e-15)) - - p = 9.999999999333333333e-6j + 1.000000000066666666e-5 - d = np.absolute(1-np.arctan(z)/p) - assert_(np.all(d < 1e-15)) - - p = 1.0000000000333333333e-5j + 9.999999999666666667e-6 - d = np.absolute(1-np.arcsin(z)/p) - assert_(np.all(d < 1e-15)) - - # Check continuity across switchover points - - def check(func, z0, d=1): - z0 = np.asarray(z0, dtype=dtype) - zp = z0 + abs(z0) * d * eps * 2 - zm = z0 - abs(z0) * d * eps * 2 - assert_(np.all(zp != zm), (zp, zm)) - - # NB: the cancellation error at the switchover is at least eps - good = (abs(func(zp) - func(zm)) < 2*eps) - assert_(np.all(good), (func, z0[~good])) - - for func in (np.arcsinh, np.arcsinh, np.arcsin, np.arctanh, np.arctan): - pts = [rp+1j*ip for rp in (-1e-3, 0, 1e-3) for ip in(-1e-3, 0, 1e-3) - if rp != 0 or ip != 0] - check(func, pts, 1) - check(func, pts, 1j) - check(func, pts, 1+1j) - - -class TestAttributes(object): - def test_attributes(self): - add = ncu.add - assert_equal(add.__name__, 'add') - assert_(add.ntypes >= 18) # don't fail if types added - assert_('ii->i' in add.types) - assert_equal(add.nin, 2) - assert_equal(add.nout, 1) - assert_equal(add.identity, 0) - - def test_doc(self): - # don't bother checking the long list of kwargs, which are likely to - # change - assert_(ncu.add.__doc__.startswith( - "add(x1, x2, /, out=None, *, where=True")) - assert_(ncu.frexp.__doc__.startswith( - "frexp(x[, out1, out2], / [, out=(None, None)], *, where=True")) - - -class TestSubclass(object): - - def test_subclass_op(self): - - class simple(np.ndarray): - def __new__(subtype, shape): - self = 
np.ndarray.__new__(subtype, shape, dtype=object) - self.fill(0) - return self - - a = simple((3, 4)) - assert_equal(a+a, a) - -def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False, - dtype=complex): - """ - Check for a branch cut in a function. - - Assert that `x0` lies on a branch cut of function `f` and `f` is - continuous from the direction `dx`. - - Parameters - ---------- - f : func - Function to check - x0 : array-like - Point on branch cut - dx : array-like - Direction to check continuity in - re_sign, im_sign : {1, -1} - Change of sign of the real or imaginary part expected - sig_zero_ok : bool - Whether to check if the branch cut respects signed zero (if applicable) - dtype : dtype - Dtype to check (should be complex) - - """ - x0 = np.atleast_1d(x0).astype(dtype) - dx = np.atleast_1d(dx).astype(dtype) - - if np.dtype(dtype).char == 'F': - scale = np.finfo(dtype).eps * 1e2 - atol = np.float32(1e-2) - else: - scale = np.finfo(dtype).eps * 1e3 - atol = 1e-4 - - y0 = f(x0) - yp = f(x0 + dx*scale*np.absolute(x0)/np.absolute(dx)) - ym = f(x0 - dx*scale*np.absolute(x0)/np.absolute(dx)) - - assert_(np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp)) - assert_(np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp)) - assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym)) - assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym)) - - if sig_zero_ok: - # check that signed zeros also work as a displacement - jr = (x0.real == 0) & (dx.real != 0) - ji = (x0.imag == 0) & (dx.imag != 0) - if np.any(jr): - x = x0[jr] - x.real = np.NZERO - ym = f(x) - assert_(np.all(np.absolute(y0[jr].real - ym.real*re_sign) < atol), (y0[jr], ym)) - assert_(np.all(np.absolute(y0[jr].imag - ym.imag*im_sign) < atol), (y0[jr], ym)) - - if np.any(ji): - x = x0[ji] - x.imag = np.NZERO - ym = f(x) - assert_(np.all(np.absolute(y0[ji].real - ym.real*re_sign) < atol), (y0[ji], ym)) - assert_(np.all(np.absolute(y0[ji].imag - 
ym.imag*im_sign) < atol), (y0[ji], ym)) - -def test_copysign(): - assert_(np.copysign(1, -1) == -1) - with np.errstate(divide="ignore"): - assert_(1 / np.copysign(0, -1) < 0) - assert_(1 / np.copysign(0, 1) > 0) - assert_(np.signbit(np.copysign(np.nan, -1))) - assert_(not np.signbit(np.copysign(np.nan, 1))) - -def _test_nextafter(t): - one = t(1) - two = t(2) - zero = t(0) - eps = np.finfo(t).eps - assert_(np.nextafter(one, two) - one == eps) - assert_(np.nextafter(one, zero) - one < 0) - assert_(np.isnan(np.nextafter(np.nan, one))) - assert_(np.isnan(np.nextafter(one, np.nan))) - assert_(np.nextafter(one, one) == one) - -def test_nextafter(): - return _test_nextafter(np.float64) - - -def test_nextafterf(): - return _test_nextafter(np.float32) - - -@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), - reason="long double is same as double") -@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"), - reason="IBM double double") -def test_nextafterl(): - return _test_nextafter(np.longdouble) - - -def test_nextafter_0(): - for t, direction in itertools.product(np.sctypes['float'], (1, -1)): - tiny = np.finfo(t).tiny - assert_(0. 
< direction * np.nextafter(t(0), t(direction)) < tiny) - assert_equal(np.nextafter(t(0), t(direction)) / t(2.1), direction * 0.0) - -def _test_spacing(t): - one = t(1) - eps = np.finfo(t).eps - nan = t(np.nan) - inf = t(np.inf) - with np.errstate(invalid='ignore'): - assert_(np.spacing(one) == eps) - assert_(np.isnan(np.spacing(nan))) - assert_(np.isnan(np.spacing(inf))) - assert_(np.isnan(np.spacing(-inf))) - assert_(np.spacing(t(1e30)) != 0) - -def test_spacing(): - return _test_spacing(np.float64) - -def test_spacingf(): - return _test_spacing(np.float32) - - -@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), - reason="long double is same as double") -@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"), - reason="IBM double double") -def test_spacingl(): - return _test_spacing(np.longdouble) - -def test_spacing_gfortran(): - # Reference from this fortran file, built with gfortran 4.3.3 on linux - # 32bits: - # PROGRAM test_spacing - # INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37) - # INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200) - # - # WRITE(*,*) spacing(0.00001_DBL) - # WRITE(*,*) spacing(1.0_DBL) - # WRITE(*,*) spacing(1000._DBL) - # WRITE(*,*) spacing(10500._DBL) - # - # WRITE(*,*) spacing(0.00001_SGL) - # WRITE(*,*) spacing(1.0_SGL) - # WRITE(*,*) spacing(1000._SGL) - # WRITE(*,*) spacing(10500._SGL) - # END PROGRAM - ref = {np.float64: [1.69406589450860068E-021, - 2.22044604925031308E-016, - 1.13686837721616030E-013, - 1.81898940354585648E-012], - np.float32: [9.09494702E-13, - 1.19209290E-07, - 6.10351563E-05, - 9.76562500E-04]} - - for dt, dec_ in zip([np.float32, np.float64], (10, 20)): - x = np.array([1e-5, 1, 1000, 10500], dtype=dt) - assert_array_almost_equal(np.spacing(x), ref[dt], decimal=dec_) - -def test_nextafter_vs_spacing(): - # XXX: spacing does not handle long double yet - for t in [np.float32, np.float64]: - for _f in [1, 1e-5, 1000]: - f = t(_f) - f1 = t(_f + 1) - 
assert_(np.nextafter(f, f1) - f == np.spacing(f)) - -def test_pos_nan(): - """Check np.nan is a positive nan.""" - assert_(np.signbit(np.nan) == 0) - -def test_reduceat(): - """Test bug in reduceat when structured arrays are not copied.""" - db = np.dtype([('name', 'S11'), ('time', np.int64), ('value', np.float32)]) - a = np.empty([100], dtype=db) - a['name'] = 'Simple' - a['time'] = 10 - a['value'] = 100 - indx = [0, 7, 15, 25] - - h2 = [] - val1 = indx[0] - for val2 in indx[1:]: - h2.append(np.add.reduce(a['value'][val1:val2])) - val1 = val2 - h2.append(np.add.reduce(a['value'][val1:])) - h2 = np.array(h2) - - # test buffered -- this should work - h1 = np.add.reduceat(a['value'], indx) - assert_array_almost_equal(h1, h2) - - # This is when the error occurs. - # test no buffer - np.setbufsize(32) - h1 = np.add.reduceat(a['value'], indx) - np.setbufsize(np.UFUNC_BUFSIZE_DEFAULT) - assert_array_almost_equal(h1, h2) - -def test_reduceat_empty(): - """Reduceat should work with empty arrays""" - indices = np.array([], 'i4') - x = np.array([], 'f8') - result = np.add.reduceat(x, indices) - assert_equal(result.dtype, x.dtype) - assert_equal(result.shape, (0,)) - # Another case with a slightly different zero-sized shape - x = np.ones((5, 2)) - result = np.add.reduceat(x, [], axis=0) - assert_equal(result.dtype, x.dtype) - assert_equal(result.shape, (0, 2)) - result = np.add.reduceat(x, [], axis=1) - assert_equal(result.dtype, x.dtype) - assert_equal(result.shape, (5, 0)) - -def test_complex_nan_comparisons(): - nans = [complex(np.nan, 0), complex(0, np.nan), complex(np.nan, np.nan)] - fins = [complex(1, 0), complex(-1, 0), complex(0, 1), complex(0, -1), - complex(1, 1), complex(-1, -1), complex(0, 0)] - - with np.errstate(invalid='ignore'): - for x in nans + fins: - x = np.array([x]) - for y in nans + fins: - y = np.array([y]) - - if np.isfinite(x) and np.isfinite(y): - continue - - assert_equal(x < y, False, err_msg="%r < %r" % (x, y)) - assert_equal(x > y, False, 
err_msg="%r > %r" % (x, y)) - assert_equal(x <= y, False, err_msg="%r <= %r" % (x, y)) - assert_equal(x >= y, False, err_msg="%r >= %r" % (x, y)) - assert_equal(x == y, False, err_msg="%r == %r" % (x, y)) - - -def test_rint_big_int(): - # np.rint bug for large integer values on Windows 32-bit and MKL - # https://github.com/numpy/numpy/issues/6685 - val = 4607998452777363968 - # This is exactly representable in floating point - assert_equal(val, int(float(val))) - # Rint should not change the value - assert_equal(val, np.rint(val)) - - -def test_signaling_nan_exceptions(): - with assert_no_warnings(): - a = np.ndarray(shape=(), dtype='float32', buffer=b'\x00\xe0\xbf\xff') - np.isnan(a) - -@pytest.mark.parametrize("arr", [ - np.arange(2), - np.matrix([0, 1]), - np.matrix([[0, 1], [2, 5]]), - ]) -def test_outer_subclass_preserve(arr): - # for gh-8661 - class foo(np.ndarray): pass - actual = np.multiply.outer(arr.view(foo), arr.view(foo)) - assert actual.__class__.__name__ == 'foo' diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_umath_accuracy.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_umath_accuracy.py deleted file mode 100644 index fec1807..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_umath_accuracy.py +++ /dev/null @@ -1,54 +0,0 @@ -import numpy as np -import platform -from os import path -import sys -import pytest -from ctypes import * -from numpy.testing import assert_array_max_ulp - -runtest = sys.platform.startswith('linux') and (platform.machine() == 'x86_64') -platform_skip = pytest.mark.skipif(not runtest, - reason=""" - stick to x86_64 and linux platforms. - test seems to fail on some of ARM and power - architectures. 
- """) - -# convert string to hex function taken from: -# https://stackoverflow.com/questions/1592158/convert-hex-to-float # -def convert(s): - i = int(s, 16) # convert from hex to a Python int - cp = pointer(c_int(i)) # make this into a c integer - fp = cast(cp, POINTER(c_float)) # cast the int pointer to a float pointer - return fp.contents.value # dereference the pointer, get the float - -str_to_float = np.vectorize(convert) -files = ['umath-validation-set-exp', - 'umath-validation-set-log', - 'umath-validation-set-sin', - 'umath-validation-set-cos'] - -class TestAccuracy(object): - @pytest.mark.xfail(reason="Fails for MacPython/numpy-wheels builds") - def test_validate_transcendentals(self): - with np.errstate(all='ignore'): - for filename in files: - data_dir = path.join(path.dirname(__file__), 'data') - filepath = path.join(data_dir, filename) - with open(filepath) as fid: - file_without_comments = (r for r in fid if not r[0] in ('$', '#')) - data = np.genfromtxt(file_without_comments, - dtype=('|S39','|S39','|S39',int), - names=('type','input','output','ulperr'), - delimiter=',', - skip_header=1) - npfunc = getattr(np, filename.split('-')[3]) - for datatype in np.unique(data['type']): - data_subset = data[data['type'] == datatype] - inval = np.array(str_to_float(data_subset['input'].astype(str)), dtype=eval(datatype)) - outval = np.array(str_to_float(data_subset['output'].astype(str)), dtype=eval(datatype)) - perm = np.random.permutation(len(inval)) - inval = inval[perm] - outval = outval[perm] - maxulperr = data_subset['ulperr'].max() - assert_array_max_ulp(npfunc(inval), outval, maxulperr) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_umath_complex.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_umath_complex.py deleted file mode 100644 index 1f5b407..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_umath_complex.py +++ /dev/null @@ -1,544 +0,0 @@ -from __future__ import division, absolute_import, 
print_function - -import sys -import platform -import pytest - -import numpy as np -# import the c-extension module directly since _arg is not exported via umath -import numpy.core._multiarray_umath as ncu -from numpy.testing import ( - assert_raises, assert_equal, assert_array_equal, assert_almost_equal - ) - -# TODO: branch cuts (use Pauli code) -# TODO: conj 'symmetry' -# TODO: FPU exceptions - -# At least on Windows the results of many complex functions are not conforming -# to the C99 standard. See ticket 1574. -# Ditto for Solaris (ticket 1642) and OS X on PowerPC. -#FIXME: this will probably change when we require full C99 campatibility -with np.errstate(all='ignore'): - functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0) - or (np.log(complex(np.NZERO, 0)).imag != np.pi)) -# TODO: replace with a check on whether platform-provided C99 funcs are used -xfail_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky) - -# TODO This can be xfail when the generator functions are got rid of. 
-platform_skip = pytest.mark.skipif(xfail_complex_tests, - reason="Inadequate C99 complex support") - - - -class TestCexp(object): - def test_simple(self): - check = check_complex_value - f = np.exp - - check(f, 1, 0, np.exp(1), 0, False) - check(f, 0, 1, np.cos(1), np.sin(1), False) - - ref = np.exp(1) * complex(np.cos(1), np.sin(1)) - check(f, 1, 1, ref.real, ref.imag, False) - - @platform_skip - def test_special_values(self): - # C99: Section G 6.3.1 - - check = check_complex_value - f = np.exp - - # cexp(+-0 + 0i) is 1 + 0i - check(f, np.PZERO, 0, 1, 0, False) - check(f, np.NZERO, 0, 1, 0, False) - - # cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU - # exception - check(f, 1, np.inf, np.nan, np.nan) - check(f, -1, np.inf, np.nan, np.nan) - check(f, 0, np.inf, np.nan, np.nan) - - # cexp(inf + 0i) is inf + 0i - check(f, np.inf, 0, np.inf, 0) - - # cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y - check(f, -np.inf, 1, np.PZERO, np.PZERO) - check(f, -np.inf, 0.75 * np.pi, np.NZERO, np.PZERO) - - # cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y - check(f, np.inf, 1, np.inf, np.inf) - check(f, np.inf, 0.75 * np.pi, -np.inf, np.inf) - - # cexp(-inf + inf i) is +-0 +- 0i (signs unspecified) - def _check_ninf_inf(dummy): - msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)" - with np.errstate(invalid='ignore'): - z = f(np.array(complex(-np.inf, np.inf))) - if z.real != 0 or z.imag != 0: - raise AssertionError(msgform % (z.real, z.imag)) - - _check_ninf_inf(None) - - # cexp(inf + inf i) is +-inf + NaNi and raised invalid FPU ex. 
- def _check_inf_inf(dummy): - msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)" - with np.errstate(invalid='ignore'): - z = f(np.array(complex(np.inf, np.inf))) - if not np.isinf(z.real) or not np.isnan(z.imag): - raise AssertionError(msgform % (z.real, z.imag)) - - _check_inf_inf(None) - - # cexp(-inf + nan i) is +-0 +- 0i - def _check_ninf_nan(dummy): - msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)" - with np.errstate(invalid='ignore'): - z = f(np.array(complex(-np.inf, np.nan))) - if z.real != 0 or z.imag != 0: - raise AssertionError(msgform % (z.real, z.imag)) - - _check_ninf_nan(None) - - # cexp(inf + nan i) is +-inf + nan - def _check_inf_nan(dummy): - msgform = "cexp(-inf, nan) is (%f, %f), expected (+-inf, nan)" - with np.errstate(invalid='ignore'): - z = f(np.array(complex(np.inf, np.nan))) - if not np.isinf(z.real) or not np.isnan(z.imag): - raise AssertionError(msgform % (z.real, z.imag)) - - _check_inf_nan(None) - - # cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU - # ex) - check(f, np.nan, 1, np.nan, np.nan) - check(f, np.nan, -1, np.nan, np.nan) - - check(f, np.nan, np.inf, np.nan, np.nan) - check(f, np.nan, -np.inf, np.nan, np.nan) - - # cexp(nan + nani) is nan + nani - check(f, np.nan, np.nan, np.nan, np.nan) - - # TODO This can be xfail when the generator functions are got rid of. 
- @pytest.mark.skip(reason="cexp(nan + 0I) is wrong on most platforms") - def test_special_values2(self): - # XXX: most implementations get it wrong here (including glibc <= 2.10) - # cexp(nan + 0i) is nan + 0i - check = check_complex_value - f = np.exp - - check(f, np.nan, 0, np.nan, 0) - -class TestClog(object): - def test_simple(self): - x = np.array([1+0j, 1+2j]) - y_r = np.log(np.abs(x)) + 1j * np.angle(x) - y = np.log(x) - for i in range(len(x)): - assert_almost_equal(y[i], y_r[i]) - - @platform_skip - @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.") - def test_special_values(self): - xl = [] - yl = [] - - # From C99 std (Sec 6.3.2) - # XXX: check exceptions raised - # --- raise for invalid fails. - - # clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero' - # floating-point exception. - with np.errstate(divide='raise'): - x = np.array([np.NZERO], dtype=complex) - y = complex(-np.inf, np.pi) - assert_raises(FloatingPointError, np.log, x) - with np.errstate(divide='ignore'): - assert_almost_equal(np.log(x), y) - - xl.append(x) - yl.append(y) - - # clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero' - # floating-point exception. - with np.errstate(divide='raise'): - x = np.array([0], dtype=complex) - y = complex(-np.inf, 0) - assert_raises(FloatingPointError, np.log, x) - with np.errstate(divide='ignore'): - assert_almost_equal(np.log(x), y) - - xl.append(x) - yl.append(y) - - # clog(x + i inf returns +inf + i pi /2, for finite x. - x = np.array([complex(1, np.inf)], dtype=complex) - y = complex(np.inf, 0.5 * np.pi) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - x = np.array([complex(-1, np.inf)], dtype=complex) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(x + iNaN) returns NaN + iNaN and optionally raises the - # 'invalid' floating- point exception, for finite x. 
- with np.errstate(invalid='raise'): - x = np.array([complex(1., np.nan)], dtype=complex) - y = complex(np.nan, np.nan) - #assert_raises(FloatingPointError, np.log, x) - with np.errstate(invalid='ignore'): - assert_almost_equal(np.log(x), y) - - xl.append(x) - yl.append(y) - - with np.errstate(invalid='raise'): - x = np.array([np.inf + 1j * np.nan], dtype=complex) - #assert_raises(FloatingPointError, np.log, x) - with np.errstate(invalid='ignore'): - assert_almost_equal(np.log(x), y) - - xl.append(x) - yl.append(y) - - # clog(- inf + iy) returns +inf + ipi , for finite positive-signed y. - x = np.array([-np.inf + 1j], dtype=complex) - y = complex(np.inf, np.pi) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(+ inf + iy) returns +inf + i0, for finite positive-signed y. - x = np.array([np.inf + 1j], dtype=complex) - y = complex(np.inf, 0) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(- inf + i inf) returns +inf + i3pi /4. - x = np.array([complex(-np.inf, np.inf)], dtype=complex) - y = complex(np.inf, 0.75 * np.pi) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(+ inf + i inf) returns +inf + ipi /4. - x = np.array([complex(np.inf, np.inf)], dtype=complex) - y = complex(np.inf, 0.25 * np.pi) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(+/- inf + iNaN) returns +inf + iNaN. - x = np.array([complex(np.inf, np.nan)], dtype=complex) - y = complex(np.inf, np.nan) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - x = np.array([complex(-np.inf, np.nan)], dtype=complex) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(NaN + iy) returns NaN + iNaN and optionally raises the - # 'invalid' floating-point exception, for finite y. 
- x = np.array([complex(np.nan, 1)], dtype=complex) - y = complex(np.nan, np.nan) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(NaN + i inf) returns +inf + iNaN. - x = np.array([complex(np.nan, np.inf)], dtype=complex) - y = complex(np.inf, np.nan) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(NaN + iNaN) returns NaN + iNaN. - x = np.array([complex(np.nan, np.nan)], dtype=complex) - y = complex(np.nan, np.nan) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(conj(z)) = conj(clog(z)). - xa = np.array(xl, dtype=complex) - ya = np.array(yl, dtype=complex) - with np.errstate(divide='ignore'): - for i in range(len(xa)): - assert_almost_equal(np.log(xa[i].conj()), ya[i].conj()) - - -class TestCsqrt(object): - - def test_simple(self): - # sqrt(1) - check_complex_value(np.sqrt, 1, 0, 1, 0) - - # sqrt(1i) - rres = 0.5*np.sqrt(2) - ires = rres - check_complex_value(np.sqrt, 0, 1, rres, ires, False) - - # sqrt(-1) - check_complex_value(np.sqrt, -1, 0, 0, 1) - - def test_simple_conjugate(self): - ref = np.conj(np.sqrt(complex(1, 1))) - - def f(z): - return np.sqrt(np.conj(z)) - - check_complex_value(f, 1, 1, ref.real, ref.imag, False) - - #def test_branch_cut(self): - # _check_branch_cut(f, -1, 0, 1, -1) - - @platform_skip - def test_special_values(self): - # C99: Sec G 6.4.2 - - check = check_complex_value - f = np.sqrt - - # csqrt(+-0 + 0i) is 0 + 0i - check(f, np.PZERO, 0, 0, 0) - check(f, np.NZERO, 0, 0, 0) - - # csqrt(x + infi) is inf + infi for any x (including NaN) - check(f, 1, np.inf, np.inf, np.inf) - check(f, -1, np.inf, np.inf, np.inf) - - check(f, np.PZERO, np.inf, np.inf, np.inf) - check(f, np.NZERO, np.inf, np.inf, np.inf) - check(f, np.inf, np.inf, np.inf, np.inf) - check(f, -np.inf, np.inf, np.inf, np.inf) - check(f, -np.nan, np.inf, np.inf, np.inf) - - # csqrt(x + nani) is nan + nani for any finite x - check(f, 1, np.nan, np.nan, np.nan) - check(f, -1, np.nan, np.nan, 
np.nan) - check(f, 0, np.nan, np.nan, np.nan) - - # csqrt(-inf + yi) is +0 + infi for any finite y > 0 - check(f, -np.inf, 1, np.PZERO, np.inf) - - # csqrt(inf + yi) is +inf + 0i for any finite y > 0 - check(f, np.inf, 1, np.inf, np.PZERO) - - # csqrt(-inf + nani) is nan +- infi (both +i infi are valid) - def _check_ninf_nan(dummy): - msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)" - z = np.sqrt(np.array(complex(-np.inf, np.nan))) - #Fixme: ugly workaround for isinf bug. - with np.errstate(invalid='ignore'): - if not (np.isnan(z.real) and np.isinf(z.imag)): - raise AssertionError(msgform % (z.real, z.imag)) - - _check_ninf_nan(None) - - # csqrt(+inf + nani) is inf + nani - check(f, np.inf, np.nan, np.inf, np.nan) - - # csqrt(nan + yi) is nan + nani for any finite y (infinite handled in x - # + nani) - check(f, np.nan, 0, np.nan, np.nan) - check(f, np.nan, 1, np.nan, np.nan) - check(f, np.nan, np.nan, np.nan, np.nan) - - # XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch - # cuts first) - -class TestCpow(object): - def setup(self): - self.olderr = np.seterr(invalid='ignore') - - def teardown(self): - np.seterr(**self.olderr) - - def test_simple(self): - x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) - y_r = x ** 2 - y = np.power(x, 2) - for i in range(len(x)): - assert_almost_equal(y[i], y_r[i]) - - def test_scalar(self): - x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) - y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) - lx = list(range(len(x))) - # Compute the values for complex type in python - p_r = [complex(x[i]) ** complex(y[i]) for i in lx] - # Substitute a result allowed by C99 standard - p_r[4] = complex(np.inf, np.nan) - # Do the same with numpy complex scalars - n_r = [x[i] ** y[i] for i in lx] - for i in lx: - assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) - - def test_array(self): - x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) - y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) - lx = 
list(range(len(x))) - # Compute the values for complex type in python - p_r = [complex(x[i]) ** complex(y[i]) for i in lx] - # Substitute a result allowed by C99 standard - p_r[4] = complex(np.inf, np.nan) - # Do the same with numpy arrays - n_r = x ** y - for i in lx: - assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) - -class TestCabs(object): - def setup(self): - self.olderr = np.seterr(invalid='ignore') - - def teardown(self): - np.seterr(**self.olderr) - - def test_simple(self): - x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) - y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan]) - y = np.abs(x) - for i in range(len(x)): - assert_almost_equal(y[i], y_r[i]) - - def test_fabs(self): - # Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs) - x = np.array([1+0j], dtype=complex) - assert_array_equal(np.abs(x), np.real(x)) - - x = np.array([complex(1, np.NZERO)], dtype=complex) - assert_array_equal(np.abs(x), np.real(x)) - - x = np.array([complex(np.inf, np.NZERO)], dtype=complex) - assert_array_equal(np.abs(x), np.real(x)) - - x = np.array([complex(np.nan, np.NZERO)], dtype=complex) - assert_array_equal(np.abs(x), np.real(x)) - - def test_cabs_inf_nan(self): - x, y = [], [] - - # cabs(+-nan + nani) returns nan - x.append(np.nan) - y.append(np.nan) - check_real_value(np.abs, np.nan, np.nan, np.nan) - - x.append(np.nan) - y.append(-np.nan) - check_real_value(np.abs, -np.nan, np.nan, np.nan) - - # According to C99 standard, if exactly one of the real/part is inf and - # the other nan, then cabs should return inf - x.append(np.inf) - y.append(np.nan) - check_real_value(np.abs, np.inf, np.nan, np.inf) - - x.append(-np.inf) - y.append(np.nan) - check_real_value(np.abs, -np.inf, np.nan, np.inf) - - # cabs(conj(z)) == conj(cabs(z)) (= cabs(z)) - def f(a): - return np.abs(np.conj(a)) - - def g(a, b): - return np.abs(complex(a, b)) - - xa = np.array(x, dtype=complex) - for i in range(len(xa)): - ref = g(x[i], y[i]) - check_real_value(f, 
x[i], y[i], ref) - -class TestCarg(object): - def test_simple(self): - check_real_value(ncu._arg, 1, 0, 0, False) - check_real_value(ncu._arg, 0, 1, 0.5*np.pi, False) - - check_real_value(ncu._arg, 1, 1, 0.25*np.pi, False) - check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO) - - # TODO This can be xfail when the generator functions are got rid of. - @pytest.mark.skip( - reason="Complex arithmetic with signed zero fails on most platforms") - def test_zero(self): - # carg(-0 +- 0i) returns +- pi - check_real_value(ncu._arg, np.NZERO, np.PZERO, np.pi, False) - check_real_value(ncu._arg, np.NZERO, np.NZERO, -np.pi, False) - - # carg(+0 +- 0i) returns +- 0 - check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO) - check_real_value(ncu._arg, np.PZERO, np.NZERO, np.NZERO) - - # carg(x +- 0i) returns +- 0 for x > 0 - check_real_value(ncu._arg, 1, np.PZERO, np.PZERO, False) - check_real_value(ncu._arg, 1, np.NZERO, np.NZERO, False) - - # carg(x +- 0i) returns +- pi for x < 0 - check_real_value(ncu._arg, -1, np.PZERO, np.pi, False) - check_real_value(ncu._arg, -1, np.NZERO, -np.pi, False) - - # carg(+- 0 + yi) returns pi/2 for y > 0 - check_real_value(ncu._arg, np.PZERO, 1, 0.5 * np.pi, False) - check_real_value(ncu._arg, np.NZERO, 1, 0.5 * np.pi, False) - - # carg(+- 0 + yi) returns -pi/2 for y < 0 - check_real_value(ncu._arg, np.PZERO, -1, 0.5 * np.pi, False) - check_real_value(ncu._arg, np.NZERO, -1, -0.5 * np.pi, False) - - #def test_branch_cuts(self): - # _check_branch_cut(ncu._arg, -1, 1j, -1, 1) - - def test_special_values(self): - # carg(-np.inf +- yi) returns +-pi for finite y > 0 - check_real_value(ncu._arg, -np.inf, 1, np.pi, False) - check_real_value(ncu._arg, -np.inf, -1, -np.pi, False) - - # carg(np.inf +- yi) returns +-0 for finite y > 0 - check_real_value(ncu._arg, np.inf, 1, np.PZERO, False) - check_real_value(ncu._arg, np.inf, -1, np.NZERO, False) - - # carg(x +- np.infi) returns +-pi/2 for finite x - check_real_value(ncu._arg, 1, np.inf, 0.5 * 
np.pi, False) - check_real_value(ncu._arg, 1, -np.inf, -0.5 * np.pi, False) - - # carg(-np.inf +- np.infi) returns +-3pi/4 - check_real_value(ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False) - check_real_value(ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False) - - # carg(np.inf +- np.infi) returns +-pi/4 - check_real_value(ncu._arg, np.inf, np.inf, 0.25 * np.pi, False) - check_real_value(ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False) - - # carg(x + yi) returns np.nan if x or y is nan - check_real_value(ncu._arg, np.nan, 0, np.nan, False) - check_real_value(ncu._arg, 0, np.nan, np.nan, False) - - check_real_value(ncu._arg, np.nan, np.inf, np.nan, False) - check_real_value(ncu._arg, np.inf, np.nan, np.nan, False) - - -def check_real_value(f, x1, y1, x, exact=True): - z1 = np.array([complex(x1, y1)]) - if exact: - assert_equal(f(z1), x) - else: - assert_almost_equal(f(z1), x) - - -def check_complex_value(f, x1, y1, x2, y2, exact=True): - z1 = np.array([complex(x1, y1)]) - z2 = complex(x2, y2) - with np.errstate(invalid='ignore'): - if exact: - assert_equal(f(z1), z2) - else: - assert_almost_equal(f(z1), z2) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_unicode.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_unicode.py deleted file mode 100644 index 2ffd880..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_unicode.py +++ /dev/null @@ -1,396 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys - -import numpy as np -from numpy.compat import unicode -from numpy.testing import assert_, assert_equal, assert_array_equal - -# Guess the UCS length for this python interpreter -if sys.version_info[:2] >= (3, 3): - # Python 3.3 uses a flexible string representation - ucs4 = False - - def buffer_length(arr): - if isinstance(arr, unicode): - arr = str(arr) - if not arr: - charmax = 0 - else: - charmax = max([ord(c) for c in arr]) - if charmax < 256: - size = 1 - elif charmax < 65536: - size = 
2 - else: - size = 4 - return size * len(arr) - v = memoryview(arr) - if v.shape is None: - return len(v) * v.itemsize - else: - return np.prod(v.shape) * v.itemsize -else: - if len(buffer(u'u')) == 4: - ucs4 = True - else: - ucs4 = False - - def buffer_length(arr): - if isinstance(arr, np.ndarray): - return len(arr.data) - return len(buffer(arr)) - -# In both cases below we need to make sure that the byte swapped value (as -# UCS4) is still a valid unicode: -# Value that can be represented in UCS2 interpreters -ucs2_value = u'\u0900' -# Value that cannot be represented in UCS2 interpreters (but can in UCS4) -ucs4_value = u'\U00100900' - - -def test_string_cast(): - str_arr = np.array(["1234", "1234\0\0"], dtype='S') - uni_arr1 = str_arr.astype('>U') - uni_arr2 = str_arr.astype('>> _lib = np.ctypeslib.load_library('libmystuff', '.') #doctest: +SKIP - -Our result type, an ndarray that must be of type double, be 1-dimensional -and is C-contiguous in memory: - ->>> array_1d_double = np.ctypeslib.ndpointer( -... dtype=np.double, -... ndim=1, flags='CONTIGUOUS') #doctest: +SKIP - -Our C-function typically takes an array and updates its values -in-place. 
For example:: - - void foo_func(double* x, int length) - { - int i; - for (i = 0; i < length; i++) { - x[i] = i*i; - } - } - -We wrap it using: - ->>> _lib.foo_func.restype = None #doctest: +SKIP ->>> _lib.foo_func.argtypes = [array_1d_double, c_int] #doctest: +SKIP - -Then, we're ready to call ``foo_func``: - ->>> out = np.empty(15, dtype=np.double) ->>> _lib.foo_func(out, len(out)) #doctest: +SKIP - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['load_library', 'ndpointer', 'ctypes_load_library', - 'c_intp', 'as_ctypes', 'as_array'] - -import os -from numpy import ( - integer, ndarray, dtype as _dtype, deprecate, array, frombuffer -) -from numpy.core.multiarray import _flagdict, flagsobj - -try: - import ctypes -except ImportError: - ctypes = None - -if ctypes is None: - def _dummy(*args, **kwds): - """ - Dummy object that raises an ImportError if ctypes is not available. - - Raises - ------ - ImportError - If ctypes is not available. - - """ - raise ImportError("ctypes is not available.") - ctypes_load_library = _dummy - load_library = _dummy - as_ctypes = _dummy - as_array = _dummy - from numpy import intp as c_intp - _ndptr_base = object -else: - import numpy.core._internal as nic - c_intp = nic._getintp_ctype() - del nic - _ndptr_base = ctypes.c_void_p - - # Adapted from Albert Strasheim - def load_library(libname, loader_path): - """ - It is possible to load a library using - >>> lib = ctypes.cdll[] # doctest: +SKIP - - But there are cross-platform considerations, such as library file extensions, - plus the fact Windows will just load the first library it finds with that name. - NumPy supplies the load_library function as a convenience. - - Parameters - ---------- - libname : str - Name of the library, which can have 'lib' as a prefix, - but without an extension. - loader_path : str - Where the library can be found. 
- - Returns - ------- - ctypes.cdll[libpath] : library object - A ctypes library object - - Raises - ------ - OSError - If there is no library with the expected extension, or the - library is defective and cannot be loaded. - """ - if ctypes.__version__ < '1.0.1': - import warnings - warnings.warn("All features of ctypes interface may not work " - "with ctypes < 1.0.1", stacklevel=2) - - ext = os.path.splitext(libname)[1] - if not ext: - # Try to load library with platform-specific name, otherwise - # default to libname.[so|pyd]. Sometimes, these files are built - # erroneously on non-linux platforms. - from numpy.distutils.misc_util import get_shared_lib_extension - so_ext = get_shared_lib_extension() - libname_ext = [libname + so_ext] - # mac, windows and linux >= py3.2 shared library and loadable - # module have different extensions so try both - so_ext2 = get_shared_lib_extension(is_python_ext=True) - if not so_ext2 == so_ext: - libname_ext.insert(0, libname + so_ext2) - else: - libname_ext = [libname] - - loader_path = os.path.abspath(loader_path) - if not os.path.isdir(loader_path): - libdir = os.path.dirname(loader_path) - else: - libdir = loader_path - - for ln in libname_ext: - libpath = os.path.join(libdir, ln) - if os.path.exists(libpath): - try: - return ctypes.cdll[libpath] - except OSError: - ## defective lib file - raise - ## if no successful return in the libname_ext loop: - raise OSError("no file with expected extension") - - ctypes_load_library = deprecate(load_library, 'ctypes_load_library', - 'load_library') - -def _num_fromflags(flaglist): - num = 0 - for val in flaglist: - num += _flagdict[val] - return num - -_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE', - 'OWNDATA', 'UPDATEIFCOPY', 'WRITEBACKIFCOPY'] -def _flags_fromnum(num): - res = [] - for key in _flagnames: - value = _flagdict[key] - if (num & value): - res.append(key) - return res - - -class _ndptr(_ndptr_base): - @classmethod - def from_param(cls, obj): - if not 
isinstance(obj, ndarray): - raise TypeError("argument must be an ndarray") - if cls._dtype_ is not None \ - and obj.dtype != cls._dtype_: - raise TypeError("array must have data type %s" % cls._dtype_) - if cls._ndim_ is not None \ - and obj.ndim != cls._ndim_: - raise TypeError("array must have %d dimension(s)" % cls._ndim_) - if cls._shape_ is not None \ - and obj.shape != cls._shape_: - raise TypeError("array must have shape %s" % str(cls._shape_)) - if cls._flags_ is not None \ - and ((obj.flags.num & cls._flags_) != cls._flags_): - raise TypeError("array must have flags %s" % - _flags_fromnum(cls._flags_)) - return obj.ctypes - - -class _concrete_ndptr(_ndptr): - """ - Like _ndptr, but with `_shape_` and `_dtype_` specified. - - Notably, this means the pointer has enough information to reconstruct - the array, which is not generally true. - """ - def _check_retval_(self): - """ - This method is called when this class is used as the .restype - attribute for a shared-library function, to automatically wrap the - pointer into an array. - """ - return self.contents - - @property - def contents(self): - """ - Get an ndarray viewing the data pointed to by this pointer. - - This mirrors the `contents` attribute of a normal ctypes pointer - """ - full_dtype = _dtype((self._dtype_, self._shape_)) - full_ctype = ctypes.c_char * full_dtype.itemsize - buffer = ctypes.cast(self, ctypes.POINTER(full_ctype)).contents - return frombuffer(buffer, dtype=full_dtype).squeeze(axis=0) - - -# Factory for an array-checking class with from_param defined for -# use with ctypes argtypes mechanism -_pointer_type_cache = {} -def ndpointer(dtype=None, ndim=None, shape=None, flags=None): - """ - Array-checking restype/argtypes. - - An ndpointer instance is used to describe an ndarray in restypes - and argtypes specifications. 
This approach is more flexible than - using, for example, ``POINTER(c_double)``, since several restrictions - can be specified, which are verified upon calling the ctypes function. - These include data type, number of dimensions, shape and flags. If a - given array does not satisfy the specified restrictions, - a ``TypeError`` is raised. - - Parameters - ---------- - dtype : data-type, optional - Array data-type. - ndim : int, optional - Number of array dimensions. - shape : tuple of ints, optional - Array shape. - flags : str or tuple of str - Array flags; may be one or more of: - - - C_CONTIGUOUS / C / CONTIGUOUS - - F_CONTIGUOUS / F / FORTRAN - - OWNDATA / O - - WRITEABLE / W - - ALIGNED / A - - WRITEBACKIFCOPY / X - - UPDATEIFCOPY / U - - Returns - ------- - klass : ndpointer type object - A type object, which is an ``_ndtpr`` instance containing - dtype, ndim, shape and flags information. - - Raises - ------ - TypeError - If a given array does not satisfy the specified restrictions. - - Examples - -------- - >>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64, - ... ndim=1, - ... flags='C_CONTIGUOUS')] - ... #doctest: +SKIP - >>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64)) - ... 
#doctest: +SKIP - - """ - - # normalize dtype to an Optional[dtype] - if dtype is not None: - dtype = _dtype(dtype) - - # normalize flags to an Optional[int] - num = None - if flags is not None: - if isinstance(flags, str): - flags = flags.split(',') - elif isinstance(flags, (int, integer)): - num = flags - flags = _flags_fromnum(num) - elif isinstance(flags, flagsobj): - num = flags.num - flags = _flags_fromnum(num) - if num is None: - try: - flags = [x.strip().upper() for x in flags] - except Exception: - raise TypeError("invalid flags specification") - num = _num_fromflags(flags) - - # normalize shape to an Optional[tuple] - if shape is not None: - try: - shape = tuple(shape) - except TypeError: - # single integer -> 1-tuple - shape = (shape,) - - cache_key = (dtype, ndim, shape, num) - - try: - return _pointer_type_cache[cache_key] - except KeyError: - pass - - # produce a name for the new type - if dtype is None: - name = 'any' - elif dtype.names is not None: - name = str(id(dtype)) - else: - name = dtype.str - if ndim is not None: - name += "_%dd" % ndim - if shape is not None: - name += "_"+"x".join(str(x) for x in shape) - if flags is not None: - name += "_"+"_".join(flags) - - if dtype is not None and shape is not None: - base = _concrete_ndptr - else: - base = _ndptr - - klass = type("ndpointer_%s"%name, (base,), - {"_dtype_": dtype, - "_shape_" : shape, - "_ndim_" : ndim, - "_flags_" : num}) - _pointer_type_cache[cache_key] = klass - return klass - - -if ctypes is not None: - def _ctype_ndarray(element_type, shape): - """ Create an ndarray of the given element type and shape """ - for dim in shape[::-1]: - element_type = dim * element_type - # prevent the type name include np.ctypeslib - element_type.__module__ = None - return element_type - - - def _get_scalar_type_map(): - """ - Return a dictionary mapping native endian scalar dtype to ctypes types - """ - ct = ctypes - simple_types = [ - ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong, - 
ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong, - ct.c_float, ct.c_double, - ct.c_bool, - ] - return {_dtype(ctype): ctype for ctype in simple_types} - - - _scalar_type_map = _get_scalar_type_map() - - - def _ctype_from_dtype_scalar(dtype): - # swapping twice ensure that `=` is promoted to <, >, or | - dtype_with_endian = dtype.newbyteorder('S').newbyteorder('S') - dtype_native = dtype.newbyteorder('=') - try: - ctype = _scalar_type_map[dtype_native] - except KeyError: - raise NotImplementedError( - "Converting {!r} to a ctypes type".format(dtype) - ) - - if dtype_with_endian.byteorder == '>': - ctype = ctype.__ctype_be__ - elif dtype_with_endian.byteorder == '<': - ctype = ctype.__ctype_le__ - - return ctype - - - def _ctype_from_dtype_subarray(dtype): - element_dtype, shape = dtype.subdtype - ctype = _ctype_from_dtype(element_dtype) - return _ctype_ndarray(ctype, shape) - - - def _ctype_from_dtype_structured(dtype): - # extract offsets of each field - field_data = [] - for name in dtype.names: - field_dtype, offset = dtype.fields[name][:2] - field_data.append((offset, name, _ctype_from_dtype(field_dtype))) - - # ctypes doesn't care about field order - field_data = sorted(field_data, key=lambda f: f[0]) - - if len(field_data) > 1 and all(offset == 0 for offset, name, ctype in field_data): - # union, if multiple fields all at address 0 - size = 0 - _fields_ = [] - for offset, name, ctype in field_data: - _fields_.append((name, ctype)) - size = max(size, ctypes.sizeof(ctype)) - - # pad to the right size - if dtype.itemsize != size: - _fields_.append(('', ctypes.c_char * dtype.itemsize)) - - # we inserted manual padding, so always `_pack_` - return type('union', (ctypes.Union,), dict( - _fields_=_fields_, - _pack_=1, - __module__=None, - )) - else: - last_offset = 0 - _fields_ = [] - for offset, name, ctype in field_data: - padding = offset - last_offset - if padding < 0: - raise NotImplementedError("Overlapping fields") - if padding > 0: - 
_fields_.append(('', ctypes.c_char * padding)) - - _fields_.append((name, ctype)) - last_offset = offset + ctypes.sizeof(ctype) - - - padding = dtype.itemsize - last_offset - if padding > 0: - _fields_.append(('', ctypes.c_char * padding)) - - # we inserted manual padding, so always `_pack_` - return type('struct', (ctypes.Structure,), dict( - _fields_=_fields_, - _pack_=1, - __module__=None, - )) - - - def _ctype_from_dtype(dtype): - if dtype.fields is not None: - return _ctype_from_dtype_structured(dtype) - elif dtype.subdtype is not None: - return _ctype_from_dtype_subarray(dtype) - else: - return _ctype_from_dtype_scalar(dtype) - - - def as_ctypes_type(dtype): - r""" - Convert a dtype into a ctypes type. - - Parameters - ---------- - dtype : dtype - The dtype to convert - - Returns - ------- - ctype - A ctype scalar, union, array, or struct - - Raises - ------ - NotImplementedError - If the conversion is not possible - - Notes - ----- - This function does not losslessly round-trip in either direction. - - ``np.dtype(as_ctypes_type(dt))`` will: - - - insert padding fields - - reorder fields to be sorted by offset - - discard field titles - - ``as_ctypes_type(np.dtype(ctype))`` will: - - - discard the class names of `ctypes.Structure`\ s and - `ctypes.Union`\ s - - convert single-element `ctypes.Union`\ s into single-element - `ctypes.Structure`\ s - - insert padding fields - - """ - return _ctype_from_dtype(_dtype(dtype)) - - - def as_array(obj, shape=None): - """ - Create a numpy array from a ctypes array or POINTER. - - The numpy array shares the memory with the ctypes object. - - The shape parameter must be given if converting from a ctypes POINTER. 
- The shape parameter is ignored if converting from a ctypes array - """ - if isinstance(obj, ctypes._Pointer): - # convert pointers to an array of the desired shape - if shape is None: - raise TypeError( - 'as_array() requires a shape argument when called on a ' - 'pointer') - p_arr_type = ctypes.POINTER(_ctype_ndarray(obj._type_, shape)) - obj = ctypes.cast(obj, p_arr_type).contents - - return array(obj, copy=False) - - - def as_ctypes(obj): - """Create and return a ctypes object from a numpy array. Actually - anything that exposes the __array_interface__ is accepted.""" - ai = obj.__array_interface__ - if ai["strides"]: - raise TypeError("strided arrays not supported") - if ai["version"] != 3: - raise TypeError("only __array_interface__ version 3 supported") - addr, readonly = ai["data"] - if readonly: - raise TypeError("readonly arrays unsupported") - - # can't use `_dtype((ai["typestr"], ai["shape"]))` here, as it overflows - # dtype.itemsize (gh-14214) - ctype_scalar = as_ctypes_type(ai["typestr"]) - result_type = _ctype_ndarray(ctype_scalar, ai["shape"]) - result = result_type.from_address(addr) - result.__keep = obj - return result diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/__config__.py b/venv/lib/python3.7/site-packages/numpy/distutils/__config__.py deleted file mode 100644 index db2e454..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/__config__.py +++ /dev/null @@ -1,40 +0,0 @@ -# This file is generated by numpy's setup.py -# It contains system_info results at the time of building this package. 
-__all__ = ["get_info","show"] - - -import os -import sys - -extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs') - -if sys.platform == 'win32' and os.path.isdir(extra_dll_dir): - if sys.version_info >= (3, 8): - os.add_dll_directory(extra_dll_dir) - else: - os.environ.setdefault('PATH', '') - os.environ['PATH'] += os.pathsep + extra_dll_dir - -blas_mkl_info={} -blis_info={} -openblas_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]} -blas_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]} -lapack_mkl_info={} -openblas_lapack_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]} -lapack_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]} - -def get_info(name): - g = globals() - return g.get(name, g.get(name + "_info", {})) - -def show(): - for name,info_dict in globals().items(): - if name[0] == "_" or type(info_dict) is not type({}): continue - print(name + ":") - if not info_dict: - print(" NOT AVAILABLE") - for k,v in info_dict.items(): - v = str(v) - if k == "sources" and len(v) > 200: - v = v[:60] + " ...\n... " + v[-60:] - print(" %s = %s" % (k,v)) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/__init__.py b/venv/lib/python3.7/site-packages/numpy/distutils/__init__.py deleted file mode 100644 index 8dbb63b..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/__init__.py +++ /dev/null @@ -1,53 +0,0 @@ -""" -An enhanced distutils, providing support for Fortran compilers, for BLAS, -LAPACK and other common libraries for numerical computing, and more. 
- -Public submodules are:: - - misc_util - system_info - cpu_info - log - exec_command - -For details, please see the *Packaging* and *NumPy Distutils User Guide* -sections of the NumPy Reference Guide. - -For configuring the preference for and location of libraries like BLAS and -LAPACK, and for setting include paths and similar build options, please see -``site.cfg.example`` in the root of the NumPy repository or sdist. - -""" - -from __future__ import division, absolute_import, print_function - -# Must import local ccompiler ASAP in order to get -# customized CCompiler.spawn effective. -from . import ccompiler -from . import unixccompiler - -from .npy_pkg_config import * - -# If numpy is installed, add distutils.test() -try: - from . import __config__ - # Normally numpy is installed if the above import works, but an interrupted - # in-place build could also have left a __config__.py. In that case the - # next import may still fail, so keep it inside the try block. - from numpy._pytesttester import PytestTester - test = PytestTester(__name__) - del PytestTester -except ImportError: - pass - - -def customized_fcompiler(plat=None, compiler=None): - from numpy.distutils.fcompiler import new_fcompiler - c = new_fcompiler(plat=plat, compiler=compiler) - c.customize() - return c - -def customized_ccompiler(plat=None, compiler=None, verbose=1): - c = ccompiler.new_compiler(plat=plat, compiler=compiler, verbose=verbose) - c.customize('') - return c diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/_shell_utils.py b/venv/lib/python3.7/site-packages/numpy/distutils/_shell_utils.py deleted file mode 100644 index 82abd5f..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/_shell_utils.py +++ /dev/null @@ -1,91 +0,0 @@ -""" -Helper functions for interacting with the shell, and consuming shell-style -parameters provided in config files. 
-""" -import os -import shlex -import subprocess -try: - from shlex import quote -except ImportError: - from pipes import quote - -__all__ = ['WindowsParser', 'PosixParser', 'NativeParser'] - - -class CommandLineParser: - """ - An object that knows how to split and join command-line arguments. - - It must be true that ``argv == split(join(argv))`` for all ``argv``. - The reverse neednt be true - `join(split(cmd))` may result in the addition - or removal of unnecessary escaping. - """ - @staticmethod - def join(argv): - """ Join a list of arguments into a command line string """ - raise NotImplementedError - - @staticmethod - def split(cmd): - """ Split a command line string into a list of arguments """ - raise NotImplementedError - - -class WindowsParser: - """ - The parsing behavior used by `subprocess.call("string")` on Windows, which - matches the Microsoft C/C++ runtime. - - Note that this is _not_ the behavior of cmd. - """ - @staticmethod - def join(argv): - # note that list2cmdline is specific to the windows syntax - return subprocess.list2cmdline(argv) - - @staticmethod - def split(cmd): - import ctypes # guarded import for systems without ctypes - try: - ctypes.windll - except AttributeError: - raise NotImplementedError - - # Windows has special parsing rules for the executable (no quotes), - # that we do not care about - insert a dummy element - if not cmd: - return [] - cmd = 'dummy ' + cmd - - CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW - CommandLineToArgvW.restype = ctypes.POINTER(ctypes.c_wchar_p) - CommandLineToArgvW.argtypes = (ctypes.c_wchar_p, ctypes.POINTER(ctypes.c_int)) - - nargs = ctypes.c_int() - lpargs = CommandLineToArgvW(cmd, ctypes.byref(nargs)) - args = [lpargs[i] for i in range(nargs.value)] - assert not ctypes.windll.kernel32.LocalFree(lpargs) - - # strip the element we inserted - assert args[0] == "dummy" - return args[1:] - - -class PosixParser: - """ - The parsing behavior used by `subprocess.call("string", 
shell=True)` on Posix. - """ - @staticmethod - def join(argv): - return ' '.join(quote(arg) for arg in argv) - - @staticmethod - def split(cmd): - return shlex.split(cmd, posix=True) - - -if os.name == 'nt': - NativeParser = WindowsParser -elif os.name == 'posix': - NativeParser = PosixParser diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/ccompiler.py b/venv/lib/python3.7/site-packages/numpy/distutils/ccompiler.py deleted file mode 100644 index 6438790..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/ccompiler.py +++ /dev/null @@ -1,805 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import re -import sys -import types -import shlex -import time -import subprocess -from copy import copy -from distutils import ccompiler -from distutils.ccompiler import * -from distutils.errors import DistutilsExecError, DistutilsModuleError, \ - DistutilsPlatformError, CompileError -from distutils.sysconfig import customize_compiler -from distutils.version import LooseVersion - -from numpy.distutils import log -from numpy.distutils.compat import get_exception -from numpy.distutils.exec_command import ( - filepath_from_subprocess_output, forward_bytes_to_stdout -) -from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \ - get_num_build_jobs, \ - _commandline_dep_string - -# globals for parallel build management -try: - import threading -except ImportError: - import dummy_threading as threading -_job_semaphore = None -_global_lock = threading.Lock() -_processing_files = set() - - -def _needs_build(obj, cc_args, extra_postargs, pp_opts): - """ - Check if an objects needs to be rebuild based on its dependencies - - Parameters - ---------- - obj : str - object file - - Returns - ------- - bool - """ - # defined in unixcompiler.py - dep_file = obj + '.d' - if not os.path.exists(dep_file): - return True - - # dep_file is a makefile containing 'object: dependencies' - # formatted like posix shell (spaces 
escaped, \ line continuations) - # the last line contains the compiler commandline arguments as some - # projects may compile an extension multiple times with different - # arguments - with open(dep_file, "r") as f: - lines = f.readlines() - - cmdline =_commandline_dep_string(cc_args, extra_postargs, pp_opts) - last_cmdline = lines[-1] - if last_cmdline != cmdline: - return True - - contents = ''.join(lines[:-1]) - deps = [x for x in shlex.split(contents, posix=True) - if x != "\n" and not x.endswith(":")] - - try: - t_obj = os.stat(obj).st_mtime - - # check if any of the dependencies is newer than the object - # the dependencies includes the source used to create the object - for f in deps: - if os.stat(f).st_mtime > t_obj: - return True - except OSError: - # no object counts as newer (shouldn't happen if dep_file exists) - return True - - return False - - -def replace_method(klass, method_name, func): - if sys.version_info[0] < 3: - m = types.MethodType(func, None, klass) - else: - # Py3k does not have unbound method anymore, MethodType does not work - m = lambda self, *args, **kw: func(self, *args, **kw) - setattr(klass, method_name, m) - - -###################################################################### -## Method that subclasses may redefine. But don't call this method, -## it i private to CCompiler class and may return unexpected -## results if used elsewhere. So, you have been warned.. - -def CCompiler_find_executables(self): - """ - Does nothing here, but is called by the get_version method and can be - overridden by subclasses. In particular it is redefined in the `FCompiler` - class where more documentation can be found. - - """ - pass - - -replace_method(CCompiler, 'find_executables', CCompiler_find_executables) - - -# Using customized CCompiler.spawn. -def CCompiler_spawn(self, cmd, display=None): - """ - Execute a command in a sub-process. - - Parameters - ---------- - cmd : str - The command to execute. 
- display : str or sequence of str, optional - The text to add to the log file kept by `numpy.distutils`. - If not given, `display` is equal to `cmd`. - - Returns - ------- - None - - Raises - ------ - DistutilsExecError - If the command failed, i.e. the exit status was not 0. - - """ - if display is None: - display = cmd - if is_sequence(display): - display = ' '.join(list(display)) - log.info(display) - try: - if self.verbose: - subprocess.check_output(cmd) - else: - subprocess.check_output(cmd, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as exc: - o = exc.output - s = exc.returncode - except OSError: - # OSError doesn't have the same hooks for the exception - # output, but exec_command() historically would use an - # empty string for EnvironmentError (base class for - # OSError) - o = b'' - # status previously used by exec_command() for parent - # of OSError - s = 127 - else: - # use a convenience return here so that any kind of - # caught exception will execute the default code after the - # try / except block, which handles various exceptions - return None - - if is_sequence(cmd): - cmd = ' '.join(list(cmd)) - - if self.verbose: - forward_bytes_to_stdout(o) - - if re.search(b'Too many open files', o): - msg = '\nTry rerunning setup command until build succeeds.' - else: - msg = '' - raise DistutilsExecError('Command "%s" failed with exit status %d%s' % - (cmd, s, msg)) - -replace_method(CCompiler, 'spawn', CCompiler_spawn) - -def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''): - """ - Return the name of the object files for the given source files. - - Parameters - ---------- - source_filenames : list of str - The list of paths to source files. Paths can be either relative or - absolute, this is handled transparently. - strip_dir : bool, optional - Whether to strip the directory from the returned paths. If True, - the file name prepended by `output_dir` is returned. Default is False. 
- output_dir : str, optional - If given, this path is prepended to the returned paths to the - object files. - - Returns - ------- - obj_names : list of str - The list of paths to the object files corresponding to the source - files in `source_filenames`. - - """ - if output_dir is None: - output_dir = '' - obj_names = [] - for src_name in source_filenames: - base, ext = os.path.splitext(os.path.normpath(src_name)) - base = os.path.splitdrive(base)[1] # Chop off the drive - base = base[os.path.isabs(base):] # If abs, chop off leading / - if base.startswith('..'): - # Resolve starting relative path components, middle ones - # (if any) have been handled by os.path.normpath above. - i = base.rfind('..')+2 - d = base[:i] - d = os.path.basename(os.path.abspath(d)) - base = d + base[i:] - if ext not in self.src_extensions: - raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name)) - if strip_dir: - base = os.path.basename(base) - obj_name = os.path.join(output_dir, base + self.obj_extension) - obj_names.append(obj_name) - return obj_names - -replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames) - -def CCompiler_compile(self, sources, output_dir=None, macros=None, - include_dirs=None, debug=0, extra_preargs=None, - extra_postargs=None, depends=None): - """ - Compile one or more source files. - - Please refer to the Python distutils API reference for more details. - - Parameters - ---------- - sources : list of str - A list of filenames - output_dir : str, optional - Path to the output directory. - macros : list of tuples - A list of macro definitions. - include_dirs : list of str, optional - The directories to add to the default include file search path for - this compilation only. - debug : bool, optional - Whether or not to output debug symbols in or alongside the object - file(s). - extra_preargs, extra_postargs : ? - Extra pre- and post-arguments. 
- depends : list of str, optional - A list of file names that all targets depend on. - - Returns - ------- - objects : list of str - A list of object file names, one per source file `sources`. - - Raises - ------ - CompileError - If compilation fails. - - """ - # This method is effective only with Python >=2.3 distutils. - # Any changes here should be applied also to fcompiler.compile - # method to support pre Python 2.3 distutils. - global _job_semaphore - - jobs = get_num_build_jobs() - - # setup semaphore to not exceed number of compile jobs when parallelized at - # extension level (python >= 3.5) - with _global_lock: - if _job_semaphore is None: - _job_semaphore = threading.Semaphore(jobs) - - if not sources: - return [] - # FIXME:RELATIVE_IMPORT - if sys.version_info[0] < 3: - from .fcompiler import FCompiler, is_f_file, has_f90_header - else: - from numpy.distutils.fcompiler import (FCompiler, is_f_file, - has_f90_header) - if isinstance(self, FCompiler): - display = [] - for fc in ['f77', 'f90', 'fix']: - fcomp = getattr(self, 'compiler_'+fc) - if fcomp is None: - continue - display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp))) - display = '\n'.join(display) - else: - ccomp = self.compiler_so - display = "C compiler: %s\n" % (' '.join(ccomp),) - log.info(display) - macros, objects, extra_postargs, pp_opts, build = \ - self._setup_compile(output_dir, macros, include_dirs, sources, - depends, extra_postargs) - cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) - display = "compile options: '%s'" % (' '.join(cc_args)) - if extra_postargs: - display += "\nextra options: '%s'" % (' '.join(extra_postargs)) - log.info(display) - - def single_compile(args): - obj, (src, ext) = args - if not _needs_build(obj, cc_args, extra_postargs, pp_opts): - return - - # check if we are currently already processing the same object - # happens when using the same source in multiple extensions - while True: - # need explicit lock as there is no atomic check and 
add with GIL - with _global_lock: - # file not being worked on, start working - if obj not in _processing_files: - _processing_files.add(obj) - break - # wait for the processing to end - time.sleep(0.1) - - try: - # retrieve slot from our #job semaphore and build - with _job_semaphore: - self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) - finally: - # register being done processing - with _global_lock: - _processing_files.remove(obj) - - - if isinstance(self, FCompiler): - objects_to_build = list(build.keys()) - f77_objects, other_objects = [], [] - for obj in objects: - if obj in objects_to_build: - src, ext = build[obj] - if self.compiler_type=='absoft': - obj = cyg2win32(obj) - src = cyg2win32(src) - if is_f_file(src) and not has_f90_header(src): - f77_objects.append((obj, (src, ext))) - else: - other_objects.append((obj, (src, ext))) - - # f77 objects can be built in parallel - build_items = f77_objects - # build f90 modules serial, module files are generated during - # compilation and may be used by files later in the list so the - # ordering is important - for o in other_objects: - single_compile(o) - else: - build_items = build.items() - - if len(build) > 1 and jobs > 1: - # build parallel - import multiprocessing.pool - pool = multiprocessing.pool.ThreadPool(jobs) - pool.map(single_compile, build_items) - pool.close() - else: - # build serial - for o in build_items: - single_compile(o) - - # Return *all* object filenames, not just the ones we just built. - return objects - -replace_method(CCompiler, 'compile', CCompiler_compile) - -def CCompiler_customize_cmd(self, cmd, ignore=()): - """ - Customize compiler using distutils command. - - Parameters - ---------- - cmd : class instance - An instance inheriting from `distutils.cmd.Command`. - ignore : sequence of str, optional - List of `CCompiler` commands (without ``'set_'``) that should not be - altered. 
Strings that are checked for are: - ``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs', - 'rpath', 'link_objects')``. - - Returns - ------- - None - - """ - log.info('customize %s using %s' % (self.__class__.__name__, - cmd.__class__.__name__)) - def allow(attr): - return getattr(cmd, attr, None) is not None and attr not in ignore - - if allow('include_dirs'): - self.set_include_dirs(cmd.include_dirs) - if allow('define'): - for (name, value) in cmd.define: - self.define_macro(name, value) - if allow('undef'): - for macro in cmd.undef: - self.undefine_macro(macro) - if allow('libraries'): - self.set_libraries(self.libraries + cmd.libraries) - if allow('library_dirs'): - self.set_library_dirs(self.library_dirs + cmd.library_dirs) - if allow('rpath'): - self.set_runtime_library_dirs(cmd.rpath) - if allow('link_objects'): - self.set_link_objects(cmd.link_objects) - -replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd) - -def _compiler_to_string(compiler): - props = [] - mx = 0 - keys = list(compiler.executables.keys()) - for key in ['version', 'libraries', 'library_dirs', - 'object_switch', 'compile_switch', - 'include_dirs', 'define', 'undef', 'rpath', 'link_objects']: - if key not in keys: - keys.append(key) - for key in keys: - if hasattr(compiler, key): - v = getattr(compiler, key) - mx = max(mx, len(key)) - props.append((key, repr(v))) - fmt = '%-' + repr(mx+1) + 's = %s' - lines = [fmt % prop for prop in props] - return '\n'.join(lines) - -def CCompiler_show_customization(self): - """ - Print the compiler customizations to stdout. - - Parameters - ---------- - None - - Returns - ------- - None - - Notes - ----- - Printing is only done if the distutils log threshold is < 2. 
- - """ - if 0: - for attrname in ['include_dirs', 'define', 'undef', - 'libraries', 'library_dirs', - 'rpath', 'link_objects']: - attr = getattr(self, attrname, None) - if not attr: - continue - log.info("compiler '%s' is set to %s" % (attrname, attr)) - try: - self.get_version() - except Exception: - pass - if log._global_log.threshold<2: - print('*'*80) - print(self.__class__) - print(_compiler_to_string(self)) - print('*'*80) - -replace_method(CCompiler, 'show_customization', CCompiler_show_customization) - -def CCompiler_customize(self, dist, need_cxx=0): - """ - Do any platform-specific customization of a compiler instance. - - This method calls `distutils.sysconfig.customize_compiler` for - platform-specific customization, as well as optionally remove a flag - to suppress spurious warnings in case C++ code is being compiled. - - Parameters - ---------- - dist : object - This parameter is not used for anything. - need_cxx : bool, optional - Whether or not C++ has to be compiled. If so (True), the - ``"-Wstrict-prototypes"`` option is removed to prevent spurious - warnings. Default is False. - - Returns - ------- - None - - Notes - ----- - All the default options used by distutils can be extracted with:: - - from distutils import sysconfig - sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS', - 'CCSHARED', 'LDSHARED', 'SO') - - """ - # See FCompiler.customize for suggested usage. - log.info('customize %s' % (self.__class__.__name__)) - customize_compiler(self) - if need_cxx: - # In general, distutils uses -Wstrict-prototypes, but this option is - # not valid for C++ code, only for C. Remove it if it's there to - # avoid a spurious warning on every compilation. 
- try: - self.compiler_so.remove('-Wstrict-prototypes') - except (AttributeError, ValueError): - pass - - if hasattr(self, 'compiler') and 'cc' in self.compiler[0]: - if not self.compiler_cxx: - if self.compiler[0].startswith('gcc'): - a, b = 'gcc', 'g++' - else: - a, b = 'cc', 'c++' - self.compiler_cxx = [self.compiler[0].replace(a, b)]\ - + self.compiler[1:] - else: - if hasattr(self, 'compiler'): - log.warn("#### %s #######" % (self.compiler,)) - if not hasattr(self, 'compiler_cxx'): - log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__) - - - # check if compiler supports gcc style automatic dependencies - # run on every extension so skip for known good compilers - if hasattr(self, 'compiler') and ('gcc' in self.compiler[0] or - 'g++' in self.compiler[0] or - 'clang' in self.compiler[0]): - self._auto_depends = True - elif os.name == 'posix': - import tempfile - import shutil - tmpdir = tempfile.mkdtemp() - try: - fn = os.path.join(tmpdir, "file.c") - with open(fn, "w") as f: - f.write("int a;\n") - self.compile([fn], output_dir=tmpdir, - extra_preargs=['-MMD', '-MF', fn + '.d']) - self._auto_depends = True - except CompileError: - self._auto_depends = False - finally: - shutil.rmtree(tmpdir) - - return - -replace_method(CCompiler, 'customize', CCompiler_customize) - -def simple_version_match(pat=r'[-.\d]+', ignore='', start=''): - """ - Simple matching of version numbers, for use in CCompiler and FCompiler. - - Parameters - ---------- - pat : str, optional - A regular expression matching version numbers. - Default is ``r'[-.\\d]+'``. - ignore : str, optional - A regular expression matching patterns to skip. - Default is ``''``, in which case nothing is skipped. - start : str, optional - A regular expression matching the start of where to start looking - for version numbers. - Default is ``''``, in which case searching is started at the - beginning of the version string given to `matcher`. 
- - Returns - ------- - matcher : callable - A function that is appropriate to use as the ``.version_match`` - attribute of a `CCompiler` class. `matcher` takes a single parameter, - a version string. - - """ - def matcher(self, version_string): - # version string may appear in the second line, so getting rid - # of new lines: - version_string = version_string.replace('\n', ' ') - pos = 0 - if start: - m = re.match(start, version_string) - if not m: - return None - pos = m.end() - while True: - m = re.search(pat, version_string[pos:]) - if not m: - return None - if ignore and re.match(ignore, m.group(0)): - pos = m.end() - continue - break - return m.group(0) - return matcher - -def CCompiler_get_version(self, force=False, ok_status=[0]): - """ - Return compiler version, or None if compiler is not available. - - Parameters - ---------- - force : bool, optional - If True, force a new determination of the version, even if the - compiler already has a version attribute. Default is False. - ok_status : list of int, optional - The list of status values returned by the version look-up process - for which a version string is returned. If the status value is not - in `ok_status`, None is returned. Default is ``[0]``. - - Returns - ------- - version : str or None - Version string, in the format of `distutils.version.LooseVersion`. 
- - """ - if not force and hasattr(self, 'version'): - return self.version - self.find_executables() - try: - version_cmd = self.version_cmd - except AttributeError: - return None - if not version_cmd or not version_cmd[0]: - return None - try: - matcher = self.version_match - except AttributeError: - try: - pat = self.version_pattern - except AttributeError: - return None - def matcher(version_string): - m = re.match(pat, version_string) - if not m: - return None - version = m.group('version') - return version - - try: - output = subprocess.check_output(version_cmd, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as exc: - output = exc.output - status = exc.returncode - except OSError: - # match the historical returns for a parent - # exception class caught by exec_command() - status = 127 - output = b'' - else: - # output isn't actually a filepath but we do this - # for now to match previous distutils behavior - output = filepath_from_subprocess_output(output) - status = 0 - - version = None - if status in ok_status: - version = matcher(output) - if version: - version = LooseVersion(version) - self.version = version - return version - -replace_method(CCompiler, 'get_version', CCompiler_get_version) - -def CCompiler_cxx_compiler(self): - """ - Return the C++ compiler. - - Parameters - ---------- - None - - Returns - ------- - cxx : class instance - The C++ compiler, as a `CCompiler` instance. 
- - """ - if self.compiler_type in ('msvc', 'intelw', 'intelemw'): - return self - - cxx = copy(self) - cxx.compiler_so = [cxx.compiler_cxx[0]] + cxx.compiler_so[1:] - if sys.platform.startswith('aix') and 'ld_so_aix' in cxx.linker_so[0]: - # AIX needs the ld_so_aix script included with Python - cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \ - + cxx.linker_so[2:] - else: - cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:] - return cxx - -replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler) - -compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler', - "Intel C Compiler for 32-bit applications") -compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler', - "Intel C Itanium Compiler for Itanium-based applications") -compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler', - "Intel C Compiler for 64-bit applications") -compiler_class['intelw'] = ('intelccompiler', 'IntelCCompilerW', - "Intel C Compiler for 32-bit applications on Windows") -compiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW', - "Intel C Compiler for 64-bit applications on Windows") -compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler', - "PathScale Compiler for SiCortex-based applications") -ccompiler._default_compilers += (('linux.*', 'intel'), - ('linux.*', 'intele'), - ('linux.*', 'intelem'), - ('linux.*', 'pathcc'), - ('nt', 'intelw'), - ('nt', 'intelemw')) - -if sys.platform == 'win32': - compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler', - "Mingw32 port of GNU C Compiler for Win32"\ - "(for MSC built Python)") - if mingw32(): - # On windows platforms, we want to default to mingw32 (gcc) - # because msvc can't build blitz stuff. 
- log.info('Setting mingw32 as default compiler for nt.') - ccompiler._default_compilers = (('nt', 'mingw32'),) \ - + ccompiler._default_compilers - - -_distutils_new_compiler = new_compiler -def new_compiler (plat=None, - compiler=None, - verbose=None, - dry_run=0, - force=0): - # Try first C compilers from numpy.distutils. - if verbose is None: - verbose = log.get_threshold() <= log.INFO - if plat is None: - plat = os.name - try: - if compiler is None: - compiler = get_default_compiler(plat) - (module_name, class_name, long_description) = compiler_class[compiler] - except KeyError: - msg = "don't know how to compile C/C++ code on platform '%s'" % plat - if compiler is not None: - msg = msg + " with '%s' compiler" % compiler - raise DistutilsPlatformError(msg) - module_name = "numpy.distutils." + module_name - try: - __import__ (module_name) - except ImportError: - msg = str(get_exception()) - log.info('%s in numpy.distutils; trying from distutils', - str(msg)) - module_name = module_name[6:] - try: - __import__(module_name) - except ImportError: - msg = str(get_exception()) - raise DistutilsModuleError("can't compile C/C++ code: unable to load module '%s'" % \ - module_name) - try: - module = sys.modules[module_name] - klass = vars(module)[class_name] - except KeyError: - raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " + - "in module '%s'") % (class_name, module_name)) - compiler = klass(None, dry_run, force) - compiler.verbose = verbose - log.debug('new_compiler returns %s' % (klass)) - return compiler - -ccompiler.new_compiler = new_compiler - -_distutils_gen_lib_options = gen_lib_options -def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries): - # the version of this function provided by CPython allows the following - # to return lists, which are unpacked automatically: - # - compiler.runtime_library_dir_option - # our version extends the behavior to: - # - compiler.library_dir_option - # - 
compiler.library_option - # - compiler.find_library_file - r = _distutils_gen_lib_options(compiler, library_dirs, - runtime_library_dirs, libraries) - lib_opts = [] - for i in r: - if is_sequence(i): - lib_opts.extend(list(i)) - else: - lib_opts.append(i) - return lib_opts -ccompiler.gen_lib_options = gen_lib_options - -# Also fix up the various compiler modules, which do -# from distutils.ccompiler import gen_lib_options -# Don't bother with mwerks, as we don't support Classic Mac. -for _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']: - _m = sys.modules.get('distutils.' + _cc + 'compiler') - if _m is not None: - setattr(_m, 'gen_lib_options', gen_lib_options) - diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/__init__.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/__init__.py deleted file mode 100644 index 76a2600..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/__init__.py +++ /dev/null @@ -1,43 +0,0 @@ -"""distutils.command - -Package containing implementation of all the standard Distutils -commands. 
- -""" -from __future__ import division, absolute_import, print_function - -def test_na_writable_attributes_deletion(): - a = np.NA(2) - attr = ['payload', 'dtype'] - for s in attr: - assert_raises(AttributeError, delattr, a, s) - - -__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $" - -distutils_all = [ #'build_py', - 'clean', - 'install_clib', - 'install_scripts', - 'bdist', - 'bdist_dumb', - 'bdist_wininst', - ] - -__import__('distutils.command', globals(), locals(), distutils_all) - -__all__ = ['build', - 'config_compiler', - 'config', - 'build_src', - 'build_py', - 'build_ext', - 'build_clib', - 'build_scripts', - 'install', - 'install_data', - 'install_headers', - 'install_lib', - 'bdist_rpm', - 'sdist', - ] + distutils_all diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/autodist.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/autodist.py deleted file mode 100644 index 9c98b84..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/autodist.py +++ /dev/null @@ -1,122 +0,0 @@ -"""This module implements additional tests ala autoconf which can be useful. 
- -""" -from __future__ import division, absolute_import, print_function - -import textwrap - -# We put them here since they could be easily reused outside numpy.distutils - -def check_inline(cmd): - """Return the inline identifier (may be empty).""" - cmd._check_compiler() - body = textwrap.dedent(""" - #ifndef __cplusplus - static %(inline)s int static_func (void) - { - return 0; - } - %(inline)s int nostatic_func (void) - { - return 0; - } - #endif""") - - for kw in ['inline', '__inline__', '__inline']: - st = cmd.try_compile(body % {'inline': kw}, None, None) - if st: - return kw - - return '' - - -def check_restrict(cmd): - """Return the restrict identifier (may be empty).""" - cmd._check_compiler() - body = textwrap.dedent(""" - static int static_func (char * %(restrict)s a) - { - return 0; - } - """) - - for kw in ['restrict', '__restrict__', '__restrict']: - st = cmd.try_compile(body % {'restrict': kw}, None, None) - if st: - return kw - - return '' - - -def check_compiler_gcc4(cmd): - """Return True if the C compiler is GCC 4.x.""" - cmd._check_compiler() - body = textwrap.dedent(""" - int - main() - { - #if (! 
defined __GNUC__) || (__GNUC__ < 4) - #error gcc >= 4 required - #endif - return 0; - } - """) - return cmd.try_compile(body, None, None) - - -def check_gcc_function_attribute(cmd, attribute, name): - """Return True if the given function attribute is supported.""" - cmd._check_compiler() - body = textwrap.dedent(""" - #pragma GCC diagnostic error "-Wattributes" - #pragma clang diagnostic error "-Wattributes" - - int %s %s(void*); - - int - main() - { - return 0; - } - """) % (attribute, name) - return cmd.try_compile(body, None, None) != 0 - - -def check_gcc_function_attribute_with_intrinsics(cmd, attribute, name, code, - include): - """Return True if the given function attribute is supported with - intrinsics.""" - cmd._check_compiler() - body = textwrap.dedent(""" - #include<%s> - int %s %s(void) - { - %s; - return 0; - } - - int - main() - { - return 0; - } - """) % (include, attribute, name, code) - return cmd.try_compile(body, None, None) != 0 - - -def check_gcc_variable_attribute(cmd, attribute): - """Return True if the given variable attribute is supported.""" - cmd._check_compiler() - body = textwrap.dedent(""" - #pragma GCC diagnostic error "-Wattributes" - #pragma clang diagnostic error "-Wattributes" - - int %s foo; - - int - main() - { - return 0; - } - """) % (attribute, ) - return cmd.try_compile(body, None, None) != 0 diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/bdist_rpm.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/bdist_rpm.py deleted file mode 100644 index 3e52a50..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/bdist_rpm.py +++ /dev/null @@ -1,24 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import sys -if 'setuptools' in sys.modules: - from setuptools.command.bdist_rpm import bdist_rpm as old_bdist_rpm -else: - from distutils.command.bdist_rpm import bdist_rpm as old_bdist_rpm - -class bdist_rpm(old_bdist_rpm): - - def 
_make_spec_file(self): - spec_file = old_bdist_rpm._make_spec_file(self) - - # Replace hardcoded setup.py script name - # with the real setup script name. - setup_py = os.path.basename(sys.argv[0]) - if setup_py == 'setup.py': - return spec_file - new_spec_file = [] - for line in spec_file: - line = line.replace('setup.py', setup_py) - new_spec_file.append(line) - return new_spec_file diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/build.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/build.py deleted file mode 100644 index 5a9da12..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/build.py +++ /dev/null @@ -1,42 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import sys -from distutils.command.build import build as old_build -from distutils.util import get_platform -from numpy.distutils.command.config_compiler import show_fortran_compilers - -class build(old_build): - - sub_commands = [('config_cc', lambda *args: True), - ('config_fc', lambda *args: True), - ('build_src', old_build.has_ext_modules), - ] + old_build.sub_commands - - user_options = old_build.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ('warn-error', None, - "turn all warnings into errors (-Werror)"), - ] - - help_options = old_build.help_options + [ - ('help-fcompiler', None, "list available Fortran compilers", - show_fortran_compilers), - ] - - def initialize_options(self): - old_build.initialize_options(self) - self.fcompiler = None - self.warn_error = False - - def finalize_options(self): - build_scripts = self.build_scripts - old_build.finalize_options(self) - plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2]) - if build_scripts is None: - self.build_scripts = os.path.join(self.build_base, - 'scripts' + plat_specifier) - - def run(self): - old_build.run(self) diff --git 
a/venv/lib/python3.7/site-packages/numpy/distutils/command/build_clib.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/build_clib.py deleted file mode 100644 index 13edf07..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/build_clib.py +++ /dev/null @@ -1,333 +0,0 @@ -""" Modified version of build_clib that handles fortran source files. -""" -from __future__ import division, absolute_import, print_function - -import os -from glob import glob -import shutil -from distutils.command.build_clib import build_clib as old_build_clib -from distutils.errors import DistutilsSetupError, DistutilsError, \ - DistutilsFileError - -from numpy.distutils import log -from distutils.dep_util import newer_group -from numpy.distutils.misc_util import filter_sources, has_f_sources,\ - has_cxx_sources, all_strings, get_lib_source_files, is_sequence, \ - get_numpy_include_dirs - -# Fix Python distutils bug sf #1718574: -_l = old_build_clib.user_options -for _i in range(len(_l)): - if _l[_i][0] in ['build-clib', 'build-temp']: - _l[_i] = (_l[_i][0] + '=',) + _l[_i][1:] -# - - -class build_clib(old_build_clib): - - description = "build C/C++/F libraries used by Python extensions" - - user_options = old_build_clib.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ('inplace', 'i', 'Build in-place'), - ('parallel=', 'j', - "number of parallel jobs"), - ('warn-error', None, - "turn all warnings into errors (-Werror)"), - ] - - boolean_options = old_build_clib.boolean_options + ['inplace', 'warn-error'] - - def initialize_options(self): - old_build_clib.initialize_options(self) - self.fcompiler = None - self.inplace = 0 - self.parallel = None - self.warn_error = None - - def finalize_options(self): - if self.parallel: - try: - self.parallel = int(self.parallel) - except ValueError: - raise ValueError("--parallel/-j argument must be an integer") - old_build_clib.finalize_options(self) - self.set_undefined_options('build', - 
('parallel', 'parallel'), - ('warn_error', 'warn_error'), - ) - - def have_f_sources(self): - for (lib_name, build_info) in self.libraries: - if has_f_sources(build_info.get('sources', [])): - return True - return False - - def have_cxx_sources(self): - for (lib_name, build_info) in self.libraries: - if has_cxx_sources(build_info.get('sources', [])): - return True - return False - - def run(self): - if not self.libraries: - return - - # Make sure that library sources are complete. - languages = [] - - # Make sure that extension sources are complete. - self.run_command('build_src') - - for (lib_name, build_info) in self.libraries: - l = build_info.get('language', None) - if l and l not in languages: - languages.append(l) - - from distutils.ccompiler import new_compiler - self.compiler = new_compiler(compiler=self.compiler, - dry_run=self.dry_run, - force=self.force) - self.compiler.customize(self.distribution, - need_cxx=self.have_cxx_sources()) - - if self.warn_error: - self.compiler.compiler.append('-Werror') - self.compiler.compiler_so.append('-Werror') - - libraries = self.libraries - self.libraries = None - self.compiler.customize_cmd(self) - self.libraries = libraries - - self.compiler.show_customization() - - if self.have_f_sources(): - from numpy.distutils.fcompiler import new_fcompiler - self._f_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90='f90' in languages, - c_compiler=self.compiler) - if self._f_compiler is not None: - self._f_compiler.customize(self.distribution) - - libraries = self.libraries - self.libraries = None - self._f_compiler.customize_cmd(self) - self.libraries = libraries - - self._f_compiler.show_customization() - else: - self._f_compiler = None - - self.build_libraries(self.libraries) - - if self.inplace: - for l in self.distribution.installed_libraries: - libname = self.compiler.library_filename(l.name) - source = os.path.join(self.build_clib, libname) 
- target = os.path.join(l.target_dir, libname) - self.mkpath(l.target_dir) - shutil.copy(source, target) - - def get_source_files(self): - self.check_library_list(self.libraries) - filenames = [] - for lib in self.libraries: - filenames.extend(get_lib_source_files(lib)) - return filenames - - def build_libraries(self, libraries): - for (lib_name, build_info) in libraries: - self.build_a_library(build_info, lib_name, libraries) - - def build_a_library(self, build_info, lib_name, libraries): - # default compilers - compiler = self.compiler - fcompiler = self._f_compiler - - sources = build_info.get('sources') - if sources is None or not is_sequence(sources): - raise DistutilsSetupError(("in 'libraries' option (library '%s'), " + - "'sources' must be present and must be " + - "a list of source filenames") % lib_name) - sources = list(sources) - - c_sources, cxx_sources, f_sources, fmodule_sources \ - = filter_sources(sources) - requiref90 = not not fmodule_sources or \ - build_info.get('language', 'c') == 'f90' - - # save source type information so that build_ext can use it. 
- source_languages = [] - if c_sources: - source_languages.append('c') - if cxx_sources: - source_languages.append('c++') - if requiref90: - source_languages.append('f90') - elif f_sources: - source_languages.append('f77') - build_info['source_languages'] = source_languages - - lib_file = compiler.library_filename(lib_name, - output_dir=self.build_clib) - depends = sources + build_info.get('depends', []) - if not (self.force or newer_group(depends, lib_file, 'newer')): - log.debug("skipping '%s' library (up-to-date)", lib_name) - return - else: - log.info("building '%s' library", lib_name) - - config_fc = build_info.get('config_fc', {}) - if fcompiler is not None and config_fc: - log.info('using additional config_fc from setup script ' - 'for fortran compiler: %s' - % (config_fc,)) - from numpy.distutils.fcompiler import new_fcompiler - fcompiler = new_fcompiler(compiler=fcompiler.compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=requiref90, - c_compiler=self.compiler) - if fcompiler is not None: - dist = self.distribution - base_config_fc = dist.get_option_dict('config_fc').copy() - base_config_fc.update(config_fc) - fcompiler.customize(base_config_fc) - - # check availability of Fortran compilers - if (f_sources or fmodule_sources) and fcompiler is None: - raise DistutilsError("library %s has Fortran sources" - " but no Fortran compiler found" % (lib_name)) - - if fcompiler is not None: - fcompiler.extra_f77_compile_args = build_info.get( - 'extra_f77_compile_args') or [] - fcompiler.extra_f90_compile_args = build_info.get( - 'extra_f90_compile_args') or [] - - macros = build_info.get('macros') - include_dirs = build_info.get('include_dirs') - if include_dirs is None: - include_dirs = [] - extra_postargs = build_info.get('extra_compiler_args') or [] - - include_dirs.extend(get_numpy_include_dirs()) - # where compiled F90 module files are: - module_dirs = build_info.get('module_dirs') or [] - module_build_dir = 
os.path.dirname(lib_file) - if requiref90: - self.mkpath(module_build_dir) - - if compiler.compiler_type == 'msvc': - # this hack works around the msvc compiler attributes - # problem, msvc uses its own convention :( - c_sources += cxx_sources - cxx_sources = [] - - objects = [] - if c_sources: - log.info("compiling C sources") - objects = compiler.compile(c_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - - if cxx_sources: - log.info("compiling C++ sources") - cxx_compiler = compiler.cxx_compiler() - cxx_objects = cxx_compiler.compile(cxx_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - objects.extend(cxx_objects) - - if f_sources or fmodule_sources: - extra_postargs = [] - f_objects = [] - - if requiref90: - if fcompiler.module_dir_switch is None: - existing_modules = glob('*.mod') - extra_postargs += fcompiler.module_options( - module_dirs, module_build_dir) - - if fmodule_sources: - log.info("compiling Fortran 90 module sources") - f_objects += fcompiler.compile(fmodule_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - - if requiref90 and self._f_compiler.module_dir_switch is None: - # move new compiled F90 module files to module_build_dir - for f in glob('*.mod'): - if f in existing_modules: - continue - t = os.path.join(module_build_dir, f) - if os.path.abspath(f) == os.path.abspath(t): - continue - if os.path.isfile(t): - os.remove(t) - try: - self.move_file(f, module_build_dir) - except DistutilsFileError: - log.warn('failed to move %r to %r' - % (f, module_build_dir)) - - if f_sources: - log.info("compiling Fortran sources") - f_objects += fcompiler.compile(f_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - 
extra_postargs=extra_postargs) - else: - f_objects = [] - - if f_objects and not fcompiler.can_ccompiler_link(compiler): - # Default linker cannot link Fortran object files, and results - # need to be wrapped later. Instead of creating a real static - # library, just keep track of the object files. - listfn = os.path.join(self.build_clib, - lib_name + '.fobjects') - with open(listfn, 'w') as f: - f.write("\n".join(os.path.abspath(obj) for obj in f_objects)) - - listfn = os.path.join(self.build_clib, - lib_name + '.cobjects') - with open(listfn, 'w') as f: - f.write("\n".join(os.path.abspath(obj) for obj in objects)) - - # create empty "library" file for dependency tracking - lib_fname = os.path.join(self.build_clib, - lib_name + compiler.static_lib_extension) - with open(lib_fname, 'wb') as f: - pass - else: - # assume that default linker is suitable for - # linking Fortran object files - objects.extend(f_objects) - compiler.create_static_lib(objects, lib_name, - output_dir=self.build_clib, - debug=self.debug) - - # fix library dependencies - clib_libraries = build_info.get('libraries', []) - for lname, binfo in libraries: - if lname in clib_libraries: - clib_libraries.extend(binfo.get('libraries', [])) - if clib_libraries: - build_info['libraries'] = clib_libraries diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/build_ext.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/build_ext.py deleted file mode 100644 index cd9b1c6..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/build_ext.py +++ /dev/null @@ -1,611 +0,0 @@ -""" Modified version of build_ext that handles fortran source files. 
- -""" -from __future__ import division, absolute_import, print_function - -import os -import subprocess -from glob import glob - -from distutils.dep_util import newer_group -from distutils.command.build_ext import build_ext as old_build_ext -from distutils.errors import DistutilsFileError, DistutilsSetupError,\ - DistutilsError -from distutils.file_util import copy_file - -from numpy.distutils import log -from numpy.distutils.exec_command import filepath_from_subprocess_output -from numpy.distutils.system_info import combine_paths, system_info -from numpy.distutils.misc_util import filter_sources, has_f_sources, \ - has_cxx_sources, get_ext_source_files, \ - get_numpy_include_dirs, is_sequence, get_build_architecture, \ - msvc_version -from numpy.distutils.command.config_compiler import show_fortran_compilers - - - -class build_ext (old_build_ext): - - description = "build C/C++/F extensions (compile/link to build directory)" - - user_options = old_build_ext.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ('parallel=', 'j', - "number of parallel jobs"), - ('warn-error', None, - "turn all warnings into errors (-Werror)"), - ] - - help_options = old_build_ext.help_options + [ - ('help-fcompiler', None, "list available Fortran compilers", - show_fortran_compilers), - ] - - boolean_options = old_build_ext.boolean_options + ['warn-error'] - - def initialize_options(self): - old_build_ext.initialize_options(self) - self.fcompiler = None - self.parallel = None - self.warn_error = None - - def finalize_options(self): - if self.parallel: - try: - self.parallel = int(self.parallel) - except ValueError: - raise ValueError("--parallel/-j argument must be an integer") - - # Ensure that self.include_dirs and self.distribution.include_dirs - # refer to the same list object. finalize_options will modify - # self.include_dirs, but self.distribution.include_dirs is used - # during the actual build. 
- # self.include_dirs is None unless paths are specified with - # --include-dirs. - # The include paths will be passed to the compiler in the order: - # numpy paths, --include-dirs paths, Python include path. - if isinstance(self.include_dirs, str): - self.include_dirs = self.include_dirs.split(os.pathsep) - incl_dirs = self.include_dirs or [] - if self.distribution.include_dirs is None: - self.distribution.include_dirs = [] - self.include_dirs = self.distribution.include_dirs - self.include_dirs.extend(incl_dirs) - - old_build_ext.finalize_options(self) - self.set_undefined_options('build', - ('parallel', 'parallel'), - ('warn_error', 'warn_error'), - ) - - def run(self): - if not self.extensions: - return - - # Make sure that extension sources are complete. - self.run_command('build_src') - - if self.distribution.has_c_libraries(): - if self.inplace: - if self.distribution.have_run.get('build_clib'): - log.warn('build_clib already run, it is too late to ' - 'ensure in-place build of build_clib') - build_clib = self.distribution.get_command_obj( - 'build_clib') - else: - build_clib = self.distribution.get_command_obj( - 'build_clib') - build_clib.inplace = 1 - build_clib.ensure_finalized() - build_clib.run() - self.distribution.have_run['build_clib'] = 1 - - else: - self.run_command('build_clib') - build_clib = self.get_finalized_command('build_clib') - self.library_dirs.append(build_clib.build_clib) - else: - build_clib = None - - # Not including C libraries to the list of - # extension libraries automatically to prevent - # bogus linking commands. Extensions must - # explicitly specify the C libraries that they use. 
- - from distutils.ccompiler import new_compiler - from numpy.distutils.fcompiler import new_fcompiler - - compiler_type = self.compiler - # Initialize C compiler: - self.compiler = new_compiler(compiler=compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force) - self.compiler.customize(self.distribution) - self.compiler.customize_cmd(self) - - if self.warn_error: - self.compiler.compiler.append('-Werror') - self.compiler.compiler_so.append('-Werror') - - self.compiler.show_customization() - - # Setup directory for storing generated extra DLL files on Windows - self.extra_dll_dir = os.path.join(self.build_temp, '.libs') - if not os.path.isdir(self.extra_dll_dir): - os.makedirs(self.extra_dll_dir) - - # Create mapping of libraries built by build_clib: - clibs = {} - if build_clib is not None: - for libname, build_info in build_clib.libraries or []: - if libname in clibs and clibs[libname] != build_info: - log.warn('library %r defined more than once,' - ' overwriting build_info\n%s... \nwith\n%s...' - % (libname, repr(clibs[libname])[:300], repr(build_info)[:300])) - clibs[libname] = build_info - # .. and distribution libraries: - for libname, build_info in self.distribution.libraries or []: - if libname in clibs: - # build_clib libraries have a precedence before distribution ones - continue - clibs[libname] = build_info - - # Determine if C++/Fortran 77/Fortran 90 compilers are needed. - # Update extension libraries, library_dirs, and macros. 
- all_languages = set() - for ext in self.extensions: - ext_languages = set() - c_libs = [] - c_lib_dirs = [] - macros = [] - for libname in ext.libraries: - if libname in clibs: - binfo = clibs[libname] - c_libs += binfo.get('libraries', []) - c_lib_dirs += binfo.get('library_dirs', []) - for m in binfo.get('macros', []): - if m not in macros: - macros.append(m) - - for l in clibs.get(libname, {}).get('source_languages', []): - ext_languages.add(l) - if c_libs: - new_c_libs = ext.libraries + c_libs - log.info('updating extension %r libraries from %r to %r' - % (ext.name, ext.libraries, new_c_libs)) - ext.libraries = new_c_libs - ext.library_dirs = ext.library_dirs + c_lib_dirs - if macros: - log.info('extending extension %r defined_macros with %r' - % (ext.name, macros)) - ext.define_macros = ext.define_macros + macros - - # determine extension languages - if has_f_sources(ext.sources): - ext_languages.add('f77') - if has_cxx_sources(ext.sources): - ext_languages.add('c++') - l = ext.language or self.compiler.detect_language(ext.sources) - if l: - ext_languages.add(l) - # reset language attribute for choosing proper linker - if 'c++' in ext_languages: - ext_language = 'c++' - elif 'f90' in ext_languages: - ext_language = 'f90' - elif 'f77' in ext_languages: - ext_language = 'f77' - else: - ext_language = 'c' # default - if l and l != ext_language and ext.language: - log.warn('resetting extension %r language from %r to %r.' 
% - (ext.name, l, ext_language)) - ext.language = ext_language - # global language - all_languages.update(ext_languages) - - need_f90_compiler = 'f90' in all_languages - need_f77_compiler = 'f77' in all_languages - need_cxx_compiler = 'c++' in all_languages - - # Initialize C++ compiler: - if need_cxx_compiler: - self._cxx_compiler = new_compiler(compiler=compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force) - compiler = self._cxx_compiler - compiler.customize(self.distribution, need_cxx=need_cxx_compiler) - compiler.customize_cmd(self) - compiler.show_customization() - self._cxx_compiler = compiler.cxx_compiler() - else: - self._cxx_compiler = None - - # Initialize Fortran 77 compiler: - if need_f77_compiler: - ctype = self.fcompiler - self._f77_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=False, - c_compiler=self.compiler) - fcompiler = self._f77_compiler - if fcompiler: - ctype = fcompiler.compiler_type - fcompiler.customize(self.distribution) - if fcompiler and fcompiler.get_version(): - fcompiler.customize_cmd(self) - fcompiler.show_customization() - else: - self.warn('f77_compiler=%s is not available.' % - (ctype)) - self._f77_compiler = None - else: - self._f77_compiler = None - - # Initialize Fortran 90 compiler: - if need_f90_compiler: - ctype = self.fcompiler - self._f90_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=True, - c_compiler=self.compiler) - fcompiler = self._f90_compiler - if fcompiler: - ctype = fcompiler.compiler_type - fcompiler.customize(self.distribution) - if fcompiler and fcompiler.get_version(): - fcompiler.customize_cmd(self) - fcompiler.show_customization() - else: - self.warn('f90_compiler=%s is not available.' 
% - (ctype)) - self._f90_compiler = None - else: - self._f90_compiler = None - - # Build extensions - self.build_extensions() - - # Copy over any extra DLL files - # FIXME: In the case where there are more than two packages, - # we blindly assume that both packages need all of the libraries, - # resulting in a larger wheel than is required. This should be fixed, - # but it's so rare that I won't bother to handle it. - pkg_roots = { - self.get_ext_fullname(ext.name).split('.')[0] - for ext in self.extensions - } - for pkg_root in pkg_roots: - shared_lib_dir = os.path.join(pkg_root, '.libs') - if not self.inplace: - shared_lib_dir = os.path.join(self.build_lib, shared_lib_dir) - for fn in os.listdir(self.extra_dll_dir): - if not os.path.isdir(shared_lib_dir): - os.makedirs(shared_lib_dir) - if not fn.lower().endswith('.dll'): - continue - runtime_lib = os.path.join(self.extra_dll_dir, fn) - copy_file(runtime_lib, shared_lib_dir) - - def swig_sources(self, sources, extensions=None): - # Do nothing. Swig sources have been handled in build_src command. 
- return sources - - def build_extension(self, ext): - sources = ext.sources - if sources is None or not is_sequence(sources): - raise DistutilsSetupError( - ("in 'ext_modules' option (extension '%s'), " + - "'sources' must be present and must be " + - "a list of source filenames") % ext.name) - sources = list(sources) - - if not sources: - return - - fullname = self.get_ext_fullname(ext.name) - if self.inplace: - modpath = fullname.split('.') - package = '.'.join(modpath[0:-1]) - base = modpath[-1] - build_py = self.get_finalized_command('build_py') - package_dir = build_py.get_package_dir(package) - ext_filename = os.path.join(package_dir, - self.get_ext_filename(base)) - else: - ext_filename = os.path.join(self.build_lib, - self.get_ext_filename(fullname)) - depends = sources + ext.depends - - if not (self.force or newer_group(depends, ext_filename, 'newer')): - log.debug("skipping '%s' extension (up-to-date)", ext.name) - return - else: - log.info("building '%s' extension", ext.name) - - extra_args = ext.extra_compile_args or [] - macros = ext.define_macros[:] - for undef in ext.undef_macros: - macros.append((undef,)) - - c_sources, cxx_sources, f_sources, fmodule_sources = \ - filter_sources(ext.sources) - - if self.compiler.compiler_type == 'msvc': - if cxx_sources: - # Needed to compile kiva.agg._agg extension. - extra_args.append('/Zm1000') - # this hack works around the msvc compiler attributes - # problem, msvc uses its own convention :( - c_sources += cxx_sources - cxx_sources = [] - - # Set Fortran/C++ compilers for compilation and linking. 
- if ext.language == 'f90': - fcompiler = self._f90_compiler - elif ext.language == 'f77': - fcompiler = self._f77_compiler - else: # in case ext.language is c++, for instance - fcompiler = self._f90_compiler or self._f77_compiler - if fcompiler is not None: - fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr( - ext, 'extra_f77_compile_args') else [] - fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr( - ext, 'extra_f90_compile_args') else [] - cxx_compiler = self._cxx_compiler - - # check for the availability of required compilers - if cxx_sources and cxx_compiler is None: - raise DistutilsError("extension %r has C++ sources" - "but no C++ compiler found" % (ext.name)) - if (f_sources or fmodule_sources) and fcompiler is None: - raise DistutilsError("extension %r has Fortran sources " - "but no Fortran compiler found" % (ext.name)) - if ext.language in ['f77', 'f90'] and fcompiler is None: - self.warn("extension %r has Fortran libraries " - "but no Fortran linker found, using default linker" % (ext.name)) - if ext.language == 'c++' and cxx_compiler is None: - self.warn("extension %r has C++ libraries " - "but no C++ linker found, using default linker" % (ext.name)) - - kws = {'depends': ext.depends} - output_dir = self.build_temp - - include_dirs = ext.include_dirs + get_numpy_include_dirs() - - c_objects = [] - if c_sources: - log.info("compiling C sources") - c_objects = self.compiler.compile(c_sources, - output_dir=output_dir, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_args, - **kws) - - if cxx_sources: - log.info("compiling C++ sources") - c_objects += cxx_compiler.compile(cxx_sources, - output_dir=output_dir, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_args, - **kws) - - extra_postargs = [] - f_objects = [] - if fmodule_sources: - log.info("compiling Fortran 90 module sources") - module_dirs = 
ext.module_dirs[:] - module_build_dir = os.path.join( - self.build_temp, os.path.dirname( - self.get_ext_filename(fullname))) - - self.mkpath(module_build_dir) - if fcompiler.module_dir_switch is None: - existing_modules = glob('*.mod') - extra_postargs += fcompiler.module_options( - module_dirs, module_build_dir) - f_objects += fcompiler.compile(fmodule_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs, - depends=ext.depends) - - if fcompiler.module_dir_switch is None: - for f in glob('*.mod'): - if f in existing_modules: - continue - t = os.path.join(module_build_dir, f) - if os.path.abspath(f) == os.path.abspath(t): - continue - if os.path.isfile(t): - os.remove(t) - try: - self.move_file(f, module_build_dir) - except DistutilsFileError: - log.warn('failed to move %r to %r' % - (f, module_build_dir)) - if f_sources: - log.info("compiling Fortran sources") - f_objects += fcompiler.compile(f_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs, - depends=ext.depends) - - if f_objects and not fcompiler.can_ccompiler_link(self.compiler): - unlinkable_fobjects = f_objects - objects = c_objects - else: - unlinkable_fobjects = [] - objects = c_objects + f_objects - - if ext.extra_objects: - objects.extend(ext.extra_objects) - extra_args = ext.extra_link_args or [] - libraries = self.get_libraries(ext)[:] - library_dirs = ext.library_dirs[:] - - linker = self.compiler.link_shared_object - # Always use system linker when using MSVC compiler. 
- if self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw'): - # expand libraries with fcompiler libraries as we are - # not using fcompiler linker - self._libs_with_msvc_and_fortran( - fcompiler, libraries, library_dirs) - - elif ext.language in ['f77', 'f90'] and fcompiler is not None: - linker = fcompiler.link_shared_object - if ext.language == 'c++' and cxx_compiler is not None: - linker = cxx_compiler.link_shared_object - - if fcompiler is not None: - objects, libraries = self._process_unlinkable_fobjects( - objects, libraries, - fcompiler, library_dirs, - unlinkable_fobjects) - - linker(objects, ext_filename, - libraries=libraries, - library_dirs=library_dirs, - runtime_library_dirs=ext.runtime_library_dirs, - extra_postargs=extra_args, - export_symbols=self.get_export_symbols(ext), - debug=self.debug, - build_temp=self.build_temp, - target_lang=ext.language) - - def _add_dummy_mingwex_sym(self, c_sources): - build_src = self.get_finalized_command("build_src").build_src - build_clib = self.get_finalized_command("build_clib").build_clib - objects = self.compiler.compile([os.path.join(build_src, - "gfortran_vs2003_hack.c")], - output_dir=self.build_temp) - self.compiler.create_static_lib( - objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug) - - def _process_unlinkable_fobjects(self, objects, libraries, - fcompiler, library_dirs, - unlinkable_fobjects): - libraries = list(libraries) - objects = list(objects) - unlinkable_fobjects = list(unlinkable_fobjects) - - # Expand possible fake static libraries to objects - for lib in list(libraries): - for libdir in library_dirs: - fake_lib = os.path.join(libdir, lib + '.fobjects') - if os.path.isfile(fake_lib): - # Replace fake static library - libraries.remove(lib) - with open(fake_lib, 'r') as f: - unlinkable_fobjects.extend(f.read().splitlines()) - - # Expand C objects - c_lib = os.path.join(libdir, lib + '.cobjects') - with open(c_lib, 'r') as f: - objects.extend(f.read().splitlines()) - 
- # Wrap unlinkable objects to a linkable one - if unlinkable_fobjects: - fobjects = [os.path.relpath(obj) for obj in unlinkable_fobjects] - wrapped = fcompiler.wrap_unlinkable_objects( - fobjects, output_dir=self.build_temp, - extra_dll_dir=self.extra_dll_dir) - objects.extend(wrapped) - - return objects, libraries - - def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries, - c_library_dirs): - if fcompiler is None: - return - - for libname in c_libraries: - if libname.startswith('msvc'): - continue - fileexists = False - for libdir in c_library_dirs or []: - libfile = os.path.join(libdir, '%s.lib' % (libname)) - if os.path.isfile(libfile): - fileexists = True - break - if fileexists: - continue - # make g77-compiled static libs available to MSVC - fileexists = False - for libdir in c_library_dirs: - libfile = os.path.join(libdir, 'lib%s.a' % (libname)) - if os.path.isfile(libfile): - # copy libname.a file to name.lib so that MSVC linker - # can find it - libfile2 = os.path.join(self.build_temp, libname + '.lib') - copy_file(libfile, libfile2) - if self.build_temp not in c_library_dirs: - c_library_dirs.append(self.build_temp) - fileexists = True - break - if fileexists: - continue - log.warn('could not find library %r in directories %s' - % (libname, c_library_dirs)) - - # Always use system linker when using MSVC compiler. 
- f_lib_dirs = [] - for dir in fcompiler.library_dirs: - # correct path when compiling in Cygwin but with normal Win - # Python - if dir.startswith('/usr/lib'): - try: - dir = subprocess.check_output(['cygpath', '-w', dir]) - except (OSError, subprocess.CalledProcessError): - pass - else: - dir = filepath_from_subprocess_output(dir) - f_lib_dirs.append(dir) - c_library_dirs.extend(f_lib_dirs) - - # make g77-compiled static libs available to MSVC - for lib in fcompiler.libraries: - if not lib.startswith('msvc'): - c_libraries.append(lib) - p = combine_paths(f_lib_dirs, 'lib' + lib + '.a') - if p: - dst_name = os.path.join(self.build_temp, lib + '.lib') - if not os.path.isfile(dst_name): - copy_file(p[0], dst_name) - if self.build_temp not in c_library_dirs: - c_library_dirs.append(self.build_temp) - - def get_source_files(self): - self.check_extensions_list(self.extensions) - filenames = [] - for ext in self.extensions: - filenames.extend(get_ext_source_files(ext)) - return filenames - - def get_outputs(self): - self.check_extensions_list(self.extensions) - - outputs = [] - for ext in self.extensions: - if not ext.sources: - continue - fullname = self.get_ext_fullname(ext.name) - outputs.append(os.path.join(self.build_lib, - self.get_ext_filename(fullname))) - return outputs diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/build_py.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/build_py.py deleted file mode 100644 index 54dcde4..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/build_py.py +++ /dev/null @@ -1,33 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from distutils.command.build_py import build_py as old_build_py -from numpy.distutils.misc_util import is_string - -class build_py(old_build_py): - - def run(self): - build_src = self.get_finalized_command('build_src') - if build_src.py_modules_dict and self.packages is None: - self.packages = 
list(build_src.py_modules_dict.keys ()) - old_build_py.run(self) - - def find_package_modules(self, package, package_dir): - modules = old_build_py.find_package_modules(self, package, package_dir) - - # Find build_src generated *.py files. - build_src = self.get_finalized_command('build_src') - modules += build_src.py_modules_dict.get(package, []) - - return modules - - def find_modules(self): - old_py_modules = self.py_modules[:] - new_py_modules = [_m for _m in self.py_modules if is_string(_m)] - self.py_modules[:] = new_py_modules - modules = old_build_py.find_modules(self) - self.py_modules[:] = old_py_modules - - return modules - - # XXX: Fix find_source_files for item in py_modules such that item is 3-tuple - # and item[2] is source file. diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/build_scripts.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/build_scripts.py deleted file mode 100644 index c8b25fc..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/build_scripts.py +++ /dev/null @@ -1,51 +0,0 @@ -""" Modified version of build_scripts that handles building scripts from functions. 
- -""" -from __future__ import division, absolute_import, print_function - -from distutils.command.build_scripts import build_scripts as old_build_scripts -from numpy.distutils import log -from numpy.distutils.misc_util import is_string - -class build_scripts(old_build_scripts): - - def generate_scripts(self, scripts): - new_scripts = [] - func_scripts = [] - for script in scripts: - if is_string(script): - new_scripts.append(script) - else: - func_scripts.append(script) - if not func_scripts: - return new_scripts - - build_dir = self.build_dir - self.mkpath(build_dir) - for func in func_scripts: - script = func(build_dir) - if not script: - continue - if is_string(script): - log.info(" adding '%s' to scripts" % (script,)) - new_scripts.append(script) - else: - [log.info(" adding '%s' to scripts" % (s,)) for s in script] - new_scripts.extend(list(script)) - return new_scripts - - def run (self): - if not self.scripts: - return - - self.scripts = self.generate_scripts(self.scripts) - # Now make sure that the distribution object has this list of scripts. - # setuptools' develop command requires that this be a list of filenames, - # not functions. - self.distribution.scripts = self.scripts - - return old_build_scripts.run(self) - - def get_source_files(self): - from numpy.distutils.misc_util import get_script_files - return get_script_files(self.scripts) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/build_src.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/build_src.py deleted file mode 100644 index 3e0522c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/build_src.py +++ /dev/null @@ -1,775 +0,0 @@ -""" Build swig and f2py sources. 
-""" -from __future__ import division, absolute_import, print_function - -import os -import re -import sys -import shlex -import copy - -from distutils.command import build_ext -from distutils.dep_util import newer_group, newer -from distutils.util import get_platform -from distutils.errors import DistutilsError, DistutilsSetupError - - -# this import can't be done here, as it uses numpy stuff only available -# after it's installed -#import numpy.f2py -from numpy.distutils import log -from numpy.distutils.misc_util import ( - fortran_ext_match, appendpath, is_string, is_sequence, get_cmd - ) -from numpy.distutils.from_template import process_file as process_f_file -from numpy.distutils.conv_template import process_file as process_c_file - -def subst_vars(target, source, d): - """Substitute any occurrence of @foo@ by d['foo'] from source file into - target.""" - var = re.compile('@([a-zA-Z_]+)@') - with open(source, 'r') as fs: - with open(target, 'w') as ft: - for l in fs: - m = var.search(l) - if m: - ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)])) - else: - ft.write(l) - -class build_src(build_ext.build_ext): - - description = "build sources from SWIG, F2PY files or a function" - - user_options = [ - ('build-src=', 'd', "directory to \"build\" sources to"), - ('f2py-opts=', None, "list of f2py command line options"), - ('swig=', None, "path to the SWIG executable"), - ('swig-opts=', None, "list of SWIG command line options"), - ('swig-cpp', None, "make SWIG create C++ files (default is autodetected from sources)"), - ('f2pyflags=', None, "additional flags to f2py (use --f2py-opts= instead)"), # obsolete - ('swigflags=', None, "additional flags to swig (use --swig-opts= instead)"), # obsolete - ('force', 'f', "forcibly build everything (ignore file timestamps)"), - ('inplace', 'i', - "ignore build-lib and put compiled extensions into the source " + - "directory alongside your pure Python modules"), - ('verbose-cfg', None, - "change logging level from WARN 
to INFO which will show all " + - "compiler output") - ] - - boolean_options = ['force', 'inplace', 'verbose-cfg'] - - help_options = [] - - def initialize_options(self): - self.extensions = None - self.package = None - self.py_modules = None - self.py_modules_dict = None - self.build_src = None - self.build_lib = None - self.build_base = None - self.force = None - self.inplace = None - self.package_dir = None - self.f2pyflags = None # obsolete - self.f2py_opts = None - self.swigflags = None # obsolete - self.swig_opts = None - self.swig_cpp = None - self.swig = None - self.verbose_cfg = None - - def finalize_options(self): - self.set_undefined_options('build', - ('build_base', 'build_base'), - ('build_lib', 'build_lib'), - ('force', 'force')) - if self.package is None: - self.package = self.distribution.ext_package - self.extensions = self.distribution.ext_modules - self.libraries = self.distribution.libraries or [] - self.py_modules = self.distribution.py_modules or [] - self.data_files = self.distribution.data_files or [] - - if self.build_src is None: - plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2]) - self.build_src = os.path.join(self.build_base, 'src'+plat_specifier) - - # py_modules_dict is used in build_py.find_package_modules - self.py_modules_dict = {} - - if self.f2pyflags: - if self.f2py_opts: - log.warn('ignoring --f2pyflags as --f2py-opts already used') - else: - self.f2py_opts = self.f2pyflags - self.f2pyflags = None - if self.f2py_opts is None: - self.f2py_opts = [] - else: - self.f2py_opts = shlex.split(self.f2py_opts) - - if self.swigflags: - if self.swig_opts: - log.warn('ignoring --swigflags as --swig-opts already used') - else: - self.swig_opts = self.swigflags - self.swigflags = None - - if self.swig_opts is None: - self.swig_opts = [] - else: - self.swig_opts = shlex.split(self.swig_opts) - - # use options from build_ext command - build_ext = self.get_finalized_command('build_ext') - if self.inplace is None: - 
self.inplace = build_ext.inplace - if self.swig_cpp is None: - self.swig_cpp = build_ext.swig_cpp - for c in ['swig', 'swig_opt']: - o = '--'+c.replace('_', '-') - v = getattr(build_ext, c, None) - if v: - if getattr(self, c): - log.warn('both build_src and build_ext define %s option' % (o)) - else: - log.info('using "%s=%s" option from build_ext command' % (o, v)) - setattr(self, c, v) - - def run(self): - log.info("build_src") - if not (self.extensions or self.libraries): - return - self.build_sources() - - def build_sources(self): - - if self.inplace: - self.get_package_dir = \ - self.get_finalized_command('build_py').get_package_dir - - self.build_py_modules_sources() - - for libname_info in self.libraries: - self.build_library_sources(*libname_info) - - if self.extensions: - self.check_extensions_list(self.extensions) - - for ext in self.extensions: - self.build_extension_sources(ext) - - self.build_data_files_sources() - self.build_npy_pkg_config() - - def build_data_files_sources(self): - if not self.data_files: - return - log.info('building data_files sources') - from numpy.distutils.misc_util import get_data_files - new_data_files = [] - for data in self.data_files: - if isinstance(data, str): - new_data_files.append(data) - elif isinstance(data, tuple): - d, files = data - if self.inplace: - build_dir = self.get_package_dir('.'.join(d.split(os.sep))) - else: - build_dir = os.path.join(self.build_src, d) - funcs = [f for f in files if hasattr(f, '__call__')] - files = [f for f in files if not hasattr(f, '__call__')] - for f in funcs: - if f.__code__.co_argcount==1: - s = f(build_dir) - else: - s = f() - if s is not None: - if isinstance(s, list): - files.extend(s) - elif isinstance(s, str): - files.append(s) - else: - raise TypeError(repr(s)) - filenames = get_data_files((d, files)) - new_data_files.append((d, filenames)) - else: - raise TypeError(repr(data)) - self.data_files[:] = new_data_files - - - def _build_npy_pkg_config(self, info, gd): - template, 
install_dir, subst_dict = info - template_dir = os.path.dirname(template) - for k, v in gd.items(): - subst_dict[k] = v - - if self.inplace == 1: - generated_dir = os.path.join(template_dir, install_dir) - else: - generated_dir = os.path.join(self.build_src, template_dir, - install_dir) - generated = os.path.basename(os.path.splitext(template)[0]) - generated_path = os.path.join(generated_dir, generated) - if not os.path.exists(generated_dir): - os.makedirs(generated_dir) - - subst_vars(generated_path, template, subst_dict) - - # Where to install relatively to install prefix - full_install_dir = os.path.join(template_dir, install_dir) - return full_install_dir, generated_path - - def build_npy_pkg_config(self): - log.info('build_src: building npy-pkg config files') - - # XXX: another ugly workaround to circumvent distutils brain damage. We - # need the install prefix here, but finalizing the options of the - # install command when only building sources cause error. Instead, we - # copy the install command instance, and finalize the copy so that it - # does not disrupt how distutils want to do things when with the - # original install command instance. - install_cmd = copy.copy(get_cmd('install')) - if not install_cmd.finalized == 1: - install_cmd.finalize_options() - build_npkg = False - if self.inplace == 1: - top_prefix = '.' 
- build_npkg = True - elif hasattr(install_cmd, 'install_libbase'): - top_prefix = install_cmd.install_libbase - build_npkg = True - - if build_npkg: - for pkg, infos in self.distribution.installed_pkg_config.items(): - pkg_path = self.distribution.package_dir[pkg] - prefix = os.path.join(os.path.abspath(top_prefix), pkg_path) - d = {'prefix': prefix} - for info in infos: - install_dir, generated = self._build_npy_pkg_config(info, d) - self.distribution.data_files.append((install_dir, - [generated])) - - def build_py_modules_sources(self): - if not self.py_modules: - return - log.info('building py_modules sources') - new_py_modules = [] - for source in self.py_modules: - if is_sequence(source) and len(source)==3: - package, module_base, source = source - if self.inplace: - build_dir = self.get_package_dir(package) - else: - build_dir = os.path.join(self.build_src, - os.path.join(*package.split('.'))) - if hasattr(source, '__call__'): - target = os.path.join(build_dir, module_base + '.py') - source = source(target) - if source is None: - continue - modules = [(package, module_base, source)] - if package not in self.py_modules_dict: - self.py_modules_dict[package] = [] - self.py_modules_dict[package] += modules - else: - new_py_modules.append(source) - self.py_modules[:] = new_py_modules - - def build_library_sources(self, lib_name, build_info): - sources = list(build_info.get('sources', [])) - - if not sources: - return - - log.info('building library "%s" sources' % (lib_name)) - - sources = self.generate_sources(sources, (lib_name, build_info)) - - sources = self.template_sources(sources, (lib_name, build_info)) - - sources, h_files = self.filter_h_files(sources) - - if h_files: - log.info('%s - nothing done with h_files = %s', - self.package, h_files) - - #for f in h_files: - # self.distribution.headers.append((lib_name,f)) - - build_info['sources'] = sources - return - - def build_extension_sources(self, ext): - - sources = list(ext.sources) - - 
log.info('building extension "%s" sources' % (ext.name)) - - fullname = self.get_ext_fullname(ext.name) - - modpath = fullname.split('.') - package = '.'.join(modpath[0:-1]) - - if self.inplace: - self.ext_target_dir = self.get_package_dir(package) - - sources = self.generate_sources(sources, ext) - sources = self.template_sources(sources, ext) - sources = self.swig_sources(sources, ext) - sources = self.f2py_sources(sources, ext) - sources = self.pyrex_sources(sources, ext) - - sources, py_files = self.filter_py_files(sources) - - if package not in self.py_modules_dict: - self.py_modules_dict[package] = [] - modules = [] - for f in py_files: - module = os.path.splitext(os.path.basename(f))[0] - modules.append((package, module, f)) - self.py_modules_dict[package] += modules - - sources, h_files = self.filter_h_files(sources) - - if h_files: - log.info('%s - nothing done with h_files = %s', - package, h_files) - #for f in h_files: - # self.distribution.headers.append((package,f)) - - ext.sources = sources - - def generate_sources(self, sources, extension): - new_sources = [] - func_sources = [] - for source in sources: - if is_string(source): - new_sources.append(source) - else: - func_sources.append(source) - if not func_sources: - return new_sources - if self.inplace and not is_sequence(extension): - build_dir = self.ext_target_dir - else: - if is_sequence(extension): - name = extension[0] - # if 'include_dirs' not in extension[1]: - # extension[1]['include_dirs'] = [] - # incl_dirs = extension[1]['include_dirs'] - else: - name = extension.name - # incl_dirs = extension.include_dirs - #if self.build_src not in incl_dirs: - # incl_dirs.append(self.build_src) - build_dir = os.path.join(*([self.build_src] - +name.split('.')[:-1])) - self.mkpath(build_dir) - - if self.verbose_cfg: - new_level = log.INFO - else: - new_level = log.WARN - old_level = log.set_threshold(new_level) - - for func in func_sources: - source = func(extension, build_dir) - if not source: - 
continue - if is_sequence(source): - [log.info(" adding '%s' to sources." % (s,)) for s in source] - new_sources.extend(source) - else: - log.info(" adding '%s' to sources." % (source,)) - new_sources.append(source) - log.set_threshold(old_level) - return new_sources - - def filter_py_files(self, sources): - return self.filter_files(sources, ['.py']) - - def filter_h_files(self, sources): - return self.filter_files(sources, ['.h', '.hpp', '.inc']) - - def filter_files(self, sources, exts = []): - new_sources = [] - files = [] - for source in sources: - (base, ext) = os.path.splitext(source) - if ext in exts: - files.append(source) - else: - new_sources.append(source) - return new_sources, files - - def template_sources(self, sources, extension): - new_sources = [] - if is_sequence(extension): - depends = extension[1].get('depends') - include_dirs = extension[1].get('include_dirs') - else: - depends = extension.depends - include_dirs = extension.include_dirs - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.src': # Template file - if self.inplace: - target_dir = os.path.dirname(base) - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - self.mkpath(target_dir) - target_file = os.path.join(target_dir, os.path.basename(base)) - if (self.force or newer_group([source] + depends, target_file)): - if _f_pyf_ext_match(base): - log.info("from_template:> %s" % (target_file)) - outstr = process_f_file(source) - else: - log.info("conv_template:> %s" % (target_file)) - outstr = process_c_file(source) - with open(target_file, 'w') as fid: - fid.write(outstr) - if _header_ext_match(target_file): - d = os.path.dirname(target_file) - if d not in include_dirs: - log.info(" adding '%s' to include_dirs." 
% (d)) - include_dirs.append(d) - new_sources.append(target_file) - else: - new_sources.append(source) - return new_sources - - def pyrex_sources(self, sources, extension): - """Pyrex not supported; this remains for Cython support (see below)""" - new_sources = [] - ext_name = extension.name.split('.')[-1] - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.pyx': - target_file = self.generate_a_pyrex_source(base, ext_name, - source, - extension) - new_sources.append(target_file) - else: - new_sources.append(source) - return new_sources - - def generate_a_pyrex_source(self, base, ext_name, source, extension): - """Pyrex is not supported, but some projects monkeypatch this method. - - That allows compiling Cython code, see gh-6955. - This method will remain here for compatibility reasons. - """ - return [] - - def f2py_sources(self, sources, extension): - new_sources = [] - f2py_sources = [] - f_sources = [] - f2py_targets = {} - target_dirs = [] - ext_name = extension.name.split('.')[-1] - skip_f2py = 0 - - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.pyf': # F2PY interface file - if self.inplace: - target_dir = os.path.dirname(base) - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - if os.path.isfile(source): - name = get_f2py_modulename(source) - if name != ext_name: - raise DistutilsSetupError('mismatch of extension names: %s ' - 'provides %r but expected %r' % ( - source, name, ext_name)) - target_file = os.path.join(target_dir, name+'module.c') - else: - log.debug(' source %s does not exist: skipping f2py\'ing.' \ - % (source)) - name = ext_name - skip_f2py = 1 - target_file = os.path.join(target_dir, name+'module.c') - if not os.path.isfile(target_file): - log.warn(' target %s does not exist:\n '\ - 'Assuming %smodule.c was generated with '\ - '"build_src --inplace" command.' 
\ - % (target_file, name)) - target_dir = os.path.dirname(base) - target_file = os.path.join(target_dir, name+'module.c') - if not os.path.isfile(target_file): - raise DistutilsSetupError("%r missing" % (target_file,)) - log.info(' Yes! Using %r as up-to-date target.' \ - % (target_file)) - target_dirs.append(target_dir) - f2py_sources.append(source) - f2py_targets[source] = target_file - new_sources.append(target_file) - elif fortran_ext_match(ext): - f_sources.append(source) - else: - new_sources.append(source) - - if not (f2py_sources or f_sources): - return new_sources - - for d in target_dirs: - self.mkpath(d) - - f2py_options = extension.f2py_options + self.f2py_opts - - if self.distribution.libraries: - for name, build_info in self.distribution.libraries: - if name in extension.libraries: - f2py_options.extend(build_info.get('f2py_options', [])) - - log.info("f2py options: %s" % (f2py_options)) - - if f2py_sources: - if len(f2py_sources) != 1: - raise DistutilsSetupError( - 'only one .pyf file is allowed per extension module but got'\ - ' more: %r' % (f2py_sources,)) - source = f2py_sources[0] - target_file = f2py_targets[source] - target_dir = os.path.dirname(target_file) or '.' 
- depends = [source] + extension.depends - if (self.force or newer_group(depends, target_file, 'newer')) \ - and not skip_f2py: - log.info("f2py: %s" % (source)) - import numpy.f2py - numpy.f2py.run_main(f2py_options - + ['--build-dir', target_dir, source]) - else: - log.debug(" skipping '%s' f2py interface (up-to-date)" % (source)) - else: - #XXX TODO: --inplace support for sdist command - if is_sequence(extension): - name = extension[0] - else: name = extension.name - target_dir = os.path.join(*([self.build_src] - +name.split('.')[:-1])) - target_file = os.path.join(target_dir, ext_name + 'module.c') - new_sources.append(target_file) - depends = f_sources + extension.depends - if (self.force or newer_group(depends, target_file, 'newer')) \ - and not skip_f2py: - log.info("f2py:> %s" % (target_file)) - self.mkpath(target_dir) - import numpy.f2py - numpy.f2py.run_main(f2py_options + ['--lower', - '--build-dir', target_dir]+\ - ['-m', ext_name]+f_sources) - else: - log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\ - % (target_file)) - - if not os.path.isfile(target_file): - raise DistutilsError("f2py target file %r not generated" % (target_file,)) - - build_dir = os.path.join(self.build_src, target_dir) - target_c = os.path.join(build_dir, 'fortranobject.c') - target_h = os.path.join(build_dir, 'fortranobject.h') - log.info(" adding '%s' to sources." % (target_c)) - new_sources.append(target_c) - if build_dir not in extension.include_dirs: - log.info(" adding '%s' to include_dirs." 
% (build_dir)) - extension.include_dirs.append(build_dir) - - if not skip_f2py: - import numpy.f2py - d = os.path.dirname(numpy.f2py.__file__) - source_c = os.path.join(d, 'src', 'fortranobject.c') - source_h = os.path.join(d, 'src', 'fortranobject.h') - if newer(source_c, target_c) or newer(source_h, target_h): - self.mkpath(os.path.dirname(target_c)) - self.copy_file(source_c, target_c) - self.copy_file(source_h, target_h) - else: - if not os.path.isfile(target_c): - raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,)) - if not os.path.isfile(target_h): - raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,)) - - for name_ext in ['-f2pywrappers.f', '-f2pywrappers2.f90']: - filename = os.path.join(target_dir, ext_name + name_ext) - if os.path.isfile(filename): - log.info(" adding '%s' to sources." % (filename)) - f_sources.append(filename) - - return new_sources + f_sources - - def swig_sources(self, sources, extension): - # Assuming SWIG 1.3.14 or later. 
See compatibility note in - # http://www.swig.org/Doc1.3/Python.html#Python_nn6 - - new_sources = [] - swig_sources = [] - swig_targets = {} - target_dirs = [] - py_files = [] # swig generated .py files - target_ext = '.c' - if '-c++' in extension.swig_opts: - typ = 'c++' - is_cpp = True - extension.swig_opts.remove('-c++') - elif self.swig_cpp: - typ = 'c++' - is_cpp = True - else: - typ = None - is_cpp = False - skip_swig = 0 - ext_name = extension.name.split('.')[-1] - - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.i': # SWIG interface file - # the code below assumes that the sources list - # contains not more than one .i SWIG interface file - if self.inplace: - target_dir = os.path.dirname(base) - py_target_dir = self.ext_target_dir - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - py_target_dir = target_dir - if os.path.isfile(source): - name = get_swig_modulename(source) - if name != ext_name[1:]: - raise DistutilsSetupError( - 'mismatch of extension names: %s provides %r' - ' but expected %r' % (source, name, ext_name[1:])) - if typ is None: - typ = get_swig_target(source) - is_cpp = typ=='c++' - else: - typ2 = get_swig_target(source) - if typ2 is None: - log.warn('source %r does not define swig target, assuming %s swig target' \ - % (source, typ)) - elif typ!=typ2: - log.warn('expected %r but source %r defines %r swig target' \ - % (typ, source, typ2)) - if typ2=='c++': - log.warn('resetting swig target to c++ (some targets may have .c extension)') - is_cpp = True - else: - log.warn('assuming that %r has c++ swig target' % (source)) - if is_cpp: - target_ext = '.cpp' - target_file = os.path.join(target_dir, '%s_wrap%s' \ - % (name, target_ext)) - else: - log.warn(' source %s does not exist: skipping swig\'ing.' 
\ - % (source)) - name = ext_name[1:] - skip_swig = 1 - target_file = _find_swig_target(target_dir, name) - if not os.path.isfile(target_file): - log.warn(' target %s does not exist:\n '\ - 'Assuming %s_wrap.{c,cpp} was generated with '\ - '"build_src --inplace" command.' \ - % (target_file, name)) - target_dir = os.path.dirname(base) - target_file = _find_swig_target(target_dir, name) - if not os.path.isfile(target_file): - raise DistutilsSetupError("%r missing" % (target_file,)) - log.warn(' Yes! Using %r as up-to-date target.' \ - % (target_file)) - target_dirs.append(target_dir) - new_sources.append(target_file) - py_files.append(os.path.join(py_target_dir, name+'.py')) - swig_sources.append(source) - swig_targets[source] = new_sources[-1] - else: - new_sources.append(source) - - if not swig_sources: - return new_sources - - if skip_swig: - return new_sources + py_files - - for d in target_dirs: - self.mkpath(d) - - swig = self.swig or self.find_swig() - swig_cmd = [swig, "-python"] + extension.swig_opts - if is_cpp: - swig_cmd.append('-c++') - for d in extension.include_dirs: - swig_cmd.append('-I'+d) - for source in swig_sources: - target = swig_targets[source] - depends = [source] + extension.depends - if self.force or newer_group(depends, target, 'newer'): - log.info("%s: %s" % (os.path.basename(swig) \ - + (is_cpp and '++' or ''), source)) - self.spawn(swig_cmd + self.swig_opts \ - + ["-o", target, '-outdir', py_target_dir, source]) - else: - log.debug(" skipping '%s' swig interface (up-to-date)" \ - % (source)) - - return new_sources + py_files - -_f_pyf_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match -_header_ext_match = re.compile(r'.*[.](inc|h|hpp)\Z', re.I).match - -#### SWIG related auxiliary functions #### -_swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P[\w_]+)".*\)|)\s*(?P[\w_]+)', - re.I).match -_has_c_header = re.compile(r'-[*]-\s*c\s*-[*]-', re.I).search -_has_cpp_header = 
re.compile(r'-[*]-\s*c[+][+]\s*-[*]-', re.I).search - -def get_swig_target(source): - with open(source, 'r') as f: - result = None - line = f.readline() - if _has_cpp_header(line): - result = 'c++' - if _has_c_header(line): - result = 'c' - return result - -def get_swig_modulename(source): - with open(source, 'r') as f: - name = None - for line in f: - m = _swig_module_name_match(line) - if m: - name = m.group('name') - break - return name - -def _find_swig_target(target_dir, name): - for ext in ['.cpp', '.c']: - target = os.path.join(target_dir, '%s_wrap%s' % (name, ext)) - if os.path.isfile(target): - break - return target - -#### F2PY related auxiliary functions #### - -_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]+)', - re.I).match -_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]*?' - r'__user__[\w_]*)', re.I).match - -def get_f2py_modulename(source): - name = None - with open(source) as f: - for line in f: - m = _f2py_module_name_match(line) - if m: - if _f2py_user_module_name_match(line): # skip *__user__* names - continue - name = m.group('name') - break - return name - -########################################## diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/config.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/config.py deleted file mode 100644 index b9f2fa7..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/config.py +++ /dev/null @@ -1,513 +0,0 @@ -# Added Fortran compiler support to config. Currently useful only for -# try_compile call. try_run works but is untested for most of Fortran -# compilers (they must define linker_exe first). 
-# Pearu Peterson -from __future__ import division, absolute_import, print_function - -import os, signal -import warnings -import sys -import subprocess -import textwrap - -from distutils.command.config import config as old_config -from distutils.command.config import LANG_EXT -from distutils import log -from distutils.file_util import copy_file -from distutils.ccompiler import CompileError, LinkError -import distutils -from numpy.distutils.exec_command import filepath_from_subprocess_output -from numpy.distutils.mingw32ccompiler import generate_manifest -from numpy.distutils.command.autodist import (check_gcc_function_attribute, - check_gcc_function_attribute_with_intrinsics, - check_gcc_variable_attribute, - check_inline, - check_restrict, - check_compiler_gcc4) -from numpy.distutils.compat import get_exception - -LANG_EXT['f77'] = '.f' -LANG_EXT['f90'] = '.f90' - -class config(old_config): - old_config.user_options += [ - ('fcompiler=', None, "specify the Fortran compiler type"), - ] - - def initialize_options(self): - self.fcompiler = None - old_config.initialize_options(self) - - def _check_compiler (self): - old_config._check_compiler(self) - from numpy.distutils.fcompiler import FCompiler, new_fcompiler - - if sys.platform == 'win32' and (self.compiler.compiler_type in - ('msvc', 'intelw', 'intelemw')): - # XXX: hack to circumvent a python 2.6 bug with msvc9compiler: - # initialize call query_vcvarsall, which throws an IOError, and - # causes an error along the way without much information. We try to - # catch it here, hoping it is early enough, and print an helpful - # message instead of Error: None. - if not self.compiler.initialized: - try: - self.compiler.initialize() - except IOError: - e = get_exception() - msg = textwrap.dedent("""\ - Could not initialize compiler instance: do you have Visual Studio - installed? If you are trying to build with MinGW, please use "python setup.py - build -c mingw32" instead. 
If you have Visual Studio installed, check it is - correctly installed, and the right version (VS 2008 for python 2.6, 2.7 and 3.2, - VS 2010 for >= 3.3). - - Original exception was: %s, and the Compiler class was %s - ============================================================================""") \ - % (e, self.compiler.__class__.__name__) - print(textwrap.dedent("""\ - ============================================================================""")) - raise distutils.errors.DistutilsPlatformError(msg) - - # After MSVC is initialized, add an explicit /MANIFEST to linker - # flags. See issues gh-4245 and gh-4101 for details. Also - # relevant are issues 4431 and 16296 on the Python bug tracker. - from distutils import msvc9compiler - if msvc9compiler.get_build_version() >= 10: - for ldflags in [self.compiler.ldflags_shared, - self.compiler.ldflags_shared_debug]: - if '/MANIFEST' not in ldflags: - ldflags.append('/MANIFEST') - - if not isinstance(self.fcompiler, FCompiler): - self.fcompiler = new_fcompiler(compiler=self.fcompiler, - dry_run=self.dry_run, force=1, - c_compiler=self.compiler) - if self.fcompiler is not None: - self.fcompiler.customize(self.distribution) - if self.fcompiler.get_version(): - self.fcompiler.customize_cmd(self) - self.fcompiler.show_customization() - - def _wrap_method(self, mth, lang, args): - from distutils.ccompiler import CompileError - from distutils.errors import DistutilsExecError - save_compiler = self.compiler - if lang in ['f77', 'f90']: - self.compiler = self.fcompiler - try: - ret = mth(*((self,)+args)) - except (DistutilsExecError, CompileError): - str(get_exception()) - self.compiler = save_compiler - raise CompileError - self.compiler = save_compiler - return ret - - def _compile (self, body, headers, include_dirs, lang): - src, obj = self._wrap_method(old_config._compile, lang, - (body, headers, include_dirs, lang)) - # _compile in unixcompiler.py sometimes creates .d dependency files. - # Clean them up. 
- self.temp_files.append(obj + '.d') - return src, obj - - def _link (self, body, - headers, include_dirs, - libraries, library_dirs, lang): - if self.compiler.compiler_type=='msvc': - libraries = (libraries or [])[:] - library_dirs = (library_dirs or [])[:] - if lang in ['f77', 'f90']: - lang = 'c' # always use system linker when using MSVC compiler - if self.fcompiler: - for d in self.fcompiler.library_dirs or []: - # correct path when compiling in Cygwin but with - # normal Win Python - if d.startswith('/usr/lib'): - try: - d = subprocess.check_output(['cygpath', - '-w', d]) - except (OSError, subprocess.CalledProcessError): - pass - else: - d = filepath_from_subprocess_output(d) - library_dirs.append(d) - for libname in self.fcompiler.libraries or []: - if libname not in libraries: - libraries.append(libname) - for libname in libraries: - if libname.startswith('msvc'): continue - fileexists = False - for libdir in library_dirs or []: - libfile = os.path.join(libdir, '%s.lib' % (libname)) - if os.path.isfile(libfile): - fileexists = True - break - if fileexists: continue - # make g77-compiled static libs available to MSVC - fileexists = False - for libdir in library_dirs: - libfile = os.path.join(libdir, 'lib%s.a' % (libname)) - if os.path.isfile(libfile): - # copy libname.a file to name.lib so that MSVC linker - # can find it - libfile2 = os.path.join(libdir, '%s.lib' % (libname)) - copy_file(libfile, libfile2) - self.temp_files.append(libfile2) - fileexists = True - break - if fileexists: continue - log.warn('could not find library %r in directories %s' \ - % (libname, library_dirs)) - elif self.compiler.compiler_type == 'mingw32': - generate_manifest(self) - return self._wrap_method(old_config._link, lang, - (body, headers, include_dirs, - libraries, library_dirs, lang)) - - def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'): - self._check_compiler() - return self.try_compile( - "/* we need a dummy line to make distutils happy */", 
- [header], include_dirs) - - def check_decl(self, symbol, - headers=None, include_dirs=None): - self._check_compiler() - body = textwrap.dedent(""" - int main(void) - { - #ifndef %s - (void) %s; - #endif - ; - return 0; - }""") % (symbol, symbol) - - return self.try_compile(body, headers, include_dirs) - - def check_macro_true(self, symbol, - headers=None, include_dirs=None): - self._check_compiler() - body = textwrap.dedent(""" - int main(void) - { - #if %s - #else - #error false or undefined macro - #endif - ; - return 0; - }""") % (symbol,) - - return self.try_compile(body, headers, include_dirs) - - def check_type(self, type_name, headers=None, include_dirs=None, - library_dirs=None): - """Check type availability. Return True if the type can be compiled, - False otherwise""" - self._check_compiler() - - # First check the type can be compiled - body = textwrap.dedent(r""" - int main(void) { - if ((%(name)s *) 0) - return 0; - if (sizeof (%(name)s)) - return 0; - } - """) % {'name': type_name} - - st = False - try: - try: - self._compile(body % {'type': type_name}, - headers, include_dirs, 'c') - st = True - except distutils.errors.CompileError: - st = False - finally: - self._clean() - - return st - - def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None): - """Check size of a given type.""" - self._check_compiler() - - # First check the type can be compiled - body = textwrap.dedent(r""" - typedef %(type)s npy_check_sizeof_type; - int main (void) - { - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)]; - test_array [0] = 0 - - ; - return 0; - } - """) - self._compile(body % {'type': type_name}, - headers, include_dirs, 'c') - self._clean() - - if expected: - body = textwrap.dedent(r""" - typedef %(type)s npy_check_sizeof_type; - int main (void) - { - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)]; - test_array [0] = 0 - - ; - return 0; - } 
- """) - for size in expected: - try: - self._compile(body % {'type': type_name, 'size': size}, - headers, include_dirs, 'c') - self._clean() - return size - except CompileError: - pass - - # this fails to *compile* if size > sizeof(type) - body = textwrap.dedent(r""" - typedef %(type)s npy_check_sizeof_type; - int main (void) - { - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)]; - test_array [0] = 0 - - ; - return 0; - } - """) - - # The principle is simple: we first find low and high bounds of size - # for the type, where low/high are looked up on a log scale. Then, we - # do a binary search to find the exact size between low and high - low = 0 - mid = 0 - while True: - try: - self._compile(body % {'type': type_name, 'size': mid}, - headers, include_dirs, 'c') - self._clean() - break - except CompileError: - #log.info("failure to test for bound %d" % mid) - low = mid + 1 - mid = 2 * mid + 1 - - high = mid - # Binary search: - while low != high: - mid = (high - low) // 2 + low - try: - self._compile(body % {'type': type_name, 'size': mid}, - headers, include_dirs, 'c') - self._clean() - high = mid - except CompileError: - low = mid + 1 - return low - - def check_func(self, func, - headers=None, include_dirs=None, - libraries=None, library_dirs=None, - decl=False, call=False, call_args=None): - # clean up distutils's config a bit: add void to main(), and - # return a value. - self._check_compiler() - body = [] - if decl: - if type(decl) == str: - body.append(decl) - else: - body.append("int %s (void);" % func) - # Handle MSVC intrinsics: force MS compiler to make a function call. - # Useful to test for some functions when built with optimization on, to - # avoid build error because the intrinsic and our 'fake' test - # declaration do not match. 
- body.append("#ifdef _MSC_VER") - body.append("#pragma function(%s)" % func) - body.append("#endif") - body.append("int main (void) {") - if call: - if call_args is None: - call_args = '' - body.append(" %s(%s);" % (func, call_args)) - else: - body.append(" %s;" % func) - body.append(" return 0;") - body.append("}") - body = '\n'.join(body) + "\n" - - return self.try_link(body, headers, include_dirs, - libraries, library_dirs) - - def check_funcs_once(self, funcs, - headers=None, include_dirs=None, - libraries=None, library_dirs=None, - decl=False, call=False, call_args=None): - """Check a list of functions at once. - - This is useful to speed up things, since all the functions in the funcs - list will be put in one compilation unit. - - Arguments - --------- - funcs : seq - list of functions to test - include_dirs : seq - list of header paths - libraries : seq - list of libraries to link the code snippet to - library_dirs : seq - list of library paths - decl : dict - for every (key, value), the declaration in the value will be - used for function in key. If a function is not in the - dictionary, no declaration will be used. - call : dict - for every item (f, value), if the value is True, a call will be - done to the function f. - """ - self._check_compiler() - body = [] - if decl: - for f, v in decl.items(): - if v: - body.append("int %s (void);" % f) - - # Handle MS intrinsics. See check_func for more info. 
- body.append("#ifdef _MSC_VER") - for func in funcs: - body.append("#pragma function(%s)" % func) - body.append("#endif") - - body.append("int main (void) {") - if call: - for f in funcs: - if f in call and call[f]: - if not (call_args and f in call_args and call_args[f]): - args = '' - else: - args = call_args[f] - body.append(" %s(%s);" % (f, args)) - else: - body.append(" %s;" % f) - else: - for f in funcs: - body.append(" %s;" % f) - body.append(" return 0;") - body.append("}") - body = '\n'.join(body) + "\n" - - return self.try_link(body, headers, include_dirs, - libraries, library_dirs) - - def check_inline(self): - """Return the inline keyword recognized by the compiler, empty string - otherwise.""" - return check_inline(self) - - def check_restrict(self): - """Return the restrict keyword recognized by the compiler, empty string - otherwise.""" - return check_restrict(self) - - def check_compiler_gcc4(self): - """Return True if the C compiler is gcc >= 4.""" - return check_compiler_gcc4(self) - - def check_gcc_function_attribute(self, attribute, name): - return check_gcc_function_attribute(self, attribute, name) - - def check_gcc_function_attribute_with_intrinsics(self, attribute, name, - code, include): - return check_gcc_function_attribute_with_intrinsics(self, attribute, - name, code, include) - - def check_gcc_variable_attribute(self, attribute): - return check_gcc_variable_attribute(self, attribute) - - def get_output(self, body, headers=None, include_dirs=None, - libraries=None, library_dirs=None, - lang="c", use_tee=None): - """Try to compile, link to an executable, and run a program - built from 'body' and 'headers'. Returns the exit status code - of the program and its output. 
- """ - # 2008-11-16, RemoveMe - warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" - "Usage of get_output is deprecated: please do not \n" - "use it anymore, and avoid configuration checks \n" - "involving running executable on the target machine.\n" - "+++++++++++++++++++++++++++++++++++++++++++++++++\n", - DeprecationWarning, stacklevel=2) - self._check_compiler() - exitcode, output = 255, '' - try: - grabber = GrabStdout() - try: - src, obj, exe = self._link(body, headers, include_dirs, - libraries, library_dirs, lang) - grabber.restore() - except Exception: - output = grabber.data - grabber.restore() - raise - exe = os.path.join('.', exe) - try: - # specify cwd arg for consistency with - # historic usage pattern of exec_command() - # also, note that exe appears to be a string, - # which exec_command() handled, but we now - # use a list for check_output() -- this assumes - # that exe is always a single command - output = subprocess.check_output([exe], cwd='.') - except subprocess.CalledProcessError as exc: - exitstatus = exc.returncode - output = '' - except OSError: - # preserve the EnvironmentError exit status - # used historically in exec_command() - exitstatus = 127 - output = '' - else: - output = filepath_from_subprocess_output(output) - if hasattr(os, 'WEXITSTATUS'): - exitcode = os.WEXITSTATUS(exitstatus) - if os.WIFSIGNALED(exitstatus): - sig = os.WTERMSIG(exitstatus) - log.error('subprocess exited with signal %d' % (sig,)) - if sig == signal.SIGINT: - # control-C - raise KeyboardInterrupt - else: - exitcode = exitstatus - log.info("success!") - except (CompileError, LinkError): - log.info("failure.") - self._clean() - return exitcode, output - -class GrabStdout(object): - - def __init__(self): - self.sys_stdout = sys.stdout - self.data = '' - sys.stdout = self - - def write (self, data): - self.sys_stdout.write(data) - self.data += data - - def flush (self): - self.sys_stdout.flush() - - def restore(self): - sys.stdout = 
self.sys_stdout diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/config_compiler.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/config_compiler.py deleted file mode 100644 index bf17006..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/config_compiler.py +++ /dev/null @@ -1,128 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from distutils.core import Command -from numpy.distutils import log - -#XXX: Linker flags - -def show_fortran_compilers(_cache=None): - # Using cache to prevent infinite recursion. - if _cache: - return - elif _cache is None: - _cache = [] - _cache.append(1) - from numpy.distutils.fcompiler import show_fcompilers - import distutils.core - dist = distutils.core._setup_distribution - show_fcompilers(dist) - -class config_fc(Command): - """ Distutils command to hold user specified options - to Fortran compilers. - - config_fc command is used by the FCompiler.customize() method. - """ - - description = "specify Fortran 77/Fortran 90 compiler information" - - user_options = [ - ('fcompiler=', None, "specify Fortran compiler type"), - ('f77exec=', None, "specify F77 compiler command"), - ('f90exec=', None, "specify F90 compiler command"), - ('f77flags=', None, "specify F77 compiler flags"), - ('f90flags=', None, "specify F90 compiler flags"), - ('opt=', None, "specify optimization flags"), - ('arch=', None, "specify architecture specific optimization flags"), - ('debug', 'g', "compile with debugging information"), - ('noopt', None, "compile without optimization"), - ('noarch', None, "compile without arch-dependent optimization"), - ] - - help_options = [ - ('help-fcompiler', None, "list available Fortran compilers", - show_fortran_compilers), - ] - - boolean_options = ['debug', 'noopt', 'noarch'] - - def initialize_options(self): - self.fcompiler = None - self.f77exec = None - self.f90exec = None - self.f77flags = None - self.f90flags = None - self.opt = None - 
self.arch = None - self.debug = None - self.noopt = None - self.noarch = None - - def finalize_options(self): - log.info('unifing config_fc, config, build_clib, build_ext, build commands --fcompiler options') - build_clib = self.get_finalized_command('build_clib') - build_ext = self.get_finalized_command('build_ext') - config = self.get_finalized_command('config') - build = self.get_finalized_command('build') - cmd_list = [self, config, build_clib, build_ext, build] - for a in ['fcompiler']: - l = [] - for c in cmd_list: - v = getattr(c, a) - if v is not None: - if not isinstance(v, str): v = v.compiler_type - if v not in l: l.append(v) - if not l: v1 = None - else: v1 = l[0] - if len(l)>1: - log.warn(' commands have different --%s options: %s'\ - ', using first in list as default' % (a, l)) - if v1: - for c in cmd_list: - if getattr(c, a) is None: setattr(c, a, v1) - - def run(self): - # Do nothing. - return - -class config_cc(Command): - """ Distutils command to hold user specified options - to C/C++ compilers. 
- """ - - description = "specify C/C++ compiler information" - - user_options = [ - ('compiler=', None, "specify C/C++ compiler type"), - ] - - def initialize_options(self): - self.compiler = None - - def finalize_options(self): - log.info('unifing config_cc, config, build_clib, build_ext, build commands --compiler options') - build_clib = self.get_finalized_command('build_clib') - build_ext = self.get_finalized_command('build_ext') - config = self.get_finalized_command('config') - build = self.get_finalized_command('build') - cmd_list = [self, config, build_clib, build_ext, build] - for a in ['compiler']: - l = [] - for c in cmd_list: - v = getattr(c, a) - if v is not None: - if not isinstance(v, str): v = v.compiler_type - if v not in l: l.append(v) - if not l: v1 = None - else: v1 = l[0] - if len(l)>1: - log.warn(' commands have different --%s options: %s'\ - ', using first in list as default' % (a, l)) - if v1: - for c in cmd_list: - if getattr(c, a) is None: setattr(c, a, v1) - return - - def run(self): - # Do nothing. - return diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/develop.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/develop.py deleted file mode 100644 index 1410ab2..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/develop.py +++ /dev/null @@ -1,17 +0,0 @@ -""" Override the develop command from setuptools so we can ensure that our -generated files (from build_src or build_scripts) are properly converted to real -files with filenames. - -""" -from __future__ import division, absolute_import, print_function - -from setuptools.command.develop import develop as old_develop - -class develop(old_develop): - __doc__ = old_develop.__doc__ - def install_for_development(self): - # Build sources in-place, too. - self.reinitialize_command('build_src', inplace=1) - # Make sure scripts are built. 
- self.run_command('build_scripts') - old_develop.install_for_development(self) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/egg_info.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/egg_info.py deleted file mode 100644 index 18673ec..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/egg_info.py +++ /dev/null @@ -1,27 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys - -from setuptools.command.egg_info import egg_info as _egg_info - -class egg_info(_egg_info): - def run(self): - if 'sdist' in sys.argv: - import warnings - import textwrap - msg = textwrap.dedent(""" - `build_src` is being run, this may lead to missing - files in your sdist! You want to use distutils.sdist - instead of the setuptools version: - - from distutils.command.sdist import sdist - cmdclass={'sdist': sdist}" - - See numpy's setup.py or gh-7131 for details.""") - warnings.warn(msg, UserWarning, stacklevel=2) - - # We need to ensure that build_src has been executed in order to give - # setuptools' egg_info command real filenames instead of functions which - # generate files. 
- self.run_command("build_src") - _egg_info.run(self) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/install.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/install.py deleted file mode 100644 index c74ae94..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/install.py +++ /dev/null @@ -1,81 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -if 'setuptools' in sys.modules: - import setuptools.command.install as old_install_mod - have_setuptools = True -else: - import distutils.command.install as old_install_mod - have_setuptools = False -from distutils.file_util import write_file - -old_install = old_install_mod.install - -class install(old_install): - - # Always run install_clib - the command is cheap, so no need to bypass it; - # but it's not run by setuptools -- so it's run again in install_data - sub_commands = old_install.sub_commands + [ - ('install_clib', lambda x: True) - ] - - def finalize_options (self): - old_install.finalize_options(self) - self.install_lib = self.install_libbase - - def setuptools_run(self): - """ The setuptools version of the .run() method. - - We must pull in the entire code so we can override the level used in the - _getframe() call since we wrap this call by one more level. - """ - from distutils.command.install import install as distutils_install - - # Explicit request for old-style install? Just do it - if self.old_and_unmanageable or self.single_version_externally_managed: - return distutils_install.run(self) - - # Attempt to detect whether we were called from setup() or by another - # command. If we were called by setup(), our caller will be the - # 'run_command' method in 'distutils.dist', and *its* caller will be - # the 'run_commands' method. If we were called any other way, our - # immediate caller *might* be 'run_command', but it won't have been - # called by 'run_commands'. This is slightly kludgy, but seems to - # work. 
- # - caller = sys._getframe(3) - caller_module = caller.f_globals.get('__name__', '') - caller_name = caller.f_code.co_name - - if caller_module != 'distutils.dist' or caller_name!='run_commands': - # We weren't called from the command line or setup(), so we - # should run in backward-compatibility mode to support bdist_* - # commands. - distutils_install.run(self) - else: - self.do_egg_install() - - def run(self): - if not have_setuptools: - r = old_install.run(self) - else: - r = self.setuptools_run() - if self.record: - # bdist_rpm fails when INSTALLED_FILES contains - # paths with spaces. Such paths must be enclosed - # with double-quotes. - with open(self.record, 'r') as f: - lines = [] - need_rewrite = False - for l in f: - l = l.rstrip() - if ' ' in l: - need_rewrite = True - l = '"%s"' % (l) - lines.append(l) - if need_rewrite: - self.execute(write_file, - (self.record, lines), - "re-writing list of installed files to '%s'" % - self.record) - return r diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/install_clib.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/install_clib.py deleted file mode 100644 index 6a73f7e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/install_clib.py +++ /dev/null @@ -1,42 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -from distutils.core import Command -from distutils.ccompiler import new_compiler -from numpy.distutils.misc_util import get_cmd - -class install_clib(Command): - description = "Command to install installable C libraries" - - user_options = [] - - def initialize_options(self): - self.install_dir = None - self.outfiles = [] - - def finalize_options(self): - self.set_undefined_options('install', ('install_lib', 'install_dir')) - - def run (self): - build_clib_cmd = get_cmd("build_clib") - if not build_clib_cmd.build_clib: - # can happen if the user specified `--skip-build` - build_clib_cmd.finalize_options() - build_dir 
= build_clib_cmd.build_clib - - # We need the compiler to get the library name -> filename association - if not build_clib_cmd.compiler: - compiler = new_compiler(compiler=None) - compiler.customize(self.distribution) - else: - compiler = build_clib_cmd.compiler - - for l in self.distribution.installed_libraries: - target_dir = os.path.join(self.install_dir, l.target_dir) - name = compiler.library_filename(l.name) - source = os.path.join(build_dir, name) - self.mkpath(target_dir) - self.outfiles.append(self.copy_file(source, target_dir)[0]) - - def get_outputs(self): - return self.outfiles diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/install_data.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/install_data.py deleted file mode 100644 index 996cf7e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/install_data.py +++ /dev/null @@ -1,26 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -have_setuptools = ('setuptools' in sys.modules) - -from distutils.command.install_data import install_data as old_install_data - -#data installer with improved intelligence over distutils -#data files are copied into the project directory instead -#of willy-nilly -class install_data (old_install_data): - - def run(self): - old_install_data.run(self) - - if have_setuptools: - # Run install_clib again, since setuptools does not run sub-commands - # of install automatically - self.run_command('install_clib') - - def finalize_options (self): - self.set_undefined_options('install', - ('install_lib', 'install_dir'), - ('root', 'root'), - ('force', 'force'), - ) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/install_headers.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/install_headers.py deleted file mode 100644 index f3f58aa..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/install_headers.py +++ /dev/null @@ -1,27 +0,0 @@ -from 
__future__ import division, absolute_import, print_function - -import os -from distutils.command.install_headers import install_headers as old_install_headers - -class install_headers (old_install_headers): - - def run (self): - headers = self.distribution.headers - if not headers: - return - - prefix = os.path.dirname(self.install_dir) - for header in headers: - if isinstance(header, tuple): - # Kind of a hack, but I don't know where else to change this... - if header[0] == 'numpy.core': - header = ('numpy', header[1]) - if os.path.splitext(header[1])[1] == '.inc': - continue - d = os.path.join(*([prefix]+header[0].split('.'))) - header = header[1] - else: - d = self.install_dir - self.mkpath(d) - (out, _) = self.copy_file(header, d) - self.outfiles.append(out) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/sdist.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/sdist.py deleted file mode 100644 index bfaab1c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/sdist.py +++ /dev/null @@ -1,29 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -if 'setuptools' in sys.modules: - from setuptools.command.sdist import sdist as old_sdist -else: - from distutils.command.sdist import sdist as old_sdist - -from numpy.distutils.misc_util import get_data_files - -class sdist(old_sdist): - - def add_defaults (self): - old_sdist.add_defaults(self) - - dist = self.distribution - - if dist.has_data_files(): - for data in dist.data_files: - self.filelist.extend(get_data_files(data)) - - if dist.has_headers(): - headers = [] - for h in dist.headers: - if isinstance(h, str): headers.append(h) - else: headers.append(h[1]) - self.filelist.extend(headers) - - return diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/compat.py b/venv/lib/python3.7/site-packages/numpy/distutils/compat.py deleted file mode 100644 index 9a81cd3..0000000 --- 
a/venv/lib/python3.7/site-packages/numpy/distutils/compat.py +++ /dev/null @@ -1,10 +0,0 @@ -"""Small modules to cope with python 2 vs 3 incompatibilities inside -numpy.distutils - -""" -from __future__ import division, absolute_import, print_function - -import sys - -def get_exception(): - return sys.exc_info()[1] diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/conv_template.py b/venv/lib/python3.7/site-packages/numpy/distutils/conv_template.py deleted file mode 100644 index 3bcb7b8..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/conv_template.py +++ /dev/null @@ -1,337 +0,0 @@ -#!/usr/bin/env python -""" -takes templated file .xxx.src and produces .xxx file where .xxx is -.i or .c or .h, using the following template rules - -/**begin repeat -- on a line by itself marks the start of a repeated code - segment -/**end repeat**/ -- on a line by itself marks it's end - -After the /**begin repeat and before the */, all the named templates are placed -these should all have the same number of replacements - -Repeat blocks can be nested, with each nested block labeled with its depth, -i.e. -/**begin repeat1 - *.... - */ -/**end repeat1**/ - -When using nested loops, you can optionally exclude particular -combinations of the variables using (inside the comment portion of the inner loop): - - :exclude: var1=value1, var2=value2, ... - -This will exclude the pattern where var1 is value1 and var2 is value2 when -the result is being generated. - - -In the main body each replace will use one entry from the list of named replacements - - Note that all #..# forms in a block must have the same number of - comma-separated entries. 
- -Example: - - An input file containing - - /**begin repeat - * #a = 1,2,3# - * #b = 1,2,3# - */ - - /**begin repeat1 - * #c = ted, jim# - */ - @a@, @b@, @c@ - /**end repeat1**/ - - /**end repeat**/ - - produces - - line 1 "template.c.src" - - /* - ********************************************************************* - ** This file was autogenerated from a template DO NOT EDIT!!** - ** Changes should be made to the original source (.src) file ** - ********************************************************************* - */ - - #line 9 - 1, 1, ted - - #line 9 - 1, 1, jim - - #line 9 - 2, 2, ted - - #line 9 - 2, 2, jim - - #line 9 - 3, 3, ted - - #line 9 - 3, 3, jim - -""" -from __future__ import division, absolute_import, print_function - - -__all__ = ['process_str', 'process_file'] - -import os -import sys -import re - -from numpy.distutils.compat import get_exception - -# names for replacement that are already global. -global_names = {} - -# header placed at the front of head processed file -header =\ -""" -/* - ***************************************************************************** - ** This file was autogenerated from a template DO NOT EDIT!!!! ** - ** Changes should be made to the original source (.src) file ** - ***************************************************************************** - */ - -""" -# Parse string for repeat loops -def parse_structure(astr, level): - """ - The returned line number is from the beginning of the string, starting - at zero. Returns an empty list if no loops found. 
- - """ - if level == 0 : - loopbeg = "/**begin repeat" - loopend = "/**end repeat**/" - else : - loopbeg = "/**begin repeat%d" % level - loopend = "/**end repeat%d**/" % level - - ind = 0 - line = 0 - spanlist = [] - while True: - start = astr.find(loopbeg, ind) - if start == -1: - break - start2 = astr.find("*/", start) - start2 = astr.find("\n", start2) - fini1 = astr.find(loopend, start2) - fini2 = astr.find("\n", fini1) - line += astr.count("\n", ind, start2+1) - spanlist.append((start, start2+1, fini1, fini2+1, line)) - line += astr.count("\n", start2+1, fini2) - ind = fini2 - spanlist.sort() - return spanlist - - -def paren_repl(obj): - torep = obj.group(1) - numrep = obj.group(2) - return ','.join([torep]*int(numrep)) - -parenrep = re.compile(r"[(]([^)]*)[)]\*(\d+)") -plainrep = re.compile(r"([^*]+)\*(\d+)") -def parse_values(astr): - # replaces all occurrences of '(a,b,c)*4' in astr - # with 'a,b,c,a,b,c,a,b,c,a,b,c'. Empty braces generate - # empty values, i.e., ()*4 yields ',,,'. The result is - # split at ',' and a list of values returned. - astr = parenrep.sub(paren_repl, astr) - # replaces occurrences of xxx*3 with xxx, xxx, xxx - astr = ','.join([plainrep.sub(paren_repl, x.strip()) - for x in astr.split(',')]) - return astr.split(',') - - -stripast = re.compile(r"\n\s*\*?") -named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#") -exclude_vars_re = re.compile(r"(\w*)=(\w*)") -exclude_re = re.compile(":exclude:") -def parse_loop_header(loophead) : - """Find all named replacements in the header - - Returns a list of dictionaries, one for each loop iteration, - where each key is a name to be substituted and the corresponding - value is the replacement string. - - Also return a list of exclusions. The exclusions are dictionaries - of key value pairs. There can be more than one exclusion. - [{'var1':'value1', 'var2', 'value2'[,...]}, ...] - - """ - # Strip out '\n' and leading '*', if any, in continuation lines. 
- # This should not effect code previous to this change as - # continuation lines were not allowed. - loophead = stripast.sub("", loophead) - # parse out the names and lists of values - names = [] - reps = named_re.findall(loophead) - nsub = None - for rep in reps: - name = rep[0] - vals = parse_values(rep[1]) - size = len(vals) - if nsub is None : - nsub = size - elif nsub != size : - msg = "Mismatch in number of values, %d != %d\n%s = %s" - raise ValueError(msg % (nsub, size, name, vals)) - names.append((name, vals)) - - - # Find any exclude variables - excludes = [] - - for obj in exclude_re.finditer(loophead): - span = obj.span() - # find next newline - endline = loophead.find('\n', span[1]) - substr = loophead[span[1]:endline] - ex_names = exclude_vars_re.findall(substr) - excludes.append(dict(ex_names)) - - # generate list of dictionaries, one for each template iteration - dlist = [] - if nsub is None : - raise ValueError("No substitution variables found") - for i in range(nsub): - tmp = {name: vals[i] for name, vals in names} - dlist.append(tmp) - return dlist - -replace_re = re.compile(r"@([\w]+)@") -def parse_string(astr, env, level, line) : - lineno = "#line %d\n" % line - - # local function for string replacement, uses env - def replace(match): - name = match.group(1) - try : - val = env[name] - except KeyError: - msg = 'line %d: no definition of key "%s"'%(line, name) - raise ValueError(msg) - return val - - code = [lineno] - struct = parse_structure(astr, level) - if struct : - # recurse over inner loops - oldend = 0 - newlevel = level + 1 - for sub in struct: - pref = astr[oldend:sub[0]] - head = astr[sub[0]:sub[1]] - text = astr[sub[1]:sub[2]] - oldend = sub[3] - newline = line + sub[4] - code.append(replace_re.sub(replace, pref)) - try : - envlist = parse_loop_header(head) - except ValueError: - e = get_exception() - msg = "line %d: %s" % (newline, e) - raise ValueError(msg) - for newenv in envlist : - newenv.update(env) - newcode = 
parse_string(text, newenv, newlevel, newline) - code.extend(newcode) - suff = astr[oldend:] - code.append(replace_re.sub(replace, suff)) - else : - # replace keys - code.append(replace_re.sub(replace, astr)) - code.append('\n') - return ''.join(code) - -def process_str(astr): - code = [header] - code.extend(parse_string(astr, global_names, 0, 1)) - return ''.join(code) - - -include_src_re = re.compile(r"(\n|\A)#include\s*['\"]" - r"(?P[\w\d./\\]+[.]src)['\"]", re.I) - -def resolve_includes(source): - d = os.path.dirname(source) - with open(source) as fid: - lines = [] - for line in fid: - m = include_src_re.match(line) - if m: - fn = m.group('name') - if not os.path.isabs(fn): - fn = os.path.join(d, fn) - if os.path.isfile(fn): - print('Including file', fn) - lines.extend(resolve_includes(fn)) - else: - lines.append(line) - else: - lines.append(line) - return lines - -def process_file(source): - lines = resolve_includes(source) - sourcefile = os.path.normcase(source).replace("\\", "\\\\") - try: - code = process_str(''.join(lines)) - except ValueError: - e = get_exception() - raise ValueError('In "%s" loop at %s' % (sourcefile, e)) - return '#line 1 "%s"\n%s' % (sourcefile, code) - - -def unique_key(adict): - # this obtains a unique key given a dictionary - # currently it works by appending together n of the letters of the - # current keys and increasing n until a unique key is found - # -- not particularly quick - allkeys = list(adict.keys()) - done = False - n = 1 - while not done: - newkey = "".join([x[:n] for x in allkeys]) - if newkey in allkeys: - n += 1 - else: - done = True - return newkey - - -def main(): - try: - file = sys.argv[1] - except IndexError: - fid = sys.stdin - outfile = sys.stdout - else: - fid = open(file, 'r') - (base, ext) = os.path.splitext(file) - newname = base - outfile = open(newname, 'w') - - allstr = fid.read() - try: - writestr = process_str(allstr) - except ValueError: - e = get_exception() - raise ValueError("In %s loop at %s" % 
(file, e)) - - outfile.write(writestr) - -if __name__ == "__main__": - main() diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/core.py b/venv/lib/python3.7/site-packages/numpy/distutils/core.py deleted file mode 100644 index 70cc37c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/core.py +++ /dev/null @@ -1,217 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -from distutils.core import * - -if 'setuptools' in sys.modules: - have_setuptools = True - from setuptools import setup as old_setup - # easy_install imports math, it may be picked up from cwd - from setuptools.command import easy_install - try: - # very old versions of setuptools don't have this - from setuptools.command import bdist_egg - except ImportError: - have_setuptools = False -else: - from distutils.core import setup as old_setup - have_setuptools = False - -import warnings -import distutils.core -import distutils.dist - -from numpy.distutils.extension import Extension -from numpy.distutils.numpy_distribution import NumpyDistribution -from numpy.distutils.command import config, config_compiler, \ - build, build_py, build_ext, build_clib, build_src, build_scripts, \ - sdist, install_data, install_headers, install, bdist_rpm, \ - install_clib -from numpy.distutils.misc_util import get_data_files, is_sequence, is_string - -numpy_cmdclass = {'build': build.build, - 'build_src': build_src.build_src, - 'build_scripts': build_scripts.build_scripts, - 'config_cc': config_compiler.config_cc, - 'config_fc': config_compiler.config_fc, - 'config': config.config, - 'build_ext': build_ext.build_ext, - 'build_py': build_py.build_py, - 'build_clib': build_clib.build_clib, - 'sdist': sdist.sdist, - 'install_data': install_data.install_data, - 'install_headers': install_headers.install_headers, - 'install_clib': install_clib.install_clib, - 'install': install.install, - 'bdist_rpm': bdist_rpm.bdist_rpm, - } -if have_setuptools: - # Use our own 
versions of develop and egg_info to ensure that build_src is - # handled appropriately. - from numpy.distutils.command import develop, egg_info - numpy_cmdclass['bdist_egg'] = bdist_egg.bdist_egg - numpy_cmdclass['develop'] = develop.develop - numpy_cmdclass['easy_install'] = easy_install.easy_install - numpy_cmdclass['egg_info'] = egg_info.egg_info - -def _dict_append(d, **kws): - for k, v in kws.items(): - if k not in d: - d[k] = v - continue - dv = d[k] - if isinstance(dv, tuple): - d[k] = dv + tuple(v) - elif isinstance(dv, list): - d[k] = dv + list(v) - elif isinstance(dv, dict): - _dict_append(dv, **v) - elif is_string(dv): - d[k] = dv + v - else: - raise TypeError(repr(type(dv))) - -def _command_line_ok(_cache=None): - """ Return True if command line does not contain any - help or display requests. - """ - if _cache: - return _cache[0] - elif _cache is None: - _cache = [] - ok = True - display_opts = ['--'+n for n in Distribution.display_option_names] - for o in Distribution.display_options: - if o[1]: - display_opts.append('-'+o[1]) - for arg in sys.argv: - if arg.startswith('--help') or arg=='-h' or arg in display_opts: - ok = False - break - _cache.append(ok) - return ok - -def get_distribution(always=False): - dist = distutils.core._setup_distribution - # XXX Hack to get numpy installable with easy_install. - # The problem is easy_install runs it's own setup(), which - # sets up distutils.core._setup_distribution. However, - # when our setup() runs, that gets overwritten and lost. 
- # We can't use isinstance, as the DistributionWithoutHelpCommands - # class is local to a function in setuptools.command.easy_install - if dist is not None and \ - 'DistributionWithoutHelpCommands' in repr(dist): - dist = None - if always and dist is None: - dist = NumpyDistribution() - return dist - -def setup(**attr): - - cmdclass = numpy_cmdclass.copy() - - new_attr = attr.copy() - if 'cmdclass' in new_attr: - cmdclass.update(new_attr['cmdclass']) - new_attr['cmdclass'] = cmdclass - - if 'configuration' in new_attr: - # To avoid calling configuration if there are any errors - # or help request in command in the line. - configuration = new_attr.pop('configuration') - - old_dist = distutils.core._setup_distribution - old_stop = distutils.core._setup_stop_after - distutils.core._setup_distribution = None - distutils.core._setup_stop_after = "commandline" - try: - dist = setup(**new_attr) - finally: - distutils.core._setup_distribution = old_dist - distutils.core._setup_stop_after = old_stop - if dist.help or not _command_line_ok(): - # probably displayed help, skip running any commands - return dist - - # create setup dictionary and append to new_attr - config = configuration() - if hasattr(config, 'todict'): - config = config.todict() - _dict_append(new_attr, **config) - - # Move extension source libraries to libraries - libraries = [] - for ext in new_attr.get('ext_modules', []): - new_libraries = [] - for item in ext.libraries: - if is_sequence(item): - lib_name, build_info = item - _check_append_ext_library(libraries, lib_name, build_info) - new_libraries.append(lib_name) - elif is_string(item): - new_libraries.append(item) - else: - raise TypeError("invalid description of extension module " - "library %r" % (item,)) - ext.libraries = new_libraries - if libraries: - if 'libraries' not in new_attr: - new_attr['libraries'] = [] - for item in libraries: - _check_append_library(new_attr['libraries'], item) - - # sources in ext_modules or libraries may contain 
header files - if ('ext_modules' in new_attr or 'libraries' in new_attr) \ - and 'headers' not in new_attr: - new_attr['headers'] = [] - - # Use our custom NumpyDistribution class instead of distutils' one - new_attr['distclass'] = NumpyDistribution - - return old_setup(**new_attr) - -def _check_append_library(libraries, item): - for libitem in libraries: - if is_sequence(libitem): - if is_sequence(item): - if item[0]==libitem[0]: - if item[1] is libitem[1]: - return - warnings.warn("[0] libraries list contains %r with" - " different build_info" % (item[0],), - stacklevel=2) - break - else: - if item==libitem[0]: - warnings.warn("[1] libraries list contains %r with" - " no build_info" % (item[0],), - stacklevel=2) - break - else: - if is_sequence(item): - if item[0]==libitem: - warnings.warn("[2] libraries list contains %r with" - " no build_info" % (item[0],), - stacklevel=2) - break - else: - if item==libitem: - return - libraries.append(item) - -def _check_append_ext_library(libraries, lib_name, build_info): - for item in libraries: - if is_sequence(item): - if item[0]==lib_name: - if item[1] is build_info: - return - warnings.warn("[3] libraries list contains %r with" - " different build_info" % (lib_name,), - stacklevel=2) - break - elif item==lib_name: - warnings.warn("[4] libraries list contains %r with" - " no build_info" % (lib_name,), - stacklevel=2) - break - libraries.append((lib_name, build_info)) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/cpuinfo.py b/venv/lib/python3.7/site-packages/numpy/distutils/cpuinfo.py deleted file mode 100644 index bc97283..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/cpuinfo.py +++ /dev/null @@ -1,693 +0,0 @@ -#!/usr/bin/env python -""" -cpuinfo - -Copyright 2002 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy (BSD style) license. 
See LICENSE.txt that came with -this distribution for specifics. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['cpu'] - -import sys, re, types -import os - -if sys.version_info[0] >= 3: - from subprocess import getstatusoutput -else: - from commands import getstatusoutput - -import warnings -import platform - -from numpy.distutils.compat import get_exception - -def getoutput(cmd, successful_status=(0,), stacklevel=1): - try: - status, output = getstatusoutput(cmd) - except EnvironmentError: - e = get_exception() - warnings.warn(str(e), UserWarning, stacklevel=stacklevel) - return False, "" - if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status: - return True, output - return False, output - -def command_info(successful_status=(0,), stacklevel=1, **kw): - info = {} - for key in kw: - ok, output = getoutput(kw[key], successful_status=successful_status, - stacklevel=stacklevel+1) - if ok: - info[key] = output.strip() - return info - -def command_by_line(cmd, successful_status=(0,), stacklevel=1): - ok, output = getoutput(cmd, successful_status=successful_status, - stacklevel=stacklevel+1) - if not ok: - return - for line in output.splitlines(): - yield line.strip() - -def key_value_from_command(cmd, sep, successful_status=(0,), - stacklevel=1): - d = {} - for line in command_by_line(cmd, successful_status=successful_status, - stacklevel=stacklevel+1): - l = [s.strip() for s in line.split(sep, 1)] - if len(l) == 2: - d[l[0]] = l[1] - return d - -class CPUInfoBase(object): - """Holds CPU information and provides methods for requiring - the availability of various CPU features. 
- """ - - def _try_call(self, func): - try: - return func() - except Exception: - pass - - def __getattr__(self, name): - if not name.startswith('_'): - if hasattr(self, '_'+name): - attr = getattr(self, '_'+name) - if isinstance(attr, types.MethodType): - return lambda func=self._try_call,attr=attr : func(attr) - else: - return lambda : None - raise AttributeError(name) - - def _getNCPUs(self): - return 1 - - def __get_nbits(self): - abits = platform.architecture()[0] - nbits = re.compile(r'(\d+)bit').search(abits).group(1) - return nbits - - def _is_32bit(self): - return self.__get_nbits() == '32' - - def _is_64bit(self): - return self.__get_nbits() == '64' - -class LinuxCPUInfo(CPUInfoBase): - - info = None - - def __init__(self): - if self.info is not None: - return - info = [ {} ] - ok, output = getoutput('uname -m') - if ok: - info[0]['uname_m'] = output.strip() - try: - fo = open('/proc/cpuinfo') - except EnvironmentError: - e = get_exception() - warnings.warn(str(e), UserWarning, stacklevel=2) - else: - for line in fo: - name_value = [s.strip() for s in line.split(':', 1)] - if len(name_value) != 2: - continue - name, value = name_value - if not info or name in info[-1]: # next processor - info.append({}) - info[-1][name] = value - fo.close() - self.__class__.info = info - - def _not_impl(self): pass - - # Athlon - - def _is_AMD(self): - return self.info[0]['vendor_id']=='AuthenticAMD' - - def _is_AthlonK6_2(self): - return self._is_AMD() and self.info[0]['model'] == '2' - - def _is_AthlonK6_3(self): - return self._is_AMD() and self.info[0]['model'] == '3' - - def _is_AthlonK6(self): - return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None - - def _is_AthlonK7(self): - return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None - - def _is_AthlonMP(self): - return re.match(r'.*?Athlon\(tm\) MP\b', - self.info[0]['model name']) is not None - - def _is_AMD64(self): - return self.is_AMD() and self.info[0]['family'] == '15' - - def 
_is_Athlon64(self): - return re.match(r'.*?Athlon\(tm\) 64\b', - self.info[0]['model name']) is not None - - def _is_AthlonHX(self): - return re.match(r'.*?Athlon HX\b', - self.info[0]['model name']) is not None - - def _is_Opteron(self): - return re.match(r'.*?Opteron\b', - self.info[0]['model name']) is not None - - def _is_Hammer(self): - return re.match(r'.*?Hammer\b', - self.info[0]['model name']) is not None - - # Alpha - - def _is_Alpha(self): - return self.info[0]['cpu']=='Alpha' - - def _is_EV4(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4' - - def _is_EV5(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5' - - def _is_EV56(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56' - - def _is_PCA56(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56' - - # Intel - - #XXX - _is_i386 = _not_impl - - def _is_Intel(self): - return self.info[0]['vendor_id']=='GenuineIntel' - - def _is_i486(self): - return self.info[0]['cpu']=='i486' - - def _is_i586(self): - return self.is_Intel() and self.info[0]['cpu family'] == '5' - - def _is_i686(self): - return self.is_Intel() and self.info[0]['cpu family'] == '6' - - def _is_Celeron(self): - return re.match(r'.*?Celeron', - self.info[0]['model name']) is not None - - def _is_Pentium(self): - return re.match(r'.*?Pentium', - self.info[0]['model name']) is not None - - def _is_PentiumII(self): - return re.match(r'.*?Pentium.*?II\b', - self.info[0]['model name']) is not None - - def _is_PentiumPro(self): - return re.match(r'.*?PentiumPro\b', - self.info[0]['model name']) is not None - - def _is_PentiumMMX(self): - return re.match(r'.*?Pentium.*?MMX\b', - self.info[0]['model name']) is not None - - def _is_PentiumIII(self): - return re.match(r'.*?Pentium.*?III\b', - self.info[0]['model name']) is not None - - def _is_PentiumIV(self): - return re.match(r'.*?Pentium.*?(IV|4)\b', - self.info[0]['model name']) is not None - - def _is_PentiumM(self): - 
return re.match(r'.*?Pentium.*?M\b', - self.info[0]['model name']) is not None - - def _is_Prescott(self): - return self.is_PentiumIV() and self.has_sse3() - - def _is_Nocona(self): - return (self.is_Intel() - and (self.info[0]['cpu family'] == '6' - or self.info[0]['cpu family'] == '15') - and (self.has_sse3() and not self.has_ssse3()) - and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None) - - def _is_Core2(self): - return (self.is_64bit() and self.is_Intel() and - re.match(r'.*?Core\(TM\)2\b', - self.info[0]['model name']) is not None) - - def _is_Itanium(self): - return re.match(r'.*?Itanium\b', - self.info[0]['family']) is not None - - def _is_XEON(self): - return re.match(r'.*?XEON\b', - self.info[0]['model name'], re.IGNORECASE) is not None - - _is_Xeon = _is_XEON - - # Varia - - def _is_singleCPU(self): - return len(self.info) == 1 - - def _getNCPUs(self): - return len(self.info) - - def _has_fdiv_bug(self): - return self.info[0]['fdiv_bug']=='yes' - - def _has_f00f_bug(self): - return self.info[0]['f00f_bug']=='yes' - - def _has_mmx(self): - return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None - - def _has_sse(self): - return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None - - def _has_sse2(self): - return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None - - def _has_sse3(self): - return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None - - def _has_ssse3(self): - return re.match(r'.*?\bssse3\b', self.info[0]['flags']) is not None - - def _has_3dnow(self): - return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None - - def _has_3dnowext(self): - return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None - -class IRIXCPUInfo(CPUInfoBase): - info = None - - def __init__(self): - if self.info is not None: - return - info = key_value_from_command('sysconf', sep=' ', - successful_status=(0, 1)) - self.__class__.info = info - - def _not_impl(self): pass - - def _is_singleCPU(self): - 
return self.info.get('NUM_PROCESSORS') == '1' - - def _getNCPUs(self): - return int(self.info.get('NUM_PROCESSORS', 1)) - - def __cputype(self, n): - return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n) - def _is_r2000(self): return self.__cputype(2000) - def _is_r3000(self): return self.__cputype(3000) - def _is_r3900(self): return self.__cputype(3900) - def _is_r4000(self): return self.__cputype(4000) - def _is_r4100(self): return self.__cputype(4100) - def _is_r4300(self): return self.__cputype(4300) - def _is_r4400(self): return self.__cputype(4400) - def _is_r4600(self): return self.__cputype(4600) - def _is_r4650(self): return self.__cputype(4650) - def _is_r5000(self): return self.__cputype(5000) - def _is_r6000(self): return self.__cputype(6000) - def _is_r8000(self): return self.__cputype(8000) - def _is_r10000(self): return self.__cputype(10000) - def _is_r12000(self): return self.__cputype(12000) - def _is_rorion(self): return self.__cputype('orion') - - def get_ip(self): - try: return self.info.get('MACHINE') - except Exception: pass - def __machine(self, n): - return self.info.get('MACHINE').lower() == 'ip%s' % (n) - def _is_IP19(self): return self.__machine(19) - def _is_IP20(self): return self.__machine(20) - def _is_IP21(self): return self.__machine(21) - def _is_IP22(self): return self.__machine(22) - def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000() - def _is_IP22_5k(self): return self.__machine(22) and self._is_r5000() - def _is_IP24(self): return self.__machine(24) - def _is_IP25(self): return self.__machine(25) - def _is_IP26(self): return self.__machine(26) - def _is_IP27(self): return self.__machine(27) - def _is_IP28(self): return self.__machine(28) - def _is_IP30(self): return self.__machine(30) - def _is_IP32(self): return self.__machine(32) - def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000() - def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000() - - -class 
DarwinCPUInfo(CPUInfoBase): - info = None - - def __init__(self): - if self.info is not None: - return - info = command_info(arch='arch', - machine='machine') - info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=') - self.__class__.info = info - - def _not_impl(self): pass - - def _getNCPUs(self): - return int(self.info['sysctl_hw'].get('hw.ncpu', 1)) - - def _is_Power_Macintosh(self): - return self.info['sysctl_hw']['hw.machine']=='Power Macintosh' - - def _is_i386(self): - return self.info['arch']=='i386' - def _is_ppc(self): - return self.info['arch']=='ppc' - - def __machine(self, n): - return self.info['machine'] == 'ppc%s'%n - def _is_ppc601(self): return self.__machine(601) - def _is_ppc602(self): return self.__machine(602) - def _is_ppc603(self): return self.__machine(603) - def _is_ppc603e(self): return self.__machine('603e') - def _is_ppc604(self): return self.__machine(604) - def _is_ppc604e(self): return self.__machine('604e') - def _is_ppc620(self): return self.__machine(620) - def _is_ppc630(self): return self.__machine(630) - def _is_ppc740(self): return self.__machine(740) - def _is_ppc7400(self): return self.__machine(7400) - def _is_ppc7450(self): return self.__machine(7450) - def _is_ppc750(self): return self.__machine(750) - def _is_ppc403(self): return self.__machine(403) - def _is_ppc505(self): return self.__machine(505) - def _is_ppc801(self): return self.__machine(801) - def _is_ppc821(self): return self.__machine(821) - def _is_ppc823(self): return self.__machine(823) - def _is_ppc860(self): return self.__machine(860) - - -class SunOSCPUInfo(CPUInfoBase): - - info = None - - def __init__(self): - if self.info is not None: - return - info = command_info(arch='arch', - mach='mach', - uname_i='uname_i', - isainfo_b='isainfo -b', - isainfo_n='isainfo -n', - ) - info['uname_X'] = key_value_from_command('uname -X', sep='=') - for line in command_by_line('psrinfo -v 0'): - m = re.match(r'\s*The (?P

[\w\d]+) processor operates at', line) - if m: - info['processor'] = m.group('p') - break - self.__class__.info = info - - def _not_impl(self): pass - - def _is_i386(self): - return self.info['isainfo_n']=='i386' - def _is_sparc(self): - return self.info['isainfo_n']=='sparc' - def _is_sparcv9(self): - return self.info['isainfo_n']=='sparcv9' - - def _getNCPUs(self): - return int(self.info['uname_X'].get('NumCPU', 1)) - - def _is_sun4(self): - return self.info['arch']=='sun4' - - def _is_SUNW(self): - return re.match(r'SUNW', self.info['uname_i']) is not None - def _is_sparcstation5(self): - return re.match(r'.*SPARCstation-5', self.info['uname_i']) is not None - def _is_ultra1(self): - return re.match(r'.*Ultra-1', self.info['uname_i']) is not None - def _is_ultra250(self): - return re.match(r'.*Ultra-250', self.info['uname_i']) is not None - def _is_ultra2(self): - return re.match(r'.*Ultra-2', self.info['uname_i']) is not None - def _is_ultra30(self): - return re.match(r'.*Ultra-30', self.info['uname_i']) is not None - def _is_ultra4(self): - return re.match(r'.*Ultra-4', self.info['uname_i']) is not None - def _is_ultra5_10(self): - return re.match(r'.*Ultra-5_10', self.info['uname_i']) is not None - def _is_ultra5(self): - return re.match(r'.*Ultra-5', self.info['uname_i']) is not None - def _is_ultra60(self): - return re.match(r'.*Ultra-60', self.info['uname_i']) is not None - def _is_ultra80(self): - return re.match(r'.*Ultra-80', self.info['uname_i']) is not None - def _is_ultraenterprice(self): - return re.match(r'.*Ultra-Enterprise', self.info['uname_i']) is not None - def _is_ultraenterprice10k(self): - return re.match(r'.*Ultra-Enterprise-10000', self.info['uname_i']) is not None - def _is_sunfire(self): - return re.match(r'.*Sun-Fire', self.info['uname_i']) is not None - def _is_ultra(self): - return re.match(r'.*Ultra', self.info['uname_i']) is not None - - def _is_cpusparcv7(self): - return self.info['processor']=='sparcv7' - def 
_is_cpusparcv8(self): - return self.info['processor']=='sparcv8' - def _is_cpusparcv9(self): - return self.info['processor']=='sparcv9' - -class Win32CPUInfo(CPUInfoBase): - - info = None - pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor" - # XXX: what does the value of - # HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0 - # mean? - - def __init__(self): - if self.info is not None: - return - info = [] - try: - #XXX: Bad style to use so long `try:...except:...`. Fix it! - if sys.version_info[0] >= 3: - import winreg - else: - import _winreg as winreg - - prgx = re.compile(r"family\s+(?P\d+)\s+model\s+(?P\d+)" - r"\s+stepping\s+(?P\d+)", re.IGNORECASE) - chnd=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self.pkey) - pnum=0 - while True: - try: - proc=winreg.EnumKey(chnd, pnum) - except winreg.error: - break - else: - pnum+=1 - info.append({"Processor":proc}) - phnd=winreg.OpenKey(chnd, proc) - pidx=0 - while True: - try: - name, value, vtpe=winreg.EnumValue(phnd, pidx) - except winreg.error: - break - else: - pidx=pidx+1 - info[-1][name]=value - if name=="Identifier": - srch=prgx.search(value) - if srch: - info[-1]["Family"]=int(srch.group("FML")) - info[-1]["Model"]=int(srch.group("MDL")) - info[-1]["Stepping"]=int(srch.group("STP")) - except Exception: - print(sys.exc_info()[1], '(ignoring)') - self.__class__.info = info - - def _not_impl(self): pass - - # Athlon - - def _is_AMD(self): - return self.info[0]['VendorIdentifier']=='AuthenticAMD' - - def _is_Am486(self): - return self.is_AMD() and self.info[0]['Family']==4 - - def _is_Am5x86(self): - return self.is_AMD() and self.info[0]['Family']==4 - - def _is_AMDK5(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model'] in [0, 1, 2, 3] - - def _is_AMDK6(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model'] in [6, 7] - - def _is_AMDK6_2(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and 
self.info[0]['Model']==8 - - def _is_AMDK6_3(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model']==9 - - def _is_AMDK7(self): - return self.is_AMD() and self.info[0]['Family'] == 6 - - # To reliably distinguish between the different types of AMD64 chips - # (Athlon64, Operton, Athlon64 X2, Semperon, Turion 64, etc.) would - # require looking at the 'brand' from cpuid - - def _is_AMD64(self): - return self.is_AMD() and self.info[0]['Family'] == 15 - - # Intel - - def _is_Intel(self): - return self.info[0]['VendorIdentifier']=='GenuineIntel' - - def _is_i386(self): - return self.info[0]['Family']==3 - - def _is_i486(self): - return self.info[0]['Family']==4 - - def _is_i586(self): - return self.is_Intel() and self.info[0]['Family']==5 - - def _is_i686(self): - return self.is_Intel() and self.info[0]['Family']==6 - - def _is_Pentium(self): - return self.is_Intel() and self.info[0]['Family']==5 - - def _is_PentiumMMX(self): - return self.is_Intel() and self.info[0]['Family']==5 \ - and self.info[0]['Model']==4 - - def _is_PentiumPro(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model']==1 - - def _is_PentiumII(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model'] in [3, 5, 6] - - def _is_PentiumIII(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model'] in [7, 8, 9, 10, 11] - - def _is_PentiumIV(self): - return self.is_Intel() and self.info[0]['Family']==15 - - def _is_PentiumM(self): - return self.is_Intel() and self.info[0]['Family'] == 6 \ - and self.info[0]['Model'] in [9, 13, 14] - - def _is_Core2(self): - return self.is_Intel() and self.info[0]['Family'] == 6 \ - and self.info[0]['Model'] in [15, 16, 17] - - # Varia - - def _is_singleCPU(self): - return len(self.info) == 1 - - def _getNCPUs(self): - return len(self.info) - - def _has_mmx(self): - if self.is_Intel(): - return (self.info[0]['Family']==5 and 
self.info[0]['Model']==4) \ - or (self.info[0]['Family'] in [6, 15]) - elif self.is_AMD(): - return self.info[0]['Family'] in [5, 6, 15] - else: - return False - - def _has_sse(self): - if self.is_Intel(): - return ((self.info[0]['Family']==6 and - self.info[0]['Model'] in [7, 8, 9, 10, 11]) - or self.info[0]['Family']==15) - elif self.is_AMD(): - return ((self.info[0]['Family']==6 and - self.info[0]['Model'] in [6, 7, 8, 10]) - or self.info[0]['Family']==15) - else: - return False - - def _has_sse2(self): - if self.is_Intel(): - return self.is_Pentium4() or self.is_PentiumM() \ - or self.is_Core2() - elif self.is_AMD(): - return self.is_AMD64() - else: - return False - - def _has_3dnow(self): - return self.is_AMD() and self.info[0]['Family'] in [5, 6, 15] - - def _has_3dnowext(self): - return self.is_AMD() and self.info[0]['Family'] in [6, 15] - -if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?) - cpuinfo = LinuxCPUInfo -elif sys.platform.startswith('irix'): - cpuinfo = IRIXCPUInfo -elif sys.platform == 'darwin': - cpuinfo = DarwinCPUInfo -elif sys.platform.startswith('sunos'): - cpuinfo = SunOSCPUInfo -elif sys.platform.startswith('win32'): - cpuinfo = Win32CPUInfo -elif sys.platform.startswith('cygwin'): - cpuinfo = LinuxCPUInfo -#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices. 
-else: - cpuinfo = CPUInfoBase - -cpu = cpuinfo() - -#if __name__ == "__main__": -# -# cpu.is_blaa() -# cpu.is_Intel() -# cpu.is_Alpha() -# -# print('CPU information:'), -# for name in dir(cpuinfo): -# if name[0]=='_' and name[1]!='_': -# r = getattr(cpu,name[1:])() -# if r: -# if r!=1: -# print('%s=%s' %(name[1:],r)) -# else: -# print(name[1:]), -# print() diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/exec_command.py b/venv/lib/python3.7/site-packages/numpy/distutils/exec_command.py deleted file mode 100644 index 712f226..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/exec_command.py +++ /dev/null @@ -1,330 +0,0 @@ -""" -exec_command - -Implements exec_command function that is (almost) equivalent to -commands.getstatusoutput function but on NT, DOS systems the -returned status is actually correct (though, the returned status -values may be different by a factor). In addition, exec_command -takes keyword arguments for (re-)defining environment variables. - -Provides functions: - - exec_command --- execute command in a specified directory and - in the modified environment. - find_executable --- locate a command using info from environment - variable PATH. Equivalent to posix `which` - command. - -Author: Pearu Peterson -Created: 11 January 2003 - -Requires: Python 2.x - -Successfully tested on: - -======== ============ ================================================= -os.name sys.platform comments -======== ============ ================================================= -posix linux2 Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3 - PyCrust 0.9.3, Idle 1.0.2 -posix linux2 Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2 -posix sunos5 SunOS 5.9, Python 2.2, 2.3.2 -posix darwin Darwin 7.2.0, Python 2.3 -nt win32 Windows Me - Python 2.3(EE), Idle 1.0, PyCrust 0.7.2 - Python 2.1.1 Idle 0.8 -nt win32 Windows 98, Python 2.1.1. Idle 0.8 -nt win32 Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests - fail i.e. 
redefining environment variables may - not work. FIXED: don't use cygwin echo! - Comment: also `cmd /c echo` will not work - but redefining environment variables do work. -posix cygwin Cygwin 98-4.10, Python 2.3.3(cygming special) -nt win32 Windows XP, Python 2.3.3 -======== ============ ================================================= - -Known bugs: - -* Tests, that send messages to stderr, fail when executed from MSYS prompt - because the messages are lost at some point. - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['exec_command', 'find_executable'] - -import os -import sys -import subprocess -import locale -import warnings - -from numpy.distutils.misc_util import is_sequence, make_temp_file -from numpy.distutils import log - -def filepath_from_subprocess_output(output): - """ - Convert `bytes` in the encoding used by a subprocess into a filesystem-appropriate `str`. - - Inherited from `exec_command`, and possibly incorrect. - """ - mylocale = locale.getpreferredencoding(False) - if mylocale is None: - mylocale = 'ascii' - output = output.decode(mylocale, errors='replace') - output = output.replace('\r\n', '\n') - # Another historical oddity - if output[-1:] == '\n': - output = output[:-1] - # stdio uses bytes in python 2, so to avoid issues, we simply - # remove all non-ascii characters - if sys.version_info < (3, 0): - output = output.encode('ascii', errors='replace') - return output - - -def forward_bytes_to_stdout(val): - """ - Forward bytes from a subprocess call to the console, without attempting to - decode them. - - The assumption is that the subprocess call already returned bytes in - a suitable encoding. 
- """ - if sys.version_info.major < 3: - # python 2 has binary output anyway - sys.stdout.write(val) - elif hasattr(sys.stdout, 'buffer'): - # use the underlying binary output if there is one - sys.stdout.buffer.write(val) - elif hasattr(sys.stdout, 'encoding'): - # round-trip the encoding if necessary - sys.stdout.write(val.decode(sys.stdout.encoding)) - else: - # make a best-guess at the encoding - sys.stdout.write(val.decode('utf8', errors='replace')) - - -def temp_file_name(): - # 2019-01-30, 1.17 - warnings.warn('temp_file_name is deprecated since NumPy v1.17, use ' - 'tempfile.mkstemp instead', DeprecationWarning, stacklevel=1) - fo, name = make_temp_file() - fo.close() - return name - -def get_pythonexe(): - pythonexe = sys.executable - if os.name in ['nt', 'dos']: - fdir, fn = os.path.split(pythonexe) - fn = fn.upper().replace('PYTHONW', 'PYTHON') - pythonexe = os.path.join(fdir, fn) - assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,) - return pythonexe - -def find_executable(exe, path=None, _cache={}): - """Return full path of a executable or None. - - Symbolic links are not followed. 
- """ - key = exe, path - try: - return _cache[key] - except KeyError: - pass - log.debug('find_executable(%r)' % exe) - orig_exe = exe - - if path is None: - path = os.environ.get('PATH', os.defpath) - if os.name=='posix': - realpath = os.path.realpath - else: - realpath = lambda a:a - - if exe.startswith('"'): - exe = exe[1:-1] - - suffixes = [''] - if os.name in ['nt', 'dos', 'os2']: - fn, ext = os.path.splitext(exe) - extra_suffixes = ['.exe', '.com', '.bat'] - if ext.lower() not in extra_suffixes: - suffixes = extra_suffixes - - if os.path.isabs(exe): - paths = [''] - else: - paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ] - - for path in paths: - fn = os.path.join(path, exe) - for s in suffixes: - f_ext = fn+s - if not os.path.islink(f_ext): - f_ext = realpath(f_ext) - if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK): - log.info('Found executable %s' % f_ext) - _cache[key] = f_ext - return f_ext - - log.warn('Could not locate executable %s' % orig_exe) - return None - -############################################################ - -def _preserve_environment( names ): - log.debug('_preserve_environment(%r)' % (names)) - env = {name: os.environ.get(name) for name in names} - return env - -def _update_environment( **env ): - log.debug('_update_environment(...)') - for name, value in env.items(): - os.environ[name] = value or '' - -def exec_command(command, execute_in='', use_shell=None, use_tee=None, - _with_python = 1, **env ): - """ - Return (status,output) of executed command. - - .. deprecated:: 1.17 - Use subprocess.Popen instead - - Parameters - ---------- - command : str - A concatenated string of executable and arguments. - execute_in : str - Before running command ``cd execute_in`` and after ``cd -``. - use_shell : {bool, None}, optional - If True, execute ``sh -c command``. Default None (True) - use_tee : {bool, None}, optional - If True use tee. 
Default None (True) - - - Returns - ------- - res : str - Both stdout and stderr messages. - - Notes - ----- - On NT, DOS systems the returned status is correct for external commands. - Wild cards will not work for non-posix systems or when use_shell=0. - - """ - # 2019-01-30, 1.17 - warnings.warn('exec_command is deprecated since NumPy v1.17, use ' - 'subprocess.Popen instead', DeprecationWarning, stacklevel=1) - log.debug('exec_command(%r,%s)' % (command, - ','.join(['%s=%r'%kv for kv in env.items()]))) - - if use_tee is None: - use_tee = os.name=='posix' - if use_shell is None: - use_shell = os.name=='posix' - execute_in = os.path.abspath(execute_in) - oldcwd = os.path.abspath(os.getcwd()) - - if __name__[-12:] == 'exec_command': - exec_dir = os.path.dirname(os.path.abspath(__file__)) - elif os.path.isfile('exec_command.py'): - exec_dir = os.path.abspath('.') - else: - exec_dir = os.path.abspath(sys.argv[0]) - if os.path.isfile(exec_dir): - exec_dir = os.path.dirname(exec_dir) - - if oldcwd!=execute_in: - os.chdir(execute_in) - log.debug('New cwd: %s' % execute_in) - else: - log.debug('Retaining cwd: %s' % oldcwd) - - oldenv = _preserve_environment( list(env.keys()) ) - _update_environment( **env ) - - try: - st = _exec_command(command, - use_shell=use_shell, - use_tee=use_tee, - **env) - finally: - if oldcwd!=execute_in: - os.chdir(oldcwd) - log.debug('Restored cwd to %s' % oldcwd) - _update_environment(**oldenv) - - return st - - -def _exec_command(command, use_shell=None, use_tee = None, **env): - """ - Internal workhorse for exec_command(). 
- """ - if use_shell is None: - use_shell = os.name=='posix' - if use_tee is None: - use_tee = os.name=='posix' - - if os.name == 'posix' and use_shell: - # On POSIX, subprocess always uses /bin/sh, override - sh = os.environ.get('SHELL', '/bin/sh') - if is_sequence(command): - command = [sh, '-c', ' '.join(command)] - else: - command = [sh, '-c', command] - use_shell = False - - elif os.name == 'nt' and is_sequence(command): - # On Windows, join the string for CreateProcess() ourselves as - # subprocess does it a bit differently - command = ' '.join(_quote_arg(arg) for arg in command) - - # Inherit environment by default - env = env or None - try: - # universal_newlines is set to False so that communicate() - # will return bytes. We need to decode the output ourselves - # so that Python will not raise a UnicodeDecodeError when - # it encounters an invalid character; rather, we simply replace it - proc = subprocess.Popen(command, shell=use_shell, env=env, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=False) - except EnvironmentError: - # Return 127, as os.spawn*() and /bin/sh do - return 127, '' - - text, err = proc.communicate() - mylocale = locale.getpreferredencoding(False) - if mylocale is None: - mylocale = 'ascii' - text = text.decode(mylocale, errors='replace') - text = text.replace('\r\n', '\n') - # Another historical oddity - if text[-1:] == '\n': - text = text[:-1] - - # stdio uses bytes in python 2, so to avoid issues, we simply - # remove all non-ascii characters - if sys.version_info < (3, 0): - text = text.encode('ascii', errors='replace') - - if use_tee and text: - print(text) - return proc.returncode, text - - -def _quote_arg(arg): - """ - Quote the argument for safe use in a shell command line. - """ - # If there is a quote in the string, assume relevants parts of the - # string are already quoted (e.g. 
'-I"C:\\Program Files\\..."') - if '"' not in arg and ' ' in arg: - return '"%s"' % arg - return arg - -############################################################ diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/extension.py b/venv/lib/python3.7/site-packages/numpy/distutils/extension.py deleted file mode 100644 index 872bd53..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/extension.py +++ /dev/null @@ -1,109 +0,0 @@ -"""distutils.extension - -Provides the Extension class, used to describe C/C++ extension -modules in setup scripts. - -Overridden to support f2py. - -""" -from __future__ import division, absolute_import, print_function - -import sys -import re -from distutils.extension import Extension as old_Extension - -if sys.version_info[0] >= 3: - basestring = str - - -cxx_ext_re = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match -fortran_pyf_ext_re = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match - - -class Extension(old_Extension): - """ - Parameters - ---------- - name : str - Extension name. - sources : list of str - List of source file locations relative to the top directory of - the package. - extra_compile_args : list of str - Extra command line arguments to pass to the compiler. - extra_f77_compile_args : list of str - Extra command line arguments to pass to the fortran77 compiler. - extra_f90_compile_args : list of str - Extra command line arguments to pass to the fortran90 compiler. 
- """ - def __init__( - self, name, sources, - include_dirs=None, - define_macros=None, - undef_macros=None, - library_dirs=None, - libraries=None, - runtime_library_dirs=None, - extra_objects=None, - extra_compile_args=None, - extra_link_args=None, - export_symbols=None, - swig_opts=None, - depends=None, - language=None, - f2py_options=None, - module_dirs=None, - extra_f77_compile_args=None, - extra_f90_compile_args=None,): - - old_Extension.__init__( - self, name, [], - include_dirs=include_dirs, - define_macros=define_macros, - undef_macros=undef_macros, - library_dirs=library_dirs, - libraries=libraries, - runtime_library_dirs=runtime_library_dirs, - extra_objects=extra_objects, - extra_compile_args=extra_compile_args, - extra_link_args=extra_link_args, - export_symbols=export_symbols) - - # Avoid assert statements checking that sources contains strings: - self.sources = sources - - # Python 2.4 distutils new features - self.swig_opts = swig_opts or [] - # swig_opts is assumed to be a list. Here we handle the case where it - # is specified as a string instead. 
- if isinstance(self.swig_opts, basestring): - import warnings - msg = "swig_opts is specified as a string instead of a list" - warnings.warn(msg, SyntaxWarning, stacklevel=2) - self.swig_opts = self.swig_opts.split() - - # Python 2.3 distutils new features - self.depends = depends or [] - self.language = language - - # numpy_distutils features - self.f2py_options = f2py_options or [] - self.module_dirs = module_dirs or [] - self.extra_f77_compile_args = extra_f77_compile_args or [] - self.extra_f90_compile_args = extra_f90_compile_args or [] - - return - - def has_cxx_sources(self): - for source in self.sources: - if cxx_ext_re(str(source)): - return True - return False - - def has_f2py_sources(self): - for source in self.sources: - if fortran_pyf_ext_re(source): - return True - return False - -# class Extension diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/__init__.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/__init__.py deleted file mode 100644 index 3723470..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/__init__.py +++ /dev/null @@ -1,1032 +0,0 @@ -"""numpy.distutils.fcompiler - -Contains FCompiler, an abstract base class that defines the interface -for the numpy.distutils Fortran compiler abstraction model. - -Terminology: - -To be consistent, where the term 'executable' is used, it means the single -file, like 'gcc', that is executed, and should be a string. In contrast, -'command' means the entire command line, like ['gcc', '-c', 'file.c'], and -should be a list. - -But note that FCompiler.executables is actually a dictionary of commands. 
- -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['FCompiler', 'new_fcompiler', 'show_fcompilers', - 'dummy_fortran_file'] - -import os -import sys -import re -import types - -from numpy.compat import open_latin1 - -from distutils.sysconfig import get_python_lib -from distutils.fancy_getopt import FancyGetopt -from distutils.errors import DistutilsModuleError, \ - DistutilsExecError, CompileError, LinkError, DistutilsPlatformError -from distutils.util import split_quoted, strtobool - -from numpy.distutils.ccompiler import CCompiler, gen_lib_options -from numpy.distutils import log -from numpy.distutils.misc_util import is_string, all_strings, is_sequence, \ - make_temp_file, get_shared_lib_extension -from numpy.distutils.exec_command import find_executable -from numpy.distutils.compat import get_exception -from numpy.distutils import _shell_utils - -from .environment import EnvironmentConfig - -__metaclass__ = type - -class CompilerNotFound(Exception): - pass - -def flaglist(s): - if is_string(s): - return split_quoted(s) - else: - return s - -def str2bool(s): - if is_string(s): - return strtobool(s) - return bool(s) - -def is_sequence_of_strings(seq): - return is_sequence(seq) and all_strings(seq) - -class FCompiler(CCompiler): - """Abstract base class to define the interface that must be implemented - by real Fortran compiler classes. - - Methods that subclasses may redefine: - - update_executables(), find_executables(), get_version() - get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug() - get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(), - get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(), - get_flags_arch_f90(), get_flags_debug_f90(), - get_flags_fix(), get_flags_linker_so() - - DON'T call these methods (except get_version) after - constructing a compiler instance or inside any other method. 
- All methods, except update_executables() and find_executables(), - may call the get_version() method. - - After constructing a compiler instance, always call customize(dist=None) - method that finalizes compiler construction and makes the following - attributes available: - compiler_f77 - compiler_f90 - compiler_fix - linker_so - archiver - ranlib - libraries - library_dirs - """ - - # These are the environment variables and distutils keys used. - # Each configuration description is - # (, , , , ) - # The hook names are handled by the self._environment_hook method. - # - names starting with 'self.' call methods in this class - # - names starting with 'exe.' return the key in the executables dict - # - names like 'flags.YYY' return self.get_flag_YYY() - # convert is either None or a function to convert a string to the - # appropriate type used. - - distutils_vars = EnvironmentConfig( - distutils_section='config_fc', - noopt = (None, None, 'noopt', str2bool, False), - noarch = (None, None, 'noarch', str2bool, False), - debug = (None, None, 'debug', str2bool, False), - verbose = (None, None, 'verbose', str2bool, False), - ) - - command_vars = EnvironmentConfig( - distutils_section='config_fc', - compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None, False), - compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None, False), - compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None, False), - version_cmd = ('exe.version_cmd', None, None, None, False), - linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None, False), - linker_exe = ('exe.linker_exe', 'LD', 'ld', None, False), - archiver = (None, 'AR', 'ar', None, False), - ranlib = (None, 'RANLIB', 'ranlib', None, False), - ) - - flag_vars = EnvironmentConfig( - distutils_section='config_fc', - f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist, True), - f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist, True), - free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist, True), - fix = ('flags.fix', 
None, None, flaglist, False), - opt = ('flags.opt', 'FOPT', 'opt', flaglist, True), - opt_f77 = ('flags.opt_f77', None, None, flaglist, False), - opt_f90 = ('flags.opt_f90', None, None, flaglist, False), - arch = ('flags.arch', 'FARCH', 'arch', flaglist, False), - arch_f77 = ('flags.arch_f77', None, None, flaglist, False), - arch_f90 = ('flags.arch_f90', None, None, flaglist, False), - debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist, True), - debug_f77 = ('flags.debug_f77', None, None, flaglist, False), - debug_f90 = ('flags.debug_f90', None, None, flaglist, False), - flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist, True), - linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist, True), - linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist, True), - ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist, True), - ) - - language_map = {'.f': 'f77', - '.for': 'f77', - '.F': 'f77', # XXX: needs preprocessor - '.ftn': 'f77', - '.f77': 'f77', - '.f90': 'f90', - '.F90': 'f90', # XXX: needs preprocessor - '.f95': 'f90', - } - language_order = ['f90', 'f77'] - - - # These will be set by the subclass - - compiler_type = None - compiler_aliases = () - version_pattern = None - - possible_executables = [] - executables = { - 'version_cmd': ["f77", "-v"], - 'compiler_f77': ["f77"], - 'compiler_f90': ["f90"], - 'compiler_fix': ["f90", "-fixed"], - 'linker_so': ["f90", "-shared"], - 'linker_exe': ["f90"], - 'archiver': ["ar", "-cr"], - 'ranlib': None, - } - - # If compiler does not support compiling Fortran 90 then it can - # suggest using another compiler. For example, gnu would suggest - # gnu95 compiler type when there are F90 sources. - suggested_f90_compiler = None - - compile_switch = "-c" - object_switch = "-o " # Ending space matters! It will be stripped - # but if it is missing then object_switch - # will be prefixed to object file name by - # string concatenation. - library_switch = "-o " # Ditto! 
- - # Switch to specify where module files are created and searched - # for USE statement. Normally it is a string and also here ending - # space matters. See above. - module_dir_switch = None - - # Switch to specify where module files are searched for USE statement. - module_include_switch = '-I' - - pic_flags = [] # Flags to create position-independent code - - src_extensions = ['.for', '.ftn', '.f77', '.f', '.f90', '.f95', '.F', '.F90', '.FOR'] - obj_extension = ".o" - - shared_lib_extension = get_shared_lib_extension() - static_lib_extension = ".a" # or .lib - static_lib_format = "lib%s%s" # or %s%s - shared_lib_format = "%s%s" - exe_extension = "" - - _exe_cache = {} - - _executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90', - 'compiler_fix', 'linker_so', 'linker_exe', 'archiver', - 'ranlib'] - - # This will be set by new_fcompiler when called in - # command/{build_ext.py, build_clib.py, config.py} files. - c_compiler = None - - # extra_{f77,f90}_compile_args are set by build_ext.build_extension method - extra_f77_compile_args = [] - extra_f90_compile_args = [] - - def __init__(self, *args, **kw): - CCompiler.__init__(self, *args, **kw) - self.distutils_vars = self.distutils_vars.clone(self._environment_hook) - self.command_vars = self.command_vars.clone(self._environment_hook) - self.flag_vars = self.flag_vars.clone(self._environment_hook) - self.executables = self.executables.copy() - for e in self._executable_keys: - if e not in self.executables: - self.executables[e] = None - - # Some methods depend on .customize() being called first, so - # this keeps track of whether that's happened yet. 
- self._is_customised = False - - def __copy__(self): - obj = self.__new__(self.__class__) - obj.__dict__.update(self.__dict__) - obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook) - obj.command_vars = obj.command_vars.clone(obj._environment_hook) - obj.flag_vars = obj.flag_vars.clone(obj._environment_hook) - obj.executables = obj.executables.copy() - return obj - - def copy(self): - return self.__copy__() - - # Use properties for the attributes used by CCompiler. Setting them - # as attributes from the self.executables dictionary is error-prone, - # so we get them from there each time. - def _command_property(key): - def fget(self): - assert self._is_customised - return self.executables[key] - return property(fget=fget) - version_cmd = _command_property('version_cmd') - compiler_f77 = _command_property('compiler_f77') - compiler_f90 = _command_property('compiler_f90') - compiler_fix = _command_property('compiler_fix') - linker_so = _command_property('linker_so') - linker_exe = _command_property('linker_exe') - archiver = _command_property('archiver') - ranlib = _command_property('ranlib') - - # Make our terminology consistent. - def set_executable(self, key, value): - self.set_command(key, value) - - def set_commands(self, **kw): - for k, v in kw.items(): - self.set_command(k, v) - - def set_command(self, key, value): - if not key in self._executable_keys: - raise ValueError( - "unknown executable '%s' for class %s" % - (key, self.__class__.__name__)) - if is_string(value): - value = split_quoted(value) - assert value is None or is_sequence_of_strings(value[1:]), (key, value) - self.executables[key] = value - - ###################################################################### - ## Methods that subclasses may redefine. But don't call these methods! - ## They are private to FCompiler class and may return unexpected - ## results if used elsewhere. So, you have been warned.. 
- - def find_executables(self): - """Go through the self.executables dictionary, and attempt to - find and assign appropriate executables. - - Executable names are looked for in the environment (environment - variables, the distutils.cfg, and command line), the 0th-element of - the command list, and the self.possible_executables list. - - Also, if the 0th element is "" or "", the Fortran 77 - or the Fortran 90 compiler executable is used, unless overridden - by an environment setting. - - Subclasses should call this if overridden. - """ - assert self._is_customised - exe_cache = self._exe_cache - def cached_find_executable(exe): - if exe in exe_cache: - return exe_cache[exe] - fc_exe = find_executable(exe) - exe_cache[exe] = exe_cache[fc_exe] = fc_exe - return fc_exe - def verify_command_form(name, value): - if value is not None and not is_sequence_of_strings(value): - raise ValueError( - "%s value %r is invalid in class %s" % - (name, value, self.__class__.__name__)) - def set_exe(exe_key, f77=None, f90=None): - cmd = self.executables.get(exe_key, None) - if not cmd: - return None - # Note that we get cmd[0] here if the environment doesn't - # have anything set - exe_from_environ = getattr(self.command_vars, exe_key) - if not exe_from_environ: - possibles = [f90, f77] + self.possible_executables - else: - possibles = [exe_from_environ] + self.possible_executables - - seen = set() - unique_possibles = [] - for e in possibles: - if e == '': - e = f77 - elif e == '': - e = f90 - if not e or e in seen: - continue - seen.add(e) - unique_possibles.append(e) - - for exe in unique_possibles: - fc_exe = cached_find_executable(exe) - if fc_exe: - cmd[0] = fc_exe - return fc_exe - self.set_command(exe_key, None) - return None - - ctype = self.compiler_type - f90 = set_exe('compiler_f90') - if not f90: - f77 = set_exe('compiler_f77') - if f77: - log.warn('%s: no Fortran 90 compiler found' % ctype) - else: - raise CompilerNotFound('%s: f90 nor f77' % ctype) - else: - f77 = 
set_exe('compiler_f77', f90=f90) - if not f77: - log.warn('%s: no Fortran 77 compiler found' % ctype) - set_exe('compiler_fix', f90=f90) - - set_exe('linker_so', f77=f77, f90=f90) - set_exe('linker_exe', f77=f77, f90=f90) - set_exe('version_cmd', f77=f77, f90=f90) - set_exe('archiver') - set_exe('ranlib') - - def update_executables(self): - """Called at the beginning of customisation. Subclasses should - override this if they need to set up the executables dictionary. - - Note that self.find_executables() is run afterwards, so the - self.executables dictionary values can contain or as - the command, which will be replaced by the found F77 or F90 - compiler. - """ - pass - - def get_flags(self): - """List of flags common to all compiler types.""" - return [] + self.pic_flags - - def _get_command_flags(self, key): - cmd = self.executables.get(key, None) - if cmd is None: - return [] - return cmd[1:] - - def get_flags_f77(self): - """List of Fortran 77 specific flags.""" - return self._get_command_flags('compiler_f77') - def get_flags_f90(self): - """List of Fortran 90 specific flags.""" - return self._get_command_flags('compiler_f90') - def get_flags_free(self): - """List of Fortran 90 free format specific flags.""" - return [] - def get_flags_fix(self): - """List of Fortran 90 fixed format specific flags.""" - return self._get_command_flags('compiler_fix') - def get_flags_linker_so(self): - """List of linker flags to build a shared library.""" - return self._get_command_flags('linker_so') - def get_flags_linker_exe(self): - """List of linker flags to build an executable.""" - return self._get_command_flags('linker_exe') - def get_flags_ar(self): - """List of archiver flags. 
""" - return self._get_command_flags('archiver') - def get_flags_opt(self): - """List of architecture independent compiler flags.""" - return [] - def get_flags_arch(self): - """List of architecture dependent compiler flags.""" - return [] - def get_flags_debug(self): - """List of compiler flags to compile with debugging information.""" - return [] - - get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt - get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch - get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug - - def get_libraries(self): - """List of compiler libraries.""" - return self.libraries[:] - def get_library_dirs(self): - """List of compiler library directories.""" - return self.library_dirs[:] - - def get_version(self, force=False, ok_status=[0]): - assert self._is_customised - version = CCompiler.get_version(self, force=force, ok_status=ok_status) - if version is None: - raise CompilerNotFound() - return version - - - ############################################################ - - ## Public methods: - - def customize(self, dist = None): - """Customize Fortran compiler. - - This method gets Fortran compiler specific information from - (i) class definition, (ii) environment, (iii) distutils config - files, and (iv) command line (later overrides earlier). - - This method should be always called after constructing a - compiler instance. But not in __init__ because Distribution - instance is needed for (iii) and (iv). 
- """ - log.info('customize %s' % (self.__class__.__name__)) - - self._is_customised = True - - self.distutils_vars.use_distribution(dist) - self.command_vars.use_distribution(dist) - self.flag_vars.use_distribution(dist) - - self.update_executables() - - # find_executables takes care of setting the compiler commands, - # version_cmd, linker_so, linker_exe, ar, and ranlib - self.find_executables() - - noopt = self.distutils_vars.get('noopt', False) - noarch = self.distutils_vars.get('noarch', noopt) - debug = self.distutils_vars.get('debug', False) - - f77 = self.command_vars.compiler_f77 - f90 = self.command_vars.compiler_f90 - - f77flags = [] - f90flags = [] - freeflags = [] - fixflags = [] - - if f77: - f77 = _shell_utils.NativeParser.split(f77) - f77flags = self.flag_vars.f77 - if f90: - f90 = _shell_utils.NativeParser.split(f90) - f90flags = self.flag_vars.f90 - freeflags = self.flag_vars.free - # XXX Assuming that free format is default for f90 compiler. - fix = self.command_vars.compiler_fix - # NOTE: this and similar examples are probably just - # excluding --coverage flag when F90 = gfortran --coverage - # instead of putting that flag somewhere more appropriate - # this and similar examples where a Fortran compiler - # environment variable has been customized by CI or a user - # should perhaps eventually be more thoroughly tested and more - # robustly handled - if fix: - fix = _shell_utils.NativeParser.split(fix) - fixflags = self.flag_vars.fix + f90flags - - oflags, aflags, dflags = [], [], [] - # examine get_flags__ for extra flags - # only add them if the method is different from get_flags_ - def get_flags(tag, flags): - # note that self.flag_vars. 
calls self.get_flags_() - flags.extend(getattr(self.flag_vars, tag)) - this_get = getattr(self, 'get_flags_' + tag) - for name, c, flagvar in [('f77', f77, f77flags), - ('f90', f90, f90flags), - ('f90', fix, fixflags)]: - t = '%s_%s' % (tag, name) - if c and this_get is not getattr(self, 'get_flags_' + t): - flagvar.extend(getattr(self.flag_vars, t)) - if not noopt: - get_flags('opt', oflags) - if not noarch: - get_flags('arch', aflags) - if debug: - get_flags('debug', dflags) - - fflags = self.flag_vars.flags + dflags + oflags + aflags - - if f77: - self.set_commands(compiler_f77=f77+f77flags+fflags) - if f90: - self.set_commands(compiler_f90=f90+freeflags+f90flags+fflags) - if fix: - self.set_commands(compiler_fix=fix+fixflags+fflags) - - - #XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS - linker_so = self.linker_so - if linker_so: - linker_so_flags = self.flag_vars.linker_so - if sys.platform.startswith('aix'): - python_lib = get_python_lib(standard_lib=1) - ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix') - python_exp = os.path.join(python_lib, 'config', 'python.exp') - linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp] - self.set_commands(linker_so=linker_so+linker_so_flags) - - linker_exe = self.linker_exe - if linker_exe: - linker_exe_flags = self.flag_vars.linker_exe - self.set_commands(linker_exe=linker_exe+linker_exe_flags) - - ar = self.command_vars.archiver - if ar: - arflags = self.flag_vars.ar - self.set_commands(archiver=[ar]+arflags) - - self.set_library_dirs(self.get_library_dirs()) - self.set_libraries(self.get_libraries()) - - def dump_properties(self): - """Print out the attributes of a compiler instance.""" - props = [] - for key in list(self.executables.keys()) + \ - ['version', 'libraries', 'library_dirs', - 'object_switch', 'compile_switch']: - if hasattr(self, key): - v = getattr(self, key) - props.append((key, None, '= '+repr(v))) - props.sort() - - pretty_printer = FancyGetopt(props) - for l in 
pretty_printer.generate_help("%s instance properties:" \ - % (self.__class__.__name__)): - if l[:4]==' --': - l = ' ' + l[4:] - print(l) - - ################### - - def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - """Compile 'src' to product 'obj'.""" - src_flags = {} - if is_f_file(src) and not has_f90_header(src): - flavor = ':f77' - compiler = self.compiler_f77 - src_flags = get_f77flags(src) - extra_compile_args = self.extra_f77_compile_args or [] - elif is_free_format(src): - flavor = ':f90' - compiler = self.compiler_f90 - if compiler is None: - raise DistutilsExecError('f90 not supported by %s needed for %s'\ - % (self.__class__.__name__, src)) - extra_compile_args = self.extra_f90_compile_args or [] - else: - flavor = ':fix' - compiler = self.compiler_fix - if compiler is None: - raise DistutilsExecError('f90 (fixed) not supported by %s needed for %s'\ - % (self.__class__.__name__, src)) - extra_compile_args = self.extra_f90_compile_args or [] - if self.object_switch[-1]==' ': - o_args = [self.object_switch.strip(), obj] - else: - o_args = [self.object_switch.strip()+obj] - - assert self.compile_switch.strip() - s_args = [self.compile_switch, src] - - if extra_compile_args: - log.info('extra %s options: %r' \ - % (flavor[1:], ' '.join(extra_compile_args))) - - extra_flags = src_flags.get(self.compiler_type, []) - if extra_flags: - log.info('using compile options from source: %r' \ - % ' '.join(extra_flags)) - - command = compiler + cc_args + extra_flags + s_args + o_args \ - + extra_postargs + extra_compile_args - - display = '%s: %s' % (os.path.basename(compiler[0]) + flavor, - src) - try: - self.spawn(command, display=display) - except DistutilsExecError: - msg = str(get_exception()) - raise CompileError(msg) - - def module_options(self, module_dirs, module_build_dir): - options = [] - if self.module_dir_switch is not None: - if self.module_dir_switch[-1]==' ': - options.extend([self.module_dir_switch.strip(), module_build_dir]) - 
else: - options.append(self.module_dir_switch.strip()+module_build_dir) - else: - print('XXX: module_build_dir=%r option ignored' % (module_build_dir)) - print('XXX: Fix module_dir_switch for ', self.__class__.__name__) - if self.module_include_switch is not None: - for d in [module_build_dir]+module_dirs: - options.append('%s%s' % (self.module_include_switch, d)) - else: - print('XXX: module_dirs=%r option ignored' % (module_dirs)) - print('XXX: Fix module_include_switch for ', self.__class__.__name__) - return options - - def library_option(self, lib): - return "-l" + lib - def library_dir_option(self, dir): - return "-L" + dir - - def link(self, target_desc, objects, - output_filename, output_dir=None, libraries=None, - library_dirs=None, runtime_library_dirs=None, - export_symbols=None, debug=0, extra_preargs=None, - extra_postargs=None, build_temp=None, target_lang=None): - objects, output_dir = self._fix_object_args(objects, output_dir) - libraries, library_dirs, runtime_library_dirs = \ - self._fix_lib_args(libraries, library_dirs, runtime_library_dirs) - - lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, - libraries) - if is_string(output_dir): - output_filename = os.path.join(output_dir, output_filename) - elif output_dir is not None: - raise TypeError("'output_dir' must be a string or None") - - if self._need_link(objects, output_filename): - if self.library_switch[-1]==' ': - o_args = [self.library_switch.strip(), output_filename] - else: - o_args = [self.library_switch.strip()+output_filename] - - if is_string(self.objects): - ld_args = objects + [self.objects] - else: - ld_args = objects + self.objects - ld_args = ld_args + lib_opts + o_args - if debug: - ld_args[:0] = ['-g'] - if extra_preargs: - ld_args[:0] = extra_preargs - if extra_postargs: - ld_args.extend(extra_postargs) - self.mkpath(os.path.dirname(output_filename)) - if target_desc == CCompiler.EXECUTABLE: - linker = self.linker_exe[:] - else: - linker = self.linker_so[:] 
- command = linker + ld_args - try: - self.spawn(command) - except DistutilsExecError: - msg = str(get_exception()) - raise LinkError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - - def _environment_hook(self, name, hook_name): - if hook_name is None: - return None - if is_string(hook_name): - if hook_name.startswith('self.'): - hook_name = hook_name[5:] - hook = getattr(self, hook_name) - return hook() - elif hook_name.startswith('exe.'): - hook_name = hook_name[4:] - var = self.executables[hook_name] - if var: - return var[0] - else: - return None - elif hook_name.startswith('flags.'): - hook_name = hook_name[6:] - hook = getattr(self, 'get_flags_' + hook_name) - return hook() - else: - return hook_name() - - def can_ccompiler_link(self, ccompiler): - """ - Check if the given C compiler can link objects produced by - this compiler. - """ - return True - - def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): - """ - Convert a set of object files that are not compatible with the default - linker, to a file that is compatible. - - Parameters - ---------- - objects : list - List of object files to include. - output_dir : str - Output directory to place generated object files. - extra_dll_dir : str - Output directory to place extra DLL files that need to be - included on Windows. - - Returns - ------- - converted_objects : list of str - List of converted object files. - Note that the number of output files is not necessarily - the same as inputs. 
- - """ - raise NotImplementedError() - - ## class FCompiler - -_default_compilers = ( - # sys.platform mappings - ('win32', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95', - 'intelvem', 'intelem', 'flang')), - ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')), - ('linux.*', ('gnu95', 'intel', 'lahey', 'pg', 'absoft', 'nag', 'vast', 'compaq', - 'intele', 'intelem', 'gnu', 'g95', 'pathf95', 'nagfor')), - ('darwin.*', ('gnu95', 'nag', 'absoft', 'ibm', 'intel', 'gnu', 'g95', 'pg')), - ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')), - ('irix.*', ('mips', 'gnu', 'gnu95',)), - ('aix.*', ('ibm', 'gnu', 'gnu95',)), - # os.name mappings - ('posix', ('gnu', 'gnu95',)), - ('nt', ('gnu', 'gnu95',)), - ('mac', ('gnu95', 'gnu', 'pg')), - ) - -fcompiler_class = None -fcompiler_aliases = None - -def load_all_fcompiler_classes(): - """Cache all the FCompiler classes found in modules in the - numpy.distutils.fcompiler package. - """ - from glob import glob - global fcompiler_class, fcompiler_aliases - if fcompiler_class is not None: - return - pys = os.path.join(os.path.dirname(__file__), '*.py') - fcompiler_class = {} - fcompiler_aliases = {} - for fname in glob(pys): - module_name, ext = os.path.splitext(os.path.basename(fname)) - module_name = 'numpy.distutils.fcompiler.' 
+ module_name - __import__ (module_name) - module = sys.modules[module_name] - if hasattr(module, 'compilers'): - for cname in module.compilers: - klass = getattr(module, cname) - desc = (klass.compiler_type, klass, klass.description) - fcompiler_class[klass.compiler_type] = desc - for alias in klass.compiler_aliases: - if alias in fcompiler_aliases: - raise ValueError("alias %r defined for both %s and %s" - % (alias, klass.__name__, - fcompiler_aliases[alias][1].__name__)) - fcompiler_aliases[alias] = desc - -def _find_existing_fcompiler(compiler_types, - osname=None, platform=None, - requiref90=False, - c_compiler=None): - from numpy.distutils.core import get_distribution - dist = get_distribution(always=True) - for compiler_type in compiler_types: - v = None - try: - c = new_fcompiler(plat=platform, compiler=compiler_type, - c_compiler=c_compiler) - c.customize(dist) - v = c.get_version() - if requiref90 and c.compiler_f90 is None: - v = None - new_compiler = c.suggested_f90_compiler - if new_compiler: - log.warn('Trying %r compiler as suggested by %r ' - 'compiler for f90 support.' % (compiler_type, - new_compiler)) - c = new_fcompiler(plat=platform, compiler=new_compiler, - c_compiler=c_compiler) - c.customize(dist) - v = c.get_version() - if v is not None: - compiler_type = new_compiler - if requiref90 and c.compiler_f90 is None: - raise ValueError('%s does not support compiling f90 codes, ' - 'skipping.' 
% (c.__class__.__name__)) - except DistutilsModuleError: - log.debug("_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError", compiler_type) - except CompilerNotFound: - log.debug("_find_existing_fcompiler: compiler_type='%s' not found", compiler_type) - if v is not None: - return compiler_type - return None - -def available_fcompilers_for_platform(osname=None, platform=None): - if osname is None: - osname = os.name - if platform is None: - platform = sys.platform - matching_compiler_types = [] - for pattern, compiler_type in _default_compilers: - if re.match(pattern, platform) or re.match(pattern, osname): - for ct in compiler_type: - if ct not in matching_compiler_types: - matching_compiler_types.append(ct) - if not matching_compiler_types: - matching_compiler_types.append('gnu') - return matching_compiler_types - -def get_default_fcompiler(osname=None, platform=None, requiref90=False, - c_compiler=None): - """Determine the default Fortran compiler to use for the given - platform.""" - matching_compiler_types = available_fcompilers_for_platform(osname, - platform) - log.info("get_default_fcompiler: matching types: '%s'", - matching_compiler_types) - compiler_type = _find_existing_fcompiler(matching_compiler_types, - osname=osname, - platform=platform, - requiref90=requiref90, - c_compiler=c_compiler) - return compiler_type - -# Flag to avoid rechecking for Fortran compiler every time -failed_fcompilers = set() - -def new_fcompiler(plat=None, - compiler=None, - verbose=0, - dry_run=0, - force=0, - requiref90=False, - c_compiler = None): - """Generate an instance of some FCompiler subclass for the supplied - platform/compiler combination. 
- """ - global failed_fcompilers - fcompiler_key = (plat, compiler) - if fcompiler_key in failed_fcompilers: - return None - - load_all_fcompiler_classes() - if plat is None: - plat = os.name - if compiler is None: - compiler = get_default_fcompiler(plat, requiref90=requiref90, - c_compiler=c_compiler) - if compiler in fcompiler_class: - module_name, klass, long_description = fcompiler_class[compiler] - elif compiler in fcompiler_aliases: - module_name, klass, long_description = fcompiler_aliases[compiler] - else: - msg = "don't know how to compile Fortran code on platform '%s'" % plat - if compiler is not None: - msg = msg + " with '%s' compiler." % compiler - msg = msg + " Supported compilers are: %s)" \ - % (','.join(fcompiler_class.keys())) - log.warn(msg) - failed_fcompilers.add(fcompiler_key) - return None - - compiler = klass(verbose=verbose, dry_run=dry_run, force=force) - compiler.c_compiler = c_compiler - return compiler - -def show_fcompilers(dist=None): - """Print list of available compilers (used by the "--help-fcompiler" - option to "config_fc"). 
- """ - if dist is None: - from distutils.dist import Distribution - from numpy.distutils.command.config_compiler import config_fc - dist = Distribution() - dist.script_name = os.path.basename(sys.argv[0]) - dist.script_args = ['config_fc'] + sys.argv[1:] - try: - dist.script_args.remove('--help-fcompiler') - except ValueError: - pass - dist.cmdclass['config_fc'] = config_fc - dist.parse_config_files() - dist.parse_command_line() - compilers = [] - compilers_na = [] - compilers_ni = [] - if not fcompiler_class: - load_all_fcompiler_classes() - platform_compilers = available_fcompilers_for_platform() - for compiler in platform_compilers: - v = None - log.set_verbosity(-2) - try: - c = new_fcompiler(compiler=compiler, verbose=dist.verbose) - c.customize(dist) - v = c.get_version() - except (DistutilsModuleError, CompilerNotFound): - e = get_exception() - log.debug("show_fcompilers: %s not found" % (compiler,)) - log.debug(repr(e)) - - if v is None: - compilers_na.append(("fcompiler="+compiler, None, - fcompiler_class[compiler][2])) - else: - c.dump_properties() - compilers.append(("fcompiler="+compiler, None, - fcompiler_class[compiler][2] + ' (%s)' % v)) - - compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers)) - compilers_ni = [("fcompiler="+fc, None, fcompiler_class[fc][2]) - for fc in compilers_ni] - - compilers.sort() - compilers_na.sort() - compilers_ni.sort() - pretty_printer = FancyGetopt(compilers) - pretty_printer.print_help("Fortran compilers found:") - pretty_printer = FancyGetopt(compilers_na) - pretty_printer.print_help("Compilers available for this " - "platform, but not found:") - if compilers_ni: - pretty_printer = FancyGetopt(compilers_ni) - pretty_printer.print_help("Compilers not available on this platform:") - print("For compiler details, run 'config_fc --verbose' setup command.") - - -def dummy_fortran_file(): - fo, name = make_temp_file(suffix='.f') - fo.write(" subroutine dummy()\n end\n") - fo.close() - return name[:-2] - 
- -is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match -_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search -_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search -_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search -_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]', re.I).match - -def is_free_format(file): - """Check if file is in free format Fortran.""" - # f90 allows both fixed and free format, assuming fixed unless - # signs of free format are detected. - result = 0 - f = open_latin1(file, 'r') - line = f.readline() - n = 10000 # the number of non-comment lines to scan for hints - if _has_f_header(line): - n = 0 - elif _has_f90_header(line): - n = 0 - result = 1 - while n>0 and line: - line = line.rstrip() - if line and line[0]!='!': - n -= 1 - if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&': - result = 1 - break - line = f.readline() - f.close() - return result - -def has_f90_header(src): - f = open_latin1(src, 'r') - line = f.readline() - f.close() - return _has_f90_header(line) or _has_fix_header(line) - -_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P\w+)\s*\)\s*=\s*(?P.*)', re.I) -def get_f77flags(src): - """ - Search the first 20 lines of fortran 77 code for line pattern - `CF77FLAGS()=` - Return a dictionary {:}. 
- """ - flags = {} - f = open_latin1(src, 'r') - i = 0 - for line in f: - i += 1 - if i>20: break - m = _f77flags_re.match(line) - if not m: continue - fcname = m.group('fcname').strip() - fflags = m.group('fflags').strip() - flags[fcname] = split_quoted(fflags) - f.close() - return flags - -# TODO: implement get_f90flags and use it in _compile similarly to get_f77flags - -if __name__ == '__main__': - show_fcompilers() diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/absoft.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/absoft.py deleted file mode 100644 index d14fee0..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/absoft.py +++ /dev/null @@ -1,158 +0,0 @@ - -# http://www.absoft.com/literature/osxuserguide.pdf -# http://www.absoft.com/documentation.html - -# Notes: -# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py -# generated extension modules (works for f2py v2.45.241_1936 and up) -from __future__ import division, absolute_import, print_function - -import os - -from numpy.distutils.cpuinfo import cpu -from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file -from numpy.distutils.misc_util import cyg2win32 - -compilers = ['AbsoftFCompiler'] - -class AbsoftFCompiler(FCompiler): - - compiler_type = 'absoft' - description = 'Absoft Corp Fortran Compiler' - #version_pattern = r'FORTRAN 77 Compiler (?P[^\s*,]*).*?Absoft Corp' - version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler|Absoft Fortran Compiler Version|Copyright Absoft Corporation.*?Version))'+\ - r' (?P[^\s*,]*)(.*?Absoft Corp|)' - - # on windows: f90 -V -c dummy.f - # f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 1994-1996 CF90 (2.x.x.x f36t87) Version 2.3 Wed Apr 19, 2006 13:05:16 - - # samt5735(8)$ f90 -V -c dummy.f - # f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0 - # Note that fink installs g77 as f77, so need to use f90 for detection. 
- - executables = { - 'version_cmd' : None, # set by update_executables - 'compiler_f77' : ["f77"], - 'compiler_fix' : ["f90"], - 'compiler_f90' : ["f90"], - 'linker_so' : [""], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - if os.name=='nt': - library_switch = '/out:' #No space after /out:! - - module_dir_switch = None - module_include_switch = '-p' - - def update_executables(self): - f = cyg2win32(dummy_fortran_file()) - self.executables['version_cmd'] = ['', '-V', '-c', - f+'.f', '-o', f+'.o'] - - def get_flags_linker_so(self): - if os.name=='nt': - opt = ['/dll'] - # The "-K shared" switches are being left in for pre-9.0 versions - # of Absoft though I don't think versions earlier than 9 can - # actually be used to build shared libraries. In fact, version - # 8 of Absoft doesn't recognize "-K shared" and will fail. - elif self.get_version() >= '9.0': - opt = ['-shared'] - else: - opt = ["-K", "shared"] - return opt - - def library_dir_option(self, dir): - if os.name=='nt': - return ['-link', '/PATH:%s' % (dir)] - return "-L" + dir - - def library_option(self, lib): - if os.name=='nt': - return '%s.lib' % (lib) - return "-l" + lib - - def get_library_dirs(self): - opt = FCompiler.get_library_dirs(self) - d = os.environ.get('ABSOFT') - if d: - if self.get_version() >= '10.0': - # use shared libraries, the static libraries were not compiled -fPIC - prefix = 'sh' - else: - prefix = '' - if cpu.is_64bit(): - suffix = '64' - else: - suffix = '' - opt.append(os.path.join(d, '%slib%s' % (prefix, suffix))) - return opt - - def get_libraries(self): - opt = FCompiler.get_libraries(self) - if self.get_version() >= '11.0': - opt.extend(['af90math', 'afio', 'af77math', 'amisc']) - elif self.get_version() >= '10.0': - opt.extend(['af90math', 'afio', 'af77math', 'U77']) - elif self.get_version() >= '8.0': - opt.extend(['f90math', 'fio', 'f77math', 'U77']) - else: - opt.extend(['fio', 'f90math', 'fmath', 'U77']) - if os.name =='nt': - opt.append('COMDLG32') - 
return opt - - def get_flags(self): - opt = FCompiler.get_flags(self) - if os.name != 'nt': - opt.extend(['-s']) - if self.get_version(): - if self.get_version()>='8.2': - opt.append('-fpic') - return opt - - def get_flags_f77(self): - opt = FCompiler.get_flags_f77(self) - opt.extend(['-N22', '-N90', '-N110']) - v = self.get_version() - if os.name == 'nt': - if v and v>='8.0': - opt.extend(['-f', '-N15']) - else: - opt.append('-f') - if v: - if v<='4.6': - opt.append('-B108') - else: - # Though -N15 is undocumented, it works with - # Absoft 8.0 on Linux - opt.append('-N15') - return opt - - def get_flags_f90(self): - opt = FCompiler.get_flags_f90(self) - opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", - "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) - if self.get_version(): - if self.get_version()>'4.6': - opt.extend(["-YDEALLOC=ALL"]) - return opt - - def get_flags_fix(self): - opt = FCompiler.get_flags_fix(self) - opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", - "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) - opt.extend(["-f", "fixed"]) - return opt - - def get_flags_opt(self): - opt = ['-O'] - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='absoft').get_version()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/compaq.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/compaq.py deleted file mode 100644 index 671b3a5..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/compaq.py +++ /dev/null @@ -1,126 +0,0 @@ - -#http://www.compaq.com/fortran/docs/ -from __future__ import division, absolute_import, print_function - -import os -import sys - -from numpy.distutils.fcompiler import FCompiler -from numpy.distutils.compat import get_exception -from distutils.errors import DistutilsPlatformError - -compilers = ['CompaqFCompiler'] 
-if os.name != 'posix' or sys.platform[:6] == 'cygwin' : - # Otherwise we'd get a false positive on posix systems with - # case-insensitive filesystems (like darwin), because we'll pick - # up /bin/df - compilers.append('CompaqVisualFCompiler') - -class CompaqFCompiler(FCompiler): - - compiler_type = 'compaq' - description = 'Compaq Fortran Compiler' - version_pattern = r'Compaq Fortran (?P[^\s]*).*' - - if sys.platform[:5]=='linux': - fc_exe = 'fort' - else: - fc_exe = 'f90' - - executables = { - 'version_cmd' : ['', "-version"], - 'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"], - 'compiler_fix' : [fc_exe, "-fixed"], - 'compiler_f90' : [fc_exe], - 'linker_so' : [''], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - module_dir_switch = '-module ' # not tested - module_include_switch = '-I' - - def get_flags(self): - return ['-assume no2underscore', '-nomixed_str_len_arg'] - def get_flags_debug(self): - return ['-g', '-check bounds'] - def get_flags_opt(self): - return ['-O4', '-align dcommons', '-assume bigarrays', - '-assume nozsize', '-math_library fast'] - def get_flags_arch(self): - return ['-arch host', '-tune host'] - def get_flags_linker_so(self): - if sys.platform[:5]=='linux': - return ['-shared'] - return ['-shared', '-Wl,-expect_unresolved,*'] - -class CompaqVisualFCompiler(FCompiler): - - compiler_type = 'compaqv' - description = 'DIGITAL or Compaq Visual Fortran Compiler' - version_pattern = (r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler' - r' Version (?P[^\s]*).*') - - compile_switch = '/compile_only' - object_switch = '/object:' - library_switch = '/OUT:' #No space after /OUT:! 
- - static_lib_extension = ".lib" - static_lib_format = "%s%s" - module_dir_switch = '/module:' - module_include_switch = '/I' - - ar_exe = 'lib.exe' - fc_exe = 'DF' - - if sys.platform=='win32': - from numpy.distutils.msvccompiler import MSVCCompiler - - try: - m = MSVCCompiler() - m.initialize() - ar_exe = m.lib - except DistutilsPlatformError: - pass - except AttributeError: - msg = get_exception() - if '_MSVCCompiler__root' in str(msg): - print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (msg)) - else: - raise - except IOError: - e = get_exception() - if not "vcvarsall.bat" in str(e): - print("Unexpected IOError in", __file__) - raise e - except ValueError: - e = get_exception() - if not "'path'" in str(e): - print("Unexpected ValueError in", __file__) - raise e - - executables = { - 'version_cmd' : ['', "/what"], - 'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"], - 'compiler_fix' : [fc_exe, "/fixed"], - 'compiler_f90' : [fc_exe], - 'linker_so' : [''], - 'archiver' : [ar_exe, "/OUT:"], - 'ranlib' : None - } - - def get_flags(self): - return ['/nologo', '/MD', '/WX', '/iface=(cref,nomixed_str_len_arg)', - '/names:lowercase', '/assume:underscore'] - def get_flags_opt(self): - return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast'] - def get_flags_arch(self): - return ['/threads'] - def get_flags_debug(self): - return ['/debug'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='compaq').get_version()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/environment.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/environment.py deleted file mode 100644 index bb362d4..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/environment.py +++ /dev/null @@ -1,88 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import warnings -from 
distutils.dist import Distribution - -__metaclass__ = type - -class EnvironmentConfig(object): - def __init__(self, distutils_section='ALL', **kw): - self._distutils_section = distutils_section - self._conf_keys = kw - self._conf = None - self._hook_handler = None - - def dump_variable(self, name): - conf_desc = self._conf_keys[name] - hook, envvar, confvar, convert, append = conf_desc - if not convert: - convert = lambda x : x - print('%s.%s:' % (self._distutils_section, name)) - v = self._hook_handler(name, hook) - print(' hook : %s' % (convert(v),)) - if envvar: - v = os.environ.get(envvar, None) - print(' environ: %s' % (convert(v),)) - if confvar and self._conf: - v = self._conf.get(confvar, (None, None))[1] - print(' config : %s' % (convert(v),)) - - def dump_variables(self): - for name in self._conf_keys: - self.dump_variable(name) - - def __getattr__(self, name): - try: - conf_desc = self._conf_keys[name] - except KeyError: - raise AttributeError(name) - return self._get_var(name, conf_desc) - - def get(self, name, default=None): - try: - conf_desc = self._conf_keys[name] - except KeyError: - return default - var = self._get_var(name, conf_desc) - if var is None: - var = default - return var - - def _get_var(self, name, conf_desc): - hook, envvar, confvar, convert, append = conf_desc - if convert is None: - convert = lambda x: x - var = self._hook_handler(name, hook) - if envvar is not None: - envvar_contents = os.environ.get(envvar) - if envvar_contents is not None: - envvar_contents = convert(envvar_contents) - if var and append: - if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '1') == '1': - var.extend(envvar_contents) - else: - # NPY_DISTUTILS_APPEND_FLAGS was explicitly set to 0 - # to keep old (overwrite flags rather than append to - # them) behavior - var = envvar_contents - else: - var = envvar_contents - if confvar is not None and self._conf: - if confvar in self._conf: - source, confvar_contents = self._conf[confvar] - var = 
convert(confvar_contents) - return var - - - def clone(self, hook_handler): - ec = self.__class__(distutils_section=self._distutils_section, - **self._conf_keys) - ec._hook_handler = hook_handler - return ec - - def use_distribution(self, dist): - if isinstance(dist, Distribution): - self._conf = dist.get_option_dict(self._distutils_section) - else: - self._conf = dist diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/g95.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/g95.py deleted file mode 100644 index e7c659b..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/g95.py +++ /dev/null @@ -1,44 +0,0 @@ -# http://g95.sourceforge.net/ -from __future__ import division, absolute_import, print_function - -from numpy.distutils.fcompiler import FCompiler - -compilers = ['G95FCompiler'] - -class G95FCompiler(FCompiler): - compiler_type = 'g95' - description = 'G95 Fortran Compiler' - -# version_pattern = r'G95 \((GCC (?P[\d.]+)|.*?) \(g95!\) (?P.*)\).*' - # $ g95 --version - # G95 (GCC 4.0.3 (g95!) May 22 2006) - - version_pattern = r'G95 \((GCC (?P[\d.]+)|.*?) \(g95 (?P.*)!\) (?P.*)\).*' - # $ g95 --version - # G95 (GCC 4.0.3 (g95 0.90!) 
Aug 22 2006) - - executables = { - 'version_cmd' : ["", "--version"], - 'compiler_f77' : ["g95", "-ffixed-form"], - 'compiler_fix' : ["g95", "-ffixed-form"], - 'compiler_f90' : ["g95"], - 'linker_so' : ["", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-fpic'] - module_dir_switch = '-fmod=' - module_include_switch = '-I' - - def get_flags(self): - return ['-fno-second-underscore'] - def get_flags_opt(self): - return ['-O'] - def get_flags_debug(self): - return ['-g'] - -if __name__ == '__main__': - from distutils import log - from numpy.distutils import customized_fcompiler - log.set_verbosity(2) - print(customized_fcompiler('g95').get_version()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/gnu.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/gnu.py deleted file mode 100644 index 965c670..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/gnu.py +++ /dev/null @@ -1,564 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import re -import os -import sys -import warnings -import platform -import tempfile -import hashlib -import base64 -import subprocess -from subprocess import Popen, PIPE, STDOUT -from numpy.distutils.exec_command import filepath_from_subprocess_output -from numpy.distutils.fcompiler import FCompiler -from numpy.distutils.compat import get_exception -from numpy.distutils.system_info import system_info - -compilers = ['GnuFCompiler', 'Gnu95FCompiler'] - -TARGET_R = re.compile(r"Target: ([a-zA-Z0-9_\-]*)") - -# XXX: handle cross compilation - - -def is_win64(): - return sys.platform == "win32" and platform.architecture()[0] == "64bit" - - -if is_win64(): - #_EXTRAFLAGS = ["-fno-leading-underscore"] - _EXTRAFLAGS = [] -else: - _EXTRAFLAGS = [] - - -class GnuFCompiler(FCompiler): - compiler_type = 'gnu' - compiler_aliases = ('g77', ) - description = 'GNU Fortran 77 compiler' - - def gnu_version_match(self, version_string): - 
"""Handle the different versions of GNU fortran compilers""" - # Strip warning(s) that may be emitted by gfortran - while version_string.startswith('gfortran: warning'): - version_string = version_string[version_string.find('\n') + 1:] - - # Gfortran versions from after 2010 will output a simple string - # (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older - # gfortrans may still return long version strings (``-dumpversion`` was - # an alias for ``--version``) - if len(version_string) <= 20: - # Try to find a valid version string - m = re.search(r'([0-9.]+)', version_string) - if m: - # g77 provides a longer version string that starts with GNU - # Fortran - if version_string.startswith('GNU Fortran'): - return ('g77', m.group(1)) - - # gfortran only outputs a version string such as #.#.#, so check - # if the match is at the start of the string - elif m.start() == 0: - return ('gfortran', m.group(1)) - else: - # Output probably from --version, try harder: - m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string) - if m: - return ('gfortran', m.group(1)) - m = re.search( - r'GNU Fortran.*?\-?([0-9-.]+\.[0-9-.]+)', version_string) - if m: - v = m.group(1) - if v.startswith('0') or v.startswith('2') or v.startswith('3'): - # the '0' is for early g77's - return ('g77', v) - else: - # at some point in the 4.x series, the ' 95' was dropped - # from the version string - return ('gfortran', v) - - # If still nothing, raise an error to make the problem easy to find. 
- err = 'A valid Fortran version was not found in this string:\n' - raise ValueError(err + version_string) - - def version_match(self, version_string): - v = self.gnu_version_match(version_string) - if not v or v[0] != 'g77': - return None - return v[1] - - possible_executables = ['g77', 'f77'] - executables = { - 'version_cmd' : [None, "-dumpversion"], - 'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"], - 'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes - 'compiler_fix' : None, - 'linker_so' : [None, "-g", "-Wall"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"], - 'linker_exe' : [None, "-g", "-Wall"] - } - module_dir_switch = None - module_include_switch = None - - # Cygwin: f771: warning: -fPIC ignored for target (all code is - # position independent) - if os.name != 'nt' and sys.platform != 'cygwin': - pic_flags = ['-fPIC'] - - # use -mno-cygwin for g77 when Python is not Cygwin-Python - if sys.platform == 'win32': - for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']: - executables[key].append('-mno-cygwin') - - g2c = 'g2c' - suggested_f90_compiler = 'gnu95' - - def get_flags_linker_so(self): - opt = self.linker_so[1:] - if sys.platform == 'darwin': - target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) - # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value - # and leave it alone. But, distutils will complain if the - # environment's value is different from the one in the Python - # Makefile used to build Python. We let disutils handle this - # error checking. - if not target: - # If MACOSX_DEPLOYMENT_TARGET is not set in the environment, - # we try to get it first from the Python Makefile and then we - # fall back to setting it to 10.3 to maximize the set of - # versions we can work with. This is a reasonable default - # even when using the official Python dist and those derived - # from it. 
- import distutils.sysconfig as sc - g = {} - try: - get_makefile_filename = sc.get_makefile_filename - except AttributeError: - pass # i.e. PyPy - else: - filename = get_makefile_filename() - sc.parse_makefile(filename, g) - target = g.get('MACOSX_DEPLOYMENT_TARGET', '10.3') - os.environ['MACOSX_DEPLOYMENT_TARGET'] = target - if target == '10.3': - s = 'Env. variable MACOSX_DEPLOYMENT_TARGET set to 10.3' - warnings.warn(s, stacklevel=2) - - opt.extend(['-undefined', 'dynamic_lookup', '-bundle']) - else: - opt.append("-shared") - if sys.platform.startswith('sunos'): - # SunOS often has dynamically loaded symbols defined in the - # static library libg2c.a The linker doesn't like this. To - # ignore the problem, use the -mimpure-text flag. It isn't - # the safest thing, but seems to work. 'man gcc' says: - # ".. Instead of using -mimpure-text, you should compile all - # source code with -fpic or -fPIC." - opt.append('-mimpure-text') - return opt - - def get_libgcc_dir(self): - try: - output = subprocess.check_output(self.compiler_f77 + - ['-print-libgcc-file-name']) - except (OSError, subprocess.CalledProcessError): - pass - else: - output = filepath_from_subprocess_output(output) - return os.path.dirname(output) - return None - - def get_libgfortran_dir(self): - if sys.platform[:5] == 'linux': - libgfortran_name = 'libgfortran.so' - elif sys.platform == 'darwin': - libgfortran_name = 'libgfortran.dylib' - else: - libgfortran_name = None - - libgfortran_dir = None - if libgfortran_name: - find_lib_arg = ['-print-file-name={0}'.format(libgfortran_name)] - try: - output = subprocess.check_output( - self.compiler_f77 + find_lib_arg) - except (OSError, subprocess.CalledProcessError): - pass - else: - output = filepath_from_subprocess_output(output) - libgfortran_dir = os.path.dirname(output) - return libgfortran_dir - - def get_library_dirs(self): - opt = [] - if sys.platform[:5] != 'linux': - d = self.get_libgcc_dir() - if d: - # if windows and not cygwin, libg2c lies 
in a different folder - if sys.platform == 'win32' and not d.startswith('/usr/lib'): - d = os.path.normpath(d) - path = os.path.join(d, "lib%s.a" % self.g2c) - if not os.path.exists(path): - root = os.path.join(d, *((os.pardir, ) * 4)) - d2 = os.path.abspath(os.path.join(root, 'lib')) - path = os.path.join(d2, "lib%s.a" % self.g2c) - if os.path.exists(path): - opt.append(d2) - opt.append(d) - # For Macports / Linux, libgfortran and libgcc are not co-located - lib_gfortran_dir = self.get_libgfortran_dir() - if lib_gfortran_dir: - opt.append(lib_gfortran_dir) - return opt - - def get_libraries(self): - opt = [] - d = self.get_libgcc_dir() - if d is not None: - g2c = self.g2c + '-pic' - f = self.static_lib_format % (g2c, self.static_lib_extension) - if not os.path.isfile(os.path.join(d, f)): - g2c = self.g2c - else: - g2c = self.g2c - - if g2c is not None: - opt.append(g2c) - c_compiler = self.c_compiler - if sys.platform == 'win32' and c_compiler and \ - c_compiler.compiler_type == 'msvc': - opt.append('gcc') - if sys.platform == 'darwin': - opt.append('cc_dynamic') - return opt - - def get_flags_debug(self): - return ['-g'] - - def get_flags_opt(self): - v = self.get_version() - if v and v <= '3.3.3': - # With this compiler version building Fortran BLAS/LAPACK - # with -O3 caused failures in lib.lapack heevr,syevr tests. 
- opt = ['-O2'] - else: - opt = ['-O3'] - opt.append('-funroll-loops') - return opt - - def _c_arch_flags(self): - """ Return detected arch flags from CFLAGS """ - from distutils import sysconfig - try: - cflags = sysconfig.get_config_vars()['CFLAGS'] - except KeyError: - return [] - arch_re = re.compile(r"-arch\s+(\w+)") - arch_flags = [] - for arch in arch_re.findall(cflags): - arch_flags += ['-arch', arch] - return arch_flags - - def get_flags_arch(self): - return [] - - def runtime_library_dir_option(self, dir): - if sys.platform[:3] == 'aix' or sys.platform == 'win32': - # Linux/Solaris/Unix support RPATH, Windows and AIX do not - raise NotImplementedError - - # TODO: could use -Xlinker here, if it's supported - assert "," not in dir - - sep = ',' if sys.platform == 'darwin' else '=' - return '-Wl,-rpath%s%s' % (sep, dir) - - -class Gnu95FCompiler(GnuFCompiler): - compiler_type = 'gnu95' - compiler_aliases = ('gfortran', ) - description = 'GNU Fortran 95 compiler' - - def version_match(self, version_string): - v = self.gnu_version_match(version_string) - if not v or v[0] != 'gfortran': - return None - v = v[1] - if v >= '4.': - # gcc-4 series releases do not support -mno-cygwin option - pass - else: - # use -mno-cygwin flag for gfortran when Python is not - # Cygwin-Python - if sys.platform == 'win32': - for key in [ - 'version_cmd', 'compiler_f77', 'compiler_f90', - 'compiler_fix', 'linker_so', 'linker_exe' - ]: - self.executables[key].append('-mno-cygwin') - return v - - possible_executables = ['gfortran', 'f95'] - executables = { - 'version_cmd' : ["", "-dumpversion"], - 'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form", - "-fno-second-underscore"] + _EXTRAFLAGS, - 'compiler_f90' : [None, "-Wall", "-g", - "-fno-second-underscore"] + _EXTRAFLAGS, - 'compiler_fix' : [None, "-Wall", "-g","-ffixed-form", - "-fno-second-underscore"] + _EXTRAFLAGS, - 'linker_so' : ["", "-Wall", "-g"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"], - 'linker_exe' : 
[None, "-Wall"] - } - - module_dir_switch = '-J' - module_include_switch = '-I' - - if sys.platform[:3] == 'aix': - executables['linker_so'].append('-lpthread') - if platform.architecture()[0][:2] == '64': - for key in ['compiler_f77', 'compiler_f90','compiler_fix','linker_so', 'linker_exe']: - executables[key].append('-maix64') - - g2c = 'gfortran' - - def _universal_flags(self, cmd): - """Return a list of -arch flags for every supported architecture.""" - if not sys.platform == 'darwin': - return [] - arch_flags = [] - # get arches the C compiler gets. - c_archs = self._c_arch_flags() - if "i386" in c_archs: - c_archs[c_archs.index("i386")] = "i686" - # check the arches the Fortran compiler supports, and compare with - # arch flags from C compiler - for arch in ["ppc", "i686", "x86_64", "ppc64"]: - if _can_target(cmd, arch) and arch in c_archs: - arch_flags.extend(["-arch", arch]) - return arch_flags - - def get_flags(self): - flags = GnuFCompiler.get_flags(self) - arch_flags = self._universal_flags(self.compiler_f90) - if arch_flags: - flags[:0] = arch_flags - return flags - - def get_flags_linker_so(self): - flags = GnuFCompiler.get_flags_linker_so(self) - arch_flags = self._universal_flags(self.linker_so) - if arch_flags: - flags[:0] = arch_flags - return flags - - def get_library_dirs(self): - opt = GnuFCompiler.get_library_dirs(self) - if sys.platform == 'win32': - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - target = self.get_target() - if target: - d = os.path.normpath(self.get_libgcc_dir()) - root = os.path.join(d, *((os.pardir, ) * 4)) - path = os.path.join(root, "lib") - mingwdir = os.path.normpath(path) - if os.path.exists(os.path.join(mingwdir, "libmingwex.a")): - opt.append(mingwdir) - # For Macports / Linux, libgfortran and libgcc are not co-located - lib_gfortran_dir = self.get_libgfortran_dir() - if lib_gfortran_dir: - opt.append(lib_gfortran_dir) - return opt - - def get_libraries(self): - opt = 
GnuFCompiler.get_libraries(self) - if sys.platform == 'darwin': - opt.remove('cc_dynamic') - if sys.platform == 'win32': - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - if "gcc" in opt: - i = opt.index("gcc") - opt.insert(i + 1, "mingwex") - opt.insert(i + 1, "mingw32") - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - return [] - else: - pass - return opt - - def get_target(self): - try: - output = subprocess.check_output(self.compiler_f77 + ['-v']) - except (OSError, subprocess.CalledProcessError): - pass - else: - output = filepath_from_subprocess_output(output) - m = TARGET_R.search(output) - if m: - return m.group(1) - return "" - - def _hash_files(self, filenames): - h = hashlib.sha1() - for fn in filenames: - with open(fn, 'rb') as f: - while True: - block = f.read(131072) - if not block: - break - h.update(block) - text = base64.b32encode(h.digest()) - if sys.version_info[0] >= 3: - text = text.decode('ascii') - return text.rstrip('=') - - def _link_wrapper_lib(self, objects, output_dir, extra_dll_dir, - chained_dlls, is_archive): - """Create a wrapper shared library for the given objects - - Return an MSVC-compatible lib - """ - - c_compiler = self.c_compiler - if c_compiler.compiler_type != "msvc": - raise ValueError("This method only supports MSVC") - - object_hash = self._hash_files(list(objects) + list(chained_dlls)) - - if is_win64(): - tag = 'win_amd64' - else: - tag = 'win32' - - basename = 'lib' + os.path.splitext( - os.path.basename(objects[0]))[0][:8] - root_name = basename + '.' 
+ object_hash + '.gfortran-' + tag - dll_name = root_name + '.dll' - def_name = root_name + '.def' - lib_name = root_name + '.lib' - dll_path = os.path.join(extra_dll_dir, dll_name) - def_path = os.path.join(output_dir, def_name) - lib_path = os.path.join(output_dir, lib_name) - - if os.path.isfile(lib_path): - # Nothing to do - return lib_path, dll_path - - if is_archive: - objects = (["-Wl,--whole-archive"] + list(objects) + - ["-Wl,--no-whole-archive"]) - self.link_shared_object( - objects, - dll_name, - output_dir=extra_dll_dir, - extra_postargs=list(chained_dlls) + [ - '-Wl,--allow-multiple-definition', - '-Wl,--output-def,' + def_path, - '-Wl,--export-all-symbols', - '-Wl,--enable-auto-import', - '-static', - '-mlong-double-64', - ]) - - # No PowerPC! - if is_win64(): - specifier = '/MACHINE:X64' - else: - specifier = '/MACHINE:X86' - - # MSVC specific code - lib_args = ['/def:' + def_path, '/OUT:' + lib_path, specifier] - if not c_compiler.initialized: - c_compiler.initialize() - c_compiler.spawn([c_compiler.lib] + lib_args) - - return lib_path, dll_path - - def can_ccompiler_link(self, compiler): - # MSVC cannot link objects compiled by GNU fortran - return compiler.compiler_type not in ("msvc", ) - - def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): - """ - Convert a set of object files that are not compatible with the default - linker, to a file that is compatible. - """ - if self.c_compiler.compiler_type == "msvc": - # Compile a DLL and return the lib for the DLL as - # the object. Also keep track of previous DLLs that - # we have compiled so that we can link against them. 
- - # If there are .a archives, assume they are self-contained - # static libraries, and build separate DLLs for each - archives = [] - plain_objects = [] - for obj in objects: - if obj.lower().endswith('.a'): - archives.append(obj) - else: - plain_objects.append(obj) - - chained_libs = [] - chained_dlls = [] - for archive in archives[::-1]: - lib, dll = self._link_wrapper_lib( - [archive], - output_dir, - extra_dll_dir, - chained_dlls=chained_dlls, - is_archive=True) - chained_libs.insert(0, lib) - chained_dlls.insert(0, dll) - - if not plain_objects: - return chained_libs - - lib, dll = self._link_wrapper_lib( - plain_objects, - output_dir, - extra_dll_dir, - chained_dlls=chained_dlls, - is_archive=False) - return [lib] + chained_libs - else: - raise ValueError("Unsupported C compiler") - - -def _can_target(cmd, arch): - """Return true if the architecture supports the -arch flag""" - newcmd = cmd[:] - fid, filename = tempfile.mkstemp(suffix=".f") - os.close(fid) - try: - d = os.path.dirname(filename) - output = os.path.splitext(filename)[0] + ".o" - try: - newcmd.extend(["-arch", arch, "-c", filename]) - p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d) - p.communicate() - return p.returncode == 0 - finally: - if os.path.exists(output): - os.remove(output) - finally: - os.remove(filename) - return False - - -if __name__ == '__main__': - from distutils import log - from numpy.distutils import customized_fcompiler - log.set_verbosity(2) - - print(customized_fcompiler('gnu').get_version()) - try: - print(customized_fcompiler('g95').get_version()) - except Exception: - print(get_exception()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/hpux.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/hpux.py deleted file mode 100644 index 51bad54..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/hpux.py +++ /dev/null @@ -1,43 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from 
numpy.distutils.fcompiler import FCompiler - -compilers = ['HPUXFCompiler'] - -class HPUXFCompiler(FCompiler): - - compiler_type = 'hpux' - description = 'HP Fortran 90 Compiler' - version_pattern = r'HP F90 (?P[^\s*,]*)' - - executables = { - 'version_cmd' : ["f90", "+version"], - 'compiler_f77' : ["f90"], - 'compiler_fix' : ["f90"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["ld", "-b"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = None #XXX: fix me - module_include_switch = None #XXX: fix me - pic_flags = ['+Z'] - def get_flags(self): - return self.pic_flags + ['+ppu', '+DD64'] - def get_flags_opt(self): - return ['-O3'] - def get_libraries(self): - return ['m'] - def get_library_dirs(self): - opt = ['/usr/lib/hpux64'] - return opt - def get_version(self, force=0, ok_status=[256, 0, 1]): - # XXX status==256 may indicate 'unrecognized option' or - # 'no input file'. So, version_cmd needs more work. - return FCompiler.get_version(self, force, ok_status) - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(10) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='hpux').get_version()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/ibm.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/ibm.py deleted file mode 100644 index 70d2132..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/ibm.py +++ /dev/null @@ -1,99 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import re -import sys -import subprocess - -from numpy.distutils.fcompiler import FCompiler -from numpy.distutils.exec_command import find_executable -from numpy.distutils.misc_util import make_temp_file -from distutils import log - -compilers = ['IBMFCompiler'] - -class IBMFCompiler(FCompiler): - compiler_type = 'ibm' - description = 'IBM XL Fortran Compiler' - version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran 
((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P[^\s*]*)' - #IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004 - - executables = { - 'version_cmd' : ["", "-qversion"], - 'compiler_f77' : ["xlf"], - 'compiler_fix' : ["xlf90", "-qfixed"], - 'compiler_f90' : ["xlf90"], - 'linker_so' : ["xlf95"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_version(self,*args,**kwds): - version = FCompiler.get_version(self,*args,**kwds) - - if version is None and sys.platform.startswith('aix'): - # use lslpp to find out xlf version - lslpp = find_executable('lslpp') - xlf = find_executable('xlf') - if os.path.exists(xlf) and os.path.exists(lslpp): - try: - o = subprocess.check_output([lslpp, '-Lc', 'xlfcmp']) - except (OSError, subprocess.CalledProcessError): - pass - else: - m = re.search(r'xlfcmp:(?P\d+([.]\d+)+)', o) - if m: version = m.group('version') - - xlf_dir = '/etc/opt/ibmcmp/xlf' - if version is None and os.path.isdir(xlf_dir): - # linux: - # If the output of xlf does not contain version info - # (that's the case with xlf 8.1, for instance) then - # let's try another method: - l = sorted(os.listdir(xlf_dir)) - l.reverse() - l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))] - if l: - from distutils.version import LooseVersion - self.version = version = LooseVersion(l[0]) - return version - - def get_flags(self): - return ['-qextname'] - - def get_flags_debug(self): - return ['-g'] - - def get_flags_linker_so(self): - opt = [] - if sys.platform=='darwin': - opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress') - else: - opt.append('-bshared') - version = self.get_version(ok_status=[0, 40]) - if version is not None: - if sys.platform.startswith('aix'): - xlf_cfg = '/etc/xlf.cfg' - else: - xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version - fo, new_cfg = make_temp_file(suffix='_xlf.cfg') - log.info('Creating '+new_cfg) - with open(xlf_cfg, 'r') as fi: - crt1_match = 
re.compile(r'\s*crt\s*[=]\s*(?P.*)/crt1.o').match - for line in fi: - m = crt1_match(line) - if m: - fo.write('crt = %s/bundle1.o\n' % (m.group('path'))) - else: - fo.write(line) - fo.close() - opt.append('-F'+new_cfg) - return opt - - def get_flags_opt(self): - return ['-O3'] - -if __name__ == '__main__': - from numpy.distutils import customized_fcompiler - log.set_verbosity(2) - print(customized_fcompiler(compiler='ibm').get_version()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/intel.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/intel.py deleted file mode 100644 index 51f6812..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/intel.py +++ /dev/null @@ -1,222 +0,0 @@ -# http://developer.intel.com/software/products/compilers/flin/ -from __future__ import division, absolute_import, print_function - -import sys - -from numpy.distutils.ccompiler import simple_version_match -from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file - -compilers = ['IntelFCompiler', 'IntelVisualFCompiler', - 'IntelItaniumFCompiler', 'IntelItaniumVisualFCompiler', - 'IntelEM64VisualFCompiler', 'IntelEM64TFCompiler'] - - -def intel_version_match(type): - # Match against the important stuff in the version string - return simple_version_match(start=r'Intel.*?Fortran.*?(?:%s).*?Version' % (type,)) - - -class BaseIntelFCompiler(FCompiler): - def update_executables(self): - f = dummy_fortran_file() - self.executables['version_cmd'] = ['', '-FI', '-V', '-c', - f + '.f', '-o', f + '.o'] - - def runtime_library_dir_option(self, dir): - # TODO: could use -Xlinker here, if it's supported - assert "," not in dir - - return '-Wl,-rpath=%s' % dir - - -class IntelFCompiler(BaseIntelFCompiler): - - compiler_type = 'intel' - compiler_aliases = ('ifort',) - description = 'Intel Fortran Compiler for 32-bit apps' - version_match = intel_version_match('32-bit|IA-32') - - possible_executables = ['ifort', 'ifc'] - - executables = { 
- 'version_cmd' : None, # set by update_executables - 'compiler_f77' : [None, "-72", "-w90", "-w95"], - 'compiler_f90' : [None], - 'compiler_fix' : [None, "-FI"], - 'linker_so' : ["", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - pic_flags = ['-fPIC'] - module_dir_switch = '-module ' # Don't remove ending space! - module_include_switch = '-I' - - def get_flags_free(self): - return ['-FR'] - - def get_flags(self): - return ['-fPIC'] - - def get_flags_opt(self): # Scipy test failures with -O2 - v = self.get_version() - mpopt = 'openmp' if v and v < '15' else 'qopenmp' - return ['-fp-model strict -O1 -{}'.format(mpopt)] - - def get_flags_arch(self): - return [] - - def get_flags_linker_so(self): - opt = FCompiler.get_flags_linker_so(self) - v = self.get_version() - if v and v >= '8.0': - opt.append('-nofor_main') - if sys.platform == 'darwin': - # Here, it's -dynamiclib - try: - idx = opt.index('-shared') - opt.remove('-shared') - except ValueError: - idx = 0 - opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup'] - return opt - - -class IntelItaniumFCompiler(IntelFCompiler): - compiler_type = 'intele' - compiler_aliases = () - description = 'Intel Fortran Compiler for Itanium apps' - - version_match = intel_version_match('Itanium|IA-64') - - possible_executables = ['ifort', 'efort', 'efc'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI", "-w90", "-w95"], - 'compiler_fix' : [None, "-FI"], - 'compiler_f90' : [None], - 'linker_so' : ['', "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - -class IntelEM64TFCompiler(IntelFCompiler): - compiler_type = 'intelem' - compiler_aliases = () - description = 'Intel Fortran Compiler for 64-bit apps' - - version_match = intel_version_match('EM64T-based|Intel\\(R\\) 64|64|IA-64|64-bit') - - possible_executables = ['ifort', 'efort', 'efc'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI"], - 'compiler_fix' : [None, 
"-FI"], - 'compiler_f90' : [None], - 'linker_so' : ['', "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_flags(self): - return ['-fPIC'] - - def get_flags_opt(self): # Scipy test failures with -O2 - v = self.get_version() - mpopt = 'openmp' if v and v < '15' else 'qopenmp' - return ['-fp-model strict -O1 -{}'.format(mpopt)] - - def get_flags_arch(self): - return [''] - -# Is there no difference in the version string between the above compilers -# and the Visual compilers? - - -class IntelVisualFCompiler(BaseIntelFCompiler): - compiler_type = 'intelv' - description = 'Intel Visual Fortran Compiler for 32-bit apps' - version_match = intel_version_match('32-bit|IA-32') - - def update_executables(self): - f = dummy_fortran_file() - self.executables['version_cmd'] = ['', '/FI', '/c', - f + '.f', '/o', f + '.o'] - - ar_exe = 'lib.exe' - possible_executables = ['ifort', 'ifl'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None], - 'compiler_fix' : [None], - 'compiler_f90' : [None], - 'linker_so' : [None], - 'archiver' : [ar_exe, "/verbose", "/OUT:"], - 'ranlib' : None - } - - compile_switch = '/c ' - object_switch = '/Fo' # No space after /Fo! - library_switch = '/OUT:' # No space after /OUT:! 
- module_dir_switch = '/module:' # No space after /module: - module_include_switch = '/I' - - def get_flags(self): - opt = ['/nologo', '/MD', '/nbs', '/names:lowercase', '/assume:underscore'] - return opt - - def get_flags_free(self): - return [] - - def get_flags_debug(self): - return ['/4Yb', '/d2'] - - def get_flags_opt(self): - return ['/O1'] # Scipy test failures with /O2 - - def get_flags_arch(self): - return ["/arch:IA32", "/QaxSSE3"] - - def runtime_library_dir_option(self, dir): - raise NotImplementedError - - -class IntelItaniumVisualFCompiler(IntelVisualFCompiler): - compiler_type = 'intelev' - description = 'Intel Visual Fortran Compiler for Itanium apps' - - version_match = intel_version_match('Itanium') - - possible_executables = ['efl'] # XXX this is a wild guess - ar_exe = IntelVisualFCompiler.ar_exe - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI", "-w90", "-w95"], - 'compiler_fix' : [None, "-FI", "-4L72", "-w"], - 'compiler_f90' : [None], - 'linker_so' : ['', "-shared"], - 'archiver' : [ar_exe, "/verbose", "/OUT:"], - 'ranlib' : None - } - - -class IntelEM64VisualFCompiler(IntelVisualFCompiler): - compiler_type = 'intelvem' - description = 'Intel Visual Fortran Compiler for 64-bit apps' - - version_match = simple_version_match(start=r'Intel\(R\).*?64,') - - def get_flags_arch(self): - return [''] - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='intel').get_version()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/lahey.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/lahey.py deleted file mode 100644 index 1beb662..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/lahey.py +++ /dev/null @@ -1,47 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os - -from numpy.distutils.fcompiler import FCompiler - 
-compilers = ['LaheyFCompiler'] - -class LaheyFCompiler(FCompiler): - - compiler_type = 'lahey' - description = 'Lahey/Fujitsu Fortran 95 Compiler' - version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P[^\s*]*)' - - executables = { - 'version_cmd' : ["", "--version"], - 'compiler_f77' : ["lf95", "--fix"], - 'compiler_fix' : ["lf95", "--fix"], - 'compiler_f90' : ["lf95"], - 'linker_so' : ["lf95", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - module_dir_switch = None #XXX Fix me - module_include_switch = None #XXX Fix me - - def get_flags_opt(self): - return ['-O'] - def get_flags_debug(self): - return ['-g', '--chk', '--chkglobal'] - def get_library_dirs(self): - opt = [] - d = os.environ.get('LAHEY') - if d: - opt.append(os.path.join(d, 'lib')) - return opt - def get_libraries(self): - opt = [] - opt.extend(['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6']) - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='lahey').get_version()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/mips.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/mips.py deleted file mode 100644 index da337b2..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/mips.py +++ /dev/null @@ -1,56 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.distutils.cpuinfo import cpu -from numpy.distutils.fcompiler import FCompiler - -compilers = ['MIPSFCompiler'] - -class MIPSFCompiler(FCompiler): - - compiler_type = 'mips' - description = 'MIPSpro Fortran Compiler' - version_pattern = r'MIPSpro Compilers: Version (?P[^\s*,]*)' - - executables = { - 'version_cmd' : ["", "-version"], - 'compiler_f77' : ["f77", "-f77"], - 'compiler_fix' : ["f90", "-fixedform"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["f90", "-shared"], - 'archiver' : ["ar", "-cr"], - 
'ranlib' : None - } - module_dir_switch = None #XXX: fix me - module_include_switch = None #XXX: fix me - pic_flags = ['-KPIC'] - - def get_flags(self): - return self.pic_flags + ['-n32'] - def get_flags_opt(self): - return ['-O3'] - def get_flags_arch(self): - opt = [] - for a in '19 20 21 22_4k 22_5k 24 25 26 27 28 30 32_5k 32_10k'.split(): - if getattr(cpu, 'is_IP%s'%a)(): - opt.append('-TARG:platform=IP%s' % a) - break - return opt - def get_flags_arch_f77(self): - r = None - if cpu.is_r10000(): r = 10000 - elif cpu.is_r12000(): r = 12000 - elif cpu.is_r8000(): r = 8000 - elif cpu.is_r5000(): r = 5000 - elif cpu.is_r4000(): r = 4000 - if r is not None: - return ['r%s' % (r)] - return [] - def get_flags_arch_f90(self): - r = self.get_flags_arch_f77() - if r: - r[0] = '-' + r[0] - return r - -if __name__ == '__main__': - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='mips').get_version()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/nag.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/nag.py deleted file mode 100644 index cb71d54..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/nag.py +++ /dev/null @@ -1,84 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import re -from numpy.distutils.fcompiler import FCompiler - -compilers = ['NAGFCompiler', 'NAGFORCompiler'] - -class BaseNAGFCompiler(FCompiler): - version_pattern = r'NAG.* Release (?P[^(\s]*)' - - def version_match(self, version_string): - m = re.search(self.version_pattern, version_string) - if m: - return m.group('version') - else: - return None - - def get_flags_linker_so(self): - return ["-Wl,-shared"] - def get_flags_opt(self): - return ['-O4'] - def get_flags_arch(self): - return [''] - -class NAGFCompiler(BaseNAGFCompiler): - - compiler_type = 'nag' - description = 'NAGWare Fortran 95 Compiler' - - executables = { - 'version_cmd' : ["", "-V"], - 
'compiler_f77' : ["f95", "-fixed"], - 'compiler_fix' : ["f95", "-fixed"], - 'compiler_f90' : ["f95"], - 'linker_so' : [""], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_flags_linker_so(self): - if sys.platform == 'darwin': - return ['-unsharedf95', '-Wl,-bundle,-flat_namespace,-undefined,suppress'] - return BaseNAGFCompiler.get_flags_linker_so(self) - def get_flags_arch(self): - version = self.get_version() - if version and version < '5.1': - return ['-target=native'] - else: - return BaseNAGFCompiler.get_flags_arch(self) - def get_flags_debug(self): - return ['-g', '-gline', '-g90', '-nan', '-C'] - -class NAGFORCompiler(BaseNAGFCompiler): - - compiler_type = 'nagfor' - description = 'NAG Fortran Compiler' - - executables = { - 'version_cmd' : ["nagfor", "-V"], - 'compiler_f77' : ["nagfor", "-fixed"], - 'compiler_fix' : ["nagfor", "-fixed"], - 'compiler_f90' : ["nagfor"], - 'linker_so' : ["nagfor"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_flags_debug(self): - version = self.get_version() - if version and version > '6.1': - return ['-g', '-u', '-nan', '-C=all', '-thread_safe', - '-kind=unique', '-Warn=allocation', '-Warn=subnormal'] - else: - return ['-g', '-nan', '-C=all', '-u', '-thread_safe'] - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - compiler = customized_fcompiler(compiler='nagfor') - print(compiler.get_version()) - print(compiler.get_flags_debug()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/none.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/none.py deleted file mode 100644 index bdeea15..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/none.py +++ /dev/null @@ -1,30 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.distutils.fcompiler import FCompiler -from numpy.distutils import customized_fcompiler - 
-compilers = ['NoneFCompiler'] - -class NoneFCompiler(FCompiler): - - compiler_type = 'none' - description = 'Fake Fortran compiler' - - executables = {'compiler_f77': None, - 'compiler_f90': None, - 'compiler_fix': None, - 'linker_so': None, - 'linker_exe': None, - 'archiver': None, - 'ranlib': None, - 'version_cmd': None, - } - - def find_executables(self): - pass - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - print(customized_fcompiler(compiler='none').get_version()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/pathf95.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/pathf95.py deleted file mode 100644 index 5de86f6..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/pathf95.py +++ /dev/null @@ -1,35 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.distutils.fcompiler import FCompiler - -compilers = ['PathScaleFCompiler'] - -class PathScaleFCompiler(FCompiler): - - compiler_type = 'pathf95' - description = 'PathScale Fortran Compiler' - version_pattern = r'PathScale\(TM\) Compiler Suite: Version (?P[\d.]+)' - - executables = { - 'version_cmd' : ["pathf95", "-version"], - 'compiler_f77' : ["pathf95", "-fixedform"], - 'compiler_fix' : ["pathf95", "-fixedform"], - 'compiler_f90' : ["pathf95"], - 'linker_so' : ["pathf95", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-fPIC'] - module_dir_switch = '-module ' # Don't remove ending space! 
- module_include_switch = '-I' - - def get_flags_opt(self): - return ['-O3'] - def get_flags_debug(self): - return ['-g'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='pathf95').get_version()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/pg.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/pg.py deleted file mode 100644 index 9c51947..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/pg.py +++ /dev/null @@ -1,142 +0,0 @@ -# http://www.pgroup.com -from __future__ import division, absolute_import, print_function - -import sys - -from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file -from sys import platform -from os.path import join, dirname, normpath - -compilers = ['PGroupFCompiler', 'PGroupFlangCompiler'] - - -class PGroupFCompiler(FCompiler): - - compiler_type = 'pg' - description = 'Portland Group Fortran Compiler' - version_pattern = r'\s*pg(f77|f90|hpf|fortran) (?P[\d.-]+).*' - - if platform == 'darwin': - executables = { - 'version_cmd': ["", "-V"], - 'compiler_f77': ["pgfortran", "-dynamiclib"], - 'compiler_fix': ["pgfortran", "-Mfixed", "-dynamiclib"], - 'compiler_f90': ["pgfortran", "-dynamiclib"], - 'linker_so': ["libtool"], - 'archiver': ["ar", "-cr"], - 'ranlib': ["ranlib"] - } - pic_flags = [''] - else: - executables = { - 'version_cmd': ["", "-V"], - 'compiler_f77': ["pgfortran"], - 'compiler_fix': ["pgfortran", "-Mfixed"], - 'compiler_f90': ["pgfortran"], - 'linker_so': ["pgfortran"], - 'archiver': ["ar", "-cr"], - 'ranlib': ["ranlib"] - } - pic_flags = ['-fpic'] - - module_dir_switch = '-module ' - module_include_switch = '-I' - - def get_flags(self): - opt = ['-Minform=inform', '-Mnosecond_underscore'] - return self.pic_flags + opt - - def get_flags_opt(self): - return ['-fast'] - - def get_flags_debug(self): - return ['-g'] - - if platform == 
'darwin': - def get_flags_linker_so(self): - return ["-dynamic", '-undefined', 'dynamic_lookup'] - - else: - def get_flags_linker_so(self): - return ["-shared", '-fpic'] - - def runtime_library_dir_option(self, dir): - return '-R%s' % dir - - -if sys.version_info >= (3, 5): - import functools - - class PGroupFlangCompiler(FCompiler): - compiler_type = 'flang' - description = 'Portland Group Fortran LLVM Compiler' - version_pattern = r'\s*(flang|clang) version (?P[\d.-]+).*' - - ar_exe = 'lib.exe' - possible_executables = ['flang'] - - executables = { - 'version_cmd': ["", "--version"], - 'compiler_f77': ["flang"], - 'compiler_fix': ["flang"], - 'compiler_f90': ["flang"], - 'linker_so': [None], - 'archiver': [ar_exe, "/verbose", "/OUT:"], - 'ranlib': None - } - - library_switch = '/OUT:' # No space after /OUT:! - module_dir_switch = '-module ' # Don't remove ending space! - - def get_libraries(self): - opt = FCompiler.get_libraries(self) - opt.extend(['flang', 'flangrti', 'ompstub']) - return opt - - @functools.lru_cache(maxsize=128) - def get_library_dirs(self): - """List of compiler library directories.""" - opt = FCompiler.get_library_dirs(self) - flang_dir = dirname(self.executables['compiler_f77'][0]) - opt.append(normpath(join(flang_dir, '..', 'lib'))) - - return opt - - def get_flags(self): - return [] - - def get_flags_free(self): - return [] - - def get_flags_debug(self): - return ['-g'] - - def get_flags_opt(self): - return ['-O3'] - - def get_flags_arch(self): - return [] - - def runtime_library_dir_option(self, dir): - raise NotImplementedError - -else: - from numpy.distutils.fcompiler import CompilerNotFound - - # No point in supporting on older Pythons because not ABI compatible - class PGroupFlangCompiler(FCompiler): - compiler_type = 'flang' - description = 'Portland Group Fortran LLVM Compiler' - - def get_version(self): - raise CompilerNotFound('Flang unsupported on Python < 3.5') - - -if __name__ == '__main__': - from distutils import log - 
log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - if 'flang' in sys.argv: - print(customized_fcompiler(compiler='flang').get_version()) - else: - print(customized_fcompiler(compiler='pg').get_version()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/sun.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/sun.py deleted file mode 100644 index 561ea85..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/sun.py +++ /dev/null @@ -1,53 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.distutils.ccompiler import simple_version_match -from numpy.distutils.fcompiler import FCompiler - -compilers = ['SunFCompiler'] - -class SunFCompiler(FCompiler): - - compiler_type = 'sun' - description = 'Sun or Forte Fortran 95 Compiler' - # ex: - # f90: Sun WorkShop 6 update 2 Fortran 95 6.2 Patch 111690-10 2003/08/28 - version_match = simple_version_match( - start=r'f9[05]: (Sun|Forte|WorkShop).*Fortran 95') - - executables = { - 'version_cmd' : ["", "-V"], - 'compiler_f77' : ["f90"], - 'compiler_fix' : ["f90", "-fixed"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["", "-Bdynamic", "-G"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = '-moddir=' - module_include_switch = '-M' - pic_flags = ['-xcode=pic32'] - - def get_flags_f77(self): - ret = ["-ftrap=%none"] - if (self.get_version() or '') >= '7': - ret.append("-f77") - else: - ret.append("-fixed") - return ret - def get_opt(self): - return ['-fast', '-dalign'] - def get_arch(self): - return ['-xtarget=generic'] - def get_libraries(self): - opt = [] - opt.extend(['fsu', 'sunmath', 'mvec']) - return opt - - def runtime_library_dir_option(self, dir): - return '-R%s' % dir - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='sun').get_version()) diff --git 
a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/vast.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/vast.py deleted file mode 100644 index adc1591..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/vast.py +++ /dev/null @@ -1,54 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os - -from numpy.distutils.fcompiler.gnu import GnuFCompiler - -compilers = ['VastFCompiler'] - -class VastFCompiler(GnuFCompiler): - compiler_type = 'vast' - compiler_aliases = () - description = 'Pacific-Sierra Research Fortran 90 Compiler' - version_pattern = (r'\s*Pacific-Sierra Research vf90 ' - r'(Personal|Professional)\s+(?P[^\s]*)') - - # VAST f90 does not support -o with -c. So, object files are created - # to the current directory and then moved to build directory - object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile ' - - executables = { - 'version_cmd' : ["vf90", "-v"], - 'compiler_f77' : ["g77"], - 'compiler_fix' : ["f90", "-Wv,-ya"], - 'compiler_f90' : ["f90"], - 'linker_so' : [""], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = None #XXX Fix me - module_include_switch = None #XXX Fix me - - def find_executables(self): - pass - - def get_version_cmd(self): - f90 = self.compiler_f90[0] - d, b = os.path.split(f90) - vf90 = os.path.join(d, 'v'+b) - return vf90 - - def get_flags_arch(self): - vast_version = self.get_version() - gnu = GnuFCompiler() - gnu.customize(None) - self.version = gnu.get_version() - opt = GnuFCompiler.get_flags_arch(self) - self.version = vast_version - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='vast').get_version()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/from_template.py b/venv/lib/python3.7/site-packages/numpy/distutils/from_template.py deleted file mode 
100644 index c5c1163..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/from_template.py +++ /dev/null @@ -1,264 +0,0 @@ -#!/usr/bin/env python -""" - -process_file(filename) - - takes templated file .xxx.src and produces .xxx file where .xxx - is .pyf .f90 or .f using the following template rules: - - '<..>' denotes a template. - - All function and subroutine blocks in a source file with names that - contain '<..>' will be replicated according to the rules in '<..>'. - - The number of comma-separated words in '<..>' will determine the number of - replicates. - - '<..>' may have two different forms, named and short. For example, - - named: - where anywhere inside a block '

' will be replaced with - 'd', 's', 'z', and 'c' for each replicate of the block. - - <_c> is already defined: <_c=s,d,c,z> - <_t> is already defined: <_t=real,double precision,complex,double complex> - - short: - , a short form of the named, useful when no

appears inside - a block. - - In general, '<..>' contains a comma separated list of arbitrary - expressions. If these expression must contain a comma|leftarrow|rightarrow, - then prepend the comma|leftarrow|rightarrow with a backslash. - - If an expression matches '\\' then it will be replaced - by -th expression. - - Note that all '<..>' forms in a block must have the same number of - comma-separated entries. - - Predefined named template rules: - - - - - - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['process_str', 'process_file'] - -import os -import sys -import re - -routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b', re.I) -routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I) -function_start_re = re.compile(r'\n (\$|\*)\s*function\b', re.I) - -def parse_structure(astr): - """ Return a list of tuples for each function or subroutine each - tuple is the start and end of a subroutine or function to be - expanded. 
- """ - - spanlist = [] - ind = 0 - while True: - m = routine_start_re.search(astr, ind) - if m is None: - break - start = m.start() - if function_start_re.match(astr, start, m.end()): - while True: - i = astr.rfind('\n', ind, start) - if i==-1: - break - start = i - if astr[i:i+7]!='\n $': - break - start += 1 - m = routine_end_re.search(astr, m.end()) - ind = end = m and m.end()-1 or len(astr) - spanlist.append((start, end)) - return spanlist - -template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>") -named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>") -list_re = re.compile(r"<\s*((.*?))\s*>") - -def find_repl_patterns(astr): - reps = named_re.findall(astr) - names = {} - for rep in reps: - name = rep[0].strip() or unique_key(names) - repl = rep[1].replace(r'\,', '@comma@') - thelist = conv(repl) - names[name] = thelist - return names - -def find_and_remove_repl_patterns(astr): - names = find_repl_patterns(astr) - astr = re.subn(named_re, '', astr)[0] - return astr, names - -item_re = re.compile(r"\A\\(?P\d+)\Z") -def conv(astr): - b = astr.split(',') - l = [x.strip() for x in b] - for i in range(len(l)): - m = item_re.match(l[i]) - if m: - j = int(m.group('index')) - l[i] = l[j] - return ','.join(l) - -def unique_key(adict): - """ Obtain a unique key given a dictionary.""" - allkeys = list(adict.keys()) - done = False - n = 1 - while not done: - newkey = '__l%s' % (n) - if newkey in allkeys: - n += 1 - else: - done = True - return newkey - - -template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z') -def expand_sub(substr, names): - substr = substr.replace(r'\>', '@rightarrow@') - substr = substr.replace(r'\<', '@leftarrow@') - lnames = find_repl_patterns(substr) - substr = named_re.sub(r"<\1>", substr) # get rid of definition templates - - def listrepl(mobj): - thelist = conv(mobj.group(1).replace(r'\,', '@comma@')) - if template_name_re.match(thelist): - return "<%s>" % (thelist) - name = None - for key in lnames.keys(): # see if list is already in dictionary - 
if lnames[key] == thelist: - name = key - if name is None: # this list is not in the dictionary yet - name = unique_key(lnames) - lnames[name] = thelist - return "<%s>" % name - - substr = list_re.sub(listrepl, substr) # convert all lists to named templates - # newnames are constructed as needed - - numsubs = None - base_rule = None - rules = {} - for r in template_re.findall(substr): - if r not in rules: - thelist = lnames.get(r, names.get(r, None)) - if thelist is None: - raise ValueError('No replicates found for <%s>' % (r)) - if r not in names and not thelist.startswith('_'): - names[r] = thelist - rule = [i.replace('@comma@', ',') for i in thelist.split(',')] - num = len(rule) - - if numsubs is None: - numsubs = num - rules[r] = rule - base_rule = r - elif num == numsubs: - rules[r] = rule - else: - print("Mismatch in number of replacements (base <%s=%s>)" - " for <%s=%s>. Ignoring." % - (base_rule, ','.join(rules[base_rule]), r, thelist)) - if not rules: - return substr - - def namerepl(mobj): - name = mobj.group(1) - return rules.get(name, (k+1)*[name])[k] - - newstr = '' - for k in range(numsubs): - newstr += template_re.sub(namerepl, substr) + '\n\n' - - newstr = newstr.replace('@rightarrow@', '>') - newstr = newstr.replace('@leftarrow@', '<') - return newstr - -def process_str(allstr): - newstr = allstr - writestr = '' - - struct = parse_structure(newstr) - - oldend = 0 - names = {} - names.update(_special_names) - for sub in struct: - cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]]) - writestr += cleanedstr - names.update(defs) - writestr += expand_sub(newstr[sub[0]:sub[1]], names) - oldend = sub[1] - writestr += newstr[oldend:] - - return writestr - -include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P[\w\d./\\]+[.]src)['\"]", re.I) - -def resolve_includes(source): - d = os.path.dirname(source) - with open(source) as fid: - lines = [] - for line in fid: - m = include_src_re.match(line) - if m: - fn = m.group('name') - if not 
os.path.isabs(fn): - fn = os.path.join(d, fn) - if os.path.isfile(fn): - print('Including file', fn) - lines.extend(resolve_includes(fn)) - else: - lines.append(line) - else: - lines.append(line) - return lines - -def process_file(source): - lines = resolve_includes(source) - return process_str(''.join(lines)) - -_special_names = find_repl_patterns(''' -<_c=s,d,c,z> -<_t=real,double precision,complex,double complex> - - - - - -''') - -def main(): - try: - file = sys.argv[1] - except IndexError: - fid = sys.stdin - outfile = sys.stdout - else: - fid = open(file, 'r') - (base, ext) = os.path.splitext(file) - newname = base - outfile = open(newname, 'w') - - allstr = fid.read() - writestr = process_str(allstr) - outfile.write(writestr) - - -if __name__ == "__main__": - main() diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/intelccompiler.py b/venv/lib/python3.7/site-packages/numpy/distutils/intelccompiler.py deleted file mode 100644 index 3386775..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/intelccompiler.py +++ /dev/null @@ -1,113 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import platform - -from distutils.unixccompiler import UnixCCompiler -from numpy.distutils.exec_command import find_executable -from numpy.distutils.ccompiler import simple_version_match -if platform.system() == 'Windows': - from numpy.distutils.msvc9compiler import MSVCCompiler - - -class IntelCCompiler(UnixCCompiler): - """A modified Intel compiler compatible with a GCC-built Python.""" - compiler_type = 'intel' - cc_exe = 'icc' - cc_args = 'fPIC' - - def __init__(self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__(self, verbose, dry_run, force) - - v = self.get_version() - mpopt = 'openmp' if v and v < '15' else 'qopenmp' - self.cc_exe = ('icc -fPIC -fp-model strict -O3 ' - '-fomit-frame-pointer -{}').format(mpopt) - compiler = self.cc_exe - - if platform.system() == 'Darwin': - shared_flag = 
'-Wl,-undefined,dynamic_lookup' - else: - shared_flag = '-shared' - self.set_executables(compiler=compiler, - compiler_so=compiler, - compiler_cxx=compiler, - archiver='xiar' + ' cru', - linker_exe=compiler + ' -shared-intel', - linker_so=compiler + ' ' + shared_flag + - ' -shared-intel') - - -class IntelItaniumCCompiler(IntelCCompiler): - compiler_type = 'intele' - - # On Itanium, the Intel Compiler used to be called ecc, let's search for - # it (now it's also icc, so ecc is last in the search). - for cc_exe in map(find_executable, ['icc', 'ecc']): - if cc_exe: - break - - -class IntelEM64TCCompiler(UnixCCompiler): - """ - A modified Intel x86_64 compiler compatible with a 64bit GCC-built Python. - """ - compiler_type = 'intelem' - cc_exe = 'icc -m64' - cc_args = '-fPIC' - - def __init__(self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__(self, verbose, dry_run, force) - - v = self.get_version() - mpopt = 'openmp' if v and v < '15' else 'qopenmp' - self.cc_exe = ('icc -m64 -fPIC -fp-model strict -O3 ' - '-fomit-frame-pointer -{}').format(mpopt) - compiler = self.cc_exe - - if platform.system() == 'Darwin': - shared_flag = '-Wl,-undefined,dynamic_lookup' - else: - shared_flag = '-shared' - self.set_executables(compiler=compiler, - compiler_so=compiler, - compiler_cxx=compiler, - archiver='xiar' + ' cru', - linker_exe=compiler + ' -shared-intel', - linker_so=compiler + ' ' + shared_flag + - ' -shared-intel') - - -if platform.system() == 'Windows': - class IntelCCompilerW(MSVCCompiler): - """ - A modified Intel compiler compatible with an MSVC-built Python. 
- """ - compiler_type = 'intelw' - compiler_cxx = 'icl' - - def __init__(self, verbose=0, dry_run=0, force=0): - MSVCCompiler.__init__(self, verbose, dry_run, force) - version_match = simple_version_match(start=r'Intel\(R\).*?32,') - self.__version = version_match - - def initialize(self, plat_name=None): - MSVCCompiler.initialize(self, plat_name) - self.cc = self.find_exe('icl.exe') - self.lib = self.find_exe('xilib') - self.linker = self.find_exe('xilink') - self.compile_options = ['/nologo', '/O3', '/MD', '/W3', - '/Qstd=c99'] - self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', - '/Qstd=c99', '/Z7', '/D_DEBUG'] - - class IntelEM64TCCompilerW(IntelCCompilerW): - """ - A modified Intel x86_64 compiler compatible with - a 64bit MSVC-built Python. - """ - compiler_type = 'intelemw' - - def __init__(self, verbose=0, dry_run=0, force=0): - MSVCCompiler.__init__(self, verbose, dry_run, force) - version_match = simple_version_match(start=r'Intel\(R\).*?64,') - self.__version = version_match diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/lib2def.py b/venv/lib/python3.7/site-packages/numpy/distutils/lib2def.py deleted file mode 100644 index 34b1ece..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/lib2def.py +++ /dev/null @@ -1,118 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import re -import sys -import subprocess - -__doc__ = """This module generates a DEF file from the symbols in -an MSVC-compiled DLL import library. It correctly discriminates between -data and functions. The data is collected from the output of the program -nm(1). 
- -Usage: - python lib2def.py [libname.lib] [output.def] -or - python lib2def.py [libname.lib] > output.def - -libname.lib defaults to python.lib and output.def defaults to stdout - -Author: Robert Kern -Last Update: April 30, 1999 -""" - -__version__ = '0.1a' - -py_ver = "%d%d" % tuple(sys.version_info[:2]) - -DEFAULT_NM = ['nm', '-Cs'] - -DEF_HEADER = """LIBRARY python%s.dll -;CODE PRELOAD MOVEABLE DISCARDABLE -;DATA PRELOAD SINGLE - -EXPORTS -""" % py_ver -# the header of the DEF file - -FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE) -DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE) - -def parse_cmd(): - """Parses the command-line arguments. - -libfile, deffile = parse_cmd()""" - if len(sys.argv) == 3: - if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def': - libfile, deffile = sys.argv[1:] - elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib': - deffile, libfile = sys.argv[1:] - else: - print("I'm assuming that your first argument is the library") - print("and the second is the DEF file.") - elif len(sys.argv) == 2: - if sys.argv[1][-4:] == '.def': - deffile = sys.argv[1] - libfile = 'python%s.lib' % py_ver - elif sys.argv[1][-4:] == '.lib': - deffile = None - libfile = sys.argv[1] - else: - libfile = 'python%s.lib' % py_ver - deffile = None - return libfile, deffile - -def getnm(nm_cmd=['nm', '-Cs', 'python%s.lib' % py_ver], shell=True): - """Returns the output of nm_cmd via a pipe. - -nm_output = getnm(nm_cmd = 'nm -Cs py_lib')""" - p = subprocess.Popen(nm_cmd, shell=shell, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, universal_newlines=True) - nm_output, nm_err = p.communicate() - if p.returncode != 0: - raise RuntimeError('failed to run "%s": "%s"' % ( - ' '.join(nm_cmd), nm_err)) - return nm_output - -def parse_nm(nm_output): - """Returns a tuple of lists: dlist for the list of data -symbols and flist for the list of function symbols. 
- -dlist, flist = parse_nm(nm_output)""" - data = DATA_RE.findall(nm_output) - func = FUNC_RE.findall(nm_output) - - flist = [] - for sym in data: - if sym in func and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init'): - flist.append(sym) - - dlist = [] - for sym in data: - if sym not in flist and (sym[:2] == 'Py' or sym[:3] == '_Py'): - dlist.append(sym) - - dlist.sort() - flist.sort() - return dlist, flist - -def output_def(dlist, flist, header, file = sys.stdout): - """Outputs the final DEF file to a file defaulting to stdout. - -output_def(dlist, flist, header, file = sys.stdout)""" - for data_sym in dlist: - header = header + '\t%s DATA\n' % data_sym - header = header + '\n' # blank line - for func_sym in flist: - header = header + '\t%s\n' % func_sym - file.write(header) - -if __name__ == '__main__': - libfile, deffile = parse_cmd() - if deffile is None: - deffile = sys.stdout - else: - deffile = open(deffile, 'w') - nm_cmd = DEFAULT_NM + [str(libfile)] - nm_output = getnm(nm_cmd, shell=False) - dlist, flist = parse_nm(nm_output) - output_def(dlist, flist, DEF_HEADER, deffile) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/line_endings.py b/venv/lib/python3.7/site-packages/numpy/distutils/line_endings.py deleted file mode 100644 index fe8fd1b..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/line_endings.py +++ /dev/null @@ -1,76 +0,0 @@ -""" Functions for converting from DOS to UNIX line endings - -""" -from __future__ import division, absolute_import, print_function - -import sys, re, os - -def dos2unix(file): - "Replace CRLF with LF in argument files. Print names of changed files." 
- if os.path.isdir(file): - print(file, "Directory!") - return - - with open(file, "rb") as fp: - data = fp.read() - if '\0' in data: - print(file, "Binary!") - return - - newdata = re.sub("\r\n", "\n", data) - if newdata != data: - print('dos2unix:', file) - with open(file, "wb") as f: - f.write(newdata) - return file - else: - print(file, 'ok') - -def dos2unix_one_dir(modified_files, dir_name, file_names): - for file in file_names: - full_path = os.path.join(dir_name, file) - file = dos2unix(full_path) - if file is not None: - modified_files.append(file) - -def dos2unix_dir(dir_name): - modified_files = [] - os.path.walk(dir_name, dos2unix_one_dir, modified_files) - return modified_files -#---------------------------------- - -def unix2dos(file): - "Replace LF with CRLF in argument files. Print names of changed files." - if os.path.isdir(file): - print(file, "Directory!") - return - - with open(file, "rb") as fp: - data = fp.read() - if '\0' in data: - print(file, "Binary!") - return - newdata = re.sub("\r\n", "\n", data) - newdata = re.sub("\n", "\r\n", newdata) - if newdata != data: - print('unix2dos:', file) - with open(file, "wb") as f: - f.write(newdata) - return file - else: - print(file, 'ok') - -def unix2dos_one_dir(modified_files, dir_name, file_names): - for file in file_names: - full_path = os.path.join(dir_name, file) - unix2dos(full_path) - if file is not None: - modified_files.append(file) - -def unix2dos_dir(dir_name): - modified_files = [] - os.path.walk(dir_name, unix2dos_one_dir, modified_files) - return modified_files - -if __name__ == "__main__": - dos2unix_dir(sys.argv[1]) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/log.py b/venv/lib/python3.7/site-packages/numpy/distutils/log.py deleted file mode 100644 index ff7de86..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/log.py +++ /dev/null @@ -1,95 +0,0 @@ -# Colored log, requires Python 2.3 or up. 
-from __future__ import division, absolute_import, print_function - -import sys -from distutils.log import * -from distutils.log import Log as old_Log -from distutils.log import _global_log - -if sys.version_info[0] < 3: - from .misc_util import (red_text, default_text, cyan_text, green_text, - is_sequence, is_string) -else: - from numpy.distutils.misc_util import (red_text, default_text, cyan_text, - green_text, is_sequence, is_string) - - -def _fix_args(args,flag=1): - if is_string(args): - return args.replace('%', '%%') - if flag and is_sequence(args): - return tuple([_fix_args(a, flag=0) for a in args]) - return args - - -class Log(old_Log): - def _log(self, level, msg, args): - if level >= self.threshold: - if args: - msg = msg % _fix_args(args) - if 0: - if msg.startswith('copying ') and msg.find(' -> ') != -1: - return - if msg.startswith('byte-compiling '): - return - print(_global_color_map[level](msg)) - sys.stdout.flush() - - def good(self, msg, *args): - """ - If we log WARN messages, log this message as a 'nice' anti-warn - message. - - """ - if WARN >= self.threshold: - if args: - print(green_text(msg % _fix_args(args))) - else: - print(green_text(msg)) - sys.stdout.flush() - - -_global_log.__class__ = Log - -good = _global_log.good - -def set_threshold(level, force=False): - prev_level = _global_log.threshold - if prev_level > DEBUG or force: - # If we're running at DEBUG, don't change the threshold, as there's - # likely a good reason why we're running at this level. 
- _global_log.threshold = level - if level <= DEBUG: - info('set_threshold: setting threshold to DEBUG level,' - ' it can be changed only with force argument') - else: - info('set_threshold: not changing threshold from DEBUG level' - ' %s to %s' % (prev_level, level)) - return prev_level - -def get_threshold(): - return _global_log.threshold - -def set_verbosity(v, force=False): - prev_level = _global_log.threshold - if v < 0: - set_threshold(ERROR, force) - elif v == 0: - set_threshold(WARN, force) - elif v == 1: - set_threshold(INFO, force) - elif v >= 2: - set_threshold(DEBUG, force) - return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level, 1) - - -_global_color_map = { - DEBUG:cyan_text, - INFO:default_text, - WARN:red_text, - ERROR:red_text, - FATAL:red_text -} - -# don't use INFO,.. flags in set_verbosity, these flags are for set_threshold. -set_verbosity(0, force=True) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c b/venv/lib/python3.7/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c deleted file mode 100644 index 485a675..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c +++ /dev/null @@ -1,6 +0,0 @@ -int _get_output_format(void) -{ - return 0; -} - -int _imp____lc_codepage = 0; diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/mingw32ccompiler.py b/venv/lib/python3.7/site-packages/numpy/distutils/mingw32ccompiler.py deleted file mode 100644 index a56cc8f..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/mingw32ccompiler.py +++ /dev/null @@ -1,660 +0,0 @@ -""" -Support code for building Python extensions on Windows. - - # NT stuff - # 1. Make sure libpython.a exists for gcc. If not, build it. - # 2. Force windows to use gcc (we're struggling with MSVC and g77 support) - # 3. 
Force windows to use g77 - -""" -from __future__ import division, absolute_import, print_function - -import os -import sys -import subprocess -import re -import textwrap - -# Overwrite certain distutils.ccompiler functions: -import numpy.distutils.ccompiler - -if sys.version_info[0] < 3: - from . import log -else: - from numpy.distutils import log -# NT stuff -# 1. Make sure libpython.a exists for gcc. If not, build it. -# 2. Force windows to use gcc (we're struggling with MSVC and g77 support) -# --> this is done in numpy/distutils/ccompiler.py -# 3. Force windows to use g77 - -import distutils.cygwinccompiler -from distutils.version import StrictVersion -from distutils.unixccompiler import UnixCCompiler -from distutils.msvccompiler import get_build_version as get_build_msvc_version -from distutils.errors import (DistutilsExecError, CompileError, - UnknownFileError) -from numpy.distutils.misc_util import (msvc_runtime_library, - msvc_runtime_version, - msvc_runtime_major, - get_build_architecture) - -def get_msvcr_replacement(): - """Replacement for outdated version of get_msvcr from cygwinccompiler""" - msvcr = msvc_runtime_library() - return [] if msvcr is None else [msvcr] - -# monkey-patch cygwinccompiler with our updated version from misc_util -# to avoid getting an exception raised on Python 3.5 -distutils.cygwinccompiler.get_msvcr = get_msvcr_replacement - -# Useful to generate table of symbols from a dll -_START = re.compile(r'\[Ordinal/Name Pointer\] Table') -_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)') - -# the same as cygwin plus some additional parameters -class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler): - """ A modified MingW32 compiler compatible with an MSVC built Python. 
- - """ - - compiler_type = 'mingw32' - - def __init__ (self, - verbose=0, - dry_run=0, - force=0): - - distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, verbose, - dry_run, force) - - # we need to support 3.2 which doesn't match the standard - # get_versions methods regex - if self.gcc_version is None: - try: - out_string = subprocess.check_output(['gcc', '-dumpversion']) - except (OSError, CalledProcessError): - out_string = "" # ignore failures to match old behavior - result = re.search(r'(\d+\.\d+)', out_string) - if result: - self.gcc_version = StrictVersion(result.group(1)) - - # A real mingw32 doesn't need to specify a different entry point, - # but cygwin 2.91.57 in no-cygwin-mode needs it. - if self.gcc_version <= "2.91.57": - entry_point = '--entry _DllMain@12' - else: - entry_point = '' - - if self.linker_dll == 'dllwrap': - # Commented out '--driver-name g++' part that fixes weird - # g++.exe: g++: No such file or directory - # error (mingw 1.0 in Enthon24 tree, gcc-3.4.5). - # If the --driver-name part is required for some environment - # then make the inclusion of this part specific to that - # environment. - self.linker = 'dllwrap' # --driver-name g++' - elif self.linker_dll == 'gcc': - self.linker = 'g++' - - # **changes: eric jones 4/11/01 - # 1. Check for import library on Windows. Build if it doesn't exist. - - build_import_library() - - # Check for custom msvc runtime library on Windows. Build if it doesn't exist. 
- msvcr_success = build_msvcr_library() - msvcr_dbg_success = build_msvcr_library(debug=True) - if msvcr_success or msvcr_dbg_success: - # add preprocessor statement for using customized msvcr lib - self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR') - - # Define the MSVC version as hint for MinGW - msvcr_version = msvc_runtime_version() - if msvcr_version: - self.define_macro('__MSVCRT_VERSION__', '0x%04i' % msvcr_version) - - # MS_WIN64 should be defined when building for amd64 on windows, - # but python headers define it only for MS compilers, which has all - # kind of bad consequences, like using Py_ModuleInit4 instead of - # Py_ModuleInit4_64, etc... So we add it here - if get_build_architecture() == 'AMD64': - if self.gcc_version < "4.0": - self.set_executables( - compiler='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall', - compiler_so='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0' - ' -Wall -Wstrict-prototypes', - linker_exe='gcc -g -mno-cygwin', - linker_so='gcc -g -mno-cygwin -shared') - else: - # gcc-4 series releases do not support -mno-cygwin option - self.set_executables( - compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall', - compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall -Wstrict-prototypes', - linker_exe='gcc -g', - linker_so='gcc -g -shared') - else: - if self.gcc_version <= "3.0.0": - self.set_executables( - compiler='gcc -mno-cygwin -O2 -w', - compiler_so='gcc -mno-cygwin -mdll -O2 -w' - ' -Wstrict-prototypes', - linker_exe='g++ -mno-cygwin', - linker_so='%s -mno-cygwin -mdll -static %s' % - (self.linker, entry_point)) - elif self.gcc_version < "4.0": - self.set_executables( - compiler='gcc -mno-cygwin -O2 -Wall', - compiler_so='gcc -mno-cygwin -O2 -Wall' - ' -Wstrict-prototypes', - linker_exe='g++ -mno-cygwin', - linker_so='g++ -mno-cygwin -shared') - else: - # gcc-4 series releases do not support -mno-cygwin option - self.set_executables(compiler='gcc -O2 -Wall', - compiler_so='gcc -O2 -Wall -Wstrict-prototypes', - linker_exe='g++ ', - linker_so='g++ 
-shared') - # added for python2.3 support - # we can't pass it through set_executables because pre 2.2 would fail - self.compiler_cxx = ['g++'] - - # Maybe we should also append -mthreads, but then the finished dlls - # need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support - # thread-safe exception handling on `Mingw32') - - # no additional libraries needed - #self.dll_libraries=[] - return - - # __init__ () - - def link(self, - target_desc, - objects, - output_filename, - output_dir, - libraries, - library_dirs, - runtime_library_dirs, - export_symbols = None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None): - # Include the appropriate MSVC runtime library if Python was built - # with MSVC >= 7.0 (MinGW standard is msvcrt) - runtime_library = msvc_runtime_library() - if runtime_library: - if not libraries: - libraries = [] - libraries.append(runtime_library) - args = (self, - target_desc, - objects, - output_filename, - output_dir, - libraries, - library_dirs, - runtime_library_dirs, - None, #export_symbols, we do this in our def-file - debug, - extra_preargs, - extra_postargs, - build_temp, - target_lang) - if self.gcc_version < "3.0.0": - func = distutils.cygwinccompiler.CygwinCCompiler.link - else: - func = UnixCCompiler.link - func(*args[:func.__code__.co_argcount]) - return - - def object_filenames (self, - source_filenames, - strip_dir=0, - output_dir=''): - if output_dir is None: output_dir = '' - obj_names = [] - for src_name in source_filenames: - # use normcase to make sure '.rc' is really '.rc' and not '.RC' - (base, ext) = os.path.splitext (os.path.normcase(src_name)) - - # added these lines to strip off windows drive letters - # without it, .o files are placed next to .c files - # instead of the build directory - drv, base = os.path.splitdrive(base) - if drv: - base = base[1:] - - if ext not in (self.src_extensions + ['.rc', '.res']): - raise UnknownFileError( - "unknown file type '%s' 
(from '%s')" % \ - (ext, src_name)) - if strip_dir: - base = os.path.basename (base) - if ext == '.res' or ext == '.rc': - # these need to be compiled to object files - obj_names.append (os.path.join (output_dir, - base + ext + self.obj_extension)) - else: - obj_names.append (os.path.join (output_dir, - base + self.obj_extension)) - return obj_names - - # object_filenames () - - -def find_python_dll(): - # We can't do much here: - # - find it in the virtualenv (sys.prefix) - # - find it in python main dir (sys.base_prefix, if in a virtualenv) - # - sys.real_prefix is main dir for virtualenvs in Python 2.7 - # - in system32, - # - ortherwise (Sxs), I don't know how to get it. - stems = [sys.prefix] - if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix: - stems.append(sys.base_prefix) - elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix: - stems.append(sys.real_prefix) - - sub_dirs = ['', 'lib', 'bin'] - # generate possible combinations of directory trees and sub-directories - lib_dirs = [] - for stem in stems: - for folder in sub_dirs: - lib_dirs.append(os.path.join(stem, folder)) - - # add system directory as well - if 'SYSTEMROOT' in os.environ: - lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'System32')) - - # search in the file system for possible candidates - major_version, minor_version = tuple(sys.version_info[:2]) - patterns = ['python%d%d.dll'] - - for pat in patterns: - dllname = pat % (major_version, minor_version) - print("Looking for %s" % dllname) - for folder in lib_dirs: - dll = os.path.join(folder, dllname) - if os.path.exists(dll): - return dll - - raise ValueError("%s not found in %s" % (dllname, lib_dirs)) - -def dump_table(dll): - st = subprocess.check_output(["objdump.exe", "-p", dll]) - return st.split(b'\n') - -def generate_def(dll, dfile): - """Given a dll file location, get all its exported symbols and dump them - into the given def file. 
- - The .def file will be overwritten""" - dump = dump_table(dll) - for i in range(len(dump)): - if _START.match(dump[i].decode()): - break - else: - raise ValueError("Symbol table not found") - - syms = [] - for j in range(i+1, len(dump)): - m = _TABLE.match(dump[j].decode()) - if m: - syms.append((int(m.group(1).strip()), m.group(2))) - else: - break - - if len(syms) == 0: - log.warn('No symbols found in %s' % dll) - - with open(dfile, 'w') as d: - d.write('LIBRARY %s\n' % os.path.basename(dll)) - d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n') - d.write(';DATA PRELOAD SINGLE\n') - d.write('\nEXPORTS\n') - for s in syms: - #d.write('@%d %s\n' % (s[0], s[1])) - d.write('%s\n' % s[1]) - -def find_dll(dll_name): - - arch = {'AMD64' : 'amd64', - 'Intel' : 'x86'}[get_build_architecture()] - - def _find_dll_in_winsxs(dll_name): - # Walk through the WinSxS directory to find the dll. - winsxs_path = os.path.join(os.environ.get('WINDIR', r'C:\WINDOWS'), - 'winsxs') - if not os.path.exists(winsxs_path): - return None - for root, dirs, files in os.walk(winsxs_path): - if dll_name in files and arch in root: - return os.path.join(root, dll_name) - return None - - def _find_dll_in_path(dll_name): - # First, look in the Python directory, then scan PATH for - # the given dll name. - for path in [sys.prefix] + os.environ['PATH'].split(';'): - filepath = os.path.join(path, dll_name) - if os.path.exists(filepath): - return os.path.abspath(filepath) - - return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name) - -def build_msvcr_library(debug=False): - if os.name != 'nt': - return False - - # If the version number is None, then we couldn't find the MSVC runtime at - # all, because we are running on a Python distribution which is customed - # compiled; trust that the compiler is the same as the one available to us - # now, and that it is capable of linking with the correct runtime without - # any extra options. 
- msvcr_ver = msvc_runtime_major() - if msvcr_ver is None: - log.debug('Skip building import library: ' - 'Runtime is not compiled with MSVC') - return False - - # Skip using a custom library for versions < MSVC 8.0 - if msvcr_ver < 80: - log.debug('Skip building msvcr library:' - ' custom functionality not present') - return False - - msvcr_name = msvc_runtime_library() - if debug: - msvcr_name += 'd' - - # Skip if custom library already exists - out_name = "lib%s.a" % msvcr_name - out_file = os.path.join(sys.prefix, 'libs', out_name) - if os.path.isfile(out_file): - log.debug('Skip building msvcr library: "%s" exists' % - (out_file,)) - return True - - # Find the msvcr dll - msvcr_dll_name = msvcr_name + '.dll' - dll_file = find_dll(msvcr_dll_name) - if not dll_file: - log.warn('Cannot build msvcr library: "%s" not found' % - msvcr_dll_name) - return False - - def_name = "lib%s.def" % msvcr_name - def_file = os.path.join(sys.prefix, 'libs', def_name) - - log.info('Building msvcr library: "%s" (from %s)' \ - % (out_file, dll_file)) - - # Generate a symbol definition file from the msvcr dll - generate_def(dll_file, def_file) - - # Create a custom mingw library for the given symbol definitions - cmd = ['dlltool', '-d', def_file, '-l', out_file] - retcode = subprocess.call(cmd) - - # Clean up symbol definitions - os.remove(def_file) - - return (not retcode) - -def build_import_library(): - if os.name != 'nt': - return - - arch = get_build_architecture() - if arch == 'AMD64': - return _build_import_library_amd64() - elif arch == 'Intel': - return _build_import_library_x86() - else: - raise ValueError("Unhandled arch %s" % arch) - -def _check_for_import_lib(): - """Check if an import library for the Python runtime already exists.""" - major_version, minor_version = tuple(sys.version_info[:2]) - - # patterns for the file name of the library itself - patterns = ['libpython%d%d.a', - 'libpython%d%d.dll.a', - 'libpython%d.%d.dll.a'] - - # directory trees that may contain 
the library - stems = [sys.prefix] - if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix: - stems.append(sys.base_prefix) - elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix: - stems.append(sys.real_prefix) - - # possible subdirectories within those trees where it is placed - sub_dirs = ['libs', 'lib'] - - # generate a list of candidate locations - candidates = [] - for pat in patterns: - filename = pat % (major_version, minor_version) - for stem_dir in stems: - for folder in sub_dirs: - candidates.append(os.path.join(stem_dir, folder, filename)) - - # test the filesystem to see if we can find any of these - for fullname in candidates: - if os.path.isfile(fullname): - # already exists, in location given - return (True, fullname) - - # needs to be built, preferred location given first - return (False, candidates[0]) - -def _build_import_library_amd64(): - out_exists, out_file = _check_for_import_lib() - if out_exists: - log.debug('Skip building import library: "%s" exists', out_file) - return - - # get the runtime dll for which we are building import library - dll_file = find_python_dll() - log.info('Building import library (arch=AMD64): "%s" (from %s)' % - (out_file, dll_file)) - - # generate symbol list from this library - def_name = "python%d%d.def" % tuple(sys.version_info[:2]) - def_file = os.path.join(sys.prefix, 'libs', def_name) - generate_def(dll_file, def_file) - - # generate import library from this symbol list - cmd = ['dlltool', '-d', def_file, '-l', out_file] - subprocess.check_call(cmd) - -def _build_import_library_x86(): - """ Build the import libraries for Mingw32-gcc on Windows - """ - out_exists, out_file = _check_for_import_lib() - if out_exists: - log.debug('Skip building import library: "%s" exists', out_file) - return - - lib_name = "python%d%d.lib" % tuple(sys.version_info[:2]) - lib_file = os.path.join(sys.prefix, 'libs', lib_name) - if not os.path.isfile(lib_file): - # didn't find library file in virtualenv, 
try base distribution, too, - # and use that instead if found there. for Python 2.7 venvs, the base - # directory is in attribute real_prefix instead of base_prefix. - if hasattr(sys, 'base_prefix'): - base_lib = os.path.join(sys.base_prefix, 'libs', lib_name) - elif hasattr(sys, 'real_prefix'): - base_lib = os.path.join(sys.real_prefix, 'libs', lib_name) - else: - base_lib = '' # os.path.isfile('') == False - - if os.path.isfile(base_lib): - lib_file = base_lib - else: - log.warn('Cannot build import library: "%s" not found', lib_file) - return - log.info('Building import library (ARCH=x86): "%s"', out_file) - - from numpy.distutils import lib2def - - def_name = "python%d%d.def" % tuple(sys.version_info[:2]) - def_file = os.path.join(sys.prefix, 'libs', def_name) - nm_output = lib2def.getnm( - lib2def.DEFAULT_NM + [lib_file], shell=False) - dlist, flist = lib2def.parse_nm(nm_output) - with open(def_file, 'w') as fid: - lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, fid) - - dll_name = find_python_dll () - - cmd = ["dlltool", - "--dllname", dll_name, - "--def", def_file, - "--output-lib", out_file] - status = subprocess.check_output(cmd) - if status: - log.warn('Failed to build import library for gcc. Linking will fail.') - return - -#===================================== -# Dealing with Visual Studio MANIFESTS -#===================================== - -# Functions to deal with visual studio manifests. Manifest are a mechanism to -# enforce strong DLL versioning on windows, and has nothing to do with -# distutils MANIFEST. manifests are XML files with version info, and used by -# the OS loader; they are necessary when linking against a DLL not in the -# system path; in particular, official python 2.6 binary is built against the -# MS runtime 9 (the one from VS 2008), which is not available on most windows -# systems; python 2.6 installer does install it in the Win SxS (Side by side) -# directory, but this requires the manifest for this to work. 
This is a big -# mess, thanks MS for a wonderful system. - -# XXX: ideally, we should use exactly the same version as used by python. I -# submitted a patch to get this version, but it was only included for python -# 2.6.1 and above. So for versions below, we use a "best guess". -_MSVCRVER_TO_FULLVER = {} -if sys.platform == 'win32': - try: - import msvcrt - # I took one version in my SxS directory: no idea if it is the good - # one, and we can't retrieve it from python - _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42" - _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8" - # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0 - # on Windows XP: - _MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460" - # Python 3.7 uses 1415, but get_build_version returns 140 ?? - _MSVCRVER_TO_FULLVER['140'] = "14.15.26726.0" - if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"): - major, minor, rest = msvcrt.CRT_ASSEMBLY_VERSION.split(".", 2) - _MSVCRVER_TO_FULLVER[major + minor] = msvcrt.CRT_ASSEMBLY_VERSION - del major, minor, rest - except ImportError: - # If we are here, means python was not built with MSVC. Not sure what - # to do in that case: manifest building will fail, but it should not be - # used in that case anyway - log.warn('Cannot import msvcrt: using manifest will not be possible') - -def msvc_manifest_xml(maj, min): - """Given a major and minor version of the MSVCR, returns the - corresponding XML file.""" - try: - fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)] - except KeyError: - raise ValueError("Version %d,%d of MSVCRT not supported yet" % - (maj, min)) - # Don't be fooled, it looks like an XML, but it is not. In particular, it - # should not have any space before starting, and its size should be - # divisible by 4, most likely for alignment constraints when the xml is - # embedded in the binary... - # This template was copied directly from the python 2.6 binary (using - # strings.exe from mingw on python.exe). 
- template = textwrap.dedent("""\ - - - - - - - - - - - - - - """) - - return template % {'fullver': fullver, 'maj': maj, 'min': min} - -def manifest_rc(name, type='dll'): - """Return the rc file used to generate the res file which will be embedded - as manifest for given manifest file name, of given type ('dll' or - 'exe'). - - Parameters - ---------- - name : str - name of the manifest file to embed - type : str {'dll', 'exe'} - type of the binary which will embed the manifest - - """ - if type == 'dll': - rctype = 2 - elif type == 'exe': - rctype = 1 - else: - raise ValueError("Type %s not supported" % type) - - return """\ -#include "winuser.h" -%d RT_MANIFEST %s""" % (rctype, name) - -def check_embedded_msvcr_match_linked(msver): - """msver is the ms runtime version used for the MANIFEST.""" - # check msvcr major version are the same for linking and - # embedding - maj = msvc_runtime_major() - if maj: - if not maj == int(msver): - raise ValueError( - "Discrepancy between linked msvcr " \ - "(%d) and the one about to be embedded " \ - "(%d)" % (int(msver), maj)) - -def configtest_name(config): - base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c")) - return os.path.splitext(base)[0] - -def manifest_name(config): - # Get configest name (including suffix) - root = configtest_name(config) - exext = config.compiler.exe_extension - return root + exext + ".manifest" - -def rc_name(config): - # Get configtest name (including suffix) - root = configtest_name(config) - return root + ".rc" - -def generate_manifest(config): - msver = get_build_msvc_version() - if msver is not None: - if msver >= 8: - check_embedded_msvcr_match_linked(msver) - ma = int(msver) - mi = int((msver - ma) * 10) - # Write the manifest file - manxml = msvc_manifest_xml(ma, mi) - man = open(manifest_name(config), "w") - config.temp_files.append(manifest_name(config)) - man.write(manxml) - man.close() diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/misc_util.py 
b/venv/lib/python3.7/site-packages/numpy/distutils/misc_util.py deleted file mode 100644 index bb1699e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/misc_util.py +++ /dev/null @@ -1,2373 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import re -import sys -import copy -import glob -import atexit -import tempfile -import subprocess -import shutil -import multiprocessing -import textwrap - -import distutils -from distutils.errors import DistutilsError -try: - from threading import local as tlocal -except ImportError: - from dummy_threading import local as tlocal - -# stores temporary directory of each thread to only create one per thread -_tdata = tlocal() - -# store all created temporary directories so they can be deleted on exit -_tmpdirs = [] -def clean_up_temporary_directory(): - if _tmpdirs is not None: - for d in _tmpdirs: - try: - shutil.rmtree(d) - except OSError: - pass - -atexit.register(clean_up_temporary_directory) - -from numpy.distutils.compat import get_exception -from numpy.compat import basestring -from numpy.compat import npy_load_module - -__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict', - 'dict_append', 'appendpath', 'generate_config_py', - 'get_cmd', 'allpath', 'get_mathlibs', - 'terminal_has_colors', 'red_text', 'green_text', 'yellow_text', - 'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings', - 'has_f_sources', 'has_cxx_sources', 'filter_sources', - 'get_dependencies', 'is_local_src_dir', 'get_ext_source_files', - 'get_script_files', 'get_lib_source_files', 'get_data_files', - 'dot_join', 'get_frame', 'minrelpath', 'njoin', - 'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language', - 'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info', - 'get_num_build_jobs'] - -class InstallableLib(object): - """ - Container to hold information on an installable library. 
- - Parameters - ---------- - name : str - Name of the installed library. - build_info : dict - Dictionary holding build information. - target_dir : str - Absolute path specifying where to install the library. - - See Also - -------- - Configuration.add_installed_library - - Notes - ----- - The three parameters are stored as attributes with the same names. - - """ - def __init__(self, name, build_info, target_dir): - self.name = name - self.build_info = build_info - self.target_dir = target_dir - - -def get_num_build_jobs(): - """ - Get number of parallel build jobs set by the --parallel command line - argument of setup.py - If the command did not receive a setting the environment variable - NPY_NUM_BUILD_JOBS is checked. If that is unset, return the number of - processors on the system, with a maximum of 8 (to prevent - overloading the system if there a lot of CPUs). - - Returns - ------- - out : int - number of parallel jobs that can be run - - """ - from numpy.distutils.core import get_distribution - try: - cpu_count = len(os.sched_getaffinity(0)) - except AttributeError: - cpu_count = multiprocessing.cpu_count() - cpu_count = min(cpu_count, 8) - envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", cpu_count)) - dist = get_distribution() - # may be None during configuration - if dist is None: - return envjobs - - # any of these three may have the job set, take the largest - cmdattr = (getattr(dist.get_command_obj('build'), 'parallel', None), - getattr(dist.get_command_obj('build_ext'), 'parallel', None), - getattr(dist.get_command_obj('build_clib'), 'parallel', None)) - if all(x is None for x in cmdattr): - return envjobs - else: - return max(x for x in cmdattr if x is not None) - -def quote_args(args): - # don't used _nt_quote_args as it does not check if - # args items already have quotes or not. 
- args = list(args) - for i in range(len(args)): - a = args[i] - if ' ' in a and a[0] not in '"\'': - args[i] = '"%s"' % (a) - return args - -def allpath(name): - "Convert a /-separated pathname to one using the OS's path separator." - splitted = name.split('/') - return os.path.join(*splitted) - -def rel_path(path, parent_path): - """Return path relative to parent_path.""" - # Use realpath to avoid issues with symlinked dirs (see gh-7707) - pd = os.path.realpath(os.path.abspath(parent_path)) - apath = os.path.realpath(os.path.abspath(path)) - if len(apath) < len(pd): - return path - if apath == pd: - return '' - if pd == apath[:len(pd)]: - assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)])) - path = apath[len(pd)+1:] - return path - -def get_path_from_frame(frame, parent_path=None): - """Return path of the module given a frame object from the call stack. - - Returned path is relative to parent_path when given, - otherwise it is absolute path. - """ - - # First, try to find if the file name is in the frame. - try: - caller_file = eval('__file__', frame.f_globals, frame.f_locals) - d = os.path.dirname(os.path.abspath(caller_file)) - except NameError: - # __file__ is not defined, so let's try __name__. We try this second - # because setuptools spoofs __name__ to be '__main__' even though - # sys.modules['__main__'] might be something else, like easy_install(1). - caller_name = eval('__name__', frame.f_globals, frame.f_locals) - __import__(caller_name) - mod = sys.modules[caller_name] - if hasattr(mod, '__file__'): - d = os.path.dirname(os.path.abspath(mod.__file__)) - else: - # we're probably running setup.py as execfile("setup.py") - # (likely we're building an egg) - d = os.path.abspath('.') - # hmm, should we use sys.argv[0] like in __builtin__ case? - - if parent_path is not None: - d = rel_path(d, parent_path) - - return d or '.' 
- -def njoin(*path): - """Join two or more pathname components + - - convert a /-separated pathname to one using the OS's path separator. - - resolve `..` and `.` from path. - - Either passing n arguments as in njoin('a','b'), or a sequence - of n names as in njoin(['a','b']) is handled, or a mixture of such arguments. - """ - paths = [] - for p in path: - if is_sequence(p): - # njoin(['a', 'b'], 'c') - paths.append(njoin(*p)) - else: - assert is_string(p) - paths.append(p) - path = paths - if not path: - # njoin() - joined = '' - else: - # njoin('a', 'b') - joined = os.path.join(*path) - if os.path.sep != '/': - joined = joined.replace('/', os.path.sep) - return minrelpath(joined) - -def get_mathlibs(path=None): - """Return the MATHLIB line from numpyconfig.h - """ - if path is not None: - config_file = os.path.join(path, '_numpyconfig.h') - else: - # Look for the file in each of the numpy include directories. - dirs = get_numpy_include_dirs() - for path in dirs: - fn = os.path.join(path, '_numpyconfig.h') - if os.path.exists(fn): - config_file = fn - break - else: - raise DistutilsError('_numpyconfig.h not found in numpy include ' - 'dirs %r' % (dirs,)) - - with open(config_file) as fid: - mathlibs = [] - s = '#define MATHLIB' - for line in fid: - if line.startswith(s): - value = line[len(s):].strip() - if value: - mathlibs.extend(value.split(',')) - return mathlibs - -def minrelpath(path): - """Resolve `..` and '.' from path. - """ - if not is_string(path): - return path - if '.' 
not in path: - return path - l = path.split(os.sep) - while l: - try: - i = l.index('.', 1) - except ValueError: - break - del l[i] - j = 1 - while l: - try: - i = l.index('..', j) - except ValueError: - break - if l[i-1]=='..': - j += 1 - else: - del l[i], l[i-1] - j = 1 - if not l: - return '' - return os.sep.join(l) - -def sorted_glob(fileglob): - """sorts output of python glob for https://bugs.python.org/issue30461 - to allow extensions to have reproducible build results""" - return sorted(glob.glob(fileglob)) - -def _fix_paths(paths, local_path, include_non_existing): - assert is_sequence(paths), repr(type(paths)) - new_paths = [] - assert not is_string(paths), repr(paths) - for n in paths: - if is_string(n): - if '*' in n or '?' in n: - p = sorted_glob(n) - p2 = sorted_glob(njoin(local_path, n)) - if p2: - new_paths.extend(p2) - elif p: - new_paths.extend(p) - else: - if include_non_existing: - new_paths.append(n) - print('could not resolve pattern in %r: %r' % - (local_path, n)) - else: - n2 = njoin(local_path, n) - if os.path.exists(n2): - new_paths.append(n2) - else: - if os.path.exists(n): - new_paths.append(n) - elif include_non_existing: - new_paths.append(n) - if not os.path.exists(n): - print('non-existing path in %r: %r' % - (local_path, n)) - - elif is_sequence(n): - new_paths.extend(_fix_paths(n, local_path, include_non_existing)) - else: - new_paths.append(n) - return [minrelpath(p) for p in new_paths] - -def gpaths(paths, local_path='', include_non_existing=True): - """Apply glob to paths and prepend local_path if needed. 
- """ - if is_string(paths): - paths = (paths,) - return _fix_paths(paths, local_path, include_non_existing) - -def make_temp_file(suffix='', prefix='', text=True): - if not hasattr(_tdata, 'tempdir'): - _tdata.tempdir = tempfile.mkdtemp() - _tmpdirs.append(_tdata.tempdir) - fid, name = tempfile.mkstemp(suffix=suffix, - prefix=prefix, - dir=_tdata.tempdir, - text=text) - fo = os.fdopen(fid, 'w') - return fo, name - -# Hooks for colored terminal output. -# See also https://web.archive.org/web/20100314204946/http://www.livinglogic.de/Python/ansistyle -def terminal_has_colors(): - if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ: - # Avoid importing curses that causes illegal operation - # with a message: - # PYTHON2 caused an invalid page fault in - # module CYGNURSES7.DLL as 015f:18bbfc28 - # Details: Python 2.3.3 [GCC 3.3.1 (cygming special)] - # ssh to Win32 machine from debian - # curses.version is 2.2 - # CYGWIN_98-4.10, release 1.5.7(0.109/3/2)) - return 0 - if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty(): - try: - import curses - curses.setupterm() - if (curses.tigetnum("colors") >= 0 - and curses.tigetnum("pairs") >= 0 - and ((curses.tigetstr("setf") is not None - and curses.tigetstr("setb") is not None) - or (curses.tigetstr("setaf") is not None - and curses.tigetstr("setab") is not None) - or curses.tigetstr("scp") is not None)): - return 1 - except Exception: - pass - return 0 - -if terminal_has_colors(): - _colour_codes = dict(black=0, red=1, green=2, yellow=3, - blue=4, magenta=5, cyan=6, white=7, default=9) - def colour_text(s, fg=None, bg=None, bold=False): - seq = [] - if bold: - seq.append('1') - if fg: - fgcode = 30 + _colour_codes.get(fg.lower(), 0) - seq.append(str(fgcode)) - if bg: - bgcode = 40 + _colour_codes.get(fg.lower(), 7) - seq.append(str(bgcode)) - if seq: - return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s) - else: - return s -else: - def colour_text(s, fg=None, bg=None): - return s - -def default_text(s): - return 
colour_text(s, 'default') -def red_text(s): - return colour_text(s, 'red') -def green_text(s): - return colour_text(s, 'green') -def yellow_text(s): - return colour_text(s, 'yellow') -def cyan_text(s): - return colour_text(s, 'cyan') -def blue_text(s): - return colour_text(s, 'blue') - -######################### - -def cyg2win32(path): - if sys.platform=='cygwin' and path.startswith('/cygdrive'): - path = path[10] + ':' + os.path.normcase(path[11:]) - return path - -def mingw32(): - """Return true when using mingw32 environment. - """ - if sys.platform=='win32': - if os.environ.get('OSTYPE', '')=='msys': - return True - if os.environ.get('MSYSTEM', '')=='MINGW32': - return True - return False - -def msvc_runtime_version(): - "Return version of MSVC runtime library, as defined by __MSC_VER__ macro" - msc_pos = sys.version.find('MSC v.') - if msc_pos != -1: - msc_ver = int(sys.version[msc_pos+6:msc_pos+10]) - else: - msc_ver = None - return msc_ver - -def msvc_runtime_library(): - "Return name of MSVC runtime library if Python was built with MSVC >= 7" - ver = msvc_runtime_major () - if ver: - if ver < 140: - return "msvcr%i" % ver - else: - return "vcruntime%i" % ver - else: - return None - -def msvc_runtime_major(): - "Return major version of MSVC runtime coded like get_build_msvc_version" - major = {1300: 70, # MSVC 7.0 - 1310: 71, # MSVC 7.1 - 1400: 80, # MSVC 8 - 1500: 90, # MSVC 9 (aka 2008) - 1600: 100, # MSVC 10 (aka 2010) - 1900: 140, # MSVC 14 (aka 2015) - }.get(msvc_runtime_version(), None) - return major - -######################### - -#XXX need support for .C that is also C++ -cxx_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match -fortran_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f)\Z', re.I).match -f90_ext_match = re.compile(r'.*[.](f90|f95)\Z', re.I).match -f90_module_name_match = re.compile(r'\s*module\s*(?P[\w_]+)', re.I).match -def _get_f90_modules(source): - """Return a list of Fortran f90 module names that - given source file 
defines. - """ - if not f90_ext_match(source): - return [] - modules = [] - with open(source, 'r') as f: - for line in f: - m = f90_module_name_match(line) - if m: - name = m.group('name') - modules.append(name) - # break # XXX can we assume that there is one module per file? - return modules - -def is_string(s): - return isinstance(s, basestring) - -def all_strings(lst): - """Return True if all items in lst are string objects. """ - for item in lst: - if not is_string(item): - return False - return True - -def is_sequence(seq): - if is_string(seq): - return False - try: - len(seq) - except Exception: - return False - return True - -def is_glob_pattern(s): - return is_string(s) and ('*' in s or '?' in s) - -def as_list(seq): - if is_sequence(seq): - return list(seq) - else: - return [seq] - -def get_language(sources): - # not used in numpy/scipy packages, use build_ext.detect_language instead - """Determine language value (c,f77,f90) from sources """ - language = None - for source in sources: - if isinstance(source, str): - if f90_ext_match(source): - language = 'f90' - break - elif fortran_ext_match(source): - language = 'f77' - return language - -def has_f_sources(sources): - """Return True if sources contains Fortran files """ - for source in sources: - if fortran_ext_match(source): - return True - return False - -def has_cxx_sources(sources): - """Return True if sources contains C++ files """ - for source in sources: - if cxx_ext_match(source): - return True - return False - -def filter_sources(sources): - """Return four lists of filenames containing - C, C++, Fortran, and Fortran 90 module sources, - respectively. 
- """ - c_sources = [] - cxx_sources = [] - f_sources = [] - fmodule_sources = [] - for source in sources: - if fortran_ext_match(source): - modules = _get_f90_modules(source) - if modules: - fmodule_sources.append(source) - else: - f_sources.append(source) - elif cxx_ext_match(source): - cxx_sources.append(source) - else: - c_sources.append(source) - return c_sources, cxx_sources, f_sources, fmodule_sources - - -def _get_headers(directory_list): - # get *.h files from list of directories - headers = [] - for d in directory_list: - head = sorted_glob(os.path.join(d, "*.h")) #XXX: *.hpp files?? - headers.extend(head) - return headers - -def _get_directories(list_of_sources): - # get unique directories from list of sources. - direcs = [] - for f in list_of_sources: - d = os.path.split(f) - if d[0] != '' and not d[0] in direcs: - direcs.append(d[0]) - return direcs - -def _commandline_dep_string(cc_args, extra_postargs, pp_opts): - """ - Return commandline representation used to determine if a file needs - to be recompiled - """ - cmdline = 'commandline: ' - cmdline += ' '.join(cc_args) - cmdline += ' '.join(extra_postargs) - cmdline += ' '.join(pp_opts) + '\n' - return cmdline - - -def get_dependencies(sources): - #XXX scan sources for include statements - return _get_headers(_get_directories(sources)) - -def is_local_src_dir(directory): - """Return true if directory is local directory. 
- """ - if not is_string(directory): - return False - abs_dir = os.path.abspath(directory) - c = os.path.commonprefix([os.getcwd(), abs_dir]) - new_dir = abs_dir[len(c):].split(os.sep) - if new_dir and not new_dir[0]: - new_dir = new_dir[1:] - if new_dir and new_dir[0]=='build': - return False - new_dir = os.sep.join(new_dir) - return os.path.isdir(new_dir) - -def general_source_files(top_path): - pruned_directories = {'CVS':1, '.svn':1, 'build':1} - prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') - for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): - pruned = [ d for d in dirnames if d not in pruned_directories ] - dirnames[:] = pruned - for f in filenames: - if not prune_file_pat.search(f): - yield os.path.join(dirpath, f) - -def general_source_directories_files(top_path): - """Return a directory name relative to top_path and - files contained. - """ - pruned_directories = ['CVS', '.svn', 'build'] - prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') - for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): - pruned = [ d for d in dirnames if d not in pruned_directories ] - dirnames[:] = pruned - for d in dirnames: - dpath = os.path.join(dirpath, d) - rpath = rel_path(dpath, top_path) - files = [] - for f in os.listdir(dpath): - fn = os.path.join(dpath, f) - if os.path.isfile(fn) and not prune_file_pat.search(fn): - files.append(fn) - yield rpath, files - dpath = top_path - rpath = rel_path(dpath, top_path) - filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \ - if not prune_file_pat.search(f)] - files = [f for f in filenames if os.path.isfile(f)] - yield rpath, files - - -def get_ext_source_files(ext): - # Get sources and any include files in the same directory. 
- filenames = [] - sources = [_m for _m in ext.sources if is_string(_m)] - filenames.extend(sources) - filenames.extend(get_dependencies(sources)) - for d in ext.depends: - if is_local_src_dir(d): - filenames.extend(list(general_source_files(d))) - elif os.path.isfile(d): - filenames.append(d) - return filenames - -def get_script_files(scripts): - scripts = [_m for _m in scripts if is_string(_m)] - return scripts - -def get_lib_source_files(lib): - filenames = [] - sources = lib[1].get('sources', []) - sources = [_m for _m in sources if is_string(_m)] - filenames.extend(sources) - filenames.extend(get_dependencies(sources)) - depends = lib[1].get('depends', []) - for d in depends: - if is_local_src_dir(d): - filenames.extend(list(general_source_files(d))) - elif os.path.isfile(d): - filenames.append(d) - return filenames - -def get_shared_lib_extension(is_python_ext=False): - """Return the correct file extension for shared libraries. - - Parameters - ---------- - is_python_ext : bool, optional - Whether the shared library is a Python extension. Default is False. - - Returns - ------- - so_ext : str - The shared library extension. - - Notes - ----- - For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X, - and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on - POSIX systems according to PEP 3149. For Python 3.2 this is implemented on - Linux, but not on OS X. 
- - """ - confvars = distutils.sysconfig.get_config_vars() - # SO is deprecated in 3.3.1, use EXT_SUFFIX instead - so_ext = confvars.get('EXT_SUFFIX', None) - if so_ext is None: - so_ext = confvars.get('SO', '') - - if not is_python_ext: - # hardcode known values, config vars (including SHLIB_SUFFIX) are - # unreliable (see #3182) - # darwin, windows and debug linux are wrong in 3.3.1 and older - if (sys.platform.startswith('linux') or - sys.platform.startswith('gnukfreebsd')): - so_ext = '.so' - elif sys.platform.startswith('darwin'): - so_ext = '.dylib' - elif sys.platform.startswith('win'): - so_ext = '.dll' - else: - # fall back to config vars for unknown platforms - # fix long extension for Python >=3.2, see PEP 3149. - if 'SOABI' in confvars: - # Does nothing unless SOABI config var exists - so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1) - - return so_ext - -def get_data_files(data): - if is_string(data): - return [data] - sources = data[1] - filenames = [] - for s in sources: - if hasattr(s, '__call__'): - continue - if is_local_src_dir(s): - filenames.extend(list(general_source_files(s))) - elif is_string(s): - if os.path.isfile(s): - filenames.append(s) - else: - print('Not existing data file:', s) - else: - raise TypeError(repr(s)) - return filenames - -def dot_join(*args): - return '.'.join([a for a in args if a]) - -def get_frame(level=0): - """Return frame object from call stack with given level. 
- """ - try: - return sys._getframe(level+1) - except AttributeError: - frame = sys.exc_info()[2].tb_frame - for _ in range(level+1): - frame = frame.f_back - return frame - - -###################### - -class Configuration(object): - - _list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs', - 'libraries', 'headers', 'scripts', 'py_modules', - 'installed_libraries', 'define_macros'] - _dict_keys = ['package_dir', 'installed_pkg_config'] - _extra_keys = ['name', 'version'] - - numpy_include_dirs = [] - - def __init__(self, - package_name=None, - parent_name=None, - top_path=None, - package_path=None, - caller_level=1, - setup_name='setup.py', - **attrs): - """Construct configuration instance of a package. - - package_name -- name of the package - Ex.: 'distutils' - parent_name -- name of the parent package - Ex.: 'numpy' - top_path -- directory of the toplevel package - Ex.: the directory where the numpy package source sits - package_path -- directory of package. Will be computed by magic from the - directory of the caller module if not specified - Ex.: the directory where numpy.distutils is - caller_level -- frame level to caller namespace, internal parameter. - """ - self.name = dot_join(parent_name, package_name) - self.version = None - - caller_frame = get_frame(caller_level) - self.local_path = get_path_from_frame(caller_frame, top_path) - # local_path -- directory of a file (usually setup.py) that - # defines a configuration() function. - # local_path -- directory of a file (usually setup.py) that - # defines a configuration() function. 
- if top_path is None: - top_path = self.local_path - self.local_path = '' - if package_path is None: - package_path = self.local_path - elif os.path.isdir(njoin(self.local_path, package_path)): - package_path = njoin(self.local_path, package_path) - if not os.path.isdir(package_path or '.'): - raise ValueError("%r is not a directory" % (package_path,)) - self.top_path = top_path - self.package_path = package_path - # this is the relative path in the installed package - self.path_in_package = os.path.join(*self.name.split('.')) - - self.list_keys = self._list_keys[:] - self.dict_keys = self._dict_keys[:] - - for n in self.list_keys: - v = copy.copy(attrs.get(n, [])) - setattr(self, n, as_list(v)) - - for n in self.dict_keys: - v = copy.copy(attrs.get(n, {})) - setattr(self, n, v) - - known_keys = self.list_keys + self.dict_keys - self.extra_keys = self._extra_keys[:] - for n in attrs.keys(): - if n in known_keys: - continue - a = attrs[n] - setattr(self, n, a) - if isinstance(a, list): - self.list_keys.append(n) - elif isinstance(a, dict): - self.dict_keys.append(n) - else: - self.extra_keys.append(n) - - if os.path.exists(njoin(package_path, '__init__.py')): - self.packages.append(self.name) - self.package_dir[self.name] = package_path - - self.options = dict( - ignore_setup_xxx_py = False, - assume_default_configuration = False, - delegate_options_to_subpackages = False, - quiet = False, - ) - - caller_instance = None - for i in range(1, 3): - try: - f = get_frame(i) - except ValueError: - break - try: - caller_instance = eval('self', f.f_globals, f.f_locals) - break - except NameError: - pass - if isinstance(caller_instance, self.__class__): - if caller_instance.options['delegate_options_to_subpackages']: - self.set_options(**caller_instance.options) - - self.setup_name = setup_name - - def todict(self): - """ - Return a dictionary compatible with the keyword arguments of distutils - setup function. 
- - Examples - -------- - >>> setup(**config.todict()) #doctest: +SKIP - """ - - self._optimize_data_files() - d = {} - known_keys = self.list_keys + self.dict_keys + self.extra_keys - for n in known_keys: - a = getattr(self, n) - if a: - d[n] = a - return d - - def info(self, message): - if not self.options['quiet']: - print(message) - - def warn(self, message): - sys.stderr.write('Warning: %s\n' % (message,)) - - def set_options(self, **options): - """ - Configure Configuration instance. - - The following options are available: - - ignore_setup_xxx_py - - assume_default_configuration - - delegate_options_to_subpackages - - quiet - - """ - for key, value in options.items(): - if key in self.options: - self.options[key] = value - else: - raise ValueError('Unknown option: '+key) - - def get_distribution(self): - """Return the distutils distribution object for self.""" - from numpy.distutils.core import get_distribution - return get_distribution() - - def _wildcard_get_subpackage(self, subpackage_name, - parent_name, - caller_level = 1): - l = subpackage_name.split('.') - subpackage_path = njoin([self.local_path]+l) - dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)] - config_list = [] - for d in dirs: - if not os.path.isfile(njoin(d, '__init__.py')): - continue - if 'build' in d.split(os.sep): - continue - n = '.'.join(d.split(os.sep)[-len(l):]) - c = self.get_subpackage(n, - parent_name = parent_name, - caller_level = caller_level+1) - config_list.extend(c) - return config_list - - def _get_configuration_from_setup_py(self, setup_py, - subpackage_name, - subpackage_path, - parent_name, - caller_level = 1): - # In case setup_py imports local modules: - sys.path.insert(0, os.path.dirname(setup_py)) - try: - setup_name = os.path.splitext(os.path.basename(setup_py))[0] - n = dot_join(self.name, subpackage_name, setup_name) - setup_module = npy_load_module('_'.join(n.split('.')), - setup_py, - ('.py', 'U', 1)) - if not hasattr(setup_module, 
'configuration'): - if not self.options['assume_default_configuration']: - self.warn('Assuming default configuration '\ - '(%s does not define configuration())'\ - % (setup_module)) - config = Configuration(subpackage_name, parent_name, - self.top_path, subpackage_path, - caller_level = caller_level + 1) - else: - pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1])) - args = (pn,) - def fix_args_py2(args): - if setup_module.configuration.__code__.co_argcount > 1: - args = args + (self.top_path,) - return args - def fix_args_py3(args): - if setup_module.configuration.__code__.co_argcount > 1: - args = args + (self.top_path,) - return args - if sys.version_info[0] < 3: - args = fix_args_py2(args) - else: - args = fix_args_py3(args) - config = setup_module.configuration(*args) - if config.name!=dot_join(parent_name, subpackage_name): - self.warn('Subpackage %r configuration returned as %r' % \ - (dot_join(parent_name, subpackage_name), config.name)) - finally: - del sys.path[0] - return config - - def get_subpackage(self,subpackage_name, - subpackage_path=None, - parent_name=None, - caller_level = 1): - """Return list of subpackage configurations. - - Parameters - ---------- - subpackage_name : str or None - Name of the subpackage to get the configuration. '*' in - subpackage_name is handled as a wildcard. - subpackage_path : str - If None, then the path is assumed to be the local path plus the - subpackage_name. If a setup.py file is not found in the - subpackage_path, then a default configuration is used. - parent_name : str - Parent name. 
- """ - if subpackage_name is None: - if subpackage_path is None: - raise ValueError( - "either subpackage_name or subpackage_path must be specified") - subpackage_name = os.path.basename(subpackage_path) - - # handle wildcards - l = subpackage_name.split('.') - if subpackage_path is None and '*' in subpackage_name: - return self._wildcard_get_subpackage(subpackage_name, - parent_name, - caller_level = caller_level+1) - assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name)) - if subpackage_path is None: - subpackage_path = njoin([self.local_path] + l) - else: - subpackage_path = njoin([subpackage_path] + l[:-1]) - subpackage_path = self.paths([subpackage_path])[0] - setup_py = njoin(subpackage_path, self.setup_name) - if not self.options['ignore_setup_xxx_py']: - if not os.path.isfile(setup_py): - setup_py = njoin(subpackage_path, - 'setup_%s.py' % (subpackage_name)) - if not os.path.isfile(setup_py): - if not self.options['assume_default_configuration']: - self.warn('Assuming default configuration '\ - '(%s/{setup_%s,setup}.py was not found)' \ - % (os.path.dirname(setup_py), subpackage_name)) - config = Configuration(subpackage_name, parent_name, - self.top_path, subpackage_path, - caller_level = caller_level+1) - else: - config = self._get_configuration_from_setup_py( - setup_py, - subpackage_name, - subpackage_path, - parent_name, - caller_level = caller_level + 1) - if config: - return [config] - else: - return [] - - def add_subpackage(self,subpackage_name, - subpackage_path=None, - standalone = False): - """Add a sub-package to the current Configuration instance. - - This is useful in a setup.py script for adding sub-packages to a - package. - - Parameters - ---------- - subpackage_name : str - name of the subpackage - subpackage_path : str - if given, the subpackage path such as the subpackage is in - subpackage_path / subpackage_name. If None,the subpackage is - assumed to be located in the local path / subpackage_name. 
- standalone : bool - """ - - if standalone: - parent_name = None - else: - parent_name = self.name - config_list = self.get_subpackage(subpackage_name, subpackage_path, - parent_name = parent_name, - caller_level = 2) - if not config_list: - self.warn('No configuration returned, assuming unavailable.') - for config in config_list: - d = config - if isinstance(config, Configuration): - d = config.todict() - assert isinstance(d, dict), repr(type(d)) - - self.info('Appending %s configuration to %s' \ - % (d.get('name'), self.name)) - self.dict_append(**d) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add a subpackage '+ subpackage_name) - - def add_data_dir(self, data_path): - """Recursively add files under data_path to data_files list. - - Recursively add files under data_path to the list of data_files to be - installed (and distributed). The data_path can be either a relative - path-name, or an absolute path-name, or a 2-tuple where the first - argument shows where in the install directory the data directory - should be installed to. - - Parameters - ---------- - data_path : seq or str - Argument can be either - - * 2-sequence (, ) - * path to data directory where python datadir suffix defaults - to package dir. 
- - Notes - ----- - Rules for installation paths:: - - foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar - (gun, foo/bar) -> parent/gun - foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b - (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun - (gun/*, foo/*) -> parent/gun/a, parent/gun/b - /foo/bar -> (bar, /foo/bar) -> parent/bar - (gun, /foo/bar) -> parent/gun - (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar - - Examples - -------- - For example suppose the source directory contains fun/foo.dat and - fun/bar/car.dat: - - >>> self.add_data_dir('fun') #doctest: +SKIP - >>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP - >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP - - Will install data-files to the locations:: - - / - fun/ - foo.dat - bar/ - car.dat - sun/ - foo.dat - bar/ - car.dat - gun/ - foo.dat - car.dat - - """ - if is_sequence(data_path): - d, data_path = data_path - else: - d = None - if is_sequence(data_path): - [self.add_data_dir((d, p)) for p in data_path] - return - if not is_string(data_path): - raise TypeError("not a string: %r" % (data_path,)) - if d is None: - if os.path.isabs(data_path): - return self.add_data_dir((os.path.basename(data_path), data_path)) - return self.add_data_dir((data_path, data_path)) - paths = self.paths(data_path, include_non_existing=False) - if is_glob_pattern(data_path): - if is_glob_pattern(d): - pattern_list = allpath(d).split(os.sep) - pattern_list.reverse() - # /a/*//b/ -> /a/*/b - rl = list(range(len(pattern_list)-1)); rl.reverse() - for i in rl: - if not pattern_list[i]: - del pattern_list[i] - # - for path in paths: - if not os.path.isdir(path): - print('Not a directory, skipping', path) - continue - rpath = rel_path(path, self.local_path) - path_list = rpath.split(os.sep) - path_list.reverse() - target_list = [] - i = 0 - for s in pattern_list: - if is_glob_pattern(s): - if i>=len(path_list): - raise ValueError('cannot fill pattern %r with %r' \ - % (d, path)) - 
target_list.append(path_list[i]) - else: - assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath)) - target_list.append(s) - i += 1 - if path_list[i:]: - self.warn('mismatch of pattern_list=%s and path_list=%s'\ - % (pattern_list, path_list)) - target_list.reverse() - self.add_data_dir((os.sep.join(target_list), path)) - else: - for path in paths: - self.add_data_dir((d, path)) - return - assert not is_glob_pattern(d), repr(d) - - dist = self.get_distribution() - if dist is not None and dist.data_files is not None: - data_files = dist.data_files - else: - data_files = self.data_files - - for path in paths: - for d1, f in list(general_source_directories_files(path)): - target_path = os.path.join(self.path_in_package, d, d1) - data_files.append((target_path, f)) - - def _optimize_data_files(self): - data_dict = {} - for p, files in self.data_files: - if p not in data_dict: - data_dict[p] = set() - for f in files: - data_dict[p].add(f) - self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()] - - def add_data_files(self,*files): - """Add data files to configuration data_files. - - Parameters - ---------- - files : sequence - Argument(s) can be either - - * 2-sequence (,) - * paths to data files where python datadir prefix defaults - to package dir. - - Notes - ----- - The form of each element of the files sequence is very flexible - allowing many combinations of where to get the files from the package - and where they should ultimately be installed on the system. The most - basic usage is for an element of the files argument sequence to be a - simple filename. This will cause that file from the local path to be - installed to the installation path of the self.name package (package - path). The file argument can also be a relative path in which case the - entire relative path will be installed into the package directory. 
- Finally, the file can be an absolute path name in which case the file - will be found at the absolute path name but installed to the package - path. - - This basic behavior can be augmented by passing a 2-tuple in as the - file argument. The first element of the tuple should specify the - relative path (under the package install directory) where the - remaining sequence of files should be installed to (it has nothing to - do with the file-names in the source distribution). The second element - of the tuple is the sequence of files that should be installed. The - files in this sequence can be filenames, relative paths, or absolute - paths. For absolute paths the file will be installed in the top-level - package installation directory (regardless of the first argument). - Filenames and relative path names will be installed in the package - install directory under the path name given as the first element of - the tuple. - - Rules for installation paths: - - #. file.txt -> (., file.txt)-> parent/file.txt - #. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt - #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt - #. ``*``.txt -> parent/a.txt, parent/b.txt - #. foo/``*``.txt`` -> parent/foo/a.txt, parent/foo/b.txt - #. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt - #. (sun, file.txt) -> parent/sun/file.txt - #. (sun, bar/file.txt) -> parent/sun/file.txt - #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt - #. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt - #. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt - #. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt - - An additional feature is that the path to a data-file can actually be - a function that takes no arguments and returns the actual path(s) to - the data-files. This is useful when the data files are generated while - building the package. 
- - Examples - -------- - Add files to the list of data_files to be included with the package. - - >>> self.add_data_files('foo.dat', - ... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']), - ... 'bar/cat.dat', - ... '/full/path/to/can.dat') #doctest: +SKIP - - will install these data files to:: - - / - foo.dat - fun/ - gun.dat - nun/ - pun.dat - sun.dat - bar/ - car.dat - can.dat - - where is the package (or sub-package) - directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C: - \\Python2.4 \\Lib \\site-packages \\mypackage') or - '/usr/lib/python2.4/site- packages/mypackage/mysubpackage' ('C: - \\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage'). - """ - - if len(files)>1: - for f in files: - self.add_data_files(f) - return - assert len(files)==1 - if is_sequence(files[0]): - d, files = files[0] - else: - d = None - if is_string(files): - filepat = files - elif is_sequence(files): - if len(files)==1: - filepat = files[0] - else: - for f in files: - self.add_data_files((d, f)) - return - else: - raise TypeError(repr(type(files))) - - if d is None: - if hasattr(filepat, '__call__'): - d = '' - elif os.path.isabs(filepat): - d = '' - else: - d = os.path.dirname(filepat) - self.add_data_files((d, files)) - return - - paths = self.paths(filepat, include_non_existing=False) - if is_glob_pattern(filepat): - if is_glob_pattern(d): - pattern_list = d.split(os.sep) - pattern_list.reverse() - for path in paths: - path_list = path.split(os.sep) - path_list.reverse() - path_list.pop() # filename - target_list = [] - i = 0 - for s in pattern_list: - if is_glob_pattern(s): - target_list.append(path_list[i]) - i += 1 - else: - target_list.append(s) - target_list.reverse() - self.add_data_files((os.sep.join(target_list), path)) - else: - self.add_data_files((d, paths)) - return - assert not is_glob_pattern(d), repr((d, filepat)) - - dist = self.get_distribution() - if dist is not None and dist.data_files is not None: - data_files = dist.data_files - 
else: - data_files = self.data_files - - data_files.append((os.path.join(self.path_in_package, d), paths)) - - ### XXX Implement add_py_modules - - def add_define_macros(self, macros): - """Add define macros to configuration - - Add the given sequence of macro name and value duples to the beginning - of the define_macros list This list will be visible to all extension - modules of the current package. - """ - dist = self.get_distribution() - if dist is not None: - if not hasattr(dist, 'define_macros'): - dist.define_macros = [] - dist.define_macros.extend(macros) - else: - self.define_macros.extend(macros) - - - def add_include_dirs(self,*paths): - """Add paths to configuration include directories. - - Add the given sequence of paths to the beginning of the include_dirs - list. This list will be visible to all extension modules of the - current package. - """ - include_dirs = self.paths(paths) - dist = self.get_distribution() - if dist is not None: - if dist.include_dirs is None: - dist.include_dirs = [] - dist.include_dirs.extend(include_dirs) - else: - self.include_dirs.extend(include_dirs) - - def add_headers(self,*files): - """Add installable headers to configuration. - - Add the given sequence of files to the beginning of the headers list. - By default, headers will be installed under // directory. If an item of files - is a tuple, then its first argument specifies the actual installation - location relative to the path. - - Parameters - ---------- - files : str or seq - Argument(s) can be either: - - * 2-sequence (,) - * path(s) to header file(s) where python includedir suffix will - default to package name. 
- """ - headers = [] - for path in files: - if is_string(path): - [headers.append((self.name, p)) for p in self.paths(path)] - else: - if not isinstance(path, (tuple, list)) or len(path) != 2: - raise TypeError(repr(path)) - [headers.append((path[0], p)) for p in self.paths(path[1])] - dist = self.get_distribution() - if dist is not None: - if dist.headers is None: - dist.headers = [] - dist.headers.extend(headers) - else: - self.headers.extend(headers) - - def paths(self,*paths,**kws): - """Apply glob to paths and prepend local_path if needed. - - Applies glob.glob(...) to each path in the sequence (if needed) and - pre-pends the local_path if needed. Because this is called on all - source lists, this allows wildcard characters to be specified in lists - of sources for extension modules and libraries and scripts and allows - path-names be relative to the source directory. - - """ - include_non_existing = kws.get('include_non_existing', True) - return gpaths(paths, - local_path = self.local_path, - include_non_existing=include_non_existing) - - def _fix_paths_dict(self, kw): - for k in kw.keys(): - v = kw[k] - if k in ['sources', 'depends', 'include_dirs', 'library_dirs', - 'module_dirs', 'extra_objects']: - new_v = self.paths(v) - kw[k] = new_v - - def add_extension(self,name,sources,**kw): - """Add extension to configuration. - - Create and add an Extension instance to the ext_modules list. This - method also takes the following optional keyword arguments that are - passed on to the Extension constructor. - - Parameters - ---------- - name : str - name of the extension - sources : seq - list of the sources. The list of sources may contain functions - (called source generators) which must take an extension instance - and a build directory as inputs and return a source file or list of - source files or None. If None is returned then no sources are - generated. 
If the Extension instance has no sources after - processing all source generators, then no extension module is - built. - include_dirs : - define_macros : - undef_macros : - library_dirs : - libraries : - runtime_library_dirs : - extra_objects : - extra_compile_args : - extra_link_args : - extra_f77_compile_args : - extra_f90_compile_args : - export_symbols : - swig_opts : - depends : - The depends list contains paths to files or directories that the - sources of the extension module depend on. If any path in the - depends list is newer than the extension module, then the module - will be rebuilt. - language : - f2py_options : - module_dirs : - extra_info : dict or list - dict or list of dict of keywords to be appended to keywords. - - Notes - ----- - The self.paths(...) method is applied to all lists that may contain - paths. - """ - ext_args = copy.copy(kw) - ext_args['name'] = dot_join(self.name, name) - ext_args['sources'] = sources - - if 'extra_info' in ext_args: - extra_info = ext_args['extra_info'] - del ext_args['extra_info'] - if isinstance(extra_info, dict): - extra_info = [extra_info] - for info in extra_info: - assert isinstance(info, dict), repr(info) - dict_append(ext_args,**info) - - self._fix_paths_dict(ext_args) - - # Resolve out-of-tree dependencies - libraries = ext_args.get('libraries', []) - libnames = [] - ext_args['libraries'] = [] - for libname in libraries: - if isinstance(libname, tuple): - self._fix_paths_dict(libname[1]) - - # Handle library names of the form libname@relative/path/to/library - if '@' in libname: - lname, lpath = libname.split('@', 1) - lpath = os.path.abspath(njoin(self.local_path, lpath)) - if os.path.isdir(lpath): - c = self.get_subpackage(None, lpath, - caller_level = 2) - if isinstance(c, Configuration): - c = c.todict() - for l in [l[0] for l in c.get('libraries', [])]: - llname = l.split('__OF__', 1)[0] - if llname == lname: - c.pop('name', None) - dict_append(ext_args,**c) - break - continue - 
libnames.append(libname) - - ext_args['libraries'] = libnames + ext_args['libraries'] - ext_args['define_macros'] = \ - self.define_macros + ext_args.get('define_macros', []) - - from numpy.distutils.core import Extension - ext = Extension(**ext_args) - self.ext_modules.append(ext) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add an extension '+name) - return ext - - def add_library(self,name,sources,**build_info): - """ - Add library to configuration. - - Parameters - ---------- - name : str - Name of the extension. - sources : sequence - List of the sources. The list of sources may contain functions - (called source generators) which must take an extension instance - and a build directory as inputs and return a source file or list of - source files or None. If None is returned then no sources are - generated. If the Extension instance has no sources after - processing all source generators, then no extension module is - built. - build_info : dict, optional - The following keys are allowed: - - * depends - * macros - * include_dirs - * extra_compiler_args - * extra_f77_compile_args - * extra_f90_compile_args - * f2py_options - * language - - """ - self._add_library(name, sources, None, build_info) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add a library '+ name) - - def _add_library(self, name, sources, install_dir, build_info): - """Common implementation for add_library and add_installed_library. 
Do - not use directly""" - build_info = copy.copy(build_info) - build_info['sources'] = sources - - # Sometimes, depends is not set up to an empty list by default, and if - # depends is not given to add_library, distutils barfs (#1134) - if not 'depends' in build_info: - build_info['depends'] = [] - - self._fix_paths_dict(build_info) - - # Add to libraries list so that it is build with build_clib - self.libraries.append((name, build_info)) - - def add_installed_library(self, name, sources, install_dir, build_info=None): - """ - Similar to add_library, but the specified library is installed. - - Most C libraries used with `distutils` are only used to build python - extensions, but libraries built through this method will be installed - so that they can be reused by third-party packages. - - Parameters - ---------- - name : str - Name of the installed library. - sources : sequence - List of the library's source files. See `add_library` for details. - install_dir : str - Path to install the library, relative to the current sub-package. - build_info : dict, optional - The following keys are allowed: - - * depends - * macros - * include_dirs - * extra_compiler_args - * extra_f77_compile_args - * extra_f90_compile_args - * f2py_options - * language - - Returns - ------- - None - - See Also - -------- - add_library, add_npy_pkg_config, get_info - - Notes - ----- - The best way to encode the options required to link against the specified - C libraries is to use a "libname.ini" file, and use `get_info` to - retrieve the required options (see `add_npy_pkg_config` for more - information). - - """ - if not build_info: - build_info = {} - - install_dir = os.path.join(self.package_path, install_dir) - self._add_library(name, sources, install_dir, build_info) - self.installed_libraries.append(InstallableLib(name, build_info, install_dir)) - - def add_npy_pkg_config(self, template, install_dir, subst_dict=None): - """ - Generate and install a npy-pkg config file from a template. 
- - The config file generated from `template` is installed in the - given install directory, using `subst_dict` for variable substitution. - - Parameters - ---------- - template : str - The path of the template, relatively to the current package path. - install_dir : str - Where to install the npy-pkg config file, relatively to the current - package path. - subst_dict : dict, optional - If given, any string of the form ``@key@`` will be replaced by - ``subst_dict[key]`` in the template file when installed. The install - prefix is always available through the variable ``@prefix@``, since the - install prefix is not easy to get reliably from setup.py. - - See also - -------- - add_installed_library, get_info - - Notes - ----- - This works for both standard installs and in-place builds, i.e. the - ``@prefix@`` refer to the source directory for in-place builds. - - Examples - -------- - :: - - config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar}) - - Assuming the foo.ini.in file has the following content:: - - [meta] - Name=@foo@ - Version=1.0 - Description=dummy description - - [default] - Cflags=-I@prefix@/include - Libs= - - The generated file will have the following content:: - - [meta] - Name=bar - Version=1.0 - Description=dummy description - - [default] - Cflags=-Iprefix_dir/include - Libs= - - and will be installed as foo.ini in the 'lib' subpath. - - When cross-compiling with numpy distutils, it might be necessary to - use modified npy-pkg-config files. Using the default/generated files - will link with the host libraries (i.e. libnpymath.a). For - cross-compilation you of-course need to link with target libraries, - while using the host Python installation. - - You can copy out the numpy/core/lib/npy-pkg-config directory, add a - pkgdir value to the .ini files and set NPY_PKG_CONFIG_PATH environment - variable to point to the directory with the modified npy-pkg-config - files. 
- - Example npymath.ini modified for cross-compilation:: - - [meta] - Name=npymath - Description=Portable, core math library implementing C99 standard - Version=0.1 - - [variables] - pkgname=numpy.core - pkgdir=/build/arm-linux-gnueabi/sysroot/usr/lib/python3.7/site-packages/numpy/core - prefix=${pkgdir} - libdir=${prefix}/lib - includedir=${prefix}/include - - [default] - Libs=-L${libdir} -lnpymath - Cflags=-I${includedir} - Requires=mlib - - [msvc] - Libs=/LIBPATH:${libdir} npymath.lib - Cflags=/INCLUDE:${includedir} - Requires=mlib - - """ - if subst_dict is None: - subst_dict = {} - template = os.path.join(self.package_path, template) - - if self.name in self.installed_pkg_config: - self.installed_pkg_config[self.name].append((template, install_dir, - subst_dict)) - else: - self.installed_pkg_config[self.name] = [(template, install_dir, - subst_dict)] - - - def add_scripts(self,*files): - """Add scripts to configuration. - - Add the sequence of files to the beginning of the scripts list. - Scripts will be installed under the /bin/ directory. 
- - """ - scripts = self.paths(files) - dist = self.get_distribution() - if dist is not None: - if dist.scripts is None: - dist.scripts = [] - dist.scripts.extend(scripts) - else: - self.scripts.extend(scripts) - - def dict_append(self,**dict): - for key in self.list_keys: - a = getattr(self, key) - a.extend(dict.get(key, [])) - for key in self.dict_keys: - a = getattr(self, key) - a.update(dict.get(key, {})) - known_keys = self.list_keys + self.dict_keys + self.extra_keys - for key in dict.keys(): - if key not in known_keys: - a = getattr(self, key, None) - if a and a==dict[key]: continue - self.warn('Inheriting attribute %r=%r from %r' \ - % (key, dict[key], dict.get('name', '?'))) - setattr(self, key, dict[key]) - self.extra_keys.append(key) - elif key in self.extra_keys: - self.info('Ignoring attempt to set %r (from %r to %r)' \ - % (key, getattr(self, key), dict[key])) - elif key in known_keys: - # key is already processed above - pass - else: - raise ValueError("Don't know about key=%r" % (key)) - - def __str__(self): - from pprint import pformat - known_keys = self.list_keys + self.dict_keys + self.extra_keys - s = '<'+5*'-' + '\n' - s += 'Configuration of '+self.name+':\n' - known_keys.sort() - for k in known_keys: - a = getattr(self, k, None) - if a: - s += '%s = %s\n' % (k, pformat(a)) - s += 5*'-' + '>' - return s - - def get_config_cmd(self): - """ - Returns the numpy.distutils config command instance. - """ - cmd = get_cmd('config') - cmd.ensure_finalized() - cmd.dump_source = 0 - cmd.noisy = 0 - old_path = os.environ.get('PATH') - if old_path: - path = os.pathsep.join(['.', old_path]) - os.environ['PATH'] = path - return cmd - - def get_build_temp_dir(self): - """ - Return a path to a temporary directory where temporary files should be - placed. - """ - cmd = get_cmd('build') - cmd.ensure_finalized() - return cmd.build_temp - - def have_f77c(self): - """Check for availability of Fortran 77 compiler. 
- - Use it inside source generating function to ensure that - setup distribution instance has been initialized. - - Notes - ----- - True if a Fortran 77 compiler is available (because a simple Fortran 77 - code was able to be compiled successfully). - """ - simple_fortran_subroutine = ''' - subroutine simple - end - ''' - config_cmd = self.get_config_cmd() - flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77') - return flag - - def have_f90c(self): - """Check for availability of Fortran 90 compiler. - - Use it inside source generating function to ensure that - setup distribution instance has been initialized. - - Notes - ----- - True if a Fortran 90 compiler is available (because a simple Fortran - 90 code was able to be compiled successfully) - """ - simple_fortran_subroutine = ''' - subroutine simple - end - ''' - config_cmd = self.get_config_cmd() - flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90') - return flag - - def append_to(self, extlib): - """Append libraries, include_dirs to extension or library item. - """ - if is_sequence(extlib): - lib_name, build_info = extlib - dict_append(build_info, - libraries=self.libraries, - include_dirs=self.include_dirs) - else: - from numpy.distutils.core import Extension - assert isinstance(extlib, Extension), repr(extlib) - extlib.libraries.extend(self.libraries) - extlib.include_dirs.extend(self.include_dirs) - - def _get_svn_revision(self, path): - """Return path's SVN revision number. 
- """ - try: - output = subprocess.check_output(['svnversion'], cwd=path) - except (subprocess.CalledProcessError, OSError): - pass - else: - m = re.match(rb'(?P\d+)', output) - if m: - return int(m.group('revision')) - - if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None): - entries = njoin(path, '_svn', 'entries') - else: - entries = njoin(path, '.svn', 'entries') - if os.path.isfile(entries): - with open(entries) as f: - fstr = f.read() - if fstr[:5] == '\d+)"', fstr) - if m: - return int(m.group('revision')) - else: # non-xml entries file --- check to be sure that - m = re.search(r'dir[\n\r]+(?P\d+)', fstr) - if m: - return int(m.group('revision')) - return None - - def _get_hg_revision(self, path): - """Return path's Mercurial revision number. - """ - try: - output = subprocess.check_output( - ['hg', 'identify', '--num'], cwd=path) - except (subprocess.CalledProcessError, OSError): - pass - else: - m = re.match(rb'(?P\d+)', output) - if m: - return int(m.group('revision')) - - branch_fn = njoin(path, '.hg', 'branch') - branch_cache_fn = njoin(path, '.hg', 'branch.cache') - - if os.path.isfile(branch_fn): - branch0 = None - with open(branch_fn) as f: - revision0 = f.read().strip() - - branch_map = {} - for line in file(branch_cache_fn, 'r'): - branch1, revision1 = line.split()[:2] - if revision1==revision0: - branch0 = branch1 - try: - revision1 = int(revision1) - except ValueError: - continue - branch_map[branch1] = revision1 - - return branch_map.get(branch0) - - return None - - - def get_version(self, version_file=None, version_variable=None): - """Try to get version string of a package. - - Return a version string of the current package or None if the version - information could not be detected. - - Notes - ----- - This method scans files named - __version__.py, _version.py, version.py, and - __svn_version__.py for string variables version, __version__, and - _version, until a version number is found. 
- """ - version = getattr(self, 'version', None) - if version is not None: - return version - - # Get version from version file. - if version_file is None: - files = ['__version__.py', - self.name.split('.')[-1]+'_version.py', - 'version.py', - '__svn_version__.py', - '__hg_version__.py'] - else: - files = [version_file] - if version_variable is None: - version_vars = ['version', - '__version__', - self.name.split('.')[-1]+'_version'] - else: - version_vars = [version_variable] - for f in files: - fn = njoin(self.local_path, f) - if os.path.isfile(fn): - info = ('.py', 'U', 1) - name = os.path.splitext(os.path.basename(fn))[0] - n = dot_join(self.name, name) - try: - version_module = npy_load_module('_'.join(n.split('.')), - fn, info) - except ImportError: - msg = get_exception() - self.warn(str(msg)) - version_module = None - if version_module is None: - continue - - for a in version_vars: - version = getattr(version_module, a, None) - if version is not None: - break - if version is not None: - break - - if version is not None: - self.version = version - return version - - # Get version as SVN or Mercurial revision number - revision = self._get_svn_revision(self.local_path) - if revision is None: - revision = self._get_hg_revision(self.local_path) - - if revision is not None: - version = str(revision) - self.version = version - - return version - - def make_svn_version_py(self, delete=True): - """Appends a data function to the data_files list that will generate - __svn_version__.py file to the current package directory. - - Generate package __svn_version__.py file from SVN revision number, - it will be removed after python exits but will be available - when sdist, etc commands are executed. - - Notes - ----- - If __svn_version__.py existed before, nothing is done. - - This is - intended for working with source directories that are in an SVN - repository. 
- """ - target = njoin(self.local_path, '__svn_version__.py') - revision = self._get_svn_revision(self.local_path) - if os.path.isfile(target) or revision is None: - return - else: - def generate_svn_version_py(): - if not os.path.isfile(target): - version = str(revision) - self.info('Creating %s (version=%r)' % (target, version)) - with open(target, 'w') as f: - f.write('version = %r\n' % (version)) - - def rm_file(f=target,p=self.info): - if delete: - try: os.remove(f); p('removed '+f) - except OSError: pass - try: os.remove(f+'c'); p('removed '+f+'c') - except OSError: pass - - atexit.register(rm_file) - - return target - - self.add_data_files(('', generate_svn_version_py())) - - def make_hg_version_py(self, delete=True): - """Appends a data function to the data_files list that will generate - __hg_version__.py file to the current package directory. - - Generate package __hg_version__.py file from Mercurial revision, - it will be removed after python exits but will be available - when sdist, etc commands are executed. - - Notes - ----- - If __hg_version__.py existed before, nothing is done. - - This is intended for working with source directories that are - in an Mercurial repository. 
- """ - target = njoin(self.local_path, '__hg_version__.py') - revision = self._get_hg_revision(self.local_path) - if os.path.isfile(target) or revision is None: - return - else: - def generate_hg_version_py(): - if not os.path.isfile(target): - version = str(revision) - self.info('Creating %s (version=%r)' % (target, version)) - with open(target, 'w') as f: - f.write('version = %r\n' % (version)) - - def rm_file(f=target,p=self.info): - if delete: - try: os.remove(f); p('removed '+f) - except OSError: pass - try: os.remove(f+'c'); p('removed '+f+'c') - except OSError: pass - - atexit.register(rm_file) - - return target - - self.add_data_files(('', generate_hg_version_py())) - - def make_config_py(self,name='__config__'): - """Generate package __config__.py file containing system_info - information used during building the package. - - This file is installed to the - package installation directory. - - """ - self.py_modules.append((self.name, name, generate_config_py)) - - def get_info(self,*names): - """Get resources information. - - Return information (from system_info.get_info) for all of the names in - the argument list in a single dictionary. 
- """ - from .system_info import get_info, dict_append - info_dict = {} - for a in names: - dict_append(info_dict,**get_info(a)) - return info_dict - - -def get_cmd(cmdname, _cache={}): - if cmdname not in _cache: - import distutils.core - dist = distutils.core._setup_distribution - if dist is None: - from distutils.errors import DistutilsInternalError - raise DistutilsInternalError( - 'setup distribution instance not initialized') - cmd = dist.get_command_obj(cmdname) - _cache[cmdname] = cmd - return _cache[cmdname] - -def get_numpy_include_dirs(): - # numpy_include_dirs are set by numpy/core/setup.py, otherwise [] - include_dirs = Configuration.numpy_include_dirs[:] - if not include_dirs: - import numpy - include_dirs = [ numpy.get_include() ] - # else running numpy/core/setup.py - return include_dirs - -def get_npy_pkg_dir(): - """Return the path where to find the npy-pkg-config directory. - - If the NPY_PKG_CONFIG_PATH environment variable is set, the value of that - is returned. Otherwise, a path inside the location of the numpy module is - returned. - - The NPY_PKG_CONFIG_PATH can be useful when cross-compiling, maintaining - customized npy-pkg-config .ini files for the cross-compilation - environment, and using them when cross-compiling. - - """ - # XXX: import here for bootstrapping reasons - import numpy - d = os.environ.get('NPY_PKG_CONFIG_PATH') - if d is not None: - return d - d = os.path.join(os.path.dirname(numpy.__file__), - 'core', 'lib', 'npy-pkg-config') - return d - -def get_pkg_info(pkgname, dirs=None): - """ - Return library info for the given package. - - Parameters - ---------- - pkgname : str - Name of the package (should match the name of the .ini file, without - the extension, e.g. foo for the file foo.ini). - dirs : sequence, optional - If given, should be a sequence of additional directories where to look - for npy-pkg-config files. Those directories are searched prior to the - NumPy directory. 
- - Returns - ------- - pkginfo : class instance - The `LibraryInfo` instance containing the build information. - - Raises - ------ - PkgNotFound - If the package is not found. - - See Also - -------- - Configuration.add_npy_pkg_config, Configuration.add_installed_library, - get_info - - """ - from numpy.distutils.npy_pkg_config import read_config - - if dirs: - dirs.append(get_npy_pkg_dir()) - else: - dirs = [get_npy_pkg_dir()] - return read_config(pkgname, dirs) - -def get_info(pkgname, dirs=None): - """ - Return an info dict for a given C library. - - The info dict contains the necessary options to use the C library. - - Parameters - ---------- - pkgname : str - Name of the package (should match the name of the .ini file, without - the extension, e.g. foo for the file foo.ini). - dirs : sequence, optional - If given, should be a sequence of additional directories where to look - for npy-pkg-config files. Those directories are searched prior to the - NumPy directory. - - Returns - ------- - info : dict - The dictionary with build information. - - Raises - ------ - PkgNotFound - If the package is not found. 
- - See Also - -------- - Configuration.add_npy_pkg_config, Configuration.add_installed_library, - get_pkg_info - - Examples - -------- - To get the necessary information for the npymath library from NumPy: - - >>> npymath_info = np.distutils.misc_util.get_info('npymath') - >>> npymath_info #doctest: +SKIP - {'define_macros': [], 'libraries': ['npymath'], 'library_dirs': - ['.../numpy/core/lib'], 'include_dirs': ['.../numpy/core/include']} - - This info dict can then be used as input to a `Configuration` instance:: - - config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info) - - """ - from numpy.distutils.npy_pkg_config import parse_flags - pkg_info = get_pkg_info(pkgname, dirs) - - # Translate LibraryInfo instance into a build_info dict - info = parse_flags(pkg_info.cflags()) - for k, v in parse_flags(pkg_info.libs()).items(): - info[k].extend(v) - - # add_extension extra_info argument is ANAL - info['define_macros'] = info['macros'] - del info['macros'] - del info['ignored'] - - return info - -def is_bootstrapping(): - if sys.version_info[0] >= 3: - import builtins - else: - import __builtin__ as builtins - - try: - builtins.__NUMPY_SETUP__ - return True - except AttributeError: - return False - - -######################### - -def default_config_dict(name = None, parent_name = None, local_path=None): - """Return a configuration dictionary for usage in - configuration() function defined in file setup_.py. 
- """ - import warnings - warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\ - 'deprecated default_config_dict(%r,%r,%r)' - % (name, parent_name, local_path, - name, parent_name, local_path, - ), stacklevel=2) - c = Configuration(name, parent_name, local_path) - return c.todict() - - -def dict_append(d, **kws): - for k, v in kws.items(): - if k in d: - ov = d[k] - if isinstance(ov, str): - d[k] = v - else: - d[k].extend(v) - else: - d[k] = v - -def appendpath(prefix, path): - if os.path.sep != '/': - prefix = prefix.replace('/', os.path.sep) - path = path.replace('/', os.path.sep) - drive = '' - if os.path.isabs(path): - drive = os.path.splitdrive(prefix)[0] - absprefix = os.path.splitdrive(os.path.abspath(prefix))[1] - pathdrive, path = os.path.splitdrive(path) - d = os.path.commonprefix([absprefix, path]) - if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \ - or os.path.join(path[:len(d)], path[len(d):]) != path: - # Handle invalid paths - d = os.path.dirname(d) - subpath = path[len(d):] - if os.path.isabs(subpath): - subpath = subpath[1:] - else: - subpath = path - return os.path.normpath(njoin(drive + prefix, subpath)) - -def generate_config_py(target): - """Generate config.py file containing system_info information - used during building the package. 
- - Usage: - config['py_modules'].append((packagename, '__config__',generate_config_py)) - """ - from numpy.distutils.system_info import system_info - from distutils.dir_util import mkpath - mkpath(os.path.dirname(target)) - with open(target, 'w') as f: - f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0]))) - f.write('# It contains system_info results at the time of building this package.\n') - f.write('__all__ = ["get_info","show"]\n\n') - - # For gfortran+msvc combination, extra shared libraries may exist - f.write(textwrap.dedent(""" - import os - import sys - - extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs') - - if sys.platform == 'win32' and os.path.isdir(extra_dll_dir): - if sys.version_info >= (3, 8): - os.add_dll_directory(extra_dll_dir) - else: - os.environ.setdefault('PATH', '') - os.environ['PATH'] += os.pathsep + extra_dll_dir - - """)) - - for k, i in system_info.saved_results.items(): - f.write('%s=%r\n' % (k, i)) - f.write(textwrap.dedent(r''' - def get_info(name): - g = globals() - return g.get(name, g.get(name + "_info", {})) - - def show(): - for name,info_dict in globals().items(): - if name[0] == "_" or type(info_dict) is not type({}): continue - print(name + ":") - if not info_dict: - print(" NOT AVAILABLE") - for k,v in info_dict.items(): - v = str(v) - if k == "sources" and len(v) > 200: - v = v[:60] + " ...\n... " + v[-60:] - print(" %s = %s" % (k,v)) - ''')) - - return target - -def msvc_version(compiler): - """Return version major and minor of compiler instance if it is - MSVC, raise an exception otherwise.""" - if not compiler.compiler_type == "msvc": - raise ValueError("Compiler instance is not msvc (%s)"\ - % compiler.compiler_type) - return compiler._MSVCCompiler__version - -def get_build_architecture(): - # Importing distutils.msvccompiler triggers a warning on non-Windows - # systems, so delay the import to here. 
- from distutils.msvccompiler import get_build_architecture - return get_build_architecture() diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/msvc9compiler.py b/venv/lib/python3.7/site-packages/numpy/distutils/msvc9compiler.py deleted file mode 100644 index e9cc334..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/msvc9compiler.py +++ /dev/null @@ -1,65 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -from distutils.msvc9compiler import MSVCCompiler as _MSVCCompiler - -from .system_info import platform_bits - - -def _merge(old, new): - """Concatenate two environment paths avoiding repeats. - - Here `old` is the environment string before the base class initialize - function is called and `new` is the string after the call. The new string - will be a fixed string if it is not obtained from the current environment, - or the same as the old string if obtained from the same environment. The aim - here is not to append the new string if it is already contained in the old - string so as to limit the growth of the environment string. - - Parameters - ---------- - old : string - Previous environment string. - new : string - New environment string. - - Returns - ------- - ret : string - Updated environment string. - - """ - if not old: - return new - if new in old: - return old - - # Neither new nor old is empty. Give old priority. - return ';'.join([old, new]) - - -class MSVCCompiler(_MSVCCompiler): - def __init__(self, verbose=0, dry_run=0, force=0): - _MSVCCompiler.__init__(self, verbose, dry_run, force) - - def initialize(self, plat_name=None): - # The 'lib' and 'include' variables may be overwritten - # by MSVCCompiler.initialize, so save them for later merge. 
- environ_lib = os.getenv('lib') - environ_include = os.getenv('include') - _MSVCCompiler.initialize(self, plat_name) - - # Merge current and previous values of 'lib' and 'include' - os.environ['lib'] = _merge(environ_lib, os.environ['lib']) - os.environ['include'] = _merge(environ_include, os.environ['include']) - - # msvc9 building for 32 bits requires SSE2 to work around a - # compiler bug. - if platform_bits == 32: - self.compile_options += ['/arch:SSE2'] - self.compile_options_debug += ['/arch:SSE2'] - - def manifest_setup_ldargs(self, output_filename, build_temp, ld_args): - ld_args.append('/MANIFEST') - _MSVCCompiler.manifest_setup_ldargs(self, output_filename, - build_temp, ld_args) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/msvccompiler.py b/venv/lib/python3.7/site-packages/numpy/distutils/msvccompiler.py deleted file mode 100644 index 0cb4bf9..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/msvccompiler.py +++ /dev/null @@ -1,60 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -from distutils.msvccompiler import MSVCCompiler as _MSVCCompiler - -from .system_info import platform_bits - - -def _merge(old, new): - """Concatenate two environment paths avoiding repeats. - - Here `old` is the environment string before the base class initialize - function is called and `new` is the string after the call. The new string - will be a fixed string if it is not obtained from the current environment, - or the same as the old string if obtained from the same environment. The aim - here is not to append the new string if it is already contained in the old - string so as to limit the growth of the environment string. - - Parameters - ---------- - old : string - Previous environment string. - new : string - New environment string. - - Returns - ------- - ret : string - Updated environment string. - - """ - if new in old: - return old - if not old: - return new - - # Neither new nor old is empty. 
Give old priority. - return ';'.join([old, new]) - - -class MSVCCompiler(_MSVCCompiler): - def __init__(self, verbose=0, dry_run=0, force=0): - _MSVCCompiler.__init__(self, verbose, dry_run, force) - - def initialize(self): - # The 'lib' and 'include' variables may be overwritten - # by MSVCCompiler.initialize, so save them for later merge. - environ_lib = os.getenv('lib', '') - environ_include = os.getenv('include', '') - _MSVCCompiler.initialize(self) - - # Merge current and previous values of 'lib' and 'include' - os.environ['lib'] = _merge(environ_lib, os.environ['lib']) - os.environ['include'] = _merge(environ_include, os.environ['include']) - - # msvc9 building for 32 bits requires SSE2 to work around a - # compiler bug. - if platform_bits == 32: - self.compile_options += ['/arch:SSE2'] - self.compile_options_debug += ['/arch:SSE2'] diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/npy_pkg_config.py b/venv/lib/python3.7/site-packages/numpy/distutils/npy_pkg_config.py deleted file mode 100644 index 48584b4..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/npy_pkg_config.py +++ /dev/null @@ -1,443 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import re -import os - -if sys.version_info[0] < 3: - from ConfigParser import RawConfigParser -else: - from configparser import RawConfigParser - -__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet', - 'read_config', 'parse_flags'] - -_VAR = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}') - -class FormatError(IOError): - """ - Exception thrown when there is a problem parsing a configuration file. 
- - """ - def __init__(self, msg): - self.msg = msg - - def __str__(self): - return self.msg - -class PkgNotFound(IOError): - """Exception raised when a package can not be located.""" - def __init__(self, msg): - self.msg = msg - - def __str__(self): - return self.msg - -def parse_flags(line): - """ - Parse a line from a config file containing compile flags. - - Parameters - ---------- - line : str - A single line containing one or more compile flags. - - Returns - ------- - d : dict - Dictionary of parsed flags, split into relevant categories. - These categories are the keys of `d`: - - * 'include_dirs' - * 'library_dirs' - * 'libraries' - * 'macros' - * 'ignored' - - """ - d = {'include_dirs': [], 'library_dirs': [], 'libraries': [], - 'macros': [], 'ignored': []} - - flags = (' ' + line).split(' -') - for flag in flags: - flag = '-' + flag - if len(flag) > 0: - if flag.startswith('-I'): - d['include_dirs'].append(flag[2:].strip()) - elif flag.startswith('-L'): - d['library_dirs'].append(flag[2:].strip()) - elif flag.startswith('-l'): - d['libraries'].append(flag[2:].strip()) - elif flag.startswith('-D'): - d['macros'].append(flag[2:].strip()) - else: - d['ignored'].append(flag) - - return d - -def _escape_backslash(val): - return val.replace('\\', '\\\\') - -class LibraryInfo(object): - """ - Object containing build information about a library. - - Parameters - ---------- - name : str - The library name. - description : str - Description of the library. - version : str - Version string. - sections : dict - The sections of the configuration file for the library. The keys are - the section headers, the values the text under each header. - vars : class instance - A `VariableSet` instance, which contains ``(name, value)`` pairs for - variables defined in the configuration file for the library. - requires : sequence, optional - The required libraries for the library to be installed. 
- - Notes - ----- - All input parameters (except "sections" which is a method) are available as - attributes of the same name. - - """ - def __init__(self, name, description, version, sections, vars, requires=None): - self.name = name - self.description = description - if requires: - self.requires = requires - else: - self.requires = [] - self.version = version - self._sections = sections - self.vars = vars - - def sections(self): - """ - Return the section headers of the config file. - - Parameters - ---------- - None - - Returns - ------- - keys : list of str - The list of section headers. - - """ - return list(self._sections.keys()) - - def cflags(self, section="default"): - val = self.vars.interpolate(self._sections[section]['cflags']) - return _escape_backslash(val) - - def libs(self, section="default"): - val = self.vars.interpolate(self._sections[section]['libs']) - return _escape_backslash(val) - - def __str__(self): - m = ['Name: %s' % self.name, 'Description: %s' % self.description] - if self.requires: - m.append('Requires:') - else: - m.append('Requires: %s' % ",".join(self.requires)) - m.append('Version: %s' % self.version) - - return "\n".join(m) - -class VariableSet(object): - """ - Container object for the variables defined in a config file. - - `VariableSet` can be used as a plain dictionary, with the variable names - as keys. - - Parameters - ---------- - d : dict - Dict of items in the "variables" section of the configuration file. 
- - """ - def __init__(self, d): - self._raw_data = dict([(k, v) for k, v in d.items()]) - - self._re = {} - self._re_sub = {} - - self._init_parse() - - def _init_parse(self): - for k, v in self._raw_data.items(): - self._init_parse_var(k, v) - - def _init_parse_var(self, name, value): - self._re[name] = re.compile(r'\$\{%s\}' % name) - self._re_sub[name] = value - - def interpolate(self, value): - # Brute force: we keep interpolating until there is no '${var}' anymore - # or until interpolated string is equal to input string - def _interpolate(value): - for k in self._re.keys(): - value = self._re[k].sub(self._re_sub[k], value) - return value - while _VAR.search(value): - nvalue = _interpolate(value) - if nvalue == value: - break - value = nvalue - - return value - - def variables(self): - """ - Return the list of variable names. - - Parameters - ---------- - None - - Returns - ------- - names : list of str - The names of all variables in the `VariableSet` instance. - - """ - return list(self._raw_data.keys()) - - # Emulate a dict to set/get variables values - def __getitem__(self, name): - return self._raw_data[name] - - def __setitem__(self, name, value): - self._raw_data[name] = value - self._init_parse_var(name, value) - -def parse_meta(config): - if not config.has_section('meta'): - raise FormatError("No meta section found !") - - d = dict(config.items('meta')) - - for k in ['name', 'description', 'version']: - if not k in d: - raise FormatError("Option %s (section [meta]) is mandatory, " - "but not found" % k) - - if not 'requires' in d: - d['requires'] = [] - - return d - -def parse_variables(config): - if not config.has_section('variables'): - raise FormatError("No variables section found !") - - d = {} - - for name, value in config.items("variables"): - d[name] = value - - return VariableSet(d) - -def parse_sections(config): - return meta_d, r - -def pkg_to_filename(pkg_name): - return "%s.ini" % pkg_name - -def parse_config(filename, dirs=None): - if 
dirs: - filenames = [os.path.join(d, filename) for d in dirs] - else: - filenames = [filename] - - config = RawConfigParser() - - n = config.read(filenames) - if not len(n) >= 1: - raise PkgNotFound("Could not find file(s) %s" % str(filenames)) - - # Parse meta and variables sections - meta = parse_meta(config) - - vars = {} - if config.has_section('variables'): - for name, value in config.items("variables"): - vars[name] = _escape_backslash(value) - - # Parse "normal" sections - secs = [s for s in config.sections() if not s in ['meta', 'variables']] - sections = {} - - requires = {} - for s in secs: - d = {} - if config.has_option(s, "requires"): - requires[s] = config.get(s, 'requires') - - for name, value in config.items(s): - d[name] = value - sections[s] = d - - return meta, vars, sections, requires - -def _read_config_imp(filenames, dirs=None): - def _read_config(f): - meta, vars, sections, reqs = parse_config(f, dirs) - # recursively add sections and variables of required libraries - for rname, rvalue in reqs.items(): - nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue)) - - # Update var dict for variables not in 'top' config file - for k, v in nvars.items(): - if not k in vars: - vars[k] = v - - # Update sec dict - for oname, ovalue in nsections[rname].items(): - if ovalue: - sections[rname][oname] += ' %s' % ovalue - - return meta, vars, sections, reqs - - meta, vars, sections, reqs = _read_config(filenames) - - # FIXME: document this. If pkgname is defined in the variables section, and - # there is no pkgdir variable defined, pkgdir is automatically defined to - # the path of pkgname. 
This requires the package to be imported to work - if not 'pkgdir' in vars and "pkgname" in vars: - pkgname = vars["pkgname"] - if not pkgname in sys.modules: - raise ValueError("You should import %s to get information on %s" % - (pkgname, meta["name"])) - - mod = sys.modules[pkgname] - vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__)) - - return LibraryInfo(name=meta["name"], description=meta["description"], - version=meta["version"], sections=sections, vars=VariableSet(vars)) - -# Trivial cache to cache LibraryInfo instances creation. To be really -# efficient, the cache should be handled in read_config, since a same file can -# be parsed many time outside LibraryInfo creation, but I doubt this will be a -# problem in practice -_CACHE = {} -def read_config(pkgname, dirs=None): - """ - Return library info for a package from its configuration file. - - Parameters - ---------- - pkgname : str - Name of the package (should match the name of the .ini file, without - the extension, e.g. foo for the file foo.ini). - dirs : sequence, optional - If given, should be a sequence of directories - usually including - the NumPy base directory - where to look for npy-pkg-config files. - - Returns - ------- - pkginfo : class instance - The `LibraryInfo` instance containing the build information. - - Raises - ------ - PkgNotFound - If the package is not found. 
- - See Also - -------- - misc_util.get_info, misc_util.get_pkg_info - - Examples - -------- - >>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath') - >>> type(npymath_info) - - >>> print(npymath_info) - Name: npymath - Description: Portable, core math library implementing C99 standard - Requires: - Version: 0.1 #random - - """ - try: - return _CACHE[pkgname] - except KeyError: - v = _read_config_imp(pkg_to_filename(pkgname), dirs) - _CACHE[pkgname] = v - return v - -# TODO: -# - implements version comparison (modversion + atleast) - -# pkg-config simple emulator - useful for debugging, and maybe later to query -# the system -if __name__ == '__main__': - import sys - from optparse import OptionParser - import glob - - parser = OptionParser() - parser.add_option("--cflags", dest="cflags", action="store_true", - help="output all preprocessor and compiler flags") - parser.add_option("--libs", dest="libs", action="store_true", - help="output all linker flags") - parser.add_option("--use-section", dest="section", - help="use this section instead of default for options") - parser.add_option("--version", dest="version", action="store_true", - help="output version") - parser.add_option("--atleast-version", dest="min_version", - help="Minimal version") - parser.add_option("--list-all", dest="list_all", action="store_true", - help="Minimal version") - parser.add_option("--define-variable", dest="define_variable", - help="Replace variable with the given value") - - (options, args) = parser.parse_args(sys.argv) - - if len(args) < 2: - raise ValueError("Expect package name on the command line:") - - if options.list_all: - files = glob.glob("*.ini") - for f in files: - info = read_config(f) - print("%s\t%s - %s" % (info.name, info.name, info.description)) - - pkg_name = args[1] - d = os.environ.get('NPY_PKG_CONFIG_PATH') - if d: - info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.', d]) - else: - info = read_config(pkg_name, 
['numpy/core/lib/npy-pkg-config', '.']) - - if options.section: - section = options.section - else: - section = "default" - - if options.define_variable: - m = re.search(r'([\S]+)=([\S]+)', options.define_variable) - if not m: - raise ValueError("--define-variable option should be of " - "the form --define-variable=foo=bar") - else: - name = m.group(1) - value = m.group(2) - info.vars[name] = value - - if options.cflags: - print(info.cflags(section)) - if options.libs: - print(info.libs(section)) - if options.version: - print(info.version) - if options.min_version: - print(info.version >= options.min_version) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/numpy_distribution.py b/venv/lib/python3.7/site-packages/numpy/distutils/numpy_distribution.py deleted file mode 100644 index 6ae19d1..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/numpy_distribution.py +++ /dev/null @@ -1,19 +0,0 @@ -# XXX: Handle setuptools ? -from __future__ import division, absolute_import, print_function - -from distutils.core import Distribution - -# This class is used because we add new files (sconscripts, and so on) with the -# scons command -class NumpyDistribution(Distribution): - def __init__(self, attrs = None): - # A list of (sconscripts, pre_hook, post_hook, src, parent_names) - self.scons_data = [] - # A list of installable libraries - self.installed_libraries = [] - # A dict of pkg_config files to generate/install - self.installed_pkg_config = {} - Distribution.__init__(self, attrs) - - def has_scons_scripts(self): - return bool(self.scons_data) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/pathccompiler.py b/venv/lib/python3.7/site-packages/numpy/distutils/pathccompiler.py deleted file mode 100644 index fc9872d..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/pathccompiler.py +++ /dev/null @@ -1,23 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from distutils.unixccompiler import 
UnixCCompiler - -class PathScaleCCompiler(UnixCCompiler): - - """ - PathScale compiler compatible with an gcc built Python. - """ - - compiler_type = 'pathcc' - cc_exe = 'pathcc' - cxx_exe = 'pathCC' - - def __init__ (self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__ (self, verbose, dry_run, force) - cc_compiler = self.cc_exe - cxx_compiler = self.cxx_exe - self.set_executables(compiler=cc_compiler, - compiler_so=cc_compiler, - compiler_cxx=cxx_compiler, - linker_exe=cc_compiler, - linker_so=cc_compiler + ' -shared') diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/setup.py b/venv/lib/python3.7/site-packages/numpy/distutils/setup.py deleted file mode 100644 index 82a53bd..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/setup.py +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, print_function - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('distutils', parent_package, top_path) - config.add_subpackage('command') - config.add_subpackage('fcompiler') - config.add_data_dir('tests') - config.add_data_files('site.cfg') - config.add_data_files('mingw/gfortran_vs2003_hack.c') - config.make_config_py() - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/system_info.py b/venv/lib/python3.7/site-packages/numpy/distutils/system_info.py deleted file mode 100644 index fc7018a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/system_info.py +++ /dev/null @@ -1,2975 +0,0 @@ -#!/usr/bin/env python -""" -This file defines a set of system_info classes for getting -information about various resources (libraries, library directories, -include directories, etc.) in the system. 
Currently, the following -classes are available: - - atlas_info - atlas_threads_info - atlas_blas_info - atlas_blas_threads_info - lapack_atlas_info - lapack_atlas_threads_info - atlas_3_10_info - atlas_3_10_threads_info - atlas_3_10_blas_info, - atlas_3_10_blas_threads_info, - lapack_atlas_3_10_info - lapack_atlas_3_10_threads_info - flame_info - blas_info - lapack_info - openblas_info - openblas64__info - openblas_ilp64_info - blis_info - blas_opt_info # usage recommended - lapack_opt_info # usage recommended - blas_ilp64_opt_info # usage recommended (general ILP64 BLAS) - lapack_ilp64_opt_info # usage recommended (general ILP64 LAPACK) - blas_ilp64_plain_opt_info # usage recommended (general ILP64 BLAS, no symbol suffix) - lapack_ilp64_plain_opt_info # usage recommended (general ILP64 LAPACK, no symbol suffix) - blas64__opt_info # usage recommended (general ILP64 BLAS, 64_ symbol suffix) - lapack64__opt_info # usage recommended (general ILP64 LAPACK, 64_ symbol suffix) - fftw_info,dfftw_info,sfftw_info - fftw_threads_info,dfftw_threads_info,sfftw_threads_info - djbfft_info - x11_info - lapack_src_info - blas_src_info - numpy_info - numarray_info - numpy_info - boost_python_info - agg2_info - wx_info - gdk_pixbuf_xlib_2_info - gdk_pixbuf_2_info - gdk_x11_2_info - gtkp_x11_2_info - gtkp_2_info - xft_info - freetype2_info - umfpack_info - -Usage: - info_dict = get_info() - where is a string 'atlas','x11','fftw','lapack','blas', - 'lapack_src', 'blas_src', etc. For a complete list of allowed names, - see the definition of get_info() function below. - - Returned info_dict is a dictionary which is compatible with - distutils.setup keyword arguments. If info_dict == {}, then the - asked resource is not available (system_info could not find it). - - Several *_info classes specify an environment variable to specify - the locations of software. 
When setting the corresponding environment - variable to 'None' then the software will be ignored, even when it - is available in system. - -Global parameters: - system_info.search_static_first - search static libraries (.a) - in precedence to shared ones (.so, .sl) if enabled. - system_info.verbosity - output the results to stdout if enabled. - -The file 'site.cfg' is looked for in - -1) Directory of main setup.py file being run. -2) Home directory of user running the setup.py file as ~/.numpy-site.cfg -3) System wide directory (location of this file...) - -The first one found is used to get system configuration options The -format is that used by ConfigParser (i.e., Windows .INI style). The -section ALL has options that are the default for each section. The -available sections are fftw, atlas, and x11. Appropriate defaults are -used if nothing is specified. - -The order of finding the locations of resources is the following: - 1. environment variable - 2. section in site.cfg - 3. ALL section in site.cfg -Only the first complete match is returned. - -Example: ----------- -[ALL] -library_dirs = /usr/lib:/usr/local/lib:/opt/lib -include_dirs = /usr/include:/usr/local/include:/opt/include -src_dirs = /usr/local/src:/opt/src -# search static libraries (.a) in preference to shared ones (.so) -search_static_first = 0 - -[fftw] -libraries = rfftw, fftw - -[atlas] -library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas -# for overriding the names of the atlas libraries -libraries = lapack, f77blas, cblas, atlas - -[x11] -library_dirs = /usr/X11R6/lib -include_dirs = /usr/X11R6/include ----------- - -Note that the ``libraries`` key is the default setting for libraries. - -Authors: - Pearu Peterson , February 2002 - David M. Cooke , April 2002 - -Copyright 2002 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy (BSD style) license. 
See LICENSE.txt that came with -this distribution for specifics. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. - -""" -from __future__ import division, absolute_import, print_function - -import sys -import os -import re -import copy -import warnings -import subprocess -import textwrap - -from glob import glob -from functools import reduce -if sys.version_info[0] < 3: - from ConfigParser import NoOptionError - from ConfigParser import RawConfigParser as ConfigParser -else: - from configparser import NoOptionError - from configparser import RawConfigParser as ConfigParser -# It seems that some people are importing ConfigParser from here so is -# good to keep its class name. Use of RawConfigParser is needed in -# order to be able to load path names with percent in them, like -# `feature%2Fcool` which is common on git flow branch names. - -from distutils.errors import DistutilsError -from distutils.dist import Distribution -import distutils.sysconfig -from numpy.distutils import log -from distutils.util import get_platform - -from numpy.distutils.exec_command import ( - find_executable, filepath_from_subprocess_output, - get_pythonexe) -from numpy.distutils.misc_util import (is_sequence, is_string, - get_shared_lib_extension) -from numpy.distutils.command.config import config as cmd_config -from numpy.distutils.compat import get_exception -from numpy.distutils import customized_ccompiler as _customized_ccompiler -from numpy.distutils import _shell_utils -import distutils.ccompiler -import tempfile -import shutil - - -# Determine number of bits -import platform -_bits = {'32bit': 32, '64bit': 64} -platform_bits = _bits[platform.architecture()[0]] - - -global_compiler = None - -def customized_ccompiler(): - global global_compiler - if not global_compiler: - global_compiler = _customized_ccompiler() - return global_compiler - - -def _c_string_literal(s): - """ - Convert a python string into a literal suitable for inclusion into C code - """ - # only these 
three characters are forbidden in C strings - s = s.replace('\\', r'\\') - s = s.replace('"', r'\"') - s = s.replace('\n', r'\n') - return '"{}"'.format(s) - - -def libpaths(paths, bits): - """Return a list of library paths valid on 32 or 64 bit systems. - - Inputs: - paths : sequence - A sequence of strings (typically paths) - bits : int - An integer, the only valid values are 32 or 64. A ValueError exception - is raised otherwise. - - Examples: - - Consider a list of directories - >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib'] - - For a 32-bit platform, this is already valid: - >>> np.distutils.system_info.libpaths(paths,32) - ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib'] - - On 64 bits, we prepend the '64' postfix - >>> np.distutils.system_info.libpaths(paths,64) - ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib', - '/usr/lib64', '/usr/lib'] - """ - if bits not in (32, 64): - raise ValueError("Invalid bit size in libpaths: 32 or 64 only") - - # Handle 32bit case - if bits == 32: - return paths - - # Handle 64bit case - out = [] - for p in paths: - out.extend([p + '64', p]) - - return out - - -if sys.platform == 'win32': - default_lib_dirs = ['C:\\', - os.path.join(distutils.sysconfig.EXEC_PREFIX, - 'libs')] - default_runtime_dirs = [] - default_include_dirs = [] - default_src_dirs = ['.'] - default_x11_lib_dirs = [] - default_x11_include_dirs = [] - _include_dirs = [ - 'include', - 'include/suitesparse', - ] - _lib_dirs = [ - 'lib', - ] - - _include_dirs = [d.replace('/', os.sep) for d in _include_dirs] - _lib_dirs = [d.replace('/', os.sep) for d in _lib_dirs] - def add_system_root(library_root): - """Add a package manager root to the include directories""" - global default_lib_dirs - global default_include_dirs - - library_root = os.path.normpath(library_root) - - default_lib_dirs.extend( - os.path.join(library_root, d) for d in _lib_dirs) - default_include_dirs.extend( - os.path.join(library_root, d) for d in _include_dirs) - 
- if sys.version_info >= (3, 3): - # VCpkg is the de-facto package manager on windows for C/C++ - # libraries. If it is on the PATH, then we append its paths here. - # We also don't re-implement shutil.which for Python 2.7 because - # vcpkg doesn't support MSVC 2008. - vcpkg = shutil.which('vcpkg') - if vcpkg: - vcpkg_dir = os.path.dirname(vcpkg) - if platform.architecture() == '32bit': - specifier = 'x86' - else: - specifier = 'x64' - - vcpkg_installed = os.path.join(vcpkg_dir, 'installed') - for vcpkg_root in [ - os.path.join(vcpkg_installed, specifier + '-windows'), - os.path.join(vcpkg_installed, specifier + '-windows-static'), - ]: - add_system_root(vcpkg_root) - - # Conda is another popular package manager that provides libraries - conda = shutil.which('conda') - if conda: - conda_dir = os.path.dirname(conda) - add_system_root(os.path.join(conda_dir, '..', 'Library')) - add_system_root(os.path.join(conda_dir, 'Library')) - -else: - default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib', - '/opt/local/lib', '/sw/lib'], platform_bits) - default_runtime_dirs = [] - default_include_dirs = ['/usr/local/include', - '/opt/include', '/usr/include', - # path of umfpack under macports - '/opt/local/include/ufsparse', - '/opt/local/include', '/sw/include', - '/usr/include/suitesparse'] - default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src'] - - default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib', - '/usr/lib'], platform_bits) - default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include', - '/usr/include'] - - if os.path.exists('/usr/lib/X11'): - globbed_x11_dir = glob('/usr/lib/*/libX11.so') - if globbed_x11_dir: - x11_so_dir = os.path.split(globbed_x11_dir[0])[0] - default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11']) - default_x11_include_dirs.extend(['/usr/lib/X11/include', - '/usr/include/X11']) - - with open(os.devnull, 'w') as tmp: - try: - p = subprocess.Popen(["gcc", "-print-multiarch"], 
stdout=subprocess.PIPE, - stderr=tmp) - except (OSError, DistutilsError): - # OSError if gcc is not installed, or SandboxViolation (DistutilsError - # subclass) if an old setuptools bug is triggered (see gh-3160). - pass - else: - triplet = str(p.communicate()[0].decode().strip()) - if p.returncode == 0: - # gcc supports the "-print-multiarch" option - default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)] - default_lib_dirs += [os.path.join("/usr/lib/", triplet)] - - -if os.path.join(sys.prefix, 'lib') not in default_lib_dirs: - default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib')) - default_include_dirs.append(os.path.join(sys.prefix, 'include')) - default_src_dirs.append(os.path.join(sys.prefix, 'src')) - -default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)] -default_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)] -default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)] -default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)] - -so_ext = get_shared_lib_extension() - - -def get_standard_file(fname): - """Returns a list of files named 'fname' from - 1) System-wide directory (directory-location of this module) - 2) Users HOME directory (os.environ['HOME']) - 3) Local directory - """ - # System-wide file - filenames = [] - try: - f = __file__ - except NameError: - f = sys.argv[0] - else: - sysfile = os.path.join(os.path.split(os.path.abspath(f))[0], - fname) - if os.path.isfile(sysfile): - filenames.append(sysfile) - - # Home directory - # And look for the user config file - try: - f = os.path.expanduser('~') - except KeyError: - pass - else: - user_file = os.path.join(f, fname) - if os.path.isfile(user_file): - filenames.append(user_file) - - # Local file - if os.path.isfile(fname): - filenames.append(os.path.abspath(fname)) - - return filenames - - -def get_info(name, notfound_action=0): - """ - notfound_action: - 0 - do nothing - 1 - display warning message - 2 - raise 
error - """ - cl = {'atlas': atlas_info, # use lapack_opt or blas_opt instead - 'atlas_threads': atlas_threads_info, # ditto - 'atlas_blas': atlas_blas_info, - 'atlas_blas_threads': atlas_blas_threads_info, - 'lapack_atlas': lapack_atlas_info, # use lapack_opt instead - 'lapack_atlas_threads': lapack_atlas_threads_info, # ditto - 'atlas_3_10': atlas_3_10_info, # use lapack_opt or blas_opt instead - 'atlas_3_10_threads': atlas_3_10_threads_info, # ditto - 'atlas_3_10_blas': atlas_3_10_blas_info, - 'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info, - 'lapack_atlas_3_10': lapack_atlas_3_10_info, # use lapack_opt instead - 'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info, # ditto - 'flame': flame_info, # use lapack_opt instead - 'mkl': mkl_info, - # openblas which may or may not have embedded lapack - 'openblas': openblas_info, # use blas_opt instead - # openblas with embedded lapack - 'openblas_lapack': openblas_lapack_info, # use blas_opt instead - 'openblas_clapack': openblas_clapack_info, # use blas_opt instead - 'blis': blis_info, # use blas_opt instead - 'lapack_mkl': lapack_mkl_info, # use lapack_opt instead - 'blas_mkl': blas_mkl_info, # use blas_opt instead - 'accelerate': accelerate_info, # use blas_opt instead - 'openblas64_': openblas64__info, - 'openblas64__lapack': openblas64__lapack_info, - 'openblas_ilp64': openblas_ilp64_info, - 'openblas_ilp64_lapack': openblas_ilp64_lapack_info, - 'x11': x11_info, - 'fft_opt': fft_opt_info, - 'fftw': fftw_info, - 'fftw2': fftw2_info, - 'fftw3': fftw3_info, - 'dfftw': dfftw_info, - 'sfftw': sfftw_info, - 'fftw_threads': fftw_threads_info, - 'dfftw_threads': dfftw_threads_info, - 'sfftw_threads': sfftw_threads_info, - 'djbfft': djbfft_info, - 'blas': blas_info, # use blas_opt instead - 'lapack': lapack_info, # use lapack_opt instead - 'lapack_src': lapack_src_info, - 'blas_src': blas_src_info, - 'numpy': numpy_info, - 'f2py': f2py_info, - 'Numeric': Numeric_info, - 'numeric': Numeric_info, - 
'numarray': numarray_info, - 'numerix': numerix_info, - 'lapack_opt': lapack_opt_info, - 'lapack_ilp64_opt': lapack_ilp64_opt_info, - 'lapack_ilp64_plain_opt': lapack_ilp64_plain_opt_info, - 'lapack64__opt': lapack64__opt_info, - 'blas_opt': blas_opt_info, - 'blas_ilp64_opt': blas_ilp64_opt_info, - 'blas_ilp64_plain_opt': blas_ilp64_plain_opt_info, - 'blas64__opt': blas64__opt_info, - 'boost_python': boost_python_info, - 'agg2': agg2_info, - 'wx': wx_info, - 'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info, - 'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info, - 'gdk_pixbuf_2': gdk_pixbuf_2_info, - 'gdk-pixbuf-2.0': gdk_pixbuf_2_info, - 'gdk': gdk_info, - 'gdk_2': gdk_2_info, - 'gdk-2.0': gdk_2_info, - 'gdk_x11_2': gdk_x11_2_info, - 'gdk-x11-2.0': gdk_x11_2_info, - 'gtkp_x11_2': gtkp_x11_2_info, - 'gtk+-x11-2.0': gtkp_x11_2_info, - 'gtkp_2': gtkp_2_info, - 'gtk+-2.0': gtkp_2_info, - 'xft': xft_info, - 'freetype2': freetype2_info, - 'umfpack': umfpack_info, - 'amd': amd_info, - }.get(name.lower(), system_info) - return cl().get_info(notfound_action) - - -class NotFoundError(DistutilsError): - """Some third-party program or library is not found.""" - - -class AliasedOptionError(DistutilsError): - """ - Aliases entries in config files should not be existing. - In section '{section}' we found multiple appearances of options {options}.""" - - -class AtlasNotFoundError(NotFoundError): - """ - Atlas (http://github.com/math-atlas/math-atlas) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [atlas]) or by setting - the ATLAS environment variable.""" - - -class FlameNotFoundError(NotFoundError): - """ - FLAME (http://www.cs.utexas.edu/~flame/web/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [flame]).""" - - -class LapackNotFoundError(NotFoundError): - """ - Lapack (http://www.netlib.org/lapack/) libraries not found. 
- Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [lapack]) or by setting - the LAPACK environment variable.""" - - -class LapackSrcNotFoundError(LapackNotFoundError): - """ - Lapack (http://www.netlib.org/lapack/) sources not found. - Directories to search for the sources can be specified in the - numpy/distutils/site.cfg file (section [lapack_src]) or by setting - the LAPACK_SRC environment variable.""" - - -class LapackILP64NotFoundError(NotFoundError): - """ - 64-bit Lapack libraries not found. - Known libraries in numpy/distutils/site.cfg file are: - openblas64_, openblas_ilp64 - """ - -class BlasOptNotFoundError(NotFoundError): - """ - Optimized (vendor) Blas libraries are not found. - Falls back to netlib Blas library which has worse performance. - A better performance should be easily gained by switching - Blas library.""" - -class BlasNotFoundError(NotFoundError): - """ - Blas (http://www.netlib.org/blas/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [blas]) or by setting - the BLAS environment variable.""" - -class BlasILP64NotFoundError(NotFoundError): - """ - 64-bit Blas libraries not found. - Known libraries in numpy/distutils/site.cfg file are: - openblas64_, openblas_ilp64 - """ - -class BlasSrcNotFoundError(BlasNotFoundError): - """ - Blas (http://www.netlib.org/blas/) sources not found. - Directories to search for the sources can be specified in the - numpy/distutils/site.cfg file (section [blas_src]) or by setting - the BLAS_SRC environment variable.""" - - -class FFTWNotFoundError(NotFoundError): - """ - FFTW (http://www.fftw.org/) libraries not found. 
- Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [fftw]) or by setting - the FFTW environment variable.""" - - -class DJBFFTNotFoundError(NotFoundError): - """ - DJBFFT (https://cr.yp.to/djbfft.html) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [djbfft]) or by setting - the DJBFFT environment variable.""" - - -class NumericNotFoundError(NotFoundError): - """ - Numeric (https://www.numpy.org/) module not found. - Get it from above location, install it, and retry setup.py.""" - - -class X11NotFoundError(NotFoundError): - """X11 libraries not found.""" - - -class UmfpackNotFoundError(NotFoundError): - """ - UMFPACK sparse solver (https://www.cise.ufl.edu/research/sparse/umfpack/) - not found. Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [umfpack]) or by setting - the UMFPACK environment variable.""" - - -class system_info(object): - - """ get_info() is the only public method. Don't use others. - """ - section = 'ALL' - dir_env_var = None - search_static_first = 0 # XXX: disabled by default, may disappear in - # future unless it is proved to be useful. 
- saved_results = {} - - notfounderror = NotFoundError - - def __init__(self, - default_lib_dirs=default_lib_dirs, - default_include_dirs=default_include_dirs, - ): - self.__class__.info = {} - self.local_prefixes = [] - defaults = {'library_dirs': os.pathsep.join(default_lib_dirs), - 'include_dirs': os.pathsep.join(default_include_dirs), - 'runtime_library_dirs': os.pathsep.join(default_runtime_dirs), - 'rpath': '', - 'src_dirs': os.pathsep.join(default_src_dirs), - 'search_static_first': str(self.search_static_first), - 'extra_compile_args': '', 'extra_link_args': ''} - self.cp = ConfigParser(defaults) - self.files = [] - self.files.extend(get_standard_file('.numpy-site.cfg')) - self.files.extend(get_standard_file('site.cfg')) - self.parse_config_files() - - if self.section is not None: - self.search_static_first = self.cp.getboolean( - self.section, 'search_static_first') - assert isinstance(self.search_static_first, int) - - def parse_config_files(self): - self.cp.read(self.files) - if not self.cp.has_section(self.section): - if self.section is not None: - self.cp.add_section(self.section) - - def calc_libraries_info(self): - libs = self.get_libraries() - dirs = self.get_lib_dirs() - # The extensions use runtime_library_dirs - r_dirs = self.get_runtime_lib_dirs() - # Intrinsic distutils use rpath, we simply append both entries - # as though they were one entry - r_dirs.extend(self.get_runtime_lib_dirs(key='rpath')) - info = {} - for lib in libs: - i = self.check_libs(dirs, [lib]) - if i is not None: - dict_append(info, **i) - else: - log.info('Library %s was not found. 
Ignoring' % (lib)) - - if r_dirs: - i = self.check_libs(r_dirs, [lib]) - if i is not None: - # Swap library keywords found to runtime_library_dirs - # the libraries are insisting on the user having defined - # them using the library_dirs, and not necessarily by - # runtime_library_dirs - del i['libraries'] - i['runtime_library_dirs'] = i.pop('library_dirs') - dict_append(info, **i) - else: - log.info('Runtime library %s was not found. Ignoring' % (lib)) - - return info - - def set_info(self, **info): - if info: - lib_info = self.calc_libraries_info() - dict_append(info, **lib_info) - # Update extra information - extra_info = self.calc_extra_info() - dict_append(info, **extra_info) - self.saved_results[self.__class__.__name__] = info - - def get_option_single(self, *options): - """ Ensure that only one of `options` are found in the section - - Parameters - ---------- - *options : list of str - a list of options to be found in the section (``self.section``) - - Returns - ------- - str : - the option that is uniquely found in the section - - Raises - ------ - AliasedOptionError : - in case more than one of the options are found - """ - found = map(lambda opt: self.cp.has_option(self.section, opt), options) - found = list(found) - if sum(found) == 1: - return options[found.index(True)] - elif sum(found) == 0: - # nothing is found anyways - return options[0] - - # Else we have more than 1 key found - if AliasedOptionError.__doc__ is None: - raise AliasedOptionError() - raise AliasedOptionError(AliasedOptionError.__doc__.format( - section=self.section, options='[{}]'.format(', '.join(options)))) - - - def has_info(self): - return self.__class__.__name__ in self.saved_results - - def calc_extra_info(self): - """ Updates the information in the current information with - respect to these flags: - extra_compile_args - extra_link_args - """ - info = {} - for key in ['extra_compile_args', 'extra_link_args']: - # Get values - opt = self.cp.get(self.section, key) - opt = 
_shell_utils.NativeParser.split(opt) - if opt: - tmp = {key: opt} - dict_append(info, **tmp) - return info - - def get_info(self, notfound_action=0): - """ Return a dictonary with items that are compatible - with numpy.distutils.setup keyword arguments. - """ - flag = 0 - if not self.has_info(): - flag = 1 - log.info(self.__class__.__name__ + ':') - if hasattr(self, 'calc_info'): - self.calc_info() - if notfound_action: - if not self.has_info(): - if notfound_action == 1: - warnings.warn(self.notfounderror.__doc__, stacklevel=2) - elif notfound_action == 2: - raise self.notfounderror(self.notfounderror.__doc__) - else: - raise ValueError(repr(notfound_action)) - - if not self.has_info(): - log.info(' NOT AVAILABLE') - self.set_info() - else: - log.info(' FOUND:') - - res = self.saved_results.get(self.__class__.__name__) - if log.get_threshold() <= log.INFO and flag: - for k, v in res.items(): - v = str(v) - if k in ['sources', 'libraries'] and len(v) > 270: - v = v[:120] + '...\n...\n...' 
+ v[-120:] - log.info(' %s = %s', k, v) - log.info('') - - return copy.deepcopy(res) - - def get_paths(self, section, key): - dirs = self.cp.get(section, key).split(os.pathsep) - env_var = self.dir_env_var - if env_var: - if is_sequence(env_var): - e0 = env_var[-1] - for e in env_var: - if e in os.environ: - e0 = e - break - if not env_var[0] == e0: - log.info('Setting %s=%s' % (env_var[0], e0)) - env_var = e0 - if env_var and env_var in os.environ: - d = os.environ[env_var] - if d == 'None': - log.info('Disabled %s: %s', - self.__class__.__name__, '(%s is None)' - % (env_var,)) - return [] - if os.path.isfile(d): - dirs = [os.path.dirname(d)] + dirs - l = getattr(self, '_lib_names', []) - if len(l) == 1: - b = os.path.basename(d) - b = os.path.splitext(b)[0] - if b[:3] == 'lib': - log.info('Replacing _lib_names[0]==%r with %r' \ - % (self._lib_names[0], b[3:])) - self._lib_names[0] = b[3:] - else: - ds = d.split(os.pathsep) - ds2 = [] - for d in ds: - if os.path.isdir(d): - ds2.append(d) - for dd in ['include', 'lib']: - d1 = os.path.join(d, dd) - if os.path.isdir(d1): - ds2.append(d1) - dirs = ds2 + dirs - default_dirs = self.cp.get(self.section, key).split(os.pathsep) - dirs.extend(default_dirs) - ret = [] - for d in dirs: - if len(d) > 0 and not os.path.isdir(d): - warnings.warn('Specified path %s is invalid.' 
% d, stacklevel=2) - continue - - if d not in ret: - ret.append(d) - - log.debug('( %s = %s )', key, ':'.join(ret)) - return ret - - def get_lib_dirs(self, key='library_dirs'): - return self.get_paths(self.section, key) - - def get_runtime_lib_dirs(self, key='runtime_library_dirs'): - path = self.get_paths(self.section, key) - if path == ['']: - path = [] - return path - - def get_include_dirs(self, key='include_dirs'): - return self.get_paths(self.section, key) - - def get_src_dirs(self, key='src_dirs'): - return self.get_paths(self.section, key) - - def get_libs(self, key, default): - try: - libs = self.cp.get(self.section, key) - except NoOptionError: - if not default: - return [] - if is_string(default): - return [default] - return default - return [b for b in [a.strip() for a in libs.split(',')] if b] - - def get_libraries(self, key='libraries'): - if hasattr(self, '_lib_names'): - return self.get_libs(key, default=self._lib_names) - else: - return self.get_libs(key, '') - - def library_extensions(self): - c = customized_ccompiler() - static_exts = [] - if c.compiler_type != 'msvc': - # MSVC doesn't understand binutils - static_exts.append('.a') - if sys.platform == 'win32': - static_exts.append('.lib') # .lib is used by MSVC and others - if self.search_static_first: - exts = static_exts + [so_ext] - else: - exts = [so_ext] + static_exts - if sys.platform == 'cygwin': - exts.append('.dll.a') - if sys.platform == 'darwin': - exts.append('.dylib') - return exts - - def check_libs(self, lib_dirs, libs, opt_libs=[]): - """If static or shared libraries are available then return - their info dictionary. - - Checks for all libraries as shared libraries first, then - static (or vice versa if self.search_static_first is True). 
- """ - exts = self.library_extensions() - info = None - for ext in exts: - info = self._check_libs(lib_dirs, libs, opt_libs, [ext]) - if info is not None: - break - if not info: - log.info(' libraries %s not found in %s', ','.join(libs), - lib_dirs) - return info - - def check_libs2(self, lib_dirs, libs, opt_libs=[]): - """If static or shared libraries are available then return - their info dictionary. - - Checks each library for shared or static. - """ - exts = self.library_extensions() - info = self._check_libs(lib_dirs, libs, opt_libs, exts) - if not info: - log.info(' libraries %s not found in %s', ','.join(libs), - lib_dirs) - - return info - - def _find_lib(self, lib_dir, lib, exts): - assert is_string(lib_dir) - # under windows first try without 'lib' prefix - if sys.platform == 'win32': - lib_prefixes = ['', 'lib'] - else: - lib_prefixes = ['lib'] - # for each library name, see if we can find a file for it. - for ext in exts: - for prefix in lib_prefixes: - p = self.combine_paths(lib_dir, prefix + lib + ext) - if p: - break - if p: - assert len(p) == 1 - # ??? splitext on p[0] would do this for cygwin - # doesn't seem correct - if ext == '.dll.a': - lib += '.dll' - if ext == '.lib': - lib = prefix + lib - return lib - - return False - - def _find_libs(self, lib_dirs, libs, exts): - # make sure we preserve the order of libs, as it can be important - found_dirs, found_libs = [], [] - for lib in libs: - for lib_dir in lib_dirs: - found_lib = self._find_lib(lib_dir, lib, exts) - if found_lib: - found_libs.append(found_lib) - if lib_dir not in found_dirs: - found_dirs.append(lib_dir) - break - return found_dirs, found_libs - - def _check_libs(self, lib_dirs, libs, opt_libs, exts): - """Find mandatory and optional libs in expected paths. - - Missing optional libraries are silently forgotten. 
- """ - if not is_sequence(lib_dirs): - lib_dirs = [lib_dirs] - # First, try to find the mandatory libraries - found_dirs, found_libs = self._find_libs(lib_dirs, libs, exts) - if len(found_libs) > 0 and len(found_libs) == len(libs): - # Now, check for optional libraries - opt_found_dirs, opt_found_libs = self._find_libs(lib_dirs, opt_libs, exts) - found_libs.extend(opt_found_libs) - for lib_dir in opt_found_dirs: - if lib_dir not in found_dirs: - found_dirs.append(lib_dir) - info = {'libraries': found_libs, 'library_dirs': found_dirs} - return info - else: - return None - - def combine_paths(self, *args): - """Return a list of existing paths composed by all combinations - of items from the arguments. - """ - return combine_paths(*args) - - -class fft_opt_info(system_info): - - def calc_info(self): - info = {} - fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw') - djbfft_info = get_info('djbfft') - if fftw_info: - dict_append(info, **fftw_info) - if djbfft_info: - dict_append(info, **djbfft_info) - self.set_info(**info) - return - - -class fftw_info(system_info): - #variables to override - section = 'fftw' - dir_env_var = 'FFTW' - notfounderror = FFTWNotFoundError - ver_info = [{'name':'fftw3', - 'libs':['fftw3'], - 'includes':['fftw3.h'], - 'macros':[('SCIPY_FFTW3_H', None)]}, - {'name':'fftw2', - 'libs':['rfftw', 'fftw'], - 'includes':['fftw.h', 'rfftw.h'], - 'macros':[('SCIPY_FFTW_H', None)]}] - - def calc_ver_info(self, ver_param): - """Returns True on successful version detection, else False""" - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - - opt = self.get_option_single(self.section + '_libs', 'libraries') - libs = self.get_libs(opt, ver_param['libs']) - info = self.check_libs(lib_dirs, libs) - if info is not None: - flag = 0 - for d in incl_dirs: - if len(self.combine_paths(d, ver_param['includes'])) \ - == len(ver_param['includes']): - dict_append(info, include_dirs=[d]) - flag = 1 - break - if flag: - 
dict_append(info, define_macros=ver_param['macros']) - else: - info = None - if info is not None: - self.set_info(**info) - return True - else: - log.info(' %s not found' % (ver_param['name'])) - return False - - def calc_info(self): - for i in self.ver_info: - if self.calc_ver_info(i): - break - - -class fftw2_info(fftw_info): - #variables to override - section = 'fftw' - dir_env_var = 'FFTW' - notfounderror = FFTWNotFoundError - ver_info = [{'name':'fftw2', - 'libs':['rfftw', 'fftw'], - 'includes':['fftw.h', 'rfftw.h'], - 'macros':[('SCIPY_FFTW_H', None)]} - ] - - -class fftw3_info(fftw_info): - #variables to override - section = 'fftw3' - dir_env_var = 'FFTW3' - notfounderror = FFTWNotFoundError - ver_info = [{'name':'fftw3', - 'libs':['fftw3'], - 'includes':['fftw3.h'], - 'macros':[('SCIPY_FFTW3_H', None)]}, - ] - - -class dfftw_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'dfftw', - 'libs':['drfftw', 'dfftw'], - 'includes':['dfftw.h', 'drfftw.h'], - 'macros':[('SCIPY_DFFTW_H', None)]}] - - -class sfftw_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'sfftw', - 'libs':['srfftw', 'sfftw'], - 'includes':['sfftw.h', 'srfftw.h'], - 'macros':[('SCIPY_SFFTW_H', None)]}] - - -class fftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'fftw threads', - 'libs':['rfftw_threads', 'fftw_threads'], - 'includes':['fftw_threads.h', 'rfftw_threads.h'], - 'macros':[('SCIPY_FFTW_THREADS_H', None)]}] - - -class dfftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'dfftw threads', - 'libs':['drfftw_threads', 'dfftw_threads'], - 'includes':['dfftw_threads.h', 'drfftw_threads.h'], - 'macros':[('SCIPY_DFFTW_THREADS_H', None)]}] - - -class sfftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'sfftw threads', - 'libs':['srfftw_threads', 'sfftw_threads'], - 'includes':['sfftw_threads.h', 
'srfftw_threads.h'], - 'macros':[('SCIPY_SFFTW_THREADS_H', None)]}] - - -class djbfft_info(system_info): - section = 'djbfft' - dir_env_var = 'DJBFFT' - notfounderror = DJBFFTNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend(self.combine_paths(d, ['djbfft']) + [d]) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - info = None - for d in lib_dirs: - p = self.combine_paths(d, ['djbfft.a']) - if p: - info = {'extra_objects': p} - break - p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext]) - if p: - info = {'libraries': ['djbfft'], 'library_dirs': [d]} - break - if info is None: - return - for d in incl_dirs: - if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2: - dict_append(info, include_dirs=[d], - define_macros=[('SCIPY_DJBFFT_H', None)]) - self.set_info(**info) - return - return - - -class mkl_info(system_info): - section = 'mkl' - dir_env_var = 'MKLROOT' - _lib_mkl = ['mkl_rt'] - - def get_mkl_rootdir(self): - mklroot = os.environ.get('MKLROOT', None) - if mklroot is not None: - return mklroot - paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep) - ld_so_conf = '/etc/ld.so.conf' - if os.path.isfile(ld_so_conf): - with open(ld_so_conf, 'r') as f: - for d in f: - d = d.strip() - if d: - paths.append(d) - intel_mkl_dirs = [] - for path in paths: - path_atoms = path.split(os.sep) - for m in path_atoms: - if m.startswith('mkl'): - d = os.sep.join(path_atoms[:path_atoms.index(m) + 2]) - intel_mkl_dirs.append(d) - break - for d in paths: - dirs = glob(os.path.join(d, 'mkl', '*')) - dirs += glob(os.path.join(d, 'mkl*')) - for sub_dir in dirs: - if os.path.isdir(os.path.join(sub_dir, 'lib')): - return sub_dir - return None - - def __init__(self): - mklroot = self.get_mkl_rootdir() - if mklroot is None: - system_info.__init__(self) - else: - 
from .cpuinfo import cpu - if cpu.is_Itanium(): - plt = '64' - elif cpu.is_Intel() and cpu.is_64bit(): - plt = 'intel64' - else: - plt = '32' - system_info.__init__( - self, - default_lib_dirs=[os.path.join(mklroot, 'lib', plt)], - default_include_dirs=[os.path.join(mklroot, 'include')]) - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - opt = self.get_option_single('mkl_libs', 'libraries') - mkl_libs = self.get_libs(opt, self._lib_mkl) - info = self.check_libs2(lib_dirs, mkl_libs) - if info is None: - return - dict_append(info, - define_macros=[('SCIPY_MKL_H', None), - ('HAVE_CBLAS', None)], - include_dirs=incl_dirs) - if sys.platform == 'win32': - pass # win32 has no pthread library - else: - dict_append(info, libraries=['pthread']) - self.set_info(**info) - - -class lapack_mkl_info(mkl_info): - pass - - -class blas_mkl_info(mkl_info): - pass - - -class atlas_info(system_info): - section = 'atlas' - dir_env_var = 'ATLAS' - _lib_names = ['f77blas', 'cblas'] - if sys.platform[:7] == 'freebsd': - _lib_atlas = ['atlas_r'] - _lib_lapack = ['alapack_r'] - else: - _lib_atlas = ['atlas'] - _lib_lapack = ['lapack'] - - notfounderror = AtlasNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*', - 'sse', '3dnow', 'sse2']) + [d]) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - opt = self.get_option_single('atlas_libs', 'libraries') - atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas) - lapack_libs = self.get_libs('lapack_libs', self._lib_lapack) - atlas = None - lapack = None - atlas_1 = None - for d in lib_dirs: - # FIXME: lapack_atlas is unused - lapack_atlas = self.check_libs2(d, ['lapack_atlas'], []) - atlas = self.check_libs2(d, atlas_libs, []) - if atlas is not None: - lib_dirs2 = [d] + 
self.combine_paths(d, ['atlas*', 'ATLAS*']) - lapack = self.check_libs2(lib_dirs2, lapack_libs, []) - if lapack is not None: - break - if atlas: - atlas_1 = atlas - log.info(self.__class__) - if atlas is None: - atlas = atlas_1 - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) - h = h[0] - if h: - h = os.path.dirname(h) - dict_append(info, include_dirs=[h]) - info['language'] = 'c' - if lapack is not None: - dict_append(info, **lapack) - dict_append(info, **atlas) - elif 'lapack_atlas' in atlas['libraries']: - dict_append(info, **atlas) - dict_append(info, - define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)]) - self.set_info(**info) - return - else: - dict_append(info, **atlas) - dict_append(info, define_macros=[('ATLAS_WITHOUT_LAPACK', None)]) - message = textwrap.dedent(""" - ********************************************************************* - Could not find lapack library within the ATLAS installation. - ********************************************************************* - """) - warnings.warn(message, stacklevel=2) - self.set_info(**info) - return - - # Check if lapack library is complete, only warn if it is not. - lapack_dir = lapack['library_dirs'][0] - lapack_name = lapack['libraries'][0] - lapack_lib = None - lib_prefixes = ['lib'] - if sys.platform == 'win32': - lib_prefixes.append('') - for e in self.library_extensions(): - for prefix in lib_prefixes: - fn = os.path.join(lapack_dir, prefix + lapack_name + e) - if os.path.exists(fn): - lapack_lib = fn - break - if lapack_lib: - break - if lapack_lib is not None: - sz = os.stat(lapack_lib)[6] - if sz <= 4000 * 1024: - message = textwrap.dedent(""" - ********************************************************************* - Lapack library (from ATLAS) is probably incomplete: - size of %s is %sk (expected >4000k) - - Follow the instructions in the KNOWN PROBLEMS section of the file - numpy/INSTALL.txt. 
- ********************************************************************* - """) % (lapack_lib, sz / 1024) - warnings.warn(message, stacklevel=2) - else: - info['language'] = 'f77' - - atlas_version, atlas_extra_info = get_atlas_version(**atlas) - dict_append(info, **atlas_extra_info) - - self.set_info(**info) - - -class atlas_blas_info(atlas_info): - _lib_names = ['f77blas', 'cblas'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - opt = self.get_option_single('atlas_libs', 'libraries') - atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas) - atlas = self.check_libs2(lib_dirs, atlas_libs, []) - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) - h = h[0] - if h: - h = os.path.dirname(h) - dict_append(info, include_dirs=[h]) - info['language'] = 'c' - info['define_macros'] = [('HAVE_CBLAS', None)] - - atlas_version, atlas_extra_info = get_atlas_version(**atlas) - dict_append(atlas, **atlas_extra_info) - - dict_append(info, **atlas) - - self.set_info(**info) - return - - -class atlas_threads_info(atlas_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['ptf77blas', 'ptcblas'] - - -class atlas_blas_threads_info(atlas_blas_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['ptf77blas', 'ptcblas'] - - -class lapack_atlas_info(atlas_info): - _lib_names = ['lapack_atlas'] + atlas_info._lib_names - - -class lapack_atlas_threads_info(atlas_threads_info): - _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names - - -class atlas_3_10_info(atlas_info): - _lib_names = ['satlas'] - _lib_atlas = _lib_names - _lib_lapack = _lib_names - - -class atlas_3_10_blas_info(atlas_3_10_info): - _lib_names = ['satlas'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - opt = self.get_option_single('atlas_lib', 'libraries') - atlas_libs = self.get_libs(opt, self._lib_names) - atlas = self.check_libs2(lib_dirs, atlas_libs, 
[]) - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) - h = h[0] - if h: - h = os.path.dirname(h) - dict_append(info, include_dirs=[h]) - info['language'] = 'c' - info['define_macros'] = [('HAVE_CBLAS', None)] - - atlas_version, atlas_extra_info = get_atlas_version(**atlas) - dict_append(atlas, **atlas_extra_info) - - dict_append(info, **atlas) - - self.set_info(**info) - return - - -class atlas_3_10_threads_info(atlas_3_10_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['tatlas'] - _lib_atlas = _lib_names - _lib_lapack = _lib_names - - -class atlas_3_10_blas_threads_info(atlas_3_10_blas_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['tatlas'] - - -class lapack_atlas_3_10_info(atlas_3_10_info): - pass - - -class lapack_atlas_3_10_threads_info(atlas_3_10_threads_info): - pass - - -class lapack_info(system_info): - section = 'lapack' - dir_env_var = 'LAPACK' - _lib_names = ['lapack'] - notfounderror = LapackNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - opt = self.get_option_single('lapack_libs', 'libraries') - lapack_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, lapack_libs, []) - if info is None: - return - info['language'] = 'f77' - self.set_info(**info) - - -class lapack_src_info(system_info): - section = 'lapack_src' - dir_env_var = 'LAPACK_SRC' - notfounderror = LapackSrcNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'dgesv.f')): - src_dir = d - break - if not src_dir: - #XXX: Get sources from netlib. May be ask first. 
- return - # The following is extracted from LAPACK-3.0/SRC/Makefile. - # Added missing names from lapack-lite-3.1.1/SRC/Makefile - # while keeping removed names for Lapack-3.0 compatibility. - allaux = ''' - ilaenv ieeeck lsame lsamen xerbla - iparmq - ''' # *.f - laux = ''' - bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1 - laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2 - lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre - larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4 - lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1 - lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf - stebz stedc steqr sterf - - larra larrc larrd larr larrk larrj larrr laneg laisnan isnan - lazq3 lazq4 - ''' # [s|d]*.f - lasrc = ''' - gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak - gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv - gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2 - geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd - gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal - gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd - ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein - hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0 - lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb - lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp - laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv - lartv larz larzb larzt laswp lasyf latbs latdf latps latrd - latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv - pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2 - potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri - pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs - spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv - sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2 - tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs - trcon trevc trexc trrfs trsen trsna trsyl trti2 
trtri trtrs - tzrqf tzrzf - - lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5 - ''' # [s|c|d|z]*.f - sd_lasrc = ''' - laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l - org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr - orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3 - ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx - sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd - stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd - sygvx sytd2 sytrd - ''' # [s|d]*.f - cz_lasrc = ''' - bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev - heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv - hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd - hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf - hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7 - laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe - laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv - spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq - ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2 - unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr - ''' # [c|z]*.f - ####### - sclaux = laux + ' econd ' # s*.f - dzlaux = laux + ' secnd ' # d*.f - slasrc = lasrc + sd_lasrc # s*.f - dlasrc = lasrc + sd_lasrc # d*.f - clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f - zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f - oclasrc = ' icmax1 scsum1 ' # *.f - ozlasrc = ' izmax1 dzsum1 ' # *.f - sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \ - + ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \ - + ['c%s.f' % f for f in (clasrc).split()] \ - + ['z%s.f' % f for f in (zlasrc).split()] \ - + ['%s.f' % f for f in (allaux + oclasrc + ozlasrc).split()] - sources = [os.path.join(src_dir, f) for f in sources] - # Lapack 3.1: - src_dir2 = os.path.join(src_dir, '..', 'INSTALL') - sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz'] - # Lapack 3.2.1: 
- sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz'] - sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz'] - sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz'] - # Should we check here actual existence of source files? - # Yes, the file listing is different between 3.0 and 3.1 - # versions. - sources = [f for f in sources if os.path.isfile(f)] - info = {'sources': sources, 'language': 'f77'} - self.set_info(**info) - -atlas_version_c_text = r''' -/* This file is generated from numpy/distutils/system_info.py */ -void ATL_buildinfo(void); -int main(void) { - ATL_buildinfo(); - return 0; -} -''' - -_cached_atlas_version = {} - - -def get_atlas_version(**config): - libraries = config.get('libraries', []) - library_dirs = config.get('library_dirs', []) - key = (tuple(libraries), tuple(library_dirs)) - if key in _cached_atlas_version: - return _cached_atlas_version[key] - c = cmd_config(Distribution()) - atlas_version = None - info = {} - try: - s, o = c.get_output(atlas_version_c_text, - libraries=libraries, library_dirs=library_dirs, - ) - if s and re.search(r'undefined reference to `_gfortran', o, re.M): - s, o = c.get_output(atlas_version_c_text, - libraries=libraries + ['gfortran'], - library_dirs=library_dirs, - ) - if not s: - warnings.warn(textwrap.dedent(""" - ***************************************************** - Linkage with ATLAS requires gfortran. Use - - python setup.py config_fc --fcompiler=gnu95 ... - - when building extension libraries that use ATLAS. - Make sure that -lgfortran is used for C++ extensions. 
- ***************************************************** - """), stacklevel=2) - dict_append(info, language='f90', - define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)]) - except Exception: # failed to get version from file -- maybe on Windows - # look at directory name - for o in library_dirs: - m = re.search(r'ATLAS_(?P\d+[.]\d+[.]\d+)_', o) - if m: - atlas_version = m.group('version') - if atlas_version is not None: - break - - # final choice --- look at ATLAS_VERSION environment - # variable - if atlas_version is None: - atlas_version = os.environ.get('ATLAS_VERSION', None) - if atlas_version: - dict_append(info, define_macros=[( - 'ATLAS_INFO', _c_string_literal(atlas_version)) - ]) - else: - dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)]) - return atlas_version or '?.?.?', info - - if not s: - m = re.search(r'ATLAS version (?P\d+[.]\d+[.]\d+)', o) - if m: - atlas_version = m.group('version') - if atlas_version is None: - if re.search(r'undefined symbol: ATL_buildinfo', o, re.M): - atlas_version = '3.2.1_pre3.3.6' - else: - log.info('Status: %d', s) - log.info('Output: %s', o) - - elif atlas_version == '3.2.1_pre3.3.6': - dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)]) - else: - dict_append(info, define_macros=[( - 'ATLAS_INFO', _c_string_literal(atlas_version)) - ]) - result = _cached_atlas_version[key] = atlas_version, info - return result - - -class lapack_opt_info(system_info): - notfounderror = LapackNotFoundError - # List of all known BLAS libraries, in the default order - lapack_order = ['mkl', 'openblas', 'flame', 'atlas', 'accelerate', 'lapack'] - order_env_var_name = 'NPY_LAPACK_ORDER' - - def _calc_info_mkl(self): - info = get_info('lapack_mkl') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_openblas(self): - info = get_info('openblas_lapack') - if info: - self.set_info(**info) - return True - info = get_info('openblas_clapack') - if info: - self.set_info(**info) - return True - return False - - def 
_calc_info_flame(self): - info = get_info('flame') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_atlas(self): - info = get_info('atlas_3_10_threads') - if not info: - info = get_info('atlas_3_10') - if not info: - info = get_info('atlas_threads') - if not info: - info = get_info('atlas') - if info: - # Figure out if ATLAS has lapack... - # If not we need the lapack library, but not BLAS! - l = info.get('define_macros', []) - if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \ - or ('ATLAS_WITHOUT_LAPACK', None) in l: - # Get LAPACK (with possible warnings) - # If not found we don't accept anything - # since we can't use ATLAS with LAPACK! - lapack_info = self._get_info_lapack() - if not lapack_info: - return False - dict_append(info, **lapack_info) - self.set_info(**info) - return True - return False - - def _calc_info_accelerate(self): - info = get_info('accelerate') - if info: - self.set_info(**info) - return True - return False - - def _get_info_blas(self): - # Default to get the optimized BLAS implementation - info = get_info('blas_opt') - if not info: - warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3) - info_src = get_info('blas_src') - if not info_src: - warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3) - return {} - dict_append(info, libraries=[('fblas_src', info_src)]) - return info - - def _get_info_lapack(self): - info = get_info('lapack') - if not info: - warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=3) - info_src = get_info('lapack_src') - if not info_src: - warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=3) - return {} - dict_append(info, libraries=[('flapack_src', info_src)]) - return info - - def _calc_info_lapack(self): - info = self._get_info_lapack() - if info: - info_blas = self._get_info_blas() - dict_append(info, **info_blas) - dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) - self.set_info(**info) - return True - return False - - def _calc_info(self, 
name): - return getattr(self, '_calc_info_{}'.format(name))() - - def calc_info(self): - user_order = os.environ.get(self.order_env_var_name, None) - if user_order is None: - lapack_order = self.lapack_order - else: - # the user has requested the order of the - # check they are all in the available list, a COMMA SEPARATED list - user_order = user_order.lower().split(',') - non_existing = [] - lapack_order = [] - for order in user_order: - if order in self.lapack_order: - lapack_order.append(order) - elif len(order) > 0: - non_existing.append(order) - if len(non_existing) > 0: - raise ValueError("lapack_opt_info user defined " - "LAPACK order has unacceptable " - "values: {}".format(non_existing)) - - for lapack in lapack_order: - if self._calc_info(lapack): - return - - if 'lapack' not in lapack_order: - # Since the user may request *not* to use any library, we still need - # to raise warnings to signal missing packages! - warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=2) - warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=2) - - -class _ilp64_opt_info_mixin: - symbol_suffix = None - symbol_prefix = None - - def _check_info(self, info): - macros = dict(info.get('define_macros', [])) - prefix = macros.get('BLAS_SYMBOL_PREFIX', '') - suffix = macros.get('BLAS_SYMBOL_SUFFIX', '') - - if self.symbol_prefix not in (None, prefix): - return False - - if self.symbol_suffix not in (None, suffix): - return False - - return bool(info) - - -class lapack_ilp64_opt_info(lapack_opt_info, _ilp64_opt_info_mixin): - notfounderror = LapackILP64NotFoundError - lapack_order = ['openblas64_', 'openblas_ilp64'] - order_env_var_name = 'NPY_LAPACK_ILP64_ORDER' - - def _calc_info(self, name): - info = get_info(name + '_lapack') - if self._check_info(info): - self.set_info(**info) - return True - return False - - -class lapack_ilp64_plain_opt_info(lapack_ilp64_opt_info): - # Same as lapack_ilp64_opt_info, but fix symbol names - symbol_prefix = '' - symbol_suffix = 
'' - - -class lapack64__opt_info(lapack_ilp64_opt_info): - symbol_prefix = '' - symbol_suffix = '64_' - - -class blas_opt_info(system_info): - notfounderror = BlasNotFoundError - # List of all known BLAS libraries, in the default order - blas_order = ['mkl', 'blis', 'openblas', 'atlas', 'accelerate', 'blas'] - order_env_var_name = 'NPY_BLAS_ORDER' - - def _calc_info_mkl(self): - info = get_info('blas_mkl') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_blis(self): - info = get_info('blis') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_openblas(self): - info = get_info('openblas') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_atlas(self): - info = get_info('atlas_3_10_blas_threads') - if not info: - info = get_info('atlas_3_10_blas') - if not info: - info = get_info('atlas_blas_threads') - if not info: - info = get_info('atlas_blas') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_accelerate(self): - info = get_info('accelerate') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_blas(self): - # Warn about a non-optimized BLAS library - warnings.warn(BlasOptNotFoundError.__doc__ or '', stacklevel=3) - info = {} - dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) - - blas = get_info('blas') - if blas: - dict_append(info, **blas) - else: - # Not even BLAS was found! 
- warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3) - - blas_src = get_info('blas_src') - if not blas_src: - warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3) - return False - dict_append(info, libraries=[('fblas_src', blas_src)]) - - self.set_info(**info) - return True - - def _calc_info(self, name): - return getattr(self, '_calc_info_{}'.format(name))() - - def calc_info(self): - user_order = os.environ.get(self.order_env_var_name, None) - if user_order is None: - blas_order = self.blas_order - else: - # the user has requested the order of the - # check they are all in the available list - user_order = user_order.lower().split(',') - non_existing = [] - blas_order = [] - for order in user_order: - if order in self.blas_order: - blas_order.append(order) - elif len(order) > 0: - non_existing.append(order) - if len(non_existing) > 0: - raise ValueError("blas_opt_info user defined BLAS order has unacceptable values: {}".format(non_existing)) - - for blas in blas_order: - if self._calc_info(blas): - return - - if 'blas' not in blas_order: - # Since the user may request *not* to use any library, we still need - # to raise warnings to signal missing packages! 
- warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=2) - warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=2) - - -class blas_ilp64_opt_info(blas_opt_info, _ilp64_opt_info_mixin): - notfounderror = BlasILP64NotFoundError - blas_order = ['openblas64_', 'openblas_ilp64'] - order_env_var_name = 'NPY_BLAS_ILP64_ORDER' - - def _calc_info(self, name): - info = get_info(name) - if self._check_info(info): - self.set_info(**info) - return True - return False - - -class blas_ilp64_plain_opt_info(blas_ilp64_opt_info): - symbol_prefix = '' - symbol_suffix = '' - - -class blas64__opt_info(blas_ilp64_opt_info): - symbol_prefix = '' - symbol_suffix = '64_' - - -class blas_info(system_info): - section = 'blas' - dir_env_var = 'BLAS' - _lib_names = ['blas'] - notfounderror = BlasNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - opt = self.get_option_single('blas_libs', 'libraries') - blas_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, blas_libs, []) - if info is None: - return - else: - info['include_dirs'] = self.get_include_dirs() - if platform.system() == 'Windows': - # The check for windows is needed because get_cblas_libs uses the - # same compiler that was used to compile Python and msvc is - # often not installed when mingw is being used. This rough - # treatment is not desirable, but windows is tricky. - info['language'] = 'f77' # XXX: is it generally true? - else: - lib = self.get_cblas_libs(info) - if lib is not None: - info['language'] = 'c' - info['libraries'] = lib - info['define_macros'] = [('HAVE_CBLAS', None)] - self.set_info(**info) - - def get_cblas_libs(self, info): - """ Check whether we can link with CBLAS interface - - This method will search through several combinations of libraries - to check whether CBLAS is present: - - 1. Libraries in ``info['libraries']``, as is - 2. As 1. but also explicitly adding ``'cblas'`` as a library - 3. As 1. 
but also explicitly adding ``'blas'`` as a library - 4. Check only library ``'cblas'`` - 5. Check only library ``'blas'`` - - Parameters - ---------- - info : dict - system information dictionary for compilation and linking - - Returns - ------- - libraries : list of str or None - a list of libraries that enables the use of CBLAS interface. - Returns None if not found or a compilation error occurs. - - Since 1.17 returns a list. - """ - # primitive cblas check by looking for the header and trying to link - # cblas or blas - c = customized_ccompiler() - tmpdir = tempfile.mkdtemp() - s = textwrap.dedent("""\ - #include - int main(int argc, const char *argv[]) - { - double a[4] = {1,2,3,4}; - double b[4] = {5,6,7,8}; - return cblas_ddot(4, a, 1, b, 1) > 10; - }""") - src = os.path.join(tmpdir, 'source.c') - try: - with open(src, 'wt') as f: - f.write(s) - - try: - # check we can compile (find headers) - obj = c.compile([src], output_dir=tmpdir, - include_dirs=self.get_include_dirs()) - except (distutils.ccompiler.CompileError, distutils.ccompiler.LinkError): - return None - - # check we can link (find library) - # some systems have separate cblas and blas libs. 
- for libs in [info['libraries'], ['cblas'] + info['libraries'], - ['blas'] + info['libraries'], ['cblas'], ['blas']]: - try: - c.link_executable(obj, os.path.join(tmpdir, "a.out"), - libraries=libs, - library_dirs=info['library_dirs'], - extra_postargs=info.get('extra_link_args', [])) - return libs - except distutils.ccompiler.LinkError: - pass - finally: - shutil.rmtree(tmpdir) - return None - - -class openblas_info(blas_info): - section = 'openblas' - dir_env_var = 'OPENBLAS' - _lib_names = ['openblas'] - _require_symbols = [] - notfounderror = BlasNotFoundError - - @property - def symbol_prefix(self): - try: - return self.cp.get(self.section, 'symbol_prefix') - except NoOptionError: - return '' - - @property - def symbol_suffix(self): - try: - return self.cp.get(self.section, 'symbol_suffix') - except NoOptionError: - return '' - - def _calc_info(self): - c = customized_ccompiler() - - lib_dirs = self.get_lib_dirs() - - # Prefer to use libraries over openblas_libs - opt = self.get_option_single('openblas_libs', 'libraries') - openblas_libs = self.get_libs(opt, self._lib_names) - - info = self.check_libs(lib_dirs, openblas_libs, []) - - if c.compiler_type == "msvc" and info is None: - from numpy.distutils.fcompiler import new_fcompiler - f = new_fcompiler(c_compiler=c) - if f and f.compiler_type == 'gnu95': - # Try gfortran-compatible library files - info = self.check_msvc_gfortran_libs(lib_dirs, openblas_libs) - # Skip lapack check, we'd need build_ext to do it - skip_symbol_check = True - elif info: - skip_symbol_check = False - info['language'] = 'c' - - if info is None: - return None - - # Add extra info for OpenBLAS - extra_info = self.calc_extra_info() - dict_append(info, **extra_info) - - if not (skip_symbol_check or self.check_symbols(info)): - return None - - info['define_macros'] = [('HAVE_CBLAS', None)] - if self.symbol_prefix: - info['define_macros'] += [('BLAS_SYMBOL_PREFIX', self.symbol_prefix)] - if self.symbol_suffix: - info['define_macros'] += 
[('BLAS_SYMBOL_SUFFIX', self.symbol_suffix)] - - return info - - def calc_info(self): - info = self._calc_info() - if info is not None: - self.set_info(**info) - - def check_msvc_gfortran_libs(self, library_dirs, libraries): - # First, find the full path to each library directory - library_paths = [] - for library in libraries: - for library_dir in library_dirs: - # MinGW static ext will be .a - fullpath = os.path.join(library_dir, library + '.a') - if os.path.isfile(fullpath): - library_paths.append(fullpath) - break - else: - return None - - # Generate numpy.distutils virtual static library file - basename = self.__class__.__name__ - tmpdir = os.path.join(os.getcwd(), 'build', basename) - if not os.path.isdir(tmpdir): - os.makedirs(tmpdir) - - info = {'library_dirs': [tmpdir], - 'libraries': [basename], - 'language': 'f77'} - - fake_lib_file = os.path.join(tmpdir, basename + '.fobjects') - fake_clib_file = os.path.join(tmpdir, basename + '.cobjects') - with open(fake_lib_file, 'w') as f: - f.write("\n".join(library_paths)) - with open(fake_clib_file, 'w') as f: - pass - - return info - - def check_symbols(self, info): - res = False - c = customized_ccompiler() - - tmpdir = tempfile.mkdtemp() - - prototypes = "\n".join("void %s%s%s();" % (self.symbol_prefix, - symbol_name, - self.symbol_suffix) - for symbol_name in self._require_symbols) - calls = "\n".join("%s%s%s();" % (self.symbol_prefix, - symbol_name, - self.symbol_suffix) - for symbol_name in self._require_symbols) - s = textwrap.dedent("""\ - %(prototypes)s - int main(int argc, const char *argv[]) - { - %(calls)s - return 0; - }""") % dict(prototypes=prototypes, calls=calls) - src = os.path.join(tmpdir, 'source.c') - out = os.path.join(tmpdir, 'a.out') - # Add the additional "extra" arguments - try: - extra_args = info['extra_link_args'] - except Exception: - extra_args = [] - if sys.version_info < (3, 5) and sys.version_info > (3, 0) and c.compiler_type == "msvc": - extra_args.append("/MANIFEST") - try: - 
with open(src, 'wt') as f: - f.write(s) - obj = c.compile([src], output_dir=tmpdir) - try: - c.link_executable(obj, out, libraries=info['libraries'], - library_dirs=info['library_dirs'], - extra_postargs=extra_args) - res = True - except distutils.ccompiler.LinkError: - res = False - finally: - shutil.rmtree(tmpdir) - return res - -class openblas_lapack_info(openblas_info): - section = 'openblas' - dir_env_var = 'OPENBLAS' - _lib_names = ['openblas'] - _require_symbols = ['zungqr_'] - notfounderror = BlasNotFoundError - -class openblas_clapack_info(openblas_lapack_info): - _lib_names = ['openblas', 'lapack'] - -class openblas_ilp64_info(openblas_info): - section = 'openblas_ilp64' - dir_env_var = 'OPENBLAS_ILP64' - _lib_names = ['openblas64'] - _require_symbols = ['dgemm_', 'cblas_dgemm'] - notfounderror = BlasILP64NotFoundError - - def _calc_info(self): - info = super()._calc_info() - if info is not None: - info['define_macros'] += [('HAVE_BLAS_ILP64', None)] - return info - -class openblas_ilp64_lapack_info(openblas_ilp64_info): - _require_symbols = ['dgemm_', 'cblas_dgemm', 'zungqr_', 'LAPACKE_zungqr'] - - def _calc_info(self): - info = super()._calc_info() - if info: - info['define_macros'] += [('HAVE_LAPACKE', None)] - return info - -class openblas64__info(openblas_ilp64_info): - # ILP64 Openblas, with default symbol suffix - section = 'openblas64_' - dir_env_var = 'OPENBLAS64_' - _lib_names = ['openblas64_'] - symbol_suffix = '64_' - symbol_prefix = '' - -class openblas64__lapack_info(openblas_ilp64_lapack_info, openblas64__info): - pass - -class blis_info(blas_info): - section = 'blis' - dir_env_var = 'BLIS' - _lib_names = ['blis'] - notfounderror = BlasNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - opt = self.get_option_single('blis_libs', 'libraries') - blis_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs2(lib_dirs, blis_libs, []) - if info is None: - return - - # Add include dirs - incl_dirs = 
self.get_include_dirs() - dict_append(info, - language='c', - define_macros=[('HAVE_CBLAS', None)], - include_dirs=incl_dirs) - self.set_info(**info) - - -class flame_info(system_info): - """ Usage of libflame for LAPACK operations - - This requires libflame to be compiled with lapack wrappers: - - ./configure --enable-lapack2flame ... - - Be aware that libflame 5.1.0 has some missing names in the shared library, so - if you have problems, try the static flame library. - """ - section = 'flame' - _lib_names = ['flame'] - notfounderror = FlameNotFoundError - - def check_embedded_lapack(self, info): - """ libflame does not necessarily have a wrapper for fortran LAPACK, we need to check """ - c = customized_ccompiler() - - tmpdir = tempfile.mkdtemp() - s = textwrap.dedent("""\ - void zungqr_(); - int main(int argc, const char *argv[]) - { - zungqr_(); - return 0; - }""") - src = os.path.join(tmpdir, 'source.c') - out = os.path.join(tmpdir, 'a.out') - # Add the additional "extra" arguments - extra_args = info.get('extra_link_args', []) - try: - with open(src, 'wt') as f: - f.write(s) - obj = c.compile([src], output_dir=tmpdir) - try: - c.link_executable(obj, out, libraries=info['libraries'], - library_dirs=info['library_dirs'], - extra_postargs=extra_args) - return True - except distutils.ccompiler.LinkError: - return False - finally: - shutil.rmtree(tmpdir) - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - flame_libs = self.get_libs('libraries', self._lib_names) - - info = self.check_libs2(lib_dirs, flame_libs, []) - if info is None: - return - - if self.check_embedded_lapack(info): - # check if the user has supplied all information required - self.set_info(**info) - else: - # Try and get the BLAS lib to see if we can get it to work - blas_info = get_info('blas_opt') - if not blas_info: - # since we already failed once, this ain't going to work either - return - - # Now we need to merge the two dictionaries - for key in blas_info: - if 
isinstance(blas_info[key], list): - info[key] = info.get(key, []) + blas_info[key] - elif isinstance(blas_info[key], tuple): - info[key] = info.get(key, ()) + blas_info[key] - else: - info[key] = info.get(key, '') + blas_info[key] - - # Now check again - if self.check_embedded_lapack(info): - self.set_info(**info) - - -class accelerate_info(system_info): - section = 'accelerate' - _lib_names = ['accelerate', 'veclib'] - notfounderror = BlasNotFoundError - - def calc_info(self): - # Make possible to enable/disable from config file/env var - libraries = os.environ.get('ACCELERATE') - if libraries: - libraries = [libraries] - else: - libraries = self.get_libs('libraries', self._lib_names) - libraries = [lib.strip().lower() for lib in libraries] - - if (sys.platform == 'darwin' and - not os.getenv('_PYTHON_HOST_PLATFORM', None)): - # Use the system BLAS from Accelerate or vecLib under OSX - args = [] - link_args = [] - if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \ - 'x86_64' in get_platform() or \ - 'i386' in platform.platform(): - intel = 1 - else: - intel = 0 - if (os.path.exists('/System/Library/Frameworks' - '/Accelerate.framework/') and - 'accelerate' in libraries): - if intel: - args.extend(['-msse3']) - else: - args.extend(['-faltivec']) - args.extend([ - '-I/System/Library/Frameworks/vecLib.framework/Headers']) - link_args.extend(['-Wl,-framework', '-Wl,Accelerate']) - elif (os.path.exists('/System/Library/Frameworks' - '/vecLib.framework/') and - 'veclib' in libraries): - if intel: - args.extend(['-msse3']) - else: - args.extend(['-faltivec']) - args.extend([ - '-I/System/Library/Frameworks/vecLib.framework/Headers']) - link_args.extend(['-Wl,-framework', '-Wl,vecLib']) - - if args: - self.set_info(extra_compile_args=args, - extra_link_args=link_args, - define_macros=[('NO_ATLAS_INFO', 3), - ('HAVE_CBLAS', None)]) - - return - -class blas_src_info(system_info): - section = 'blas_src' - dir_env_var = 'BLAS_SRC' - notfounderror = 
BlasSrcNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['blas'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'daxpy.f')): - src_dir = d - break - if not src_dir: - #XXX: Get sources from netlib. May be ask first. - return - blas1 = ''' - caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot - dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2 - srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg - dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax - snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap - scabs1 - ''' - blas2 = ''' - cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv - chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv - dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv - sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger - stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc - zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2 - ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv - ''' - blas3 = ''' - cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k - dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm - ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm - ''' - sources = [os.path.join(src_dir, f + '.f') \ - for f in (blas1 + blas2 + blas3).split()] - #XXX: should we check here actual existence of source files? 
- sources = [f for f in sources if os.path.isfile(f)] - info = {'sources': sources, 'language': 'f77'} - self.set_info(**info) - - -class x11_info(system_info): - section = 'x11' - notfounderror = X11NotFoundError - _lib_names = ['X11'] - - def __init__(self): - system_info.__init__(self, - default_lib_dirs=default_x11_lib_dirs, - default_include_dirs=default_x11_include_dirs) - - def calc_info(self): - if sys.platform in ['win32']: - return - lib_dirs = self.get_lib_dirs() - include_dirs = self.get_include_dirs() - opt = self.get_option_single('x11_libs', 'libraries') - x11_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, x11_libs, []) - if info is None: - return - inc_dir = None - for d in include_dirs: - if self.combine_paths(d, 'X11/X.h'): - inc_dir = d - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir]) - self.set_info(**info) - - -class _numpy_info(system_info): - section = 'Numeric' - modulename = 'Numeric' - notfounderror = NumericNotFoundError - - def __init__(self): - include_dirs = [] - try: - module = __import__(self.modulename) - prefix = [] - for name in module.__file__.split(os.sep): - if name == 'lib': - break - prefix.append(name) - - # Ask numpy for its own include path before attempting - # anything else - try: - include_dirs.append(getattr(module, 'get_include')()) - except AttributeError: - pass - - include_dirs.append(distutils.sysconfig.get_python_inc( - prefix=os.sep.join(prefix))) - except ImportError: - pass - py_incl_dir = distutils.sysconfig.get_python_inc() - include_dirs.append(py_incl_dir) - py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True) - if py_pincl_dir not in include_dirs: - include_dirs.append(py_pincl_dir) - for d in default_include_dirs: - d = os.path.join(d, os.path.basename(py_incl_dir)) - if d not in include_dirs: - include_dirs.append(d) - system_info.__init__(self, - default_lib_dirs=[], - default_include_dirs=include_dirs) - - def 
calc_info(self): - try: - module = __import__(self.modulename) - except ImportError: - return - info = {} - macros = [] - for v in ['__version__', 'version']: - vrs = getattr(module, v, None) - if vrs is None: - continue - macros = [(self.modulename.upper() + '_VERSION', - _c_string_literal(vrs)), - (self.modulename.upper(), None)] - break - dict_append(info, define_macros=macros) - include_dirs = self.get_include_dirs() - inc_dir = None - for d in include_dirs: - if self.combine_paths(d, - os.path.join(self.modulename, - 'arrayobject.h')): - inc_dir = d - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir]) - if info: - self.set_info(**info) - return - - -class numarray_info(_numpy_info): - section = 'numarray' - modulename = 'numarray' - - -class Numeric_info(_numpy_info): - section = 'Numeric' - modulename = 'Numeric' - - -class numpy_info(_numpy_info): - section = 'numpy' - modulename = 'numpy' - - -class numerix_info(system_info): - section = 'numerix' - - def calc_info(self): - which = None, None - if os.getenv("NUMERIX"): - which = os.getenv("NUMERIX"), "environment var" - # If all the above fail, default to numpy. - if which[0] is None: - which = "numpy", "defaulted" - try: - import numpy # noqa: F401 - which = "numpy", "defaulted" - except ImportError: - msg1 = str(get_exception()) - try: - import Numeric # noqa: F401 - which = "numeric", "defaulted" - except ImportError: - msg2 = str(get_exception()) - try: - import numarray # noqa: F401 - which = "numarray", "defaulted" - except ImportError: - msg3 = str(get_exception()) - log.info(msg1) - log.info(msg2) - log.info(msg3) - which = which[0].strip().lower(), which[1] - if which[0] not in ["numeric", "numarray", "numpy"]: - raise ValueError("numerix selector must be either 'Numeric' " - "or 'numarray' or 'numpy' but the value obtained" - " from the %s was '%s'." 
% (which[1], which[0])) - os.environ['NUMERIX'] = which[0] - self.set_info(**get_info(which[0])) - - -class f2py_info(system_info): - def calc_info(self): - try: - import numpy.f2py as f2py - except ImportError: - return - f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src') - self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')], - include_dirs=[f2py_dir]) - return - - -class boost_python_info(system_info): - section = 'boost_python' - dir_env_var = 'BOOST' - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['boost*'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'libs', 'python', 'src', - 'module.cpp')): - src_dir = d - break - if not src_dir: - return - py_incl_dirs = [distutils.sysconfig.get_python_inc()] - py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True) - if py_pincl_dir not in py_incl_dirs: - py_incl_dirs.append(py_pincl_dir) - srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src') - bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp')) - bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp')) - info = {'libraries': [('boost_python_src', - {'include_dirs': [src_dir] + py_incl_dirs, - 'sources':bpl_srcs} - )], - 'include_dirs': [src_dir], - } - if info: - self.set_info(**info) - return - - -class agg2_info(system_info): - section = 'agg2' - dir_env_var = 'AGG2' - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['agg2*'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')): - src_dir = d - break - if 
not src_dir: - return - if sys.platform == 'win32': - agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform', - 'win32', 'agg_win32_bmp.cpp')) - else: - agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp')) - agg2_srcs += [os.path.join(src_dir, 'src', 'platform', - 'X11', - 'agg_platform_support.cpp')] - - info = {'libraries': - [('agg2_src', - {'sources': agg2_srcs, - 'include_dirs': [os.path.join(src_dir, 'include')], - } - )], - 'include_dirs': [os.path.join(src_dir, 'include')], - } - if info: - self.set_info(**info) - return - - -class _pkg_config_info(system_info): - section = None - config_env_var = 'PKG_CONFIG' - default_config_exe = 'pkg-config' - append_config_exe = '' - version_macro_name = None - release_macro_name = None - version_flag = '--modversion' - cflags_flag = '--cflags' - - def get_config_exe(self): - if self.config_env_var in os.environ: - return os.environ[self.config_env_var] - return self.default_config_exe - - def get_config_output(self, config_exe, option): - cmd = config_exe + ' ' + self.append_config_exe + ' ' + option - try: - o = subprocess.check_output(cmd) - except (OSError, subprocess.CalledProcessError): - pass - else: - o = filepath_from_subprocess_output(o) - return o - - def calc_info(self): - config_exe = find_executable(self.get_config_exe()) - if not config_exe: - log.warn('File not found: %s. Cannot determine %s info.' 
\ - % (config_exe, self.section)) - return - info = {} - macros = [] - libraries = [] - library_dirs = [] - include_dirs = [] - extra_link_args = [] - extra_compile_args = [] - version = self.get_config_output(config_exe, self.version_flag) - if version: - macros.append((self.__class__.__name__.split('.')[-1].upper(), - _c_string_literal(version))) - if self.version_macro_name: - macros.append((self.version_macro_name + '_%s' - % (version.replace('.', '_')), None)) - if self.release_macro_name: - release = self.get_config_output(config_exe, '--release') - if release: - macros.append((self.release_macro_name + '_%s' - % (release.replace('.', '_')), None)) - opts = self.get_config_output(config_exe, '--libs') - if opts: - for opt in opts.split(): - if opt[:2] == '-l': - libraries.append(opt[2:]) - elif opt[:2] == '-L': - library_dirs.append(opt[2:]) - else: - extra_link_args.append(opt) - opts = self.get_config_output(config_exe, self.cflags_flag) - if opts: - for opt in opts.split(): - if opt[:2] == '-I': - include_dirs.append(opt[2:]) - elif opt[:2] == '-D': - if '=' in opt: - n, v = opt[2:].split('=') - macros.append((n, v)) - else: - macros.append((opt[2:], None)) - else: - extra_compile_args.append(opt) - if macros: - dict_append(info, define_macros=macros) - if libraries: - dict_append(info, libraries=libraries) - if library_dirs: - dict_append(info, library_dirs=library_dirs) - if include_dirs: - dict_append(info, include_dirs=include_dirs) - if extra_link_args: - dict_append(info, extra_link_args=extra_link_args) - if extra_compile_args: - dict_append(info, extra_compile_args=extra_compile_args) - if info: - self.set_info(**info) - return - - -class wx_info(_pkg_config_info): - section = 'wx' - config_env_var = 'WX_CONFIG' - default_config_exe = 'wx-config' - append_config_exe = '' - version_macro_name = 'WX_VERSION' - release_macro_name = 'WX_RELEASE' - version_flag = '--version' - cflags_flag = '--cxxflags' - - -class 
gdk_pixbuf_xlib_2_info(_pkg_config_info): - section = 'gdk_pixbuf_xlib_2' - append_config_exe = 'gdk-pixbuf-xlib-2.0' - version_macro_name = 'GDK_PIXBUF_XLIB_VERSION' - - -class gdk_pixbuf_2_info(_pkg_config_info): - section = 'gdk_pixbuf_2' - append_config_exe = 'gdk-pixbuf-2.0' - version_macro_name = 'GDK_PIXBUF_VERSION' - - -class gdk_x11_2_info(_pkg_config_info): - section = 'gdk_x11_2' - append_config_exe = 'gdk-x11-2.0' - version_macro_name = 'GDK_X11_VERSION' - - -class gdk_2_info(_pkg_config_info): - section = 'gdk_2' - append_config_exe = 'gdk-2.0' - version_macro_name = 'GDK_VERSION' - - -class gdk_info(_pkg_config_info): - section = 'gdk' - append_config_exe = 'gdk' - version_macro_name = 'GDK_VERSION' - - -class gtkp_x11_2_info(_pkg_config_info): - section = 'gtkp_x11_2' - append_config_exe = 'gtk+-x11-2.0' - version_macro_name = 'GTK_X11_VERSION' - - -class gtkp_2_info(_pkg_config_info): - section = 'gtkp_2' - append_config_exe = 'gtk+-2.0' - version_macro_name = 'GTK_VERSION' - - -class xft_info(_pkg_config_info): - section = 'xft' - append_config_exe = 'xft' - version_macro_name = 'XFT_VERSION' - - -class freetype2_info(_pkg_config_info): - section = 'freetype2' - append_config_exe = 'freetype2' - version_macro_name = 'FREETYPE2_VERSION' - - -class amd_info(system_info): - section = 'amd' - dir_env_var = 'AMD' - _lib_names = ['amd'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - opt = self.get_option_single('amd_libs', 'libraries') - amd_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, amd_libs, []) - if info is None: - return - - include_dirs = self.get_include_dirs() - - inc_dir = None - for d in include_dirs: - p = self.combine_paths(d, 'amd.h') - if p: - inc_dir = os.path.dirname(p[0]) - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir], - define_macros=[('SCIPY_AMD_H', None)], - swig_opts=['-I' + inc_dir]) - - self.set_info(**info) - return - - -class umfpack_info(system_info): 
- section = 'umfpack' - dir_env_var = 'UMFPACK' - notfounderror = UmfpackNotFoundError - _lib_names = ['umfpack'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - opt = self.get_option_single('umfpack_libs', 'libraries') - umfpack_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, umfpack_libs, []) - if info is None: - return - - include_dirs = self.get_include_dirs() - - inc_dir = None - for d in include_dirs: - p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h') - if p: - inc_dir = os.path.dirname(p[0]) - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir], - define_macros=[('SCIPY_UMFPACK_H', None)], - swig_opts=['-I' + inc_dir]) - - dict_append(info, **get_info('amd')) - - self.set_info(**info) - return - - -def combine_paths(*args, **kws): - """ Return a list of existing paths composed by all combinations of - items from arguments. - """ - r = [] - for a in args: - if not a: - continue - if is_string(a): - a = [a] - r.append(a) - args = r - if not args: - return [] - if len(args) == 1: - result = reduce(lambda a, b: a + b, map(glob, args[0]), []) - elif len(args) == 2: - result = [] - for a0 in args[0]: - for a1 in args[1]: - result.extend(glob(os.path.join(a0, a1))) - else: - result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:])) - log.debug('(paths: %s)', ','.join(result)) - return result - -language_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3} -inv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'} - - -def dict_append(d, **kws): - languages = [] - for k, v in kws.items(): - if k == 'language': - languages.append(v) - continue - if k in d: - if k in ['library_dirs', 'include_dirs', - 'extra_compile_args', 'extra_link_args', - 'runtime_library_dirs', 'define_macros']: - [d[k].append(vv) for vv in v if vv not in d[k]] - else: - d[k].extend(v) - else: - d[k] = v - if languages: - l = inv_language_map[max([language_map.get(l, 0) for l in languages])] - d['language'] = l - 
return - - -def parseCmdLine(argv=(None,)): - import optparse - parser = optparse.OptionParser("usage: %prog [-v] [info objs]") - parser.add_option('-v', '--verbose', action='store_true', dest='verbose', - default=False, - help='be verbose and print more messages') - - opts, args = parser.parse_args(args=argv[1:]) - return opts, args - - -def show_all(argv=None): - import inspect - if argv is None: - argv = sys.argv - opts, args = parseCmdLine(argv) - if opts.verbose: - log.set_threshold(log.DEBUG) - else: - log.set_threshold(log.INFO) - show_only = [] - for n in args: - if n[-5:] != '_info': - n = n + '_info' - show_only.append(n) - show_all = not show_only - _gdict_ = globals().copy() - for name, c in _gdict_.items(): - if not inspect.isclass(c): - continue - if not issubclass(c, system_info) or c is system_info: - continue - if not show_all: - if name not in show_only: - continue - del show_only[show_only.index(name)] - conf = c() - conf.verbosity = 2 - # FIXME: r not used - r = conf.get_info() - if show_only: - log.info('Info classes not defined: %s', ','.join(show_only)) - -if __name__ == "__main__": - show_all() diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/tests/__init__.py b/venv/lib/python3.7/site-packages/numpy/distutils/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_exec_command.py b/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_exec_command.py deleted file mode 100644 index 37912f5..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_exec_command.py +++ /dev/null @@ -1,220 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import sys -from tempfile import TemporaryFile - -from numpy.distutils import exec_command -from numpy.distutils.exec_command import get_pythonexe -from numpy.testing import tempdir, assert_, assert_warns - -# In python 3 stdout, stderr are text (unicode 
compliant) devices, so to -# emulate them import StringIO from the io module. -if sys.version_info[0] >= 3: - from io import StringIO -else: - from StringIO import StringIO - -class redirect_stdout(object): - """Context manager to redirect stdout for exec_command test.""" - def __init__(self, stdout=None): - self._stdout = stdout or sys.stdout - - def __enter__(self): - self.old_stdout = sys.stdout - sys.stdout = self._stdout - - def __exit__(self, exc_type, exc_value, traceback): - self._stdout.flush() - sys.stdout = self.old_stdout - # note: closing sys.stdout won't close it. - self._stdout.close() - -class redirect_stderr(object): - """Context manager to redirect stderr for exec_command test.""" - def __init__(self, stderr=None): - self._stderr = stderr or sys.stderr - - def __enter__(self): - self.old_stderr = sys.stderr - sys.stderr = self._stderr - - def __exit__(self, exc_type, exc_value, traceback): - self._stderr.flush() - sys.stderr = self.old_stderr - # note: closing sys.stderr won't close it. - self._stderr.close() - -class emulate_nonposix(object): - """Context manager to emulate os.name != 'posix' """ - def __init__(self, osname='non-posix'): - self._new_name = osname - - def __enter__(self): - self._old_name = os.name - os.name = self._new_name - - def __exit__(self, exc_type, exc_value, traceback): - os.name = self._old_name - - -def test_exec_command_stdout(): - # Regression test for gh-2999 and gh-2915. - # There are several packages (nose, scipy.weave.inline, Sage inline - # Fortran) that replace stdout, in which case it doesn't have a fileno - # method. This is tested here, with a do-nothing command that fails if the - # presence of fileno() is assumed in exec_command. - - # The code has a special case for posix systems, so if we are on posix test - # both that the special case works and that the generic code works. 
- - # Test posix version: - with redirect_stdout(StringIO()): - with redirect_stderr(TemporaryFile()): - with assert_warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - - if os.name == 'posix': - # Test general (non-posix) version: - with emulate_nonposix(): - with redirect_stdout(StringIO()): - with redirect_stderr(TemporaryFile()): - with assert_warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - -def test_exec_command_stderr(): - # Test posix version: - with redirect_stdout(TemporaryFile(mode='w+')): - with redirect_stderr(StringIO()): - with assert_warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - - if os.name == 'posix': - # Test general (non-posix) version: - with emulate_nonposix(): - with redirect_stdout(TemporaryFile()): - with redirect_stderr(StringIO()): - with assert_warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - - -class TestExecCommand(object): - def setup(self): - self.pyexe = get_pythonexe() - - def check_nt(self, **kws): - s, o = exec_command.exec_command('cmd /C echo path=%path%') - assert_(s == 0) - assert_(o != '') - - s, o = exec_command.exec_command( - '"%s" -c "import sys;sys.stderr.write(sys.platform)"' % self.pyexe) - assert_(s == 0) - assert_(o == 'win32') - - def check_posix(self, **kws): - s, o = exec_command.exec_command("echo Hello", **kws) - assert_(s == 0) - assert_(o == 'Hello') - - s, o = exec_command.exec_command('echo $AAA', **kws) - assert_(s == 0) - assert_(o == '') - - s, o = exec_command.exec_command('echo "$AAA"', AAA='Tere', **kws) - assert_(s == 0) - assert_(o == 'Tere') - - s, o = exec_command.exec_command('echo "$AAA"', **kws) - assert_(s == 0) - assert_(o == '') - - if 'BBB' not in os.environ: - os.environ['BBB'] = 'Hi' - s, o = exec_command.exec_command('echo "$BBB"', **kws) - assert_(s == 0) - assert_(o == 'Hi') - - s, o = exec_command.exec_command('echo "$BBB"', BBB='Hey', **kws) - assert_(s == 0) - assert_(o == 'Hey') - - s, o = 
exec_command.exec_command('echo "$BBB"', **kws) - assert_(s == 0) - assert_(o == 'Hi') - - del os.environ['BBB'] - - s, o = exec_command.exec_command('echo "$BBB"', **kws) - assert_(s == 0) - assert_(o == '') - - - s, o = exec_command.exec_command('this_is_not_a_command', **kws) - assert_(s != 0) - assert_(o != '') - - s, o = exec_command.exec_command('echo path=$PATH', **kws) - assert_(s == 0) - assert_(o != '') - - s, o = exec_command.exec_command( - '"%s" -c "import sys,os;sys.stderr.write(os.name)"' % - self.pyexe, **kws) - assert_(s == 0) - assert_(o == 'posix') - - def check_basic(self, *kws): - s, o = exec_command.exec_command( - '"%s" -c "raise \'Ignore me.\'"' % self.pyexe, **kws) - assert_(s != 0) - assert_(o != '') - - s, o = exec_command.exec_command( - '"%s" -c "import sys;sys.stderr.write(\'0\');' - 'sys.stderr.write(\'1\');sys.stderr.write(\'2\')"' % - self.pyexe, **kws) - assert_(s == 0) - assert_(o == '012') - - s, o = exec_command.exec_command( - '"%s" -c "import sys;sys.exit(15)"' % self.pyexe, **kws) - assert_(s == 15) - assert_(o == '') - - s, o = exec_command.exec_command( - '"%s" -c "print(\'Heipa\'")' % self.pyexe, **kws) - assert_(s == 0) - assert_(o == 'Heipa') - - def check_execute_in(self, **kws): - with tempdir() as tmpdir: - fn = "file" - tmpfile = os.path.join(tmpdir, fn) - f = open(tmpfile, 'w') - f.write('Hello') - f.close() - - s, o = exec_command.exec_command( - '"%s" -c "f = open(\'%s\', \'r\'); f.close()"' % - (self.pyexe, fn), **kws) - assert_(s != 0) - assert_(o != '') - s, o = exec_command.exec_command( - '"%s" -c "f = open(\'%s\', \'r\'); print(f.read()); ' - 'f.close()"' % (self.pyexe, fn), execute_in=tmpdir, **kws) - assert_(s == 0) - assert_(o == 'Hello') - - def test_basic(self): - with redirect_stdout(StringIO()): - with redirect_stderr(StringIO()): - with assert_warns(DeprecationWarning): - if os.name == "posix": - self.check_posix(use_tee=0) - self.check_posix(use_tee=1) - elif os.name == "nt": - 
self.check_nt(use_tee=0) - self.check_nt(use_tee=1) - self.check_execute_in(use_tee=0) - self.check_execute_in(use_tee=1) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_fcompiler.py b/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_fcompiler.py deleted file mode 100644 index 6d245fb..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_fcompiler.py +++ /dev/null @@ -1,47 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import pytest - -from numpy.testing import assert_, suppress_warnings -import numpy.distutils.fcompiler - -customizable_flags = [ - ('f77', 'F77FLAGS'), - ('f90', 'F90FLAGS'), - ('free', 'FREEFLAGS'), - ('arch', 'FARCH'), - ('debug', 'FDEBUG'), - ('flags', 'FFLAGS'), - ('linker_so', 'LDFLAGS'), -] - - -def test_fcompiler_flags(monkeypatch): - monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '0') - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='none') - flag_vars = fc.flag_vars.clone(lambda *args, **kwargs: None) - - for opt, envvar in customizable_flags: - new_flag = '-dummy-{}-flag'.format(opt) - prev_flags = getattr(flag_vars, opt) - - monkeypatch.setenv(envvar, new_flag) - new_flags = getattr(flag_vars, opt) - - monkeypatch.delenv(envvar) - assert_(new_flags == [new_flag]) - - monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '1') - - for opt, envvar in customizable_flags: - new_flag = '-dummy-{}-flag'.format(opt) - prev_flags = getattr(flag_vars, opt) - monkeypatch.setenv(envvar, new_flag) - new_flags = getattr(flag_vars, opt) - - monkeypatch.delenv(envvar) - if prev_flags is None: - assert_(new_flags == [new_flag]) - else: - assert_(new_flags == prev_flags + [new_flag]) - diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py b/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py deleted file mode 100644 index 49208aa..0000000 --- 
a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py +++ /dev/null @@ -1,57 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.testing import assert_ - -import numpy.distutils.fcompiler - -g77_version_strings = [ - ('GNU Fortran 0.5.25 20010319 (prerelease)', '0.5.25'), - ('GNU Fortran (GCC 3.2) 3.2 20020814 (release)', '3.2'), - ('GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian)', '3.3.3'), - ('GNU Fortran (GCC) 3.3.3 (Debian 20040401)', '3.3.3'), - ('GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2' - ' 20030222 (Red Hat Linux 3.2.2-5)', '3.2.2'), -] - -gfortran_version_strings = [ - ('GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3))', - '4.0.3'), - ('GNU Fortran 95 (GCC) 4.1.0', '4.1.0'), - ('GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental)', '4.2.0'), - ('GNU Fortran (GCC) 4.3.0 20070316 (experimental)', '4.3.0'), - ('GNU Fortran (rubenvb-4.8.0) 4.8.0', '4.8.0'), - ('4.8.0', '4.8.0'), - ('4.0.3-7', '4.0.3'), - ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n4.9.1", - '4.9.1'), - ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n" - "gfortran: warning: yet another warning\n4.9.1", - '4.9.1'), - ('GNU Fortran (crosstool-NG 8a21ab48) 7.2.0', '7.2.0') -] - -class TestG77Versions(object): - def test_g77_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') - for vs, version in g77_version_strings: - v = fc.version_match(vs) - assert_(v == version, (vs, v)) - - def test_not_g77(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') - for vs, _ in gfortran_version_strings: - v = fc.version_match(vs) - assert_(v is None, (vs, v)) - -class TestGFortranVersions(object): - def test_gfortran_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') - for vs, version in gfortran_version_strings: - v = fc.version_match(vs) - assert_(v == version, (vs, v)) - - def 
test_not_gfortran(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') - for vs, _ in g77_version_strings: - v = fc.version_match(vs) - assert_(v is None, (vs, v)) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_fcompiler_intel.py b/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_fcompiler_intel.py deleted file mode 100644 index 5e014ba..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_fcompiler_intel.py +++ /dev/null @@ -1,32 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy.distutils.fcompiler -from numpy.testing import assert_ - - -intel_32bit_version_strings = [ - ("Intel(R) Fortran Intel(R) 32-bit Compiler Professional for applications" - "running on Intel(R) 32, Version 11.1", '11.1'), -] - -intel_64bit_version_strings = [ - ("Intel(R) Fortran IA-64 Compiler Professional for applications" - "running on IA-64, Version 11.0", '11.0'), - ("Intel(R) Fortran Intel(R) 64 Compiler Professional for applications" - "running on Intel(R) 64, Version 11.1", '11.1') -] - -class TestIntelFCompilerVersions(object): - def test_32bit_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel') - for vs, version in intel_32bit_version_strings: - v = fc.version_match(vs) - assert_(v == version) - - -class TestIntelEM64TFCompilerVersions(object): - def test_64bit_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem') - for vs, version in intel_64bit_version_strings: - v = fc.version_match(vs) - assert_(v == version) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py b/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py deleted file mode 100644 index 1c93605..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py +++ /dev/null @@ -1,24 +0,0 @@ -from __future__ import division, absolute_import, print_function 
- -from numpy.testing import assert_ -import numpy.distutils.fcompiler - -nag_version_strings = [('nagfor', 'NAG Fortran Compiler Release ' - '6.2(Chiyoda) Build 6200', '6.2'), - ('nagfor', 'NAG Fortran Compiler Release ' - '6.1(Tozai) Build 6136', '6.1'), - ('nagfor', 'NAG Fortran Compiler Release ' - '6.0(Hibiya) Build 1021', '6.0'), - ('nagfor', 'NAG Fortran Compiler Release ' - '5.3.2(971)', '5.3.2'), - ('nag', 'NAGWare Fortran 95 compiler Release 5.1' - '(347,355-367,375,380-383,389,394,399,401-402,407,' - '431,435,437,446,459-460,463,472,494,496,503,508,' - '511,517,529,555,557,565)', '5.1')] - -class TestNagFCompilerVersions(object): - def test_version_match(self): - for comp, vs, version in nag_version_strings: - fc = numpy.distutils.fcompiler.new_fcompiler(compiler=comp) - v = fc.version_match(vs) - assert_(v == version) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_from_template.py b/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_from_template.py deleted file mode 100644 index 5881754..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_from_template.py +++ /dev/null @@ -1,44 +0,0 @@ - -from numpy.distutils.from_template import process_str -from numpy.testing import assert_equal - - -pyf_src = """ -python module foo - <_rd=real,double precision> - interface - subroutine foosub(tol) - <_rd>, intent(in,out) :: tol - end subroutine foosub - end interface -end python module foo -""" - -expected_pyf = """ -python module foo - interface - subroutine sfoosub(tol) - real, intent(in,out) :: tol - end subroutine sfoosub - subroutine dfoosub(tol) - double precision, intent(in,out) :: tol - end subroutine dfoosub - end interface -end python module foo -""" - - -def normalize_whitespace(s): - """ - Remove leading and trailing whitespace, and convert internal - stretches of whitespace to a single space. 
- """ - return ' '.join(s.split()) - - -def test_from_template(): - """Regression test for gh-10712.""" - pyf = process_str(pyf_src) - normalized_pyf = normalize_whitespace(pyf) - normalized_expected_pyf = normalize_whitespace(expected_pyf) - assert_equal(normalized_pyf, normalized_expected_pyf) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_mingw32ccompiler.py b/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_mingw32ccompiler.py deleted file mode 100644 index ebedacb..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_mingw32ccompiler.py +++ /dev/null @@ -1,42 +0,0 @@ -import shutil -import subprocess -import sys -import pytest - -from numpy.distutils import mingw32ccompiler - - -@pytest.mark.skipif(sys.platform != 'win32', reason='win32 only test') -def test_build_import(): - '''Test the mingw32ccompiler.build_import_library, which builds a - `python.a` from the MSVC `python.lib` - ''' - - # make sure `nm.exe` exists and supports the current python version. This - # can get mixed up when the PATH has a 64-bit nm but the python is 32-bit - try: - out = subprocess.check_output(['nm.exe', '--help']) - except FileNotFoundError: - pytest.skip("'nm.exe' not on path, is mingw installed?") - supported = out[out.find(b'supported targets:'):] - if sys.maxsize < 2**32: - if b'pe-i386' not in supported: - raise ValueError("'nm.exe' found but it does not support 32-bit " - "dlls when using 32-bit python. Supported " - "formats: '%s'" % supported) - elif b'pe-x86-64' not in supported: - raise ValueError("'nm.exe' found but it does not support 64-bit " - "dlls when using 64-bit python. 
Supported " - "formats: '%s'" % supported) - # Hide the import library to force a build - has_import_lib, fullpath = mingw32ccompiler._check_for_import_lib() - if has_import_lib: - shutil.move(fullpath, fullpath + '.bak') - - try: - # Whew, now we can actually test the function - mingw32ccompiler.build_import_library() - - finally: - if has_import_lib: - shutil.move(fullpath + '.bak', fullpath) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_misc_util.py b/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_misc_util.py deleted file mode 100644 index 3e239cf..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_misc_util.py +++ /dev/null @@ -1,84 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from os.path import join, sep, dirname - -from numpy.distutils.misc_util import ( - appendpath, minrelpath, gpaths, get_shared_lib_extension, get_info - ) -from numpy.testing import ( - assert_, assert_equal - ) - -ajoin = lambda *paths: join(*((sep,)+paths)) - -class TestAppendpath(object): - - def test_1(self): - assert_equal(appendpath('prefix', 'name'), join('prefix', 'name')) - assert_equal(appendpath('/prefix', 'name'), ajoin('prefix', 'name')) - assert_equal(appendpath('/prefix', '/name'), ajoin('prefix', 'name')) - assert_equal(appendpath('prefix', '/name'), join('prefix', 'name')) - - def test_2(self): - assert_equal(appendpath('prefix/sub', 'name'), - join('prefix', 'sub', 'name')) - assert_equal(appendpath('prefix/sub', 'sup/name'), - join('prefix', 'sub', 'sup', 'name')) - assert_equal(appendpath('/prefix/sub', '/prefix/name'), - ajoin('prefix', 'sub', 'name')) - - def test_3(self): - assert_equal(appendpath('/prefix/sub', '/prefix/sup/name'), - ajoin('prefix', 'sub', 'sup', 'name')) - assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sup/sup2/name'), - ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name')) - assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'), - 
ajoin('prefix', 'sub', 'sub2', 'sup', 'name')) - -class TestMinrelpath(object): - - def test_1(self): - n = lambda path: path.replace('/', sep) - assert_equal(minrelpath(n('aa/bb')), n('aa/bb')) - assert_equal(minrelpath('..'), '..') - assert_equal(minrelpath(n('aa/..')), '') - assert_equal(minrelpath(n('aa/../bb')), 'bb') - assert_equal(minrelpath(n('aa/bb/..')), 'aa') - assert_equal(minrelpath(n('aa/bb/../..')), '') - assert_equal(minrelpath(n('aa/bb/../cc/../dd')), n('aa/dd')) - assert_equal(minrelpath(n('.././..')), n('../..')) - assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd')) - -class TestGpaths(object): - - def test_gpaths(self): - local_path = minrelpath(join(dirname(__file__), '..')) - ls = gpaths('command/*.py', local_path) - assert_(join(local_path, 'command', 'build_src.py') in ls, repr(ls)) - f = gpaths('system_info.py', local_path) - assert_(join(local_path, 'system_info.py') == f[0], repr(f)) - -class TestSharedExtension(object): - - def test_get_shared_lib_extension(self): - import sys - ext = get_shared_lib_extension(is_python_ext=False) - if sys.platform.startswith('linux'): - assert_equal(ext, '.so') - elif sys.platform.startswith('gnukfreebsd'): - assert_equal(ext, '.so') - elif sys.platform.startswith('darwin'): - assert_equal(ext, '.dylib') - elif sys.platform.startswith('win'): - assert_equal(ext, '.dll') - # just check for no crash - assert_(get_shared_lib_extension(is_python_ext=True)) - - -def test_installed_npymath_ini(): - # Regression test for gh-7707. If npymath.ini wasn't installed, then this - # will give an error. 
- info = get_info('npymath') - - assert isinstance(info, dict) - assert "define_macros" in info diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_npy_pkg_config.py b/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_npy_pkg_config.py deleted file mode 100644 index 537e16e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_npy_pkg_config.py +++ /dev/null @@ -1,86 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os - -from numpy.distutils.npy_pkg_config import read_config, parse_flags -from numpy.testing import temppath, assert_ - -simple = """\ -[meta] -Name = foo -Description = foo lib -Version = 0.1 - -[default] -cflags = -I/usr/include -libs = -L/usr/lib -""" -simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib', - 'version': '0.1', 'name': 'foo'} - -simple_variable = """\ -[meta] -Name = foo -Description = foo lib -Version = 0.1 - -[variables] -prefix = /foo/bar -libdir = ${prefix}/lib -includedir = ${prefix}/include - -[default] -cflags = -I${includedir} -libs = -L${libdir} -""" -simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib', - 'version': '0.1', 'name': 'foo'} - -class TestLibraryInfo(object): - def test_simple(self): - with temppath('foo.ini') as path: - with open(path, 'w') as f: - f.write(simple) - pkg = os.path.splitext(path)[0] - out = read_config(pkg) - - assert_(out.cflags() == simple_d['cflags']) - assert_(out.libs() == simple_d['libflags']) - assert_(out.name == simple_d['name']) - assert_(out.version == simple_d['version']) - - def test_simple_variable(self): - with temppath('foo.ini') as path: - with open(path, 'w') as f: - f.write(simple_variable) - pkg = os.path.splitext(path)[0] - out = read_config(pkg) - - assert_(out.cflags() == simple_variable_d['cflags']) - assert_(out.libs() == simple_variable_d['libflags']) - assert_(out.name == simple_variable_d['name']) - assert_(out.version == 
simple_variable_d['version']) - out.vars['prefix'] = '/Users/david' - assert_(out.cflags() == '-I/Users/david/include') - -class TestParseFlags(object): - def test_simple_cflags(self): - d = parse_flags("-I/usr/include") - assert_(d['include_dirs'] == ['/usr/include']) - - d = parse_flags("-I/usr/include -DFOO") - assert_(d['include_dirs'] == ['/usr/include']) - assert_(d['macros'] == ['FOO']) - - d = parse_flags("-I /usr/include -DFOO") - assert_(d['include_dirs'] == ['/usr/include']) - assert_(d['macros'] == ['FOO']) - - def test_simple_lflags(self): - d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar") - assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) - assert_(d['libraries'] == ['foo', 'bar']) - - d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar") - assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) - assert_(d['libraries'] == ['foo', 'bar']) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_shell_utils.py b/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_shell_utils.py deleted file mode 100644 index a034424..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_shell_utils.py +++ /dev/null @@ -1,79 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import pytest -import subprocess -import os -import json -import sys - -from numpy.distutils import _shell_utils - -argv_cases = [ - [r'exe'], - [r'path/exe'], - [r'path\exe'], - [r'\\server\path\exe'], - [r'path to/exe'], - [r'path to\exe'], - - [r'exe', '--flag'], - [r'path/exe', '--flag'], - [r'path\exe', '--flag'], - [r'path to/exe', '--flag'], - [r'path to\exe', '--flag'], - - # flags containing literal quotes in their name - [r'path to/exe', '--flag-"quoted"'], - [r'path to\exe', '--flag-"quoted"'], - [r'path to/exe', '"--flag-quoted"'], - [r'path to\exe', '"--flag-quoted"'], -] - - -@pytest.fixture(params=[ - _shell_utils.WindowsParser, - _shell_utils.PosixParser -]) -def Parser(request): - return request.param - - 
-@pytest.fixture -def runner(Parser): - if Parser != _shell_utils.NativeParser: - pytest.skip('Unable to run with non-native parser') - - if Parser == _shell_utils.WindowsParser: - return lambda cmd: subprocess.check_output(cmd) - elif Parser == _shell_utils.PosixParser: - # posix has no non-shell string parsing - return lambda cmd: subprocess.check_output(cmd, shell=True) - else: - raise NotImplementedError - - -@pytest.mark.parametrize('argv', argv_cases) -def test_join_matches_subprocess(Parser, runner, argv): - """ - Test that join produces strings understood by subprocess - """ - # invoke python to return its arguments as json - cmd = [ - sys.executable, '-c', - 'import json, sys; print(json.dumps(sys.argv[1:]))' - ] - joined = Parser.join(cmd + argv) - json_out = runner(joined).decode() - assert json.loads(json_out) == argv - - -@pytest.mark.parametrize('argv', argv_cases) -def test_roundtrip(Parser, argv): - """ - Test that split is the inverse operation of join - """ - try: - joined = Parser.join(argv) - assert argv == Parser.split(joined) - except NotImplementedError: - pytest.skip("Not implemented") diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_system_info.py b/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_system_info.py deleted file mode 100644 index 3c76389..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_system_info.py +++ /dev/null @@ -1,257 +0,0 @@ -from __future__ import division, print_function - -import os -import shutil -import pytest -from tempfile import mkstemp, mkdtemp -from subprocess import Popen, PIPE -from distutils.errors import DistutilsError - -from numpy.testing import assert_, assert_equal, assert_raises -from numpy.distutils import ccompiler, customized_ccompiler -from numpy.distutils.system_info import system_info, ConfigParser -from numpy.distutils.system_info import AliasedOptionError -from numpy.distutils.system_info import default_lib_dirs, default_include_dirs 
-from numpy.distutils import _shell_utils - - -def get_class(name, notfound_action=1): - """ - notfound_action: - 0 - do nothing - 1 - display warning message - 2 - raise error - """ - cl = {'temp1': Temp1Info, - 'temp2': Temp2Info, - 'duplicate_options': DuplicateOptionInfo, - }.get(name.lower(), _system_info) - return cl() - -simple_site = """ -[ALL] -library_dirs = {dir1:s}{pathsep:s}{dir2:s} -libraries = {lib1:s},{lib2:s} -extra_compile_args = -I/fake/directory -I"/path with/spaces" -Os -runtime_library_dirs = {dir1:s} - -[temp1] -library_dirs = {dir1:s} -libraries = {lib1:s} -runtime_library_dirs = {dir1:s} - -[temp2] -library_dirs = {dir2:s} -libraries = {lib2:s} -extra_link_args = -Wl,-rpath={lib2_escaped:s} -rpath = {dir2:s} - -[duplicate_options] -mylib_libs = {lib1:s} -libraries = {lib2:s} -""" -site_cfg = simple_site - -fakelib_c_text = """ -/* This file is generated from numpy/distutils/testing/test_system_info.py */ -#include -void foo(void) { - printf("Hello foo"); -} -void bar(void) { - printf("Hello bar"); -} -""" - -def have_compiler(): - """ Return True if there appears to be an executable compiler - """ - compiler = customized_ccompiler() - try: - cmd = compiler.compiler # Unix compilers - except AttributeError: - try: - if not compiler.initialized: - compiler.initialize() # MSVC is different - except (DistutilsError, ValueError): - return False - cmd = [compiler.cc] - try: - p = Popen(cmd, stdout=PIPE, stderr=PIPE) - p.stdout.close() - p.stderr.close() - p.wait() - except OSError: - return False - return True - - -HAVE_COMPILER = have_compiler() - - -class _system_info(system_info): - - def __init__(self, - default_lib_dirs=default_lib_dirs, - default_include_dirs=default_include_dirs, - verbosity=1, - ): - self.__class__.info = {} - self.local_prefixes = [] - defaults = {'library_dirs': '', - 'include_dirs': '', - 'runtime_library_dirs': '', - 'rpath': '', - 'src_dirs': '', - 'search_static_first': "0", - 'extra_compile_args': '', - 
'extra_link_args': ''} - self.cp = ConfigParser(defaults) - # We have to parse the config files afterwards - # to have a consistent temporary filepath - - def _check_libs(self, lib_dirs, libs, opt_libs, exts): - """Override _check_libs to return with all dirs """ - info = {'libraries': libs, 'library_dirs': lib_dirs} - return info - - -class Temp1Info(_system_info): - """For testing purposes""" - section = 'temp1' - - -class Temp2Info(_system_info): - """For testing purposes""" - section = 'temp2' - -class DuplicateOptionInfo(_system_info): - """For testing purposes""" - section = 'duplicate_options' - - -class TestSystemInfoReading(object): - - def setup(self): - """ Create the libraries """ - # Create 2 sources and 2 libraries - self._dir1 = mkdtemp() - self._src1 = os.path.join(self._dir1, 'foo.c') - self._lib1 = os.path.join(self._dir1, 'libfoo.so') - self._dir2 = mkdtemp() - self._src2 = os.path.join(self._dir2, 'bar.c') - self._lib2 = os.path.join(self._dir2, 'libbar.so') - # Update local site.cfg - global simple_site, site_cfg - site_cfg = simple_site.format(**{ - 'dir1': self._dir1, - 'lib1': self._lib1, - 'dir2': self._dir2, - 'lib2': self._lib2, - 'pathsep': os.pathsep, - 'lib2_escaped': _shell_utils.NativeParser.join([self._lib2]) - }) - # Write site.cfg - fd, self._sitecfg = mkstemp() - os.close(fd) - with open(self._sitecfg, 'w') as fd: - fd.write(site_cfg) - # Write the sources - with open(self._src1, 'w') as fd: - fd.write(fakelib_c_text) - with open(self._src2, 'w') as fd: - fd.write(fakelib_c_text) - # We create all class-instances - - def site_and_parse(c, site_cfg): - c.files = [site_cfg] - c.parse_config_files() - return c - self.c_default = site_and_parse(get_class('default'), self._sitecfg) - self.c_temp1 = site_and_parse(get_class('temp1'), self._sitecfg) - self.c_temp2 = site_and_parse(get_class('temp2'), self._sitecfg) - self.c_dup_options = site_and_parse(get_class('duplicate_options'), - self._sitecfg) - - - def teardown(self): - # Do 
each removal separately - try: - shutil.rmtree(self._dir1) - except Exception: - pass - try: - shutil.rmtree(self._dir2) - except Exception: - pass - try: - os.remove(self._sitecfg) - except Exception: - pass - - def test_all(self): - # Read in all information in the ALL block - tsi = self.c_default - assert_equal(tsi.get_lib_dirs(), [self._dir1, self._dir2]) - assert_equal(tsi.get_libraries(), [self._lib1, self._lib2]) - assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1]) - extra = tsi.calc_extra_info() - assert_equal(extra['extra_compile_args'], ['-I/fake/directory', '-I/path with/spaces', '-Os']) - - def test_temp1(self): - # Read in all information in the temp1 block - tsi = self.c_temp1 - assert_equal(tsi.get_lib_dirs(), [self._dir1]) - assert_equal(tsi.get_libraries(), [self._lib1]) - assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1]) - - def test_temp2(self): - # Read in all information in the temp2 block - tsi = self.c_temp2 - assert_equal(tsi.get_lib_dirs(), [self._dir2]) - assert_equal(tsi.get_libraries(), [self._lib2]) - # Now from rpath and not runtime_library_dirs - assert_equal(tsi.get_runtime_lib_dirs(key='rpath'), [self._dir2]) - extra = tsi.calc_extra_info() - assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2]) - - def test_duplicate_options(self): - # Ensure that duplicates are raising an AliasedOptionError - tsi = self.c_dup_options - assert_raises(AliasedOptionError, tsi.get_option_single, "mylib_libs", "libraries") - assert_equal(tsi.get_libs("mylib_libs", [self._lib1]), [self._lib1]) - assert_equal(tsi.get_libs("libraries", [self._lib2]), [self._lib2]) - - @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler") - def test_compile1(self): - # Compile source and link the first source - c = customized_ccompiler() - previousDir = os.getcwd() - try: - # Change directory to not screw up directories - os.chdir(self._dir1) - c.compile([os.path.basename(self._src1)], output_dir=self._dir1) - # Ensure that the object 
exists - assert_(os.path.isfile(self._src1.replace('.c', '.o')) or - os.path.isfile(self._src1.replace('.c', '.obj'))) - finally: - os.chdir(previousDir) - - @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler") - @pytest.mark.skipif('msvc' in repr(ccompiler.new_compiler()), - reason="Fails with MSVC compiler ") - def test_compile2(self): - # Compile source and link the second source - tsi = self.c_temp2 - c = customized_ccompiler() - extra_link_args = tsi.calc_extra_info()['extra_link_args'] - previousDir = os.getcwd() - try: - # Change directory to not screw up directories - os.chdir(self._dir2) - c.compile([os.path.basename(self._src2)], output_dir=self._dir2, - extra_postargs=extra_link_args) - # Ensure that the object exists - assert_(os.path.isfile(self._src2.replace('.c', '.o'))) - finally: - os.chdir(previousDir) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/unixccompiler.py b/venv/lib/python3.7/site-packages/numpy/distutils/unixccompiler.py deleted file mode 100644 index 11b2cce..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/unixccompiler.py +++ /dev/null @@ -1,139 +0,0 @@ -""" -unixccompiler - can handle very long argument lists for ar. - -""" -from __future__ import division, absolute_import, print_function - -import os - -from distutils.errors import DistutilsExecError, CompileError -from distutils.unixccompiler import * -from numpy.distutils.ccompiler import replace_method -from numpy.distutils.compat import get_exception -from numpy.distutils.misc_util import _commandline_dep_string - -if sys.version_info[0] < 3: - from . 
import log -else: - from numpy.distutils import log - -# Note that UnixCCompiler._compile appeared in Python 2.3 -def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - """Compile a single source files with a Unix-style compiler.""" - # HP ad-hoc fix, see ticket 1383 - ccomp = self.compiler_so - if ccomp[0] == 'aCC': - # remove flags that will trigger ANSI-C mode for aCC - if '-Ae' in ccomp: - ccomp.remove('-Ae') - if '-Aa' in ccomp: - ccomp.remove('-Aa') - # add flags for (almost) sane C++ handling - ccomp += ['-AA'] - self.compiler_so = ccomp - # ensure OPT environment variable is read - if 'OPT' in os.environ: - from distutils.sysconfig import get_config_vars - opt = " ".join(os.environ['OPT'].split()) - gcv_opt = " ".join(get_config_vars('OPT')[0].split()) - ccomp_s = " ".join(self.compiler_so) - if opt not in ccomp_s: - ccomp_s = ccomp_s.replace(gcv_opt, opt) - self.compiler_so = ccomp_s.split() - llink_s = " ".join(self.linker_so) - if opt not in llink_s: - self.linker_so = llink_s.split() + opt.split() - - display = '%s: %s' % (os.path.basename(self.compiler_so[0]), src) - - # gcc style automatic dependencies, outputs a makefile (-MF) that lists - # all headers needed by a c file as a side effect of compilation (-MMD) - if getattr(self, '_auto_depends', False): - deps = ['-MMD', '-MF', obj + '.d'] - else: - deps = [] - - try: - self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + deps + - extra_postargs, display = display) - except DistutilsExecError: - msg = str(get_exception()) - raise CompileError(msg) - - # add commandline flags to dependency file - if deps: - with open(obj + '.d', 'a') as f: - f.write(_commandline_dep_string(cc_args, extra_postargs, pp_opts)) - -replace_method(UnixCCompiler, '_compile', UnixCCompiler__compile) - - -def UnixCCompiler_create_static_lib(self, objects, output_libname, - output_dir=None, debug=0, target_lang=None): - """ - Build a static library in a separate sub-process. 
- - Parameters - ---------- - objects : list or tuple of str - List of paths to object files used to build the static library. - output_libname : str - The library name as an absolute or relative (if `output_dir` is used) - path. - output_dir : str, optional - The path to the output directory. Default is None, in which case - the ``output_dir`` attribute of the UnixCCompiler instance. - debug : bool, optional - This parameter is not used. - target_lang : str, optional - This parameter is not used. - - Returns - ------- - None - - """ - objects, output_dir = self._fix_object_args(objects, output_dir) - - output_filename = \ - self.library_filename(output_libname, output_dir=output_dir) - - if self._need_link(objects, output_filename): - try: - # previous .a may be screwed up; best to remove it first - # and recreate. - # Also, ar on OS X doesn't handle updating universal archives - os.unlink(output_filename) - except (IOError, OSError): - pass - self.mkpath(os.path.dirname(output_filename)) - tmp_objects = objects + self.objects - while tmp_objects: - objects = tmp_objects[:50] - tmp_objects = tmp_objects[50:] - display = '%s: adding %d object files to %s' % ( - os.path.basename(self.archiver[0]), - len(objects), output_filename) - self.spawn(self.archiver + [output_filename] + objects, - display = display) - - # Not many Unices required ranlib anymore -- SunOS 4.x is, I - # think the only major Unix that does. Maybe we need some - # platform intelligence here to skip ranlib if it's not - # needed -- or maybe Python's configure script took care of - # it for us, hence the check for leading colon. 
- if self.ranlib: - display = '%s:@ %s' % (os.path.basename(self.ranlib[0]), - output_filename) - try: - self.spawn(self.ranlib + [output_filename], - display = display) - except DistutilsExecError: - msg = str(get_exception()) - raise LibError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - return - -replace_method(UnixCCompiler, 'create_static_lib', - UnixCCompiler_create_static_lib) diff --git a/venv/lib/python3.7/site-packages/numpy/doc/__init__.py b/venv/lib/python3.7/site-packages/numpy/doc/__init__.py deleted file mode 100644 index b6f1fa7..0000000 --- a/venv/lib/python3.7/site-packages/numpy/doc/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os - -ref_dir = os.path.join(os.path.dirname(__file__)) - -__all__ = sorted(f[:-3] for f in os.listdir(ref_dir) if f.endswith('.py') and - not f.startswith('__')) - -for f in __all__: - __import__(__name__ + '.' + f) - -del f, ref_dir - -__doc__ = """\ -Topical documentation -===================== - -The following topics are available: -%s - -You can view them by - ->>> help(np.doc.TOPIC) #doctest: +SKIP - -""" % '\n- '.join([''] + __all__) - -__all__.extend(['__doc__']) diff --git a/venv/lib/python3.7/site-packages/numpy/doc/basics.py b/venv/lib/python3.7/site-packages/numpy/doc/basics.py deleted file mode 100644 index c05f347..0000000 --- a/venv/lib/python3.7/site-packages/numpy/doc/basics.py +++ /dev/null @@ -1,342 +0,0 @@ -""" -============ -Array basics -============ - -Array types and conversions between types -========================================= - -NumPy supports a much greater variety of numerical types than Python does. -This section shows which are available, and how to modify an array's data-type. - -The primitive types supported are tied closely to those in C: - -.. 
list-table:: - :header-rows: 1 - - * - Numpy type - - C type - - Description - - * - `np.bool_` - - ``bool`` - - Boolean (True or False) stored as a byte - - * - `np.byte` - - ``signed char`` - - Platform-defined - - * - `np.ubyte` - - ``unsigned char`` - - Platform-defined - - * - `np.short` - - ``short`` - - Platform-defined - - * - `np.ushort` - - ``unsigned short`` - - Platform-defined - - * - `np.intc` - - ``int`` - - Platform-defined - - * - `np.uintc` - - ``unsigned int`` - - Platform-defined - - * - `np.int_` - - ``long`` - - Platform-defined - - * - `np.uint` - - ``unsigned long`` - - Platform-defined - - * - `np.longlong` - - ``long long`` - - Platform-defined - - * - `np.ulonglong` - - ``unsigned long long`` - - Platform-defined - - * - `np.half` / `np.float16` - - - - Half precision float: - sign bit, 5 bits exponent, 10 bits mantissa - - * - `np.single` - - ``float`` - - Platform-defined single precision float: - typically sign bit, 8 bits exponent, 23 bits mantissa - - * - `np.double` - - ``double`` - - Platform-defined double precision float: - typically sign bit, 11 bits exponent, 52 bits mantissa. - - * - `np.longdouble` - - ``long double`` - - Platform-defined extended-precision float - - * - `np.csingle` - - ``float complex`` - - Complex number, represented by two single-precision floats (real and imaginary components) - - * - `np.cdouble` - - ``double complex`` - - Complex number, represented by two double-precision floats (real and imaginary components). - - * - `np.clongdouble` - - ``long double complex`` - - Complex number, represented by two extended-precision floats (real and imaginary components). - - -Since many of these have platform-dependent definitions, a set of fixed-size -aliases are provided: - -.. 
list-table:: - :header-rows: 1 - - * - Numpy type - - C type - - Description - - * - `np.int8` - - ``int8_t`` - - Byte (-128 to 127) - - * - `np.int16` - - ``int16_t`` - - Integer (-32768 to 32767) - - * - `np.int32` - - ``int32_t`` - - Integer (-2147483648 to 2147483647) - - * - `np.int64` - - ``int64_t`` - - Integer (-9223372036854775808 to 9223372036854775807) - - * - `np.uint8` - - ``uint8_t`` - - Unsigned integer (0 to 255) - - * - `np.uint16` - - ``uint16_t`` - - Unsigned integer (0 to 65535) - - * - `np.uint32` - - ``uint32_t`` - - Unsigned integer (0 to 4294967295) - - * - `np.uint64` - - ``uint64_t`` - - Unsigned integer (0 to 18446744073709551615) - - * - `np.intp` - - ``intptr_t`` - - Integer used for indexing, typically the same as ``ssize_t`` - - * - `np.uintp` - - ``uintptr_t`` - - Integer large enough to hold a pointer - - * - `np.float32` - - ``float`` - - - - * - `np.float64` / `np.float_` - - ``double`` - - Note that this matches the precision of the builtin python `float`. - - * - `np.complex64` - - ``float complex`` - - Complex number, represented by two 32-bit floats (real and imaginary components) - - * - `np.complex128` / `np.complex_` - - ``double complex`` - - Note that this matches the precision of the builtin python `complex`. - - -NumPy numerical types are instances of ``dtype`` (data-type) objects, each -having unique characteristics. Once you have imported NumPy using - - :: - - >>> import numpy as np - -the dtypes are available as ``np.bool_``, ``np.float32``, etc. - -Advanced types, not listed in the table above, are explored in -section :ref:`structured_arrays`. - -There are 5 basic numerical types representing booleans (bool), integers (int), -unsigned integers (uint) floating point (float) and complex. Those with numbers -in their name indicate the bitsize of the type (i.e. how many bits are needed -to represent a single value in memory). 
Some types, such as ``int`` and -``intp``, have differing bitsizes, dependent on the platforms (e.g. 32-bit -vs. 64-bit machines). This should be taken into account when interfacing -with low-level code (such as C or Fortran) where the raw memory is addressed. - -Data-types can be used as functions to convert python numbers to array scalars -(see the array scalar section for an explanation), python sequences of numbers -to arrays of that type, or as arguments to the dtype keyword that many numpy -functions or methods accept. Some examples:: - - >>> import numpy as np - >>> x = np.float32(1.0) - >>> x - 1.0 - >>> y = np.int_([1,2,4]) - >>> y - array([1, 2, 4]) - >>> z = np.arange(3, dtype=np.uint8) - >>> z - array([0, 1, 2], dtype=uint8) - -Array types can also be referred to by character codes, mostly to retain -backward compatibility with older packages such as Numeric. Some -documentation may still refer to these, for example:: - - >>> np.array([1, 2, 3], dtype='f') - array([ 1., 2., 3.], dtype=float32) - -We recommend using dtype objects instead. - -To convert the type of an array, use the .astype() method (preferred) or -the type itself as a function. For example: :: - - >>> z.astype(float) #doctest: +NORMALIZE_WHITESPACE - array([ 0., 1., 2.]) - >>> np.int8(z) - array([0, 1, 2], dtype=int8) - -Note that, above, we use the *Python* float object as a dtype. NumPy knows -that ``int`` refers to ``np.int_``, ``bool`` means ``np.bool_``, -that ``float`` is ``np.float_`` and ``complex`` is ``np.complex_``. -The other data-types do not have Python equivalents. - -To determine the type of an array, look at the dtype attribute:: - - >>> z.dtype - dtype('uint8') - -dtype objects also contain information about the type, such as its bit-width -and its byte-order. 
The data type can also be used indirectly to query -properties of the type, such as whether it is an integer:: - - >>> d = np.dtype(int) - >>> d - dtype('int32') - - >>> np.issubdtype(d, np.integer) - True - - >>> np.issubdtype(d, np.floating) - False - - -Array Scalars -============= - -NumPy generally returns elements of arrays as array scalars (a scalar -with an associated dtype). Array scalars differ from Python scalars, but -for the most part they can be used interchangeably (the primary -exception is for versions of Python older than v2.x, where integer array -scalars cannot act as indices for lists and tuples). There are some -exceptions, such as when code requires very specific attributes of a scalar -or when it checks specifically whether a value is a Python scalar. Generally, -problems are easily fixed by explicitly converting array scalars -to Python scalars, using the corresponding Python type function -(e.g., ``int``, ``float``, ``complex``, ``str``, ``unicode``). - -The primary advantage of using array scalars is that -they preserve the array type (Python may not have a matching scalar type -available, e.g. ``int16``). Therefore, the use of array scalars ensures -identical behaviour between arrays and scalars, irrespective of whether the -value is inside an array or not. NumPy scalars also have many of the same -methods arrays do. - -Overflow Errors -=============== - -The fixed size of NumPy numeric types may cause overflow errors when a value -requires more memory than available in the data type. For example, -`numpy.power` evaluates ``100 * 10 ** 8`` correctly for 64-bit integers, -but gives 1874919424 (incorrect) for a 32-bit integer. - - >>> np.power(100, 8, dtype=np.int64) - 10000000000000000 - >>> np.power(100, 8, dtype=np.int32) - 1874919424 - -The behaviour of NumPy and Python integer types differs significantly for -integer overflows and may confuse users expecting NumPy integers to behave -similar to Python's ``int``. 
Unlike NumPy, the size of Python's ``int`` is -flexible. This means Python integers may expand to accommodate any integer and -will not overflow. - -NumPy provides `numpy.iinfo` and `numpy.finfo` to verify the -minimum or maximum values of NumPy integer and floating point values -respectively :: - - >>> np.iinfo(int) # Bounds of the default integer on this system. - iinfo(min=-9223372036854775808, max=9223372036854775807, dtype=int64) - >>> np.iinfo(np.int32) # Bounds of a 32-bit integer - iinfo(min=-2147483648, max=2147483647, dtype=int32) - >>> np.iinfo(np.int64) # Bounds of a 64-bit integer - iinfo(min=-9223372036854775808, max=9223372036854775807, dtype=int64) - -If 64-bit integers are still too small the result may be cast to a -floating point number. Floating point numbers offer a larger, but inexact, -range of possible values. - - >>> np.power(100, 100, dtype=np.int64) # Incorrect even with 64-bit int - 0 - >>> np.power(100, 100, dtype=np.float64) - 1e+200 - -Extended Precision -================== - -Python's floating-point numbers are usually 64-bit floating-point numbers, -nearly equivalent to ``np.float64``. In some unusual situations it may be -useful to use floating-point numbers with more precision. Whether this -is possible in numpy depends on the hardware and on the development -environment: specifically, x86 machines provide hardware floating-point -with 80-bit precision, and while most C compilers provide this as their -``long double`` type, MSVC (standard for Windows builds) makes -``long double`` identical to ``double`` (64 bits). NumPy makes the -compiler's ``long double`` available as ``np.longdouble`` (and -``np.clongdouble`` for the complex numbers). You can find out what your -numpy provides with ``np.finfo(np.longdouble)``. - -NumPy does not provide a dtype with more precision than C's -``long double``\\; in particular, the 128-bit IEEE quad precision -data type (FORTRAN's ``REAL*16``\\) is not available. 
- -For efficient memory alignment, ``np.longdouble`` is usually stored -padded with zero bits, either to 96 or 128 bits. Which is more efficient -depends on hardware and development environment; typically on 32-bit -systems they are padded to 96 bits, while on 64-bit systems they are -typically padded to 128 bits. ``np.longdouble`` is padded to the system -default; ``np.float96`` and ``np.float128`` are provided for users who -want specific padding. In spite of the names, ``np.float96`` and -``np.float128`` provide only as much precision as ``np.longdouble``, -that is, 80 bits on most x86 machines and 64 bits in standard -Windows builds. - -Be warned that even if ``np.longdouble`` offers more precision than -python ``float``, it is easy to lose that extra precision, since -python often forces values to pass through ``float``. For example, -the ``%`` formatting operator requires its arguments to be converted -to standard python types, and it is therefore impossible to preserve -extended precision even if many decimal places are requested. It can -be useful to test your code with the value -``1 + np.finfo(np.longdouble).eps``. - -""" -from __future__ import division, absolute_import, print_function diff --git a/venv/lib/python3.7/site-packages/numpy/doc/broadcasting.py b/venv/lib/python3.7/site-packages/numpy/doc/broadcasting.py deleted file mode 100644 index cb548a0..0000000 --- a/venv/lib/python3.7/site-packages/numpy/doc/broadcasting.py +++ /dev/null @@ -1,181 +0,0 @@ -""" -======================== -Broadcasting over arrays -======================== - -.. note:: - See `this article - `_ - for illustrations of broadcasting concepts. - - -The term broadcasting describes how numpy treats arrays with different -shapes during arithmetic operations. Subject to certain constraints, -the smaller array is "broadcast" across the larger array so that they -have compatible shapes. 
Broadcasting provides a means of vectorizing -array operations so that looping occurs in C instead of Python. It does -this without making needless copies of data and usually leads to -efficient algorithm implementations. There are, however, cases where -broadcasting is a bad idea because it leads to inefficient use of memory -that slows computation. - -NumPy operations are usually done on pairs of arrays on an -element-by-element basis. In the simplest case, the two arrays must -have exactly the same shape, as in the following example: - - >>> a = np.array([1.0, 2.0, 3.0]) - >>> b = np.array([2.0, 2.0, 2.0]) - >>> a * b - array([ 2., 4., 6.]) - -NumPy's broadcasting rule relaxes this constraint when the arrays' -shapes meet certain constraints. The simplest broadcasting example occurs -when an array and a scalar value are combined in an operation: - ->>> a = np.array([1.0, 2.0, 3.0]) ->>> b = 2.0 ->>> a * b -array([ 2., 4., 6.]) - -The result is equivalent to the previous example where ``b`` was an array. -We can think of the scalar ``b`` being *stretched* during the arithmetic -operation into an array with the same shape as ``a``. The new elements in -``b`` are simply copies of the original scalar. The stretching analogy is -only conceptual. NumPy is smart enough to use the original scalar value -without actually making copies so that broadcasting operations are as -memory and computationally efficient as possible. - -The code in the second example is more efficient than that in the first -because broadcasting moves less memory around during the multiplication -(``b`` is a scalar rather than an array). - -General Broadcasting Rules -========================== -When operating on two arrays, NumPy compares their shapes element-wise. -It starts with the trailing dimensions and works its way forward. 
Two -dimensions are compatible when - -1) they are equal, or -2) one of them is 1 - -If these conditions are not met, a -``ValueError: operands could not be broadcast together`` exception is -thrown, indicating that the arrays have incompatible shapes. The size of -the resulting array is the size that is not 1 along each axis of the inputs. - -Arrays do not need to have the same *number* of dimensions. For example, -if you have a ``256x256x3`` array of RGB values, and you want to scale -each color in the image by a different value, you can multiply the image -by a one-dimensional array with 3 values. Lining up the sizes of the -trailing axes of these arrays according to the broadcast rules, shows that -they are compatible:: - - Image (3d array): 256 x 256 x 3 - Scale (1d array): 3 - Result (3d array): 256 x 256 x 3 - -When either of the dimensions compared is one, the other is -used. In other words, dimensions with size 1 are stretched or "copied" -to match the other. - -In the following example, both the ``A`` and ``B`` arrays have axes with -length one that are expanded to a larger size during the broadcast -operation:: - - A (4d array): 8 x 1 x 6 x 1 - B (3d array): 7 x 1 x 5 - Result (4d array): 8 x 7 x 6 x 5 - -Here are some more examples:: - - A (2d array): 5 x 4 - B (1d array): 1 - Result (2d array): 5 x 4 - - A (2d array): 5 x 4 - B (1d array): 4 - Result (2d array): 5 x 4 - - A (3d array): 15 x 3 x 5 - B (3d array): 15 x 1 x 5 - Result (3d array): 15 x 3 x 5 - - A (3d array): 15 x 3 x 5 - B (2d array): 3 x 5 - Result (3d array): 15 x 3 x 5 - - A (3d array): 15 x 3 x 5 - B (2d array): 3 x 1 - Result (3d array): 15 x 3 x 5 - -Here are examples of shapes that do not broadcast:: - - A (1d array): 3 - B (1d array): 4 # trailing dimensions do not match - - A (2d array): 2 x 1 - B (3d array): 8 x 4 x 3 # second from last dimensions mismatched - -An example of broadcasting in practice:: - - >>> x = np.arange(4) - >>> xx = x.reshape(4,1) - >>> y = np.ones(5) - >>> 
z = np.ones((3,4)) - - >>> x.shape - (4,) - - >>> y.shape - (5,) - - >>> x + y - ValueError: operands could not be broadcast together with shapes (4,) (5,) - - >>> xx.shape - (4, 1) - - >>> y.shape - (5,) - - >>> (xx + y).shape - (4, 5) - - >>> xx + y - array([[ 1., 1., 1., 1., 1.], - [ 2., 2., 2., 2., 2.], - [ 3., 3., 3., 3., 3.], - [ 4., 4., 4., 4., 4.]]) - - >>> x.shape - (4,) - - >>> z.shape - (3, 4) - - >>> (x + z).shape - (3, 4) - - >>> x + z - array([[ 1., 2., 3., 4.], - [ 1., 2., 3., 4.], - [ 1., 2., 3., 4.]]) - -Broadcasting provides a convenient way of taking the outer product (or -any other outer operation) of two arrays. The following example shows an -outer addition operation of two 1-d arrays:: - - >>> a = np.array([0.0, 10.0, 20.0, 30.0]) - >>> b = np.array([1.0, 2.0, 3.0]) - >>> a[:, np.newaxis] + b - array([[ 1., 2., 3.], - [ 11., 12., 13.], - [ 21., 22., 23.], - [ 31., 32., 33.]]) - -Here the ``newaxis`` index operator inserts a new axis into ``a``, -making it a two-dimensional ``4x1`` array. Combining the ``4x1`` array -with ``b``, which has shape ``(3,)``, yields a ``4x3`` array. - -""" -from __future__ import division, absolute_import, print_function diff --git a/venv/lib/python3.7/site-packages/numpy/doc/byteswapping.py b/venv/lib/python3.7/site-packages/numpy/doc/byteswapping.py deleted file mode 100644 index 7a749c8..0000000 --- a/venv/lib/python3.7/site-packages/numpy/doc/byteswapping.py +++ /dev/null @@ -1,156 +0,0 @@ -""" - -============================= - Byteswapping and byte order -============================= - -Introduction to byte ordering and ndarrays -========================================== - -The ``ndarray`` is an object that provide a python array interface to data -in memory. - -It often happens that the memory that you want to view with an array is -not of the same byte ordering as the computer on which you are running -Python. 
- -For example, I might be working on a computer with a little-endian CPU - -such as an Intel Pentium, but I have loaded some data from a file -written by a computer that is big-endian. Let's say I have loaded 4 -bytes from a file written by a Sun (big-endian) computer. I know that -these 4 bytes represent two 16-bit integers. On a big-endian machine, a -two-byte integer is stored with the Most Significant Byte (MSB) first, -and then the Least Significant Byte (LSB). Thus the bytes are, in memory order: - -#. MSB integer 1 -#. LSB integer 1 -#. MSB integer 2 -#. LSB integer 2 - -Let's say the two integers were in fact 1 and 770. Because 770 = 256 * -3 + 2, the 4 bytes in memory would contain respectively: 0, 1, 3, 2. -The bytes I have loaded from the file would have these contents: - ->>> big_end_buffer = bytearray([0,1,3,2]) ->>> big_end_buffer -bytearray(b'\\x00\\x01\\x03\\x02') - -We might want to use an ``ndarray`` to access these integers. In that -case, we can create an array around this memory, and tell numpy that -there are two integers, and that they are 16 bit and big-endian: - ->>> import numpy as np ->>> big_end_arr = np.ndarray(shape=(2,),dtype='>i2', buffer=big_end_buffer) ->>> big_end_arr[0] -1 ->>> big_end_arr[1] -770 - -Note the array ``dtype`` above of ``>i2``. The ``>`` means 'big-endian' -(``<`` is little-endian) and ``i2`` means 'signed 2-byte integer'. For -example, if our data represented a single unsigned 4-byte little-endian -integer, the dtype string would be ``>> little_end_u4 = np.ndarray(shape=(1,),dtype='>> little_end_u4[0] == 1 * 256**1 + 3 * 256**2 + 2 * 256**3 -True - -Returning to our ``big_end_arr`` - in this case our underlying data is -big-endian (data endianness) and we've set the dtype to match (the dtype -is also big-endian). However, sometimes you need to flip these around. - -.. 
warning:: - - Scalars currently do not include byte order information, so extracting - a scalar from an array will return an integer in native byte order. - Hence: - - >>> big_end_arr[0].dtype.byteorder == little_end_u4[0].dtype.byteorder - True - -Changing byte ordering -====================== - -As you can imagine from the introduction, there are two ways you can -affect the relationship between the byte ordering of the array and the -underlying memory it is looking at: - -* Change the byte-ordering information in the array dtype so that it - interprets the underlying data as being in a different byte order. - This is the role of ``arr.newbyteorder()`` -* Change the byte-ordering of the underlying data, leaving the dtype - interpretation as it was. This is what ``arr.byteswap()`` does. - -The common situations in which you need to change byte ordering are: - -#. Your data and dtype endianness don't match, and you want to change - the dtype so that it matches the data. -#. Your data and dtype endianness don't match, and you want to swap the - data so that they match the dtype -#. Your data and dtype endianness match, but you want the data swapped - and the dtype to reflect this - -Data and dtype endianness don't match, change dtype to match data ------------------------------------------------------------------ - -We make something where they don't match: - ->>> wrong_end_dtype_arr = np.ndarray(shape=(2,),dtype='>> wrong_end_dtype_arr[0] -256 - -The obvious fix for this situation is to change the dtype so it gives -the correct endianness: - ->>> fixed_end_dtype_arr = wrong_end_dtype_arr.newbyteorder() ->>> fixed_end_dtype_arr[0] -1 - -Note the array has not changed in memory: - ->>> fixed_end_dtype_arr.tobytes() == big_end_buffer -True - -Data and type endianness don't match, change data to match dtype ----------------------------------------------------------------- - -You might want to do this if you need the data in memory to be a certain -ordering. 
For example you might be writing the memory out to a file -that needs a certain byte ordering. - ->>> fixed_end_mem_arr = wrong_end_dtype_arr.byteswap() ->>> fixed_end_mem_arr[0] -1 - -Now the array *has* changed in memory: - ->>> fixed_end_mem_arr.tobytes() == big_end_buffer -False - -Data and dtype endianness match, swap data and dtype ----------------------------------------------------- - -You may have a correctly specified array dtype, but you need the array -to have the opposite byte order in memory, and you want the dtype to -match so the array values make sense. In this case you just do both of -the previous operations: - ->>> swapped_end_arr = big_end_arr.byteswap().newbyteorder() ->>> swapped_end_arr[0] -1 ->>> swapped_end_arr.tobytes() == big_end_buffer -False - -An easier way of casting the data to a specific dtype and byte ordering -can be achieved with the ndarray astype method: - ->>> swapped_end_arr = big_end_arr.astype('>> swapped_end_arr[0] -1 ->>> swapped_end_arr.tobytes() == big_end_buffer -False - -""" -from __future__ import division, absolute_import, print_function diff --git a/venv/lib/python3.7/site-packages/numpy/doc/constants.py b/venv/lib/python3.7/site-packages/numpy/doc/constants.py deleted file mode 100644 index 72793e4..0000000 --- a/venv/lib/python3.7/site-packages/numpy/doc/constants.py +++ /dev/null @@ -1,418 +0,0 @@ -# -*- coding: utf-8 -*- -""" -========= -Constants -========= - -.. currentmodule:: numpy - -NumPy includes several constants: - -%(constant_list)s -""" -# -# Note: the docstring is autogenerated. 
-# -from __future__ import division, absolute_import, print_function - -import textwrap, re - -# Maintain same format as in numpy.add_newdocs -constants = [] -def add_newdoc(module, name, doc): - constants.append((name, doc)) - -add_newdoc('numpy', 'pi', - """ - ``pi = 3.1415926535897932384626433...`` - - References - ---------- - https://en.wikipedia.org/wiki/Pi - - """) - -add_newdoc('numpy', 'e', - """ - Euler's constant, base of natural logarithms, Napier's constant. - - ``e = 2.71828182845904523536028747135266249775724709369995...`` - - See Also - -------- - exp : Exponential function - log : Natural logarithm - - References - ---------- - https://en.wikipedia.org/wiki/E_%28mathematical_constant%29 - - """) - -add_newdoc('numpy', 'euler_gamma', - """ - ``γ = 0.5772156649015328606065120900824024310421...`` - - References - ---------- - https://en.wikipedia.org/wiki/Euler-Mascheroni_constant - - """) - -add_newdoc('numpy', 'inf', - """ - IEEE 754 floating point representation of (positive) infinity. - - Returns - ------- - y : float - A floating point representation of positive infinity. - - See Also - -------- - isinf : Shows which elements are positive or negative infinity - - isposinf : Shows which elements are positive infinity - - isneginf : Shows which elements are negative infinity - - isnan : Shows which elements are Not a Number - - isfinite : Shows which elements are finite (not one of Not a Number, - positive infinity and negative infinity) - - Notes - ----- - NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. - Also that positive infinity is not equivalent to negative infinity. But - infinity is equivalent to positive infinity. - - `Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`. - - Examples - -------- - >>> np.inf - inf - >>> np.array([1]) / 0. 
- array([ Inf]) - - """) - -add_newdoc('numpy', 'nan', - """ - IEEE 754 floating point representation of Not a Number (NaN). - - Returns - ------- - y : A floating point representation of Not a Number. - - See Also - -------- - isnan : Shows which elements are Not a Number. - - isfinite : Shows which elements are finite (not one of - Not a Number, positive infinity and negative infinity) - - Notes - ----- - NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. - - `NaN` and `NAN` are aliases of `nan`. - - Examples - -------- - >>> np.nan - nan - >>> np.log(-1) - nan - >>> np.log([-1, 1, 2]) - array([ NaN, 0. , 0.69314718]) - - """) - -add_newdoc('numpy', 'newaxis', - """ - A convenient alias for None, useful for indexing arrays. - - See Also - -------- - `numpy.doc.indexing` - - Examples - -------- - >>> newaxis is None - True - >>> x = np.arange(3) - >>> x - array([0, 1, 2]) - >>> x[:, newaxis] - array([[0], - [1], - [2]]) - >>> x[:, newaxis, newaxis] - array([[[0]], - [[1]], - [[2]]]) - >>> x[:, newaxis] * x - array([[0, 0, 0], - [0, 1, 2], - [0, 2, 4]]) - - Outer product, same as ``outer(x, y)``: - - >>> y = np.arange(3, 6) - >>> x[:, newaxis] * y - array([[ 0, 0, 0], - [ 3, 4, 5], - [ 6, 8, 10]]) - - ``x[newaxis, :]`` is equivalent to ``x[newaxis]`` and ``x[None]``: - - >>> x[newaxis, :].shape - (1, 3) - >>> x[newaxis].shape - (1, 3) - >>> x[None].shape - (1, 3) - >>> x[:, newaxis].shape - (3, 1) - - """) - -add_newdoc('numpy', 'NZERO', - """ - IEEE 754 floating point representation of negative zero. - - Returns - ------- - y : float - A floating point representation of negative zero. - - See Also - -------- - PZERO : Defines positive zero. - - isinf : Shows which elements are positive or negative infinity. - - isposinf : Shows which elements are positive infinity. - - isneginf : Shows which elements are negative infinity. - - isnan : Shows which elements are Not a Number. 
- - isfinite : Shows which elements are finite - not one of - Not a Number, positive infinity and negative infinity. - - Notes - ----- - NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). Negative zero is considered to be a finite number. - - Examples - -------- - >>> np.NZERO - -0.0 - >>> np.PZERO - 0.0 - - >>> np.isfinite([np.NZERO]) - array([ True]) - >>> np.isnan([np.NZERO]) - array([False]) - >>> np.isinf([np.NZERO]) - array([False]) - - """) - -add_newdoc('numpy', 'PZERO', - """ - IEEE 754 floating point representation of positive zero. - - Returns - ------- - y : float - A floating point representation of positive zero. - - See Also - -------- - NZERO : Defines negative zero. - - isinf : Shows which elements are positive or negative infinity. - - isposinf : Shows which elements are positive infinity. - - isneginf : Shows which elements are negative infinity. - - isnan : Shows which elements are Not a Number. - - isfinite : Shows which elements are finite - not one of - Not a Number, positive infinity and negative infinity. - - Notes - ----- - NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). Positive zero is considered to be a finite number. - - Examples - -------- - >>> np.PZERO - 0.0 - >>> np.NZERO - -0.0 - - >>> np.isfinite([np.PZERO]) - array([ True]) - >>> np.isnan([np.PZERO]) - array([False]) - >>> np.isinf([np.PZERO]) - array([False]) - - """) - -add_newdoc('numpy', 'NAN', - """ - IEEE 754 floating point representation of Not a Number (NaN). - - `NaN` and `NAN` are equivalent definitions of `nan`. Please use - `nan` instead of `NAN`. - - See Also - -------- - nan - - """) - -add_newdoc('numpy', 'NaN', - """ - IEEE 754 floating point representation of Not a Number (NaN). - - `NaN` and `NAN` are equivalent definitions of `nan`. Please use - `nan` instead of `NaN`. 
- - See Also - -------- - nan - - """) - -add_newdoc('numpy', 'NINF', - """ - IEEE 754 floating point representation of negative infinity. - - Returns - ------- - y : float - A floating point representation of negative infinity. - - See Also - -------- - isinf : Shows which elements are positive or negative infinity - - isposinf : Shows which elements are positive infinity - - isneginf : Shows which elements are negative infinity - - isnan : Shows which elements are Not a Number - - isfinite : Shows which elements are finite (not one of Not a Number, - positive infinity and negative infinity) - - Notes - ----- - NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. - Also that positive infinity is not equivalent to negative infinity. But - infinity is equivalent to positive infinity. - - Examples - -------- - >>> np.NINF - -inf - >>> np.log(0) - -inf - - """) - -add_newdoc('numpy', 'PINF', - """ - IEEE 754 floating point representation of (positive) infinity. - - Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for - `inf`. For more details, see `inf`. - - See Also - -------- - inf - - """) - -add_newdoc('numpy', 'infty', - """ - IEEE 754 floating point representation of (positive) infinity. - - Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for - `inf`. For more details, see `inf`. - - See Also - -------- - inf - - """) - -add_newdoc('numpy', 'Inf', - """ - IEEE 754 floating point representation of (positive) infinity. - - Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for - `inf`. For more details, see `inf`. - - See Also - -------- - inf - - """) - -add_newdoc('numpy', 'Infinity', - """ - IEEE 754 floating point representation of (positive) infinity. - - Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for - `inf`. For more details, see `inf`. 
- - See Also - -------- - inf - - """) - - -if __doc__: - constants_str = [] - constants.sort() - for name, doc in constants: - s = textwrap.dedent(doc).replace("\n", "\n ") - - # Replace sections by rubrics - lines = s.split("\n") - new_lines = [] - for line in lines: - m = re.match(r'^(\s+)[-=]+\s*$', line) - if m and new_lines: - prev = textwrap.dedent(new_lines.pop()) - new_lines.append('%s.. rubric:: %s' % (m.group(1), prev)) - new_lines.append('') - else: - new_lines.append(line) - s = "\n".join(new_lines) - - # Done. - constants_str.append(""".. data:: %s\n %s""" % (name, s)) - constants_str = "\n".join(constants_str) - - __doc__ = __doc__ % dict(constant_list=constants_str) - del constants_str, name, doc - del line, lines, new_lines, m, s, prev - -del constants, add_newdoc diff --git a/venv/lib/python3.7/site-packages/numpy/doc/creation.py b/venv/lib/python3.7/site-packages/numpy/doc/creation.py deleted file mode 100644 index 9ebe938..0000000 --- a/venv/lib/python3.7/site-packages/numpy/doc/creation.py +++ /dev/null @@ -1,144 +0,0 @@ -""" -============== -Array Creation -============== - -Introduction -============ - -There are 5 general mechanisms for creating arrays: - -1) Conversion from other Python structures (e.g., lists, tuples) -2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros, - etc.) -3) Reading arrays from disk, either from standard or custom formats -4) Creating arrays from raw bytes through the use of strings or buffers -5) Use of special library functions (e.g., random) - -This section will not cover means of replicating, joining, or otherwise -expanding or mutating existing arrays. Nor will it cover creating object -arrays or structured arrays. Both of those are covered in their own sections. 
- -Converting Python array_like Objects to NumPy Arrays -==================================================== - -In general, numerical data arranged in an array-like structure in Python can -be converted to arrays through the use of the array() function. The most -obvious examples are lists and tuples. See the documentation for array() for -details for its use. Some objects may support the array-protocol and allow -conversion to arrays this way. A simple way to find out if the object can be -converted to a numpy array using array() is simply to try it interactively and -see if it works! (The Python Way). - -Examples: :: - - >>> x = np.array([2,3,1,0]) - >>> x = np.array([2, 3, 1, 0]) - >>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists, - and types - >>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]]) - -Intrinsic NumPy Array Creation -============================== - -NumPy has built-in functions for creating arrays from scratch: - -zeros(shape) will create an array filled with 0 values with the specified -shape. The default dtype is float64. :: - - >>> np.zeros((2, 3)) - array([[ 0., 0., 0.], [ 0., 0., 0.]]) - -ones(shape) will create an array filled with 1 values. It is identical to -zeros in all other respects. - -arange() will create arrays with regularly incrementing values. Check the -docstring for complete information on the various ways it can be used. A few -examples will be given here: :: - - >>> np.arange(10) - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - >>> np.arange(2, 10, dtype=float) - array([ 2., 3., 4., 5., 6., 7., 8., 9.]) - >>> np.arange(2, 3, 0.1) - array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9]) - -Note that there are some subtleties regarding the last usage that the user -should be aware of that are described in the arange docstring. - -linspace() will create arrays with a specified number of elements, and -spaced equally between the specified beginning and end values. 
For -example: :: - - >>> np.linspace(1., 4., 6) - array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ]) - -The advantage of this creation function is that one can guarantee the -number of elements and the starting and end point, which arange() -generally will not do for arbitrary start, stop, and step values. - -indices() will create a set of arrays (stacked as a one-higher dimensioned -array), one per dimension with each representing variation in that dimension. -An example illustrates much better than a verbal description: :: - - >>> np.indices((3,3)) - array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]]) - -This is particularly useful for evaluating functions of multiple dimensions on -a regular grid. - -Reading Arrays From Disk -======================== - -This is presumably the most common case of large array creation. The details, -of course, depend greatly on the format of data on disk and so this section -can only give general pointers on how to handle various formats. - -Standard Binary Formats ------------------------ - -Various fields have standard formats for array data. The following lists the -ones with known python libraries to read them and return numpy arrays (there -may be others for which it is possible to read and convert to numpy arrays so -check the last section as well) -:: - - HDF5: h5py - FITS: Astropy - -Examples of formats that cannot be read directly but for which it is not hard to -convert are those formats supported by libraries like PIL (able to read and -write many image formats such as jpg, png, etc). - -Common ASCII Formats ------------------------- - -Comma Separated Value files (CSV) are widely used (and an export and import -option for programs like Excel). There are a number of ways of reading these -files in Python. There are CSV functions in Python and functions in pylab -(part of matplotlib). - -More generic ascii files can be read using the io package in scipy. 
- -Custom Binary Formats ---------------------- - -There are a variety of approaches one can use. If the file has a relatively -simple format then one can write a simple I/O library and use the numpy -fromfile() function and .tofile() method to read and write numpy arrays -directly (mind your byteorder though!) If a good C or C++ library exists that -read the data, one can wrap that library with a variety of techniques though -that certainly is much more work and requires significantly more advanced -knowledge to interface with C or C++. - -Use of Special Libraries ------------------------- - -There are libraries that can be used to generate arrays for special purposes -and it isn't possible to enumerate all of them. The most common uses are use -of the many array generation functions in random that can generate arrays of -random values, and some utility functions to generate special matrices (e.g. -diagonal). - -""" -from __future__ import division, absolute_import, print_function diff --git a/venv/lib/python3.7/site-packages/numpy/doc/dispatch.py b/venv/lib/python3.7/site-packages/numpy/doc/dispatch.py deleted file mode 100644 index c902994..0000000 --- a/venv/lib/python3.7/site-packages/numpy/doc/dispatch.py +++ /dev/null @@ -1,271 +0,0 @@ -""".. _dispatch_mechanism: - -Numpy's dispatch mechanism, introduced in numpy version v1.16 is the -recommended approach for writing custom N-dimensional array containers that are -compatible with the numpy API and provide custom implementations of numpy -functionality. Applications include `dask `_ arrays, an -N-dimensional array distributed across multiple nodes, and `cupy -`_ arrays, an N-dimensional array on -a GPU. - -To get a feel for writing custom array containers, we'll begin with a simple -example that has rather narrow utility but illustrates the concepts involved. - ->>> import numpy as np ->>> class DiagonalArray: -... def __init__(self, N, value): -... self._N = N -... self._i = value -... 
def __repr__(self): -... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" -... def __array__(self): -... return self._i * np.eye(self._N) -... - -Our custom array can be instantiated like: - ->>> arr = DiagonalArray(5, 1) ->>> arr -DiagonalArray(N=5, value=1) - -We can convert to a numpy array using :func:`numpy.array` or -:func:`numpy.asarray`, which will call its ``__array__`` method to obtain a -standard ``numpy.ndarray``. - ->>> np.asarray(arr) -array([[1., 0., 0., 0., 0.], - [0., 1., 0., 0., 0.], - [0., 0., 1., 0., 0.], - [0., 0., 0., 1., 0.], - [0., 0., 0., 0., 1.]]) - -If we operate on ``arr`` with a numpy function, numpy will again use the -``__array__`` interface to convert it to an array and then apply the function -in the usual way. - ->>> np.multiply(arr, 2) -array([[2., 0., 0., 0., 0.], - [0., 2., 0., 0., 0.], - [0., 0., 2., 0., 0.], - [0., 0., 0., 2., 0.], - [0., 0., 0., 0., 2.]]) - - -Notice that the return type is a standard ``numpy.ndarray``. - ->>> type(arr) -numpy.ndarray - -How can we pass our custom array type through this function? Numpy allows a -class to indicate that it would like to handle computations in a custom-defined -way through the interaces ``__array_ufunc__`` and ``__array_function__``. Let's -take one at a time, starting with ``_array_ufunc__``. This method covers -:ref:`ufuncs`, a class of functions that includes, for example, -:func:`numpy.multiply` and :func:`numpy.sin`. - -The ``__array_ufunc__`` receives: - -- ``ufunc``, a function like ``numpy.multiply`` -- ``method``, a string, differentiating between ``numpy.multiply(...)`` and - variants like ``numpy.multiply.outer``, ``numpy.multiply.accumulate``, and so - on. For the common case, ``numpy.multiply(...)``, ``method == '__call__'``. -- ``inputs``, which could be a mixture of different types -- ``kwargs``, keyword arguments passed to the function - -For this example we will only handle the method ``__call__``. 
- ->>> from numbers import Number ->>> class DiagonalArray: -... def __init__(self, N, value): -... self._N = N -... self._i = value -... def __repr__(self): -... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" -... def __array__(self): -... return self._i * np.eye(self._N) -... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): -... if method == '__call__': -... N = None -... scalars = [] -... for input in inputs: -... if isinstance(input, Number): -... scalars.append(input) -... elif isinstance(input, self.__class__): -... scalars.append(input._i) -... if N is not None: -... if N != self._N: -... raise TypeError("inconsistent sizes") -... else: -... N = self._N -... else: -... return NotImplemented -... return self.__class__(N, ufunc(*scalars, **kwargs)) -... else: -... return NotImplemented -... - -Now our custom array type passes through numpy functions. - ->>> arr = DiagonalArray(5, 1) ->>> np.multiply(arr, 3) -DiagonalArray(N=5, value=3) ->>> np.add(arr, 3) -DiagonalArray(N=5, value=4) ->>> np.sin(arr) -DiagonalArray(N=5, value=0.8414709848078965) - -At this point ``arr + 3`` does not work. - ->>> arr + 3 -TypeError: unsupported operand type(s) for *: 'DiagonalArray' and 'int' - -To support it, we need to define the Python interfaces ``__add__``, ``__lt__``, -and so on to dispatch to the corresponding ufunc. We can achieve this -conveniently by inheriting from the mixin -:class:`~numpy.lib.mixins.NDArrayOperatorsMixin`. - ->>> import numpy.lib.mixins ->>> class DiagonalArray(numpy.lib.mixins.NDArrayOperatorsMixin): -... def __init__(self, N, value): -... self._N = N -... self._i = value -... def __repr__(self): -... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" -... def __array__(self): -... return self._i * np.eye(self._N) -... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): -... if method == '__call__': -... N = None -... scalars = [] -... for input in inputs: -... 
if isinstance(input, Number): -... scalars.append(input) -... elif isinstance(input, self.__class__): -... scalars.append(input._i) -... if N is not None: -... if N != self._N: -... raise TypeError("inconsistent sizes") -... else: -... N = self._N -... else: -... return NotImplemented -... return self.__class__(N, ufunc(*scalars, **kwargs)) -... else: -... return NotImplemented -... - ->>> arr = DiagonalArray(5, 1) ->>> arr + 3 -DiagonalArray(N=5, value=4) ->>> arr > 0 -DiagonalArray(N=5, value=True) - -Now let's tackle ``__array_function__``. We'll create dict that maps numpy -functions to our custom variants. - ->>> HANDLED_FUNCTIONS = {} ->>> class DiagonalArray(numpy.lib.mixins.NDArrayOperatorsMixin): -... def __init__(self, N, value): -... self._N = N -... self._i = value -... def __repr__(self): -... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" -... def __array__(self): -... return self._i * np.eye(self._N) -... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): -... if method == '__call__': -... N = None -... scalars = [] -... for input in inputs: -... # In this case we accept only scalar numbers or DiagonalArrays. -... if isinstance(input, Number): -... scalars.append(input) -... elif isinstance(input, self.__class__): -... scalars.append(input._i) -... if N is not None: -... if N != self._N: -... raise TypeError("inconsistent sizes") -... else: -... N = self._N -... else: -... return NotImplemented -... return self.__class__(N, ufunc(*scalars, **kwargs)) -... else: -... return NotImplemented -... def __array_function__(self, func, types, args, kwargs): -... if func not in HANDLED_FUNCTIONS: -... return NotImplemented -... # Note: this allows subclasses that don't override -... # __array_function__ to handle DiagonalArray objects. -... if not all(issubclass(t, self.__class__) for t in types): -... return NotImplemented -... return HANDLED_FUNCTIONS[func](*args, **kwargs) -... 
- -A convenient pattern is to define a decorator ``implements`` that can be used -to add functions to ``HANDLED_FUNCTIONS``. - ->>> def implements(np_function): -... "Register an __array_function__ implementation for DiagonalArray objects." -... def decorator(func): -... HANDLED_FUNCTIONS[np_function] = func -... return func -... return decorator -... - -Now we write implementations of numpy functions for ``DiagonalArray``. -For completeness, to support the usage ``arr.sum()`` add a method ``sum`` that -calls ``numpy.sum(self)``, and the same for ``mean``. - ->>> @implements(np.sum) -... def sum(arr): -... "Implementation of np.sum for DiagonalArray objects" -... return arr._i * arr._N -... ->>> @implements(np.mean) -... def mean(arr): -... "Implementation of np.mean for DiagonalArray objects" -... return arr._i / arr._N -... ->>> arr = DiagonalArray(5, 1) ->>> np.sum(arr) -5 ->>> np.mean(arr) -0.2 - -If the user tries to use any numpy functions not included in -``HANDLED_FUNCTIONS``, a ``TypeError`` will be raised by numpy, indicating that -this operation is not supported. For example, concatenating two -``DiagonalArrays`` does not produce another diagonal array, so it is not -supported. - ->>> np.concatenate([arr, arr]) -TypeError: no implementation found for 'numpy.concatenate' on types that implement __array_function__: [] - -Additionally, our implementations of ``sum`` and ``mean`` do not accept the -optional arguments that numpy's implementation does. - ->>> np.sum(arr, axis=0) -TypeError: sum() got an unexpected keyword argument 'axis' - -The user always has the option of converting to a normal ``numpy.ndarray`` with -:func:`numpy.asarray` and using standard numpy from there. 
- ->>> np.concatenate([np.asarray(arr), np.asarray(arr)]) -array([[1., 0., 0., 0., 0.], - [0., 1., 0., 0., 0.], - [0., 0., 1., 0., 0.], - [0., 0., 0., 1., 0.], - [0., 0., 0., 0., 1.], - [1., 0., 0., 0., 0.], - [0., 1., 0., 0., 0.], - [0., 0., 1., 0., 0.], - [0., 0., 0., 1., 0.], - [0., 0., 0., 0., 1.]]) - -Refer to the `dask source code `_ and -`cupy source code `_ for more fully-worked -examples of custom array containers. - -See also `NEP 18 `_. -""" diff --git a/venv/lib/python3.7/site-packages/numpy/doc/glossary.py b/venv/lib/python3.7/site-packages/numpy/doc/glossary.py deleted file mode 100644 index 7d1c9a1..0000000 --- a/venv/lib/python3.7/site-packages/numpy/doc/glossary.py +++ /dev/null @@ -1,476 +0,0 @@ -""" -======== -Glossary -======== - -.. glossary:: - - along an axis - Axes are defined for arrays with more than one dimension. A - 2-dimensional array has two corresponding axes: the first running - vertically downwards across rows (axis 0), and the second running - horizontally across columns (axis 1). - - Many operations can take place along one of these axes. For example, - we can sum each row of an array, in which case we operate along - columns, or axis 1:: - - >>> x = np.arange(12).reshape((3,4)) - - >>> x - array([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - - >>> x.sum(axis=1) - array([ 6, 22, 38]) - - array - A homogeneous container of numerical elements. Each element in the - array occupies a fixed amount of memory (hence homogeneous), and - can be a numerical element of a single type (such as float, int - or complex) or a combination (such as ``(float, int, float)``). 
Each - array has an associated data-type (or ``dtype``), which describes - the numerical type of its elements:: - - >>> x = np.array([1, 2, 3], float) - - >>> x - array([ 1., 2., 3.]) - - >>> x.dtype # floating point number, 64 bits of memory per element - dtype('float64') - - - # More complicated data type: each array element is a combination of - # and integer and a floating point number - >>> np.array([(1, 2.0), (3, 4.0)], dtype=[('x', int), ('y', float)]) - array([(1, 2.0), (3, 4.0)], - dtype=[('x', '>> x = np.array([1, 2, 3]) - >>> x.shape - (3,) - - big-endian - When storing a multi-byte value in memory as a sequence of bytes, the - sequence addresses/sends/stores the most significant byte first (lowest - address) and the least significant byte last (highest address). Common in - micro-processors and used for transmission of data over network protocols. - - BLAS - `Basic Linear Algebra Subprograms `_ - - broadcast - NumPy can do operations on arrays whose shapes are mismatched:: - - >>> x = np.array([1, 2]) - >>> y = np.array([[3], [4]]) - - >>> x - array([1, 2]) - - >>> y - array([[3], - [4]]) - - >>> x + y - array([[4, 5], - [5, 6]]) - - See `numpy.doc.broadcasting` for more information. - - C order - See `row-major` - - column-major - A way to represent items in a N-dimensional array in the 1-dimensional - computer memory. In column-major order, the leftmost index "varies the - fastest": for example the array:: - - [[1, 2, 3], - [4, 5, 6]] - - is represented in the column-major order as:: - - [1, 4, 2, 5, 3, 6] - - Column-major order is also known as the Fortran order, as the Fortran - programming language uses it. - - decorator - An operator that transforms a function. For example, a ``log`` - decorator may be defined to print debugging information upon - function execution:: - - >>> def log(f): - ... def new_logging_func(*args, **kwargs): - ... print("Logging call with parameters:", args, kwargs) - ... return f(*args, **kwargs) - ... - ... 
return new_logging_func - - Now, when we define a function, we can "decorate" it using ``log``:: - - >>> @log - ... def add(a, b): - ... return a + b - - Calling ``add`` then yields: - - >>> add(1, 2) - Logging call with parameters: (1, 2) {} - 3 - - dictionary - Resembling a language dictionary, which provides a mapping between - words and descriptions thereof, a Python dictionary is a mapping - between two objects:: - - >>> x = {1: 'one', 'two': [1, 2]} - - Here, `x` is a dictionary mapping keys to values, in this case - the integer 1 to the string "one", and the string "two" to - the list ``[1, 2]``. The values may be accessed using their - corresponding keys:: - - >>> x[1] - 'one' - - >>> x['two'] - [1, 2] - - Note that dictionaries are not stored in any specific order. Also, - most mutable (see *immutable* below) objects, such as lists, may not - be used as keys. - - For more information on dictionaries, read the - `Python tutorial `_. - - field - In a :term:`structured data type`, each sub-type is called a `field`. - The `field` has a name (a string), a type (any valid dtype, and - an optional `title`. See :ref:`arrays.dtypes` - - Fortran order - See `column-major` - - flattened - Collapsed to a one-dimensional array. See `numpy.ndarray.flatten` - for details. - - homogenous - Describes a block of memory comprised of blocks, each block comprised of - items and of the same size, and blocks are interpreted in exactly the - same way. In the simplest case each block contains a single item, for - instance int32 or float64. - - immutable - An object that cannot be modified after execution is called - immutable. Two common examples are strings and tuples. - - instance - A class definition gives the blueprint for constructing an object:: - - >>> class House(object): - ... wall_colour = 'white' - - Yet, we have to *build* a house before it exists:: - - >>> h = House() # build a house - - Now, ``h`` is called a ``House`` instance. 
An instance is therefore - a specific realisation of a class. - - iterable - A sequence that allows "walking" (iterating) over items, typically - using a loop such as:: - - >>> x = [1, 2, 3] - >>> [item**2 for item in x] - [1, 4, 9] - - It is often used in combination with ``enumerate``:: - >>> keys = ['a','b','c'] - >>> for n, k in enumerate(keys): - ... print("Key %d: %s" % (n, k)) - ... - Key 0: a - Key 1: b - Key 2: c - - itemsize - The size of the dtype element in bytes. - - list - A Python container that can hold any number of objects or items. - The items do not have to be of the same type, and can even be - lists themselves:: - - >>> x = [2, 2.0, "two", [2, 2.0]] - - The list `x` contains 4 items, each which can be accessed individually:: - - >>> x[2] # the string 'two' - 'two' - - >>> x[3] # a list, containing an integer 2 and a float 2.0 - [2, 2.0] - - It is also possible to select more than one item at a time, - using *slicing*:: - - >>> x[0:2] # or, equivalently, x[:2] - [2, 2.0] - - In code, arrays are often conveniently expressed as nested lists:: - - - >>> np.array([[1, 2], [3, 4]]) - array([[1, 2], - [3, 4]]) - - For more information, read the section on lists in the `Python - tutorial `_. For a mapping - type (key-value), see *dictionary*. - - little-endian - When storing a multi-byte value in memory as a sequence of bytes, the - sequence addresses/sends/stores the least significant byte first (lowest - address) and the most significant byte last (highest address). Common in - x86 processors. 
- - mask - A boolean array, used to select only certain elements for an operation:: - - >>> x = np.arange(5) - >>> x - array([0, 1, 2, 3, 4]) - - >>> mask = (x > 2) - >>> mask - array([False, False, False, True, True]) - - >>> x[mask] = -1 - >>> x - array([ 0, 1, 2, -1, -1]) - - masked array - Array that suppressed values indicated by a mask:: - - >>> x = np.ma.masked_array([np.nan, 2, np.nan], [True, False, True]) - >>> x - masked_array(data = [-- 2.0 --], - mask = [ True False True], - fill_value = 1e+20) - - >>> x + [1, 2, 3] - masked_array(data = [-- 4.0 --], - mask = [ True False True], - fill_value = 1e+20) - - - Masked arrays are often used when operating on arrays containing - missing or invalid entries. - - matrix - A 2-dimensional ndarray that preserves its two-dimensional nature - throughout operations. It has certain special operations, such as ``*`` - (matrix multiplication) and ``**`` (matrix power), defined:: - - >>> x = np.mat([[1, 2], [3, 4]]) - >>> x - matrix([[1, 2], - [3, 4]]) - - >>> x**2 - matrix([[ 7, 10], - [15, 22]]) - - method - A function associated with an object. For example, each ndarray has a - method called ``repeat``:: - - >>> x = np.array([1, 2, 3]) - >>> x.repeat(2) - array([1, 1, 2, 2, 3, 3]) - - ndarray - See *array*. - - record array - An :term:`ndarray` with :term:`structured data type` which has been - subclassed as ``np.recarray`` and whose dtype is of type ``np.record``, - making the fields of its data type to be accessible by attribute. - - reference - If ``a`` is a reference to ``b``, then ``(a is b) == True``. Therefore, - ``a`` and ``b`` are different names for the same Python object. - - row-major - A way to represent items in a N-dimensional array in the 1-dimensional - computer memory. 
In row-major order, the rightmost index "varies - the fastest": for example the array:: - - [[1, 2, 3], - [4, 5, 6]] - - is represented in the row-major order as:: - - [1, 2, 3, 4, 5, 6] - - Row-major order is also known as the C order, as the C programming - language uses it. New NumPy arrays are by default in row-major order. - - self - Often seen in method signatures, ``self`` refers to the instance - of the associated class. For example: - - >>> class Paintbrush(object): - ... color = 'blue' - ... - ... def paint(self): - ... print("Painting the city %s!" % self.color) - ... - >>> p = Paintbrush() - >>> p.color = 'red' - >>> p.paint() # self refers to 'p' - Painting the city red! - - slice - Used to select only certain elements from a sequence: - - >>> x = range(5) - >>> x - [0, 1, 2, 3, 4] - - >>> x[1:3] # slice from 1 to 3 (excluding 3 itself) - [1, 2] - - >>> x[1:5:2] # slice from 1 to 5, but skipping every second element - [1, 3] - - >>> x[::-1] # slice a sequence in reverse - [4, 3, 2, 1, 0] - - Arrays may have more than one dimension, each which can be sliced - individually: - - >>> x = np.array([[1, 2], [3, 4]]) - >>> x - array([[1, 2], - [3, 4]]) - - >>> x[:, 1] - array([2, 4]) - - structure - See :term:`structured data type` - - structured data type - A data type composed of other datatypes - - subarray data type - A :term:`structured data type` may contain a :term:`ndarray` with its - own dtype and shape: - - >>> dt = np.dtype([('a', np.int32), ('b', np.float32, (3,))]) - >>> np.zeros(3, dtype=dt) - array([(0, [0., 0., 0.]), (0, [0., 0., 0.]), (0, [0., 0., 0.])], - dtype=[('a', '` which is an alias to the name and is - commonly used for plotting. - - tuple - A sequence that may contain a variable number of types of any - kind. A tuple is immutable, i.e., once constructed it cannot be - changed. 
Similar to a list, it can be indexed and sliced:: - - >>> x = (1, 'one', [1, 2]) - >>> x - (1, 'one', [1, 2]) - - >>> x[0] - 1 - - >>> x[:2] - (1, 'one') - - A useful concept is "tuple unpacking", which allows variables to - be assigned to the contents of a tuple:: - - >>> x, y = (1, 2) - >>> x, y = 1, 2 - - This is often used when a function returns multiple values: - - >>> def return_many(): - ... return 1, 'alpha', None - - >>> a, b, c = return_many() - >>> a, b, c - (1, 'alpha', None) - - >>> a - 1 - >>> b - 'alpha' - - ufunc - Universal function. A fast element-wise, :term:`vectorized - ` array operation. Examples include ``add``, ``sin`` and - ``logical_or``. - - vectorization - Optimizing a looping block by specialized code. In a traditional sense, - vectorization performs the same operation on multiple elements with - fixed strides between them via specialized hardware. Compilers know how - to take advantage of well-constructed loops to implement such - optimizations. NumPy uses :ref:`vectorization ` - to mean any optimization via specialized code performing the same - operations on multiple elements, typically achieving speedups by - avoiding some of the overhead in looking up and converting the elements. - - view - An array that does not own its data, but refers to another array's - data instead. For example, we may create a view that only shows - every second element of another array:: - - >>> x = np.arange(5) - >>> x - array([0, 1, 2, 3, 4]) - - >>> y = x[::2] - >>> y - array([0, 2, 4]) - - >>> x[0] = 3 # changing x changes y as well, since y is a view on x - >>> y - array([3, 2, 4]) - - wrapper - Python is a high-level (highly abstracted, or English-like) language. - This abstraction comes at a price in execution speed, and sometimes - it becomes necessary to use lower level languages to do fast - computations. 
A wrapper is code that provides a bridge between - high and the low level languages, allowing, e.g., Python to execute - code written in C or Fortran. - - Examples include ctypes, SWIG and Cython (which wraps C and C++) - and f2py (which wraps Fortran). - -""" -from __future__ import division, absolute_import, print_function diff --git a/venv/lib/python3.7/site-packages/numpy/doc/indexing.py b/venv/lib/python3.7/site-packages/numpy/doc/indexing.py deleted file mode 100644 index 6760156..0000000 --- a/venv/lib/python3.7/site-packages/numpy/doc/indexing.py +++ /dev/null @@ -1,449 +0,0 @@ -""" -============== -Array indexing -============== - -Array indexing refers to any use of the square brackets ([]) to index -array values. There are many options to indexing, which give numpy -indexing great power, but with power comes some complexity and the -potential for confusion. This section is just an overview of the -various options and issues related to indexing. Aside from single -element indexing, the details on most of these options are to be -found in related sections. - -Assignment vs referencing -========================= - -Most of the following examples show the use of indexing when -referencing data in an array. The examples work just as well -when assigning to an array. See the section at the end for -specific examples and explanations on how assignments work. - -Single element indexing -======================= - -Single element indexing for a 1-D array is what one expects. It work -exactly like that for other standard Python sequences. It is 0-based, -and accepts negative indices for indexing from the end of the array. :: - - >>> x = np.arange(10) - >>> x[2] - 2 - >>> x[-2] - 8 - -Unlike lists and tuples, numpy arrays support multidimensional indexing -for multidimensional arrays. That means that it is not necessary to -separate each dimension's index into its own set of square brackets. 
:: - - >>> x.shape = (2,5) # now x is 2-dimensional - >>> x[1,3] - 8 - >>> x[1,-1] - 9 - -Note that if one indexes a multidimensional array with fewer indices -than dimensions, one gets a subdimensional array. For example: :: - - >>> x[0] - array([0, 1, 2, 3, 4]) - -That is, each index specified selects the array corresponding to the -rest of the dimensions selected. In the above example, choosing 0 -means that the remaining dimension of length 5 is being left unspecified, -and that what is returned is an array of that dimensionality and size. -It must be noted that the returned array is not a copy of the original, -but points to the same values in memory as does the original array. -In this case, the 1-D array at the first position (0) is returned. -So using a single index on the returned array, results in a single -element being returned. That is: :: - - >>> x[0][2] - 2 - -So note that ``x[0,2] = x[0][2]`` though the second case is more -inefficient as a new temporary array is created after the first index -that is subsequently indexed by 2. - -Note to those used to IDL or Fortran memory order as it relates to -indexing. NumPy uses C-order indexing. That means that the last -index usually represents the most rapidly changing memory location, -unlike Fortran or IDL, where the first index represents the most -rapidly changing location in memory. This difference represents a -great potential for confusion. - -Other indexing options -====================== - -It is possible to slice and stride arrays to extract arrays of the -same number of dimensions, but of different sizes than the original. -The slicing and striding works exactly the same way it does for lists -and tuples except that they can be applied to multiple dimensions as -well. 
A few examples illustrates best: :: - - >>> x = np.arange(10) - >>> x[2:5] - array([2, 3, 4]) - >>> x[:-7] - array([0, 1, 2]) - >>> x[1:7:2] - array([1, 3, 5]) - >>> y = np.arange(35).reshape(5,7) - >>> y[1:5:2,::3] - array([[ 7, 10, 13], - [21, 24, 27]]) - -Note that slices of arrays do not copy the internal array data but -only produce new views of the original data. This is different from -list or tuple slicing and an explicit ``copy()`` is recommended if -the original data is not required anymore. - -It is possible to index arrays with other arrays for the purposes of -selecting lists of values out of arrays into new arrays. There are -two different ways of accomplishing this. One uses one or more arrays -of index values. The other involves giving a boolean array of the proper -shape to indicate the values to be selected. Index arrays are a very -powerful tool that allow one to avoid looping over individual elements in -arrays and thus greatly improve performance. - -It is possible to use special features to effectively increase the -number of dimensions in an array through indexing so the resulting -array acquires the shape needed for use in an expression or with a -specific function. - -Index arrays -============ - -NumPy arrays may be indexed with other arrays (or any other sequence- -like object that can be converted to an array, such as lists, with the -exception of tuples; see the end of this document for why this is). The -use of index arrays ranges from simple, straightforward cases to -complex, hard-to-understand cases. For all cases of index arrays, what -is returned is a copy of the original data, not a view as one gets for -slices. - -Index arrays must be of integer type. Each value in the array indicates -which value in the array to use in place of the index. 
To illustrate: :: - - >>> x = np.arange(10,1,-1) - >>> x - array([10, 9, 8, 7, 6, 5, 4, 3, 2]) - >>> x[np.array([3, 3, 1, 8])] - array([7, 7, 9, 2]) - - -The index array consisting of the values 3, 3, 1 and 8 correspondingly -create an array of length 4 (same as the index array) where each index -is replaced by the value the index array has in the array being indexed. - -Negative values are permitted and work as they do with single indices -or slices: :: - - >>> x[np.array([3,3,-3,8])] - array([7, 7, 4, 2]) - -It is an error to have index values out of bounds: :: - - >>> x[np.array([3, 3, 20, 8])] - : index 20 out of bounds 0<=index<9 - -Generally speaking, what is returned when index arrays are used is -an array with the same shape as the index array, but with the type -and values of the array being indexed. As an example, we can use a -multidimensional index array instead: :: - - >>> x[np.array([[1,1],[2,3]])] - array([[9, 9], - [8, 7]]) - -Indexing Multi-dimensional arrays -================================= - -Things become more complex when multidimensional arrays are indexed, -particularly with multidimensional index arrays. These tend to be -more unusual uses, but they are permitted, and they are useful for some -problems. We'll start with the simplest multidimensional case (using -the array y from the previous examples): :: - - >>> y[np.array([0,2,4]), np.array([0,1,2])] - array([ 0, 15, 30]) - -In this case, if the index arrays have a matching shape, and there is -an index array for each dimension of the array being indexed, the -resultant array has the same shape as the index arrays, and the values -correspond to the index set for each position in the index arrays. In -this example, the first index value is 0 for both index arrays, and -thus the first value of the resultant array is y[0,0]. The next value -is y[2,1], and the last is y[4,2]. - -If the index arrays do not have the same shape, there is an attempt to -broadcast them to the same shape. 
If they cannot be broadcast to the -same shape, an exception is raised: :: - - >>> y[np.array([0,2,4]), np.array([0,1])] - : shape mismatch: objects cannot be - broadcast to a single shape - -The broadcasting mechanism permits index arrays to be combined with -scalars for other indices. The effect is that the scalar value is used -for all the corresponding values of the index arrays: :: - - >>> y[np.array([0,2,4]), 1] - array([ 1, 15, 29]) - -Jumping to the next level of complexity, it is possible to only -partially index an array with index arrays. It takes a bit of thought -to understand what happens in such cases. For example if we just use -one index array with y: :: - - >>> y[np.array([0,2,4])] - array([[ 0, 1, 2, 3, 4, 5, 6], - [14, 15, 16, 17, 18, 19, 20], - [28, 29, 30, 31, 32, 33, 34]]) - -What results is the construction of a new array where each value of -the index array selects one row from the array being indexed and the -resultant array has the resulting shape (number of index elements, -size of row). - -An example of where this may be useful is for a color lookup table -where we want to map the values of an image into RGB triples for -display. The lookup table could have a shape (nlookup, 3). Indexing -such an array with an image with shape (ny, nx) with dtype=np.uint8 -(or any integer type so long as values are with the bounds of the -lookup table) will result in an array of shape (ny, nx, 3) where a -triple of RGB values is associated with each pixel location. - -In general, the shape of the resultant array will be the concatenation -of the shape of the index array (or the shape that all the index arrays -were broadcast to) with the shape of any unused dimensions (those not -indexed) in the array being indexed. - -Boolean or "mask" index arrays -============================== - -Boolean arrays used as indices are treated in a different manner -entirely than index arrays. 
Boolean arrays must be of the same shape -as the initial dimensions of the array being indexed. In the -most straightforward case, the boolean array has the same shape: :: - - >>> b = y>20 - >>> y[b] - array([21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34]) - -Unlike in the case of integer index arrays, in the boolean case, the -result is a 1-D array containing all the elements in the indexed array -corresponding to all the true elements in the boolean array. The -elements in the indexed array are always iterated and returned in -:term:`row-major` (C-style) order. The result is also identical to -``y[np.nonzero(b)]``. As with index arrays, what is returned is a copy -of the data, not a view as one gets with slices. - -The result will be multidimensional if y has more dimensions than b. -For example: :: - - >>> b[:,5] # use a 1-D boolean whose first dim agrees with the first dim of y - array([False, False, False, True, True]) - >>> y[b[:,5]] - array([[21, 22, 23, 24, 25, 26, 27], - [28, 29, 30, 31, 32, 33, 34]]) - -Here the 4th and 5th rows are selected from the indexed array and -combined to make a 2-D array. - -In general, when the boolean array has fewer dimensions than the array -being indexed, this is equivalent to y[b, ...], which means -y is indexed by b followed by as many : as are needed to fill -out the rank of y. -Thus the shape of the result is one dimension containing the number -of True elements of the boolean array, followed by the remaining -dimensions of the array being indexed. 
- -For example, using a 2-D boolean array of shape (2,3) -with four True elements to select rows from a 3-D array of shape -(2,3,5) results in a 2-D result of shape (4,5): :: - - >>> x = np.arange(30).reshape(2,3,5) - >>> x - array([[[ 0, 1, 2, 3, 4], - [ 5, 6, 7, 8, 9], - [10, 11, 12, 13, 14]], - [[15, 16, 17, 18, 19], - [20, 21, 22, 23, 24], - [25, 26, 27, 28, 29]]]) - >>> b = np.array([[True, True, False], [False, True, True]]) - >>> x[b] - array([[ 0, 1, 2, 3, 4], - [ 5, 6, 7, 8, 9], - [20, 21, 22, 23, 24], - [25, 26, 27, 28, 29]]) - -For further details, consult the numpy reference documentation on array indexing. - -Combining index arrays with slices -================================== - -Index arrays may be combined with slices. For example: :: - - >>> y[np.array([0,2,4]),1:3] - array([[ 1, 2], - [15, 16], - [29, 30]]) - -In effect, the slice is converted to an index array -np.array([[1,2]]) (shape (1,2)) that is broadcast with the index array -to produce a resultant array of shape (3,2). - -Likewise, slicing can be combined with broadcasted boolean indices: :: - - >>> b = y > 20 - >>> b - array([[False, False, False, False, False, False, False], - [False, False, False, False, False, False, False], - [False, False, False, False, False, False, False], - [ True, True, True, True, True, True, True], - [ True, True, True, True, True, True, True]]) - >>> y[b[:,5],1:3] - array([[22, 23], - [29, 30]]) - -Structural indexing tools -========================= - -To facilitate easy matching of array shapes with expressions and in -assignments, the np.newaxis object can be used within array indices -to add new dimensions with a size of 1. For example: :: - - >>> y.shape - (5, 7) - >>> y[:,np.newaxis,:].shape - (5, 1, 7) - -Note that there are no new elements in the array, just that the -dimensionality is increased. This can be handy to combine two -arrays in a way that otherwise would require explicitly reshaping -operations. 
For example: :: - - >>> x = np.arange(5) - >>> x[:,np.newaxis] + x[np.newaxis,:] - array([[0, 1, 2, 3, 4], - [1, 2, 3, 4, 5], - [2, 3, 4, 5, 6], - [3, 4, 5, 6, 7], - [4, 5, 6, 7, 8]]) - -The ellipsis syntax maybe used to indicate selecting in full any -remaining unspecified dimensions. For example: :: - - >>> z = np.arange(81).reshape(3,3,3,3) - >>> z[1,...,2] - array([[29, 32, 35], - [38, 41, 44], - [47, 50, 53]]) - -This is equivalent to: :: - - >>> z[1,:,:,2] - array([[29, 32, 35], - [38, 41, 44], - [47, 50, 53]]) - -Assigning values to indexed arrays -================================== - -As mentioned, one can select a subset of an array to assign to using -a single index, slices, and index and mask arrays. The value being -assigned to the indexed array must be shape consistent (the same shape -or broadcastable to the shape the index produces). For example, it is -permitted to assign a constant to a slice: :: - - >>> x = np.arange(10) - >>> x[2:7] = 1 - -or an array of the right size: :: - - >>> x[2:7] = np.arange(5) - -Note that assignments may result in changes if assigning -higher types to lower types (like floats to ints) or even -exceptions (assigning complex to floats or ints): :: - - >>> x[1] = 1.2 - >>> x[1] - 1 - >>> x[1] = 1.2j - : can't convert complex to long; use - long(abs(z)) - - -Unlike some of the references (such as array and mask indices) -assignments are always made to the original data in the array -(indeed, nothing else would make sense!). Note though, that some -actions may not work as one may naively expect. This particular -example is often surprising to people: :: - - >>> x = np.arange(0, 50, 10) - >>> x - array([ 0, 10, 20, 30, 40]) - >>> x[np.array([1, 1, 3, 1])] += 1 - >>> x - array([ 0, 11, 20, 31, 40]) - -Where people expect that the 1st location will be incremented by 3. -In fact, it will only be incremented by 1. 
The reason is because -a new array is extracted from the original (as a temporary) containing -the values at 1, 1, 3, 1, then the value 1 is added to the temporary, -and then the temporary is assigned back to the original array. Thus -the value of the array at x[1]+1 is assigned to x[1] three times, -rather than being incremented 3 times. - -Dealing with variable numbers of indices within programs -======================================================== - -The index syntax is very powerful but limiting when dealing with -a variable number of indices. For example, if you want to write -a function that can handle arguments with various numbers of -dimensions without having to write special case code for each -number of possible dimensions, how can that be done? If one -supplies to the index a tuple, the tuple will be interpreted -as a list of indices. For example (using the previous definition -for the array z): :: - - >>> indices = (1,1,1,1) - >>> z[indices] - 40 - -So one can use code to construct tuples of any number of indices -and then use these within an index. - -Slices can be specified within programs by using the slice() function -in Python. For example: :: - - >>> indices = (1,1,1,slice(0,2)) # same as [1,1,1,0:2] - >>> z[indices] - array([39, 40]) - -Likewise, ellipsis can be specified by code by using the Ellipsis -object: :: - - >>> indices = (1, Ellipsis, 1) # same as [1,...,1] - >>> z[indices] - array([[28, 31, 34], - [37, 40, 43], - [46, 49, 52]]) - -For this reason it is possible to use the output from the np.nonzero() -function directly as an index since it always returns a tuple of index -arrays. - -Because the special treatment of tuples, they are not automatically -converted to an array as a list would be. As an example: :: - - >>> z[[1,1,1,1]] # produces a large array - array([[[[27, 28, 29], - [30, 31, 32], ... 
- >>> z[(1,1,1,1)] # returns a single value - 40 - -""" -from __future__ import division, absolute_import, print_function diff --git a/venv/lib/python3.7/site-packages/numpy/doc/internals.py b/venv/lib/python3.7/site-packages/numpy/doc/internals.py deleted file mode 100644 index a14fee7..0000000 --- a/venv/lib/python3.7/site-packages/numpy/doc/internals.py +++ /dev/null @@ -1,163 +0,0 @@ -""" -=============== -Array Internals -=============== - -Internal organization of numpy arrays -===================================== - -It helps to understand a bit about how numpy arrays are handled under the covers to help understand numpy better. This section will not go into great detail. Those wishing to understand the full details are referred to Travis Oliphant's book "Guide to NumPy". - -NumPy arrays consist of two major components, the raw array data (from now on, -referred to as the data buffer), and the information about the raw array data. -The data buffer is typically what people think of as arrays in C or Fortran, -a contiguous (and fixed) block of memory containing fixed sized data items. -NumPy also contains a significant set of data that describes how to interpret -the data in the data buffer. This extra information contains (among other things): - - 1) The basic data element's size in bytes - 2) The start of the data within the data buffer (an offset relative to the - beginning of the data buffer). - 3) The number of dimensions and the size of each dimension - 4) The separation between elements for each dimension (the 'stride'). This - does not have to be a multiple of the element size - 5) The byte order of the data (which may not be the native byte order) - 6) Whether the buffer is read-only - 7) Information (via the dtype object) about the interpretation of the basic - data element. The basic data element may be as simple as a int or a float, - or it may be a compound object (e.g., struct-like), a fixed character field, - or Python object pointers. 
- 8) Whether the array is to interpreted as C-order or Fortran-order. - -This arrangement allow for very flexible use of arrays. One thing that it allows -is simple changes of the metadata to change the interpretation of the array buffer. -Changing the byteorder of the array is a simple change involving no rearrangement -of the data. The shape of the array can be changed very easily without changing -anything in the data buffer or any data copying at all - -Among other things that are made possible is one can create a new array metadata -object that uses the same data buffer -to create a new view of that data buffer that has a different interpretation -of the buffer (e.g., different shape, offset, byte order, strides, etc) but -shares the same data bytes. Many operations in numpy do just this such as -slices. Other operations, such as transpose, don't move data elements -around in the array, but rather change the information about the shape and strides so that the indexing of the array changes, but the data in the doesn't move. - -Typically these new versions of the array metadata but the same data buffer are -new 'views' into the data buffer. There is a different ndarray object, but it -uses the same data buffer. This is why it is necessary to force copies through -use of the .copy() method if one really wants to make a new and independent -copy of the data buffer. - -New views into arrays mean the object reference counts for the data buffer -increase. Simply doing away with the original array object will not remove the -data buffer if other views of it still exist. - -Multidimensional Array Indexing Order Issues -============================================ - -What is the right way to index -multi-dimensional arrays? Before you jump to conclusions about the one and -true way to index multi-dimensional arrays, it pays to understand why this is -a confusing issue. 
This section will try to explain in detail how numpy -indexing works and why we adopt the convention we do for images, and when it -may be appropriate to adopt other conventions. - -The first thing to understand is -that there are two conflicting conventions for indexing 2-dimensional arrays. -Matrix notation uses the first index to indicate which row is being selected and -the second index to indicate which column is selected. This is opposite the -geometrically oriented-convention for images where people generally think the -first index represents x position (i.e., column) and the second represents y -position (i.e., row). This alone is the source of much confusion; -matrix-oriented users and image-oriented users expect two different things with -regard to indexing. - -The second issue to understand is how indices correspond -to the order the array is stored in memory. In Fortran the first index is the -most rapidly varying index when moving through the elements of a two -dimensional array as it is stored in memory. If you adopt the matrix -convention for indexing, then this means the matrix is stored one column at a -time (since the first index moves to the next row as it changes). Thus Fortran -is considered a Column-major language. C has just the opposite convention. In -C, the last index changes most rapidly as one moves through the array as -stored in memory. Thus C is a Row-major language. The matrix is stored by -rows. Note that in both cases it presumes that the matrix convention for -indexing is being used, i.e., for both Fortran and C, the first index is the -row. Note this convention implies that the indexing convention is invariant -and that the data order changes to keep that so. - -But that's not the only way -to look at it. Suppose one has large two-dimensional arrays (images or -matrices) stored in data files. Suppose the data are stored by rows rather than -by columns. 
If we are to preserve our index convention (whether matrix or -image) that means that depending on the language we use, we may be forced to -reorder the data if it is read into memory to preserve our indexing -convention. For example if we read row-ordered data into memory without -reordering, it will match the matrix indexing convention for C, but not for -Fortran. Conversely, it will match the image indexing convention for Fortran, -but not for C. For C, if one is using data stored in row order, and one wants -to preserve the image index convention, the data must be reordered when -reading into memory. - -In the end, which you do for Fortran or C depends on -which is more important, not reordering data or preserving the indexing -convention. For large images, reordering data is potentially expensive, and -often the indexing convention is inverted to avoid that. - -The situation with -numpy makes this issue yet more complicated. The internal machinery of numpy -arrays is flexible enough to accept any ordering of indices. One can simply -reorder indices by manipulating the internal stride information for arrays -without reordering the data at all. NumPy will know how to map the new index -order to the data without moving the data. - -So if this is true, why not choose -the index order that matches what you most expect? In particular, why not define -row-ordered images to use the image convention? (This is sometimes referred -to as the Fortran convention vs the C convention, thus the 'C' and 'FORTRAN' -order options for array ordering in numpy.) The drawback of doing this is -potential performance penalties. It's common to access the data sequentially, -either implicitly in array operations or explicitly by looping over rows of an -image. When that is done, then the data will be accessed in non-optimal order. 
-As the first index is incremented, what is actually happening is that elements -spaced far apart in memory are being sequentially accessed, with usually poor -memory access speeds. For example, for a two dimensional image 'im' defined so -that im[0, 10] represents the value at x=0, y=10. To be consistent with usual -Python behavior then im[0] would represent a column at x=0. Yet that data -would be spread over the whole array since the data are stored in row order. -Despite the flexibility of numpy's indexing, it can't really paper over the fact -basic operations are rendered inefficient because of data order or that getting -contiguous subarrays is still awkward (e.g., im[:,0] for the first row, vs -im[0]), thus one can't use an idiom such as for row in im; for col in im does -work, but doesn't yield contiguous column data. - -As it turns out, numpy is -smart enough when dealing with ufuncs to determine which index is the most -rapidly varying one in memory and uses that for the innermost loop. Thus for -ufuncs there is no large intrinsic advantage to either approach in most cases. -On the other hand, use of .flat with an FORTRAN ordered array will lead to -non-optimal memory access as adjacent elements in the flattened array (iterator, -actually) are not contiguous in memory. - -Indeed, the fact is that Python -indexing on lists and other sequences naturally leads to an outside-to inside -ordering (the first index gets the largest grouping, the next the next largest, -and the last gets the smallest element). Since image data are normally stored -by rows, this corresponds to position within rows being the last item indexed. 
- -If you do want to use Fortran ordering realize that -there are two approaches to consider: 1) accept that the first index is just not -the most rapidly changing in memory and have all your I/O routines reorder -your data when going from memory to disk or visa versa, or use numpy's -mechanism for mapping the first index to the most rapidly varying data. We -recommend the former if possible. The disadvantage of the latter is that many -of numpy's functions will yield arrays without Fortran ordering unless you are -careful to use the 'order' keyword. Doing this would be highly inconvenient. - -Otherwise we recommend simply learning to reverse the usual order of indices -when accessing elements of an array. Granted, it goes against the grain, but -it is more in line with Python semantics and the natural order of the data. - -""" -from __future__ import division, absolute_import, print_function diff --git a/venv/lib/python3.7/site-packages/numpy/doc/misc.py b/venv/lib/python3.7/site-packages/numpy/doc/misc.py deleted file mode 100644 index a76abe1..0000000 --- a/venv/lib/python3.7/site-packages/numpy/doc/misc.py +++ /dev/null @@ -1,227 +0,0 @@ -""" -============= -Miscellaneous -============= - -IEEE 754 Floating Point Special Values --------------------------------------- - -Special values defined in numpy: nan, inf, - -NaNs can be used as a poor-man's mask (if you don't care what the -original value was) - -Note: cannot use equality to test NaNs. E.g.: :: - - >>> myarr = np.array([1., 0., np.nan, 3.]) - >>> np.nonzero(myarr == np.nan) - (array([], dtype=int64),) - >>> np.nan == np.nan # is always False! Use special numpy functions instead. - False - >>> myarr[myarr == np.nan] = 0. # doesn't work - >>> myarr - array([ 1., 0., NaN, 3.]) - >>> myarr[np.isnan(myarr)] = 0. 
# use this instead find - >>> myarr - array([ 1., 0., 0., 3.]) - -Other related special value functions: :: - - isinf(): True if value is inf - isfinite(): True if not nan or inf - nan_to_num(): Map nan to 0, inf to max float, -inf to min float - -The following corresponds to the usual functions except that nans are excluded -from the results: :: - - nansum() - nanmax() - nanmin() - nanargmax() - nanargmin() - - >>> x = np.arange(10.) - >>> x[3] = np.nan - >>> x.sum() - nan - >>> np.nansum(x) - 42.0 - -How numpy handles numerical exceptions --------------------------------------- - -The default is to ``'warn'`` for ``invalid``, ``divide``, and ``overflow`` -and ``'ignore'`` for ``underflow``. But this can be changed, and it can be -set individually for different kinds of exceptions. The different behaviors -are: - - - 'ignore' : Take no action when the exception occurs. - - 'warn' : Print a `RuntimeWarning` (via the Python `warnings` module). - - 'raise' : Raise a `FloatingPointError`. - - 'call' : Call a function specified using the `seterrcall` function. - - 'print' : Print a warning directly to ``stdout``. - - 'log' : Record error in a Log object specified by `seterrcall`. - -These behaviors can be set for all kinds of errors or specific ones: - - - all : apply to all numeric exceptions - - invalid : when NaNs are generated - - divide : divide by zero (for integers as well!) - - overflow : floating point overflows - - underflow : floating point underflows - -Note that integer divide-by-zero is handled by the same machinery. -These behaviors are set on a per-thread basis. - -Examples --------- - -:: - - >>> oldsettings = np.seterr(all='warn') - >>> np.zeros(5,dtype=np.float32)/0. - invalid value encountered in divide - >>> j = np.seterr(under='ignore') - >>> np.array([1.e-100])**10 - >>> j = np.seterr(invalid='raise') - >>> np.sqrt(np.array([-1.])) - FloatingPointError: invalid value encountered in sqrt - >>> def errorhandler(errstr, errflag): - ... 
print("saw stupid error!") - >>> np.seterrcall(errorhandler) - - >>> j = np.seterr(all='call') - >>> np.zeros(5, dtype=np.int32)/0 - FloatingPointError: invalid value encountered in divide - saw stupid error! - >>> j = np.seterr(**oldsettings) # restore previous - ... # error-handling settings - -Interfacing to C ----------------- -Only a survey of the choices. Little detail on how each works. - -1) Bare metal, wrap your own C-code manually. - - - Plusses: - - - Efficient - - No dependencies on other tools - - - Minuses: - - - Lots of learning overhead: - - - need to learn basics of Python C API - - need to learn basics of numpy C API - - need to learn how to handle reference counting and love it. - - - Reference counting often difficult to get right. - - - getting it wrong leads to memory leaks, and worse, segfaults - - - API will change for Python 3.0! - -2) Cython - - - Plusses: - - - avoid learning C API's - - no dealing with reference counting - - can code in pseudo python and generate C code - - can also interface to existing C code - - should shield you from changes to Python C api - - has become the de-facto standard within the scientific Python community - - fast indexing support for arrays - - - Minuses: - - - Can write code in non-standard form which may become obsolete - - Not as flexible as manual wrapping - -3) ctypes - - - Plusses: - - - part of Python standard library - - good for interfacing to existing sharable libraries, particularly - Windows DLLs - - avoids API/reference counting issues - - good numpy support: arrays have all these in their ctypes - attribute: :: - - a.ctypes.data a.ctypes.get_strides - a.ctypes.data_as a.ctypes.shape - a.ctypes.get_as_parameter a.ctypes.shape_as - a.ctypes.get_data a.ctypes.strides - a.ctypes.get_shape a.ctypes.strides_as - - - Minuses: - - - can't use for writing code to be turned into C extensions, only a wrapper - tool. 
- -4) SWIG (automatic wrapper generator) - - - Plusses: - - - around a long time - - multiple scripting language support - - C++ support - - Good for wrapping large (many functions) existing C libraries - - - Minuses: - - - generates lots of code between Python and the C code - - can cause performance problems that are nearly impossible to optimize - out - - interface files can be hard to write - - doesn't necessarily avoid reference counting issues or needing to know - API's - -5) scipy.weave - - - Plusses: - - - can turn many numpy expressions into C code - - dynamic compiling and loading of generated C code - - can embed pure C code in Python module and have weave extract, generate - interfaces and compile, etc. - - - Minuses: - - - Future very uncertain: it's the only part of Scipy not ported to Python 3 - and is effectively deprecated in favor of Cython. - -6) Psyco - - - Plusses: - - - Turns pure python into efficient machine code through jit-like - optimizations - - very fast when it optimizes well - - - Minuses: - - - Only on intel (windows?) - - Doesn't do much for numpy? - -Interfacing to Fortran: ------------------------ -The clear choice to wrap Fortran code is -`f2py `_. - -Pyfort is an older alternative, but not supported any longer. -Fwrap is a newer project that looked promising but isn't being developed any -longer. 
- -Interfacing to C++: -------------------- - 1) Cython - 2) CXX - 3) Boost.python - 4) SWIG - 5) SIP (used mainly in PyQT) - -""" -from __future__ import division, absolute_import, print_function diff --git a/venv/lib/python3.7/site-packages/numpy/doc/structured_arrays.py b/venv/lib/python3.7/site-packages/numpy/doc/structured_arrays.py deleted file mode 100644 index 1343d2a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/doc/structured_arrays.py +++ /dev/null @@ -1,647 +0,0 @@ -""" -================= -Structured Arrays -================= - -Introduction -============ - -Structured arrays are ndarrays whose datatype is a composition of simpler -datatypes organized as a sequence of named :term:`fields `. For example, -:: - - >>> x = np.array([('Rex', 9, 81.0), ('Fido', 3, 27.0)], - ... dtype=[('name', 'U10'), ('age', 'i4'), ('weight', 'f4')]) - >>> x - array([('Rex', 9, 81.), ('Fido', 3, 27.)], - dtype=[('name', 'U10'), ('age', '>> x[1] - ('Fido', 3, 27.0) - -You can access and modify individual fields of a structured array by indexing -with the field name:: - - >>> x['age'] - array([9, 3], dtype=int32) - >>> x['age'] = 5 - >>> x - array([('Rex', 5, 81.), ('Fido', 5, 27.)], - dtype=[('name', 'U10'), ('age', '` reference page, and in -summary they are: - -1. A list of tuples, one tuple per field - - Each tuple has the form ``(fieldname, datatype, shape)`` where shape is - optional. ``fieldname`` is a string (or tuple if titles are used, see - :ref:`Field Titles ` below), ``datatype`` may be any object - convertible to a datatype, and ``shape`` is a tuple of integers specifying - subarray shape. - - >>> np.dtype([('x', 'f4'), ('y', np.float32), ('z', 'f4', (2, 2))]) - dtype([('x', '>> np.dtype([('x', 'f4'), ('', 'i4'), ('z', 'i8')]) - dtype([('x', '` may be used in a string and separated by - commas. The itemsize and byte offsets of the fields are determined - automatically, and the field names are given the default names ``f0``, - ``f1``, etc. 
:: - - >>> np.dtype('i8, f4, S3') - dtype([('f0', '>> np.dtype('3int8, float32, (2, 3)float64') - dtype([('f0', 'i1', (3,)), ('f1', '>> np.dtype({'names': ['col1', 'col2'], 'formats': ['i4', 'f4']}) - dtype([('col1', '>> np.dtype({'names': ['col1', 'col2'], - ... 'formats': ['i4', 'f4'], - ... 'offsets': [0, 4], - ... 'itemsize': 12}) - dtype({'names':['col1','col2'], 'formats':['` below. - -4. A dictionary of field names - - The use of this form of specification is discouraged, but documented here - because older numpy code may use it. The keys of the dictionary are the - field names and the values are tuples specifying type and offset:: - - >>> np.dtype({'col1': ('i1', 0), 'col2': ('f4', 1)}) - dtype([('col1', 'i1'), ('col2', '` may be - specified by using a 3-tuple, see below. - -Manipulating and Displaying Structured Datatypes ------------------------------------------------- - -The list of field names of a structured datatype can be found in the ``names`` -attribute of the dtype object:: - - >>> d = np.dtype([('x', 'i8'), ('y', 'f4')]) - >>> d.names - ('x', 'y') - -The field names may be modified by assigning to the ``names`` attribute using a -sequence of strings of the same length. - -The dtype object also has a dictionary-like attribute, ``fields``, whose keys -are the field names (and :ref:`Field Titles `, see below) and whose -values are tuples containing the dtype and byte offset of each field. :: - - >>> d.fields - mappingproxy({'x': (dtype('int64'), 0), 'y': (dtype('float32'), 8)}) - -Both the ``names`` and ``fields`` attributes will equal ``None`` for -unstructured arrays. The recommended way to test if a dtype is structured is -with `if dt.names is not None` rather than `if dt.names`, to account for dtypes -with 0 fields. - -The string representation of a structured datatype is shown in the "list of -tuples" form if possible, otherwise numpy falls back to using the more general -dictionary form. - -.. 
_offsets-and-alignment: - -Automatic Byte Offsets and Alignment ------------------------------------- - -Numpy uses one of two methods to automatically determine the field byte offsets -and the overall itemsize of a structured datatype, depending on whether -``align=True`` was specified as a keyword argument to :func:`numpy.dtype`. - -By default (``align=False``), numpy will pack the fields together such that -each field starts at the byte offset the previous field ended, and the fields -are contiguous in memory. :: - - >>> def print_offsets(d): - ... print("offsets:", [d.fields[name][1] for name in d.names]) - ... print("itemsize:", d.itemsize) - >>> print_offsets(np.dtype('u1, u1, i4, u1, i8, u2')) - offsets: [0, 1, 2, 6, 7, 15] - itemsize: 17 - -If ``align=True`` is set, numpy will pad the structure in the same way many C -compilers would pad a C-struct. Aligned structures can give a performance -improvement in some cases, at the cost of increased datatype size. Padding -bytes are inserted between fields such that each field's byte offset will be a -multiple of that field's alignment, which is usually equal to the field's size -in bytes for simple datatypes, see :c:member:`PyArray_Descr.alignment`. The -structure will also have trailing padding added so that its itemsize is a -multiple of the largest field's alignment. :: - - >>> print_offsets(np.dtype('u1, u1, i4, u1, i8, u2', align=True)) - offsets: [0, 1, 4, 8, 16, 24] - itemsize: 32 - -Note that although almost all modern C compilers pad in this way by default, -padding in C structs is C-implementation-dependent so this memory layout is not -guaranteed to exactly match that of a corresponding struct in a C program. Some -work may be needed, either on the numpy side or the C side, to obtain exact -correspondence. 
- -If offsets were specified using the optional ``offsets`` key in the -dictionary-based dtype specification, setting ``align=True`` will check that -each field's offset is a multiple of its size and that the itemsize is a -multiple of the largest field size, and raise an exception if not. - -If the offsets of the fields and itemsize of a structured array satisfy the -alignment conditions, the array will have the ``ALIGNED`` :attr:`flag -` set. - -A convenience function :func:`numpy.lib.recfunctions.repack_fields` converts an -aligned dtype or array to a packed one and vice versa. It takes either a dtype -or structured ndarray as an argument, and returns a copy with fields re-packed, -with or without padding bytes. - -.. _titles: - -Field Titles ------------- - -In addition to field names, fields may also have an associated :term:`title`, -an alternate name, which is sometimes used as an additional description or -alias for the field. The title may be used to index an array, just like a -field name. - -To add titles when using the list-of-tuples form of dtype specification, the -field name may be specified as a tuple of two strings instead of a single -string, which will be the field's title and field name respectively. For -example:: - - >>> np.dtype([(('my title', 'name'), 'f4')]) - dtype([(('my title', 'name'), '>> np.dtype({'name': ('i4', 0, 'my title')}) - dtype([(('my title', 'name'), '>> for name in d.names: - ... print(d.fields[name][:2]) - (dtype('int64'), 0) - (dtype('float32'), 8) - -Union types ------------ - -Structured datatypes are implemented in numpy to have base type -:class:`numpy.void` by default, but it is possible to interpret other numpy -types as structured types using the ``(base_dtype, dtype)`` form of dtype -specification described in -:ref:`Data Type Objects `. Here, ``base_dtype`` is -the desired underlying dtype, and fields and flags will be copied from -``dtype``. This dtype is similar to a 'union' in C. 
- -Indexing and Assignment to Structured arrays -============================================ - -Assigning data to a Structured Array ------------------------------------- - -There are a number of ways to assign values to a structured array: Using python -tuples, using scalar values, or using other structured arrays. - -Assignment from Python Native Types (Tuples) -```````````````````````````````````````````` - -The simplest way to assign values to a structured array is using python tuples. -Each assigned value should be a tuple of length equal to the number of fields -in the array, and not a list or array as these will trigger numpy's -broadcasting rules. The tuple's elements are assigned to the successive fields -of the array, from left to right:: - - >>> x = np.array([(1, 2, 3), (4, 5, 6)], dtype='i8, f4, f8') - >>> x[1] = (7, 8, 9) - >>> x - array([(1, 2., 3.), (7, 8., 9.)], - dtype=[('f0', '>> x = np.zeros(2, dtype='i8, f4, ?, S1') - >>> x[:] = 3 - >>> x - array([(3, 3., True, b'3'), (3, 3., True, b'3')], - dtype=[('f0', '>> x[:] = np.arange(2) - >>> x - array([(0, 0., False, b'0'), (1, 1., True, b'1')], - dtype=[('f0', '>> twofield = np.zeros(2, dtype=[('A', 'i4'), ('B', 'i4')]) - >>> onefield = np.zeros(2, dtype=[('A', 'i4')]) - >>> nostruct = np.zeros(2, dtype='i4') - >>> nostruct[:] = twofield - Traceback (most recent call last): - ... 
- TypeError: Cannot cast scalar from dtype([('A', '>> a = np.zeros(3, dtype=[('a', 'i8'), ('b', 'f4'), ('c', 'S3')]) - >>> b = np.ones(3, dtype=[('x', 'f4'), ('y', 'S3'), ('z', 'O')]) - >>> b[:] = a - >>> b - array([(0., b'0.0', b''), (0., b'0.0', b''), (0., b'0.0', b'')], - dtype=[('x', '>> x = np.array([(1, 2), (3, 4)], dtype=[('foo', 'i8'), ('bar', 'f4')]) - >>> x['foo'] - array([1, 3]) - >>> x['foo'] = 10 - >>> x - array([(10, 2.), (10, 4.)], - dtype=[('foo', '>> y = x['bar'] - >>> y[:] = 11 - >>> x - array([(10, 11.), (10, 11.)], - dtype=[('foo', '>> y.dtype, y.shape, y.strides - (dtype('float32'), (2,), (12,)) - -If the accessed field is a subarray, the dimensions of the subarray -are appended to the shape of the result:: - - >>> x = np.zeros((2, 2), dtype=[('a', np.int32), ('b', np.float64, (3, 3))]) - >>> x['a'].shape - (2, 2) - >>> x['b'].shape - (2, 2, 3, 3) - -Accessing Multiple Fields -``````````````````````````` - -One can index and assign to a structured array with a multi-field index, where -the index is a list of field names. - -.. warning:: - The behavior of multi-field indexes changed from Numpy 1.15 to Numpy 1.16. - -The result of indexing with a multi-field index is a view into the original -array, as follows:: - - >>> a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'i4'), ('c', 'f4')]) - >>> a[['a', 'c']] - array([(0, 0.), (0, 0.), (0, 0.)], - dtype={'names':['a','c'], 'formats':['>> a[['a', 'c']].view('i8') # Fails in Numpy 1.16 - Traceback (most recent call last): - File "", line 1, in - ValueError: When changing to a smaller dtype, its size must be a divisor of the size of original dtype - - will need to be changed. This code has raised a ``FutureWarning`` since - Numpy 1.12, and similar code has raised ``FutureWarning`` since 1.7. - - In 1.16 a number of functions have been introduced in the - :mod:`numpy.lib.recfunctions` module to help users account for this - change. These are - :func:`numpy.lib.recfunctions.repack_fields`. 
- :func:`numpy.lib.recfunctions.structured_to_unstructured`, - :func:`numpy.lib.recfunctions.unstructured_to_structured`, - :func:`numpy.lib.recfunctions.apply_along_fields`, - :func:`numpy.lib.recfunctions.assign_fields_by_name`, and - :func:`numpy.lib.recfunctions.require_fields`. - - The function :func:`numpy.lib.recfunctions.repack_fields` can always be - used to reproduce the old behavior, as it will return a packed copy of the - structured array. The code above, for example, can be replaced with: - - >>> from numpy.lib.recfunctions import repack_fields - >>> repack_fields(a[['a', 'c']]).view('i8') # supported in 1.16 - array([0, 0, 0]) - - Furthermore, numpy now provides a new function - :func:`numpy.lib.recfunctions.structured_to_unstructured` which is a safer - and more efficient alternative for users who wish to convert structured - arrays to unstructured arrays, as the view above is often indeded to do. - This function allows safe conversion to an unstructured type taking into - account padding, often avoids a copy, and also casts the datatypes - as needed, unlike the view. Code such as: - - >>> b = np.zeros(3, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')]) - >>> b[['x', 'z']].view('f4') - array([0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32) - - can be made safer by replacing with: - - >>> from numpy.lib.recfunctions import structured_to_unstructured - >>> structured_to_unstructured(b[['x', 'z']]) - array([0, 0, 0]) - - -Assignment to an array with a multi-field index modifies the original array:: - - >>> a[['a', 'c']] = (2, 3) - >>> a - array([(2, 0, 3.), (2, 0, 3.), (2, 0, 3.)], - dtype=[('a', '>> a[['a', 'c']] = a[['c', 'a']] - -Indexing with an Integer to get a Structured Scalar -``````````````````````````````````````````````````` - -Indexing a single element of a structured array (with an integer index) returns -a structured scalar:: - - >>> x = np.array([(1, 2., 3.)], dtype='i, f, f') - >>> scalar = x[0] - >>> scalar - (1, 2., 3.) 
- >>> type(scalar) - - -Unlike other numpy scalars, structured scalars are mutable and act like views -into the original array, such that modifying the scalar will modify the -original array. Structured scalars also support access and assignment by field -name:: - - >>> x = np.array([(1, 2), (3, 4)], dtype=[('foo', 'i8'), ('bar', 'f4')]) - >>> s = x[0] - >>> s['bar'] = 100 - >>> x - array([(1, 100.), (3, 4.)], - dtype=[('foo', '>> scalar = np.array([(1, 2., 3.)], dtype='i, f, f')[0] - >>> scalar[0] - 1 - >>> scalar[1] = 4 - -Thus, tuples might be thought of as the native Python equivalent to numpy's -structured types, much like native python integers are the equivalent to -numpy's integer types. Structured scalars may be converted to a tuple by -calling :func:`ndarray.item`:: - - >>> scalar.item(), type(scalar.item()) - ((1, 4.0, 3.0), ) - -Viewing Structured Arrays Containing Objects --------------------------------------------- - -In order to prevent clobbering object pointers in fields of -:class:`numpy.object` type, numpy currently does not allow views of structured -arrays containing objects. - -Structure Comparison --------------------- - -If the dtypes of two void structured arrays are equal, testing the equality of -the arrays will result in a boolean array with the dimensions of the original -arrays, with elements set to ``True`` where all fields of the corresponding -structures are equal. Structured dtypes are equal if the field names, -dtypes and titles are the same, ignoring endianness, and the fields are in -the same order:: - - >>> a = np.zeros(2, dtype=[('a', 'i4'), ('b', 'i4')]) - >>> b = np.ones(2, dtype=[('a', 'i4'), ('b', 'i4')]) - >>> a == b - array([False, False]) - -Currently, if the dtypes of two void structured arrays are not equivalent the -comparison fails, returning the scalar value ``False``. This behavior is -deprecated as of numpy 1.10 and will raise an error or perform elementwise -comparison in the future. 
- -The ``<`` and ``>`` operators always return ``False`` when comparing void -structured arrays, and arithmetic and bitwise operations are not supported. - -Record Arrays -============= - -As an optional convenience numpy provides an ndarray subclass, -:class:`numpy.recarray`, and associated helper functions in the -:mod:`numpy.rec` submodule, that allows access to fields of structured arrays -by attribute instead of only by index. Record arrays also use a special -datatype, :class:`numpy.record`, that allows field access by attribute on the -structured scalars obtained from the array. - -The simplest way to create a record array is with :func:`numpy.rec.array`:: - - >>> recordarr = np.rec.array([(1, 2., 'Hello'), (2, 3., "World")], - ... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')]) - >>> recordarr.bar - array([ 2., 3.], dtype=float32) - >>> recordarr[1:2] - rec.array([(2, 3., b'World')], - dtype=[('foo', '>> recordarr[1:2].foo - array([2], dtype=int32) - >>> recordarr.foo[1:2] - array([2], dtype=int32) - >>> recordarr[1].baz - b'World' - -:func:`numpy.rec.array` can convert a wide variety of arguments into record -arrays, including structured arrays:: - - >>> arr = np.array([(1, 2., 'Hello'), (2, 3., "World")], - ... dtype=[('foo', 'i4'), ('bar', 'f4'), ('baz', 'S10')]) - >>> recordarr = np.rec.array(arr) - -The :mod:`numpy.rec` module provides a number of other convenience functions for -creating record arrays, see :ref:`record array creation routines -`. - -A record array representation of a structured array can be obtained using the -appropriate `view `_:: - - >>> arr = np.array([(1, 2., 'Hello'), (2, 3., "World")], - ... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'a10')]) - >>> recordarr = arr.view(dtype=np.dtype((np.record, arr.dtype)), - ... 
type=np.recarray) - -For convenience, viewing an ndarray as type :class:`np.recarray` will -automatically convert to :class:`np.record` datatype, so the dtype can be left -out of the view:: - - >>> recordarr = arr.view(np.recarray) - >>> recordarr.dtype - dtype((numpy.record, [('foo', '>> arr2 = recordarr.view(recordarr.dtype.fields or recordarr.dtype, np.ndarray) - -Record array fields accessed by index or by attribute are returned as a record -array if the field has a structured type but as a plain ndarray otherwise. :: - - >>> recordarr = np.rec.array([('Hello', (1, 2)), ("World", (3, 4))], - ... dtype=[('foo', 'S6'),('bar', [('A', int), ('B', int)])]) - >>> type(recordarr.foo) - - >>> type(recordarr.bar) - - -Note that if a field has the same name as an ndarray attribute, the ndarray -attribute takes precedence. Such fields will be inaccessible by attribute but -will still be accessible by index. - -""" -from __future__ import division, absolute_import, print_function diff --git a/venv/lib/python3.7/site-packages/numpy/doc/subclassing.py b/venv/lib/python3.7/site-packages/numpy/doc/subclassing.py deleted file mode 100644 index d068532..0000000 --- a/venv/lib/python3.7/site-packages/numpy/doc/subclassing.py +++ /dev/null @@ -1,753 +0,0 @@ -"""============================= -Subclassing ndarray in python -============================= - -Introduction ------------- - -Subclassing ndarray is relatively simple, but it has some complications -compared to other Python objects. On this page we explain the machinery -that allows you to subclass ndarray, and the implications for -implementing a subclass. - -ndarrays and object creation -============================ - -Subclassing ndarray is complicated by the fact that new instances of -ndarray classes can come about in three different ways. These are: - -#. Explicit constructor call - as in ``MySubClass(params)``. This is - the usual route to Python instance creation. -#. 
View casting - casting an existing ndarray as a given subclass -#. New from template - creating a new instance from a template - instance. Examples include returning slices from a subclassed array, - creating return types from ufuncs, and copying arrays. See - :ref:`new-from-template` for more details - -The last two are characteristics of ndarrays - in order to support -things like array slicing. The complications of subclassing ndarray are -due to the mechanisms numpy has to support these latter two routes of -instance creation. - -.. _view-casting: - -View casting ------------- - -*View casting* is the standard ndarray mechanism by which you take an -ndarray of any subclass, and return a view of the array as another -(specified) subclass: - ->>> import numpy as np ->>> # create a completely useless ndarray subclass ->>> class C(np.ndarray): pass ->>> # create a standard ndarray ->>> arr = np.zeros((3,)) ->>> # take a view of it, as our useless subclass ->>> c_arr = arr.view(C) ->>> type(c_arr) - - -.. _new-from-template: - -Creating new from template --------------------------- - -New instances of an ndarray subclass can also come about by a very -similar mechanism to :ref:`view-casting`, when numpy finds it needs to -create a new instance from a template instance. The most obvious place -this has to happen is when you are taking slices of subclassed arrays. -For example: - ->>> v = c_arr[1:] ->>> type(v) # the view is of type 'C' - ->>> v is c_arr # but it's a new instance -False - -The slice is a *view* onto the original ``c_arr`` data. So, when we -take a view from the ndarray, we return a new ndarray, of the same -class, that points to the data in the original. - -There are other points in the use of ndarrays where we need such views, -such as copying arrays (``c_arr.copy()``), creating ufunc output arrays -(see also :ref:`array-wrap`), and reducing methods (like -``c_arr.mean()``. 
- -Relationship of view casting and new-from-template --------------------------------------------------- - -These paths both use the same machinery. We make the distinction here, -because they result in different input to your methods. Specifically, -:ref:`view-casting` means you have created a new instance of your array -type from any potential subclass of ndarray. :ref:`new-from-template` -means you have created a new instance of your class from a pre-existing -instance, allowing you - for example - to copy across attributes that -are particular to your subclass. - -Implications for subclassing ----------------------------- - -If we subclass ndarray, we need to deal not only with explicit -construction of our array type, but also :ref:`view-casting` or -:ref:`new-from-template`. NumPy has the machinery to do this, and this -machinery that makes subclassing slightly non-standard. - -There are two aspects to the machinery that ndarray uses to support -views and new-from-template in subclasses. - -The first is the use of the ``ndarray.__new__`` method for the main work -of object initialization, rather then the more usual ``__init__`` -method. The second is the use of the ``__array_finalize__`` method to -allow subclasses to clean up after the creation of views and new -instances from templates. - -A brief Python primer on ``__new__`` and ``__init__`` -===================================================== - -``__new__`` is a standard Python method, and, if present, is called -before ``__init__`` when we create a class instance. See the `python -__new__ documentation -`_ for more detail. - -For example, consider the following Python code: - -.. testcode:: - - class C(object): - def __new__(cls, *args): - print('Cls in __new__:', cls) - print('Args in __new__:', args) - # The `object` type __new__ method takes a single argument. 
- return object.__new__(cls) - - def __init__(self, *args): - print('type(self) in __init__:', type(self)) - print('Args in __init__:', args) - -meaning that we get: - ->>> c = C('hello') -Cls in __new__: -Args in __new__: ('hello',) -type(self) in __init__: -Args in __init__: ('hello',) - -When we call ``C('hello')``, the ``__new__`` method gets its own class -as first argument, and the passed argument, which is the string -``'hello'``. After python calls ``__new__``, it usually (see below) -calls our ``__init__`` method, with the output of ``__new__`` as the -first argument (now a class instance), and the passed arguments -following. - -As you can see, the object can be initialized in the ``__new__`` -method or the ``__init__`` method, or both, and in fact ndarray does -not have an ``__init__`` method, because all the initialization is -done in the ``__new__`` method. - -Why use ``__new__`` rather than just the usual ``__init__``? Because -in some cases, as for ndarray, we want to be able to return an object -of some other class. Consider the following: - -.. testcode:: - - class D(C): - def __new__(cls, *args): - print('D cls is:', cls) - print('D args in __new__:', args) - return C.__new__(C, *args) - - def __init__(self, *args): - # we never get here - print('In D __init__') - -meaning that: - ->>> obj = D('hello') -D cls is: -D args in __new__: ('hello',) -Cls in __new__: -Args in __new__: ('hello',) ->>> type(obj) - - -The definition of ``C`` is the same as before, but for ``D``, the -``__new__`` method returns an instance of class ``C`` rather than -``D``. Note that the ``__init__`` method of ``D`` does not get -called. In general, when the ``__new__`` method returns an object of -class other than the class in which it is defined, the ``__init__`` -method of that class is not called. - -This is how subclasses of the ndarray class are able to return views -that preserve the class type. 
When taking a view, the standard -ndarray machinery creates the new ndarray object with something -like:: - - obj = ndarray.__new__(subtype, shape, ... - -where ``subdtype`` is the subclass. Thus the returned view is of the -same class as the subclass, rather than being of class ``ndarray``. - -That solves the problem of returning views of the same type, but now -we have a new problem. The machinery of ndarray can set the class -this way, in its standard methods for taking views, but the ndarray -``__new__`` method knows nothing of what we have done in our own -``__new__`` method in order to set attributes, and so on. (Aside - -why not call ``obj = subdtype.__new__(...`` then? Because we may not -have a ``__new__`` method with the same call signature). - -The role of ``__array_finalize__`` -================================== - -``__array_finalize__`` is the mechanism that numpy provides to allow -subclasses to handle the various ways that new instances get created. - -Remember that subclass instances can come about in these three ways: - -#. explicit constructor call (``obj = MySubClass(params)``). This will - call the usual sequence of ``MySubClass.__new__`` then (if it exists) - ``MySubClass.__init__``. -#. :ref:`view-casting` -#. :ref:`new-from-template` - -Our ``MySubClass.__new__`` method only gets called in the case of the -explicit constructor call, so we can't rely on ``MySubClass.__new__`` or -``MySubClass.__init__`` to deal with the view casting and -new-from-template. It turns out that ``MySubClass.__array_finalize__`` -*does* get called for all three methods of object creation, so this is -where our object creation housekeeping usually goes. - -* For the explicit constructor call, our subclass will need to create a - new ndarray instance of its own class. 
In practice this means that - we, the authors of the code, will need to make a call to - ``ndarray.__new__(MySubClass,...)``, a class-hierarchy prepared call to - ``super(MySubClass, cls).__new__(cls, ...)``, or do view casting of an - existing array (see below) -* For view casting and new-from-template, the equivalent of - ``ndarray.__new__(MySubClass,...`` is called, at the C level. - -The arguments that ``__array_finalize__`` receives differ for the three -methods of instance creation above. - -The following code allows us to look at the call sequences and arguments: - -.. testcode:: - - import numpy as np - - class C(np.ndarray): - def __new__(cls, *args, **kwargs): - print('In __new__ with class %s' % cls) - return super(C, cls).__new__(cls, *args, **kwargs) - - def __init__(self, *args, **kwargs): - # in practice you probably will not need or want an __init__ - # method for your subclass - print('In __init__ with class %s' % self.__class__) - - def __array_finalize__(self, obj): - print('In array_finalize:') - print(' self type is %s' % type(self)) - print(' obj type is %s' % type(obj)) - - -Now: - ->>> # Explicit constructor ->>> c = C((10,)) -In __new__ with class -In array_finalize: - self type is - obj type is -In __init__ with class ->>> # View casting ->>> a = np.arange(10) ->>> cast_a = a.view(C) -In array_finalize: - self type is - obj type is ->>> # Slicing (example of new-from-template) ->>> cv = c[:1] -In array_finalize: - self type is - obj type is - -The signature of ``__array_finalize__`` is:: - - def __array_finalize__(self, obj): - -One sees that the ``super`` call, which goes to -``ndarray.__new__``, passes ``__array_finalize__`` the new object, of our -own class (``self``) as well as the object from which the view has been -taken (``obj``). 
As you can see from the output above, the ``self`` is -always a newly created instance of our subclass, and the type of ``obj`` -differs for the three instance creation methods: - -* When called from the explicit constructor, ``obj`` is ``None`` -* When called from view casting, ``obj`` can be an instance of any - subclass of ndarray, including our own. -* When called in new-from-template, ``obj`` is another instance of our - own subclass, that we might use to update the new ``self`` instance. - -Because ``__array_finalize__`` is the only method that always sees new -instances being created, it is the sensible place to fill in instance -defaults for new object attributes, among other tasks. - -This may be clearer with an example. - -Simple example - adding an extra attribute to ndarray ------------------------------------------------------ - -.. testcode:: - - import numpy as np - - class InfoArray(np.ndarray): - - def __new__(subtype, shape, dtype=float, buffer=None, offset=0, - strides=None, order=None, info=None): - # Create the ndarray instance of our type, given the usual - # ndarray input arguments. This will call the standard - # ndarray constructor, but return an object of our type. - # It also triggers a call to InfoArray.__array_finalize__ - obj = super(InfoArray, subtype).__new__(subtype, shape, dtype, - buffer, offset, strides, - order) - # set the new 'info' attribute to the value passed - obj.info = info - # Finally, we must return the newly created object: - return obj - - def __array_finalize__(self, obj): - # ``self`` is a new object resulting from - # ndarray.__new__(InfoArray, ...), therefore it only has - # attributes that the ndarray.__new__ constructor gave it - - # i.e. those of a standard ndarray. - # - # We could have got to the ndarray.__new__ call in 3 ways: - # From an explicit constructor - e.g. 
InfoArray(): - # obj is None - # (we're in the middle of the InfoArray.__new__ - # constructor, and self.info will be set when we return to - # InfoArray.__new__) - if obj is None: return - # From view casting - e.g arr.view(InfoArray): - # obj is arr - # (type(obj) can be InfoArray) - # From new-from-template - e.g infoarr[:3] - # type(obj) is InfoArray - # - # Note that it is here, rather than in the __new__ method, - # that we set the default value for 'info', because this - # method sees all creation of default objects - with the - # InfoArray.__new__ constructor, but also with - # arr.view(InfoArray). - self.info = getattr(obj, 'info', None) - # We do not need to return anything - - -Using the object looks like this: - - >>> obj = InfoArray(shape=(3,)) # explicit constructor - >>> type(obj) - - >>> obj.info is None - True - >>> obj = InfoArray(shape=(3,), info='information') - >>> obj.info - 'information' - >>> v = obj[1:] # new-from-template - here - slicing - >>> type(v) - - >>> v.info - 'information' - >>> arr = np.arange(10) - >>> cast_arr = arr.view(InfoArray) # view casting - >>> type(cast_arr) - - >>> cast_arr.info is None - True - -This class isn't very useful, because it has the same constructor as the -bare ndarray object, including passing in buffers and shapes and so on. -We would probably prefer the constructor to be able to take an already -formed ndarray from the usual numpy calls to ``np.array`` and return an -object. - -Slightly more realistic example - attribute added to existing array -------------------------------------------------------------------- - -Here is a class that takes a standard ndarray that already exists, casts -as our type, and adds an extra attribute. - -.. 
testcode:: - - import numpy as np - - class RealisticInfoArray(np.ndarray): - - def __new__(cls, input_array, info=None): - # Input array is an already formed ndarray instance - # We first cast to be our class type - obj = np.asarray(input_array).view(cls) - # add the new attribute to the created instance - obj.info = info - # Finally, we must return the newly created object: - return obj - - def __array_finalize__(self, obj): - # see InfoArray.__array_finalize__ for comments - if obj is None: return - self.info = getattr(obj, 'info', None) - - -So: - - >>> arr = np.arange(5) - >>> obj = RealisticInfoArray(arr, info='information') - >>> type(obj) - - >>> obj.info - 'information' - >>> v = obj[1:] - >>> type(v) - - >>> v.info - 'information' - -.. _array-ufunc: - -``__array_ufunc__`` for ufuncs ------------------------------- - - .. versionadded:: 1.13 - -A subclass can override what happens when executing numpy ufuncs on it by -overriding the default ``ndarray.__array_ufunc__`` method. This method is -executed *instead* of the ufunc and should return either the result of the -operation, or :obj:`NotImplemented` if the operation requested is not -implemented. - -The signature of ``__array_ufunc__`` is:: - - def __array_ufunc__(ufunc, method, *inputs, **kwargs): - - - *ufunc* is the ufunc object that was called. - - *method* is a string indicating how the Ufunc was called, either - ``"__call__"`` to indicate it was called directly, or one of its - :ref:`methods`: ``"reduce"``, ``"accumulate"``, - ``"reduceat"``, ``"outer"``, or ``"at"``. - - *inputs* is a tuple of the input arguments to the ``ufunc`` - - *kwargs* contains any optional or keyword arguments passed to the - function. This includes any ``out`` arguments, which are always - contained in a tuple. 
- -A typical implementation would convert any inputs or outputs that are -instances of one's own class, pass everything on to a superclass using -``super()``, and finally return the results after possible -back-conversion. An example, taken from the test case -``test_ufunc_override_with_super`` in ``core/tests/test_umath.py``, is the -following. - -.. testcode:: - - input numpy as np - - class A(np.ndarray): - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - args = [] - in_no = [] - for i, input_ in enumerate(inputs): - if isinstance(input_, A): - in_no.append(i) - args.append(input_.view(np.ndarray)) - else: - args.append(input_) - - outputs = kwargs.pop('out', None) - out_no = [] - if outputs: - out_args = [] - for j, output in enumerate(outputs): - if isinstance(output, A): - out_no.append(j) - out_args.append(output.view(np.ndarray)) - else: - out_args.append(output) - kwargs['out'] = tuple(out_args) - else: - outputs = (None,) * ufunc.nout - - info = {} - if in_no: - info['inputs'] = in_no - if out_no: - info['outputs'] = out_no - - results = super(A, self).__array_ufunc__(ufunc, method, - *args, **kwargs) - if results is NotImplemented: - return NotImplemented - - if method == 'at': - if isinstance(inputs[0], A): - inputs[0].info = info - return - - if ufunc.nout == 1: - results = (results,) - - results = tuple((np.asarray(result).view(A) - if output is None else output) - for result, output in zip(results, outputs)) - if results and isinstance(results[0], A): - results[0].info = info - - return results[0] if len(results) == 1 else results - -So, this class does not actually do anything interesting: it just -converts any instances of its own to regular ndarray (otherwise, we'd -get infinite recursion!), and adds an ``info`` dictionary that tells -which inputs and outputs it converted. 
Hence, e.g., - ->>> a = np.arange(5.).view(A) ->>> b = np.sin(a) ->>> b.info -{'inputs': [0]} ->>> b = np.sin(np.arange(5.), out=(a,)) ->>> b.info -{'outputs': [0]} ->>> a = np.arange(5.).view(A) ->>> b = np.ones(1).view(A) ->>> c = a + b ->>> c.info -{'inputs': [0, 1]} ->>> a += b ->>> a.info -{'inputs': [0, 1], 'outputs': [0]} - -Note that another approach would be to to use ``getattr(ufunc, -methods)(*inputs, **kwargs)`` instead of the ``super`` call. For this example, -the result would be identical, but there is a difference if another operand -also defines ``__array_ufunc__``. E.g., lets assume that we evalulate -``np.add(a, b)``, where ``b`` is an instance of another class ``B`` that has -an override. If you use ``super`` as in the example, -``ndarray.__array_ufunc__`` will notice that ``b`` has an override, which -means it cannot evaluate the result itself. Thus, it will return -`NotImplemented` and so will our class ``A``. Then, control will be passed -over to ``b``, which either knows how to deal with us and produces a result, -or does not and returns `NotImplemented`, raising a ``TypeError``. - -If instead, we replace our ``super`` call with ``getattr(ufunc, method)``, we -effectively do ``np.add(a.view(np.ndarray), b)``. Again, ``B.__array_ufunc__`` -will be called, but now it sees an ``ndarray`` as the other argument. Likely, -it will know how to handle this, and return a new instance of the ``B`` class -to us. Our example class is not set up to handle this, but it might well be -the best approach if, e.g., one were to re-implement ``MaskedArray`` using -``__array_ufunc__``. - -As a final note: if the ``super`` route is suited to a given class, an -advantage of using it is that it helps in constructing class hierarchies. 
-E.g., suppose that our other class ``B`` also used the ``super`` in its -``__array_ufunc__`` implementation, and we created a class ``C`` that depended -on both, i.e., ``class C(A, B)`` (with, for simplicity, not another -``__array_ufunc__`` override). Then any ufunc on an instance of ``C`` would -pass on to ``A.__array_ufunc__``, the ``super`` call in ``A`` would go to -``B.__array_ufunc__``, and the ``super`` call in ``B`` would go to -``ndarray.__array_ufunc__``, thus allowing ``A`` and ``B`` to collaborate. - -.. _array-wrap: - -``__array_wrap__`` for ufuncs and other functions -------------------------------------------------- - -Prior to numpy 1.13, the behaviour of ufuncs could only be tuned using -``__array_wrap__`` and ``__array_prepare__``. These two allowed one to -change the output type of a ufunc, but, in contrast to -``__array_ufunc__``, did not allow one to make any changes to the inputs. -It is hoped to eventually deprecate these, but ``__array_wrap__`` is also -used by other numpy functions and methods, such as ``squeeze``, so at the -present time is still needed for full functionality. - -Conceptually, ``__array_wrap__`` "wraps up the action" in the sense of -allowing a subclass to set the type of the return value and update -attributes and metadata. Let's show how this works with an example. First -we return to the simpler example subclass, but with a different name and -some print statements: - -.. 
testcode:: - - import numpy as np - - class MySubClass(np.ndarray): - - def __new__(cls, input_array, info=None): - obj = np.asarray(input_array).view(cls) - obj.info = info - return obj - - def __array_finalize__(self, obj): - print('In __array_finalize__:') - print(' self is %s' % repr(self)) - print(' obj is %s' % repr(obj)) - if obj is None: return - self.info = getattr(obj, 'info', None) - - def __array_wrap__(self, out_arr, context=None): - print('In __array_wrap__:') - print(' self is %s' % repr(self)) - print(' arr is %s' % repr(out_arr)) - # then just call the parent - return super(MySubClass, self).__array_wrap__(self, out_arr, context) - -We run a ufunc on an instance of our new array: - ->>> obj = MySubClass(np.arange(5), info='spam') -In __array_finalize__: - self is MySubClass([0, 1, 2, 3, 4]) - obj is array([0, 1, 2, 3, 4]) ->>> arr2 = np.arange(5)+1 ->>> ret = np.add(arr2, obj) -In __array_wrap__: - self is MySubClass([0, 1, 2, 3, 4]) - arr is array([1, 3, 5, 7, 9]) -In __array_finalize__: - self is MySubClass([1, 3, 5, 7, 9]) - obj is MySubClass([0, 1, 2, 3, 4]) ->>> ret -MySubClass([1, 3, 5, 7, 9]) ->>> ret.info -'spam' - -Note that the ufunc (``np.add``) has called the ``__array_wrap__`` method -with arguments ``self`` as ``obj``, and ``out_arr`` as the (ndarray) result -of the addition. In turn, the default ``__array_wrap__`` -(``ndarray.__array_wrap__``) has cast the result to class ``MySubClass``, -and called ``__array_finalize__`` - hence the copying of the ``info`` -attribute. This has all happened at the C level. - -But, we could do anything we wanted: - -.. testcode:: - - class SillySubClass(np.ndarray): - - def __array_wrap__(self, arr, context=None): - return 'I lost your data' - ->>> arr1 = np.arange(5) ->>> obj = arr1.view(SillySubClass) ->>> arr2 = np.arange(5) ->>> ret = np.multiply(obj, arr2) ->>> ret -'I lost your data' - -So, by defining a specific ``__array_wrap__`` method for our subclass, -we can tweak the output from ufuncs. 
The ``__array_wrap__`` method -requires ``self``, then an argument - which is the result of the ufunc - -and an optional parameter *context*. This parameter is returned by -ufuncs as a 3-element tuple: (name of the ufunc, arguments of the ufunc, -domain of the ufunc), but is not set by other numpy functions. Though, -as seen above, it is possible to do otherwise, ``__array_wrap__`` should -return an instance of its containing class. See the masked array -subclass for an implementation. - -In addition to ``__array_wrap__``, which is called on the way out of the -ufunc, there is also an ``__array_prepare__`` method which is called on -the way into the ufunc, after the output arrays are created but before any -computation has been performed. The default implementation does nothing -but pass through the array. ``__array_prepare__`` should not attempt to -access the array data or resize the array, it is intended for setting the -output array type, updating attributes and metadata, and performing any -checks based on the input that may be desired before computation begins. -Like ``__array_wrap__``, ``__array_prepare__`` must return an ndarray or -subclass thereof or raise an error. - -Extra gotchas - custom ``__del__`` methods and ndarray.base ------------------------------------------------------------ - -One of the problems that ndarray solves is keeping track of memory -ownership of ndarrays and their views. Consider the case where we have -created an ndarray, ``arr`` and have taken a slice with ``v = arr[1:]``. -The two objects are looking at the same memory. 
NumPy keeps track of -where the data came from for a particular array or view, with the -``base`` attribute: - ->>> # A normal ndarray, that owns its own data ->>> arr = np.zeros((4,)) ->>> # In this case, base is None ->>> arr.base is None -True ->>> # We take a view ->>> v1 = arr[1:] ->>> # base now points to the array that it derived from ->>> v1.base is arr -True ->>> # Take a view of a view ->>> v2 = v1[1:] ->>> # base points to the view it derived from ->>> v2.base is v1 -True - -In general, if the array owns its own memory, as for ``arr`` in this -case, then ``arr.base`` will be None - there are some exceptions to this -- see the numpy book for more details. - -The ``base`` attribute is useful in being able to tell whether we have -a view or the original array. This in turn can be useful if we need -to know whether or not to do some specific cleanup when the subclassed -array is deleted. For example, we may only want to do the cleanup if -the original array is deleted, but not the views. For an example of -how this can work, have a look at the ``memmap`` class in -``numpy.core``. - -Subclassing and Downstream Compatibility ----------------------------------------- - -When sub-classing ``ndarray`` or creating duck-types that mimic the ``ndarray`` -interface, it is your responsibility to decide how aligned your APIs will be -with those of numpy. For convenience, many numpy functions that have a corresponding -``ndarray`` method (e.g., ``sum``, ``mean``, ``take``, ``reshape``) work by checking -if the first argument to a function has a method of the same name. If it exists, the -method is called instead of coercing the arguments to a numpy array. - -For example, if you want your sub-class or duck-type to be compatible with -numpy's ``sum`` function, the method signature for this object's ``sum`` method -should be the following: - -.. testcode:: - - def sum(self, axis=None, dtype=None, out=None, keepdims=False): - ... 
- -This is the exact same method signature for ``np.sum``, so now if a user calls -``np.sum`` on this object, numpy will call the object's own ``sum`` method and -pass in these arguments enumerated above in the signature, and no errors will -be raised because the signatures are completely compatible with each other. - -If, however, you decide to deviate from this signature and do something like this: - -.. testcode:: - - def sum(self, axis=None, dtype=None): - ... - -This object is no longer compatible with ``np.sum`` because if you call ``np.sum``, -it will pass in unexpected arguments ``out`` and ``keepdims``, causing a TypeError -to be raised. - -If you wish to maintain compatibility with numpy and its subsequent versions (which -might add new keyword arguments) but do not want to surface all of numpy's arguments, -your function's signature should accept ``**kwargs``. For example: - -.. testcode:: - - def sum(self, axis=None, dtype=None, **unused_kwargs): - ... - -This object is now compatible with ``np.sum`` again because any extraneous arguments -(i.e. keywords that are not ``axis`` or ``dtype``) will be hidden away in the -``**unused_kwargs`` parameter. - -""" -from __future__ import division, absolute_import, print_function diff --git a/venv/lib/python3.7/site-packages/numpy/doc/ufuncs.py b/venv/lib/python3.7/site-packages/numpy/doc/ufuncs.py deleted file mode 100644 index df2c455..0000000 --- a/venv/lib/python3.7/site-packages/numpy/doc/ufuncs.py +++ /dev/null @@ -1,138 +0,0 @@ -""" -=================== -Universal Functions -=================== - -Ufuncs are, generally speaking, mathematical functions or operations that are -applied element-by-element to the contents of an array. That is, the result -in each output array element only depends on the value in the corresponding -input array (or arrays) and on no other array elements. NumPy comes with a -large suite of ufuncs, and scipy extends that suite substantially. 
The simplest -example is the addition operator: :: - - >>> np.array([0,2,3,4]) + np.array([1,1,-1,2]) - array([1, 3, 2, 6]) - -The ufunc module lists all the available ufuncs in numpy. Documentation on -the specific ufuncs may be found in those modules. This documentation is -intended to address the more general aspects of ufuncs common to most of -them. All of the ufuncs that make use of Python operators (e.g., +, -, etc.) -have equivalent functions defined (e.g. add() for +) - -Type coercion -============= - -What happens when a binary operator (e.g., +,-,\\*,/, etc) deals with arrays of -two different types? What is the type of the result? Typically, the result is -the higher of the two types. For example: :: - - float32 + float64 -> float64 - int8 + int32 -> int32 - int16 + float32 -> float32 - float32 + complex64 -> complex64 - -There are some less obvious cases generally involving mixes of types -(e.g. uints, ints and floats) where equal bit sizes for each are not -capable of saving all the information in a different type of equivalent -bit size. Some examples are int32 vs float32 or uint32 vs int32. -Generally, the result is the higher type of larger size than both -(if available). So: :: - - int32 + float32 -> float64 - uint32 + int32 -> int64 - -Finally, the type coercion behavior when expressions involve Python -scalars is different than that seen for arrays. Since Python has a -limited number of types, combining a Python int with a dtype=np.int8 -array does not coerce to the higher type but instead, the type of the -array prevails. So the rules for Python scalars combined with arrays is -that the result will be that of the array equivalent the Python scalar -if the Python scalar is of a higher 'kind' than the array (e.g., float -vs. int), otherwise the resultant type will be that of the array. -For example: :: - - Python int + int8 -> int8 - Python float + int8 -> float64 - -ufunc methods -============= - -Binary ufuncs support 4 methods. 
- -**.reduce(arr)** applies the binary operator to elements of the array in - sequence. For example: :: - - >>> np.add.reduce(np.arange(10)) # adds all elements of array - 45 - -For multidimensional arrays, the first dimension is reduced by default: :: - - >>> np.add.reduce(np.arange(10).reshape(2,5)) - array([ 5, 7, 9, 11, 13]) - -The axis keyword can be used to specify different axes to reduce: :: - - >>> np.add.reduce(np.arange(10).reshape(2,5),axis=1) - array([10, 35]) - -**.accumulate(arr)** applies the binary operator and generates an an -equivalently shaped array that includes the accumulated amount for each -element of the array. A couple examples: :: - - >>> np.add.accumulate(np.arange(10)) - array([ 0, 1, 3, 6, 10, 15, 21, 28, 36, 45]) - >>> np.multiply.accumulate(np.arange(1,9)) - array([ 1, 2, 6, 24, 120, 720, 5040, 40320]) - -The behavior for multidimensional arrays is the same as for .reduce(), -as is the use of the axis keyword). - -**.reduceat(arr,indices)** allows one to apply reduce to selected parts - of an array. It is a difficult method to understand. See the documentation - at: - -**.outer(arr1,arr2)** generates an outer operation on the two arrays arr1 and - arr2. It will work on multidimensional arrays (the shape of the result is - the concatenation of the two input shapes.: :: - - >>> np.multiply.outer(np.arange(3),np.arange(4)) - array([[0, 0, 0, 0], - [0, 1, 2, 3], - [0, 2, 4, 6]]) - -Output arguments -================ - -All ufuncs accept an optional output array. The array must be of the expected -output shape. Beware that if the type of the output array is of a different -(and lower) type than the output result, the results may be silently truncated -or otherwise corrupted in the downcast to the lower type. 
This usage is useful -when one wants to avoid creating large temporary arrays and instead allows one -to reuse the same array memory repeatedly (at the expense of not being able to -use more convenient operator notation in expressions). Note that when the -output argument is used, the ufunc still returns a reference to the result. - - >>> x = np.arange(2) - >>> np.add(np.arange(2),np.arange(2.),x) - array([0, 2]) - >>> x - array([0, 2]) - -and & or as ufuncs -================== - -Invariably people try to use the python 'and' and 'or' as logical operators -(and quite understandably). But these operators do not behave as normal -operators since Python treats these quite differently. They cannot be -overloaded with array equivalents. Thus using 'and' or 'or' with an array -results in an error. There are two alternatives: - - 1) use the ufunc functions logical_and() and logical_or(). - 2) use the bitwise operators & and \\|. The drawback of these is that if - the arguments to these operators are not boolean arrays, the result is - likely incorrect. On the other hand, most usages of logical_and and - logical_or are with boolean arrays. As long as one is careful, this is - a convenient way to apply these operators. - -""" -from __future__ import division, absolute_import, print_function diff --git a/venv/lib/python3.7/site-packages/numpy/dual.py b/venv/lib/python3.7/site-packages/numpy/dual.py deleted file mode 100644 index 651e845..0000000 --- a/venv/lib/python3.7/site-packages/numpy/dual.py +++ /dev/null @@ -1,71 +0,0 @@ -""" -Aliases for functions which may be accelerated by Scipy. - -Scipy_ can be built to use accelerated or otherwise improved libraries -for FFTs, linear algebra, and special functions. This module allows -developers to transparently support these accelerated functions when -scipy is available but still support users who have only installed -NumPy. - -.. 
_Scipy : https://www.scipy.org - -""" -from __future__ import division, absolute_import, print_function - -# This module should be used for functions both in numpy and scipy if -# you want to use the numpy version if available but the scipy version -# otherwise. -# Usage --- from numpy.dual import fft, inv - -__all__ = ['fft', 'ifft', 'fftn', 'ifftn', 'fft2', 'ifft2', - 'norm', 'inv', 'svd', 'solve', 'det', 'eig', 'eigvals', - 'eigh', 'eigvalsh', 'lstsq', 'pinv', 'cholesky', 'i0'] - -import numpy.linalg as linpkg -import numpy.fft as fftpkg -from numpy.lib import i0 -import sys - - -fft = fftpkg.fft -ifft = fftpkg.ifft -fftn = fftpkg.fftn -ifftn = fftpkg.ifftn -fft2 = fftpkg.fft2 -ifft2 = fftpkg.ifft2 - -norm = linpkg.norm -inv = linpkg.inv -svd = linpkg.svd -solve = linpkg.solve -det = linpkg.det -eig = linpkg.eig -eigvals = linpkg.eigvals -eigh = linpkg.eigh -eigvalsh = linpkg.eigvalsh -lstsq = linpkg.lstsq -pinv = linpkg.pinv -cholesky = linpkg.cholesky - -_restore_dict = {} - -def register_func(name, func): - if name not in __all__: - raise ValueError("{} not a dual function.".format(name)) - f = sys._getframe(0).f_globals - _restore_dict[name] = f[name] - f[name] = func - -def restore_func(name): - if name not in __all__: - raise ValueError("{} not a dual function.".format(name)) - try: - val = _restore_dict[name] - except KeyError: - return - else: - sys._getframe(0).f_globals[name] = val - -def restore_all(): - for name in _restore_dict.keys(): - restore_func(name) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/__init__.py b/venv/lib/python3.7/site-packages/numpy/f2py/__init__.py deleted file mode 100644 index 42e3632..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/__init__.py +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env python -"""Fortran to Python Interface Generator. 
- -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['run_main', 'compile', 'f2py_testing'] - -import sys -import subprocess -import os - -import numpy as np - -from . import f2py2e -from . import f2py_testing -from . import diagnose - -run_main = f2py2e.run_main -main = f2py2e.main - - -def compile(source, - modulename='untitled', - extra_args='', - verbose=True, - source_fn=None, - extension='.f' - ): - """ - Build extension module from a Fortran 77 source string with f2py. - - Parameters - ---------- - source : str or bytes - Fortran source of module / subroutine to compile - - .. versionchanged:: 1.16.0 - Accept str as well as bytes - - modulename : str, optional - The name of the compiled python module - extra_args : str or list, optional - Additional parameters passed to f2py - - .. versionchanged:: 1.16.0 - A list of args may also be provided. - - verbose : bool, optional - Print f2py output to screen - source_fn : str, optional - Name of the file where the fortran source is written. - The default is to use a temporary file with the extension - provided by the `extension` parameter - extension : {'.f', '.f90'}, optional - Filename extension if `source_fn` is not provided. - The extension tells which fortran standard is used. - The default is `.f`, which implies F77 standard. - - .. versionadded:: 1.11.0 - - Returns - ------- - result : int - 0 on success - - Examples - -------- - .. 
include:: compile_session.dat - :literal: - - """ - import tempfile - import shlex - - if source_fn is None: - f, fname = tempfile.mkstemp(suffix=extension) - # f is a file descriptor so need to close it - # carefully -- not with .close() directly - os.close(f) - else: - fname = source_fn - - if not isinstance(source, str): - source = str(source, 'utf-8') - try: - with open(fname, 'w') as f: - f.write(source) - - args = ['-c', '-m', modulename, f.name] - - if isinstance(extra_args, np.compat.basestring): - is_posix = (os.name == 'posix') - extra_args = shlex.split(extra_args, posix=is_posix) - - args.extend(extra_args) - - c = [sys.executable, - '-c', - 'import numpy.f2py as f2py2e;f2py2e.main()'] + args - try: - output = subprocess.check_output(c) - except subprocess.CalledProcessError as exc: - status = exc.returncode - output = '' - except OSError: - # preserve historic status code used by exec_command() - status = 127 - output = '' - else: - status = 0 - output = output.decode() - if verbose: - print(output) - finally: - if source_fn is None: - os.remove(fname) - return status - -from numpy._pytesttester import PytestTester -test = PytestTester(__name__) -del PytestTester diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/__main__.py b/venv/lib/python3.7/site-packages/numpy/f2py/__main__.py deleted file mode 100644 index 708f7f3..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/__main__.py +++ /dev/null @@ -1,6 +0,0 @@ -# See http://cens.ioc.ee/projects/f2py2e/ -from __future__ import division, print_function - -from numpy.f2py.f2py2e import main - -main() diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/__version__.py b/venv/lib/python3.7/site-packages/numpy/f2py/__version__.py deleted file mode 100644 index 49a2199..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/__version__.py +++ /dev/null @@ -1,10 +0,0 @@ -from __future__ import division, absolute_import, print_function - -major = 2 - -try: - from __svn_version__ import 
version - version_info = (major, version) - version = '%s_%s' % version_info -except (ImportError, ValueError): - version = str(major) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/auxfuncs.py b/venv/lib/python3.7/site-packages/numpy/f2py/auxfuncs.py deleted file mode 100644 index 404bdbd..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/auxfuncs.py +++ /dev/null @@ -1,854 +0,0 @@ -#!/usr/bin/env python -""" - -Auxiliary functions for f2py2e. - -Copyright 1999,2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy (BSD style) LICENSE. - - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/07/24 19:01:55 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -import pprint -import sys -import types -from functools import reduce - -from . import __version__ -from . import cfuncs - -__all__ = [ - 'applyrules', 'debugcapi', 'dictappend', 'errmess', 'gentitle', - 'getargs2', 'getcallprotoargument', 'getcallstatement', - 'getfortranname', 'getpymethoddef', 'getrestdoc', 'getusercode', - 'getusercode1', 'hasbody', 'hascallstatement', 'hascommon', - 'hasexternals', 'hasinitvalue', 'hasnote', 'hasresultnote', - 'isallocatable', 'isarray', 'isarrayofstrings', 'iscomplex', - 'iscomplexarray', 'iscomplexfunction', 'iscomplexfunction_warn', - 'isdouble', 'isdummyroutine', 'isexternal', 'isfunction', - 'isfunction_wrap', 'isint1array', 'isinteger', 'isintent_aux', - 'isintent_c', 'isintent_callback', 'isintent_copy', 'isintent_dict', - 'isintent_hide', 'isintent_in', 'isintent_inout', 'isintent_inplace', - 'isintent_nothide', 'isintent_out', 'isintent_overwrite', 'islogical', - 'islogicalfunction', 'islong_complex', 'islong_double', - 'islong_doublefunction', 'islong_long', 'islong_longfunction', - 'ismodule', 'ismoduleroutine', 'isoptional', 'isprivate', 'isrequired', - 'isroutine', 'isscalar', 
'issigned_long_longarray', 'isstring', - 'isstringarray', 'isstringfunction', 'issubroutine', - 'issubroutine_wrap', 'isthreadsafe', 'isunsigned', 'isunsigned_char', - 'isunsigned_chararray', 'isunsigned_long_long', - 'isunsigned_long_longarray', 'isunsigned_short', - 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', - 'replace', 'show', 'stripcomma', 'throw_error', -] - - -f2py_version = __version__.version - - -errmess = sys.stderr.write -show = pprint.pprint - -options = {} -debugoptions = [] -wrapfuncs = 1 - - -def outmess(t): - if options.get('verbose', 1): - sys.stdout.write(t) - - -def debugcapi(var): - return 'capi' in debugoptions - - -def _isstring(var): - return 'typespec' in var and var['typespec'] == 'character' and \ - not isexternal(var) - - -def isstring(var): - return _isstring(var) and not isarray(var) - - -def ischaracter(var): - return isstring(var) and 'charselector' not in var - - -def isstringarray(var): - return isarray(var) and _isstring(var) - - -def isarrayofstrings(var): - # leaving out '*' for now so that `character*(*) a(m)` and `character - # a(m,*)` are treated differently. Luckily `character**` is illegal. 
- return isstringarray(var) and var['dimension'][-1] == '(*)' - - -def isarray(var): - return 'dimension' in var and not isexternal(var) - - -def isscalar(var): - return not (isarray(var) or isstring(var) or isexternal(var)) - - -def iscomplex(var): - return isscalar(var) and \ - var.get('typespec') in ['complex', 'double complex'] - - -def islogical(var): - return isscalar(var) and var.get('typespec') == 'logical' - - -def isinteger(var): - return isscalar(var) and var.get('typespec') == 'integer' - - -def isreal(var): - return isscalar(var) and var.get('typespec') == 'real' - - -def get_kind(var): - try: - return var['kindselector']['*'] - except KeyError: - try: - return var['kindselector']['kind'] - except KeyError: - pass - - -def islong_long(var): - if not isscalar(var): - return 0 - if var.get('typespec') not in ['integer', 'logical']: - return 0 - return get_kind(var) == '8' - - -def isunsigned_char(var): - if not isscalar(var): - return 0 - if var.get('typespec') != 'integer': - return 0 - return get_kind(var) == '-1' - - -def isunsigned_short(var): - if not isscalar(var): - return 0 - if var.get('typespec') != 'integer': - return 0 - return get_kind(var) == '-2' - - -def isunsigned(var): - if not isscalar(var): - return 0 - if var.get('typespec') != 'integer': - return 0 - return get_kind(var) == '-4' - - -def isunsigned_long_long(var): - if not isscalar(var): - return 0 - if var.get('typespec') != 'integer': - return 0 - return get_kind(var) == '-8' - - -def isdouble(var): - if not isscalar(var): - return 0 - if not var.get('typespec') == 'real': - return 0 - return get_kind(var) == '8' - - -def islong_double(var): - if not isscalar(var): - return 0 - if not var.get('typespec') == 'real': - return 0 - return get_kind(var) == '16' - - -def islong_complex(var): - if not iscomplex(var): - return 0 - return get_kind(var) == '32' - - -def iscomplexarray(var): - return isarray(var) and \ - var.get('typespec') in ['complex', 'double complex'] - - -def 
isint1array(var): - return isarray(var) and var.get('typespec') == 'integer' \ - and get_kind(var) == '1' - - -def isunsigned_chararray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var) == '-1' - - -def isunsigned_shortarray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var) == '-2' - - -def isunsignedarray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var) == '-4' - - -def isunsigned_long_longarray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var) == '-8' - - -def issigned_chararray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var) == '1' - - -def issigned_shortarray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var) == '2' - - -def issigned_array(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var) == '4' - - -def issigned_long_longarray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var) == '8' - - -def isallocatable(var): - return 'attrspec' in var and 'allocatable' in var['attrspec'] - - -def ismutable(var): - return not ('dimension' not in var or isstring(var)) - - -def ismoduleroutine(rout): - return 'modulename' in rout - - -def ismodule(rout): - return 'block' in rout and 'module' == rout['block'] - - -def isfunction(rout): - return 'block' in rout and 'function' == rout['block'] - -def isfunction_wrap(rout): - if isintent_c(rout): - return 0 - return wrapfuncs and isfunction(rout) and (not isexternal(rout)) - - -def issubroutine(rout): - return 'block' in rout and 'subroutine' == rout['block'] - - -def issubroutine_wrap(rout): - if isintent_c(rout): - return 0 - return issubroutine(rout) and hasassumedshape(rout) - - -def hasassumedshape(rout): - if rout.get('hasassumedshape'): 
- return True - for a in rout['args']: - for d in rout['vars'].get(a, {}).get('dimension', []): - if d == ':': - rout['hasassumedshape'] = True - return True - return False - - -def isroutine(rout): - return isfunction(rout) or issubroutine(rout) - - -def islogicalfunction(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a = rout['result'] - else: - a = rout['name'] - if a in rout['vars']: - return islogical(rout['vars'][a]) - return 0 - - -def islong_longfunction(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a = rout['result'] - else: - a = rout['name'] - if a in rout['vars']: - return islong_long(rout['vars'][a]) - return 0 - - -def islong_doublefunction(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a = rout['result'] - else: - a = rout['name'] - if a in rout['vars']: - return islong_double(rout['vars'][a]) - return 0 - - -def iscomplexfunction(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a = rout['result'] - else: - a = rout['name'] - if a in rout['vars']: - return iscomplex(rout['vars'][a]) - return 0 - - -def iscomplexfunction_warn(rout): - if iscomplexfunction(rout): - outmess("""\ - ************************************************************** - Warning: code with a function returning complex value - may not work correctly with your Fortran compiler. - Run the following test before using it in your applications: - $(f2py install dir)/test-site/{b/runme_scalar,e/runme} - When using GNU gcc/g77 compilers, codes should work correctly. 
- **************************************************************\n""") - return 1 - return 0 - - -def isstringfunction(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a = rout['result'] - else: - a = rout['name'] - if a in rout['vars']: - return isstring(rout['vars'][a]) - return 0 - - -def hasexternals(rout): - return 'externals' in rout and rout['externals'] - - -def isthreadsafe(rout): - return 'f2pyenhancements' in rout and \ - 'threadsafe' in rout['f2pyenhancements'] - - -def hasvariables(rout): - return 'vars' in rout and rout['vars'] - - -def isoptional(var): - return ('attrspec' in var and 'optional' in var['attrspec'] and - 'required' not in var['attrspec']) and isintent_nothide(var) - - -def isexternal(var): - return 'attrspec' in var and 'external' in var['attrspec'] - - -def isrequired(var): - return not isoptional(var) and isintent_nothide(var) - - -def isintent_in(var): - if 'intent' not in var: - return 1 - if 'hide' in var['intent']: - return 0 - if 'inplace' in var['intent']: - return 0 - if 'in' in var['intent']: - return 1 - if 'out' in var['intent']: - return 0 - if 'inout' in var['intent']: - return 0 - if 'outin' in var['intent']: - return 0 - return 1 - - -def isintent_inout(var): - return ('intent' in var and ('inout' in var['intent'] or - 'outin' in var['intent']) and 'in' not in var['intent'] and - 'hide' not in var['intent'] and 'inplace' not in var['intent']) - - -def isintent_out(var): - return 'out' in var.get('intent', []) - - -def isintent_hide(var): - return ('intent' in var and ('hide' in var['intent'] or - ('out' in var['intent'] and 'in' not in var['intent'] and - (not l_or(isintent_inout, isintent_inplace)(var))))) - -def isintent_nothide(var): - return not isintent_hide(var) - - -def isintent_c(var): - return 'c' in var.get('intent', []) - - -def isintent_cache(var): - return 'cache' in var.get('intent', []) - - -def isintent_copy(var): - return 'copy' in var.get('intent', []) - - -def 
isintent_overwrite(var): - return 'overwrite' in var.get('intent', []) - - -def isintent_callback(var): - return 'callback' in var.get('intent', []) - - -def isintent_inplace(var): - return 'inplace' in var.get('intent', []) - - -def isintent_aux(var): - return 'aux' in var.get('intent', []) - - -def isintent_aligned4(var): - return 'aligned4' in var.get('intent', []) - - -def isintent_aligned8(var): - return 'aligned8' in var.get('intent', []) - - -def isintent_aligned16(var): - return 'aligned16' in var.get('intent', []) - -isintent_dict = {isintent_in: 'INTENT_IN', isintent_inout: 'INTENT_INOUT', - isintent_out: 'INTENT_OUT', isintent_hide: 'INTENT_HIDE', - isintent_cache: 'INTENT_CACHE', - isintent_c: 'INTENT_C', isoptional: 'OPTIONAL', - isintent_inplace: 'INTENT_INPLACE', - isintent_aligned4: 'INTENT_ALIGNED4', - isintent_aligned8: 'INTENT_ALIGNED8', - isintent_aligned16: 'INTENT_ALIGNED16', - } - - -def isprivate(var): - return 'attrspec' in var and 'private' in var['attrspec'] - - -def hasinitvalue(var): - return '=' in var - - -def hasinitvalueasstring(var): - if not hasinitvalue(var): - return 0 - return var['='][0] in ['"', "'"] - - -def hasnote(var): - return 'note' in var - - -def hasresultnote(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a = rout['result'] - else: - a = rout['name'] - if a in rout['vars']: - return hasnote(rout['vars'][a]) - return 0 - - -def hascommon(rout): - return 'common' in rout - - -def containscommon(rout): - if hascommon(rout): - return 1 - if hasbody(rout): - for b in rout['body']: - if containscommon(b): - return 1 - return 0 - - -def containsmodule(block): - if ismodule(block): - return 1 - if not hasbody(block): - return 0 - for b in block['body']: - if containsmodule(b): - return 1 - return 0 - - -def hasbody(rout): - return 'body' in rout - - -def hascallstatement(rout): - return getcallstatement(rout) is not None - - -def istrue(var): - return 1 - - -def isfalse(var): - return 0 - - -class 
F2PYError(Exception): - pass - - -class throw_error(object): - - def __init__(self, mess): - self.mess = mess - - def __call__(self, var): - mess = '\n\n var = %s\n Message: %s\n' % (var, self.mess) - raise F2PYError(mess) - - -def l_and(*f): - l, l2 = 'lambda v', [] - for i in range(len(f)): - l = '%s,f%d=f[%d]' % (l, i, i) - l2.append('f%d(v)' % (i)) - return eval('%s:%s' % (l, ' and '.join(l2))) - - -def l_or(*f): - l, l2 = 'lambda v', [] - for i in range(len(f)): - l = '%s,f%d=f[%d]' % (l, i, i) - l2.append('f%d(v)' % (i)) - return eval('%s:%s' % (l, ' or '.join(l2))) - - -def l_not(f): - return eval('lambda v,f=f:not f(v)') - - -def isdummyroutine(rout): - try: - return rout['f2pyenhancements']['fortranname'] == '' - except KeyError: - return 0 - - -def getfortranname(rout): - try: - name = rout['f2pyenhancements']['fortranname'] - if name == '': - raise KeyError - if not name: - errmess('Failed to use fortranname from %s\n' % - (rout['f2pyenhancements'])) - raise KeyError - except KeyError: - name = rout['name'] - return name - - -def getmultilineblock(rout, blockname, comment=1, counter=0): - try: - r = rout['f2pyenhancements'].get(blockname) - except KeyError: - return - if not r: - return - if counter > 0 and isinstance(r, str): - return - if isinstance(r, list): - if counter >= len(r): - return - r = r[counter] - if r[:3] == "'''": - if comment: - r = '\t/* start ' + blockname + \ - ' multiline (' + repr(counter) + ') */\n' + r[3:] - else: - r = r[3:] - if r[-3:] == "'''": - if comment: - r = r[:-3] + '\n\t/* end multiline (' + repr(counter) + ')*/' - else: - r = r[:-3] - else: - errmess("%s multiline block should end with `'''`: %s\n" - % (blockname, repr(r))) - return r - - -def getcallstatement(rout): - return getmultilineblock(rout, 'callstatement') - - -def getcallprotoargument(rout, cb_map={}): - r = getmultilineblock(rout, 'callprotoargument', comment=0) - if r: - return r - if hascallstatement(rout): - outmess( - 'warning: callstatement is defined 
without callprotoargument\n') - return - from .capi_maps import getctype - arg_types, arg_types2 = [], [] - if l_and(isstringfunction, l_not(isfunction_wrap))(rout): - arg_types.extend(['char*', 'size_t']) - for n in rout['args']: - var = rout['vars'][n] - if isintent_callback(var): - continue - if n in cb_map: - ctype = cb_map[n] + '_typedef' - else: - ctype = getctype(var) - if l_and(isintent_c, l_or(isscalar, iscomplex))(var): - pass - elif isstring(var): - pass - else: - ctype = ctype + '*' - if isstring(var) or isarrayofstrings(var): - arg_types2.append('size_t') - arg_types.append(ctype) - - proto_args = ','.join(arg_types + arg_types2) - if not proto_args: - proto_args = 'void' - return proto_args - - -def getusercode(rout): - return getmultilineblock(rout, 'usercode') - - -def getusercode1(rout): - return getmultilineblock(rout, 'usercode', counter=1) - - -def getpymethoddef(rout): - return getmultilineblock(rout, 'pymethoddef') - - -def getargs(rout): - sortargs, args = [], [] - if 'args' in rout: - args = rout['args'] - if 'sortvars' in rout: - for a in rout['sortvars']: - if a in args: - sortargs.append(a) - for a in args: - if a not in sortargs: - sortargs.append(a) - else: - sortargs = rout['args'] - return args, sortargs - - -def getargs2(rout): - sortargs, args = [], rout.get('args', []) - auxvars = [a for a in rout['vars'].keys() if isintent_aux(rout['vars'][a]) - and a not in args] - args = auxvars + args - if 'sortvars' in rout: - for a in rout['sortvars']: - if a in args: - sortargs.append(a) - for a in args: - if a not in sortargs: - sortargs.append(a) - else: - sortargs = auxvars + rout['args'] - return args, sortargs - - -def getrestdoc(rout): - if 'f2pymultilines' not in rout: - return None - k = None - if rout['block'] == 'python module': - k = rout['block'], rout['name'] - return rout['f2pymultilines'].get(k, None) - - -def gentitle(name): - l = (80 - len(name) - 6) // 2 - return '/*%s %s %s*/' % (l * '*', name, l * '*') - - -def 
flatlist(l): - if isinstance(l, list): - return reduce(lambda x, y, f=flatlist: x + f(y), l, []) - return [l] - - -def stripcomma(s): - if s and s[-1] == ',': - return s[:-1] - return s - - -def replace(str, d, defaultsep=''): - if isinstance(d, list): - return [replace(str, _m, defaultsep) for _m in d] - if isinstance(str, list): - return [replace(_m, d, defaultsep) for _m in str] - for k in 2 * list(d.keys()): - if k == 'separatorsfor': - continue - if 'separatorsfor' in d and k in d['separatorsfor']: - sep = d['separatorsfor'][k] - else: - sep = defaultsep - if isinstance(d[k], list): - str = str.replace('#%s#' % (k), sep.join(flatlist(d[k]))) - else: - str = str.replace('#%s#' % (k), d[k]) - return str - - -def dictappend(rd, ar): - if isinstance(ar, list): - for a in ar: - rd = dictappend(rd, a) - return rd - for k in ar.keys(): - if k[0] == '_': - continue - if k in rd: - if isinstance(rd[k], str): - rd[k] = [rd[k]] - if isinstance(rd[k], list): - if isinstance(ar[k], list): - rd[k] = rd[k] + ar[k] - else: - rd[k].append(ar[k]) - elif isinstance(rd[k], dict): - if isinstance(ar[k], dict): - if k == 'separatorsfor': - for k1 in ar[k].keys(): - if k1 not in rd[k]: - rd[k][k1] = ar[k][k1] - else: - rd[k] = dictappend(rd[k], ar[k]) - else: - rd[k] = ar[k] - return rd - - -def applyrules(rules, d, var={}): - ret = {} - if isinstance(rules, list): - for r in rules: - rr = applyrules(r, d, var) - ret = dictappend(ret, rr) - if '_break' in rr: - break - return ret - if '_check' in rules and (not rules['_check'](var)): - return ret - if 'need' in rules: - res = applyrules({'needs': rules['need']}, d, var) - if 'needs' in res: - cfuncs.append_needs(res['needs']) - - for k in rules.keys(): - if k == 'separatorsfor': - ret[k] = rules[k] - continue - if isinstance(rules[k], str): - ret[k] = replace(rules[k], d) - elif isinstance(rules[k], list): - ret[k] = [] - for i in rules[k]: - ar = applyrules({k: i}, d, var) - if k in ar: - ret[k].append(ar[k]) - elif k[0] == '_': - 
continue - elif isinstance(rules[k], dict): - ret[k] = [] - for k1 in rules[k].keys(): - if isinstance(k1, types.FunctionType) and k1(var): - if isinstance(rules[k][k1], list): - for i in rules[k][k1]: - if isinstance(i, dict): - res = applyrules({'supertext': i}, d, var) - if 'supertext' in res: - i = res['supertext'] - else: - i = '' - ret[k].append(replace(i, d)) - else: - i = rules[k][k1] - if isinstance(i, dict): - res = applyrules({'supertext': i}, d) - if 'supertext' in res: - i = res['supertext'] - else: - i = '' - ret[k].append(replace(i, d)) - else: - errmess('applyrules: ignoring rule %s.\n' % repr(rules[k])) - if isinstance(ret[k], list): - if len(ret[k]) == 1: - ret[k] = ret[k][0] - if ret[k] == []: - del ret[k] - return ret diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/capi_maps.py b/venv/lib/python3.7/site-packages/numpy/f2py/capi_maps.py deleted file mode 100644 index ce79f68..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/capi_maps.py +++ /dev/null @@ -1,849 +0,0 @@ -#!/usr/bin/env python -""" - -Copyright 1999,2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/05/06 10:57:33 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -__version__ = "$Revision: 1.60 $"[10:-1] - -from . import __version__ -f2py_version = __version__.version - -import copy -import re -import os -import sys -from .crackfortran import markoutercomma -from . import cb_rules - -# The eviroment provided by auxfuncs.py is needed for some calls to eval. -# As the needed functions cannot be determined by static inspection of the -# code, it is safest to use import * pending a major refactoring of f2py. 
-from .auxfuncs import * - -__all__ = [ - 'getctype', 'getstrlength', 'getarrdims', 'getpydocsign', - 'getarrdocsign', 'getinit', 'sign2map', 'routsign2map', 'modsign2map', - 'cb_sign2map', 'cb_routsign2map', 'common_sign2map' -] - - -# Numarray and Numeric users should set this False -using_newcore = True - -depargs = [] -lcb_map = {} -lcb2_map = {} -# forced casting: mainly caused by the fact that Python or Numeric -# C/APIs do not support the corresponding C types. -c2py_map = {'double': 'float', - 'float': 'float', # forced casting - 'long_double': 'float', # forced casting - 'char': 'int', # forced casting - 'signed_char': 'int', # forced casting - 'unsigned_char': 'int', # forced casting - 'short': 'int', # forced casting - 'unsigned_short': 'int', # forced casting - 'int': 'int', # (forced casting) - 'long': 'int', - 'long_long': 'long', - 'unsigned': 'int', # forced casting - 'complex_float': 'complex', # forced casting - 'complex_double': 'complex', - 'complex_long_double': 'complex', # forced casting - 'string': 'string', - } -c2capi_map = {'double': 'NPY_DOUBLE', - 'float': 'NPY_FLOAT', - 'long_double': 'NPY_DOUBLE', # forced casting - 'char': 'NPY_STRING', - 'unsigned_char': 'NPY_UBYTE', - 'signed_char': 'NPY_BYTE', - 'short': 'NPY_SHORT', - 'unsigned_short': 'NPY_USHORT', - 'int': 'NPY_INT', - 'unsigned': 'NPY_UINT', - 'long': 'NPY_LONG', - 'long_long': 'NPY_LONG', # forced casting - 'complex_float': 'NPY_CFLOAT', - 'complex_double': 'NPY_CDOUBLE', - 'complex_long_double': 'NPY_CDOUBLE', # forced casting - 'string': 'NPY_STRING'} - -# These new maps aren't used anyhere yet, but should be by default -# unless building numeric or numarray extensions. 
-if using_newcore: - c2capi_map = {'double': 'NPY_DOUBLE', - 'float': 'NPY_FLOAT', - 'long_double': 'NPY_LONGDOUBLE', - 'char': 'NPY_BYTE', - 'unsigned_char': 'NPY_UBYTE', - 'signed_char': 'NPY_BYTE', - 'short': 'NPY_SHORT', - 'unsigned_short': 'NPY_USHORT', - 'int': 'NPY_INT', - 'unsigned': 'NPY_UINT', - 'long': 'NPY_LONG', - 'unsigned_long': 'NPY_ULONG', - 'long_long': 'NPY_LONGLONG', - 'unsigned_long_long': 'NPY_ULONGLONG', - 'complex_float': 'NPY_CFLOAT', - 'complex_double': 'NPY_CDOUBLE', - 'complex_long_double': 'NPY_CDOUBLE', - 'string':'NPY_STRING' - - } -c2pycode_map = {'double': 'd', - 'float': 'f', - 'long_double': 'd', # forced casting - 'char': '1', - 'signed_char': '1', - 'unsigned_char': 'b', - 'short': 's', - 'unsigned_short': 'w', - 'int': 'i', - 'unsigned': 'u', - 'long': 'l', - 'long_long': 'L', - 'complex_float': 'F', - 'complex_double': 'D', - 'complex_long_double': 'D', # forced casting - 'string': 'c' - } -if using_newcore: - c2pycode_map = {'double': 'd', - 'float': 'f', - 'long_double': 'g', - 'char': 'b', - 'unsigned_char': 'B', - 'signed_char': 'b', - 'short': 'h', - 'unsigned_short': 'H', - 'int': 'i', - 'unsigned': 'I', - 'long': 'l', - 'unsigned_long': 'L', - 'long_long': 'q', - 'unsigned_long_long': 'Q', - 'complex_float': 'F', - 'complex_double': 'D', - 'complex_long_double': 'G', - 'string': 'S'} -c2buildvalue_map = {'double': 'd', - 'float': 'f', - 'char': 'b', - 'signed_char': 'b', - 'short': 'h', - 'int': 'i', - 'long': 'l', - 'long_long': 'L', - 'complex_float': 'N', - 'complex_double': 'N', - 'complex_long_double': 'N', - 'string': 'z'} - -if sys.version_info[0] >= 3: - # Bytes, not Unicode strings - c2buildvalue_map['string'] = 'y' - -if using_newcore: - # c2buildvalue_map=??? 
- pass - -f2cmap_all = {'real': {'': 'float', '4': 'float', '8': 'double', - '12': 'long_double', '16': 'long_double'}, - 'integer': {'': 'int', '1': 'signed_char', '2': 'short', - '4': 'int', '8': 'long_long', - '-1': 'unsigned_char', '-2': 'unsigned_short', - '-4': 'unsigned', '-8': 'unsigned_long_long'}, - 'complex': {'': 'complex_float', '8': 'complex_float', - '16': 'complex_double', '24': 'complex_long_double', - '32': 'complex_long_double'}, - 'complexkind': {'': 'complex_float', '4': 'complex_float', - '8': 'complex_double', '12': 'complex_long_double', - '16': 'complex_long_double'}, - 'logical': {'': 'int', '1': 'char', '2': 'short', '4': 'int', - '8': 'long_long'}, - 'double complex': {'': 'complex_double'}, - 'double precision': {'': 'double'}, - 'byte': {'': 'char'}, - 'character': {'': 'string'} - } - -f2cmap_default = copy.deepcopy(f2cmap_all) - - -def load_f2cmap_file(f2cmap_file): - global f2cmap_all - - f2cmap_all = copy.deepcopy(f2cmap_default) - - if f2cmap_file is None: - # Default value - f2cmap_file = '.f2py_f2cmap' - if not os.path.isfile(f2cmap_file): - return - - # User defined additions to f2cmap_all. - # f2cmap_file must contain a dictionary of dictionaries, only. For - # example, {'real':{'low':'float'}} means that Fortran 'real(low)' is - # interpreted as C 'float'. This feature is useful for F90/95 users if - # they use PARAMETERSs in type specifications. 
- try: - outmess('Reading f2cmap from {!r} ...\n'.format(f2cmap_file)) - with open(f2cmap_file, 'r') as f: - d = eval(f.read(), {}, {}) - for k, d1 in list(d.items()): - for k1 in list(d1.keys()): - d1[k1.lower()] = d1[k1] - d[k.lower()] = d[k] - for k in list(d.keys()): - if k not in f2cmap_all: - f2cmap_all[k] = {} - for k1 in list(d[k].keys()): - if d[k][k1] in c2py_map: - if k1 in f2cmap_all[k]: - outmess( - "\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n" % (k, k1, f2cmap_all[k][k1], d[k][k1])) - f2cmap_all[k][k1] = d[k][k1] - outmess('\tMapping "%s(kind=%s)" to "%s"\n' % - (k, k1, d[k][k1])) - else: - errmess("\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n" % ( - k, k1, d[k][k1], d[k][k1], list(c2py_map.keys()))) - outmess('Successfully applied user defined f2cmap changes\n') - except Exception as msg: - errmess( - 'Failed to apply user defined f2cmap changes: %s. Skipping.\n' % (msg)) - -cformat_map = {'double': '%g', - 'float': '%g', - 'long_double': '%Lg', - 'char': '%d', - 'signed_char': '%d', - 'unsigned_char': '%hhu', - 'short': '%hd', - 'unsigned_short': '%hu', - 'int': '%d', - 'unsigned': '%u', - 'long': '%ld', - 'unsigned_long': '%lu', - 'long_long': '%ld', - 'complex_float': '(%g,%g)', - 'complex_double': '(%g,%g)', - 'complex_long_double': '(%Lg,%Lg)', - 'string': '%s', - } - -# Auxiliary functions - - -def getctype(var): - """ - Determines C type - """ - ctype = 'void' - if isfunction(var): - if 'result' in var: - a = var['result'] - else: - a = var['name'] - if a in var['vars']: - return getctype(var['vars'][a]) - else: - errmess('getctype: function %s has no return value?!\n' % a) - elif issubroutine(var): - return ctype - elif 'typespec' in var and var['typespec'].lower() in f2cmap_all: - typespec = var['typespec'].lower() - f2cmap = f2cmap_all[typespec] - ctype = f2cmap[''] # default type - if 'kindselector' in var: - if '*' in var['kindselector']: - try: - ctype = f2cmap[var['kindselector']['*']] - except KeyError: - 
errmess('getctype: "%s %s %s" not supported.\n' % - (var['typespec'], '*', var['kindselector']['*'])) - elif 'kind' in var['kindselector']: - if typespec + 'kind' in f2cmap_all: - f2cmap = f2cmap_all[typespec + 'kind'] - try: - ctype = f2cmap[var['kindselector']['kind']] - except KeyError: - if typespec in f2cmap_all: - f2cmap = f2cmap_all[typespec] - try: - ctype = f2cmap[str(var['kindselector']['kind'])] - except KeyError: - errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="")) in %s/.f2py_f2cmap file).\n' - % (typespec, var['kindselector']['kind'], ctype, - typespec, var['kindselector']['kind'], os.getcwd())) - - else: - if not isexternal(var): - errmess( - 'getctype: No C-type found in "%s", assuming void.\n' % var) - return ctype - - -def getstrlength(var): - if isstringfunction(var): - if 'result' in var: - a = var['result'] - else: - a = var['name'] - if a in var['vars']: - return getstrlength(var['vars'][a]) - else: - errmess('getstrlength: function %s has no return value?!\n' % a) - if not isstring(var): - errmess( - 'getstrlength: expected a signature of a string but got: %s\n' % (repr(var))) - len = '1' - if 'charselector' in var: - a = var['charselector'] - if '*' in a: - len = a['*'] - elif 'len' in a: - len = a['len'] - if re.match(r'\(\s*([*]|[:])\s*\)', len) or re.match(r'([*]|[:])', len): - if isintent_hide(var): - errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n' % ( - repr(var))) - len = '-1' - return len - - -def getarrdims(a, var, verbose=0): - global depargs - ret = {} - if isstring(var) and not isarray(var): - ret['dims'] = getstrlength(var) - ret['size'] = ret['dims'] - ret['rank'] = '1' - elif isscalar(var): - ret['size'] = '1' - ret['rank'] = '0' - ret['dims'] = '' - elif isarray(var): - dim = copy.copy(var['dimension']) - ret['size'] = '*'.join(dim) - try: - ret['size'] = repr(eval(ret['size'])) - except Exception: - pass - ret['dims'] = ','.join(dim) - 
ret['rank'] = repr(len(dim)) - ret['rank*[-1]'] = repr(len(dim) * [-1])[1:-1] - for i in range(len(dim)): # solve dim for dependencies - v = [] - if dim[i] in depargs: - v = [dim[i]] - else: - for va in depargs: - if re.match(r'.*?\b%s\b.*' % va, dim[i]): - v.append(va) - for va in v: - if depargs.index(va) > depargs.index(a): - dim[i] = '*' - break - ret['setdims'], i = '', -1 - for d in dim: - i = i + 1 - if d not in ['*', ':', '(*)', '(:)']: - ret['setdims'] = '%s#varname#_Dims[%d]=%s,' % ( - ret['setdims'], i, d) - if ret['setdims']: - ret['setdims'] = ret['setdims'][:-1] - ret['cbsetdims'], i = '', -1 - for d in var['dimension']: - i = i + 1 - if d not in ['*', ':', '(*)', '(:)']: - ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( - ret['cbsetdims'], i, d) - elif isintent_in(var): - outmess('getarrdims:warning: assumed shape array, using 0 instead of %r\n' - % (d)) - ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( - ret['cbsetdims'], i, 0) - elif verbose: - errmess( - 'getarrdims: If in call-back function: array argument %s must have bounded dimensions: got %s\n' % (repr(a), repr(d))) - if ret['cbsetdims']: - ret['cbsetdims'] = ret['cbsetdims'][:-1] -# if not isintent_c(var): -# var['dimension'].reverse() - return ret - - -def getpydocsign(a, var): - global lcb_map - if isfunction(var): - if 'result' in var: - af = var['result'] - else: - af = var['name'] - if af in var['vars']: - return getpydocsign(af, var['vars'][af]) - else: - errmess('getctype: function %s has no return value?!\n' % af) - return '', '' - sig, sigout = a, a - opt = '' - if isintent_in(var): - opt = 'input' - elif isintent_inout(var): - opt = 'in/output' - out_a = a - if isintent_out(var): - for k in var['intent']: - if k[:4] == 'out=': - out_a = k[4:] - break - init = '' - ctype = getctype(var) - - if hasinitvalue(var): - init, showinit = getinit(a, var) - init = ', optional\\n Default: %s' % showinit - if isscalar(var): - if isintent_inout(var): - sig = '%s : %s rank-0 
array(%s,\'%s\')%s' % (a, opt, c2py_map[ctype], - c2pycode_map[ctype], init) - else: - sig = '%s : %s %s%s' % (a, opt, c2py_map[ctype], init) - sigout = '%s : %s' % (out_a, c2py_map[ctype]) - elif isstring(var): - if isintent_inout(var): - sig = '%s : %s rank-0 array(string(len=%s),\'c\')%s' % ( - a, opt, getstrlength(var), init) - else: - sig = '%s : %s string(len=%s)%s' % ( - a, opt, getstrlength(var), init) - sigout = '%s : string(len=%s)' % (out_a, getstrlength(var)) - elif isarray(var): - dim = var['dimension'] - rank = repr(len(dim)) - sig = '%s : %s rank-%s array(\'%s\') with bounds (%s)%s' % (a, opt, rank, - c2pycode_map[ - ctype], - ','.join(dim), init) - if a == out_a: - sigout = '%s : rank-%s array(\'%s\') with bounds (%s)'\ - % (a, rank, c2pycode_map[ctype], ','.join(dim)) - else: - sigout = '%s : rank-%s array(\'%s\') with bounds (%s) and %s storage'\ - % (out_a, rank, c2pycode_map[ctype], ','.join(dim), a) - elif isexternal(var): - ua = '' - if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]: - ua = lcb2_map[lcb_map[a]]['argname'] - if not ua == a: - ua = ' => %s' % ua - else: - ua = '' - sig = '%s : call-back function%s' % (a, ua) - sigout = sig - else: - errmess( - 'getpydocsign: Could not resolve docsignature for "%s".\\n' % a) - return sig, sigout - - -def getarrdocsign(a, var): - ctype = getctype(var) - if isstring(var) and (not isarray(var)): - sig = '%s : rank-0 array(string(len=%s),\'c\')' % (a, - getstrlength(var)) - elif isscalar(var): - sig = '%s : rank-0 array(%s,\'%s\')' % (a, c2py_map[ctype], - c2pycode_map[ctype],) - elif isarray(var): - dim = var['dimension'] - rank = repr(len(dim)) - sig = '%s : rank-%s array(\'%s\') with bounds (%s)' % (a, rank, - c2pycode_map[ - ctype], - ','.join(dim)) - return sig - - -def getinit(a, var): - if isstring(var): - init, showinit = '""', "''" - else: - init, showinit = '', '' - if hasinitvalue(var): - init = var['='] - showinit = init - if iscomplex(var) or 
iscomplexarray(var): - ret = {} - - try: - v = var["="] - if ',' in v: - ret['init.r'], ret['init.i'] = markoutercomma( - v[1:-1]).split('@,@') - else: - v = eval(v, {}, {}) - ret['init.r'], ret['init.i'] = str(v.real), str(v.imag) - except Exception: - raise ValueError( - 'getinit: expected complex number `(r,i)\' but got `%s\' as initial value of %r.' % (init, a)) - if isarray(var): - init = '(capi_c.r=%s,capi_c.i=%s,capi_c)' % ( - ret['init.r'], ret['init.i']) - elif isstring(var): - if not init: - init, showinit = '""', "''" - if init[0] == "'": - init = '"%s"' % (init[1:-1].replace('"', '\\"')) - if init[0] == '"': - showinit = "'%s'" % (init[1:-1]) - return init, showinit - - -def sign2map(a, var): - """ - varname,ctype,atype - init,init.r,init.i,pytype - vardebuginfo,vardebugshowvalue,varshowvalue - varrfromat - intent - """ - global lcb_map, cb_map - out_a = a - if isintent_out(var): - for k in var['intent']: - if k[:4] == 'out=': - out_a = k[4:] - break - ret = {'varname': a, 'outvarname': out_a, 'ctype': getctype(var)} - intent_flags = [] - for f, s in isintent_dict.items(): - if f(var): - intent_flags.append('F2PY_%s' % s) - if intent_flags: - # XXX: Evaluate intent_flags here. 
- ret['intent'] = '|'.join(intent_flags) - else: - ret['intent'] = 'F2PY_INTENT_IN' - if isarray(var): - ret['varrformat'] = 'N' - elif ret['ctype'] in c2buildvalue_map: - ret['varrformat'] = c2buildvalue_map[ret['ctype']] - else: - ret['varrformat'] = 'O' - ret['init'], ret['showinit'] = getinit(a, var) - if hasinitvalue(var) and iscomplex(var) and not isarray(var): - ret['init.r'], ret['init.i'] = markoutercomma( - ret['init'][1:-1]).split('@,@') - if isexternal(var): - ret['cbnamekey'] = a - if a in lcb_map: - ret['cbname'] = lcb_map[a] - ret['maxnofargs'] = lcb2_map[lcb_map[a]]['maxnofargs'] - ret['nofoptargs'] = lcb2_map[lcb_map[a]]['nofoptargs'] - ret['cbdocstr'] = lcb2_map[lcb_map[a]]['docstr'] - ret['cblatexdocstr'] = lcb2_map[lcb_map[a]]['latexdocstr'] - else: - ret['cbname'] = a - errmess('sign2map: Confused: external %s is not in lcb_map%s.\n' % ( - a, list(lcb_map.keys()))) - if isstring(var): - ret['length'] = getstrlength(var) - if isarray(var): - ret = dictappend(ret, getarrdims(a, var)) - dim = copy.copy(var['dimension']) - if ret['ctype'] in c2capi_map: - ret['atype'] = c2capi_map[ret['ctype']] - # Debug info - if debugcapi(var): - il = [isintent_in, 'input', isintent_out, 'output', - isintent_inout, 'inoutput', isrequired, 'required', - isoptional, 'optional', isintent_hide, 'hidden', - iscomplex, 'complex scalar', - l_and(isscalar, l_not(iscomplex)), 'scalar', - isstring, 'string', isarray, 'array', - iscomplexarray, 'complex array', isstringarray, 'string array', - iscomplexfunction, 'complex function', - l_and(isfunction, l_not(iscomplexfunction)), 'function', - isexternal, 'callback', - isintent_callback, 'callback', - isintent_aux, 'auxiliary', - ] - rl = [] - for i in range(0, len(il), 2): - if il[i](var): - rl.append(il[i + 1]) - if isstring(var): - rl.append('slen(%s)=%s' % (a, ret['length'])) - if isarray(var): - ddim = ','.join( - map(lambda x, y: '%s|%s' % (x, y), var['dimension'], dim)) - rl.append('dims(%s)' % ddim) - if 
isexternal(var): - ret['vardebuginfo'] = 'debug-capi:%s=>%s:%s' % ( - a, ret['cbname'], ','.join(rl)) - else: - ret['vardebuginfo'] = 'debug-capi:%s %s=%s:%s' % ( - ret['ctype'], a, ret['showinit'], ','.join(rl)) - if isscalar(var): - if ret['ctype'] in cformat_map: - ret['vardebugshowvalue'] = 'debug-capi:%s=%s' % ( - a, cformat_map[ret['ctype']]) - if isstring(var): - ret['vardebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( - a, a) - if isexternal(var): - ret['vardebugshowvalue'] = 'debug-capi:%s=%%p' % (a) - if ret['ctype'] in cformat_map: - ret['varshowvalue'] = '#name#:%s=%s' % (a, cformat_map[ret['ctype']]) - ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) - if isstring(var): - ret['varshowvalue'] = '#name#:slen(%s)=%%d %s=\\"%%s\\"' % (a, a) - ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) - if hasnote(var): - ret['note'] = var['note'] - return ret - - -def routsign2map(rout): - """ - name,NAME,begintitle,endtitle - rname,ctype,rformat - routdebugshowvalue - """ - global lcb_map - name = rout['name'] - fname = getfortranname(rout) - ret = {'name': name, - 'texname': name.replace('_', '\\_'), - 'name_lower': name.lower(), - 'NAME': name.upper(), - 'begintitle': gentitle(name), - 'endtitle': gentitle('end of %s' % name), - 'fortranname': fname, - 'FORTRANNAME': fname.upper(), - 'callstatement': getcallstatement(rout) or '', - 'usercode': getusercode(rout) or '', - 'usercode1': getusercode1(rout) or '', - } - if '_' in fname: - ret['F_FUNC'] = 'F_FUNC_US' - else: - ret['F_FUNC'] = 'F_FUNC' - if '_' in name: - ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC_US' - else: - ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC' - lcb_map = {} - if 'use' in rout: - for u in rout['use'].keys(): - if u in cb_rules.cb_map: - for un in cb_rules.cb_map[u]: - ln = un[0] - if 'map' in rout['use'][u]: - for k in rout['use'][u]['map'].keys(): - if rout['use'][u]['map'][k] == un[0]: - ln = k - break - lcb_map[ln] = un[1] - elif 'externals' in rout and 
rout['externals']: - errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n' % ( - ret['name'], repr(rout['externals']))) - ret['callprotoargument'] = getcallprotoargument(rout, lcb_map) or '' - if isfunction(rout): - if 'result' in rout: - a = rout['result'] - else: - a = rout['name'] - ret['rname'] = a - ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout) - ret['ctype'] = getctype(rout['vars'][a]) - if hasresultnote(rout): - ret['resultnote'] = rout['vars'][a]['note'] - rout['vars'][a]['note'] = ['See elsewhere.'] - if ret['ctype'] in c2buildvalue_map: - ret['rformat'] = c2buildvalue_map[ret['ctype']] - else: - ret['rformat'] = 'O' - errmess('routsign2map: no c2buildvalue key for type %s\n' % - (repr(ret['ctype']))) - if debugcapi(rout): - if ret['ctype'] in cformat_map: - ret['routdebugshowvalue'] = 'debug-capi:%s=%s' % ( - a, cformat_map[ret['ctype']]) - if isstringfunction(rout): - ret['routdebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( - a, a) - if isstringfunction(rout): - ret['rlength'] = getstrlength(rout['vars'][a]) - if ret['rlength'] == '-1': - errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 10.\n' % ( - repr(rout['name']))) - ret['rlength'] = '10' - if hasnote(rout): - ret['note'] = rout['note'] - rout['note'] = ['See elsewhere.'] - return ret - - -def modsign2map(m): - """ - modulename - """ - if ismodule(m): - ret = {'f90modulename': m['name'], - 'F90MODULENAME': m['name'].upper(), - 'texf90modulename': m['name'].replace('_', '\\_')} - else: - ret = {'modulename': m['name'], - 'MODULENAME': m['name'].upper(), - 'texmodulename': m['name'].replace('_', '\\_')} - ret['restdoc'] = getrestdoc(m) or [] - if hasnote(m): - ret['note'] = m['note'] - ret['usercode'] = getusercode(m) or '' - ret['usercode1'] = getusercode1(m) or '' - if m['body']: - ret['interface_usercode'] = getusercode(m['body'][0]) or '' - else: - 
ret['interface_usercode'] = '' - ret['pymethoddef'] = getpymethoddef(m) or '' - if 'coutput' in m: - ret['coutput'] = m['coutput'] - if 'f2py_wrapper_output' in m: - ret['f2py_wrapper_output'] = m['f2py_wrapper_output'] - return ret - - -def cb_sign2map(a, var, index=None): - ret = {'varname': a} - ret['varname_i'] = ret['varname'] - ret['ctype'] = getctype(var) - if ret['ctype'] in c2capi_map: - ret['atype'] = c2capi_map[ret['ctype']] - if ret['ctype'] in cformat_map: - ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) - if isarray(var): - ret = dictappend(ret, getarrdims(a, var)) - ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) - if hasnote(var): - ret['note'] = var['note'] - var['note'] = ['See elsewhere.'] - return ret - - -def cb_routsign2map(rout, um): - """ - name,begintitle,endtitle,argname - ctype,rctype,maxnofargs,nofoptargs,returncptr - """ - ret = {'name': 'cb_%s_in_%s' % (rout['name'], um), - 'returncptr': ''} - if isintent_callback(rout): - if '_' in rout['name']: - F_FUNC = 'F_FUNC_US' - else: - F_FUNC = 'F_FUNC' - ret['callbackname'] = '%s(%s,%s)' \ - % (F_FUNC, - rout['name'].lower(), - rout['name'].upper(), - ) - ret['static'] = 'extern' - else: - ret['callbackname'] = ret['name'] - ret['static'] = 'static' - ret['argname'] = rout['name'] - ret['begintitle'] = gentitle(ret['name']) - ret['endtitle'] = gentitle('end of %s' % ret['name']) - ret['ctype'] = getctype(rout) - ret['rctype'] = 'void' - if ret['ctype'] == 'string': - ret['rctype'] = 'void' - else: - ret['rctype'] = ret['ctype'] - if ret['rctype'] != 'void': - if iscomplexfunction(rout): - ret['returncptr'] = """ -#ifdef F2PY_CB_RETURNCOMPLEX -return_value= -#endif -""" - else: - ret['returncptr'] = 'return_value=' - if ret['ctype'] in cformat_map: - ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) - if isstringfunction(rout): - ret['strlength'] = getstrlength(rout) - if isfunction(rout): - if 'result' in rout: - a = rout['result'] - else: - a = 
rout['name'] - if hasnote(rout['vars'][a]): - ret['note'] = rout['vars'][a]['note'] - rout['vars'][a]['note'] = ['See elsewhere.'] - ret['rname'] = a - ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout) - if iscomplexfunction(rout): - ret['rctype'] = """ -#ifdef F2PY_CB_RETURNCOMPLEX -#ctype# -#else -void -#endif -""" - else: - if hasnote(rout): - ret['note'] = rout['note'] - rout['note'] = ['See elsewhere.'] - nofargs = 0 - nofoptargs = 0 - if 'args' in rout and 'vars' in rout: - for a in rout['args']: - var = rout['vars'][a] - if l_or(isintent_in, isintent_inout)(var): - nofargs = nofargs + 1 - if isoptional(var): - nofoptargs = nofoptargs + 1 - ret['maxnofargs'] = repr(nofargs) - ret['nofoptargs'] = repr(nofoptargs) - if hasnote(rout) and isfunction(rout) and 'result' in rout: - ret['routnote'] = rout['note'] - rout['note'] = ['See elsewhere.'] - return ret - - -def common_sign2map(a, var): # obsolute - ret = {'varname': a, 'ctype': getctype(var)} - if isstringarray(var): - ret['ctype'] = 'char' - if ret['ctype'] in c2capi_map: - ret['atype'] = c2capi_map[ret['ctype']] - if ret['ctype'] in cformat_map: - ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) - if isarray(var): - ret = dictappend(ret, getarrdims(a, var)) - elif isstring(var): - ret['size'] = getstrlength(var) - ret['rank'] = '1' - ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) - if hasnote(var): - ret['note'] = var['note'] - var['note'] = ['See elsewhere.'] - # for strings this returns 0-rank but actually is 1-rank - ret['arrdocstr'] = getarrdocsign(a, var) - return ret diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/cb_rules.py b/venv/lib/python3.7/site-packages/numpy/f2py/cb_rules.py deleted file mode 100644 index 183d7c2..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/cb_rules.py +++ /dev/null @@ -1,578 +0,0 @@ -#!/usr/bin/env python -""" - -Build call-back mechanism for f2py2e. 
- -Copyright 2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/07/20 11:27:58 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -from . import __version__ -from .auxfuncs import ( - applyrules, debugcapi, dictappend, errmess, getargs, hasnote, isarray, - iscomplex, iscomplexarray, iscomplexfunction, isfunction, isintent_c, - isintent_hide, isintent_in, isintent_inout, isintent_nothide, - isintent_out, isoptional, isrequired, isscalar, isstring, - isstringfunction, issubroutine, l_and, l_not, l_or, outmess, replace, - stripcomma, throw_error -) -from . import cfuncs - -f2py_version = __version__.version - - -################## Rules for callback function ############## - -cb_routine_rules = { - 'cbtypedefs': 'typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);', - 'body': """ -#begintitle# -PyObject *#name#_capi = NULL;/*was Py_None*/ -PyTupleObject *#name#_args_capi = NULL; -int #name#_nofargs = 0; -jmp_buf #name#_jmpbuf; -/*typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);*/ -#static# #rctype# #callbackname# (#optargs##args##strarglens##noargs#) { -\tPyTupleObject *capi_arglist = #name#_args_capi; -\tPyObject *capi_return = NULL; -\tPyObject *capi_tmp = NULL; -\tPyObject *capi_arglist_list = NULL; -\tint capi_j,capi_i = 0; -\tint capi_longjmp_ok = 1; -#decl# -#ifdef F2PY_REPORT_ATEXIT -f2py_cb_start_clock(); -#endif -\tCFUNCSMESS(\"cb:Call-back function #name# (maxnofargs=#maxnofargs#(-#nofoptargs#))\\n\"); -\tCFUNCSMESSPY(\"cb:#name#_capi=\",#name#_capi); -\tif (#name#_capi==NULL) { -\t\tcapi_longjmp_ok = 0; -\t\t#name#_capi = PyObject_GetAttrString(#modulename#_module,\"#argname#\"); -\t} -\tif (#name#_capi==NULL) { -\t\tPyErr_SetString(#modulename#_error,\"cb: Callback 
#argname# not defined (as an argument or module #modulename# attribute).\\n\"); -\t\tgoto capi_fail; -\t} -\tif (F2PyCapsule_Check(#name#_capi)) { -\t#name#_typedef #name#_cptr; -\t#name#_cptr = F2PyCapsule_AsVoidPtr(#name#_capi); -\t#returncptr#(*#name#_cptr)(#optargs_nm##args_nm##strarglens_nm#); -\t#return# -\t} -\tif (capi_arglist==NULL) { -\t\tcapi_longjmp_ok = 0; -\t\tcapi_tmp = PyObject_GetAttrString(#modulename#_module,\"#argname#_extra_args\"); -\t\tif (capi_tmp) { -\t\t\tcapi_arglist = (PyTupleObject *)PySequence_Tuple(capi_tmp); -\t\t\tif (capi_arglist==NULL) { -\t\t\t\tPyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#argname#_extra_args to tuple.\\n\"); -\t\t\t\tgoto capi_fail; -\t\t\t} -\t\t} else { -\t\t\tPyErr_Clear(); -\t\t\tcapi_arglist = (PyTupleObject *)Py_BuildValue(\"()\"); -\t\t} -\t} -\tif (capi_arglist == NULL) { -\t\tPyErr_SetString(#modulename#_error,\"Callback #argname# argument list is not set.\\n\"); -\t\tgoto capi_fail; -\t} -#setdims# -#ifdef PYPY_VERSION -#define CAPI_ARGLIST_SETITEM(idx, value) PyList_SetItem((PyObject *)capi_arglist_list, idx, value) -\tcapi_arglist_list = PySequence_List(capi_arglist); -\tif (capi_arglist_list == NULL) goto capi_fail; -#else -#define CAPI_ARGLIST_SETITEM(idx, value) PyTuple_SetItem((PyObject *)capi_arglist, idx, value) -#endif -#pyobjfrom# -#undef CAPI_ARGLIST_SETITEM -#ifdef PYPY_VERSION -\tCFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist_list); -#else -\tCFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist); -#endif -\tCFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\"); -#ifdef F2PY_REPORT_ATEXIT -f2py_cb_start_call_clock(); -#endif -#ifdef PYPY_VERSION -\tcapi_return = PyObject_CallObject(#name#_capi,(PyObject *)capi_arglist_list); -\tPy_DECREF(capi_arglist_list); -\tcapi_arglist_list = NULL; -#else -\tcapi_return = PyObject_CallObject(#name#_capi,(PyObject *)capi_arglist); -#endif -#ifdef F2PY_REPORT_ATEXIT -f2py_cb_stop_call_clock(); -#endif 
-\tCFUNCSMESSPY(\"cb:capi_return=\",capi_return); -\tif (capi_return == NULL) { -\t\tfprintf(stderr,\"capi_return is NULL\\n\"); -\t\tgoto capi_fail; -\t} -\tif (capi_return == Py_None) { -\t\tPy_DECREF(capi_return); -\t\tcapi_return = Py_BuildValue(\"()\"); -\t} -\telse if (!PyTuple_Check(capi_return)) { -\t\tcapi_return = Py_BuildValue(\"(N)\",capi_return); -\t} -\tcapi_j = PyTuple_Size(capi_return); -\tcapi_i = 0; -#frompyobj# -\tCFUNCSMESS(\"cb:#name#:successful\\n\"); -\tPy_DECREF(capi_return); -#ifdef F2PY_REPORT_ATEXIT -f2py_cb_stop_clock(); -#endif -\tgoto capi_return_pt; -capi_fail: -\tfprintf(stderr,\"Call-back #name# failed.\\n\"); -\tPy_XDECREF(capi_return); -\tPy_XDECREF(capi_arglist_list); -\tif (capi_longjmp_ok) -\t\tlongjmp(#name#_jmpbuf,-1); -capi_return_pt: -\t; -#return# -} -#endtitle# -""", - 'need': ['setjmp.h', 'CFUNCSMESS'], - 'maxnofargs': '#maxnofargs#', - 'nofoptargs': '#nofoptargs#', - 'docstr': """\ -\tdef #argname#(#docsignature#): return #docreturn#\\n\\ -#docstrsigns#""", - 'latexdocstr': """ -{{}\\verb@def #argname#(#latexdocsignature#): return #docreturn#@{}} -#routnote# - -#latexdocstrsigns#""", - 'docstrshort': 'def #argname#(#docsignature#): return #docreturn#' -} -cb_rout_rules = [ - { # Init - 'separatorsfor': {'decl': '\n', - 'args': ',', 'optargs': '', 'pyobjfrom': '\n', 'freemem': '\n', - 'args_td': ',', 'optargs_td': '', - 'args_nm': ',', 'optargs_nm': '', - 'frompyobj': '\n', 'setdims': '\n', - 'docstrsigns': '\\n"\n"', - 'latexdocstrsigns': '\n', - 'latexdocstrreq': '\n', 'latexdocstropt': '\n', - 'latexdocstrout': '\n', 'latexdocstrcbs': '\n', - }, - 'decl': '/*decl*/', 'pyobjfrom': '/*pyobjfrom*/', 'frompyobj': '/*frompyobj*/', - 'args': [], 'optargs': '', 'return': '', 'strarglens': '', 'freemem': '/*freemem*/', - 'args_td': [], 'optargs_td': '', 'strarglens_td': '', - 'args_nm': [], 'optargs_nm': '', 'strarglens_nm': '', - 'noargs': '', - 'setdims': '/*setdims*/', - 'docstrsigns': '', 'latexdocstrsigns': '', - 
'docstrreq': '\tRequired arguments:', - 'docstropt': '\tOptional arguments:', - 'docstrout': '\tReturn objects:', - 'docstrcbs': '\tCall-back functions:', - 'docreturn': '', 'docsign': '', 'docsignopt': '', - 'latexdocstrreq': '\\noindent Required arguments:', - 'latexdocstropt': '\\noindent Optional arguments:', - 'latexdocstrout': '\\noindent Return objects:', - 'latexdocstrcbs': '\\noindent Call-back functions:', - 'routnote': {hasnote: '--- #note#', l_not(hasnote): ''}, - }, { # Function - 'decl': '\t#ctype# return_value;', - 'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting return_value->");'}, - '\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n");', - {debugcapi: - '\tfprintf(stderr,"#showvalueformat#.\\n",return_value);'} - ], - 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, 'GETSCALARFROMPYTUPLE'], - 'return': '\treturn return_value;', - '_check': l_and(isfunction, l_not(isstringfunction), l_not(iscomplexfunction)) - }, - { # String function - 'pyobjfrom': {debugcapi: '\tfprintf(stderr,"debug-capi:cb:#name#:%d:\\n",return_value_len);'}, - 'args': '#ctype# return_value,int return_value_len', - 'args_nm': 'return_value,&return_value_len', - 'args_td': '#ctype# ,int', - 'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting return_value->\\"");'}, - """\tif (capi_j>capi_i) -\t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,return_value,return_value_len);""", - {debugcapi: - '\tfprintf(stderr,"#showvalueformat#\\".\\n",return_value);'} - ], - 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, - 'string.h', 'GETSTRFROMPYTUPLE'], - 'return': 'return;', - '_check': isstringfunction - }, - { # Complex function - 'optargs': """ -#ifndef F2PY_CB_RETURNCOMPLEX -#ctype# *return_value -#endif -""", - 'optargs_nm': """ -#ifndef F2PY_CB_RETURNCOMPLEX -return_value -#endif -""", - 'optargs_td': """ -#ifndef 
F2PY_CB_RETURNCOMPLEX -#ctype# * -#endif -""", - 'decl': """ -#ifdef F2PY_CB_RETURNCOMPLEX -\t#ctype# return_value; -#endif -""", - 'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting return_value->");'}, - """\ -\tif (capi_j>capi_i) -#ifdef F2PY_CB_RETURNCOMPLEX -\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\"); -#else -\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\"); -#endif -""", - {debugcapi: """ -#ifdef F2PY_CB_RETURNCOMPLEX -\tfprintf(stderr,\"#showvalueformat#.\\n\",(return_value).r,(return_value).i); -#else -\tfprintf(stderr,\"#showvalueformat#.\\n\",(*return_value).r,(*return_value).i); -#endif - -"""} - ], - 'return': """ -#ifdef F2PY_CB_RETURNCOMPLEX -\treturn return_value; -#else -\treturn; -#endif -""", - 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, - 'string.h', 'GETSCALARFROMPYTUPLE', '#ctype#'], - '_check': iscomplexfunction - }, - {'docstrout': '\t\t#pydocsignout#', - 'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}', - {hasnote: '--- #note#'}], - 'docreturn': '#rname#,', - '_check': isfunction}, - {'_check': issubroutine, 'return': 'return;'} -] - -cb_arg_rules = [ - { # Doc - 'docstropt': {l_and(isoptional, isintent_nothide): '\t\t#pydocsign#'}, - 'docstrreq': {l_and(isrequired, isintent_nothide): '\t\t#pydocsign#'}, - 'docstrout': {isintent_out: '\t\t#pydocsignout#'}, - 'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', - {hasnote: '--- #note#'}]}, - 'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', - {hasnote: '--- #note#'}]}, - 'latexdocstrout': {isintent_out: ['\\item[]{{}\\verb@#pydocsignout#@{}}', - {l_and(hasnote, isintent_hide): '--- #note#', - l_and(hasnote, isintent_nothide): '--- See 
above.'}]}, - 'docsign': {l_and(isrequired, isintent_nothide): '#varname#,'}, - 'docsignopt': {l_and(isoptional, isintent_nothide): '#varname#,'}, - 'depend': '' - }, - { - 'args': { - l_and(isscalar, isintent_c): '#ctype# #varname_i#', - l_and(isscalar, l_not(isintent_c)): '#ctype# *#varname_i#_cb_capi', - isarray: '#ctype# *#varname_i#', - isstring: '#ctype# #varname_i#' - }, - 'args_nm': { - l_and(isscalar, isintent_c): '#varname_i#', - l_and(isscalar, l_not(isintent_c)): '#varname_i#_cb_capi', - isarray: '#varname_i#', - isstring: '#varname_i#' - }, - 'args_td': { - l_and(isscalar, isintent_c): '#ctype#', - l_and(isscalar, l_not(isintent_c)): '#ctype# *', - isarray: '#ctype# *', - isstring: '#ctype#' - }, - # untested with multiple args - 'strarglens': {isstring: ',int #varname_i#_cb_len'}, - 'strarglens_td': {isstring: ',int'}, # untested with multiple args - # untested with multiple args - 'strarglens_nm': {isstring: ',#varname_i#_cb_len'}, - }, - { # Scalars - 'decl': {l_not(isintent_c): '\t#ctype# #varname_i#=(*#varname_i#_cb_capi);'}, - 'error': {l_and(isintent_c, isintent_out, - throw_error('intent(c,out) is forbidden for callback scalar arguments')): - ''}, - 'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting #varname#->");'}, - {isintent_out: - '\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,#varname_i#_cb_capi,#ctype#,"#ctype#_from_pyobj failed in converting argument #varname# of call-back function #name# to C #ctype#\\n");'}, - {l_and(debugcapi, l_and(l_not(iscomplex), isintent_c)): - '\tfprintf(stderr,"#showvalueformat#.\\n",#varname_i#);'}, - {l_and(debugcapi, l_and(l_not(iscomplex), l_not( isintent_c))): - '\tfprintf(stderr,"#showvalueformat#.\\n",*#varname_i#_cb_capi);'}, - {l_and(debugcapi, l_and(iscomplex, isintent_c)): - '\tfprintf(stderr,"#showvalueformat#.\\n",(#varname_i#).r,(#varname_i#).i);'}, - {l_and(debugcapi, l_and(iscomplex, l_not( isintent_c))): - 
'\tfprintf(stderr,"#showvalueformat#.\\n",(*#varname_i#_cb_capi).r,(*#varname_i#_cb_capi).i);'}, - ], - 'need': [{isintent_out: ['#ctype#_from_pyobj', 'GETSCALARFROMPYTUPLE']}, - {debugcapi: 'CFUNCSMESS'}], - '_check': isscalar - }, { - 'pyobjfrom': [{isintent_in: """\ -\tif (#name#_nofargs>capi_i) -\t\tif (CAPI_ARGLIST_SETITEM(capi_i++,pyobj_from_#ctype#1(#varname_i#))) -\t\t\tgoto capi_fail;"""}, - {isintent_inout: """\ -\tif (#name#_nofargs>capi_i) -\t\tif (CAPI_ARGLIST_SETITEM(capi_i++,pyarr_from_p_#ctype#1(#varname_i#_cb_capi))) -\t\t\tgoto capi_fail;"""}], - 'need': [{isintent_in: 'pyobj_from_#ctype#1'}, - {isintent_inout: 'pyarr_from_p_#ctype#1'}, - {iscomplex: '#ctype#'}], - '_check': l_and(isscalar, isintent_nothide), - '_optional': '' - }, { # String - 'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting #varname#->\\"");'}, - """\tif (capi_j>capi_i) -\t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,#varname_i#,#varname_i#_cb_len);""", - {debugcapi: - '\tfprintf(stderr,"#showvalueformat#\\":%d:.\\n",#varname_i#,#varname_i#_cb_len);'}, - ], - 'need': ['#ctype#', 'GETSTRFROMPYTUPLE', - {debugcapi: 'CFUNCSMESS'}, 'string.h'], - '_check': l_and(isstring, isintent_out) - }, { - 'pyobjfrom': [{debugcapi: '\tfprintf(stderr,"debug-capi:cb:#varname#=\\"#showvalueformat#\\":%d:\\n",#varname_i#,#varname_i#_cb_len);'}, - {isintent_in: """\ -\tif (#name#_nofargs>capi_i) -\t\tif (CAPI_ARGLIST_SETITEM(capi_i++,pyobj_from_#ctype#1size(#varname_i#,#varname_i#_cb_len))) -\t\t\tgoto capi_fail;"""}, - {isintent_inout: """\ -\tif (#name#_nofargs>capi_i) { -\t\tint #varname_i#_cb_dims[] = {#varname_i#_cb_len}; -\t\tif (CAPI_ARGLIST_SETITEM(capi_i++,pyarr_from_p_#ctype#1(#varname_i#,#varname_i#_cb_dims))) -\t\t\tgoto capi_fail; -\t}"""}], - 'need': [{isintent_in: 'pyobj_from_#ctype#1size'}, - {isintent_inout: 'pyarr_from_p_#ctype#1'}], - '_check': l_and(isstring, isintent_nothide), - '_optional': '' - }, - # Array ... 
- { - 'decl': '\tnpy_intp #varname_i#_Dims[#rank#] = {#rank*[-1]#};', - 'setdims': '\t#cbsetdims#;', - '_check': isarray, - '_depend': '' - }, - { - 'pyobjfrom': [{debugcapi: '\tfprintf(stderr,"debug-capi:cb:#varname#\\n");'}, - {isintent_c: """\ -\tif (#name#_nofargs>capi_i) { -\t\tint itemsize_ = #atype# == NPY_STRING ? 1 : 0; -\t\t/*XXX: Hmm, what will destroy this array??? */ -\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,itemsize_,NPY_ARRAY_CARRAY,NULL); -""", - l_not(isintent_c): """\ -\tif (#name#_nofargs>capi_i) { -\t\tint itemsize_ = #atype# == NPY_STRING ? 1 : 0; -\t\t/*XXX: Hmm, what will destroy this array??? */ -\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,itemsize_,NPY_ARRAY_FARRAY,NULL); -""", - }, - """ -\t\tif (tmp_arr==NULL) -\t\t\tgoto capi_fail; -\t\tif (CAPI_ARGLIST_SETITEM(capi_i++,(PyObject *)tmp_arr)) -\t\t\tgoto capi_fail; -}"""], - '_check': l_and(isarray, isintent_nothide, l_or(isintent_in, isintent_inout)), - '_optional': '', - }, { - 'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting #varname#->");'}, - """\tif (capi_j>capi_i) { -\t\tPyArrayObject *rv_cb_arr = NULL; -\t\tif ((capi_tmp = PyTuple_GetItem(capi_return,capi_i++))==NULL) goto capi_fail; -\t\trv_cb_arr = array_from_pyobj(#atype#,#varname_i#_Dims,#rank#,F2PY_INTENT_IN""", - {isintent_c: '|F2PY_INTENT_C'}, - """,capi_tmp); -\t\tif (rv_cb_arr == NULL) { -\t\t\tfprintf(stderr,\"rv_cb_arr is NULL\\n\"); -\t\t\tgoto capi_fail; -\t\t} -\t\tMEMCOPY(#varname_i#,PyArray_DATA(rv_cb_arr),PyArray_NBYTES(rv_cb_arr)); -\t\tif (capi_tmp != (PyObject *)rv_cb_arr) { -\t\t\tPy_DECREF(rv_cb_arr); -\t\t} -\t}""", - {debugcapi: '\tfprintf(stderr,"<-.\\n");'}, - ], - 'need': ['MEMCOPY', {iscomplexarray: '#ctype#'}], - '_check': l_and(isarray, isintent_out) - }, { - 'docreturn': '#varname#,', - '_check': isintent_out - } -] - 
-################## Build call-back module ############# -cb_map = {} - - -def buildcallbacks(m): - global cb_map - cb_map[m['name']] = [] - for bi in m['body']: - if bi['block'] == 'interface': - for b in bi['body']: - if b: - buildcallback(b, m['name']) - else: - errmess('warning: empty body for %s\n' % (m['name'])) - - -def buildcallback(rout, um): - global cb_map - from . import capi_maps - - outmess('\tConstructing call-back function "cb_%s_in_%s"\n' % - (rout['name'], um)) - args, depargs = getargs(rout) - capi_maps.depargs = depargs - var = rout['vars'] - vrd = capi_maps.cb_routsign2map(rout, um) - rd = dictappend({}, vrd) - cb_map[um].append([rout['name'], rd['name']]) - for r in cb_rout_rules: - if ('_check' in r and r['_check'](rout)) or ('_check' not in r): - ar = applyrules(r, vrd, rout) - rd = dictappend(rd, ar) - savevrd = {} - for i, a in enumerate(args): - vrd = capi_maps.cb_sign2map(a, var[a], index=i) - savevrd[a] = vrd - for r in cb_arg_rules: - if '_depend' in r: - continue - if '_optional' in r and isoptional(var[a]): - continue - if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar = applyrules(r, vrd, var[a]) - rd = dictappend(rd, ar) - if '_break' in r: - break - for a in args: - vrd = savevrd[a] - for r in cb_arg_rules: - if '_depend' in r: - continue - if ('_optional' not in r) or ('_optional' in r and isrequired(var[a])): - continue - if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar = applyrules(r, vrd, var[a]) - rd = dictappend(rd, ar) - if '_break' in r: - break - for a in depargs: - vrd = savevrd[a] - for r in cb_arg_rules: - if '_depend' not in r: - continue - if '_optional' in r: - continue - if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar = applyrules(r, vrd, var[a]) - rd = dictappend(rd, ar) - if '_break' in r: - break - if 'args' in rd and 'optargs' in rd: - if isinstance(rd['optargs'], list): - rd['optargs'] = rd['optargs'] + [""" -#ifndef F2PY_CB_RETURNCOMPLEX -, 
-#endif -"""] - rd['optargs_nm'] = rd['optargs_nm'] + [""" -#ifndef F2PY_CB_RETURNCOMPLEX -, -#endif -"""] - rd['optargs_td'] = rd['optargs_td'] + [""" -#ifndef F2PY_CB_RETURNCOMPLEX -, -#endif -"""] - if isinstance(rd['docreturn'], list): - rd['docreturn'] = stripcomma( - replace('#docreturn#', {'docreturn': rd['docreturn']})) - optargs = stripcomma(replace('#docsignopt#', - {'docsignopt': rd['docsignopt']} - )) - if optargs == '': - rd['docsignature'] = stripcomma( - replace('#docsign#', {'docsign': rd['docsign']})) - else: - rd['docsignature'] = replace('#docsign#[#docsignopt#]', - {'docsign': rd['docsign'], - 'docsignopt': optargs, - }) - rd['latexdocsignature'] = rd['docsignature'].replace('_', '\\_') - rd['latexdocsignature'] = rd['latexdocsignature'].replace(',', ', ') - rd['docstrsigns'] = [] - rd['latexdocstrsigns'] = [] - for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']: - if k in rd and isinstance(rd[k], list): - rd['docstrsigns'] = rd['docstrsigns'] + rd[k] - k = 'latex' + k - if k in rd and isinstance(rd[k], list): - rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\ - ['\\begin{description}'] + rd[k][1:] +\ - ['\\end{description}'] - if 'args' not in rd: - rd['args'] = '' - rd['args_td'] = '' - rd['args_nm'] = '' - if not (rd.get('args') or rd.get('optargs') or rd.get('strarglens')): - rd['noargs'] = 'void' - - ar = applyrules(cb_routine_rules, rd) - cfuncs.callbacks[rd['name']] = ar['body'] - if isinstance(ar['need'], str): - ar['need'] = [ar['need']] - - if 'need' in rd: - for t in cfuncs.typedefs.keys(): - if t in rd['need']: - ar['need'].append(t) - - cfuncs.typedefs_generated[rd['name'] + '_typedef'] = ar['cbtypedefs'] - ar['need'].append(rd['name'] + '_typedef') - cfuncs.needs[rd['name']] = ar['need'] - - capi_maps.lcb2_map[rd['name']] = {'maxnofargs': ar['maxnofargs'], - 'nofoptargs': ar['nofoptargs'], - 'docstr': ar['docstr'], - 'latexdocstr': ar['latexdocstr'], - 'argname': rd['argname'] - } - outmess('\t %s\n' % 
(ar['docstrshort'])) - return -################## Build call-back function ############# diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/cfuncs.py b/venv/lib/python3.7/site-packages/numpy/f2py/cfuncs.py deleted file mode 100644 index ccb7b3a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/cfuncs.py +++ /dev/null @@ -1,1275 +0,0 @@ -#!/usr/bin/env python -""" - -C declarations, CPP macros, and C functions for f2py2e. -Only required declarations/macros/functions will be used. - -Copyright 1999,2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/05/06 11:42:34 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -import sys -import copy - -from . import __version__ - -f2py_version = __version__.version -errmess = sys.stderr.write - -##################### Definitions ################## - -outneeds = {'includes0': [], 'includes': [], 'typedefs': [], 'typedefs_generated': [], - 'userincludes': [], - 'cppmacros': [], 'cfuncs': [], 'callbacks': [], 'f90modhooks': [], - 'commonhooks': []} -needs = {} -includes0 = {'includes0': '/*need_includes0*/'} -includes = {'includes': '/*need_includes*/'} -userincludes = {'userincludes': '/*need_userincludes*/'} -typedefs = {'typedefs': '/*need_typedefs*/'} -typedefs_generated = {'typedefs_generated': '/*need_typedefs_generated*/'} -cppmacros = {'cppmacros': '/*need_cppmacros*/'} -cfuncs = {'cfuncs': '/*need_cfuncs*/'} -callbacks = {'callbacks': '/*need_callbacks*/'} -f90modhooks = {'f90modhooks': '/*need_f90modhooks*/', - 'initf90modhooksstatic': '/*initf90modhooksstatic*/', - 'initf90modhooksdynamic': '/*initf90modhooksdynamic*/', - } -commonhooks = {'commonhooks': '/*need_commonhooks*/', - 'initcommonhooks': '/*need_initcommonhooks*/', - } - -############ Includes ################### - 
-includes0['math.h'] = '#include ' -includes0['string.h'] = '#include ' -includes0['setjmp.h'] = '#include ' - -includes['Python.h'] = '#include "Python.h"' -needs['arrayobject.h'] = ['Python.h'] -includes['arrayobject.h'] = '''#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API -#include "arrayobject.h"''' - -includes['arrayobject.h'] = '#include "fortranobject.h"' -includes['stdarg.h'] = '#include ' - -############# Type definitions ############### - -typedefs['unsigned_char'] = 'typedef unsigned char unsigned_char;' -typedefs['unsigned_short'] = 'typedef unsigned short unsigned_short;' -typedefs['unsigned_long'] = 'typedef unsigned long unsigned_long;' -typedefs['signed_char'] = 'typedef signed char signed_char;' -typedefs['long_long'] = """\ -#ifdef _WIN32 -typedef __int64 long_long; -#else -typedef long long long_long; -typedef unsigned long long unsigned_long_long; -#endif -""" -typedefs['unsigned_long_long'] = """\ -#ifdef _WIN32 -typedef __uint64 long_long; -#else -typedef unsigned long long unsigned_long_long; -#endif -""" -typedefs['long_double'] = """\ -#ifndef _LONG_DOUBLE -typedef long double long_double; -#endif -""" -typedefs[ - 'complex_long_double'] = 'typedef struct {long double r,i;} complex_long_double;' -typedefs['complex_float'] = 'typedef struct {float r,i;} complex_float;' -typedefs['complex_double'] = 'typedef struct {double r,i;} complex_double;' -typedefs['string'] = """typedef char * string;""" - - -############### CPP macros #################### -cppmacros['CFUNCSMESS'] = """\ -#ifdef DEBUGCFUNCS -#define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess); -#define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\ - PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ - fprintf(stderr,\"\\n\"); -#else -#define CFUNCSMESS(mess) -#define CFUNCSMESSPY(mess,obj) -#endif -""" -cppmacros['F_FUNC'] = """\ -#if defined(PREPEND_FORTRAN) -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) _##F -#else -#define F_FUNC(f,F) _##f 
-#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) _##F##_ -#else -#define F_FUNC(f,F) _##f##_ -#endif -#endif -#else -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) F -#else -#define F_FUNC(f,F) f -#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) F##_ -#else -#define F_FUNC(f,F) f##_ -#endif -#endif -#endif -#if defined(UNDERSCORE_G77) -#define F_FUNC_US(f,F) F_FUNC(f##_,F##_) -#else -#define F_FUNC_US(f,F) F_FUNC(f,F) -#endif -""" -cppmacros['F_WRAPPEDFUNC'] = """\ -#if defined(PREPEND_FORTRAN) -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F -#else -#define F_WRAPPEDFUNC(f,F) _f2pywrap##f -#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F##_ -#else -#define F_WRAPPEDFUNC(f,F) _f2pywrap##f##_ -#endif -#endif -#else -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F -#else -#define F_WRAPPEDFUNC(f,F) f2pywrap##f -#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F##_ -#else -#define F_WRAPPEDFUNC(f,F) f2pywrap##f##_ -#endif -#endif -#endif -#if defined(UNDERSCORE_G77) -#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f##_,F##_) -#else -#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f,F) -#endif -""" -cppmacros['F_MODFUNC'] = """\ -#if defined(F90MOD2CCONV1) /*E.g. Compaq Fortran */ -#if defined(NO_APPEND_FORTRAN) -#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f -#else -#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f ## _ -#endif -#endif - -#if defined(F90MOD2CCONV2) /*E.g. IBM XL Fortran, not tested though */ -#if defined(NO_APPEND_FORTRAN) -#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f -#else -#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f ## _ -#endif -#endif - -#if defined(F90MOD2CCONV3) /*E.g. MIPSPro Compilers */ -#if defined(NO_APPEND_FORTRAN) -#define F_MODFUNCNAME(m,f) f ## .in. 
## m -#else -#define F_MODFUNCNAME(m,f) f ## .in. ## m ## _ -#endif -#endif -/* -#if defined(UPPERCASE_FORTRAN) -#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(M,F) -#else -#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(m,f) -#endif -*/ - -#define F_MODFUNC(m,f) (*(f2pymodstruct##m##.##f)) -""" -cppmacros['SWAPUNSAFE'] = """\ -#define SWAP(a,b) (size_t)(a) = ((size_t)(a) ^ (size_t)(b));\\ - (size_t)(b) = ((size_t)(a) ^ (size_t)(b));\\ - (size_t)(a) = ((size_t)(a) ^ (size_t)(b)) -""" -cppmacros['SWAP'] = """\ -#define SWAP(a,b,t) {\\ - t *c;\\ - c = a;\\ - a = b;\\ - b = c;} -""" -# cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & -# NPY_ARRAY_C_CONTIGUOUS)' -cppmacros['PRINTPYOBJERR'] = """\ -#define PRINTPYOBJERR(obj)\\ - fprintf(stderr,\"#modulename#.error is related to \");\\ - PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ - fprintf(stderr,\"\\n\"); -""" -cppmacros['MINMAX'] = """\ -#ifndef max -#define max(a,b) ((a > b) ? (a) : (b)) -#endif -#ifndef min -#define min(a,b) ((a < b) ? (a) : (b)) -#endif -#ifndef MAX -#define MAX(a,b) ((a > b) ? (a) : (b)) -#endif -#ifndef MIN -#define MIN(a,b) ((a < b) ? (a) : (b)) -#endif -""" -needs['len..'] = ['f2py_size'] -cppmacros['len..'] = """\ -#define rank(var) var ## _Rank -#define shape(var,dim) var ## _Dims[dim] -#define old_rank(var) (PyArray_NDIM((PyArrayObject *)(capi_ ## var ## _tmp))) -#define old_shape(var,dim) PyArray_DIM(((PyArrayObject *)(capi_ ## var ## _tmp)),dim) -#define fshape(var,dim) shape(var,rank(var)-dim-1) -#define len(var) shape(var,0) -#define flen(var) fshape(var,0) -#define old_size(var) PyArray_SIZE((PyArrayObject *)(capi_ ## var ## _tmp)) -/* #define index(i) capi_i ## i */ -#define slen(var) capi_ ## var ## _len -#define size(var, ...) f2py_size((PyArrayObject *)(capi_ ## var ## _tmp), ## __VA_ARGS__, -1) -""" -needs['f2py_size'] = ['stdarg.h'] -cfuncs['f2py_size'] = """\ -static int f2py_size(PyArrayObject* var, ...) 
-{ - npy_int sz = 0; - npy_int dim; - npy_int rank; - va_list argp; - va_start(argp, var); - dim = va_arg(argp, npy_int); - if (dim==-1) - { - sz = PyArray_SIZE(var); - } - else - { - rank = PyArray_NDIM(var); - if (dim>=1 && dim<=rank) - sz = PyArray_DIM(var, dim-1); - else - fprintf(stderr, \"f2py_size: 2nd argument value=%d fails to satisfy 1<=value<=%d. Result will be 0.\\n\", dim, rank); - } - va_end(argp); - return sz; -} -""" - -cppmacros[ - 'pyobj_from_char1'] = '#define pyobj_from_char1(v) (PyInt_FromLong(v))' -cppmacros[ - 'pyobj_from_short1'] = '#define pyobj_from_short1(v) (PyInt_FromLong(v))' -needs['pyobj_from_int1'] = ['signed_char'] -cppmacros['pyobj_from_int1'] = '#define pyobj_from_int1(v) (PyInt_FromLong(v))' -cppmacros[ - 'pyobj_from_long1'] = '#define pyobj_from_long1(v) (PyLong_FromLong(v))' -needs['pyobj_from_long_long1'] = ['long_long'] -cppmacros['pyobj_from_long_long1'] = """\ -#ifdef HAVE_LONG_LONG -#define pyobj_from_long_long1(v) (PyLong_FromLongLong(v)) -#else -#warning HAVE_LONG_LONG is not available. Redefining pyobj_from_long_long. 
-#define pyobj_from_long_long1(v) (PyLong_FromLong(v)) -#endif -""" -needs['pyobj_from_long_double1'] = ['long_double'] -cppmacros[ - 'pyobj_from_long_double1'] = '#define pyobj_from_long_double1(v) (PyFloat_FromDouble(v))' -cppmacros[ - 'pyobj_from_double1'] = '#define pyobj_from_double1(v) (PyFloat_FromDouble(v))' -cppmacros[ - 'pyobj_from_float1'] = '#define pyobj_from_float1(v) (PyFloat_FromDouble(v))' -needs['pyobj_from_complex_long_double1'] = ['complex_long_double'] -cppmacros[ - 'pyobj_from_complex_long_double1'] = '#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles(v.r,v.i))' -needs['pyobj_from_complex_double1'] = ['complex_double'] -cppmacros[ - 'pyobj_from_complex_double1'] = '#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(v.r,v.i))' -needs['pyobj_from_complex_float1'] = ['complex_float'] -cppmacros[ - 'pyobj_from_complex_float1'] = '#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))' -needs['pyobj_from_string1'] = ['string'] -cppmacros[ - 'pyobj_from_string1'] = '#define pyobj_from_string1(v) (PyString_FromString((char *)v))' -needs['pyobj_from_string1size'] = ['string'] -cppmacros[ - 'pyobj_from_string1size'] = '#define pyobj_from_string1size(v,len) (PyUString_FromStringAndSize((char *)v, len))' -needs['TRYPYARRAYTEMPLATE'] = ['PRINTPYOBJERR'] -cppmacros['TRYPYARRAYTEMPLATE'] = """\ -/* New SciPy */ -#define TRYPYARRAYTEMPLATECHAR case NPY_STRING: *(char *)(PyArray_DATA(arr))=*v; break; -#define TRYPYARRAYTEMPLATELONG case NPY_LONG: *(long *)(PyArray_DATA(arr))=*v; break; -#define TRYPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr,PyArray_DATA(arr),pyobj_from_ ## ctype ## 1(*v)); break; - -#define TRYPYARRAYTEMPLATE(ctype,typecode) \\ - PyArrayObject *arr = NULL;\\ - if (!obj) return -2;\\ - if (!PyArray_Check(obj)) return -1;\\ - if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ - if (PyArray_DESCR(arr)->type==typecode) {*(ctype 
*)(PyArray_DATA(arr))=*v; return 1;}\\ - switch (PyArray_TYPE(arr)) {\\ - case NPY_DOUBLE: *(double *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_INT: *(int *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_LONG: *(long *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_FLOAT: *(float *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_CDOUBLE: *(double *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_CFLOAT: *(float *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=(*v!=0); break;\\ - case NPY_UBYTE: *(unsigned char *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_BYTE: *(signed char *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_SHORT: *(short *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_ ## ctype ## 1(*v)); break;\\ - default: return -2;\\ - };\\ - return 1 -""" - -needs['TRYCOMPLEXPYARRAYTEMPLATE'] = ['PRINTPYOBJERR'] -cppmacros['TRYCOMPLEXPYARRAYTEMPLATE'] = """\ -#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break; -#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\ - PyArrayObject *arr = NULL;\\ - if (!obj) return -2;\\ - if (!PyArray_Check(obj)) return -1;\\ - if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYCOMPLEXPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ - if (PyArray_DESCR(arr)->type==typecode) {\\ - *(ctype *)(PyArray_DATA(arr))=(*v).r;\\ - *(ctype *)(PyArray_DATA(arr)+sizeof(ctype))=(*v).i;\\ 
- return 1;\\ - }\\ - switch (PyArray_TYPE(arr)) {\\ - case NPY_CDOUBLE: *(double *)(PyArray_DATA(arr))=(*v).r;*(double *)(PyArray_DATA(arr)+sizeof(double))=(*v).i;break;\\ - case NPY_CFLOAT: *(float *)(PyArray_DATA(arr))=(*v).r;*(float *)(PyArray_DATA(arr)+sizeof(float))=(*v).i;break;\\ - case NPY_DOUBLE: *(double *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_LONG: *(long *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_FLOAT: *(float *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_INT: *(int *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_SHORT: *(short *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_UBYTE: *(unsigned char *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_BYTE: *(signed char *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=((*v).r!=0 && (*v).i!=0); break;\\ - case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r;*(npy_longdouble *)(PyArray_DATA(arr)+sizeof(npy_longdouble))=(*v).i;break;\\ - case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break;\\ - default: return -2;\\ - };\\ - return -1; -""" -# cppmacros['NUMFROMARROBJ']="""\ -# define NUMFROMARROBJ(typenum,ctype) \\ -# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ -# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ -# if (arr) {\\ -# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\ -# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\ -# goto capi_fail;\\ -# } else {\\ -# 
(PyArray_DESCR(arr)->cast[typenum])(PyArray_DATA(arr),1,(char*)v,1,1);\\ -# }\\ -# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ -# return 1;\\ -# } -# """ -# XXX: Note that CNUMFROMARROBJ is identical with NUMFROMARROBJ -# cppmacros['CNUMFROMARROBJ']="""\ -# define CNUMFROMARROBJ(typenum,ctype) \\ -# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ -# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ -# if (arr) {\\ -# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\ -# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\ -# goto capi_fail;\\ -# } else {\\ -# (PyArray_DESCR(arr)->cast[typenum])((void *)(PyArray_DATA(arr)),1,(void *)(v),1,1);\\ -# }\\ -# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ -# return 1;\\ -# } -# """ - - -needs['GETSTRFROMPYTUPLE'] = ['STRINGCOPYN', 'PRINTPYOBJERR'] -cppmacros['GETSTRFROMPYTUPLE'] = """\ -#define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\ - PyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\ - if (rv_cb_str == NULL)\\ - goto capi_fail;\\ - if (PyString_Check(rv_cb_str)) {\\ - str[len-1]='\\0';\\ - STRINGCOPYN((str),PyString_AS_STRING((PyStringObject*)rv_cb_str),(len));\\ - } else {\\ - PRINTPYOBJERR(rv_cb_str);\\ - PyErr_SetString(#modulename#_error,\"string object expected\");\\ - goto capi_fail;\\ - }\\ - } -""" -cppmacros['GETSCALARFROMPYTUPLE'] = """\ -#define GETSCALARFROMPYTUPLE(tuple,index,var,ctype,mess) {\\ - if ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\ - if (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\ - goto capi_fail;\\ - } -""" - -cppmacros['FAILNULL'] = """\\ -#define FAILNULL(p) do { \\ - if ((p) == NULL) { \\ - PyErr_SetString(PyExc_MemoryError, "NULL pointer found"); \\ - goto capi_fail; \\ - } \\ -} while (0) -""" -needs['MEMCOPY'] = ['string.h', 'FAILNULL'] -cppmacros['MEMCOPY'] = """\ -#define MEMCOPY(to,from,n)\\ - do { FAILNULL(to); FAILNULL(from); (void)memcpy(to,from,n); } while (0) -""" 
-cppmacros['STRINGMALLOC'] = """\ -#define STRINGMALLOC(str,len)\\ - if ((str = (string)malloc(sizeof(char)*(len+1))) == NULL) {\\ - PyErr_SetString(PyExc_MemoryError, \"out of memory\");\\ - goto capi_fail;\\ - } else {\\ - (str)[len] = '\\0';\\ - } -""" -cppmacros['STRINGFREE'] = """\ -#define STRINGFREE(str) do {if (!(str == NULL)) free(str);} while (0) -""" -needs['STRINGCOPYN'] = ['string.h', 'FAILNULL'] -cppmacros['STRINGCOPYN'] = """\ -#define STRINGCOPYN(to,from,buf_size) \\ - do { \\ - int _m = (buf_size); \\ - char *_to = (to); \\ - char *_from = (from); \\ - FAILNULL(_to); FAILNULL(_from); \\ - (void)strncpy(_to, _from, sizeof(char)*_m); \\ - _to[_m-1] = '\\0'; \\ - /* Padding with spaces instead of nulls */ \\ - for (_m -= 2; _m >= 0 && _to[_m] == '\\0'; _m--) { \\ - _to[_m] = ' '; \\ - } \\ - } while (0) -""" -needs['STRINGCOPY'] = ['string.h', 'FAILNULL'] -cppmacros['STRINGCOPY'] = """\ -#define STRINGCOPY(to,from)\\ - do { FAILNULL(to); FAILNULL(from); (void)strcpy(to,from); } while (0) -""" -cppmacros['CHECKGENERIC'] = """\ -#define CHECKGENERIC(check,tcheck,name) \\ - if (!(check)) {\\ - PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ - /*goto capi_fail;*/\\ - } else """ -cppmacros['CHECKARRAY'] = """\ -#define CHECKARRAY(check,tcheck,name) \\ - if (!(check)) {\\ - PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ - /*goto capi_fail;*/\\ - } else """ -cppmacros['CHECKSTRING'] = """\ -#define CHECKSTRING(check,tcheck,name,show,var)\\ - if (!(check)) {\\ - char errstring[256];\\ - sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\ - PyErr_SetString(#modulename#_error, errstring);\\ - /*goto capi_fail;*/\\ - } else """ -cppmacros['CHECKSCALAR'] = """\ -#define CHECKSCALAR(check,tcheck,name,show,var)\\ - if (!(check)) {\\ - char errstring[256];\\ - sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\ - PyErr_SetString(#modulename#_error,errstring);\\ 
- /*goto capi_fail;*/\\ - } else """ -# cppmacros['CHECKDIMS']="""\ -# define CHECKDIMS(dims,rank) \\ -# for (int i=0;i<(rank);i++)\\ -# if (dims[i]<0) {\\ -# fprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\ -# goto capi_fail;\\ -# } -# """ -cppmacros[ - 'ARRSIZE'] = '#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))' -cppmacros['OLDPYNUM'] = """\ -#ifdef OLDPYNUM -#error You need to install NumPy version 13 or higher. See https://scipy.org/install.html -#endif -""" -################# C functions ############### - -cfuncs['calcarrindex'] = """\ -static int calcarrindex(int *i,PyArrayObject *arr) { - int k,ii = i[0]; - for (k=1; k < PyArray_NDIM(arr); k++) - ii += (ii*(PyArray_DIM(arr,k) - 1)+i[k]); /* assuming contiguous arr */ - return ii; -}""" -cfuncs['calcarrindextr'] = """\ -static int calcarrindextr(int *i,PyArrayObject *arr) { - int k,ii = i[PyArray_NDIM(arr)-1]; - for (k=1; k < PyArray_NDIM(arr); k++) - ii += (ii*(PyArray_DIM(arr,PyArray_NDIM(arr)-k-1) - 1)+i[PyArray_NDIM(arr)-k-1]); /* assuming contiguous arr */ - return ii; -}""" -cfuncs['forcomb'] = """\ -static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache; -static int initforcomb(npy_intp *dims,int nd,int tr) { - int k; - if (dims==NULL) return 0; - if (nd<0) return 0; - forcombcache.nd = nd; - forcombcache.d = dims; - forcombcache.tr = tr; - if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0; - if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0; - for (k=1;k= 0x03000000 - else if (PyUnicode_Check(obj)) { - tmp = PyUnicode_AsASCIIString(obj); - } - else { - PyObject *tmp2; - tmp2 = PyObject_Str(obj); - if (tmp2) { - tmp = PyUnicode_AsASCIIString(tmp2); - Py_DECREF(tmp2); - } - else { - tmp = NULL; - } - } -#else - else { - tmp = PyObject_Str(obj); - } -#endif - if (tmp == NULL) goto capi_fail; - if (*len == -1) - *len = PyString_GET_SIZE(tmp); - STRINGMALLOC(*str,*len); - 
STRINGCOPYN(*str,PyString_AS_STRING(tmp),*len+1); - Py_DECREF(tmp); - return 1; -capi_fail: - Py_XDECREF(tmp); - { - PyObject* err = PyErr_Occurred(); - if (err==NULL) err = #modulename#_error; - PyErr_SetString(err,errmess); - } - return 0; -} -""" -needs['char_from_pyobj'] = ['int_from_pyobj'] -cfuncs['char_from_pyobj'] = """\ -static int char_from_pyobj(char* v,PyObject *obj,const char *errmess) { - int i=0; - if (int_from_pyobj(&i,obj,errmess)) { - *v = (char)i; - return 1; - } - return 0; -} -""" -needs['signed_char_from_pyobj'] = ['int_from_pyobj', 'signed_char'] -cfuncs['signed_char_from_pyobj'] = """\ -static int signed_char_from_pyobj(signed_char* v,PyObject *obj,const char *errmess) { - int i=0; - if (int_from_pyobj(&i,obj,errmess)) { - *v = (signed_char)i; - return 1; - } - return 0; -} -""" -needs['short_from_pyobj'] = ['int_from_pyobj'] -cfuncs['short_from_pyobj'] = """\ -static int short_from_pyobj(short* v,PyObject *obj,const char *errmess) { - int i=0; - if (int_from_pyobj(&i,obj,errmess)) { - *v = (short)i; - return 1; - } - return 0; -} -""" -cfuncs['int_from_pyobj'] = """\ -static int int_from_pyobj(int* v,PyObject *obj,const char *errmess) { - PyObject* tmp = NULL; - if (PyInt_Check(obj)) { - *v = (int)PyInt_AS_LONG(obj); - return 1; - } - tmp = PyNumber_Int(obj); - if (tmp) { - *v = PyInt_AS_LONG(tmp); - Py_DECREF(tmp); - return 1; - } - if (PyComplex_Check(obj)) - tmp = PyObject_GetAttrString(obj,\"real\"); - else if (PyString_Check(obj) || PyUnicode_Check(obj)) - /*pass*/; - else if (PySequence_Check(obj)) - tmp = PySequence_GetItem(obj,0); - if (tmp) { - PyErr_Clear(); - if (int_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} - Py_DECREF(tmp); - } - { - PyObject* err = PyErr_Occurred(); - if (err==NULL) err = #modulename#_error; - PyErr_SetString(err,errmess); - } - return 0; -} -""" -cfuncs['long_from_pyobj'] = """\ -static int long_from_pyobj(long* v,PyObject *obj,const char *errmess) { - PyObject* tmp = NULL; - if 
(PyInt_Check(obj)) { - *v = PyInt_AS_LONG(obj); - return 1; - } - tmp = PyNumber_Int(obj); - if (tmp) { - *v = PyInt_AS_LONG(tmp); - Py_DECREF(tmp); - return 1; - } - if (PyComplex_Check(obj)) - tmp = PyObject_GetAttrString(obj,\"real\"); - else if (PyString_Check(obj) || PyUnicode_Check(obj)) - /*pass*/; - else if (PySequence_Check(obj)) - tmp = PySequence_GetItem(obj,0); - if (tmp) { - PyErr_Clear(); - if (long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} - Py_DECREF(tmp); - } - { - PyObject* err = PyErr_Occurred(); - if (err==NULL) err = #modulename#_error; - PyErr_SetString(err,errmess); - } - return 0; -} -""" -needs['long_long_from_pyobj'] = ['long_long'] -cfuncs['long_long_from_pyobj'] = """\ -static int long_long_from_pyobj(long_long* v,PyObject *obj,const char *errmess) { - PyObject* tmp = NULL; - if (PyLong_Check(obj)) { - *v = PyLong_AsLongLong(obj); - return (!PyErr_Occurred()); - } - if (PyInt_Check(obj)) { - *v = (long_long)PyInt_AS_LONG(obj); - return 1; - } - tmp = PyNumber_Long(obj); - if (tmp) { - *v = PyLong_AsLongLong(tmp); - Py_DECREF(tmp); - return (!PyErr_Occurred()); - } - if (PyComplex_Check(obj)) - tmp = PyObject_GetAttrString(obj,\"real\"); - else if (PyString_Check(obj) || PyUnicode_Check(obj)) - /*pass*/; - else if (PySequence_Check(obj)) - tmp = PySequence_GetItem(obj,0); - if (tmp) { - PyErr_Clear(); - if (long_long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} - Py_DECREF(tmp); - } - { - PyObject* err = PyErr_Occurred(); - if (err==NULL) err = #modulename#_error; - PyErr_SetString(err,errmess); - } - return 0; -} -""" -needs['long_double_from_pyobj'] = ['double_from_pyobj', 'long_double'] -cfuncs['long_double_from_pyobj'] = """\ -static int long_double_from_pyobj(long_double* v,PyObject *obj,const char *errmess) { - double d=0; - if (PyArray_CheckScalar(obj)){ - if PyArray_IsScalar(obj, LongDouble) { - PyArray_ScalarAsCtype(obj, v); - return 1; - } - else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_LONGDOUBLE) 
{ - (*v) = *((npy_longdouble *)PyArray_DATA(obj)); - return 1; - } - } - if (double_from_pyobj(&d,obj,errmess)) { - *v = (long_double)d; - return 1; - } - return 0; -} -""" -cfuncs['double_from_pyobj'] = """\ -static int double_from_pyobj(double* v,PyObject *obj,const char *errmess) { - PyObject* tmp = NULL; - if (PyFloat_Check(obj)) { -#ifdef __sgi - *v = PyFloat_AsDouble(obj); -#else - *v = PyFloat_AS_DOUBLE(obj); -#endif - return 1; - } - tmp = PyNumber_Float(obj); - if (tmp) { -#ifdef __sgi - *v = PyFloat_AsDouble(tmp); -#else - *v = PyFloat_AS_DOUBLE(tmp); -#endif - Py_DECREF(tmp); - return 1; - } - if (PyComplex_Check(obj)) - tmp = PyObject_GetAttrString(obj,\"real\"); - else if (PyString_Check(obj) || PyUnicode_Check(obj)) - /*pass*/; - else if (PySequence_Check(obj)) - tmp = PySequence_GetItem(obj,0); - if (tmp) { - PyErr_Clear(); - if (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} - Py_DECREF(tmp); - } - { - PyObject* err = PyErr_Occurred(); - if (err==NULL) err = #modulename#_error; - PyErr_SetString(err,errmess); - } - return 0; -} -""" -needs['float_from_pyobj'] = ['double_from_pyobj'] -cfuncs['float_from_pyobj'] = """\ -static int float_from_pyobj(float* v,PyObject *obj,const char *errmess) { - double d=0.0; - if (double_from_pyobj(&d,obj,errmess)) { - *v = (float)d; - return 1; - } - return 0; -} -""" -needs['complex_long_double_from_pyobj'] = ['complex_long_double', 'long_double', - 'complex_double_from_pyobj'] -cfuncs['complex_long_double_from_pyobj'] = """\ -static int complex_long_double_from_pyobj(complex_long_double* v,PyObject *obj,const char *errmess) { - complex_double cd={0.0,0.0}; - if (PyArray_CheckScalar(obj)){ - if PyArray_IsScalar(obj, CLongDouble) { - PyArray_ScalarAsCtype(obj, v); - return 1; - } - else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) { - (*v).r = ((npy_clongdouble *)PyArray_DATA(obj))->real; - (*v).i = ((npy_clongdouble *)PyArray_DATA(obj))->imag; - return 1; - } - } - if 
(complex_double_from_pyobj(&cd,obj,errmess)) { - (*v).r = (long_double)cd.r; - (*v).i = (long_double)cd.i; - return 1; - } - return 0; -} -""" -needs['complex_double_from_pyobj'] = ['complex_double'] -cfuncs['complex_double_from_pyobj'] = """\ -static int complex_double_from_pyobj(complex_double* v,PyObject *obj,const char *errmess) { - Py_complex c; - if (PyComplex_Check(obj)) { - c=PyComplex_AsCComplex(obj); - (*v).r=c.real, (*v).i=c.imag; - return 1; - } - if (PyArray_IsScalar(obj, ComplexFloating)) { - if (PyArray_IsScalar(obj, CFloat)) { - npy_cfloat new; - PyArray_ScalarAsCtype(obj, &new); - (*v).r = (double)new.real; - (*v).i = (double)new.imag; - } - else if (PyArray_IsScalar(obj, CLongDouble)) { - npy_clongdouble new; - PyArray_ScalarAsCtype(obj, &new); - (*v).r = (double)new.real; - (*v).i = (double)new.imag; - } - else { /* if (PyArray_IsScalar(obj, CDouble)) */ - PyArray_ScalarAsCtype(obj, v); - } - return 1; - } - if (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */ - PyObject *arr; - if (PyArray_Check(obj)) { - arr = PyArray_Cast((PyArrayObject *)obj, NPY_CDOUBLE); - } - else { - arr = PyArray_FromScalar(obj, PyArray_DescrFromType(NPY_CDOUBLE)); - } - if (arr==NULL) return 0; - (*v).r = ((npy_cdouble *)PyArray_DATA(arr))->real; - (*v).i = ((npy_cdouble *)PyArray_DATA(arr))->imag; - return 1; - } - /* Python does not provide PyNumber_Complex function :-( */ - (*v).i=0.0; - if (PyFloat_Check(obj)) { -#ifdef __sgi - (*v).r = PyFloat_AsDouble(obj); -#else - (*v).r = PyFloat_AS_DOUBLE(obj); -#endif - return 1; - } - if (PyInt_Check(obj)) { - (*v).r = (double)PyInt_AS_LONG(obj); - return 1; - } - if (PyLong_Check(obj)) { - (*v).r = PyLong_AsDouble(obj); - return (!PyErr_Occurred()); - } - if (PySequence_Check(obj) && !(PyString_Check(obj) || PyUnicode_Check(obj))) { - PyObject *tmp = PySequence_GetItem(obj,0); - if (tmp) { - if (complex_double_from_pyobj(v,tmp,errmess)) { - Py_DECREF(tmp); - return 1; - } - Py_DECREF(tmp); - } - } - { - 
PyObject* err = PyErr_Occurred(); - if (err==NULL) - err = PyExc_TypeError; - PyErr_SetString(err,errmess); - } - return 0; -} -""" -needs['complex_float_from_pyobj'] = [ - 'complex_float', 'complex_double_from_pyobj'] -cfuncs['complex_float_from_pyobj'] = """\ -static int complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) { - complex_double cd={0.0,0.0}; - if (complex_double_from_pyobj(&cd,obj,errmess)) { - (*v).r = (float)cd.r; - (*v).i = (float)cd.i; - return 1; - } - return 0; -} -""" -needs['try_pyarr_from_char'] = ['pyobj_from_char1', 'TRYPYARRAYTEMPLATE'] -cfuncs[ - 'try_pyarr_from_char'] = 'static int try_pyarr_from_char(PyObject* obj,char* v) {\n TRYPYARRAYTEMPLATE(char,\'c\');\n}\n' -needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'unsigned_char'] -cfuncs[ - 'try_pyarr_from_unsigned_char'] = 'static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n TRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n' -needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'signed_char'] -cfuncs[ - 'try_pyarr_from_signed_char'] = 'static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n TRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n' -needs['try_pyarr_from_short'] = ['pyobj_from_short1', 'TRYPYARRAYTEMPLATE'] -cfuncs[ - 'try_pyarr_from_short'] = 'static int try_pyarr_from_short(PyObject* obj,short* v) {\n TRYPYARRAYTEMPLATE(short,\'s\');\n}\n' -needs['try_pyarr_from_int'] = ['pyobj_from_int1', 'TRYPYARRAYTEMPLATE'] -cfuncs[ - 'try_pyarr_from_int'] = 'static int try_pyarr_from_int(PyObject* obj,int* v) {\n TRYPYARRAYTEMPLATE(int,\'i\');\n}\n' -needs['try_pyarr_from_long'] = ['pyobj_from_long1', 'TRYPYARRAYTEMPLATE'] -cfuncs[ - 'try_pyarr_from_long'] = 'static int try_pyarr_from_long(PyObject* obj,long* v) {\n TRYPYARRAYTEMPLATE(long,\'l\');\n}\n' -needs['try_pyarr_from_long_long'] = [ - 'pyobj_from_long_long1', 'TRYPYARRAYTEMPLATE', 'long_long'] -cfuncs[ - 'try_pyarr_from_long_long'] = 'static int 
try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n TRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n' -needs['try_pyarr_from_float'] = ['pyobj_from_float1', 'TRYPYARRAYTEMPLATE'] -cfuncs[ - 'try_pyarr_from_float'] = 'static int try_pyarr_from_float(PyObject* obj,float* v) {\n TRYPYARRAYTEMPLATE(float,\'f\');\n}\n' -needs['try_pyarr_from_double'] = ['pyobj_from_double1', 'TRYPYARRAYTEMPLATE'] -cfuncs[ - 'try_pyarr_from_double'] = 'static int try_pyarr_from_double(PyObject* obj,double* v) {\n TRYPYARRAYTEMPLATE(double,\'d\');\n}\n' -needs['try_pyarr_from_complex_float'] = [ - 'pyobj_from_complex_float1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_float'] -cfuncs[ - 'try_pyarr_from_complex_float'] = 'static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n' -needs['try_pyarr_from_complex_double'] = [ - 'pyobj_from_complex_double1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_double'] -cfuncs[ - 'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n' - -needs['create_cb_arglist'] = ['CFUNCSMESS', 'PRINTPYOBJERR', 'MINMAX'] -cfuncs['create_cb_arglist'] = """\ -static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofargs,const int nofoptargs,int *nofargs,PyTupleObject **args,const char *errmess) { - PyObject *tmp = NULL; - PyObject *tmp_fun = NULL; - int tot,opt,ext,siz,i,di=0; - CFUNCSMESS(\"create_cb_arglist\\n\"); - tot=opt=ext=siz=0; - /* Get the total number of arguments */ - if (PyFunction_Check(fun)) { - tmp_fun = fun; - Py_INCREF(tmp_fun); - } - else { - di = 1; - if (PyObject_HasAttrString(fun,\"im_func\")) { - tmp_fun = PyObject_GetAttrString(fun,\"im_func\"); - } - else if (PyObject_HasAttrString(fun,\"__call__\")) { - tmp = PyObject_GetAttrString(fun,\"__call__\"); - if (PyObject_HasAttrString(tmp,\"im_func\")) - tmp_fun = PyObject_GetAttrString(tmp,\"im_func\"); - else { - tmp_fun = 
fun; /* built-in function */ - Py_INCREF(tmp_fun); - tot = maxnofargs; - if (xa != NULL) - tot += PyTuple_Size((PyObject *)xa); - } - Py_XDECREF(tmp); - } - else if (PyFortran_Check(fun) || PyFortran_Check1(fun)) { - tot = maxnofargs; - if (xa != NULL) - tot += PyTuple_Size((PyObject *)xa); - tmp_fun = fun; - Py_INCREF(tmp_fun); - } - else if (F2PyCapsule_Check(fun)) { - tot = maxnofargs; - if (xa != NULL) - ext = PyTuple_Size((PyObject *)xa); - if(ext>0) { - fprintf(stderr,\"extra arguments tuple cannot be used with CObject call-back\\n\"); - goto capi_fail; - } - tmp_fun = fun; - Py_INCREF(tmp_fun); - } - } -if (tmp_fun==NULL) { -fprintf(stderr,\"Call-back argument must be function|instance|instance.__call__|f2py-function but got %s.\\n\",(fun==NULL?\"NULL\":Py_TYPE(fun)->tp_name)); -goto capi_fail; -} -#if PY_VERSION_HEX >= 0x03000000 - if (PyObject_HasAttrString(tmp_fun,\"__code__\")) { - if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) { -#else - if (PyObject_HasAttrString(tmp_fun,\"func_code\")) { - if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\")) { -#endif - PyObject *tmp_argcount = PyObject_GetAttrString(tmp,\"co_argcount\"); - Py_DECREF(tmp); - if (tmp_argcount == NULL) { - goto capi_fail; - } - tot = PyInt_AsLong(tmp_argcount) - di; - Py_DECREF(tmp_argcount); - } - } - /* Get the number of optional arguments */ -#if PY_VERSION_HEX >= 0x03000000 - if (PyObject_HasAttrString(tmp_fun,\"__defaults__\")) { - if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\"))) -#else - if (PyObject_HasAttrString(tmp_fun,\"func_defaults\")) { - if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"func_defaults\"))) -#endif - opt = PyTuple_Size(tmp); - Py_XDECREF(tmp); - } - /* Get the number of extra arguments */ - if (xa != NULL) - ext = PyTuple_Size((PyObject *)xa); - /* Calculate the size of call-backs argument list */ - siz = MIN(maxnofargs+ext,tot); - 
*nofargs = MAX(0,siz-ext); -#ifdef DEBUGCFUNCS - fprintf(stderr,\"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),tot,opt,ext,siz,nofargs=%d(-%d),%d,%d,%d,%d,%d\\n\",maxnofargs,nofoptargs,tot,opt,ext,siz,*nofargs); -#endif - if (siz 0: - if outneeds[n][0] not in needs: - out.append(outneeds[n][0]) - del outneeds[n][0] - else: - flag = 0 - for k in outneeds[n][1:]: - if k in needs[outneeds[n][0]]: - flag = 1 - break - if flag: - outneeds[n] = outneeds[n][1:] + [outneeds[n][0]] - else: - out.append(outneeds[n][0]) - del outneeds[n][0] - if saveout and (0 not in map(lambda x, y: x == y, saveout, outneeds[n])) \ - and outneeds[n] != []: - print(n, saveout) - errmess( - 'get_needs: no progress in sorting needs, probably circular dependence, skipping.\n') - out = out + saveout - break - saveout = copy.copy(outneeds[n]) - if out == []: - out = [n] - res[n] = out - return res diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/common_rules.py b/venv/lib/python3.7/site-packages/numpy/f2py/common_rules.py deleted file mode 100644 index f61d881..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/common_rules.py +++ /dev/null @@ -1,149 +0,0 @@ -#!/usr/bin/env python -""" - -Build common block mechanism for f2py2e. - -Copyright 2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/05/06 10:57:33 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -__version__ = "$Revision: 1.19 $"[10:-1] - -from . import __version__ -f2py_version = __version__.version - -from .auxfuncs import ( - hasbody, hascommon, hasnote, isintent_hide, outmess -) -from . import capi_maps -from . 
import func2subr -from .crackfortran import rmbadname - - -def findcommonblocks(block, top=1): - ret = [] - if hascommon(block): - for key, value in block['common'].items(): - vars_ = {v: block['vars'][v] for v in value} - ret.append((key, value, vars_)) - elif hasbody(block): - for b in block['body']: - ret = ret + findcommonblocks(b, 0) - if top: - tret = [] - names = [] - for t in ret: - if t[0] not in names: - names.append(t[0]) - tret.append(t) - return tret - return ret - - -def buildhooks(m): - ret = {'commonhooks': [], 'initcommonhooks': [], - 'docs': ['"COMMON blocks:\\n"']} - fwrap = [''] - - def fadd(line, s=fwrap): - s[0] = '%s\n %s' % (s[0], line) - chooks = [''] - - def cadd(line, s=chooks): - s[0] = '%s\n%s' % (s[0], line) - ihooks = [''] - - def iadd(line, s=ihooks): - s[0] = '%s\n%s' % (s[0], line) - doc = [''] - - def dadd(line, s=doc): - s[0] = '%s\n%s' % (s[0], line) - for (name, vnames, vars) in findcommonblocks(m): - lower_name = name.lower() - hnames, inames = [], [] - for n in vnames: - if isintent_hide(vars[n]): - hnames.append(n) - else: - inames.append(n) - if hnames: - outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n\t\t Hidden: %s\n' % ( - name, ','.join(inames), ','.join(hnames))) - else: - outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n' % ( - name, ','.join(inames))) - fadd('subroutine f2pyinit%s(setupfunc)' % name) - fadd('external setupfunc') - for n in vnames: - fadd(func2subr.var2fixfortran(vars, n)) - if name == '_BLNK_': - fadd('common %s' % (','.join(vnames))) - else: - fadd('common /%s/ %s' % (name, ','.join(vnames))) - fadd('call setupfunc(%s)' % (','.join(inames))) - fadd('end\n') - cadd('static FortranDataDef f2py_%s_def[] = {' % (name)) - idims = [] - for n in inames: - ct = capi_maps.getctype(vars[n]) - at = capi_maps.c2capi_map[ct] - dm = capi_maps.getarrdims(n, vars[n]) - if dm['dims']: - idims.append('(%s)' % (dm['dims'])) - else: - idims.append('') - dms = dm['dims'].strip() 
- if not dms: - dms = '-1' - cadd('\t{\"%s\",%s,{{%s}},%s},' % (n, dm['rank'], dms, at)) - cadd('\t{NULL}\n};') - inames1 = rmbadname(inames) - inames1_tps = ','.join(['char *' + s for s in inames1]) - cadd('static void f2py_setup_%s(%s) {' % (name, inames1_tps)) - cadd('\tint i_f2py=0;') - for n in inames1: - cadd('\tf2py_%s_def[i_f2py++].data = %s;' % (name, n)) - cadd('}') - if '_' in lower_name: - F_FUNC = 'F_FUNC_US' - else: - F_FUNC = 'F_FUNC' - cadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void(*)(%s));' - % (F_FUNC, lower_name, name.upper(), - ','.join(['char*'] * len(inames1)))) - cadd('static void f2py_init_%s(void) {' % name) - cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);' - % (F_FUNC, lower_name, name.upper(), name)) - cadd('}\n') - iadd('\ttmp = PyFortranObject_New(f2py_%s_def,f2py_init_%s);' % (name, name)) - iadd('\tF2PyDict_SetItemString(d, \"%s\", tmp);' % name) - iadd('\tPy_DECREF(tmp);') - tname = name.replace('_', '\\_') - dadd('\\subsection{Common block \\texttt{%s}}\n' % (tname)) - dadd('\\begin{description}') - for n in inames: - dadd('\\item[]{{}\\verb@%s@{}}' % - (capi_maps.getarrdocsign(n, vars[n]))) - if hasnote(vars[n]): - note = vars[n]['note'] - if isinstance(note, list): - note = '\n'.join(note) - dadd('--- %s' % (note)) - dadd('\\end{description}') - ret['docs'].append( - '"\t/%s/ %s\\n"' % (name, ','.join(map(lambda v, d: v + d, inames, idims)))) - ret['commonhooks'] = chooks - ret['initcommonhooks'] = ihooks - ret['latexdoc'] = doc[0] - if len(ret['docs']) <= 1: - ret['docs'] = '' - return ret, fwrap[0] diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/crackfortran.py b/venv/lib/python3.7/site-packages/numpy/f2py/crackfortran.py deleted file mode 100644 index 2aaf5d7..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/crackfortran.py +++ /dev/null @@ -1,3345 +0,0 @@ -#!/usr/bin/env python -""" -crackfortran --- read fortran (77,90) code and extract declaration information. 
- -Copyright 1999-2004 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/09/27 07:13:49 $ -Pearu Peterson - - -Usage of crackfortran: -====================== -Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h - -m ,--ignore-contains -Functions: crackfortran, crack2fortran -The following Fortran statements/constructions are supported -(or will be if needed): - block data,byte,call,character,common,complex,contains,data, - dimension,double complex,double precision,end,external,function, - implicit,integer,intent,interface,intrinsic, - logical,module,optional,parameter,private,public, - program,real,(sequence?),subroutine,type,use,virtual, - include,pythonmodule -Note: 'virtual' is mapped to 'dimension'. -Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is minor bug). -Note: code after 'contains' will be ignored until its scope ends. -Note: 'common' statement is extended: dimensions are moved to variable definitions -Note: f2py directive: f2py is read as -Note: pythonmodule is introduced to represent Python module - -Usage: - `postlist=crackfortran(files)` - `postlist` contains declaration information read from the list of files `files`. 
- `crack2fortran(postlist)` returns a fortran code to be saved to pyf-file - - `postlist` has the following structure: - *** it is a list of dictionaries containing `blocks': - B = {'block','body','vars','parent_block'[,'name','prefix','args','result', - 'implicit','externals','interfaced','common','sortvars', - 'commonvars','note']} - B['block'] = 'interface' | 'function' | 'subroutine' | 'module' | - 'program' | 'block data' | 'type' | 'pythonmodule' - B['body'] --- list containing `subblocks' with the same structure as `blocks' - B['parent_block'] --- dictionary of a parent block: - C['body'][]['parent_block'] is C - B['vars'] --- dictionary of variable definitions - B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first) - B['name'] --- name of the block (not if B['block']=='interface') - B['prefix'] --- prefix string (only if B['block']=='function') - B['args'] --- list of argument names if B['block']== 'function' | 'subroutine' - B['result'] --- name of the return value (only if B['block']=='function') - B['implicit'] --- dictionary {'a':,'b':...} | None - B['externals'] --- list of variables being external - B['interfaced'] --- list of variables being external and defined - B['common'] --- dictionary of common blocks (list of objects) - B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions) - B['from'] --- string showing the 'parents' of the current block - B['use'] --- dictionary of modules used in current block: - {:{['only':<0|1>],['map':{:,...}]}} - B['note'] --- list of LaTeX comments on the block - B['f2pyenhancements'] --- optional dictionary - {'threadsafe':'','fortranname':, - 'callstatement':|, - 'callprotoargument':, - 'usercode':|, - 'pymethoddef:' - } - B['entry'] --- dictionary {entryname:argslist,..} - B['varnames'] --- list of variable names given in the order of reading the - Fortran code, useful for derived types. 
- B['saved_interface'] --- a string of scanned routine signature, defines explicit interface - *** Variable definition is a dictionary - D = B['vars'][] = - {'typespec'[,'attrspec','kindselector','charselector','=','typename']} - D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' | - 'double precision' | 'integer' | 'logical' | 'real' | 'type' - D['attrspec'] --- list of attributes (e.g. 'dimension()', - 'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)', - 'optional','required', etc) - K = D['kindselector'] = {['*','kind']} (only if D['typespec'] = - 'complex' | 'integer' | 'logical' | 'real' ) - C = D['charselector'] = {['*','len','kind']} - (only if D['typespec']=='character') - D['='] --- initialization expression string - D['typename'] --- name of the type if D['typespec']=='type' - D['dimension'] --- list of dimension bounds - D['intent'] --- list of intent specifications - D['depend'] --- list of variable names on which current variable depends on - D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised - D['note'] --- list of LaTeX comments on the variable - *** Meaning of kind/char selectors (few examples): - D['typespec>']*K['*'] - D['typespec'](kind=K['kind']) - character*C['*'] - character(len=C['len'],kind=C['kind']) - (see also fortran type declaration statement formats below) - -Fortran 90 type declaration statement format (F77 is subset of F90) -==================================================================== -(Main source: IBM XL Fortran 5.1 Language Reference Manual) -type declaration = [[]::] - = byte | - character[] | - complex[] | - double complex | - double precision | - integer[] | - logical[] | - real[] | - type() - = * | - ([len=][,[kind=]]) | - (kind=[,len=]) - = * | - ([kind=]) - = comma separated list of attributes. 
- Only the following attributes are used in - building up the interface: - external - (parameter --- affects '=' key) - optional - intent - Other attributes are ignored. - = in | out | inout - = comma separated list of dimension bounds. - = [[*][()] | [()]*] - [// | =] [,] - -In addition, the following attributes are used: check,depend,note - -TODO: - * Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)' - -> 'real x(2)') - The above may be solved by creating appropriate preprocessor program, for example. - -""" -from __future__ import division, absolute_import, print_function - -import sys -import string -import fileinput -import re -import os -import copy -import platform - -from . import __version__ - -# The eviroment provided by auxfuncs.py is needed for some calls to eval. -# As the needed functions cannot be determined by static inspection of the -# code, it is safest to use import * pending a major refactoring of f2py. -from .auxfuncs import * - - -f2py_version = __version__.version - -# Global flags: -strictf77 = 1 # Ignore `!' comments unless line[0]=='!' -sourcecodeform = 'fix' # 'fix','free' -quiet = 0 # Be verbose if 0 (Obsolete: not used any more) -verbose = 1 # Be quiet if 0, extra verbose if > 1. 
-tabchar = 4 * ' ' -pyffilename = '' -f77modulename = '' -skipemptyends = 0 # for old F77 programs without 'program' statement -ignorecontains = 1 -dolowercase = 1 -debug = [] - -# Global variables -beginpattern = '' -currentfilename = '' -expectbegin = 1 -f90modulevars = {} -filepositiontext = '' -gotnextfile = 1 -groupcache = None -groupcounter = 0 -grouplist = {groupcounter: []} -groupname = '' -include_paths = [] -neededmodule = -1 -onlyfuncs = [] -previous_context = None -skipblocksuntil = -1 -skipfuncs = [] -skipfunctions = [] -usermodules = [] - - -def reset_global_f2py_vars(): - global groupcounter, grouplist, neededmodule, expectbegin - global skipblocksuntil, usermodules, f90modulevars, gotnextfile - global filepositiontext, currentfilename, skipfunctions, skipfuncs - global onlyfuncs, include_paths, previous_context - global strictf77, sourcecodeform, quiet, verbose, tabchar, pyffilename - global f77modulename, skipemptyends, ignorecontains, dolowercase, debug - - # flags - strictf77 = 1 - sourcecodeform = 'fix' - quiet = 0 - verbose = 1 - tabchar = 4 * ' ' - pyffilename = '' - f77modulename = '' - skipemptyends = 0 - ignorecontains = 1 - dolowercase = 1 - debug = [] - # variables - groupcounter = 0 - grouplist = {groupcounter: []} - neededmodule = -1 - expectbegin = 1 - skipblocksuntil = -1 - usermodules = [] - f90modulevars = {} - gotnextfile = 1 - filepositiontext = '' - currentfilename = '' - skipfunctions = [] - skipfuncs = [] - onlyfuncs = [] - include_paths = [] - previous_context = None - - -def outmess(line, flag=1): - global filepositiontext - - if not verbose: - return - if not quiet: - if flag: - sys.stdout.write(filepositiontext) - sys.stdout.write(line) - -re._MAXCACHE = 50 -defaultimplicitrules = {} -for c in "abcdefghopqrstuvwxyz$_": - defaultimplicitrules[c] = {'typespec': 'real'} -for c in "ijklmn": - defaultimplicitrules[c] = {'typespec': 'integer'} -del c -badnames = {} -invbadnames = {} -for n in ['int', 'double', 'float', 'char', 
'short', 'long', 'void', 'case', 'while', - 'return', 'signed', 'unsigned', 'if', 'for', 'typedef', 'sizeof', 'union', - 'struct', 'static', 'register', 'new', 'break', 'do', 'goto', 'switch', - 'continue', 'else', 'inline', 'extern', 'delete', 'const', 'auto', - 'len', 'rank', 'shape', 'index', 'slen', 'size', '_i', - 'max', 'min', - 'flen', 'fshape', - 'string', 'complex_double', 'float_double', 'stdin', 'stderr', 'stdout', - 'type', 'default']: - badnames[n] = n + '_bn' - invbadnames[n + '_bn'] = n - - -def rmbadname1(name): - if name in badnames: - errmess('rmbadname1: Replacing "%s" with "%s".\n' % - (name, badnames[name])) - return badnames[name] - return name - - -def rmbadname(names): - return [rmbadname1(_m) for _m in names] - - -def undo_rmbadname1(name): - if name in invbadnames: - errmess('undo_rmbadname1: Replacing "%s" with "%s".\n' - % (name, invbadnames[name])) - return invbadnames[name] - return name - - -def undo_rmbadname(names): - return [undo_rmbadname1(_m) for _m in names] - - -def getextension(name): - i = name.rfind('.') - if i == -1: - return '' - if '\\' in name[i:]: - return '' - if '/' in name[i:]: - return '' - return name[i + 1:] - -is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match -_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search -_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search -_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search -_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match - - -def is_free_format(file): - """Check if file is in free format Fortran.""" - # f90 allows both fixed and free format, assuming fixed unless - # signs of free format are detected. - result = 0 - with open(file, 'r') as f: - line = f.readline() - n = 15 # the number of non-comment lines to scan for hints - if _has_f_header(line): - n = 0 - elif _has_f90_header(line): - n = 0 - result = 1 - while n > 0 and line: - if line[0] != '!' 
and line.strip(): - n -= 1 - if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&': - result = 1 - break - line = f.readline() - return result - - -# Read fortran (77,90) code -def readfortrancode(ffile, dowithline=show, istop=1): - """ - Read fortran codes from files and - 1) Get rid of comments, line continuations, and empty lines; lower cases. - 2) Call dowithline(line) on every line. - 3) Recursively call itself when statement \"include ''\" is met. - """ - global gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77 - global beginpattern, quiet, verbose, dolowercase, include_paths - - if not istop: - saveglobals = gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ - beginpattern, quiet, verbose, dolowercase - if ffile == []: - return - localdolowercase = dolowercase - cont = 0 - finalline = '' - ll = '' - includeline = re.compile( - r'\s*include\s*(\'|")(?P[^\'"]*)(\'|")', re.I) - cont1 = re.compile(r'(?P.*)&\s*\Z') - cont2 = re.compile(r'(\s*&|)(?P.*)') - mline_mark = re.compile(r".*?'''") - if istop: - dowithline('', -1) - ll, l1 = '', '' - spacedigits = [' '] + [str(_m) for _m in range(10)] - filepositiontext = '' - fin = fileinput.FileInput(ffile) - while True: - l = fin.readline() - if not l: - break - if fin.isfirstline(): - filepositiontext = '' - currentfilename = fin.filename() - gotnextfile = 1 - l1 = l - strictf77 = 0 - sourcecodeform = 'fix' - ext = os.path.splitext(currentfilename)[1] - if is_f_file(currentfilename) and \ - not (_has_f90_header(l) or _has_fix_header(l)): - strictf77 = 1 - elif is_free_format(currentfilename) and not _has_fix_header(l): - sourcecodeform = 'free' - if strictf77: - beginpattern = beginpattern77 - else: - beginpattern = beginpattern90 - outmess('\tReading file %s (format:%s%s)\n' - % (repr(currentfilename), sourcecodeform, - strictf77 and ',strict' or '')) - - l = l.expandtabs().replace('\xa0', ' ') - # Get rid of newline characters - while not l == '': - 
if l[-1] not in "\n\r\f": - break - l = l[:-1] - if not strictf77: - (l, rl) = split_by_unquoted(l, '!') - l += ' ' - if rl[:5].lower() == '!f2py': # f2py directive - l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!') - if l.strip() == '': # Skip empty line - cont = 0 - continue - if sourcecodeform == 'fix': - if l[0] in ['*', 'c', '!', 'C', '#']: - if l[1:5].lower() == 'f2py': # f2py directive - l = ' ' + l[5:] - else: # Skip comment line - cont = 0 - continue - elif strictf77: - if len(l) > 72: - l = l[:72] - if not (l[0] in spacedigits): - raise Exception('readfortrancode: Found non-(space,digit) char ' - 'in the first column.\n\tAre you sure that ' - 'this code is in fix form?\n\tline=%s' % repr(l)) - - if (not cont or strictf77) and (len(l) > 5 and not l[5] == ' '): - # Continuation of a previous line - ll = ll + l[6:] - finalline = '' - origfinalline = '' - else: - if not strictf77: - # F90 continuation - r = cont1.match(l) - if r: - l = r.group('line') # Continuation follows .. - if cont: - ll = ll + cont2.match(l).group('line') - finalline = '' - origfinalline = '' - else: - # clean up line beginning from possible digits. - l = ' ' + l[5:] - if localdolowercase: - finalline = ll.lower() - else: - finalline = ll - origfinalline = ll - ll = l - cont = (r is not None) - else: - # clean up line beginning from possible digits. - l = ' ' + l[5:] - if localdolowercase: - finalline = ll.lower() - else: - finalline = ll - origfinalline = ll - ll = l - - elif sourcecodeform == 'free': - if not cont and ext == '.pyf' and mline_mark.match(l): - l = l + '\n' - while True: - lc = fin.readline() - if not lc: - errmess( - 'Unexpected end of file when reading multiline\n') - break - l = l + lc - if mline_mark.match(lc): - break - l = l.rstrip() - r = cont1.match(l) - if r: - l = r.group('line') # Continuation follows .. 
- if cont: - ll = ll + cont2.match(l).group('line') - finalline = '' - origfinalline = '' - else: - if localdolowercase: - finalline = ll.lower() - else: - finalline = ll - origfinalline = ll - ll = l - cont = (r is not None) - else: - raise ValueError( - "Flag sourcecodeform must be either 'fix' or 'free': %s" % repr(sourcecodeform)) - filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( - fin.filelineno() - 1, currentfilename, l1) - m = includeline.match(origfinalline) - if m: - fn = m.group('name') - if os.path.isfile(fn): - readfortrancode(fn, dowithline=dowithline, istop=0) - else: - include_dirs = [ - os.path.dirname(currentfilename)] + include_paths - foundfile = 0 - for inc_dir in include_dirs: - fn1 = os.path.join(inc_dir, fn) - if os.path.isfile(fn1): - foundfile = 1 - readfortrancode(fn1, dowithline=dowithline, istop=0) - break - if not foundfile: - outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % ( - repr(fn), os.pathsep.join(include_dirs))) - else: - dowithline(finalline) - l1 = ll - if localdolowercase: - finalline = ll.lower() - else: - finalline = ll - origfinalline = ll - filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( - fin.filelineno() - 1, currentfilename, l1) - m = includeline.match(origfinalline) - if m: - fn = m.group('name') - if os.path.isfile(fn): - readfortrancode(fn, dowithline=dowithline, istop=0) - else: - include_dirs = [os.path.dirname(currentfilename)] + include_paths - foundfile = 0 - for inc_dir in include_dirs: - fn1 = os.path.join(inc_dir, fn) - if os.path.isfile(fn1): - foundfile = 1 - readfortrancode(fn1, dowithline=dowithline, istop=0) - break - if not foundfile: - outmess('readfortrancode: could not find include file %s in %s. 
Ignoring.\n' % ( - repr(fn), os.pathsep.join(include_dirs))) - else: - dowithline(finalline) - filepositiontext = '' - fin.close() - if istop: - dowithline('', 1) - else: - gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ - beginpattern, quiet, verbose, dolowercase = saveglobals - -# Crack line -beforethisafter = r'\s*(?P%s(?=\s*(\b(%s)\b)))' + \ - r'\s*(?P(\b(%s)\b))' + \ - r'\s*(?P%s)\s*\Z' -## -fortrantypes = r'character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte' -typespattern = re.compile( - beforethisafter % ('', fortrantypes, fortrantypes, '.*'), re.I), 'type' -typespattern4implicit = re.compile(beforethisafter % ( - '', fortrantypes + '|static|automatic|undefined', fortrantypes + '|static|automatic|undefined', '.*'), re.I) -# -functionpattern = re.compile(beforethisafter % ( - r'([a-z]+[\w\s(=*+-/)]*?|)', 'function', 'function', '.*'), re.I), 'begin' -subroutinepattern = re.compile(beforethisafter % ( - r'[a-z\s]*?', 'subroutine', 'subroutine', '.*'), re.I), 'begin' -# modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin' -# -groupbegins77 = r'program|block\s*data' -beginpattern77 = re.compile( - beforethisafter % ('', groupbegins77, groupbegins77, '.*'), re.I), 'begin' -groupbegins90 = groupbegins77 + \ - r'|module(?!\s*procedure)|python\s*module|interface|type(?!\s*\()' -beginpattern90 = re.compile( - beforethisafter % ('', groupbegins90, groupbegins90, '.*'), re.I), 'begin' -groupends = r'end|endprogram|endblockdata|endmodule|endpythonmodule|endinterface' -endpattern = re.compile( - beforethisafter % ('', groupends, groupends, r'[\w\s]*'), re.I), 'end' -# endifs='end\s*(if|do|where|select|while|forall)' -endifs = r'(end\s*(if|do|where|select|while|forall))|(module\s*procedure)' -endifpattern = re.compile( - beforethisafter % (r'[\w]*?', endifs, endifs, r'[\w\s]*'), re.I), 'endif' -# -implicitpattern = re.compile( - beforethisafter 
% ('', 'implicit', 'implicit', '.*'), re.I), 'implicit' -dimensionpattern = re.compile(beforethisafter % ( - '', 'dimension|virtual', 'dimension|virtual', '.*'), re.I), 'dimension' -externalpattern = re.compile( - beforethisafter % ('', 'external', 'external', '.*'), re.I), 'external' -optionalpattern = re.compile( - beforethisafter % ('', 'optional', 'optional', '.*'), re.I), 'optional' -requiredpattern = re.compile( - beforethisafter % ('', 'required', 'required', '.*'), re.I), 'required' -publicpattern = re.compile( - beforethisafter % ('', 'public', 'public', '.*'), re.I), 'public' -privatepattern = re.compile( - beforethisafter % ('', 'private', 'private', '.*'), re.I), 'private' -intrisicpattern = re.compile( - beforethisafter % ('', 'intrisic', 'intrisic', '.*'), re.I), 'intrisic' -intentpattern = re.compile(beforethisafter % ( - '', 'intent|depend|note|check', 'intent|depend|note|check', r'\s*\(.*?\).*'), re.I), 'intent' -parameterpattern = re.compile( - beforethisafter % ('', 'parameter', 'parameter', r'\s*\(.*'), re.I), 'parameter' -datapattern = re.compile( - beforethisafter % ('', 'data', 'data', '.*'), re.I), 'data' -callpattern = re.compile( - beforethisafter % ('', 'call', 'call', '.*'), re.I), 'call' -entrypattern = re.compile( - beforethisafter % ('', 'entry', 'entry', '.*'), re.I), 'entry' -callfunpattern = re.compile( - beforethisafter % ('', 'callfun', 'callfun', '.*'), re.I), 'callfun' -commonpattern = re.compile( - beforethisafter % ('', 'common', 'common', '.*'), re.I), 'common' -usepattern = re.compile( - beforethisafter % ('', 'use', 'use', '.*'), re.I), 'use' -containspattern = re.compile( - beforethisafter % ('', 'contains', 'contains', ''), re.I), 'contains' -formatpattern = re.compile( - beforethisafter % ('', 'format', 'format', '.*'), re.I), 'format' -# Non-fortran and f2py-specific statements -f2pyenhancementspattern = re.compile(beforethisafter % ('', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', - 
'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', '.*'), re.I | re.S), 'f2pyenhancements' -multilinepattern = re.compile( - r"\s*(?P''')(?P.*?)(?P''')\s*\Z", re.S), 'multiline' -## - -def split_by_unquoted(line, characters): - """ - Splits the line into (line[:i], line[i:]), - where i is the index of first occurrence of one of the characters - not within quotes, or len(line) if no such index exists - """ - assert not (set('"\'') & set(characters)), "cannot split by unquoted quotes" - r = re.compile( - r"\A(?P({single_quoted}|{double_quoted}|{not_quoted})*)" - r"(?P{char}.*)\Z".format( - not_quoted="[^\"'{}]".format(re.escape(characters)), - char="[{}]".format(re.escape(characters)), - single_quoted=r"('([^'\\]|(\\.))*')", - double_quoted=r'("([^"\\]|(\\.))*")')) - m = r.match(line) - if m: - d = m.groupdict() - return (d["before"], d["after"]) - return (line, "") - -def _simplifyargs(argsline): - a = [] - for n in markoutercomma(argsline).split('@,@'): - for r in '(),': - n = n.replace(r, '_') - a.append(n) - return ','.join(a) - -crackline_re_1 = re.compile(r'\s*(?P\b[a-z]+[\w]*\b)\s*[=].*', re.I) - - -def crackline(line, reset=0): - """ - reset=-1 --- initialize - reset=0 --- crack the line - reset=1 --- final check if mismatch of blocks occurred - - Cracked data is saved in grouplist[0]. 
- """ - global beginpattern, groupcounter, groupname, groupcache, grouplist - global filepositiontext, currentfilename, neededmodule, expectbegin - global skipblocksuntil, skipemptyends, previous_context, gotnextfile - - _, has_semicolon = split_by_unquoted(line, ";") - if has_semicolon and not (f2pyenhancementspattern[0].match(line) or - multilinepattern[0].match(line)): - # XXX: non-zero reset values need testing - assert reset == 0, repr(reset) - # split line on unquoted semicolons - line, semicolon_line = split_by_unquoted(line, ";") - while semicolon_line: - crackline(line, reset) - line, semicolon_line = split_by_unquoted(semicolon_line[1:], ";") - crackline(line, reset) - return - if reset < 0: - groupcounter = 0 - groupname = {groupcounter: ''} - groupcache = {groupcounter: {}} - grouplist = {groupcounter: []} - groupcache[groupcounter]['body'] = [] - groupcache[groupcounter]['vars'] = {} - groupcache[groupcounter]['block'] = '' - groupcache[groupcounter]['name'] = '' - neededmodule = -1 - skipblocksuntil = -1 - return - if reset > 0: - fl = 0 - if f77modulename and neededmodule == groupcounter: - fl = 2 - while groupcounter > fl: - outmess('crackline: groupcounter=%s groupname=%s\n' % - (repr(groupcounter), repr(groupname))) - outmess( - 'crackline: Mismatch of blocks encountered. 
Trying to fix it by assuming "end" statement.\n') - grouplist[groupcounter - 1].append(groupcache[groupcounter]) - grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter = groupcounter - 1 - if f77modulename and neededmodule == groupcounter: - grouplist[groupcounter - 1].append(groupcache[groupcounter]) - grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter = groupcounter - 1 # end interface - grouplist[groupcounter - 1].append(groupcache[groupcounter]) - grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter = groupcounter - 1 # end module - neededmodule = -1 - return - if line == '': - return - flag = 0 - for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern, - requiredpattern, - parameterpattern, datapattern, publicpattern, privatepattern, - intrisicpattern, - endifpattern, endpattern, - formatpattern, - beginpattern, functionpattern, subroutinepattern, - implicitpattern, typespattern, commonpattern, - callpattern, usepattern, containspattern, - entrypattern, - f2pyenhancementspattern, - multilinepattern - ]: - m = pat[0].match(line) - if m: - break - flag = flag + 1 - if not m: - re_1 = crackline_re_1 - if 0 <= skipblocksuntil <= groupcounter: - return - if 'externals' in groupcache[groupcounter]: - for name in groupcache[groupcounter]['externals']: - if name in invbadnames: - name = invbadnames[name] - if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']: - continue - m1 = re.match( - r'(?P[^"]*)\b%s\b\s*@\(@(?P[^@]*)@\)@.*\Z' % name, markouterparen(line), re.I) - if m1: - m2 = re_1.match(m1.group('before')) - a = _simplifyargs(m1.group('args')) - if m2: - line = 'callfun %s(%s) result (%s)' % ( - name, a, m2.group('result')) - else: - line = 'callfun %s(%s)' % (name, a) - m = callfunpattern[0].match(line) - if not m: - outmess( 
- 'crackline: could not resolve function call for line=%s.\n' % repr(line)) - return - analyzeline(m, 'callfun', line) - return - if verbose > 1 or (verbose == 1 and currentfilename.lower().endswith('.pyf')): - previous_context = None - outmess('crackline:%d: No pattern for line\n' % (groupcounter)) - return - elif pat[1] == 'end': - if 0 <= skipblocksuntil < groupcounter: - groupcounter = groupcounter - 1 - if skipblocksuntil <= groupcounter: - return - if groupcounter <= 0: - raise Exception('crackline: groupcounter(=%s) is nonpositive. ' - 'Check the blocks.' - % (groupcounter)) - m1 = beginpattern[0].match((line)) - if (m1) and (not m1.group('this') == groupname[groupcounter]): - raise Exception('crackline: End group %s does not match with ' - 'previous Begin group %s\n\t%s' % - (repr(m1.group('this')), repr(groupname[groupcounter]), - filepositiontext) - ) - if skipblocksuntil == groupcounter: - skipblocksuntil = -1 - grouplist[groupcounter - 1].append(groupcache[groupcounter]) - grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter = groupcounter - 1 - if not skipemptyends: - expectbegin = 1 - elif pat[1] == 'begin': - if 0 <= skipblocksuntil <= groupcounter: - groupcounter = groupcounter + 1 - return - gotnextfile = 0 - analyzeline(m, pat[1], line) - expectbegin = 0 - elif pat[1] == 'endif': - pass - elif pat[1] == 'contains': - if ignorecontains: - return - if 0 <= skipblocksuntil <= groupcounter: - return - skipblocksuntil = groupcounter - else: - if 0 <= skipblocksuntil <= groupcounter: - return - analyzeline(m, pat[1], line) - - -def markouterparen(line): - l = '' - f = 0 - for c in line: - if c == '(': - f = f + 1 - if f == 1: - l = l + '@(@' - continue - elif c == ')': - f = f - 1 - if f == 0: - l = l + '@)@' - continue - l = l + c - return l - - -def markoutercomma(line, comma=','): - l = '' - f = 0 - before, after = split_by_unquoted(line, comma + '()') - l += before - while after: - if 
(after[0] == comma) and (f == 0): - l += '@' + comma + '@' - else: - l += after[0] - if after[0] == '(': - f += 1 - elif after[0] == ')': - f -= 1 - before, after = split_by_unquoted(after[1:], comma + '()') - l += before - assert not f, repr((f, line, l)) - return l - -def unmarkouterparen(line): - r = line.replace('@(@', '(').replace('@)@', ')') - return r - - -def appenddecl(decl, decl2, force=1): - if not decl: - decl = {} - if not decl2: - return decl - if decl is decl2: - return decl - for k in list(decl2.keys()): - if k == 'typespec': - if force or k not in decl: - decl[k] = decl2[k] - elif k == 'attrspec': - for l in decl2[k]: - decl = setattrspec(decl, l, force) - elif k == 'kindselector': - decl = setkindselector(decl, decl2[k], force) - elif k == 'charselector': - decl = setcharselector(decl, decl2[k], force) - elif k in ['=', 'typename']: - if force or k not in decl: - decl[k] = decl2[k] - elif k == 'note': - pass - elif k in ['intent', 'check', 'dimension', 'optional', 'required']: - errmess('appenddecl: "%s" not implemented.\n' % k) - else: - raise Exception('appenddecl: Unknown variable definition key:' + - str(k)) - return decl - -selectpattern = re.compile( - r'\s*(?P(@\(@.*?@\)@|[*][\d*]+|[*]\s*@\(@.*?@\)@|))(?P.*)\Z', re.I) -nameargspattern = re.compile( - r'\s*(?P\b[\w$]+\b)\s*(@\(@\s*(?P[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P.*)\s*@\)@))*\s*\Z', re.I) -callnameargspattern = re.compile( - r'\s*(?P\b[\w$]+\b)\s*@\(@\s*(?P.*)\s*@\)@\s*\Z', re.I) -real16pattern = re.compile( - r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)') -real8pattern = re.compile( - r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))') - -_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b', re.I) - - -def _is_intent_callback(vdecl): - for a in vdecl.get('attrspec', []): - if _intentcallbackpattern.match(a): - return 1 - return 0 - - -def _resolvenameargspattern(line): - line = 
markouterparen(line) - m1 = nameargspattern.match(line) - if m1: - return m1.group('name'), m1.group('args'), m1.group('result'), m1.group('bind') - m1 = callnameargspattern.match(line) - if m1: - return m1.group('name'), m1.group('args'), None, None - return None, [], None, None - - -def analyzeline(m, case, line): - global groupcounter, groupname, groupcache, grouplist, filepositiontext - global currentfilename, f77modulename, neededinterface, neededmodule - global expectbegin, gotnextfile, previous_context - - block = m.group('this') - if case != 'multiline': - previous_context = None - if expectbegin and case not in ['begin', 'call', 'callfun', 'type'] \ - and not skipemptyends and groupcounter < 1: - newname = os.path.basename(currentfilename).split('.')[0] - outmess( - 'analyzeline: no group yet. Creating program group with name "%s".\n' % newname) - gotnextfile = 0 - groupcounter = groupcounter + 1 - groupname[groupcounter] = 'program' - groupcache[groupcounter] = {} - grouplist[groupcounter] = [] - groupcache[groupcounter]['body'] = [] - groupcache[groupcounter]['vars'] = {} - groupcache[groupcounter]['block'] = 'program' - groupcache[groupcounter]['name'] = newname - groupcache[groupcounter]['from'] = 'fromsky' - expectbegin = 0 - if case in ['begin', 'call', 'callfun']: - # Crack line => block,name,args,result - block = block.lower() - if re.match(r'block\s*data', block, re.I): - block = 'block data' - if re.match(r'python\s*module', block, re.I): - block = 'python module' - name, args, result, bind = _resolvenameargspattern(m.group('after')) - if name is None: - if block == 'block data': - name = '_BLOCK_DATA_' - else: - name = '' - if block not in ['interface', 'block data']: - outmess('analyzeline: No name/args pattern found for line.\n') - - previous_context = (block, name, groupcounter) - if args: - args = rmbadname([x.strip() - for x in markoutercomma(args).split('@,@')]) - else: - args = [] - if '' in args: - while '' in args: - args.remove('') - 
outmess( - 'analyzeline: argument list is malformed (missing argument).\n') - - # end of crack line => block,name,args,result - needmodule = 0 - needinterface = 0 - - if case in ['call', 'callfun']: - needinterface = 1 - if 'args' not in groupcache[groupcounter]: - return - if name not in groupcache[groupcounter]['args']: - return - for it in grouplist[groupcounter]: - if it['name'] == name: - return - if name in groupcache[groupcounter]['interfaced']: - return - block = {'call': 'subroutine', 'callfun': 'function'}[case] - if f77modulename and neededmodule == -1 and groupcounter <= 1: - neededmodule = groupcounter + 2 - needmodule = 1 - if block != 'interface': - needinterface = 1 - # Create new block(s) - groupcounter = groupcounter + 1 - groupcache[groupcounter] = {} - grouplist[groupcounter] = [] - if needmodule: - if verbose > 1: - outmess('analyzeline: Creating module block %s\n' % - repr(f77modulename), 0) - groupname[groupcounter] = 'module' - groupcache[groupcounter]['block'] = 'python module' - groupcache[groupcounter]['name'] = f77modulename - groupcache[groupcounter]['from'] = '' - groupcache[groupcounter]['body'] = [] - groupcache[groupcounter]['externals'] = [] - groupcache[groupcounter]['interfaced'] = [] - groupcache[groupcounter]['vars'] = {} - groupcounter = groupcounter + 1 - groupcache[groupcounter] = {} - grouplist[groupcounter] = [] - if needinterface: - if verbose > 1: - outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % ( - groupcounter), 0) - groupname[groupcounter] = 'interface' - groupcache[groupcounter]['block'] = 'interface' - groupcache[groupcounter]['name'] = 'unknown_interface' - groupcache[groupcounter]['from'] = '%s:%s' % ( - groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) - groupcache[groupcounter]['body'] = [] - groupcache[groupcounter]['externals'] = [] - groupcache[groupcounter]['interfaced'] = [] - groupcache[groupcounter]['vars'] = {} - groupcounter = groupcounter + 1 
- groupcache[groupcounter] = {} - grouplist[groupcounter] = [] - groupname[groupcounter] = block - groupcache[groupcounter]['block'] = block - if not name: - name = 'unknown_' + block - groupcache[groupcounter]['prefix'] = m.group('before') - groupcache[groupcounter]['name'] = rmbadname1(name) - groupcache[groupcounter]['result'] = result - if groupcounter == 1: - groupcache[groupcounter]['from'] = currentfilename - else: - if f77modulename and groupcounter == 3: - groupcache[groupcounter]['from'] = '%s:%s' % ( - groupcache[groupcounter - 1]['from'], currentfilename) - else: - groupcache[groupcounter]['from'] = '%s:%s' % ( - groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) - for k in list(groupcache[groupcounter].keys()): - if not groupcache[groupcounter][k]: - del groupcache[groupcounter][k] - - groupcache[groupcounter]['args'] = args - groupcache[groupcounter]['body'] = [] - groupcache[groupcounter]['externals'] = [] - groupcache[groupcounter]['interfaced'] = [] - groupcache[groupcounter]['vars'] = {} - groupcache[groupcounter]['entry'] = {} - # end of creation - if block == 'type': - groupcache[groupcounter]['varnames'] = [] - - if case in ['call', 'callfun']: # set parents variables - if name not in groupcache[groupcounter - 2]['externals']: - groupcache[groupcounter - 2]['externals'].append(name) - groupcache[groupcounter]['vars'] = copy.deepcopy( - groupcache[groupcounter - 2]['vars']) - try: - del groupcache[groupcounter]['vars'][name][ - groupcache[groupcounter]['vars'][name]['attrspec'].index('external')] - except Exception: - pass - if block in ['function', 'subroutine']: # set global attributes - try: - groupcache[groupcounter]['vars'][name] = appenddecl( - groupcache[groupcounter]['vars'][name], groupcache[groupcounter - 2]['vars']['']) - except Exception: - pass - if case == 'callfun': # return type - if result and result in groupcache[groupcounter]['vars']: - if not name == result: - groupcache[groupcounter]['vars'][name] = 
appenddecl( - groupcache[groupcounter]['vars'][name], groupcache[groupcounter]['vars'][result]) - # if groupcounter>1: # name is interfaced - try: - groupcache[groupcounter - 2]['interfaced'].append(name) - except Exception: - pass - if block == 'function': - t = typespattern[0].match(m.group('before') + ' ' + name) - if t: - typespec, selector, attr, edecl = cracktypespec0( - t.group('this'), t.group('after')) - updatevars(typespec, selector, attr, edecl) - - if case in ['call', 'callfun']: - grouplist[groupcounter - 1].append(groupcache[groupcounter]) - grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter = groupcounter - 1 # end routine - grouplist[groupcounter - 1].append(groupcache[groupcounter]) - grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter = groupcounter - 1 # end interface - - elif case == 'entry': - name, args, result, bind = _resolvenameargspattern(m.group('after')) - if name is not None: - if args: - args = rmbadname([x.strip() - for x in markoutercomma(args).split('@,@')]) - else: - args = [] - assert result is None, repr(result) - groupcache[groupcounter]['entry'][name] = args - previous_context = ('entry', name, groupcounter) - elif case == 'type': - typespec, selector, attr, edecl = cracktypespec0( - block, m.group('after')) - last_name = updatevars(typespec, selector, attr, edecl) - if last_name is not None: - previous_context = ('variable', last_name, groupcounter) - elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrisic']: - edecl = groupcache[groupcounter]['vars'] - ll = m.group('after').strip() - i = ll.find('::') - if i < 0 and case == 'intent': - i = markouterparen(ll).find('@)@') - 2 - ll = ll[:i + 1] + '::' + ll[i + 1:] - i = ll.find('::') - if ll[i:] == '::' and 'args' in groupcache[groupcounter]: - outmess('All arguments will have attribute %s%s\n' % - 
(m.group('this'), ll[:i])) - ll = ll + ','.join(groupcache[groupcounter]['args']) - if i < 0: - i = 0 - pl = '' - else: - pl = ll[:i].strip() - ll = ll[i + 2:] - ch = markoutercomma(pl).split('@,@') - if len(ch) > 1: - pl = ch[0] - outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % ( - ','.join(ch[1:]))) - last_name = None - - for e in [x.strip() for x in markoutercomma(ll).split('@,@')]: - m1 = namepattern.match(e) - if not m1: - if case in ['public', 'private']: - k = '' - else: - print(m.groupdict()) - outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n' % ( - case, repr(e))) - continue - else: - k = rmbadname1(m1.group('name')) - if k not in edecl: - edecl[k] = {} - if case == 'dimension': - ap = case + m1.group('after') - if case == 'intent': - ap = m.group('this') + pl - if _intentcallbackpattern.match(ap): - if k not in groupcache[groupcounter]['args']: - if groupcounter > 1: - if '__user__' not in groupcache[groupcounter - 2]['name']: - outmess( - 'analyzeline: missing __user__ module (could be nothing)\n') - # fixes ticket 1693 - if k != groupcache[groupcounter]['name']: - outmess('analyzeline: appending intent(callback) %s' - ' to %s arguments\n' % (k, groupcache[groupcounter]['name'])) - groupcache[groupcounter]['args'].append(k) - else: - errmess( - 'analyzeline: intent(callback) %s is ignored' % (k)) - else: - errmess('analyzeline: intent(callback) %s is already' - ' in argument list' % (k)) - if case in ['optional', 'required', 'public', 'external', 'private', 'intrisic']: - ap = case - if 'attrspec' in edecl[k]: - edecl[k]['attrspec'].append(ap) - else: - edecl[k]['attrspec'] = [ap] - if case == 'external': - if groupcache[groupcounter]['block'] == 'program': - outmess('analyzeline: ignoring program arguments\n') - continue - if k not in groupcache[groupcounter]['args']: - continue - if 'externals' not in groupcache[groupcounter]: - groupcache[groupcounter]['externals'] = 
[] - groupcache[groupcounter]['externals'].append(k) - last_name = k - groupcache[groupcounter]['vars'] = edecl - if last_name is not None: - previous_context = ('variable', last_name, groupcounter) - elif case == 'parameter': - edecl = groupcache[groupcounter]['vars'] - ll = m.group('after').strip()[1:-1] - last_name = None - for e in markoutercomma(ll).split('@,@'): - try: - k, initexpr = [x.strip() for x in e.split('=')] - except Exception: - outmess( - 'analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n' % (e, ll)) - continue - params = get_parameters(edecl) - k = rmbadname1(k) - if k not in edecl: - edecl[k] = {} - if '=' in edecl[k] and (not edecl[k]['='] == initexpr): - outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n' % ( - k, edecl[k]['='], initexpr)) - t = determineexprtype(initexpr, params) - if t: - if t.get('typespec') == 'real': - tt = list(initexpr) - for m in real16pattern.finditer(initexpr): - tt[m.start():m.end()] = list( - initexpr[m.start():m.end()].lower().replace('d', 'e')) - initexpr = ''.join(tt) - elif t.get('typespec') == 'complex': - initexpr = initexpr[1:].lower().replace('d', 'e').\ - replace(',', '+1j*(') - try: - v = eval(initexpr, {}, params) - except (SyntaxError, NameError, TypeError) as msg: - errmess('analyzeline: Failed to evaluate %r. 
Ignoring: %s\n' - % (initexpr, msg)) - continue - edecl[k]['='] = repr(v) - if 'attrspec' in edecl[k]: - edecl[k]['attrspec'].append('parameter') - else: - edecl[k]['attrspec'] = ['parameter'] - last_name = k - groupcache[groupcounter]['vars'] = edecl - if last_name is not None: - previous_context = ('variable', last_name, groupcounter) - elif case == 'implicit': - if m.group('after').strip().lower() == 'none': - groupcache[groupcounter]['implicit'] = None - elif m.group('after'): - if 'implicit' in groupcache[groupcounter]: - impl = groupcache[groupcounter]['implicit'] - else: - impl = {} - if impl is None: - outmess( - 'analyzeline: Overwriting earlier "implicit none" statement.\n') - impl = {} - for e in markoutercomma(m.group('after')).split('@,@'): - decl = {} - m1 = re.match( - r'\s*(?P.*?)\s*(\(\s*(?P[a-z-, ]+)\s*\)\s*|)\Z', e, re.I) - if not m1: - outmess( - 'analyzeline: could not extract info of implicit statement part "%s"\n' % (e)) - continue - m2 = typespattern4implicit.match(m1.group('this')) - if not m2: - outmess( - 'analyzeline: could not extract types pattern of implicit statement part "%s"\n' % (e)) - continue - typespec, selector, attr, edecl = cracktypespec0( - m2.group('this'), m2.group('after')) - kindselect, charselect, typename = cracktypespec( - typespec, selector) - decl['typespec'] = typespec - decl['kindselector'] = kindselect - decl['charselector'] = charselect - decl['typename'] = typename - for k in list(decl.keys()): - if not decl[k]: - del decl[k] - for r in markoutercomma(m1.group('after')).split('@,@'): - if '-' in r: - try: - begc, endc = [x.strip() for x in r.split('-')] - except Exception: - outmess( - 'analyzeline: expected "-" instead of "%s" in range list of implicit statement\n' % r) - continue - else: - begc = endc = r.strip() - if not len(begc) == len(endc) == 1: - outmess( - 'analyzeline: expected "-" instead of "%s" in range list of implicit statement (2)\n' % r) - continue - for o in range(ord(begc), ord(endc) + 1): - 
impl[chr(o)] = decl - groupcache[groupcounter]['implicit'] = impl - elif case == 'data': - ll = [] - dl = '' - il = '' - f = 0 - fc = 1 - inp = 0 - for c in m.group('after'): - if not inp: - if c == "'": - fc = not fc - if c == '/' and fc: - f = f + 1 - continue - if c == '(': - inp = inp + 1 - elif c == ')': - inp = inp - 1 - if f == 0: - dl = dl + c - elif f == 1: - il = il + c - elif f == 2: - dl = dl.strip() - if dl.startswith(','): - dl = dl[1:].strip() - ll.append([dl, il]) - dl = c - il = '' - f = 0 - if f == 2: - dl = dl.strip() - if dl.startswith(','): - dl = dl[1:].strip() - ll.append([dl, il]) - vars = {} - if 'vars' in groupcache[groupcounter]: - vars = groupcache[groupcounter]['vars'] - last_name = None - for l in ll: - l = [x.strip() for x in l] - if l[0][0] == ',': - l[0] = l[0][1:] - if l[0][0] == '(': - outmess( - 'analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % l[0]) - continue - i = 0 - j = 0 - llen = len(l[1]) - for v in rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')]): - if v[0] == '(': - outmess( - 'analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % v) - # XXX: subsequent init expressions may get wrong values. - # Ignoring since data statements are irrelevant for - # wrapping. 
- continue - fc = 0 - while (i < llen) and (fc or not l[1][i] == ','): - if l[1][i] == "'": - fc = not fc - i = i + 1 - i = i + 1 - if v not in vars: - vars[v] = {} - if '=' in vars[v] and not vars[v]['='] == l[1][j:i - 1]: - outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n' % ( - v, vars[v]['='], l[1][j:i - 1])) - vars[v]['='] = l[1][j:i - 1] - j = i - last_name = v - groupcache[groupcounter]['vars'] = vars - if last_name is not None: - previous_context = ('variable', last_name, groupcounter) - elif case == 'common': - line = m.group('after').strip() - if not line[0] == '/': - line = '//' + line - cl = [] - f = 0 - bn = '' - ol = '' - for c in line: - if c == '/': - f = f + 1 - continue - if f >= 3: - bn = bn.strip() - if not bn: - bn = '_BLNK_' - cl.append([bn, ol]) - f = f - 2 - bn = '' - ol = '' - if f % 2: - bn = bn + c - else: - ol = ol + c - bn = bn.strip() - if not bn: - bn = '_BLNK_' - cl.append([bn, ol]) - commonkey = {} - if 'common' in groupcache[groupcounter]: - commonkey = groupcache[groupcounter]['common'] - for c in cl: - if c[0] not in commonkey: - commonkey[c[0]] = [] - for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]: - if i: - commonkey[c[0]].append(i) - groupcache[groupcounter]['common'] = commonkey - previous_context = ('common', bn, groupcounter) - elif case == 'use': - m1 = re.match( - r'\A\s*(?P\b[\w]+\b)\s*((,(\s*\bonly\b\s*:|(?P))\s*(?P.*))|)\s*\Z', m.group('after'), re.I) - if m1: - mm = m1.groupdict() - if 'use' not in groupcache[groupcounter]: - groupcache[groupcounter]['use'] = {} - name = m1.group('name') - groupcache[groupcounter]['use'][name] = {} - isonly = 0 - if 'list' in mm and mm['list'] is not None: - if 'notonly' in mm and mm['notonly'] is None: - isonly = 1 - groupcache[groupcounter]['use'][name]['only'] = isonly - ll = [x.strip() for x in mm['list'].split(',')] - rl = {} - for l in ll: - if '=' in l: - m2 = re.match( - r'\A\s*(?P\b[\w]+\b)\s*=\s*>\s*(?P\b[\w]+\b)\s*\Z', l, re.I) - if 
m2: - rl[m2.group('local').strip()] = m2.group( - 'use').strip() - else: - outmess( - 'analyzeline: Not local=>use pattern found in %s\n' % repr(l)) - else: - rl[l] = l - groupcache[groupcounter]['use'][name]['map'] = rl - else: - pass - else: - print(m.groupdict()) - outmess('analyzeline: Could not crack the use statement.\n') - elif case in ['f2pyenhancements']: - if 'f2pyenhancements' not in groupcache[groupcounter]: - groupcache[groupcounter]['f2pyenhancements'] = {} - d = groupcache[groupcounter]['f2pyenhancements'] - if m.group('this') == 'usercode' and 'usercode' in d: - if isinstance(d['usercode'], str): - d['usercode'] = [d['usercode']] - d['usercode'].append(m.group('after')) - else: - d[m.group('this')] = m.group('after') - elif case == 'multiline': - if previous_context is None: - if verbose: - outmess('analyzeline: No context for multiline block.\n') - return - gc = groupcounter - appendmultiline(groupcache[gc], - previous_context[:2], - m.group('this')) - else: - if verbose > 1: - print(m.groupdict()) - outmess('analyzeline: No code implemented for line.\n') - - -def appendmultiline(group, context_name, ml): - if 'f2pymultilines' not in group: - group['f2pymultilines'] = {} - d = group['f2pymultilines'] - if context_name not in d: - d[context_name] = [] - d[context_name].append(ml) - return - - -def cracktypespec0(typespec, ll): - selector = None - attr = None - if re.match(r'double\s*complex', typespec, re.I): - typespec = 'double complex' - elif re.match(r'double\s*precision', typespec, re.I): - typespec = 'double precision' - else: - typespec = typespec.strip().lower() - m1 = selectpattern.match(markouterparen(ll)) - if not m1: - outmess( - 'cracktypespec0: no kind/char_selector pattern found for line.\n') - return - d = m1.groupdict() - for k in list(d.keys()): - d[k] = unmarkouterparen(d[k]) - if typespec in ['complex', 'integer', 'logical', 'real', 'character', 'type']: - selector = d['this'] - ll = d['after'] - i = ll.find('::') - if i >= 0: - 
attr = ll[:i].strip() - ll = ll[i + 2:] - return typespec, selector, attr, ll -##### -namepattern = re.compile(r'\s*(?P\b[\w]+\b)\s*(?P.*)\s*\Z', re.I) -kindselector = re.compile( - r'\s*(\(\s*(kind\s*=)?\s*(?P.*)\s*\)|[*]\s*(?P.*?))\s*\Z', re.I) -charselector = re.compile( - r'\s*(\((?P.*)\)|[*]\s*(?P.*))\s*\Z', re.I) -lenkindpattern = re.compile( - r'\s*(kind\s*=\s*(?P.*?)\s*(@,@\s*len\s*=\s*(?P.*)|)|(len\s*=\s*|)(?P.*?)\s*(@,@\s*(kind\s*=\s*|)(?P.*)|))\s*\Z', re.I) -lenarraypattern = re.compile( - r'\s*(@\(@\s*(?!/)\s*(?P.*?)\s*@\)@\s*[*]\s*(?P.*?)|([*]\s*(?P.*?)|)\s*(@\(@\s*(?!/)\s*(?P.*?)\s*@\)@|))\s*(=\s*(?P.*?)|(@\(@|)/\s*(?P.*?)\s*/(@\)@|)|)\s*\Z', re.I) - - -def removespaces(expr): - expr = expr.strip() - if len(expr) <= 1: - return expr - expr2 = expr[0] - for i in range(1, len(expr) - 1): - if (expr[i] == ' ' and - ((expr[i + 1] in "()[]{}=+-/* ") or - (expr[i - 1] in "()[]{}=+-/* "))): - continue - expr2 = expr2 + expr[i] - expr2 = expr2 + expr[-1] - return expr2 - - -def markinnerspaces(line): - l = '' - f = 0 - cc = '\'' - cb = '' - for c in line: - if cb == '\\' and c in ['\\', '\'', '"']: - l = l + c - cb = c - continue - if f == 0 and c in ['\'', '"']: - cc = c - if c == cc: - f = f + 1 - elif c == cc: - f = f - 1 - elif c == ' ' and f == 1: - l = l + '@_@' - continue - l = l + c - cb = c - return l - - -def updatevars(typespec, selector, attrspec, entitydecl): - global groupcache, groupcounter - - last_name = None - kindselect, charselect, typename = cracktypespec(typespec, selector) - if attrspec: - attrspec = [x.strip() for x in markoutercomma(attrspec).split('@,@')] - l = [] - c = re.compile(r'(?P[a-zA-Z]+)') - for a in attrspec: - if not a: - continue - m = c.match(a) - if m: - s = m.group('start').lower() - a = s + a[len(s):] - l.append(a) - attrspec = l - el = [x.strip() for x in markoutercomma(entitydecl).split('@,@')] - el1 = [] - for e in el: - for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)), comma=' 
').split('@ @')]: - if e1: - el1.append(e1.replace('@_@', ' ')) - for e in el1: - m = namepattern.match(e) - if not m: - outmess( - 'updatevars: no name pattern found for entity=%s. Skipping.\n' % (repr(e))) - continue - ename = rmbadname1(m.group('name')) - edecl = {} - if ename in groupcache[groupcounter]['vars']: - edecl = groupcache[groupcounter]['vars'][ename].copy() - not_has_typespec = 'typespec' not in edecl - if not_has_typespec: - edecl['typespec'] = typespec - elif typespec and (not typespec == edecl['typespec']): - outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % ( - ename, edecl['typespec'], typespec)) - if 'kindselector' not in edecl: - edecl['kindselector'] = copy.copy(kindselect) - elif kindselect: - for k in list(kindselect.keys()): - if k in edecl['kindselector'] and (not kindselect[k] == edecl['kindselector'][k]): - outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % ( - k, ename, edecl['kindselector'][k], kindselect[k])) - else: - edecl['kindselector'][k] = copy.copy(kindselect[k]) - if 'charselector' not in edecl and charselect: - if not_has_typespec: - edecl['charselector'] = charselect - else: - errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n' - % (ename, charselect)) - elif charselect: - for k in list(charselect.keys()): - if k in edecl['charselector'] and (not charselect[k] == edecl['charselector'][k]): - outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % ( - k, ename, edecl['charselector'][k], charselect[k])) - else: - edecl['charselector'][k] = copy.copy(charselect[k]) - if 'typename' not in edecl: - edecl['typename'] = typename - elif typename and (not edecl['typename'] == typename): - outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". 
Ignoring.\n' % ( - ename, edecl['typename'], typename)) - if 'attrspec' not in edecl: - edecl['attrspec'] = copy.copy(attrspec) - elif attrspec: - for a in attrspec: - if a not in edecl['attrspec']: - edecl['attrspec'].append(a) - else: - edecl['typespec'] = copy.copy(typespec) - edecl['kindselector'] = copy.copy(kindselect) - edecl['charselector'] = copy.copy(charselect) - edecl['typename'] = typename - edecl['attrspec'] = copy.copy(attrspec) - if m.group('after'): - m1 = lenarraypattern.match(markouterparen(m.group('after'))) - if m1: - d1 = m1.groupdict() - for lk in ['len', 'array', 'init']: - if d1[lk + '2'] is not None: - d1[lk] = d1[lk + '2'] - del d1[lk + '2'] - for k in list(d1.keys()): - if d1[k] is not None: - d1[k] = unmarkouterparen(d1[k]) - else: - del d1[k] - if 'len' in d1 and 'array' in d1: - if d1['len'] == '': - d1['len'] = d1['array'] - del d1['array'] - else: - d1['array'] = d1['array'] + ',' + d1['len'] - del d1['len'] - errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n' % ( - typespec, e, typespec, ename, d1['array'])) - if 'array' in d1: - dm = 'dimension(%s)' % d1['array'] - if 'attrspec' not in edecl or (not edecl['attrspec']): - edecl['attrspec'] = [dm] - else: - edecl['attrspec'].append(dm) - for dm1 in edecl['attrspec']: - if dm1[:9] == 'dimension' and dm1 != dm: - del edecl['attrspec'][-1] - errmess('updatevars:%s: attempt to change %r to %r. 
Ignoring.\n' - % (ename, dm1, dm)) - break - - if 'len' in d1: - if typespec in ['complex', 'integer', 'logical', 'real']: - if ('kindselector' not in edecl) or (not edecl['kindselector']): - edecl['kindselector'] = {} - edecl['kindselector']['*'] = d1['len'] - elif typespec == 'character': - if ('charselector' not in edecl) or (not edecl['charselector']): - edecl['charselector'] = {} - if 'len' in edecl['charselector']: - del edecl['charselector']['len'] - edecl['charselector']['*'] = d1['len'] - if 'init' in d1: - if '=' in edecl and (not edecl['='] == d1['init']): - outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % ( - ename, edecl['='], d1['init'])) - else: - edecl['='] = d1['init'] - else: - outmess('updatevars: could not crack entity declaration "%s". Ignoring.\n' % ( - ename + m.group('after'))) - for k in list(edecl.keys()): - if not edecl[k]: - del edecl[k] - groupcache[groupcounter]['vars'][ename] = edecl - if 'varnames' in groupcache[groupcounter]: - groupcache[groupcounter]['varnames'].append(ename) - last_name = ename - return last_name - - -def cracktypespec(typespec, selector): - kindselect = None - charselect = None - typename = None - if selector: - if typespec in ['complex', 'integer', 'logical', 'real']: - kindselect = kindselector.match(selector) - if not kindselect: - outmess( - 'cracktypespec: no kindselector pattern found for %s\n' % (repr(selector))) - return - kindselect = kindselect.groupdict() - kindselect['*'] = kindselect['kind2'] - del kindselect['kind2'] - for k in list(kindselect.keys()): - if not kindselect[k]: - del kindselect[k] - for k, i in list(kindselect.items()): - kindselect[k] = rmbadname1(i) - elif typespec == 'character': - charselect = charselector.match(selector) - if not charselect: - outmess( - 'cracktypespec: no charselector pattern found for %s\n' % (repr(selector))) - return - charselect = charselect.groupdict() - charselect['*'] = charselect['charlen'] - del 
charselect['charlen'] - if charselect['lenkind']: - lenkind = lenkindpattern.match( - markoutercomma(charselect['lenkind'])) - lenkind = lenkind.groupdict() - for lk in ['len', 'kind']: - if lenkind[lk + '2']: - lenkind[lk] = lenkind[lk + '2'] - charselect[lk] = lenkind[lk] - del lenkind[lk + '2'] - del charselect['lenkind'] - for k in list(charselect.keys()): - if not charselect[k]: - del charselect[k] - for k, i in list(charselect.items()): - charselect[k] = rmbadname1(i) - elif typespec == 'type': - typename = re.match(r'\s*\(\s*(?P\w+)\s*\)', selector, re.I) - if typename: - typename = typename.group('name') - else: - outmess('cracktypespec: no typename found in %s\n' % - (repr(typespec + selector))) - else: - outmess('cracktypespec: no selector used for %s\n' % - (repr(selector))) - return kindselect, charselect, typename -###### - - -def setattrspec(decl, attr, force=0): - if not decl: - decl = {} - if not attr: - return decl - if 'attrspec' not in decl: - decl['attrspec'] = [attr] - return decl - if force: - decl['attrspec'].append(attr) - if attr in decl['attrspec']: - return decl - if attr == 'static' and 'automatic' not in decl['attrspec']: - decl['attrspec'].append(attr) - elif attr == 'automatic' and 'static' not in decl['attrspec']: - decl['attrspec'].append(attr) - elif attr == 'public' and 'private' not in decl['attrspec']: - decl['attrspec'].append(attr) - elif attr == 'private' and 'public' not in decl['attrspec']: - decl['attrspec'].append(attr) - else: - decl['attrspec'].append(attr) - return decl - - -def setkindselector(decl, sel, force=0): - if not decl: - decl = {} - if not sel: - return decl - if 'kindselector' not in decl: - decl['kindselector'] = sel - return decl - for k in list(sel.keys()): - if force or k not in decl['kindselector']: - decl['kindselector'][k] = sel[k] - return decl - - -def setcharselector(decl, sel, force=0): - if not decl: - decl = {} - if not sel: - return decl - if 'charselector' not in decl: - decl['charselector'] 
= sel - return decl - for k in list(sel.keys()): - if force or k not in decl['charselector']: - decl['charselector'][k] = sel[k] - return decl - - -def getblockname(block, unknown='unknown'): - if 'name' in block: - return block['name'] - return unknown - -# post processing - - -def setmesstext(block): - global filepositiontext - - try: - filepositiontext = 'In: %s:%s\n' % (block['from'], block['name']) - except Exception: - pass - - -def get_usedict(block): - usedict = {} - if 'parent_block' in block: - usedict = get_usedict(block['parent_block']) - if 'use' in block: - usedict.update(block['use']) - return usedict - - -def get_useparameters(block, param_map=None): - global f90modulevars - - if param_map is None: - param_map = {} - usedict = get_usedict(block) - if not usedict: - return param_map - for usename, mapping in list(usedict.items()): - usename = usename.lower() - if usename not in f90modulevars: - outmess('get_useparameters: no module %s info used by %s\n' % - (usename, block.get('name'))) - continue - mvars = f90modulevars[usename] - params = get_parameters(mvars) - if not params: - continue - # XXX: apply mapping - if mapping: - errmess('get_useparameters: mapping for %s not impl.' 
% (mapping)) - for k, v in list(params.items()): - if k in param_map: - outmess('get_useparameters: overriding parameter %s with' - ' value from module %s' % (repr(k), repr(usename))) - param_map[k] = v - - return param_map - - -def postcrack2(block, tab='', param_map=None): - global f90modulevars - - if not f90modulevars: - return block - if isinstance(block, list): - ret = [postcrack2(g, tab=tab + '\t', param_map=param_map) - for g in block] - return ret - setmesstext(block) - outmess('%sBlock: %s\n' % (tab, block['name']), 0) - - if param_map is None: - param_map = get_useparameters(block) - - if param_map is not None and 'vars' in block: - vars = block['vars'] - for n in list(vars.keys()): - var = vars[n] - if 'kindselector' in var: - kind = var['kindselector'] - if 'kind' in kind: - val = kind['kind'] - if val in param_map: - kind['kind'] = param_map[val] - new_body = [postcrack2(b, tab=tab + '\t', param_map=param_map) - for b in block['body']] - block['body'] = new_body - - return block - - -def postcrack(block, args=None, tab=''): - """ - TODO: - function return values - determine expression types if in argument list - """ - global usermodules, onlyfunctions - - if isinstance(block, list): - gret = [] - uret = [] - for g in block: - setmesstext(g) - g = postcrack(g, tab=tab + '\t') - # sort user routines to appear first - if 'name' in g and '__user__' in g['name']: - uret.append(g) - else: - gret.append(g) - return uret + gret - setmesstext(block) - if not isinstance(block, dict) and 'block' not in block: - raise Exception('postcrack: Expected block dictionary instead of ' + - str(block)) - if 'name' in block and not block['name'] == 'unknown_interface': - outmess('%sBlock: %s\n' % (tab, block['name']), 0) - block = analyzeargs(block) - block = analyzecommon(block) - block['vars'] = analyzevars(block) - block['sortvars'] = sortvarnames(block['vars']) - if 'args' in block and block['args']: - args = block['args'] - block['body'] = analyzebody(block, args, 
tab=tab) - - userisdefined = [] - if 'use' in block: - useblock = block['use'] - for k in list(useblock.keys()): - if '__user__' in k: - userisdefined.append(k) - else: - useblock = {} - name = '' - if 'name' in block: - name = block['name'] - # and not userisdefined: # Build a __user__ module - if 'externals' in block and block['externals']: - interfaced = [] - if 'interfaced' in block: - interfaced = block['interfaced'] - mvars = copy.copy(block['vars']) - if name: - mname = name + '__user__routines' - else: - mname = 'unknown__user__routines' - if mname in userisdefined: - i = 1 - while '%s_%i' % (mname, i) in userisdefined: - i = i + 1 - mname = '%s_%i' % (mname, i) - interface = {'block': 'interface', 'body': [], - 'vars': {}, 'name': name + '_user_interface'} - for e in block['externals']: - if e in interfaced: - edef = [] - j = -1 - for b in block['body']: - j = j + 1 - if b['block'] == 'interface': - i = -1 - for bb in b['body']: - i = i + 1 - if 'name' in bb and bb['name'] == e: - edef = copy.copy(bb) - del b['body'][i] - break - if edef: - if not b['body']: - del block['body'][j] - del interfaced[interfaced.index(e)] - break - interface['body'].append(edef) - else: - if e in mvars and not isexternal(mvars[e]): - interface['vars'][e] = mvars[e] - if interface['vars'] or interface['body']: - block['interfaced'] = interfaced - mblock = {'block': 'python module', 'body': [ - interface], 'vars': {}, 'name': mname, 'interfaced': block['externals']} - useblock[mname] = {} - usermodules.append(mblock) - if useblock: - block['use'] = useblock - return block - - -def sortvarnames(vars): - indep = [] - dep = [] - for v in list(vars.keys()): - if 'depend' in vars[v] and vars[v]['depend']: - dep.append(v) - else: - indep.append(v) - n = len(dep) - i = 0 - while dep: # XXX: How to catch dependence cycles correctly? 
- v = dep[0] - fl = 0 - for w in dep[1:]: - if w in vars[v]['depend']: - fl = 1 - break - if fl: - dep = dep[1:] + [v] - i = i + 1 - if i > n: - errmess('sortvarnames: failed to compute dependencies because' - ' of cyclic dependencies between ' - + ', '.join(dep) + '\n') - indep = indep + dep - break - else: - indep.append(v) - dep = dep[1:] - n = len(dep) - i = 0 - return indep - - -def analyzecommon(block): - if not hascommon(block): - return block - commonvars = [] - for k in list(block['common'].keys()): - comvars = [] - for e in block['common'][k]: - m = re.match( - r'\A\s*\b(?P.*?)\b\s*(\((?P.*?)\)|)\s*\Z', e, re.I) - if m: - dims = [] - if m.group('dims'): - dims = [x.strip() - for x in markoutercomma(m.group('dims')).split('@,@')] - n = rmbadname1(m.group('name').strip()) - if n in block['vars']: - if 'attrspec' in block['vars'][n]: - block['vars'][n]['attrspec'].append( - 'dimension(%s)' % (','.join(dims))) - else: - block['vars'][n]['attrspec'] = [ - 'dimension(%s)' % (','.join(dims))] - else: - if dims: - block['vars'][n] = { - 'attrspec': ['dimension(%s)' % (','.join(dims))]} - else: - block['vars'][n] = {} - if n not in commonvars: - commonvars.append(n) - else: - n = e - errmess( - 'analyzecommon: failed to extract "[()]" from "%s" in common /%s/.\n' % (e, k)) - comvars.append(n) - block['common'][k] = comvars - if 'commonvars' not in block: - block['commonvars'] = commonvars - else: - block['commonvars'] = block['commonvars'] + commonvars - return block - - -def analyzebody(block, args, tab=''): - global usermodules, skipfuncs, onlyfuncs, f90modulevars - - setmesstext(block) - body = [] - for b in block['body']: - b['parent_block'] = block - if b['block'] in ['function', 'subroutine']: - if args is not None and b['name'] not in args: - continue - else: - as_ = b['args'] - if b['name'] in skipfuncs: - continue - if onlyfuncs and b['name'] not in onlyfuncs: - continue - b['saved_interface'] = crack2fortrangen( - b, '\n' + ' ' * 6, as_interface=True) - 
- else: - as_ = args - b = postcrack(b, as_, tab=tab + '\t') - if b['block'] == 'interface' and not b['body']: - if 'f2pyenhancements' not in b: - continue - if b['block'].replace(' ', '') == 'pythonmodule': - usermodules.append(b) - else: - if b['block'] == 'module': - f90modulevars[b['name']] = b['vars'] - body.append(b) - return body - - -def buildimplicitrules(block): - setmesstext(block) - implicitrules = defaultimplicitrules - attrrules = {} - if 'implicit' in block: - if block['implicit'] is None: - implicitrules = None - if verbose > 1: - outmess( - 'buildimplicitrules: no implicit rules for routine %s.\n' % repr(block['name'])) - else: - for k in list(block['implicit'].keys()): - if block['implicit'][k].get('typespec') not in ['static', 'automatic']: - implicitrules[k] = block['implicit'][k] - else: - attrrules[k] = block['implicit'][k]['typespec'] - return implicitrules, attrrules - - -def myeval(e, g=None, l=None): - r = eval(e, g, l) - if type(r) in [type(0), type(0.0)]: - return r - raise ValueError('r=%r' % (r)) - -getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I) - - -def getlincoef(e, xset): # e = a*x+b ; x in xset - try: - c = int(myeval(e, {}, {})) - return 0, c, None - except Exception: - pass - if getlincoef_re_1.match(e): - return 1, 0, e - len_e = len(e) - for x in xset: - if len(x) > len_e: - continue - if re.search(r'\w\s*\([^)]*\b' + x + r'\b', e): - # skip function calls having x as an argument, e.g max(1, x) - continue - re_1 = re.compile(r'(?P.*?)\b' + x + r'\b(?P.*)', re.I) - m = re_1.match(e) - if m: - try: - m1 = re_1.match(e) - while m1: - ee = '%s(%s)%s' % ( - m1.group('before'), 0, m1.group('after')) - m1 = re_1.match(ee) - b = myeval(ee, {}, {}) - m1 = re_1.match(e) - while m1: - ee = '%s(%s)%s' % ( - m1.group('before'), 1, m1.group('after')) - m1 = re_1.match(ee) - a = myeval(ee, {}, {}) - b - m1 = re_1.match(e) - while m1: - ee = '%s(%s)%s' % ( - m1.group('before'), 0.5, m1.group('after')) - m1 = re_1.match(ee) - c = myeval(ee, 
{}, {}) - # computing another point to be sure that expression is linear - m1 = re_1.match(e) - while m1: - ee = '%s(%s)%s' % ( - m1.group('before'), 1.5, m1.group('after')) - m1 = re_1.match(ee) - c2 = myeval(ee, {}, {}) - if (a * 0.5 + b == c and a * 1.5 + b == c2): - return a, b, x - except Exception: - pass - break - return None, None, None - -_varname_match = re.compile(r'\A[a-z]\w*\Z').match - - -def getarrlen(dl, args, star='*'): - edl = [] - try: - edl.append(myeval(dl[0], {}, {})) - except Exception: - edl.append(dl[0]) - try: - edl.append(myeval(dl[1], {}, {})) - except Exception: - edl.append(dl[1]) - if isinstance(edl[0], int): - p1 = 1 - edl[0] - if p1 == 0: - d = str(dl[1]) - elif p1 < 0: - d = '%s-%s' % (dl[1], -p1) - else: - d = '%s+%s' % (dl[1], p1) - elif isinstance(edl[1], int): - p1 = 1 + edl[1] - if p1 == 0: - d = '-(%s)' % (dl[0]) - else: - d = '%s-(%s)' % (p1, dl[0]) - else: - d = '%s-(%s)+1' % (dl[1], dl[0]) - try: - return repr(myeval(d, {}, {})), None, None - except Exception: - pass - d1, d2 = getlincoef(dl[0], args), getlincoef(dl[1], args) - if None not in [d1[0], d2[0]]: - if (d1[0], d2[0]) == (0, 0): - return repr(d2[1] - d1[1] + 1), None, None - b = d2[1] - d1[1] + 1 - d1 = (d1[0], 0, d1[2]) - d2 = (d2[0], b, d2[2]) - if d1[0] == 0 and d2[2] in args: - if b < 0: - return '%s * %s - %s' % (d2[0], d2[2], -b), d2[2], '+%s)/(%s)' % (-b, d2[0]) - elif b: - return '%s * %s + %s' % (d2[0], d2[2], b), d2[2], '-%s)/(%s)' % (b, d2[0]) - else: - return '%s * %s' % (d2[0], d2[2]), d2[2], ')/(%s)' % (d2[0]) - if d2[0] == 0 and d1[2] in args: - - if b < 0: - return '%s * %s - %s' % (-d1[0], d1[2], -b), d1[2], '+%s)/(%s)' % (-b, -d1[0]) - elif b: - return '%s * %s + %s' % (-d1[0], d1[2], b), d1[2], '-%s)/(%s)' % (b, -d1[0]) - else: - return '%s * %s' % (-d1[0], d1[2]), d1[2], ')/(%s)' % (-d1[0]) - if d1[2] == d2[2] and d1[2] in args: - a = d2[0] - d1[0] - if not a: - return repr(b), None, None - if b < 0: - return '%s * %s - %s' % (a, d1[2], -b), 
d2[2], '+%s)/(%s)' % (-b, a) - elif b: - return '%s * %s + %s' % (a, d1[2], b), d2[2], '-%s)/(%s)' % (b, a) - else: - return '%s * %s' % (a, d1[2]), d2[2], ')/(%s)' % (a) - if d1[0] == d2[0] == 1: - c = str(d1[2]) - if c not in args: - if _varname_match(c): - outmess('\tgetarrlen:variable "%s" undefined\n' % (c)) - c = '(%s)' % c - if b == 0: - d = '%s-%s' % (d2[2], c) - elif b < 0: - d = '%s-%s-%s' % (d2[2], c, -b) - else: - d = '%s-%s+%s' % (d2[2], c, b) - elif d1[0] == 0: - c2 = str(d2[2]) - if c2 not in args: - if _varname_match(c2): - outmess('\tgetarrlen:variable "%s" undefined\n' % (c2)) - c2 = '(%s)' % c2 - if d2[0] == 1: - pass - elif d2[0] == -1: - c2 = '-%s' % c2 - else: - c2 = '%s*%s' % (d2[0], c2) - - if b == 0: - d = c2 - elif b < 0: - d = '%s-%s' % (c2, -b) - else: - d = '%s+%s' % (c2, b) - elif d2[0] == 0: - c1 = str(d1[2]) - if c1 not in args: - if _varname_match(c1): - outmess('\tgetarrlen:variable "%s" undefined\n' % (c1)) - c1 = '(%s)' % c1 - if d1[0] == 1: - c1 = '-%s' % c1 - elif d1[0] == -1: - c1 = '+%s' % c1 - elif d1[0] < 0: - c1 = '+%s*%s' % (-d1[0], c1) - else: - c1 = '-%s*%s' % (d1[0], c1) - - if b == 0: - d = c1 - elif b < 0: - d = '%s-%s' % (c1, -b) - else: - d = '%s+%s' % (c1, b) - else: - c1 = str(d1[2]) - if c1 not in args: - if _varname_match(c1): - outmess('\tgetarrlen:variable "%s" undefined\n' % (c1)) - c1 = '(%s)' % c1 - if d1[0] == 1: - c1 = '-%s' % c1 - elif d1[0] == -1: - c1 = '+%s' % c1 - elif d1[0] < 0: - c1 = '+%s*%s' % (-d1[0], c1) - else: - c1 = '-%s*%s' % (d1[0], c1) - - c2 = str(d2[2]) - if c2 not in args: - if _varname_match(c2): - outmess('\tgetarrlen:variable "%s" undefined\n' % (c2)) - c2 = '(%s)' % c2 - if d2[0] == 1: - pass - elif d2[0] == -1: - c2 = '-%s' % c2 - else: - c2 = '%s*%s' % (d2[0], c2) - - if b == 0: - d = '%s%s' % (c2, c1) - elif b < 0: - d = '%s%s-%s' % (c2, c1, -b) - else: - d = '%s%s+%s' % (c2, c1, b) - return d, None, None - -word_pattern = re.compile(r'\b[a-z][\w$]*\b', re.I) - - -def 
_get_depend_dict(name, vars, deps): - if name in vars: - words = vars[name].get('depend', []) - - if '=' in vars[name] and not isstring(vars[name]): - for word in word_pattern.findall(vars[name]['=']): - if word not in words and word in vars: - words.append(word) - for word in words[:]: - for w in deps.get(word, []) \ - or _get_depend_dict(word, vars, deps): - if w not in words: - words.append(w) - else: - outmess('_get_depend_dict: no dependence info for %s\n' % (repr(name))) - words = [] - deps[name] = words - return words - - -def _calc_depend_dict(vars): - names = list(vars.keys()) - depend_dict = {} - for n in names: - _get_depend_dict(n, vars, depend_dict) - return depend_dict - - -def get_sorted_names(vars): - """ - """ - depend_dict = _calc_depend_dict(vars) - names = [] - for name in list(depend_dict.keys()): - if not depend_dict[name]: - names.append(name) - del depend_dict[name] - while depend_dict: - for name, lst in list(depend_dict.items()): - new_lst = [n for n in lst if n in depend_dict] - if not new_lst: - names.append(name) - del depend_dict[name] - else: - depend_dict[name] = new_lst - return [name for name in names if name in vars] - - -def _kind_func(string): - # XXX: return something sensible. 
- if string[0] in "'\"": - string = string[1:-1] - if real16pattern.match(string): - return 8 - elif real8pattern.match(string): - return 4 - return 'kind(' + string + ')' - - -def _selected_int_kind_func(r): - # XXX: This should be processor dependent - m = 10 ** r - if m <= 2 ** 8: - return 1 - if m <= 2 ** 16: - return 2 - if m <= 2 ** 32: - return 4 - if m <= 2 ** 63: - return 8 - if m <= 2 ** 128: - return 16 - return -1 - - -def _selected_real_kind_func(p, r=0, radix=0): - # XXX: This should be processor dependent - # This is only good for 0 <= p <= 20 - if p < 7: - return 4 - if p < 16: - return 8 - machine = platform.machine().lower() - if machine.startswith(('aarch64', 'power', 'ppc', 'riscv', 's390x', 'sparc')): - if p <= 20: - return 16 - else: - if p < 19: - return 10 - elif p <= 20: - return 16 - return -1 - - -def get_parameters(vars, global_params={}): - params = copy.copy(global_params) - g_params = copy.copy(global_params) - for name, func in [('kind', _kind_func), - ('selected_int_kind', _selected_int_kind_func), - ('selected_real_kind', _selected_real_kind_func), ]: - if name not in g_params: - g_params[name] = func - param_names = [] - for n in get_sorted_names(vars): - if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']: - param_names.append(n) - kind_re = re.compile(r'\bkind\s*\(\s*(?P.*)\s*\)', re.I) - selected_int_kind_re = re.compile( - r'\bselected_int_kind\s*\(\s*(?P.*)\s*\)', re.I) - selected_kind_re = re.compile( - r'\bselected_(int|real)_kind\s*\(\s*(?P.*)\s*\)', re.I) - for n in param_names: - if '=' in vars[n]: - v = vars[n]['='] - if islogical(vars[n]): - v = v.lower() - for repl in [ - ('.false.', 'False'), - ('.true.', 'True'), - # TODO: test .eq., .neq., etc replacements. - ]: - v = v.replace(*repl) - v = kind_re.sub(r'kind("\1")', v) - v = selected_int_kind_re.sub(r'selected_int_kind(\1)', v) - - # We need to act according to the data. 
- # The easy case is if the data has a kind-specifier, - # then we may easily remove those specifiers. - # However, it may be that the user uses other specifiers...(!) - is_replaced = False - if 'kindselector' in vars[n]: - if 'kind' in vars[n]['kindselector']: - orig_v_len = len(v) - v = v.replace('_' + vars[n]['kindselector']['kind'], '') - # Again, this will be true if even a single specifier - # has been replaced, see comment above. - is_replaced = len(v) < orig_v_len - - if not is_replaced: - if not selected_kind_re.match(v): - v_ = v.split('_') - # In case there are additive parameters - if len(v_) > 1: - v = ''.join(v_[:-1]).lower().replace(v_[-1].lower(), '') - - # Currently this will not work for complex numbers. - # There is missing code for extracting a complex number, - # which may be defined in either of these: - # a) (Re, Im) - # b) cmplx(Re, Im) - # c) dcmplx(Re, Im) - # d) cmplx(Re, Im, ) - - if isdouble(vars[n]): - tt = list(v) - for m in real16pattern.finditer(v): - tt[m.start():m.end()] = list( - v[m.start():m.end()].lower().replace('d', 'e')) - v = ''.join(tt) - - elif iscomplex(vars[n]): - # FIXME complex numbers may also have exponents - if v[0] == '(' and v[-1] == ')': - # FIXME, unused l looks like potential bug - l = markoutercomma(v[1:-1]).split('@,@') - - try: - params[n] = eval(v, g_params, params) - except Exception as msg: - params[n] = v - outmess('get_parameters: got "%s" on %s\n' % (msg, repr(v))) - if isstring(vars[n]) and isinstance(params[n], int): - params[n] = chr(params[n]) - nl = n.lower() - if nl != n: - params[nl] = params[n] - else: - print(vars[n]) - outmess( - 'get_parameters:parameter %s does not have value?!\n' % (repr(n))) - return params - - -def _eval_length(length, params): - if length in ['(:)', '(*)', '*']: - return '(*)' - return _eval_scalar(length, params) - -_is_kind_number = re.compile(r'\d+_').match - - -def _eval_scalar(value, params): - if _is_kind_number(value): - value = value.split('_')[0] - try: - 
value = str(eval(value, {}, params)) - except (NameError, SyntaxError, TypeError): - return value - except Exception as msg: - errmess('"%s" in evaluating %r ' - '(available names: %s)\n' - % (msg, value, list(params.keys()))) - return value - - -def analyzevars(block): - global f90modulevars - - setmesstext(block) - implicitrules, attrrules = buildimplicitrules(block) - vars = copy.copy(block['vars']) - if block['block'] == 'function' and block['name'] not in vars: - vars[block['name']] = {} - if '' in block['vars']: - del vars[''] - if 'attrspec' in block['vars']['']: - gen = block['vars']['']['attrspec'] - for n in list(vars.keys()): - for k in ['public', 'private']: - if k in gen: - vars[n] = setattrspec(vars[n], k) - svars = [] - args = block['args'] - for a in args: - try: - vars[a] - svars.append(a) - except KeyError: - pass - for n in list(vars.keys()): - if n not in args: - svars.append(n) - - params = get_parameters(vars, get_useparameters(block)) - - dep_matches = {} - name_match = re.compile(r'\w[\w\d_$]*').match - for v in list(vars.keys()): - m = name_match(v) - if m: - n = v[m.start():m.end()] - try: - dep_matches[n] - except KeyError: - dep_matches[n] = re.compile(r'.*\b%s\b' % (v), re.I).match - for n in svars: - if n[0] in list(attrrules.keys()): - vars[n] = setattrspec(vars[n], attrrules[n[0]]) - if 'typespec' not in vars[n]: - if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']): - if implicitrules: - ln0 = n[0].lower() - for k in list(implicitrules[ln0].keys()): - if k == 'typespec' and implicitrules[ln0][k] == 'undefined': - continue - if k not in vars[n]: - vars[n][k] = implicitrules[ln0][k] - elif k == 'attrspec': - for l in implicitrules[ln0][k]: - vars[n] = setattrspec(vars[n], l) - elif n in block['args']: - outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n' % ( - repr(n), block['name'])) - - if 'charselector' in vars[n]: - if 'len' in vars[n]['charselector']: - l = vars[n]['charselector']['len'] 
- try: - l = str(eval(l, {}, params)) - except Exception: - pass - vars[n]['charselector']['len'] = l - - if 'kindselector' in vars[n]: - if 'kind' in vars[n]['kindselector']: - l = vars[n]['kindselector']['kind'] - try: - l = str(eval(l, {}, params)) - except Exception: - pass - vars[n]['kindselector']['kind'] = l - - savelindims = {} - if 'attrspec' in vars[n]: - attr = vars[n]['attrspec'] - attr.reverse() - vars[n]['attrspec'] = [] - dim, intent, depend, check, note = None, None, None, None, None - for a in attr: - if a[:9] == 'dimension': - dim = (a[9:].strip())[1:-1] - elif a[:6] == 'intent': - intent = (a[6:].strip())[1:-1] - elif a[:6] == 'depend': - depend = (a[6:].strip())[1:-1] - elif a[:5] == 'check': - check = (a[5:].strip())[1:-1] - elif a[:4] == 'note': - note = (a[4:].strip())[1:-1] - else: - vars[n] = setattrspec(vars[n], a) - if intent: - if 'intent' not in vars[n]: - vars[n]['intent'] = [] - for c in [x.strip() for x in markoutercomma(intent).split('@,@')]: - # Remove spaces so that 'in out' becomes 'inout' - tmp = c.replace(' ', '') - if tmp not in vars[n]['intent']: - vars[n]['intent'].append(tmp) - intent = None - if note: - note = note.replace('\\n\\n', '\n\n') - note = note.replace('\\n ', '\n') - if 'note' not in vars[n]: - vars[n]['note'] = [note] - else: - vars[n]['note'].append(note) - note = None - if depend is not None: - if 'depend' not in vars[n]: - vars[n]['depend'] = [] - for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]): - if c not in vars[n]['depend']: - vars[n]['depend'].append(c) - depend = None - if check is not None: - if 'check' not in vars[n]: - vars[n]['check'] = [] - for c in [x.strip() for x in markoutercomma(check).split('@,@')]: - if c not in vars[n]['check']: - vars[n]['check'].append(c) - check = None - if dim and 'dimension' not in vars[n]: - vars[n]['dimension'] = [] - for d in rmbadname([x.strip() for x in markoutercomma(dim).split('@,@')]): - star = '*' - if d == ':': - star = ':' - if d 
in params: - d = str(params[d]) - for p in list(params.keys()): - re_1 = re.compile(r'(?P.*?)\b' + p + r'\b(?P.*)', re.I) - m = re_1.match(d) - while m: - d = m.group('before') + \ - str(params[p]) + m.group('after') - m = re_1.match(d) - if d == star: - dl = [star] - else: - dl = markoutercomma(d, ':').split('@:@') - if len(dl) == 2 and '*' in dl: # e.g. dimension(5:*) - dl = ['*'] - d = '*' - if len(dl) == 1 and not dl[0] == star: - dl = ['1', dl[0]] - if len(dl) == 2: - d, v, di = getarrlen(dl, list(block['vars'].keys())) - if d[:4] == '1 * ': - d = d[4:] - if di and di[-4:] == '/(1)': - di = di[:-4] - if v: - savelindims[d] = v, di - vars[n]['dimension'].append(d) - if 'dimension' in vars[n]: - if isintent_c(vars[n]): - shape_macro = 'shape' - else: - shape_macro = 'shape' # 'fshape' - if isstringarray(vars[n]): - if 'charselector' in vars[n]: - d = vars[n]['charselector'] - if '*' in d: - d = d['*'] - errmess('analyzevars: character array "character*%s %s(%s)" is considered as "character %s(%s)"; "intent(c)" is forced.\n' - % (d, n, - ','.join(vars[n]['dimension']), - n, ','.join(vars[n]['dimension'] + [d]))) - vars[n]['dimension'].append(d) - del vars[n]['charselector'] - if 'intent' not in vars[n]: - vars[n]['intent'] = [] - if 'c' not in vars[n]['intent']: - vars[n]['intent'].append('c') - else: - errmess( - "analyzevars: charselector=%r unhandled." 
% (d)) - if 'check' not in vars[n] and 'args' in block and n in block['args']: - flag = 'depend' not in vars[n] - if flag: - vars[n]['depend'] = [] - vars[n]['check'] = [] - if 'dimension' in vars[n]: - #/----< no check - i = -1 - ni = len(vars[n]['dimension']) - for d in vars[n]['dimension']: - ddeps = [] # dependencies of 'd' - ad = '' - pd = '' - if d not in vars: - if d in savelindims: - pd, ad = '(', savelindims[d][1] - d = savelindims[d][0] - else: - for r in block['args']: - if r not in vars: - continue - if re.match(r'.*?\b' + r + r'\b', d, re.I): - ddeps.append(r) - if d in vars: - if 'attrspec' in vars[d]: - for aa in vars[d]['attrspec']: - if aa[:6] == 'depend': - ddeps += aa[6:].strip()[1:-1].split(',') - if 'depend' in vars[d]: - ddeps = ddeps + vars[d]['depend'] - i = i + 1 - if d in vars and ('depend' not in vars[d]) \ - and ('=' not in vars[d]) and (d not in vars[n]['depend']) \ - and l_or(isintent_in, isintent_inout, isintent_inplace)(vars[n]): - vars[d]['depend'] = [n] - if ni > 1: - vars[d]['='] = '%s%s(%s,%s)%s' % ( - pd, shape_macro, n, i, ad) - else: - vars[d]['='] = '%slen(%s)%s' % (pd, n, ad) - # /---< no check - if 1 and 'check' not in vars[d]: - if ni > 1: - vars[d]['check'] = ['%s%s(%s,%i)%s==%s' - % (pd, shape_macro, n, i, ad, d)] - else: - vars[d]['check'] = [ - '%slen(%s)%s>=%s' % (pd, n, ad, d)] - if 'attrspec' not in vars[d]: - vars[d]['attrspec'] = ['optional'] - if ('optional' not in vars[d]['attrspec']) and\ - ('required' not in vars[d]['attrspec']): - vars[d]['attrspec'].append('optional') - elif d not in ['*', ':']: - #/----< no check - if flag: - if d in vars: - if n not in ddeps: - vars[n]['depend'].append(d) - else: - vars[n]['depend'] = vars[n]['depend'] + ddeps - elif isstring(vars[n]): - length = '1' - if 'charselector' in vars[n]: - if '*' in vars[n]['charselector']: - length = _eval_length(vars[n]['charselector']['*'], - params) - vars[n]['charselector']['*'] = length - elif 'len' in vars[n]['charselector']: - length = 
_eval_length(vars[n]['charselector']['len'], - params) - del vars[n]['charselector']['len'] - vars[n]['charselector']['*'] = length - - if not vars[n]['check']: - del vars[n]['check'] - if flag and not vars[n]['depend']: - del vars[n]['depend'] - if '=' in vars[n]: - if 'attrspec' not in vars[n]: - vars[n]['attrspec'] = [] - if ('optional' not in vars[n]['attrspec']) and \ - ('required' not in vars[n]['attrspec']): - vars[n]['attrspec'].append('optional') - if 'depend' not in vars[n]: - vars[n]['depend'] = [] - for v, m in list(dep_matches.items()): - if m(vars[n]['=']): - vars[n]['depend'].append(v) - if not vars[n]['depend']: - del vars[n]['depend'] - if isscalar(vars[n]): - vars[n]['='] = _eval_scalar(vars[n]['='], params) - - for n in list(vars.keys()): - if n == block['name']: # n is block name - if 'note' in vars[n]: - block['note'] = vars[n]['note'] - if block['block'] == 'function': - if 'result' in block and block['result'] in vars: - vars[n] = appenddecl(vars[n], vars[block['result']]) - if 'prefix' in block: - pr = block['prefix'] - ispure = 0 - isrec = 1 - pr1 = pr.replace('pure', '') - ispure = (not pr == pr1) - pr = pr1.replace('recursive', '') - isrec = (not pr == pr1) - m = typespattern[0].match(pr) - if m: - typespec, selector, attr, edecl = cracktypespec0( - m.group('this'), m.group('after')) - kindselect, charselect, typename = cracktypespec( - typespec, selector) - vars[n]['typespec'] = typespec - if kindselect: - if 'kind' in kindselect: - try: - kindselect['kind'] = eval( - kindselect['kind'], {}, params) - except Exception: - pass - vars[n]['kindselector'] = kindselect - if charselect: - vars[n]['charselector'] = charselect - if typename: - vars[n]['typename'] = typename - if ispure: - vars[n] = setattrspec(vars[n], 'pure') - if isrec: - vars[n] = setattrspec(vars[n], 'recursive') - else: - outmess( - 'analyzevars: prefix (%s) were not used\n' % repr(block['prefix'])) - if not block['block'] in ['module', 'pythonmodule', 'python module', 
'block data']: - if 'commonvars' in block: - neededvars = copy.copy(block['args'] + block['commonvars']) - else: - neededvars = copy.copy(block['args']) - for n in list(vars.keys()): - if l_or(isintent_callback, isintent_aux)(vars[n]): - neededvars.append(n) - if 'entry' in block: - neededvars.extend(list(block['entry'].keys())) - for k in list(block['entry'].keys()): - for n in block['entry'][k]: - if n not in neededvars: - neededvars.append(n) - if block['block'] == 'function': - if 'result' in block: - neededvars.append(block['result']) - else: - neededvars.append(block['name']) - if block['block'] in ['subroutine', 'function']: - name = block['name'] - if name in vars and 'intent' in vars[name]: - block['intent'] = vars[name]['intent'] - if block['block'] == 'type': - neededvars.extend(list(vars.keys())) - for n in list(vars.keys()): - if n not in neededvars: - del vars[n] - return vars - -analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z', re.I) - - -def expr2name(a, block, args=[]): - orig_a = a - a_is_expr = not analyzeargs_re_1.match(a) - if a_is_expr: # `a` is an expression - implicitrules, attrrules = buildimplicitrules(block) - at = determineexprtype(a, block['vars'], implicitrules) - na = 'e_' - for c in a: - c = c.lower() - if c not in string.ascii_lowercase + string.digits: - c = '_' - na = na + c - if na[-1] == '_': - na = na + 'e' - else: - na = na + '_e' - a = na - while a in block['vars'] or a in block['args']: - a = a + 'r' - if a in args: - k = 1 - while a + str(k) in args: - k = k + 1 - a = a + str(k) - if a_is_expr: - block['vars'][a] = at - else: - if a not in block['vars']: - if orig_a in block['vars']: - block['vars'][a] = block['vars'][orig_a] - else: - block['vars'][a] = {} - if 'externals' in block and orig_a in block['externals'] + block['interfaced']: - block['vars'][a] = setattrspec(block['vars'][a], 'external') - return a - - -def analyzeargs(block): - setmesstext(block) - implicitrules, attrrules = buildimplicitrules(block) - if 
'args' not in block: - block['args'] = [] - args = [] - for a in block['args']: - a = expr2name(a, block, args) - args.append(a) - block['args'] = args - if 'entry' in block: - for k, args1 in list(block['entry'].items()): - for a in args1: - if a not in block['vars']: - block['vars'][a] = {} - - for b in block['body']: - if b['name'] in args: - if 'externals' not in block: - block['externals'] = [] - if b['name'] not in block['externals']: - block['externals'].append(b['name']) - if 'result' in block and block['result'] not in block['vars']: - block['vars'][block['result']] = {} - return block - -determineexprtype_re_1 = re.compile(r'\A\(.+?[,].+?\)\Z', re.I) -determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P[\w]+)|)\Z', re.I) -determineexprtype_re_3 = re.compile( - r'\A[+-]?[\d.]+[\d+\-de.]*(_(?P[\w]+)|)\Z', re.I) -determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I) -determineexprtype_re_5 = re.compile(r'\A(?P\w+)\s*\(.*?\)\s*\Z', re.I) - - -def _ensure_exprdict(r): - if isinstance(r, int): - return {'typespec': 'integer'} - if isinstance(r, float): - return {'typespec': 'real'} - if isinstance(r, complex): - return {'typespec': 'complex'} - if isinstance(r, dict): - return r - raise AssertionError(repr(r)) - - -def determineexprtype(expr, vars, rules={}): - if expr in vars: - return _ensure_exprdict(vars[expr]) - expr = expr.strip() - if determineexprtype_re_1.match(expr): - return {'typespec': 'complex'} - m = determineexprtype_re_2.match(expr) - if m: - if 'name' in m.groupdict() and m.group('name'): - outmess( - 'determineexprtype: selected kind types not supported (%s)\n' % repr(expr)) - return {'typespec': 'integer'} - m = determineexprtype_re_3.match(expr) - if m: - if 'name' in m.groupdict() and m.group('name'): - outmess( - 'determineexprtype: selected kind types not supported (%s)\n' % repr(expr)) - return {'typespec': 'real'} - for op in ['+', '-', '*', '/']: - for e in [x.strip() for x in markoutercomma(expr, comma=op).split('@' + op + '@')]: 
- if e in vars: - return _ensure_exprdict(vars[e]) - t = {} - if determineexprtype_re_4.match(expr): # in parenthesis - t = determineexprtype(expr[1:-1], vars, rules) - else: - m = determineexprtype_re_5.match(expr) - if m: - rn = m.group('name') - t = determineexprtype(m.group('name'), vars, rules) - if t and 'attrspec' in t: - del t['attrspec'] - if not t: - if rn[0] in rules: - return _ensure_exprdict(rules[rn[0]]) - if expr[0] in '\'"': - return {'typespec': 'character', 'charselector': {'*': '*'}} - if not t: - outmess( - 'determineexprtype: could not determine expressions (%s) type.\n' % (repr(expr))) - return t - -###### - - -def crack2fortrangen(block, tab='\n', as_interface=False): - global skipfuncs, onlyfuncs - - setmesstext(block) - ret = '' - if isinstance(block, list): - for g in block: - if g and g['block'] in ['function', 'subroutine']: - if g['name'] in skipfuncs: - continue - if onlyfuncs and g['name'] not in onlyfuncs: - continue - ret = ret + crack2fortrangen(g, tab, as_interface=as_interface) - return ret - prefix = '' - name = '' - args = '' - blocktype = block['block'] - if blocktype == 'program': - return '' - argsl = [] - if 'name' in block: - name = block['name'] - if 'args' in block: - vars = block['vars'] - for a in block['args']: - a = expr2name(a, block, argsl) - if not isintent_callback(vars[a]): - argsl.append(a) - if block['block'] == 'function' or argsl: - args = '(%s)' % ','.join(argsl) - f2pyenhancements = '' - if 'f2pyenhancements' in block: - for k in list(block['f2pyenhancements'].keys()): - f2pyenhancements = '%s%s%s %s' % ( - f2pyenhancements, tab + tabchar, k, block['f2pyenhancements'][k]) - intent_lst = block.get('intent', [])[:] - if blocktype == 'function' and 'callback' in intent_lst: - intent_lst.remove('callback') - if intent_lst: - f2pyenhancements = '%s%sintent(%s) %s' %\ - (f2pyenhancements, tab + tabchar, - ','.join(intent_lst), name) - use = '' - if 'use' in block: - use = use2fortran(block['use'], tab + tabchar) 
- common = '' - if 'common' in block: - common = common2fortran(block['common'], tab + tabchar) - if name == 'unknown_interface': - name = '' - result = '' - if 'result' in block: - result = ' result (%s)' % block['result'] - if block['result'] not in argsl: - argsl.append(block['result']) - body = crack2fortrangen(block['body'], tab + tabchar) - vars = vars2fortran( - block, block['vars'], argsl, tab + tabchar, as_interface=as_interface) - mess = '' - if 'from' in block and not as_interface: - mess = '! in %s' % block['from'] - if 'entry' in block: - entry_stmts = '' - for k, i in list(block['entry'].items()): - entry_stmts = '%s%sentry %s(%s)' \ - % (entry_stmts, tab + tabchar, k, ','.join(i)) - body = body + entry_stmts - if blocktype == 'block data' and name == '_BLOCK_DATA_': - name = '' - ret = '%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s' % ( - tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name) - return ret - - -def common2fortran(common, tab=''): - ret = '' - for k in list(common.keys()): - if k == '_BLNK_': - ret = '%s%scommon %s' % (ret, tab, ','.join(common[k])) - else: - ret = '%s%scommon /%s/ %s' % (ret, tab, k, ','.join(common[k])) - return ret - - -def use2fortran(use, tab=''): - ret = '' - for m in list(use.keys()): - ret = '%s%suse %s,' % (ret, tab, m) - if use[m] == {}: - if ret and ret[-1] == ',': - ret = ret[:-1] - continue - if 'only' in use[m] and use[m]['only']: - ret = '%s only:' % (ret) - if 'map' in use[m] and use[m]['map']: - c = ' ' - for k in list(use[m]['map'].keys()): - if k == use[m]['map'][k]: - ret = '%s%s%s' % (ret, c, k) - c = ',' - else: - ret = '%s%s%s=>%s' % (ret, c, k, use[m]['map'][k]) - c = ',' - if ret and ret[-1] == ',': - ret = ret[:-1] - return ret - - -def true_intent_list(var): - lst = var['intent'] - ret = [] - for intent in lst: - try: - c = eval('isintent_%s(var)' % intent) - except NameError: - c = 0 - if c: - ret.append(intent) - return ret - - -def 
vars2fortran(block, vars, args, tab='', as_interface=False): - """ - TODO: - public sub - ... - """ - setmesstext(block) - ret = '' - nout = [] - for a in args: - if a in block['vars']: - nout.append(a) - if 'commonvars' in block: - for a in block['commonvars']: - if a in vars: - if a not in nout: - nout.append(a) - else: - errmess( - 'vars2fortran: Confused?!: "%s" is not defined in vars.\n' % a) - if 'varnames' in block: - nout.extend(block['varnames']) - if not as_interface: - for a in list(vars.keys()): - if a not in nout: - nout.append(a) - for a in nout: - if 'depend' in vars[a]: - for d in vars[a]['depend']: - if d in vars and 'depend' in vars[d] and a in vars[d]['depend']: - errmess( - 'vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n' % (a, d)) - if 'externals' in block and a in block['externals']: - if isintent_callback(vars[a]): - ret = '%s%sintent(callback) %s' % (ret, tab, a) - ret = '%s%sexternal %s' % (ret, tab, a) - if isoptional(vars[a]): - ret = '%s%soptional %s' % (ret, tab, a) - if a in vars and 'typespec' not in vars[a]: - continue - cont = 1 - for b in block['body']: - if a == b['name'] and b['block'] == 'function': - cont = 0 - break - if cont: - continue - if a not in vars: - show(vars) - outmess('vars2fortran: No definition for argument "%s".\n' % a) - continue - if a == block['name'] and not block['block'] == 'function': - continue - if 'typespec' not in vars[a]: - if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']: - if a in args: - ret = '%s%sexternal %s' % (ret, tab, a) - continue - show(vars[a]) - outmess('vars2fortran: No typespec for argument "%s".\n' % a) - continue - vardef = vars[a]['typespec'] - if vardef == 'type' and 'typename' in vars[a]: - vardef = '%s(%s)' % (vardef, vars[a]['typename']) - selector = {} - if 'kindselector' in vars[a]: - selector = vars[a]['kindselector'] - elif 'charselector' in vars[a]: - selector = vars[a]['charselector'] - if '*' in selector: - if selector['*'] in 
['*', ':']: - vardef = '%s*(%s)' % (vardef, selector['*']) - else: - vardef = '%s*%s' % (vardef, selector['*']) - else: - if 'len' in selector: - vardef = '%s(len=%s' % (vardef, selector['len']) - if 'kind' in selector: - vardef = '%s,kind=%s)' % (vardef, selector['kind']) - else: - vardef = '%s)' % (vardef) - elif 'kind' in selector: - vardef = '%s(kind=%s)' % (vardef, selector['kind']) - c = ' ' - if 'attrspec' in vars[a]: - attr = [l for l in vars[a]['attrspec'] - if l not in ['external']] - if attr: - vardef = '%s, %s' % (vardef, ','.join(attr)) - c = ',' - if 'dimension' in vars[a]: - vardef = '%s%sdimension(%s)' % ( - vardef, c, ','.join(vars[a]['dimension'])) - c = ',' - if 'intent' in vars[a]: - lst = true_intent_list(vars[a]) - if lst: - vardef = '%s%sintent(%s)' % (vardef, c, ','.join(lst)) - c = ',' - if 'check' in vars[a]: - vardef = '%s%scheck(%s)' % (vardef, c, ','.join(vars[a]['check'])) - c = ',' - if 'depend' in vars[a]: - vardef = '%s%sdepend(%s)' % ( - vardef, c, ','.join(vars[a]['depend'])) - c = ',' - if '=' in vars[a]: - v = vars[a]['='] - if vars[a]['typespec'] in ['complex', 'double complex']: - try: - v = eval(v) - v = '(%s,%s)' % (v.real, v.imag) - except Exception: - pass - vardef = '%s :: %s=%s' % (vardef, a, v) - else: - vardef = '%s :: %s' % (vardef, a) - ret = '%s%s%s' % (ret, tab, vardef) - return ret -###### - - -def crackfortran(files): - global usermodules - - outmess('Reading fortran codes...\n', 0) - readfortrancode(files, crackline) - outmess('Post-processing...\n', 0) - usermodules = [] - postlist = postcrack(grouplist[0]) - outmess('Post-processing (stage 2)...\n', 0) - postlist = postcrack2(postlist) - return usermodules + postlist - - -def crack2fortran(block): - global f2py_version - - pyf = crack2fortrangen(block) + '\n' - header = """! -*- f90 -*- -! Note: the context of this file is case sensitive. -""" - footer = """ -! This file was auto-generated with f2py (version:%s). -! 
See http://cens.ioc.ee/projects/f2py2e/ -""" % (f2py_version) - return header + pyf + footer - -if __name__ == "__main__": - files = [] - funcs = [] - f = 1 - f2 = 0 - f3 = 0 - showblocklist = 0 - for l in sys.argv[1:]: - if l == '': - pass - elif l[0] == ':': - f = 0 - elif l == '-quiet': - quiet = 1 - verbose = 0 - elif l == '-verbose': - verbose = 2 - quiet = 0 - elif l == '-fix': - if strictf77: - outmess( - 'Use option -f90 before -fix if Fortran 90 code is in fix form.\n', 0) - skipemptyends = 1 - sourcecodeform = 'fix' - elif l == '-skipemptyends': - skipemptyends = 1 - elif l == '--ignore-contains': - ignorecontains = 1 - elif l == '-f77': - strictf77 = 1 - sourcecodeform = 'fix' - elif l == '-f90': - strictf77 = 0 - sourcecodeform = 'free' - skipemptyends = 1 - elif l == '-h': - f2 = 1 - elif l == '-show': - showblocklist = 1 - elif l == '-m': - f3 = 1 - elif l[0] == '-': - errmess('Unknown option %s\n' % repr(l)) - elif f2: - f2 = 0 - pyffilename = l - elif f3: - f3 = 0 - f77modulename = l - elif f: - try: - open(l).close() - files.append(l) - except IOError as detail: - errmess('IOError: %s\n' % str(detail)) - else: - funcs.append(l) - if not strictf77 and f77modulename and not skipemptyends: - outmess("""\ - Warning: You have specified module name for non Fortran 77 code - that should not need one (expect if you are scanning F90 code - for non module blocks but then you should use flag -skipemptyends - and also be sure that the files do not contain programs without program statement). 
-""", 0) - - postlist = crackfortran(files) - if pyffilename: - outmess('Writing fortran code to file %s\n' % repr(pyffilename), 0) - pyf = crack2fortran(postlist) - with open(pyffilename, 'w') as f: - f.write(pyf) - if showblocklist: - show(postlist) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/diagnose.py b/venv/lib/python3.7/site-packages/numpy/f2py/diagnose.py deleted file mode 100644 index 0241fed..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/diagnose.py +++ /dev/null @@ -1,156 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, absolute_import, print_function - -import os -import sys -import tempfile - - -def run_command(cmd): - print('Running %r:' % (cmd)) - os.system(cmd) - print('------') - - -def run(): - _path = os.getcwd() - os.chdir(tempfile.gettempdir()) - print('------') - print('os.name=%r' % (os.name)) - print('------') - print('sys.platform=%r' % (sys.platform)) - print('------') - print('sys.version:') - print(sys.version) - print('------') - print('sys.prefix:') - print(sys.prefix) - print('------') - print('sys.path=%r' % (':'.join(sys.path))) - print('------') - - try: - import numpy - has_newnumpy = 1 - except ImportError: - print('Failed to import new numpy:', sys.exc_info()[1]) - has_newnumpy = 0 - - try: - from numpy.f2py import f2py2e - has_f2py2e = 1 - except ImportError: - print('Failed to import f2py2e:', sys.exc_info()[1]) - has_f2py2e = 0 - - try: - import numpy.distutils - has_numpy_distutils = 2 - except ImportError: - try: - import numpy_distutils - has_numpy_distutils = 1 - except ImportError: - print('Failed to import numpy_distutils:', sys.exc_info()[1]) - has_numpy_distutils = 0 - - if has_newnumpy: - try: - print('Found new numpy version %r in %s' % - (numpy.__version__, numpy.__file__)) - except Exception as msg: - print('error:', msg) - print('------') - - if has_f2py2e: - try: - print('Found f2py2e version %r in %s' % - (f2py2e.__version__.version, f2py2e.__file__)) - except Exception 
as msg: - print('error:', msg) - print('------') - - if has_numpy_distutils: - try: - if has_numpy_distutils == 2: - print('Found numpy.distutils version %r in %r' % ( - numpy.distutils.__version__, - numpy.distutils.__file__)) - else: - print('Found numpy_distutils version %r in %r' % ( - numpy_distutils.numpy_distutils_version.numpy_distutils_version, - numpy_distutils.__file__)) - print('------') - except Exception as msg: - print('error:', msg) - print('------') - try: - if has_numpy_distutils == 1: - print( - 'Importing numpy_distutils.command.build_flib ...', end=' ') - import numpy_distutils.command.build_flib as build_flib - print('ok') - print('------') - try: - print( - 'Checking availability of supported Fortran compilers:') - for compiler_class in build_flib.all_compilers: - compiler_class(verbose=1).is_available() - print('------') - except Exception as msg: - print('error:', msg) - print('------') - except Exception as msg: - print( - 'error:', msg, '(ignore it, build_flib is obsolute for numpy.distutils 0.2.2 and up)') - print('------') - try: - if has_numpy_distutils == 2: - print('Importing numpy.distutils.fcompiler ...', end=' ') - import numpy.distutils.fcompiler as fcompiler - else: - print('Importing numpy_distutils.fcompiler ...', end=' ') - import numpy_distutils.fcompiler as fcompiler - print('ok') - print('------') - try: - print('Checking availability of supported Fortran compilers:') - fcompiler.show_fcompilers() - print('------') - except Exception as msg: - print('error:', msg) - print('------') - except Exception as msg: - print('error:', msg) - print('------') - try: - if has_numpy_distutils == 2: - print('Importing numpy.distutils.cpuinfo ...', end=' ') - from numpy.distutils.cpuinfo import cpuinfo - print('ok') - print('------') - else: - try: - print( - 'Importing numpy_distutils.command.cpuinfo ...', end=' ') - from numpy_distutils.command.cpuinfo import cpuinfo - print('ok') - print('------') - except Exception as msg: - 
print('error:', msg, '(ignore it)') - print('Importing numpy_distutils.cpuinfo ...', end=' ') - from numpy_distutils.cpuinfo import cpuinfo - print('ok') - print('------') - cpu = cpuinfo() - print('CPU information:', end=' ') - for name in dir(cpuinfo): - if name[0] == '_' and name[1] != '_' and getattr(cpu, name[1:])(): - print(name[1:], end=' ') - print('------') - except Exception as msg: - print('error:', msg) - print('------') - os.chdir(_path) -if __name__ == "__main__": - run() diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/f2py2e.py b/venv/lib/python3.7/site-packages/numpy/f2py/f2py2e.py deleted file mode 100644 index d03eff9..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/f2py2e.py +++ /dev/null @@ -1,696 +0,0 @@ -#!/usr/bin/env python -""" - -f2py2e - Fortran to Python C/API generator. 2nd Edition. - See __usage__ below. - -Copyright 1999--2011 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/05/06 08:31:19 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -import sys -import os -import pprint -import re - -from . import crackfortran -from . import rules -from . import cb_rules -from . import auxfuncs -from . import cfuncs -from . import f90mod_rules -from . import __version__ -from . import capi_maps - -f2py_version = __version__.version -errmess = sys.stderr.write -# outmess=sys.stdout.write -show = pprint.pprint -outmess = auxfuncs.outmess - -try: - from numpy import __version__ as numpy_version -except ImportError: - numpy_version = 'N/A' - -__usage__ = """\ -Usage: - -1) To construct extension module sources: - - f2py [] [[[only:]||[skip:]] \\ - ] \\ - [: ...] 
- -2) To compile fortran files and build extension modules: - - f2py -c [, , ] - -3) To generate signature files: - - f2py -h ...< same options as in (1) > - -Description: This program generates a Python C/API file (module.c) - that contains wrappers for given fortran functions so that they - can be called from Python. With the -c option the corresponding - extension modules are built. - -Options: - - --2d-numpy Use numpy.f2py tool with NumPy support. [DEFAULT] - --2d-numeric Use f2py2e tool with Numeric support. - --2d-numarray Use f2py2e tool with Numarray support. - --g3-numpy Use 3rd generation f2py from the separate f2py package. - [NOT AVAILABLE YET] - - -h Write signatures of the fortran routines to file - and exit. You can then edit and use it instead - of . If ==stdout then the - signatures are printed to stdout. - Names of fortran routines for which Python C/API - functions will be generated. Default is all that are found - in . - Paths to fortran/signature files that will be scanned for - in order to determine their signatures. - skip: Ignore fortran functions that follow until `:'. - only: Use only fortran functions that follow until `:'. - : Get back to mode. - - -m Name of the module; f2py generates a Python/C API - file module.c or extension module . - Default is 'untitled'. - - --[no-]lower Do [not] lower the cases in . By default, - --lower is assumed with -h key, and --no-lower without -h key. - - --build-dir All f2py generated files are created in . - Default is tempfile.mkdtemp(). - - --overwrite-signature Overwrite existing signature file. - - --[no-]latex-doc Create (or not) module.tex. - Default is --no-latex-doc. - --short-latex Create 'incomplete' LaTeX document (without commands - \\documentclass, \\tableofcontents, and \\begin{document}, - \\end{document}). - - --[no-]rest-doc Create (or not) module.rst. - Default is --no-rest-doc. - - --debug-capi Create C/API code that reports the state of the wrappers - during runtime. 
Useful for debugging. - - --[no-]wrap-functions Create Fortran subroutine wrappers to Fortran 77 - functions. --wrap-functions is default because it ensures - maximum portability/compiler independence. - - --include-paths ::... Search include files from the given - directories. - - --help-link [..] List system resources found by system_info.py. See also - --link- switch below. [..] is optional list - of resources names. E.g. try 'f2py --help-link lapack_opt'. - - --f2cmap Load Fortran-to-Python KIND specification from the given - file. Default: .f2py_f2cmap in current directory. - - --quiet Run quietly. - --verbose Run with extra verbosity. - -v Print f2py version ID and exit. - - -numpy.distutils options (only effective with -c): - - --fcompiler= Specify Fortran compiler type by vendor - --compiler= Specify C compiler type (as defined by distutils) - - --help-fcompiler List available Fortran compilers and exit - --f77exec= Specify the path to F77 compiler - --f90exec= Specify the path to F90 compiler - --f77flags= Specify F77 compiler flags - --f90flags= Specify F90 compiler flags - --opt= Specify optimization flags - --arch= Specify architecture specific optimization flags - --noopt Compile without optimization - --noarch Compile without arch-dependent optimization - --debug Compile with debugging information - -Extra options (only effective with -c): - - --link- Link extension module with as defined - by numpy.distutils/system_info.py. E.g. to link - with optimized LAPACK libraries (vecLib on MacOSX, - ATLAS elsewhere), use --link-lapack_opt. - See also --help-link switch. - - -L/path/to/lib/ -l - -D -U - -I/path/to/include/ - .o .so .a - - Using the following macros may be required with non-gcc Fortran - compilers: - -DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN - -DUNDERSCORE_G77 - - When using -DF2PY_REPORT_ATEXIT, a performance report of F2PY - interface is printed out at exit (platforms: Linux). 
- - When using -DF2PY_REPORT_ON_ARRAY_COPY=, a message is - sent to stderr whenever F2PY interface makes a copy of an - array. Integer sets the threshold for array sizes when - a message should be shown. - -Version: %s -numpy Version: %s -Requires: Python 2.3 or higher. -License: NumPy license (see LICENSE.txt in the NumPy source code) -Copyright 1999 - 2011 Pearu Peterson all rights reserved. -http://cens.ioc.ee/projects/f2py2e/""" % (f2py_version, numpy_version) - - -def scaninputline(inputline): - files, skipfuncs, onlyfuncs, debug = [], [], [], [] - f, f2, f3, f5, f6, f7, f8, f9, f10 = 1, 0, 0, 0, 0, 0, 0, 0, 0 - verbose = 1 - dolc = -1 - dolatexdoc = 0 - dorestdoc = 0 - wrapfuncs = 1 - buildpath = '.' - include_paths = [] - signsfile, modulename = None, None - options = {'buildpath': buildpath, - 'coutput': None, - 'f2py_wrapper_output': None} - for l in inputline: - if l == '': - pass - elif l == 'only:': - f = 0 - elif l == 'skip:': - f = -1 - elif l == ':': - f = 1 - elif l[:8] == '--debug-': - debug.append(l[8:]) - elif l == '--lower': - dolc = 1 - elif l == '--build-dir': - f6 = 1 - elif l == '--no-lower': - dolc = 0 - elif l == '--quiet': - verbose = 0 - elif l == '--verbose': - verbose += 1 - elif l == '--latex-doc': - dolatexdoc = 1 - elif l == '--no-latex-doc': - dolatexdoc = 0 - elif l == '--rest-doc': - dorestdoc = 1 - elif l == '--no-rest-doc': - dorestdoc = 0 - elif l == '--wrap-functions': - wrapfuncs = 1 - elif l == '--no-wrap-functions': - wrapfuncs = 0 - elif l == '--short-latex': - options['shortlatex'] = 1 - elif l == '--coutput': - f8 = 1 - elif l == '--f2py-wrapper-output': - f9 = 1 - elif l == '--f2cmap': - f10 = 1 - elif l == '--overwrite-signature': - options['h-overwrite'] = 1 - elif l == '-h': - f2 = 1 - elif l == '-m': - f3 = 1 - elif l[:2] == '-v': - print(f2py_version) - sys.exit() - elif l == '--show-compilers': - f5 = 1 - elif l[:8] == '-include': - cfuncs.outneeds['userincludes'].append(l[9:-1]) - cfuncs.userincludes[l[9:-1]] = 
'#include ' + l[8:] - elif l[:15] in '--include_paths': - outmess( - 'f2py option --include_paths is deprecated, use --include-paths instead.\n') - f7 = 1 - elif l[:15] in '--include-paths': - f7 = 1 - elif l[0] == '-': - errmess('Unknown option %s\n' % repr(l)) - sys.exit() - elif f2: - f2 = 0 - signsfile = l - elif f3: - f3 = 0 - modulename = l - elif f6: - f6 = 0 - buildpath = l - elif f7: - f7 = 0 - include_paths.extend(l.split(os.pathsep)) - elif f8: - f8 = 0 - options["coutput"] = l - elif f9: - f9 = 0 - options["f2py_wrapper_output"] = l - elif f10: - f10 = 0 - options["f2cmap_file"] = l - elif f == 1: - try: - with open(l): - pass - files.append(l) - except IOError as detail: - errmess('IOError: %s. Skipping file "%s".\n' % - (str(detail), l)) - elif f == -1: - skipfuncs.append(l) - elif f == 0: - onlyfuncs.append(l) - if not f5 and not files and not modulename: - print(__usage__) - sys.exit() - if not os.path.isdir(buildpath): - if not verbose: - outmess('Creating build directory %s' % (buildpath)) - os.mkdir(buildpath) - if signsfile: - signsfile = os.path.join(buildpath, signsfile) - if signsfile and os.path.isfile(signsfile) and 'h-overwrite' not in options: - errmess( - 'Signature file "%s" exists!!! 
Use --overwrite-signature to overwrite.\n' % (signsfile)) - sys.exit() - - options['debug'] = debug - options['verbose'] = verbose - if dolc == -1 and not signsfile: - options['do-lower'] = 0 - else: - options['do-lower'] = dolc - if modulename: - options['module'] = modulename - if signsfile: - options['signsfile'] = signsfile - if onlyfuncs: - options['onlyfuncs'] = onlyfuncs - if skipfuncs: - options['skipfuncs'] = skipfuncs - options['dolatexdoc'] = dolatexdoc - options['dorestdoc'] = dorestdoc - options['wrapfuncs'] = wrapfuncs - options['buildpath'] = buildpath - options['include_paths'] = include_paths - options.setdefault('f2cmap_file', None) - return files, options - - -def callcrackfortran(files, options): - rules.options = options - crackfortran.debug = options['debug'] - crackfortran.verbose = options['verbose'] - if 'module' in options: - crackfortran.f77modulename = options['module'] - if 'skipfuncs' in options: - crackfortran.skipfuncs = options['skipfuncs'] - if 'onlyfuncs' in options: - crackfortran.onlyfuncs = options['onlyfuncs'] - crackfortran.include_paths[:] = options['include_paths'] - crackfortran.dolowercase = options['do-lower'] - postlist = crackfortran.crackfortran(files) - if 'signsfile' in options: - outmess('Saving signatures to file "%s"\n' % (options['signsfile'])) - pyf = crackfortran.crack2fortran(postlist) - if options['signsfile'][-6:] == 'stdout': - sys.stdout.write(pyf) - else: - with open(options['signsfile'], 'w') as f: - f.write(pyf) - if options["coutput"] is None: - for mod in postlist: - mod["coutput"] = "%smodule.c" % mod["name"] - else: - for mod in postlist: - mod["coutput"] = options["coutput"] - if options["f2py_wrapper_output"] is None: - for mod in postlist: - mod["f2py_wrapper_output"] = "%s-f2pywrappers.f" % mod["name"] - else: - for mod in postlist: - mod["f2py_wrapper_output"] = options["f2py_wrapper_output"] - return postlist - - -def buildmodules(lst): - cfuncs.buildcfuncs() - outmess('Building 
modules...\n') - modules, mnames, isusedby = [], [], {} - for i in range(len(lst)): - if '__user__' in lst[i]['name']: - cb_rules.buildcallbacks(lst[i]) - else: - if 'use' in lst[i]: - for u in lst[i]['use'].keys(): - if u not in isusedby: - isusedby[u] = [] - isusedby[u].append(lst[i]['name']) - modules.append(lst[i]) - mnames.append(lst[i]['name']) - ret = {} - for i in range(len(mnames)): - if mnames[i] in isusedby: - outmess('\tSkipping module "%s" which is used by %s.\n' % ( - mnames[i], ','.join(['"%s"' % s for s in isusedby[mnames[i]]]))) - else: - um = [] - if 'use' in modules[i]: - for u in modules[i]['use'].keys(): - if u in isusedby and u in mnames: - um.append(modules[mnames.index(u)]) - else: - outmess( - '\tModule "%s" uses nonexisting "%s" which will be ignored.\n' % (mnames[i], u)) - ret[mnames[i]] = {} - dict_append(ret[mnames[i]], rules.buildmodule(modules[i], um)) - return ret - - -def dict_append(d_out, d_in): - for (k, v) in d_in.items(): - if k not in d_out: - d_out[k] = [] - if isinstance(v, list): - d_out[k] = d_out[k] + v - else: - d_out[k].append(v) - - -def run_main(comline_list): - """ - Equivalent to running:: - - f2py - - where ``=string.join(,' ')``, but in Python. Unless - ``-h`` is used, this function returns a dictionary containing - information on generated modules and their dependencies on source - files. For example, the command ``f2py -m scalar scalar.f`` can be - executed from Python as follows - - You cannot build extension modules with this function, that is, - using ``-c`` is not allowed. Use ``compile`` command instead - - Examples - -------- - .. 
include:: run_main_session.dat - :literal: - - """ - crackfortran.reset_global_f2py_vars() - f2pydir = os.path.dirname(os.path.abspath(cfuncs.__file__)) - fobjhsrc = os.path.join(f2pydir, 'src', 'fortranobject.h') - fobjcsrc = os.path.join(f2pydir, 'src', 'fortranobject.c') - files, options = scaninputline(comline_list) - auxfuncs.options = options - capi_maps.load_f2cmap_file(options['f2cmap_file']) - postlist = callcrackfortran(files, options) - isusedby = {} - for i in range(len(postlist)): - if 'use' in postlist[i]: - for u in postlist[i]['use'].keys(): - if u not in isusedby: - isusedby[u] = [] - isusedby[u].append(postlist[i]['name']) - for i in range(len(postlist)): - if postlist[i]['block'] == 'python module' and '__user__' in postlist[i]['name']: - if postlist[i]['name'] in isusedby: - # if not quiet: - outmess('Skipping Makefile build for module "%s" which is used by %s\n' % ( - postlist[i]['name'], ','.join(['"%s"' % s for s in isusedby[postlist[i]['name']]]))) - if 'signsfile' in options: - if options['verbose'] > 1: - outmess( - 'Stopping. Edit the signature file and then run f2py on the signature file: ') - outmess('%s %s\n' % - (os.path.basename(sys.argv[0]), options['signsfile'])) - return - for i in range(len(postlist)): - if postlist[i]['block'] != 'python module': - if 'python module' not in options: - errmess( - 'Tip: If your original code is Fortran source then you must use -m option.\n') - raise TypeError('All blocks must be python module blocks but got %s' % ( - repr(postlist[i]['block']))) - auxfuncs.debugoptions = options['debug'] - f90mod_rules.options = options - auxfuncs.wrapfuncs = options['wrapfuncs'] - - ret = buildmodules(postlist) - - for mn in ret.keys(): - dict_append(ret[mn], {'csrc': fobjcsrc, 'h': fobjhsrc}) - return ret - - -def filter_files(prefix, suffix, files, remove_prefix=None): - """ - Filter files by prefix and suffix. 
- """ - filtered, rest = [], [] - match = re.compile(prefix + r'.*' + suffix + r'\Z').match - if remove_prefix: - ind = len(prefix) - else: - ind = 0 - for file in [x.strip() for x in files]: - if match(file): - filtered.append(file[ind:]) - else: - rest.append(file) - return filtered, rest - - -def get_prefix(module): - p = os.path.dirname(os.path.dirname(module.__file__)) - return p - - -def run_compile(): - """ - Do it all in one call! - """ - import tempfile - - i = sys.argv.index('-c') - del sys.argv[i] - - remove_build_dir = 0 - try: - i = sys.argv.index('--build-dir') - except ValueError: - i = None - if i is not None: - build_dir = sys.argv[i + 1] - del sys.argv[i + 1] - del sys.argv[i] - else: - remove_build_dir = 1 - build_dir = tempfile.mkdtemp() - - _reg1 = re.compile(r'[-][-]link[-]') - sysinfo_flags = [_m for _m in sys.argv[1:] if _reg1.match(_m)] - sys.argv = [_m for _m in sys.argv if _m not in sysinfo_flags] - if sysinfo_flags: - sysinfo_flags = [f[7:] for f in sysinfo_flags] - - _reg2 = re.compile( - r'[-][-]((no[-]|)(wrap[-]functions|lower)|debug[-]capi|quiet)|[-]include') - f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)] - sys.argv = [_m for _m in sys.argv if _m not in f2py_flags] - f2py_flags2 = [] - fl = 0 - for a in sys.argv[1:]: - if a in ['only:', 'skip:']: - fl = 1 - elif a == ':': - fl = 0 - if fl or a == ':': - f2py_flags2.append(a) - if f2py_flags2 and f2py_flags2[-1] != ':': - f2py_flags2.append(':') - f2py_flags.extend(f2py_flags2) - - sys.argv = [_m for _m in sys.argv if _m not in f2py_flags2] - _reg3 = re.compile( - r'[-][-]((f(90)?compiler([-]exec|)|compiler)=|help[-]compiler)') - flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)] - sys.argv = [_m for _m in sys.argv if _m not in flib_flags] - _reg4 = re.compile( - r'[-][-]((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help[-]fcompiler))') - fc_flags = [_m for _m in sys.argv[1:] if _reg4.match(_m)] - sys.argv = [_m for _m in sys.argv if _m not in fc_flags] 
- - if 1: - del_list = [] - for s in flib_flags: - v = '--fcompiler=' - if s[:len(v)] == v: - from numpy.distutils import fcompiler - fcompiler.load_all_fcompiler_classes() - allowed_keys = list(fcompiler.fcompiler_class.keys()) - nv = ov = s[len(v):].lower() - if ov not in allowed_keys: - vmap = {} # XXX - try: - nv = vmap[ov] - except KeyError: - if ov not in vmap.values(): - print('Unknown vendor: "%s"' % (s[len(v):])) - nv = ov - i = flib_flags.index(s) - flib_flags[i] = '--fcompiler=' + nv - continue - for s in del_list: - i = flib_flags.index(s) - del flib_flags[i] - assert len(flib_flags) <= 2, repr(flib_flags) - - _reg5 = re.compile(r'[-][-](verbose)') - setup_flags = [_m for _m in sys.argv[1:] if _reg5.match(_m)] - sys.argv = [_m for _m in sys.argv if _m not in setup_flags] - - if '--quiet' in f2py_flags: - setup_flags.append('--quiet') - - modulename = 'untitled' - sources = sys.argv[1:] - - for optname in ['--include_paths', '--include-paths', '--f2cmap']: - if optname in sys.argv: - i = sys.argv.index(optname) - f2py_flags.extend(sys.argv[i:i + 2]) - del sys.argv[i + 1], sys.argv[i] - sources = sys.argv[1:] - - if '-m' in sys.argv: - i = sys.argv.index('-m') - modulename = sys.argv[i + 1] - del sys.argv[i + 1], sys.argv[i] - sources = sys.argv[1:] - else: - from numpy.distutils.command.build_src import get_f2py_modulename - pyf_files, sources = filter_files('', '[.]pyf([.]src|)', sources) - sources = pyf_files + sources - for f in pyf_files: - modulename = get_f2py_modulename(f) - if modulename: - break - - extra_objects, sources = filter_files('', '[.](o|a|so)', sources) - include_dirs, sources = filter_files('-I', '', sources, remove_prefix=1) - library_dirs, sources = filter_files('-L', '', sources, remove_prefix=1) - libraries, sources = filter_files('-l', '', sources, remove_prefix=1) - undef_macros, sources = filter_files('-U', '', sources, remove_prefix=1) - define_macros, sources = filter_files('-D', '', sources, remove_prefix=1) - for i in 
range(len(define_macros)): - name_value = define_macros[i].split('=', 1) - if len(name_value) == 1: - name_value.append(None) - if len(name_value) == 2: - define_macros[i] = tuple(name_value) - else: - print('Invalid use of -D:', name_value) - - from numpy.distutils.system_info import get_info - - num_info = {} - if num_info: - include_dirs.extend(num_info.get('include_dirs', [])) - - from numpy.distutils.core import setup, Extension - ext_args = {'name': modulename, 'sources': sources, - 'include_dirs': include_dirs, - 'library_dirs': library_dirs, - 'libraries': libraries, - 'define_macros': define_macros, - 'undef_macros': undef_macros, - 'extra_objects': extra_objects, - 'f2py_options': f2py_flags, - } - - if sysinfo_flags: - from numpy.distutils.misc_util import dict_append - for n in sysinfo_flags: - i = get_info(n) - if not i: - outmess('No %s resources found in system' - ' (try `f2py --help-link`)\n' % (repr(n))) - dict_append(ext_args, **i) - - ext = Extension(**ext_args) - sys.argv = [sys.argv[0]] + setup_flags - sys.argv.extend(['build', - '--build-temp', build_dir, - '--build-base', build_dir, - '--build-platlib', '.']) - if fc_flags: - sys.argv.extend(['config_fc'] + fc_flags) - if flib_flags: - sys.argv.extend(['build_ext'] + flib_flags) - - setup(ext_modules=[ext]) - - if remove_build_dir and os.path.exists(build_dir): - import shutil - outmess('Removing build directory %s\n' % (build_dir)) - shutil.rmtree(build_dir) - - -def main(): - if '--help-link' in sys.argv[1:]: - sys.argv.remove('--help-link') - from numpy.distutils.system_info import show_all - show_all() - return - - # Probably outdated options that were not working before 1.16 - if '--g3-numpy' in sys.argv[1:]: - sys.stderr.write("G3 f2py support is not implemented, yet.\\n") - sys.exit(1) - elif '--2e-numeric' in sys.argv[1:]: - sys.argv.remove('--2e-numeric') - elif '--2e-numarray' in sys.argv[1:]: - # Note that this errors becaust the -DNUMARRAY argument is - # not recognized. 
Just here for back compatibility and the - # error message. - sys.argv.append("-DNUMARRAY") - sys.argv.remove('--2e-numarray') - elif '--2e-numpy' in sys.argv[1:]: - sys.argv.remove('--2e-numpy') - else: - pass - - if '-c' in sys.argv[1:]: - run_compile() - else: - run_main(sys.argv[1:]) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/f2py_testing.py b/venv/lib/python3.7/site-packages/numpy/f2py/f2py_testing.py deleted file mode 100644 index f5d5fa6..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/f2py_testing.py +++ /dev/null @@ -1,48 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import re - -from numpy.testing import jiffies, memusage - - -def cmdline(): - m = re.compile(r'\A\d+\Z') - args = [] - repeat = 1 - for a in sys.argv[1:]: - if m.match(a): - repeat = eval(a) - else: - args.append(a) - f2py_opts = ' '.join(args) - return repeat, f2py_opts - - -def run(runtest, test_functions, repeat=1): - l = [(t, repr(t.__doc__.split('\n')[1].strip())) for t in test_functions] - start_memusage = memusage() - diff_memusage = None - start_jiffies = jiffies() - i = 0 - while i < repeat: - i += 1 - for t, fname in l: - runtest(t) - if start_memusage is None: - continue - if diff_memusage is None: - diff_memusage = memusage() - start_memusage - else: - diff_memusage2 = memusage() - start_memusage - if diff_memusage2 != diff_memusage: - print('memory usage change at step %i:' % i, - diff_memusage2 - diff_memusage, - fname) - diff_memusage = diff_memusage2 - current_memusage = memusage() - print('run', repeat * len(test_functions), 'tests', - 'in %.2f seconds' % ((jiffies() - start_jiffies) / 100.0)) - if start_memusage: - print('initial virtual memory size:', start_memusage, 'bytes') - print('current virtual memory size:', current_memusage, 'bytes') diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/f90mod_rules.py b/venv/lib/python3.7/site-packages/numpy/f2py/f90mod_rules.py deleted file mode 100644 index 
85eae80..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/f90mod_rules.py +++ /dev/null @@ -1,272 +0,0 @@ -#!/usr/bin/env python -""" - -Build F90 module support for f2py2e. - -Copyright 2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/02/03 19:30:23 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -__version__ = "$Revision: 1.27 $"[10:-1] - -f2py_version = 'See `f2py -v`' - -import numpy as np - -from . import capi_maps -from . import func2subr -from .crackfortran import undo_rmbadname, undo_rmbadname1 - -# The eviroment provided by auxfuncs.py is needed for some calls to eval. -# As the needed functions cannot be determined by static inspection of the -# code, it is safest to use import * pending a major refactoring of f2py. -from .auxfuncs import * - -options = {} - - -def findf90modules(m): - if ismodule(m): - return [m] - if not hasbody(m): - return [] - ret = [] - for b in m['body']: - if ismodule(b): - ret.append(b) - else: - ret = ret + findf90modules(b) - return ret - -fgetdims1 = """\ - external f2pysetdata - logical ns - integer r,i - integer(%d) s(*) - ns = .FALSE. - if (allocated(d)) then - do i=1,r - if ((size(d,i).ne.s(i)).and.(s(i).ge.0)) then - ns = .TRUE. - end if - end do - if (ns) then - deallocate(d) - end if - end if - if ((.not.allocated(d)).and.(s(1).ge.1)) then""" % np.intp().itemsize - -fgetdims2 = """\ - end if - if (allocated(d)) then - do i=1,r - s(i) = size(d,i) - end do - end if - flag = 1 - call f2pysetdata(d,allocated(d))""" - -fgetdims2_sa = """\ - end if - if (allocated(d)) then - do i=1,r - s(i) = size(d,i) - end do - !s(r) must be equal to len(d(1)) - end if - flag = 2 - call f2pysetdata(d,allocated(d))""" - - -def buildhooks(pymod): - global fgetdims1, fgetdims2 - from . 
import rules - ret = {'f90modhooks': [], 'initf90modhooks': [], 'body': [], - 'need': ['F_FUNC', 'arrayobject.h'], - 'separatorsfor': {'includes0': '\n', 'includes': '\n'}, - 'docs': ['"Fortran 90/95 modules:\\n"'], - 'latexdoc': []} - fhooks = [''] - - def fadd(line, s=fhooks): - s[0] = '%s\n %s' % (s[0], line) - doc = [''] - - def dadd(line, s=doc): - s[0] = '%s\n%s' % (s[0], line) - for m in findf90modules(pymod): - sargs, fargs, efargs, modobjs, notvars, onlyvars = [], [], [], [], [ - m['name']], [] - sargsp = [] - ifargs = [] - mfargs = [] - if hasbody(m): - for b in m['body']: - notvars.append(b['name']) - for n in m['vars'].keys(): - var = m['vars'][n] - if (n not in notvars) and (not l_or(isintent_hide, isprivate)(var)): - onlyvars.append(n) - mfargs.append(n) - outmess('\t\tConstructing F90 module support for "%s"...\n' % - (m['name'])) - if onlyvars: - outmess('\t\t Variables: %s\n' % (' '.join(onlyvars))) - chooks = [''] - - def cadd(line, s=chooks): - s[0] = '%s\n%s' % (s[0], line) - ihooks = [''] - - def iadd(line, s=ihooks): - s[0] = '%s\n%s' % (s[0], line) - - vrd = capi_maps.modsign2map(m) - cadd('static FortranDataDef f2py_%s_def[] = {' % (m['name'])) - dadd('\\subsection{Fortran 90/95 module \\texttt{%s}}\n' % (m['name'])) - if hasnote(m): - note = m['note'] - if isinstance(note, list): - note = '\n'.join(note) - dadd(note) - if onlyvars: - dadd('\\begin{description}') - for n in onlyvars: - var = m['vars'][n] - modobjs.append(n) - ct = capi_maps.getctype(var) - at = capi_maps.c2capi_map[ct] - dm = capi_maps.getarrdims(n, var) - dms = dm['dims'].replace('*', '-1').strip() - dms = dms.replace(':', '-1').strip() - if not dms: - dms = '-1' - use_fgetdims2 = fgetdims2 - if isstringarray(var): - if 'charselector' in var and 'len' in var['charselector']: - cadd('\t{"%s",%s,{{%s,%s}},%s},' - % (undo_rmbadname1(n), dm['rank'], dms, var['charselector']['len'], at)) - use_fgetdims2 = fgetdims2_sa - else: - cadd('\t{"%s",%s,{{%s}},%s},' % - 
(undo_rmbadname1(n), dm['rank'], dms, at)) - else: - cadd('\t{"%s",%s,{{%s}},%s},' % - (undo_rmbadname1(n), dm['rank'], dms, at)) - dadd('\\item[]{{}\\verb@%s@{}}' % - (capi_maps.getarrdocsign(n, var))) - if hasnote(var): - note = var['note'] - if isinstance(note, list): - note = '\n'.join(note) - dadd('--- %s' % (note)) - if isallocatable(var): - fargs.append('f2py_%s_getdims_%s' % (m['name'], n)) - efargs.append(fargs[-1]) - sargs.append( - 'void (*%s)(int*,int*,void(*)(char*,int*),int*)' % (n)) - sargsp.append('void (*)(int*,int*,void(*)(char*,int*),int*)') - iadd('\tf2py_%s_def[i_f2py++].func = %s;' % (m['name'], n)) - fadd('subroutine %s(r,s,f2pysetdata,flag)' % (fargs[-1])) - fadd('use %s, only: d => %s\n' % - (m['name'], undo_rmbadname1(n))) - fadd('integer flag\n') - fhooks[0] = fhooks[0] + fgetdims1 - dms = eval('range(1,%s+1)' % (dm['rank'])) - fadd(' allocate(d(%s))\n' % - (','.join(['s(%s)' % i for i in dms]))) - fhooks[0] = fhooks[0] + use_fgetdims2 - fadd('end subroutine %s' % (fargs[-1])) - else: - fargs.append(n) - sargs.append('char *%s' % (n)) - sargsp.append('char*') - iadd('\tf2py_%s_def[i_f2py++].data = %s;' % (m['name'], n)) - if onlyvars: - dadd('\\end{description}') - if hasbody(m): - for b in m['body']: - if not isroutine(b): - print('Skipping', b['block'], b['name']) - continue - modobjs.append('%s()' % (b['name'])) - b['modulename'] = m['name'] - api, wrap = rules.buildapi(b) - if isfunction(b): - fhooks[0] = fhooks[0] + wrap - fargs.append('f2pywrap_%s_%s' % (m['name'], b['name'])) - ifargs.append(func2subr.createfuncwrapper(b, signature=1)) - else: - if wrap: - fhooks[0] = fhooks[0] + wrap - fargs.append('f2pywrap_%s_%s' % (m['name'], b['name'])) - ifargs.append( - func2subr.createsubrwrapper(b, signature=1)) - else: - fargs.append(b['name']) - mfargs.append(fargs[-1]) - api['externroutines'] = [] - ar = applyrules(api, vrd) - ar['docs'] = [] - ar['docshort'] = [] - ret = dictappend(ret, ar) - cadd('\t{"%s",-1,{{-1}},0,NULL,(void 
*)f2py_rout_#modulename#_%s_%s,doc_f2py_rout_#modulename#_%s_%s},' % - (b['name'], m['name'], b['name'], m['name'], b['name'])) - sargs.append('char *%s' % (b['name'])) - sargsp.append('char *') - iadd('\tf2py_%s_def[i_f2py++].data = %s;' % - (m['name'], b['name'])) - cadd('\t{NULL}\n};\n') - iadd('}') - ihooks[0] = 'static void f2py_setup_%s(%s) {\n\tint i_f2py=0;%s' % ( - m['name'], ','.join(sargs), ihooks[0]) - if '_' in m['name']: - F_FUNC = 'F_FUNC_US' - else: - F_FUNC = 'F_FUNC' - iadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void (*)(%s));' - % (F_FUNC, m['name'], m['name'].upper(), ','.join(sargsp))) - iadd('static void f2py_init_%s(void) {' % (m['name'])) - iadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);' - % (F_FUNC, m['name'], m['name'].upper(), m['name'])) - iadd('}\n') - ret['f90modhooks'] = ret['f90modhooks'] + chooks + ihooks - ret['initf90modhooks'] = ['\tPyDict_SetItemString(d, "%s", PyFortranObject_New(f2py_%s_def,f2py_init_%s));' % ( - m['name'], m['name'], m['name'])] + ret['initf90modhooks'] - fadd('') - fadd('subroutine f2pyinit%s(f2pysetupfunc)' % (m['name'])) - if mfargs: - for a in undo_rmbadname(mfargs): - fadd('use %s, only : %s' % (m['name'], a)) - if ifargs: - fadd(' '.join(['interface'] + ifargs)) - fadd('end interface') - fadd('external f2pysetupfunc') - if efargs: - for a in undo_rmbadname(efargs): - fadd('external %s' % (a)) - fadd('call f2pysetupfunc(%s)' % (','.join(undo_rmbadname(fargs)))) - fadd('end subroutine f2pyinit%s\n' % (m['name'])) - - dadd('\n'.join(ret['latexdoc']).replace( - r'\subsection{', r'\subsubsection{')) - - ret['latexdoc'] = [] - ret['docs'].append('"\t%s --- %s"' % (m['name'], - ','.join(undo_rmbadname(modobjs)))) - - ret['routine_defs'] = '' - ret['doc'] = [] - ret['docshort'] = [] - ret['latexdoc'] = doc[0] - if len(ret['docs']) <= 1: - ret['docs'] = '' - return ret, fhooks[0] diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/func2subr.py b/venv/lib/python3.7/site-packages/numpy/f2py/func2subr.py 
deleted file mode 100644 index 6010d5a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/func2subr.py +++ /dev/null @@ -1,299 +0,0 @@ -#!/usr/bin/env python -""" - -Rules for building C/API module with f2py2e. - -Copyright 1999,2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2004/11/26 11:13:06 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -__version__ = "$Revision: 1.16 $"[10:-1] - -f2py_version = 'See `f2py -v`' - -import copy - -from .auxfuncs import ( - getfortranname, isexternal, isfunction, isfunction_wrap, isintent_in, - isintent_out, islogicalfunction, ismoduleroutine, isscalar, - issubroutine, issubroutine_wrap, outmess, show -) - - -def var2fixfortran(vars, a, fa=None, f90mode=None): - if fa is None: - fa = a - if a not in vars: - show(vars) - outmess('var2fixfortran: No definition for argument "%s".\n' % a) - return '' - if 'typespec' not in vars[a]: - show(vars[a]) - outmess('var2fixfortran: No typespec for argument "%s".\n' % a) - return '' - vardef = vars[a]['typespec'] - if vardef == 'type' and 'typename' in vars[a]: - vardef = '%s(%s)' % (vardef, vars[a]['typename']) - selector = {} - lk = '' - if 'kindselector' in vars[a]: - selector = vars[a]['kindselector'] - lk = 'kind' - elif 'charselector' in vars[a]: - selector = vars[a]['charselector'] - lk = 'len' - if '*' in selector: - if f90mode: - if selector['*'] in ['*', ':', '(*)']: - vardef = '%s(len=*)' % (vardef) - else: - vardef = '%s(%s=%s)' % (vardef, lk, selector['*']) - else: - if selector['*'] in ['*', ':']: - vardef = '%s*(%s)' % (vardef, selector['*']) - else: - vardef = '%s*%s' % (vardef, selector['*']) - else: - if 'len' in selector: - vardef = '%s(len=%s' % (vardef, selector['len']) - if 'kind' in selector: - vardef = '%s,kind=%s)' % (vardef, 
selector['kind']) - else: - vardef = '%s)' % (vardef) - elif 'kind' in selector: - vardef = '%s(kind=%s)' % (vardef, selector['kind']) - - vardef = '%s %s' % (vardef, fa) - if 'dimension' in vars[a]: - vardef = '%s(%s)' % (vardef, ','.join(vars[a]['dimension'])) - return vardef - - -def createfuncwrapper(rout, signature=0): - assert isfunction(rout) - - extra_args = [] - vars = rout['vars'] - for a in rout['args']: - v = rout['vars'][a] - for i, d in enumerate(v.get('dimension', [])): - if d == ':': - dn = 'f2py_%s_d%s' % (a, i) - dv = dict(typespec='integer', intent=['hide']) - dv['='] = 'shape(%s, %s)' % (a, i) - extra_args.append(dn) - vars[dn] = dv - v['dimension'][i] = dn - rout['args'].extend(extra_args) - need_interface = bool(extra_args) - - ret = [''] - - def add(line, ret=ret): - ret[0] = '%s\n %s' % (ret[0], line) - name = rout['name'] - fortranname = getfortranname(rout) - f90mode = ismoduleroutine(rout) - newname = '%sf2pywrap' % (name) - - if newname not in vars: - vars[newname] = vars[name] - args = [newname] + rout['args'][1:] - else: - args = [newname] + rout['args'] - - l = var2fixfortran(vars, name, newname, f90mode) - if l[:13] == 'character*(*)': - if f90mode: - l = 'character(len=10)' + l[13:] - else: - l = 'character*10' + l[13:] - charselect = vars[name]['charselector'] - if charselect.get('*', '') == '(*)': - charselect['*'] = '10' - sargs = ', '.join(args) - if f90mode: - add('subroutine f2pywrap_%s_%s (%s)' % - (rout['modulename'], name, sargs)) - if not signature: - add('use %s, only : %s' % (rout['modulename'], fortranname)) - else: - add('subroutine f2pywrap%s (%s)' % (name, sargs)) - if not need_interface: - add('external %s' % (fortranname)) - l = l + ', ' + fortranname - if need_interface: - for line in rout['saved_interface'].split('\n'): - if line.lstrip().startswith('use '): - add(line) - - args = args[1:] - dumped_args = [] - for a in args: - if isexternal(vars[a]): - add('external %s' % (a)) - dumped_args.append(a) - for a in 
args: - if a in dumped_args: - continue - if isscalar(vars[a]): - add(var2fixfortran(vars, a, f90mode=f90mode)) - dumped_args.append(a) - for a in args: - if a in dumped_args: - continue - if isintent_in(vars[a]): - add(var2fixfortran(vars, a, f90mode=f90mode)) - dumped_args.append(a) - for a in args: - if a in dumped_args: - continue - add(var2fixfortran(vars, a, f90mode=f90mode)) - - add(l) - - if need_interface: - if f90mode: - # f90 module already defines needed interface - pass - else: - add('interface') - add(rout['saved_interface'].lstrip()) - add('end interface') - - sargs = ', '.join([a for a in args if a not in extra_args]) - - if not signature: - if islogicalfunction(rout): - add('%s = .not.(.not.%s(%s))' % (newname, fortranname, sargs)) - else: - add('%s = %s(%s)' % (newname, fortranname, sargs)) - if f90mode: - add('end subroutine f2pywrap_%s_%s' % (rout['modulename'], name)) - else: - add('end') - return ret[0] - - -def createsubrwrapper(rout, signature=0): - assert issubroutine(rout) - - extra_args = [] - vars = rout['vars'] - for a in rout['args']: - v = rout['vars'][a] - for i, d in enumerate(v.get('dimension', [])): - if d == ':': - dn = 'f2py_%s_d%s' % (a, i) - dv = dict(typespec='integer', intent=['hide']) - dv['='] = 'shape(%s, %s)' % (a, i) - extra_args.append(dn) - vars[dn] = dv - v['dimension'][i] = dn - rout['args'].extend(extra_args) - need_interface = bool(extra_args) - - ret = [''] - - def add(line, ret=ret): - ret[0] = '%s\n %s' % (ret[0], line) - name = rout['name'] - fortranname = getfortranname(rout) - f90mode = ismoduleroutine(rout) - - args = rout['args'] - - sargs = ', '.join(args) - if f90mode: - add('subroutine f2pywrap_%s_%s (%s)' % - (rout['modulename'], name, sargs)) - if not signature: - add('use %s, only : %s' % (rout['modulename'], fortranname)) - else: - add('subroutine f2pywrap%s (%s)' % (name, sargs)) - if not need_interface: - add('external %s' % (fortranname)) - - if need_interface: - for line in 
rout['saved_interface'].split('\n'): - if line.lstrip().startswith('use '): - add(line) - - dumped_args = [] - for a in args: - if isexternal(vars[a]): - add('external %s' % (a)) - dumped_args.append(a) - for a in args: - if a in dumped_args: - continue - if isscalar(vars[a]): - add(var2fixfortran(vars, a, f90mode=f90mode)) - dumped_args.append(a) - for a in args: - if a in dumped_args: - continue - add(var2fixfortran(vars, a, f90mode=f90mode)) - - if need_interface: - if f90mode: - # f90 module already defines needed interface - pass - else: - add('interface') - add(rout['saved_interface'].lstrip()) - add('end interface') - - sargs = ', '.join([a for a in args if a not in extra_args]) - - if not signature: - add('call %s(%s)' % (fortranname, sargs)) - if f90mode: - add('end subroutine f2pywrap_%s_%s' % (rout['modulename'], name)) - else: - add('end') - return ret[0] - - -def assubr(rout): - if isfunction_wrap(rout): - fortranname = getfortranname(rout) - name = rout['name'] - outmess('\t\tCreating wrapper for Fortran function "%s"("%s")...\n' % ( - name, fortranname)) - rout = copy.copy(rout) - fname = name - rname = fname - if 'result' in rout: - rname = rout['result'] - rout['vars'][fname] = rout['vars'][rname] - fvar = rout['vars'][fname] - if not isintent_out(fvar): - if 'intent' not in fvar: - fvar['intent'] = [] - fvar['intent'].append('out') - flag = 1 - for i in fvar['intent']: - if i.startswith('out='): - flag = 0 - break - if flag: - fvar['intent'].append('out=%s' % (rname)) - rout['args'][:] = [fname] + rout['args'] - return rout, createfuncwrapper(rout) - if issubroutine_wrap(rout): - fortranname = getfortranname(rout) - name = rout['name'] - outmess('\t\tCreating wrapper for Fortran subroutine "%s"("%s")...\n' % ( - name, fortranname)) - rout = copy.copy(rout) - return rout, createsubrwrapper(rout) - return rout, '' diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/rules.py b/venv/lib/python3.7/site-packages/numpy/f2py/rules.py deleted file 
mode 100644 index f2f713b..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/rules.py +++ /dev/null @@ -1,1488 +0,0 @@ -#!/usr/bin/env python -""" - -Rules for building C/API module with f2py2e. - -Here is a skeleton of a new wrapper function (13Dec2001): - -wrapper_function(args) - declarations - get_python_arguments, say, `a' and `b' - - get_a_from_python - if (successful) { - - get_b_from_python - if (successful) { - - callfortran - if (successful) { - - put_a_to_python - if (successful) { - - put_b_to_python - if (successful) { - - buildvalue = ... - - } - - } - - } - - } - cleanup_b - - } - cleanup_a - - return buildvalue - -Copyright 1999,2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/08/30 08:58:42 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -__version__ = "$Revision: 1.129 $"[10:-1] - -from . 
import __version__ -f2py_version = __version__.version - -import os -import time -import copy - -from .auxfuncs import ( - applyrules, debugcapi, dictappend, errmess, gentitle, getargs2, - hascallstatement, hasexternals, hasinitvalue, hasnote, hasresultnote, - isarray, isarrayofstrings, iscomplex, iscomplexarray, - iscomplexfunction, iscomplexfunction_warn, isdummyroutine, isexternal, - isfunction, isfunction_wrap, isint1array, isintent_aux, isintent_c, - isintent_callback, isintent_copy, isintent_hide, isintent_inout, - isintent_nothide, isintent_out, isintent_overwrite, islogical, - islong_complex, islong_double, islong_doublefunction, islong_long, - islong_longfunction, ismoduleroutine, isoptional, isrequired, isscalar, - issigned_long_longarray, isstring, isstringarray, isstringfunction, - issubroutine, issubroutine_wrap, isthreadsafe, isunsigned, - isunsigned_char, isunsigned_chararray, isunsigned_long_long, - isunsigned_long_longarray, isunsigned_short, isunsigned_shortarray, - l_and, l_not, l_or, outmess, replace, stripcomma, -) - -from . import capi_maps -from . import cfuncs -from . import common_rules -from . import use_rules -from . import f90mod_rules -from . import func2subr - -options = {} -sepdict = {} -#for k in ['need_cfuncs']: sepdict[k]=',' -for k in ['decl', - 'frompyobj', - 'cleanupfrompyobj', - 'topyarr', 'method', - 'pyobjfrom', 'closepyobjfrom', - 'freemem', - 'userincludes', - 'includes0', 'includes', 'typedefs', 'typedefs_generated', - 'cppmacros', 'cfuncs', 'callbacks', - 'latexdoc', - 'restdoc', - 'routine_defs', 'externroutines', - 'initf2pywraphooks', - 'commonhooks', 'initcommonhooks', - 'f90modhooks', 'initf90modhooks']: - sepdict[k] = '\n' - -#################### Rules for C/API module ################# - -generationtime = int(os.environ.get('SOURCE_DATE_EPOCH', time.time())) -module_rules = { - 'modulebody': """\ -/* File: #modulename#module.c - * This file is auto-generated with f2py (version:#f2py_version#). 
- * f2py is a Fortran to Python Interface Generator (FPIG), Second Edition, - * written by Pearu Peterson . - * Generation date: """ + time.asctime(time.gmtime(generationtime)) + """ - * Do not edit this file directly unless you know what you are doing!!! - */ - -#ifdef __cplusplus -extern \"C\" { -#endif - -""" + gentitle("See f2py2e/cfuncs.py: includes") + """ -#includes# -#includes0# - -""" + gentitle("See f2py2e/rules.py: mod_rules['modulebody']") + """ -static PyObject *#modulename#_error; -static PyObject *#modulename#_module; - -""" + gentitle("See f2py2e/cfuncs.py: typedefs") + """ -#typedefs# - -""" + gentitle("See f2py2e/cfuncs.py: typedefs_generated") + """ -#typedefs_generated# - -""" + gentitle("See f2py2e/cfuncs.py: cppmacros") + """ -#cppmacros# - -""" + gentitle("See f2py2e/cfuncs.py: cfuncs") + """ -#cfuncs# - -""" + gentitle("See f2py2e/cfuncs.py: userincludes") + """ -#userincludes# - -""" + gentitle("See f2py2e/capi_rules.py: usercode") + """ -#usercode# - -/* See f2py2e/rules.py */ -#externroutines# - -""" + gentitle("See f2py2e/capi_rules.py: usercode1") + """ -#usercode1# - -""" + gentitle("See f2py2e/cb_rules.py: buildcallback") + """ -#callbacks# - -""" + gentitle("See f2py2e/rules.py: buildapi") + """ -#body# - -""" + gentitle("See f2py2e/f90mod_rules.py: buildhooks") + """ -#f90modhooks# - -""" + gentitle("See f2py2e/rules.py: module_rules['modulebody']") + """ - -""" + gentitle("See f2py2e/common_rules.py: buildhooks") + """ -#commonhooks# - -""" + gentitle("See f2py2e/rules.py") + """ - -static FortranDataDef f2py_routine_defs[] = { -#routine_defs# -\t{NULL} -}; - -static PyMethodDef f2py_module_methods[] = { -#pymethoddef# -\t{NULL,NULL} -}; - -#if PY_VERSION_HEX >= 0x03000000 -static struct PyModuleDef moduledef = { -\tPyModuleDef_HEAD_INIT, -\t"#modulename#", -\tNULL, -\t-1, -\tf2py_module_methods, -\tNULL, -\tNULL, -\tNULL, -\tNULL -}; -#endif - -#if PY_VERSION_HEX >= 0x03000000 -#define RETVAL m -PyMODINIT_FUNC 
PyInit_#modulename#(void) { -#else -#define RETVAL -PyMODINIT_FUNC init#modulename#(void) { -#endif -\tint i; -\tPyObject *m,*d, *s, *tmp; -#if PY_VERSION_HEX >= 0x03000000 -\tm = #modulename#_module = PyModule_Create(&moduledef); -#else -\tm = #modulename#_module = Py_InitModule(\"#modulename#\", f2py_module_methods); -#endif -\tPy_TYPE(&PyFortran_Type) = &PyType_Type; -\timport_array(); -\tif (PyErr_Occurred()) -\t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return RETVAL;} -\td = PyModule_GetDict(m); -\ts = PyString_FromString(\"$R""" + """evision: $\"); -\tPyDict_SetItemString(d, \"__version__\", s); -\tPy_DECREF(s); -#if PY_VERSION_HEX >= 0x03000000 -\ts = PyUnicode_FromString( -#else -\ts = PyString_FromString( -#endif -\t\t\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\"); -\tPyDict_SetItemString(d, \"__doc__\", s); -\tPy_DECREF(s); -\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL); -\t/* -\t * Store the error object inside the dict, so that it could get deallocated. -\t * (in practice, this is a module, so it likely will not and cannot.) -\t */ -\tPyDict_SetItemString(d, \"_#modulename#_error\", #modulename#_error); -\tPy_DECREF(#modulename#_error); -\tfor(i=0;f2py_routine_defs[i].name!=NULL;i++) { -\t\ttmp = PyFortranObject_NewAsAttr(&f2py_routine_defs[i]); -\t\tPyDict_SetItemString(d, f2py_routine_defs[i].name, tmp); -\t\tPy_DECREF(tmp); -\t} -#initf2pywraphooks# -#initf90modhooks# -#initcommonhooks# -#interface_usercode# - -#ifdef F2PY_REPORT_ATEXIT -\tif (! 
PyErr_Occurred()) -\t\ton_exit(f2py_report_on_exit,(void*)\"#modulename#\"); -#endif -\treturn RETVAL; -} -#ifdef __cplusplus -} -#endif -""", - 'separatorsfor': {'latexdoc': '\n\n', - 'restdoc': '\n\n'}, - 'latexdoc': ['\\section{Module \\texttt{#texmodulename#}}\n', - '#modnote#\n', - '#latexdoc#'], - 'restdoc': ['Module #modulename#\n' + '=' * 80, - '\n#restdoc#'] -} - -defmod_rules = [ - {'body': '/*eof body*/', - 'method': '/*eof method*/', - 'externroutines': '/*eof externroutines*/', - 'routine_defs': '/*eof routine_defs*/', - 'initf90modhooks': '/*eof initf90modhooks*/', - 'initf2pywraphooks': '/*eof initf2pywraphooks*/', - 'initcommonhooks': '/*eof initcommonhooks*/', - 'latexdoc': '', - 'restdoc': '', - 'modnote': {hasnote: '#note#', l_not(hasnote): ''}, - } -] - -routine_rules = { - 'separatorsfor': sepdict, - 'body': """ -#begintitle# -static char doc_#apiname#[] = \"\\\n#docreturn##name#(#docsignatureshort#)\\n\\nWrapper for ``#name#``.\\\n\\n#docstrsigns#\"; -/* #declfortranroutine# */ -static PyObject *#apiname#(const PyObject *capi_self, - PyObject *capi_args, - PyObject *capi_keywds, - #functype# (*f2py_func)(#callprotoargument#)) { -\tPyObject * volatile capi_buildvalue = NULL; -\tvolatile int f2py_success = 1; -#decl# -\tstatic char *capi_kwlist[] = {#kwlist##kwlistopt##kwlistxa#NULL}; -#usercode# -#routdebugenter# -#ifdef F2PY_REPORT_ATEXIT -f2py_start_clock(); -#endif -\tif (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\ -\t\t\"#argformat##keyformat##xaformat#:#pyname#\",\\ -\t\tcapi_kwlist#args_capi##keys_capi##keys_xa#))\n\t\treturn NULL; -#frompyobj# -/*end of frompyobj*/ -#ifdef F2PY_REPORT_ATEXIT -f2py_start_call_clock(); -#endif -#callfortranroutine# -if (PyErr_Occurred()) - f2py_success = 0; -#ifdef F2PY_REPORT_ATEXIT -f2py_stop_call_clock(); -#endif -/*end of callfortranroutine*/ -\t\tif (f2py_success) { -#pyobjfrom# -/*end of pyobjfrom*/ -\t\tCFUNCSMESS(\"Building return value.\\n\"); -\t\tcapi_buildvalue = 
Py_BuildValue(\"#returnformat#\"#return#); -/*closepyobjfrom*/ -#closepyobjfrom# -\t\t} /*if (f2py_success) after callfortranroutine*/ -/*cleanupfrompyobj*/ -#cleanupfrompyobj# -\tif (capi_buildvalue == NULL) { -#routdebugfailure# -\t} else { -#routdebugleave# -\t} -\tCFUNCSMESS(\"Freeing memory.\\n\"); -#freemem# -#ifdef F2PY_REPORT_ATEXIT -f2py_stop_clock(); -#endif -\treturn capi_buildvalue; -} -#endtitle# -""", - 'routine_defs': '#routine_def#', - 'initf2pywraphooks': '#initf2pywraphook#', - 'externroutines': '#declfortranroutine#', - 'doc': '#docreturn##name#(#docsignature#)', - 'docshort': '#docreturn##name#(#docsignatureshort#)', - 'docs': '"\t#docreturn##name#(#docsignature#)\\n"\n', - 'need': ['arrayobject.h', 'CFUNCSMESS', 'MINMAX'], - 'cppmacros': {debugcapi: '#define DEBUGCFUNCS'}, - 'latexdoc': ['\\subsection{Wrapper function \\texttt{#texname#}}\n', - """ -\\noindent{{}\\verb@#docreturn##name#@{}}\\texttt{(#latexdocsignatureshort#)} -#routnote# - -#latexdocstrsigns# -"""], - 'restdoc': ['Wrapped function ``#name#``\n' + '-' * 80, - - ] -} - -################## Rules for C/API function ############## - -rout_rules = [ - { # Init - 'separatorsfor': {'callfortranroutine': '\n', 'routdebugenter': '\n', 'decl': '\n', - 'routdebugleave': '\n', 'routdebugfailure': '\n', - 'setjmpbuf': ' || ', - 'docstrreq': '\n', 'docstropt': '\n', 'docstrout': '\n', - 'docstrcbs': '\n', 'docstrsigns': '\\n"\n"', - 'latexdocstrsigns': '\n', - 'latexdocstrreq': '\n', 'latexdocstropt': '\n', - 'latexdocstrout': '\n', 'latexdocstrcbs': '\n', - }, - 'kwlist': '', 'kwlistopt': '', 'callfortran': '', 'callfortranappend': '', - 'docsign': '', 'docsignopt': '', 'decl': '/*decl*/', - 'freemem': '/*freemem*/', - 'docsignshort': '', 'docsignoptshort': '', - 'docstrsigns': '', 'latexdocstrsigns': '', - 'docstrreq': '\\nParameters\\n----------', - 'docstropt': '\\nOther Parameters\\n----------------', - 'docstrout': '\\nReturns\\n-------', - 'docstrcbs': '\\nNotes\\n-----\\nCall-back 
functions::\\n', - 'latexdocstrreq': '\\noindent Required arguments:', - 'latexdocstropt': '\\noindent Optional arguments:', - 'latexdocstrout': '\\noindent Return objects:', - 'latexdocstrcbs': '\\noindent Call-back functions:', - 'args_capi': '', 'keys_capi': '', 'functype': '', - 'frompyobj': '/*frompyobj*/', - # this list will be reversed - 'cleanupfrompyobj': ['/*end of cleanupfrompyobj*/'], - 'pyobjfrom': '/*pyobjfrom*/', - # this list will be reversed - 'closepyobjfrom': ['/*end of closepyobjfrom*/'], - 'topyarr': '/*topyarr*/', 'routdebugleave': '/*routdebugleave*/', - 'routdebugenter': '/*routdebugenter*/', - 'routdebugfailure': '/*routdebugfailure*/', - 'callfortranroutine': '/*callfortranroutine*/', - 'argformat': '', 'keyformat': '', 'need_cfuncs': '', - 'docreturn': '', 'return': '', 'returnformat': '', 'rformat': '', - 'kwlistxa': '', 'keys_xa': '', 'xaformat': '', 'docsignxa': '', 'docsignxashort': '', - 'initf2pywraphook': '', - 'routnote': {hasnote: '--- #note#', l_not(hasnote): ''}, - }, { - 'apiname': 'f2py_rout_#modulename#_#name#', - 'pyname': '#modulename#.#name#', - 'decl': '', - '_check': l_not(ismoduleroutine) - }, { - 'apiname': 'f2py_rout_#modulename#_#f90modulename#_#name#', - 'pyname': '#modulename#.#f90modulename#.#name#', - 'decl': '', - '_check': ismoduleroutine - }, { # Subroutine - 'functype': 'void', - 'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', - l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern void #fortranname#(#callprotoargument#);', - ismoduleroutine: '', - isdummyroutine: '' - }, - 'routine_def': {l_not(l_or(ismoduleroutine, isintent_c, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char 
*)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},', - l_and(l_not(ismoduleroutine), isdummyroutine): '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', - }, - 'need': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'F_FUNC'}, - 'callfortranroutine': [ - {debugcapi: [ - """\tfprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]}, - {hasexternals: """\ -\t\tif (#setjmpbuf#) { -\t\t\tf2py_success = 0; -\t\t} else {"""}, - {isthreadsafe: '\t\t\tPy_BEGIN_ALLOW_THREADS'}, - {hascallstatement: '''\t\t\t\t#callstatement#; -\t\t\t\t/*(*f2py_func)(#callfortran#);*/'''}, - {l_not(l_or(hascallstatement, isdummyroutine)) - : '\t\t\t\t(*f2py_func)(#callfortran#);'}, - {isthreadsafe: '\t\t\tPy_END_ALLOW_THREADS'}, - {hasexternals: """\t\t}"""} - ], - '_check': l_and(issubroutine, l_not(issubroutine_wrap)), - }, { # Wrapped function - 'functype': 'void', - 'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', - isdummyroutine: '', - }, - - 'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', - }, - 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): ''' - { - extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void); - PyObject* o = PyDict_GetItemString(d,"#name#"); - tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL); - PyObject_SetAttrString(o,"_cpointer", tmp); - Py_DECREF(tmp); -#if PY_VERSION_HEX >= 0x03000000 - s = PyUnicode_FromString("#name#"); -#else - s = PyString_FromString("#name#"); -#endif - PyObject_SetAttrString(o,"__name__", s); - Py_DECREF(s); - } - '''}, - 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 
'F_FUNC']}, - 'callfortranroutine': [ - {debugcapi: [ - """\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, - {hasexternals: """\ -\tif (#setjmpbuf#) { -\t\tf2py_success = 0; -\t} else {"""}, - {isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'}, - {l_not(l_or(hascallstatement, isdummyroutine)) - : '\t(*f2py_func)(#callfortran#);'}, - {hascallstatement: - '\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'}, - {isthreadsafe: '\tPy_END_ALLOW_THREADS'}, - {hasexternals: '\t}'} - ], - '_check': isfunction_wrap, - }, { # Wrapped subroutine - 'functype': 'void', - 'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', - isdummyroutine: '', - }, - - 'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', - }, - 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): ''' - { - extern void #F_FUNC#(#name_lower#,#NAME#)(void); - PyObject* o = PyDict_GetItemString(d,"#name#"); - tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL); - PyObject_SetAttrString(o,"_cpointer", tmp); - Py_DECREF(tmp); -#if PY_VERSION_HEX >= 0x03000000 - s = PyUnicode_FromString("#name#"); -#else - s = PyString_FromString("#name#"); -#endif - PyObject_SetAttrString(o,"__name__", s); - Py_DECREF(s); - } - '''}, - 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']}, - 'callfortranroutine': [ - {debugcapi: [ - """\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, - {hasexternals: """\ -\tif (#setjmpbuf#) { -\t\tf2py_success = 0; -\t} else {"""}, - {isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'}, - {l_not(l_or(hascallstatement, isdummyroutine)) - : 
'\t(*f2py_func)(#callfortran#);'}, - {hascallstatement: - '\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'}, - {isthreadsafe: '\tPy_END_ALLOW_THREADS'}, - {hasexternals: '\t}'} - ], - '_check': issubroutine_wrap, - }, { # Function - 'functype': '#ctype#', - 'docreturn': {l_not(isintent_hide): '#rname#,'}, - 'docstrout': '#pydocsignout#', - 'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}', - {hasresultnote: '--- #resultnote#'}], - 'callfortranroutine': [{l_and(debugcapi, isstringfunction): """\ -#ifdef USESCOMPAQFORTRAN -\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\"); -#else -\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); -#endif -"""}, - {l_and(debugcapi, l_not(isstringfunction)): """\ -\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); -"""} - ], - '_check': l_and(isfunction, l_not(isfunction_wrap)) - }, { # Scalar function - 'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', - l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern #ctype# #fortranname#(#callprotoargument#);', - isdummyroutine: '' - }, - 'routine_def': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},', - isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', - }, - 'decl': [{iscomplexfunction_warn: '\t#ctype# #name#_return_value={0,0};', - l_not(iscomplexfunction): '\t#ctype# #name#_return_value=0;'}, - {iscomplexfunction: - '\tPyObject *#name#_return_value_capi = Py_None;'} - ], - 
'callfortranroutine': [ - {hasexternals: """\ -\tif (#setjmpbuf#) { -\t\tf2py_success = 0; -\t} else {"""}, - {isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'}, - {hascallstatement: '''\t#callstatement#; -/*\t#name#_return_value = (*f2py_func)(#callfortran#);*/ -'''}, - {l_not(l_or(hascallstatement, isdummyroutine)) - : '\t#name#_return_value = (*f2py_func)(#callfortran#);'}, - {isthreadsafe: '\tPy_END_ALLOW_THREADS'}, - {hasexternals: '\t}'}, - {l_and(debugcapi, iscomplexfunction) - : '\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'}, - {l_and(debugcapi, l_not(iscomplexfunction)): '\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}], - 'pyobjfrom': {iscomplexfunction: '\t#name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'}, - 'need': [{l_not(isdummyroutine): 'F_FUNC'}, - {iscomplexfunction: 'pyobj_from_#ctype#1'}, - {islong_longfunction: 'long_long'}, - {islong_doublefunction: 'long_double'}], - 'returnformat': {l_not(isintent_hide): '#rformat#'}, - 'return': {iscomplexfunction: ',#name#_return_value_capi', - l_not(l_or(iscomplexfunction, isintent_hide)): ',#name#_return_value'}, - '_check': l_and(isfunction, l_not(isstringfunction), l_not(isfunction_wrap)) - }, { # String function # in use for --no-wrap - 'declfortranroutine': 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', - 'routine_def': {l_not(l_or(ismoduleroutine, isintent_c)): - '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - l_and(l_not(ismoduleroutine), isintent_c): - '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},' - }, - 'decl': ['\t#ctype# #name#_return_value = NULL;', - '\tint #name#_return_value_len = 0;'], - 'callfortran':'#name#_return_value,#name#_return_value_len,', - 'callfortranroutine':['\t#name#_return_value_len = #rlength#;', - '\tif ((#name#_return_value = 
(string)malloc(sizeof(char)*(#name#_return_value_len+1))) == NULL) {', - '\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");', - '\t\tf2py_success = 0;', - '\t} else {', - "\t\t(#name#_return_value)[#name#_return_value_len] = '\\0';", - '\t}', - '\tif (f2py_success) {', - {hasexternals: """\ -\t\tif (#setjmpbuf#) { -\t\t\tf2py_success = 0; -\t\t} else {"""}, - {isthreadsafe: '\t\tPy_BEGIN_ALLOW_THREADS'}, - """\ -#ifdef USESCOMPAQFORTRAN -\t\t(*f2py_func)(#callcompaqfortran#); -#else -\t\t(*f2py_func)(#callfortran#); -#endif -""", - {isthreadsafe: '\t\tPy_END_ALLOW_THREADS'}, - {hasexternals: '\t\t}'}, - {debugcapi: - '\t\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'}, - '\t} /* if (f2py_success) after (string)malloc */', - ], - 'returnformat': '#rformat#', - 'return': ',#name#_return_value', - 'freemem': '\tSTRINGFREE(#name#_return_value);', - 'need': ['F_FUNC', '#ctype#', 'STRINGFREE'], - '_check':l_and(isstringfunction, l_not(isfunction_wrap)) # ???obsolete - }, - { # Debugging - 'routdebugenter': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");', - 'routdebugleave': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");', - 'routdebugfailure': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");', - '_check': debugcapi - } -] - -################ Rules for arguments ################## - -typedef_need_dict = {islong_long: 'long_long', - islong_double: 'long_double', - islong_complex: 'complex_long_double', - isunsigned_char: 'unsigned_char', - isunsigned_short: 'unsigned_short', - isunsigned: 'unsigned', - isunsigned_long_long: 'unsigned_long_long', - isunsigned_chararray: 'unsigned_char', - isunsigned_shortarray: 'unsigned_short', - isunsigned_long_longarray: 'unsigned_long_long', - issigned_long_longarray: 'long_long', - } - -aux_rules = [ - { - 'separatorsfor': sepdict - }, - { # Common - 
'frompyobj': ['\t/* Processing auxiliary variable #varname# */', - {debugcapi: '\tfprintf(stderr,"#vardebuginfo#\\n");'}, ], - 'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */', - 'need': typedef_need_dict, - }, - # Scalars (not complex) - { # Common - 'decl': '\t#ctype# #varname# = 0;', - 'need': {hasinitvalue: 'math.h'}, - 'frompyobj': {hasinitvalue: '\t#varname# = #init#;'}, - '_check': l_and(isscalar, l_not(iscomplex)), - }, - { - 'return': ',#varname#', - 'docstrout': '#pydocsignout#', - 'docreturn': '#outvarname#,', - 'returnformat': '#varrformat#', - '_check': l_and(isscalar, l_not(iscomplex), isintent_out), - }, - # Complex scalars - { # Common - 'decl': '\t#ctype# #varname#;', - 'frompyobj': {hasinitvalue: '\t#varname#.r = #init.r#, #varname#.i = #init.i#;'}, - '_check': iscomplex - }, - # String - { # Common - 'decl': ['\t#ctype# #varname# = NULL;', - '\tint slen(#varname#);', - ], - 'need':['len..'], - '_check':isstring - }, - # Array - { # Common - 'decl': ['\t#ctype# *#varname# = NULL;', - '\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', - '\tconst int #varname#_Rank = #rank#;', - ], - 'need':['len..', {hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}], - '_check': isarray - }, - # Scalararray - { # Common - '_check': l_and(isarray, l_not(iscomplexarray)) - }, { # Not hidden - '_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide) - }, - # Integer*1 array - {'need': '#ctype#', - '_check': isint1array, - '_depend': '' - }, - # Integer*-1 array - {'need': '#ctype#', - '_check': isunsigned_chararray, - '_depend': '' - }, - # Integer*-2 array - {'need': '#ctype#', - '_check': isunsigned_shortarray, - '_depend': '' - }, - # Integer*-8 array - {'need': '#ctype#', - '_check': isunsigned_long_longarray, - '_depend': '' - }, - # Complexarray - {'need': '#ctype#', - '_check': iscomplexarray, - '_depend': '' - }, - # Stringarray - { - 'callfortranappend': {isarrayofstrings: 'flen(#varname#),'}, - 'need': 'string', - '_check': 
isstringarray - } -] - -arg_rules = [ - { - 'separatorsfor': sepdict - }, - { # Common - 'frompyobj': ['\t/* Processing variable #varname# */', - {debugcapi: '\tfprintf(stderr,"#vardebuginfo#\\n");'}, ], - 'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */', - '_depend': '', - 'need': typedef_need_dict, - }, - # Doc signatures - { - 'docstropt': {l_and(isoptional, isintent_nothide): '#pydocsign#'}, - 'docstrreq': {l_and(isrequired, isintent_nothide): '#pydocsign#'}, - 'docstrout': {isintent_out: '#pydocsignout#'}, - 'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', - {hasnote: '--- #note#'}]}, - 'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', - {hasnote: '--- #note#'}]}, - 'latexdocstrout': {isintent_out: ['\\item[]{{}\\verb@#pydocsignout#@{}}', - {l_and(hasnote, isintent_hide): '--- #note#', - l_and(hasnote, isintent_nothide): '--- See above.'}]}, - 'depend': '' - }, - # Required/Optional arguments - { - 'kwlist': '"#varname#",', - 'docsign': '#varname#,', - '_check': l_and(isintent_nothide, l_not(isoptional)) - }, - { - 'kwlistopt': '"#varname#",', - 'docsignopt': '#varname#=#showinit#,', - 'docsignoptshort': '#varname#,', - '_check': l_and(isintent_nothide, isoptional) - }, - # Docstring/BuildValue - { - 'docreturn': '#outvarname#,', - 'returnformat': '#varrformat#', - '_check': isintent_out - }, - # Externals (call-back functions) - { # Common - 'docsignxa': {isintent_nothide: '#varname#_extra_args=(),'}, - 'docsignxashort': {isintent_nothide: '#varname#_extra_args,'}, - 'docstropt': {isintent_nothide: '#varname#_extra_args : input tuple, optional\\n Default: ()'}, - 'docstrcbs': '#cbdocstr#', - 'latexdocstrcbs': '\\item[] #cblatexdocstr#', - 'latexdocstropt': {isintent_nothide: '\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'}, - 'decl': ['\tPyObject *#varname#_capi = Py_None;', 
- '\tPyTupleObject *#varname#_xa_capi = NULL;', - '\tPyTupleObject *#varname#_args_capi = NULL;', - '\tint #varname#_nofargs_capi = 0;', - {l_not(isintent_callback): - '\t#cbname#_typedef #varname#_cptr;'} - ], - 'kwlistxa': {isintent_nothide: '"#varname#_extra_args",'}, - 'argformat': {isrequired: 'O'}, - 'keyformat': {isoptional: 'O'}, - 'xaformat': {isintent_nothide: 'O!'}, - 'args_capi': {isrequired: ',&#varname#_capi'}, - 'keys_capi': {isoptional: ',&#varname#_capi'}, - 'keys_xa': ',&PyTuple_Type,&#varname#_xa_capi', - 'setjmpbuf': '(setjmp(#cbname#_jmpbuf))', - 'callfortran': {l_not(isintent_callback): '#varname#_cptr,'}, - 'need': ['#cbname#', 'setjmp.h'], - '_check':isexternal - }, - { - 'frompyobj': [{l_not(isintent_callback): """\ -if(F2PyCapsule_Check(#varname#_capi)) { - #varname#_cptr = F2PyCapsule_AsVoidPtr(#varname#_capi); -} else { - #varname#_cptr = #cbname#; -} -"""}, {isintent_callback: """\ -if (#varname#_capi==Py_None) { - #varname#_capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\"); - if (#varname#_capi) { - if (#varname#_xa_capi==NULL) { - if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) { - PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\"); - if (capi_tmp) { - #varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp); - Py_DECREF(capi_tmp); - } - else { - #varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\"); - } - if (#varname#_xa_capi==NULL) { - PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\"); - return NULL; - } - } - } - } - if (#varname#_capi==NULL) { - PyErr_SetString(#modulename#_error,\"Callback #varname# not defined (as an argument or module #modulename# attribute).\\n\"); - return NULL; - } -} -"""}, - """\ -\t#varname#_nofargs_capi = #cbname#_nofargs; -\tif (create_cb_arglist(#varname#_capi,#varname#_xa_capi,#maxnofargs#,#nofoptargs#,&#cbname#_nofargs,&#varname#_args_capi,\"failed in 
processing argument list for call-back #varname#.\")) { -\t\tjmp_buf #varname#_jmpbuf;""", - {debugcapi: ["""\ -\t\tfprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#cbname#_nofargs); -\t\tCFUNCSMESSPY(\"for #varname#=\",#cbname#_capi);""", - {l_not(isintent_callback): """\t\tfprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]}, - """\ -\t\tCFUNCSMESS(\"Saving jmpbuf for `#varname#`.\\n\"); -\t\tSWAP(#varname#_capi,#cbname#_capi,PyObject); -\t\tSWAP(#varname#_args_capi,#cbname#_args_capi,PyTupleObject); -\t\tmemcpy(&#varname#_jmpbuf,&#cbname#_jmpbuf,sizeof(jmp_buf));""", - ], - 'cleanupfrompyobj': - """\ -\t\tCFUNCSMESS(\"Restoring jmpbuf for `#varname#`.\\n\"); -\t\t#cbname#_capi = #varname#_capi; -\t\tPy_DECREF(#cbname#_args_capi); -\t\t#cbname#_args_capi = #varname#_args_capi; -\t\t#cbname#_nofargs = #varname#_nofargs_capi; -\t\tmemcpy(&#cbname#_jmpbuf,&#varname#_jmpbuf,sizeof(jmp_buf)); -\t}""", - 'need': ['SWAP', 'create_cb_arglist'], - '_check':isexternal, - '_depend':'' - }, - # Scalars (not complex) - { # Common - 'decl': '\t#ctype# #varname# = 0;', - 'pyobjfrom': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, - 'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'}, - 'return': {isintent_out: ',#varname#'}, - '_check': l_and(isscalar, l_not(iscomplex)) - }, { - 'need': {hasinitvalue: 'math.h'}, - '_check': l_and(isscalar, l_not(iscomplex)), - }, { # Not hidden - 'decl': '\tPyObject *#varname#_capi = Py_None;', - 'argformat': {isrequired: 'O'}, - 'keyformat': {isoptional: 'O'}, - 'args_capi': {isrequired: ',&#varname#_capi'}, - 'keys_capi': {isoptional: ',&#varname#_capi'}, - 'pyobjfrom': {isintent_inout: """\ -\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); -\tif (f2py_success) {"""}, - 'closepyobjfrom': {isintent_inout: "\t} /*if (f2py_success) of #varname# pyobjfrom*/"}, - 'need': {isintent_inout: 
'try_pyarr_from_#ctype#'}, - '_check': l_and(isscalar, l_not(iscomplex), isintent_nothide) - }, { - 'frompyobj': [ - # hasinitvalue... - # if pyobj is None: - # varname = init - # else - # from_pyobj(varname) - # - # isoptional and noinitvalue... - # if pyobj is not None: - # from_pyobj(varname) - # else: - # varname is uninitialized - # - # ... - # from_pyobj(varname) - # - {hasinitvalue: '\tif (#varname#_capi == Py_None) #varname# = #init#; else', - '_depend': ''}, - {l_and(isoptional, l_not(hasinitvalue)): '\tif (#varname#_capi != Py_None)', - '_depend': ''}, - {l_not(islogical): '''\ -\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#"); -\tif (f2py_success) {'''}, - {islogical: '''\ -\t\t#varname# = (#ctype#)PyObject_IsTrue(#varname#_capi); -\t\tf2py_success = 1; -\tif (f2py_success) {'''}, - ], - 'cleanupfrompyobj': '\t} /*if (f2py_success) of #varname#*/', - 'need': {l_not(islogical): '#ctype#_from_pyobj'}, - '_check': l_and(isscalar, l_not(iscomplex), isintent_nothide), - '_depend': '' - }, { # Hidden - 'frompyobj': {hasinitvalue: '\t#varname# = #init#;'}, - 'need': typedef_need_dict, - '_check': l_and(isscalar, l_not(iscomplex), isintent_hide), - '_depend': '' - }, { # Common - 'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, - '_check': l_and(isscalar, l_not(iscomplex)), - '_depend': '' - }, - # Complex scalars - { # Common - 'decl': '\t#ctype# #varname#;', - 'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'}, - 'pyobjfrom': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, - 'return': {isintent_out: ',#varname#_capi'}, - '_check': iscomplex - }, { # Not hidden - 'decl': '\tPyObject *#varname#_capi = Py_None;', - 'argformat': {isrequired: 'O'}, - 'keyformat': {isoptional: 'O'}, - 'args_capi': {isrequired: ',&#varname#_capi'}, - 'keys_capi': {isoptional: ',&#varname#_capi'}, - 'need': 
{isintent_inout: 'try_pyarr_from_#ctype#'}, - 'pyobjfrom': {isintent_inout: """\ -\t\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); -\t\tif (f2py_success) {"""}, - 'closepyobjfrom': {isintent_inout: "\t\t} /*if (f2py_success) of #varname# pyobjfrom*/"}, - '_check': l_and(iscomplex, isintent_nothide) - }, { - 'frompyobj': [{hasinitvalue: '\tif (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'}, - {l_and(isoptional, l_not(hasinitvalue)) - : '\tif (#varname#_capi != Py_None)'}, - '\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");' - '\n\tif (f2py_success) {'], - 'cleanupfrompyobj': '\t} /*if (f2py_success) of #varname# frompyobj*/', - 'need': ['#ctype#_from_pyobj'], - '_check': l_and(iscomplex, isintent_nothide), - '_depend': '' - }, { # Hidden - 'decl': {isintent_out: '\tPyObject *#varname#_capi = Py_None;'}, - '_check': l_and(iscomplex, isintent_hide) - }, { - 'frompyobj': {hasinitvalue: '\t#varname#.r = #init.r#, #varname#.i = #init.i#;'}, - '_check': l_and(iscomplex, isintent_hide), - '_depend': '' - }, { # Common - 'pyobjfrom': {isintent_out: '\t#varname#_capi = pyobj_from_#ctype#1(#varname#);'}, - 'need': ['pyobj_from_#ctype#1'], - '_check': iscomplex - }, { - 'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, - '_check': iscomplex, - '_depend': '' - }, - # String - { # Common - 'decl': ['\t#ctype# #varname# = NULL;', - '\tint slen(#varname#);', - '\tPyObject *#varname#_capi = Py_None;'], - 'callfortran':'#varname#,', - 'callfortranappend':'slen(#varname#),', - 'pyobjfrom':{debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, - 'return': {isintent_out: ',#varname#'}, - 'need': ['len..'], # 'STRINGFREE'], - '_check':isstring - }, { # Common - 'frompyobj': """\ -\tslen(#varname#) = #length#; -\tf2py_success = 
#ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\"); -\tif (f2py_success) {""", - 'cleanupfrompyobj': """\ -\t\tSTRINGFREE(#varname#); -\t} /*if (f2py_success) of #varname#*/""", - 'need': ['#ctype#_from_pyobj', 'len..', 'STRINGFREE'], - '_check':isstring, - '_depend':'' - }, { # Not hidden - 'argformat': {isrequired: 'O'}, - 'keyformat': {isoptional: 'O'}, - 'args_capi': {isrequired: ',&#varname#_capi'}, - 'keys_capi': {isoptional: ',&#varname#_capi'}, - 'pyobjfrom': {isintent_inout: '''\ -\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,#varname#); -\tif (f2py_success) {'''}, - 'closepyobjfrom': {isintent_inout: '\t} /*if (f2py_success) of #varname# pyobjfrom*/'}, - 'need': {isintent_inout: 'try_pyarr_from_#ctype#'}, - '_check': l_and(isstring, isintent_nothide) - }, { # Hidden - '_check': l_and(isstring, isintent_hide) - }, { - 'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, - '_check': isstring, - '_depend': '' - }, - # Array - { # Common - 'decl': ['\t#ctype# *#varname# = NULL;', - '\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', - '\tconst int #varname#_Rank = #rank#;', - '\tPyArrayObject *capi_#varname#_tmp = NULL;', - '\tint capi_#varname#_intent = 0;', - ], - 'callfortran':'#varname#,', - 'return':{isintent_out: ',capi_#varname#_tmp'}, - 'need': 'len..', - '_check': isarray - }, { # intent(overwrite) array - 'decl': '\tint capi_overwrite_#varname# = 1;', - 'kwlistxa': '"overwrite_#varname#",', - 'xaformat': 'i', - 'keys_xa': ',&capi_overwrite_#varname#', - 'docsignxa': 'overwrite_#varname#=1,', - 'docsignxashort': 'overwrite_#varname#,', - 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 1', - '_check': l_and(isarray, isintent_overwrite), - }, { - 'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', - '_check': l_and(isarray, 
isintent_overwrite), - '_depend': '', - }, - { # intent(copy) array - 'decl': '\tint capi_overwrite_#varname# = 0;', - 'kwlistxa': '"overwrite_#varname#",', - 'xaformat': 'i', - 'keys_xa': ',&capi_overwrite_#varname#', - 'docsignxa': 'overwrite_#varname#=0,', - 'docsignxashort': 'overwrite_#varname#,', - 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 0', - '_check': l_and(isarray, isintent_copy), - }, { - 'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', - '_check': l_and(isarray, isintent_copy), - '_depend': '', - }, { - 'need': [{hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}], - '_check': isarray, - '_depend': '' - }, { # Not hidden - 'decl': '\tPyObject *#varname#_capi = Py_None;', - 'argformat': {isrequired: 'O'}, - 'keyformat': {isoptional: 'O'}, - 'args_capi': {isrequired: ',&#varname#_capi'}, - 'keys_capi': {isoptional: ',&#varname#_capi'}, - '_check': l_and(isarray, isintent_nothide) - }, { - 'frompyobj': ['\t#setdims#;', - '\tcapi_#varname#_intent |= #intent#;', - {isintent_hide: - '\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'}, - {isintent_nothide: - '\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'}, - """\ -\tif (capi_#varname#_tmp == NULL) { -\t\tif (!PyErr_Occurred()) -\t\t\tPyErr_SetString(#modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" ); -\t} else { -\t\t#varname# = (#ctype# *)(PyArray_DATA(capi_#varname#_tmp)); -""", - {hasinitvalue: [ - {isintent_nothide: - '\tif (#varname#_capi == Py_None) {'}, - {isintent_hide: '\t{'}, - {iscomplexarray: '\t\t#ctype# capi_c;'}, - """\ -\t\tint *_i,capi_i=0; -\t\tCFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\"); -\t\tif (initforcomb(PyArray_DIMS(capi_#varname#_tmp),PyArray_NDIM(capi_#varname#_tmp),1)) { -\t\t\twhile ((_i = nextforcomb())) 
-\t\t\t\t#varname#[capi_i++] = #init#; /* fortran way */ -\t\t} else { -\t\t\tif (!PyErr_Occurred()) -\t\t\t\tPyErr_SetString(#modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\"); -\t\t\tf2py_success = 0; -\t\t} -\t} -\tif (f2py_success) {"""]}, - ], - 'cleanupfrompyobj': [ # note that this list will be reversed - '\t} /*if (capi_#varname#_tmp == NULL) ... else of #varname#*/', - {l_not(l_or(isintent_out, isintent_hide)): """\ -\tif((PyObject *)capi_#varname#_tmp!=#varname#_capi) { -\t\tPy_XDECREF(capi_#varname#_tmp); }"""}, - {l_and(isintent_hide, l_not(isintent_out)) - : """\t\tPy_XDECREF(capi_#varname#_tmp);"""}, - {hasinitvalue: '\t} /*if (f2py_success) of #varname# init*/'}, - ], - '_check': isarray, - '_depend': '' - }, - # Scalararray - { # Common - '_check': l_and(isarray, l_not(iscomplexarray)) - }, { # Not hidden - '_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide) - }, - # Integer*1 array - {'need': '#ctype#', - '_check': isint1array, - '_depend': '' - }, - # Integer*-1 array - {'need': '#ctype#', - '_check': isunsigned_chararray, - '_depend': '' - }, - # Integer*-2 array - {'need': '#ctype#', - '_check': isunsigned_shortarray, - '_depend': '' - }, - # Integer*-8 array - {'need': '#ctype#', - '_check': isunsigned_long_longarray, - '_depend': '' - }, - # Complexarray - {'need': '#ctype#', - '_check': iscomplexarray, - '_depend': '' - }, - # Stringarray - { - 'callfortranappend': {isarrayofstrings: 'flen(#varname#),'}, - 'need': 'string', - '_check': isstringarray - } -] - -################# Rules for checking ############### - -check_rules = [ - { - 'frompyobj': {debugcapi: '\tfprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'}, - 'need': 'len..' 
- }, { - 'frompyobj': '\tCHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', - 'cleanupfrompyobj': '\t} /*CHECKSCALAR(#check#)*/', - 'need': 'CHECKSCALAR', - '_check': l_and(isscalar, l_not(iscomplex)), - '_break': '' - }, { - 'frompyobj': '\tCHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', - 'cleanupfrompyobj': '\t} /*CHECKSTRING(#check#)*/', - 'need': 'CHECKSTRING', - '_check': isstring, - '_break': '' - }, { - 'need': 'CHECKARRAY', - 'frompyobj': '\tCHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {', - 'cleanupfrompyobj': '\t} /*CHECKARRAY(#check#)*/', - '_check': isarray, - '_break': '' - }, { - 'need': 'CHECKGENERIC', - 'frompyobj': '\tCHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {', - 'cleanupfrompyobj': '\t} /*CHECKGENERIC(#check#)*/', - } -] - -########## Applying the rules. No need to modify what follows ############# - -#################### Build C/API module ####################### - - -def buildmodule(m, um): - """ - Return - """ - global f2py_version, options - outmess('\tBuilding module "%s"...\n' % (m['name'])) - ret = {} - mod_rules = defmod_rules[:] - vrd = capi_maps.modsign2map(m) - rd = dictappend({'f2py_version': f2py_version}, vrd) - funcwrappers = [] - funcwrappers2 = [] # F90 codes - for n in m['interfaced']: - nb = None - for bi in m['body']: - if not bi['block'] == 'interface': - errmess('buildmodule: Expected interface block. Skipping.\n') - continue - for b in bi['body']: - if b['name'] == n: - nb = b - break - - if not nb: - errmess( - 'buildmodule: Could not found the body of interfaced routine "%s". 
Skipping.\n' % (n)) - continue - nb_list = [nb] - if 'entry' in nb: - for k, a in nb['entry'].items(): - nb1 = copy.deepcopy(nb) - del nb1['entry'] - nb1['name'] = k - nb1['args'] = a - nb_list.append(nb1) - for nb in nb_list: - api, wrap = buildapi(nb) - if wrap: - if ismoduleroutine(nb): - funcwrappers2.append(wrap) - else: - funcwrappers.append(wrap) - ar = applyrules(api, vrd) - rd = dictappend(rd, ar) - - # Construct COMMON block support - cr, wrap = common_rules.buildhooks(m) - if wrap: - funcwrappers.append(wrap) - ar = applyrules(cr, vrd) - rd = dictappend(rd, ar) - - # Construct F90 module support - mr, wrap = f90mod_rules.buildhooks(m) - if wrap: - funcwrappers2.append(wrap) - ar = applyrules(mr, vrd) - rd = dictappend(rd, ar) - - for u in um: - ar = use_rules.buildusevars(u, m['use'][u['name']]) - rd = dictappend(rd, ar) - - needs = cfuncs.get_needs() - code = {} - for n in needs.keys(): - code[n] = [] - for k in needs[n]: - c = '' - if k in cfuncs.includes0: - c = cfuncs.includes0[k] - elif k in cfuncs.includes: - c = cfuncs.includes[k] - elif k in cfuncs.userincludes: - c = cfuncs.userincludes[k] - elif k in cfuncs.typedefs: - c = cfuncs.typedefs[k] - elif k in cfuncs.typedefs_generated: - c = cfuncs.typedefs_generated[k] - elif k in cfuncs.cppmacros: - c = cfuncs.cppmacros[k] - elif k in cfuncs.cfuncs: - c = cfuncs.cfuncs[k] - elif k in cfuncs.callbacks: - c = cfuncs.callbacks[k] - elif k in cfuncs.f90modhooks: - c = cfuncs.f90modhooks[k] - elif k in cfuncs.commonhooks: - c = cfuncs.commonhooks[k] - else: - errmess('buildmodule: unknown need %s.\n' % (repr(k))) - continue - code[n].append(c) - mod_rules.append(code) - for r in mod_rules: - if ('_check' in r and r['_check'](m)) or ('_check' not in r): - ar = applyrules(r, vrd, m) - rd = dictappend(rd, ar) - ar = applyrules(module_rules, rd) - - fn = os.path.join(options['buildpath'], vrd['coutput']) - ret['csrc'] = fn - with open(fn, 'w') as f: - f.write(ar['modulebody'].replace('\t', 2 * ' ')) - 
outmess('\tWrote C/API module "%s" to file "%s"\n' % (m['name'], fn)) - - if options['dorestdoc']: - fn = os.path.join( - options['buildpath'], vrd['modulename'] + 'module.rest') - with open(fn, 'w') as f: - f.write('.. -*- rest -*-\n') - f.write('\n'.join(ar['restdoc'])) - outmess('\tReST Documentation is saved to file "%s/%smodule.rest"\n' % - (options['buildpath'], vrd['modulename'])) - if options['dolatexdoc']: - fn = os.path.join( - options['buildpath'], vrd['modulename'] + 'module.tex') - ret['ltx'] = fn - with open(fn, 'w') as f: - f.write( - '%% This file is auto-generated with f2py (version:%s)\n' % (f2py_version)) - if 'shortlatex' not in options: - f.write( - '\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n') - f.write('\n'.join(ar['latexdoc'])) - if 'shortlatex' not in options: - f.write('\\end{document}') - outmess('\tDocumentation is saved to file "%s/%smodule.tex"\n' % - (options['buildpath'], vrd['modulename'])) - if funcwrappers: - wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output']) - ret['fsrc'] = wn - with open(wn, 'w') as f: - f.write('C -*- fortran -*-\n') - f.write( - 'C This file is autogenerated with f2py (version:%s)\n' % (f2py_version)) - f.write( - 'C It contains Fortran 77 wrappers to fortran functions.\n') - lines = [] - for l in ('\n\n'.join(funcwrappers) + '\n').split('\n'): - if l and l[0] == ' ': - while len(l) >= 66: - lines.append(l[:66] + '\n &') - l = l[66:] - lines.append(l + '\n') - else: - lines.append(l + '\n') - lines = ''.join(lines).replace('\n &\n', '\n') - f.write(lines) - outmess('\tFortran 77 wrappers are saved to "%s"\n' % (wn)) - if funcwrappers2: - wn = os.path.join( - options['buildpath'], '%s-f2pywrappers2.f90' % (vrd['modulename'])) - ret['fsrc'] = wn - with open(wn, 'w') as f: - f.write('! -*- f90 -*-\n') - f.write( - '! This file is autogenerated with f2py (version:%s)\n' % (f2py_version)) - f.write( - '! 
It contains Fortran 90 wrappers to fortran functions.\n') - lines = [] - for l in ('\n\n'.join(funcwrappers2) + '\n').split('\n'): - if len(l) > 72 and l[0] == ' ': - lines.append(l[:72] + '&\n &') - l = l[72:] - while len(l) > 66: - lines.append(l[:66] + '&\n &') - l = l[66:] - lines.append(l + '\n') - else: - lines.append(l + '\n') - lines = ''.join(lines).replace('\n &\n', '\n') - f.write(lines) - outmess('\tFortran 90 wrappers are saved to "%s"\n' % (wn)) - return ret - -################## Build C/API function ############# - -stnd = {1: 'st', 2: 'nd', 3: 'rd', 4: 'th', 5: 'th', - 6: 'th', 7: 'th', 8: 'th', 9: 'th', 0: 'th'} - - -def buildapi(rout): - rout, wrap = func2subr.assubr(rout) - args, depargs = getargs2(rout) - capi_maps.depargs = depargs - var = rout['vars'] - - if ismoduleroutine(rout): - outmess('\t\t\tConstructing wrapper function "%s.%s"...\n' % - (rout['modulename'], rout['name'])) - else: - outmess('\t\tConstructing wrapper function "%s"...\n' % (rout['name'])) - # Routine - vrd = capi_maps.routsign2map(rout) - rd = dictappend({}, vrd) - for r in rout_rules: - if ('_check' in r and r['_check'](rout)) or ('_check' not in r): - ar = applyrules(r, vrd, rout) - rd = dictappend(rd, ar) - - # Args - nth, nthk = 0, 0 - savevrd = {} - for a in args: - vrd = capi_maps.sign2map(a, var[a]) - if isintent_aux(var[a]): - _rules = aux_rules - else: - _rules = arg_rules - if not isintent_hide(var[a]): - if not isoptional(var[a]): - nth = nth + 1 - vrd['nth'] = repr(nth) + stnd[nth % 10] + ' argument' - else: - nthk = nthk + 1 - vrd['nth'] = repr(nthk) + stnd[nthk % 10] + ' keyword' - else: - vrd['nth'] = 'hidden' - savevrd[a] = vrd - for r in _rules: - if '_depend' in r: - continue - if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar = applyrules(r, vrd, var[a]) - rd = dictappend(rd, ar) - if '_break' in r: - break - for a in depargs: - if isintent_aux(var[a]): - _rules = aux_rules - else: - _rules = arg_rules - vrd = savevrd[a] - for r in 
_rules: - if '_depend' not in r: - continue - if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar = applyrules(r, vrd, var[a]) - rd = dictappend(rd, ar) - if '_break' in r: - break - if 'check' in var[a]: - for c in var[a]['check']: - vrd['check'] = c - ar = applyrules(check_rules, vrd, var[a]) - rd = dictappend(rd, ar) - if isinstance(rd['cleanupfrompyobj'], list): - rd['cleanupfrompyobj'].reverse() - if isinstance(rd['closepyobjfrom'], list): - rd['closepyobjfrom'].reverse() - rd['docsignature'] = stripcomma(replace('#docsign##docsignopt##docsignxa#', - {'docsign': rd['docsign'], - 'docsignopt': rd['docsignopt'], - 'docsignxa': rd['docsignxa']})) - optargs = stripcomma(replace('#docsignopt##docsignxa#', - {'docsignxa': rd['docsignxashort'], - 'docsignopt': rd['docsignoptshort']} - )) - if optargs == '': - rd['docsignatureshort'] = stripcomma( - replace('#docsign#', {'docsign': rd['docsign']})) - else: - rd['docsignatureshort'] = replace('#docsign#[#docsignopt#]', - {'docsign': rd['docsign'], - 'docsignopt': optargs, - }) - rd['latexdocsignatureshort'] = rd['docsignatureshort'].replace('_', '\\_') - rd['latexdocsignatureshort'] = rd[ - 'latexdocsignatureshort'].replace(',', ', ') - cfs = stripcomma(replace('#callfortran##callfortranappend#', { - 'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']})) - if len(rd['callfortranappend']) > 1: - rd['callcompaqfortran'] = stripcomma(replace('#callfortran# 0,#callfortranappend#', { - 'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']})) - else: - rd['callcompaqfortran'] = cfs - rd['callfortran'] = cfs - if isinstance(rd['docreturn'], list): - rd['docreturn'] = stripcomma( - replace('#docreturn#', {'docreturn': rd['docreturn']})) + ' = ' - rd['docstrsigns'] = [] - rd['latexdocstrsigns'] = [] - for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']: - if k in rd and isinstance(rd[k], list): - rd['docstrsigns'] = rd['docstrsigns'] + rd[k] - k = 
'latex' + k - if k in rd and isinstance(rd[k], list): - rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\ - ['\\begin{description}'] + rd[k][1:] +\ - ['\\end{description}'] - - # Workaround for Python 2.6, 2.6.1 bug: https://bugs.python.org/issue4720 - if rd['keyformat'] or rd['xaformat']: - argformat = rd['argformat'] - if isinstance(argformat, list): - argformat.append('|') - else: - assert isinstance(argformat, str), repr( - (argformat, type(argformat))) - rd['argformat'] += '|' - - ar = applyrules(routine_rules, rd) - if ismoduleroutine(rout): - outmess('\t\t\t %s\n' % (ar['docshort'])) - else: - outmess('\t\t %s\n' % (ar['docshort'])) - return ar, wrap - - -#################### EOF rules.py ####################### diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/setup.py b/venv/lib/python3.7/site-packages/numpy/f2py/setup.py deleted file mode 100644 index a8c1401..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/setup.py +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env python -""" -setup.py for installing F2PY - -Usage: - pip install . - -Copyright 2001-2005 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
-$Revision: 1.32 $ -$Date: 2005/01/30 17:22:14 $ -Pearu Peterson - -""" -from __future__ import division, print_function - -from numpy.distutils.core import setup -from numpy.distutils.misc_util import Configuration - - -from __version__ import version - - -def configuration(parent_package='', top_path=None): - config = Configuration('f2py', parent_package, top_path) - config.add_data_dir('tests') - config.add_data_files( - 'src/fortranobject.c', - 'src/fortranobject.h') - return config - - -if __name__ == "__main__": - - config = configuration(top_path='') - config = config.todict() - - config['download_url'] = "http://cens.ioc.ee/projects/f2py2e/2.x"\ - "/F2PY-2-latest.tar.gz" - config['classifiers'] = [ - 'Development Status :: 5 - Production/Stable', - 'Intended Audience :: Developers', - 'Intended Audience :: Science/Research', - 'License :: OSI Approved :: NumPy License', - 'Natural Language :: English', - 'Operating System :: OS Independent', - 'Programming Language :: C', - 'Programming Language :: Fortran', - 'Programming Language :: Python', - 'Topic :: Scientific/Engineering', - 'Topic :: Software Development :: Code Generators', - ] - setup(version=version, - description="F2PY - Fortran to Python Interface Generator", - author="Pearu Peterson", - author_email="pearu@cens.ioc.ee", - maintainer="Pearu Peterson", - maintainer_email="pearu@cens.ioc.ee", - license="BSD", - platforms="Unix, Windows (mingw|cygwin), Mac OSX", - long_description="""\ -The Fortran to Python Interface Generator, or F2PY for short, is a -command line tool (f2py) for generating Python C/API modules for -wrapping Fortran 77/90/95 subroutines, accessing common blocks from -Python, and calling Python functions from Fortran (call-backs). 
-Interfacing subroutines/data from Fortran 90/95 modules is supported.""", - url="http://cens.ioc.ee/projects/f2py2e/", - keywords=['Fortran', 'f2py'], - **config) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/src/fortranobject.c b/venv/lib/python3.7/site-packages/numpy/f2py/src/fortranobject.c deleted file mode 100644 index 8aa5555..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/src/fortranobject.c +++ /dev/null @@ -1,1109 +0,0 @@ -#define FORTRANOBJECT_C -#include "fortranobject.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include - -/* - This file implements: FortranObject, array_from_pyobj, copy_ND_array - - Author: Pearu Peterson - $Revision: 1.52 $ - $Date: 2005/07/11 07:44:20 $ -*/ - -int -F2PyDict_SetItemString(PyObject *dict, char *name, PyObject *obj) -{ - if (obj==NULL) { - fprintf(stderr, "Error loading %s\n", name); - if (PyErr_Occurred()) { - PyErr_Print(); - PyErr_Clear(); - } - return -1; - } - return PyDict_SetItemString(dict, name, obj); -} - -/************************* FortranObject *******************************/ - -typedef PyObject *(*fortranfunc)(PyObject *,PyObject *,PyObject *,void *); - -PyObject * -PyFortranObject_New(FortranDataDef* defs, f2py_void_func init) { - int i; - PyFortranObject *fp = NULL; - PyObject *v = NULL; - if (init!=NULL) { /* Initialize F90 module objects */ - (*(init))(); - } - fp = PyObject_New(PyFortranObject, &PyFortran_Type); - if (fp == NULL) { - return NULL; - } - if ((fp->dict = PyDict_New()) == NULL) { - Py_DECREF(fp); - return NULL; - } - fp->len = 0; - while (defs[fp->len].name != NULL) { - fp->len++; - } - if (fp->len == 0) { - goto fail; - } - fp->defs = defs; - for (i=0;ilen;i++) { - if (fp->defs[i].rank == -1) { /* Is Fortran routine */ - v = PyFortranObject_NewAsAttr(&(fp->defs[i])); - if (v==NULL) { - goto fail; - } - PyDict_SetItemString(fp->dict,fp->defs[i].name,v); - Py_XDECREF(v); - } else - if ((fp->defs[i].data)!=NULL) { /* Is Fortran variable or array (not 
allocatable) */ - if (fp->defs[i].type == NPY_STRING) { - int n = fp->defs[i].rank-1; - v = PyArray_New(&PyArray_Type, n, fp->defs[i].dims.d, - NPY_STRING, NULL, fp->defs[i].data, fp->defs[i].dims.d[n], - NPY_ARRAY_FARRAY, NULL); - } - else { - v = PyArray_New(&PyArray_Type, fp->defs[i].rank, fp->defs[i].dims.d, - fp->defs[i].type, NULL, fp->defs[i].data, 0, NPY_ARRAY_FARRAY, - NULL); - } - if (v==NULL) { - goto fail; - } - PyDict_SetItemString(fp->dict,fp->defs[i].name,v); - Py_XDECREF(v); - } - } - return (PyObject *)fp; - fail: - Py_XDECREF(fp); - return NULL; -} - -PyObject * -PyFortranObject_NewAsAttr(FortranDataDef* defs) { /* used for calling F90 module routines */ - PyFortranObject *fp = NULL; - fp = PyObject_New(PyFortranObject, &PyFortran_Type); - if (fp == NULL) return NULL; - if ((fp->dict = PyDict_New())==NULL) { - PyObject_Del(fp); - return NULL; - } - fp->len = 1; - fp->defs = defs; - return (PyObject *)fp; -} - -/* Fortran methods */ - -static void -fortran_dealloc(PyFortranObject *fp) { - Py_XDECREF(fp->dict); - PyObject_Del(fp); -} - - -#if PY_VERSION_HEX >= 0x03000000 -#else -static PyMethodDef fortran_methods[] = { - {NULL, NULL} /* sentinel */ -}; -#endif - - -/* Returns number of bytes consumed from buf, or -1 on error. 
*/ -static Py_ssize_t -format_def(char *buf, Py_ssize_t size, FortranDataDef def) -{ - char *p = buf; - int i, n; - - n = PyOS_snprintf(p, size, "array(%" NPY_INTP_FMT, def.dims.d[0]); - if (n < 0 || n >= size) { - return -1; - } - p += n; - size -= n; - - for (i = 1; i < def.rank; i++) { - n = PyOS_snprintf(p, size, ",%" NPY_INTP_FMT, def.dims.d[i]); - if (n < 0 || n >= size) { - return -1; - } - p += n; - size -= n; - } - - if (size <= 0) { - return -1; - } - - *p++ = ')'; - size--; - - if (def.data == NULL) { - static const char notalloc[] = ", not allocated"; - if ((size_t) size < sizeof(notalloc)) { - return -1; - } - memcpy(p, notalloc, sizeof(notalloc)); - } - - return p - buf; -} - -static PyObject * -fortran_doc(FortranDataDef def) -{ - char *buf, *p; - PyObject *s = NULL; - Py_ssize_t n, origsize, size = 100; - - if (def.doc != NULL) { - size += strlen(def.doc); - } - origsize = size; - buf = p = (char *)PyMem_Malloc(size); - if (buf == NULL) { - return PyErr_NoMemory(); - } - - if (def.rank == -1) { - if (def.doc) { - n = strlen(def.doc); - if (n > size) { - goto fail; - } - memcpy(p, def.doc, n); - p += n; - size -= n; - } - else { - n = PyOS_snprintf(p, size, "%s - no docs available", def.name); - if (n < 0 || n >= size) { - goto fail; - } - p += n; - size -= n; - } - } - else { - PyArray_Descr *d = PyArray_DescrFromType(def.type); - n = PyOS_snprintf(p, size, "'%c'-", d->type); - Py_DECREF(d); - if (n < 0 || n >= size) { - goto fail; - } - p += n; - size -= n; - - if (def.data == NULL) { - n = format_def(p, size, def) == -1; - if (n < 0) { - goto fail; - } - p += n; - size -= n; - } - else if (def.rank > 0) { - n = format_def(p, size, def); - if (n < 0) { - goto fail; - } - p += n; - size -= n; - } - else { - n = strlen("scalar"); - if (size < n) { - goto fail; - } - memcpy(p, "scalar", n); - p += n; - size -= n; - } - } - if (size <= 1) { - goto fail; - } - *p++ = '\n'; - size--; - - /* p now points one beyond the last character of the string in buf 
*/ -#if PY_VERSION_HEX >= 0x03000000 - s = PyUnicode_FromStringAndSize(buf, p - buf); -#else - s = PyString_FromStringAndSize(buf, p - buf); -#endif - - PyMem_Free(buf); - return s; - - fail: - fprintf(stderr, "fortranobject.c: fortran_doc: len(p)=%zd>%zd=size:" - " too long docstring required, increase size\n", - p - buf, origsize); - PyMem_Free(buf); - return NULL; -} - -static FortranDataDef *save_def; /* save pointer of an allocatable array */ -static void set_data(char *d,npy_intp *f) { /* callback from Fortran */ - if (*f) /* In fortran f=allocated(d) */ - save_def->data = d; - else - save_def->data = NULL; - /* printf("set_data: d=%p,f=%d\n",d,*f); */ -} - -static PyObject * -fortran_getattr(PyFortranObject *fp, char *name) { - int i,j,k,flag; - if (fp->dict != NULL) { - PyObject *v = PyDict_GetItemString(fp->dict, name); - if (v != NULL) { - Py_INCREF(v); - return v; - } - } - for (i=0,j=1;ilen && (j=strcmp(name,fp->defs[i].name));i++); - if (j==0) - if (fp->defs[i].rank!=-1) { /* F90 allocatable array */ - if (fp->defs[i].func==NULL) return NULL; - for(k=0;kdefs[i].rank;++k) - fp->defs[i].dims.d[k]=-1; - save_def = &fp->defs[i]; - (*(fp->defs[i].func))(&fp->defs[i].rank,fp->defs[i].dims.d,set_data,&flag); - if (flag==2) - k = fp->defs[i].rank + 1; - else - k = fp->defs[i].rank; - if (fp->defs[i].data !=NULL) { /* array is allocated */ - PyObject *v = PyArray_New(&PyArray_Type, k, fp->defs[i].dims.d, - fp->defs[i].type, NULL, fp->defs[i].data, 0, NPY_ARRAY_FARRAY, - NULL); - if (v==NULL) return NULL; - /* Py_INCREF(v); */ - return v; - } else { /* array is not allocated */ - Py_RETURN_NONE; - } - } - if (strcmp(name,"__dict__")==0) { - Py_INCREF(fp->dict); - return fp->dict; - } - if (strcmp(name,"__doc__")==0) { -#if PY_VERSION_HEX >= 0x03000000 - PyObject *s = PyUnicode_FromString(""), *s2, *s3; - for (i=0;ilen;i++) { - s2 = fortran_doc(fp->defs[i]); - s3 = PyUnicode_Concat(s, s2); - Py_DECREF(s2); - Py_DECREF(s); - s = s3; - } -#else - PyObject *s = 
PyString_FromString(""); - for (i=0;ilen;i++) - PyString_ConcatAndDel(&s,fortran_doc(fp->defs[i])); -#endif - if (PyDict_SetItemString(fp->dict, name, s)) - return NULL; - return s; - } - if ((strcmp(name,"_cpointer")==0) && (fp->len==1)) { - PyObject *cobj = F2PyCapsule_FromVoidPtr((void *)(fp->defs[0].data),NULL); - if (PyDict_SetItemString(fp->dict, name, cobj)) - return NULL; - return cobj; - } -#if PY_VERSION_HEX >= 0x03000000 - if (1) { - PyObject *str, *ret; - str = PyUnicode_FromString(name); - ret = PyObject_GenericGetAttr((PyObject *)fp, str); - Py_DECREF(str); - return ret; - } -#else - return Py_FindMethod(fortran_methods, (PyObject *)fp, name); -#endif -} - -static int -fortran_setattr(PyFortranObject *fp, char *name, PyObject *v) { - int i,j,flag; - PyArrayObject *arr = NULL; - for (i=0,j=1;ilen && (j=strcmp(name,fp->defs[i].name));i++); - if (j==0) { - if (fp->defs[i].rank==-1) { - PyErr_SetString(PyExc_AttributeError,"over-writing fortran routine"); - return -1; - } - if (fp->defs[i].func!=NULL) { /* is allocatable array */ - npy_intp dims[F2PY_MAX_DIMS]; - int k; - save_def = &fp->defs[i]; - if (v!=Py_None) { /* set new value (reallocate if needed -- - see f2py generated code for more - details ) */ - for(k=0;kdefs[i].rank;k++) dims[k]=-1; - if ((arr = array_from_pyobj(fp->defs[i].type,dims,fp->defs[i].rank,F2PY_INTENT_IN,v))==NULL) - return -1; - (*(fp->defs[i].func))(&fp->defs[i].rank,PyArray_DIMS(arr),set_data,&flag); - } else { /* deallocate */ - for(k=0;kdefs[i].rank;k++) dims[k]=0; - (*(fp->defs[i].func))(&fp->defs[i].rank,dims,set_data,&flag); - for(k=0;kdefs[i].rank;k++) dims[k]=-1; - } - memcpy(fp->defs[i].dims.d,dims,fp->defs[i].rank*sizeof(npy_intp)); - } else { /* not allocatable array */ - if ((arr = array_from_pyobj(fp->defs[i].type,fp->defs[i].dims.d,fp->defs[i].rank,F2PY_INTENT_IN,v))==NULL) - return -1; - } - if (fp->defs[i].data!=NULL) { /* copy Python object to Fortran array */ - npy_intp s = 
PyArray_MultiplyList(fp->defs[i].dims.d,PyArray_NDIM(arr)); - if (s==-1) - s = PyArray_MultiplyList(PyArray_DIMS(arr),PyArray_NDIM(arr)); - if (s<0 || - (memcpy(fp->defs[i].data,PyArray_DATA(arr),s*PyArray_ITEMSIZE(arr)))==NULL) { - if ((PyObject*)arr!=v) { - Py_DECREF(arr); - } - return -1; - } - if ((PyObject*)arr!=v) { - Py_DECREF(arr); - } - } else return (fp->defs[i].func==NULL?-1:0); - return 0; /* successful */ - } - if (fp->dict == NULL) { - fp->dict = PyDict_New(); - if (fp->dict == NULL) - return -1; - } - if (v == NULL) { - int rv = PyDict_DelItemString(fp->dict, name); - if (rv < 0) - PyErr_SetString(PyExc_AttributeError,"delete non-existing fortran attribute"); - return rv; - } - else - return PyDict_SetItemString(fp->dict, name, v); -} - -static PyObject* -fortran_call(PyFortranObject *fp, PyObject *arg, PyObject *kw) { - int i = 0; - /* printf("fortran call - name=%s,func=%p,data=%p,%p\n",fp->defs[i].name, - fp->defs[i].func,fp->defs[i].data,&fp->defs[i].data); */ - if (fp->defs[i].rank==-1) {/* is Fortran routine */ - if (fp->defs[i].func==NULL) { - PyErr_Format(PyExc_RuntimeError, "no function to call"); - return NULL; - } - else if (fp->defs[i].data==NULL) - /* dummy routine */ - return (*((fortranfunc)(fp->defs[i].func)))((PyObject *)fp,arg,kw,NULL); - else - return (*((fortranfunc)(fp->defs[i].func)))((PyObject *)fp,arg,kw, - (void *)fp->defs[i].data); - } - PyErr_Format(PyExc_TypeError, "this fortran object is not callable"); - return NULL; -} - -static PyObject * -fortran_repr(PyFortranObject *fp) -{ - PyObject *name = NULL, *repr = NULL; - name = PyObject_GetAttrString((PyObject *)fp, "__name__"); - PyErr_Clear(); -#if PY_VERSION_HEX >= 0x03000000 - if (name != NULL && PyUnicode_Check(name)) { - repr = PyUnicode_FromFormat("", name); - } - else { - repr = PyUnicode_FromString(""); - } -#else - if (name != NULL && PyString_Check(name)) { - repr = PyString_FromFormat("", PyString_AsString(name)); - } - else { - repr = PyString_FromString(""); - 
} -#endif - Py_XDECREF(name); - return repr; -} - - -PyTypeObject PyFortran_Type = { -#if PY_VERSION_HEX >= 0x03000000 - PyVarObject_HEAD_INIT(NULL, 0) -#else - PyObject_HEAD_INIT(0) - 0, /*ob_size*/ -#endif - "fortran", /*tp_name*/ - sizeof(PyFortranObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - /* methods */ - (destructor)fortran_dealloc, /*tp_dealloc*/ - 0, /*tp_print*/ - (getattrfunc)fortran_getattr, /*tp_getattr*/ - (setattrfunc)fortran_setattr, /*tp_setattr*/ - 0, /*tp_compare/tp_reserved*/ - (reprfunc)fortran_repr, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - (ternaryfunc)fortran_call, /*tp_call*/ -}; - -/************************* f2py_report_atexit *******************************/ - -#ifdef F2PY_REPORT_ATEXIT -static int passed_time = 0; -static int passed_counter = 0; -static int passed_call_time = 0; -static struct timeb start_time; -static struct timeb stop_time; -static struct timeb start_call_time; -static struct timeb stop_call_time; -static int cb_passed_time = 0; -static int cb_passed_counter = 0; -static int cb_passed_call_time = 0; -static struct timeb cb_start_time; -static struct timeb cb_stop_time; -static struct timeb cb_start_call_time; -static struct timeb cb_stop_call_time; - -extern void f2py_start_clock(void) { ftime(&start_time); } -extern -void f2py_start_call_clock(void) { - f2py_stop_clock(); - ftime(&start_call_time); -} -extern -void f2py_stop_clock(void) { - ftime(&stop_time); - passed_time += 1000*(stop_time.time - start_time.time); - passed_time += stop_time.millitm - start_time.millitm; -} -extern -void f2py_stop_call_clock(void) { - ftime(&stop_call_time); - passed_call_time += 1000*(stop_call_time.time - start_call_time.time); - passed_call_time += stop_call_time.millitm - start_call_time.millitm; - passed_counter += 1; - f2py_start_clock(); -} - -extern void f2py_cb_start_clock(void) { ftime(&cb_start_time); } -extern -void f2py_cb_start_call_clock(void) { - 
f2py_cb_stop_clock(); - ftime(&cb_start_call_time); -} -extern -void f2py_cb_stop_clock(void) { - ftime(&cb_stop_time); - cb_passed_time += 1000*(cb_stop_time.time - cb_start_time.time); - cb_passed_time += cb_stop_time.millitm - cb_start_time.millitm; -} -extern -void f2py_cb_stop_call_clock(void) { - ftime(&cb_stop_call_time); - cb_passed_call_time += 1000*(cb_stop_call_time.time - cb_start_call_time.time); - cb_passed_call_time += cb_stop_call_time.millitm - cb_start_call_time.millitm; - cb_passed_counter += 1; - f2py_cb_start_clock(); -} - -static int f2py_report_on_exit_been_here = 0; -extern -void f2py_report_on_exit(int exit_flag,void *name) { - if (f2py_report_on_exit_been_here) { - fprintf(stderr," %s\n",(char*)name); - return; - } - f2py_report_on_exit_been_here = 1; - fprintf(stderr," /-----------------------\\\n"); - fprintf(stderr," < F2PY performance report >\n"); - fprintf(stderr," \\-----------------------/\n"); - fprintf(stderr,"Overall time spent in ...\n"); - fprintf(stderr,"(a) wrapped (Fortran/C) functions : %8d msec\n", - passed_call_time); - fprintf(stderr,"(b) f2py interface, %6d calls : %8d msec\n", - passed_counter,passed_time); - fprintf(stderr,"(c) call-back (Python) functions : %8d msec\n", - cb_passed_call_time); - fprintf(stderr,"(d) f2py call-back interface, %6d calls : %8d msec\n", - cb_passed_counter,cb_passed_time); - - fprintf(stderr,"(e) wrapped (Fortran/C) functions (actual) : %8d msec\n\n", - passed_call_time-cb_passed_call_time-cb_passed_time); - fprintf(stderr,"Use -DF2PY_REPORT_ATEXIT_DISABLE to disable this message.\n"); - fprintf(stderr,"Exit status: %d\n",exit_flag); - fprintf(stderr,"Modules : %s\n",(char*)name); -} -#endif - -/********************** report on array copy ****************************/ - -#ifdef F2PY_REPORT_ON_ARRAY_COPY -static void f2py_report_on_array_copy(PyArrayObject* arr) { - const npy_intp arr_size = PyArray_Size((PyObject *)arr); - if (arr_size>F2PY_REPORT_ON_ARRAY_COPY) { - 
fprintf(stderr,"copied an array: size=%ld, elsize=%"NPY_INTP_FMT"\n", - arr_size, (npy_intp)PyArray_ITEMSIZE(arr)); - } -} -static void f2py_report_on_array_copy_fromany(void) { - fprintf(stderr,"created an array from object\n"); -} - -#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR f2py_report_on_array_copy((PyArrayObject *)arr) -#define F2PY_REPORT_ON_ARRAY_COPY_FROMANY f2py_report_on_array_copy_fromany() -#else -#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR -#define F2PY_REPORT_ON_ARRAY_COPY_FROMANY -#endif - - -/************************* array_from_obj *******************************/ - -/* - * File: array_from_pyobj.c - * - * Description: - * ------------ - * Provides array_from_pyobj function that returns a contiguous array - * object with the given dimensions and required storage order, either - * in row-major (C) or column-major (Fortran) order. The function - * array_from_pyobj is very flexible about its Python object argument - * that can be any number, list, tuple, or array. - * - * array_from_pyobj is used in f2py generated Python extension - * modules. 
- * - * Author: Pearu Peterson - * Created: 13-16 January 2002 - * $Id: fortranobject.c,v 1.52 2005/07/11 07:44:20 pearu Exp $ - */ - -static int check_and_fix_dimensions(const PyArrayObject* arr, - const int rank, - npy_intp *dims); - -static int -count_negative_dimensions(const int rank, - const npy_intp *dims) { - int i=0,r=0; - while (iflags,size); - printf("\tstrides = "); - dump_dims(rank,arr->strides); - printf("\tdimensions = "); - dump_dims(rank,arr->dimensions); -} -#endif - -#define SWAPTYPE(a,b,t) {t c; c = (a); (a) = (b); (b) = c; } - -static int swap_arrays(PyArrayObject* obj1, PyArrayObject* obj2) { - PyArrayObject_fields *arr1 = (PyArrayObject_fields*) obj1, - *arr2 = (PyArrayObject_fields*) obj2; - SWAPTYPE(arr1->data,arr2->data,char*); - SWAPTYPE(arr1->nd,arr2->nd,int); - SWAPTYPE(arr1->dimensions,arr2->dimensions,npy_intp*); - SWAPTYPE(arr1->strides,arr2->strides,npy_intp*); - SWAPTYPE(arr1->base,arr2->base,PyObject*); - SWAPTYPE(arr1->descr,arr2->descr,PyArray_Descr*); - SWAPTYPE(arr1->flags,arr2->flags,int); - /* SWAPTYPE(arr1->weakreflist,arr2->weakreflist,PyObject*); */ - return 0; -} - -#define ARRAY_ISCOMPATIBLE(arr,type_num) \ - ( (PyArray_ISINTEGER(arr) && PyTypeNum_ISINTEGER(type_num)) \ - ||(PyArray_ISFLOAT(arr) && PyTypeNum_ISFLOAT(type_num)) \ - ||(PyArray_ISCOMPLEX(arr) && PyTypeNum_ISCOMPLEX(type_num)) \ - ||(PyArray_ISBOOL(arr) && PyTypeNum_ISBOOL(type_num)) \ - ) - -extern -PyArrayObject* array_from_pyobj(const int type_num, - npy_intp *dims, - const int rank, - const int intent, - PyObject *obj) { - /* - * Note about reference counting - * ----------------------------- - * If the caller returns the array to Python, it must be done with - * Py_BuildValue("N",arr). - * Otherwise, if obj!=arr then the caller must call Py_DECREF(arr). - * - * Note on intent(cache,out,..) - * --------------------- - * Don't expect correct data when returning intent(cache) array. 
- * - */ - char mess[200]; - PyArrayObject *arr = NULL; - PyArray_Descr *descr; - char typechar; - int elsize; - - if ((intent & F2PY_INTENT_HIDE) - || ((intent & F2PY_INTENT_CACHE) && (obj==Py_None)) - || ((intent & F2PY_OPTIONAL) && (obj==Py_None)) - ) { - /* intent(cache), optional, intent(hide) */ - if (count_negative_dimensions(rank,dims) > 0) { - int i; - strcpy(mess, "failed to create intent(cache|hide)|optional array" - "-- must have defined dimensions but got ("); - for(i=0;ielsize = 1; - descr->type = NPY_CHARLTR; - } - elsize = descr->elsize; - typechar = descr->type; - Py_DECREF(descr); - if (PyArray_Check(obj)) { - arr = (PyArrayObject *)obj; - - if (intent & F2PY_INTENT_CACHE) { - /* intent(cache) */ - if (PyArray_ISONESEGMENT(arr) - && PyArray_ITEMSIZE(arr)>=elsize) { - if (check_and_fix_dimensions(arr, rank, dims)) { - return NULL; - } - if (intent & F2PY_INTENT_OUT) - Py_INCREF(arr); - return arr; - } - strcpy(mess, "failed to initialize intent(cache) array"); - if (!PyArray_ISONESEGMENT(arr)) - strcat(mess, " -- input must be in one segment"); - if (PyArray_ITEMSIZE(arr)type,typechar); - if (!(F2PY_CHECK_ALIGNMENT(arr, intent))) - sprintf(mess+strlen(mess)," -- input not %d-aligned", F2PY_GET_ALIGNMENT(intent)); - PyErr_SetString(PyExc_ValueError,mess); - return NULL; - } - - /* here we have always intent(in) or intent(inplace) */ - - { - PyArrayObject * retarr; - retarr = (PyArrayObject *) \ - PyArray_New(&PyArray_Type, PyArray_NDIM(arr), PyArray_DIMS(arr), type_num, - NULL,NULL,1, - !(intent&F2PY_INTENT_C), - NULL); - if (retarr==NULL) - return NULL; - F2PY_REPORT_ON_ARRAY_COPY_FROMARR; - if (PyArray_CopyInto(retarr, arr)) { - Py_DECREF(retarr); - return NULL; - } - if (intent & F2PY_INTENT_INPLACE) { - if (swap_arrays(arr,retarr)) - return NULL; /* XXX: set exception */ - Py_XDECREF(retarr); - if (intent & F2PY_INTENT_OUT) - Py_INCREF(arr); - } else { - arr = retarr; - } - } - return arr; - } - - if ((intent & F2PY_INTENT_INOUT) || - (intent & 
F2PY_INTENT_INPLACE) || - (intent & F2PY_INTENT_CACHE)) { - PyErr_SetString(PyExc_TypeError, - "failed to initialize intent(inout|inplace|cache) " - "array, input not an array"); - return NULL; - } - - { - PyArray_Descr * descr = PyArray_DescrFromType(type_num); - /* compatibility with NPY_CHAR */ - if (type_num == NPY_STRING) { - PyArray_DESCR_REPLACE(descr); - if (descr == NULL) { - return NULL; - } - descr->elsize = 1; - descr->type = NPY_CHARLTR; - } - F2PY_REPORT_ON_ARRAY_COPY_FROMANY; - arr = (PyArrayObject *) \ - PyArray_FromAny(obj, descr, 0,0, - ((intent & F2PY_INTENT_C)?NPY_ARRAY_CARRAY:NPY_ARRAY_FARRAY) \ - | NPY_ARRAY_FORCECAST, NULL); - if (arr==NULL) - return NULL; - if (check_and_fix_dimensions(arr, rank, dims)) { - return NULL; - } - return arr; - } - -} - -/*****************************************/ -/* Helper functions for array_from_pyobj */ -/*****************************************/ - -static -int check_and_fix_dimensions(const PyArrayObject* arr, const int rank, npy_intp *dims) -{ - /* - * This function fills in blanks (that are -1's) in dims list using - * the dimensions from arr. It also checks that non-blank dims will - * match with the corresponding values in arr dimensions. - * - * Returns 0 if the function is successful. - * - * If an error condition is detected, an exception is set and 1 is returned. 
- */ - const npy_intp arr_size = (PyArray_NDIM(arr))?PyArray_Size((PyObject *)arr):1; -#ifdef DEBUG_COPY_ND_ARRAY - dump_attrs(arr); - printf("check_and_fix_dimensions:init: dims="); - dump_dims(rank,dims); -#endif - if (rank > PyArray_NDIM(arr)) { /* [1,2] -> [[1],[2]]; 1 -> [[1]] */ - npy_intp new_size = 1; - int free_axe = -1; - int i; - npy_intp d; - /* Fill dims where -1 or 0; check dimensions; calc new_size; */ - for(i=0;i= 0) { - if (d>1 && dims[i]!=d) { - PyErr_Format(PyExc_ValueError, - "%d-th dimension must be fixed to %" - NPY_INTP_FMT " but got %" NPY_INTP_FMT "\n", - i, dims[i], d); - return 1; - } - if (!dims[i]) dims[i] = 1; - } else { - dims[i] = d ? d : 1; - } - new_size *= dims[i]; - } - for(i=PyArray_NDIM(arr);i1) { - PyErr_Format(PyExc_ValueError, - "%d-th dimension must be %" NPY_INTP_FMT - " but got 0 (not defined).\n", - i, dims[i]); - return 1; - } else if (free_axe<0) - free_axe = i; - else - dims[i] = 1; - if (free_axe>=0) { - dims[free_axe] = arr_size/new_size; - new_size *= dims[free_axe]; - } - if (new_size != arr_size) { - PyErr_Format(PyExc_ValueError, - "unexpected array size: new_size=%" NPY_INTP_FMT - ", got array with arr_size=%" NPY_INTP_FMT - " (maybe too many free indices)\n", - new_size, arr_size); - return 1; - } - } else if (rank==PyArray_NDIM(arr)) { - npy_intp new_size = 1; - int i; - npy_intp d; - for (i=0; i=0) { - if (d > 1 && d!=dims[i]) { - PyErr_Format(PyExc_ValueError, - "%d-th dimension must be fixed to %" - NPY_INTP_FMT " but got %" NPY_INTP_FMT "\n", - i, dims[i], d); - return 1; - } - if (!dims[i]) dims[i] = 1; - } else dims[i] = d; - new_size *= dims[i]; - } - if (new_size != arr_size) { - PyErr_Format(PyExc_ValueError, - "unexpected array size: new_size=%" NPY_INTP_FMT - ", got array with arr_size=%" NPY_INTP_FMT "\n", - new_size, arr_size); - return 1; - } - } else { /* [[1,2]] -> [[1],[2]] */ - int i,j; - npy_intp d; - int effrank; - npy_intp size; - for (i=0,effrank=0;i1) ++effrank; - if (dims[rank-1]>=0) - 
if (effrank>rank) { - PyErr_Format(PyExc_ValueError, - "too many axes: %d (effrank=%d), " - "expected rank=%d\n", - PyArray_NDIM(arr), effrank, rank); - return 1; - } - - for (i=0,j=0;i=PyArray_NDIM(arr)) d = 1; - else d = PyArray_DIM(arr,j++); - if (dims[i]>=0) { - if (d>1 && d!=dims[i]) { - PyErr_Format(PyExc_ValueError, - "%d-th dimension must be fixed to %" - NPY_INTP_FMT " but got %" NPY_INTP_FMT - " (real index=%d)\n", - i, dims[i], d, j-1); - return 1; - } - if (!dims[i]) dims[i] = 1; - } else - dims[i] = d; - } - - for (i=rank;i [1,2,3,4] */ - while (j=PyArray_NDIM(arr)) d = 1; - else d = PyArray_DIM(arr,j++); - dims[rank-1] *= d; - } - for (i=0,size=1;i= 0x03000000 -#define PyString_Check PyBytes_Check -#define PyString_GET_SIZE PyBytes_GET_SIZE -#define PyString_AS_STRING PyBytes_AS_STRING -#define PyString_FromString PyBytes_FromString -#define PyUString_FromStringAndSize PyUnicode_FromStringAndSize -#define PyString_ConcatAndDel PyBytes_ConcatAndDel -#define PyString_AsString PyBytes_AsString - -#define PyInt_Check PyLong_Check -#define PyInt_FromLong PyLong_FromLong -#define PyInt_AS_LONG PyLong_AsLong -#define PyInt_AsLong PyLong_AsLong - -#define PyNumber_Int PyNumber_Long - -#else - -#define PyUString_FromStringAndSize PyString_FromStringAndSize -#endif - - -#ifdef F2PY_REPORT_ATEXIT -#include - extern void f2py_start_clock(void); - extern void f2py_stop_clock(void); - extern void f2py_start_call_clock(void); - extern void f2py_stop_call_clock(void); - extern void f2py_cb_start_clock(void); - extern void f2py_cb_stop_clock(void); - extern void f2py_cb_start_call_clock(void); - extern void f2py_cb_stop_call_clock(void); - extern void f2py_report_on_exit(int,void*); -#endif - -#ifdef DMALLOC -#include "dmalloc.h" -#endif - -/* Fortran object interface */ - -/* -123456789-123456789-123456789-123456789-123456789-123456789-123456789-12 - -PyFortranObject represents various Fortran objects: -Fortran (module) routines, COMMON blocks, module data. 
- -Author: Pearu Peterson -*/ - -#define F2PY_MAX_DIMS 40 - -typedef void (*f2py_set_data_func)(char*,npy_intp*); -typedef void (*f2py_void_func)(void); -typedef void (*f2py_init_func)(int*,npy_intp*,f2py_set_data_func,int*); - - /*typedef void* (*f2py_c_func)(void*,...);*/ - -typedef void *(*f2pycfunc)(void); - -typedef struct { - char *name; /* attribute (array||routine) name */ - int rank; /* array rank, 0 for scalar, max is F2PY_MAX_DIMS, - || rank=-1 for Fortran routine */ - struct {npy_intp d[F2PY_MAX_DIMS];} dims; /* dimensions of the array, || not used */ - int type; /* PyArray_ || not used */ - char *data; /* pointer to array || Fortran routine */ - f2py_init_func func; /* initialization function for - allocatable arrays: - func(&rank,dims,set_ptr_func,name,len(name)) - || C/API wrapper for Fortran routine */ - char *doc; /* documentation string; only recommended - for routines. */ -} FortranDataDef; - -typedef struct { - PyObject_HEAD - int len; /* Number of attributes */ - FortranDataDef *defs; /* An array of FortranDataDef's */ - PyObject *dict; /* Fortran object attribute dictionary */ -} PyFortranObject; - -#define PyFortran_Check(op) (Py_TYPE(op) == &PyFortran_Type) -#define PyFortran_Check1(op) (0==strcmp(Py_TYPE(op)->tp_name,"fortran")) - - extern PyTypeObject PyFortran_Type; - extern int F2PyDict_SetItemString(PyObject* dict, char *name, PyObject *obj); - extern PyObject * PyFortranObject_New(FortranDataDef* defs, f2py_void_func init); - extern PyObject * PyFortranObject_NewAsAttr(FortranDataDef* defs); - -#if PY_VERSION_HEX >= 0x03000000 - -PyObject * F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)); -void * F2PyCapsule_AsVoidPtr(PyObject *obj); -int F2PyCapsule_Check(PyObject *ptr); - -#else - -PyObject * F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(void *)); -void * F2PyCapsule_AsVoidPtr(PyObject *ptr); -int F2PyCapsule_Check(PyObject *ptr); - -#endif - -#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & NPY_ARRAY_C_CONTIGUOUS) -#define 
F2PY_INTENT_IN 1 -#define F2PY_INTENT_INOUT 2 -#define F2PY_INTENT_OUT 4 -#define F2PY_INTENT_HIDE 8 -#define F2PY_INTENT_CACHE 16 -#define F2PY_INTENT_COPY 32 -#define F2PY_INTENT_C 64 -#define F2PY_OPTIONAL 128 -#define F2PY_INTENT_INPLACE 256 -#define F2PY_INTENT_ALIGNED4 512 -#define F2PY_INTENT_ALIGNED8 1024 -#define F2PY_INTENT_ALIGNED16 2048 - -#define ARRAY_ISALIGNED(ARR, SIZE) ((size_t)(PyArray_DATA(ARR)) % (SIZE) == 0) -#define F2PY_ALIGN4(intent) (intent & F2PY_INTENT_ALIGNED4) -#define F2PY_ALIGN8(intent) (intent & F2PY_INTENT_ALIGNED8) -#define F2PY_ALIGN16(intent) (intent & F2PY_INTENT_ALIGNED16) - -#define F2PY_GET_ALIGNMENT(intent) \ - (F2PY_ALIGN4(intent) ? 4 : \ - (F2PY_ALIGN8(intent) ? 8 : \ - (F2PY_ALIGN16(intent) ? 16 : 1) )) -#define F2PY_CHECK_ALIGNMENT(arr, intent) ARRAY_ISALIGNED(arr, F2PY_GET_ALIGNMENT(intent)) - - extern PyArrayObject* array_from_pyobj(const int type_num, - npy_intp *dims, - const int rank, - const int intent, - PyObject *obj); - extern int copy_ND_array(const PyArrayObject *in, PyArrayObject *out); - -#ifdef DEBUG_COPY_ND_ARRAY - extern void dump_attrs(const PyArrayObject* arr); -#endif - - -#ifdef __cplusplus -} -#endif -#endif /* !Py_FORTRANOBJECT_H */ diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/__init__.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c deleted file mode 100644 index 978db4e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c +++ /dev/null @@ -1,245 +0,0 @@ -/* File: wrapmodule.c - * This file is auto-generated with f2py (version:2_1330). - * Hand edited by Pearu. - * f2py is a Fortran to Python Interface Generator (FPIG), Second Edition, - * written by Pearu Peterson . 
- * See http://cens.ioc.ee/projects/f2py2e/ - * Generation date: Fri Oct 21 22:41:12 2005 - * $Revision:$ - * $Date:$ - * Do not edit this file directly unless you know what you are doing!!! - */ -#ifdef __cplusplus -extern "C" { -#endif - -/*********************** See f2py2e/cfuncs.py: includes ***********************/ -#include "Python.h" -#include "fortranobject.h" -#include - -static PyObject *wrap_error; -static PyObject *wrap_module; - -/************************************ call ************************************/ -static char doc_f2py_rout_wrap_call[] = "\ -Function signature:\n\ - arr = call(type_num,dims,intent,obj)\n\ -Required arguments:\n" -" type_num : input int\n" -" dims : input int-sequence\n" -" intent : input int\n" -" obj : input python object\n" -"Return objects:\n" -" arr : array"; -static PyObject *f2py_rout_wrap_call(PyObject *capi_self, - PyObject *capi_args) { - PyObject * volatile capi_buildvalue = NULL; - int type_num = 0; - npy_intp *dims = NULL; - PyObject *dims_capi = Py_None; - int rank = 0; - int intent = 0; - PyArrayObject *capi_arr_tmp = NULL; - PyObject *arr_capi = Py_None; - int i; - - if (!PyArg_ParseTuple(capi_args,"iOiO|:wrap.call",\ - &type_num,&dims_capi,&intent,&arr_capi)) - return NULL; - rank = PySequence_Length(dims_capi); - dims = malloc(rank*sizeof(npy_intp)); - for (i=0;ikind, - PyArray_DESCR(arr)->type, - PyArray_TYPE(arr), - PyArray_ITEMSIZE(arr), - PyArray_DESCR(arr)->alignment, - PyArray_FLAGS(arr), - PyArray_ITEMSIZE(arr)); -} - -static PyMethodDef f2py_module_methods[] = { - - {"call",f2py_rout_wrap_call,METH_VARARGS,doc_f2py_rout_wrap_call}, - {"array_attrs",f2py_rout_wrap_attrs,METH_VARARGS,doc_f2py_rout_wrap_attrs}, - {NULL,NULL} -}; - -#if PY_VERSION_HEX >= 0x03000000 -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "test_array_from_pyobj_ext", - NULL, - -1, - f2py_module_methods, - NULL, - NULL, - NULL, - NULL -}; -#endif - -#if PY_VERSION_HEX >= 0x03000000 -#define RETVAL m 
-PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) { -#else -#define RETVAL -PyMODINIT_FUNC inittest_array_from_pyobj_ext(void) { -#endif - PyObject *m,*d, *s; -#if PY_VERSION_HEX >= 0x03000000 - m = wrap_module = PyModule_Create(&moduledef); -#else - m = wrap_module = Py_InitModule("test_array_from_pyobj_ext", f2py_module_methods); -#endif - Py_TYPE(&PyFortran_Type) = &PyType_Type; - import_array(); - if (PyErr_Occurred()) - Py_FatalError("can't initialize module wrap (failed to import numpy)"); - d = PyModule_GetDict(m); - s = PyString_FromString("This module 'wrap' is auto-generated with f2py (version:2_1330).\nFunctions:\n" -" arr = call(type_num,dims,intent,obj)\n" -"."); - PyDict_SetItemString(d, "__doc__", s); - wrap_error = PyErr_NewException ("wrap.error", NULL, NULL); - Py_DECREF(s); - -#define ADDCONST(NAME, CONST) \ - s = PyInt_FromLong(CONST); \ - PyDict_SetItemString(d, NAME, s); \ - Py_DECREF(s) - - ADDCONST("F2PY_INTENT_IN", F2PY_INTENT_IN); - ADDCONST("F2PY_INTENT_INOUT", F2PY_INTENT_INOUT); - ADDCONST("F2PY_INTENT_OUT", F2PY_INTENT_OUT); - ADDCONST("F2PY_INTENT_HIDE", F2PY_INTENT_HIDE); - ADDCONST("F2PY_INTENT_CACHE", F2PY_INTENT_CACHE); - ADDCONST("F2PY_INTENT_COPY", F2PY_INTENT_COPY); - ADDCONST("F2PY_INTENT_C", F2PY_INTENT_C); - ADDCONST("F2PY_OPTIONAL", F2PY_OPTIONAL); - ADDCONST("F2PY_INTENT_INPLACE", F2PY_INTENT_INPLACE); - ADDCONST("NPY_BOOL", NPY_BOOL); - ADDCONST("NPY_BYTE", NPY_BYTE); - ADDCONST("NPY_UBYTE", NPY_UBYTE); - ADDCONST("NPY_SHORT", NPY_SHORT); - ADDCONST("NPY_USHORT", NPY_USHORT); - ADDCONST("NPY_INT", NPY_INT); - ADDCONST("NPY_UINT", NPY_UINT); - ADDCONST("NPY_INTP", NPY_INTP); - ADDCONST("NPY_UINTP", NPY_UINTP); - ADDCONST("NPY_LONG", NPY_LONG); - ADDCONST("NPY_ULONG", NPY_ULONG); - ADDCONST("NPY_LONGLONG", NPY_LONGLONG); - ADDCONST("NPY_ULONGLONG", NPY_ULONGLONG); - ADDCONST("NPY_FLOAT", NPY_FLOAT); - ADDCONST("NPY_DOUBLE", NPY_DOUBLE); - ADDCONST("NPY_LONGDOUBLE", NPY_LONGDOUBLE); - ADDCONST("NPY_CFLOAT", NPY_CFLOAT); 
- ADDCONST("NPY_CDOUBLE", NPY_CDOUBLE); - ADDCONST("NPY_CLONGDOUBLE", NPY_CLONGDOUBLE); - ADDCONST("NPY_OBJECT", NPY_OBJECT); - ADDCONST("NPY_STRING", NPY_STRING); - ADDCONST("NPY_UNICODE", NPY_UNICODE); - ADDCONST("NPY_VOID", NPY_VOID); - ADDCONST("NPY_NTYPES", NPY_NTYPES); - ADDCONST("NPY_NOTYPE", NPY_NOTYPE); - ADDCONST("NPY_USERDEF", NPY_USERDEF); - - ADDCONST("CONTIGUOUS", NPY_ARRAY_C_CONTIGUOUS); - ADDCONST("FORTRAN", NPY_ARRAY_F_CONTIGUOUS); - ADDCONST("OWNDATA", NPY_ARRAY_OWNDATA); - ADDCONST("FORCECAST", NPY_ARRAY_FORCECAST); - ADDCONST("ENSURECOPY", NPY_ARRAY_ENSURECOPY); - ADDCONST("ENSUREARRAY", NPY_ARRAY_ENSUREARRAY); - ADDCONST("ALIGNED", NPY_ARRAY_ALIGNED); - ADDCONST("WRITEABLE", NPY_ARRAY_WRITEABLE); - ADDCONST("UPDATEIFCOPY", NPY_ARRAY_UPDATEIFCOPY); - ADDCONST("WRITEBACKIFCOPY", NPY_ARRAY_WRITEBACKIFCOPY); - - ADDCONST("BEHAVED", NPY_ARRAY_BEHAVED); - ADDCONST("BEHAVED_NS", NPY_ARRAY_BEHAVED_NS); - ADDCONST("CARRAY", NPY_ARRAY_CARRAY); - ADDCONST("FARRAY", NPY_ARRAY_FARRAY); - ADDCONST("CARRAY_RO", NPY_ARRAY_CARRAY_RO); - ADDCONST("FARRAY_RO", NPY_ARRAY_FARRAY_RO); - ADDCONST("DEFAULT", NPY_ARRAY_DEFAULT); - ADDCONST("UPDATE_ALL", NPY_ARRAY_UPDATE_ALL); - -#undef ADDCONST( - - if (PyErr_Occurred()) - Py_FatalError("can't initialize module wrap"); - -#ifdef F2PY_REPORT_ATEXIT - on_exit(f2py_report_on_exit,(void*)"array_from_pyobj.wrap.call"); -#endif - - return RETVAL; -} -#ifdef __cplusplus -} -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap deleted file mode 100644 index 2665f89..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap +++ /dev/null @@ -1 +0,0 @@ -dict(real=dict(rk="double")) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 deleted 
file mode 100644 index b301710..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 +++ /dev/null @@ -1,34 +0,0 @@ - -subroutine sum(x, res) - implicit none - real, intent(in) :: x(:) - real, intent(out) :: res - - integer :: i - - !print *, "sum: size(x) = ", size(x) - - res = 0.0 - - do i = 1, size(x) - res = res + x(i) - enddo - -end subroutine sum - -function fsum(x) result (res) - implicit none - real, intent(in) :: x(:) - real :: res - - integer :: i - - !print *, "fsum: size(x) = ", size(x) - - res = 0.0 - - do i = 1, size(x) - res = res + x(i) - enddo - -end function fsum diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 deleted file mode 100644 index cbe6317..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 +++ /dev/null @@ -1,41 +0,0 @@ - -module mod - -contains - -subroutine sum(x, res) - implicit none - real, intent(in) :: x(:) - real, intent(out) :: res - - integer :: i - - !print *, "sum: size(x) = ", size(x) - - res = 0.0 - - do i = 1, size(x) - res = res + x(i) - enddo - -end subroutine sum - -function fsum(x) result (res) - implicit none - real, intent(in) :: x(:) - real :: res - - integer :: i - - !print *, "fsum: size(x) = ", size(x) - - res = 0.0 - - do i = 1, size(x) - res = res + x(i) - enddo - -end function fsum - - -end module mod diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 deleted file mode 100644 index 337465a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 +++ /dev/null @@ -1,19 +0,0 @@ -subroutine sum_with_use(x, res) - use precision - - implicit none - - real(kind=rk), intent(in) :: x(:) - real(kind=rk), intent(out) :: res - - integer :: i - - !print *, 
"size(x) = ", size(x) - - res = 0.0 - - do i = 1, size(x) - res = res + x(i) - enddo - - end subroutine diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90 deleted file mode 100644 index ed6c70c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90 +++ /dev/null @@ -1,4 +0,0 @@ -module precision - integer, parameter :: rk = selected_real_kind(8) - integer, parameter :: ik = selected_real_kind(4) -end module diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/common/block.f b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/common/block.f deleted file mode 100644 index 7ea7968..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/common/block.f +++ /dev/null @@ -1,11 +0,0 @@ - SUBROUTINE INITCB - DOUBLE PRECISION LONG - CHARACTER STRING - INTEGER OK - - COMMON /BLOCK/ LONG, STRING, OK - LONG = 1.0 - STRING = '2' - OK = 3 - RETURN - END diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/kind/foo.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/kind/foo.f90 deleted file mode 100644 index d3d15cf..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/kind/foo.f90 +++ /dev/null @@ -1,20 +0,0 @@ - - -subroutine selectedrealkind(p, r, res) - implicit none - - integer, intent(in) :: p, r - !f2py integer :: r=0 - integer, intent(out) :: res - res = selected_real_kind(p, r) - -end subroutine - -subroutine selectedintkind(p, res) - implicit none - - integer, intent(in) :: p - integer, intent(out) :: res - res = selected_int_kind(p) - -end subroutine diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/mixed/foo.f b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/mixed/foo.f deleted file mode 100644 index c347425..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/mixed/foo.f +++ /dev/null @@ -1,5 
+0,0 @@ - subroutine bar11(a) -cf2py intent(out) a - integer a - a = 11 - end diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 deleted file mode 100644 index 7543a6a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 +++ /dev/null @@ -1,8 +0,0 @@ - module foo_fixed - contains - subroutine bar12(a) -!f2py intent(out) a - integer a - a = 12 - end subroutine bar12 - end module foo_fixed diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90 deleted file mode 100644 index c1b641f..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90 +++ /dev/null @@ -1,8 +0,0 @@ -module foo_free -contains - subroutine bar13(a) - !f2py intent(out) a - integer a - a = 13 - end subroutine bar13 -end module foo_free diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90 deleted file mode 100644 index ac90ced..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90 +++ /dev/null @@ -1,57 +0,0 @@ -! Check that parameters are correct intercepted. -! Constants with comma separations are commonly -! 
used, for instance Pi = 3._dp -subroutine foo(x) - implicit none - integer, parameter :: sp = selected_real_kind(6) - integer, parameter :: dp = selected_real_kind(15) - integer, parameter :: ii = selected_int_kind(9) - integer, parameter :: il = selected_int_kind(18) - real(dp), intent(inout) :: x - dimension x(3) - real(sp), parameter :: three_s = 3._sp - real(dp), parameter :: three_d = 3._dp - integer(ii), parameter :: three_i = 3_ii - integer(il), parameter :: three_l = 3_il - x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l - x(2) = x(2) * three_s - x(3) = x(3) * three_l - return -end subroutine - - -subroutine foo_no(x) - implicit none - integer, parameter :: sp = selected_real_kind(6) - integer, parameter :: dp = selected_real_kind(15) - integer, parameter :: ii = selected_int_kind(9) - integer, parameter :: il = selected_int_kind(18) - real(dp), intent(inout) :: x - dimension x(3) - real(sp), parameter :: three_s = 3. - real(dp), parameter :: three_d = 3. - integer(ii), parameter :: three_i = 3 - integer(il), parameter :: three_l = 3 - x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l - x(2) = x(2) * three_s - x(3) = x(3) * three_l - return -end subroutine - -subroutine foo_sum(x) - implicit none - integer, parameter :: sp = selected_real_kind(6) - integer, parameter :: dp = selected_real_kind(15) - integer, parameter :: ii = selected_int_kind(9) - integer, parameter :: il = selected_int_kind(18) - real(dp), intent(inout) :: x - dimension x(3) - real(sp), parameter :: three_s = 2._sp + 1._sp - real(dp), parameter :: three_d = 1._dp + 2._dp - integer(ii), parameter :: three_i = 2_ii + 1_ii - integer(il), parameter :: three_l = 1_il + 2_il - x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l - x(2) = x(2) * three_s - x(3) = x(3) * three_l - return -end subroutine diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90 
b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90 deleted file mode 100644 index e51f5e9..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90 +++ /dev/null @@ -1,15 +0,0 @@ -! Check that parameters are correct intercepted. -! Constants with comma separations are commonly -! used, for instance Pi = 3._dp -subroutine foo_compound_int(x) - implicit none - integer, parameter :: ii = selected_int_kind(9) - integer(ii), intent(inout) :: x - dimension x(3) - integer(ii), parameter :: three = 3_ii - integer(ii), parameter :: two = 2_ii - integer(ii), parameter :: six = three * 1_ii * two - - x(1) = x(1) + x(2) + x(3) * six - return -end subroutine diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90 deleted file mode 100644 index aaa83d2..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90 +++ /dev/null @@ -1,22 +0,0 @@ -! Check that parameters are correct intercepted. -! Constants with comma separations are commonly -! 
used, for instance Pi = 3._dp -subroutine foo_int(x) - implicit none - integer, parameter :: ii = selected_int_kind(9) - integer(ii), intent(inout) :: x - dimension x(3) - integer(ii), parameter :: three = 3_ii - x(1) = x(1) + x(2) + x(3) * three - return -end subroutine - -subroutine foo_long(x) - implicit none - integer, parameter :: ii = selected_int_kind(18) - integer(ii), intent(inout) :: x - dimension x(3) - integer(ii), parameter :: three = 3_ii - x(1) = x(1) + x(2) + x(3) * three - return -end subroutine diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90 deleted file mode 100644 index 62c9a5b..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90 +++ /dev/null @@ -1,23 +0,0 @@ -! Check that parameters are correct intercepted. -! Specifically that types of constants without -! compound kind specs are correctly inferred -! adapted Gibbs iteration code from pymc -! for this test case -subroutine foo_non_compound_int(x) - implicit none - integer, parameter :: ii = selected_int_kind(9) - - integer(ii) maxiterates - parameter (maxiterates=2) - - integer(ii) maxseries - parameter (maxseries=2) - - integer(ii) wasize - parameter (wasize=maxiterates*maxseries) - integer(ii), intent(inout) :: x - dimension x(wasize) - - x(1) = x(1) + x(2) + x(3) + x(4) * wasize - return -end subroutine diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90 deleted file mode 100644 index 02ac9dd..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90 +++ /dev/null @@ -1,23 +0,0 @@ -! Check that parameters are correct intercepted. -! Constants with comma separations are commonly -! 
used, for instance Pi = 3._dp -subroutine foo_single(x) - implicit none - integer, parameter :: rp = selected_real_kind(6) - real(rp), intent(inout) :: x - dimension x(3) - real(rp), parameter :: three = 3._rp - x(1) = x(1) + x(2) + x(3) * three - return -end subroutine - -subroutine foo_double(x) - implicit none - integer, parameter :: rp = selected_real_kind(15) - real(rp), intent(inout) :: x - dimension x(3) - real(rp), parameter :: three = 3._rp - x(1) = x(1) + x(2) + x(3) * three - return -end subroutine - diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/regression/inout.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/regression/inout.f90 deleted file mode 100644 index 80cdad9..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/regression/inout.f90 +++ /dev/null @@ -1,9 +0,0 @@ -! Check that intent(in out) translates as intent(inout). -! The separation seems to be a common usage. - subroutine foo(x) - implicit none - real(4), intent(in out) :: x - dimension x(3) - x(1) = x(1) + x(2) + x(3) - return - end diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/size/foo.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/size/foo.f90 deleted file mode 100644 index 5b66f8c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/size/foo.f90 +++ /dev/null @@ -1,44 +0,0 @@ - -subroutine foo(a, n, m, b) - implicit none - - real, intent(in) :: a(n, m) - integer, intent(in) :: n, m - real, intent(out) :: b(size(a, 1)) - - integer :: i - - do i = 1, size(b) - b(i) = sum(a(i,:)) - enddo -end subroutine - -subroutine trans(x,y) - implicit none - real, intent(in), dimension(:,:) :: x - real, intent(out), dimension( size(x,2), size(x,1) ) :: y - integer :: N, M, i, j - N = size(x,1) - M = size(x,2) - DO i=1,N - do j=1,M - y(j,i) = x(i,j) - END DO - END DO -end subroutine trans - -subroutine flatten(x,y) - implicit none - real, intent(in), dimension(:,:) :: x - real, intent(out), dimension( 
size(x) ) :: y - integer :: N, M, i, j, k - N = size(x,1) - M = size(x,2) - k = 1 - DO i=1,N - do j=1,M - y(k) = x(i,j) - k = k + 1 - END DO - END DO -end subroutine flatten diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/string/char.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/string/char.f90 deleted file mode 100644 index bb7985c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/string/char.f90 +++ /dev/null @@ -1,29 +0,0 @@ -MODULE char_test - -CONTAINS - -SUBROUTINE change_strings(strings, n_strs, out_strings) - IMPLICIT NONE - - ! Inputs - INTEGER, INTENT(IN) :: n_strs - CHARACTER, INTENT(IN), DIMENSION(2,n_strs) :: strings - CHARACTER, INTENT(OUT), DIMENSION(2,n_strs) :: out_strings - -!f2py INTEGER, INTENT(IN) :: n_strs -!f2py CHARACTER, INTENT(IN), DIMENSION(2,n_strs) :: strings -!f2py CHARACTER, INTENT(OUT), DIMENSION(2,n_strs) :: strings - - ! Misc. - INTEGER*4 :: j - - - DO j=1, n_strs - out_strings(1,j) = strings(1,j) - out_strings(2,j) = 'A' - END DO - -END SUBROUTINE change_strings - -END MODULE char_test - diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_array_from_pyobj.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_array_from_pyobj.py deleted file mode 100644 index a800901..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_array_from_pyobj.py +++ /dev/null @@ -1,581 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import sys -import copy -import pytest - -from numpy import ( - array, alltrue, ndarray, zeros, dtype, intp, clongdouble - ) -from numpy.testing import assert_, assert_equal -from numpy.core.multiarray import typeinfo -from . 
import util - -wrap = None - - -def setup_module(): - """ - Build the required testing extension module - - """ - global wrap - - # Check compiler availability first - if not util.has_c_compiler(): - pytest.skip("No C compiler available") - - if wrap is None: - config_code = """ - config.add_extension('test_array_from_pyobj_ext', - sources=['wrapmodule.c', 'fortranobject.c'], - define_macros=[]) - """ - d = os.path.dirname(__file__) - src = [os.path.join(d, 'src', 'array_from_pyobj', 'wrapmodule.c'), - os.path.join(d, '..', 'src', 'fortranobject.c'), - os.path.join(d, '..', 'src', 'fortranobject.h')] - wrap = util.build_module_distutils(src, config_code, - 'test_array_from_pyobj_ext') - - -def flags_info(arr): - flags = wrap.array_attrs(arr)[6] - return flags2names(flags) - - -def flags2names(flags): - info = [] - for flagname in ['CONTIGUOUS', 'FORTRAN', 'OWNDATA', 'ENSURECOPY', - 'ENSUREARRAY', 'ALIGNED', 'NOTSWAPPED', 'WRITEABLE', - 'WRITEBACKIFCOPY', 'UPDATEIFCOPY', 'BEHAVED', 'BEHAVED_RO', - 'CARRAY', 'FARRAY' - ]: - if abs(flags) & getattr(wrap, flagname, 0): - info.append(flagname) - return info - - -class Intent(object): - - def __init__(self, intent_list=[]): - self.intent_list = intent_list[:] - flags = 0 - for i in intent_list: - if i == 'optional': - flags |= wrap.F2PY_OPTIONAL - else: - flags |= getattr(wrap, 'F2PY_INTENT_' + i.upper()) - self.flags = flags - - def __getattr__(self, name): - name = name.lower() - if name == 'in_': - name = 'in' - return self.__class__(self.intent_list + [name]) - - def __str__(self): - return 'intent(%s)' % (','.join(self.intent_list)) - - def __repr__(self): - return 'Intent(%r)' % (self.intent_list) - - def is_intent(self, *names): - for name in names: - if name not in self.intent_list: - return False - return True - - def is_intent_exact(self, *names): - return len(self.intent_list) == len(names) and self.is_intent(*names) - -intent = Intent() - -_type_names = ['BOOL', 'BYTE', 'UBYTE', 'SHORT', 'USHORT', 'INT', 
'UINT', - 'LONG', 'ULONG', 'LONGLONG', 'ULONGLONG', - 'FLOAT', 'DOUBLE', 'CFLOAT'] - -_cast_dict = {'BOOL': ['BOOL']} -_cast_dict['BYTE'] = _cast_dict['BOOL'] + ['BYTE'] -_cast_dict['UBYTE'] = _cast_dict['BOOL'] + ['UBYTE'] -_cast_dict['BYTE'] = ['BYTE'] -_cast_dict['UBYTE'] = ['UBYTE'] -_cast_dict['SHORT'] = _cast_dict['BYTE'] + ['UBYTE', 'SHORT'] -_cast_dict['USHORT'] = _cast_dict['UBYTE'] + ['BYTE', 'USHORT'] -_cast_dict['INT'] = _cast_dict['SHORT'] + ['USHORT', 'INT'] -_cast_dict['UINT'] = _cast_dict['USHORT'] + ['SHORT', 'UINT'] - -_cast_dict['LONG'] = _cast_dict['INT'] + ['LONG'] -_cast_dict['ULONG'] = _cast_dict['UINT'] + ['ULONG'] - -_cast_dict['LONGLONG'] = _cast_dict['LONG'] + ['LONGLONG'] -_cast_dict['ULONGLONG'] = _cast_dict['ULONG'] + ['ULONGLONG'] - -_cast_dict['FLOAT'] = _cast_dict['SHORT'] + ['USHORT', 'FLOAT'] -_cast_dict['DOUBLE'] = _cast_dict['INT'] + ['UINT', 'FLOAT', 'DOUBLE'] - -_cast_dict['CFLOAT'] = _cast_dict['FLOAT'] + ['CFLOAT'] - -# 32 bit system malloc typically does not provide the alignment required by -# 16 byte long double types this means the inout intent cannot be satisfied -# and several tests fail as the alignment flag can be randomly true or fals -# when numpy gains an aligned allocator the tests could be enabled again -if ((intp().dtype.itemsize != 4 or clongdouble().dtype.alignment <= 8) and - sys.platform != 'win32'): - _type_names.extend(['LONGDOUBLE', 'CDOUBLE', 'CLONGDOUBLE']) - _cast_dict['LONGDOUBLE'] = _cast_dict['LONG'] + \ - ['ULONG', 'FLOAT', 'DOUBLE', 'LONGDOUBLE'] - _cast_dict['CLONGDOUBLE'] = _cast_dict['LONGDOUBLE'] + \ - ['CFLOAT', 'CDOUBLE', 'CLONGDOUBLE'] - _cast_dict['CDOUBLE'] = _cast_dict['DOUBLE'] + ['CFLOAT', 'CDOUBLE'] - - -class Type(object): - _type_cache = {} - - def __new__(cls, name): - if isinstance(name, dtype): - dtype0 = name - name = None - for n, i in typeinfo.items(): - if not isinstance(i, type) and dtype0.type is i.type: - name = n - break - obj = cls._type_cache.get(name.upper(), None) - 
if obj is not None: - return obj - obj = object.__new__(cls) - obj._init(name) - cls._type_cache[name.upper()] = obj - return obj - - def _init(self, name): - self.NAME = name.upper() - info = typeinfo[self.NAME] - self.type_num = getattr(wrap, 'NPY_' + self.NAME) - assert_equal(self.type_num, info.num) - self.dtype = info.type - self.elsize = info.bits / 8 - self.dtypechar = info.char - - def cast_types(self): - return [self.__class__(_m) for _m in _cast_dict[self.NAME]] - - def all_types(self): - return [self.__class__(_m) for _m in _type_names] - - def smaller_types(self): - bits = typeinfo[self.NAME].alignment - types = [] - for name in _type_names: - if typeinfo[name].alignment < bits: - types.append(Type(name)) - return types - - def equal_types(self): - bits = typeinfo[self.NAME].alignment - types = [] - for name in _type_names: - if name == self.NAME: - continue - if typeinfo[name].alignment == bits: - types.append(Type(name)) - return types - - def larger_types(self): - bits = typeinfo[self.NAME].alignment - types = [] - for name in _type_names: - if typeinfo[name].alignment > bits: - types.append(Type(name)) - return types - - -class Array(object): - - def __init__(self, typ, dims, intent, obj): - self.type = typ - self.dims = dims - self.intent = intent - self.obj_copy = copy.deepcopy(obj) - self.obj = obj - - # arr.dtypechar may be different from typ.dtypechar - self.arr = wrap.call(typ.type_num, dims, intent.flags, obj) - - assert_(isinstance(self.arr, ndarray), repr(type(self.arr))) - - self.arr_attr = wrap.array_attrs(self.arr) - - if len(dims) > 1: - if self.intent.is_intent('c'): - assert_(intent.flags & wrap.F2PY_INTENT_C) - assert_(not self.arr.flags['FORTRAN'], - repr((self.arr.flags, getattr(obj, 'flags', None)))) - assert_(self.arr.flags['CONTIGUOUS']) - assert_(not self.arr_attr[6] & wrap.FORTRAN) - else: - assert_(not intent.flags & wrap.F2PY_INTENT_C) - assert_(self.arr.flags['FORTRAN']) - assert_(not self.arr.flags['CONTIGUOUS']) - 
assert_(self.arr_attr[6] & wrap.FORTRAN) - - if obj is None: - self.pyarr = None - self.pyarr_attr = None - return - - if intent.is_intent('cache'): - assert_(isinstance(obj, ndarray), repr(type(obj))) - self.pyarr = array(obj).reshape(*dims).copy() - else: - self.pyarr = array(array(obj, dtype=typ.dtypechar).reshape(*dims), - order=self.intent.is_intent('c') and 'C' or 'F') - assert_(self.pyarr.dtype == typ, - repr((self.pyarr.dtype, typ))) - assert_(self.pyarr.flags['OWNDATA'], (obj, intent)) - self.pyarr_attr = wrap.array_attrs(self.pyarr) - - if len(dims) > 1: - if self.intent.is_intent('c'): - assert_(not self.pyarr.flags['FORTRAN']) - assert_(self.pyarr.flags['CONTIGUOUS']) - assert_(not self.pyarr_attr[6] & wrap.FORTRAN) - else: - assert_(self.pyarr.flags['FORTRAN']) - assert_(not self.pyarr.flags['CONTIGUOUS']) - assert_(self.pyarr_attr[6] & wrap.FORTRAN) - - assert_(self.arr_attr[1] == self.pyarr_attr[1]) # nd - assert_(self.arr_attr[2] == self.pyarr_attr[2]) # dimensions - if self.arr_attr[1] <= 1: - assert_(self.arr_attr[3] == self.pyarr_attr[3], - repr((self.arr_attr[3], self.pyarr_attr[3], - self.arr.tobytes(), self.pyarr.tobytes()))) # strides - assert_(self.arr_attr[5][-2:] == self.pyarr_attr[5][-2:], - repr((self.arr_attr[5], self.pyarr_attr[5]))) # descr - assert_(self.arr_attr[6] == self.pyarr_attr[6], - repr((self.arr_attr[6], self.pyarr_attr[6], - flags2names(0 * self.arr_attr[6] - self.pyarr_attr[6]), - flags2names(self.arr_attr[6]), intent))) # flags - - if intent.is_intent('cache'): - assert_(self.arr_attr[5][3] >= self.type.elsize, - repr((self.arr_attr[5][3], self.type.elsize))) - else: - assert_(self.arr_attr[5][3] == self.type.elsize, - repr((self.arr_attr[5][3], self.type.elsize))) - assert_(self.arr_equal(self.pyarr, self.arr)) - - if isinstance(self.obj, ndarray): - if typ.elsize == Type(obj.dtype).elsize: - if not intent.is_intent('copy') and self.arr_attr[1] <= 1: - assert_(self.has_shared_memory()) - - def arr_equal(self, arr1, 
arr2): - if arr1.shape != arr2.shape: - return False - s = arr1 == arr2 - return alltrue(s.flatten()) - - def __str__(self): - return str(self.arr) - - def has_shared_memory(self): - """Check that created array shares data with input array. - """ - if self.obj is self.arr: - return True - if not isinstance(self.obj, ndarray): - return False - obj_attr = wrap.array_attrs(self.obj) - return obj_attr[0] == self.arr_attr[0] - - -class TestIntent(object): - - def test_in_out(self): - assert_equal(str(intent.in_.out), 'intent(in,out)') - assert_(intent.in_.c.is_intent('c')) - assert_(not intent.in_.c.is_intent_exact('c')) - assert_(intent.in_.c.is_intent_exact('c', 'in')) - assert_(intent.in_.c.is_intent_exact('in', 'c')) - assert_(not intent.in_.is_intent('c')) - - -class TestSharedMemory(object): - num2seq = [1, 2] - num23seq = [[1, 2, 3], [4, 5, 6]] - - @pytest.fixture(autouse=True, scope='class', params=_type_names) - def setup_type(self, request): - request.cls.type = Type(request.param) - request.cls.array = lambda self, dims, intent, obj: \ - Array(Type(request.param), dims, intent, obj) - - def test_in_from_2seq(self): - a = self.array([2], intent.in_, self.num2seq) - assert_(not a.has_shared_memory()) - - def test_in_from_2casttype(self): - for t in self.type.cast_types(): - obj = array(self.num2seq, dtype=t.dtype) - a = self.array([len(self.num2seq)], intent.in_, obj) - if t.elsize == self.type.elsize: - assert_( - a.has_shared_memory(), repr((self.type.dtype, t.dtype))) - else: - assert_(not a.has_shared_memory(), repr(t.dtype)) - - def test_inout_2seq(self): - obj = array(self.num2seq, dtype=self.type.dtype) - a = self.array([len(self.num2seq)], intent.inout, obj) - assert_(a.has_shared_memory()) - - try: - a = self.array([2], intent.in_.inout, self.num2seq) - except TypeError as msg: - if not str(msg).startswith('failed to initialize intent' - '(inout|inplace|cache) array'): - raise - else: - raise SystemError('intent(inout) should have failed on sequence') 
- - def test_f_inout_23seq(self): - obj = array(self.num23seq, dtype=self.type.dtype, order='F') - shape = (len(self.num23seq), len(self.num23seq[0])) - a = self.array(shape, intent.in_.inout, obj) - assert_(a.has_shared_memory()) - - obj = array(self.num23seq, dtype=self.type.dtype, order='C') - shape = (len(self.num23seq), len(self.num23seq[0])) - try: - a = self.array(shape, intent.in_.inout, obj) - except ValueError as msg: - if not str(msg).startswith('failed to initialize intent' - '(inout) array'): - raise - else: - raise SystemError( - 'intent(inout) should have failed on improper array') - - def test_c_inout_23seq(self): - obj = array(self.num23seq, dtype=self.type.dtype) - shape = (len(self.num23seq), len(self.num23seq[0])) - a = self.array(shape, intent.in_.c.inout, obj) - assert_(a.has_shared_memory()) - - def test_in_copy_from_2casttype(self): - for t in self.type.cast_types(): - obj = array(self.num2seq, dtype=t.dtype) - a = self.array([len(self.num2seq)], intent.in_.copy, obj) - assert_(not a.has_shared_memory(), repr(t.dtype)) - - def test_c_in_from_23seq(self): - a = self.array([len(self.num23seq), len(self.num23seq[0])], - intent.in_, self.num23seq) - assert_(not a.has_shared_memory()) - - def test_in_from_23casttype(self): - for t in self.type.cast_types(): - obj = array(self.num23seq, dtype=t.dtype) - a = self.array([len(self.num23seq), len(self.num23seq[0])], - intent.in_, obj) - assert_(not a.has_shared_memory(), repr(t.dtype)) - - def test_f_in_from_23casttype(self): - for t in self.type.cast_types(): - obj = array(self.num23seq, dtype=t.dtype, order='F') - a = self.array([len(self.num23seq), len(self.num23seq[0])], - intent.in_, obj) - if t.elsize == self.type.elsize: - assert_(a.has_shared_memory(), repr(t.dtype)) - else: - assert_(not a.has_shared_memory(), repr(t.dtype)) - - def test_c_in_from_23casttype(self): - for t in self.type.cast_types(): - obj = array(self.num23seq, dtype=t.dtype) - a = self.array([len(self.num23seq), 
len(self.num23seq[0])], - intent.in_.c, obj) - if t.elsize == self.type.elsize: - assert_(a.has_shared_memory(), repr(t.dtype)) - else: - assert_(not a.has_shared_memory(), repr(t.dtype)) - - def test_f_copy_in_from_23casttype(self): - for t in self.type.cast_types(): - obj = array(self.num23seq, dtype=t.dtype, order='F') - a = self.array([len(self.num23seq), len(self.num23seq[0])], - intent.in_.copy, obj) - assert_(not a.has_shared_memory(), repr(t.dtype)) - - def test_c_copy_in_from_23casttype(self): - for t in self.type.cast_types(): - obj = array(self.num23seq, dtype=t.dtype) - a = self.array([len(self.num23seq), len(self.num23seq[0])], - intent.in_.c.copy, obj) - assert_(not a.has_shared_memory(), repr(t.dtype)) - - def test_in_cache_from_2casttype(self): - for t in self.type.all_types(): - if t.elsize != self.type.elsize: - continue - obj = array(self.num2seq, dtype=t.dtype) - shape = (len(self.num2seq),) - a = self.array(shape, intent.in_.c.cache, obj) - assert_(a.has_shared_memory(), repr(t.dtype)) - - a = self.array(shape, intent.in_.cache, obj) - assert_(a.has_shared_memory(), repr(t.dtype)) - - obj = array(self.num2seq, dtype=t.dtype, order='F') - a = self.array(shape, intent.in_.c.cache, obj) - assert_(a.has_shared_memory(), repr(t.dtype)) - - a = self.array(shape, intent.in_.cache, obj) - assert_(a.has_shared_memory(), repr(t.dtype)) - - try: - a = self.array(shape, intent.in_.cache, obj[::-1]) - except ValueError as msg: - if not str(msg).startswith('failed to initialize' - ' intent(cache) array'): - raise - else: - raise SystemError( - 'intent(cache) should have failed on multisegmented array') - - def test_in_cache_from_2casttype_failure(self): - for t in self.type.all_types(): - if t.elsize >= self.type.elsize: - continue - obj = array(self.num2seq, dtype=t.dtype) - shape = (len(self.num2seq),) - try: - self.array(shape, intent.in_.cache, obj) # Should succeed - except ValueError as msg: - if not str(msg).startswith('failed to initialize' - ' 
intent(cache) array'): - raise - else: - raise SystemError( - 'intent(cache) should have failed on smaller array') - - def test_cache_hidden(self): - shape = (2,) - a = self.array(shape, intent.cache.hide, None) - assert_(a.arr.shape == shape) - - shape = (2, 3) - a = self.array(shape, intent.cache.hide, None) - assert_(a.arr.shape == shape) - - shape = (-1, 3) - try: - a = self.array(shape, intent.cache.hide, None) - except ValueError as msg: - if not str(msg).startswith('failed to create intent' - '(cache|hide)|optional array'): - raise - else: - raise SystemError( - 'intent(cache) should have failed on undefined dimensions') - - def test_hidden(self): - shape = (2,) - a = self.array(shape, intent.hide, None) - assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) - - shape = (2, 3) - a = self.array(shape, intent.hide, None) - assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) - assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']) - - shape = (2, 3) - a = self.array(shape, intent.c.hide, None) - assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) - assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']) - - shape = (-1, 3) - try: - a = self.array(shape, intent.hide, None) - except ValueError as msg: - if not str(msg).startswith('failed to create intent' - '(cache|hide)|optional array'): - raise - else: - raise SystemError('intent(hide) should have failed' - ' on undefined dimensions') - - def test_optional_none(self): - shape = (2,) - a = self.array(shape, intent.optional, None) - assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) - - shape = (2, 3) - a = self.array(shape, intent.optional, None) - assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) - assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']) - - 
shape = (2, 3) - a = self.array(shape, intent.c.optional, None) - assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) - assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']) - - def test_optional_from_2seq(self): - obj = self.num2seq - shape = (len(obj),) - a = self.array(shape, intent.optional, obj) - assert_(a.arr.shape == shape) - assert_(not a.has_shared_memory()) - - def test_optional_from_23seq(self): - obj = self.num23seq - shape = (len(obj), len(obj[0])) - a = self.array(shape, intent.optional, obj) - assert_(a.arr.shape == shape) - assert_(not a.has_shared_memory()) - - a = self.array(shape, intent.optional.c, obj) - assert_(a.arr.shape == shape) - assert_(not a.has_shared_memory()) - - def test_inplace(self): - obj = array(self.num23seq, dtype=self.type.dtype) - assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) - shape = obj.shape - a = self.array(shape, intent.inplace, obj) - assert_(obj[1][2] == a.arr[1][2], repr((obj, a.arr))) - a.arr[1][2] = 54 - assert_(obj[1][2] == a.arr[1][2] == - array(54, dtype=self.type.dtype), repr((obj, a.arr))) - assert_(a.arr is obj) - assert_(obj.flags['FORTRAN']) # obj attributes are changed inplace! - assert_(not obj.flags['CONTIGUOUS']) - - def test_inplace_from_casttype(self): - for t in self.type.cast_types(): - if t is self.type: - continue - obj = array(self.num23seq, dtype=t.dtype) - assert_(obj.dtype.type == t.dtype) - assert_(obj.dtype.type is not self.type.dtype) - assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) - shape = obj.shape - a = self.array(shape, intent.inplace, obj) - assert_(obj[1][2] == a.arr[1][2], repr((obj, a.arr))) - a.arr[1][2] = 54 - assert_(obj[1][2] == a.arr[1][2] == - array(54, dtype=self.type.dtype), repr((obj, a.arr))) - assert_(a.arr is obj) - assert_(obj.flags['FORTRAN']) # obj attributes changed inplace! 
- assert_(not obj.flags['CONTIGUOUS']) - assert_(obj.dtype.type is self.type.dtype) # obj changed inplace! diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_assumed_shape.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_assumed_shape.py deleted file mode 100644 index e5695a6..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_assumed_shape.py +++ /dev/null @@ -1,55 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import pytest -import tempfile - -from numpy.testing import assert_ -from . import util - - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - - -class TestAssumedShapeSumExample(util.F2PyTest): - sources = [_path('src', 'assumed_shape', 'foo_free.f90'), - _path('src', 'assumed_shape', 'foo_use.f90'), - _path('src', 'assumed_shape', 'precision.f90'), - _path('src', 'assumed_shape', 'foo_mod.f90'), - _path('src', 'assumed_shape', '.f2py_f2cmap'), - ] - - @pytest.mark.slow - def test_all(self): - r = self.module.fsum([1, 2]) - assert_(r == 3, repr(r)) - r = self.module.sum([1, 2]) - assert_(r == 3, repr(r)) - r = self.module.sum_with_use([1, 2]) - assert_(r == 3, repr(r)) - - r = self.module.mod.sum([1, 2]) - assert_(r == 3, repr(r)) - r = self.module.mod.fsum([1, 2]) - assert_(r == 3, repr(r)) - - -class TestF2cmapOption(TestAssumedShapeSumExample): - def setup(self): - # Use a custom file name for .f2py_f2cmap - self.sources = list(self.sources) - f2cmap_src = self.sources.pop(-1) - - self.f2cmap_file = tempfile.NamedTemporaryFile(delete=False) - with open(f2cmap_src, 'rb') as f: - self.f2cmap_file.write(f.read()) - self.f2cmap_file.close() - - self.sources.append(self.f2cmap_file.name) - self.options = ["--f2cmap", self.f2cmap_file.name] - - super(TestF2cmapOption, self).setup() - - def teardown(self): - os.unlink(self.f2cmap_file.name) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_block_docstring.py 
b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_block_docstring.py deleted file mode 100644 index 4f16789..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_block_docstring.py +++ /dev/null @@ -1,24 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import pytest -from . import util - -from numpy.testing import assert_equal, IS_PYPY - -class TestBlockDocString(util.F2PyTest): - code = """ - SUBROUTINE FOO() - INTEGER BAR(2, 3) - - COMMON /BLOCK/ BAR - RETURN - END - """ - - @pytest.mark.skipif(sys.platform=='win32', - reason='Fails with MinGW64 Gfortran (Issue #9673)') - @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") - def test_block_docstring(self): - expected = "'i'-array(2,3)\n" - assert_equal(self.module.block.__doc__, expected) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_callback.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_callback.py deleted file mode 100644 index 21c29ba..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_callback.py +++ /dev/null @@ -1,165 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import math -import textwrap -import sys -import pytest - -import numpy as np -from numpy.testing import assert_, assert_equal -from . 
import util - - -class TestF77Callback(util.F2PyTest): - code = """ - subroutine t(fun,a) - integer a -cf2py intent(out) a - external fun - call fun(a) - end - - subroutine func(a) -cf2py intent(in,out) a - integer a - a = a + 11 - end - - subroutine func0(a) -cf2py intent(out) a - integer a - a = 11 - end - - subroutine t2(a) -cf2py intent(callback) fun - integer a -cf2py intent(out) a - external fun - call fun(a) - end - - subroutine string_callback(callback, a) - external callback - double precision callback - double precision a - character*1 r -cf2py intent(out) a - r = 'r' - a = callback(r) - end - - subroutine string_callback_array(callback, cu, lencu, a) - external callback - integer callback - integer lencu - character*8 cu(lencu) - integer a -cf2py intent(out) a - - a = callback(cu, lencu) - end - """ - - @pytest.mark.slow - @pytest.mark.parametrize('name', 't,t2'.split(',')) - def test_all(self, name): - self.check_function(name) - - @pytest.mark.slow - def test_docstring(self): - expected = textwrap.dedent("""\ - a = t(fun,[fun_extra_args]) - - Wrapper for ``t``. 
- - Parameters - ---------- - fun : call-back function - - Other Parameters - ---------------- - fun_extra_args : input tuple, optional - Default: () - - Returns - ------- - a : int - - Notes - ----- - Call-back functions:: - - def fun(): return a - Return objects: - a : int - """) - assert_equal(self.module.t.__doc__, expected) - - def check_function(self, name): - t = getattr(self.module, name) - r = t(lambda: 4) - assert_(r == 4, repr(r)) - r = t(lambda a: 5, fun_extra_args=(6,)) - assert_(r == 5, repr(r)) - r = t(lambda a: a, fun_extra_args=(6,)) - assert_(r == 6, repr(r)) - r = t(lambda a: 5 + a, fun_extra_args=(7,)) - assert_(r == 12, repr(r)) - r = t(lambda a: math.degrees(a), fun_extra_args=(math.pi,)) - assert_(r == 180, repr(r)) - r = t(math.degrees, fun_extra_args=(math.pi,)) - assert_(r == 180, repr(r)) - - r = t(self.module.func, fun_extra_args=(6,)) - assert_(r == 17, repr(r)) - r = t(self.module.func0) - assert_(r == 11, repr(r)) - r = t(self.module.func0._cpointer) - assert_(r == 11, repr(r)) - - class A(object): - - def __call__(self): - return 7 - - def mth(self): - return 9 - a = A() - r = t(a) - assert_(r == 7, repr(r)) - r = t(a.mth) - assert_(r == 9, repr(r)) - - @pytest.mark.skipif(sys.platform=='win32', - reason='Fails with MinGW64 Gfortran (Issue #9673)') - def test_string_callback(self): - - def callback(code): - if code == 'r': - return 0 - else: - return 1 - - f = getattr(self.module, 'string_callback') - r = f(callback) - assert_(r == 0, repr(r)) - - @pytest.mark.skipif(sys.platform=='win32', - reason='Fails with MinGW64 Gfortran (Issue #9673)') - def test_string_callback_array(self): - # See gh-10027 - cu = np.zeros((1, 8), 'S1') - - def callback(cu, lencu): - if cu.shape != (lencu, 8): - return 1 - if cu.dtype != 'S1': - return 2 - if not np.all(cu == b''): - return 3 - return 0 - - f = getattr(self.module, 'string_callback_array') - res = f(callback, cu, len(cu)) - assert_(res == 0, repr(res)) diff --git 
a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_common.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_common.py deleted file mode 100644 index dcb01b0..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_common.py +++ /dev/null @@ -1,27 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import sys -import pytest - -import numpy as np -from . import util - -from numpy.testing import assert_array_equal - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - -class TestCommonBlock(util.F2PyTest): - sources = [_path('src', 'common', 'block.f')] - - @pytest.mark.skipif(sys.platform=='win32', - reason='Fails with MinGW64 Gfortran (Issue #9673)') - def test_common_block(self): - self.module.initcb() - assert_array_equal(self.module.block.long_bn, - np.array(1.0, dtype=np.float64)) - assert_array_equal(self.module.block.string_bn, - np.array('2', dtype='|S1')) - assert_array_equal(self.module.block.ok, - np.array(3, dtype=np.int32)) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_compile_function.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_compile_function.py deleted file mode 100644 index 40ea799..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_compile_function.py +++ /dev/null @@ -1,129 +0,0 @@ -"""See https://github.com/numpy/numpy/pull/11937. - -""" -from __future__ import division, absolute_import, print_function - -import sys -import os -import uuid -from importlib import import_module -import pytest - -import numpy.f2py - -from numpy.testing import assert_equal -from . 
import util - - -def setup_module(): - if sys.platform == 'win32' and sys.version_info[0] < 3: - pytest.skip('Fails with MinGW64 Gfortran (Issue #9673)') - if not util.has_c_compiler(): - pytest.skip("Needs C compiler") - if not util.has_f77_compiler(): - pytest.skip('Needs FORTRAN 77 compiler') - - -# extra_args can be a list (since gh-11937) or string. -# also test absence of extra_args -@pytest.mark.parametrize( - "extra_args", [['--noopt', '--debug'], '--noopt --debug', ''] - ) -@pytest.mark.leaks_references(reason="Imported module seems never deleted.") -def test_f2py_init_compile(extra_args): - # flush through the f2py __init__ compile() function code path as a - # crude test for input handling following migration from - # exec_command() to subprocess.check_output() in gh-11937 - - # the Fortran 77 syntax requires 6 spaces before any commands, but - # more space may be added/ - fsource = """ - integer function foo() - foo = 10 + 5 - return - end - """ - # use various helper functions in util.py to enable robust build / - # compile and reimport cycle in test suite - moddir = util.get_module_dir() - modname = util.get_temp_module_name() - - cwd = os.getcwd() - target = os.path.join(moddir, str(uuid.uuid4()) + '.f') - # try running compile() with and without a source_fn provided so - # that the code path where a temporary file for writing Fortran - # source is created is also explored - for source_fn in [target, None]: - # mimic the path changing behavior used by build_module() in - # util.py, but don't actually use build_module() because it has - # its own invocation of subprocess that circumvents the - # f2py.compile code block under test - try: - os.chdir(moddir) - ret_val = numpy.f2py.compile( - fsource, - modulename=modname, - extra_args=extra_args, - source_fn=source_fn - ) - finally: - os.chdir(cwd) - - # check for compile success return value - assert_equal(ret_val, 0) - - # we are not currently able to import the Python-Fortran - # interface module on 
Windows / Appveyor, even though we do get - # successful compilation on that platform with Python 3.x - if sys.platform != 'win32': - # check for sensible result of Fortran function; that means - # we can import the module name in Python and retrieve the - # result of the sum operation - return_check = import_module(modname) - calc_result = return_check.foo() - assert_equal(calc_result, 15) - # Removal from sys.modules, is not as such necessary. Even with - # removal, the module (dict) stays alive. - del sys.modules[modname] - - -def test_f2py_init_compile_failure(): - # verify an appropriate integer status value returned by - # f2py.compile() when invalid Fortran is provided - ret_val = numpy.f2py.compile(b"invalid") - assert_equal(ret_val, 1) - - -def test_f2py_init_compile_bad_cmd(): - # verify that usage of invalid command in f2py.compile() returns - # status value of 127 for historic consistency with exec_command() - # error handling - - # patch the sys Python exe path temporarily to induce an OSError - # downstream NOTE: how bad of an idea is this patching? 
- try: - temp = sys.executable - sys.executable = 'does not exist' - - # the OSError should take precedence over invalid Fortran - ret_val = numpy.f2py.compile(b"invalid") - assert_equal(ret_val, 127) - finally: - sys.executable = temp - - -@pytest.mark.parametrize('fsource', - ['program test_f2py\nend program test_f2py', - b'program test_f2py\nend program test_f2py',]) -def test_compile_from_strings(tmpdir, fsource): - # Make sure we can compile str and bytes gh-12796 - cwd = os.getcwd() - try: - os.chdir(str(tmpdir)) - ret_val = numpy.f2py.compile( - fsource, - modulename='test_compile_from_strings', - extension='.f90') - assert_equal(ret_val, 0) - finally: - os.chdir(cwd) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_kind.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_kind.py deleted file mode 100644 index 1f7762a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_kind.py +++ /dev/null @@ -1,34 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import pytest - -from numpy.testing import assert_ -from numpy.f2py.crackfortran import ( - _selected_int_kind_func as selected_int_kind, - _selected_real_kind_func as selected_real_kind - ) -from . 
import util - - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - - -class TestKind(util.F2PyTest): - sources = [_path('src', 'kind', 'foo.f90')] - - @pytest.mark.slow - def test_all(self): - selectedrealkind = self.module.selectedrealkind - selectedintkind = self.module.selectedintkind - - for i in range(40): - assert_(selectedintkind(i) in [selected_int_kind(i), -1], - 'selectedintkind(%s): expected %r but got %r' % - (i, selected_int_kind(i), selectedintkind(i))) - - for i in range(20): - assert_(selectedrealkind(i) in [selected_real_kind(i), -1], - 'selectedrealkind(%s): expected %r but got %r' % - (i, selected_real_kind(i), selectedrealkind(i))) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_mixed.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_mixed.py deleted file mode 100644 index 0337538..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_mixed.py +++ /dev/null @@ -1,37 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import textwrap -import pytest - -from numpy.testing import assert_, assert_equal -from . import util - - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - - -class TestMixed(util.F2PyTest): - sources = [_path('src', 'mixed', 'foo.f'), - _path('src', 'mixed', 'foo_fixed.f90'), - _path('src', 'mixed', 'foo_free.f90')] - - @pytest.mark.slow - def test_all(self): - assert_(self.module.bar11() == 11) - assert_(self.module.foo_fixed.bar12() == 12) - assert_(self.module.foo_free.bar13() == 13) - - @pytest.mark.slow - def test_docstring(self): - expected = textwrap.dedent("""\ - a = bar11() - - Wrapper for ``bar11``. 
- - Returns - ------- - a : int - """) - assert_equal(self.module.bar11.__doc__, expected) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_parameter.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_parameter.py deleted file mode 100644 index 6a37868..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_parameter.py +++ /dev/null @@ -1,118 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import pytest - -import numpy as np -from numpy.testing import assert_raises, assert_equal - -from . import util - - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - - -class TestParameters(util.F2PyTest): - # Check that intent(in out) translates as intent(inout) - sources = [_path('src', 'parameter', 'constant_real.f90'), - _path('src', 'parameter', 'constant_integer.f90'), - _path('src', 'parameter', 'constant_both.f90'), - _path('src', 'parameter', 'constant_compound.f90'), - _path('src', 'parameter', 'constant_non_compound.f90'), - ] - - @pytest.mark.slow - def test_constant_real_single(self): - # non-contiguous should raise error - x = np.arange(6, dtype=np.float32)[::2] - assert_raises(ValueError, self.module.foo_single, x) - - # check values with contiguous array - x = np.arange(3, dtype=np.float32) - self.module.foo_single(x) - assert_equal(x, [0 + 1 + 2*3, 1, 2]) - - @pytest.mark.slow - def test_constant_real_double(self): - # non-contiguous should raise error - x = np.arange(6, dtype=np.float64)[::2] - assert_raises(ValueError, self.module.foo_double, x) - - # check values with contiguous array - x = np.arange(3, dtype=np.float64) - self.module.foo_double(x) - assert_equal(x, [0 + 1 + 2*3, 1, 2]) - - @pytest.mark.slow - def test_constant_compound_int(self): - # non-contiguous should raise error - x = np.arange(6, dtype=np.int32)[::2] - assert_raises(ValueError, self.module.foo_compound_int, x) - - # check values with contiguous array - x = np.arange(3, 
dtype=np.int32) - self.module.foo_compound_int(x) - assert_equal(x, [0 + 1 + 2*6, 1, 2]) - - @pytest.mark.slow - def test_constant_non_compound_int(self): - # check values - x = np.arange(4, dtype=np.int32) - self.module.foo_non_compound_int(x) - assert_equal(x, [0 + 1 + 2 + 3*4, 1, 2, 3]) - - @pytest.mark.slow - def test_constant_integer_int(self): - # non-contiguous should raise error - x = np.arange(6, dtype=np.int32)[::2] - assert_raises(ValueError, self.module.foo_int, x) - - # check values with contiguous array - x = np.arange(3, dtype=np.int32) - self.module.foo_int(x) - assert_equal(x, [0 + 1 + 2*3, 1, 2]) - - @pytest.mark.slow - def test_constant_integer_long(self): - # non-contiguous should raise error - x = np.arange(6, dtype=np.int64)[::2] - assert_raises(ValueError, self.module.foo_long, x) - - # check values with contiguous array - x = np.arange(3, dtype=np.int64) - self.module.foo_long(x) - assert_equal(x, [0 + 1 + 2*3, 1, 2]) - - @pytest.mark.slow - def test_constant_both(self): - # non-contiguous should raise error - x = np.arange(6, dtype=np.float64)[::2] - assert_raises(ValueError, self.module.foo, x) - - # check values with contiguous array - x = np.arange(3, dtype=np.float64) - self.module.foo(x) - assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3]) - - @pytest.mark.slow - def test_constant_no(self): - # non-contiguous should raise error - x = np.arange(6, dtype=np.float64)[::2] - assert_raises(ValueError, self.module.foo_no, x) - - # check values with contiguous array - x = np.arange(3, dtype=np.float64) - self.module.foo_no(x) - assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3]) - - @pytest.mark.slow - def test_constant_sum(self): - # non-contiguous should raise error - x = np.arange(6, dtype=np.float64)[::2] - assert_raises(ValueError, self.module.foo_sum, x) - - # check values with contiguous array - x = np.arange(3, dtype=np.float64) - self.module.foo_sum(x) - assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3]) diff --git 
a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_quoted_character.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_quoted_character.py deleted file mode 100644 index c9a1c36..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_quoted_character.py +++ /dev/null @@ -1,35 +0,0 @@ -"""See https://github.com/numpy/numpy/pull/10676. - -""" -from __future__ import division, absolute_import, print_function - -import sys -from importlib import import_module -import pytest - -from numpy.testing import assert_equal -from . import util - - -class TestQuotedCharacter(util.F2PyTest): - code = """ - SUBROUTINE FOO(OUT1, OUT2, OUT3, OUT4, OUT5, OUT6) - CHARACTER SINGLE, DOUBLE, SEMICOL, EXCLA, OPENPAR, CLOSEPAR - PARAMETER (SINGLE="'", DOUBLE='"', SEMICOL=';', EXCLA="!", - 1 OPENPAR="(", CLOSEPAR=")") - CHARACTER OUT1, OUT2, OUT3, OUT4, OUT5, OUT6 -Cf2py intent(out) OUT1, OUT2, OUT3, OUT4, OUT5, OUT6 - OUT1 = SINGLE - OUT2 = DOUBLE - OUT3 = SEMICOL - OUT4 = EXCLA - OUT5 = OPENPAR - OUT6 = CLOSEPAR - RETURN - END - """ - - @pytest.mark.skipif(sys.platform=='win32', - reason='Fails with MinGW64 Gfortran (Issue #9673)') - def test_quoted_character(self): - assert_equal(self.module.foo(), (b"'", b'"', b';', b'!', b'(', b')')) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_regression.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_regression.py deleted file mode 100644 index 3adae63..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_regression.py +++ /dev/null @@ -1,29 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import pytest - -import numpy as np -from numpy.testing import assert_raises, assert_equal - -from . 
import util - - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - - -class TestIntentInOut(util.F2PyTest): - # Check that intent(in out) translates as intent(inout) - sources = [_path('src', 'regression', 'inout.f90')] - - @pytest.mark.slow - def test_inout(self): - # non-contiguous should raise error - x = np.arange(6, dtype=np.float32)[::2] - assert_raises(ValueError, self.module.foo, x) - - # check values with contiguous array - x = np.arange(3, dtype=np.float32) - self.module.foo(x) - assert_equal(x, [3, 1, 2]) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_character.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_character.py deleted file mode 100644 index fc3a58d..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_character.py +++ /dev/null @@ -1,146 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import pytest - -from numpy import array -from numpy.testing import assert_ -from . 
import util - - -class TestReturnCharacter(util.F2PyTest): - - def check_function(self, t): - tname = t.__doc__.split()[0] - if tname in ['t0', 't1', 's0', 's1']: - assert_(t(23) == b'2') - r = t('ab') - assert_(r == b'a', repr(r)) - r = t(array('ab')) - assert_(r == b'a', repr(r)) - r = t(array(77, 'u1')) - assert_(r == b'M', repr(r)) - #assert_(_raises(ValueError, t, array([77,87]))) - #assert_(_raises(ValueError, t, array(77))) - elif tname in ['ts', 'ss']: - assert_(t(23) == b'23 ', repr(t(23))) - assert_(t('123456789abcdef') == b'123456789a') - elif tname in ['t5', 's5']: - assert_(t(23) == b'23 ', repr(t(23))) - assert_(t('ab') == b'ab ', repr(t('ab'))) - assert_(t('123456789abcdef') == b'12345') - else: - raise NotImplementedError - - -class TestF77ReturnCharacter(TestReturnCharacter): - code = """ - function t0(value) - character value - character t0 - t0 = value - end - function t1(value) - character*1 value - character*1 t1 - t1 = value - end - function t5(value) - character*5 value - character*5 t5 - t5 = value - end - function ts(value) - character*(*) value - character*(*) ts - ts = value - end - - subroutine s0(t0,value) - character value - character t0 -cf2py intent(out) t0 - t0 = value - end - subroutine s1(t1,value) - character*1 value - character*1 t1 -cf2py intent(out) t1 - t1 = value - end - subroutine s5(t5,value) - character*5 value - character*5 t5 -cf2py intent(out) t5 - t5 = value - end - subroutine ss(ts,value) - character*(*) value - character*10 ts -cf2py intent(out) ts - ts = value - end - """ - - @pytest.mark.slow - @pytest.mark.parametrize('name', 't0,t1,t5,s0,s1,s5,ss'.split(',')) - def test_all(self, name): - self.check_function(getattr(self.module, name)) - - -class TestF90ReturnCharacter(TestReturnCharacter): - suffix = ".f90" - code = """ -module f90_return_char - contains - function t0(value) - character :: value - character :: t0 - t0 = value - end function t0 - function t1(value) - character(len=1) :: value - character(len=1) 
:: t1 - t1 = value - end function t1 - function t5(value) - character(len=5) :: value - character(len=5) :: t5 - t5 = value - end function t5 - function ts(value) - character(len=*) :: value - character(len=10) :: ts - ts = value - end function ts - - subroutine s0(t0,value) - character :: value - character :: t0 -!f2py intent(out) t0 - t0 = value - end subroutine s0 - subroutine s1(t1,value) - character(len=1) :: value - character(len=1) :: t1 -!f2py intent(out) t1 - t1 = value - end subroutine s1 - subroutine s5(t5,value) - character(len=5) :: value - character(len=5) :: t5 -!f2py intent(out) t5 - t5 = value - end subroutine s5 - subroutine ss(ts,value) - character(len=*) :: value - character(len=10) :: ts -!f2py intent(out) ts - ts = value - end subroutine ss -end module f90_return_char - """ - - @pytest.mark.slow - @pytest.mark.parametrize('name', 't0,t1,t5,ts,s0,s1,s5,ss'.split(',')) - def test_all(self, name): - self.check_function(getattr(self.module.f90_return_char, name)) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_complex.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_complex.py deleted file mode 100644 index 43c884d..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_complex.py +++ /dev/null @@ -1,169 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import pytest - -from numpy import array -from numpy.compat import long -from numpy.testing import assert_, assert_raises -from . import util - - -class TestReturnComplex(util.F2PyTest): - - def check_function(self, t): - tname = t.__doc__.split()[0] - if tname in ['t0', 't8', 's0', 's8']: - err = 1e-5 - else: - err = 0.0 - assert_(abs(t(234j) - 234.0j) <= err) - assert_(abs(t(234.6) - 234.6) <= err) - assert_(abs(t(long(234)) - 234.0) <= err) - assert_(abs(t(234.6 + 3j) - (234.6 + 3j)) <= err) - #assert_( abs(t('234')-234.)<=err) - #assert_( abs(t('234.6')-234.6)<=err) - assert_(abs(t(-234) + 234.) 
<= err) - assert_(abs(t([234]) - 234.) <= err) - assert_(abs(t((234,)) - 234.) <= err) - assert_(abs(t(array(234)) - 234.) <= err) - assert_(abs(t(array(23 + 4j, 'F')) - (23 + 4j)) <= err) - assert_(abs(t(array([234])) - 234.) <= err) - assert_(abs(t(array([[234]])) - 234.) <= err) - assert_(abs(t(array([234], 'b')) + 22.) <= err) - assert_(abs(t(array([234], 'h')) - 234.) <= err) - assert_(abs(t(array([234], 'i')) - 234.) <= err) - assert_(abs(t(array([234], 'l')) - 234.) <= err) - assert_(abs(t(array([234], 'q')) - 234.) <= err) - assert_(abs(t(array([234], 'f')) - 234.) <= err) - assert_(abs(t(array([234], 'd')) - 234.) <= err) - assert_(abs(t(array([234 + 3j], 'F')) - (234 + 3j)) <= err) - assert_(abs(t(array([234], 'D')) - 234.) <= err) - - #assert_raises(TypeError, t, array([234], 'a1')) - assert_raises(TypeError, t, 'abc') - - assert_raises(IndexError, t, []) - assert_raises(IndexError, t, ()) - - assert_raises(TypeError, t, t) - assert_raises(TypeError, t, {}) - - try: - r = t(10 ** 400) - assert_(repr(r) in ['(inf+0j)', '(Infinity+0j)'], repr(r)) - except OverflowError: - pass - - -class TestF77ReturnComplex(TestReturnComplex): - code = """ - function t0(value) - complex value - complex t0 - t0 = value - end - function t8(value) - complex*8 value - complex*8 t8 - t8 = value - end - function t16(value) - complex*16 value - complex*16 t16 - t16 = value - end - function td(value) - double complex value - double complex td - td = value - end - - subroutine s0(t0,value) - complex value - complex t0 -cf2py intent(out) t0 - t0 = value - end - subroutine s8(t8,value) - complex*8 value - complex*8 t8 -cf2py intent(out) t8 - t8 = value - end - subroutine s16(t16,value) - complex*16 value - complex*16 t16 -cf2py intent(out) t16 - t16 = value - end - subroutine sd(td,value) - double complex value - double complex td -cf2py intent(out) td - td = value - end - """ - - @pytest.mark.slow - @pytest.mark.parametrize('name', 't0,t8,t16,td,s0,s8,s16,sd'.split(',')) - def 
test_all(self, name): - self.check_function(getattr(self.module, name)) - - -class TestF90ReturnComplex(TestReturnComplex): - suffix = ".f90" - code = """ -module f90_return_complex - contains - function t0(value) - complex :: value - complex :: t0 - t0 = value - end function t0 - function t8(value) - complex(kind=4) :: value - complex(kind=4) :: t8 - t8 = value - end function t8 - function t16(value) - complex(kind=8) :: value - complex(kind=8) :: t16 - t16 = value - end function t16 - function td(value) - double complex :: value - double complex :: td - td = value - end function td - - subroutine s0(t0,value) - complex :: value - complex :: t0 -!f2py intent(out) t0 - t0 = value - end subroutine s0 - subroutine s8(t8,value) - complex(kind=4) :: value - complex(kind=4) :: t8 -!f2py intent(out) t8 - t8 = value - end subroutine s8 - subroutine s16(t16,value) - complex(kind=8) :: value - complex(kind=8) :: t16 -!f2py intent(out) t16 - t16 = value - end subroutine s16 - subroutine sd(td,value) - double complex :: value - double complex :: td -!f2py intent(out) td - td = value - end subroutine sd -end module f90_return_complex - """ - - @pytest.mark.slow - @pytest.mark.parametrize('name', 't0,t8,t16,td,s0,s8,s16,sd'.split(',')) - def test_all(self, name): - self.check_function(getattr(self.module.f90_return_complex, name)) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_integer.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_integer.py deleted file mode 100644 index 22f4acf..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_integer.py +++ /dev/null @@ -1,181 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import pytest - -from numpy import array -from numpy.compat import long -from numpy.testing import assert_, assert_raises -from . 
import util - - -class TestReturnInteger(util.F2PyTest): - - def check_function(self, t): - assert_(t(123) == 123, repr(t(123))) - assert_(t(123.6) == 123) - assert_(t(long(123)) == 123) - assert_(t('123') == 123) - assert_(t(-123) == -123) - assert_(t([123]) == 123) - assert_(t((123,)) == 123) - assert_(t(array(123)) == 123) - assert_(t(array([123])) == 123) - assert_(t(array([[123]])) == 123) - assert_(t(array([123], 'b')) == 123) - assert_(t(array([123], 'h')) == 123) - assert_(t(array([123], 'i')) == 123) - assert_(t(array([123], 'l')) == 123) - assert_(t(array([123], 'B')) == 123) - assert_(t(array([123], 'f')) == 123) - assert_(t(array([123], 'd')) == 123) - - #assert_raises(ValueError, t, array([123],'S3')) - assert_raises(ValueError, t, 'abc') - - assert_raises(IndexError, t, []) - assert_raises(IndexError, t, ()) - - assert_raises(Exception, t, t) - assert_raises(Exception, t, {}) - - if t.__doc__.split()[0] in ['t8', 's8']: - assert_raises(OverflowError, t, 100000000000000000000000) - assert_raises(OverflowError, t, 10000000011111111111111.23) - - -class TestF77ReturnInteger(TestReturnInteger): - code = """ - function t0(value) - integer value - integer t0 - t0 = value - end - function t1(value) - integer*1 value - integer*1 t1 - t1 = value - end - function t2(value) - integer*2 value - integer*2 t2 - t2 = value - end - function t4(value) - integer*4 value - integer*4 t4 - t4 = value - end - function t8(value) - integer*8 value - integer*8 t8 - t8 = value - end - - subroutine s0(t0,value) - integer value - integer t0 -cf2py intent(out) t0 - t0 = value - end - subroutine s1(t1,value) - integer*1 value - integer*1 t1 -cf2py intent(out) t1 - t1 = value - end - subroutine s2(t2,value) - integer*2 value - integer*2 t2 -cf2py intent(out) t2 - t2 = value - end - subroutine s4(t4,value) - integer*4 value - integer*4 t4 -cf2py intent(out) t4 - t4 = value - end - subroutine s8(t8,value) - integer*8 value - integer*8 t8 -cf2py intent(out) t8 - t8 = value - end - """ 
- - @pytest.mark.slow - @pytest.mark.parametrize('name', - 't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(',')) - def test_all(self, name): - self.check_function(getattr(self.module, name)) - - -class TestF90ReturnInteger(TestReturnInteger): - suffix = ".f90" - code = """ -module f90_return_integer - contains - function t0(value) - integer :: value - integer :: t0 - t0 = value - end function t0 - function t1(value) - integer(kind=1) :: value - integer(kind=1) :: t1 - t1 = value - end function t1 - function t2(value) - integer(kind=2) :: value - integer(kind=2) :: t2 - t2 = value - end function t2 - function t4(value) - integer(kind=4) :: value - integer(kind=4) :: t4 - t4 = value - end function t4 - function t8(value) - integer(kind=8) :: value - integer(kind=8) :: t8 - t8 = value - end function t8 - - subroutine s0(t0,value) - integer :: value - integer :: t0 -!f2py intent(out) t0 - t0 = value - end subroutine s0 - subroutine s1(t1,value) - integer(kind=1) :: value - integer(kind=1) :: t1 -!f2py intent(out) t1 - t1 = value - end subroutine s1 - subroutine s2(t2,value) - integer(kind=2) :: value - integer(kind=2) :: t2 -!f2py intent(out) t2 - t2 = value - end subroutine s2 - subroutine s4(t4,value) - integer(kind=4) :: value - integer(kind=4) :: t4 -!f2py intent(out) t4 - t4 = value - end subroutine s4 - subroutine s8(t8,value) - integer(kind=8) :: value - integer(kind=8) :: t8 -!f2py intent(out) t8 - t8 = value - end subroutine s8 -end module f90_return_integer - """ - - @pytest.mark.slow - @pytest.mark.parametrize('name', - 't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(',')) - def test_all(self, name): - self.check_function(getattr(self.module.f90_return_integer, name)) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_logical.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_logical.py deleted file mode 100644 index 96f215a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_logical.py +++ /dev/null @@ -1,189 
+0,0 @@ -from __future__ import division, absolute_import, print_function - -import pytest - -from numpy import array -from numpy.compat import long -from numpy.testing import assert_, assert_raises -from . import util - - -class TestReturnLogical(util.F2PyTest): - - def check_function(self, t): - assert_(t(True) == 1, repr(t(True))) - assert_(t(False) == 0, repr(t(False))) - assert_(t(0) == 0) - assert_(t(None) == 0) - assert_(t(0.0) == 0) - assert_(t(0j) == 0) - assert_(t(1j) == 1) - assert_(t(234) == 1) - assert_(t(234.6) == 1) - assert_(t(long(234)) == 1) - assert_(t(234.6 + 3j) == 1) - assert_(t('234') == 1) - assert_(t('aaa') == 1) - assert_(t('') == 0) - assert_(t([]) == 0) - assert_(t(()) == 0) - assert_(t({}) == 0) - assert_(t(t) == 1) - assert_(t(-234) == 1) - assert_(t(10 ** 100) == 1) - assert_(t([234]) == 1) - assert_(t((234,)) == 1) - assert_(t(array(234)) == 1) - assert_(t(array([234])) == 1) - assert_(t(array([[234]])) == 1) - assert_(t(array([234], 'b')) == 1) - assert_(t(array([234], 'h')) == 1) - assert_(t(array([234], 'i')) == 1) - assert_(t(array([234], 'l')) == 1) - assert_(t(array([234], 'f')) == 1) - assert_(t(array([234], 'd')) == 1) - assert_(t(array([234 + 3j], 'F')) == 1) - assert_(t(array([234], 'D')) == 1) - assert_(t(array(0)) == 0) - assert_(t(array([0])) == 0) - assert_(t(array([[0]])) == 0) - assert_(t(array([0j])) == 0) - assert_(t(array([1])) == 1) - assert_raises(ValueError, t, array([0, 0])) - - -class TestF77ReturnLogical(TestReturnLogical): - code = """ - function t0(value) - logical value - logical t0 - t0 = value - end - function t1(value) - logical*1 value - logical*1 t1 - t1 = value - end - function t2(value) - logical*2 value - logical*2 t2 - t2 = value - end - function t4(value) - logical*4 value - logical*4 t4 - t4 = value - end -c function t8(value) -c logical*8 value -c logical*8 t8 -c t8 = value -c end - - subroutine s0(t0,value) - logical value - logical t0 -cf2py intent(out) t0 - t0 = value - end - subroutine 
s1(t1,value) - logical*1 value - logical*1 t1 -cf2py intent(out) t1 - t1 = value - end - subroutine s2(t2,value) - logical*2 value - logical*2 t2 -cf2py intent(out) t2 - t2 = value - end - subroutine s4(t4,value) - logical*4 value - logical*4 t4 -cf2py intent(out) t4 - t4 = value - end -c subroutine s8(t8,value) -c logical*8 value -c logical*8 t8 -cf2py intent(out) t8 -c t8 = value -c end - """ - - @pytest.mark.slow - @pytest.mark.parametrize('name', 't0,t1,t2,t4,s0,s1,s2,s4'.split(',')) - def test_all(self, name): - self.check_function(getattr(self.module, name)) - - -class TestF90ReturnLogical(TestReturnLogical): - suffix = ".f90" - code = """ -module f90_return_logical - contains - function t0(value) - logical :: value - logical :: t0 - t0 = value - end function t0 - function t1(value) - logical(kind=1) :: value - logical(kind=1) :: t1 - t1 = value - end function t1 - function t2(value) - logical(kind=2) :: value - logical(kind=2) :: t2 - t2 = value - end function t2 - function t4(value) - logical(kind=4) :: value - logical(kind=4) :: t4 - t4 = value - end function t4 - function t8(value) - logical(kind=8) :: value - logical(kind=8) :: t8 - t8 = value - end function t8 - - subroutine s0(t0,value) - logical :: value - logical :: t0 -!f2py intent(out) t0 - t0 = value - end subroutine s0 - subroutine s1(t1,value) - logical(kind=1) :: value - logical(kind=1) :: t1 -!f2py intent(out) t1 - t1 = value - end subroutine s1 - subroutine s2(t2,value) - logical(kind=2) :: value - logical(kind=2) :: t2 -!f2py intent(out) t2 - t2 = value - end subroutine s2 - subroutine s4(t4,value) - logical(kind=4) :: value - logical(kind=4) :: t4 -!f2py intent(out) t4 - t4 = value - end subroutine s4 - subroutine s8(t8,value) - logical(kind=8) :: value - logical(kind=8) :: t8 -!f2py intent(out) t8 - t8 = value - end subroutine s8 -end module f90_return_logical - """ - - @pytest.mark.slow - @pytest.mark.parametrize('name', - 't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(',')) - def test_all(self, 
name): - self.check_function(getattr(self.module.f90_return_logical, name)) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_real.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_real.py deleted file mode 100644 index 315cfe4..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_real.py +++ /dev/null @@ -1,210 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import platform -import pytest - -from numpy import array -from numpy.compat import long -from numpy.testing import assert_, assert_raises -from . import util - - -class TestReturnReal(util.F2PyTest): - - def check_function(self, t): - if t.__doc__.split()[0] in ['t0', 't4', 's0', 's4']: - err = 1e-5 - else: - err = 0.0 - assert_(abs(t(234) - 234.0) <= err) - assert_(abs(t(234.6) - 234.6) <= err) - assert_(abs(t(long(234)) - 234.0) <= err) - assert_(abs(t('234') - 234) <= err) - assert_(abs(t('234.6') - 234.6) <= err) - assert_(abs(t(-234) + 234) <= err) - assert_(abs(t([234]) - 234) <= err) - assert_(abs(t((234,)) - 234.) <= err) - assert_(abs(t(array(234)) - 234.) <= err) - assert_(abs(t(array([234])) - 234.) <= err) - assert_(abs(t(array([[234]])) - 234.) <= err) - assert_(abs(t(array([234], 'b')) + 22) <= err) - assert_(abs(t(array([234], 'h')) - 234.) <= err) - assert_(abs(t(array([234], 'i')) - 234.) <= err) - assert_(abs(t(array([234], 'l')) - 234.) <= err) - assert_(abs(t(array([234], 'B')) - 234.) <= err) - assert_(abs(t(array([234], 'f')) - 234.) <= err) - assert_(abs(t(array([234], 'd')) - 234.) 
<= err) - if t.__doc__.split()[0] in ['t0', 't4', 's0', 's4']: - assert_(t(1e200) == t(1e300)) # inf - - #assert_raises(ValueError, t, array([234], 'S1')) - assert_raises(ValueError, t, 'abc') - - assert_raises(IndexError, t, []) - assert_raises(IndexError, t, ()) - - assert_raises(Exception, t, t) - assert_raises(Exception, t, {}) - - try: - r = t(10 ** 400) - assert_(repr(r) in ['inf', 'Infinity'], repr(r)) - except OverflowError: - pass - - - -@pytest.mark.skipif( - platform.system() == 'Darwin', - reason="Prone to error when run with numpy/f2py/tests on mac os, " - "but not when run in isolation") -class TestCReturnReal(TestReturnReal): - suffix = ".pyf" - module_name = "c_ext_return_real" - code = """ -python module c_ext_return_real -usercode \'\'\' -float t4(float value) { return value; } -void s4(float *t4, float value) { *t4 = value; } -double t8(double value) { return value; } -void s8(double *t8, double value) { *t8 = value; } -\'\'\' -interface - function t4(value) - real*4 intent(c) :: t4,value - end - function t8(value) - real*8 intent(c) :: t8,value - end - subroutine s4(t4,value) - intent(c) s4 - real*4 intent(out) :: t4 - real*4 intent(c) :: value - end - subroutine s8(t8,value) - intent(c) s8 - real*8 intent(out) :: t8 - real*8 intent(c) :: value - end -end interface -end python module c_ext_return_real - """ - - @pytest.mark.slow - @pytest.mark.parametrize('name', 't4,t8,s4,s8'.split(',')) - def test_all(self, name): - self.check_function(getattr(self.module, name)) - - -class TestF77ReturnReal(TestReturnReal): - code = """ - function t0(value) - real value - real t0 - t0 = value - end - function t4(value) - real*4 value - real*4 t4 - t4 = value - end - function t8(value) - real*8 value - real*8 t8 - t8 = value - end - function td(value) - double precision value - double precision td - td = value - end - - subroutine s0(t0,value) - real value - real t0 -cf2py intent(out) t0 - t0 = value - end - subroutine s4(t4,value) - real*4 value - real*4 t4 
-cf2py intent(out) t4 - t4 = value - end - subroutine s8(t8,value) - real*8 value - real*8 t8 -cf2py intent(out) t8 - t8 = value - end - subroutine sd(td,value) - double precision value - double precision td -cf2py intent(out) td - td = value - end - """ - - @pytest.mark.slow - @pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(',')) - def test_all(self, name): - self.check_function(getattr(self.module, name)) - - -class TestF90ReturnReal(TestReturnReal): - suffix = ".f90" - code = """ -module f90_return_real - contains - function t0(value) - real :: value - real :: t0 - t0 = value - end function t0 - function t4(value) - real(kind=4) :: value - real(kind=4) :: t4 - t4 = value - end function t4 - function t8(value) - real(kind=8) :: value - real(kind=8) :: t8 - t8 = value - end function t8 - function td(value) - double precision :: value - double precision :: td - td = value - end function td - - subroutine s0(t0,value) - real :: value - real :: t0 -!f2py intent(out) t0 - t0 = value - end subroutine s0 - subroutine s4(t4,value) - real(kind=4) :: value - real(kind=4) :: t4 -!f2py intent(out) t4 - t4 = value - end subroutine s4 - subroutine s8(t8,value) - real(kind=8) :: value - real(kind=8) :: t8 -!f2py intent(out) t8 - t8 = value - end subroutine s8 - subroutine sd(td,value) - double precision :: value - double precision :: td -!f2py intent(out) td - td = value - end subroutine sd -end module f90_return_real - """ - - @pytest.mark.slow - @pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(',')) - def test_all(self, name): - self.check_function(getattr(self.module.f90_return_real, name)) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_semicolon_split.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_semicolon_split.py deleted file mode 100644 index bcd18c8..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_semicolon_split.py +++ /dev/null @@ -1,65 +0,0 @@ -from __future__ import division, 
absolute_import, print_function - -import platform -import pytest - -from . import util -from numpy.testing import assert_equal - -@pytest.mark.skipif( - platform.system() == 'Darwin', - reason="Prone to error when run with numpy/f2py/tests on mac os, " - "but not when run in isolation") -class TestMultiline(util.F2PyTest): - suffix = ".pyf" - module_name = "multiline" - code = """ -python module {module} - usercode ''' -void foo(int* x) {{ - char dummy = ';'; - *x = 42; -}} -''' - interface - subroutine foo(x) - intent(c) foo - integer intent(out) :: x - end subroutine foo - end interface -end python module {module} - """.format(module=module_name) - - def test_multiline(self): - assert_equal(self.module.foo(), 42) - - -@pytest.mark.skipif( - platform.system() == 'Darwin', - reason="Prone to error when run with numpy/f2py/tests on mac os, " - "but not when run in isolation") -class TestCallstatement(util.F2PyTest): - suffix = ".pyf" - module_name = "callstatement" - code = """ -python module {module} - usercode ''' -void foo(int* x) {{ -}} -''' - interface - subroutine foo(x) - intent(c) foo - integer intent(out) :: x - callprotoargument int* - callstatement {{ & - ; & - x = 42; & - }} - end subroutine foo - end interface -end python module {module} - """.format(module=module_name) - - def test_callstatement(self): - assert_equal(self.module.foo(), 42) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_size.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_size.py deleted file mode 100644 index e2af618..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_size.py +++ /dev/null @@ -1,51 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import pytest - -from numpy.testing import assert_equal -from . 
import util - - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - - -class TestSizeSumExample(util.F2PyTest): - sources = [_path('src', 'size', 'foo.f90')] - - @pytest.mark.slow - def test_all(self): - r = self.module.foo([[]]) - assert_equal(r, [0], repr(r)) - - r = self.module.foo([[1, 2]]) - assert_equal(r, [3], repr(r)) - - r = self.module.foo([[1, 2], [3, 4]]) - assert_equal(r, [3, 7], repr(r)) - - r = self.module.foo([[1, 2], [3, 4], [5, 6]]) - assert_equal(r, [3, 7, 11], repr(r)) - - @pytest.mark.slow - def test_transpose(self): - r = self.module.trans([[]]) - assert_equal(r.T, [[]], repr(r)) - - r = self.module.trans([[1, 2]]) - assert_equal(r, [[1], [2]], repr(r)) - - r = self.module.trans([[1, 2, 3], [4, 5, 6]]) - assert_equal(r, [[1, 4], [2, 5], [3, 6]], repr(r)) - - @pytest.mark.slow - def test_flatten(self): - r = self.module.flatten([[]]) - assert_equal(r, [], repr(r)) - - r = self.module.flatten([[1, 2]]) - assert_equal(r, [1, 2], repr(r)) - - r = self.module.flatten([[1, 2, 3], [4, 5, 6]]) - assert_equal(r, [1, 2, 3, 4, 5, 6], repr(r)) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_string.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_string.py deleted file mode 100644 index 0493c99..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_string.py +++ /dev/null @@ -1,24 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import pytest - -from numpy.testing import assert_array_equal -import numpy as np -from . 
import util - - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - -class TestString(util.F2PyTest): - sources = [_path('src', 'string', 'char.f90')] - - @pytest.mark.slow - def test_char(self): - strings = np.array(['ab', 'cd', 'ef'], dtype='c').T - inp, out = self.module.char_test.change_strings(strings, strings.shape[1]) - assert_array_equal(inp, strings) - expected = strings.copy() - expected[1, :] = 'AAA' - assert_array_equal(out, expected) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/util.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/util.py deleted file mode 100644 index bf005df..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/util.py +++ /dev/null @@ -1,367 +0,0 @@ -""" -Utility functions for - -- building and importing modules on test time, using a temporary location -- detecting if compilers are present - -""" -from __future__ import division, absolute_import, print_function - -import os -import sys -import subprocess -import tempfile -import shutil -import atexit -import textwrap -import re -import pytest - -from numpy.compat import asbytes, asstr -from numpy.testing import temppath -from importlib import import_module - -try: - from hashlib import md5 -except ImportError: - from md5 import new as md5 # noqa: F401 - -# -# Maintaining a temporary module directory -# - -_module_dir = None -_module_num = 5403 - - -def _cleanup(): - global _module_dir - if _module_dir is not None: - try: - sys.path.remove(_module_dir) - except ValueError: - pass - try: - shutil.rmtree(_module_dir) - except (IOError, OSError): - pass - _module_dir = None - - -def get_module_dir(): - global _module_dir - if _module_dir is None: - _module_dir = tempfile.mkdtemp() - atexit.register(_cleanup) - if _module_dir not in sys.path: - sys.path.insert(0, _module_dir) - return _module_dir - - -def get_temp_module_name(): - # Assume single-threaded, and the module dir usable only by this thread - global _module_num - d = 
get_module_dir() - name = "_test_ext_module_%d" % _module_num - _module_num += 1 - if name in sys.modules: - # this should not be possible, but check anyway - raise RuntimeError("Temporary module name already in use.") - return name - - -def _memoize(func): - memo = {} - - def wrapper(*a, **kw): - key = repr((a, kw)) - if key not in memo: - try: - memo[key] = func(*a, **kw) - except Exception as e: - memo[key] = e - raise - ret = memo[key] - if isinstance(ret, Exception): - raise ret - return ret - wrapper.__name__ = func.__name__ - return wrapper - -# -# Building modules -# - - -@_memoize -def build_module(source_files, options=[], skip=[], only=[], module_name=None): - """ - Compile and import a f2py module, built from the given files. - - """ - - code = ("import sys; sys.path = %s; import numpy.f2py as f2py2e; " - "f2py2e.main()" % repr(sys.path)) - - d = get_module_dir() - - # Copy files - dst_sources = [] - f2py_sources = [] - for fn in source_files: - if not os.path.isfile(fn): - raise RuntimeError("%s is not a file" % fn) - dst = os.path.join(d, os.path.basename(fn)) - shutil.copyfile(fn, dst) - dst_sources.append(dst) - - base, ext = os.path.splitext(dst) - if ext in ('.f90', '.f', '.c', '.pyf'): - f2py_sources.append(dst) - - # Prepare options - if module_name is None: - module_name = get_temp_module_name() - f2py_opts = ['-c', '-m', module_name] + options + f2py_sources - if skip: - f2py_opts += ['skip:'] + skip - if only: - f2py_opts += ['only:'] + only - - # Build - cwd = os.getcwd() - try: - os.chdir(d) - cmd = [sys.executable, '-c', code] + f2py_opts - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - out, err = p.communicate() - if p.returncode != 0: - raise RuntimeError("Running f2py failed: %s\n%s" - % (cmd[4:], asstr(out))) - finally: - os.chdir(cwd) - - # Partial cleanup - for fn in dst_sources: - os.unlink(fn) - - # Import - return import_module(module_name) - - -@_memoize -def build_code(source_code, options=[], 
skip=[], only=[], suffix=None, - module_name=None): - """ - Compile and import Fortran code using f2py. - - """ - if suffix is None: - suffix = '.f' - with temppath(suffix=suffix) as path: - with open(path, 'w') as f: - f.write(source_code) - return build_module([path], options=options, skip=skip, only=only, - module_name=module_name) - -# -# Check if compilers are available at all... -# - -_compiler_status = None - - -def _get_compiler_status(): - global _compiler_status - if _compiler_status is not None: - return _compiler_status - - _compiler_status = (False, False, False) - - # XXX: this is really ugly. But I don't know how to invoke Distutils - # in a safer way... - code = textwrap.dedent("""\ - import os - import sys - sys.path = %(syspath)s - - def configuration(parent_name='',top_path=None): - global config - from numpy.distutils.misc_util import Configuration - config = Configuration('', parent_name, top_path) - return config - - from numpy.distutils.core import setup - setup(configuration=configuration) - - config_cmd = config.get_config_cmd() - have_c = config_cmd.try_compile('void foo() {}') - print('COMPILERS:%%d,%%d,%%d' %% (have_c, - config.have_f77c(), - config.have_f90c())) - sys.exit(99) - """) - code = code % dict(syspath=repr(sys.path)) - - tmpdir = tempfile.mkdtemp() - try: - script = os.path.join(tmpdir, 'setup.py') - - with open(script, 'w') as f: - f.write(code) - - cmd = [sys.executable, 'setup.py', 'config'] - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - cwd=tmpdir) - out, err = p.communicate() - finally: - shutil.rmtree(tmpdir) - - m = re.search(br'COMPILERS:(\d+),(\d+),(\d+)', out) - if m: - _compiler_status = (bool(int(m.group(1))), bool(int(m.group(2))), - bool(int(m.group(3)))) - # Finished - return _compiler_status - - -def has_c_compiler(): - return _get_compiler_status()[0] - - -def has_f77_compiler(): - return _get_compiler_status()[1] - - -def has_f90_compiler(): - return 
_get_compiler_status()[2] - -# -# Building with distutils -# - - -@_memoize -def build_module_distutils(source_files, config_code, module_name, **kw): - """ - Build a module via distutils and import it. - - """ - from numpy.distutils.misc_util import Configuration - from numpy.distutils.core import setup - - d = get_module_dir() - - # Copy files - dst_sources = [] - for fn in source_files: - if not os.path.isfile(fn): - raise RuntimeError("%s is not a file" % fn) - dst = os.path.join(d, os.path.basename(fn)) - shutil.copyfile(fn, dst) - dst_sources.append(dst) - - # Build script - config_code = textwrap.dedent(config_code).replace("\n", "\n ") - - code = textwrap.dedent("""\ - import os - import sys - sys.path = %(syspath)s - - def configuration(parent_name='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('', parent_name, top_path) - %(config_code)s - return config - - if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) - """) % dict(config_code=config_code, syspath=repr(sys.path)) - - script = os.path.join(d, get_temp_module_name() + '.py') - dst_sources.append(script) - f = open(script, 'wb') - f.write(asbytes(code)) - f.close() - - # Build - cwd = os.getcwd() - try: - os.chdir(d) - cmd = [sys.executable, script, 'build_ext', '-i'] - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - out, err = p.communicate() - if p.returncode != 0: - raise RuntimeError("Running distutils build failed: %s\n%s" - % (cmd[4:], asstr(out))) - finally: - os.chdir(cwd) - - # Partial cleanup - for fn in dst_sources: - os.unlink(fn) - - # Import - __import__(module_name) - return sys.modules[module_name] - -# -# Unittest convenience -# - - -class F2PyTest(object): - code = None - sources = None - options = [] - skip = [] - only = [] - suffix = '.f' - module = None - module_name = None - - def setup(self): - if sys.platform == 'win32': - pytest.skip('Fails 
with MinGW64 Gfortran (Issue #9673)') - - if self.module is not None: - return - - # Check compiler availability first - if not has_c_compiler(): - pytest.skip("No C compiler available") - - codes = [] - if self.sources: - codes.extend(self.sources) - if self.code is not None: - codes.append(self.suffix) - - needs_f77 = False - needs_f90 = False - for fn in codes: - if fn.endswith('.f'): - needs_f77 = True - elif fn.endswith('.f90'): - needs_f90 = True - if needs_f77 and not has_f77_compiler(): - pytest.skip("No Fortran 77 compiler available") - if needs_f90 and not has_f90_compiler(): - pytest.skip("No Fortran 90 compiler available") - - # Build the module - if self.code is not None: - self.module = build_code(self.code, options=self.options, - skip=self.skip, only=self.only, - suffix=self.suffix, - module_name=self.module_name) - - if self.sources is not None: - self.module = build_module(self.sources, options=self.options, - skip=self.skip, only=self.only, - module_name=self.module_name) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/use_rules.py b/venv/lib/python3.7/site-packages/numpy/f2py/use_rules.py deleted file mode 100644 index 6f44f16..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/use_rules.py +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/env python -""" - -Build 'use others module data' mechanism for f2py2e. - -Unfinished. - -Copyright 2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
-$Date: 2000/09/10 12:35:43 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -__version__ = "$Revision: 1.3 $"[10:-1] - -f2py_version = 'See `f2py -v`' - - -from .auxfuncs import ( - applyrules, dictappend, gentitle, hasnote, outmess -) - - -usemodule_rules = { - 'body': """ -#begintitle# -static char doc_#apiname#[] = \"\\\nVariable wrapper signature:\\n\\ -\t #name# = get_#name#()\\n\\ -Arguments:\\n\\ -#docstr#\"; -extern F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#); -static PyObject *#apiname#(PyObject *capi_self, PyObject *capi_args) { -/*#decl#*/ -\tif (!PyArg_ParseTuple(capi_args, \"\")) goto capi_fail; -printf(\"c: %d\\n\",F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#)); -\treturn Py_BuildValue(\"\"); -capi_fail: -\treturn NULL; -} -""", - 'method': '\t{\"get_#name#\",#apiname#,METH_VARARGS|METH_KEYWORDS,doc_#apiname#},', - 'need': ['F_MODFUNC'] -} - -################ - - -def buildusevars(m, r): - ret = {} - outmess( - '\t\tBuilding use variable hooks for module "%s" (feature only for F90/F95)...\n' % (m['name'])) - varsmap = {} - revmap = {} - if 'map' in r: - for k in r['map'].keys(): - if r['map'][k] in revmap: - outmess('\t\t\tVariable "%s<=%s" is already mapped by "%s". Skipping.\n' % ( - r['map'][k], k, revmap[r['map'][k]])) - else: - revmap[r['map'][k]] = k - if 'only' in r and r['only']: - for v in r['map'].keys(): - if r['map'][v] in m['vars']: - - if revmap[r['map'][v]] == v: - varsmap[v] = r['map'][v] - else: - outmess('\t\t\tIgnoring map "%s=>%s". See above.\n' % - (v, r['map'][v])) - else: - outmess( - '\t\t\tNo definition for variable "%s=>%s". 
Skipping.\n' % (v, r['map'][v])) - else: - for v in m['vars'].keys(): - if v in revmap: - varsmap[v] = revmap[v] - else: - varsmap[v] = v - for v in varsmap.keys(): - ret = dictappend(ret, buildusevar(v, varsmap[v], m['vars'], m['name'])) - return ret - - -def buildusevar(name, realname, vars, usemodulename): - outmess('\t\t\tConstructing wrapper function for variable "%s=>%s"...\n' % ( - name, realname)) - ret = {} - vrd = {'name': name, - 'realname': realname, - 'REALNAME': realname.upper(), - 'usemodulename': usemodulename, - 'USEMODULENAME': usemodulename.upper(), - 'texname': name.replace('_', '\\_'), - 'begintitle': gentitle('%s=>%s' % (name, realname)), - 'endtitle': gentitle('end of %s=>%s' % (name, realname)), - 'apiname': '#modulename#_use_%s_from_%s' % (realname, usemodulename) - } - nummap = {0: 'Ro', 1: 'Ri', 2: 'Rii', 3: 'Riii', 4: 'Riv', - 5: 'Rv', 6: 'Rvi', 7: 'Rvii', 8: 'Rviii', 9: 'Rix'} - vrd['texnamename'] = name - for i in nummap.keys(): - vrd['texnamename'] = vrd['texnamename'].replace(repr(i), nummap[i]) - if hasnote(vars[realname]): - vrd['note'] = vars[realname]['note'] - rd = dictappend({}, vrd) - - print(name, realname, vars[realname]) - ret = applyrules(usemodule_rules, rd) - return ret diff --git a/venv/lib/python3.7/site-packages/numpy/fft/__init__.py b/venv/lib/python3.7/site-packages/numpy/fft/__init__.py deleted file mode 100644 index 37b3f0d..0000000 --- a/venv/lib/python3.7/site-packages/numpy/fft/__init__.py +++ /dev/null @@ -1,201 +0,0 @@ -""" -Discrete Fourier Transform (:mod:`numpy.fft`) -============================================= - -.. currentmodule:: numpy.fft - -Standard FFTs -------------- - -.. autosummary:: - :toctree: generated/ - - fft Discrete Fourier transform. - ifft Inverse discrete Fourier transform. - fft2 Discrete Fourier transform in two dimensions. - ifft2 Inverse discrete Fourier transform in two dimensions. - fftn Discrete Fourier transform in N-dimensions. 
- ifftn Inverse discrete Fourier transform in N dimensions. - -Real FFTs ---------- - -.. autosummary:: - :toctree: generated/ - - rfft Real discrete Fourier transform. - irfft Inverse real discrete Fourier transform. - rfft2 Real discrete Fourier transform in two dimensions. - irfft2 Inverse real discrete Fourier transform in two dimensions. - rfftn Real discrete Fourier transform in N dimensions. - irfftn Inverse real discrete Fourier transform in N dimensions. - -Hermitian FFTs --------------- - -.. autosummary:: - :toctree: generated/ - - hfft Hermitian discrete Fourier transform. - ihfft Inverse Hermitian discrete Fourier transform. - -Helper routines ---------------- - -.. autosummary:: - :toctree: generated/ - - fftfreq Discrete Fourier Transform sample frequencies. - rfftfreq DFT sample frequencies (for usage with rfft, irfft). - fftshift Shift zero-frequency component to center of spectrum. - ifftshift Inverse of fftshift. - - -Background information ----------------------- - -Fourier analysis is fundamentally a method for expressing a function as a -sum of periodic components, and for recovering the function from those -components. When both the function and its Fourier transform are -replaced with discretized counterparts, it is called the discrete Fourier -transform (DFT). The DFT has become a mainstay of numerical computing in -part because of a very fast algorithm for computing it, called the Fast -Fourier Transform (FFT), which was known to Gauss (1805) and was brought -to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_ -provide an accessible introduction to Fourier analysis and its -applications. 
- -Because the discrete Fourier transform separates its input into -components that contribute at discrete frequencies, it has a great number -of applications in digital signal processing, e.g., for filtering, and in -this context the discretized input to the transform is customarily -referred to as a *signal*, which exists in the *time domain*. The output -is called a *spectrum* or *transform* and exists in the *frequency -domain*. - -Implementation details ----------------------- - -There are many ways to define the DFT, varying in the sign of the -exponent, normalization, etc. In this implementation, the DFT is defined -as - -.. math:: - A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\} - \\qquad k = 0,\\ldots,n-1. - -The DFT is in general defined for complex inputs and outputs, and a -single-frequency component at linear frequency :math:`f` is -represented by a complex exponential -:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t` -is the sampling interval. - -The values in the result follow so-called "standard" order: If ``A = -fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of -the signal), which is always purely real for real inputs. Then ``A[1:n/2]`` -contains the positive-frequency terms, and ``A[n/2+1:]`` contains the -negative-frequency terms, in order of decreasingly negative frequency. -For an even number of input points, ``A[n/2]`` represents both positive and -negative Nyquist frequency, and is also purely real for real input. For -an odd number of input points, ``A[(n-1)/2]`` contains the largest positive -frequency, while ``A[(n+1)/2]`` contains the largest negative frequency. -The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies -of corresponding elements in the output. The routine -``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the -zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes -that shift. 
- -When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)`` -is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum. -The phase spectrum is obtained by ``np.angle(A)``. - -The inverse DFT is defined as - -.. math:: - a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\} - \\qquad m = 0,\\ldots,n-1. - -It differs from the forward transform by the sign of the exponential -argument and the default normalization by :math:`1/n`. - -Type Promotion --------------- - -`numpy.fft` promotes ``float32`` and ``complex64`` arrays to ``float64`` and -``complex128`` arrays respectively. For an FFT implementation that does not -promote input arrays, see `scipy.fftpack`. - -Normalization -------------- - -The default normalization has the direct transforms unscaled and the inverse -transforms are scaled by :math:`1/n`. It is possible to obtain unitary -transforms by setting the keyword argument ``norm`` to ``"ortho"`` (default is -`None`) so that both direct and inverse transforms will be scaled by -:math:`1/\\sqrt{n}`. - -Real and Hermitian transforms ------------------------------ - -When the input is purely real, its transform is Hermitian, i.e., the -component at frequency :math:`f_k` is the complex conjugate of the -component at frequency :math:`-f_k`, which means that for real -inputs there is no information in the negative frequency components that -is not already available from the positive frequency components. -The family of `rfft` functions is -designed to operate on real inputs, and exploits this symmetry by -computing only the positive frequency components, up to and including the -Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex -output points. The inverses of this family assumes the same symmetry of -its input, and for an output of ``n`` points uses ``n/2+1`` input points. - -Correspondingly, when the spectrum is purely real, the signal is -Hermitian. 
The `hfft` family of functions exploits this symmetry by -using ``n/2+1`` complex points in the input (time) domain for ``n`` real -points in the frequency domain. - -In higher dimensions, FFTs are used, e.g., for image analysis and -filtering. The computational efficiency of the FFT means that it can -also be a faster way to compute large convolutions, using the property -that a convolution in the time domain is equivalent to a point-by-point -multiplication in the frequency domain. - -Higher dimensions ------------------ - -In two dimensions, the DFT is defined as - -.. math:: - A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1} - a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\} - \\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1, - -which extends in the obvious way to higher dimensions, and the inverses -in higher dimensions also extend in the same way. - -References ----------- - -.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the - machine calculation of complex Fourier series," *Math. Comput.* - 19: 297-301. - -.. [NR] Press, W., Teukolsky, S., Vetterline, W.T., and Flannery, B.P., - 2007, *Numerical Recipes: The Art of Scientific Computing*, ch. - 12-13. Cambridge Univ. Press, Cambridge, UK. - -Examples --------- - -For examples, see the various functions. 
- -""" - -from __future__ import division, absolute_import, print_function - -from ._pocketfft import * -from .helper import * - -from numpy._pytesttester import PytestTester -test = PytestTester(__name__) -del PytestTester diff --git a/venv/lib/python3.7/site-packages/numpy/fft/_pocketfft.py b/venv/lib/python3.7/site-packages/numpy/fft/_pocketfft.py deleted file mode 100644 index 50720cd..0000000 --- a/venv/lib/python3.7/site-packages/numpy/fft/_pocketfft.py +++ /dev/null @@ -1,1307 +0,0 @@ -""" -Discrete Fourier Transforms - -Routines in this module: - -fft(a, n=None, axis=-1) -ifft(a, n=None, axis=-1) -rfft(a, n=None, axis=-1) -irfft(a, n=None, axis=-1) -hfft(a, n=None, axis=-1) -ihfft(a, n=None, axis=-1) -fftn(a, s=None, axes=None) -ifftn(a, s=None, axes=None) -rfftn(a, s=None, axes=None) -irfftn(a, s=None, axes=None) -fft2(a, s=None, axes=(-2,-1)) -ifft2(a, s=None, axes=(-2, -1)) -rfft2(a, s=None, axes=(-2,-1)) -irfft2(a, s=None, axes=(-2, -1)) - -i = inverse transform -r = transform of purely real data -h = Hermite transform -n = n-dimensional transform -2 = 2-dimensional transform -(Note: 2D routines are just nD routines with different default -behavior.) - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn', - 'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn'] - -import functools - -from numpy.core import asarray, zeros, swapaxes, conjugate, take, sqrt -from . import _pocketfft_internal as pfi -from numpy.core.multiarray import normalize_axis_index -from numpy.core import overrides - - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy.fft') - - -# `inv_norm` is a float by which the result of the transform needs to be -# divided. This replaces the original, more intuitive 'fct` parameter to avoid -# divisions by zero (or alternatively additional checks) in the case of -# zero-length axes during its computation. 
-def _raw_fft(a, n, axis, is_real, is_forward, inv_norm): - axis = normalize_axis_index(axis, a.ndim) - if n is None: - n = a.shape[axis] - - if n < 1: - raise ValueError("Invalid number of FFT data points (%d) specified." - % n) - - fct = 1/inv_norm - - if a.shape[axis] != n: - s = list(a.shape) - if s[axis] > n: - index = [slice(None)]*len(s) - index[axis] = slice(0, n) - a = a[tuple(index)] - else: - index = [slice(None)]*len(s) - index[axis] = slice(0, s[axis]) - s[axis] = n - z = zeros(s, a.dtype.char) - z[tuple(index)] = a - a = z - - if axis == a.ndim-1: - r = pfi.execute(a, is_real, is_forward, fct) - else: - a = swapaxes(a, axis, -1) - r = pfi.execute(a, is_real, is_forward, fct) - r = swapaxes(r, axis, -1) - return r - - -def _unitary(norm): - if norm is None: - return False - if norm=="ortho": - return True - raise ValueError("Invalid norm value %s, should be None or \"ortho\"." - % norm) - - -def _fft_dispatcher(a, n=None, axis=None, norm=None): - return (a,) - - -@array_function_dispatch(_fft_dispatcher) -def fft(a, n=None, axis=-1, norm=None): - """ - Compute the one-dimensional discrete Fourier Transform. - - This function computes the one-dimensional *n*-point discrete Fourier - Transform (DFT) with the efficient Fast Fourier Transform (FFT) - algorithm [CT]. - - Parameters - ---------- - a : array_like - Input array, can be complex. - n : int, optional - Length of the transformed axis of the output. - If `n` is smaller than the length of the input, the input is cropped. - If it is larger, the input is padded with zeros. If `n` is not given, - the length of the input along the axis specified by `axis` is used. - axis : int, optional - Axis over which to compute the FFT. If not given, the last axis is - used. - norm : {None, "ortho"}, optional - .. versionadded:: 1.10.0 - - Normalization mode (see `numpy.fft`). Default is None. 
- - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axis - indicated by `axis`, or the last one if `axis` is not specified. - - Raises - ------ - IndexError - if `axes` is larger than the last axis of `a`. - - See Also - -------- - numpy.fft : for definition of the DFT and conventions used. - ifft : The inverse of `fft`. - fft2 : The two-dimensional FFT. - fftn : The *n*-dimensional FFT. - rfftn : The *n*-dimensional FFT of real input. - fftfreq : Frequency bins for given FFT parameters. - - Notes - ----- - FFT (Fast Fourier Transform) refers to a way the discrete Fourier - Transform (DFT) can be calculated efficiently, by using symmetries in the - calculated terms. The symmetry is highest when `n` is a power of 2, and - the transform is therefore most efficient for these sizes. - - The DFT is defined, with the conventions used in this implementation, in - the documentation for the `numpy.fft` module. - - References - ---------- - .. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the - machine calculation of complex Fourier series," *Math. Comput.* - 19: 297-301. 
- - Examples - -------- - >>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8)) - array([-2.33486982e-16+1.14423775e-17j, 8.00000000e+00-1.25557246e-15j, - 2.33486982e-16+2.33486982e-16j, 0.00000000e+00+1.22464680e-16j, - -1.14423775e-17+2.33486982e-16j, 0.00000000e+00+5.20784380e-16j, - 1.14423775e-17+1.14423775e-17j, 0.00000000e+00+1.22464680e-16j]) - - In this example, real input has an FFT which is Hermitian, i.e., symmetric - in the real part and anti-symmetric in the imaginary part, as described in - the `numpy.fft` documentation: - - >>> import matplotlib.pyplot as plt - >>> t = np.arange(256) - >>> sp = np.fft.fft(np.sin(t)) - >>> freq = np.fft.fftfreq(t.shape[-1]) - >>> plt.plot(freq, sp.real, freq, sp.imag) - [, ] - >>> plt.show() - - """ - - a = asarray(a) - if n is None: - n = a.shape[axis] - inv_norm = 1 - if norm is not None and _unitary(norm): - inv_norm = sqrt(n) - output = _raw_fft(a, n, axis, False, True, inv_norm) - return output - - -@array_function_dispatch(_fft_dispatcher) -def ifft(a, n=None, axis=-1, norm=None): - """ - Compute the one-dimensional inverse discrete Fourier Transform. - - This function computes the inverse of the one-dimensional *n*-point - discrete Fourier transform computed by `fft`. In other words, - ``ifft(fft(a)) == a`` to within numerical accuracy. - For a general description of the algorithm and definitions, - see `numpy.fft`. - - The input should be ordered in the same way as is returned by `fft`, - i.e., - - * ``a[0]`` should contain the zero frequency term, - * ``a[1:n//2]`` should contain the positive-frequency terms, - * ``a[n//2 + 1:]`` should contain the negative-frequency terms, in - increasing order starting from the most negative frequency. - - For an even number of input points, ``A[n//2]`` represents the sum of - the values at the positive and negative Nyquist frequencies, as the two - are aliased together. See `numpy.fft` for details. 
- - Parameters - ---------- - a : array_like - Input array, can be complex. - n : int, optional - Length of the transformed axis of the output. - If `n` is smaller than the length of the input, the input is cropped. - If it is larger, the input is padded with zeros. If `n` is not given, - the length of the input along the axis specified by `axis` is used. - See notes about padding issues. - axis : int, optional - Axis over which to compute the inverse DFT. If not given, the last - axis is used. - norm : {None, "ortho"}, optional - .. versionadded:: 1.10.0 - - Normalization mode (see `numpy.fft`). Default is None. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axis - indicated by `axis`, or the last one if `axis` is not specified. - - Raises - ------ - IndexError - If `axes` is larger than the last axis of `a`. - - See Also - -------- - numpy.fft : An introduction, with definitions and general explanations. - fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse - ifft2 : The two-dimensional inverse FFT. - ifftn : The n-dimensional inverse FFT. - - Notes - ----- - If the input parameter `n` is larger than the size of the input, the input - is padded by appending zeros at the end. Even though this is the common - approach, it might lead to surprising results. If a different padding is - desired, it must be performed before calling `ifft`. 
- - Examples - -------- - >>> np.fft.ifft([0, 4, 0, 0]) - array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) # may vary - - Create and plot a band-limited signal with random phases: - - >>> import matplotlib.pyplot as plt - >>> t = np.arange(400) - >>> n = np.zeros((400,), dtype=complex) - >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,))) - >>> s = np.fft.ifft(n) - >>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--') - [, ] - >>> plt.legend(('real', 'imaginary')) - - >>> plt.show() - - """ - a = asarray(a) - if n is None: - n = a.shape[axis] - if norm is not None and _unitary(norm): - inv_norm = sqrt(max(n, 1)) - else: - inv_norm = n - output = _raw_fft(a, n, axis, False, False, inv_norm) - return output - - - -@array_function_dispatch(_fft_dispatcher) -def rfft(a, n=None, axis=-1, norm=None): - """ - Compute the one-dimensional discrete Fourier Transform for real input. - - This function computes the one-dimensional *n*-point discrete Fourier - Transform (DFT) of a real-valued array by means of an efficient algorithm - called the Fast Fourier Transform (FFT). - - Parameters - ---------- - a : array_like - Input array - n : int, optional - Number of points along transformation axis in the input to use. - If `n` is smaller than the length of the input, the input is cropped. - If it is larger, the input is padded with zeros. If `n` is not given, - the length of the input along the axis specified by `axis` is used. - axis : int, optional - Axis over which to compute the FFT. If not given, the last axis is - used. - norm : {None, "ortho"}, optional - .. versionadded:: 1.10.0 - - Normalization mode (see `numpy.fft`). Default is None. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axis - indicated by `axis`, or the last one if `axis` is not specified. - If `n` is even, the length of the transformed axis is ``(n/2)+1``. - If `n` is odd, the length is ``(n+1)/2``. 
- - Raises - ------ - IndexError - If `axis` is larger than the last axis of `a`. - - See Also - -------- - numpy.fft : For definition of the DFT and conventions used. - irfft : The inverse of `rfft`. - fft : The one-dimensional FFT of general (complex) input. - fftn : The *n*-dimensional FFT. - rfftn : The *n*-dimensional FFT of real input. - - Notes - ----- - When the DFT is computed for purely real input, the output is - Hermitian-symmetric, i.e. the negative frequency terms are just the complex - conjugates of the corresponding positive-frequency terms, and the - negative-frequency terms are therefore redundant. This function does not - compute the negative frequency terms, and the length of the transformed - axis of the output is therefore ``n//2 + 1``. - - When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains - the zero-frequency term 0*fs, which is real due to Hermitian symmetry. - - If `n` is even, ``A[-1]`` contains the term representing both positive - and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely - real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains - the largest positive frequency (fs/2*(n-1)/n), and is complex in the - general case. - - If the input `a` contains an imaginary part, it is silently discarded. - - Examples - -------- - >>> np.fft.fft([0, 1, 0, 0]) - array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) # may vary - >>> np.fft.rfft([0, 1, 0, 0]) - array([ 1.+0.j, 0.-1.j, -1.+0.j]) # may vary - - Notice how the final element of the `fft` output is the complex conjugate - of the second element, for real input. For `rfft`, this symmetry is - exploited to compute only the non-negative frequency terms. 
- - """ - a = asarray(a) - inv_norm = 1 - if norm is not None and _unitary(norm): - if n is None: - n = a.shape[axis] - inv_norm = sqrt(n) - output = _raw_fft(a, n, axis, True, True, inv_norm) - return output - - -@array_function_dispatch(_fft_dispatcher) -def irfft(a, n=None, axis=-1, norm=None): - """ - Compute the inverse of the n-point DFT for real input. - - This function computes the inverse of the one-dimensional *n*-point - discrete Fourier Transform of real input computed by `rfft`. - In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical - accuracy. (See Notes below for why ``len(a)`` is necessary here.) - - The input is expected to be in the form returned by `rfft`, i.e. the - real zero-frequency term followed by the complex positive frequency terms - in order of increasing frequency. Since the discrete Fourier Transform of - real input is Hermitian-symmetric, the negative frequency terms are taken - to be the complex conjugates of the corresponding positive frequency terms. - - Parameters - ---------- - a : array_like - The input array. - n : int, optional - Length of the transformed axis of the output. - For `n` output points, ``n//2+1`` input points are necessary. If the - input is longer than this, it is cropped. If it is shorter than this, - it is padded with zeros. If `n` is not given, it is taken to be - ``2*(m-1)`` where ``m`` is the length of the input along the axis - specified by `axis`. - axis : int, optional - Axis over which to compute the inverse FFT. If not given, the last - axis is used. - norm : {None, "ortho"}, optional - .. versionadded:: 1.10.0 - - Normalization mode (see `numpy.fft`). Default is None. - - Returns - ------- - out : ndarray - The truncated or zero-padded input, transformed along the axis - indicated by `axis`, or the last one if `axis` is not specified. - The length of the transformed axis is `n`, or, if `n` is not given, - ``2*(m-1)`` where ``m`` is the length of the transformed axis of the - input. 
To get an odd number of output points, `n` must be specified. - - Raises - ------ - IndexError - If `axis` is larger than the last axis of `a`. - - See Also - -------- - numpy.fft : For definition of the DFT and conventions used. - rfft : The one-dimensional FFT of real input, of which `irfft` is inverse. - fft : The one-dimensional FFT. - irfft2 : The inverse of the two-dimensional FFT of real input. - irfftn : The inverse of the *n*-dimensional FFT of real input. - - Notes - ----- - Returns the real valued `n`-point inverse discrete Fourier transform - of `a`, where `a` contains the non-negative frequency terms of a - Hermitian-symmetric sequence. `n` is the length of the result, not the - input. - - If you specify an `n` such that `a` must be zero-padded or truncated, the - extra/removed values will be added/removed at high frequencies. One can - thus resample a series to `m` points via Fourier interpolation by: - ``a_resamp = irfft(rfft(a), m)``. - - The correct interpretation of the hermitian input depends on the length of - the original data, as given by `n`. This is because each input shape could - correspond to either an odd or even length signal. By default, `irfft` - assumes an even output length which puts the last entry at the Nyquist - frequency; aliasing with its symmetric counterpart. By Hermitian symmetry, - the value is thus treated as purely real. To avoid losing information, the - correct length of the real input **must** be given. - - Examples - -------- - >>> np.fft.ifft([1, -1j, -1, 1j]) - array([0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) # may vary - >>> np.fft.irfft([1, -1j, -1]) - array([0., 1., 0., 0.]) - - Notice how the last term in the input to the ordinary `ifft` is the - complex conjugate of the second term, and the output has zero imaginary - part everywhere. When calling `irfft`, the negative frequencies are not - specified, and the output array is purely real. 
- - """ - a = asarray(a) - if n is None: - n = (a.shape[axis] - 1) * 2 - inv_norm = n - if norm is not None and _unitary(norm): - inv_norm = sqrt(n) - output = _raw_fft(a, n, axis, True, False, inv_norm) - return output - - -@array_function_dispatch(_fft_dispatcher) -def hfft(a, n=None, axis=-1, norm=None): - """ - Compute the FFT of a signal that has Hermitian symmetry, i.e., a real - spectrum. - - Parameters - ---------- - a : array_like - The input array. - n : int, optional - Length of the transformed axis of the output. For `n` output - points, ``n//2 + 1`` input points are necessary. If the input is - longer than this, it is cropped. If it is shorter than this, it is - padded with zeros. If `n` is not given, it is taken to be ``2*(m-1)`` - where ``m`` is the length of the input along the axis specified by - `axis`. - axis : int, optional - Axis over which to compute the FFT. If not given, the last - axis is used. - norm : {None, "ortho"}, optional - Normalization mode (see `numpy.fft`). Default is None. - - .. versionadded:: 1.10.0 - - Returns - ------- - out : ndarray - The truncated or zero-padded input, transformed along the axis - indicated by `axis`, or the last one if `axis` is not specified. - The length of the transformed axis is `n`, or, if `n` is not given, - ``2*m - 2`` where ``m`` is the length of the transformed axis of - the input. To get an odd number of output points, `n` must be - specified, for instance as ``2*m - 1`` in the typical case, - - Raises - ------ - IndexError - If `axis` is larger than the last axis of `a`. - - See also - -------- - rfft : Compute the one-dimensional FFT for real input. - ihfft : The inverse of `hfft`. - - Notes - ----- - `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the - opposite case: here the signal has Hermitian symmetry in the time - domain and is real in the frequency domain. So here it's `hfft` for - which you must supply the length of the result if it is to be odd. 
- - * even: ``ihfft(hfft(a, 2*len(a) - 2) == a``, within roundoff error, - * odd: ``ihfft(hfft(a, 2*len(a) - 1) == a``, within roundoff error. - - The correct interpretation of the hermitian input depends on the length of - the original data, as given by `n`. This is because each input shape could - correspond to either an odd or even length signal. By default, `hfft` - assumes an even output length which puts the last entry at the Nyquist - frequency; aliasing with its symmetric counterpart. By Hermitian symmetry, - the value is thus treated as purely real. To avoid losing information, the - shape of the full signal **must** be given. - - Examples - -------- - >>> signal = np.array([1, 2, 3, 4, 3, 2]) - >>> np.fft.fft(signal) - array([15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j]) # may vary - >>> np.fft.hfft(signal[:4]) # Input first half of signal - array([15., -4., 0., -1., 0., -4.]) - >>> np.fft.hfft(signal, 6) # Input entire signal and truncate - array([15., -4., 0., -1., 0., -4.]) - - - >>> signal = np.array([[1, 1.j], [-1.j, 2]]) - >>> np.conj(signal.T) - signal # check Hermitian symmetry - array([[ 0.-0.j, -0.+0.j], # may vary - [ 0.+0.j, 0.-0.j]]) - >>> freq_spectrum = np.fft.hfft(signal) - >>> freq_spectrum - array([[ 1., 1.], - [ 2., -2.]]) - - """ - a = asarray(a) - if n is None: - n = (a.shape[axis] - 1) * 2 - unitary = _unitary(norm) - return irfft(conjugate(a), n, axis) * (sqrt(n) if unitary else n) - - -@array_function_dispatch(_fft_dispatcher) -def ihfft(a, n=None, axis=-1, norm=None): - """ - Compute the inverse FFT of a signal that has Hermitian symmetry. - - Parameters - ---------- - a : array_like - Input array. - n : int, optional - Length of the inverse FFT, the number of points along - transformation axis in the input to use. If `n` is smaller than - the length of the input, the input is cropped. If it is larger, - the input is padded with zeros. 
If `n` is not given, the length of - the input along the axis specified by `axis` is used. - axis : int, optional - Axis over which to compute the inverse FFT. If not given, the last - axis is used. - norm : {None, "ortho"}, optional - Normalization mode (see `numpy.fft`). Default is None. - - .. versionadded:: 1.10.0 - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axis - indicated by `axis`, or the last one if `axis` is not specified. - The length of the transformed axis is ``n//2 + 1``. - - See also - -------- - hfft, irfft - - Notes - ----- - `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the - opposite case: here the signal has Hermitian symmetry in the time - domain and is real in the frequency domain. So here it's `hfft` for - which you must supply the length of the result if it is to be odd: - - * even: ``ihfft(hfft(a, 2*len(a) - 2) == a``, within roundoff error, - * odd: ``ihfft(hfft(a, 2*len(a) - 1) == a``, within roundoff error. 
- - Examples - -------- - >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4]) - >>> np.fft.ifft(spectrum) - array([1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.+0.j]) # may vary - >>> np.fft.ihfft(spectrum) - array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j]) # may vary - - """ - a = asarray(a) - if n is None: - n = a.shape[axis] - unitary = _unitary(norm) - output = conjugate(rfft(a, n, axis)) - return output * (1 / (sqrt(n) if unitary else n)) - - -def _cook_nd_args(a, s=None, axes=None, invreal=0): - if s is None: - shapeless = 1 - if axes is None: - s = list(a.shape) - else: - s = take(a.shape, axes) - else: - shapeless = 0 - s = list(s) - if axes is None: - axes = list(range(-len(s), 0)) - if len(s) != len(axes): - raise ValueError("Shape and axes have different lengths.") - if invreal and shapeless: - s[-1] = (a.shape[axes[-1]] - 1) * 2 - return s, axes - - -def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None): - a = asarray(a) - s, axes = _cook_nd_args(a, s, axes) - itl = list(range(len(axes))) - itl.reverse() - for ii in itl: - a = function(a, n=s[ii], axis=axes[ii], norm=norm) - return a - - -def _fftn_dispatcher(a, s=None, axes=None, norm=None): - return (a,) - - -@array_function_dispatch(_fftn_dispatcher) -def fftn(a, s=None, axes=None, norm=None): - """ - Compute the N-dimensional discrete Fourier Transform. - - This function computes the *N*-dimensional discrete Fourier Transform over - any number of axes in an *M*-dimensional array by means of the Fast Fourier - Transform (FFT). - - Parameters - ---------- - a : array_like - Input array, can be complex. - s : sequence of ints, optional - Shape (length of each transformed axis) of the output - (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). - This corresponds to ``n`` for ``fft(x, n)``. - Along any axis, if the given shape is smaller than that of the input, - the input is cropped. If it is larger, the input is padded with zeros. 
- if `s` is not given, the shape of the input along the axes specified - by `axes` is used. - axes : sequence of ints, optional - Axes over which to compute the FFT. If not given, the last ``len(s)`` - axes are used, or all axes if `s` is also not specified. - Repeated indices in `axes` means that the transform over that axis is - performed multiple times. - norm : {None, "ortho"}, optional - .. versionadded:: 1.10.0 - - Normalization mode (see `numpy.fft`). Default is None. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axes - indicated by `axes`, or by a combination of `s` and `a`, - as explained in the parameters section above. - - Raises - ------ - ValueError - If `s` and `axes` have different length. - IndexError - If an element of `axes` is larger than than the number of axes of `a`. - - See Also - -------- - numpy.fft : Overall view of discrete Fourier transforms, with definitions - and conventions used. - ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT. - fft : The one-dimensional FFT, with definitions and conventions used. - rfftn : The *n*-dimensional FFT of real input. - fft2 : The two-dimensional FFT. - fftshift : Shifts zero-frequency terms to centre of array - - Notes - ----- - The output, analogously to `fft`, contains the term for zero frequency in - the low-order corner of all axes, the positive frequency terms in the - first half of all axes, the term for the Nyquist frequency in the middle - of all axes and the negative frequency terms in the second half of all - axes, in order of decreasingly negative frequency. - - See `numpy.fft` for details, definitions and conventions used. 
- - Examples - -------- - >>> a = np.mgrid[:3, :3, :3][0] - >>> np.fft.fftn(a, axes=(1, 2)) - array([[[ 0.+0.j, 0.+0.j, 0.+0.j], # may vary - [ 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j]], - [[ 9.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j]], - [[18.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j]]]) - >>> np.fft.fftn(a, (2, 2), axes=(0, 1)) - array([[[ 2.+0.j, 2.+0.j, 2.+0.j], # may vary - [ 0.+0.j, 0.+0.j, 0.+0.j]], - [[-2.+0.j, -2.+0.j, -2.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j]]]) - - >>> import matplotlib.pyplot as plt - >>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12, - ... 2 * np.pi * np.arange(200) / 34) - >>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape) - >>> FS = np.fft.fftn(S) - >>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2)) - - >>> plt.show() - - """ - - return _raw_fftnd(a, s, axes, fft, norm) - - -@array_function_dispatch(_fftn_dispatcher) -def ifftn(a, s=None, axes=None, norm=None): - """ - Compute the N-dimensional inverse discrete Fourier Transform. - - This function computes the inverse of the N-dimensional discrete - Fourier Transform over any number of axes in an M-dimensional array by - means of the Fast Fourier Transform (FFT). In other words, - ``ifftn(fftn(a)) == a`` to within numerical accuracy. - For a description of the definitions and conventions used, see `numpy.fft`. - - The input, analogously to `ifft`, should be ordered in the same way as is - returned by `fftn`, i.e. it should have the term for zero frequency - in all axes in the low-order corner, the positive frequency terms in the - first half of all axes, the term for the Nyquist frequency in the middle - of all axes and the negative frequency terms in the second half of all - axes, in order of decreasingly negative frequency. - - Parameters - ---------- - a : array_like - Input array, can be complex. 
- s : sequence of ints, optional - Shape (length of each transformed axis) of the output - (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). - This corresponds to ``n`` for ``ifft(x, n)``. - Along any axis, if the given shape is smaller than that of the input, - the input is cropped. If it is larger, the input is padded with zeros. - if `s` is not given, the shape of the input along the axes specified - by `axes` is used. See notes for issue on `ifft` zero padding. - axes : sequence of ints, optional - Axes over which to compute the IFFT. If not given, the last ``len(s)`` - axes are used, or all axes if `s` is also not specified. - Repeated indices in `axes` means that the inverse transform over that - axis is performed multiple times. - norm : {None, "ortho"}, optional - .. versionadded:: 1.10.0 - - Normalization mode (see `numpy.fft`). Default is None. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axes - indicated by `axes`, or by a combination of `s` or `a`, - as explained in the parameters section above. - - Raises - ------ - ValueError - If `s` and `axes` have different length. - IndexError - If an element of `axes` is larger than than the number of axes of `a`. - - See Also - -------- - numpy.fft : Overall view of discrete Fourier transforms, with definitions - and conventions used. - fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse. - ifft : The one-dimensional inverse FFT. - ifft2 : The two-dimensional inverse FFT. - ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning - of array. - - Notes - ----- - See `numpy.fft` for definitions and conventions used. - - Zero-padding, analogously with `ifft`, is performed by appending zeros to - the input along the specified dimension. Although this is the common - approach, it might lead to surprising results. If another form of zero - padding is desired, it must be performed before `ifftn` is called. 
- - Examples - -------- - >>> a = np.eye(4) - >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,)) - array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary - [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j], - [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], - [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]]) - - - Create and plot an image with band-limited frequency content: - - >>> import matplotlib.pyplot as plt - >>> n = np.zeros((200,200), dtype=complex) - >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20))) - >>> im = np.fft.ifftn(n).real - >>> plt.imshow(im) - - >>> plt.show() - - """ - - return _raw_fftnd(a, s, axes, ifft, norm) - - -@array_function_dispatch(_fftn_dispatcher) -def fft2(a, s=None, axes=(-2, -1), norm=None): - """ - Compute the 2-dimensional discrete Fourier Transform - - This function computes the *n*-dimensional discrete Fourier Transform - over any axes in an *M*-dimensional array by means of the - Fast Fourier Transform (FFT). By default, the transform is computed over - the last two axes of the input array, i.e., a 2-dimensional FFT. - - Parameters - ---------- - a : array_like - Input array, can be complex - s : sequence of ints, optional - Shape (length of each transformed axis) of the output - (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). - This corresponds to ``n`` for ``fft(x, n)``. - Along each axis, if the given shape is smaller than that of the input, - the input is cropped. If it is larger, the input is padded with zeros. - if `s` is not given, the shape of the input along the axes specified - by `axes` is used. - axes : sequence of ints, optional - Axes over which to compute the FFT. If not given, the last two - axes are used. A repeated index in `axes` means the transform over - that axis is performed multiple times. A one-element sequence means - that a one-dimensional FFT is performed. - norm : {None, "ortho"}, optional - .. versionadded:: 1.10.0 - - Normalization mode (see `numpy.fft`). Default is None. 
- - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axes - indicated by `axes`, or the last two axes if `axes` is not given. - - Raises - ------ - ValueError - If `s` and `axes` have different length, or `axes` not given and - ``len(s) != 2``. - IndexError - If an element of `axes` is larger than than the number of axes of `a`. - - See Also - -------- - numpy.fft : Overall view of discrete Fourier transforms, with definitions - and conventions used. - ifft2 : The inverse two-dimensional FFT. - fft : The one-dimensional FFT. - fftn : The *n*-dimensional FFT. - fftshift : Shifts zero-frequency terms to the center of the array. - For two-dimensional input, swaps first and third quadrants, and second - and fourth quadrants. - - Notes - ----- - `fft2` is just `fftn` with a different default for `axes`. - - The output, analogously to `fft`, contains the term for zero frequency in - the low-order corner of the transformed axes, the positive frequency terms - in the first half of these axes, the term for the Nyquist frequency in the - middle of the axes and the negative frequency terms in the second half of - the axes, in order of decreasingly negative frequency. - - See `fftn` for details and a plotting example, and `numpy.fft` for - definitions and conventions used. - - - Examples - -------- - >>> a = np.mgrid[:5, :5][0] - >>> np.fft.fft2(a) - array([[ 50. +0.j , 0. +0.j , 0. +0.j , # may vary - 0. +0.j , 0. +0.j ], - [-12.5+17.20477401j, 0. +0.j , 0. +0.j , - 0. +0.j , 0. +0.j ], - [-12.5 +4.0614962j , 0. +0.j , 0. +0.j , - 0. +0.j , 0. +0.j ], - [-12.5 -4.0614962j , 0. +0.j , 0. +0.j , - 0. +0.j , 0. +0.j ], - [-12.5-17.20477401j, 0. +0.j , 0. +0.j , - 0. +0.j , 0. +0.j ]]) - - """ - - return _raw_fftnd(a, s, axes, fft, norm) - - -@array_function_dispatch(_fftn_dispatcher) -def ifft2(a, s=None, axes=(-2, -1), norm=None): - """ - Compute the 2-dimensional inverse discrete Fourier Transform. 
- - This function computes the inverse of the 2-dimensional discrete Fourier - Transform over any number of axes in an M-dimensional array by means of - the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a`` - to within numerical accuracy. By default, the inverse transform is - computed over the last two axes of the input array. - - The input, analogously to `ifft`, should be ordered in the same way as is - returned by `fft2`, i.e. it should have the term for zero frequency - in the low-order corner of the two axes, the positive frequency terms in - the first half of these axes, the term for the Nyquist frequency in the - middle of the axes and the negative frequency terms in the second half of - both axes, in order of decreasingly negative frequency. - - Parameters - ---------- - a : array_like - Input array, can be complex. - s : sequence of ints, optional - Shape (length of each axis) of the output (``s[0]`` refers to axis 0, - ``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``. - Along each axis, if the given shape is smaller than that of the input, - the input is cropped. If it is larger, the input is padded with zeros. - if `s` is not given, the shape of the input along the axes specified - by `axes` is used. See notes for issue on `ifft` zero padding. - axes : sequence of ints, optional - Axes over which to compute the FFT. If not given, the last two - axes are used. A repeated index in `axes` means the transform over - that axis is performed multiple times. A one-element sequence means - that a one-dimensional FFT is performed. - norm : {None, "ortho"}, optional - .. versionadded:: 1.10.0 - - Normalization mode (see `numpy.fft`). Default is None. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axes - indicated by `axes`, or the last two axes if `axes` is not given. 
- - Raises - ------ - ValueError - If `s` and `axes` have different length, or `axes` not given and - ``len(s) != 2``. - IndexError - If an element of `axes` is larger than than the number of axes of `a`. - - See Also - -------- - numpy.fft : Overall view of discrete Fourier transforms, with definitions - and conventions used. - fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse. - ifftn : The inverse of the *n*-dimensional FFT. - fft : The one-dimensional FFT. - ifft : The one-dimensional inverse FFT. - - Notes - ----- - `ifft2` is just `ifftn` with a different default for `axes`. - - See `ifftn` for details and a plotting example, and `numpy.fft` for - definition and conventions used. - - Zero-padding, analogously with `ifft`, is performed by appending zeros to - the input along the specified dimension. Although this is the common - approach, it might lead to surprising results. If another form of zero - padding is desired, it must be performed before `ifft2` is called. - - Examples - -------- - >>> a = 4 * np.eye(4) - >>> np.fft.ifft2(a) - array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary - [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j], - [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], - [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]]) - - """ - - return _raw_fftnd(a, s, axes, ifft, norm) - - -@array_function_dispatch(_fftn_dispatcher) -def rfftn(a, s=None, axes=None, norm=None): - """ - Compute the N-dimensional discrete Fourier Transform for real input. - - This function computes the N-dimensional discrete Fourier Transform over - any number of axes in an M-dimensional real array by means of the Fast - Fourier Transform (FFT). By default, all axes are transformed, with the - real transform performed over the last axis, while the remaining - transforms are complex. - - Parameters - ---------- - a : array_like - Input array, taken to be real. - s : sequence of ints, optional - Shape (length along each transformed axis) to use from the input. 
- (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). - The final element of `s` corresponds to `n` for ``rfft(x, n)``, while - for the remaining axes, it corresponds to `n` for ``fft(x, n)``. - Along any axis, if the given shape is smaller than that of the input, - the input is cropped. If it is larger, the input is padded with zeros. - if `s` is not given, the shape of the input along the axes specified - by `axes` is used. - axes : sequence of ints, optional - Axes over which to compute the FFT. If not given, the last ``len(s)`` - axes are used, or all axes if `s` is also not specified. - norm : {None, "ortho"}, optional - .. versionadded:: 1.10.0 - - Normalization mode (see `numpy.fft`). Default is None. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axes - indicated by `axes`, or by a combination of `s` and `a`, - as explained in the parameters section above. - The length of the last axis transformed will be ``s[-1]//2+1``, - while the remaining transformed axes will have lengths according to - `s`, or unchanged from the input. - - Raises - ------ - ValueError - If `s` and `axes` have different length. - IndexError - If an element of `axes` is larger than than the number of axes of `a`. - - See Also - -------- - irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT - of real input. - fft : The one-dimensional FFT, with definitions and conventions used. - rfft : The one-dimensional FFT of real input. - fftn : The n-dimensional FFT. - rfft2 : The two-dimensional FFT of real input. - - Notes - ----- - The transform for real input is performed over the last transformation - axis, as by `rfft`, then the transform over the remaining axes is - performed as by `fftn`. The order of the output is as for `rfft` for the - final transformation axis, and as for `fftn` for the remaining - transformation axes. - - See `fft` for details, definitions and conventions used. 
- - Examples - -------- - >>> a = np.ones((2, 2, 2)) - >>> np.fft.rfftn(a) - array([[[8.+0.j, 0.+0.j], # may vary - [0.+0.j, 0.+0.j]], - [[0.+0.j, 0.+0.j], - [0.+0.j, 0.+0.j]]]) - - >>> np.fft.rfftn(a, axes=(2, 0)) - array([[[4.+0.j, 0.+0.j], # may vary - [4.+0.j, 0.+0.j]], - [[0.+0.j, 0.+0.j], - [0.+0.j, 0.+0.j]]]) - - """ - a = asarray(a) - s, axes = _cook_nd_args(a, s, axes) - a = rfft(a, s[-1], axes[-1], norm) - for ii in range(len(axes)-1): - a = fft(a, s[ii], axes[ii], norm) - return a - - -@array_function_dispatch(_fftn_dispatcher) -def rfft2(a, s=None, axes=(-2, -1), norm=None): - """ - Compute the 2-dimensional FFT of a real array. - - Parameters - ---------- - a : array - Input array, taken to be real. - s : sequence of ints, optional - Shape of the FFT. - axes : sequence of ints, optional - Axes over which to compute the FFT. - norm : {None, "ortho"}, optional - .. versionadded:: 1.10.0 - - Normalization mode (see `numpy.fft`). Default is None. - - Returns - ------- - out : ndarray - The result of the real 2-D FFT. - - See Also - -------- - rfftn : Compute the N-dimensional discrete Fourier Transform for real - input. - - Notes - ----- - This is really just `rfftn` with different default behavior. - For more details see `rfftn`. - - """ - - return rfftn(a, s, axes, norm) - - -@array_function_dispatch(_fftn_dispatcher) -def irfftn(a, s=None, axes=None, norm=None): - """ - Compute the inverse of the N-dimensional FFT of real input. - - This function computes the inverse of the N-dimensional discrete - Fourier Transform for real input over any number of axes in an - M-dimensional array by means of the Fast Fourier Transform (FFT). In - other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical - accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`, - and for the same reason.) - - The input should be ordered in the same way as is returned by `rfftn`, - i.e. 
as for `irfft` for the final transformation axis, and as for `ifftn` - along all the other axes. - - Parameters - ---------- - a : array_like - Input array. - s : sequence of ints, optional - Shape (length of each transformed axis) of the output - (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the - number of input points used along this axis, except for the last axis, - where ``s[-1]//2+1`` points of the input are used. - Along any axis, if the shape indicated by `s` is smaller than that of - the input, the input is cropped. If it is larger, the input is padded - with zeros. If `s` is not given, the shape of the input along the axes - specified by axes is used. Except for the last axis which is taken to be - ``2*(m-1)`` where ``m`` is the length of the input along that axis. - axes : sequence of ints, optional - Axes over which to compute the inverse FFT. If not given, the last - `len(s)` axes are used, or all axes if `s` is also not specified. - Repeated indices in `axes` means that the inverse transform over that - axis is performed multiple times. - norm : {None, "ortho"}, optional - .. versionadded:: 1.10.0 - - Normalization mode (see `numpy.fft`). Default is None. - - Returns - ------- - out : ndarray - The truncated or zero-padded input, transformed along the axes - indicated by `axes`, or by a combination of `s` or `a`, - as explained in the parameters section above. - The length of each transformed axis is as given by the corresponding - element of `s`, or the length of the input in every axis except for the - last one if `s` is not given. In the final transformed axis the length - of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the - length of the final transformed axis of the input. To get an odd - number of output points in the final axis, `s` must be specified. - - Raises - ------ - ValueError - If `s` and `axes` have different length. 
- IndexError - If an element of `axes` is larger than than the number of axes of `a`. - - See Also - -------- - rfftn : The forward n-dimensional FFT of real input, - of which `ifftn` is the inverse. - fft : The one-dimensional FFT, with definitions and conventions used. - irfft : The inverse of the one-dimensional FFT of real input. - irfft2 : The inverse of the two-dimensional FFT of real input. - - Notes - ----- - See `fft` for definitions and conventions used. - - See `rfft` for definitions and conventions used for real input. - - The correct interpretation of the hermitian input depends on the shape of - the original data, as given by `s`. This is because each input shape could - correspond to either an odd or even length signal. By default, `irfftn` - assumes an even output length which puts the last entry at the Nyquist - frequency; aliasing with its symmetric counterpart. When performing the - final complex to real transform, the last value is thus treated as purely - real. To avoid losing information, the correct shape of the real input - **must** be given. - - Examples - -------- - >>> a = np.zeros((3, 2, 2)) - >>> a[0, 0, 0] = 3 * 2 * 2 - >>> np.fft.irfftn(a) - array([[[1., 1.], - [1., 1.]], - [[1., 1.], - [1., 1.]], - [[1., 1.], - [1., 1.]]]) - - """ - a = asarray(a) - s, axes = _cook_nd_args(a, s, axes, invreal=1) - for ii in range(len(axes)-1): - a = ifft(a, s[ii], axes[ii], norm) - a = irfft(a, s[-1], axes[-1], norm) - return a - - -@array_function_dispatch(_fftn_dispatcher) -def irfft2(a, s=None, axes=(-2, -1), norm=None): - """ - Compute the 2-dimensional inverse FFT of a real array. - - Parameters - ---------- - a : array_like - The input array - s : sequence of ints, optional - Shape of the real output to the inverse FFT. - axes : sequence of ints, optional - The axes over which to compute the inverse fft. - Default is the last two axes. - norm : {None, "ortho"}, optional - .. versionadded:: 1.10.0 - - Normalization mode (see `numpy.fft`). 
Default is None. - - Returns - ------- - out : ndarray - The result of the inverse real 2-D FFT. - - See Also - -------- - irfftn : Compute the inverse of the N-dimensional FFT of real input. - - Notes - ----- - This is really `irfftn` with different defaults. - For more details see `irfftn`. - - """ - - return irfftn(a, s, axes, norm) diff --git a/venv/lib/python3.7/site-packages/numpy/fft/_pocketfft_internal.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/numpy/fft/_pocketfft_internal.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index b4c0a9d..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/fft/_pocketfft_internal.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/fft/helper.py b/venv/lib/python3.7/site-packages/numpy/fft/helper.py deleted file mode 100644 index a920a4a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/fft/helper.py +++ /dev/null @@ -1,224 +0,0 @@ -""" -Discrete Fourier Transforms - helper.py - -""" -from __future__ import division, absolute_import, print_function - -from numpy.compat import integer_types -from numpy.core import integer, empty, arange, asarray, roll -from numpy.core.overrides import array_function_dispatch, set_module - -# Created by Pearu Peterson, September 2002 - -__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq'] - -integer_types = integer_types + (integer,) - - -def _fftshift_dispatcher(x, axes=None): - return (x,) - - -@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft') -def fftshift(x, axes=None): - """ - Shift the zero-frequency component to the center of the spectrum. - - This function swaps half-spaces for all axes listed (defaults to all). - Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even. - - Parameters - ---------- - x : array_like - Input array. - axes : int or shape tuple, optional - Axes over which to shift. Default is None, which shifts all axes. 
- - Returns - ------- - y : ndarray - The shifted array. - - See Also - -------- - ifftshift : The inverse of `fftshift`. - - Examples - -------- - >>> freqs = np.fft.fftfreq(10, 0.1) - >>> freqs - array([ 0., 1., 2., ..., -3., -2., -1.]) - >>> np.fft.fftshift(freqs) - array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.]) - - Shift the zero-frequency component only along the second axis: - - >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) - >>> freqs - array([[ 0., 1., 2.], - [ 3., 4., -4.], - [-3., -2., -1.]]) - >>> np.fft.fftshift(freqs, axes=(1,)) - array([[ 2., 0., 1.], - [-4., 3., 4.], - [-1., -3., -2.]]) - - """ - x = asarray(x) - if axes is None: - axes = tuple(range(x.ndim)) - shift = [dim // 2 for dim in x.shape] - elif isinstance(axes, integer_types): - shift = x.shape[axes] // 2 - else: - shift = [x.shape[ax] // 2 for ax in axes] - - return roll(x, shift, axes) - - -@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft') -def ifftshift(x, axes=None): - """ - The inverse of `fftshift`. Although identical for even-length `x`, the - functions differ by one sample for odd-length `x`. - - Parameters - ---------- - x : array_like - Input array. - axes : int or shape tuple, optional - Axes over which to calculate. Defaults to None, which shifts all axes. - - Returns - ------- - y : ndarray - The shifted array. - - See Also - -------- - fftshift : Shift zero-frequency component to the center of the spectrum. 
- - Examples - -------- - >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) - >>> freqs - array([[ 0., 1., 2.], - [ 3., 4., -4.], - [-3., -2., -1.]]) - >>> np.fft.ifftshift(np.fft.fftshift(freqs)) - array([[ 0., 1., 2.], - [ 3., 4., -4.], - [-3., -2., -1.]]) - - """ - x = asarray(x) - if axes is None: - axes = tuple(range(x.ndim)) - shift = [-(dim // 2) for dim in x.shape] - elif isinstance(axes, integer_types): - shift = -(x.shape[axes] // 2) - else: - shift = [-(x.shape[ax] // 2) for ax in axes] - - return roll(x, shift, axes) - - -@set_module('numpy.fft') -def fftfreq(n, d=1.0): - """ - Return the Discrete Fourier Transform sample frequencies. - - The returned float array `f` contains the frequency bin centers in cycles - per unit of the sample spacing (with zero at the start). For instance, if - the sample spacing is in seconds, then the frequency unit is cycles/second. - - Given a window length `n` and a sample spacing `d`:: - - f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even - f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd - - Parameters - ---------- - n : int - Window length. - d : scalar, optional - Sample spacing (inverse of the sampling rate). Defaults to 1. - - Returns - ------- - f : ndarray - Array of length `n` containing the sample frequencies. - - Examples - -------- - >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) - >>> fourier = np.fft.fft(signal) - >>> n = signal.size - >>> timestep = 0.1 - >>> freq = np.fft.fftfreq(n, d=timestep) - >>> freq - array([ 0. 
, 1.25, 2.5 , ..., -3.75, -2.5 , -1.25]) - - """ - if not isinstance(n, integer_types): - raise ValueError("n should be an integer") - val = 1.0 / (n * d) - results = empty(n, int) - N = (n-1)//2 + 1 - p1 = arange(0, N, dtype=int) - results[:N] = p1 - p2 = arange(-(n//2), 0, dtype=int) - results[N:] = p2 - return results * val - - -@set_module('numpy.fft') -def rfftfreq(n, d=1.0): - """ - Return the Discrete Fourier Transform sample frequencies - (for usage with rfft, irfft). - - The returned float array `f` contains the frequency bin centers in cycles - per unit of the sample spacing (with zero at the start). For instance, if - the sample spacing is in seconds, then the frequency unit is cycles/second. - - Given a window length `n` and a sample spacing `d`:: - - f = [0, 1, ..., n/2-1, n/2] / (d*n) if n is even - f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) if n is odd - - Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`) - the Nyquist frequency component is considered to be positive. - - Parameters - ---------- - n : int - Window length. - d : scalar, optional - Sample spacing (inverse of the sampling rate). Defaults to 1. - - Returns - ------- - f : ndarray - Array of length ``n//2 + 1`` containing the sample frequencies. 
- - Examples - -------- - >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float) - >>> fourier = np.fft.rfft(signal) - >>> n = signal.size - >>> sample_rate = 100 - >>> freq = np.fft.fftfreq(n, d=1./sample_rate) - >>> freq - array([ 0., 10., 20., ..., -30., -20., -10.]) - >>> freq = np.fft.rfftfreq(n, d=1./sample_rate) - >>> freq - array([ 0., 10., 20., 30., 40., 50.]) - - """ - if not isinstance(n, integer_types): - raise ValueError("n should be an integer") - val = 1.0/(n*d) - N = n//2 + 1 - results = arange(0, N, dtype=int) - return results * val diff --git a/venv/lib/python3.7/site-packages/numpy/fft/setup.py b/venv/lib/python3.7/site-packages/numpy/fft/setup.py deleted file mode 100644 index 8c3a315..0000000 --- a/venv/lib/python3.7/site-packages/numpy/fft/setup.py +++ /dev/null @@ -1,19 +0,0 @@ -from __future__ import division, print_function - - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('fft', parent_package, top_path) - - config.add_data_dir('tests') - - # Configure pocketfft_internal - config.add_extension('_pocketfft_internal', - sources=['_pocketfft.c'] - ) - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/venv/lib/python3.7/site-packages/numpy/fft/tests/__init__.py b/venv/lib/python3.7/site-packages/numpy/fft/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/numpy/fft/tests/test_helper.py b/venv/lib/python3.7/site-packages/numpy/fft/tests/test_helper.py deleted file mode 100644 index 6613c80..0000000 --- a/venv/lib/python3.7/site-packages/numpy/fft/tests/test_helper.py +++ /dev/null @@ -1,169 +0,0 @@ -"""Test functions for fftpack.helper module - -Copied from fftpack.helper by Pearu Peterson, October 2005 - -""" -from __future__ import division, absolute_import, print_function -import numpy as np -from 
numpy.testing import assert_array_almost_equal, assert_equal -from numpy import fft, pi - - -class TestFFTShift(object): - - def test_definition(self): - x = [0, 1, 2, 3, 4, -4, -3, -2, -1] - y = [-4, -3, -2, -1, 0, 1, 2, 3, 4] - assert_array_almost_equal(fft.fftshift(x), y) - assert_array_almost_equal(fft.ifftshift(y), x) - x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1] - y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4] - assert_array_almost_equal(fft.fftshift(x), y) - assert_array_almost_equal(fft.ifftshift(y), x) - - def test_inverse(self): - for n in [1, 4, 9, 100, 211]: - x = np.random.random((n,)) - assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x) - - def test_axes_keyword(self): - freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]] - shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]] - assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted) - assert_array_almost_equal(fft.fftshift(freqs, axes=0), - fft.fftshift(freqs, axes=(0,))) - assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs) - assert_array_almost_equal(fft.ifftshift(shifted, axes=0), - fft.ifftshift(shifted, axes=(0,))) - - assert_array_almost_equal(fft.fftshift(freqs), shifted) - assert_array_almost_equal(fft.ifftshift(shifted), freqs) - - def test_uneven_dims(self): - """ Test 2D input, which has uneven dimension sizes """ - freqs = [ - [0, 1], - [2, 3], - [4, 5] - ] - - # shift in dimension 0 - shift_dim0 = [ - [4, 5], - [0, 1], - [2, 3] - ] - assert_array_almost_equal(fft.fftshift(freqs, axes=0), shift_dim0) - assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=0), freqs) - assert_array_almost_equal(fft.fftshift(freqs, axes=(0,)), shift_dim0) - assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=[0]), freqs) - - # shift in dimension 1 - shift_dim1 = [ - [1, 0], - [3, 2], - [5, 4] - ] - assert_array_almost_equal(fft.fftshift(freqs, axes=1), shift_dim1) - assert_array_almost_equal(fft.ifftshift(shift_dim1, axes=1), freqs) - - # shift in both dimensions - shift_dim_both = 
[ - [5, 4], - [1, 0], - [3, 2] - ] - assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shift_dim_both) - assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=(0, 1)), freqs) - assert_array_almost_equal(fft.fftshift(freqs, axes=[0, 1]), shift_dim_both) - assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=[0, 1]), freqs) - - # axes=None (default) shift in all dimensions - assert_array_almost_equal(fft.fftshift(freqs, axes=None), shift_dim_both) - assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=None), freqs) - assert_array_almost_equal(fft.fftshift(freqs), shift_dim_both) - assert_array_almost_equal(fft.ifftshift(shift_dim_both), freqs) - - def test_equal_to_original(self): - """ Test that the new (>=v1.15) implementation (see #10073) is equal to the original (<=v1.14) """ - from numpy.compat import integer_types - from numpy.core import asarray, concatenate, arange, take - - def original_fftshift(x, axes=None): - """ How fftshift was implemented in v1.14""" - tmp = asarray(x) - ndim = tmp.ndim - if axes is None: - axes = list(range(ndim)) - elif isinstance(axes, integer_types): - axes = (axes,) - y = tmp - for k in axes: - n = tmp.shape[k] - p2 = (n + 1) // 2 - mylist = concatenate((arange(p2, n), arange(p2))) - y = take(y, mylist, k) - return y - - def original_ifftshift(x, axes=None): - """ How ifftshift was implemented in v1.14 """ - tmp = asarray(x) - ndim = tmp.ndim - if axes is None: - axes = list(range(ndim)) - elif isinstance(axes, integer_types): - axes = (axes,) - y = tmp - for k in axes: - n = tmp.shape[k] - p2 = n - (n + 1) // 2 - mylist = concatenate((arange(p2, n), arange(p2))) - y = take(y, mylist, k) - return y - - # create possible 2d array combinations and try all possible keywords - # compare output to original functions - for i in range(16): - for j in range(16): - for axes_keyword in [0, 1, None, (0,), (0, 1)]: - inp = np.random.rand(i, j) - - assert_array_almost_equal(fft.fftshift(inp, axes_keyword), - 
original_fftshift(inp, axes_keyword)) - - assert_array_almost_equal(fft.ifftshift(inp, axes_keyword), - original_ifftshift(inp, axes_keyword)) - - -class TestFFTFreq(object): - - def test_definition(self): - x = [0, 1, 2, 3, 4, -4, -3, -2, -1] - assert_array_almost_equal(9*fft.fftfreq(9), x) - assert_array_almost_equal(9*pi*fft.fftfreq(9, pi), x) - x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1] - assert_array_almost_equal(10*fft.fftfreq(10), x) - assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x) - - -class TestRFFTFreq(object): - - def test_definition(self): - x = [0, 1, 2, 3, 4] - assert_array_almost_equal(9*fft.rfftfreq(9), x) - assert_array_almost_equal(9*pi*fft.rfftfreq(9, pi), x) - x = [0, 1, 2, 3, 4, 5] - assert_array_almost_equal(10*fft.rfftfreq(10), x) - assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x) - - -class TestIRFFTN(object): - - def test_not_last_axis_success(self): - ar, ai = np.random.random((2, 16, 8, 32)) - a = ar + 1j*ai - - axes = (-2,) - - # Should not raise error - fft.irfftn(a, axes=axes) diff --git a/venv/lib/python3.7/site-packages/numpy/fft/tests/test_pocketfft.py b/venv/lib/python3.7/site-packages/numpy/fft/tests/test_pocketfft.py deleted file mode 100644 index 453e964..0000000 --- a/venv/lib/python3.7/site-packages/numpy/fft/tests/test_pocketfft.py +++ /dev/null @@ -1,261 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -import pytest -from numpy.random import random -from numpy.testing import ( - assert_array_equal, assert_raises, assert_allclose - ) -import threading -import sys -if sys.version_info[0] >= 3: - import queue -else: - import Queue as queue - - -def fft1(x): - L = len(x) - phase = -2j*np.pi*(np.arange(L)/float(L)) - phase = np.arange(L).reshape(-1, 1) * phase - return np.sum(x*np.exp(phase), axis=1) - - -class TestFFTShift(object): - - def test_fft_n(self): - assert_raises(ValueError, np.fft.fft, [1, 2, 3], 0) - - -class TestFFT1D(object): - - def test_identity(self): 
- maxlen = 512 - x = random(maxlen) + 1j*random(maxlen) - xr = random(maxlen) - for i in range(1,maxlen): - assert_allclose(np.fft.ifft(np.fft.fft(x[0:i])), x[0:i], - atol=1e-12) - assert_allclose(np.fft.irfft(np.fft.rfft(xr[0:i]),i), - xr[0:i], atol=1e-12) - - def test_fft(self): - x = random(30) + 1j*random(30) - assert_allclose(fft1(x), np.fft.fft(x), atol=1e-6) - assert_allclose(fft1(x) / np.sqrt(30), - np.fft.fft(x, norm="ortho"), atol=1e-6) - - @pytest.mark.parametrize('norm', (None, 'ortho')) - def test_ifft(self, norm): - x = random(30) + 1j*random(30) - assert_allclose( - x, np.fft.ifft(np.fft.fft(x, norm=norm), norm=norm), - atol=1e-6) - # Ensure we get the correct error message - with pytest.raises(ValueError, - match='Invalid number of FFT data points'): - np.fft.ifft([], norm=norm) - - def test_fft2(self): - x = random((30, 20)) + 1j*random((30, 20)) - assert_allclose(np.fft.fft(np.fft.fft(x, axis=1), axis=0), - np.fft.fft2(x), atol=1e-6) - assert_allclose(np.fft.fft2(x) / np.sqrt(30 * 20), - np.fft.fft2(x, norm="ortho"), atol=1e-6) - - def test_ifft2(self): - x = random((30, 20)) + 1j*random((30, 20)) - assert_allclose(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0), - np.fft.ifft2(x), atol=1e-6) - assert_allclose(np.fft.ifft2(x) * np.sqrt(30 * 20), - np.fft.ifft2(x, norm="ortho"), atol=1e-6) - - def test_fftn(self): - x = random((30, 20, 10)) + 1j*random((30, 20, 10)) - assert_allclose( - np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0), - np.fft.fftn(x), atol=1e-6) - assert_allclose(np.fft.fftn(x) / np.sqrt(30 * 20 * 10), - np.fft.fftn(x, norm="ortho"), atol=1e-6) - - def test_ifftn(self): - x = random((30, 20, 10)) + 1j*random((30, 20, 10)) - assert_allclose( - np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0), - np.fft.ifftn(x), atol=1e-6) - assert_allclose(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10), - np.fft.ifftn(x, norm="ortho"), atol=1e-6) - - def test_rfft(self): - x = random(30) - for n in [x.size, 2*x.size]: - for norm 
in [None, 'ortho']: - assert_allclose( - np.fft.fft(x, n=n, norm=norm)[:(n//2 + 1)], - np.fft.rfft(x, n=n, norm=norm), atol=1e-6) - assert_allclose( - np.fft.rfft(x, n=n) / np.sqrt(n), - np.fft.rfft(x, n=n, norm="ortho"), atol=1e-6) - - def test_irfft(self): - x = random(30) - assert_allclose(x, np.fft.irfft(np.fft.rfft(x)), atol=1e-6) - assert_allclose( - x, np.fft.irfft(np.fft.rfft(x, norm="ortho"), norm="ortho"), atol=1e-6) - - def test_rfft2(self): - x = random((30, 20)) - assert_allclose(np.fft.fft2(x)[:, :11], np.fft.rfft2(x), atol=1e-6) - assert_allclose(np.fft.rfft2(x) / np.sqrt(30 * 20), - np.fft.rfft2(x, norm="ortho"), atol=1e-6) - - def test_irfft2(self): - x = random((30, 20)) - assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x)), atol=1e-6) - assert_allclose( - x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"), norm="ortho"), atol=1e-6) - - def test_rfftn(self): - x = random((30, 20, 10)) - assert_allclose(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x), atol=1e-6) - assert_allclose(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10), - np.fft.rfftn(x, norm="ortho"), atol=1e-6) - - def test_irfftn(self): - x = random((30, 20, 10)) - assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x)), atol=1e-6) - assert_allclose( - x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"), norm="ortho"), atol=1e-6) - - def test_hfft(self): - x = random(14) + 1j*random(14) - x_herm = np.concatenate((random(1), x, random(1))) - x = np.concatenate((x_herm, x[::-1].conj())) - assert_allclose(np.fft.fft(x), np.fft.hfft(x_herm), atol=1e-6) - assert_allclose(np.fft.hfft(x_herm) / np.sqrt(30), - np.fft.hfft(x_herm, norm="ortho"), atol=1e-6) - - def test_ihttf(self): - x = random(14) + 1j*random(14) - x_herm = np.concatenate((random(1), x, random(1))) - x = np.concatenate((x_herm, x[::-1].conj())) - assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)), atol=1e-6) - assert_allclose( - x_herm, np.fft.ihfft(np.fft.hfft(x_herm, norm="ortho"), - norm="ortho"), atol=1e-6) - - @pytest.mark.parametrize("op", 
[np.fft.fftn, np.fft.ifftn, - np.fft.rfftn, np.fft.irfftn]) - def test_axes(self, op): - x = random((30, 20, 10)) - axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)] - for a in axes: - op_tr = op(np.transpose(x, a)) - tr_op = np.transpose(op(x, axes=a), a) - assert_allclose(op_tr, tr_op, atol=1e-6) - - def test_all_1d_norm_preserving(self): - # verify that round-trip transforms are norm-preserving - x = random(30) - x_norm = np.linalg.norm(x) - n = x.size * 2 - func_pairs = [(np.fft.fft, np.fft.ifft), - (np.fft.rfft, np.fft.irfft), - # hfft: order so the first function takes x.size samples - # (necessary for comparison to x_norm above) - (np.fft.ihfft, np.fft.hfft), - ] - for forw, back in func_pairs: - for n in [x.size, 2*x.size]: - for norm in [None, 'ortho']: - tmp = forw(x, n=n, norm=norm) - tmp = back(tmp, n=n, norm=norm) - assert_allclose(x_norm, - np.linalg.norm(tmp), atol=1e-6) - - @pytest.mark.parametrize("dtype", [np.half, np.single, np.double, - np.longdouble]) - def test_dtypes(self, dtype): - # make sure that all input precisions are accepted and internally - # converted to 64bit - x = random(30).astype(dtype) - assert_allclose(np.fft.ifft(np.fft.fft(x)), x, atol=1e-6) - assert_allclose(np.fft.irfft(np.fft.rfft(x)), x, atol=1e-6) - - -@pytest.mark.parametrize( - "dtype", - [np.float32, np.float64, np.complex64, np.complex128]) -@pytest.mark.parametrize("order", ["F", 'non-contiguous']) -@pytest.mark.parametrize( - "fft", - [np.fft.fft, np.fft.fft2, np.fft.fftn, - np.fft.ifft, np.fft.ifft2, np.fft.ifftn]) -def test_fft_with_order(dtype, order, fft): - # Check that FFT/IFFT produces identical results for C, Fortran and - # non contiguous arrays - rng = np.random.RandomState(42) - X = rng.rand(8, 7, 13).astype(dtype, copy=False) - # See discussion in pull/14178 - _tol = 8.0 * np.sqrt(np.log2(X.size)) * np.finfo(X.dtype).eps - if order == 'F': - Y = np.asfortranarray(X) - else: - # Make a non contiguous array - Y = X[::-1] - X = 
np.ascontiguousarray(X[::-1]) - - if fft.__name__.endswith('fft'): - for axis in range(3): - X_res = fft(X, axis=axis) - Y_res = fft(Y, axis=axis) - assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol) - elif fft.__name__.endswith(('fft2', 'fftn')): - axes = [(0, 1), (1, 2), (0, 2)] - if fft.__name__.endswith('fftn'): - axes.extend([(0,), (1,), (2,), None]) - for ax in axes: - X_res = fft(X, axes=ax) - Y_res = fft(Y, axes=ax) - assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol) - else: - raise ValueError() - - -class TestFFTThreadSafe(object): - threads = 16 - input_shape = (800, 200) - - def _test_mtsame(self, func, *args): - def worker(args, q): - q.put(func(*args)) - - q = queue.Queue() - expected = func(*args) - - # Spin off a bunch of threads to call the same function simultaneously - t = [threading.Thread(target=worker, args=(args, q)) - for i in range(self.threads)] - [x.start() for x in t] - - [x.join() for x in t] - # Make sure all threads returned the correct value - for i in range(self.threads): - assert_array_equal(q.get(timeout=5), expected, - 'Function returned wrong value in multithreaded context') - - def test_fft(self): - a = np.ones(self.input_shape) * 1+0j - self._test_mtsame(np.fft.fft, a) - - def test_ifft(self): - a = np.ones(self.input_shape) * 1+0j - self._test_mtsame(np.fft.ifft, a) - - def test_rfft(self): - a = np.ones(self.input_shape) - self._test_mtsame(np.fft.rfft, a) - - def test_irfft(self): - a = np.ones(self.input_shape) * 1+0j - self._test_mtsame(np.fft.irfft, a) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/__init__.py b/venv/lib/python3.7/site-packages/numpy/lib/__init__.py deleted file mode 100644 index 2db12d9..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/__init__.py +++ /dev/null @@ -1,65 +0,0 @@ -""" -**Note:** almost all functions in the ``numpy.lib`` namespace -are also present in the main ``numpy`` namespace. Please use the -functions as ``np.`` where possible. 
- -``numpy.lib`` is mostly a space for implementing functions that don't -belong in core or in another NumPy submodule with a clear purpose -(e.g. ``random``, ``fft``, ``linalg``, ``ma``). - -Most contains basic functions that are used by several submodules and are -useful to have in the main name-space. - -""" -from __future__ import division, absolute_import, print_function - -import math - -from numpy.version import version as __version__ - -# Public submodules -# Note: recfunctions and (maybe) format are public too, but not imported -from . import mixins -from . import scimath as emath - -# Private submodules -from .type_check import * -from .index_tricks import * -from .function_base import * -from .nanfunctions import * -from .shape_base import * -from .stride_tricks import * -from .twodim_base import * -from .ufunclike import * -from .histograms import * - -from .polynomial import * -from .utils import * -from .arraysetops import * -from .npyio import * -from .financial import * -from .arrayterator import Arrayterator -from .arraypad import * -from ._version import * -from numpy.core._multiarray_umath import tracemalloc_domain - -__all__ = ['emath', 'math', 'tracemalloc_domain', 'Arrayterator'] -__all__ += type_check.__all__ -__all__ += index_tricks.__all__ -__all__ += function_base.__all__ -__all__ += shape_base.__all__ -__all__ += stride_tricks.__all__ -__all__ += twodim_base.__all__ -__all__ += ufunclike.__all__ -__all__ += arraypad.__all__ -__all__ += polynomial.__all__ -__all__ += utils.__all__ -__all__ += arraysetops.__all__ -__all__ += npyio.__all__ -__all__ += financial.__all__ -__all__ += nanfunctions.__all__ -__all__ += histograms.__all__ - -from numpy._pytesttester import PytestTester -test = PytestTester(__name__) -del PytestTester diff --git a/venv/lib/python3.7/site-packages/numpy/lib/_datasource.py b/venv/lib/python3.7/site-packages/numpy/lib/_datasource.py deleted file mode 100644 index 0d71375..0000000 --- 
a/venv/lib/python3.7/site-packages/numpy/lib/_datasource.py +++ /dev/null @@ -1,794 +0,0 @@ -"""A file interface for handling local and remote data files. - -The goal of datasource is to abstract some of the file system operations -when dealing with data files so the researcher doesn't have to know all the -low-level details. Through datasource, a researcher can obtain and use a -file with one function call, regardless of location of the file. - -DataSource is meant to augment standard python libraries, not replace them. -It should work seamlessly with standard file IO operations and the os -module. - -DataSource files can originate locally or remotely: - -- local files : '/home/guido/src/local/data.txt' -- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt' - -DataSource files can also be compressed or uncompressed. Currently only -gzip, bz2 and xz are supported. - -Example:: - - >>> # Create a DataSource, use os.curdir (default) for local storage. - >>> from numpy import DataSource - >>> ds = DataSource() - >>> - >>> # Open a remote file. - >>> # DataSource downloads the file, stores it locally in: - >>> # './www.google.com/index.html' - >>> # opens the file and returns a file object. - >>> fp = ds.open('http://www.google.com/') # doctest: +SKIP - >>> - >>> # Use the file as you normally would - >>> fp.read() # doctest: +SKIP - >>> fp.close() # doctest: +SKIP - -""" -from __future__ import division, absolute_import, print_function - -import os -import sys -import warnings -import shutil -import io -from contextlib import closing - -from numpy.core.overrides import set_module - - -_open = open - - -def _check_mode(mode, encoding, newline): - """Check mode and that encoding and newline are compatible. - - Parameters - ---------- - mode : str - File open mode. - encoding : str - File encoding. - newline : str - Newline for text files. 
- - """ - if "t" in mode: - if "b" in mode: - raise ValueError("Invalid mode: %r" % (mode,)) - else: - if encoding is not None: - raise ValueError("Argument 'encoding' not supported in binary mode") - if newline is not None: - raise ValueError("Argument 'newline' not supported in binary mode") - - -def _python2_bz2open(fn, mode, encoding, newline): - """Wrapper to open bz2 in text mode. - - Parameters - ---------- - fn : str - File name - mode : {'r', 'w'} - File mode. Note that bz2 Text files are not supported. - encoding : str - Ignored, text bz2 files not supported in Python2. - newline : str - Ignored, text bz2 files not supported in Python2. - """ - import bz2 - - _check_mode(mode, encoding, newline) - - if "t" in mode: - # BZ2File is missing necessary functions for TextIOWrapper - warnings.warn("Assuming latin1 encoding for bz2 text file in Python2", - RuntimeWarning, stacklevel=5) - mode = mode.replace("t", "") - return bz2.BZ2File(fn, mode) - -def _python2_gzipopen(fn, mode, encoding, newline): - """ Wrapper to open gzip in text mode. - - Parameters - ---------- - fn : str, bytes, file - File path or opened file. - mode : str - File mode. The actual files are opened as binary, but will decoded - using the specified `encoding` and `newline`. - encoding : str - Encoding to be used when reading/writing as text. - newline : str - Newline to be used when reading/writing as text. 
- - """ - import gzip - # gzip is lacking read1 needed for TextIOWrapper - class GzipWrap(gzip.GzipFile): - def read1(self, n): - return self.read(n) - - _check_mode(mode, encoding, newline) - - gz_mode = mode.replace("t", "") - - if isinstance(fn, (str, bytes)): - binary_file = GzipWrap(fn, gz_mode) - elif hasattr(fn, "read") or hasattr(fn, "write"): - binary_file = GzipWrap(None, gz_mode, fileobj=fn) - else: - raise TypeError("filename must be a str or bytes object, or a file") - - if "t" in mode: - return io.TextIOWrapper(binary_file, encoding, newline=newline) - else: - return binary_file - - -# Using a class instead of a module-level dictionary -# to reduce the initial 'import numpy' overhead by -# deferring the import of lzma, bz2 and gzip until needed - -# TODO: .zip support, .tar support? -class _FileOpeners(object): - """ - Container for different methods to open (un-)compressed files. - - `_FileOpeners` contains a dictionary that holds one method for each - supported file format. Attribute lookup is implemented in such a way - that an instance of `_FileOpeners` itself can be indexed with the keys - of that dictionary. Currently uncompressed files as well as files - compressed with ``gzip``, ``bz2`` or ``xz`` compression are supported. - - Notes - ----- - `_file_openers`, an instance of `_FileOpeners`, is made available for - use in the `_datasource` module. 
- - Examples - -------- - >>> import gzip - >>> np.lib._datasource._file_openers.keys() - [None, '.bz2', '.gz', '.xz', '.lzma'] - >>> np.lib._datasource._file_openers['.gz'] is gzip.open - True - - """ - - def __init__(self): - self._loaded = False - self._file_openers = {None: io.open} - - def _load(self): - if self._loaded: - return - - try: - import bz2 - if sys.version_info[0] >= 3: - self._file_openers[".bz2"] = bz2.open - else: - self._file_openers[".bz2"] = _python2_bz2open - except ImportError: - pass - - try: - import gzip - if sys.version_info[0] >= 3: - self._file_openers[".gz"] = gzip.open - else: - self._file_openers[".gz"] = _python2_gzipopen - except ImportError: - pass - - try: - import lzma - self._file_openers[".xz"] = lzma.open - self._file_openers[".lzma"] = lzma.open - except (ImportError, AttributeError): - # There are incompatible backports of lzma that do not have the - # lzma.open attribute, so catch that as well as ImportError. - pass - - self._loaded = True - - def keys(self): - """ - Return the keys of currently supported file openers. - - Parameters - ---------- - None - - Returns - ------- - keys : list - The keys are None for uncompressed files and the file extension - strings (i.e. ``'.gz'``, ``'.xz'``) for supported compression - methods. - - """ - self._load() - return list(self._file_openers.keys()) - - def __getitem__(self, key): - self._load() - return self._file_openers[key] - -_file_openers = _FileOpeners() - -def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None): - """ - Open `path` with `mode` and return the file object. - - If ``path`` is an URL, it will be downloaded, stored in the - `DataSource` `destpath` directory and opened from there. - - Parameters - ---------- - path : str - Local file path or URL to open. - mode : str, optional - Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to - append. Available modes depend on the type of object specified by - path. Default is 'r'. 
- destpath : str, optional - Path to the directory where the source file gets downloaded to for - use. If `destpath` is None, a temporary directory will be created. - The default path is the current directory. - encoding : {None, str}, optional - Open text file with given encoding. The default encoding will be - what `io.open` uses. - newline : {None, str}, optional - Newline to use when reading text file. - - Returns - ------- - out : file object - The opened file. - - Notes - ----- - This is a convenience function that instantiates a `DataSource` and - returns the file object from ``DataSource.open(path)``. - - """ - - ds = DataSource(destpath) - return ds.open(path, mode, encoding=encoding, newline=newline) - - -@set_module('numpy') -class DataSource(object): - """ - DataSource(destpath='.') - - A generic data source file (file, http, ftp, ...). - - DataSources can be local files or remote files/URLs. The files may - also be compressed or uncompressed. DataSource hides some of the - low-level details of downloading the file, allowing you to simply pass - in a valid file path (or URL) and obtain a file object. - - Parameters - ---------- - destpath : str or None, optional - Path to the directory where the source file gets downloaded to for - use. If `destpath` is None, a temporary directory will be created. - The default path is the current directory. - - Notes - ----- - URLs require a scheme string (``http://``) to be used, without it they - will fail:: - - >>> repos = np.DataSource() - >>> repos.exists('www.google.com/index.html') - False - >>> repos.exists('http://www.google.com/index.html') - True - - Temporary directories are deleted when the DataSource is deleted. 
- - Examples - -------- - :: - - >>> ds = np.DataSource('/home/guido') - >>> urlname = 'http://www.google.com/' - >>> gfile = ds.open('http://www.google.com/') - >>> ds.abspath(urlname) - '/home/guido/www.google.com/index.html' - - >>> ds = np.DataSource(None) # use with temporary file - >>> ds.open('/home/guido/foobar.txt') - - >>> ds.abspath('/home/guido/foobar.txt') - '/tmp/.../home/guido/foobar.txt' - - """ - - def __init__(self, destpath=os.curdir): - """Create a DataSource with a local path at destpath.""" - if destpath: - self._destpath = os.path.abspath(destpath) - self._istmpdest = False - else: - import tempfile # deferring import to improve startup time - self._destpath = tempfile.mkdtemp() - self._istmpdest = True - - def __del__(self): - # Remove temp directories - if hasattr(self, '_istmpdest') and self._istmpdest: - shutil.rmtree(self._destpath) - - def _iszip(self, filename): - """Test if the filename is a zip file by looking at the file extension. - - """ - fname, ext = os.path.splitext(filename) - return ext in _file_openers.keys() - - def _iswritemode(self, mode): - """Test if the given mode will open a file for writing.""" - - # Currently only used to test the bz2 files. - _writemodes = ("w", "+") - for c in mode: - if c in _writemodes: - return True - return False - - def _splitzipext(self, filename): - """Split zip extension from filename and return filename. - - *Returns*: - base, zip_ext : {tuple} - - """ - - if self._iszip(filename): - return os.path.splitext(filename) - else: - return filename, None - - def _possible_names(self, filename): - """Return a tuple containing compressed filename variations.""" - names = [filename] - if not self._iszip(filename): - for zipext in _file_openers.keys(): - if zipext: - names.append(filename+zipext) - return names - - def _isurl(self, path): - """Test if path is a net location. Tests the scheme and netloc.""" - - # We do this here to reduce the 'import numpy' initial import time. 
- if sys.version_info[0] >= 3: - from urllib.parse import urlparse - else: - from urlparse import urlparse - - # BUG : URLs require a scheme string ('http://') to be used. - # www.google.com will fail. - # Should we prepend the scheme for those that don't have it and - # test that also? Similar to the way we append .gz and test for - # for compressed versions of files. - - scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path) - return bool(scheme and netloc) - - def _cache(self, path): - """Cache the file specified by path. - - Creates a copy of the file in the datasource cache. - - """ - # We import these here because importing urllib2 is slow and - # a significant fraction of numpy's total import time. - if sys.version_info[0] >= 3: - from urllib.request import urlopen - from urllib.error import URLError - else: - from urllib2 import urlopen - from urllib2 import URLError - - upath = self.abspath(path) - - # ensure directory exists - if not os.path.exists(os.path.dirname(upath)): - os.makedirs(os.path.dirname(upath)) - - # TODO: Doesn't handle compressed files! - if self._isurl(path): - try: - with closing(urlopen(path)) as openedurl: - with _open(upath, 'wb') as f: - shutil.copyfileobj(openedurl, f) - except URLError: - raise URLError("URL not found: %s" % path) - else: - shutil.copyfile(path, upath) - return upath - - def _findfile(self, path): - """Searches for ``path`` and returns full path if found. - - If path is an URL, _findfile will cache a local copy and return the - path to the cached file. If path is a local file, _findfile will - return a path to that local file. - - The search will include possible compressed versions of the file - and return the first occurrence found. 
- - """ - - # Build list of possible local file paths - if not self._isurl(path): - # Valid local paths - filelist = self._possible_names(path) - # Paths in self._destpath - filelist += self._possible_names(self.abspath(path)) - else: - # Cached URLs in self._destpath - filelist = self._possible_names(self.abspath(path)) - # Remote URLs - filelist = filelist + self._possible_names(path) - - for name in filelist: - if self.exists(name): - if self._isurl(name): - name = self._cache(name) - return name - return None - - def abspath(self, path): - """ - Return absolute path of file in the DataSource directory. - - If `path` is an URL, then `abspath` will return either the location - the file exists locally or the location it would exist when opened - using the `open` method. - - Parameters - ---------- - path : str - Can be a local file or a remote URL. - - Returns - ------- - out : str - Complete path, including the `DataSource` destination directory. - - Notes - ----- - The functionality is based on `os.path.abspath`. - - """ - # We do this here to reduce the 'import numpy' initial import time. - if sys.version_info[0] >= 3: - from urllib.parse import urlparse - else: - from urlparse import urlparse - - # TODO: This should be more robust. Handles case where path includes - # the destpath, but not other sub-paths. 
Failing case: - # path = /home/guido/datafile.txt - # destpath = /home/alex/ - # upath = self.abspath(path) - # upath == '/home/alex/home/guido/datafile.txt' - - # handle case where path includes self._destpath - splitpath = path.split(self._destpath, 2) - if len(splitpath) > 1: - path = splitpath[1] - scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path) - netloc = self._sanitize_relative_path(netloc) - upath = self._sanitize_relative_path(upath) - return os.path.join(self._destpath, netloc, upath) - - def _sanitize_relative_path(self, path): - """Return a sanitised relative path for which - os.path.abspath(os.path.join(base, path)).startswith(base) - """ - last = None - path = os.path.normpath(path) - while path != last: - last = path - # Note: os.path.join treats '/' as os.sep on Windows - path = path.lstrip(os.sep).lstrip('/') - path = path.lstrip(os.pardir).lstrip('..') - drive, path = os.path.splitdrive(path) # for Windows - return path - - def exists(self, path): - """ - Test if path exists. - - Test if `path` exists as (and in this order): - - - a local file. - - a remote URL that has been downloaded and stored locally in the - `DataSource` directory. - - a remote URL that has not been downloaded, but is valid and - accessible. - - Parameters - ---------- - path : str - Can be a local file or a remote URL. - - Returns - ------- - out : bool - True if `path` exists. - - Notes - ----- - When `path` is an URL, `exists` will return True if it's either - stored locally in the `DataSource` directory, or is a valid remote - URL. `DataSource` does not discriminate between the two, the file - is accessible if it exists in either location. - - """ - - # First test for local path - if os.path.exists(path): - return True - - # We import this here because importing urllib2 is slow and - # a significant fraction of numpy's total import time. 
- if sys.version_info[0] >= 3: - from urllib.request import urlopen - from urllib.error import URLError - else: - from urllib2 import urlopen - from urllib2 import URLError - - # Test cached url - upath = self.abspath(path) - if os.path.exists(upath): - return True - - # Test remote url - if self._isurl(path): - try: - netfile = urlopen(path) - netfile.close() - del(netfile) - return True - except URLError: - return False - return False - - def open(self, path, mode='r', encoding=None, newline=None): - """ - Open and return file-like object. - - If `path` is an URL, it will be downloaded, stored in the - `DataSource` directory and opened from there. - - Parameters - ---------- - path : str - Local file path or URL to open. - mode : {'r', 'w', 'a'}, optional - Mode to open `path`. Mode 'r' for reading, 'w' for writing, - 'a' to append. Available modes depend on the type of object - specified by `path`. Default is 'r'. - encoding : {None, str}, optional - Open text file with given encoding. The default encoding will be - what `io.open` uses. - newline : {None, str}, optional - Newline to use when reading text file. - - Returns - ------- - out : file object - File object. - - """ - - # TODO: There is no support for opening a file for writing which - # doesn't exist yet (creating a file). Should there be? - - # TODO: Add a ``subdir`` parameter for specifying the subdirectory - # used to store URLs in self._destpath. - - if self._isurl(path) and self._iswritemode(mode): - raise ValueError("URLs are not writeable") - - # NOTE: _findfile will fail on a new file opened for writing. - found = self._findfile(path) - if found: - _fname, ext = self._splitzipext(found) - if ext == 'bz2': - mode.replace("+", "") - return _file_openers[ext](found, mode=mode, - encoding=encoding, newline=newline) - else: - raise IOError("%s not found." 
% path) - - -class Repository (DataSource): - """ - Repository(baseurl, destpath='.') - - A data repository where multiple DataSource's share a base - URL/directory. - - `Repository` extends `DataSource` by prepending a base URL (or - directory) to all the files it handles. Use `Repository` when you will - be working with multiple files from one base URL. Initialize - `Repository` with the base URL, then refer to each file by its filename - only. - - Parameters - ---------- - baseurl : str - Path to the local directory or remote location that contains the - data files. - destpath : str or None, optional - Path to the directory where the source file gets downloaded to for - use. If `destpath` is None, a temporary directory will be created. - The default path is the current directory. - - Examples - -------- - To analyze all files in the repository, do something like this - (note: this is not self-contained code):: - - >>> repos = np.lib._datasource.Repository('/home/user/data/dir/') - >>> for filename in filelist: - ... fp = repos.open(filename) - ... fp.analyze() - ... fp.close() - - Similarly you could use a URL for a repository:: - - >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data') - - """ - - def __init__(self, baseurl, destpath=os.curdir): - """Create a Repository with a shared url or directory of baseurl.""" - DataSource.__init__(self, destpath=destpath) - self._baseurl = baseurl - - def __del__(self): - DataSource.__del__(self) - - def _fullpath(self, path): - """Return complete path for path. 
Prepends baseurl if necessary.""" - splitpath = path.split(self._baseurl, 2) - if len(splitpath) == 1: - result = os.path.join(self._baseurl, path) - else: - result = path # path contains baseurl already - return result - - def _findfile(self, path): - """Extend DataSource method to prepend baseurl to ``path``.""" - return DataSource._findfile(self, self._fullpath(path)) - - def abspath(self, path): - """ - Return absolute path of file in the Repository directory. - - If `path` is an URL, then `abspath` will return either the location - the file exists locally or the location it would exist when opened - using the `open` method. - - Parameters - ---------- - path : str - Can be a local file or a remote URL. This may, but does not - have to, include the `baseurl` with which the `Repository` was - initialized. - - Returns - ------- - out : str - Complete path, including the `DataSource` destination directory. - - """ - return DataSource.abspath(self, self._fullpath(path)) - - def exists(self, path): - """ - Test if path exists prepending Repository base URL to path. - - Test if `path` exists as (and in this order): - - - a local file. - - a remote URL that has been downloaded and stored locally in the - `DataSource` directory. - - a remote URL that has not been downloaded, but is valid and - accessible. - - Parameters - ---------- - path : str - Can be a local file or a remote URL. This may, but does not - have to, include the `baseurl` with which the `Repository` was - initialized. - - Returns - ------- - out : bool - True if `path` exists. - - Notes - ----- - When `path` is an URL, `exists` will return True if it's either - stored locally in the `DataSource` directory, or is a valid remote - URL. `DataSource` does not discriminate between the two, the file - is accessible if it exists in either location. 
- - """ - return DataSource.exists(self, self._fullpath(path)) - - def open(self, path, mode='r', encoding=None, newline=None): - """ - Open and return file-like object prepending Repository base URL. - - If `path` is an URL, it will be downloaded, stored in the - DataSource directory and opened from there. - - Parameters - ---------- - path : str - Local file path or URL to open. This may, but does not have to, - include the `baseurl` with which the `Repository` was - initialized. - mode : {'r', 'w', 'a'}, optional - Mode to open `path`. Mode 'r' for reading, 'w' for writing, - 'a' to append. Available modes depend on the type of object - specified by `path`. Default is 'r'. - encoding : {None, str}, optional - Open text file with given encoding. The default encoding will be - what `io.open` uses. - newline : {None, str}, optional - Newline to use when reading text file. - - Returns - ------- - out : file object - File object. - - """ - return DataSource.open(self, self._fullpath(path), mode, - encoding=encoding, newline=newline) - - def listdir(self): - """ - List files in the source Repository. - - Returns - ------- - files : list of str - List of file names (not containing a directory part). - - Notes - ----- - Does not currently work for remote repositories. - - """ - if self._isurl(self._baseurl): - raise NotImplementedError( - "Directory listing of URLs, not supported yet.") - else: - return os.listdir(self._baseurl) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/_iotools.py b/venv/lib/python3.7/site-packages/numpy/lib/_iotools.py deleted file mode 100644 index c392929..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/_iotools.py +++ /dev/null @@ -1,958 +0,0 @@ -"""A collection of functions designed to help I/O with ascii files. 
- -""" -from __future__ import division, absolute_import, print_function - -__docformat__ = "restructuredtext en" - -import sys -import numpy as np -import numpy.core.numeric as nx -from numpy.compat import asbytes, asunicode, bytes, basestring - -if sys.version_info[0] >= 3: - from builtins import bool, int, float, complex, object, str - unicode = str -else: - from __builtin__ import bool, int, float, complex, object, unicode, str - - -def _decode_line(line, encoding=None): - """Decode bytes from binary input streams. - - Defaults to decoding from 'latin1'. That differs from the behavior of - np.compat.asunicode that decodes from 'ascii'. - - Parameters - ---------- - line : str or bytes - Line to be decoded. - - Returns - ------- - decoded_line : unicode - Unicode in Python 2, a str (unicode) in Python 3. - - """ - if type(line) is bytes: - if encoding is None: - line = line.decode('latin1') - else: - line = line.decode(encoding) - - return line - - -def _is_string_like(obj): - """ - Check whether obj behaves like a string. - """ - try: - obj + '' - except (TypeError, ValueError): - return False - return True - - -def _is_bytes_like(obj): - """ - Check whether obj behaves like a bytes object. - """ - try: - obj + b'' - except (TypeError, ValueError): - return False - return True - - -def _to_filehandle(fname, flag='r', return_opened=False): - """ - Returns the filehandle corresponding to a string or a file. - If the string ends in '.gz', the file is automatically unzipped. - - Parameters - ---------- - fname : string, filehandle - Name of the file whose filehandle must be returned. - flag : string, optional - Flag indicating the status of the file ('r' for read, 'w' for write). - return_opened : boolean, optional - Whether to return the opening status of the file. 
- """ - if _is_string_like(fname): - if fname.endswith('.gz'): - import gzip - fhd = gzip.open(fname, flag) - elif fname.endswith('.bz2'): - import bz2 - fhd = bz2.BZ2File(fname) - else: - fhd = file(fname, flag) - opened = True - elif hasattr(fname, 'seek'): - fhd = fname - opened = False - else: - raise ValueError('fname must be a string or file handle') - if return_opened: - return fhd, opened - return fhd - - -def has_nested_fields(ndtype): - """ - Returns whether one or several fields of a dtype are nested. - - Parameters - ---------- - ndtype : dtype - Data-type of a structured array. - - Raises - ------ - AttributeError - If `ndtype` does not have a `names` attribute. - - Examples - -------- - >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)]) - >>> np.lib._iotools.has_nested_fields(dt) - False - - """ - for name in ndtype.names or (): - if ndtype[name].names is not None: - return True - return False - - -def flatten_dtype(ndtype, flatten_base=False): - """ - Unpack a structured data-type by collapsing nested fields and/or fields - with a shape. - - Note that the field names are lost. - - Parameters - ---------- - ndtype : dtype - The datatype to collapse - flatten_base : bool, optional - If True, transform a field with a shape into several fields. Default is - False. - - Examples - -------- - >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), - ... 
('block', int, (2, 3))]) - >>> np.lib._iotools.flatten_dtype(dt) - [dtype('S4'), dtype('float64'), dtype('float64'), dtype('int64')] - >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True) - [dtype('S4'), - dtype('float64'), - dtype('float64'), - dtype('int64'), - dtype('int64'), - dtype('int64'), - dtype('int64'), - dtype('int64'), - dtype('int64')] - - """ - names = ndtype.names - if names is None: - if flatten_base: - return [ndtype.base] * int(np.prod(ndtype.shape)) - return [ndtype.base] - else: - types = [] - for field in names: - info = ndtype.fields[field] - flat_dt = flatten_dtype(info[0], flatten_base) - types.extend(flat_dt) - return types - - -class LineSplitter(object): - """ - Object to split a string at a given delimiter or at given places. - - Parameters - ---------- - delimiter : str, int, or sequence of ints, optional - If a string, character used to delimit consecutive fields. - If an integer or a sequence of integers, width(s) of each field. - comments : str, optional - Character used to mark the beginning of a comment. Default is '#'. - autostrip : bool, optional - Whether to strip each individual field. Default is True. - - """ - - def autostrip(self, method): - """ - Wrapper to strip each member of the output of `method`. - - Parameters - ---------- - method : function - Function that takes a single argument and returns a sequence of - strings. - - Returns - ------- - wrapped : function - The result of wrapping `method`. `wrapped` takes a single input - argument and returns a list of strings that are stripped of - white-space. 
- - """ - return lambda input: [_.strip() for _ in method(input)] - # - - def __init__(self, delimiter=None, comments='#', autostrip=True, encoding=None): - delimiter = _decode_line(delimiter) - comments = _decode_line(comments) - - self.comments = comments - - # Delimiter is a character - if (delimiter is None) or isinstance(delimiter, basestring): - delimiter = delimiter or None - _handyman = self._delimited_splitter - # Delimiter is a list of field widths - elif hasattr(delimiter, '__iter__'): - _handyman = self._variablewidth_splitter - idx = np.cumsum([0] + list(delimiter)) - delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])] - # Delimiter is a single integer - elif int(delimiter): - (_handyman, delimiter) = ( - self._fixedwidth_splitter, int(delimiter)) - else: - (_handyman, delimiter) = (self._delimited_splitter, None) - self.delimiter = delimiter - if autostrip: - self._handyman = self.autostrip(_handyman) - else: - self._handyman = _handyman - self.encoding = encoding - # - - def _delimited_splitter(self, line): - """Chop off comments, strip, and split at delimiter. """ - if self.comments is not None: - line = line.split(self.comments)[0] - line = line.strip(" \r\n") - if not line: - return [] - return line.split(self.delimiter) - # - - def _fixedwidth_splitter(self, line): - if self.comments is not None: - line = line.split(self.comments)[0] - line = line.strip("\r\n") - if not line: - return [] - fixed = self.delimiter - slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)] - return [line[s] for s in slices] - # - - def _variablewidth_splitter(self, line): - if self.comments is not None: - line = line.split(self.comments)[0] - if not line: - return [] - slices = self.delimiter - return [line[s] for s in slices] - # - - def __call__(self, line): - return self._handyman(_decode_line(line, self.encoding)) - - -class NameValidator(object): - """ - Object to validate a list of strings to use as field names. 
- - The strings are stripped of any non alphanumeric character, and spaces - are replaced by '_'. During instantiation, the user can define a list - of names to exclude, as well as a list of invalid characters. Names in - the exclusion list are appended a '_' character. - - Once an instance has been created, it can be called with a list of - names, and a list of valid names will be created. The `__call__` - method accepts an optional keyword "default" that sets the default name - in case of ambiguity. By default this is 'f', so that names will - default to `f0`, `f1`, etc. - - Parameters - ---------- - excludelist : sequence, optional - A list of names to exclude. This list is appended to the default - list ['return', 'file', 'print']. Excluded names are appended an - underscore: for example, `file` becomes `file_` if supplied. - deletechars : str, optional - A string combining invalid characters that must be deleted from the - names. - case_sensitive : {True, False, 'upper', 'lower'}, optional - * If True, field names are case-sensitive. - * If False or 'upper', field names are converted to upper case. - * If 'lower', field names are converted to lower case. - - The default value is True. - replace_space : '_', optional - Character(s) used in replacement of white spaces. - - Notes - ----- - Calling an instance of `NameValidator` is the same as calling its - method `validate`. - - Examples - -------- - >>> validator = np.lib._iotools.NameValidator() - >>> validator(['file', 'field2', 'with space', 'CaSe']) - ('file_', 'field2', 'with_space', 'CaSe') - - >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'], - ... deletechars='q', - ... 
case_sensitive=False) - >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe']) - ('EXCL', 'FIELD2', 'NO_Q', 'WITH_SPACE', 'CASE') - - """ - # - defaultexcludelist = ['return', 'file', 'print'] - defaultdeletechars = set(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""") - # - - def __init__(self, excludelist=None, deletechars=None, - case_sensitive=None, replace_space='_'): - # Process the exclusion list .. - if excludelist is None: - excludelist = [] - excludelist.extend(self.defaultexcludelist) - self.excludelist = excludelist - # Process the list of characters to delete - if deletechars is None: - delete = self.defaultdeletechars - else: - delete = set(deletechars) - delete.add('"') - self.deletechars = delete - # Process the case option ..... - if (case_sensitive is None) or (case_sensitive is True): - self.case_converter = lambda x: x - elif (case_sensitive is False) or case_sensitive.startswith('u'): - self.case_converter = lambda x: x.upper() - elif case_sensitive.startswith('l'): - self.case_converter = lambda x: x.lower() - else: - msg = 'unrecognized case_sensitive value %s.' % case_sensitive - raise ValueError(msg) - # - self.replace_space = replace_space - - def validate(self, names, defaultfmt="f%i", nbfields=None): - """ - Validate a list of strings as field names for a structured array. - - Parameters - ---------- - names : sequence of str - Strings to be validated. - defaultfmt : str, optional - Default format string, used if validating a given string - reduces its length to zero. - nbfields : integer, optional - Final number of validated names, used to expand or shrink the - initial list of names. - - Returns - ------- - validatednames : list of str - The list of validated field names. - - Notes - ----- - A `NameValidator` instance can be called directly, which is the - same as calling `validate`. For examples, see `NameValidator`. - - """ - # Initial checks .............. 
- if (names is None): - if (nbfields is None): - return None - names = [] - if isinstance(names, basestring): - names = [names, ] - if nbfields is not None: - nbnames = len(names) - if (nbnames < nbfields): - names = list(names) + [''] * (nbfields - nbnames) - elif (nbnames > nbfields): - names = names[:nbfields] - # Set some shortcuts ........... - deletechars = self.deletechars - excludelist = self.excludelist - case_converter = self.case_converter - replace_space = self.replace_space - # Initializes some variables ... - validatednames = [] - seen = dict() - nbempty = 0 - # - for item in names: - item = case_converter(item).strip() - if replace_space: - item = item.replace(' ', replace_space) - item = ''.join([c for c in item if c not in deletechars]) - if item == '': - item = defaultfmt % nbempty - while item in names: - nbempty += 1 - item = defaultfmt % nbempty - nbempty += 1 - elif item in excludelist: - item += '_' - cnt = seen.get(item, 0) - if cnt > 0: - validatednames.append(item + '_%d' % cnt) - else: - validatednames.append(item) - seen[item] = cnt + 1 - return tuple(validatednames) - # - - def __call__(self, names, defaultfmt="f%i", nbfields=None): - return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields) - - -def str2bool(value): - """ - Tries to transform a string supposed to represent a boolean to a boolean. - - Parameters - ---------- - value : str - The string that is transformed to a boolean. - - Returns - ------- - boolval : bool - The boolean representation of `value`. 
- - Raises - ------ - ValueError - If the string is not 'True' or 'False' (case independent) - - Examples - -------- - >>> np.lib._iotools.str2bool('TRUE') - True - >>> np.lib._iotools.str2bool('false') - False - - """ - value = value.upper() - if value == 'TRUE': - return True - elif value == 'FALSE': - return False - else: - raise ValueError("Invalid boolean") - - -class ConverterError(Exception): - """ - Exception raised when an error occurs in a converter for string values. - - """ - pass - - -class ConverterLockError(ConverterError): - """ - Exception raised when an attempt is made to upgrade a locked converter. - - """ - pass - - -class ConversionWarning(UserWarning): - """ - Warning issued when a string converter has a problem. - - Notes - ----- - In `genfromtxt` a `ConversionWarning` is issued if raising exceptions - is explicitly suppressed with the "invalid_raise" keyword. - - """ - pass - - -class StringConverter(object): - """ - Factory class for function transforming a string into another object - (int, float). - - After initialization, an instance can be called to transform a string - into another object. If the string is recognized as representing a - missing value, a default value is returned. - - Attributes - ---------- - func : function - Function used for the conversion. - default : any - Default value to return when the input corresponds to a missing - value. - type : type - Type of the output. - _status : int - Integer representing the order of the conversion. - _mapper : sequence of tuples - Sequence of tuples (dtype, function, default value) to evaluate in - order. - _locked : bool - Holds `locked` parameter. - - Parameters - ---------- - dtype_or_func : {None, dtype, function}, optional - If a `dtype`, specifies the input data type, used to define a basic - function and a default value for missing data. For example, when - `dtype` is float, the `func` attribute is set to `float` and the - default value to `np.nan`. 
If a function, this function is used to - convert a string to another object. In this case, it is recommended - to give an associated default value as input. - default : any, optional - Value to return by default, that is, when the string to be - converted is flagged as missing. If not given, `StringConverter` - tries to supply a reasonable default value. - missing_values : {None, sequence of str}, optional - ``None`` or sequence of strings indicating a missing value. If ``None`` - then missing values are indicated by empty entries. The default is - ``None``. - locked : bool, optional - Whether the StringConverter should be locked to prevent automatic - upgrade or not. Default is False. - - """ - # - _mapper = [(nx.bool_, str2bool, False), - (nx.integer, int, -1)] - - # On 32-bit systems, we need to make sure that we explicitly include - # nx.int64 since ns.integer is nx.int32. - if nx.dtype(nx.integer).itemsize < nx.dtype(nx.int64).itemsize: - _mapper.append((nx.int64, int, -1)) - - _mapper.extend([(nx.floating, float, nx.nan), - (nx.complexfloating, complex, nx.nan + 0j), - (nx.longdouble, nx.longdouble, nx.nan), - (nx.unicode_, asunicode, '???'), - (nx.string_, asbytes, '???')]) - - (_defaulttype, _defaultfunc, _defaultfill) = zip(*_mapper) - - @classmethod - def _getdtype(cls, val): - """Returns the dtype of the input variable.""" - return np.array(val).dtype - # - - @classmethod - def _getsubdtype(cls, val): - """Returns the type of the dtype of the input variable.""" - return np.array(val).dtype.type - # - # This is a bit annoying. We want to return the "general" type in most - # cases (ie. "string" rather than "S10"), but we want to return the - # specific type for datetime64 (ie. "datetime64[us]" rather than - # "datetime64"). 
- - @classmethod - def _dtypeortype(cls, dtype): - """Returns dtype for datetime64 and type of dtype otherwise.""" - if dtype.type == np.datetime64: - return dtype - return dtype.type - # - - @classmethod - def upgrade_mapper(cls, func, default=None): - """ - Upgrade the mapper of a StringConverter by adding a new function and - its corresponding default. - - The input function (or sequence of functions) and its associated - default value (if any) is inserted in penultimate position of the - mapper. The corresponding type is estimated from the dtype of the - default value. - - Parameters - ---------- - func : var - Function, or sequence of functions - - Examples - -------- - >>> import dateutil.parser - >>> import datetime - >>> dateparser = dateutil.parser.parse - >>> defaultdate = datetime.date(2000, 1, 1) - >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate) - """ - # Func is a single functions - if hasattr(func, '__call__'): - cls._mapper.insert(-1, (cls._getsubdtype(default), func, default)) - return - elif hasattr(func, '__iter__'): - if isinstance(func[0], (tuple, list)): - for _ in func: - cls._mapper.insert(-1, _) - return - if default is None: - default = [None] * len(func) - else: - default = list(default) - default.append([None] * (len(func) - len(default))) - for (fct, dft) in zip(func, default): - cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft)) - # - - def __init__(self, dtype_or_func=None, default=None, missing_values=None, - locked=False): - # Defines a lock for upgrade - self._locked = bool(locked) - # No input dtype: minimal initialization - if dtype_or_func is None: - self.func = str2bool - self._status = 0 - self.default = default or False - dtype = np.dtype('bool') - else: - # Is the input a np.dtype ? 
- try: - self.func = None - dtype = np.dtype(dtype_or_func) - except TypeError: - # dtype_or_func must be a function, then - if not hasattr(dtype_or_func, '__call__'): - errmsg = ("The input argument `dtype` is neither a" - " function nor a dtype (got '%s' instead)") - raise TypeError(errmsg % type(dtype_or_func)) - # Set the function - self.func = dtype_or_func - # If we don't have a default, try to guess it or set it to - # None - if default is None: - try: - default = self.func('0') - except ValueError: - default = None - dtype = self._getdtype(default) - # Set the status according to the dtype - _status = -1 - for (i, (deftype, func, default_def)) in enumerate(self._mapper): - if np.issubdtype(dtype.type, deftype): - _status = i - if default is None: - self.default = default_def - else: - self.default = default - break - # if a converter for the specific dtype is available use that - last_func = func - for (i, (deftype, func, default_def)) in enumerate(self._mapper): - if dtype.type == deftype: - _status = i - last_func = func - if default is None: - self.default = default_def - else: - self.default = default - break - func = last_func - if _status == -1: - # We never found a match in the _mapper... - _status = 0 - self.default = default - self._status = _status - # If the input was a dtype, set the function to the last we saw - if self.func is None: - self.func = func - # If the status is 1 (int), change the function to - # something more robust. - if self.func == self._mapper[1][1]: - if issubclass(dtype.type, np.uint64): - self.func = np.uint64 - elif issubclass(dtype.type, np.int64): - self.func = np.int64 - else: - self.func = lambda x: int(float(x)) - # Store the list of strings corresponding to missing values. 
- if missing_values is None: - self.missing_values = {''} - else: - if isinstance(missing_values, basestring): - missing_values = missing_values.split(",") - self.missing_values = set(list(missing_values) + ['']) - # - self._callingfunction = self._strict_call - self.type = self._dtypeortype(dtype) - self._checked = False - self._initial_default = default - # - - def _loose_call(self, value): - try: - return self.func(value) - except ValueError: - return self.default - # - - def _strict_call(self, value): - try: - - # We check if we can convert the value using the current function - new_value = self.func(value) - - # In addition to having to check whether func can convert the - # value, we also have to make sure that we don't get overflow - # errors for integers. - if self.func is int: - try: - np.array(value, dtype=self.type) - except OverflowError: - raise ValueError - - # We're still here so we can now return the new value - return new_value - - except ValueError: - if value.strip() in self.missing_values: - if not self._status: - self._checked = False - return self.default - raise ValueError("Cannot convert string '%s'" % value) - # - - def __call__(self, value): - return self._callingfunction(value) - # - - def upgrade(self, value): - """ - Find the best converter for a given string, and return the result. - - The supplied string `value` is converted by testing different - converters in order. First the `func` method of the - `StringConverter` instance is tried, if this fails other available - converters are tried. The order in which these other converters - are tried is determined by the `_status` attribute of the instance. - - Parameters - ---------- - value : str - The string to convert. - - Returns - ------- - out : any - The result of converting `value` with the appropriate converter. - - """ - self._checked = True - try: - return self._strict_call(value) - except ValueError: - # Raise an exception if we locked the converter... 
- if self._locked: - errmsg = "Converter is locked and cannot be upgraded" - raise ConverterLockError(errmsg) - _statusmax = len(self._mapper) - # Complains if we try to upgrade by the maximum - _status = self._status - if _status == _statusmax: - errmsg = "Could not find a valid conversion function" - raise ConverterError(errmsg) - elif _status < _statusmax - 1: - _status += 1 - (self.type, self.func, default) = self._mapper[_status] - self._status = _status - if self._initial_default is not None: - self.default = self._initial_default - else: - self.default = default - return self.upgrade(value) - - def iterupgrade(self, value): - self._checked = True - if not hasattr(value, '__iter__'): - value = (value,) - _strict_call = self._strict_call - try: - for _m in value: - _strict_call(_m) - except ValueError: - # Raise an exception if we locked the converter... - if self._locked: - errmsg = "Converter is locked and cannot be upgraded" - raise ConverterLockError(errmsg) - _statusmax = len(self._mapper) - # Complains if we try to upgrade by the maximum - _status = self._status - if _status == _statusmax: - raise ConverterError( - "Could not find a valid conversion function" - ) - elif _status < _statusmax - 1: - _status += 1 - (self.type, self.func, default) = self._mapper[_status] - if self._initial_default is not None: - self.default = self._initial_default - else: - self.default = default - self._status = _status - self.iterupgrade(value) - - def update(self, func, default=None, testing_value=None, - missing_values='', locked=False): - """ - Set StringConverter attributes directly. - - Parameters - ---------- - func : function - Conversion function. - default : any, optional - Value to return by default, that is, when the string to be - converted is flagged as missing. If not given, - `StringConverter` tries to supply a reasonable default value. - testing_value : str, optional - A string representing a standard input value of the converter. 
- This string is used to help defining a reasonable default - value. - missing_values : {sequence of str, None}, optional - Sequence of strings indicating a missing value. If ``None``, then - the existing `missing_values` are cleared. The default is `''`. - locked : bool, optional - Whether the StringConverter should be locked to prevent - automatic upgrade or not. Default is False. - - Notes - ----- - `update` takes the same parameters as the constructor of - `StringConverter`, except that `func` does not accept a `dtype` - whereas `dtype_or_func` in the constructor does. - - """ - self.func = func - self._locked = locked - - # Don't reset the default to None if we can avoid it - if default is not None: - self.default = default - self.type = self._dtypeortype(self._getdtype(default)) - else: - try: - tester = func(testing_value or '1') - except (TypeError, ValueError): - tester = None - self.type = self._dtypeortype(self._getdtype(tester)) - - # Add the missing values to the existing set or clear it. - if missing_values is None: - # Clear all missing values even though the ctor initializes it to - # set(['']) when the argument is None. - self.missing_values = set() - else: - if not np.iterable(missing_values): - missing_values = [missing_values] - if not all(isinstance(v, basestring) for v in missing_values): - raise TypeError("missing_values must be strings or unicode") - self.missing_values.update(missing_values) - - -def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs): - """ - Convenience function to create a `np.dtype` object. - - The function processes the input `dtype` and matches it with the given - names. - - Parameters - ---------- - ndtype : var - Definition of the dtype. Can be any string or dictionary recognized - by the `np.dtype` function, or a sequence of types. - names : str or sequence, optional - Sequence of strings to use as field names for a structured dtype. 
- For convenience, `names` can be a string of a comma-separated list - of names. - defaultfmt : str, optional - Format string used to define missing names, such as ``"f%i"`` - (default) or ``"fields_%02i"``. - validationargs : optional - A series of optional arguments used to initialize a - `NameValidator`. - - Examples - -------- - >>> np.lib._iotools.easy_dtype(float) - dtype('float64') - >>> np.lib._iotools.easy_dtype("i4, f8") - dtype([('f0', '>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i") - dtype([('field_000', '>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c") - dtype([('a', '>> np.lib._iotools.easy_dtype(float, names="a,b,c") - dtype([('a', ' 9) in principle): - - - Released version: '1.8.0', '1.8.1', etc. - - Alpha: '1.8.0a1', '1.8.0a2', etc. - - Beta: '1.8.0b1', '1.8.0b2', etc. - - Release candidates: '1.8.0rc1', '1.8.0rc2', etc. - - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended) - - Development versions after a1: '1.8.0a1.dev-f1234afa', - '1.8.0b2.dev-f1234afa', - '1.8.1rc1.dev-f1234afa', etc. - - Development versions (no git hash available): '1.8.0.dev-Unknown' - - Comparing needs to be done against a valid version string or other - `NumpyVersion` instance. Note that all development versions of the same - (pre-)release compare equal. - - .. versionadded:: 1.9.0 - - Parameters - ---------- - vstring : str - NumPy version string (``np.__version__``). - - Examples - -------- - >>> from numpy.lib import NumpyVersion - >>> if NumpyVersion(np.__version__) < '1.7.0': - ... print('skip') - >>> # skip - - >>> NumpyVersion('1.7') # raises ValueError, add ".0" - Traceback (most recent call last): - ... 
- ValueError: Not a valid numpy version string - - """ - - def __init__(self, vstring): - self.vstring = vstring - ver_main = re.match(r'\d[.]\d+[.]\d+', vstring) - if not ver_main: - raise ValueError("Not a valid numpy version string") - - self.version = ver_main.group() - self.major, self.minor, self.bugfix = [int(x) for x in - self.version.split('.')] - if len(vstring) == ver_main.end(): - self.pre_release = 'final' - else: - alpha = re.match(r'a\d', vstring[ver_main.end():]) - beta = re.match(r'b\d', vstring[ver_main.end():]) - rc = re.match(r'rc\d', vstring[ver_main.end():]) - pre_rel = [m for m in [alpha, beta, rc] if m is not None] - if pre_rel: - self.pre_release = pre_rel[0].group() - else: - self.pre_release = '' - - self.is_devversion = bool(re.search(r'.dev', vstring)) - - def _compare_version(self, other): - """Compare major.minor.bugfix""" - if self.major == other.major: - if self.minor == other.minor: - if self.bugfix == other.bugfix: - vercmp = 0 - elif self.bugfix > other.bugfix: - vercmp = 1 - else: - vercmp = -1 - elif self.minor > other.minor: - vercmp = 1 - else: - vercmp = -1 - elif self.major > other.major: - vercmp = 1 - else: - vercmp = -1 - - return vercmp - - def _compare_pre_release(self, other): - """Compare alpha/beta/rc/final.""" - if self.pre_release == other.pre_release: - vercmp = 0 - elif self.pre_release == 'final': - vercmp = 1 - elif other.pre_release == 'final': - vercmp = -1 - elif self.pre_release > other.pre_release: - vercmp = 1 - else: - vercmp = -1 - - return vercmp - - def _compare(self, other): - if not isinstance(other, (basestring, NumpyVersion)): - raise ValueError("Invalid object to compare with NumpyVersion.") - - if isinstance(other, basestring): - other = NumpyVersion(other) - - vercmp = self._compare_version(other) - if vercmp == 0: - # Same x.y.z version, check for alpha/beta/rc - vercmp = self._compare_pre_release(other) - if vercmp == 0: - # Same version and same pre-release, check if dev version - if 
self.is_devversion is other.is_devversion: - vercmp = 0 - elif self.is_devversion: - vercmp = -1 - else: - vercmp = 1 - - return vercmp - - def __lt__(self, other): - return self._compare(other) < 0 - - def __le__(self, other): - return self._compare(other) <= 0 - - def __eq__(self, other): - return self._compare(other) == 0 - - def __ne__(self, other): - return self._compare(other) != 0 - - def __gt__(self, other): - return self._compare(other) > 0 - - def __ge__(self, other): - return self._compare(other) >= 0 - - def __repr(self): - return "NumpyVersion(%s)" % self.vstring diff --git a/venv/lib/python3.7/site-packages/numpy/lib/arraypad.py b/venv/lib/python3.7/site-packages/numpy/lib/arraypad.py deleted file mode 100644 index 33e6470..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/arraypad.py +++ /dev/null @@ -1,881 +0,0 @@ -""" -The arraypad module contains a group of functions to pad values onto the edges -of an n-dimensional array. - -""" -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.core.overrides import array_function_dispatch -from numpy.lib.index_tricks import ndindex - - -__all__ = ['pad'] - - -############################################################################### -# Private utility functions. - - -def _round_if_needed(arr, dtype): - """ - Rounds arr inplace if destination dtype is integer. - - Parameters - ---------- - arr : ndarray - Input array. - dtype : dtype - The dtype of the destination array. - """ - if np.issubdtype(dtype, np.integer): - arr.round(out=arr) - - -def _slice_at_axis(sl, axis): - """ - Construct tuple of slices to slice an array in the given dimension. - - Parameters - ---------- - sl : slice - The slice for the given dimension. - axis : int - The axis to which `sl` is applied. All other dimensions are left - "unsliced". - - Returns - ------- - sl : tuple of slices - A tuple with slices matching `shape` in length. 
- - Examples - -------- - >>> _slice_at_axis(slice(None, 3, -1), 1) - (slice(None, None, None), slice(None, 3, -1), (...,)) - """ - return (slice(None),) * axis + (sl,) + (...,) - - -def _view_roi(array, original_area_slice, axis): - """ - Get a view of the current region of interest during iterative padding. - - When padding multiple dimensions iteratively corner values are - unnecessarily overwritten multiple times. This function reduces the - working area for the first dimensions so that corners are excluded. - - Parameters - ---------- - array : ndarray - The array with the region of interest. - original_area_slice : tuple of slices - Denotes the area with original values of the unpadded array. - axis : int - The currently padded dimension assuming that `axis` is padded before - `axis` + 1. - - Returns - ------- - roi : ndarray - The region of interest of the original `array`. - """ - axis += 1 - sl = (slice(None),) * axis + original_area_slice[axis:] - return array[sl] - - -def _pad_simple(array, pad_width, fill_value=None): - """ - Pad array on all sides with either a single value or undefined values. - - Parameters - ---------- - array : ndarray - Array to grow. - pad_width : sequence of tuple[int, int] - Pad width on both sides for each dimension in `arr`. - fill_value : scalar, optional - If provided the padded area is filled with this value, otherwise - the pad area left undefined. - - Returns - ------- - padded : ndarray - The padded array with the same dtype as`array`. Its order will default - to C-style if `array` is not F-contiguous. - original_area_slice : tuple - A tuple of slices pointing to the area of the original array. 
- """ - # Allocate grown array - new_shape = tuple( - left + size + right - for size, (left, right) in zip(array.shape, pad_width) - ) - order = 'F' if array.flags.fnc else 'C' # Fortran and not also C-order - padded = np.empty(new_shape, dtype=array.dtype, order=order) - - if fill_value is not None: - padded.fill(fill_value) - - # Copy old array into correct space - original_area_slice = tuple( - slice(left, left + size) - for size, (left, right) in zip(array.shape, pad_width) - ) - padded[original_area_slice] = array - - return padded, original_area_slice - - -def _set_pad_area(padded, axis, width_pair, value_pair): - """ - Set empty-padded area in given dimension. - - Parameters - ---------- - padded : ndarray - Array with the pad area which is modified inplace. - axis : int - Dimension with the pad area to set. - width_pair : (int, int) - Pair of widths that mark the pad area on both sides in the given - dimension. - value_pair : tuple of scalars or ndarrays - Values inserted into the pad area on each side. It must match or be - broadcastable to the shape of `arr`. - """ - left_slice = _slice_at_axis(slice(None, width_pair[0]), axis) - padded[left_slice] = value_pair[0] - - right_slice = _slice_at_axis( - slice(padded.shape[axis] - width_pair[1], None), axis) - padded[right_slice] = value_pair[1] - - -def _get_edges(padded, axis, width_pair): - """ - Retrieve edge values from empty-padded array in given dimension. - - Parameters - ---------- - padded : ndarray - Empty-padded array. - axis : int - Dimension in which the edges are considered. - width_pair : (int, int) - Pair of widths that mark the pad area on both sides in the given - dimension. - - Returns - ------- - left_edge, right_edge : ndarray - Edge values of the valid area in `padded` in the given dimension. Its - shape will always match `padded` except for the dimension given by - `axis` which will have a length of 1. 
- """ - left_index = width_pair[0] - left_slice = _slice_at_axis(slice(left_index, left_index + 1), axis) - left_edge = padded[left_slice] - - right_index = padded.shape[axis] - width_pair[1] - right_slice = _slice_at_axis(slice(right_index - 1, right_index), axis) - right_edge = padded[right_slice] - - return left_edge, right_edge - - -def _get_linear_ramps(padded, axis, width_pair, end_value_pair): - """ - Construct linear ramps for empty-padded array in given dimension. - - Parameters - ---------- - padded : ndarray - Empty-padded array. - axis : int - Dimension in which the ramps are constructed. - width_pair : (int, int) - Pair of widths that mark the pad area on both sides in the given - dimension. - end_value_pair : (scalar, scalar) - End values for the linear ramps which form the edge of the fully padded - array. These values are included in the linear ramps. - - Returns - ------- - left_ramp, right_ramp : ndarray - Linear ramps to set on both sides of `padded`. - """ - edge_pair = _get_edges(padded, axis, width_pair) - - left_ramp = np.linspace( - start=end_value_pair[0], - stop=edge_pair[0].squeeze(axis), # Dimensions is replaced by linspace - num=width_pair[0], - endpoint=False, - dtype=padded.dtype, - axis=axis, - ) - - right_ramp = np.linspace( - start=end_value_pair[1], - stop=edge_pair[1].squeeze(axis), # Dimension is replaced by linspace - num=width_pair[1], - endpoint=False, - dtype=padded.dtype, - axis=axis, - ) - # Reverse linear space in appropriate dimension - right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)] - - return left_ramp, right_ramp - - -def _get_stats(padded, axis, width_pair, length_pair, stat_func): - """ - Calculate statistic for the empty-padded array in given dimnsion. - - Parameters - ---------- - padded : ndarray - Empty-padded array. - axis : int - Dimension in which the statistic is calculated. - width_pair : (int, int) - Pair of widths that mark the pad area on both sides in the given - dimension. 
- length_pair : 2-element sequence of None or int - Gives the number of values in valid area from each side that is - taken into account when calculating the statistic. If None the entire - valid area in `padded` is considered. - stat_func : function - Function to compute statistic. The expected signature is - ``stat_func(x: ndarray, axis: int, keepdims: bool) -> ndarray``. - - Returns - ------- - left_stat, right_stat : ndarray - Calculated statistic for both sides of `padded`. - """ - # Calculate indices of the edges of the area with original values - left_index = width_pair[0] - right_index = padded.shape[axis] - width_pair[1] - # as well as its length - max_length = right_index - left_index - - # Limit stat_lengths to max_length - left_length, right_length = length_pair - if left_length is None or max_length < left_length: - left_length = max_length - if right_length is None or max_length < right_length: - right_length = max_length - - if (left_length == 0 or right_length == 0) \ - and stat_func in {np.amax, np.amin}: - # amax and amin can't operate on an emtpy array, - # raise a more descriptive warning here instead of the default one - raise ValueError("stat_length of 0 yields no value for padding") - - # Calculate statistic for the left side - left_slice = _slice_at_axis( - slice(left_index, left_index + left_length), axis) - left_chunk = padded[left_slice] - left_stat = stat_func(left_chunk, axis=axis, keepdims=True) - _round_if_needed(left_stat, padded.dtype) - - if left_length == right_length == max_length: - # return early as right_stat must be identical to left_stat - return left_stat, left_stat - - # Calculate statistic for the right side - right_slice = _slice_at_axis( - slice(right_index - right_length, right_index), axis) - right_chunk = padded[right_slice] - right_stat = stat_func(right_chunk, axis=axis, keepdims=True) - _round_if_needed(right_stat, padded.dtype) - - return left_stat, right_stat - - -def _set_reflect_both(padded, axis, width_pair, 
method, include_edge=False): - """ - Pad `axis` of `arr` with reflection. - - Parameters - ---------- - padded : ndarray - Input array of arbitrary shape. - axis : int - Axis along which to pad `arr`. - width_pair : (int, int) - Pair of widths that mark the pad area on both sides in the given - dimension. - method : str - Controls method of reflection; options are 'even' or 'odd'. - include_edge : bool - If true, edge value is included in reflection, otherwise the edge - value forms the symmetric axis to the reflection. - - Returns - ------- - pad_amt : tuple of ints, length 2 - New index positions of padding to do along the `axis`. If these are - both 0, padding is done in this dimension. - """ - left_pad, right_pad = width_pair - old_length = padded.shape[axis] - right_pad - left_pad - - if include_edge: - # Edge is included, we need to offset the pad amount by 1 - edge_offset = 1 - else: - edge_offset = 0 # Edge is not included, no need to offset pad amount - old_length -= 1 # but must be omitted from the chunk - - if left_pad > 0: - # Pad with reflected values on left side: - # First limit chunk size which can't be larger than pad area - chunk_length = min(old_length, left_pad) - # Slice right to left, stop on or next to edge, start relative to stop - stop = left_pad - edge_offset - start = stop + chunk_length - left_slice = _slice_at_axis(slice(start, stop, -1), axis) - left_chunk = padded[left_slice] - - if method == "odd": - # Negate chunk and align with edge - edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis) - left_chunk = 2 * padded[edge_slice] - left_chunk - - # Insert chunk into padded area - start = left_pad - chunk_length - stop = left_pad - pad_area = _slice_at_axis(slice(start, stop), axis) - padded[pad_area] = left_chunk - # Adjust pointer to left edge for next iteration - left_pad -= chunk_length - - if right_pad > 0: - # Pad with reflected values on right side: - # First limit chunk size which can't be larger than pad area - 
chunk_length = min(old_length, right_pad) - # Slice right to left, start on or next to edge, stop relative to start - start = -right_pad + edge_offset - 2 - stop = start - chunk_length - right_slice = _slice_at_axis(slice(start, stop, -1), axis) - right_chunk = padded[right_slice] - - if method == "odd": - # Negate chunk and align with edge - edge_slice = _slice_at_axis( - slice(-right_pad - 1, -right_pad), axis) - right_chunk = 2 * padded[edge_slice] - right_chunk - - # Insert chunk into padded area - start = padded.shape[axis] - right_pad - stop = start + chunk_length - pad_area = _slice_at_axis(slice(start, stop), axis) - padded[pad_area] = right_chunk - # Adjust pointer to right edge for next iteration - right_pad -= chunk_length - - return left_pad, right_pad - - -def _set_wrap_both(padded, axis, width_pair): - """ - Pad `axis` of `arr` with wrapped values. - - Parameters - ---------- - padded : ndarray - Input array of arbitrary shape. - axis : int - Axis along which to pad `arr`. - width_pair : (int, int) - Pair of widths that mark the pad area on both sides in the given - dimension. - - Returns - ------- - pad_amt : tuple of ints, length 2 - New index positions of padding to do along the `axis`. If these are - both 0, padding is done in this dimension. - """ - left_pad, right_pad = width_pair - period = padded.shape[axis] - right_pad - left_pad - - # If the current dimension of `arr` doesn't contain enough valid values - # (not part of the undefined pad area) we need to pad multiple times. - # Each time the pad area shrinks on both sides which is communicated with - # these variables. - new_left_pad = 0 - new_right_pad = 0 - - if left_pad > 0: - # Pad with wrapped values on left side - # First slice chunk from right side of the non-pad area. 
- # Use min(period, left_pad) to ensure that chunk is not larger than - # pad area - right_slice = _slice_at_axis( - slice(-right_pad - min(period, left_pad), - -right_pad if right_pad != 0 else None), - axis - ) - right_chunk = padded[right_slice] - - if left_pad > period: - # Chunk is smaller than pad area - pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis) - new_left_pad = left_pad - period - else: - # Chunk matches pad area - pad_area = _slice_at_axis(slice(None, left_pad), axis) - padded[pad_area] = right_chunk - - if right_pad > 0: - # Pad with wrapped values on right side - # First slice chunk from left side of the non-pad area. - # Use min(period, right_pad) to ensure that chunk is not larger than - # pad area - left_slice = _slice_at_axis( - slice(left_pad, left_pad + min(period, right_pad),), axis) - left_chunk = padded[left_slice] - - if right_pad > period: - # Chunk is smaller than pad area - pad_area = _slice_at_axis( - slice(-right_pad, -right_pad + period), axis) - new_right_pad = right_pad - period - else: - # Chunk matches pad area - pad_area = _slice_at_axis(slice(-right_pad, None), axis) - padded[pad_area] = left_chunk - - return new_left_pad, new_right_pad - - -def _as_pairs(x, ndim, as_index=False): - """ - Broadcast `x` to an array with the shape (`ndim`, 2). - - A helper function for `pad` that prepares and validates arguments like - `pad_width` for iteration in pairs. - - Parameters - ---------- - x : {None, scalar, array-like} - The object to broadcast to the shape (`ndim`, 2). - ndim : int - Number of pairs the broadcasted `x` will have. - as_index : bool, optional - If `x` is not None, try to round each element of `x` to an integer - (dtype `np.intp`) and ensure every element is positive. - - Returns - ------- - pairs : nested iterables, shape (`ndim`, 2) - The broadcasted version of `x`. - - Raises - ------ - ValueError - If `as_index` is True and `x` contains negative elements. 
- Or if `x` is not broadcastable to the shape (`ndim`, 2). - """ - if x is None: - # Pass through None as a special case, otherwise np.round(x) fails - # with an AttributeError - return ((None, None),) * ndim - - x = np.array(x) - if as_index: - x = np.round(x).astype(np.intp, copy=False) - - if x.ndim < 3: - # Optimization: Possibly use faster paths for cases where `x` has - # only 1 or 2 elements. `np.broadcast_to` could handle these as well - # but is currently slower - - if x.size == 1: - # x was supplied as a single value - x = x.ravel() # Ensure x[0] works for x.ndim == 0, 1, 2 - if as_index and x < 0: - raise ValueError("index can't contain negative values") - return ((x[0], x[0]),) * ndim - - if x.size == 2 and x.shape != (2, 1): - # x was supplied with a single value for each side - # but except case when each dimension has a single value - # which should be broadcasted to a pair, - # e.g. [[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]] - x = x.ravel() # Ensure x[0], x[1] works - if as_index and (x[0] < 0 or x[1] < 0): - raise ValueError("index can't contain negative values") - return ((x[0], x[1]),) * ndim - - if as_index and x.min() < 0: - raise ValueError("index can't contain negative values") - - # Converting the array with `tolist` seems to improve performance - # when iterating and indexing the result (see usage in `pad`) - return np.broadcast_to(x, (ndim, 2)).tolist() - - -def _pad_dispatcher(array, pad_width, mode=None, **kwargs): - return (array,) - - -############################################################################### -# Public functions - - -@array_function_dispatch(_pad_dispatcher, module='numpy') -def pad(array, pad_width, mode='constant', **kwargs): - """ - Pad an array. - - Parameters - ---------- - array : array_like of rank N - The array to pad. - pad_width : {sequence, array_like, int} - Number of values padded to the edges of each axis. - ((before_1, after_1), ... (before_N, after_N)) unique pad widths - for each axis. 
- ((before, after),) yields same before and after pad for each axis. - (pad,) or int is a shortcut for before = after = pad width for all - axes. - mode : str or function, optional - One of the following string values or a user supplied function. - - 'constant' (default) - Pads with a constant value. - 'edge' - Pads with the edge values of array. - 'linear_ramp' - Pads with the linear ramp between end_value and the - array edge value. - 'maximum' - Pads with the maximum value of all or part of the - vector along each axis. - 'mean' - Pads with the mean value of all or part of the - vector along each axis. - 'median' - Pads with the median value of all or part of the - vector along each axis. - 'minimum' - Pads with the minimum value of all or part of the - vector along each axis. - 'reflect' - Pads with the reflection of the vector mirrored on - the first and last values of the vector along each - axis. - 'symmetric' - Pads with the reflection of the vector mirrored - along the edge of the array. - 'wrap' - Pads with the wrap of the vector along the axis. - The first values are used to pad the end and the - end values are used to pad the beginning. - 'empty' - Pads with undefined values. - - .. versionadded:: 1.17 - - - Padding function, see Notes. - stat_length : sequence or int, optional - Used in 'maximum', 'mean', 'median', and 'minimum'. Number of - values at edge of each axis used to calculate the statistic value. - - ((before_1, after_1), ... (before_N, after_N)) unique statistic - lengths for each axis. - - ((before, after),) yields same before and after statistic lengths - for each axis. - - (stat_length,) or int is a shortcut for before = after = statistic - length for all axes. - - Default is ``None``, to use the entire axis. - constant_values : sequence or scalar, optional - Used in 'constant'. The values to set the padded values for each - axis. - - ``((before_1, after_1), ... (before_N, after_N))`` unique pad constants - for each axis. 
- - ``((before, after),)`` yields same before and after constants for each - axis. - - ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for - all axes. - - Default is 0. - end_values : sequence or scalar, optional - Used in 'linear_ramp'. The values used for the ending value of the - linear_ramp and that will form the edge of the padded array. - - ``((before_1, after_1), ... (before_N, after_N))`` unique end values - for each axis. - - ``((before, after),)`` yields same before and after end values for each - axis. - - ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for - all axes. - - Default is 0. - reflect_type : {'even', 'odd'}, optional - Used in 'reflect', and 'symmetric'. The 'even' style is the - default with an unaltered reflection around the edge value. For - the 'odd' style, the extended part of the array is created by - subtracting the reflected values from two times the edge value. - - Returns - ------- - pad : ndarray - Padded array of rank equal to `array` with shape increased - according to `pad_width`. - - Notes - ----- - .. versionadded:: 1.7.0 - - For an array with rank greater than 1, some of the padding of later - axes is calculated from padding of previous axes. This is easiest to - think about with a rank 2 array where the corners of the padded array - are calculated by using padded values from the first axis. - - The padding function, if used, should modify a rank 1 array in-place. It - has the following signature:: - - padding_func(vector, iaxis_pad_width, iaxis, kwargs) - - where - - vector : ndarray - A rank 1 array already padded with zeros. Padded values are - vector[:iaxis_pad_width[0]] and vector[-iaxis_pad_width[1]:]. - iaxis_pad_width : tuple - A 2-tuple of ints, iaxis_pad_width[0] represents the number of - values padded at the beginning of vector where - iaxis_pad_width[1] represents the number of values padded at - the end of vector. 
- iaxis : int - The axis currently being calculated. - kwargs : dict - Any keyword arguments the function requires. - - Examples - -------- - >>> a = [1, 2, 3, 4, 5] - >>> np.pad(a, (2, 3), 'constant', constant_values=(4, 6)) - array([4, 4, 1, ..., 6, 6, 6]) - - >>> np.pad(a, (2, 3), 'edge') - array([1, 1, 1, ..., 5, 5, 5]) - - >>> np.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4)) - array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4]) - - >>> np.pad(a, (2,), 'maximum') - array([5, 5, 1, 2, 3, 4, 5, 5, 5]) - - >>> np.pad(a, (2,), 'mean') - array([3, 3, 1, 2, 3, 4, 5, 3, 3]) - - >>> np.pad(a, (2,), 'median') - array([3, 3, 1, 2, 3, 4, 5, 3, 3]) - - >>> a = [[1, 2], [3, 4]] - >>> np.pad(a, ((3, 2), (2, 3)), 'minimum') - array([[1, 1, 1, 2, 1, 1, 1], - [1, 1, 1, 2, 1, 1, 1], - [1, 1, 1, 2, 1, 1, 1], - [1, 1, 1, 2, 1, 1, 1], - [3, 3, 3, 4, 3, 3, 3], - [1, 1, 1, 2, 1, 1, 1], - [1, 1, 1, 2, 1, 1, 1]]) - - >>> a = [1, 2, 3, 4, 5] - >>> np.pad(a, (2, 3), 'reflect') - array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2]) - - >>> np.pad(a, (2, 3), 'reflect', reflect_type='odd') - array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8]) - - >>> np.pad(a, (2, 3), 'symmetric') - array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3]) - - >>> np.pad(a, (2, 3), 'symmetric', reflect_type='odd') - array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7]) - - >>> np.pad(a, (2, 3), 'wrap') - array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3]) - - >>> def pad_with(vector, pad_width, iaxis, kwargs): - ... pad_value = kwargs.get('padder', 10) - ... vector[:pad_width[0]] = pad_value - ... 
vector[-pad_width[1]:] = pad_value - >>> a = np.arange(6) - >>> a = a.reshape((2, 3)) - >>> np.pad(a, 2, pad_with) - array([[10, 10, 10, 10, 10, 10, 10], - [10, 10, 10, 10, 10, 10, 10], - [10, 10, 0, 1, 2, 10, 10], - [10, 10, 3, 4, 5, 10, 10], - [10, 10, 10, 10, 10, 10, 10], - [10, 10, 10, 10, 10, 10, 10]]) - >>> np.pad(a, 2, pad_with, padder=100) - array([[100, 100, 100, 100, 100, 100, 100], - [100, 100, 100, 100, 100, 100, 100], - [100, 100, 0, 1, 2, 100, 100], - [100, 100, 3, 4, 5, 100, 100], - [100, 100, 100, 100, 100, 100, 100], - [100, 100, 100, 100, 100, 100, 100]]) - """ - array = np.asarray(array) - pad_width = np.asarray(pad_width) - - if not pad_width.dtype.kind == 'i': - raise TypeError('`pad_width` must be of integral type.') - - # Broadcast to shape (array.ndim, 2) - pad_width = _as_pairs(pad_width, array.ndim, as_index=True) - - if callable(mode): - # Old behavior: Use user-supplied function with np.apply_along_axis - function = mode - # Create a new zero padded array - padded, _ = _pad_simple(array, pad_width, fill_value=0) - # And apply along each axis - - for axis in range(padded.ndim): - # Iterate using ndindex as in apply_along_axis, but assuming that - # function operates inplace on the padded array. 
- - # view with the iteration axis at the end - view = np.moveaxis(padded, axis, -1) - - # compute indices for the iteration axes, and append a trailing - # ellipsis to prevent 0d arrays decaying to scalars (gh-8642) - inds = ndindex(view.shape[:-1]) - inds = (ind + (Ellipsis,) for ind in inds) - for ind in inds: - function(view[ind], pad_width[axis], axis, kwargs) - - return padded - - # Make sure that no unsupported keywords were passed for the current mode - allowed_kwargs = { - 'empty': [], 'edge': [], 'wrap': [], - 'constant': ['constant_values'], - 'linear_ramp': ['end_values'], - 'maximum': ['stat_length'], - 'mean': ['stat_length'], - 'median': ['stat_length'], - 'minimum': ['stat_length'], - 'reflect': ['reflect_type'], - 'symmetric': ['reflect_type'], - } - try: - unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode]) - except KeyError: - raise ValueError("mode '{}' is not supported".format(mode)) - if unsupported_kwargs: - raise ValueError("unsupported keyword arguments for mode '{}': {}" - .format(mode, unsupported_kwargs)) - - stat_functions = {"maximum": np.amax, "minimum": np.amin, - "mean": np.mean, "median": np.median} - - # Create array with final shape and original values - # (padded area is undefined) - padded, original_area_slice = _pad_simple(array, pad_width) - # And prepare iteration over all dimensions - # (zipping may be more readable than using enumerate) - axes = range(padded.ndim) - - if mode == "constant": - values = kwargs.get("constant_values", 0) - values = _as_pairs(values, padded.ndim) - for axis, width_pair, value_pair in zip(axes, pad_width, values): - roi = _view_roi(padded, original_area_slice, axis) - _set_pad_area(roi, axis, width_pair, value_pair) - - elif mode == "empty": - pass # Do nothing as _pad_simple already returned the correct result - - elif array.size == 0: - # Only modes "constant" and "empty" can extend empty axes, all other - # modes depend on `array` not being empty - # -> ensure every empty axis is 
only "padded with 0" - for axis, width_pair in zip(axes, pad_width): - if array.shape[axis] == 0 and any(width_pair): - raise ValueError( - "can't extend empty axis {} using modes other than " - "'constant' or 'empty'".format(axis) - ) - # passed, don't need to do anything more as _pad_simple already - # returned the correct result - - elif mode == "edge": - for axis, width_pair in zip(axes, pad_width): - roi = _view_roi(padded, original_area_slice, axis) - edge_pair = _get_edges(roi, axis, width_pair) - _set_pad_area(roi, axis, width_pair, edge_pair) - - elif mode == "linear_ramp": - end_values = kwargs.get("end_values", 0) - end_values = _as_pairs(end_values, padded.ndim) - for axis, width_pair, value_pair in zip(axes, pad_width, end_values): - roi = _view_roi(padded, original_area_slice, axis) - ramp_pair = _get_linear_ramps(roi, axis, width_pair, value_pair) - _set_pad_area(roi, axis, width_pair, ramp_pair) - - elif mode in stat_functions: - func = stat_functions[mode] - length = kwargs.get("stat_length", None) - length = _as_pairs(length, padded.ndim, as_index=True) - for axis, width_pair, length_pair in zip(axes, pad_width, length): - roi = _view_roi(padded, original_area_slice, axis) - stat_pair = _get_stats(roi, axis, width_pair, length_pair, func) - _set_pad_area(roi, axis, width_pair, stat_pair) - - elif mode in {"reflect", "symmetric"}: - method = kwargs.get("reflect_type", "even") - include_edge = True if mode == "symmetric" else False - for axis, (left_index, right_index) in zip(axes, pad_width): - if array.shape[axis] == 1 and (left_index > 0 or right_index > 0): - # Extending singleton dimension for 'reflect' is legacy - # behavior; it really should raise an error. 
- edge_pair = _get_edges(padded, axis, (left_index, right_index)) - _set_pad_area( - padded, axis, (left_index, right_index), edge_pair) - continue - - roi = _view_roi(padded, original_area_slice, axis) - while left_index > 0 or right_index > 0: - # Iteratively pad until dimension is filled with reflected - # values. This is necessary if the pad area is larger than - # the length of the original values in the current dimension. - left_index, right_index = _set_reflect_both( - roi, axis, (left_index, right_index), - method, include_edge - ) - - elif mode == "wrap": - for axis, (left_index, right_index) in zip(axes, pad_width): - roi = _view_roi(padded, original_area_slice, axis) - while left_index > 0 or right_index > 0: - # Iteratively pad until dimension is filled with wrapped - # values. This is necessary if the pad area is larger than - # the length of the original values in the current dimension. - left_index, right_index = _set_wrap_both( - roi, axis, (left_index, right_index)) - - return padded diff --git a/venv/lib/python3.7/site-packages/numpy/lib/arraysetops.py b/venv/lib/python3.7/site-packages/numpy/lib/arraysetops.py deleted file mode 100644 index 2309f7e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/arraysetops.py +++ /dev/null @@ -1,788 +0,0 @@ -""" -Set operations for arrays based on sorting. - -:Contains: - unique, - isin, - ediff1d, - intersect1d, - setxor1d, - in1d, - union1d, - setdiff1d - -:Notes: - -For floating point arrays, inaccurate results may appear due to usual round-off -and floating point comparison issues. - -Speed could be gained in some operations by an implementation of -sort(), that can provide directly the permutation vectors, avoiding -thus calls to argsort(). - -To do: Optionally return indices analogously to unique for all functions. 
- -:Author: Robert Cimrman - -""" -from __future__ import division, absolute_import, print_function - -import functools - -import numpy as np -from numpy.core import overrides - - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - - -__all__ = [ - 'ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d', 'unique', - 'in1d', 'isin' - ] - - -def _ediff1d_dispatcher(ary, to_end=None, to_begin=None): - return (ary, to_end, to_begin) - - -@array_function_dispatch(_ediff1d_dispatcher) -def ediff1d(ary, to_end=None, to_begin=None): - """ - The differences between consecutive elements of an array. - - Parameters - ---------- - ary : array_like - If necessary, will be flattened before the differences are taken. - to_end : array_like, optional - Number(s) to append at the end of the returned differences. - to_begin : array_like, optional - Number(s) to prepend at the beginning of the returned differences. - - Returns - ------- - ediff1d : ndarray - The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``. - - See Also - -------- - diff, gradient - - Notes - ----- - When applied to masked arrays, this function drops the mask information - if the `to_begin` and/or `to_end` parameters are used. - - Examples - -------- - >>> x = np.array([1, 2, 4, 7, 0]) - >>> np.ediff1d(x) - array([ 1, 2, 3, -7]) - - >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99])) - array([-99, 1, 2, ..., -7, 88, 99]) - - The returned array is always 1D. 
- - >>> y = [[1, 2, 4], [1, 6, 24]] - >>> np.ediff1d(y) - array([ 1, 2, -3, 5, 18]) - - """ - # force a 1d array - ary = np.asanyarray(ary).ravel() - - # enforce propagation of the dtype of input - # ary to returned result - dtype_req = ary.dtype - - # fast track default case - if to_begin is None and to_end is None: - return ary[1:] - ary[:-1] - - if to_begin is None: - l_begin = 0 - else: - _to_begin = np.asanyarray(to_begin, dtype=dtype_req) - if not np.all(_to_begin == to_begin): - raise ValueError("cannot convert 'to_begin' to array with dtype " - "'%r' as required for input ary" % dtype_req) - to_begin = _to_begin.ravel() - l_begin = len(to_begin) - - if to_end is None: - l_end = 0 - else: - _to_end = np.asanyarray(to_end, dtype=dtype_req) - # check that casting has not overflowed - if not np.all(_to_end == to_end): - raise ValueError("cannot convert 'to_end' to array with dtype " - "'%r' as required for input ary" % dtype_req) - to_end = _to_end.ravel() - l_end = len(to_end) - - # do the calculation in place and copy to_begin and to_end - l_diff = max(len(ary) - 1, 0) - result = np.empty(l_diff + l_begin + l_end, dtype=ary.dtype) - result = ary.__array_wrap__(result) - if l_begin > 0: - result[:l_begin] = to_begin - if l_end > 0: - result[l_begin + l_diff:] = to_end - np.subtract(ary[1:], ary[:-1], result[l_begin:l_begin + l_diff]) - return result - - -def _unpack_tuple(x): - """ Unpacks one-element tuples for use as return values """ - if len(x) == 1: - return x[0] - else: - return x - - -def _unique_dispatcher(ar, return_index=None, return_inverse=None, - return_counts=None, axis=None): - return (ar,) - - -@array_function_dispatch(_unique_dispatcher) -def unique(ar, return_index=False, return_inverse=False, - return_counts=False, axis=None): - """ - Find the unique elements of an array. - - Returns the sorted unique elements of an array. 
There are three optional - outputs in addition to the unique elements: - - * the indices of the input array that give the unique values - * the indices of the unique array that reconstruct the input array - * the number of times each unique value comes up in the input array - - Parameters - ---------- - ar : array_like - Input array. Unless `axis` is specified, this will be flattened if it - is not already 1-D. - return_index : bool, optional - If True, also return the indices of `ar` (along the specified axis, - if provided, or in the flattened array) that result in the unique array. - return_inverse : bool, optional - If True, also return the indices of the unique array (for the specified - axis, if provided) that can be used to reconstruct `ar`. - return_counts : bool, optional - If True, also return the number of times each unique item appears - in `ar`. - - .. versionadded:: 1.9.0 - - axis : int or None, optional - The axis to operate on. If None, `ar` will be flattened. If an integer, - the subarrays indexed by the given axis will be flattened and treated - as the elements of a 1-D array with the dimension of the given axis, - see the notes for more details. Object arrays or structured arrays - that contain objects are not supported if the `axis` kwarg is used. The - default is None. - - .. versionadded:: 1.13.0 - - Returns - ------- - unique : ndarray - The sorted unique values. - unique_indices : ndarray, optional - The indices of the first occurrences of the unique values in the - original array. Only provided if `return_index` is True. - unique_inverse : ndarray, optional - The indices to reconstruct the original array from the - unique array. Only provided if `return_inverse` is True. - unique_counts : ndarray, optional - The number of times each of the unique values comes up in the - original array. Only provided if `return_counts` is True. - - .. 
versionadded:: 1.9.0 - - See Also - -------- - numpy.lib.arraysetops : Module with a number of other functions for - performing set operations on arrays. - - Notes - ----- - When an axis is specified the subarrays indexed by the axis are sorted. - This is done by making the specified axis the first dimension of the array - (move the axis to the first dimension to keep the order of the other axes) - and then flattening the subarrays in C order. The flattened subarrays are - then viewed as a structured type with each element given a label, with the - effect that we end up with a 1-D array of structured types that can be - treated in the same way as any other 1-D array. The result is that the - flattened subarrays are sorted in lexicographic order starting with the - first element. - - Examples - -------- - >>> np.unique([1, 1, 2, 2, 3, 3]) - array([1, 2, 3]) - >>> a = np.array([[1, 1], [2, 3]]) - >>> np.unique(a) - array([1, 2, 3]) - - Return the unique rows of a 2D array - - >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]]) - >>> np.unique(a, axis=0) - array([[1, 0, 0], [2, 3, 4]]) - - Return the indices of the original array that give the unique values: - - >>> a = np.array(['a', 'b', 'b', 'c', 'a']) - >>> u, indices = np.unique(a, return_index=True) - >>> u - array(['a', 'b', 'c'], dtype='>> indices - array([0, 1, 3]) - >>> a[indices] - array(['a', 'b', 'c'], dtype='>> a = np.array([1, 2, 6, 4, 2, 3, 2]) - >>> u, indices = np.unique(a, return_inverse=True) - >>> u - array([1, 2, 3, 4, 6]) - >>> indices - array([0, 1, 4, ..., 1, 2, 1]) - >>> u[indices] - array([1, 2, 6, ..., 2, 3, 2]) - - """ - ar = np.asanyarray(ar) - if axis is None: - ret = _unique1d(ar, return_index, return_inverse, return_counts) - return _unpack_tuple(ret) - - # axis was specified and not None - try: - ar = np.moveaxis(ar, axis, 0) - except np.AxisError: - # this removes the "axis1" or "axis2" prefix from the error message - raise np.AxisError(axis, ar.ndim) - - # Must reshape to a 
contiguous 2D array for this to work... - orig_shape, orig_dtype = ar.shape, ar.dtype - ar = ar.reshape(orig_shape[0], -1) - ar = np.ascontiguousarray(ar) - dtype = [('f{i}'.format(i=i), ar.dtype) for i in range(ar.shape[1])] - - try: - consolidated = ar.view(dtype) - except TypeError: - # There's no good way to do this for object arrays, etc... - msg = 'The axis argument to unique is not supported for dtype {dt}' - raise TypeError(msg.format(dt=ar.dtype)) - - def reshape_uniq(uniq): - uniq = uniq.view(orig_dtype) - uniq = uniq.reshape(-1, *orig_shape[1:]) - uniq = np.moveaxis(uniq, 0, axis) - return uniq - - output = _unique1d(consolidated, return_index, - return_inverse, return_counts) - output = (reshape_uniq(output[0]),) + output[1:] - return _unpack_tuple(output) - - -def _unique1d(ar, return_index=False, return_inverse=False, - return_counts=False): - """ - Find the unique elements of an array, ignoring shape. - """ - ar = np.asanyarray(ar).flatten() - - optional_indices = return_index or return_inverse - - if optional_indices: - perm = ar.argsort(kind='mergesort' if return_index else 'quicksort') - aux = ar[perm] - else: - ar.sort() - aux = ar - mask = np.empty(aux.shape, dtype=np.bool_) - mask[:1] = True - mask[1:] = aux[1:] != aux[:-1] - - ret = (aux[mask],) - if return_index: - ret += (perm[mask],) - if return_inverse: - imask = np.cumsum(mask) - 1 - inv_idx = np.empty(mask.shape, dtype=np.intp) - inv_idx[perm] = imask - ret += (inv_idx,) - if return_counts: - idx = np.concatenate(np.nonzero(mask) + ([mask.size],)) - ret += (np.diff(idx),) - return ret - - -def _intersect1d_dispatcher( - ar1, ar2, assume_unique=None, return_indices=None): - return (ar1, ar2) - - -@array_function_dispatch(_intersect1d_dispatcher) -def intersect1d(ar1, ar2, assume_unique=False, return_indices=False): - """ - Find the intersection of two arrays. - - Return the sorted, unique values that are in both of the input arrays. 
- - Parameters - ---------- - ar1, ar2 : array_like - Input arrays. Will be flattened if not already 1D. - assume_unique : bool - If True, the input arrays are both assumed to be unique, which - can speed up the calculation. Default is False. - return_indices : bool - If True, the indices which correspond to the intersection of the two - arrays are returned. The first instance of a value is used if there are - multiple. Default is False. - - .. versionadded:: 1.15.0 - - Returns - ------- - intersect1d : ndarray - Sorted 1D array of common and unique elements. - comm1 : ndarray - The indices of the first occurrences of the common values in `ar1`. - Only provided if `return_indices` is True. - comm2 : ndarray - The indices of the first occurrences of the common values in `ar2`. - Only provided if `return_indices` is True. - - - See Also - -------- - numpy.lib.arraysetops : Module with a number of other functions for - performing set operations on arrays. - - Examples - -------- - >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1]) - array([1, 3]) - - To intersect more than two arrays, use functools.reduce: - - >>> from functools import reduce - >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) - array([3]) - - To return the indices of the values common to the input arrays - along with the intersected values: - - >>> x = np.array([1, 1, 2, 3, 4]) - >>> y = np.array([2, 1, 4, 6]) - >>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True) - >>> x_ind, y_ind - (array([0, 2, 4]), array([1, 0, 2])) - >>> xy, x[x_ind], y[y_ind] - (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4])) - - """ - ar1 = np.asanyarray(ar1) - ar2 = np.asanyarray(ar2) - - if not assume_unique: - if return_indices: - ar1, ind1 = unique(ar1, return_index=True) - ar2, ind2 = unique(ar2, return_index=True) - else: - ar1 = unique(ar1) - ar2 = unique(ar2) - else: - ar1 = ar1.ravel() - ar2 = ar2.ravel() - - aux = np.concatenate((ar1, ar2)) - if return_indices: - aux_sort_indices = 
np.argsort(aux, kind='mergesort') - aux = aux[aux_sort_indices] - else: - aux.sort() - - mask = aux[1:] == aux[:-1] - int1d = aux[:-1][mask] - - if return_indices: - ar1_indices = aux_sort_indices[:-1][mask] - ar2_indices = aux_sort_indices[1:][mask] - ar1.size - if not assume_unique: - ar1_indices = ind1[ar1_indices] - ar2_indices = ind2[ar2_indices] - - return int1d, ar1_indices, ar2_indices - else: - return int1d - - -def _setxor1d_dispatcher(ar1, ar2, assume_unique=None): - return (ar1, ar2) - - -@array_function_dispatch(_setxor1d_dispatcher) -def setxor1d(ar1, ar2, assume_unique=False): - """ - Find the set exclusive-or of two arrays. - - Return the sorted, unique values that are in only one (not both) of the - input arrays. - - Parameters - ---------- - ar1, ar2 : array_like - Input arrays. - assume_unique : bool - If True, the input arrays are both assumed to be unique, which - can speed up the calculation. Default is False. - - Returns - ------- - setxor1d : ndarray - Sorted 1D array of unique values that are in only one of the input - arrays. - - Examples - -------- - >>> a = np.array([1, 2, 3, 2, 4]) - >>> b = np.array([2, 3, 5, 7, 5]) - >>> np.setxor1d(a,b) - array([1, 4, 5, 7]) - - """ - if not assume_unique: - ar1 = unique(ar1) - ar2 = unique(ar2) - - aux = np.concatenate((ar1, ar2)) - if aux.size == 0: - return aux - - aux.sort() - flag = np.concatenate(([True], aux[1:] != aux[:-1], [True])) - return aux[flag[1:] & flag[:-1]] - - -def _in1d_dispatcher(ar1, ar2, assume_unique=None, invert=None): - return (ar1, ar2) - - -@array_function_dispatch(_in1d_dispatcher) -def in1d(ar1, ar2, assume_unique=False, invert=False): - """ - Test whether each element of a 1-D array is also present in a second array. - - Returns a boolean array the same length as `ar1` that is True - where an element of `ar1` is in `ar2` and False otherwise. - - We recommend using :func:`isin` instead of `in1d` for new code. 
- - Parameters - ---------- - ar1 : (M,) array_like - Input array. - ar2 : array_like - The values against which to test each value of `ar1`. - assume_unique : bool, optional - If True, the input arrays are both assumed to be unique, which - can speed up the calculation. Default is False. - invert : bool, optional - If True, the values in the returned array are inverted (that is, - False where an element of `ar1` is in `ar2` and True otherwise). - Default is False. ``np.in1d(a, b, invert=True)`` is equivalent - to (but is faster than) ``np.invert(in1d(a, b))``. - - .. versionadded:: 1.8.0 - - Returns - ------- - in1d : (M,) ndarray, bool - The values `ar1[in1d]` are in `ar2`. - - See Also - -------- - isin : Version of this function that preserves the - shape of ar1. - numpy.lib.arraysetops : Module with a number of other functions for - performing set operations on arrays. - - Notes - ----- - `in1d` can be considered as an element-wise function version of the - python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly - equivalent to ``np.array([item in b for item in a])``. - However, this idea fails if `ar2` is a set, or similar (non-sequence) - container: As ``ar2`` is converted to an array, in those cases - ``asarray(ar2)`` is an object array rather than the expected array of - contained values. - - .. 
versionadded:: 1.4.0 - - Examples - -------- - >>> test = np.array([0, 1, 2, 5, 0]) - >>> states = [0, 2] - >>> mask = np.in1d(test, states) - >>> mask - array([ True, False, True, False, True]) - >>> test[mask] - array([0, 2, 0]) - >>> mask = np.in1d(test, states, invert=True) - >>> mask - array([False, True, False, True, False]) - >>> test[mask] - array([1, 5]) - """ - # Ravel both arrays, behavior for the first array could be different - ar1 = np.asarray(ar1).ravel() - ar2 = np.asarray(ar2).ravel() - - # Check if one of the arrays may contain arbitrary objects - contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject - - # This code is run when - # a) the first condition is true, making the code significantly faster - # b) the second condition is true (i.e. `ar1` or `ar2` may contain - # arbitrary objects), since then sorting is not guaranteed to work - if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object: - if invert: - mask = np.ones(len(ar1), dtype=bool) - for a in ar2: - mask &= (ar1 != a) - else: - mask = np.zeros(len(ar1), dtype=bool) - for a in ar2: - mask |= (ar1 == a) - return mask - - # Otherwise use sorting - if not assume_unique: - ar1, rev_idx = np.unique(ar1, return_inverse=True) - ar2 = np.unique(ar2) - - ar = np.concatenate((ar1, ar2)) - # We need this to be a stable sort, so always use 'mergesort' - # here. The values from the first array should always come before - # the values from the second array. 
- order = ar.argsort(kind='mergesort') - sar = ar[order] - if invert: - bool_ar = (sar[1:] != sar[:-1]) - else: - bool_ar = (sar[1:] == sar[:-1]) - flag = np.concatenate((bool_ar, [invert])) - ret = np.empty(ar.shape, dtype=bool) - ret[order] = flag - - if assume_unique: - return ret[:len(ar1)] - else: - return ret[rev_idx] - - -def _isin_dispatcher(element, test_elements, assume_unique=None, invert=None): - return (element, test_elements) - - -@array_function_dispatch(_isin_dispatcher) -def isin(element, test_elements, assume_unique=False, invert=False): - """ - Calculates `element in test_elements`, broadcasting over `element` only. - Returns a boolean array of the same shape as `element` that is True - where an element of `element` is in `test_elements` and False otherwise. - - Parameters - ---------- - element : array_like - Input array. - test_elements : array_like - The values against which to test each value of `element`. - This argument is flattened if it is an array or array_like. - See notes for behavior with non-array-like parameters. - assume_unique : bool, optional - If True, the input arrays are both assumed to be unique, which - can speed up the calculation. Default is False. - invert : bool, optional - If True, the values in the returned array are inverted, as if - calculating `element not in test_elements`. Default is False. - ``np.isin(a, b, invert=True)`` is equivalent to (but faster - than) ``np.invert(np.isin(a, b))``. - - Returns - ------- - isin : ndarray, bool - Has the same shape as `element`. The values `element[isin]` - are in `test_elements`. - - See Also - -------- - in1d : Flattened version of this function. - numpy.lib.arraysetops : Module with a number of other functions for - performing set operations on arrays. - - Notes - ----- - - `isin` is an element-wise function version of the python keyword `in`. - ``isin(a, b)`` is roughly equivalent to - ``np.array([item in b for item in a])`` if `a` and `b` are 1-D sequences. 
- - `element` and `test_elements` are converted to arrays if they are not - already. If `test_elements` is a set (or other non-sequence collection) - it will be converted to an object array with one element, rather than an - array of the values contained in `test_elements`. This is a consequence - of the `array` constructor's way of handling non-sequence collections. - Converting the set to a list usually gives the desired behavior. - - .. versionadded:: 1.13.0 - - Examples - -------- - >>> element = 2*np.arange(4).reshape((2, 2)) - >>> element - array([[0, 2], - [4, 6]]) - >>> test_elements = [1, 2, 4, 8] - >>> mask = np.isin(element, test_elements) - >>> mask - array([[False, True], - [ True, False]]) - >>> element[mask] - array([2, 4]) - - The indices of the matched values can be obtained with `nonzero`: - - >>> np.nonzero(mask) - (array([0, 1]), array([1, 0])) - - The test can also be inverted: - - >>> mask = np.isin(element, test_elements, invert=True) - >>> mask - array([[ True, False], - [False, True]]) - >>> element[mask] - array([0, 6]) - - Because of how `array` handles sets, the following does not - work as expected: - - >>> test_set = {1, 2, 4, 8} - >>> np.isin(element, test_set) - array([[False, False], - [False, False]]) - - Casting the set to a list gives the expected result: - - >>> np.isin(element, list(test_set)) - array([[False, True], - [ True, False]]) - """ - element = np.asarray(element) - return in1d(element, test_elements, assume_unique=assume_unique, - invert=invert).reshape(element.shape) - - -def _union1d_dispatcher(ar1, ar2): - return (ar1, ar2) - - -@array_function_dispatch(_union1d_dispatcher) -def union1d(ar1, ar2): - """ - Find the union of two arrays. - - Return the unique, sorted array of values that are in either of the two - input arrays. - - Parameters - ---------- - ar1, ar2 : array_like - Input arrays. They are flattened if they are not already 1D. 
- - Returns - ------- - union1d : ndarray - Unique, sorted union of the input arrays. - - See Also - -------- - numpy.lib.arraysetops : Module with a number of other functions for - performing set operations on arrays. - - Examples - -------- - >>> np.union1d([-1, 0, 1], [-2, 0, 2]) - array([-2, -1, 0, 1, 2]) - - To find the union of more than two arrays, use functools.reduce: - - >>> from functools import reduce - >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) - array([1, 2, 3, 4, 6]) - """ - return unique(np.concatenate((ar1, ar2), axis=None)) - - -def _setdiff1d_dispatcher(ar1, ar2, assume_unique=None): - return (ar1, ar2) - - -@array_function_dispatch(_setdiff1d_dispatcher) -def setdiff1d(ar1, ar2, assume_unique=False): - """ - Find the set difference of two arrays. - - Return the unique values in `ar1` that are not in `ar2`. - - Parameters - ---------- - ar1 : array_like - Input array. - ar2 : array_like - Input comparison array. - assume_unique : bool - If True, the input arrays are both assumed to be unique, which - can speed up the calculation. Default is False. - - Returns - ------- - setdiff1d : ndarray - 1D array of values in `ar1` that are not in `ar2`. The result - is sorted when `assume_unique=False`, but otherwise only sorted - if the input is sorted. - - See Also - -------- - numpy.lib.arraysetops : Module with a number of other functions for - performing set operations on arrays. 
- - Examples - -------- - >>> a = np.array([1, 2, 3, 2, 4, 1]) - >>> b = np.array([3, 4, 5, 6]) - >>> np.setdiff1d(a, b) - array([1, 2]) - - """ - if assume_unique: - ar1 = np.asarray(ar1).ravel() - else: - ar1 = unique(ar1) - ar2 = unique(ar2) - return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)] - diff --git a/venv/lib/python3.7/site-packages/numpy/lib/arrayterator.py b/venv/lib/python3.7/site-packages/numpy/lib/arrayterator.py deleted file mode 100644 index c166685..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/arrayterator.py +++ /dev/null @@ -1,224 +0,0 @@ -""" -A buffered iterator for big arrays. - -This module solves the problem of iterating over a big file-based array -without having to read it into memory. The `Arrayterator` class wraps -an array object, and when iterated it will return sub-arrays with at most -a user-specified number of elements. - -""" -from __future__ import division, absolute_import, print_function - -from operator import mul -from functools import reduce - -from numpy.compat import long - -__all__ = ['Arrayterator'] - - -class Arrayterator(object): - """ - Buffered iterator for big arrays. - - `Arrayterator` creates a buffered iterator for reading big arrays in small - contiguous blocks. The class is useful for objects stored in the - file system. It allows iteration over the object *without* reading - everything in memory; instead, small blocks are read and iterated over. - - `Arrayterator` can be used with any object that supports multidimensional - slices. This includes NumPy arrays, but also variables from - Scientific.IO.NetCDF or pynetcdf for example. - - Parameters - ---------- - var : array_like - The object to iterate over. - buf_size : int, optional - The buffer size. If `buf_size` is supplied, the maximum amount of - data that will be read into memory is `buf_size` elements. - Default is None, which will read as many element as possible - into memory. 
- - Attributes - ---------- - var - buf_size - start - stop - step - shape - flat - - See Also - -------- - ndenumerate : Multidimensional array iterator. - flatiter : Flat array iterator. - memmap : Create a memory-map to an array stored in a binary file on disk. - - Notes - ----- - The algorithm works by first finding a "running dimension", along which - the blocks will be extracted. Given an array of dimensions - ``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the - first dimension will be used. If, on the other hand, - ``d1 < buf_size < d1*d2`` the second dimension will be used, and so on. - Blocks are extracted along this dimension, and when the last block is - returned the process continues from the next dimension, until all - elements have been read. - - Examples - -------- - >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) - >>> a_itor = np.lib.Arrayterator(a, 2) - >>> a_itor.shape - (3, 4, 5, 6) - - Now we can iterate over ``a_itor``, and it will return arrays of size - two. Since `buf_size` was smaller than any dimension, the first - dimension will be iterated over first: - - >>> for subarr in a_itor: - ... if not subarr.all(): - ... print(subarr, subarr.shape) # doctest: +SKIP - >>> # [[[[0 1]]]] (1, 1, 1, 2) - - """ - - def __init__(self, var, buf_size=None): - self.var = var - self.buf_size = buf_size - - self.start = [0 for dim in var.shape] - self.stop = [dim for dim in var.shape] - self.step = [1 for dim in var.shape] - - def __getattr__(self, attr): - return getattr(self.var, attr) - - def __getitem__(self, index): - """ - Return a new arrayterator. - - """ - # Fix index, handling ellipsis and incomplete slices. 
- if not isinstance(index, tuple): - index = (index,) - fixed = [] - length, dims = len(index), self.ndim - for slice_ in index: - if slice_ is Ellipsis: - fixed.extend([slice(None)] * (dims-length+1)) - length = len(fixed) - elif isinstance(slice_, (int, long)): - fixed.append(slice(slice_, slice_+1, 1)) - else: - fixed.append(slice_) - index = tuple(fixed) - if len(index) < dims: - index += (slice(None),) * (dims-len(index)) - - # Return a new arrayterator object. - out = self.__class__(self.var, self.buf_size) - for i, (start, stop, step, slice_) in enumerate( - zip(self.start, self.stop, self.step, index)): - out.start[i] = start + (slice_.start or 0) - out.step[i] = step * (slice_.step or 1) - out.stop[i] = start + (slice_.stop or stop-start) - out.stop[i] = min(stop, out.stop[i]) - return out - - def __array__(self): - """ - Return corresponding data. - - """ - slice_ = tuple(slice(*t) for t in zip( - self.start, self.stop, self.step)) - return self.var[slice_] - - @property - def flat(self): - """ - A 1-D flat iterator for Arrayterator objects. - - This iterator returns elements of the array to be iterated over in - `Arrayterator` one by one. It is similar to `flatiter`. - - See Also - -------- - Arrayterator - flatiter - - Examples - -------- - >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) - >>> a_itor = np.lib.Arrayterator(a, 2) - - >>> for subarr in a_itor.flat: - ... if not subarr: - ... print(subarr, type(subarr)) - ... - 0 - - """ - for block in self: - for value in block.flat: - yield value - - @property - def shape(self): - """ - The shape of the array to be iterated over. - - For an example, see `Arrayterator`. 
- - """ - return tuple(((stop-start-1)//step+1) for start, stop, step in - zip(self.start, self.stop, self.step)) - - def __iter__(self): - # Skip arrays with degenerate dimensions - if [dim for dim in self.shape if dim <= 0]: - return - - start = self.start[:] - stop = self.stop[:] - step = self.step[:] - ndims = self.var.ndim - - while True: - count = self.buf_size or reduce(mul, self.shape) - - # iterate over each dimension, looking for the - # running dimension (ie, the dimension along which - # the blocks will be built from) - rundim = 0 - for i in range(ndims-1, -1, -1): - # if count is zero we ran out of elements to read - # along higher dimensions, so we read only a single position - if count == 0: - stop[i] = start[i]+1 - elif count <= self.shape[i]: - # limit along this dimension - stop[i] = start[i] + count*step[i] - rundim = i - else: - # read everything along this dimension - stop[i] = self.stop[i] - stop[i] = min(self.stop[i], stop[i]) - count = count//self.shape[i] - - # yield a block - slice_ = tuple(slice(*t) for t in zip(start, stop, step)) - yield self.var[slice_] - - # Update start position, taking care of overflow to - # other dimensions - start[rundim] = stop[rundim] # start where we stopped - for i in range(ndims-1, 0, -1): - if start[i] >= self.stop[i]: - start[i] = self.start[i] - start[i-1] += self.step[i-1] - if start[0] >= self.stop[0]: - return diff --git a/venv/lib/python3.7/site-packages/numpy/lib/financial.py b/venv/lib/python3.7/site-packages/numpy/lib/financial.py deleted file mode 100644 index a011e52..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/financial.py +++ /dev/null @@ -1,969 +0,0 @@ -"""Some simple financial calculations - -patterned after spreadsheet computations. - -There is some complexity in each function -so that the functions behave like ufuncs with -broadcasting and being able to be called with scalars -or arrays (or other sequences). 
- -Functions support the :class:`decimal.Decimal` type unless -otherwise stated. -""" -from __future__ import division, absolute_import, print_function - -import warnings -from decimal import Decimal -import functools - -import numpy as np -from numpy.core import overrides - - -_depmsg = ("numpy.{name} is deprecated and will be removed from NumPy 1.20. " - "Use numpy_financial.{name} instead " - "(https://pypi.org/project/numpy-financial/).") - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - - -__all__ = ['fv', 'pmt', 'nper', 'ipmt', 'ppmt', 'pv', 'rate', - 'irr', 'npv', 'mirr'] - -_when_to_num = {'end':0, 'begin':1, - 'e':0, 'b':1, - 0:0, 1:1, - 'beginning':1, - 'start':1, - 'finish':0} - -def _convert_when(when): - #Test to see if when has already been converted to ndarray - #This will happen if one function calls another, for example ppmt - if isinstance(when, np.ndarray): - return when - try: - return _when_to_num[when] - except (KeyError, TypeError): - return [_when_to_num[x] for x in when] - - -def _fv_dispatcher(rate, nper, pmt, pv, when=None): - warnings.warn(_depmsg.format(name='fv'), - DeprecationWarning, stacklevel=3) - return (rate, nper, pmt, pv) - - -@array_function_dispatch(_fv_dispatcher) -def fv(rate, nper, pmt, pv, when='end'): - """ - Compute the future value. - - .. deprecated:: 1.18 - - `fv` is deprecated; for details, see NEP 32 [1]_. - Use the corresponding function in the numpy-financial library, - https://pypi.org/project/numpy-financial. 
- - Given: - * a present value, `pv` - * an interest `rate` compounded once per period, of which - there are - * `nper` total - * a (fixed) payment, `pmt`, paid either - * at the beginning (`when` = {'begin', 1}) or the end - (`when` = {'end', 0}) of each period - - Return: - the value at the end of the `nper` periods - - Parameters - ---------- - rate : scalar or array_like of shape(M, ) - Rate of interest as decimal (not per cent) per period - nper : scalar or array_like of shape(M, ) - Number of compounding periods - pmt : scalar or array_like of shape(M, ) - Payment - pv : scalar or array_like of shape(M, ) - Present value - when : {{'begin', 1}, {'end', 0}}, {string, int}, optional - When payments are due ('begin' (1) or 'end' (0)). - Defaults to {'end', 0}. - - Returns - ------- - out : ndarray - Future values. If all input is scalar, returns a scalar float. If - any input is array_like, returns future values for each input element. - If multiple inputs are array_like, they all must have the same shape. - - Notes - ----- - The future value is computed by solving the equation:: - - fv + - pv*(1+rate)**nper + - pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) == 0 - - or, when ``rate == 0``:: - - fv + pv + pmt * nper == 0 - - References - ---------- - .. [1] NumPy Enhancement Proposal (NEP) 32, - https://numpy.org/neps/nep-0032-remove-financial-functions.html - .. [2] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). - Open Document Format for Office Applications (OpenDocument)v1.2, - Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, - Pre-Draft 12. Organization for the Advancement of Structured Information - Standards (OASIS). Billerica, MA, USA. [ODT Document]. - Available: - http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula - OpenDocument-formula-20090508.odt - - - Examples - -------- - What is the future value after 10 years of saving $100 now, with - an additional monthly savings of $100. 
Assume the interest rate is - 5% (annually) compounded monthly? - - >>> np.fv(0.05/12, 10*12, -100, -100) - 15692.928894335748 - - By convention, the negative sign represents cash flow out (i.e. money not - available today). Thus, saving $100 a month at 5% annual interest leads - to $15,692.93 available to spend in 10 years. - - If any input is array_like, returns an array of equal shape. Let's - compare different interest rates from the example above. - - >>> a = np.array((0.05, 0.06, 0.07))/12 - >>> np.fv(a, 10*12, -100, -100) - array([ 15692.92889434, 16569.87435405, 17509.44688102]) # may vary - - """ - when = _convert_when(when) - (rate, nper, pmt, pv, when) = map(np.asarray, [rate, nper, pmt, pv, when]) - temp = (1+rate)**nper - fact = np.where(rate == 0, nper, - (1 + rate*when)*(temp - 1)/rate) - return -(pv*temp + pmt*fact) - - -def _pmt_dispatcher(rate, nper, pv, fv=None, when=None): - warnings.warn(_depmsg.format(name='pmt'), - DeprecationWarning, stacklevel=3) - return (rate, nper, pv, fv) - - -@array_function_dispatch(_pmt_dispatcher) -def pmt(rate, nper, pv, fv=0, when='end'): - """ - Compute the payment against loan principal plus interest. - - .. deprecated:: 1.18 - - `pmt` is deprecated; for details, see NEP 32 [1]_. - Use the corresponding function in the numpy-financial library, - https://pypi.org/project/numpy-financial. - - Given: - * a present value, `pv` (e.g., an amount borrowed) - * a future value, `fv` (e.g., 0) - * an interest `rate` compounded once per period, of which - there are - * `nper` total - * and (optional) specification of whether payment is made - at the beginning (`when` = {'begin', 1}) or the end - (`when` = {'end', 0}) of each period - - Return: - the (fixed) periodic payment. 
- - Parameters - ---------- - rate : array_like - Rate of interest (per period) - nper : array_like - Number of compounding periods - pv : array_like - Present value - fv : array_like, optional - Future value (default = 0) - when : {{'begin', 1}, {'end', 0}}, {string, int} - When payments are due ('begin' (1) or 'end' (0)) - - Returns - ------- - out : ndarray - Payment against loan plus interest. If all input is scalar, returns a - scalar float. If any input is array_like, returns payment for each - input element. If multiple inputs are array_like, they all must have - the same shape. - - Notes - ----- - The payment is computed by solving the equation:: - - fv + - pv*(1 + rate)**nper + - pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) == 0 - - or, when ``rate == 0``:: - - fv + pv + pmt * nper == 0 - - for ``pmt``. - - Note that computing a monthly mortgage payment is only - one use for this function. For example, pmt returns the - periodic deposit one must make to achieve a specified - future balance given an initial deposit, a fixed, - periodically compounded interest rate, and the total - number of periods. - - References - ---------- - .. [1] NumPy Enhancement Proposal (NEP) 32, - https://numpy.org/neps/nep-0032-remove-financial-functions.html - .. [2] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). - Open Document Format for Office Applications (OpenDocument)v1.2, - Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, - Pre-Draft 12. Organization for the Advancement of Structured Information - Standards (OASIS). Billerica, MA, USA. [ODT Document]. - Available: - http://www.oasis-open.org/committees/documents.php - ?wg_abbrev=office-formulaOpenDocument-formula-20090508.odt - - Examples - -------- - What is the monthly payment needed to pay off a $200,000 loan in 15 - years at an annual interest rate of 7.5%? 
- - >>> np.pmt(0.075/12, 12*15, 200000) - -1854.0247200054619 - - In order to pay-off (i.e., have a future-value of 0) the $200,000 obtained - today, a monthly payment of $1,854.02 would be required. Note that this - example illustrates usage of `fv` having a default value of 0. - - """ - when = _convert_when(when) - (rate, nper, pv, fv, when) = map(np.array, [rate, nper, pv, fv, when]) - temp = (1 + rate)**nper - mask = (rate == 0) - masked_rate = np.where(mask, 1, rate) - fact = np.where(mask != 0, nper, - (1 + masked_rate*when)*(temp - 1)/masked_rate) - return -(fv + pv*temp) / fact - - -def _nper_dispatcher(rate, pmt, pv, fv=None, when=None): - warnings.warn(_depmsg.format(name='nper'), - DeprecationWarning, stacklevel=3) - return (rate, pmt, pv, fv) - - -@array_function_dispatch(_nper_dispatcher) -def nper(rate, pmt, pv, fv=0, when='end'): - """ - Compute the number of periodic payments. - - .. deprecated:: 1.18 - - `nper` is deprecated; for details, see NEP 32 [1]_. - Use the corresponding function in the numpy-financial library, - https://pypi.org/project/numpy-financial. - - :class:`decimal.Decimal` type is not supported. - - Parameters - ---------- - rate : array_like - Rate of interest (per period) - pmt : array_like - Payment - pv : array_like - Present value - fv : array_like, optional - Future value - when : {{'begin', 1}, {'end', 0}}, {string, int}, optional - When payments are due ('begin' (1) or 'end' (0)) - - Notes - ----- - The number of periods ``nper`` is computed by solving the equation:: - - fv + pv*(1+rate)**nper + pmt*(1+rate*when)/rate*((1+rate)**nper-1) = 0 - - but if ``rate = 0`` then:: - - fv + pv + pmt*nper = 0 - - References - ---------- - .. [1] NumPy Enhancement Proposal (NEP) 32, - https://numpy.org/neps/nep-0032-remove-financial-functions.html - - Examples - -------- - If you only had $150/month to pay towards the loan, how long would it take - to pay-off a loan of $8,000 at 7% annual interest? 
- - >>> print(np.round(np.nper(0.07/12, -150, 8000), 5)) - 64.07335 - - So, over 64 months would be required to pay off the loan. - - The same analysis could be done with several different interest rates - and/or payments and/or total amounts to produce an entire table. - - >>> np.nper(*(np.ogrid[0.07/12: 0.08/12: 0.01/12, - ... -150 : -99 : 50 , - ... 8000 : 9001 : 1000])) - array([[[ 64.07334877, 74.06368256], - [108.07548412, 127.99022654]], - [[ 66.12443902, 76.87897353], - [114.70165583, 137.90124779]]]) - - """ - when = _convert_when(when) - (rate, pmt, pv, fv, when) = map(np.asarray, [rate, pmt, pv, fv, when]) - - use_zero_rate = False - with np.errstate(divide="raise"): - try: - z = pmt*(1+rate*when)/rate - except FloatingPointError: - use_zero_rate = True - - if use_zero_rate: - return (-fv + pv) / pmt - else: - A = -(fv + pv)/(pmt+0) - B = np.log((-fv+z) / (pv+z))/np.log(1+rate) - return np.where(rate == 0, A, B) - - -def _ipmt_dispatcher(rate, per, nper, pv, fv=None, when=None): - warnings.warn(_depmsg.format(name='ipmt'), - DeprecationWarning, stacklevel=3) - return (rate, per, nper, pv, fv) - - -@array_function_dispatch(_ipmt_dispatcher) -def ipmt(rate, per, nper, pv, fv=0, when='end'): - """ - Compute the interest portion of a payment. - - .. deprecated:: 1.18 - - `ipmt` is deprecated; for details, see NEP 32 [1]_. - Use the corresponding function in the numpy-financial library, - https://pypi.org/project/numpy-financial. - - Parameters - ---------- - rate : scalar or array_like of shape(M, ) - Rate of interest as decimal (not per cent) per period - per : scalar or array_like of shape(M, ) - Interest paid against the loan changes during the life or the loan. - The `per` is the payment period to calculate the interest amount. 
- nper : scalar or array_like of shape(M, ) - Number of compounding periods - pv : scalar or array_like of shape(M, ) - Present value - fv : scalar or array_like of shape(M, ), optional - Future value - when : {{'begin', 1}, {'end', 0}}, {string, int}, optional - When payments are due ('begin' (1) or 'end' (0)). - Defaults to {'end', 0}. - - Returns - ------- - out : ndarray - Interest portion of payment. If all input is scalar, returns a scalar - float. If any input is array_like, returns interest payment for each - input element. If multiple inputs are array_like, they all must have - the same shape. - - See Also - -------- - ppmt, pmt, pv - - Notes - ----- - The total payment is made up of payment against principal plus interest. - - ``pmt = ppmt + ipmt`` - - References - ---------- - .. [1] NumPy Enhancement Proposal (NEP) 32, - https://numpy.org/neps/nep-0032-remove-financial-functions.html - - Examples - -------- - What is the amortization schedule for a 1 year loan of $2500 at - 8.24% interest per year compounded monthly? - - >>> principal = 2500.00 - - The 'per' variable represents the periods of the loan. Remember that - financial equations start the period count at 1! - - >>> per = np.arange(1*12) + 1 - >>> ipmt = np.ipmt(0.0824/12, per, 1*12, principal) - >>> ppmt = np.ppmt(0.0824/12, per, 1*12, principal) - - Each element of the sum of the 'ipmt' and 'ppmt' arrays should equal - 'pmt'. - - >>> pmt = np.pmt(0.0824/12, 1*12, principal) - >>> np.allclose(ipmt + ppmt, pmt) - True - - >>> fmt = '{0:2d} {1:8.2f} {2:8.2f} {3:8.2f}' - >>> for payment in per: - ... index = payment - 1 - ... principal = principal + ppmt[index] - ... 
print(fmt.format(payment, ppmt[index], ipmt[index], principal)) - 1 -200.58 -17.17 2299.42 - 2 -201.96 -15.79 2097.46 - 3 -203.35 -14.40 1894.11 - 4 -204.74 -13.01 1689.37 - 5 -206.15 -11.60 1483.22 - 6 -207.56 -10.18 1275.66 - 7 -208.99 -8.76 1066.67 - 8 -210.42 -7.32 856.25 - 9 -211.87 -5.88 644.38 - 10 -213.32 -4.42 431.05 - 11 -214.79 -2.96 216.26 - 12 -216.26 -1.49 -0.00 - - >>> interestpd = np.sum(ipmt) - >>> np.round(interestpd, 2) - -112.98 - - """ - when = _convert_when(when) - rate, per, nper, pv, fv, when = np.broadcast_arrays(rate, per, nper, - pv, fv, when) - total_pmt = pmt(rate, nper, pv, fv, when) - ipmt = _rbl(rate, per, total_pmt, pv, when)*rate - try: - ipmt = np.where(when == 1, ipmt/(1 + rate), ipmt) - ipmt = np.where(np.logical_and(when == 1, per == 1), 0, ipmt) - except IndexError: - pass - return ipmt - - -def _rbl(rate, per, pmt, pv, when): - """ - This function is here to simply have a different name for the 'fv' - function to not interfere with the 'fv' keyword argument within the 'ipmt' - function. It is the 'remaining balance on loan' which might be useful as - it's own function, but is easily calculated with the 'fv' function. - """ - return fv(rate, (per - 1), pmt, pv, when) - - -def _ppmt_dispatcher(rate, per, nper, pv, fv=None, when=None): - warnings.warn(_depmsg.format(name='ppmt'), - DeprecationWarning, stacklevel=3) - return (rate, per, nper, pv, fv) - - -@array_function_dispatch(_ppmt_dispatcher) -def ppmt(rate, per, nper, pv, fv=0, when='end'): - """ - Compute the payment against loan principal. - - .. deprecated:: 1.18 - - `ppmt` is deprecated; for details, see NEP 32 [1]_. - Use the corresponding function in the numpy-financial library, - https://pypi.org/project/numpy-financial. - - Parameters - ---------- - rate : array_like - Rate of interest (per period) - per : array_like, int - Amount paid against the loan changes. The `per` is the period of - interest. 
- nper : array_like - Number of compounding periods - pv : array_like - Present value - fv : array_like, optional - Future value - when : {{'begin', 1}, {'end', 0}}, {string, int} - When payments are due ('begin' (1) or 'end' (0)) - - See Also - -------- - pmt, pv, ipmt - - References - ---------- - .. [1] NumPy Enhancement Proposal (NEP) 32, - https://numpy.org/neps/nep-0032-remove-financial-functions.html - - """ - total = pmt(rate, nper, pv, fv, when) - return total - ipmt(rate, per, nper, pv, fv, when) - - -def _pv_dispatcher(rate, nper, pmt, fv=None, when=None): - warnings.warn(_depmsg.format(name='pv'), - DeprecationWarning, stacklevel=3) - return (rate, nper, nper, pv, fv) - - -@array_function_dispatch(_pv_dispatcher) -def pv(rate, nper, pmt, fv=0, when='end'): - """ - Compute the present value. - - .. deprecated:: 1.18 - - `pv` is deprecated; for details, see NEP 32 [1]_. - Use the corresponding function in the numpy-financial library, - https://pypi.org/project/numpy-financial. - - Given: - * a future value, `fv` - * an interest `rate` compounded once per period, of which - there are - * `nper` total - * a (fixed) payment, `pmt`, paid either - * at the beginning (`when` = {'begin', 1}) or the end - (`when` = {'end', 0}) of each period - - Return: - the value now - - Parameters - ---------- - rate : array_like - Rate of interest (per period) - nper : array_like - Number of compounding periods - pmt : array_like - Payment - fv : array_like, optional - Future value - when : {{'begin', 1}, {'end', 0}}, {string, int}, optional - When payments are due ('begin' (1) or 'end' (0)) - - Returns - ------- - out : ndarray, float - Present value of a series of payments or investments. - - Notes - ----- - The present value is computed by solving the equation:: - - fv + - pv*(1 + rate)**nper + - pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) = 0 - - or, when ``rate = 0``:: - - fv + pv + pmt * nper = 0 - - for `pv`, which is then returned. - - References - ---------- - .. 
[1] NumPy Enhancement Proposal (NEP) 32, - https://numpy.org/neps/nep-0032-remove-financial-functions.html - .. [2] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). - Open Document Format for Office Applications (OpenDocument)v1.2, - Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, - Pre-Draft 12. Organization for the Advancement of Structured Information - Standards (OASIS). Billerica, MA, USA. [ODT Document]. - Available: - http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula - OpenDocument-formula-20090508.odt - - Examples - -------- - What is the present value (e.g., the initial investment) - of an investment that needs to total $15692.93 - after 10 years of saving $100 every month? Assume the - interest rate is 5% (annually) compounded monthly. - - >>> np.pv(0.05/12, 10*12, -100, 15692.93) - -100.00067131625819 - - By convention, the negative sign represents cash flow out - (i.e., money not available today). Thus, to end up with - $15,692.93 in 10 years saving $100 a month at 5% annual - interest, one's initial deposit should also be $100. - - If any input is array_like, ``pv`` returns an array of equal shape. - Let's compare different interest rates in the example above: - - >>> a = np.array((0.05, 0.04, 0.03))/12 - >>> np.pv(a, 10*12, -100, 15692.93) - array([ -100.00067132, -649.26771385, -1273.78633713]) # may vary - - So, to end up with the same $15692.93 under the same $100 per month - "savings plan," for annual interest rates of 4% and 3%, one would - need initial investments of $649.27 and $1273.79, respectively. 
- - """ - when = _convert_when(when) - (rate, nper, pmt, fv, when) = map(np.asarray, [rate, nper, pmt, fv, when]) - temp = (1+rate)**nper - fact = np.where(rate == 0, nper, (1+rate*when)*(temp-1)/rate) - return -(fv + pmt*fact)/temp - -# Computed with Sage -# (y + (r + 1)^n*x + p*((r + 1)^n - 1)*(r*w + 1)/r)/(n*(r + 1)^(n - 1)*x - -# p*((r + 1)^n - 1)*(r*w + 1)/r^2 + n*p*(r + 1)^(n - 1)*(r*w + 1)/r + -# p*((r + 1)^n - 1)*w/r) - -def _g_div_gp(r, n, p, x, y, w): - t1 = (r+1)**n - t2 = (r+1)**(n-1) - return ((y + t1*x + p*(t1 - 1)*(r*w + 1)/r) / - (n*t2*x - p*(t1 - 1)*(r*w + 1)/(r**2) + n*p*t2*(r*w + 1)/r + - p*(t1 - 1)*w/r)) - - -def _rate_dispatcher(nper, pmt, pv, fv, when=None, guess=None, tol=None, - maxiter=None): - warnings.warn(_depmsg.format(name='rate'), - DeprecationWarning, stacklevel=3) - return (nper, pmt, pv, fv) - - -# Use Newton's iteration until the change is less than 1e-6 -# for all values or a maximum of 100 iterations is reached. -# Newton's rule is -# r_{n+1} = r_{n} - g(r_n)/g'(r_n) -# where -# g(r) is the formula -# g'(r) is the derivative with respect to r. -@array_function_dispatch(_rate_dispatcher) -def rate(nper, pmt, pv, fv, when='end', guess=None, tol=None, maxiter=100): - """ - Compute the rate of interest per period. - - .. deprecated:: 1.18 - - `rate` is deprecated; for details, see NEP 32 [1]_. - Use the corresponding function in the numpy-financial library, - https://pypi.org/project/numpy-financial. 
- - Parameters - ---------- - nper : array_like - Number of compounding periods - pmt : array_like - Payment - pv : array_like - Present value - fv : array_like - Future value - when : {{'begin', 1}, {'end', 0}}, {string, int}, optional - When payments are due ('begin' (1) or 'end' (0)) - guess : Number, optional - Starting guess for solving the rate of interest, default 0.1 - tol : Number, optional - Required tolerance for the solution, default 1e-6 - maxiter : int, optional - Maximum iterations in finding the solution - - Notes - ----- - The rate of interest is computed by iteratively solving the - (non-linear) equation:: - - fv + pv*(1+rate)**nper + pmt*(1+rate*when)/rate * ((1+rate)**nper - 1) = 0 - - for ``rate``. - - References - ---------- - .. [1] NumPy Enhancement Proposal (NEP) 32, - https://numpy.org/neps/nep-0032-remove-financial-functions.html - .. [2] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). - Open Document Format for Office Applications (OpenDocument)v1.2, - Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, - Pre-Draft 12. Organization for the Advancement of Structured Information - Standards (OASIS). Billerica, MA, USA. [ODT Document]. 
- Available: - http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula - OpenDocument-formula-20090508.odt - - """ - when = _convert_when(when) - default_type = Decimal if isinstance(pmt, Decimal) else float - - # Handle casting defaults to Decimal if/when pmt is a Decimal and - # guess and/or tol are not given default values - if guess is None: - guess = default_type('0.1') - - if tol is None: - tol = default_type('1e-6') - - (nper, pmt, pv, fv, when) = map(np.asarray, [nper, pmt, pv, fv, when]) - - rn = guess - iterator = 0 - close = False - while (iterator < maxiter) and not close: - rnp1 = rn - _g_div_gp(rn, nper, pmt, pv, fv, when) - diff = abs(rnp1-rn) - close = np.all(diff < tol) - iterator += 1 - rn = rnp1 - if not close: - # Return nan's in array of the same shape as rn - return np.nan + rn - else: - return rn - - -def _irr_dispatcher(values): - warnings.warn(_depmsg.format(name='irr'), - DeprecationWarning, stacklevel=3) - return (values,) - - -@array_function_dispatch(_irr_dispatcher) -def irr(values): - """ - Return the Internal Rate of Return (IRR). - - .. deprecated:: 1.18 - - `irr` is deprecated; for details, see NEP 32 [1]_. - Use the corresponding function in the numpy-financial library, - https://pypi.org/project/numpy-financial. - - This is the "average" periodically compounded rate of return - that gives a net present value of 0.0; for a more complete explanation, - see Notes below. - - :class:`decimal.Decimal` type is not supported. - - Parameters - ---------- - values : array_like, shape(N,) - Input cash flows per time period. By convention, net "deposits" - are negative and net "withdrawals" are positive. Thus, for - example, at least the first element of `values`, which represents - the initial investment, will typically be negative. - - Returns - ------- - out : float - Internal Rate of Return for periodic input values. 
- - Notes - ----- - The IRR is perhaps best understood through an example (illustrated - using np.irr in the Examples section below). Suppose one invests 100 - units and then makes the following withdrawals at regular (fixed) - intervals: 39, 59, 55, 20. Assuming the ending value is 0, one's 100 - unit investment yields 173 units; however, due to the combination of - compounding and the periodic withdrawals, the "average" rate of return - is neither simply 0.73/4 nor (1.73)^0.25-1. Rather, it is the solution - (for :math:`r`) of the equation: - - .. math:: -100 + \\frac{39}{1+r} + \\frac{59}{(1+r)^2} - + \\frac{55}{(1+r)^3} + \\frac{20}{(1+r)^4} = 0 - - In general, for `values` :math:`= [v_0, v_1, ... v_M]`, - irr is the solution of the equation: [2]_ - - .. math:: \\sum_{t=0}^M{\\frac{v_t}{(1+irr)^{t}}} = 0 - - References - ---------- - .. [1] NumPy Enhancement Proposal (NEP) 32, - https://numpy.org/neps/nep-0032-remove-financial-functions.html - .. [2] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed., - Addison-Wesley, 2003, pg. 348. - - Examples - -------- - >>> round(np.irr([-100, 39, 59, 55, 20]), 5) - 0.28095 - >>> round(np.irr([-100, 0, 0, 74]), 5) - -0.0955 - >>> round(np.irr([-100, 100, 0, -7]), 5) - -0.0833 - >>> round(np.irr([-100, 100, 0, 7]), 5) - 0.06206 - >>> round(np.irr([-5, 10.5, 1, -8, 1]), 5) - 0.0886 - - """ - # `np.roots` call is why this function does not support Decimal type. - # - # Ultimately Decimal support needs to be added to np.roots, which has - # greater implications on the entire linear algebra module and how it does - # eigenvalue computations. - res = np.roots(values[::-1]) - mask = (res.imag == 0) & (res.real > 0) - if not mask.any(): - return np.nan - res = res[mask].real - # NPV(rate) = 0 can have more than one solution so we return - # only the solution closest to zero. 
- rate = 1/res - 1 - rate = rate.item(np.argmin(np.abs(rate))) - return rate - - -def _npv_dispatcher(rate, values): - warnings.warn(_depmsg.format(name='npv'), - DeprecationWarning, stacklevel=3) - return (values,) - - -@array_function_dispatch(_npv_dispatcher) -def npv(rate, values): - """ - Returns the NPV (Net Present Value) of a cash flow series. - - .. deprecated:: 1.18 - - `npv` is deprecated; for details, see NEP 32 [1]_. - Use the corresponding function in the numpy-financial library, - https://pypi.org/project/numpy-financial. - - Parameters - ---------- - rate : scalar - The discount rate. - values : array_like, shape(M, ) - The values of the time series of cash flows. The (fixed) time - interval between cash flow "events" must be the same as that for - which `rate` is given (i.e., if `rate` is per year, then precisely - a year is understood to elapse between each cash flow event). By - convention, investments or "deposits" are negative, income or - "withdrawals" are positive; `values` must begin with the initial - investment, thus `values[0]` will typically be negative. - - Returns - ------- - out : float - The NPV of the input cash flow series `values` at the discount - `rate`. - - Warnings - -------- - ``npv`` considers a series of cashflows starting in the present (t = 0). - NPV can also be defined with a series of future cashflows, paid at the - end, rather than the start, of each period. If future cashflows are used, - the first cashflow `values[0]` must be zeroed and added to the net - present value of the future cashflows. This is demonstrated in the - examples. - - Notes - ----- - Returns the result of: [2]_ - - .. math :: \\sum_{t=0}^{M-1}{\\frac{values_t}{(1+rate)^{t}}} - - References - ---------- - .. [1] NumPy Enhancement Proposal (NEP) 32, - https://numpy.org/neps/nep-0032-remove-financial-functions.html - .. [2] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed., - Addison-Wesley, 2003, pg. 346. 
- - Examples - -------- - Consider a potential project with an initial investment of $40 000 and - projected cashflows of $5 000, $8 000, $12 000 and $30 000 at the end of - each period discounted at a rate of 8% per period. To find the project's - net present value: - - >>> rate, cashflows = 0.08, [-40_000, 5_000, 8_000, 12_000, 30_000] - >>> np.npv(rate, cashflows).round(5) - 3065.22267 - - It may be preferable to split the projected cashflow into an initial - investment and expected future cashflows. In this case, the value of - the initial cashflow is zero and the initial investment is later added - to the future cashflows net present value: - - >>> initial_cashflow = cashflows[0] - >>> cashflows[0] = 0 - >>> np.round(np.npv(rate, cashflows) + initial_cashflow, 5) - 3065.22267 - - """ - values = np.asarray(values) - return (values / (1+rate)**np.arange(0, len(values))).sum(axis=0) - - -def _mirr_dispatcher(values, finance_rate, reinvest_rate): - warnings.warn(_depmsg.format(name='mirr'), - DeprecationWarning, stacklevel=3) - return (values,) - - -@array_function_dispatch(_mirr_dispatcher) -def mirr(values, finance_rate, reinvest_rate): - """ - Modified internal rate of return. - - .. deprecated:: 1.18 - - `mirr` is deprecated; for details, see NEP 32 [1]_. - Use the corresponding function in the numpy-financial library, - https://pypi.org/project/numpy-financial. - - Parameters - ---------- - values : array_like - Cash flows (must contain at least one positive and one negative - value) or nan is returned. The first value is considered a sunk - cost at time zero. - finance_rate : scalar - Interest rate paid on the cash flows - reinvest_rate : scalar - Interest rate received on the cash flows upon reinvestment - - Returns - ------- - out : float - Modified internal rate of return - - References - ---------- - .. 
[1] NumPy Enhancement Proposal (NEP) 32, - https://numpy.org/neps/nep-0032-remove-financial-functions.html - """ - values = np.asarray(values) - n = values.size - - # Without this explicit cast the 1/(n - 1) computation below - # becomes a float, which causes TypeError when using Decimal - # values. - if isinstance(finance_rate, Decimal): - n = Decimal(n) - - pos = values > 0 - neg = values < 0 - if not (pos.any() and neg.any()): - return np.nan - numer = np.abs(npv(reinvest_rate, values*pos)) - denom = np.abs(npv(finance_rate, values*neg)) - return (numer/denom)**(1/(n - 1))*(1 + reinvest_rate) - 1 diff --git a/venv/lib/python3.7/site-packages/numpy/lib/format.py b/venv/lib/python3.7/site-packages/numpy/lib/format.py deleted file mode 100644 index 20e2e9c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/format.py +++ /dev/null @@ -1,916 +0,0 @@ -""" -Binary serialization - -NPY format -========== - -A simple format for saving numpy arrays to disk with the full -information about them. - -The ``.npy`` format is the standard binary file format in NumPy for -persisting a *single* arbitrary NumPy array on disk. The format stores all -of the shape and dtype information necessary to reconstruct the array -correctly even on another machine with a different architecture. -The format is designed to be as simple as possible while achieving -its limited goals. - -The ``.npz`` format is the standard format for persisting *multiple* NumPy -arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy`` -files, one for each array. - -Capabilities ------------- - -- Can represent all NumPy arrays including nested record arrays and - object arrays. - -- Represents the data in its native binary form. - -- Supports Fortran-contiguous arrays directly. - -- Stores all of the necessary information to reconstruct the array - including shape and dtype on a machine of a different - architecture. 
Both little-endian and big-endian arrays are - supported, and a file with little-endian numbers will yield - a little-endian array on any machine reading the file. The - types are described in terms of their actual sizes. For example, - if a machine with a 64-bit C "long int" writes out an array with - "long ints", a reading machine with 32-bit C "long ints" will yield - an array with 64-bit integers. - -- Is straightforward to reverse engineer. Datasets often live longer than - the programs that created them. A competent developer should be - able to create a solution in their preferred programming language to - read most ``.npy`` files that he has been given without much - documentation. - -- Allows memory-mapping of the data. See `open_memmep`. - -- Can be read from a filelike stream object instead of an actual file. - -- Stores object arrays, i.e. arrays containing elements that are arbitrary - Python objects. Files with object arrays are not to be mmapable, but - can be read and written to disk. - -Limitations ------------ - -- Arbitrary subclasses of numpy.ndarray are not completely preserved. - Subclasses will be accepted for writing, but only the array data will - be written out. A regular numpy.ndarray object will be created - upon reading the file. - -.. warning:: - - Due to limitations in the interpretation of structured dtypes, dtypes - with fields with empty names will have the names replaced by 'f0', 'f1', - etc. Such arrays will not round-trip through the format entirely - accurately. The data is intact; only the field names will differ. We are - working on a fix for this. This fix will not require a change in the - file format. The arrays with such structures can still be saved and - restored, and the correct dtype may be restored by using the - ``loadedarray.view(correct_dtype)`` method. - -File extensions ---------------- - -We recommend using the ``.npy`` and ``.npz`` extensions for files saved -in this format. 
This is by no means a requirement; applications may wish -to use these file formats but use an extension specific to the -application. In the absence of an obvious alternative, however, -we suggest using ``.npy`` and ``.npz``. - -Version numbering ------------------ - -The version numbering of these formats is independent of NumPy version -numbering. If the format is upgraded, the code in `numpy.io` will still -be able to read and write Version 1.0 files. - -Format Version 1.0 ------------------- - -The first 6 bytes are a magic string: exactly ``\\x93NUMPY``. - -The next 1 byte is an unsigned byte: the major version number of the file -format, e.g. ``\\x01``. - -The next 1 byte is an unsigned byte: the minor version number of the file -format, e.g. ``\\x00``. Note: the version of the file format is not tied -to the version of the numpy package. - -The next 2 bytes form a little-endian unsigned short int: the length of -the header data HEADER_LEN. - -The next HEADER_LEN bytes form the header data describing the array's -format. It is an ASCII string which contains a Python literal expression -of a dictionary. It is terminated by a newline (``\\n``) and padded with -spaces (``\\x20``) to make the total of -``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible -by 64 for alignment purposes. - -The dictionary contains three keys: - - "descr" : dtype.descr - An object that can be passed as an argument to the `numpy.dtype` - constructor to create the array's dtype. - "fortran_order" : bool - Whether the array data is Fortran-contiguous or not. Since - Fortran-contiguous arrays are a common form of non-C-contiguity, - we allow them to be written directly to disk for efficiency. - "shape" : tuple of int - The shape of the array. - -For repeatability and readability, the dictionary keys are sorted in -alphabetic order. This is for convenience only. A writer SHOULD implement -this if possible. A reader MUST NOT depend on this. 
- -Following the header comes the array data. If the dtype contains Python -objects (i.e. ``dtype.hasobject is True``), then the data is a Python -pickle of the array. Otherwise the data is the contiguous (either C- -or Fortran-, depending on ``fortran_order``) bytes of the array. -Consumers can figure out the number of bytes by multiplying the number -of elements given by the shape (noting that ``shape=()`` means there is -1 element) by ``dtype.itemsize``. - -Format Version 2.0 ------------------- - -The version 1.0 format only allowed the array header to have a total size of -65535 bytes. This can be exceeded by structured arrays with a large number of -columns. The version 2.0 format extends the header size to 4 GiB. -`numpy.save` will automatically save in 2.0 format if the data requires it, -else it will always use the more compatible 1.0 format. - -The description of the fourth element of the header therefore has become: -"The next 4 bytes form a little-endian unsigned int: the length of the header -data HEADER_LEN." - -Format Version 3.0 ------------------- - -This version replaces the ASCII string (which in practice was latin1) with -a utf8-encoded string, so supports structured types with any unicode field -names. - -Notes ------ -The ``.npy`` format, including motivation for creating it and a comparison of -alternatives, is described in the `"npy-format" NEP -`_, however details have -evolved with time and this document is more current. 
- -""" -from __future__ import division, absolute_import, print_function - -import numpy -import sys -import io -import warnings -from numpy.lib.utils import safe_eval -from numpy.compat import ( - isfileobj, long, os_fspath, pickle - ) - - -__all__ = [] - - -MAGIC_PREFIX = b'\x93NUMPY' -MAGIC_LEN = len(MAGIC_PREFIX) + 2 -ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096 -BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes - -# difference between version 1.0 and 2.0 is a 4 byte (I) header length -# instead of 2 bytes (H) allowing storage of large structured arrays -_header_size_info = { - (1, 0): (' 255: - raise ValueError("major version must be 0 <= major < 256") - if minor < 0 or minor > 255: - raise ValueError("minor version must be 0 <= minor < 256") - if sys.version_info[0] < 3: - return MAGIC_PREFIX + chr(major) + chr(minor) - else: - return MAGIC_PREFIX + bytes([major, minor]) - -def read_magic(fp): - """ Read the magic string to get the version of the file format. - - Parameters - ---------- - fp : filelike object - - Returns - ------- - major : int - minor : int - """ - magic_str = _read_bytes(fp, MAGIC_LEN, "magic string") - if magic_str[:-2] != MAGIC_PREFIX: - msg = "the magic string is not correct; expected %r, got %r" - raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2])) - if sys.version_info[0] < 3: - major, minor = map(ord, magic_str[-2:]) - else: - major, minor = magic_str[-2:] - return major, minor - -def _has_metadata(dt): - if dt.metadata is not None: - return True - elif dt.names is not None: - return any(_has_metadata(dt[k]) for k in dt.names) - elif dt.subdtype is not None: - return _has_metadata(dt.base) - else: - return False - -def dtype_to_descr(dtype): - """ - Get a serializable descriptor from the dtype. - - The .descr attribute of a dtype object cannot be round-tripped through - the dtype() constructor. 
Simple types, like dtype('float32'), have - a descr which looks like a record array with one field with '' as - a name. The dtype() constructor interprets this as a request to give - a default name. Instead, we construct descriptor that can be passed to - dtype(). - - Parameters - ---------- - dtype : dtype - The dtype of the array that will be written to disk. - - Returns - ------- - descr : object - An object that can be passed to `numpy.dtype()` in order to - replicate the input dtype. - - """ - if _has_metadata(dtype): - warnings.warn("metadata on a dtype may be saved or ignored, but will " - "raise if saved when read. Use another form of storage.", - UserWarning, stacklevel=2) - if dtype.names is not None: - # This is a record array. The .descr is fine. XXX: parts of the - # record array with an empty name, like padding bytes, still get - # fiddled with. This needs to be fixed in the C implementation of - # dtype(). - return dtype.descr - else: - return dtype.str - -def descr_to_dtype(descr): - ''' - descr may be stored as dtype.descr, which is a list of - (name, format, [shape]) tuples where format may be a str or a tuple. - Offsets are not explicitly saved, rather empty fields with - name, format == '', '|Vn' are added as padding. - - This function reverses the process, eliminating the empty padding fields. 
- ''' - if isinstance(descr, str): - # No padding removal needed - return numpy.dtype(descr) - elif isinstance(descr, tuple): - # subtype, will always have a shape descr[1] - dt = descr_to_dtype(descr[0]) - return numpy.dtype((dt, descr[1])) - fields = [] - offset = 0 - for field in descr: - if len(field) == 2: - name, descr_str = field - dt = descr_to_dtype(descr_str) - else: - name, descr_str, shape = field - dt = numpy.dtype((descr_to_dtype(descr_str), shape)) - - # Ignore padding bytes, which will be void bytes with '' as name - # Once support for blank names is removed, only "if name == ''" needed) - is_pad = (name == '' and dt.type is numpy.void and dt.names is None) - if not is_pad: - fields.append((name, dt, offset)) - - offset += dt.itemsize - - names, formats, offsets = zip(*fields) - # names may be (title, names) tuples - nametups = (n if isinstance(n, tuple) else (None, n) for n in names) - titles, names = zip(*nametups) - return numpy.dtype({'names': names, 'formats': formats, 'titles': titles, - 'offsets': offsets, 'itemsize': offset}) - -def header_data_from_array_1_0(array): - """ Get the dictionary of header metadata from a numpy.ndarray. - - Parameters - ---------- - array : numpy.ndarray - - Returns - ------- - d : dict - This has the appropriate entries for writing its string representation - to the header of the file. - """ - d = {'shape': array.shape} - if array.flags.c_contiguous: - d['fortran_order'] = False - elif array.flags.f_contiguous: - d['fortran_order'] = True - else: - # Totally non-contiguous data. We will have to make it C-contiguous - # before writing. Note that we need to test for C_CONTIGUOUS first - # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS. 
- d['fortran_order'] = False - - d['descr'] = dtype_to_descr(array.dtype) - return d - - -def _wrap_header(header, version): - """ - Takes a stringified header, and attaches the prefix and padding to it - """ - import struct - assert version is not None - fmt, encoding = _header_size_info[version] - if not isinstance(header, bytes): # always true on python 3 - header = header.encode(encoding) - hlen = len(header) + 1 - padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN) - try: - header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen) - except struct.error: - msg = "Header length {} too big for version={}".format(hlen, version) - raise ValueError(msg) - - # Pad the header with spaces and a final newline such that the magic - # string, the header-length short and the header are aligned on a - # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes - # aligned up to ARRAY_ALIGN on systems like Linux where mmap() - # offset must be page-aligned (i.e. the beginning of the file). - return header_prefix + header + b' '*padlen + b'\n' - - -def _wrap_header_guess_version(header): - """ - Like `_wrap_header`, but chooses an appropriate version given the contents - """ - try: - return _wrap_header(header, (1, 0)) - except ValueError: - pass - - try: - ret = _wrap_header(header, (2, 0)) - except UnicodeEncodeError: - pass - else: - warnings.warn("Stored array in format 2.0. It can only be" - "read by NumPy >= 1.9", UserWarning, stacklevel=2) - return ret - - header = _wrap_header(header, (3, 0)) - warnings.warn("Stored array in format 3.0. It can only be " - "read by NumPy >= 1.17", UserWarning, stacklevel=2) - return header - - -def _write_array_header(fp, d, version=None): - """ Write the header for an array and returns the version used - - Parameters - ---------- - fp : filelike object - d : dict - This has the appropriate entries for writing its string representation - to the header of the file. 
- version: tuple or None - None means use oldest that works - explicit version will raise a ValueError if the format does not - allow saving this data. Default: None - """ - header = ["{"] - for key, value in sorted(d.items()): - # Need to use repr here, since we eval these when reading - header.append("'%s': %s, " % (key, repr(value))) - header.append("}") - header = "".join(header) - header = _filter_header(header) - if version is None: - header = _wrap_header_guess_version(header) - else: - header = _wrap_header(header, version) - fp.write(header) - -def write_array_header_1_0(fp, d): - """ Write the header for an array using the 1.0 format. - - Parameters - ---------- - fp : filelike object - d : dict - This has the appropriate entries for writing its string - representation to the header of the file. - """ - _write_array_header(fp, d, (1, 0)) - - -def write_array_header_2_0(fp, d): - """ Write the header for an array using the 2.0 format. - The 2.0 format allows storing very large structured arrays. - - .. versionadded:: 1.9.0 - - Parameters - ---------- - fp : filelike object - d : dict - This has the appropriate entries for writing its string - representation to the header of the file. - """ - _write_array_header(fp, d, (2, 0)) - -def read_array_header_1_0(fp): - """ - Read an array header from a filelike object using the 1.0 file format - version. - - This will leave the file object located just after the header. - - Parameters - ---------- - fp : filelike object - A file object or something with a `.read()` method like a file. - - Returns - ------- - shape : tuple of int - The shape of the array. - fortran_order : bool - The array data will be written out directly if it is either - C-contiguous or Fortran-contiguous. Otherwise, it will be made - contiguous before writing it out. - dtype : dtype - The dtype of the file's data. - - Raises - ------ - ValueError - If the data is invalid. 
- - """ - return _read_array_header(fp, version=(1, 0)) - -def read_array_header_2_0(fp): - """ - Read an array header from a filelike object using the 2.0 file format - version. - - This will leave the file object located just after the header. - - .. versionadded:: 1.9.0 - - Parameters - ---------- - fp : filelike object - A file object or something with a `.read()` method like a file. - - Returns - ------- - shape : tuple of int - The shape of the array. - fortran_order : bool - The array data will be written out directly if it is either - C-contiguous or Fortran-contiguous. Otherwise, it will be made - contiguous before writing it out. - dtype : dtype - The dtype of the file's data. - - Raises - ------ - ValueError - If the data is invalid. - - """ - return _read_array_header(fp, version=(2, 0)) - - -def _filter_header(s): - """Clean up 'L' in npz header ints. - - Cleans up the 'L' in strings representing integers. Needed to allow npz - headers produced in Python2 to be read in Python3. - - Parameters - ---------- - s : string - Npy file header. - - Returns - ------- - header : str - Cleaned up header. - - """ - import tokenize - if sys.version_info[0] >= 3: - from io import StringIO - else: - from StringIO import StringIO - - tokens = [] - last_token_was_number = False - # adding newline as python 2.7.5 workaround - string = s + "\n" - for token in tokenize.generate_tokens(StringIO(string).readline): - token_type = token[0] - token_string = token[1] - if (last_token_was_number and - token_type == tokenize.NAME and - token_string == "L"): - continue - else: - tokens.append(token) - last_token_was_number = (token_type == tokenize.NUMBER) - # removing newline (see above) as python 2.7.5 workaround - return tokenize.untokenize(tokens)[:-1] - - -def _read_array_header(fp, version): - """ - see read_array_header_1_0 - """ - # Read an unsigned, little-endian short int which has the length of the - # header. 
- import struct - hinfo = _header_size_info.get(version) - if hinfo is None: - raise ValueError("Invalid version {!r}".format(version)) - hlength_type, encoding = hinfo - - hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length") - header_length = struct.unpack(hlength_type, hlength_str)[0] - header = _read_bytes(fp, header_length, "array header") - header = header.decode(encoding) - - # The header is a pretty-printed string representation of a literal - # Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte - # boundary. The keys are strings. - # "shape" : tuple of int - # "fortran_order" : bool - # "descr" : dtype.descr - header = _filter_header(header) - try: - d = safe_eval(header) - except SyntaxError as e: - msg = "Cannot parse header: {!r}\nException: {!r}" - raise ValueError(msg.format(header, e)) - if not isinstance(d, dict): - msg = "Header is not a dictionary: {!r}" - raise ValueError(msg.format(d)) - keys = sorted(d.keys()) - if keys != ['descr', 'fortran_order', 'shape']: - msg = "Header does not contain the correct keys: {!r}" - raise ValueError(msg.format(keys)) - - # Sanity-check the values. - if (not isinstance(d['shape'], tuple) or - not numpy.all([isinstance(x, (int, long)) for x in d['shape']])): - msg = "shape is not valid: {!r}" - raise ValueError(msg.format(d['shape'])) - if not isinstance(d['fortran_order'], bool): - msg = "fortran_order is not a valid bool: {!r}" - raise ValueError(msg.format(d['fortran_order'])) - try: - dtype = descr_to_dtype(d['descr']) - except TypeError as e: - msg = "descr is not a valid dtype descriptor: {!r}" - raise ValueError(msg.format(d['descr'])) - - return d['shape'], d['fortran_order'], dtype - -def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None): - """ - Write an array to an NPY file, including a header. 
- - If the array is neither C-contiguous nor Fortran-contiguous AND the - file_like object is not a real file object, this function will have to - copy data in memory. - - Parameters - ---------- - fp : file_like object - An open, writable file object, or similar object with a - ``.write()`` method. - array : ndarray - The array to write to disk. - version : (int, int) or None, optional - The version number of the format. None means use the oldest - supported version that is able to store the data. Default: None - allow_pickle : bool, optional - Whether to allow writing pickled data. Default: True - pickle_kwargs : dict, optional - Additional keyword arguments to pass to pickle.dump, excluding - 'protocol'. These are only useful when pickling objects in object - arrays on Python 3 to Python 2 compatible format. - - Raises - ------ - ValueError - If the array cannot be persisted. This includes the case of - allow_pickle=False and array being an object array. - Various other errors - If the array contains Python objects as part of its dtype, the - process of pickling them may raise various errors if the objects - are not picklable. - - """ - _check_version(version) - _write_array_header(fp, header_data_from_array_1_0(array), version) - - if array.itemsize == 0: - buffersize = 0 - else: - # Set buffer size to 16 MiB to hide the Python loop overhead. - buffersize = max(16 * 1024 ** 2 // array.itemsize, 1) - - if array.dtype.hasobject: - # We contain Python objects so we cannot write out the data - # directly. 
Instead, we will pickle it out - if not allow_pickle: - raise ValueError("Object arrays cannot be saved when " - "allow_pickle=False") - if pickle_kwargs is None: - pickle_kwargs = {} - pickle.dump(array, fp, protocol=3, **pickle_kwargs) - elif array.flags.f_contiguous and not array.flags.c_contiguous: - if isfileobj(fp): - array.T.tofile(fp) - else: - for chunk in numpy.nditer( - array, flags=['external_loop', 'buffered', 'zerosize_ok'], - buffersize=buffersize, order='F'): - fp.write(chunk.tobytes('C')) - else: - if isfileobj(fp): - array.tofile(fp) - else: - for chunk in numpy.nditer( - array, flags=['external_loop', 'buffered', 'zerosize_ok'], - buffersize=buffersize, order='C'): - fp.write(chunk.tobytes('C')) - - -def read_array(fp, allow_pickle=False, pickle_kwargs=None): - """ - Read an array from an NPY file. - - Parameters - ---------- - fp : file_like object - If this is not a real file object, then this may take extra memory - and time. - allow_pickle : bool, optional - Whether to allow writing pickled data. Default: False - - .. versionchanged:: 1.16.3 - Made default False in response to CVE-2019-6446. - - pickle_kwargs : dict - Additional keyword arguments to pass to pickle.load. These are only - useful when loading object arrays saved on Python 2 when using - Python 3. - - Returns - ------- - array : ndarray - The array from the data on disk. - - Raises - ------ - ValueError - If the data is invalid, or allow_pickle=False and the file contains - an object array. - - """ - version = read_magic(fp) - _check_version(version) - shape, fortran_order, dtype = _read_array_header(fp, version) - if len(shape) == 0: - count = 1 - else: - count = numpy.multiply.reduce(shape, dtype=numpy.int64) - - # Now read the actual data. - if dtype.hasobject: - # The array contained Python objects. We need to unpickle the data. 
- if not allow_pickle: - raise ValueError("Object arrays cannot be loaded when " - "allow_pickle=False") - if pickle_kwargs is None: - pickle_kwargs = {} - try: - array = pickle.load(fp, **pickle_kwargs) - except UnicodeError as err: - if sys.version_info[0] >= 3: - # Friendlier error message - raise UnicodeError("Unpickling a python object failed: %r\n" - "You may need to pass the encoding= option " - "to numpy.load" % (err,)) - raise - else: - if isfileobj(fp): - # We can use the fast fromfile() function. - array = numpy.fromfile(fp, dtype=dtype, count=count) - else: - # This is not a real file. We have to read it the - # memory-intensive way. - # crc32 module fails on reads greater than 2 ** 32 bytes, - # breaking large reads from gzip streams. Chunk reads to - # BUFFER_SIZE bytes to avoid issue and reduce memory overhead - # of the read. In non-chunked case count < max_read_count, so - # only one read is performed. - - # Use np.ndarray instead of np.empty since the latter does - # not correctly instantiate zero-width string dtypes; see - # https://github.com/numpy/numpy/pull/6430 - array = numpy.ndarray(count, dtype=dtype) - - if dtype.itemsize > 0: - # If dtype.itemsize == 0 then there's nothing more to read - max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize) - - for i in range(0, count, max_read_count): - read_count = min(max_read_count, count - i) - read_size = int(read_count * dtype.itemsize) - data = _read_bytes(fp, read_size, "array data") - array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype, - count=read_count) - - if fortran_order: - array.shape = shape[::-1] - array = array.transpose() - else: - array.shape = shape - - return array - - -def open_memmap(filename, mode='r+', dtype=None, shape=None, - fortran_order=False, version=None): - """ - Open a .npy file as a memory-mapped array. - - This may be used to read an existing file or create a new one. 
- - Parameters - ---------- - filename : str or path-like - The name of the file on disk. This may *not* be a file-like - object. - mode : str, optional - The mode in which to open the file; the default is 'r+'. In - addition to the standard file modes, 'c' is also accepted to mean - "copy on write." See `memmap` for the available mode strings. - dtype : data-type, optional - The data type of the array if we are creating a new file in "write" - mode, if not, `dtype` is ignored. The default value is None, which - results in a data-type of `float64`. - shape : tuple of int - The shape of the array if we are creating a new file in "write" - mode, in which case this parameter is required. Otherwise, this - parameter is ignored and is thus optional. - fortran_order : bool, optional - Whether the array should be Fortran-contiguous (True) or - C-contiguous (False, the default) if we are creating a new file in - "write" mode. - version : tuple of int (major, minor) or None - If the mode is a "write" mode, then this is the version of the file - format used to create the file. None means use the oldest - supported version that is able to store the data. Default: None - - Returns - ------- - marray : memmap - The memory-mapped array. - - Raises - ------ - ValueError - If the data or the mode is invalid. - IOError - If the file is not found or cannot be opened correctly. - - See Also - -------- - memmap - - """ - if isfileobj(filename): - raise ValueError("Filename must be a string or a path-like object." - " Memmap cannot use existing file handles.") - - if 'w' in mode: - # We are creating the file, not reading it. - # Check if we ought to create the file. - _check_version(version) - # Ensure that the given dtype is an authentic dtype object rather - # than just something that can be interpreted as a dtype object. - dtype = numpy.dtype(dtype) - if dtype.hasobject: - msg = "Array can't be memory-mapped: Python objects in dtype." 
- raise ValueError(msg) - d = dict( - descr=dtype_to_descr(dtype), - fortran_order=fortran_order, - shape=shape, - ) - # If we got here, then it should be safe to create the file. - with open(os_fspath(filename), mode+'b') as fp: - _write_array_header(fp, d, version) - offset = fp.tell() - else: - # Read the header of the file first. - with open(os_fspath(filename), 'rb') as fp: - version = read_magic(fp) - _check_version(version) - - shape, fortran_order, dtype = _read_array_header(fp, version) - if dtype.hasobject: - msg = "Array can't be memory-mapped: Python objects in dtype." - raise ValueError(msg) - offset = fp.tell() - - if fortran_order: - order = 'F' - else: - order = 'C' - - # We need to change a write-only mode to a read-write mode since we've - # already written data to the file. - if mode == 'w+': - mode = 'r+' - - marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order, - mode=mode, offset=offset) - - return marray - - -def _read_bytes(fp, size, error_template="ran out of data"): - """ - Read from file-like object until size bytes are read. - Raises ValueError if not EOF is encountered before size bytes are read. - Non-blocking objects only supported if they derive from io objects. - - Required as e.g. ZipExtFile in python 2.6 can return less data than - requested. - """ - data = bytes() - while True: - # io files (default in python3) return None or raise on - # would-block, python2 file will truncate, probably nothing can be - # done about that. 
note that regular files can't be non-blocking - try: - r = fp.read(size - len(data)) - data += r - if len(r) == 0 or len(data) == size: - break - except io.BlockingIOError: - pass - if len(data) != size: - msg = "EOF: reading %s, expected %d bytes got %d" - raise ValueError(msg % (error_template, size, len(data))) - else: - return data diff --git a/venv/lib/python3.7/site-packages/numpy/lib/function_base.py b/venv/lib/python3.7/site-packages/numpy/lib/function_base.py deleted file mode 100644 index df06d10..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/function_base.py +++ /dev/null @@ -1,4808 +0,0 @@ -from __future__ import division, absolute_import, print_function - -try: - # Accessing collections abstract classes from collections - # has been deprecated since Python 3.3 - import collections.abc as collections_abc -except ImportError: - import collections as collections_abc -import functools -import re -import sys -import warnings - -import numpy as np -import numpy.core.numeric as _nx -from numpy.core import atleast_1d, transpose -from numpy.core.numeric import ( - ones, zeros, arange, concatenate, array, asarray, asanyarray, empty, - empty_like, ndarray, around, floor, ceil, take, dot, where, intp, - integer, isscalar, absolute - ) -from numpy.core.umath import ( - pi, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin, - mod, exp, not_equal, subtract - ) -from numpy.core.fromnumeric import ( - ravel, nonzero, partition, mean, any, sum - ) -from numpy.core.numerictypes import typecodes -from numpy.core.overrides import set_module -from numpy.core import overrides -from numpy.core.function_base import add_newdoc -from numpy.lib.twodim_base import diag -from numpy.core.multiarray import ( - _insert, add_docstring, bincount, normalize_axis_index, _monotonicity, - interp as compiled_interp, interp_complex as compiled_interp_complex - ) -from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc -from numpy.compat import long - -if 
sys.version_info[0] < 3: - # Force range to be a generator, for np.delete's usage. - range = xrange - import __builtin__ as builtins -else: - import builtins - - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - - -# needed in this module for compatibility -from numpy.lib.histograms import histogram, histogramdd - -__all__ = [ - 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile', - 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', 'flip', - 'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average', - 'bincount', 'digitize', 'cov', 'corrcoef', - 'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett', - 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring', - 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc', - 'quantile' - ] - - -def _rot90_dispatcher(m, k=None, axes=None): - return (m,) - - -@array_function_dispatch(_rot90_dispatcher) -def rot90(m, k=1, axes=(0,1)): - """ - Rotate an array by 90 degrees in the plane specified by axes. - - Rotation direction is from the first towards the second axis. - - Parameters - ---------- - m : array_like - Array of two or more dimensions. - k : integer - Number of times the array is rotated by 90 degrees. - axes: (2,) array_like - The array is rotated in the plane defined by the axes. - Axes must be different. - - .. versionadded:: 1.12.0 - - Returns - ------- - y : ndarray - A rotated view of `m`. - - See Also - -------- - flip : Reverse the order of elements in an array along the given axis. - fliplr : Flip an array horizontally. - flipud : Flip an array vertically. 
- - Notes - ----- - rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1)) - rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1)) - - Examples - -------- - >>> m = np.array([[1,2],[3,4]], int) - >>> m - array([[1, 2], - [3, 4]]) - >>> np.rot90(m) - array([[2, 4], - [1, 3]]) - >>> np.rot90(m, 2) - array([[4, 3], - [2, 1]]) - >>> m = np.arange(8).reshape((2,2,2)) - >>> np.rot90(m, 1, (1,2)) - array([[[1, 3], - [0, 2]], - [[5, 7], - [4, 6]]]) - - """ - axes = tuple(axes) - if len(axes) != 2: - raise ValueError("len(axes) must be 2.") - - m = asanyarray(m) - - if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim: - raise ValueError("Axes must be different.") - - if (axes[0] >= m.ndim or axes[0] < -m.ndim - or axes[1] >= m.ndim or axes[1] < -m.ndim): - raise ValueError("Axes={} out of range for array of ndim={}." - .format(axes, m.ndim)) - - k %= 4 - - if k == 0: - return m[:] - if k == 2: - return flip(flip(m, axes[0]), axes[1]) - - axes_list = arange(0, m.ndim) - (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]], - axes_list[axes[0]]) - - if k == 1: - return transpose(flip(m,axes[1]), axes_list) - else: - # k == 3 - return flip(transpose(m, axes_list), axes[1]) - - -def _flip_dispatcher(m, axis=None): - return (m,) - - -@array_function_dispatch(_flip_dispatcher) -def flip(m, axis=None): - """ - Reverse the order of elements in an array along the given axis. - - The shape of the array is preserved, but the elements are reordered. - - .. versionadded:: 1.12.0 - - Parameters - ---------- - m : array_like - Input array. - axis : None or int or tuple of ints, optional - Axis or axes along which to flip over. The default, - axis=None, will flip over all of the axes of the input array. - If axis is negative it counts from the last to the first axis. - - If axis is a tuple of ints, flipping is performed on all of the axes - specified in the tuple. - - .. 
versionchanged:: 1.15.0 - None and tuples of axes are supported - - Returns - ------- - out : array_like - A view of `m` with the entries of axis reversed. Since a view is - returned, this operation is done in constant time. - - See Also - -------- - flipud : Flip an array vertically (axis=0). - fliplr : Flip an array horizontally (axis=1). - - Notes - ----- - flip(m, 0) is equivalent to flipud(m). - - flip(m, 1) is equivalent to fliplr(m). - - flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n. - - flip(m) corresponds to ``m[::-1,::-1,...,::-1]`` with ``::-1`` at all - positions. - - flip(m, (0, 1)) corresponds to ``m[::-1,::-1,...]`` with ``::-1`` at - position 0 and position 1. - - Examples - -------- - >>> A = np.arange(8).reshape((2,2,2)) - >>> A - array([[[0, 1], - [2, 3]], - [[4, 5], - [6, 7]]]) - >>> np.flip(A, 0) - array([[[4, 5], - [6, 7]], - [[0, 1], - [2, 3]]]) - >>> np.flip(A, 1) - array([[[2, 3], - [0, 1]], - [[6, 7], - [4, 5]]]) - >>> np.flip(A) - array([[[7, 6], - [5, 4]], - [[3, 2], - [1, 0]]]) - >>> np.flip(A, (0, 2)) - array([[[5, 4], - [7, 6]], - [[1, 0], - [3, 2]]]) - >>> A = np.random.randn(3,4,5) - >>> np.all(np.flip(A,2) == A[:,:,::-1,...]) - True - """ - if not hasattr(m, 'ndim'): - m = asarray(m) - if axis is None: - indexer = (np.s_[::-1],) * m.ndim - else: - axis = _nx.normalize_axis_tuple(axis, m.ndim) - indexer = [np.s_[:]] * m.ndim - for ax in axis: - indexer[ax] = np.s_[::-1] - indexer = tuple(indexer) - return m[indexer] - - -@set_module('numpy') -def iterable(y): - """ - Check whether or not an object can be iterated over. - - Parameters - ---------- - y : object - Input object. - - Returns - ------- - b : bool - Return ``True`` if the object has an iterator method or is a - sequence and ``False`` otherwise. 
- - - Examples - -------- - >>> np.iterable([1, 2, 3]) - True - >>> np.iterable(2) - False - - """ - try: - iter(y) - except TypeError: - return False - return True - - -def _average_dispatcher(a, axis=None, weights=None, returned=None): - return (a, weights) - - -@array_function_dispatch(_average_dispatcher) -def average(a, axis=None, weights=None, returned=False): - """ - Compute the weighted average along the specified axis. - - Parameters - ---------- - a : array_like - Array containing data to be averaged. If `a` is not an array, a - conversion is attempted. - axis : None or int or tuple of ints, optional - Axis or axes along which to average `a`. The default, - axis=None, will average over all of the elements of the input array. - If axis is negative it counts from the last to the first axis. - - .. versionadded:: 1.7.0 - - If axis is a tuple of ints, averaging is performed on all of the axes - specified in the tuple instead of a single axis or all the axes as - before. - weights : array_like, optional - An array of weights associated with the values in `a`. Each value in - `a` contributes to the average according to its associated weight. - The weights array can either be 1-D (in which case its length must be - the size of `a` along the given axis) or of the same shape as `a`. - If `weights=None`, then all data in `a` are assumed to have a - weight equal to one. The 1-D calculation is:: - - avg = sum(a * weights) / sum(weights) - - The only constraint on `weights` is that `sum(weights)` must not be 0. - returned : bool, optional - Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`) - is returned, otherwise only the average is returned. - If `weights=None`, `sum_of_weights` is equivalent to the number of - elements over which the average is taken. - - Returns - ------- - retval, [sum_of_weights] : array_type or double - Return the average along the specified axis. 
When `returned` is `True`, - return a tuple with the average as the first element and the sum - of the weights as the second element. `sum_of_weights` is of the - same type as `retval`. The result dtype follows a genereal pattern. - If `weights` is None, the result dtype will be that of `a` , or ``float64`` - if `a` is integral. Otherwise, if `weights` is not None and `a` is non- - integral, the result type will be the type of lowest precision capable of - representing values of both `a` and `weights`. If `a` happens to be - integral, the previous rules still applies but the result dtype will - at least be ``float64``. - - Raises - ------ - ZeroDivisionError - When all weights along axis are zero. See `numpy.ma.average` for a - version robust to this type of error. - TypeError - When the length of 1D `weights` is not the same as the shape of `a` - along axis. - - See Also - -------- - mean - - ma.average : average for masked arrays -- useful if your data contains - "missing" values - numpy.result_type : Returns the type that results from applying the - numpy type promotion rules to the arguments. - - Examples - -------- - >>> data = np.arange(1, 5) - >>> data - array([1, 2, 3, 4]) - >>> np.average(data) - 2.5 - >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1)) - 4.0 - - >>> data = np.arange(6).reshape((3,2)) - >>> data - array([[0, 1], - [2, 3], - [4, 5]]) - >>> np.average(data, axis=1, weights=[1./4, 3./4]) - array([0.75, 2.75, 4.75]) - >>> np.average(data, weights=[1./4, 3./4]) - Traceback (most recent call last): - ... - TypeError: Axis must be specified when shapes of a and weights differ. 
- - >>> a = np.ones(5, dtype=np.float128) - >>> w = np.ones(5, dtype=np.complex64) - >>> avg = np.average(a, weights=w) - >>> print(avg.dtype) - complex256 - """ - a = np.asanyarray(a) - - if weights is None: - avg = a.mean(axis) - scl = avg.dtype.type(a.size/avg.size) - else: - wgt = np.asanyarray(weights) - - if issubclass(a.dtype.type, (np.integer, np.bool_)): - result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8') - else: - result_dtype = np.result_type(a.dtype, wgt.dtype) - - # Sanity checks - if a.shape != wgt.shape: - if axis is None: - raise TypeError( - "Axis must be specified when shapes of a and weights " - "differ.") - if wgt.ndim != 1: - raise TypeError( - "1D weights expected when shapes of a and weights differ.") - if wgt.shape[0] != a.shape[axis]: - raise ValueError( - "Length of weights not compatible with specified axis.") - - # setup wgt to broadcast along axis - wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape) - wgt = wgt.swapaxes(-1, axis) - - scl = wgt.sum(axis=axis, dtype=result_dtype) - if np.any(scl == 0.0): - raise ZeroDivisionError( - "Weights sum to zero, can't be normalized") - - avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl - - if returned: - if scl.shape != avg.shape: - scl = np.broadcast_to(scl, avg.shape).copy() - return avg, scl - else: - return avg - - -@set_module('numpy') -def asarray_chkfinite(a, dtype=None, order=None): - """Convert the input to an array, checking for NaNs or Infs. - - Parameters - ---------- - a : array_like - Input data, in any form that can be converted to an array. This - includes lists, lists of tuples, tuples, tuples of tuples, tuples - of lists and ndarrays. Success requires no NaNs or Infs. - dtype : data-type, optional - By default, the data-type is inferred from the input data. - order : {'C', 'F'}, optional - Whether to use row-major (C-style) or - column-major (Fortran-style) memory representation. - Defaults to 'C'. 
- - Returns - ------- - out : ndarray - Array interpretation of `a`. No copy is performed if the input - is already an ndarray. If `a` is a subclass of ndarray, a base - class ndarray is returned. - - Raises - ------ - ValueError - Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity). - - See Also - -------- - asarray : Create and array. - asanyarray : Similar function which passes through subclasses. - ascontiguousarray : Convert input to a contiguous array. - asfarray : Convert input to a floating point ndarray. - asfortranarray : Convert input to an ndarray with column-major - memory order. - fromiter : Create an array from an iterator. - fromfunction : Construct an array by executing a function on grid - positions. - - Examples - -------- - Convert a list into an array. If all elements are finite - ``asarray_chkfinite`` is identical to ``asarray``. - - >>> a = [1, 2] - >>> np.asarray_chkfinite(a, dtype=float) - array([1., 2.]) - - Raises ValueError if array_like contains Nans or Infs. - - >>> a = [1, 2, np.inf] - >>> try: - ... np.asarray_chkfinite(a) - ... except ValueError: - ... print('ValueError') - ... - ValueError - - """ - a = asarray(a, dtype=dtype, order=order) - if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all(): - raise ValueError( - "array must not contain infs or NaNs") - return a - - -def _piecewise_dispatcher(x, condlist, funclist, *args, **kw): - yield x - # support the undocumented behavior of allowing scalars - if np.iterable(condlist): - for c in condlist: - yield c - - -@array_function_dispatch(_piecewise_dispatcher) -def piecewise(x, condlist, funclist, *args, **kw): - """ - Evaluate a piecewise-defined function. - - Given a set of conditions and corresponding functions, evaluate each - function on the input data wherever its condition is true. - - Parameters - ---------- - x : ndarray or scalar - The input domain. 
- condlist : list of bool arrays or bool scalars - Each boolean array corresponds to a function in `funclist`. Wherever - `condlist[i]` is True, `funclist[i](x)` is used as the output value. - - Each boolean array in `condlist` selects a piece of `x`, - and should therefore be of the same shape as `x`. - - The length of `condlist` must correspond to that of `funclist`. - If one extra function is given, i.e. if - ``len(funclist) == len(condlist) + 1``, then that extra function - is the default value, used wherever all conditions are false. - funclist : list of callables, f(x,*args,**kw), or scalars - Each function is evaluated over `x` wherever its corresponding - condition is True. It should take a 1d array as input and give an 1d - array or a scalar value as output. If, instead of a callable, - a scalar is provided then a constant function (``lambda x: scalar``) is - assumed. - args : tuple, optional - Any further arguments given to `piecewise` are passed to the functions - upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then - each function is called as ``f(x, 1, 'a')``. - kw : dict, optional - Keyword arguments used in calling `piecewise` are passed to the - functions upon execution, i.e., if called - ``piecewise(..., ..., alpha=1)``, then each function is called as - ``f(x, alpha=1)``. - - Returns - ------- - out : ndarray - The output is the same shape and type as x and is found by - calling the functions in `funclist` on the appropriate portions of `x`, - as defined by the boolean arrays in `condlist`. Portions not covered - by any condition have a default value of 0. - - - See Also - -------- - choose, select, where - - Notes - ----- - This is similar to choose or select, except that functions are - evaluated on elements of `x` that satisfy the corresponding condition from - `condlist`. - - The result is:: - - |-- - |funclist[0](x[condlist[0]]) - out = |funclist[1](x[condlist[1]]) - |... 
- |funclist[n2](x[condlist[n2]]) - |-- - - Examples - -------- - Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``. - - >>> x = np.linspace(-2.5, 2.5, 6) - >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1]) - array([-1., -1., -1., 1., 1., 1.]) - - Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for - ``x >= 0``. - - >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x]) - array([2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) - - Apply the same function to a scalar value. - - >>> y = -2 - >>> np.piecewise(y, [y < 0, y >= 0], [lambda x: -x, lambda x: x]) - array(2) - - """ - x = asanyarray(x) - n2 = len(funclist) - - # undocumented: single condition is promoted to a list of one condition - if isscalar(condlist) or ( - not isinstance(condlist[0], (list, ndarray)) and x.ndim != 0): - condlist = [condlist] - - condlist = array(condlist, dtype=bool) - n = len(condlist) - - if n == n2 - 1: # compute the "otherwise" condition. - condelse = ~np.any(condlist, axis=0, keepdims=True) - condlist = np.concatenate([condlist, condelse], axis=0) - n += 1 - elif n != n2: - raise ValueError( - "with {} condition(s), either {} or {} functions are expected" - .format(n, n, n+1) - ) - - y = zeros(x.shape, x.dtype) - for k in range(n): - item = funclist[k] - if not isinstance(item, collections_abc.Callable): - y[condlist[k]] = item - else: - vals = x[condlist[k]] - if vals.size > 0: - y[condlist[k]] = item(vals, *args, **kw) - - return y - - -def _select_dispatcher(condlist, choicelist, default=None): - for c in condlist: - yield c - for c in choicelist: - yield c - - -@array_function_dispatch(_select_dispatcher) -def select(condlist, choicelist, default=0): - """ - Return an array drawn from elements in choicelist, depending on conditions. - - Parameters - ---------- - condlist : list of bool ndarrays - The list of conditions which determine from which array in `choicelist` - the output elements are taken. 
When multiple conditions are satisfied, - the first one encountered in `condlist` is used. - choicelist : list of ndarrays - The list of arrays from which the output elements are taken. It has - to be of the same length as `condlist`. - default : scalar, optional - The element inserted in `output` when all conditions evaluate to False. - - Returns - ------- - output : ndarray - The output at position m is the m-th element of the array in - `choicelist` where the m-th element of the corresponding array in - `condlist` is True. - - See Also - -------- - where : Return elements from one of two arrays depending on condition. - take, choose, compress, diag, diagonal - - Examples - -------- - >>> x = np.arange(10) - >>> condlist = [x<3, x>5] - >>> choicelist = [x, x**2] - >>> np.select(condlist, choicelist) - array([ 0, 1, 2, ..., 49, 64, 81]) - - """ - # Check the size of condlist and choicelist are the same, or abort. - if len(condlist) != len(choicelist): - raise ValueError( - 'list of cases must be same length as list of conditions') - - # Now that the dtype is known, handle the deprecated select([], []) case - if len(condlist) == 0: - raise ValueError("select with an empty condition list is not possible") - - choicelist = [np.asarray(choice) for choice in choicelist] - choicelist.append(np.asarray(default)) - - # need to get the result type before broadcasting for correct scalar - # behaviour - dtype = np.result_type(*choicelist) - - # Convert conditions to arrays and broadcast conditions and choices - # as the shape is needed for the result. Doing it separately optimizes - # for example when all choices are scalars. - condlist = np.broadcast_arrays(*condlist) - choicelist = np.broadcast_arrays(*choicelist) - - # If cond array is not an ndarray in boolean format or scalar bool, abort. 
- for i in range(len(condlist)): - cond = condlist[i] - if cond.dtype.type is not np.bool_: - raise TypeError( - 'invalid entry {} in condlist: should be boolean ndarray'.format(i)) - - if choicelist[0].ndim == 0: - # This may be common, so avoid the call. - result_shape = condlist[0].shape - else: - result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape - - result = np.full(result_shape, choicelist[-1], dtype) - - # Use np.copyto to burn each choicelist array onto result, using the - # corresponding condlist as a boolean mask. This is done in reverse - # order since the first choice should take precedence. - choicelist = choicelist[-2::-1] - condlist = condlist[::-1] - for choice, cond in zip(choicelist, condlist): - np.copyto(result, choice, where=cond) - - return result - - -def _copy_dispatcher(a, order=None): - return (a,) - - -@array_function_dispatch(_copy_dispatcher) -def copy(a, order='K'): - """ - Return an array copy of the given object. - - Parameters - ---------- - a : array_like - Input data. - order : {'C', 'F', 'A', 'K'}, optional - Controls the memory layout of the copy. 'C' means C-order, - 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, - 'C' otherwise. 'K' means match the layout of `a` as closely - as possible. (Note that this function and :meth:`ndarray.copy` are very - similar, but have different default values for their order= - arguments.) - - Returns - ------- - arr : ndarray - Array interpretation of `a`. 
- - Notes - ----- - This is equivalent to: - - >>> np.array(a, copy=True) #doctest: +SKIP - - Examples - -------- - Create an array x, with a reference y and a copy z: - - >>> x = np.array([1, 2, 3]) - >>> y = x - >>> z = np.copy(x) - - Note that, when we modify x, y changes, but not z: - - >>> x[0] = 10 - >>> x[0] == y[0] - True - >>> x[0] == z[0] - False - - """ - return array(a, order=order, copy=True) - -# Basic operations - - -def _gradient_dispatcher(f, *varargs, **kwargs): - yield f - for v in varargs: - yield v - - -@array_function_dispatch(_gradient_dispatcher) -def gradient(f, *varargs, **kwargs): - """ - Return the gradient of an N-dimensional array. - - The gradient is computed using second order accurate central differences - in the interior points and either first or second order accurate one-sides - (forward or backwards) differences at the boundaries. - The returned gradient hence has the same shape as the input array. - - Parameters - ---------- - f : array_like - An N-dimensional array containing samples of a scalar function. - varargs : list of scalar or array, optional - Spacing between f values. Default unitary spacing for all dimensions. - Spacing can be specified using: - - 1. single scalar to specify a sample distance for all dimensions. - 2. N scalars to specify a constant sample distance for each dimension. - i.e. `dx`, `dy`, `dz`, ... - 3. N arrays to specify the coordinates of the values along each - dimension of F. The length of the array must match the size of - the corresponding dimension - 4. Any combination of N scalars/arrays with the meaning of 2. and 3. - - If `axis` is given, the number of varargs must equal the number of axes. - Default: 1. - - edge_order : {1, 2}, optional - Gradient is calculated using N-th order accurate differences - at the boundaries. Default: 1. - - .. 
versionadded:: 1.9.1 - - axis : None or int or tuple of ints, optional - Gradient is calculated only along the given axis or axes - The default (axis = None) is to calculate the gradient for all the axes - of the input array. axis may be negative, in which case it counts from - the last to the first axis. - - .. versionadded:: 1.11.0 - - Returns - ------- - gradient : ndarray or list of ndarray - A set of ndarrays (or a single ndarray if there is only one dimension) - corresponding to the derivatives of f with respect to each dimension. - Each derivative has the same shape as f. - - Examples - -------- - >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=float) - >>> np.gradient(f) - array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) - >>> np.gradient(f, 2) - array([0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) - - Spacing can be also specified with an array that represents the coordinates - of the values F along the dimensions. - For instance a uniform spacing: - - >>> x = np.arange(f.size) - >>> np.gradient(f, x) - array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) - - Or a non uniform one: - - >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=float) - >>> np.gradient(f, x) - array([1. , 3. , 3.5, 6.7, 6.9, 2.5]) - - For two dimensional arrays, the return will be two arrays ordered by - axis. In this example the first array stands for the gradient in - rows and the second one in columns direction: - - >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float)) - [array([[ 2., 2., -1.], - [ 2., 2., -1.]]), array([[1. , 2.5, 4. ], - [1. , 1. , 1. ]])] - - In this example the spacing is also specified: - uniform for axis=0 and non uniform for axis=1 - - >>> dx = 2. - >>> y = [1., 1.5, 3.5] - >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), dx, y) - [array([[ 1. , 1. , -0.5], - [ 1. , 1. , -0.5]]), array([[2. , 2. , 2. ], - [2. 
, 1.7, 0.5]])] - - It is possible to specify how boundaries are treated using `edge_order` - - >>> x = np.array([0, 1, 2, 3, 4]) - >>> f = x**2 - >>> np.gradient(f, edge_order=1) - array([1., 2., 4., 6., 7.]) - >>> np.gradient(f, edge_order=2) - array([0., 2., 4., 6., 8.]) - - The `axis` keyword can be used to specify a subset of axes of which the - gradient is calculated - - >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), axis=0) - array([[ 2., 2., -1.], - [ 2., 2., -1.]]) - - Notes - ----- - Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continuous - derivatives) and let :math:`h_{*}` be a non-homogeneous stepsize, we - minimize the "consistency error" :math:`\\eta_{i}` between the true gradient - and its estimate from a linear combination of the neighboring grid-points: - - .. math:: - - \\eta_{i} = f_{i}^{\\left(1\\right)} - - \\left[ \\alpha f\\left(x_{i}\\right) + - \\beta f\\left(x_{i} + h_{d}\\right) + - \\gamma f\\left(x_{i}-h_{s}\\right) - \\right] - - By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})` - with their Taylor series expansion, this translates into solving - the following the linear system: - - .. math:: - - \\left\\{ - \\begin{array}{r} - \\alpha+\\beta+\\gamma=0 \\\\ - \\beta h_{d}-\\gamma h_{s}=1 \\\\ - \\beta h_{d}^{2}+\\gamma h_{s}^{2}=0 - \\end{array} - \\right. - - The resulting approximation of :math:`f_{i}^{(1)}` is the following: - - .. math:: - - \\hat f_{i}^{(1)} = - \\frac{ - h_{s}^{2}f\\left(x_{i} + h_{d}\\right) - + \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right) - - h_{d}^{2}f\\left(x_{i}-h_{s}\\right)} - { h_{s}h_{d}\\left(h_{d} + h_{s}\\right)} - + \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2} - + h_{s}h_{d}^{2}}{h_{d} - + h_{s}}\\right) - - It is worth noting that if :math:`h_{s}=h_{d}` - (i.e., data are evenly spaced) - we find the standard second order approximation: - - .. 
math:: - - \\hat f_{i}^{(1)}= - \\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h} - + \\mathcal{O}\\left(h^{2}\\right) - - With a similar procedure the forward/backward approximations used for - boundaries can be derived. - - References - ---------- - .. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics - (Texts in Applied Mathematics). New York: Springer. - .. [2] Durran D. R. (1999) Numerical Methods for Wave Equations - in Geophysical Fluid Dynamics. New York: Springer. - .. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on - Arbitrarily Spaced Grids, - Mathematics of Computation 51, no. 184 : 699-706. - `PDF `_. - """ - f = np.asanyarray(f) - N = f.ndim # number of dimensions - - axes = kwargs.pop('axis', None) - if axes is None: - axes = tuple(range(N)) - else: - axes = _nx.normalize_axis_tuple(axes, N) - - len_axes = len(axes) - n = len(varargs) - if n == 0: - # no spacing argument - use 1 in all axes - dx = [1.0] * len_axes - elif n == 1 and np.ndim(varargs[0]) == 0: - # single scalar for all axes - dx = varargs * len_axes - elif n == len_axes: - # scalar or 1d array for each axis - dx = list(varargs) - for i, distances in enumerate(dx): - distances = np.asanyarray(distances) - if distances.ndim == 0: - continue - elif distances.ndim != 1: - raise ValueError("distances must be either scalars or 1d") - if len(distances) != f.shape[axes[i]]: - raise ValueError("when 1d, distances must match " - "the length of the corresponding dimension") - if np.issubdtype(distances.dtype, np.integer): - # Convert numpy integer types to float64 to avoid modular - # arithmetic in np.diff(distances). 
- distances = distances.astype(np.float64) - diffx = np.diff(distances) - # if distances are constant reduce to the scalar case - # since it brings a consistent speedup - if (diffx == diffx[0]).all(): - diffx = diffx[0] - dx[i] = diffx - else: - raise TypeError("invalid number of arguments") - - edge_order = kwargs.pop('edge_order', 1) - if kwargs: - raise TypeError('"{}" are not valid keyword arguments.'.format( - '", "'.join(kwargs.keys()))) - if edge_order > 2: - raise ValueError("'edge_order' greater than 2 not supported") - - # use central differences on interior and one-sided differences on the - # endpoints. This preserves second order-accuracy over the full domain. - - outvals = [] - - # create slice objects --- initially all are [:, :, ..., :] - slice1 = [slice(None)]*N - slice2 = [slice(None)]*N - slice3 = [slice(None)]*N - slice4 = [slice(None)]*N - - otype = f.dtype - if otype.type is np.datetime64: - # the timedelta dtype with the same unit information - otype = np.dtype(otype.name.replace('datetime', 'timedelta')) - # view as timedelta to allow addition - f = f.view(otype) - elif otype.type is np.timedelta64: - pass - elif np.issubdtype(otype, np.inexact): - pass - else: - # All other types convert to floating point. - # First check if f is a numpy integer type; if so, convert f to float64 - # to avoid modular arithmetic when computing the changes in f. 
- if np.issubdtype(otype, np.integer): - f = f.astype(np.float64) - otype = np.float64 - - for axis, ax_dx in zip(axes, dx): - if f.shape[axis] < edge_order + 1: - raise ValueError( - "Shape of array too small to calculate a numerical gradient, " - "at least (edge_order + 1) elements are required.") - # result allocation - out = np.empty_like(f, dtype=otype) - - # spacing for the current axis - uniform_spacing = np.ndim(ax_dx) == 0 - - # Numerical differentiation: 2nd order interior - slice1[axis] = slice(1, -1) - slice2[axis] = slice(None, -2) - slice3[axis] = slice(1, -1) - slice4[axis] = slice(2, None) - - if uniform_spacing: - out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2. * ax_dx) - else: - dx1 = ax_dx[0:-1] - dx2 = ax_dx[1:] - a = -(dx2)/(dx1 * (dx1 + dx2)) - b = (dx2 - dx1) / (dx1 * dx2) - c = dx1 / (dx2 * (dx1 + dx2)) - # fix the shape for broadcasting - shape = np.ones(N, dtype=int) - shape[axis] = -1 - a.shape = b.shape = c.shape = shape - # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:] - out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] - - # Numerical differentiation: 1st order edges - if edge_order == 1: - slice1[axis] = 0 - slice2[axis] = 1 - slice3[axis] = 0 - dx_0 = ax_dx if uniform_spacing else ax_dx[0] - # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0]) - out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_0 - - slice1[axis] = -1 - slice2[axis] = -1 - slice3[axis] = -2 - dx_n = ax_dx if uniform_spacing else ax_dx[-1] - # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2]) - out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_n - - # Numerical differentiation: 2nd order edges - else: - slice1[axis] = 0 - slice2[axis] = 0 - slice3[axis] = 1 - slice4[axis] = 2 - if uniform_spacing: - a = -1.5 / ax_dx - b = 2. / ax_dx - c = -0.5 / ax_dx - else: - dx1 = ax_dx[0] - dx2 = ax_dx[1] - a = -(2. 
* dx1 + dx2)/(dx1 * (dx1 + dx2)) - b = (dx1 + dx2) / (dx1 * dx2) - c = - dx1 / (dx2 * (dx1 + dx2)) - # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2] - out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] - - slice1[axis] = -1 - slice2[axis] = -3 - slice3[axis] = -2 - slice4[axis] = -1 - if uniform_spacing: - a = 0.5 / ax_dx - b = -2. / ax_dx - c = 1.5 / ax_dx - else: - dx1 = ax_dx[-2] - dx2 = ax_dx[-1] - a = (dx2) / (dx1 * (dx1 + dx2)) - b = - (dx2 + dx1) / (dx1 * dx2) - c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2)) - # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1] - out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] - - outvals.append(out) - - # reset the slice object in this dimension to ":" - slice1[axis] = slice(None) - slice2[axis] = slice(None) - slice3[axis] = slice(None) - slice4[axis] = slice(None) - - if len_axes == 1: - return outvals[0] - else: - return outvals - - -def _diff_dispatcher(a, n=None, axis=None, prepend=None, append=None): - return (a, prepend, append) - - -@array_function_dispatch(_diff_dispatcher) -def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): - """ - Calculate the n-th discrete difference along the given axis. - - The first difference is given by ``out[i] = a[i+1] - a[i]`` along - the given axis, higher differences are calculated by using `diff` - recursively. - - Parameters - ---------- - a : array_like - Input array - n : int, optional - The number of times values are differenced. If zero, the input - is returned as-is. - axis : int, optional - The axis along which the difference is taken, default is the - last axis. - prepend, append : array_like, optional - Values to prepend or append to `a` along axis prior to - performing the difference. Scalar values are expanded to - arrays with length 1 in the direction of axis and the shape - of the input array in along all other axes. 
Otherwise the - dimension and shape must match `a` except along axis. - - .. versionadded:: 1.16.0 - - Returns - ------- - diff : ndarray - The n-th differences. The shape of the output is the same as `a` - except along `axis` where the dimension is smaller by `n`. The - type of the output is the same as the type of the difference - between any two elements of `a`. This is the same as the type of - `a` in most cases. A notable exception is `datetime64`, which - results in a `timedelta64` output array. - - See Also - -------- - gradient, ediff1d, cumsum - - Notes - ----- - Type is preserved for boolean arrays, so the result will contain - `False` when consecutive elements are the same and `True` when they - differ. - - For unsigned integer arrays, the results will also be unsigned. This - should not be surprising, as the result is consistent with - calculating the difference directly: - - >>> u8_arr = np.array([1, 0], dtype=np.uint8) - >>> np.diff(u8_arr) - array([255], dtype=uint8) - >>> u8_arr[1,...] - u8_arr[0,...] 
- 255 - - If this is not desirable, then the array should be cast to a larger - integer type first: - - >>> i16_arr = u8_arr.astype(np.int16) - >>> np.diff(i16_arr) - array([-1], dtype=int16) - - Examples - -------- - >>> x = np.array([1, 2, 4, 7, 0]) - >>> np.diff(x) - array([ 1, 2, 3, -7]) - >>> np.diff(x, n=2) - array([ 1, 1, -10]) - - >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) - >>> np.diff(x) - array([[2, 3, 4], - [5, 1, 2]]) - >>> np.diff(x, axis=0) - array([[-1, 2, 0, -2]]) - - >>> x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64) - >>> np.diff(x) - array([1, 1], dtype='timedelta64[D]') - - """ - if n == 0: - return a - if n < 0: - raise ValueError( - "order must be non-negative but got " + repr(n)) - - a = asanyarray(a) - nd = a.ndim - if nd == 0: - raise ValueError("diff requires input that is at least one dimensional") - axis = normalize_axis_index(axis, nd) - - combined = [] - if prepend is not np._NoValue: - prepend = np.asanyarray(prepend) - if prepend.ndim == 0: - shape = list(a.shape) - shape[axis] = 1 - prepend = np.broadcast_to(prepend, tuple(shape)) - combined.append(prepend) - - combined.append(a) - - if append is not np._NoValue: - append = np.asanyarray(append) - if append.ndim == 0: - shape = list(a.shape) - shape[axis] = 1 - append = np.broadcast_to(append, tuple(shape)) - combined.append(append) - - if len(combined) > 1: - a = np.concatenate(combined, axis) - - slice1 = [slice(None)] * nd - slice2 = [slice(None)] * nd - slice1[axis] = slice(1, None) - slice2[axis] = slice(None, -1) - slice1 = tuple(slice1) - slice2 = tuple(slice2) - - op = not_equal if a.dtype == np.bool_ else subtract - for _ in range(n): - a = op(a[slice1], a[slice2]) - - return a - - -def _interp_dispatcher(x, xp, fp, left=None, right=None, period=None): - return (x, xp, fp) - - -@array_function_dispatch(_interp_dispatcher) -def interp(x, xp, fp, left=None, right=None, period=None): - """ - One-dimensional linear interpolation. 
- - Returns the one-dimensional piecewise linear interpolant to a function - with given discrete data points (`xp`, `fp`), evaluated at `x`. - - Parameters - ---------- - x : array_like - The x-coordinates at which to evaluate the interpolated values. - - xp : 1-D sequence of floats - The x-coordinates of the data points, must be increasing if argument - `period` is not specified. Otherwise, `xp` is internally sorted after - normalizing the periodic boundaries with ``xp = xp % period``. - - fp : 1-D sequence of float or complex - The y-coordinates of the data points, same length as `xp`. - - left : optional float or complex corresponding to fp - Value to return for `x < xp[0]`, default is `fp[0]`. - - right : optional float or complex corresponding to fp - Value to return for `x > xp[-1]`, default is `fp[-1]`. - - period : None or float, optional - A period for the x-coordinates. This parameter allows the proper - interpolation of angular x-coordinates. Parameters `left` and `right` - are ignored if `period` is specified. - - .. versionadded:: 1.10.0 - - Returns - ------- - y : float or complex (corresponding to fp) or ndarray - The interpolated values, same shape as `x`. - - Raises - ------ - ValueError - If `xp` and `fp` have different length - If `xp` or `fp` are not 1-D sequences - If `period == 0` - - Notes - ----- - The x-coordinate sequence is expected to be increasing, but this is not - explicitly enforced. However, if the sequence `xp` is non-increasing, - interpolation results are meaningless. - - Note that, since NaN is unsortable, `xp` also cannot contain NaNs. - - A simple check for `xp` being strictly increasing is:: - - np.all(np.diff(xp) > 0) - - Examples - -------- - >>> xp = [1, 2, 3] - >>> fp = [3, 2, 0] - >>> np.interp(2.5, xp, fp) - 1.0 - >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp) - array([3. , 3. , 2.5 , 0.56, 0. 
]) - >>> UNDEF = -99.0 - >>> np.interp(3.14, xp, fp, right=UNDEF) - -99.0 - - Plot an interpolant to the sine function: - - >>> x = np.linspace(0, 2*np.pi, 10) - >>> y = np.sin(x) - >>> xvals = np.linspace(0, 2*np.pi, 50) - >>> yinterp = np.interp(xvals, x, y) - >>> import matplotlib.pyplot as plt - >>> plt.plot(x, y, 'o') - [] - >>> plt.plot(xvals, yinterp, '-x') - [] - >>> plt.show() - - Interpolation with periodic x-coordinates: - - >>> x = [-180, -170, -185, 185, -10, -5, 0, 365] - >>> xp = [190, -190, 350, -350] - >>> fp = [5, 10, 3, 4] - >>> np.interp(x, xp, fp, period=360) - array([7.5 , 5. , 8.75, 6.25, 3. , 3.25, 3.5 , 3.75]) - - Complex interpolation: - - >>> x = [1.5, 4.0] - >>> xp = [2,3,5] - >>> fp = [1.0j, 0, 2+3j] - >>> np.interp(x, xp, fp) - array([0.+1.j , 1.+1.5j]) - - """ - - fp = np.asarray(fp) - - if np.iscomplexobj(fp): - interp_func = compiled_interp_complex - input_dtype = np.complex128 - else: - interp_func = compiled_interp - input_dtype = np.float64 - - if period is not None: - if period == 0: - raise ValueError("period must be a non-zero value") - period = abs(period) - left = None - right = None - - x = np.asarray(x, dtype=np.float64) - xp = np.asarray(xp, dtype=np.float64) - fp = np.asarray(fp, dtype=input_dtype) - - if xp.ndim != 1 or fp.ndim != 1: - raise ValueError("Data points must be 1-D sequences") - if xp.shape[0] != fp.shape[0]: - raise ValueError("fp and xp are not of the same length") - # normalizing periodic boundaries - x = x % period - xp = xp % period - asort_xp = np.argsort(xp) - xp = xp[asort_xp] - fp = fp[asort_xp] - xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period)) - fp = np.concatenate((fp[-1:], fp, fp[0:1])) - - return interp_func(x, xp, fp, left, right) - - -def _angle_dispatcher(z, deg=None): - return (z,) - - -@array_function_dispatch(_angle_dispatcher) -def angle(z, deg=False): - """ - Return the angle of the complex argument. 
- - Parameters - ---------- - z : array_like - A complex number or sequence of complex numbers. - deg : bool, optional - Return angle in degrees if True, radians if False (default). - - Returns - ------- - angle : ndarray or scalar - The counterclockwise angle from the positive real axis on the complex - plane in the range ``(-pi, pi]``, with dtype as numpy.float64. - - ..versionchanged:: 1.16.0 - This function works on subclasses of ndarray like `ma.array`. - - See Also - -------- - arctan2 - absolute - - Examples - -------- - >>> np.angle([1.0, 1.0j, 1+1j]) # in radians - array([ 0. , 1.57079633, 0.78539816]) # may vary - >>> np.angle(1+1j, deg=True) # in degrees - 45.0 - - """ - z = asanyarray(z) - if issubclass(z.dtype.type, _nx.complexfloating): - zimag = z.imag - zreal = z.real - else: - zimag = 0 - zreal = z - - a = arctan2(zimag, zreal) - if deg: - a *= 180/pi - return a - - -def _unwrap_dispatcher(p, discont=None, axis=None): - return (p,) - - -@array_function_dispatch(_unwrap_dispatcher) -def unwrap(p, discont=pi, axis=-1): - """ - Unwrap by changing deltas between values to 2*pi complement. - - Unwrap radian phase `p` by changing absolute jumps greater than - `discont` to their 2*pi complement along the given axis. - - Parameters - ---------- - p : array_like - Input array. - discont : float, optional - Maximum discontinuity between values, default is ``pi``. - axis : int, optional - Axis along which unwrap will operate, default is the last axis. - - Returns - ------- - out : ndarray - Output array. - - See Also - -------- - rad2deg, deg2rad - - Notes - ----- - If the discontinuity in `p` is smaller than ``pi``, but larger than - `discont`, no unwrapping is done because taking the 2*pi complement - would only make the discontinuity larger. - - Examples - -------- - >>> phase = np.linspace(0, np.pi, num=5) - >>> phase[3:] += np.pi - >>> phase - array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) # may vary - >>> np.unwrap(phase) - array([ 0. 
, 0.78539816, 1.57079633, -0.78539816, 0. ]) # may vary - - """ - p = asarray(p) - nd = p.ndim - dd = diff(p, axis=axis) - slice1 = [slice(None, None)]*nd # full slices - slice1[axis] = slice(1, None) - slice1 = tuple(slice1) - ddmod = mod(dd + pi, 2*pi) - pi - _nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0)) - ph_correct = ddmod - dd - _nx.copyto(ph_correct, 0, where=abs(dd) < discont) - up = array(p, copy=True, dtype='d') - up[slice1] = p[slice1] + ph_correct.cumsum(axis) - return up - - -def _sort_complex(a): - return (a,) - - -@array_function_dispatch(_sort_complex) -def sort_complex(a): - """ - Sort a complex array using the real part first, then the imaginary part. - - Parameters - ---------- - a : array_like - Input array - - Returns - ------- - out : complex ndarray - Always returns a sorted complex array. - - Examples - -------- - >>> np.sort_complex([5, 3, 6, 2, 1]) - array([1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) - - >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) - array([1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) - - """ - b = array(a, copy=True) - b.sort() - if not issubclass(b.dtype.type, _nx.complexfloating): - if b.dtype.char in 'bhBH': - return b.astype('F') - elif b.dtype.char == 'g': - return b.astype('G') - else: - return b.astype('D') - else: - return b - - -def _trim_zeros(filt, trim=None): - return (filt,) - - -@array_function_dispatch(_trim_zeros) -def trim_zeros(filt, trim='fb'): - """ - Trim the leading and/or trailing zeros from a 1-D array or sequence. - - Parameters - ---------- - filt : 1-D array or sequence - Input array. - trim : str, optional - A string with 'f' representing trim from front and 'b' to trim from - back. Default is 'fb', trim zeros from both front and back of the - array. - - Returns - ------- - trimmed : 1-D array or sequence - The result of trimming the input. The input data type is preserved. 
- - Examples - -------- - >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)) - >>> np.trim_zeros(a) - array([1, 2, 3, 0, 2, 1]) - - >>> np.trim_zeros(a, 'b') - array([0, 0, 0, ..., 0, 2, 1]) - - The input data type is preserved, list/tuple in means list/tuple out. - - >>> np.trim_zeros([0, 1, 2, 0]) - [1, 2] - - """ - first = 0 - trim = trim.upper() - if 'F' in trim: - for i in filt: - if i != 0.: - break - else: - first = first + 1 - last = len(filt) - if 'B' in trim: - for i in filt[::-1]: - if i != 0.: - break - else: - last = last - 1 - return filt[first:last] - -def _extract_dispatcher(condition, arr): - return (condition, arr) - - -@array_function_dispatch(_extract_dispatcher) -def extract(condition, arr): - """ - Return the elements of an array that satisfy some condition. - - This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If - `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``. - - Note that `place` does the exact opposite of `extract`. - - Parameters - ---------- - condition : array_like - An array whose nonzero or True entries indicate the elements of `arr` - to extract. - arr : array_like - Input array of the same size as `condition`. - - Returns - ------- - extract : ndarray - Rank 1 array of values from `arr` where `condition` is True. 
- - See Also - -------- - take, put, copyto, compress, place - - Examples - -------- - >>> arr = np.arange(12).reshape((3, 4)) - >>> arr - array([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> condition = np.mod(arr, 3)==0 - >>> condition - array([[ True, False, False, True], - [False, False, True, False], - [False, True, False, False]]) - >>> np.extract(condition, arr) - array([0, 3, 6, 9]) - - - If `condition` is boolean: - - >>> arr[condition] - array([0, 3, 6, 9]) - - """ - return _nx.take(ravel(arr), nonzero(ravel(condition))[0]) - - -def _place_dispatcher(arr, mask, vals): - return (arr, mask, vals) - - -@array_function_dispatch(_place_dispatcher) -def place(arr, mask, vals): - """ - Change elements of an array based on conditional and input values. - - Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that - `place` uses the first N elements of `vals`, where N is the number of - True values in `mask`, while `copyto` uses the elements where `mask` - is True. - - Note that `extract` does the exact opposite of `place`. - - Parameters - ---------- - arr : ndarray - Array to put data into. - mask : array_like - Boolean mask array. Must have the same size as `a`. - vals : 1-D sequence - Values to put into `a`. Only the first N elements are used, where - N is the number of True values in `mask`. If `vals` is smaller - than N, it will be repeated, and if elements of `a` are to be masked, - this sequence must be non-empty. - - See Also - -------- - copyto, put, take, extract - - Examples - -------- - >>> arr = np.arange(6).reshape(2, 3) - >>> np.place(arr, arr>2, [44, 55]) - >>> arr - array([[ 0, 1, 2], - [44, 55, 44]]) - - """ - if not isinstance(arr, np.ndarray): - raise TypeError("argument 1 must be numpy.ndarray, " - "not {name}".format(name=type(arr).__name__)) - - return _insert(arr, mask, vals) - - -def disp(mesg, device=None, linefeed=True): - """ - Display a message on a device. 
- - Parameters - ---------- - mesg : str - Message to display. - device : object - Device to write message. If None, defaults to ``sys.stdout`` which is - very similar to ``print``. `device` needs to have ``write()`` and - ``flush()`` methods. - linefeed : bool, optional - Option whether to print a line feed or not. Defaults to True. - - Raises - ------ - AttributeError - If `device` does not have a ``write()`` or ``flush()`` method. - - Examples - -------- - Besides ``sys.stdout``, a file-like object can also be used as it has - both required methods: - - >>> from io import StringIO - >>> buf = StringIO() - >>> np.disp(u'"Display" in a file', device=buf) - >>> buf.getvalue() - '"Display" in a file\\n' - - """ - if device is None: - device = sys.stdout - if linefeed: - device.write('%s\n' % mesg) - else: - device.write('%s' % mesg) - device.flush() - return - - -# See https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html -_DIMENSION_NAME = r'\w+' -_CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*)?'.format(_DIMENSION_NAME) -_ARGUMENT = r'\({}\)'.format(_CORE_DIMENSION_LIST) -_ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_ARGUMENT) -_SIGNATURE = '^{0:}->{0:}$'.format(_ARGUMENT_LIST) - - -def _parse_gufunc_signature(signature): - """ - Parse string signatures for a generalized universal function. - - Arguments - --------- - signature : string - Generalized universal function signature, e.g., ``(m,n),(n,p)->(m,p)`` - for ``np.matmul``. - - Returns - ------- - Tuple of input and output core dimensions parsed from the signature, each - of the form List[Tuple[str, ...]]. 
- """ - if not re.match(_SIGNATURE, signature): - raise ValueError( - 'not a valid gufunc signature: {}'.format(signature)) - return tuple([tuple(re.findall(_DIMENSION_NAME, arg)) - for arg in re.findall(_ARGUMENT, arg_list)] - for arg_list in signature.split('->')) - - -def _update_dim_sizes(dim_sizes, arg, core_dims): - """ - Incrementally check and update core dimension sizes for a single argument. - - Arguments - --------- - dim_sizes : Dict[str, int] - Sizes of existing core dimensions. Will be updated in-place. - arg : ndarray - Argument to examine. - core_dims : Tuple[str, ...] - Core dimensions for this argument. - """ - if not core_dims: - return - - num_core_dims = len(core_dims) - if arg.ndim < num_core_dims: - raise ValueError( - '%d-dimensional argument does not have enough ' - 'dimensions for all core dimensions %r' - % (arg.ndim, core_dims)) - - core_shape = arg.shape[-num_core_dims:] - for dim, size in zip(core_dims, core_shape): - if dim in dim_sizes: - if size != dim_sizes[dim]: - raise ValueError( - 'inconsistent size for core dimension %r: %r vs %r' - % (dim, size, dim_sizes[dim])) - else: - dim_sizes[dim] = size - - -def _parse_input_dimensions(args, input_core_dims): - """ - Parse broadcast and core dimensions for vectorize with a signature. - - Arguments - --------- - args : Tuple[ndarray, ...] - Tuple of input arguments to examine. - input_core_dims : List[Tuple[str, ...]] - List of core dimensions corresponding to each input. - - Returns - ------- - broadcast_shape : Tuple[int, ...] - Common shape to broadcast all non-core dimensions to. - dim_sizes : Dict[str, int] - Common sizes for named core dimensions. 
- """ - broadcast_args = [] - dim_sizes = {} - for arg, core_dims in zip(args, input_core_dims): - _update_dim_sizes(dim_sizes, arg, core_dims) - ndim = arg.ndim - len(core_dims) - dummy_array = np.lib.stride_tricks.as_strided(0, arg.shape[:ndim]) - broadcast_args.append(dummy_array) - broadcast_shape = np.lib.stride_tricks._broadcast_shape(*broadcast_args) - return broadcast_shape, dim_sizes - - -def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims): - """Helper for calculating broadcast shapes with core dimensions.""" - return [broadcast_shape + tuple(dim_sizes[dim] for dim in core_dims) - for core_dims in list_of_core_dims] - - -def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes): - """Helper for creating output arrays in vectorize.""" - shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims) - arrays = tuple(np.empty(shape, dtype=dtype) - for shape, dtype in zip(shapes, dtypes)) - return arrays - - -@set_module('numpy') -class vectorize(object): - """ - vectorize(pyfunc, otypes=None, doc=None, excluded=None, cache=False, - signature=None) - - Generalized function class. - - Define a vectorized function which takes a nested sequence of objects or - numpy arrays as inputs and returns a single numpy array or a tuple of numpy - arrays. The vectorized function evaluates `pyfunc` over successive tuples - of the input arrays like the python map function, except it uses the - broadcasting rules of numpy. - - The data type of the output of `vectorized` is determined by calling - the function with the first element of the input. This can be avoided - by specifying the `otypes` argument. - - Parameters - ---------- - pyfunc : callable - A python function or method. - otypes : str or list of dtypes, optional - The output data type. It must be specified as either a string of - typecode characters or a list of data type specifiers. There should - be one data type specifier for each output. 
- doc : str, optional - The docstring for the function. If None, the docstring will be the - ``pyfunc.__doc__``. - excluded : set, optional - Set of strings or integers representing the positional or keyword - arguments for which the function will not be vectorized. These will be - passed directly to `pyfunc` unmodified. - - .. versionadded:: 1.7.0 - - cache : bool, optional - If `True`, then cache the first function call that determines the number - of outputs if `otypes` is not provided. - - .. versionadded:: 1.7.0 - - signature : string, optional - Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for - vectorized matrix-vector multiplication. If provided, ``pyfunc`` will - be called with (and expected to return) arrays with shapes given by the - size of corresponding core dimensions. By default, ``pyfunc`` is - assumed to take scalars as input and output. - - .. versionadded:: 1.12.0 - - Returns - ------- - vectorized : callable - Vectorized function. - - See Also - -------- - frompyfunc : Takes an arbitrary Python function and returns a ufunc - - Notes - ----- - The `vectorize` function is provided primarily for convenience, not for - performance. The implementation is essentially a for loop. - - If `otypes` is not specified, then a call to the function with the - first argument will be used to determine the number of outputs. The - results of this call will be cached if `cache` is `True` to prevent - calling the function twice. However, to implement the cache, the - original function must be wrapped which will slow down subsequent - calls, so only do this if your function is expensive. - - The new keyword argument interface and `excluded` argument support - further degrades performance. - - References - ---------- - .. [1] NumPy Reference, section `Generalized Universal Function API - `_. - - Examples - -------- - >>> def myfunc(a, b): - ... "Return a-b if a>b, otherwise return a+b" - ... if a > b: - ... return a - b - ... else: - ... 
return a + b - - >>> vfunc = np.vectorize(myfunc) - >>> vfunc([1, 2, 3, 4], 2) - array([3, 4, 1, 2]) - - The docstring is taken from the input function to `vectorize` unless it - is specified: - - >>> vfunc.__doc__ - 'Return a-b if a>b, otherwise return a+b' - >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`') - >>> vfunc.__doc__ - 'Vectorized `myfunc`' - - The output type is determined by evaluating the first element of the input, - unless it is specified: - - >>> out = vfunc([1, 2, 3, 4], 2) - >>> type(out[0]) - - >>> vfunc = np.vectorize(myfunc, otypes=[float]) - >>> out = vfunc([1, 2, 3, 4], 2) - >>> type(out[0]) - - - The `excluded` argument can be used to prevent vectorizing over certain - arguments. This can be useful for array-like arguments of a fixed length - such as the coefficients for a polynomial as in `polyval`: - - >>> def mypolyval(p, x): - ... _p = list(p) - ... res = _p.pop(0) - ... while _p: - ... res = res*x + _p.pop(0) - ... return res - >>> vpolyval = np.vectorize(mypolyval, excluded=['p']) - >>> vpolyval(p=[1, 2, 3], x=[0, 1]) - array([3, 6]) - - Positional arguments may also be excluded by specifying their position: - - >>> vpolyval.excluded.add(0) - >>> vpolyval([1, 2, 3], x=[0, 1]) - array([3, 6]) - - The `signature` argument allows for vectorizing functions that act on - non-scalar arrays of fixed length. For example, you can use it for a - vectorized calculation of Pearson correlation coefficient and its p-value: - - >>> import scipy.stats - >>> pearsonr = np.vectorize(scipy.stats.pearsonr, - ... 
signature='(n),(n)->(),()') - >>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]]) - (array([ 1., -1.]), array([ 0., 0.])) - - Or for a vectorized convolution: - - >>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)') - >>> convolve(np.eye(4), [1, 2, 1]) - array([[1., 2., 1., 0., 0., 0.], - [0., 1., 2., 1., 0., 0.], - [0., 0., 1., 2., 1., 0.], - [0., 0., 0., 1., 2., 1.]]) - - """ - def __init__(self, pyfunc, otypes=None, doc=None, excluded=None, - cache=False, signature=None): - self.pyfunc = pyfunc - self.cache = cache - self.signature = signature - self._ufunc = None # Caching to improve default performance - - if doc is None: - self.__doc__ = pyfunc.__doc__ - else: - self.__doc__ = doc - - if isinstance(otypes, str): - for char in otypes: - if char not in typecodes['All']: - raise ValueError("Invalid otype specified: %s" % (char,)) - elif iterable(otypes): - otypes = ''.join([_nx.dtype(x).char for x in otypes]) - elif otypes is not None: - raise ValueError("Invalid otype specification") - self.otypes = otypes - - # Excluded variable support - if excluded is None: - excluded = set() - self.excluded = set(excluded) - - if signature is not None: - self._in_and_out_core_dims = _parse_gufunc_signature(signature) - else: - self._in_and_out_core_dims = None - - def __call__(self, *args, **kwargs): - """ - Return arrays with the results of `pyfunc` broadcast (vectorized) over - `args` and `kwargs` not in `excluded`. - """ - excluded = self.excluded - if not kwargs and not excluded: - func = self.pyfunc - vargs = args - else: - # The wrapper accepts only positional arguments: we use `names` and - # `inds` to mutate `the_args` and `kwargs` to pass to the original - # function. 
- nargs = len(args) - - names = [_n for _n in kwargs if _n not in excluded] - inds = [_i for _i in range(nargs) if _i not in excluded] - the_args = list(args) - - def func(*vargs): - for _n, _i in enumerate(inds): - the_args[_i] = vargs[_n] - kwargs.update(zip(names, vargs[len(inds):])) - return self.pyfunc(*the_args, **kwargs) - - vargs = [args[_i] for _i in inds] - vargs.extend([kwargs[_n] for _n in names]) - - return self._vectorize_call(func=func, args=vargs) - - def _get_ufunc_and_otypes(self, func, args): - """Return (ufunc, otypes).""" - # frompyfunc will fail if args is empty - if not args: - raise ValueError('args can not be empty') - - if self.otypes is not None: - otypes = self.otypes - nout = len(otypes) - - # Note logic here: We only *use* self._ufunc if func is self.pyfunc - # even though we set self._ufunc regardless. - if func is self.pyfunc and self._ufunc is not None: - ufunc = self._ufunc - else: - ufunc = self._ufunc = frompyfunc(func, len(args), nout) - else: - # Get number of outputs and output types by calling the function on - # the first entries of args. We also cache the result to prevent - # the subsequent call when the ufunc is evaluated. - # Assumes that ufunc first evaluates the 0th elements in the input - # arrays (the input values are not checked to ensure this) - args = [asarray(arg) for arg in args] - if builtins.any(arg.size == 0 for arg in args): - raise ValueError('cannot call `vectorize` on size 0 inputs ' - 'unless `otypes` is set') - - inputs = [arg.flat[0] for arg in args] - outputs = func(*inputs) - - # Performance note: profiling indicates that -- for simple - # functions at least -- this wrapping can almost double the - # execution time. - # Hence we make it optional. 
- if self.cache: - _cache = [outputs] - - def _func(*vargs): - if _cache: - return _cache.pop() - else: - return func(*vargs) - else: - _func = func - - if isinstance(outputs, tuple): - nout = len(outputs) - else: - nout = 1 - outputs = (outputs,) - - otypes = ''.join([asarray(outputs[_k]).dtype.char - for _k in range(nout)]) - - # Performance note: profiling indicates that creating the ufunc is - # not a significant cost compared with wrapping so it seems not - # worth trying to cache this. - ufunc = frompyfunc(_func, len(args), nout) - - return ufunc, otypes - - def _vectorize_call(self, func, args): - """Vectorized call to `func` over positional `args`.""" - if self.signature is not None: - res = self._vectorize_call_with_signature(func, args) - elif not args: - res = func() - else: - ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) - - # Convert args to object arrays first - inputs = [array(a, copy=False, subok=True, dtype=object) - for a in args] - - outputs = ufunc(*inputs) - - if ufunc.nout == 1: - res = array(outputs, copy=False, subok=True, dtype=otypes[0]) - else: - res = tuple([array(x, copy=False, subok=True, dtype=t) - for x, t in zip(outputs, otypes)]) - return res - - def _vectorize_call_with_signature(self, func, args): - """Vectorized call over positional arguments with a signature.""" - input_core_dims, output_core_dims = self._in_and_out_core_dims - - if len(args) != len(input_core_dims): - raise TypeError('wrong number of positional arguments: ' - 'expected %r, got %r' - % (len(input_core_dims), len(args))) - args = tuple(asanyarray(arg) for arg in args) - - broadcast_shape, dim_sizes = _parse_input_dimensions( - args, input_core_dims) - input_shapes = _calculate_shapes(broadcast_shape, dim_sizes, - input_core_dims) - args = [np.broadcast_to(arg, shape, subok=True) - for arg, shape in zip(args, input_shapes)] - - outputs = None - otypes = self.otypes - nout = len(output_core_dims) - - for index in np.ndindex(*broadcast_shape): - 
results = func(*(arg[index] for arg in args)) - - n_results = len(results) if isinstance(results, tuple) else 1 - - if nout != n_results: - raise ValueError( - 'wrong number of outputs from pyfunc: expected %r, got %r' - % (nout, n_results)) - - if nout == 1: - results = (results,) - - if outputs is None: - for result, core_dims in zip(results, output_core_dims): - _update_dim_sizes(dim_sizes, result, core_dims) - - if otypes is None: - otypes = [asarray(result).dtype for result in results] - - outputs = _create_arrays(broadcast_shape, dim_sizes, - output_core_dims, otypes) - - for output, result in zip(outputs, results): - output[index] = result - - if outputs is None: - # did not call the function even once - if otypes is None: - raise ValueError('cannot call `vectorize` on size 0 inputs ' - 'unless `otypes` is set') - if builtins.any(dim not in dim_sizes - for dims in output_core_dims - for dim in dims): - raise ValueError('cannot call `vectorize` with a signature ' - 'including new output dimensions on size 0 ' - 'inputs') - outputs = _create_arrays(broadcast_shape, dim_sizes, - output_core_dims, otypes) - - return outputs[0] if nout == 1 else outputs - - -def _cov_dispatcher(m, y=None, rowvar=None, bias=None, ddof=None, - fweights=None, aweights=None): - return (m, y, fweights, aweights) - - -@array_function_dispatch(_cov_dispatcher) -def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, - aweights=None): - """ - Estimate a covariance matrix, given data and weights. - - Covariance indicates the level to which two variables vary together. - If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, - then the covariance matrix element :math:`C_{ij}` is the covariance of - :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance - of :math:`x_i`. - - See the notes for an outline of the algorithm. - - Parameters - ---------- - m : array_like - A 1-D or 2-D array containing multiple variables and observations. 
- Each row of `m` represents a variable, and each column a single - observation of all those variables. Also see `rowvar` below. - y : array_like, optional - An additional set of variables and observations. `y` has the same form - as that of `m`. - rowvar : bool, optional - If `rowvar` is True (default), then each row represents a - variable, with observations in the columns. Otherwise, the relationship - is transposed: each column represents a variable, while the rows - contain observations. - bias : bool, optional - Default normalization (False) is by ``(N - 1)``, where ``N`` is the - number of observations given (unbiased estimate). If `bias` is True, - then normalization is by ``N``. These values can be overridden by using - the keyword ``ddof`` in numpy versions >= 1.5. - ddof : int, optional - If not ``None`` the default value implied by `bias` is overridden. - Note that ``ddof=1`` will return the unbiased estimate, even if both - `fweights` and `aweights` are specified, and ``ddof=0`` will return - the simple average. See the notes for the details. The default value - is ``None``. - - .. versionadded:: 1.5 - fweights : array_like, int, optional - 1-D array of integer frequency weights; the number of times each - observation vector should be repeated. - - .. versionadded:: 1.10 - aweights : array_like, optional - 1-D array of observation vector weights. These relative weights are - typically large for observations considered "important" and smaller for - observations considered less "important". If ``ddof=0`` the array of - weights can be used to assign probabilities to observation vectors. - - .. versionadded:: 1.10 - - Returns - ------- - out : ndarray - The covariance matrix of the variables. - - See Also - -------- - corrcoef : Normalized covariance matrix - - Notes - ----- - Assume that the observations are in the columns of the observation - array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. 
The - steps to compute the weighted covariance are as follows:: - - >>> m = np.arange(10, dtype=np.float64) - >>> f = np.arange(10) * 2 - >>> a = np.arange(10) ** 2. - >>> ddof = 1 - >>> w = f * a - >>> v1 = np.sum(w) - >>> v2 = np.sum(w * a) - >>> m -= np.sum(m * w, axis=None, keepdims=True) / v1 - >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2) - - Note that when ``a == 1``, the normalization factor - ``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)`` - as it should. - - Examples - -------- - Consider two variables, :math:`x_0` and :math:`x_1`, which - correlate perfectly, but in opposite directions: - - >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T - >>> x - array([[0, 1, 2], - [2, 1, 0]]) - - Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance - matrix shows this clearly: - - >>> np.cov(x) - array([[ 1., -1.], - [-1., 1.]]) - - Note that element :math:`C_{0,1}`, which shows the correlation between - :math:`x_0` and :math:`x_1`, is negative. 
- - Further, note how `x` and `y` are combined: - - >>> x = [-2.1, -1, 4.3] - >>> y = [3, 1.1, 0.12] - >>> X = np.stack((x, y), axis=0) - >>> np.cov(X) - array([[11.71 , -4.286 ], # may vary - [-4.286 , 2.144133]]) - >>> np.cov(x, y) - array([[11.71 , -4.286 ], # may vary - [-4.286 , 2.144133]]) - >>> np.cov(x) - array(11.71) - - """ - # Check inputs - if ddof is not None and ddof != int(ddof): - raise ValueError( - "ddof must be integer") - - # Handles complex arrays too - m = np.asarray(m) - if m.ndim > 2: - raise ValueError("m has more than 2 dimensions") - - if y is None: - dtype = np.result_type(m, np.float64) - else: - y = np.asarray(y) - if y.ndim > 2: - raise ValueError("y has more than 2 dimensions") - dtype = np.result_type(m, y, np.float64) - - X = array(m, ndmin=2, dtype=dtype) - if not rowvar and X.shape[0] != 1: - X = X.T - if X.shape[0] == 0: - return np.array([]).reshape(0, 0) - if y is not None: - y = array(y, copy=False, ndmin=2, dtype=dtype) - if not rowvar and y.shape[0] != 1: - y = y.T - X = np.concatenate((X, y), axis=0) - - if ddof is None: - if bias == 0: - ddof = 1 - else: - ddof = 0 - - # Get the product of frequencies and weights - w = None - if fweights is not None: - fweights = np.asarray(fweights, dtype=float) - if not np.all(fweights == np.around(fweights)): - raise TypeError( - "fweights must be integer") - if fweights.ndim > 1: - raise RuntimeError( - "cannot handle multidimensional fweights") - if fweights.shape[0] != X.shape[1]: - raise RuntimeError( - "incompatible numbers of samples and fweights") - if any(fweights < 0): - raise ValueError( - "fweights cannot be negative") - w = fweights - if aweights is not None: - aweights = np.asarray(aweights, dtype=float) - if aweights.ndim > 1: - raise RuntimeError( - "cannot handle multidimensional aweights") - if aweights.shape[0] != X.shape[1]: - raise RuntimeError( - "incompatible numbers of samples and aweights") - if any(aweights < 0): - raise ValueError( - "aweights cannot be 
negative") - if w is None: - w = aweights - else: - w *= aweights - - avg, w_sum = average(X, axis=1, weights=w, returned=True) - w_sum = w_sum[0] - - # Determine the normalization - if w is None: - fact = X.shape[1] - ddof - elif ddof == 0: - fact = w_sum - elif aweights is None: - fact = w_sum - ddof - else: - fact = w_sum - ddof*sum(w*aweights)/w_sum - - if fact <= 0: - warnings.warn("Degrees of freedom <= 0 for slice", - RuntimeWarning, stacklevel=3) - fact = 0.0 - - X -= avg[:, None] - if w is None: - X_T = X.T - else: - X_T = (X*w).T - c = dot(X, X_T.conj()) - c *= np.true_divide(1, fact) - return c.squeeze() - - -def _corrcoef_dispatcher(x, y=None, rowvar=None, bias=None, ddof=None): - return (x, y) - - -@array_function_dispatch(_corrcoef_dispatcher) -def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue): - """ - Return Pearson product-moment correlation coefficients. - - Please refer to the documentation for `cov` for more detail. The - relationship between the correlation coefficient matrix, `R`, and the - covariance matrix, `C`, is - - .. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } } - - The values of `R` are between -1 and 1, inclusive. - - Parameters - ---------- - x : array_like - A 1-D or 2-D array containing multiple variables and observations. - Each row of `x` represents a variable, and each column a single - observation of all those variables. Also see `rowvar` below. - y : array_like, optional - An additional set of variables and observations. `y` has the same - shape as `x`. - rowvar : bool, optional - If `rowvar` is True (default), then each row represents a - variable, with observations in the columns. Otherwise, the relationship - is transposed: each column represents a variable, while the rows - contain observations. - bias : _NoValue, optional - Has no effect, do not use. - - .. deprecated:: 1.10.0 - ddof : _NoValue, optional - Has no effect, do not use. - - .. 
deprecated:: 1.10.0 - - Returns - ------- - R : ndarray - The correlation coefficient matrix of the variables. - - See Also - -------- - cov : Covariance matrix - - Notes - ----- - Due to floating point rounding the resulting array may not be Hermitian, - the diagonal elements may not be 1, and the elements may not satisfy the - inequality abs(a) <= 1. The real and imaginary parts are clipped to the - interval [-1, 1] in an attempt to improve on that situation but is not - much help in the complex case. - - This function accepts but discards arguments `bias` and `ddof`. This is - for backwards compatibility with previous versions of this function. These - arguments had no effect on the return values of the function and can be - safely ignored in this and previous versions of numpy. - - """ - if bias is not np._NoValue or ddof is not np._NoValue: - # 2015-03-15, 1.10 - warnings.warn('bias and ddof have no effect and are deprecated', - DeprecationWarning, stacklevel=3) - c = cov(x, y, rowvar) - try: - d = diag(c) - except ValueError: - # scalar covariance - # nan if incorrect value (nan, inf, 0), 1 otherwise - return c / c - stddev = sqrt(d.real) - c /= stddev[:, None] - c /= stddev[None, :] - - # Clip real and imaginary parts to [-1, 1]. This does not guarantee - # abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without - # excessive work. - np.clip(c.real, -1, 1, out=c.real) - if np.iscomplexobj(c): - np.clip(c.imag, -1, 1, out=c.imag) - - return c - - -@set_module('numpy') -def blackman(M): - """ - Return the Blackman window. - - The Blackman window is a taper formed by using the first three - terms of a summation of cosines. It was designed to have close to the - minimal leakage possible. It is close to optimal, only slightly worse - than a Kaiser window. - - Parameters - ---------- - M : int - Number of points in the output window. If zero or less, an empty - array is returned. 
- - Returns - ------- - out : ndarray - The window, with the maximum value normalized to one (the value one - appears only if the number of samples is odd). - - See Also - -------- - bartlett, hamming, hanning, kaiser - - Notes - ----- - The Blackman window is defined as - - .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M) - - Most references to the Blackman window come from the signal processing - literature, where it is used as one of many windowing functions for - smoothing values. It is also known as an apodization (which means - "removing the foot", i.e. smoothing discontinuities at the beginning - and end of the sampled signal) or tapering function. It is known as a - "near optimal" tapering function, almost as good (by some measures) - as the kaiser window. - - References - ---------- - Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, - Dover Publications, New York. - - Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. - Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. - - Examples - -------- - >>> import matplotlib.pyplot as plt - >>> np.blackman(12) - array([-1.38777878e-17, 3.26064346e-02, 1.59903635e-01, # may vary - 4.14397981e-01, 7.36045180e-01, 9.67046769e-01, - 9.67046769e-01, 7.36045180e-01, 4.14397981e-01, - 1.59903635e-01, 3.26064346e-02, -1.38777878e-17]) - - Plot the window and the frequency response: - - >>> from numpy.fft import fft, fftshift - >>> window = np.blackman(51) - >>> plt.plot(window) - [] - >>> plt.title("Blackman window") - Text(0.5, 1.0, 'Blackman window') - >>> plt.ylabel("Amplitude") - Text(0, 0.5, 'Amplitude') - >>> plt.xlabel("Sample") - Text(0.5, 0, 'Sample') - >>> plt.show() - - >>> plt.figure() -

- >>> A = fft(window, 2048) / 25.5 - >>> mag = np.abs(fftshift(A)) - >>> freq = np.linspace(-0.5, 0.5, len(A)) - >>> with np.errstate(divide='ignore', invalid='ignore'): - ... response = 20 * np.log10(mag) - ... - >>> response = np.clip(response, -100, 100) - >>> plt.plot(freq, response) - [] - >>> plt.title("Frequency response of Blackman window") - Text(0.5, 1.0, 'Frequency response of Blackman window') - >>> plt.ylabel("Magnitude [dB]") - Text(0, 0.5, 'Magnitude [dB]') - >>> plt.xlabel("Normalized frequency [cycles per sample]") - Text(0.5, 0, 'Normalized frequency [cycles per sample]') - >>> _ = plt.axis('tight') - >>> plt.show() - - """ - if M < 1: - return array([]) - if M == 1: - return ones(1, float) - n = arange(0, M) - return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1)) - - -@set_module('numpy') -def bartlett(M): - """ - Return the Bartlett window. - - The Bartlett window is very similar to a triangular window, except - that the end points are at zero. It is often used in signal - processing for tapering a signal, without generating too much - ripple in the frequency domain. - - Parameters - ---------- - M : int - Number of points in the output window. If zero or less, an - empty array is returned. - - Returns - ------- - out : array - The triangular window, with the maximum value normalized to one - (the value one appears only if the number of samples is odd), with - the first and last samples equal to zero. - - See Also - -------- - blackman, hamming, hanning, kaiser - - Notes - ----- - The Bartlett window is defined as - - .. math:: w(n) = \\frac{2}{M-1} \\left( - \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right| - \\right) - - Most references to the Bartlett window come from the signal - processing literature, where it is used as one of many windowing - functions for smoothing values. Note that convolution with this - window produces linear interpolation. It is also known as an - apodization (which means"removing the foot", i.e. 
smoothing - discontinuities at the beginning and end of the sampled signal) or - tapering function. The fourier transform of the Bartlett is the product - of two sinc functions. - Note the excellent discussion in Kanasewich. - - References - ---------- - .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", - Biometrika 37, 1-16, 1950. - .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", - The University of Alberta Press, 1975, pp. 109-110. - .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal - Processing", Prentice-Hall, 1999, pp. 468-471. - .. [4] Wikipedia, "Window function", - https://en.wikipedia.org/wiki/Window_function - .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, - "Numerical Recipes", Cambridge University Press, 1986, page 429. - - Examples - -------- - >>> import matplotlib.pyplot as plt - >>> np.bartlett(12) - array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, # may vary - 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636, - 0.18181818, 0. ]) - - Plot the window and its frequency response (requires SciPy and matplotlib): - - >>> from numpy.fft import fft, fftshift - >>> window = np.bartlett(51) - >>> plt.plot(window) - [] - >>> plt.title("Bartlett window") - Text(0.5, 1.0, 'Bartlett window') - >>> plt.ylabel("Amplitude") - Text(0, 0.5, 'Amplitude') - >>> plt.xlabel("Sample") - Text(0.5, 0, 'Sample') - >>> plt.show() - - >>> plt.figure() -
- >>> A = fft(window, 2048) / 25.5 - >>> mag = np.abs(fftshift(A)) - >>> freq = np.linspace(-0.5, 0.5, len(A)) - >>> with np.errstate(divide='ignore', invalid='ignore'): - ... response = 20 * np.log10(mag) - ... - >>> response = np.clip(response, -100, 100) - >>> plt.plot(freq, response) - [] - >>> plt.title("Frequency response of Bartlett window") - Text(0.5, 1.0, 'Frequency response of Bartlett window') - >>> plt.ylabel("Magnitude [dB]") - Text(0, 0.5, 'Magnitude [dB]') - >>> plt.xlabel("Normalized frequency [cycles per sample]") - Text(0.5, 0, 'Normalized frequency [cycles per sample]') - >>> _ = plt.axis('tight') - >>> plt.show() - - """ - if M < 1: - return array([]) - if M == 1: - return ones(1, float) - n = arange(0, M) - return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1)) - - -@set_module('numpy') -def hanning(M): - """ - Return the Hanning window. - - The Hanning window is a taper formed by using a weighted cosine. - - Parameters - ---------- - M : int - Number of points in the output window. If zero or less, an - empty array is returned. - - Returns - ------- - out : ndarray, shape(M,) - The window, with the maximum value normalized to one (the value - one appears only if `M` is odd). - - See Also - -------- - bartlett, blackman, hamming, kaiser - - Notes - ----- - The Hanning window is defined as - - .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right) - \\qquad 0 \\leq n \\leq M-1 - - The Hanning was named for Julius von Hann, an Austrian meteorologist. - It is also known as the Cosine Bell. Some authors prefer that it be - called a Hann window, to help avoid confusion with the very similar - Hamming window. - - Most references to the Hanning window come from the signal processing - literature, where it is used as one of many windowing functions for - smoothing values. It is also known as an apodization (which means - "removing the foot", i.e. 
smoothing discontinuities at the beginning - and end of the sampled signal) or tapering function. - - References - ---------- - .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power - spectra, Dover Publications, New York. - .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", - The University of Alberta Press, 1975, pp. 106-108. - .. [3] Wikipedia, "Window function", - https://en.wikipedia.org/wiki/Window_function - .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, - "Numerical Recipes", Cambridge University Press, 1986, page 425. - - Examples - -------- - >>> np.hanning(12) - array([0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, - 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, - 0.07937323, 0. ]) - - Plot the window and its frequency response: - - >>> import matplotlib.pyplot as plt - >>> from numpy.fft import fft, fftshift - >>> window = np.hanning(51) - >>> plt.plot(window) - [] - >>> plt.title("Hann window") - Text(0.5, 1.0, 'Hann window') - >>> plt.ylabel("Amplitude") - Text(0, 0.5, 'Amplitude') - >>> plt.xlabel("Sample") - Text(0.5, 0, 'Sample') - >>> plt.show() - - >>> plt.figure() -
- >>> A = fft(window, 2048) / 25.5 - >>> mag = np.abs(fftshift(A)) - >>> freq = np.linspace(-0.5, 0.5, len(A)) - >>> with np.errstate(divide='ignore', invalid='ignore'): - ... response = 20 * np.log10(mag) - ... - >>> response = np.clip(response, -100, 100) - >>> plt.plot(freq, response) - [] - >>> plt.title("Frequency response of the Hann window") - Text(0.5, 1.0, 'Frequency response of the Hann window') - >>> plt.ylabel("Magnitude [dB]") - Text(0, 0.5, 'Magnitude [dB]') - >>> plt.xlabel("Normalized frequency [cycles per sample]") - Text(0.5, 0, 'Normalized frequency [cycles per sample]') - >>> plt.axis('tight') - ... - >>> plt.show() - - """ - if M < 1: - return array([]) - if M == 1: - return ones(1, float) - n = arange(0, M) - return 0.5 - 0.5*cos(2.0*pi*n/(M-1)) - - -@set_module('numpy') -def hamming(M): - """ - Return the Hamming window. - - The Hamming window is a taper formed by using a weighted cosine. - - Parameters - ---------- - M : int - Number of points in the output window. If zero or less, an - empty array is returned. - - Returns - ------- - out : ndarray - The window, with the maximum value normalized to one (the value - one appears only if the number of samples is odd). - - See Also - -------- - bartlett, blackman, hanning, kaiser - - Notes - ----- - The Hamming window is defined as - - .. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right) - \\qquad 0 \\leq n \\leq M-1 - - The Hamming was named for R. W. Hamming, an associate of J. W. Tukey - and is described in Blackman and Tukey. It was recommended for - smoothing the truncated autocovariance function in the time domain. - Most references to the Hamming window come from the signal processing - literature, where it is used as one of many windowing functions for - smoothing values. It is also known as an apodization (which means - "removing the foot", i.e. smoothing discontinuities at the beginning - and end of the sampled signal) or tapering function. 
- - References - ---------- - .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power - spectra, Dover Publications, New York. - .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The - University of Alberta Press, 1975, pp. 109-110. - .. [3] Wikipedia, "Window function", - https://en.wikipedia.org/wiki/Window_function - .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, - "Numerical Recipes", Cambridge University Press, 1986, page 425. - - Examples - -------- - >>> np.hamming(12) - array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, # may vary - 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, - 0.15302337, 0.08 ]) - - Plot the window and the frequency response: - - >>> import matplotlib.pyplot as plt - >>> from numpy.fft import fft, fftshift - >>> window = np.hamming(51) - >>> plt.plot(window) - [] - >>> plt.title("Hamming window") - Text(0.5, 1.0, 'Hamming window') - >>> plt.ylabel("Amplitude") - Text(0, 0.5, 'Amplitude') - >>> plt.xlabel("Sample") - Text(0.5, 0, 'Sample') - >>> plt.show() - - >>> plt.figure() -
- >>> A = fft(window, 2048) / 25.5 - >>> mag = np.abs(fftshift(A)) - >>> freq = np.linspace(-0.5, 0.5, len(A)) - >>> response = 20 * np.log10(mag) - >>> response = np.clip(response, -100, 100) - >>> plt.plot(freq, response) - [] - >>> plt.title("Frequency response of Hamming window") - Text(0.5, 1.0, 'Frequency response of Hamming window') - >>> plt.ylabel("Magnitude [dB]") - Text(0, 0.5, 'Magnitude [dB]') - >>> plt.xlabel("Normalized frequency [cycles per sample]") - Text(0.5, 0, 'Normalized frequency [cycles per sample]') - >>> plt.axis('tight') - ... - >>> plt.show() - - """ - if M < 1: - return array([]) - if M == 1: - return ones(1, float) - n = arange(0, M) - return 0.54 - 0.46*cos(2.0*pi*n/(M-1)) - -## Code from cephes for i0 - -_i0A = [ - -4.41534164647933937950E-18, - 3.33079451882223809783E-17, - -2.43127984654795469359E-16, - 1.71539128555513303061E-15, - -1.16853328779934516808E-14, - 7.67618549860493561688E-14, - -4.85644678311192946090E-13, - 2.95505266312963983461E-12, - -1.72682629144155570723E-11, - 9.67580903537323691224E-11, - -5.18979560163526290666E-10, - 2.65982372468238665035E-9, - -1.30002500998624804212E-8, - 6.04699502254191894932E-8, - -2.67079385394061173391E-7, - 1.11738753912010371815E-6, - -4.41673835845875056359E-6, - 1.64484480707288970893E-5, - -5.75419501008210370398E-5, - 1.88502885095841655729E-4, - -5.76375574538582365885E-4, - 1.63947561694133579842E-3, - -4.32430999505057594430E-3, - 1.05464603945949983183E-2, - -2.37374148058994688156E-2, - 4.93052842396707084878E-2, - -9.49010970480476444210E-2, - 1.71620901522208775349E-1, - -3.04682672343198398683E-1, - 6.76795274409476084995E-1 - ] - -_i0B = [ - -7.23318048787475395456E-18, - -4.83050448594418207126E-18, - 4.46562142029675999901E-17, - 3.46122286769746109310E-17, - -2.82762398051658348494E-16, - -3.42548561967721913462E-16, - 1.77256013305652638360E-15, - 3.81168066935262242075E-15, - -9.55484669882830764870E-15, - -4.15056934728722208663E-14, - 
1.54008621752140982691E-14, - 3.85277838274214270114E-13, - 7.18012445138366623367E-13, - -1.79417853150680611778E-12, - -1.32158118404477131188E-11, - -3.14991652796324136454E-11, - 1.18891471078464383424E-11, - 4.94060238822496958910E-10, - 3.39623202570838634515E-9, - 2.26666899049817806459E-8, - 2.04891858946906374183E-7, - 2.89137052083475648297E-6, - 6.88975834691682398426E-5, - 3.36911647825569408990E-3, - 8.04490411014108831608E-1 - ] - - -def _chbevl(x, vals): - b0 = vals[0] - b1 = 0.0 - - for i in range(1, len(vals)): - b2 = b1 - b1 = b0 - b0 = x*b1 - b2 + vals[i] - - return 0.5*(b0 - b2) - - -def _i0_1(x): - return exp(x) * _chbevl(x/2.0-2, _i0A) - - -def _i0_2(x): - return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x) - - -def _i0_dispatcher(x): - return (x,) - - -@array_function_dispatch(_i0_dispatcher) -def i0(x): - """ - Modified Bessel function of the first kind, order 0. - - Usually denoted :math:`I_0`. This function does broadcast, but will *not* - "up-cast" int dtype arguments unless accompanied by at least one float or - complex dtype argument (see Raises below). - - Parameters - ---------- - x : array_like, dtype float or complex - Argument of the Bessel function. - - Returns - ------- - out : ndarray, shape = x.shape, dtype = x.dtype - The modified Bessel function evaluated at each of the elements of `x`. - - Raises - ------ - TypeError: array cannot be safely cast to required type - If argument consists exclusively of int dtypes. - - See Also - -------- - scipy.special.i0, scipy.special.iv, scipy.special.ive - - Notes - ----- - The scipy implementation is recommended over this function: it is a - proper ufunc written in C, and more than an order of magnitude faster. - - We use the algorithm published by Clenshaw [1]_ and referenced by - Abramowitz and Stegun [2]_, for which the function domain is - partitioned into the two intervals [0,8] and (8,inf), and Chebyshev - polynomial expansions are employed in each interval. 
Relative error on - the domain [0,30] using IEEE arithmetic is documented [3]_ as having a - peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000). - - References - ---------- - .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in - *National Physical Laboratory Mathematical Tables*, vol. 5, London: - Her Majesty's Stationery Office, 1962. - .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical - Functions*, 10th printing, New York: Dover, 1964, pp. 379. - http://www.math.sfu.ca/~cbm/aands/page_379.htm - .. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html - - Examples - -------- - >>> np.i0(0.) - array(1.0) # may vary - >>> np.i0([0., 1. + 2j]) - array([ 1.00000000+0.j , 0.18785373+0.64616944j]) # may vary - - """ - x = np.asanyarray(x) - x = np.abs(x) - return piecewise(x, [x <= 8.0], [_i0_1, _i0_2]) - -## End of cephes code for i0 - - -@set_module('numpy') -def kaiser(M, beta): - """ - Return the Kaiser window. - - The Kaiser window is a taper formed by using a Bessel function. - - Parameters - ---------- - M : int - Number of points in the output window. If zero or less, an - empty array is returned. - beta : float - Shape parameter for window. - - Returns - ------- - out : array - The window, with the maximum value normalized to one (the value - one appears only if the number of samples is odd). - - See Also - -------- - bartlett, blackman, hamming, hanning - - Notes - ----- - The Kaiser window is defined as - - .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}} - \\right)/I_0(\\beta) - - with - - .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2}, - - where :math:`I_0` is the modified zeroth-order Bessel function. - - The Kaiser was named for Jim Kaiser, who discovered a simple - approximation to the DPSS window based on Bessel functions. 
The Kaiser - window is a very good approximation to the Digital Prolate Spheroidal - Sequence, or Slepian window, which is the transform which maximizes the - energy in the main lobe of the window relative to total energy. - - The Kaiser can approximate many other windows by varying the beta - parameter. - - ==== ======================= - beta Window shape - ==== ======================= - 0 Rectangular - 5 Similar to a Hamming - 6 Similar to a Hanning - 8.6 Similar to a Blackman - ==== ======================= - - A beta value of 14 is probably a good starting point. Note that as beta - gets large, the window narrows, and so the number of samples needs to be - large enough to sample the increasingly narrow spike, otherwise NaNs will - get returned. - - Most references to the Kaiser window come from the signal processing - literature, where it is used as one of many windowing functions for - smoothing values. It is also known as an apodization (which means - "removing the foot", i.e. smoothing discontinuities at the beginning - and end of the sampled signal) or tapering function. - - References - ---------- - .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by - digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285. - John Wiley and Sons, New York, (1966). - .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The - University of Alberta Press, 1975, pp. 177-178. - .. 
[3] Wikipedia, "Window function", - https://en.wikipedia.org/wiki/Window_function - - Examples - -------- - >>> import matplotlib.pyplot as plt - >>> np.kaiser(12, 14) - array([7.72686684e-06, 3.46009194e-03, 4.65200189e-02, # may vary - 2.29737120e-01, 5.99885316e-01, 9.45674898e-01, - 9.45674898e-01, 5.99885316e-01, 2.29737120e-01, - 4.65200189e-02, 3.46009194e-03, 7.72686684e-06]) - - - Plot the window and the frequency response: - - >>> from numpy.fft import fft, fftshift - >>> window = np.kaiser(51, 14) - >>> plt.plot(window) - [] - >>> plt.title("Kaiser window") - Text(0.5, 1.0, 'Kaiser window') - >>> plt.ylabel("Amplitude") - Text(0, 0.5, 'Amplitude') - >>> plt.xlabel("Sample") - Text(0.5, 0, 'Sample') - >>> plt.show() - - >>> plt.figure() -
- >>> A = fft(window, 2048) / 25.5 - >>> mag = np.abs(fftshift(A)) - >>> freq = np.linspace(-0.5, 0.5, len(A)) - >>> response = 20 * np.log10(mag) - >>> response = np.clip(response, -100, 100) - >>> plt.plot(freq, response) - [] - >>> plt.title("Frequency response of Kaiser window") - Text(0.5, 1.0, 'Frequency response of Kaiser window') - >>> plt.ylabel("Magnitude [dB]") - Text(0, 0.5, 'Magnitude [dB]') - >>> plt.xlabel("Normalized frequency [cycles per sample]") - Text(0.5, 0, 'Normalized frequency [cycles per sample]') - >>> plt.axis('tight') - (-0.5, 0.5, -100.0, ...) # may vary - >>> plt.show() - - """ - from numpy.dual import i0 - if M == 1: - return np.array([1.]) - n = arange(0, M) - alpha = (M-1)/2.0 - return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta)) - - -def _sinc_dispatcher(x): - return (x,) - - -@array_function_dispatch(_sinc_dispatcher) -def sinc(x): - """ - Return the sinc function. - - The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`. - - Parameters - ---------- - x : ndarray - Array (possibly multi-dimensional) of values for which to to - calculate ``sinc(x)``. - - Returns - ------- - out : ndarray - ``sinc(x)``, which has the same shape as the input. - - Notes - ----- - ``sinc(0)`` is the limit value 1. - - The name sinc is short for "sine cardinal" or "sinus cardinalis". - - The sinc function is used in various signal processing applications, - including in anti-aliasing, in the construction of a Lanczos resampling - filter, and in interpolation. - - For bandlimited interpolation of discrete-time signals, the ideal - interpolation kernel is proportional to the sinc function. - - References - ---------- - .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web - Resource. http://mathworld.wolfram.com/SincFunction.html - .. 
[2] Wikipedia, "Sinc function", - https://en.wikipedia.org/wiki/Sinc_function - - Examples - -------- - >>> import matplotlib.pyplot as plt - >>> x = np.linspace(-4, 4, 41) - >>> np.sinc(x) - array([-3.89804309e-17, -4.92362781e-02, -8.40918587e-02, # may vary - -8.90384387e-02, -5.84680802e-02, 3.89804309e-17, - 6.68206631e-02, 1.16434881e-01, 1.26137788e-01, - 8.50444803e-02, -3.89804309e-17, -1.03943254e-01, - -1.89206682e-01, -2.16236208e-01, -1.55914881e-01, - 3.89804309e-17, 2.33872321e-01, 5.04551152e-01, - 7.56826729e-01, 9.35489284e-01, 1.00000000e+00, - 9.35489284e-01, 7.56826729e-01, 5.04551152e-01, - 2.33872321e-01, 3.89804309e-17, -1.55914881e-01, - -2.16236208e-01, -1.89206682e-01, -1.03943254e-01, - -3.89804309e-17, 8.50444803e-02, 1.26137788e-01, - 1.16434881e-01, 6.68206631e-02, 3.89804309e-17, - -5.84680802e-02, -8.90384387e-02, -8.40918587e-02, - -4.92362781e-02, -3.89804309e-17]) - - >>> plt.plot(x, np.sinc(x)) - [] - >>> plt.title("Sinc Function") - Text(0.5, 1.0, 'Sinc Function') - >>> plt.ylabel("Amplitude") - Text(0, 0.5, 'Amplitude') - >>> plt.xlabel("X") - Text(0.5, 0, 'X') - >>> plt.show() - - """ - x = np.asanyarray(x) - y = pi * where(x == 0, 1.0e-20, x) - return sin(y)/y - - -def _msort_dispatcher(a): - return (a,) - - -@array_function_dispatch(_msort_dispatcher) -def msort(a): - """ - Return a copy of an array sorted along the first axis. - - Parameters - ---------- - a : array_like - Array to be sorted. - - Returns - ------- - sorted_array : ndarray - Array of the same type and shape as `a`. - - See Also - -------- - sort - - Notes - ----- - ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``. - - """ - b = array(a, subok=True, copy=True) - b.sort(0) - return b - - -def _ureduce(a, func, **kwargs): - """ - Internal Function. - Call `func` with `a` as first argument swapping the axes to use extended - axis on functions that don't support it natively. - - Returns result and a.shape with axis dims set to 1. 
- - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - func : callable - Reduction function capable of receiving a single axis argument. - It is called with `a` as first argument followed by `kwargs`. - kwargs : keyword arguments - additional keyword arguments to pass to `func`. - - Returns - ------- - result : tuple - Result of func(a, **kwargs) and a.shape with axis dims set to 1 - which can be used to reshape the result to the same shape a ufunc with - keepdims=True would produce. - - """ - a = np.asanyarray(a) - axis = kwargs.get('axis', None) - if axis is not None: - keepdim = list(a.shape) - nd = a.ndim - axis = _nx.normalize_axis_tuple(axis, nd) - - for ax in axis: - keepdim[ax] = 1 - - if len(axis) == 1: - kwargs['axis'] = axis[0] - else: - keep = set(range(nd)) - set(axis) - nkeep = len(keep) - # swap axis that should not be reduced to front - for i, s in enumerate(sorted(keep)): - a = a.swapaxes(i, s) - # merge reduced axis - a = a.reshape(a.shape[:nkeep] + (-1,)) - kwargs['axis'] = -1 - keepdim = tuple(keepdim) - else: - keepdim = (1,) * a.ndim - - r = func(a, **kwargs) - return r, keepdim - - -def _median_dispatcher( - a, axis=None, out=None, overwrite_input=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_median_dispatcher) -def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): - """ - Compute the median along the specified axis. - - Returns the median of the array elements. - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - axis : {int, sequence of int, None}, optional - Axis or axes along which the medians are computed. The default - is to compute the median along a flattened version of the array. - A sequence of axes is supported since version 1.9.0. - out : ndarray, optional - Alternative output array in which to place the result. 
It must - have the same shape and buffer length as the expected output, - but the type (of the output) will be cast if necessary. - overwrite_input : bool, optional - If True, then allow use of memory of input array `a` for - calculations. The input array will be modified by the call to - `median`. This will save memory when you do not need to preserve - the contents of the input array. Treat the input as undefined, - but it will probably be fully or partially sorted. Default is - False. If `overwrite_input` is ``True`` and `a` is not already an - `ndarray`, an error will be raised. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `arr`. - - .. versionadded:: 1.9.0 - - Returns - ------- - median : ndarray - A new array holding the result. If the input contains integers - or floats smaller than ``float64``, then the output data-type is - ``np.float64``. Otherwise, the data-type of the output is the - same as that of the input. If `out` is specified, that array is - returned instead. - - See Also - -------- - mean, percentile - - Notes - ----- - Given a vector ``V`` of length ``N``, the median of ``V`` is the - middle value of a sorted copy of ``V``, ``V_sorted`` - i - e., ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the - two middle values of ``V_sorted`` when ``N`` is even. 
- - Examples - -------- - >>> a = np.array([[10, 7, 4], [3, 2, 1]]) - >>> a - array([[10, 7, 4], - [ 3, 2, 1]]) - >>> np.median(a) - 3.5 - >>> np.median(a, axis=0) - array([6.5, 4.5, 2.5]) - >>> np.median(a, axis=1) - array([7., 2.]) - >>> m = np.median(a, axis=0) - >>> out = np.zeros_like(m) - >>> np.median(a, axis=0, out=m) - array([6.5, 4.5, 2.5]) - >>> m - array([6.5, 4.5, 2.5]) - >>> b = a.copy() - >>> np.median(b, axis=1, overwrite_input=True) - array([7., 2.]) - >>> assert not np.all(a==b) - >>> b = a.copy() - >>> np.median(b, axis=None, overwrite_input=True) - 3.5 - >>> assert not np.all(a==b) - - """ - r, k = _ureduce(a, func=_median, axis=axis, out=out, - overwrite_input=overwrite_input) - if keepdims: - return r.reshape(k) - else: - return r - -def _median(a, axis=None, out=None, overwrite_input=False): - # can't be reasonably be implemented in terms of percentile as we have to - # call mean to not break astropy - a = np.asanyarray(a) - - # Set the partition indexes - if axis is None: - sz = a.size - else: - sz = a.shape[axis] - if sz % 2 == 0: - szh = sz // 2 - kth = [szh - 1, szh] - else: - kth = [(sz - 1) // 2] - # Check if the array contains any nan's - if np.issubdtype(a.dtype, np.inexact): - kth.append(-1) - - if overwrite_input: - if axis is None: - part = a.ravel() - part.partition(kth) - else: - a.partition(kth, axis=axis) - part = a - else: - part = partition(a, kth, axis=axis) - - if part.shape == (): - # make 0-D arrays work - return part.item() - if axis is None: - axis = 0 - - indexer = [slice(None)] * part.ndim - index = part.shape[axis] // 2 - if part.shape[axis] % 2 == 1: - # index with slice to allow mean (below) to work - indexer[axis] = slice(index, index+1) - else: - indexer[axis] = slice(index-1, index+1) - indexer = tuple(indexer) - - # Check if the array contains any nan's - if np.issubdtype(a.dtype, np.inexact) and sz > 0: - # warn and return nans like mean would - rout = mean(part[indexer], axis=axis, out=out) - return 
np.lib.utils._median_nancheck(part, rout, axis, out) - else: - # if there are no nans - # Use mean in odd and even case to coerce data type - # and check, use out array. - return mean(part[indexer], axis=axis, out=out) - - -def _percentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, - interpolation=None, keepdims=None): - return (a, q, out) - - -@array_function_dispatch(_percentile_dispatcher) -def percentile(a, q, axis=None, out=None, - overwrite_input=False, interpolation='linear', keepdims=False): - """ - Compute the q-th percentile of the data along the specified axis. - - Returns the q-th percentile(s) of the array elements. - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - q : array_like of float - Percentile or sequence of percentiles to compute, which must be between - 0 and 100 inclusive. - axis : {int, tuple of int, None}, optional - Axis or axes along which the percentiles are computed. The - default is to compute the percentile(s) along a flattened - version of the array. - - .. versionchanged:: 1.9.0 - A tuple of axes is supported - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output, - but the type (of the output) will be cast if necessary. - overwrite_input : bool, optional - If True, then allow the input array `a` to be modified by intermediate - calculations, to save memory. In this case, the contents of the input - `a` after this function completes is undefined. - - interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} - This optional parameter specifies the interpolation method to - use when the desired percentile lies between two data points - ``i < j``: - - * 'linear': ``i + (j - i) * fraction``, where ``fraction`` - is the fractional part of the index surrounded by ``i`` - and ``j``. - * 'lower': ``i``. - * 'higher': ``j``. 
- * 'nearest': ``i`` or ``j``, whichever is nearest. - * 'midpoint': ``(i + j) / 2``. - - .. versionadded:: 1.9.0 - keepdims : bool, optional - If this is set to True, the axes which are reduced are left in - the result as dimensions with size one. With this option, the - result will broadcast correctly against the original array `a`. - - .. versionadded:: 1.9.0 - - Returns - ------- - percentile : scalar or ndarray - If `q` is a single percentile and `axis=None`, then the result - is a scalar. If multiple percentiles are given, first axis of - the result corresponds to the percentiles. The other axes are - the axes that remain after the reduction of `a`. If the input - contains integers or floats smaller than ``float64``, the output - data-type is ``float64``. Otherwise, the output data-type is the - same as that of the input. If `out` is specified, that array is - returned instead. - - See Also - -------- - mean - median : equivalent to ``percentile(..., 50)`` - nanpercentile - quantile : equivalent to percentile, except with q in the range [0, 1]. - - Notes - ----- - Given a vector ``V`` of length ``N``, the q-th percentile of - ``V`` is the value ``q/100`` of the way from the minimum to the - maximum in a sorted copy of ``V``. The values and distances of - the two nearest neighbors as well as the `interpolation` parameter - will determine the percentile if the normalized ranking does not - match the location of ``q`` exactly. This function is the same as - the median if ``q=50``, the same as the minimum if ``q=0`` and the - same as the maximum if ``q=100``. 
- - Examples - -------- - >>> a = np.array([[10, 7, 4], [3, 2, 1]]) - >>> a - array([[10, 7, 4], - [ 3, 2, 1]]) - >>> np.percentile(a, 50) - 3.5 - >>> np.percentile(a, 50, axis=0) - array([6.5, 4.5, 2.5]) - >>> np.percentile(a, 50, axis=1) - array([7., 2.]) - >>> np.percentile(a, 50, axis=1, keepdims=True) - array([[7.], - [2.]]) - - >>> m = np.percentile(a, 50, axis=0) - >>> out = np.zeros_like(m) - >>> np.percentile(a, 50, axis=0, out=out) - array([6.5, 4.5, 2.5]) - >>> m - array([6.5, 4.5, 2.5]) - - >>> b = a.copy() - >>> np.percentile(b, 50, axis=1, overwrite_input=True) - array([7., 2.]) - >>> assert not np.all(a == b) - - The different types of interpolation can be visualized graphically: - - .. plot:: - - import matplotlib.pyplot as plt - - a = np.arange(4) - p = np.linspace(0, 100, 6001) - ax = plt.gca() - lines = [ - ('linear', None), - ('higher', '--'), - ('lower', '--'), - ('nearest', '-.'), - ('midpoint', '-.'), - ] - for interpolation, style in lines: - ax.plot( - p, np.percentile(a, p, interpolation=interpolation), - label=interpolation, linestyle=style) - ax.set( - title='Interpolation methods for list: ' + str(a), - xlabel='Percentile', - ylabel='List item returned', - yticks=a) - ax.legend() - plt.show() - - """ - q = np.true_divide(q, 100) - q = asanyarray(q) # undo any decay that the ufunc performed (see gh-13105) - if not _quantile_is_valid(q): - raise ValueError("Percentiles must be in the range [0, 100]") - return _quantile_unchecked( - a, q, axis, out, overwrite_input, interpolation, keepdims) - - -def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, - interpolation=None, keepdims=None): - return (a, q, out) - - -@array_function_dispatch(_quantile_dispatcher) -def quantile(a, q, axis=None, out=None, - overwrite_input=False, interpolation='linear', keepdims=False): - """ - Compute the q-th quantile of the data along the specified axis. - - .. 
versionadded:: 1.15.0 - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - q : array_like of float - Quantile or sequence of quantiles to compute, which must be between - 0 and 1 inclusive. - axis : {int, tuple of int, None}, optional - Axis or axes along which the quantiles are computed. The - default is to compute the quantile(s) along a flattened - version of the array. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output, - but the type (of the output) will be cast if necessary. - overwrite_input : bool, optional - If True, then allow the input array `a` to be modified by intermediate - calculations, to save memory. In this case, the contents of the input - `a` after this function completes is undefined. - interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} - This optional parameter specifies the interpolation method to - use when the desired quantile lies between two data points - ``i < j``: - - * linear: ``i + (j - i) * fraction``, where ``fraction`` - is the fractional part of the index surrounded by ``i`` - and ``j``. - * lower: ``i``. - * higher: ``j``. - * nearest: ``i`` or ``j``, whichever is nearest. - * midpoint: ``(i + j) / 2``. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left in - the result as dimensions with size one. With this option, the - result will broadcast correctly against the original array `a`. - - Returns - ------- - quantile : scalar or ndarray - If `q` is a single quantile and `axis=None`, then the result - is a scalar. If multiple quantiles are given, first axis of - the result corresponds to the quantiles. The other axes are - the axes that remain after the reduction of `a`. If the input - contains integers or floats smaller than ``float64``, the output - data-type is ``float64``. 
Otherwise, the output data-type is the - same as that of the input. If `out` is specified, that array is - returned instead. - - See Also - -------- - mean - percentile : equivalent to quantile, but with q in the range [0, 100]. - median : equivalent to ``quantile(..., 0.5)`` - nanquantile - - Notes - ----- - Given a vector ``V`` of length ``N``, the q-th quantile of - ``V`` is the value ``q`` of the way from the minimum to the - maximum in a sorted copy of ``V``. The values and distances of - the two nearest neighbors as well as the `interpolation` parameter - will determine the quantile if the normalized ranking does not - match the location of ``q`` exactly. This function is the same as - the median if ``q=0.5``, the same as the minimum if ``q=0.0`` and the - same as the maximum if ``q=1.0``. - - Examples - -------- - >>> a = np.array([[10, 7, 4], [3, 2, 1]]) - >>> a - array([[10, 7, 4], - [ 3, 2, 1]]) - >>> np.quantile(a, 0.5) - 3.5 - >>> np.quantile(a, 0.5, axis=0) - array([6.5, 4.5, 2.5]) - >>> np.quantile(a, 0.5, axis=1) - array([7., 2.]) - >>> np.quantile(a, 0.5, axis=1, keepdims=True) - array([[7.], - [2.]]) - >>> m = np.quantile(a, 0.5, axis=0) - >>> out = np.zeros_like(m) - >>> np.quantile(a, 0.5, axis=0, out=out) - array([6.5, 4.5, 2.5]) - >>> m - array([6.5, 4.5, 2.5]) - >>> b = a.copy() - >>> np.quantile(b, 0.5, axis=1, overwrite_input=True) - array([7., 2.]) - >>> assert not np.all(a == b) - """ - q = np.asanyarray(q) - if not _quantile_is_valid(q): - raise ValueError("Quantiles must be in the range [0, 1]") - return _quantile_unchecked( - a, q, axis, out, overwrite_input, interpolation, keepdims) - - -def _quantile_unchecked(a, q, axis=None, out=None, overwrite_input=False, - interpolation='linear', keepdims=False): - """Assumes that q is in [0, 1], and is an ndarray""" - r, k = _ureduce(a, func=_quantile_ureduce_func, q=q, axis=axis, out=out, - overwrite_input=overwrite_input, - interpolation=interpolation) - if keepdims: - return r.reshape(q.shape 
+ k) - else: - return r - - -def _quantile_is_valid(q): - # avoid expensive reductions, relevant for arrays with < O(1000) elements - if q.ndim == 1 and q.size < 10: - for i in range(q.size): - if q[i] < 0.0 or q[i] > 1.0: - return False - else: - # faster than any() - if np.count_nonzero(q < 0.0) or np.count_nonzero(q > 1.0): - return False - return True - - -def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False, - interpolation='linear', keepdims=False): - a = asarray(a) - if q.ndim == 0: - # Do not allow 0-d arrays because following code fails for scalar - zerod = True - q = q[None] - else: - zerod = False - - # prepare a for partitioning - if overwrite_input: - if axis is None: - ap = a.ravel() - else: - ap = a - else: - if axis is None: - ap = a.flatten() - else: - ap = a.copy() - - if axis is None: - axis = 0 - - Nx = ap.shape[axis] - indices = q * (Nx - 1) - - # round fractional indices according to interpolation method - if interpolation == 'lower': - indices = floor(indices).astype(intp) - elif interpolation == 'higher': - indices = ceil(indices).astype(intp) - elif interpolation == 'midpoint': - indices = 0.5 * (floor(indices) + ceil(indices)) - elif interpolation == 'nearest': - indices = around(indices).astype(intp) - elif interpolation == 'linear': - pass # keep index as fraction and interpolate - else: - raise ValueError( - "interpolation can only be 'linear', 'lower' 'higher', " - "'midpoint', or 'nearest'") - - n = np.array(False, dtype=bool) # check for nan's flag - if indices.dtype == intp: # take the points along axis - # Check if the array contains any nan's - if np.issubdtype(a.dtype, np.inexact): - indices = concatenate((indices, [-1])) - - ap.partition(indices, axis=axis) - # ensure axis with q-th is first - ap = np.moveaxis(ap, axis, 0) - axis = 0 - - # Check if the array contains any nan's - if np.issubdtype(a.dtype, np.inexact): - indices = indices[:-1] - n = np.isnan(ap[-1:, ...]) - - if zerod: - indices = indices[0] 
- r = take(ap, indices, axis=axis, out=out) - - - else: # weight the points above and below the indices - indices_below = floor(indices).astype(intp) - indices_above = indices_below + 1 - indices_above[indices_above > Nx - 1] = Nx - 1 - - # Check if the array contains any nan's - if np.issubdtype(a.dtype, np.inexact): - indices_above = concatenate((indices_above, [-1])) - - weights_above = indices - indices_below - weights_below = 1 - weights_above - - weights_shape = [1, ] * ap.ndim - weights_shape[axis] = len(indices) - weights_below.shape = weights_shape - weights_above.shape = weights_shape - - ap.partition(concatenate((indices_below, indices_above)), axis=axis) - - # ensure axis with q-th is first - ap = np.moveaxis(ap, axis, 0) - weights_below = np.moveaxis(weights_below, axis, 0) - weights_above = np.moveaxis(weights_above, axis, 0) - axis = 0 - - # Check if the array contains any nan's - if np.issubdtype(a.dtype, np.inexact): - indices_above = indices_above[:-1] - n = np.isnan(ap[-1:, ...]) - - x1 = take(ap, indices_below, axis=axis) * weights_below - x2 = take(ap, indices_above, axis=axis) * weights_above - - # ensure axis with q-th is first - x1 = np.moveaxis(x1, axis, 0) - x2 = np.moveaxis(x2, axis, 0) - - if zerod: - x1 = x1.squeeze(0) - x2 = x2.squeeze(0) - - if out is not None: - r = add(x1, x2, out=out) - else: - r = add(x1, x2) - - if np.any(n): - if zerod: - if ap.ndim == 1: - if out is not None: - out[...] = a.dtype.type(np.nan) - r = out - else: - r = a.dtype.type(np.nan) - else: - r[..., n.squeeze(0)] = a.dtype.type(np.nan) - else: - if r.ndim == 1: - r[:] = a.dtype.type(np.nan) - else: - r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan) - - return r - - -def _trapz_dispatcher(y, x=None, dx=None, axis=None): - return (y, x) - - -@array_function_dispatch(_trapz_dispatcher) -def trapz(y, x=None, dx=1.0, axis=-1): - """ - Integrate along the given axis using the composite trapezoidal rule. - - Integrate `y` (`x`) along given axis. 
- - Parameters - ---------- - y : array_like - Input array to integrate. - x : array_like, optional - The sample points corresponding to the `y` values. If `x` is None, - the sample points are assumed to be evenly spaced `dx` apart. The - default is None. - dx : scalar, optional - The spacing between sample points when `x` is None. The default is 1. - axis : int, optional - The axis along which to integrate. - - Returns - ------- - trapz : float - Definite integral as approximated by trapezoidal rule. - - See Also - -------- - sum, cumsum - - Notes - ----- - Image [2]_ illustrates trapezoidal rule -- y-axis locations of points - will be taken from `y` array, by default x-axis distances between - points will be 1.0, alternatively they can be provided with `x` array - or with `dx` scalar. Return value will be equal to combined area under - the red lines. - - - References - ---------- - .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule - - .. [2] Illustration image: - https://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png - - Examples - -------- - >>> np.trapz([1,2,3]) - 4.0 - >>> np.trapz([1,2,3], x=[4,6,8]) - 8.0 - >>> np.trapz([1,2,3], dx=2) - 8.0 - >>> a = np.arange(6).reshape(2, 3) - >>> a - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.trapz(a, axis=0) - array([1.5, 2.5, 3.5]) - >>> np.trapz(a, axis=1) - array([2., 8.]) - - """ - y = asanyarray(y) - if x is None: - d = dx - else: - x = asanyarray(x) - if x.ndim == 1: - d = diff(x) - # reshape to correct shape - shape = [1]*y.ndim - shape[axis] = d.shape[0] - d = d.reshape(shape) - else: - d = diff(x, axis=axis) - nd = y.ndim - slice1 = [slice(None)]*nd - slice2 = [slice(None)]*nd - slice1[axis] = slice(1, None) - slice2[axis] = slice(None, -1) - try: - ret = (d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0).sum(axis) - except ValueError: - # Operations didn't work, cast to ndarray - d = np.asarray(d) - y = np.asarray(y) - ret = add.reduce(d * 
(y[tuple(slice1)]+y[tuple(slice2)])/2.0, axis) - return ret - - -def _meshgrid_dispatcher(*xi, **kwargs): - return xi - - -# Based on scitools meshgrid -@array_function_dispatch(_meshgrid_dispatcher) -def meshgrid(*xi, **kwargs): - """ - Return coordinate matrices from coordinate vectors. - - Make N-D coordinate arrays for vectorized evaluations of - N-D scalar/vector fields over N-D grids, given - one-dimensional coordinate arrays x1, x2,..., xn. - - .. versionchanged:: 1.9 - 1-D and 0-D cases are allowed. - - Parameters - ---------- - x1, x2,..., xn : array_like - 1-D arrays representing the coordinates of a grid. - indexing : {'xy', 'ij'}, optional - Cartesian ('xy', default) or matrix ('ij') indexing of output. - See Notes for more details. - - .. versionadded:: 1.7.0 - sparse : bool, optional - If True a sparse grid is returned in order to conserve memory. - Default is False. - - .. versionadded:: 1.7.0 - copy : bool, optional - If False, a view into the original arrays are returned in order to - conserve memory. Default is True. Please note that - ``sparse=False, copy=False`` will likely return non-contiguous - arrays. Furthermore, more than one element of a broadcast array - may refer to a single memory location. If you need to write to the - arrays, make copies first. - - .. versionadded:: 1.7.0 - - Returns - ------- - X1, X2,..., XN : ndarray - For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` , - return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij' - or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy' - with the elements of `xi` repeated to fill the matrix along - the first dimension for `x1`, the second for `x2` and so on. - - Notes - ----- - This function supports both indexing conventions through the indexing - keyword argument. Giving the string 'ij' returns a meshgrid with - matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing. 
- In the 2-D case with inputs of length M and N, the outputs are of shape - (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case - with inputs of length M, N and P, outputs are of shape (N, M, P) for - 'xy' indexing and (M, N, P) for 'ij' indexing. The difference is - illustrated by the following code snippet:: - - xv, yv = np.meshgrid(x, y, sparse=False, indexing='ij') - for i in range(nx): - for j in range(ny): - # treat xv[i,j], yv[i,j] - - xv, yv = np.meshgrid(x, y, sparse=False, indexing='xy') - for i in range(nx): - for j in range(ny): - # treat xv[j,i], yv[j,i] - - In the 1-D and 0-D case, the indexing and sparse keywords have no effect. - - See Also - -------- - index_tricks.mgrid : Construct a multi-dimensional "meshgrid" - using indexing notation. - index_tricks.ogrid : Construct an open multi-dimensional "meshgrid" - using indexing notation. - - Examples - -------- - >>> nx, ny = (3, 2) - >>> x = np.linspace(0, 1, nx) - >>> y = np.linspace(0, 1, ny) - >>> xv, yv = np.meshgrid(x, y) - >>> xv - array([[0. , 0.5, 1. ], - [0. , 0.5, 1. ]]) - >>> yv - array([[0., 0., 0.], - [1., 1., 1.]]) - >>> xv, yv = np.meshgrid(x, y, sparse=True) # make sparse output arrays - >>> xv - array([[0. , 0.5, 1. ]]) - >>> yv - array([[0.], - [1.]]) - - `meshgrid` is very useful to evaluate functions on a grid. 
- - >>> import matplotlib.pyplot as plt - >>> x = np.arange(-5, 5, 0.1) - >>> y = np.arange(-5, 5, 0.1) - >>> xx, yy = np.meshgrid(x, y, sparse=True) - >>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2) - >>> h = plt.contourf(x,y,z) - >>> plt.show() - - """ - ndim = len(xi) - - copy_ = kwargs.pop('copy', True) - sparse = kwargs.pop('sparse', False) - indexing = kwargs.pop('indexing', 'xy') - - if kwargs: - raise TypeError("meshgrid() got an unexpected keyword argument '%s'" - % (list(kwargs)[0],)) - - if indexing not in ['xy', 'ij']: - raise ValueError( - "Valid values for `indexing` are 'xy' and 'ij'.") - - s0 = (1,) * ndim - output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1:]) - for i, x in enumerate(xi)] - - if indexing == 'xy' and ndim > 1: - # switch first and second axis - output[0].shape = (1, -1) + s0[2:] - output[1].shape = (-1, 1) + s0[2:] - - if not sparse: - # Return the full N-D matrix (not only the 1-D vector) - output = np.broadcast_arrays(*output, subok=True) - - if copy_: - output = [x.copy() for x in output] - - return output - - -def _delete_dispatcher(arr, obj, axis=None): - return (arr, obj) - - -@array_function_dispatch(_delete_dispatcher) -def delete(arr, obj, axis=None): - """ - Return a new array with sub-arrays along an axis deleted. For a one - dimensional array, this returns those entries not returned by - `arr[obj]`. - - Parameters - ---------- - arr : array_like - Input array. - obj : slice, int or array of ints - Indicate indices of sub-arrays to remove along the specified axis. - axis : int, optional - The axis along which to delete the subarray defined by `obj`. - If `axis` is None, `obj` is applied to the flattened array. - - Returns - ------- - out : ndarray - A copy of `arr` with the elements specified by `obj` removed. Note - that `delete` does not occur in-place. If `axis` is None, `out` is - a flattened array. - - See Also - -------- - insert : Insert elements into an array. 
- append : Append elements at the end of an array. - - Notes - ----- - Often it is preferable to use a boolean mask. For example: - - >>> arr = np.arange(12) + 1 - >>> mask = np.ones(len(arr), dtype=bool) - >>> mask[[0,2,4]] = False - >>> result = arr[mask,...] - - Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further - use of `mask`. - - Examples - -------- - >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) - >>> arr - array([[ 1, 2, 3, 4], - [ 5, 6, 7, 8], - [ 9, 10, 11, 12]]) - >>> np.delete(arr, 1, 0) - array([[ 1, 2, 3, 4], - [ 9, 10, 11, 12]]) - - >>> np.delete(arr, np.s_[::2], 1) - array([[ 2, 4], - [ 6, 8], - [10, 12]]) - >>> np.delete(arr, [1,3,5], None) - array([ 1, 3, 5, 7, 8, 9, 10, 11, 12]) - - """ - wrap = None - if type(arr) is not ndarray: - try: - wrap = arr.__array_wrap__ - except AttributeError: - pass - - arr = asarray(arr) - ndim = arr.ndim - arrorder = 'F' if arr.flags.fnc else 'C' - if axis is None: - if ndim != 1: - arr = arr.ravel() - ndim = arr.ndim - axis = -1 - - if ndim == 0: - # 2013-09-24, 1.9 - warnings.warn( - "in the future the special handling of scalars will be removed " - "from delete and raise an error", DeprecationWarning, stacklevel=3) - if wrap: - return wrap(arr) - else: - return arr.copy(order=arrorder) - - axis = normalize_axis_index(axis, ndim) - - slobj = [slice(None)]*ndim - N = arr.shape[axis] - newshape = list(arr.shape) - - if isinstance(obj, slice): - start, stop, step = obj.indices(N) - xr = range(start, stop, step) - numtodel = len(xr) - - if numtodel <= 0: - if wrap: - return wrap(arr.copy(order=arrorder)) - else: - return arr.copy(order=arrorder) - - # Invert if step is negative: - if step < 0: - step = -step - start = xr[-1] - stop = xr[0] + 1 - - newshape[axis] -= numtodel - new = empty(newshape, arr.dtype, arrorder) - # copy initial chunk - if start == 0: - pass - else: - slobj[axis] = slice(None, start) - new[tuple(slobj)] = arr[tuple(slobj)] - # copy end chunk - if stop == N: - 
pass - else: - slobj[axis] = slice(stop-numtodel, None) - slobj2 = [slice(None)]*ndim - slobj2[axis] = slice(stop, None) - new[tuple(slobj)] = arr[tuple(slobj2)] - # copy middle pieces - if step == 1: - pass - else: # use array indexing. - keep = ones(stop-start, dtype=bool) - keep[:stop-start:step] = False - slobj[axis] = slice(start, stop-numtodel) - slobj2 = [slice(None)]*ndim - slobj2[axis] = slice(start, stop) - arr = arr[tuple(slobj2)] - slobj2[axis] = keep - new[tuple(slobj)] = arr[tuple(slobj2)] - if wrap: - return wrap(new) - else: - return new - - _obj = obj - obj = np.asarray(obj) - # After removing the special handling of booleans and out of - # bounds values, the conversion to the array can be removed. - if obj.dtype == bool: - warnings.warn("in the future insert will treat boolean arrays and " - "array-likes as boolean index instead of casting it " - "to integer", FutureWarning, stacklevel=3) - obj = obj.astype(intp) - if isinstance(_obj, (int, long, integer)): - # optimization for a single value - obj = obj.item() - if (obj < -N or obj >= N): - raise IndexError( - "index %i is out of bounds for axis %i with " - "size %i" % (obj, axis, N)) - if (obj < 0): - obj += N - newshape[axis] -= 1 - new = empty(newshape, arr.dtype, arrorder) - slobj[axis] = slice(None, obj) - new[tuple(slobj)] = arr[tuple(slobj)] - slobj[axis] = slice(obj, None) - slobj2 = [slice(None)]*ndim - slobj2[axis] = slice(obj+1, None) - new[tuple(slobj)] = arr[tuple(slobj2)] - else: - if obj.size == 0 and not isinstance(_obj, np.ndarray): - obj = obj.astype(intp) - if not np.can_cast(obj, intp, 'same_kind'): - # obj.size = 1 special case always failed and would just - # give superfluous warnings. 
- # 2013-09-24, 1.9 - warnings.warn( - "using a non-integer array as obj in delete will result in an " - "error in the future", DeprecationWarning, stacklevel=3) - obj = obj.astype(intp) - keep = ones(N, dtype=bool) - - # Test if there are out of bound indices, this is deprecated - inside_bounds = (obj < N) & (obj >= -N) - if not inside_bounds.all(): - # 2013-09-24, 1.9 - warnings.warn( - "in the future out of bounds indices will raise an error " - "instead of being ignored by `numpy.delete`.", - DeprecationWarning, stacklevel=3) - obj = obj[inside_bounds] - positive_indices = obj >= 0 - if not positive_indices.all(): - warnings.warn( - "in the future negative indices will not be ignored by " - "`numpy.delete`.", FutureWarning, stacklevel=3) - obj = obj[positive_indices] - - keep[obj, ] = False - slobj[axis] = keep - new = arr[tuple(slobj)] - - if wrap: - return wrap(new) - else: - return new - - -def _insert_dispatcher(arr, obj, values, axis=None): - return (arr, obj, values) - - -@array_function_dispatch(_insert_dispatcher) -def insert(arr, obj, values, axis=None): - """ - Insert values along the given axis before the given indices. - - Parameters - ---------- - arr : array_like - Input array. - obj : int, slice or sequence of ints - Object that defines the index or indices before which `values` is - inserted. - - .. versionadded:: 1.8.0 - - Support for multiple insertions when `obj` is a single scalar or a - sequence with one element (similar to calling insert multiple - times). - values : array_like - Values to insert into `arr`. If the type of `values` is different - from that of `arr`, `values` is converted to the type of `arr`. - `values` should be shaped so that ``arr[...,obj,...] = values`` - is legal. - axis : int, optional - Axis along which to insert `values`. If `axis` is None then `arr` - is flattened first. - - Returns - ------- - out : ndarray - A copy of `arr` with `values` inserted. 
Note that `insert` - does not occur in-place: a new array is returned. If - `axis` is None, `out` is a flattened array. - - See Also - -------- - append : Append elements at the end of an array. - concatenate : Join a sequence of arrays along an existing axis. - delete : Delete elements from an array. - - Notes - ----- - Note that for higher dimensional inserts `obj=0` behaves very different - from `obj=[0]` just like `arr[:,0,:] = values` is different from - `arr[:,[0],:] = values`. - - Examples - -------- - >>> a = np.array([[1, 1], [2, 2], [3, 3]]) - >>> a - array([[1, 1], - [2, 2], - [3, 3]]) - >>> np.insert(a, 1, 5) - array([1, 5, 1, ..., 2, 3, 3]) - >>> np.insert(a, 1, 5, axis=1) - array([[1, 5, 1], - [2, 5, 2], - [3, 5, 3]]) - - Difference between sequence and scalars: - - >>> np.insert(a, [1], [[1],[2],[3]], axis=1) - array([[1, 1, 1], - [2, 2, 2], - [3, 3, 3]]) - >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1), - ... np.insert(a, [1], [[1],[2],[3]], axis=1)) - True - - >>> b = a.flatten() - >>> b - array([1, 1, 2, 2, 3, 3]) - >>> np.insert(b, [2, 2], [5, 6]) - array([1, 1, 5, ..., 2, 3, 3]) - - >>> np.insert(b, slice(2, 4), [5, 6]) - array([1, 1, 5, ..., 2, 3, 3]) - - >>> np.insert(b, [2, 2], [7.13, False]) # type casting - array([1, 1, 7, ..., 2, 3, 3]) - - >>> x = np.arange(8).reshape(2, 4) - >>> idx = (1, 3) - >>> np.insert(x, idx, 999, axis=1) - array([[ 0, 999, 1, 2, 999, 3], - [ 4, 999, 5, 6, 999, 7]]) - - """ - wrap = None - if type(arr) is not ndarray: - try: - wrap = arr.__array_wrap__ - except AttributeError: - pass - - arr = asarray(arr) - ndim = arr.ndim - arrorder = 'F' if arr.flags.fnc else 'C' - if axis is None: - if ndim != 1: - arr = arr.ravel() - ndim = arr.ndim - axis = ndim - 1 - elif ndim == 0: - # 2013-09-24, 1.9 - warnings.warn( - "in the future the special handling of scalars will be removed " - "from insert and raise an error", DeprecationWarning, stacklevel=3) - arr = arr.copy(order=arrorder) - arr[...] 
= values - if wrap: - return wrap(arr) - else: - return arr - else: - axis = normalize_axis_index(axis, ndim) - slobj = [slice(None)]*ndim - N = arr.shape[axis] - newshape = list(arr.shape) - - if isinstance(obj, slice): - # turn it into a range object - indices = arange(*obj.indices(N), **{'dtype': intp}) - else: - # need to copy obj, because indices will be changed in-place - indices = np.array(obj) - if indices.dtype == bool: - # See also delete - warnings.warn( - "in the future insert will treat boolean arrays and " - "array-likes as a boolean index instead of casting it to " - "integer", FutureWarning, stacklevel=3) - indices = indices.astype(intp) - # Code after warning period: - #if obj.ndim != 1: - # raise ValueError('boolean array argument obj to insert ' - # 'must be one dimensional') - #indices = np.flatnonzero(obj) - elif indices.ndim > 1: - raise ValueError( - "index array argument obj to insert must be one dimensional " - "or scalar") - if indices.size == 1: - index = indices.item() - if index < -N or index > N: - raise IndexError( - "index %i is out of bounds for axis %i with " - "size %i" % (obj, axis, N)) - if (index < 0): - index += N - - # There are some object array corner cases here, but we cannot avoid - # that: - values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype) - if indices.ndim == 0: - # broadcasting is very different here, since a[:,0,:] = ... behaves - # very different from a[:,[0],:] = ...! This changes values so that - # it works likes the second case. 
(here a[:,0:1,:]) - values = np.moveaxis(values, 0, axis) - numnew = values.shape[axis] - newshape[axis] += numnew - new = empty(newshape, arr.dtype, arrorder) - slobj[axis] = slice(None, index) - new[tuple(slobj)] = arr[tuple(slobj)] - slobj[axis] = slice(index, index+numnew) - new[tuple(slobj)] = values - slobj[axis] = slice(index+numnew, None) - slobj2 = [slice(None)] * ndim - slobj2[axis] = slice(index, None) - new[tuple(slobj)] = arr[tuple(slobj2)] - if wrap: - return wrap(new) - return new - elif indices.size == 0 and not isinstance(obj, np.ndarray): - # Can safely cast the empty list to intp - indices = indices.astype(intp) - - if not np.can_cast(indices, intp, 'same_kind'): - # 2013-09-24, 1.9 - warnings.warn( - "using a non-integer array as obj in insert will result in an " - "error in the future", DeprecationWarning, stacklevel=3) - indices = indices.astype(intp) - - indices[indices < 0] += N - - numnew = len(indices) - order = indices.argsort(kind='mergesort') # stable sort - indices[order] += np.arange(numnew) - - newshape[axis] += numnew - old_mask = ones(newshape[axis], dtype=bool) - old_mask[indices] = False - - new = empty(newshape, arr.dtype, arrorder) - slobj2 = [slice(None)]*ndim - slobj[axis] = indices - slobj2[axis] = old_mask - new[tuple(slobj)] = values - new[tuple(slobj2)] = arr - - if wrap: - return wrap(new) - return new - - -def _append_dispatcher(arr, values, axis=None): - return (arr, values) - - -@array_function_dispatch(_append_dispatcher) -def append(arr, values, axis=None): - """ - Append values to the end of an array. - - Parameters - ---------- - arr : array_like - Values are appended to a copy of this array. - values : array_like - These values are appended to a copy of `arr`. It must be of the - correct shape (the same shape as `arr`, excluding `axis`). If - `axis` is not specified, `values` can be any shape and will be - flattened before use. - axis : int, optional - The axis along which `values` are appended. 
If `axis` is not - given, both `arr` and `values` are flattened before use. - - Returns - ------- - append : ndarray - A copy of `arr` with `values` appended to `axis`. Note that - `append` does not occur in-place: a new array is allocated and - filled. If `axis` is None, `out` is a flattened array. - - See Also - -------- - insert : Insert elements into an array. - delete : Delete elements from an array. - - Examples - -------- - >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]]) - array([1, 2, 3, ..., 7, 8, 9]) - - When `axis` is specified, `values` must have the correct shape. - - >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0) - array([[1, 2, 3], - [4, 5, 6], - [7, 8, 9]]) - >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0) - Traceback (most recent call last): - ... - ValueError: all the input arrays must have same number of dimensions - - """ - arr = asanyarray(arr) - if axis is None: - if arr.ndim != 1: - arr = arr.ravel() - values = ravel(values) - axis = arr.ndim-1 - return concatenate((arr, values), axis=axis) - - -def _digitize_dispatcher(x, bins, right=None): - return (x, bins) - - -@array_function_dispatch(_digitize_dispatcher) -def digitize(x, bins, right=False): - """ - Return the indices of the bins to which each value in input array belongs. - - ========= ============= ============================ - `right` order of bins returned index `i` satisfies - ========= ============= ============================ - ``False`` increasing ``bins[i-1] <= x < bins[i]`` - ``True`` increasing ``bins[i-1] < x <= bins[i]`` - ``False`` decreasing ``bins[i-1] > x >= bins[i]`` - ``True`` decreasing ``bins[i-1] >= x > bins[i]`` - ========= ============= ============================ - - If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is - returned as appropriate. - - Parameters - ---------- - x : array_like - Input array to be binned. Prior to NumPy 1.10.0, this array had to - be 1-dimensional, but can now have any shape. 
- bins : array_like - Array of bins. It has to be 1-dimensional and monotonic. - right : bool, optional - Indicating whether the intervals include the right or the left bin - edge. Default behavior is (right==False) indicating that the interval - does not include the right edge. The left bin end is open in this - case, i.e., bins[i-1] <= x < bins[i] is the default behavior for - monotonically increasing bins. - - Returns - ------- - indices : ndarray of ints - Output array of indices, of same shape as `x`. - - Raises - ------ - ValueError - If `bins` is not monotonic. - TypeError - If the type of the input is complex. - - See Also - -------- - bincount, histogram, unique, searchsorted - - Notes - ----- - If values in `x` are such that they fall outside the bin range, - attempting to index `bins` with the indices that `digitize` returns - will result in an IndexError. - - .. versionadded:: 1.10.0 - - `np.digitize` is implemented in terms of `np.searchsorted`. This means - that a binary search is used to bin the values, which scales much better - for larger number of bins than the previous linear search. It also removes - the requirement for the input array to be 1-dimensional. - - For monotonically _increasing_ `bins`, the following are equivalent:: - - np.digitize(x, bins, right=True) - np.searchsorted(bins, x, side='left') - - Note that as the order of the arguments are reversed, the side must be too. - The `searchsorted` call is marginally faster, as it does not do any - monotonicity checks. Perhaps more importantly, it supports all dtypes. - - Examples - -------- - >>> x = np.array([0.2, 6.4, 3.0, 1.6]) - >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0]) - >>> inds = np.digitize(x, bins) - >>> inds - array([1, 4, 3, 2]) - >>> for n in range(x.size): - ... print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]]) - ... 
- 0.0 <= 0.2 < 1.0 - 4.0 <= 6.4 < 10.0 - 2.5 <= 3.0 < 4.0 - 1.0 <= 1.6 < 2.5 - - >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.]) - >>> bins = np.array([0, 5, 10, 15, 20]) - >>> np.digitize(x,bins,right=True) - array([1, 2, 3, 4, 4]) - >>> np.digitize(x,bins,right=False) - array([1, 3, 3, 4, 5]) - """ - x = _nx.asarray(x) - bins = _nx.asarray(bins) - - # here for compatibility, searchsorted below is happy to take this - if np.issubdtype(x.dtype, _nx.complexfloating): - raise TypeError("x may not be complex") - - mono = _monotonicity(bins) - if mono == 0: - raise ValueError("bins must be monotonically increasing or decreasing") - - # this is backwards because the arguments below are swapped - side = 'left' if right else 'right' - if mono == -1: - # reverse the bins, and invert the results - return len(bins) - _nx.searchsorted(bins[::-1], x, side=side) - else: - return _nx.searchsorted(bins, x, side=side) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/histograms.py b/venv/lib/python3.7/site-packages/numpy/lib/histograms.py deleted file mode 100644 index 03c365a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/histograms.py +++ /dev/null @@ -1,1123 +0,0 @@ -""" -Histogram-related functions -""" -from __future__ import division, absolute_import, print_function - -import contextlib -import functools -import operator -import warnings - -import numpy as np -from numpy.compat.py3k import basestring -from numpy.core import overrides - -__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges'] - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - -# range is a keyword argument to many functions, so save the builtin so they can -# use it. -_range = range - - -def _ptp(x): - """Peak-to-peak value of x. - - This implementation avoids the problem of signed integer arrays having a - peak-to-peak value that cannot be represented with the array's data type. 
- This function returns an unsigned value for signed integer arrays. - """ - return _unsigned_subtract(x.max(), x.min()) - - -def _hist_bin_sqrt(x, range): - """ - Square root histogram bin estimator. - - Bin width is inversely proportional to the data size. Used by many - programs for its simplicity. - - Parameters - ---------- - x : array_like - Input data that is to be histogrammed, trimmed to range. May not - be empty. - - Returns - ------- - h : An estimate of the optimal bin width for the given data. - """ - del range # unused - return _ptp(x) / np.sqrt(x.size) - - -def _hist_bin_sturges(x, range): - """ - Sturges histogram bin estimator. - - A very simplistic estimator based on the assumption of normality of - the data. This estimator has poor performance for non-normal data, - which becomes especially obvious for large data sets. The estimate - depends only on size of the data. - - Parameters - ---------- - x : array_like - Input data that is to be histogrammed, trimmed to range. May not - be empty. - - Returns - ------- - h : An estimate of the optimal bin width for the given data. - """ - del range # unused - return _ptp(x) / (np.log2(x.size) + 1.0) - - -def _hist_bin_rice(x, range): - """ - Rice histogram bin estimator. - - Another simple estimator with no normality assumption. It has better - performance for large data than Sturges, but tends to overestimate - the number of bins. The number of bins is proportional to the cube - root of data size (asymptotically optimal). The estimate depends - only on size of the data. - - Parameters - ---------- - x : array_like - Input data that is to be histogrammed, trimmed to range. May not - be empty. - - Returns - ------- - h : An estimate of the optimal bin width for the given data. - """ - del range # unused - return _ptp(x) / (2.0 * x.size ** (1.0 / 3)) - - -def _hist_bin_scott(x, range): - """ - Scott histogram bin estimator. 
- - The binwidth is proportional to the standard deviation of the data - and inversely proportional to the cube root of data size - (asymptotically optimal). - - Parameters - ---------- - x : array_like - Input data that is to be histogrammed, trimmed to range. May not - be empty. - - Returns - ------- - h : An estimate of the optimal bin width for the given data. - """ - del range # unused - return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x) - - -def _hist_bin_stone(x, range): - """ - Histogram bin estimator based on minimizing the estimated integrated squared error (ISE). - - The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution. - The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule. - https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule - - This paper by Stone appears to be the origination of this rule. - http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf - - Parameters - ---------- - x : array_like - Input data that is to be histogrammed, trimmed to range. May not - be empty. - range : (float, float) - The lower and upper range of the bins. - - Returns - ------- - h : An estimate of the optimal bin width for the given data. - """ - - n = x.size - ptp_x = _ptp(x) - if n <= 1 or ptp_x == 0: - return 0 - - def jhat(nbins): - hh = ptp_x / nbins - p_k = np.histogram(x, bins=nbins, range=range)[0] / n - return (2 - (n + 1) * p_k.dot(p_k)) / hh - - nbins_upper_bound = max(100, int(np.sqrt(n))) - nbins = min(_range(1, nbins_upper_bound + 1), key=jhat) - if nbins == nbins_upper_bound: - warnings.warn("The number of bins estimated may be suboptimal.", - RuntimeWarning, stacklevel=3) - return ptp_x / nbins - - -def _hist_bin_doane(x, range): - """ - Doane's histogram bin estimator. - - Improved version of Sturges' formula which works better for - non-normal data. 
See - stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning - - Parameters - ---------- - x : array_like - Input data that is to be histogrammed, trimmed to range. May not - be empty. - - Returns - ------- - h : An estimate of the optimal bin width for the given data. - """ - del range # unused - if x.size > 2: - sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3))) - sigma = np.std(x) - if sigma > 0.0: - # These three operations add up to - # g1 = np.mean(((x - np.mean(x)) / sigma)**3) - # but use only one temp array instead of three - temp = x - np.mean(x) - np.true_divide(temp, sigma, temp) - np.power(temp, 3, temp) - g1 = np.mean(temp) - return _ptp(x) / (1.0 + np.log2(x.size) + - np.log2(1.0 + np.absolute(g1) / sg1)) - return 0.0 - - -def _hist_bin_fd(x, range): - """ - The Freedman-Diaconis histogram bin estimator. - - The Freedman-Diaconis rule uses interquartile range (IQR) to - estimate binwidth. It is considered a variation of the Scott rule - with more robustness as the IQR is less affected by outliers than - the standard deviation. However, the IQR depends on fewer points - than the standard deviation, so it is less accurate, especially for - long tailed distributions. - - If the IQR is 0, this function returns 1 for the number of bins. - Binwidth is inversely proportional to the cube root of data size - (asymptotically optimal). - - Parameters - ---------- - x : array_like - Input data that is to be histogrammed, trimmed to range. May not - be empty. - - Returns - ------- - h : An estimate of the optimal bin width for the given data. - """ - del range # unused - iqr = np.subtract(*np.percentile(x, [75, 25])) - return 2.0 * iqr * x.size ** (-1.0 / 3.0) - - -def _hist_bin_auto(x, range): - """ - Histogram bin estimator that uses the minimum width of the - Freedman-Diaconis and Sturges estimators if the FD bandwidth is non zero - and the Sturges estimator if the FD bandwidth is 0. 
- - The FD estimator is usually the most robust method, but its width - estimate tends to be too large for small `x` and bad for data with limited - variance. The Sturges estimator is quite good for small (<1000) datasets - and is the default in the R language. This method gives good off the shelf - behaviour. - - .. versionchanged:: 1.15.0 - If there is limited variance the IQR can be 0, which results in the - FD bin width being 0 too. This is not a valid bin width, so - ``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal. - If the IQR is 0, it's unlikely any variance based estimators will be of - use, so we revert to the sturges estimator, which only uses the size of the - dataset in its calculation. - - Parameters - ---------- - x : array_like - Input data that is to be histogrammed, trimmed to range. May not - be empty. - - Returns - ------- - h : An estimate of the optimal bin width for the given data. - - See Also - -------- - _hist_bin_fd, _hist_bin_sturges - """ - fd_bw = _hist_bin_fd(x, range) - sturges_bw = _hist_bin_sturges(x, range) - del range # unused - if fd_bw: - return min(fd_bw, sturges_bw) - else: - # limited variance, so we return a len dependent bw estimator - return sturges_bw - -# Private dict initialized at module load time -_hist_bin_selectors = {'stone': _hist_bin_stone, - 'auto': _hist_bin_auto, - 'doane': _hist_bin_doane, - 'fd': _hist_bin_fd, - 'rice': _hist_bin_rice, - 'scott': _hist_bin_scott, - 'sqrt': _hist_bin_sqrt, - 'sturges': _hist_bin_sturges} - - -def _ravel_and_check_weights(a, weights): - """ Check a and weights have matching shapes, and ravel both """ - a = np.asarray(a) - - # Ensure that the array is a "subtractable" dtype - if a.dtype == np.bool_: - warnings.warn("Converting input from {} to {} for compatibility." 
- .format(a.dtype, np.uint8), - RuntimeWarning, stacklevel=3) - a = a.astype(np.uint8) - - if weights is not None: - weights = np.asarray(weights) - if weights.shape != a.shape: - raise ValueError( - 'weights should have the same shape as a.') - weights = weights.ravel() - a = a.ravel() - return a, weights - - -def _get_outer_edges(a, range): - """ - Determine the outer bin edges to use, from either the data or the range - argument - """ - if range is not None: - first_edge, last_edge = range - if first_edge > last_edge: - raise ValueError( - 'max must be larger than min in range parameter.') - if not (np.isfinite(first_edge) and np.isfinite(last_edge)): - raise ValueError( - "supplied range of [{}, {}] is not finite".format(first_edge, last_edge)) - elif a.size == 0: - # handle empty arrays. Can't determine range, so use 0-1. - first_edge, last_edge = 0, 1 - else: - first_edge, last_edge = a.min(), a.max() - if not (np.isfinite(first_edge) and np.isfinite(last_edge)): - raise ValueError( - "autodetected range of [{}, {}] is not finite".format(first_edge, last_edge)) - - # expand empty range to avoid divide by zero - if first_edge == last_edge: - first_edge = first_edge - 0.5 - last_edge = last_edge + 0.5 - - return first_edge, last_edge - - -def _unsigned_subtract(a, b): - """ - Subtract two values where a >= b, and produce an unsigned result - - This is needed when finding the difference between the upper and lower - bound of an int16 histogram - """ - # coerce to a single type - signed_to_unsigned = { - np.byte: np.ubyte, - np.short: np.ushort, - np.intc: np.uintc, - np.int_: np.uint, - np.longlong: np.ulonglong - } - dt = np.result_type(a, b) - try: - dt = signed_to_unsigned[dt.type] - except KeyError: - return np.subtract(a, b, dtype=dt) - else: - # we know the inputs are integers, and we are deliberately casting - # signed to unsigned - return np.subtract(a, b, casting='unsafe', dtype=dt) - - -def _get_bin_edges(a, bins, range, weights): - """ - Computes the 
bins used internally by `histogram`. - - Parameters - ========== - a : ndarray - Ravelled data array - bins, range - Forwarded arguments from `histogram`. - weights : ndarray, optional - Ravelled weights array, or None - - Returns - ======= - bin_edges : ndarray - Array of bin edges - uniform_bins : (Number, Number, int): - The upper bound, lowerbound, and number of bins, used in the optimized - implementation of `histogram` that works on uniform bins. - """ - # parse the overloaded bins argument - n_equal_bins = None - bin_edges = None - - if isinstance(bins, basestring): - bin_name = bins - # if `bins` is a string for an automatic method, - # this will replace it with the number of bins calculated - if bin_name not in _hist_bin_selectors: - raise ValueError( - "{!r} is not a valid estimator for `bins`".format(bin_name)) - if weights is not None: - raise TypeError("Automated estimation of the number of " - "bins is not supported for weighted data") - - first_edge, last_edge = _get_outer_edges(a, range) - - # truncate the range if needed - if range is not None: - keep = (a >= first_edge) - keep &= (a <= last_edge) - if not np.logical_and.reduce(keep): - a = a[keep] - - if a.size == 0: - n_equal_bins = 1 - else: - # Do not call selectors on empty arrays - width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge)) - if width: - n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width)) - else: - # Width can be zero for some estimators, e.g. FD when - # the IQR of the data is zero. 
- n_equal_bins = 1 - - elif np.ndim(bins) == 0: - try: - n_equal_bins = operator.index(bins) - except TypeError: - raise TypeError( - '`bins` must be an integer, a string, or an array') - if n_equal_bins < 1: - raise ValueError('`bins` must be positive, when an integer') - - first_edge, last_edge = _get_outer_edges(a, range) - - elif np.ndim(bins) == 1: - bin_edges = np.asarray(bins) - if np.any(bin_edges[:-1] > bin_edges[1:]): - raise ValueError( - '`bins` must increase monotonically, when an array') - - else: - raise ValueError('`bins` must be 1d, when an array') - - if n_equal_bins is not None: - # gh-10322 means that type resolution rules are dependent on array - # shapes. To avoid this causing problems, we pick a type now and stick - # with it throughout. - bin_type = np.result_type(first_edge, last_edge, a) - if np.issubdtype(bin_type, np.integer): - bin_type = np.result_type(bin_type, float) - - # bin edges must be computed - bin_edges = np.linspace( - first_edge, last_edge, n_equal_bins + 1, - endpoint=True, dtype=bin_type) - return bin_edges, (first_edge, last_edge, n_equal_bins) - else: - return bin_edges, None - - -def _search_sorted_inclusive(a, v): - """ - Like `searchsorted`, but where the last item in `v` is placed on the right. - - In the context of a histogram, this makes the last bin edge inclusive - """ - return np.concatenate(( - a.searchsorted(v[:-1], 'left'), - a.searchsorted(v[-1:], 'right') - )) - - -def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None): - return (a, bins, weights) - - -@array_function_dispatch(_histogram_bin_edges_dispatcher) -def histogram_bin_edges(a, bins=10, range=None, weights=None): - r""" - Function to calculate only the edges of the bins used by the `histogram` - function. - - Parameters - ---------- - a : array_like - Input data. The histogram is computed over the flattened array. 
- bins : int or sequence of scalars or str, optional - If `bins` is an int, it defines the number of equal-width - bins in the given range (10, by default). If `bins` is a - sequence, it defines the bin edges, including the rightmost - edge, allowing for non-uniform bin widths. - - If `bins` is a string from the list below, `histogram_bin_edges` will use - the method chosen to calculate the optimal bin width and - consequently the number of bins (see `Notes` for more detail on - the estimators) from the data that falls within the requested - range. While the bin width will be optimal for the actual data - in the range, the number of bins will be computed to fill the - entire range, including the empty portions. For visualisation, - using the 'auto' option is suggested. Weighted data is not - supported for automated bin size selection. - - 'auto' - Maximum of the 'sturges' and 'fd' estimators. Provides good - all around performance. - - 'fd' (Freedman Diaconis Estimator) - Robust (resilient to outliers) estimator that takes into - account data variability and data size. - - 'doane' - An improved version of Sturges' estimator that works better - with non-normal datasets. - - 'scott' - Less robust estimator that that takes into account data - variability and data size. - - 'stone' - Estimator based on leave-one-out cross-validation estimate of - the integrated squared error. Can be regarded as a generalization - of Scott's rule. - - 'rice' - Estimator does not take variability into account, only data - size. Commonly overestimates number of bins required. - - 'sturges' - R's default method, only accounts for data size. Only - optimal for gaussian data and underestimates number of bins - for large non-gaussian datasets. - - 'sqrt' - Square root (of data size) estimator, used by Excel and - other programs for its speed and simplicity. - - range : (float, float), optional - The lower and upper range of the bins. If not provided, range - is simply ``(a.min(), a.max())``. 
Values outside the range are - ignored. The first element of the range must be less than or - equal to the second. `range` affects the automatic bin - computation as well. While bin width is computed to be optimal - based on the actual data within `range`, the bin count will fill - the entire range including portions containing no data. - - weights : array_like, optional - An array of weights, of the same shape as `a`. Each value in - `a` only contributes its associated weight towards the bin count - (instead of 1). This is currently not used by any of the bin estimators, - but may be in the future. - - Returns - ------- - bin_edges : array of dtype float - The edges to pass into `histogram` - - See Also - -------- - histogram - - Notes - ----- - The methods to estimate the optimal number of bins are well founded - in literature, and are inspired by the choices R provides for - histogram visualisation. Note that having the number of bins - proportional to :math:`n^{1/3}` is asymptotically optimal, which is - why it appears in most estimators. These are simply plug-in methods - that give good starting points for number of bins. In the equations - below, :math:`h` is the binwidth and :math:`n_h` is the number of - bins. All estimators that compute bin counts are recast to bin width - using the `ptp` of the data. The final bin count is obtained from - ``np.round(np.ceil(range / h))``. - - 'auto' (maximum of the 'sturges' and 'fd' estimators) - A compromise to get a good value. For small datasets the Sturges - value will usually be chosen, while larger datasets will usually - default to FD. Avoids the overly conservative behaviour of FD - and Sturges for small and large datasets respectively. - Switchover point is usually :math:`a.size \approx 1000`. - - 'fd' (Freedman Diaconis Estimator) - .. math:: h = 2 \frac{IQR}{n^{1/3}} - - The binwidth is proportional to the interquartile range (IQR) - and inversely proportional to cube root of a.size. 
Can be too - conservative for small datasets, but is quite good for large - datasets. The IQR is very robust to outliers. - - 'scott' - .. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}} - - The binwidth is proportional to the standard deviation of the - data and inversely proportional to cube root of ``x.size``. Can - be too conservative for small datasets, but is quite good for - large datasets. The standard deviation is not very robust to - outliers. Values are very similar to the Freedman-Diaconis - estimator in the absence of outliers. - - 'rice' - .. math:: n_h = 2n^{1/3} - - The number of bins is only proportional to cube root of - ``a.size``. It tends to overestimate the number of bins and it - does not take into account data variability. - - 'sturges' - .. math:: n_h = \log _{2}n+1 - - The number of bins is the base 2 log of ``a.size``. This - estimator assumes normality of data and is too conservative for - larger, non-normal datasets. This is the default method in R's - ``hist`` method. - - 'doane' - .. math:: n_h = 1 + \log_{2}(n) + - \log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}}) - - g_1 = mean[(\frac{x - \mu}{\sigma})^3] - - \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}} - - An improved version of Sturges' formula that produces better - estimates for non-normal datasets. This estimator attempts to - account for the skew of the data. - - 'sqrt' - .. math:: n_h = \sqrt n - - The simplest and fastest estimator. Only takes into account the - data size. - - Examples - -------- - >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5]) - >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1)) - array([0. , 0.25, 0.5 , 0.75, 1. ]) - >>> np.histogram_bin_edges(arr, bins=2) - array([0. , 2.5, 5. 
]) - - For consistency with histogram, an array of pre-computed bins is - passed through unmodified: - - >>> np.histogram_bin_edges(arr, [1, 2]) - array([1, 2]) - - This function allows one set of bins to be computed, and reused across - multiple histograms: - - >>> shared_bins = np.histogram_bin_edges(arr, bins='auto') - >>> shared_bins - array([0., 1., 2., 3., 4., 5.]) - - >>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1]) - >>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins) - >>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins) - - >>> hist_0; hist_1 - array([1, 1, 0, 1, 0]) - array([2, 0, 1, 1, 2]) - - Which gives more easily comparable results than using separate bins for - each histogram: - - >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto') - >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto') - >>> hist_0; hist_1 - array([1, 1, 1]) - array([2, 1, 1, 2]) - >>> bins_0; bins_1 - array([0., 1., 2., 3.]) - array([0. , 1.25, 2.5 , 3.75, 5. ]) - - """ - a, weights = _ravel_and_check_weights(a, weights) - bin_edges, _ = _get_bin_edges(a, bins, range, weights) - return bin_edges - - -def _histogram_dispatcher( - a, bins=None, range=None, normed=None, weights=None, density=None): - return (a, bins, weights) - - -@array_function_dispatch(_histogram_dispatcher) -def histogram(a, bins=10, range=None, normed=None, weights=None, - density=None): - r""" - Compute the histogram of a set of data. - - Parameters - ---------- - a : array_like - Input data. The histogram is computed over the flattened array. - bins : int or sequence of scalars or str, optional - If `bins` is an int, it defines the number of equal-width - bins in the given range (10, by default). If `bins` is a - sequence, it defines a monotonically increasing array of bin edges, - including the rightmost edge, allowing for non-uniform bin widths. - - .. 
versionadded:: 1.11.0 - - If `bins` is a string, it defines the method used to calculate the - optimal bin width, as defined by `histogram_bin_edges`. - - range : (float, float), optional - The lower and upper range of the bins. If not provided, range - is simply ``(a.min(), a.max())``. Values outside the range are - ignored. The first element of the range must be less than or - equal to the second. `range` affects the automatic bin - computation as well. While bin width is computed to be optimal - based on the actual data within `range`, the bin count will fill - the entire range including portions containing no data. - normed : bool, optional - - .. deprecated:: 1.6.0 - - This is equivalent to the `density` argument, but produces incorrect - results for unequal bin widths. It should not be used. - - .. versionchanged:: 1.15.0 - DeprecationWarnings are actually emitted. - - weights : array_like, optional - An array of weights, of the same shape as `a`. Each value in - `a` only contributes its associated weight towards the bin count - (instead of 1). If `density` is True, the weights are - normalized, so that the integral of the density over the range - remains 1. - density : bool, optional - If ``False``, the result will contain the number of samples in - each bin. If ``True``, the result is the value of the - probability *density* function at the bin, normalized such that - the *integral* over the range is 1. Note that the sum of the - histogram values will not be equal to 1 unless bins of unity - width are chosen; it is not a probability *mass* function. - - Overrides the ``normed`` keyword if given. - - Returns - ------- - hist : array - The values of the histogram. See `density` and `weights` for a - description of the possible semantics. - bin_edges : array of dtype float - Return the bin edges ``(length(hist)+1)``. 
- - - See Also - -------- - histogramdd, bincount, searchsorted, digitize, histogram_bin_edges - - Notes - ----- - All but the last (righthand-most) bin is half-open. In other words, - if `bins` is:: - - [1, 2, 3, 4] - - then the first bin is ``[1, 2)`` (including 1, but excluding 2) and - the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which - *includes* 4. - - - Examples - -------- - >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3]) - (array([0, 2, 1]), array([0, 1, 2, 3])) - >>> np.histogram(np.arange(4), bins=np.arange(5), density=True) - (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4])) - >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3]) - (array([1, 4, 1]), array([0, 1, 2, 3])) - - >>> a = np.arange(5) - >>> hist, bin_edges = np.histogram(a, density=True) - >>> hist - array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5]) - >>> hist.sum() - 2.4999999999999996 - >>> np.sum(hist * np.diff(bin_edges)) - 1.0 - - .. versionadded:: 1.11.0 - - Automated Bin Selection Methods example, using 2 peak random data - with 2000 points: - - >>> import matplotlib.pyplot as plt - >>> rng = np.random.RandomState(10) # deterministic random data - >>> a = np.hstack((rng.normal(size=1000), - ... rng.normal(loc=5, scale=2, size=1000))) - >>> _ = plt.hist(a, bins='auto') # arguments are passed to np.histogram - >>> plt.title("Histogram with 'auto' bins") - Text(0.5, 1.0, "Histogram with 'auto' bins") - >>> plt.show() - - """ - a, weights = _ravel_and_check_weights(a, weights) - - bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights) - - # Histogram is an integer or a float array depending on the weights. - if weights is None: - ntype = np.dtype(np.intp) - else: - ntype = weights.dtype - - # We set a block size, as this allows us to iterate over chunks when - # computing histograms, to minimize memory usage. 
- BLOCK = 65536 - - # The fast path uses bincount, but that only works for certain types - # of weight - simple_weights = ( - weights is None or - np.can_cast(weights.dtype, np.double) or - np.can_cast(weights.dtype, complex) - ) - - if uniform_bins is not None and simple_weights: - # Fast algorithm for equal bins - # We now convert values of a to bin indices, under the assumption of - # equal bin widths (which is valid here). - first_edge, last_edge, n_equal_bins = uniform_bins - - # Initialize empty histogram - n = np.zeros(n_equal_bins, ntype) - - # Pre-compute histogram scaling factor - norm = n_equal_bins / _unsigned_subtract(last_edge, first_edge) - - # We iterate over blocks here for two reasons: the first is that for - # large arrays, it is actually faster (for example for a 10^8 array it - # is 2x as fast) and it results in a memory footprint 3x lower in the - # limit of large arrays. - for i in _range(0, len(a), BLOCK): - tmp_a = a[i:i+BLOCK] - if weights is None: - tmp_w = None - else: - tmp_w = weights[i:i + BLOCK] - - # Only include values in the right range - keep = (tmp_a >= first_edge) - keep &= (tmp_a <= last_edge) - if not np.logical_and.reduce(keep): - tmp_a = tmp_a[keep] - if tmp_w is not None: - tmp_w = tmp_w[keep] - - # This cast ensures no type promotions occur below, which gh-10322 - # make unpredictable. Getting it wrong leads to precision errors - # like gh-8123. - tmp_a = tmp_a.astype(bin_edges.dtype, copy=False) - - # Compute the bin indices, and for values that lie exactly on - # last_edge we need to subtract one - f_indices = _unsigned_subtract(tmp_a, first_edge) * norm - indices = f_indices.astype(np.intp) - indices[indices == n_equal_bins] -= 1 - - # The index computation is not guaranteed to give exactly - # consistent results within ~1 ULP of the bin edges. - decrement = tmp_a < bin_edges[indices] - indices[decrement] -= 1 - # The last bin includes the right edge. The other bins do not. 
- increment = ((tmp_a >= bin_edges[indices + 1]) - & (indices != n_equal_bins - 1)) - indices[increment] += 1 - - # We now compute the histogram using bincount - if ntype.kind == 'c': - n.real += np.bincount(indices, weights=tmp_w.real, - minlength=n_equal_bins) - n.imag += np.bincount(indices, weights=tmp_w.imag, - minlength=n_equal_bins) - else: - n += np.bincount(indices, weights=tmp_w, - minlength=n_equal_bins).astype(ntype) - else: - # Compute via cumulative histogram - cum_n = np.zeros(bin_edges.shape, ntype) - if weights is None: - for i in _range(0, len(a), BLOCK): - sa = np.sort(a[i:i+BLOCK]) - cum_n += _search_sorted_inclusive(sa, bin_edges) - else: - zero = np.zeros(1, dtype=ntype) - for i in _range(0, len(a), BLOCK): - tmp_a = a[i:i+BLOCK] - tmp_w = weights[i:i+BLOCK] - sorting_index = np.argsort(tmp_a) - sa = tmp_a[sorting_index] - sw = tmp_w[sorting_index] - cw = np.concatenate((zero, sw.cumsum())) - bin_index = _search_sorted_inclusive(sa, bin_edges) - cum_n += cw[bin_index] - - n = np.diff(cum_n) - - # density overrides the normed keyword - if density is not None: - if normed is not None: - # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6) - warnings.warn( - "The normed argument is ignored when density is provided. " - "In future passing both will result in an error.", - DeprecationWarning, stacklevel=3) - normed = None - - if density: - db = np.array(np.diff(bin_edges), float) - return n/db/n.sum(), bin_edges - elif normed: - # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6) - warnings.warn( - "Passing `normed=True` on non-uniform bins has always been " - "broken, and computes neither the probability density " - "function nor the probability mass function. " - "The result is only correct if the bins are uniform, when " - "density=True will produce the same result anyway. 
" - "The argument will be removed in a future version of " - "numpy.", - np.VisibleDeprecationWarning, stacklevel=3) - - # this normalization is incorrect, but - db = np.array(np.diff(bin_edges), float) - return n/(n*db).sum(), bin_edges - else: - if normed is not None: - # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6) - warnings.warn( - "Passing normed=False is deprecated, and has no effect. " - "Consider passing the density argument instead.", - DeprecationWarning, stacklevel=3) - return n, bin_edges - - -def _histogramdd_dispatcher(sample, bins=None, range=None, normed=None, - weights=None, density=None): - if hasattr(sample, 'shape'): # same condition as used in histogramdd - yield sample - else: - yield from sample - with contextlib.suppress(TypeError): - yield from bins - yield weights - - -@array_function_dispatch(_histogramdd_dispatcher) -def histogramdd(sample, bins=10, range=None, normed=None, weights=None, - density=None): - """ - Compute the multidimensional histogram of some data. - - Parameters - ---------- - sample : (N, D) array, or (D, N) array_like - The data to be histogrammed. - - Note the unusual interpretation of sample when an array_like: - - * When an array, each row is a coordinate in a D-dimensional space - - such as ``histogramgramdd(np.array([p1, p2, p3]))``. - * When an array_like, each element is the list of values for single - coordinate - such as ``histogramgramdd((X, Y, Z))``. - - The first form should be preferred. - - bins : sequence or int, optional - The bin specification: - - * A sequence of arrays describing the monotonically increasing bin - edges along each dimension. - * The number of bins for each dimension (nx, ny, ... =bins) - * The number of bins for all dimensions (nx=ny=...=bins). - - range : sequence, optional - A sequence of length D, each an optional (lower, upper) tuple giving - the outer bin edges to be used if the edges are not given explicitly in - `bins`. 
- An entry of None in the sequence results in the minimum and maximum - values being used for the corresponding dimension. - The default, None, is equivalent to passing a tuple of D None values. - density : bool, optional - If False, the default, returns the number of samples in each bin. - If True, returns the probability *density* function at the bin, - ``bin_count / sample_count / bin_volume``. - normed : bool, optional - An alias for the density argument that behaves identically. To avoid - confusion with the broken normed argument to `histogram`, `density` - should be preferred. - weights : (N,) array_like, optional - An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`. - Weights are normalized to 1 if normed is True. If normed is False, - the values of the returned histogram are equal to the sum of the - weights belonging to the samples falling into each bin. - - Returns - ------- - H : ndarray - The multidimensional histogram of sample x. See normed and weights - for the different possible semantics. - edges : list - A list of D arrays describing the bin edges for each dimension. - - See Also - -------- - histogram: 1-D histogram - histogram2d: 2-D histogram - - Examples - -------- - >>> r = np.random.randn(100,3) - >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) - >>> H.shape, edges[0].size, edges[1].size, edges[2].size - ((5, 8, 4), 6, 9, 5) - - """ - - try: - # Sample is an ND-array. - N, D = sample.shape - except (AttributeError, ValueError): - # Sample is a sequence of 1D arrays. 
- sample = np.atleast_2d(sample).T - N, D = sample.shape - - nbin = np.empty(D, int) - edges = D*[None] - dedges = D*[None] - if weights is not None: - weights = np.asarray(weights) - - try: - M = len(bins) - if M != D: - raise ValueError( - 'The dimension of bins must be equal to the dimension of the ' - ' sample x.') - except TypeError: - # bins is an integer - bins = D*[bins] - - # normalize the range argument - if range is None: - range = (None,) * D - elif len(range) != D: - raise ValueError('range argument must have one entry per dimension') - - # Create edge arrays - for i in _range(D): - if np.ndim(bins[i]) == 0: - if bins[i] < 1: - raise ValueError( - '`bins[{}]` must be positive, when an integer'.format(i)) - smin, smax = _get_outer_edges(sample[:,i], range[i]) - edges[i] = np.linspace(smin, smax, bins[i] + 1) - elif np.ndim(bins[i]) == 1: - edges[i] = np.asarray(bins[i]) - if np.any(edges[i][:-1] > edges[i][1:]): - raise ValueError( - '`bins[{}]` must be monotonically increasing, when an array' - .format(i)) - else: - raise ValueError( - '`bins[{}]` must be a scalar or 1d array'.format(i)) - - nbin[i] = len(edges[i]) + 1 # includes an outlier on each end - dedges[i] = np.diff(edges[i]) - - # Compute the bin number each sample falls into. - Ncount = tuple( - # avoid np.digitize to work around gh-11022 - np.searchsorted(edges[i], sample[:, i], side='right') - for i in _range(D) - ) - - # Using digitize, values that fall on an edge are put in the right bin. - # For the rightmost bin, we want values equal to the right edge to be - # counted in the last bin, and not as an outlier. - for i in _range(D): - # Find which points are on the rightmost edge. - on_edge = (sample[:, i] == edges[i][-1]) - # Shift these points one bin to the left. - Ncount[i][on_edge] -= 1 - - # Compute the sample indices in the flattened histogram matrix. - # This raises an error if the array is too large. 
- xy = np.ravel_multi_index(Ncount, nbin) - - # Compute the number of repetitions in xy and assign it to the - # flattened histmat. - hist = np.bincount(xy, weights, minlength=nbin.prod()) - - # Shape into a proper matrix - hist = hist.reshape(nbin) - - # This preserves the (bad) behavior observed in gh-7845, for now. - hist = hist.astype(float, casting='safe') - - # Remove outliers (indices 0 and -1 for each dimension). - core = D*(slice(1, -1),) - hist = hist[core] - - # handle the aliasing normed argument - if normed is None: - if density is None: - density = False - elif density is None: - # an explicit normed argument was passed, alias it to the new name - density = normed - else: - raise TypeError("Cannot specify both 'normed' and 'density'") - - if density: - # calculate the probability density function - s = hist.sum() - for i in _range(D): - shape = np.ones(D, int) - shape[i] = nbin[i] - 2 - hist = hist / dedges[i].reshape(shape) - hist /= s - - if (hist.shape != nbin - 2).any(): - raise RuntimeError( - "Internal Shape Error") - return hist, edges diff --git a/venv/lib/python3.7/site-packages/numpy/lib/index_tricks.py b/venv/lib/python3.7/site-packages/numpy/lib/index_tricks.py deleted file mode 100644 index 0438485..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/index_tricks.py +++ /dev/null @@ -1,984 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import functools -import sys -import math - -import numpy.core.numeric as _nx -from numpy.core.numeric import ( - asarray, ScalarType, array, alltrue, cumprod, arange, ndim - ) -from numpy.core.numerictypes import find_common_type, issubdtype - -import numpy.matrixlib as matrixlib -from .function_base import diff -from numpy.core.multiarray import ravel_multi_index, unravel_index -from numpy.core.overrides import set_module -from numpy.core import overrides, linspace -from numpy.lib.stride_tricks import as_strided - - -array_function_dispatch = functools.partial( - 
overrides.array_function_dispatch, module='numpy') - - -__all__ = [ - 'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_', - 's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal', - 'diag_indices', 'diag_indices_from' - ] - - -def _ix__dispatcher(*args): - return args - - -@array_function_dispatch(_ix__dispatcher) -def ix_(*args): - """ - Construct an open mesh from multiple sequences. - - This function takes N 1-D sequences and returns N outputs with N - dimensions each, such that the shape is 1 in all but one dimension - and the dimension with the non-unit shape value cycles through all - N dimensions. - - Using `ix_` one can quickly construct index arrays that will index - the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array - ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``. - - Parameters - ---------- - args : 1-D sequences - Each sequence should be of integer or boolean type. - Boolean sequences will be interpreted as boolean masks for the - corresponding dimension (equivalent to passing in - ``np.nonzero(boolean_sequence)``). - - Returns - ------- - out : tuple of ndarrays - N arrays with N dimensions each, with N the number of input - sequences. Together these arrays form an open mesh. 
- - See Also - -------- - ogrid, mgrid, meshgrid - - Examples - -------- - >>> a = np.arange(10).reshape(2, 5) - >>> a - array([[0, 1, 2, 3, 4], - [5, 6, 7, 8, 9]]) - >>> ixgrid = np.ix_([0, 1], [2, 4]) - >>> ixgrid - (array([[0], - [1]]), array([[2, 4]])) - >>> ixgrid[0].shape, ixgrid[1].shape - ((2, 1), (1, 2)) - >>> a[ixgrid] - array([[2, 4], - [7, 9]]) - - >>> ixgrid = np.ix_([True, True], [2, 4]) - >>> a[ixgrid] - array([[2, 4], - [7, 9]]) - >>> ixgrid = np.ix_([True, True], [False, False, True, False, True]) - >>> a[ixgrid] - array([[2, 4], - [7, 9]]) - - """ - out = [] - nd = len(args) - for k, new in enumerate(args): - if not isinstance(new, _nx.ndarray): - new = asarray(new) - if new.size == 0: - # Explicitly type empty arrays to avoid float default - new = new.astype(_nx.intp) - if new.ndim != 1: - raise ValueError("Cross index must be 1 dimensional") - if issubdtype(new.dtype, _nx.bool_): - new, = new.nonzero() - new = new.reshape((1,)*k + (new.size,) + (1,)*(nd-k-1)) - out.append(new) - return tuple(out) - -class nd_grid(object): - """ - Construct a multi-dimensional "meshgrid". - - ``grid = nd_grid()`` creates an instance which will return a mesh-grid - when indexed. The dimension and number of the output arrays are equal - to the number of indexing dimensions. If the step length is not a - complex number, then the stop is not inclusive. - - However, if the step length is a **complex number** (e.g. 5j), then the - integer part of its magnitude is interpreted as specifying the - number of points to create between the start and stop values, where - the stop value **is inclusive**. - - If instantiated with an argument of ``sparse=True``, the mesh-grid is - open (or not fleshed out) so that only one-dimension of each returned - argument is greater than 1. - - Parameters - ---------- - sparse : bool, optional - Whether the grid is sparse or not. Default is False. 
- - Notes - ----- - Two instances of `nd_grid` are made available in the NumPy namespace, - `mgrid` and `ogrid`, approximately defined as:: - - mgrid = nd_grid(sparse=False) - ogrid = nd_grid(sparse=True) - - Users should use these pre-defined instances instead of using `nd_grid` - directly. - """ - - def __init__(self, sparse=False): - self.sparse = sparse - - def __getitem__(self, key): - try: - size = [] - typ = int - for k in range(len(key)): - step = key[k].step - start = key[k].start - if start is None: - start = 0 - if step is None: - step = 1 - if isinstance(step, complex): - size.append(int(abs(step))) - typ = float - else: - size.append( - int(math.ceil((key[k].stop - start)/(step*1.0)))) - if (isinstance(step, float) or - isinstance(start, float) or - isinstance(key[k].stop, float)): - typ = float - if self.sparse: - nn = [_nx.arange(_x, dtype=_t) - for _x, _t in zip(size, (typ,)*len(size))] - else: - nn = _nx.indices(size, typ) - for k in range(len(size)): - step = key[k].step - start = key[k].start - if start is None: - start = 0 - if step is None: - step = 1 - if isinstance(step, complex): - step = int(abs(step)) - if step != 1: - step = (key[k].stop - start)/float(step-1) - nn[k] = (nn[k]*step+start) - if self.sparse: - slobj = [_nx.newaxis]*len(size) - for k in range(len(size)): - slobj[k] = slice(None, None) - nn[k] = nn[k][tuple(slobj)] - slobj[k] = _nx.newaxis - return nn - except (IndexError, TypeError): - step = key.step - stop = key.stop - start = key.start - if start is None: - start = 0 - if isinstance(step, complex): - step = abs(step) - length = int(step) - if step != 1: - step = (key.stop-start)/float(step-1) - stop = key.stop + step - return _nx.arange(0, length, 1, float)*step + start - else: - return _nx.arange(start, stop, step) - - -class MGridClass(nd_grid): - """ - `nd_grid` instance which returns a dense multi-dimensional "meshgrid". 
- - An instance of `numpy.lib.index_tricks.nd_grid` which returns an dense - (or fleshed out) mesh-grid when indexed, so that each returned argument - has the same shape. The dimensions and number of the output arrays are - equal to the number of indexing dimensions. If the step length is not a - complex number, then the stop is not inclusive. - - However, if the step length is a **complex number** (e.g. 5j), then - the integer part of its magnitude is interpreted as specifying the - number of points to create between the start and stop values, where - the stop value **is inclusive**. - - Returns - ---------- - mesh-grid `ndarrays` all of the same dimensions - - See Also - -------- - numpy.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects - ogrid : like mgrid but returns open (not fleshed out) mesh grids - r_ : array concatenator - - Examples - -------- - >>> np.mgrid[0:5,0:5] - array([[[0, 0, 0, 0, 0], - [1, 1, 1, 1, 1], - [2, 2, 2, 2, 2], - [3, 3, 3, 3, 3], - [4, 4, 4, 4, 4]], - [[0, 1, 2, 3, 4], - [0, 1, 2, 3, 4], - [0, 1, 2, 3, 4], - [0, 1, 2, 3, 4], - [0, 1, 2, 3, 4]]]) - >>> np.mgrid[-1:1:5j] - array([-1. , -0.5, 0. , 0.5, 1. ]) - - """ - def __init__(self): - super(MGridClass, self).__init__(sparse=False) - -mgrid = MGridClass() - -class OGridClass(nd_grid): - """ - `nd_grid` instance which returns an open multi-dimensional "meshgrid". - - An instance of `numpy.lib.index_tricks.nd_grid` which returns an open - (i.e. not fleshed out) mesh-grid when indexed, so that only one dimension - of each returned array is greater than 1. The dimension and number of the - output arrays are equal to the number of indexing dimensions. If the step - length is not a complex number, then the stop is not inclusive. - - However, if the step length is a **complex number** (e.g. 5j), then - the integer part of its magnitude is interpreted as specifying the - number of points to create between the start and stop values, where - the stop value **is inclusive**. 
- - Returns - ------- - mesh-grid - `ndarrays` with only one dimension not equal to 1 - - See Also - -------- - np.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects - mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids - r_ : array concatenator - - Examples - -------- - >>> from numpy import ogrid - >>> ogrid[-1:1:5j] - array([-1. , -0.5, 0. , 0.5, 1. ]) - >>> ogrid[0:5,0:5] - [array([[0], - [1], - [2], - [3], - [4]]), array([[0, 1, 2, 3, 4]])] - - """ - def __init__(self): - super(OGridClass, self).__init__(sparse=True) - -ogrid = OGridClass() - - -class AxisConcatenator(object): - """ - Translates slice objects to concatenation along an axis. - - For detailed documentation on usage, see `r_`. - """ - # allow ma.mr_ to override this - concatenate = staticmethod(_nx.concatenate) - makemat = staticmethod(matrixlib.matrix) - - def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1): - self.axis = axis - self.matrix = matrix - self.trans1d = trans1d - self.ndmin = ndmin - - def __getitem__(self, key): - # handle matrix builder syntax - if isinstance(key, str): - frame = sys._getframe().f_back - mymat = matrixlib.bmat(key, frame.f_globals, frame.f_locals) - return mymat - - if not isinstance(key, tuple): - key = (key,) - - # copy attributes, since they can be overridden in the first argument - trans1d = self.trans1d - ndmin = self.ndmin - matrix = self.matrix - axis = self.axis - - objs = [] - scalars = [] - arraytypes = [] - scalartypes = [] - - for k, item in enumerate(key): - scalar = False - if isinstance(item, slice): - step = item.step - start = item.start - stop = item.stop - if start is None: - start = 0 - if step is None: - step = 1 - if isinstance(step, complex): - size = int(abs(step)) - newobj = linspace(start, stop, num=size) - else: - newobj = _nx.arange(start, stop, step) - if ndmin > 1: - newobj = array(newobj, copy=False, ndmin=ndmin) - if trans1d != -1: - newobj = newobj.swapaxes(-1, trans1d) - elif isinstance(item, 
str): - if k != 0: - raise ValueError("special directives must be the " - "first entry.") - if item in ('r', 'c'): - matrix = True - col = (item == 'c') - continue - if ',' in item: - vec = item.split(',') - try: - axis, ndmin = [int(x) for x in vec[:2]] - if len(vec) == 3: - trans1d = int(vec[2]) - continue - except Exception: - raise ValueError("unknown special directive") - try: - axis = int(item) - continue - except (ValueError, TypeError): - raise ValueError("unknown special directive") - elif type(item) in ScalarType: - newobj = array(item, ndmin=ndmin) - scalars.append(len(objs)) - scalar = True - scalartypes.append(newobj.dtype) - else: - item_ndim = ndim(item) - newobj = array(item, copy=False, subok=True, ndmin=ndmin) - if trans1d != -1 and item_ndim < ndmin: - k2 = ndmin - item_ndim - k1 = trans1d - if k1 < 0: - k1 += k2 + 1 - defaxes = list(range(ndmin)) - axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2] - newobj = newobj.transpose(axes) - objs.append(newobj) - if not scalar and isinstance(newobj, _nx.ndarray): - arraytypes.append(newobj.dtype) - - # Ensure that scalars won't up-cast unless warranted - final_dtype = find_common_type(arraytypes, scalartypes) - if final_dtype is not None: - for k in scalars: - objs[k] = objs[k].astype(final_dtype) - - res = self.concatenate(tuple(objs), axis=axis) - - if matrix: - oldndim = res.ndim - res = self.makemat(res) - if oldndim == 1 and col: - res = res.T - return res - - def __len__(self): - return 0 - -# separate classes are used here instead of just making r_ = concatentor(0), -# etc. because otherwise we couldn't get the doc string to come out right -# in help(r_) - -class RClass(AxisConcatenator): - """ - Translates slice objects to concatenation along the first axis. - - This is a simple way to build up arrays quickly. There are two use cases. - - 1. If the index expression contains comma separated arrays, then stack - them along their first axis. - 2. 
If the index expression contains slice notation or scalars then create - a 1-D array with a range indicated by the slice notation. - - If slice notation is used, the syntax ``start:stop:step`` is equivalent - to ``np.arange(start, stop, step)`` inside of the brackets. However, if - ``step`` is an imaginary number (i.e. 100j) then its integer portion is - interpreted as a number-of-points desired and the start and stop are - inclusive. In other words ``start:stop:stepj`` is interpreted as - ``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets. - After expansion of slice notation, all comma separated sequences are - concatenated together. - - Optional character strings placed as the first element of the index - expression can be used to change the output. The strings 'r' or 'c' result - in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row) - matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1 - (column) matrix is produced. If the result is 2-D then both provide the - same matrix result. - - A string integer specifies which axis to stack multiple comma separated - arrays along. A string of two comma-separated integers allows indication - of the minimum number of dimensions to force each entry into as the - second integer (the axis to concatenate along is still the first integer). - - A string with three comma-separated integers allows specification of the - axis to concatenate along, the minimum number of dimensions to force the - entries to, and which axis should contain the start of the arrays which - are less than the specified number of dimensions. In other words the third - integer allows you to specify where the 1's should be placed in the shape - of the arrays that have their shapes upgraded. By default, they are placed - in the front of the shape tuple. The third argument allows you to specify - where the start of the array should be instead. 
Thus, a third argument of - '0' would place the 1's at the end of the array shape. Negative integers - specify where in the new shape tuple the last dimension of upgraded arrays - should be placed, so the default is '-1'. - - Parameters - ---------- - Not a function, so takes no parameters - - - Returns - ------- - A concatenated ndarray or matrix. - - See Also - -------- - concatenate : Join a sequence of arrays along an existing axis. - c_ : Translates slice objects to concatenation along the second axis. - - Examples - -------- - >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])] - array([1, 2, 3, ..., 4, 5, 6]) - >>> np.r_[-1:1:6j, [0]*3, 5, 6] - array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ]) - - String integers specify the axis to concatenate along or the minimum - number of dimensions to force entries into. - - >>> a = np.array([[0, 1, 2], [3, 4, 5]]) - >>> np.r_['-1', a, a] # concatenate along last axis - array([[0, 1, 2, 0, 1, 2], - [3, 4, 5, 3, 4, 5]]) - >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2 - array([[1, 2, 3], - [4, 5, 6]]) - - >>> np.r_['0,2,0', [1,2,3], [4,5,6]] - array([[1], - [2], - [3], - [4], - [5], - [6]]) - >>> np.r_['1,2,0', [1,2,3], [4,5,6]] - array([[1, 4], - [2, 5], - [3, 6]]) - - Using 'r' or 'c' as a first string argument creates a matrix. - - >>> np.r_['r',[1,2,3], [4,5,6]] - matrix([[1, 2, 3, 4, 5, 6]]) - - """ - - def __init__(self): - AxisConcatenator.__init__(self, 0) - -r_ = RClass() - -class CClass(AxisConcatenator): - """ - Translates slice objects to concatenation along the second axis. - - This is short-hand for ``np.r_['-1,2,0', index expression]``, which is - useful because of its common occurrence. In particular, arrays will be - stacked along their last axis after being upgraded to at least 2-D with - 1's post-pended to the shape (column vectors made out of 1-D arrays). - - See Also - -------- - column_stack : Stack 1-D arrays as columns into a 2-D array. 
- r_ : For more detailed documentation. - - Examples - -------- - >>> np.c_[np.array([1,2,3]), np.array([4,5,6])] - array([[1, 4], - [2, 5], - [3, 6]]) - >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])] - array([[1, 2, 3, ..., 4, 5, 6]]) - - """ - - def __init__(self): - AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0) - - -c_ = CClass() - - -@set_module('numpy') -class ndenumerate(object): - """ - Multidimensional index iterator. - - Return an iterator yielding pairs of array coordinates and values. - - Parameters - ---------- - arr : ndarray - Input array. - - See Also - -------- - ndindex, flatiter - - Examples - -------- - >>> a = np.array([[1, 2], [3, 4]]) - >>> for index, x in np.ndenumerate(a): - ... print(index, x) - (0, 0) 1 - (0, 1) 2 - (1, 0) 3 - (1, 1) 4 - - """ - - def __init__(self, arr): - self.iter = asarray(arr).flat - - def __next__(self): - """ - Standard iterator method, returns the index tuple and array value. - - Returns - ------- - coords : tuple of ints - The indices of the current iteration. - val : scalar - The array element of the current iteration. - - """ - return self.iter.coords, next(self.iter) - - def __iter__(self): - return self - - next = __next__ - - -@set_module('numpy') -class ndindex(object): - """ - An N-dimensional iterator object to index arrays. - - Given the shape of an array, an `ndindex` instance iterates over - the N-dimensional index of the array. At each iteration a tuple - of indices is returned, the last dimension is iterated over first. - - Parameters - ---------- - `*args` : ints - The size of each dimension of the array. - - See Also - -------- - ndenumerate, flatiter - - Examples - -------- - >>> for index in np.ndindex(3, 2, 1): - ... 
print(index) - (0, 0, 0) - (0, 1, 0) - (1, 0, 0) - (1, 1, 0) - (2, 0, 0) - (2, 1, 0) - - """ - - def __init__(self, *shape): - if len(shape) == 1 and isinstance(shape[0], tuple): - shape = shape[0] - x = as_strided(_nx.zeros(1), shape=shape, - strides=_nx.zeros_like(shape)) - self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'], - order='C') - - def __iter__(self): - return self - - def ndincr(self): - """ - Increment the multi-dimensional index by one. - - This method is for backward compatibility only: do not use. - """ - next(self) - - def __next__(self): - """ - Standard iterator method, updates the index and returns the index - tuple. - - Returns - ------- - val : tuple of ints - Returns a tuple containing the indices of the current - iteration. - - """ - next(self._it) - return self._it.multi_index - - next = __next__ - - -# You can do all this with slice() plus a few special objects, -# but there's a lot to remember. This version is simpler because -# it uses the standard array indexing syntax. -# -# Written by Konrad Hinsen -# last revision: 1999-7-23 -# -# Cosmetic changes by T. Oliphant 2001 -# -# - -class IndexExpression(object): - """ - A nicer way to build up index tuples for arrays. - - .. note:: - Use one of the two predefined instances `index_exp` or `s_` - rather than directly using `IndexExpression`. - - For any index combination, including slicing and axis insertion, - ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any - array `a`. However, ``np.index_exp[indices]`` can be used anywhere - in Python code and returns a tuple of slice objects that can be - used in the construction of complex index expressions. - - Parameters - ---------- - maketuple : bool - If True, always returns a tuple. - - See Also - -------- - index_exp : Predefined instance that always returns a tuple: - `index_exp = IndexExpression(maketuple=True)`. - s_ : Predefined instance without tuple conversion: - `s_ = IndexExpression(maketuple=False)`. 
- - Notes - ----- - You can do all this with `slice()` plus a few special objects, - but there's a lot to remember and this version is simpler because - it uses the standard array indexing syntax. - - Examples - -------- - >>> np.s_[2::2] - slice(2, None, 2) - >>> np.index_exp[2::2] - (slice(2, None, 2),) - - >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]] - array([2, 4]) - - """ - - def __init__(self, maketuple): - self.maketuple = maketuple - - def __getitem__(self, item): - if self.maketuple and not isinstance(item, tuple): - return (item,) - else: - return item - -index_exp = IndexExpression(maketuple=True) -s_ = IndexExpression(maketuple=False) - -# End contribution from Konrad. - - -# The following functions complement those in twodim_base, but are -# applicable to N-dimensions. - - -def _fill_diagonal_dispatcher(a, val, wrap=None): - return (a,) - - -@array_function_dispatch(_fill_diagonal_dispatcher) -def fill_diagonal(a, val, wrap=False): - """Fill the main diagonal of the given array of any dimensionality. - - For an array `a` with ``a.ndim >= 2``, the diagonal is the list of - locations with indices ``a[i, ..., i]`` all identical. This function - modifies the input array in-place, it does not return a value. - - Parameters - ---------- - a : array, at least 2-D. - Array whose diagonal is to be filled, it gets modified in-place. - - val : scalar - Value to be written on the diagonal, its type must be compatible with - that of the array a. - - wrap : bool - For tall matrices in NumPy version up to 1.6.2, the - diagonal "wrapped" after N columns. You can have this behavior - with this option. This affects only tall matrices. - - See also - -------- - diag_indices, diag_indices_from - - Notes - ----- - .. versionadded:: 1.4.0 - - This functionality can be obtained via `diag_indices`, but internally - this version uses a much faster implementation that never constructs the - indices and uses simple slicing. 
- - Examples - -------- - >>> a = np.zeros((3, 3), int) - >>> np.fill_diagonal(a, 5) - >>> a - array([[5, 0, 0], - [0, 5, 0], - [0, 0, 5]]) - - The same function can operate on a 4-D array: - - >>> a = np.zeros((3, 3, 3, 3), int) - >>> np.fill_diagonal(a, 4) - - We only show a few blocks for clarity: - - >>> a[0, 0] - array([[4, 0, 0], - [0, 0, 0], - [0, 0, 0]]) - >>> a[1, 1] - array([[0, 0, 0], - [0, 4, 0], - [0, 0, 0]]) - >>> a[2, 2] - array([[0, 0, 0], - [0, 0, 0], - [0, 0, 4]]) - - The wrap option affects only tall matrices: - - >>> # tall matrices no wrap - >>> a = np.zeros((5, 3), int) - >>> np.fill_diagonal(a, 4) - >>> a - array([[4, 0, 0], - [0, 4, 0], - [0, 0, 4], - [0, 0, 0], - [0, 0, 0]]) - - >>> # tall matrices wrap - >>> a = np.zeros((5, 3), int) - >>> np.fill_diagonal(a, 4, wrap=True) - >>> a - array([[4, 0, 0], - [0, 4, 0], - [0, 0, 4], - [0, 0, 0], - [4, 0, 0]]) - - >>> # wide matrices - >>> a = np.zeros((3, 5), int) - >>> np.fill_diagonal(a, 4, wrap=True) - >>> a - array([[4, 0, 0, 0, 0], - [0, 4, 0, 0, 0], - [0, 0, 4, 0, 0]]) - - The anti-diagonal can be filled by reversing the order of elements - using either `numpy.flipud` or `numpy.fliplr`. - - >>> a = np.zeros((3, 3), int); - >>> np.fill_diagonal(np.fliplr(a), [1,2,3]) # Horizontal flip - >>> a - array([[0, 0, 1], - [0, 2, 0], - [3, 0, 0]]) - >>> np.fill_diagonal(np.flipud(a), [1,2,3]) # Vertical flip - >>> a - array([[0, 0, 3], - [0, 2, 0], - [1, 0, 0]]) - - Note that the order in which the diagonal is filled varies depending - on the flip function. - """ - if a.ndim < 2: - raise ValueError("array must be at least 2-d") - end = None - if a.ndim == 2: - # Explicit, fast formula for the common case. For 2-d arrays, we - # accept rectangular ones. - step = a.shape[1] + 1 - #This is needed to don't have tall matrix have the diagonal wrap. 
- if not wrap: - end = a.shape[1] * a.shape[1] - else: - # For more than d=2, the strided formula is only valid for arrays with - # all dimensions equal, so we check first. - if not alltrue(diff(a.shape) == 0): - raise ValueError("All dimensions of input must be of equal length") - step = 1 + (cumprod(a.shape[:-1])).sum() - - # Write the value out into the diagonal. - a.flat[:end:step] = val - - -@set_module('numpy') -def diag_indices(n, ndim=2): - """ - Return the indices to access the main diagonal of an array. - - This returns a tuple of indices that can be used to access the main - diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape - (n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for - ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]`` - for ``i = [0..n-1]``. - - Parameters - ---------- - n : int - The size, along each dimension, of the arrays for which the returned - indices can be used. - - ndim : int, optional - The number of dimensions. - - See also - -------- - diag_indices_from - - Notes - ----- - .. 
versionadded:: 1.4.0 - - Examples - -------- - Create a set of indices to access the diagonal of a (4, 4) array: - - >>> di = np.diag_indices(4) - >>> di - (array([0, 1, 2, 3]), array([0, 1, 2, 3])) - >>> a = np.arange(16).reshape(4, 4) - >>> a - array([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11], - [12, 13, 14, 15]]) - >>> a[di] = 100 - >>> a - array([[100, 1, 2, 3], - [ 4, 100, 6, 7], - [ 8, 9, 100, 11], - [ 12, 13, 14, 100]]) - - Now, we create indices to manipulate a 3-D array: - - >>> d3 = np.diag_indices(2, 3) - >>> d3 - (array([0, 1]), array([0, 1]), array([0, 1])) - - And use it to set the diagonal of an array of zeros to 1: - - >>> a = np.zeros((2, 2, 2), dtype=int) - >>> a[d3] = 1 - >>> a - array([[[1, 0], - [0, 0]], - [[0, 0], - [0, 1]]]) - - """ - idx = arange(n) - return (idx,) * ndim - - -def _diag_indices_from(arr): - return (arr,) - - -@array_function_dispatch(_diag_indices_from) -def diag_indices_from(arr): - """ - Return the indices to access the main diagonal of an n-dimensional array. - - See `diag_indices` for full details. - - Parameters - ---------- - arr : array, at least 2-D - - See Also - -------- - diag_indices - - Notes - ----- - .. versionadded:: 1.4.0 - - """ - - if not arr.ndim >= 2: - raise ValueError("input array must be at least 2-d") - # For more than d=2, the strided formula is only valid for arrays with - # all dimensions equal, so we check first. 
- if not alltrue(diff(arr.shape) == 0): - raise ValueError("All dimensions of input must be of equal length") - - return diag_indices(arr.shape[0], arr.ndim) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/mixins.py b/venv/lib/python3.7/site-packages/numpy/lib/mixins.py deleted file mode 100644 index f974a77..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/mixins.py +++ /dev/null @@ -1,182 +0,0 @@ -"""Mixin classes for custom array types that don't inherit from ndarray.""" -from __future__ import division, absolute_import, print_function - -import sys - -from numpy.core import umath as um - - -__all__ = ['NDArrayOperatorsMixin'] - - -def _disables_array_ufunc(obj): - """True when __array_ufunc__ is set to None.""" - try: - return obj.__array_ufunc__ is None - except AttributeError: - return False - - -def _binary_method(ufunc, name): - """Implement a forward binary method with a ufunc, e.g., __add__.""" - def func(self, other): - if _disables_array_ufunc(other): - return NotImplemented - return ufunc(self, other) - func.__name__ = '__{}__'.format(name) - return func - - -def _reflected_binary_method(ufunc, name): - """Implement a reflected binary method with a ufunc, e.g., __radd__.""" - def func(self, other): - if _disables_array_ufunc(other): - return NotImplemented - return ufunc(other, self) - func.__name__ = '__r{}__'.format(name) - return func - - -def _inplace_binary_method(ufunc, name): - """Implement an in-place binary method with a ufunc, e.g., __iadd__.""" - def func(self, other): - return ufunc(self, other, out=(self,)) - func.__name__ = '__i{}__'.format(name) - return func - - -def _numeric_methods(ufunc, name): - """Implement forward, reflected and inplace binary methods with a ufunc.""" - return (_binary_method(ufunc, name), - _reflected_binary_method(ufunc, name), - _inplace_binary_method(ufunc, name)) - - -def _unary_method(ufunc, name): - """Implement a unary special method with a ufunc.""" - def func(self): - return ufunc(self) - 
func.__name__ = '__{}__'.format(name) - return func - - -class NDArrayOperatorsMixin(object): - """Mixin defining all operator special methods using __array_ufunc__. - - This class implements the special methods for almost all of Python's - builtin operators defined in the `operator` module, including comparisons - (``==``, ``>``, etc.) and arithmetic (``+``, ``*``, ``-``, etc.), by - deferring to the ``__array_ufunc__`` method, which subclasses must - implement. - - It is useful for writing classes that do not inherit from `numpy.ndarray`, - but that should support arithmetic and numpy universal functions like - arrays as described in `A Mechanism for Overriding Ufuncs - <../../neps/nep-0013-ufunc-overrides.html>`_. - - As an trivial example, consider this implementation of an ``ArrayLike`` - class that simply wraps a NumPy array and ensures that the result of any - arithmetic operation is also an ``ArrayLike`` object:: - - class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin): - def __init__(self, value): - self.value = np.asarray(value) - - # One might also consider adding the built-in list type to this - # list, to support operations like np.add(array_like, list) - _HANDLED_TYPES = (np.ndarray, numbers.Number) - - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - out = kwargs.get('out', ()) - for x in inputs + out: - # Only support operations with instances of _HANDLED_TYPES. - # Use ArrayLike instead of type(self) for isinstance to - # allow subclasses that don't override __array_ufunc__ to - # handle ArrayLike objects. - if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)): - return NotImplemented - - # Defer to the implementation of the ufunc on unwrapped values. 
- inputs = tuple(x.value if isinstance(x, ArrayLike) else x - for x in inputs) - if out: - kwargs['out'] = tuple( - x.value if isinstance(x, ArrayLike) else x - for x in out) - result = getattr(ufunc, method)(*inputs, **kwargs) - - if type(result) is tuple: - # multiple return values - return tuple(type(self)(x) for x in result) - elif method == 'at': - # no return value - return None - else: - # one return value - return type(self)(result) - - def __repr__(self): - return '%s(%r)' % (type(self).__name__, self.value) - - In interactions between ``ArrayLike`` objects and numbers or numpy arrays, - the result is always another ``ArrayLike``: - - >>> x = ArrayLike([1, 2, 3]) - >>> x - 1 - ArrayLike(array([0, 1, 2])) - >>> 1 - x - ArrayLike(array([ 0, -1, -2])) - >>> np.arange(3) - x - ArrayLike(array([-1, -1, -1])) - >>> x - np.arange(3) - ArrayLike(array([1, 1, 1])) - - Note that unlike ``numpy.ndarray``, ``ArrayLike`` does not allow operations - with arbitrary, unrecognized types. This ensures that interactions with - ArrayLike preserve a well-defined casting hierarchy. - - .. versionadded:: 1.13 - """ - # Like np.ndarray, this mixin class implements "Option 1" from the ufunc - # overrides NEP. 
- - # comparisons don't have reflected and in-place versions - __lt__ = _binary_method(um.less, 'lt') - __le__ = _binary_method(um.less_equal, 'le') - __eq__ = _binary_method(um.equal, 'eq') - __ne__ = _binary_method(um.not_equal, 'ne') - __gt__ = _binary_method(um.greater, 'gt') - __ge__ = _binary_method(um.greater_equal, 'ge') - - # numeric methods - __add__, __radd__, __iadd__ = _numeric_methods(um.add, 'add') - __sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, 'sub') - __mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul') - __matmul__, __rmatmul__, __imatmul__ = _numeric_methods( - um.matmul, 'matmul') - if sys.version_info.major < 3: - # Python 3 uses only __truediv__ and __floordiv__ - __div__, __rdiv__, __idiv__ = _numeric_methods(um.divide, 'div') - __truediv__, __rtruediv__, __itruediv__ = _numeric_methods( - um.true_divide, 'truediv') - __floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods( - um.floor_divide, 'floordiv') - __mod__, __rmod__, __imod__ = _numeric_methods(um.remainder, 'mod') - __divmod__ = _binary_method(um.divmod, 'divmod') - __rdivmod__ = _reflected_binary_method(um.divmod, 'divmod') - # __idivmod__ does not exist - # TODO: handle the optional third argument for __pow__? 
- __pow__, __rpow__, __ipow__ = _numeric_methods(um.power, 'pow') - __lshift__, __rlshift__, __ilshift__ = _numeric_methods( - um.left_shift, 'lshift') - __rshift__, __rrshift__, __irshift__ = _numeric_methods( - um.right_shift, 'rshift') - __and__, __rand__, __iand__ = _numeric_methods(um.bitwise_and, 'and') - __xor__, __rxor__, __ixor__ = _numeric_methods(um.bitwise_xor, 'xor') - __or__, __ror__, __ior__ = _numeric_methods(um.bitwise_or, 'or') - - # unary methods - __neg__ = _unary_method(um.negative, 'neg') - __pos__ = _unary_method(um.positive, 'pos') - __abs__ = _unary_method(um.absolute, 'abs') - __invert__ = _unary_method(um.invert, 'invert') diff --git a/venv/lib/python3.7/site-packages/numpy/lib/nanfunctions.py b/venv/lib/python3.7/site-packages/numpy/lib/nanfunctions.py deleted file mode 100644 index 8e2a34e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/nanfunctions.py +++ /dev/null @@ -1,1672 +0,0 @@ -""" -Functions that ignore NaN. - -Functions ---------- - -- `nanmin` -- minimum non-NaN value -- `nanmax` -- maximum non-NaN value -- `nanargmin` -- index of minimum non-NaN value -- `nanargmax` -- index of maximum non-NaN value -- `nansum` -- sum of non-NaN values -- `nanprod` -- product of non-NaN values -- `nancumsum` -- cumulative sum of non-NaN values -- `nancumprod` -- cumulative product of non-NaN values -- `nanmean` -- mean of non-NaN values -- `nanvar` -- variance of non-NaN values -- `nanstd` -- standard deviation of non-NaN values -- `nanmedian` -- median of non-NaN values -- `nanquantile` -- qth quantile of non-NaN values -- `nanpercentile` -- qth percentile of non-NaN values - -""" -from __future__ import division, absolute_import, print_function - -import functools -import warnings -import numpy as np -from numpy.lib import function_base -from numpy.core import overrides - - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - - -__all__ = [ - 'nansum', 'nanmax', 'nanmin', 
'nanargmax', 'nanargmin', 'nanmean', - 'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod', - 'nancumsum', 'nancumprod', 'nanquantile' - ] - - -def _nan_mask(a, out=None): - """ - Parameters - ---------- - a : array-like - Input array with at least 1 dimension. - out : ndarray, optional - Alternate output array in which to place the result. The default - is ``None``; if provided, it must have the same shape as the - expected output and will prevent the allocation of a new array. - - Returns - ------- - y : bool ndarray or True - A bool array where ``np.nan`` positions are marked with ``False`` - and other positions are marked with ``True``. If the type of ``a`` - is such that it can't possibly contain ``np.nan``, returns ``True``. - """ - # we assume that a is an array for this private function - - if a.dtype.kind not in 'fc': - return True - - y = np.isnan(a, out=out) - y = np.invert(y, out=y) - return y - -def _replace_nan(a, val): - """ - If `a` is of inexact type, make a copy of `a`, replace NaNs with - the `val` value, and return the copy together with a boolean mask - marking the locations where NaNs were present. If `a` is not of - inexact type, do nothing and return `a` together with a mask of None. - - Note that scalars will end up as array scalars, which is important - for using the result as the value of the out argument in some - operations. - - Parameters - ---------- - a : array-like - Input array. - val : float - NaN values are set to val before doing the operation. - - Returns - ------- - y : ndarray - If `a` is of inexact type, return a copy of `a` with the NaNs - replaced by the fill value, otherwise return `a`. - mask: {bool, None} - If `a` is of inexact type, return a boolean mask marking locations of - NaNs, otherwise return None. 
- - """ - a = np.asanyarray(a) - - if a.dtype == np.object_: - # object arrays do not support `isnan` (gh-9009), so make a guess - mask = np.not_equal(a, a, dtype=bool) - elif issubclass(a.dtype.type, np.inexact): - mask = np.isnan(a) - else: - mask = None - - if mask is not None: - a = np.array(a, subok=True, copy=True) - np.copyto(a, val, where=mask) - - return a, mask - - -def _copyto(a, val, mask): - """ - Replace values in `a` with NaN where `mask` is True. This differs from - copyto in that it will deal with the case where `a` is a numpy scalar. - - Parameters - ---------- - a : ndarray or numpy scalar - Array or numpy scalar some of whose values are to be replaced - by val. - val : numpy scalar - Value used a replacement. - mask : ndarray, scalar - Boolean array. Where True the corresponding element of `a` is - replaced by `val`. Broadcasts. - - Returns - ------- - res : ndarray, scalar - Array with elements replaced or scalar `val`. - - """ - if isinstance(a, np.ndarray): - np.copyto(a, val, where=mask, casting='unsafe') - else: - a = a.dtype.type(val) - return a - - -def _remove_nan_1d(arr1d, overwrite_input=False): - """ - Equivalent to arr1d[~arr1d.isnan()], but in a different order - - Presumably faster as it incurs fewer copies - - Parameters - ---------- - arr1d : ndarray - Array to remove nans from - overwrite_input : bool - True if `arr1d` can be modified in place - - Returns - ------- - res : ndarray - Array with nan elements removed - overwrite_input : bool - True if `res` can be modified in place, given the constraint on the - input - """ - - c = np.isnan(arr1d) - s = np.nonzero(c)[0] - if s.size == arr1d.size: - warnings.warn("All-NaN slice encountered", RuntimeWarning, - stacklevel=5) - return arr1d[:0], True - elif s.size == 0: - return arr1d, overwrite_input - else: - if not overwrite_input: - arr1d = arr1d.copy() - # select non-nans at end of array - enonan = arr1d[-s.size:][~c[-s.size:]] - # fill nans in beginning of array with non-nans of 
end - arr1d[s[:enonan.size]] = enonan - - return arr1d[:-s.size], True - - -def _divide_by_count(a, b, out=None): - """ - Compute a/b ignoring invalid results. If `a` is an array the division - is done in place. If `a` is a scalar, then its type is preserved in the - output. If out is None, then then a is used instead so that the - division is in place. Note that this is only called with `a` an inexact - type. - - Parameters - ---------- - a : {ndarray, numpy scalar} - Numerator. Expected to be of inexact type but not checked. - b : {ndarray, numpy scalar} - Denominator. - out : ndarray, optional - Alternate output array in which to place the result. The default - is ``None``; if provided, it must have the same shape as the - expected output, but the type will be cast if necessary. - - Returns - ------- - ret : {ndarray, numpy scalar} - The return value is a/b. If `a` was an ndarray the division is done - in place. If `a` is a numpy scalar, the division preserves its type. - - """ - with np.errstate(invalid='ignore', divide='ignore'): - if isinstance(a, np.ndarray): - if out is None: - return np.divide(a, b, out=a, casting='unsafe') - else: - return np.divide(a, b, out=out, casting='unsafe') - else: - if out is None: - return a.dtype.type(a / b) - else: - # This is questionable, but currently a numpy scalar can - # be output to a zero dimensional array. - return np.divide(a, b, out=out, casting='unsafe') - - -def _nanmin_dispatcher(a, axis=None, out=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_nanmin_dispatcher) -def nanmin(a, axis=None, out=None, keepdims=np._NoValue): - """ - Return minimum of an array or minimum along an axis, ignoring any NaNs. - When all-NaN slices are encountered a ``RuntimeWarning`` is raised and - Nan is returned for that slice. - - Parameters - ---------- - a : array_like - Array containing numbers whose minimum is desired. If `a` is not an - array, a conversion is attempted. 
- axis : {int, tuple of int, None}, optional - Axis or axes along which the minimum is computed. The default is to compute - the minimum of the flattened array. - out : ndarray, optional - Alternate output array in which to place the result. The default - is ``None``; if provided, it must have the same shape as the - expected output, but the type will be cast if necessary. See - `ufuncs-output-type` for more details. - - .. versionadded:: 1.8.0 - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `a`. - - If the value is anything but the default, then - `keepdims` will be passed through to the `min` method - of sub-classes of `ndarray`. If the sub-classes methods - does not implement `keepdims` any exceptions will be raised. - - .. versionadded:: 1.8.0 - - Returns - ------- - nanmin : ndarray - An array with the same shape as `a`, with the specified axis - removed. If `a` is a 0-d array, or if axis is None, an ndarray - scalar is returned. The same dtype as `a` is returned. - - See Also - -------- - nanmax : - The maximum value of an array along a given axis, ignoring any NaNs. - amin : - The minimum value of an array along a given axis, propagating any NaNs. - fmin : - Element-wise minimum of two arrays, ignoring any NaNs. - minimum : - Element-wise minimum of two arrays, propagating any NaNs. - isnan : - Shows which elements are Not a Number (NaN). - isfinite: - Shows which elements are neither NaN nor infinity. - - amax, fmax, maximum - - Notes - ----- - NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. - Positive infinity is treated as a very large number and negative - infinity is treated as a very small (i.e. negative) number. - - If the input has a integer type the function is equivalent to np.min. 
- - Examples - -------- - >>> a = np.array([[1, 2], [3, np.nan]]) - >>> np.nanmin(a) - 1.0 - >>> np.nanmin(a, axis=0) - array([1., 2.]) - >>> np.nanmin(a, axis=1) - array([1., 3.]) - - When positive infinity and negative infinity are present: - - >>> np.nanmin([1, 2, np.nan, np.inf]) - 1.0 - >>> np.nanmin([1, 2, np.nan, np.NINF]) - -inf - - """ - kwargs = {} - if keepdims is not np._NoValue: - kwargs['keepdims'] = keepdims - if type(a) is np.ndarray and a.dtype != np.object_: - # Fast, but not safe for subclasses of ndarray, or object arrays, - # which do not implement isnan (gh-9009), or fmin correctly (gh-8975) - res = np.fmin.reduce(a, axis=axis, out=out, **kwargs) - if np.isnan(res).any(): - warnings.warn("All-NaN slice encountered", RuntimeWarning, - stacklevel=3) - else: - # Slow, but safe for subclasses of ndarray - a, mask = _replace_nan(a, +np.inf) - res = np.amin(a, axis=axis, out=out, **kwargs) - if mask is None: - return res - - # Check for all-NaN axis - mask = np.all(mask, axis=axis, **kwargs) - if np.any(mask): - res = _copyto(res, np.nan, mask) - warnings.warn("All-NaN axis encountered", RuntimeWarning, - stacklevel=3) - return res - - -def _nanmax_dispatcher(a, axis=None, out=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_nanmax_dispatcher) -def nanmax(a, axis=None, out=None, keepdims=np._NoValue): - """ - Return the maximum of an array or maximum along an axis, ignoring any - NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is - raised and NaN is returned for that slice. - - Parameters - ---------- - a : array_like - Array containing numbers whose maximum is desired. If `a` is not an - array, a conversion is attempted. - axis : {int, tuple of int, None}, optional - Axis or axes along which the maximum is computed. The default is to compute - the maximum of the flattened array. - out : ndarray, optional - Alternate output array in which to place the result. 
The default - is ``None``; if provided, it must have the same shape as the - expected output, but the type will be cast if necessary. See - `ufuncs-output-type` for more details. - - .. versionadded:: 1.8.0 - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `a`. - - If the value is anything but the default, then - `keepdims` will be passed through to the `max` method - of sub-classes of `ndarray`. If the sub-classes methods - does not implement `keepdims` any exceptions will be raised. - - .. versionadded:: 1.8.0 - - Returns - ------- - nanmax : ndarray - An array with the same shape as `a`, with the specified axis removed. - If `a` is a 0-d array, or if axis is None, an ndarray scalar is - returned. The same dtype as `a` is returned. - - See Also - -------- - nanmin : - The minimum value of an array along a given axis, ignoring any NaNs. - amax : - The maximum value of an array along a given axis, propagating any NaNs. - fmax : - Element-wise maximum of two arrays, ignoring any NaNs. - maximum : - Element-wise maximum of two arrays, propagating any NaNs. - isnan : - Shows which elements are Not a Number (NaN). - isfinite: - Shows which elements are neither NaN nor infinity. - - amin, fmin, minimum - - Notes - ----- - NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. - Positive infinity is treated as a very large number and negative - infinity is treated as a very small (i.e. negative) number. - - If the input has a integer type the function is equivalent to np.max. 
- - Examples - -------- - >>> a = np.array([[1, 2], [3, np.nan]]) - >>> np.nanmax(a) - 3.0 - >>> np.nanmax(a, axis=0) - array([3., 2.]) - >>> np.nanmax(a, axis=1) - array([2., 3.]) - - When positive infinity and negative infinity are present: - - >>> np.nanmax([1, 2, np.nan, np.NINF]) - 2.0 - >>> np.nanmax([1, 2, np.nan, np.inf]) - inf - - """ - kwargs = {} - if keepdims is not np._NoValue: - kwargs['keepdims'] = keepdims - if type(a) is np.ndarray and a.dtype != np.object_: - # Fast, but not safe for subclasses of ndarray, or object arrays, - # which do not implement isnan (gh-9009), or fmax correctly (gh-8975) - res = np.fmax.reduce(a, axis=axis, out=out, **kwargs) - if np.isnan(res).any(): - warnings.warn("All-NaN slice encountered", RuntimeWarning, - stacklevel=3) - else: - # Slow, but safe for subclasses of ndarray - a, mask = _replace_nan(a, -np.inf) - res = np.amax(a, axis=axis, out=out, **kwargs) - if mask is None: - return res - - # Check for all-NaN axis - mask = np.all(mask, axis=axis, **kwargs) - if np.any(mask): - res = _copyto(res, np.nan, mask) - warnings.warn("All-NaN axis encountered", RuntimeWarning, - stacklevel=3) - return res - - -def _nanargmin_dispatcher(a, axis=None): - return (a,) - - -@array_function_dispatch(_nanargmin_dispatcher) -def nanargmin(a, axis=None): - """ - Return the indices of the minimum values in the specified axis ignoring - NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results - cannot be trusted if a slice contains only NaNs and Infs. - - Parameters - ---------- - a : array_like - Input data. - axis : int, optional - Axis along which to operate. By default flattened input is used. - - Returns - ------- - index_array : ndarray - An array of indices or a single index value. 
- - See Also - -------- - argmin, nanargmax - - Examples - -------- - >>> a = np.array([[np.nan, 4], [2, 3]]) - >>> np.argmin(a) - 0 - >>> np.nanargmin(a) - 2 - >>> np.nanargmin(a, axis=0) - array([1, 1]) - >>> np.nanargmin(a, axis=1) - array([1, 0]) - - """ - a, mask = _replace_nan(a, np.inf) - res = np.argmin(a, axis=axis) - if mask is not None: - mask = np.all(mask, axis=axis) - if np.any(mask): - raise ValueError("All-NaN slice encountered") - return res - - -def _nanargmax_dispatcher(a, axis=None): - return (a,) - - -@array_function_dispatch(_nanargmax_dispatcher) -def nanargmax(a, axis=None): - """ - Return the indices of the maximum values in the specified axis ignoring - NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the - results cannot be trusted if a slice contains only NaNs and -Infs. - - - Parameters - ---------- - a : array_like - Input data. - axis : int, optional - Axis along which to operate. By default flattened input is used. - - Returns - ------- - index_array : ndarray - An array of indices or a single index value. - - See Also - -------- - argmax, nanargmin - - Examples - -------- - >>> a = np.array([[np.nan, 4], [2, 3]]) - >>> np.argmax(a) - 0 - >>> np.nanargmax(a) - 1 - >>> np.nanargmax(a, axis=0) - array([1, 0]) - >>> np.nanargmax(a, axis=1) - array([1, 1]) - - """ - a, mask = _replace_nan(a, -np.inf) - res = np.argmax(a, axis=axis) - if mask is not None: - mask = np.all(mask, axis=axis) - if np.any(mask): - raise ValueError("All-NaN slice encountered") - return res - - -def _nansum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_nansum_dispatcher) -def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): - """ - Return the sum of array elements over a given axis treating Not a - Numbers (NaNs) as zero. - - In NumPy versions <= 1.9.0 Nan is returned for slices that are all-NaN or - empty. In later versions zero is returned. 
- - Parameters - ---------- - a : array_like - Array containing numbers whose sum is desired. If `a` is not an - array, a conversion is attempted. - axis : {int, tuple of int, None}, optional - Axis or axes along which the sum is computed. The default is to compute the - sum of the flattened array. - dtype : data-type, optional - The type of the returned array and of the accumulator in which the - elements are summed. By default, the dtype of `a` is used. An - exception is when `a` has an integer type with less precision than - the platform (u)intp. In that case, the default will be either - (u)int32 or (u)int64 depending on whether the platform is 32 or 64 - bits. For inexact inputs, dtype must be inexact. - - .. versionadded:: 1.8.0 - out : ndarray, optional - Alternate output array in which to place the result. The default - is ``None``. If provided, it must have the same shape as the - expected output, but the type will be cast if necessary. See - `ufuncs-output-type` for more details. The casting of NaN to integer - can yield unexpected results. - - .. versionadded:: 1.8.0 - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `a`. - - - If the value is anything but the default, then - `keepdims` will be passed through to the `mean` or `sum` methods - of sub-classes of `ndarray`. If the sub-classes methods - does not implement `keepdims` any exceptions will be raised. - - .. versionadded:: 1.8.0 - - Returns - ------- - nansum : ndarray. - A new array holding the result is returned unless `out` is - specified, in which it is returned. The result has the same - size as `a`, and the same shape as `a` if `axis` is not None - or `a` is a 1-d array. - - See Also - -------- - numpy.sum : Sum across array propagating NaNs. - isnan : Show which elements are NaN. 
- isfinite: Show which elements are not NaN or +/-inf. - - Notes - ----- - If both positive and negative infinity are present, the sum will be Not - A Number (NaN). - - Examples - -------- - >>> np.nansum(1) - 1 - >>> np.nansum([1]) - 1 - >>> np.nansum([1, np.nan]) - 1.0 - >>> a = np.array([[1, 1], [1, np.nan]]) - >>> np.nansum(a) - 3.0 - >>> np.nansum(a, axis=0) - array([2., 1.]) - >>> np.nansum([1, np.nan, np.inf]) - inf - >>> np.nansum([1, np.nan, np.NINF]) - -inf - >>> from numpy.testing import suppress_warnings - >>> with suppress_warnings() as sup: - ... sup.filter(RuntimeWarning) - ... np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present - nan - - """ - a, mask = _replace_nan(a, 0) - return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims) - - -def _nanprod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_nanprod_dispatcher) -def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): - """ - Return the product of array elements over a given axis treating Not a - Numbers (NaNs) as ones. - - One is returned for slices that are all-NaN or empty. - - .. versionadded:: 1.10.0 - - Parameters - ---------- - a : array_like - Array containing numbers whose product is desired. If `a` is not an - array, a conversion is attempted. - axis : {int, tuple of int, None}, optional - Axis or axes along which the product is computed. The default is to compute - the product of the flattened array. - dtype : data-type, optional - The type of the returned array and of the accumulator in which the - elements are summed. By default, the dtype of `a` is used. An - exception is when `a` has an integer type with less precision than - the platform (u)intp. In that case, the default will be either - (u)int32 or (u)int64 depending on whether the platform is 32 or 64 - bits. For inexact inputs, dtype must be inexact. 
- out : ndarray, optional - Alternate output array in which to place the result. The default - is ``None``. If provided, it must have the same shape as the - expected output, but the type will be cast if necessary. See - `ufuncs-output-type` for more details. The casting of NaN to integer - can yield unexpected results. - keepdims : bool, optional - If True, the axes which are reduced are left in the result as - dimensions with size one. With this option, the result will - broadcast correctly against the original `arr`. - - Returns - ------- - nanprod : ndarray - A new array holding the result is returned unless `out` is - specified, in which case it is returned. - - See Also - -------- - numpy.prod : Product across array propagating NaNs. - isnan : Show which elements are NaN. - - Examples - -------- - >>> np.nanprod(1) - 1 - >>> np.nanprod([1]) - 1 - >>> np.nanprod([1, np.nan]) - 1.0 - >>> a = np.array([[1, 2], [3, np.nan]]) - >>> np.nanprod(a) - 6.0 - >>> np.nanprod(a, axis=0) - array([3., 2.]) - - """ - a, mask = _replace_nan(a, 1) - return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims) - - -def _nancumsum_dispatcher(a, axis=None, dtype=None, out=None): - return (a, out) - - -@array_function_dispatch(_nancumsum_dispatcher) -def nancumsum(a, axis=None, dtype=None, out=None): - """ - Return the cumulative sum of array elements over a given axis treating Not a - Numbers (NaNs) as zero. The cumulative sum does not change when NaNs are - encountered and leading NaNs are replaced by zeros. - - Zeros are returned for slices that are all-NaN or empty. - - .. versionadded:: 1.12.0 - - Parameters - ---------- - a : array_like - Input array. - axis : int, optional - Axis along which the cumulative sum is computed. The default - (None) is to compute the cumsum over the flattened array. - dtype : dtype, optional - Type of the returned array and of the accumulator in which the - elements are summed. 
If `dtype` is not specified, it defaults - to the dtype of `a`, unless `a` has an integer dtype with a - precision less than that of the default platform integer. In - that case, the default platform integer is used. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output - but the type will be cast if necessary. See `ufuncs-output-type` for - more details. - - Returns - ------- - nancumsum : ndarray. - A new array holding the result is returned unless `out` is - specified, in which it is returned. The result has the same - size as `a`, and the same shape as `a` if `axis` is not None - or `a` is a 1-d array. - - See Also - -------- - numpy.cumsum : Cumulative sum across array propagating NaNs. - isnan : Show which elements are NaN. - - Examples - -------- - >>> np.nancumsum(1) - array([1]) - >>> np.nancumsum([1]) - array([1]) - >>> np.nancumsum([1, np.nan]) - array([1., 1.]) - >>> a = np.array([[1, 2], [3, np.nan]]) - >>> np.nancumsum(a) - array([1., 3., 6., 6.]) - >>> np.nancumsum(a, axis=0) - array([[1., 2.], - [4., 2.]]) - >>> np.nancumsum(a, axis=1) - array([[1., 3.], - [3., 3.]]) - - """ - a, mask = _replace_nan(a, 0) - return np.cumsum(a, axis=axis, dtype=dtype, out=out) - - -def _nancumprod_dispatcher(a, axis=None, dtype=None, out=None): - return (a, out) - - -@array_function_dispatch(_nancumprod_dispatcher) -def nancumprod(a, axis=None, dtype=None, out=None): - """ - Return the cumulative product of array elements over a given axis treating Not a - Numbers (NaNs) as one. The cumulative product does not change when NaNs are - encountered and leading NaNs are replaced by ones. - - Ones are returned for slices that are all-NaN or empty. - - .. versionadded:: 1.12.0 - - Parameters - ---------- - a : array_like - Input array. - axis : int, optional - Axis along which the cumulative product is computed. By default - the input is flattened. 
- dtype : dtype, optional - Type of the returned array, as well as of the accumulator in which - the elements are multiplied. If *dtype* is not specified, it - defaults to the dtype of `a`, unless `a` has an integer dtype with - a precision less than that of the default platform integer. In - that case, the default platform integer is used instead. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output - but the type of the resulting values will be cast if necessary. - - Returns - ------- - nancumprod : ndarray - A new array holding the result is returned unless `out` is - specified, in which case it is returned. - - See Also - -------- - numpy.cumprod : Cumulative product across array propagating NaNs. - isnan : Show which elements are NaN. - - Examples - -------- - >>> np.nancumprod(1) - array([1]) - >>> np.nancumprod([1]) - array([1]) - >>> np.nancumprod([1, np.nan]) - array([1., 1.]) - >>> a = np.array([[1, 2], [3, np.nan]]) - >>> np.nancumprod(a) - array([1., 2., 6., 6.]) - >>> np.nancumprod(a, axis=0) - array([[1., 2.], - [3., 2.]]) - >>> np.nancumprod(a, axis=1) - array([[1., 2.], - [3., 3.]]) - - """ - a, mask = _replace_nan(a, 1) - return np.cumprod(a, axis=axis, dtype=dtype, out=out) - - -def _nanmean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_nanmean_dispatcher) -def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): - """ - Compute the arithmetic mean along the specified axis, ignoring NaNs. - - Returns the average of the array elements. The average is taken over - the flattened array by default, otherwise over the specified axis. - `float64` intermediate and return values are used for integer inputs. - - For all-NaN slices, NaN is returned and a `RuntimeWarning` is raised. - - .. 
versionadded:: 1.8.0 - - Parameters - ---------- - a : array_like - Array containing numbers whose mean is desired. If `a` is not an - array, a conversion is attempted. - axis : {int, tuple of int, None}, optional - Axis or axes along which the means are computed. The default is to compute - the mean of the flattened array. - dtype : data-type, optional - Type to use in computing the mean. For integer inputs, the default - is `float64`; for inexact inputs, it is the same as the input - dtype. - out : ndarray, optional - Alternate output array in which to place the result. The default - is ``None``; if provided, it must have the same shape as the - expected output, but the type will be cast if necessary. See - `ufuncs-output-type` for more details. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `a`. - - If the value is anything but the default, then - `keepdims` will be passed through to the `mean` or `sum` methods - of sub-classes of `ndarray`. If the sub-classes methods - does not implement `keepdims` any exceptions will be raised. - - Returns - ------- - m : ndarray, see dtype parameter above - If `out=None`, returns a new array containing the mean values, - otherwise a reference to the output array is returned. Nan is - returned for slices that contain only NaNs. - - See Also - -------- - average : Weighted average - mean : Arithmetic mean taken while not ignoring NaNs - var, nanvar - - Notes - ----- - The arithmetic mean is the sum of the non-NaN elements along the axis - divided by the number of non-NaN elements. - - Note that for floating-point input, the mean is computed using the same - precision the input has. Depending on the input data, this can cause - the results to be inaccurate, especially for `float32`. 
Specifying a - higher-precision accumulator using the `dtype` keyword can alleviate - this issue. - - Examples - -------- - >>> a = np.array([[1, np.nan], [3, 4]]) - >>> np.nanmean(a) - 2.6666666666666665 - >>> np.nanmean(a, axis=0) - array([2., 4.]) - >>> np.nanmean(a, axis=1) - array([1., 3.5]) # may vary - - """ - arr, mask = _replace_nan(a, 0) - if mask is None: - return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims) - - if dtype is not None: - dtype = np.dtype(dtype) - if dtype is not None and not issubclass(dtype.type, np.inexact): - raise TypeError("If a is inexact, then dtype must be inexact") - if out is not None and not issubclass(out.dtype.type, np.inexact): - raise TypeError("If a is inexact, then out must be inexact") - - cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims) - tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims) - avg = _divide_by_count(tot, cnt, out=out) - - isbad = (cnt == 0) - if isbad.any(): - warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=3) - # NaN is the only possible bad value, so no further - # action is needed to handle bad results. - return avg - - -def _nanmedian1d(arr1d, overwrite_input=False): - """ - Private function for rank 1 arrays. Compute the median ignoring NaNs. - See nanmedian for parameter usage - """ - arr1d, overwrite_input = _remove_nan_1d(arr1d, - overwrite_input=overwrite_input) - if arr1d.size == 0: - return np.nan - - return np.median(arr1d, overwrite_input=overwrite_input) - - -def _nanmedian(a, axis=None, out=None, overwrite_input=False): - """ - Private function that doesn't support extended axis or keepdims. - These methods are extended to this function using _ureduce - See nanmedian for parameter usage - - """ - if axis is None or a.ndim == 1: - part = a.ravel() - if out is None: - return _nanmedian1d(part, overwrite_input) - else: - out[...] 
= _nanmedian1d(part, overwrite_input) - return out - else: - # for small medians use sort + indexing which is still faster than - # apply_along_axis - # benchmarked with shuffled (50, 50, x) containing a few NaN - if a.shape[axis] < 600: - return _nanmedian_small(a, axis, out, overwrite_input) - result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input) - if out is not None: - out[...] = result - return result - - -def _nanmedian_small(a, axis=None, out=None, overwrite_input=False): - """ - sort + indexing median, faster for small medians along multiple - dimensions due to the high overhead of apply_along_axis - - see nanmedian for parameter usage - """ - a = np.ma.masked_array(a, np.isnan(a)) - m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input) - for i in range(np.count_nonzero(m.mask.ravel())): - warnings.warn("All-NaN slice encountered", RuntimeWarning, - stacklevel=4) - if out is not None: - out[...] = m.filled(np.nan) - return out - return m.filled(np.nan) - - -def _nanmedian_dispatcher( - a, axis=None, out=None, overwrite_input=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_nanmedian_dispatcher) -def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue): - """ - Compute the median along the specified axis, while ignoring NaNs. - - Returns the median of the array elements. - - .. versionadded:: 1.9.0 - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - axis : {int, sequence of int, None}, optional - Axis or axes along which the medians are computed. The default - is to compute the median along a flattened version of the array. - A sequence of axes is supported since version 1.9.0. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output, - but the type (of the output) will be cast if necessary. 
- overwrite_input : bool, optional - If True, then allow use of memory of input array `a` for - calculations. The input array will be modified by the call to - `median`. This will save memory when you do not need to preserve - the contents of the input array. Treat the input as undefined, - but it will probably be fully or partially sorted. Default is - False. If `overwrite_input` is ``True`` and `a` is not already an - `ndarray`, an error will be raised. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `a`. - - If this is anything but the default value it will be passed - through (in the special case of an empty array) to the - `mean` function of the underlying array. If the array is - a sub-class and `mean` does not have the kwarg `keepdims` this - will raise a RuntimeError. - - Returns - ------- - median : ndarray - A new array holding the result. If the input contains integers - or floats smaller than ``float64``, then the output data-type is - ``np.float64``. Otherwise, the data-type of the output is the - same as that of the input. If `out` is specified, that array is - returned instead. - - See Also - -------- - mean, median, percentile - - Notes - ----- - Given a vector ``V`` of length ``N``, the median of ``V`` is the - middle value of a sorted copy of ``V``, ``V_sorted`` - i.e., - ``V_sorted[(N-1)/2]``, when ``N`` is odd and the average of the two - middle values of ``V_sorted`` when ``N`` is even. - - Examples - -------- - >>> a = np.array([[10.0, 7, 4], [3, 2, 1]]) - >>> a[0, 1] = np.nan - >>> a - array([[10., nan, 4.], - [ 3., 2., 1.]]) - >>> np.median(a) - nan - >>> np.nanmedian(a) - 3.0 - >>> np.nanmedian(a, axis=0) - array([6.5, 2. 
, 2.5]) - >>> np.median(a, axis=1) - array([nan, 2.]) - >>> b = a.copy() - >>> np.nanmedian(b, axis=1, overwrite_input=True) - array([7., 2.]) - >>> assert not np.all(a==b) - >>> b = a.copy() - >>> np.nanmedian(b, axis=None, overwrite_input=True) - 3.0 - >>> assert not np.all(a==b) - - """ - a = np.asanyarray(a) - # apply_along_axis in _nanmedian doesn't handle empty arrays well, - # so deal them upfront - if a.size == 0: - return np.nanmean(a, axis, out=out, keepdims=keepdims) - - r, k = function_base._ureduce(a, func=_nanmedian, axis=axis, out=out, - overwrite_input=overwrite_input) - if keepdims and keepdims is not np._NoValue: - return r.reshape(k) - else: - return r - - -def _nanpercentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, - interpolation=None, keepdims=None): - return (a, q, out) - - -@array_function_dispatch(_nanpercentile_dispatcher) -def nanpercentile(a, q, axis=None, out=None, overwrite_input=False, - interpolation='linear', keepdims=np._NoValue): - """ - Compute the qth percentile of the data along the specified axis, - while ignoring nan values. - - Returns the qth percentile(s) of the array elements. - - .. versionadded:: 1.9.0 - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array, containing - nan values to be ignored. - q : array_like of float - Percentile or sequence of percentiles to compute, which must be between - 0 and 100 inclusive. - axis : {int, tuple of int, None}, optional - Axis or axes along which the percentiles are computed. The - default is to compute the percentile(s) along a flattened - version of the array. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output, - but the type (of the output) will be cast if necessary. - overwrite_input : bool, optional - If True, then allow the input array `a` to be modified by intermediate - calculations, to save memory. 
In this case, the contents of the input - `a` after this function completes is undefined. - interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} - This optional parameter specifies the interpolation method to - use when the desired percentile lies between two data points - ``i < j``: - - * 'linear': ``i + (j - i) * fraction``, where ``fraction`` - is the fractional part of the index surrounded by ``i`` - and ``j``. - * 'lower': ``i``. - * 'higher': ``j``. - * 'nearest': ``i`` or ``j``, whichever is nearest. - * 'midpoint': ``(i + j) / 2``. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left in - the result as dimensions with size one. With this option, the - result will broadcast correctly against the original array `a`. - - If this is anything but the default value it will be passed - through (in the special case of an empty array) to the - `mean` function of the underlying array. If the array is - a sub-class and `mean` does not have the kwarg `keepdims` this - will raise a RuntimeError. - - Returns - ------- - percentile : scalar or ndarray - If `q` is a single percentile and `axis=None`, then the result - is a scalar. If multiple percentiles are given, first axis of - the result corresponds to the percentiles. The other axes are - the axes that remain after the reduction of `a`. If the input - contains integers or floats smaller than ``float64``, the output - data-type is ``float64``. Otherwise, the output data-type is the - same as that of the input. If `out` is specified, that array is - returned instead. - - See Also - -------- - nanmean - nanmedian : equivalent to ``nanpercentile(..., 50)`` - percentile, median, mean - nanquantile : equivalent to nanpercentile, but with q in the range [0, 1]. - - Notes - ----- - Given a vector ``V`` of length ``N``, the ``q``-th percentile of - ``V`` is the value ``q/100`` of the way from the minimum to the - maximum in a sorted copy of ``V``. 
The values and distances of - the two nearest neighbors as well as the `interpolation` parameter - will determine the percentile if the normalized ranking does not - match the location of ``q`` exactly. This function is the same as - the median if ``q=50``, the same as the minimum if ``q=0`` and the - same as the maximum if ``q=100``. - - Examples - -------- - >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) - >>> a[0][1] = np.nan - >>> a - array([[10., nan, 4.], - [ 3., 2., 1.]]) - >>> np.percentile(a, 50) - nan - >>> np.nanpercentile(a, 50) - 3.0 - >>> np.nanpercentile(a, 50, axis=0) - array([6.5, 2. , 2.5]) - >>> np.nanpercentile(a, 50, axis=1, keepdims=True) - array([[7.], - [2.]]) - >>> m = np.nanpercentile(a, 50, axis=0) - >>> out = np.zeros_like(m) - >>> np.nanpercentile(a, 50, axis=0, out=out) - array([6.5, 2. , 2.5]) - >>> m - array([6.5, 2. , 2.5]) - - >>> b = a.copy() - >>> np.nanpercentile(b, 50, axis=1, overwrite_input=True) - array([7., 2.]) - >>> assert not np.all(a==b) - - """ - a = np.asanyarray(a) - q = np.true_divide(q, 100.0) # handles the asarray for us too - if not function_base._quantile_is_valid(q): - raise ValueError("Percentiles must be in the range [0, 100]") - return _nanquantile_unchecked( - a, q, axis, out, overwrite_input, interpolation, keepdims) - - -def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, - interpolation=None, keepdims=None): - return (a, q, out) - - -@array_function_dispatch(_nanquantile_dispatcher) -def nanquantile(a, q, axis=None, out=None, overwrite_input=False, - interpolation='linear', keepdims=np._NoValue): - """ - Compute the qth quantile of the data along the specified axis, - while ignoring nan values. - Returns the qth quantile(s) of the array elements. - - .. 
versionadded:: 1.15.0 - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array, containing - nan values to be ignored - q : array_like of float - Quantile or sequence of quantiles to compute, which must be between - 0 and 1 inclusive. - axis : {int, tuple of int, None}, optional - Axis or axes along which the quantiles are computed. The - default is to compute the quantile(s) along a flattened - version of the array. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output, - but the type (of the output) will be cast if necessary. - overwrite_input : bool, optional - If True, then allow the input array `a` to be modified by intermediate - calculations, to save memory. In this case, the contents of the input - `a` after this function completes is undefined. - interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} - This optional parameter specifies the interpolation method to - use when the desired quantile lies between two data points - ``i < j``: - - * linear: ``i + (j - i) * fraction``, where ``fraction`` - is the fractional part of the index surrounded by ``i`` - and ``j``. - * lower: ``i``. - * higher: ``j``. - * nearest: ``i`` or ``j``, whichever is nearest. - * midpoint: ``(i + j) / 2``. - - keepdims : bool, optional - If this is set to True, the axes which are reduced are left in - the result as dimensions with size one. With this option, the - result will broadcast correctly against the original array `a`. - - If this is anything but the default value it will be passed - through (in the special case of an empty array) to the - `mean` function of the underlying array. If the array is - a sub-class and `mean` does not have the kwarg `keepdims` this - will raise a RuntimeError. 
- - Returns - ------- - quantile : scalar or ndarray - If `q` is a single percentile and `axis=None`, then the result - is a scalar. If multiple quantiles are given, first axis of - the result corresponds to the quantiles. The other axes are - the axes that remain after the reduction of `a`. If the input - contains integers or floats smaller than ``float64``, the output - data-type is ``float64``. Otherwise, the output data-type is the - same as that of the input. If `out` is specified, that array is - returned instead. - - See Also - -------- - quantile - nanmean, nanmedian - nanmedian : equivalent to ``nanquantile(..., 0.5)`` - nanpercentile : same as nanquantile, but with q in the range [0, 100]. - - Examples - -------- - >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) - >>> a[0][1] = np.nan - >>> a - array([[10., nan, 4.], - [ 3., 2., 1.]]) - >>> np.quantile(a, 0.5) - nan - >>> np.nanquantile(a, 0.5) - 3.0 - >>> np.nanquantile(a, 0.5, axis=0) - array([6.5, 2. , 2.5]) - >>> np.nanquantile(a, 0.5, axis=1, keepdims=True) - array([[7.], - [2.]]) - >>> m = np.nanquantile(a, 0.5, axis=0) - >>> out = np.zeros_like(m) - >>> np.nanquantile(a, 0.5, axis=0, out=out) - array([6.5, 2. , 2.5]) - >>> m - array([6.5, 2. 
, 2.5]) - >>> b = a.copy() - >>> np.nanquantile(b, 0.5, axis=1, overwrite_input=True) - array([7., 2.]) - >>> assert not np.all(a==b) - """ - a = np.asanyarray(a) - q = np.asanyarray(q) - if not function_base._quantile_is_valid(q): - raise ValueError("Quantiles must be in the range [0, 1]") - return _nanquantile_unchecked( - a, q, axis, out, overwrite_input, interpolation, keepdims) - - -def _nanquantile_unchecked(a, q, axis=None, out=None, overwrite_input=False, - interpolation='linear', keepdims=np._NoValue): - """Assumes that q is in [0, 1], and is an ndarray""" - # apply_along_axis in _nanpercentile doesn't handle empty arrays well, - # so deal them upfront - if a.size == 0: - return np.nanmean(a, axis, out=out, keepdims=keepdims) - - r, k = function_base._ureduce( - a, func=_nanquantile_ureduce_func, q=q, axis=axis, out=out, - overwrite_input=overwrite_input, interpolation=interpolation - ) - if keepdims and keepdims is not np._NoValue: - return r.reshape(q.shape + k) - else: - return r - - -def _nanquantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False, - interpolation='linear'): - """ - Private function that doesn't support extended axis or keepdims. - These methods are extended to this function using _ureduce - See nanpercentile for parameter usage - """ - if axis is None or a.ndim == 1: - part = a.ravel() - result = _nanquantile_1d(part, q, overwrite_input, interpolation) - else: - result = np.apply_along_axis(_nanquantile_1d, axis, a, q, - overwrite_input, interpolation) - # apply_along_axis fills in collapsed axis with results. - # Move that axis to the beginning to match percentile's - # convention. - if q.ndim != 0: - result = np.moveaxis(result, axis, 0) - - if out is not None: - out[...] = result - return result - - -def _nanquantile_1d(arr1d, q, overwrite_input=False, interpolation='linear'): - """ - Private function for rank 1 arrays. Compute quantile ignoring NaNs. 
- See nanpercentile for parameter usage - """ - arr1d, overwrite_input = _remove_nan_1d(arr1d, - overwrite_input=overwrite_input) - if arr1d.size == 0: - return np.full(q.shape, np.nan)[()] # convert to scalar - - return function_base._quantile_unchecked( - arr1d, q, overwrite_input=overwrite_input, interpolation=interpolation) - - -def _nanvar_dispatcher( - a, axis=None, dtype=None, out=None, ddof=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_nanvar_dispatcher) -def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): - """ - Compute the variance along the specified axis, while ignoring NaNs. - - Returns the variance of the array elements, a measure of the spread of - a distribution. The variance is computed for the flattened array by - default, otherwise over the specified axis. - - For all-NaN slices or slices with zero degrees of freedom, NaN is - returned and a `RuntimeWarning` is raised. - - .. versionadded:: 1.8.0 - - Parameters - ---------- - a : array_like - Array containing numbers whose variance is desired. If `a` is not an - array, a conversion is attempted. - axis : {int, tuple of int, None}, optional - Axis or axes along which the variance is computed. The default is to compute - the variance of the flattened array. - dtype : data-type, optional - Type to use in computing the variance. For arrays of integer type - the default is `float64`; for arrays of float types it is the same as - the array type. - out : ndarray, optional - Alternate output array in which to place the result. It must have - the same shape as the expected output, but the type is cast if - necessary. - ddof : int, optional - "Delta Degrees of Freedom": the divisor used in the calculation is - ``N - ddof``, where ``N`` represents the number of non-NaN - elements. By default `ddof` is zero. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. 
With this option, - the result will broadcast correctly against the original `a`. - - - Returns - ------- - variance : ndarray, see dtype parameter above - If `out` is None, return a new array containing the variance, - otherwise return a reference to the output array. If ddof is >= the - number of non-NaN elements in a slice or the slice contains only - NaNs, then the result for that slice is NaN. - - See Also - -------- - std : Standard deviation - mean : Average - var : Variance while not ignoring NaNs - nanstd, nanmean - ufuncs-output-type - - Notes - ----- - The variance is the average of the squared deviations from the mean, - i.e., ``var = mean(abs(x - x.mean())**2)``. - - The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``. - If, however, `ddof` is specified, the divisor ``N - ddof`` is used - instead. In standard statistical practice, ``ddof=1`` provides an - unbiased estimator of the variance of a hypothetical infinite - population. ``ddof=0`` provides a maximum likelihood estimate of the - variance for normally distributed variables. - - Note that for complex numbers, the absolute value is taken before - squaring, so that the result is always real and nonnegative. - - For floating-point input, the variance is computed using the same - precision the input has. Depending on the input data, this can cause - the results to be inaccurate, especially for `float32` (see example - below). Specifying a higher-accuracy accumulator using the ``dtype`` - keyword can alleviate this issue. 
- - For this function to work on sub-classes of ndarray, they must define - `sum` with the kwarg `keepdims` - - Examples - -------- - >>> a = np.array([[1, np.nan], [3, 4]]) - >>> np.nanvar(a) - 1.5555555555555554 - >>> np.nanvar(a, axis=0) - array([1., 0.]) - >>> np.nanvar(a, axis=1) - array([0., 0.25]) # may vary - - """ - arr, mask = _replace_nan(a, 0) - if mask is None: - return np.var(arr, axis=axis, dtype=dtype, out=out, ddof=ddof, - keepdims=keepdims) - - if dtype is not None: - dtype = np.dtype(dtype) - if dtype is not None and not issubclass(dtype.type, np.inexact): - raise TypeError("If a is inexact, then dtype must be inexact") - if out is not None and not issubclass(out.dtype.type, np.inexact): - raise TypeError("If a is inexact, then out must be inexact") - - # Compute mean - if type(arr) is np.matrix: - _keepdims = np._NoValue - else: - _keepdims = True - # we need to special case matrix for reverse compatibility - # in order for this to work, these sums need to be called with - # keepdims=True, however matrix now raises an error in this case, but - # the reason that it drops the keepdims kwarg is to force keepdims=True - # so this used to work by serendipity. - cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=_keepdims) - avg = np.sum(arr, axis=axis, dtype=dtype, keepdims=_keepdims) - avg = _divide_by_count(avg, cnt) - - # Compute squared deviation from mean. - np.subtract(arr, avg, out=arr, casting='unsafe') - arr = _copyto(arr, 0, mask) - if issubclass(arr.dtype.type, np.complexfloating): - sqr = np.multiply(arr, arr.conj(), out=arr).real - else: - sqr = np.multiply(arr, arr, out=arr) - - # Compute variance. - var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims) - if var.ndim < cnt.ndim: - # Subclasses of ndarray may ignore keepdims, so check here. 
- cnt = cnt.squeeze(axis) - dof = cnt - ddof - var = _divide_by_count(var, dof) - - isbad = (dof <= 0) - if np.any(isbad): - warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning, - stacklevel=3) - # NaN, inf, or negative numbers are all possible bad - # values, so explicitly replace them with NaN. - var = _copyto(var, np.nan, isbad) - return var - - -def _nanstd_dispatcher( - a, axis=None, dtype=None, out=None, ddof=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_nanstd_dispatcher) -def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): - """ - Compute the standard deviation along the specified axis, while - ignoring NaNs. - - Returns the standard deviation, a measure of the spread of a - distribution, of the non-NaN array elements. The standard deviation is - computed for the flattened array by default, otherwise over the - specified axis. - - For all-NaN slices or slices with zero degrees of freedom, NaN is - returned and a `RuntimeWarning` is raised. - - .. versionadded:: 1.8.0 - - Parameters - ---------- - a : array_like - Calculate the standard deviation of the non-NaN values. - axis : {int, tuple of int, None}, optional - Axis or axes along which the standard deviation is computed. The default is - to compute the standard deviation of the flattened array. - dtype : dtype, optional - Type to use in computing the standard deviation. For arrays of - integer type the default is float64, for arrays of float types it - is the same as the array type. - out : ndarray, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output but the type (of the - calculated values) will be cast if necessary. - ddof : int, optional - Means Delta Degrees of Freedom. The divisor used in calculations - is ``N - ddof``, where ``N`` represents the number of non-NaN - elements. By default `ddof` is zero. 
- - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `a`. - - If this value is anything but the default it is passed through - as-is to the relevant functions of the sub-classes. If these - functions do not have a `keepdims` kwarg, a RuntimeError will - be raised. - - Returns - ------- - standard_deviation : ndarray, see dtype parameter above. - If `out` is None, return a new array containing the standard - deviation, otherwise return a reference to the output array. If - ddof is >= the number of non-NaN elements in a slice or the slice - contains only NaNs, then the result for that slice is NaN. - - See Also - -------- - var, mean, std - nanvar, nanmean - ufuncs-output-type - - Notes - ----- - The standard deviation is the square root of the average of the squared - deviations from the mean: ``std = sqrt(mean(abs(x - x.mean())**2))``. - - The average squared deviation is normally calculated as - ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is - specified, the divisor ``N - ddof`` is used instead. In standard - statistical practice, ``ddof=1`` provides an unbiased estimator of the - variance of the infinite population. ``ddof=0`` provides a maximum - likelihood estimate of the variance for normally distributed variables. - The standard deviation computed in this function is the square root of - the estimated variance, so even with ``ddof=1``, it will not be an - unbiased estimate of the standard deviation per se. - - Note that, for complex numbers, `std` takes the absolute value before - squaring, so that the result is always real and nonnegative. - - For floating-point input, the *std* is computed using the same - precision the input has. Depending on the input data, this can cause - the results to be inaccurate, especially for float32 (see example - below). 
Specifying a higher-accuracy accumulator using the `dtype` - keyword can alleviate this issue. - - Examples - -------- - >>> a = np.array([[1, np.nan], [3, 4]]) - >>> np.nanstd(a) - 1.247219128924647 - >>> np.nanstd(a, axis=0) - array([1., 0.]) - >>> np.nanstd(a, axis=1) - array([0., 0.5]) # may vary - - """ - var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof, - keepdims=keepdims) - if isinstance(var, np.ndarray): - std = np.sqrt(var, out=var) - else: - std = var.dtype.type(np.sqrt(var)) - return std diff --git a/venv/lib/python3.7/site-packages/numpy/lib/npyio.py b/venv/lib/python3.7/site-packages/numpy/lib/npyio.py deleted file mode 100644 index 3e54ff1..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/npyio.py +++ /dev/null @@ -1,2380 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import os -import re -import functools -import itertools -import warnings -import weakref -import contextlib -from operator import itemgetter, index as opindex - -import numpy as np -from . 
import format -from ._datasource import DataSource -from numpy.core import overrides -from numpy.core.multiarray import packbits, unpackbits -from numpy.core.overrides import set_module -from numpy.core._internal import recursive -from ._iotools import ( - LineSplitter, NameValidator, StringConverter, ConverterError, - ConverterLockError, ConversionWarning, _is_string_like, - has_nested_fields, flatten_dtype, easy_dtype, _decode_line - ) - -from numpy.compat import ( - asbytes, asstr, asunicode, bytes, basestring, os_fspath, os_PathLike, - pickle, contextlib_nullcontext - ) - -if sys.version_info[0] >= 3: - from collections.abc import Mapping -else: - from future_builtins import map - from collections import Mapping - - -@set_module('numpy') -def loads(*args, **kwargs): - # NumPy 1.15.0, 2017-12-10 - warnings.warn( - "np.loads is deprecated, use pickle.loads instead", - DeprecationWarning, stacklevel=2) - return pickle.loads(*args, **kwargs) - - -__all__ = [ - 'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt', - 'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez', - 'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource' - ] - - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - - -class BagObj(object): - """ - BagObj(obj) - - Convert attribute look-ups to getitems on the object passed in. - - Parameters - ---------- - obj : class instance - Object on which attribute look-up is performed. - - Examples - -------- - >>> from numpy.lib.npyio import BagObj as BO - >>> class BagDemo(object): - ... def __getitem__(self, key): # An instance of BagObj(BagDemo) - ... # will call this method when any - ... # attribute look-up is required - ... result = "Doesn't matter what you want, " - ... return result + "you're gonna get this" - ... 
- >>> demo_obj = BagDemo() - >>> bagobj = BO(demo_obj) - >>> bagobj.hello_there - "Doesn't matter what you want, you're gonna get this" - >>> bagobj.I_can_be_anything - "Doesn't matter what you want, you're gonna get this" - - """ - - def __init__(self, obj): - # Use weakref to make NpzFile objects collectable by refcount - self._obj = weakref.proxy(obj) - - def __getattribute__(self, key): - try: - return object.__getattribute__(self, '_obj')[key] - except KeyError: - raise AttributeError(key) - - def __dir__(self): - """ - Enables dir(bagobj) to list the files in an NpzFile. - - This also enables tab-completion in an interpreter or IPython. - """ - return list(object.__getattribute__(self, '_obj').keys()) - - -def zipfile_factory(file, *args, **kwargs): - """ - Create a ZipFile. - - Allows for Zip64, and the `file` argument can accept file, str, or - pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile - constructor. - """ - if not hasattr(file, 'read'): - file = os_fspath(file) - import zipfile - kwargs['allowZip64'] = True - return zipfile.ZipFile(file, *args, **kwargs) - - -class NpzFile(Mapping): - """ - NpzFile(fid) - - A dictionary-like object with lazy-loading of files in the zipped - archive provided on construction. - - `NpzFile` is used to load files in the NumPy ``.npz`` data archive - format. It assumes that files in the archive have a ``.npy`` extension, - other files are ignored. - - The arrays and file strings are lazily loaded on either - getitem access using ``obj['key']`` or attribute lookup using - ``obj.f.key``. A list of all files (without ``.npy`` extensions) can - be obtained with ``obj.files`` and the ZipFile object itself using - ``obj.zip``. - - Attributes - ---------- - files : list of str - List of all files in the archive with a ``.npy`` extension. - zip : ZipFile instance - The ZipFile object initialized with the zipped archive. 
- f : BagObj instance - An object on which attribute can be performed as an alternative - to getitem access on the `NpzFile` instance itself. - allow_pickle : bool, optional - Allow loading pickled data. Default: False - - .. versionchanged:: 1.16.3 - Made default False in response to CVE-2019-6446. - - pickle_kwargs : dict, optional - Additional keyword arguments to pass on to pickle.load. - These are only useful when loading object arrays saved on - Python 2 when using Python 3. - - Parameters - ---------- - fid : file or str - The zipped archive to open. This is either a file-like object - or a string containing the path to the archive. - own_fid : bool, optional - Whether NpzFile should close the file handle. - Requires that `fid` is a file-like object. - - Examples - -------- - >>> from tempfile import TemporaryFile - >>> outfile = TemporaryFile() - >>> x = np.arange(10) - >>> y = np.sin(x) - >>> np.savez(outfile, x=x, y=y) - >>> _ = outfile.seek(0) - - >>> npz = np.load(outfile) - >>> isinstance(npz, np.lib.io.NpzFile) - True - >>> sorted(npz.files) - ['x', 'y'] - >>> npz['x'] # getitem access - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - >>> npz.f.x # attribute lookup - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - - """ - - def __init__(self, fid, own_fid=False, allow_pickle=False, - pickle_kwargs=None): - # Import is postponed to here since zipfile depends on gzip, an - # optional component of the so-called standard library. - _zip = zipfile_factory(fid) - self._files = _zip.namelist() - self.files = [] - self.allow_pickle = allow_pickle - self.pickle_kwargs = pickle_kwargs - for x in self._files: - if x.endswith('.npy'): - self.files.append(x[:-4]) - else: - self.files.append(x) - self.zip = _zip - self.f = BagObj(self) - if own_fid: - self.fid = fid - else: - self.fid = None - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - self.close() - - def close(self): - """ - Close the file. 
- - """ - if self.zip is not None: - self.zip.close() - self.zip = None - if self.fid is not None: - self.fid.close() - self.fid = None - self.f = None # break reference cycle - - def __del__(self): - self.close() - - # Implement the Mapping ABC - def __iter__(self): - return iter(self.files) - - def __len__(self): - return len(self.files) - - def __getitem__(self, key): - # FIXME: This seems like it will copy strings around - # more than is strictly necessary. The zipfile - # will read the string and then - # the format.read_array will copy the string - # to another place in memory. - # It would be better if the zipfile could read - # (or at least uncompress) the data - # directly into the array memory. - member = False - if key in self._files: - member = True - elif key in self.files: - member = True - key += '.npy' - if member: - bytes = self.zip.open(key) - magic = bytes.read(len(format.MAGIC_PREFIX)) - bytes.close() - if magic == format.MAGIC_PREFIX: - bytes = self.zip.open(key) - return format.read_array(bytes, - allow_pickle=self.allow_pickle, - pickle_kwargs=self.pickle_kwargs) - else: - return self.zip.read(key) - else: - raise KeyError("%s is not a file in the archive" % key) - - - if sys.version_info.major == 3: - # deprecate the python 2 dict apis that we supported by accident in - # python 3. We forgot to implement itervalues() at all in earlier - # versions of numpy, so no need to deprecated it here. - - def iteritems(self): - # Numpy 1.15, 2018-02-20 - warnings.warn( - "NpzFile.iteritems is deprecated in python 3, to match the " - "removal of dict.itertems. Use .items() instead.", - DeprecationWarning, stacklevel=2) - return self.items() - - def iterkeys(self): - # Numpy 1.15, 2018-02-20 - warnings.warn( - "NpzFile.iterkeys is deprecated in python 3, to match the " - "removal of dict.iterkeys. 
Use .keys() instead.", - DeprecationWarning, stacklevel=2) - return self.keys() - - -@set_module('numpy') -def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, - encoding='ASCII'): - """ - Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files. - - .. warning:: Loading files that contain object arrays uses the ``pickle`` - module, which is not secure against erroneous or maliciously - constructed data. Consider passing ``allow_pickle=False`` to - load data that is known not to contain object arrays for the - safer handling of untrusted sources. - - Parameters - ---------- - file : file-like object, string, or pathlib.Path - The file to read. File-like objects must support the - ``seek()`` and ``read()`` methods. Pickled files require that the - file-like object support the ``readline()`` method as well. - mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional - If not None, then memory-map the file, using the given mode (see - `numpy.memmap` for a detailed description of the modes). A - memory-mapped array is kept on disk. However, it can be accessed - and sliced like any ndarray. Memory mapping is especially useful - for accessing small fragments of large files without reading the - entire file into memory. - allow_pickle : bool, optional - Allow loading pickled object arrays stored in npy files. Reasons for - disallowing pickles include security, as loading pickled data can - execute arbitrary code. If pickles are disallowed, loading object - arrays will fail. Default: False - - .. versionchanged:: 1.16.3 - Made default False in response to CVE-2019-6446. - - fix_imports : bool, optional - Only useful when loading Python 2 generated pickled files on Python 3, - which includes npy/npz files containing object arrays. If `fix_imports` - is True, pickle will try to map the old Python 2 names to the new names - used in Python 3. - encoding : str, optional - What encoding to use when reading Python 2 strings. 
Only useful when - loading Python 2 generated pickled files in Python 3, which includes - npy/npz files containing object arrays. Values other than 'latin1', - 'ASCII', and 'bytes' are not allowed, as they can corrupt numerical - data. Default: 'ASCII' - - Returns - ------- - result : array, tuple, dict, etc. - Data stored in the file. For ``.npz`` files, the returned instance - of NpzFile class must be closed to avoid leaking file descriptors. - - Raises - ------ - IOError - If the input file does not exist or cannot be read. - ValueError - The file contains an object array, but allow_pickle=False given. - - See Also - -------- - save, savez, savez_compressed, loadtxt - memmap : Create a memory-map to an array stored in a file on disk. - lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file. - - Notes - ----- - - If the file contains pickle data, then whatever object is stored - in the pickle is returned. - - If the file is a ``.npy`` file, then a single array is returned. - - If the file is a ``.npz`` file, then a dictionary-like object is - returned, containing ``{filename: array}`` key-value pairs, one for - each file in the archive. - - If the file is a ``.npz`` file, the returned value supports the - context manager protocol in a similar fashion to the open function:: - - with load('foo.npz') as data: - a = data['a'] - - The underlying file descriptor is closed when exiting the 'with' - block. 
- - Examples - -------- - Store data to disk, and load it again: - - >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]])) - >>> np.load('/tmp/123.npy') - array([[1, 2, 3], - [4, 5, 6]]) - - Store compressed data to disk, and load it again: - - >>> a=np.array([[1, 2, 3], [4, 5, 6]]) - >>> b=np.array([1, 2]) - >>> np.savez('/tmp/123.npz', a=a, b=b) - >>> data = np.load('/tmp/123.npz') - >>> data['a'] - array([[1, 2, 3], - [4, 5, 6]]) - >>> data['b'] - array([1, 2]) - >>> data.close() - - Mem-map the stored array, and then access the second row - directly from disk: - - >>> X = np.load('/tmp/123.npy', mmap_mode='r') - >>> X[1, :] - memmap([4, 5, 6]) - - """ - if encoding not in ('ASCII', 'latin1', 'bytes'): - # The 'encoding' value for pickle also affects what encoding - # the serialized binary data of NumPy arrays is loaded - # in. Pickle does not pass on the encoding information to - # NumPy. The unpickling code in numpy.core.multiarray is - # written to assume that unicode data appearing where binary - # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'. - # - # Other encoding values can corrupt binary data, and we - # purposefully disallow them. For the same reason, the errors= - # argument is not exposed, as values other than 'strict' - # result can similarly silently corrupt numerical data. - raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'") - - if sys.version_info[0] >= 3: - pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports) - else: - # Nothing to do on Python 2 - pickle_kwargs = {} - - # TODO: Use contextlib.ExitStack once we drop Python 2 - if hasattr(file, 'read'): - fid = file - own_fid = False - else: - fid = open(os_fspath(file), "rb") - own_fid = True - - try: - # Code to distinguish from NumPy binary files and pickles. 
- _ZIP_PREFIX = b'PK\x03\x04' - _ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this - N = len(format.MAGIC_PREFIX) - magic = fid.read(N) - # If the file size is less than N, we need to make sure not - # to seek past the beginning of the file - fid.seek(-min(N, len(magic)), 1) # back-up - if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX): - # zip-file (assume .npz) - # Transfer file ownership to NpzFile - ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle, - pickle_kwargs=pickle_kwargs) - own_fid = False - return ret - elif magic == format.MAGIC_PREFIX: - # .npy file - if mmap_mode: - return format.open_memmap(file, mode=mmap_mode) - else: - return format.read_array(fid, allow_pickle=allow_pickle, - pickle_kwargs=pickle_kwargs) - else: - # Try a pickle - if not allow_pickle: - raise ValueError("Cannot load file containing pickled data " - "when allow_pickle=False") - try: - return pickle.load(fid, **pickle_kwargs) - except Exception: - raise IOError( - "Failed to interpret file %s as a pickle" % repr(file)) - finally: - if own_fid: - fid.close() - - -def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None): - return (arr,) - - -@array_function_dispatch(_save_dispatcher) -def save(file, arr, allow_pickle=True, fix_imports=True): - """ - Save an array to a binary file in NumPy ``.npy`` format. - - Parameters - ---------- - file : file, str, or pathlib.Path - File or filename to which the data is saved. If file is a file-object, - then the filename is unchanged. If file is a string or Path, a ``.npy`` - extension will be appended to the filename if it does not already - have one. - arr : array_like - Array data to be saved. - allow_pickle : bool, optional - Allow saving object arrays using Python pickles. 
Reasons for disallowing - pickles include security (loading pickled data can execute arbitrary - code) and portability (pickled objects may not be loadable on different - Python installations, for example if the stored objects require libraries - that are not available, and not all pickled data is compatible between - Python 2 and Python 3). - Default: True - fix_imports : bool, optional - Only useful in forcing objects in object arrays on Python 3 to be - pickled in a Python 2 compatible way. If `fix_imports` is True, pickle - will try to map the new Python 3 names to the old module names used in - Python 2, so that the pickle data stream is readable with Python 2. - - See Also - -------- - savez : Save several arrays into a ``.npz`` archive - savetxt, load - - Notes - ----- - For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. - - Any data saved to the file is appended to the end of the file. - - Examples - -------- - >>> from tempfile import TemporaryFile - >>> outfile = TemporaryFile() - - >>> x = np.arange(10) - >>> np.save(outfile, x) - - >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file - >>> np.load(outfile) - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - - - >>> with open('test.npy', 'wb') as f: - ... np.save(f, np.array([1, 2])) - ... np.save(f, np.array([1, 3])) - >>> with open('test.npy', 'rb') as f: - ... a = np.load(f) - ... 
b = np.load(f) - >>> print(a, b) - # [1 2] [1 3] - """ - own_fid = False - if hasattr(file, 'write'): - fid = file - else: - file = os_fspath(file) - if not file.endswith('.npy'): - file = file + '.npy' - fid = open(file, "wb") - own_fid = True - - if sys.version_info[0] >= 3: - pickle_kwargs = dict(fix_imports=fix_imports) - else: - # Nothing to do on Python 2 - pickle_kwargs = None - - try: - arr = np.asanyarray(arr) - format.write_array(fid, arr, allow_pickle=allow_pickle, - pickle_kwargs=pickle_kwargs) - finally: - if own_fid: - fid.close() - - -def _savez_dispatcher(file, *args, **kwds): - for a in args: - yield a - for v in kwds.values(): - yield v - - -@array_function_dispatch(_savez_dispatcher) -def savez(file, *args, **kwds): - """Save several arrays into a single file in uncompressed ``.npz`` format. - - If arguments are passed in with no keywords, the corresponding variable - names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword - arguments are given, the corresponding variable names, in the ``.npz`` - file will match the keyword names. - - Parameters - ---------- - file : str or file - Either the filename (string) or an open file (file-like object) - where the data will be saved. If file is a string or a Path, the - ``.npz`` extension will be appended to the filename if it is not - already there. - args : Arguments, optional - Arrays to save to the file. Since it is not possible for Python to - know the names of the arrays outside `savez`, the arrays will be saved - with names "arr_0", "arr_1", and so on. These arguments can be any - expression. - kwds : Keyword arguments, optional - Arrays to save to the file. Arrays will be saved in the file with the - keyword names. - - Returns - ------- - None - - See Also - -------- - save : Save a single array to a binary file in NumPy format. - savetxt : Save an array to a file as plain text. 
- savez_compressed : Save several arrays into a compressed ``.npz`` archive - - Notes - ----- - The ``.npz`` file format is a zipped archive of files named after the - variables they contain. The archive is not compressed and each file - in the archive contains one variable in ``.npy`` format. For a - description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. - - When opening the saved ``.npz`` file with `load` a `NpzFile` object is - returned. This is a dictionary-like object which can be queried for - its list of arrays (with the ``.files`` attribute), and for the arrays - themselves. - - When saving dictionaries, the dictionary keys become filenames - inside the ZIP archive. Therefore, keys should be valid filenames. - E.g., avoid keys that begin with ``/`` or contain ``.``. - - Examples - -------- - >>> from tempfile import TemporaryFile - >>> outfile = TemporaryFile() - >>> x = np.arange(10) - >>> y = np.sin(x) - - Using `savez` with \\*args, the arrays are saved with default names. - - >>> np.savez(outfile, x, y) - >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file - >>> npzfile = np.load(outfile) - >>> npzfile.files - ['arr_0', 'arr_1'] - >>> npzfile['arr_0'] - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - - Using `savez` with \\**kwds, the arrays are saved with the keyword names. - - >>> outfile = TemporaryFile() - >>> np.savez(outfile, x=x, y=y) - >>> _ = outfile.seek(0) - >>> npzfile = np.load(outfile) - >>> sorted(npzfile.files) - ['x', 'y'] - >>> npzfile['x'] - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - """ - _savez(file, args, kwds, False) - - -def _savez_compressed_dispatcher(file, *args, **kwds): - for a in args: - yield a - for v in kwds.values(): - yield v - - -@array_function_dispatch(_savez_compressed_dispatcher) -def savez_compressed(file, *args, **kwds): - """ - Save several arrays into a single file in compressed ``.npz`` format. - - If keyword arguments are given, then filenames are taken from the keywords. 
- If arguments are passed in with no keywords, then stored filenames are - arr_0, arr_1, etc. - - Parameters - ---------- - file : str or file - Either the filename (string) or an open file (file-like object) - where the data will be saved. If file is a string or a Path, the - ``.npz`` extension will be appended to the filename if it is not - already there. - args : Arguments, optional - Arrays to save to the file. Since it is not possible for Python to - know the names of the arrays outside `savez`, the arrays will be saved - with names "arr_0", "arr_1", and so on. These arguments can be any - expression. - kwds : Keyword arguments, optional - Arrays to save to the file. Arrays will be saved in the file with the - keyword names. - - Returns - ------- - None - - See Also - -------- - numpy.save : Save a single array to a binary file in NumPy format. - numpy.savetxt : Save an array to a file as plain text. - numpy.savez : Save several arrays into an uncompressed ``.npz`` file format - numpy.load : Load the files created by savez_compressed. - - Notes - ----- - The ``.npz`` file format is a zipped archive of files named after the - variables they contain. The archive is compressed with - ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable - in ``.npy`` format. For a description of the ``.npy`` format, see - :py:mod:`numpy.lib.format`. - - - When opening the saved ``.npz`` file with `load` a `NpzFile` object is - returned. This is a dictionary-like object which can be queried for - its list of arrays (with the ``.files`` attribute), and for the arrays - themselves. 
- - Examples - -------- - >>> test_array = np.random.rand(3, 2) - >>> test_vector = np.random.rand(4) - >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector) - >>> loaded = np.load('/tmp/123.npz') - >>> print(np.array_equal(test_array, loaded['a'])) - True - >>> print(np.array_equal(test_vector, loaded['b'])) - True - - """ - _savez(file, args, kwds, True) - - -def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None): - # Import is postponed to here since zipfile depends on gzip, an optional - # component of the so-called standard library. - import zipfile - - if not hasattr(file, 'write'): - file = os_fspath(file) - if not file.endswith('.npz'): - file = file + '.npz' - - namedict = kwds - for i, val in enumerate(args): - key = 'arr_%d' % i - if key in namedict.keys(): - raise ValueError( - "Cannot use un-named variables and keyword %s" % key) - namedict[key] = val - - if compress: - compression = zipfile.ZIP_DEFLATED - else: - compression = zipfile.ZIP_STORED - - zipf = zipfile_factory(file, mode="w", compression=compression) - - if sys.version_info >= (3, 6): - # Since Python 3.6 it is possible to write directly to a ZIP file. - for key, val in namedict.items(): - fname = key + '.npy' - val = np.asanyarray(val) - # always force zip64, gh-10776 - with zipf.open(fname, 'w', force_zip64=True) as fid: - format.write_array(fid, val, - allow_pickle=allow_pickle, - pickle_kwargs=pickle_kwargs) - else: - # Stage arrays in a temporary file on disk, before writing to zip. - - # Import deferred for startup time improvement - import tempfile - # Since target file might be big enough to exceed capacity of a global - # temporary directory, create temp file side-by-side with the target file. 
- file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp') - fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir, suffix='-numpy.npy') - os.close(fd) - try: - for key, val in namedict.items(): - fname = key + '.npy' - fid = open(tmpfile, 'wb') - try: - format.write_array(fid, np.asanyarray(val), - allow_pickle=allow_pickle, - pickle_kwargs=pickle_kwargs) - fid.close() - fid = None - zipf.write(tmpfile, arcname=fname) - except IOError as exc: - raise IOError("Failed to write to %s: %s" % (tmpfile, exc)) - finally: - if fid: - fid.close() - finally: - os.remove(tmpfile) - - zipf.close() - - -def _getconv(dtype): - """ Find the correct dtype converter. Adapted from matplotlib """ - - def floatconv(x): - x.lower() - if '0x' in x: - return float.fromhex(x) - return float(x) - - typ = dtype.type - if issubclass(typ, np.bool_): - return lambda x: bool(int(x)) - if issubclass(typ, np.uint64): - return np.uint64 - if issubclass(typ, np.int64): - return np.int64 - if issubclass(typ, np.integer): - return lambda x: int(float(x)) - elif issubclass(typ, np.longdouble): - return np.longdouble - elif issubclass(typ, np.floating): - return floatconv - elif issubclass(typ, complex): - return lambda x: complex(asstr(x).replace('+-', '-')) - elif issubclass(typ, np.bytes_): - return asbytes - elif issubclass(typ, np.unicode_): - return asunicode - else: - return asstr - -# amount of lines loadtxt reads in one chunk, can be overridden for testing -_loadtxt_chunksize = 50000 - - -@set_module('numpy') -def loadtxt(fname, dtype=float, comments='#', delimiter=None, - converters=None, skiprows=0, usecols=None, unpack=False, - ndmin=0, encoding='bytes', max_rows=None): - """ - Load data from a text file. - - Each row in the text file must have the same number of values. - - Parameters - ---------- - fname : file, str, or pathlib.Path - File, filename, or generator to read. 
If the filename extension is - ``.gz`` or ``.bz2``, the file is first decompressed. Note that - generators should return byte strings. - dtype : data-type, optional - Data-type of the resulting array; default: float. If this is a - structured data-type, the resulting array will be 1-dimensional, and - each row will be interpreted as an element of the array. In this - case, the number of columns used must match the number of fields in - the data-type. - comments : str or sequence of str, optional - The characters or list of characters used to indicate the start of a - comment. None implies no comments. For backwards compatibility, byte - strings will be decoded as 'latin1'. The default is '#'. - delimiter : str, optional - The string used to separate values. For backwards compatibility, byte - strings will be decoded as 'latin1'. The default is whitespace. - converters : dict, optional - A dictionary mapping column number to a function that will parse the - column string into the desired value. E.g., if column 0 is a date - string: ``converters = {0: datestr2num}``. Converters can also be - used to provide a default value for missing data (but see also - `genfromtxt`): ``converters = {3: lambda s: float(s.strip() or 0)}``. - Default: None. - skiprows : int, optional - Skip the first `skiprows` lines, including comments; default: 0. - usecols : int or sequence, optional - Which columns to read, with 0 being the first. For example, - ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. - The default, None, results in all columns being read. - - .. versionchanged:: 1.11.0 - When a single column has to be read it is possible to use - an integer instead of a tuple. E.g ``usecols = 3`` reads the - fourth column the same way as ``usecols = (3,)`` would. - unpack : bool, optional - If True, the returned array is transposed, so that arguments may be - unpacked using ``x, y, z = loadtxt(...)``. 
When used with a structured - data-type, arrays are returned for each field. Default is False. - ndmin : int, optional - The returned array will have at least `ndmin` dimensions. - Otherwise mono-dimensional axes will be squeezed. - Legal values: 0 (default), 1 or 2. - - .. versionadded:: 1.6.0 - encoding : str, optional - Encoding used to decode the inputfile. Does not apply to input streams. - The special value 'bytes' enables backward compatibility workarounds - that ensures you receive byte arrays as results if possible and passes - 'latin1' encoded strings to converters. Override this value to receive - unicode arrays and pass strings as input to converters. If set to None - the system default is used. The default value is 'bytes'. - - .. versionadded:: 1.14.0 - max_rows : int, optional - Read `max_rows` lines of content after `skiprows` lines. The default - is to read all the lines. - - .. versionadded:: 1.16.0 - - Returns - ------- - out : ndarray - Data read from the text file. - - See Also - -------- - load, fromstring, fromregex - genfromtxt : Load data with missing values handled as specified. - scipy.io.loadmat : reads MATLAB data files - - Notes - ----- - This function aims to be a fast reader for simply formatted files. The - `genfromtxt` function provides more sophisticated handling of, e.g., - lines with missing values. - - .. versionadded:: 1.10.0 - - The strings produced by the Python float.hex method can be used as - input for floats. - - Examples - -------- - >>> from io import StringIO # StringIO behaves like a file object - >>> c = StringIO(u"0 1\\n2 3") - >>> np.loadtxt(c) - array([[0., 1.], - [2., 3.]]) - - >>> d = StringIO(u"M 21 72\\nF 35 58") - >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'), - ... 
'formats': ('S1', 'i4', 'f4')}) - array([(b'M', 21, 72.), (b'F', 35, 58.)], - dtype=[('gender', 'S1'), ('age', '>> c = StringIO(u"1,0,2\\n3,0,4") - >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True) - >>> x - array([1., 3.]) - >>> y - array([2., 4.]) - - """ - # Type conversions for Py3 convenience - if comments is not None: - if isinstance(comments, (basestring, bytes)): - comments = [comments] - comments = [_decode_line(x) for x in comments] - # Compile regex for comments beforehand - comments = (re.escape(comment) for comment in comments) - regex_comments = re.compile('|'.join(comments)) - - if delimiter is not None: - delimiter = _decode_line(delimiter) - - user_converters = converters - - if encoding == 'bytes': - encoding = None - byte_converters = True - else: - byte_converters = False - - if usecols is not None: - # Allow usecols to be a single int or a sequence of ints - try: - usecols_as_list = list(usecols) - except TypeError: - usecols_as_list = [usecols] - for col_idx in usecols_as_list: - try: - opindex(col_idx) - except TypeError as e: - e.args = ( - "usecols must be an int or a sequence of ints but " - "it contains at least one element of type %s" % - type(col_idx), - ) - raise - # Fall back to existing code - usecols = usecols_as_list - - fown = False - try: - if isinstance(fname, os_PathLike): - fname = os_fspath(fname) - if _is_string_like(fname): - fh = np.lib._datasource.open(fname, 'rt', encoding=encoding) - fencoding = getattr(fh, 'encoding', 'latin1') - fh = iter(fh) - fown = True - else: - fh = iter(fname) - fencoding = getattr(fname, 'encoding', 'latin1') - except TypeError: - raise ValueError('fname must be a string, file handle, or generator') - - # input may be a python2 io stream - if encoding is not None: - fencoding = encoding - # we must assume local encoding - # TODO emit portability warning? 
- elif fencoding is None: - import locale - fencoding = locale.getpreferredencoding() - - # not to be confused with the flatten_dtype we import... - @recursive - def flatten_dtype_internal(self, dt): - """Unpack a structured data-type, and produce re-packing info.""" - if dt.names is None: - # If the dtype is flattened, return. - # If the dtype has a shape, the dtype occurs - # in the list more than once. - shape = dt.shape - if len(shape) == 0: - return ([dt.base], None) - else: - packing = [(shape[-1], list)] - if len(shape) > 1: - for dim in dt.shape[-2::-1]: - packing = [(dim*packing[0][0], packing*dim)] - return ([dt.base] * int(np.prod(dt.shape)), packing) - else: - types = [] - packing = [] - for field in dt.names: - tp, bytes = dt.fields[field] - flat_dt, flat_packing = self(tp) - types.extend(flat_dt) - # Avoid extra nesting for subarrays - if tp.ndim > 0: - packing.extend(flat_packing) - else: - packing.append((len(flat_dt), flat_packing)) - return (types, packing) - - @recursive - def pack_items(self, items, packing): - """Pack items into nested lists based on re-packing info.""" - if packing is None: - return items[0] - elif packing is tuple: - return tuple(items) - elif packing is list: - return list(items) - else: - start = 0 - ret = [] - for length, subpacking in packing: - ret.append(self(items[start:start+length], subpacking)) - start += length - return tuple(ret) - - def split_line(line): - """Chop off comments, strip, and split at delimiter. """ - line = _decode_line(line, encoding=encoding) - - if comments is not None: - line = regex_comments.split(line, maxsplit=1)[0] - line = line.strip('\r\n') - if line: - return line.split(delimiter) - else: - return [] - - def read_data(chunk_size): - """Parse each line, including the first. - - The file read, `fh`, is a global defined above. - - Parameters - ---------- - chunk_size : int - At most `chunk_size` lines are read at a time, with iteration - until all lines are read. 
- - """ - X = [] - line_iter = itertools.chain([first_line], fh) - line_iter = itertools.islice(line_iter, max_rows) - for i, line in enumerate(line_iter): - vals = split_line(line) - if len(vals) == 0: - continue - if usecols: - vals = [vals[j] for j in usecols] - if len(vals) != N: - line_num = i + skiprows + 1 - raise ValueError("Wrong number of columns at line %d" - % line_num) - - # Convert each value according to its column and store - items = [conv(val) for (conv, val) in zip(converters, vals)] - - # Then pack it according to the dtype's nesting - items = pack_items(items, packing) - X.append(items) - if len(X) > chunk_size: - yield X - X = [] - if X: - yield X - - try: - # Make sure we're dealing with a proper dtype - dtype = np.dtype(dtype) - defconv = _getconv(dtype) - - # Skip the first `skiprows` lines - for i in range(skiprows): - next(fh) - - # Read until we find a line with some values, and use - # it to estimate the number of columns, N. - first_vals = None - try: - while not first_vals: - first_line = next(fh) - first_vals = split_line(first_line) - except StopIteration: - # End of lines reached - first_line = '' - first_vals = [] - warnings.warn('loadtxt: Empty input file: "%s"' % fname, stacklevel=2) - N = len(usecols or first_vals) - - dtype_types, packing = flatten_dtype_internal(dtype) - if len(dtype_types) > 1: - # We're dealing with a structured array, each field of - # the dtype matches a column - converters = [_getconv(dt) for dt in dtype_types] - else: - # All fields have the same dtype - converters = [defconv for i in range(N)] - if N > 1: - packing = [(N, tuple)] - - # By preference, use the converters specified by the user - for i, conv in (user_converters or {}).items(): - if usecols: - try: - i = usecols.index(i) - except ValueError: - # Unused converter specified - continue - if byte_converters: - # converters may use decode to workaround numpy's old behaviour, - # so encode the string again before passing to the user converter - 
def tobytes_first(x, conv): - if type(x) is bytes: - return conv(x) - return conv(x.encode("latin1")) - converters[i] = functools.partial(tobytes_first, conv=conv) - else: - converters[i] = conv - - converters = [conv if conv is not bytes else - lambda x: x.encode(fencoding) for conv in converters] - - # read data in chunks and fill it into an array via resize - # over-allocating and shrinking the array later may be faster but is - # probably not relevant compared to the cost of actually reading and - # converting the data - X = None - for x in read_data(_loadtxt_chunksize): - if X is None: - X = np.array(x, dtype) - else: - nshape = list(X.shape) - pos = nshape[0] - nshape[0] += len(x) - X.resize(nshape, refcheck=False) - X[pos:, ...] = x - finally: - if fown: - fh.close() - - if X is None: - X = np.array([], dtype) - - # Multicolumn data are returned with shape (1, N, M), i.e. - # (1, 1, M) for a single row - remove the singleton dimension there - if X.ndim == 3 and X.shape[:2] == (1, 1): - X.shape = (1, -1) - - # Verify that the array has at least dimensions `ndmin`. - # Check correctness of the values of `ndmin` - if ndmin not in [0, 1, 2]: - raise ValueError('Illegal value of ndmin keyword: %s' % ndmin) - # Tweak the size and shape of the arrays - remove extraneous dimensions - if X.ndim > ndmin: - X = np.squeeze(X) - # and ensure we have the minimum number of dimensions asked for - # - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0 - if X.ndim < ndmin: - if ndmin == 1: - X = np.atleast_1d(X) - elif ndmin == 2: - X = np.atleast_2d(X).T - - if unpack: - if len(dtype_types) > 1: - # For structured arrays, return an array for each field. 
- return [X[field] for field in dtype.names] - else: - return X.T - else: - return X - - -def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None, - header=None, footer=None, comments=None, - encoding=None): - return (X,) - - -@array_function_dispatch(_savetxt_dispatcher) -def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='', - footer='', comments='# ', encoding=None): - """ - Save an array to a text file. - - Parameters - ---------- - fname : filename or file handle - If the filename ends in ``.gz``, the file is automatically saved in - compressed gzip format. `loadtxt` understands gzipped files - transparently. - X : 1D or 2D array_like - Data to be saved to a text file. - fmt : str or sequence of strs, optional - A single format (%10.5f), a sequence of formats, or a - multi-format string, e.g. 'Iteration %d -- %10.5f', in which - case `delimiter` is ignored. For complex `X`, the legal options - for `fmt` are: - - * a single specifier, `fmt='%.4e'`, resulting in numbers formatted - like `' (%s+%sj)' % (fmt, fmt)` - * a full string specifying every real and imaginary part, e.g. - `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns - * a list of specifiers, one per column - in this case, the real - and imaginary part must have separate specifiers, - e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns - delimiter : str, optional - String or character separating columns. - newline : str, optional - String or character separating lines. - - .. versionadded:: 1.5.0 - header : str, optional - String that will be written at the beginning of the file. - - .. versionadded:: 1.7.0 - footer : str, optional - String that will be written at the end of the file. - - .. versionadded:: 1.7.0 - comments : str, optional - String that will be prepended to the ``header`` and ``footer`` strings, - to mark them as comments. Default: '# ', as expected by e.g. - ``numpy.loadtxt``. - - .. 
versionadded:: 1.7.0 - encoding : {None, str}, optional - Encoding used to encode the outputfile. Does not apply to output - streams. If the encoding is something other than 'bytes' or 'latin1' - you will not be able to load the file in NumPy versions < 1.14. Default - is 'latin1'. - - .. versionadded:: 1.14.0 - - - See Also - -------- - save : Save an array to a binary file in NumPy ``.npy`` format - savez : Save several arrays into an uncompressed ``.npz`` archive - savez_compressed : Save several arrays into a compressed ``.npz`` archive - - Notes - ----- - Further explanation of the `fmt` parameter - (``%[flag]width[.precision]specifier``): - - flags: - ``-`` : left justify - - ``+`` : Forces to precede result with + or -. - - ``0`` : Left pad the number with zeros instead of space (see width). - - width: - Minimum number of characters to be printed. The value is not truncated - if it has more characters. - - precision: - - For integer specifiers (eg. ``d,i,o,x``), the minimum number of - digits. - - For ``e, E`` and ``f`` specifiers, the number of digits to print - after the decimal point. - - For ``g`` and ``G``, the maximum number of significant digits. - - For ``s``, the maximum number of characters. - - specifiers: - ``c`` : character - - ``d`` or ``i`` : signed decimal integer - - ``e`` or ``E`` : scientific notation with ``e`` or ``E``. - - ``f`` : decimal floating point - - ``g,G`` : use the shorter of ``e,E`` or ``f`` - - ``o`` : signed octal - - ``s`` : string of characters - - ``u`` : unsigned decimal integer - - ``x,X`` : unsigned hexadecimal integer - - This explanation of ``fmt`` is not complete, for an exhaustive - specification see [1]_. - - References - ---------- - .. [1] `Format Specification Mini-Language - `_, - Python Documentation. 
- - Examples - -------- - >>> x = y = z = np.arange(0.0,5.0,1.0) - >>> np.savetxt('test.out', x, delimiter=',') # X is an array - >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays - >>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation - - """ - - # Py3 conversions first - if isinstance(fmt, bytes): - fmt = asstr(fmt) - delimiter = asstr(delimiter) - - class WriteWrap(object): - """Convert to unicode in py2 or to bytes on bytestream inputs. - - """ - def __init__(self, fh, encoding): - self.fh = fh - self.encoding = encoding - self.do_write = self.first_write - - def close(self): - self.fh.close() - - def write(self, v): - self.do_write(v) - - def write_bytes(self, v): - if isinstance(v, bytes): - self.fh.write(v) - else: - self.fh.write(v.encode(self.encoding)) - - def write_normal(self, v): - self.fh.write(asunicode(v)) - - def first_write(self, v): - try: - self.write_normal(v) - self.write = self.write_normal - except TypeError: - # input is probably a bytestream - self.write_bytes(v) - self.write = self.write_bytes - - own_fh = False - if isinstance(fname, os_PathLike): - fname = os_fspath(fname) - if _is_string_like(fname): - # datasource doesn't support creating a new file ... 
- open(fname, 'wt').close() - fh = np.lib._datasource.open(fname, 'wt', encoding=encoding) - own_fh = True - # need to convert str to unicode for text io output - if sys.version_info[0] == 2: - fh = WriteWrap(fh, encoding or 'latin1') - elif hasattr(fname, 'write'): - # wrap to handle byte output streams - fh = WriteWrap(fname, encoding or 'latin1') - else: - raise ValueError('fname must be a string or file handle') - - try: - X = np.asarray(X) - - # Handle 1-dimensional arrays - if X.ndim == 0 or X.ndim > 2: - raise ValueError( - "Expected 1D or 2D array, got %dD array instead" % X.ndim) - elif X.ndim == 1: - # Common case -- 1d array of numbers - if X.dtype.names is None: - X = np.atleast_2d(X).T - ncol = 1 - - # Complex dtype -- each field indicates a separate column - else: - ncol = len(X.dtype.names) - else: - ncol = X.shape[1] - - iscomplex_X = np.iscomplexobj(X) - # `fmt` can be a string with multiple insertion points or a - # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '$10d') - if type(fmt) in (list, tuple): - if len(fmt) != ncol: - raise AttributeError('fmt has wrong shape. 
%s' % str(fmt)) - format = asstr(delimiter).join(map(asstr, fmt)) - elif isinstance(fmt, basestring): - n_fmt_chars = fmt.count('%') - error = ValueError('fmt has wrong number of %% formats: %s' % fmt) - if n_fmt_chars == 1: - if iscomplex_X: - fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol - else: - fmt = [fmt, ] * ncol - format = delimiter.join(fmt) - elif iscomplex_X and n_fmt_chars != (2 * ncol): - raise error - elif ((not iscomplex_X) and n_fmt_chars != ncol): - raise error - else: - format = fmt - else: - raise ValueError('invalid fmt: %r' % (fmt,)) - - if len(header) > 0: - header = header.replace('\n', '\n' + comments) - fh.write(comments + header + newline) - if iscomplex_X: - for row in X: - row2 = [] - for number in row: - row2.append(number.real) - row2.append(number.imag) - s = format % tuple(row2) + newline - fh.write(s.replace('+-', '-')) - else: - for row in X: - try: - v = format % tuple(row) + newline - except TypeError: - raise TypeError("Mismatch between array dtype ('%s') and " - "format specifier ('%s')" - % (str(X.dtype), format)) - fh.write(v) - - if len(footer) > 0: - footer = footer.replace('\n', '\n' + comments) - fh.write(comments + footer + newline) - finally: - if own_fh: - fh.close() - - -@set_module('numpy') -def fromregex(file, regexp, dtype, encoding=None): - """ - Construct an array from a text file, using regular expression parsing. - - The returned array is always a structured array, and is constructed from - all matches of the regular expression in the file. Groups in the regular - expression are converted to fields of the structured array. - - Parameters - ---------- - file : str or file - Filename or file object to read. - regexp : str or regexp - Regular expression used to parse the file. - Groups in the regular expression correspond to fields in the dtype. - dtype : dtype or list of dtypes - Dtype for the structured array. - encoding : str, optional - Encoding used to decode the inputfile. Does not apply to input streams. - - .. 
versionadded:: 1.14.0 - - Returns - ------- - output : ndarray - The output array, containing the part of the content of `file` that - was matched by `regexp`. `output` is always a structured array. - - Raises - ------ - TypeError - When `dtype` is not a valid dtype for a structured array. - - See Also - -------- - fromstring, loadtxt - - Notes - ----- - Dtypes for structured arrays can be specified in several forms, but all - forms specify at least the data type and field name. For details see - `doc.structured_arrays`. - - Examples - -------- - >>> f = open('test.dat', 'w') - >>> _ = f.write("1312 foo\\n1534 bar\\n444 qux") - >>> f.close() - - >>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything] - >>> output = np.fromregex('test.dat', regexp, - ... [('num', np.int64), ('key', 'S3')]) - >>> output - array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')], - dtype=[('num', '>> output['num'] - array([1312, 1534, 444]) - - """ - own_fh = False - if not hasattr(file, "read"): - file = np.lib._datasource.open(file, 'rt', encoding=encoding) - own_fh = True - - try: - if not isinstance(dtype, np.dtype): - dtype = np.dtype(dtype) - - content = file.read() - if isinstance(content, bytes) and isinstance(regexp, np.compat.unicode): - regexp = asbytes(regexp) - elif isinstance(content, np.compat.unicode) and isinstance(regexp, bytes): - regexp = asstr(regexp) - - if not hasattr(regexp, 'match'): - regexp = re.compile(regexp) - seq = regexp.findall(content) - if seq and not isinstance(seq[0], tuple): - # Only one group is in the regexp. - # Create the new array as a single data-type and then - # re-interpret as a single-field structured array. 
- newdtype = np.dtype(dtype[dtype.names[0]]) - output = np.array(seq, dtype=newdtype) - output.dtype = dtype - else: - output = np.array(seq, dtype=dtype) - - return output - finally: - if own_fh: - file.close() - - -#####-------------------------------------------------------------------------- -#---- --- ASCII functions --- -#####-------------------------------------------------------------------------- - - -@set_module('numpy') -def genfromtxt(fname, dtype=float, comments='#', delimiter=None, - skip_header=0, skip_footer=0, converters=None, - missing_values=None, filling_values=None, usecols=None, - names=None, excludelist=None, - deletechars=''.join(sorted(NameValidator.defaultdeletechars)), - replace_space='_', autostrip=False, case_sensitive=True, - defaultfmt="f%i", unpack=None, usemask=False, loose=True, - invalid_raise=True, max_rows=None, encoding='bytes'): - """ - Load data from a text file, with missing values handled as specified. - - Each line past the first `skip_header` lines is split at the `delimiter` - character, and characters following the `comments` character are discarded. - - Parameters - ---------- - fname : file, str, pathlib.Path, list of str, generator - File, filename, list, or generator to read. If the filename - extension is `.gz` or `.bz2`, the file is first decompressed. Note - that generators must return byte strings. The strings - in a list or produced by a generator are treated as lines. - dtype : dtype, optional - Data type of the resulting array. - If None, the dtypes will be determined by the contents of each - column, individually. - comments : str, optional - The character used to indicate the start of a comment. - All the characters occurring on a line after a comment are discarded - delimiter : str, int, or sequence, optional - The string used to separate values. By default, any consecutive - whitespaces act as delimiter. An integer or sequence of integers - can also be provided as width(s) of each field. 
- skiprows : int, optional - `skiprows` was removed in numpy 1.10. Please use `skip_header` instead. - skip_header : int, optional - The number of lines to skip at the beginning of the file. - skip_footer : int, optional - The number of lines to skip at the end of the file. - converters : variable, optional - The set of functions that convert the data of a column to a value. - The converters can also be used to provide a default value - for missing data: ``converters = {3: lambda s: float(s or 0)}``. - missing : variable, optional - `missing` was removed in numpy 1.10. Please use `missing_values` - instead. - missing_values : variable, optional - The set of strings corresponding to missing data. - filling_values : variable, optional - The set of values to be used as default when the data are missing. - usecols : sequence, optional - Which columns to read, with 0 being the first. For example, - ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns. - names : {None, True, str, sequence}, optional - If `names` is True, the field names are read from the first line after - the first `skip_header` lines. This line can optionally be proceeded - by a comment delimiter. If `names` is a sequence or a single-string of - comma-separated names, the names will be used to define the field names - in a structured dtype. If `names` is None, the names of the dtype - fields will be used, if any. - excludelist : sequence, optional - A list of names to exclude. This list is appended to the default list - ['return','file','print']. Excluded names are appended an underscore: - for example, `file` would become `file_`. - deletechars : str, optional - A string combining invalid characters that must be deleted from the - names. - defaultfmt : str, optional - A format used to define default field names, such as "f%i" or "f_%02i". - autostrip : bool, optional - Whether to automatically strip white spaces from the variables. 
- replace_space : char, optional - Character(s) used in replacement of white spaces in the variables - names. By default, use a '_'. - case_sensitive : {True, False, 'upper', 'lower'}, optional - If True, field names are case sensitive. - If False or 'upper', field names are converted to upper case. - If 'lower', field names are converted to lower case. - unpack : bool, optional - If True, the returned array is transposed, so that arguments may be - unpacked using ``x, y, z = loadtxt(...)`` - usemask : bool, optional - If True, return a masked array. - If False, return a regular array. - loose : bool, optional - If True, do not raise errors for invalid values. - invalid_raise : bool, optional - If True, an exception is raised if an inconsistency is detected in the - number of columns. - If False, a warning is emitted and the offending lines are skipped. - max_rows : int, optional - The maximum number of rows to read. Must not be used with skip_footer - at the same time. If given, the value must be at least 1. Default is - to read the entire file. - - .. versionadded:: 1.10.0 - encoding : str, optional - Encoding used to decode the inputfile. Does not apply when `fname` is - a file object. The special value 'bytes' enables backward compatibility - workarounds that ensure that you receive byte arrays when possible - and passes latin1 encoded strings to converters. Override this value to - receive unicode arrays and pass strings as input to converters. If set - to None the system default is used. The default value is 'bytes'. - - .. versionadded:: 1.14.0 - - Returns - ------- - out : ndarray - Data read from the text file. If `usemask` is True, this is a - masked array. - - See Also - -------- - numpy.loadtxt : equivalent function when no data is missing. - - Notes - ----- - * When spaces are used as delimiters, or when no delimiter has been given - as input, there should not be any missing data between two fields. 
- * When the variables are named (either by a flexible dtype or with `names`, - there must not be any header in the file (else a ValueError - exception is raised). - * Individual values are not stripped of spaces by default. - When using a custom converter, make sure the function does remove spaces. - - References - ---------- - .. [1] NumPy User Guide, section `I/O with NumPy - `_. - - Examples - --------- - >>> from io import StringIO - >>> import numpy as np - - Comma delimited file with mixed dtype - - >>> s = StringIO(u"1,1.3,abcde") - >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'), - ... ('mystring','S5')], delimiter=",") - >>> data - array((1, 1.3, b'abcde'), - dtype=[('myint', '>> _ = s.seek(0) # needed for StringIO example only - >>> data = np.genfromtxt(s, dtype=None, - ... names = ['myint','myfloat','mystring'], delimiter=",") - >>> data - array((1, 1.3, b'abcde'), - dtype=[('myint', '>> _ = s.seek(0) - >>> data = np.genfromtxt(s, dtype="i8,f8,S5", - ... names=['myint','myfloat','mystring'], delimiter=",") - >>> data - array((1, 1.3, b'abcde'), - dtype=[('myint', '>> s = StringIO(u"11.3abcde") - >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'], - ... delimiter=[1,3,5]) - >>> data - array((1, 1.3, b'abcde'), - dtype=[('intvar', '>> f = StringIO(''' - ... text,# of chars - ... hello world,11 - ... 
numpy,5''') - >>> np.genfromtxt(f, dtype='S12,S12', delimiter=',') - array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')], - dtype=[('f0', 'S12'), ('f1', 'S12')]) - - """ - if max_rows is not None: - if skip_footer: - raise ValueError( - "The keywords 'skip_footer' and 'max_rows' can not be " - "specified at the same time.") - if max_rows < 1: - raise ValueError("'max_rows' must be at least 1.") - - if usemask: - from numpy.ma import MaskedArray, make_mask_descr - # Check the input dictionary of converters - user_converters = converters or {} - if not isinstance(user_converters, dict): - raise TypeError( - "The input argument 'converter' should be a valid dictionary " - "(got '%s' instead)" % type(user_converters)) - - if encoding == 'bytes': - encoding = None - byte_converters = True - else: - byte_converters = False - - # Initialize the filehandle, the LineSplitter and the NameValidator - try: - if isinstance(fname, os_PathLike): - fname = os_fspath(fname) - if isinstance(fname, basestring): - fid = np.lib._datasource.open(fname, 'rt', encoding=encoding) - fid_ctx = contextlib.closing(fid) - else: - fid = fname - fid_ctx = contextlib_nullcontext(fid) - fhd = iter(fid) - except TypeError: - raise TypeError( - "fname must be a string, filehandle, list of strings, " - "or generator. Got %s instead." 
% type(fname)) - - with fid_ctx: - split_line = LineSplitter(delimiter=delimiter, comments=comments, - autostrip=autostrip, encoding=encoding) - validate_names = NameValidator(excludelist=excludelist, - deletechars=deletechars, - case_sensitive=case_sensitive, - replace_space=replace_space) - - # Skip the first `skip_header` rows - try: - for i in range(skip_header): - next(fhd) - - # Keep on until we find the first valid values - first_values = None - - while not first_values: - first_line = _decode_line(next(fhd), encoding) - if (names is True) and (comments is not None): - if comments in first_line: - first_line = ( - ''.join(first_line.split(comments)[1:])) - first_values = split_line(first_line) - except StopIteration: - # return an empty array if the datafile is empty - first_line = '' - first_values = [] - warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2) - - # Should we take the first values as names ? - if names is True: - fval = first_values[0].strip() - if comments is not None: - if fval in comments: - del first_values[0] - - # Check the columns to use: make sure `usecols` is a list - if usecols is not None: - try: - usecols = [_.strip() for _ in usecols.split(",")] - except AttributeError: - try: - usecols = list(usecols) - except TypeError: - usecols = [usecols, ] - nbcols = len(usecols or first_values) - - # Check the names and overwrite the dtype.names if needed - if names is True: - names = validate_names([str(_.strip()) for _ in first_values]) - first_line = '' - elif _is_string_like(names): - names = validate_names([_.strip() for _ in names.split(',')]) - elif names: - names = validate_names(names) - # Get the dtype - if dtype is not None: - dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names, - excludelist=excludelist, - deletechars=deletechars, - case_sensitive=case_sensitive, - replace_space=replace_space) - # Make sure the names is a list (for 2.5) - if names is not None: - names = list(names) - - if usecols: - 
for (i, current) in enumerate(usecols): - # if usecols is a list of names, convert to a list of indices - if _is_string_like(current): - usecols[i] = names.index(current) - elif current < 0: - usecols[i] = current + len(first_values) - # If the dtype is not None, make sure we update it - if (dtype is not None) and (len(dtype) > nbcols): - descr = dtype.descr - dtype = np.dtype([descr[_] for _ in usecols]) - names = list(dtype.names) - # If `names` is not None, update the names - elif (names is not None) and (len(names) > nbcols): - names = [names[_] for _ in usecols] - elif (names is not None) and (dtype is not None): - names = list(dtype.names) - - # Process the missing values ............................... - # Rename missing_values for convenience - user_missing_values = missing_values or () - if isinstance(user_missing_values, bytes): - user_missing_values = user_missing_values.decode('latin1') - - # Define the list of missing_values (one column: one list) - missing_values = [list(['']) for _ in range(nbcols)] - - # We have a dictionary: process it field by field - if isinstance(user_missing_values, dict): - # Loop on the items - for (key, val) in user_missing_values.items(): - # Is the key a string ? 
- if _is_string_like(key): - try: - # Transform it into an integer - key = names.index(key) - except ValueError: - # We couldn't find it: the name must have been dropped - continue - # Redefine the key as needed if it's a column number - if usecols: - try: - key = usecols.index(key) - except ValueError: - pass - # Transform the value as a list of string - if isinstance(val, (list, tuple)): - val = [str(_) for _ in val] - else: - val = [str(val), ] - # Add the value(s) to the current list of missing - if key is None: - # None acts as default - for miss in missing_values: - miss.extend(val) - else: - missing_values[key].extend(val) - # We have a sequence : each item matches a column - elif isinstance(user_missing_values, (list, tuple)): - for (value, entry) in zip(user_missing_values, missing_values): - value = str(value) - if value not in entry: - entry.append(value) - # We have a string : apply it to all entries - elif isinstance(user_missing_values, basestring): - user_value = user_missing_values.split(",") - for entry in missing_values: - entry.extend(user_value) - # We have something else: apply it to all entries - else: - for entry in missing_values: - entry.extend([str(user_missing_values)]) - - # Process the filling_values ............................... 
- # Rename the input for convenience - user_filling_values = filling_values - if user_filling_values is None: - user_filling_values = [] - # Define the default - filling_values = [None] * nbcols - # We have a dictionary : update each entry individually - if isinstance(user_filling_values, dict): - for (key, val) in user_filling_values.items(): - if _is_string_like(key): - try: - # Transform it into an integer - key = names.index(key) - except ValueError: - # We couldn't find it: the name must have been dropped, - continue - # Redefine the key if it's a column number and usecols is defined - if usecols: - try: - key = usecols.index(key) - except ValueError: - pass - # Add the value to the list - filling_values[key] = val - # We have a sequence : update on a one-to-one basis - elif isinstance(user_filling_values, (list, tuple)): - n = len(user_filling_values) - if (n <= nbcols): - filling_values[:n] = user_filling_values - else: - filling_values = user_filling_values[:nbcols] - # We have something else : use it for all entries - else: - filling_values = [user_filling_values] * nbcols - - # Initialize the converters ................................ - if dtype is None: - # Note: we can't use a [...]*nbcols, as we would have 3 times the same - # ... converter, instead of 3 different converters. 
- converters = [StringConverter(None, missing_values=miss, default=fill) - for (miss, fill) in zip(missing_values, filling_values)] - else: - dtype_flat = flatten_dtype(dtype, flatten_base=True) - # Initialize the converters - if len(dtype_flat) > 1: - # Flexible type : get a converter from each dtype - zipit = zip(dtype_flat, missing_values, filling_values) - converters = [StringConverter(dt, locked=True, - missing_values=miss, default=fill) - for (dt, miss, fill) in zipit] - else: - # Set to a default converter (but w/ different missing values) - zipit = zip(missing_values, filling_values) - converters = [StringConverter(dtype, locked=True, - missing_values=miss, default=fill) - for (miss, fill) in zipit] - # Update the converters to use the user-defined ones - uc_update = [] - for (j, conv) in user_converters.items(): - # If the converter is specified by column names, use the index instead - if _is_string_like(j): - try: - j = names.index(j) - i = j - except ValueError: - continue - elif usecols: - try: - i = usecols.index(j) - except ValueError: - # Unused converter specified - continue - else: - i = j - # Find the value to test - first_line is not filtered by usecols: - if len(first_line): - testing_value = first_values[j] - else: - testing_value = None - if conv is bytes: - user_conv = asbytes - elif byte_converters: - # converters may use decode to workaround numpy's old behaviour, - # so encode the string again before passing to the user converter - def tobytes_first(x, conv): - if type(x) is bytes: - return conv(x) - return conv(x.encode("latin1")) - user_conv = functools.partial(tobytes_first, conv=conv) - else: - user_conv = conv - converters[i].update(user_conv, locked=True, - testing_value=testing_value, - default=filling_values[i], - missing_values=missing_values[i],) - uc_update.append((i, user_conv)) - # Make sure we have the corrected keys in user_converters... 
- user_converters.update(uc_update) - - # Fixme: possible error as following variable never used. - # miss_chars = [_.missing_values for _ in converters] - - # Initialize the output lists ... - # ... rows - rows = [] - append_to_rows = rows.append - # ... masks - if usemask: - masks = [] - append_to_masks = masks.append - # ... invalid - invalid = [] - append_to_invalid = invalid.append - - # Parse each line - for (i, line) in enumerate(itertools.chain([first_line, ], fhd)): - values = split_line(line) - nbvalues = len(values) - # Skip an empty line - if nbvalues == 0: - continue - if usecols: - # Select only the columns we need - try: - values = [values[_] for _ in usecols] - except IndexError: - append_to_invalid((i + skip_header + 1, nbvalues)) - continue - elif nbvalues != nbcols: - append_to_invalid((i + skip_header + 1, nbvalues)) - continue - # Store the values - append_to_rows(tuple(values)) - if usemask: - append_to_masks(tuple([v.strip() in m - for (v, m) in zip(values, - missing_values)])) - if len(rows) == max_rows: - break - - # Upgrade the converters (if needed) - if dtype is None: - for (i, converter) in enumerate(converters): - current_column = [itemgetter(i)(_m) for _m in rows] - try: - converter.iterupgrade(current_column) - except ConverterLockError: - errmsg = "Converter #%i is locked and cannot be upgraded: " % i - current_column = map(itemgetter(i), rows) - for (j, value) in enumerate(current_column): - try: - converter.upgrade(value) - except (ConverterError, ValueError): - errmsg += "(occurred line #%i for value '%s')" - errmsg %= (j + 1 + skip_header, value) - raise ConverterError(errmsg) - - # Check that we don't have invalid values - nbinvalid = len(invalid) - if nbinvalid > 0: - nbrows = len(rows) + nbinvalid - skip_footer - # Construct the error message - template = " Line #%%i (got %%i columns instead of %i)" % nbcols - if skip_footer > 0: - nbinvalid_skipped = len([_ for _ in invalid - if _[0] > nbrows + skip_header]) - invalid = 
invalid[:nbinvalid - nbinvalid_skipped] - skip_footer -= nbinvalid_skipped -# -# nbrows -= skip_footer -# errmsg = [template % (i, nb) -# for (i, nb) in invalid if i < nbrows] -# else: - errmsg = [template % (i, nb) - for (i, nb) in invalid] - if len(errmsg): - errmsg.insert(0, "Some errors were detected !") - errmsg = "\n".join(errmsg) - # Raise an exception ? - if invalid_raise: - raise ValueError(errmsg) - # Issue a warning ? - else: - warnings.warn(errmsg, ConversionWarning, stacklevel=2) - - # Strip the last skip_footer data - if skip_footer > 0: - rows = rows[:-skip_footer] - if usemask: - masks = masks[:-skip_footer] - - # Convert each value according to the converter: - # We want to modify the list in place to avoid creating a new one... - if loose: - rows = list( - zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)] - for (i, conv) in enumerate(converters)])) - else: - rows = list( - zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)] - for (i, conv) in enumerate(converters)])) - - # Reset the dtype - data = rows - if dtype is None: - # Get the dtypes from the types of the converters - column_types = [conv.type for conv in converters] - # Find the columns with strings... - strcolidx = [i for (i, v) in enumerate(column_types) - if v == np.unicode_] - - if byte_converters and strcolidx: - # convert strings back to bytes for backward compatibility - warnings.warn( - "Reading unicode strings without specifying the encoding " - "argument is deprecated. 
Set the encoding, use None for the " - "system default.", - np.VisibleDeprecationWarning, stacklevel=2) - def encode_unicode_cols(row_tup): - row = list(row_tup) - for i in strcolidx: - row[i] = row[i].encode('latin1') - return tuple(row) - - try: - data = [encode_unicode_cols(r) for r in data] - except UnicodeEncodeError: - pass - else: - for i in strcolidx: - column_types[i] = np.bytes_ - - # Update string types to be the right length - sized_column_types = column_types[:] - for i, col_type in enumerate(column_types): - if np.issubdtype(col_type, np.character): - n_chars = max(len(row[i]) for row in data) - sized_column_types[i] = (col_type, n_chars) - - if names is None: - # If the dtype is uniform (before sizing strings) - base = { - c_type - for c, c_type in zip(converters, column_types) - if c._checked} - if len(base) == 1: - uniform_type, = base - (ddtype, mdtype) = (uniform_type, bool) - else: - ddtype = [(defaultfmt % i, dt) - for (i, dt) in enumerate(sized_column_types)] - if usemask: - mdtype = [(defaultfmt % i, bool) - for (i, dt) in enumerate(sized_column_types)] - else: - ddtype = list(zip(names, sized_column_types)) - mdtype = list(zip(names, [bool] * len(sized_column_types))) - output = np.array(data, dtype=ddtype) - if usemask: - outputmask = np.array(masks, dtype=mdtype) - else: - # Overwrite the initial dtype names if needed - if names and dtype.names is not None: - dtype.names = names - # Case 1. We have a structured type - if len(dtype_flat) > 1: - # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])] - # First, create the array using a flattened dtype: - # [('a', int), ('b1', int), ('b2', float)] - # Then, view the array using the specified dtype. 
- if 'O' in (_.char for _ in dtype_flat): - if has_nested_fields(dtype): - raise NotImplementedError( - "Nested fields involving objects are not supported...") - else: - output = np.array(data, dtype=dtype) - else: - rows = np.array(data, dtype=[('', _) for _ in dtype_flat]) - output = rows.view(dtype) - # Now, process the rowmasks the same way - if usemask: - rowmasks = np.array( - masks, dtype=np.dtype([('', bool) for t in dtype_flat])) - # Construct the new dtype - mdtype = make_mask_descr(dtype) - outputmask = rowmasks.view(mdtype) - # Case #2. We have a basic dtype - else: - # We used some user-defined converters - if user_converters: - ishomogeneous = True - descr = [] - for i, ttype in enumerate([conv.type for conv in converters]): - # Keep the dtype of the current converter - if i in user_converters: - ishomogeneous &= (ttype == dtype.type) - if np.issubdtype(ttype, np.character): - ttype = (ttype, max(len(row[i]) for row in data)) - descr.append(('', ttype)) - else: - descr.append(('', dtype)) - # So we changed the dtype ? - if not ishomogeneous: - # We have more than one field - if len(descr) > 1: - dtype = np.dtype(descr) - # We have only one field: drop the name if not needed. 
- else: - dtype = np.dtype(ttype) - # - output = np.array(data, dtype) - if usemask: - if dtype.names is not None: - mdtype = [(_, bool) for _ in dtype.names] - else: - mdtype = bool - outputmask = np.array(masks, dtype=mdtype) - # Try to take care of the missing data we missed - names = output.dtype.names - if usemask and names: - for (name, conv) in zip(names, converters): - missing_values = [conv(_) for _ in conv.missing_values - if _ != ''] - for mval in missing_values: - outputmask[name] |= (output[name] == mval) - # Construct the final array - if usemask: - output = output.view(MaskedArray) - output._mask = outputmask - if unpack: - return output.squeeze().T - return output.squeeze() - - -def ndfromtxt(fname, **kwargs): - """ - Load ASCII data stored in a file and return it as a single array. - - .. deprecated:: 1.17 - ndfromtxt` is a deprecated alias of `genfromtxt` which - overwrites the ``usemask`` argument with `False` even when - explicitly called as ``ndfromtxt(..., usemask=True)``. - Use `genfromtxt` instead. - - Parameters - ---------- - fname, kwargs : For a description of input parameters, see `genfromtxt`. - - See Also - -------- - numpy.genfromtxt : generic function. - - """ - kwargs['usemask'] = False - # Numpy 1.17 - warnings.warn( - "np.ndfromtxt is a deprecated alias of np.genfromtxt, " - "prefer the latter.", - DeprecationWarning, stacklevel=2) - return genfromtxt(fname, **kwargs) - - -def mafromtxt(fname, **kwargs): - """ - Load ASCII data stored in a text file and return a masked array. - - .. deprecated:: 1.17 - np.mafromtxt is a deprecated alias of `genfromtxt` which - overwrites the ``usemask`` argument with `True` even when - explicitly called as ``mafromtxt(..., usemask=False)``. - Use `genfromtxt` instead. - - Parameters - ---------- - fname, kwargs : For a description of input parameters, see `genfromtxt`. - - See Also - -------- - numpy.genfromtxt : generic function to load ASCII data. 
- - """ - kwargs['usemask'] = True - # Numpy 1.17 - warnings.warn( - "np.mafromtxt is a deprecated alias of np.genfromtxt, " - "prefer the latter.", - DeprecationWarning, stacklevel=2) - return genfromtxt(fname, **kwargs) - - -def recfromtxt(fname, **kwargs): - """ - Load ASCII data from a file and return it in a record array. - - If ``usemask=False`` a standard `recarray` is returned, - if ``usemask=True`` a MaskedRecords array is returned. - - Parameters - ---------- - fname, kwargs : For a description of input parameters, see `genfromtxt`. - - See Also - -------- - numpy.genfromtxt : generic function - - Notes - ----- - By default, `dtype` is None, which means that the data-type of the output - array will be determined from the data. - - """ - kwargs.setdefault("dtype", None) - usemask = kwargs.get('usemask', False) - output = genfromtxt(fname, **kwargs) - if usemask: - from numpy.ma.mrecords import MaskedRecords - output = output.view(MaskedRecords) - else: - output = output.view(np.recarray) - return output - - -def recfromcsv(fname, **kwargs): - """ - Load ASCII data stored in a comma-separated file. - - The returned array is a record array (if ``usemask=False``, see - `recarray`) or a masked record array (if ``usemask=True``, - see `ma.mrecords.MaskedRecords`). - - Parameters - ---------- - fname, kwargs : For a description of input parameters, see `genfromtxt`. - - See Also - -------- - numpy.genfromtxt : generic function to load ASCII data. - - Notes - ----- - By default, `dtype` is None, which means that the data-type of the output - array will be determined from the data. - - """ - # Set default kwargs for genfromtxt as relevant to csv import. 
- kwargs.setdefault("case_sensitive", "lower") - kwargs.setdefault("names", True) - kwargs.setdefault("delimiter", ",") - kwargs.setdefault("dtype", None) - output = genfromtxt(fname, **kwargs) - - usemask = kwargs.get("usemask", False) - if usemask: - from numpy.ma.mrecords import MaskedRecords - output = output.view(MaskedRecords) - else: - output = output.view(np.recarray) - return output diff --git a/venv/lib/python3.7/site-packages/numpy/lib/polynomial.py b/venv/lib/python3.7/site-packages/numpy/lib/polynomial.py deleted file mode 100644 index 3d07a0d..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/polynomial.py +++ /dev/null @@ -1,1379 +0,0 @@ -""" -Functions to operate on polynomials. - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd', - 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d', - 'polyfit', 'RankWarning'] - -import functools -import re -import warnings -import numpy.core.numeric as NX - -from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array, - ones) -from numpy.core import overrides -from numpy.core.overrides import set_module -from numpy.lib.twodim_base import diag, vander -from numpy.lib.function_base import trim_zeros -from numpy.lib.type_check import iscomplex, real, imag, mintypecode -from numpy.linalg import eigvals, lstsq, inv - - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - - -@set_module('numpy') -class RankWarning(UserWarning): - """ - Issued by `polyfit` when the Vandermonde matrix is rank deficient. - - For more information, a way to suppress the warning, and an example of - `RankWarning` being issued, see `polyfit`. - - """ - pass - - -def _poly_dispatcher(seq_of_zeros): - return seq_of_zeros - - -@array_function_dispatch(_poly_dispatcher) -def poly(seq_of_zeros): - """ - Find the coefficients of a polynomial with the given sequence of roots. 
- - Returns the coefficients of the polynomial whose leading coefficient - is one for the given sequence of zeros (multiple roots must be included - in the sequence as many times as their multiplicity; see Examples). - A square matrix (or array, which will be treated as a matrix) can also - be given, in which case the coefficients of the characteristic polynomial - of the matrix are returned. - - Parameters - ---------- - seq_of_zeros : array_like, shape (N,) or (N, N) - A sequence of polynomial roots, or a square array or matrix object. - - Returns - ------- - c : ndarray - 1D array of polynomial coefficients from highest to lowest degree: - - ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]`` - where c[0] always equals 1. - - Raises - ------ - ValueError - If input is the wrong shape (the input must be a 1-D or square - 2-D array). - - See Also - -------- - polyval : Compute polynomial values. - roots : Return the roots of a polynomial. - polyfit : Least squares polynomial fit. - poly1d : A one-dimensional polynomial class. - - Notes - ----- - Specifying the roots of a polynomial still leaves one degree of - freedom, typically represented by an undetermined leading - coefficient. [1]_ In the case of this function, that coefficient - - the first one in the returned array - is always taken as one. (If - for some reason you have one other point, the only automatic way - presently to leverage that information is to use ``polyfit``.) - - The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n` - matrix **A** is given by - - :math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`, - - where **I** is the `n`-by-`n` identity matrix. [2]_ - - References - ---------- - .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trignometry, - Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996. - - .. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition," - Academic Press, pg. 182, 1980. 
- - Examples - -------- - Given a sequence of a polynomial's zeros: - - >>> np.poly((0, 0, 0)) # Multiple root example - array([1., 0., 0., 0.]) - - The line above represents z**3 + 0*z**2 + 0*z + 0. - - >>> np.poly((-1./2, 0, 1./2)) - array([ 1. , 0. , -0.25, 0. ]) - - The line above represents z**3 - z/4 - - >>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0])) - array([ 1. , -0.77086955, 0.08618131, 0. ]) # random - - Given a square array object: - - >>> P = np.array([[0, 1./3], [-1./2, 0]]) - >>> np.poly(P) - array([1. , 0. , 0.16666667]) - - Note how in all cases the leading coefficient is always 1. - - """ - seq_of_zeros = atleast_1d(seq_of_zeros) - sh = seq_of_zeros.shape - - if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0: - seq_of_zeros = eigvals(seq_of_zeros) - elif len(sh) == 1: - dt = seq_of_zeros.dtype - # Let object arrays slip through, e.g. for arbitrary precision - if dt != object: - seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char)) - else: - raise ValueError("input must be 1d or non-empty square 2d array.") - - if len(seq_of_zeros) == 0: - return 1.0 - dt = seq_of_zeros.dtype - a = ones((1,), dtype=dt) - for k in range(len(seq_of_zeros)): - a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt), - mode='full') - - if issubclass(a.dtype.type, NX.complexfloating): - # if complex roots are all complex conjugates, the roots are real. - roots = NX.asarray(seq_of_zeros, complex) - if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())): - a = a.real.copy() - - return a - - -def _roots_dispatcher(p): - return p - - -@array_function_dispatch(_roots_dispatcher) -def roots(p): - """ - Return the roots of a polynomial with coefficients given in p. - - The values in the rank-1 array `p` are coefficients of a polynomial. - If the length of `p` is n+1 then the polynomial is described by:: - - p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n] - - Parameters - ---------- - p : array_like - Rank-1 array of polynomial coefficients. 
- - Returns - ------- - out : ndarray - An array containing the roots of the polynomial. - - Raises - ------ - ValueError - When `p` cannot be converted to a rank-1 array. - - See also - -------- - poly : Find the coefficients of a polynomial with a given sequence - of roots. - polyval : Compute polynomial values. - polyfit : Least squares polynomial fit. - poly1d : A one-dimensional polynomial class. - - Notes - ----- - The algorithm relies on computing the eigenvalues of the - companion matrix [1]_. - - References - ---------- - .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK: - Cambridge University Press, 1999, pp. 146-7. - - Examples - -------- - >>> coeff = [3.2, 2, 1] - >>> np.roots(coeff) - array([-0.3125+0.46351241j, -0.3125-0.46351241j]) - - """ - # If input is scalar, this makes it an array - p = atleast_1d(p) - if p.ndim != 1: - raise ValueError("Input must be a rank-1 array.") - - # find non-zero array entries - non_zero = NX.nonzero(NX.ravel(p))[0] - - # Return an empty array if polynomial is all zeros - if len(non_zero) == 0: - return NX.array([]) - - # find the number of trailing zeros -- this is the number of roots at 0. - trailing_zeros = len(p) - non_zero[-1] - 1 - - # strip leading and trailing zeros - p = p[int(non_zero[0]):int(non_zero[-1])+1] - - # casting: if incoming array isn't floating point, make it floating point. 
- if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)): - p = p.astype(float) - - N = len(p) - if N > 1: - # build companion matrix and find its eigenvalues (the roots) - A = diag(NX.ones((N-2,), p.dtype), -1) - A[0,:] = -p[1:] / p[0] - roots = eigvals(A) - else: - roots = NX.array([]) - - # tack any zeros onto the back of the array - roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype))) - return roots - - -def _polyint_dispatcher(p, m=None, k=None): - return (p,) - - -@array_function_dispatch(_polyint_dispatcher) -def polyint(p, m=1, k=None): - """ - Return an antiderivative (indefinite integral) of a polynomial. - - The returned order `m` antiderivative `P` of polynomial `p` satisfies - :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1` - integration constants `k`. The constants determine the low-order - polynomial part - - .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1} - - of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`. - - Parameters - ---------- - p : array_like or poly1d - Polynomial to integrate. - A sequence is interpreted as polynomial coefficients, see `poly1d`. - m : int, optional - Order of the antiderivative. (Default: 1) - k : list of `m` scalars or scalar, optional - Integration constants. They are given in the order of integration: - those corresponding to highest-order terms come first. - - If ``None`` (default), all constants are assumed to be zero. - If `m = 1`, a single scalar can be given instead of a list. - - See Also - -------- - polyder : derivative of a polynomial - poly1d.integ : equivalent method - - Examples - -------- - The defining property of the antiderivative: - - >>> p = np.poly1d([1,1,1]) - >>> P = np.polyint(p) - >>> P - poly1d([ 0.33333333, 0.5 , 1. , 0. 
]) # may vary - >>> np.polyder(P) == p - True - - The integration constants default to zero, but can be specified: - - >>> P = np.polyint(p, 3) - >>> P(0) - 0.0 - >>> np.polyder(P)(0) - 0.0 - >>> np.polyder(P, 2)(0) - 0.0 - >>> P = np.polyint(p, 3, k=[6,5,3]) - >>> P - poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) # may vary - - Note that 3 = 6 / 2!, and that the constants are given in the order of - integrations. Constant of the highest-order polynomial term comes first: - - >>> np.polyder(P, 2)(0) - 6.0 - >>> np.polyder(P, 1)(0) - 5.0 - >>> P(0) - 3.0 - - """ - m = int(m) - if m < 0: - raise ValueError("Order of integral must be positive (see polyder)") - if k is None: - k = NX.zeros(m, float) - k = atleast_1d(k) - if len(k) == 1 and m > 1: - k = k[0]*NX.ones(m, float) - if len(k) < m: - raise ValueError( - "k must be a scalar or a rank-1 array of length 1 or >m.") - - truepoly = isinstance(p, poly1d) - p = NX.asarray(p) - if m == 0: - if truepoly: - return poly1d(p) - return p - else: - # Note: this must work also with object and integer arrays - y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]])) - val = polyint(y, m - 1, k=k[1:]) - if truepoly: - return poly1d(val) - return val - - -def _polyder_dispatcher(p, m=None): - return (p,) - - -@array_function_dispatch(_polyder_dispatcher) -def polyder(p, m=1): - """ - Return the derivative of the specified order of a polynomial. - - Parameters - ---------- - p : poly1d or sequence - Polynomial to differentiate. - A sequence is interpreted as polynomial coefficients, see `poly1d`. - m : int, optional - Order of differentiation (default: 1) - - Returns - ------- - der : poly1d - A new polynomial representing the derivative. - - See Also - -------- - polyint : Anti-derivative of a polynomial. - poly1d : Class for one-dimensional polynomials. 
- - Examples - -------- - The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is: - - >>> p = np.poly1d([1,1,1,1]) - >>> p2 = np.polyder(p) - >>> p2 - poly1d([3, 2, 1]) - - which evaluates to: - - >>> p2(2.) - 17.0 - - We can verify this, approximating the derivative with - ``(f(x + h) - f(x))/h``: - - >>> (p(2. + 0.001) - p(2.)) / 0.001 - 17.007000999997857 - - The fourth-order derivative of a 3rd-order polynomial is zero: - - >>> np.polyder(p, 2) - poly1d([6, 2]) - >>> np.polyder(p, 3) - poly1d([6]) - >>> np.polyder(p, 4) - poly1d([0.]) - - """ - m = int(m) - if m < 0: - raise ValueError("Order of derivative must be positive (see polyint)") - - truepoly = isinstance(p, poly1d) - p = NX.asarray(p) - n = len(p) - 1 - y = p[:-1] * NX.arange(n, 0, -1) - if m == 0: - val = p - else: - val = polyder(y, m - 1) - if truepoly: - val = poly1d(val) - return val - - -def _polyfit_dispatcher(x, y, deg, rcond=None, full=None, w=None, cov=None): - return (x, y, w) - - -@array_function_dispatch(_polyfit_dispatcher) -def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): - """ - Least squares polynomial fit. - - Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg` - to points `(x, y)`. Returns a vector of coefficients `p` that minimises - the squared error in the order `deg`, `deg-1`, ... `0`. - - The `Polynomial.fit ` class - method is recommended for new code as it is more stable numerically. See - the documentation of the method for more information. - - Parameters - ---------- - x : array_like, shape (M,) - x-coordinates of the M sample points ``(x[i], y[i])``. - y : array_like, shape (M,) or (M, K) - y-coordinates of the sample points. Several data sets of sample - points sharing the same x-coordinates can be fitted at once by - passing in a 2D-array that contains one dataset per column. - deg : int - Degree of the fitting polynomial - rcond : float, optional - Relative condition number of the fit. 
Singular values smaller than - this relative to the largest singular value will be ignored. The - default value is len(x)*eps, where eps is the relative precision of - the float type, about 2e-16 in most cases. - full : bool, optional - Switch determining nature of return value. When it is False (the - default) just the coefficients are returned, when True diagnostic - information from the singular value decomposition is also returned. - w : array_like, shape (M,), optional - Weights to apply to the y-coordinates of the sample points. For - gaussian uncertainties, use 1/sigma (not 1/sigma**2). - cov : bool or str, optional - If given and not `False`, return not just the estimate but also its - covariance matrix. By default, the covariance are scaled by - chi2/sqrt(N-dof), i.e., the weights are presumed to be unreliable - except in a relative sense and everything is scaled such that the - reduced chi2 is unity. This scaling is omitted if ``cov='unscaled'``, - as is relevant for the case that the weights are 1/sigma**2, with - sigma known to be a reliable estimate of the uncertainty. - - Returns - ------- - p : ndarray, shape (deg + 1,) or (deg + 1, K) - Polynomial coefficients, highest power first. If `y` was 2-D, the - coefficients for `k`-th data set are in ``p[:,k]``. - - residuals, rank, singular_values, rcond - Present only if `full` = True. Residuals is sum of squared residuals - of the least-squares fit, the effective rank of the scaled Vandermonde - coefficient matrix, its singular values, and the specified value of - `rcond`. For more details, see `linalg.lstsq`. - - V : ndarray, shape (M,M) or (M,M,K) - Present only if `full` = False and `cov`=True. The covariance - matrix of the polynomial coefficient estimates. The diagonal of - this matrix are the variance estimates for each coefficient. 
If y - is a 2-D array, then the covariance matrix for the `k`-th data set - are in ``V[:,:,k]`` - - - Warns - ----- - RankWarning - The rank of the coefficient matrix in the least-squares fit is - deficient. The warning is only raised if `full` = False. - - The warnings can be turned off by - - >>> import warnings - >>> warnings.simplefilter('ignore', np.RankWarning) - - See Also - -------- - polyval : Compute polynomial values. - linalg.lstsq : Computes a least-squares fit. - scipy.interpolate.UnivariateSpline : Computes spline fits. - - Notes - ----- - The solution minimizes the squared error - - .. math :: - E = \\sum_{j=0}^k |p(x_j) - y_j|^2 - - in the equations:: - - x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0] - x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1] - ... - x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k] - - The coefficient matrix of the coefficients `p` is a Vandermonde matrix. - - `polyfit` issues a `RankWarning` when the least-squares fit is badly - conditioned. This implies that the best fit is not well-defined due - to numerical error. The results may be improved by lowering the polynomial - degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter - can also be set to a value smaller than its default, but the resulting - fit may be spurious: including contributions from the small singular - values can add numerical noise to the result. - - Note that fitting polynomial coefficients is inherently badly conditioned - when the degree of the polynomial is large or the interval of sample points - is badly centered. The quality of the fit should always be checked in these - cases. When polynomial fits are not satisfactory, splines may be a good - alternative. - - References - ---------- - .. [1] Wikipedia, "Curve fitting", - https://en.wikipedia.org/wiki/Curve_fitting - .. 
[2] Wikipedia, "Polynomial interpolation", - https://en.wikipedia.org/wiki/Polynomial_interpolation - - Examples - -------- - >>> import warnings - >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0]) - >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0]) - >>> z = np.polyfit(x, y, 3) - >>> z - array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) # may vary - - It is convenient to use `poly1d` objects for dealing with polynomials: - - >>> p = np.poly1d(z) - >>> p(0.5) - 0.6143849206349179 # may vary - >>> p(3.5) - -0.34732142857143039 # may vary - >>> p(10) - 22.579365079365115 # may vary - - High-order polynomials may oscillate wildly: - - >>> with warnings.catch_warnings(): - ... warnings.simplefilter('ignore', np.RankWarning) - ... p30 = np.poly1d(np.polyfit(x, y, 30)) - ... - >>> p30(4) - -0.80000000000000204 # may vary - >>> p30(5) - -0.99999999999999445 # may vary - >>> p30(4.5) - -0.10547061179440398 # may vary - - Illustration: - - >>> import matplotlib.pyplot as plt - >>> xp = np.linspace(-2, 6, 100) - >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--') - >>> plt.ylim(-2,2) - (-2, 2) - >>> plt.show() - - """ - order = int(deg) + 1 - x = NX.asarray(x) + 0.0 - y = NX.asarray(y) + 0.0 - - # check arguments. 
- if deg < 0: - raise ValueError("expected deg >= 0") - if x.ndim != 1: - raise TypeError("expected 1D vector for x") - if x.size == 0: - raise TypeError("expected non-empty vector for x") - if y.ndim < 1 or y.ndim > 2: - raise TypeError("expected 1D or 2D array for y") - if x.shape[0] != y.shape[0]: - raise TypeError("expected x and y to have same length") - - # set rcond - if rcond is None: - rcond = len(x)*finfo(x.dtype).eps - - # set up least squares equation for powers of x - lhs = vander(x, order) - rhs = y - - # apply weighting - if w is not None: - w = NX.asarray(w) + 0.0 - if w.ndim != 1: - raise TypeError("expected a 1-d array for weights") - if w.shape[0] != y.shape[0]: - raise TypeError("expected w and y to have the same length") - lhs *= w[:, NX.newaxis] - if rhs.ndim == 2: - rhs *= w[:, NX.newaxis] - else: - rhs *= w - - # scale lhs to improve condition number and solve - scale = NX.sqrt((lhs*lhs).sum(axis=0)) - lhs /= scale - c, resids, rank, s = lstsq(lhs, rhs, rcond) - c = (c.T/scale).T # broadcast scale coefficients - - # warn on rank reduction, which indicates an ill conditioned matrix - if rank != order and not full: - msg = "Polyfit may be poorly conditioned" - warnings.warn(msg, RankWarning, stacklevel=4) - - if full: - return c, resids, rank, s, rcond - elif cov: - Vbase = inv(dot(lhs.T, lhs)) - Vbase /= NX.outer(scale, scale) - if cov == "unscaled": - fac = 1 - else: - if len(x) <= order: - raise ValueError("the number of data points must exceed order " - "to scale the covariance matrix") - # note, this used to be: fac = resids / (len(x) - order - 2.0) - # it was deciced that the "- 2" (originally justified by "Bayesian - # uncertainty analysis") is not was the user expects - # (see gh-11196 and gh-11197) - fac = resids / (len(x) - order) - if y.ndim == 1: - return c, Vbase * fac - else: - return c, Vbase[:,:, NX.newaxis] * fac - else: - return c - - -def _polyval_dispatcher(p, x): - return (p, x) - - 
-@array_function_dispatch(_polyval_dispatcher) -def polyval(p, x): - """ - Evaluate a polynomial at specific values. - - If `p` is of length N, this function returns the value: - - ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]`` - - If `x` is a sequence, then `p(x)` is returned for each element of `x`. - If `x` is another polynomial then the composite polynomial `p(x(t))` - is returned. - - Parameters - ---------- - p : array_like or poly1d object - 1D array of polynomial coefficients (including coefficients equal - to zero) from highest degree to the constant term, or an - instance of poly1d. - x : array_like or poly1d object - A number, an array of numbers, or an instance of poly1d, at - which to evaluate `p`. - - Returns - ------- - values : ndarray or poly1d - If `x` is a poly1d instance, the result is the composition of the two - polynomials, i.e., `x` is "substituted" in `p` and the simplified - result is returned. In addition, the type of `x` - array_like or - poly1d - governs the type of the output: `x` array_like => `values` - array_like, `x` a poly1d object => `values` is also. - - See Also - -------- - poly1d: A polynomial class. - - Notes - ----- - Horner's scheme [1]_ is used to evaluate the polynomial. Even so, - for polynomials of high degree the values may be inaccurate due to - rounding errors. Use carefully. - - If `x` is a subtype of `ndarray` the return value will be of the same type. - - References - ---------- - .. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng. - trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand - Reinhold Co., 1985, pg. 720. 
- - Examples - -------- - >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1 - 76 - >>> np.polyval([3,0,1], np.poly1d(5)) - poly1d([76.]) - >>> np.polyval(np.poly1d([3,0,1]), 5) - 76 - >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5)) - poly1d([76.]) - - """ - p = NX.asarray(p) - if isinstance(x, poly1d): - y = 0 - else: - x = NX.asanyarray(x) - y = NX.zeros_like(x) - for i in range(len(p)): - y = y * x + p[i] - return y - - -def _binary_op_dispatcher(a1, a2): - return (a1, a2) - - -@array_function_dispatch(_binary_op_dispatcher) -def polyadd(a1, a2): - """ - Find the sum of two polynomials. - - Returns the polynomial resulting from the sum of two input polynomials. - Each input must be either a poly1d object or a 1D sequence of polynomial - coefficients, from highest to lowest degree. - - Parameters - ---------- - a1, a2 : array_like or poly1d object - Input polynomials. - - Returns - ------- - out : ndarray or poly1d object - The sum of the inputs. If either input is a poly1d object, then the - output is also a poly1d object. Otherwise, it is a 1D array of - polynomial coefficients from highest to lowest degree. - - See Also - -------- - poly1d : A one-dimensional polynomial class. 
- poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval - - Examples - -------- - >>> np.polyadd([1, 2], [9, 5, 4]) - array([9, 6, 6]) - - Using poly1d objects: - - >>> p1 = np.poly1d([1, 2]) - >>> p2 = np.poly1d([9, 5, 4]) - >>> print(p1) - 1 x + 2 - >>> print(p2) - 2 - 9 x + 5 x + 4 - >>> print(np.polyadd(p1, p2)) - 2 - 9 x + 6 x + 6 - - """ - truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) - a1 = atleast_1d(a1) - a2 = atleast_1d(a2) - diff = len(a2) - len(a1) - if diff == 0: - val = a1 + a2 - elif diff > 0: - zr = NX.zeros(diff, a1.dtype) - val = NX.concatenate((zr, a1)) + a2 - else: - zr = NX.zeros(abs(diff), a2.dtype) - val = a1 + NX.concatenate((zr, a2)) - if truepoly: - val = poly1d(val) - return val - - -@array_function_dispatch(_binary_op_dispatcher) -def polysub(a1, a2): - """ - Difference (subtraction) of two polynomials. - - Given two polynomials `a1` and `a2`, returns ``a1 - a2``. - `a1` and `a2` can be either array_like sequences of the polynomials' - coefficients (including coefficients equal to zero), or `poly1d` objects. - - Parameters - ---------- - a1, a2 : array_like or poly1d - Minuend and subtrahend polynomials, respectively. - - Returns - ------- - out : ndarray or poly1d - Array or `poly1d` object of the difference polynomial's coefficients. - - See Also - -------- - polyval, polydiv, polymul, polyadd - - Examples - -------- - .. 
math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2) - - >>> np.polysub([2, 10, -2], [3, 10, -4]) - array([-1, 0, 2]) - - """ - truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) - a1 = atleast_1d(a1) - a2 = atleast_1d(a2) - diff = len(a2) - len(a1) - if diff == 0: - val = a1 - a2 - elif diff > 0: - zr = NX.zeros(diff, a1.dtype) - val = NX.concatenate((zr, a1)) - a2 - else: - zr = NX.zeros(abs(diff), a2.dtype) - val = a1 - NX.concatenate((zr, a2)) - if truepoly: - val = poly1d(val) - return val - - -@array_function_dispatch(_binary_op_dispatcher) -def polymul(a1, a2): - """ - Find the product of two polynomials. - - Finds the polynomial resulting from the multiplication of the two input - polynomials. Each input must be either a poly1d object or a 1D sequence - of polynomial coefficients, from highest to lowest degree. - - Parameters - ---------- - a1, a2 : array_like or poly1d object - Input polynomials. - - Returns - ------- - out : ndarray or poly1d object - The polynomial resulting from the multiplication of the inputs. If - either inputs is a poly1d object, then the output is also a poly1d - object. Otherwise, it is a 1D array of polynomial coefficients from - highest to lowest degree. - - See Also - -------- - poly1d : A one-dimensional polynomial class. - poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval - convolve : Array convolution. Same output as polymul, but has parameter - for overlap mode. 
- - Examples - -------- - >>> np.polymul([1, 2, 3], [9, 5, 1]) - array([ 9, 23, 38, 17, 3]) - - Using poly1d objects: - - >>> p1 = np.poly1d([1, 2, 3]) - >>> p2 = np.poly1d([9, 5, 1]) - >>> print(p1) - 2 - 1 x + 2 x + 3 - >>> print(p2) - 2 - 9 x + 5 x + 1 - >>> print(np.polymul(p1, p2)) - 4 3 2 - 9 x + 23 x + 38 x + 17 x + 3 - - """ - truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) - a1, a2 = poly1d(a1), poly1d(a2) - val = NX.convolve(a1, a2) - if truepoly: - val = poly1d(val) - return val - - -def _polydiv_dispatcher(u, v): - return (u, v) - - -@array_function_dispatch(_polydiv_dispatcher) -def polydiv(u, v): - """ - Returns the quotient and remainder of polynomial division. - - The input arrays are the coefficients (including any coefficients - equal to zero) of the "numerator" (dividend) and "denominator" - (divisor) polynomials, respectively. - - Parameters - ---------- - u : array_like or poly1d - Dividend polynomial's coefficients. - - v : array_like or poly1d - Divisor polynomial's coefficients. - - Returns - ------- - q : ndarray - Coefficients, including those equal to zero, of the quotient. - r : ndarray - Coefficients, including those equal to zero, of the remainder. - - See Also - -------- - poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub - polyval - - Notes - ----- - Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need - not equal `v.ndim`. In other words, all four possible combinations - - ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``, - ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work. - - Examples - -------- - .. 
math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25 - - >>> x = np.array([3.0, 5.0, 2.0]) - >>> y = np.array([2.0, 1.0]) - >>> np.polydiv(x, y) - (array([1.5 , 1.75]), array([0.25])) - - """ - truepoly = (isinstance(u, poly1d) or isinstance(u, poly1d)) - u = atleast_1d(u) + 0.0 - v = atleast_1d(v) + 0.0 - # w has the common type - w = u[0] + v[0] - m = len(u) - 1 - n = len(v) - 1 - scale = 1. / v[0] - q = NX.zeros((max(m - n + 1, 1),), w.dtype) - r = u.astype(w.dtype) - for k in range(0, m-n+1): - d = scale * r[k] - q[k] = d - r[k:k+n+1] -= d*v - while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1): - r = r[1:] - if truepoly: - return poly1d(q), poly1d(r) - return q, r - -_poly_mat = re.compile(r"[*][*]([0-9]*)") -def _raise_power(astr, wrap=70): - n = 0 - line1 = '' - line2 = '' - output = ' ' - while True: - mat = _poly_mat.search(astr, n) - if mat is None: - break - span = mat.span() - power = mat.groups()[0] - partstr = astr[n:span[0]] - n = span[1] - toadd2 = partstr + ' '*(len(power)-1) - toadd1 = ' '*(len(partstr)-1) + power - if ((len(line2) + len(toadd2) > wrap) or - (len(line1) + len(toadd1) > wrap)): - output += line1 + "\n" + line2 + "\n " - line1 = toadd1 - line2 = toadd2 - else: - line2 += partstr + ' '*(len(power)-1) - line1 += ' '*(len(partstr)-1) + power - output += line1 + "\n" + line2 - return output + astr[n:] - - -@set_module('numpy') -class poly1d(object): - """ - A one-dimensional polynomial class. - - A convenience class, used to encapsulate "natural" operations on - polynomials so that said operations may take on their customary - form in code (see Examples). - - Parameters - ---------- - c_or_r : array_like - The polynomial's coefficients, in decreasing powers, or if - the value of the second parameter is True, the polynomial's - roots (values where the polynomial evaluates to 0). 
For example, - ``poly1d([1, 2, 3])`` returns an object that represents - :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns - one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`. - r : bool, optional - If True, `c_or_r` specifies the polynomial's roots; the default - is False. - variable : str, optional - Changes the variable used when printing `p` from `x` to `variable` - (see Examples). - - Examples - -------- - Construct the polynomial :math:`x^2 + 2x + 3`: - - >>> p = np.poly1d([1, 2, 3]) - >>> print(np.poly1d(p)) - 2 - 1 x + 2 x + 3 - - Evaluate the polynomial at :math:`x = 0.5`: - - >>> p(0.5) - 4.25 - - Find the roots: - - >>> p.r - array([-1.+1.41421356j, -1.-1.41421356j]) - >>> p(p.r) - array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) # may vary - - These numbers in the previous line represent (0, 0) to machine precision - - Show the coefficients: - - >>> p.c - array([1, 2, 3]) - - Display the order (the leading zero-coefficients are removed): - - >>> p.order - 2 - - Show the coefficient of the k-th power in the polynomial - (which is equivalent to ``p.c[-(i+1)]``): - - >>> p[1] - 2 - - Polynomials can be added, subtracted, multiplied, and divided - (returns quotient and remainder): - - >>> p * p - poly1d([ 1, 4, 10, 12, 9]) - - >>> (p**3 + 4) / p - (poly1d([ 1., 4., 10., 12., 9.]), poly1d([4.])) - - ``asarray(p)`` gives the coefficient array, so polynomials can be - used in all functions that accept arrays: - - >>> p**2 # square of polynomial - poly1d([ 1, 4, 10, 12, 9]) - - >>> np.square(p) # square of individual coefficients - array([1, 4, 9]) - - The variable used in the string representation of `p` can be modified, - using the `variable` parameter: - - >>> p = np.poly1d([1,2,3], variable='z') - >>> print(p) - 2 - 1 z + 2 z + 3 - - Construct a polynomial from its roots: - - >>> np.poly1d([1, 2], True) - poly1d([ 1., -3., 2.]) - - This is the same polynomial as obtained by: - - >>> np.poly1d([1, -1]) * np.poly1d([1, -2]) - 
poly1d([ 1, -3, 2]) - - """ - __hash__ = None - - @property - def coeffs(self): - """ The polynomial coefficients """ - return self._coeffs - - @coeffs.setter - def coeffs(self, value): - # allowing this makes p.coeffs *= 2 legal - if value is not self._coeffs: - raise AttributeError("Cannot set attribute") - - @property - def variable(self): - """ The name of the polynomial variable """ - return self._variable - - # calculated attributes - @property - def order(self): - """ The order or degree of the polynomial """ - return len(self._coeffs) - 1 - - @property - def roots(self): - """ The roots of the polynomial, where self(x) == 0 """ - return roots(self._coeffs) - - # our internal _coeffs property need to be backed by __dict__['coeffs'] for - # scipy to work correctly. - @property - def _coeffs(self): - return self.__dict__['coeffs'] - @_coeffs.setter - def _coeffs(self, coeffs): - self.__dict__['coeffs'] = coeffs - - # alias attributes - r = roots - c = coef = coefficients = coeffs - o = order - - def __init__(self, c_or_r, r=False, variable=None): - if isinstance(c_or_r, poly1d): - self._variable = c_or_r._variable - self._coeffs = c_or_r._coeffs - - if set(c_or_r.__dict__) - set(self.__dict__): - msg = ("In the future extra properties will not be copied " - "across when constructing one poly1d from another") - warnings.warn(msg, FutureWarning, stacklevel=2) - self.__dict__.update(c_or_r.__dict__) - - if variable is not None: - self._variable = variable - return - if r: - c_or_r = poly(c_or_r) - c_or_r = atleast_1d(c_or_r) - if c_or_r.ndim > 1: - raise ValueError("Polynomial must be 1d only.") - c_or_r = trim_zeros(c_or_r, trim='f') - if len(c_or_r) == 0: - c_or_r = NX.array([0.]) - self._coeffs = c_or_r - if variable is None: - variable = 'x' - self._variable = variable - - def __array__(self, t=None): - if t: - return NX.asarray(self.coeffs, t) - else: - return NX.asarray(self.coeffs) - - def __repr__(self): - vals = repr(self.coeffs) - vals = vals[6:-1] - 
return "poly1d(%s)" % vals - - def __len__(self): - return self.order - - def __str__(self): - thestr = "0" - var = self.variable - - # Remove leading zeros - coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)] - N = len(coeffs)-1 - - def fmt_float(q): - s = '%.4g' % q - if s.endswith('.0000'): - s = s[:-5] - return s - - for k in range(len(coeffs)): - if not iscomplex(coeffs[k]): - coefstr = fmt_float(real(coeffs[k])) - elif real(coeffs[k]) == 0: - coefstr = '%sj' % fmt_float(imag(coeffs[k])) - else: - coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])), - fmt_float(imag(coeffs[k]))) - - power = (N-k) - if power == 0: - if coefstr != '0': - newstr = '%s' % (coefstr,) - else: - if k == 0: - newstr = '0' - else: - newstr = '' - elif power == 1: - if coefstr == '0': - newstr = '' - elif coefstr == 'b': - newstr = var - else: - newstr = '%s %s' % (coefstr, var) - else: - if coefstr == '0': - newstr = '' - elif coefstr == 'b': - newstr = '%s**%d' % (var, power,) - else: - newstr = '%s %s**%d' % (coefstr, var, power) - - if k > 0: - if newstr != '': - if newstr.startswith('-'): - thestr = "%s - %s" % (thestr, newstr[1:]) - else: - thestr = "%s + %s" % (thestr, newstr) - else: - thestr = newstr - return _raise_power(thestr) - - def __call__(self, val): - return polyval(self.coeffs, val) - - def __neg__(self): - return poly1d(-self.coeffs) - - def __pos__(self): - return self - - def __mul__(self, other): - if isscalar(other): - return poly1d(self.coeffs * other) - else: - other = poly1d(other) - return poly1d(polymul(self.coeffs, other.coeffs)) - - def __rmul__(self, other): - if isscalar(other): - return poly1d(other * self.coeffs) - else: - other = poly1d(other) - return poly1d(polymul(self.coeffs, other.coeffs)) - - def __add__(self, other): - other = poly1d(other) - return poly1d(polyadd(self.coeffs, other.coeffs)) - - def __radd__(self, other): - other = poly1d(other) - return poly1d(polyadd(self.coeffs, other.coeffs)) - - def __pow__(self, val): - if 
not isscalar(val) or int(val) != val or val < 0: - raise ValueError("Power to non-negative integers only.") - res = [1] - for _ in range(val): - res = polymul(self.coeffs, res) - return poly1d(res) - - def __sub__(self, other): - other = poly1d(other) - return poly1d(polysub(self.coeffs, other.coeffs)) - - def __rsub__(self, other): - other = poly1d(other) - return poly1d(polysub(other.coeffs, self.coeffs)) - - def __div__(self, other): - if isscalar(other): - return poly1d(self.coeffs/other) - else: - other = poly1d(other) - return polydiv(self, other) - - __truediv__ = __div__ - - def __rdiv__(self, other): - if isscalar(other): - return poly1d(other/self.coeffs) - else: - other = poly1d(other) - return polydiv(other, self) - - __rtruediv__ = __rdiv__ - - def __eq__(self, other): - if not isinstance(other, poly1d): - return NotImplemented - if self.coeffs.shape != other.coeffs.shape: - return False - return (self.coeffs == other.coeffs).all() - - def __ne__(self, other): - if not isinstance(other, poly1d): - return NotImplemented - return not self.__eq__(other) - - - def __getitem__(self, val): - ind = self.order - val - if val > self.order: - return 0 - if val < 0: - return 0 - return self.coeffs[ind] - - def __setitem__(self, key, val): - ind = self.order - key - if key < 0: - raise ValueError("Does not support negative powers.") - if key > self.order: - zr = NX.zeros(key-self.order, self.coeffs.dtype) - self._coeffs = NX.concatenate((zr, self.coeffs)) - ind = 0 - self._coeffs[ind] = val - return - - def __iter__(self): - return iter(self.coeffs) - - def integ(self, m=1, k=0): - """ - Return an antiderivative (indefinite integral) of this polynomial. - - Refer to `polyint` for full documentation. - - See Also - -------- - polyint : equivalent function - - """ - return poly1d(polyint(self.coeffs, m=m, k=k)) - - def deriv(self, m=1): - """ - Return a derivative of this polynomial. - - Refer to `polyder` for full documentation. 
- - See Also - -------- - polyder : equivalent function - - """ - return poly1d(polyder(self.coeffs, m=m)) - -# Stuff to do on module import - -warnings.simplefilter('always', RankWarning) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/recfunctions.py b/venv/lib/python3.7/site-packages/numpy/lib/recfunctions.py deleted file mode 100644 index 927161d..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/recfunctions.py +++ /dev/null @@ -1,1610 +0,0 @@ -""" -Collection of utilities to manipulate structured arrays. - -Most of these functions were initially implemented by John Hunter for -matplotlib. They have been rewritten and extended for convenience. - -""" -from __future__ import division, absolute_import, print_function - -import sys -import itertools -import numpy as np -import numpy.ma as ma -from numpy import ndarray, recarray -from numpy.ma import MaskedArray -from numpy.ma.mrecords import MaskedRecords -from numpy.core.overrides import array_function_dispatch -from numpy.lib._iotools import _is_string_like -from numpy.compat import basestring -from numpy.testing import suppress_warnings - -if sys.version_info[0] < 3: - from future_builtins import zip - -_check_fill_value = np.ma.core._check_fill_value - - -__all__ = [ - 'append_fields', 'apply_along_fields', 'assign_fields_by_name', - 'drop_fields', 'find_duplicates', 'flatten_descr', - 'get_fieldstructure', 'get_names', 'get_names_flat', - 'join_by', 'merge_arrays', 'rec_append_fields', - 'rec_drop_fields', 'rec_join', 'recursive_fill_fields', - 'rename_fields', 'repack_fields', 'require_fields', - 'stack_arrays', 'structured_to_unstructured', 'unstructured_to_structured', - ] - - -def _recursive_fill_fields_dispatcher(input, output): - return (input, output) - - -@array_function_dispatch(_recursive_fill_fields_dispatcher) -def recursive_fill_fields(input, output): - """ - Fills fields from output with fields from input, - with support for nested structures. 
- - Parameters - ---------- - input : ndarray - Input array. - output : ndarray - Output array. - - Notes - ----- - * `output` should be at least the same size as `input` - - Examples - -------- - >>> from numpy.lib import recfunctions as rfn - >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)]) - >>> b = np.zeros((3,), dtype=a.dtype) - >>> rfn.recursive_fill_fields(a, b) - array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', '>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)]) - >>> dt.descr - [(('a', 'A'), '>> _get_fieldspec(dt) - [(('a', 'A'), dtype('int64')), ('b', dtype(('>> from numpy.lib import recfunctions as rfn - >>> rfn.get_names(np.empty((1,), dtype=int)) - Traceback (most recent call last): - ... - AttributeError: 'numpy.ndarray' object has no attribute 'names' - - >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)])) - Traceback (most recent call last): - ... - AttributeError: 'numpy.ndarray' object has no attribute 'names' - >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) - >>> rfn.get_names(adtype) - ('a', ('b', ('ba', 'bb'))) - """ - listnames = [] - names = adtype.names - for name in names: - current = adtype[name] - if current.names is not None: - listnames.append((name, tuple(get_names(current)))) - else: - listnames.append(name) - return tuple(listnames) - - -def get_names_flat(adtype): - """ - Returns the field names of the input datatype as a tuple. Nested structure - are flattened beforehand. - - Parameters - ---------- - adtype : dtype - Input datatype - - Examples - -------- - >>> from numpy.lib import recfunctions as rfn - >>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None - Traceback (most recent call last): - ... - AttributeError: 'numpy.ndarray' object has no attribute 'names' - >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)])) - Traceback (most recent call last): - ... 
- AttributeError: 'numpy.ndarray' object has no attribute 'names' - >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) - >>> rfn.get_names_flat(adtype) - ('a', 'b', 'ba', 'bb') - """ - listnames = [] - names = adtype.names - for name in names: - listnames.append(name) - current = adtype[name] - if current.names is not None: - listnames.extend(get_names_flat(current)) - return tuple(listnames) - - -def flatten_descr(ndtype): - """ - Flatten a structured data-type description. - - Examples - -------- - >>> from numpy.lib import recfunctions as rfn - >>> ndtype = np.dtype([('a', '>> rfn.flatten_descr(ndtype) - (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32'))) - - """ - names = ndtype.names - if names is None: - return (('', ndtype),) - else: - descr = [] - for field in names: - (typ, _) = ndtype.fields[field] - if typ.names is not None: - descr.extend(flatten_descr(typ)) - else: - descr.append((field, typ)) - return tuple(descr) - - -def _zip_dtype(seqarrays, flatten=False): - newdtype = [] - if flatten: - for a in seqarrays: - newdtype.extend(flatten_descr(a.dtype)) - else: - for a in seqarrays: - current = a.dtype - if current.names is not None and len(current.names) == 1: - # special case - dtypes of 1 field are flattened - newdtype.extend(_get_fieldspec(current)) - else: - newdtype.append(('', current)) - return np.dtype(newdtype) - - -def _zip_descr(seqarrays, flatten=False): - """ - Combine the dtype description of a series of arrays. - - Parameters - ---------- - seqarrays : sequence of arrays - Sequence of arrays - flatten : {boolean}, optional - Whether to collapse nested descriptions. - """ - return _zip_dtype(seqarrays, flatten=flatten).descr - - -def get_fieldstructure(adtype, lastname=None, parents=None,): - """ - Returns a dictionary with fields indexing lists of their parent fields. - - This function is used to simplify access to fields nested in other fields. 
- - Parameters - ---------- - adtype : np.dtype - Input datatype - lastname : optional - Last processed field name (used internally during recursion). - parents : dictionary - Dictionary of parent fields (used interbally during recursion). - - Examples - -------- - >>> from numpy.lib import recfunctions as rfn - >>> ndtype = np.dtype([('A', int), - ... ('B', [('BA', int), - ... ('BB', [('BBA', int), ('BBB', int)])])]) - >>> rfn.get_fieldstructure(ndtype) - ... # XXX: possible regression, order of BBA and BBB is swapped - {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']} - - """ - if parents is None: - parents = {} - names = adtype.names - for name in names: - current = adtype[name] - if current.names is not None: - if lastname: - parents[name] = [lastname, ] - else: - parents[name] = [] - parents.update(get_fieldstructure(current, name, parents)) - else: - lastparent = [_ for _ in (parents.get(lastname, []) or [])] - if lastparent: - lastparent.append(lastname) - elif lastname: - lastparent = [lastname, ] - parents[name] = lastparent or [] - return parents - - -def _izip_fields_flat(iterable): - """ - Returns an iterator of concatenated fields from a sequence of arrays, - collapsing any nested structure. - - """ - for element in iterable: - if isinstance(element, np.void): - for f in _izip_fields_flat(tuple(element)): - yield f - else: - yield element - - -def _izip_fields(iterable): - """ - Returns an iterator of concatenated fields from a sequence of arrays. - - """ - for element in iterable: - if (hasattr(element, '__iter__') and - not isinstance(element, basestring)): - for f in _izip_fields(element): - yield f - elif isinstance(element, np.void) and len(tuple(element)) == 1: - for f in _izip_fields(element): - yield f - else: - yield element - - -def _izip_records(seqarrays, fill_value=None, flatten=True): - """ - Returns an iterator of concatenated items from a sequence of arrays. 
- - Parameters - ---------- - seqarrays : sequence of arrays - Sequence of arrays. - fill_value : {None, integer} - Value used to pad shorter iterables. - flatten : {True, False}, - Whether to - """ - - # Should we flatten the items, or just use a nested approach - if flatten: - zipfunc = _izip_fields_flat - else: - zipfunc = _izip_fields - - if sys.version_info[0] >= 3: - zip_longest = itertools.zip_longest - else: - zip_longest = itertools.izip_longest - - for tup in zip_longest(*seqarrays, fillvalue=fill_value): - yield tuple(zipfunc(tup)) - - -def _fix_output(output, usemask=True, asrecarray=False): - """ - Private function: return a recarray, a ndarray, a MaskedArray - or a MaskedRecords depending on the input parameters - """ - if not isinstance(output, MaskedArray): - usemask = False - if usemask: - if asrecarray: - output = output.view(MaskedRecords) - else: - output = ma.filled(output) - if asrecarray: - output = output.view(recarray) - return output - - -def _fix_defaults(output, defaults=None): - """ - Update the fill_value and masked data of `output` - from the default given in a dictionary defaults. - """ - names = output.dtype.names - (data, mask, fill_value) = (output.data, output.mask, output.fill_value) - for (k, v) in (defaults or {}).items(): - if k in names: - fill_value[k] = v - data[k][mask[k]] = v - return output - - -def _merge_arrays_dispatcher(seqarrays, fill_value=None, flatten=None, - usemask=None, asrecarray=None): - return seqarrays - - -@array_function_dispatch(_merge_arrays_dispatcher) -def merge_arrays(seqarrays, fill_value=-1, flatten=False, - usemask=False, asrecarray=False): - """ - Merge arrays field by field. - - Parameters - ---------- - seqarrays : sequence of ndarrays - Sequence of arrays - fill_value : {float}, optional - Filling value used to pad missing data on the shorter arrays. - flatten : {False, True}, optional - Whether to collapse nested fields. 
- usemask : {False, True}, optional - Whether to return a masked array or not. - asrecarray : {False, True}, optional - Whether to return a recarray (MaskedRecords) or not. - - Examples - -------- - >>> from numpy.lib import recfunctions as rfn - >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.]))) - array([( 1, 10.), ( 2, 20.), (-1, 30.)], - dtype=[('f0', '>> rfn.merge_arrays((np.array([1, 2], dtype=np.int64), - ... np.array([10., 20., 30.])), usemask=False) - array([(1, 10.0), (2, 20.0), (-1, 30.0)], - dtype=[('f0', '>> rfn.merge_arrays((np.array([1, 2]).view([('a', np.int64)]), - ... np.array([10., 20., 30.])), - ... usemask=False, asrecarray=True) - rec.array([( 1, 10.), ( 2, 20.), (-1, 30.)], - dtype=[('a', '>> from numpy.lib import recfunctions as rfn - >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], - ... dtype=[('a', np.int64), ('b', [('ba', np.double), ('bb', np.int64)])]) - >>> rfn.drop_fields(a, 'a') - array([((2., 3),), ((5., 6),)], - dtype=[('b', [('ba', '>> rfn.drop_fields(a, 'ba') - array([(1, (3,)), (4, (6,))], dtype=[('a', '>> rfn.drop_fields(a, ['ba', 'bb']) - array([(1,), (4,)], dtype=[('a', '>> from numpy.lib import recfunctions as rfn - >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], - ... 
dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])]) - >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'}) - array([(1, (2., [ 3., 30.])), (4, (5., [ 6., 60.]))], - dtype=[('A', ' 1: - data = merge_arrays(data, flatten=True, usemask=usemask, - fill_value=fill_value) - else: - data = data.pop() - # - output = ma.masked_all( - max(len(base), len(data)), - dtype=_get_fieldspec(base.dtype) + _get_fieldspec(data.dtype)) - output = recursive_fill_fields(base, output) - output = recursive_fill_fields(data, output) - # - return _fix_output(output, usemask=usemask, asrecarray=asrecarray) - - -def _rec_append_fields_dispatcher(base, names, data, dtypes=None): - yield base - for d in data: - yield d - - -@array_function_dispatch(_rec_append_fields_dispatcher) -def rec_append_fields(base, names, data, dtypes=None): - """ - Add new fields to an existing array. - - The names of the fields are given with the `names` arguments, - the corresponding values with the `data` arguments. - If a single field is appended, `names`, `data` and `dtypes` do not have - to be lists but just values. - - Parameters - ---------- - base : array - Input array to extend. - names : string, sequence - String or sequence of strings corresponding to the names - of the new fields. - data : array or sequence of arrays - Array or sequence of arrays storing the fields to add to the base. - dtypes : sequence of datatypes, optional - Datatype or sequence of datatypes. - If None, the datatypes are estimated from the `data`. - - See Also - -------- - append_fields - - Returns - ------- - appended_array : np.recarray - """ - return append_fields(base, names, data=data, dtypes=dtypes, - asrecarray=True, usemask=False) - - -def _repack_fields_dispatcher(a, align=None, recurse=None): - return (a,) - - -@array_function_dispatch(_repack_fields_dispatcher) -def repack_fields(a, align=False, recurse=False): - """ - Re-pack the fields of a structured array or dtype in memory. 
- - The memory layout of structured datatypes allows fields at arbitrary - byte offsets. This means the fields can be separated by padding bytes, - their offsets can be non-monotonically increasing, and they can overlap. - - This method removes any overlaps and reorders the fields in memory so they - have increasing byte offsets, and adds or removes padding bytes depending - on the `align` option, which behaves like the `align` option to `np.dtype`. - - If `align=False`, this method produces a "packed" memory layout in which - each field starts at the byte the previous field ended, and any padding - bytes are removed. - - If `align=True`, this methods produces an "aligned" memory layout in which - each field's offset is a multiple of its alignment, and the total itemsize - is a multiple of the largest alignment, by adding padding bytes as needed. - - Parameters - ---------- - a : ndarray or dtype - array or dtype for which to repack the fields. - align : boolean - If true, use an "aligned" memory layout, otherwise use a "packed" layout. - recurse : boolean - If True, also repack nested structures. - - Returns - ------- - repacked : ndarray or dtype - Copy of `a` with fields repacked, or `a` itself if no repacking was - needed. - - Examples - -------- - - >>> from numpy.lib import recfunctions as rfn - >>> def print_offsets(d): - ... print("offsets:", [d.fields[name][1] for name in d.names]) - ... print("itemsize:", d.itemsize) - ... 
- >>> dt = np.dtype('u1, >> dt - dtype({'names':['f0','f1','f2'], 'formats':['u1','>> print_offsets(dt) - offsets: [0, 8, 16] - itemsize: 24 - >>> packed_dt = rfn.repack_fields(dt) - >>> packed_dt - dtype([('f0', 'u1'), ('f1', '>> print_offsets(packed_dt) - offsets: [0, 1, 9] - itemsize: 17 - - """ - if not isinstance(a, np.dtype): - dt = repack_fields(a.dtype, align=align, recurse=recurse) - return a.astype(dt, copy=False) - - if a.names is None: - return a - - fieldinfo = [] - for name in a.names: - tup = a.fields[name] - if recurse: - fmt = repack_fields(tup[0], align=align, recurse=True) - else: - fmt = tup[0] - - if len(tup) == 3: - name = (tup[2], name) - - fieldinfo.append((name, fmt)) - - dt = np.dtype(fieldinfo, align=align) - return np.dtype((a.type, dt)) - -def _get_fields_and_offsets(dt, offset=0): - """ - Returns a flat list of (dtype, count, offset) tuples of all the - scalar fields in the dtype "dt", including nested fields, in left - to right order. - """ - - # counts up elements in subarrays, including nested subarrays, and returns - # base dtype and count - def count_elem(dt): - count = 1 - while dt.shape != (): - for size in dt.shape: - count *= size - dt = dt.base - return dt, count - - fields = [] - for name in dt.names: - field = dt.fields[name] - f_dt, f_offset = field[0], field[1] - f_dt, n = count_elem(f_dt) - - if f_dt.names is None: - fields.append((np.dtype((f_dt, (n,))), n, f_offset + offset)) - else: - subfields = _get_fields_and_offsets(f_dt, f_offset + offset) - size = f_dt.itemsize - - for i in range(n): - if i == 0: - # optimization: avoid list comprehension if no subarray - fields.extend(subfields) - else: - fields.extend([(d, c, o + i*size) for d, c, o in subfields]) - return fields - - -def _structured_to_unstructured_dispatcher(arr, dtype=None, copy=None, - casting=None): - return (arr,) - -@array_function_dispatch(_structured_to_unstructured_dispatcher) -def structured_to_unstructured(arr, dtype=None, copy=False, 
casting='unsafe'): - """ - Converts and n-D structured array into an (n+1)-D unstructured array. - - The new array will have a new last dimension equal in size to the - number of field-elements of the input array. If not supplied, the output - datatype is determined from the numpy type promotion rules applied to all - the field datatypes. - - Nested fields, as well as each element of any subarray fields, all count - as a single field-elements. - - Parameters - ---------- - arr : ndarray - Structured array or dtype to convert. Cannot contain object datatype. - dtype : dtype, optional - The dtype of the output unstructured array. - copy : bool, optional - See copy argument to `ndarray.astype`. If true, always return a copy. - If false, and `dtype` requirements are satisfied, a view is returned. - casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional - See casting argument of `ndarray.astype`. Controls what kind of data - casting may occur. - - Returns - ------- - unstructured : ndarray - Unstructured array with one more dimension. - - Examples - -------- - - >>> from numpy.lib import recfunctions as rfn - >>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) - >>> a - array([(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.]), - (0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.])], - dtype=[('a', '>> rfn.structured_to_unstructured(a) - array([[0., 0., 0., 0., 0.], - [0., 0., 0., 0., 0.], - [0., 0., 0., 0., 0.], - [0., 0., 0., 0., 0.]]) - - >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], - ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) - >>> np.mean(rfn.structured_to_unstructured(b[['x', 'z']]), axis=-1) - array([ 3. , 5.5, 9. , 11. ]) - - """ - if arr.dtype.names is None: - raise ValueError('arr must be a structured array') - - fields = _get_fields_and_offsets(arr.dtype) - n_fields = len(fields) - if n_fields == 0 and dtype is None: - raise ValueError("arr has no fields. 
Unable to guess dtype") - elif n_fields == 0: - # too many bugs elsewhere for this to work now - raise NotImplementedError("arr with no fields is not supported") - - dts, counts, offsets = zip(*fields) - names = ['f{}'.format(n) for n in range(n_fields)] - - if dtype is None: - out_dtype = np.result_type(*[dt.base for dt in dts]) - else: - out_dtype = dtype - - # Use a series of views and casts to convert to an unstructured array: - - # first view using flattened fields (doesn't work for object arrays) - # Note: dts may include a shape for subarrays - flattened_fields = np.dtype({'names': names, - 'formats': dts, - 'offsets': offsets, - 'itemsize': arr.dtype.itemsize}) - with suppress_warnings() as sup: # until 1.16 (gh-12447) - sup.filter(FutureWarning, "Numpy has detected") - arr = arr.view(flattened_fields) - - # next cast to a packed format with all fields converted to new dtype - packed_fields = np.dtype({'names': names, - 'formats': [(out_dtype, dt.shape) for dt in dts]}) - arr = arr.astype(packed_fields, copy=copy, casting=casting) - - # finally is it safe to view the packed fields as the unstructured type - return arr.view((out_dtype, (sum(counts),))) - - -def _unstructured_to_structured_dispatcher(arr, dtype=None, names=None, - align=None, copy=None, casting=None): - return (arr,) - -@array_function_dispatch(_unstructured_to_structured_dispatcher) -def unstructured_to_structured(arr, dtype=None, names=None, align=False, - copy=False, casting='unsafe'): - """ - Converts and n-D unstructured array into an (n-1)-D structured array. - - The last dimension of the input array is converted into a structure, with - number of field-elements equal to the size of the last dimension of the - input array. By default all output fields have the input array's dtype, but - an output structured dtype with an equal number of fields-elements can be - supplied instead. 
- - Nested fields, as well as each element of any subarray fields, all count - towards the number of field-elements. - - Parameters - ---------- - arr : ndarray - Unstructured array or dtype to convert. - dtype : dtype, optional - The structured dtype of the output array - names : list of strings, optional - If dtype is not supplied, this specifies the field names for the output - dtype, in order. The field dtypes will be the same as the input array. - align : boolean, optional - Whether to create an aligned memory layout. - copy : bool, optional - See copy argument to `ndarray.astype`. If true, always return a copy. - If false, and `dtype` requirements are satisfied, a view is returned. - casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional - See casting argument of `ndarray.astype`. Controls what kind of data - casting may occur. - - Returns - ------- - structured : ndarray - Structured array with fewer dimensions. - - Examples - -------- - - >>> from numpy.lib import recfunctions as rfn - >>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) - >>> a = np.arange(20).reshape((4,5)) - >>> a - array([[ 0, 1, 2, 3, 4], - [ 5, 6, 7, 8, 9], - [10, 11, 12, 13, 14], - [15, 16, 17, 18, 19]]) - >>> rfn.unstructured_to_structured(a, dt) - array([( 0, ( 1., 2), [ 3., 4.]), ( 5, ( 6., 7), [ 8., 9.]), - (10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])], - dtype=[('a', '>> from numpy.lib import recfunctions as rfn - >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], - ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) - >>> rfn.apply_along_fields(np.mean, b) - array([ 2.66666667, 5.33333333, 8.66666667, 11. ]) - >>> rfn.apply_along_fields(np.mean, b[['x', 'z']]) - array([ 3. , 5.5, 9. , 11. 
]) - - """ - if arr.dtype.names is None: - raise ValueError('arr must be a structured array') - - uarr = structured_to_unstructured(arr) - return func(uarr, axis=-1) - # works and avoids axis requirement, but very, very slow: - #return np.apply_along_axis(func, -1, uarr) - -def _assign_fields_by_name_dispatcher(dst, src, zero_unassigned=None): - return dst, src - -@array_function_dispatch(_assign_fields_by_name_dispatcher) -def assign_fields_by_name(dst, src, zero_unassigned=True): - """ - Assigns values from one structured array to another by field name. - - Normally in numpy >= 1.14, assignment of one structured array to another - copies fields "by position", meaning that the first field from the src is - copied to the first field of the dst, and so on, regardless of field name. - - This function instead copies "by field name", such that fields in the dst - are assigned from the identically named field in the src. This applies - recursively for nested structures. This is how structure assignment worked - in numpy >= 1.6 to <= 1.13. - - Parameters - ---------- - dst : ndarray - src : ndarray - The source and destination arrays during assignment. - zero_unassigned : bool, optional - If True, fields in the dst for which there was no matching - field in the src are filled with the value 0 (zero). This - was the behavior of numpy <= 1.13. If False, those fields - are not modified. - """ - - if dst.dtype.names is None: - dst[...] = src - return - - for name in dst.dtype.names: - if name not in src.dtype.names: - if zero_unassigned: - dst[name] = 0 - else: - assign_fields_by_name(dst[name], src[name], - zero_unassigned) - -def _require_fields_dispatcher(array, required_dtype): - return (array,) - -@array_function_dispatch(_require_fields_dispatcher) -def require_fields(array, required_dtype): - """ - Casts a structured array to a new dtype using assignment by field-name. 
- - This function assigns from the old to the new array by name, so the - value of a field in the output array is the value of the field with the - same name in the source array. This has the effect of creating a new - ndarray containing only the fields "required" by the required_dtype. - - If a field name in the required_dtype does not exist in the - input array, that field is created and set to 0 in the output array. - - Parameters - ---------- - a : ndarray - array to cast - required_dtype : dtype - datatype for output array - - Returns - ------- - out : ndarray - array with the new dtype, with field values copied from the fields in - the input array with the same name - - Examples - -------- - - >>> from numpy.lib import recfunctions as rfn - >>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')]) - >>> rfn.require_fields(a, [('b', 'f4'), ('c', 'u1')]) - array([(1., 1), (1., 1), (1., 1), (1., 1)], - dtype=[('b', '>> rfn.require_fields(a, [('b', 'f4'), ('newf', 'u1')]) - array([(1., 0), (1., 0), (1., 0), (1., 0)], - dtype=[('b', '>> from numpy.lib import recfunctions as rfn - >>> x = np.array([1, 2,]) - >>> rfn.stack_arrays(x) is x - True - >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)]) - >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], - ... 
dtype=[('A', '|S3'), ('B', np.double), ('C', np.double)]) - >>> test = rfn.stack_arrays((z,zz)) - >>> test - masked_array(data=[(b'A', 1.0, --), (b'B', 2.0, --), (b'a', 10.0, 100.0), - (b'b', 20.0, 200.0), (b'c', 30.0, 300.0)], - mask=[(False, False, True), (False, False, True), - (False, False, False), (False, False, False), - (False, False, False)], - fill_value=(b'N/A', 1.e+20, 1.e+20), - dtype=[('A', 'S3'), ('B', ' '%s'" % - (cdtype, fdtype)) - # Only one field: use concatenate - if len(newdescr) == 1: - output = ma.concatenate(seqarrays) - else: - # - output = ma.masked_all((np.sum(nrecords),), newdescr) - offset = np.cumsum(np.r_[0, nrecords]) - seen = [] - for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]): - names = a.dtype.names - if names is None: - output['f%i' % len(seen)][i:j] = a - else: - for name in n: - output[name][i:j] = a[name] - if name not in seen: - seen.append(name) - # - return _fix_output(_fix_defaults(output, defaults), - usemask=usemask, asrecarray=asrecarray) - - -def _find_duplicates_dispatcher( - a, key=None, ignoremask=None, return_index=None): - return (a,) - - -@array_function_dispatch(_find_duplicates_dispatcher) -def find_duplicates(a, key=None, ignoremask=True, return_index=False): - """ - Find the duplicates in a structured array along a given key - - Parameters - ---------- - a : array-like - Input array - key : {string, None}, optional - Name of the fields along which to check the duplicates. - If None, the search is performed by records - ignoremask : {True, False}, optional - Whether masked data should be discarded or considered as duplicates. - return_index : {False, True}, optional - Whether to return the indices of the duplicated values. - - Examples - -------- - >>> from numpy.lib import recfunctions as rfn - >>> ndtype = [('a', int)] - >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3], - ... 
mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) - >>> rfn.find_duplicates(a, ignoremask=True, return_index=True) - (masked_array(data=[(1,), (1,), (2,), (2,)], - mask=[(False,), (False,), (False,), (False,)], - fill_value=(999999,), - dtype=[('a', '= nb1)] - nb1 - (r1cmn, r2cmn) = (len(idx_1), len(idx_2)) - if jointype == 'inner': - (r1spc, r2spc) = (0, 0) - elif jointype == 'outer': - idx_out = idx_sort[~flag_in] - idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)])) - idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1)) - (r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn) - elif jointype == 'leftouter': - idx_out = idx_sort[~flag_in] - idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)])) - (r1spc, r2spc) = (len(idx_1) - r1cmn, 0) - # Select the entries from each input - (s1, s2) = (r1[idx_1], r2[idx_2]) - # - # Build the new description of the output array ....... - # Start with the key fields - ndtype = _get_fieldspec(r1k.dtype) - - # Add the fields from r1 - for fname, fdtype in _get_fieldspec(r1.dtype): - if fname not in key: - ndtype.append((fname, fdtype)) - - # Add the fields from r2 - for fname, fdtype in _get_fieldspec(r2.dtype): - # Have we seen the current name already ? - # we need to rebuild this list every time - names = list(name for name, dtype in ndtype) - try: - nameidx = names.index(fname) - except ValueError: - #... 
we haven't: just add the description to the current list - ndtype.append((fname, fdtype)) - else: - # collision - _, cdtype = ndtype[nameidx] - if fname in key: - # The current field is part of the key: take the largest dtype - ndtype[nameidx] = (fname, max(fdtype, cdtype)) - else: - # The current field is not part of the key: add the suffixes, - # and place the new field adjacent to the old one - ndtype[nameidx:nameidx + 1] = [ - (fname + r1postfix, cdtype), - (fname + r2postfix, fdtype) - ] - # Rebuild a dtype from the new fields - ndtype = np.dtype(ndtype) - # Find the largest nb of common fields : - # r1cmn and r2cmn should be equal, but... - cmn = max(r1cmn, r2cmn) - # Construct an empty array - output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype) - names = output.dtype.names - for f in r1names: - selected = s1[f] - if f not in names or (f in r2names and not r2postfix and f not in key): - f += r1postfix - current = output[f] - current[:r1cmn] = selected[:r1cmn] - if jointype in ('outer', 'leftouter'): - current[cmn:cmn + r1spc] = selected[r1cmn:] - for f in r2names: - selected = s2[f] - if f not in names or (f in r1names and not r1postfix and f not in key): - f += r2postfix - current = output[f] - current[:r2cmn] = selected[:r2cmn] - if (jointype == 'outer') and r2spc: - current[-r2spc:] = selected[r2cmn:] - # Sort and finalize the output - output.sort(order=key) - kwargs = dict(usemask=usemask, asrecarray=asrecarray) - return _fix_output(_fix_defaults(output, defaults), **kwargs) - - -def _rec_join_dispatcher( - key, r1, r2, jointype=None, r1postfix=None, r2postfix=None, - defaults=None): - return (r1, r2) - - -@array_function_dispatch(_rec_join_dispatcher) -def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', - defaults=None): - """ - Join arrays `r1` and `r2` on keys. - Alternative to join_by, that always returns a np.recarray. 
- - See Also - -------- - join_by : equivalent function - """ - kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix, - defaults=defaults, usemask=False, asrecarray=True) - return join_by(key, r1, r2, **kwargs) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/scimath.py b/venv/lib/python3.7/site-packages/numpy/lib/scimath.py deleted file mode 100644 index 5ac790c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/scimath.py +++ /dev/null @@ -1,603 +0,0 @@ -""" -Wrapper functions to more user-friendly calling of certain math functions -whose output data-type is different than the input data-type in certain -domains of the input. - -For example, for functions like `log` with branch cuts, the versions in this -module provide the mathematically valid answers in the complex plane:: - - >>> import math - >>> from numpy.lib import scimath - >>> scimath.log(-math.exp(1)) == (1+1j*math.pi) - True - -Similarly, `sqrt`, other base logarithms, `power` and trig functions are -correctly handled. See their respective docstrings for specific examples. - -""" -from __future__ import division, absolute_import, print_function - -import numpy.core.numeric as nx -import numpy.core.numerictypes as nt -from numpy.core.numeric import asarray, any -from numpy.core.overrides import array_function_dispatch -from numpy.lib.type_check import isreal - - -__all__ = [ - 'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin', - 'arctanh' - ] - - -_ln2 = nx.log(2.0) - - -def _tocomplex(arr): - """Convert its input `arr` to a complex array. - - The input is returned as a complex array of the smallest type that will fit - the original data: types like single, byte, short, etc. become csingle, - while others become cdouble. - - A copy of the input is always made. - - Parameters - ---------- - arr : array - - Returns - ------- - array - An array with the same input data as the input but in complex form. 
- - Examples - -------- - - First, consider an input of type short: - - >>> a = np.array([1,2,3],np.short) - - >>> ac = np.lib.scimath._tocomplex(a); ac - array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) - - >>> ac.dtype - dtype('complex64') - - If the input is of type double, the output is correspondingly of the - complex double type as well: - - >>> b = np.array([1,2,3],np.double) - - >>> bc = np.lib.scimath._tocomplex(b); bc - array([1.+0.j, 2.+0.j, 3.+0.j]) - - >>> bc.dtype - dtype('complex128') - - Note that even if the input was complex to begin with, a copy is still - made, since the astype() method always copies: - - >>> c = np.array([1,2,3],np.csingle) - - >>> cc = np.lib.scimath._tocomplex(c); cc - array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) - - >>> c *= 2; c - array([2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64) - - >>> cc - array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) - """ - if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte, - nt.ushort, nt.csingle)): - return arr.astype(nt.csingle) - else: - return arr.astype(nt.cdouble) - - -def _fix_real_lt_zero(x): - """Convert `x` to complex if it has real, negative components. - - Otherwise, output is just the array version of the input (via asarray). - - Parameters - ---------- - x : array_like - - Returns - ------- - array - - Examples - -------- - >>> np.lib.scimath._fix_real_lt_zero([1,2]) - array([1, 2]) - - >>> np.lib.scimath._fix_real_lt_zero([-1,2]) - array([-1.+0.j, 2.+0.j]) - - """ - x = asarray(x) - if any(isreal(x) & (x < 0)): - x = _tocomplex(x) - return x - - -def _fix_int_lt_zero(x): - """Convert `x` to double if it has real, negative components. - - Otherwise, output is just the array version of the input (via asarray). 
- - Parameters - ---------- - x : array_like - - Returns - ------- - array - - Examples - -------- - >>> np.lib.scimath._fix_int_lt_zero([1,2]) - array([1, 2]) - - >>> np.lib.scimath._fix_int_lt_zero([-1,2]) - array([-1., 2.]) - """ - x = asarray(x) - if any(isreal(x) & (x < 0)): - x = x * 1.0 - return x - - -def _fix_real_abs_gt_1(x): - """Convert `x` to complex if it has real components x_i with abs(x_i)>1. - - Otherwise, output is just the array version of the input (via asarray). - - Parameters - ---------- - x : array_like - - Returns - ------- - array - - Examples - -------- - >>> np.lib.scimath._fix_real_abs_gt_1([0,1]) - array([0, 1]) - - >>> np.lib.scimath._fix_real_abs_gt_1([0,2]) - array([0.+0.j, 2.+0.j]) - """ - x = asarray(x) - if any(isreal(x) & (abs(x) > 1)): - x = _tocomplex(x) - return x - - -def _unary_dispatcher(x): - return (x,) - - -@array_function_dispatch(_unary_dispatcher) -def sqrt(x): - """ - Compute the square root of x. - - For negative input elements, a complex value is returned - (unlike `numpy.sqrt` which returns NaN). - - Parameters - ---------- - x : array_like - The input value(s). - - Returns - ------- - out : ndarray or scalar - The square root of `x`. If `x` was a scalar, so is `out`, - otherwise an array is returned. - - See Also - -------- - numpy.sqrt - - Examples - -------- - For real, non-negative inputs this works just like `numpy.sqrt`: - - >>> np.lib.scimath.sqrt(1) - 1.0 - >>> np.lib.scimath.sqrt([1, 4]) - array([1., 2.]) - - But it automatically handles negative inputs: - - >>> np.lib.scimath.sqrt(-1) - 1j - >>> np.lib.scimath.sqrt([-1,4]) - array([0.+1.j, 2.+0.j]) - - """ - x = _fix_real_lt_zero(x) - return nx.sqrt(x) - - -@array_function_dispatch(_unary_dispatcher) -def log(x): - """ - Compute the natural logarithm of `x`. - - Return the "principal value" (for a description of this, see `numpy.log`) - of :math:`log_e(x)`. 
For real `x > 0`, this is a real number (``log(0)`` - returns ``-inf`` and ``log(np.inf)`` returns ``inf``). Otherwise, the - complex principle value is returned. - - Parameters - ---------- - x : array_like - The value(s) whose log is (are) required. - - Returns - ------- - out : ndarray or scalar - The log of the `x` value(s). If `x` was a scalar, so is `out`, - otherwise an array is returned. - - See Also - -------- - numpy.log - - Notes - ----- - For a log() that returns ``NAN`` when real `x < 0`, use `numpy.log` - (note, however, that otherwise `numpy.log` and this `log` are identical, - i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and, - notably, the complex principle value if ``x.imag != 0``). - - Examples - -------- - >>> np.emath.log(np.exp(1)) - 1.0 - - Negative arguments are handled "correctly" (recall that - ``exp(log(x)) == x`` does *not* hold for real ``x < 0``): - - >>> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j) - True - - """ - x = _fix_real_lt_zero(x) - return nx.log(x) - - -@array_function_dispatch(_unary_dispatcher) -def log10(x): - """ - Compute the logarithm base 10 of `x`. - - Return the "principal value" (for a description of this, see - `numpy.log10`) of :math:`log_{10}(x)`. For real `x > 0`, this - is a real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)`` - returns ``inf``). Otherwise, the complex principle value is returned. - - Parameters - ---------- - x : array_like or scalar - The value(s) whose log base 10 is (are) required. - - Returns - ------- - out : ndarray or scalar - The log base 10 of the `x` value(s). If `x` was a scalar, so is `out`, - otherwise an array object is returned. 
- - See Also - -------- - numpy.log10 - - Notes - ----- - For a log10() that returns ``NAN`` when real `x < 0`, use `numpy.log10` - (note, however, that otherwise `numpy.log10` and this `log10` are - identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, - and, notably, the complex principle value if ``x.imag != 0``). - - Examples - -------- - - (We set the printing precision so the example can be auto-tested) - - >>> np.set_printoptions(precision=4) - - >>> np.emath.log10(10**1) - 1.0 - - >>> np.emath.log10([-10**1, -10**2, 10**2]) - array([1.+1.3644j, 2.+1.3644j, 2.+0.j ]) - - """ - x = _fix_real_lt_zero(x) - return nx.log10(x) - - -def _logn_dispatcher(n, x): - return (n, x,) - - -@array_function_dispatch(_logn_dispatcher) -def logn(n, x): - """ - Take log base n of x. - - If `x` contains negative inputs, the answer is computed and returned in the - complex domain. - - Parameters - ---------- - n : array_like - The integer base(s) in which the log is taken. - x : array_like - The value(s) whose log base `n` is (are) required. - - Returns - ------- - out : ndarray or scalar - The log base `n` of the `x` value(s). If `x` was a scalar, so is - `out`, otherwise an array is returned. - - Examples - -------- - >>> np.set_printoptions(precision=4) - - >>> np.lib.scimath.logn(2, [4, 8]) - array([2., 3.]) - >>> np.lib.scimath.logn(2, [-4, -8, 8]) - array([2.+4.5324j, 3.+4.5324j, 3.+0.j ]) - - """ - x = _fix_real_lt_zero(x) - n = _fix_real_lt_zero(n) - return nx.log(x)/nx.log(n) - - -@array_function_dispatch(_unary_dispatcher) -def log2(x): - """ - Compute the logarithm base 2 of `x`. - - Return the "principal value" (for a description of this, see - `numpy.log2`) of :math:`log_2(x)`. For real `x > 0`, this is - a real number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns - ``inf``). Otherwise, the complex principle value is returned. - - Parameters - ---------- - x : array_like - The value(s) whose log base 2 is (are) required. 
- - Returns - ------- - out : ndarray or scalar - The log base 2 of the `x` value(s). If `x` was a scalar, so is `out`, - otherwise an array is returned. - - See Also - -------- - numpy.log2 - - Notes - ----- - For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2` - (note, however, that otherwise `numpy.log2` and this `log2` are - identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, - and, notably, the complex principle value if ``x.imag != 0``). - - Examples - -------- - We set the printing precision so the example can be auto-tested: - - >>> np.set_printoptions(precision=4) - - >>> np.emath.log2(8) - 3.0 - >>> np.emath.log2([-4, -8, 8]) - array([2.+4.5324j, 3.+4.5324j, 3.+0.j ]) - - """ - x = _fix_real_lt_zero(x) - return nx.log2(x) - - -def _power_dispatcher(x, p): - return (x, p) - - -@array_function_dispatch(_power_dispatcher) -def power(x, p): - """ - Return x to the power p, (x**p). - - If `x` contains negative values, the output is converted to the - complex domain. - - Parameters - ---------- - x : array_like - The input value(s). - p : array_like of ints - The power(s) to which `x` is raised. If `x` contains multiple values, - `p` has to either be a scalar, or contain the same number of values - as `x`. In the latter case, the result is - ``x[0]**p[0], x[1]**p[1], ...``. - - Returns - ------- - out : ndarray or scalar - The result of ``x**p``. If `x` and `p` are scalars, so is `out`, - otherwise an array is returned. - - See Also - -------- - numpy.power - - Examples - -------- - >>> np.set_printoptions(precision=4) - - >>> np.lib.scimath.power([2, 4], 2) - array([ 4, 16]) - >>> np.lib.scimath.power([2, 4], -2) - array([0.25 , 0.0625]) - >>> np.lib.scimath.power([-2, 4], 2) - array([ 4.-0.j, 16.+0.j]) - - """ - x = _fix_real_lt_zero(x) - p = _fix_int_lt_zero(p) - return nx.power(x, p) - - -@array_function_dispatch(_unary_dispatcher) -def arccos(x): - """ - Compute the inverse cosine of x. 
- - Return the "principal value" (for a description of this, see - `numpy.arccos`) of the inverse cosine of `x`. For real `x` such that - `abs(x) <= 1`, this is a real number in the closed interval - :math:`[0, \\pi]`. Otherwise, the complex principle value is returned. - - Parameters - ---------- - x : array_like or scalar - The value(s) whose arccos is (are) required. - - Returns - ------- - out : ndarray or scalar - The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so - is `out`, otherwise an array object is returned. - - See Also - -------- - numpy.arccos - - Notes - ----- - For an arccos() that returns ``NAN`` when real `x` is not in the - interval ``[-1,1]``, use `numpy.arccos`. - - Examples - -------- - >>> np.set_printoptions(precision=4) - - >>> np.emath.arccos(1) # a scalar is returned - 0.0 - - >>> np.emath.arccos([1,2]) - array([0.-0.j , 0.-1.317j]) - - """ - x = _fix_real_abs_gt_1(x) - return nx.arccos(x) - - -@array_function_dispatch(_unary_dispatcher) -def arcsin(x): - """ - Compute the inverse sine of x. - - Return the "principal value" (for a description of this, see - `numpy.arcsin`) of the inverse sine of `x`. For real `x` such that - `abs(x) <= 1`, this is a real number in the closed interval - :math:`[-\\pi/2, \\pi/2]`. Otherwise, the complex principle value is - returned. - - Parameters - ---------- - x : array_like or scalar - The value(s) whose arcsin is (are) required. - - Returns - ------- - out : ndarray or scalar - The inverse sine(s) of the `x` value(s). If `x` was a scalar, so - is `out`, otherwise an array object is returned. - - See Also - -------- - numpy.arcsin - - Notes - ----- - For an arcsin() that returns ``NAN`` when real `x` is not in the - interval ``[-1,1]``, use `numpy.arcsin`. - - Examples - -------- - >>> np.set_printoptions(precision=4) - - >>> np.emath.arcsin(0) - 0.0 - - >>> np.emath.arcsin([0,1]) - array([0. 
, 1.5708]) - - """ - x = _fix_real_abs_gt_1(x) - return nx.arcsin(x) - - -@array_function_dispatch(_unary_dispatcher) -def arctanh(x): - """ - Compute the inverse hyperbolic tangent of `x`. - - Return the "principal value" (for a description of this, see - `numpy.arctanh`) of `arctanh(x)`. For real `x` such that - `abs(x) < 1`, this is a real number. If `abs(x) > 1`, or if `x` is - complex, the result is complex. Finally, `x = 1` returns``inf`` and - `x=-1` returns ``-inf``. - - Parameters - ---------- - x : array_like - The value(s) whose arctanh is (are) required. - - Returns - ------- - out : ndarray or scalar - The inverse hyperbolic tangent(s) of the `x` value(s). If `x` was - a scalar so is `out`, otherwise an array is returned. - - - See Also - -------- - numpy.arctanh - - Notes - ----- - For an arctanh() that returns ``NAN`` when real `x` is not in the - interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does - return +/-inf for `x = +/-1`). - - Examples - -------- - >>> np.set_printoptions(precision=4) - - >>> from numpy.testing import suppress_warnings - >>> with suppress_warnings() as sup: - ... sup.filter(RuntimeWarning) - ... 
np.emath.arctanh(np.eye(2)) - array([[inf, 0.], - [ 0., inf]]) - >>> np.emath.arctanh([1j]) - array([0.+0.7854j]) - - """ - x = _fix_real_abs_gt_1(x) - return nx.arctanh(x) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/setup.py b/venv/lib/python3.7/site-packages/numpy/lib/setup.py deleted file mode 100644 index d342410..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/setup.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import division, print_function - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration('lib', parent_package, top_path) - config.add_data_dir('tests') - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/shape_base.py b/venv/lib/python3.7/site-packages/numpy/lib/shape_base.py deleted file mode 100644 index dbb61c2..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/shape_base.py +++ /dev/null @@ -1,1258 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import functools - -import numpy.core.numeric as _nx -from numpy.core.numeric import ( - asarray, zeros, outer, concatenate, array, asanyarray - ) -from numpy.core.fromnumeric import reshape, transpose -from numpy.core.multiarray import normalize_axis_index -from numpy.core import overrides -from numpy.core import vstack, atleast_3d -from numpy.core.numeric import normalize_axis_tuple -from numpy.core.shape_base import _arrays_for_stack_dispatcher -from numpy.lib.index_tricks import ndindex -from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells - - -__all__ = [ - 'column_stack', 'row_stack', 'dstack', 'array_split', 'split', - 'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims', - 'apply_along_axis', 'kron', 'tile', 'get_array_wrap', 'take_along_axis', - 'put_along_axis' - ] - - 
-array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - - -def _make_along_axis_idx(arr_shape, indices, axis): - # compute dimensions to iterate over - if not _nx.issubdtype(indices.dtype, _nx.integer): - raise IndexError('`indices` must be an integer array') - if len(arr_shape) != indices.ndim: - raise ValueError( - "`indices` and `arr` must have the same number of dimensions") - shape_ones = (1,) * indices.ndim - dest_dims = list(range(axis)) + [None] + list(range(axis+1, indices.ndim)) - - # build a fancy index, consisting of orthogonal aranges, with the - # requested index inserted at the right location - fancy_index = [] - for dim, n in zip(dest_dims, arr_shape): - if dim is None: - fancy_index.append(indices) - else: - ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim+1:] - fancy_index.append(_nx.arange(n).reshape(ind_shape)) - - return tuple(fancy_index) - - -def _take_along_axis_dispatcher(arr, indices, axis): - return (arr, indices) - - -@array_function_dispatch(_take_along_axis_dispatcher) -def take_along_axis(arr, indices, axis): - """ - Take values from the input array by matching 1d index and data slices. - - This iterates over matching 1d slices oriented along the specified axis in - the index and data arrays, and uses the former to look up values in the - latter. These slices can be different lengths. - - Functions returning an index along an axis, like `argsort` and - `argpartition`, produce suitable indices for this function. - - .. versionadded:: 1.15.0 - - Parameters - ---------- - arr: ndarray (Ni..., M, Nk...) - Source array - indices: ndarray (Ni..., J, Nk...) - Indices to take along each 1d slice of `arr`. This must match the - dimension of arr, but dimensions Ni and Nj only need to broadcast - against `arr`. - axis: int - The axis to take 1d slices along. If axis is None, the input array is - treated as if it had first been flattened to 1d, for consistency with - `sort` and `argsort`. 
- - Returns - ------- - out: ndarray (Ni..., J, Nk...) - The indexed result. - - Notes - ----- - This is equivalent to (but faster than) the following use of `ndindex` and - `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices:: - - Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:] - J = indices.shape[axis] # Need not equal M - out = np.empty(Ni + (J,) + Nk) - - for ii in ndindex(Ni): - for kk in ndindex(Nk): - a_1d = a [ii + s_[:,] + kk] - indices_1d = indices[ii + s_[:,] + kk] - out_1d = out [ii + s_[:,] + kk] - for j in range(J): - out_1d[j] = a_1d[indices_1d[j]] - - Equivalently, eliminating the inner loop, the last two lines would be:: - - out_1d[:] = a_1d[indices_1d] - - See Also - -------- - take : Take along an axis, using the same indices for every 1d slice - put_along_axis : - Put values into the destination array by matching 1d index and data slices - - Examples - -------- - - For this sample array - - >>> a = np.array([[10, 30, 20], [60, 40, 50]]) - - We can sort either by using sort directly, or argsort and this function - - >>> np.sort(a, axis=1) - array([[10, 20, 30], - [40, 50, 60]]) - >>> ai = np.argsort(a, axis=1); ai - array([[0, 2, 1], - [1, 2, 0]]) - >>> np.take_along_axis(a, ai, axis=1) - array([[10, 20, 30], - [40, 50, 60]]) - - The same works for max and min, if you expand the dimensions: - - >>> np.expand_dims(np.max(a, axis=1), axis=1) - array([[30], - [60]]) - >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1) - >>> ai - array([[1], - [0]]) - >>> np.take_along_axis(a, ai, axis=1) - array([[30], - [60]]) - - If we want to get the max and min at the same time, we can stack the - indices first - - >>> ai_min = np.expand_dims(np.argmin(a, axis=1), axis=1) - >>> ai_max = np.expand_dims(np.argmax(a, axis=1), axis=1) - >>> ai = np.concatenate([ai_min, ai_max], axis=1) - >>> ai - array([[0, 1], - [1, 0]]) - >>> np.take_along_axis(a, ai, axis=1) - array([[10, 30], - [40, 60]]) - """ - # normalize inputs - if axis is None: 
- arr = arr.flat - arr_shape = (len(arr),) # flatiter has no .shape - axis = 0 - else: - axis = normalize_axis_index(axis, arr.ndim) - arr_shape = arr.shape - - # use the fancy index - return arr[_make_along_axis_idx(arr_shape, indices, axis)] - - -def _put_along_axis_dispatcher(arr, indices, values, axis): - return (arr, indices, values) - - -@array_function_dispatch(_put_along_axis_dispatcher) -def put_along_axis(arr, indices, values, axis): - """ - Put values into the destination array by matching 1d index and data slices. - - This iterates over matching 1d slices oriented along the specified axis in - the index and data arrays, and uses the former to place values into the - latter. These slices can be different lengths. - - Functions returning an index along an axis, like `argsort` and - `argpartition`, produce suitable indices for this function. - - .. versionadded:: 1.15.0 - - Parameters - ---------- - arr: ndarray (Ni..., M, Nk...) - Destination array. - indices: ndarray (Ni..., J, Nk...) - Indices to change along each 1d slice of `arr`. This must match the - dimension of arr, but dimensions in Ni and Nj may be 1 to broadcast - against `arr`. - values: array_like (Ni..., J, Nk...) - values to insert at those indices. Its shape and dimension are - broadcast to match that of `indices`. - axis: int - The axis to take 1d slices along. If axis is None, the destination - array is treated as if a flattened 1d view had been created of it. 
- - Notes - ----- - This is equivalent to (but faster than) the following use of `ndindex` and - `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices:: - - Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:] - J = indices.shape[axis] # Need not equal M - - for ii in ndindex(Ni): - for kk in ndindex(Nk): - a_1d = a [ii + s_[:,] + kk] - indices_1d = indices[ii + s_[:,] + kk] - values_1d = values [ii + s_[:,] + kk] - for j in range(J): - a_1d[indices_1d[j]] = values_1d[j] - - Equivalently, eliminating the inner loop, the last two lines would be:: - - a_1d[indices_1d] = values_1d - - See Also - -------- - take_along_axis : - Take values from the input array by matching 1d index and data slices - - Examples - -------- - - For this sample array - - >>> a = np.array([[10, 30, 20], [60, 40, 50]]) - - We can replace the maximum values with: - - >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1) - >>> ai - array([[1], - [0]]) - >>> np.put_along_axis(a, ai, 99, axis=1) - >>> a - array([[10, 99, 20], - [99, 40, 50]]) - - """ - # normalize inputs - if axis is None: - arr = arr.flat - axis = 0 - arr_shape = (len(arr),) # flatiter has no .shape - else: - axis = normalize_axis_index(axis, arr.ndim) - arr_shape = arr.shape - - # use the fancy index - arr[_make_along_axis_idx(arr_shape, indices, axis)] = values - - -def _apply_along_axis_dispatcher(func1d, axis, arr, *args, **kwargs): - return (arr,) - - -@array_function_dispatch(_apply_along_axis_dispatcher) -def apply_along_axis(func1d, axis, arr, *args, **kwargs): - """ - Apply a function to 1-D slices along the given axis. - - Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a` - is a 1-D slice of `arr` along `axis`. 
- - This is equivalent to (but faster than) the following use of `ndindex` and - `s_`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices:: - - Ni, Nk = a.shape[:axis], a.shape[axis+1:] - for ii in ndindex(Ni): - for kk in ndindex(Nk): - f = func1d(arr[ii + s_[:,] + kk]) - Nj = f.shape - for jj in ndindex(Nj): - out[ii + jj + kk] = f[jj] - - Equivalently, eliminating the inner loop, this can be expressed as:: - - Ni, Nk = a.shape[:axis], a.shape[axis+1:] - for ii in ndindex(Ni): - for kk in ndindex(Nk): - out[ii + s_[...,] + kk] = func1d(arr[ii + s_[:,] + kk]) - - Parameters - ---------- - func1d : function (M,) -> (Nj...) - This function should accept 1-D arrays. It is applied to 1-D - slices of `arr` along the specified axis. - axis : integer - Axis along which `arr` is sliced. - arr : ndarray (Ni..., M, Nk...) - Input array. - args : any - Additional arguments to `func1d`. - kwargs : any - Additional named arguments to `func1d`. - - .. versionadded:: 1.9.0 - - - Returns - ------- - out : ndarray (Ni..., Nj..., Nk...) - The output array. The shape of `out` is identical to the shape of - `arr`, except along the `axis` dimension. This axis is removed, and - replaced with new dimensions equal to the shape of the return value - of `func1d`. So if `func1d` returns a scalar `out` will have one - fewer dimensions than `arr`. - - See Also - -------- - apply_over_axes : Apply a function repeatedly over multiple axes. - - Examples - -------- - >>> def my_func(a): - ... \"\"\"Average first and last element of a 1-D array\"\"\" - ... return (a[0] + a[-1]) * 0.5 - >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) - >>> np.apply_along_axis(my_func, 0, b) - array([4., 5., 6.]) - >>> np.apply_along_axis(my_func, 1, b) - array([2., 5., 8.]) - - For a function that returns a 1D array, the number of dimensions in - `outarr` is the same as `arr`. 
- - >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]]) - >>> np.apply_along_axis(sorted, 1, b) - array([[1, 7, 8], - [3, 4, 9], - [2, 5, 6]]) - - For a function that returns a higher dimensional array, those dimensions - are inserted in place of the `axis` dimension. - - >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) - >>> np.apply_along_axis(np.diag, -1, b) - array([[[1, 0, 0], - [0, 2, 0], - [0, 0, 3]], - [[4, 0, 0], - [0, 5, 0], - [0, 0, 6]], - [[7, 0, 0], - [0, 8, 0], - [0, 0, 9]]]) - """ - # handle negative axes - arr = asanyarray(arr) - nd = arr.ndim - axis = normalize_axis_index(axis, nd) - - # arr, with the iteration axis at the end - in_dims = list(range(nd)) - inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis+1:] + [axis]) - - # compute indices for the iteration axes, and append a trailing ellipsis to - # prevent 0d arrays decaying to scalars, which fixes gh-8642 - inds = ndindex(inarr_view.shape[:-1]) - inds = (ind + (Ellipsis,) for ind in inds) - - # invoke the function on the first item - try: - ind0 = next(inds) - except StopIteration: - raise ValueError('Cannot apply_along_axis when any iteration dimensions are 0') - res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs)) - - # build a buffer for storing evaluations of func1d. - # remove the requested axis, and add the new ones on the end. - # laid out so that each write is contiguous. 
- # for a tuple index inds, buff[inds] = func1d(inarr_view[inds]) - buff = zeros(inarr_view.shape[:-1] + res.shape, res.dtype) - - # permutation of axes such that out = buff.transpose(buff_permute) - buff_dims = list(range(buff.ndim)) - buff_permute = ( - buff_dims[0 : axis] + - buff_dims[buff.ndim-res.ndim : buff.ndim] + - buff_dims[axis : buff.ndim-res.ndim] - ) - - # matrices have a nasty __array_prepare__ and __array_wrap__ - if not isinstance(res, matrix): - buff = res.__array_prepare__(buff) - - # save the first result, then compute and save all remaining results - buff[ind0] = res - for ind in inds: - buff[ind] = asanyarray(func1d(inarr_view[ind], *args, **kwargs)) - - if not isinstance(res, matrix): - # wrap the array, to preserve subclasses - buff = res.__array_wrap__(buff) - - # finally, rotate the inserted axes back to where they belong - return transpose(buff, buff_permute) - - else: - # matrices have to be transposed first, because they collapse dimensions! - out_arr = transpose(buff, buff_permute) - return res.__array_wrap__(out_arr) - - -def _apply_over_axes_dispatcher(func, a, axes): - return (a,) - - -@array_function_dispatch(_apply_over_axes_dispatcher) -def apply_over_axes(func, a, axes): - """ - Apply a function repeatedly over multiple axes. - - `func` is called as `res = func(a, axis)`, where `axis` is the first - element of `axes`. The result `res` of the function call must have - either the same dimensions as `a` or one less dimension. If `res` - has one less dimension than `a`, a dimension is inserted before - `axis`. The call to `func` is then repeated for each axis in `axes`, - with `res` as the first argument. - - Parameters - ---------- - func : function - This function must take two arguments, `func(a, axis)`. - a : array_like - Input array. - axes : array_like - Axes over which `func` is applied; the elements must be integers. - - Returns - ------- - apply_over_axis : ndarray - The output array. 
The number of dimensions is the same as `a`, - but the shape can be different. This depends on whether `func` - changes the shape of its output with respect to its input. - - See Also - -------- - apply_along_axis : - Apply a function to 1-D slices of an array along the given axis. - - Notes - ------ - This function is equivalent to tuple axis arguments to reorderable ufuncs - with keepdims=True. Tuple axis arguments to ufuncs have been available since - version 1.7.0. - - Examples - -------- - >>> a = np.arange(24).reshape(2,3,4) - >>> a - array([[[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]], - [[12, 13, 14, 15], - [16, 17, 18, 19], - [20, 21, 22, 23]]]) - - Sum over axes 0 and 2. The result has same number of dimensions - as the original array: - - >>> np.apply_over_axes(np.sum, a, [0,2]) - array([[[ 60], - [ 92], - [124]]]) - - Tuple axis arguments to ufuncs are equivalent: - - >>> np.sum(a, axis=(0,2), keepdims=True) - array([[[ 60], - [ 92], - [124]]]) - - """ - val = asarray(a) - N = a.ndim - if array(axes).ndim == 0: - axes = (axes,) - for axis in axes: - if axis < 0: - axis = N + axis - args = (val, axis) - res = func(*args) - if res.ndim == val.ndim: - val = res - else: - res = expand_dims(res, axis) - if res.ndim == val.ndim: - val = res - else: - raise ValueError("function is not returning " - "an array of the correct shape") - return val - - -def _expand_dims_dispatcher(a, axis): - return (a,) - - -@array_function_dispatch(_expand_dims_dispatcher) -def expand_dims(a, axis): - """ - Expand the shape of an array. - - Insert a new axis that will appear at the `axis` position in the expanded - array shape. - - Parameters - ---------- - a : array_like - Input array. - axis : int or tuple of ints - Position in the expanded axes where the new axis (or axes) is placed. - - .. deprecated:: 1.13.0 - Passing an axis where ``axis > a.ndim`` will be treated as - ``axis == a.ndim``, and passing ``axis < -a.ndim - 1`` will - be treated as ``axis == 0``. 
This behavior is deprecated. - - .. versionchanged:: 1.18.0 - A tuple of axes is now supported. Out of range axes as - described above are now forbidden and raise an `AxisError`. - - Returns - ------- - result : ndarray - View of `a` with the number of dimensions increased. - - See Also - -------- - squeeze : The inverse operation, removing singleton dimensions - reshape : Insert, remove, and combine dimensions, and resize existing ones - doc.indexing, atleast_1d, atleast_2d, atleast_3d - - Examples - -------- - >>> x = np.array([1, 2]) - >>> x.shape - (2,) - - The following is equivalent to ``x[np.newaxis, :]`` or ``x[np.newaxis]``: - - >>> y = np.expand_dims(x, axis=0) - >>> y - array([[1, 2]]) - >>> y.shape - (1, 2) - - The following is equivalent to ``x[:, np.newaxis]``: - - >>> y = np.expand_dims(x, axis=1) - >>> y - array([[1], - [2]]) - >>> y.shape - (2, 1) - - ``axis`` may also be a tuple: - - >>> y = np.expand_dims(x, axis=(0, 1)) - >>> y - array([[[1, 2]]]) - - >>> y = np.expand_dims(x, axis=(2, 0)) - >>> y - array([[[1], - [2]]]) - - Note that some examples may use ``None`` instead of ``np.newaxis``. These - are the same objects: - - >>> np.newaxis is None - True - - """ - if isinstance(a, matrix): - a = asarray(a) - else: - a = asanyarray(a) - - if type(axis) not in (tuple, list): - axis = (axis,) - - out_ndim = len(axis) + a.ndim - axis = normalize_axis_tuple(axis, out_ndim) - - shape_it = iter(a.shape) - shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)] - - return a.reshape(shape) - - -row_stack = vstack - - -def _column_stack_dispatcher(tup): - return _arrays_for_stack_dispatcher(tup) - - -@array_function_dispatch(_column_stack_dispatcher) -def column_stack(tup): - """ - Stack 1-D arrays as columns into a 2-D array. - - Take a sequence of 1-D arrays and stack them as columns - to make a single 2-D array. 2-D arrays are stacked as-is, - just like with `hstack`. 1-D arrays are turned into 2-D columns - first. 
- - Parameters - ---------- - tup : sequence of 1-D or 2-D arrays. - Arrays to stack. All of them must have the same first dimension. - - Returns - ------- - stacked : 2-D array - The array formed by stacking the given arrays. - - See Also - -------- - stack, hstack, vstack, concatenate - - Examples - -------- - >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) - >>> np.column_stack((a,b)) - array([[1, 2], - [2, 3], - [3, 4]]) - - """ - if not overrides.ARRAY_FUNCTION_ENABLED: - # raise warning if necessary - _arrays_for_stack_dispatcher(tup, stacklevel=2) - - arrays = [] - for v in tup: - arr = array(v, copy=False, subok=True) - if arr.ndim < 2: - arr = array(arr, copy=False, subok=True, ndmin=2).T - arrays.append(arr) - return _nx.concatenate(arrays, 1) - - -def _dstack_dispatcher(tup): - return _arrays_for_stack_dispatcher(tup) - - -@array_function_dispatch(_dstack_dispatcher) -def dstack(tup): - """ - Stack arrays in sequence depth wise (along third axis). - - This is equivalent to concatenation along the third axis after 2-D arrays - of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape - `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by - `dsplit`. - - This function makes most sense for arrays with up to 3 dimensions. For - instance, for pixel-data with a height (first axis), width (second axis), - and r/g/b channels (third axis). The functions `concatenate`, `stack` and - `block` provide more general stacking and concatenation operations. - - Parameters - ---------- - tup : sequence of arrays - The arrays must have the same shape along all but the third axis. - 1-D or 2-D arrays must have the same shape. - - Returns - ------- - stacked : ndarray - The array formed by stacking the given arrays, will be at least 3-D. - - See Also - -------- - stack : Join a sequence of arrays along a new axis. - vstack : Stack along first axis. - hstack : Stack along second axis. 
- concatenate : Join a sequence of arrays along an existing axis. - dsplit : Split array along third axis. - - Examples - -------- - >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) - >>> np.dstack((a,b)) - array([[[1, 2], - [2, 3], - [3, 4]]]) - - >>> a = np.array([[1],[2],[3]]) - >>> b = np.array([[2],[3],[4]]) - >>> np.dstack((a,b)) - array([[[1, 2]], - [[2, 3]], - [[3, 4]]]) - - """ - if not overrides.ARRAY_FUNCTION_ENABLED: - # raise warning if necessary - _arrays_for_stack_dispatcher(tup, stacklevel=2) - - arrs = atleast_3d(*tup) - if not isinstance(arrs, list): - arrs = [arrs] - return _nx.concatenate(arrs, 2) - - -def _replace_zero_by_x_arrays(sub_arys): - for i in range(len(sub_arys)): - if _nx.ndim(sub_arys[i]) == 0: - sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) - elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)): - sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) - return sub_arys - - -def _array_split_dispatcher(ary, indices_or_sections, axis=None): - return (ary, indices_or_sections) - - -@array_function_dispatch(_array_split_dispatcher) -def array_split(ary, indices_or_sections, axis=0): - """ - Split an array into multiple sub-arrays. - - Please refer to the ``split`` documentation. The only difference - between these functions is that ``array_split`` allows - `indices_or_sections` to be an integer that does *not* equally - divide the axis. For an array of length l that should be split - into n sections, it returns l % n sub-arrays of size l//n + 1 - and the rest of size l//n. - - See Also - -------- - split : Split array into multiple sub-arrays of equal size. - - Examples - -------- - >>> x = np.arange(8.0) - >>> np.array_split(x, 3) - [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])] - - >>> x = np.arange(7.0) - >>> np.array_split(x, 3) - [array([0., 1., 2.]), array([3., 4.]), array([5., 6.])] - - """ - try: - Ntotal = ary.shape[axis] - except AttributeError: - Ntotal = len(ary) - try: - # handle array case. 
- Nsections = len(indices_or_sections) + 1 - div_points = [0] + list(indices_or_sections) + [Ntotal] - except TypeError: - # indices_or_sections is a scalar, not an array. - Nsections = int(indices_or_sections) - if Nsections <= 0: - raise ValueError('number sections must be larger than 0.') - Neach_section, extras = divmod(Ntotal, Nsections) - section_sizes = ([0] + - extras * [Neach_section+1] + - (Nsections-extras) * [Neach_section]) - div_points = _nx.array(section_sizes, dtype=_nx.intp).cumsum() - - sub_arys = [] - sary = _nx.swapaxes(ary, axis, 0) - for i in range(Nsections): - st = div_points[i] - end = div_points[i + 1] - sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0)) - - return sub_arys - - -def _split_dispatcher(ary, indices_or_sections, axis=None): - return (ary, indices_or_sections) - - -@array_function_dispatch(_split_dispatcher) -def split(ary, indices_or_sections, axis=0): - """ - Split an array into multiple sub-arrays as views into `ary`. - - Parameters - ---------- - ary : ndarray - Array to be divided into sub-arrays. - indices_or_sections : int or 1-D array - If `indices_or_sections` is an integer, N, the array will be divided - into N equal arrays along `axis`. If such a split is not possible, - an error is raised. - - If `indices_or_sections` is a 1-D array of sorted integers, the entries - indicate where along `axis` the array is split. For example, - ``[2, 3]`` would, for ``axis=0``, result in - - - ary[:2] - - ary[2:3] - - ary[3:] - - If an index exceeds the dimension of the array along `axis`, - an empty sub-array is returned correspondingly. - axis : int, optional - The axis along which to split, default is 0. - - Returns - ------- - sub-arrays : list of ndarrays - A list of sub-arrays as views into `ary`. - - Raises - ------ - ValueError - If `indices_or_sections` is given as an integer, but - a split does not result in equal division. 
- - See Also - -------- - array_split : Split an array into multiple sub-arrays of equal or - near-equal size. Does not raise an exception if - an equal division cannot be made. - hsplit : Split array into multiple sub-arrays horizontally (column-wise). - vsplit : Split array into multiple sub-arrays vertically (row wise). - dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). - concatenate : Join a sequence of arrays along an existing axis. - stack : Join a sequence of arrays along a new axis. - hstack : Stack arrays in sequence horizontally (column wise). - vstack : Stack arrays in sequence vertically (row wise). - dstack : Stack arrays in sequence depth wise (along third dimension). - - Examples - -------- - >>> x = np.arange(9.0) - >>> np.split(x, 3) - [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])] - - >>> x = np.arange(8.0) - >>> np.split(x, [3, 5, 6, 10]) - [array([0., 1., 2.]), - array([3., 4.]), - array([5.]), - array([6., 7.]), - array([], dtype=float64)] - - """ - try: - len(indices_or_sections) - except TypeError: - sections = indices_or_sections - N = ary.shape[axis] - if N % sections: - raise ValueError( - 'array split does not result in an equal division') - return array_split(ary, indices_or_sections, axis) - - -def _hvdsplit_dispatcher(ary, indices_or_sections): - return (ary, indices_or_sections) - - -@array_function_dispatch(_hvdsplit_dispatcher) -def hsplit(ary, indices_or_sections): - """ - Split an array into multiple sub-arrays horizontally (column-wise). - - Please refer to the `split` documentation. `hsplit` is equivalent - to `split` with ``axis=1``, the array is always split along the second - axis regardless of the array dimension. - - See Also - -------- - split : Split an array into multiple sub-arrays of equal size. 
- - Examples - -------- - >>> x = np.arange(16.0).reshape(4, 4) - >>> x - array([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.], - [12., 13., 14., 15.]]) - >>> np.hsplit(x, 2) - [array([[ 0., 1.], - [ 4., 5.], - [ 8., 9.], - [12., 13.]]), - array([[ 2., 3.], - [ 6., 7.], - [10., 11.], - [14., 15.]])] - >>> np.hsplit(x, np.array([3, 6])) - [array([[ 0., 1., 2.], - [ 4., 5., 6.], - [ 8., 9., 10.], - [12., 13., 14.]]), - array([[ 3.], - [ 7.], - [11.], - [15.]]), - array([], shape=(4, 0), dtype=float64)] - - With a higher dimensional array the split is still along the second axis. - - >>> x = np.arange(8.0).reshape(2, 2, 2) - >>> x - array([[[0., 1.], - [2., 3.]], - [[4., 5.], - [6., 7.]]]) - >>> np.hsplit(x, 2) - [array([[[0., 1.]], - [[4., 5.]]]), - array([[[2., 3.]], - [[6., 7.]]])] - - """ - if _nx.ndim(ary) == 0: - raise ValueError('hsplit only works on arrays of 1 or more dimensions') - if ary.ndim > 1: - return split(ary, indices_or_sections, 1) - else: - return split(ary, indices_or_sections, 0) - - -@array_function_dispatch(_hvdsplit_dispatcher) -def vsplit(ary, indices_or_sections): - """ - Split an array into multiple sub-arrays vertically (row-wise). - - Please refer to the ``split`` documentation. ``vsplit`` is equivalent - to ``split`` with `axis=0` (default), the array is always split along the - first axis regardless of the array dimension. - - See Also - -------- - split : Split an array into multiple sub-arrays of equal size. 
- - Examples - -------- - >>> x = np.arange(16.0).reshape(4, 4) - >>> x - array([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.], - [12., 13., 14., 15.]]) - >>> np.vsplit(x, 2) - [array([[0., 1., 2., 3.], - [4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.], - [12., 13., 14., 15.]])] - >>> np.vsplit(x, np.array([3, 6])) - [array([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.]]), array([[12., 13., 14., 15.]]), array([], shape=(0, 4), dtype=float64)] - - With a higher dimensional array the split is still along the first axis. - - >>> x = np.arange(8.0).reshape(2, 2, 2) - >>> x - array([[[0., 1.], - [2., 3.]], - [[4., 5.], - [6., 7.]]]) - >>> np.vsplit(x, 2) - [array([[[0., 1.], - [2., 3.]]]), array([[[4., 5.], - [6., 7.]]])] - - """ - if _nx.ndim(ary) < 2: - raise ValueError('vsplit only works on arrays of 2 or more dimensions') - return split(ary, indices_or_sections, 0) - - -@array_function_dispatch(_hvdsplit_dispatcher) -def dsplit(ary, indices_or_sections): - """ - Split array into multiple sub-arrays along the 3rd axis (depth). - - Please refer to the `split` documentation. `dsplit` is equivalent - to `split` with ``axis=2``, the array is always split along the third - axis provided the array dimension is greater than or equal to 3. - - See Also - -------- - split : Split an array into multiple sub-arrays of equal size. 
- - Examples - -------- - >>> x = np.arange(16.0).reshape(2, 2, 4) - >>> x - array([[[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.]], - [[ 8., 9., 10., 11.], - [12., 13., 14., 15.]]]) - >>> np.dsplit(x, 2) - [array([[[ 0., 1.], - [ 4., 5.]], - [[ 8., 9.], - [12., 13.]]]), array([[[ 2., 3.], - [ 6., 7.]], - [[10., 11.], - [14., 15.]]])] - >>> np.dsplit(x, np.array([3, 6])) - [array([[[ 0., 1., 2.], - [ 4., 5., 6.]], - [[ 8., 9., 10.], - [12., 13., 14.]]]), - array([[[ 3.], - [ 7.]], - [[11.], - [15.]]]), - array([], shape=(2, 2, 0), dtype=float64)] - """ - if _nx.ndim(ary) < 3: - raise ValueError('dsplit only works on arrays of 3 or more dimensions') - return split(ary, indices_or_sections, 2) - -def get_array_prepare(*args): - """Find the wrapper for the array with the highest priority. - - In case of ties, leftmost wins. If no wrapper is found, return None - """ - wrappers = sorted((getattr(x, '__array_priority__', 0), -i, - x.__array_prepare__) for i, x in enumerate(args) - if hasattr(x, '__array_prepare__')) - if wrappers: - return wrappers[-1][-1] - return None - -def get_array_wrap(*args): - """Find the wrapper for the array with the highest priority. - - In case of ties, leftmost wins. If no wrapper is found, return None - """ - wrappers = sorted((getattr(x, '__array_priority__', 0), -i, - x.__array_wrap__) for i, x in enumerate(args) - if hasattr(x, '__array_wrap__')) - if wrappers: - return wrappers[-1][-1] - return None - - -def _kron_dispatcher(a, b): - return (a, b) - - -@array_function_dispatch(_kron_dispatcher) -def kron(a, b): - """ - Kronecker product of two arrays. - - Computes the Kronecker product, a composite array made of blocks of the - second array scaled by the first. - - Parameters - ---------- - a, b : array_like - - Returns - ------- - out : ndarray - - See Also - -------- - outer : The outer product - - Notes - ----- - The function assumes that the number of dimensions of `a` and `b` - are the same, if necessary prepending the smallest with ones. 
- If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`, - the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*SN)`. - The elements are products of elements from `a` and `b`, organized - explicitly by:: - - kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN] - - where:: - - kt = it * st + jt, t = 0,...,N - - In the common 2-D case (N=1), the block structure can be visualized:: - - [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ], - [ ... ... ], - [ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]] - - - Examples - -------- - >>> np.kron([1,10,100], [5,6,7]) - array([ 5, 6, 7, ..., 500, 600, 700]) - >>> np.kron([5,6,7], [1,10,100]) - array([ 5, 50, 500, ..., 7, 70, 700]) - - >>> np.kron(np.eye(2), np.ones((2,2))) - array([[1., 1., 0., 0.], - [1., 1., 0., 0.], - [0., 0., 1., 1.], - [0., 0., 1., 1.]]) - - >>> a = np.arange(100).reshape((2,5,2,5)) - >>> b = np.arange(24).reshape((2,3,4)) - >>> c = np.kron(a,b) - >>> c.shape - (2, 10, 6, 20) - >>> I = (1,3,0,2) - >>> J = (0,2,1) - >>> J1 = (0,) + J # extend to ndim=4 - >>> S1 = (1,) + b.shape - >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1)) - >>> c[K] == a[I]*b[J] - True - - """ - b = asanyarray(b) - a = array(a, copy=False, subok=True, ndmin=b.ndim) - ndb, nda = b.ndim, a.ndim - if (nda == 0 or ndb == 0): - return _nx.multiply(a, b) - as_ = a.shape - bs = b.shape - if not a.flags.contiguous: - a = reshape(a, as_) - if not b.flags.contiguous: - b = reshape(b, bs) - nd = ndb - if (ndb != nda): - if (ndb > nda): - as_ = (1,)*(ndb-nda) + as_ - else: - bs = (1,)*(nda-ndb) + bs - nd = nda - result = outer(a, b).reshape(as_+bs) - axis = nd-1 - for _ in range(nd): - result = concatenate(result, axis=axis) - wrapper = get_array_prepare(a, b) - if wrapper is not None: - result = wrapper(result) - wrapper = get_array_wrap(a, b) - if wrapper is not None: - result = wrapper(result) - return result - - -def _tile_dispatcher(A, reps): - return (A, reps) - - -@array_function_dispatch(_tile_dispatcher) -def tile(A, reps): - 
""" - Construct an array by repeating A the number of times given by reps. - - If `reps` has length ``d``, the result will have dimension of - ``max(d, A.ndim)``. - - If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new - axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication, - or shape (1, 1, 3) for 3-D replication. If this is not the desired - behavior, promote `A` to d-dimensions manually before calling this - function. - - If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it. - Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as - (1, 1, 2, 2). - - Note : Although tile may be used for broadcasting, it is strongly - recommended to use numpy's broadcasting operations and functions. - - Parameters - ---------- - A : array_like - The input array. - reps : array_like - The number of repetitions of `A` along each axis. - - Returns - ------- - c : ndarray - The tiled output array. - - See Also - -------- - repeat : Repeat elements of an array. 
- broadcast_to : Broadcast an array to a new shape - - Examples - -------- - >>> a = np.array([0, 1, 2]) - >>> np.tile(a, 2) - array([0, 1, 2, 0, 1, 2]) - >>> np.tile(a, (2, 2)) - array([[0, 1, 2, 0, 1, 2], - [0, 1, 2, 0, 1, 2]]) - >>> np.tile(a, (2, 1, 2)) - array([[[0, 1, 2, 0, 1, 2]], - [[0, 1, 2, 0, 1, 2]]]) - - >>> b = np.array([[1, 2], [3, 4]]) - >>> np.tile(b, 2) - array([[1, 2, 1, 2], - [3, 4, 3, 4]]) - >>> np.tile(b, (2, 1)) - array([[1, 2], - [3, 4], - [1, 2], - [3, 4]]) - - >>> c = np.array([1,2,3,4]) - >>> np.tile(c,(4,1)) - array([[1, 2, 3, 4], - [1, 2, 3, 4], - [1, 2, 3, 4], - [1, 2, 3, 4]]) - """ - try: - tup = tuple(reps) - except TypeError: - tup = (reps,) - d = len(tup) - if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray): - # Fixes the problem that the function does not make a copy if A is a - # numpy array and the repetitions are 1 in all dimensions - return _nx.array(A, copy=True, subok=True, ndmin=d) - else: - # Note that no copy of zero-sized arrays is made. However since they - # have no data there is no risk of an inadvertent overwrite. - c = _nx.array(A, copy=False, subok=True, ndmin=d) - if (d < c.ndim): - tup = (1,)*(c.ndim-d) + tup - shape_out = tuple(s*t for s, t in zip(c.shape, tup)) - n = c.size - if n > 0: - for dim_in, nrep in zip(c.shape, tup): - if nrep != 1: - c = c.reshape(-1, n).repeat(nrep, 0) - n //= dim_in - return c.reshape(shape_out) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/stride_tricks.py b/venv/lib/python3.7/site-packages/numpy/lib/stride_tricks.py deleted file mode 100644 index 8aafd09..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/stride_tricks.py +++ /dev/null @@ -1,271 +0,0 @@ -""" -Utilities that manipulate strides to achieve desirable effects. - -An explanation of strides can be found in the "ndarray.rst" file in the -NumPy reference guide. 
- -""" -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.core.overrides import array_function_dispatch - -__all__ = ['broadcast_to', 'broadcast_arrays'] - - -class DummyArray(object): - """Dummy object that just exists to hang __array_interface__ dictionaries - and possibly keep alive a reference to a base array. - """ - - def __init__(self, interface, base=None): - self.__array_interface__ = interface - self.base = base - - -def _maybe_view_as_subclass(original_array, new_array): - if type(original_array) is not type(new_array): - # if input was an ndarray subclass and subclasses were OK, - # then view the result as that subclass. - new_array = new_array.view(type=type(original_array)) - # Since we have done something akin to a view from original_array, we - # should let the subclass finalize (if it has it implemented, i.e., is - # not None). - if new_array.__array_finalize__: - new_array.__array_finalize__(original_array) - return new_array - - -def as_strided(x, shape=None, strides=None, subok=False, writeable=True): - """ - Create a view into the array with the given shape and strides. - - .. warning:: This function has to be used with extreme care, see notes. - - Parameters - ---------- - x : ndarray - Array to create a new. - shape : sequence of int, optional - The shape of the new array. Defaults to ``x.shape``. - strides : sequence of int, optional - The strides of the new array. Defaults to ``x.strides``. - subok : bool, optional - .. versionadded:: 1.10 - - If True, subclasses are preserved. - writeable : bool, optional - .. versionadded:: 1.12 - - If set to False, the returned array will always be readonly. - Otherwise it will be writable if the original array was. It - is advisable to set this to False if possible (see Notes). - - Returns - ------- - view : ndarray - - See also - -------- - broadcast_to: broadcast an array to a given shape. - reshape : reshape an array. 
- - Notes - ----- - ``as_strided`` creates a view into the array given the exact strides - and shape. This means it manipulates the internal data structure of - ndarray and, if done incorrectly, the array elements can point to - invalid memory and can corrupt results or crash your program. - It is advisable to always use the original ``x.strides`` when - calculating new strides to avoid reliance on a contiguous memory - layout. - - Furthermore, arrays created with this function often contain self - overlapping memory, so that two elements are identical. - Vectorized write operations on such arrays will typically be - unpredictable. They may even give different results for small, large, - or transposed arrays. - Since writing to these arrays has to be tested and done with great - care, you may want to use ``writeable=False`` to avoid accidental write - operations. - - For these reasons it is advisable to avoid ``as_strided`` when - possible. - """ - # first convert input to array, possibly keeping subclass - x = np.array(x, copy=False, subok=subok) - interface = dict(x.__array_interface__) - if shape is not None: - interface['shape'] = tuple(shape) - if strides is not None: - interface['strides'] = tuple(strides) - - array = np.asarray(DummyArray(interface, base=x)) - # The route via `__interface__` does not preserve structured - # dtypes. Since dtype should remain unchanged, we set it explicitly. 
- array.dtype = x.dtype - - view = _maybe_view_as_subclass(x, array) - - if view.flags.writeable and not writeable: - view.flags.writeable = False - - return view - - -def _broadcast_to(array, shape, subok, readonly): - shape = tuple(shape) if np.iterable(shape) else (shape,) - array = np.array(array, copy=False, subok=subok) - if not shape and array.shape: - raise ValueError('cannot broadcast a non-scalar to a scalar array') - if any(size < 0 for size in shape): - raise ValueError('all elements of broadcast shape must be non-' - 'negative') - extras = [] - it = np.nditer( - (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras, - op_flags=['readonly'], itershape=shape, order='C') - with it: - # never really has writebackifcopy semantics - broadcast = it.itviews[0] - result = _maybe_view_as_subclass(array, broadcast) - # In a future version this will go away - if not readonly and array.flags._writeable_no_warn: - result.flags.writeable = True - result.flags._warn_on_write = True - return result - - -def _broadcast_to_dispatcher(array, shape, subok=None): - return (array,) - - -@array_function_dispatch(_broadcast_to_dispatcher, module='numpy') -def broadcast_to(array, shape, subok=False): - """Broadcast an array to a new shape. - - Parameters - ---------- - array : array_like - The array to broadcast. - shape : tuple - The shape of the desired array. - subok : bool, optional - If True, then sub-classes will be passed-through, otherwise - the returned array will be forced to be a base-class array (default). - - Returns - ------- - broadcast : array - A readonly view on the original array with the given shape. It is - typically not contiguous. Furthermore, more than one element of a - broadcasted array may refer to a single memory location. - - Raises - ------ - ValueError - If the array is not compatible with the new shape according to NumPy's - broadcasting rules. - - Notes - ----- - .. 
versionadded:: 1.10.0 - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> np.broadcast_to(x, (3, 3)) - array([[1, 2, 3], - [1, 2, 3], - [1, 2, 3]]) - """ - return _broadcast_to(array, shape, subok=subok, readonly=True) - - -def _broadcast_shape(*args): - """Returns the shape of the arrays that would result from broadcasting the - supplied arrays against each other. - """ - # use the old-iterator because np.nditer does not handle size 0 arrays - # consistently - b = np.broadcast(*args[:32]) - # unfortunately, it cannot handle 32 or more arguments directly - for pos in range(32, len(args), 31): - # ironically, np.broadcast does not properly handle np.broadcast - # objects (it treats them as scalars) - # use broadcasting to avoid allocating the full array - b = broadcast_to(0, b.shape) - b = np.broadcast(b, *args[pos:(pos + 31)]) - return b.shape - - -def _broadcast_arrays_dispatcher(*args, **kwargs): - return args - - -@array_function_dispatch(_broadcast_arrays_dispatcher, module='numpy') -def broadcast_arrays(*args, **kwargs): - """ - Broadcast any number of arrays against each other. - - Parameters - ---------- - `*args` : array_likes - The arrays to broadcast. - - subok : bool, optional - If True, then sub-classes will be passed-through, otherwise - the returned arrays will be forced to be a base-class array (default). - - Returns - ------- - broadcasted : list of arrays - These arrays are views on the original arrays. They are typically - not contiguous. Furthermore, more than one element of a - broadcasted array may refer to a single memory location. If you need - to write to the arrays, make copies first. While you can set the - ``writable`` flag True, writing to a single output value may end up - changing more than one location in the output array. - - .. deprecated:: 1.17 - The output is currently marked so that if written to, a deprecation - warning will be emitted. 
A future version will set the - ``writable`` flag False so writing to it will raise an error. - - Examples - -------- - >>> x = np.array([[1,2,3]]) - >>> y = np.array([[4],[5]]) - >>> np.broadcast_arrays(x, y) - [array([[1, 2, 3], - [1, 2, 3]]), array([[4, 4, 4], - [5, 5, 5]])] - - Here is a useful idiom for getting contiguous copies instead of - non-contiguous views. - - >>> [np.array(a) for a in np.broadcast_arrays(x, y)] - [array([[1, 2, 3], - [1, 2, 3]]), array([[4, 4, 4], - [5, 5, 5]])] - - """ - # nditer is not used here to avoid the limit of 32 arrays. - # Otherwise, something like the following one-liner would suffice: - # return np.nditer(args, flags=['multi_index', 'zerosize_ok'], - # order='C').itviews - - subok = kwargs.pop('subok', False) - if kwargs: - raise TypeError('broadcast_arrays() got an unexpected keyword ' - 'argument {!r}'.format(list(kwargs.keys())[0])) - args = [np.array(_m, copy=False, subok=subok) for _m in args] - - shape = _broadcast_shape(*args) - - if all(array.shape == shape for array in args): - # Common case where nothing needs to be broadcasted. 
- return args - - return [_broadcast_to(array, shape, subok=subok, readonly=False) - for array in args] diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/__init__.py b/venv/lib/python3.7/site-packages/numpy/lib/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/data/py2-objarr.npy b/venv/lib/python3.7/site-packages/numpy/lib/tests/data/py2-objarr.npy deleted file mode 100644 index 12936c9..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/lib/tests/data/py2-objarr.npy and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/data/py2-objarr.npz b/venv/lib/python3.7/site-packages/numpy/lib/tests/data/py2-objarr.npz deleted file mode 100644 index 68a3b53..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/lib/tests/data/py2-objarr.npz and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/data/py3-objarr.npy b/venv/lib/python3.7/site-packages/numpy/lib/tests/data/py3-objarr.npy deleted file mode 100644 index 6776074..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/lib/tests/data/py3-objarr.npy and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/data/py3-objarr.npz b/venv/lib/python3.7/site-packages/numpy/lib/tests/data/py3-objarr.npz deleted file mode 100644 index 05eac0b..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/lib/tests/data/py3-objarr.npz and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/data/python3.npy b/venv/lib/python3.7/site-packages/numpy/lib/tests/data/python3.npy deleted file mode 100644 index 7c6997d..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/lib/tests/data/python3.npy and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/data/win64python2.npy b/venv/lib/python3.7/site-packages/numpy/lib/tests/data/win64python2.npy deleted file mode 100644 index 
d9bc36a..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/lib/tests/data/win64python2.npy and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/test__datasource.py b/venv/lib/python3.7/site-packages/numpy/lib/tests/test__datasource.py deleted file mode 100644 index 8eac16b..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/tests/test__datasource.py +++ /dev/null @@ -1,378 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import sys -import pytest -from tempfile import mkdtemp, mkstemp, NamedTemporaryFile -from shutil import rmtree - -import numpy.lib._datasource as datasource -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_warns - ) - -if sys.version_info[0] >= 3: - import urllib.request as urllib_request - from urllib.parse import urlparse - from urllib.error import URLError -else: - import urllib2 as urllib_request - from urlparse import urlparse - from urllib2 import URLError - - -def urlopen_stub(url, data=None): - '''Stub to replace urlopen for testing.''' - if url == valid_httpurl(): - tmpfile = NamedTemporaryFile(prefix='urltmp_') - return tmpfile - else: - raise URLError('Name or service not known') - -# setup and teardown -old_urlopen = None - - -def setup_module(): - global old_urlopen - - old_urlopen = urllib_request.urlopen - urllib_request.urlopen = urlopen_stub - - -def teardown_module(): - urllib_request.urlopen = old_urlopen - -# A valid website for more robust testing -http_path = 'http://www.google.com/' -http_file = 'index.html' - -http_fakepath = 'http://fake.abc.web/site/' -http_fakefile = 'fake.txt' - -malicious_files = ['/etc/shadow', '../../shadow', - '..\\system.dat', 'c:\\windows\\system.dat'] - -magic_line = b'three is the magic number' - - -# Utility functions used by many tests -def valid_textfile(filedir): - # Generate and return a valid temporary file. 
- fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir, text=True) - os.close(fd) - return path - - -def invalid_textfile(filedir): - # Generate and return an invalid filename. - fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir) - os.close(fd) - os.remove(path) - return path - - -def valid_httpurl(): - return http_path+http_file - - -def invalid_httpurl(): - return http_fakepath+http_fakefile - - -def valid_baseurl(): - return http_path - - -def invalid_baseurl(): - return http_fakepath - - -def valid_httpfile(): - return http_file - - -def invalid_httpfile(): - return http_fakefile - - -class TestDataSourceOpen(object): - def setup(self): - self.tmpdir = mkdtemp() - self.ds = datasource.DataSource(self.tmpdir) - - def teardown(self): - rmtree(self.tmpdir) - del self.ds - - def test_ValidHTTP(self): - fh = self.ds.open(valid_httpurl()) - assert_(fh) - fh.close() - - def test_InvalidHTTP(self): - url = invalid_httpurl() - assert_raises(IOError, self.ds.open, url) - try: - self.ds.open(url) - except IOError as e: - # Regression test for bug fixed in r4342. - assert_(e.errno is None) - - def test_InvalidHTTPCacheURLError(self): - assert_raises(URLError, self.ds._cache, invalid_httpurl()) - - def test_ValidFile(self): - local_file = valid_textfile(self.tmpdir) - fh = self.ds.open(local_file) - assert_(fh) - fh.close() - - def test_InvalidFile(self): - invalid_file = invalid_textfile(self.tmpdir) - assert_raises(IOError, self.ds.open, invalid_file) - - def test_ValidGzipFile(self): - try: - import gzip - except ImportError: - # We don't have the gzip capabilities to test. - pytest.skip() - # Test datasource's internal file_opener for Gzip files. 
- filepath = os.path.join(self.tmpdir, 'foobar.txt.gz') - fp = gzip.open(filepath, 'w') - fp.write(magic_line) - fp.close() - fp = self.ds.open(filepath) - result = fp.readline() - fp.close() - assert_equal(magic_line, result) - - def test_ValidBz2File(self): - try: - import bz2 - except ImportError: - # We don't have the bz2 capabilities to test. - pytest.skip() - # Test datasource's internal file_opener for BZip2 files. - filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2') - fp = bz2.BZ2File(filepath, 'w') - fp.write(magic_line) - fp.close() - fp = self.ds.open(filepath) - result = fp.readline() - fp.close() - assert_equal(magic_line, result) - - @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Python 2 only") - def test_Bz2File_text_mode_warning(self): - try: - import bz2 - except ImportError: - # We don't have the bz2 capabilities to test. - pytest.skip() - # Test datasource's internal file_opener for BZip2 files. - filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2') - fp = bz2.BZ2File(filepath, 'w') - fp.write(magic_line) - fp.close() - with assert_warns(RuntimeWarning): - fp = self.ds.open(filepath, 'rt') - result = fp.readline() - fp.close() - assert_equal(magic_line, result) - - -class TestDataSourceExists(object): - def setup(self): - self.tmpdir = mkdtemp() - self.ds = datasource.DataSource(self.tmpdir) - - def teardown(self): - rmtree(self.tmpdir) - del self.ds - - def test_ValidHTTP(self): - assert_(self.ds.exists(valid_httpurl())) - - def test_InvalidHTTP(self): - assert_equal(self.ds.exists(invalid_httpurl()), False) - - def test_ValidFile(self): - # Test valid file in destpath - tmpfile = valid_textfile(self.tmpdir) - assert_(self.ds.exists(tmpfile)) - # Test valid local file not in destpath - localdir = mkdtemp() - tmpfile = valid_textfile(localdir) - assert_(self.ds.exists(tmpfile)) - rmtree(localdir) - - def test_InvalidFile(self): - tmpfile = invalid_textfile(self.tmpdir) - assert_equal(self.ds.exists(tmpfile), False) - - -class 
TestDataSourceAbspath(object): - def setup(self): - self.tmpdir = os.path.abspath(mkdtemp()) - self.ds = datasource.DataSource(self.tmpdir) - - def teardown(self): - rmtree(self.tmpdir) - del self.ds - - def test_ValidHTTP(self): - scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) - local_path = os.path.join(self.tmpdir, netloc, - upath.strip(os.sep).strip('/')) - assert_equal(local_path, self.ds.abspath(valid_httpurl())) - - def test_ValidFile(self): - tmpfile = valid_textfile(self.tmpdir) - tmpfilename = os.path.split(tmpfile)[-1] - # Test with filename only - assert_equal(tmpfile, self.ds.abspath(tmpfilename)) - # Test filename with complete path - assert_equal(tmpfile, self.ds.abspath(tmpfile)) - - def test_InvalidHTTP(self): - scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl()) - invalidhttp = os.path.join(self.tmpdir, netloc, - upath.strip(os.sep).strip('/')) - assert_(invalidhttp != self.ds.abspath(valid_httpurl())) - - def test_InvalidFile(self): - invalidfile = valid_textfile(self.tmpdir) - tmpfile = valid_textfile(self.tmpdir) - tmpfilename = os.path.split(tmpfile)[-1] - # Test with filename only - assert_(invalidfile != self.ds.abspath(tmpfilename)) - # Test filename with complete path - assert_(invalidfile != self.ds.abspath(tmpfile)) - - def test_sandboxing(self): - tmpfile = valid_textfile(self.tmpdir) - tmpfilename = os.path.split(tmpfile)[-1] - - tmp_path = lambda x: os.path.abspath(self.ds.abspath(x)) - - assert_(tmp_path(valid_httpurl()).startswith(self.tmpdir)) - assert_(tmp_path(invalid_httpurl()).startswith(self.tmpdir)) - assert_(tmp_path(tmpfile).startswith(self.tmpdir)) - assert_(tmp_path(tmpfilename).startswith(self.tmpdir)) - for fn in malicious_files: - assert_(tmp_path(http_path+fn).startswith(self.tmpdir)) - assert_(tmp_path(fn).startswith(self.tmpdir)) - - def test_windows_os_sep(self): - orig_os_sep = os.sep - try: - os.sep = '\\' - self.test_ValidHTTP() - self.test_ValidFile() - self.test_InvalidHTTP() - 
self.test_InvalidFile() - self.test_sandboxing() - finally: - os.sep = orig_os_sep - - -class TestRepositoryAbspath(object): - def setup(self): - self.tmpdir = os.path.abspath(mkdtemp()) - self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) - - def teardown(self): - rmtree(self.tmpdir) - del self.repos - - def test_ValidHTTP(self): - scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) - local_path = os.path.join(self.repos._destpath, netloc, - upath.strip(os.sep).strip('/')) - filepath = self.repos.abspath(valid_httpfile()) - assert_equal(local_path, filepath) - - def test_sandboxing(self): - tmp_path = lambda x: os.path.abspath(self.repos.abspath(x)) - assert_(tmp_path(valid_httpfile()).startswith(self.tmpdir)) - for fn in malicious_files: - assert_(tmp_path(http_path+fn).startswith(self.tmpdir)) - assert_(tmp_path(fn).startswith(self.tmpdir)) - - def test_windows_os_sep(self): - orig_os_sep = os.sep - try: - os.sep = '\\' - self.test_ValidHTTP() - self.test_sandboxing() - finally: - os.sep = orig_os_sep - - -class TestRepositoryExists(object): - def setup(self): - self.tmpdir = mkdtemp() - self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) - - def teardown(self): - rmtree(self.tmpdir) - del self.repos - - def test_ValidFile(self): - # Create local temp file - tmpfile = valid_textfile(self.tmpdir) - assert_(self.repos.exists(tmpfile)) - - def test_InvalidFile(self): - tmpfile = invalid_textfile(self.tmpdir) - assert_equal(self.repos.exists(tmpfile), False) - - def test_RemoveHTTPFile(self): - assert_(self.repos.exists(valid_httpurl())) - - def test_CachedHTTPFile(self): - localfile = valid_httpurl() - # Create a locally cached temp file with an URL based - # directory structure. This is similar to what Repository.open - # would do. 
- scheme, netloc, upath, pms, qry, frg = urlparse(localfile) - local_path = os.path.join(self.repos._destpath, netloc) - os.mkdir(local_path, 0o0700) - tmpfile = valid_textfile(local_path) - assert_(self.repos.exists(tmpfile)) - - -class TestOpenFunc(object): - def setup(self): - self.tmpdir = mkdtemp() - - def teardown(self): - rmtree(self.tmpdir) - - def test_DataSourceOpen(self): - local_file = valid_textfile(self.tmpdir) - # Test case where destpath is passed in - fp = datasource.open(local_file, destpath=self.tmpdir) - assert_(fp) - fp.close() - # Test case where default destpath is used - fp = datasource.open(local_file) - assert_(fp) - fp.close() - -def test_del_attr_handling(): - # DataSource __del__ can be called - # even if __init__ fails when the - # Exception object is caught by the - # caller as happens in refguide_check - # is_deprecated() function - - ds = datasource.DataSource() - # simulate failed __init__ by removing key attribute - # produced within __init__ and expected by __del__ - del ds._istmpdest - # should not raise an AttributeError if __del__ - # gracefully handles failed __init__: - ds.__del__() diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/test__iotools.py b/venv/lib/python3.7/site-packages/numpy/lib/tests/test__iotools.py deleted file mode 100644 index 15cd3ad..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/tests/test__iotools.py +++ /dev/null @@ -1,356 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import time -from datetime import date - -import numpy as np -from numpy.testing import ( - assert_, assert_equal, assert_allclose, assert_raises, - ) -from numpy.lib._iotools import ( - LineSplitter, NameValidator, StringConverter, - has_nested_fields, easy_dtype, flatten_dtype - ) -from numpy.compat import unicode - - -class TestLineSplitter(object): - "Tests the LineSplitter class." 
- - def test_no_delimiter(self): - "Test LineSplitter w/o delimiter" - strg = " 1 2 3 4 5 # test" - test = LineSplitter()(strg) - assert_equal(test, ['1', '2', '3', '4', '5']) - test = LineSplitter('')(strg) - assert_equal(test, ['1', '2', '3', '4', '5']) - - def test_space_delimiter(self): - "Test space delimiter" - strg = " 1 2 3 4 5 # test" - test = LineSplitter(' ')(strg) - assert_equal(test, ['1', '2', '3', '4', '', '5']) - test = LineSplitter(' ')(strg) - assert_equal(test, ['1 2 3 4', '5']) - - def test_tab_delimiter(self): - "Test tab delimiter" - strg = " 1\t 2\t 3\t 4\t 5 6" - test = LineSplitter('\t')(strg) - assert_equal(test, ['1', '2', '3', '4', '5 6']) - strg = " 1 2\t 3 4\t 5 6" - test = LineSplitter('\t')(strg) - assert_equal(test, ['1 2', '3 4', '5 6']) - - def test_other_delimiter(self): - "Test LineSplitter on delimiter" - strg = "1,2,3,4,,5" - test = LineSplitter(',')(strg) - assert_equal(test, ['1', '2', '3', '4', '', '5']) - # - strg = " 1,2,3,4,,5 # test" - test = LineSplitter(',')(strg) - assert_equal(test, ['1', '2', '3', '4', '', '5']) - - # gh-11028 bytes comment/delimiters should get encoded - strg = b" 1,2,3,4,,5 % test" - test = LineSplitter(delimiter=b',', comments=b'%')(strg) - assert_equal(test, ['1', '2', '3', '4', '', '5']) - - def test_constant_fixed_width(self): - "Test LineSplitter w/ fixed-width fields" - strg = " 1 2 3 4 5 # test" - test = LineSplitter(3)(strg) - assert_equal(test, ['1', '2', '3', '4', '', '5', '']) - # - strg = " 1 3 4 5 6# test" - test = LineSplitter(20)(strg) - assert_equal(test, ['1 3 4 5 6']) - # - strg = " 1 3 4 5 6# test" - test = LineSplitter(30)(strg) - assert_equal(test, ['1 3 4 5 6']) - - def test_variable_fixed_width(self): - strg = " 1 3 4 5 6# test" - test = LineSplitter((3, 6, 6, 3))(strg) - assert_equal(test, ['1', '3', '4 5', '6']) - # - strg = " 1 3 4 5 6# test" - test = LineSplitter((6, 6, 9))(strg) - assert_equal(test, ['1', '3 4', '5 6']) - -# 
----------------------------------------------------------------------------- - - -class TestNameValidator(object): - - def test_case_sensitivity(self): - "Test case sensitivity" - names = ['A', 'a', 'b', 'c'] - test = NameValidator().validate(names) - assert_equal(test, ['A', 'a', 'b', 'c']) - test = NameValidator(case_sensitive=False).validate(names) - assert_equal(test, ['A', 'A_1', 'B', 'C']) - test = NameValidator(case_sensitive='upper').validate(names) - assert_equal(test, ['A', 'A_1', 'B', 'C']) - test = NameValidator(case_sensitive='lower').validate(names) - assert_equal(test, ['a', 'a_1', 'b', 'c']) - - # check exceptions - assert_raises(ValueError, NameValidator, case_sensitive='foobar') - - def test_excludelist(self): - "Test excludelist" - names = ['dates', 'data', 'Other Data', 'mask'] - validator = NameValidator(excludelist=['dates', 'data', 'mask']) - test = validator.validate(names) - assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_']) - - def test_missing_names(self): - "Test validate missing names" - namelist = ('a', 'b', 'c') - validator = NameValidator() - assert_equal(validator(namelist), ['a', 'b', 'c']) - namelist = ('', 'b', 'c') - assert_equal(validator(namelist), ['f0', 'b', 'c']) - namelist = ('a', 'b', '') - assert_equal(validator(namelist), ['a', 'b', 'f0']) - namelist = ('', 'f0', '') - assert_equal(validator(namelist), ['f1', 'f0', 'f2']) - - def test_validate_nb_names(self): - "Test validate nb names" - namelist = ('a', 'b', 'c') - validator = NameValidator() - assert_equal(validator(namelist, nbfields=1), ('a',)) - assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"), - ['a', 'b', 'c', 'g0', 'g1']) - - def test_validate_wo_names(self): - "Test validate no names" - namelist = None - validator = NameValidator() - assert_(validator(namelist) is None) - assert_equal(validator(namelist, nbfields=3), ['f0', 'f1', 'f2']) - -# ----------------------------------------------------------------------------- - - -def 
_bytes_to_date(s): - return date(*time.strptime(s, "%Y-%m-%d")[:3]) - - -class TestStringConverter(object): - "Test StringConverter" - - def test_creation(self): - "Test creation of a StringConverter" - converter = StringConverter(int, -99999) - assert_equal(converter._status, 1) - assert_equal(converter.default, -99999) - - def test_upgrade(self): - "Tests the upgrade method." - - converter = StringConverter() - assert_equal(converter._status, 0) - - # test int - assert_equal(converter.upgrade('0'), 0) - assert_equal(converter._status, 1) - - # On systems where long defaults to 32-bit, the statuses will be - # offset by one, so we check for this here. - import numpy.core.numeric as nx - status_offset = int(nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize) - - # test int > 2**32 - assert_equal(converter.upgrade('17179869184'), 17179869184) - assert_equal(converter._status, 1 + status_offset) - - # test float - assert_allclose(converter.upgrade('0.'), 0.0) - assert_equal(converter._status, 2 + status_offset) - - # test complex - assert_equal(converter.upgrade('0j'), complex('0j')) - assert_equal(converter._status, 3 + status_offset) - - # test str - # note that the longdouble type has been skipped, so the - # _status increases by 2. Everything should succeed with - # unicode conversion (5). - for s in ['a', u'a', b'a']: - res = converter.upgrade(s) - assert_(type(res) is unicode) - assert_equal(res, u'a') - assert_equal(converter._status, 5 + status_offset) - - def test_missing(self): - "Tests the use of missing values." 
- converter = StringConverter(missing_values=('missing', - 'missed')) - converter.upgrade('0') - assert_equal(converter('0'), 0) - assert_equal(converter(''), converter.default) - assert_equal(converter('missing'), converter.default) - assert_equal(converter('missed'), converter.default) - try: - converter('miss') - except ValueError: - pass - - def test_upgrademapper(self): - "Tests updatemapper" - dateparser = _bytes_to_date - _original_mapper = StringConverter._mapper[:] - try: - StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1)) - convert = StringConverter(dateparser, date(2000, 1, 1)) - test = convert('2001-01-01') - assert_equal(test, date(2001, 1, 1)) - test = convert('2009-01-01') - assert_equal(test, date(2009, 1, 1)) - test = convert('') - assert_equal(test, date(2000, 1, 1)) - finally: - StringConverter._mapper = _original_mapper - - def test_string_to_object(self): - "Make sure that string-to-object functions are properly recognized" - old_mapper = StringConverter._mapper[:] # copy of list - conv = StringConverter(_bytes_to_date) - assert_equal(conv._mapper, old_mapper) - assert_(hasattr(conv, 'default')) - - def test_keep_default(self): - "Make sure we don't lose an explicit default" - converter = StringConverter(None, missing_values='', - default=-999) - converter.upgrade('3.14159265') - assert_equal(converter.default, -999) - assert_equal(converter.type, np.dtype(float)) - # - converter = StringConverter( - None, missing_values='', default=0) - converter.upgrade('3.14159265') - assert_equal(converter.default, 0) - assert_equal(converter.type, np.dtype(float)) - - def test_keep_default_zero(self): - "Check that we don't lose a default of 0" - converter = StringConverter(int, default=0, - missing_values="N/A") - assert_equal(converter.default, 0) - - def test_keep_missing_values(self): - "Check that we're not losing missing values" - converter = StringConverter(int, default=0, - missing_values="N/A") - assert_equal( - 
converter.missing_values, {'', 'N/A'}) - - def test_int64_dtype(self): - "Check that int64 integer types can be specified" - converter = StringConverter(np.int64, default=0) - val = "-9223372036854775807" - assert_(converter(val) == -9223372036854775807) - val = "9223372036854775807" - assert_(converter(val) == 9223372036854775807) - - def test_uint64_dtype(self): - "Check that uint64 integer types can be specified" - converter = StringConverter(np.uint64, default=0) - val = "9223372043271415339" - assert_(converter(val) == 9223372043271415339) - - -class TestMiscFunctions(object): - - def test_has_nested_dtype(self): - "Test has_nested_dtype" - ndtype = np.dtype(float) - assert_equal(has_nested_fields(ndtype), False) - ndtype = np.dtype([('A', '|S3'), ('B', float)]) - assert_equal(has_nested_fields(ndtype), False) - ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) - assert_equal(has_nested_fields(ndtype), True) - - def test_easy_dtype(self): - "Test ndtype on dtypes" - # Simple case - ndtype = float - assert_equal(easy_dtype(ndtype), np.dtype(float)) - # As string w/o names - ndtype = "i4, f8" - assert_equal(easy_dtype(ndtype), - np.dtype([('f0', "i4"), ('f1', "f8")])) - # As string w/o names but different default format - assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"), - np.dtype([('field_000', "i4"), ('field_001', "f8")])) - # As string w/ names - ndtype = "i4, f8" - assert_equal(easy_dtype(ndtype, names="a, b"), - np.dtype([('a', "i4"), ('b', "f8")])) - # As string w/ names (too many) - ndtype = "i4, f8" - assert_equal(easy_dtype(ndtype, names="a, b, c"), - np.dtype([('a', "i4"), ('b', "f8")])) - # As string w/ names (not enough) - ndtype = "i4, f8" - assert_equal(easy_dtype(ndtype, names=", b"), - np.dtype([('f0', "i4"), ('b', "f8")])) - # ... 
(with different default format) - assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"), - np.dtype([('a', "i4"), ('f00', "f8")])) - # As list of tuples w/o names - ndtype = [('A', int), ('B', float)] - assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)])) - # As list of tuples w/ names - assert_equal(easy_dtype(ndtype, names="a,b"), - np.dtype([('a', int), ('b', float)])) - # As list of tuples w/ not enough names - assert_equal(easy_dtype(ndtype, names="a"), - np.dtype([('a', int), ('f0', float)])) - # As list of tuples w/ too many names - assert_equal(easy_dtype(ndtype, names="a,b,c"), - np.dtype([('a', int), ('b', float)])) - # As list of types w/o names - ndtype = (int, float, float) - assert_equal(easy_dtype(ndtype), - np.dtype([('f0', int), ('f1', float), ('f2', float)])) - # As list of types w names - ndtype = (int, float, float) - assert_equal(easy_dtype(ndtype, names="a, b, c"), - np.dtype([('a', int), ('b', float), ('c', float)])) - # As simple dtype w/ names - ndtype = np.dtype(float) - assert_equal(easy_dtype(ndtype, names="a, b, c"), - np.dtype([(_, float) for _ in ('a', 'b', 'c')])) - # As simple dtype w/o names (but multiple fields) - ndtype = np.dtype(float) - assert_equal( - easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"), - np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')])) - - def test_flatten_dtype(self): - "Testing flatten_dtype" - # Standard dtype - dt = np.dtype([("a", "f8"), ("b", "f8")]) - dt_flat = flatten_dtype(dt) - assert_equal(dt_flat, [float, float]) - # Recursive dtype - dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)]) - dt_flat = flatten_dtype(dt) - assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int]) - # dtype with shaped fields - dt = np.dtype([("a", (float, 2)), ("b", (int, 3))]) - dt_flat = flatten_dtype(dt) - assert_equal(dt_flat, [float, int]) - dt_flat = flatten_dtype(dt, True) - assert_equal(dt_flat, [float] * 2 + [int] * 3) - # dtype w/ titles - dt = 
np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")]) - dt_flat = flatten_dtype(dt) - assert_equal(dt_flat, [float, float]) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/test__version.py b/venv/lib/python3.7/site-packages/numpy/lib/tests/test__version.py deleted file mode 100644 index 8e66a0c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/tests/test__version.py +++ /dev/null @@ -1,66 +0,0 @@ -"""Tests for the NumpyVersion class. - -""" -from __future__ import division, absolute_import, print_function - -from numpy.testing import assert_, assert_raises -from numpy.lib import NumpyVersion - - -def test_main_versions(): - assert_(NumpyVersion('1.8.0') == '1.8.0') - for ver in ['1.9.0', '2.0.0', '1.8.1']: - assert_(NumpyVersion('1.8.0') < ver) - - for ver in ['1.7.0', '1.7.1', '0.9.9']: - assert_(NumpyVersion('1.8.0') > ver) - - -def test_version_1_point_10(): - # regression test for gh-2998. - assert_(NumpyVersion('1.9.0') < '1.10.0') - assert_(NumpyVersion('1.11.0') < '1.11.1') - assert_(NumpyVersion('1.11.0') == '1.11.0') - assert_(NumpyVersion('1.99.11') < '1.99.12') - - -def test_alpha_beta_rc(): - assert_(NumpyVersion('1.8.0rc1') == '1.8.0rc1') - for ver in ['1.8.0', '1.8.0rc2']: - assert_(NumpyVersion('1.8.0rc1') < ver) - - for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']: - assert_(NumpyVersion('1.8.0rc1') > ver) - - assert_(NumpyVersion('1.8.0b1') > '1.8.0a2') - - -def test_dev_version(): - assert_(NumpyVersion('1.9.0.dev-Unknown') < '1.9.0') - for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev-ffffffff']: - assert_(NumpyVersion('1.9.0.dev-f16acvda') < ver) - - assert_(NumpyVersion('1.9.0.dev-f16acvda') == '1.9.0.dev-11111111') - - -def test_dev_a_b_rc_mixed(): - assert_(NumpyVersion('1.9.0a2.dev-f16acvda') == '1.9.0a2.dev-11111111') - assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2') - - -def test_dev0_version(): - assert_(NumpyVersion('1.9.0.dev0+Unknown') < '1.9.0') - for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', 
'1.9.0b2.dev0+ffffffff']: - assert_(NumpyVersion('1.9.0.dev0+f16acvda') < ver) - - assert_(NumpyVersion('1.9.0.dev0+f16acvda') == '1.9.0.dev0+11111111') - - -def test_dev0_a_b_rc_mixed(): - assert_(NumpyVersion('1.9.0a2.dev0+f16acvda') == '1.9.0a2.dev0+11111111') - assert_(NumpyVersion('1.9.0a2.dev0+6acvda54') < '1.9.0a2') - - -def test_raises(): - for ver in ['1.9', '1,9.0', '1.7.x']: - assert_raises(ValueError, NumpyVersion, ver) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_arraypad.py b/venv/lib/python3.7/site-packages/numpy/lib/tests/test_arraypad.py deleted file mode 100644 index 65593dd..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_arraypad.py +++ /dev/null @@ -1,1361 +0,0 @@ -"""Tests for the array padding functions. - -""" -from __future__ import division, absolute_import, print_function - -import pytest - -import numpy as np -from numpy.testing import assert_array_equal, assert_allclose, assert_equal -from numpy.lib.arraypad import _as_pairs - - -_numeric_dtypes = ( - np.sctypes["uint"] - + np.sctypes["int"] - + np.sctypes["float"] - + np.sctypes["complex"] -) -_all_modes = { - 'constant': {'constant_values': 0}, - 'edge': {}, - 'linear_ramp': {'end_values': 0}, - 'maximum': {'stat_length': None}, - 'mean': {'stat_length': None}, - 'median': {'stat_length': None}, - 'minimum': {'stat_length': None}, - 'reflect': {'reflect_type': 'even'}, - 'symmetric': {'reflect_type': 'even'}, - 'wrap': {}, - 'empty': {} -} - - -class TestAsPairs(object): - def test_single_value(self): - """Test casting for a single value.""" - expected = np.array([[3, 3]] * 10) - for x in (3, [3], [[3]]): - result = _as_pairs(x, 10) - assert_equal(result, expected) - # Test with dtype=object - obj = object() - assert_equal( - _as_pairs(obj, 10), - np.array([[obj, obj]] * 10) - ) - - def test_two_values(self): - """Test proper casting for two different values.""" - # Broadcasting in the first dimension with numbers - expected = np.array([[3, 
4]] * 10) - for x in ([3, 4], [[3, 4]]): - result = _as_pairs(x, 10) - assert_equal(result, expected) - # and with dtype=object - obj = object() - assert_equal( - _as_pairs(["a", obj], 10), - np.array([["a", obj]] * 10) - ) - - # Broadcasting in the second / last dimension with numbers - assert_equal( - _as_pairs([[3], [4]], 2), - np.array([[3, 3], [4, 4]]) - ) - # and with dtype=object - assert_equal( - _as_pairs([["a"], [obj]], 2), - np.array([["a", "a"], [obj, obj]]) - ) - - def test_with_none(self): - expected = ((None, None), (None, None), (None, None)) - assert_equal( - _as_pairs(None, 3, as_index=False), - expected - ) - assert_equal( - _as_pairs(None, 3, as_index=True), - expected - ) - - def test_pass_through(self): - """Test if `x` already matching desired output are passed through.""" - expected = np.arange(12).reshape((6, 2)) - assert_equal( - _as_pairs(expected, 6), - expected - ) - - def test_as_index(self): - """Test results if `as_index=True`.""" - assert_equal( - _as_pairs([2.6, 3.3], 10, as_index=True), - np.array([[3, 3]] * 10, dtype=np.intp) - ) - assert_equal( - _as_pairs([2.6, 4.49], 10, as_index=True), - np.array([[3, 4]] * 10, dtype=np.intp) - ) - for x in (-3, [-3], [[-3]], [-3, 4], [3, -4], [[-3, 4]], [[4, -3]], - [[1, 2]] * 9 + [[1, -2]]): - with pytest.raises(ValueError, match="negative values"): - _as_pairs(x, 10, as_index=True) - - def test_exceptions(self): - """Ensure faulty usage is discovered.""" - with pytest.raises(ValueError, match="more dimensions than allowed"): - _as_pairs([[[3]]], 10) - with pytest.raises(ValueError, match="could not be broadcast"): - _as_pairs([[1, 2], [3, 4]], 3) - with pytest.raises(ValueError, match="could not be broadcast"): - _as_pairs(np.ones((2, 3)), 3) - - -class TestConditionalShortcuts(object): - @pytest.mark.parametrize("mode", _all_modes.keys()) - def test_zero_padding_shortcuts(self, mode): - test = np.arange(120).reshape(4, 5, 6) - pad_amt = [(0, 0) for _ in test.shape] - 
assert_array_equal(test, np.pad(test, pad_amt, mode=mode)) - - @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',]) - def test_shallow_statistic_range(self, mode): - test = np.arange(120).reshape(4, 5, 6) - pad_amt = [(1, 1) for _ in test.shape] - assert_array_equal(np.pad(test, pad_amt, mode='edge'), - np.pad(test, pad_amt, mode=mode, stat_length=1)) - - @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',]) - def test_clip_statistic_range(self, mode): - test = np.arange(30).reshape(5, 6) - pad_amt = [(3, 3) for _ in test.shape] - assert_array_equal(np.pad(test, pad_amt, mode=mode), - np.pad(test, pad_amt, mode=mode, stat_length=30)) - - -class TestStatistic(object): - def test_check_mean_stat_length(self): - a = np.arange(100).astype('f') - a = np.pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), )) - b = np.array( - [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, - 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, - 0.5, 0.5, 0.5, 0.5, 0.5, - - 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., - 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., - 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., - 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., - 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., - 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., - 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., - 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., - 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., - 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., - - 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., - 98., 98., 98., 98., 98., 98., 98., 98., 98., 98. 
- ]) - assert_array_equal(a, b) - - def test_check_maximum_1(self): - a = np.arange(100) - a = np.pad(a, (25, 20), 'maximum') - b = np.array( - [99, 99, 99, 99, 99, 99, 99, 99, 99, 99, - 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, - 99, 99, 99, 99, 99, - - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, - 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, - 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, - 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, - 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, - - 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, - 99, 99, 99, 99, 99, 99, 99, 99, 99, 99] - ) - assert_array_equal(a, b) - - def test_check_maximum_2(self): - a = np.arange(100) + 1 - a = np.pad(a, (25, 20), 'maximum') - b = np.array( - [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, - 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, - 100, 100, 100, 100, 100, - - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, - 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, - 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, - 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, - 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, - 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, - 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, - 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, - 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, - 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, - - 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, - 100, 100, 100, 100, 100, 100, 100, 100, 100, 100] - ) - assert_array_equal(a, b) - - def test_check_maximum_stat_length(self): - a = np.arange(100) + 1 - a = np.pad(a, (25, 20), 'maximum', stat_length=10) - b = np.array( - [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, - 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, - 10, 10, 10, 10, 10, - - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, - 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, - 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, - 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, - 41, 42, 43, 
44, 45, 46, 47, 48, 49, 50, - 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, - 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, - 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, - 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, - 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, - - 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, - 100, 100, 100, 100, 100, 100, 100, 100, 100, 100] - ) - assert_array_equal(a, b) - - def test_check_minimum_1(self): - a = np.arange(100) - a = np.pad(a, (25, 20), 'minimum') - b = np.array( - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, - - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, - 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, - 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, - 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, - 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, - - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - ) - assert_array_equal(a, b) - - def test_check_minimum_2(self): - a = np.arange(100) + 2 - a = np.pad(a, (25, 20), 'minimum') - b = np.array( - [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, - - 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, - 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, - 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, - 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, - 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, - 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, - 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, - 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, - - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] - ) - assert_array_equal(a, b) - - def test_check_minimum_stat_length(self): - a = np.arange(100) + 1 - a = np.pad(a, (25, 20), 'minimum', stat_length=10) - b = np.array( - [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 
1, 1, 1, - - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, - 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, - 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, - 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, - 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, - 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, - 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, - 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, - 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, - 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, - - 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, - 91, 91, 91, 91, 91, 91, 91, 91, 91, 91] - ) - assert_array_equal(a, b) - - def test_check_median(self): - a = np.arange(100).astype('f') - a = np.pad(a, (25, 20), 'median') - b = np.array( - [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, - 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, - 49.5, 49.5, 49.5, 49.5, 49.5, - - 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., - 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., - 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., - 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., - 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., - 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., - 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., - 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., - 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., - 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., - - 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, - 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5] - ) - assert_array_equal(a, b) - - def test_check_median_01(self): - a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]]) - a = np.pad(a, 1, 'median') - b = np.array( - [[4, 4, 5, 4, 4], - - [3, 3, 1, 4, 3], - [5, 4, 5, 9, 5], - [8, 9, 8, 2, 8], - - [4, 4, 5, 4, 4]] - ) - assert_array_equal(a, b) - - def test_check_median_02(self): - a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]]) - a = np.pad(a.T, 1, 'median').T - b = np.array( - [[5, 4, 5, 4, 5], - - [3, 3, 1, 4, 3], - [5, 4, 5, 9, 5], - [8, 9, 8, 2, 8], - - [5, 4, 5, 4, 5]] - ) - 
assert_array_equal(a, b) - - def test_check_median_stat_length(self): - a = np.arange(100).astype('f') - a[1] = 2. - a[97] = 96. - a = np.pad(a, (25, 20), 'median', stat_length=(3, 5)) - b = np.array( - [ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., - 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., - 2., 2., 2., 2., 2., - - 0., 2., 2., 3., 4., 5., 6., 7., 8., 9., - 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., - 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., - 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., - 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., - 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., - 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., - 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., - 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., - 90., 91., 92., 93., 94., 95., 96., 96., 98., 99., - - 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., - 96., 96., 96., 96., 96., 96., 96., 96., 96., 96.] - ) - assert_array_equal(a, b) - - def test_check_mean_shape_one(self): - a = [[4, 5, 6]] - a = np.pad(a, (5, 7), 'mean', stat_length=2) - b = np.array( - [[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], - [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], - [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], - [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], - [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], - - [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], - - [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], - [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], - [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], - [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], - [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], - [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], - [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]] - ) - assert_array_equal(a, b) - - def test_check_mean_2(self): - a = np.arange(100).astype('f') - a = np.pad(a, (25, 20), 'mean') - b = np.array( - [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, - 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, - 
49.5, 49.5, 49.5, 49.5, 49.5, - - 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., - 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., - 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., - 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., - 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., - 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., - 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., - 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., - 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., - 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., - - 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, - 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5] - ) - assert_array_equal(a, b) - - @pytest.mark.parametrize("mode", [ - "mean", - "median", - "minimum", - "maximum" - ]) - def test_same_prepend_append(self, mode): - """ Test that appended and prepended values are equal """ - # This test is constructed to trigger floating point rounding errors in - # a way that caused gh-11216 for mode=='mean' - a = np.array([-1, 2, -1]) + np.array([0, 1e-12, 0], dtype=np.float64) - a = np.pad(a, (1, 1), mode) - assert_equal(a[0], a[-1]) - - @pytest.mark.parametrize("mode", ["mean", "median", "minimum", "maximum"]) - @pytest.mark.parametrize( - "stat_length", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))] - ) - def test_check_negative_stat_length(self, mode, stat_length): - arr = np.arange(30).reshape((6, 5)) - match = "index can't contain negative values" - with pytest.raises(ValueError, match=match): - np.pad(arr, 2, mode, stat_length=stat_length) - - def test_simple_stat_length(self): - a = np.arange(30) - a = np.reshape(a, (6, 5)) - a = np.pad(a, ((2, 3), (3, 2)), mode='mean', stat_length=(3,)) - b = np.array( - [[6, 6, 6, 5, 6, 7, 8, 9, 8, 8], - [6, 6, 6, 5, 6, 7, 8, 9, 8, 8], - - [1, 1, 1, 0, 1, 2, 3, 4, 3, 3], - [6, 6, 6, 5, 6, 7, 8, 9, 8, 8], - [11, 11, 11, 10, 11, 12, 13, 14, 13, 13], - [16, 16, 16, 15, 16, 17, 18, 19, 18, 18], - [21, 21, 21, 20, 21, 22, 23, 
24, 23, 23], - [26, 26, 26, 25, 26, 27, 28, 29, 28, 28], - - [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], - [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], - [21, 21, 21, 20, 21, 22, 23, 24, 23, 23]] - ) - assert_array_equal(a, b) - - @pytest.mark.filterwarnings("ignore:Mean of empty slice:RuntimeWarning") - @pytest.mark.filterwarnings( - "ignore:invalid value encountered in (true_divide|double_scalars):" - "RuntimeWarning" - ) - @pytest.mark.parametrize("mode", ["mean", "median"]) - def test_zero_stat_length_valid(self, mode): - arr = np.pad([1., 2.], (1, 2), mode, stat_length=0) - expected = np.array([np.nan, 1., 2., np.nan, np.nan]) - assert_equal(arr, expected) - - @pytest.mark.parametrize("mode", ["minimum", "maximum"]) - def test_zero_stat_length_invalid(self, mode): - match = "stat_length of 0 yields no value for padding" - with pytest.raises(ValueError, match=match): - np.pad([1., 2.], 0, mode, stat_length=0) - with pytest.raises(ValueError, match=match): - np.pad([1., 2.], 0, mode, stat_length=(1, 0)) - with pytest.raises(ValueError, match=match): - np.pad([1., 2.], 1, mode, stat_length=0) - with pytest.raises(ValueError, match=match): - np.pad([1., 2.], 1, mode, stat_length=(1, 0)) - - -class TestConstant(object): - def test_check_constant(self): - a = np.arange(100) - a = np.pad(a, (25, 20), 'constant', constant_values=(10, 20)) - b = np.array( - [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, - 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, - 10, 10, 10, 10, 10, - - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, - 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, - 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, - 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, - 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, - - 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, - 20, 20, 20, 20, 20, 20, 20, 20, 20, 20] - ) - 
assert_array_equal(a, b) - - def test_check_constant_zeros(self): - a = np.arange(100) - a = np.pad(a, (25, 20), 'constant') - b = np.array( - [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, - - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, - 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, - 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, - 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, - 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, - - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - ) - assert_array_equal(a, b) - - def test_check_constant_float(self): - # If input array is int, but constant_values are float, the dtype of - # the array to be padded is kept - arr = np.arange(30).reshape(5, 6) - test = np.pad(arr, (1, 2), mode='constant', - constant_values=1.1) - expected = np.array( - [[ 1, 1, 1, 1, 1, 1, 1, 1, 1], - - [ 1, 0, 1, 2, 3, 4, 5, 1, 1], - [ 1, 6, 7, 8, 9, 10, 11, 1, 1], - [ 1, 12, 13, 14, 15, 16, 17, 1, 1], - [ 1, 18, 19, 20, 21, 22, 23, 1, 1], - [ 1, 24, 25, 26, 27, 28, 29, 1, 1], - - [ 1, 1, 1, 1, 1, 1, 1, 1, 1], - [ 1, 1, 1, 1, 1, 1, 1, 1, 1]] - ) - assert_allclose(test, expected) - - def test_check_constant_float2(self): - # If input array is float, and constant_values are float, the dtype of - # the array to be padded is kept - here retaining the float constants - arr = np.arange(30).reshape(5, 6) - arr_float = arr.astype(np.float64) - test = np.pad(arr_float, ((1, 2), (1, 2)), mode='constant', - constant_values=1.1) - expected = np.array( - [[ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1], - - [ 1.1, 0. , 1. , 2. , 3. , 4. , 5. , 1.1, 1.1], - [ 1.1, 6. , 7. , 8. , 9. , 10. , 11. , 1.1, 1.1], - [ 1.1, 12. , 13. , 14. , 15. , 16. , 17. , 1.1, 1.1], - [ 1.1, 18. , 19. , 20. , 21. , 22. , 23. , 1.1, 1.1], - [ 1.1, 24. , 25. , 26. , 27. , 28. 
, 29. , 1.1, 1.1], - - [ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1], - [ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]] - ) - assert_allclose(test, expected) - - def test_check_constant_float3(self): - a = np.arange(100, dtype=float) - a = np.pad(a, (25, 20), 'constant', constant_values=(-1.1, -1.2)) - b = np.array( - [-1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, - -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, - -1.1, -1.1, -1.1, -1.1, -1.1, - - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, - 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, - 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, - 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, - 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, - - -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, - -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2] - ) - assert_allclose(a, b) - - def test_check_constant_odd_pad_amount(self): - arr = np.arange(30).reshape(5, 6) - test = np.pad(arr, ((1,), (2,)), mode='constant', - constant_values=3) - expected = np.array( - [[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3], - - [ 3, 3, 0, 1, 2, 3, 4, 5, 3, 3], - [ 3, 3, 6, 7, 8, 9, 10, 11, 3, 3], - [ 3, 3, 12, 13, 14, 15, 16, 17, 3, 3], - [ 3, 3, 18, 19, 20, 21, 22, 23, 3, 3], - [ 3, 3, 24, 25, 26, 27, 28, 29, 3, 3], - - [ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]] - ) - assert_allclose(test, expected) - - def test_check_constant_pad_2d(self): - arr = np.arange(4).reshape(2, 2) - test = np.lib.pad(arr, ((1, 2), (1, 3)), mode='constant', - constant_values=((1, 2), (3, 4))) - expected = np.array( - [[3, 1, 1, 4, 4, 4], - [3, 0, 1, 4, 4, 4], - [3, 2, 3, 4, 4, 4], - [3, 2, 2, 4, 4, 4], - [3, 2, 2, 4, 4, 4]] - ) - assert_allclose(test, expected) - - def test_check_large_integers(self): - uint64_max = 2 ** 64 - 1 - arr = np.full(5, uint64_max, dtype=np.uint64) - test 
= np.pad(arr, 1, mode="constant", constant_values=arr.min()) - expected = np.full(7, uint64_max, dtype=np.uint64) - assert_array_equal(test, expected) - - int64_max = 2 ** 63 - 1 - arr = np.full(5, int64_max, dtype=np.int64) - test = np.pad(arr, 1, mode="constant", constant_values=arr.min()) - expected = np.full(7, int64_max, dtype=np.int64) - assert_array_equal(test, expected) - - def test_check_object_array(self): - arr = np.empty(1, dtype=object) - obj_a = object() - arr[0] = obj_a - obj_b = object() - obj_c = object() - arr = np.pad(arr, pad_width=1, mode='constant', - constant_values=(obj_b, obj_c)) - - expected = np.empty((3,), dtype=object) - expected[0] = obj_b - expected[1] = obj_a - expected[2] = obj_c - - assert_array_equal(arr, expected) - - def test_pad_empty_dimension(self): - arr = np.zeros((3, 0, 2)) - result = np.pad(arr, [(0,), (2,), (1,)], mode="constant") - assert result.shape == (3, 4, 4) - - -class TestLinearRamp(object): - def test_check_simple(self): - a = np.arange(100).astype('f') - a = np.pad(a, (25, 20), 'linear_ramp', end_values=(4, 5)) - b = np.array( - [4.00, 3.84, 3.68, 3.52, 3.36, 3.20, 3.04, 2.88, 2.72, 2.56, - 2.40, 2.24, 2.08, 1.92, 1.76, 1.60, 1.44, 1.28, 1.12, 0.96, - 0.80, 0.64, 0.48, 0.32, 0.16, - - 0.00, 1.00, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00, 8.00, 9.00, - 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, - 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, - 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, - 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, - 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, - 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0, - 70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, - 80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, - 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0, - - 94.3, 89.6, 84.9, 80.2, 75.5, 70.8, 66.1, 61.4, 56.7, 52.0, - 47.3, 42.6, 37.9, 33.2, 28.5, 23.8, 19.1, 14.4, 9.7, 5.] 
- ) - assert_allclose(a, b, rtol=1e-5, atol=1e-5) - - def test_check_2d(self): - arr = np.arange(20).reshape(4, 5).astype(np.float64) - test = np.pad(arr, (2, 2), mode='linear_ramp', end_values=(0, 0)) - expected = np.array( - [[0., 0., 0., 0., 0., 0., 0., 0., 0.], - [0., 0., 0., 0.5, 1., 1.5, 2., 1., 0.], - [0., 0., 0., 1., 2., 3., 4., 2., 0.], - [0., 2.5, 5., 6., 7., 8., 9., 4.5, 0.], - [0., 5., 10., 11., 12., 13., 14., 7., 0.], - [0., 7.5, 15., 16., 17., 18., 19., 9.5, 0.], - [0., 3.75, 7.5, 8., 8.5, 9., 9.5, 4.75, 0.], - [0., 0., 0., 0., 0., 0., 0., 0., 0.]]) - assert_allclose(test, expected) - - @pytest.mark.xfail(exceptions=(AssertionError,)) - def test_object_array(self): - from fractions import Fraction - arr = np.array([Fraction(1, 2), Fraction(-1, 2)]) - actual = np.pad(arr, (2, 3), mode='linear_ramp', end_values=0) - - # deliberately chosen to have a non-power-of-2 denominator such that - # rounding to floats causes a failure. - expected = np.array([ - Fraction( 0, 12), - Fraction( 3, 12), - Fraction( 6, 12), - Fraction(-6, 12), - Fraction(-4, 12), - Fraction(-2, 12), - Fraction(-0, 12), - ]) - assert_equal(actual, expected) - - def test_end_values(self): - """Ensure that end values are exact.""" - a = np.pad(np.ones(10).reshape(2, 5), (223, 123), mode="linear_ramp") - assert_equal(a[:, 0], 0.) - assert_equal(a[:, -1], 0.) - assert_equal(a[0, :], 0.) - assert_equal(a[-1, :], 0.) - - @pytest.mark.parametrize("dtype", _numeric_dtypes) - def test_negative_difference(self, dtype): - """ - Check correct behavior of unsigned dtypes if there is a negative - difference between the edge to pad and `end_values`. Check both cases - to be independent of implementation. Test behavior for all other dtypes - in case dtype casting interferes with complex dtypes. See gh-14191. 
- """ - x = np.array([3], dtype=dtype) - result = np.pad(x, 3, mode="linear_ramp", end_values=0) - expected = np.array([0, 1, 2, 3, 2, 1, 0], dtype=dtype) - assert_equal(result, expected) - - x = np.array([0], dtype=dtype) - result = np.pad(x, 3, mode="linear_ramp", end_values=3) - expected = np.array([3, 2, 1, 0, 1, 2, 3], dtype=dtype) - assert_equal(result, expected) - - -class TestReflect(object): - def test_check_simple(self): - a = np.arange(100) - a = np.pad(a, (25, 20), 'reflect') - b = np.array( - [25, 24, 23, 22, 21, 20, 19, 18, 17, 16, - 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, - 5, 4, 3, 2, 1, - - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, - 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, - 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, - 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, - 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, - - 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, - 88, 87, 86, 85, 84, 83, 82, 81, 80, 79] - ) - assert_array_equal(a, b) - - def test_check_odd_method(self): - a = np.arange(100) - a = np.pad(a, (25, 20), 'reflect', reflect_type='odd') - b = np.array( - [-25, -24, -23, -22, -21, -20, -19, -18, -17, -16, - -15, -14, -13, -12, -11, -10, -9, -8, -7, -6, - -5, -4, -3, -2, -1, - - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, - 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, - 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, - 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, - 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, - - 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, - 110, 111, 112, 113, 114, 115, 116, 117, 118, 119] - ) - assert_array_equal(a, b) - - def test_check_large_pad(self): - a = [[4, 5, 6], [6, 7, 
8]] - a = np.pad(a, (5, 7), 'reflect') - b = np.array( - [[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], - - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], - - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]] - ) - assert_array_equal(a, b) - - def test_check_shape(self): - a = [[4, 5, 6]] - a = np.pad(a, (5, 7), 'reflect') - b = np.array( - [[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]] - ) - assert_array_equal(a, b) - - def test_check_01(self): - a = np.pad([1, 2, 3], 2, 'reflect') - b = np.array([3, 2, 1, 2, 3, 2, 1]) - assert_array_equal(a, b) - - def test_check_02(self): - a = np.pad([1, 2, 3], 3, 'reflect') - b = np.array([2, 3, 2, 1, 2, 3, 2, 1, 2]) - assert_array_equal(a, b) - - def test_check_03(self): - a = np.pad([1, 2, 3], 4, 'reflect') - b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3]) - assert_array_equal(a, b) - - -class TestEmptyArray(object): 
- """Check how padding behaves on arrays with an empty dimension.""" - - @pytest.mark.parametrize( - # Keep parametrization ordered, otherwise pytest-xdist might believe - # that different tests were collected during parallelization - "mode", sorted(_all_modes.keys() - {"constant", "empty"}) - ) - def test_pad_empty_dimension(self, mode): - match = ("can't extend empty axis 0 using modes other than 'constant' " - "or 'empty'") - with pytest.raises(ValueError, match=match): - np.pad([], 4, mode=mode) - with pytest.raises(ValueError, match=match): - np.pad(np.ndarray(0), 4, mode=mode) - with pytest.raises(ValueError, match=match): - np.pad(np.zeros((0, 3)), ((1,), (0,)), mode=mode) - - @pytest.mark.parametrize("mode", _all_modes.keys()) - def test_pad_non_empty_dimension(self, mode): - result = np.pad(np.ones((2, 0, 2)), ((3,), (0,), (1,)), mode=mode) - assert result.shape == (8, 0, 4) - - -class TestSymmetric(object): - def test_check_simple(self): - a = np.arange(100) - a = np.pad(a, (25, 20), 'symmetric') - b = np.array( - [24, 23, 22, 21, 20, 19, 18, 17, 16, 15, - 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, - 4, 3, 2, 1, 0, - - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, - 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, - 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, - 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, - 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, - - 99, 98, 97, 96, 95, 94, 93, 92, 91, 90, - 89, 88, 87, 86, 85, 84, 83, 82, 81, 80] - ) - assert_array_equal(a, b) - - def test_check_odd_method(self): - a = np.arange(100) - a = np.pad(a, (25, 20), 'symmetric', reflect_type='odd') - b = np.array( - [-24, -23, -22, -21, -20, -19, -18, -17, -16, -15, - -14, -13, -12, -11, -10, -9, -8, -7, -6, -5, - -4, -3, -2, -1, 0, - - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 
21, 22, 23, 24, 25, 26, 27, 28, 29, - 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, - 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, - 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, - 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, - - 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, - 109, 110, 111, 112, 113, 114, 115, 116, 117, 118] - ) - assert_array_equal(a, b) - - def test_check_large_pad(self): - a = [[4, 5, 6], [6, 7, 8]] - a = np.pad(a, (5, 7), 'symmetric') - b = np.array( - [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], - [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], - [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], - [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], - [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], - - [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], - [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], - - [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], - [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], - [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], - [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], - [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], - [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], - [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]] - ) - - assert_array_equal(a, b) - - def test_check_large_pad_odd(self): - a = [[4, 5, 6], [6, 7, 8]] - a = np.pad(a, (5, 7), 'symmetric', reflect_type='odd') - b = np.array( - [[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6], - [-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6], - [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8], - [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8], - [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10], - - [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10], - [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12], - - [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12], - [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14], - [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14], - [ 7, 
8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16], - [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16], - [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18], - [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18]] - ) - assert_array_equal(a, b) - - def test_check_shape(self): - a = [[4, 5, 6]] - a = np.pad(a, (5, 7), 'symmetric') - b = np.array( - [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], - [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], - [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], - [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], - [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], - - [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], - [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], - - [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], - [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], - [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], - [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], - [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], - [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]] - ) - assert_array_equal(a, b) - - def test_check_01(self): - a = np.pad([1, 2, 3], 2, 'symmetric') - b = np.array([2, 1, 1, 2, 3, 3, 2]) - assert_array_equal(a, b) - - def test_check_02(self): - a = np.pad([1, 2, 3], 3, 'symmetric') - b = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1]) - assert_array_equal(a, b) - - def test_check_03(self): - a = np.pad([1, 2, 3], 6, 'symmetric') - b = np.array([1, 2, 3, 3, 2, 1, 1, 2, 3, 3, 2, 1, 1, 2, 3]) - assert_array_equal(a, b) - - -class TestWrap(object): - def test_check_simple(self): - a = np.arange(100) - a = np.pad(a, (25, 20), 'wrap') - b = np.array( - [75, 76, 77, 78, 79, 80, 81, 82, 83, 84, - 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, - 95, 96, 97, 98, 99, - - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, - 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, - 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 65, 66, 67, 
68, 69, - 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, - 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, - - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] - ) - assert_array_equal(a, b) - - def test_check_large_pad(self): - a = np.arange(12) - a = np.reshape(a, (3, 4)) - a = np.pad(a, (10, 12), 'wrap') - b = np.array( - [[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, - 11, 8, 9, 10, 11, 8, 9, 10, 11], - [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, - 3, 0, 1, 2, 3, 0, 1, 2, 3], - [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, - 7, 4, 5, 6, 7, 4, 5, 6, 7], - [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, - 11, 8, 9, 10, 11, 8, 9, 10, 11], - [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, - 3, 0, 1, 2, 3, 0, 1, 2, 3], - [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, - 7, 4, 5, 6, 7, 4, 5, 6, 7], - [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, - 11, 8, 9, 10, 11, 8, 9, 10, 11], - [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, - 3, 0, 1, 2, 3, 0, 1, 2, 3], - [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, - 7, 4, 5, 6, 7, 4, 5, 6, 7], - [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, - 11, 8, 9, 10, 11, 8, 9, 10, 11], - - [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, - 3, 0, 1, 2, 3, 0, 1, 2, 3], - [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, - 7, 4, 5, 6, 7, 4, 5, 6, 7], - [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, - 11, 8, 9, 10, 11, 8, 9, 10, 11], - - [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, - 3, 0, 1, 2, 3, 0, 1, 2, 3], - [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, - 7, 4, 5, 6, 7, 4, 5, 6, 7], - [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, - 11, 8, 9, 10, 11, 8, 9, 10, 11], - [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, - 3, 0, 1, 2, 3, 0, 1, 2, 3], - [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, - 7, 4, 5, 6, 7, 4, 5, 6, 7], - [10, 11, 8, 9, 10, 
11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, - 11, 8, 9, 10, 11, 8, 9, 10, 11], - [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, - 3, 0, 1, 2, 3, 0, 1, 2, 3], - [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, - 7, 4, 5, 6, 7, 4, 5, 6, 7], - [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, - 11, 8, 9, 10, 11, 8, 9, 10, 11], - [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, - 3, 0, 1, 2, 3, 0, 1, 2, 3], - [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, - 7, 4, 5, 6, 7, 4, 5, 6, 7], - [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, - 11, 8, 9, 10, 11, 8, 9, 10, 11]] - ) - assert_array_equal(a, b) - - def test_check_01(self): - a = np.pad([1, 2, 3], 3, 'wrap') - b = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3]) - assert_array_equal(a, b) - - def test_check_02(self): - a = np.pad([1, 2, 3], 4, 'wrap') - b = np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1]) - assert_array_equal(a, b) - - def test_pad_with_zero(self): - a = np.ones((3, 5)) - b = np.pad(a, (0, 5), mode="wrap") - assert_array_equal(a, b[:-5, :-5]) - - def test_repeated_wrapping(self): - """ - Check wrapping on each side individually if the wrapped area is longer - than the original array. - """ - a = np.arange(5) - b = np.pad(a, (12, 0), mode="wrap") - assert_array_equal(np.r_[a, a, a, a][3:], b) - - a = np.arange(5) - b = np.pad(a, (0, 12), mode="wrap") - assert_array_equal(np.r_[a, a, a, a][:-3], b) - - -class TestEdge(object): - def test_check_simple(self): - a = np.arange(12) - a = np.reshape(a, (4, 3)) - a = np.pad(a, ((2, 3), (3, 2)), 'edge') - b = np.array( - [[0, 0, 0, 0, 1, 2, 2, 2], - [0, 0, 0, 0, 1, 2, 2, 2], - - [0, 0, 0, 0, 1, 2, 2, 2], - [3, 3, 3, 3, 4, 5, 5, 5], - [6, 6, 6, 6, 7, 8, 8, 8], - [9, 9, 9, 9, 10, 11, 11, 11], - - [9, 9, 9, 9, 10, 11, 11, 11], - [9, 9, 9, 9, 10, 11, 11, 11], - [9, 9, 9, 9, 10, 11, 11, 11]] - ) - assert_array_equal(a, b) - - def test_check_width_shape_1_2(self): - # Check a pad_width of the form ((1, 2),). 
- # Regression test for issue gh-7808. - a = np.array([1, 2, 3]) - padded = np.pad(a, ((1, 2),), 'edge') - expected = np.array([1, 1, 2, 3, 3, 3]) - assert_array_equal(padded, expected) - - a = np.array([[1, 2, 3], [4, 5, 6]]) - padded = np.pad(a, ((1, 2),), 'edge') - expected = np.pad(a, ((1, 2), (1, 2)), 'edge') - assert_array_equal(padded, expected) - - a = np.arange(24).reshape(2, 3, 4) - padded = np.pad(a, ((1, 2),), 'edge') - expected = np.pad(a, ((1, 2), (1, 2), (1, 2)), 'edge') - assert_array_equal(padded, expected) - - -class TestEmpty(object): - def test_simple(self): - arr = np.arange(24).reshape(4, 6) - result = np.pad(arr, [(2, 3), (3, 1)], mode="empty") - assert result.shape == (9, 10) - assert_equal(arr, result[2:-3, 3:-1]) - - def test_pad_empty_dimension(self): - arr = np.zeros((3, 0, 2)) - result = np.pad(arr, [(0,), (2,), (1,)], mode="empty") - assert result.shape == (3, 4, 4) - - -def test_legacy_vector_functionality(): - def _padwithtens(vector, pad_width, iaxis, kwargs): - vector[:pad_width[0]] = 10 - vector[-pad_width[1]:] = 10 - - a = np.arange(6).reshape(2, 3) - a = np.pad(a, 2, _padwithtens) - b = np.array( - [[10, 10, 10, 10, 10, 10, 10], - [10, 10, 10, 10, 10, 10, 10], - - [10, 10, 0, 1, 2, 10, 10], - [10, 10, 3, 4, 5, 10, 10], - - [10, 10, 10, 10, 10, 10, 10], - [10, 10, 10, 10, 10, 10, 10]] - ) - assert_array_equal(a, b) - - -def test_unicode_mode(): - a = np.pad([1], 2, mode=u'constant') - b = np.array([0, 0, 1, 0, 0]) - assert_array_equal(a, b) - - -@pytest.mark.parametrize("mode", ["edge", "symmetric", "reflect", "wrap"]) -def test_object_input(mode): - # Regression test for issue gh-11395. 
- a = np.full((4, 3), fill_value=None) - pad_amt = ((2, 3), (3, 2)) - b = np.full((9, 8), fill_value=None) - assert_array_equal(np.pad(a, pad_amt, mode=mode), b) - - -class TestPadWidth(object): - @pytest.mark.parametrize("pad_width", [ - (4, 5, 6, 7), - ((1,), (2,), (3,)), - ((1, 2), (3, 4), (5, 6)), - ((3, 4, 5), (0, 1, 2)), - ]) - @pytest.mark.parametrize("mode", _all_modes.keys()) - def test_misshaped_pad_width(self, pad_width, mode): - arr = np.arange(30).reshape((6, 5)) - match = "operands could not be broadcast together" - with pytest.raises(ValueError, match=match): - np.pad(arr, pad_width, mode) - - @pytest.mark.parametrize("mode", _all_modes.keys()) - def test_misshaped_pad_width_2(self, mode): - arr = np.arange(30).reshape((6, 5)) - match = ("input operand has more dimensions than allowed by the axis " - "remapping") - with pytest.raises(ValueError, match=match): - np.pad(arr, (((3,), (4,), (5,)), ((0,), (1,), (2,))), mode) - - @pytest.mark.parametrize( - "pad_width", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))]) - @pytest.mark.parametrize("mode", _all_modes.keys()) - def test_negative_pad_width(self, pad_width, mode): - arr = np.arange(30).reshape((6, 5)) - match = "index can't contain negative values" - with pytest.raises(ValueError, match=match): - np.pad(arr, pad_width, mode) - - @pytest.mark.parametrize("pad_width", [ - "3", - "word", - None, - object(), - 3.4, - ((2, 3, 4), (3, 2)), # dtype=object (tuple) - complex(1, -1), - ((-2.1, 3), (3, 2)), - ]) - @pytest.mark.parametrize("mode", _all_modes.keys()) - def test_bad_type(self, pad_width, mode): - arr = np.arange(30).reshape((6, 5)) - match = "`pad_width` must be of integral type." 
- with pytest.raises(TypeError, match=match): - np.pad(arr, pad_width, mode) - with pytest.raises(TypeError, match=match): - np.pad(arr, np.array(pad_width), mode) - - def test_pad_width_as_ndarray(self): - a = np.arange(12) - a = np.reshape(a, (4, 3)) - a = np.pad(a, np.array(((2, 3), (3, 2))), 'edge') - b = np.array( - [[0, 0, 0, 0, 1, 2, 2, 2], - [0, 0, 0, 0, 1, 2, 2, 2], - - [0, 0, 0, 0, 1, 2, 2, 2], - [3, 3, 3, 3, 4, 5, 5, 5], - [6, 6, 6, 6, 7, 8, 8, 8], - [9, 9, 9, 9, 10, 11, 11, 11], - - [9, 9, 9, 9, 10, 11, 11, 11], - [9, 9, 9, 9, 10, 11, 11, 11], - [9, 9, 9, 9, 10, 11, 11, 11]] - ) - assert_array_equal(a, b) - - @pytest.mark.parametrize("pad_width", [0, (0, 0), ((0, 0), (0, 0))]) - @pytest.mark.parametrize("mode", _all_modes.keys()) - def test_zero_pad_width(self, pad_width, mode): - arr = np.arange(30).reshape(6, 5) - assert_array_equal(arr, np.pad(arr, pad_width, mode=mode)) - - -@pytest.mark.parametrize("mode", _all_modes.keys()) -def test_kwargs(mode): - """Test behavior of pad's kwargs for the given mode.""" - allowed = _all_modes[mode] - not_allowed = {} - for kwargs in _all_modes.values(): - if kwargs != allowed: - not_allowed.update(kwargs) - # Test if allowed keyword arguments pass - np.pad([1, 2, 3], 1, mode, **allowed) - # Test if prohibited keyword arguments of other modes raise an error - for key, value in not_allowed.items(): - match = "unsupported keyword arguments for mode '{}'".format(mode) - with pytest.raises(ValueError, match=match): - np.pad([1, 2, 3], 1, mode, **{key: value}) - - -def test_constant_zero_default(): - arr = np.array([1, 1]) - assert_array_equal(np.pad(arr, 2), [0, 0, 1, 1, 0, 0]) - - -@pytest.mark.parametrize("mode", [1, "const", object(), None, True, False]) -def test_unsupported_mode(mode): - match= "mode '{}' is not supported".format(mode) - with pytest.raises(ValueError, match=match): - np.pad([1, 2, 3], 4, mode=mode) - - -@pytest.mark.parametrize("mode", _all_modes.keys()) -def test_non_contiguous_array(mode): - 
arr = np.arange(24).reshape(4, 6)[::2, ::2] - result = np.pad(arr, (2, 3), mode) - assert result.shape == (7, 8) - assert_equal(result[2:-3, 2:-3], arr) - - -@pytest.mark.parametrize("mode", _all_modes.keys()) -def test_memory_layout_persistence(mode): - """Test if C and F order is preserved for all pad modes.""" - x = np.ones((5, 10), order='C') - assert np.pad(x, 5, mode).flags["C_CONTIGUOUS"] - x = np.ones((5, 10), order='F') - assert np.pad(x, 5, mode).flags["F_CONTIGUOUS"] - - -@pytest.mark.parametrize("dtype", _numeric_dtypes) -@pytest.mark.parametrize("mode", _all_modes.keys()) -def test_dtype_persistence(dtype, mode): - arr = np.zeros((3, 2, 1), dtype=dtype) - result = np.pad(arr, 1, mode=mode) - assert result.dtype == dtype diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_arraysetops.py b/venv/lib/python3.7/site-packages/numpy/lib/tests/test_arraysetops.py deleted file mode 100644 index fd21a7f..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_arraysetops.py +++ /dev/null @@ -1,626 +0,0 @@ -"""Test functions for 1D array set operations. 
- -""" -from __future__ import division, absolute_import, print_function - -import numpy as np - -from numpy.testing import (assert_array_equal, assert_equal, - assert_raises, assert_raises_regex) -from numpy.lib.arraysetops import ( - ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, in1d, isin - ) -import pytest - - - -class TestSetOps(object): - - def test_intersect1d(self): - # unique inputs - a = np.array([5, 7, 1, 2]) - b = np.array([2, 4, 3, 1, 5]) - - ec = np.array([1, 2, 5]) - c = intersect1d(a, b, assume_unique=True) - assert_array_equal(c, ec) - - # non-unique inputs - a = np.array([5, 5, 7, 1, 2]) - b = np.array([2, 1, 4, 3, 3, 1, 5]) - - ed = np.array([1, 2, 5]) - c = intersect1d(a, b) - assert_array_equal(c, ed) - assert_array_equal([], intersect1d([], [])) - - def test_intersect1d_array_like(self): - # See gh-11772 - class Test(object): - def __array__(self): - return np.arange(3) - - a = Test() - res = intersect1d(a, a) - assert_array_equal(res, a) - res = intersect1d([1, 2, 3], [1, 2, 3]) - assert_array_equal(res, [1, 2, 3]) - - def test_intersect1d_indices(self): - # unique inputs - a = np.array([1, 2, 3, 4]) - b = np.array([2, 1, 4, 6]) - c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True) - ee = np.array([1, 2, 4]) - assert_array_equal(c, ee) - assert_array_equal(a[i1], ee) - assert_array_equal(b[i2], ee) - - # non-unique inputs - a = np.array([1, 2, 2, 3, 4, 3, 2]) - b = np.array([1, 8, 4, 2, 2, 3, 2, 3]) - c, i1, i2 = intersect1d(a, b, return_indices=True) - ef = np.array([1, 2, 3, 4]) - assert_array_equal(c, ef) - assert_array_equal(a[i1], ef) - assert_array_equal(b[i2], ef) - - # non1d, unique inputs - a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]]) - b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]]) - c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True) - ui1 = np.unravel_index(i1, a.shape) - ui2 = np.unravel_index(i2, b.shape) - ea = np.array([2, 6, 7, 8]) - assert_array_equal(ea, a[ui1]) - 
assert_array_equal(ea, b[ui2]) - - # non1d, not assumed to be uniqueinputs - a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]]) - b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]]) - c, i1, i2 = intersect1d(a, b, return_indices=True) - ui1 = np.unravel_index(i1, a.shape) - ui2 = np.unravel_index(i2, b.shape) - ea = np.array([2, 7, 8]) - assert_array_equal(ea, a[ui1]) - assert_array_equal(ea, b[ui2]) - - def test_setxor1d(self): - a = np.array([5, 7, 1, 2]) - b = np.array([2, 4, 3, 1, 5]) - - ec = np.array([3, 4, 7]) - c = setxor1d(a, b) - assert_array_equal(c, ec) - - a = np.array([1, 2, 3]) - b = np.array([6, 5, 4]) - - ec = np.array([1, 2, 3, 4, 5, 6]) - c = setxor1d(a, b) - assert_array_equal(c, ec) - - a = np.array([1, 8, 2, 3]) - b = np.array([6, 5, 4, 8]) - - ec = np.array([1, 2, 3, 4, 5, 6]) - c = setxor1d(a, b) - assert_array_equal(c, ec) - - assert_array_equal([], setxor1d([], [])) - - def test_ediff1d(self): - zero_elem = np.array([]) - one_elem = np.array([1]) - two_elem = np.array([1, 2]) - - assert_array_equal([], ediff1d(zero_elem)) - assert_array_equal([0], ediff1d(zero_elem, to_begin=0)) - assert_array_equal([0], ediff1d(zero_elem, to_end=0)) - assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0)) - assert_array_equal([], ediff1d(one_elem)) - assert_array_equal([1], ediff1d(two_elem)) - assert_array_equal([7,1,9], ediff1d(two_elem, to_begin=7, to_end=9)) - assert_array_equal([5,6,1,7,8], ediff1d(two_elem, to_begin=[5,6], to_end=[7,8])) - assert_array_equal([1,9], ediff1d(two_elem, to_end=9)) - assert_array_equal([1,7,8], ediff1d(two_elem, to_end=[7,8])) - assert_array_equal([7,1], ediff1d(two_elem, to_begin=7)) - assert_array_equal([5,6,1], ediff1d(two_elem, to_begin=[5,6])) - - @pytest.mark.parametrize("ary, prepend, append", [ - # should fail because trying to cast - # np.nan standard floating point value - # into an integer array: - (np.array([1, 2, 3], dtype=np.int64), - None, - np.nan), - # should fail because attempting - # to 
downcast to smaller int type: - (np.array([1, 2, 3], dtype=np.int16), - np.array([5, 1<<20, 2], dtype=np.int32), - None), - # should fail because attempting to cast - # two special floating point values - # to integers (on both sides of ary): - (np.array([1., 3., 9.], dtype=np.int8), - np.nan, - np.nan), - ]) - def test_ediff1d_forbidden_type_casts(self, ary, prepend, append): - # verify resolution of gh-11490 - - # specifically, raise an appropriate - # Exception when attempting to append or - # prepend with an incompatible type - msg = 'cannot convert' - with assert_raises_regex(ValueError, msg): - ediff1d(ary=ary, - to_end=append, - to_begin=prepend) - - @pytest.mark.parametrize("ary," - "prepend," - "append," - "expected", [ - (np.array([1, 2, 3], dtype=np.int16), - 0, - None, - np.array([0, 1, 1], dtype=np.int16)), - (np.array([1, 2, 3], dtype=np.int32), - 0, - 0, - np.array([0, 1, 1, 0], dtype=np.int32)), - (np.array([1, 2, 3], dtype=np.int64), - 3, - -9, - np.array([3, 1, 1, -9], dtype=np.int64)), - ]) - def test_ediff1d_scalar_handling(self, - ary, - prepend, - append, - expected): - # maintain backwards-compatibility - # of scalar prepend / append behavior - # in ediff1d following fix for gh-11490 - actual = np.ediff1d(ary=ary, - to_end=append, - to_begin=prepend) - assert_equal(actual, expected) - - - def test_isin(self): - # the tests for in1d cover most of isin's behavior - # if in1d is removed, would need to change those tests to test - # isin instead. 
- def _isin_slow(a, b): - b = np.asarray(b).flatten().tolist() - return a in b - isin_slow = np.vectorize(_isin_slow, otypes=[bool], excluded={1}) - def assert_isin_equal(a, b): - x = isin(a, b) - y = isin_slow(a, b) - assert_array_equal(x, y) - - #multidimensional arrays in both arguments - a = np.arange(24).reshape([2, 3, 4]) - b = np.array([[10, 20, 30], [0, 1, 3], [11, 22, 33]]) - assert_isin_equal(a, b) - - #array-likes as both arguments - c = [(9, 8), (7, 6)] - d = (9, 7) - assert_isin_equal(c, d) - - #zero-d array: - f = np.array(3) - assert_isin_equal(f, b) - assert_isin_equal(a, f) - assert_isin_equal(f, f) - - #scalar: - assert_isin_equal(5, b) - assert_isin_equal(a, 6) - assert_isin_equal(5, 6) - - #empty array-like: - x = [] - assert_isin_equal(x, b) - assert_isin_equal(a, x) - assert_isin_equal(x, x) - - def test_in1d(self): - # we use two different sizes for the b array here to test the - # two different paths in in1d(). - for mult in (1, 10): - # One check without np.array to make sure lists are handled correct - a = [5, 7, 1, 2] - b = [2, 4, 3, 1, 5] * mult - ec = np.array([True, False, True, True]) - c = in1d(a, b, assume_unique=True) - assert_array_equal(c, ec) - - a[0] = 8 - ec = np.array([False, False, True, True]) - c = in1d(a, b, assume_unique=True) - assert_array_equal(c, ec) - - a[0], a[3] = 4, 8 - ec = np.array([True, False, True, False]) - c = in1d(a, b, assume_unique=True) - assert_array_equal(c, ec) - - a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5]) - b = [2, 3, 4] * mult - ec = [False, True, False, True, True, True, True, True, True, - False, True, False, False, False] - c = in1d(a, b) - assert_array_equal(c, ec) - - b = b + [5, 5, 4] * mult - ec = [True, True, True, True, True, True, True, True, True, True, - True, False, True, True] - c = in1d(a, b) - assert_array_equal(c, ec) - - a = np.array([5, 7, 1, 2]) - b = np.array([2, 4, 3, 1, 5] * mult) - ec = np.array([True, False, True, True]) - c = in1d(a, b) - 
assert_array_equal(c, ec) - - a = np.array([5, 7, 1, 1, 2]) - b = np.array([2, 4, 3, 3, 1, 5] * mult) - ec = np.array([True, False, True, True, True]) - c = in1d(a, b) - assert_array_equal(c, ec) - - a = np.array([5, 5]) - b = np.array([2, 2] * mult) - ec = np.array([False, False]) - c = in1d(a, b) - assert_array_equal(c, ec) - - a = np.array([5]) - b = np.array([2]) - ec = np.array([False]) - c = in1d(a, b) - assert_array_equal(c, ec) - - assert_array_equal(in1d([], []), []) - - def test_in1d_char_array(self): - a = np.array(['a', 'b', 'c', 'd', 'e', 'c', 'e', 'b']) - b = np.array(['a', 'c']) - - ec = np.array([True, False, True, False, False, True, False, False]) - c = in1d(a, b) - - assert_array_equal(c, ec) - - def test_in1d_invert(self): - "Test in1d's invert parameter" - # We use two different sizes for the b array here to test the - # two different paths in in1d(). - for mult in (1, 10): - a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5]) - b = [2, 3, 4] * mult - assert_array_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) - - def test_in1d_ravel(self): - # Test that in1d ravels its input arrays. This is not documented - # behavior however. The test is to ensure consistentency. 
- a = np.arange(6).reshape(2, 3) - b = np.arange(3, 9).reshape(3, 2) - long_b = np.arange(3, 63).reshape(30, 2) - ec = np.array([False, False, False, True, True, True]) - - assert_array_equal(in1d(a, b, assume_unique=True), ec) - assert_array_equal(in1d(a, b, assume_unique=False), ec) - assert_array_equal(in1d(a, long_b, assume_unique=True), ec) - assert_array_equal(in1d(a, long_b, assume_unique=False), ec) - - def test_in1d_first_array_is_object(self): - ar1 = [None] - ar2 = np.array([1]*10) - expected = np.array([False]) - result = np.in1d(ar1, ar2) - assert_array_equal(result, expected) - - def test_in1d_second_array_is_object(self): - ar1 = 1 - ar2 = np.array([None]*10) - expected = np.array([False]) - result = np.in1d(ar1, ar2) - assert_array_equal(result, expected) - - def test_in1d_both_arrays_are_object(self): - ar1 = [None] - ar2 = np.array([None]*10) - expected = np.array([True]) - result = np.in1d(ar1, ar2) - assert_array_equal(result, expected) - - def test_in1d_both_arrays_have_structured_dtype(self): - # Test arrays of a structured data type containing an integer field - # and a field of dtype `object` allowing for arbitrary Python objects - dt = np.dtype([('field1', int), ('field2', object)]) - ar1 = np.array([(1, None)], dtype=dt) - ar2 = np.array([(1, None)]*10, dtype=dt) - expected = np.array([True]) - result = np.in1d(ar1, ar2) - assert_array_equal(result, expected) - - def test_union1d(self): - a = np.array([5, 4, 7, 1, 2]) - b = np.array([2, 4, 3, 3, 2, 1, 5]) - - ec = np.array([1, 2, 3, 4, 5, 7]) - c = union1d(a, b) - assert_array_equal(c, ec) - - # Tests gh-10340, arguments to union1d should be - # flattened if they are not already 1D - x = np.array([[0, 1, 2], [3, 4, 5]]) - y = np.array([0, 1, 2, 3, 4]) - ez = np.array([0, 1, 2, 3, 4, 5]) - z = union1d(x, y) - assert_array_equal(z, ez) - - assert_array_equal([], union1d([], [])) - - def test_setdiff1d(self): - a = np.array([6, 5, 4, 7, 1, 2, 7, 4]) - b = np.array([2, 4, 3, 3, 2, 1, 5]) - - 
ec = np.array([6, 7]) - c = setdiff1d(a, b) - assert_array_equal(c, ec) - - a = np.arange(21) - b = np.arange(19) - ec = np.array([19, 20]) - c = setdiff1d(a, b) - assert_array_equal(c, ec) - - assert_array_equal([], setdiff1d([], [])) - a = np.array((), np.uint32) - assert_equal(setdiff1d(a, []).dtype, np.uint32) - - def test_setdiff1d_unique(self): - a = np.array([3, 2, 1]) - b = np.array([7, 5, 2]) - expected = np.array([3, 1]) - actual = setdiff1d(a, b, assume_unique=True) - assert_equal(actual, expected) - - def test_setdiff1d_char_array(self): - a = np.array(['a', 'b', 'c']) - b = np.array(['a', 'b', 's']) - assert_array_equal(setdiff1d(a, b), np.array(['c'])) - - def test_manyways(self): - a = np.array([5, 7, 1, 2, 8]) - b = np.array([9, 8, 2, 4, 3, 1, 5]) - - c1 = setxor1d(a, b) - aux1 = intersect1d(a, b) - aux2 = union1d(a, b) - c2 = setdiff1d(aux2, aux1) - assert_array_equal(c1, c2) - - -class TestUnique(object): - - def test_unique_1d(self): - - def check_all(a, b, i1, i2, c, dt): - base_msg = 'check {0} failed for type {1}' - - msg = base_msg.format('values', dt) - v = unique(a) - assert_array_equal(v, b, msg) - - msg = base_msg.format('return_index', dt) - v, j = unique(a, True, False, False) - assert_array_equal(v, b, msg) - assert_array_equal(j, i1, msg) - - msg = base_msg.format('return_inverse', dt) - v, j = unique(a, False, True, False) - assert_array_equal(v, b, msg) - assert_array_equal(j, i2, msg) - - msg = base_msg.format('return_counts', dt) - v, j = unique(a, False, False, True) - assert_array_equal(v, b, msg) - assert_array_equal(j, c, msg) - - msg = base_msg.format('return_index and return_inverse', dt) - v, j1, j2 = unique(a, True, True, False) - assert_array_equal(v, b, msg) - assert_array_equal(j1, i1, msg) - assert_array_equal(j2, i2, msg) - - msg = base_msg.format('return_index and return_counts', dt) - v, j1, j2 = unique(a, True, False, True) - assert_array_equal(v, b, msg) - assert_array_equal(j1, i1, msg) - assert_array_equal(j2, 
c, msg) - - msg = base_msg.format('return_inverse and return_counts', dt) - v, j1, j2 = unique(a, False, True, True) - assert_array_equal(v, b, msg) - assert_array_equal(j1, i2, msg) - assert_array_equal(j2, c, msg) - - msg = base_msg.format(('return_index, return_inverse ' - 'and return_counts'), dt) - v, j1, j2, j3 = unique(a, True, True, True) - assert_array_equal(v, b, msg) - assert_array_equal(j1, i1, msg) - assert_array_equal(j2, i2, msg) - assert_array_equal(j3, c, msg) - - a = [5, 7, 1, 2, 1, 5, 7]*10 - b = [1, 2, 5, 7] - i1 = [2, 3, 0, 1] - i2 = [2, 3, 0, 1, 0, 2, 3]*10 - c = np.multiply([2, 1, 2, 2], 10) - - # test for numeric arrays - types = [] - types.extend(np.typecodes['AllInteger']) - types.extend(np.typecodes['AllFloat']) - types.append('datetime64[D]') - types.append('timedelta64[D]') - for dt in types: - aa = np.array(a, dt) - bb = np.array(b, dt) - check_all(aa, bb, i1, i2, c, dt) - - # test for object arrays - dt = 'O' - aa = np.empty(len(a), dt) - aa[:] = a - bb = np.empty(len(b), dt) - bb[:] = b - check_all(aa, bb, i1, i2, c, dt) - - # test for structured arrays - dt = [('', 'i'), ('', 'i')] - aa = np.array(list(zip(a, a)), dt) - bb = np.array(list(zip(b, b)), dt) - check_all(aa, bb, i1, i2, c, dt) - - # test for ticket #2799 - aa = [1. + 0.j, 1 - 1.j, 1] - assert_array_equal(np.unique(aa), [1. - 1.j, 1. + 0.j]) - - # test for ticket #4785 - a = [(1, 2), (1, 2), (2, 3)] - unq = [1, 2, 3] - inv = [0, 1, 0, 1, 1, 2] - a1 = unique(a) - assert_array_equal(a1, unq) - a2, a2_inv = unique(a, return_inverse=True) - assert_array_equal(a2, unq) - assert_array_equal(a2_inv, inv) - - # test for chararrays with return_inverse (gh-5099) - a = np.chararray(5) - a[...] 
= '' - a2, a2_inv = np.unique(a, return_inverse=True) - assert_array_equal(a2_inv, np.zeros(5)) - - # test for ticket #9137 - a = [] - a1_idx = np.unique(a, return_index=True)[1] - a2_inv = np.unique(a, return_inverse=True)[1] - a3_idx, a3_inv = np.unique(a, return_index=True, return_inverse=True)[1:] - assert_equal(a1_idx.dtype, np.intp) - assert_equal(a2_inv.dtype, np.intp) - assert_equal(a3_idx.dtype, np.intp) - assert_equal(a3_inv.dtype, np.intp) - - def test_unique_axis_errors(self): - assert_raises(TypeError, self._run_axis_tests, object) - assert_raises(TypeError, self._run_axis_tests, - [('a', int), ('b', object)]) - - assert_raises(np.AxisError, unique, np.arange(10), axis=2) - assert_raises(np.AxisError, unique, np.arange(10), axis=-2) - - def test_unique_axis_list(self): - msg = "Unique failed on list of lists" - inp = [[0, 1, 0], [0, 1, 0]] - inp_arr = np.asarray(inp) - assert_array_equal(unique(inp, axis=0), unique(inp_arr, axis=0), msg) - assert_array_equal(unique(inp, axis=1), unique(inp_arr, axis=1), msg) - - def test_unique_axis(self): - types = [] - types.extend(np.typecodes['AllInteger']) - types.extend(np.typecodes['AllFloat']) - types.append('datetime64[D]') - types.append('timedelta64[D]') - types.append([('a', int), ('b', int)]) - types.append([('a', int), ('b', float)]) - - for dtype in types: - self._run_axis_tests(dtype) - - msg = 'Non-bitwise-equal booleans test failed' - data = np.arange(10, dtype=np.uint8).reshape(-1, 2).view(bool) - result = np.array([[False, True], [True, True]], dtype=bool) - assert_array_equal(unique(data, axis=0), result, msg) - - msg = 'Negative zero equality test failed' - data = np.array([[-0.0, 0.0], [0.0, -0.0], [-0.0, 0.0], [0.0, -0.0]]) - result = np.array([[-0.0, 0.0]]) - assert_array_equal(unique(data, axis=0), result, msg) - - def test_unique_masked(self): - # issue 8664 - x = np.array([64, 0, 1, 2, 3, 63, 63, 0, 0, 0, 1, 2, 0, 63, 0], dtype='uint8') - y = np.ma.masked_equal(x, 0) - - v = np.unique(y) - 
v2, i, c = np.unique(y, return_index=True, return_counts=True) - - msg = 'Unique returned different results when asked for index' - assert_array_equal(v.data, v2.data, msg) - assert_array_equal(v.mask, v2.mask, msg) - - def test_unique_sort_order_with_axis(self): - # These tests fail if sorting along axis is done by treating subarrays - # as unsigned byte strings. See gh-10495. - fmt = "sort order incorrect for integer type '%s'" - for dt in 'bhilq': - a = np.array([[-1],[0]], dt) - b = np.unique(a, axis=0) - assert_array_equal(a, b, fmt % dt) - - def _run_axis_tests(self, dtype): - data = np.array([[0, 1, 0, 0], - [1, 0, 0, 0], - [0, 1, 0, 0], - [1, 0, 0, 0]]).astype(dtype) - - msg = 'Unique with 1d array and axis=0 failed' - result = np.array([0, 1]) - assert_array_equal(unique(data), result.astype(dtype), msg) - - msg = 'Unique with 2d array and axis=0 failed' - result = np.array([[0, 1, 0, 0], [1, 0, 0, 0]]) - assert_array_equal(unique(data, axis=0), result.astype(dtype), msg) - - msg = 'Unique with 2d array and axis=1 failed' - result = np.array([[0, 0, 1], [0, 1, 0], [0, 0, 1], [0, 1, 0]]) - assert_array_equal(unique(data, axis=1), result.astype(dtype), msg) - - msg = 'Unique with 3d array and axis=2 failed' - data3d = np.array([[[1, 1], - [1, 0]], - [[0, 1], - [0, 0]]]).astype(dtype) - result = np.take(data3d, [1, 0], axis=2) - assert_array_equal(unique(data3d, axis=2), result, msg) - - uniq, idx, inv, cnt = unique(data, axis=0, return_index=True, - return_inverse=True, return_counts=True) - msg = "Unique's return_index=True failed with axis=0" - assert_array_equal(data[idx], uniq, msg) - msg = "Unique's return_inverse=True failed with axis=0" - assert_array_equal(uniq[inv], data) - msg = "Unique's return_counts=True failed with axis=0" - assert_array_equal(cnt, np.array([2, 2]), msg) - - uniq, idx, inv, cnt = unique(data, axis=1, return_index=True, - return_inverse=True, return_counts=True) - msg = "Unique's return_index=True failed with axis=1" - 
assert_array_equal(data[:, idx], uniq) - msg = "Unique's return_inverse=True failed with axis=1" - assert_array_equal(uniq[:, inv], data) - msg = "Unique's return_counts=True failed with axis=1" - assert_array_equal(cnt, np.array([2, 1, 1]), msg) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_arrayterator.py b/venv/lib/python3.7/site-packages/numpy/lib/tests/test_arrayterator.py deleted file mode 100644 index 2ce4456..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_arrayterator.py +++ /dev/null @@ -1,48 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from operator import mul -from functools import reduce - -import numpy as np -from numpy.random import randint -from numpy.lib import Arrayterator -from numpy.testing import assert_ - - -def test(): - np.random.seed(np.arange(10)) - - # Create a random array - ndims = randint(5)+1 - shape = tuple(randint(10)+1 for dim in range(ndims)) - els = reduce(mul, shape) - a = np.arange(els) - a.shape = shape - - buf_size = randint(2*els) - b = Arrayterator(a, buf_size) - - # Check that each block has at most ``buf_size`` elements - for block in b: - assert_(len(block.flat) <= (buf_size or els)) - - # Check that all elements are iterated correctly - assert_(list(b.flat) == list(a.flat)) - - # Slice arrayterator - start = [randint(dim) for dim in shape] - stop = [randint(dim)+1 for dim in shape] - step = [randint(dim)+1 for dim in shape] - slice_ = tuple(slice(*t) for t in zip(start, stop, step)) - c = b[slice_] - d = a[slice_] - - # Check that each block has at most ``buf_size`` elements - for block in c: - assert_(len(block.flat) <= (buf_size or els)) - - # Check that the arrayterator is sliced correctly - assert_(np.all(c.__array__() == d)) - - # Check that all elements are iterated correctly - assert_(list(c.flat) == list(d.flat)) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_financial.py 
b/venv/lib/python3.7/site-packages/numpy/lib/tests/test_financial.py deleted file mode 100644 index cb67f7c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_financial.py +++ /dev/null @@ -1,382 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import warnings -from decimal import Decimal - -import numpy as np -from numpy.testing import ( - assert_, assert_almost_equal, assert_allclose, assert_equal, assert_raises - ) - - -def filter_deprecation(func): - def newfunc(*args, **kwargs): - with warnings.catch_warnings(record=True) as ws: - warnings.filterwarnings('always', category=DeprecationWarning) - func(*args, **kwargs) - assert_(all(w.category is DeprecationWarning for w in ws)) - return newfunc - - -class TestFinancial(object): - @filter_deprecation - def test_npv_irr_congruence(self): - # IRR is defined as the rate required for the present value of a - # a series of cashflows to be zero i.e. NPV(IRR(x), x) = 0 - cashflows = np.array([-40000, 5000, 8000, 12000, 30000]) - assert_allclose(np.npv(np.irr(cashflows), cashflows), 0, atol=1e-10, rtol=0) - - @filter_deprecation - def test_rate(self): - assert_almost_equal( - np.rate(10, 0, -3500, 10000), - 0.1107, 4) - - @filter_deprecation - def test_rate_decimal(self): - rate = np.rate(Decimal('10'), Decimal('0'), Decimal('-3500'), Decimal('10000')) - assert_equal(Decimal('0.1106908537142689284704528100'), rate) - - @filter_deprecation - def test_irr(self): - v = [-150000, 15000, 25000, 35000, 45000, 60000] - assert_almost_equal(np.irr(v), 0.0524, 2) - v = [-100, 0, 0, 74] - assert_almost_equal(np.irr(v), -0.0955, 2) - v = [-100, 39, 59, 55, 20] - assert_almost_equal(np.irr(v), 0.28095, 2) - v = [-100, 100, 0, -7] - assert_almost_equal(np.irr(v), -0.0833, 2) - v = [-100, 100, 0, 7] - assert_almost_equal(np.irr(v), 0.06206, 2) - v = [-5, 10.5, 1, -8, 1] - assert_almost_equal(np.irr(v), 0.0886, 2) - - # Test that if there is no solution then np.irr returns nan - # Fixes 
gh-6744 - v = [-1, -2, -3] - assert_equal(np.irr(v), np.nan) - - @filter_deprecation - def test_pv(self): - assert_almost_equal(np.pv(0.07, 20, 12000, 0), -127128.17, 2) - - @filter_deprecation - def test_pv_decimal(self): - assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0')), - Decimal('-127128.1709461939327295222005')) - - @filter_deprecation - def test_fv(self): - assert_equal(np.fv(0.075, 20, -2000, 0, 0), 86609.362673042924) - - @filter_deprecation - def test_fv_decimal(self): - assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), 0, 0), - Decimal('86609.36267304300040536731624')) - - @filter_deprecation - def test_pmt(self): - res = np.pmt(0.08 / 12, 5 * 12, 15000) - tgt = -304.145914 - assert_allclose(res, tgt) - # Test the edge case where rate == 0.0 - res = np.pmt(0.0, 5 * 12, 15000) - tgt = -250.0 - assert_allclose(res, tgt) - # Test the case where we use broadcast and - # the arguments passed in are arrays. - res = np.pmt([[0.0, 0.8], [0.3, 0.8]], [12, 3], [2000, 20000]) - tgt = np.array([[-166.66667, -19311.258], [-626.90814, -19311.258]]) - assert_allclose(res, tgt) - - @filter_deprecation - def test_pmt_decimal(self): - res = np.pmt(Decimal('0.08') / Decimal('12'), 5 * 12, 15000) - tgt = Decimal('-304.1459143262052370338701494') - assert_equal(res, tgt) - # Test the edge case where rate == 0.0 - res = np.pmt(Decimal('0'), Decimal('60'), Decimal('15000')) - tgt = -250 - assert_equal(res, tgt) - # Test the case where we use broadcast and - # the arguments passed in are arrays. 
- res = np.pmt([[Decimal('0'), Decimal('0.8')], [Decimal('0.3'), Decimal('0.8')]], - [Decimal('12'), Decimal('3')], [Decimal('2000'), Decimal('20000')]) - tgt = np.array([[Decimal('-166.6666666666666666666666667'), Decimal('-19311.25827814569536423841060')], - [Decimal('-626.9081401700757748402586600'), Decimal('-19311.25827814569536423841060')]]) - - # Cannot use the `assert_allclose` because it uses isfinite under the covers - # which does not support the Decimal type - # See issue: https://github.com/numpy/numpy/issues/9954 - assert_equal(res[0][0], tgt[0][0]) - assert_equal(res[0][1], tgt[0][1]) - assert_equal(res[1][0], tgt[1][0]) - assert_equal(res[1][1], tgt[1][1]) - - @filter_deprecation - def test_ppmt(self): - assert_equal(np.round(np.ppmt(0.1 / 12, 1, 60, 55000), 2), -710.25) - - @filter_deprecation - def test_ppmt_decimal(self): - assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000')), - Decimal('-710.2541257864217612489830917')) - - # Two tests showing how Decimal is actually getting at a more exact result - # .23 / 12 does not come out nicely as a float but does as a decimal - @filter_deprecation - def test_ppmt_special_rate(self): - assert_equal(np.round(np.ppmt(0.23 / 12, 1, 60, 10000000000), 8), -90238044.232277036) - - @filter_deprecation - def test_ppmt_special_rate_decimal(self): - # When rounded out to 8 decimal places like the float based test, this should not equal the same value - # as the float, substituted for the decimal - def raise_error_because_not_equal(): - assert_equal( - round(np.ppmt(Decimal('0.23') / Decimal('12'), 1, 60, Decimal('10000000000')), 8), - Decimal('-90238044.232277036')) - - assert_raises(AssertionError, raise_error_because_not_equal) - assert_equal(np.ppmt(Decimal('0.23') / Decimal('12'), 1, 60, Decimal('10000000000')), - Decimal('-90238044.2322778884413969909')) - - @filter_deprecation - def test_ipmt(self): - assert_almost_equal(np.round(np.ipmt(0.1 / 12, 1, 24, 2000), 
2), -16.67) - - @filter_deprecation - def test_ipmt_decimal(self): - result = np.ipmt(Decimal('0.1') / Decimal('12'), 1, 24, 2000) - assert_equal(result.flat[0], Decimal('-16.66666666666666666666666667')) - - @filter_deprecation - def test_nper(self): - assert_almost_equal(np.nper(0.075, -2000, 0, 100000.), - 21.54, 2) - - @filter_deprecation - def test_nper2(self): - assert_almost_equal(np.nper(0.0, -2000, 0, 100000.), - 50.0, 1) - - @filter_deprecation - def test_npv(self): - assert_almost_equal( - np.npv(0.05, [-15000, 1500, 2500, 3500, 4500, 6000]), - 122.89, 2) - - @filter_deprecation - def test_npv_decimal(self): - assert_equal( - np.npv(Decimal('0.05'), [-15000, 1500, 2500, 3500, 4500, 6000]), - Decimal('122.894854950942692161628715')) - - @filter_deprecation - def test_mirr(self): - val = [-4500, -800, 800, 800, 600, 600, 800, 800, 700, 3000] - assert_almost_equal(np.mirr(val, 0.08, 0.055), 0.0666, 4) - - val = [-120000, 39000, 30000, 21000, 37000, 46000] - assert_almost_equal(np.mirr(val, 0.10, 0.12), 0.126094, 6) - - val = [100, 200, -50, 300, -200] - assert_almost_equal(np.mirr(val, 0.05, 0.06), 0.3428, 4) - - val = [39000, 30000, 21000, 37000, 46000] - assert_(np.isnan(np.mirr(val, 0.10, 0.12))) - - @filter_deprecation - def test_mirr_decimal(self): - val = [Decimal('-4500'), Decimal('-800'), Decimal('800'), Decimal('800'), - Decimal('600'), Decimal('600'), Decimal('800'), Decimal('800'), - Decimal('700'), Decimal('3000')] - assert_equal(np.mirr(val, Decimal('0.08'), Decimal('0.055')), - Decimal('0.066597175031553548874239618')) - - val = [Decimal('-120000'), Decimal('39000'), Decimal('30000'), - Decimal('21000'), Decimal('37000'), Decimal('46000')] - assert_equal(np.mirr(val, Decimal('0.10'), Decimal('0.12')), Decimal('0.126094130365905145828421880')) - - val = [Decimal('100'), Decimal('200'), Decimal('-50'), - Decimal('300'), Decimal('-200')] - assert_equal(np.mirr(val, Decimal('0.05'), Decimal('0.06')), Decimal('0.342823387842176663647819868')) - - 
val = [Decimal('39000'), Decimal('30000'), Decimal('21000'), Decimal('37000'), Decimal('46000')] - assert_(np.isnan(np.mirr(val, Decimal('0.10'), Decimal('0.12')))) - - @filter_deprecation - def test_when(self): - # begin - assert_equal(np.rate(10, 20, -3500, 10000, 1), - np.rate(10, 20, -3500, 10000, 'begin')) - # end - assert_equal(np.rate(10, 20, -3500, 10000), - np.rate(10, 20, -3500, 10000, 'end')) - assert_equal(np.rate(10, 20, -3500, 10000, 0), - np.rate(10, 20, -3500, 10000, 'end')) - - # begin - assert_equal(np.pv(0.07, 20, 12000, 0, 1), - np.pv(0.07, 20, 12000, 0, 'begin')) - # end - assert_equal(np.pv(0.07, 20, 12000, 0), - np.pv(0.07, 20, 12000, 0, 'end')) - assert_equal(np.pv(0.07, 20, 12000, 0, 0), - np.pv(0.07, 20, 12000, 0, 'end')) - - # begin - assert_equal(np.fv(0.075, 20, -2000, 0, 1), - np.fv(0.075, 20, -2000, 0, 'begin')) - # end - assert_equal(np.fv(0.075, 20, -2000, 0), - np.fv(0.075, 20, -2000, 0, 'end')) - assert_equal(np.fv(0.075, 20, -2000, 0, 0), - np.fv(0.075, 20, -2000, 0, 'end')) - - # begin - assert_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0, 1), - np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'begin')) - # end - assert_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0), - np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'end')) - assert_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0, 0), - np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'end')) - - # begin - assert_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0, 1), - np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'begin')) - # end - assert_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0), - np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'end')) - assert_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0, 0), - np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'end')) - - # begin - assert_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0, 1), - np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'begin')) - # end - assert_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0), - np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'end')) - assert_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0, 0), - np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'end')) - - # begin - 
assert_equal(np.nper(0.075, -2000, 0, 100000., 1), - np.nper(0.075, -2000, 0, 100000., 'begin')) - # end - assert_equal(np.nper(0.075, -2000, 0, 100000.), - np.nper(0.075, -2000, 0, 100000., 'end')) - assert_equal(np.nper(0.075, -2000, 0, 100000., 0), - np.nper(0.075, -2000, 0, 100000., 'end')) - - @filter_deprecation - def test_decimal_with_when(self): - """Test that decimals are still supported if the when argument is passed""" - # begin - assert_equal(np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), Decimal('1')), - np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), 'begin')) - # end - assert_equal(np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000')), - np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), 'end')) - assert_equal(np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), Decimal('0')), - np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), 'end')) - - # begin - assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), Decimal('1')), - np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), 'begin')) - # end - assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0')), - np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), 'end')) - assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), Decimal('0')), - np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), 'end')) - - # begin - assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), Decimal('1')), - np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), 'begin')) - # end - assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0')), - np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), 'end')) - assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), 
Decimal('0'), Decimal('0')), - np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), 'end')) - - # begin - assert_equal(np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'), - Decimal('0'), Decimal('1')), - np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'), - Decimal('0'), 'begin')) - # end - assert_equal(np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'), - Decimal('0')), - np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'), - Decimal('0'), 'end')) - assert_equal(np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'), - Decimal('0'), Decimal('0')), - np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'), - Decimal('0'), 'end')) - - # begin - assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'), - Decimal('0'), Decimal('1')), - np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'), - Decimal('0'), 'begin')) - # end - assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'), - Decimal('0')), - np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'), - Decimal('0'), 'end')) - assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'), - Decimal('0'), Decimal('0')), - np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'), - Decimal('0'), 'end')) - - # begin - assert_equal(np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'), - Decimal('0'), Decimal('1')).flat[0], - np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'), - Decimal('0'), 'begin').flat[0]) - # end - assert_equal(np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'), - 
Decimal('0')).flat[0], - np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'), - Decimal('0'), 'end').flat[0]) - assert_equal(np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'), - Decimal('0'), Decimal('0')).flat[0], - np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'), - Decimal('0'), 'end').flat[0]) - - @filter_deprecation - def test_broadcast(self): - assert_almost_equal(np.nper(0.075, -2000, 0, 100000., [0, 1]), - [21.5449442, 20.76156441], 4) - - assert_almost_equal(np.ipmt(0.1 / 12, list(range(5)), 24, 2000), - [-17.29165168, -16.66666667, -16.03647345, - -15.40102862, -14.76028842], 4) - - assert_almost_equal(np.ppmt(0.1 / 12, list(range(5)), 24, 2000), - [-74.998201, -75.62318601, -76.25337923, - -76.88882405, -77.52956425], 4) - - assert_almost_equal(np.ppmt(0.1 / 12, list(range(5)), 24, 2000, 0, - [0, 0, 1, 'end', 'begin']), - [-74.998201, -75.62318601, -75.62318601, - -76.88882405, -76.88882405], 4) - - @filter_deprecation - def test_broadcast_decimal(self): - # Use almost equal because precision is tested in the explicit tests, this test is to ensure - # broadcast with Decimal is not broken. 
- assert_almost_equal(np.ipmt(Decimal('0.1') / Decimal('12'), list(range(5)), Decimal('24'), Decimal('2000')), - [Decimal('-17.29165168'), Decimal('-16.66666667'), Decimal('-16.03647345'), - Decimal('-15.40102862'), Decimal('-14.76028842')], 4) - - assert_almost_equal(np.ppmt(Decimal('0.1') / Decimal('12'), list(range(5)), Decimal('24'), Decimal('2000')), - [Decimal('-74.998201'), Decimal('-75.62318601'), Decimal('-76.25337923'), - Decimal('-76.88882405'), Decimal('-77.52956425')], 4) - - assert_almost_equal(np.ppmt(Decimal('0.1') / Decimal('12'), list(range(5)), Decimal('24'), Decimal('2000'), - Decimal('0'), [Decimal('0'), Decimal('0'), Decimal('1'), 'end', 'begin']), - [Decimal('-74.998201'), Decimal('-75.62318601'), Decimal('-75.62318601'), - Decimal('-76.88882405'), Decimal('-76.88882405')], 4) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_format.py b/venv/lib/python3.7/site-packages/numpy/lib/tests/test_format.py deleted file mode 100644 index 0592e0b..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_format.py +++ /dev/null @@ -1,995 +0,0 @@ -from __future__ import division, absolute_import, print_function - -# doctest -r''' Test the .npy file format. - -Set up: - - >>> import sys - >>> from io import BytesIO - >>> from numpy.lib import format - >>> - >>> scalars = [ - ... np.uint8, - ... np.int8, - ... np.uint16, - ... np.int16, - ... np.uint32, - ... np.int32, - ... np.uint64, - ... np.int64, - ... np.float32, - ... np.float64, - ... np.complex64, - ... np.complex128, - ... object, - ... ] - >>> - >>> basic_arrays = [] - >>> - >>> for scalar in scalars: - ... for endian in '<>': - ... dtype = np.dtype(scalar).newbyteorder(endian) - ... basic = np.arange(15).astype(dtype) - ... basic_arrays.extend([ - ... np.array([], dtype=dtype), - ... np.array(10, dtype=dtype), - ... basic, - ... basic.reshape((3,5)), - ... basic.reshape((3,5)).T, - ... basic.reshape((3,5))[::-1,::2], - ... ]) - ... - >>> - >>> Pdescr = [ - ... 
('x', 'i4', (2,)), - ... ('y', 'f8', (2, 2)), - ... ('z', 'u1')] - >>> - >>> - >>> PbufferT = [ - ... ([3,2], [[6.,4.],[6.,4.]], 8), - ... ([4,3], [[7.,5.],[7.,5.]], 9), - ... ] - >>> - >>> - >>> Ndescr = [ - ... ('x', 'i4', (2,)), - ... ('Info', [ - ... ('value', 'c16'), - ... ('y2', 'f8'), - ... ('Info2', [ - ... ('name', 'S2'), - ... ('value', 'c16', (2,)), - ... ('y3', 'f8', (2,)), - ... ('z3', 'u4', (2,))]), - ... ('name', 'S2'), - ... ('z2', 'b1')]), - ... ('color', 'S2'), - ... ('info', [ - ... ('Name', 'U8'), - ... ('Value', 'c16')]), - ... ('y', 'f8', (2, 2)), - ... ('z', 'u1')] - >>> - >>> - >>> NbufferT = [ - ... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8), - ... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9), - ... ] - >>> - >>> - >>> record_arrays = [ - ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')), - ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')), - ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')), - ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')), - ... ] - -Test the magic string writing. - - >>> format.magic(1, 0) - '\x93NUMPY\x01\x00' - >>> format.magic(0, 0) - '\x93NUMPY\x00\x00' - >>> format.magic(255, 255) - '\x93NUMPY\xff\xff' - >>> format.magic(2, 5) - '\x93NUMPY\x02\x05' - -Test the magic string reading. - - >>> format.read_magic(BytesIO(format.magic(1, 0))) - (1, 0) - >>> format.read_magic(BytesIO(format.magic(0, 0))) - (0, 0) - >>> format.read_magic(BytesIO(format.magic(255, 255))) - (255, 255) - >>> format.read_magic(BytesIO(format.magic(2, 5))) - (2, 5) - -Test the header writing. - - >>> for arr in basic_arrays + record_arrays: - ... f = BytesIO() - ... format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it - ... print(repr(f.getvalue())) - ... 
- "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'u2', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>u2', 'fortran_order': True, 'shape': (5, 3)} \n" - 
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'i2', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>i2', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'u4', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>u4', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'i4', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>i4', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'u8', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>u8', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'i8', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 5)} \n" - 
"F\x00{'descr': '>i8', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'f4', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>f4', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'f8', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>f8', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'c8', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>c8', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'c16', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>c16', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n" - 
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n" - "v\x00{'descr': [('x', 'i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" - "\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" -''' -import sys -import os -import shutil -import tempfile -import warnings -import pytest -from io import BytesIO - -import numpy as np -from numpy.testing import ( - assert_, assert_array_equal, assert_raises, assert_raises_regex, - assert_warns - ) -from numpy.lib import format - - -tempdir = None - -# Module-level setup. - - -def setup_module(): - global tempdir - tempdir = tempfile.mkdtemp() - - -def teardown_module(): - global tempdir - if tempdir is not None and os.path.isdir(tempdir): - shutil.rmtree(tempdir) - tempdir = None - - -# Generate some basic arrays to test with. 
-scalars = [ - np.uint8, - np.int8, - np.uint16, - np.int16, - np.uint32, - np.int32, - np.uint64, - np.int64, - np.float32, - np.float64, - np.complex64, - np.complex128, - object, -] -basic_arrays = [] -for scalar in scalars: - for endian in '<>': - dtype = np.dtype(scalar).newbyteorder(endian) - basic = np.arange(1500).astype(dtype) - basic_arrays.extend([ - # Empty - np.array([], dtype=dtype), - # Rank-0 - np.array(10, dtype=dtype), - # 1-D - basic, - # 2-D C-contiguous - basic.reshape((30, 50)), - # 2-D F-contiguous - basic.reshape((30, 50)).T, - # 2-D non-contiguous - basic.reshape((30, 50))[::-1, ::2], - ]) - -# More complicated record arrays. -# This is the structure of the table used for plain objects: -# -# +-+-+-+ -# |x|y|z| -# +-+-+-+ - -# Structure of a plain array description: -Pdescr = [ - ('x', 'i4', (2,)), - ('y', 'f8', (2, 2)), - ('z', 'u1')] - -# A plain list of tuples with values for testing: -PbufferT = [ - # x y z - ([3, 2], [[6., 4.], [6., 4.]], 8), - ([4, 3], [[7., 5.], [7., 5.]], 9), - ] - - -# This is the structure of the table used for nested objects (DON'T PANIC!): -# -# +-+---------------------------------+-----+----------+-+-+ -# |x|Info |color|info |y|z| -# | +-----+--+----------------+----+--+ +----+-----+ | | -# | |value|y2|Info2 |name|z2| |Name|Value| | | -# | | | +----+-----+--+--+ | | | | | | | -# | | | |name|value|y3|z3| | | | | | | | -# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+ -# - -# The corresponding nested array description: -Ndescr = [ - ('x', 'i4', (2,)), - ('Info', [ - ('value', 'c16'), - ('y2', 'f8'), - ('Info2', [ - ('name', 'S2'), - ('value', 'c16', (2,)), - ('y3', 'f8', (2,)), - ('z3', 'u4', (2,))]), - ('name', 'S2'), - ('z2', 'b1')]), - ('color', 'S2'), - ('info', [ - ('Name', 'U8'), - ('Value', 'c16')]), - ('y', 'f8', (2, 2)), - ('z', 'u1')] - -NbufferT = [ - # x Info color info y z - # value y2 Info2 name z2 Name Value - # name value y3 z3 - ([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 
'NN', True), - 'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8), - ([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False), - 'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9), - ] - -record_arrays = [ - np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')), - np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')), - np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')), - np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')), - np.zeros(1, dtype=[('c', ('= 3: - fname = 'win64python2.npy' - else: - fname = 'python3.npy' - path = os.path.join(os.path.dirname(__file__), 'data', fname) - data = np.load(path) - assert_array_equal(data, np.ones(2)) - -def test_pickle_python2_python3(): - # Test that loading object arrays saved on Python 2 works both on - # Python 2 and Python 3 and vice versa - data_dir = os.path.join(os.path.dirname(__file__), 'data') - - if sys.version_info[0] >= 3: - xrange = range - else: - import __builtin__ - xrange = __builtin__.xrange - - expected = np.array([None, xrange, u'\u512a\u826f', - b'\xe4\xb8\x8d\xe8\x89\xaf'], - dtype=object) - - for fname in ['py2-objarr.npy', 'py2-objarr.npz', - 'py3-objarr.npy', 'py3-objarr.npz']: - path = os.path.join(data_dir, fname) - - for encoding in ['bytes', 'latin1']: - data_f = np.load(path, allow_pickle=True, encoding=encoding) - if fname.endswith('.npz'): - data = data_f['x'] - data_f.close() - else: - data = data_f - - if sys.version_info[0] >= 3: - if encoding == 'latin1' and fname.startswith('py2'): - assert_(isinstance(data[3], str)) - assert_array_equal(data[:-1], expected[:-1]) - # mojibake occurs - assert_array_equal(data[-1].encode(encoding), expected[-1]) - else: - assert_(isinstance(data[3], bytes)) - assert_array_equal(data, expected) - else: - assert_array_equal(data, expected) - - if sys.version_info[0] >= 3: - if fname.startswith('py2'): - if fname.endswith('.npz'): - data = np.load(path, allow_pickle=True) - assert_raises(UnicodeError, data.__getitem__, 'x') - 
data.close() - data = np.load(path, allow_pickle=True, fix_imports=False, - encoding='latin1') - assert_raises(ImportError, data.__getitem__, 'x') - data.close() - else: - assert_raises(UnicodeError, np.load, path, - allow_pickle=True) - assert_raises(ImportError, np.load, path, - allow_pickle=True, fix_imports=False, - encoding='latin1') - - -def test_pickle_disallow(): - data_dir = os.path.join(os.path.dirname(__file__), 'data') - - path = os.path.join(data_dir, 'py2-objarr.npy') - assert_raises(ValueError, np.load, path, - allow_pickle=False, encoding='latin1') - - path = os.path.join(data_dir, 'py2-objarr.npz') - f = np.load(path, allow_pickle=False, encoding='latin1') - assert_raises(ValueError, f.__getitem__, 'x') - - path = os.path.join(tempdir, 'pickle-disabled.npy') - assert_raises(ValueError, np.save, path, np.array([None], dtype=object), - allow_pickle=False) - -@pytest.mark.parametrize('dt', [ - np.dtype(np.dtype([('a', np.int8), - ('b', np.int16), - ('c', np.int32), - ], align=True), - (3,)), - np.dtype([('x', np.dtype({'names':['a','b'], - 'formats':['i1','i1'], - 'offsets':[0,4], - 'itemsize':8, - }, - (3,)), - (4,), - )]), - np.dtype([('x', - ('= 0) & (indices < 5)] - assert_array_equal(setxor1d(a_del, self.a[indices, ]), self.a, - err_msg=msg) - xor = setxor1d(nd_a_del[0,:, 0], self.nd_a[0, indices, 0]) - assert_array_equal(xor, self.nd_a[0,:, 0], err_msg=msg) - - def test_slices(self): - lims = [-6, -2, 0, 1, 2, 4, 5] - steps = [-3, -1, 1, 3] - for start in lims: - for stop in lims: - for step in steps: - s = slice(start, stop, step) - self._check_inverse_of_slicing(s) - - def test_fancy(self): - # Deprecation/FutureWarning tests should be kept after change. 
- self._check_inverse_of_slicing(np.array([[0, 1], [2, 1]])) - with warnings.catch_warnings(): - warnings.filterwarnings('error', category=DeprecationWarning) - assert_raises(DeprecationWarning, delete, self.a, [100]) - assert_raises(DeprecationWarning, delete, self.a, [-100]) - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', category=FutureWarning) - self._check_inverse_of_slicing([0, -1, 2, 2]) - obj = np.array([True, False, False], dtype=bool) - self._check_inverse_of_slicing(obj) - assert_(w[0].category is FutureWarning) - assert_(w[1].category is FutureWarning) - - def test_single(self): - self._check_inverse_of_slicing(0) - self._check_inverse_of_slicing(-4) - - def test_0d(self): - a = np.array(1) - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', DeprecationWarning) - assert_equal(delete(a, [], axis=0), a) - assert_(w[0].category is DeprecationWarning) - - def test_subclass(self): - class SubClass(np.ndarray): - pass - a = self.a.view(SubClass) - assert_(isinstance(delete(a, 0), SubClass)) - assert_(isinstance(delete(a, []), SubClass)) - assert_(isinstance(delete(a, [0, 1]), SubClass)) - assert_(isinstance(delete(a, slice(1, 2)), SubClass)) - assert_(isinstance(delete(a, slice(1, -2)), SubClass)) - - def test_array_order_preserve(self): - # See gh-7113 - k = np.arange(10).reshape(2, 5, order='F') - m = delete(k, slice(60, None), axis=1) - - # 'k' is Fortran ordered, and 'm' should have the - # same ordering as 'k' and NOT become C ordered - assert_equal(m.flags.c_contiguous, k.flags.c_contiguous) - assert_equal(m.flags.f_contiguous, k.flags.f_contiguous) - - -class TestGradient(object): - - def test_basic(self): - v = [[1, 1], [3, 4]] - x = np.array(v) - dx = [np.array([[2., 3.], [2., 3.]]), - np.array([[0., 0.], [1., 1.]])] - assert_array_equal(gradient(x), dx) - assert_array_equal(gradient(v), dx) - - def test_args(self): - dx = np.cumsum(np.ones(5)) - dx_uneven = [1., 2., 5., 9., 
11.] - f_2d = np.arange(25).reshape(5, 5) - - # distances must be scalars or have size equal to gradient[axis] - gradient(np.arange(5), 3.) - gradient(np.arange(5), np.array(3.)) - gradient(np.arange(5), dx) - # dy is set equal to dx because scalar - gradient(f_2d, 1.5) - gradient(f_2d, np.array(1.5)) - - gradient(f_2d, dx_uneven, dx_uneven) - # mix between even and uneven spaces and - # mix between scalar and vector - gradient(f_2d, dx, 2) - - # 2D but axis specified - gradient(f_2d, dx, axis=1) - - # 2d coordinate arguments are not yet allowed - assert_raises_regex(ValueError, '.*scalars or 1d', - gradient, f_2d, np.stack([dx]*2, axis=-1), 1) - - def test_badargs(self): - f_2d = np.arange(25).reshape(5, 5) - x = np.cumsum(np.ones(5)) - - # wrong sizes - assert_raises(ValueError, gradient, f_2d, x, np.ones(2)) - assert_raises(ValueError, gradient, f_2d, 1, np.ones(2)) - assert_raises(ValueError, gradient, f_2d, np.ones(2), np.ones(2)) - # wrong number of arguments - assert_raises(TypeError, gradient, f_2d, x) - assert_raises(TypeError, gradient, f_2d, x, axis=(0,1)) - assert_raises(TypeError, gradient, f_2d, x, x, x) - assert_raises(TypeError, gradient, f_2d, 1, 1, 1) - assert_raises(TypeError, gradient, f_2d, x, x, axis=1) - assert_raises(TypeError, gradient, f_2d, 1, 1, axis=1) - - def test_datetime64(self): - # Make sure gradient() can handle special types like datetime64 - x = np.array( - ['1910-08-16', '1910-08-11', '1910-08-10', '1910-08-12', - '1910-10-12', '1910-12-12', '1912-12-12'], - dtype='datetime64[D]') - dx = np.array( - [-5, -3, 0, 31, 61, 396, 731], - dtype='timedelta64[D]') - assert_array_equal(gradient(x), dx) - assert_(dx.dtype == np.dtype('timedelta64[D]')) - - def test_masked(self): - # Make sure that gradient supports subclasses like masked arrays - x = np.ma.array([[1, 1], [3, 4]], - mask=[[False, False], [False, False]]) - out = gradient(x)[0] - assert_equal(type(out), type(x)) - # And make sure that the output and input don't have aliased 
mask - # arrays - assert_(x._mask is not out._mask) - # Also check that edge_order=2 doesn't alter the original mask - x2 = np.ma.arange(5) - x2[2] = np.ma.masked - np.gradient(x2, edge_order=2) - assert_array_equal(x2.mask, [False, False, True, False, False]) - - def test_second_order_accurate(self): - # Testing that the relative numerical error is less that 3% for - # this example problem. This corresponds to second order - # accurate finite differences for all interior and boundary - # points. - x = np.linspace(0, 1, 10) - dx = x[1] - x[0] - y = 2 * x ** 3 + 4 * x ** 2 + 2 * x - analytical = 6 * x ** 2 + 8 * x + 2 - num_error = np.abs((np.gradient(y, dx, edge_order=2) / analytical) - 1) - assert_(np.all(num_error < 0.03) == True) - - # test with unevenly spaced - np.random.seed(0) - x = np.sort(np.random.random(10)) - y = 2 * x ** 3 + 4 * x ** 2 + 2 * x - analytical = 6 * x ** 2 + 8 * x + 2 - num_error = np.abs((np.gradient(y, x, edge_order=2) / analytical) - 1) - assert_(np.all(num_error < 0.03) == True) - - def test_spacing(self): - f = np.array([0, 2., 3., 4., 5., 5.]) - f = np.tile(f, (6,1)) + f.reshape(-1, 1) - x_uneven = np.array([0., 0.5, 1., 3., 5., 7.]) - x_even = np.arange(6.) 
- - fdx_even_ord1 = np.tile([2., 1.5, 1., 1., 0.5, 0.], (6,1)) - fdx_even_ord2 = np.tile([2.5, 1.5, 1., 1., 0.5, -0.5], (6,1)) - fdx_uneven_ord1 = np.tile([4., 3., 1.7, 0.5, 0.25, 0.], (6,1)) - fdx_uneven_ord2 = np.tile([5., 3., 1.7, 0.5, 0.25, -0.25], (6,1)) - - # evenly spaced - for edge_order, exp_res in [(1, fdx_even_ord1), (2, fdx_even_ord2)]: - res1 = gradient(f, 1., axis=(0,1), edge_order=edge_order) - res2 = gradient(f, x_even, x_even, - axis=(0,1), edge_order=edge_order) - res3 = gradient(f, x_even, x_even, - axis=None, edge_order=edge_order) - assert_array_equal(res1, res2) - assert_array_equal(res2, res3) - assert_almost_equal(res1[0], exp_res.T) - assert_almost_equal(res1[1], exp_res) - - res1 = gradient(f, 1., axis=0, edge_order=edge_order) - res2 = gradient(f, x_even, axis=0, edge_order=edge_order) - assert_(res1.shape == res2.shape) - assert_almost_equal(res2, exp_res.T) - - res1 = gradient(f, 1., axis=1, edge_order=edge_order) - res2 = gradient(f, x_even, axis=1, edge_order=edge_order) - assert_(res1.shape == res2.shape) - assert_array_equal(res2, exp_res) - - # unevenly spaced - for edge_order, exp_res in [(1, fdx_uneven_ord1), (2, fdx_uneven_ord2)]: - res1 = gradient(f, x_uneven, x_uneven, - axis=(0,1), edge_order=edge_order) - res2 = gradient(f, x_uneven, x_uneven, - axis=None, edge_order=edge_order) - assert_array_equal(res1, res2) - assert_almost_equal(res1[0], exp_res.T) - assert_almost_equal(res1[1], exp_res) - - res1 = gradient(f, x_uneven, axis=0, edge_order=edge_order) - assert_almost_equal(res1, exp_res.T) - - res1 = gradient(f, x_uneven, axis=1, edge_order=edge_order) - assert_almost_equal(res1, exp_res) - - # mixed - res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=1) - res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=1) - assert_array_equal(res1[0], res2[1]) - assert_array_equal(res1[1], res2[0]) - assert_almost_equal(res1[0], fdx_even_ord1.T) - assert_almost_equal(res1[1], fdx_uneven_ord1) - - res1 = gradient(f, 
x_even, x_uneven, axis=(0,1), edge_order=2) - res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=2) - assert_array_equal(res1[0], res2[1]) - assert_array_equal(res1[1], res2[0]) - assert_almost_equal(res1[0], fdx_even_ord2.T) - assert_almost_equal(res1[1], fdx_uneven_ord2) - - def test_specific_axes(self): - # Testing that gradient can work on a given axis only - v = [[1, 1], [3, 4]] - x = np.array(v) - dx = [np.array([[2., 3.], [2., 3.]]), - np.array([[0., 0.], [1., 1.]])] - assert_array_equal(gradient(x, axis=0), dx[0]) - assert_array_equal(gradient(x, axis=1), dx[1]) - assert_array_equal(gradient(x, axis=-1), dx[1]) - assert_array_equal(gradient(x, axis=(1, 0)), [dx[1], dx[0]]) - - # test axis=None which means all axes - assert_almost_equal(gradient(x, axis=None), [dx[0], dx[1]]) - # and is the same as no axis keyword given - assert_almost_equal(gradient(x, axis=None), gradient(x)) - - # test vararg order - assert_array_equal(gradient(x, 2, 3, axis=(1, 0)), - [dx[1]/2.0, dx[0]/3.0]) - # test maximal number of varargs - assert_raises(TypeError, gradient, x, 1, 2, axis=1) - - assert_raises(np.AxisError, gradient, x, axis=3) - assert_raises(np.AxisError, gradient, x, axis=-3) - # assert_raises(TypeError, gradient, x, axis=[1,]) - - def test_timedelta64(self): - # Make sure gradient() can handle special types like timedelta64 - x = np.array( - [-5, -3, 10, 12, 61, 321, 300], - dtype='timedelta64[D]') - dx = np.array( - [2, 7, 7, 25, 154, 119, -21], - dtype='timedelta64[D]') - assert_array_equal(gradient(x), dx) - assert_(dx.dtype == np.dtype('timedelta64[D]')) - - def test_inexact_dtypes(self): - for dt in [np.float16, np.float32, np.float64]: - # dtypes should not be promoted in a different way to what diff does - x = np.array([1, 2, 3], dtype=dt) - assert_equal(gradient(x).dtype, np.diff(x).dtype) - - def test_values(self): - # needs at least 2 points for edge_order ==1 - gradient(np.arange(2), edge_order=1) - # needs at least 3 points for edge_order ==1 
- gradient(np.arange(3), edge_order=2) - - assert_raises(ValueError, gradient, np.arange(0), edge_order=1) - assert_raises(ValueError, gradient, np.arange(0), edge_order=2) - assert_raises(ValueError, gradient, np.arange(1), edge_order=1) - assert_raises(ValueError, gradient, np.arange(1), edge_order=2) - assert_raises(ValueError, gradient, np.arange(2), edge_order=2) - - @pytest.mark.parametrize('f_dtype', [np.uint8, np.uint16, - np.uint32, np.uint64]) - def test_f_decreasing_unsigned_int(self, f_dtype): - f = np.array([5, 4, 3, 2, 1], dtype=f_dtype) - g = gradient(f) - assert_array_equal(g, [-1]*len(f)) - - @pytest.mark.parametrize('f_dtype', [np.int8, np.int16, - np.int32, np.int64]) - def test_f_signed_int_big_jump(self, f_dtype): - maxint = np.iinfo(f_dtype).max - x = np.array([1, 3]) - f = np.array([-1, maxint], dtype=f_dtype) - dfdx = gradient(f, x) - assert_array_equal(dfdx, [(maxint + 1) // 2]*2) - - @pytest.mark.parametrize('x_dtype', [np.uint8, np.uint16, - np.uint32, np.uint64]) - def test_x_decreasing_unsigned(self, x_dtype): - x = np.array([3, 2, 1], dtype=x_dtype) - f = np.array([0, 2, 4]) - dfdx = gradient(f, x) - assert_array_equal(dfdx, [-2]*len(x)) - - @pytest.mark.parametrize('x_dtype', [np.int8, np.int16, - np.int32, np.int64]) - def test_x_signed_int_big_jump(self, x_dtype): - minint = np.iinfo(x_dtype).min - maxint = np.iinfo(x_dtype).max - x = np.array([-1, maxint], dtype=x_dtype) - f = np.array([minint // 2, 0]) - dfdx = gradient(f, x) - assert_array_equal(dfdx, [0.5, 0.5]) - - -class TestAngle(object): - - def test_basic(self): - x = [1 + 3j, np.sqrt(2) / 2.0 + 1j * np.sqrt(2) / 2, - 1, 1j, -1, -1j, 1 - 3j, -1 + 3j] - y = angle(x) - yo = [ - np.arctan(3.0 / 1.0), - np.arctan(1.0), 0, np.pi / 2, np.pi, -np.pi / 2.0, - -np.arctan(3.0 / 1.0), np.pi - np.arctan(3.0 / 1.0)] - z = angle(x, deg=True) - zo = np.array(yo) * 180 / np.pi - assert_array_almost_equal(y, yo, 11) - assert_array_almost_equal(z, zo, 11) - - def test_subclass(self): - x = 
np.ma.array([1 + 3j, 1, np.sqrt(2)/2 * (1 + 1j)]) - x[1] = np.ma.masked - expected = np.ma.array([np.arctan(3.0 / 1.0), 0, np.arctan(1.0)]) - expected[1] = np.ma.masked - actual = angle(x) - assert_equal(type(actual), type(expected)) - assert_equal(actual.mask, expected.mask) - assert_equal(actual, expected) - - -class TestTrimZeros(object): - - """ - Only testing for integer splits. - - """ - - def test_basic(self): - a = np.array([0, 0, 1, 2, 3, 4, 0]) - res = trim_zeros(a) - assert_array_equal(res, np.array([1, 2, 3, 4])) - - def test_leading_skip(self): - a = np.array([0, 0, 1, 0, 2, 3, 4, 0]) - res = trim_zeros(a) - assert_array_equal(res, np.array([1, 0, 2, 3, 4])) - - def test_trailing_skip(self): - a = np.array([0, 0, 1, 0, 2, 3, 0, 4, 0]) - res = trim_zeros(a) - assert_array_equal(res, np.array([1, 0, 2, 3, 0, 4])) - - -class TestExtins(object): - - def test_basic(self): - a = np.array([1, 3, 2, 1, 2, 3, 3]) - b = extract(a > 1, a) - assert_array_equal(b, [3, 2, 2, 3, 3]) - - def test_place(self): - # Make sure that non-np.ndarray objects - # raise an error instead of doing nothing - assert_raises(TypeError, place, [1, 2, 3], [True, False], [0, 1]) - - a = np.array([1, 4, 3, 2, 5, 8, 7]) - place(a, [0, 1, 0, 1, 0, 1, 0], [2, 4, 6]) - assert_array_equal(a, [1, 2, 3, 4, 5, 6, 7]) - - place(a, np.zeros(7), []) - assert_array_equal(a, np.arange(1, 8)) - - place(a, [1, 0, 1, 0, 1, 0, 1], [8, 9]) - assert_array_equal(a, [8, 2, 9, 4, 8, 6, 9]) - assert_raises_regex(ValueError, "Cannot insert from an empty array", - lambda: place(a, [0, 0, 0, 0, 0, 1, 0], [])) - - # See Issue #6974 - a = np.array(['12', '34']) - place(a, [0, 1], '9') - assert_array_equal(a, ['12', '9']) - - def test_both(self): - a = rand(10) - mask = a > 0.5 - ac = a.copy() - c = extract(mask, a) - place(a, mask, 0) - place(a, mask, c) - assert_array_equal(a, ac) - - -class TestVectorize(object): - - def test_simple(self): - def addsubtract(a, b): - if a > b: - return a - b - else: - return a + b 
- - f = vectorize(addsubtract) - r = f([0, 3, 6, 9], [1, 3, 5, 7]) - assert_array_equal(r, [1, 6, 1, 2]) - - def test_scalar(self): - def addsubtract(a, b): - if a > b: - return a - b - else: - return a + b - - f = vectorize(addsubtract) - r = f([0, 3, 6, 9], 5) - assert_array_equal(r, [5, 8, 1, 4]) - - def test_large(self): - x = np.linspace(-3, 2, 10000) - f = vectorize(lambda x: x) - y = f(x) - assert_array_equal(y, x) - - def test_ufunc(self): - import math - f = vectorize(math.cos) - args = np.array([0, 0.5 * np.pi, np.pi, 1.5 * np.pi, 2 * np.pi]) - r1 = f(args) - r2 = np.cos(args) - assert_array_almost_equal(r1, r2) - - def test_keywords(self): - - def foo(a, b=1): - return a + b - - f = vectorize(foo) - args = np.array([1, 2, 3]) - r1 = f(args) - r2 = np.array([2, 3, 4]) - assert_array_equal(r1, r2) - r1 = f(args, 2) - r2 = np.array([3, 4, 5]) - assert_array_equal(r1, r2) - - def test_keywords_no_func_code(self): - # This needs to test a function that has keywords but - # no func_code attribute, since otherwise vectorize will - # inspect the func_code. 
- import random - try: - vectorize(random.randrange) # Should succeed - except Exception: - raise AssertionError() - - def test_keywords2_ticket_2100(self): - # Test kwarg support: enhancement ticket 2100 - - def foo(a, b=1): - return a + b - - f = vectorize(foo) - args = np.array([1, 2, 3]) - r1 = f(a=args) - r2 = np.array([2, 3, 4]) - assert_array_equal(r1, r2) - r1 = f(b=1, a=args) - assert_array_equal(r1, r2) - r1 = f(args, b=2) - r2 = np.array([3, 4, 5]) - assert_array_equal(r1, r2) - - def test_keywords3_ticket_2100(self): - # Test excluded with mixed positional and kwargs: ticket 2100 - def mypolyval(x, p): - _p = list(p) - res = _p.pop(0) - while _p: - res = res * x + _p.pop(0) - return res - - vpolyval = np.vectorize(mypolyval, excluded=['p', 1]) - ans = [3, 6] - assert_array_equal(ans, vpolyval(x=[0, 1], p=[1, 2, 3])) - assert_array_equal(ans, vpolyval([0, 1], p=[1, 2, 3])) - assert_array_equal(ans, vpolyval([0, 1], [1, 2, 3])) - - def test_keywords4_ticket_2100(self): - # Test vectorizing function with no positional args. - @vectorize - def f(**kw): - res = 1.0 - for _k in kw: - res *= kw[_k] - return res - - assert_array_equal(f(a=[1, 2], b=[3, 4]), [3, 8]) - - def test_keywords5_ticket_2100(self): - # Test vectorizing function with no kwargs args. 
- @vectorize - def f(*v): - return np.prod(v) - - assert_array_equal(f([1, 2], [3, 4]), [3, 8]) - - def test_coverage1_ticket_2100(self): - def foo(): - return 1 - - f = vectorize(foo) - assert_array_equal(f(), 1) - - def test_assigning_docstring(self): - def foo(x): - """Original documentation""" - return x - - f = vectorize(foo) - assert_equal(f.__doc__, foo.__doc__) - - doc = "Provided documentation" - f = vectorize(foo, doc=doc) - assert_equal(f.__doc__, doc) - - def test_UnboundMethod_ticket_1156(self): - # Regression test for issue 1156 - class Foo: - b = 2 - - def bar(self, a): - return a ** self.b - - assert_array_equal(vectorize(Foo().bar)(np.arange(9)), - np.arange(9) ** 2) - assert_array_equal(vectorize(Foo.bar)(Foo(), np.arange(9)), - np.arange(9) ** 2) - - def test_execution_order_ticket_1487(self): - # Regression test for dependence on execution order: issue 1487 - f1 = vectorize(lambda x: x) - res1a = f1(np.arange(3)) - res1b = f1(np.arange(0.1, 3)) - f2 = vectorize(lambda x: x) - res2b = f2(np.arange(0.1, 3)) - res2a = f2(np.arange(3)) - assert_equal(res1a, res2a) - assert_equal(res1b, res2b) - - def test_string_ticket_1892(self): - # Test vectorization over strings: issue 1892. - f = np.vectorize(lambda x: x) - s = '0123456789' * 10 - assert_equal(s, f(s)) - - def test_cache(self): - # Ensure that vectorized func called exactly once per argument. 
- _calls = [0] - - @vectorize - def f(x): - _calls[0] += 1 - return x ** 2 - - f.cache = True - x = np.arange(5) - assert_array_equal(f(x), x * x) - assert_equal(_calls[0], len(x)) - - def test_otypes(self): - f = np.vectorize(lambda x: x) - f.otypes = 'i' - x = np.arange(5) - assert_array_equal(f(x), x) - - def test_parse_gufunc_signature(self): - assert_equal(nfb._parse_gufunc_signature('(x)->()'), ([('x',)], [()])) - assert_equal(nfb._parse_gufunc_signature('(x,y)->()'), - ([('x', 'y')], [()])) - assert_equal(nfb._parse_gufunc_signature('(x),(y)->()'), - ([('x',), ('y',)], [()])) - assert_equal(nfb._parse_gufunc_signature('(x)->(y)'), - ([('x',)], [('y',)])) - assert_equal(nfb._parse_gufunc_signature('(x)->(y),()'), - ([('x',)], [('y',), ()])) - assert_equal(nfb._parse_gufunc_signature('(),(a,b,c),(d)->(d,e)'), - ([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')])) - with assert_raises(ValueError): - nfb._parse_gufunc_signature('(x)(y)->()') - with assert_raises(ValueError): - nfb._parse_gufunc_signature('(x),(y)->') - with assert_raises(ValueError): - nfb._parse_gufunc_signature('((x))->(x)') - - def test_signature_simple(self): - def addsubtract(a, b): - if a > b: - return a - b - else: - return a + b - - f = vectorize(addsubtract, signature='(),()->()') - r = f([0, 3, 6, 9], [1, 3, 5, 7]) - assert_array_equal(r, [1, 6, 1, 2]) - - def test_signature_mean_last(self): - def mean(a): - return a.mean() - - f = vectorize(mean, signature='(n)->()') - r = f([[1, 3], [2, 4]]) - assert_array_equal(r, [2, 3]) - - def test_signature_center(self): - def center(a): - return a - a.mean() - - f = vectorize(center, signature='(n)->(n)') - r = f([[1, 3], [2, 4]]) - assert_array_equal(r, [[-1, 1], [-1, 1]]) - - def test_signature_two_outputs(self): - f = vectorize(lambda x: (x, x), signature='()->(),()') - r = f([1, 2, 3]) - assert_(isinstance(r, tuple) and len(r) == 2) - assert_array_equal(r[0], [1, 2, 3]) - assert_array_equal(r[1], [1, 2, 3]) - - def test_signature_outer(self): - f 
= vectorize(np.outer, signature='(a),(b)->(a,b)') - r = f([1, 2], [1, 2, 3]) - assert_array_equal(r, [[1, 2, 3], [2, 4, 6]]) - - r = f([[[1, 2]]], [1, 2, 3]) - assert_array_equal(r, [[[[1, 2, 3], [2, 4, 6]]]]) - - r = f([[1, 0], [2, 0]], [1, 2, 3]) - assert_array_equal(r, [[[1, 2, 3], [0, 0, 0]], - [[2, 4, 6], [0, 0, 0]]]) - - r = f([1, 2], [[1, 2, 3], [0, 0, 0]]) - assert_array_equal(r, [[[1, 2, 3], [2, 4, 6]], - [[0, 0, 0], [0, 0, 0]]]) - - def test_signature_computed_size(self): - f = vectorize(lambda x: x[:-1], signature='(n)->(m)') - r = f([1, 2, 3]) - assert_array_equal(r, [1, 2]) - - r = f([[1, 2, 3], [2, 3, 4]]) - assert_array_equal(r, [[1, 2], [2, 3]]) - - def test_signature_excluded(self): - - def foo(a, b=1): - return a + b - - f = vectorize(foo, signature='()->()', excluded={'b'}) - assert_array_equal(f([1, 2, 3]), [2, 3, 4]) - assert_array_equal(f([1, 2, 3], b=0), [1, 2, 3]) - - def test_signature_otypes(self): - f = vectorize(lambda x: x, signature='(n)->(n)', otypes=['float64']) - r = f([1, 2, 3]) - assert_equal(r.dtype, np.dtype('float64')) - assert_array_equal(r, [1, 2, 3]) - - def test_signature_invalid_inputs(self): - f = vectorize(operator.add, signature='(n),(n)->(n)') - with assert_raises_regex(TypeError, 'wrong number of positional'): - f([1, 2]) - with assert_raises_regex( - ValueError, 'does not have enough dimensions'): - f(1, 2) - with assert_raises_regex( - ValueError, 'inconsistent size for core dimension'): - f([1, 2], [1, 2, 3]) - - f = vectorize(operator.add, signature='()->()') - with assert_raises_regex(TypeError, 'wrong number of positional'): - f(1, 2) - - def test_signature_invalid_outputs(self): - - f = vectorize(lambda x: x[:-1], signature='(n)->(n)') - with assert_raises_regex( - ValueError, 'inconsistent size for core dimension'): - f([1, 2, 3]) - - f = vectorize(lambda x: x, signature='()->(),()') - with assert_raises_regex(ValueError, 'wrong number of outputs'): - f(1) - - f = vectorize(lambda x: (x, x), 
signature='()->()') - with assert_raises_regex(ValueError, 'wrong number of outputs'): - f([1, 2]) - - def test_size_zero_output(self): - # see issue 5868 - f = np.vectorize(lambda x: x) - x = np.zeros([0, 5], dtype=int) - with assert_raises_regex(ValueError, 'otypes'): - f(x) - - f.otypes = 'i' - assert_array_equal(f(x), x) - - f = np.vectorize(lambda x: x, signature='()->()') - with assert_raises_regex(ValueError, 'otypes'): - f(x) - - f = np.vectorize(lambda x: x, signature='()->()', otypes='i') - assert_array_equal(f(x), x) - - f = np.vectorize(lambda x: x, signature='(n)->(n)', otypes='i') - assert_array_equal(f(x), x) - - f = np.vectorize(lambda x: x, signature='(n)->(n)') - assert_array_equal(f(x.T), x.T) - - f = np.vectorize(lambda x: [x], signature='()->(n)', otypes='i') - with assert_raises_regex(ValueError, 'new output dimensions'): - f(x) - - -class TestLeaks(object): - class A(object): - iters = 20 - - def bound(self, *args): - return 0 - - @staticmethod - def unbound(*args): - return 0 - - @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") - @pytest.mark.parametrize('name, incr', [ - ('bound', A.iters), - ('unbound', 0), - ]) - def test_frompyfunc_leaks(self, name, incr): - # exposed in gh-11867 as np.vectorized, but the problem stems from - # frompyfunc. - # class.attribute = np.frompyfunc() creates a - # reference cycle if is a bound class method. 
It requires a - # gc collection cycle to break the cycle (on CPython 3) - import gc - A_func = getattr(self.A, name) - gc.disable() - try: - refcount = sys.getrefcount(A_func) - for i in range(self.A.iters): - a = self.A() - a.f = np.frompyfunc(getattr(a, name), 1, 1) - out = a.f(np.arange(10)) - a = None - if PY2: - assert_equal(sys.getrefcount(A_func), refcount) - else: - # A.func is part of a reference cycle if incr is non-zero - assert_equal(sys.getrefcount(A_func), refcount + incr) - for i in range(5): - gc.collect() - assert_equal(sys.getrefcount(A_func), refcount) - finally: - gc.enable() - -class TestDigitize(object): - - def test_forward(self): - x = np.arange(-6, 5) - bins = np.arange(-5, 5) - assert_array_equal(digitize(x, bins), np.arange(11)) - - def test_reverse(self): - x = np.arange(5, -6, -1) - bins = np.arange(5, -5, -1) - assert_array_equal(digitize(x, bins), np.arange(11)) - - def test_random(self): - x = rand(10) - bin = np.linspace(x.min(), x.max(), 10) - assert_(np.all(digitize(x, bin) != 0)) - - def test_right_basic(self): - x = [1, 5, 4, 10, 8, 11, 0] - bins = [1, 5, 10] - default_answer = [1, 2, 1, 3, 2, 3, 0] - assert_array_equal(digitize(x, bins), default_answer) - right_answer = [0, 1, 1, 2, 2, 3, 0] - assert_array_equal(digitize(x, bins, True), right_answer) - - def test_right_open(self): - x = np.arange(-6, 5) - bins = np.arange(-6, 4) - assert_array_equal(digitize(x, bins, True), np.arange(11)) - - def test_right_open_reverse(self): - x = np.arange(5, -6, -1) - bins = np.arange(4, -6, -1) - assert_array_equal(digitize(x, bins, True), np.arange(11)) - - def test_right_open_random(self): - x = rand(10) - bins = np.linspace(x.min(), x.max(), 10) - assert_(np.all(digitize(x, bins, True) != 10)) - - def test_monotonic(self): - x = [-1, 0, 1, 2] - bins = [0, 0, 1] - assert_array_equal(digitize(x, bins, False), [0, 2, 3, 3]) - assert_array_equal(digitize(x, bins, True), [0, 0, 2, 3]) - bins = [1, 1, 0] - assert_array_equal(digitize(x, bins, 
False), [3, 2, 0, 0]) - assert_array_equal(digitize(x, bins, True), [3, 3, 2, 0]) - bins = [1, 1, 1, 1] - assert_array_equal(digitize(x, bins, False), [0, 0, 4, 4]) - assert_array_equal(digitize(x, bins, True), [0, 0, 0, 4]) - bins = [0, 0, 1, 0] - assert_raises(ValueError, digitize, x, bins) - bins = [1, 1, 0, 1] - assert_raises(ValueError, digitize, x, bins) - - def test_casting_error(self): - x = [1, 2, 3 + 1.j] - bins = [1, 2, 3] - assert_raises(TypeError, digitize, x, bins) - x, bins = bins, x - assert_raises(TypeError, digitize, x, bins) - - def test_return_type(self): - # Functions returning indices should always return base ndarrays - class A(np.ndarray): - pass - a = np.arange(5).view(A) - b = np.arange(1, 3).view(A) - assert_(not isinstance(digitize(b, a, False), A)) - assert_(not isinstance(digitize(b, a, True), A)) - - def test_large_integers_increasing(self): - # gh-11022 - x = 2**54 # loses precision in a float - assert_equal(np.digitize(x, [x - 1, x + 1]), 1) - - @pytest.mark.xfail( - reason="gh-11022: np.core.multiarray._monoticity loses precision") - def test_large_integers_decreasing(self): - # gh-11022 - x = 2**54 # loses precision in a float - assert_equal(np.digitize(x, [x + 1, x - 1]), 1) - - -class TestUnwrap(object): - - def test_simple(self): - # check that unwrap removes jumps greater that 2*pi - assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1]) - # check that unwrap maintains continuity - assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi)) - - -class TestFilterwindows(object): - - def test_hanning(self): - # check symmetry - w = hanning(10) - assert_array_almost_equal(w, flipud(w), 7) - # check known value - assert_almost_equal(np.sum(w, axis=0), 4.500, 4) - - def test_hamming(self): - # check symmetry - w = hamming(10) - assert_array_almost_equal(w, flipud(w), 7) - # check known value - assert_almost_equal(np.sum(w, axis=0), 4.9400, 4) - - def test_bartlett(self): - # check symmetry - w = bartlett(10) - 
assert_array_almost_equal(w, flipud(w), 7) - # check known value - assert_almost_equal(np.sum(w, axis=0), 4.4444, 4) - - def test_blackman(self): - # check symmetry - w = blackman(10) - assert_array_almost_equal(w, flipud(w), 7) - # check known value - assert_almost_equal(np.sum(w, axis=0), 3.7800, 4) - - -class TestTrapz(object): - - def test_simple(self): - x = np.arange(-10, 10, .1) - r = trapz(np.exp(-.5 * x ** 2) / np.sqrt(2 * np.pi), dx=0.1) - # check integral of normal equals 1 - assert_almost_equal(r, 1, 7) - - def test_ndim(self): - x = np.linspace(0, 1, 3) - y = np.linspace(0, 2, 8) - z = np.linspace(0, 3, 13) - - wx = np.ones_like(x) * (x[1] - x[0]) - wx[0] /= 2 - wx[-1] /= 2 - wy = np.ones_like(y) * (y[1] - y[0]) - wy[0] /= 2 - wy[-1] /= 2 - wz = np.ones_like(z) * (z[1] - z[0]) - wz[0] /= 2 - wz[-1] /= 2 - - q = x[:, None, None] + y[None,:, None] + z[None, None,:] - - qx = (q * wx[:, None, None]).sum(axis=0) - qy = (q * wy[None, :, None]).sum(axis=1) - qz = (q * wz[None, None, :]).sum(axis=2) - - # n-d `x` - r = trapz(q, x=x[:, None, None], axis=0) - assert_almost_equal(r, qx) - r = trapz(q, x=y[None,:, None], axis=1) - assert_almost_equal(r, qy) - r = trapz(q, x=z[None, None,:], axis=2) - assert_almost_equal(r, qz) - - # 1-d `x` - r = trapz(q, x=x, axis=0) - assert_almost_equal(r, qx) - r = trapz(q, x=y, axis=1) - assert_almost_equal(r, qy) - r = trapz(q, x=z, axis=2) - assert_almost_equal(r, qz) - - def test_masked(self): - # Testing that masked arrays behave as if the function is 0 where - # masked - x = np.arange(5) - y = x * x - mask = x == 2 - ym = np.ma.array(y, mask=mask) - r = 13.0 # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16)) - assert_almost_equal(trapz(ym, x), r) - - xm = np.ma.array(x, mask=mask) - assert_almost_equal(trapz(ym, xm), r) - - xm = np.ma.array(x, mask=mask) - assert_almost_equal(trapz(y, xm), r) - - -class TestSinc(object): - - def test_simple(self): - assert_(sinc(0) == 1) - w = sinc(np.linspace(-1, 1, 100)) - # check symmetry - 
assert_array_almost_equal(w, flipud(w), 7) - - def test_array_like(self): - x = [0, 0.5] - y1 = sinc(np.array(x)) - y2 = sinc(list(x)) - y3 = sinc(tuple(x)) - assert_array_equal(y1, y2) - assert_array_equal(y1, y3) - - -class TestUnique(object): - - def test_simple(self): - x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0]) - assert_(np.all(unique(x) == [0, 1, 2, 3, 4])) - assert_(unique(np.array([1, 1, 1, 1, 1])) == np.array([1])) - x = ['widget', 'ham', 'foo', 'bar', 'foo', 'ham'] - assert_(np.all(unique(x) == ['bar', 'foo', 'ham', 'widget'])) - x = np.array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j]) - assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10])) - - -class TestCheckFinite(object): - - def test_simple(self): - a = [1, 2, 3] - b = [1, 2, np.inf] - c = [1, 2, np.nan] - np.lib.asarray_chkfinite(a) - assert_raises(ValueError, np.lib.asarray_chkfinite, b) - assert_raises(ValueError, np.lib.asarray_chkfinite, c) - - def test_dtype_order(self): - # Regression test for missing dtype and order arguments - a = [1, 2, 3] - a = np.lib.asarray_chkfinite(a, order='F', dtype=np.float64) - assert_(a.dtype == np.float64) - - -class TestCorrCoef(object): - A = np.array( - [[0.15391142, 0.18045767, 0.14197213], - [0.70461506, 0.96474128, 0.27906989], - [0.9297531, 0.32296769, 0.19267156]]) - B = np.array( - [[0.10377691, 0.5417086, 0.49807457], - [0.82872117, 0.77801674, 0.39226705], - [0.9314666, 0.66800209, 0.03538394]]) - res1 = np.array( - [[1., 0.9379533, -0.04931983], - [0.9379533, 1., 0.30007991], - [-0.04931983, 0.30007991, 1.]]) - res2 = np.array( - [[1., 0.9379533, -0.04931983, 0.30151751, 0.66318558, 0.51532523], - [0.9379533, 1., 0.30007991, -0.04781421, 0.88157256, 0.78052386], - [-0.04931983, 0.30007991, 1., -0.96717111, 0.71483595, 0.83053601], - [0.30151751, -0.04781421, -0.96717111, 1., -0.51366032, -0.66173113], - [0.66318558, 0.88157256, 0.71483595, -0.51366032, 1., 0.98317823], - [0.51532523, 0.78052386, 0.83053601, -0.66173113, 0.98317823, 1.]]) - - def 
test_non_array(self): - assert_almost_equal(np.corrcoef([0, 1, 0], [1, 0, 1]), - [[1., -1.], [-1., 1.]]) - - def test_simple(self): - tgt1 = corrcoef(self.A) - assert_almost_equal(tgt1, self.res1) - assert_(np.all(np.abs(tgt1) <= 1.0)) - - tgt2 = corrcoef(self.A, self.B) - assert_almost_equal(tgt2, self.res2) - assert_(np.all(np.abs(tgt2) <= 1.0)) - - def test_ddof(self): - # ddof raises DeprecationWarning - with suppress_warnings() as sup: - warnings.simplefilter("always") - assert_warns(DeprecationWarning, corrcoef, self.A, ddof=-1) - sup.filter(DeprecationWarning) - # ddof has no or negligible effect on the function - assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1) - assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2) - assert_almost_equal(corrcoef(self.A, ddof=3), self.res1) - assert_almost_equal(corrcoef(self.A, self.B, ddof=3), self.res2) - - def test_bias(self): - # bias raises DeprecationWarning - with suppress_warnings() as sup: - warnings.simplefilter("always") - assert_warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0) - assert_warns(DeprecationWarning, corrcoef, self.A, bias=0) - sup.filter(DeprecationWarning) - # bias has no or negligible effect on the function - assert_almost_equal(corrcoef(self.A, bias=1), self.res1) - - def test_complex(self): - x = np.array([[1, 2, 3], [1j, 2j, 3j]]) - res = corrcoef(x) - tgt = np.array([[1., -1.j], [1.j, 1.]]) - assert_allclose(res, tgt) - assert_(np.all(np.abs(res) <= 1.0)) - - def test_xy(self): - x = np.array([[1, 2, 3]]) - y = np.array([[1j, 2j, 3j]]) - assert_allclose(np.corrcoef(x, y), np.array([[1., -1.j], [1.j, 1.]])) - - def test_empty(self): - with warnings.catch_warnings(record=True): - warnings.simplefilter('always', RuntimeWarning) - assert_array_equal(corrcoef(np.array([])), np.nan) - assert_array_equal(corrcoef(np.array([]).reshape(0, 2)), - np.array([]).reshape(0, 0)) - assert_array_equal(corrcoef(np.array([]).reshape(2, 0)), - np.array([[np.nan, np.nan], [np.nan, 
np.nan]])) - - def test_extreme(self): - x = [[1e-100, 1e100], [1e100, 1e-100]] - with np.errstate(all='raise'): - c = corrcoef(x) - assert_array_almost_equal(c, np.array([[1., -1.], [-1., 1.]])) - assert_(np.all(np.abs(c) <= 1.0)) - - -class TestCov(object): - x1 = np.array([[0, 2], [1, 1], [2, 0]]).T - res1 = np.array([[1., -1.], [-1., 1.]]) - x2 = np.array([0.0, 1.0, 2.0], ndmin=2) - frequencies = np.array([1, 4, 1]) - x2_repeats = np.array([[0.0], [1.0], [1.0], [1.0], [1.0], [2.0]]).T - res2 = np.array([[0.4, -0.4], [-0.4, 0.4]]) - unit_frequencies = np.ones(3, dtype=np.integer) - weights = np.array([1.0, 4.0, 1.0]) - res3 = np.array([[2. / 3., -2. / 3.], [-2. / 3., 2. / 3.]]) - unit_weights = np.ones(3) - x3 = np.array([0.3942, 0.5969, 0.7730, 0.9918, 0.7964]) - - def test_basic(self): - assert_allclose(cov(self.x1), self.res1) - - def test_complex(self): - x = np.array([[1, 2, 3], [1j, 2j, 3j]]) - res = np.array([[1., -1.j], [1.j, 1.]]) - assert_allclose(cov(x), res) - assert_allclose(cov(x, aweights=np.ones(3)), res) - - def test_xy(self): - x = np.array([[1, 2, 3]]) - y = np.array([[1j, 2j, 3j]]) - assert_allclose(cov(x, y), np.array([[1., -1.j], [1.j, 1.]])) - - def test_empty(self): - with warnings.catch_warnings(record=True): - warnings.simplefilter('always', RuntimeWarning) - assert_array_equal(cov(np.array([])), np.nan) - assert_array_equal(cov(np.array([]).reshape(0, 2)), - np.array([]).reshape(0, 0)) - assert_array_equal(cov(np.array([]).reshape(2, 0)), - np.array([[np.nan, np.nan], [np.nan, np.nan]])) - - def test_wrong_ddof(self): - with warnings.catch_warnings(record=True): - warnings.simplefilter('always', RuntimeWarning) - assert_array_equal(cov(self.x1, ddof=5), - np.array([[np.inf, -np.inf], - [-np.inf, np.inf]])) - - def test_1D_rowvar(self): - assert_allclose(cov(self.x3), cov(self.x3, rowvar=False)) - y = np.array([0.0780, 0.3107, 0.2111, 0.0334, 0.8501]) - assert_allclose(cov(self.x3, y), cov(self.x3, y, rowvar=False)) - - def 
test_1D_variance(self): - assert_allclose(cov(self.x3, ddof=1), np.var(self.x3, ddof=1)) - - def test_fweights(self): - assert_allclose(cov(self.x2, fweights=self.frequencies), - cov(self.x2_repeats)) - assert_allclose(cov(self.x1, fweights=self.frequencies), - self.res2) - assert_allclose(cov(self.x1, fweights=self.unit_frequencies), - self.res1) - nonint = self.frequencies + 0.5 - assert_raises(TypeError, cov, self.x1, fweights=nonint) - f = np.ones((2, 3), dtype=np.integer) - assert_raises(RuntimeError, cov, self.x1, fweights=f) - f = np.ones(2, dtype=np.integer) - assert_raises(RuntimeError, cov, self.x1, fweights=f) - f = -1 * np.ones(3, dtype=np.integer) - assert_raises(ValueError, cov, self.x1, fweights=f) - - def test_aweights(self): - assert_allclose(cov(self.x1, aweights=self.weights), self.res3) - assert_allclose(cov(self.x1, aweights=3.0 * self.weights), - cov(self.x1, aweights=self.weights)) - assert_allclose(cov(self.x1, aweights=self.unit_weights), self.res1) - w = np.ones((2, 3)) - assert_raises(RuntimeError, cov, self.x1, aweights=w) - w = np.ones(2) - assert_raises(RuntimeError, cov, self.x1, aweights=w) - w = -1.0 * np.ones(3) - assert_raises(ValueError, cov, self.x1, aweights=w) - - def test_unit_fweights_and_aweights(self): - assert_allclose(cov(self.x2, fweights=self.frequencies, - aweights=self.unit_weights), - cov(self.x2_repeats)) - assert_allclose(cov(self.x1, fweights=self.frequencies, - aweights=self.unit_weights), - self.res2) - assert_allclose(cov(self.x1, fweights=self.unit_frequencies, - aweights=self.unit_weights), - self.res1) - assert_allclose(cov(self.x1, fweights=self.unit_frequencies, - aweights=self.weights), - self.res3) - assert_allclose(cov(self.x1, fweights=self.unit_frequencies, - aweights=3.0 * self.weights), - cov(self.x1, aweights=self.weights)) - assert_allclose(cov(self.x1, fweights=self.unit_frequencies, - aweights=self.unit_weights), - self.res1) - - -class Test_I0(object): - - def test_simple(self): - 
assert_almost_equal( - i0(0.5), - np.array(1.0634833707413234)) - - A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549]) - expected = np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049]) - assert_almost_equal(i0(A), expected) - assert_almost_equal(i0(-A), expected) - - B = np.array([[0.827002, 0.99959078], - [0.89694769, 0.39298162], - [0.37954418, 0.05206293], - [0.36465447, 0.72446427], - [0.48164949, 0.50324519]]) - assert_almost_equal( - i0(B), - np.array([[1.17843223, 1.26583466], - [1.21147086, 1.03898290], - [1.03633899, 1.00067775], - [1.03352052, 1.13557954], - [1.05884290, 1.06432317]])) - # Regression test for gh-11205 - i0_0 = np.i0([0.]) - assert_equal(i0_0.shape, (1,)) - assert_array_equal(np.i0([0.]), np.array([1.])) - - def test_non_array(self): - a = np.arange(4) - - class array_like: - __array_interface__ = a.__array_interface__ - - def __array_wrap__(self, arr): - return self - - # E.g. pandas series survive ufunc calls through array-wrap: - assert isinstance(np.abs(array_like()), array_like) - exp = np.i0(a) - res = np.i0(array_like()) - - assert_array_equal(exp, res) - - -class TestKaiser(object): - - def test_simple(self): - assert_(np.isfinite(kaiser(1, 1.0))) - assert_almost_equal(kaiser(0, 1.0), - np.array([])) - assert_almost_equal(kaiser(2, 1.0), - np.array([0.78984831, 0.78984831])) - assert_almost_equal(kaiser(5, 1.0), - np.array([0.78984831, 0.94503323, 1., - 0.94503323, 0.78984831])) - assert_almost_equal(kaiser(5, 1.56789), - np.array([0.58285404, 0.88409679, 1., - 0.88409679, 0.58285404])) - - def test_int_beta(self): - kaiser(3, 4) - - -class TestMsort(object): - - def test_simple(self): - A = np.array([[0.44567325, 0.79115165, 0.54900530], - [0.36844147, 0.37325583, 0.96098397], - [0.64864341, 0.52929049, 0.39172155]]) - assert_almost_equal( - msort(A), - np.array([[0.36844147, 0.37325583, 0.39172155], - [0.44567325, 0.52929049, 0.54900530], - [0.64864341, 0.79115165, 0.96098397]])) - - -class TestMeshgrid(object): - 
- def test_simple(self): - [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7]) - assert_array_equal(X, np.array([[1, 2, 3], - [1, 2, 3], - [1, 2, 3], - [1, 2, 3]])) - assert_array_equal(Y, np.array([[4, 4, 4], - [5, 5, 5], - [6, 6, 6], - [7, 7, 7]])) - - def test_single_input(self): - [X] = meshgrid([1, 2, 3, 4]) - assert_array_equal(X, np.array([1, 2, 3, 4])) - - def test_no_input(self): - args = [] - assert_array_equal([], meshgrid(*args)) - assert_array_equal([], meshgrid(*args, copy=False)) - - def test_indexing(self): - x = [1, 2, 3] - y = [4, 5, 6, 7] - [X, Y] = meshgrid(x, y, indexing='ij') - assert_array_equal(X, np.array([[1, 1, 1, 1], - [2, 2, 2, 2], - [3, 3, 3, 3]])) - assert_array_equal(Y, np.array([[4, 5, 6, 7], - [4, 5, 6, 7], - [4, 5, 6, 7]])) - - # Test expected shapes: - z = [8, 9] - assert_(meshgrid(x, y)[0].shape == (4, 3)) - assert_(meshgrid(x, y, indexing='ij')[0].shape == (3, 4)) - assert_(meshgrid(x, y, z)[0].shape == (4, 3, 2)) - assert_(meshgrid(x, y, z, indexing='ij')[0].shape == (3, 4, 2)) - - assert_raises(ValueError, meshgrid, x, y, indexing='notvalid') - - def test_sparse(self): - [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True) - assert_array_equal(X, np.array([[1, 2, 3]])) - assert_array_equal(Y, np.array([[4], [5], [6], [7]])) - - def test_invalid_arguments(self): - # Test that meshgrid complains about invalid arguments - # Regression test for issue #4755: - # https://github.com/numpy/numpy/issues/4755 - assert_raises(TypeError, meshgrid, - [1, 2, 3], [4, 5, 6, 7], indices='ij') - - def test_return_type(self): - # Test for appropriate dtype in returned arrays. 
- # Regression test for issue #5297 - # https://github.com/numpy/numpy/issues/5297 - x = np.arange(0, 10, dtype=np.float32) - y = np.arange(10, 20, dtype=np.float64) - - X, Y = np.meshgrid(x,y) - - assert_(X.dtype == x.dtype) - assert_(Y.dtype == y.dtype) - - # copy - X, Y = np.meshgrid(x,y, copy=True) - - assert_(X.dtype == x.dtype) - assert_(Y.dtype == y.dtype) - - # sparse - X, Y = np.meshgrid(x,y, sparse=True) - - assert_(X.dtype == x.dtype) - assert_(Y.dtype == y.dtype) - - def test_writeback(self): - # Issue 8561 - X = np.array([1.1, 2.2]) - Y = np.array([3.3, 4.4]) - x, y = np.meshgrid(X, Y, sparse=False, copy=True) - - x[0, :] = 0 - assert_equal(x[0, :], 0) - assert_equal(x[1, :], X) - - -class TestPiecewise(object): - - def test_simple(self): - # Condition is single bool list - x = piecewise([0, 0], [True, False], [1]) - assert_array_equal(x, [1, 0]) - - # List of conditions: single bool list - x = piecewise([0, 0], [[True, False]], [1]) - assert_array_equal(x, [1, 0]) - - # Conditions is single bool array - x = piecewise([0, 0], np.array([True, False]), [1]) - assert_array_equal(x, [1, 0]) - - # Condition is single int array - x = piecewise([0, 0], np.array([1, 0]), [1]) - assert_array_equal(x, [1, 0]) - - # List of conditions: int array - x = piecewise([0, 0], [np.array([1, 0])], [1]) - assert_array_equal(x, [1, 0]) - - x = piecewise([0, 0], [[False, True]], [lambda x:-1]) - assert_array_equal(x, [0, -1]) - - assert_raises_regex(ValueError, '1 or 2 functions are expected', - piecewise, [0, 0], [[False, True]], []) - assert_raises_regex(ValueError, '1 or 2 functions are expected', - piecewise, [0, 0], [[False, True]], [1, 2, 3]) - - def test_two_conditions(self): - x = piecewise([1, 2], [[True, False], [False, True]], [3, 4]) - assert_array_equal(x, [3, 4]) - - def test_scalar_domains_three_conditions(self): - x = piecewise(3, [True, False, False], [4, 2, 0]) - assert_equal(x, 4) - - def test_default(self): - # No value specified for x[1], should be 0 - x 
= piecewise([1, 2], [True, False], [2]) - assert_array_equal(x, [2, 0]) - - # Should set x[1] to 3 - x = piecewise([1, 2], [True, False], [2, 3]) - assert_array_equal(x, [2, 3]) - - def test_0d(self): - x = np.array(3) - y = piecewise(x, x > 3, [4, 0]) - assert_(y.ndim == 0) - assert_(y == 0) - - x = 5 - y = piecewise(x, [True, False], [1, 0]) - assert_(y.ndim == 0) - assert_(y == 1) - - # With 3 ranges (It was failing, before) - y = piecewise(x, [False, False, True], [1, 2, 3]) - assert_array_equal(y, 3) - - def test_0d_comparison(self): - x = 3 - y = piecewise(x, [x <= 3, x > 3], [4, 0]) # Should succeed. - assert_equal(y, 4) - - # With 3 ranges (It was failing, before) - x = 4 - y = piecewise(x, [x <= 3, (x > 3) * (x <= 5), x > 5], [1, 2, 3]) - assert_array_equal(y, 2) - - assert_raises_regex(ValueError, '2 or 3 functions are expected', - piecewise, x, [x <= 3, x > 3], [1]) - assert_raises_regex(ValueError, '2 or 3 functions are expected', - piecewise, x, [x <= 3, x > 3], [1, 1, 1, 1]) - - def test_0d_0d_condition(self): - x = np.array(3) - c = np.array(x > 3) - y = piecewise(x, [c], [1, 2]) - assert_equal(y, 2) - - def test_multidimensional_extrafunc(self): - x = np.array([[-2.5, -1.5, -0.5], - [0.5, 1.5, 2.5]]) - y = piecewise(x, [x < 0, x >= 2], [-1, 1, 3]) - assert_array_equal(y, np.array([[-1., -1., -1.], - [3., 3., 1.]])) - - -class TestBincount(object): - - def test_simple(self): - y = np.bincount(np.arange(4)) - assert_array_equal(y, np.ones(4)) - - def test_simple2(self): - y = np.bincount(np.array([1, 5, 2, 4, 1])) - assert_array_equal(y, np.array([0, 2, 1, 0, 1, 1])) - - def test_simple_weight(self): - x = np.arange(4) - w = np.array([0.2, 0.3, 0.5, 0.1]) - y = np.bincount(x, w) - assert_array_equal(y, w) - - def test_simple_weight2(self): - x = np.array([1, 2, 4, 5, 2]) - w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) - y = np.bincount(x, w) - assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1])) - - def test_with_minlength(self): - x = np.array([0, 
1, 0, 1, 1]) - y = np.bincount(x, minlength=3) - assert_array_equal(y, np.array([2, 3, 0])) - x = [] - y = np.bincount(x, minlength=0) - assert_array_equal(y, np.array([])) - - def test_with_minlength_smaller_than_maxvalue(self): - x = np.array([0, 1, 1, 2, 2, 3, 3]) - y = np.bincount(x, minlength=2) - assert_array_equal(y, np.array([1, 2, 2, 2])) - y = np.bincount(x, minlength=0) - assert_array_equal(y, np.array([1, 2, 2, 2])) - - def test_with_minlength_and_weights(self): - x = np.array([1, 2, 4, 5, 2]) - w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) - y = np.bincount(x, w, 8) - assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1, 0, 0])) - - def test_empty(self): - x = np.array([], dtype=int) - y = np.bincount(x) - assert_array_equal(x, y) - - def test_empty_with_minlength(self): - x = np.array([], dtype=int) - y = np.bincount(x, minlength=5) - assert_array_equal(y, np.zeros(5, dtype=int)) - - def test_with_incorrect_minlength(self): - x = np.array([], dtype=int) - assert_raises_regex(TypeError, - "'str' object cannot be interpreted", - lambda: np.bincount(x, minlength="foobar")) - assert_raises_regex(ValueError, - "must not be negative", - lambda: np.bincount(x, minlength=-1)) - - x = np.arange(5) - assert_raises_regex(TypeError, - "'str' object cannot be interpreted", - lambda: np.bincount(x, minlength="foobar")) - assert_raises_regex(ValueError, - "must not be negative", - lambda: np.bincount(x, minlength=-1)) - - @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") - def test_dtype_reference_leaks(self): - # gh-6805 - intp_refcount = sys.getrefcount(np.dtype(np.intp)) - double_refcount = sys.getrefcount(np.dtype(np.double)) - - for j in range(10): - np.bincount([1, 2, 3]) - assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount) - assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount) - - for j in range(10): - np.bincount([1, 2, 3], [4, 5, 6]) - assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount) - 
assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount) - - -class TestInterp(object): - - def test_exceptions(self): - assert_raises(ValueError, interp, 0, [], []) - assert_raises(ValueError, interp, 0, [0], [1, 2]) - assert_raises(ValueError, interp, 0, [0, 1], [1, 2], period=0) - assert_raises(ValueError, interp, 0, [], [], period=360) - assert_raises(ValueError, interp, 0, [0], [1, 2], period=360) - - def test_basic(self): - x = np.linspace(0, 1, 5) - y = np.linspace(0, 1, 5) - x0 = np.linspace(0, 1, 50) - assert_almost_equal(np.interp(x0, x, y), x0) - - def test_right_left_behavior(self): - # Needs range of sizes to test different code paths. - # size ==1 is special cased, 1 < size < 5 is linear search, and - # size >= 5 goes through local search and possibly binary search. - for size in range(1, 10): - xp = np.arange(size, dtype=np.double) - yp = np.ones(size, dtype=np.double) - incpts = np.array([-1, 0, size - 1, size], dtype=np.double) - decpts = incpts[::-1] - - incres = interp(incpts, xp, yp) - decres = interp(decpts, xp, yp) - inctgt = np.array([1, 1, 1, 1], dtype=float) - dectgt = inctgt[::-1] - assert_equal(incres, inctgt) - assert_equal(decres, dectgt) - - incres = interp(incpts, xp, yp, left=0) - decres = interp(decpts, xp, yp, left=0) - inctgt = np.array([0, 1, 1, 1], dtype=float) - dectgt = inctgt[::-1] - assert_equal(incres, inctgt) - assert_equal(decres, dectgt) - - incres = interp(incpts, xp, yp, right=2) - decres = interp(decpts, xp, yp, right=2) - inctgt = np.array([1, 1, 1, 2], dtype=float) - dectgt = inctgt[::-1] - assert_equal(incres, inctgt) - assert_equal(decres, dectgt) - - incres = interp(incpts, xp, yp, left=0, right=2) - decres = interp(decpts, xp, yp, left=0, right=2) - inctgt = np.array([0, 1, 1, 2], dtype=float) - dectgt = inctgt[::-1] - assert_equal(incres, inctgt) - assert_equal(decres, dectgt) - - def test_scalar_interpolation_point(self): - x = np.linspace(0, 1, 5) - y = np.linspace(0, 1, 5) - x0 = 0 - 
assert_almost_equal(np.interp(x0, x, y), x0) - x0 = .3 - assert_almost_equal(np.interp(x0, x, y), x0) - x0 = np.float32(.3) - assert_almost_equal(np.interp(x0, x, y), x0) - x0 = np.float64(.3) - assert_almost_equal(np.interp(x0, x, y), x0) - x0 = np.nan - assert_almost_equal(np.interp(x0, x, y), x0) - - def test_non_finite_behavior_exact_x(self): - x = [1, 2, 2.5, 3, 4] - xp = [1, 2, 3, 4] - fp = [1, 2, np.inf, 4] - assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.inf, np.inf, 4]) - fp = [1, 2, np.nan, 4] - assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.nan, np.nan, 4]) - - @pytest.fixture(params=[ - lambda x: np.float_(x), - lambda x: _make_complex(x, 0), - lambda x: _make_complex(0, x), - lambda x: _make_complex(x, np.multiply(x, -2)) - ], ids=[ - 'real', - 'complex-real', - 'complex-imag', - 'complex-both' - ]) - def sc(self, request): - """ scale function used by the below tests """ - return request.param - - def test_non_finite_any_nan(self, sc): - """ test that nans are propagated """ - assert_equal(np.interp(0.5, [np.nan, 1], sc([ 0, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, np.nan], sc([ 0, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, 1], sc([np.nan, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, 1], sc([ 0, np.nan])), sc(np.nan)) - - def test_non_finite_inf(self, sc): - """ Test that interp between opposite infs gives nan """ - assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 0, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, 1], sc([-np.inf, +np.inf])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, 1], sc([+np.inf, -np.inf])), sc(np.nan)) - - # unless the y values are equal - assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 10, 10])), sc(10)) - - def test_non_finite_half_inf_xf(self, sc): - """ Test that interp where both axes have a bound at inf gives nan """ - assert_equal(np.interp(0.5, [-np.inf, 1], sc([-np.inf, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [-np.inf, 1], sc([+np.inf, 10])), 
sc(np.nan)) - assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, -np.inf])), sc(np.nan)) - assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, +np.inf])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, +np.inf], sc([-np.inf, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, +np.inf], sc([+np.inf, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, -np.inf])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, +np.inf])), sc(np.nan)) - - def test_non_finite_half_inf_x(self, sc): - """ Test interp where the x axis has a bound at inf """ - assert_equal(np.interp(0.5, [-np.inf, -np.inf], sc([0, 10])), sc(10)) - assert_equal(np.interp(0.5, [-np.inf, 1 ], sc([0, 10])), sc(10)) - assert_equal(np.interp(0.5, [ 0, +np.inf], sc([0, 10])), sc(0)) - assert_equal(np.interp(0.5, [+np.inf, +np.inf], sc([0, 10])), sc(0)) - - def test_non_finite_half_inf_f(self, sc): - """ Test interp where the f axis has a bound at inf """ - assert_equal(np.interp(0.5, [0, 1], sc([ 0, -np.inf])), sc(-np.inf)) - assert_equal(np.interp(0.5, [0, 1], sc([ 0, +np.inf])), sc(+np.inf)) - assert_equal(np.interp(0.5, [0, 1], sc([-np.inf, 10])), sc(-np.inf)) - assert_equal(np.interp(0.5, [0, 1], sc([+np.inf, 10])), sc(+np.inf)) - assert_equal(np.interp(0.5, [0, 1], sc([-np.inf, -np.inf])), sc(-np.inf)) - assert_equal(np.interp(0.5, [0, 1], sc([+np.inf, +np.inf])), sc(+np.inf)) - - def test_complex_interp(self): - # test complex interpolation - x = np.linspace(0, 1, 5) - y = np.linspace(0, 1, 5) + (1 + np.linspace(0, 1, 5))*1.0j - x0 = 0.3 - y0 = x0 + (1+x0)*1.0j - assert_almost_equal(np.interp(x0, x, y), y0) - # test complex left and right - x0 = -1 - left = 2 + 3.0j - assert_almost_equal(np.interp(x0, x, y, left=left), left) - x0 = 2.0 - right = 2 + 3.0j - assert_almost_equal(np.interp(x0, x, y, right=right), right) - # test complex non finite - x = [1, 2, 2.5, 3, 4] - xp = [1, 2, 3, 4] - fp = [1, 2+1j, np.inf, 4] - y = [1, 2+1j, np.inf+0.5j, np.inf, 4] - 
assert_almost_equal(np.interp(x, xp, fp), y) - # test complex periodic - x = [-180, -170, -185, 185, -10, -5, 0, 365] - xp = [190, -190, 350, -350] - fp = [5+1.0j, 10+2j, 3+3j, 4+4j] - y = [7.5+1.5j, 5.+1.0j, 8.75+1.75j, 6.25+1.25j, 3.+3j, 3.25+3.25j, - 3.5+3.5j, 3.75+3.75j] - assert_almost_equal(np.interp(x, xp, fp, period=360), y) - - def test_zero_dimensional_interpolation_point(self): - x = np.linspace(0, 1, 5) - y = np.linspace(0, 1, 5) - x0 = np.array(.3) - assert_almost_equal(np.interp(x0, x, y), x0) - - xp = np.array([0, 2, 4]) - fp = np.array([1, -1, 1]) - - actual = np.interp(np.array(1), xp, fp) - assert_equal(actual, 0) - assert_(isinstance(actual, np.float64)) - - actual = np.interp(np.array(4.5), xp, fp, period=4) - assert_equal(actual, 0.5) - assert_(isinstance(actual, np.float64)) - - def test_if_len_x_is_small(self): - xp = np.arange(0, 10, 0.0001) - fp = np.sin(xp) - assert_almost_equal(np.interp(np.pi, xp, fp), 0.0) - - def test_period(self): - x = [-180, -170, -185, 185, -10, -5, 0, 365] - xp = [190, -190, 350, -350] - fp = [5, 10, 3, 4] - y = [7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75] - assert_almost_equal(np.interp(x, xp, fp, period=360), y) - x = np.array(x, order='F').reshape(2, -1) - y = np.array(y, order='C').reshape(2, -1) - assert_almost_equal(np.interp(x, xp, fp, period=360), y) - - -def compare_results(res, desired): - for i in range(len(desired)): - assert_array_equal(res[i], desired[i]) - - -class TestPercentile(object): - - def test_basic(self): - x = np.arange(8) * 0.5 - assert_equal(np.percentile(x, 0), 0.) 
- assert_equal(np.percentile(x, 100), 3.5) - assert_equal(np.percentile(x, 50), 1.75) - x[1] = np.nan - assert_equal(np.percentile(x, 0), np.nan) - assert_equal(np.percentile(x, 0, interpolation='nearest'), np.nan) - - def test_fraction(self): - x = [Fraction(i, 2) for i in range(8)] - - p = np.percentile(x, Fraction(0)) - assert_equal(p, Fraction(0)) - assert_equal(type(p), Fraction) - - p = np.percentile(x, Fraction(100)) - assert_equal(p, Fraction(7, 2)) - assert_equal(type(p), Fraction) - - p = np.percentile(x, Fraction(50)) - assert_equal(p, Fraction(7, 4)) - assert_equal(type(p), Fraction) - - def test_api(self): - d = np.ones(5) - np.percentile(d, 5, None, None, False) - np.percentile(d, 5, None, None, False, 'linear') - o = np.ones((1,)) - np.percentile(d, 5, None, o, False, 'linear') - - def test_2D(self): - x = np.array([[1, 1, 1], - [1, 1, 1], - [4, 4, 3], - [1, 1, 1], - [1, 1, 1]]) - assert_array_equal(np.percentile(x, 50, axis=0), [1, 1, 1]) - - def test_linear(self): - - # Test defaults - assert_equal(np.percentile(range(10), 50), 4.5) - - # explicitly specify interpolation_method 'linear' (the default) - assert_equal(np.percentile(range(10), 50, - interpolation='linear'), 4.5) - - def test_lower_higher(self): - - # interpolation_method 'lower'/'higher' - assert_equal(np.percentile(range(10), 50, - interpolation='lower'), 4) - assert_equal(np.percentile(range(10), 50, - interpolation='higher'), 5) - - def test_midpoint(self): - assert_equal(np.percentile(range(10), 51, - interpolation='midpoint'), 4.5) - assert_equal(np.percentile(range(11), 51, - interpolation='midpoint'), 5.5) - assert_equal(np.percentile(range(11), 50, - interpolation='midpoint'), 5) - - def test_nearest(self): - assert_equal(np.percentile(range(10), 51, - interpolation='nearest'), 5) - assert_equal(np.percentile(range(10), 49, - interpolation='nearest'), 4) - - def test_sequence(self): - x = np.arange(8) * 0.5 - assert_equal(np.percentile(x, [0, 100, 50]), [0, 3.5, 1.75]) - - def 
test_axis(self): - x = np.arange(12).reshape(3, 4) - - assert_equal(np.percentile(x, (25, 50, 100)), [2.75, 5.5, 11.0]) - - r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]] - assert_equal(np.percentile(x, (25, 50, 100), axis=0), r0) - - r1 = [[0.75, 1.5, 3], [4.75, 5.5, 7], [8.75, 9.5, 11]] - assert_equal(np.percentile(x, (25, 50, 100), axis=1), np.array(r1).T) - - # ensure qth axis is always first as with np.array(old_percentile(..)) - x = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) - assert_equal(np.percentile(x, (25, 50)).shape, (2,)) - assert_equal(np.percentile(x, (25, 50, 75)).shape, (3,)) - assert_equal(np.percentile(x, (25, 50), axis=0).shape, (2, 4, 5, 6)) - assert_equal(np.percentile(x, (25, 50), axis=1).shape, (2, 3, 5, 6)) - assert_equal(np.percentile(x, (25, 50), axis=2).shape, (2, 3, 4, 6)) - assert_equal(np.percentile(x, (25, 50), axis=3).shape, (2, 3, 4, 5)) - assert_equal( - np.percentile(x, (25, 50, 75), axis=1).shape, (3, 3, 5, 6)) - assert_equal(np.percentile(x, (25, 50), - interpolation="higher").shape, (2,)) - assert_equal(np.percentile(x, (25, 50, 75), - interpolation="higher").shape, (3,)) - assert_equal(np.percentile(x, (25, 50), axis=0, - interpolation="higher").shape, (2, 4, 5, 6)) - assert_equal(np.percentile(x, (25, 50), axis=1, - interpolation="higher").shape, (2, 3, 5, 6)) - assert_equal(np.percentile(x, (25, 50), axis=2, - interpolation="higher").shape, (2, 3, 4, 6)) - assert_equal(np.percentile(x, (25, 50), axis=3, - interpolation="higher").shape, (2, 3, 4, 5)) - assert_equal(np.percentile(x, (25, 50, 75), axis=1, - interpolation="higher").shape, (3, 3, 5, 6)) - - def test_scalar_q(self): - # test for no empty dimensions for compatibility with old percentile - x = np.arange(12).reshape(3, 4) - assert_equal(np.percentile(x, 50), 5.5) - assert_(np.isscalar(np.percentile(x, 50))) - r0 = np.array([4., 5., 6., 7.]) - assert_equal(np.percentile(x, 50, axis=0), r0) - assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape) - r1 = 
np.array([1.5, 5.5, 9.5]) - assert_almost_equal(np.percentile(x, 50, axis=1), r1) - assert_equal(np.percentile(x, 50, axis=1).shape, r1.shape) - - out = np.empty(1) - assert_equal(np.percentile(x, 50, out=out), 5.5) - assert_equal(out, 5.5) - out = np.empty(4) - assert_equal(np.percentile(x, 50, axis=0, out=out), r0) - assert_equal(out, r0) - out = np.empty(3) - assert_equal(np.percentile(x, 50, axis=1, out=out), r1) - assert_equal(out, r1) - - # test for no empty dimensions for compatibility with old percentile - x = np.arange(12).reshape(3, 4) - assert_equal(np.percentile(x, 50, interpolation='lower'), 5.) - assert_(np.isscalar(np.percentile(x, 50))) - r0 = np.array([4., 5., 6., 7.]) - c0 = np.percentile(x, 50, interpolation='lower', axis=0) - assert_equal(c0, r0) - assert_equal(c0.shape, r0.shape) - r1 = np.array([1., 5., 9.]) - c1 = np.percentile(x, 50, interpolation='lower', axis=1) - assert_almost_equal(c1, r1) - assert_equal(c1.shape, r1.shape) - - out = np.empty((), dtype=x.dtype) - c = np.percentile(x, 50, interpolation='lower', out=out) - assert_equal(c, 5) - assert_equal(out, 5) - out = np.empty(4, dtype=x.dtype) - c = np.percentile(x, 50, interpolation='lower', axis=0, out=out) - assert_equal(c, r0) - assert_equal(out, r0) - out = np.empty(3, dtype=x.dtype) - c = np.percentile(x, 50, interpolation='lower', axis=1, out=out) - assert_equal(c, r1) - assert_equal(out, r1) - - def test_exception(self): - assert_raises(ValueError, np.percentile, [1, 2], 56, - interpolation='foobar') - assert_raises(ValueError, np.percentile, [1], 101) - assert_raises(ValueError, np.percentile, [1], -1) - assert_raises(ValueError, np.percentile, [1], list(range(50)) + [101]) - assert_raises(ValueError, np.percentile, [1], list(range(50)) + [-0.1]) - - def test_percentile_list(self): - assert_equal(np.percentile([1, 2, 3], 0), 1) - - def test_percentile_out(self): - x = np.array([1, 2, 3]) - y = np.zeros((3,)) - p = (1, 2, 3) - np.percentile(x, p, out=y) - assert_equal(y, 
np.percentile(x, p)) - - x = np.array([[1, 2, 3], - [4, 5, 6]]) - - y = np.zeros((3, 3)) - np.percentile(x, p, axis=0, out=y) - assert_equal(y, np.percentile(x, p, axis=0)) - - y = np.zeros((3, 2)) - np.percentile(x, p, axis=1, out=y) - assert_equal(y, np.percentile(x, p, axis=1)) - - x = np.arange(12).reshape(3, 4) - # q.dim > 1, float - r0 = np.array([[2., 3., 4., 5.], [4., 5., 6., 7.]]) - out = np.empty((2, 4)) - assert_equal(np.percentile(x, (25, 50), axis=0, out=out), r0) - assert_equal(out, r0) - r1 = np.array([[0.75, 4.75, 8.75], [1.5, 5.5, 9.5]]) - out = np.empty((2, 3)) - assert_equal(np.percentile(x, (25, 50), axis=1, out=out), r1) - assert_equal(out, r1) - - # q.dim > 1, int - r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) - out = np.empty((2, 4), dtype=x.dtype) - c = np.percentile(x, (25, 50), interpolation='lower', axis=0, out=out) - assert_equal(c, r0) - assert_equal(out, r0) - r1 = np.array([[0, 4, 8], [1, 5, 9]]) - out = np.empty((2, 3), dtype=x.dtype) - c = np.percentile(x, (25, 50), interpolation='lower', axis=1, out=out) - assert_equal(c, r1) - assert_equal(out, r1) - - def test_percentile_empty_dim(self): - # empty dims are preserved - d = np.arange(11 * 2).reshape(11, 1, 2, 1) - assert_array_equal(np.percentile(d, 50, axis=0).shape, (1, 2, 1)) - assert_array_equal(np.percentile(d, 50, axis=1).shape, (11, 2, 1)) - assert_array_equal(np.percentile(d, 50, axis=2).shape, (11, 1, 1)) - assert_array_equal(np.percentile(d, 50, axis=3).shape, (11, 1, 2)) - assert_array_equal(np.percentile(d, 50, axis=-1).shape, (11, 1, 2)) - assert_array_equal(np.percentile(d, 50, axis=-2).shape, (11, 1, 1)) - assert_array_equal(np.percentile(d, 50, axis=-3).shape, (11, 2, 1)) - assert_array_equal(np.percentile(d, 50, axis=-4).shape, (1, 2, 1)) - - assert_array_equal(np.percentile(d, 50, axis=2, - interpolation='midpoint').shape, - (11, 1, 1)) - assert_array_equal(np.percentile(d, 50, axis=-2, - interpolation='midpoint').shape, - (11, 1, 1)) - - 
assert_array_equal(np.array(np.percentile(d, [10, 50], axis=0)).shape, - (2, 1, 2, 1)) - assert_array_equal(np.array(np.percentile(d, [10, 50], axis=1)).shape, - (2, 11, 2, 1)) - assert_array_equal(np.array(np.percentile(d, [10, 50], axis=2)).shape, - (2, 11, 1, 1)) - assert_array_equal(np.array(np.percentile(d, [10, 50], axis=3)).shape, - (2, 11, 1, 2)) - - def test_percentile_no_overwrite(self): - a = np.array([2, 3, 4, 1]) - np.percentile(a, [50], overwrite_input=False) - assert_equal(a, np.array([2, 3, 4, 1])) - - a = np.array([2, 3, 4, 1]) - np.percentile(a, [50]) - assert_equal(a, np.array([2, 3, 4, 1])) - - def test_no_p_overwrite(self): - p = np.linspace(0., 100., num=5) - np.percentile(np.arange(100.), p, interpolation="midpoint") - assert_array_equal(p, np.linspace(0., 100., num=5)) - p = np.linspace(0., 100., num=5).tolist() - np.percentile(np.arange(100.), p, interpolation="midpoint") - assert_array_equal(p, np.linspace(0., 100., num=5).tolist()) - - def test_percentile_overwrite(self): - a = np.array([2, 3, 4, 1]) - b = np.percentile(a, [50], overwrite_input=True) - assert_equal(b, np.array([2.5])) - - b = np.percentile([2, 3, 4, 1], [50], overwrite_input=True) - assert_equal(b, np.array([2.5])) - - def test_extended_axis(self): - o = np.random.normal(size=(71, 23)) - x = np.dstack([o] * 10) - assert_equal(np.percentile(x, 30, axis=(0, 1)), np.percentile(o, 30)) - x = np.moveaxis(x, -1, 0) - assert_equal(np.percentile(x, 30, axis=(-2, -1)), np.percentile(o, 30)) - x = x.swapaxes(0, 1).copy() - assert_equal(np.percentile(x, 30, axis=(0, -1)), np.percentile(o, 30)) - x = x.swapaxes(0, 1).copy() - - assert_equal(np.percentile(x, [25, 60], axis=(0, 1, 2)), - np.percentile(x, [25, 60], axis=None)) - assert_equal(np.percentile(x, [25, 60], axis=(0,)), - np.percentile(x, [25, 60], axis=0)) - - d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11)) - np.random.shuffle(d.ravel()) - assert_equal(np.percentile(d, 25, axis=(0, 1, 2))[0], - np.percentile(d[:,:,:, 
0].flatten(), 25)) - assert_equal(np.percentile(d, [10, 90], axis=(0, 1, 3))[:, 1], - np.percentile(d[:,:, 1,:].flatten(), [10, 90])) - assert_equal(np.percentile(d, 25, axis=(3, 1, -4))[2], - np.percentile(d[:,:, 2,:].flatten(), 25)) - assert_equal(np.percentile(d, 25, axis=(3, 1, 2))[2], - np.percentile(d[2,:,:,:].flatten(), 25)) - assert_equal(np.percentile(d, 25, axis=(3, 2))[2, 1], - np.percentile(d[2, 1,:,:].flatten(), 25)) - assert_equal(np.percentile(d, 25, axis=(1, -2))[2, 1], - np.percentile(d[2,:,:, 1].flatten(), 25)) - assert_equal(np.percentile(d, 25, axis=(1, 3))[2, 2], - np.percentile(d[2,:, 2,:].flatten(), 25)) - - def test_extended_axis_invalid(self): - d = np.ones((3, 5, 7, 11)) - assert_raises(np.AxisError, np.percentile, d, axis=-5, q=25) - assert_raises(np.AxisError, np.percentile, d, axis=(0, -5), q=25) - assert_raises(np.AxisError, np.percentile, d, axis=4, q=25) - assert_raises(np.AxisError, np.percentile, d, axis=(0, 4), q=25) - # each of these refers to the same axis twice - assert_raises(ValueError, np.percentile, d, axis=(1, 1), q=25) - assert_raises(ValueError, np.percentile, d, axis=(-1, -1), q=25) - assert_raises(ValueError, np.percentile, d, axis=(3, -1), q=25) - - def test_keepdims(self): - d = np.ones((3, 5, 7, 11)) - assert_equal(np.percentile(d, 7, axis=None, keepdims=True).shape, - (1, 1, 1, 1)) - assert_equal(np.percentile(d, 7, axis=(0, 1), keepdims=True).shape, - (1, 1, 7, 11)) - assert_equal(np.percentile(d, 7, axis=(0, 3), keepdims=True).shape, - (1, 5, 7, 1)) - assert_equal(np.percentile(d, 7, axis=(1,), keepdims=True).shape, - (3, 1, 7, 11)) - assert_equal(np.percentile(d, 7, (0, 1, 2, 3), keepdims=True).shape, - (1, 1, 1, 1)) - assert_equal(np.percentile(d, 7, axis=(0, 1, 3), keepdims=True).shape, - (1, 1, 7, 1)) - - assert_equal(np.percentile(d, [1, 7], axis=(0, 1, 3), - keepdims=True).shape, (2, 1, 1, 7, 1)) - assert_equal(np.percentile(d, [1, 7], axis=(0, 3), - keepdims=True).shape, (2, 1, 5, 7, 1)) - - def 
test_out(self): - o = np.zeros((4,)) - d = np.ones((3, 4)) - assert_equal(np.percentile(d, 0, 0, out=o), o) - assert_equal(np.percentile(d, 0, 0, interpolation='nearest', out=o), o) - o = np.zeros((3,)) - assert_equal(np.percentile(d, 1, 1, out=o), o) - assert_equal(np.percentile(d, 1, 1, interpolation='nearest', out=o), o) - - o = np.zeros(()) - assert_equal(np.percentile(d, 2, out=o), o) - assert_equal(np.percentile(d, 2, interpolation='nearest', out=o), o) - - def test_out_nan(self): - with warnings.catch_warnings(record=True): - warnings.filterwarnings('always', '', RuntimeWarning) - o = np.zeros((4,)) - d = np.ones((3, 4)) - d[2, 1] = np.nan - assert_equal(np.percentile(d, 0, 0, out=o), o) - assert_equal( - np.percentile(d, 0, 0, interpolation='nearest', out=o), o) - o = np.zeros((3,)) - assert_equal(np.percentile(d, 1, 1, out=o), o) - assert_equal( - np.percentile(d, 1, 1, interpolation='nearest', out=o), o) - o = np.zeros(()) - assert_equal(np.percentile(d, 1, out=o), o) - assert_equal( - np.percentile(d, 1, interpolation='nearest', out=o), o) - - def test_nan_behavior(self): - a = np.arange(24, dtype=float) - a[2] = np.nan - assert_equal(np.percentile(a, 0.3), np.nan) - assert_equal(np.percentile(a, 0.3, axis=0), np.nan) - assert_equal(np.percentile(a, [0.3, 0.6], axis=0), - np.array([np.nan] * 2)) - - a = np.arange(24, dtype=float).reshape(2, 3, 4) - a[1, 2, 3] = np.nan - a[1, 1, 2] = np.nan - - # no axis - assert_equal(np.percentile(a, 0.3), np.nan) - assert_equal(np.percentile(a, 0.3).ndim, 0) - - # axis0 zerod - b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 0) - b[2, 3] = np.nan - b[1, 2] = np.nan - assert_equal(np.percentile(a, 0.3, 0), b) - - # axis0 not zerod - b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), - [0.3, 0.6], 0) - b[:, 2, 3] = np.nan - b[:, 1, 2] = np.nan - assert_equal(np.percentile(a, [0.3, 0.6], 0), b) - - # axis1 zerod - b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 1) - b[1, 
3] = np.nan - b[1, 2] = np.nan - assert_equal(np.percentile(a, 0.3, 1), b) - # axis1 not zerod - b = np.percentile( - np.arange(24, dtype=float).reshape(2, 3, 4), [0.3, 0.6], 1) - b[:, 1, 3] = np.nan - b[:, 1, 2] = np.nan - assert_equal(np.percentile(a, [0.3, 0.6], 1), b) - - # axis02 zerod - b = np.percentile( - np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, (0, 2)) - b[1] = np.nan - b[2] = np.nan - assert_equal(np.percentile(a, 0.3, (0, 2)), b) - # axis02 not zerod - b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), - [0.3, 0.6], (0, 2)) - b[:, 1] = np.nan - b[:, 2] = np.nan - assert_equal(np.percentile(a, [0.3, 0.6], (0, 2)), b) - # axis02 not zerod with nearest interpolation - b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), - [0.3, 0.6], (0, 2), interpolation='nearest') - b[:, 1] = np.nan - b[:, 2] = np.nan - assert_equal(np.percentile( - a, [0.3, 0.6], (0, 2), interpolation='nearest'), b) - - -class TestQuantile(object): - # most of this is already tested by TestPercentile - - def test_basic(self): - x = np.arange(8) * 0.5 - assert_equal(np.quantile(x, 0), 0.) 
- assert_equal(np.quantile(x, 1), 3.5) - assert_equal(np.quantile(x, 0.5), 1.75) - - def test_fraction(self): - # fractional input, integral quantile - x = [Fraction(i, 2) for i in range(8)] - - q = np.quantile(x, 0) - assert_equal(q, 0) - assert_equal(type(q), Fraction) - - q = np.quantile(x, 1) - assert_equal(q, Fraction(7, 2)) - assert_equal(type(q), Fraction) - - q = np.quantile(x, Fraction(1, 2)) - assert_equal(q, Fraction(7, 4)) - assert_equal(type(q), Fraction) - - # repeat with integral input but fractional quantile - x = np.arange(8) - assert_equal(np.quantile(x, Fraction(1, 2)), Fraction(7, 2)) - - def test_no_p_overwrite(self): - # this is worth retesting, because quantile does not make a copy - p0 = np.array([0, 0.75, 0.25, 0.5, 1.0]) - p = p0.copy() - np.quantile(np.arange(100.), p, interpolation="midpoint") - assert_array_equal(p, p0) - - p0 = p0.tolist() - p = p.tolist() - np.quantile(np.arange(100.), p, interpolation="midpoint") - assert_array_equal(p, p0) - - -class TestMedian(object): - - def test_basic(self): - a0 = np.array(1) - a1 = np.arange(2) - a2 = np.arange(6).reshape(2, 3) - assert_equal(np.median(a0), 1) - assert_allclose(np.median(a1), 0.5) - assert_allclose(np.median(a2), 2.5) - assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5]) - assert_equal(np.median(a2, axis=1), [1, 4]) - assert_allclose(np.median(a2, axis=None), 2.5) - - a = np.array([0.0444502, 0.0463301, 0.141249, 0.0606775]) - assert_almost_equal((a[1] + a[3]) / 2., np.median(a)) - a = np.array([0.0463301, 0.0444502, 0.141249]) - assert_equal(a[0], np.median(a)) - a = np.array([0.0444502, 0.141249, 0.0463301]) - assert_equal(a[-1], np.median(a)) - # check array scalar result - assert_equal(np.median(a).ndim, 0) - a[1] = np.nan - assert_equal(np.median(a).ndim, 0) - - def test_axis_keyword(self): - a3 = np.array([[2, 3], - [0, 1], - [6, 7], - [4, 5]]) - for a in [a3, np.random.randint(0, 100, size=(2, 3, 4))]: - orig = a.copy() - np.median(a, axis=None) - for ax in 
range(a.ndim): - np.median(a, axis=ax) - assert_array_equal(a, orig) - - assert_allclose(np.median(a3, axis=0), [3, 4]) - assert_allclose(np.median(a3.T, axis=1), [3, 4]) - assert_allclose(np.median(a3), 3.5) - assert_allclose(np.median(a3, axis=None), 3.5) - assert_allclose(np.median(a3.T), 3.5) - - def test_overwrite_keyword(self): - a3 = np.array([[2, 3], - [0, 1], - [6, 7], - [4, 5]]) - a0 = np.array(1) - a1 = np.arange(2) - a2 = np.arange(6).reshape(2, 3) - assert_allclose(np.median(a0.copy(), overwrite_input=True), 1) - assert_allclose(np.median(a1.copy(), overwrite_input=True), 0.5) - assert_allclose(np.median(a2.copy(), overwrite_input=True), 2.5) - assert_allclose(np.median(a2.copy(), overwrite_input=True, axis=0), - [1.5, 2.5, 3.5]) - assert_allclose( - np.median(a2.copy(), overwrite_input=True, axis=1), [1, 4]) - assert_allclose( - np.median(a2.copy(), overwrite_input=True, axis=None), 2.5) - assert_allclose( - np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4]) - assert_allclose(np.median(a3.T.copy(), overwrite_input=True, axis=1), - [3, 4]) - - a4 = np.arange(3 * 4 * 5, dtype=np.float32).reshape((3, 4, 5)) - np.random.shuffle(a4.ravel()) - assert_allclose(np.median(a4, axis=None), - np.median(a4.copy(), axis=None, overwrite_input=True)) - assert_allclose(np.median(a4, axis=0), - np.median(a4.copy(), axis=0, overwrite_input=True)) - assert_allclose(np.median(a4, axis=1), - np.median(a4.copy(), axis=1, overwrite_input=True)) - assert_allclose(np.median(a4, axis=2), - np.median(a4.copy(), axis=2, overwrite_input=True)) - - def test_array_like(self): - x = [1, 2, 3] - assert_almost_equal(np.median(x), 2) - x2 = [x] - assert_almost_equal(np.median(x2), 2) - assert_allclose(np.median(x2, axis=0), x) - - def test_subclass(self): - # gh-3846 - class MySubClass(np.ndarray): - - def __new__(cls, input_array, info=None): - obj = np.asarray(input_array).view(cls) - obj.info = info - return obj - - def mean(self, axis=None, dtype=None, out=None): - return 
-7 - - a = MySubClass([1, 2, 3]) - assert_equal(np.median(a), -7) - - def test_out(self): - o = np.zeros((4,)) - d = np.ones((3, 4)) - assert_equal(np.median(d, 0, out=o), o) - o = np.zeros((3,)) - assert_equal(np.median(d, 1, out=o), o) - o = np.zeros(()) - assert_equal(np.median(d, out=o), o) - - def test_out_nan(self): - with warnings.catch_warnings(record=True): - warnings.filterwarnings('always', '', RuntimeWarning) - o = np.zeros((4,)) - d = np.ones((3, 4)) - d[2, 1] = np.nan - assert_equal(np.median(d, 0, out=o), o) - o = np.zeros((3,)) - assert_equal(np.median(d, 1, out=o), o) - o = np.zeros(()) - assert_equal(np.median(d, out=o), o) - - def test_nan_behavior(self): - a = np.arange(24, dtype=float) - a[2] = np.nan - assert_equal(np.median(a), np.nan) - assert_equal(np.median(a, axis=0), np.nan) - - a = np.arange(24, dtype=float).reshape(2, 3, 4) - a[1, 2, 3] = np.nan - a[1, 1, 2] = np.nan - - # no axis - assert_equal(np.median(a), np.nan) - assert_equal(np.median(a).ndim, 0) - - # axis0 - b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 0) - b[2, 3] = np.nan - b[1, 2] = np.nan - assert_equal(np.median(a, 0), b) - - # axis1 - b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 1) - b[1, 3] = np.nan - b[1, 2] = np.nan - assert_equal(np.median(a, 1), b) - - # axis02 - b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), (0, 2)) - b[1] = np.nan - b[2] = np.nan - assert_equal(np.median(a, (0, 2)), b) - - def test_empty(self): - # mean(empty array) emits two warnings: empty slice and divide by 0 - a = np.array([], dtype=float) - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_equal(np.median(a), np.nan) - assert_(w[0].category is RuntimeWarning) - assert_equal(len(w), 2) - - # multiple dimensions - a = np.array([], dtype=float, ndmin=3) - # no axis - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - 
assert_equal(np.median(a), np.nan) - assert_(w[0].category is RuntimeWarning) - - # axis 0 and 1 - b = np.array([], dtype=float, ndmin=2) - assert_equal(np.median(a, axis=0), b) - assert_equal(np.median(a, axis=1), b) - - # axis 2 - b = np.array(np.nan, dtype=float, ndmin=2) - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_equal(np.median(a, axis=2), b) - assert_(w[0].category is RuntimeWarning) - - def test_object(self): - o = np.arange(7.) - assert_(type(np.median(o.astype(object))), float) - o[2] = np.nan - assert_(type(np.median(o.astype(object))), float) - - def test_extended_axis(self): - o = np.random.normal(size=(71, 23)) - x = np.dstack([o] * 10) - assert_equal(np.median(x, axis=(0, 1)), np.median(o)) - x = np.moveaxis(x, -1, 0) - assert_equal(np.median(x, axis=(-2, -1)), np.median(o)) - x = x.swapaxes(0, 1).copy() - assert_equal(np.median(x, axis=(0, -1)), np.median(o)) - - assert_equal(np.median(x, axis=(0, 1, 2)), np.median(x, axis=None)) - assert_equal(np.median(x, axis=(0, )), np.median(x, axis=0)) - assert_equal(np.median(x, axis=(-1, )), np.median(x, axis=-1)) - - d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11)) - np.random.shuffle(d.ravel()) - assert_equal(np.median(d, axis=(0, 1, 2))[0], - np.median(d[:,:,:, 0].flatten())) - assert_equal(np.median(d, axis=(0, 1, 3))[1], - np.median(d[:,:, 1,:].flatten())) - assert_equal(np.median(d, axis=(3, 1, -4))[2], - np.median(d[:,:, 2,:].flatten())) - assert_equal(np.median(d, axis=(3, 1, 2))[2], - np.median(d[2,:,:,:].flatten())) - assert_equal(np.median(d, axis=(3, 2))[2, 1], - np.median(d[2, 1,:,:].flatten())) - assert_equal(np.median(d, axis=(1, -2))[2, 1], - np.median(d[2,:,:, 1].flatten())) - assert_equal(np.median(d, axis=(1, 3))[2, 2], - np.median(d[2,:, 2,:].flatten())) - - def test_extended_axis_invalid(self): - d = np.ones((3, 5, 7, 11)) - assert_raises(np.AxisError, np.median, d, axis=-5) - assert_raises(np.AxisError, 
np.median, d, axis=(0, -5)) - assert_raises(np.AxisError, np.median, d, axis=4) - assert_raises(np.AxisError, np.median, d, axis=(0, 4)) - assert_raises(ValueError, np.median, d, axis=(1, 1)) - - def test_keepdims(self): - d = np.ones((3, 5, 7, 11)) - assert_equal(np.median(d, axis=None, keepdims=True).shape, - (1, 1, 1, 1)) - assert_equal(np.median(d, axis=(0, 1), keepdims=True).shape, - (1, 1, 7, 11)) - assert_equal(np.median(d, axis=(0, 3), keepdims=True).shape, - (1, 5, 7, 1)) - assert_equal(np.median(d, axis=(1,), keepdims=True).shape, - (3, 1, 7, 11)) - assert_equal(np.median(d, axis=(0, 1, 2, 3), keepdims=True).shape, - (1, 1, 1, 1)) - assert_equal(np.median(d, axis=(0, 1, 3), keepdims=True).shape, - (1, 1, 7, 1)) - - -class TestAdd_newdoc_ufunc(object): - - def test_ufunc_arg(self): - assert_raises(TypeError, add_newdoc_ufunc, 2, "blah") - assert_raises(ValueError, add_newdoc_ufunc, np.add, "blah") - - def test_string_arg(self): - assert_raises(TypeError, add_newdoc_ufunc, np.add, 3) - - -class TestAdd_newdoc(object): - - @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") - @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") - def test_add_doc(self): - # test np.add_newdoc - tgt = "Current flat index into the array." 
- assert_equal(np.core.flatiter.index.__doc__[:len(tgt)], tgt) - assert_(len(np.core.ufunc.identity.__doc__) > 300) - assert_(len(np.lib.index_tricks.mgrid.__doc__) > 300) - -class TestSortComplex(object): - - @pytest.mark.parametrize("type_in, type_out", [ - ('l', 'D'), - ('h', 'F'), - ('H', 'F'), - ('b', 'F'), - ('B', 'F'), - ('g', 'G'), - ]) - def test_sort_real(self, type_in, type_out): - # sort_complex() type casting for real input types - a = np.array([5, 3, 6, 2, 1], dtype=type_in) - actual = np.sort_complex(a) - expected = np.sort(a).astype(type_out) - assert_equal(actual, expected) - assert_equal(actual.dtype, expected.dtype) - - def test_sort_complex(self): - # sort_complex() handling of complex input - a = np.array([2 + 3j, 1 - 2j, 1 - 3j, 2 + 1j], dtype='D') - expected = np.array([1 - 3j, 1 - 2j, 2 + 1j, 2 + 3j], dtype='D') - actual = np.sort_complex(a) - assert_equal(actual, expected) - assert_equal(actual.dtype, expected.dtype) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_histograms.py b/venv/lib/python3.7/site-packages/numpy/lib/tests/test_histograms.py deleted file mode 100644 index dbf189f..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_histograms.py +++ /dev/null @@ -1,840 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np - -from numpy.lib.histograms import histogram, histogramdd, histogram_bin_edges -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_almost_equal, - assert_array_almost_equal, assert_raises, assert_allclose, - assert_array_max_ulp, assert_raises_regex, suppress_warnings, - ) -import pytest - - -class TestHistogram(object): - - def setup(self): - pass - - def teardown(self): - pass - - def test_simple(self): - n = 100 - v = np.random.rand(n) - (a, b) = histogram(v) - # check if the sum of the bins equals the number of samples - assert_equal(np.sum(a, axis=0), n) - # check that the bin counts are evenly spaced when 
the data is from - # a linear function - (a, b) = histogram(np.linspace(0, 10, 100)) - assert_array_equal(a, 10) - - def test_one_bin(self): - # Ticket 632 - hist, edges = histogram([1, 2, 3, 4], [1, 2]) - assert_array_equal(hist, [2, ]) - assert_array_equal(edges, [1, 2]) - assert_raises(ValueError, histogram, [1, 2], bins=0) - h, e = histogram([1, 2], bins=1) - assert_equal(h, np.array([2])) - assert_allclose(e, np.array([1., 2.])) - - def test_normed(self): - sup = suppress_warnings() - with sup: - rec = sup.record(np.VisibleDeprecationWarning, '.*normed.*') - # Check that the integral of the density equals 1. - n = 100 - v = np.random.rand(n) - a, b = histogram(v, normed=True) - area = np.sum(a * np.diff(b)) - assert_almost_equal(area, 1) - assert_equal(len(rec), 1) - - sup = suppress_warnings() - with sup: - rec = sup.record(np.VisibleDeprecationWarning, '.*normed.*') - # Check with non-constant bin widths (buggy but backwards - # compatible) - v = np.arange(10) - bins = [0, 1, 5, 9, 10] - a, b = histogram(v, bins, normed=True) - area = np.sum(a * np.diff(b)) - assert_almost_equal(area, 1) - assert_equal(len(rec), 1) - - def test_density(self): - # Check that the integral of the density equals 1. - n = 100 - v = np.random.rand(n) - a, b = histogram(v, density=True) - area = np.sum(a * np.diff(b)) - assert_almost_equal(area, 1) - - # Check with non-constant bin widths - v = np.arange(10) - bins = [0, 1, 3, 6, 10] - a, b = histogram(v, bins, density=True) - assert_array_equal(a, .1) - assert_equal(np.sum(a * np.diff(b)), 1) - - # Test that passing False works too - a, b = histogram(v, bins, density=False) - assert_array_equal(a, [1, 2, 3, 4]) - - # Variale bin widths are especially useful to deal with - # infinities. - v = np.arange(10) - bins = [0, 1, 3, 6, np.inf] - a, b = histogram(v, bins, density=True) - assert_array_equal(a, [.1, .1, .1, 0.]) - - # Taken from a bug report from N. Becker on the numpy-discussion - # mailing list Aug. 6, 2010. 
- counts, dmy = np.histogram( - [1, 2, 3, 4], [0.5, 1.5, np.inf], density=True) - assert_equal(counts, [.25, 0]) - - def test_outliers(self): - # Check that outliers are not tallied - a = np.arange(10) + .5 - - # Lower outliers - h, b = histogram(a, range=[0, 9]) - assert_equal(h.sum(), 9) - - # Upper outliers - h, b = histogram(a, range=[1, 10]) - assert_equal(h.sum(), 9) - - # Normalization - h, b = histogram(a, range=[1, 9], density=True) - assert_almost_equal((h * np.diff(b)).sum(), 1, decimal=15) - - # Weights - w = np.arange(10) + .5 - h, b = histogram(a, range=[1, 9], weights=w, density=True) - assert_equal((h * np.diff(b)).sum(), 1) - - h, b = histogram(a, bins=8, range=[1, 9], weights=w) - assert_equal(h, w[1:-1]) - - def test_arr_weights_mismatch(self): - a = np.arange(10) + .5 - w = np.arange(11) + .5 - with assert_raises_regex(ValueError, "same shape as"): - h, b = histogram(a, range=[1, 9], weights=w, density=True) - - - def test_type(self): - # Check the type of the returned histogram - a = np.arange(10) + .5 - h, b = histogram(a) - assert_(np.issubdtype(h.dtype, np.integer)) - - h, b = histogram(a, density=True) - assert_(np.issubdtype(h.dtype, np.floating)) - - h, b = histogram(a, weights=np.ones(10, int)) - assert_(np.issubdtype(h.dtype, np.integer)) - - h, b = histogram(a, weights=np.ones(10, float)) - assert_(np.issubdtype(h.dtype, np.floating)) - - def test_f32_rounding(self): - # gh-4799, check that the rounding of the edges works with float32 - x = np.array([276.318359, -69.593948, 21.329449], dtype=np.float32) - y = np.array([5005.689453, 4481.327637, 6010.369629], dtype=np.float32) - counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100) - assert_equal(counts_hist.sum(), 3.) 
- - def test_bool_conversion(self): - # gh-12107 - # Reference integer histogram - a = np.array([1, 1, 0], dtype=np.uint8) - int_hist, int_edges = np.histogram(a) - - # Should raise an warning on booleans - # Ensure that the histograms are equivalent, need to suppress - # the warnings to get the actual outputs - with suppress_warnings() as sup: - rec = sup.record(RuntimeWarning, 'Converting input from .*') - hist, edges = np.histogram([True, True, False]) - # A warning should be issued - assert_equal(len(rec), 1) - assert_array_equal(hist, int_hist) - assert_array_equal(edges, int_edges) - - def test_weights(self): - v = np.random.rand(100) - w = np.ones(100) * 5 - a, b = histogram(v) - na, nb = histogram(v, density=True) - wa, wb = histogram(v, weights=w) - nwa, nwb = histogram(v, weights=w, density=True) - assert_array_almost_equal(a * 5, wa) - assert_array_almost_equal(na, nwa) - - # Check weights are properly applied. - v = np.linspace(0, 10, 10) - w = np.concatenate((np.zeros(5), np.ones(5))) - wa, wb = histogram(v, bins=np.arange(11), weights=w) - assert_array_almost_equal(wa, w) - - # Check with integer weights - wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1]) - assert_array_equal(wa, [4, 5, 0, 1]) - wa, wb = histogram( - [1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], density=True) - assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. * 4) - - # Check weights with non-uniform bin widths - a, b = histogram( - np.arange(9), [0, 1, 3, 6, 10], - weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True) - assert_almost_equal(a, [.2, .1, .1, .075]) - - def test_exotic_weights(self): - - # Test the use of weights that are not integer or floats, but e.g. - # complex numbers or object types. 
- - # Complex weights - values = np.array([1.3, 2.5, 2.3]) - weights = np.array([1, -1, 2]) + 1j * np.array([2, 1, 2]) - - # Check with custom bins - wa, wb = histogram(values, bins=[0, 2, 3], weights=weights) - assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3])) - - # Check with even bins - wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights) - assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3])) - - # Decimal weights - from decimal import Decimal - values = np.array([1.3, 2.5, 2.3]) - weights = np.array([Decimal(1), Decimal(2), Decimal(3)]) - - # Check with custom bins - wa, wb = histogram(values, bins=[0, 2, 3], weights=weights) - assert_array_almost_equal(wa, [Decimal(1), Decimal(5)]) - - # Check with even bins - wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights) - assert_array_almost_equal(wa, [Decimal(1), Decimal(5)]) - - def test_no_side_effects(self): - # This is a regression test that ensures that values passed to - # ``histogram`` are unchanged. 
- values = np.array([1.3, 2.5, 2.3]) - np.histogram(values, range=[-10, 10], bins=100) - assert_array_almost_equal(values, [1.3, 2.5, 2.3]) - - def test_empty(self): - a, b = histogram([], bins=([0, 1])) - assert_array_equal(a, np.array([0])) - assert_array_equal(b, np.array([0, 1])) - - def test_error_binnum_type (self): - # Tests if right Error is raised if bins argument is float - vals = np.linspace(0.0, 1.0, num=100) - histogram(vals, 5) - assert_raises(TypeError, histogram, vals, 2.4) - - def test_finite_range(self): - # Normal ranges should be fine - vals = np.linspace(0.0, 1.0, num=100) - histogram(vals, range=[0.25,0.75]) - assert_raises(ValueError, histogram, vals, range=[np.nan,0.75]) - assert_raises(ValueError, histogram, vals, range=[0.25,np.inf]) - - def test_invalid_range(self): - # start of range must be < end of range - vals = np.linspace(0.0, 1.0, num=100) - with assert_raises_regex(ValueError, "max must be larger than"): - np.histogram(vals, range=[0.1, 0.01]) - - def test_bin_edge_cases(self): - # Ensure that floating-point computations correctly place edge cases. 
- arr = np.array([337, 404, 739, 806, 1007, 1811, 2012]) - hist, edges = np.histogram(arr, bins=8296, range=(2, 2280)) - mask = hist > 0 - left_edges = edges[:-1][mask] - right_edges = edges[1:][mask] - for x, left, right in zip(arr, left_edges, right_edges): - assert_(x >= left) - assert_(x < right) - - def test_last_bin_inclusive_range(self): - arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.]) - hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5)) - assert_equal(hist[-1], 1) - - def test_bin_array_dims(self): - # gracefully handle bins object > 1 dimension - vals = np.linspace(0.0, 1.0, num=100) - bins = np.array([[0, 0.5], [0.6, 1.0]]) - with assert_raises_regex(ValueError, "must be 1d"): - np.histogram(vals, bins=bins) - - def test_unsigned_monotonicity_check(self): - # Ensures ValueError is raised if bins not increasing monotonically - # when bins contain unsigned values (see #9222) - arr = np.array([2]) - bins = np.array([1, 3, 1], dtype='uint64') - with assert_raises(ValueError): - hist, edges = np.histogram(arr, bins=bins) - - def test_object_array_of_0d(self): - # gh-7864 - assert_raises(ValueError, - histogram, [np.array(0.4) for i in range(10)] + [-np.inf]) - assert_raises(ValueError, - histogram, [np.array(0.4) for i in range(10)] + [np.inf]) - - # these should not crash - np.histogram([np.array(0.5) for i in range(10)] + [.500000000000001]) - np.histogram([np.array(0.5) for i in range(10)] + [.5]) - - def test_some_nan_values(self): - # gh-7503 - one_nan = np.array([0, 1, np.nan]) - all_nan = np.array([np.nan, np.nan]) - - # the internal comparisons with NaN give warnings - sup = suppress_warnings() - sup.filter(RuntimeWarning) - with sup: - # can't infer range with nan - assert_raises(ValueError, histogram, one_nan, bins='auto') - assert_raises(ValueError, histogram, all_nan, bins='auto') - - # explicit range solves the problem - h, b = histogram(one_nan, bins='auto', range=(0, 1)) - assert_equal(h.sum(), 2) # nan is not counted - h, b = 
histogram(all_nan, bins='auto', range=(0, 1)) - assert_equal(h.sum(), 0) # nan is not counted - - # as does an explicit set of bins - h, b = histogram(one_nan, bins=[0, 1]) - assert_equal(h.sum(), 2) # nan is not counted - h, b = histogram(all_nan, bins=[0, 1]) - assert_equal(h.sum(), 0) # nan is not counted - - def test_datetime(self): - begin = np.datetime64('2000-01-01', 'D') - offsets = np.array([0, 0, 1, 1, 2, 3, 5, 10, 20]) - bins = np.array([0, 2, 7, 20]) - dates = begin + offsets - date_bins = begin + bins - - td = np.dtype('timedelta64[D]') - - # Results should be the same for integer offsets or datetime values. - # For now, only explicit bins are supported, since linspace does not - # work on datetimes or timedeltas - d_count, d_edge = histogram(dates, bins=date_bins) - t_count, t_edge = histogram(offsets.astype(td), bins=bins.astype(td)) - i_count, i_edge = histogram(offsets, bins=bins) - - assert_equal(d_count, i_count) - assert_equal(t_count, i_count) - - assert_equal((d_edge - begin).astype(int), i_edge) - assert_equal(t_edge.astype(int), i_edge) - - assert_equal(d_edge.dtype, dates.dtype) - assert_equal(t_edge.dtype, td) - - def do_signed_overflow_bounds(self, dtype): - exponent = 8 * np.dtype(dtype).itemsize - 1 - arr = np.array([-2**exponent + 4, 2**exponent - 4], dtype=dtype) - hist, e = histogram(arr, bins=2) - assert_equal(e, [-2**exponent + 4, 0, 2**exponent - 4]) - assert_equal(hist, [1, 1]) - - def test_signed_overflow_bounds(self): - self.do_signed_overflow_bounds(np.byte) - self.do_signed_overflow_bounds(np.short) - self.do_signed_overflow_bounds(np.intc) - self.do_signed_overflow_bounds(np.int_) - self.do_signed_overflow_bounds(np.longlong) - - def do_precision_lower_bound(self, float_small, float_large): - eps = np.finfo(float_large).eps - - arr = np.array([1.0], float_small) - range = np.array([1.0 + eps, 2.0], float_large) - - # test is looking for behavior when the bounds change between dtypes - if range.astype(float_small)[0] != 1: - 
return - - # previously crashed - count, x_loc = np.histogram(arr, bins=1, range=range) - assert_equal(count, [1]) - - # gh-10322 means that the type comes from arr - this may change - assert_equal(x_loc.dtype, float_small) - - def do_precision_upper_bound(self, float_small, float_large): - eps = np.finfo(float_large).eps - - arr = np.array([1.0], float_small) - range = np.array([0.0, 1.0 - eps], float_large) - - # test is looking for behavior when the bounds change between dtypes - if range.astype(float_small)[-1] != 1: - return - - # previously crashed - count, x_loc = np.histogram(arr, bins=1, range=range) - assert_equal(count, [1]) - - # gh-10322 means that the type comes from arr - this may change - assert_equal(x_loc.dtype, float_small) - - def do_precision(self, float_small, float_large): - self.do_precision_lower_bound(float_small, float_large) - self.do_precision_upper_bound(float_small, float_large) - - def test_precision(self): - # not looping results in a useful stack trace upon failure - self.do_precision(np.half, np.single) - self.do_precision(np.half, np.double) - self.do_precision(np.half, np.longdouble) - self.do_precision(np.single, np.double) - self.do_precision(np.single, np.longdouble) - self.do_precision(np.double, np.longdouble) - - def test_histogram_bin_edges(self): - hist, e = histogram([1, 2, 3, 4], [1, 2]) - edges = histogram_bin_edges([1, 2, 3, 4], [1, 2]) - assert_array_equal(edges, e) - - arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.]) - hist, e = histogram(arr, bins=30, range=(-0.5, 5)) - edges = histogram_bin_edges(arr, bins=30, range=(-0.5, 5)) - assert_array_equal(edges, e) - - hist, e = histogram(arr, bins='auto', range=(0, 1)) - edges = histogram_bin_edges(arr, bins='auto', range=(0, 1)) - assert_array_equal(edges, e) - - -class TestHistogramOptimBinNums(object): - """ - Provide test coverage when using provided estimators for optimal number of - bins - """ - - def test_empty(self): - estimator_list = ['fd', 'scott', 
'rice', 'sturges', - 'doane', 'sqrt', 'auto', 'stone'] - # check it can deal with empty data - for estimator in estimator_list: - a, b = histogram([], bins=estimator) - assert_array_equal(a, np.array([0])) - assert_array_equal(b, np.array([0, 1])) - - def test_simple(self): - """ - Straightforward testing with a mixture of linspace data (for - consistency). All test values have been precomputed and the values - shouldn't change - """ - # Some basic sanity checking, with some fixed data. - # Checking for the correct number of bins - basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7, - 'doane': 8, 'sqrt': 8, 'auto': 7, 'stone': 2}, - 500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10, - 'doane': 12, 'sqrt': 23, 'auto': 10, 'stone': 9}, - 5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14, - 'doane': 17, 'sqrt': 71, 'auto': 17, 'stone': 20}} - - for testlen, expectedResults in basic_test.items(): - # Create some sort of non uniform data to test with - # (2 peak uniform mixture) - x1 = np.linspace(-10, -1, testlen // 5 * 2) - x2 = np.linspace(1, 10, testlen // 5 * 3) - x = np.concatenate((x1, x2)) - for estimator, numbins in expectedResults.items(): - a, b = np.histogram(x, estimator) - assert_equal(len(a), numbins, err_msg="For the {0} estimator " - "with datasize of {1}".format(estimator, testlen)) - - def test_small(self): - """ - Smaller datasets have the potential to cause issues with the data - adaptive methods, especially the FD method. All bin numbers have been - precalculated. 
- """ - small_dat = {1: {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1, - 'doane': 1, 'sqrt': 1, 'stone': 1}, - 2: {'fd': 2, 'scott': 1, 'rice': 3, 'sturges': 2, - 'doane': 1, 'sqrt': 2, 'stone': 1}, - 3: {'fd': 2, 'scott': 2, 'rice': 3, 'sturges': 3, - 'doane': 3, 'sqrt': 2, 'stone': 1}} - - for testlen, expectedResults in small_dat.items(): - testdat = np.arange(testlen) - for estimator, expbins in expectedResults.items(): - a, b = np.histogram(testdat, estimator) - assert_equal(len(a), expbins, err_msg="For the {0} estimator " - "with datasize of {1}".format(estimator, testlen)) - - def test_incorrect_methods(self): - """ - Check a Value Error is thrown when an unknown string is passed in - """ - check_list = ['mad', 'freeman', 'histograms', 'IQR'] - for estimator in check_list: - assert_raises(ValueError, histogram, [1, 2, 3], estimator) - - def test_novariance(self): - """ - Check that methods handle no variance in data - Primarily for Scott and FD as the SD and IQR are both 0 in this case - """ - novar_dataset = np.ones(100) - novar_resultdict = {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1, - 'doane': 1, 'sqrt': 1, 'auto': 1, 'stone': 1} - - for estimator, numbins in novar_resultdict.items(): - a, b = np.histogram(novar_dataset, estimator) - assert_equal(len(a), numbins, err_msg="{0} estimator, " - "No Variance test".format(estimator)) - - def test_limited_variance(self): - """ - Check when IQR is 0, but variance exists, we return the sturges value - and not the fd value. 
- """ - lim_var_data = np.ones(1000) - lim_var_data[:3] = 0 - lim_var_data[-4:] = 100 - - edges_auto = histogram_bin_edges(lim_var_data, 'auto') - assert_equal(edges_auto, np.linspace(0, 100, 12)) - - edges_fd = histogram_bin_edges(lim_var_data, 'fd') - assert_equal(edges_fd, np.array([0, 100])) - - edges_sturges = histogram_bin_edges(lim_var_data, 'sturges') - assert_equal(edges_sturges, np.linspace(0, 100, 12)) - - def test_outlier(self): - """ - Check the FD, Scott and Doane with outliers. - - The FD estimates a smaller binwidth since it's less affected by - outliers. Since the range is so (artificially) large, this means more - bins, most of which will be empty, but the data of interest usually is - unaffected. The Scott estimator is more affected and returns fewer bins, - despite most of the variance being in one area of the data. The Doane - estimator lies somewhere between the other two. - """ - xcenter = np.linspace(-10, 10, 50) - outlier_dataset = np.hstack((np.linspace(-110, -100, 5), xcenter)) - - outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11, 'stone': 6} - - for estimator, numbins in outlier_resultdict.items(): - a, b = np.histogram(outlier_dataset, estimator) - assert_equal(len(a), numbins) - - def test_scott_vs_stone(self): - """Verify that Scott's rule and Stone's rule converges for normally distributed data""" - - def nbins_ratio(seed, size): - rng = np.random.RandomState(seed) - x = rng.normal(loc=0, scale=2, size=size) - a, b = len(np.histogram(x, 'stone')[0]), len(np.histogram(x, 'scott')[0]) - return a / (a + b) - - ll = [[nbins_ratio(seed, size) for size in np.geomspace(start=10, stop=100, num=4).round().astype(int)] - for seed in range(10)] - - # the average difference between the two methods decreases as the dataset size increases. 
- avg = abs(np.mean(ll, axis=0) - 0.5) - assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2) - - def test_simple_range(self): - """ - Straightforward testing with a mixture of linspace data (for - consistency). Adding in a 3rd mixture that will then be - completely ignored. All test values have been precomputed and - the shouldn't change. - """ - # some basic sanity checking, with some fixed data. - # Checking for the correct number of bins - basic_test = { - 50: {'fd': 8, 'scott': 8, 'rice': 15, - 'sturges': 14, 'auto': 14, 'stone': 8}, - 500: {'fd': 15, 'scott': 16, 'rice': 32, - 'sturges': 20, 'auto': 20, 'stone': 80}, - 5000: {'fd': 33, 'scott': 33, 'rice': 69, - 'sturges': 27, 'auto': 33, 'stone': 80} - } - - for testlen, expectedResults in basic_test.items(): - # create some sort of non uniform data to test with - # (3 peak uniform mixture) - x1 = np.linspace(-10, -1, testlen // 5 * 2) - x2 = np.linspace(1, 10, testlen // 5 * 3) - x3 = np.linspace(-100, -50, testlen) - x = np.hstack((x1, x2, x3)) - for estimator, numbins in expectedResults.items(): - a, b = np.histogram(x, estimator, range = (-20, 20)) - msg = "For the {0} estimator".format(estimator) - msg += " with datasize of {0}".format(testlen) - assert_equal(len(a), numbins, err_msg=msg) - - @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott', - 'stone', 'rice', 'sturges']) - def test_signed_integer_data(self, bins): - # Regression test for gh-14379. 
- a = np.array([-2, 0, 127], dtype=np.int8) - hist, edges = np.histogram(a, bins=bins) - hist32, edges32 = np.histogram(a.astype(np.int32), bins=bins) - assert_array_equal(hist, hist32) - assert_array_equal(edges, edges32) - - def test_simple_weighted(self): - """ - Check that weighted data raises a TypeError - """ - estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto'] - for estimator in estimator_list: - assert_raises(TypeError, histogram, [1, 2, 3], - estimator, weights=[1, 2, 3]) - - -class TestHistogramdd(object): - - def test_simple(self): - x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5], - [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]]) - H, edges = histogramdd(x, (2, 3, 3), - range=[[-1, 1], [0, 3], [0, 3]]) - answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]], - [[0, 1, 0], [0, 0, 1], [0, 0, 1]]]) - assert_array_equal(H, answer) - - # Check normalization - ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]] - H, edges = histogramdd(x, bins=ed, density=True) - assert_(np.all(H == answer / 12.)) - - # Check that H has the correct shape. - H, edges = histogramdd(x, (2, 3, 4), - range=[[-1, 1], [0, 3], [0, 4]], - density=True) - answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]], - [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]]) - assert_array_almost_equal(H, answer / 6., 4) - # Check that a sequence of arrays is accepted and H has the correct - # shape. - z = [np.squeeze(y) for y in np.split(x, 3, axis=1)] - H, edges = histogramdd( - z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]]) - answer = np.array([[[0, 0], [0, 0], [0, 0]], - [[0, 1], [0, 0], [1, 0]], - [[0, 1], [0, 0], [0, 0]], - [[0, 0], [0, 0], [0, 0]]]) - assert_array_equal(H, answer) - - Z = np.zeros((5, 5, 5)) - Z[list(range(5)), list(range(5)), list(range(5))] = 1. - H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5) - assert_array_equal(H, Z) - - def test_shape_3d(self): - # All possible permutations for bins of different lengths in 3D. 
- bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4), - (4, 5, 6)) - r = np.random.rand(10, 3) - for b in bins: - H, edges = histogramdd(r, b) - assert_(H.shape == b) - - def test_shape_4d(self): - # All possible permutations for bins of different lengths in 4D. - bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4), - (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6), - (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7), - (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5), - (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5), - (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4)) - - r = np.random.rand(10, 4) - for b in bins: - H, edges = histogramdd(r, b) - assert_(H.shape == b) - - def test_weights(self): - v = np.random.rand(100, 2) - hist, edges = histogramdd(v) - n_hist, edges = histogramdd(v, density=True) - w_hist, edges = histogramdd(v, weights=np.ones(100)) - assert_array_equal(w_hist, hist) - w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, density=True) - assert_array_equal(w_hist, n_hist) - w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2) - assert_array_equal(w_hist, 2 * hist) - - def test_identical_samples(self): - x = np.zeros((10, 2), int) - hist, edges = histogramdd(x, bins=2) - assert_array_equal(edges[0], np.array([-0.5, 0., 0.5])) - - def test_empty(self): - a, b = histogramdd([[], []], bins=([0, 1], [0, 1])) - assert_array_max_ulp(a, np.array([[0.]])) - a, b = np.histogramdd([[], [], []], bins=2) - assert_array_max_ulp(a, np.zeros((2, 2, 2))) - - def test_bins_errors(self): - # There are two ways to specify bins. Check for the right errors - # when mixing those. 
- x = np.arange(8).reshape(2, 4) - assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5]) - assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1]) - assert_raises( - ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]]) - assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]])) - - def test_inf_edges(self): - # Test using +/-inf bin edges works. See #1788. - with np.errstate(invalid='ignore'): - x = np.arange(6).reshape(3, 2) - expected = np.array([[1, 0], [0, 1], [0, 1]]) - h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]]) - assert_allclose(h, expected) - h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])]) - assert_allclose(h, expected) - h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]]) - assert_allclose(h, expected) - - def test_rightmost_binedge(self): - # Test event very close to rightmost binedge. See Github issue #4266 - x = [0.9999999995] - bins = [[0., 0.5, 1.0]] - hist, _ = histogramdd(x, bins=bins) - assert_(hist[0] == 0.0) - assert_(hist[1] == 1.) - x = [1.0] - bins = [[0., 0.5, 1.0]] - hist, _ = histogramdd(x, bins=bins) - assert_(hist[0] == 0.0) - assert_(hist[1] == 1.) 
- x = [1.0000000001] - bins = [[0., 0.5, 1.0]] - hist, _ = histogramdd(x, bins=bins) - assert_(hist[0] == 0.0) - assert_(hist[1] == 0.0) - x = [1.0001] - bins = [[0., 0.5, 1.0]] - hist, _ = histogramdd(x, bins=bins) - assert_(hist[0] == 0.0) - assert_(hist[1] == 0.0) - - def test_finite_range(self): - vals = np.random.random((100, 3)) - histogramdd(vals, range=[[0.0, 1.0], [0.25, 0.75], [0.25, 0.5]]) - assert_raises(ValueError, histogramdd, vals, - range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]]) - assert_raises(ValueError, histogramdd, vals, - range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]]) - - def test_equal_edges(self): - """ Test that adjacent entries in an edge array can be equal """ - x = np.array([0, 1, 2]) - y = np.array([0, 1, 2]) - x_edges = np.array([0, 2, 2]) - y_edges = 1 - hist, edges = histogramdd((x, y), bins=(x_edges, y_edges)) - - hist_expected = np.array([ - [2.], - [1.], # x == 2 falls in the final bin - ]) - assert_equal(hist, hist_expected) - - def test_edge_dtype(self): - """ Test that if an edge array is input, its type is preserved """ - x = np.array([0, 10, 20]) - y = x / 10 - x_edges = np.array([0, 5, 15, 20]) - y_edges = x_edges / 10 - hist, edges = histogramdd((x, y), bins=(x_edges, y_edges)) - - assert_equal(edges[0].dtype, x_edges.dtype) - assert_equal(edges[1].dtype, y_edges.dtype) - - def test_large_integers(self): - big = 2**60 # Too large to represent with a full precision float - - x = np.array([0], np.int64) - x_edges = np.array([-1, +1], np.int64) - y = big + x - y_edges = big + x_edges - - hist, edges = histogramdd((x, y), bins=(x_edges, y_edges)) - - assert_equal(hist[0, 0], 1) - - def test_density_non_uniform_2d(self): - # Defines the following grid: - # - # 0 2 8 - # 0+-+-----+ - # + | + - # + | + - # 6+-+-----+ - # 8+-+-----+ - x_edges = np.array([0, 2, 8]) - y_edges = np.array([0, 6, 8]) - relative_areas = np.array([ - [3, 9], - [1, 3]]) - - # ensure the number of points in each region is proportional to its area - x = 
np.array([1] + [1]*3 + [7]*3 + [7]*9) - y = np.array([7] + [1]*3 + [7]*3 + [1]*9) - - # sanity check that the above worked as intended - hist, edges = histogramdd((y, x), bins=(y_edges, x_edges)) - assert_equal(hist, relative_areas) - - # resulting histogram should be uniform, since counts and areas are proportional - hist, edges = histogramdd((y, x), bins=(y_edges, x_edges), density=True) - assert_equal(hist, 1 / (8*8)) - - def test_density_non_uniform_1d(self): - # compare to histogram to show the results are the same - v = np.arange(10) - bins = np.array([0, 1, 3, 6, 10]) - hist, edges = histogram(v, bins, density=True) - hist_dd, edges_dd = histogramdd((v,), (bins,), density=True) - assert_equal(hist, hist_dd) - assert_equal(edges, edges_dd[0]) - - def test_density_via_normed(self): - # normed should simply alias to density argument - v = np.arange(10) - bins = np.array([0, 1, 3, 6, 10]) - hist, edges = histogram(v, bins, density=True) - hist_dd, edges_dd = histogramdd((v,), (bins,), normed=True) - assert_equal(hist, hist_dd) - assert_equal(edges, edges_dd[0]) - - def test_density_normed_redundancy(self): - v = np.arange(10) - bins = np.array([0, 1, 3, 6, 10]) - with assert_raises_regex(TypeError, "Cannot specify both"): - hist_dd, edges_dd = histogramdd((v,), (bins,), - density=True, - normed=True) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_index_tricks.py b/venv/lib/python3.7/site-packages/numpy/lib/tests/test_index_tricks.py deleted file mode 100644 index dbe445c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_index_tricks.py +++ /dev/null @@ -1,500 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import pytest - -import numpy as np -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_almost_equal, - assert_array_almost_equal, assert_raises, assert_raises_regex, - assert_warns - ) -from numpy.lib.index_tricks import ( - mgrid, ogrid, ndenumerate, 
fill_diagonal, diag_indices, diag_indices_from, - index_exp, ndindex, r_, s_, ix_ - ) - - -class TestRavelUnravelIndex(object): - def test_basic(self): - assert_equal(np.unravel_index(2, (2, 2)), (1, 0)) - - # test backwards compatibility with older dims - # keyword argument; see Issue #10586 - with assert_warns(DeprecationWarning): - # we should achieve the correct result - # AND raise the appropriate warning - # when using older "dims" kw argument - assert_equal(np.unravel_index(indices=2, - dims=(2, 2)), - (1, 0)) - - # test that new shape argument works properly - assert_equal(np.unravel_index(indices=2, - shape=(2, 2)), - (1, 0)) - - # test that an invalid second keyword argument - # is properly handled - with assert_raises(TypeError): - np.unravel_index(indices=2, hape=(2, 2)) - - with assert_raises(TypeError): - np.unravel_index(2, hape=(2, 2)) - - with assert_raises(TypeError): - np.unravel_index(254, ims=(17, 94)) - - assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2) - assert_equal(np.unravel_index(254, (17, 94)), (2, 66)) - assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254) - assert_raises(ValueError, np.unravel_index, -1, (2, 2)) - assert_raises(TypeError, np.unravel_index, 0.5, (2, 2)) - assert_raises(ValueError, np.unravel_index, 4, (2, 2)) - assert_raises(ValueError, np.ravel_multi_index, (-3, 1), (2, 2)) - assert_raises(ValueError, np.ravel_multi_index, (2, 1), (2, 2)) - assert_raises(ValueError, np.ravel_multi_index, (0, -3), (2, 2)) - assert_raises(ValueError, np.ravel_multi_index, (0, 2), (2, 2)) - assert_raises(TypeError, np.ravel_multi_index, (0.1, 0.), (2, 2)) - - assert_equal(np.unravel_index((2*3 + 1)*6 + 4, (4, 3, 6)), [2, 1, 4]) - assert_equal( - np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2*3 + 1)*6 + 4) - - arr = np.array([[3, 6, 6], [4, 5, 1]]) - assert_equal(np.ravel_multi_index(arr, (7, 6)), [22, 41, 37]) - assert_equal( - np.ravel_multi_index(arr, (7, 6), order='F'), [31, 41, 13]) - assert_equal( - 
np.ravel_multi_index(arr, (4, 6), mode='clip'), [22, 23, 19]) - assert_equal(np.ravel_multi_index(arr, (4, 4), mode=('clip', 'wrap')), - [12, 13, 13]) - assert_equal(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), 1621) - - assert_equal(np.unravel_index(np.array([22, 41, 37]), (7, 6)), - [[3, 6, 6], [4, 5, 1]]) - assert_equal( - np.unravel_index(np.array([31, 41, 13]), (7, 6), order='F'), - [[3, 6, 6], [4, 5, 1]]) - assert_equal(np.unravel_index(1621, (6, 7, 8, 9)), [3, 1, 4, 1]) - - def test_empty_indices(self): - msg1 = 'indices must be integral: the provided empty sequence was' - msg2 = 'only int indices permitted' - assert_raises_regex(TypeError, msg1, np.unravel_index, [], (10, 3, 5)) - assert_raises_regex(TypeError, msg1, np.unravel_index, (), (10, 3, 5)) - assert_raises_regex(TypeError, msg2, np.unravel_index, np.array([]), - (10, 3, 5)) - assert_equal(np.unravel_index(np.array([],dtype=int), (10, 3, 5)), - [[], [], []]) - assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], []), - (10, 3)) - assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], ['abc']), - (10, 3)) - assert_raises_regex(TypeError, msg2, np.ravel_multi_index, - (np.array([]), np.array([])), (5, 3)) - assert_equal(np.ravel_multi_index( - (np.array([], dtype=int), np.array([], dtype=int)), (5, 3)), []) - assert_equal(np.ravel_multi_index(np.array([[], []], dtype=int), - (5, 3)), []) - - def test_big_indices(self): - # ravel_multi_index for big indices (issue #7546) - if np.intp == np.int64: - arr = ([1, 29], [3, 5], [3, 117], [19, 2], - [2379, 1284], [2, 2], [0, 1]) - assert_equal( - np.ravel_multi_index(arr, (41, 7, 120, 36, 2706, 8, 6)), - [5627771580, 117259570957]) - - # test unravel_index for big indices (issue #9538) - assert_raises(ValueError, np.unravel_index, 1, (2**32-1, 2**31+1)) - - # test overflow checking for too big array (issue #7546) - dummy_arr = ([0],[0]) - half_max = np.iinfo(np.intp).max // 2 - assert_equal( - np.ravel_multi_index(dummy_arr, 
(half_max, 2)), [0]) - assert_raises(ValueError, - np.ravel_multi_index, dummy_arr, (half_max+1, 2)) - assert_equal( - np.ravel_multi_index(dummy_arr, (half_max, 2), order='F'), [0]) - assert_raises(ValueError, - np.ravel_multi_index, dummy_arr, (half_max+1, 2), order='F') - - def test_dtypes(self): - # Test with different data types - for dtype in [np.int16, np.uint16, np.int32, - np.uint32, np.int64, np.uint64]: - coords = np.array( - [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype) - shape = (5, 8) - uncoords = 8*coords[0]+coords[1] - assert_equal(np.ravel_multi_index(coords, shape), uncoords) - assert_equal(coords, np.unravel_index(uncoords, shape)) - uncoords = coords[0]+5*coords[1] - assert_equal( - np.ravel_multi_index(coords, shape, order='F'), uncoords) - assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) - - coords = np.array( - [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]], - dtype=dtype) - shape = (5, 8, 10) - uncoords = 10*(8*coords[0]+coords[1])+coords[2] - assert_equal(np.ravel_multi_index(coords, shape), uncoords) - assert_equal(coords, np.unravel_index(uncoords, shape)) - uncoords = coords[0]+5*(coords[1]+8*coords[2]) - assert_equal( - np.ravel_multi_index(coords, shape, order='F'), uncoords) - assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) - - def test_clipmodes(self): - # Test clipmodes - assert_equal( - np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode='wrap'), - np.ravel_multi_index([1, 1, 6, 2], (4, 3, 7, 12))) - assert_equal(np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), - mode=( - 'wrap', 'raise', 'clip', 'raise')), - np.ravel_multi_index([1, 1, 0, 2], (4, 3, 7, 12))) - assert_raises( - ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12)) - - def test_writeability(self): - # See gh-7269 - x, y = np.unravel_index([1, 2, 3], (4, 5)) - assert_(x.flags.writeable) - assert_(y.flags.writeable) - - def test_0d(self): - # gh-580 - x = np.unravel_index(0, ()) - 
assert_equal(x, ()) - - assert_raises_regex(ValueError, "0d array", np.unravel_index, [0], ()) - assert_raises_regex( - ValueError, "out of bounds", np.unravel_index, [1], ()) - - @pytest.mark.parametrize("mode", ["clip", "wrap", "raise"]) - def test_empty_array_ravel(self, mode): - res = np.ravel_multi_index( - np.zeros((3, 0), dtype=np.intp), (2, 1, 0), mode=mode) - assert(res.shape == (0,)) - - with assert_raises(ValueError): - np.ravel_multi_index( - np.zeros((3, 1), dtype=np.intp), (2, 1, 0), mode=mode) - - def test_empty_array_unravel(self): - res = np.unravel_index(np.zeros(0, dtype=np.intp), (2, 1, 0)) - # res is a tuple of three empty arrays - assert(len(res) == 3) - assert(all(a.shape == (0,) for a in res)) - - with assert_raises(ValueError): - np.unravel_index([1], (2, 1, 0)) - -class TestGrid(object): - def test_basic(self): - a = mgrid[-1:1:10j] - b = mgrid[-1:1:0.1] - assert_(a.shape == (10,)) - assert_(b.shape == (20,)) - assert_(a[0] == -1) - assert_almost_equal(a[-1], 1) - assert_(b[0] == -1) - assert_almost_equal(b[1]-b[0], 0.1, 11) - assert_almost_equal(b[-1], b[0]+19*0.1, 11) - assert_almost_equal(a[1]-a[0], 2.0/9.0, 11) - - def test_linspace_equivalence(self): - y, st = np.linspace(2, 10, retstep=True) - assert_almost_equal(st, 8/49.0) - assert_array_almost_equal(y, mgrid[2:10:50j], 13) - - def test_nd(self): - c = mgrid[-1:1:10j, -2:2:10j] - d = mgrid[-1:1:0.1, -2:2:0.2] - assert_(c.shape == (2, 10, 10)) - assert_(d.shape == (2, 20, 20)) - assert_array_equal(c[0][0, :], -np.ones(10, 'd')) - assert_array_equal(c[1][:, 0], -2*np.ones(10, 'd')) - assert_array_almost_equal(c[0][-1, :], np.ones(10, 'd'), 11) - assert_array_almost_equal(c[1][:, -1], 2*np.ones(10, 'd'), 11) - assert_array_almost_equal(d[0, 1, :] - d[0, 0, :], - 0.1*np.ones(20, 'd'), 11) - assert_array_almost_equal(d[1, :, 1] - d[1, :, 0], - 0.2*np.ones(20, 'd'), 11) - - def test_sparse(self): - grid_full = mgrid[-1:1:10j, -2:2:10j] - grid_sparse = ogrid[-1:1:10j, -2:2:10j] - - # 
sparse grids can be made dense by broadcasting - grid_broadcast = np.broadcast_arrays(*grid_sparse) - for f, b in zip(grid_full, grid_broadcast): - assert_equal(f, b) - - @pytest.mark.parametrize("start, stop, step, expected", [ - (None, 10, 10j, (200, 10)), - (-10, 20, None, (1800, 30)), - ]) - def test_mgrid_size_none_handling(self, start, stop, step, expected): - # regression test None value handling for - # start and step values used by mgrid; - # internally, this aims to cover previously - # unexplored code paths in nd_grid() - grid = mgrid[start:stop:step, start:stop:step] - # need a smaller grid to explore one of the - # untested code paths - grid_small = mgrid[start:stop:step] - assert_equal(grid.size, expected[0]) - assert_equal(grid_small.size, expected[1]) - - -class TestConcatenator(object): - def test_1d(self): - assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6])) - b = np.ones(5) - c = r_[b, 0, 0, b] - assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) - - def test_mixed_type(self): - g = r_[10.1, 1:10] - assert_(g.dtype == 'f8') - - def test_more_mixed_type(self): - g = r_[-10.1, np.array([1]), np.array([2, 3, 4]), 10.0] - assert_(g.dtype == 'f8') - - def test_complex_step(self): - # Regression test for #12262 - g = r_[0:36:100j] - assert_(g.shape == (100,)) - - def test_2d(self): - b = np.random.rand(5, 5) - c = np.random.rand(5, 5) - d = r_['1', b, c] # append columns - assert_(d.shape == (5, 10)) - assert_array_equal(d[:, :5], b) - assert_array_equal(d[:, 5:], c) - d = r_[b, c] - assert_(d.shape == (10, 5)) - assert_array_equal(d[:5, :], b) - assert_array_equal(d[5:, :], c) - - def test_0d(self): - assert_equal(r_[0, np.array(1), 2], [0, 1, 2]) - assert_equal(r_[[0, 1, 2], np.array(3)], [0, 1, 2, 3]) - assert_equal(r_[np.array(0), [1, 2, 3]], [0, 1, 2, 3]) - - -class TestNdenumerate(object): - def test_basic(self): - a = np.array([[1, 2], [3, 4]]) - assert_equal(list(ndenumerate(a)), - [((0, 0), 1), ((0, 1), 2), ((1, 
0), 3), ((1, 1), 4)]) - - -class TestIndexExpression(object): - def test_regression_1(self): - # ticket #1196 - a = np.arange(2) - assert_equal(a[:-1], a[s_[:-1]]) - assert_equal(a[:-1], a[index_exp[:-1]]) - - def test_simple_1(self): - a = np.random.rand(4, 5, 6) - - assert_equal(a[:, :3, [1, 2]], a[index_exp[:, :3, [1, 2]]]) - assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]]) - - -class TestIx_(object): - def test_regression_1(self): - # Test empty untyped inputs create outputs of indexing type, gh-5804 - a, = np.ix_(range(0)) - assert_equal(a.dtype, np.intp) - - a, = np.ix_([]) - assert_equal(a.dtype, np.intp) - - # but if the type is specified, don't change it - a, = np.ix_(np.array([], dtype=np.float32)) - assert_equal(a.dtype, np.float32) - - def test_shape_and_dtype(self): - sizes = (4, 5, 3, 2) - # Test both lists and arrays - for func in (range, np.arange): - arrays = np.ix_(*[func(sz) for sz in sizes]) - for k, (a, sz) in enumerate(zip(arrays, sizes)): - assert_equal(a.shape[k], sz) - assert_(all(sh == 1 for j, sh in enumerate(a.shape) if j != k)) - assert_(np.issubdtype(a.dtype, np.integer)) - - def test_bool(self): - bool_a = [True, False, True, True] - int_a, = np.nonzero(bool_a) - assert_equal(np.ix_(bool_a)[0], int_a) - - def test_1d_only(self): - idx2d = [[1, 2, 3], [4, 5, 6]] - assert_raises(ValueError, np.ix_, idx2d) - - def test_repeated_input(self): - length_of_vector = 5 - x = np.arange(length_of_vector) - out = ix_(x, x) - assert_equal(out[0].shape, (length_of_vector, 1)) - assert_equal(out[1].shape, (1, length_of_vector)) - # check that input shape is not modified - assert_equal(x.shape, (length_of_vector,)) - - -def test_c_(): - a = np.c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])] - assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]]) - - -class TestFillDiagonal(object): - def test_basic(self): - a = np.zeros((3, 3), int) - fill_diagonal(a, 5) - assert_array_equal( - a, np.array([[5, 0, 0], - [0, 5, 0], - [0, 0, 5]]) - ) - - def 
test_tall_matrix(self): - a = np.zeros((10, 3), int) - fill_diagonal(a, 5) - assert_array_equal( - a, np.array([[5, 0, 0], - [0, 5, 0], - [0, 0, 5], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0]]) - ) - - def test_tall_matrix_wrap(self): - a = np.zeros((10, 3), int) - fill_diagonal(a, 5, True) - assert_array_equal( - a, np.array([[5, 0, 0], - [0, 5, 0], - [0, 0, 5], - [0, 0, 0], - [5, 0, 0], - [0, 5, 0], - [0, 0, 5], - [0, 0, 0], - [5, 0, 0], - [0, 5, 0]]) - ) - - def test_wide_matrix(self): - a = np.zeros((3, 10), int) - fill_diagonal(a, 5) - assert_array_equal( - a, np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 5, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]]) - ) - - def test_operate_4d_array(self): - a = np.zeros((3, 3, 3, 3), int) - fill_diagonal(a, 4) - i = np.array([0, 1, 2]) - assert_equal(np.where(a != 0), (i, i, i, i)) - - def test_low_dim_handling(self): - # raise error with low dimensionality - a = np.zeros(3, int) - with assert_raises_regex(ValueError, "at least 2-d"): - fill_diagonal(a, 5) - - def test_hetero_shape_handling(self): - # raise error with high dimensionality and - # shape mismatch - a = np.zeros((3,3,7,3), int) - with assert_raises_regex(ValueError, "equal length"): - fill_diagonal(a, 2) - - -def test_diag_indices(): - di = diag_indices(4) - a = np.array([[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12], - [13, 14, 15, 16]]) - a[di] = 100 - assert_array_equal( - a, np.array([[100, 2, 3, 4], - [5, 100, 7, 8], - [9, 10, 100, 12], - [13, 14, 15, 100]]) - ) - - # Now, we create indices to manipulate a 3-d array: - d3 = diag_indices(2, 3) - - # And use it to set the diagonal of a zeros array to 1: - a = np.zeros((2, 2, 2), int) - a[d3] = 1 - assert_array_equal( - a, np.array([[[1, 0], - [0, 0]], - [[0, 0], - [0, 1]]]) - ) - - -class TestDiagIndicesFrom(object): - - def test_diag_indices_from(self): - x = np.random.random((4, 4)) - r, c = diag_indices_from(x) - assert_array_equal(r, 
np.arange(4)) - assert_array_equal(c, np.arange(4)) - - def test_error_small_input(self): - x = np.ones(7) - with assert_raises_regex(ValueError, "at least 2-d"): - diag_indices_from(x) - - def test_error_shape_mismatch(self): - x = np.zeros((3, 3, 2, 3), int) - with assert_raises_regex(ValueError, "equal length"): - diag_indices_from(x) - - -def test_ndindex(): - x = list(ndindex(1, 2, 3)) - expected = [ix for ix, e in ndenumerate(np.zeros((1, 2, 3)))] - assert_array_equal(x, expected) - - x = list(ndindex((1, 2, 3))) - assert_array_equal(x, expected) - - # Test use of scalars and tuples - x = list(ndindex((3,))) - assert_array_equal(x, list(ndindex(3))) - - # Make sure size argument is optional - x = list(ndindex()) - assert_equal(x, [()]) - - x = list(ndindex(())) - assert_equal(x, [()]) - - # Make sure 0-sized ndindex works correctly - x = list(ndindex(*[0])) - assert_equal(x, []) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_io.py b/venv/lib/python3.7/site-packages/numpy/lib/tests/test_io.py deleted file mode 100644 index a095e25..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_io.py +++ /dev/null @@ -1,2596 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import gzip -import os -import threading -import time -import warnings -import io -import re -import pytest -from tempfile import NamedTemporaryFile -from io import BytesIO, StringIO -from datetime import datetime -import locale - -import numpy as np -import numpy.ma as ma -from numpy.lib._iotools import ConverterError, ConversionWarning -from numpy.compat import asbytes, bytes, Path -from numpy.ma.testutils import assert_equal -from numpy.testing import ( - assert_warns, assert_, assert_raises_regex, assert_raises, - assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY, - HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles, assert_no_warnings - ) -from numpy.testing._private.utils import requires_memory - - -class 
TextIO(BytesIO): - """Helper IO class. - - Writes encode strings to bytes if needed, reads return bytes. - This makes it easier to emulate files opened in binary mode - without needing to explicitly convert strings to bytes in - setting up the test data. - - """ - def __init__(self, s=""): - BytesIO.__init__(self, asbytes(s)) - - def write(self, s): - BytesIO.write(self, asbytes(s)) - - def writelines(self, lines): - BytesIO.writelines(self, [asbytes(s) for s in lines]) - - -MAJVER, MINVER = sys.version_info[:2] -IS_64BIT = sys.maxsize > 2**32 -try: - import bz2 - HAS_BZ2 = True -except ImportError: - HAS_BZ2 = False -try: - import lzma - HAS_LZMA = True -except ImportError: - HAS_LZMA = False - - -def strptime(s, fmt=None): - """ - This function is available in the datetime module only from Python >= - 2.5. - - """ - if type(s) == bytes: - s = s.decode("latin1") - return datetime(*time.strptime(s, fmt)[:3]) - - -class RoundtripTest(object): - def roundtrip(self, save_func, *args, **kwargs): - """ - save_func : callable - Function used to save arrays to file. - file_on_disk : bool - If true, store the file on disk, instead of in a - string buffer. - save_kwds : dict - Parameters passed to `save_func`. - load_kwds : dict - Parameters passed to `numpy.load`. - args : tuple of arrays - Arrays stored to file. 
- - """ - save_kwds = kwargs.get('save_kwds', {}) - load_kwds = kwargs.get('load_kwds', {"allow_pickle": True}) - file_on_disk = kwargs.get('file_on_disk', False) - - if file_on_disk: - target_file = NamedTemporaryFile(delete=False) - load_file = target_file.name - else: - target_file = BytesIO() - load_file = target_file - - try: - arr = args - - save_func(target_file, *arr, **save_kwds) - target_file.flush() - target_file.seek(0) - - if sys.platform == 'win32' and not isinstance(target_file, BytesIO): - target_file.close() - - arr_reloaded = np.load(load_file, **load_kwds) - - self.arr = arr - self.arr_reloaded = arr_reloaded - finally: - if not isinstance(target_file, BytesIO): - target_file.close() - # holds an open file descriptor so it can't be deleted on win - if 'arr_reloaded' in locals(): - if not isinstance(arr_reloaded, np.lib.npyio.NpzFile): - os.remove(target_file.name) - - def check_roundtrips(self, a): - self.roundtrip(a) - self.roundtrip(a, file_on_disk=True) - self.roundtrip(np.asfortranarray(a)) - self.roundtrip(np.asfortranarray(a), file_on_disk=True) - if a.shape[0] > 1: - # neither C nor Fortran contiguous for 2D arrays or more - self.roundtrip(np.asfortranarray(a)[1:]) - self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True) - - def test_array(self): - a = np.array([], float) - self.check_roundtrips(a) - - a = np.array([[1, 2], [3, 4]], float) - self.check_roundtrips(a) - - a = np.array([[1, 2], [3, 4]], int) - self.check_roundtrips(a) - - a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle) - self.check_roundtrips(a) - - a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble) - self.check_roundtrips(a) - - def test_array_object(self): - a = np.array([], object) - self.check_roundtrips(a) - - a = np.array([[1, 2], [3, 4]], object) - self.check_roundtrips(a) - - def test_1D(self): - a = np.array([1, 2, 3, 4], int) - self.roundtrip(a) - - @pytest.mark.skipif(sys.platform == 'win32', reason="Fails on Win32") - 
def test_mmap(self): - a = np.array([[1, 2.5], [4, 7.3]]) - self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) - - a = np.asfortranarray([[1, 2.5], [4, 7.3]]) - self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) - - def test_record(self): - a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) - self.check_roundtrips(a) - - @pytest.mark.slow - def test_format_2_0(self): - dt = [(("%d" % i) * 100, float) for i in range(500)] - a = np.ones(1000, dtype=dt) - with warnings.catch_warnings(record=True): - warnings.filterwarnings('always', '', UserWarning) - self.check_roundtrips(a) - - -class TestSaveLoad(RoundtripTest): - def roundtrip(self, *args, **kwargs): - RoundtripTest.roundtrip(self, np.save, *args, **kwargs) - assert_equal(self.arr[0], self.arr_reloaded) - assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype) - assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc) - - -class TestSavezLoad(RoundtripTest): - def roundtrip(self, *args, **kwargs): - RoundtripTest.roundtrip(self, np.savez, *args, **kwargs) - try: - for n, arr in enumerate(self.arr): - reloaded = self.arr_reloaded['arr_%d' % n] - assert_equal(arr, reloaded) - assert_equal(arr.dtype, reloaded.dtype) - assert_equal(arr.flags.fnc, reloaded.flags.fnc) - finally: - # delete tempfile, must be done here on windows - if self.arr_reloaded.fid: - self.arr_reloaded.fid.close() - os.remove(self.arr_reloaded.fid.name) - - @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") - @pytest.mark.slow - def test_big_arrays(self): - L = (1 << 31) + 100000 - a = np.empty(L, dtype=np.uint8) - with temppath(prefix="numpy_test_big_arrays_", suffix=".npz") as tmp: - np.savez(tmp, a=a) - del a - npfile = np.load(tmp) - a = npfile['a'] # Should succeed - npfile.close() - del a # Avoid pyflakes unused variable warning. 
- - def test_multiple_arrays(self): - a = np.array([[1, 2], [3, 4]], float) - b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) - self.roundtrip(a, b) - - def test_named_arrays(self): - a = np.array([[1, 2], [3, 4]], float) - b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) - c = BytesIO() - np.savez(c, file_a=a, file_b=b) - c.seek(0) - l = np.load(c) - assert_equal(a, l['file_a']) - assert_equal(b, l['file_b']) - - def test_BagObj(self): - a = np.array([[1, 2], [3, 4]], float) - b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) - c = BytesIO() - np.savez(c, file_a=a, file_b=b) - c.seek(0) - l = np.load(c) - assert_equal(sorted(dir(l.f)), ['file_a','file_b']) - assert_equal(a, l.f.file_a) - assert_equal(b, l.f.file_b) - - def test_savez_filename_clashes(self): - # Test that issue #852 is fixed - # and savez functions in multithreaded environment - - def writer(error_list): - with temppath(suffix='.npz') as tmp: - arr = np.random.randn(500, 500) - try: - np.savez(tmp, arr=arr) - except OSError as err: - error_list.append(err) - - errors = [] - threads = [threading.Thread(target=writer, args=(errors,)) - for j in range(3)] - for t in threads: - t.start() - for t in threads: - t.join() - - if errors: - raise AssertionError(errors) - - def test_not_closing_opened_fid(self): - # Test that issue #2178 is fixed: - # verify could seek on 'loaded' file - with temppath(suffix='.npz') as tmp: - with open(tmp, 'wb') as fp: - np.savez(fp, data='LOVELY LOAD') - with open(tmp, 'rb', 10000) as fp: - fp.seek(0) - assert_(not fp.closed) - np.load(fp)['data'] - # fp must not get closed by .load - assert_(not fp.closed) - fp.seek(0) - assert_(not fp.closed) - - #FIXME: Is this still true? - @pytest.mark.skipif(IS_PYPY, reason="Missing context manager on PyPy") - def test_closing_fid(self): - # Test that issue #1517 (too many opened files) remains closed - # It might be a "weak" test since failed to get triggered on - # e.g. 
Debian sid of 2012 Jul 05 but was reported to - # trigger the failure on Ubuntu 10.04: - # http://projects.scipy.org/numpy/ticket/1517#comment:2 - with temppath(suffix='.npz') as tmp: - np.savez(tmp, data='LOVELY LOAD') - # We need to check if the garbage collector can properly close - # numpy npz file returned by np.load when their reference count - # goes to zero. Python 3 running in debug mode raises a - # ResourceWarning when file closing is left to the garbage - # collector, so we catch the warnings. Because ResourceWarning - # is unknown in Python < 3.x, we take the easy way out and - # catch all warnings. - with suppress_warnings() as sup: - sup.filter(Warning) # TODO: specify exact message - for i in range(1, 1025): - try: - np.load(tmp)["data"] - except Exception as e: - msg = "Failed to load data from a file: %s" % e - raise AssertionError(msg) - - def test_closing_zipfile_after_load(self): - # Check that zipfile owns file and can close it. This needs to - # pass a file name to load for the test. On windows failure will - # cause a second error will be raised when the attempt to remove - # the open file is made. 
- prefix = 'numpy_test_closing_zipfile_after_load_' - with temppath(suffix='.npz', prefix=prefix) as tmp: - np.savez(tmp, lab='place holder') - data = np.load(tmp) - fp = data.zip.fp - data.close() - assert_(fp.closed) - - -class TestSaveTxt(object): - def test_array(self): - a = np.array([[1, 2], [3, 4]], float) - fmt = "%.18e" - c = BytesIO() - np.savetxt(c, a, fmt=fmt) - c.seek(0) - assert_equal(c.readlines(), - [asbytes((fmt + ' ' + fmt + '\n') % (1, 2)), - asbytes((fmt + ' ' + fmt + '\n') % (3, 4))]) - - a = np.array([[1, 2], [3, 4]], int) - c = BytesIO() - np.savetxt(c, a, fmt='%d') - c.seek(0) - assert_equal(c.readlines(), [b'1 2\n', b'3 4\n']) - - def test_1D(self): - a = np.array([1, 2, 3, 4], int) - c = BytesIO() - np.savetxt(c, a, fmt='%d') - c.seek(0) - lines = c.readlines() - assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n']) - - def test_0D_3D(self): - c = BytesIO() - assert_raises(ValueError, np.savetxt, c, np.array(1)) - assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]])) - - def test_structured(self): - a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) - c = BytesIO() - np.savetxt(c, a, fmt='%d') - c.seek(0) - assert_equal(c.readlines(), [b'1 2\n', b'3 4\n']) - - def test_structured_padded(self): - # gh-13297 - a = np.array([(1, 2, 3),(4, 5, 6)], dtype=[ - ('foo', 'i4'), ('bar', 'i4'), ('baz', 'i4') - ]) - c = BytesIO() - np.savetxt(c, a[['foo', 'baz']], fmt='%d') - c.seek(0) - assert_equal(c.readlines(), [b'1 3\n', b'4 6\n']) - - @pytest.mark.skipif(Path is None, reason="No pathlib.Path") - def test_multifield_view(self): - a = np.ones(1, dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'f4')]) - v = a[['x', 'z']] - with temppath(suffix='.npy') as path: - path = Path(path) - np.save(path, v) - data = np.load(path) - assert_array_equal(data, v) - - def test_delimiter(self): - a = np.array([[1., 2.], [3., 4.]]) - c = BytesIO() - np.savetxt(c, a, delimiter=',', fmt='%d') - c.seek(0) - assert_equal(c.readlines(), [b'1,2\n', 
b'3,4\n']) - - def test_format(self): - a = np.array([(1, 2), (3, 4)]) - c = BytesIO() - # Sequence of formats - np.savetxt(c, a, fmt=['%02d', '%3.1f']) - c.seek(0) - assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n']) - - # A single multiformat string - c = BytesIO() - np.savetxt(c, a, fmt='%02d : %3.1f') - c.seek(0) - lines = c.readlines() - assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n']) - - # Specify delimiter, should be overridden - c = BytesIO() - np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',') - c.seek(0) - lines = c.readlines() - assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n']) - - # Bad fmt, should raise a ValueError - c = BytesIO() - assert_raises(ValueError, np.savetxt, c, a, fmt=99) - - def test_header_footer(self): - # Test the functionality of the header and footer keyword argument. - - c = BytesIO() - a = np.array([(1, 2), (3, 4)], dtype=int) - test_header_footer = 'Test header / footer' - # Test the header keyword argument - np.savetxt(c, a, fmt='%1d', header=test_header_footer) - c.seek(0) - assert_equal(c.read(), - asbytes('# ' + test_header_footer + '\n1 2\n3 4\n')) - # Test the footer keyword argument - c = BytesIO() - np.savetxt(c, a, fmt='%1d', footer=test_header_footer) - c.seek(0) - assert_equal(c.read(), - asbytes('1 2\n3 4\n# ' + test_header_footer + '\n')) - # Test the commentstr keyword argument used on the header - c = BytesIO() - commentstr = '% ' - np.savetxt(c, a, fmt='%1d', - header=test_header_footer, comments=commentstr) - c.seek(0) - assert_equal(c.read(), - asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n')) - # Test the commentstr keyword argument used on the footer - c = BytesIO() - commentstr = '% ' - np.savetxt(c, a, fmt='%1d', - footer=test_header_footer, comments=commentstr) - c.seek(0) - assert_equal(c.read(), - asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n')) - - def test_file_roundtrip(self): - with temppath() as name: - a = np.array([(1, 2), (3, 4)]) - np.savetxt(name, a) - b = 
np.loadtxt(name) - assert_array_equal(a, b) - - def test_complex_arrays(self): - ncols = 2 - nrows = 2 - a = np.zeros((ncols, nrows), dtype=np.complex128) - re = np.pi - im = np.e - a[:] = re + 1.0j * im - - # One format only - c = BytesIO() - np.savetxt(c, a, fmt=' %+.3e') - c.seek(0) - lines = c.readlines() - assert_equal( - lines, - [b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n', - b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n']) - - # One format for each real and imaginary part - c = BytesIO() - np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols) - c.seek(0) - lines = c.readlines() - assert_equal( - lines, - [b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n', - b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n']) - - # One format for each complex number - c = BytesIO() - np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols) - c.seek(0) - lines = c.readlines() - assert_equal( - lines, - [b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n', - b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n']) - - def test_complex_negative_exponent(self): - # Previous to 1.15, some formats generated x+-yj, gh 7895 - ncols = 2 - nrows = 2 - a = np.zeros((ncols, nrows), dtype=np.complex128) - re = np.pi - im = np.e - a[:] = re - 1.0j * im - c = BytesIO() - np.savetxt(c, a, fmt='%.3e') - c.seek(0) - lines = c.readlines() - assert_equal( - lines, - [b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n', - b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n']) - - - def test_custom_writer(self): - - class CustomWriter(list): - def write(self, text): - self.extend(text.split(b'\n')) - - w = CustomWriter() - a = np.array([(1, 2), (3, 4)]) - np.savetxt(w, a) - b = np.loadtxt(w) - assert_array_equal(a, b) - - def test_unicode(self): - utf8 = b'\xcf\x96'.decode('UTF-8') - a = np.array([utf8], dtype=np.unicode_) - with tempdir() as tmpdir: - # set encoding as on windows it may not be unicode even on py3 - np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'], - encoding='UTF-8') 
- - def test_unicode_roundtrip(self): - utf8 = b'\xcf\x96'.decode('UTF-8') - a = np.array([utf8], dtype=np.unicode_) - # our gz wrapper support encoding - suffixes = ['', '.gz'] - # stdlib 2 versions do not support encoding - if MAJVER > 2: - if HAS_BZ2: - suffixes.append('.bz2') - if HAS_LZMA: - suffixes.extend(['.xz', '.lzma']) - with tempdir() as tmpdir: - for suffix in suffixes: - np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a, - fmt=['%s'], encoding='UTF-16-LE') - b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix), - encoding='UTF-16-LE', dtype=np.unicode_) - assert_array_equal(a, b) - - def test_unicode_bytestream(self): - utf8 = b'\xcf\x96'.decode('UTF-8') - a = np.array([utf8], dtype=np.unicode_) - s = BytesIO() - np.savetxt(s, a, fmt=['%s'], encoding='UTF-8') - s.seek(0) - assert_equal(s.read().decode('UTF-8'), utf8 + '\n') - - def test_unicode_stringstream(self): - utf8 = b'\xcf\x96'.decode('UTF-8') - a = np.array([utf8], dtype=np.unicode_) - s = StringIO() - np.savetxt(s, a, fmt=['%s'], encoding='UTF-8') - s.seek(0) - assert_equal(s.read(), utf8 + '\n') - - @pytest.mark.parametrize("fmt", [u"%f", b"%f"]) - @pytest.mark.parametrize("iotype", [StringIO, BytesIO]) - def test_unicode_and_bytes_fmt(self, fmt, iotype): - # string type of fmt should not matter, see also gh-4053 - a = np.array([1.]) - s = iotype() - np.savetxt(s, a, fmt=fmt) - s.seek(0) - if iotype is StringIO: - assert_equal(s.read(), u"%f\n" % 1.) - else: - assert_equal(s.read(), b"%f\n" % 1.) 
- - @pytest.mark.skipif(sys.platform=='win32', - reason="large files cause problems") - @pytest.mark.slow - @requires_memory(free_bytes=7e9) - def test_large_zip(self): - # The test takes at least 6GB of memory, writes a file larger than 4GB - test_data = np.asarray([np.random.rand(np.random.randint(50,100),4) - for i in range(800000)]) - with tempdir() as tmpdir: - np.savez(os.path.join(tmpdir, 'test.npz'), test_data=test_data) - -class LoadTxtBase(object): - def check_compressed(self, fopen, suffixes): - # Test that we can load data from a compressed file - wanted = np.arange(6).reshape((2, 3)) - linesep = ('\n', '\r\n', '\r') - for sep in linesep: - data = '0 1 2' + sep + '3 4 5' - for suffix in suffixes: - with temppath(suffix=suffix) as name: - with fopen(name, mode='wt', encoding='UTF-32-LE') as f: - f.write(data) - res = self.loadfunc(name, encoding='UTF-32-LE') - assert_array_equal(res, wanted) - with fopen(name, "rt", encoding='UTF-32-LE') as f: - res = self.loadfunc(f) - assert_array_equal(res, wanted) - - # Python2 .open does not support encoding - @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3") - def test_compressed_gzip(self): - self.check_compressed(gzip.open, ('.gz',)) - - @pytest.mark.skipif(not HAS_BZ2, reason="Needs bz2") - @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3") - def test_compressed_bz2(self): - self.check_compressed(bz2.open, ('.bz2',)) - - @pytest.mark.skipif(not HAS_LZMA, reason="Needs lzma") - @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3") - def test_compressed_lzma(self): - self.check_compressed(lzma.open, ('.xz', '.lzma')) - - def test_encoding(self): - with temppath() as path: - with open(path, "wb") as f: - f.write('0.\n1.\n2.'.encode("UTF-16")) - x = self.loadfunc(path, encoding="UTF-16") - assert_array_equal(x, [0., 1., 2.]) - - def test_stringload(self): - # umlaute - nonascii = b'\xc3\xb6\xc3\xbc\xc3\xb6'.decode("UTF-8") - with temppath() as path: - with 
open(path, "wb") as f: - f.write(nonascii.encode("UTF-16")) - x = self.loadfunc(path, encoding="UTF-16", dtype=np.unicode_) - assert_array_equal(x, nonascii) - - def test_binary_decode(self): - utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04' - v = self.loadfunc(BytesIO(utf16), dtype=np.unicode_, encoding='UTF-16') - assert_array_equal(v, np.array(utf16.decode('UTF-16').split())) - - def test_converters_decode(self): - # test converters that decode strings - c = TextIO() - c.write(b'\xcf\x96') - c.seek(0) - x = self.loadfunc(c, dtype=np.unicode_, - converters={0: lambda x: x.decode('UTF-8')}) - a = np.array([b'\xcf\x96'.decode('UTF-8')]) - assert_array_equal(x, a) - - def test_converters_nodecode(self): - # test native string converters enabled by setting an encoding - utf8 = b'\xcf\x96'.decode('UTF-8') - with temppath() as path: - with io.open(path, 'wt', encoding='UTF-8') as f: - f.write(utf8) - x = self.loadfunc(path, dtype=np.unicode_, - converters={0: lambda x: x + 't'}, - encoding='UTF-8') - a = np.array([utf8 + 't']) - assert_array_equal(x, a) - - -class TestLoadTxt(LoadTxtBase): - loadfunc = staticmethod(np.loadtxt) - - def setup(self): - # lower chunksize for testing - self.orig_chunk = np.lib.npyio._loadtxt_chunksize - np.lib.npyio._loadtxt_chunksize = 1 - def teardown(self): - np.lib.npyio._loadtxt_chunksize = self.orig_chunk - - def test_record(self): - c = TextIO() - c.write('1 2\n3 4') - c.seek(0) - x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)]) - a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) - assert_array_equal(x, a) - - d = TextIO() - d.write('M 64.0 75.0\nF 25.0 60.0') - d.seek(0) - mydescriptor = {'names': ('gender', 'age', 'weight'), - 'formats': ('S1', 'i4', 'f4')} - b = np.array([('M', 64.0, 75.0), - ('F', 25.0, 60.0)], dtype=mydescriptor) - y = np.loadtxt(d, dtype=mydescriptor) - assert_array_equal(y, b) - - def test_array(self): - c = TextIO() - c.write('1 2\n3 4') - - c.seek(0) - x = np.loadtxt(c, dtype=int) - a 
= np.array([[1, 2], [3, 4]], int) - assert_array_equal(x, a) - - c.seek(0) - x = np.loadtxt(c, dtype=float) - a = np.array([[1, 2], [3, 4]], float) - assert_array_equal(x, a) - - def test_1D(self): - c = TextIO() - c.write('1\n2\n3\n4\n') - c.seek(0) - x = np.loadtxt(c, dtype=int) - a = np.array([1, 2, 3, 4], int) - assert_array_equal(x, a) - - c = TextIO() - c.write('1,2,3,4\n') - c.seek(0) - x = np.loadtxt(c, dtype=int, delimiter=',') - a = np.array([1, 2, 3, 4], int) - assert_array_equal(x, a) - - def test_missing(self): - c = TextIO() - c.write('1,2,3,,5\n') - c.seek(0) - x = np.loadtxt(c, dtype=int, delimiter=',', - converters={3: lambda s: int(s or - 999)}) - a = np.array([1, 2, 3, -999, 5], int) - assert_array_equal(x, a) - - def test_converters_with_usecols(self): - c = TextIO() - c.write('1,2,3,,5\n6,7,8,9,10\n') - c.seek(0) - x = np.loadtxt(c, dtype=int, delimiter=',', - converters={3: lambda s: int(s or - 999)}, - usecols=(1, 3,)) - a = np.array([[2, -999], [7, 9]], int) - assert_array_equal(x, a) - - def test_comments_unicode(self): - c = TextIO() - c.write('# comment\n1,2,3,5\n') - c.seek(0) - x = np.loadtxt(c, dtype=int, delimiter=',', - comments=u'#') - a = np.array([1, 2, 3, 5], int) - assert_array_equal(x, a) - - def test_comments_byte(self): - c = TextIO() - c.write('# comment\n1,2,3,5\n') - c.seek(0) - x = np.loadtxt(c, dtype=int, delimiter=',', - comments=b'#') - a = np.array([1, 2, 3, 5], int) - assert_array_equal(x, a) - - def test_comments_multiple(self): - c = TextIO() - c.write('# comment\n1,2,3\n@ comment2\n4,5,6 // comment3') - c.seek(0) - x = np.loadtxt(c, dtype=int, delimiter=',', - comments=['#', '@', '//']) - a = np.array([[1, 2, 3], [4, 5, 6]], int) - assert_array_equal(x, a) - - def test_comments_multi_chars(self): - c = TextIO() - c.write('/* comment\n1,2,3,5\n') - c.seek(0) - x = np.loadtxt(c, dtype=int, delimiter=',', - comments='/*') - a = np.array([1, 2, 3, 5], int) - assert_array_equal(x, a) - - # Check that '/*' is not 
transformed to ['/', '*'] - c = TextIO() - c.write('*/ comment\n1,2,3,5\n') - c.seek(0) - assert_raises(ValueError, np.loadtxt, c, dtype=int, delimiter=',', - comments='/*') - - def test_skiprows(self): - c = TextIO() - c.write('comment\n1,2,3,5\n') - c.seek(0) - x = np.loadtxt(c, dtype=int, delimiter=',', - skiprows=1) - a = np.array([1, 2, 3, 5], int) - assert_array_equal(x, a) - - c = TextIO() - c.write('# comment\n1,2,3,5\n') - c.seek(0) - x = np.loadtxt(c, dtype=int, delimiter=',', - skiprows=1) - a = np.array([1, 2, 3, 5], int) - assert_array_equal(x, a) - - def test_usecols(self): - a = np.array([[1, 2], [3, 4]], float) - c = BytesIO() - np.savetxt(c, a) - c.seek(0) - x = np.loadtxt(c, dtype=float, usecols=(1,)) - assert_array_equal(x, a[:, 1]) - - a = np.array([[1, 2, 3], [3, 4, 5]], float) - c = BytesIO() - np.savetxt(c, a) - c.seek(0) - x = np.loadtxt(c, dtype=float, usecols=(1, 2)) - assert_array_equal(x, a[:, 1:]) - - # Testing with arrays instead of tuples. - c.seek(0) - x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2])) - assert_array_equal(x, a[:, 1:]) - - # Testing with an integer instead of a sequence - for int_type in [int, np.int8, np.int16, - np.int32, np.int64, np.uint8, np.uint16, - np.uint32, np.uint64]: - to_read = int_type(1) - c.seek(0) - x = np.loadtxt(c, dtype=float, usecols=to_read) - assert_array_equal(x, a[:, 1]) - - # Testing with some crazy custom integer type - class CrazyInt(object): - def __index__(self): - return 1 - - crazy_int = CrazyInt() - c.seek(0) - x = np.loadtxt(c, dtype=float, usecols=crazy_int) - assert_array_equal(x, a[:, 1]) - - c.seek(0) - x = np.loadtxt(c, dtype=float, usecols=(crazy_int,)) - assert_array_equal(x, a[:, 1]) - - # Checking with dtypes defined converters. 
- data = '''JOE 70.1 25.3 - BOB 60.5 27.9 - ''' - c = TextIO(data) - names = ['stid', 'temp'] - dtypes = ['S4', 'f8'] - arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes))) - assert_equal(arr['stid'], [b"JOE", b"BOB"]) - assert_equal(arr['temp'], [25.3, 27.9]) - - # Testing non-ints in usecols - c.seek(0) - bogus_idx = 1.5 - assert_raises_regex( - TypeError, - '^usecols must be.*%s' % type(bogus_idx), - np.loadtxt, c, usecols=bogus_idx - ) - - assert_raises_regex( - TypeError, - '^usecols must be.*%s' % type(bogus_idx), - np.loadtxt, c, usecols=[0, bogus_idx, 0] - ) - - def test_fancy_dtype(self): - c = TextIO() - c.write('1,2,3.0\n4,5,6.0\n') - c.seek(0) - dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) - x = np.loadtxt(c, dtype=dt, delimiter=',') - a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt) - assert_array_equal(x, a) - - def test_shaped_dtype(self): - c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6") - dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), - ('block', int, (2, 3))]) - x = np.loadtxt(c, dtype=dt) - a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])], - dtype=dt) - assert_array_equal(x, a) - - def test_3d_shaped_dtype(self): - c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12") - dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), - ('block', int, (2, 2, 3))]) - x = np.loadtxt(c, dtype=dt) - a = np.array([('aaaa', 1.0, 8.0, - [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])], - dtype=dt) - assert_array_equal(x, a) - - def test_str_dtype(self): - # see gh-8033 - c = ["str1", "str2"] - - for dt in (str, np.bytes_): - a = np.array(["str1", "str2"], dtype=dt) - x = np.loadtxt(c, dtype=dt) - assert_array_equal(x, a) - - def test_empty_file(self): - with suppress_warnings() as sup: - sup.filter(message="loadtxt: Empty input file:") - c = TextIO() - x = np.loadtxt(c) - assert_equal(x.shape, (0,)) - x = np.loadtxt(c, dtype=np.int64) - assert_equal(x.shape, (0,)) - assert_(x.dtype == np.int64) - - def 
test_unused_converter(self): - c = TextIO() - c.writelines(['1 21\n', '3 42\n']) - c.seek(0) - data = np.loadtxt(c, usecols=(1,), - converters={0: lambda s: int(s, 16)}) - assert_array_equal(data, [21, 42]) - - c.seek(0) - data = np.loadtxt(c, usecols=(1,), - converters={1: lambda s: int(s, 16)}) - assert_array_equal(data, [33, 66]) - - def test_dtype_with_object(self): - # Test using an explicit dtype with an object - data = """ 1; 2001-01-01 - 2; 2002-01-31 """ - ndtype = [('idx', int), ('code', object)] - func = lambda s: strptime(s.strip(), "%Y-%m-%d") - converters = {1: func} - test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype, - converters=converters) - control = np.array( - [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))], - dtype=ndtype) - assert_equal(test, control) - - def test_uint64_type(self): - tgt = (9223372043271415339, 9223372043271415853) - c = TextIO() - c.write("%s %s" % tgt) - c.seek(0) - res = np.loadtxt(c, dtype=np.uint64) - assert_equal(res, tgt) - - def test_int64_type(self): - tgt = (-9223372036854775807, 9223372036854775807) - c = TextIO() - c.write("%s %s" % tgt) - c.seek(0) - res = np.loadtxt(c, dtype=np.int64) - assert_equal(res, tgt) - - def test_from_float_hex(self): - # IEEE doubles and floats only, otherwise the float32 - # conversion may fail. 
- tgt = np.logspace(-10, 10, 5).astype(np.float32) - tgt = np.hstack((tgt, -tgt)).astype(float) - inp = '\n'.join(map(float.hex, tgt)) - c = TextIO() - c.write(inp) - for dt in [float, np.float32]: - c.seek(0) - res = np.loadtxt(c, dtype=dt) - assert_equal(res, tgt, err_msg="%s" % dt) - - def test_from_complex(self): - tgt = (complex(1, 1), complex(1, -1)) - c = TextIO() - c.write("%s %s" % tgt) - c.seek(0) - res = np.loadtxt(c, dtype=complex) - assert_equal(res, tgt) - - def test_complex_misformatted(self): - # test for backward compatibility - # some complex formats used to generate x+-yj - a = np.zeros((2, 2), dtype=np.complex128) - re = np.pi - im = np.e - a[:] = re - 1.0j * im - c = BytesIO() - np.savetxt(c, a, fmt='%.16e') - c.seek(0) - txt = c.read() - c.seek(0) - # misformat the sign on the imaginary part, gh 7895 - txt_bad = txt.replace(b'e+00-', b'e00+-') - assert_(txt_bad != txt) - c.write(txt_bad) - c.seek(0) - res = np.loadtxt(c, dtype=complex) - assert_equal(res, a) - - def test_universal_newline(self): - with temppath() as name: - with open(name, 'w') as f: - f.write('1 21\r3 42\r') - data = np.loadtxt(name) - assert_array_equal(data, [[1, 21], [3, 42]]) - - def test_empty_field_after_tab(self): - c = TextIO() - c.write('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t') - c.seek(0) - dt = {'names': ('x', 'y', 'z', 'comment'), - 'formats': (' num rows - c = TextIO() - c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5') - c.seek(0) - x = np.loadtxt(c, dtype=int, delimiter=',', - skiprows=1, max_rows=6) - a = np.array([[1, 2, 3, 5], [4, 5, 7, 8], [2, 1, 4, 5]], int) - assert_array_equal(x, a) - -class Testfromregex(object): - def test_record(self): - c = TextIO() - c.write('1.312 foo\n1.534 bar\n4.444 qux') - c.seek(0) - - dt = [('num', np.float64), ('val', 'S3')] - x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt) - a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')], - dtype=dt) - assert_array_equal(x, a) - - def test_record_2(self): - c = TextIO() - 
c.write('1312 foo\n1534 bar\n4444 qux') - c.seek(0) - - dt = [('num', np.int32), ('val', 'S3')] - x = np.fromregex(c, r"(\d+)\s+(...)", dt) - a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')], - dtype=dt) - assert_array_equal(x, a) - - def test_record_3(self): - c = TextIO() - c.write('1312 foo\n1534 bar\n4444 qux') - c.seek(0) - - dt = [('num', np.float64)] - x = np.fromregex(c, r"(\d+)\s+...", dt) - a = np.array([(1312,), (1534,), (4444,)], dtype=dt) - assert_array_equal(x, a) - - def test_record_unicode(self): - utf8 = b'\xcf\x96' - with temppath() as path: - with open(path, 'wb') as f: - f.write(b'1.312 foo' + utf8 + b' \n1.534 bar\n4.444 qux') - - dt = [('num', np.float64), ('val', 'U4')] - x = np.fromregex(path, r"(?u)([0-9.]+)\s+(\w+)", dt, encoding='UTF-8') - a = np.array([(1.312, 'foo' + utf8.decode('UTF-8')), (1.534, 'bar'), - (4.444, 'qux')], dtype=dt) - assert_array_equal(x, a) - - regexp = re.compile(r"([0-9.]+)\s+(\w+)", re.UNICODE) - x = np.fromregex(path, regexp, dt, encoding='UTF-8') - assert_array_equal(x, a) - - def test_compiled_bytes(self): - regexp = re.compile(b'(\\d)') - c = BytesIO(b'123') - dt = [('num', np.float64)] - a = np.array([1, 2, 3], dtype=dt) - x = np.fromregex(c, regexp, dt) - assert_array_equal(x, a) - -#####-------------------------------------------------------------------------- - - -class TestFromTxt(LoadTxtBase): - loadfunc = staticmethod(np.genfromtxt) - - def test_record(self): - # Test w/ explicit dtype - data = TextIO('1 2\n3 4') - test = np.genfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)]) - control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) - assert_equal(test, control) - # - data = TextIO('M 64.0 75.0\nF 25.0 60.0') - descriptor = {'names': ('gender', 'age', 'weight'), - 'formats': ('S1', 'i4', 'f4')} - control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)], - dtype=descriptor) - test = np.genfromtxt(data, dtype=descriptor) - assert_equal(test, control) - - def 
test_array(self): - # Test outputting a standard ndarray - data = TextIO('1 2\n3 4') - control = np.array([[1, 2], [3, 4]], dtype=int) - test = np.genfromtxt(data, dtype=int) - assert_array_equal(test, control) - # - data.seek(0) - control = np.array([[1, 2], [3, 4]], dtype=float) - test = np.loadtxt(data, dtype=float) - assert_array_equal(test, control) - - def test_1D(self): - # Test squeezing to 1D - control = np.array([1, 2, 3, 4], int) - # - data = TextIO('1\n2\n3\n4\n') - test = np.genfromtxt(data, dtype=int) - assert_array_equal(test, control) - # - data = TextIO('1,2,3,4\n') - test = np.genfromtxt(data, dtype=int, delimiter=',') - assert_array_equal(test, control) - - def test_comments(self): - # Test the stripping of comments - control = np.array([1, 2, 3, 5], int) - # Comment on its own line - data = TextIO('# comment\n1,2,3,5\n') - test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#') - assert_equal(test, control) - # Comment at the end of a line - data = TextIO('1,2,3,5# comment\n') - test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#') - assert_equal(test, control) - - def test_skiprows(self): - # Test row skipping - control = np.array([1, 2, 3, 5], int) - kwargs = dict(dtype=int, delimiter=',') - # - data = TextIO('comment\n1,2,3,5\n') - test = np.genfromtxt(data, skip_header=1, **kwargs) - assert_equal(test, control) - # - data = TextIO('# comment\n1,2,3,5\n') - test = np.loadtxt(data, skiprows=1, **kwargs) - assert_equal(test, control) - - def test_skip_footer(self): - data = ["# %i" % i for i in range(1, 6)] - data.append("A, B, C") - data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)]) - data[-1] = "99,99" - kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10) - test = np.genfromtxt(TextIO("\n".join(data)), **kwargs) - ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)], - dtype=[(_, float) for _ in "ABC"]) - assert_equal(test, ctrl) - - def test_skip_footer_with_invalid(self): 
- with suppress_warnings() as sup: - sup.filter(ConversionWarning) - basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n' - # Footer too small to get rid of all invalid values - assert_raises(ValueError, np.genfromtxt, - TextIO(basestr), skip_footer=1) - # except ValueError: - # pass - a = np.genfromtxt( - TextIO(basestr), skip_footer=1, invalid_raise=False) - assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])) - # - a = np.genfromtxt(TextIO(basestr), skip_footer=3) - assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])) - # - basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n' - a = np.genfromtxt( - TextIO(basestr), skip_footer=1, invalid_raise=False) - assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]])) - a = np.genfromtxt( - TextIO(basestr), skip_footer=3, invalid_raise=False) - assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]])) - - def test_header(self): - # Test retrieving a header - data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0') - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) - test = np.genfromtxt(data, dtype=None, names=True) - assert_(w[0].category is np.VisibleDeprecationWarning) - control = {'gender': np.array([b'M', b'F']), - 'age': np.array([64.0, 25.0]), - 'weight': np.array([75.0, 60.0])} - assert_equal(test['gender'], control['gender']) - assert_equal(test['age'], control['age']) - assert_equal(test['weight'], control['weight']) - - def test_auto_dtype(self): - # Test the automatic definition of the output dtype - data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False') - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) - test = np.genfromtxt(data, dtype=None) - assert_(w[0].category is np.VisibleDeprecationWarning) - control = [np.array([b'A', b'BCD']), - np.array([64, 25]), - np.array([75.0, 60.0]), - np.array([3 + 4j, 5 + 6j]), - np.array([True, 
False]), ] - assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4']) - for (i, ctrl) in enumerate(control): - assert_equal(test['f%i' % i], ctrl) - - def test_auto_dtype_uniform(self): - # Tests whether the output dtype can be uniformized - data = TextIO('1 2 3 4\n5 6 7 8\n') - test = np.genfromtxt(data, dtype=None) - control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) - assert_equal(test, control) - - def test_fancy_dtype(self): - # Check that a nested dtype isn't MIA - data = TextIO('1,2,3.0\n4,5,6.0\n') - fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) - test = np.genfromtxt(data, dtype=fancydtype, delimiter=',') - control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype) - assert_equal(test, control) - - def test_names_overwrite(self): - # Test overwriting the names of the dtype - descriptor = {'names': ('g', 'a', 'w'), - 'formats': ('S1', 'i4', 'f4')} - data = TextIO(b'M 64.0 75.0\nF 25.0 60.0') - names = ('gender', 'age', 'weight') - test = np.genfromtxt(data, dtype=descriptor, names=names) - descriptor['names'] = names - control = np.array([('M', 64.0, 75.0), - ('F', 25.0, 60.0)], dtype=descriptor) - assert_equal(test, control) - - def test_commented_header(self): - # Check that names can be retrieved even if the line is commented out. - data = TextIO(""" -#gender age weight -M 21 72.100000 -F 35 58.330000 -M 33 21.99 - """) - # The # is part of the first name and should be deleted automatically. 
- with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) - test = np.genfromtxt(data, names=True, dtype=None) - assert_(w[0].category is np.VisibleDeprecationWarning) - ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)], - dtype=[('gender', '|S1'), ('age', int), ('weight', float)]) - assert_equal(test, ctrl) - # Ditto, but we should get rid of the first element - data = TextIO(b""" -# gender age weight -M 21 72.100000 -F 35 58.330000 -M 33 21.99 - """) - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) - test = np.genfromtxt(data, names=True, dtype=None) - assert_(w[0].category is np.VisibleDeprecationWarning) - assert_equal(test, ctrl) - - def test_names_and_comments_none(self): - # Tests case when names is true but comments is None (gh-10780) - data = TextIO('col1 col2\n 1 2\n 3 4') - test = np.genfromtxt(data, dtype=(int, int), comments=None, names=True) - control = np.array([(1, 2), (3, 4)], dtype=[('col1', int), ('col2', int)]) - assert_equal(test, control) - - def test_file_is_closed_on_error(self): - # gh-13200 - with tempdir() as tmpdir: - fpath = os.path.join(tmpdir, "test.csv") - with open(fpath, "wb") as f: - f.write(u'\N{GREEK PI SYMBOL}'.encode('utf8')) - - # ResourceWarnings are emitted from a destructor, so won't be - # detected by regular propagation to errors. 
- with assert_no_warnings(): - with pytest.raises(UnicodeDecodeError): - np.genfromtxt(fpath, encoding="ascii") - - def test_autonames_and_usecols(self): - # Tests names and usecols - data = TextIO('A B C D\n aaaa 121 45 9.1') - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) - test = np.genfromtxt(data, usecols=('A', 'C', 'D'), - names=True, dtype=None) - assert_(w[0].category is np.VisibleDeprecationWarning) - control = np.array(('aaaa', 45, 9.1), - dtype=[('A', '|S4'), ('C', int), ('D', float)]) - assert_equal(test, control) - - def test_converters_with_usecols(self): - # Test the combination user-defined converters and usecol - data = TextIO('1,2,3,,5\n6,7,8,9,10\n') - test = np.genfromtxt(data, dtype=int, delimiter=',', - converters={3: lambda s: int(s or - 999)}, - usecols=(1, 3,)) - control = np.array([[2, -999], [7, 9]], int) - assert_equal(test, control) - - def test_converters_with_usecols_and_names(self): - # Tests names and usecols - data = TextIO('A B C D\n aaaa 121 45 9.1') - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) - test = np.genfromtxt(data, usecols=('A', 'C', 'D'), names=True, - dtype=None, - converters={'C': lambda s: 2 * int(s)}) - assert_(w[0].category is np.VisibleDeprecationWarning) - control = np.array(('aaaa', 90, 9.1), - dtype=[('A', '|S4'), ('C', int), ('D', float)]) - assert_equal(test, control) - - def test_converters_cornercases(self): - # Test the conversion to datetime. 
- converter = { - 'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')} - data = TextIO('2009-02-03 12:00:00Z, 72214.0') - test = np.genfromtxt(data, delimiter=',', dtype=None, - names=['date', 'stid'], converters=converter) - control = np.array((datetime(2009, 2, 3), 72214.), - dtype=[('date', np.object_), ('stid', float)]) - assert_equal(test, control) - - def test_converters_cornercases2(self): - # Test the conversion to datetime64. - converter = { - 'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))} - data = TextIO('2009-02-03 12:00:00Z, 72214.0') - test = np.genfromtxt(data, delimiter=',', dtype=None, - names=['date', 'stid'], converters=converter) - control = np.array((datetime(2009, 2, 3), 72214.), - dtype=[('date', 'datetime64[us]'), ('stid', float)]) - assert_equal(test, control) - - def test_unused_converter(self): - # Test whether unused converters are forgotten - data = TextIO("1 21\n 3 42\n") - test = np.genfromtxt(data, usecols=(1,), - converters={0: lambda s: int(s, 16)}) - assert_equal(test, [21, 42]) - # - data.seek(0) - test = np.genfromtxt(data, usecols=(1,), - converters={1: lambda s: int(s, 16)}) - assert_equal(test, [33, 66]) - - def test_invalid_converter(self): - strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or - (b'r' not in x.lower() and x.strip() or 0.0)) - strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or - (b'%' not in x.lower() and x.strip() or 0.0)) - s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n" - "L24U05,12/5/2003, 2 %,1,300, 150.5\r\n" - "D02N03,10/10/2004,R 1,,7,145.55") - kwargs = dict( - converters={2: strip_per, 3: strip_rand}, delimiter=",", - dtype=None) - assert_raises(ConverterError, np.genfromtxt, s, **kwargs) - - def test_tricky_converter_bug1666(self): - # Test some corner cases - s = TextIO('q1,2\nq3,4') - cnv = lambda s: float(s[1:]) - test = np.genfromtxt(s, delimiter=',', converters={0: cnv}) - control = np.array([[1., 2.], [3., 4.]]) - assert_equal(test, 
control) - - def test_dtype_with_converters(self): - dstr = "2009; 23; 46" - test = np.genfromtxt(TextIO(dstr,), - delimiter=";", dtype=float, converters={0: bytes}) - control = np.array([('2009', 23., 46)], - dtype=[('f0', '|S4'), ('f1', float), ('f2', float)]) - assert_equal(test, control) - test = np.genfromtxt(TextIO(dstr,), - delimiter=";", dtype=float, converters={0: float}) - control = np.array([2009., 23., 46],) - assert_equal(test, control) - - def test_dtype_with_converters_and_usecols(self): - dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n" - dmap = {'1:1':0, '1:n':1, 'm:1':2, 'm:n':3} - dtyp = [('e1','i4'),('e2','i4'),('e3','i2'),('n', 'i1')] - conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]} - test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', - names=None, converters=conv) - control = np.rec.array([(1,5,-1,0), (2,8,-1,1), (3,3,-2,3)], dtype=dtyp) - assert_equal(test, control) - dtyp = [('e1','i4'),('e2','i4'),('n', 'i1')] - test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', - usecols=(0,1,3), names=None, converters=conv) - control = np.rec.array([(1,5,0), (2,8,1), (3,3,3)], dtype=dtyp) - assert_equal(test, control) - - def test_dtype_with_object(self): - # Test using an explicit dtype with an object - data = """ 1; 2001-01-01 - 2; 2002-01-31 """ - ndtype = [('idx', int), ('code', object)] - func = lambda s: strptime(s.strip(), "%Y-%m-%d") - converters = {1: func} - test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype, - converters=converters) - control = np.array( - [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))], - dtype=ndtype) - assert_equal(test, control) - - ndtype = [('nest', [('idx', int), ('code', object)])] - with assert_raises_regex(NotImplementedError, - 'Nested fields.* not supported.*'): - test = np.genfromtxt(TextIO(data), delimiter=";", - dtype=ndtype, converters=converters) - - # nested but empty fields also aren't supported - ndtype = [('idx', int), ('code', object), ('nest', [])] 
- with assert_raises_regex(NotImplementedError, - 'Nested fields.* not supported.*'): - test = np.genfromtxt(TextIO(data), delimiter=";", - dtype=ndtype, converters=converters) - - def test_userconverters_with_explicit_dtype(self): - # Test user_converters w/ explicit (standard) dtype - data = TextIO('skip,skip,2001-01-01,1.0,skip') - test = np.genfromtxt(data, delimiter=",", names=None, dtype=float, - usecols=(2, 3), converters={2: bytes}) - control = np.array([('2001-01-01', 1.)], - dtype=[('', '|S10'), ('', float)]) - assert_equal(test, control) - - def test_utf8_userconverters_with_explicit_dtype(self): - utf8 = b'\xcf\x96' - with temppath() as path: - with open(path, 'wb') as f: - f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip') - test = np.genfromtxt(path, delimiter=",", names=None, dtype=float, - usecols=(2, 3), converters={2: np.compat.unicode}, - encoding='UTF-8') - control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)], - dtype=[('', '|U11'), ('', float)]) - assert_equal(test, control) - - def test_spacedelimiter(self): - # Test space delimiter - data = TextIO("1 2 3 4 5\n6 7 8 9 10") - test = np.genfromtxt(data) - control = np.array([[1., 2., 3., 4., 5.], - [6., 7., 8., 9., 10.]]) - assert_equal(test, control) - - def test_integer_delimiter(self): - # Test using an integer for delimiter - data = " 1 2 3\n 4 5 67\n890123 4" - test = np.genfromtxt(TextIO(data), delimiter=3) - control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]]) - assert_equal(test, control) - - def test_missing(self): - data = TextIO('1,2,3,,5\n') - test = np.genfromtxt(data, dtype=int, delimiter=',', - converters={3: lambda s: int(s or - 999)}) - control = np.array([1, 2, 3, -999, 5], int) - assert_equal(test, control) - - def test_missing_with_tabs(self): - # Test w/ a delimiter tab - txt = "1\t2\t3\n\t2\t\n1\t\t3" - test = np.genfromtxt(TextIO(txt), delimiter="\t", - usemask=True,) - ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],) - ctrl_m = 
np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool) - assert_equal(test.data, ctrl_d) - assert_equal(test.mask, ctrl_m) - - def test_usecols(self): - # Test the selection of columns - # Select 1 column - control = np.array([[1, 2], [3, 4]], float) - data = TextIO() - np.savetxt(data, control) - data.seek(0) - test = np.genfromtxt(data, dtype=float, usecols=(1,)) - assert_equal(test, control[:, 1]) - # - control = np.array([[1, 2, 3], [3, 4, 5]], float) - data = TextIO() - np.savetxt(data, control) - data.seek(0) - test = np.genfromtxt(data, dtype=float, usecols=(1, 2)) - assert_equal(test, control[:, 1:]) - # Testing with arrays instead of tuples. - data.seek(0) - test = np.genfromtxt(data, dtype=float, usecols=np.array([1, 2])) - assert_equal(test, control[:, 1:]) - - def test_usecols_as_css(self): - # Test giving usecols with a comma-separated string - data = "1 2 3\n4 5 6" - test = np.genfromtxt(TextIO(data), - names="a, b, c", usecols="a, c") - ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"]) - assert_equal(test, ctrl) - - def test_usecols_with_structured_dtype(self): - # Test usecols with an explicit structured dtype - data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9") - names = ['stid', 'temp'] - dtypes = ['S4', 'f8'] - test = np.genfromtxt( - data, usecols=(0, 2), dtype=list(zip(names, dtypes))) - assert_equal(test['stid'], [b"JOE", b"BOB"]) - assert_equal(test['temp'], [25.3, 27.9]) - - def test_usecols_with_integer(self): - # Test usecols with an integer - test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0) - assert_equal(test, np.array([1., 4.])) - - def test_usecols_with_named_columns(self): - # Test usecols with named columns - ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)]) - data = "1 2 3\n4 5 6" - kwargs = dict(names="a, b, c") - test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs) - assert_equal(test, ctrl) - test = np.genfromtxt(TextIO(data), - usecols=('a', 'c'), **kwargs) - assert_equal(test, 
ctrl) - - def test_empty_file(self): - # Test that an empty file raises the proper warning. - with suppress_warnings() as sup: - sup.filter(message="genfromtxt: Empty input file:") - data = TextIO() - test = np.genfromtxt(data) - assert_equal(test, np.array([])) - - # when skip_header > 0 - test = np.genfromtxt(data, skip_header=1) - assert_equal(test, np.array([])) - - def test_fancy_dtype_alt(self): - # Check that a nested dtype isn't MIA - data = TextIO('1,2,3.0\n4,5,6.0\n') - fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) - test = np.genfromtxt(data, dtype=fancydtype, delimiter=',', usemask=True) - control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype) - assert_equal(test, control) - - def test_shaped_dtype(self): - c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6") - dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), - ('block', int, (2, 3))]) - x = np.genfromtxt(c, dtype=dt) - a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])], - dtype=dt) - assert_array_equal(x, a) - - def test_withmissing(self): - data = TextIO('A,B\n0,1\n2,N/A') - kwargs = dict(delimiter=",", missing_values="N/A", names=True) - test = np.genfromtxt(data, dtype=None, usemask=True, **kwargs) - control = ma.array([(0, 1), (2, -1)], - mask=[(False, False), (False, True)], - dtype=[('A', int), ('B', int)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - # - data.seek(0) - test = np.genfromtxt(data, usemask=True, **kwargs) - control = ma.array([(0, 1), (2, -1)], - mask=[(False, False), (False, True)], - dtype=[('A', float), ('B', float)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - - def test_user_missing_values(self): - data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j" - basekwargs = dict(dtype=None, delimiter=",", names=True,) - mdtype = [('A', int), ('B', float), ('C', complex)] - # - test = np.genfromtxt(TextIO(data), missing_values="N/A", - **basekwargs) - control = ma.array([(0, 
0.0, 0j), (1, -999, 1j), - (-9, 2.2, -999j), (3, -99, 3j)], - mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)], - dtype=mdtype) - assert_equal(test, control) - # - basekwargs['dtype'] = mdtype - test = np.genfromtxt(TextIO(data), - missing_values={0: -9, 1: -99, 2: -999j}, usemask=True, **basekwargs) - control = ma.array([(0, 0.0, 0j), (1, -999, 1j), - (-9, 2.2, -999j), (3, -99, 3j)], - mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], - dtype=mdtype) - assert_equal(test, control) - # - test = np.genfromtxt(TextIO(data), - missing_values={0: -9, 'B': -99, 'C': -999j}, - usemask=True, - **basekwargs) - control = ma.array([(0, 0.0, 0j), (1, -999, 1j), - (-9, 2.2, -999j), (3, -99, 3j)], - mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], - dtype=mdtype) - assert_equal(test, control) - - def test_user_filling_values(self): - # Test with missing and filling values - ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)]) - data = "N/A, 2, 3\n4, ,???" - kwargs = dict(delimiter=",", - dtype=int, - names="a,b,c", - missing_values={0: "N/A", 'b': " ", 2: "???"}, - filling_values={0: 0, 'b': 0, 2: -999}) - test = np.genfromtxt(TextIO(data), **kwargs) - ctrl = np.array([(0, 2, 3), (4, 0, -999)], - dtype=[(_, int) for _ in "abc"]) - assert_equal(test, ctrl) - # - test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs) - ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"]) - assert_equal(test, ctrl) - - data2 = "1,2,*,4\n5,*,7,8\n" - test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int, - missing_values="*", filling_values=0) - ctrl = np.array([[1, 2, 0, 4], [5, 0, 7, 8]]) - assert_equal(test, ctrl) - test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int, - missing_values="*", filling_values=-1) - ctrl = np.array([[1, 2, -1, 4], [5, -1, 7, 8]]) - assert_equal(test, ctrl) - - def test_withmissing_float(self): - data = TextIO('A,B\n0,1.5\n2,-999.00') - test = np.genfromtxt(data, dtype=None, delimiter=',', - 
missing_values='-999.0', names=True, usemask=True) - control = ma.array([(0, 1.5), (2, -1.)], - mask=[(False, False), (False, True)], - dtype=[('A', int), ('B', float)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - - def test_with_masked_column_uniform(self): - # Test masked column - data = TextIO('1 2 3\n4 5 6\n') - test = np.genfromtxt(data, dtype=None, - missing_values='2,5', usemask=True) - control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]]) - assert_equal(test, control) - - def test_with_masked_column_various(self): - # Test masked column - data = TextIO('True 2 3\nFalse 5 6\n') - test = np.genfromtxt(data, dtype=None, - missing_values='2,5', usemask=True) - control = ma.array([(1, 2, 3), (0, 5, 6)], - mask=[(0, 1, 0), (0, 1, 0)], - dtype=[('f0', bool), ('f1', bool), ('f2', int)]) - assert_equal(test, control) - - def test_invalid_raise(self): - # Test invalid raise - data = ["1, 1, 1, 1, 1"] * 50 - for i in range(5): - data[10 * i] = "2, 2, 2, 2 2" - data.insert(0, "a, b, c, d, e") - mdata = TextIO("\n".join(data)) - # - kwargs = dict(delimiter=",", dtype=None, names=True) - # XXX: is there a better way to get the return value of the - # callable in assert_warns ? 
- ret = {} - - def f(_ret={}): - _ret['mtest'] = np.genfromtxt(mdata, invalid_raise=False, **kwargs) - assert_warns(ConversionWarning, f, _ret=ret) - mtest = ret['mtest'] - assert_equal(len(mtest), 45) - assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde'])) - # - mdata.seek(0) - assert_raises(ValueError, np.genfromtxt, mdata, - delimiter=",", names=True) - - def test_invalid_raise_with_usecols(self): - # Test invalid_raise with usecols - data = ["1, 1, 1, 1, 1"] * 50 - for i in range(5): - data[10 * i] = "2, 2, 2, 2 2" - data.insert(0, "a, b, c, d, e") - mdata = TextIO("\n".join(data)) - kwargs = dict(delimiter=",", dtype=None, names=True, - invalid_raise=False) - # XXX: is there a better way to get the return value of the - # callable in assert_warns ? - ret = {} - - def f(_ret={}): - _ret['mtest'] = np.genfromtxt(mdata, usecols=(0, 4), **kwargs) - assert_warns(ConversionWarning, f, _ret=ret) - mtest = ret['mtest'] - assert_equal(len(mtest), 45) - assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae'])) - # - mdata.seek(0) - mtest = np.genfromtxt(mdata, usecols=(0, 1), **kwargs) - assert_equal(len(mtest), 50) - control = np.ones(50, dtype=[(_, int) for _ in 'ab']) - control[[10 * _ for _ in range(5)]] = (2, 2) - assert_equal(mtest, control) - - def test_inconsistent_dtype(self): - # Test inconsistent dtype - data = ["1, 1, 1, 1, -1.1"] * 50 - mdata = TextIO("\n".join(data)) - - converters = {4: lambda x: "(%s)" % x.decode()} - kwargs = dict(delimiter=",", converters=converters, - dtype=[(_, int) for _ in 'abcde'],) - assert_raises(ValueError, np.genfromtxt, mdata, **kwargs) - - def test_default_field_format(self): - # Test default format - data = "0, 1, 2.3\n4, 5, 6.7" - mtest = np.genfromtxt(TextIO(data), - delimiter=",", dtype=None, defaultfmt="f%02i") - ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)], - dtype=[("f00", int), ("f01", int), ("f02", float)]) - assert_equal(mtest, ctrl) - - def test_single_dtype_wo_names(self): - # Test single dtype 
w/o names - data = "0, 1, 2.3\n4, 5, 6.7" - mtest = np.genfromtxt(TextIO(data), - delimiter=",", dtype=float, defaultfmt="f%02i") - ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float) - assert_equal(mtest, ctrl) - - def test_single_dtype_w_explicit_names(self): - # Test single dtype w explicit names - data = "0, 1, 2.3\n4, 5, 6.7" - mtest = np.genfromtxt(TextIO(data), - delimiter=",", dtype=float, names="a, b, c") - ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)], - dtype=[(_, float) for _ in "abc"]) - assert_equal(mtest, ctrl) - - def test_single_dtype_w_implicit_names(self): - # Test single dtype w implicit names - data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7" - mtest = np.genfromtxt(TextIO(data), - delimiter=",", dtype=float, names=True) - ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)], - dtype=[(_, float) for _ in "abc"]) - assert_equal(mtest, ctrl) - - def test_easy_structured_dtype(self): - # Test easy structured dtype - data = "0, 1, 2.3\n4, 5, 6.7" - mtest = np.genfromtxt(TextIO(data), delimiter=",", - dtype=(int, float, float), defaultfmt="f_%02i") - ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)], - dtype=[("f_00", int), ("f_01", float), ("f_02", float)]) - assert_equal(mtest, ctrl) - - def test_autostrip(self): - # Test autostrip - data = "01/01/2003 , 1.3, abcde" - kwargs = dict(delimiter=",", dtype=None) - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) - mtest = np.genfromtxt(TextIO(data), **kwargs) - assert_(w[0].category is np.VisibleDeprecationWarning) - ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')], - dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')]) - assert_equal(mtest, ctrl) - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) - mtest = np.genfromtxt(TextIO(data), autostrip=True, **kwargs) - assert_(w[0].category is np.VisibleDeprecationWarning) - ctrl = np.array([('01/01/2003', 1.3, 'abcde')], - 
dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')]) - assert_equal(mtest, ctrl) - - def test_replace_space(self): - # Test the 'replace_space' option - txt = "A.A, B (B), C:C\n1, 2, 3.14" - # Test default: replace ' ' by '_' and delete non-alphanum chars - test = np.genfromtxt(TextIO(txt), - delimiter=",", names=True, dtype=None) - ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)] - ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) - assert_equal(test, ctrl) - # Test: no replace, no delete - test = np.genfromtxt(TextIO(txt), - delimiter=",", names=True, dtype=None, - replace_space='', deletechars='') - ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)] - ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) - assert_equal(test, ctrl) - # Test: no delete (spaces are replaced by _) - test = np.genfromtxt(TextIO(txt), - delimiter=",", names=True, dtype=None, - deletechars='') - ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)] - ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) - assert_equal(test, ctrl) - - def test_replace_space_known_dtype(self): - # Test the 'replace_space' (and related) options when dtype != None - txt = "A.A, B (B), C:C\n1, 2, 3" - # Test default: replace ' ' by '_' and delete non-alphanum chars - test = np.genfromtxt(TextIO(txt), - delimiter=",", names=True, dtype=int) - ctrl_dtype = [("AA", int), ("B_B", int), ("CC", int)] - ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) - assert_equal(test, ctrl) - # Test: no replace, no delete - test = np.genfromtxt(TextIO(txt), - delimiter=",", names=True, dtype=int, - replace_space='', deletechars='') - ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", int)] - ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) - assert_equal(test, ctrl) - # Test: no delete (spaces are replaced by _) - test = np.genfromtxt(TextIO(txt), - delimiter=",", names=True, dtype=int, - deletechars='') - ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", int)] - ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) - 
assert_equal(test, ctrl) - - def test_incomplete_names(self): - # Test w/ incomplete names - data = "A,,C\n0,1,2\n3,4,5" - kwargs = dict(delimiter=",", names=True) - # w/ dtype=None - ctrl = np.array([(0, 1, 2), (3, 4, 5)], - dtype=[(_, int) for _ in ('A', 'f0', 'C')]) - test = np.genfromtxt(TextIO(data), dtype=None, **kwargs) - assert_equal(test, ctrl) - # w/ default dtype - ctrl = np.array([(0, 1, 2), (3, 4, 5)], - dtype=[(_, float) for _ in ('A', 'f0', 'C')]) - test = np.genfromtxt(TextIO(data), **kwargs) - - def test_names_auto_completion(self): - # Make sure that names are properly completed - data = "1 2 3\n 4 5 6" - test = np.genfromtxt(TextIO(data), - dtype=(int, float, int), names="a") - ctrl = np.array([(1, 2, 3), (4, 5, 6)], - dtype=[('a', int), ('f0', float), ('f1', int)]) - assert_equal(test, ctrl) - - def test_names_with_usecols_bug1636(self): - # Make sure we pick up the right names w/ usecols - data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4" - ctrl_names = ("A", "C", "E") - test = np.genfromtxt(TextIO(data), - dtype=(int, int, int), delimiter=",", - usecols=(0, 2, 4), names=True) - assert_equal(test.dtype.names, ctrl_names) - # - test = np.genfromtxt(TextIO(data), - dtype=(int, int, int), delimiter=",", - usecols=("A", "C", "E"), names=True) - assert_equal(test.dtype.names, ctrl_names) - # - test = np.genfromtxt(TextIO(data), - dtype=int, delimiter=",", - usecols=("A", "C", "E"), names=True) - assert_equal(test.dtype.names, ctrl_names) - - def test_fixed_width_names(self): - # Test fix-width w/ names - data = " A B C\n 0 1 2.3\n 45 67 9." 
- kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None) - ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)], - dtype=[('A', int), ('B', int), ('C', float)]) - test = np.genfromtxt(TextIO(data), **kwargs) - assert_equal(test, ctrl) - # - kwargs = dict(delimiter=5, names=True, dtype=None) - ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)], - dtype=[('A', int), ('B', int), ('C', float)]) - test = np.genfromtxt(TextIO(data), **kwargs) - assert_equal(test, ctrl) - - def test_filling_values(self): - # Test missing values - data = b"1, 2, 3\n1, , 5\n0, 6, \n" - kwargs = dict(delimiter=",", dtype=None, filling_values=-999) - ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int) - test = np.genfromtxt(TextIO(data), **kwargs) - assert_equal(test, ctrl) - - def test_comments_is_none(self): - # Github issue 329 (None was previously being converted to 'None'). - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) - test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"), - dtype=None, comments=None, delimiter=',') - assert_(w[0].category is np.VisibleDeprecationWarning) - assert_equal(test[1], b'testNonetherestofthedata') - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) - test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"), - dtype=None, comments=None, delimiter=',') - assert_(w[0].category is np.VisibleDeprecationWarning) - assert_equal(test[1], b' testNonetherestofthedata') - - def test_latin1(self): - latin1 = b'\xf6\xfc\xf6' - norm = b"norm1,norm2,norm3\n" - enc = b"test1,testNonethe" + latin1 + b",test3\n" - s = norm + enc + norm - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) - test = np.genfromtxt(TextIO(s), - dtype=None, comments=None, delimiter=',') - assert_(w[0].category is np.VisibleDeprecationWarning) - assert_equal(test[1, 0], 
b"test1") - assert_equal(test[1, 1], b"testNonethe" + latin1) - assert_equal(test[1, 2], b"test3") - test = np.genfromtxt(TextIO(s), - dtype=None, comments=None, delimiter=',', - encoding='latin1') - assert_equal(test[1, 0], u"test1") - assert_equal(test[1, 1], u"testNonethe" + latin1.decode('latin1')) - assert_equal(test[1, 2], u"test3") - - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) - test = np.genfromtxt(TextIO(b"0,testNonethe" + latin1), - dtype=None, comments=None, delimiter=',') - assert_(w[0].category is np.VisibleDeprecationWarning) - assert_equal(test['f0'], 0) - assert_equal(test['f1'], b"testNonethe" + latin1) - - def test_binary_decode_autodtype(self): - utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04' - v = self.loadfunc(BytesIO(utf16), dtype=None, encoding='UTF-16') - assert_array_equal(v, np.array(utf16.decode('UTF-16').split())) - - def test_utf8_byte_encoding(self): - utf8 = b"\xcf\x96" - norm = b"norm1,norm2,norm3\n" - enc = b"test1,testNonethe" + utf8 + b",test3\n" - s = norm + enc + norm - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) - test = np.genfromtxt(TextIO(s), - dtype=None, comments=None, delimiter=',') - assert_(w[0].category is np.VisibleDeprecationWarning) - ctl = np.array([ - [b'norm1', b'norm2', b'norm3'], - [b'test1', b'testNonethe' + utf8, b'test3'], - [b'norm1', b'norm2', b'norm3']]) - assert_array_equal(test, ctl) - - def test_utf8_file(self): - utf8 = b"\xcf\x96" - with temppath() as path: - with open(path, "wb") as f: - f.write((b"test1,testNonethe" + utf8 + b",test3\n") * 2) - test = np.genfromtxt(path, dtype=None, comments=None, - delimiter=',', encoding="UTF-8") - ctl = np.array([ - ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"], - ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]], - dtype=np.unicode_) - assert_array_equal(test, ctl) - - # test a mixed dtype - with 
open(path, "wb") as f: - f.write(b"0,testNonethe" + utf8) - test = np.genfromtxt(path, dtype=None, comments=None, - delimiter=',', encoding="UTF-8") - assert_equal(test['f0'], 0) - assert_equal(test['f1'], "testNonethe" + utf8.decode("UTF-8")) - - def test_utf8_file_nodtype_unicode(self): - # bytes encoding with non-latin1 -> unicode upcast - utf8 = u'\u03d6' - latin1 = u'\xf6\xfc\xf6' - - # skip test if cannot encode utf8 test string with preferred - # encoding. The preferred encoding is assumed to be the default - # encoding of io.open. Will need to change this for PyTest, maybe - # using pytest.mark.xfail(raises=***). - try: - encoding = locale.getpreferredencoding() - utf8.encode(encoding) - except (UnicodeError, ImportError): - pytest.skip('Skipping test_utf8_file_nodtype_unicode, ' - 'unable to encode utf8 in preferred encoding') - - with temppath() as path: - with io.open(path, "wt") as f: - f.write(u"norm1,norm2,norm3\n") - f.write(u"norm1," + latin1 + u",norm3\n") - f.write(u"test1,testNonethe" + utf8 + u",test3\n") - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', - np.VisibleDeprecationWarning) - test = np.genfromtxt(path, dtype=None, comments=None, - delimiter=',') - # Check for warning when encoding not specified. 
- assert_(w[0].category is np.VisibleDeprecationWarning) - ctl = np.array([ - ["norm1", "norm2", "norm3"], - ["norm1", latin1, "norm3"], - ["test1", "testNonethe" + utf8, "test3"]], - dtype=np.unicode_) - assert_array_equal(test, ctl) - - def test_recfromtxt(self): - # - data = TextIO('A,B\n0,1\n2,3') - kwargs = dict(delimiter=",", missing_values="N/A", names=True) - test = np.recfromtxt(data, **kwargs) - control = np.array([(0, 1), (2, 3)], - dtype=[('A', int), ('B', int)]) - assert_(isinstance(test, np.recarray)) - assert_equal(test, control) - # - data = TextIO('A,B\n0,1\n2,N/A') - test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs) - control = ma.array([(0, 1), (2, -1)], - mask=[(False, False), (False, True)], - dtype=[('A', int), ('B', int)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - assert_equal(test.A, [0, 2]) - - def test_recfromcsv(self): - # - data = TextIO('A,B\n0,1\n2,3') - kwargs = dict(missing_values="N/A", names=True, case_sensitive=True) - test = np.recfromcsv(data, dtype=None, **kwargs) - control = np.array([(0, 1), (2, 3)], - dtype=[('A', int), ('B', int)]) - assert_(isinstance(test, np.recarray)) - assert_equal(test, control) - # - data = TextIO('A,B\n0,1\n2,N/A') - test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs) - control = ma.array([(0, 1), (2, -1)], - mask=[(False, False), (False, True)], - dtype=[('A', int), ('B', int)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - assert_equal(test.A, [0, 2]) - # - data = TextIO('A,B\n0,1\n2,3') - test = np.recfromcsv(data, missing_values='N/A',) - control = np.array([(0, 1), (2, 3)], - dtype=[('a', int), ('b', int)]) - assert_(isinstance(test, np.recarray)) - assert_equal(test, control) - # - data = TextIO('A,B\n0,1\n2,3') - dtype = [('a', int), ('b', float)] - test = np.recfromcsv(data, missing_values='N/A', dtype=dtype) - control = np.array([(0, 1), (2, 3)], - dtype=dtype) - assert_(isinstance(test, np.recarray)) - 
assert_equal(test, control) - - #gh-10394 - data = TextIO('color\n"red"\n"blue"') - test = np.recfromcsv(data, converters={0: lambda x: x.strip(b'\"')}) - control = np.array([('red',), ('blue',)], dtype=[('color', (bytes, 4))]) - assert_equal(test.dtype, control.dtype) - assert_equal(test, control) - - def test_max_rows(self): - # Test the `max_rows` keyword argument. - data = '1 2\n3 4\n5 6\n7 8\n9 10\n' - txt = TextIO(data) - a1 = np.genfromtxt(txt, max_rows=3) - a2 = np.genfromtxt(txt) - assert_equal(a1, [[1, 2], [3, 4], [5, 6]]) - assert_equal(a2, [[7, 8], [9, 10]]) - - # max_rows must be at least 1. - assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=0) - - # An input with several invalid rows. - data = '1 1\n2 2\n0 \n3 3\n4 4\n5 \n6 \n7 \n' - - test = np.genfromtxt(TextIO(data), max_rows=2) - control = np.array([[1., 1.], [2., 2.]]) - assert_equal(test, control) - - # Test keywords conflict - assert_raises(ValueError, np.genfromtxt, TextIO(data), skip_footer=1, - max_rows=4) - - # Test with invalid value - assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4) - - # Test with invalid not raise - with suppress_warnings() as sup: - sup.filter(ConversionWarning) - - test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False) - control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]) - assert_equal(test, control) - - test = np.genfromtxt(TextIO(data), max_rows=5, invalid_raise=False) - control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]) - assert_equal(test, control) - - # Structured array with field names. 
- data = 'a b\n#c d\n1 1\n2 2\n#0 \n3 3\n4 4\n5 5\n' - - # Test with header, names and comments - txt = TextIO(data) - test = np.genfromtxt(txt, skip_header=1, max_rows=3, names=True) - control = np.array([(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)], - dtype=[('c', ' should convert to float - # 2**34 = 17179869184 => should convert to int64 - # 2**10 = 1024 => should convert to int (int32 on 32-bit systems, - # int64 on 64-bit systems) - - data = TextIO('73786976294838206464 17179869184 1024') - - test = np.genfromtxt(data, dtype=None) - - assert_equal(test.dtype.names, ['f0', 'f1', 'f2']) - - assert_(test.dtype['f0'] == float) - assert_(test.dtype['f1'] == np.int64) - assert_(test.dtype['f2'] == np.integer) - - assert_allclose(test['f0'], 73786976294838206464.) - assert_equal(test['f1'], 17179869184) - assert_equal(test['f2'], 1024) - - -@pytest.mark.skipif(Path is None, reason="No pathlib.Path") -class TestPathUsage(object): - # Test that pathlib.Path can be used - def test_loadtxt(self): - with temppath(suffix='.txt') as path: - path = Path(path) - a = np.array([[1.1, 2], [3, 4]]) - np.savetxt(path, a) - x = np.loadtxt(path) - assert_array_equal(x, a) - - def test_save_load(self): - # Test that pathlib.Path instances can be used with save. - with temppath(suffix='.npy') as path: - path = Path(path) - a = np.array([[1, 2], [3, 4]], int) - np.save(path, a) - data = np.load(path) - assert_array_equal(data, a) - - def test_save_load_memmap(self): - # Test that pathlib.Path instances can be loaded mem-mapped. - with temppath(suffix='.npy') as path: - path = Path(path) - a = np.array([[1, 2], [3, 4]], int) - np.save(path, a) - data = np.load(path, mmap_mode='r') - assert_array_equal(data, a) - # close the mem-mapped file - del data - - def test_save_load_memmap_readwrite(self): - # Test that pathlib.Path instances can be written mem-mapped. 
- with temppath(suffix='.npy') as path: - path = Path(path) - a = np.array([[1, 2], [3, 4]], int) - np.save(path, a) - b = np.load(path, mmap_mode='r+') - a[0][0] = 5 - b[0][0] = 5 - del b # closes the file - data = np.load(path) - assert_array_equal(data, a) - - def test_savez_load(self): - # Test that pathlib.Path instances can be used with savez. - with temppath(suffix='.npz') as path: - path = Path(path) - np.savez(path, lab='place holder') - with np.load(path) as data: - assert_array_equal(data['lab'], 'place holder') - - def test_savez_compressed_load(self): - # Test that pathlib.Path instances can be used with savez. - with temppath(suffix='.npz') as path: - path = Path(path) - np.savez_compressed(path, lab='place holder') - data = np.load(path) - assert_array_equal(data['lab'], 'place holder') - data.close() - - def test_genfromtxt(self): - with temppath(suffix='.txt') as path: - path = Path(path) - a = np.array([(1, 2), (3, 4)]) - np.savetxt(path, a) - data = np.genfromtxt(path) - assert_array_equal(a, data) - - def test_ndfromtxt(self): - # Test outputting a standard ndarray - with temppath(suffix='.txt') as path: - path = Path(path) - with path.open('w') as f: - f.write(u'1 2\n3 4') - - control = np.array([[1, 2], [3, 4]], dtype=int) - test = np.genfromtxt(path, dtype=int) - assert_array_equal(test, control) - - def test_mafromtxt(self): - # From `test_fancy_dtype_alt` above - with temppath(suffix='.txt') as path: - path = Path(path) - with path.open('w') as f: - f.write(u'1,2,3.0\n4,5,6.0\n') - - test = np.genfromtxt(path, delimiter=',', usemask=True) - control = ma.array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)]) - assert_equal(test, control) - - def test_recfromtxt(self): - with temppath(suffix='.txt') as path: - path = Path(path) - with path.open('w') as f: - f.write(u'A,B\n0,1\n2,3') - - kwargs = dict(delimiter=",", missing_values="N/A", names=True) - test = np.recfromtxt(path, **kwargs) - control = np.array([(0, 1), (2, 3)], - dtype=[('A', int), ('B', 
int)]) - assert_(isinstance(test, np.recarray)) - assert_equal(test, control) - - def test_recfromcsv(self): - with temppath(suffix='.txt') as path: - path = Path(path) - with path.open('w') as f: - f.write(u'A,B\n0,1\n2,3') - - kwargs = dict(missing_values="N/A", names=True, case_sensitive=True) - test = np.recfromcsv(path, dtype=None, **kwargs) - control = np.array([(0, 1), (2, 3)], - dtype=[('A', int), ('B', int)]) - assert_(isinstance(test, np.recarray)) - assert_equal(test, control) - - -def test_gzip_load(): - a = np.random.random((5, 5)) - - s = BytesIO() - f = gzip.GzipFile(fileobj=s, mode="w") - - np.save(f, a) - f.close() - s.seek(0) - - f = gzip.GzipFile(fileobj=s, mode="r") - assert_array_equal(np.load(f), a) - - -# These next two classes encode the minimal API needed to save()/load() arrays. -# The `test_ducktyping` ensures they work correctly -class JustWriter(object): - def __init__(self, base): - self.base = base - - def write(self, s): - return self.base.write(s) - - def flush(self): - return self.base.flush() - -class JustReader(object): - def __init__(self, base): - self.base = base - - def read(self, n): - return self.base.read(n) - - def seek(self, off, whence=0): - return self.base.seek(off, whence) - - -def test_ducktyping(): - a = np.random.random((5, 5)) - - s = BytesIO() - f = JustWriter(s) - - np.save(f, a) - f.flush() - s.seek(0) - - f = JustReader(s) - assert_array_equal(np.load(f), a) - - - -def test_gzip_loadtxt(): - # Thanks to another windows brokenness, we can't use - # NamedTemporaryFile: a file created from this function cannot be - # reopened by another open call. 
So we first put the gzipped string - # of the test reference array, write it to a securely opened file, - # which is then read from by the loadtxt function - s = BytesIO() - g = gzip.GzipFile(fileobj=s, mode='w') - g.write(b'1 2 3\n') - g.close() - - s.seek(0) - with temppath(suffix='.gz') as name: - with open(name, 'wb') as f: - f.write(s.read()) - res = np.loadtxt(name) - s.close() - - assert_array_equal(res, [1, 2, 3]) - - -def test_gzip_loadtxt_from_string(): - s = BytesIO() - f = gzip.GzipFile(fileobj=s, mode="w") - f.write(b'1 2 3\n') - f.close() - s.seek(0) - - f = gzip.GzipFile(fileobj=s, mode="r") - assert_array_equal(np.loadtxt(f), [1, 2, 3]) - - -def test_npzfile_dict(): - s = BytesIO() - x = np.zeros((3, 3)) - y = np.zeros((3, 3)) - - np.savez(s, x=x, y=y) - s.seek(0) - - z = np.load(s) - - assert_('x' in z) - assert_('y' in z) - assert_('x' in z.keys()) - assert_('y' in z.keys()) - - for f, a in z.items(): - assert_(f in ['x', 'y']) - assert_equal(a.shape, (3, 3)) - - assert_(len(z.items()) == 2) - - for f in z: - assert_(f in ['x', 'y']) - - assert_('x' in z.keys()) - - -@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") -def test_load_refcount(): - # Check that objects returned by np.load are directly freed based on - # their refcount, rather than needing the gc to collect them. 
- - f = BytesIO() - np.savez(f, [1, 2, 3]) - f.seek(0) - - with assert_no_gc_cycles(): - np.load(f) - - f.seek(0) - dt = [("a", 'u1', 2), ("b", 'u1', 2)] - with assert_no_gc_cycles(): - x = np.loadtxt(TextIO("0 1 2 3"), dtype=dt) - assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt)) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_mixins.py b/venv/lib/python3.7/site-packages/numpy/lib/tests/test_mixins.py deleted file mode 100644 index 3dd5346..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_mixins.py +++ /dev/null @@ -1,224 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numbers -import operator -import sys - -import numpy as np -from numpy.testing import assert_, assert_equal, assert_raises - - -PY2 = sys.version_info.major < 3 - - -# NOTE: This class should be kept as an exact copy of the example from the -# docstring for NDArrayOperatorsMixin. - -class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin): - def __init__(self, value): - self.value = np.asarray(value) - - # One might also consider adding the built-in list type to this - # list, to support operations like np.add(array_like, list) - _HANDLED_TYPES = (np.ndarray, numbers.Number) - - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - out = kwargs.get('out', ()) - for x in inputs + out: - # Only support operations with instances of _HANDLED_TYPES. - # Use ArrayLike instead of type(self) for isinstance to - # allow subclasses that don't override __array_ufunc__ to - # handle ArrayLike objects. - if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)): - return NotImplemented - - # Defer to the implementation of the ufunc on unwrapped values. 
- inputs = tuple(x.value if isinstance(x, ArrayLike) else x - for x in inputs) - if out: - kwargs['out'] = tuple( - x.value if isinstance(x, ArrayLike) else x - for x in out) - result = getattr(ufunc, method)(*inputs, **kwargs) - - if type(result) is tuple: - # multiple return values - return tuple(type(self)(x) for x in result) - elif method == 'at': - # no return value - return None - else: - # one return value - return type(self)(result) - - def __repr__(self): - return '%s(%r)' % (type(self).__name__, self.value) - - -def wrap_array_like(result): - if type(result) is tuple: - return tuple(ArrayLike(r) for r in result) - else: - return ArrayLike(result) - - -def _assert_equal_type_and_value(result, expected, err_msg=None): - assert_equal(type(result), type(expected), err_msg=err_msg) - if isinstance(result, tuple): - assert_equal(len(result), len(expected), err_msg=err_msg) - for result_item, expected_item in zip(result, expected): - _assert_equal_type_and_value(result_item, expected_item, err_msg) - else: - assert_equal(result.value, expected.value, err_msg=err_msg) - assert_equal(getattr(result.value, 'dtype', None), - getattr(expected.value, 'dtype', None), err_msg=err_msg) - - -_ALL_BINARY_OPERATORS = [ - operator.lt, - operator.le, - operator.eq, - operator.ne, - operator.gt, - operator.ge, - operator.add, - operator.sub, - operator.mul, - operator.truediv, - operator.floordiv, - # TODO: test div on Python 2, only - operator.mod, - divmod, - pow, - operator.lshift, - operator.rshift, - operator.and_, - operator.xor, - operator.or_, -] - - -class TestNDArrayOperatorsMixin(object): - - def test_array_like_add(self): - - def check(result): - _assert_equal_type_and_value(result, ArrayLike(0)) - - check(ArrayLike(0) + 0) - check(0 + ArrayLike(0)) - - check(ArrayLike(0) + np.array(0)) - check(np.array(0) + ArrayLike(0)) - - check(ArrayLike(np.array(0)) + 0) - check(0 + ArrayLike(np.array(0))) - - check(ArrayLike(np.array(0)) + np.array(0)) - check(np.array(0) + 
ArrayLike(np.array(0))) - - def test_inplace(self): - array_like = ArrayLike(np.array([0])) - array_like += 1 - _assert_equal_type_and_value(array_like, ArrayLike(np.array([1]))) - - array = np.array([0]) - array += ArrayLike(1) - _assert_equal_type_and_value(array, ArrayLike(np.array([1]))) - - def test_opt_out(self): - - class OptOut(object): - """Object that opts out of __array_ufunc__.""" - __array_ufunc__ = None - - def __add__(self, other): - return self - - def __radd__(self, other): - return self - - array_like = ArrayLike(1) - opt_out = OptOut() - - # supported operations - assert_(array_like + opt_out is opt_out) - assert_(opt_out + array_like is opt_out) - - # not supported - with assert_raises(TypeError): - # don't use the Python default, array_like = array_like + opt_out - array_like += opt_out - with assert_raises(TypeError): - array_like - opt_out - with assert_raises(TypeError): - opt_out - array_like - - def test_subclass(self): - - class SubArrayLike(ArrayLike): - """Should take precedence over ArrayLike.""" - - x = ArrayLike(0) - y = SubArrayLike(1) - _assert_equal_type_and_value(x + y, y) - _assert_equal_type_and_value(y + x, y) - - def test_object(self): - x = ArrayLike(0) - obj = object() - with assert_raises(TypeError): - x + obj - with assert_raises(TypeError): - obj + x - with assert_raises(TypeError): - x += obj - - def test_unary_methods(self): - array = np.array([-1, 0, 1, 2]) - array_like = ArrayLike(array) - for op in [operator.neg, - operator.pos, - abs, - operator.invert]: - _assert_equal_type_and_value(op(array_like), ArrayLike(op(array))) - - def test_forward_binary_methods(self): - array = np.array([-1, 0, 1, 2]) - array_like = ArrayLike(array) - for op in _ALL_BINARY_OPERATORS: - expected = wrap_array_like(op(array, 1)) - actual = op(array_like, 1) - err_msg = 'failed for operator {}'.format(op) - _assert_equal_type_and_value(expected, actual, err_msg=err_msg) - - def test_reflected_binary_methods(self): - for op in 
_ALL_BINARY_OPERATORS: - expected = wrap_array_like(op(2, 1)) - actual = op(2, ArrayLike(1)) - err_msg = 'failed for operator {}'.format(op) - _assert_equal_type_and_value(expected, actual, err_msg=err_msg) - - def test_matmul(self): - array = np.array([1, 2], dtype=np.float64) - array_like = ArrayLike(array) - expected = ArrayLike(np.float64(5)) - _assert_equal_type_and_value(expected, np.matmul(array_like, array)) - if not PY2: - _assert_equal_type_and_value( - expected, operator.matmul(array_like, array)) - _assert_equal_type_and_value( - expected, operator.matmul(array, array_like)) - - def test_ufunc_at(self): - array = ArrayLike(np.array([1, 2, 3, 4])) - assert_(np.negative.at(array, np.array([0, 1])) is None) - _assert_equal_type_and_value(array, ArrayLike([-1, -2, 3, 4])) - - def test_ufunc_two_outputs(self): - mantissa, exponent = np.frexp(2 ** -3) - expected = (ArrayLike(mantissa), ArrayLike(exponent)) - _assert_equal_type_and_value( - np.frexp(ArrayLike(2 ** -3)), expected) - _assert_equal_type_and_value( - np.frexp(ArrayLike(np.array(2 ** -3))), expected) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_nanfunctions.py b/venv/lib/python3.7/site-packages/numpy/lib/tests/test_nanfunctions.py deleted file mode 100644 index da2d0cc..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_nanfunctions.py +++ /dev/null @@ -1,982 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import warnings -import pytest - -import numpy as np -from numpy.lib.nanfunctions import _nan_mask, _replace_nan -from numpy.testing import ( - assert_, assert_equal, assert_almost_equal, assert_no_warnings, - assert_raises, assert_array_equal, suppress_warnings - ) - - -# Test data -_ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170], - [0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833], - [np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954], - [0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]]) - - -# Rows of _ndat 
with nans removed -_rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]), - np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]), - np.array([0.1042, -0.5954]), - np.array([0.1610, 0.1859, 0.3146])] - -# Rows of _ndat with nans converted to ones -_ndat_ones = np.array([[0.6244, 1.0, 0.2692, 0.0116, 1.0, 0.1170], - [0.5351, -0.9403, 1.0, 0.2100, 0.4759, 0.2833], - [1.0, 1.0, 1.0, 0.1042, 1.0, -0.5954], - [0.1610, 1.0, 1.0, 0.1859, 0.3146, 1.0]]) - -# Rows of _ndat with nans converted to zeros -_ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170], - [0.5351, -0.9403, 0.0, 0.2100, 0.4759, 0.2833], - [0.0, 0.0, 0.0, 0.1042, 0.0, -0.5954], - [0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]]) - - -class TestNanFunctions_MinMax(object): - - nanfuncs = [np.nanmin, np.nanmax] - stdfuncs = [np.min, np.max] - - def test_mutation(self): - # Check that passed array is not modified. - ndat = _ndat.copy() - for f in self.nanfuncs: - f(ndat) - assert_equal(ndat, _ndat) - - def test_keepdims(self): - mat = np.eye(3) - for nf, rf in zip(self.nanfuncs, self.stdfuncs): - for axis in [None, 0, 1]: - tgt = rf(mat, axis=axis, keepdims=True) - res = nf(mat, axis=axis, keepdims=True) - assert_(res.ndim == tgt.ndim) - - def test_out(self): - mat = np.eye(3) - for nf, rf in zip(self.nanfuncs, self.stdfuncs): - resout = np.zeros(3) - tgt = rf(mat, axis=1) - res = nf(mat, axis=1, out=resout) - assert_almost_equal(res, resout) - assert_almost_equal(res, tgt) - - def test_dtype_from_input(self): - codes = 'efdgFDG' - for nf, rf in zip(self.nanfuncs, self.stdfuncs): - for c in codes: - mat = np.eye(3, dtype=c) - tgt = rf(mat, axis=1).dtype.type - res = nf(mat, axis=1).dtype.type - assert_(res is tgt) - # scalar case - tgt = rf(mat, axis=None).dtype.type - res = nf(mat, axis=None).dtype.type - assert_(res is tgt) - - def test_result_values(self): - for nf, rf in zip(self.nanfuncs, self.stdfuncs): - tgt = [rf(d) for d in _rdat] - res = nf(_ndat, axis=1) - assert_almost_equal(res, tgt) - - def 
test_allnans(self): - mat = np.array([np.nan]*9).reshape(3, 3) - for f in self.nanfuncs: - for axis in [None, 0, 1]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_(np.isnan(f(mat, axis=axis)).all()) - assert_(len(w) == 1, 'no warning raised') - assert_(issubclass(w[0].category, RuntimeWarning)) - # Check scalars - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_(np.isnan(f(np.nan))) - assert_(len(w) == 1, 'no warning raised') - assert_(issubclass(w[0].category, RuntimeWarning)) - - def test_masked(self): - mat = np.ma.fix_invalid(_ndat) - msk = mat._mask.copy() - for f in [np.nanmin]: - res = f(mat, axis=1) - tgt = f(_ndat, axis=1) - assert_equal(res, tgt) - assert_equal(mat._mask, msk) - assert_(not np.isinf(mat).any()) - - def test_scalar(self): - for f in self.nanfuncs: - assert_(f(0.) == 0.) - - def test_subclass(self): - class MyNDArray(np.ndarray): - pass - - # Check that it works and that type and - # shape are preserved - mine = np.eye(3).view(MyNDArray) - for f in self.nanfuncs: - res = f(mine, axis=0) - assert_(isinstance(res, MyNDArray)) - assert_(res.shape == (3,)) - res = f(mine, axis=1) - assert_(isinstance(res, MyNDArray)) - assert_(res.shape == (3,)) - res = f(mine) - assert_(res.shape == ()) - - # check that rows of nan are dealt with for subclasses (#4628) - mine[1] = np.nan - for f in self.nanfuncs: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - res = f(mine, axis=0) - assert_(isinstance(res, MyNDArray)) - assert_(not np.any(np.isnan(res))) - assert_(len(w) == 0) - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - res = f(mine, axis=1) - assert_(isinstance(res, MyNDArray)) - assert_(np.isnan(res[1]) and not np.isnan(res[0]) - and not np.isnan(res[2])) - assert_(len(w) == 1, 'no warning raised') - assert_(issubclass(w[0].category, RuntimeWarning)) - - with 
warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - res = f(mine) - assert_(res.shape == ()) - assert_(res != np.nan) - assert_(len(w) == 0) - - def test_object_array(self): - arr = np.array([[1.0, 2.0], [np.nan, 4.0], [np.nan, np.nan]], dtype=object) - assert_equal(np.nanmin(arr), 1.0) - assert_equal(np.nanmin(arr, axis=0), [1.0, 2.0]) - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - # assert_equal does not work on object arrays of nan - assert_equal(list(np.nanmin(arr, axis=1)), [1.0, 4.0, np.nan]) - assert_(len(w) == 1, 'no warning raised') - assert_(issubclass(w[0].category, RuntimeWarning)) - - -class TestNanFunctions_ArgminArgmax(object): - - nanfuncs = [np.nanargmin, np.nanargmax] - - def test_mutation(self): - # Check that passed array is not modified. - ndat = _ndat.copy() - for f in self.nanfuncs: - f(ndat) - assert_equal(ndat, _ndat) - - def test_result_values(self): - for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]): - for row in _ndat: - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in") - ind = f(row) - val = row[ind] - # comparing with NaN is tricky as the result - # is always false except for NaN != NaN - assert_(not np.isnan(val)) - assert_(not fcmp(val, row).any()) - assert_(not np.equal(val, row[:ind]).any()) - - def test_allnans(self): - mat = np.array([np.nan]*9).reshape(3, 3) - for f in self.nanfuncs: - for axis in [None, 0, 1]: - assert_raises(ValueError, f, mat, axis=axis) - assert_raises(ValueError, f, np.nan) - - def test_empty(self): - mat = np.zeros((0, 3)) - for f in self.nanfuncs: - for axis in [0, None]: - assert_raises(ValueError, f, mat, axis=axis) - for axis in [1]: - res = f(mat, axis=axis) - assert_equal(res, np.zeros(0)) - - def test_scalar(self): - for f in self.nanfuncs: - assert_(f(0.) == 0.) 
- - def test_subclass(self): - class MyNDArray(np.ndarray): - pass - - # Check that it works and that type and - # shape are preserved - mine = np.eye(3).view(MyNDArray) - for f in self.nanfuncs: - res = f(mine, axis=0) - assert_(isinstance(res, MyNDArray)) - assert_(res.shape == (3,)) - res = f(mine, axis=1) - assert_(isinstance(res, MyNDArray)) - assert_(res.shape == (3,)) - res = f(mine) - assert_(res.shape == ()) - - -class TestNanFunctions_IntTypes(object): - - int_types = (np.int8, np.int16, np.int32, np.int64, np.uint8, - np.uint16, np.uint32, np.uint64) - - mat = np.array([127, 39, 93, 87, 46]) - - def integer_arrays(self): - for dtype in self.int_types: - yield self.mat.astype(dtype) - - def test_nanmin(self): - tgt = np.min(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nanmin(mat), tgt) - - def test_nanmax(self): - tgt = np.max(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nanmax(mat), tgt) - - def test_nanargmin(self): - tgt = np.argmin(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nanargmin(mat), tgt) - - def test_nanargmax(self): - tgt = np.argmax(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nanargmax(mat), tgt) - - def test_nansum(self): - tgt = np.sum(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nansum(mat), tgt) - - def test_nanprod(self): - tgt = np.prod(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nanprod(mat), tgt) - - def test_nancumsum(self): - tgt = np.cumsum(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nancumsum(mat), tgt) - - def test_nancumprod(self): - tgt = np.cumprod(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nancumprod(mat), tgt) - - def test_nanmean(self): - tgt = np.mean(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nanmean(mat), tgt) - - def test_nanvar(self): - tgt = np.var(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nanvar(mat), tgt) - - tgt = 
np.var(mat, ddof=1) - for mat in self.integer_arrays(): - assert_equal(np.nanvar(mat, ddof=1), tgt) - - def test_nanstd(self): - tgt = np.std(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nanstd(mat), tgt) - - tgt = np.std(self.mat, ddof=1) - for mat in self.integer_arrays(): - assert_equal(np.nanstd(mat, ddof=1), tgt) - - -class SharedNanFunctionsTestsMixin(object): - def test_mutation(self): - # Check that passed array is not modified. - ndat = _ndat.copy() - for f in self.nanfuncs: - f(ndat) - assert_equal(ndat, _ndat) - - def test_keepdims(self): - mat = np.eye(3) - for nf, rf in zip(self.nanfuncs, self.stdfuncs): - for axis in [None, 0, 1]: - tgt = rf(mat, axis=axis, keepdims=True) - res = nf(mat, axis=axis, keepdims=True) - assert_(res.ndim == tgt.ndim) - - def test_out(self): - mat = np.eye(3) - for nf, rf in zip(self.nanfuncs, self.stdfuncs): - resout = np.zeros(3) - tgt = rf(mat, axis=1) - res = nf(mat, axis=1, out=resout) - assert_almost_equal(res, resout) - assert_almost_equal(res, tgt) - - def test_dtype_from_dtype(self): - mat = np.eye(3) - codes = 'efdgFDG' - for nf, rf in zip(self.nanfuncs, self.stdfuncs): - for c in codes: - with suppress_warnings() as sup: - if nf in {np.nanstd, np.nanvar} and c in 'FDG': - # Giving the warning is a small bug, see gh-8000 - sup.filter(np.ComplexWarning) - tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type - res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type - assert_(res is tgt) - # scalar case - tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type - res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type - assert_(res is tgt) - - def test_dtype_from_char(self): - mat = np.eye(3) - codes = 'efdgFDG' - for nf, rf in zip(self.nanfuncs, self.stdfuncs): - for c in codes: - with suppress_warnings() as sup: - if nf in {np.nanstd, np.nanvar} and c in 'FDG': - # Giving the warning is a small bug, see gh-8000 - sup.filter(np.ComplexWarning) - tgt = rf(mat, dtype=c, axis=1).dtype.type - res = nf(mat, 
dtype=c, axis=1).dtype.type - assert_(res is tgt) - # scalar case - tgt = rf(mat, dtype=c, axis=None).dtype.type - res = nf(mat, dtype=c, axis=None).dtype.type - assert_(res is tgt) - - def test_dtype_from_input(self): - codes = 'efdgFDG' - for nf, rf in zip(self.nanfuncs, self.stdfuncs): - for c in codes: - mat = np.eye(3, dtype=c) - tgt = rf(mat, axis=1).dtype.type - res = nf(mat, axis=1).dtype.type - assert_(res is tgt, "res %s, tgt %s" % (res, tgt)) - # scalar case - tgt = rf(mat, axis=None).dtype.type - res = nf(mat, axis=None).dtype.type - assert_(res is tgt) - - def test_result_values(self): - for nf, rf in zip(self.nanfuncs, self.stdfuncs): - tgt = [rf(d) for d in _rdat] - res = nf(_ndat, axis=1) - assert_almost_equal(res, tgt) - - def test_scalar(self): - for f in self.nanfuncs: - assert_(f(0.) == 0.) - - def test_subclass(self): - class MyNDArray(np.ndarray): - pass - - # Check that it works and that type and - # shape are preserved - array = np.eye(3) - mine = array.view(MyNDArray) - for f in self.nanfuncs: - expected_shape = f(array, axis=0).shape - res = f(mine, axis=0) - assert_(isinstance(res, MyNDArray)) - assert_(res.shape == expected_shape) - expected_shape = f(array, axis=1).shape - res = f(mine, axis=1) - assert_(isinstance(res, MyNDArray)) - assert_(res.shape == expected_shape) - expected_shape = f(array).shape - res = f(mine) - assert_(isinstance(res, MyNDArray)) - assert_(res.shape == expected_shape) - - -class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin): - - nanfuncs = [np.nansum, np.nanprod] - stdfuncs = [np.sum, np.prod] - - def test_allnans(self): - # Check for FutureWarning - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - res = np.nansum([np.nan]*3, axis=None) - assert_(res == 0, 'result is not 0') - assert_(len(w) == 0, 'warning raised') - # Check scalar - res = np.nansum(np.nan) - assert_(res == 0, 'result is not 0') - assert_(len(w) == 0, 'warning raised') - # Check there is no warning 
for not all-nan - np.nansum([0]*3, axis=None) - assert_(len(w) == 0, 'unwanted warning raised') - - def test_empty(self): - for f, tgt_value in zip([np.nansum, np.nanprod], [0, 1]): - mat = np.zeros((0, 3)) - tgt = [tgt_value]*3 - res = f(mat, axis=0) - assert_equal(res, tgt) - tgt = [] - res = f(mat, axis=1) - assert_equal(res, tgt) - tgt = tgt_value - res = f(mat, axis=None) - assert_equal(res, tgt) - - -class TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin): - - nanfuncs = [np.nancumsum, np.nancumprod] - stdfuncs = [np.cumsum, np.cumprod] - - def test_allnans(self): - for f, tgt_value in zip(self.nanfuncs, [0, 1]): - # Unlike other nan-functions, sum/prod/cumsum/cumprod don't warn on all nan input - with assert_no_warnings(): - res = f([np.nan]*3, axis=None) - tgt = tgt_value*np.ones((3)) - assert_(np.array_equal(res, tgt), 'result is not %s * np.ones((3))' % (tgt_value)) - # Check scalar - res = f(np.nan) - tgt = tgt_value*np.ones((1)) - assert_(np.array_equal(res, tgt), 'result is not %s * np.ones((1))' % (tgt_value)) - # Check there is no warning for not all-nan - f([0]*3, axis=None) - - def test_empty(self): - for f, tgt_value in zip(self.nanfuncs, [0, 1]): - mat = np.zeros((0, 3)) - tgt = tgt_value*np.ones((0, 3)) - res = f(mat, axis=0) - assert_equal(res, tgt) - tgt = mat - res = f(mat, axis=1) - assert_equal(res, tgt) - tgt = np.zeros((0)) - res = f(mat, axis=None) - assert_equal(res, tgt) - - def test_keepdims(self): - for f, g in zip(self.nanfuncs, self.stdfuncs): - mat = np.eye(3) - for axis in [None, 0, 1]: - tgt = f(mat, axis=axis, out=None) - res = g(mat, axis=axis, out=None) - assert_(res.ndim == tgt.ndim) - - for f in self.nanfuncs: - d = np.ones((3, 5, 7, 11)) - # Randomly set some elements to NaN: - rs = np.random.RandomState(0) - d[rs.rand(*d.shape) < 0.5] = np.nan - res = f(d, axis=None) - assert_equal(res.shape, (1155,)) - for axis in np.arange(4): - res = f(d, axis=axis) - assert_equal(res.shape, (3, 5, 7, 11)) - - def 
test_result_values(self): - for axis in (-2, -1, 0, 1, None): - tgt = np.cumprod(_ndat_ones, axis=axis) - res = np.nancumprod(_ndat, axis=axis) - assert_almost_equal(res, tgt) - tgt = np.cumsum(_ndat_zeros,axis=axis) - res = np.nancumsum(_ndat, axis=axis) - assert_almost_equal(res, tgt) - - def test_out(self): - mat = np.eye(3) - for nf, rf in zip(self.nanfuncs, self.stdfuncs): - resout = np.eye(3) - for axis in (-2, -1, 0, 1): - tgt = rf(mat, axis=axis) - res = nf(mat, axis=axis, out=resout) - assert_almost_equal(res, resout) - assert_almost_equal(res, tgt) - - -class TestNanFunctions_MeanVarStd(SharedNanFunctionsTestsMixin): - - nanfuncs = [np.nanmean, np.nanvar, np.nanstd] - stdfuncs = [np.mean, np.var, np.std] - - def test_dtype_error(self): - for f in self.nanfuncs: - for dtype in [np.bool_, np.int_, np.object_]: - assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype) - - def test_out_dtype_error(self): - for f in self.nanfuncs: - for dtype in [np.bool_, np.int_, np.object_]: - out = np.empty(_ndat.shape[0], dtype=dtype) - assert_raises(TypeError, f, _ndat, axis=1, out=out) - - def test_ddof(self): - nanfuncs = [np.nanvar, np.nanstd] - stdfuncs = [np.var, np.std] - for nf, rf in zip(nanfuncs, stdfuncs): - for ddof in [0, 1]: - tgt = [rf(d, ddof=ddof) for d in _rdat] - res = nf(_ndat, axis=1, ddof=ddof) - assert_almost_equal(res, tgt) - - def test_ddof_too_big(self): - nanfuncs = [np.nanvar, np.nanstd] - stdfuncs = [np.var, np.std] - dsize = [len(d) for d in _rdat] - for nf, rf in zip(nanfuncs, stdfuncs): - for ddof in range(5): - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - sup.filter(np.ComplexWarning) - tgt = [ddof >= d for d in dsize] - res = nf(_ndat, axis=1, ddof=ddof) - assert_equal(np.isnan(res), tgt) - if any(tgt): - assert_(len(sup.log) == 1) - else: - assert_(len(sup.log) == 0) - - def test_allnans(self): - mat = np.array([np.nan]*9).reshape(3, 3) - for f in self.nanfuncs: - for axis in [None, 0, 1]: - with 
warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_(np.isnan(f(mat, axis=axis)).all()) - assert_(len(w) == 1) - assert_(issubclass(w[0].category, RuntimeWarning)) - # Check scalar - assert_(np.isnan(f(np.nan))) - assert_(len(w) == 2) - assert_(issubclass(w[0].category, RuntimeWarning)) - - def test_empty(self): - mat = np.zeros((0, 3)) - for f in self.nanfuncs: - for axis in [0, None]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_(np.isnan(f(mat, axis=axis)).all()) - assert_(len(w) == 1) - assert_(issubclass(w[0].category, RuntimeWarning)) - for axis in [1]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_equal(f(mat, axis=axis), np.zeros([])) - assert_(len(w) == 0) - - -class TestNanFunctions_Median(object): - - def test_mutation(self): - # Check that passed array is not modified. - ndat = _ndat.copy() - np.nanmedian(ndat) - assert_equal(ndat, _ndat) - - def test_keepdims(self): - mat = np.eye(3) - for axis in [None, 0, 1]: - tgt = np.median(mat, axis=axis, out=None, overwrite_input=False) - res = np.nanmedian(mat, axis=axis, out=None, overwrite_input=False) - assert_(res.ndim == tgt.ndim) - - d = np.ones((3, 5, 7, 11)) - # Randomly set some elements to NaN: - w = np.random.random((4, 200)) * np.array(d.shape)[:, None] - w = w.astype(np.intp) - d[tuple(w)] = np.nan - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) - res = np.nanmedian(d, axis=None, keepdims=True) - assert_equal(res.shape, (1, 1, 1, 1)) - res = np.nanmedian(d, axis=(0, 1), keepdims=True) - assert_equal(res.shape, (1, 1, 7, 11)) - res = np.nanmedian(d, axis=(0, 3), keepdims=True) - assert_equal(res.shape, (1, 5, 7, 1)) - res = np.nanmedian(d, axis=(1,), keepdims=True) - assert_equal(res.shape, (3, 1, 7, 11)) - res = np.nanmedian(d, axis=(0, 1, 2, 3), keepdims=True) - assert_equal(res.shape, (1, 1, 1, 1)) - res = np.nanmedian(d, axis=(0, 1, 3), keepdims=True) - 
assert_equal(res.shape, (1, 1, 7, 1)) - - def test_out(self): - mat = np.random.rand(3, 3) - nan_mat = np.insert(mat, [0, 2], np.nan, axis=1) - resout = np.zeros(3) - tgt = np.median(mat, axis=1) - res = np.nanmedian(nan_mat, axis=1, out=resout) - assert_almost_equal(res, resout) - assert_almost_equal(res, tgt) - # 0-d output: - resout = np.zeros(()) - tgt = np.median(mat, axis=None) - res = np.nanmedian(nan_mat, axis=None, out=resout) - assert_almost_equal(res, resout) - assert_almost_equal(res, tgt) - res = np.nanmedian(nan_mat, axis=(0, 1), out=resout) - assert_almost_equal(res, resout) - assert_almost_equal(res, tgt) - - def test_small_large(self): - # test the small and large code paths, current cutoff 400 elements - for s in [5, 20, 51, 200, 1000]: - d = np.random.randn(4, s) - # Randomly set some elements to NaN: - w = np.random.randint(0, d.size, size=d.size // 5) - d.ravel()[w] = np.nan - d[:,0] = 1. # ensure at least one good value - # use normal median without nans to compare - tgt = [] - for x in d: - nonan = np.compress(~np.isnan(x), x) - tgt.append(np.median(nonan, overwrite_input=True)) - - assert_array_equal(np.nanmedian(d, axis=-1), tgt) - - def test_result_values(self): - tgt = [np.median(d) for d in _rdat] - res = np.nanmedian(_ndat, axis=1) - assert_almost_equal(res, tgt) - - def test_allnans(self): - mat = np.array([np.nan]*9).reshape(3, 3) - for axis in [None, 0, 1]: - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - - assert_(np.isnan(np.nanmedian(mat, axis=axis)).all()) - if axis is None: - assert_(len(sup.log) == 1) - else: - assert_(len(sup.log) == 3) - # Check scalar - assert_(np.isnan(np.nanmedian(np.nan))) - if axis is None: - assert_(len(sup.log) == 2) - else: - assert_(len(sup.log) == 4) - - def test_empty(self): - mat = np.zeros((0, 3)) - for axis in [0, None]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_(np.isnan(np.nanmedian(mat, axis=axis)).all()) - assert_(len(w) == 
1) - assert_(issubclass(w[0].category, RuntimeWarning)) - for axis in [1]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_equal(np.nanmedian(mat, axis=axis), np.zeros([])) - assert_(len(w) == 0) - - def test_scalar(self): - assert_(np.nanmedian(0.) == 0.) - - def test_extended_axis_invalid(self): - d = np.ones((3, 5, 7, 11)) - assert_raises(np.AxisError, np.nanmedian, d, axis=-5) - assert_raises(np.AxisError, np.nanmedian, d, axis=(0, -5)) - assert_raises(np.AxisError, np.nanmedian, d, axis=4) - assert_raises(np.AxisError, np.nanmedian, d, axis=(0, 4)) - assert_raises(ValueError, np.nanmedian, d, axis=(1, 1)) - - def test_float_special(self): - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) - for inf in [np.inf, -np.inf]: - a = np.array([[inf, np.nan], [np.nan, np.nan]]) - assert_equal(np.nanmedian(a, axis=0), [inf, np.nan]) - assert_equal(np.nanmedian(a, axis=1), [inf, np.nan]) - assert_equal(np.nanmedian(a), inf) - - # minimum fill value check - a = np.array([[np.nan, np.nan, inf], - [np.nan, np.nan, inf]]) - assert_equal(np.nanmedian(a), inf) - assert_equal(np.nanmedian(a, axis=0), [np.nan, np.nan, inf]) - assert_equal(np.nanmedian(a, axis=1), inf) - - # no mask path - a = np.array([[inf, inf], [inf, inf]]) - assert_equal(np.nanmedian(a, axis=1), inf) - - a = np.array([[inf, 7, -inf, -9], - [-10, np.nan, np.nan, 5], - [4, np.nan, np.nan, inf]], - dtype=np.float32) - if inf > 0: - assert_equal(np.nanmedian(a, axis=0), [4., 7., -inf, 5.]) - assert_equal(np.nanmedian(a), 4.5) - else: - assert_equal(np.nanmedian(a, axis=0), [-10., 7., -inf, -9.]) - assert_equal(np.nanmedian(a), -2.5) - assert_equal(np.nanmedian(a, axis=-1), [-1., -2.5, inf]) - - for i in range(0, 10): - for j in range(1, 10): - a = np.array([([np.nan] * i) + ([inf] * j)] * 2) - assert_equal(np.nanmedian(a), inf) - assert_equal(np.nanmedian(a, axis=1), inf) - assert_equal(np.nanmedian(a, axis=0), - ([np.nan] * i) + [inf] * j) - - a = 
np.array([([np.nan] * i) + ([-inf] * j)] * 2) - assert_equal(np.nanmedian(a), -inf) - assert_equal(np.nanmedian(a, axis=1), -inf) - assert_equal(np.nanmedian(a, axis=0), - ([np.nan] * i) + [-inf] * j) - - -class TestNanFunctions_Percentile(object): - - def test_mutation(self): - # Check that passed array is not modified. - ndat = _ndat.copy() - np.nanpercentile(ndat, 30) - assert_equal(ndat, _ndat) - - def test_keepdims(self): - mat = np.eye(3) - for axis in [None, 0, 1]: - tgt = np.percentile(mat, 70, axis=axis, out=None, - overwrite_input=False) - res = np.nanpercentile(mat, 70, axis=axis, out=None, - overwrite_input=False) - assert_(res.ndim == tgt.ndim) - - d = np.ones((3, 5, 7, 11)) - # Randomly set some elements to NaN: - w = np.random.random((4, 200)) * np.array(d.shape)[:, None] - w = w.astype(np.intp) - d[tuple(w)] = np.nan - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) - res = np.nanpercentile(d, 90, axis=None, keepdims=True) - assert_equal(res.shape, (1, 1, 1, 1)) - res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True) - assert_equal(res.shape, (1, 1, 7, 11)) - res = np.nanpercentile(d, 90, axis=(0, 3), keepdims=True) - assert_equal(res.shape, (1, 5, 7, 1)) - res = np.nanpercentile(d, 90, axis=(1,), keepdims=True) - assert_equal(res.shape, (3, 1, 7, 11)) - res = np.nanpercentile(d, 90, axis=(0, 1, 2, 3), keepdims=True) - assert_equal(res.shape, (1, 1, 1, 1)) - res = np.nanpercentile(d, 90, axis=(0, 1, 3), keepdims=True) - assert_equal(res.shape, (1, 1, 7, 1)) - - def test_out(self): - mat = np.random.rand(3, 3) - nan_mat = np.insert(mat, [0, 2], np.nan, axis=1) - resout = np.zeros(3) - tgt = np.percentile(mat, 42, axis=1) - res = np.nanpercentile(nan_mat, 42, axis=1, out=resout) - assert_almost_equal(res, resout) - assert_almost_equal(res, tgt) - # 0-d output: - resout = np.zeros(()) - tgt = np.percentile(mat, 42, axis=None) - res = np.nanpercentile(nan_mat, 42, axis=None, out=resout) - assert_almost_equal(res, resout) - 
assert_almost_equal(res, tgt) - res = np.nanpercentile(nan_mat, 42, axis=(0, 1), out=resout) - assert_almost_equal(res, resout) - assert_almost_equal(res, tgt) - - def test_result_values(self): - tgt = [np.percentile(d, 28) for d in _rdat] - res = np.nanpercentile(_ndat, 28, axis=1) - assert_almost_equal(res, tgt) - # Transpose the array to fit the output convention of numpy.percentile - tgt = np.transpose([np.percentile(d, (28, 98)) for d in _rdat]) - res = np.nanpercentile(_ndat, (28, 98), axis=1) - assert_almost_equal(res, tgt) - - def test_allnans(self): - mat = np.array([np.nan]*9).reshape(3, 3) - for axis in [None, 0, 1]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_(np.isnan(np.nanpercentile(mat, 60, axis=axis)).all()) - if axis is None: - assert_(len(w) == 1) - else: - assert_(len(w) == 3) - assert_(issubclass(w[0].category, RuntimeWarning)) - # Check scalar - assert_(np.isnan(np.nanpercentile(np.nan, 60))) - if axis is None: - assert_(len(w) == 2) - else: - assert_(len(w) == 4) - assert_(issubclass(w[0].category, RuntimeWarning)) - - def test_empty(self): - mat = np.zeros((0, 3)) - for axis in [0, None]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_(np.isnan(np.nanpercentile(mat, 40, axis=axis)).all()) - assert_(len(w) == 1) - assert_(issubclass(w[0].category, RuntimeWarning)) - for axis in [1]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_equal(np.nanpercentile(mat, 40, axis=axis), np.zeros([])) - assert_(len(w) == 0) - - def test_scalar(self): - assert_equal(np.nanpercentile(0., 100), 0.) 
- a = np.arange(6) - r = np.nanpercentile(a, 50, axis=0) - assert_equal(r, 2.5) - assert_(np.isscalar(r)) - - def test_extended_axis_invalid(self): - d = np.ones((3, 5, 7, 11)) - assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=-5) - assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=(0, -5)) - assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=4) - assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=(0, 4)) - assert_raises(ValueError, np.nanpercentile, d, q=5, axis=(1, 1)) - - def test_multiple_percentiles(self): - perc = [50, 100] - mat = np.ones((4, 3)) - nan_mat = np.nan * mat - # For checking consistency in higher dimensional case - large_mat = np.ones((3, 4, 5)) - large_mat[:, 0:2:4, :] = 0 - large_mat[:, :, 3:] *= 2 - for axis in [None, 0, 1]: - for keepdim in [False, True]: - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "All-NaN slice encountered") - val = np.percentile(mat, perc, axis=axis, keepdims=keepdim) - nan_val = np.nanpercentile(nan_mat, perc, axis=axis, - keepdims=keepdim) - assert_equal(nan_val.shape, val.shape) - - val = np.percentile(large_mat, perc, axis=axis, - keepdims=keepdim) - nan_val = np.nanpercentile(large_mat, perc, axis=axis, - keepdims=keepdim) - assert_equal(nan_val, val) - - megamat = np.ones((3, 4, 5, 6)) - assert_equal(np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6)) - - -class TestNanFunctions_Quantile(object): - # most of this is already tested by TestPercentile - - def test_regression(self): - ar = np.arange(24).reshape(2, 3, 4).astype(float) - ar[0][1] = np.nan - - assert_equal(np.nanquantile(ar, q=0.5), np.nanpercentile(ar, q=50)) - assert_equal(np.nanquantile(ar, q=0.5, axis=0), - np.nanpercentile(ar, q=50, axis=0)) - assert_equal(np.nanquantile(ar, q=0.5, axis=1), - np.nanpercentile(ar, q=50, axis=1)) - assert_equal(np.nanquantile(ar, q=[0.5], axis=1), - np.nanpercentile(ar, q=[50], axis=1)) - assert_equal(np.nanquantile(ar, q=[0.25, 0.5, 0.75], axis=1), - 
np.nanpercentile(ar, q=[25, 50, 75], axis=1)) - - def test_basic(self): - x = np.arange(8) * 0.5 - assert_equal(np.nanquantile(x, 0), 0.) - assert_equal(np.nanquantile(x, 1), 3.5) - assert_equal(np.nanquantile(x, 0.5), 1.75) - - def test_no_p_overwrite(self): - # this is worth retesting, because quantile does not make a copy - p0 = np.array([0, 0.75, 0.25, 0.5, 1.0]) - p = p0.copy() - np.nanquantile(np.arange(100.), p, interpolation="midpoint") - assert_array_equal(p, p0) - - p0 = p0.tolist() - p = p.tolist() - np.nanquantile(np.arange(100.), p, interpolation="midpoint") - assert_array_equal(p, p0) - -@pytest.mark.parametrize("arr, expected", [ - # array of floats with some nans - (np.array([np.nan, 5.0, np.nan, np.inf]), - np.array([False, True, False, True])), - # int64 array that can't possibly have nans - (np.array([1, 5, 7, 9], dtype=np.int64), - True), - # bool array that can't possibly have nans - (np.array([False, True, False, True]), - True), - # 2-D complex array with nans - (np.array([[np.nan, 5.0], - [np.nan, np.inf]], dtype=np.complex64), - np.array([[False, True], - [False, True]])), - ]) -def test__nan_mask(arr, expected): - for out in [None, np.empty(arr.shape, dtype=np.bool_)]: - actual = _nan_mask(arr, out=out) - assert_equal(actual, expected) - # the above won't distinguish between True proper - # and an array of True values; we want True proper - # for types that can't possibly contain NaN - if type(expected) is not np.ndarray: - assert actual is True - - -def test__replace_nan(): - """ Test that _replace_nan returns the original array if there are no - NaNs, not a copy. 
- """ - for dtype in [np.bool, np.int32, np.int64]: - arr = np.array([0, 1], dtype=dtype) - result, mask = _replace_nan(arr, 0) - assert mask is None - # do not make a copy if there are no nans - assert result is arr - - for dtype in [np.float32, np.float64]: - arr = np.array([0, 1], dtype=dtype) - result, mask = _replace_nan(arr, 2) - assert (mask == False).all() - # mask is not None, so we make a copy - assert result is not arr - assert_equal(result, arr) - - arr_nan = np.array([0, 1, np.nan], dtype=dtype) - result_nan, mask_nan = _replace_nan(arr_nan, 2) - assert_equal(mask_nan, np.array([False, False, True])) - assert result_nan is not arr_nan - assert_equal(result_nan, np.array([0, 1, 2])) - assert np.isnan(arr_nan[-1]) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_packbits.py b/venv/lib/python3.7/site-packages/numpy/lib/tests/test_packbits.py deleted file mode 100644 index 95a465c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_packbits.py +++ /dev/null @@ -1,378 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.testing import assert_array_equal, assert_equal, assert_raises -import pytest -from itertools import chain - -def test_packbits(): - # Copied from the docstring. 
- a = [[[1, 0, 1], [0, 1, 0]], - [[1, 1, 0], [0, 0, 1]]] - for dt in '?bBhHiIlLqQ': - arr = np.array(a, dtype=dt) - b = np.packbits(arr, axis=-1) - assert_equal(b.dtype, np.uint8) - assert_array_equal(b, np.array([[[160], [64]], [[192], [32]]])) - - assert_raises(TypeError, np.packbits, np.array(a, dtype=float)) - - -def test_packbits_empty(): - shapes = [ - (0,), (10, 20, 0), (10, 0, 20), (0, 10, 20), (20, 0, 0), (0, 20, 0), - (0, 0, 20), (0, 0, 0), - ] - for dt in '?bBhHiIlLqQ': - for shape in shapes: - a = np.empty(shape, dtype=dt) - b = np.packbits(a) - assert_equal(b.dtype, np.uint8) - assert_equal(b.shape, (0,)) - - -def test_packbits_empty_with_axis(): - # Original shapes and lists of packed shapes for different axes. - shapes = [ - ((0,), [(0,)]), - ((10, 20, 0), [(2, 20, 0), (10, 3, 0), (10, 20, 0)]), - ((10, 0, 20), [(2, 0, 20), (10, 0, 20), (10, 0, 3)]), - ((0, 10, 20), [(0, 10, 20), (0, 2, 20), (0, 10, 3)]), - ((20, 0, 0), [(3, 0, 0), (20, 0, 0), (20, 0, 0)]), - ((0, 20, 0), [(0, 20, 0), (0, 3, 0), (0, 20, 0)]), - ((0, 0, 20), [(0, 0, 20), (0, 0, 20), (0, 0, 3)]), - ((0, 0, 0), [(0, 0, 0), (0, 0, 0), (0, 0, 0)]), - ] - for dt in '?bBhHiIlLqQ': - for in_shape, out_shapes in shapes: - for ax, out_shape in enumerate(out_shapes): - a = np.empty(in_shape, dtype=dt) - b = np.packbits(a, axis=ax) - assert_equal(b.dtype, np.uint8) - assert_equal(b.shape, out_shape) - -@pytest.mark.parametrize('bitorder', ('little', 'big')) -def test_packbits_large(bitorder): - # test data large enough for 16 byte vectorization - a = np.array([1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, - 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, - 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, - 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, - 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, - 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, - 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, - 0, 1, 1, 
0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, - 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, - 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, - 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, - 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, - 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, - 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, - 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0]) - a = a.repeat(3) - for dtype in '?bBhHiIlLqQ': - arr = np.array(a, dtype=dtype) - b = np.packbits(arr, axis=None, bitorder=bitorder) - assert_equal(b.dtype, np.uint8) - r = [252, 127, 192, 3, 254, 7, 252, 0, 7, 31, 240, 0, 28, 1, 255, 252, - 113, 248, 3, 255, 192, 28, 15, 192, 28, 126, 0, 224, 127, 255, - 227, 142, 7, 31, 142, 63, 28, 126, 56, 227, 240, 0, 227, 128, 63, - 224, 14, 56, 252, 112, 56, 255, 241, 248, 3, 240, 56, 224, 112, - 63, 255, 255, 199, 224, 14, 0, 31, 143, 192, 3, 255, 199, 0, 1, - 255, 224, 1, 255, 252, 126, 63, 0, 1, 192, 252, 14, 63, 0, 15, - 199, 252, 113, 255, 3, 128, 56, 252, 14, 7, 0, 113, 255, 255, 142, 56, 227, - 129, 248, 227, 129, 199, 31, 128] - if bitorder == 'big': - assert_array_equal(b, r) - # equal for size being multiple of 8 - assert_array_equal(np.unpackbits(b, bitorder=bitorder)[:-4], a) - - # check last byte of different remainders (16 byte vectorization) - b = [np.packbits(arr[:-i], axis=None)[-1] for i in range(1, 16)] - assert_array_equal(b, [128, 128, 128, 31, 30, 28, 24, 16, 0, 0, 0, 199, - 198, 196, 192]) - - - arr = arr.reshape(36, 25) - b = np.packbits(arr, axis=0) - assert_equal(b.dtype, np.uint8) - assert_array_equal(b, [[190, 186, 178, 178, 150, 215, 87, 83, 83, 195, - 199, 206, 204, 204, 140, 140, 136, 136, 8, 40, 105, - 107, 75, 74, 88], - [72, 216, 248, 241, 227, 195, 202, 90, 90, 83, - 83, 119, 127, 109, 73, 64, 208, 244, 189, 45, - 41, 104, 122, 90, 18], - [113, 120, 248, 216, 152, 24, 60, 52, 182, 150, - 150, 150, 146, 210, 210, 
246, 255, 255, 223, - 151, 21, 17, 17, 131, 163], - [214, 210, 210, 64, 68, 5, 5, 1, 72, 88, 92, - 92, 78, 110, 39, 181, 149, 220, 222, 218, 218, - 202, 234, 170, 168], - [0, 128, 128, 192, 80, 112, 48, 160, 160, 224, - 240, 208, 144, 128, 160, 224, 240, 208, 144, - 144, 176, 240, 224, 192, 128]]) - - b = np.packbits(arr, axis=1) - assert_equal(b.dtype, np.uint8) - assert_array_equal(b, [[252, 127, 192, 0], - [ 7, 252, 15, 128], - [240, 0, 28, 0], - [255, 128, 0, 128], - [192, 31, 255, 128], - [142, 63, 0, 0], - [255, 240, 7, 0], - [ 7, 224, 14, 0], - [126, 0, 224, 0], - [255, 255, 199, 0], - [ 56, 28, 126, 0], - [113, 248, 227, 128], - [227, 142, 63, 0], - [ 0, 28, 112, 0], - [ 15, 248, 3, 128], - [ 28, 126, 56, 0], - [ 56, 255, 241, 128], - [240, 7, 224, 0], - [227, 129, 192, 128], - [255, 255, 254, 0], - [126, 0, 224, 0], - [ 3, 241, 248, 0], - [ 0, 255, 241, 128], - [128, 0, 255, 128], - [224, 1, 255, 128], - [248, 252, 126, 0], - [ 0, 7, 3, 128], - [224, 113, 248, 0], - [ 0, 252, 127, 128], - [142, 63, 224, 0], - [224, 14, 63, 0], - [ 7, 3, 128, 0], - [113, 255, 255, 128], - [ 28, 113, 199, 0], - [ 7, 227, 142, 0], - [ 14, 56, 252, 0]]) - - arr = arr.T.copy() - b = np.packbits(arr, axis=0) - assert_equal(b.dtype, np.uint8) - assert_array_equal(b, [[252, 7, 240, 255, 192, 142, 255, 7, 126, 255, - 56, 113, 227, 0, 15, 28, 56, 240, 227, 255, - 126, 3, 0, 128, 224, 248, 0, 224, 0, 142, 224, - 7, 113, 28, 7, 14], - [127, 252, 0, 128, 31, 63, 240, 224, 0, 255, - 28, 248, 142, 28, 248, 126, 255, 7, 129, 255, - 0, 241, 255, 0, 1, 252, 7, 113, 252, 63, 14, - 3, 255, 113, 227, 56], - [192, 15, 28, 0, 255, 0, 7, 14, 224, 199, 126, - 227, 63, 112, 3, 56, 241, 224, 192, 254, 224, - 248, 241, 255, 255, 126, 3, 248, 127, 224, 63, - 128, 255, 199, 142, 252], - [0, 128, 0, 128, 128, 0, 0, 0, 0, 0, 0, 128, 0, - 0, 128, 0, 128, 0, 128, 0, 0, 0, 128, 128, - 128, 0, 128, 0, 128, 0, 0, 0, 128, 0, 0, 0]]) - - b = np.packbits(arr, axis=1) - assert_equal(b.dtype, np.uint8) - 
assert_array_equal(b, [[190, 72, 113, 214, 0], - [186, 216, 120, 210, 128], - [178, 248, 248, 210, 128], - [178, 241, 216, 64, 192], - [150, 227, 152, 68, 80], - [215, 195, 24, 5, 112], - [ 87, 202, 60, 5, 48], - [ 83, 90, 52, 1, 160], - [ 83, 90, 182, 72, 160], - [195, 83, 150, 88, 224], - [199, 83, 150, 92, 240], - [206, 119, 150, 92, 208], - [204, 127, 146, 78, 144], - [204, 109, 210, 110, 128], - [140, 73, 210, 39, 160], - [140, 64, 246, 181, 224], - [136, 208, 255, 149, 240], - [136, 244, 255, 220, 208], - [ 8, 189, 223, 222, 144], - [ 40, 45, 151, 218, 144], - [105, 41, 21, 218, 176], - [107, 104, 17, 202, 240], - [ 75, 122, 17, 234, 224], - [ 74, 90, 131, 170, 192], - [ 88, 18, 163, 168, 128]]) - - - # result is the same if input is multiplied with a nonzero value - for dtype in 'bBhHiIlLqQ': - arr = np.array(a, dtype=dtype) - rnd = np.random.randint(low=np.iinfo(dtype).min, - high=np.iinfo(dtype).max, size=arr.size, - dtype=dtype) - rnd[rnd == 0] = 1 - arr *= rnd.astype(dtype) - b = np.packbits(arr, axis=-1) - assert_array_equal(np.unpackbits(b)[:-4], a) - - assert_raises(TypeError, np.packbits, np.array(a, dtype=float)) - - -def test_packbits_very_large(): - # test some with a larger arrays gh-8637 - # code is covered earlier but larger array makes crash on bug more likely - for s in range(950, 1050): - for dt in '?bBhHiIlLqQ': - x = np.ones((200, s), dtype=bool) - np.packbits(x, axis=1) - - -def test_unpackbits(): - # Copied from the docstring. 
- a = np.array([[2], [7], [23]], dtype=np.uint8) - b = np.unpackbits(a, axis=1) - assert_equal(b.dtype, np.uint8) - assert_array_equal(b, np.array([[0, 0, 0, 0, 0, 0, 1, 0], - [0, 0, 0, 0, 0, 1, 1, 1], - [0, 0, 0, 1, 0, 1, 1, 1]])) - -def test_pack_unpack_order(): - a = np.array([[2], [7], [23]], dtype=np.uint8) - b = np.unpackbits(a, axis=1) - assert_equal(b.dtype, np.uint8) - b_little = np.unpackbits(a, axis=1, bitorder='little') - b_big = np.unpackbits(a, axis=1, bitorder='big') - assert_array_equal(b, b_big) - assert_array_equal(a, np.packbits(b_little, axis=1, bitorder='little')) - assert_array_equal(b[:,::-1], b_little) - assert_array_equal(a, np.packbits(b_big, axis=1, bitorder='big')) - assert_raises(ValueError, np.unpackbits, a, bitorder='r') - assert_raises(TypeError, np.unpackbits, a, bitorder=10) - - - -def test_unpackbits_empty(): - a = np.empty((0,), dtype=np.uint8) - b = np.unpackbits(a) - assert_equal(b.dtype, np.uint8) - assert_array_equal(b, np.empty((0,))) - - -def test_unpackbits_empty_with_axis(): - # Lists of packed shapes for different axes and unpacked shapes. 
- shapes = [ - ([(0,)], (0,)), - ([(2, 24, 0), (16, 3, 0), (16, 24, 0)], (16, 24, 0)), - ([(2, 0, 24), (16, 0, 24), (16, 0, 3)], (16, 0, 24)), - ([(0, 16, 24), (0, 2, 24), (0, 16, 3)], (0, 16, 24)), - ([(3, 0, 0), (24, 0, 0), (24, 0, 0)], (24, 0, 0)), - ([(0, 24, 0), (0, 3, 0), (0, 24, 0)], (0, 24, 0)), - ([(0, 0, 24), (0, 0, 24), (0, 0, 3)], (0, 0, 24)), - ([(0, 0, 0), (0, 0, 0), (0, 0, 0)], (0, 0, 0)), - ] - for in_shapes, out_shape in shapes: - for ax, in_shape in enumerate(in_shapes): - a = np.empty(in_shape, dtype=np.uint8) - b = np.unpackbits(a, axis=ax) - assert_equal(b.dtype, np.uint8) - assert_equal(b.shape, out_shape) - - -def test_unpackbits_large(): - # test all possible numbers via comparison to already tested packbits - d = np.arange(277, dtype=np.uint8) - assert_array_equal(np.packbits(np.unpackbits(d)), d) - assert_array_equal(np.packbits(np.unpackbits(d[::2])), d[::2]) - d = np.tile(d, (3, 1)) - assert_array_equal(np.packbits(np.unpackbits(d, axis=1), axis=1), d) - d = d.T.copy() - assert_array_equal(np.packbits(np.unpackbits(d, axis=0), axis=0), d) - - -class TestCount(): - x = np.array([ - [1, 0, 1, 0, 0, 1, 0], - [0, 1, 1, 1, 0, 0, 0], - [0, 0, 1, 0, 0, 1, 1], - [1, 1, 0, 0, 0, 1, 1], - [1, 0, 1, 0, 1, 0, 1], - [0, 0, 1, 1, 1, 0, 0], - [0, 1, 0, 1, 0, 1, 0], - ], dtype=np.uint8) - padded1 = np.zeros(57, dtype=np.uint8) - padded1[:49] = x.ravel() - padded1b = np.zeros(57, dtype=np.uint8) - padded1b[:49] = x[::-1].copy().ravel() - padded2 = np.zeros((9, 9), dtype=np.uint8) - padded2[:7, :7] = x - - @pytest.mark.parametrize('bitorder', ('little', 'big')) - @pytest.mark.parametrize('count', chain(range(58), range(-1, -57, -1))) - def test_roundtrip(self, bitorder, count): - if count < 0: - # one extra zero of padding - cutoff = count - 1 - else: - cutoff = count - # test complete invertibility of packbits and unpackbits with count - packed = np.packbits(self.x, bitorder=bitorder) - unpacked = np.unpackbits(packed, count=count, bitorder=bitorder) - 
assert_equal(unpacked.dtype, np.uint8) - assert_array_equal(unpacked, self.padded1[:cutoff]) - - @pytest.mark.parametrize('kwargs', [ - {}, {'count': None}, - ]) - def test_count(self, kwargs): - packed = np.packbits(self.x) - unpacked = np.unpackbits(packed, **kwargs) - assert_equal(unpacked.dtype, np.uint8) - assert_array_equal(unpacked, self.padded1[:-1]) - - @pytest.mark.parametrize('bitorder', ('little', 'big')) - # delta==-1 when count<0 because one extra zero of padding - @pytest.mark.parametrize('count', chain(range(8), range(-1, -9, -1))) - def test_roundtrip_axis(self, bitorder, count): - if count < 0: - # one extra zero of padding - cutoff = count - 1 - else: - cutoff = count - packed0 = np.packbits(self.x, axis=0, bitorder=bitorder) - unpacked0 = np.unpackbits(packed0, axis=0, count=count, - bitorder=bitorder) - assert_equal(unpacked0.dtype, np.uint8) - assert_array_equal(unpacked0, self.padded2[:cutoff, :self.x.shape[1]]) - - packed1 = np.packbits(self.x, axis=1, bitorder=bitorder) - unpacked1 = np.unpackbits(packed1, axis=1, count=count, - bitorder=bitorder) - assert_equal(unpacked1.dtype, np.uint8) - assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :cutoff]) - - @pytest.mark.parametrize('kwargs', [ - {}, {'count': None}, - {'bitorder' : 'little'}, - {'bitorder': 'little', 'count': None}, - {'bitorder' : 'big'}, - {'bitorder': 'big', 'count': None}, - ]) - def test_axis_count(self, kwargs): - packed0 = np.packbits(self.x, axis=0) - unpacked0 = np.unpackbits(packed0, axis=0, **kwargs) - assert_equal(unpacked0.dtype, np.uint8) - if kwargs.get('bitorder', 'big') == 'big': - assert_array_equal(unpacked0, self.padded2[:-1, :self.x.shape[1]]) - else: - assert_array_equal(unpacked0[::-1, :], self.padded2[:-1, :self.x.shape[1]]) - - packed1 = np.packbits(self.x, axis=1) - unpacked1 = np.unpackbits(packed1, axis=1, **kwargs) - assert_equal(unpacked1.dtype, np.uint8) - if kwargs.get('bitorder', 'big') == 'big': - assert_array_equal(unpacked1, 
self.padded2[:self.x.shape[0], :-1]) - else: - assert_array_equal(unpacked1[:, ::-1], self.padded2[:self.x.shape[0], :-1]) - - def test_bad_count(self): - packed0 = np.packbits(self.x, axis=0) - assert_raises(ValueError, np.unpackbits, packed0, axis=0, count=-9) - packed1 = np.packbits(self.x, axis=1) - assert_raises(ValueError, np.unpackbits, packed1, axis=1, count=-9) - packed = np.packbits(self.x) - assert_raises(ValueError, np.unpackbits, packed, count=-57) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_polynomial.py b/venv/lib/python3.7/site-packages/numpy/lib/tests/test_polynomial.py deleted file mode 100644 index 89759bd..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_polynomial.py +++ /dev/null @@ -1,261 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_almost_equal, - assert_array_almost_equal, assert_raises, assert_allclose - ) - - -class TestPolynomial(object): - def test_poly1d_str_and_repr(self): - p = np.poly1d([1., 2, 3]) - assert_equal(repr(p), 'poly1d([1., 2., 3.])') - assert_equal(str(p), - ' 2\n' - '1 x + 2 x + 3') - - q = np.poly1d([3., 2, 1]) - assert_equal(repr(q), 'poly1d([3., 2., 1.])') - assert_equal(str(q), - ' 2\n' - '3 x + 2 x + 1') - - r = np.poly1d([1.89999 + 2j, -3j, -5.12345678, 2 + 1j]) - assert_equal(str(r), - ' 3 2\n' - '(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j)') - - assert_equal(str(np.poly1d([-3, -2, -1])), - ' 2\n' - '-3 x - 2 x - 1') - - def test_poly1d_resolution(self): - p = np.poly1d([1., 2, 3]) - q = np.poly1d([3., 2, 1]) - assert_equal(p(0), 3.0) - assert_equal(p(5), 38.0) - assert_equal(q(0), 1.0) - assert_equal(q(5), 86.0) - - def test_poly1d_math(self): - # here we use some simple coeffs to make calculations easier - p = np.poly1d([1., 2, 4]) - q = np.poly1d([4., 2, 1]) - assert_equal(p/q, (np.poly1d([0.25]), np.poly1d([1.5, 3.75]))) - 
assert_equal(p.integ(), np.poly1d([1/3, 1., 4., 0.])) - assert_equal(p.integ(1), np.poly1d([1/3, 1., 4., 0.])) - - p = np.poly1d([1., 2, 3]) - q = np.poly1d([3., 2, 1]) - assert_equal(p * q, np.poly1d([3., 8., 14., 8., 3.])) - assert_equal(p + q, np.poly1d([4., 4., 4.])) - assert_equal(p - q, np.poly1d([-2., 0., 2.])) - assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., 312., 324., 216., 81.])) - assert_equal(p(q), np.poly1d([9., 12., 16., 8., 6.])) - assert_equal(q(p), np.poly1d([3., 12., 32., 40., 34.])) - assert_equal(p.deriv(), np.poly1d([2., 2.])) - assert_equal(p.deriv(2), np.poly1d([2.])) - assert_equal(np.polydiv(np.poly1d([1, 0, -1]), np.poly1d([1, 1])), - (np.poly1d([1., -1.]), np.poly1d([0.]))) - - def test_poly1d_misc(self): - p = np.poly1d([1., 2, 3]) - assert_equal(np.asarray(p), np.array([1., 2., 3.])) - assert_equal(len(p), 2) - assert_equal((p[0], p[1], p[2], p[3]), (3.0, 2.0, 1.0, 0)) - - def test_poly1d_variable_arg(self): - q = np.poly1d([1., 2, 3], variable='y') - assert_equal(str(q), - ' 2\n' - '1 y + 2 y + 3') - q = np.poly1d([1., 2, 3], variable='lambda') - assert_equal(str(q), - ' 2\n' - '1 lambda + 2 lambda + 3') - - def test_poly(self): - assert_array_almost_equal(np.poly([3, -np.sqrt(2), np.sqrt(2)]), - [1, -3, -2, 6]) - - # From matlab docs - A = [[1, 2, 3], [4, 5, 6], [7, 8, 0]] - assert_array_almost_equal(np.poly(A), [1, -6, -72, -27]) - - # Should produce real output for perfect conjugates - assert_(np.isrealobj(np.poly([+1.082j, +2.613j, -2.613j, -1.082j]))) - assert_(np.isrealobj(np.poly([0+1j, -0+-1j, 1+2j, - 1-2j, 1.+3.5j, 1-3.5j]))) - assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j, 1+3j, 1-3.j]))) - assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j]))) - assert_(np.isrealobj(np.poly([1j, -1j, 2j, -2j]))) - assert_(np.isrealobj(np.poly([1j, -1j]))) - assert_(np.isrealobj(np.poly([1, -1]))) - - assert_(np.iscomplexobj(np.poly([1j, -1.0000001j]))) - - np.random.seed(42) - a = np.random.randn(100) + 1j*np.random.randn(100) 
- assert_(np.isrealobj(np.poly(np.concatenate((a, np.conjugate(a)))))) - - def test_roots(self): - assert_array_equal(np.roots([1, 0, 0]), [0, 0]) - - def test_str_leading_zeros(self): - p = np.poly1d([4, 3, 2, 1]) - p[3] = 0 - assert_equal(str(p), - " 2\n" - "3 x + 2 x + 1") - - p = np.poly1d([1, 2]) - p[0] = 0 - p[1] = 0 - assert_equal(str(p), " \n0") - - def test_polyfit(self): - c = np.array([3., 2., 1.]) - x = np.linspace(0, 2, 7) - y = np.polyval(c, x) - err = [1, -1, 1, -1, 1, -1, 1] - weights = np.arange(8, 1, -1)**2/7.0 - - # Check exception when too few points for variance estimate. Note that - # the estimate requires the number of data points to exceed - # degree + 1 - assert_raises(ValueError, np.polyfit, - [1], [1], deg=0, cov=True) - - # check 1D case - m, cov = np.polyfit(x, y+err, 2, cov=True) - est = [3.8571, 0.2857, 1.619] - assert_almost_equal(est, m, decimal=4) - val0 = [[ 1.4694, -2.9388, 0.8163], - [-2.9388, 6.3673, -2.1224], - [ 0.8163, -2.1224, 1.161 ]] - assert_almost_equal(val0, cov, decimal=4) - - m2, cov2 = np.polyfit(x, y+err, 2, w=weights, cov=True) - assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4) - val = [[ 4.3964, -5.0052, 0.4878], - [-5.0052, 6.8067, -0.9089], - [ 0.4878, -0.9089, 0.3337]] - assert_almost_equal(val, cov2, decimal=4) - - m3, cov3 = np.polyfit(x, y+err, 2, w=weights, cov="unscaled") - assert_almost_equal([4.8927, -1.0177, 1.7768], m3, decimal=4) - val = [[ 0.1473, -0.1677, 0.0163], - [-0.1677, 0.228 , -0.0304], - [ 0.0163, -0.0304, 0.0112]] - assert_almost_equal(val, cov3, decimal=4) - - # check 2D (n,1) case - y = y[:, np.newaxis] - c = c[:, np.newaxis] - assert_almost_equal(c, np.polyfit(x, y, 2)) - # check 2D (n,2) case - yy = np.concatenate((y, y), axis=1) - cc = np.concatenate((c, c), axis=1) - assert_almost_equal(cc, np.polyfit(x, yy, 2)) - - m, cov = np.polyfit(x, yy + np.array(err)[:, np.newaxis], 2, cov=True) - assert_almost_equal(est, m[:, 0], decimal=4) - assert_almost_equal(est, m[:, 1], 
decimal=4) - assert_almost_equal(val0, cov[:, :, 0], decimal=4) - assert_almost_equal(val0, cov[:, :, 1], decimal=4) - - # check order 1 (deg=0) case, were the analytic results are simple - np.random.seed(123) - y = np.random.normal(size=(4, 10000)) - mean, cov = np.polyfit(np.zeros(y.shape[0]), y, deg=0, cov=True) - # Should get sigma_mean = sigma/sqrt(N) = 1./sqrt(4) = 0.5. - assert_allclose(mean.std(), 0.5, atol=0.01) - assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01) - # Without scaling, since reduced chi2 is 1, the result should be the same. - mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=np.ones(y.shape[0]), - deg=0, cov="unscaled") - assert_allclose(mean.std(), 0.5, atol=0.01) - assert_almost_equal(np.sqrt(cov.mean()), 0.5) - # If we estimate our errors wrong, no change with scaling: - w = np.full(y.shape[0], 1./0.5) - mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov=True) - assert_allclose(mean.std(), 0.5, atol=0.01) - assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01) - # But if we do not scale, our estimate for the error in the mean will - # differ. 
- mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov="unscaled") - assert_allclose(mean.std(), 0.5, atol=0.01) - assert_almost_equal(np.sqrt(cov.mean()), 0.25) - - def test_objects(self): - from decimal import Decimal - p = np.poly1d([Decimal('4.0'), Decimal('3.0'), Decimal('2.0')]) - p2 = p * Decimal('1.333333333333333') - assert_(p2[1] == Decimal("3.9999999999999990")) - p2 = p.deriv() - assert_(p2[1] == Decimal('8.0')) - p2 = p.integ() - assert_(p2[3] == Decimal("1.333333333333333333333333333")) - assert_(p2[2] == Decimal('1.5')) - assert_(np.issubdtype(p2.coeffs.dtype, np.object_)) - p = np.poly([Decimal(1), Decimal(2)]) - assert_equal(np.poly([Decimal(1), Decimal(2)]), - [1, Decimal(-3), Decimal(2)]) - - def test_complex(self): - p = np.poly1d([3j, 2j, 1j]) - p2 = p.integ() - assert_((p2.coeffs == [1j, 1j, 1j, 0]).all()) - p2 = p.deriv() - assert_((p2.coeffs == [6j, 2j]).all()) - - def test_integ_coeffs(self): - p = np.poly1d([3, 2, 1]) - p2 = p.integ(3, k=[9, 7, 6]) - assert_( - (p2.coeffs == [1/4./5., 1/3./4., 1/2./3., 9/1./2., 7, 6]).all()) - - def test_zero_dims(self): - try: - np.poly(np.zeros((0, 0))) - except ValueError: - pass - - def test_poly_int_overflow(self): - """ - Regression test for gh-5096. 
- """ - v = np.arange(1, 21) - assert_almost_equal(np.poly(v), np.poly(np.diag(v))) - - def test_poly_eq(self): - p = np.poly1d([1, 2, 3]) - p2 = np.poly1d([1, 2, 4]) - assert_equal(p == None, False) - assert_equal(p != None, True) - assert_equal(p == p, True) - assert_equal(p == p2, False) - assert_equal(p != p2, True) - - def test_polydiv(self): - b = np.poly1d([2, 6, 6, 1]) - a = np.poly1d([-1j, (1+2j), -(2+1j), 1]) - q, r = np.polydiv(b, a) - assert_equal(q.coeffs.dtype, np.complex128) - assert_equal(r.coeffs.dtype, np.complex128) - assert_equal(q*a + r, b) - - def test_poly_coeffs_mutable(self): - """ Coefficients should be modifiable """ - p = np.poly1d([1, 2, 3]) - - p.coeffs += 1 - assert_equal(p.coeffs, [2, 3, 4]) - - p.coeffs[2] += 10 - assert_equal(p.coeffs, [2, 3, 14]) - - # this never used to be allowed - let's not add features to deprecated - # APIs - assert_raises(AttributeError, setattr, p, 'coeffs', np.array(1)) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_recfunctions.py b/venv/lib/python3.7/site-packages/numpy/lib/tests/test_recfunctions.py deleted file mode 100644 index fa5f4de..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_recfunctions.py +++ /dev/null @@ -1,982 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import pytest - -import numpy as np -import numpy.ma as ma -from numpy.ma.mrecords import MaskedRecords -from numpy.ma.testutils import assert_equal -from numpy.testing import assert_, assert_raises -from numpy.lib.recfunctions import ( - drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields, - find_duplicates, merge_arrays, append_fields, stack_arrays, join_by, - repack_fields, unstructured_to_structured, structured_to_unstructured, - apply_along_fields, require_fields, assign_fields_by_name) -get_fieldspec = np.lib.recfunctions._get_fieldspec -get_names = np.lib.recfunctions.get_names -get_names_flat = np.lib.recfunctions.get_names_flat -zip_descr = 
np.lib.recfunctions._zip_descr -zip_dtype = np.lib.recfunctions._zip_dtype - - -class TestRecFunctions(object): - # Misc tests - - def setup(self): - x = np.array([1, 2, ]) - y = np.array([10, 20, 30]) - z = np.array([('A', 1.), ('B', 2.)], - dtype=[('A', '|S3'), ('B', float)]) - w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], - dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - self.data = (w, x, y, z) - - def test_zip_descr(self): - # Test zip_descr - (w, x, y, z) = self.data - - # Std array - test = zip_descr((x, x), flatten=True) - assert_equal(test, - np.dtype([('', int), ('', int)])) - test = zip_descr((x, x), flatten=False) - assert_equal(test, - np.dtype([('', int), ('', int)])) - - # Std & flexible-dtype - test = zip_descr((x, z), flatten=True) - assert_equal(test, - np.dtype([('', int), ('A', '|S3'), ('B', float)])) - test = zip_descr((x, z), flatten=False) - assert_equal(test, - np.dtype([('', int), - ('', [('A', '|S3'), ('B', float)])])) - - # Standard & nested dtype - test = zip_descr((x, w), flatten=True) - assert_equal(test, - np.dtype([('', int), - ('a', int), - ('ba', float), ('bb', int)])) - test = zip_descr((x, w), flatten=False) - assert_equal(test, - np.dtype([('', int), - ('', [('a', int), - ('b', [('ba', float), ('bb', int)])])])) - - def test_drop_fields(self): - # Test drop_fields - a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], - dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - - # A basic field - test = drop_fields(a, 'a') - control = np.array([((2, 3.0),), ((5, 6.0),)], - dtype=[('b', [('ba', float), ('bb', int)])]) - assert_equal(test, control) - - # Another basic field (but nesting two fields) - test = drop_fields(a, 'b') - control = np.array([(1,), (4,)], dtype=[('a', int)]) - assert_equal(test, control) - - # A nested sub-field - test = drop_fields(a, ['ba', ]) - control = np.array([(1, (3.0,)), (4, (6.0,))], - dtype=[('a', int), ('b', [('bb', int)])]) - assert_equal(test, control) - - # All the nested sub-field from a 
field: zap that field - test = drop_fields(a, ['ba', 'bb']) - control = np.array([(1,), (4,)], dtype=[('a', int)]) - assert_equal(test, control) - - # dropping all fields results in an array with no fields - test = drop_fields(a, ['a', 'b']) - control = np.array([(), ()], dtype=[]) - assert_equal(test, control) - - def test_rename_fields(self): - # Test rename fields - a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], - dtype=[('a', int), - ('b', [('ba', float), ('bb', (float, 2))])]) - test = rename_fields(a, {'a': 'A', 'bb': 'BB'}) - newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])] - control = a.view(newdtype) - assert_equal(test.dtype, newdtype) - assert_equal(test, control) - - def test_get_names(self): - # Test get_names - ndtype = np.dtype([('A', '|S3'), ('B', float)]) - test = get_names(ndtype) - assert_equal(test, ('A', 'B')) - - ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) - test = get_names(ndtype) - assert_equal(test, ('a', ('b', ('ba', 'bb')))) - - ndtype = np.dtype([('a', int), ('b', [])]) - test = get_names(ndtype) - assert_equal(test, ('a', ('b', ()))) - - ndtype = np.dtype([]) - test = get_names(ndtype) - assert_equal(test, ()) - - def test_get_names_flat(self): - # Test get_names_flat - ndtype = np.dtype([('A', '|S3'), ('B', float)]) - test = get_names_flat(ndtype) - assert_equal(test, ('A', 'B')) - - ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) - test = get_names_flat(ndtype) - assert_equal(test, ('a', 'b', 'ba', 'bb')) - - ndtype = np.dtype([('a', int), ('b', [])]) - test = get_names_flat(ndtype) - assert_equal(test, ('a', 'b')) - - ndtype = np.dtype([]) - test = get_names_flat(ndtype) - assert_equal(test, ()) - - def test_get_fieldstructure(self): - # Test get_fieldstructure - - # No nested fields - ndtype = np.dtype([('A', '|S3'), ('B', float)]) - test = get_fieldstructure(ndtype) - assert_equal(test, {'A': [], 'B': []}) - - # One 1-nested field - ndtype = 
np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) - test = get_fieldstructure(ndtype) - assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']}) - - # One 2-nested fields - ndtype = np.dtype([('A', int), - ('B', [('BA', int), - ('BB', [('BBA', int), ('BBB', int)])])]) - test = get_fieldstructure(ndtype) - control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], - 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']} - assert_equal(test, control) - - # 0 fields - ndtype = np.dtype([]) - test = get_fieldstructure(ndtype) - assert_equal(test, {}) - - def test_find_duplicates(self): - # Test find_duplicates - a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')), - (1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))], - mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)), - (0, (0, 0)), (1, (0, 0)), (0, (1, 0))], - dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])]) - - test = find_duplicates(a, ignoremask=False, return_index=True) - control = [0, 2] - assert_equal(sorted(test[-1]), control) - assert_equal(test[0], a[test[-1]]) - - test = find_duplicates(a, key='A', return_index=True) - control = [0, 1, 2, 3, 5] - assert_equal(sorted(test[-1]), control) - assert_equal(test[0], a[test[-1]]) - - test = find_duplicates(a, key='B', return_index=True) - control = [0, 1, 2, 4] - assert_equal(sorted(test[-1]), control) - assert_equal(test[0], a[test[-1]]) - - test = find_duplicates(a, key='BA', return_index=True) - control = [0, 1, 2, 4] - assert_equal(sorted(test[-1]), control) - assert_equal(test[0], a[test[-1]]) - - test = find_duplicates(a, key='BB', return_index=True) - control = [0, 1, 2, 3, 4] - assert_equal(sorted(test[-1]), control) - assert_equal(test[0], a[test[-1]]) - - def test_find_duplicates_ignoremask(self): - # Test the ignoremask option of find_duplicates - ndtype = [('a', int)] - a = ma.array([1, 1, 1, 2, 2, 3, 3], - mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) - test = find_duplicates(a, ignoremask=True, return_index=True) - control = [0, 1, 3, 4] - 
assert_equal(sorted(test[-1]), control) - assert_equal(test[0], a[test[-1]]) - - test = find_duplicates(a, ignoremask=False, return_index=True) - control = [0, 1, 2, 3, 4, 6] - assert_equal(sorted(test[-1]), control) - assert_equal(test[0], a[test[-1]]) - - def test_repack_fields(self): - dt = np.dtype('u1,f4,i8', align=True) - a = np.zeros(2, dtype=dt) - - assert_equal(repack_fields(dt), np.dtype('u1,f4,i8')) - assert_equal(repack_fields(a).itemsize, 13) - assert_equal(repack_fields(repack_fields(dt), align=True), dt) - - # make sure type is preserved - dt = np.dtype((np.record, dt)) - assert_(repack_fields(dt).type is np.record) - - def test_structured_to_unstructured(self): - a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) - out = structured_to_unstructured(a) - assert_equal(out, np.zeros((4,5), dtype='f8')) - - b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], - dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) - out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1) - assert_equal(out, np.array([ 3. , 5.5, 9. , 11. ])) - out = np.mean(structured_to_unstructured(b[['x']]), axis=-1) - assert_equal(out, np.array([ 1. , 4. , 7. , 10. ])) - - c = np.arange(20).reshape((4,5)) - out = unstructured_to_structured(c, a.dtype) - want = np.array([( 0, ( 1., 2), [ 3., 4.]), - ( 5, ( 6., 7), [ 8., 9.]), - (10, (11., 12), [13., 14.]), - (15, (16., 17), [18., 19.])], - dtype=[('a', 'i4'), - ('b', [('f0', 'f4'), ('f1', 'u2')]), - ('c', 'f4', (2,))]) - assert_equal(out, want) - - d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], - dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) - assert_equal(apply_along_fields(np.mean, d), - np.array([ 8.0/3, 16.0/3, 26.0/3, 11. ])) - assert_equal(apply_along_fields(np.mean, d[['x', 'z']]), - np.array([ 3. , 5.5, 9. , 11. 
])) - - # check that for uniform field dtypes we get a view, not a copy: - d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], - dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')]) - dd = structured_to_unstructured(d) - ddd = unstructured_to_structured(dd, d.dtype) - assert_(dd.base is d) - assert_(ddd.base is d) - - # including uniform fields with subarrays unpacked - d = np.array([(1, [2, 3], [[ 4, 5], [ 6, 7]]), - (8, [9, 10], [[11, 12], [13, 14]])], - dtype=[('x0', 'i4'), ('x1', ('i4', 2)), - ('x2', ('i4', (2, 2)))]) - dd = structured_to_unstructured(d) - ddd = unstructured_to_structured(dd, d.dtype) - assert_(dd.base is d) - assert_(ddd.base is d) - - # test that nested fields with identical names don't break anything - point = np.dtype([('x', int), ('y', int)]) - triangle = np.dtype([('a', point), ('b', point), ('c', point)]) - arr = np.zeros(10, triangle) - res = structured_to_unstructured(arr, dtype=int) - assert_equal(res, np.zeros((10, 6), dtype=int)) - - - # test nested combinations of subarrays and structured arrays, gh-13333 - def subarray(dt, shape): - return np.dtype((dt, shape)) - - def structured(*dts): - return np.dtype([('x{}'.format(i), dt) for i, dt in enumerate(dts)]) - - def inspect(dt, dtype=None): - arr = np.zeros((), dt) - ret = structured_to_unstructured(arr, dtype=dtype) - backarr = unstructured_to_structured(ret, dt) - return ret.shape, ret.dtype, backarr.dtype - - dt = structured(subarray(structured(np.int32, np.int32), 3)) - assert_equal(inspect(dt), ((6,), np.int32, dt)) - - dt = structured(subarray(subarray(np.int32, 2), 2)) - assert_equal(inspect(dt), ((4,), np.int32, dt)) - - dt = structured(np.int32) - assert_equal(inspect(dt), ((1,), np.int32, dt)) - - dt = structured(np.int32, subarray(subarray(np.int32, 2), 2)) - assert_equal(inspect(dt), ((5,), np.int32, dt)) - - dt = structured() - assert_raises(ValueError, structured_to_unstructured, np.zeros(3, dt)) - - # these currently don't work, but we may make it work in the 
future - assert_raises(NotImplementedError, structured_to_unstructured, - np.zeros(3, dt), dtype=np.int32) - assert_raises(NotImplementedError, unstructured_to_structured, - np.zeros((3,0), dtype=np.int32)) - - def test_field_assignment_by_name(self): - a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')]) - newdt = [('b', 'f4'), ('c', 'u1')] - - assert_equal(require_fields(a, newdt), np.ones(2, newdt)) - - b = np.array([(1,2), (3,4)], dtype=newdt) - assign_fields_by_name(a, b, zero_unassigned=False) - assert_equal(a, np.array([(1,1,2),(1,3,4)], dtype=a.dtype)) - assign_fields_by_name(a, b) - assert_equal(a, np.array([(0,1,2),(0,3,4)], dtype=a.dtype)) - - # test nested fields - a = np.ones(2, dtype=[('a', [('b', 'f8'), ('c', 'u1')])]) - newdt = [('a', [('c', 'u1')])] - assert_equal(require_fields(a, newdt), np.ones(2, newdt)) - b = np.array([((2,),), ((3,),)], dtype=newdt) - assign_fields_by_name(a, b, zero_unassigned=False) - assert_equal(a, np.array([((1,2),), ((1,3),)], dtype=a.dtype)) - assign_fields_by_name(a, b) - assert_equal(a, np.array([((0,2),), ((0,3),)], dtype=a.dtype)) - - # test unstructured code path for 0d arrays - a, b = np.array(3), np.array(0) - assign_fields_by_name(b, a) - assert_equal(b[()], 3) - - -class TestRecursiveFillFields(object): - # Test recursive_fill_fields. 
- def test_simple_flexible(self): - # Test recursive_fill_fields on flexible-array - a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)]) - b = np.zeros((3,), dtype=a.dtype) - test = recursive_fill_fields(a, b) - control = np.array([(1, 10.), (2, 20.), (0, 0.)], - dtype=[('A', int), ('B', float)]) - assert_equal(test, control) - - def test_masked_flexible(self): - # Test recursive_fill_fields on masked flexible-array - a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)], - dtype=[('A', int), ('B', float)]) - b = ma.zeros((3,), dtype=a.dtype) - test = recursive_fill_fields(a, b) - control = ma.array([(1, 10.), (2, 20.), (0, 0.)], - mask=[(0, 1), (1, 0), (0, 0)], - dtype=[('A', int), ('B', float)]) - assert_equal(test, control) - - -class TestMergeArrays(object): - # Test merge_arrays - - def setup(self): - x = np.array([1, 2, ]) - y = np.array([10, 20, 30]) - z = np.array( - [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) - w = np.array( - [(1, (2, 3.0, ())), (4, (5, 6.0, ()))], - dtype=[('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])]) - self.data = (w, x, y, z) - - def test_solo(self): - # Test merge_arrays on a single array. 
- (_, x, _, z) = self.data - - test = merge_arrays(x) - control = np.array([(1,), (2,)], dtype=[('f0', int)]) - assert_equal(test, control) - test = merge_arrays((x,)) - assert_equal(test, control) - - test = merge_arrays(z, flatten=False) - assert_equal(test, z) - test = merge_arrays(z, flatten=True) - assert_equal(test, z) - - def test_solo_w_flatten(self): - # Test merge_arrays on a single array w & w/o flattening - w = self.data[0] - test = merge_arrays(w, flatten=False) - assert_equal(test, w) - - test = merge_arrays(w, flatten=True) - control = np.array([(1, 2, 3.0), (4, 5, 6.0)], - dtype=[('a', int), ('ba', float), ('bb', int)]) - assert_equal(test, control) - - def test_standard(self): - # Test standard & standard - # Test merge arrays - (_, x, y, _) = self.data - test = merge_arrays((x, y), usemask=False) - control = np.array([(1, 10), (2, 20), (-1, 30)], - dtype=[('f0', int), ('f1', int)]) - assert_equal(test, control) - - test = merge_arrays((x, y), usemask=True) - control = ma.array([(1, 10), (2, 20), (-1, 30)], - mask=[(0, 0), (0, 0), (1, 0)], - dtype=[('f0', int), ('f1', int)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - - def test_flatten(self): - # Test standard & flexible - (_, x, _, z) = self.data - test = merge_arrays((x, z), flatten=True) - control = np.array([(1, 'A', 1.), (2, 'B', 2.)], - dtype=[('f0', int), ('A', '|S3'), ('B', float)]) - assert_equal(test, control) - - test = merge_arrays((x, z), flatten=False) - control = np.array([(1, ('A', 1.)), (2, ('B', 2.))], - dtype=[('f0', int), - ('f1', [('A', '|S3'), ('B', float)])]) - assert_equal(test, control) - - def test_flatten_wflexible(self): - # Test flatten standard & nested - (w, x, _, _) = self.data - test = merge_arrays((x, w), flatten=True) - control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)], - dtype=[('f0', int), - ('a', int), ('ba', float), ('bb', int)]) - assert_equal(test, control) - - test = merge_arrays((x, w), flatten=False) - controldtype = [('f0', 
int), - ('f1', [('a', int), - ('b', [('ba', float), ('bb', int), ('bc', [])])])] - control = np.array([(1., (1, (2, 3.0, ()))), (2, (4, (5, 6.0, ())))], - dtype=controldtype) - assert_equal(test, control) - - def test_wmasked_arrays(self): - # Test merge_arrays masked arrays - (_, x, _, _) = self.data - mx = ma.array([1, 2, 3], mask=[1, 0, 0]) - test = merge_arrays((x, mx), usemask=True) - control = ma.array([(1, 1), (2, 2), (-1, 3)], - mask=[(0, 1), (0, 0), (1, 0)], - dtype=[('f0', int), ('f1', int)]) - assert_equal(test, control) - test = merge_arrays((x, mx), usemask=True, asrecarray=True) - assert_equal(test, control) - assert_(isinstance(test, MaskedRecords)) - - def test_w_singlefield(self): - # Test single field - test = merge_arrays((np.array([1, 2]).view([('a', int)]), - np.array([10., 20., 30.])),) - control = ma.array([(1, 10.), (2, 20.), (-1, 30.)], - mask=[(0, 0), (0, 0), (1, 0)], - dtype=[('a', int), ('f1', float)]) - assert_equal(test, control) - - def test_w_shorter_flex(self): - # Test merge_arrays w/ a shorter flexndarray. 
- z = self.data[-1] - - # Fixme, this test looks incomplete and broken - #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) - #control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], - # dtype=[('A', '|S3'), ('B', float), ('C', int)]) - #assert_equal(test, control) - - # Hack to avoid pyflakes warnings about unused variables - merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) - np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], - dtype=[('A', '|S3'), ('B', float), ('C', int)]) - - def test_singlerecord(self): - (_, x, y, z) = self.data - test = merge_arrays((x[0], y[0], z[0]), usemask=False) - control = np.array([(1, 10, ('A', 1))], - dtype=[('f0', int), - ('f1', int), - ('f2', [('A', '|S3'), ('B', float)])]) - assert_equal(test, control) - - -class TestAppendFields(object): - # Test append_fields - - def setup(self): - x = np.array([1, 2, ]) - y = np.array([10, 20, 30]) - z = np.array( - [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) - w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], - dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - self.data = (w, x, y, z) - - def test_append_single(self): - # Test simple case - (_, x, _, _) = self.data - test = append_fields(x, 'A', data=[10, 20, 30]) - control = ma.array([(1, 10), (2, 20), (-1, 30)], - mask=[(0, 0), (0, 0), (1, 0)], - dtype=[('f0', int), ('A', int)],) - assert_equal(test, control) - - def test_append_double(self): - # Test simple case - (_, x, _, _) = self.data - test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]]) - control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)], - mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)], - dtype=[('f0', int), ('A', int), ('B', int)],) - assert_equal(test, control) - - def test_append_on_flex(self): - # Test append_fields on flexible type arrays - z = self.data[-1] - test = append_fields(z, 'C', data=[10, 20, 30]) - control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)], - mask=[(0, 0, 0), 
(0, 0, 0), (1, 1, 0)], - dtype=[('A', '|S3'), ('B', float), ('C', int)],) - assert_equal(test, control) - - def test_append_on_nested(self): - # Test append_fields on nested fields - w = self.data[0] - test = append_fields(w, 'C', data=[10, 20, 30]) - control = ma.array([(1, (2, 3.0), 10), - (4, (5, 6.0), 20), - (-1, (-1, -1.), 30)], - mask=[( - 0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)], - dtype=[('a', int), - ('b', [('ba', float), ('bb', int)]), - ('C', int)],) - assert_equal(test, control) - - -class TestStackArrays(object): - # Test stack_arrays - def setup(self): - x = np.array([1, 2, ]) - y = np.array([10, 20, 30]) - z = np.array( - [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) - w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], - dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - self.data = (w, x, y, z) - - def test_solo(self): - # Test stack_arrays on single arrays - (_, x, _, _) = self.data - test = stack_arrays((x,)) - assert_equal(test, x) - assert_(test is x) - - test = stack_arrays(x) - assert_equal(test, x) - assert_(test is x) - - def test_unnamed_fields(self): - # Tests combinations of arrays w/o named fields - (_, x, y, _) = self.data - - test = stack_arrays((x, x), usemask=False) - control = np.array([1, 2, 1, 2]) - assert_equal(test, control) - - test = stack_arrays((x, y), usemask=False) - control = np.array([1, 2, 10, 20, 30]) - assert_equal(test, control) - - test = stack_arrays((y, x), usemask=False) - control = np.array([10, 20, 30, 1, 2]) - assert_equal(test, control) - - def test_unnamed_and_named_fields(self): - # Test combination of arrays w/ & w/o named fields - (_, x, _, z) = self.data - - test = stack_arrays((x, z)) - control = ma.array([(1, -1, -1), (2, -1, -1), - (-1, 'A', 1), (-1, 'B', 2)], - mask=[(0, 1, 1), (0, 1, 1), - (1, 0, 0), (1, 0, 0)], - dtype=[('f0', int), ('A', '|S3'), ('B', float)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - - test = stack_arrays((z, x)) - control = 
ma.array([('A', 1, -1), ('B', 2, -1), - (-1, -1, 1), (-1, -1, 2), ], - mask=[(0, 0, 1), (0, 0, 1), - (1, 1, 0), (1, 1, 0)], - dtype=[('A', '|S3'), ('B', float), ('f2', int)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - - test = stack_arrays((z, z, x)) - control = ma.array([('A', 1, -1), ('B', 2, -1), - ('A', 1, -1), ('B', 2, -1), - (-1, -1, 1), (-1, -1, 2), ], - mask=[(0, 0, 1), (0, 0, 1), - (0, 0, 1), (0, 0, 1), - (1, 1, 0), (1, 1, 0)], - dtype=[('A', '|S3'), ('B', float), ('f2', int)]) - assert_equal(test, control) - - def test_matching_named_fields(self): - # Test combination of arrays w/ matching field names - (_, x, _, z) = self.data - zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], - dtype=[('A', '|S3'), ('B', float), ('C', float)]) - test = stack_arrays((z, zz)) - control = ma.array([('A', 1, -1), ('B', 2, -1), - ( - 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], - dtype=[('A', '|S3'), ('B', float), ('C', float)], - mask=[(0, 0, 1), (0, 0, 1), - (0, 0, 0), (0, 0, 0), (0, 0, 0)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - - test = stack_arrays((z, zz, x)) - ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)] - control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1), - ('a', 10., 100., -1), ('b', 20., 200., -1), - ('c', 30., 300., -1), - (-1, -1, -1, 1), (-1, -1, -1, 2)], - dtype=ndtype, - mask=[(0, 0, 1, 1), (0, 0, 1, 1), - (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), - (1, 1, 1, 0), (1, 1, 1, 0)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - - def test_defaults(self): - # Test defaults: no exception raised if keys of defaults are not fields. 
- (_, _, _, z) = self.data - zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], - dtype=[('A', '|S3'), ('B', float), ('C', float)]) - defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.} - test = stack_arrays((z, zz), defaults=defaults) - control = ma.array([('A', 1, -9999.), ('B', 2, -9999.), - ( - 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], - dtype=[('A', '|S3'), ('B', float), ('C', float)], - mask=[(0, 0, 1), (0, 0, 1), - (0, 0, 0), (0, 0, 0), (0, 0, 0)]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - - def test_autoconversion(self): - # Tests autoconversion - adtype = [('A', int), ('B', bool), ('C', float)] - a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) - bdtype = [('A', int), ('B', float), ('C', float)] - b = ma.array([(4, 5, 6)], dtype=bdtype) - control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], - dtype=bdtype) - test = stack_arrays((a, b), autoconvert=True) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - with assert_raises(TypeError): - stack_arrays((a, b), autoconvert=False) - - def test_checktitles(self): - # Test using titles in the field names - adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] - a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) - bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] - b = ma.array([(4, 5, 6)], dtype=bdtype) - test = stack_arrays((a, b)) - control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], - dtype=bdtype) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - - def test_subdtype(self): - z = np.array([ - ('A', 1), ('B', 2) - ], dtype=[('A', '|S3'), ('B', float, (1,))]) - zz = np.array([ - ('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.) 
- ], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)]) - - res = stack_arrays((z, zz)) - expected = ma.array( - data=[ - (b'A', [1.0], 0), - (b'B', [2.0], 0), - (b'a', [10.0], 100.0), - (b'b', [20.0], 200.0), - (b'c', [30.0], 300.0)], - mask=[ - (False, [False], True), - (False, [False], True), - (False, [False], False), - (False, [False], False), - (False, [False], False) - ], - dtype=zz.dtype - ) - assert_equal(res.dtype, expected.dtype) - assert_equal(res, expected) - assert_equal(res.mask, expected.mask) - - -class TestJoinBy(object): - def setup(self): - self.a = np.array(list(zip(np.arange(10), np.arange(50, 60), - np.arange(100, 110))), - dtype=[('a', int), ('b', int), ('c', int)]) - self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75), - np.arange(100, 110))), - dtype=[('a', int), ('b', int), ('d', int)]) - - def test_inner_join(self): - # Basic test of join_by - a, b = self.a, self.b - - test = join_by('a', a, b, jointype='inner') - control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101), - (7, 57, 67, 107, 102), (8, 58, 68, 108, 103), - (9, 59, 69, 109, 104)], - dtype=[('a', int), ('b1', int), ('b2', int), - ('c', int), ('d', int)]) - assert_equal(test, control) - - def test_join(self): - a, b = self.a, self.b - - # Fixme, this test is broken - #test = join_by(('a', 'b'), a, b) - #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101), - # (7, 57, 107, 102), (8, 58, 108, 103), - # (9, 59, 109, 104)], - # dtype=[('a', int), ('b', int), - # ('c', int), ('d', int)]) - #assert_equal(test, control) - - # Hack to avoid pyflakes unused variable warnings - join_by(('a', 'b'), a, b) - np.array([(5, 55, 105, 100), (6, 56, 106, 101), - (7, 57, 107, 102), (8, 58, 108, 103), - (9, 59, 109, 104)], - dtype=[('a', int), ('b', int), - ('c', int), ('d', int)]) - - def test_join_subdtype(self): - # tests the bug in https://stackoverflow.com/q/44769632/102441 - from numpy.lib import recfunctions as rfn - foo = np.array([(1,)], - dtype=[('key', 
int)]) - bar = np.array([(1, np.array([1,2,3]))], - dtype=[('key', int), ('value', 'uint16', 3)]) - res = join_by('key', foo, bar) - assert_equal(res, bar.view(ma.MaskedArray)) - - def test_outer_join(self): - a, b = self.a, self.b - - test = join_by(('a', 'b'), a, b, 'outer') - control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), - (2, 52, 102, -1), (3, 53, 103, -1), - (4, 54, 104, -1), (5, 55, 105, -1), - (5, 65, -1, 100), (6, 56, 106, -1), - (6, 66, -1, 101), (7, 57, 107, -1), - (7, 67, -1, 102), (8, 58, 108, -1), - (8, 68, -1, 103), (9, 59, 109, -1), - (9, 69, -1, 104), (10, 70, -1, 105), - (11, 71, -1, 106), (12, 72, -1, 107), - (13, 73, -1, 108), (14, 74, -1, 109)], - mask=[(0, 0, 0, 1), (0, 0, 0, 1), - (0, 0, 0, 1), (0, 0, 0, 1), - (0, 0, 0, 1), (0, 0, 0, 1), - (0, 0, 1, 0), (0, 0, 0, 1), - (0, 0, 1, 0), (0, 0, 0, 1), - (0, 0, 1, 0), (0, 0, 0, 1), - (0, 0, 1, 0), (0, 0, 0, 1), - (0, 0, 1, 0), (0, 0, 1, 0), - (0, 0, 1, 0), (0, 0, 1, 0), - (0, 0, 1, 0), (0, 0, 1, 0)], - dtype=[('a', int), ('b', int), - ('c', int), ('d', int)]) - assert_equal(test, control) - - def test_leftouter_join(self): - a, b = self.a, self.b - - test = join_by(('a', 'b'), a, b, 'leftouter') - control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), - (2, 52, 102, -1), (3, 53, 103, -1), - (4, 54, 104, -1), (5, 55, 105, -1), - (6, 56, 106, -1), (7, 57, 107, -1), - (8, 58, 108, -1), (9, 59, 109, -1)], - mask=[(0, 0, 0, 1), (0, 0, 0, 1), - (0, 0, 0, 1), (0, 0, 0, 1), - (0, 0, 0, 1), (0, 0, 0, 1), - (0, 0, 0, 1), (0, 0, 0, 1), - (0, 0, 0, 1), (0, 0, 0, 1)], - dtype=[('a', int), ('b', int), ('c', int), ('d', int)]) - assert_equal(test, control) - - def test_different_field_order(self): - # gh-8940 - a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')]) - b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')]) - # this should not give a FutureWarning: - j = join_by(['c', 'b'], a, b, jointype='inner', usemask=False) - assert_equal(j.dtype.names, ['b', 'c', 'a1', 'a2']) - - def 
test_duplicate_keys(self): - a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')]) - b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')]) - assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b) - - @pytest.mark.xfail(reason="See comment at gh-9343") - def test_same_name_different_dtypes_key(self): - a_dtype = np.dtype([('key', 'S5'), ('value', '= 3: - from io import StringIO - else: - from StringIO import StringIO - - dt = [("a", 'u1', 2), ("b", 'u1', 2)] - x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt) - assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt)) - - dt = [("a", [("a", 'u1', (1, 3)), ("b", 'u1')])] - x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt) - assert_equal(x, np.array([(((0, 1, 2), 3),)], dtype=dt)) - - dt = [("a", 'u1', (2, 2))] - x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt) - assert_equal(x, np.array([(((0, 1), (2, 3)),)], dtype=dt)) - - dt = [("a", 'u1', (2, 3, 2))] - x = np.loadtxt(StringIO("0 1 2 3 4 5 6 7 8 9 10 11"), dtype=dt) - data = [((((0, 1), (2, 3), (4, 5)), ((6, 7), (8, 9), (10, 11))),)] - assert_equal(x, np.array(data, dtype=dt)) - - def test_nansum_with_boolean(self): - # gh-2978 - a = np.zeros(2, dtype=bool) - try: - np.nansum(a) - except Exception: - raise AssertionError() - - def test_py3_compat(self): - # gh-2561 - # Test if the oldstyle class test is bypassed in python3 - class C(): - """Old-style class in python2, normal class in python3""" - pass - - out = open(os.devnull, 'w') - try: - np.info(C(), output=out) - except AttributeError: - raise AssertionError() - finally: - out.close() diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_shape_base.py b/venv/lib/python3.7/site-packages/numpy/lib/tests/test_shape_base.py deleted file mode 100644 index be1604a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_shape_base.py +++ /dev/null @@ -1,720 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -import warnings -import 
functools -import sys -import pytest - -from numpy.lib.shape_base import ( - apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit, - vsplit, dstack, column_stack, kron, tile, expand_dims, take_along_axis, - put_along_axis - ) -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises, assert_warns - ) - - -IS_64BIT = sys.maxsize > 2**32 - - -def _add_keepdims(func): - """ hack in keepdims behavior into a function taking an axis """ - @functools.wraps(func) - def wrapped(a, axis, **kwargs): - res = func(a, axis=axis, **kwargs) - if axis is None: - axis = 0 # res is now a scalar, so we can insert this anywhere - return np.expand_dims(res, axis=axis) - return wrapped - - -class TestTakeAlongAxis(object): - def test_argequivalent(self): - """ Test it translates from arg to """ - from numpy.random import rand - a = rand(3, 4, 5) - - funcs = [ - (np.sort, np.argsort, dict()), - (_add_keepdims(np.min), _add_keepdims(np.argmin), dict()), - (_add_keepdims(np.max), _add_keepdims(np.argmax), dict()), - (np.partition, np.argpartition, dict(kth=2)), - ] - - for func, argfunc, kwargs in funcs: - for axis in list(range(a.ndim)) + [None]: - a_func = func(a, axis=axis, **kwargs) - ai_func = argfunc(a, axis=axis, **kwargs) - assert_equal(a_func, take_along_axis(a, ai_func, axis=axis)) - - def test_invalid(self): - """ Test it errors when indices has too few dimensions """ - a = np.ones((10, 10)) - ai = np.ones((10, 2), dtype=np.intp) - - # sanity check - take_along_axis(a, ai, axis=1) - - # not enough indices - assert_raises(ValueError, take_along_axis, a, np.array(1), axis=1) - # bool arrays not allowed - assert_raises(IndexError, take_along_axis, a, ai.astype(bool), axis=1) - # float arrays not allowed - assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1) - # invalid axis - assert_raises(np.AxisError, take_along_axis, a, ai, axis=10) - - def test_empty(self): - """ Test everything is ok with empty results, even with 
inserted dims """ - a = np.ones((3, 4, 5)) - ai = np.ones((3, 0, 5), dtype=np.intp) - - actual = take_along_axis(a, ai, axis=1) - assert_equal(actual.shape, ai.shape) - - def test_broadcast(self): - """ Test that non-indexing dimensions are broadcast in both directions """ - a = np.ones((3, 4, 1)) - ai = np.ones((1, 2, 5), dtype=np.intp) - actual = take_along_axis(a, ai, axis=1) - assert_equal(actual.shape, (3, 2, 5)) - - -class TestPutAlongAxis(object): - def test_replace_max(self): - a_base = np.array([[10, 30, 20], [60, 40, 50]]) - - for axis in list(range(a_base.ndim)) + [None]: - # we mutate this in the loop - a = a_base.copy() - - # replace the max with a small value - i_max = _add_keepdims(np.argmax)(a, axis=axis) - put_along_axis(a, i_max, -99, axis=axis) - - # find the new minimum, which should max - i_min = _add_keepdims(np.argmin)(a, axis=axis) - - assert_equal(i_min, i_max) - - def test_broadcast(self): - """ Test that non-indexing dimensions are broadcast in both directions """ - a = np.ones((3, 4, 1)) - ai = np.arange(10, dtype=np.intp).reshape((1, 2, 5)) % 4 - put_along_axis(a, ai, 20, axis=1) - assert_equal(take_along_axis(a, ai, axis=1), 20) - - -class TestApplyAlongAxis(object): - def test_simple(self): - a = np.ones((20, 10), 'd') - assert_array_equal( - apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1])) - - def test_simple101(self): - a = np.ones((10, 101), 'd') - assert_array_equal( - apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1])) - - def test_3d(self): - a = np.arange(27).reshape((3, 3, 3)) - assert_array_equal(apply_along_axis(np.sum, 0, a), - [[27, 30, 33], [36, 39, 42], [45, 48, 51]]) - - def test_preserve_subclass(self): - def double(row): - return row * 2 - - class MyNDArray(np.ndarray): - pass - - m = np.array([[0, 1], [2, 3]]).view(MyNDArray) - expected = np.array([[0, 2], [4, 6]]).view(MyNDArray) - - result = apply_along_axis(double, 0, m) - assert_(isinstance(result, MyNDArray)) - assert_array_equal(result, expected) - 
- result = apply_along_axis(double, 1, m) - assert_(isinstance(result, MyNDArray)) - assert_array_equal(result, expected) - - def test_subclass(self): - class MinimalSubclass(np.ndarray): - data = 1 - - def minimal_function(array): - return array.data - - a = np.zeros((6, 3)).view(MinimalSubclass) - - assert_array_equal( - apply_along_axis(minimal_function, 0, a), np.array([1, 1, 1]) - ) - - def test_scalar_array(self, cls=np.ndarray): - a = np.ones((6, 3)).view(cls) - res = apply_along_axis(np.sum, 0, a) - assert_(isinstance(res, cls)) - assert_array_equal(res, np.array([6, 6, 6]).view(cls)) - - def test_0d_array(self, cls=np.ndarray): - def sum_to_0d(x): - """ Sum x, returning a 0d array of the same class """ - assert_equal(x.ndim, 1) - return np.squeeze(np.sum(x, keepdims=True)) - a = np.ones((6, 3)).view(cls) - res = apply_along_axis(sum_to_0d, 0, a) - assert_(isinstance(res, cls)) - assert_array_equal(res, np.array([6, 6, 6]).view(cls)) - - res = apply_along_axis(sum_to_0d, 1, a) - assert_(isinstance(res, cls)) - assert_array_equal(res, np.array([3, 3, 3, 3, 3, 3]).view(cls)) - - def test_axis_insertion(self, cls=np.ndarray): - def f1to2(x): - """produces an asymmetric non-square matrix from x""" - assert_equal(x.ndim, 1) - return (x[::-1] * x[1:,None]).view(cls) - - a2d = np.arange(6*3).reshape((6, 3)) - - # 2d insertion along first axis - actual = apply_along_axis(f1to2, 0, a2d) - expected = np.stack([ - f1to2(a2d[:,i]) for i in range(a2d.shape[1]) - ], axis=-1).view(cls) - assert_equal(type(actual), type(expected)) - assert_equal(actual, expected) - - # 2d insertion along last axis - actual = apply_along_axis(f1to2, 1, a2d) - expected = np.stack([ - f1to2(a2d[i,:]) for i in range(a2d.shape[0]) - ], axis=0).view(cls) - assert_equal(type(actual), type(expected)) - assert_equal(actual, expected) - - # 3d insertion along middle axis - a3d = np.arange(6*5*3).reshape((6, 5, 3)) - - actual = apply_along_axis(f1to2, 1, a3d) - expected = np.stack([ - np.stack([ - 
f1to2(a3d[i,:,j]) for i in range(a3d.shape[0]) - ], axis=0) - for j in range(a3d.shape[2]) - ], axis=-1).view(cls) - assert_equal(type(actual), type(expected)) - assert_equal(actual, expected) - - def test_subclass_preservation(self): - class MinimalSubclass(np.ndarray): - pass - self.test_scalar_array(MinimalSubclass) - self.test_0d_array(MinimalSubclass) - self.test_axis_insertion(MinimalSubclass) - - def test_axis_insertion_ma(self): - def f1to2(x): - """produces an asymmetric non-square matrix from x""" - assert_equal(x.ndim, 1) - res = x[::-1] * x[1:,None] - return np.ma.masked_where(res%5==0, res) - a = np.arange(6*3).reshape((6, 3)) - res = apply_along_axis(f1to2, 0, a) - assert_(isinstance(res, np.ma.masked_array)) - assert_equal(res.ndim, 3) - assert_array_equal(res[:,:,0].mask, f1to2(a[:,0]).mask) - assert_array_equal(res[:,:,1].mask, f1to2(a[:,1]).mask) - assert_array_equal(res[:,:,2].mask, f1to2(a[:,2]).mask) - - def test_tuple_func1d(self): - def sample_1d(x): - return x[1], x[0] - res = np.apply_along_axis(sample_1d, 1, np.array([[1, 2], [3, 4]])) - assert_array_equal(res, np.array([[2, 1], [4, 3]])) - - def test_empty(self): - # can't apply_along_axis when there's no chance to call the function - def never_call(x): - assert_(False) # should never be reached - - a = np.empty((0, 0)) - assert_raises(ValueError, np.apply_along_axis, never_call, 0, a) - assert_raises(ValueError, np.apply_along_axis, never_call, 1, a) - - # but it's sometimes ok with some non-zero dimensions - def empty_to_1(x): - assert_(len(x) == 0) - return 1 - - a = np.empty((10, 0)) - actual = np.apply_along_axis(empty_to_1, 1, a) - assert_equal(actual, np.ones(10)) - assert_raises(ValueError, np.apply_along_axis, empty_to_1, 0, a) - - def test_with_iterable_object(self): - # from issue 5248 - d = np.array([ - [{1, 11}, {2, 22}, {3, 33}], - [{4, 44}, {5, 55}, {6, 66}] - ]) - actual = np.apply_along_axis(lambda a: set.union(*a), 0, d) - expected = np.array([{1, 11, 4, 44}, {2, 22, 5, 
55}, {3, 33, 6, 66}]) - - assert_equal(actual, expected) - - # issue 8642 - assert_equal doesn't detect this! - for i in np.ndindex(actual.shape): - assert_equal(type(actual[i]), type(expected[i])) - - -class TestApplyOverAxes(object): - def test_simple(self): - a = np.arange(24).reshape(2, 3, 4) - aoa_a = apply_over_axes(np.sum, a, [0, 2]) - assert_array_equal(aoa_a, np.array([[[60], [92], [124]]])) - - -class TestExpandDims(object): - def test_functionality(self): - s = (2, 3, 4, 5) - a = np.empty(s) - for axis in range(-5, 4): - b = expand_dims(a, axis) - assert_(b.shape[axis] == 1) - assert_(np.squeeze(b).shape == s) - - def test_axis_tuple(self): - a = np.empty((3, 3, 3)) - assert np.expand_dims(a, axis=(0, 1, 2)).shape == (1, 1, 1, 3, 3, 3) - assert np.expand_dims(a, axis=(0, -1, -2)).shape == (1, 3, 3, 3, 1, 1) - assert np.expand_dims(a, axis=(0, 3, 5)).shape == (1, 3, 3, 1, 3, 1) - assert np.expand_dims(a, axis=(0, -3, -5)).shape == (1, 1, 3, 1, 3, 3) - - def test_axis_out_of_range(self): - s = (2, 3, 4, 5) - a = np.empty(s) - assert_raises(np.AxisError, expand_dims, a, -6) - assert_raises(np.AxisError, expand_dims, a, 5) - - a = np.empty((3, 3, 3)) - assert_raises(np.AxisError, expand_dims, a, (0, -6)) - assert_raises(np.AxisError, expand_dims, a, (0, 5)) - - def test_repeated_axis(self): - a = np.empty((3, 3, 3)) - assert_raises(ValueError, expand_dims, a, axis=(1, 1)) - - def test_subclasses(self): - a = np.arange(10).reshape((2, 5)) - a = np.ma.array(a, mask=a%3 == 0) - - expanded = np.expand_dims(a, axis=1) - assert_(isinstance(expanded, np.ma.MaskedArray)) - assert_equal(expanded.shape, (2, 1, 5)) - assert_equal(expanded.mask.shape, (2, 1, 5)) - - -class TestArraySplit(object): - def test_integer_0_split(self): - a = np.arange(10) - assert_raises(ValueError, array_split, a, 0) - - def test_integer_split(self): - a = np.arange(10) - res = array_split(a, 1) - desired = [np.arange(10)] - compare_results(res, desired) - - res = array_split(a, 2) - desired 
= [np.arange(5), np.arange(5, 10)] - compare_results(res, desired) - - res = array_split(a, 3) - desired = [np.arange(4), np.arange(4, 7), np.arange(7, 10)] - compare_results(res, desired) - - res = array_split(a, 4) - desired = [np.arange(3), np.arange(3, 6), np.arange(6, 8), - np.arange(8, 10)] - compare_results(res, desired) - - res = array_split(a, 5) - desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), - np.arange(6, 8), np.arange(8, 10)] - compare_results(res, desired) - - res = array_split(a, 6) - desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), - np.arange(6, 8), np.arange(8, 9), np.arange(9, 10)] - compare_results(res, desired) - - res = array_split(a, 7) - desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), - np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), - np.arange(9, 10)] - compare_results(res, desired) - - res = array_split(a, 8) - desired = [np.arange(2), np.arange(2, 4), np.arange(4, 5), - np.arange(5, 6), np.arange(6, 7), np.arange(7, 8), - np.arange(8, 9), np.arange(9, 10)] - compare_results(res, desired) - - res = array_split(a, 9) - desired = [np.arange(2), np.arange(2, 3), np.arange(3, 4), - np.arange(4, 5), np.arange(5, 6), np.arange(6, 7), - np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)] - compare_results(res, desired) - - res = array_split(a, 10) - desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3), - np.arange(3, 4), np.arange(4, 5), np.arange(5, 6), - np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), - np.arange(9, 10)] - compare_results(res, desired) - - res = array_split(a, 11) - desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3), - np.arange(3, 4), np.arange(4, 5), np.arange(5, 6), - np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), - np.arange(9, 10), np.array([])] - compare_results(res, desired) - - def test_integer_split_2D_rows(self): - a = np.array([np.arange(10), np.arange(10)]) - res = array_split(a, 3, axis=0) - tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]), - 
np.zeros((0, 10))] - compare_results(res, tgt) - assert_(a.dtype.type is res[-1].dtype.type) - - # Same thing for manual splits: - res = array_split(a, [0, 1, 2], axis=0) - tgt = [np.zeros((0, 10)), np.array([np.arange(10)]), - np.array([np.arange(10)])] - compare_results(res, tgt) - assert_(a.dtype.type is res[-1].dtype.type) - - def test_integer_split_2D_cols(self): - a = np.array([np.arange(10), np.arange(10)]) - res = array_split(a, 3, axis=-1) - desired = [np.array([np.arange(4), np.arange(4)]), - np.array([np.arange(4, 7), np.arange(4, 7)]), - np.array([np.arange(7, 10), np.arange(7, 10)])] - compare_results(res, desired) - - def test_integer_split_2D_default(self): - """ This will fail if we change default axis - """ - a = np.array([np.arange(10), np.arange(10)]) - res = array_split(a, 3) - tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]), - np.zeros((0, 10))] - compare_results(res, tgt) - assert_(a.dtype.type is res[-1].dtype.type) - # perhaps should check higher dimensions - - @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") - def test_integer_split_2D_rows_greater_max_int32(self): - a = np.broadcast_to([0], (1 << 32, 2)) - res = array_split(a, 4) - chunk = np.broadcast_to([0], (1 << 30, 2)) - tgt = [chunk] * 4 - for i in range(len(tgt)): - assert_equal(res[i].shape, tgt[i].shape) - - def test_index_split_simple(self): - a = np.arange(10) - indices = [1, 5, 7] - res = array_split(a, indices, axis=-1) - desired = [np.arange(0, 1), np.arange(1, 5), np.arange(5, 7), - np.arange(7, 10)] - compare_results(res, desired) - - def test_index_split_low_bound(self): - a = np.arange(10) - indices = [0, 5, 7] - res = array_split(a, indices, axis=-1) - desired = [np.array([]), np.arange(0, 5), np.arange(5, 7), - np.arange(7, 10)] - compare_results(res, desired) - - def test_index_split_high_bound(self): - a = np.arange(10) - indices = [0, 5, 7, 10, 12] - res = array_split(a, indices, axis=-1) - desired = [np.array([]), np.arange(0, 5), 
np.arange(5, 7), - np.arange(7, 10), np.array([]), np.array([])] - compare_results(res, desired) - - -class TestSplit(object): - # The split function is essentially the same as array_split, - # except that it test if splitting will result in an - # equal split. Only test for this case. - - def test_equal_split(self): - a = np.arange(10) - res = split(a, 2) - desired = [np.arange(5), np.arange(5, 10)] - compare_results(res, desired) - - def test_unequal_split(self): - a = np.arange(10) - assert_raises(ValueError, split, a, 3) - - -class TestColumnStack(object): - def test_non_iterable(self): - assert_raises(TypeError, column_stack, 1) - - def test_1D_arrays(self): - # example from docstring - a = np.array((1, 2, 3)) - b = np.array((2, 3, 4)) - expected = np.array([[1, 2], - [2, 3], - [3, 4]]) - actual = np.column_stack((a, b)) - assert_equal(actual, expected) - - def test_2D_arrays(self): - # same as hstack 2D docstring example - a = np.array([[1], [2], [3]]) - b = np.array([[2], [3], [4]]) - expected = np.array([[1, 2], - [2, 3], - [3, 4]]) - actual = np.column_stack((a, b)) - assert_equal(actual, expected) - - def test_generator(self): - with assert_warns(FutureWarning): - column_stack((np.arange(3) for _ in range(2))) - - -class TestDstack(object): - def test_non_iterable(self): - assert_raises(TypeError, dstack, 1) - - def test_0D_array(self): - a = np.array(1) - b = np.array(2) - res = dstack([a, b]) - desired = np.array([[[1, 2]]]) - assert_array_equal(res, desired) - - def test_1D_array(self): - a = np.array([1]) - b = np.array([2]) - res = dstack([a, b]) - desired = np.array([[[1, 2]]]) - assert_array_equal(res, desired) - - def test_2D_array(self): - a = np.array([[1], [2]]) - b = np.array([[1], [2]]) - res = dstack([a, b]) - desired = np.array([[[1, 1]], [[2, 2, ]]]) - assert_array_equal(res, desired) - - def test_2D_array2(self): - a = np.array([1, 2]) - b = np.array([1, 2]) - res = dstack([a, b]) - desired = np.array([[[1, 1], [2, 2]]]) - 
assert_array_equal(res, desired) - - def test_generator(self): - with assert_warns(FutureWarning): - dstack((np.arange(3) for _ in range(2))) - - -# array_split has more comprehensive test of splitting. -# only do simple test on hsplit, vsplit, and dsplit -class TestHsplit(object): - """Only testing for integer splits. - - """ - def test_non_iterable(self): - assert_raises(ValueError, hsplit, 1, 1) - - def test_0D_array(self): - a = np.array(1) - try: - hsplit(a, 2) - assert_(0) - except ValueError: - pass - - def test_1D_array(self): - a = np.array([1, 2, 3, 4]) - res = hsplit(a, 2) - desired = [np.array([1, 2]), np.array([3, 4])] - compare_results(res, desired) - - def test_2D_array(self): - a = np.array([[1, 2, 3, 4], - [1, 2, 3, 4]]) - res = hsplit(a, 2) - desired = [np.array([[1, 2], [1, 2]]), np.array([[3, 4], [3, 4]])] - compare_results(res, desired) - - -class TestVsplit(object): - """Only testing for integer splits. - - """ - def test_non_iterable(self): - assert_raises(ValueError, vsplit, 1, 1) - - def test_0D_array(self): - a = np.array(1) - assert_raises(ValueError, vsplit, a, 2) - - def test_1D_array(self): - a = np.array([1, 2, 3, 4]) - try: - vsplit(a, 2) - assert_(0) - except ValueError: - pass - - def test_2D_array(self): - a = np.array([[1, 2, 3, 4], - [1, 2, 3, 4]]) - res = vsplit(a, 2) - desired = [np.array([[1, 2, 3, 4]]), np.array([[1, 2, 3, 4]])] - compare_results(res, desired) - - -class TestDsplit(object): - # Only testing for integer splits. 
- def test_non_iterable(self): - assert_raises(ValueError, dsplit, 1, 1) - - def test_0D_array(self): - a = np.array(1) - assert_raises(ValueError, dsplit, a, 2) - - def test_1D_array(self): - a = np.array([1, 2, 3, 4]) - assert_raises(ValueError, dsplit, a, 2) - - def test_2D_array(self): - a = np.array([[1, 2, 3, 4], - [1, 2, 3, 4]]) - try: - dsplit(a, 2) - assert_(0) - except ValueError: - pass - - def test_3D_array(self): - a = np.array([[[1, 2, 3, 4], - [1, 2, 3, 4]], - [[1, 2, 3, 4], - [1, 2, 3, 4]]]) - res = dsplit(a, 2) - desired = [np.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]]), - np.array([[[3, 4], [3, 4]], [[3, 4], [3, 4]]])] - compare_results(res, desired) - - -class TestSqueeze(object): - def test_basic(self): - from numpy.random import rand - - a = rand(20, 10, 10, 1, 1) - b = rand(20, 1, 10, 1, 20) - c = rand(1, 1, 20, 10) - assert_array_equal(np.squeeze(a), np.reshape(a, (20, 10, 10))) - assert_array_equal(np.squeeze(b), np.reshape(b, (20, 10, 20))) - assert_array_equal(np.squeeze(c), np.reshape(c, (20, 10))) - - # Squeezing to 0-dim should still give an ndarray - a = [[[1.5]]] - res = np.squeeze(a) - assert_equal(res, 1.5) - assert_equal(res.ndim, 0) - assert_equal(type(res), np.ndarray) - - -class TestKron(object): - def test_return_type(self): - class myarray(np.ndarray): - __array_priority__ = 0.0 - - a = np.ones([2, 2]) - ma = myarray(a.shape, a.dtype, a.data) - assert_equal(type(kron(a, a)), np.ndarray) - assert_equal(type(kron(ma, ma)), myarray) - assert_equal(type(kron(a, ma)), np.ndarray) - assert_equal(type(kron(ma, a)), myarray) - - -class TestTile(object): - def test_basic(self): - a = np.array([0, 1, 2]) - b = [[1, 2], [3, 4]] - assert_equal(tile(a, 2), [0, 1, 2, 0, 1, 2]) - assert_equal(tile(a, (2, 2)), [[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]]) - assert_equal(tile(a, (1, 2)), [[0, 1, 2, 0, 1, 2]]) - assert_equal(tile(b, 2), [[1, 2, 1, 2], [3, 4, 3, 4]]) - assert_equal(tile(b, (2, 1)), [[1, 2], [3, 4], [1, 2], [3, 4]]) - 
assert_equal(tile(b, (2, 2)), [[1, 2, 1, 2], [3, 4, 3, 4], - [1, 2, 1, 2], [3, 4, 3, 4]]) - - def test_tile_one_repetition_on_array_gh4679(self): - a = np.arange(5) - b = tile(a, 1) - b += 2 - assert_equal(a, np.arange(5)) - - def test_empty(self): - a = np.array([[[]]]) - b = np.array([[], []]) - c = tile(b, 2).shape - d = tile(a, (3, 2, 5)).shape - assert_equal(c, (2, 0)) - assert_equal(d, (3, 2, 0)) - - def test_kroncompare(self): - from numpy.random import randint - - reps = [(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)] - shape = [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)] - for s in shape: - b = randint(0, 10, size=s) - for r in reps: - a = np.ones(r, b.dtype) - large = tile(b, r) - klarge = kron(a, b) - assert_equal(large, klarge) - - -class TestMayShareMemory(object): - def test_basic(self): - d = np.ones((50, 60)) - d2 = np.ones((30, 60, 6)) - assert_(np.may_share_memory(d, d)) - assert_(np.may_share_memory(d, d[::-1])) - assert_(np.may_share_memory(d, d[::2])) - assert_(np.may_share_memory(d, d[1:, ::-1])) - - assert_(not np.may_share_memory(d[::-1], d2)) - assert_(not np.may_share_memory(d[::2], d2)) - assert_(not np.may_share_memory(d[1:, ::-1], d2)) - assert_(np.may_share_memory(d2[1:, ::-1], d2)) - - -# Utility -def compare_results(res, desired): - for i in range(len(desired)): - assert_array_equal(res[i], desired[i]) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_stride_tricks.py b/venv/lib/python3.7/site-packages/numpy/lib/tests/test_stride_tricks.py deleted file mode 100644 index 85fccee..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_stride_tricks.py +++ /dev/null @@ -1,484 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.core._rational_tests import rational -from numpy.testing import ( - assert_equal, assert_array_equal, assert_raises, assert_, - assert_raises_regex, assert_warns, - ) -from numpy.lib.stride_tricks import ( - 
as_strided, broadcast_arrays, _broadcast_shape, broadcast_to - ) - -def assert_shapes_correct(input_shapes, expected_shape): - # Broadcast a list of arrays with the given input shapes and check the - # common output shape. - - inarrays = [np.zeros(s) for s in input_shapes] - outarrays = broadcast_arrays(*inarrays) - outshapes = [a.shape for a in outarrays] - expected = [expected_shape] * len(inarrays) - assert_equal(outshapes, expected) - - -def assert_incompatible_shapes_raise(input_shapes): - # Broadcast a list of arrays with the given (incompatible) input shapes - # and check that they raise a ValueError. - - inarrays = [np.zeros(s) for s in input_shapes] - assert_raises(ValueError, broadcast_arrays, *inarrays) - - -def assert_same_as_ufunc(shape0, shape1, transposed=False, flipped=False): - # Broadcast two shapes against each other and check that the data layout - # is the same as if a ufunc did the broadcasting. - - x0 = np.zeros(shape0, dtype=int) - # Note that multiply.reduce's identity element is 1.0, so when shape1==(), - # this gives the desired n==1. - n = int(np.multiply.reduce(shape1)) - x1 = np.arange(n).reshape(shape1) - if transposed: - x0 = x0.T - x1 = x1.T - if flipped: - x0 = x0[::-1] - x1 = x1[::-1] - # Use the add ufunc to do the broadcasting. Since we're adding 0s to x1, the - # result should be exactly the same as the broadcasted view of x1. 
- y = x0 + x1 - b0, b1 = broadcast_arrays(x0, x1) - assert_array_equal(y, b1) - - -def test_same(): - x = np.arange(10) - y = np.arange(10) - bx, by = broadcast_arrays(x, y) - assert_array_equal(x, bx) - assert_array_equal(y, by) - -def test_broadcast_kwargs(): - # ensure that a TypeError is appropriately raised when - # np.broadcast_arrays() is called with any keyword - # argument other than 'subok' - x = np.arange(10) - y = np.arange(10) - - with assert_raises_regex(TypeError, - r'broadcast_arrays\(\) got an unexpected keyword*'): - broadcast_arrays(x, y, dtype='float64') - - -def test_one_off(): - x = np.array([[1, 2, 3]]) - y = np.array([[1], [2], [3]]) - bx, by = broadcast_arrays(x, y) - bx0 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) - by0 = bx0.T - assert_array_equal(bx0, bx) - assert_array_equal(by0, by) - - -def test_same_input_shapes(): - # Check that the final shape is just the input shape. - - data = [ - (), - (1,), - (3,), - (0, 1), - (0, 3), - (1, 0), - (3, 0), - (1, 3), - (3, 1), - (3, 3), - ] - for shape in data: - input_shapes = [shape] - # Single input. - assert_shapes_correct(input_shapes, shape) - # Double input. - input_shapes2 = [shape, shape] - assert_shapes_correct(input_shapes2, shape) - # Triple input. - input_shapes3 = [shape, shape, shape] - assert_shapes_correct(input_shapes3, shape) - - -def test_two_compatible_by_ones_input_shapes(): - # Check that two different input shapes of the same length, but some have - # ones, broadcast to the correct shape. 
- - data = [ - [[(1,), (3,)], (3,)], - [[(1, 3), (3, 3)], (3, 3)], - [[(3, 1), (3, 3)], (3, 3)], - [[(1, 3), (3, 1)], (3, 3)], - [[(1, 1), (3, 3)], (3, 3)], - [[(1, 1), (1, 3)], (1, 3)], - [[(1, 1), (3, 1)], (3, 1)], - [[(1, 0), (0, 0)], (0, 0)], - [[(0, 1), (0, 0)], (0, 0)], - [[(1, 0), (0, 1)], (0, 0)], - [[(1, 1), (0, 0)], (0, 0)], - [[(1, 1), (1, 0)], (1, 0)], - [[(1, 1), (0, 1)], (0, 1)], - ] - for input_shapes, expected_shape in data: - assert_shapes_correct(input_shapes, expected_shape) - # Reverse the input shapes since broadcasting should be symmetric. - assert_shapes_correct(input_shapes[::-1], expected_shape) - - -def test_two_compatible_by_prepending_ones_input_shapes(): - # Check that two different input shapes (of different lengths) broadcast - # to the correct shape. - - data = [ - [[(), (3,)], (3,)], - [[(3,), (3, 3)], (3, 3)], - [[(3,), (3, 1)], (3, 3)], - [[(1,), (3, 3)], (3, 3)], - [[(), (3, 3)], (3, 3)], - [[(1, 1), (3,)], (1, 3)], - [[(1,), (3, 1)], (3, 1)], - [[(1,), (1, 3)], (1, 3)], - [[(), (1, 3)], (1, 3)], - [[(), (3, 1)], (3, 1)], - [[(), (0,)], (0,)], - [[(0,), (0, 0)], (0, 0)], - [[(0,), (0, 1)], (0, 0)], - [[(1,), (0, 0)], (0, 0)], - [[(), (0, 0)], (0, 0)], - [[(1, 1), (0,)], (1, 0)], - [[(1,), (0, 1)], (0, 1)], - [[(1,), (1, 0)], (1, 0)], - [[(), (1, 0)], (1, 0)], - [[(), (0, 1)], (0, 1)], - ] - for input_shapes, expected_shape in data: - assert_shapes_correct(input_shapes, expected_shape) - # Reverse the input shapes since broadcasting should be symmetric. - assert_shapes_correct(input_shapes[::-1], expected_shape) - - -def test_incompatible_shapes_raise_valueerror(): - # Check that a ValueError is raised for incompatible shapes. - - data = [ - [(3,), (4,)], - [(2, 3), (2,)], - [(3,), (3,), (4,)], - [(1, 3, 4), (2, 3, 3)], - ] - for input_shapes in data: - assert_incompatible_shapes_raise(input_shapes) - # Reverse the input shapes since broadcasting should be symmetric. 
- assert_incompatible_shapes_raise(input_shapes[::-1]) - - -def test_same_as_ufunc(): - # Check that the data layout is the same as if a ufunc did the operation. - - data = [ - [[(1,), (3,)], (3,)], - [[(1, 3), (3, 3)], (3, 3)], - [[(3, 1), (3, 3)], (3, 3)], - [[(1, 3), (3, 1)], (3, 3)], - [[(1, 1), (3, 3)], (3, 3)], - [[(1, 1), (1, 3)], (1, 3)], - [[(1, 1), (3, 1)], (3, 1)], - [[(1, 0), (0, 0)], (0, 0)], - [[(0, 1), (0, 0)], (0, 0)], - [[(1, 0), (0, 1)], (0, 0)], - [[(1, 1), (0, 0)], (0, 0)], - [[(1, 1), (1, 0)], (1, 0)], - [[(1, 1), (0, 1)], (0, 1)], - [[(), (3,)], (3,)], - [[(3,), (3, 3)], (3, 3)], - [[(3,), (3, 1)], (3, 3)], - [[(1,), (3, 3)], (3, 3)], - [[(), (3, 3)], (3, 3)], - [[(1, 1), (3,)], (1, 3)], - [[(1,), (3, 1)], (3, 1)], - [[(1,), (1, 3)], (1, 3)], - [[(), (1, 3)], (1, 3)], - [[(), (3, 1)], (3, 1)], - [[(), (0,)], (0,)], - [[(0,), (0, 0)], (0, 0)], - [[(0,), (0, 1)], (0, 0)], - [[(1,), (0, 0)], (0, 0)], - [[(), (0, 0)], (0, 0)], - [[(1, 1), (0,)], (1, 0)], - [[(1,), (0, 1)], (0, 1)], - [[(1,), (1, 0)], (1, 0)], - [[(), (1, 0)], (1, 0)], - [[(), (0, 1)], (0, 1)], - ] - for input_shapes, expected_shape in data: - assert_same_as_ufunc(input_shapes[0], input_shapes[1], - "Shapes: %s %s" % (input_shapes[0], input_shapes[1])) - # Reverse the input shapes since broadcasting should be symmetric. - assert_same_as_ufunc(input_shapes[1], input_shapes[0]) - # Try them transposed, too. - assert_same_as_ufunc(input_shapes[0], input_shapes[1], True) - # ... and flipped for non-rank-0 inputs in order to test negative - # strides. 
- if () not in input_shapes: - assert_same_as_ufunc(input_shapes[0], input_shapes[1], False, True) - assert_same_as_ufunc(input_shapes[0], input_shapes[1], True, True) - - -def test_broadcast_to_succeeds(): - data = [ - [np.array(0), (0,), np.array(0)], - [np.array(0), (1,), np.zeros(1)], - [np.array(0), (3,), np.zeros(3)], - [np.ones(1), (1,), np.ones(1)], - [np.ones(1), (2,), np.ones(2)], - [np.ones(1), (1, 2, 3), np.ones((1, 2, 3))], - [np.arange(3), (3,), np.arange(3)], - [np.arange(3), (1, 3), np.arange(3).reshape(1, -1)], - [np.arange(3), (2, 3), np.array([[0, 1, 2], [0, 1, 2]])], - # test if shape is not a tuple - [np.ones(0), 0, np.ones(0)], - [np.ones(1), 1, np.ones(1)], - [np.ones(1), 2, np.ones(2)], - # these cases with size 0 are strange, but they reproduce the behavior - # of broadcasting with ufuncs (see test_same_as_ufunc above) - [np.ones(1), (0,), np.ones(0)], - [np.ones((1, 2)), (0, 2), np.ones((0, 2))], - [np.ones((2, 1)), (2, 0), np.ones((2, 0))], - ] - for input_array, shape, expected in data: - actual = broadcast_to(input_array, shape) - assert_array_equal(expected, actual) - - -def test_broadcast_to_raises(): - data = [ - [(0,), ()], - [(1,), ()], - [(3,), ()], - [(3,), (1,)], - [(3,), (2,)], - [(3,), (4,)], - [(1, 2), (2, 1)], - [(1, 1), (1,)], - [(1,), -1], - [(1,), (-1,)], - [(1, 2), (-1, 2)], - ] - for orig_shape, target_shape in data: - arr = np.zeros(orig_shape) - assert_raises(ValueError, lambda: broadcast_to(arr, target_shape)) - - -def test_broadcast_shape(): - # broadcast_shape is already exercized indirectly by broadcast_arrays - assert_equal(_broadcast_shape(), ()) - assert_equal(_broadcast_shape([1, 2]), (2,)) - assert_equal(_broadcast_shape(np.ones((1, 1))), (1, 1)) - assert_equal(_broadcast_shape(np.ones((1, 1)), np.ones((3, 4))), (3, 4)) - assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 32)), (1, 2)) - assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 100)), (1, 2)) - - # regression tests for gh-5862 - 
assert_equal(_broadcast_shape(*([np.ones(2)] * 32 + [1])), (2,)) - bad_args = [np.ones(2)] * 32 + [np.ones(3)] * 32 - assert_raises(ValueError, lambda: _broadcast_shape(*bad_args)) - - -def test_as_strided(): - a = np.array([None]) - a_view = as_strided(a) - expected = np.array([None]) - assert_array_equal(a_view, np.array([None])) - - a = np.array([1, 2, 3, 4]) - a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,)) - expected = np.array([1, 3]) - assert_array_equal(a_view, expected) - - a = np.array([1, 2, 3, 4]) - a_view = as_strided(a, shape=(3, 4), strides=(0, 1 * a.itemsize)) - expected = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) - assert_array_equal(a_view, expected) - - # Regression test for gh-5081 - dt = np.dtype([('num', 'i4'), ('obj', 'O')]) - a = np.empty((4,), dtype=dt) - a['num'] = np.arange(1, 5) - a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) - expected_num = [[1, 2, 3, 4]] * 3 - expected_obj = [[None]*4]*3 - assert_equal(a_view.dtype, dt) - assert_array_equal(expected_num, a_view['num']) - assert_array_equal(expected_obj, a_view['obj']) - - # Make sure that void types without fields are kept unchanged - a = np.empty((4,), dtype='V4') - a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) - assert_equal(a.dtype, a_view.dtype) - - # Make sure that the only type that could fail is properly handled - dt = np.dtype({'names': [''], 'formats': ['V4']}) - a = np.empty((4,), dtype=dt) - a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) - assert_equal(a.dtype, a_view.dtype) - - # Custom dtypes should not be lost (gh-9161) - r = [rational(i) for i in range(4)] - a = np.array(r, dtype=rational) - a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) - assert_equal(a.dtype, a_view.dtype) - assert_array_equal([r] * 3, a_view) - -def as_strided_writeable(): - arr = np.ones(10) - view = as_strided(arr, writeable=False) - assert_(not view.flags.writeable) - - # Check that writeable also is fine: - 
view = as_strided(arr, writeable=True) - assert_(view.flags.writeable) - view[...] = 3 - assert_array_equal(arr, np.full_like(arr, 3)) - - # Test that things do not break down for readonly: - arr.flags.writeable = False - view = as_strided(arr, writeable=False) - view = as_strided(arr, writeable=True) - assert_(not view.flags.writeable) - - -class VerySimpleSubClass(np.ndarray): - def __new__(cls, *args, **kwargs): - kwargs['subok'] = True - return np.array(*args, **kwargs).view(cls) - - -class SimpleSubClass(VerySimpleSubClass): - def __new__(cls, *args, **kwargs): - kwargs['subok'] = True - self = np.array(*args, **kwargs).view(cls) - self.info = 'simple' - return self - - def __array_finalize__(self, obj): - self.info = getattr(obj, 'info', '') + ' finalized' - - -def test_subclasses(): - # test that subclass is preserved only if subok=True - a = VerySimpleSubClass([1, 2, 3, 4]) - assert_(type(a) is VerySimpleSubClass) - a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,)) - assert_(type(a_view) is np.ndarray) - a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True) - assert_(type(a_view) is VerySimpleSubClass) - # test that if a subclass has __array_finalize__, it is used - a = SimpleSubClass([1, 2, 3, 4]) - a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True) - assert_(type(a_view) is SimpleSubClass) - assert_(a_view.info == 'simple finalized') - - # similar tests for broadcast_arrays - b = np.arange(len(a)).reshape(-1, 1) - a_view, b_view = broadcast_arrays(a, b) - assert_(type(a_view) is np.ndarray) - assert_(type(b_view) is np.ndarray) - assert_(a_view.shape == b_view.shape) - a_view, b_view = broadcast_arrays(a, b, subok=True) - assert_(type(a_view) is SimpleSubClass) - assert_(a_view.info == 'simple finalized') - assert_(type(b_view) is np.ndarray) - assert_(a_view.shape == b_view.shape) - - # and for broadcast_to - shape = (2, 4) - a_view = broadcast_to(a, shape) - assert_(type(a_view) is np.ndarray) - 
assert_(a_view.shape == shape) - a_view = broadcast_to(a, shape, subok=True) - assert_(type(a_view) is SimpleSubClass) - assert_(a_view.info == 'simple finalized') - assert_(a_view.shape == shape) - - -def test_writeable(): - # broadcast_to should return a readonly array - original = np.array([1, 2, 3]) - result = broadcast_to(original, (2, 3)) - assert_equal(result.flags.writeable, False) - assert_raises(ValueError, result.__setitem__, slice(None), 0) - - # but the result of broadcast_arrays needs to be writeable, to - # preserve backwards compatibility - for is_broadcast, results in [(False, broadcast_arrays(original,)), - (True, broadcast_arrays(0, original))]: - for result in results: - # This will change to False in a future version - if is_broadcast: - with assert_warns(FutureWarning): - assert_equal(result.flags.writeable, True) - with assert_warns(DeprecationWarning): - result[:] = 0 - # Warning not emitted, writing to the array resets it - assert_equal(result.flags.writeable, True) - else: - # No warning: - assert_equal(result.flags.writeable, True) - - for results in [broadcast_arrays(original), - broadcast_arrays(0, original)]: - for result in results: - # resets the warn_on_write DeprecationWarning - result.flags.writeable = True - # check: no warning emitted - assert_equal(result.flags.writeable, True) - result[:] = 0 - - # keep readonly input readonly - original.flags.writeable = False - _, result = broadcast_arrays(0, original) - assert_equal(result.flags.writeable, False) - - # regression test for GH6491 - shape = (2,) - strides = [0] - tricky_array = as_strided(np.array(0), shape, strides) - other = np.zeros((1,)) - first, second = broadcast_arrays(tricky_array, other) - assert_(first.shape == second.shape) - - -def test_writeable_memoryview(): - # The result of broadcast_arrays exports as a non-writeable memoryview - # because otherwise there is no good way to opt in to the new behaviour - # (i.e. 
you would need to set writeable to False explicitly). - # See gh-13929. - original = np.array([1, 2, 3]) - - for is_broadcast, results in [(False, broadcast_arrays(original,)), - (True, broadcast_arrays(0, original))]: - for result in results: - # This will change to False in a future version - if is_broadcast: - # memoryview(result, writable=True) will give warning but cannot - # be tested using the python API. - assert memoryview(result).readonly - else: - assert not memoryview(result).readonly - - -def test_reference_types(): - input_array = np.array('a', dtype=object) - expected = np.array(['a'] * 3, dtype=object) - actual = broadcast_to(input_array, (3,)) - assert_array_equal(expected, actual) - - actual, _ = broadcast_arrays(input_array, np.ones(3)) - assert_array_equal(expected, actual) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_twodim_base.py b/venv/lib/python3.7/site-packages/numpy/lib/tests/test_twodim_base.py deleted file mode 100644 index bb844e4..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_twodim_base.py +++ /dev/null @@ -1,534 +0,0 @@ -"""Test functions for matrix module - -""" -from __future__ import division, absolute_import, print_function - -from numpy.testing import ( - assert_equal, assert_array_equal, assert_array_max_ulp, - assert_array_almost_equal, assert_raises, assert_ - ) - -from numpy import ( - arange, add, fliplr, flipud, zeros, ones, eye, array, diag, histogram2d, - tri, mask_indices, triu_indices, triu_indices_from, tril_indices, - tril_indices_from, vander, - ) - -import numpy as np - - -from numpy.core.tests.test_overrides import requires_array_function - - -def get_mat(n): - data = arange(n) - data = add.outer(data, data) - return data - - -class TestEye(object): - def test_basic(self): - assert_equal(eye(4), - array([[1, 0, 0, 0], - [0, 1, 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1]])) - - assert_equal(eye(4, dtype='f'), - array([[1, 0, 0, 0], - [0, 1, 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 
1]], 'f')) - - assert_equal(eye(3) == 1, - eye(3, dtype=bool)) - - def test_diag(self): - assert_equal(eye(4, k=1), - array([[0, 1, 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1], - [0, 0, 0, 0]])) - - assert_equal(eye(4, k=-1), - array([[0, 0, 0, 0], - [1, 0, 0, 0], - [0, 1, 0, 0], - [0, 0, 1, 0]])) - - def test_2d(self): - assert_equal(eye(4, 3), - array([[1, 0, 0], - [0, 1, 0], - [0, 0, 1], - [0, 0, 0]])) - - assert_equal(eye(3, 4), - array([[1, 0, 0, 0], - [0, 1, 0, 0], - [0, 0, 1, 0]])) - - def test_diag2d(self): - assert_equal(eye(3, 4, k=2), - array([[0, 0, 1, 0], - [0, 0, 0, 1], - [0, 0, 0, 0]])) - - assert_equal(eye(4, 3, k=-2), - array([[0, 0, 0], - [0, 0, 0], - [1, 0, 0], - [0, 1, 0]])) - - def test_eye_bounds(self): - assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]]) - assert_equal(eye(2, 2, -1), [[0, 0], [1, 0]]) - assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]]) - assert_equal(eye(2, 2, -2), [[0, 0], [0, 0]]) - assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]]) - assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]]) - assert_equal(eye(3, 2, -1), [[0, 0], [1, 0], [0, 1]]) - assert_equal(eye(3, 2, -2), [[0, 0], [0, 0], [1, 0]]) - assert_equal(eye(3, 2, -3), [[0, 0], [0, 0], [0, 0]]) - - def test_strings(self): - assert_equal(eye(2, 2, dtype='S3'), - [[b'1', b''], [b'', b'1']]) - - def test_bool(self): - assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]]) - - def test_order(self): - mat_c = eye(4, 3, k=-1) - mat_f = eye(4, 3, k=-1, order='F') - assert_equal(mat_c, mat_f) - assert mat_c.flags.c_contiguous - assert not mat_c.flags.f_contiguous - assert not mat_f.flags.c_contiguous - assert mat_f.flags.f_contiguous - - -class TestDiag(object): - def test_vector(self): - vals = (100 * arange(5)).astype('l') - b = zeros((5, 5)) - for k in range(5): - b[k, k] = vals[k] - assert_equal(diag(vals), b) - b = zeros((7, 7)) - c = b.copy() - for k in range(5): - b[k, k + 2] = vals[k] - c[k + 2, k] = vals[k] - assert_equal(diag(vals, k=2), b) - assert_equal(diag(vals, 
k=-2), c) - - def test_matrix(self, vals=None): - if vals is None: - vals = (100 * get_mat(5) + 1).astype('l') - b = zeros((5,)) - for k in range(5): - b[k] = vals[k, k] - assert_equal(diag(vals), b) - b = b * 0 - for k in range(3): - b[k] = vals[k, k + 2] - assert_equal(diag(vals, 2), b[:3]) - for k in range(3): - b[k] = vals[k + 2, k] - assert_equal(diag(vals, -2), b[:3]) - - def test_fortran_order(self): - vals = array((100 * get_mat(5) + 1), order='F', dtype='l') - self.test_matrix(vals) - - def test_diag_bounds(self): - A = [[1, 2], [3, 4], [5, 6]] - assert_equal(diag(A, k=2), []) - assert_equal(diag(A, k=1), [2]) - assert_equal(diag(A, k=0), [1, 4]) - assert_equal(diag(A, k=-1), [3, 6]) - assert_equal(diag(A, k=-2), [5]) - assert_equal(diag(A, k=-3), []) - - def test_failure(self): - assert_raises(ValueError, diag, [[[1]]]) - - -class TestFliplr(object): - def test_basic(self): - assert_raises(ValueError, fliplr, ones(4)) - a = get_mat(4) - b = a[:, ::-1] - assert_equal(fliplr(a), b) - a = [[0, 1, 2], - [3, 4, 5]] - b = [[2, 1, 0], - [5, 4, 3]] - assert_equal(fliplr(a), b) - - -class TestFlipud(object): - def test_basic(self): - a = get_mat(4) - b = a[::-1, :] - assert_equal(flipud(a), b) - a = [[0, 1, 2], - [3, 4, 5]] - b = [[3, 4, 5], - [0, 1, 2]] - assert_equal(flipud(a), b) - - -class TestHistogram2d(object): - def test_simple(self): - x = array( - [0.41702200, 0.72032449, 1.1437481e-4, 0.302332573, 0.146755891]) - y = array( - [0.09233859, 0.18626021, 0.34556073, 0.39676747, 0.53881673]) - xedges = np.linspace(0, 1, 10) - yedges = np.linspace(0, 1, 10) - H = histogram2d(x, y, (xedges, yedges))[0] - answer = array( - [[0, 0, 0, 1, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [1, 0, 1, 0, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0]]) - assert_array_equal(H.T, answer) - H = histogram2d(x, y, 
xedges)[0] - assert_array_equal(H.T, answer) - H, xedges, yedges = histogram2d(list(range(10)), list(range(10))) - assert_array_equal(H, eye(10, 10)) - assert_array_equal(xedges, np.linspace(0, 9, 11)) - assert_array_equal(yedges, np.linspace(0, 9, 11)) - - def test_asym(self): - x = array([1, 1, 2, 3, 4, 4, 4, 5]) - y = array([1, 3, 2, 0, 1, 2, 3, 4]) - H, xed, yed = histogram2d( - x, y, (6, 5), range=[[0, 6], [0, 5]], density=True) - answer = array( - [[0., 0, 0, 0, 0], - [0, 1, 0, 1, 0], - [0, 0, 1, 0, 0], - [1, 0, 0, 0, 0], - [0, 1, 1, 1, 0], - [0, 0, 0, 0, 1]]) - assert_array_almost_equal(H, answer/8., 3) - assert_array_equal(xed, np.linspace(0, 6, 7)) - assert_array_equal(yed, np.linspace(0, 5, 6)) - - def test_density(self): - x = array([1, 2, 3, 1, 2, 3, 1, 2, 3]) - y = array([1, 1, 1, 2, 2, 2, 3, 3, 3]) - H, xed, yed = histogram2d( - x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], density=True) - answer = array([[1, 1, .5], - [1, 1, .5], - [.5, .5, .25]])/9. - assert_array_almost_equal(H, answer, 3) - - def test_all_outliers(self): - r = np.random.rand(100) + 1. 
+ 1e6 # histogramdd rounds by decimal=6 - H, xed, yed = histogram2d(r, r, (4, 5), range=([0, 1], [0, 1])) - assert_array_equal(H, 0) - - def test_empty(self): - a, edge1, edge2 = histogram2d([], [], bins=([0, 1], [0, 1])) - assert_array_max_ulp(a, array([[0.]])) - - a, edge1, edge2 = histogram2d([], [], bins=4) - assert_array_max_ulp(a, np.zeros((4, 4))) - - def test_binparameter_combination(self): - x = array( - [0, 0.09207008, 0.64575234, 0.12875982, 0.47390599, - 0.59944483, 1]) - y = array( - [0, 0.14344267, 0.48988575, 0.30558665, 0.44700682, - 0.15886423, 1]) - edges = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1) - H, xe, ye = histogram2d(x, y, (edges, 4)) - answer = array( - [[2., 0., 0., 0.], - [0., 1., 0., 0.], - [0., 0., 0., 0.], - [0., 0., 0., 0.], - [0., 1., 0., 0.], - [1., 0., 0., 0.], - [0., 1., 0., 0.], - [0., 0., 0., 0.], - [0., 0., 0., 0.], - [0., 0., 0., 1.]]) - assert_array_equal(H, answer) - assert_array_equal(ye, array([0., 0.25, 0.5, 0.75, 1])) - H, xe, ye = histogram2d(x, y, (4, edges)) - answer = array( - [[1., 1., 0., 1., 0., 0., 0., 0., 0., 0.], - [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.], - [0., 1., 0., 0., 1., 0., 0., 0., 0., 0.], - [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]]) - assert_array_equal(H, answer) - assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1])) - - @requires_array_function - def test_dispatch(self): - class ShouldDispatch: - def __array_function__(self, function, types, args, kwargs): - return types, args, kwargs - - xy = [1, 2] - s_d = ShouldDispatch() - r = histogram2d(s_d, xy) - # Cannot use assert_equal since that dispatches... 
- assert_(r == ((ShouldDispatch,), (s_d, xy), {})) - r = histogram2d(xy, s_d) - assert_(r == ((ShouldDispatch,), (xy, s_d), {})) - r = histogram2d(xy, xy, bins=s_d) - assert_(r, ((ShouldDispatch,), (xy, xy), dict(bins=s_d))) - r = histogram2d(xy, xy, bins=[s_d, 5]) - assert_(r, ((ShouldDispatch,), (xy, xy), dict(bins=[s_d, 5]))) - assert_raises(Exception, histogram2d, xy, xy, bins=[s_d]) - r = histogram2d(xy, xy, weights=s_d) - assert_(r, ((ShouldDispatch,), (xy, xy), dict(weights=s_d))) - - -class TestTri(object): - def test_dtype(self): - out = array([[1, 0, 0], - [1, 1, 0], - [1, 1, 1]]) - assert_array_equal(tri(3), out) - assert_array_equal(tri(3, dtype=bool), out.astype(bool)) - - -def test_tril_triu_ndim2(): - for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']: - a = np.ones((2, 2), dtype=dtype) - b = np.tril(a) - c = np.triu(a) - assert_array_equal(b, [[1, 0], [1, 1]]) - assert_array_equal(c, b.T) - # should return the same dtype as the original array - assert_equal(b.dtype, a.dtype) - assert_equal(c.dtype, a.dtype) - - -def test_tril_triu_ndim3(): - for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']: - a = np.array([ - [[1, 1], [1, 1]], - [[1, 1], [1, 0]], - [[1, 1], [0, 0]], - ], dtype=dtype) - a_tril_desired = np.array([ - [[1, 0], [1, 1]], - [[1, 0], [1, 0]], - [[1, 0], [0, 0]], - ], dtype=dtype) - a_triu_desired = np.array([ - [[1, 1], [0, 1]], - [[1, 1], [0, 0]], - [[1, 1], [0, 0]], - ], dtype=dtype) - a_triu_observed = np.triu(a) - a_tril_observed = np.tril(a) - assert_array_equal(a_triu_observed, a_triu_desired) - assert_array_equal(a_tril_observed, a_tril_desired) - assert_equal(a_triu_observed.dtype, a.dtype) - assert_equal(a_tril_observed.dtype, a.dtype) - - -def test_tril_triu_with_inf(): - # Issue 4859 - arr = np.array([[1, 1, np.inf], - [1, 1, 1], - [np.inf, 1, 1]]) - out_tril = np.array([[1, 0, 0], - [1, 1, 0], - [np.inf, 1, 1]]) - out_triu = out_tril.T - assert_array_equal(np.triu(arr), out_triu) - 
assert_array_equal(np.tril(arr), out_tril) - - -def test_tril_triu_dtype(): - # Issue 4916 - # tril and triu should return the same dtype as input - for c in np.typecodes['All']: - if c == 'V': - continue - arr = np.zeros((3, 3), dtype=c) - assert_equal(np.triu(arr).dtype, arr.dtype) - assert_equal(np.tril(arr).dtype, arr.dtype) - - # check special cases - arr = np.array([['2001-01-01T12:00', '2002-02-03T13:56'], - ['2004-01-01T12:00', '2003-01-03T13:45']], - dtype='datetime64') - assert_equal(np.triu(arr).dtype, arr.dtype) - assert_equal(np.tril(arr).dtype, arr.dtype) - - arr = np.zeros((3,3), dtype='f4,f4') - assert_equal(np.triu(arr).dtype, arr.dtype) - assert_equal(np.tril(arr).dtype, arr.dtype) - - -def test_mask_indices(): - # simple test without offset - iu = mask_indices(3, np.triu) - a = np.arange(9).reshape(3, 3) - assert_array_equal(a[iu], array([0, 1, 2, 4, 5, 8])) - # Now with an offset - iu1 = mask_indices(3, np.triu, 1) - assert_array_equal(a[iu1], array([1, 2, 5])) - - -def test_tril_indices(): - # indices without and with offset - il1 = tril_indices(4) - il2 = tril_indices(4, k=2) - il3 = tril_indices(4, m=5) - il4 = tril_indices(4, k=2, m=5) - - a = np.array([[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12], - [13, 14, 15, 16]]) - b = np.arange(1, 21).reshape(4, 5) - - # indexing: - assert_array_equal(a[il1], - array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16])) - assert_array_equal(b[il3], - array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19])) - - # And for assigning values: - a[il1] = -1 - assert_array_equal(a, - array([[-1, 2, 3, 4], - [-1, -1, 7, 8], - [-1, -1, -1, 12], - [-1, -1, -1, -1]])) - b[il3] = -1 - assert_array_equal(b, - array([[-1, 2, 3, 4, 5], - [-1, -1, 8, 9, 10], - [-1, -1, -1, 14, 15], - [-1, -1, -1, -1, 20]])) - # These cover almost the whole array (two diagonals right of the main one): - a[il2] = -10 - assert_array_equal(a, - array([[-10, -10, -10, 4], - [-10, -10, -10, -10], - [-10, -10, -10, -10], - [-10, -10, -10, -10]])) - b[il4] = -10 - 
assert_array_equal(b, - array([[-10, -10, -10, 4, 5], - [-10, -10, -10, -10, 10], - [-10, -10, -10, -10, -10], - [-10, -10, -10, -10, -10]])) - - -class TestTriuIndices(object): - def test_triu_indices(self): - iu1 = triu_indices(4) - iu2 = triu_indices(4, k=2) - iu3 = triu_indices(4, m=5) - iu4 = triu_indices(4, k=2, m=5) - - a = np.array([[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12], - [13, 14, 15, 16]]) - b = np.arange(1, 21).reshape(4, 5) - - # Both for indexing: - assert_array_equal(a[iu1], - array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16])) - assert_array_equal(b[iu3], - array([1, 2, 3, 4, 5, 7, 8, 9, - 10, 13, 14, 15, 19, 20])) - - # And for assigning values: - a[iu1] = -1 - assert_array_equal(a, - array([[-1, -1, -1, -1], - [5, -1, -1, -1], - [9, 10, -1, -1], - [13, 14, 15, -1]])) - b[iu3] = -1 - assert_array_equal(b, - array([[-1, -1, -1, -1, -1], - [6, -1, -1, -1, -1], - [11, 12, -1, -1, -1], - [16, 17, 18, -1, -1]])) - - # These cover almost the whole array (two diagonals right of the - # main one): - a[iu2] = -10 - assert_array_equal(a, - array([[-1, -1, -10, -10], - [5, -1, -1, -10], - [9, 10, -1, -1], - [13, 14, 15, -1]])) - b[iu4] = -10 - assert_array_equal(b, - array([[-1, -1, -10, -10, -10], - [6, -1, -1, -10, -10], - [11, 12, -1, -1, -10], - [16, 17, 18, -1, -1]])) - - -class TestTrilIndicesFrom(object): - def test_exceptions(self): - assert_raises(ValueError, tril_indices_from, np.ones((2,))) - assert_raises(ValueError, tril_indices_from, np.ones((2, 2, 2))) - # assert_raises(ValueError, tril_indices_from, np.ones((2, 3))) - - -class TestTriuIndicesFrom(object): - def test_exceptions(self): - assert_raises(ValueError, triu_indices_from, np.ones((2,))) - assert_raises(ValueError, triu_indices_from, np.ones((2, 2, 2))) - # assert_raises(ValueError, triu_indices_from, np.ones((2, 3))) - - -class TestVander(object): - def test_basic(self): - c = np.array([0, 1, -2, 3]) - v = vander(c) - powers = np.array([[0, 0, 0, 0, 1], - [1, 1, 1, 1, 1], - [16, -8, 4, 
-2, 1], - [81, 27, 9, 3, 1]]) - # Check default value of N: - assert_array_equal(v, powers[:, 1:]) - # Check a range of N values, including 0 and 5 (greater than default) - m = powers.shape[1] - for n in range(6): - v = vander(c, N=n) - assert_array_equal(v, powers[:, m-n:m]) - - def test_dtypes(self): - c = array([11, -12, 13], dtype=np.int8) - v = vander(c) - expected = np.array([[121, 11, 1], - [144, -12, 1], - [169, 13, 1]]) - assert_array_equal(v, expected) - - c = array([1.0+1j, 1.0-1j]) - v = vander(c, N=3) - expected = np.array([[2j, 1+1j, 1], - [-2j, 1-1j, 1]]) - # The data is floating point, but the values are small integers, - # so assert_array_equal *should* be safe here (rather than, say, - # assert_array_almost_equal). - assert_array_equal(v, expected) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_type_check.py b/venv/lib/python3.7/site-packages/numpy/lib/tests/test_type_check.py deleted file mode 100644 index b3f114b..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_type_check.py +++ /dev/null @@ -1,482 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.compat import long -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises - ) -from numpy.lib.type_check import ( - common_type, mintypecode, isreal, iscomplex, isposinf, isneginf, - nan_to_num, isrealobj, iscomplexobj, asfarray, real_if_close - ) - - -def assert_all(x): - assert_(np.all(x), x) - - -class TestCommonType(object): - def test_basic(self): - ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32) - af16 = np.array([[1, 2], [3, 4]], dtype=np.float16) - af32 = np.array([[1, 2], [3, 4]], dtype=np.float32) - af64 = np.array([[1, 2], [3, 4]], dtype=np.float64) - acs = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.csingle) - acd = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.cdouble) - assert_(common_type(ai32) == np.float64) - assert_(common_type(af16) == np.float16) 
- assert_(common_type(af32) == np.float32) - assert_(common_type(af64) == np.float64) - assert_(common_type(acs) == np.csingle) - assert_(common_type(acd) == np.cdouble) - - -class TestMintypecode(object): - - def test_default_1(self): - for itype in '1bcsuwil': - assert_equal(mintypecode(itype), 'd') - assert_equal(mintypecode('f'), 'f') - assert_equal(mintypecode('d'), 'd') - assert_equal(mintypecode('F'), 'F') - assert_equal(mintypecode('D'), 'D') - - def test_default_2(self): - for itype in '1bcsuwil': - assert_equal(mintypecode(itype+'f'), 'f') - assert_equal(mintypecode(itype+'d'), 'd') - assert_equal(mintypecode(itype+'F'), 'F') - assert_equal(mintypecode(itype+'D'), 'D') - assert_equal(mintypecode('ff'), 'f') - assert_equal(mintypecode('fd'), 'd') - assert_equal(mintypecode('fF'), 'F') - assert_equal(mintypecode('fD'), 'D') - assert_equal(mintypecode('df'), 'd') - assert_equal(mintypecode('dd'), 'd') - #assert_equal(mintypecode('dF',savespace=1),'F') - assert_equal(mintypecode('dF'), 'D') - assert_equal(mintypecode('dD'), 'D') - assert_equal(mintypecode('Ff'), 'F') - #assert_equal(mintypecode('Fd',savespace=1),'F') - assert_equal(mintypecode('Fd'), 'D') - assert_equal(mintypecode('FF'), 'F') - assert_equal(mintypecode('FD'), 'D') - assert_equal(mintypecode('Df'), 'D') - assert_equal(mintypecode('Dd'), 'D') - assert_equal(mintypecode('DF'), 'D') - assert_equal(mintypecode('DD'), 'D') - - def test_default_3(self): - assert_equal(mintypecode('fdF'), 'D') - #assert_equal(mintypecode('fdF',savespace=1),'F') - assert_equal(mintypecode('fdD'), 'D') - assert_equal(mintypecode('fFD'), 'D') - assert_equal(mintypecode('dFD'), 'D') - - assert_equal(mintypecode('ifd'), 'd') - assert_equal(mintypecode('ifF'), 'F') - assert_equal(mintypecode('ifD'), 'D') - assert_equal(mintypecode('idF'), 'D') - #assert_equal(mintypecode('idF',savespace=1),'F') - assert_equal(mintypecode('idD'), 'D') - - -class TestIsscalar(object): - - def test_basic(self): - assert_(np.isscalar(3)) - 
assert_(not np.isscalar([3])) - assert_(not np.isscalar((3,))) - assert_(np.isscalar(3j)) - assert_(np.isscalar(long(10))) - assert_(np.isscalar(4.0)) - - -class TestReal(object): - - def test_real(self): - y = np.random.rand(10,) - assert_array_equal(y, np.real(y)) - - y = np.array(1) - out = np.real(y) - assert_array_equal(y, out) - assert_(isinstance(out, np.ndarray)) - - y = 1 - out = np.real(y) - assert_equal(y, out) - assert_(not isinstance(out, np.ndarray)) - - def test_cmplx(self): - y = np.random.rand(10,)+1j*np.random.rand(10,) - assert_array_equal(y.real, np.real(y)) - - y = np.array(1 + 1j) - out = np.real(y) - assert_array_equal(y.real, out) - assert_(isinstance(out, np.ndarray)) - - y = 1 + 1j - out = np.real(y) - assert_equal(1.0, out) - assert_(not isinstance(out, np.ndarray)) - - -class TestImag(object): - - def test_real(self): - y = np.random.rand(10,) - assert_array_equal(0, np.imag(y)) - - y = np.array(1) - out = np.imag(y) - assert_array_equal(0, out) - assert_(isinstance(out, np.ndarray)) - - y = 1 - out = np.imag(y) - assert_equal(0, out) - assert_(not isinstance(out, np.ndarray)) - - def test_cmplx(self): - y = np.random.rand(10,)+1j*np.random.rand(10,) - assert_array_equal(y.imag, np.imag(y)) - - y = np.array(1 + 1j) - out = np.imag(y) - assert_array_equal(y.imag, out) - assert_(isinstance(out, np.ndarray)) - - y = 1 + 1j - out = np.imag(y) - assert_equal(1.0, out) - assert_(not isinstance(out, np.ndarray)) - - -class TestIscomplex(object): - - def test_fail(self): - z = np.array([-1, 0, 1]) - res = iscomplex(z) - assert_(not np.sometrue(res, axis=0)) - - def test_pass(self): - z = np.array([-1j, 1, 0]) - res = iscomplex(z) - assert_array_equal(res, [1, 0, 0]) - - -class TestIsreal(object): - - def test_pass(self): - z = np.array([-1, 0, 1j]) - res = isreal(z) - assert_array_equal(res, [1, 1, 0]) - - def test_fail(self): - z = np.array([-1j, 1, 0]) - res = isreal(z) - assert_array_equal(res, [0, 1, 1]) - - -class TestIscomplexobj(object): 
- - def test_basic(self): - z = np.array([-1, 0, 1]) - assert_(not iscomplexobj(z)) - z = np.array([-1j, 0, -1]) - assert_(iscomplexobj(z)) - - def test_scalar(self): - assert_(not iscomplexobj(1.0)) - assert_(iscomplexobj(1+0j)) - - def test_list(self): - assert_(iscomplexobj([3, 1+0j, True])) - assert_(not iscomplexobj([3, 1, True])) - - def test_duck(self): - class DummyComplexArray: - @property - def dtype(self): - return np.dtype(complex) - dummy = DummyComplexArray() - assert_(iscomplexobj(dummy)) - - def test_pandas_duck(self): - # This tests a custom np.dtype duck-typed class, such as used by pandas - # (pandas.core.dtypes) - class PdComplex(np.complex128): - pass - class PdDtype(object): - name = 'category' - names = None - type = PdComplex - kind = 'c' - str = ' 1e10) and assert_all(np.isfinite(vals[2])) - assert_equal(type(vals), np.ndarray) - - # perform the same tests but with nan, posinf and neginf keywords - with np.errstate(divide='ignore', invalid='ignore'): - vals = nan_to_num(np.array((-1., 0, 1))/0., - nan=10, posinf=20, neginf=30) - assert_equal(vals, [30, 10, 20]) - assert_all(np.isfinite(vals[[0, 2]])) - assert_equal(type(vals), np.ndarray) - - # perform the same test but in-place - with np.errstate(divide='ignore', invalid='ignore'): - vals = np.array((-1., 0, 1))/0. - result = nan_to_num(vals, copy=False) - - assert_(result is vals) - assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0])) - assert_(vals[1] == 0) - assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2])) - assert_equal(type(vals), np.ndarray) - - # perform the same test but in-place - with np.errstate(divide='ignore', invalid='ignore'): - vals = np.array((-1., 0, 1))/0. 
- result = nan_to_num(vals, copy=False, nan=10, posinf=20, neginf=30) - - assert_(result is vals) - assert_equal(vals, [30, 10, 20]) - assert_all(np.isfinite(vals[[0, 2]])) - assert_equal(type(vals), np.ndarray) - - def test_array(self): - vals = nan_to_num([1]) - assert_array_equal(vals, np.array([1], int)) - assert_equal(type(vals), np.ndarray) - vals = nan_to_num([1], nan=10, posinf=20, neginf=30) - assert_array_equal(vals, np.array([1], int)) - assert_equal(type(vals), np.ndarray) - - def test_integer(self): - vals = nan_to_num(1) - assert_all(vals == 1) - assert_equal(type(vals), np.int_) - vals = nan_to_num(1, nan=10, posinf=20, neginf=30) - assert_all(vals == 1) - assert_equal(type(vals), np.int_) - - def test_float(self): - vals = nan_to_num(1.0) - assert_all(vals == 1.0) - assert_equal(type(vals), np.float_) - vals = nan_to_num(1.1, nan=10, posinf=20, neginf=30) - assert_all(vals == 1.1) - assert_equal(type(vals), np.float_) - - def test_complex_good(self): - vals = nan_to_num(1+1j) - assert_all(vals == 1+1j) - assert_equal(type(vals), np.complex_) - vals = nan_to_num(1+1j, nan=10, posinf=20, neginf=30) - assert_all(vals == 1+1j) - assert_equal(type(vals), np.complex_) - - def test_complex_bad(self): - with np.errstate(divide='ignore', invalid='ignore'): - v = 1 + 1j - v += np.array(0+1.j)/0. - vals = nan_to_num(v) - # !! This is actually (unexpectedly) zero - assert_all(np.isfinite(vals)) - assert_equal(type(vals), np.complex_) - - def test_complex_bad2(self): - with np.errstate(divide='ignore', invalid='ignore'): - v = 1 + 1j - v += np.array(-1+1.j)/0. - vals = nan_to_num(v) - assert_all(np.isfinite(vals)) - assert_equal(type(vals), np.complex_) - # Fixme - #assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals)) - # !! This is actually (unexpectedly) positive - # !! inf. Comment out for now, and see if it - # !! 
changes - #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals)) - - def test_do_not_rewrite_previous_keyword(self): - # This is done to test that when, for instance, nan=np.inf then these - # values are not rewritten by posinf keyword to the posinf value. - with np.errstate(divide='ignore', invalid='ignore'): - vals = nan_to_num(np.array((-1., 0, 1))/0., nan=np.inf, posinf=999) - assert_all(np.isfinite(vals[[0, 2]])) - assert_all(vals[0] < -1e10) - assert_equal(vals[[1, 2]], [np.inf, 999]) - assert_equal(type(vals), np.ndarray) - - -class TestRealIfClose(object): - - def test_basic(self): - a = np.random.rand(10) - b = real_if_close(a+1e-15j) - assert_all(isrealobj(b)) - assert_array_equal(a, b) - b = real_if_close(a+1e-7j) - assert_all(iscomplexobj(b)) - b = real_if_close(a+1e-7j, tol=1e-6) - assert_all(isrealobj(b)) - - -class TestArrayConversion(object): - - def test_asfarray(self): - a = asfarray(np.array([1, 2, 3])) - assert_equal(a.__class__, np.ndarray) - assert_(np.issubdtype(a.dtype, np.floating)) - - # previously this would infer dtypes from arrays, unlike every single - # other numpy function - assert_raises(TypeError, - asfarray, np.array([1, 2, 3]), dtype=np.array(1.0)) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_ufunclike.py b/venv/lib/python3.7/site-packages/numpy/lib/tests/test_ufunclike.py deleted file mode 100644 index 6428061..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_ufunclike.py +++ /dev/null @@ -1,106 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -import numpy.core as nx -import numpy.lib.ufunclike as ufl -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_warns, assert_raises -) - - -class TestUfunclike(object): - - def test_isposinf(self): - a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0]) - out = nx.zeros(a.shape, bool) - tgt = nx.array([True, False, False, False, False, False]) - - res = 
ufl.isposinf(a) - assert_equal(res, tgt) - res = ufl.isposinf(a, out) - assert_equal(res, tgt) - assert_equal(out, tgt) - - a = a.astype(np.complex_) - with assert_raises(TypeError): - ufl.isposinf(a) - - def test_isneginf(self): - a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0]) - out = nx.zeros(a.shape, bool) - tgt = nx.array([False, True, False, False, False, False]) - - res = ufl.isneginf(a) - assert_equal(res, tgt) - res = ufl.isneginf(a, out) - assert_equal(res, tgt) - assert_equal(out, tgt) - - a = a.astype(np.complex_) - with assert_raises(TypeError): - ufl.isneginf(a) - - def test_fix(self): - a = nx.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]]) - out = nx.zeros(a.shape, float) - tgt = nx.array([[1., 1., 1., 1.], [-1., -1., -1., -1.]]) - - res = ufl.fix(a) - assert_equal(res, tgt) - res = ufl.fix(a, out) - assert_equal(res, tgt) - assert_equal(out, tgt) - assert_equal(ufl.fix(3.14), 3) - - def test_fix_with_subclass(self): - class MyArray(nx.ndarray): - def __new__(cls, data, metadata=None): - res = nx.array(data, copy=True).view(cls) - res.metadata = metadata - return res - - def __array_wrap__(self, obj, context=None): - if isinstance(obj, MyArray): - obj.metadata = self.metadata - return obj - - def __array_finalize__(self, obj): - self.metadata = getattr(obj, 'metadata', None) - return self - - a = nx.array([1.1, -1.1]) - m = MyArray(a, metadata='foo') - f = ufl.fix(m) - assert_array_equal(f, nx.array([1, -1])) - assert_(isinstance(f, MyArray)) - assert_equal(f.metadata, 'foo') - - # check 0d arrays don't decay to scalars - m0d = m[0,...] 
- m0d.metadata = 'bar' - f0d = ufl.fix(m0d) - assert_(isinstance(f0d, MyArray)) - assert_equal(f0d.metadata, 'bar') - - def test_deprecated(self): - # NumPy 1.13.0, 2017-04-26 - assert_warns(DeprecationWarning, ufl.fix, [1, 2], y=nx.empty(2)) - assert_warns(DeprecationWarning, ufl.isposinf, [1, 2], y=nx.empty(2)) - assert_warns(DeprecationWarning, ufl.isneginf, [1, 2], y=nx.empty(2)) - - def test_scalar(self): - x = np.inf - actual = np.isposinf(x) - expected = np.True_ - assert_equal(actual, expected) - assert_equal(type(actual), type(expected)) - - x = -3.4 - actual = np.fix(x) - expected = np.float64(-3.0) - assert_equal(actual, expected) - assert_equal(type(actual), type(expected)) - - out = np.array(0.0) - actual = np.fix(x, out=out) - assert_(actual is out) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_utils.py b/venv/lib/python3.7/site-packages/numpy/lib/tests/test_utils.py deleted file mode 100644 index 9673a05..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/tests/test_utils.py +++ /dev/null @@ -1,137 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import inspect -import sys -import pytest - -from numpy.core import arange -from numpy.testing import assert_, assert_equal, assert_raises_regex -from numpy.lib import deprecate -import numpy.lib.utils as utils - -if sys.version_info[0] >= 3: - from io import StringIO -else: - from StringIO import StringIO - - -@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -def test_lookfor(): - out = StringIO() - utils.lookfor('eigenvalue', module='numpy', output=out, - import_modules=False) - out = out.getvalue() - assert_('numpy.linalg.eig' in out) - - -@deprecate -def old_func(self, x): - return x - - -@deprecate(message="Rather use new_func2") -def old_func2(self, x): - return x - - -def old_func3(self, x): - return x -new_func3 = deprecate(old_func3, old_name="old_func3", new_name="new_func3") - - -def old_func4(self, x): - """Summary. 
- - Further info. - """ - return x -new_func4 = deprecate(old_func4) - - -def old_func5(self, x): - """Summary. - - Bizarre indentation. - """ - return x -new_func5 = deprecate(old_func5) - - -def old_func6(self, x): - """ - Also in PEP-257. - """ - return x -new_func6 = deprecate(old_func6) - - -def test_deprecate_decorator(): - assert_('deprecated' in old_func.__doc__) - - -def test_deprecate_decorator_message(): - assert_('Rather use new_func2' in old_func2.__doc__) - - -def test_deprecate_fn(): - assert_('old_func3' in new_func3.__doc__) - assert_('new_func3' in new_func3.__doc__) - - -@pytest.mark.skipif(sys.flags.optimize == 2, reason="-OO discards docstrings") -def test_deprecate_help_indentation(): - _compare_docs(old_func4, new_func4) - _compare_docs(old_func5, new_func5) - _compare_docs(old_func6, new_func6) - - -def _compare_docs(old_func, new_func): - old_doc = inspect.getdoc(old_func) - new_doc = inspect.getdoc(new_func) - index = new_doc.index('\n\n') + 2 - assert_equal(new_doc[index:], old_doc) - - -@pytest.mark.skipif(sys.flags.optimize == 2, reason="-OO discards docstrings") -def test_deprecate_preserve_whitespace(): - assert_('\n Bizarre' in new_func5.__doc__) - - -def test_safe_eval_nameconstant(): - # Test if safe_eval supports Python 3.4 _ast.NameConstant - utils.safe_eval('None') - - -class TestByteBounds(object): - - def test_byte_bounds(self): - # pointer difference matches size * itemsize - # due to contiguity - a = arange(12).reshape(3, 4) - low, high = utils.byte_bounds(a) - assert_equal(high - low, a.size * a.itemsize) - - def test_unusual_order_positive_stride(self): - a = arange(12).reshape(3, 4) - b = a.T - low, high = utils.byte_bounds(b) - assert_equal(high - low, b.size * b.itemsize) - - def test_unusual_order_negative_stride(self): - a = arange(12).reshape(3, 4) - b = a.T[::-1] - low, high = utils.byte_bounds(b) - assert_equal(high - low, b.size * b.itemsize) - - def test_strided(self): - a = arange(12) - b = a[::2] - low, high = 
utils.byte_bounds(b) - # the largest pointer address is lost (even numbers only in the - # stride), and compensate addresses for striding by 2 - assert_equal(high - low, b.size * 2 * b.itemsize - b.itemsize) - - -def test_assert_raises_regex_context_manager(): - with assert_raises_regex(ValueError, 'no deprecation warning'): - raise ValueError('no deprecation warning') diff --git a/venv/lib/python3.7/site-packages/numpy/lib/twodim_base.py b/venv/lib/python3.7/site-packages/numpy/lib/twodim_base.py deleted file mode 100644 index f453921..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/twodim_base.py +++ /dev/null @@ -1,1017 +0,0 @@ -""" Basic functions for manipulating 2d arrays - -""" -from __future__ import division, absolute_import, print_function - -import functools - -from numpy.core.numeric import ( - absolute, asanyarray, arange, zeros, greater_equal, multiply, ones, - asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal, - nonzero - ) -from numpy.core.overrides import set_module -from numpy.core import overrides -from numpy.core import iinfo, transpose - - -__all__ = [ - 'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu', - 'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices', - 'tril_indices_from', 'triu_indices', 'triu_indices_from', ] - - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - - -i1 = iinfo(int8) -i2 = iinfo(int16) -i4 = iinfo(int32) - - -def _min_int(low, high): - """ get small int that fits the range """ - if high <= i1.max and low >= i1.min: - return int8 - if high <= i2.max and low >= i2.min: - return int16 - if high <= i4.max and low >= i4.min: - return int32 - return int64 - - -def _flip_dispatcher(m): - return (m,) - - -@array_function_dispatch(_flip_dispatcher) -def fliplr(m): - """ - Flip array in the left/right direction. - - Flip the entries in each row in the left/right direction. 
- Columns are preserved, but appear in a different order than before. - - Parameters - ---------- - m : array_like - Input array, must be at least 2-D. - - Returns - ------- - f : ndarray - A view of `m` with the columns reversed. Since a view - is returned, this operation is :math:`\\mathcal O(1)`. - - See Also - -------- - flipud : Flip array in the up/down direction. - rot90 : Rotate array counterclockwise. - - Notes - ----- - Equivalent to m[:,::-1]. Requires the array to be at least 2-D. - - Examples - -------- - >>> A = np.diag([1.,2.,3.]) - >>> A - array([[1., 0., 0.], - [0., 2., 0.], - [0., 0., 3.]]) - >>> np.fliplr(A) - array([[0., 0., 1.], - [0., 2., 0.], - [3., 0., 0.]]) - - >>> A = np.random.randn(2,3,5) - >>> np.all(np.fliplr(A) == A[:,::-1,...]) - True - - """ - m = asanyarray(m) - if m.ndim < 2: - raise ValueError("Input must be >= 2-d.") - return m[:, ::-1] - - -@array_function_dispatch(_flip_dispatcher) -def flipud(m): - """ - Flip array in the up/down direction. - - Flip the entries in each column in the up/down direction. - Rows are preserved, but appear in a different order than before. - - Parameters - ---------- - m : array_like - Input array. - - Returns - ------- - out : array_like - A view of `m` with the rows reversed. Since a view is - returned, this operation is :math:`\\mathcal O(1)`. - - See Also - -------- - fliplr : Flip array in the left/right direction. - rot90 : Rotate array counterclockwise. - - Notes - ----- - Equivalent to ``m[::-1,...]``. - Does not require the array to be two-dimensional. - - Examples - -------- - >>> A = np.diag([1.0, 2, 3]) - >>> A - array([[1., 0., 0.], - [0., 2., 0.], - [0., 0., 3.]]) - >>> np.flipud(A) - array([[0., 0., 3.], - [0., 2., 0.], - [1., 0., 0.]]) - - >>> A = np.random.randn(2,3,5) - >>> np.all(np.flipud(A) == A[::-1,...]) - True - - >>> np.flipud([1,2]) - array([2, 1]) - - """ - m = asanyarray(m) - if m.ndim < 1: - raise ValueError("Input must be >= 1-d.") - return m[::-1, ...] 
- - -@set_module('numpy') -def eye(N, M=None, k=0, dtype=float, order='C'): - """ - Return a 2-D array with ones on the diagonal and zeros elsewhere. - - Parameters - ---------- - N : int - Number of rows in the output. - M : int, optional - Number of columns in the output. If None, defaults to `N`. - k : int, optional - Index of the diagonal: 0 (the default) refers to the main diagonal, - a positive value refers to an upper diagonal, and a negative value - to a lower diagonal. - dtype : data-type, optional - Data-type of the returned array. - order : {'C', 'F'}, optional - Whether the output should be stored in row-major (C-style) or - column-major (Fortran-style) order in memory. - - .. versionadded:: 1.14.0 - - Returns - ------- - I : ndarray of shape (N,M) - An array where all elements are equal to zero, except for the `k`-th - diagonal, whose values are equal to one. - - See Also - -------- - identity : (almost) equivalent function - diag : diagonal 2-D array from a 1-D array specified by the user. - - Examples - -------- - >>> np.eye(2, dtype=int) - array([[1, 0], - [0, 1]]) - >>> np.eye(3, k=1) - array([[0., 1., 0.], - [0., 0., 1.], - [0., 0., 0.]]) - - """ - if M is None: - M = N - m = zeros((N, M), dtype=dtype, order=order) - if k >= M: - return m - if k >= 0: - i = k - else: - i = (-k) * M - m[:M-k].flat[i::M+1] = 1 - return m - - -def _diag_dispatcher(v, k=None): - return (v,) - - -@array_function_dispatch(_diag_dispatcher) -def diag(v, k=0): - """ - Extract a diagonal or construct a diagonal array. - - See the more detailed documentation for ``numpy.diagonal`` if you use this - function to extract a diagonal and wish to write to the resulting array; - whether it returns a copy or a view depends on what version of numpy you - are using. - - Parameters - ---------- - v : array_like - If `v` is a 2-D array, return a copy of its `k`-th diagonal. - If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th - diagonal. 
- k : int, optional - Diagonal in question. The default is 0. Use `k>0` for diagonals - above the main diagonal, and `k<0` for diagonals below the main - diagonal. - - Returns - ------- - out : ndarray - The extracted diagonal or constructed diagonal array. - - See Also - -------- - diagonal : Return specified diagonals. - diagflat : Create a 2-D array with the flattened input as a diagonal. - trace : Sum along diagonals. - triu : Upper triangle of an array. - tril : Lower triangle of an array. - - Examples - -------- - >>> x = np.arange(9).reshape((3,3)) - >>> x - array([[0, 1, 2], - [3, 4, 5], - [6, 7, 8]]) - - >>> np.diag(x) - array([0, 4, 8]) - >>> np.diag(x, k=1) - array([1, 5]) - >>> np.diag(x, k=-1) - array([3, 7]) - - >>> np.diag(np.diag(x)) - array([[0, 0, 0], - [0, 4, 0], - [0, 0, 8]]) - - """ - v = asanyarray(v) - s = v.shape - if len(s) == 1: - n = s[0]+abs(k) - res = zeros((n, n), v.dtype) - if k >= 0: - i = k - else: - i = (-k) * n - res[:n-k].flat[i::n+1] = v - return res - elif len(s) == 2: - return diagonal(v, k) - else: - raise ValueError("Input must be 1- or 2-d.") - - -@array_function_dispatch(_diag_dispatcher) -def diagflat(v, k=0): - """ - Create a two-dimensional array with the flattened input as a diagonal. - - Parameters - ---------- - v : array_like - Input data, which is flattened and set as the `k`-th - diagonal of the output. - k : int, optional - Diagonal to set; 0, the default, corresponds to the "main" diagonal, - a positive (negative) `k` giving the number of the diagonal above - (below) the main. - - Returns - ------- - out : ndarray - The 2-D output array. - - See Also - -------- - diag : MATLAB work-alike for 1-D and 2-D arrays. - diagonal : Return specified diagonals. - trace : Sum along diagonals. 
- - Examples - -------- - >>> np.diagflat([[1,2], [3,4]]) - array([[1, 0, 0, 0], - [0, 2, 0, 0], - [0, 0, 3, 0], - [0, 0, 0, 4]]) - - >>> np.diagflat([1,2], 1) - array([[0, 1, 0], - [0, 0, 2], - [0, 0, 0]]) - - """ - try: - wrap = v.__array_wrap__ - except AttributeError: - wrap = None - v = asarray(v).ravel() - s = len(v) - n = s + abs(k) - res = zeros((n, n), v.dtype) - if (k >= 0): - i = arange(0, n-k) - fi = i+k+i*n - else: - i = arange(0, n+k) - fi = i+(i-k)*n - res.flat[fi] = v - if not wrap: - return res - return wrap(res) - - -@set_module('numpy') -def tri(N, M=None, k=0, dtype=float): - """ - An array with ones at and below the given diagonal and zeros elsewhere. - - Parameters - ---------- - N : int - Number of rows in the array. - M : int, optional - Number of columns in the array. - By default, `M` is taken equal to `N`. - k : int, optional - The sub-diagonal at and below which the array is filled. - `k` = 0 is the main diagonal, while `k` < 0 is below it, - and `k` > 0 is above. The default is 0. - dtype : dtype, optional - Data type of the returned array. The default is float. - - Returns - ------- - tri : ndarray of shape (N, M) - Array with its lower triangle filled with ones and zero elsewhere; - in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise. - - Examples - -------- - >>> np.tri(3, 5, 2, dtype=int) - array([[1, 1, 1, 0, 0], - [1, 1, 1, 1, 0], - [1, 1, 1, 1, 1]]) - - >>> np.tri(3, 5, -1) - array([[0., 0., 0., 0., 0.], - [1., 0., 0., 0., 0.], - [1., 1., 0., 0., 0.]]) - - """ - if M is None: - M = N - - m = greater_equal.outer(arange(N, dtype=_min_int(0, N)), - arange(-k, M-k, dtype=_min_int(-k, M - k))) - - # Avoid making a copy if the requested type is already bool - m = m.astype(dtype, copy=False) - - return m - - -def _trilu_dispatcher(m, k=None): - return (m,) - - -@array_function_dispatch(_trilu_dispatcher) -def tril(m, k=0): - """ - Lower triangle of an array. 
- - Return a copy of an array with elements above the `k`-th diagonal zeroed. - - Parameters - ---------- - m : array_like, shape (M, N) - Input array. - k : int, optional - Diagonal above which to zero elements. `k = 0` (the default) is the - main diagonal, `k < 0` is below it and `k > 0` is above. - - Returns - ------- - tril : ndarray, shape (M, N) - Lower triangle of `m`, of same shape and data-type as `m`. - - See Also - -------- - triu : same thing, only for the upper triangle - - Examples - -------- - >>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) - array([[ 0, 0, 0], - [ 4, 0, 0], - [ 7, 8, 0], - [10, 11, 12]]) - - """ - m = asanyarray(m) - mask = tri(*m.shape[-2:], k=k, dtype=bool) - - return where(mask, m, zeros(1, m.dtype)) - - -@array_function_dispatch(_trilu_dispatcher) -def triu(m, k=0): - """ - Upper triangle of an array. - - Return a copy of a matrix with the elements below the `k`-th diagonal - zeroed. - - Please refer to the documentation for `tril` for further details. - - See Also - -------- - tril : lower triangle of an array - - Examples - -------- - >>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) - array([[ 1, 2, 3], - [ 4, 5, 6], - [ 0, 8, 9], - [ 0, 0, 12]]) - - """ - m = asanyarray(m) - mask = tri(*m.shape[-2:], k=k-1, dtype=bool) - - return where(mask, zeros(1, m.dtype), m) - - -def _vander_dispatcher(x, N=None, increasing=None): - return (x,) - - -# Originally borrowed from John Hunter and matplotlib -@array_function_dispatch(_vander_dispatcher) -def vander(x, N=None, increasing=False): - """ - Generate a Vandermonde matrix. - - The columns of the output matrix are powers of the input vector. The - order of the powers is determined by the `increasing` boolean argument. - Specifically, when `increasing` is False, the `i`-th output column is - the input vector raised element-wise to the power of ``N - i - 1``. Such - a matrix with a geometric progression in each row is named for Alexandre- - Theophile Vandermonde. 
- - Parameters - ---------- - x : array_like - 1-D input array. - N : int, optional - Number of columns in the output. If `N` is not specified, a square - array is returned (``N = len(x)``). - increasing : bool, optional - Order of the powers of the columns. If True, the powers increase - from left to right, if False (the default) they are reversed. - - .. versionadded:: 1.9.0 - - Returns - ------- - out : ndarray - Vandermonde matrix. If `increasing` is False, the first column is - ``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is - True, the columns are ``x^0, x^1, ..., x^(N-1)``. - - See Also - -------- - polynomial.polynomial.polyvander - - Examples - -------- - >>> x = np.array([1, 2, 3, 5]) - >>> N = 3 - >>> np.vander(x, N) - array([[ 1, 1, 1], - [ 4, 2, 1], - [ 9, 3, 1], - [25, 5, 1]]) - - >>> np.column_stack([x**(N-1-i) for i in range(N)]) - array([[ 1, 1, 1], - [ 4, 2, 1], - [ 9, 3, 1], - [25, 5, 1]]) - - >>> x = np.array([1, 2, 3, 5]) - >>> np.vander(x) - array([[ 1, 1, 1, 1], - [ 8, 4, 2, 1], - [ 27, 9, 3, 1], - [125, 25, 5, 1]]) - >>> np.vander(x, increasing=True) - array([[ 1, 1, 1, 1], - [ 1, 2, 4, 8], - [ 1, 3, 9, 27], - [ 1, 5, 25, 125]]) - - The determinant of a square Vandermonde matrix is the product - of the differences between the values of the input vector: - - >>> np.linalg.det(np.vander(x)) - 48.000000000000043 # may vary - >>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1) - 48 - - """ - x = asarray(x) - if x.ndim != 1: - raise ValueError("x must be a one-dimensional array or sequence.") - if N is None: - N = len(x) - - v = empty((len(x), N), dtype=promote_types(x.dtype, int)) - tmp = v[:, ::-1] if not increasing else v - - if N > 0: - tmp[:, 0] = 1 - if N > 1: - tmp[:, 1:] = x[:, None] - multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1) - - return v - - -def _histogram2d_dispatcher(x, y, bins=None, range=None, normed=None, - weights=None, density=None): - yield x - yield y - - # This terrible logic is adapted from the checks 
in histogram2d - try: - N = len(bins) - except TypeError: - N = 1 - if N == 2: - yield from bins # bins=[x, y] - else: - yield bins - - yield weights - - -@array_function_dispatch(_histogram2d_dispatcher) -def histogram2d(x, y, bins=10, range=None, normed=None, weights=None, - density=None): - """ - Compute the bi-dimensional histogram of two data samples. - - Parameters - ---------- - x : array_like, shape (N,) - An array containing the x coordinates of the points to be - histogrammed. - y : array_like, shape (N,) - An array containing the y coordinates of the points to be - histogrammed. - bins : int or array_like or [int, int] or [array, array], optional - The bin specification: - - * If int, the number of bins for the two dimensions (nx=ny=bins). - * If array_like, the bin edges for the two dimensions - (x_edges=y_edges=bins). - * If [int, int], the number of bins in each dimension - (nx, ny = bins). - * If [array, array], the bin edges in each dimension - (x_edges, y_edges = bins). - * A combination [int, array] or [array, int], where int - is the number of bins and array is the bin edges. - - range : array_like, shape(2,2), optional - The leftmost and rightmost edges of the bins along each dimension - (if not specified explicitly in the `bins` parameters): - ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range - will be considered outliers and not tallied in the histogram. - density : bool, optional - If False, the default, returns the number of samples in each bin. - If True, returns the probability *density* function at the bin, - ``bin_count / sample_count / bin_area``. - normed : bool, optional - An alias for the density argument that behaves identically. To avoid - confusion with the broken normed argument to `histogram`, `density` - should be preferred. - weights : array_like, shape(N,), optional - An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. - Weights are normalized to 1 if `normed` is True. 
If `normed` is - False, the values of the returned histogram are equal to the sum of - the weights belonging to the samples falling into each bin. - - Returns - ------- - H : ndarray, shape(nx, ny) - The bi-dimensional histogram of samples `x` and `y`. Values in `x` - are histogrammed along the first dimension and values in `y` are - histogrammed along the second dimension. - xedges : ndarray, shape(nx+1,) - The bin edges along the first dimension. - yedges : ndarray, shape(ny+1,) - The bin edges along the second dimension. - - See Also - -------- - histogram : 1D histogram - histogramdd : Multidimensional histogram - - Notes - ----- - When `normed` is True, then the returned histogram is the sample - density, defined such that the sum over bins of the product - ``bin_value * bin_area`` is 1. - - Please note that the histogram does not follow the Cartesian convention - where `x` values are on the abscissa and `y` values on the ordinate - axis. Rather, `x` is histogrammed along the first dimension of the - array (vertical), and `y` along the second dimension of the array - (horizontal). This ensures compatibility with `histogramdd`. - - Examples - -------- - >>> from matplotlib.image import NonUniformImage - >>> import matplotlib.pyplot as plt - - Construct a 2-D histogram with variable bin width. First define the bin - edges: - - >>> xedges = [0, 1, 3, 5] - >>> yedges = [0, 2, 3, 4, 6] - - Next we create a histogram H with random bin content: - - >>> x = np.random.normal(2, 1, 100) - >>> y = np.random.normal(1, 1, 100) - >>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges)) - >>> H = H.T # Let each row list bins with common y range. - - :func:`imshow ` can only display square bins: - - >>> fig = plt.figure(figsize=(7, 3)) - >>> ax = fig.add_subplot(131, title='imshow: square bins') - >>> plt.imshow(H, interpolation='nearest', origin='low', - ... 
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]]) - - - :func:`pcolormesh ` can display actual edges: - - >>> ax = fig.add_subplot(132, title='pcolormesh: actual edges', - ... aspect='equal') - >>> X, Y = np.meshgrid(xedges, yedges) - >>> ax.pcolormesh(X, Y, H) - - - :class:`NonUniformImage ` can be used to - display actual bin edges with interpolation: - - >>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated', - ... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]]) - >>> im = NonUniformImage(ax, interpolation='bilinear') - >>> xcenters = (xedges[:-1] + xedges[1:]) / 2 - >>> ycenters = (yedges[:-1] + yedges[1:]) / 2 - >>> im.set_data(xcenters, ycenters, H) - >>> ax.images.append(im) - >>> plt.show() - - """ - from numpy import histogramdd - - try: - N = len(bins) - except TypeError: - N = 1 - - if N != 1 and N != 2: - xedges = yedges = asarray(bins) - bins = [xedges, yedges] - hist, edges = histogramdd([x, y], bins, range, normed, weights, density) - return hist, edges[0], edges[1] - - -@set_module('numpy') -def mask_indices(n, mask_func, k=0): - """ - Return the indices to access (n, n) arrays, given a masking function. - - Assume `mask_func` is a function that, for a square array a of size - ``(n, n)`` with a possible offset argument `k`, when called as - ``mask_func(a, k)`` returns a new array with zeros in certain locations - (functions like `triu` or `tril` do precisely this). Then this function - returns the indices where the non-zero values would be located. - - Parameters - ---------- - n : int - The returned indices will be valid to access arrays of shape (n, n). - mask_func : callable - A function whose call signature is similar to that of `triu`, `tril`. - That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`. - `k` is an optional argument to the function. - k : scalar - An optional argument which is passed through to `mask_func`. 
Functions - like `triu`, `tril` take a second argument that is interpreted as an - offset. - - Returns - ------- - indices : tuple of arrays. - The `n` arrays of indices corresponding to the locations where - ``mask_func(np.ones((n, n)), k)`` is True. - - See Also - -------- - triu, tril, triu_indices, tril_indices - - Notes - ----- - .. versionadded:: 1.4.0 - - Examples - -------- - These are the indices that would allow you to access the upper triangular - part of any 3x3 array: - - >>> iu = np.mask_indices(3, np.triu) - - For example, if `a` is a 3x3 array: - - >>> a = np.arange(9).reshape(3, 3) - >>> a - array([[0, 1, 2], - [3, 4, 5], - [6, 7, 8]]) - >>> a[iu] - array([0, 1, 2, 4, 5, 8]) - - An offset can be passed also to the masking function. This gets us the - indices starting on the first diagonal right of the main one: - - >>> iu1 = np.mask_indices(3, np.triu, 1) - - with which we now extract only three elements: - - >>> a[iu1] - array([1, 2, 5]) - - """ - m = ones((n, n), int) - a = mask_func(m, k) - return nonzero(a != 0) - - -@set_module('numpy') -def tril_indices(n, k=0, m=None): - """ - Return the indices for the lower-triangle of an (n, m) array. - - Parameters - ---------- - n : int - The row dimension of the arrays for which the returned - indices will be valid. - k : int, optional - Diagonal offset (see `tril` for details). - m : int, optional - .. versionadded:: 1.9.0 - - The column dimension of the arrays for which the returned - arrays will be valid. - By default `m` is taken equal to `n`. - - - Returns - ------- - inds : tuple of arrays - The indices for the triangle. The returned tuple contains two arrays, - each with the indices along one dimension of the array. - - See also - -------- - triu_indices : similar function, for upper-triangular. - mask_indices : generic function accepting an arbitrary mask function. - tril, triu - - Notes - ----- - .. 
versionadded:: 1.4.0 - - Examples - -------- - Compute two different sets of indices to access 4x4 arrays, one for the - lower triangular part starting at the main diagonal, and one starting two - diagonals further right: - - >>> il1 = np.tril_indices(4) - >>> il2 = np.tril_indices(4, 2) - - Here is how they can be used with a sample array: - - >>> a = np.arange(16).reshape(4, 4) - >>> a - array([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11], - [12, 13, 14, 15]]) - - Both for indexing: - - >>> a[il1] - array([ 0, 4, 5, ..., 13, 14, 15]) - - And for assigning values: - - >>> a[il1] = -1 - >>> a - array([[-1, 1, 2, 3], - [-1, -1, 6, 7], - [-1, -1, -1, 11], - [-1, -1, -1, -1]]) - - These cover almost the whole array (two diagonals right of the main one): - - >>> a[il2] = -10 - >>> a - array([[-10, -10, -10, 3], - [-10, -10, -10, -10], - [-10, -10, -10, -10], - [-10, -10, -10, -10]]) - - """ - return nonzero(tri(n, m, k=k, dtype=bool)) - - -def _trilu_indices_form_dispatcher(arr, k=None): - return (arr,) - - -@array_function_dispatch(_trilu_indices_form_dispatcher) -def tril_indices_from(arr, k=0): - """ - Return the indices for the lower-triangle of arr. - - See `tril_indices` for full details. - - Parameters - ---------- - arr : array_like - The indices will be valid for square arrays whose dimensions are - the same as arr. - k : int, optional - Diagonal offset (see `tril` for details). - - See Also - -------- - tril_indices, tril - - Notes - ----- - .. versionadded:: 1.4.0 - - """ - if arr.ndim != 2: - raise ValueError("input array must be 2-d") - return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1]) - - -@set_module('numpy') -def triu_indices(n, k=0, m=None): - """ - Return the indices for the upper-triangle of an (n, m) array. - - Parameters - ---------- - n : int - The size of the arrays for which the returned indices will - be valid. - k : int, optional - Diagonal offset (see `triu` for details). - m : int, optional - .. 
versionadded:: 1.9.0 - - The column dimension of the arrays for which the returned - arrays will be valid. - By default `m` is taken equal to `n`. - - - Returns - ------- - inds : tuple, shape(2) of ndarrays, shape(`n`) - The indices for the triangle. The returned tuple contains two arrays, - each with the indices along one dimension of the array. Can be used - to slice a ndarray of shape(`n`, `n`). - - See also - -------- - tril_indices : similar function, for lower-triangular. - mask_indices : generic function accepting an arbitrary mask function. - triu, tril - - Notes - ----- - .. versionadded:: 1.4.0 - - Examples - -------- - Compute two different sets of indices to access 4x4 arrays, one for the - upper triangular part starting at the main diagonal, and one starting two - diagonals further right: - - >>> iu1 = np.triu_indices(4) - >>> iu2 = np.triu_indices(4, 2) - - Here is how they can be used with a sample array: - - >>> a = np.arange(16).reshape(4, 4) - >>> a - array([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11], - [12, 13, 14, 15]]) - - Both for indexing: - - >>> a[iu1] - array([ 0, 1, 2, ..., 10, 11, 15]) - - And for assigning values: - - >>> a[iu1] = -1 - >>> a - array([[-1, -1, -1, -1], - [ 4, -1, -1, -1], - [ 8, 9, -1, -1], - [12, 13, 14, -1]]) - - These cover only a small part of the whole array (two diagonals right - of the main one): - - >>> a[iu2] = -10 - >>> a - array([[ -1, -1, -10, -10], - [ 4, -1, -1, -10], - [ 8, 9, -1, -1], - [ 12, 13, 14, -1]]) - - """ - return nonzero(~tri(n, m, k=k-1, dtype=bool)) - - -@array_function_dispatch(_trilu_indices_form_dispatcher) -def triu_indices_from(arr, k=0): - """ - Return the indices for the upper-triangle of arr. - - See `triu_indices` for full details. - - Parameters - ---------- - arr : ndarray, shape(N, N) - The indices will be valid for square arrays. - k : int, optional - Diagonal offset (see `triu` for details). 
- - Returns - ------- - triu_indices_from : tuple, shape(2) of ndarray, shape(N) - Indices for the upper-triangle of `arr`. - - See Also - -------- - triu_indices, triu - - Notes - ----- - .. versionadded:: 1.4.0 - - """ - if arr.ndim != 2: - raise ValueError("input array must be 2-d") - return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1]) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/type_check.py b/venv/lib/python3.7/site-packages/numpy/lib/type_check.py deleted file mode 100644 index 9771172..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/type_check.py +++ /dev/null @@ -1,732 +0,0 @@ -"""Automatically adapted for numpy Sep 19, 2005 by convertcode.py - -""" -from __future__ import division, absolute_import, print_function -import functools -import warnings - -__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex', - 'isreal', 'nan_to_num', 'real', 'real_if_close', - 'typename', 'asfarray', 'mintypecode', 'asscalar', - 'common_type'] - -import numpy.core.numeric as _nx -from numpy.core.numeric import asarray, asanyarray, isnan, zeros -from numpy.core.overrides import set_module -from numpy.core import overrides -from .ufunclike import isneginf, isposinf - - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - - -_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?' - - -@set_module('numpy') -def mintypecode(typechars, typeset='GDFgdf', default='d'): - """ - Return the character for the minimum-size type to which given types can - be safely cast. - - The returned type character must represent the smallest size dtype such - that an array of the returned type can handle the data from an array of - all types in `typechars` (or if `typechars` is an array, then its - dtype.char). - - Parameters - ---------- - typechars : list of str or array_like - If a list of strings, each string should represent a dtype. - If array_like, the character representation of the array dtype is used. 
- typeset : str or list of str, optional - The set of characters that the returned character is chosen from. - The default set is 'GDFgdf'. - default : str, optional - The default character, this is returned if none of the characters in - `typechars` matches a character in `typeset`. - - Returns - ------- - typechar : str - The character representing the minimum-size type that was found. - - See Also - -------- - dtype, sctype2char, maximum_sctype - - Examples - -------- - >>> np.mintypecode(['d', 'f', 'S']) - 'd' - >>> x = np.array([1.1, 2-3.j]) - >>> np.mintypecode(x) - 'D' - - >>> np.mintypecode('abceh', default='G') - 'G' - - """ - typecodes = ((isinstance(t, str) and t) or asarray(t).dtype.char - for t in typechars) - intersection = set(t for t in typecodes if t in typeset) - if not intersection: - return default - if 'F' in intersection and 'd' in intersection: - return 'D' - return min(intersection, key=_typecodes_by_elsize.index) - - -def _asfarray_dispatcher(a, dtype=None): - return (a,) - - -@array_function_dispatch(_asfarray_dispatcher) -def asfarray(a, dtype=_nx.float_): - """ - Return an array converted to a float type. - - Parameters - ---------- - a : array_like - The input array. - dtype : str or dtype object, optional - Float type code to coerce input array `a`. If `dtype` is one of the - 'int' dtypes, it is replaced with float64. - - Returns - ------- - out : ndarray - The input `a` as a float ndarray. - - Examples - -------- - >>> np.asfarray([2, 3]) - array([2., 3.]) - >>> np.asfarray([2, 3], dtype='float') - array([2., 3.]) - >>> np.asfarray([2, 3], dtype='int8') - array([2., 3.]) - - """ - if not _nx.issubdtype(dtype, _nx.inexact): - dtype = _nx.float_ - return asarray(a, dtype=dtype) - - -def _real_dispatcher(val): - return (val,) - - -@array_function_dispatch(_real_dispatcher) -def real(val): - """ - Return the real part of the complex argument. - - Parameters - ---------- - val : array_like - Input array. 
- - Returns - ------- - out : ndarray or scalar - The real component of the complex argument. If `val` is real, the type - of `val` is used for the output. If `val` has complex elements, the - returned type is float. - - See Also - -------- - real_if_close, imag, angle - - Examples - -------- - >>> a = np.array([1+2j, 3+4j, 5+6j]) - >>> a.real - array([1., 3., 5.]) - >>> a.real = 9 - >>> a - array([9.+2.j, 9.+4.j, 9.+6.j]) - >>> a.real = np.array([9, 8, 7]) - >>> a - array([9.+2.j, 8.+4.j, 7.+6.j]) - >>> np.real(1 + 1j) - 1.0 - - """ - try: - return val.real - except AttributeError: - return asanyarray(val).real - - -def _imag_dispatcher(val): - return (val,) - - -@array_function_dispatch(_imag_dispatcher) -def imag(val): - """ - Return the imaginary part of the complex argument. - - Parameters - ---------- - val : array_like - Input array. - - Returns - ------- - out : ndarray or scalar - The imaginary component of the complex argument. If `val` is real, - the type of `val` is used for the output. If `val` has complex - elements, the returned type is float. - - See Also - -------- - real, angle, real_if_close - - Examples - -------- - >>> a = np.array([1+2j, 3+4j, 5+6j]) - >>> a.imag - array([2., 4., 6.]) - >>> a.imag = np.array([8, 10, 12]) - >>> a - array([1. +8.j, 3.+10.j, 5.+12.j]) - >>> np.imag(1 + 1j) - 1.0 - - """ - try: - return val.imag - except AttributeError: - return asanyarray(val).imag - - -def _is_type_dispatcher(x): - return (x,) - - -@array_function_dispatch(_is_type_dispatcher) -def iscomplex(x): - """ - Returns a bool array, where True if input element is complex. - - What is tested is whether the input has a non-zero imaginary part, not if - the input type is complex. - - Parameters - ---------- - x : array_like - Input array. - - Returns - ------- - out : ndarray of bools - Output array. - - See Also - -------- - isreal - iscomplexobj : Return True if x is a complex type or an array of complex - numbers. 
- - Examples - -------- - >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j]) - array([ True, False, False, False, False, True]) - - """ - ax = asanyarray(x) - if issubclass(ax.dtype.type, _nx.complexfloating): - return ax.imag != 0 - res = zeros(ax.shape, bool) - return res[()] # convert to scalar if needed - - -@array_function_dispatch(_is_type_dispatcher) -def isreal(x): - """ - Returns a bool array, where True if input element is real. - - If element has complex type with zero complex part, the return value - for that element is True. - - Parameters - ---------- - x : array_like - Input array. - - Returns - ------- - out : ndarray, bool - Boolean array of same shape as `x`. - - See Also - -------- - iscomplex - isrealobj : Return True if x is not a complex type. - - Examples - -------- - >>> np.isreal([1+1j, 1+0j, 4.5, 3, 2, 2j]) - array([False, True, True, True, True, False]) - - """ - return imag(x) == 0 - - -@array_function_dispatch(_is_type_dispatcher) -def iscomplexobj(x): - """ - Check for a complex type or an array of complex numbers. - - The type of the input is checked, not the value. Even if the input - has an imaginary part equal to zero, `iscomplexobj` evaluates to True. - - Parameters - ---------- - x : any - The input can be of any type and shape. - - Returns - ------- - iscomplexobj : bool - The return value, True if `x` is of a complex type or has at least - one complex element. - - See Also - -------- - isrealobj, iscomplex - - Examples - -------- - >>> np.iscomplexobj(1) - False - >>> np.iscomplexobj(1+0j) - True - >>> np.iscomplexobj([3, 1+0j, True]) - True - - """ - try: - dtype = x.dtype - type_ = dtype.type - except AttributeError: - type_ = asarray(x).dtype.type - return issubclass(type_, _nx.complexfloating) - - -@array_function_dispatch(_is_type_dispatcher) -def isrealobj(x): - """ - Return True if x is a not complex type or an array of complex numbers. - - The type of the input is checked, not the value. 
So even if the input - has an imaginary part equal to zero, `isrealobj` evaluates to False - if the data type is complex. - - Parameters - ---------- - x : any - The input can be of any type and shape. - - Returns - ------- - y : bool - The return value, False if `x` is of a complex type. - - See Also - -------- - iscomplexobj, isreal - - Examples - -------- - >>> np.isrealobj(1) - True - >>> np.isrealobj(1+0j) - False - >>> np.isrealobj([3, 1+0j, True]) - False - - """ - return not iscomplexobj(x) - -#----------------------------------------------------------------------------- - -def _getmaxmin(t): - from numpy.core import getlimits - f = getlimits.finfo(t) - return f.max, f.min - - -def _nan_to_num_dispatcher(x, copy=None, nan=None, posinf=None, neginf=None): - return (x,) - - -@array_function_dispatch(_nan_to_num_dispatcher) -def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): - """ - Replace NaN with zero and infinity with large finite numbers (default - behaviour) or with the numbers defined by the user using the `nan`, - `posinf` and/or `neginf` keywords. - - If `x` is inexact, NaN is replaced by zero or by the user defined value in - `nan` keyword, infinity is replaced by the largest finite floating point - values representable by ``x.dtype`` or by the user defined value in - `posinf` keyword and -infinity is replaced by the most negative finite - floating point values representable by ``x.dtype`` or by the user defined - value in `neginf` keyword. - - For complex dtypes, the above is applied to each of the real and - imaginary components of `x` separately. - - If `x` is not inexact, then no replacements are made. - - Parameters - ---------- - x : scalar or array_like - Input data. - copy : bool, optional - Whether to create a copy of `x` (True) or to replace values - in-place (False). The in-place operation only occurs if - casting to an array does not require a copy. - Default is True. - - .. 
versionadded:: 1.13 - nan : int, float, optional - Value to be used to fill NaN values. If no value is passed - then NaN values will be replaced with 0.0. - - .. versionadded:: 1.17 - posinf : int, float, optional - Value to be used to fill positive infinity values. If no value is - passed then positive infinity values will be replaced with a very - large number. - - .. versionadded:: 1.17 - neginf : int, float, optional - Value to be used to fill negative infinity values. If no value is - passed then negative infinity values will be replaced with a very - small (or negative) number. - - .. versionadded:: 1.17 - - - - Returns - ------- - out : ndarray - `x`, with the non-finite values replaced. If `copy` is False, this may - be `x` itself. - - See Also - -------- - isinf : Shows which elements are positive or negative infinity. - isneginf : Shows which elements are negative infinity. - isposinf : Shows which elements are positive infinity. - isnan : Shows which elements are Not a Number (NaN). - isfinite : Shows which elements are finite (not NaN, not infinity) - - Notes - ----- - NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. 
- - Examples - -------- - >>> np.nan_to_num(np.inf) - 1.7976931348623157e+308 - >>> np.nan_to_num(-np.inf) - -1.7976931348623157e+308 - >>> np.nan_to_num(np.nan) - 0.0 - >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128]) - >>> np.nan_to_num(x) - array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary - -1.28000000e+002, 1.28000000e+002]) - >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333) - array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03, - -1.2800000e+02, 1.2800000e+02]) - >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)]) - array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary - -1.28000000e+002, 1.28000000e+002]) - >>> np.nan_to_num(y) - array([ 1.79769313e+308 +0.00000000e+000j, # may vary - 0.00000000e+000 +0.00000000e+000j, - 0.00000000e+000 +1.79769313e+308j]) - >>> np.nan_to_num(y, nan=111111, posinf=222222) - array([222222.+111111.j, 111111. +0.j, 111111.+222222.j]) - """ - x = _nx.array(x, subok=True, copy=copy) - xtype = x.dtype.type - - isscalar = (x.ndim == 0) - - if not issubclass(xtype, _nx.inexact): - return x[()] if isscalar else x - - iscomplex = issubclass(xtype, _nx.complexfloating) - - dest = (x.real, x.imag) if iscomplex else (x,) - maxf, minf = _getmaxmin(x.real.dtype) - if posinf is not None: - maxf = posinf - if neginf is not None: - minf = neginf - for d in dest: - idx_nan = isnan(d) - idx_posinf = isposinf(d) - idx_neginf = isneginf(d) - _nx.copyto(d, nan, where=idx_nan) - _nx.copyto(d, maxf, where=idx_posinf) - _nx.copyto(d, minf, where=idx_neginf) - return x[()] if isscalar else x - -#----------------------------------------------------------------------------- - -def _real_if_close_dispatcher(a, tol=None): - return (a,) - - -@array_function_dispatch(_real_if_close_dispatcher) -def real_if_close(a, tol=100): - """ - If complex input returns a real array if complex parts are close to zero. 
- - "Close to zero" is defined as `tol` * (machine epsilon of the type for - `a`). - - Parameters - ---------- - a : array_like - Input array. - tol : float - Tolerance in machine epsilons for the complex part of the elements - in the array. - - Returns - ------- - out : ndarray - If `a` is real, the type of `a` is used for the output. If `a` - has complex elements, the returned type is float. - - See Also - -------- - real, imag, angle - - Notes - ----- - Machine epsilon varies from machine to machine and between data types - but Python floats on most platforms have a machine epsilon equal to - 2.2204460492503131e-16. You can use 'np.finfo(float).eps' to print - out the machine epsilon for floats. - - Examples - -------- - >>> np.finfo(float).eps - 2.2204460492503131e-16 # may vary - - >>> np.real_if_close([2.1 + 4e-14j], tol=1000) - array([2.1]) - >>> np.real_if_close([2.1 + 4e-13j], tol=1000) - array([2.1+4.e-13j]) - - """ - a = asanyarray(a) - if not issubclass(a.dtype.type, _nx.complexfloating): - return a - if tol > 1: - from numpy.core import getlimits - f = getlimits.finfo(a.dtype.type) - tol = f.eps * tol - if _nx.all(_nx.absolute(a.imag) < tol): - a = a.real - return a - - -def _asscalar_dispatcher(a): - # 2018-10-10, 1.16 - warnings.warn('np.asscalar(a) is deprecated since NumPy v1.16, use ' - 'a.item() instead', DeprecationWarning, stacklevel=3) - return (a,) - - -@array_function_dispatch(_asscalar_dispatcher) -def asscalar(a): - """ - Convert an array of size 1 to its scalar equivalent. - - .. deprecated:: 1.16 - - Deprecated, use `numpy.ndarray.item()` instead. - - Parameters - ---------- - a : ndarray - Input array of size 1. - - Returns - ------- - out : scalar - Scalar representation of `a`. The output data type is the same type - returned by the input's `item` method. 
- - Examples - -------- - >>> np.asscalar(np.array([24])) - 24 - """ - return a.item() - -#----------------------------------------------------------------------------- - -_namefromtype = {'S1': 'character', - '?': 'bool', - 'b': 'signed char', - 'B': 'unsigned char', - 'h': 'short', - 'H': 'unsigned short', - 'i': 'integer', - 'I': 'unsigned integer', - 'l': 'long integer', - 'L': 'unsigned long integer', - 'q': 'long long integer', - 'Q': 'unsigned long long integer', - 'f': 'single precision', - 'd': 'double precision', - 'g': 'long precision', - 'F': 'complex single precision', - 'D': 'complex double precision', - 'G': 'complex long double precision', - 'S': 'string', - 'U': 'unicode', - 'V': 'void', - 'O': 'object' - } - -@set_module('numpy') -def typename(char): - """ - Return a description for the given data type code. - - Parameters - ---------- - char : str - Data type code. - - Returns - ------- - out : str - Description of the input data type code. - - See Also - -------- - dtype, typecodes - - Examples - -------- - >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q', - ... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q'] - >>> for typechar in typechars: - ... print(typechar, ' : ', np.typename(typechar)) - ... - S1 : character - ? : bool - B : unsigned char - D : complex double precision - G : complex long double precision - F : complex single precision - I : unsigned integer - H : unsigned short - L : unsigned long integer - O : object - Q : unsigned long long integer - S : string - U : unicode - V : void - b : signed char - d : double precision - g : long precision - f : single precision - i : integer - h : short - l : long integer - q : long long integer - - """ - return _namefromtype[char] - -#----------------------------------------------------------------------------- - -#determine the "minimum common type" for a group of arrays. 
-array_type = [[_nx.half, _nx.single, _nx.double, _nx.longdouble], - [None, _nx.csingle, _nx.cdouble, _nx.clongdouble]] -array_precision = {_nx.half: 0, - _nx.single: 1, - _nx.double: 2, - _nx.longdouble: 3, - _nx.csingle: 1, - _nx.cdouble: 2, - _nx.clongdouble: 3} - - -def _common_type_dispatcher(*arrays): - return arrays - - -@array_function_dispatch(_common_type_dispatcher) -def common_type(*arrays): - """ - Return a scalar type which is common to the input arrays. - - The return type will always be an inexact (i.e. floating point) scalar - type, even if all the arrays are integer arrays. If one of the inputs is - an integer array, the minimum precision type that is returned is a - 64-bit floating point dtype. - - All input arrays except int64 and uint64 can be safely cast to the - returned dtype without loss of information. - - Parameters - ---------- - array1, array2, ... : ndarrays - Input arrays. - - Returns - ------- - out : data type code - Data type code. - - See Also - -------- - dtype, mintypecode - - Examples - -------- - >>> np.common_type(np.arange(2, dtype=np.float32)) - - >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2)) - - >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0])) - - - """ - is_complex = False - precision = 0 - for a in arrays: - t = a.dtype.type - if iscomplexobj(a): - is_complex = True - if issubclass(t, _nx.integer): - p = 2 # array_precision[_nx.double] - else: - p = array_precision.get(t, None) - if p is None: - raise TypeError("can't get common type for non-numeric array") - precision = max(precision, p) - if is_complex: - return array_type[1][precision] - else: - return array_type[0][precision] diff --git a/venv/lib/python3.7/site-packages/numpy/lib/ufunclike.py b/venv/lib/python3.7/site-packages/numpy/lib/ufunclike.py deleted file mode 100644 index 96fd5b3..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/ufunclike.py +++ /dev/null @@ -1,258 +0,0 @@ -""" -Module of functions that are 
like ufuncs in acting on arrays and optionally -storing results in an output array. - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['fix', 'isneginf', 'isposinf'] - -import numpy.core.numeric as nx -from numpy.core.overrides import ( - array_function_dispatch, ARRAY_FUNCTION_ENABLED, -) -import warnings -import functools - - -def _deprecate_out_named_y(f): - """ - Allow the out argument to be passed as the name `y` (deprecated) - - In future, this decorator should be removed. - """ - @functools.wraps(f) - def func(x, out=None, **kwargs): - if 'y' in kwargs: - if 'out' in kwargs: - raise TypeError( - "{} got multiple values for argument 'out'/'y'" - .format(f.__name__) - ) - out = kwargs.pop('y') - # NumPy 1.13.0, 2017-04-26 - warnings.warn( - "The name of the out argument to {} has changed from `y` to " - "`out`, to match other ufuncs.".format(f.__name__), - DeprecationWarning, stacklevel=3) - return f(x, out=out, **kwargs) - - return func - - -def _fix_out_named_y(f): - """ - Allow the out argument to be passed as the name `y` (deprecated) - - This decorator should only be used if _deprecate_out_named_y is used on - a corresponding dispatcher function. - """ - @functools.wraps(f) - def func(x, out=None, **kwargs): - if 'y' in kwargs: - # we already did error checking in _deprecate_out_named_y - out = kwargs.pop('y') - return f(x, out=out, **kwargs) - - return func - - -def _fix_and_maybe_deprecate_out_named_y(f): - """ - Use the appropriate decorator, depending upon if dispatching is being used. - """ - if ARRAY_FUNCTION_ENABLED: - return _fix_out_named_y(f) - else: - return _deprecate_out_named_y(f) - - -@_deprecate_out_named_y -def _dispatcher(x, out=None): - return (x, out) - - -@array_function_dispatch(_dispatcher, verify=False, module='numpy') -@_fix_and_maybe_deprecate_out_named_y -def fix(x, out=None): - """ - Round to nearest integer towards zero. 
- - Round an array of floats element-wise to nearest integer towards zero. - The rounded values are returned as floats. - - Parameters - ---------- - x : array_like - An array of floats to be rounded - y : ndarray, optional - Output array - - Returns - ------- - out : ndarray of floats - The array of rounded numbers - - See Also - -------- - trunc, floor, ceil - around : Round to given number of decimals - - Examples - -------- - >>> np.fix(3.14) - 3.0 - >>> np.fix(3) - 3.0 - >>> np.fix([2.1, 2.9, -2.1, -2.9]) - array([ 2., 2., -2., -2.]) - - """ - # promote back to an array if flattened - res = nx.asanyarray(nx.ceil(x, out=out)) - res = nx.floor(x, out=res, where=nx.greater_equal(x, 0)) - - # when no out argument is passed and no subclasses are involved, flatten - # scalars - if out is None and type(res) is nx.ndarray: - res = res[()] - return res - - -@array_function_dispatch(_dispatcher, verify=False, module='numpy') -@_fix_and_maybe_deprecate_out_named_y -def isposinf(x, out=None): - """ - Test element-wise for positive infinity, return result as bool array. - - Parameters - ---------- - x : array_like - The input array. - y : array_like, optional - A boolean array with the same shape as `x` to store the result. - - Returns - ------- - out : ndarray - A boolean array with the same dimensions as the input. - If second argument is not supplied then a boolean array is returned - with values True where the corresponding element of the input is - positive infinity and values False where the element of the input is - not positive infinity. - - If a second argument is supplied the result is stored there. If the - type of that array is a numeric type the result is represented as zeros - and ones, if the type is boolean then as False and True. - The return value `out` is then a reference to that array. - - See Also - -------- - isinf, isneginf, isfinite, isnan - - Notes - ----- - NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). 
- - Errors result if the second argument is also supplied when x is a scalar - input, if first and second arguments have different shapes, or if the - first argument has complex values - - Examples - -------- - >>> np.isposinf(np.PINF) - True - >>> np.isposinf(np.inf) - True - >>> np.isposinf(np.NINF) - False - >>> np.isposinf([-np.inf, 0., np.inf]) - array([False, False, True]) - - >>> x = np.array([-np.inf, 0., np.inf]) - >>> y = np.array([2, 2, 2]) - >>> np.isposinf(x, y) - array([0, 0, 1]) - >>> y - array([0, 0, 1]) - - """ - is_inf = nx.isinf(x) - try: - signbit = ~nx.signbit(x) - except TypeError: - raise TypeError('This operation is not supported for complex values ' - 'because it would be ambiguous.') - else: - return nx.logical_and(is_inf, signbit, out) - - -@array_function_dispatch(_dispatcher, verify=False, module='numpy') -@_fix_and_maybe_deprecate_out_named_y -def isneginf(x, out=None): - """ - Test element-wise for negative infinity, return result as bool array. - - Parameters - ---------- - x : array_like - The input array. - out : array_like, optional - A boolean array with the same shape and type as `x` to store the - result. - - Returns - ------- - out : ndarray - A boolean array with the same dimensions as the input. - If second argument is not supplied then a numpy boolean array is - returned with values True where the corresponding element of the - input is negative infinity and values False where the element of - the input is not negative infinity. - - If a second argument is supplied the result is stored there. If the - type of that array is a numeric type the result is represented as - zeros and ones, if the type is boolean then as False and True. The - return value `out` is then a reference to that array. - - See Also - -------- - isinf, isposinf, isnan, isfinite - - Notes - ----- - NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). 
- - Errors result if the second argument is also supplied when x is a scalar - input, if first and second arguments have different shapes, or if the - first argument has complex values. - - Examples - -------- - >>> np.isneginf(np.NINF) - True - >>> np.isneginf(np.inf) - False - >>> np.isneginf(np.PINF) - False - >>> np.isneginf([-np.inf, 0., np.inf]) - array([ True, False, False]) - - >>> x = np.array([-np.inf, 0., np.inf]) - >>> y = np.array([2, 2, 2]) - >>> np.isneginf(x, y) - array([1, 0, 0]) - >>> y - array([1, 0, 0]) - - """ - is_inf = nx.isinf(x) - try: - signbit = nx.signbit(x) - except TypeError: - raise TypeError('This operation is not supported for complex values ' - 'because it would be ambiguous.') - else: - return nx.logical_and(is_inf, signbit, out) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/user_array.py b/venv/lib/python3.7/site-packages/numpy/lib/user_array.py deleted file mode 100644 index f1510a7..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/user_array.py +++ /dev/null @@ -1,288 +0,0 @@ -""" -Standard container-class for easy multiple-inheritance. - -Try to inherit from the ndarray instead of using this class as this is not -complete. - -""" -from __future__ import division, absolute_import, print_function - -from numpy.core import ( - array, asarray, absolute, add, subtract, multiply, divide, - remainder, power, left_shift, right_shift, bitwise_and, bitwise_or, - bitwise_xor, invert, less, less_equal, not_equal, equal, greater, - greater_equal, shape, reshape, arange, sin, sqrt, transpose -) -from numpy.compat import long - - -class container(object): - """ - container(data, dtype=None, copy=True) - - Standard container-class for easy multiple-inheritance. 
- - Methods - ------- - copy - tostring - byteswap - astype - - """ - def __init__(self, data, dtype=None, copy=True): - self.array = array(data, dtype, copy=copy) - - def __repr__(self): - if self.ndim > 0: - return self.__class__.__name__ + repr(self.array)[len("array"):] - else: - return self.__class__.__name__ + "(" + repr(self.array) + ")" - - def __array__(self, t=None): - if t: - return self.array.astype(t) - return self.array - - # Array as sequence - def __len__(self): - return len(self.array) - - def __getitem__(self, index): - return self._rc(self.array[index]) - - def __setitem__(self, index, value): - self.array[index] = asarray(value, self.dtype) - - def __abs__(self): - return self._rc(absolute(self.array)) - - def __neg__(self): - return self._rc(-self.array) - - def __add__(self, other): - return self._rc(self.array + asarray(other)) - - __radd__ = __add__ - - def __iadd__(self, other): - add(self.array, other, self.array) - return self - - def __sub__(self, other): - return self._rc(self.array - asarray(other)) - - def __rsub__(self, other): - return self._rc(asarray(other) - self.array) - - def __isub__(self, other): - subtract(self.array, other, self.array) - return self - - def __mul__(self, other): - return self._rc(multiply(self.array, asarray(other))) - - __rmul__ = __mul__ - - def __imul__(self, other): - multiply(self.array, other, self.array) - return self - - def __div__(self, other): - return self._rc(divide(self.array, asarray(other))) - - def __rdiv__(self, other): - return self._rc(divide(asarray(other), self.array)) - - def __idiv__(self, other): - divide(self.array, other, self.array) - return self - - def __mod__(self, other): - return self._rc(remainder(self.array, other)) - - def __rmod__(self, other): - return self._rc(remainder(other, self.array)) - - def __imod__(self, other): - remainder(self.array, other, self.array) - return self - - def __divmod__(self, other): - return (self._rc(divide(self.array, other)), - 
self._rc(remainder(self.array, other))) - - def __rdivmod__(self, other): - return (self._rc(divide(other, self.array)), - self._rc(remainder(other, self.array))) - - def __pow__(self, other): - return self._rc(power(self.array, asarray(other))) - - def __rpow__(self, other): - return self._rc(power(asarray(other), self.array)) - - def __ipow__(self, other): - power(self.array, other, self.array) - return self - - def __lshift__(self, other): - return self._rc(left_shift(self.array, other)) - - def __rshift__(self, other): - return self._rc(right_shift(self.array, other)) - - def __rlshift__(self, other): - return self._rc(left_shift(other, self.array)) - - def __rrshift__(self, other): - return self._rc(right_shift(other, self.array)) - - def __ilshift__(self, other): - left_shift(self.array, other, self.array) - return self - - def __irshift__(self, other): - right_shift(self.array, other, self.array) - return self - - def __and__(self, other): - return self._rc(bitwise_and(self.array, other)) - - def __rand__(self, other): - return self._rc(bitwise_and(other, self.array)) - - def __iand__(self, other): - bitwise_and(self.array, other, self.array) - return self - - def __xor__(self, other): - return self._rc(bitwise_xor(self.array, other)) - - def __rxor__(self, other): - return self._rc(bitwise_xor(other, self.array)) - - def __ixor__(self, other): - bitwise_xor(self.array, other, self.array) - return self - - def __or__(self, other): - return self._rc(bitwise_or(self.array, other)) - - def __ror__(self, other): - return self._rc(bitwise_or(other, self.array)) - - def __ior__(self, other): - bitwise_or(self.array, other, self.array) - return self - - def __pos__(self): - return self._rc(self.array) - - def __invert__(self): - return self._rc(invert(self.array)) - - def _scalarfunc(self, func): - if self.ndim == 0: - return func(self[0]) - else: - raise TypeError( - "only rank-0 arrays can be converted to Python scalars.") - - def __complex__(self): - return 
self._scalarfunc(complex) - - def __float__(self): - return self._scalarfunc(float) - - def __int__(self): - return self._scalarfunc(int) - - def __long__(self): - return self._scalarfunc(long) - - def __hex__(self): - return self._scalarfunc(hex) - - def __oct__(self): - return self._scalarfunc(oct) - - def __lt__(self, other): - return self._rc(less(self.array, other)) - - def __le__(self, other): - return self._rc(less_equal(self.array, other)) - - def __eq__(self, other): - return self._rc(equal(self.array, other)) - - def __ne__(self, other): - return self._rc(not_equal(self.array, other)) - - def __gt__(self, other): - return self._rc(greater(self.array, other)) - - def __ge__(self, other): - return self._rc(greater_equal(self.array, other)) - - def copy(self): - "" - return self._rc(self.array.copy()) - - def tostring(self): - "" - return self.array.tostring() - - def byteswap(self): - "" - return self._rc(self.array.byteswap()) - - def astype(self, typecode): - "" - return self._rc(self.array.astype(typecode)) - - def _rc(self, a): - if len(shape(a)) == 0: - return a - else: - return self.__class__(a) - - def __array_wrap__(self, *args): - return self.__class__(args[0]) - - def __setattr__(self, attr, value): - if attr == 'array': - object.__setattr__(self, attr, value) - return - try: - self.array.__setattr__(attr, value) - except AttributeError: - object.__setattr__(self, attr, value) - - # Only called after other approaches fail. 
- def __getattr__(self, attr): - if (attr == 'array'): - return object.__getattribute__(self, attr) - return self.array.__getattribute__(attr) - -############################################################# -# Test of class container -############################################################# -if __name__ == '__main__': - temp = reshape(arange(10000), (100, 100)) - - ua = container(temp) - # new object created begin test - print(dir(ua)) - print(shape(ua), ua.shape) # I have changed Numeric.py - - ua_small = ua[:3, :5] - print(ua_small) - # this did not change ua[0,0], which is not normal behavior - ua_small[0, 0] = 10 - print(ua_small[0, 0], ua[0, 0]) - print(sin(ua_small) / 3. * 6. + sqrt(ua_small ** 2)) - print(less(ua_small, 103), type(less(ua_small, 103))) - print(type(ua_small * reshape(arange(15), shape(ua_small)))) - print(reshape(ua_small, (5, 3))) - print(transpose(ua_small)) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/utils.py b/venv/lib/python3.7/site-packages/numpy/lib/utils.py deleted file mode 100644 index 3c71d2a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/utils.py +++ /dev/null @@ -1,1088 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import sys -import types -import re -import warnings - -from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype -from numpy.core.overrides import set_module -from numpy.core import ndarray, ufunc, asarray -import numpy as np - -# getargspec and formatargspec were removed in Python 3.6 -from numpy.compat import getargspec, formatargspec - -__all__ = [ - 'issubclass_', 'issubsctype', 'issubdtype', 'deprecate', - 'deprecate_with_doc', 'get_include', 'info', 'source', 'who', - 'lookfor', 'byte_bounds', 'safe_eval' - ] - -def get_include(): - """ - Return the directory that contains the NumPy \\*.h header files. 
- - Extension modules that need to compile against NumPy should use this - function to locate the appropriate include directory. - - Notes - ----- - When using ``distutils``, for example in ``setup.py``. - :: - - import numpy as np - ... - Extension('extension_name', ... - include_dirs=[np.get_include()]) - ... - - """ - import numpy - if numpy.show_config is None: - # running from numpy source directory - d = os.path.join(os.path.dirname(numpy.__file__), 'core', 'include') - else: - # using installed numpy core headers - import numpy.core as core - d = os.path.join(os.path.dirname(core.__file__), 'include') - return d - - -def _set_function_name(func, name): - func.__name__ = name - return func - - -class _Deprecate(object): - """ - Decorator class to deprecate old functions. - - Refer to `deprecate` for details. - - See Also - -------- - deprecate - - """ - - def __init__(self, old_name=None, new_name=None, message=None): - self.old_name = old_name - self.new_name = new_name - self.message = message - - def __call__(self, func, *args, **kwargs): - """ - Decorator call. Refer to ``decorate``. - - """ - old_name = self.old_name - new_name = self.new_name - message = self.message - - if old_name is None: - try: - old_name = func.__name__ - except AttributeError: - old_name = func.__name__ - if new_name is None: - depdoc = "`%s` is deprecated!" % old_name - else: - depdoc = "`%s` is deprecated, use `%s` instead!" 
% \ - (old_name, new_name) - - if message is not None: - depdoc += "\n" + message - - def newfunc(*args,**kwds): - """`arrayrange` is deprecated, use `arange` instead!""" - warnings.warn(depdoc, DeprecationWarning, stacklevel=2) - return func(*args, **kwds) - - newfunc = _set_function_name(newfunc, old_name) - doc = func.__doc__ - if doc is None: - doc = depdoc - else: - lines = doc.expandtabs().split('\n') - indent = _get_indent(lines[1:]) - if lines[0].lstrip(): - # Indent the original first line to let inspect.cleandoc() - # dedent the docstring despite the deprecation notice. - doc = indent * ' ' + doc - else: - # Remove the same leading blank lines as cleandoc() would. - skip = len(lines[0]) + 1 - for line in lines[1:]: - if len(line) > indent: - break - skip += len(line) + 1 - doc = doc[skip:] - doc = '\n\n'.join([depdoc, doc]) - newfunc.__doc__ = doc - try: - d = func.__dict__ - except AttributeError: - pass - else: - newfunc.__dict__.update(d) - return newfunc - - -def _get_indent(lines): - """ - Determines the leading whitespace that could be removed from all the lines. - """ - indent = sys.maxsize - for line in lines: - content = len(line.lstrip()) - if content: - indent = min(indent, len(line) - content) - if indent == sys.maxsize: - indent = 0 - return indent - - -def deprecate(*args, **kwargs): - """ - Issues a DeprecationWarning, adds warning to `old_name`'s - docstring, rebinds ``old_name.__name__`` and returns the new - function object. - - This function may also be used as a decorator. - - Parameters - ---------- - func : function - The function to be deprecated. - old_name : str, optional - The name of the function to be deprecated. Default is None, in - which case the name of `func` is used. - new_name : str, optional - The new name for the function. Default is None, in which case the - deprecation message is that `old_name` is deprecated. 
If given, the - deprecation message is that `old_name` is deprecated and `new_name` - should be used instead. - message : str, optional - Additional explanation of the deprecation. Displayed in the - docstring after the warning. - - Returns - ------- - old_func : function - The deprecated function. - - Examples - -------- - Note that ``olduint`` returns a value after printing Deprecation - Warning: - - >>> olduint = np.deprecate(np.uint) - DeprecationWarning: `uint64` is deprecated! # may vary - >>> olduint(6) - 6 - - """ - # Deprecate may be run as a function or as a decorator - # If run as a function, we initialise the decorator class - # and execute its __call__ method. - - if args: - fn = args[0] - args = args[1:] - - return _Deprecate(*args, **kwargs)(fn) - else: - return _Deprecate(*args, **kwargs) - -deprecate_with_doc = lambda msg: _Deprecate(message=msg) - - -#-------------------------------------------- -# Determine if two arrays can share memory -#-------------------------------------------- - -def byte_bounds(a): - """ - Returns pointers to the end-points of an array. - - Parameters - ---------- - a : ndarray - Input array. It must conform to the Python-side of the array - interface. - - Returns - ------- - (low, high) : tuple of 2 integers - The first integer is the first byte of the array, the second - integer is just past the last byte of the array. If `a` is not - contiguous it will not use every byte between the (`low`, `high`) - values. 
- - Examples - -------- - >>> I = np.eye(2, dtype='f'); I.dtype - dtype('float32') - >>> low, high = np.byte_bounds(I) - >>> high - low == I.size*I.itemsize - True - >>> I = np.eye(2); I.dtype - dtype('float64') - >>> low, high = np.byte_bounds(I) - >>> high - low == I.size*I.itemsize - True - - """ - ai = a.__array_interface__ - a_data = ai['data'][0] - astrides = ai['strides'] - ashape = ai['shape'] - bytes_a = asarray(a).dtype.itemsize - - a_low = a_high = a_data - if astrides is None: - # contiguous case - a_high += a.size * bytes_a - else: - for shape, stride in zip(ashape, astrides): - if stride < 0: - a_low += (shape-1)*stride - else: - a_high += (shape-1)*stride - a_high += bytes_a - return a_low, a_high - - -#----------------------------------------------------------------------------- -# Function for output and information on the variables used. -#----------------------------------------------------------------------------- - - -def who(vardict=None): - """ - Print the NumPy arrays in the given dictionary. - - If there is no dictionary passed in or `vardict` is None then returns - NumPy arrays in the globals() dictionary (all NumPy arrays in the - namespace). - - Parameters - ---------- - vardict : dict, optional - A dictionary possibly containing ndarrays. Default is globals(). - - Returns - ------- - out : None - Returns 'None'. - - Notes - ----- - Prints out the name, shape, bytes and type of all of the ndarrays - present in `vardict`. - - Examples - -------- - >>> a = np.arange(10) - >>> b = np.ones(20) - >>> np.who() - Name Shape Bytes Type - =========================================================== - a 10 80 int64 - b 20 160 float64 - Upper bound on total bytes = 240 - - >>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str', - ... 
'idx':5} - >>> np.who(d) - Name Shape Bytes Type - =========================================================== - x 2 16 float64 - y 3 24 float64 - Upper bound on total bytes = 40 - - """ - if vardict is None: - frame = sys._getframe().f_back - vardict = frame.f_globals - sta = [] - cache = {} - for name in vardict.keys(): - if isinstance(vardict[name], ndarray): - var = vardict[name] - idv = id(var) - if idv in cache.keys(): - namestr = name + " (%s)" % cache[idv] - original = 0 - else: - cache[idv] = name - namestr = name - original = 1 - shapestr = " x ".join(map(str, var.shape)) - bytestr = str(var.nbytes) - sta.append([namestr, shapestr, bytestr, var.dtype.name, - original]) - - maxname = 0 - maxshape = 0 - maxbyte = 0 - totalbytes = 0 - for k in range(len(sta)): - val = sta[k] - if maxname < len(val[0]): - maxname = len(val[0]) - if maxshape < len(val[1]): - maxshape = len(val[1]) - if maxbyte < len(val[2]): - maxbyte = len(val[2]) - if val[4]: - totalbytes += int(val[2]) - - if len(sta) > 0: - sp1 = max(10, maxname) - sp2 = max(10, maxshape) - sp3 = max(10, maxbyte) - prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ') - print(prval + "\n" + "="*(len(prval)+5) + "\n") - - for k in range(len(sta)): - val = sta[k] - print("%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4), - val[1], ' '*(sp2-len(val[1])+5), - val[2], ' '*(sp3-len(val[2])+5), - val[3])) - print("\nUpper bound on total bytes = %d" % totalbytes) - return - -#----------------------------------------------------------------------------- - - -# NOTE: pydoc defines a help function which works similarly to this -# except it uses a pager to take over the screen. - -# combine name and arguments and split to multiple lines of width -# characters. End lines on a comma and begin argument list indented with -# the rest of the arguments. 
-def _split_line(name, arguments, width): - firstwidth = len(name) - k = firstwidth - newstr = name - sepstr = ", " - arglist = arguments.split(sepstr) - for argument in arglist: - if k == firstwidth: - addstr = "" - else: - addstr = sepstr - k = k + len(argument) + len(addstr) - if k > width: - k = firstwidth + 1 + len(argument) - newstr = newstr + ",\n" + " "*(firstwidth+2) + argument - else: - newstr = newstr + addstr + argument - return newstr - -_namedict = None -_dictlist = None - -# Traverse all module directories underneath globals -# to see if something is defined -def _makenamedict(module='numpy'): - module = __import__(module, globals(), locals(), []) - thedict = {module.__name__:module.__dict__} - dictlist = [module.__name__] - totraverse = [module.__dict__] - while True: - if len(totraverse) == 0: - break - thisdict = totraverse.pop(0) - for x in thisdict.keys(): - if isinstance(thisdict[x], types.ModuleType): - modname = thisdict[x].__name__ - if modname not in dictlist: - moddict = thisdict[x].__dict__ - dictlist.append(modname) - totraverse.append(moddict) - thedict[modname] = moddict - return thedict, dictlist - - -def _info(obj, output=sys.stdout): - """Provide information about ndarray obj. - - Parameters - ---------- - obj : ndarray - Must be ndarray, not checked. - output - Where printed output goes. - - Notes - ----- - Copied over from the numarray module prior to its removal. - Adapted somewhat as only numpy is an option now. - - Called by info. 
- - """ - extra = "" - tic = "" - bp = lambda x: x - cls = getattr(obj, '__class__', type(obj)) - nm = getattr(cls, '__name__', cls) - strides = obj.strides - endian = obj.dtype.byteorder - - print("class: ", nm, file=output) - print("shape: ", obj.shape, file=output) - print("strides: ", strides, file=output) - print("itemsize: ", obj.itemsize, file=output) - print("aligned: ", bp(obj.flags.aligned), file=output) - print("contiguous: ", bp(obj.flags.contiguous), file=output) - print("fortran: ", obj.flags.fortran, file=output) - print( - "data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra), - file=output - ) - print("byteorder: ", end=' ', file=output) - if endian in ['|', '=']: - print("%s%s%s" % (tic, sys.byteorder, tic), file=output) - byteswap = False - elif endian == '>': - print("%sbig%s" % (tic, tic), file=output) - byteswap = sys.byteorder != "big" - else: - print("%slittle%s" % (tic, tic), file=output) - byteswap = sys.byteorder != "little" - print("byteswap: ", bp(byteswap), file=output) - print("type: %s" % obj.dtype, file=output) - - -@set_module('numpy') -def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'): - """ - Get help information for a function, class, or module. - - Parameters - ---------- - object : object or str, optional - Input object or name to get information about. If `object` is a - numpy object, its docstring is given. If it is a string, available - modules are searched for matching objects. If None, information - about `info` itself is returned. - maxwidth : int, optional - Printing width. - output : file like object, optional - File like object that the output is written to, default is - ``stdout``. The object has to be opened in 'w' or 'a' mode. - toplevel : str, optional - Start search at this level. 
- - See Also - -------- - source, lookfor - - Notes - ----- - When used interactively with an object, ``np.info(obj)`` is equivalent - to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython - prompt. - - Examples - -------- - >>> np.info(np.polyval) # doctest: +SKIP - polyval(p, x) - Evaluate the polynomial p at x. - ... - - When using a string for `object` it is possible to get multiple results. - - >>> np.info('fft') # doctest: +SKIP - *** Found in numpy *** - Core FFT routines - ... - *** Found in numpy.fft *** - fft(a, n=None, axis=-1) - ... - *** Repeat reference found in numpy.fft.fftpack *** - *** Total of 3 references found. *** - - """ - global _namedict, _dictlist - # Local import to speed up numpy's import time. - import pydoc - import inspect - - if (hasattr(object, '_ppimport_importer') or - hasattr(object, '_ppimport_module')): - object = object._ppimport_module - elif hasattr(object, '_ppimport_attr'): - object = object._ppimport_attr - - if object is None: - info(info) - elif isinstance(object, ndarray): - _info(object, output=output) - elif isinstance(object, str): - if _namedict is None: - _namedict, _dictlist = _makenamedict(toplevel) - numfound = 0 - objlist = [] - for namestr in _dictlist: - try: - obj = _namedict[namestr][object] - if id(obj) in objlist: - print("\n " - "*** Repeat reference found in %s *** " % namestr, - file=output - ) - else: - objlist.append(id(obj)) - print(" *** Found in %s ***" % namestr, file=output) - info(obj) - print("-"*maxwidth, file=output) - numfound += 1 - except KeyError: - pass - if numfound == 0: - print("Help for %s not found." % object, file=output) - else: - print("\n " - "*** Total of %d references found. 
***" % numfound, - file=output - ) - - elif inspect.isfunction(object): - name = object.__name__ - arguments = formatargspec(*getargspec(object)) - - if len(name+arguments) > maxwidth: - argstr = _split_line(name, arguments, maxwidth) - else: - argstr = name + arguments - - print(" " + argstr + "\n", file=output) - print(inspect.getdoc(object), file=output) - - elif inspect.isclass(object): - name = object.__name__ - arguments = "()" - try: - if hasattr(object, '__init__'): - arguments = formatargspec( - *getargspec(object.__init__.__func__) - ) - arglist = arguments.split(', ') - if len(arglist) > 1: - arglist[1] = "("+arglist[1] - arguments = ", ".join(arglist[1:]) - except Exception: - pass - - if len(name+arguments) > maxwidth: - argstr = _split_line(name, arguments, maxwidth) - else: - argstr = name + arguments - - print(" " + argstr + "\n", file=output) - doc1 = inspect.getdoc(object) - if doc1 is None: - if hasattr(object, '__init__'): - print(inspect.getdoc(object.__init__), file=output) - else: - print(inspect.getdoc(object), file=output) - - methods = pydoc.allmethods(object) - if methods != []: - print("\n\nMethods:\n", file=output) - for meth in methods: - if meth[0] == '_': - continue - thisobj = getattr(object, meth, None) - if thisobj is not None: - methstr, other = pydoc.splitdoc( - inspect.getdoc(thisobj) or "None" - ) - print(" %s -- %s" % (meth, methstr), file=output) - - elif (sys.version_info[0] < 3 - and isinstance(object, types.InstanceType)): - # check for __call__ method - # types.InstanceType is the type of the instances of oldstyle classes - print("Instance of class: ", object.__class__.__name__, file=output) - print(file=output) - if hasattr(object, '__call__'): - arguments = formatargspec( - *getargspec(object.__call__.__func__) - ) - arglist = arguments.split(', ') - if len(arglist) > 1: - arglist[1] = "("+arglist[1] - arguments = ", ".join(arglist[1:]) - else: - arguments = "()" - - if hasattr(object, 'name'): - name = "%s" % 
object.name - else: - name = "" - if len(name+arguments) > maxwidth: - argstr = _split_line(name, arguments, maxwidth) - else: - argstr = name + arguments - - print(" " + argstr + "\n", file=output) - doc = inspect.getdoc(object.__call__) - if doc is not None: - print(inspect.getdoc(object.__call__), file=output) - print(inspect.getdoc(object), file=output) - - else: - print(inspect.getdoc(object), file=output) - - elif inspect.ismethod(object): - name = object.__name__ - arguments = formatargspec( - *getargspec(object.__func__) - ) - arglist = arguments.split(', ') - if len(arglist) > 1: - arglist[1] = "("+arglist[1] - arguments = ", ".join(arglist[1:]) - else: - arguments = "()" - - if len(name+arguments) > maxwidth: - argstr = _split_line(name, arguments, maxwidth) - else: - argstr = name + arguments - - print(" " + argstr + "\n", file=output) - print(inspect.getdoc(object), file=output) - - elif hasattr(object, '__doc__'): - print(inspect.getdoc(object), file=output) - - -@set_module('numpy') -def source(object, output=sys.stdout): - """ - Print or write to a file the source code for a NumPy object. - - The source code is only returned for objects written in Python. Many - functions and classes are defined in C and will therefore not return - useful information. - - Parameters - ---------- - object : numpy object - Input object. This can be any object (function, class, module, - ...). - output : file object, optional - If `output` not supplied then source code is printed to screen - (sys.stdout). File object must be created with either write 'w' or - append 'a' modes. - - See Also - -------- - lookfor, info - - Examples - -------- - >>> np.source(np.interp) #doctest: +SKIP - In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py - def interp(x, xp, fp, left=None, right=None): - \"\"\".... 
(full docstring printed)\"\"\" - if isinstance(x, (float, int, number)): - return compiled_interp([x], xp, fp, left, right).item() - else: - return compiled_interp(x, xp, fp, left, right) - - The source code is only returned for objects written in Python. - - >>> np.source(np.array) #doctest: +SKIP - Not available for this object. - - """ - # Local import to speed up numpy's import time. - import inspect - try: - print("In file: %s\n" % inspect.getsourcefile(object), file=output) - print(inspect.getsource(object), file=output) - except Exception: - print("Not available for this object.", file=output) - - -# Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...} -# where kind: "func", "class", "module", "object" -# and index: index in breadth-first namespace traversal -_lookfor_caches = {} - -# regexp whose match indicates that the string may contain a function -# signature -_function_signature_re = re.compile(r"[a-z0-9_]+\(.*[,=].*\)", re.I) - - -@set_module('numpy') -def lookfor(what, module=None, import_modules=True, regenerate=False, - output=None): - """ - Do a keyword search on docstrings. - - A list of objects that matched the search is displayed, - sorted by relevance. All given keywords need to be found in the - docstring for it to be returned as a result, but the order does - not matter. - - Parameters - ---------- - what : str - String containing words to look for. - module : str or list, optional - Name of module(s) whose docstrings to go through. - import_modules : bool, optional - Whether to import sub-modules in packages. Default is True. - regenerate : bool, optional - Whether to re-generate the docstring cache. Default is False. - output : file-like, optional - File-like object to write the output to. If omitted, use a pager. - - See Also - -------- - source, info - - Notes - ----- - Relevance is determined only roughly, by checking if the keywords occur - in the function name, at the start of a docstring, etc. 
- - Examples - -------- - >>> np.lookfor('binary representation') # doctest: +SKIP - Search results for 'binary representation' - ------------------------------------------ - numpy.binary_repr - Return the binary representation of the input number as a string. - numpy.core.setup_common.long_double_representation - Given a binary dump as given by GNU od -b, look for long double - numpy.base_repr - Return a string representation of a number in the given base system. - ... - - """ - import pydoc - - # Cache - cache = _lookfor_generate_cache(module, import_modules, regenerate) - - # Search - # XXX: maybe using a real stemming search engine would be better? - found = [] - whats = str(what).lower().split() - if not whats: - return - - for name, (docstring, kind, index) in cache.items(): - if kind in ('module', 'object'): - # don't show modules or objects - continue - doc = docstring.lower() - if all(w in doc for w in whats): - found.append(name) - - # Relevance sort - # XXX: this is full Harrison-Stetson heuristics now, - # XXX: it probably could be improved - - kind_relevance = {'func': 1000, 'class': 1000, - 'module': -1000, 'object': -1000} - - def relevance(name, docstr, kind, index): - r = 0 - # do the keywords occur within the start of the docstring? - first_doc = "\n".join(docstr.lower().strip().split("\n")[:3]) - r += sum([200 for w in whats if w in first_doc]) - # do the keywords occur in the function name? - r += sum([30 for w in whats if w in name]) - # is the full name long? - r += -len(name) * 5 - # is the object of bad type? - r += kind_relevance.get(kind, -1000) - # is the object deep in namespace hierarchy? 
- r += -name.count('.') * 10 - r += max(-index / 100, -100) - return r - - def relevance_value(a): - return relevance(a, *cache[a]) - found.sort(key=relevance_value) - - # Pretty-print - s = "Search results for '%s'" % (' '.join(whats)) - help_text = [s, "-"*len(s)] - for name in found[::-1]: - doc, kind, ix = cache[name] - - doclines = [line.strip() for line in doc.strip().split("\n") - if line.strip()] - - # find a suitable short description - try: - first_doc = doclines[0].strip() - if _function_signature_re.search(first_doc): - first_doc = doclines[1].strip() - except IndexError: - first_doc = "" - help_text.append("%s\n %s" % (name, first_doc)) - - if not found: - help_text.append("Nothing found.") - - # Output - if output is not None: - output.write("\n".join(help_text)) - elif len(help_text) > 10: - pager = pydoc.getpager() - pager("\n".join(help_text)) - else: - print("\n".join(help_text)) - -def _lookfor_generate_cache(module, import_modules, regenerate): - """ - Generate docstring cache for given module. - - Parameters - ---------- - module : str, None, module - Module for which to generate docstring cache - import_modules : bool - Whether to import sub-modules in packages. - regenerate : bool - Re-generate the docstring cache - - Returns - ------- - cache : dict {obj_full_name: (docstring, kind, index), ...} - Docstring cache for the module, either cached one (regenerate=False) - or newly generated. - - """ - global _lookfor_caches - # Local import to speed up numpy's import time. - import inspect - - if sys.version_info[0] >= 3: - # In Python3 stderr, stdout are text files. 
- from io import StringIO - else: - from StringIO import StringIO - - if module is None: - module = "numpy" - - if isinstance(module, str): - try: - __import__(module) - except ImportError: - return {} - module = sys.modules[module] - elif isinstance(module, list) or isinstance(module, tuple): - cache = {} - for mod in module: - cache.update(_lookfor_generate_cache(mod, import_modules, - regenerate)) - return cache - - if id(module) in _lookfor_caches and not regenerate: - return _lookfor_caches[id(module)] - - # walk items and collect docstrings - cache = {} - _lookfor_caches[id(module)] = cache - seen = {} - index = 0 - stack = [(module.__name__, module)] - while stack: - name, item = stack.pop(0) - if id(item) in seen: - continue - seen[id(item)] = True - - index += 1 - kind = "object" - - if inspect.ismodule(item): - kind = "module" - try: - _all = item.__all__ - except AttributeError: - _all = None - - # import sub-packages - if import_modules and hasattr(item, '__path__'): - for pth in item.__path__: - for mod_path in os.listdir(pth): - this_py = os.path.join(pth, mod_path) - init_py = os.path.join(pth, mod_path, '__init__.py') - if (os.path.isfile(this_py) and - mod_path.endswith('.py')): - to_import = mod_path[:-3] - elif os.path.isfile(init_py): - to_import = mod_path - else: - continue - if to_import == '__init__': - continue - - try: - old_stdout = sys.stdout - old_stderr = sys.stderr - try: - sys.stdout = StringIO() - sys.stderr = StringIO() - __import__("%s.%s" % (name, to_import)) - finally: - sys.stdout = old_stdout - sys.stderr = old_stderr - # Catch SystemExit, too - except BaseException: - continue - - for n, v in _getmembers(item): - try: - item_name = getattr(v, '__name__', "%s.%s" % (name, n)) - mod_name = getattr(v, '__module__', None) - except NameError: - # ref. SWIG's global cvars - # NameError: Unknown C global variable - item_name = "%s.%s" % (name, n) - mod_name = None - if '.' 
not in item_name and mod_name: - item_name = "%s.%s" % (mod_name, item_name) - - if not item_name.startswith(name + '.'): - # don't crawl "foreign" objects - if isinstance(v, ufunc): - # ... unless they are ufuncs - pass - else: - continue - elif not (inspect.ismodule(v) or _all is None or n in _all): - continue - stack.append(("%s.%s" % (name, n), v)) - elif inspect.isclass(item): - kind = "class" - for n, v in _getmembers(item): - stack.append(("%s.%s" % (name, n), v)) - elif hasattr(item, "__call__"): - kind = "func" - - try: - doc = inspect.getdoc(item) - except NameError: - # ref SWIG's NameError: Unknown C global variable - doc = None - if doc is not None: - cache[name] = (doc, kind, index) - - return cache - -def _getmembers(item): - import inspect - try: - members = inspect.getmembers(item) - except Exception: - members = [(x, getattr(item, x)) for x in dir(item) - if hasattr(item, x)] - return members - - -def safe_eval(source): - """ - Protected string evaluation. - - Evaluate a string containing a Python literal expression without - allowing the execution of arbitrary non-literal code. - - Parameters - ---------- - source : str - The string to evaluate. - - Returns - ------- - obj : object - The result of evaluating `source`. - - Raises - ------ - SyntaxError - If the code has invalid Python syntax, or if it contains - non-literal code. - - Examples - -------- - >>> np.safe_eval('1') - 1 - >>> np.safe_eval('[1, 2, 3]') - [1, 2, 3] - >>> np.safe_eval('{"foo": ("bar", 10.0)}') - {'foo': ('bar', 10.0)} - - >>> np.safe_eval('import os') - Traceback (most recent call last): - ... - SyntaxError: invalid syntax - - >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()') - Traceback (most recent call last): - ... - ValueError: malformed node or string: <_ast.Call object at 0x...> - - """ - # Local import to speed up numpy's import time. 
- import ast - return ast.literal_eval(source) - - -def _median_nancheck(data, result, axis, out): - """ - Utility function to check median result from data for NaN values at the end - and return NaN in that case. Input result can also be a MaskedArray. - - Parameters - ---------- - data : array - Input data to median function - result : Array or MaskedArray - Result of median function - axis : {int, sequence of int, None}, optional - Axis or axes along which the median was computed. - out : ndarray, optional - Output array in which to place the result. - Returns - ------- - median : scalar or ndarray - Median or NaN in axes which contained NaN in the input. - """ - if data.size == 0: - return result - data = np.moveaxis(data, axis, -1) - n = np.isnan(data[..., -1]) - # masked NaN values are ok - if np.ma.isMaskedArray(n): - n = n.filled(False) - if result.ndim == 0: - if n == True: - if out is not None: - out[...] = data.dtype.type(np.nan) - result = out - else: - result = data.dtype.type(np.nan) - elif np.count_nonzero(n.ravel()) > 0: - result[n] = np.nan - return result - -#----------------------------------------------------------------------------- diff --git a/venv/lib/python3.7/site-packages/numpy/linalg/__init__.py b/venv/lib/python3.7/site-packages/numpy/linalg/__init__.py deleted file mode 100644 index 5556081..0000000 --- a/venv/lib/python3.7/site-packages/numpy/linalg/__init__.py +++ /dev/null @@ -1,79 +0,0 @@ -""" -``numpy.linalg`` -================ - -The NumPy linear algebra functions rely on BLAS and LAPACK to provide efficient -low level implementations of standard linear algebra algorithms. Those -libraries may be provided by NumPy itself using C versions of a subset of their -reference implementations but, when possible, highly optimized libraries that -take advantage of specialized processor functionality are preferred. Examples -of such libraries are OpenBLAS, MKL (TM), and ATLAS. 
Because those libraries -are multithreaded and processor dependent, environmental variables and external -packages such as threadpoolctl may be needed to control the number of threads -or specify the processor architecture. - -- OpenBLAS: https://www.openblas.net/ -- threadpoolctl: https://github.com/joblib/threadpoolctl - -Please note that the most-used linear algebra functions in NumPy are present in -the main ``numpy`` namespace rather than in ``numpy.linalg``. There are: -``dot``, ``vdot``, ``inner``, ``outer``, ``matmul``, ``tensordot``, ``einsum``, -``einsum_path`` and ``kron``. - -Functions present in numpy.linalg are listed below. - - -Matrix and vector products --------------------------- - - multi_dot - matrix_power - -Decompositions --------------- - - cholesky - qr - svd - -Matrix eigenvalues ------------------- - - eig - eigh - eigvals - eigvalsh - -Norms and other numbers ------------------------ - - norm - cond - det - matrix_rank - slogdet - -Solving equations and inverting matrices ----------------------------------------- - - solve - tensorsolve - lstsq - inv - pinv - tensorinv - -Exceptions ----------- - - LinAlgError - -""" -from __future__ import division, absolute_import, print_function - -# To get sub-modules -from .linalg import * - -from numpy._pytesttester import PytestTester -test = PytestTester(__name__) -del PytestTester diff --git a/venv/lib/python3.7/site-packages/numpy/linalg/_umath_linalg.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/numpy/linalg/_umath_linalg.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index b4dcf77..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/linalg/_umath_linalg.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/linalg/lapack_lite.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/numpy/linalg/lapack_lite.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index ccde147..0000000 Binary 
files a/venv/lib/python3.7/site-packages/numpy/linalg/lapack_lite.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/linalg/linalg.py b/venv/lib/python3.7/site-packages/numpy/linalg/linalg.py deleted file mode 100644 index f1b2c22..0000000 --- a/venv/lib/python3.7/site-packages/numpy/linalg/linalg.py +++ /dev/null @@ -1,2746 +0,0 @@ -"""Lite version of scipy.linalg. - -Notes ------ -This module is a lite version of the linalg.py module in SciPy which -contains high-level Python interface to the LAPACK library. The lite -version only accesses the following LAPACK functions: dgesv, zgesv, -dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf, -zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr. -""" -from __future__ import division, absolute_import, print_function - - -__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv', - 'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det', - 'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank', - 'LinAlgError', 'multi_dot'] - -import functools -import operator -import warnings - -from numpy.core import ( - array, asarray, zeros, empty, empty_like, intc, single, double, - csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot, - add, multiply, sqrt, fastCopyAndTranspose, sum, isfinite, - finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs, - atleast_2d, intp, asanyarray, object_, matmul, - swapaxes, divide, count_nonzero, isnan, sign -) -from numpy.core.multiarray import normalize_axis_index -from numpy.core.overrides import set_module -from numpy.core import overrides -from numpy.lib.twodim_base import triu, eye -from numpy.linalg import lapack_lite, _umath_linalg - - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy.linalg') - - -# For Python2/3 compatibility -_N = b'N' -_V = b'V' -_A = b'A' -_S = b'S' -_L = b'L' - -fortran_int = intc - - 
-@set_module('numpy.linalg') -class LinAlgError(Exception): - """ - Generic Python-exception-derived object raised by linalg functions. - - General purpose exception class, derived from Python's exception.Exception - class, programmatically raised in linalg functions when a Linear - Algebra-related condition would prevent further correct execution of the - function. - - Parameters - ---------- - None - - Examples - -------- - >>> from numpy import linalg as LA - >>> LA.inv(np.zeros((2,2))) - Traceback (most recent call last): - File "", line 1, in - File "...linalg.py", line 350, - in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype))) - File "...linalg.py", line 249, - in solve - raise LinAlgError('Singular matrix') - numpy.linalg.LinAlgError: Singular matrix - - """ - - -def _determine_error_states(): - errobj = geterrobj() - bufsize = errobj[0] - - with errstate(invalid='call', over='ignore', - divide='ignore', under='ignore'): - invalid_call_errmask = geterrobj()[1] - - return [bufsize, invalid_call_errmask, None] - -# Dealing with errors in _umath_linalg -_linalg_error_extobj = _determine_error_states() -del _determine_error_states - -def _raise_linalgerror_singular(err, flag): - raise LinAlgError("Singular matrix") - -def _raise_linalgerror_nonposdef(err, flag): - raise LinAlgError("Matrix is not positive definite") - -def _raise_linalgerror_eigenvalues_nonconvergence(err, flag): - raise LinAlgError("Eigenvalues did not converge") - -def _raise_linalgerror_svd_nonconvergence(err, flag): - raise LinAlgError("SVD did not converge") - -def _raise_linalgerror_lstsq(err, flag): - raise LinAlgError("SVD did not converge in Linear Least Squares") - -def get_linalg_error_extobj(callback): - extobj = list(_linalg_error_extobj) # make a copy - extobj[2] = callback - return extobj - -def _makearray(a): - new = asarray(a) - wrap = getattr(a, "__array_prepare__", new.__array_wrap__) - return new, wrap - -def isComplexType(t): - return issubclass(t, 
complexfloating) - -_real_types_map = {single : single, - double : double, - csingle : single, - cdouble : double} - -_complex_types_map = {single : csingle, - double : cdouble, - csingle : csingle, - cdouble : cdouble} - -def _realType(t, default=double): - return _real_types_map.get(t, default) - -def _complexType(t, default=cdouble): - return _complex_types_map.get(t, default) - -def _linalgRealType(t): - """Cast the type t to either double or cdouble.""" - return double - -def _commonType(*arrays): - # in lite version, use higher precision (always double or cdouble) - result_type = single - is_complex = False - for a in arrays: - if issubclass(a.dtype.type, inexact): - if isComplexType(a.dtype.type): - is_complex = True - rt = _realType(a.dtype.type, default=None) - if rt is None: - # unsupported inexact scalar - raise TypeError("array type %s is unsupported in linalg" % - (a.dtype.name,)) - else: - rt = double - if rt is double: - result_type = double - if is_complex: - t = cdouble - result_type = _complex_types_map[result_type] - else: - t = double - return t, result_type - - -# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are). - -_fastCT = fastCopyAndTranspose - -def _to_native_byte_order(*arrays): - ret = [] - for arr in arrays: - if arr.dtype.byteorder not in ('=', '|'): - ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('='))) - else: - ret.append(arr) - if len(ret) == 1: - return ret[0] - else: - return ret - -def _fastCopyAndTranspose(type, *arrays): - cast_arrays = () - for a in arrays: - if a.dtype.type is type: - cast_arrays = cast_arrays + (_fastCT(a),) - else: - cast_arrays = cast_arrays + (_fastCT(a.astype(type)),) - if len(cast_arrays) == 1: - return cast_arrays[0] - else: - return cast_arrays - -def _assert_2d(*arrays): - for a in arrays: - if a.ndim != 2: - raise LinAlgError('%d-dimensional array given. 
Array must be ' - 'two-dimensional' % a.ndim) - -def _assert_stacked_2d(*arrays): - for a in arrays: - if a.ndim < 2: - raise LinAlgError('%d-dimensional array given. Array must be ' - 'at least two-dimensional' % a.ndim) - -def _assert_stacked_square(*arrays): - for a in arrays: - m, n = a.shape[-2:] - if m != n: - raise LinAlgError('Last 2 dimensions of the array must be square') - -def _assert_finite(*arrays): - for a in arrays: - if not isfinite(a).all(): - raise LinAlgError("Array must not contain infs or NaNs") - -def _is_empty_2d(arr): - # check size first for efficiency - return arr.size == 0 and product(arr.shape[-2:]) == 0 - - -def transpose(a): - """ - Transpose each matrix in a stack of matrices. - - Unlike np.transpose, this only swaps the last two axes, rather than all of - them - - Parameters - ---------- - a : (...,M,N) array_like - - Returns - ------- - aT : (...,N,M) ndarray - """ - return swapaxes(a, -1, -2) - -# Linear equations - -def _tensorsolve_dispatcher(a, b, axes=None): - return (a, b) - - -@array_function_dispatch(_tensorsolve_dispatcher) -def tensorsolve(a, b, axes=None): - """ - Solve the tensor equation ``a x = b`` for x. - - It is assumed that all indices of `x` are summed over in the product, - together with the rightmost indices of `a`, as is done in, for example, - ``tensordot(a, x, axes=b.ndim)``. - - Parameters - ---------- - a : array_like - Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals - the shape of that sub-tensor of `a` consisting of the appropriate - number of its rightmost indices, and must be such that - ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be - 'square'). - b : array_like - Right-hand tensor, which can be of any shape. - axes : tuple of ints, optional - Axes in `a` to reorder to the right, before inversion. - If None (default), no reordering is done. 
- - Returns - ------- - x : ndarray, shape Q - - Raises - ------ - LinAlgError - If `a` is singular or not 'square' (in the above sense). - - See Also - -------- - numpy.tensordot, tensorinv, numpy.einsum - - Examples - -------- - >>> a = np.eye(2*3*4) - >>> a.shape = (2*3, 4, 2, 3, 4) - >>> b = np.random.randn(2*3, 4) - >>> x = np.linalg.tensorsolve(a, b) - >>> x.shape - (2, 3, 4) - >>> np.allclose(np.tensordot(a, x, axes=3), b) - True - - """ - a, wrap = _makearray(a) - b = asarray(b) - an = a.ndim - - if axes is not None: - allaxes = list(range(0, an)) - for k in axes: - allaxes.remove(k) - allaxes.insert(an, k) - a = a.transpose(allaxes) - - oldshape = a.shape[-(an-b.ndim):] - prod = 1 - for k in oldshape: - prod *= k - - a = a.reshape(-1, prod) - b = b.ravel() - res = wrap(solve(a, b)) - res.shape = oldshape - return res - - -def _solve_dispatcher(a, b): - return (a, b) - - -@array_function_dispatch(_solve_dispatcher) -def solve(a, b): - """ - Solve a linear matrix equation, or system of linear scalar equations. - - Computes the "exact" solution, `x`, of the well-determined, i.e., full - rank, linear matrix equation `ax = b`. - - Parameters - ---------- - a : (..., M, M) array_like - Coefficient matrix. - b : {(..., M,), (..., M, K)}, array_like - Ordinate or "dependent variable" values. - - Returns - ------- - x : {(..., M,), (..., M, K)} ndarray - Solution to the system a x = b. Returned shape is identical to `b`. - - Raises - ------ - LinAlgError - If `a` is singular or not square. - - Notes - ----- - - .. versionadded:: 1.8.0 - - Broadcasting rules apply, see the `numpy.linalg` documentation for - details. - - The solutions are computed using LAPACK routine ``_gesv``. - - `a` must be square and of full-rank, i.e., all rows (or, equivalently, - columns) must be linearly independent; if either is not true, use - `lstsq` for the least-squares best "solution" of the - system/equation. - - References - ---------- - .. [1] G. 
Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, - FL, Academic Press, Inc., 1980, pg. 22. - - Examples - -------- - Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``: - - >>> a = np.array([[3,1], [1,2]]) - >>> b = np.array([9,8]) - >>> x = np.linalg.solve(a, b) - >>> x - array([2., 3.]) - - Check that the solution is correct: - - >>> np.allclose(np.dot(a, x), b) - True - - """ - a, _ = _makearray(a) - _assert_stacked_2d(a) - _assert_stacked_square(a) - b, wrap = _makearray(b) - t, result_t = _commonType(a, b) - - # We use the b = (..., M,) logic, only if the number of extra dimensions - # match exactly - if b.ndim == a.ndim - 1: - gufunc = _umath_linalg.solve1 - else: - gufunc = _umath_linalg.solve - - signature = 'DD->D' if isComplexType(t) else 'dd->d' - extobj = get_linalg_error_extobj(_raise_linalgerror_singular) - r = gufunc(a, b, signature=signature, extobj=extobj) - - return wrap(r.astype(result_t, copy=False)) - - -def _tensorinv_dispatcher(a, ind=None): - return (a,) - - -@array_function_dispatch(_tensorinv_dispatcher) -def tensorinv(a, ind=2): - """ - Compute the 'inverse' of an N-dimensional array. - - The result is an inverse for `a` relative to the tensordot operation - ``tensordot(a, b, ind)``, i. e., up to floating-point accuracy, - ``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the - tensordot operation. - - Parameters - ---------- - a : array_like - Tensor to 'invert'. Its shape must be 'square', i. e., - ``prod(a.shape[:ind]) == prod(a.shape[ind:])``. - ind : int, optional - Number of first indices that are involved in the inverse sum. - Must be a positive integer, default is 2. - - Returns - ------- - b : ndarray - `a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``. - - Raises - ------ - LinAlgError - If `a` is singular or not 'square' (in the above sense). 
- - See Also - -------- - numpy.tensordot, tensorsolve - - Examples - -------- - >>> a = np.eye(4*6) - >>> a.shape = (4, 6, 8, 3) - >>> ainv = np.linalg.tensorinv(a, ind=2) - >>> ainv.shape - (8, 3, 4, 6) - >>> b = np.random.randn(4, 6) - >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b)) - True - - >>> a = np.eye(4*6) - >>> a.shape = (24, 8, 3) - >>> ainv = np.linalg.tensorinv(a, ind=1) - >>> ainv.shape - (8, 3, 24) - >>> b = np.random.randn(24) - >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) - True - - """ - a = asarray(a) - oldshape = a.shape - prod = 1 - if ind > 0: - invshape = oldshape[ind:] + oldshape[:ind] - for k in oldshape[ind:]: - prod *= k - else: - raise ValueError("Invalid ind argument.") - a = a.reshape(prod, -1) - ia = inv(a) - return ia.reshape(*invshape) - - -# Matrix inversion - -def _unary_dispatcher(a): - return (a,) - - -@array_function_dispatch(_unary_dispatcher) -def inv(a): - """ - Compute the (multiplicative) inverse of a matrix. - - Given a square matrix `a`, return the matrix `ainv` satisfying - ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``. - - Parameters - ---------- - a : (..., M, M) array_like - Matrix to be inverted. - - Returns - ------- - ainv : (..., M, M) ndarray or matrix - (Multiplicative) inverse of the matrix `a`. - - Raises - ------ - LinAlgError - If `a` is not square or inversion fails. - - Notes - ----- - - .. versionadded:: 1.8.0 - - Broadcasting rules apply, see the `numpy.linalg` documentation for - details. - - Examples - -------- - >>> from numpy.linalg import inv - >>> a = np.array([[1., 2.], [3., 4.]]) - >>> ainv = inv(a) - >>> np.allclose(np.dot(a, ainv), np.eye(2)) - True - >>> np.allclose(np.dot(ainv, a), np.eye(2)) - True - - If a is a matrix object, then the return value is a matrix as well: - - >>> ainv = inv(np.matrix(a)) - >>> ainv - matrix([[-2. , 1. 
], - [ 1.5, -0.5]]) - - Inverses of several matrices can be computed at once: - - >>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]]) - >>> inv(a) - array([[[-2. , 1. ], - [ 1.5 , -0.5 ]], - [[-1.25, 0.75], - [ 0.75, -0.25]]]) - - """ - a, wrap = _makearray(a) - _assert_stacked_2d(a) - _assert_stacked_square(a) - t, result_t = _commonType(a) - - signature = 'D->D' if isComplexType(t) else 'd->d' - extobj = get_linalg_error_extobj(_raise_linalgerror_singular) - ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj) - return wrap(ainv.astype(result_t, copy=False)) - - -def _matrix_power_dispatcher(a, n): - return (a,) - - -@array_function_dispatch(_matrix_power_dispatcher) -def matrix_power(a, n): - """ - Raise a square matrix to the (integer) power `n`. - - For positive integers `n`, the power is computed by repeated matrix - squarings and matrix multiplications. If ``n == 0``, the identity matrix - of the same shape as M is returned. If ``n < 0``, the inverse - is computed and then raised to the ``abs(n)``. - - .. note:: Stacks of object matrices are not currently supported. - - Parameters - ---------- - a : (..., M, M) array_like - Matrix to be "powered". - n : int - The exponent can be any integer or long integer, positive, - negative, or zero. - - Returns - ------- - a**n : (..., M, M) ndarray or matrix object - The return value is the same shape and type as `M`; - if the exponent is positive or zero then the type of the - elements is the same as those of `M`. If the exponent is - negative the elements are floating-point. - - Raises - ------ - LinAlgError - For matrices that are not square or that (for negative powers) cannot - be inverted numerically. - - Examples - -------- - >>> from numpy.linalg import matrix_power - >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. 
of the imaginary unit - >>> matrix_power(i, 3) # should = -i - array([[ 0, -1], - [ 1, 0]]) - >>> matrix_power(i, 0) - array([[1, 0], - [0, 1]]) - >>> matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements - array([[ 0., 1.], - [-1., 0.]]) - - Somewhat more sophisticated example - - >>> q = np.zeros((4, 4)) - >>> q[0:2, 0:2] = -i - >>> q[2:4, 2:4] = i - >>> q # one of the three quaternion units not equal to 1 - array([[ 0., -1., 0., 0.], - [ 1., 0., 0., 0.], - [ 0., 0., 0., 1.], - [ 0., 0., -1., 0.]]) - >>> matrix_power(q, 2) # = -np.eye(4) - array([[-1., 0., 0., 0.], - [ 0., -1., 0., 0.], - [ 0., 0., -1., 0.], - [ 0., 0., 0., -1.]]) - - """ - a = asanyarray(a) - _assert_stacked_2d(a) - _assert_stacked_square(a) - - try: - n = operator.index(n) - except TypeError: - raise TypeError("exponent must be an integer") - - # Fall back on dot for object arrays. Object arrays are not supported by - # the current implementation of matmul using einsum - if a.dtype != object: - fmatmul = matmul - elif a.ndim == 2: - fmatmul = dot - else: - raise NotImplementedError( - "matrix_power not supported for stacks of object arrays") - - if n == 0: - a = empty_like(a) - a[...] = eye(a.shape[-2], dtype=a.dtype) - return a - - elif n < 0: - a = inv(a) - n = abs(n) - - # short-cuts. - if n == 1: - return a - - elif n == 2: - return fmatmul(a, a) - - elif n == 3: - return fmatmul(fmatmul(a, a), a) - - # Use binary decomposition to reduce the number of matrix multiplications. - # Here, we iterate over the bits of n, from LSB to MSB, raise `a` to - # increasing powers of 2, and multiply into the result as needed. - z = result = None - while n > 0: - z = a if z is None else fmatmul(z, z) - n, bit = divmod(n, 2) - if bit: - result = z if result is None else fmatmul(result, z) - - return result - - -# Cholesky decomposition - - -@array_function_dispatch(_unary_dispatcher) -def cholesky(a): - """ - Cholesky decomposition. 
- - Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`, - where `L` is lower-triangular and .H is the conjugate transpose operator - (which is the ordinary transpose if `a` is real-valued). `a` must be - Hermitian (symmetric if real-valued) and positive-definite. Only `L` is - actually returned. - - Parameters - ---------- - a : (..., M, M) array_like - Hermitian (symmetric if all elements are real), positive-definite - input matrix. - - Returns - ------- - L : (..., M, M) array_like - Upper or lower-triangular Cholesky factor of `a`. Returns a - matrix object if `a` is a matrix object. - - Raises - ------ - LinAlgError - If the decomposition fails, for example, if `a` is not - positive-definite. - - Notes - ----- - - .. versionadded:: 1.8.0 - - Broadcasting rules apply, see the `numpy.linalg` documentation for - details. - - The Cholesky decomposition is often used as a fast way of solving - - .. math:: A \\mathbf{x} = \\mathbf{b} - - (when `A` is both Hermitian/symmetric and positive-definite). - - First, we solve for :math:`\\mathbf{y}` in - - .. math:: L \\mathbf{y} = \\mathbf{b}, - - and then for :math:`\\mathbf{x}` in - - .. math:: L.H \\mathbf{x} = \\mathbf{y}. - - Examples - -------- - >>> A = np.array([[1,-2j],[2j,5]]) - >>> A - array([[ 1.+0.j, -0.-2.j], - [ 0.+2.j, 5.+0.j]]) - >>> L = np.linalg.cholesky(A) - >>> L - array([[1.+0.j, 0.+0.j], - [0.+2.j, 1.+0.j]]) - >>> np.dot(L, L.T.conj()) # verify that L * L.H = A - array([[1.+0.j, 0.-2.j], - [0.+2.j, 5.+0.j]]) - >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like? 
- >>> np.linalg.cholesky(A) # an ndarray object is returned - array([[1.+0.j, 0.+0.j], - [0.+2.j, 1.+0.j]]) - >>> # But a matrix object is returned if A is a matrix object - >>> np.linalg.cholesky(np.matrix(A)) - matrix([[ 1.+0.j, 0.+0.j], - [ 0.+2.j, 1.+0.j]]) - - """ - extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef) - gufunc = _umath_linalg.cholesky_lo - a, wrap = _makearray(a) - _assert_stacked_2d(a) - _assert_stacked_square(a) - t, result_t = _commonType(a) - signature = 'D->D' if isComplexType(t) else 'd->d' - r = gufunc(a, signature=signature, extobj=extobj) - return wrap(r.astype(result_t, copy=False)) - - -# QR decompostion - -def _qr_dispatcher(a, mode=None): - return (a,) - - -@array_function_dispatch(_qr_dispatcher) -def qr(a, mode='reduced'): - """ - Compute the qr factorization of a matrix. - - Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is - upper-triangular. - - Parameters - ---------- - a : array_like, shape (M, N) - Matrix to be factored. - mode : {'reduced', 'complete', 'r', 'raw'}, optional - If K = min(M, N), then - - * 'reduced' : returns q, r with dimensions (M, K), (K, N) (default) - * 'complete' : returns q, r with dimensions (M, M), (M, N) - * 'r' : returns r only with dimensions (K, N) - * 'raw' : returns h, tau with dimensions (N, M), (K,) - - The options 'reduced', 'complete, and 'raw' are new in numpy 1.8, - see the notes for more information. The default is 'reduced', and to - maintain backward compatibility with earlier versions of numpy both - it and the old default 'full' can be omitted. Note that array h - returned in 'raw' mode is transposed for calling Fortran. The - 'economic' mode is deprecated. The modes 'full' and 'economic' may - be passed using only the first letter for backwards compatibility, - but all others must be spelled out. See the Notes for more - explanation. - - - Returns - ------- - q : ndarray of float or complex, optional - A matrix with orthonormal columns. 
When mode = 'complete' the - result is an orthogonal/unitary matrix depending on whether or not - a is real/complex. The determinant may be either +/- 1 in that - case. - r : ndarray of float or complex, optional - The upper-triangular matrix. - (h, tau) : ndarrays of np.double or np.cdouble, optional - The array h contains the Householder reflectors that generate q - along with r. The tau array contains scaling factors for the - reflectors. In the deprecated 'economic' mode only h is returned. - - Raises - ------ - LinAlgError - If factoring fails. - - Notes - ----- - This is an interface to the LAPACK routines ``dgeqrf``, ``zgeqrf``, - ``dorgqr``, and ``zungqr``. - - For more information on the qr factorization, see for example: - https://en.wikipedia.org/wiki/QR_factorization - - Subclasses of `ndarray` are preserved except for the 'raw' mode. So if - `a` is of type `matrix`, all the return values will be matrices too. - - New 'reduced', 'complete', and 'raw' options for mode were added in - NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In - addition the options 'full' and 'economic' were deprecated. Because - 'full' was the previous default and 'reduced' is the new default, - backward compatibility can be maintained by letting `mode` default. - The 'raw' option was added so that LAPACK routines that can multiply - arrays by q using the Householder reflectors can be used. Note that in - this case the returned arrays are of type np.double or np.cdouble and - the h array is transposed to be FORTRAN compatible. No routines using - the 'raw' return are currently exposed by numpy, but some are available - in lapack_lite and just await the necessary work. 
- - Examples - -------- - >>> a = np.random.randn(9, 6) - >>> q, r = np.linalg.qr(a) - >>> np.allclose(a, np.dot(q, r)) # a does equal qr - True - >>> r2 = np.linalg.qr(a, mode='r') - >>> np.allclose(r, r2) # mode='r' returns the same r as mode='full' - True - - Example illustrating a common use of `qr`: solving of least squares - problems - - What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for - the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points - and you'll see that it should be y0 = 0, m = 1.) The answer is provided - by solving the over-determined matrix equation ``Ax = b``, where:: - - A = array([[0, 1], [1, 1], [1, 1], [2, 1]]) - x = array([[y0], [m]]) - b = array([[1], [0], [2], [1]]) - - If A = qr such that q is orthonormal (which is always possible via - Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice, - however, we simply use `lstsq`.) - - >>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]]) - >>> A - array([[0, 1], - [1, 1], - [1, 1], - [2, 1]]) - >>> b = np.array([1, 0, 2, 1]) - >>> q, r = np.linalg.qr(A) - >>> p = np.dot(q.T, b) - >>> np.dot(np.linalg.inv(r), p) - array([ 1.1e-16, 1.0e+00]) - - """ - if mode not in ('reduced', 'complete', 'r', 'raw'): - if mode in ('f', 'full'): - # 2013-04-01, 1.8 - msg = "".join(( - "The 'full' option is deprecated in favor of 'reduced'.\n", - "For backward compatibility let mode default.")) - warnings.warn(msg, DeprecationWarning, stacklevel=3) - mode = 'reduced' - elif mode in ('e', 'economic'): - # 2013-04-01, 1.8 - msg = "The 'economic' option is deprecated." 
- warnings.warn(msg, DeprecationWarning, stacklevel=3) - mode = 'economic' - else: - raise ValueError("Unrecognized mode '%s'" % mode) - - a, wrap = _makearray(a) - _assert_2d(a) - m, n = a.shape - t, result_t = _commonType(a) - a = _fastCopyAndTranspose(t, a) - a = _to_native_byte_order(a) - mn = min(m, n) - tau = zeros((mn,), t) - - if isComplexType(t): - lapack_routine = lapack_lite.zgeqrf - routine_name = 'zgeqrf' - else: - lapack_routine = lapack_lite.dgeqrf - routine_name = 'dgeqrf' - - # calculate optimal size of work data 'work' - lwork = 1 - work = zeros((lwork,), t) - results = lapack_routine(m, n, a, max(1, m), tau, work, -1, 0) - if results['info'] != 0: - raise LinAlgError('%s returns %d' % (routine_name, results['info'])) - - # do qr decomposition - lwork = max(1, n, int(abs(work[0]))) - work = zeros((lwork,), t) - results = lapack_routine(m, n, a, max(1, m), tau, work, lwork, 0) - if results['info'] != 0: - raise LinAlgError('%s returns %d' % (routine_name, results['info'])) - - # handle modes that don't return q - if mode == 'r': - r = _fastCopyAndTranspose(result_t, a[:, :mn]) - return wrap(triu(r)) - - if mode == 'raw': - return a, tau - - if mode == 'economic': - if t != result_t : - a = a.astype(result_t, copy=False) - return wrap(a.T) - - # generate q from a - if mode == 'complete' and m > n: - mc = m - q = empty((m, m), t) - else: - mc = mn - q = empty((n, m), t) - q[:n] = a - - if isComplexType(t): - lapack_routine = lapack_lite.zungqr - routine_name = 'zungqr' - else: - lapack_routine = lapack_lite.dorgqr - routine_name = 'dorgqr' - - # determine optimal lwork - lwork = 1 - work = zeros((lwork,), t) - results = lapack_routine(m, mc, mn, q, max(1, m), tau, work, -1, 0) - if results['info'] != 0: - raise LinAlgError('%s returns %d' % (routine_name, results['info'])) - - # compute q - lwork = max(1, n, int(abs(work[0]))) - work = zeros((lwork,), t) - results = lapack_routine(m, mc, mn, q, max(1, m), tau, work, lwork, 0) - if results['info'] != 
0: - raise LinAlgError('%s returns %d' % (routine_name, results['info'])) - - q = _fastCopyAndTranspose(result_t, q[:mc]) - r = _fastCopyAndTranspose(result_t, a[:, :mc]) - - return wrap(q), wrap(triu(r)) - - -# Eigenvalues - - -@array_function_dispatch(_unary_dispatcher) -def eigvals(a): - """ - Compute the eigenvalues of a general matrix. - - Main difference between `eigvals` and `eig`: the eigenvectors aren't - returned. - - Parameters - ---------- - a : (..., M, M) array_like - A complex- or real-valued matrix whose eigenvalues will be computed. - - Returns - ------- - w : (..., M,) ndarray - The eigenvalues, each repeated according to its multiplicity. - They are not necessarily ordered, nor are they necessarily - real for real matrices. - - Raises - ------ - LinAlgError - If the eigenvalue computation does not converge. - - See Also - -------- - eig : eigenvalues and right eigenvectors of general arrays - eigvalsh : eigenvalues of real symmetric or complex Hermitian - (conjugate symmetric) arrays. - eigh : eigenvalues and eigenvectors of real symmetric or complex - Hermitian (conjugate symmetric) arrays. - - Notes - ----- - - .. versionadded:: 1.8.0 - - Broadcasting rules apply, see the `numpy.linalg` documentation for - details. - - This is implemented using the ``_geev`` LAPACK routines which compute - the eigenvalues and eigenvectors of general square arrays. - - Examples - -------- - Illustration, using the fact that the eigenvalues of a diagonal matrix - are its diagonal elements, that multiplying a matrix on the left - by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose - of `Q`), preserves the eigenvalues of the "middle" matrix. 
In other words, - if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as - ``A``: - - >>> from numpy import linalg as LA - >>> x = np.random.random() - >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]]) - >>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :]) - (1.0, 1.0, 0.0) - - Now multiply a diagonal matrix by ``Q`` on one side and by ``Q.T`` on the other: - - >>> D = np.diag((-1,1)) - >>> LA.eigvals(D) - array([-1., 1.]) - >>> A = np.dot(Q, D) - >>> A = np.dot(A, Q.T) - >>> LA.eigvals(A) - array([ 1., -1.]) # random - - """ - a, wrap = _makearray(a) - _assert_stacked_2d(a) - _assert_stacked_square(a) - _assert_finite(a) - t, result_t = _commonType(a) - - extobj = get_linalg_error_extobj( - _raise_linalgerror_eigenvalues_nonconvergence) - signature = 'D->D' if isComplexType(t) else 'd->D' - w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj) - - if not isComplexType(t): - if all(w.imag == 0): - w = w.real - result_t = _realType(result_t) - else: - result_t = _complexType(result_t) - - return w.astype(result_t, copy=False) - - -def _eigvalsh_dispatcher(a, UPLO=None): - return (a,) - - -@array_function_dispatch(_eigvalsh_dispatcher) -def eigvalsh(a, UPLO='L'): - """ - Compute the eigenvalues of a complex Hermitian or real symmetric matrix. - - Main difference from eigh: the eigenvectors are not computed. - - Parameters - ---------- - a : (..., M, M) array_like - A complex- or real-valued matrix whose eigenvalues are to be - computed. - UPLO : {'L', 'U'}, optional - Specifies whether the calculation is done with the lower triangular - part of `a` ('L', default) or the upper triangular part ('U'). - Irrespective of this value only the real parts of the diagonal will - be considered in the computation to preserve the notion of a Hermitian - matrix. It therefore follows that the imaginary part of the diagonal - will always be treated as zero. 
- - Returns - ------- - w : (..., M,) ndarray - The eigenvalues in ascending order, each repeated according to - its multiplicity. - - Raises - ------ - LinAlgError - If the eigenvalue computation does not converge. - - See Also - -------- - eigh : eigenvalues and eigenvectors of real symmetric or complex Hermitian - (conjugate symmetric) arrays. - eigvals : eigenvalues of general real or complex arrays. - eig : eigenvalues and right eigenvectors of general real or complex - arrays. - - Notes - ----- - - .. versionadded:: 1.8.0 - - Broadcasting rules apply, see the `numpy.linalg` documentation for - details. - - The eigenvalues are computed using LAPACK routines ``_syevd``, ``_heevd``. - - Examples - -------- - >>> from numpy import linalg as LA - >>> a = np.array([[1, -2j], [2j, 5]]) - >>> LA.eigvalsh(a) - array([ 0.17157288, 5.82842712]) # may vary - - >>> # demonstrate the treatment of the imaginary part of the diagonal - >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]]) - >>> a - array([[5.+2.j, 9.-2.j], - [0.+2.j, 2.-1.j]]) - >>> # with UPLO='L' this is numerically equivalent to using LA.eigvals() - >>> # with: - >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]]) - >>> b - array([[5.+0.j, 0.-2.j], - [0.+2.j, 2.+0.j]]) - >>> wa = LA.eigvalsh(a) - >>> wb = LA.eigvals(b) - >>> wa; wb - array([1., 6.]) - array([6.+0.j, 1.+0.j]) - - """ - UPLO = UPLO.upper() - if UPLO not in ('L', 'U'): - raise ValueError("UPLO argument must be 'L' or 'U'") - - extobj = get_linalg_error_extobj( - _raise_linalgerror_eigenvalues_nonconvergence) - if UPLO == 'L': - gufunc = _umath_linalg.eigvalsh_lo - else: - gufunc = _umath_linalg.eigvalsh_up - - a, wrap = _makearray(a) - _assert_stacked_2d(a) - _assert_stacked_square(a) - t, result_t = _commonType(a) - signature = 'D->d' if isComplexType(t) else 'd->d' - w = gufunc(a, signature=signature, extobj=extobj) - return w.astype(_realType(result_t), copy=False) - -def _convertarray(a): - t, result_t = _commonType(a) - a = 
_fastCT(a.astype(t)) - return a, t, result_t - - -# Eigenvectors - - -@array_function_dispatch(_unary_dispatcher) -def eig(a): - """ - Compute the eigenvalues and right eigenvectors of a square array. - - Parameters - ---------- - a : (..., M, M) array - Matrices for which the eigenvalues and right eigenvectors will - be computed - - Returns - ------- - w : (..., M) array - The eigenvalues, each repeated according to its multiplicity. - The eigenvalues are not necessarily ordered. The resulting - array will be of complex type, unless the imaginary part is - zero in which case it will be cast to a real type. When `a` - is real the resulting eigenvalues will be real (0 imaginary - part) or occur in conjugate pairs - - v : (..., M, M) array - The normalized (unit "length") eigenvectors, such that the - column ``v[:,i]`` is the eigenvector corresponding to the - eigenvalue ``w[i]``. - - Raises - ------ - LinAlgError - If the eigenvalue computation does not converge. - - See Also - -------- - eigvals : eigenvalues of a non-symmetric array. - - eigh : eigenvalues and eigenvectors of a real symmetric or complex - Hermitian (conjugate symmetric) array. - - eigvalsh : eigenvalues of a real symmetric or complex Hermitian - (conjugate symmetric) array. - - Notes - ----- - - .. versionadded:: 1.8.0 - - Broadcasting rules apply, see the `numpy.linalg` documentation for - details. - - This is implemented using the ``_geev`` LAPACK routines which compute - the eigenvalues and eigenvectors of general square arrays. - - The number `w` is an eigenvalue of `a` if there exists a vector - `v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and - `v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]`` - for :math:`i \\in \\{0,...,M-1\\}`. - - The array `v` of eigenvectors may not be of maximum rank, that is, some - of the columns may be linearly dependent, although round-off error may - obscure that fact. 
If the eigenvalues are all different, then theoretically - the eigenvectors are linearly independent. Likewise, the (complex-valued) - matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e., - if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate - transpose of `a`. - - Finally, it is emphasized that `v` consists of the *right* (as in - right-hand side) eigenvectors of `a`. A vector `y` satisfying - ``dot(y.T, a) = z * y.T`` for some number `z` is called a *left* - eigenvector of `a`, and, in general, the left and right eigenvectors - of a matrix are not necessarily the (perhaps conjugate) transposes - of each other. - - References - ---------- - G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL, - Academic Press, Inc., 1980, Various pp. - - Examples - -------- - >>> from numpy import linalg as LA - - (Almost) trivial example with real e-values and e-vectors. - - >>> w, v = LA.eig(np.diag((1, 2, 3))) - >>> w; v - array([1., 2., 3.]) - array([[1., 0., 0.], - [0., 1., 0.], - [0., 0., 1.]]) - - Real matrix possessing complex e-values and e-vectors; note that the - e-values are complex conjugates of each other. - - >>> w, v = LA.eig(np.array([[1, -1], [1, 1]])) - >>> w; v - array([1.+1.j, 1.-1.j]) - array([[0.70710678+0.j , 0.70710678-0.j ], - [0. -0.70710678j, 0. +0.70710678j]]) - - Complex-valued matrix with real e-values (but complex-valued e-vectors); - note that ``a.conj().T == a``, i.e., `a` is Hermitian. - - >>> a = np.array([[1, 1j], [-1j, 1]]) - >>> w, v = LA.eig(a) - >>> w; v - array([2.+0.j, 0.+0.j]) - array([[ 0. +0.70710678j, 0.70710678+0.j ], # may vary - [ 0.70710678+0.j , -0. +0.70710678j]]) - - Be careful about round-off error! - - >>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]]) - >>> # Theor. 
e-values are 1 +/- 1e-9 - >>> w, v = LA.eig(a) - >>> w; v - array([1., 1.]) - array([[1., 0.], - [0., 1.]]) - - """ - a, wrap = _makearray(a) - _assert_stacked_2d(a) - _assert_stacked_square(a) - _assert_finite(a) - t, result_t = _commonType(a) - - extobj = get_linalg_error_extobj( - _raise_linalgerror_eigenvalues_nonconvergence) - signature = 'D->DD' if isComplexType(t) else 'd->DD' - w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj) - - if not isComplexType(t) and all(w.imag == 0.0): - w = w.real - vt = vt.real - result_t = _realType(result_t) - else: - result_t = _complexType(result_t) - - vt = vt.astype(result_t, copy=False) - return w.astype(result_t, copy=False), wrap(vt) - - -@array_function_dispatch(_eigvalsh_dispatcher) -def eigh(a, UPLO='L'): - """ - Return the eigenvalues and eigenvectors of a complex Hermitian - (conjugate symmetric) or a real symmetric matrix. - - Returns two objects, a 1-D array containing the eigenvalues of `a`, and - a 2-D square array or matrix (depending on the input type) of the - corresponding eigenvectors (in columns). - - Parameters - ---------- - a : (..., M, M) array - Hermitian or real symmetric matrices whose eigenvalues and - eigenvectors are to be computed. - UPLO : {'L', 'U'}, optional - Specifies whether the calculation is done with the lower triangular - part of `a` ('L', default) or the upper triangular part ('U'). - Irrespective of this value only the real parts of the diagonal will - be considered in the computation to preserve the notion of a Hermitian - matrix. It therefore follows that the imaginary part of the diagonal - will always be treated as zero. - - Returns - ------- - w : (..., M) ndarray - The eigenvalues in ascending order, each repeated according to - its multiplicity. - v : {(..., M, M) ndarray, (..., M, M) matrix} - The column ``v[:, i]`` is the normalized eigenvector corresponding - to the eigenvalue ``w[i]``. Will return a matrix object if `a` is - a matrix object. 
- - Raises - ------ - LinAlgError - If the eigenvalue computation does not converge. - - See Also - -------- - eigvalsh : eigenvalues of real symmetric or complex Hermitian - (conjugate symmetric) arrays. - eig : eigenvalues and right eigenvectors for non-symmetric arrays. - eigvals : eigenvalues of non-symmetric arrays. - - Notes - ----- - - .. versionadded:: 1.8.0 - - Broadcasting rules apply, see the `numpy.linalg` documentation for - details. - - The eigenvalues/eigenvectors are computed using LAPACK routines ``_syevd``, - ``_heevd``. - - The eigenvalues of real symmetric or complex Hermitian matrices are - always real. [1]_ The array `v` of (column) eigenvectors is unitary - and `a`, `w`, and `v` satisfy the equations - ``dot(a, v[:, i]) = w[i] * v[:, i]``. - - References - ---------- - .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, - FL, Academic Press, Inc., 1980, pg. 222. - - Examples - -------- - >>> from numpy import linalg as LA - >>> a = np.array([[1, -2j], [2j, 5]]) - >>> a - array([[ 1.+0.j, -0.-2.j], - [ 0.+2.j, 5.+0.j]]) - >>> w, v = LA.eigh(a) - >>> w; v - array([0.17157288, 5.82842712]) - array([[-0.92387953+0.j , -0.38268343+0.j ], # may vary - [ 0. +0.38268343j, 0. -0.92387953j]]) - - >>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair - array([5.55111512e-17+0.0000000e+00j, 0.00000000e+00+1.2490009e-16j]) - >>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair - array([0.+0.j, 0.+0.j]) - - >>> A = np.matrix(a) # what happens if input is a matrix object - >>> A - matrix([[ 1.+0.j, -0.-2.j], - [ 0.+2.j, 5.+0.j]]) - >>> w, v = LA.eigh(A) - >>> w; v - array([0.17157288, 5.82842712]) - matrix([[-0.92387953+0.j , -0.38268343+0.j ], # may vary - [ 0. +0.38268343j, 0. 
-0.92387953j]]) - - >>> # demonstrate the treatment of the imaginary part of the diagonal - >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]]) - >>> a - array([[5.+2.j, 9.-2.j], - [0.+2.j, 2.-1.j]]) - >>> # with UPLO='L' this is numerically equivalent to using LA.eig() with: - >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]]) - >>> b - array([[5.+0.j, 0.-2.j], - [0.+2.j, 2.+0.j]]) - >>> wa, va = LA.eigh(a) - >>> wb, vb = LA.eig(b) - >>> wa; wb - array([1., 6.]) - array([6.+0.j, 1.+0.j]) - >>> va; vb - array([[-0.4472136 +0.j , -0.89442719+0.j ], # may vary - [ 0. +0.89442719j, 0. -0.4472136j ]]) - array([[ 0.89442719+0.j , -0. +0.4472136j], - [-0. +0.4472136j, 0.89442719+0.j ]]) - """ - UPLO = UPLO.upper() - if UPLO not in ('L', 'U'): - raise ValueError("UPLO argument must be 'L' or 'U'") - - a, wrap = _makearray(a) - _assert_stacked_2d(a) - _assert_stacked_square(a) - t, result_t = _commonType(a) - - extobj = get_linalg_error_extobj( - _raise_linalgerror_eigenvalues_nonconvergence) - if UPLO == 'L': - gufunc = _umath_linalg.eigh_lo - else: - gufunc = _umath_linalg.eigh_up - - signature = 'D->dD' if isComplexType(t) else 'd->dd' - w, vt = gufunc(a, signature=signature, extobj=extobj) - w = w.astype(_realType(result_t), copy=False) - vt = vt.astype(result_t, copy=False) - return w, wrap(vt) - - -# Singular value decomposition - -def _svd_dispatcher(a, full_matrices=None, compute_uv=None, hermitian=None): - return (a,) - - -@array_function_dispatch(_svd_dispatcher) -def svd(a, full_matrices=True, compute_uv=True, hermitian=False): - """ - Singular Value Decomposition. - - When `a` is a 2D array, it is factorized as ``u @ np.diag(s) @ vh - = (u * s) @ vh``, where `u` and `vh` are 2D unitary arrays and `s` is a 1D - array of `a`'s singular values. When `a` is higher-dimensional, SVD is - applied in stacked mode as explained below. - - Parameters - ---------- - a : (..., M, N) array_like - A real or complex array with ``a.ndim >= 2``. 
- full_matrices : bool, optional - If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and - ``(..., N, N)``, respectively. Otherwise, the shapes are - ``(..., M, K)`` and ``(..., K, N)``, respectively, where - ``K = min(M, N)``. - compute_uv : bool, optional - Whether or not to compute `u` and `vh` in addition to `s`. True - by default. - hermitian : bool, optional - If True, `a` is assumed to be Hermitian (symmetric if real-valued), - enabling a more efficient method for finding singular values. - Defaults to False. - - .. versionadded:: 1.17.0 - - Returns - ------- - u : { (..., M, M), (..., M, K) } array - Unitary array(s). The first ``a.ndim - 2`` dimensions have the same - size as those of the input `a`. The size of the last two dimensions - depends on the value of `full_matrices`. Only returned when - `compute_uv` is True. - s : (..., K) array - Vector(s) with the singular values, within each vector sorted in - descending order. The first ``a.ndim - 2`` dimensions have the same - size as those of the input `a`. - vh : { (..., N, N), (..., K, N) } array - Unitary array(s). The first ``a.ndim - 2`` dimensions have the same - size as those of the input `a`. The size of the last two dimensions - depends on the value of `full_matrices`. Only returned when - `compute_uv` is True. - - Raises - ------ - LinAlgError - If SVD computation does not converge. - - Notes - ----- - - .. versionchanged:: 1.8.0 - Broadcasting rules apply, see the `numpy.linalg` documentation for - details. - - The decomposition is performed using LAPACK routine ``_gesdd``. - - SVD is usually described for the factorization of a 2D matrix :math:`A`. - The higher-dimensional case will be discussed below. In the 2D case, SVD is - written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`, - :math:`S= \\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s` - contains the singular values of `a` and `u` and `vh` are unitary. 
The rows - of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are - the eigenvectors of :math:`A A^H`. In both cases the corresponding - (possibly non-zero) eigenvalues are given by ``s**2``. - - If `a` has more than two dimensions, then broadcasting rules apply, as - explained in :ref:`routines.linalg-broadcasting`. This means that SVD is - working in "stacked" mode: it iterates over all indices of the first - ``a.ndim - 2`` dimensions and for each combination SVD is applied to the - last two indices. The matrix `a` can be reconstructed from the - decomposition with either ``(u * s[..., None, :]) @ vh`` or - ``u @ (s[..., None] * vh)``. (The ``@`` operator can be replaced by the - function ``np.matmul`` for python versions below 3.5.) - - If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are - all the return values. - - Examples - -------- - >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6) - >>> b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3) - - Reconstruction based on full SVD, 2D case: - - >>> u, s, vh = np.linalg.svd(a, full_matrices=True) - >>> u.shape, s.shape, vh.shape - ((9, 9), (6,), (6, 6)) - >>> np.allclose(a, np.dot(u[:, :6] * s, vh)) - True - >>> smat = np.zeros((9, 6), dtype=complex) - >>> smat[:6, :6] = np.diag(s) - >>> np.allclose(a, np.dot(u, np.dot(smat, vh))) - True - - Reconstruction based on reduced SVD, 2D case: - - >>> u, s, vh = np.linalg.svd(a, full_matrices=False) - >>> u.shape, s.shape, vh.shape - ((9, 6), (6,), (6, 6)) - >>> np.allclose(a, np.dot(u * s, vh)) - True - >>> smat = np.diag(s) - >>> np.allclose(a, np.dot(u, np.dot(smat, vh))) - True - - Reconstruction based on full SVD, 4D case: - - >>> u, s, vh = np.linalg.svd(b, full_matrices=True) - >>> u.shape, s.shape, vh.shape - ((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3)) - >>> np.allclose(b, np.matmul(u[..., :3] * s[..., None, :], vh)) - True - >>> np.allclose(b, np.matmul(u[..., :3], s[..., None] * vh)) - True - - 
Reconstruction based on reduced SVD, 4D case: - - >>> u, s, vh = np.linalg.svd(b, full_matrices=False) - >>> u.shape, s.shape, vh.shape - ((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3)) - >>> np.allclose(b, np.matmul(u * s[..., None, :], vh)) - True - >>> np.allclose(b, np.matmul(u, s[..., None] * vh)) - True - - """ - a, wrap = _makearray(a) - - if hermitian: - # note: lapack returns eigenvalues in reverse order to our contract. - # reversing is cheap by design in numpy, so we do so to be consistent - if compute_uv: - s, u = eigh(a) - s = s[..., ::-1] - u = u[..., ::-1] - # singular values are unsigned, move the sign into v - vt = transpose(u * sign(s)[..., None, :]).conjugate() - s = abs(s) - return wrap(u), s, wrap(vt) - else: - s = eigvalsh(a) - s = s[..., ::-1] - s = abs(s) - return s - - _assert_stacked_2d(a) - t, result_t = _commonType(a) - - extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence) - - m, n = a.shape[-2:] - if compute_uv: - if full_matrices: - if m < n: - gufunc = _umath_linalg.svd_m_f - else: - gufunc = _umath_linalg.svd_n_f - else: - if m < n: - gufunc = _umath_linalg.svd_m_s - else: - gufunc = _umath_linalg.svd_n_s - - signature = 'D->DdD' if isComplexType(t) else 'd->ddd' - u, s, vh = gufunc(a, signature=signature, extobj=extobj) - u = u.astype(result_t, copy=False) - s = s.astype(_realType(result_t), copy=False) - vh = vh.astype(result_t, copy=False) - return wrap(u), s, wrap(vh) - else: - if m < n: - gufunc = _umath_linalg.svd_m - else: - gufunc = _umath_linalg.svd_n - - signature = 'D->d' if isComplexType(t) else 'd->d' - s = gufunc(a, signature=signature, extobj=extobj) - s = s.astype(_realType(result_t), copy=False) - return s - - -def _cond_dispatcher(x, p=None): - return (x,) - - -@array_function_dispatch(_cond_dispatcher) -def cond(x, p=None): - """ - Compute the condition number of a matrix. 
- - This function is capable of returning the condition number using - one of seven different norms, depending on the value of `p` (see - Parameters below). - - Parameters - ---------- - x : (..., M, N) array_like - The matrix whose condition number is sought. - p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional - Order of the norm: - - ===== ============================ - p norm for matrices - ===== ============================ - None 2-norm, computed directly using the ``SVD`` - 'fro' Frobenius norm - inf max(sum(abs(x), axis=1)) - -inf min(sum(abs(x), axis=1)) - 1 max(sum(abs(x), axis=0)) - -1 min(sum(abs(x), axis=0)) - 2 2-norm (largest sing. value) - -2 smallest singular value - ===== ============================ - - inf means the numpy.inf object, and the Frobenius norm is - the root-of-sum-of-squares norm. - - Returns - ------- - c : {float, inf} - The condition number of the matrix. May be infinite. - - See Also - -------- - numpy.linalg.norm - - Notes - ----- - The condition number of `x` is defined as the norm of `x` times the - norm of the inverse of `x` [1]_; the norm can be the usual L2-norm - (root-of-sum-of-squares) or one of a number of other matrix norms. - - References - ---------- - .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL, - Academic Press, Inc., 1980, pg. 285. 
- - Examples - -------- - >>> from numpy import linalg as LA - >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]]) - >>> a - array([[ 1, 0, -1], - [ 0, 1, 0], - [ 1, 0, 1]]) - >>> LA.cond(a) - 1.4142135623730951 - >>> LA.cond(a, 'fro') - 3.1622776601683795 - >>> LA.cond(a, np.inf) - 2.0 - >>> LA.cond(a, -np.inf) - 1.0 - >>> LA.cond(a, 1) - 2.0 - >>> LA.cond(a, -1) - 1.0 - >>> LA.cond(a, 2) - 1.4142135623730951 - >>> LA.cond(a, -2) - 0.70710678118654746 # may vary - >>> min(LA.svd(a, compute_uv=False))*min(LA.svd(LA.inv(a), compute_uv=False)) - 0.70710678118654746 # may vary - - """ - x = asarray(x) # in case we have a matrix - if _is_empty_2d(x): - raise LinAlgError("cond is not defined on empty arrays") - if p is None or p == 2 or p == -2: - s = svd(x, compute_uv=False) - with errstate(all='ignore'): - if p == -2: - r = s[..., -1] / s[..., 0] - else: - r = s[..., 0] / s[..., -1] - else: - # Call inv(x) ignoring errors. The result array will - # contain nans in the entries where inversion failed. 
- _assert_stacked_2d(x) - _assert_stacked_square(x) - t, result_t = _commonType(x) - signature = 'D->D' if isComplexType(t) else 'd->d' - with errstate(all='ignore'): - invx = _umath_linalg.inv(x, signature=signature) - r = norm(x, p, axis=(-2, -1)) * norm(invx, p, axis=(-2, -1)) - r = r.astype(result_t, copy=False) - - # Convert nans to infs unless the original array had nan entries - r = asarray(r) - nan_mask = isnan(r) - if nan_mask.any(): - nan_mask &= ~isnan(x).any(axis=(-2, -1)) - if r.ndim > 0: - r[nan_mask] = Inf - elif nan_mask: - r[()] = Inf - - # Convention is to return scalars instead of 0d arrays - if r.ndim == 0: - r = r[()] - - return r - - -def _matrix_rank_dispatcher(M, tol=None, hermitian=None): - return (M,) - - -@array_function_dispatch(_matrix_rank_dispatcher) -def matrix_rank(M, tol=None, hermitian=False): - """ - Return matrix rank of array using SVD method - - Rank of the array is the number of singular values of the array that are - greater than `tol`. - - .. versionchanged:: 1.14 - Can now operate on stacks of matrices - - Parameters - ---------- - M : {(M,), (..., M, N)} array_like - Input vector or stack of matrices. - tol : (...) array_like, float, optional - Threshold below which SVD values are considered zero. If `tol` is - None, and ``S`` is an array with singular values for `M`, and - ``eps`` is the epsilon value for datatype of ``S``, then `tol` is - set to ``S.max() * max(M.shape) * eps``. - - .. versionchanged:: 1.14 - Broadcasted against the stack of matrices - hermitian : bool, optional - If True, `M` is assumed to be Hermitian (symmetric if real-valued), - enabling a more efficient method for finding singular values. - Defaults to False. - - .. versionadded:: 1.14 - - Returns - ------- - rank : (...) array_like - Rank of M. - - Notes - ----- - The default threshold to detect rank deficiency is a test on the magnitude - of the singular values of `M`. 
By default, we identify singular values less - than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with - the symbols defined above). This is the algorithm MATLAB uses [1]. It also - appears in *Numerical recipes* in the discussion of SVD solutions for linear - least squares [2]. - - This default threshold is designed to detect rank deficiency accounting for - the numerical errors of the SVD computation. Imagine that there is a column - in `M` that is an exact (in floating point) linear combination of other - columns in `M`. Computing the SVD on `M` will not produce a singular value - exactly equal to 0 in general: any difference of the smallest SVD value from - 0 will be caused by numerical imprecision in the calculation of the SVD. - Our threshold for small SVD values takes this numerical imprecision into - account, and the default threshold will detect such numerical rank - deficiency. The threshold may declare a matrix `M` rank deficient even if - the linear combination of some columns of `M` is not exactly equal to - another column of `M` but only numerically very close to another column of - `M`. - - We chose our default threshold because it is in wide use. Other thresholds - are possible. For example, elsewhere in the 2007 edition of *Numerical - recipes* there is an alternative threshold of ``S.max() * - np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe - this threshold as being based on "expected roundoff error" (p 71). - - The thresholds above deal with floating point roundoff error in the - calculation of the SVD. However, you may have more information about the - sources of error in `M` that would make you consider other tolerance values - to detect *effective* rank deficiency. The most useful measure of the - tolerance depends on the operations you intend to use on your matrix. 
For - example, if your data come from uncertain measurements with uncertainties - greater than floating point epsilon, choosing a tolerance near that - uncertainty may be preferable. The tolerance may be absolute if the - uncertainties are absolute rather than relative. - - References - ---------- - .. [1] MATLAB reference documention, "Rank" - https://www.mathworks.com/help/techdoc/ref/rank.html - .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery, - "Numerical Recipes (3rd edition)", Cambridge University Press, 2007, - page 795. - - Examples - -------- - >>> from numpy.linalg import matrix_rank - >>> matrix_rank(np.eye(4)) # Full rank matrix - 4 - >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix - >>> matrix_rank(I) - 3 - >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0 - 1 - >>> matrix_rank(np.zeros((4,))) - 0 - """ - M = asarray(M) - if M.ndim < 2: - return int(not all(M==0)) - S = svd(M, compute_uv=False, hermitian=hermitian) - if tol is None: - tol = S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) * finfo(S.dtype).eps - else: - tol = asarray(tol)[..., newaxis] - return count_nonzero(S > tol, axis=-1) - - -# Generalized inverse - -def _pinv_dispatcher(a, rcond=None, hermitian=None): - return (a,) - - -@array_function_dispatch(_pinv_dispatcher) -def pinv(a, rcond=1e-15, hermitian=False): - """ - Compute the (Moore-Penrose) pseudo-inverse of a matrix. - - Calculate the generalized inverse of a matrix using its - singular-value decomposition (SVD) and including all - *large* singular values. - - .. versionchanged:: 1.14 - Can now operate on stacks of matrices - - Parameters - ---------- - a : (..., M, N) array_like - Matrix or stack of matrices to be pseudo-inverted. - rcond : (...) array_like of float - Cutoff for small singular values. - Singular values less than or equal to - ``rcond * largest_singular_value`` are set to zero. - Broadcasts against the stack of matrices. 
- hermitian : bool, optional - If True, `a` is assumed to be Hermitian (symmetric if real-valued), - enabling a more efficient method for finding singular values. - Defaults to False. - - .. versionadded:: 1.17.0 - - Returns - ------- - B : (..., N, M) ndarray - The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so - is `B`. - - Raises - ------ - LinAlgError - If the SVD computation does not converge. - - Notes - ----- - The pseudo-inverse of a matrix A, denoted :math:`A^+`, is - defined as: "the matrix that 'solves' [the least-squares problem] - :math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then - :math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`. - - It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular - value decomposition of A, then - :math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are - orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting - of A's so-called singular values, (followed, typically, by - zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix - consisting of the reciprocals of A's singular values - (again, followed by zeros). [1]_ - - References - ---------- - .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, - FL, Academic Press, Inc., 1980, pp. 139-142. 
- - Examples - -------- - The following example checks that ``a * a+ * a == a`` and - ``a+ * a * a+ == a+``: - - >>> a = np.random.randn(9, 6) - >>> B = np.linalg.pinv(a) - >>> np.allclose(a, np.dot(a, np.dot(B, a))) - True - >>> np.allclose(B, np.dot(B, np.dot(a, B))) - True - - """ - a, wrap = _makearray(a) - rcond = asarray(rcond) - if _is_empty_2d(a): - m, n = a.shape[-2:] - res = empty(a.shape[:-2] + (n, m), dtype=a.dtype) - return wrap(res) - a = a.conjugate() - u, s, vt = svd(a, full_matrices=False, hermitian=hermitian) - - # discard small singular values - cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True) - large = s > cutoff - s = divide(1, s, where=large, out=s) - s[~large] = 0 - - res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u))) - return wrap(res) - - -# Determinant - - -@array_function_dispatch(_unary_dispatcher) -def slogdet(a): - """ - Compute the sign and (natural) logarithm of the determinant of an array. - - If an array has a very small or very large determinant, then a call to - `det` may overflow or underflow. This routine is more robust against such - issues, because it computes the logarithm of the determinant rather than - the determinant itself. - - Parameters - ---------- - a : (..., M, M) array_like - Input array, has to be a square 2-D array. - - Returns - ------- - sign : (...) array_like - A number representing the sign of the determinant. For a real matrix, - this is 1, 0, or -1. For a complex matrix, this is a complex number - with absolute value 1 (i.e., it is on the unit circle), or else 0. - logdet : (...) array_like - The natural log of the absolute value of the determinant. - - If the determinant is zero, then `sign` will be 0 and `logdet` will be - -Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``. - - See Also - -------- - det - - Notes - ----- - - .. versionadded:: 1.8.0 - - Broadcasting rules apply, see the `numpy.linalg` documentation for - details. - - .. 
versionadded:: 1.6.0 - - The determinant is computed via LU factorization using the LAPACK - routine ``z/dgetrf``. - - - Examples - -------- - The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``: - - >>> a = np.array([[1, 2], [3, 4]]) - >>> (sign, logdet) = np.linalg.slogdet(a) - >>> (sign, logdet) - (-1, 0.69314718055994529) # may vary - >>> sign * np.exp(logdet) - -2.0 - - Computing log-determinants for a stack of matrices: - - >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ]) - >>> a.shape - (3, 2, 2) - >>> sign, logdet = np.linalg.slogdet(a) - >>> (sign, logdet) - (array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154])) - >>> sign * np.exp(logdet) - array([-2., -3., -8.]) - - This routine succeeds where ordinary `det` does not: - - >>> np.linalg.det(np.eye(500) * 0.1) - 0.0 - >>> np.linalg.slogdet(np.eye(500) * 0.1) - (1, -1151.2925464970228) - - """ - a = asarray(a) - _assert_stacked_2d(a) - _assert_stacked_square(a) - t, result_t = _commonType(a) - real_t = _realType(result_t) - signature = 'D->Dd' if isComplexType(t) else 'd->dd' - sign, logdet = _umath_linalg.slogdet(a, signature=signature) - sign = sign.astype(result_t, copy=False) - logdet = logdet.astype(real_t, copy=False) - return sign, logdet - - -@array_function_dispatch(_unary_dispatcher) -def det(a): - """ - Compute the determinant of an array. - - Parameters - ---------- - a : (..., M, M) array_like - Input array to compute determinants for. - - Returns - ------- - det : (...) array_like - Determinant of `a`. - - See Also - -------- - slogdet : Another way to represent the determinant, more suitable - for large matrices where underflow/overflow may occur. - - Notes - ----- - - .. versionadded:: 1.8.0 - - Broadcasting rules apply, see the `numpy.linalg` documentation for - details. - - The determinant is computed via LU factorization using the LAPACK - routine ``z/dgetrf``. 
- - Examples - -------- - The determinant of a 2-D array [[a, b], [c, d]] is ad - bc: - - >>> a = np.array([[1, 2], [3, 4]]) - >>> np.linalg.det(a) - -2.0 # may vary - - Computing determinants for a stack of matrices: - - >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ]) - >>> a.shape - (3, 2, 2) - >>> np.linalg.det(a) - array([-2., -3., -8.]) - - """ - a = asarray(a) - _assert_stacked_2d(a) - _assert_stacked_square(a) - t, result_t = _commonType(a) - signature = 'D->D' if isComplexType(t) else 'd->d' - r = _umath_linalg.det(a, signature=signature) - r = r.astype(result_t, copy=False) - return r - - -# Linear Least Squares - -def _lstsq_dispatcher(a, b, rcond=None): - return (a, b) - - -@array_function_dispatch(_lstsq_dispatcher) -def lstsq(a, b, rcond="warn"): - r""" - Return the least-squares solution to a linear matrix equation. - - Solves the equation :math:`a x = b` by computing a vector `x` that - minimizes the squared Euclidean 2-norm :math:`\| b - a x \|^2_2`. - The equation may be under-, well-, or over-determined (i.e., the - number of linearly independent rows of `a` can be less than, equal - to, or greater than its number of linearly independent columns). - If `a` is square and of full rank, then `x` (but for round-off error) - is the "exact" solution of the equation. - - Parameters - ---------- - a : (M, N) array_like - "Coefficient" matrix. - b : {(M,), (M, K)} array_like - Ordinate or "dependent variable" values. If `b` is two-dimensional, - the least-squares solution is calculated for each of the `K` columns - of `b`. - rcond : float, optional - Cut-off ratio for small singular values of `a`. - For the purposes of rank determination, singular values are treated - as zero if they are smaller than `rcond` times the largest singular - value of `a`. - - .. versionchanged:: 1.14.0 - If not set, a FutureWarning is given. 
The previous default - of ``-1`` will use the machine precision as `rcond` parameter, - the new default will use the machine precision times `max(M, N)`. - To silence the warning and use the new default, use ``rcond=None``, - to keep using the old behavior, use ``rcond=-1``. - - Returns - ------- - x : {(N,), (N, K)} ndarray - Least-squares solution. If `b` is two-dimensional, - the solutions are in the `K` columns of `x`. - residuals : {(1,), (K,), (0,)} ndarray - Sums of residuals; squared Euclidean 2-norm for each column in - ``b - a*x``. - If the rank of `a` is < N or M <= N, this is an empty array. - If `b` is 1-dimensional, this is a (1,) shape array. - Otherwise the shape is (K,). - rank : int - Rank of matrix `a`. - s : (min(M, N),) ndarray - Singular values of `a`. - - Raises - ------ - LinAlgError - If computation does not converge. - - Notes - ----- - If `b` is a matrix, then all array results are returned as matrices. - - Examples - -------- - Fit a line, ``y = mx + c``, through some noisy data-points: - - >>> x = np.array([0, 1, 2, 3]) - >>> y = np.array([-1, 0.2, 0.9, 2.1]) - - By examining the coefficients, we see that the line should have a - gradient of roughly 1 and cut the y-axis at, more or less, -1. - - We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]`` - and ``p = [[m], [c]]``. 
Now use `lstsq` to solve for `p`: - - >>> A = np.vstack([x, np.ones(len(x))]).T - >>> A - array([[ 0., 1.], - [ 1., 1.], - [ 2., 1.], - [ 3., 1.]]) - - >>> m, c = np.linalg.lstsq(A, y, rcond=None)[0] - >>> m, c - (1.0 -0.95) # may vary - - Plot the data along with the fitted line: - - >>> import matplotlib.pyplot as plt - >>> _ = plt.plot(x, y, 'o', label='Original data', markersize=10) - >>> _ = plt.plot(x, m*x + c, 'r', label='Fitted line') - >>> _ = plt.legend() - >>> plt.show() - - """ - a, _ = _makearray(a) - b, wrap = _makearray(b) - is_1d = b.ndim == 1 - if is_1d: - b = b[:, newaxis] - _assert_2d(a, b) - m, n = a.shape[-2:] - m2, n_rhs = b.shape[-2:] - if m != m2: - raise LinAlgError('Incompatible dimensions') - - t, result_t = _commonType(a, b) - # FIXME: real_t is unused - real_t = _linalgRealType(t) - result_real_t = _realType(result_t) - - # Determine default rcond value - if rcond == "warn": - # 2017-08-19, 1.14.0 - warnings.warn("`rcond` parameter will change to the default of " - "machine precision times ``max(M, N)`` where M and N " - "are the input matrix dimensions.\n" - "To use the future default and silence this warning " - "we advise to pass `rcond=None`, to keep using the old, " - "explicitly pass `rcond=-1`.", - FutureWarning, stacklevel=3) - rcond = -1 - if rcond is None: - rcond = finfo(t).eps * max(n, m) - - if m <= n: - gufunc = _umath_linalg.lstsq_m - else: - gufunc = _umath_linalg.lstsq_n - - signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid' - extobj = get_linalg_error_extobj(_raise_linalgerror_lstsq) - if n_rhs == 0: - # lapack can't handle n_rhs = 0 - so allocate the array one larger in that axis - b = zeros(b.shape[:-2] + (m, n_rhs + 1), dtype=b.dtype) - x, resids, rank, s = gufunc(a, b, rcond, signature=signature, extobj=extobj) - if m == 0: - x[...] 
= 0 - if n_rhs == 0: - # remove the item we added - x = x[..., :n_rhs] - resids = resids[..., :n_rhs] - - # remove the axis we added - if is_1d: - x = x.squeeze(axis=-1) - # we probably should squeeze resids too, but we can't - # without breaking compatibility. - - # as documented - if rank != n or m <= n: - resids = array([], result_real_t) - - # coerce output arrays - s = s.astype(result_real_t, copy=False) - resids = resids.astype(result_real_t, copy=False) - x = x.astype(result_t, copy=True) # Copying lets the memory in r_parts be freed - return wrap(x), wrap(resids), rank, s - - -def _multi_svd_norm(x, row_axis, col_axis, op): - """Compute a function of the singular values of the 2-D matrices in `x`. - - This is a private utility function used by `numpy.linalg.norm()`. - - Parameters - ---------- - x : ndarray - row_axis, col_axis : int - The axes of `x` that hold the 2-D matrices. - op : callable - This should be either numpy.amin or `numpy.amax` or `numpy.sum`. - - Returns - ------- - result : float or ndarray - If `x` is 2-D, the return values is a float. - Otherwise, it is an array with ``x.ndim - 2`` dimensions. - The return values are either the minimum or maximum or sum of the - singular values of the matrices, depending on whether `op` - is `numpy.amin` or `numpy.amax` or `numpy.sum`. - - """ - y = moveaxis(x, (row_axis, col_axis), (-2, -1)) - result = op(svd(y, compute_uv=False), axis=-1) - return result - - -def _norm_dispatcher(x, ord=None, axis=None, keepdims=None): - return (x,) - - -@array_function_dispatch(_norm_dispatcher) -def norm(x, ord=None, axis=None, keepdims=False): - """ - Matrix or vector norm. - - This function is able to return one of eight different matrix norms, - or one of an infinite number of vector norms (described below), depending - on the value of the ``ord`` parameter. - - Parameters - ---------- - x : array_like - Input array. If `axis` is None, `x` must be 1-D or 2-D, unless `ord` - is None. 
If both `axis` and `ord` are None, the 2-norm of - ``x.ravel`` will be returned. - ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional - Order of the norm (see table under ``Notes``). inf means numpy's - `inf` object. The default is None. - axis : {None, int, 2-tuple of ints}, optional. - If `axis` is an integer, it specifies the axis of `x` along which to - compute the vector norms. If `axis` is a 2-tuple, it specifies the - axes that hold 2-D matrices, and the matrix norms of these matrices - are computed. If `axis` is None then either a vector norm (when `x` - is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default - is None. - - .. versionadded:: 1.8.0 - - keepdims : bool, optional - If this is set to True, the axes which are normed over are left in the - result as dimensions with size one. With this option the result will - broadcast correctly against the original `x`. - - .. versionadded:: 1.10.0 - - Returns - ------- - n : float or ndarray - Norm of the matrix or vector(s). - - Notes - ----- - For values of ``ord <= 0``, the result is, strictly speaking, not a - mathematical 'norm', but it may still be useful for various numerical - purposes. - - The following norms can be calculated: - - ===== ============================ ========================== - ord norm for matrices norm for vectors - ===== ============================ ========================== - None Frobenius norm 2-norm - 'fro' Frobenius norm -- - 'nuc' nuclear norm -- - inf max(sum(abs(x), axis=1)) max(abs(x)) - -inf min(sum(abs(x), axis=1)) min(abs(x)) - 0 -- sum(x != 0) - 1 max(sum(abs(x), axis=0)) as below - -1 min(sum(abs(x), axis=0)) as below - 2 2-norm (largest sing. value) as below - -2 smallest singular value as below - other -- sum(abs(x)**ord)**(1./ord) - ===== ============================ ========================== - - The Frobenius norm is given by [1]_: - - :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}` - - The nuclear norm is the sum of the singular values. 
- - References - ---------- - .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, - Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15 - - Examples - -------- - >>> from numpy import linalg as LA - >>> a = np.arange(9) - 4 - >>> a - array([-4, -3, -2, ..., 2, 3, 4]) - >>> b = a.reshape((3, 3)) - >>> b - array([[-4, -3, -2], - [-1, 0, 1], - [ 2, 3, 4]]) - - >>> LA.norm(a) - 7.745966692414834 - >>> LA.norm(b) - 7.745966692414834 - >>> LA.norm(b, 'fro') - 7.745966692414834 - >>> LA.norm(a, np.inf) - 4.0 - >>> LA.norm(b, np.inf) - 9.0 - >>> LA.norm(a, -np.inf) - 0.0 - >>> LA.norm(b, -np.inf) - 2.0 - - >>> LA.norm(a, 1) - 20.0 - >>> LA.norm(b, 1) - 7.0 - >>> LA.norm(a, -1) - -4.6566128774142013e-010 - >>> LA.norm(b, -1) - 6.0 - >>> LA.norm(a, 2) - 7.745966692414834 - >>> LA.norm(b, 2) - 7.3484692283495345 - - >>> LA.norm(a, -2) - 0.0 - >>> LA.norm(b, -2) - 1.8570331885190563e-016 # may vary - >>> LA.norm(a, 3) - 5.8480354764257312 # may vary - >>> LA.norm(a, -3) - 0.0 - - Using the `axis` argument to compute vector norms: - - >>> c = np.array([[ 1, 2, 3], - ... [-1, 1, 4]]) - >>> LA.norm(c, axis=0) - array([ 1.41421356, 2.23606798, 5. ]) - >>> LA.norm(c, axis=1) - array([ 3.74165739, 4.24264069]) - >>> LA.norm(c, ord=1, axis=1) - array([ 6., 6.]) - - Using the `axis` argument to compute matrix norms: - - >>> m = np.arange(8).reshape(2,2,2) - >>> LA.norm(m, axis=(1,2)) - array([ 3.74165739, 11.22497216]) - >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :]) - (3.7416573867739413, 11.224972160321824) - - """ - x = asarray(x) - - if not issubclass(x.dtype.type, (inexact, object_)): - x = x.astype(float) - - # Immediately handle some default, simple, fast, and common cases. 
- if axis is None: - ndim = x.ndim - if ((ord is None) or - (ord in ('f', 'fro') and ndim == 2) or - (ord == 2 and ndim == 1)): - - x = x.ravel(order='K') - if isComplexType(x.dtype.type): - sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag) - else: - sqnorm = dot(x, x) - ret = sqrt(sqnorm) - if keepdims: - ret = ret.reshape(ndim*[1]) - return ret - - # Normalize the `axis` argument to a tuple. - nd = x.ndim - if axis is None: - axis = tuple(range(nd)) - elif not isinstance(axis, tuple): - try: - axis = int(axis) - except Exception: - raise TypeError("'axis' must be None, an integer or a tuple of integers") - axis = (axis,) - - if len(axis) == 1: - if ord == Inf: - return abs(x).max(axis=axis, keepdims=keepdims) - elif ord == -Inf: - return abs(x).min(axis=axis, keepdims=keepdims) - elif ord == 0: - # Zero norm - return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims) - elif ord == 1: - # special case for speedup - return add.reduce(abs(x), axis=axis, keepdims=keepdims) - elif ord is None or ord == 2: - # special case for speedup - s = (x.conj() * x).real - return sqrt(add.reduce(s, axis=axis, keepdims=keepdims)) - else: - try: - ord + 1 - except TypeError: - raise ValueError("Invalid norm order for vectors.") - absx = abs(x) - absx **= ord - ret = add.reduce(absx, axis=axis, keepdims=keepdims) - ret **= (1 / ord) - return ret - elif len(axis) == 2: - row_axis, col_axis = axis - row_axis = normalize_axis_index(row_axis, nd) - col_axis = normalize_axis_index(col_axis, nd) - if row_axis == col_axis: - raise ValueError('Duplicate axes given.') - if ord == 2: - ret = _multi_svd_norm(x, row_axis, col_axis, amax) - elif ord == -2: - ret = _multi_svd_norm(x, row_axis, col_axis, amin) - elif ord == 1: - if col_axis > row_axis: - col_axis -= 1 - ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis) - elif ord == Inf: - if row_axis > col_axis: - row_axis -= 1 - ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis) - elif ord == -1: - if col_axis > 
row_axis: - col_axis -= 1 - ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis) - elif ord == -Inf: - if row_axis > col_axis: - row_axis -= 1 - ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis) - elif ord in [None, 'fro', 'f']: - ret = sqrt(add.reduce((x.conj() * x).real, axis=axis)) - elif ord == 'nuc': - ret = _multi_svd_norm(x, row_axis, col_axis, sum) - else: - raise ValueError("Invalid norm order for matrices.") - if keepdims: - ret_shape = list(x.shape) - ret_shape[axis[0]] = 1 - ret_shape[axis[1]] = 1 - ret = ret.reshape(ret_shape) - return ret - else: - raise ValueError("Improper number of dimensions to norm.") - - -# multi_dot - -def _multidot_dispatcher(arrays): - return arrays - - -@array_function_dispatch(_multidot_dispatcher) -def multi_dot(arrays): - """ - Compute the dot product of two or more arrays in a single function call, - while automatically selecting the fastest evaluation order. - - `multi_dot` chains `numpy.dot` and uses optimal parenthesization - of the matrices [1]_ [2]_. Depending on the shapes of the matrices, - this can speed up the multiplication a lot. - - If the first argument is 1-D it is treated as a row vector. - If the last argument is 1-D it is treated as a column vector. - The other arguments must be 2-D. - - Think of `multi_dot` as:: - - def multi_dot(arrays): return functools.reduce(np.dot, arrays) - - - Parameters - ---------- - arrays : sequence of array_like - If the first argument is 1-D it is treated as row vector. - If the last argument is 1-D it is treated as column vector. - The other arguments must be 2-D. - - Returns - ------- - output : ndarray - Returns the dot product of the supplied arrays. - - See Also - -------- - dot : dot multiplication with two arguments. - - References - ---------- - - .. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378 - .. 
[2] https://en.wikipedia.org/wiki/Matrix_chain_multiplication - - Examples - -------- - `multi_dot` allows you to write:: - - >>> from numpy.linalg import multi_dot - >>> # Prepare some data - >>> A = np.random.random((10000, 100)) - >>> B = np.random.random((100, 1000)) - >>> C = np.random.random((1000, 5)) - >>> D = np.random.random((5, 333)) - >>> # the actual dot multiplication - >>> _ = multi_dot([A, B, C, D]) - - instead of:: - - >>> _ = np.dot(np.dot(np.dot(A, B), C), D) - >>> # or - >>> _ = A.dot(B).dot(C).dot(D) - - Notes - ----- - The cost for a matrix multiplication can be calculated with the - following function:: - - def cost(A, B): - return A.shape[0] * A.shape[1] * B.shape[1] - - Assume we have three matrices - :math:`A_{10x100}, B_{100x5}, C_{5x50}`. - - The costs for the two different parenthesizations are as follows:: - - cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500 - cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000 - - """ - n = len(arrays) - # optimization only makes sense for len(arrays) > 2 - if n < 2: - raise ValueError("Expecting at least two arrays.") - elif n == 2: - return dot(arrays[0], arrays[1]) - - arrays = [asanyarray(a) for a in arrays] - - # save original ndim to reshape the result array into the proper form later - ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim - # Explicitly convert vectors to 2D arrays to keep the logic of the internal - # _multi_dot_* functions as simple as possible. 
- if arrays[0].ndim == 1: - arrays[0] = atleast_2d(arrays[0]) - if arrays[-1].ndim == 1: - arrays[-1] = atleast_2d(arrays[-1]).T - _assert_2d(*arrays) - - # _multi_dot_three is much faster than _multi_dot_matrix_chain_order - if n == 3: - result = _multi_dot_three(arrays[0], arrays[1], arrays[2]) - else: - order = _multi_dot_matrix_chain_order(arrays) - result = _multi_dot(arrays, order, 0, n - 1) - - # return proper shape - if ndim_first == 1 and ndim_last == 1: - return result[0, 0] # scalar - elif ndim_first == 1 or ndim_last == 1: - return result.ravel() # 1-D - else: - return result - - -def _multi_dot_three(A, B, C): - """ - Find the best order for three arrays and do the multiplication. - - For three arguments `_multi_dot_three` is approximately 15 times faster - than `_multi_dot_matrix_chain_order` - - """ - a0, a1b0 = A.shape - b1c0, c1 = C.shape - # cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1 - cost1 = a0 * b1c0 * (a1b0 + c1) - # cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1 - cost2 = a1b0 * c1 * (a0 + b1c0) - - if cost1 < cost2: - return dot(dot(A, B), C) - else: - return dot(A, dot(B, C)) - - -def _multi_dot_matrix_chain_order(arrays, return_costs=False): - """ - Return a np.array that encodes the optimal order of mutiplications. - - The optimal order array is then used by `_multi_dot()` to do the - multiplication. - - Also return the cost matrix if `return_costs` is `True` - - The implementation CLOSELY follows Cormen, "Introduction to Algorithms", - Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices. 
- - cost[i, j] = min([ - cost[prefix] + cost[suffix] + cost_mult(prefix, suffix) - for k in range(i, j)]) - - """ - n = len(arrays) - # p stores the dimensions of the matrices - # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50] - p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]] - # m is a matrix of costs of the subproblems - # m[i,j]: min number of scalar multiplications needed to compute A_{i..j} - m = zeros((n, n), dtype=double) - # s is the actual ordering - # s[i, j] is the value of k at which we split the product A_i..A_j - s = empty((n, n), dtype=intp) - - for l in range(1, n): - for i in range(n - l): - j = i + l - m[i, j] = Inf - for k in range(i, j): - q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1] - if q < m[i, j]: - m[i, j] = q - s[i, j] = k # Note that Cormen uses 1-based index - - return (s, m) if return_costs else s - - -def _multi_dot(arrays, order, i, j): - """Actually do the multiplication with the given order.""" - if i == j: - return arrays[i] - else: - return dot(_multi_dot(arrays, order, i, order[i, j]), - _multi_dot(arrays, order, order[i, j] + 1, j)) diff --git a/venv/lib/python3.7/site-packages/numpy/linalg/setup.py b/venv/lib/python3.7/site-packages/numpy/linalg/setup.py deleted file mode 100644 index 6315a34..0000000 --- a/venv/lib/python3.7/site-packages/numpy/linalg/setup.py +++ /dev/null @@ -1,63 +0,0 @@ -from __future__ import division, print_function - -import os -import sys - -def configuration(parent_package='', top_path=None): - from numpy.distutils.misc_util import Configuration - from numpy.distutils.system_info import get_info - config = Configuration('linalg', parent_package, top_path) - - config.add_data_dir('tests') - - # Configure lapack_lite - - src_dir = 'lapack_lite' - lapack_lite_src = [ - os.path.join(src_dir, 'python_xerbla.c'), - os.path.join(src_dir, 'f2c_z_lapack.c'), - os.path.join(src_dir, 'f2c_c_lapack.c'), - os.path.join(src_dir, 'f2c_d_lapack.c'), - os.path.join(src_dir, 
'f2c_s_lapack.c'), - os.path.join(src_dir, 'f2c_lapack.c'), - os.path.join(src_dir, 'f2c_blas.c'), - os.path.join(src_dir, 'f2c_config.c'), - os.path.join(src_dir, 'f2c.c'), - ] - all_sources = config.paths(lapack_lite_src) - - if os.environ.get('NPY_USE_BLAS_ILP64', "0") != "0": - lapack_info = get_info('lapack_ilp64_opt', 2) - else: - lapack_info = get_info('lapack_opt', 0) # and {} - - def get_lapack_lite_sources(ext, build_dir): - if not lapack_info: - print("### Warning: Using unoptimized lapack ###") - return all_sources - else: - if sys.platform == 'win32': - print("### Warning: python_xerbla.c is disabled ###") - return [] - return [all_sources[0]] - - config.add_extension( - 'lapack_lite', - sources=['lapack_litemodule.c', get_lapack_lite_sources], - depends=['lapack_lite/f2c.h'], - extra_info=lapack_info, - ) - - # umath_linalg module - config.add_extension( - '_umath_linalg', - sources=['umath_linalg.c.src', get_lapack_lite_sources], - depends=['lapack_lite/f2c.h'], - extra_info=lapack_info, - libraries=['npymath'], - ) - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/venv/lib/python3.7/site-packages/numpy/linalg/tests/__init__.py b/venv/lib/python3.7/site-packages/numpy/linalg/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/numpy/linalg/tests/test_build.py b/venv/lib/python3.7/site-packages/numpy/linalg/tests/test_build.py deleted file mode 100644 index 921390d..0000000 --- a/venv/lib/python3.7/site-packages/numpy/linalg/tests/test_build.py +++ /dev/null @@ -1,55 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from subprocess import PIPE, Popen -import sys -import re -import pytest - -from numpy.linalg import lapack_lite -from numpy.testing import assert_ - - -class FindDependenciesLdd(object): - - def __init__(self): - self.cmd = ['ldd'] - - try: - p = Popen(self.cmd, 
stdout=PIPE, stderr=PIPE) - stdout, stderr = p.communicate() - except OSError: - raise RuntimeError("command %s cannot be run" % self.cmd) - - def get_dependencies(self, lfile): - p = Popen(self.cmd + [lfile], stdout=PIPE, stderr=PIPE) - stdout, stderr = p.communicate() - if not (p.returncode == 0): - raise RuntimeError("failed dependencies check for %s" % lfile) - - return stdout - - def grep_dependencies(self, lfile, deps): - stdout = self.get_dependencies(lfile) - - rdeps = dict([(dep, re.compile(dep)) for dep in deps]) - founds = [] - for l in stdout.splitlines(): - for k, v in rdeps.items(): - if v.search(l): - founds.append(k) - - return founds - - -class TestF77Mismatch(object): - - @pytest.mark.skipif(not(sys.platform[:5] == 'linux'), - reason="no fortran compiler on non-Linux platform") - def test_lapack(self): - f = FindDependenciesLdd() - deps = f.grep_dependencies(lapack_lite.__file__, - [b'libg2c', b'libgfortran']) - assert_(len(deps) <= 1, - """Both g77 and gfortran runtimes linked in lapack_lite ! This is likely to -cause random crashes and wrong results. See numpy INSTALL.txt for more -information.""") diff --git a/venv/lib/python3.7/site-packages/numpy/linalg/tests/test_deprecations.py b/venv/lib/python3.7/site-packages/numpy/linalg/tests/test_deprecations.py deleted file mode 100644 index e12755e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/linalg/tests/test_deprecations.py +++ /dev/null @@ -1,22 +0,0 @@ -"""Test deprecation and future warnings. - -""" -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.testing import assert_warns - - -def test_qr_mode_full_future_warning(): - """Check mode='full' FutureWarning. - - In numpy 1.8 the mode options 'full' and 'economic' in linalg.qr were - deprecated. The release date will probably be sometime in the summer - of 2013. 
- - """ - a = np.eye(2) - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='full') - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='f') - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='economic') - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='e') diff --git a/venv/lib/python3.7/site-packages/numpy/linalg/tests/test_linalg.py b/venv/lib/python3.7/site-packages/numpy/linalg/tests/test_linalg.py deleted file mode 100644 index ef05b59..0000000 --- a/venv/lib/python3.7/site-packages/numpy/linalg/tests/test_linalg.py +++ /dev/null @@ -1,2046 +0,0 @@ -""" Test functions for linalg module - -""" -from __future__ import division, absolute_import, print_function - -import os -import sys -import itertools -import traceback -import textwrap -import subprocess -import pytest - -import numpy as np -from numpy import array, single, double, csingle, cdouble, dot, identity, matmul -from numpy import multiply, atleast_2d, inf, asarray -from numpy import linalg -from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError -from numpy.linalg.linalg import _multi_dot_matrix_chain_order -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_array_equal, - assert_almost_equal, assert_allclose, suppress_warnings, - assert_raises_regex, HAS_LAPACK64, - ) -from numpy.testing._private.utils import requires_memory - - -def consistent_subclass(out, in_): - # For ndarray subclass input, our output should have the same subclass - # (non-ndarray input gets converted to ndarray). 
- return type(out) is (type(in_) if isinstance(in_, np.ndarray) - else np.ndarray) - - -old_assert_almost_equal = assert_almost_equal - - -def assert_almost_equal(a, b, single_decimal=6, double_decimal=12, **kw): - if asarray(a).dtype.type in (single, csingle): - decimal = single_decimal - else: - decimal = double_decimal - old_assert_almost_equal(a, b, decimal=decimal, **kw) - - -def get_real_dtype(dtype): - return {single: single, double: double, - csingle: single, cdouble: double}[dtype] - - -def get_complex_dtype(dtype): - return {single: csingle, double: cdouble, - csingle: csingle, cdouble: cdouble}[dtype] - - -def get_rtol(dtype): - # Choose a safe rtol - if dtype in (single, csingle): - return 1e-5 - else: - return 1e-11 - - -# used to categorize tests -all_tags = { - 'square', 'nonsquare', 'hermitian', # mutually exclusive - 'generalized', 'size-0', 'strided' # optional additions -} - - -class LinalgCase(object): - def __init__(self, name, a, b, tags=set()): - """ - A bundle of arguments to be passed to a test case, with an identifying - name, the operands a and b, and a set of tags to filter the tests - """ - assert_(isinstance(name, str)) - self.name = name - self.a = a - self.b = b - self.tags = frozenset(tags) # prevent shared tags - - def check(self, do): - """ - Run the function `do` on this test case, expanding arguments - """ - do(self.a, self.b, tags=self.tags) - - def __repr__(self): - return "" % (self.name,) - - -def apply_tag(tag, cases): - """ - Add the given tag (a string) to each of the cases (a list of LinalgCase - objects) - """ - assert tag in all_tags, "Invalid tag" - for case in cases: - case.tags = case.tags | {tag} - return cases - - -# -# Base test cases -# - -np.random.seed(1234) - -CASES = [] - -# square test cases -CASES += apply_tag('square', [ - LinalgCase("single", - array([[1., 2.], [3., 4.]], dtype=single), - array([2., 1.], dtype=single)), - LinalgCase("double", - array([[1., 2.], [3., 4.]], dtype=double), - array([2., 1.], 
dtype=double)), - LinalgCase("double_2", - array([[1., 2.], [3., 4.]], dtype=double), - array([[2., 1., 4.], [3., 4., 6.]], dtype=double)), - LinalgCase("csingle", - array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=csingle), - array([2. + 1j, 1. + 2j], dtype=csingle)), - LinalgCase("cdouble", - array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble), - array([2. + 1j, 1. + 2j], dtype=cdouble)), - LinalgCase("cdouble_2", - array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble), - array([[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble)), - LinalgCase("0x0", - np.empty((0, 0), dtype=double), - np.empty((0,), dtype=double), - tags={'size-0'}), - LinalgCase("8x8", - np.random.rand(8, 8), - np.random.rand(8)), - LinalgCase("1x1", - np.random.rand(1, 1), - np.random.rand(1)), - LinalgCase("nonarray", - [[1, 2], [3, 4]], - [2, 1]), -]) - -# non-square test-cases -CASES += apply_tag('nonsquare', [ - LinalgCase("single_nsq_1", - array([[1., 2., 3.], [3., 4., 6.]], dtype=single), - array([2., 1.], dtype=single)), - LinalgCase("single_nsq_2", - array([[1., 2.], [3., 4.], [5., 6.]], dtype=single), - array([2., 1., 3.], dtype=single)), - LinalgCase("double_nsq_1", - array([[1., 2., 3.], [3., 4., 6.]], dtype=double), - array([2., 1.], dtype=double)), - LinalgCase("double_nsq_2", - array([[1., 2.], [3., 4.], [5., 6.]], dtype=double), - array([2., 1., 3.], dtype=double)), - LinalgCase("csingle_nsq_1", - array( - [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=csingle), - array([2. + 1j, 1. + 2j], dtype=csingle)), - LinalgCase("csingle_nsq_2", - array( - [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=csingle), - array([2. + 1j, 1. + 2j, 3. - 3j], dtype=csingle)), - LinalgCase("cdouble_nsq_1", - array( - [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble), - array([2. + 1j, 1. + 2j], dtype=cdouble)), - LinalgCase("cdouble_nsq_2", - array( - [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. 
- 4j, 6. + 8j]], dtype=cdouble), - array([2. + 1j, 1. + 2j, 3. - 3j], dtype=cdouble)), - LinalgCase("cdouble_nsq_1_2", - array( - [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble), - array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)), - LinalgCase("cdouble_nsq_2_2", - array( - [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble), - array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)), - LinalgCase("8x11", - np.random.rand(8, 11), - np.random.rand(8)), - LinalgCase("1x5", - np.random.rand(1, 5), - np.random.rand(1)), - LinalgCase("5x1", - np.random.rand(5, 1), - np.random.rand(5)), - LinalgCase("0x4", - np.random.rand(0, 4), - np.random.rand(0), - tags={'size-0'}), - LinalgCase("4x0", - np.random.rand(4, 0), - np.random.rand(4), - tags={'size-0'}), -]) - -# hermitian test-cases -CASES += apply_tag('hermitian', [ - LinalgCase("hsingle", - array([[1., 2.], [2., 1.]], dtype=single), - None), - LinalgCase("hdouble", - array([[1., 2.], [2., 1.]], dtype=double), - None), - LinalgCase("hcsingle", - array([[1., 2 + 3j], [2 - 3j, 1]], dtype=csingle), - None), - LinalgCase("hcdouble", - array([[1., 2 + 3j], [2 - 3j, 1]], dtype=cdouble), - None), - LinalgCase("hempty", - np.empty((0, 0), dtype=double), - None, - tags={'size-0'}), - LinalgCase("hnonarray", - [[1, 2], [2, 1]], - None), - LinalgCase("matrix_b_only", - array([[1., 2.], [2., 1.]]), - None), - LinalgCase("hmatrix_1x1", - np.random.rand(1, 1), - None), -]) - - -# -# Gufunc test cases -# -def _make_generalized_cases(): - new_cases = [] - - for case in CASES: - if not isinstance(case.a, np.ndarray): - continue - - a = np.array([case.a, 2 * case.a, 3 * case.a]) - if case.b is None: - b = None - else: - b = np.array([case.b, 7 * case.b, 6 * case.b]) - new_case = LinalgCase(case.name + "_tile3", a, b, - tags=case.tags | {'generalized'}) - new_cases.append(new_case) - - a = np.array([case.a] * 2 * 3).reshape((3, 2) + case.a.shape) - if 
case.b is None: - b = None - else: - b = np.array([case.b] * 2 * 3).reshape((3, 2) + case.b.shape) - new_case = LinalgCase(case.name + "_tile213", a, b, - tags=case.tags | {'generalized'}) - new_cases.append(new_case) - - return new_cases - - -CASES += _make_generalized_cases() - - -# -# Generate stride combination variations of the above -# -def _stride_comb_iter(x): - """ - Generate cartesian product of strides for all axes - """ - - if not isinstance(x, np.ndarray): - yield x, "nop" - return - - stride_set = [(1,)] * x.ndim - stride_set[-1] = (1, 3, -4) - if x.ndim > 1: - stride_set[-2] = (1, 3, -4) - if x.ndim > 2: - stride_set[-3] = (1, -4) - - for repeats in itertools.product(*tuple(stride_set)): - new_shape = [abs(a * b) for a, b in zip(x.shape, repeats)] - slices = tuple([slice(None, None, repeat) for repeat in repeats]) - - # new array with different strides, but same data - xi = np.empty(new_shape, dtype=x.dtype) - xi.view(np.uint32).fill(0xdeadbeef) - xi = xi[slices] - xi[...] 
= x - xi = xi.view(x.__class__) - assert_(np.all(xi == x)) - yield xi, "stride_" + "_".join(["%+d" % j for j in repeats]) - - # generate also zero strides if possible - if x.ndim >= 1 and x.shape[-1] == 1: - s = list(x.strides) - s[-1] = 0 - xi = np.lib.stride_tricks.as_strided(x, strides=s) - yield xi, "stride_xxx_0" - if x.ndim >= 2 and x.shape[-2] == 1: - s = list(x.strides) - s[-2] = 0 - xi = np.lib.stride_tricks.as_strided(x, strides=s) - yield xi, "stride_xxx_0_x" - if x.ndim >= 2 and x.shape[:-2] == (1, 1): - s = list(x.strides) - s[-1] = 0 - s[-2] = 0 - xi = np.lib.stride_tricks.as_strided(x, strides=s) - yield xi, "stride_xxx_0_0" - - -def _make_strided_cases(): - new_cases = [] - for case in CASES: - for a, a_label in _stride_comb_iter(case.a): - for b, b_label in _stride_comb_iter(case.b): - new_case = LinalgCase(case.name + "_" + a_label + "_" + b_label, a, b, - tags=case.tags | {'strided'}) - new_cases.append(new_case) - return new_cases - - -CASES += _make_strided_cases() - - -# -# Test different routines against the above cases -# -class LinalgTestCase(object): - TEST_CASES = CASES - - def check_cases(self, require=set(), exclude=set()): - """ - Run func on each of the cases with all of the tags in require, and none - of the tags in exclude - """ - for case in self.TEST_CASES: - # filter by require and exclude - if case.tags & require != require: - continue - if case.tags & exclude: - continue - - try: - case.check(self.do) - except Exception: - msg = "In test case: %r\n\n" % case - msg += traceback.format_exc() - raise AssertionError(msg) - - -class LinalgSquareTestCase(LinalgTestCase): - - def test_sq_cases(self): - self.check_cases(require={'square'}, - exclude={'generalized', 'size-0'}) - - def test_empty_sq_cases(self): - self.check_cases(require={'square', 'size-0'}, - exclude={'generalized'}) - - -class LinalgNonsquareTestCase(LinalgTestCase): - - def test_nonsq_cases(self): - self.check_cases(require={'nonsquare'}, - exclude={'generalized', 
'size-0'}) - - def test_empty_nonsq_cases(self): - self.check_cases(require={'nonsquare', 'size-0'}, - exclude={'generalized'}) - - -class HermitianTestCase(LinalgTestCase): - - def test_herm_cases(self): - self.check_cases(require={'hermitian'}, - exclude={'generalized', 'size-0'}) - - def test_empty_herm_cases(self): - self.check_cases(require={'hermitian', 'size-0'}, - exclude={'generalized'}) - - -class LinalgGeneralizedSquareTestCase(LinalgTestCase): - - @pytest.mark.slow - def test_generalized_sq_cases(self): - self.check_cases(require={'generalized', 'square'}, - exclude={'size-0'}) - - @pytest.mark.slow - def test_generalized_empty_sq_cases(self): - self.check_cases(require={'generalized', 'square', 'size-0'}) - - -class LinalgGeneralizedNonsquareTestCase(LinalgTestCase): - - @pytest.mark.slow - def test_generalized_nonsq_cases(self): - self.check_cases(require={'generalized', 'nonsquare'}, - exclude={'size-0'}) - - @pytest.mark.slow - def test_generalized_empty_nonsq_cases(self): - self.check_cases(require={'generalized', 'nonsquare', 'size-0'}) - - -class HermitianGeneralizedTestCase(LinalgTestCase): - - @pytest.mark.slow - def test_generalized_herm_cases(self): - self.check_cases(require={'generalized', 'hermitian'}, - exclude={'size-0'}) - - @pytest.mark.slow - def test_generalized_empty_herm_cases(self): - self.check_cases(require={'generalized', 'hermitian', 'size-0'}, - exclude={'none'}) - - -def dot_generalized(a, b): - a = asarray(a) - if a.ndim >= 3: - if a.ndim == b.ndim: - # matrix x matrix - new_shape = a.shape[:-1] + b.shape[-1:] - elif a.ndim == b.ndim + 1: - # matrix x vector - new_shape = a.shape[:-1] - else: - raise ValueError("Not implemented...") - r = np.empty(new_shape, dtype=np.common_type(a, b)) - for c in itertools.product(*map(range, a.shape[:-2])): - r[c] = dot(a[c], b[c]) - return r - else: - return dot(a, b) - - -def identity_like_generalized(a): - a = asarray(a) - if a.ndim >= 3: - r = np.empty(a.shape, dtype=a.dtype) - r[...] 
= identity(a.shape[-2]) - return r - else: - return identity(a.shape[0]) - - -class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): - # kept apart from TestSolve for use for testing with matrices. - def do(self, a, b, tags): - x = linalg.solve(a, b) - assert_almost_equal(b, dot_generalized(a, x)) - assert_(consistent_subclass(x, b)) - - -class TestSolve(SolveCases): - @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) - def test_types(self, dtype): - x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) - assert_equal(linalg.solve(x, x).dtype, dtype) - - def test_0_size(self): - class ArraySubclass(np.ndarray): - pass - # Test system of 0x0 matrices - a = np.arange(8).reshape(2, 2, 2) - b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass) - - expected = linalg.solve(a, b)[:, 0:0, :] - result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, :]) - assert_array_equal(result, expected) - assert_(isinstance(result, ArraySubclass)) - - # Test errors for non-square and only b's dimension being 0 - assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b) - assert_raises(ValueError, linalg.solve, a, b[:, 0:0, :]) - - # Test broadcasting error - b = np.arange(6).reshape(1, 3, 2) # broadcasting error - assert_raises(ValueError, linalg.solve, a, b) - assert_raises(ValueError, linalg.solve, a[0:0], b[0:0]) - - # Test zero "single equations" with 0x0 matrices. - b = np.arange(2).reshape(1, 2).view(ArraySubclass) - expected = linalg.solve(a, b)[:, 0:0] - result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0]) - assert_array_equal(result, expected) - assert_(isinstance(result, ArraySubclass)) - - b = np.arange(3).reshape(1, 3) - assert_raises(ValueError, linalg.solve, a, b) - assert_raises(ValueError, linalg.solve, a[0:0], b[0:0]) - assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b) - - def test_0_size_k(self): - # test zero multiple equation (K=0) case. 
- class ArraySubclass(np.ndarray): - pass - a = np.arange(4).reshape(1, 2, 2) - b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass) - - expected = linalg.solve(a, b)[:, :, 0:0] - result = linalg.solve(a, b[:, :, 0:0]) - assert_array_equal(result, expected) - assert_(isinstance(result, ArraySubclass)) - - # test both zero. - expected = linalg.solve(a, b)[:, 0:0, 0:0] - result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, 0:0]) - assert_array_equal(result, expected) - assert_(isinstance(result, ArraySubclass)) - - -class InvCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): - - def do(self, a, b, tags): - a_inv = linalg.inv(a) - assert_almost_equal(dot_generalized(a, a_inv), - identity_like_generalized(a)) - assert_(consistent_subclass(a_inv, a)) - - -class TestInv(InvCases): - @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) - def test_types(self, dtype): - x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) - assert_equal(linalg.inv(x).dtype, dtype) - - def test_0_size(self): - # Check that all kinds of 0-sized arrays work - class ArraySubclass(np.ndarray): - pass - a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) - res = linalg.inv(a) - assert_(res.dtype.type is np.float64) - assert_equal(a.shape, res.shape) - assert_(isinstance(res, ArraySubclass)) - - a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) - res = linalg.inv(a) - assert_(res.dtype.type is np.complex64) - assert_equal(a.shape, res.shape) - assert_(isinstance(res, ArraySubclass)) - - -class EigvalsCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): - - def do(self, a, b, tags): - ev = linalg.eigvals(a) - evalues, evectors = linalg.eig(a) - assert_almost_equal(ev, evalues) - - -class TestEigvals(EigvalsCases): - @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) - def test_types(self, dtype): - x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) - assert_equal(linalg.eigvals(x).dtype, dtype) - x = np.array([[1, 0.5], [-1, 1]], 
dtype=dtype) - assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype)) - - def test_0_size(self): - # Check that all kinds of 0-sized arrays work - class ArraySubclass(np.ndarray): - pass - a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) - res = linalg.eigvals(a) - assert_(res.dtype.type is np.float64) - assert_equal((0, 1), res.shape) - # This is just for documentation, it might make sense to change: - assert_(isinstance(res, np.ndarray)) - - a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) - res = linalg.eigvals(a) - assert_(res.dtype.type is np.complex64) - assert_equal((0,), res.shape) - # This is just for documentation, it might make sense to change: - assert_(isinstance(res, np.ndarray)) - - -class EigCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): - - def do(self, a, b, tags): - evalues, evectors = linalg.eig(a) - assert_allclose(dot_generalized(a, evectors), - np.asarray(evectors) * np.asarray(evalues)[..., None, :], - rtol=get_rtol(evalues.dtype)) - assert_(consistent_subclass(evectors, a)) - - -class TestEig(EigCases): - @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) - def test_types(self, dtype): - x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) - w, v = np.linalg.eig(x) - assert_equal(w.dtype, dtype) - assert_equal(v.dtype, dtype) - - x = np.array([[1, 0.5], [-1, 1]], dtype=dtype) - w, v = np.linalg.eig(x) - assert_equal(w.dtype, get_complex_dtype(dtype)) - assert_equal(v.dtype, get_complex_dtype(dtype)) - - def test_0_size(self): - # Check that all kinds of 0-sized arrays work - class ArraySubclass(np.ndarray): - pass - a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) - res, res_v = linalg.eig(a) - assert_(res_v.dtype.type is np.float64) - assert_(res.dtype.type is np.float64) - assert_equal(a.shape, res_v.shape) - assert_equal((0, 1), res.shape) - # This is just for documentation, it might make sense to change: - assert_(isinstance(a, np.ndarray)) - - a = np.zeros((0, 0), 
dtype=np.complex64).view(ArraySubclass) - res, res_v = linalg.eig(a) - assert_(res_v.dtype.type is np.complex64) - assert_(res.dtype.type is np.complex64) - assert_equal(a.shape, res_v.shape) - assert_equal((0,), res.shape) - # This is just for documentation, it might make sense to change: - assert_(isinstance(a, np.ndarray)) - - -class SVDBaseTests(object): - hermitian = False - - @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) - def test_types(self, dtype): - x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) - u, s, vh = linalg.svd(x) - assert_equal(u.dtype, dtype) - assert_equal(s.dtype, get_real_dtype(dtype)) - assert_equal(vh.dtype, dtype) - s = linalg.svd(x, compute_uv=False, hermitian=self.hermitian) - assert_equal(s.dtype, get_real_dtype(dtype)) - - -class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): - - def do(self, a, b, tags): - u, s, vt = linalg.svd(a, False) - assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :], - np.asarray(vt)), - rtol=get_rtol(u.dtype)) - assert_(consistent_subclass(u, a)) - assert_(consistent_subclass(vt, a)) - - -class TestSVD(SVDCases, SVDBaseTests): - def test_empty_identity(self): - """ Empty input should put an identity matrix in u or vh """ - x = np.empty((4, 0)) - u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian) - assert_equal(u.shape, (4, 4)) - assert_equal(vh.shape, (0, 0)) - assert_equal(u, np.eye(4)) - - x = np.empty((0, 4)) - u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian) - assert_equal(u.shape, (0, 0)) - assert_equal(vh.shape, (4, 4)) - assert_equal(vh, np.eye(4)) - - -class SVDHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase): - - def do(self, a, b, tags): - u, s, vt = linalg.svd(a, False, hermitian=True) - assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :], - np.asarray(vt)), - rtol=get_rtol(u.dtype)) - assert_(consistent_subclass(u, a)) - assert_(consistent_subclass(vt, 
a)) - - -class TestSVDHermitian(SVDHermitianCases, SVDBaseTests): - hermitian = True - - -class CondCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): - # cond(x, p) for p in (None, 2, -2) - - def do(self, a, b, tags): - c = asarray(a) # a might be a matrix - if 'size-0' in tags: - assert_raises(LinAlgError, linalg.cond, c) - return - - # +-2 norms - s = linalg.svd(c, compute_uv=False) - assert_almost_equal( - linalg.cond(a), s[..., 0] / s[..., -1], - single_decimal=5, double_decimal=11) - assert_almost_equal( - linalg.cond(a, 2), s[..., 0] / s[..., -1], - single_decimal=5, double_decimal=11) - assert_almost_equal( - linalg.cond(a, -2), s[..., -1] / s[..., 0], - single_decimal=5, double_decimal=11) - - # Other norms - cinv = np.linalg.inv(c) - assert_almost_equal( - linalg.cond(a, 1), - abs(c).sum(-2).max(-1) * abs(cinv).sum(-2).max(-1), - single_decimal=5, double_decimal=11) - assert_almost_equal( - linalg.cond(a, -1), - abs(c).sum(-2).min(-1) * abs(cinv).sum(-2).min(-1), - single_decimal=5, double_decimal=11) - assert_almost_equal( - linalg.cond(a, np.inf), - abs(c).sum(-1).max(-1) * abs(cinv).sum(-1).max(-1), - single_decimal=5, double_decimal=11) - assert_almost_equal( - linalg.cond(a, -np.inf), - abs(c).sum(-1).min(-1) * abs(cinv).sum(-1).min(-1), - single_decimal=5, double_decimal=11) - assert_almost_equal( - linalg.cond(a, 'fro'), - np.sqrt((abs(c)**2).sum(-1).sum(-1) - * (abs(cinv)**2).sum(-1).sum(-1)), - single_decimal=5, double_decimal=11) - - -class TestCond(CondCases): - def test_basic_nonsvd(self): - # Smoketest the non-svd norms - A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]]) - assert_almost_equal(linalg.cond(A, inf), 4) - assert_almost_equal(linalg.cond(A, -inf), 2/3) - assert_almost_equal(linalg.cond(A, 1), 4) - assert_almost_equal(linalg.cond(A, -1), 0.5) - assert_almost_equal(linalg.cond(A, 'fro'), np.sqrt(265 / 12)) - - def test_singular(self): - # Singular matrices have infinite condition number for - # positive norms, and negative 
norms shouldn't raise - # exceptions - As = [np.zeros((2, 2)), np.ones((2, 2))] - p_pos = [None, 1, 2, 'fro'] - p_neg = [-1, -2] - for A, p in itertools.product(As, p_pos): - # Inversion may not hit exact infinity, so just check the - # number is large - assert_(linalg.cond(A, p) > 1e15) - for A, p in itertools.product(As, p_neg): - linalg.cond(A, p) - - def test_nan(self): - # nans should be passed through, not converted to infs - ps = [None, 1, -1, 2, -2, 'fro'] - p_pos = [None, 1, 2, 'fro'] - - A = np.ones((2, 2)) - A[0,1] = np.nan - for p in ps: - c = linalg.cond(A, p) - assert_(isinstance(c, np.float_)) - assert_(np.isnan(c)) - - A = np.ones((3, 2, 2)) - A[1,0,1] = np.nan - for p in ps: - c = linalg.cond(A, p) - assert_(np.isnan(c[1])) - if p in p_pos: - assert_(c[0] > 1e15) - assert_(c[2] > 1e15) - else: - assert_(not np.isnan(c[0])) - assert_(not np.isnan(c[2])) - - def test_stacked_singular(self): - # Check behavior when only some of the stacked matrices are - # singular - np.random.seed(1234) - A = np.random.rand(2, 2, 2, 2) - A[0,0] = 0 - A[1,1] = 0 - - for p in (None, 1, 2, 'fro', -1, -2): - c = linalg.cond(A, p) - assert_equal(c[0,0], np.inf) - assert_equal(c[1,1], np.inf) - assert_(np.isfinite(c[0,1])) - assert_(np.isfinite(c[1,0])) - - -class PinvCases(LinalgSquareTestCase, - LinalgNonsquareTestCase, - LinalgGeneralizedSquareTestCase, - LinalgGeneralizedNonsquareTestCase): - - def do(self, a, b, tags): - a_ginv = linalg.pinv(a) - # `a @ a_ginv == I` does not hold if a is singular - dot = dot_generalized - assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11) - assert_(consistent_subclass(a_ginv, a)) - - -class TestPinv(PinvCases): - pass - - -class PinvHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase): - - def do(self, a, b, tags): - a_ginv = linalg.pinv(a, hermitian=True) - # `a @ a_ginv == I` does not hold if a is singular - dot = dot_generalized - assert_almost_equal(dot(dot(a, a_ginv), a), a, 
single_decimal=5, double_decimal=11) - assert_(consistent_subclass(a_ginv, a)) - - -class TestPinvHermitian(PinvHermitianCases): - pass - - -class DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): - - def do(self, a, b, tags): - d = linalg.det(a) - (s, ld) = linalg.slogdet(a) - if asarray(a).dtype.type in (single, double): - ad = asarray(a).astype(double) - else: - ad = asarray(a).astype(cdouble) - ev = linalg.eigvals(ad) - assert_almost_equal(d, multiply.reduce(ev, axis=-1)) - assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1)) - - s = np.atleast_1d(s) - ld = np.atleast_1d(ld) - m = (s != 0) - assert_almost_equal(np.abs(s[m]), 1) - assert_equal(ld[~m], -inf) - - -class TestDet(DetCases): - def test_zero(self): - assert_equal(linalg.det([[0.0]]), 0.0) - assert_equal(type(linalg.det([[0.0]])), double) - assert_equal(linalg.det([[0.0j]]), 0.0) - assert_equal(type(linalg.det([[0.0j]])), cdouble) - - assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf)) - assert_equal(type(linalg.slogdet([[0.0]])[0]), double) - assert_equal(type(linalg.slogdet([[0.0]])[1]), double) - assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf)) - assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble) - assert_equal(type(linalg.slogdet([[0.0j]])[1]), double) - - @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) - def test_types(self, dtype): - x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) - assert_equal(np.linalg.det(x).dtype, dtype) - ph, s = np.linalg.slogdet(x) - assert_equal(s.dtype, get_real_dtype(dtype)) - assert_equal(ph.dtype, dtype) - - def test_0_size(self): - a = np.zeros((0, 0), dtype=np.complex64) - res = linalg.det(a) - assert_equal(res, 1.) - assert_(res.dtype.type is np.complex64) - res = linalg.slogdet(a) - assert_equal(res, (1, 0)) - assert_(res[0].dtype.type is np.complex64) - assert_(res[1].dtype.type is np.float32) - - a = np.zeros((0, 0), dtype=np.float64) - res = linalg.det(a) - assert_equal(res, 1.) 
- assert_(res.dtype.type is np.float64) - res = linalg.slogdet(a) - assert_equal(res, (1, 0)) - assert_(res[0].dtype.type is np.float64) - assert_(res[1].dtype.type is np.float64) - - -class LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase): - - def do(self, a, b, tags): - arr = np.asarray(a) - m, n = arr.shape - u, s, vt = linalg.svd(a, False) - x, residuals, rank, sv = linalg.lstsq(a, b, rcond=-1) - if m == 0: - assert_((x == 0).all()) - if m <= n: - assert_almost_equal(b, dot(a, x)) - assert_equal(rank, m) - else: - assert_equal(rank, n) - assert_almost_equal(sv, sv.__array_wrap__(s)) - if rank == n and m > n: - expect_resids = ( - np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0) - expect_resids = np.asarray(expect_resids) - if np.asarray(b).ndim == 1: - expect_resids.shape = (1,) - assert_equal(residuals.shape, expect_resids.shape) - else: - expect_resids = np.array([]).view(type(x)) - assert_almost_equal(residuals, expect_resids) - assert_(np.issubdtype(residuals.dtype, np.floating)) - assert_(consistent_subclass(x, b)) - assert_(consistent_subclass(residuals, b)) - - -class TestLstsq(LstsqCases): - def test_future_rcond(self): - a = np.array([[0., 1., 0., 1., 2., 0.], - [0., 2., 0., 0., 1., 0.], - [1., 0., 1., 0., 0., 4.], - [0., 0., 0., 2., 3., 0.]]).T - - b = np.array([1, 0, 0, 0, 0, 0]) - with suppress_warnings() as sup: - w = sup.record(FutureWarning, "`rcond` parameter will change") - x, residuals, rank, s = linalg.lstsq(a, b) - assert_(rank == 4) - x, residuals, rank, s = linalg.lstsq(a, b, rcond=-1) - assert_(rank == 4) - x, residuals, rank, s = linalg.lstsq(a, b, rcond=None) - assert_(rank == 3) - # Warning should be raised exactly once (first command) - assert_(len(w) == 1) - - @pytest.mark.parametrize(["m", "n", "n_rhs"], [ - (4, 2, 2), - (0, 4, 1), - (0, 4, 2), - (4, 0, 1), - (4, 0, 2), - (4, 2, 0), - (0, 0, 0) - ]) - def test_empty_a_b(self, m, n, n_rhs): - a = np.arange(m * n).reshape(m, n) - b = np.ones((m, n_rhs)) - x, residuals, 
rank, s = linalg.lstsq(a, b, rcond=None) - if m == 0: - assert_((x == 0).all()) - assert_equal(x.shape, (n, n_rhs)) - assert_equal(residuals.shape, ((n_rhs,) if m > n else (0,))) - if m > n and n_rhs > 0: - # residuals are exactly the squared norms of b's columns - r = b - np.dot(a, x) - assert_almost_equal(residuals, (r * r).sum(axis=-2)) - assert_equal(rank, min(m, n)) - assert_equal(s.shape, (min(m, n),)) - - def test_incompatible_dims(self): - # use modified version of docstring example - x = np.array([0, 1, 2, 3]) - y = np.array([-1, 0.2, 0.9, 2.1, 3.3]) - A = np.vstack([x, np.ones(len(x))]).T - with assert_raises_regex(LinAlgError, "Incompatible dimensions"): - linalg.lstsq(A, y, rcond=None) - - -@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO']) -class TestMatrixPower(object): - - rshft_0 = np.eye(4) - rshft_1 = rshft_0[[3, 0, 1, 2]] - rshft_2 = rshft_0[[2, 3, 0, 1]] - rshft_3 = rshft_0[[1, 2, 3, 0]] - rshft_all = [rshft_0, rshft_1, rshft_2, rshft_3] - noninv = array([[1, 0], [0, 0]]) - stacked = np.block([[[rshft_0]]]*2) - #FIXME the 'e' dtype might work in future - dtnoinv = [object, np.dtype('e'), np.dtype('g'), np.dtype('G')] - - def test_large_power(self, dt): - rshft = self.rshft_1.astype(dt) - assert_equal( - matrix_power(rshft, 2**100 + 2**10 + 2**5 + 0), self.rshft_0) - assert_equal( - matrix_power(rshft, 2**100 + 2**10 + 2**5 + 1), self.rshft_1) - assert_equal( - matrix_power(rshft, 2**100 + 2**10 + 2**5 + 2), self.rshft_2) - assert_equal( - matrix_power(rshft, 2**100 + 2**10 + 2**5 + 3), self.rshft_3) - - def test_power_is_zero(self, dt): - def tz(M): - mz = matrix_power(M, 0) - assert_equal(mz, identity_like_generalized(M)) - assert_equal(mz.dtype, M.dtype) - - for mat in self.rshft_all: - tz(mat.astype(dt)) - if dt != object: - tz(self.stacked.astype(dt)) - - def test_power_is_one(self, dt): - def tz(mat): - mz = matrix_power(mat, 1) - assert_equal(mz, mat) - assert_equal(mz.dtype, mat.dtype) - - for mat in 
self.rshft_all: - tz(mat.astype(dt)) - if dt != object: - tz(self.stacked.astype(dt)) - - def test_power_is_two(self, dt): - def tz(mat): - mz = matrix_power(mat, 2) - mmul = matmul if mat.dtype != object else dot - assert_equal(mz, mmul(mat, mat)) - assert_equal(mz.dtype, mat.dtype) - - for mat in self.rshft_all: - tz(mat.astype(dt)) - if dt != object: - tz(self.stacked.astype(dt)) - - def test_power_is_minus_one(self, dt): - def tz(mat): - invmat = matrix_power(mat, -1) - mmul = matmul if mat.dtype != object else dot - assert_almost_equal( - mmul(invmat, mat), identity_like_generalized(mat)) - - for mat in self.rshft_all: - if dt not in self.dtnoinv: - tz(mat.astype(dt)) - - def test_exceptions_bad_power(self, dt): - mat = self.rshft_0.astype(dt) - assert_raises(TypeError, matrix_power, mat, 1.5) - assert_raises(TypeError, matrix_power, mat, [1]) - - def test_exceptions_non_square(self, dt): - assert_raises(LinAlgError, matrix_power, np.array([1], dt), 1) - assert_raises(LinAlgError, matrix_power, np.array([[1], [2]], dt), 1) - assert_raises(LinAlgError, matrix_power, np.ones((4, 3, 2), dt), 1) - - def test_exceptions_not_invertible(self, dt): - if dt in self.dtnoinv: - return - mat = self.noninv.astype(dt) - assert_raises(LinAlgError, matrix_power, mat, -1) - - - -class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase): - - def do(self, a, b, tags): - # note that eigenvalue arrays returned by eig must be sorted since - # their order isn't guaranteed. 
- ev = linalg.eigvalsh(a, 'L') - evalues, evectors = linalg.eig(a) - evalues.sort(axis=-1) - assert_allclose(ev, evalues, rtol=get_rtol(ev.dtype)) - - ev2 = linalg.eigvalsh(a, 'U') - assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype)) - - -class TestEigvalsh(object): - @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) - def test_types(self, dtype): - x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) - w = np.linalg.eigvalsh(x) - assert_equal(w.dtype, get_real_dtype(dtype)) - - def test_invalid(self): - x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32) - assert_raises(ValueError, np.linalg.eigvalsh, x, UPLO="lrong") - assert_raises(ValueError, np.linalg.eigvalsh, x, "lower") - assert_raises(ValueError, np.linalg.eigvalsh, x, "upper") - - def test_UPLO(self): - Klo = np.array([[0, 0], [1, 0]], dtype=np.double) - Kup = np.array([[0, 1], [0, 0]], dtype=np.double) - tgt = np.array([-1, 1], dtype=np.double) - rtol = get_rtol(np.double) - - # Check default is 'L' - w = np.linalg.eigvalsh(Klo) - assert_allclose(w, tgt, rtol=rtol) - # Check 'L' - w = np.linalg.eigvalsh(Klo, UPLO='L') - assert_allclose(w, tgt, rtol=rtol) - # Check 'l' - w = np.linalg.eigvalsh(Klo, UPLO='l') - assert_allclose(w, tgt, rtol=rtol) - # Check 'U' - w = np.linalg.eigvalsh(Kup, UPLO='U') - assert_allclose(w, tgt, rtol=rtol) - # Check 'u' - w = np.linalg.eigvalsh(Kup, UPLO='u') - assert_allclose(w, tgt, rtol=rtol) - - def test_0_size(self): - # Check that all kinds of 0-sized arrays work - class ArraySubclass(np.ndarray): - pass - a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) - res = linalg.eigvalsh(a) - assert_(res.dtype.type is np.float64) - assert_equal((0, 1), res.shape) - # This is just for documentation, it might make sense to change: - assert_(isinstance(res, np.ndarray)) - - a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) - res = linalg.eigvalsh(a) - assert_(res.dtype.type is np.float32) - assert_equal((0,), res.shape) - # This is just for 
documentation, it might make sense to change: - assert_(isinstance(res, np.ndarray)) - - -class TestEighCases(HermitianTestCase, HermitianGeneralizedTestCase): - - def do(self, a, b, tags): - # note that eigenvalue arrays returned by eig must be sorted since - # their order isn't guaranteed. - ev, evc = linalg.eigh(a) - evalues, evectors = linalg.eig(a) - evalues.sort(axis=-1) - assert_almost_equal(ev, evalues) - - assert_allclose(dot_generalized(a, evc), - np.asarray(ev)[..., None, :] * np.asarray(evc), - rtol=get_rtol(ev.dtype)) - - ev2, evc2 = linalg.eigh(a, 'U') - assert_almost_equal(ev2, evalues) - - assert_allclose(dot_generalized(a, evc2), - np.asarray(ev2)[..., None, :] * np.asarray(evc2), - rtol=get_rtol(ev.dtype), err_msg=repr(a)) - - -class TestEigh(object): - @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) - def test_types(self, dtype): - x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) - w, v = np.linalg.eigh(x) - assert_equal(w.dtype, get_real_dtype(dtype)) - assert_equal(v.dtype, dtype) - - def test_invalid(self): - x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32) - assert_raises(ValueError, np.linalg.eigh, x, UPLO="lrong") - assert_raises(ValueError, np.linalg.eigh, x, "lower") - assert_raises(ValueError, np.linalg.eigh, x, "upper") - - def test_UPLO(self): - Klo = np.array([[0, 0], [1, 0]], dtype=np.double) - Kup = np.array([[0, 1], [0, 0]], dtype=np.double) - tgt = np.array([-1, 1], dtype=np.double) - rtol = get_rtol(np.double) - - # Check default is 'L' - w, v = np.linalg.eigh(Klo) - assert_allclose(w, tgt, rtol=rtol) - # Check 'L' - w, v = np.linalg.eigh(Klo, UPLO='L') - assert_allclose(w, tgt, rtol=rtol) - # Check 'l' - w, v = np.linalg.eigh(Klo, UPLO='l') - assert_allclose(w, tgt, rtol=rtol) - # Check 'U' - w, v = np.linalg.eigh(Kup, UPLO='U') - assert_allclose(w, tgt, rtol=rtol) - # Check 'u' - w, v = np.linalg.eigh(Kup, UPLO='u') - assert_allclose(w, tgt, rtol=rtol) - - def test_0_size(self): - # Check that all kinds 
of 0-sized arrays work - class ArraySubclass(np.ndarray): - pass - a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) - res, res_v = linalg.eigh(a) - assert_(res_v.dtype.type is np.float64) - assert_(res.dtype.type is np.float64) - assert_equal(a.shape, res_v.shape) - assert_equal((0, 1), res.shape) - # This is just for documentation, it might make sense to change: - assert_(isinstance(a, np.ndarray)) - - a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) - res, res_v = linalg.eigh(a) - assert_(res_v.dtype.type is np.complex64) - assert_(res.dtype.type is np.float32) - assert_equal(a.shape, res_v.shape) - assert_equal((0,), res.shape) - # This is just for documentation, it might make sense to change: - assert_(isinstance(a, np.ndarray)) - - -class _TestNormBase(object): - dt = None - dec = None - - -class _TestNormGeneral(_TestNormBase): - - def test_empty(self): - assert_equal(norm([]), 0.0) - assert_equal(norm(array([], dtype=self.dt)), 0.0) - assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0) - - def test_vector_return_type(self): - a = np.array([1, 0, 1]) - - exact_types = np.typecodes['AllInteger'] - inexact_types = np.typecodes['AllFloat'] - - all_types = exact_types + inexact_types - - for each_inexact_types in all_types: - at = a.astype(each_inexact_types) - - an = norm(at, -np.inf) - assert_(issubclass(an.dtype.type, np.floating)) - assert_almost_equal(an, 0.0) - - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "divide by zero encountered") - an = norm(at, -1) - assert_(issubclass(an.dtype.type, np.floating)) - assert_almost_equal(an, 0.0) - - an = norm(at, 0) - assert_(issubclass(an.dtype.type, np.floating)) - assert_almost_equal(an, 2) - - an = norm(at, 1) - assert_(issubclass(an.dtype.type, np.floating)) - assert_almost_equal(an, 2.0) - - an = norm(at, 2) - assert_(issubclass(an.dtype.type, np.floating)) - assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/2.0)) - - an = norm(at, 4) - 
assert_(issubclass(an.dtype.type, np.floating)) - assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/4.0)) - - an = norm(at, np.inf) - assert_(issubclass(an.dtype.type, np.floating)) - assert_almost_equal(an, 1.0) - - def test_vector(self): - a = [1, 2, 3, 4] - b = [-1, -2, -3, -4] - c = [-1, 2, -3, 4] - - def _test(v): - np.testing.assert_almost_equal(norm(v), 30 ** 0.5, - decimal=self.dec) - np.testing.assert_almost_equal(norm(v, inf), 4.0, - decimal=self.dec) - np.testing.assert_almost_equal(norm(v, -inf), 1.0, - decimal=self.dec) - np.testing.assert_almost_equal(norm(v, 1), 10.0, - decimal=self.dec) - np.testing.assert_almost_equal(norm(v, -1), 12.0 / 25, - decimal=self.dec) - np.testing.assert_almost_equal(norm(v, 2), 30 ** 0.5, - decimal=self.dec) - np.testing.assert_almost_equal(norm(v, -2), ((205. / 144) ** -0.5), - decimal=self.dec) - np.testing.assert_almost_equal(norm(v, 0), 4, - decimal=self.dec) - - for v in (a, b, c,): - _test(v) - - for v in (array(a, dtype=self.dt), array(b, dtype=self.dt), - array(c, dtype=self.dt)): - _test(v) - - def test_axis(self): - # Vector norms. - # Compare the use of `axis` with computing the norm of each row - # or column separately. - A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt) - for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]: - expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])] - assert_almost_equal(norm(A, ord=order, axis=0), expected0) - expected1 = [norm(A[k, :], ord=order) for k in range(A.shape[0])] - assert_almost_equal(norm(A, ord=order, axis=1), expected1) - - # Matrix norms. 
- B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) - nd = B.ndim - for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro']: - for axis in itertools.combinations(range(-nd, nd), 2): - row_axis, col_axis = axis - if row_axis < 0: - row_axis += nd - if col_axis < 0: - col_axis += nd - if row_axis == col_axis: - assert_raises(ValueError, norm, B, ord=order, axis=axis) - else: - n = norm(B, ord=order, axis=axis) - - # The logic using k_index only works for nd = 3. - # This has to be changed if nd is increased. - k_index = nd - (row_axis + col_axis) - if row_axis < col_axis: - expected = [norm(B[:].take(k, axis=k_index), ord=order) - for k in range(B.shape[k_index])] - else: - expected = [norm(B[:].take(k, axis=k_index).T, ord=order) - for k in range(B.shape[k_index])] - assert_almost_equal(n, expected) - - def test_keepdims(self): - A = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) - - allclose_err = 'order {0}, axis = {1}' - shape_err = 'Shape mismatch found {0}, expected {1}, order={2}, axis={3}' - - # check the order=None, axis=None case - expected = norm(A, ord=None, axis=None) - found = norm(A, ord=None, axis=None, keepdims=True) - assert_allclose(np.squeeze(found), expected, - err_msg=allclose_err.format(None, None)) - expected_shape = (1, 1, 1) - assert_(found.shape == expected_shape, - shape_err.format(found.shape, expected_shape, None, None)) - - # Vector norms. - for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]: - for k in range(A.ndim): - expected = norm(A, ord=order, axis=k) - found = norm(A, ord=order, axis=k, keepdims=True) - assert_allclose(np.squeeze(found), expected, - err_msg=allclose_err.format(order, k)) - expected_shape = list(A.shape) - expected_shape[k] = 1 - expected_shape = tuple(expected_shape) - assert_(found.shape == expected_shape, - shape_err.format(found.shape, expected_shape, order, k)) - - # Matrix norms. 
- for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro', 'nuc']: - for k in itertools.permutations(range(A.ndim), 2): - expected = norm(A, ord=order, axis=k) - found = norm(A, ord=order, axis=k, keepdims=True) - assert_allclose(np.squeeze(found), expected, - err_msg=allclose_err.format(order, k)) - expected_shape = list(A.shape) - expected_shape[k[0]] = 1 - expected_shape[k[1]] = 1 - expected_shape = tuple(expected_shape) - assert_(found.shape == expected_shape, - shape_err.format(found.shape, expected_shape, order, k)) - - -class _TestNorm2D(_TestNormBase): - # Define the part for 2d arrays separately, so we can subclass this - # and run the tests using np.matrix in matrixlib.tests.test_matrix_linalg. - array = np.array - - def test_matrix_empty(self): - assert_equal(norm(self.array([[]], dtype=self.dt)), 0.0) - - def test_matrix_return_type(self): - a = self.array([[1, 0, 1], [0, 1, 1]]) - - exact_types = np.typecodes['AllInteger'] - - # float32, complex64, float64, complex128 types are the only types - # allowed by `linalg`, which performs the matrix operations used - # within `norm`. 
- inexact_types = 'fdFD' - - all_types = exact_types + inexact_types - - for each_inexact_types in all_types: - at = a.astype(each_inexact_types) - - an = norm(at, -np.inf) - assert_(issubclass(an.dtype.type, np.floating)) - assert_almost_equal(an, 2.0) - - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "divide by zero encountered") - an = norm(at, -1) - assert_(issubclass(an.dtype.type, np.floating)) - assert_almost_equal(an, 1.0) - - an = norm(at, 1) - assert_(issubclass(an.dtype.type, np.floating)) - assert_almost_equal(an, 2.0) - - an = norm(at, 2) - assert_(issubclass(an.dtype.type, np.floating)) - assert_almost_equal(an, 3.0**(1.0/2.0)) - - an = norm(at, -2) - assert_(issubclass(an.dtype.type, np.floating)) - assert_almost_equal(an, 1.0) - - an = norm(at, np.inf) - assert_(issubclass(an.dtype.type, np.floating)) - assert_almost_equal(an, 2.0) - - an = norm(at, 'fro') - assert_(issubclass(an.dtype.type, np.floating)) - assert_almost_equal(an, 2.0) - - an = norm(at, 'nuc') - assert_(issubclass(an.dtype.type, np.floating)) - # Lower bar needed to support low precision floats. - # They end up being off by 1 in the 7th place. - np.testing.assert_almost_equal(an, 2.7320508075688772, decimal=6) - - def test_matrix_2x2(self): - A = self.array([[1, 3], [5, 7]], dtype=self.dt) - assert_almost_equal(norm(A), 84 ** 0.5) - assert_almost_equal(norm(A, 'fro'), 84 ** 0.5) - assert_almost_equal(norm(A, 'nuc'), 10.0) - assert_almost_equal(norm(A, inf), 12.0) - assert_almost_equal(norm(A, -inf), 4.0) - assert_almost_equal(norm(A, 1), 10.0) - assert_almost_equal(norm(A, -1), 6.0) - assert_almost_equal(norm(A, 2), 9.1231056256176615) - assert_almost_equal(norm(A, -2), 0.87689437438234041) - - assert_raises(ValueError, norm, A, 'nofro') - assert_raises(ValueError, norm, A, -3) - assert_raises(ValueError, norm, A, 0) - - def test_matrix_3x3(self): - # This test has been added because the 2x2 example - # happened to have equal nuclear norm and induced 1-norm. 
- # The 1/10 scaling factor accommodates the absolute tolerance - # used in assert_almost_equal. - A = (1 / 10) * \ - self.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt) - assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5) - assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5) - assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836) - assert_almost_equal(norm(A, inf), 1.1) - assert_almost_equal(norm(A, -inf), 0.6) - assert_almost_equal(norm(A, 1), 1.0) - assert_almost_equal(norm(A, -1), 0.4) - assert_almost_equal(norm(A, 2), 0.88722940323461277) - assert_almost_equal(norm(A, -2), 0.19456584790481812) - - def test_bad_args(self): - # Check that bad arguments raise the appropriate exceptions. - - A = self.array([[1, 2, 3], [4, 5, 6]], dtype=self.dt) - B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) - - # Using `axis=` or passing in a 1-D array implies vector - # norms are being computed, so also using `ord='fro'` - # or `ord='nuc'` raises a ValueError. - assert_raises(ValueError, norm, A, 'fro', 0) - assert_raises(ValueError, norm, A, 'nuc', 0) - assert_raises(ValueError, norm, [3, 4], 'fro', None) - assert_raises(ValueError, norm, [3, 4], 'nuc', None) - - # Similarly, norm should raise an exception when ord is any finite - # number other than 1, 2, -1 or -2 when computing matrix norms. - for order in [0, 3]: - assert_raises(ValueError, norm, A, order, None) - assert_raises(ValueError, norm, A, order, (0, 1)) - assert_raises(ValueError, norm, B, order, (1, 2)) - - # Invalid axis - assert_raises(np.AxisError, norm, B, None, 3) - assert_raises(np.AxisError, norm, B, None, (2, 3)) - assert_raises(ValueError, norm, B, None, (0, 1, 2)) - - -class _TestNorm(_TestNorm2D, _TestNormGeneral): - pass - - -class TestNorm_NonSystematic(object): - - def test_longdouble_norm(self): - # Non-regression test: p-norm of longdouble would previously raise - # UnboundLocalError. 
- x = np.arange(10, dtype=np.longdouble) - old_assert_almost_equal(norm(x, ord=3), 12.65, decimal=2) - - def test_intmin(self): - # Non-regression test: p-norm of signed integer would previously do - # float cast and abs in the wrong order. - x = np.array([-2 ** 31], dtype=np.int32) - old_assert_almost_equal(norm(x, ord=3), 2 ** 31, decimal=5) - - def test_complex_high_ord(self): - # gh-4156 - d = np.empty((2,), dtype=np.clongdouble) - d[0] = 6 + 7j - d[1] = -6 + 7j - res = 11.615898132184 - old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=10) - d = d.astype(np.complex128) - old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=9) - d = d.astype(np.complex64) - old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=5) - - -# Separate definitions so we can use them for matrix tests. -class _TestNormDoubleBase(_TestNormBase): - dt = np.double - dec = 12 - - -class _TestNormSingleBase(_TestNormBase): - dt = np.float32 - dec = 6 - - -class _TestNormInt64Base(_TestNormBase): - dt = np.int64 - dec = 12 - - -class TestNormDouble(_TestNorm, _TestNormDoubleBase): - pass - - -class TestNormSingle(_TestNorm, _TestNormSingleBase): - pass - - -class TestNormInt64(_TestNorm, _TestNormInt64Base): - pass - - -class TestMatrixRank(object): - - def test_matrix_rank(self): - # Full rank matrix - assert_equal(4, matrix_rank(np.eye(4))) - # rank deficient matrix - I = np.eye(4) - I[-1, -1] = 0. 
- assert_equal(matrix_rank(I), 3) - # All zeros - zero rank - assert_equal(matrix_rank(np.zeros((4, 4))), 0) - # 1 dimension - rank 1 unless all 0 - assert_equal(matrix_rank([1, 0, 0, 0]), 1) - assert_equal(matrix_rank(np.zeros((4,))), 0) - # accepts array-like - assert_equal(matrix_rank([1]), 1) - # greater than 2 dimensions treated as stacked matrices - ms = np.array([I, np.eye(4), np.zeros((4,4))]) - assert_equal(matrix_rank(ms), np.array([3, 4, 0])) - # works on scalar - assert_equal(matrix_rank(1), 1) - - def test_symmetric_rank(self): - assert_equal(4, matrix_rank(np.eye(4), hermitian=True)) - assert_equal(1, matrix_rank(np.ones((4, 4)), hermitian=True)) - assert_equal(0, matrix_rank(np.zeros((4, 4)), hermitian=True)) - # rank deficient matrix - I = np.eye(4) - I[-1, -1] = 0. - assert_equal(3, matrix_rank(I, hermitian=True)) - # manually supplied tolerance - I[-1, -1] = 1e-8 - assert_equal(4, matrix_rank(I, hermitian=True, tol=0.99e-8)) - assert_equal(3, matrix_rank(I, hermitian=True, tol=1.01e-8)) - - -def test_reduced_rank(): - # Test matrices with reduced rank - rng = np.random.RandomState(20120714) - for i in range(100): - # Make a rank deficient matrix - X = rng.normal(size=(40, 10)) - X[:, 0] = X[:, 1] + X[:, 2] - # Assert that matrix_rank detected deficiency - assert_equal(matrix_rank(X), 9) - X[:, 3] = X[:, 4] + X[:, 5] - assert_equal(matrix_rank(X), 8) - - -class TestQR(object): - # Define the array class here, so run this on matrices elsewhere. - array = np.array - - def check_qr(self, a): - # This test expects the argument `a` to be an ndarray or - # a subclass of an ndarray of inexact type. 
- a_type = type(a) - a_dtype = a.dtype - m, n = a.shape - k = min(m, n) - - # mode == 'complete' - q, r = linalg.qr(a, mode='complete') - assert_(q.dtype == a_dtype) - assert_(r.dtype == a_dtype) - assert_(isinstance(q, a_type)) - assert_(isinstance(r, a_type)) - assert_(q.shape == (m, m)) - assert_(r.shape == (m, n)) - assert_almost_equal(dot(q, r), a) - assert_almost_equal(dot(q.T.conj(), q), np.eye(m)) - assert_almost_equal(np.triu(r), r) - - # mode == 'reduced' - q1, r1 = linalg.qr(a, mode='reduced') - assert_(q1.dtype == a_dtype) - assert_(r1.dtype == a_dtype) - assert_(isinstance(q1, a_type)) - assert_(isinstance(r1, a_type)) - assert_(q1.shape == (m, k)) - assert_(r1.shape == (k, n)) - assert_almost_equal(dot(q1, r1), a) - assert_almost_equal(dot(q1.T.conj(), q1), np.eye(k)) - assert_almost_equal(np.triu(r1), r1) - - # mode == 'r' - r2 = linalg.qr(a, mode='r') - assert_(r2.dtype == a_dtype) - assert_(isinstance(r2, a_type)) - assert_almost_equal(r2, r1) - - - @pytest.mark.parametrize(["m", "n"], [ - (3, 0), - (0, 3), - (0, 0) - ]) - def test_qr_empty(self, m, n): - k = min(m, n) - a = np.empty((m, n)) - - self.check_qr(a) - - h, tau = np.linalg.qr(a, mode='raw') - assert_equal(h.dtype, np.double) - assert_equal(tau.dtype, np.double) - assert_equal(h.shape, (n, m)) - assert_equal(tau.shape, (k,)) - - def test_mode_raw(self): - # The factorization is not unique and varies between libraries, - # so it is not possible to check against known values. Functional - # testing is a possibility, but awaits the exposure of more - # of the functions in lapack_lite. Consequently, this test is - # very limited in scope. Note that the results are in FORTRAN - # order, hence the h arrays are transposed. 
- a = self.array([[1, 2], [3, 4], [5, 6]], dtype=np.double) - - # Test double - h, tau = linalg.qr(a, mode='raw') - assert_(h.dtype == np.double) - assert_(tau.dtype == np.double) - assert_(h.shape == (2, 3)) - assert_(tau.shape == (2,)) - - h, tau = linalg.qr(a.T, mode='raw') - assert_(h.dtype == np.double) - assert_(tau.dtype == np.double) - assert_(h.shape == (3, 2)) - assert_(tau.shape == (2,)) - - def test_mode_all_but_economic(self): - a = self.array([[1, 2], [3, 4]]) - b = self.array([[1, 2], [3, 4], [5, 6]]) - for dt in "fd": - m1 = a.astype(dt) - m2 = b.astype(dt) - self.check_qr(m1) - self.check_qr(m2) - self.check_qr(m2.T) - - for dt in "fd": - m1 = 1 + 1j * a.astype(dt) - m2 = 1 + 1j * b.astype(dt) - self.check_qr(m1) - self.check_qr(m2) - self.check_qr(m2.T) - - -class TestCholesky(object): - # TODO: are there no other tests for cholesky? - - def test_basic_property(self): - # Check A = L L^H - shapes = [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)] - dtypes = (np.float32, np.float64, np.complex64, np.complex128) - - for shape, dtype in itertools.product(shapes, dtypes): - np.random.seed(1) - a = np.random.randn(*shape) - if np.issubdtype(dtype, np.complexfloating): - a = a + 1j*np.random.randn(*shape) - - t = list(range(len(shape))) - t[-2:] = -1, -2 - - a = np.matmul(a.transpose(t).conj(), a) - a = np.asarray(a, dtype=dtype) - - c = np.linalg.cholesky(a) - - b = np.matmul(c, c.transpose(t).conj()) - assert_allclose(b, a, - err_msg="{} {}\n{}\n{}".format(shape, dtype, a, c), - atol=500 * a.shape[0] * np.finfo(dtype).eps) - - def test_0_size(self): - class ArraySubclass(np.ndarray): - pass - a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) - res = linalg.cholesky(a) - assert_equal(a.shape, res.shape) - assert_(res.dtype.type is np.float64) - # for documentation purpose: - assert_(isinstance(res, np.ndarray)) - - a = np.zeros((1, 0, 0), dtype=np.complex64).view(ArraySubclass) - res = linalg.cholesky(a) - assert_equal(a.shape, res.shape) - 
assert_(res.dtype.type is np.complex64) - assert_(isinstance(res, np.ndarray)) - - -def test_byteorder_check(): - # Byte order check should pass for native order - if sys.byteorder == 'little': - native = '<' - else: - native = '>' - - for dtt in (np.float32, np.float64): - arr = np.eye(4, dtype=dtt) - n_arr = arr.newbyteorder(native) - sw_arr = arr.newbyteorder('S').byteswap() - assert_equal(arr.dtype.byteorder, '=') - for routine in (linalg.inv, linalg.det, linalg.pinv): - # Normal call - res = routine(arr) - # Native but not '=' - assert_array_equal(res, routine(n_arr)) - # Swapped - assert_array_equal(res, routine(sw_arr)) - - -def test_generalized_raise_multiloop(): - # It should raise an error even if the error doesn't occur in the - # last iteration of the ufunc inner loop - - invertible = np.array([[1, 2], [3, 4]]) - non_invertible = np.array([[1, 1], [1, 1]]) - - x = np.zeros([4, 4, 2, 2])[1::2] - x[...] = invertible - x[0, 0] = non_invertible - - assert_raises(np.linalg.LinAlgError, np.linalg.inv, x) - - -def test_xerbla_override(): - # Check that our xerbla has been successfully linked in. If it is not, - # the default xerbla routine is called, which prints a message to stdout - # and may, or may not, abort the process depending on the LAPACK package. - - XERBLA_OK = 255 - - try: - pid = os.fork() - except (OSError, AttributeError): - # fork failed, or not running on POSIX - pytest.skip("Not POSIX or fork failed.") - - if pid == 0: - # child; close i/o file handles - os.close(1) - os.close(0) - # Avoid producing core files. - import resource - resource.setrlimit(resource.RLIMIT_CORE, (0, 0)) - # These calls may abort. 
- try: - np.linalg.lapack_lite.xerbla() - except ValueError: - pass - except Exception: - os._exit(os.EX_CONFIG) - - try: - a = np.array([[1.]]) - np.linalg.lapack_lite.dorgqr( - 1, 1, 1, a, - 0, # <- invalid value - a, a, 0, 0) - except ValueError as e: - if "DORGQR parameter number 5" in str(e): - # success, reuse error code to mark success as - # FORTRAN STOP returns as success. - os._exit(XERBLA_OK) - - # Did not abort, but our xerbla was not linked in. - os._exit(os.EX_CONFIG) - else: - # parent - pid, status = os.wait() - if os.WEXITSTATUS(status) != XERBLA_OK: - pytest.skip('Numpy xerbla not linked in.') - - -def test_sdot_bug_8577(): - # Regression test that loading certain other libraries does not - # result to wrong results in float32 linear algebra. - # - # There's a bug gh-8577 on OSX that can trigger this, and perhaps - # there are also other situations in which it occurs. - # - # Do the check in a separate process. - - bad_libs = ['PyQt5.QtWidgets', 'IPython'] - - template = textwrap.dedent(""" - import sys - {before} - try: - import {bad_lib} - except ImportError: - sys.exit(0) - {after} - x = np.ones(2, dtype=np.float32) - sys.exit(0 if np.allclose(x.dot(x), 2.0) else 1) - """) - - for bad_lib in bad_libs: - code = template.format(before="import numpy as np", after="", - bad_lib=bad_lib) - subprocess.check_call([sys.executable, "-c", code]) - - # Swapped import order - code = template.format(after="import numpy as np", before="", - bad_lib=bad_lib) - subprocess.check_call([sys.executable, "-c", code]) - - -class TestMultiDot(object): - - def test_basic_function_with_three_arguments(self): - # multi_dot with three arguments uses a fast hand coded algorithm to - # determine the optimal order. Therefore test it separately. 
- A = np.random.random((6, 2)) - B = np.random.random((2, 6)) - C = np.random.random((6, 2)) - - assert_almost_equal(multi_dot([A, B, C]), A.dot(B).dot(C)) - assert_almost_equal(multi_dot([A, B, C]), np.dot(A, np.dot(B, C))) - - def test_basic_function_with_two_arguments(self): - # separate code path with two arguments - A = np.random.random((6, 2)) - B = np.random.random((2, 6)) - - assert_almost_equal(multi_dot([A, B]), A.dot(B)) - assert_almost_equal(multi_dot([A, B]), np.dot(A, B)) - - def test_basic_function_with_dynamic_programing_optimization(self): - # multi_dot with four or more arguments uses the dynamic programing - # optimization and therefore deserve a separate - A = np.random.random((6, 2)) - B = np.random.random((2, 6)) - C = np.random.random((6, 2)) - D = np.random.random((2, 1)) - assert_almost_equal(multi_dot([A, B, C, D]), A.dot(B).dot(C).dot(D)) - - def test_vector_as_first_argument(self): - # The first argument can be 1-D - A1d = np.random.random(2) # 1-D - B = np.random.random((2, 6)) - C = np.random.random((6, 2)) - D = np.random.random((2, 2)) - - # the result should be 1-D - assert_equal(multi_dot([A1d, B, C, D]).shape, (2,)) - - def test_vector_as_last_argument(self): - # The last argument can be 1-D - A = np.random.random((6, 2)) - B = np.random.random((2, 6)) - C = np.random.random((6, 2)) - D1d = np.random.random(2) # 1-D - - # the result should be 1-D - assert_equal(multi_dot([A, B, C, D1d]).shape, (6,)) - - def test_vector_as_first_and_last_argument(self): - # The first and last arguments can be 1-D - A1d = np.random.random(2) # 1-D - B = np.random.random((2, 6)) - C = np.random.random((6, 2)) - D1d = np.random.random(2) # 1-D - - # the result should be a scalar - assert_equal(multi_dot([A1d, B, C, D1d]).shape, ()) - - def test_dynamic_programming_logic(self): - # Test for the dynamic programming part - # This test is directly taken from Cormen page 376. 
- arrays = [np.random.random((30, 35)), - np.random.random((35, 15)), - np.random.random((15, 5)), - np.random.random((5, 10)), - np.random.random((10, 20)), - np.random.random((20, 25))] - m_expected = np.array([[0., 15750., 7875., 9375., 11875., 15125.], - [0., 0., 2625., 4375., 7125., 10500.], - [0., 0., 0., 750., 2500., 5375.], - [0., 0., 0., 0., 1000., 3500.], - [0., 0., 0., 0., 0., 5000.], - [0., 0., 0., 0., 0., 0.]]) - s_expected = np.array([[0, 1, 1, 3, 3, 3], - [0, 0, 2, 3, 3, 3], - [0, 0, 0, 3, 3, 3], - [0, 0, 0, 0, 4, 5], - [0, 0, 0, 0, 0, 5], - [0, 0, 0, 0, 0, 0]], dtype=int) - s_expected -= 1 # Cormen uses 1-based index, python does not. - - s, m = _multi_dot_matrix_chain_order(arrays, return_costs=True) - - # Only the upper triangular part (without the diagonal) is interesting. - assert_almost_equal(np.triu(s[:-1, 1:]), - np.triu(s_expected[:-1, 1:])) - assert_almost_equal(np.triu(m), np.triu(m_expected)) - - def test_too_few_input_arrays(self): - assert_raises(ValueError, multi_dot, []) - assert_raises(ValueError, multi_dot, [np.random.random((3, 3))]) - - -class TestTensorinv(object): - - @pytest.mark.parametrize("arr, ind", [ - (np.ones((4, 6, 8, 2)), 2), - (np.ones((3, 3, 2)), 1), - ]) - def test_non_square_handling(self, arr, ind): - with assert_raises(LinAlgError): - linalg.tensorinv(arr, ind=ind) - - @pytest.mark.parametrize("shape, ind", [ - # examples from docstring - ((4, 6, 8, 3), 2), - ((24, 8, 3), 1), - ]) - def test_tensorinv_shape(self, shape, ind): - a = np.eye(24) - a.shape = shape - ainv = linalg.tensorinv(a=a, ind=ind) - expected = a.shape[ind:] + a.shape[:ind] - actual = ainv.shape - assert_equal(actual, expected) - - @pytest.mark.parametrize("ind", [ - 0, -2, - ]) - def test_tensorinv_ind_limit(self, ind): - a = np.eye(24) - a.shape = (4, 6, 8, 3) - with assert_raises(ValueError): - linalg.tensorinv(a=a, ind=ind) - - def test_tensorinv_result(self): - # mimic a docstring example - a = np.eye(24) - a.shape = (24, 8, 3) - ainv = 
linalg.tensorinv(a, ind=1) - b = np.ones(24) - assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) - - -def test_unsupported_commontype(): - # linalg gracefully handles unsupported type - arr = np.array([[1, -2], [2, 5]], dtype='float16') - with assert_raises_regex(TypeError, "unsupported in linalg"): - linalg.cholesky(arr) - - -@pytest.mark.slow -@pytest.mark.xfail(not HAS_LAPACK64, run=False, - reason="Numpy not compiled with 64-bit BLAS/LAPACK") -@requires_memory(free_bytes=16e9) -def test_blas64_dot(): - n = 2**32 - a = np.zeros([1, n], dtype=np.float32) - b = np.ones([1, 1], dtype=np.float32) - a[0,-1] = 1 - c = np.dot(b, a) - assert_equal(c[0,-1], 1) - - -@pytest.mark.xfail(not HAS_LAPACK64, - reason="Numpy not compiled with 64-bit BLAS/LAPACK") -def test_blas64_geqrf_lwork_smoketest(): - # Smoke test LAPACK geqrf lwork call with 64-bit integers - dtype = np.float64 - lapack_routine = np.linalg.lapack_lite.dgeqrf - - m = 2**32 + 1 - n = 2**32 + 1 - lda = m - - # Dummy arrays, not referenced by the lapack routine, so don't - # need to be of the right size - a = np.zeros([1, 1], dtype=dtype) - work = np.zeros([1], dtype=dtype) - tau = np.zeros([1], dtype=dtype) - - # Size query - results = lapack_routine(m, n, a, lda, tau, work, -1, 0) - assert_equal(results['info'], 0) - assert_equal(results['m'], m) - assert_equal(results['n'], m) - - # Should result to an integer of a reasonable size - lwork = int(work.item()) - assert_(2**32 < lwork < 2**42) diff --git a/venv/lib/python3.7/site-packages/numpy/linalg/tests/test_regression.py b/venv/lib/python3.7/site-packages/numpy/linalg/tests/test_regression.py deleted file mode 100644 index bd3a458..0000000 --- a/venv/lib/python3.7/site-packages/numpy/linalg/tests/test_regression.py +++ /dev/null @@ -1,150 +0,0 @@ -""" Test functions for linalg module -""" -from __future__ import division, absolute_import, print_function - -import warnings - -import numpy as np -from numpy import linalg, arange, 
float64, array, dot, transpose -from numpy.testing import ( - assert_, assert_raises, assert_equal, assert_array_equal, - assert_array_almost_equal, assert_array_less -) - - -class TestRegression(object): - - def test_eig_build(self): - # Ticket #652 - rva = array([1.03221168e+02 + 0.j, - -1.91843603e+01 + 0.j, - -6.04004526e-01 + 15.84422474j, - -6.04004526e-01 - 15.84422474j, - -1.13692929e+01 + 0.j, - -6.57612485e-01 + 10.41755503j, - -6.57612485e-01 - 10.41755503j, - 1.82126812e+01 + 0.j, - 1.06011014e+01 + 0.j, - 7.80732773e+00 + 0.j, - -7.65390898e-01 + 0.j, - 1.51971555e-15 + 0.j, - -1.51308713e-15 + 0.j]) - a = arange(13 * 13, dtype=float64) - a.shape = (13, 13) - a = a % 17 - va, ve = linalg.eig(a) - va.sort() - rva.sort() - assert_array_almost_equal(va, rva) - - def test_eigh_build(self): - # Ticket 662. - rvals = [68.60568999, 89.57756725, 106.67185574] - - cov = array([[77.70273908, 3.51489954, 15.64602427], - [3.51489954, 88.97013878, -1.07431931], - [15.64602427, -1.07431931, 98.18223512]]) - - vals, vecs = linalg.eigh(cov) - assert_array_almost_equal(vals, rvals) - - def test_svd_build(self): - # Ticket 627. - a = array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]]) - m, n = a.shape - u, s, vh = linalg.svd(a) - - b = dot(transpose(u[:, n:]), a) - - assert_array_almost_equal(b, np.zeros((2, 2))) - - def test_norm_vector_badarg(self): - # Regression for #786: Froebenius norm for vectors raises - # TypeError. 
- assert_raises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro') - - def test_lapack_endian(self): - # For bug #1482 - a = array([[5.7998084, -2.1825367], - [-2.1825367, 9.85910595]], dtype='>f8') - b = array(a, dtype=' 0.5) - assert_equal(c, 1) - assert_equal(np.linalg.matrix_rank(a), 1) - assert_array_less(1, np.linalg.norm(a, ord=2)) - - def test_norm_object_array(self): - # gh-7575 - testvector = np.array([np.array([0, 1]), 0, 0], dtype=object) - - norm = linalg.norm(testvector) - assert_array_equal(norm, [0, 1]) - assert_(norm.dtype == np.dtype('float64')) - - norm = linalg.norm(testvector, ord=1) - assert_array_equal(norm, [0, 1]) - assert_(norm.dtype != np.dtype('float64')) - - norm = linalg.norm(testvector, ord=2) - assert_array_equal(norm, [0, 1]) - assert_(norm.dtype == np.dtype('float64')) - - assert_raises(ValueError, linalg.norm, testvector, ord='fro') - assert_raises(ValueError, linalg.norm, testvector, ord='nuc') - assert_raises(ValueError, linalg.norm, testvector, ord=np.inf) - assert_raises(ValueError, linalg.norm, testvector, ord=-np.inf) - with warnings.catch_warnings(): - warnings.simplefilter("error", DeprecationWarning) - assert_raises((AttributeError, DeprecationWarning), - linalg.norm, testvector, ord=0) - assert_raises(ValueError, linalg.norm, testvector, ord=-1) - assert_raises(ValueError, linalg.norm, testvector, ord=-2) - - testmatrix = np.array([[np.array([0, 1]), 0, 0], - [0, 0, 0]], dtype=object) - - norm = linalg.norm(testmatrix) - assert_array_equal(norm, [0, 1]) - assert_(norm.dtype == np.dtype('float64')) - - norm = linalg.norm(testmatrix, ord='fro') - assert_array_equal(norm, [0, 1]) - assert_(norm.dtype == np.dtype('float64')) - - assert_raises(TypeError, linalg.norm, testmatrix, ord='nuc') - assert_raises(ValueError, linalg.norm, testmatrix, ord=np.inf) - assert_raises(ValueError, linalg.norm, testmatrix, ord=-np.inf) - assert_raises(ValueError, linalg.norm, testmatrix, ord=0) - assert_raises(ValueError, linalg.norm, 
testmatrix, ord=1) - assert_raises(ValueError, linalg.norm, testmatrix, ord=-1) - assert_raises(TypeError, linalg.norm, testmatrix, ord=2) - assert_raises(TypeError, linalg.norm, testmatrix, ord=-2) - assert_raises(ValueError, linalg.norm, testmatrix, ord=3) - - def test_lstsq_complex_larger_rhs(self): - # gh-9891 - size = 20 - n_rhs = 70 - G = np.random.randn(size, size) + 1j * np.random.randn(size, size) - u = np.random.randn(size, n_rhs) + 1j * np.random.randn(size, n_rhs) - b = G.dot(u) - # This should work without segmentation fault. - u_lstsq, res, rank, sv = linalg.lstsq(G, b, rcond=None) - # check results just in case - assert_array_almost_equal(u_lstsq, u) diff --git a/venv/lib/python3.7/site-packages/numpy/ma/__init__.py b/venv/lib/python3.7/site-packages/numpy/ma/__init__.py deleted file mode 100644 index 36ceb1f..0000000 --- a/venv/lib/python3.7/site-packages/numpy/ma/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -""" -============= -Masked Arrays -============= - -Arrays sometimes contain invalid or missing data. When doing operations -on such arrays, we wish to suppress invalid values, which is the purpose masked -arrays fulfill (an example of typical use is given below). - -For example, examine the following array: - ->>> x = np.array([2, 1, 3, np.nan, 5, 2, 3, np.nan]) - -When we try to calculate the mean of the data, the result is undetermined: - ->>> np.mean(x) -nan - -The mean is calculated using roughly ``np.sum(x)/len(x)``, but since -any number added to ``NaN`` [1]_ produces ``NaN``, this doesn't work. Enter -masked arrays: - ->>> m = np.ma.masked_array(x, np.isnan(x)) ->>> m -masked_array(data = [2.0 1.0 3.0 -- 5.0 2.0 3.0 --], - mask = [False False False True False False False True], - fill_value=1e+20) - -Here, we construct a masked array that suppress all ``NaN`` values. We -may now proceed to calculate the mean of the other values: - ->>> np.mean(m) -2.6666666666666665 - -.. 
[1] Not-a-Number, a floating point value that is the result of an - invalid operation. - -.. moduleauthor:: Pierre Gerard-Marchant -.. moduleauthor:: Jarrod Millman - -""" -from __future__ import division, absolute_import, print_function - -from . import core -from .core import * - -from . import extras -from .extras import * - -__all__ = ['core', 'extras'] -__all__ += core.__all__ -__all__ += extras.__all__ - -from numpy._pytesttester import PytestTester -test = PytestTester(__name__) -del PytestTester diff --git a/venv/lib/python3.7/site-packages/numpy/ma/bench.py b/venv/lib/python3.7/site-packages/numpy/ma/bench.py deleted file mode 100644 index a9ba42d..0000000 --- a/venv/lib/python3.7/site-packages/numpy/ma/bench.py +++ /dev/null @@ -1,133 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from __future__ import division, print_function - -import timeit -import numpy - - -############################################################################### -# Global variables # -############################################################################### - - -# Small arrays -xs = numpy.random.uniform(-1, 1, 6).reshape(2, 3) -ys = numpy.random.uniform(-1, 1, 6).reshape(2, 3) -zs = xs + 1j * ys -m1 = [[True, False, False], [False, False, True]] -m2 = [[True, False, True], [False, False, True]] -nmxs = numpy.ma.array(xs, mask=m1) -nmys = numpy.ma.array(ys, mask=m2) -nmzs = numpy.ma.array(zs, mask=m1) - -# Big arrays -xl = numpy.random.uniform(-1, 1, 100*100).reshape(100, 100) -yl = numpy.random.uniform(-1, 1, 100*100).reshape(100, 100) -zl = xl + 1j * yl -maskx = xl > 0.8 -masky = yl < -0.8 -nmxl = numpy.ma.array(xl, mask=maskx) -nmyl = numpy.ma.array(yl, mask=masky) -nmzl = numpy.ma.array(zl, mask=maskx) - - -############################################################################### -# Functions # -############################################################################### - - -def timer(s, v='', nloop=500, nrep=3): - units = ["s", "ms", "µs", 
"ns"] - scaling = [1, 1e3, 1e6, 1e9] - print("%s : %-50s : " % (v, s), end=' ') - varnames = ["%ss,nm%ss,%sl,nm%sl" % tuple(x*4) for x in 'xyz'] - setup = 'from __main__ import numpy, ma, %s' % ','.join(varnames) - Timer = timeit.Timer(stmt=s, setup=setup) - best = min(Timer.repeat(nrep, nloop)) / nloop - if best > 0.0: - order = min(-int(numpy.floor(numpy.log10(best)) // 3), 3) - else: - order = 3 - print("%d loops, best of %d: %.*g %s per loop" % (nloop, nrep, - 3, - best * scaling[order], - units[order])) - - -def compare_functions_1v(func, nloop=500, - xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl): - funcname = func.__name__ - print("-"*50) - print("%s on small arrays" % funcname) - module, data = "numpy.ma", "nmxs" - timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) - - print("%s on large arrays" % funcname) - module, data = "numpy.ma", "nmxl" - timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) - return - -def compare_methods(methodname, args, vars='x', nloop=500, test=True, - xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl): - print("-"*50) - print("%s on small arrays" % methodname) - data, ver = "nm%ss" % vars, 'numpy.ma' - timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop) - - print("%s on large arrays" % methodname) - data, ver = "nm%sl" % vars, 'numpy.ma' - timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop) - return - -def compare_functions_2v(func, nloop=500, test=True, - xs=xs, nmxs=nmxs, - ys=ys, nmys=nmys, - xl=xl, nmxl=nmxl, - yl=yl, nmyl=nmyl): - funcname = func.__name__ - print("-"*50) - print("%s on small arrays" % funcname) - module, data = "numpy.ma", "nmxs,nmys" - timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) - - print("%s on large arrays" % funcname) - module, data = "numpy.ma", "nmxl,nmyl" - timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) - return - - -if __name__ == 
'__main__': - compare_functions_1v(numpy.sin) - compare_functions_1v(numpy.log) - compare_functions_1v(numpy.sqrt) - - compare_functions_2v(numpy.multiply) - compare_functions_2v(numpy.divide) - compare_functions_2v(numpy.power) - - compare_methods('ravel', '', nloop=1000) - compare_methods('conjugate', '', 'z', nloop=1000) - compare_methods('transpose', '', nloop=1000) - compare_methods('compressed', '', nloop=1000) - compare_methods('__getitem__', '0', nloop=1000) - compare_methods('__getitem__', '(0,0)', nloop=1000) - compare_methods('__getitem__', '[0,-1]', nloop=1000) - compare_methods('__setitem__', '0, 17', nloop=1000, test=False) - compare_methods('__setitem__', '(0,0), 17', nloop=1000, test=False) - - print("-"*50) - print("__setitem__ on small arrays") - timer('nmxs.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ', nloop=10000) - - print("-"*50) - print("__setitem__ on large arrays") - timer('nmxl.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ', nloop=10000) - - print("-"*50) - print("where on small arrays") - timer('numpy.ma.where(nmxs>2,nmxs,nmys)', 'numpy.ma ', nloop=1000) - print("-"*50) - print("where on large arrays") - timer('numpy.ma.where(nmxl>2,nmxl,nmyl)', 'numpy.ma ', nloop=100) diff --git a/venv/lib/python3.7/site-packages/numpy/ma/core.py b/venv/lib/python3.7/site-packages/numpy/ma/core.py deleted file mode 100644 index 2baf547..0000000 --- a/venv/lib/python3.7/site-packages/numpy/ma/core.py +++ /dev/null @@ -1,8084 +0,0 @@ -""" -numpy.ma : a package to handle missing or invalid values. - -This package was initially written for numarray by Paul F. Dubois -at Lawrence Livermore National Laboratory. -In 2006, the package was completely rewritten by Pierre Gerard-Marchant -(University of Georgia) to make the MaskedArray class a subclass of ndarray, -and to improve support of structured arrays. - - -Copyright 1999, 2000, 2001 Regents of the University of California. -Released for unlimited redistribution. 
- -* Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois. -* Subclassing of the base `ndarray` 2006 by Pierre Gerard-Marchant - (pgmdevlist_AT_gmail_DOT_com) -* Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com) - -.. moduleauthor:: Pierre Gerard-Marchant - -""" -# pylint: disable-msg=E1002 -from __future__ import division, absolute_import, print_function - -import sys -import operator -import warnings -import textwrap -import re -from functools import reduce - -if sys.version_info[0] >= 3: - import builtins -else: - import __builtin__ as builtins - -import numpy as np -import numpy.core.umath as umath -import numpy.core.numerictypes as ntypes -from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue -from numpy import array as narray -from numpy.lib.function_base import angle -from numpy.compat import ( - getargspec, formatargspec, long, basestring, unicode, bytes - ) -from numpy import expand_dims -from numpy.core.numeric import normalize_axis_tuple -from numpy.core._internal import recursive -from numpy.compat import pickle - - -__all__ = [ - 'MAError', 'MaskError', 'MaskType', 'MaskedArray', 'abs', 'absolute', - 'add', 'all', 'allclose', 'allequal', 'alltrue', 'amax', 'amin', - 'angle', 'anom', 'anomalies', 'any', 'append', 'arange', 'arccos', - 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', - 'argmax', 'argmin', 'argsort', 'around', 'array', 'asanyarray', - 'asarray', 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'bool_', 'ceil', - 'choose', 'clip', 'common_fill_value', 'compress', 'compressed', - 'concatenate', 'conjugate', 'convolve', 'copy', 'correlate', 'cos', 'cosh', - 'count', 'cumprod', 'cumsum', 'default_fill_value', 'diag', 'diagonal', - 'diff', 'divide', 'empty', 'empty_like', 'equal', 'exp', - 'expand_dims', 'fabs', 'filled', 'fix_invalid', 'flatten_mask', - 'flatten_structured_array', 'floor', 'floor_divide', 'fmod', - 'frombuffer', 'fromflex', 'fromfunction', 'getdata', 'getmask', - 
'getmaskarray', 'greater', 'greater_equal', 'harden_mask', 'hypot', - 'identity', 'ids', 'indices', 'inner', 'innerproduct', 'isMA', - 'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift', - 'less', 'less_equal', 'log', 'log10', 'log2', - 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'make_mask', - 'make_mask_descr', 'make_mask_none', 'mask_or', 'masked', - 'masked_array', 'masked_equal', 'masked_greater', - 'masked_greater_equal', 'masked_inside', 'masked_invalid', - 'masked_less', 'masked_less_equal', 'masked_not_equal', - 'masked_object', 'masked_outside', 'masked_print_option', - 'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum', - 'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value', - 'mod', 'multiply', 'mvoid', 'ndim', 'negative', 'nomask', 'nonzero', - 'not_equal', 'ones', 'outer', 'outerproduct', 'power', 'prod', - 'product', 'ptp', 'put', 'putmask', 'ravel', 'remainder', - 'repeat', 'reshape', 'resize', 'right_shift', 'round', 'round_', - 'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'soften_mask', - 'sometrue', 'sort', 'sqrt', 'squeeze', 'std', 'subtract', 'sum', - 'swapaxes', 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide', - 'var', 'where', 'zeros', - ] - -MaskType = np.bool_ -nomask = MaskType(0) - -class MaskedArrayFutureWarning(FutureWarning): - pass - -def _deprecate_argsort_axis(arr): - """ - Adjust the axis passed to argsort, warning if necessary - - Parameters - ---------- - arr - The array which argsort was called on - - np.ma.argsort has a long-term bug where the default of the axis argument - is wrong (gh-8701), which now must be kept for backwards compatibiity. - Thankfully, this only makes a difference when arrays are 2- or more- - dimensional, so we only need a warning then. - """ - if arr.ndim <= 1: - # no warning needed - but switch to -1 anyway, to avoid surprising - # subclasses, which are more likely to implement scalar axes. 
- return -1 - else: - # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default - warnings.warn( - "In the future the default for argsort will be axis=-1, not the " - "current None, to match its documentation and np.argsort. " - "Explicitly pass -1 or None to silence this warning.", - MaskedArrayFutureWarning, stacklevel=3) - return None - - -def doc_note(initialdoc, note): - """ - Adds a Notes section to an existing docstring. - - """ - if initialdoc is None: - return - if note is None: - return initialdoc - - notesplit = re.split(r'\n\s*?Notes\n\s*?-----', initialdoc) - - notedoc = """\ -Notes - ----- - %s""" % note - - if len(notesplit) > 1: - notedoc = '\n\n ' + notedoc + '\n' - - return ''.join(notesplit[:1] + [notedoc] + notesplit[1:]) - - -def get_object_signature(obj): - """ - Get the signature from obj - - """ - try: - sig = formatargspec(*getargspec(obj)) - except TypeError: - sig = '' - return sig - - -############################################################################### -# Exceptions # -############################################################################### - - -class MAError(Exception): - """ - Class for masked array related errors. - - """ - pass - - -class MaskError(MAError): - """ - Class for mask related errors. 
- - """ - pass - - -############################################################################### -# Filling options # -############################################################################### - - -# b: boolean - c: complex - f: floats - i: integer - O: object - S: string -default_filler = {'b': True, - 'c': 1.e20 + 0.0j, - 'f': 1.e20, - 'i': 999999, - 'O': '?', - 'S': b'N/A', - 'u': 999999, - 'V': b'???', - 'U': u'N/A' - } - -# Add datetime64 and timedelta64 types -for v in ["Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", - "fs", "as"]: - default_filler["M8[" + v + "]"] = np.datetime64("NaT", v) - default_filler["m8[" + v + "]"] = np.timedelta64("NaT", v) - -max_filler = ntypes._minvals -max_filler.update([(k, -np.inf) for k in [np.float32, np.float64]]) -min_filler = ntypes._maxvals -min_filler.update([(k, +np.inf) for k in [np.float32, np.float64]]) -if 'float128' in ntypes.typeDict: - max_filler.update([(np.float128, -np.inf)]) - min_filler.update([(np.float128, +np.inf)]) - - -def _recursive_fill_value(dtype, f): - """ - Recursively produce a fill value for `dtype`, calling f on scalar dtypes - """ - if dtype.names is not None: - vals = tuple(_recursive_fill_value(dtype[name], f) for name in dtype.names) - return np.array(vals, dtype=dtype)[()] # decay to void scalar from 0d - elif dtype.subdtype: - subtype, shape = dtype.subdtype - subval = _recursive_fill_value(subtype, f) - return np.full(shape, subval) - else: - return f(dtype) - - -def _get_dtype_of(obj): - """ Convert the argument for *_fill_value into a dtype """ - if isinstance(obj, np.dtype): - return obj - elif hasattr(obj, 'dtype'): - return obj.dtype - else: - return np.asanyarray(obj).dtype - - -def default_fill_value(obj): - """ - Return the default fill value for the argument object. 
- - The default filling value depends on the datatype of the input - array or the type of the input scalar: - - ======== ======== - datatype default - ======== ======== - bool True - int 999999 - float 1.e20 - complex 1.e20+0j - object '?' - string 'N/A' - ======== ======== - - For structured types, a structured scalar is returned, with each field the - default fill value for its type. - - For subarray types, the fill value is an array of the same size containing - the default scalar fill value. - - Parameters - ---------- - obj : ndarray, dtype or scalar - The array data-type or scalar for which the default fill value - is returned. - - Returns - ------- - fill_value : scalar - The default fill value. - - Examples - -------- - >>> np.ma.default_fill_value(1) - 999999 - >>> np.ma.default_fill_value(np.array([1.1, 2., np.pi])) - 1e+20 - >>> np.ma.default_fill_value(np.dtype(complex)) - (1e+20+0j) - - """ - def _scalar_fill_value(dtype): - if dtype.kind in 'Mm': - return default_filler.get(dtype.str[1:], '?') - else: - return default_filler.get(dtype.kind, '?') - - dtype = _get_dtype_of(obj) - return _recursive_fill_value(dtype, _scalar_fill_value) - - -def _extremum_fill_value(obj, extremum, extremum_name): - - def _scalar_fill_value(dtype): - try: - return extremum[dtype] - except KeyError: - raise TypeError( - "Unsuitable type {} for calculating {}." - .format(dtype, extremum_name) - ) - - dtype = _get_dtype_of(obj) - return _recursive_fill_value(dtype, _scalar_fill_value) - - -def minimum_fill_value(obj): - """ - Return the maximum value that can be represented by the dtype of an object. - - This function is useful for calculating a fill value suitable for - taking the minimum of an array with a given dtype. - - Parameters - ---------- - obj : ndarray, dtype or scalar - An object that can be queried for it's numeric type. - - Returns - ------- - val : scalar - The maximum representable value. 
- - Raises - ------ - TypeError - If `obj` isn't a suitable numeric type. - - See Also - -------- - maximum_fill_value : The inverse function. - set_fill_value : Set the filling value of a masked array. - MaskedArray.fill_value : Return current fill value. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.int8() - >>> ma.minimum_fill_value(a) - 127 - >>> a = np.int32() - >>> ma.minimum_fill_value(a) - 2147483647 - - An array of numeric data can also be passed. - - >>> a = np.array([1, 2, 3], dtype=np.int8) - >>> ma.minimum_fill_value(a) - 127 - >>> a = np.array([1, 2, 3], dtype=np.float32) - >>> ma.minimum_fill_value(a) - inf - - """ - return _extremum_fill_value(obj, min_filler, "minimum") - - -def maximum_fill_value(obj): - """ - Return the minimum value that can be represented by the dtype of an object. - - This function is useful for calculating a fill value suitable for - taking the maximum of an array with a given dtype. - - Parameters - ---------- - obj : ndarray, dtype or scalar - An object that can be queried for it's numeric type. - - Returns - ------- - val : scalar - The minimum representable value. - - Raises - ------ - TypeError - If `obj` isn't a suitable numeric type. - - See Also - -------- - minimum_fill_value : The inverse function. - set_fill_value : Set the filling value of a masked array. - MaskedArray.fill_value : Return current fill value. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.int8() - >>> ma.maximum_fill_value(a) - -128 - >>> a = np.int32() - >>> ma.maximum_fill_value(a) - -2147483648 - - An array of numeric data can also be passed. - - >>> a = np.array([1, 2, 3], dtype=np.int8) - >>> ma.maximum_fill_value(a) - -128 - >>> a = np.array([1, 2, 3], dtype=np.float32) - >>> ma.maximum_fill_value(a) - -inf - - """ - return _extremum_fill_value(obj, max_filler, "maximum") - - -def _recursive_set_fill_value(fillvalue, dt): - """ - Create a fill value for a structured dtype. 
- - Parameters - ---------- - fillvalue: scalar or array_like - Scalar or array representing the fill value. If it is of shorter - length than the number of fields in dt, it will be resized. - dt: dtype - The structured dtype for which to create the fill value. - - Returns - ------- - val: tuple - A tuple of values corresponding to the structured fill value. - - """ - fillvalue = np.resize(fillvalue, len(dt.names)) - output_value = [] - for (fval, name) in zip(fillvalue, dt.names): - cdtype = dt[name] - if cdtype.subdtype: - cdtype = cdtype.subdtype[0] - - if cdtype.names is not None: - output_value.append(tuple(_recursive_set_fill_value(fval, cdtype))) - else: - output_value.append(np.array(fval, dtype=cdtype).item()) - return tuple(output_value) - - -def _check_fill_value(fill_value, ndtype): - """ - Private function validating the given `fill_value` for the given dtype. - - If fill_value is None, it is set to the default corresponding to the dtype. - - If fill_value is not None, its value is forced to the given dtype. - - The result is always a 0d array. - - """ - ndtype = np.dtype(ndtype) - if fill_value is None: - fill_value = default_fill_value(ndtype) - elif ndtype.names is not None: - if isinstance(fill_value, (ndarray, np.void)): - try: - fill_value = np.array(fill_value, copy=False, dtype=ndtype) - except ValueError: - err_msg = "Unable to transform %s to dtype %s" - raise ValueError(err_msg % (fill_value, ndtype)) - else: - fill_value = np.asarray(fill_value, dtype=object) - fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype), - dtype=ndtype) - else: - if isinstance(fill_value, basestring) and (ndtype.char not in 'OSVU'): - # Note this check doesn't work if fill_value is not a scalar - err_msg = "Cannot set fill value of string with array of dtype %s" - raise TypeError(err_msg % ndtype) - else: - # In case we want to convert 1e20 to int. - # Also in case of converting string arrays. 
- try: - fill_value = np.array(fill_value, copy=False, dtype=ndtype) - except (OverflowError, ValueError): - # Raise TypeError instead of OverflowError or ValueError. - # OverflowError is seldom used, and the real problem here is - # that the passed fill_value is not compatible with the ndtype. - err_msg = "Cannot convert fill_value %s to dtype %s" - raise TypeError(err_msg % (fill_value, ndtype)) - return np.array(fill_value) - - -def set_fill_value(a, fill_value): - """ - Set the filling value of a, if a is a masked array. - - This function changes the fill value of the masked array `a` in place. - If `a` is not a masked array, the function returns silently, without - doing anything. - - Parameters - ---------- - a : array_like - Input array. - fill_value : dtype - Filling value. A consistency test is performed to make sure - the value is compatible with the dtype of `a`. - - Returns - ------- - None - Nothing returned by this function. - - See Also - -------- - maximum_fill_value : Return the default fill value for a dtype. - MaskedArray.fill_value : Return current fill value. - MaskedArray.set_fill_value : Equivalent method. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(5) - >>> a - array([0, 1, 2, 3, 4]) - >>> a = ma.masked_where(a < 3, a) - >>> a - masked_array(data=[--, --, --, 3, 4], - mask=[ True, True, True, False, False], - fill_value=999999) - >>> ma.set_fill_value(a, -999) - >>> a - masked_array(data=[--, --, --, 3, 4], - mask=[ True, True, True, False, False], - fill_value=-999) - - Nothing happens if `a` is not a masked array. - - >>> a = list(range(5)) - >>> a - [0, 1, 2, 3, 4] - >>> ma.set_fill_value(a, 100) - >>> a - [0, 1, 2, 3, 4] - >>> a = np.arange(5) - >>> a - array([0, 1, 2, 3, 4]) - >>> ma.set_fill_value(a, 100) - >>> a - array([0, 1, 2, 3, 4]) - - """ - if isinstance(a, MaskedArray): - a.set_fill_value(fill_value) - return - - -def get_fill_value(a): - """ - Return the filling value of a, if any. 
Otherwise, returns the - default filling value for that type. - - """ - if isinstance(a, MaskedArray): - result = a.fill_value - else: - result = default_fill_value(a) - return result - - -def common_fill_value(a, b): - """ - Return the common filling value of two masked arrays, if any. - - If ``a.fill_value == b.fill_value``, return the fill value, - otherwise return None. - - Parameters - ---------- - a, b : MaskedArray - The masked arrays for which to compare fill values. - - Returns - ------- - fill_value : scalar or None - The common fill value, or None. - - Examples - -------- - >>> x = np.ma.array([0, 1.], fill_value=3) - >>> y = np.ma.array([0, 1.], fill_value=3) - >>> np.ma.common_fill_value(x, y) - 3.0 - - """ - t1 = get_fill_value(a) - t2 = get_fill_value(b) - if t1 == t2: - return t1 - return None - - -def filled(a, fill_value=None): - """ - Return input as an array with masked data replaced by a fill value. - - If `a` is not a `MaskedArray`, `a` itself is returned. - If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to - ``a.fill_value``. - - Parameters - ---------- - a : MaskedArray or array_like - An input object. - fill_value : array_like, optional. - Can be scalar or non-scalar. If non-scalar, the - resulting filled array should be broadcastable - over input array. Default is None. - - Returns - ------- - a : ndarray - The filled array. - - See Also - -------- - compressed - - Examples - -------- - >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], - ... [1, 0, 0], - ... [0, 0, 0]]) - >>> x.filled() - array([[999999, 1, 2], - [999999, 4, 5], - [ 6, 7, 8]]) - >>> x.filled(fill_value=333) - array([[333, 1, 2], - [333, 4, 5], - [ 6, 7, 8]]) - >>> x.filled(fill_value=np.arange(3)) - array([[0, 1, 2], - [0, 4, 5], - [6, 7, 8]]) - - """ - if hasattr(a, 'filled'): - return a.filled(fill_value) - - elif isinstance(a, ndarray): - # Should we check for contiguity ? 
and a.flags['CONTIGUOUS']: - return a - elif isinstance(a, dict): - return np.array(a, 'O') - else: - return np.array(a) - - -def get_masked_subclass(*arrays): - """ - Return the youngest subclass of MaskedArray from a list of (masked) arrays. - - In case of siblings, the first listed takes over. - - """ - if len(arrays) == 1: - arr = arrays[0] - if isinstance(arr, MaskedArray): - rcls = type(arr) - else: - rcls = MaskedArray - else: - arrcls = [type(a) for a in arrays] - rcls = arrcls[0] - if not issubclass(rcls, MaskedArray): - rcls = MaskedArray - for cls in arrcls[1:]: - if issubclass(cls, rcls): - rcls = cls - # Don't return MaskedConstant as result: revert to MaskedArray - if rcls.__name__ == 'MaskedConstant': - return MaskedArray - return rcls - - -def getdata(a, subok=True): - """ - Return the data of a masked array as an ndarray. - - Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``, - else return `a` as a ndarray or subclass (depending on `subok`) if not. - - Parameters - ---------- - a : array_like - Input ``MaskedArray``, alternatively a ndarray or a subclass thereof. - subok : bool - Whether to force the output to be a `pure` ndarray (False) or to - return a subclass of ndarray if appropriate (True, default). - - See Also - -------- - getmask : Return the mask of a masked array, or nomask. - getmaskarray : Return the mask of a masked array, or full array of False. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = ma.masked_equal([[1,2],[3,4]], 2) - >>> a - masked_array( - data=[[1, --], - [3, 4]], - mask=[[False, True], - [False, False]], - fill_value=2) - >>> ma.getdata(a) - array([[1, 2], - [3, 4]]) - - Equivalently use the ``MaskedArray`` `data` attribute. 
- - >>> a.data - array([[1, 2], - [3, 4]]) - - """ - try: - data = a._data - except AttributeError: - data = np.array(a, copy=False, subok=subok) - if not subok: - return data.view(ndarray) - return data - - -get_data = getdata - - -def fix_invalid(a, mask=nomask, copy=True, fill_value=None): - """ - Return input with invalid data masked and replaced by a fill value. - - Invalid data means values of `nan`, `inf`, etc. - - Parameters - ---------- - a : array_like - Input array, a (subclass of) ndarray. - mask : sequence, optional - Mask. Must be convertible to an array of booleans with the same - shape as `data`. True indicates a masked (i.e. invalid) data. - copy : bool, optional - Whether to use a copy of `a` (True) or to fix `a` in place (False). - Default is True. - fill_value : scalar, optional - Value used for fixing invalid data. Default is None, in which case - the ``a.fill_value`` is used. - - Returns - ------- - b : MaskedArray - The input array with invalid entries fixed. - - Notes - ----- - A copy is performed by default. 
- - Examples - -------- - >>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3) - >>> x - masked_array(data=[--, -1.0, nan, inf], - mask=[ True, False, False, False], - fill_value=1e+20) - >>> np.ma.fix_invalid(x) - masked_array(data=[--, -1.0, --, --], - mask=[ True, False, True, True], - fill_value=1e+20) - - >>> fixed = np.ma.fix_invalid(x) - >>> fixed.data - array([ 1.e+00, -1.e+00, 1.e+20, 1.e+20]) - >>> x.data - array([ 1., -1., nan, inf]) - - """ - a = masked_array(a, copy=copy, mask=mask, subok=True) - invalid = np.logical_not(np.isfinite(a._data)) - if not invalid.any(): - return a - a._mask |= invalid - if fill_value is None: - fill_value = a.fill_value - a._data[invalid] = fill_value - return a - -def is_string_or_list_of_strings(val): - return (isinstance(val, basestring) or - (isinstance(val, list) and val and - builtins.all(isinstance(s, basestring) for s in val))) - -############################################################################### -# Ufuncs # -############################################################################### - - -ufunc_domain = {} -ufunc_fills = {} - - -class _DomainCheckInterval(object): - """ - Define a valid interval, so that : - - ``domain_check_interval(a,b)(x) == True`` where - ``x < a`` or ``x > b``. - - """ - - def __init__(self, a, b): - "domain_check_interval(a,b)(x) = true where x < a or y > b" - if a > b: - (a, b) = (b, a) - self.a = a - self.b = b - - def __call__(self, x): - "Execute the call behavior." - # nans at masked positions cause RuntimeWarnings, even though - # they are masked. To avoid this we suppress warnings. 
- with np.errstate(invalid='ignore'): - return umath.logical_or(umath.greater(x, self.b), - umath.less(x, self.a)) - - -class _DomainTan(object): - """ - Define a valid interval for the `tan` function, so that: - - ``domain_tan(eps) = True`` where ``abs(cos(x)) < eps`` - - """ - - def __init__(self, eps): - "domain_tan(eps) = true where abs(cos(x)) < eps)" - self.eps = eps - - def __call__(self, x): - "Executes the call behavior." - with np.errstate(invalid='ignore'): - return umath.less(umath.absolute(umath.cos(x)), self.eps) - - -class _DomainSafeDivide(object): - """ - Define a domain for safe division. - - """ - - def __init__(self, tolerance=None): - self.tolerance = tolerance - - def __call__(self, a, b): - # Delay the selection of the tolerance to here in order to reduce numpy - # import times. The calculation of these parameters is a substantial - # component of numpy's import time. - if self.tolerance is None: - self.tolerance = np.finfo(float).tiny - # don't call ma ufuncs from __array_wrap__ which would fail for scalars - a, b = np.asarray(a), np.asarray(b) - with np.errstate(invalid='ignore'): - return umath.absolute(a) * self.tolerance >= umath.absolute(b) - - -class _DomainGreater(object): - """ - DomainGreater(v)(x) is True where x <= v. - - """ - - def __init__(self, critical_value): - "DomainGreater(v)(x) = true where x <= v" - self.critical_value = critical_value - - def __call__(self, x): - "Executes the call behavior." - with np.errstate(invalid='ignore'): - return umath.less_equal(x, self.critical_value) - - -class _DomainGreaterEqual(object): - """ - DomainGreaterEqual(v)(x) is True where x < v. - - """ - - def __init__(self, critical_value): - "DomainGreaterEqual(v)(x) = true where x < v" - self.critical_value = critical_value - - def __call__(self, x): - "Executes the call behavior." 
- with np.errstate(invalid='ignore'): - return umath.less(x, self.critical_value) - - -class _MaskedUFunc(object): - def __init__(self, ufunc): - self.f = ufunc - self.__doc__ = ufunc.__doc__ - self.__name__ = ufunc.__name__ - - def __str__(self): - return "Masked version of {}".format(self.f) - - -class _MaskedUnaryOperation(_MaskedUFunc): - """ - Defines masked version of unary operations, where invalid values are - pre-masked. - - Parameters - ---------- - mufunc : callable - The function for which to define a masked version. Made available - as ``_MaskedUnaryOperation.f``. - fill : scalar, optional - Filling value, default is 0. - domain : class instance - Domain for the function. Should be one of the ``_Domain*`` - classes. Default is None. - - """ - - def __init__(self, mufunc, fill=0, domain=None): - super(_MaskedUnaryOperation, self).__init__(mufunc) - self.fill = fill - self.domain = domain - ufunc_domain[mufunc] = domain - ufunc_fills[mufunc] = fill - - def __call__(self, a, *args, **kwargs): - """ - Execute the call behavior. - - """ - d = getdata(a) - # Deal with domain - if self.domain is not None: - # Case 1.1. : Domained function - # nans at masked positions cause RuntimeWarnings, even though - # they are masked. To avoid this we suppress warnings. - with np.errstate(divide='ignore', invalid='ignore'): - result = self.f(d, *args, **kwargs) - # Make a mask - m = ~umath.isfinite(result) - m |= self.domain(d) - m |= getmask(a) - else: - # Case 1.2. : Function without a domain - # Get the result and the mask - with np.errstate(divide='ignore', invalid='ignore'): - result = self.f(d, *args, **kwargs) - m = getmask(a) - - if not result.ndim: - # Case 2.1. : The result is scalarscalar - if m: - return masked - return result - - if m is not nomask: - # Case 2.2. 
The result is an array - # We need to fill the invalid data back w/ the input Now, - # that's plain silly: in C, we would just skip the element and - # keep the original, but we do have to do it that way in Python - - # In case result has a lower dtype than the inputs (as in - # equal) - try: - np.copyto(result, d, where=m) - except TypeError: - pass - # Transform to - masked_result = result.view(get_masked_subclass(a)) - masked_result._mask = m - masked_result._update_from(a) - return masked_result - - -class _MaskedBinaryOperation(_MaskedUFunc): - """ - Define masked version of binary operations, where invalid - values are pre-masked. - - Parameters - ---------- - mbfunc : function - The function for which to define a masked version. Made available - as ``_MaskedBinaryOperation.f``. - domain : class instance - Default domain for the function. Should be one of the ``_Domain*`` - classes. Default is None. - fillx : scalar, optional - Filling value for the first argument, default is 0. - filly : scalar, optional - Filling value for the second argument, default is 0. - - """ - - def __init__(self, mbfunc, fillx=0, filly=0): - """ - abfunc(fillx, filly) must be defined. - - abfunc(x, filly) = x for all x to enable reduce. - - """ - super(_MaskedBinaryOperation, self).__init__(mbfunc) - self.fillx = fillx - self.filly = filly - ufunc_domain[mbfunc] = None - ufunc_fills[mbfunc] = (fillx, filly) - - def __call__(self, a, b, *args, **kwargs): - """ - Execute the call behavior. - - """ - # Get the data, as ndarray - (da, db) = (getdata(a), getdata(b)) - # Get the result - with np.errstate(): - np.seterr(divide='ignore', invalid='ignore') - result = self.f(da, db, *args, **kwargs) - # Get the mask for the result - (ma, mb) = (getmask(a), getmask(b)) - if ma is nomask: - if mb is nomask: - m = nomask - else: - m = umath.logical_or(getmaskarray(a), mb) - elif mb is nomask: - m = umath.logical_or(ma, getmaskarray(b)) - else: - m = umath.logical_or(ma, mb) - - # Case 1. 
: scalar - if not result.ndim: - if m: - return masked - return result - - # Case 2. : array - # Revert result to da where masked - if m is not nomask and m.any(): - # any errors, just abort; impossible to guarantee masked values - try: - np.copyto(result, da, casting='unsafe', where=m) - except Exception: - pass - - # Transforms to a (subclass of) MaskedArray - masked_result = result.view(get_masked_subclass(a, b)) - masked_result._mask = m - if isinstance(a, MaskedArray): - masked_result._update_from(a) - elif isinstance(b, MaskedArray): - masked_result._update_from(b) - return masked_result - - def reduce(self, target, axis=0, dtype=None): - """ - Reduce `target` along the given `axis`. - - """ - tclass = get_masked_subclass(target) - m = getmask(target) - t = filled(target, self.filly) - if t.shape == (): - t = t.reshape(1) - if m is not nomask: - m = make_mask(m, copy=True) - m.shape = (1,) - - if m is nomask: - tr = self.f.reduce(t, axis) - mr = nomask - else: - tr = self.f.reduce(t, axis, dtype=dtype or t.dtype) - mr = umath.logical_and.reduce(m, axis) - - if not tr.shape: - if mr: - return masked - else: - return tr - masked_tr = tr.view(tclass) - masked_tr._mask = mr - return masked_tr - - def outer(self, a, b): - """ - Return the function applied to the outer product of a and b. - - """ - (da, db) = (getdata(a), getdata(b)) - d = self.f.outer(da, db) - ma = getmask(a) - mb = getmask(b) - if ma is nomask and mb is nomask: - m = nomask - else: - ma = getmaskarray(a) - mb = getmaskarray(b) - m = umath.logical_or.outer(ma, mb) - if (not m.ndim) and m: - return masked - if m is not nomask: - np.copyto(d, da, where=m) - if not d.shape: - return d - masked_d = d.view(get_masked_subclass(a, b)) - masked_d._mask = m - return masked_d - - def accumulate(self, target, axis=0): - """Accumulate `target` along `axis` after filling with y fill - value. 
- - """ - tclass = get_masked_subclass(target) - t = filled(target, self.filly) - result = self.f.accumulate(t, axis) - masked_result = result.view(tclass) - return masked_result - - - -class _DomainedBinaryOperation(_MaskedUFunc): - """ - Define binary operations that have a domain, like divide. - - They have no reduce, outer or accumulate. - - Parameters - ---------- - mbfunc : function - The function for which to define a masked version. Made available - as ``_DomainedBinaryOperation.f``. - domain : class instance - Default domain for the function. Should be one of the ``_Domain*`` - classes. - fillx : scalar, optional - Filling value for the first argument, default is 0. - filly : scalar, optional - Filling value for the second argument, default is 0. - - """ - - def __init__(self, dbfunc, domain, fillx=0, filly=0): - """abfunc(fillx, filly) must be defined. - abfunc(x, filly) = x for all x to enable reduce. - """ - super(_DomainedBinaryOperation, self).__init__(dbfunc) - self.domain = domain - self.fillx = fillx - self.filly = filly - ufunc_domain[dbfunc] = domain - ufunc_fills[dbfunc] = (fillx, filly) - - def __call__(self, a, b, *args, **kwargs): - "Execute the call behavior." 
- # Get the data - (da, db) = (getdata(a), getdata(b)) - # Get the result - with np.errstate(divide='ignore', invalid='ignore'): - result = self.f(da, db, *args, **kwargs) - # Get the mask as a combination of the source masks and invalid - m = ~umath.isfinite(result) - m |= getmask(a) - m |= getmask(b) - # Apply the domain - domain = ufunc_domain.get(self.f, None) - if domain is not None: - m |= domain(da, db) - # Take care of the scalar case first - if not m.ndim: - if m: - return masked - else: - return result - # When the mask is True, put back da if possible - # any errors, just abort; impossible to guarantee masked values - try: - np.copyto(result, 0, casting='unsafe', where=m) - # avoid using "*" since this may be overlaid - masked_da = umath.multiply(m, da) - # only add back if it can be cast safely - if np.can_cast(masked_da.dtype, result.dtype, casting='safe'): - result += masked_da - except Exception: - pass - - # Transforms to a (subclass of) MaskedArray - masked_result = result.view(get_masked_subclass(a, b)) - masked_result._mask = m - if isinstance(a, MaskedArray): - masked_result._update_from(a) - elif isinstance(b, MaskedArray): - masked_result._update_from(b) - return masked_result - - -# Unary ufuncs -exp = _MaskedUnaryOperation(umath.exp) -conjugate = _MaskedUnaryOperation(umath.conjugate) -sin = _MaskedUnaryOperation(umath.sin) -cos = _MaskedUnaryOperation(umath.cos) -arctan = _MaskedUnaryOperation(umath.arctan) -arcsinh = _MaskedUnaryOperation(umath.arcsinh) -sinh = _MaskedUnaryOperation(umath.sinh) -cosh = _MaskedUnaryOperation(umath.cosh) -tanh = _MaskedUnaryOperation(umath.tanh) -abs = absolute = _MaskedUnaryOperation(umath.absolute) -angle = _MaskedUnaryOperation(angle) # from numpy.lib.function_base -fabs = _MaskedUnaryOperation(umath.fabs) -negative = _MaskedUnaryOperation(umath.negative) -floor = _MaskedUnaryOperation(umath.floor) -ceil = _MaskedUnaryOperation(umath.ceil) -around = _MaskedUnaryOperation(np.round_) -logical_not = 
_MaskedUnaryOperation(umath.logical_not) - -# Domained unary ufuncs -sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0, - _DomainGreaterEqual(0.0)) -log = _MaskedUnaryOperation(umath.log, 1.0, - _DomainGreater(0.0)) -log2 = _MaskedUnaryOperation(umath.log2, 1.0, - _DomainGreater(0.0)) -log10 = _MaskedUnaryOperation(umath.log10, 1.0, - _DomainGreater(0.0)) -tan = _MaskedUnaryOperation(umath.tan, 0.0, - _DomainTan(1e-35)) -arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0, - _DomainCheckInterval(-1.0, 1.0)) -arccos = _MaskedUnaryOperation(umath.arccos, 0.0, - _DomainCheckInterval(-1.0, 1.0)) -arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0, - _DomainGreaterEqual(1.0)) -arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0, - _DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15)) - -# Binary ufuncs -add = _MaskedBinaryOperation(umath.add) -subtract = _MaskedBinaryOperation(umath.subtract) -multiply = _MaskedBinaryOperation(umath.multiply, 1, 1) -arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0) -equal = _MaskedBinaryOperation(umath.equal) -equal.reduce = None -not_equal = _MaskedBinaryOperation(umath.not_equal) -not_equal.reduce = None -less_equal = _MaskedBinaryOperation(umath.less_equal) -less_equal.reduce = None -greater_equal = _MaskedBinaryOperation(umath.greater_equal) -greater_equal.reduce = None -less = _MaskedBinaryOperation(umath.less) -less.reduce = None -greater = _MaskedBinaryOperation(umath.greater) -greater.reduce = None -logical_and = _MaskedBinaryOperation(umath.logical_and) -alltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce -logical_or = _MaskedBinaryOperation(umath.logical_or) -sometrue = logical_or.reduce -logical_xor = _MaskedBinaryOperation(umath.logical_xor) -bitwise_and = _MaskedBinaryOperation(umath.bitwise_and) -bitwise_or = _MaskedBinaryOperation(umath.bitwise_or) -bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor) -hypot = _MaskedBinaryOperation(umath.hypot) - -# Domained binary ufuncs -divide = 
_DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1) -true_divide = _DomainedBinaryOperation(umath.true_divide, - _DomainSafeDivide(), 0, 1) -floor_divide = _DomainedBinaryOperation(umath.floor_divide, - _DomainSafeDivide(), 0, 1) -remainder = _DomainedBinaryOperation(umath.remainder, - _DomainSafeDivide(), 0, 1) -fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1) -mod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1) - - -############################################################################### -# Mask creation functions # -############################################################################### - - -def _replace_dtype_fields_recursive(dtype, primitive_dtype): - "Private function allowing recursion in _replace_dtype_fields." - _recurse = _replace_dtype_fields_recursive - - # Do we have some name fields ? - if dtype.names is not None: - descr = [] - for name in dtype.names: - field = dtype.fields[name] - if len(field) == 3: - # Prepend the title to the name - name = (field[-1], name) - descr.append((name, _recurse(field[0], primitive_dtype))) - new_dtype = np.dtype(descr) - - # Is this some kind of composite a la (float,2) - elif dtype.subdtype: - descr = list(dtype.subdtype) - descr[0] = _recurse(dtype.subdtype[0], primitive_dtype) - new_dtype = np.dtype(tuple(descr)) - - # this is a primitive type, so do a direct replacement - else: - new_dtype = primitive_dtype - - # preserve identity of dtypes - if new_dtype == dtype: - new_dtype = dtype - - return new_dtype - - -def _replace_dtype_fields(dtype, primitive_dtype): - """ - Construct a dtype description list from a given dtype. - - Returns a new dtype object, with all fields and subtypes in the given type - recursively replaced with `primitive_dtype`. - - Arguments are coerced to dtypes first. 
- """ - dtype = np.dtype(dtype) - primitive_dtype = np.dtype(primitive_dtype) - return _replace_dtype_fields_recursive(dtype, primitive_dtype) - - -def make_mask_descr(ndtype): - """ - Construct a dtype description list from a given dtype. - - Returns a new dtype object, with the type of all fields in `ndtype` to a - boolean type. Field names are not altered. - - Parameters - ---------- - ndtype : dtype - The dtype to convert. - - Returns - ------- - result : dtype - A dtype that looks like `ndtype`, the type of all fields is boolean. - - Examples - -------- - >>> import numpy.ma as ma - >>> dtype = np.dtype({'names':['foo', 'bar'], - ... 'formats':[np.float32, np.int64]}) - >>> dtype - dtype([('foo', '>> ma.make_mask_descr(dtype) - dtype([('foo', '|b1'), ('bar', '|b1')]) - >>> ma.make_mask_descr(np.float32) - dtype('bool') - - """ - return _replace_dtype_fields(ndtype, MaskType) - - -def getmask(a): - """ - Return the mask of a masked array, or nomask. - - Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the - mask is not `nomask`, else return `nomask`. To guarantee a full array - of booleans of the same shape as a, use `getmaskarray`. - - Parameters - ---------- - a : array_like - Input `MaskedArray` for which the mask is required. - - See Also - -------- - getdata : Return the data of a masked array as an ndarray. - getmaskarray : Return the mask of a masked array, or full array of False. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = ma.masked_equal([[1,2],[3,4]], 2) - >>> a - masked_array( - data=[[1, --], - [3, 4]], - mask=[[False, True], - [False, False]], - fill_value=2) - >>> ma.getmask(a) - array([[False, True], - [False, False]]) - - Equivalently use the `MaskedArray` `mask` attribute. 
- - >>> a.mask - array([[False, True], - [False, False]]) - - Result when mask == `nomask` - - >>> b = ma.masked_array([[1,2],[3,4]]) - >>> b - masked_array( - data=[[1, 2], - [3, 4]], - mask=False, - fill_value=999999) - >>> ma.nomask - False - >>> ma.getmask(b) == ma.nomask - True - >>> b.mask == ma.nomask - True - - """ - return getattr(a, '_mask', nomask) - - -get_mask = getmask - - -def getmaskarray(arr): - """ - Return the mask of a masked array, or full boolean array of False. - - Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and - the mask is not `nomask`, else return a full boolean array of False of - the same shape as `arr`. - - Parameters - ---------- - arr : array_like - Input `MaskedArray` for which the mask is required. - - See Also - -------- - getmask : Return the mask of a masked array, or nomask. - getdata : Return the data of a masked array as an ndarray. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = ma.masked_equal([[1,2],[3,4]], 2) - >>> a - masked_array( - data=[[1, --], - [3, 4]], - mask=[[False, True], - [False, False]], - fill_value=2) - >>> ma.getmaskarray(a) - array([[False, True], - [False, False]]) - - Result when mask == ``nomask`` - - >>> b = ma.masked_array([[1,2],[3,4]]) - >>> b - masked_array( - data=[[1, 2], - [3, 4]], - mask=False, - fill_value=999999) - >>> ma.getmaskarray(b) - array([[False, False], - [False, False]]) - - """ - mask = getmask(arr) - if mask is nomask: - mask = make_mask_none(np.shape(arr), getattr(arr, 'dtype', None)) - return mask - - -def is_mask(m): - """ - Return True if m is a valid, standard mask. - - This function does not check the contents of the input, only that the - type is MaskType. In particular, this function returns False if the - mask has a flexible dtype. - - Parameters - ---------- - m : array_like - Array to test. - - Returns - ------- - result : bool - True if `m.dtype.type` is MaskType, False otherwise. 
- - See Also - -------- - isMaskedArray : Test whether input is an instance of MaskedArray. - - Examples - -------- - >>> import numpy.ma as ma - >>> m = ma.masked_equal([0, 1, 0, 2, 3], 0) - >>> m - masked_array(data=[--, 1, --, 2, 3], - mask=[ True, False, True, False, False], - fill_value=0) - >>> ma.is_mask(m) - False - >>> ma.is_mask(m.mask) - True - - Input must be an ndarray (or have similar attributes) - for it to be considered a valid mask. - - >>> m = [False, True, False] - >>> ma.is_mask(m) - False - >>> m = np.array([False, True, False]) - >>> m - array([False, True, False]) - >>> ma.is_mask(m) - True - - Arrays with complex dtypes don't return True. - - >>> dtype = np.dtype({'names':['monty', 'pithon'], - ... 'formats':[bool, bool]}) - >>> dtype - dtype([('monty', '|b1'), ('pithon', '|b1')]) - >>> m = np.array([(True, False), (False, True), (True, False)], - ... dtype=dtype) - >>> m - array([( True, False), (False, True), ( True, False)], - dtype=[('monty', '?'), ('pithon', '?')]) - >>> ma.is_mask(m) - False - - """ - try: - return m.dtype.type is MaskType - except AttributeError: - return False - - -def _shrink_mask(m): - """ - Shrink a mask to nomask if possible - """ - if m.dtype.names is None and not m.any(): - return nomask - else: - return m - - -def make_mask(m, copy=False, shrink=True, dtype=MaskType): - """ - Create a boolean mask from an array. - - Return `m` as a boolean mask, creating a copy if necessary or requested. - The function can accept any sequence that is convertible to integers, - or ``nomask``. Does not require that contents must be 0s and 1s, values - of 0 are interpreted as False, everything else as True. - - Parameters - ---------- - m : array_like - Potential mask. - copy : bool, optional - Whether to return a copy of `m` (True) or `m` itself (False). - shrink : bool, optional - Whether to shrink `m` to ``nomask`` if all its values are False. - dtype : dtype, optional - Data-type of the output mask. 
By default, the output mask has a - dtype of MaskType (bool). If the dtype is flexible, each field has - a boolean dtype. This is ignored when `m` is ``nomask``, in which - case ``nomask`` is always returned. - - Returns - ------- - result : ndarray - A boolean mask derived from `m`. - - Examples - -------- - >>> import numpy.ma as ma - >>> m = [True, False, True, True] - >>> ma.make_mask(m) - array([ True, False, True, True]) - >>> m = [1, 0, 1, 1] - >>> ma.make_mask(m) - array([ True, False, True, True]) - >>> m = [1, 0, 2, -3] - >>> ma.make_mask(m) - array([ True, False, True, True]) - - Effect of the `shrink` parameter. - - >>> m = np.zeros(4) - >>> m - array([0., 0., 0., 0.]) - >>> ma.make_mask(m) - False - >>> ma.make_mask(m, shrink=False) - array([False, False, False, False]) - - Using a flexible `dtype`. - - >>> m = [1, 0, 1, 1] - >>> n = [0, 1, 0, 0] - >>> arr = [] - >>> for man, mouse in zip(m, n): - ... arr.append((man, mouse)) - >>> arr - [(1, 0), (0, 1), (1, 0), (1, 0)] - >>> dtype = np.dtype({'names':['man', 'mouse'], - ... 'formats':[np.int64, np.int64]}) - >>> arr = np.array(arr, dtype=dtype) - >>> arr - array([(1, 0), (0, 1), (1, 0), (1, 0)], - dtype=[('man', '>> ma.make_mask(arr, dtype=dtype) - array([(True, False), (False, True), (True, False), (True, False)], - dtype=[('man', '|b1'), ('mouse', '|b1')]) - - """ - if m is nomask: - return nomask - - # Make sure the input dtype is valid. - dtype = make_mask_descr(dtype) - - # legacy boolean special case: "existence of fields implies true" - if isinstance(m, ndarray) and m.dtype.fields and dtype == np.bool_: - return np.ones(m.shape, dtype=dtype) - - # Fill the mask in case there are missing data; turn it into an ndarray. - result = np.array(filled(m, True), copy=copy, dtype=dtype, subok=True) - # Bas les masques ! - if shrink: - result = _shrink_mask(result) - return result - - -def make_mask_none(newshape, dtype=None): - """ - Return a boolean mask of the given shape, filled with False. 
- - This function returns a boolean ndarray with all entries False, that can - be used in common mask manipulations. If a complex dtype is specified, the - type of each field is converted to a boolean type. - - Parameters - ---------- - newshape : tuple - A tuple indicating the shape of the mask. - dtype : {None, dtype}, optional - If None, use a MaskType instance. Otherwise, use a new datatype with - the same fields as `dtype`, converted to boolean types. - - Returns - ------- - result : ndarray - An ndarray of appropriate shape and dtype, filled with False. - - See Also - -------- - make_mask : Create a boolean mask from an array. - make_mask_descr : Construct a dtype description list from a given dtype. - - Examples - -------- - >>> import numpy.ma as ma - >>> ma.make_mask_none((3,)) - array([False, False, False]) - - Defining a more complex dtype. - - >>> dtype = np.dtype({'names':['foo', 'bar'], - ... 'formats':[np.float32, np.int64]}) - >>> dtype - dtype([('foo', '>> ma.make_mask_none((3,), dtype=dtype) - array([(False, False), (False, False), (False, False)], - dtype=[('foo', '|b1'), ('bar', '|b1')]) - - """ - if dtype is None: - result = np.zeros(newshape, dtype=MaskType) - else: - result = np.zeros(newshape, dtype=make_mask_descr(dtype)) - return result - - -def mask_or(m1, m2, copy=False, shrink=True): - """ - Combine two masks with the ``logical_or`` operator. - - The result may be a view on `m1` or `m2` if the other is `nomask` - (i.e. False). - - Parameters - ---------- - m1, m2 : array_like - Input masks. - copy : bool, optional - If copy is False and one of the inputs is `nomask`, return a view - of the other input mask. Defaults to False. - shrink : bool, optional - Whether to shrink the output to `nomask` if all its values are - False. Defaults to True. - - Returns - ------- - mask : output mask - The result masks values that are masked in either `m1` or `m2`. - - Raises - ------ - ValueError - If `m1` and `m2` have different flexible dtypes. 
- - Examples - -------- - >>> m1 = np.ma.make_mask([0, 1, 1, 0]) - >>> m2 = np.ma.make_mask([1, 0, 0, 0]) - >>> np.ma.mask_or(m1, m2) - array([ True, True, True, False]) - - """ - - @recursive - def _recursive_mask_or(self, m1, m2, newmask): - names = m1.dtype.names - for name in names: - current1 = m1[name] - if current1.dtype.names is not None: - self(current1, m2[name], newmask[name]) - else: - umath.logical_or(current1, m2[name], newmask[name]) - return - - if (m1 is nomask) or (m1 is False): - dtype = getattr(m2, 'dtype', MaskType) - return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype) - if (m2 is nomask) or (m2 is False): - dtype = getattr(m1, 'dtype', MaskType) - return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype) - if m1 is m2 and is_mask(m1): - return m1 - (dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None)) - if dtype1 != dtype2: - raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2)) - if dtype1.names is not None: - # Allocate an output mask array with the properly broadcast shape. - newmask = np.empty(np.broadcast(m1, m2).shape, dtype1) - _recursive_mask_or(m1, m2, newmask) - return newmask - return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink) - - -def flatten_mask(mask): - """ - Returns a completely flattened version of the mask, where nested fields - are collapsed. - - Parameters - ---------- - mask : array_like - Input array, which will be interpreted as booleans. - - Returns - ------- - flattened_mask : ndarray of bools - The flattened input. 
- - Examples - -------- - >>> mask = np.array([0, 0, 1]) - >>> np.ma.flatten_mask(mask) - array([False, False, True]) - - >>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) - >>> np.ma.flatten_mask(mask) - array([False, False, False, True]) - - >>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] - >>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype) - >>> np.ma.flatten_mask(mask) - array([False, False, False, False, False, True]) - - """ - - def _flatmask(mask): - "Flatten the mask and returns a (maybe nested) sequence of booleans." - mnames = mask.dtype.names - if mnames is not None: - return [flatten_mask(mask[name]) for name in mnames] - else: - return mask - - def _flatsequence(sequence): - "Generates a flattened version of the sequence." - try: - for element in sequence: - if hasattr(element, '__iter__'): - for f in _flatsequence(element): - yield f - else: - yield element - except TypeError: - yield sequence - - mask = np.asarray(mask) - flattened = _flatsequence(_flatmask(mask)) - return np.array([_ for _ in flattened], dtype=bool) - - -def _check_mask_axis(mask, axis, keepdims=np._NoValue): - "Check whether there are masked values along the given axis" - kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} - if mask is not nomask: - return mask.all(axis=axis, **kwargs) - return nomask - - -############################################################################### -# Masking functions # -############################################################################### - -def masked_where(condition, a, copy=True): - """ - Mask an array where a condition is met. - - Return `a` as an array masked where `condition` is True. - Any masked values of `a` or `condition` are also masked in the output. - - Parameters - ---------- - condition : array_like - Masking condition. When `condition` tests floating point values for - equality, consider using ``masked_values`` instead. - a : array_like - Array to mask. 
- copy : bool - If True (default) make a copy of `a` in the result. If False modify - `a` in place and return a view. - - Returns - ------- - result : MaskedArray - The result of masking `a` where `condition` is True. - - See Also - -------- - masked_values : Mask using floating point equality. - masked_equal : Mask where equal to a given value. - masked_not_equal : Mask where `not` equal to a given value. - masked_less_equal : Mask where less than or equal to a given value. - masked_greater_equal : Mask where greater than or equal to a given value. - masked_less : Mask where less than a given value. - masked_greater : Mask where greater than a given value. - masked_inside : Mask inside a given interval. - masked_outside : Mask outside a given interval. - masked_invalid : Mask invalid values (NaNs or infs). - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(4) - >>> a - array([0, 1, 2, 3]) - >>> ma.masked_where(a <= 2, a) - masked_array(data=[--, --, --, 3], - mask=[ True, True, True, False], - fill_value=999999) - - Mask array `b` conditional on `a`. - - >>> b = ['a', 'b', 'c', 'd'] - >>> ma.masked_where(a == 2, b) - masked_array(data=['a', 'b', --, 'd'], - mask=[False, False, True, False], - fill_value='N/A', - dtype='>> c = ma.masked_where(a <= 2, a) - >>> c - masked_array(data=[--, --, --, 3], - mask=[ True, True, True, False], - fill_value=999999) - >>> c[0] = 99 - >>> c - masked_array(data=[99, --, --, 3], - mask=[False, True, True, False], - fill_value=999999) - >>> a - array([0, 1, 2, 3]) - >>> c = ma.masked_where(a <= 2, a, copy=False) - >>> c[0] = 99 - >>> c - masked_array(data=[99, --, --, 3], - mask=[False, True, True, False], - fill_value=999999) - >>> a - array([99, 1, 2, 3]) - - When `condition` or `a` contain masked values. 
- - >>> a = np.arange(4) - >>> a = ma.masked_where(a == 2, a) - >>> a - masked_array(data=[0, 1, --, 3], - mask=[False, False, True, False], - fill_value=999999) - >>> b = np.arange(4) - >>> b = ma.masked_where(b == 0, b) - >>> b - masked_array(data=[--, 1, 2, 3], - mask=[ True, False, False, False], - fill_value=999999) - >>> ma.masked_where(a == 3, b) - masked_array(data=[--, 1, --, --], - mask=[ True, False, True, True], - fill_value=999999) - - """ - # Make sure that condition is a valid standard-type mask. - cond = make_mask(condition, shrink=False) - a = np.array(a, copy=copy, subok=True) - - (cshape, ashape) = (cond.shape, a.shape) - if cshape and cshape != ashape: - raise IndexError("Inconsistent shape between the condition and the input" - " (got %s and %s)" % (cshape, ashape)) - if hasattr(a, '_mask'): - cond = mask_or(cond, a._mask) - cls = type(a) - else: - cls = MaskedArray - result = a.view(cls) - # Assign to *.mask so that structured masks are handled correctly. - result.mask = _shrink_mask(cond) - return result - - -def masked_greater(x, value, copy=True): - """ - Mask an array where greater than a given value. - - This function is a shortcut to ``masked_where``, with - `condition` = (x > value). - - See Also - -------- - masked_where : Mask where a condition is met. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(4) - >>> a - array([0, 1, 2, 3]) - >>> ma.masked_greater(a, 2) - masked_array(data=[0, 1, 2, --], - mask=[False, False, False, True], - fill_value=999999) - - """ - return masked_where(greater(x, value), x, copy=copy) - - -def masked_greater_equal(x, value, copy=True): - """ - Mask an array where greater than or equal to a given value. - - This function is a shortcut to ``masked_where``, with - `condition` = (x >= value). - - See Also - -------- - masked_where : Mask where a condition is met. 
- - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(4) - >>> a - array([0, 1, 2, 3]) - >>> ma.masked_greater_equal(a, 2) - masked_array(data=[0, 1, --, --], - mask=[False, False, True, True], - fill_value=999999) - - """ - return masked_where(greater_equal(x, value), x, copy=copy) - - -def masked_less(x, value, copy=True): - """ - Mask an array where less than a given value. - - This function is a shortcut to ``masked_where``, with - `condition` = (x < value). - - See Also - -------- - masked_where : Mask where a condition is met. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(4) - >>> a - array([0, 1, 2, 3]) - >>> ma.masked_less(a, 2) - masked_array(data=[--, --, 2, 3], - mask=[ True, True, False, False], - fill_value=999999) - - """ - return masked_where(less(x, value), x, copy=copy) - - -def masked_less_equal(x, value, copy=True): - """ - Mask an array where less than or equal to a given value. - - This function is a shortcut to ``masked_where``, with - `condition` = (x <= value). - - See Also - -------- - masked_where : Mask where a condition is met. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(4) - >>> a - array([0, 1, 2, 3]) - >>> ma.masked_less_equal(a, 2) - masked_array(data=[--, --, --, 3], - mask=[ True, True, True, False], - fill_value=999999) - - """ - return masked_where(less_equal(x, value), x, copy=copy) - - -def masked_not_equal(x, value, copy=True): - """ - Mask an array where `not` equal to a given value. - - This function is a shortcut to ``masked_where``, with - `condition` = (x != value). - - See Also - -------- - masked_where : Mask where a condition is met. 
- - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(4) - >>> a - array([0, 1, 2, 3]) - >>> ma.masked_not_equal(a, 2) - masked_array(data=[--, --, 2, --], - mask=[ True, True, False, True], - fill_value=999999) - - """ - return masked_where(not_equal(x, value), x, copy=copy) - - -def masked_equal(x, value, copy=True): - """ - Mask an array where equal to a given value. - - This function is a shortcut to ``masked_where``, with - `condition` = (x == value). For floating point arrays, - consider using ``masked_values(x, value)``. - - See Also - -------- - masked_where : Mask where a condition is met. - masked_values : Mask using floating point equality. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(4) - >>> a - array([0, 1, 2, 3]) - >>> ma.masked_equal(a, 2) - masked_array(data=[0, 1, --, 3], - mask=[False, False, True, False], - fill_value=2) - - """ - output = masked_where(equal(x, value), x, copy=copy) - output.fill_value = value - return output - - -def masked_inside(x, v1, v2, copy=True): - """ - Mask an array inside a given interval. - - Shortcut to ``masked_where``, where `condition` is True for `x` inside - the interval [v1,v2] (v1 <= x <= v2). The boundaries `v1` and `v2` - can be given in either order. - - See Also - -------- - masked_where : Mask where a condition is met. - - Notes - ----- - The array `x` is prefilled with its filling value. - - Examples - -------- - >>> import numpy.ma as ma - >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] - >>> ma.masked_inside(x, -0.3, 0.3) - masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1], - mask=[False, False, True, True, False, False], - fill_value=1e+20) - - The order of `v1` and `v2` doesn't matter. 
- - >>> ma.masked_inside(x, 0.3, -0.3) - masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1], - mask=[False, False, True, True, False, False], - fill_value=1e+20) - - """ - if v2 < v1: - (v1, v2) = (v2, v1) - xf = filled(x) - condition = (xf >= v1) & (xf <= v2) - return masked_where(condition, x, copy=copy) - - -def masked_outside(x, v1, v2, copy=True): - """ - Mask an array outside a given interval. - - Shortcut to ``masked_where``, where `condition` is True for `x` outside - the interval [v1,v2] (x < v1)|(x > v2). - The boundaries `v1` and `v2` can be given in either order. - - See Also - -------- - masked_where : Mask where a condition is met. - - Notes - ----- - The array `x` is prefilled with its filling value. - - Examples - -------- - >>> import numpy.ma as ma - >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] - >>> ma.masked_outside(x, -0.3, 0.3) - masked_array(data=[--, --, 0.01, 0.2, --, --], - mask=[ True, True, False, False, True, True], - fill_value=1e+20) - - The order of `v1` and `v2` doesn't matter. - - >>> ma.masked_outside(x, 0.3, -0.3) - masked_array(data=[--, --, 0.01, 0.2, --, --], - mask=[ True, True, False, False, True, True], - fill_value=1e+20) - - """ - if v2 < v1: - (v1, v2) = (v2, v1) - xf = filled(x) - condition = (xf < v1) | (xf > v2) - return masked_where(condition, x, copy=copy) - - -def masked_object(x, value, copy=True, shrink=True): - """ - Mask the array `x` where the data are exactly equal to value. - - This function is similar to `masked_values`, but only suitable - for object arrays: for floating point, use `masked_values` instead. - - Parameters - ---------- - x : array_like - Array to mask - value : object - Comparison value - copy : {True, False}, optional - Whether to return a copy of `x`. - shrink : {True, False}, optional - Whether to collapse a mask full of False to nomask - - Returns - ------- - result : MaskedArray - The result of masking `x` where equal to `value`. 
- - See Also - -------- - masked_where : Mask where a condition is met. - masked_equal : Mask where equal to a given value (integers). - masked_values : Mask using floating point equality. - - Examples - -------- - >>> import numpy.ma as ma - >>> food = np.array(['green_eggs', 'ham'], dtype=object) - >>> # don't eat spoiled food - >>> eat = ma.masked_object(food, 'green_eggs') - >>> eat - masked_array(data=[--, 'ham'], - mask=[ True, False], - fill_value='green_eggs', - dtype=object) - >>> # plain ol` ham is boring - >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object) - >>> eat = ma.masked_object(fresh_food, 'green_eggs') - >>> eat - masked_array(data=['cheese', 'ham', 'pineapple'], - mask=False, - fill_value='green_eggs', - dtype=object) - - Note that `mask` is set to ``nomask`` if possible. - - >>> eat - masked_array(data=['cheese', 'ham', 'pineapple'], - mask=False, - fill_value='green_eggs', - dtype=object) - - """ - if isMaskedArray(x): - condition = umath.equal(x._data, value) - mask = x._mask - else: - condition = umath.equal(np.asarray(x), value) - mask = nomask - mask = mask_or(mask, make_mask(condition, shrink=shrink)) - return masked_array(x, mask=mask, copy=copy, fill_value=value) - - -def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True): - """ - Mask using floating point equality. - - Return a MaskedArray, masked where the data in array `x` are approximately - equal to `value`, determined using `isclose`. The default tolerances for - `masked_values` are the same as those for `isclose`. - - For integer types, exact equality is used, in the same way as - `masked_equal`. - - The fill_value is set to `value` and the mask is set to ``nomask`` if - possible. - - Parameters - ---------- - x : array_like - Array to mask. - value : float - Masking value. - rtol, atol : float, optional - Tolerance parameters passed on to `isclose` - copy : bool, optional - Whether to return a copy of `x`. 
- shrink : bool, optional - Whether to collapse a mask full of False to ``nomask``. - - Returns - ------- - result : MaskedArray - The result of masking `x` where approximately equal to `value`. - - See Also - -------- - masked_where : Mask where a condition is met. - masked_equal : Mask where equal to a given value (integers). - - Examples - -------- - >>> import numpy.ma as ma - >>> x = np.array([1, 1.1, 2, 1.1, 3]) - >>> ma.masked_values(x, 1.1) - masked_array(data=[1.0, --, 2.0, --, 3.0], - mask=[False, True, False, True, False], - fill_value=1.1) - - Note that `mask` is set to ``nomask`` if possible. - - >>> ma.masked_values(x, 1.5) - masked_array(data=[1. , 1.1, 2. , 1.1, 3. ], - mask=False, - fill_value=1.5) - - For integers, the fill value will be different in general to the - result of ``masked_equal``. - - >>> x = np.arange(5) - >>> x - array([0, 1, 2, 3, 4]) - >>> ma.masked_values(x, 2) - masked_array(data=[0, 1, --, 3, 4], - mask=[False, False, True, False, False], - fill_value=2) - >>> ma.masked_equal(x, 2) - masked_array(data=[0, 1, --, 3, 4], - mask=[False, False, True, False, False], - fill_value=2) - - """ - xnew = filled(x, value) - if np.issubdtype(xnew.dtype, np.floating): - mask = np.isclose(xnew, value, atol=atol, rtol=rtol) - else: - mask = umath.equal(xnew, value) - ret = masked_array(xnew, mask=mask, copy=copy, fill_value=value) - if shrink: - ret.shrink_mask() - return ret - - -def masked_invalid(a, copy=True): - """ - Mask an array where invalid values occur (NaNs or infs). - - This function is a shortcut to ``masked_where``, with - `condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved. - Only applies to arrays with a dtype where NaNs or infs make sense - (i.e. floating point types), but accepts any array_like object. - - See Also - -------- - masked_where : Mask where a condition is met. 
- - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(5, dtype=float) - >>> a[2] = np.NaN - >>> a[3] = np.PINF - >>> a - array([ 0., 1., nan, inf, 4.]) - >>> ma.masked_invalid(a) - masked_array(data=[0.0, 1.0, --, --, 4.0], - mask=[False, False, True, True, False], - fill_value=1e+20) - - """ - a = np.array(a, copy=copy, subok=True) - mask = getattr(a, '_mask', None) - if mask is not None: - condition = ~(np.isfinite(getdata(a))) - if mask is not nomask: - condition |= mask - cls = type(a) - else: - condition = ~(np.isfinite(a)) - cls = MaskedArray - result = a.view(cls) - result._mask = condition - return result - - -############################################################################### -# Printing options # -############################################################################### - - -class _MaskedPrintOption(object): - """ - Handle the string used to represent missing data in a masked array. - - """ - - def __init__(self, display): - """ - Create the masked_print_option object. - - """ - self._display = display - self._enabled = True - - def display(self): - """ - Display the string to print for masked values. - - """ - return self._display - - def set_display(self, s): - """ - Set the string to print for masked values. - - """ - self._display = s - - def enabled(self): - """ - Is the use of the display value enabled? - - """ - return self._enabled - - def enable(self, shrink=1): - """ - Set the enabling shrink to `shrink`. - - """ - self._enabled = shrink - - def __str__(self): - return str(self._display) - - __repr__ = __str__ - -# if you single index into a masked location you get this object. -masked_print_option = _MaskedPrintOption('--') - - -def _recursive_printoption(result, mask, printopt): - """ - Puts printoptions in result where mask is True. 
- - Private function allowing for recursion - - """ - names = result.dtype.names - if names is not None: - for name in names: - curdata = result[name] - curmask = mask[name] - _recursive_printoption(curdata, curmask, printopt) - else: - np.copyto(result, printopt, where=mask) - return - -# For better or worse, these end in a newline -_legacy_print_templates = dict( - long_std=textwrap.dedent("""\ - masked_%(name)s(data = - %(data)s, - %(nlen)s mask = - %(mask)s, - %(nlen)s fill_value = %(fill)s) - """), - long_flx=textwrap.dedent("""\ - masked_%(name)s(data = - %(data)s, - %(nlen)s mask = - %(mask)s, - %(nlen)s fill_value = %(fill)s, - %(nlen)s dtype = %(dtype)s) - """), - short_std=textwrap.dedent("""\ - masked_%(name)s(data = %(data)s, - %(nlen)s mask = %(mask)s, - %(nlen)s fill_value = %(fill)s) - """), - short_flx=textwrap.dedent("""\ - masked_%(name)s(data = %(data)s, - %(nlen)s mask = %(mask)s, - %(nlen)s fill_value = %(fill)s, - %(nlen)s dtype = %(dtype)s) - """) -) - -############################################################################### -# MaskedArray class # -############################################################################### - - -def _recursive_filled(a, mask, fill_value): - """ - Recursively fill `a` with `fill_value`. - - """ - names = a.dtype.names - for name in names: - current = a[name] - if current.dtype.names is not None: - _recursive_filled(current, mask[name], fill_value[name]) - else: - np.copyto(current, fill_value[name], where=mask[name]) - - -def flatten_structured_array(a): - """ - Flatten a structured array. - - The data type of the output is chosen such that it can represent all of the - (nested) fields. - - Parameters - ---------- - a : structured array - - Returns - ------- - output : masked array or ndarray - A flattened masked array if the input is a masked array, otherwise a - standard ndarray. 
- - Examples - -------- - >>> ndtype = [('a', int), ('b', float)] - >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype) - >>> np.ma.flatten_structured_array(a) - array([[1., 1.], - [2., 2.]]) - - """ - - def flatten_sequence(iterable): - """ - Flattens a compound of nested iterables. - - """ - for elm in iter(iterable): - if hasattr(elm, '__iter__'): - for f in flatten_sequence(elm): - yield f - else: - yield elm - - a = np.asanyarray(a) - inishape = a.shape - a = a.ravel() - if isinstance(a, MaskedArray): - out = np.array([tuple(flatten_sequence(d.item())) for d in a._data]) - out = out.view(MaskedArray) - out._mask = np.array([tuple(flatten_sequence(d.item())) - for d in getmaskarray(a)]) - else: - out = np.array([tuple(flatten_sequence(d.item())) for d in a]) - if len(inishape) > 1: - newshape = list(out.shape) - newshape[0] = inishape - out.shape = tuple(flatten_sequence(newshape)) - return out - - -def _arraymethod(funcname, onmask=True): - """ - Return a class method wrapper around a basic array method. - - Creates a class method which returns a masked array, where the new - ``_data`` array is the output of the corresponding basic method called - on the original ``_data``. - - If `onmask` is True, the new mask is the output of the method called - on the initial mask. Otherwise, the new mask is just a reference - to the initial mask. - - Parameters - ---------- - funcname : str - Name of the function to apply on data. - onmask : bool - Whether the mask must be processed also (True) or left - alone (False). Default is True. Make available as `_onmask` - attribute. - - Returns - ------- - method : instancemethod - Class method wrapper of the specified basic array method. 
- - """ - def wrapped_method(self, *args, **params): - result = getattr(self._data, funcname)(*args, **params) - result = result.view(type(self)) - result._update_from(self) - mask = self._mask - if not onmask: - result.__setmask__(mask) - elif mask is not nomask: - # __setmask__ makes a copy, which we don't want - result._mask = getattr(mask, funcname)(*args, **params) - return result - methdoc = getattr(ndarray, funcname, None) or getattr(np, funcname, None) - if methdoc is not None: - wrapped_method.__doc__ = methdoc.__doc__ - wrapped_method.__name__ = funcname - return wrapped_method - - -class MaskedIterator(object): - """ - Flat iterator object to iterate over masked arrays. - - A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array - `x`. It allows iterating over the array as if it were a 1-D array, - either in a for-loop or by calling its `next` method. - - Iteration is done in C-contiguous style, with the last index varying the - fastest. The iterator can also be indexed using basic slicing or - advanced indexing. - - See Also - -------- - MaskedArray.flat : Return a flat iterator over an array. - MaskedArray.flatten : Returns a flattened copy of an array. - - Notes - ----- - `MaskedIterator` is not exported by the `ma` module. Instead of - instantiating a `MaskedIterator` directly, use `MaskedArray.flat`. - - Examples - -------- - >>> x = np.ma.array(arange(6).reshape(2, 3)) - >>> fl = x.flat - >>> type(fl) - - >>> for item in fl: - ... print(item) - ... 
- 0 - 1 - 2 - 3 - 4 - 5 - - Extracting more than a single element b indexing the `MaskedIterator` - returns a masked array: - - >>> fl[2:4] - masked_array(data = [2 3], - mask = False, - fill_value = 999999) - - """ - - def __init__(self, ma): - self.ma = ma - self.dataiter = ma._data.flat - - if ma._mask is nomask: - self.maskiter = None - else: - self.maskiter = ma._mask.flat - - def __iter__(self): - return self - - def __getitem__(self, indx): - result = self.dataiter.__getitem__(indx).view(type(self.ma)) - if self.maskiter is not None: - _mask = self.maskiter.__getitem__(indx) - if isinstance(_mask, ndarray): - # set shape to match that of data; this is needed for matrices - _mask.shape = result.shape - result._mask = _mask - elif isinstance(_mask, np.void): - return mvoid(result, mask=_mask, hardmask=self.ma._hardmask) - elif _mask: # Just a scalar, masked - return masked - return result - - # This won't work if ravel makes a copy - def __setitem__(self, index, value): - self.dataiter[index] = getdata(value) - if self.maskiter is not None: - self.maskiter[index] = getmaskarray(value) - - def __next__(self): - """ - Return the next value, or raise StopIteration. - - Examples - -------- - >>> x = np.ma.array([3, 2], mask=[0, 1]) - >>> fl = x.flat - >>> next(fl) - 3 - >>> next(fl) - masked - >>> next(fl) - Traceback (most recent call last): - ... - StopIteration - - """ - d = next(self.dataiter) - if self.maskiter is not None: - m = next(self.maskiter) - if isinstance(m, np.void): - return mvoid(d, mask=m, hardmask=self.ma._hardmask) - elif m: # Just a scalar, masked - return masked - return d - - next = __next__ - - -class MaskedArray(ndarray): - """ - An array class with possibly masked values. - - Masked values of True exclude the corresponding element from any - computation. 
- - Construction:: - - x = MaskedArray(data, mask=nomask, dtype=None, copy=False, subok=True, - ndmin=0, fill_value=None, keep_mask=True, hard_mask=None, - shrink=True, order=None) - - Parameters - ---------- - data : array_like - Input data. - mask : sequence, optional - Mask. Must be convertible to an array of booleans with the same - shape as `data`. True indicates a masked (i.e. invalid) data. - dtype : dtype, optional - Data type of the output. - If `dtype` is None, the type of the data argument (``data.dtype``) - is used. If `dtype` is not None and different from ``data.dtype``, - a copy is performed. - copy : bool, optional - Whether to copy the input data (True), or to use a reference instead. - Default is False. - subok : bool, optional - Whether to return a subclass of `MaskedArray` if possible (True) or a - plain `MaskedArray`. Default is True. - ndmin : int, optional - Minimum number of dimensions. Default is 0. - fill_value : scalar, optional - Value used to fill in the masked values when necessary. - If None, a default based on the data-type is used. - keep_mask : bool, optional - Whether to combine `mask` with the mask of the input data, if any - (True), or to use only `mask` for the output (False). Default is True. - hard_mask : bool, optional - Whether to use a hard mask or not. With a hard mask, masked values - cannot be unmasked. Default is False. - shrink : bool, optional - Whether to force compression of an empty mask. Default is True. - order : {'C', 'F', 'A'}, optional - Specify the order of the array. If order is 'C', then the array - will be in C-contiguous order (last-index varies the fastest). - If order is 'F', then the returned array will be in - Fortran-contiguous order (first-index varies the fastest). - If order is 'A' (default), then the returned array may be - in any order (either C-, Fortran-contiguous, or even discontiguous), - unless a copy is required, in which case it will be C-contiguous. 
- - """ - - __array_priority__ = 15 - _defaultmask = nomask - _defaulthardmask = False - _baseclass = ndarray - - # Maximum number of elements per axis used when printing an array. The - # 1d case is handled separately because we need more values in this case. - _print_width = 100 - _print_width_1d = 1500 - - def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, - subok=True, ndmin=0, fill_value=None, keep_mask=True, - hard_mask=None, shrink=True, order=None, **options): - """ - Create a new masked array from scratch. - - Notes - ----- - A masked array can also be created by taking a .view(MaskedArray). - - """ - # Process data. - _data = np.array(data, dtype=dtype, copy=copy, - order=order, subok=True, ndmin=ndmin) - _baseclass = getattr(data, '_baseclass', type(_data)) - # Check that we're not erasing the mask. - if isinstance(data, MaskedArray) and (data.shape != _data.shape): - copy = True - - # Here, we copy the _view_, so that we can attach new properties to it - # we must never do .view(MaskedConstant), as that would create a new - # instance of np.ma.masked, which make identity comparison fail - if isinstance(data, cls) and subok and not isinstance(data, MaskedConstant): - _data = ndarray.view(_data, type(data)) - else: - _data = ndarray.view(_data, cls) - # Backwards compatibility w/ numpy.core.ma. - if hasattr(data, '_mask') and not isinstance(data, ndarray): - _data._mask = data._mask - # FIXME _sharedmask is never used. - _sharedmask = True - # Process mask. - # Type of the mask - mdtype = make_mask_descr(_data.dtype) - - if mask is nomask: - # Case 1. : no mask in input. - # Erase the current mask ? 
- if not keep_mask: - # With a reduced version - if shrink: - _data._mask = nomask - # With full version - else: - _data._mask = np.zeros(_data.shape, dtype=mdtype) - # Check whether we missed something - elif isinstance(data, (tuple, list)): - try: - # If data is a sequence of masked array - mask = np.array([getmaskarray(m) for m in data], - dtype=mdtype) - except ValueError: - # If data is nested - mask = nomask - # Force shrinking of the mask if needed (and possible) - if (mdtype == MaskType) and mask.any(): - _data._mask = mask - _data._sharedmask = False - else: - _data._sharedmask = not copy - if copy: - _data._mask = _data._mask.copy() - # Reset the shape of the original mask - if getmask(data) is not nomask: - data._mask.shape = data.shape - else: - # Case 2. : With a mask in input. - # If mask is boolean, create an array of True or False - if mask is True and mdtype == MaskType: - mask = np.ones(_data.shape, dtype=mdtype) - elif mask is False and mdtype == MaskType: - mask = np.zeros(_data.shape, dtype=mdtype) - else: - # Read the mask with the current mdtype - try: - mask = np.array(mask, copy=copy, dtype=mdtype) - # Or assume it's a sequence of bool/int - except TypeError: - mask = np.array([tuple([m] * len(mdtype)) for m in mask], - dtype=mdtype) - # Make sure the mask and the data have the same shape - if mask.shape != _data.shape: - (nd, nm) = (_data.size, mask.size) - if nm == 1: - mask = np.resize(mask, _data.shape) - elif nm == nd: - mask = np.reshape(mask, _data.shape) - else: - msg = "Mask and data not compatible: data size is %i, " + \ - "mask size is %i." 
- raise MaskError(msg % (nd, nm)) - copy = True - # Set the mask to the new value - if _data._mask is nomask: - _data._mask = mask - _data._sharedmask = not copy - else: - if not keep_mask: - _data._mask = mask - _data._sharedmask = not copy - else: - if _data.dtype.names is not None: - def _recursive_or(a, b): - "do a|=b on each field of a, recursively" - for name in a.dtype.names: - (af, bf) = (a[name], b[name]) - if af.dtype.names is not None: - _recursive_or(af, bf) - else: - af |= bf - - _recursive_or(_data._mask, mask) - else: - _data._mask = np.logical_or(mask, _data._mask) - _data._sharedmask = False - # Update fill_value. - if fill_value is None: - fill_value = getattr(data, '_fill_value', None) - # But don't run the check unless we have something to check. - if fill_value is not None: - _data._fill_value = _check_fill_value(fill_value, _data.dtype) - # Process extra options .. - if hard_mask is None: - _data._hardmask = getattr(data, '_hardmask', False) - else: - _data._hardmask = hard_mask - _data._baseclass = _baseclass - return _data - - - def _update_from(self, obj): - """ - Copies some attributes of obj to self. - - """ - if isinstance(obj, ndarray): - _baseclass = type(obj) - else: - _baseclass = ndarray - # We need to copy the _basedict to avoid backward propagation - _optinfo = {} - _optinfo.update(getattr(obj, '_optinfo', {})) - _optinfo.update(getattr(obj, '_basedict', {})) - if not isinstance(obj, MaskedArray): - _optinfo.update(getattr(obj, '__dict__', {})) - _dict = dict(_fill_value=getattr(obj, '_fill_value', None), - _hardmask=getattr(obj, '_hardmask', False), - _sharedmask=getattr(obj, '_sharedmask', False), - _isfield=getattr(obj, '_isfield', False), - _baseclass=getattr(obj, '_baseclass', _baseclass), - _optinfo=_optinfo, - _basedict=_optinfo) - self.__dict__.update(_dict) - self.__dict__.update(_optinfo) - return - - def __array_finalize__(self, obj): - """ - Finalizes the masked array. - - """ - # Get main attributes. 
- self._update_from(obj) - - # We have to decide how to initialize self.mask, based on - # obj.mask. This is very difficult. There might be some - # correspondence between the elements in the array we are being - # created from (= obj) and us. Or there might not. This method can - # be called in all kinds of places for all kinds of reasons -- could - # be empty_like, could be slicing, could be a ufunc, could be a view. - # The numpy subclassing interface simply doesn't give us any way - # to know, which means that at best this method will be based on - # guesswork and heuristics. To make things worse, there isn't even any - # clear consensus about what the desired behavior is. For instance, - # most users think that np.empty_like(marr) -- which goes via this - # method -- should return a masked array with an empty mask (see - # gh-3404 and linked discussions), but others disagree, and they have - # existing code which depends on empty_like returning an array that - # matches the input mask. - # - # Historically our algorithm was: if the template object mask had the - # same *number of elements* as us, then we used *it's mask object - # itself* as our mask, so that writes to us would also write to the - # original array. This is horribly broken in multiple ways. - # - # Now what we do instead is, if the template object mask has the same - # number of elements as us, and we do not have the same base pointer - # as the template object (b/c views like arr[...] should keep the same - # mask), then we make a copy of the template object mask and use - # that. This is also horribly broken but somewhat less so. Maybe. - if isinstance(obj, ndarray): - # XX: This looks like a bug -- shouldn't it check self.dtype - # instead? 
- if obj.dtype.names is not None: - _mask = getmaskarray(obj) - else: - _mask = getmask(obj) - - # If self and obj point to exactly the same data, then probably - # self is a simple view of obj (e.g., self = obj[...]), so they - # should share the same mask. (This isn't 100% reliable, e.g. self - # could be the first row of obj, or have strange strides, but as a - # heuristic it's not bad.) In all other cases, we make a copy of - # the mask, so that future modifications to 'self' do not end up - # side-effecting 'obj' as well. - if (_mask is not nomask and obj.__array_interface__["data"][0] - != self.__array_interface__["data"][0]): - # We should make a copy. But we could get here via astype, - # in which case the mask might need a new dtype as well - # (e.g., changing to or from a structured dtype), and the - # order could have changed. So, change the mask type if - # needed and use astype instead of copy. - if self.dtype == obj.dtype: - _mask_dtype = _mask.dtype - else: - _mask_dtype = make_mask_descr(self.dtype) - - if self.flags.c_contiguous: - order = "C" - elif self.flags.f_contiguous: - order = "F" - else: - order = "K" - - _mask = _mask.astype(_mask_dtype, order) - else: - # Take a view so shape changes, etc., do not propagate back. - _mask = _mask.view() - else: - _mask = nomask - - self._mask = _mask - # Finalize the mask - if self._mask is not nomask: - try: - self._mask.shape = self.shape - except ValueError: - self._mask = nomask - except (TypeError, AttributeError): - # When _mask.shape is not writable (because it's a void) - pass - - # Finalize the fill_value - if self._fill_value is not None: - self._fill_value = _check_fill_value(self._fill_value, self.dtype) - elif self.dtype.names is not None: - # Finalize the default fill_value for structured arrays - self._fill_value = _check_fill_value(None, self.dtype) - - def __array_wrap__(self, obj, context=None): - """ - Special hook for ufuncs. 
- - Wraps the numpy array and sets the mask according to context. - - """ - if obj is self: # for in-place operations - result = obj - else: - result = obj.view(type(self)) - result._update_from(self) - - if context is not None: - result._mask = result._mask.copy() - func, args, out_i = context - # args sometimes contains outputs (gh-10459), which we don't want - input_args = args[:func.nin] - m = reduce(mask_or, [getmaskarray(arg) for arg in input_args]) - # Get the domain mask - domain = ufunc_domain.get(func, None) - if domain is not None: - # Take the domain, and make sure it's a ndarray - with np.errstate(divide='ignore', invalid='ignore'): - d = filled(domain(*input_args), True) - - if d.any(): - # Fill the result where the domain is wrong - try: - # Binary domain: take the last value - fill_value = ufunc_fills[func][-1] - except TypeError: - # Unary domain: just use this one - fill_value = ufunc_fills[func] - except KeyError: - # Domain not recognized, use fill_value instead - fill_value = self.fill_value - - np.copyto(result, fill_value, where=d) - - # Update the mask - if m is nomask: - m = d - else: - # Don't modify inplace, we risk back-propagation - m = (m | d) - - # Make sure the mask has the proper size - if result is not self and result.shape == () and m: - return masked - else: - result._mask = m - result._sharedmask = False - - return result - - def view(self, dtype=None, type=None, fill_value=None): - """ - Return a view of the MaskedArray data. - - Parameters - ---------- - dtype : data-type or ndarray sub-class, optional - Data-type descriptor of the returned view, e.g., float32 or int16. - The default, None, results in the view having the same data-type - as `a`. As with ``ndarray.view``, dtype can also be specified as - an ndarray sub-class, which then specifies the type of the - returned object (this is equivalent to setting the ``type`` - parameter). - type : Python type, optional - Type of the returned view, either ndarray or a subclass. 
The - default None results in type preservation. - fill_value : scalar, optional - The value to use for invalid entries (None by default). - If None, then this argument is inferred from the passed `dtype`, or - in its absence the original array, as discussed in the notes below. - - See Also - -------- - numpy.ndarray.view : Equivalent method on ndarray object. - - Notes - ----- - - ``a.view()`` is used two different ways: - - ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view - of the array's memory with a different data-type. This can cause a - reinterpretation of the bytes of memory. - - ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just - returns an instance of `ndarray_subclass` that looks at the same array - (same shape, dtype, etc.) This does not cause a reinterpretation of the - memory. - - If `fill_value` is not specified, but `dtype` is specified (and is not - an ndarray sub-class), the `fill_value` of the MaskedArray will be - reset. If neither `fill_value` nor `dtype` are specified (or if - `dtype` is an ndarray sub-class), then the fill value is preserved. - Finally, if `fill_value` is specified, but `dtype` is not, the fill - value is set to the specified value. - - For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of - bytes per entry than the previous dtype (for example, converting a - regular array to a structured array), then the behavior of the view - cannot be predicted just from the superficial appearance of ``a`` (shown - by ``print(a)``). It also depends on exactly how ``a`` is stored in - memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus - defined as a slice or transpose, etc., the view may give different - results. 
- """ - - if dtype is None: - if type is None: - output = ndarray.view(self) - else: - output = ndarray.view(self, type) - elif type is None: - try: - if issubclass(dtype, ndarray): - output = ndarray.view(self, dtype) - dtype = None - else: - output = ndarray.view(self, dtype) - except TypeError: - output = ndarray.view(self, dtype) - else: - output = ndarray.view(self, dtype, type) - - # also make the mask be a view (so attr changes to the view's - # mask do no affect original object's mask) - # (especially important to avoid affecting np.masked singleton) - if getmask(output) is not nomask: - output._mask = output._mask.view() - - # Make sure to reset the _fill_value if needed - if getattr(output, '_fill_value', None) is not None: - if fill_value is None: - if dtype is None: - pass # leave _fill_value as is - else: - output._fill_value = None - else: - output.fill_value = fill_value - return output - - def __getitem__(self, indx): - """ - x.__getitem__(y) <==> x[y] - - Return the item described by i, as a masked array. - - """ - # We could directly use ndarray.__getitem__ on self. 
- # But then we would have to modify __array_finalize__ to prevent the - # mask of being reshaped if it hasn't been set up properly yet - # So it's easier to stick to the current version - dout = self.data[indx] - _mask = self._mask - - def _is_scalar(m): - return not isinstance(m, np.ndarray) - - def _scalar_heuristic(arr, elem): - """ - Return whether `elem` is a scalar result of indexing `arr`, or None - if undecidable without promoting nomask to a full mask - """ - # obviously a scalar - if not isinstance(elem, np.ndarray): - return True - - # object array scalar indexing can return anything - elif arr.dtype.type is np.object_: - if arr.dtype is not elem.dtype: - # elem is an array, but dtypes do not match, so must be - # an element - return True - - # well-behaved subclass that only returns 0d arrays when - # expected - this is not a scalar - elif type(arr).__getitem__ == ndarray.__getitem__: - return False - - return None - - if _mask is not nomask: - # _mask cannot be a subclass, so it tells us whether we should - # expect a scalar. It also cannot be of dtype object. - mout = _mask[indx] - scalar_expected = _is_scalar(mout) - - else: - # attempt to apply the heuristic to avoid constructing a full mask - mout = nomask - scalar_expected = _scalar_heuristic(self.data, dout) - if scalar_expected is None: - # heuristics have failed - # construct a full array, so we can be certain. This is costly. - # we could also fall back on ndarray.__getitem__(self.data, indx) - scalar_expected = _is_scalar(getmaskarray(self)[indx]) - - # Did we extract a single item? - if scalar_expected: - # A record - if isinstance(dout, np.void): - # We should always re-cast to mvoid, otherwise users can - # change masks on rows that already have masked values, but not - # on rows that have no masked values, which is inconsistent. 
- return mvoid(dout, mask=mout, hardmask=self._hardmask) - - # special case introduced in gh-5962 - elif (self.dtype.type is np.object_ and - isinstance(dout, np.ndarray) and - dout is not masked): - # If masked, turn into a MaskedArray, with everything masked. - if mout: - return MaskedArray(dout, mask=True) - else: - return dout - - # Just a scalar - else: - if mout: - return masked - else: - return dout - else: - # Force dout to MA - dout = dout.view(type(self)) - # Inherit attributes from self - dout._update_from(self) - # Check the fill_value - if is_string_or_list_of_strings(indx): - if self._fill_value is not None: - dout._fill_value = self._fill_value[indx] - - # If we're indexing a multidimensional field in a - # structured array (such as dtype("(2,)i2,(2,)i1")), - # dimensionality goes up (M[field].ndim == M.ndim + - # M.dtype[field].ndim). That's fine for - # M[field] but problematic for M[field].fill_value - # which should have shape () to avoid breaking several - # methods. There is no great way out, so set to - # first element. See issue #6723. - if dout._fill_value.ndim > 0: - if not (dout._fill_value == - dout._fill_value.flat[0]).all(): - warnings.warn( - "Upon accessing multidimensional field " - "{indx:s}, need to keep dimensionality " - "of fill_value at 0. Discarding " - "heterogeneous fill_value and setting " - "all to {fv!s}.".format(indx=indx, - fv=dout._fill_value[0]), - stacklevel=2) - dout._fill_value = dout._fill_value.flat[0] - dout._isfield = True - # Update the mask if needed - if mout is not nomask: - # set shape to match that of data; this is needed for matrices - dout._mask = reshape(mout, dout.shape) - dout._sharedmask = True - # Note: Don't try to check for m.any(), that'll take too long - return dout - - def __setitem__(self, indx, value): - """ - x.__setitem__(i, y) <==> x[i]=y - - Set item described by index. If value is masked, masks those - locations. 
- - """ - if self is masked: - raise MaskError('Cannot alter the masked element.') - _data = self._data - _mask = self._mask - if isinstance(indx, basestring): - _data[indx] = value - if _mask is nomask: - self._mask = _mask = make_mask_none(self.shape, self.dtype) - _mask[indx] = getmask(value) - return - - _dtype = _data.dtype - - if value is masked: - # The mask wasn't set: create a full version. - if _mask is nomask: - _mask = self._mask = make_mask_none(self.shape, _dtype) - # Now, set the mask to its value. - if _dtype.names is not None: - _mask[indx] = tuple([True] * len(_dtype.names)) - else: - _mask[indx] = True - return - - # Get the _data part of the new value - dval = getattr(value, '_data', value) - # Get the _mask part of the new value - mval = getmask(value) - if _dtype.names is not None and mval is nomask: - mval = tuple([False] * len(_dtype.names)) - if _mask is nomask: - # Set the data, then the mask - _data[indx] = dval - if mval is not nomask: - _mask = self._mask = make_mask_none(self.shape, _dtype) - _mask[indx] = mval - elif not self._hardmask: - # Set the data, then the mask - _data[indx] = dval - _mask[indx] = mval - elif hasattr(indx, 'dtype') and (indx.dtype == MaskType): - indx = indx * umath.logical_not(_mask) - _data[indx] = dval - else: - if _dtype.names is not None: - err_msg = "Flexible 'hard' masks are not yet supported." - raise NotImplementedError(err_msg) - mindx = mask_or(_mask[indx], mval, copy=True) - dindx = self._data[indx] - if dindx.size > 1: - np.copyto(dindx, dval, where=~mindx) - elif mindx is nomask: - dindx = dval - _data[indx] = dindx - _mask[indx] = mindx - return - - # Define so that we can overwrite the setter. 
- @property - def dtype(self): - return super(MaskedArray, self).dtype - - @dtype.setter - def dtype(self, dtype): - super(MaskedArray, type(self)).dtype.__set__(self, dtype) - if self._mask is not nomask: - self._mask = self._mask.view(make_mask_descr(dtype), ndarray) - # Try to reset the shape of the mask (if we don't have a void). - # This raises a ValueError if the dtype change won't work. - try: - self._mask.shape = self.shape - except (AttributeError, TypeError): - pass - - @property - def shape(self): - return super(MaskedArray, self).shape - - @shape.setter - def shape(self, shape): - super(MaskedArray, type(self)).shape.__set__(self, shape) - # Cannot use self._mask, since it may not (yet) exist when a - # masked matrix sets the shape. - if getmask(self) is not nomask: - self._mask.shape = self.shape - - def __setmask__(self, mask, copy=False): - """ - Set the mask. - - """ - idtype = self.dtype - current_mask = self._mask - if mask is masked: - mask = True - - if current_mask is nomask: - # Make sure the mask is set - # Just don't do anything if there's nothing to do. - if mask is nomask: - return - current_mask = self._mask = make_mask_none(self.shape, idtype) - - if idtype.names is None: - # No named fields. - # Hardmask: don't unmask the data - if self._hardmask: - current_mask |= mask - # Softmask: set everything to False - # If it's obviously a compatible scalar, use a quick update - # method. - elif isinstance(mask, (int, float, np.bool_, np.number)): - current_mask[...] = mask - # Otherwise fall back to the slower, general purpose way. 
- else: - current_mask.flat = mask - else: - # Named fields w/ - mdtype = current_mask.dtype - mask = np.array(mask, copy=False) - # Mask is a singleton - if not mask.ndim: - # It's a boolean : make a record - if mask.dtype.kind == 'b': - mask = np.array(tuple([mask.item()] * len(mdtype)), - dtype=mdtype) - # It's a record: make sure the dtype is correct - else: - mask = mask.astype(mdtype) - # Mask is a sequence - else: - # Make sure the new mask is a ndarray with the proper dtype - try: - mask = np.array(mask, copy=copy, dtype=mdtype) - # Or assume it's a sequence of bool/int - except TypeError: - mask = np.array([tuple([m] * len(mdtype)) for m in mask], - dtype=mdtype) - # Hardmask: don't unmask the data - if self._hardmask: - for n in idtype.names: - current_mask[n] |= mask[n] - # Softmask: set everything to False - # If it's obviously a compatible scalar, use a quick update - # method. - elif isinstance(mask, (int, float, np.bool_, np.number)): - current_mask[...] = mask - # Otherwise fall back to the slower, general purpose way. - else: - current_mask.flat = mask - # Reshape if needed - if current_mask.shape: - current_mask.shape = self.shape - return - - _set_mask = __setmask__ - - @property - def mask(self): - """ Current mask. """ - - # We could try to force a reshape, but that wouldn't work in some - # cases. - # Return a view so that the dtype and shape cannot be changed in place - # This still preserves nomask by identity - return self._mask.view() - - @mask.setter - def mask(self, value): - self.__setmask__(value) - - @property - def recordmask(self): - """ - Get or set the mask of the array if it has no named fields. For - structured arrays, returns a ndarray of booleans where entries are - ``True`` if **all** the fields are masked, ``False`` otherwise: - - >>> x = np.ma.array([(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], - ... mask=[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)], - ... 
dtype=[('a', int), ('b', int)]) - >>> x.recordmask - array([False, False, True, False, False]) - """ - - _mask = self._mask.view(ndarray) - if _mask.dtype.names is None: - return _mask - return np.all(flatten_structured_array(_mask), axis=-1) - - @recordmask.setter - def recordmask(self, mask): - raise NotImplementedError("Coming soon: setting the mask per records!") - - def harden_mask(self): - """ - Force the mask to hard. - - Whether the mask of a masked array is hard or soft is determined by - its `hardmask` property. `harden_mask` sets `hardmask` to True. - - See Also - -------- - hardmask - - """ - self._hardmask = True - return self - - def soften_mask(self): - """ - Force the mask to soft. - - Whether the mask of a masked array is hard or soft is determined by - its `hardmask` property. `soften_mask` sets `hardmask` to False. - - See Also - -------- - hardmask - - """ - self._hardmask = False - return self - - @property - def hardmask(self): - """ Hardness of the mask """ - return self._hardmask - - def unshare_mask(self): - """ - Copy the mask and set the sharedmask flag to False. - - Whether the mask is shared between masked arrays can be seen from - the `sharedmask` property. `unshare_mask` ensures the mask is not shared. - A copy of the mask is only made if it was shared. - - See Also - -------- - sharedmask - - """ - if self._sharedmask: - self._mask = self._mask.copy() - self._sharedmask = False - return self - - @property - def sharedmask(self): - """ Share status of the mask (read-only). """ - return self._sharedmask - - def shrink_mask(self): - """ - Reduce a mask to nomask when possible. 
- - Parameters - ---------- - None - - Returns - ------- - None - - Examples - -------- - >>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4) - >>> x.mask - array([[False, False], - [False, False]]) - >>> x.shrink_mask() - masked_array( - data=[[1, 2], - [3, 4]], - mask=False, - fill_value=999999) - >>> x.mask - False - - """ - self._mask = _shrink_mask(self._mask) - return self - - @property - def baseclass(self): - """ Class of the underlying data (read-only). """ - return self._baseclass - - def _get_data(self): - """ - Returns the underlying data, as a view of the masked array. - - If the underlying data is a subclass of :class:`numpy.ndarray`, it is - returned as such. - - >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) - >>> x.data - matrix([[1, 2], - [3, 4]]) - - The type of the data can be accessed through the :attr:`baseclass` - attribute. - """ - return ndarray.view(self, self._baseclass) - - _data = property(fget=_get_data) - data = property(fget=_get_data) - - @property - def flat(self): - """ Return a flat iterator, or set a flattened version of self to value. """ - return MaskedIterator(self) - - @flat.setter - def flat(self, value): - y = self.ravel() - y[:] = value - - @property - def fill_value(self): - """ - The filling value of the masked array is a scalar. When setting, None - will set to a default based on the data type. - - Examples - -------- - >>> for dt in [np.int32, np.int64, np.float64, np.complex128]: - ... np.ma.array([0, 1], dtype=dt).get_fill_value() - ... 
- 999999 - 999999 - 1e+20 - (1e+20+0j) - - >>> x = np.ma.array([0, 1.], fill_value=-np.inf) - >>> x.fill_value - -inf - >>> x.fill_value = np.pi - >>> x.fill_value - 3.1415926535897931 # may vary - - Reset to default: - - >>> x.fill_value = None - >>> x.fill_value - 1e+20 - - """ - if self._fill_value is None: - self._fill_value = _check_fill_value(None, self.dtype) - - # Temporary workaround to account for the fact that str and bytes - # scalars cannot be indexed with (), whereas all other numpy - # scalars can. See issues #7259 and #7267. - # The if-block can be removed after #7267 has been fixed. - if isinstance(self._fill_value, ndarray): - return self._fill_value[()] - return self._fill_value - - @fill_value.setter - def fill_value(self, value=None): - target = _check_fill_value(value, self.dtype) - if not target.ndim == 0: - # 2019-11-12, 1.18.0 - warnings.warn( - "Non-scalar arrays for the fill value are deprecated. Use " - "arrays with scalar values instead. The filled function " - "still supports any array as `fill_value`.", - DeprecationWarning, stacklevel=2) - - _fill_value = self._fill_value - if _fill_value is None: - # Create the attribute if it was undefined - self._fill_value = target - else: - # Don't overwrite the attribute, just fill it (for propagation) - _fill_value[()] = target - - # kept for compatibility - get_fill_value = fill_value.fget - set_fill_value = fill_value.fset - - def filled(self, fill_value=None): - """ - Return a copy of self, with masked values filled with a given value. - **However**, if there are no masked values to fill, self will be - returned instead as an ndarray. - - Parameters - ---------- - fill_value : array_like, optional - The value to use for invalid entries. Can be scalar or non-scalar. - If non-scalar, the resulting ndarray must be broadcastable over - input array. Default is None, in which case, the `fill_value` - attribute of the array is used instead. 
- - Returns - ------- - filled_array : ndarray - A copy of ``self`` with invalid entries replaced by *fill_value* - (be it the function argument or the attribute of ``self``), or - ``self`` itself as an ndarray if there are no invalid entries to - be replaced. - - Notes - ----- - The result is **not** a MaskedArray! - - Examples - -------- - >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999) - >>> x.filled() - array([ 1, 2, -999, 4, -999]) - >>> x.filled(fill_value=1000) - array([ 1, 2, 1000, 4, 1000]) - >>> type(x.filled()) - - - Subclassing is preserved. This means that if, e.g., the data part of - the masked array is a recarray, `filled` returns a recarray: - - >>> x = np.array([(-1, 2), (-3, 4)], dtype='i8,i8').view(np.recarray) - >>> m = np.ma.array(x, mask=[(True, False), (False, True)]) - >>> m.filled() - rec.array([(999999, 2), ( -3, 999999)], - dtype=[('f0', '>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3) - >>> x.compressed() - array([0, 1]) - >>> type(x.compressed()) - - - """ - data = ndarray.ravel(self._data) - if self._mask is not nomask: - data = data.compress(np.logical_not(ndarray.ravel(self._mask))) - return data - - def compress(self, condition, axis=None, out=None): - """ - Return `a` where condition is ``True``. - - If condition is a `MaskedArray`, missing values are considered - as ``False``. - - Parameters - ---------- - condition : var - Boolean 1-d array selecting which entries to return. If len(condition) - is less than the size of a along the axis, then output is truncated - to length of condition array. - axis : {None, int}, optional - Axis along which the operation must be performed. - out : {None, ndarray}, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output but the type will be cast if - necessary. - - Returns - ------- - result : MaskedArray - A :class:`MaskedArray` object. - - Notes - ----- - Please note the difference with :meth:`compressed` ! 
- The output of :meth:`compress` has a mask, the output of - :meth:`compressed` does not. - - Examples - -------- - >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> x - masked_array( - data=[[1, --, 3], - [--, 5, --], - [7, --, 9]], - mask=[[False, True, False], - [ True, False, True], - [False, True, False]], - fill_value=999999) - >>> x.compress([1, 0, 1]) - masked_array(data=[1, 3], - mask=[False, False], - fill_value=999999) - - >>> x.compress([1, 0, 1], axis=1) - masked_array( - data=[[1, 3], - [--, --], - [7, 9]], - mask=[[False, False], - [ True, True], - [False, False]], - fill_value=999999) - - """ - # Get the basic components - (_data, _mask) = (self._data, self._mask) - - # Force the condition to a regular ndarray and forget the missing - # values. - condition = np.array(condition, copy=False, subok=False) - - _new = _data.compress(condition, axis=axis, out=out).view(type(self)) - _new._update_from(self) - if _mask is not nomask: - _new._mask = _mask.compress(condition, axis=axis) - return _new - - def _insert_masked_print(self): - """ - Replace masked values with masked_print_option, casting all innermost - dtypes to object. - """ - if masked_print_option.enabled(): - mask = self._mask - if mask is nomask: - res = self._data - else: - # convert to object array to make filled work - data = self._data - # For big arrays, to avoid a costly conversion to the - # object dtype, extract the corners before the conversion. 
- print_width = (self._print_width if self.ndim > 1 - else self._print_width_1d) - for axis in range(self.ndim): - if data.shape[axis] > print_width: - ind = print_width // 2 - arr = np.split(data, (ind, -ind), axis=axis) - data = np.concatenate((arr[0], arr[2]), axis=axis) - arr = np.split(mask, (ind, -ind), axis=axis) - mask = np.concatenate((arr[0], arr[2]), axis=axis) - - rdtype = _replace_dtype_fields(self.dtype, "O") - res = data.astype(rdtype) - _recursive_printoption(res, mask, masked_print_option) - else: - res = self.filled(self.fill_value) - return res - - def __str__(self): - return str(self._insert_masked_print()) - - if sys.version_info.major < 3: - def __unicode__(self): - return unicode(self._insert_masked_print()) - - def __repr__(self): - """ - Literal string representation. - - """ - if self._baseclass is np.ndarray: - name = 'array' - else: - name = self._baseclass.__name__ - - - # 2016-11-19: Demoted to legacy format - if np.get_printoptions()['legacy'] == '1.13': - is_long = self.ndim > 1 - parameters = dict( - name=name, - nlen=" " * len(name), - data=str(self), - mask=str(self._mask), - fill=str(self.fill_value), - dtype=str(self.dtype) - ) - is_structured = bool(self.dtype.names) - key = '{}_{}'.format( - 'long' if is_long else 'short', - 'flx' if is_structured else 'std' - ) - return _legacy_print_templates[key] % parameters - - prefix = 'masked_{}('.format(name) - - dtype_needed = ( - not np.core.arrayprint.dtype_is_implied(self.dtype) or - np.all(self.mask) or - self.size == 0 - ) - - # determine which keyword args need to be shown - keys = ['data', 'mask', 'fill_value'] - if dtype_needed: - keys.append('dtype') - - # array has only one row (non-column) - is_one_row = builtins.all(dim == 1 for dim in self.shape[:-1]) - - # choose what to indent each keyword with - min_indent = 2 - if is_one_row: - # first key on the same line as the type, remaining keys - # aligned by equals - indents = {} - indents[keys[0]] = prefix - for k in keys[1:]: 
- n = builtins.max(min_indent, len(prefix + keys[0]) - len(k)) - indents[k] = ' ' * n - prefix = '' # absorbed into the first indent - else: - # each key on its own line, indented by two spaces - indents = {k: ' ' * min_indent for k in keys} - prefix = prefix + '\n' # first key on the next line - - # format the field values - reprs = {} - reprs['data'] = np.array2string( - self._insert_masked_print(), - separator=", ", - prefix=indents['data'] + 'data=', - suffix=',') - reprs['mask'] = np.array2string( - self._mask, - separator=", ", - prefix=indents['mask'] + 'mask=', - suffix=',') - reprs['fill_value'] = repr(self.fill_value) - if dtype_needed: - reprs['dtype'] = np.core.arrayprint.dtype_short_repr(self.dtype) - - # join keys with values and indentations - result = ',\n'.join( - '{}{}={}'.format(indents[k], k, reprs[k]) - for k in keys - ) - return prefix + result + ')' - - def _delegate_binop(self, other): - # This emulates the logic in - # private/binop_override.h:forward_binop_should_defer - if isinstance(other, type(self)): - return False - array_ufunc = getattr(other, "__array_ufunc__", False) - if array_ufunc is False: - other_priority = getattr(other, "__array_priority__", -1000000) - return self.__array_priority__ < other_priority - else: - # If array_ufunc is not None, it will be called inside the ufunc; - # None explicitly tells us to not call the ufunc, i.e., defer. - return array_ufunc is None - - def _comparison(self, other, compare): - """Compare self with other using operator.eq or operator.ne. - - When either of the elements is masked, the result is masked as well, - but the underlying boolean data are still set, with self and other - considered equal if both are masked, and unequal otherwise. - - For structured arrays, all fields are combined, with masked values - ignored. The result is masked if all fields were masked, with self - and other considered equal only if both were fully masked. 
- """ - omask = getmask(other) - smask = self.mask - mask = mask_or(smask, omask, copy=True) - - odata = getdata(other) - if mask.dtype.names is not None: - # For possibly masked structured arrays we need to be careful, - # since the standard structured array comparison will use all - # fields, masked or not. To avoid masked fields influencing the - # outcome, we set all masked fields in self to other, so they'll - # count as equal. To prepare, we ensure we have the right shape. - broadcast_shape = np.broadcast(self, odata).shape - sbroadcast = np.broadcast_to(self, broadcast_shape, subok=True) - sbroadcast._mask = mask - sdata = sbroadcast.filled(odata) - # Now take care of the mask; the merged mask should have an item - # masked if all fields were masked (in one and/or other). - mask = (mask == np.ones((), mask.dtype)) - - else: - # For regular arrays, just use the data as they come. - sdata = self.data - - check = compare(sdata, odata) - - if isinstance(check, (np.bool_, bool)): - return masked if mask else check - - if mask is not nomask: - # Adjust elements that were masked, which should be treated - # as equal if masked in both, unequal if masked in one. - # Note that this works automatically for structured arrays too. - check = np.where(mask, compare(smask, omask), check) - if mask.shape != check.shape: - # Guarantee consistency of the shape, making a copy since the - # the mask may need to get written to later. - mask = np.broadcast_to(mask, check.shape).copy() - - check = check.view(type(self)) - check._update_from(self) - check._mask = mask - - # Cast fill value to bool_ if needed. If it cannot be cast, the - # default boolean fill value is used. - if check._fill_value is not None: - try: - fill = _check_fill_value(check._fill_value, np.bool_) - except (TypeError, ValueError): - fill = _check_fill_value(None, np.bool_) - check._fill_value = fill - - return check - - def __eq__(self, other): - """Check whether other equals self elementwise. 
- - When either of the elements is masked, the result is masked as well, - but the underlying boolean data are still set, with self and other - considered equal if both are masked, and unequal otherwise. - - For structured arrays, all fields are combined, with masked values - ignored. The result is masked if all fields were masked, with self - and other considered equal only if both were fully masked. - """ - return self._comparison(other, operator.eq) - - def __ne__(self, other): - """Check whether other does not equal self elementwise. - - When either of the elements is masked, the result is masked as well, - but the underlying boolean data are still set, with self and other - considered equal if both are masked, and unequal otherwise. - - For structured arrays, all fields are combined, with masked values - ignored. The result is masked if all fields were masked, with self - and other considered equal only if both were fully masked. - """ - return self._comparison(other, operator.ne) - - def __add__(self, other): - """ - Add self to other, and return a new masked array. - - """ - if self._delegate_binop(other): - return NotImplemented - return add(self, other) - - def __radd__(self, other): - """ - Add other to self, and return a new masked array. - - """ - # In analogy with __rsub__ and __rdiv__, use original order: - # we get here from `other + self`. - return add(other, self) - - def __sub__(self, other): - """ - Subtract other from self, and return a new masked array. - - """ - if self._delegate_binop(other): - return NotImplemented - return subtract(self, other) - - def __rsub__(self, other): - """ - Subtract self from other, and return a new masked array. - - """ - return subtract(other, self) - - def __mul__(self, other): - "Multiply self by other, and return a new masked array." 
- if self._delegate_binop(other): - return NotImplemented - return multiply(self, other) - - def __rmul__(self, other): - """ - Multiply other by self, and return a new masked array. - - """ - # In analogy with __rsub__ and __rdiv__, use original order: - # we get here from `other * self`. - return multiply(other, self) - - def __div__(self, other): - """ - Divide other into self, and return a new masked array. - - """ - if self._delegate_binop(other): - return NotImplemented - return divide(self, other) - - def __truediv__(self, other): - """ - Divide other into self, and return a new masked array. - - """ - if self._delegate_binop(other): - return NotImplemented - return true_divide(self, other) - - def __rtruediv__(self, other): - """ - Divide self into other, and return a new masked array. - - """ - return true_divide(other, self) - - def __floordiv__(self, other): - """ - Divide other into self, and return a new masked array. - - """ - if self._delegate_binop(other): - return NotImplemented - return floor_divide(self, other) - - def __rfloordiv__(self, other): - """ - Divide self into other, and return a new masked array. - - """ - return floor_divide(other, self) - - def __pow__(self, other): - """ - Raise self to the power other, masking the potential NaNs/Infs - - """ - if self._delegate_binop(other): - return NotImplemented - return power(self, other) - - def __rpow__(self, other): - """ - Raise other to the power self, masking the potential NaNs/Infs - - """ - return power(other, self) - - def __iadd__(self, other): - """ - Add other to self in-place. - - """ - m = getmask(other) - if self._mask is nomask: - if m is not nomask and m.any(): - self._mask = make_mask_none(self.shape, self.dtype) - self._mask += m - else: - if m is not nomask: - self._mask += m - self._data.__iadd__(np.where(self._mask, self.dtype.type(0), - getdata(other))) - return self - - def __isub__(self, other): - """ - Subtract other from self in-place. 
- - """ - m = getmask(other) - if self._mask is nomask: - if m is not nomask and m.any(): - self._mask = make_mask_none(self.shape, self.dtype) - self._mask += m - elif m is not nomask: - self._mask += m - self._data.__isub__(np.where(self._mask, self.dtype.type(0), - getdata(other))) - return self - - def __imul__(self, other): - """ - Multiply self by other in-place. - - """ - m = getmask(other) - if self._mask is nomask: - if m is not nomask and m.any(): - self._mask = make_mask_none(self.shape, self.dtype) - self._mask += m - elif m is not nomask: - self._mask += m - self._data.__imul__(np.where(self._mask, self.dtype.type(1), - getdata(other))) - return self - - def __idiv__(self, other): - """ - Divide self by other in-place. - - """ - other_data = getdata(other) - dom_mask = _DomainSafeDivide().__call__(self._data, other_data) - other_mask = getmask(other) - new_mask = mask_or(other_mask, dom_mask) - # The following 3 lines control the domain filling - if dom_mask.any(): - (_, fval) = ufunc_fills[np.divide] - other_data = np.where(dom_mask, fval, other_data) - self._mask |= new_mask - self._data.__idiv__(np.where(self._mask, self.dtype.type(1), - other_data)) - return self - - def __ifloordiv__(self, other): - """ - Floor divide self by other in-place. - - """ - other_data = getdata(other) - dom_mask = _DomainSafeDivide().__call__(self._data, other_data) - other_mask = getmask(other) - new_mask = mask_or(other_mask, dom_mask) - # The following 3 lines control the domain filling - if dom_mask.any(): - (_, fval) = ufunc_fills[np.floor_divide] - other_data = np.where(dom_mask, fval, other_data) - self._mask |= new_mask - self._data.__ifloordiv__(np.where(self._mask, self.dtype.type(1), - other_data)) - return self - - def __itruediv__(self, other): - """ - True divide self by other in-place. 
- - """ - other_data = getdata(other) - dom_mask = _DomainSafeDivide().__call__(self._data, other_data) - other_mask = getmask(other) - new_mask = mask_or(other_mask, dom_mask) - # The following 3 lines control the domain filling - if dom_mask.any(): - (_, fval) = ufunc_fills[np.true_divide] - other_data = np.where(dom_mask, fval, other_data) - self._mask |= new_mask - self._data.__itruediv__(np.where(self._mask, self.dtype.type(1), - other_data)) - return self - - def __ipow__(self, other): - """ - Raise self to the power other, in place. - - """ - other_data = getdata(other) - other_mask = getmask(other) - with np.errstate(divide='ignore', invalid='ignore'): - self._data.__ipow__(np.where(self._mask, self.dtype.type(1), - other_data)) - invalid = np.logical_not(np.isfinite(self._data)) - if invalid.any(): - if self._mask is not nomask: - self._mask |= invalid - else: - self._mask = invalid - np.copyto(self._data, self.fill_value, where=invalid) - new_mask = mask_or(other_mask, invalid) - self._mask = mask_or(self._mask, new_mask) - return self - - def __float__(self): - """ - Convert to float. - - """ - if self.size > 1: - raise TypeError("Only length-1 arrays can be converted " - "to Python scalars") - elif self._mask: - warnings.warn("Warning: converting a masked element to nan.", stacklevel=2) - return np.nan - return float(self.item()) - - def __int__(self): - """ - Convert to int. - - """ - if self.size > 1: - raise TypeError("Only length-1 arrays can be converted " - "to Python scalars") - elif self._mask: - raise MaskError('Cannot convert masked element to a Python int.') - return int(self.item()) - - def __long__(self): - """ - Convert to long. - """ - if self.size > 1: - raise TypeError("Only length-1 arrays can be converted " - "to Python scalars") - elif self._mask: - raise MaskError('Cannot convert masked element to a Python long.') - return long(self.item()) - - @property - def imag(self): - """ - The imaginary part of the masked array. 
- - This property is a view on the imaginary part of this `MaskedArray`. - - See Also - -------- - real - - Examples - -------- - >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) - >>> x.imag - masked_array(data=[1.0, --, 1.6], - mask=[False, True, False], - fill_value=1e+20) - - """ - result = self._data.imag.view(type(self)) - result.__setmask__(self._mask) - return result - - # kept for compatibility - get_imag = imag.fget - - @property - def real(self): - """ - The real part of the masked array. - - This property is a view on the real part of this `MaskedArray`. - - See Also - -------- - imag - - Examples - -------- - >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) - >>> x.real - masked_array(data=[1.0, --, 3.45], - mask=[False, True, False], - fill_value=1e+20) - - """ - result = self._data.real.view(type(self)) - result.__setmask__(self._mask) - return result - - # kept for compatibility - get_real = real.fget - - def count(self, axis=None, keepdims=np._NoValue): - """ - Count the non-masked elements of the array along the given axis. - - Parameters - ---------- - axis : None or int or tuple of ints, optional - Axis or axes along which the count is performed. - The default, None, performs the count over all - the dimensions of the input array. `axis` may be negative, in - which case it counts from the last to the first axis. - - .. versionadded:: 1.10.0 - - If this is a tuple of ints, the count is performed on multiple - axes, instead of a single axis or all the axes as before. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the array. - - Returns - ------- - result : ndarray or scalar - An array with the same shape as the input array, with the specified - axis removed. If the array is a 0-d array, or if `axis` is None, a - scalar is returned. 
- - See Also - -------- - count_masked : Count masked elements in array or along a given axis. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = ma.arange(6).reshape((2, 3)) - >>> a[1, :] = ma.masked - >>> a - masked_array( - data=[[0, 1, 2], - [--, --, --]], - mask=[[False, False, False], - [ True, True, True]], - fill_value=999999) - >>> a.count() - 3 - - When the `axis` keyword is specified an array of appropriate size is - returned. - - >>> a.count(axis=0) - array([1, 1, 1]) - >>> a.count(axis=1) - array([3, 0]) - - """ - kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} - - m = self._mask - # special case for matrices (we assume no other subclasses modify - # their dimensions) - if isinstance(self.data, np.matrix): - if m is nomask: - m = np.zeros(self.shape, dtype=np.bool_) - m = m.view(type(self.data)) - - if m is nomask: - # compare to _count_reduce_items in _methods.py - - if self.shape == (): - if axis not in (None, 0): - raise np.AxisError(axis=axis, ndim=self.ndim) - return 1 - elif axis is None: - if kwargs.get('keepdims', False): - return np.array(self.size, dtype=np.intp, ndmin=self.ndim) - return self.size - - axes = normalize_axis_tuple(axis, self.ndim) - items = 1 - for ax in axes: - items *= self.shape[ax] - - if kwargs.get('keepdims', False): - out_dims = list(self.shape) - for a in axes: - out_dims[a] = 1 - else: - out_dims = [d for n, d in enumerate(self.shape) - if n not in axes] - # make sure to return a 0-d array if axis is supplied - return np.full(out_dims, items, dtype=np.intp) - - # take care of the masked singleton - if self is masked: - return 0 - - return (~m).sum(axis=axis, dtype=np.intp, **kwargs) - - def ravel(self, order='C'): - """ - Returns a 1D version of self, as a view. - - Parameters - ---------- - order : {'C', 'F', 'A', 'K'}, optional - The elements of `a` are read using this index order. 
'C' means to - index the elements in C-like order, with the last axis index - changing fastest, back to the first axis index changing slowest. - 'F' means to index the elements in Fortran-like index order, with - the first index changing fastest, and the last index changing - slowest. Note that the 'C' and 'F' options take no account of the - memory layout of the underlying array, and only refer to the order - of axis indexing. 'A' means to read the elements in Fortran-like - index order if `m` is Fortran *contiguous* in memory, C-like order - otherwise. 'K' means to read the elements in the order they occur - in memory, except for reversing the data when strides are negative. - By default, 'C' index order is used. - - Returns - ------- - MaskedArray - Output view is of shape ``(self.size,)`` (or - ``(np.ma.product(self.shape),)``). - - Examples - -------- - >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> x - masked_array( - data=[[1, --, 3], - [--, 5, --], - [7, --, 9]], - mask=[[False, True, False], - [ True, False, True], - [False, True, False]], - fill_value=999999) - >>> x.ravel() - masked_array(data=[1, --, 3, --, 5, --, 7, --, 9], - mask=[False, True, False, True, False, True, False, True, - False], - fill_value=999999) - - """ - r = ndarray.ravel(self._data, order=order).view(type(self)) - r._update_from(self) - if self._mask is not nomask: - r._mask = ndarray.ravel(self._mask, order=order).reshape(r.shape) - else: - r._mask = nomask - return r - - - def reshape(self, *s, **kwargs): - """ - Give a new shape to the array without changing its data. - - Returns a masked array containing the same data, but with a new shape. - The result is a view on the original array; if this is not possible, a - ValueError is raised. - - Parameters - ---------- - shape : int or tuple of ints - The new shape should be compatible with the original shape. If an - integer is supplied, then the result will be a 1-D array of that - length. 
- order : {'C', 'F'}, optional - Determines whether the array data should be viewed as in C - (row-major) or FORTRAN (column-major) order. - - Returns - ------- - reshaped_array : array - A new view on the array. - - See Also - -------- - reshape : Equivalent function in the masked array module. - numpy.ndarray.reshape : Equivalent method on ndarray object. - numpy.reshape : Equivalent function in the NumPy module. - - Notes - ----- - The reshaping operation cannot guarantee that a copy will not be made, - to modify the shape in place, use ``a.shape = s`` - - Examples - -------- - >>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1]) - >>> x - masked_array( - data=[[--, 2], - [3, --]], - mask=[[ True, False], - [False, True]], - fill_value=999999) - >>> x = x.reshape((4,1)) - >>> x - masked_array( - data=[[--], - [2], - [3], - [--]], - mask=[[ True], - [False], - [False], - [ True]], - fill_value=999999) - - """ - kwargs.update(order=kwargs.get('order', 'C')) - result = self._data.reshape(*s, **kwargs).view(type(self)) - result._update_from(self) - mask = self._mask - if mask is not nomask: - result._mask = mask.reshape(*s, **kwargs) - return result - - def resize(self, newshape, refcheck=True, order=False): - """ - .. warning:: - - This method does nothing, except raise a ValueError exception. A - masked array does not own its data and therefore cannot safely be - resized in place. Use the `numpy.ma.resize` function instead. - - This method is difficult to implement safely and may be deprecated in - future releases of NumPy. - - """ - # Note : the 'order' keyword looks broken, let's just drop it - errmsg = "A masked array does not own its data "\ - "and therefore cannot be resized.\n" \ - "Use the numpy.ma.resize function instead." - raise ValueError(errmsg) - - def put(self, indices, values, mode='raise'): - """ - Set storage-indexed locations to corresponding values. - - Sets self._data.flat[n] = values[n] for each n in indices. 
- If `values` is shorter than `indices` then it will repeat. - If `values` has some masked values, the initial mask is updated - in consequence, else the corresponding values are unmasked. - - Parameters - ---------- - indices : 1-D array_like - Target indices, interpreted as integers. - values : array_like - Values to place in self._data copy at target indices. - mode : {'raise', 'wrap', 'clip'}, optional - Specifies how out-of-bounds indices will behave. - 'raise' : raise an error. - 'wrap' : wrap around. - 'clip' : clip to the range. - - Notes - ----- - `values` can be a scalar or length 1 array. - - Examples - -------- - >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> x - masked_array( - data=[[1, --, 3], - [--, 5, --], - [7, --, 9]], - mask=[[False, True, False], - [ True, False, True], - [False, True, False]], - fill_value=999999) - >>> x.put([0,4,8],[10,20,30]) - >>> x - masked_array( - data=[[10, --, 3], - [--, 20, --], - [7, --, 30]], - mask=[[False, True, False], - [ True, False, True], - [False, True, False]], - fill_value=999999) - - >>> x.put(4,999) - >>> x - masked_array( - data=[[10, --, 3], - [--, 999, --], - [7, --, 30]], - mask=[[False, True, False], - [ True, False, True], - [False, True, False]], - fill_value=999999) - - """ - # Hard mask: Get rid of the values/indices that fall on masked data - if self._hardmask and self._mask is not nomask: - mask = self._mask[indices] - indices = narray(indices, copy=False) - values = narray(values, copy=False, subok=True) - values.resize(indices.shape) - indices = indices[~mask] - values = values[~mask] - - self._data.put(indices, values, mode=mode) - - # short circuit if neither self nor values are masked - if self._mask is nomask and getmask(values) is nomask: - return - - m = getmaskarray(self) - - if getmask(values) is nomask: - m.put(indices, False, mode=mode) - else: - m.put(indices, values._mask, mode=mode) - m = make_mask(m, copy=False, shrink=True) - self._mask = m - return 
- - def ids(self): - """ - Return the addresses of the data and mask areas. - - Parameters - ---------- - None - - Examples - -------- - >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1]) - >>> x.ids() - (166670640, 166659832) # may vary - - If the array has no mask, the address of `nomask` is returned. This address - is typically not close to the data in memory: - - >>> x = np.ma.array([1, 2, 3]) - >>> x.ids() - (166691080, 3083169284L) # may vary - - """ - if self._mask is nomask: - return (self.ctypes.data, id(nomask)) - return (self.ctypes.data, self._mask.ctypes.data) - - def iscontiguous(self): - """ - Return a boolean indicating whether the data is contiguous. - - Parameters - ---------- - None - - Examples - -------- - >>> x = np.ma.array([1, 2, 3]) - >>> x.iscontiguous() - True - - `iscontiguous` returns one of the flags of the masked array: - - >>> x.flags - C_CONTIGUOUS : True - F_CONTIGUOUS : True - OWNDATA : False - WRITEABLE : True - ALIGNED : True - WRITEBACKIFCOPY : False - UPDATEIFCOPY : False - - """ - return self.flags['CONTIGUOUS'] - - def all(self, axis=None, out=None, keepdims=np._NoValue): - """ - Returns True if all elements evaluate to True. - - The output array is masked where all the values along the given axis - are masked: if the output would have been a scalar and that all the - values are masked, then the output is `masked`. - - Refer to `numpy.all` for full documentation. 
- - See Also - -------- - numpy.ndarray.all : corresponding function for ndarrays - numpy.all : equivalent function - - Examples - -------- - >>> np.ma.array([1,2,3]).all() - True - >>> a = np.ma.array([1,2,3], mask=True) - >>> (a.all() is np.ma.masked) - True - - """ - kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} - - mask = _check_mask_axis(self._mask, axis, **kwargs) - if out is None: - d = self.filled(True).all(axis=axis, **kwargs).view(type(self)) - if d.ndim: - d.__setmask__(mask) - elif mask: - return masked - return d - self.filled(True).all(axis=axis, out=out, **kwargs) - if isinstance(out, MaskedArray): - if out.ndim or mask: - out.__setmask__(mask) - return out - - def any(self, axis=None, out=None, keepdims=np._NoValue): - """ - Returns True if any of the elements of `a` evaluate to True. - - Masked values are considered as False during computation. - - Refer to `numpy.any` for full documentation. - - See Also - -------- - numpy.ndarray.any : corresponding function for ndarrays - numpy.any : equivalent function - - """ - kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} - - mask = _check_mask_axis(self._mask, axis, **kwargs) - if out is None: - d = self.filled(False).any(axis=axis, **kwargs).view(type(self)) - if d.ndim: - d.__setmask__(mask) - elif mask: - d = masked - return d - self.filled(False).any(axis=axis, out=out, **kwargs) - if isinstance(out, MaskedArray): - if out.ndim or mask: - out.__setmask__(mask) - return out - - def nonzero(self): - """ - Return the indices of unmasked elements that are not zero. - - Returns a tuple of arrays, one for each dimension, containing the - indices of the non-zero elements in that dimension. The corresponding - non-zero values can be obtained with:: - - a[a.nonzero()] - - To group the indices by element, rather than dimension, use - instead:: - - np.transpose(a.nonzero()) - - The result of this is always a 2d array, with a row for each non-zero - element. 
- - Parameters - ---------- - None - - Returns - ------- - tuple_of_arrays : tuple - Indices of elements that are non-zero. - - See Also - -------- - numpy.nonzero : - Function operating on ndarrays. - flatnonzero : - Return indices that are non-zero in the flattened version of the input - array. - numpy.ndarray.nonzero : - Equivalent ndarray method. - count_nonzero : - Counts the number of non-zero elements in the input array. - - Examples - -------- - >>> import numpy.ma as ma - >>> x = ma.array(np.eye(3)) - >>> x - masked_array( - data=[[1., 0., 0.], - [0., 1., 0.], - [0., 0., 1.]], - mask=False, - fill_value=1e+20) - >>> x.nonzero() - (array([0, 1, 2]), array([0, 1, 2])) - - Masked elements are ignored. - - >>> x[1, 1] = ma.masked - >>> x - masked_array( - data=[[1.0, 0.0, 0.0], - [0.0, --, 0.0], - [0.0, 0.0, 1.0]], - mask=[[False, False, False], - [False, True, False], - [False, False, False]], - fill_value=1e+20) - >>> x.nonzero() - (array([0, 2]), array([0, 2])) - - Indices can also be grouped by element. - - >>> np.transpose(x.nonzero()) - array([[0, 0], - [2, 2]]) - - A common use for ``nonzero`` is to find the indices of an array, where - a condition is True. Given an array `a`, the condition `a` > 3 is a - boolean array and since False is interpreted as 0, ma.nonzero(a > 3) - yields the indices of the `a` where the condition is true. - - >>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]]) - >>> a > 3 - masked_array( - data=[[False, False, False], - [ True, True, True], - [ True, True, True]], - mask=False, - fill_value=True) - >>> ma.nonzero(a > 3) - (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) - - The ``nonzero`` method of the condition array can also be called. 
- - >>> (a > 3).nonzero() - (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) - - """ - return narray(self.filled(0), copy=False).nonzero() - - def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): - """ - (this docstring should be overwritten) - """ - #!!!: implement out + test! - m = self._mask - if m is nomask: - result = super(MaskedArray, self).trace(offset=offset, axis1=axis1, - axis2=axis2, out=out) - return result.astype(dtype) - else: - D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2) - return D.astype(dtype).filled(0).sum(axis=-1, out=out) - trace.__doc__ = ndarray.trace.__doc__ - - def dot(self, b, out=None, strict=False): - """ - a.dot(b, out=None) - - Masked dot product of two arrays. Note that `out` and `strict` are - located in different positions than in `ma.dot`. In order to - maintain compatibility with the functional version, it is - recommended that the optional arguments be treated as keyword only. - At some point that may be mandatory. - - .. versionadded:: 1.10.0 - - Parameters - ---------- - b : masked_array_like - Inputs array. - out : masked_array, optional - Output argument. This must have the exact kind that would be - returned if it was not used. In particular, it must have the - right type, must be C-contiguous, and its dtype must be the - dtype that would be returned for `ma.dot(a,b)`. This is a - performance feature. Therefore, if these conditions are not - met, an exception is raised, instead of attempting to be - flexible. - strict : bool, optional - Whether masked data are propagated (True) or set to 0 (False) - for the computation. Default is False. Propagating the mask - means that if a masked value appears in a row or column, the - whole row or column is considered masked. - - .. 
versionadded:: 1.10.2 - - See Also - -------- - numpy.ma.dot : equivalent function - - """ - return dot(self, b, out=out, strict=strict) - - def sum(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): - """ - Return the sum of the array elements over the given axis. - - Masked elements are set to 0 internally. - - Refer to `numpy.sum` for full documentation. - - See Also - -------- - numpy.ndarray.sum : corresponding function for ndarrays - numpy.sum : equivalent function - - Examples - -------- - >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> x - masked_array( - data=[[1, --, 3], - [--, 5, --], - [7, --, 9]], - mask=[[False, True, False], - [ True, False, True], - [False, True, False]], - fill_value=999999) - >>> x.sum() - 25 - >>> x.sum(axis=1) - masked_array(data=[4, 5, 16], - mask=[False, False, False], - fill_value=999999) - >>> x.sum(axis=0) - masked_array(data=[8, 5, 12], - mask=[False, False, False], - fill_value=999999) - >>> print(type(x.sum(axis=0, dtype=np.int64)[0])) - - - """ - kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} - - _mask = self._mask - newmask = _check_mask_axis(_mask, axis, **kwargs) - # No explicit output - if out is None: - result = self.filled(0).sum(axis, dtype=dtype, **kwargs) - rndim = getattr(result, 'ndim', 0) - if rndim: - result = result.view(type(self)) - result.__setmask__(newmask) - elif newmask: - result = masked - return result - # Explicit output - result = self.filled(0).sum(axis, dtype=dtype, out=out, **kwargs) - if isinstance(out, MaskedArray): - outmask = getmask(out) - if outmask is nomask: - outmask = out._mask = make_mask_none(out.shape) - outmask.flat = newmask - return out - - def cumsum(self, axis=None, dtype=None, out=None): - """ - Return the cumulative sum of the array elements over the given axis. - - Masked values are set to 0 internally during the computation. - However, their position is saved, and the result will be masked at - the same locations. 
- - Refer to `numpy.cumsum` for full documentation. - - Notes - ----- - The mask is lost if `out` is not a valid :class:`MaskedArray` ! - - Arithmetic is modular when using integer types, and no error is - raised on overflow. - - See Also - -------- - numpy.ndarray.cumsum : corresponding function for ndarrays - numpy.cumsum : equivalent function - - Examples - -------- - >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0]) - >>> marr.cumsum() - masked_array(data=[0, 1, 3, --, --, --, 9, 16, 24, 33], - mask=[False, False, False, True, True, True, False, False, - False, False], - fill_value=999999) - - """ - result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out) - if out is not None: - if isinstance(out, MaskedArray): - out.__setmask__(self.mask) - return out - result = result.view(type(self)) - result.__setmask__(self._mask) - return result - - def prod(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): - """ - Return the product of the array elements over the given axis. - - Masked elements are set to 1 internally for computation. - - Refer to `numpy.prod` for full documentation. - - Notes - ----- - Arithmetic is modular when using integer types, and no error is raised - on overflow. 
- - See Also - -------- - numpy.ndarray.prod : corresponding function for ndarrays - numpy.prod : equivalent function - """ - kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} - - _mask = self._mask - newmask = _check_mask_axis(_mask, axis, **kwargs) - # No explicit output - if out is None: - result = self.filled(1).prod(axis, dtype=dtype, **kwargs) - rndim = getattr(result, 'ndim', 0) - if rndim: - result = result.view(type(self)) - result.__setmask__(newmask) - elif newmask: - result = masked - return result - # Explicit output - result = self.filled(1).prod(axis, dtype=dtype, out=out, **kwargs) - if isinstance(out, MaskedArray): - outmask = getmask(out) - if outmask is nomask: - outmask = out._mask = make_mask_none(out.shape) - outmask.flat = newmask - return out - product = prod - - def cumprod(self, axis=None, dtype=None, out=None): - """ - Return the cumulative product of the array elements over the given axis. - - Masked values are set to 1 internally during the computation. - However, their position is saved, and the result will be masked at - the same locations. - - Refer to `numpy.cumprod` for full documentation. - - Notes - ----- - The mask is lost if `out` is not a valid MaskedArray ! - - Arithmetic is modular when using integer types, and no error is - raised on overflow. - - See Also - -------- - numpy.ndarray.cumprod : corresponding function for ndarrays - numpy.cumprod : equivalent function - """ - result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out) - if out is not None: - if isinstance(out, MaskedArray): - out.__setmask__(self._mask) - return out - result = result.view(type(self)) - result.__setmask__(self._mask) - return result - - def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): - """ - Returns the average of the array elements along given axis. - - Masked entries are ignored, and result elements which are not - finite will be masked. - - Refer to `numpy.mean` for full documentation. 
- - See Also - -------- - numpy.ndarray.mean : corresponding function for ndarrays - numpy.mean : Equivalent function - numpy.ma.average: Weighted average. - - Examples - -------- - >>> a = np.ma.array([1,2,3], mask=[False, False, True]) - >>> a - masked_array(data=[1, 2, --], - mask=[False, False, True], - fill_value=999999) - >>> a.mean() - 1.5 - - """ - kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} - - if self._mask is nomask: - result = super(MaskedArray, self).mean(axis=axis, - dtype=dtype, **kwargs)[()] - else: - dsum = self.sum(axis=axis, dtype=dtype, **kwargs) - cnt = self.count(axis=axis, **kwargs) - if cnt.shape == () and (cnt == 0): - result = masked - else: - result = dsum * 1. / cnt - if out is not None: - out.flat = result - if isinstance(out, MaskedArray): - outmask = getmask(out) - if outmask is nomask: - outmask = out._mask = make_mask_none(out.shape) - outmask.flat = getmask(result) - return out - return result - - def anom(self, axis=None, dtype=None): - """ - Compute the anomalies (deviations from the arithmetic mean) - along the given axis. - - Returns an array of anomalies, with the same shape as the input and - where the arithmetic mean is computed along the given axis. - - Parameters - ---------- - axis : int, optional - Axis over which the anomalies are taken. - The default is to use the mean of the flattened array as reference. - dtype : dtype, optional - Type to use in computing the variance. For arrays of integer type - the default is float32; for arrays of float types it is the same as - the array type. - - See Also - -------- - mean : Compute the mean of the array. 
- - Examples - -------- - >>> a = np.ma.array([1,2,3]) - >>> a.anom() - masked_array(data=[-1., 0., 1.], - mask=False, - fill_value=1e+20) - - """ - m = self.mean(axis, dtype) - if m is masked: - return m - - if not axis: - return self - m - else: - return self - expand_dims(m, axis) - - def var(self, axis=None, dtype=None, out=None, ddof=0, - keepdims=np._NoValue): - """ - Returns the variance of the array elements along given axis. - - Masked entries are ignored, and result elements which are not - finite will be masked. - - Refer to `numpy.var` for full documentation. - - See Also - -------- - numpy.ndarray.var : corresponding function for ndarrays - numpy.var : Equivalent function - """ - kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} - - # Easy case: nomask, business as usual - if self._mask is nomask: - ret = super(MaskedArray, self).var(axis=axis, dtype=dtype, out=out, - ddof=ddof, **kwargs)[()] - if out is not None: - if isinstance(out, MaskedArray): - out.__setmask__(nomask) - return out - return ret - - # Some data are masked, yay! - cnt = self.count(axis=axis, **kwargs) - ddof - danom = self - self.mean(axis, dtype, keepdims=True) - if iscomplexobj(self): - danom = umath.absolute(danom) ** 2 - else: - danom *= danom - dvar = divide(danom.sum(axis, **kwargs), cnt).view(type(self)) - # Apply the mask if it's not a scalar - if dvar.ndim: - dvar._mask = mask_or(self._mask.all(axis, **kwargs), (cnt <= 0)) - dvar._update_from(self) - elif getmask(dvar): - # Make sure that masked is returned when the scalar is masked. - dvar = masked - if out is not None: - if isinstance(out, MaskedArray): - out.flat = 0 - out.__setmask__(True) - elif out.dtype.kind in 'biu': - errmsg = "Masked data information would be lost in one or "\ - "more location." 
- raise MaskError(errmsg) - else: - out.flat = np.nan - return out - # In case with have an explicit output - if out is not None: - # Set the data - out.flat = dvar - # Set the mask if needed - if isinstance(out, MaskedArray): - out.__setmask__(dvar.mask) - return out - return dvar - var.__doc__ = np.var.__doc__ - - def std(self, axis=None, dtype=None, out=None, ddof=0, - keepdims=np._NoValue): - """ - Returns the standard deviation of the array elements along given axis. - - Masked entries are ignored. - - Refer to `numpy.std` for full documentation. - - See Also - -------- - numpy.ndarray.std : corresponding function for ndarrays - numpy.std : Equivalent function - """ - kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} - - dvar = self.var(axis, dtype, out, ddof, **kwargs) - if dvar is not masked: - if out is not None: - np.power(out, 0.5, out=out, casting='unsafe') - return out - dvar = sqrt(dvar) - return dvar - - def round(self, decimals=0, out=None): - """ - Return each element rounded to the given number of decimals. - - Refer to `numpy.around` for full documentation. - - See Also - -------- - numpy.ndarray.around : corresponding function for ndarrays - numpy.around : equivalent function - """ - result = self._data.round(decimals=decimals, out=out).view(type(self)) - if result.ndim > 0: - result._mask = self._mask - result._update_from(self) - elif self._mask: - # Return masked when the scalar is masked - result = masked - # No explicit output: we're done - if out is None: - return result - if isinstance(out, MaskedArray): - out.__setmask__(self._mask) - return out - - def argsort(self, axis=np._NoValue, kind=None, order=None, - endwith=True, fill_value=None): - """ - Return an ndarray of indices that sort the array along the - specified axis. Masked values are filled beforehand to - `fill_value`. - - Parameters - ---------- - axis : int, optional - Axis along which to sort. If None, the default, the flattened array - is used. - - .. 
versionchanged:: 1.13.0 - Previously, the default was documented to be -1, but that was - in error. At some future date, the default will change to -1, as - originally intended. - Until then, the axis should be given explicitly when - ``arr.ndim > 1``, to avoid a FutureWarning. - kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional - The sorting algorithm used. - order : list, optional - When `a` is an array with fields defined, this argument specifies - which fields to compare first, second, etc. Not all fields need be - specified. - endwith : {True, False}, optional - Whether missing values (if any) should be treated as the largest values - (True) or the smallest values (False) - When the array contains unmasked values at the same extremes of the - datatype, the ordering of these values and the masked values is - undefined. - fill_value : {var}, optional - Value used internally for the masked values. - If ``fill_value`` is not None, it supersedes ``endwith``. - - Returns - ------- - index_array : ndarray, int - Array of indices that sort `a` along the specified axis. - In other words, ``a[index_array]`` yields a sorted `a`. - - See Also - -------- - MaskedArray.sort : Describes sorting algorithms used. - lexsort : Indirect stable sort with multiple keys. - numpy.ndarray.sort : Inplace sort. - - Notes - ----- - See `sort` for notes on the different sorting algorithms. 
- - Examples - -------- - >>> a = np.ma.array([3,2,1], mask=[False, False, True]) - >>> a - masked_array(data=[3, 2, --], - mask=[False, False, True], - fill_value=999999) - >>> a.argsort() - array([1, 0, 2]) - - """ - - # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default - if axis is np._NoValue: - axis = _deprecate_argsort_axis(self) - - if fill_value is None: - if endwith: - # nan > inf - if np.issubdtype(self.dtype, np.floating): - fill_value = np.nan - else: - fill_value = minimum_fill_value(self) - else: - fill_value = maximum_fill_value(self) - - filled = self.filled(fill_value) - return filled.argsort(axis=axis, kind=kind, order=order) - - def argmin(self, axis=None, fill_value=None, out=None): - """ - Return array of indices to the minimum values along the given axis. - - Parameters - ---------- - axis : {None, integer} - If None, the index is into the flattened array, otherwise along - the specified axis - fill_value : {var}, optional - Value used to fill in the masked values. If None, the output of - minimum_fill_value(self._data) is used instead. - out : {None, array}, optional - Array into which the result can be placed. Its type is preserved - and it must be of the right shape to hold the output. - - Returns - ------- - ndarray or scalar - If multi-dimension input, returns a new ndarray of indices to the - minimum values along the given axis. Otherwise, returns a scalar - of index to the minimum values along the given axis. 
- - Examples - -------- - >>> x = np.ma.array(np.arange(4), mask=[1,1,0,0]) - >>> x.shape = (2,2) - >>> x - masked_array( - data=[[--, --], - [2, 3]], - mask=[[ True, True], - [False, False]], - fill_value=999999) - >>> x.argmin(axis=0, fill_value=-1) - array([0, 0]) - >>> x.argmin(axis=0, fill_value=9) - array([1, 1]) - - """ - if fill_value is None: - fill_value = minimum_fill_value(self) - d = self.filled(fill_value).view(ndarray) - return d.argmin(axis, out=out) - - def argmax(self, axis=None, fill_value=None, out=None): - """ - Returns array of indices of the maximum values along the given axis. - Masked values are treated as if they had the value fill_value. - - Parameters - ---------- - axis : {None, integer} - If None, the index is into the flattened array, otherwise along - the specified axis - fill_value : {var}, optional - Value used to fill in the masked values. If None, the output of - maximum_fill_value(self._data) is used instead. - out : {None, array}, optional - Array into which the result can be placed. Its type is preserved - and it must be of the right shape to hold the output. - - Returns - ------- - index_array : {integer_array} - - Examples - -------- - >>> a = np.arange(6).reshape(2,3) - >>> a.argmax() - 5 - >>> a.argmax(0) - array([1, 1, 1]) - >>> a.argmax(1) - array([2, 2]) - - """ - if fill_value is None: - fill_value = maximum_fill_value(self._data) - d = self.filled(fill_value).view(ndarray) - return d.argmax(axis, out=out) - - def sort(self, axis=-1, kind=None, order=None, - endwith=True, fill_value=None): - """ - Sort the array, in-place - - Parameters - ---------- - a : array_like - Array to be sorted. - axis : int, optional - Axis along which to sort. If None, the array is flattened before - sorting. The default is -1, which sorts along the last axis. - kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional - The sorting algorithm used. 
- order : list, optional - When `a` is a structured array, this argument specifies which fields - to compare first, second, and so on. This list does not need to - include all of the fields. - endwith : {True, False}, optional - Whether missing values (if any) should be treated as the largest values - (True) or the smallest values (False) - When the array contains unmasked values sorting at the same extremes of the - datatype, the ordering of these values and the masked values is - undefined. - fill_value : {var}, optional - Value used internally for the masked values. - If ``fill_value`` is not None, it supersedes ``endwith``. - - Returns - ------- - sorted_array : ndarray - Array of the same type and shape as `a`. - - See Also - -------- - numpy.ndarray.sort : Method to sort an array in-place. - argsort : Indirect sort. - lexsort : Indirect stable sort on multiple keys. - searchsorted : Find elements in a sorted array. - - Notes - ----- - See ``sort`` for notes on the different sorting algorithms. - - Examples - -------- - >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) - >>> # Default - >>> a.sort() - >>> a - masked_array(data=[1, 3, 5, --, --], - mask=[False, False, False, True, True], - fill_value=999999) - - >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) - >>> # Put missing values in the front - >>> a.sort(endwith=False) - >>> a - masked_array(data=[--, --, 1, 3, 5], - mask=[ True, True, False, False, False], - fill_value=999999) - - >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) - >>> # fill_value takes over endwith - >>> a.sort(endwith=False, fill_value=3) - >>> a - masked_array(data=[1, --, --, 3, 5], - mask=[False, True, True, False, False], - fill_value=999999) - - """ - if self._mask is nomask: - ndarray.sort(self, axis=axis, kind=kind, order=order) - return - - if self is masked: - return - - sidx = self.argsort(axis=axis, kind=kind, order=order, - fill_value=fill_value, endwith=endwith) - - self[...] 
= np.take_along_axis(self, sidx, axis=axis) - - def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): - """ - Return the minimum along a given axis. - - Parameters - ---------- - axis : {None, int}, optional - Axis along which to operate. By default, ``axis`` is None and the - flattened input is used. - out : array_like, optional - Alternative output array in which to place the result. Must be of - the same shape and buffer length as the expected output. - fill_value : {var}, optional - Value used to fill in the masked values. - If None, use the output of `minimum_fill_value`. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the array. - - Returns - ------- - amin : array_like - New array holding the result. - If ``out`` was specified, ``out`` is returned. - - See Also - -------- - minimum_fill_value - Returns the minimum filling value for a given datatype. - - """ - kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} - - _mask = self._mask - newmask = _check_mask_axis(_mask, axis, **kwargs) - if fill_value is None: - fill_value = minimum_fill_value(self) - # No explicit output - if out is None: - result = self.filled(fill_value).min( - axis=axis, out=out, **kwargs).view(type(self)) - if result.ndim: - # Set the mask - result.__setmask__(newmask) - # Get rid of Infs - if newmask.ndim: - np.copyto(result, result.fill_value, where=newmask) - elif newmask: - result = masked - return result - # Explicit output - result = self.filled(fill_value).min(axis=axis, out=out, **kwargs) - if isinstance(out, MaskedArray): - outmask = getmask(out) - if outmask is nomask: - outmask = out._mask = make_mask_none(out.shape) - outmask.flat = newmask - else: - if out.dtype.kind in 'biu': - errmsg = "Masked data information would be lost in one or more"\ - " location." 
- raise MaskError(errmsg) - np.copyto(out, np.nan, where=newmask) - return out - - # unique to masked arrays - def mini(self, axis=None): - """ - Return the array minimum along the specified axis. - - .. deprecated:: 1.13.0 - This function is identical to both: - - * ``self.min(keepdims=True, axis=axis).squeeze(axis=axis)`` - * ``np.ma.minimum.reduce(self, axis=axis)`` - - Typically though, ``self.min(axis=axis)`` is sufficient. - - Parameters - ---------- - axis : int, optional - The axis along which to find the minima. Default is None, in which case - the minimum value in the whole array is returned. - - Returns - ------- - min : scalar or MaskedArray - If `axis` is None, the result is a scalar. Otherwise, if `axis` is - given and the array is at least 2-D, the result is a masked array with - dimension one smaller than the array on which `mini` is called. - - Examples - -------- - >>> x = np.ma.array(np.arange(6), mask=[0 ,1, 0, 0, 0 ,1]).reshape(3, 2) - >>> x - masked_array( - data=[[0, --], - [2, 3], - [4, --]], - mask=[[False, True], - [False, False], - [False, True]], - fill_value=999999) - >>> x.mini() - masked_array(data=0, - mask=False, - fill_value=999999) - >>> x.mini(axis=0) - masked_array(data=[0, 3], - mask=[False, False], - fill_value=999999) - >>> x.mini(axis=1) - masked_array(data=[0, 2, 4], - mask=[False, False, False], - fill_value=999999) - - There is a small difference between `mini` and `min`: - - >>> x[:,1].mini(axis=0) - masked_array(data=3, - mask=False, - fill_value=999999) - >>> x[:,1].min(axis=0) - 3 - """ - - # 2016-04-13, 1.13.0, gh-8764 - warnings.warn( - "`mini` is deprecated; use the `min` method or " - "`np.ma.minimum.reduce instead.", - DeprecationWarning, stacklevel=2) - return minimum.reduce(self, axis) - - def max(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): - """ - Return the maximum along a given axis. - - Parameters - ---------- - axis : {None, int}, optional - Axis along which to operate. 
By default, ``axis`` is None and the - flattened input is used. - out : array_like, optional - Alternative output array in which to place the result. Must - be of the same shape and buffer length as the expected output. - fill_value : {var}, optional - Value used to fill in the masked values. - If None, use the output of maximum_fill_value(). - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the array. - - Returns - ------- - amax : array_like - New array holding the result. - If ``out`` was specified, ``out`` is returned. - - See Also - -------- - maximum_fill_value - Returns the maximum filling value for a given datatype. - - """ - kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} - - _mask = self._mask - newmask = _check_mask_axis(_mask, axis, **kwargs) - if fill_value is None: - fill_value = maximum_fill_value(self) - # No explicit output - if out is None: - result = self.filled(fill_value).max( - axis=axis, out=out, **kwargs).view(type(self)) - if result.ndim: - # Set the mask - result.__setmask__(newmask) - # Get rid of Infs - if newmask.ndim: - np.copyto(result, result.fill_value, where=newmask) - elif newmask: - result = masked - return result - # Explicit output - result = self.filled(fill_value).max(axis=axis, out=out, **kwargs) - if isinstance(out, MaskedArray): - outmask = getmask(out) - if outmask is nomask: - outmask = out._mask = make_mask_none(out.shape) - outmask.flat = newmask - else: - - if out.dtype.kind in 'biu': - errmsg = "Masked data information would be lost in one or more"\ - " location." - raise MaskError(errmsg) - np.copyto(out, np.nan, where=newmask) - return out - - def ptp(self, axis=None, out=None, fill_value=None, keepdims=False): - """ - Return (maximum - minimum) along the given dimension - (i.e. peak-to-peak value). 
- - Parameters - ---------- - axis : {None, int}, optional - Axis along which to find the peaks. If None (default) the - flattened array is used. - out : {None, array_like}, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output - but the type will be cast if necessary. - fill_value : {var}, optional - Value used to fill in the masked values. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the array. - - Returns - ------- - ptp : ndarray. - A new array holding the result, unless ``out`` was - specified, in which case a reference to ``out`` is returned. - - """ - if out is None: - result = self.max(axis=axis, fill_value=fill_value, - keepdims=keepdims) - result -= self.min(axis=axis, fill_value=fill_value, - keepdims=keepdims) - return result - out.flat = self.max(axis=axis, out=out, fill_value=fill_value, - keepdims=keepdims) - min_value = self.min(axis=axis, fill_value=fill_value, - keepdims=keepdims) - np.subtract(out, min_value, out=out, casting='unsafe') - return out - - def partition(self, *args, **kwargs): - warnings.warn("Warning: 'partition' will ignore the 'mask' " - "of the {}.".format(self.__class__.__name__), - stacklevel=2) - return super(MaskedArray, self).partition(*args, **kwargs) - - def argpartition(self, *args, **kwargs): - warnings.warn("Warning: 'argpartition' will ignore the 'mask' " - "of the {}.".format(self.__class__.__name__), - stacklevel=2) - return super(MaskedArray, self).argpartition(*args, **kwargs) - - def take(self, indices, axis=None, out=None, mode='raise'): - """ - """ - (_data, _mask) = (self._data, self._mask) - cls = type(self) - # Make sure the indices are not masked - maskindices = getmask(indices) - if maskindices is not nomask: - indices = indices.filled(0) - # Get the data, promoting scalars 
to 0d arrays with [...] so that - # .view works correctly - if out is None: - out = _data.take(indices, axis=axis, mode=mode)[...].view(cls) - else: - np.take(_data, indices, axis=axis, mode=mode, out=out) - # Get the mask - if isinstance(out, MaskedArray): - if _mask is nomask: - outmask = maskindices - else: - outmask = _mask.take(indices, axis=axis, mode=mode) - outmask |= maskindices - out.__setmask__(outmask) - # demote 0d arrays back to scalars, for consistency with ndarray.take - return out[()] - - # Array methods - copy = _arraymethod('copy') - diagonal = _arraymethod('diagonal') - flatten = _arraymethod('flatten') - repeat = _arraymethod('repeat') - squeeze = _arraymethod('squeeze') - swapaxes = _arraymethod('swapaxes') - T = property(fget=lambda self: self.transpose()) - transpose = _arraymethod('transpose') - - def tolist(self, fill_value=None): - """ - Return the data portion of the masked array as a hierarchical Python list. - - Data items are converted to the nearest compatible Python type. - Masked values are converted to `fill_value`. If `fill_value` is None, - the corresponding entries in the output list will be ``None``. - - Parameters - ---------- - fill_value : scalar, optional - The value to use for invalid entries. Default is None. - - Returns - ------- - result : list - The Python list representation of the masked array. - - Examples - -------- - >>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4) - >>> x.tolist() - [[1, None, 3], [None, 5, None], [7, None, 9]] - >>> x.tolist(-999) - [[1, -999, 3], [-999, 5, -999], [7, -999, 9]] - - """ - _mask = self._mask - # No mask ? Just return .data.tolist ? - if _mask is nomask: - return self._data.tolist() - # Explicit fill_value: fill the array and get the list - if fill_value is not None: - return self.filled(fill_value).tolist() - # Structured array. 
- names = self.dtype.names - if names: - result = self._data.astype([(_, object) for _ in names]) - for n in names: - result[n][_mask[n]] = None - return result.tolist() - # Standard arrays. - if _mask is nomask: - return [None] - # Set temps to save time when dealing w/ marrays. - inishape = self.shape - result = np.array(self._data.ravel(), dtype=object) - result[_mask.ravel()] = None - result.shape = inishape - return result.tolist() - - def tostring(self, fill_value=None, order='C'): - """ - This function is a compatibility alias for tobytes. Despite its name it - returns bytes not strings. - """ - - return self.tobytes(fill_value, order=order) - - def tobytes(self, fill_value=None, order='C'): - """ - Return the array data as a string containing the raw bytes in the array. - - The array is filled with a fill value before the string conversion. - - .. versionadded:: 1.9.0 - - Parameters - ---------- - fill_value : scalar, optional - Value used to fill in the masked values. Default is None, in which - case `MaskedArray.fill_value` is used. - order : {'C','F','A'}, optional - Order of the data item in the copy. Default is 'C'. - - - 'C' -- C order (row major). - - 'F' -- Fortran order (column major). - - 'A' -- Any, current order of array. - - None -- Same as 'A'. - - See Also - -------- - numpy.ndarray.tobytes - tolist, tofile - - Notes - ----- - As for `ndarray.tobytes`, information about the shape, dtype, etc., - but also about `fill_value`, will be lost. - - Examples - -------- - >>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) - >>> x.tobytes() - b'\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00' - - """ - return self.filled(fill_value).tobytes(order=order) - - def tofile(self, fid, sep="", format="%s"): - """ - Save a masked array to a file in binary format. - - .. warning:: - This function is not implemented yet. 
- - Raises - ------ - NotImplementedError - When `tofile` is called. - - """ - raise NotImplementedError("MaskedArray.tofile() not implemented yet.") - - def toflex(self): - """ - Transforms a masked array into a flexible-type array. - - The flexible type array that is returned will have two fields: - - * the ``_data`` field stores the ``_data`` part of the array. - * the ``_mask`` field stores the ``_mask`` part of the array. - - Parameters - ---------- - None - - Returns - ------- - record : ndarray - A new flexible-type `ndarray` with two fields: the first element - containing a value, the second element containing the corresponding - mask boolean. The returned record shape matches self.shape. - - Notes - ----- - A side-effect of transforming a masked array into a flexible `ndarray` is - that meta information (``fill_value``, ...) will be lost. - - Examples - -------- - >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> x - masked_array( - data=[[1, --, 3], - [--, 5, --], - [7, --, 9]], - mask=[[False, True, False], - [ True, False, True], - [False, True, False]], - fill_value=999999) - >>> x.toflex() - array([[(1, False), (2, True), (3, False)], - [(4, True), (5, False), (6, True)], - [(7, False), (8, True), (9, False)]], - dtype=[('_data', 'i2", (2,))]) - # x = A[0]; y = x["A"]; then y.mask["A"].size==2 - # and we can not say masked/unmasked. - # The result is no longer mvoid! - # See also issue #6724. 
- return masked_array( - data=self._data[indx], mask=m[indx], - fill_value=self._fill_value[indx], - hard_mask=self._hardmask) - if m is not nomask and m[indx]: - return masked - return self._data[indx] - - def __setitem__(self, indx, value): - self._data[indx] = value - if self._hardmask: - self._mask[indx] |= getattr(value, "_mask", False) - else: - self._mask[indx] = getattr(value, "_mask", False) - - def __str__(self): - m = self._mask - if m is nomask: - return str(self._data) - - rdtype = _replace_dtype_fields(self._data.dtype, "O") - data_arr = super(mvoid, self)._data - res = data_arr.astype(rdtype) - _recursive_printoption(res, self._mask, masked_print_option) - return str(res) - - __repr__ = __str__ - - def __iter__(self): - "Defines an iterator for mvoid" - (_data, _mask) = (self._data, self._mask) - if _mask is nomask: - for d in _data: - yield d - else: - for (d, m) in zip(_data, _mask): - if m: - yield masked - else: - yield d - - def __len__(self): - return self._data.__len__() - - def filled(self, fill_value=None): - """ - Return a copy with masked fields filled with a given value. - - Parameters - ---------- - fill_value : array_like, optional - The value to use for invalid entries. Can be scalar or - non-scalar. If latter is the case, the filled array should - be broadcastable over input array. Default is None, in - which case the `fill_value` attribute is used instead. - - Returns - ------- - filled_void - A `np.void` object - - See Also - -------- - MaskedArray.filled - - """ - return asarray(self).filled(fill_value)[()] - - def tolist(self): - """ - Transforms the mvoid object into a tuple. - - Masked fields are replaced by None. 
- - Returns - ------- - returned_tuple - Tuple of fields - """ - _mask = self._mask - if _mask is nomask: - return self._data.tolist() - result = [] - for (d, m) in zip(self._data, self._mask): - if m: - result.append(None) - else: - # .item() makes sure we return a standard Python object - result.append(d.item()) - return tuple(result) - - -############################################################################## -# Shortcuts # -############################################################################## - - -def isMaskedArray(x): - """ - Test whether input is an instance of MaskedArray. - - This function returns True if `x` is an instance of MaskedArray - and returns False otherwise. Any object is accepted as input. - - Parameters - ---------- - x : object - Object to test. - - Returns - ------- - result : bool - True if `x` is a MaskedArray. - - See Also - -------- - isMA : Alias to isMaskedArray. - isarray : Alias to isMaskedArray. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.eye(3, 3) - >>> a - array([[ 1., 0., 0.], - [ 0., 1., 0.], - [ 0., 0., 1.]]) - >>> m = ma.masked_values(a, 0) - >>> m - masked_array( - data=[[1.0, --, --], - [--, 1.0, --], - [--, --, 1.0]], - mask=[[False, True, True], - [ True, False, True], - [ True, True, False]], - fill_value=0.0) - >>> ma.isMaskedArray(a) - False - >>> ma.isMaskedArray(m) - True - >>> ma.isMaskedArray([0, 1, 2]) - False - - """ - return isinstance(x, MaskedArray) - - -isarray = isMaskedArray -isMA = isMaskedArray # backward compatibility - - -class MaskedConstant(MaskedArray): - # the lone np.ma.masked instance - __singleton = None - - @classmethod - def __has_singleton(cls): - # second case ensures `cls.__singleton` is not just a view on the - # superclass singleton - return cls.__singleton is not None and type(cls.__singleton) is cls - - def __new__(cls): - if not cls.__has_singleton(): - # We define the masked singleton as a float for higher precedence. 
- # Note that it can be tricky sometimes w/ type comparison - data = np.array(0.) - mask = np.array(True) - - # prevent any modifications - data.flags.writeable = False - mask.flags.writeable = False - - # don't fall back on MaskedArray.__new__(MaskedConstant), since - # that might confuse it - this way, the construction is entirely - # within our control - cls.__singleton = MaskedArray(data, mask=mask).view(cls) - - return cls.__singleton - - def __array_finalize__(self, obj): - if not self.__has_singleton(): - # this handles the `.view` in __new__, which we want to copy across - # properties normally - return super(MaskedConstant, self).__array_finalize__(obj) - elif self is self.__singleton: - # not clear how this can happen, play it safe - pass - else: - # everywhere else, we want to downcast to MaskedArray, to prevent a - # duplicate maskedconstant. - self.__class__ = MaskedArray - MaskedArray.__array_finalize__(self, obj) - - def __array_prepare__(self, obj, context=None): - return self.view(MaskedArray).__array_prepare__(obj, context) - - def __array_wrap__(self, obj, context=None): - return self.view(MaskedArray).__array_wrap__(obj, context) - - def __str__(self): - return str(masked_print_option._display) - - if sys.version_info.major < 3: - def __unicode__(self): - return unicode(masked_print_option._display) - - def __repr__(self): - if self is MaskedConstant.__singleton: - return 'masked' - else: - # it's a subclass, or something is wrong, make it obvious - return object.__repr__(self) - - def __reduce__(self): - """Override of MaskedArray's __reduce__. - """ - return (self.__class__, ()) - - # inplace operations have no effect. 
We have to override them to avoid - # trying to modify the readonly data and mask arrays - def __iop__(self, other): - return self - __iadd__ = \ - __isub__ = \ - __imul__ = \ - __ifloordiv__ = \ - __itruediv__ = \ - __ipow__ = \ - __iop__ - del __iop__ # don't leave this around - - def copy(self, *args, **kwargs): - """ Copy is a no-op on the maskedconstant, as it is a scalar """ - # maskedconstant is a scalar, so copy doesn't need to copy. There's - # precedent for this with `np.bool_` scalars. - return self - - def __copy__(self): - return self - - def __deepcopy__(self, memo): - return self - - def __setattr__(self, attr, value): - if not self.__has_singleton(): - # allow the singleton to be initialized - return super(MaskedConstant, self).__setattr__(attr, value) - elif self is self.__singleton: - raise AttributeError( - "attributes of {!r} are not writeable".format(self)) - else: - # duplicate instance - we can end up here from __array_finalize__, - # where we set the __class__ attribute - return super(MaskedConstant, self).__setattr__(attr, value) - - -masked = masked_singleton = MaskedConstant() -masked_array = MaskedArray - - -def array(data, dtype=None, copy=False, order=None, - mask=nomask, fill_value=None, keep_mask=True, - hard_mask=False, shrink=True, subok=True, ndmin=0): - """ - Shortcut to MaskedArray. - - The options are in a different order for convenience and backwards - compatibility. - - """ - return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, - subok=subok, keep_mask=keep_mask, - hard_mask=hard_mask, fill_value=fill_value, - ndmin=ndmin, shrink=shrink, order=order) -array.__doc__ = masked_array.__doc__ - - -def is_masked(x): - """ - Determine whether input has masked values. - - Accepts any object as input, but always returns False unless the - input is a MaskedArray containing masked values. - - Parameters - ---------- - x : array_like - Array to check for masked values. 
- - Returns - ------- - result : bool - True if `x` is a MaskedArray with masked values, False otherwise. - - Examples - -------- - >>> import numpy.ma as ma - >>> x = ma.masked_equal([0, 1, 0, 2, 3], 0) - >>> x - masked_array(data=[--, 1, --, 2, 3], - mask=[ True, False, True, False, False], - fill_value=0) - >>> ma.is_masked(x) - True - >>> x = ma.masked_equal([0, 1, 0, 2, 3], 42) - >>> x - masked_array(data=[0, 1, 0, 2, 3], - mask=False, - fill_value=42) - >>> ma.is_masked(x) - False - - Always returns False if `x` isn't a MaskedArray. - - >>> x = [False, True, False] - >>> ma.is_masked(x) - False - >>> x = 'a string' - >>> ma.is_masked(x) - False - - """ - m = getmask(x) - if m is nomask: - return False - elif m.any(): - return True - return False - - -############################################################################## -# Extrema functions # -############################################################################## - - -class _extrema_operation(_MaskedUFunc): - """ - Generic class for maximum/minimum functions. - - .. note:: - This is the base class for `_maximum_operation` and - `_minimum_operation`. - - """ - def __init__(self, ufunc, compare, fill_value): - super(_extrema_operation, self).__init__(ufunc) - self.compare = compare - self.fill_value_func = fill_value - - def __call__(self, a, b=None): - "Executes the call behavior." - if b is None: - # 2016-04-13, 1.13.0 - warnings.warn( - "Single-argument form of np.ma.{0} is deprecated. Use " - "np.ma.{0}.reduce instead.".format(self.__name__), - DeprecationWarning, stacklevel=2) - return self.reduce(a) - return where(self.compare(a, b), a, b) - - def reduce(self, target, axis=np._NoValue): - "Reduce target along the given axis." 
- target = narray(target, copy=False, subok=True) - m = getmask(target) - - if axis is np._NoValue and target.ndim > 1: - # 2017-05-06, Numpy 1.13.0: warn on axis default - warnings.warn( - "In the future the default for ma.{0}.reduce will be axis=0, " - "not the current None, to match np.{0}.reduce. " - "Explicitly pass 0 or None to silence this warning.".format( - self.__name__ - ), - MaskedArrayFutureWarning, stacklevel=2) - axis = None - - if axis is not np._NoValue: - kwargs = dict(axis=axis) - else: - kwargs = dict() - - if m is nomask: - t = self.f.reduce(target, **kwargs) - else: - target = target.filled( - self.fill_value_func(target)).view(type(target)) - t = self.f.reduce(target, **kwargs) - m = umath.logical_and.reduce(m, **kwargs) - if hasattr(t, '_mask'): - t._mask = m - elif m: - t = masked - return t - - def outer(self, a, b): - "Return the function applied to the outer product of a and b." - ma = getmask(a) - mb = getmask(b) - if ma is nomask and mb is nomask: - m = nomask - else: - ma = getmaskarray(a) - mb = getmaskarray(b) - m = logical_or.outer(ma, mb) - result = self.f.outer(filled(a), filled(b)) - if not isinstance(result, MaskedArray): - result = result.view(MaskedArray) - result._mask = m - return result - -def min(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): - kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} - - try: - return obj.min(axis=axis, fill_value=fill_value, out=out, **kwargs) - except (AttributeError, TypeError): - # If obj doesn't have a min method, or if the method doesn't accept a - # fill_value argument - return asanyarray(obj).min(axis=axis, fill_value=fill_value, - out=out, **kwargs) -min.__doc__ = MaskedArray.min.__doc__ - -def max(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): - kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} - - try: - return obj.max(axis=axis, fill_value=fill_value, out=out, **kwargs) - except (AttributeError, TypeError): - # 
If obj doesn't have a max method, or if the method doesn't accept a - # fill_value argument - return asanyarray(obj).max(axis=axis, fill_value=fill_value, - out=out, **kwargs) -max.__doc__ = MaskedArray.max.__doc__ - - -def ptp(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): - kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} - try: - return obj.ptp(axis, out=out, fill_value=fill_value, **kwargs) - except (AttributeError, TypeError): - # If obj doesn't have a ptp method or if the method doesn't accept - # a fill_value argument - return asanyarray(obj).ptp(axis=axis, fill_value=fill_value, - out=out, **kwargs) -ptp.__doc__ = MaskedArray.ptp.__doc__ - - -############################################################################## -# Definition of functions from the corresponding methods # -############################################################################## - - -class _frommethod(object): - """ - Define functions from existing MaskedArray methods. - - Parameters - ---------- - methodname : str - Name of the method to transform. - - """ - - def __init__(self, methodname, reversed=False): - self.__name__ = methodname - self.__doc__ = self.getdoc() - self.reversed = reversed - - def getdoc(self): - "Return the doc of the function (from the doc of the method)." 
- meth = getattr(MaskedArray, self.__name__, None) or\ - getattr(np, self.__name__, None) - signature = self.__name__ + get_object_signature(meth) - if meth is not None: - doc = """ %s\n%s""" % ( - signature, getattr(meth, '__doc__', None)) - return doc - - def __call__(self, a, *args, **params): - if self.reversed: - args = list(args) - a, args[0] = args[0], a - - marr = asanyarray(a) - method_name = self.__name__ - method = getattr(type(marr), method_name, None) - if method is None: - # use the corresponding np function - method = getattr(np, method_name) - - return method(marr, *args, **params) - - -all = _frommethod('all') -anomalies = anom = _frommethod('anom') -any = _frommethod('any') -compress = _frommethod('compress', reversed=True) -cumprod = _frommethod('cumprod') -cumsum = _frommethod('cumsum') -copy = _frommethod('copy') -diagonal = _frommethod('diagonal') -harden_mask = _frommethod('harden_mask') -ids = _frommethod('ids') -maximum = _extrema_operation(umath.maximum, greater, maximum_fill_value) -mean = _frommethod('mean') -minimum = _extrema_operation(umath.minimum, less, minimum_fill_value) -nonzero = _frommethod('nonzero') -prod = _frommethod('prod') -product = _frommethod('prod') -ravel = _frommethod('ravel') -repeat = _frommethod('repeat') -shrink_mask = _frommethod('shrink_mask') -soften_mask = _frommethod('soften_mask') -std = _frommethod('std') -sum = _frommethod('sum') -swapaxes = _frommethod('swapaxes') -#take = _frommethod('take') -trace = _frommethod('trace') -var = _frommethod('var') - -count = _frommethod('count') - -def take(a, indices, axis=None, out=None, mode='raise'): - """ - """ - a = masked_array(a) - return a.take(indices, axis=axis, out=out, mode=mode) - - -def power(a, b, third=None): - """ - Returns element-wise base array raised to power from second array. - - This is the masked array version of `numpy.power`. For details see - `numpy.power`. 
- - See Also - -------- - numpy.power - - Notes - ----- - The *out* argument to `numpy.power` is not supported, `third` has to be - None. - - """ - if third is not None: - raise MaskError("3-argument power not supported.") - # Get the masks - ma = getmask(a) - mb = getmask(b) - m = mask_or(ma, mb) - # Get the rawdata - fa = getdata(a) - fb = getdata(b) - # Get the type of the result (so that we preserve subclasses) - if isinstance(a, MaskedArray): - basetype = type(a) - else: - basetype = MaskedArray - # Get the result and view it as a (subclass of) MaskedArray - with np.errstate(divide='ignore', invalid='ignore'): - result = np.where(m, fa, umath.power(fa, fb)).view(basetype) - result._update_from(a) - # Find where we're in trouble w/ NaNs and Infs - invalid = np.logical_not(np.isfinite(result.view(ndarray))) - # Add the initial mask - if m is not nomask: - if not result.ndim: - return masked - result._mask = np.logical_or(m, invalid) - # Fix the invalid parts - if invalid.any(): - if not result.ndim: - return masked - elif result._mask is nomask: - result._mask = invalid - result._data[invalid] = result.fill_value - return result - -argmin = _frommethod('argmin') -argmax = _frommethod('argmax') - -def argsort(a, axis=np._NoValue, kind=None, order=None, endwith=True, fill_value=None): - "Function version of the eponymous method." - a = np.asanyarray(a) - - # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default - if axis is np._NoValue: - axis = _deprecate_argsort_axis(a) - - if isinstance(a, MaskedArray): - return a.argsort(axis=axis, kind=kind, order=order, - endwith=endwith, fill_value=fill_value) - else: - return a.argsort(axis=axis, kind=kind, order=order) -argsort.__doc__ = MaskedArray.argsort.__doc__ - -def sort(a, axis=-1, kind=None, order=None, endwith=True, fill_value=None): - "Function version of the eponymous method." 
- a = np.array(a, copy=True, subok=True) - if axis is None: - a = a.flatten() - axis = 0 - - if isinstance(a, MaskedArray): - a.sort(axis=axis, kind=kind, order=order, - endwith=endwith, fill_value=fill_value) - else: - a.sort(axis=axis, kind=kind, order=order) - return a -sort.__doc__ = MaskedArray.sort.__doc__ - - -def compressed(x): - """ - Return all the non-masked data as a 1-D array. - - This function is equivalent to calling the "compressed" method of a - `MaskedArray`, see `MaskedArray.compressed` for details. - - See Also - -------- - MaskedArray.compressed - Equivalent method. - - """ - return asanyarray(x).compressed() - - -def concatenate(arrays, axis=0): - """ - Concatenate a sequence of arrays along the given axis. - - Parameters - ---------- - arrays : sequence of array_like - The arrays must have the same shape, except in the dimension - corresponding to `axis` (the first, by default). - axis : int, optional - The axis along which the arrays will be joined. Default is 0. - - Returns - ------- - result : MaskedArray - The concatenated array with any masked entries preserved. - - See Also - -------- - numpy.concatenate : Equivalent function in the top-level NumPy module. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = ma.arange(3) - >>> a[1] = ma.masked - >>> b = ma.arange(2, 5) - >>> a - masked_array(data=[0, --, 2], - mask=[False, True, False], - fill_value=999999) - >>> b - masked_array(data=[2, 3, 4], - mask=False, - fill_value=999999) - >>> ma.concatenate([a, b]) - masked_array(data=[0, --, 2, 2, 3, 4], - mask=[False, True, False, False, False, False], - fill_value=999999) - - """ - d = np.concatenate([getdata(a) for a in arrays], axis) - rcls = get_masked_subclass(*arrays) - data = d.view(rcls) - # Check whether one of the arrays has a non-empty mask. 
- for x in arrays: - if getmask(x) is not nomask: - break - else: - return data - # OK, so we have to concatenate the masks - dm = np.concatenate([getmaskarray(a) for a in arrays], axis) - dm = dm.reshape(d.shape) - - # If we decide to keep a '_shrinkmask' option, we want to check that - # all of them are True, and then check for dm.any() - data._mask = _shrink_mask(dm) - return data - - -def diag(v, k=0): - """ - Extract a diagonal or construct a diagonal array. - - This function is the equivalent of `numpy.diag` that takes masked - values into account, see `numpy.diag` for details. - - See Also - -------- - numpy.diag : Equivalent function for ndarrays. - - """ - output = np.diag(v, k).view(MaskedArray) - if getmask(v) is not nomask: - output._mask = np.diag(v._mask, k) - return output - - -def left_shift(a, n): - """ - Shift the bits of an integer to the left. - - This is the masked array version of `numpy.left_shift`, for details - see that function. - - See Also - -------- - numpy.left_shift - - """ - m = getmask(a) - if m is nomask: - d = umath.left_shift(filled(a), n) - return masked_array(d) - else: - d = umath.left_shift(filled(a, 0), n) - return masked_array(d, mask=m) - - -def right_shift(a, n): - """ - Shift the bits of an integer to the right. - - This is the masked array version of `numpy.right_shift`, for details - see that function. - - See Also - -------- - numpy.right_shift - - """ - m = getmask(a) - if m is nomask: - d = umath.right_shift(filled(a), n) - return masked_array(d) - else: - d = umath.right_shift(filled(a, 0), n) - return masked_array(d, mask=m) - - -def put(a, indices, values, mode='raise'): - """ - Set storage-indexed locations to corresponding values. - - This function is equivalent to `MaskedArray.put`, see that method - for details. 
- - See Also - -------- - MaskedArray.put - - """ - # We can't use 'frommethod', the order of arguments is different - try: - return a.put(indices, values, mode=mode) - except AttributeError: - return narray(a, copy=False).put(indices, values, mode=mode) - - -def putmask(a, mask, values): # , mode='raise'): - """ - Changes elements of an array based on conditional and input values. - - This is the masked array version of `numpy.putmask`, for details see - `numpy.putmask`. - - See Also - -------- - numpy.putmask - - Notes - ----- - Using a masked array as `values` will **not** transform a `ndarray` into - a `MaskedArray`. - - """ - # We can't use 'frommethod', the order of arguments is different - if not isinstance(a, MaskedArray): - a = a.view(MaskedArray) - (valdata, valmask) = (getdata(values), getmask(values)) - if getmask(a) is nomask: - if valmask is not nomask: - a._sharedmask = True - a._mask = make_mask_none(a.shape, a.dtype) - np.copyto(a._mask, valmask, where=mask) - elif a._hardmask: - if valmask is not nomask: - m = a._mask.copy() - np.copyto(m, valmask, where=mask) - a.mask |= m - else: - if valmask is nomask: - valmask = getmaskarray(values) - np.copyto(a._mask, valmask, where=mask) - np.copyto(a._data, valdata, where=mask) - return - - -def transpose(a, axes=None): - """ - Permute the dimensions of an array. - - This function is exactly equivalent to `numpy.transpose`. - - See Also - -------- - numpy.transpose : Equivalent function in top-level NumPy module. 
- - Examples - -------- - >>> import numpy.ma as ma - >>> x = ma.arange(4).reshape((2,2)) - >>> x[1, 1] = ma.masked - >>> x - masked_array( - data=[[0, 1], - [2, --]], - mask=[[False, False], - [False, True]], - fill_value=999999) - - >>> ma.transpose(x) - masked_array( - data=[[0, 2], - [1, --]], - mask=[[False, False], - [False, True]], - fill_value=999999) - """ - # We can't use 'frommethod', as 'transpose' doesn't take keywords - try: - return a.transpose(axes) - except AttributeError: - return narray(a, copy=False).transpose(axes).view(MaskedArray) - - -def reshape(a, new_shape, order='C'): - """ - Returns an array containing the same data with a new shape. - - Refer to `MaskedArray.reshape` for full documentation. - - See Also - -------- - MaskedArray.reshape : equivalent function - - """ - # We can't use 'frommethod', it whine about some parameters. Dmmit. - try: - return a.reshape(new_shape, order=order) - except AttributeError: - _tmp = narray(a, copy=False).reshape(new_shape, order=order) - return _tmp.view(MaskedArray) - - -def resize(x, new_shape): - """ - Return a new masked array with the specified size and shape. - - This is the masked equivalent of the `numpy.resize` function. The new - array is filled with repeated copies of `x` (in the order that the - data are stored in memory). If `x` is masked, the new array will be - masked, and the new mask will be a repetition of the old one. - - See Also - -------- - numpy.resize : Equivalent function in the top level NumPy module. 
- - Examples - -------- - >>> import numpy.ma as ma - >>> a = ma.array([[1, 2] ,[3, 4]]) - >>> a[0, 1] = ma.masked - >>> a - masked_array( - data=[[1, --], - [3, 4]], - mask=[[False, True], - [False, False]], - fill_value=999999) - >>> np.resize(a, (3, 3)) - masked_array( - data=[[1, 2, 3], - [4, 1, 2], - [3, 4, 1]], - mask=False, - fill_value=999999) - >>> ma.resize(a, (3, 3)) - masked_array( - data=[[1, --, 3], - [4, 1, --], - [3, 4, 1]], - mask=[[False, True, False], - [False, False, True], - [False, False, False]], - fill_value=999999) - - A MaskedArray is always returned, regardless of the input type. - - >>> a = np.array([[1, 2] ,[3, 4]]) - >>> ma.resize(a, (3, 3)) - masked_array( - data=[[1, 2, 3], - [4, 1, 2], - [3, 4, 1]], - mask=False, - fill_value=999999) - - """ - # We can't use _frommethods here, as N.resize is notoriously whiny. - m = getmask(x) - if m is not nomask: - m = np.resize(m, new_shape) - result = np.resize(x, new_shape).view(get_masked_subclass(x)) - if result.ndim: - result._mask = m - return result - - -def ndim(obj): - """ - maskedarray version of the numpy function. - - """ - return np.ndim(getdata(obj)) - -ndim.__doc__ = np.ndim.__doc__ - - -def shape(obj): - "maskedarray version of the numpy function." - return np.shape(getdata(obj)) -shape.__doc__ = np.shape.__doc__ - - -def size(obj, axis=None): - "maskedarray version of the numpy function." - return np.size(getdata(obj), axis) -size.__doc__ = np.size.__doc__ - - -############################################################################## -# Extra functions # -############################################################################## - - -def where(condition, x=_NoValue, y=_NoValue): - """ - Return a masked array with elements from `x` or `y`, depending on condition. - - .. note:: - When only `condition` is provided, this function is identical to - `nonzero`. The rest of this documentation covers only the case where - all three arguments are provided. 
- - Parameters - ---------- - condition : array_like, bool - Where True, yield `x`, otherwise yield `y`. - x, y : array_like, optional - Values from which to choose. `x`, `y` and `condition` need to be - broadcastable to some shape. - - Returns - ------- - out : MaskedArray - An masked array with `masked` elements where the condition is masked, - elements from `x` where `condition` is True, and elements from `y` - elsewhere. - - See Also - -------- - numpy.where : Equivalent function in the top-level NumPy module. - nonzero : The function that is called when x and y are omitted - - Examples - -------- - >>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0], - ... [1, 0, 1], - ... [0, 1, 0]]) - >>> x - masked_array( - data=[[0.0, --, 2.0], - [--, 4.0, --], - [6.0, --, 8.0]], - mask=[[False, True, False], - [ True, False, True], - [False, True, False]], - fill_value=1e+20) - >>> np.ma.where(x > 5, x, -3.1416) - masked_array( - data=[[-3.1416, --, -3.1416], - [--, -3.1416, --], - [6.0, --, 8.0]], - mask=[[False, True, False], - [ True, False, True], - [False, True, False]], - fill_value=1e+20) - - """ - - # handle the single-argument case - missing = (x is _NoValue, y is _NoValue).count(True) - if missing == 1: - raise ValueError("Must provide both 'x' and 'y' or neither.") - if missing == 2: - return nonzero(condition) - - # we only care if the condition is true - false or masked pick y - cf = filled(condition, False) - xd = getdata(x) - yd = getdata(y) - - # we need the full arrays here for correct final dimensions - cm = getmaskarray(condition) - xm = getmaskarray(x) - ym = getmaskarray(y) - - # deal with the fact that masked.dtype == float64, but we don't actually - # want to treat it as that. 
- if x is masked and y is not masked: - xd = np.zeros((), dtype=yd.dtype) - xm = np.ones((), dtype=ym.dtype) - elif y is masked and x is not masked: - yd = np.zeros((), dtype=xd.dtype) - ym = np.ones((), dtype=xm.dtype) - - data = np.where(cf, xd, yd) - mask = np.where(cf, xm, ym) - mask = np.where(cm, np.ones((), dtype=mask.dtype), mask) - - # collapse the mask, for backwards compatibility - mask = _shrink_mask(mask) - - return masked_array(data, mask=mask) - - -def choose(indices, choices, out=None, mode='raise'): - """ - Use an index array to construct a new array from a set of choices. - - Given an array of integers and a set of n choice arrays, this method - will create a new array that merges each of the choice arrays. Where a - value in `a` is i, the new array will have the value that choices[i] - contains in the same place. - - Parameters - ---------- - a : ndarray of ints - This array must contain integers in ``[0, n-1]``, where n is the - number of choices. - choices : sequence of arrays - Choice arrays. The index array and all of the choices should be - broadcastable to the same shape. - out : array, optional - If provided, the result will be inserted into this array. It should - be of the appropriate shape and `dtype`. - mode : {'raise', 'wrap', 'clip'}, optional - Specifies how out-of-bounds indices will behave. - - * 'raise' : raise an error - * 'wrap' : wrap around - * 'clip' : clip to the range - - Returns - ------- - merged_array : array - - See Also - -------- - choose : equivalent function - - Examples - -------- - >>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]]) - >>> a = np.array([2, 1, 0]) - >>> np.ma.choose(a, choice) - masked_array(data=[3, 2, 1], - mask=False, - fill_value=999999) - - """ - def fmask(x): - "Returns the filled array, or True if masked." - if x is masked: - return True - return filled(x) - - def nmask(x): - "Returns the mask, True if ``masked``, False if ``nomask``." 
- if x is masked: - return True - return getmask(x) - # Get the indices. - c = filled(indices, 0) - # Get the masks. - masks = [nmask(x) for x in choices] - data = [fmask(x) for x in choices] - # Construct the mask - outputmask = np.choose(c, masks, mode=mode) - outputmask = make_mask(mask_or(outputmask, getmask(indices)), - copy=False, shrink=True) - # Get the choices. - d = np.choose(c, data, mode=mode, out=out).view(MaskedArray) - if out is not None: - if isinstance(out, MaskedArray): - out.__setmask__(outputmask) - return out - d.__setmask__(outputmask) - return d - - -def round_(a, decimals=0, out=None): - """ - Return a copy of a, rounded to 'decimals' places. - - When 'decimals' is negative, it specifies the number of positions - to the left of the decimal point. The real and imaginary parts of - complex numbers are rounded separately. Nothing is done if the - array is not of float type and 'decimals' is greater than or equal - to 0. - - Parameters - ---------- - decimals : int - Number of decimals to round to. May be negative. - out : array_like - Existing array to use for output. - If not given, returns a default copy of a. - - Notes - ----- - If out is given and does not have a mask attribute, the mask of a - is lost! - - """ - if out is None: - return np.round_(a, decimals, out) - else: - np.round_(getdata(a), decimals, out) - if hasattr(out, '_mask'): - out._mask = getmask(a) - return out -round = round_ - - -# Needed by dot, so move here from extras.py. It will still be exported -# from extras.py for compatibility. -def mask_rowcols(a, axis=None): - """ - Mask rows and/or columns of a 2D array that contain masked values. - - Mask whole rows and/or columns of a 2D array that contain - masked values. The masking behavior is selected using the - `axis` parameter. - - - If `axis` is None, rows *and* columns are masked. - - If `axis` is 0, only rows are masked. - - If `axis` is 1 or -1, only columns are masked. 
- - Parameters - ---------- - a : array_like, MaskedArray - The array to mask. If not a MaskedArray instance (or if no array - elements are masked). The result is a MaskedArray with `mask` set - to `nomask` (False). Must be a 2D array. - axis : int, optional - Axis along which to perform the operation. If None, applies to a - flattened version of the array. - - Returns - ------- - a : MaskedArray - A modified version of the input array, masked depending on the value - of the `axis` parameter. - - Raises - ------ - NotImplementedError - If input array `a` is not 2D. - - See Also - -------- - mask_rows : Mask rows of a 2D array that contain masked values. - mask_cols : Mask cols of a 2D array that contain masked values. - masked_where : Mask where a condition is met. - - Notes - ----- - The input array's mask is modified by this function. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.zeros((3, 3), dtype=int) - >>> a[1, 1] = 1 - >>> a - array([[0, 0, 0], - [0, 1, 0], - [0, 0, 0]]) - >>> a = ma.masked_equal(a, 1) - >>> a - masked_array( - data=[[0, 0, 0], - [0, --, 0], - [0, 0, 0]], - mask=[[False, False, False], - [False, True, False], - [False, False, False]], - fill_value=1) - >>> ma.mask_rowcols(a) - masked_array( - data=[[0, --, 0], - [--, --, --], - [0, --, 0]], - mask=[[False, True, False], - [ True, True, True], - [False, True, False]], - fill_value=1) - - """ - a = array(a, subok=False) - if a.ndim != 2: - raise NotImplementedError("mask_rowcols works for 2D arrays only.") - m = getmask(a) - # Nothing is masked: return a - if m is nomask or not m.any(): - return a - maskedval = m.nonzero() - a._mask = a._mask.copy() - if not axis: - a[np.unique(maskedval[0])] = masked - if axis in [None, 1, -1]: - a[:, np.unique(maskedval[1])] = masked - return a - - -# Include masked dot here to avoid import problems in getting it from -# extras.py. 
Note that it is not included in __all__, but rather exported -# from extras in order to avoid backward compatibility problems. -def dot(a, b, strict=False, out=None): - """ - Return the dot product of two arrays. - - This function is the equivalent of `numpy.dot` that takes masked values - into account. Note that `strict` and `out` are in different position - than in the method version. In order to maintain compatibility with the - corresponding method, it is recommended that the optional arguments be - treated as keyword only. At some point that may be mandatory. - - .. note:: - Works only with 2-D arrays at the moment. - - - Parameters - ---------- - a, b : masked_array_like - Inputs arrays. - strict : bool, optional - Whether masked data are propagated (True) or set to 0 (False) for - the computation. Default is False. Propagating the mask means that - if a masked value appears in a row or column, the whole row or - column is considered masked. - out : masked_array, optional - Output argument. This must have the exact kind that would be returned - if it was not used. In particular, it must have the right type, must be - C-contiguous, and its dtype must be the dtype that would be returned - for `dot(a,b)`. This is a performance feature. Therefore, if these - conditions are not met, an exception is raised, instead of attempting - to be flexible. - - .. versionadded:: 1.10.2 - - See Also - -------- - numpy.dot : Equivalent function for ndarrays. - - Examples - -------- - >>> a = np.ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]]) - >>> b = np.ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]]) - >>> np.ma.dot(a, b) - masked_array( - data=[[21, 26], - [45, 64]], - mask=[[False, False], - [False, False]], - fill_value=999999) - >>> np.ma.dot(a, b, strict=True) - masked_array( - data=[[--, --], - [--, 64]], - mask=[[ True, True], - [ True, False]], - fill_value=999999) - - """ - # !!!: Works only with 2D arrays. 
There should be a way to get it to run - # with higher dimension - if strict and (a.ndim == 2) and (b.ndim == 2): - a = mask_rowcols(a, 0) - b = mask_rowcols(b, 1) - am = ~getmaskarray(a) - bm = ~getmaskarray(b) - - if out is None: - d = np.dot(filled(a, 0), filled(b, 0)) - m = ~np.dot(am, bm) - if d.ndim == 0: - d = np.asarray(d) - r = d.view(get_masked_subclass(a, b)) - r.__setmask__(m) - return r - else: - d = np.dot(filled(a, 0), filled(b, 0), out._data) - if out.mask.shape != d.shape: - out._mask = np.empty(d.shape, MaskType) - np.dot(am, bm, out._mask) - np.logical_not(out._mask, out._mask) - return out - - -def inner(a, b): - """ - Returns the inner product of a and b for arrays of floating point types. - - Like the generic NumPy equivalent the product sum is over the last dimension - of a and b. The first argument is not conjugated. - - """ - fa = filled(a, 0) - fb = filled(b, 0) - if fa.ndim == 0: - fa.shape = (1,) - if fb.ndim == 0: - fb.shape = (1,) - return np.inner(fa, fb).view(MaskedArray) -inner.__doc__ = doc_note(np.inner.__doc__, - "Masked values are replaced by 0.") -innerproduct = inner - - -def outer(a, b): - "maskedarray version of the numpy function." 
- fa = filled(a, 0).ravel() - fb = filled(b, 0).ravel() - d = np.outer(fa, fb) - ma = getmask(a) - mb = getmask(b) - if ma is nomask and mb is nomask: - return masked_array(d) - ma = getmaskarray(a) - mb = getmaskarray(b) - m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=False) - return masked_array(d, mask=m) -outer.__doc__ = doc_note(np.outer.__doc__, - "Masked values are replaced by 0.") -outerproduct = outer - - -def _convolve_or_correlate(f, a, v, mode, propagate_mask): - """ - Helper function for ma.correlate and ma.convolve - """ - if propagate_mask: - # results which are contributed to by either item in any pair being invalid - mask = ( - f(getmaskarray(a), np.ones(np.shape(v), dtype=bool), mode=mode) - | f(np.ones(np.shape(a), dtype=bool), getmaskarray(v), mode=mode) - ) - data = f(getdata(a), getdata(v), mode=mode) - else: - # results which are not contributed to by any pair of valid elements - mask = ~f(~getmaskarray(a), ~getmaskarray(v)) - data = f(filled(a, 0), filled(v, 0), mode=mode) - - return masked_array(data, mask=mask) - - -def correlate(a, v, mode='valid', propagate_mask=True): - """ - Cross-correlation of two 1-dimensional sequences. - - Parameters - ---------- - a, v : array_like - Input sequences. - mode : {'valid', 'same', 'full'}, optional - Refer to the `np.convolve` docstring. Note that the default - is 'valid', unlike `convolve`, which uses 'full'. - propagate_mask : bool - If True, then a result element is masked if any masked element contributes towards it. - If False, then a result element is only masked if no non-masked element - contribute towards it - - Returns - ------- - out : MaskedArray - Discrete cross-correlation of `a` and `v`. - - See Also - -------- - numpy.correlate : Equivalent function in the top-level NumPy module. 
- """ - return _convolve_or_correlate(np.correlate, a, v, mode, propagate_mask) - - -def convolve(a, v, mode='full', propagate_mask=True): - """ - Returns the discrete, linear convolution of two one-dimensional sequences. - - Parameters - ---------- - a, v : array_like - Input sequences. - mode : {'valid', 'same', 'full'}, optional - Refer to the `np.convolve` docstring. - propagate_mask : bool - If True, then if any masked element is included in the sum for a result - element, then the result is masked. - If False, then the result element is only masked if no non-masked cells - contribute towards it - - Returns - ------- - out : MaskedArray - Discrete, linear convolution of `a` and `v`. - - See Also - -------- - numpy.convolve : Equivalent function in the top-level NumPy module. - """ - return _convolve_or_correlate(np.convolve, a, v, mode, propagate_mask) - - -def allequal(a, b, fill_value=True): - """ - Return True if all entries of a and b are equal, using - fill_value as a truth value where either or both are masked. - - Parameters - ---------- - a, b : array_like - Input arrays to compare. - fill_value : bool, optional - Whether masked values in a or b are considered equal (True) or not - (False). - - Returns - ------- - y : bool - Returns True if the two arrays are equal within the given - tolerance, False otherwise. If either array contains NaN, - then False is returned. 
- - See Also - -------- - all, any - numpy.ma.allclose - - Examples - -------- - >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) - >>> a - masked_array(data=[10000000000.0, 1e-07, --], - mask=[False, False, True], - fill_value=1e+20) - - >>> b = np.array([1e10, 1e-7, -42.0]) - >>> b - array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01]) - >>> np.ma.allequal(a, b, fill_value=False) - False - >>> np.ma.allequal(a, b) - True - - """ - m = mask_or(getmask(a), getmask(b)) - if m is nomask: - x = getdata(a) - y = getdata(b) - d = umath.equal(x, y) - return d.all() - elif fill_value: - x = getdata(a) - y = getdata(b) - d = umath.equal(x, y) - dm = array(d, mask=m, copy=False) - return dm.filled(True).all(None) - else: - return False - - -def allclose(a, b, masked_equal=True, rtol=1e-5, atol=1e-8): - """ - Returns True if two arrays are element-wise equal within a tolerance. - - This function is equivalent to `allclose` except that masked values - are treated as equal (default) or unequal, depending on the `masked_equal` - argument. - - Parameters - ---------- - a, b : array_like - Input arrays to compare. - masked_equal : bool, optional - Whether masked values in `a` and `b` are considered equal (True) or not - (False). They are considered equal by default. - rtol : float, optional - Relative tolerance. The relative difference is equal to ``rtol * b``. - Default is 1e-5. - atol : float, optional - Absolute tolerance. The absolute difference is equal to `atol`. - Default is 1e-8. - - Returns - ------- - y : bool - Returns True if the two arrays are equal within the given - tolerance, False otherwise. If either array contains NaN, then - False is returned. - - See Also - -------- - all, any - numpy.allclose : the non-masked `allclose`. 
- - Notes - ----- - If the following equation is element-wise True, then `allclose` returns - True:: - - absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) - - Return True if all elements of `a` and `b` are equal subject to - given tolerances. - - Examples - -------- - >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) - >>> a - masked_array(data=[10000000000.0, 1e-07, --], - mask=[False, False, True], - fill_value=1e+20) - >>> b = np.ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1]) - >>> np.ma.allclose(a, b) - False - - >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) - >>> b = np.ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1]) - >>> np.ma.allclose(a, b) - True - >>> np.ma.allclose(a, b, masked_equal=False) - False - - Masked values are not compared directly. - - >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) - >>> b = np.ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1]) - >>> np.ma.allclose(a, b) - True - >>> np.ma.allclose(a, b, masked_equal=False) - False - - """ - x = masked_array(a, copy=False) - y = masked_array(b, copy=False) - - # make sure y is an inexact type to avoid abs(MIN_INT); will cause - # casting of x later. - dtype = np.result_type(y, 1.) - if y.dtype != dtype: - y = masked_array(y, dtype=dtype, copy=False) - - m = mask_or(getmask(x), getmask(y)) - xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False) - # If we have some infs, they should fall at the same place. - if not np.all(xinf == filled(np.isinf(y), False)): - return False - # No infs at all - if not np.any(xinf): - d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)), - masked_equal) - return np.all(d) - - if not np.all(filled(x[xinf] == y[xinf], masked_equal)): - return False - x = x[~xinf] - y = y[~xinf] - - d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)), - masked_equal) - - return np.all(d) - - -def asarray(a, dtype=None, order=None): - """ - Convert the input to a masked array of the given data-type. 
- - No copy is performed if the input is already an `ndarray`. If `a` is - a subclass of `MaskedArray`, a base class `MaskedArray` is returned. - - Parameters - ---------- - a : array_like - Input data, in any form that can be converted to a masked array. This - includes lists, lists of tuples, tuples, tuples of tuples, tuples - of lists, ndarrays and masked arrays. - dtype : dtype, optional - By default, the data-type is inferred from the input data. - order : {'C', 'F'}, optional - Whether to use row-major ('C') or column-major ('FORTRAN') memory - representation. Default is 'C'. - - Returns - ------- - out : MaskedArray - Masked array interpretation of `a`. - - See Also - -------- - asanyarray : Similar to `asarray`, but conserves subclasses. - - Examples - -------- - >>> x = np.arange(10.).reshape(2, 5) - >>> x - array([[0., 1., 2., 3., 4.], - [5., 6., 7., 8., 9.]]) - >>> np.ma.asarray(x) - masked_array( - data=[[0., 1., 2., 3., 4.], - [5., 6., 7., 8., 9.]], - mask=False, - fill_value=1e+20) - >>> type(np.ma.asarray(x)) - - - """ - order = order or 'C' - return masked_array(a, dtype=dtype, copy=False, keep_mask=True, - subok=False, order=order) - - -def asanyarray(a, dtype=None): - """ - Convert the input to a masked array, conserving subclasses. - - If `a` is a subclass of `MaskedArray`, its class is conserved. - No copy is performed if the input is already an `ndarray`. - - Parameters - ---------- - a : array_like - Input data, in any form that can be converted to an array. - dtype : dtype, optional - By default, the data-type is inferred from the input data. - order : {'C', 'F'}, optional - Whether to use row-major ('C') or column-major ('FORTRAN') memory - representation. Default is 'C'. - - Returns - ------- - out : MaskedArray - MaskedArray interpretation of `a`. - - See Also - -------- - asarray : Similar to `asanyarray`, but does not conserve subclass. 
- - Examples - -------- - >>> x = np.arange(10.).reshape(2, 5) - >>> x - array([[0., 1., 2., 3., 4.], - [5., 6., 7., 8., 9.]]) - >>> np.ma.asanyarray(x) - masked_array( - data=[[0., 1., 2., 3., 4.], - [5., 6., 7., 8., 9.]], - mask=False, - fill_value=1e+20) - >>> type(np.ma.asanyarray(x)) - - - """ - # workaround for #8666, to preserve identity. Ideally the bottom line - # would handle this for us. - if isinstance(a, MaskedArray) and (dtype is None or dtype == a.dtype): - return a - return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True) - - -############################################################################## -# Pickling # -############################################################################## - -def _pickle_warn(method): - # NumPy 1.15.0, 2017-12-10 - warnings.warn( - "np.ma.{method} is deprecated, use pickle.{method} instead" - .format(method=method), - DeprecationWarning, - stacklevel=3) - - -def fromfile(file, dtype=float, count=-1, sep=''): - raise NotImplementedError( - "fromfile() not yet implemented for a MaskedArray.") - - -def fromflex(fxarray): - """ - Build a masked array from a suitable flexible-type array. - - The input array has to have a data-type with ``_data`` and ``_mask`` - fields. This type of array is output by `MaskedArray.toflex`. - - Parameters - ---------- - fxarray : ndarray - The structured input array, containing ``_data`` and ``_mask`` - fields. If present, other fields are discarded. - - Returns - ------- - result : MaskedArray - The constructed masked array. - - See Also - -------- - MaskedArray.toflex : Build a flexible-type array from a masked array. 
- - Examples - -------- - >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4) - >>> rec = x.toflex() - >>> rec - array([[(0, False), (1, True), (2, False)], - [(3, True), (4, False), (5, True)], - [(6, False), (7, True), (8, False)]], - dtype=[('_data', '>> x2 = np.ma.fromflex(rec) - >>> x2 - masked_array( - data=[[0, --, 2], - [--, 4, --], - [6, --, 8]], - mask=[[False, True, False], - [ True, False, True], - [False, True, False]], - fill_value=999999) - - Extra fields can be present in the structured array but are discarded: - - >>> dt = [('_data', '>> rec2 = np.zeros((2, 2), dtype=dt) - >>> rec2 - array([[(0, False, 0.), (0, False, 0.)], - [(0, False, 0.), (0, False, 0.)]], - dtype=[('_data', '>> y = np.ma.fromflex(rec2) - >>> y - masked_array( - data=[[0, 0], - [0, 0]], - mask=[[False, False], - [False, False]], - fill_value=999999, - dtype=int32) - - """ - return masked_array(fxarray['_data'], mask=fxarray['_mask']) - - -class _convert2ma(object): - - """ - Convert functions from numpy to numpy.ma. - - Parameters - ---------- - _methodname : string - Name of the method to transform. - - """ - __doc__ = None - - def __init__(self, funcname, params=None): - self._func = getattr(np, funcname) - self.__doc__ = self.getdoc() - self._extras = params or {} - - def getdoc(self): - "Return the doc of the function (from the doc of the method)." 
- doc = getattr(self._func, '__doc__', None) - sig = get_object_signature(self._func) - if doc: - # Add the signature of the function at the beginning of the doc - if sig: - sig = "%s%s\n" % (self._func.__name__, sig) - doc = sig + doc - return doc - - def __call__(self, *args, **params): - # Find the common parameters to the call and the definition - _extras = self._extras - common_params = set(params).intersection(_extras) - # Drop the common parameters from the call - for p in common_params: - _extras[p] = params.pop(p) - # Get the result - result = self._func.__call__(*args, **params).view(MaskedArray) - if "fill_value" in common_params: - result.fill_value = _extras.get("fill_value", None) - if "hardmask" in common_params: - result._hardmask = bool(_extras.get("hard_mask", False)) - return result - -arange = _convert2ma('arange', params=dict(fill_value=None, hardmask=False)) -clip = np.clip -diff = np.diff -empty = _convert2ma('empty', params=dict(fill_value=None, hardmask=False)) -empty_like = _convert2ma('empty_like') -frombuffer = _convert2ma('frombuffer') -fromfunction = _convert2ma('fromfunction') -identity = _convert2ma( - 'identity', params=dict(fill_value=None, hardmask=False)) -indices = np.indices -ones = _convert2ma('ones', params=dict(fill_value=None, hardmask=False)) -ones_like = np.ones_like -squeeze = np.squeeze -zeros = _convert2ma('zeros', params=dict(fill_value=None, hardmask=False)) -zeros_like = np.zeros_like - - -def append(a, b, axis=None): - """Append values to the end of an array. - - .. versionadded:: 1.9.0 - - Parameters - ---------- - a : array_like - Values are appended to a copy of this array. - b : array_like - These values are appended to a copy of `a`. It must be of the - correct shape (the same shape as `a`, excluding `axis`). If `axis` - is not specified, `b` can be any shape and will be flattened - before use. - axis : int, optional - The axis along which `v` are appended. 
If `axis` is not given, - both `a` and `b` are flattened before use. - - Returns - ------- - append : MaskedArray - A copy of `a` with `b` appended to `axis`. Note that `append` - does not occur in-place: a new array is allocated and filled. If - `axis` is None, the result is a flattened array. - - See Also - -------- - numpy.append : Equivalent function in the top-level NumPy module. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = ma.masked_values([1, 2, 3], 2) - >>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) - >>> ma.append(a, b) - masked_array(data=[1, --, 3, 4, 5, 6, --, 8, 9], - mask=[False, True, False, False, False, False, True, False, - False], - fill_value=999999) - """ - return concatenate([a, b], axis) diff --git a/venv/lib/python3.7/site-packages/numpy/ma/extras.py b/venv/lib/python3.7/site-packages/numpy/ma/extras.py deleted file mode 100644 index f4a9144..0000000 --- a/venv/lib/python3.7/site-packages/numpy/ma/extras.py +++ /dev/null @@ -1,1930 +0,0 @@ -""" -Masked arrays add-ons. - -A collection of utilities for `numpy.ma`. - -:author: Pierre Gerard-Marchant -:contact: pierregm_at_uga_dot_edu -:version: $Id: extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $ - -""" -from __future__ import division, absolute_import, print_function - -__all__ = [ - 'apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d', - 'atleast_3d', 'average', 'clump_masked', 'clump_unmasked', - 'column_stack', 'compress_cols', 'compress_nd', 'compress_rowcols', - 'compress_rows', 'count_masked', 'corrcoef', 'cov', 'diagflat', 'dot', - 'dstack', 'ediff1d', 'flatnotmasked_contiguous', 'flatnotmasked_edges', - 'hsplit', 'hstack', 'isin', 'in1d', 'intersect1d', 'mask_cols', 'mask_rowcols', - 'mask_rows', 'masked_all', 'masked_all_like', 'median', 'mr_', - 'notmasked_contiguous', 'notmasked_edges', 'polyfit', 'row_stack', - 'setdiff1d', 'setxor1d', 'stack', 'unique', 'union1d', 'vander', 'vstack', - ] - -import itertools -import warnings - -from . 
import core as ma -from .core import ( - MaskedArray, MAError, add, array, asarray, concatenate, filled, count, - getmask, getmaskarray, make_mask_descr, masked, masked_array, mask_or, - nomask, ones, sort, zeros, getdata, get_masked_subclass, dot, - mask_rowcols - ) - -import numpy as np -from numpy import ndarray, array as nxarray -import numpy.core.umath as umath -from numpy.core.multiarray import normalize_axis_index -from numpy.core.numeric import normalize_axis_tuple -from numpy.lib.function_base import _ureduce -from numpy.lib.index_tricks import AxisConcatenator - - -def issequence(seq): - """ - Is seq a sequence (ndarray, list or tuple)? - - """ - return isinstance(seq, (ndarray, tuple, list)) - - -def count_masked(arr, axis=None): - """ - Count the number of masked elements along the given axis. - - Parameters - ---------- - arr : array_like - An array with (possibly) masked elements. - axis : int, optional - Axis along which to count. If None (default), a flattened - version of the array is used. - - Returns - ------- - count : int, ndarray - The total number of masked elements (axis=None) or the number - of masked elements along each slice of the given axis. - - See Also - -------- - MaskedArray.count : Count non-masked elements. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(9).reshape((3,3)) - >>> a = ma.array(a) - >>> a[1, 0] = ma.masked - >>> a[1, 2] = ma.masked - >>> a[2, 1] = ma.masked - >>> a - masked_array( - data=[[0, 1, 2], - [--, 4, --], - [6, --, 8]], - mask=[[False, False, False], - [ True, False, True], - [False, True, False]], - fill_value=999999) - >>> ma.count_masked(a) - 3 - - When the `axis` keyword is used an array is returned. - - >>> ma.count_masked(a, axis=0) - array([1, 1, 1]) - >>> ma.count_masked(a, axis=1) - array([0, 2, 1]) - - """ - m = getmaskarray(arr) - return m.sum(axis) - - -def masked_all(shape, dtype=float): - """ - Empty masked array with all elements masked. 
- - Return an empty masked array of the given shape and dtype, where all the - data are masked. - - Parameters - ---------- - shape : tuple - Shape of the required MaskedArray. - dtype : dtype, optional - Data type of the output. - - Returns - ------- - a : MaskedArray - A masked array with all data masked. - - See Also - -------- - masked_all_like : Empty masked array modelled on an existing array. - - Examples - -------- - >>> import numpy.ma as ma - >>> ma.masked_all((3, 3)) - masked_array( - data=[[--, --, --], - [--, --, --], - [--, --, --]], - mask=[[ True, True, True], - [ True, True, True], - [ True, True, True]], - fill_value=1e+20, - dtype=float64) - - The `dtype` parameter defines the underlying data type. - - >>> a = ma.masked_all((3, 3)) - >>> a.dtype - dtype('float64') - >>> a = ma.masked_all((3, 3), dtype=np.int32) - >>> a.dtype - dtype('int32') - - """ - a = masked_array(np.empty(shape, dtype), - mask=np.ones(shape, make_mask_descr(dtype))) - return a - - -def masked_all_like(arr): - """ - Empty masked array with the properties of an existing array. - - Return an empty masked array of the same shape and dtype as - the array `arr`, where all the data are masked. - - Parameters - ---------- - arr : ndarray - An array describing the shape and dtype of the required MaskedArray. - - Returns - ------- - a : MaskedArray - A masked array with all data masked. - - Raises - ------ - AttributeError - If `arr` doesn't have a shape attribute (i.e. not an ndarray) - - See Also - -------- - masked_all : Empty masked array with all elements masked. - - Examples - -------- - >>> import numpy.ma as ma - >>> arr = np.zeros((2, 3), dtype=np.float32) - >>> arr - array([[0., 0., 0.], - [0., 0., 0.]], dtype=float32) - >>> ma.masked_all_like(arr) - masked_array( - data=[[--, --, --], - [--, --, --]], - mask=[[ True, True, True], - [ True, True, True]], - fill_value=1e+20, - dtype=float32) - - The dtype of the masked array matches the dtype of `arr`. 
- - >>> arr.dtype - dtype('float32') - >>> ma.masked_all_like(arr).dtype - dtype('float32') - - """ - a = np.empty_like(arr).view(MaskedArray) - a._mask = np.ones(a.shape, dtype=make_mask_descr(a.dtype)) - return a - - -#####-------------------------------------------------------------------------- -#---- --- Standard functions --- -#####-------------------------------------------------------------------------- -class _fromnxfunction(object): - """ - Defines a wrapper to adapt NumPy functions to masked arrays. - - - An instance of `_fromnxfunction` can be called with the same parameters - as the wrapped NumPy function. The docstring of `newfunc` is adapted from - the wrapped function as well, see `getdoc`. - - This class should not be used directly. Instead, one of its extensions that - provides support for a specific type of input should be used. - - Parameters - ---------- - funcname : str - The name of the function to be adapted. The function should be - in the NumPy namespace (i.e. ``np.funcname``). - - """ - - def __init__(self, funcname): - self.__name__ = funcname - self.__doc__ = self.getdoc() - - def getdoc(self): - """ - Retrieve the docstring and signature from the function. - - The ``__doc__`` attribute of the function is used as the docstring for - the new masked array version of the function. A note on application - of the function to the mask is appended. - - .. warning:: - If the function docstring already contained a Notes section, the - new docstring will have two Notes sections instead of appending a note - to the existing section. - - Parameters - ---------- - None - - """ - npfunc = getattr(np, self.__name__, None) - doc = getattr(npfunc, '__doc__', None) - if doc: - sig = self.__name__ + ma.get_object_signature(npfunc) - locdoc = "Notes\n-----\nThe function is applied to both the _data"\ - " and the _mask, if any." 
- return '\n'.join((sig, doc, locdoc)) - return - - def __call__(self, *args, **params): - pass - - -class _fromnxfunction_single(_fromnxfunction): - """ - A version of `_fromnxfunction` that is called with a single array - argument followed by auxiliary args that are passed verbatim for - both the data and mask calls. - """ - def __call__(self, x, *args, **params): - func = getattr(np, self.__name__) - if isinstance(x, ndarray): - _d = func(x.__array__(), *args, **params) - _m = func(getmaskarray(x), *args, **params) - return masked_array(_d, mask=_m) - else: - _d = func(np.asarray(x), *args, **params) - _m = func(getmaskarray(x), *args, **params) - return masked_array(_d, mask=_m) - - -class _fromnxfunction_seq(_fromnxfunction): - """ - A version of `_fromnxfunction` that is called with a single sequence - of arrays followed by auxiliary args that are passed verbatim for - both the data and mask calls. - """ - def __call__(self, x, *args, **params): - func = getattr(np, self.__name__) - _d = func(tuple([np.asarray(a) for a in x]), *args, **params) - _m = func(tuple([getmaskarray(a) for a in x]), *args, **params) - return masked_array(_d, mask=_m) - - -class _fromnxfunction_args(_fromnxfunction): - """ - A version of `_fromnxfunction` that is called with multiple array - arguments. The first non-array-like input marks the beginning of the - arguments that are passed verbatim for both the data and mask calls. - Array arguments are processed independently and the results are - returned in a list. If only one array is found, the return value is - just the processed array instead of a list. 
- """ - def __call__(self, *args, **params): - func = getattr(np, self.__name__) - arrays = [] - args = list(args) - while len(args) > 0 and issequence(args[0]): - arrays.append(args.pop(0)) - res = [] - for x in arrays: - _d = func(np.asarray(x), *args, **params) - _m = func(getmaskarray(x), *args, **params) - res.append(masked_array(_d, mask=_m)) - if len(arrays) == 1: - return res[0] - return res - - -class _fromnxfunction_allargs(_fromnxfunction): - """ - A version of `_fromnxfunction` that is called with multiple array - arguments. Similar to `_fromnxfunction_args` except that all args - are converted to arrays even if they are not so already. This makes - it possible to process scalars as 1-D arrays. Only keyword arguments - are passed through verbatim for the data and mask calls. Arrays - arguments are processed independently and the results are returned - in a list. If only one arg is present, the return value is just the - processed array instead of a list. - """ - def __call__(self, *args, **params): - func = getattr(np, self.__name__) - res = [] - for x in args: - _d = func(np.asarray(x), **params) - _m = func(getmaskarray(x), **params) - res.append(masked_array(_d, mask=_m)) - if len(args) == 1: - return res[0] - return res - - -atleast_1d = _fromnxfunction_allargs('atleast_1d') -atleast_2d = _fromnxfunction_allargs('atleast_2d') -atleast_3d = _fromnxfunction_allargs('atleast_3d') - -vstack = row_stack = _fromnxfunction_seq('vstack') -hstack = _fromnxfunction_seq('hstack') -column_stack = _fromnxfunction_seq('column_stack') -dstack = _fromnxfunction_seq('dstack') -stack = _fromnxfunction_seq('stack') - -hsplit = _fromnxfunction_single('hsplit') - -diagflat = _fromnxfunction_single('diagflat') - - -#####-------------------------------------------------------------------------- -#---- -#####-------------------------------------------------------------------------- -def flatten_inplace(seq): - """Flatten a sequence in place.""" - k = 0 - while (k != 
len(seq)): - while hasattr(seq[k], '__iter__'): - seq[k:(k + 1)] = seq[k] - k += 1 - return seq - - -def apply_along_axis(func1d, axis, arr, *args, **kwargs): - """ - (This docstring should be overwritten) - """ - arr = array(arr, copy=False, subok=True) - nd = arr.ndim - axis = normalize_axis_index(axis, nd) - ind = [0] * (nd - 1) - i = np.zeros(nd, 'O') - indlist = list(range(nd)) - indlist.remove(axis) - i[axis] = slice(None, None) - outshape = np.asarray(arr.shape).take(indlist) - i.put(indlist, ind) - res = func1d(arr[tuple(i.tolist())], *args, **kwargs) - # if res is a number, then we have a smaller output array - asscalar = np.isscalar(res) - if not asscalar: - try: - len(res) - except TypeError: - asscalar = True - # Note: we shouldn't set the dtype of the output from the first result - # so we force the type to object, and build a list of dtypes. We'll - # just take the largest, to avoid some downcasting - dtypes = [] - if asscalar: - dtypes.append(np.asarray(res).dtype) - outarr = zeros(outshape, object) - outarr[tuple(ind)] = res - Ntot = np.product(outshape) - k = 1 - while k < Ntot: - # increment the index - ind[-1] += 1 - n = -1 - while (ind[n] >= outshape[n]) and (n > (1 - nd)): - ind[n - 1] += 1 - ind[n] = 0 - n -= 1 - i.put(indlist, ind) - res = func1d(arr[tuple(i.tolist())], *args, **kwargs) - outarr[tuple(ind)] = res - dtypes.append(asarray(res).dtype) - k += 1 - else: - res = array(res, copy=False, subok=True) - j = i.copy() - j[axis] = ([slice(None, None)] * res.ndim) - j.put(indlist, ind) - Ntot = np.product(outshape) - holdshape = outshape - outshape = list(arr.shape) - outshape[axis] = res.shape - dtypes.append(asarray(res).dtype) - outshape = flatten_inplace(outshape) - outarr = zeros(outshape, object) - outarr[tuple(flatten_inplace(j.tolist()))] = res - k = 1 - while k < Ntot: - # increment the index - ind[-1] += 1 - n = -1 - while (ind[n] >= holdshape[n]) and (n > (1 - nd)): - ind[n - 1] += 1 - ind[n] = 0 - n -= 1 - i.put(indlist, ind) - 
j.put(indlist, ind) - res = func1d(arr[tuple(i.tolist())], *args, **kwargs) - outarr[tuple(flatten_inplace(j.tolist()))] = res - dtypes.append(asarray(res).dtype) - k += 1 - max_dtypes = np.dtype(np.asarray(dtypes).max()) - if not hasattr(arr, '_mask'): - result = np.asarray(outarr, dtype=max_dtypes) - else: - result = asarray(outarr, dtype=max_dtypes) - result.fill_value = ma.default_fill_value(result) - return result -apply_along_axis.__doc__ = np.apply_along_axis.__doc__ - - -def apply_over_axes(func, a, axes): - """ - (This docstring will be overwritten) - """ - val = asarray(a) - N = a.ndim - if array(axes).ndim == 0: - axes = (axes,) - for axis in axes: - if axis < 0: - axis = N + axis - args = (val, axis) - res = func(*args) - if res.ndim == val.ndim: - val = res - else: - res = ma.expand_dims(res, axis) - if res.ndim == val.ndim: - val = res - else: - raise ValueError("function is not returning " - "an array of the correct shape") - return val - -if apply_over_axes.__doc__ is not None: - apply_over_axes.__doc__ = np.apply_over_axes.__doc__[ - :np.apply_over_axes.__doc__.find('Notes')].rstrip() + \ - """ - - Examples - -------- - >>> a = np.ma.arange(24).reshape(2,3,4) - >>> a[:,0,1] = np.ma.masked - >>> a[:,1,:] = np.ma.masked - >>> a - masked_array( - data=[[[0, --, 2, 3], - [--, --, --, --], - [8, 9, 10, 11]], - [[12, --, 14, 15], - [--, --, --, --], - [20, 21, 22, 23]]], - mask=[[[False, True, False, False], - [ True, True, True, True], - [False, False, False, False]], - [[False, True, False, False], - [ True, True, True, True], - [False, False, False, False]]], - fill_value=999999) - >>> np.ma.apply_over_axes(np.ma.sum, a, [0,2]) - masked_array( - data=[[[46], - [--], - [124]]], - mask=[[[False], - [ True], - [False]]], - fill_value=999999) - - Tuple axis arguments to ufuncs are equivalent: - - >>> np.ma.sum(a, axis=(0,2)).reshape((1,-1,1)) - masked_array( - data=[[[46], - [--], - [124]]], - mask=[[[False], - [ True], - [False]]], - fill_value=999999) - 
""" - - -def average(a, axis=None, weights=None, returned=False): - """ - Return the weighted average of array over the given axis. - - Parameters - ---------- - a : array_like - Data to be averaged. - Masked entries are not taken into account in the computation. - axis : int, optional - Axis along which to average `a`. If None, averaging is done over - the flattened array. - weights : array_like, optional - The importance that each element has in the computation of the average. - The weights array can either be 1-D (in which case its length must be - the size of `a` along the given axis) or of the same shape as `a`. - If ``weights=None``, then all data in `a` are assumed to have a - weight equal to one. The 1-D calculation is:: - - avg = sum(a * weights) / sum(weights) - - The only constraint on `weights` is that `sum(weights)` must not be 0. - returned : bool, optional - Flag indicating whether a tuple ``(result, sum of weights)`` - should be returned as output (True), or just the result (False). - Default is False. - - Returns - ------- - average, [sum_of_weights] : (tuple of) scalar or MaskedArray - The average along the specified axis. When returned is `True`, - return a tuple with the average as the first element and the sum - of the weights as the second element. The return type is `np.float64` - if `a` is of integer type and floats smaller than `float64`, or the - input data-type, otherwise. If returned, `sum_of_weights` is always - `float64`. - - Examples - -------- - >>> a = np.ma.array([1., 2., 3., 4.], mask=[False, False, True, True]) - >>> np.ma.average(a, weights=[3, 1, 0, 0]) - 1.25 - - >>> x = np.ma.arange(6.).reshape(3, 2) - >>> x - masked_array( - data=[[0., 1.], - [2., 3.], - [4., 5.]], - mask=False, - fill_value=1e+20) - >>> avg, sumweights = np.ma.average(x, axis=0, weights=[1, 2, 3], - ... 
returned=True) - >>> avg - masked_array(data=[2.6666666666666665, 3.6666666666666665], - mask=[False, False], - fill_value=1e+20) - - """ - a = asarray(a) - m = getmask(a) - - # inspired by 'average' in numpy/lib/function_base.py - - if weights is None: - avg = a.mean(axis) - scl = avg.dtype.type(a.count(axis)) - else: - wgt = np.asanyarray(weights) - - if issubclass(a.dtype.type, (np.integer, np.bool_)): - result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8') - else: - result_dtype = np.result_type(a.dtype, wgt.dtype) - - # Sanity checks - if a.shape != wgt.shape: - if axis is None: - raise TypeError( - "Axis must be specified when shapes of a and weights " - "differ.") - if wgt.ndim != 1: - raise TypeError( - "1D weights expected when shapes of a and weights differ.") - if wgt.shape[0] != a.shape[axis]: - raise ValueError( - "Length of weights not compatible with specified axis.") - - # setup wgt to broadcast along axis - wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape) - wgt = wgt.swapaxes(-1, axis) - - if m is not nomask: - wgt = wgt*(~a.mask) - - scl = wgt.sum(axis=axis, dtype=result_dtype) - avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl - - if returned: - if scl.shape != avg.shape: - scl = np.broadcast_to(scl, avg.shape).copy() - return avg, scl - else: - return avg - - -def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): - """ - Compute the median along the specified axis. - - Returns the median of the array elements. - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - axis : int, optional - Axis along which the medians are computed. The default (None) is - to compute the median along a flattened version of the array. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output - but the type will be cast if necessary. 
- overwrite_input : bool, optional - If True, then allow use of memory of input array (a) for - calculations. The input array will be modified by the call to - median. This will save memory when you do not need to preserve - the contents of the input array. Treat the input as undefined, - but it will probably be fully or partially sorted. Default is - False. Note that, if `overwrite_input` is True, and the input - is not already an `ndarray`, an error will be raised. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the input array. - - .. versionadded:: 1.10.0 - - Returns - ------- - median : ndarray - A new array holding the result is returned unless out is - specified, in which case a reference to out is returned. - Return data-type is `float64` for integers and floats smaller than - `float64`, or the input data-type, otherwise. - - See Also - -------- - mean - - Notes - ----- - Given a vector ``V`` with ``N`` non masked values, the median of ``V`` - is the middle value of a sorted copy of ``V`` (``Vs``) - i.e. - ``Vs[(N-1)/2]``, when ``N`` is odd, or ``{Vs[N/2 - 1] + Vs[N/2]}/2`` - when ``N`` is even. 
- - Examples - -------- - >>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4) - >>> np.ma.median(x) - 1.5 - - >>> x = np.ma.array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4) - >>> np.ma.median(x) - 2.5 - >>> np.ma.median(x, axis=-1, overwrite_input=True) - masked_array(data=[2.0, 5.0], - mask=[False, False], - fill_value=1e+20) - - """ - if not hasattr(a, 'mask'): - m = np.median(getdata(a, subok=True), axis=axis, - out=out, overwrite_input=overwrite_input, - keepdims=keepdims) - if isinstance(m, np.ndarray) and 1 <= m.ndim: - return masked_array(m, copy=False) - else: - return m - - r, k = _ureduce(a, func=_median, axis=axis, out=out, - overwrite_input=overwrite_input) - if keepdims: - return r.reshape(k) - else: - return r - -def _median(a, axis=None, out=None, overwrite_input=False): - # when an unmasked NaN is present return it, so we need to sort the NaN - # values behind the mask - if np.issubdtype(a.dtype, np.inexact): - fill_value = np.inf - else: - fill_value = None - if overwrite_input: - if axis is None: - asorted = a.ravel() - asorted.sort(fill_value=fill_value) - else: - a.sort(axis=axis, fill_value=fill_value) - asorted = a - else: - asorted = sort(a, axis=axis, fill_value=fill_value) - - if axis is None: - axis = 0 - else: - axis = normalize_axis_index(axis, asorted.ndim) - - if asorted.shape[axis] == 0: - # for empty axis integer indices fail so use slicing to get same result - # as median (which is mean of empty slice = nan) - indexer = [slice(None)] * asorted.ndim - indexer[axis] = slice(0, 0) - indexer = tuple(indexer) - return np.ma.mean(asorted[indexer], axis=axis, out=out) - - if asorted.ndim == 1: - counts = count(asorted) - idx, odd = divmod(count(asorted), 2) - mid = asorted[idx + odd - 1:idx + 1] - if np.issubdtype(asorted.dtype, np.inexact) and asorted.size > 0: - # avoid inf / x = masked - s = mid.sum(out=out) - if not odd: - s = np.true_divide(s, 2., casting='safe', out=out) - s = np.lib.utils._median_nancheck(asorted, s, axis, 
out) - else: - s = mid.mean(out=out) - - # if result is masked either the input contained enough - # minimum_fill_value so that it would be the median or all values - # masked - if np.ma.is_masked(s) and not np.all(asorted.mask): - return np.ma.minimum_fill_value(asorted) - return s - - counts = count(asorted, axis=axis, keepdims=True) - h = counts // 2 - - # duplicate high if odd number of elements so mean does nothing - odd = counts % 2 == 1 - l = np.where(odd, h, h-1) - - lh = np.concatenate([l,h], axis=axis) - - # get low and high median - low_high = np.take_along_axis(asorted, lh, axis=axis) - - def replace_masked(s): - # Replace masked entries with minimum_full_value unless it all values - # are masked. This is required as the sort order of values equal or - # larger than the fill value is undefined and a valid value placed - # elsewhere, e.g. [4, --, inf]. - if np.ma.is_masked(s): - rep = (~np.all(asorted.mask, axis=axis, keepdims=True)) & s.mask - s.data[rep] = np.ma.minimum_fill_value(asorted) - s.mask[rep] = False - - replace_masked(low_high) - - if np.issubdtype(asorted.dtype, np.inexact): - # avoid inf / x = masked - s = np.ma.sum(low_high, axis=axis, out=out) - np.true_divide(s.data, 2., casting='unsafe', out=s.data) - - s = np.lib.utils._median_nancheck(asorted, s, axis, out) - else: - s = np.ma.mean(low_high, axis=axis, out=out) - - return s - - -def compress_nd(x, axis=None): - """Suppress slices from multiple dimensions which contain masked values. - - Parameters - ---------- - x : array_like, MaskedArray - The array to operate on. If not a MaskedArray instance (or if no array - elements are masked, `x` is interpreted as a MaskedArray with `mask` - set to `nomask`. - axis : tuple of ints or int, optional - Which dimensions to suppress slices from can be configured with this - parameter. - - If axis is a tuple of ints, those are the axes to suppress slices from. - - If axis is an int, then that is the only axis to suppress slices from. 
- - If axis is None, all axis are selected. - - Returns - ------- - compress_array : ndarray - The compressed array. - """ - x = asarray(x) - m = getmask(x) - # Set axis to tuple of ints - if axis is None: - axis = tuple(range(x.ndim)) - else: - axis = normalize_axis_tuple(axis, x.ndim) - - # Nothing is masked: return x - if m is nomask or not m.any(): - return x._data - # All is masked: return empty - if m.all(): - return nxarray([]) - # Filter elements through boolean indexing - data = x._data - for ax in axis: - axes = tuple(list(range(ax)) + list(range(ax + 1, x.ndim))) - data = data[(slice(None),)*ax + (~m.any(axis=axes),)] - return data - -def compress_rowcols(x, axis=None): - """ - Suppress the rows and/or columns of a 2-D array that contain - masked values. - - The suppression behavior is selected with the `axis` parameter. - - - If axis is None, both rows and columns are suppressed. - - If axis is 0, only rows are suppressed. - - If axis is 1 or -1, only columns are suppressed. - - Parameters - ---------- - x : array_like, MaskedArray - The array to operate on. If not a MaskedArray instance (or if no array - elements are masked), `x` is interpreted as a MaskedArray with - `mask` set to `nomask`. Must be a 2D array. - axis : int, optional - Axis along which to perform the operation. Default is None. - - Returns - ------- - compressed_array : ndarray - The compressed array. - - Examples - -------- - >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], - ... [1, 0, 0], - ... 
[0, 0, 0]]) - >>> x - masked_array( - data=[[--, 1, 2], - [--, 4, 5], - [6, 7, 8]], - mask=[[ True, False, False], - [ True, False, False], - [False, False, False]], - fill_value=999999) - - >>> np.ma.compress_rowcols(x) - array([[7, 8]]) - >>> np.ma.compress_rowcols(x, 0) - array([[6, 7, 8]]) - >>> np.ma.compress_rowcols(x, 1) - array([[1, 2], - [4, 5], - [7, 8]]) - - """ - if asarray(x).ndim != 2: - raise NotImplementedError("compress_rowcols works for 2D arrays only.") - return compress_nd(x, axis=axis) - - -def compress_rows(a): - """ - Suppress whole rows of a 2-D array that contain masked values. - - This is equivalent to ``np.ma.compress_rowcols(a, 0)``, see - `extras.compress_rowcols` for details. - - See Also - -------- - extras.compress_rowcols - - """ - a = asarray(a) - if a.ndim != 2: - raise NotImplementedError("compress_rows works for 2D arrays only.") - return compress_rowcols(a, 0) - -def compress_cols(a): - """ - Suppress whole columns of a 2-D array that contain masked values. - - This is equivalent to ``np.ma.compress_rowcols(a, 1)``, see - `extras.compress_rowcols` for details. - - See Also - -------- - extras.compress_rowcols - - """ - a = asarray(a) - if a.ndim != 2: - raise NotImplementedError("compress_cols works for 2D arrays only.") - return compress_rowcols(a, 1) - -def mask_rows(a, axis=np._NoValue): - """ - Mask rows of a 2D array that contain masked values. - - This function is a shortcut to ``mask_rowcols`` with `axis` equal to 0. - - See Also - -------- - mask_rowcols : Mask rows and/or columns of a 2D array. - masked_where : Mask where a condition is met. 
- - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.zeros((3, 3), dtype=int) - >>> a[1, 1] = 1 - >>> a - array([[0, 0, 0], - [0, 1, 0], - [0, 0, 0]]) - >>> a = ma.masked_equal(a, 1) - >>> a - masked_array( - data=[[0, 0, 0], - [0, --, 0], - [0, 0, 0]], - mask=[[False, False, False], - [False, True, False], - [False, False, False]], - fill_value=1) - - >>> ma.mask_rows(a) - masked_array( - data=[[0, 0, 0], - [--, --, --], - [0, 0, 0]], - mask=[[False, False, False], - [ True, True, True], - [False, False, False]], - fill_value=1) - - """ - if axis is not np._NoValue: - # remove the axis argument when this deprecation expires - # NumPy 1.18.0, 2019-11-28 - warnings.warn( - "The axis argument has always been ignored, in future passing it " - "will raise TypeError", DeprecationWarning, stacklevel=2) - return mask_rowcols(a, 0) - -def mask_cols(a, axis=np._NoValue): - """ - Mask columns of a 2D array that contain masked values. - - This function is a shortcut to ``mask_rowcols`` with `axis` equal to 1. - - See Also - -------- - mask_rowcols : Mask rows and/or columns of a 2D array. - masked_where : Mask where a condition is met. 
- - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.zeros((3, 3), dtype=int) - >>> a[1, 1] = 1 - >>> a - array([[0, 0, 0], - [0, 1, 0], - [0, 0, 0]]) - >>> a = ma.masked_equal(a, 1) - >>> a - masked_array( - data=[[0, 0, 0], - [0, --, 0], - [0, 0, 0]], - mask=[[False, False, False], - [False, True, False], - [False, False, False]], - fill_value=1) - >>> ma.mask_cols(a) - masked_array( - data=[[0, --, 0], - [0, --, 0], - [0, --, 0]], - mask=[[False, True, False], - [False, True, False], - [False, True, False]], - fill_value=1) - - """ - if axis is not np._NoValue: - # remove the axis argument when this deprecation expires - # NumPy 1.18.0, 2019-11-28 - warnings.warn( - "The axis argument has always been ignored, in future passing it " - "will raise TypeError", DeprecationWarning, stacklevel=2) - return mask_rowcols(a, 1) - - -#####-------------------------------------------------------------------------- -#---- --- arraysetops --- -#####-------------------------------------------------------------------------- - -def ediff1d(arr, to_end=None, to_begin=None): - """ - Compute the differences between consecutive elements of an array. - - This function is the equivalent of `numpy.ediff1d` that takes masked - values into account, see `numpy.ediff1d` for details. - - See Also - -------- - numpy.ediff1d : Equivalent function for ndarrays. - - """ - arr = ma.asanyarray(arr).flat - ed = arr[1:] - arr[:-1] - arrays = [ed] - # - if to_begin is not None: - arrays.insert(0, to_begin) - if to_end is not None: - arrays.append(to_end) - # - if len(arrays) != 1: - # We'll save ourselves a copy of a potentially large array in the common - # case where neither to_begin or to_end was given. - ed = hstack(arrays) - # - return ed - - -def unique(ar1, return_index=False, return_inverse=False): - """ - Finds the unique elements of an array. - - Masked values are considered the same element (masked). The output array - is always a masked array. 
See `numpy.unique` for more details. - - See Also - -------- - numpy.unique : Equivalent function for ndarrays. - - """ - output = np.unique(ar1, - return_index=return_index, - return_inverse=return_inverse) - if isinstance(output, tuple): - output = list(output) - output[0] = output[0].view(MaskedArray) - output = tuple(output) - else: - output = output.view(MaskedArray) - return output - - -def intersect1d(ar1, ar2, assume_unique=False): - """ - Returns the unique elements common to both arrays. - - Masked values are considered equal one to the other. - The output is always a masked array. - - See `numpy.intersect1d` for more details. - - See Also - -------- - numpy.intersect1d : Equivalent function for ndarrays. - - Examples - -------- - >>> x = np.ma.array([1, 3, 3, 3], mask=[0, 0, 0, 1]) - >>> y = np.ma.array([3, 1, 1, 1], mask=[0, 0, 0, 1]) - >>> np.ma.intersect1d(x, y) - masked_array(data=[1, 3, --], - mask=[False, False, True], - fill_value=999999) - - """ - if assume_unique: - aux = ma.concatenate((ar1, ar2)) - else: - # Might be faster than unique( intersect1d( ar1, ar2 ) )? - aux = ma.concatenate((unique(ar1), unique(ar2))) - aux.sort() - return aux[:-1][aux[1:] == aux[:-1]] - - -def setxor1d(ar1, ar2, assume_unique=False): - """ - Set exclusive-or of 1-D arrays with unique elements. - - The output is always a masked array. See `numpy.setxor1d` for more details. - - See Also - -------- - numpy.setxor1d : Equivalent function for ndarrays. 
- - """ - if not assume_unique: - ar1 = unique(ar1) - ar2 = unique(ar2) - - aux = ma.concatenate((ar1, ar2)) - if aux.size == 0: - return aux - aux.sort() - auxf = aux.filled() -# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0 - flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True])) -# flag2 = ediff1d( flag ) == 0 - flag2 = (flag[1:] == flag[:-1]) - return aux[flag2] - - -def in1d(ar1, ar2, assume_unique=False, invert=False): - """ - Test whether each element of an array is also present in a second - array. - - The output is always a masked array. See `numpy.in1d` for more details. - - We recommend using :func:`isin` instead of `in1d` for new code. - - See Also - -------- - isin : Version of this function that preserves the shape of ar1. - numpy.in1d : Equivalent function for ndarrays. - - Notes - ----- - .. versionadded:: 1.4.0 - - """ - if not assume_unique: - ar1, rev_idx = unique(ar1, return_inverse=True) - ar2 = unique(ar2) - - ar = ma.concatenate((ar1, ar2)) - # We need this to be a stable sort, so always use 'mergesort' - # here. The values from the first array should always come before - # the values from the second array. - order = ar.argsort(kind='mergesort') - sar = ar[order] - if invert: - bool_ar = (sar[1:] != sar[:-1]) - else: - bool_ar = (sar[1:] == sar[:-1]) - flag = ma.concatenate((bool_ar, [invert])) - indx = order.argsort(kind='mergesort')[:len(ar1)] - - if assume_unique: - return flag[indx] - else: - return flag[indx][rev_idx] - - -def isin(element, test_elements, assume_unique=False, invert=False): - """ - Calculates `element in test_elements`, broadcasting over - `element` only. - - The output is always a masked array of the same shape as `element`. - See `numpy.isin` for more details. - - See Also - -------- - in1d : Flattened version of this function. - numpy.isin : Equivalent function for ndarrays. - - Notes - ----- - .. 
versionadded:: 1.13.0 - - """ - element = ma.asarray(element) - return in1d(element, test_elements, assume_unique=assume_unique, - invert=invert).reshape(element.shape) - - -def union1d(ar1, ar2): - """ - Union of two arrays. - - The output is always a masked array. See `numpy.union1d` for more details. - - See also - -------- - numpy.union1d : Equivalent function for ndarrays. - - """ - return unique(ma.concatenate((ar1, ar2), axis=None)) - - -def setdiff1d(ar1, ar2, assume_unique=False): - """ - Set difference of 1D arrays with unique elements. - - The output is always a masked array. See `numpy.setdiff1d` for more - details. - - See Also - -------- - numpy.setdiff1d : Equivalent function for ndarrays. - - Examples - -------- - >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1]) - >>> np.ma.setdiff1d(x, [1, 2]) - masked_array(data=[3, --], - mask=[False, True], - fill_value=999999) - - """ - if assume_unique: - ar1 = ma.asarray(ar1).ravel() - else: - ar1 = unique(ar1) - ar2 = unique(ar2) - return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)] - - -############################################################################### -# Covariance # -############################################################################### - - -def _covhelper(x, y=None, rowvar=True, allow_masked=True): - """ - Private function for the computation of covariance and correlation - coefficients. 
- - """ - x = ma.array(x, ndmin=2, copy=True, dtype=float) - xmask = ma.getmaskarray(x) - # Quick exit if we can't process masked data - if not allow_masked and xmask.any(): - raise ValueError("Cannot process masked data.") - # - if x.shape[0] == 1: - rowvar = True - # Make sure that rowvar is either 0 or 1 - rowvar = int(bool(rowvar)) - axis = 1 - rowvar - if rowvar: - tup = (slice(None), None) - else: - tup = (None, slice(None)) - # - if y is None: - xnotmask = np.logical_not(xmask).astype(int) - else: - y = array(y, copy=False, ndmin=2, dtype=float) - ymask = ma.getmaskarray(y) - if not allow_masked and ymask.any(): - raise ValueError("Cannot process masked data.") - if xmask.any() or ymask.any(): - if y.shape == x.shape: - # Define some common mask - common_mask = np.logical_or(xmask, ymask) - if common_mask is not nomask: - xmask = x._mask = y._mask = ymask = common_mask - x._sharedmask = False - y._sharedmask = False - x = ma.concatenate((x, y), axis) - xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int) - x -= x.mean(axis=rowvar)[tup] - return (x, xnotmask, rowvar) - - -def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): - """ - Estimate the covariance matrix. - - Except for the handling of missing data this function does the same as - `numpy.cov`. For more details and examples, see `numpy.cov`. - - By default, masked values are recognized as such. If `x` and `y` have the - same shape, a common mask is allocated: if ``x[i,j]`` is masked, then - ``y[i,j]`` will also be masked. - Setting `allow_masked` to False will raise an exception if values are - missing in either of the input arrays. - - Parameters - ---------- - x : array_like - A 1-D or 2-D array containing multiple variables and observations. - Each row of `x` represents a variable, and each column a single - observation of all those variables. Also see `rowvar` below. - y : array_like, optional - An additional set of variables and observations. 
`y` has the same - form as `x`. - rowvar : bool, optional - If `rowvar` is True (default), then each row represents a - variable, with observations in the columns. Otherwise, the relationship - is transposed: each column represents a variable, while the rows - contain observations. - bias : bool, optional - Default normalization (False) is by ``(N-1)``, where ``N`` is the - number of observations given (unbiased estimate). If `bias` is True, - then normalization is by ``N``. This keyword can be overridden by - the keyword ``ddof`` in numpy versions >= 1.5. - allow_masked : bool, optional - If True, masked values are propagated pair-wise: if a value is masked - in `x`, the corresponding value is masked in `y`. - If False, raises a `ValueError` exception when some values are missing. - ddof : {None, int}, optional - If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is - the number of observations; this overrides the value implied by - ``bias``. The default value is ``None``. - - .. versionadded:: 1.5 - - Raises - ------ - ValueError - Raised if some values are missing and `allow_masked` is False. - - See Also - -------- - numpy.cov - - """ - # Check inputs - if ddof is not None and ddof != int(ddof): - raise ValueError("ddof must be an integer") - # Set up ddof - if ddof is None: - if bias: - ddof = 0 - else: - ddof = 1 - - (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) - if not rowvar: - fact = np.dot(xnotmask.T, xnotmask) * 1. - ddof - result = (dot(x.T, x.conj(), strict=False) / fact).squeeze() - else: - fact = np.dot(xnotmask, xnotmask.T) * 1. - ddof - result = (dot(x, x.T.conj(), strict=False) / fact).squeeze() - return result - - -def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, - ddof=np._NoValue): - """ - Return Pearson product-moment correlation coefficients. - - Except for the handling of missing data this function does the same as - `numpy.corrcoef`. For more details and examples, see `numpy.corrcoef`. 
- - Parameters - ---------- - x : array_like - A 1-D or 2-D array containing multiple variables and observations. - Each row of `x` represents a variable, and each column a single - observation of all those variables. Also see `rowvar` below. - y : array_like, optional - An additional set of variables and observations. `y` has the same - shape as `x`. - rowvar : bool, optional - If `rowvar` is True (default), then each row represents a - variable, with observations in the columns. Otherwise, the relationship - is transposed: each column represents a variable, while the rows - contain observations. - bias : _NoValue, optional - Has no effect, do not use. - - .. deprecated:: 1.10.0 - allow_masked : bool, optional - If True, masked values are propagated pair-wise: if a value is masked - in `x`, the corresponding value is masked in `y`. - If False, raises an exception. Because `bias` is deprecated, this - argument needs to be treated as keyword only to avoid a warning. - ddof : _NoValue, optional - Has no effect, do not use. - - .. deprecated:: 1.10.0 - - See Also - -------- - numpy.corrcoef : Equivalent function in top-level NumPy module. - cov : Estimate the covariance matrix. - - Notes - ----- - This function accepts but discards arguments `bias` and `ddof`. This is - for backwards compatibility with previous versions of this function. These - arguments had no effect on the return values of the function and can be - safely ignored in this and previous versions of numpy. - """ - msg = 'bias and ddof have no effect and are deprecated' - if bias is not np._NoValue or ddof is not np._NoValue: - # 2015-03-15, 1.10 - warnings.warn(msg, DeprecationWarning, stacklevel=2) - # Get the data - (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) - # Compute the covariance matrix - if not rowvar: - fact = np.dot(xnotmask.T, xnotmask) * 1. - c = (dot(x.T, x.conj(), strict=False) / fact).squeeze() - else: - fact = np.dot(xnotmask, xnotmask.T) * 1. 
- c = (dot(x, x.T.conj(), strict=False) / fact).squeeze() - # Check whether we have a scalar - try: - diag = ma.diagonal(c) - except ValueError: - return 1 - # - if xnotmask.all(): - _denom = ma.sqrt(ma.multiply.outer(diag, diag)) - else: - _denom = diagflat(diag) - _denom._sharedmask = False # We know return is always a copy - n = x.shape[1 - rowvar] - if rowvar: - for i in range(n - 1): - for j in range(i + 1, n): - _x = mask_cols(vstack((x[i], x[j]))).var(axis=1) - _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x)) - else: - for i in range(n - 1): - for j in range(i + 1, n): - _x = mask_cols( - vstack((x[:, i], x[:, j]))).var(axis=1) - _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x)) - return c / _denom - -#####-------------------------------------------------------------------------- -#---- --- Concatenation helpers --- -#####-------------------------------------------------------------------------- - -class MAxisConcatenator(AxisConcatenator): - """ - Translate slice objects to concatenation along an axis. - - For documentation on usage, see `mr_class`. - - See Also - -------- - mr_class - - """ - concatenate = staticmethod(concatenate) - - @classmethod - def makemat(cls, arr): - # There used to be a view as np.matrix here, but we may eventually - # deprecate that class. In preparation, we use the unmasked version - # to construct the matrix (with copy=False for backwards compatibility - # with the .view) - data = super(MAxisConcatenator, cls).makemat(arr.data, copy=False) - return array(data, mask=arr.mask) - - def __getitem__(self, key): - # matrix builder syntax, like 'a, b; c, d' - if isinstance(key, str): - raise MAError("Unavailable for masked array.") - - return super(MAxisConcatenator, self).__getitem__(key) - - -class mr_class(MAxisConcatenator): - """ - Translate slice objects to concatenation along the first axis. - - This is the masked array version of `lib.index_tricks.RClass`. 
- - See Also - -------- - lib.index_tricks.RClass - - Examples - -------- - >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])] - masked_array(data=[1, 2, 3, ..., 4, 5, 6], - mask=False, - fill_value=999999) - - """ - def __init__(self): - MAxisConcatenator.__init__(self, 0) - -mr_ = mr_class() - -#####-------------------------------------------------------------------------- -#---- Find unmasked data --- -#####-------------------------------------------------------------------------- - -def flatnotmasked_edges(a): - """ - Find the indices of the first and last unmasked values. - - Expects a 1-D `MaskedArray`, returns None if all values are masked. - - Parameters - ---------- - a : array_like - Input 1-D `MaskedArray` - - Returns - ------- - edges : ndarray or None - The indices of first and last non-masked value in the array. - Returns None if all values are masked. - - See Also - -------- - flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges - clump_masked, clump_unmasked - - Notes - ----- - Only accepts 1-D arrays. - - Examples - -------- - >>> a = np.ma.arange(10) - >>> np.ma.flatnotmasked_edges(a) - array([0, 9]) - - >>> mask = (a < 3) | (a > 8) | (a == 5) - >>> a[mask] = np.ma.masked - >>> np.array(a[~a.mask]) - array([3, 4, 6, 7, 8]) - - >>> np.ma.flatnotmasked_edges(a) - array([3, 8]) - - >>> a[:] = np.ma.masked - >>> print(np.ma.flatnotmasked_edges(a)) - None - - """ - m = getmask(a) - if m is nomask or not np.any(m): - return np.array([0, a.size - 1]) - unmasked = np.flatnonzero(~m) - if len(unmasked) > 0: - return unmasked[[0, -1]] - else: - return None - - -def notmasked_edges(a, axis=None): - """ - Find the indices of the first and last unmasked values along an axis. - - If all values are masked, return None. Otherwise, return a list - of two tuples, corresponding to the indices of the first and last - unmasked values respectively. - - Parameters - ---------- - a : array_like - The input array. 
- axis : int, optional - Axis along which to perform the operation. - If None (default), applies to a flattened version of the array. - - Returns - ------- - edges : ndarray or list - An array of start and end indexes if there are any masked data in - the array. If there are no masked data in the array, `edges` is a - list of the first and last index. - - See Also - -------- - flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous - clump_masked, clump_unmasked - - Examples - -------- - >>> a = np.arange(9).reshape((3, 3)) - >>> m = np.zeros_like(a) - >>> m[1:, 1:] = 1 - - >>> am = np.ma.array(a, mask=m) - >>> np.array(am[~am.mask]) - array([0, 1, 2, 3, 6]) - - >>> np.ma.notmasked_edges(am) - array([0, 6]) - - """ - a = asarray(a) - if axis is None or a.ndim == 1: - return flatnotmasked_edges(a) - m = getmaskarray(a) - idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim)) - return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]), - tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]), ] - - -def flatnotmasked_contiguous(a): - """ - Find contiguous unmasked data in a masked array along the given axis. - - Parameters - ---------- - a : narray - The input array. - - Returns - ------- - slice_list : list - A sorted sequence of `slice` objects (start index, end index). - - ..versionchanged:: 1.15.0 - Now returns an empty list instead of None for a fully masked array - - See Also - -------- - flatnotmasked_edges, notmasked_contiguous, notmasked_edges - clump_masked, clump_unmasked - - Notes - ----- - Only accepts 2-D arrays at most. 
- - Examples - -------- - >>> a = np.ma.arange(10) - >>> np.ma.flatnotmasked_contiguous(a) - [slice(0, 10, None)] - - >>> mask = (a < 3) | (a > 8) | (a == 5) - >>> a[mask] = np.ma.masked - >>> np.array(a[~a.mask]) - array([3, 4, 6, 7, 8]) - - >>> np.ma.flatnotmasked_contiguous(a) - [slice(3, 5, None), slice(6, 9, None)] - >>> a[:] = np.ma.masked - >>> np.ma.flatnotmasked_contiguous(a) - [] - - """ - m = getmask(a) - if m is nomask: - return [slice(0, a.size)] - i = 0 - result = [] - for (k, g) in itertools.groupby(m.ravel()): - n = len(list(g)) - if not k: - result.append(slice(i, i + n)) - i += n - return result - -def notmasked_contiguous(a, axis=None): - """ - Find contiguous unmasked data in a masked array along the given axis. - - Parameters - ---------- - a : array_like - The input array. - axis : int, optional - Axis along which to perform the operation. - If None (default), applies to a flattened version of the array, and this - is the same as `flatnotmasked_contiguous`. - - Returns - ------- - endpoints : list - A list of slices (start and end indexes) of unmasked indexes - in the array. - - If the input is 2d and axis is specified, the result is a list of lists. - - See Also - -------- - flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges - clump_masked, clump_unmasked - - Notes - ----- - Only accepts 2-D arrays at most. 
- - Examples - -------- - >>> a = np.arange(12).reshape((3, 4)) - >>> mask = np.zeros_like(a) - >>> mask[1:, :-1] = 1; mask[0, 1] = 1; mask[-1, 0] = 0 - >>> ma = np.ma.array(a, mask=mask) - >>> ma - masked_array( - data=[[0, --, 2, 3], - [--, --, --, 7], - [8, --, --, 11]], - mask=[[False, True, False, False], - [ True, True, True, False], - [False, True, True, False]], - fill_value=999999) - >>> np.array(ma[~ma.mask]) - array([ 0, 2, 3, 7, 8, 11]) - - >>> np.ma.notmasked_contiguous(ma) - [slice(0, 1, None), slice(2, 4, None), slice(7, 9, None), slice(11, 12, None)] - - >>> np.ma.notmasked_contiguous(ma, axis=0) - [[slice(0, 1, None), slice(2, 3, None)], [], [slice(0, 1, None)], [slice(0, 3, None)]] - - >>> np.ma.notmasked_contiguous(ma, axis=1) - [[slice(0, 1, None), slice(2, 4, None)], [slice(3, 4, None)], [slice(0, 1, None), slice(3, 4, None)]] - - """ - a = asarray(a) - nd = a.ndim - if nd > 2: - raise NotImplementedError("Currently limited to atmost 2D array.") - if axis is None or nd == 1: - return flatnotmasked_contiguous(a) - # - result = [] - # - other = (axis + 1) % 2 - idx = [0, 0] - idx[axis] = slice(None, None) - # - for i in range(a.shape[other]): - idx[other] = i - result.append(flatnotmasked_contiguous(a[tuple(idx)])) - return result - - -def _ezclump(mask): - """ - Finds the clumps (groups of data with the same values) for a 1D bool array. - - Returns a series of slices. - """ - if mask.ndim > 1: - mask = mask.ravel() - idx = (mask[1:] ^ mask[:-1]).nonzero() - idx = idx[0] + 1 - - if mask[0]: - if len(idx) == 0: - return [slice(0, mask.size)] - - r = [slice(0, idx[0])] - r.extend((slice(left, right) - for left, right in zip(idx[1:-1:2], idx[2::2]))) - else: - if len(idx) == 0: - return [] - - r = [slice(left, right) for left, right in zip(idx[:-1:2], idx[1::2])] - - if mask[-1]: - r.append(slice(idx[-1], mask.size)) - return r - - -def clump_unmasked(a): - """ - Return list of slices corresponding to the unmasked clumps of a 1-D array. 
- (A "clump" is defined as a contiguous region of the array). - - Parameters - ---------- - a : ndarray - A one-dimensional masked array. - - Returns - ------- - slices : list of slice - The list of slices, one for each continuous region of unmasked - elements in `a`. - - Notes - ----- - .. versionadded:: 1.4.0 - - See Also - -------- - flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges - notmasked_contiguous, clump_masked - - Examples - -------- - >>> a = np.ma.masked_array(np.arange(10)) - >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked - >>> np.ma.clump_unmasked(a) - [slice(3, 6, None), slice(7, 8, None)] - - """ - mask = getattr(a, '_mask', nomask) - if mask is nomask: - return [slice(0, a.size)] - return _ezclump(~mask) - - -def clump_masked(a): - """ - Returns a list of slices corresponding to the masked clumps of a 1-D array. - (A "clump" is defined as a contiguous region of the array). - - Parameters - ---------- - a : ndarray - A one-dimensional masked array. - - Returns - ------- - slices : list of slice - The list of slices, one for each continuous region of masked elements - in `a`. - - Notes - ----- - .. versionadded:: 1.4.0 - - See Also - -------- - flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges - notmasked_contiguous, clump_unmasked - - Examples - -------- - >>> a = np.ma.masked_array(np.arange(10)) - >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked - >>> np.ma.clump_masked(a) - [slice(0, 3, None), slice(6, 7, None), slice(8, 10, None)] - - """ - mask = ma.getmask(a) - if mask is nomask: - return [] - return _ezclump(mask) - - -############################################################################### -# Polynomial fit # -############################################################################### - - -def vander(x, n=None): - """ - Masked values in the input array result in rows of zeros. 
- - """ - _vander = np.vander(x, n) - m = getmask(x) - if m is not nomask: - _vander[m] = 0 - return _vander - -vander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__) - - -def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): - """ - Any masked values in x is propagated in y, and vice-versa. - - """ - x = asarray(x) - y = asarray(y) - - m = getmask(x) - if y.ndim == 1: - m = mask_or(m, getmask(y)) - elif y.ndim == 2: - my = getmask(mask_rows(y)) - if my is not nomask: - m = mask_or(m, my[:, 0]) - else: - raise TypeError("Expected a 1D or 2D array for y!") - - if w is not None: - w = asarray(w) - if w.ndim != 1: - raise TypeError("expected a 1-d array for weights") - if w.shape[0] != y.shape[0]: - raise TypeError("expected w and y to have the same length") - m = mask_or(m, getmask(w)) - - if m is not nomask: - not_m = ~m - if w is not None: - w = w[not_m] - return np.polyfit(x[not_m], y[not_m], deg, rcond, full, w, cov) - else: - return np.polyfit(x, y, deg, rcond, full, w, cov) - -polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__) diff --git a/venv/lib/python3.7/site-packages/numpy/ma/mrecords.py b/venv/lib/python3.7/site-packages/numpy/ma/mrecords.py deleted file mode 100644 index ae1a12c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/ma/mrecords.py +++ /dev/null @@ -1,774 +0,0 @@ -""":mod:`numpy.ma..mrecords` - -Defines the equivalent of :class:`numpy.recarrays` for masked arrays, -where fields can be accessed as attributes. -Note that :class:`numpy.ma.MaskedArray` already supports structured datatypes -and the masking of individual fields. - -.. moduleauthor:: Pierre Gerard-Marchant - -""" -from __future__ import division, absolute_import, print_function - -# We should make sure that no field is called '_mask','mask','_fieldmask', -# or whatever restricted keywords. An idea would be to no bother in the -# first place, and then rename the invalid fields with a trailing -# underscore. 
Maybe we could just overload the parser function ? - -import sys -import warnings - -import numpy as np -from numpy.compat import basestring -from numpy import ( - bool_, dtype, ndarray, recarray, array as narray - ) -from numpy.core.records import ( - fromarrays as recfromarrays, fromrecords as recfromrecords - ) - -_byteorderconv = np.core.records._byteorderconv - -import numpy.ma as ma -from numpy.ma import ( - MAError, MaskedArray, masked, nomask, masked_array, getdata, - getmaskarray, filled - ) - -_check_fill_value = ma.core._check_fill_value - - -__all__ = [ - 'MaskedRecords', 'mrecarray', 'fromarrays', 'fromrecords', - 'fromtextfile', 'addfield', - ] - -reserved_fields = ['_data', '_mask', '_fieldmask', 'dtype'] - - -def _checknames(descr, names=None): - """ - Checks that field names ``descr`` are not reserved keywords. - - If this is the case, a default 'f%i' is substituted. If the argument - `names` is not None, updates the field names to valid names. - - """ - ndescr = len(descr) - default_names = ['f%i' % i for i in range(ndescr)] - if names is None: - new_names = default_names - else: - if isinstance(names, (tuple, list)): - new_names = names - elif isinstance(names, str): - new_names = names.split(',') - else: - raise NameError("illegal input names %s" % repr(names)) - nnames = len(new_names) - if nnames < ndescr: - new_names += default_names[nnames:] - ndescr = [] - for (n, d, t) in zip(new_names, default_names, descr.descr): - if n in reserved_fields: - if t[0] in reserved_fields: - ndescr.append((d, t[1])) - else: - ndescr.append(t) - else: - ndescr.append((n, t[1])) - return np.dtype(ndescr) - - -def _get_fieldmask(self): - mdescr = [(n, '|b1') for n in self.dtype.names] - fdmask = np.empty(self.shape, dtype=mdescr) - fdmask.flat = tuple([False] * len(mdescr)) - return fdmask - - -class MaskedRecords(MaskedArray, object): - """ - - Attributes - ---------- - _data : recarray - Underlying data, as a record array. 
- _mask : boolean array - Mask of the records. A record is masked when all its fields are - masked. - _fieldmask : boolean recarray - Record array of booleans, setting the mask of each individual field - of each record. - _fill_value : record - Filling values for each field. - - """ - - def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None, - formats=None, names=None, titles=None, - byteorder=None, aligned=False, - mask=nomask, hard_mask=False, fill_value=None, keep_mask=True, - copy=False, - **options): - - self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset, - strides=strides, formats=formats, names=names, - titles=titles, byteorder=byteorder, - aligned=aligned,) - - mdtype = ma.make_mask_descr(self.dtype) - if mask is nomask or not np.size(mask): - if not keep_mask: - self._mask = tuple([False] * len(mdtype)) - else: - mask = np.array(mask, copy=copy) - if mask.shape != self.shape: - (nd, nm) = (self.size, mask.size) - if nm == 1: - mask = np.resize(mask, self.shape) - elif nm == nd: - mask = np.reshape(mask, self.shape) - else: - msg = "Mask and data not compatible: data size is %i, " + \ - "mask size is %i." 
- raise MAError(msg % (nd, nm)) - copy = True - if not keep_mask: - self.__setmask__(mask) - self._sharedmask = True - else: - if mask.dtype == mdtype: - _mask = mask - else: - _mask = np.array([tuple([m] * len(mdtype)) for m in mask], - dtype=mdtype) - self._mask = _mask - return self - - def __array_finalize__(self, obj): - # Make sure we have a _fieldmask by default - _mask = getattr(obj, '_mask', None) - if _mask is None: - objmask = getattr(obj, '_mask', nomask) - _dtype = ndarray.__getattribute__(self, 'dtype') - if objmask is nomask: - _mask = ma.make_mask_none(self.shape, dtype=_dtype) - else: - mdescr = ma.make_mask_descr(_dtype) - _mask = narray([tuple([m] * len(mdescr)) for m in objmask], - dtype=mdescr).view(recarray) - # Update some of the attributes - _dict = self.__dict__ - _dict.update(_mask=_mask) - self._update_from(obj) - if _dict['_baseclass'] == ndarray: - _dict['_baseclass'] = recarray - return - - @property - def _data(self): - """ - Returns the data as a recarray. - - """ - return ndarray.view(self, recarray) - - @property - def _fieldmask(self): - """ - Alias to mask. 
- - """ - return self._mask - - def __len__(self): - """ - Returns the length - - """ - # We have more than one record - if self.ndim: - return len(self._data) - # We have only one record: return the nb of fields - return len(self.dtype) - - def __getattribute__(self, attr): - try: - return object.__getattribute__(self, attr) - except AttributeError: - # attr must be a fieldname - pass - fielddict = ndarray.__getattribute__(self, 'dtype').fields - try: - res = fielddict[attr][:2] - except (TypeError, KeyError): - raise AttributeError("record array has no attribute %s" % attr) - # So far, so good - _localdict = ndarray.__getattribute__(self, '__dict__') - _data = ndarray.view(self, _localdict['_baseclass']) - obj = _data.getfield(*res) - if obj.dtype.names is not None: - raise NotImplementedError("MaskedRecords is currently limited to" - "simple records.") - # Get some special attributes - # Reset the object's mask - hasmasked = False - _mask = _localdict.get('_mask', None) - if _mask is not None: - try: - _mask = _mask[attr] - except IndexError: - # Couldn't find a mask: use the default (nomask) - pass - tp_len = len(_mask.dtype) - hasmasked = _mask.view((bool, ((tp_len,) if tp_len else ()))).any() - if (obj.shape or hasmasked): - obj = obj.view(MaskedArray) - obj._baseclass = ndarray - obj._isfield = True - obj._mask = _mask - # Reset the field values - _fill_value = _localdict.get('_fill_value', None) - if _fill_value is not None: - try: - obj._fill_value = _fill_value[attr] - except ValueError: - obj._fill_value = None - else: - obj = obj.item() - return obj - - def __setattr__(self, attr, val): - """ - Sets the attribute attr to the value val. - - """ - # Should we call __setmask__ first ? 
- if attr in ['mask', 'fieldmask']: - self.__setmask__(val) - return - # Create a shortcut (so that we don't have to call getattr all the time) - _localdict = object.__getattribute__(self, '__dict__') - # Check whether we're creating a new field - newattr = attr not in _localdict - try: - # Is attr a generic attribute ? - ret = object.__setattr__(self, attr, val) - except Exception: - # Not a generic attribute: exit if it's not a valid field - fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} - optinfo = ndarray.__getattribute__(self, '_optinfo') or {} - if not (attr in fielddict or attr in optinfo): - raise - else: - # Get the list of names - fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} - # Check the attribute - if attr not in fielddict: - return ret - if newattr: - # We just added this one or this setattr worked on an - # internal attribute. - try: - object.__delattr__(self, attr) - except Exception: - return ret - # Let's try to set the field - try: - res = fielddict[attr][:2] - except (TypeError, KeyError): - raise AttributeError("record array has no attribute %s" % attr) - - if val is masked: - _fill_value = _localdict['_fill_value'] - if _fill_value is not None: - dval = _localdict['_fill_value'][attr] - else: - dval = val - mval = True - else: - dval = filled(val) - mval = getmaskarray(val) - obj = ndarray.__getattribute__(self, '_data').setfield(dval, *res) - _localdict['_mask'].__setitem__(attr, mval) - return obj - - def __getitem__(self, indx): - """ - Returns all the fields sharing the same fieldname base. - - The fieldname base is either `_data` or `_mask`. 
- - """ - _localdict = self.__dict__ - _mask = ndarray.__getattribute__(self, '_mask') - _data = ndarray.view(self, _localdict['_baseclass']) - # We want a field - if isinstance(indx, basestring): - # Make sure _sharedmask is True to propagate back to _fieldmask - # Don't use _set_mask, there are some copies being made that - # break propagation Don't force the mask to nomask, that wreaks - # easy masking - obj = _data[indx].view(MaskedArray) - obj._mask = _mask[indx] - obj._sharedmask = True - fval = _localdict['_fill_value'] - if fval is not None: - obj._fill_value = fval[indx] - # Force to masked if the mask is True - if not obj.ndim and obj._mask: - return masked - return obj - # We want some elements. - # First, the data. - obj = np.array(_data[indx], copy=False).view(mrecarray) - obj._mask = np.array(_mask[indx], copy=False).view(recarray) - return obj - - def __setitem__(self, indx, value): - """ - Sets the given record to value. - - """ - MaskedArray.__setitem__(self, indx, value) - if isinstance(indx, basestring): - self._mask[indx] = ma.getmaskarray(value) - - def __str__(self): - """ - Calculates the string representation. - - """ - if self.size > 1: - mstr = ["(%s)" % ",".join([str(i) for i in s]) - for s in zip(*[getattr(self, f) for f in self.dtype.names])] - return "[%s]" % ", ".join(mstr) - else: - mstr = ["%s" % ",".join([str(i) for i in s]) - for s in zip([getattr(self, f) for f in self.dtype.names])] - return "(%s)" % ", ".join(mstr) - - def __repr__(self): - """ - Calculates the repr representation. - - """ - _names = self.dtype.names - fmt = "%%%is : %%s" % (max([len(n) for n in _names]) + 4,) - reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names] - reprstr.insert(0, 'masked_records(') - reprstr.extend([fmt % (' fill_value', self.fill_value), - ' )']) - return str("\n".join(reprstr)) - - def view(self, dtype=None, type=None): - """ - Returns a view of the mrecarray. - - """ - # OK, basic copy-paste from MaskedArray.view. 
- if dtype is None: - if type is None: - output = ndarray.view(self) - else: - output = ndarray.view(self, type) - # Here again. - elif type is None: - try: - if issubclass(dtype, ndarray): - output = ndarray.view(self, dtype) - dtype = None - else: - output = ndarray.view(self, dtype) - # OK, there's the change - except TypeError: - dtype = np.dtype(dtype) - # we need to revert to MaskedArray, but keeping the possibility - # of subclasses (eg, TimeSeriesRecords), so we'll force a type - # set to the first parent - if dtype.fields is None: - basetype = self.__class__.__bases__[0] - output = self.__array__().view(dtype, basetype) - output._update_from(self) - else: - output = ndarray.view(self, dtype) - output._fill_value = None - else: - output = ndarray.view(self, dtype, type) - # Update the mask, just like in MaskedArray.view - if (getattr(output, '_mask', nomask) is not nomask): - mdtype = ma.make_mask_descr(output.dtype) - output._mask = self._mask.view(mdtype, ndarray) - output._mask.shape = output.shape - return output - - def harden_mask(self): - """ - Forces the mask to hard. - - """ - self._hardmask = True - - def soften_mask(self): - """ - Forces the mask to soft - - """ - self._hardmask = False - - def copy(self): - """ - Returns a copy of the masked record. - - """ - copied = self._data.copy().view(type(self)) - copied._mask = self._mask.copy() - return copied - - def tolist(self, fill_value=None): - """ - Return the data portion of the array as a list. - - Data items are converted to the nearest compatible Python type. - Masked values are converted to fill_value. If fill_value is None, - the corresponding entries in the output list will be ``None``. - - """ - if fill_value is not None: - return self.filled(fill_value).tolist() - result = narray(self.filled().tolist(), dtype=object) - mask = narray(self._mask.tolist()) - result[mask] = None - return result.tolist() - - def __getstate__(self): - """Return the internal state of the masked array. 
- - This is for pickling. - - """ - state = (1, - self.shape, - self.dtype, - self.flags.fnc, - self._data.tobytes(), - self._mask.tobytes(), - self._fill_value, - ) - return state - - def __setstate__(self, state): - """ - Restore the internal state of the masked array. - - This is for pickling. ``state`` is typically the output of the - ``__getstate__`` output, and is a 5-tuple: - - - class name - - a tuple giving the shape of the data - - a typecode for the data - - a binary string for the data - - a binary string for the mask. - - """ - (ver, shp, typ, isf, raw, msk, flv) = state - ndarray.__setstate__(self, (shp, typ, isf, raw)) - mdtype = dtype([(k, bool_) for (k, _) in self.dtype.descr]) - self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk)) - self.fill_value = flv - - def __reduce__(self): - """ - Return a 3-tuple for pickling a MaskedArray. - - """ - return (_mrreconstruct, - (self.__class__, self._baseclass, (0,), 'b',), - self.__getstate__()) - -def _mrreconstruct(subtype, baseclass, baseshape, basetype,): - """ - Build a new MaskedArray from the information stored in a pickle. - - """ - _data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype) - _mask = ndarray.__new__(ndarray, baseshape, 'b1') - return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,) - -mrecarray = MaskedRecords - - -############################################################################### -# Constructors # -############################################################################### - - -def fromarrays(arraylist, dtype=None, shape=None, formats=None, - names=None, titles=None, aligned=False, byteorder=None, - fill_value=None): - """ - Creates a mrecarray from a (flat) list of masked arrays. - - Parameters - ---------- - arraylist : sequence - A list of (masked) arrays. Each element of the sequence is first converted - to a masked array if needed. 
If a 2D array is passed as argument, it is - processed line by line - dtype : {None, dtype}, optional - Data type descriptor. - shape : {None, integer}, optional - Number of records. If None, shape is defined from the shape of the - first array in the list. - formats : {None, sequence}, optional - Sequence of formats for each individual field. If None, the formats will - be autodetected by inspecting the fields and selecting the highest dtype - possible. - names : {None, sequence}, optional - Sequence of the names of each field. - fill_value : {None, sequence}, optional - Sequence of data to be used as filling values. - - Notes - ----- - Lists of tuples should be preferred over lists of lists for faster processing. - - """ - datalist = [getdata(x) for x in arraylist] - masklist = [np.atleast_1d(getmaskarray(x)) for x in arraylist] - _array = recfromarrays(datalist, - dtype=dtype, shape=shape, formats=formats, - names=names, titles=titles, aligned=aligned, - byteorder=byteorder).view(mrecarray) - _array._mask.flat = list(zip(*masklist)) - if fill_value is not None: - _array.fill_value = fill_value - return _array - - -def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None, - titles=None, aligned=False, byteorder=None, - fill_value=None, mask=nomask): - """ - Creates a MaskedRecords from a list of records. - - Parameters - ---------- - reclist : sequence - A list of records. Each element of the sequence is first converted - to a masked array if needed. If a 2D array is passed as argument, it is - processed line by line - dtype : {None, dtype}, optional - Data type descriptor. - shape : {None,int}, optional - Number of records. If None, ``shape`` is defined from the shape of the - first array in the list. - formats : {None, sequence}, optional - Sequence of formats for each individual field. If None, the formats will - be autodetected by inspecting the fields and selecting the highest dtype - possible. 
- names : {None, sequence}, optional - Sequence of the names of each field. - fill_value : {None, sequence}, optional - Sequence of data to be used as filling values. - mask : {nomask, sequence}, optional. - External mask to apply on the data. - - Notes - ----- - Lists of tuples should be preferred over lists of lists for faster processing. - - """ - # Grab the initial _fieldmask, if needed: - _mask = getattr(reclist, '_mask', None) - # Get the list of records. - if isinstance(reclist, ndarray): - # Make sure we don't have some hidden mask - if isinstance(reclist, MaskedArray): - reclist = reclist.filled().view(ndarray) - # Grab the initial dtype, just in case - if dtype is None: - dtype = reclist.dtype - reclist = reclist.tolist() - mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats, - names=names, titles=titles, - aligned=aligned, byteorder=byteorder).view(mrecarray) - # Set the fill_value if needed - if fill_value is not None: - mrec.fill_value = fill_value - # Now, let's deal w/ the mask - if mask is not nomask: - mask = np.array(mask, copy=False) - maskrecordlength = len(mask.dtype) - if maskrecordlength: - mrec._mask.flat = mask - elif mask.ndim == 2: - mrec._mask.flat = [tuple(m) for m in mask] - else: - mrec.__setmask__(mask) - if _mask is not None: - mrec._mask[:] = _mask - return mrec - - -def _guessvartypes(arr): - """ - Tries to guess the dtypes of the str_ ndarray `arr`. - - Guesses by testing element-wise conversion. Returns a list of dtypes. - The array is first converted to ndarray. If the array is 2D, the test - is performed on the first line. An exception is raised if the file is - 3D or more. - - """ - vartypes = [] - arr = np.asarray(arr) - if arr.ndim == 2: - arr = arr[0] - elif arr.ndim > 2: - raise ValueError("The array should be 2D at most!") - # Start the conversion loop. 
- for f in arr: - try: - int(f) - except (ValueError, TypeError): - try: - float(f) - except (ValueError, TypeError): - try: - complex(f) - except (ValueError, TypeError): - vartypes.append(arr.dtype) - else: - vartypes.append(np.dtype(complex)) - else: - vartypes.append(np.dtype(float)) - else: - vartypes.append(np.dtype(int)) - return vartypes - - -def openfile(fname): - """ - Opens the file handle of file `fname`. - - """ - # A file handle - if hasattr(fname, 'readline'): - return fname - # Try to open the file and guess its type - try: - f = open(fname) - except IOError: - raise IOError("No such file: '%s'" % fname) - if f.readline()[:2] != "\\x": - f.seek(0, 0) - return f - f.close() - raise NotImplementedError("Wow, binary file") - - -def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='', - varnames=None, vartypes=None): - """ - Creates a mrecarray from data stored in the file `filename`. - - Parameters - ---------- - fname : {file name/handle} - Handle of an opened file. - delimitor : {None, string}, optional - Alphanumeric character used to separate columns in the file. - If None, any (group of) white spacestring(s) will be used. - commentchar : {'#', string}, optional - Alphanumeric character used to mark the start of a comment. - missingchar : {'', string}, optional - String indicating missing data, and used to create the masks. - varnames : {None, sequence}, optional - Sequence of the variable names. If None, a list will be created from - the first non empty line of the file. - vartypes : {None, sequence}, optional - Sequence of the variables dtypes. If None, it will be estimated from - the first non-commented line. - - - Ultra simple: the varnames are in the header, one line""" - # Try to open the file. 
- ftext = openfile(fname) - - # Get the first non-empty line as the varnames - while True: - line = ftext.readline() - firstline = line[:line.find(commentchar)].strip() - _varnames = firstline.split(delimitor) - if len(_varnames) > 1: - break - if varnames is None: - varnames = _varnames - - # Get the data. - _variables = masked_array([line.strip().split(delimitor) for line in ftext - if line[0] != commentchar and len(line) > 1]) - (_, nfields) = _variables.shape - ftext.close() - - # Try to guess the dtype. - if vartypes is None: - vartypes = _guessvartypes(_variables[0]) - else: - vartypes = [np.dtype(v) for v in vartypes] - if len(vartypes) != nfields: - msg = "Attempting to %i dtypes for %i fields!" - msg += " Reverting to default." - warnings.warn(msg % (len(vartypes), nfields), stacklevel=2) - vartypes = _guessvartypes(_variables[0]) - - # Construct the descriptor. - mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)] - mfillv = [ma.default_fill_value(f) for f in vartypes] - - # Get the data and the mask. - # We just need a list of masked_arrays. It's easier to create it like that: - _mask = (_variables.T == missingchar) - _datalist = [masked_array(a, mask=m, dtype=t, fill_value=f) - for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)] - - return fromarrays(_datalist, dtype=mdescr) - - -def addfield(mrecord, newfield, newfieldname=None): - """Adds a new field to the masked record array - - Uses `newfield` as data and `newfieldname` as name. If `newfieldname` - is None, the new field name is set to 'fi', where `i` is the number of - existing fields. - - """ - _data = mrecord._data - _mask = mrecord._mask - if newfieldname is None or newfieldname in reserved_fields: - newfieldname = 'f%i' % len(_data.dtype) - newfield = ma.array(newfield) - # Get the new data. 
- # Create a new empty recarray - newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)]) - newdata = recarray(_data.shape, newdtype) - # Add the existing field - [newdata.setfield(_data.getfield(*f), *f) - for f in _data.dtype.fields.values()] - # Add the new field - newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname]) - newdata = newdata.view(MaskedRecords) - # Get the new mask - # Create a new empty recarray - newmdtype = np.dtype([(n, bool_) for n in newdtype.names]) - newmask = recarray(_data.shape, newmdtype) - # Add the old masks - [newmask.setfield(_mask.getfield(*f), *f) - for f in _mask.dtype.fields.values()] - # Add the mask of the new field - newmask.setfield(getmaskarray(newfield), - *newmask.dtype.fields[newfieldname]) - newdata._mask = newmask - return newdata diff --git a/venv/lib/python3.7/site-packages/numpy/ma/setup.py b/venv/lib/python3.7/site-packages/numpy/ma/setup.py deleted file mode 100644 index d1d6c89..0000000 --- a/venv/lib/python3.7/site-packages/numpy/ma/setup.py +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, print_function - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('ma', parent_package, top_path) - config.add_data_dir('tests') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - config = configuration(top_path='').todict() - setup(**config) diff --git a/venv/lib/python3.7/site-packages/numpy/ma/tests/__init__.py b/venv/lib/python3.7/site-packages/numpy/ma/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/numpy/ma/tests/test_core.py b/venv/lib/python3.7/site-packages/numpy/ma/tests/test_core.py deleted file mode 100644 index b72ce56..0000000 --- a/venv/lib/python3.7/site-packages/numpy/ma/tests/test_core.py +++ /dev/null @@ -1,5219 +0,0 @@ -# pylint: 
disable-msg=W0400,W0511,W0611,W0612,W0614,R0201,E1102 -"""Tests suite for MaskedArray & subclassing. - -:author: Pierre Gerard-Marchant -:contact: pierregm_at_uga_dot_edu -""" -from __future__ import division, absolute_import, print_function - -__author__ = "Pierre GF Gerard-Marchant" - -import sys -import warnings -import operator -import itertools -import textwrap -import pytest - -from functools import reduce - - -import numpy as np -import numpy.ma.core -import numpy.core.fromnumeric as fromnumeric -import numpy.core.umath as umath -from numpy.testing import ( - assert_raises, assert_warns, suppress_warnings - ) -from numpy import ndarray -from numpy.compat import asbytes -from numpy.ma.testutils import ( - assert_, assert_array_equal, assert_equal, assert_almost_equal, - assert_equal_records, fail_if_equal, assert_not_equal, - assert_mask_equal - ) -from numpy.ma.core import ( - MAError, MaskError, MaskType, MaskedArray, abs, absolute, add, all, - allclose, allequal, alltrue, angle, anom, arange, arccos, arccosh, arctan2, - arcsin, arctan, argsort, array, asarray, choose, concatenate, - conjugate, cos, cosh, count, default_fill_value, diag, divide, empty, - empty_like, equal, exp, flatten_mask, filled, fix_invalid, - flatten_structured_array, fromflex, getmask, getmaskarray, greater, - greater_equal, identity, inner, isMaskedArray, less, less_equal, log, - log10, make_mask, make_mask_descr, mask_or, masked, masked_array, - masked_equal, masked_greater, masked_greater_equal, masked_inside, - masked_less, masked_less_equal, masked_not_equal, masked_outside, - masked_print_option, masked_values, masked_where, max, maximum, - maximum_fill_value, min, minimum, minimum_fill_value, mod, multiply, - mvoid, nomask, not_equal, ones, outer, power, product, put, putmask, - ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, sqrt, - subtract, sum, take, tan, tanh, transpose, where, zeros, - ) -from numpy.compat import pickle - -pi = np.pi - - 
-suppress_copy_mask_on_assignment = suppress_warnings() -suppress_copy_mask_on_assignment.filter( - numpy.ma.core.MaskedArrayFutureWarning, - "setting an item on a masked array which has a shared mask will not copy") - - -# For parametrized numeric testing -num_dts = [np.dtype(dt_) for dt_ in '?bhilqBHILQefdgFD'] -num_ids = [dt_.char for dt_ in num_dts] - - -class TestMaskedArray(object): - # Base test class for MaskedArrays. - - def setup(self): - # Base data definition. - x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) - y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) - a10 = 10. - m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] - xm = masked_array(x, mask=m1) - ym = masked_array(y, mask=m2) - z = np.array([-.5, 0., .5, .8]) - zm = masked_array(z, mask=[0, 1, 0, 0]) - xf = np.where(m1, 1e+20, x) - xm.set_fill_value(1e+20) - self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf) - - def test_basicattributes(self): - # Tests some basic array attributes. - a = array([1, 3, 2]) - b = array([1, 3, 2], mask=[1, 0, 1]) - assert_equal(a.ndim, 1) - assert_equal(b.ndim, 1) - assert_equal(a.size, 3) - assert_equal(b.size, 3) - assert_equal(a.shape, (3,)) - assert_equal(b.shape, (3,)) - - def test_basic0d(self): - # Checks masking a scalar - x = masked_array(0) - assert_equal(str(x), '0') - x = masked_array(0, mask=True) - assert_equal(str(x), str(masked_print_option)) - x = masked_array(0, mask=False) - assert_equal(str(x), '0') - x = array(0, mask=1) - assert_(x.filled().dtype is x._data.dtype) - - def test_basic1d(self): - # Test of basic array creation and properties in 1 dimension. 
- (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - assert_(not isMaskedArray(x)) - assert_(isMaskedArray(xm)) - assert_((xm - ym).filled(0).any()) - fail_if_equal(xm.mask.astype(int), ym.mask.astype(int)) - s = x.shape - assert_equal(np.shape(xm), s) - assert_equal(xm.shape, s) - assert_equal(xm.dtype, x.dtype) - assert_equal(zm.dtype, z.dtype) - assert_equal(xm.size, reduce(lambda x, y:x * y, s)) - assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) - assert_array_equal(xm, xf) - assert_array_equal(filled(xm, 1.e20), xf) - assert_array_equal(x, xm) - - def test_basic2d(self): - # Test of basic array creation and properties in 2 dimensions. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - for s in [(4, 3), (6, 2)]: - x.shape = s - y.shape = s - xm.shape = s - ym.shape = s - xf.shape = s - - assert_(not isMaskedArray(x)) - assert_(isMaskedArray(xm)) - assert_equal(shape(xm), s) - assert_equal(xm.shape, s) - assert_equal(xm.size, reduce(lambda x, y:x * y, s)) - assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) - assert_equal(xm, xf) - assert_equal(filled(xm, 1.e20), xf) - assert_equal(x, xm) - - def test_concatenate_basic(self): - # Tests concatenations. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - # basic concatenation - assert_equal(np.concatenate((x, y)), concatenate((xm, ym))) - assert_equal(np.concatenate((x, y)), concatenate((x, y))) - assert_equal(np.concatenate((x, y)), concatenate((xm, y))) - assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x))) - - def test_concatenate_alongaxis(self): - # Tests concatenations. 
- (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - # Concatenation along an axis - s = (3, 4) - x.shape = y.shape = xm.shape = ym.shape = s - assert_equal(xm.mask, np.reshape(m1, s)) - assert_equal(ym.mask, np.reshape(m2, s)) - xmym = concatenate((xm, ym), 1) - assert_equal(np.concatenate((x, y), 1), xmym) - assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask) - - x = zeros(2) - y = array(ones(2), mask=[False, True]) - z = concatenate((x, y)) - assert_array_equal(z, [0, 0, 1, 1]) - assert_array_equal(z.mask, [False, False, False, True]) - z = concatenate((y, x)) - assert_array_equal(z, [1, 1, 0, 0]) - assert_array_equal(z.mask, [False, True, False, False]) - - def test_concatenate_flexible(self): - # Tests the concatenation on flexible arrays. - data = masked_array(list(zip(np.random.rand(10), - np.arange(10))), - dtype=[('a', float), ('b', int)]) - - test = concatenate([data[:5], data[5:]]) - assert_equal_records(test, data) - - def test_creation_ndmin(self): - # Check the use of ndmin - x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2) - assert_equal(x.shape, (1, 3)) - assert_equal(x._data, [[1, 2, 3]]) - assert_equal(x._mask, [[1, 0, 0]]) - - def test_creation_ndmin_from_maskedarray(self): - # Make sure we're not losing the original mask w/ ndmin - x = array([1, 2, 3]) - x[-1] = masked - xx = array(x, ndmin=2, dtype=float) - assert_equal(x.shape, x._mask.shape) - assert_equal(xx.shape, xx._mask.shape) - - def test_creation_maskcreation(self): - # Tests how masks are initialized at the creation of Maskedarrays. 
- data = arange(24, dtype=float) - data[[3, 6, 15]] = masked - dma_1 = MaskedArray(data) - assert_equal(dma_1.mask, data.mask) - dma_2 = MaskedArray(dma_1) - assert_equal(dma_2.mask, dma_1.mask) - dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6) - fail_if_equal(dma_3.mask, dma_1.mask) - - x = array([1, 2, 3], mask=True) - assert_equal(x._mask, [True, True, True]) - x = array([1, 2, 3], mask=False) - assert_equal(x._mask, [False, False, False]) - y = array([1, 2, 3], mask=x._mask, copy=False) - assert_(np.may_share_memory(x.mask, y.mask)) - y = array([1, 2, 3], mask=x._mask, copy=True) - assert_(not np.may_share_memory(x.mask, y.mask)) - - def test_creation_with_list_of_maskedarrays(self): - # Tests creating a masked array from a list of masked arrays. - x = array(np.arange(5), mask=[1, 0, 0, 0, 0]) - data = array((x, x[::-1])) - assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) - assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]]) - - x.mask = nomask - data = array((x, x[::-1])) - assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) - assert_(data.mask is nomask) - - def test_creation_from_ndarray_with_padding(self): - x = np.array([('A', 0)], dtype={'names':['f0','f1'], - 'formats':['S4','i8'], - 'offsets':[0,8]}) - array(x) # used to fail due to 'V' padding field in x.dtype.descr - - def test_asarray(self): - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - xm.fill_value = -9999 - xm._hardmask = True - xmm = asarray(xm) - assert_equal(xmm._data, xm._data) - assert_equal(xmm._mask, xm._mask) - assert_equal(xmm.fill_value, xm.fill_value) - assert_equal(xmm._hardmask, xm._hardmask) - - def test_asarray_default_order(self): - # See Issue #6646 - m = np.eye(3).T - assert_(not m.flags.c_contiguous) - - new_m = asarray(m) - assert_(new_m.flags.c_contiguous) - - def test_asarray_enforce_order(self): - # See Issue #6646 - m = np.eye(3).T - assert_(not m.flags.c_contiguous) - - new_m = asarray(m, order='C') - assert_(new_m.flags.c_contiguous) - - def 
test_fix_invalid(self): - # Checks fix_invalid. - with np.errstate(invalid='ignore'): - data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1]) - data_fixed = fix_invalid(data) - assert_equal(data_fixed._data, [data.fill_value, 0., 1.]) - assert_equal(data_fixed._mask, [1., 0., 1.]) - - def test_maskedelement(self): - # Test of masked element - x = arange(6) - x[1] = masked - assert_(str(masked) == '--') - assert_(x[1] is masked) - assert_equal(filled(x[1], 0), 0) - - def test_set_element_as_object(self): - # Tests setting elements with object - a = empty(1, dtype=object) - x = (1, 2, 3, 4, 5) - a[0] = x - assert_equal(a[0], x) - assert_(a[0] is x) - - import datetime - dt = datetime.datetime.now() - a[0] = dt - assert_(a[0] is dt) - - def test_indexing(self): - # Tests conversions and indexing - x1 = np.array([1, 2, 4, 3]) - x2 = array(x1, mask=[1, 0, 0, 0]) - x3 = array(x1, mask=[0, 1, 0, 1]) - x4 = array(x1) - # test conversion to strings - str(x2) # raises? - repr(x2) # raises? - assert_equal(np.sort(x1), sort(x2, endwith=False)) - # tests of indexing - assert_(type(x2[1]) is type(x1[1])) - assert_(x1[1] == x2[1]) - assert_(x2[0] is masked) - assert_equal(x1[2], x2[2]) - assert_equal(x1[2:5], x2[2:5]) - assert_equal(x1[:], x2[:]) - assert_equal(x1[1:], x3[1:]) - x1[2] = 9 - x2[2] = 9 - assert_equal(x1, x2) - x1[1:3] = 99 - x2[1:3] = 99 - assert_equal(x1, x2) - x2[1] = masked - assert_equal(x1, x2) - x2[1:3] = masked - assert_equal(x1, x2) - x2[:] = x1 - x2[1] = masked - assert_(allequal(getmask(x2), array([0, 1, 0, 0]))) - x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) - assert_(allequal(getmask(x3), array([0, 1, 1, 0]))) - x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) - assert_(allequal(getmask(x4), array([0, 1, 1, 0]))) - assert_(allequal(x4, array([1, 2, 3, 4]))) - x1 = np.arange(5) * 1.0 - x2 = masked_values(x1, 3.0) - assert_equal(x1, x2) - assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask)) - assert_equal(3.0, x2.fill_value) - x1 = array([1, 
'hello', 2, 3], object) - x2 = np.array([1, 'hello', 2, 3], object) - s1 = x1[1] - s2 = x2[1] - assert_equal(type(s2), str) - assert_equal(type(s1), str) - assert_equal(s1, s2) - assert_(x1[1:1].shape == (0,)) - - @suppress_copy_mask_on_assignment - def test_copy(self): - # Tests of some subtle points of copying and sizing. - n = [0, 0, 1, 0, 0] - m = make_mask(n) - m2 = make_mask(m) - assert_(m is m2) - m3 = make_mask(m, copy=True) - assert_(m is not m3) - - x1 = np.arange(5) - y1 = array(x1, mask=m) - assert_equal(y1._data.__array_interface__, x1.__array_interface__) - assert_(allequal(x1, y1.data)) - assert_equal(y1._mask.__array_interface__, m.__array_interface__) - - y1a = array(y1) - # Default for masked array is not to copy; see gh-10318. - assert_(y1a._data.__array_interface__ == - y1._data.__array_interface__) - assert_(y1a._mask.__array_interface__ == - y1._mask.__array_interface__) - - y2 = array(x1, mask=m3) - assert_(y2._data.__array_interface__ == x1.__array_interface__) - assert_(y2._mask.__array_interface__ == m3.__array_interface__) - assert_(y2[2] is masked) - y2[2] = 9 - assert_(y2[2] is not masked) - assert_(y2._mask.__array_interface__ == m3.__array_interface__) - assert_(allequal(y2.mask, 0)) - - y2a = array(x1, mask=m, copy=1) - assert_(y2a._data.__array_interface__ != x1.__array_interface__) - #assert_( y2a._mask is not m) - assert_(y2a._mask.__array_interface__ != m.__array_interface__) - assert_(y2a[2] is masked) - y2a[2] = 9 - assert_(y2a[2] is not masked) - #assert_( y2a._mask is not m) - assert_(y2a._mask.__array_interface__ != m.__array_interface__) - assert_(allequal(y2a.mask, 0)) - - y3 = array(x1 * 1.0, mask=m) - assert_(filled(y3).dtype is (x1 * 1.0).dtype) - - x4 = arange(4) - x4[2] = masked - y4 = resize(x4, (8,)) - assert_equal(concatenate([x4, x4]), y4) - assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) - y5 = repeat(x4, (2, 2, 2, 2), axis=0) - assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) - y6 = repeat(x4, 2, axis=0) - 
assert_equal(y5, y6) - y7 = x4.repeat((2, 2, 2, 2), axis=0) - assert_equal(y5, y7) - y8 = x4.repeat(2, 0) - assert_equal(y5, y8) - - y9 = x4.copy() - assert_equal(y9._data, x4._data) - assert_equal(y9._mask, x4._mask) - - x = masked_array([1, 2, 3], mask=[0, 1, 0]) - # Copy is False by default - y = masked_array(x) - assert_equal(y._data.ctypes.data, x._data.ctypes.data) - assert_equal(y._mask.ctypes.data, x._mask.ctypes.data) - y = masked_array(x, copy=True) - assert_not_equal(y._data.ctypes.data, x._data.ctypes.data) - assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data) - - def test_copy_0d(self): - # gh-9430 - x = np.ma.array(43, mask=True) - xc = x.copy() - assert_equal(xc.mask, True) - - def test_copy_on_python_builtins(self): - # Tests copy works on python builtins (issue#8019) - assert_(isMaskedArray(np.ma.copy([1,2,3]))) - assert_(isMaskedArray(np.ma.copy((1,2,3)))) - - def test_copy_immutable(self): - # Tests that the copy method is immutable, GitHub issue #5247 - a = np.ma.array([1, 2, 3]) - b = np.ma.array([4, 5, 6]) - a_copy_method = a.copy - b.copy - assert_equal(a_copy_method(), [1, 2, 3]) - - def test_deepcopy(self): - from copy import deepcopy - a = array([0, 1, 2], mask=[False, True, False]) - copied = deepcopy(a) - assert_equal(copied.mask, a.mask) - assert_not_equal(id(a._mask), id(copied._mask)) - - copied[1] = 1 - assert_equal(copied.mask, [0, 0, 0]) - assert_equal(a.mask, [0, 1, 0]) - - copied = deepcopy(a) - assert_equal(copied.mask, a.mask) - copied.mask[1] = False - assert_equal(copied.mask, [0, 0, 0]) - assert_equal(a.mask, [0, 1, 0]) - - def test_str_repr(self): - a = array([0, 1, 2], mask=[False, True, False]) - assert_equal(str(a), '[0 -- 2]') - assert_equal( - repr(a), - textwrap.dedent('''\ - masked_array(data=[0, --, 2], - mask=[False, True, False], - fill_value=999999)''') - ) - - # arrays with a continuation - a = np.ma.arange(2000) - a[1:50] = np.ma.masked - assert_equal( - repr(a), - textwrap.dedent('''\ - 
masked_array(data=[0, --, --, ..., 1997, 1998, 1999], - mask=[False, True, True, ..., False, False, False], - fill_value=999999)''') - ) - - # line-wrapped 1d arrays are correctly aligned - a = np.ma.arange(20) - assert_equal( - repr(a), - textwrap.dedent('''\ - masked_array(data=[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, - 14, 15, 16, 17, 18, 19], - mask=False, - fill_value=999999)''') - ) - - # 2d arrays cause wrapping - a = array([[1, 2, 3], [4, 5, 6]], dtype=np.int8) - a[1,1] = np.ma.masked - assert_equal( - repr(a), - textwrap.dedent('''\ - masked_array( - data=[[1, 2, 3], - [4, --, 6]], - mask=[[False, False, False], - [False, True, False]], - fill_value=999999, - dtype=int8)''') - ) - - # but not it they're a row vector - assert_equal( - repr(a[:1]), - textwrap.dedent('''\ - masked_array(data=[[1, 2, 3]], - mask=[[False, False, False]], - fill_value=999999, - dtype=int8)''') - ) - - # dtype=int is implied, so not shown - assert_equal( - repr(a.astype(int)), - textwrap.dedent('''\ - masked_array( - data=[[1, 2, 3], - [4, --, 6]], - mask=[[False, False, False], - [False, True, False]], - fill_value=999999)''') - ) - - def test_str_repr_legacy(self): - oldopts = np.get_printoptions() - np.set_printoptions(legacy='1.13') - try: - a = array([0, 1, 2], mask=[False, True, False]) - assert_equal(str(a), '[0 -- 2]') - assert_equal(repr(a), 'masked_array(data = [0 -- 2],\n' - ' mask = [False True False],\n' - ' fill_value = 999999)\n') - - a = np.ma.arange(2000) - a[1:50] = np.ma.masked - assert_equal( - repr(a), - 'masked_array(data = [0 -- -- ..., 1997 1998 1999],\n' - ' mask = [False True True ..., False False False],\n' - ' fill_value = 999999)\n' - ) - finally: - np.set_printoptions(**oldopts) - - def test_0d_unicode(self): - u = u'caf\xe9' - utype = type(u) - - arr_nomask = np.ma.array(u) - arr_masked = np.ma.array(u, mask=True) - - assert_equal(utype(arr_nomask), u) - assert_equal(utype(arr_masked), u'--') - - def test_pickling(self): - # Tests pickling - 
for dtype in (int, float, str, object): - a = arange(10).astype(dtype) - a.fill_value = 999 - - masks = ([0, 0, 0, 1, 0, 1, 0, 1, 0, 1], # partially masked - True, # Fully masked - False) # Fully unmasked - - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - for mask in masks: - a.mask = mask - a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) - assert_equal(a_pickled._mask, a._mask) - assert_equal(a_pickled._data, a._data) - if dtype in (object, int): - assert_equal(a_pickled.fill_value, 999) - else: - assert_equal(a_pickled.fill_value, dtype(999)) - assert_array_equal(a_pickled.mask, mask) - - def test_pickling_subbaseclass(self): - # Test pickling w/ a subclass of ndarray - x = np.array([(1.0, 2), (3.0, 4)], - dtype=[('x', float), ('y', int)]).view(np.recarray) - a = masked_array(x, mask=[(True, False), (False, True)]) - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) - assert_equal(a_pickled._mask, a._mask) - assert_equal(a_pickled, a) - assert_(isinstance(a_pickled._data, np.recarray)) - - def test_pickling_maskedconstant(self): - # Test pickling MaskedConstant - mc = np.ma.masked - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - mc_pickled = pickle.loads(pickle.dumps(mc, protocol=proto)) - assert_equal(mc_pickled._baseclass, mc._baseclass) - assert_equal(mc_pickled._mask, mc._mask) - assert_equal(mc_pickled._data, mc._data) - - def test_pickling_wstructured(self): - # Tests pickling w/ structured array - a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)], - dtype=[('a', int), ('b', float)]) - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) - assert_equal(a_pickled._mask, a._mask) - assert_equal(a_pickled, a) - - def test_pickling_keepalignment(self): - # Tests pickling w/ F_CONTIGUOUS arrays - a = arange(10) - a.shape = (-1, 2) - b = a.T - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - test = 
pickle.loads(pickle.dumps(b, protocol=proto)) - assert_equal(test, b) - - def test_single_element_subscript(self): - # Tests single element subscripts of Maskedarrays. - a = array([1, 3, 2]) - b = array([1, 3, 2], mask=[1, 0, 1]) - assert_equal(a[0].shape, ()) - assert_equal(b[0].shape, ()) - assert_equal(b[1].shape, ()) - - def test_topython(self): - # Tests some communication issues with Python. - assert_equal(1, int(array(1))) - assert_equal(1.0, float(array(1))) - assert_equal(1, int(array([[[1]]]))) - assert_equal(1.0, float(array([[1]]))) - assert_raises(TypeError, float, array([1, 1])) - - with suppress_warnings() as sup: - sup.filter(UserWarning, 'Warning: converting a masked element') - assert_(np.isnan(float(array([1], mask=[1])))) - - a = array([1, 2, 3], mask=[1, 0, 0]) - assert_raises(TypeError, lambda: float(a)) - assert_equal(float(a[-1]), 3.) - assert_(np.isnan(float(a[0]))) - assert_raises(TypeError, int, a) - assert_equal(int(a[-1]), 3) - assert_raises(MAError, lambda:int(a[0])) - - def test_oddfeatures_1(self): - # Test of other odd features - x = arange(20) - x = x.reshape(4, 5) - x.flat[5] = 12 - assert_(x[1, 0] == 12) - z = x + 10j * x - assert_equal(z.real, x) - assert_equal(z.imag, 10 * x) - assert_equal((z * conjugate(z)).real, 101 * x * x) - z.imag[...] = 0.0 - - x = arange(10) - x[3] = masked - assert_(str(x[3]) == str(masked)) - c = x >= 8 - assert_(count(where(c, masked, masked)) == 0) - assert_(shape(where(c, masked, masked)) == c.shape) - - z = masked_where(c, x) - assert_(z.dtype is x.dtype) - assert_(z[3] is masked) - assert_(z[4] is not masked) - assert_(z[7] is not masked) - assert_(z[8] is masked) - assert_(z[9] is masked) - assert_equal(x, z) - - def test_oddfeatures_2(self): - # Tests some more features. 
- x = array([1., 2., 3., 4., 5.]) - c = array([1, 1, 1, 0, 0]) - x[2] = masked - z = where(c, x, -x) - assert_equal(z, [1., 2., 0., -4., -5]) - c[0] = masked - z = where(c, x, -x) - assert_equal(z, [1., 2., 0., -4., -5]) - assert_(z[0] is masked) - assert_(z[1] is not masked) - assert_(z[2] is masked) - - @suppress_copy_mask_on_assignment - def test_oddfeatures_3(self): - # Tests some generic features - atest = array([10], mask=True) - btest = array([20]) - idx = atest.mask - atest[idx] = btest[idx] - assert_equal(atest, [20]) - - def test_filled_with_object_dtype(self): - a = np.ma.masked_all(1, dtype='O') - assert_equal(a.filled('x')[0], 'x') - - def test_filled_with_flexible_dtype(self): - # Test filled w/ flexible dtype - flexi = array([(1, 1, 1)], - dtype=[('i', int), ('s', '|S8'), ('f', float)]) - flexi[0] = masked - assert_equal(flexi.filled(), - np.array([(default_fill_value(0), - default_fill_value('0'), - default_fill_value(0.),)], dtype=flexi.dtype)) - flexi[0] = masked - assert_equal(flexi.filled(1), - np.array([(1, '1', 1.)], dtype=flexi.dtype)) - - def test_filled_with_mvoid(self): - # Test filled w/ mvoid - ndtype = [('a', int), ('b', float)] - a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype) - # Filled using default - test = a.filled() - assert_equal(tuple(test), (1, default_fill_value(1.))) - # Explicit fill_value - test = a.filled((-1, -1)) - assert_equal(tuple(test), (1, -1)) - # Using predefined filling values - a.fill_value = (-999, -999) - assert_equal(tuple(a.filled()), (1, -999)) - - def test_filled_with_nested_dtype(self): - # Test filled w/ nested dtype - ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] - a = array([(1, (1, 1)), (2, (2, 2))], - mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype) - test = a.filled(0) - control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype) - assert_equal(test, control) - - test = a['B'].filled(0) - control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype) - assert_equal(test, control) - - # test if 
mask gets set correctly (see #6760) - Z = numpy.ma.zeros(2, numpy.dtype([("A", "(2,2)i1,(2,2)i1", (2,2))])) - assert_equal(Z.data.dtype, numpy.dtype([('A', [('f0', 'i1', (2, 2)), - ('f1', 'i1', (2, 2))], (2, 2))])) - assert_equal(Z.mask.dtype, numpy.dtype([('A', [('f0', '?', (2, 2)), - ('f1', '?', (2, 2))], (2, 2))])) - - def test_filled_with_f_order(self): - # Test filled w/ F-contiguous array - a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'), - mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'), - order='F') # this is currently ignored - assert_(a.flags['F_CONTIGUOUS']) - assert_(a.filled(0).flags['F_CONTIGUOUS']) - - def test_optinfo_propagation(self): - # Checks that _optinfo dictionary isn't back-propagated - x = array([1, 2, 3, ], dtype=float) - x._optinfo['info'] = '???' - y = x.copy() - assert_equal(y._optinfo['info'], '???') - y._optinfo['info'] = '!!!' - assert_equal(x._optinfo['info'], '???') - - def test_optinfo_forward_propagation(self): - a = array([1,2,2,4]) - a._optinfo["key"] = "value" - assert_equal(a._optinfo["key"], (a == 2)._optinfo["key"]) - assert_equal(a._optinfo["key"], (a != 2)._optinfo["key"]) - assert_equal(a._optinfo["key"], (a > 2)._optinfo["key"]) - assert_equal(a._optinfo["key"], (a >= 2)._optinfo["key"]) - assert_equal(a._optinfo["key"], (a <= 2)._optinfo["key"]) - assert_equal(a._optinfo["key"], (a + 2)._optinfo["key"]) - assert_equal(a._optinfo["key"], (a - 2)._optinfo["key"]) - assert_equal(a._optinfo["key"], (a * 2)._optinfo["key"]) - assert_equal(a._optinfo["key"], (a / 2)._optinfo["key"]) - assert_equal(a._optinfo["key"], a[:2]._optinfo["key"]) - assert_equal(a._optinfo["key"], a[[0,0,2]]._optinfo["key"]) - assert_equal(a._optinfo["key"], np.exp(a)._optinfo["key"]) - assert_equal(a._optinfo["key"], np.abs(a)._optinfo["key"]) - assert_equal(a._optinfo["key"], array(a, copy=True)._optinfo["key"]) - assert_equal(a._optinfo["key"], np.zeros_like(a)._optinfo["key"]) - - def test_fancy_printoptions(self): - # Test printing a 
masked array w/ fancy dtype. - fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) - test = array([(1, (2, 3.0)), (4, (5, 6.0))], - mask=[(1, (0, 1)), (0, (1, 0))], - dtype=fancydtype) - control = "[(--, (2, --)) (4, (--, 6.0))]" - assert_equal(str(test), control) - - # Test 0-d array with multi-dimensional dtype - t_2d0 = masked_array(data = (0, [[0.0, 0.0, 0.0], - [0.0, 0.0, 0.0]], - 0.0), - mask = (False, [[True, False, True], - [False, False, True]], - False), - dtype = "int, (2,3)float, float") - control = "(0, [[--, 0.0, --], [0.0, 0.0, --]], 0.0)" - assert_equal(str(t_2d0), control) - - def test_flatten_structured_array(self): - # Test flatten_structured_array on arrays - # On ndarray - ndtype = [('a', int), ('b', float)] - a = np.array([(1, 1), (2, 2)], dtype=ndtype) - test = flatten_structured_array(a) - control = np.array([[1., 1.], [2., 2.]], dtype=float) - assert_equal(test, control) - assert_equal(test.dtype, control.dtype) - # On masked_array - a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) - test = flatten_structured_array(a) - control = array([[1., 1.], [2., 2.]], - mask=[[0, 1], [1, 0]], dtype=float) - assert_equal(test, control) - assert_equal(test.dtype, control.dtype) - assert_equal(test.mask, control.mask) - # On masked array with nested structure - ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])] - a = array([(1, (1, 1.1)), (2, (2, 2.2))], - mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype) - test = flatten_structured_array(a) - control = array([[1., 1., 1.1], [2., 2., 2.2]], - mask=[[0, 1, 0], [1, 0, 1]], dtype=float) - assert_equal(test, control) - assert_equal(test.dtype, control.dtype) - assert_equal(test.mask, control.mask) - # Keeping the initial shape - ndtype = [('a', int), ('b', float)] - a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype) - test = flatten_structured_array(a) - control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=float) - assert_equal(test, control) - 
assert_equal(test.dtype, control.dtype) - - def test_void0d(self): - # Test creating a mvoid object - ndtype = [('a', int), ('b', int)] - a = np.array([(1, 2,)], dtype=ndtype)[0] - f = mvoid(a) - assert_(isinstance(f, mvoid)) - - a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0] - assert_(isinstance(a, mvoid)) - - a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) - f = mvoid(a._data[0], a._mask[0]) - assert_(isinstance(f, mvoid)) - - def test_mvoid_getitem(self): - # Test mvoid.__getitem__ - ndtype = [('a', int), ('b', int)] - a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)], - dtype=ndtype) - # w/o mask - f = a[0] - assert_(isinstance(f, mvoid)) - assert_equal((f[0], f['a']), (1, 1)) - assert_equal(f['b'], 2) - # w/ mask - f = a[1] - assert_(isinstance(f, mvoid)) - assert_(f[0] is masked) - assert_(f['a'] is masked) - assert_equal(f[1], 4) - - # exotic dtype - A = masked_array(data=[([0,1],)], - mask=[([True, False],)], - dtype=[("A", ">i2", (2,))]) - assert_equal(A[0]["A"], A["A"][0]) - assert_equal(A[0]["A"], masked_array(data=[0, 1], - mask=[True, False], dtype=">i2")) - - def test_mvoid_iter(self): - # Test iteration on __getitem__ - ndtype = [('a', int), ('b', int)] - a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)], - dtype=ndtype) - # w/o mask - assert_equal(list(a[0]), [1, 2]) - # w/ mask - assert_equal(list(a[1]), [masked, 4]) - - def test_mvoid_print(self): - # Test printing a mvoid - mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)]) - assert_equal(str(mx[0]), "(1, 1)") - mx['b'][0] = masked - ini_display = masked_print_option._display - masked_print_option.set_display("-X-") - try: - assert_equal(str(mx[0]), "(1, -X-)") - assert_equal(repr(mx[0]), "(1, -X-)") - finally: - masked_print_option.set_display(ini_display) - - # also check if there are object datatypes (see gh-7493) - mx = array([(1,), (2,)], dtype=[('a', 'O')]) - assert_equal(str(mx[0]), "(1,)") - - def 
test_mvoid_multidim_print(self): - - # regression test for gh-6019 - t_ma = masked_array(data = [([1, 2, 3],)], - mask = [([False, True, False],)], - fill_value = ([999999, 999999, 999999],), - dtype = [('a', ' 1: - assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1)) - assert_equal(np.add.reduce(x, 1), add.reduce(x, 1)) - assert_equal(np.sum(x, 1), sum(x, 1)) - assert_equal(np.product(x, 1), product(x, 1)) - - def test_binops_d2D(self): - # Test binary operations on 2D data - a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]]) - b = array([[2., 3.], [4., 5.], [6., 7.]]) - - test = a * b - control = array([[2., 3.], [2., 2.], [3., 3.]], - mask=[[0, 0], [1, 1], [1, 1]]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - - test = b * a - control = array([[2., 3.], [4., 5.], [6., 7.]], - mask=[[0, 0], [1, 1], [1, 1]]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - - a = array([[1.], [2.], [3.]]) - b = array([[2., 3.], [4., 5.], [6., 7.]], - mask=[[0, 0], [0, 0], [0, 1]]) - test = a * b - control = array([[2, 3], [8, 10], [18, 3]], - mask=[[0, 0], [0, 0], [0, 1]]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - - test = b * a - control = array([[2, 3], [8, 10], [18, 7]], - mask=[[0, 0], [0, 0], [0, 1]]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - - def test_domained_binops_d2D(self): - # Test domained binary operations on 2D data - a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]]) - b = array([[2., 3.], [4., 5.], [6., 7.]]) - - test = a / b - control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]], - mask=[[0, 0], [1, 1], [1, 1]]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - - test = b / a - control = array([[2. 
/ 1., 3. / 1.], [4., 5.], [6., 7.]], - mask=[[0, 0], [1, 1], [1, 1]]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - - a = array([[1.], [2.], [3.]]) - b = array([[2., 3.], [4., 5.], [6., 7.]], - mask=[[0, 0], [0, 0], [0, 1]]) - test = a / b - control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. / 6, 3]], - mask=[[0, 0], [0, 0], [0, 1]]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - - test = b / a - control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]], - mask=[[0, 0], [0, 0], [0, 1]]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - - def test_noshrinking(self): - # Check that we don't shrink a mask when not wanted - # Binary operations - a = masked_array([1., 2., 3.], mask=[False, False, False], - shrink=False) - b = a + 1 - assert_equal(b.mask, [0, 0, 0]) - # In place binary operation - a += 1 - assert_equal(a.mask, [0, 0, 0]) - # Domained binary operation - b = a / 1. - assert_equal(b.mask, [0, 0, 0]) - # In place binary operation - a /= 1. 
- assert_equal(a.mask, [0, 0, 0]) - - def test_ufunc_nomask(self): - # check the case ufuncs should set the mask to false - m = np.ma.array([1]) - # check we don't get array([False], dtype=bool) - assert_equal(np.true_divide(m, 5).mask.shape, ()) - - def test_noshink_on_creation(self): - # Check that the mask is not shrunk on array creation when not wanted - a = np.ma.masked_values([1., 2.5, 3.1], 1.5, shrink=False) - assert_equal(a.mask, [0, 0, 0]) - - def test_mod(self): - # Tests mod - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - assert_equal(mod(x, y), mod(xm, ym)) - test = mod(ym, xm) - assert_equal(test, np.mod(ym, xm)) - assert_equal(test.mask, mask_or(xm.mask, ym.mask)) - test = mod(xm, ym) - assert_equal(test, np.mod(xm, ym)) - assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0))) - - def test_TakeTransposeInnerOuter(self): - # Test of take, transpose, inner, outer products - x = arange(24) - y = np.arange(24) - x[5:6] = masked - x = x.reshape(2, 3, 4) - y = y.reshape(2, 3, 4) - assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))) - assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)) - assert_equal(np.inner(filled(x, 0), filled(y, 0)), - inner(x, y)) - assert_equal(np.outer(filled(x, 0), filled(y, 0)), - outer(x, y)) - y = array(['abc', 1, 'def', 2, 3], object) - y[2] = masked - t = take(y, [0, 3, 4]) - assert_(t[0] == 'abc') - assert_(t[1] == 2) - assert_(t[2] == 3) - - def test_imag_real(self): - # Check complex - xx = array([1 + 10j, 20 + 2j], mask=[1, 0]) - assert_equal(xx.imag, [10, 2]) - assert_equal(xx.imag.filled(), [1e+20, 2]) - assert_equal(xx.imag.dtype, xx._data.imag.dtype) - assert_equal(xx.real, [1, 20]) - assert_equal(xx.real.filled(), [1e+20, 20]) - assert_equal(xx.real.dtype, xx._data.real.dtype) - - def test_methods_with_output(self): - xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) - xm[:, 0] = xm[0] = xm[-1, -1] = masked - - funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 
'ptp', 'mean',) - - for funcname in funclist: - npfunc = getattr(np, funcname) - xmmeth = getattr(xm, funcname) - # A ndarray as explicit input - output = np.empty(4, dtype=float) - output.fill(-9999) - result = npfunc(xm, axis=0, out=output) - # ... the result should be the given output - assert_(result is output) - assert_equal(result, xmmeth(axis=0, out=output)) - - output = empty(4, dtype=int) - result = xmmeth(axis=0, out=output) - assert_(result is output) - assert_(output[0] is masked) - - def test_eq_on_structured(self): - # Test the equality of structured arrays - ndtype = [('A', int), ('B', int)] - a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype) - - test = (a == a) - assert_equal(test.data, [True, True]) - assert_equal(test.mask, [False, False]) - assert_(test.fill_value == True) - - test = (a == a[0]) - assert_equal(test.data, [True, False]) - assert_equal(test.mask, [False, False]) - assert_(test.fill_value == True) - - b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) - test = (a == b) - assert_equal(test.data, [False, True]) - assert_equal(test.mask, [True, False]) - assert_(test.fill_value == True) - - test = (a[0] == b) - assert_equal(test.data, [False, False]) - assert_equal(test.mask, [True, False]) - assert_(test.fill_value == True) - - b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) - test = (a == b) - assert_equal(test.data, [True, True]) - assert_equal(test.mask, [False, False]) - assert_(test.fill_value == True) - - # complicated dtype, 2-dimensional array. 
- ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] - a = array([[(1, (1, 1)), (2, (2, 2))], - [(3, (3, 3)), (4, (4, 4))]], - mask=[[(0, (1, 0)), (0, (0, 1))], - [(1, (0, 0)), (1, (1, 1))]], dtype=ndtype) - test = (a[0, 0] == a) - assert_equal(test.data, [[True, False], [False, False]]) - assert_equal(test.mask, [[False, False], [False, True]]) - assert_(test.fill_value == True) - - def test_ne_on_structured(self): - # Test the equality of structured arrays - ndtype = [('A', int), ('B', int)] - a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype) - - test = (a != a) - assert_equal(test.data, [False, False]) - assert_equal(test.mask, [False, False]) - assert_(test.fill_value == True) - - test = (a != a[0]) - assert_equal(test.data, [False, True]) - assert_equal(test.mask, [False, False]) - assert_(test.fill_value == True) - - b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) - test = (a != b) - assert_equal(test.data, [True, False]) - assert_equal(test.mask, [True, False]) - assert_(test.fill_value == True) - - test = (a[0] != b) - assert_equal(test.data, [True, True]) - assert_equal(test.mask, [True, False]) - assert_(test.fill_value == True) - - b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) - test = (a != b) - assert_equal(test.data, [False, False]) - assert_equal(test.mask, [False, False]) - assert_(test.fill_value == True) - - # complicated dtype, 2-dimensional array. - ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] - a = array([[(1, (1, 1)), (2, (2, 2))], - [(3, (3, 3)), (4, (4, 4))]], - mask=[[(0, (1, 0)), (0, (0, 1))], - [(1, (0, 0)), (1, (1, 1))]], dtype=ndtype) - test = (a[0, 0] != a) - assert_equal(test.data, [[False, True], [True, True]]) - assert_equal(test.mask, [[False, False], [False, True]]) - assert_(test.fill_value == True) - - def test_eq_ne_structured_extra(self): - # ensure simple examples are symmetric and make sense. 
- # from https://github.com/numpy/numpy/pull/8590#discussion_r101126465 - dt = np.dtype('i4,i4') - for m1 in (mvoid((1, 2), mask=(0, 0), dtype=dt), - mvoid((1, 2), mask=(0, 1), dtype=dt), - mvoid((1, 2), mask=(1, 0), dtype=dt), - mvoid((1, 2), mask=(1, 1), dtype=dt)): - ma1 = m1.view(MaskedArray) - r1 = ma1.view('2i4') - for m2 in (np.array((1, 1), dtype=dt), - mvoid((1, 1), dtype=dt), - mvoid((1, 0), mask=(0, 1), dtype=dt), - mvoid((3, 2), mask=(0, 1), dtype=dt)): - ma2 = m2.view(MaskedArray) - r2 = ma2.view('2i4') - eq_expected = (r1 == r2).all() - assert_equal(m1 == m2, eq_expected) - assert_equal(m2 == m1, eq_expected) - assert_equal(ma1 == m2, eq_expected) - assert_equal(m1 == ma2, eq_expected) - assert_equal(ma1 == ma2, eq_expected) - # Also check it is the same if we do it element by element. - el_by_el = [m1[name] == m2[name] for name in dt.names] - assert_equal(array(el_by_el, dtype=bool).all(), eq_expected) - ne_expected = (r1 != r2).any() - assert_equal(m1 != m2, ne_expected) - assert_equal(m2 != m1, ne_expected) - assert_equal(ma1 != m2, ne_expected) - assert_equal(m1 != ma2, ne_expected) - assert_equal(ma1 != ma2, ne_expected) - el_by_el = [m1[name] != m2[name] for name in dt.names] - assert_equal(array(el_by_el, dtype=bool).any(), ne_expected) - - @pytest.mark.parametrize('dt', ['S', 'U']) - @pytest.mark.parametrize('fill', [None, 'A']) - def test_eq_for_strings(self, dt, fill): - # Test the equality of structured arrays - a = array(['a', 'b'], dtype=dt, mask=[0, 1], fill_value=fill) - - test = (a == a) - assert_equal(test.data, [True, True]) - assert_equal(test.mask, [False, True]) - assert_(test.fill_value == True) - - test = (a == a[0]) - assert_equal(test.data, [True, False]) - assert_equal(test.mask, [False, True]) - assert_(test.fill_value == True) - - b = array(['a', 'b'], dtype=dt, mask=[1, 0], fill_value=fill) - test = (a == b) - assert_equal(test.data, [False, False]) - assert_equal(test.mask, [True, True]) - assert_(test.fill_value == True) 
- - # test = (a[0] == b) # doesn't work in Python2 - test = (b == a[0]) - assert_equal(test.data, [False, False]) - assert_equal(test.mask, [True, False]) - assert_(test.fill_value == True) - - @pytest.mark.parametrize('dt', ['S', 'U']) - @pytest.mark.parametrize('fill', [None, 'A']) - def test_ne_for_strings(self, dt, fill): - # Test the equality of structured arrays - a = array(['a', 'b'], dtype=dt, mask=[0, 1], fill_value=fill) - - test = (a != a) - assert_equal(test.data, [False, False]) - assert_equal(test.mask, [False, True]) - assert_(test.fill_value == True) - - test = (a != a[0]) - assert_equal(test.data, [False, True]) - assert_equal(test.mask, [False, True]) - assert_(test.fill_value == True) - - b = array(['a', 'b'], dtype=dt, mask=[1, 0], fill_value=fill) - test = (a != b) - assert_equal(test.data, [True, True]) - assert_equal(test.mask, [True, True]) - assert_(test.fill_value == True) - - # test = (a[0] != b) # doesn't work in Python2 - test = (b != a[0]) - assert_equal(test.data, [True, True]) - assert_equal(test.mask, [True, False]) - assert_(test.fill_value == True) - - @pytest.mark.parametrize('dt1', num_dts, ids=num_ids) - @pytest.mark.parametrize('dt2', num_dts, ids=num_ids) - @pytest.mark.parametrize('fill', [None, 1]) - def test_eq_for_numeric(self, dt1, dt2, fill): - # Test the equality of structured arrays - a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill) - - test = (a == a) - assert_equal(test.data, [True, True]) - assert_equal(test.mask, [False, True]) - assert_(test.fill_value == True) - - test = (a == a[0]) - assert_equal(test.data, [True, False]) - assert_equal(test.mask, [False, True]) - assert_(test.fill_value == True) - - b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill) - test = (a == b) - assert_equal(test.data, [False, False]) - assert_equal(test.mask, [True, True]) - assert_(test.fill_value == True) - - # test = (a[0] == b) # doesn't work in Python2 - test = (b == a[0]) - assert_equal(test.data, [False, 
False]) - assert_equal(test.mask, [True, False]) - assert_(test.fill_value == True) - - @pytest.mark.parametrize('dt1', num_dts, ids=num_ids) - @pytest.mark.parametrize('dt2', num_dts, ids=num_ids) - @pytest.mark.parametrize('fill', [None, 1]) - def test_ne_for_numeric(self, dt1, dt2, fill): - # Test the equality of structured arrays - a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill) - - test = (a != a) - assert_equal(test.data, [False, False]) - assert_equal(test.mask, [False, True]) - assert_(test.fill_value == True) - - test = (a != a[0]) - assert_equal(test.data, [False, True]) - assert_equal(test.mask, [False, True]) - assert_(test.fill_value == True) - - b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill) - test = (a != b) - assert_equal(test.data, [True, True]) - assert_equal(test.mask, [True, True]) - assert_(test.fill_value == True) - - # test = (a[0] != b) # doesn't work in Python2 - test = (b != a[0]) - assert_equal(test.data, [True, True]) - assert_equal(test.mask, [True, False]) - assert_(test.fill_value == True) - - def test_eq_with_None(self): - # Really, comparisons with None should not be done, but check them - # anyway. Note that pep8 will flag these tests. - # Deprecation is in place for arrays, and when it happens this - # test will fail (and have to be changed accordingly). 
- - # With partial mask - with suppress_warnings() as sup: - sup.filter(FutureWarning, "Comparison to `None`") - a = array([None, 1], mask=[0, 1]) - assert_equal(a == None, array([True, False], mask=[0, 1])) - assert_equal(a.data == None, [True, False]) - assert_equal(a != None, array([False, True], mask=[0, 1])) - # With nomask - a = array([None, 1], mask=False) - assert_equal(a == None, [True, False]) - assert_equal(a != None, [False, True]) - # With complete mask - a = array([None, 2], mask=True) - assert_equal(a == None, array([False, True], mask=True)) - assert_equal(a != None, array([True, False], mask=True)) - # Fully masked, even comparison to None should return "masked" - a = masked - assert_equal(a == None, masked) - - def test_eq_with_scalar(self): - a = array(1) - assert_equal(a == 1, True) - assert_equal(a == 0, False) - assert_equal(a != 1, False) - assert_equal(a != 0, True) - b = array(1, mask=True) - assert_equal(b == 0, masked) - assert_equal(b == 1, masked) - assert_equal(b != 0, masked) - assert_equal(b != 1, masked) - - def test_eq_different_dimensions(self): - m1 = array([1, 1], mask=[0, 1]) - # test comparison with both masked and regular arrays. 
- for m2 in (array([[0, 1], [1, 2]]), - np.array([[0, 1], [1, 2]])): - test = (m1 == m2) - assert_equal(test.data, [[False, False], - [True, False]]) - assert_equal(test.mask, [[False, True], - [False, True]]) - - def test_numpyarithmetics(self): - # Check that the mask is not back-propagated when using numpy functions - a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1]) - control = masked_array([np.nan, np.nan, 0, np.log(2), -1], - mask=[1, 1, 0, 0, 1]) - - test = log(a) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - assert_equal(a.mask, [0, 0, 0, 0, 1]) - - test = np.log(a) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - assert_equal(a.mask, [0, 0, 0, 0, 1]) - - -class TestMaskedArrayAttributes(object): - - def test_keepmask(self): - # Tests the keep mask flag - x = masked_array([1, 2, 3], mask=[1, 0, 0]) - mx = masked_array(x) - assert_equal(mx.mask, x.mask) - mx = masked_array(x, mask=[0, 1, 0], keep_mask=False) - assert_equal(mx.mask, [0, 1, 0]) - mx = masked_array(x, mask=[0, 1, 0], keep_mask=True) - assert_equal(mx.mask, [1, 1, 0]) - # We default to true - mx = masked_array(x, mask=[0, 1, 0]) - assert_equal(mx.mask, [1, 1, 0]) - - def test_hardmask(self): - # Test hard_mask - d = arange(5) - n = [0, 0, 0, 1, 1] - m = make_mask(n) - xh = array(d, mask=m, hard_mask=True) - # We need to copy, to avoid updating d in xh ! 
- xs = array(d, mask=m, hard_mask=False, copy=True) - xh[[1, 4]] = [10, 40] - xs[[1, 4]] = [10, 40] - assert_equal(xh._data, [0, 10, 2, 3, 4]) - assert_equal(xs._data, [0, 10, 2, 3, 40]) - assert_equal(xs.mask, [0, 0, 0, 1, 0]) - assert_(xh._hardmask) - assert_(not xs._hardmask) - xh[1:4] = [10, 20, 30] - xs[1:4] = [10, 20, 30] - assert_equal(xh._data, [0, 10, 20, 3, 4]) - assert_equal(xs._data, [0, 10, 20, 30, 40]) - assert_equal(xs.mask, nomask) - xh[0] = masked - xs[0] = masked - assert_equal(xh.mask, [1, 0, 0, 1, 1]) - assert_equal(xs.mask, [1, 0, 0, 0, 0]) - xh[:] = 1 - xs[:] = 1 - assert_equal(xh._data, [0, 1, 1, 3, 4]) - assert_equal(xs._data, [1, 1, 1, 1, 1]) - assert_equal(xh.mask, [1, 0, 0, 1, 1]) - assert_equal(xs.mask, nomask) - # Switch to soft mask - xh.soften_mask() - xh[:] = arange(5) - assert_equal(xh._data, [0, 1, 2, 3, 4]) - assert_equal(xh.mask, nomask) - # Switch back to hard mask - xh.harden_mask() - xh[xh < 3] = masked - assert_equal(xh._data, [0, 1, 2, 3, 4]) - assert_equal(xh._mask, [1, 1, 1, 0, 0]) - xh[filled(xh > 1, False)] = 5 - assert_equal(xh._data, [0, 1, 2, 5, 5]) - assert_equal(xh._mask, [1, 1, 1, 0, 0]) - - xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True) - xh[0] = 0 - assert_equal(xh._data, [[1, 0], [3, 4]]) - assert_equal(xh._mask, [[1, 0], [0, 0]]) - xh[-1, -1] = 5 - assert_equal(xh._data, [[1, 0], [3, 5]]) - assert_equal(xh._mask, [[1, 0], [0, 0]]) - xh[filled(xh < 5, False)] = 2 - assert_equal(xh._data, [[1, 2], [2, 5]]) - assert_equal(xh._mask, [[1, 0], [0, 0]]) - - def test_hardmask_again(self): - # Another test of hardmask - d = arange(5) - n = [0, 0, 0, 1, 1] - m = make_mask(n) - xh = array(d, mask=m, hard_mask=True) - xh[4:5] = 999 - xh[0:1] = 999 - assert_equal(xh._data, [999, 1, 2, 3, 4]) - - def test_hardmask_oncemore_yay(self): - # OK, yet another test of hardmask - # Make sure that harden_mask/soften_mask//unshare_mask returns self - a = array([1, 2, 3], mask=[1, 0, 0]) - b = a.harden_mask() - 
assert_equal(a, b) - b[0] = 0 - assert_equal(a, b) - assert_equal(b, array([1, 2, 3], mask=[1, 0, 0])) - a = b.soften_mask() - a[0] = 0 - assert_equal(a, b) - assert_equal(b, array([0, 2, 3], mask=[0, 0, 0])) - - def test_smallmask(self): - # Checks the behaviour of _smallmask - a = arange(10) - a[1] = masked - a[1] = 1 - assert_equal(a._mask, nomask) - a = arange(10) - a._smallmask = False - a[1] = masked - a[1] = 1 - assert_equal(a._mask, zeros(10)) - - def test_shrink_mask(self): - # Tests .shrink_mask() - a = array([1, 2, 3], mask=[0, 0, 0]) - b = a.shrink_mask() - assert_equal(a, b) - assert_equal(a.mask, nomask) - - # Mask cannot be shrunk on structured types, so is a no-op - a = np.ma.array([(1, 2.0)], [('a', int), ('b', float)]) - b = a.copy() - a.shrink_mask() - assert_equal(a.mask, b.mask) - - def test_flat(self): - # Test that flat can return all types of items [#4585, #4615] - # test 2-D record array - # ... on structured array w/ masked records - x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')], - [(4, 4.4, 'fou'), (5, 5.5, 'fiv'), (6, 6.6, 'six')]], - dtype=[('a', int), ('b', float), ('c', '|S8')]) - x['a'][0, 1] = masked - x['b'][1, 0] = masked - x['c'][0, 2] = masked - x[-1, -1] = masked - xflat = x.flat - assert_equal(xflat[0], x[0, 0]) - assert_equal(xflat[1], x[0, 1]) - assert_equal(xflat[2], x[0, 2]) - assert_equal(xflat[:3], x[0]) - assert_equal(xflat[3], x[1, 0]) - assert_equal(xflat[4], x[1, 1]) - assert_equal(xflat[5], x[1, 2]) - assert_equal(xflat[3:], x[1]) - assert_equal(xflat[-1], x[-1, -1]) - i = 0 - j = 0 - for xf in xflat: - assert_equal(xf, x[j, i]) - i += 1 - if i >= x.shape[-1]: - i = 0 - j += 1 - - def test_assign_dtype(self): - # check that the mask's dtype is updated when dtype is changed - a = np.zeros(4, dtype='f4,i4') - - m = np.ma.array(a) - m.dtype = np.dtype('f4') - repr(m) # raises? 
- assert_equal(m.dtype, np.dtype('f4')) - - # check that dtype changes that change shape of mask too much - # are not allowed - def assign(): - m = np.ma.array(a) - m.dtype = np.dtype('f8') - assert_raises(ValueError, assign) - - b = a.view(dtype='f4', type=np.ma.MaskedArray) # raises? - assert_equal(b.dtype, np.dtype('f4')) - - # check that nomask is preserved - a = np.zeros(4, dtype='f4') - m = np.ma.array(a) - m.dtype = np.dtype('f4,i4') - assert_equal(m.dtype, np.dtype('f4,i4')) - assert_equal(m._mask, np.ma.nomask) - - -class TestFillingValues(object): - - def test_check_on_scalar(self): - # Test _check_fill_value set to valid and invalid values - _check_fill_value = np.ma.core._check_fill_value - - fval = _check_fill_value(0, int) - assert_equal(fval, 0) - fval = _check_fill_value(None, int) - assert_equal(fval, default_fill_value(0)) - - fval = _check_fill_value(0, "|S3") - assert_equal(fval, b"0") - fval = _check_fill_value(None, "|S3") - assert_equal(fval, default_fill_value(b"camelot!")) - assert_raises(TypeError, _check_fill_value, 1e+20, int) - assert_raises(TypeError, _check_fill_value, 'stuff', int) - - def test_check_on_fields(self): - # Tests _check_fill_value with records - _check_fill_value = np.ma.core._check_fill_value - ndtype = [('a', int), ('b', float), ('c', "|S3")] - # A check on a list should return a single record - fval = _check_fill_value([-999, -12345678.9, "???"], ndtype) - assert_(isinstance(fval, ndarray)) - assert_equal(fval.item(), [-999, -12345678.9, b"???"]) - # A check on None should output the defaults - fval = _check_fill_value(None, ndtype) - assert_(isinstance(fval, ndarray)) - assert_equal(fval.item(), [default_fill_value(0), - default_fill_value(0.), - asbytes(default_fill_value("0"))]) - #.....Using a structured type as fill_value should work - fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype) - fval = _check_fill_value(fill_val, ndtype) - assert_(isinstance(fval, ndarray)) - assert_equal(fval.item(), [-999, 
-12345678.9, b"???"]) - - #.....Using a flexible type w/ a different type shouldn't matter - # BEHAVIOR in 1.5 and earlier, and 1.13 and later: match structured - # types by position - fill_val = np.array((-999, -12345678.9, "???"), - dtype=[("A", int), ("B", float), ("C", "|S3")]) - fval = _check_fill_value(fill_val, ndtype) - assert_(isinstance(fval, ndarray)) - assert_equal(fval.item(), [-999, -12345678.9, b"???"]) - - #.....Using an object-array shouldn't matter either - fill_val = np.ndarray(shape=(1,), dtype=object) - fill_val[0] = (-999, -12345678.9, b"???") - fval = _check_fill_value(fill_val, object) - assert_(isinstance(fval, ndarray)) - assert_equal(fval.item(), [-999, -12345678.9, b"???"]) - # NOTE: This test was never run properly as "fill_value" rather than - # "fill_val" was assigned. Written properly, it fails. - #fill_val = np.array((-999, -12345678.9, "???")) - #fval = _check_fill_value(fill_val, ndtype) - #assert_(isinstance(fval, ndarray)) - #assert_equal(fval.item(), [-999, -12345678.9, b"???"]) - #.....One-field-only flexible type should work as well - ndtype = [("a", int)] - fval = _check_fill_value(-999999999, ndtype) - assert_(isinstance(fval, ndarray)) - assert_equal(fval.item(), (-999999999,)) - - def test_fillvalue_conversion(self): - # Tests the behavior of fill_value during conversion - # We had a tailored comment to make sure special attributes are - # properly dealt with - a = array([b'3', b'4', b'5']) - a._optinfo.update({'comment':"updated!"}) - - b = array(a, dtype=int) - assert_equal(b._data, [3, 4, 5]) - assert_equal(b.fill_value, default_fill_value(0)) - - b = array(a, dtype=float) - assert_equal(b._data, [3, 4, 5]) - assert_equal(b.fill_value, default_fill_value(0.)) - - b = a.astype(int) - assert_equal(b._data, [3, 4, 5]) - assert_equal(b.fill_value, default_fill_value(0)) - assert_equal(b._optinfo['comment'], "updated!") - - b = a.astype([('a', '|S3')]) - assert_equal(b['a']._data, a._data) - assert_equal(b['a'].fill_value, 
a.fill_value) - - def test_default_fill_value(self): - # check all calling conventions - f1 = default_fill_value(1.) - f2 = default_fill_value(np.array(1.)) - f3 = default_fill_value(np.array(1.).dtype) - assert_equal(f1, f2) - assert_equal(f1, f3) - - def test_default_fill_value_structured(self): - fields = array([(1, 1, 1)], - dtype=[('i', int), ('s', '|S8'), ('f', float)]) - - f1 = default_fill_value(fields) - f2 = default_fill_value(fields.dtype) - expected = np.array((default_fill_value(0), - default_fill_value('0'), - default_fill_value(0.)), dtype=fields.dtype) - assert_equal(f1, expected) - assert_equal(f2, expected) - - def test_default_fill_value_void(self): - dt = np.dtype([('v', 'V7')]) - f = default_fill_value(dt) - assert_equal(f['v'], np.array(default_fill_value(dt['v']), dt['v'])) - - def test_fillvalue(self): - # Yet more fun with the fill_value - data = masked_array([1, 2, 3], fill_value=-999) - series = data[[0, 2, 1]] - assert_equal(series._fill_value, data._fill_value) - - mtype = [('f', float), ('s', '|S3')] - x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype) - x.fill_value = 999 - assert_equal(x.fill_value.item(), [999., b'999']) - assert_equal(x['f'].fill_value, 999) - assert_equal(x['s'].fill_value, b'999') - - x.fill_value = (9, '???') - assert_equal(x.fill_value.item(), (9, b'???')) - assert_equal(x['f'].fill_value, 9) - assert_equal(x['s'].fill_value, b'???') - - x = array([1, 2, 3.1]) - x.fill_value = 999 - assert_equal(np.asarray(x.fill_value).dtype, float) - assert_equal(x.fill_value, 999.) 
- assert_equal(x._fill_value, np.array(999.)) - - def test_subarray_fillvalue(self): - # gh-10483 test multi-field index fill value - fields = array([(1, 1, 1)], - dtype=[('i', int), ('s', '|S8'), ('f', float)]) - with suppress_warnings() as sup: - sup.filter(FutureWarning, "Numpy has detected") - subfields = fields[['i', 'f']] - assert_equal(tuple(subfields.fill_value), (999999, 1.e+20)) - # test comparison does not raise: - subfields[1:] == subfields[:-1] - - def test_fillvalue_exotic_dtype(self): - # Tests yet more exotic flexible dtypes - _check_fill_value = np.ma.core._check_fill_value - ndtype = [('i', int), ('s', '|S8'), ('f', float)] - control = np.array((default_fill_value(0), - default_fill_value('0'), - default_fill_value(0.),), - dtype=ndtype) - assert_equal(_check_fill_value(None, ndtype), control) - # The shape shouldn't matter - ndtype = [('f0', float, (2, 2))] - control = np.array((default_fill_value(0.),), - dtype=[('f0', float)]).astype(ndtype) - assert_equal(_check_fill_value(None, ndtype), control) - control = np.array((0,), dtype=[('f0', float)]).astype(ndtype) - assert_equal(_check_fill_value(0, ndtype), control) - - ndtype = np.dtype("int, (2,3)float, float") - control = np.array((default_fill_value(0), - default_fill_value(0.), - default_fill_value(0.),), - dtype="int, float, float").astype(ndtype) - test = _check_fill_value(None, ndtype) - assert_equal(test, control) - control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype) - assert_equal(_check_fill_value(0, ndtype), control) - # but when indexing, fill value should become scalar not tuple - # See issue #6723 - M = masked_array(control) - assert_equal(M["f1"].fill_value.ndim, 0) - - def test_fillvalue_datetime_timedelta(self): - # Test default fillvalue for datetime64 and timedelta64 types. - # See issue #4476, this would return '?' 
which would cause errors - # elsewhere - - for timecode in ("as", "fs", "ps", "ns", "us", "ms", "s", "m", - "h", "D", "W", "M", "Y"): - control = numpy.datetime64("NaT", timecode) - test = default_fill_value(numpy.dtype(" 0 - - # test different unary domains - sqrt(m) - log(m) - tan(m) - arcsin(m) - arccos(m) - arccosh(m) - - # test binary domains - divide(m, 2) - - # also check that allclose uses ma ufuncs, to avoid warning - allclose(m, 0.5) - -class TestMaskedArrayInPlaceArithmetics(object): - # Test MaskedArray Arithmetics - - def setup(self): - x = arange(10) - y = arange(10) - xm = arange(10) - xm[2] = masked - self.intdata = (x, y, xm) - self.floatdata = (x.astype(float), y.astype(float), xm.astype(float)) - self.othertypes = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] - self.othertypes = [np.dtype(_).type for _ in self.othertypes] - self.uint8data = ( - x.astype(np.uint8), - y.astype(np.uint8), - xm.astype(np.uint8) - ) - - def test_inplace_addition_scalar(self): - # Test of inplace additions - (x, y, xm) = self.intdata - xm[2] = masked - x += 1 - assert_equal(x, y + 1) - xm += 1 - assert_equal(xm, y + 1) - - (x, _, xm) = self.floatdata - id1 = x.data.ctypes.data - x += 1. - assert_(id1 == x.data.ctypes.data) - assert_equal(x, y + 1.) 
- - def test_inplace_addition_array(self): - # Test of inplace additions - (x, y, xm) = self.intdata - m = xm.mask - a = arange(10, dtype=np.int16) - a[-1] = masked - x += a - xm += a - assert_equal(x, y + a) - assert_equal(xm, y + a) - assert_equal(xm.mask, mask_or(m, a.mask)) - - def test_inplace_subtraction_scalar(self): - # Test of inplace subtractions - (x, y, xm) = self.intdata - x -= 1 - assert_equal(x, y - 1) - xm -= 1 - assert_equal(xm, y - 1) - - def test_inplace_subtraction_array(self): - # Test of inplace subtractions - (x, y, xm) = self.floatdata - m = xm.mask - a = arange(10, dtype=float) - a[-1] = masked - x -= a - xm -= a - assert_equal(x, y - a) - assert_equal(xm, y - a) - assert_equal(xm.mask, mask_or(m, a.mask)) - - def test_inplace_multiplication_scalar(self): - # Test of inplace multiplication - (x, y, xm) = self.floatdata - x *= 2.0 - assert_equal(x, y * 2) - xm *= 2.0 - assert_equal(xm, y * 2) - - def test_inplace_multiplication_array(self): - # Test of inplace multiplication - (x, y, xm) = self.floatdata - m = xm.mask - a = arange(10, dtype=float) - a[-1] = masked - x *= a - xm *= a - assert_equal(x, y * a) - assert_equal(xm, y * a) - assert_equal(xm.mask, mask_or(m, a.mask)) - - def test_inplace_division_scalar_int(self): - # Test of inplace division - (x, y, xm) = self.intdata - x = arange(10) * 2 - xm = arange(10) * 2 - xm[2] = masked - x //= 2 - assert_equal(x, y) - xm //= 2 - assert_equal(xm, y) - - def test_inplace_division_scalar_float(self): - # Test of inplace division - (x, y, xm) = self.floatdata - x /= 2.0 - assert_equal(x, y / 2.0) - xm /= arange(10) - assert_equal(xm, ones((10,))) - - def test_inplace_division_array_float(self): - # Test of inplace division - (x, y, xm) = self.floatdata - m = xm.mask - a = arange(10, dtype=float) - a[-1] = masked - x /= a - xm /= a - assert_equal(x, y / a) - assert_equal(xm, y / a) - assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0))) - - def test_inplace_division_misc(self): - - x = 
[1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.] - y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.] - m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] - xm = masked_array(x, mask=m1) - ym = masked_array(y, mask=m2) - - z = xm / ym - assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]) - assert_equal(z._data, - [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.]) - - xm = xm.copy() - xm /= ym - assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]) - assert_equal(z._data, - [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.]) - - def test_datafriendly_add(self): - # Test keeping data w/ (inplace) addition - x = array([1, 2, 3], mask=[0, 0, 1]) - # Test add w/ scalar - xx = x + 1 - assert_equal(xx.data, [2, 3, 3]) - assert_equal(xx.mask, [0, 0, 1]) - # Test iadd w/ scalar - x += 1 - assert_equal(x.data, [2, 3, 3]) - assert_equal(x.mask, [0, 0, 1]) - # Test add w/ array - x = array([1, 2, 3], mask=[0, 0, 1]) - xx = x + array([1, 2, 3], mask=[1, 0, 0]) - assert_equal(xx.data, [1, 4, 3]) - assert_equal(xx.mask, [1, 0, 1]) - # Test iadd w/ array - x = array([1, 2, 3], mask=[0, 0, 1]) - x += array([1, 2, 3], mask=[1, 0, 0]) - assert_equal(x.data, [1, 4, 3]) - assert_equal(x.mask, [1, 0, 1]) - - def test_datafriendly_sub(self): - # Test keeping data w/ (inplace) subtraction - # Test sub w/ scalar - x = array([1, 2, 3], mask=[0, 0, 1]) - xx = x - 1 - assert_equal(xx.data, [0, 1, 3]) - assert_equal(xx.mask, [0, 0, 1]) - # Test isub w/ scalar - x = array([1, 2, 3], mask=[0, 0, 1]) - x -= 1 - assert_equal(x.data, [0, 1, 3]) - assert_equal(x.mask, [0, 0, 1]) - # Test sub w/ array - x = array([1, 2, 3], mask=[0, 0, 1]) - xx = x - array([1, 2, 3], mask=[1, 0, 0]) - assert_equal(xx.data, [1, 0, 3]) - assert_equal(xx.mask, [1, 0, 1]) - # Test isub w/ array - x = array([1, 2, 3], mask=[0, 0, 1]) - x -= array([1, 2, 3], mask=[1, 0, 0]) - assert_equal(x.data, [1, 0, 3]) - assert_equal(x.mask, [1, 0, 1]) - - 
def test_datafriendly_mul(self): - # Test keeping data w/ (inplace) multiplication - # Test mul w/ scalar - x = array([1, 2, 3], mask=[0, 0, 1]) - xx = x * 2 - assert_equal(xx.data, [2, 4, 3]) - assert_equal(xx.mask, [0, 0, 1]) - # Test imul w/ scalar - x = array([1, 2, 3], mask=[0, 0, 1]) - x *= 2 - assert_equal(x.data, [2, 4, 3]) - assert_equal(x.mask, [0, 0, 1]) - # Test mul w/ array - x = array([1, 2, 3], mask=[0, 0, 1]) - xx = x * array([10, 20, 30], mask=[1, 0, 0]) - assert_equal(xx.data, [1, 40, 3]) - assert_equal(xx.mask, [1, 0, 1]) - # Test imul w/ array - x = array([1, 2, 3], mask=[0, 0, 1]) - x *= array([10, 20, 30], mask=[1, 0, 0]) - assert_equal(x.data, [1, 40, 3]) - assert_equal(x.mask, [1, 0, 1]) - - def test_datafriendly_div(self): - # Test keeping data w/ (inplace) division - # Test div on scalar - x = array([1, 2, 3], mask=[0, 0, 1]) - xx = x / 2. - assert_equal(xx.data, [1 / 2., 2 / 2., 3]) - assert_equal(xx.mask, [0, 0, 1]) - # Test idiv on scalar - x = array([1., 2., 3.], mask=[0, 0, 1]) - x /= 2. - assert_equal(x.data, [1 / 2., 2 / 2., 3]) - assert_equal(x.mask, [0, 0, 1]) - # Test div on array - x = array([1., 2., 3.], mask=[0, 0, 1]) - xx = x / array([10., 20., 30.], mask=[1, 0, 0]) - assert_equal(xx.data, [1., 2. / 20., 3.]) - assert_equal(xx.mask, [1, 0, 1]) - # Test idiv on array - x = array([1., 2., 3.], mask=[0, 0, 1]) - x /= array([10., 20., 30.], mask=[1, 0, 0]) - assert_equal(x.data, [1., 2 / 20., 3.]) - assert_equal(x.mask, [1, 0, 1]) - - def test_datafriendly_pow(self): - # Test keeping data w/ (inplace) power - # Test pow on scalar - x = array([1., 2., 3.], mask=[0, 0, 1]) - xx = x ** 2.5 - assert_equal(xx.data, [1., 2. ** 2.5, 3.]) - assert_equal(xx.mask, [0, 0, 1]) - # Test ipow on scalar - x **= 2.5 - assert_equal(x.data, [1., 2. 
** 2.5, 3]) - assert_equal(x.mask, [0, 0, 1]) - - def test_datafriendly_add_arrays(self): - a = array([[1, 1], [3, 3]]) - b = array([1, 1], mask=[0, 0]) - a += b - assert_equal(a, [[2, 2], [4, 4]]) - if a.mask is not nomask: - assert_equal(a.mask, [[0, 0], [0, 0]]) - - a = array([[1, 1], [3, 3]]) - b = array([1, 1], mask=[0, 1]) - a += b - assert_equal(a, [[2, 2], [4, 4]]) - assert_equal(a.mask, [[0, 1], [0, 1]]) - - def test_datafriendly_sub_arrays(self): - a = array([[1, 1], [3, 3]]) - b = array([1, 1], mask=[0, 0]) - a -= b - assert_equal(a, [[0, 0], [2, 2]]) - if a.mask is not nomask: - assert_equal(a.mask, [[0, 0], [0, 0]]) - - a = array([[1, 1], [3, 3]]) - b = array([1, 1], mask=[0, 1]) - a -= b - assert_equal(a, [[0, 0], [2, 2]]) - assert_equal(a.mask, [[0, 1], [0, 1]]) - - def test_datafriendly_mul_arrays(self): - a = array([[1, 1], [3, 3]]) - b = array([1, 1], mask=[0, 0]) - a *= b - assert_equal(a, [[1, 1], [3, 3]]) - if a.mask is not nomask: - assert_equal(a.mask, [[0, 0], [0, 0]]) - - a = array([[1, 1], [3, 3]]) - b = array([1, 1], mask=[0, 1]) - a *= b - assert_equal(a, [[1, 1], [3, 3]]) - assert_equal(a.mask, [[0, 1], [0, 1]]) - - def test_inplace_addition_scalar_type(self): - # Test of inplace additions - for t in self.othertypes: - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings("always") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) - xm[2] = masked - x += t(1) - assert_equal(x, y + t(1)) - xm += t(1) - assert_equal(xm, y + t(1)) - - assert_equal(len(w), 0, "Failed on type=%s." 
% t) - - def test_inplace_addition_array_type(self): - # Test of inplace additions - for t in self.othertypes: - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings("always") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) - m = xm.mask - a = arange(10, dtype=t) - a[-1] = masked - x += a - xm += a - assert_equal(x, y + a) - assert_equal(xm, y + a) - assert_equal(xm.mask, mask_or(m, a.mask)) - - assert_equal(len(w), 0, "Failed on type=%s." % t) - - def test_inplace_subtraction_scalar_type(self): - # Test of inplace subtractions - for t in self.othertypes: - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings("always") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) - x -= t(1) - assert_equal(x, y - t(1)) - xm -= t(1) - assert_equal(xm, y - t(1)) - - assert_equal(len(w), 0, "Failed on type=%s." % t) - - def test_inplace_subtraction_array_type(self): - # Test of inplace subtractions - for t in self.othertypes: - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings("always") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) - m = xm.mask - a = arange(10, dtype=t) - a[-1] = masked - x -= a - xm -= a - assert_equal(x, y - a) - assert_equal(xm, y - a) - assert_equal(xm.mask, mask_or(m, a.mask)) - - assert_equal(len(w), 0, "Failed on type=%s." % t) - - def test_inplace_multiplication_scalar_type(self): - # Test of inplace multiplication - for t in self.othertypes: - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings("always") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) - x *= t(2) - assert_equal(x, y * t(2)) - xm *= t(2) - assert_equal(xm, y * t(2)) - - assert_equal(len(w), 0, "Failed on type=%s." 
% t) - - def test_inplace_multiplication_array_type(self): - # Test of inplace multiplication - for t in self.othertypes: - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings("always") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) - m = xm.mask - a = arange(10, dtype=t) - a[-1] = masked - x *= a - xm *= a - assert_equal(x, y * a) - assert_equal(xm, y * a) - assert_equal(xm.mask, mask_or(m, a.mask)) - - assert_equal(len(w), 0, "Failed on type=%s." % t) - - def test_inplace_floor_division_scalar_type(self): - # Test of inplace division - for t in self.othertypes: - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings("always") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) - x = arange(10, dtype=t) * t(2) - xm = arange(10, dtype=t) * t(2) - xm[2] = masked - x //= t(2) - xm //= t(2) - assert_equal(x, y) - assert_equal(xm, y) - - assert_equal(len(w), 0, "Failed on type=%s." % t) - - def test_inplace_floor_division_array_type(self): - # Test of inplace division - for t in self.othertypes: - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings("always") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) - m = xm.mask - a = arange(10, dtype=t) - a[-1] = masked - x //= a - xm //= a - assert_equal(x, y // a) - assert_equal(xm, y // a) - assert_equal( - xm.mask, - mask_or(mask_or(m, a.mask), (a == t(0))) - ) - - assert_equal(len(w), 0, "Failed on type=%s." % t) - - def test_inplace_division_scalar_type(self): - # Test of inplace division - for t in self.othertypes: - with suppress_warnings() as sup: - sup.record(UserWarning) - - (x, y, xm) = (_.astype(t) for _ in self.uint8data) - x = arange(10, dtype=t) * t(2) - xm = arange(10, dtype=t) * t(2) - xm[2] = masked - - # May get a DeprecationWarning or a TypeError. - # - # This is a consequence of the fact that this is true divide - # and will require casting to float for calculation and - # casting back to the original type. 
This will only be raised - # with integers. Whether it is an error or warning is only - # dependent on how stringent the casting rules are. - # - # Will handle the same way. - try: - x /= t(2) - assert_equal(x, y) - except (DeprecationWarning, TypeError) as e: - warnings.warn(str(e), stacklevel=1) - try: - xm /= t(2) - assert_equal(xm, y) - except (DeprecationWarning, TypeError) as e: - warnings.warn(str(e), stacklevel=1) - - if issubclass(t, np.integer): - assert_equal(len(sup.log), 2, "Failed on type=%s." % t) - else: - assert_equal(len(sup.log), 0, "Failed on type=%s." % t) - - def test_inplace_division_array_type(self): - # Test of inplace division - for t in self.othertypes: - with suppress_warnings() as sup: - sup.record(UserWarning) - (x, y, xm) = (_.astype(t) for _ in self.uint8data) - m = xm.mask - a = arange(10, dtype=t) - a[-1] = masked - - # May get a DeprecationWarning or a TypeError. - # - # This is a consequence of the fact that this is true divide - # and will require casting to float for calculation and - # casting back to the original type. This will only be raised - # with integers. Whether it is an error or warning is only - # dependent on how stringent the casting rules are. - # - # Will handle the same way. - try: - x /= a - assert_equal(x, y / a) - except (DeprecationWarning, TypeError) as e: - warnings.warn(str(e), stacklevel=1) - try: - xm /= a - assert_equal(xm, y / a) - assert_equal( - xm.mask, - mask_or(mask_or(m, a.mask), (a == t(0))) - ) - except (DeprecationWarning, TypeError) as e: - warnings.warn(str(e), stacklevel=1) - - if issubclass(t, np.integer): - assert_equal(len(sup.log), 2, "Failed on type=%s." % t) - else: - assert_equal(len(sup.log), 0, "Failed on type=%s." 
% t) - - def test_inplace_pow_type(self): - # Test keeping data w/ (inplace) power - for t in self.othertypes: - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings("always") - # Test pow on scalar - x = array([1, 2, 3], mask=[0, 0, 1], dtype=t) - xx = x ** t(2) - xx_r = array([1, 2 ** 2, 3], mask=[0, 0, 1], dtype=t) - assert_equal(xx.data, xx_r.data) - assert_equal(xx.mask, xx_r.mask) - # Test ipow on scalar - x **= t(2) - assert_equal(x.data, xx_r.data) - assert_equal(x.mask, xx_r.mask) - - assert_equal(len(w), 0, "Failed on type=%s." % t) - - -class TestMaskedArrayMethods(object): - # Test class for miscellaneous MaskedArrays methods. - def setup(self): - # Base data definition. - x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, - 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, - 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, - 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, - 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, - 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) - X = x.reshape(6, 6) - XX = x.reshape(3, 2, 2, 3) - - m = np.array([0, 1, 0, 1, 0, 0, - 1, 0, 1, 1, 0, 1, - 0, 0, 0, 1, 0, 1, - 0, 0, 0, 1, 1, 1, - 1, 0, 0, 1, 0, 0, - 0, 0, 1, 0, 1, 0]) - mx = array(data=x, mask=m) - mX = array(data=X, mask=m.reshape(X.shape)) - mXX = array(data=XX, mask=m.reshape(XX.shape)) - - m2 = np.array([1, 1, 0, 1, 0, 0, - 1, 1, 1, 1, 0, 1, - 0, 0, 1, 1, 0, 1, - 0, 0, 0, 1, 1, 1, - 1, 0, 0, 1, 1, 0, - 0, 0, 1, 0, 1, 1]) - m2x = array(data=x, mask=m2) - m2X = array(data=X, mask=m2.reshape(X.shape)) - m2XX = array(data=XX, mask=m2.reshape(XX.shape)) - self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) - - def test_generic_methods(self): - # Tests some MaskedArray methods. 
- a = array([1, 3, 2]) - assert_equal(a.any(), a._data.any()) - assert_equal(a.all(), a._data.all()) - assert_equal(a.argmax(), a._data.argmax()) - assert_equal(a.argmin(), a._data.argmin()) - assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4)) - assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])) - assert_equal(a.conj(), a._data.conj()) - assert_equal(a.conjugate(), a._data.conjugate()) - - m = array([[1, 2], [3, 4]]) - assert_equal(m.diagonal(), m._data.diagonal()) - assert_equal(a.sum(), a._data.sum()) - assert_equal(a.take([1, 2]), a._data.take([1, 2])) - assert_equal(m.transpose(), m._data.transpose()) - - def test_allclose(self): - # Tests allclose on arrays - a = np.random.rand(10) - b = a + np.random.rand(10) * 1e-8 - assert_(allclose(a, b)) - # Test allclose w/ infs - a[0] = np.inf - assert_(not allclose(a, b)) - b[0] = np.inf - assert_(allclose(a, b)) - # Test allclose w/ masked - a = masked_array(a) - a[-1] = masked - assert_(allclose(a, b, masked_equal=True)) - assert_(not allclose(a, b, masked_equal=False)) - # Test comparison w/ scalar - a *= 1e-8 - a[0] = 0 - assert_(allclose(a, 0, masked_equal=True)) - - # Test that the function works for MIN_INT integer typed arrays - a = masked_array([np.iinfo(np.int_).min], dtype=np.int_) - assert_(allclose(a, a)) - - def test_allany(self): - # Checks the any/all methods/functions. 
- x = np.array([[0.13, 0.26, 0.90], - [0.28, 0.33, 0.63], - [0.31, 0.87, 0.70]]) - m = np.array([[True, False, False], - [False, False, False], - [True, True, False]], dtype=np.bool_) - mx = masked_array(x, mask=m) - mxbig = (mx > 0.5) - mxsmall = (mx < 0.5) - - assert_(not mxbig.all()) - assert_(mxbig.any()) - assert_equal(mxbig.all(0), [False, False, True]) - assert_equal(mxbig.all(1), [False, False, True]) - assert_equal(mxbig.any(0), [False, False, True]) - assert_equal(mxbig.any(1), [True, True, True]) - - assert_(not mxsmall.all()) - assert_(mxsmall.any()) - assert_equal(mxsmall.all(0), [True, True, False]) - assert_equal(mxsmall.all(1), [False, False, False]) - assert_equal(mxsmall.any(0), [True, True, False]) - assert_equal(mxsmall.any(1), [True, True, False]) - - def test_allany_oddities(self): - # Some fun with all and any - store = empty((), dtype=bool) - full = array([1, 2, 3], mask=True) - - assert_(full.all() is masked) - full.all(out=store) - assert_(store) - assert_(store._mask, True) - assert_(store is not masked) - - store = empty((), dtype=bool) - assert_(full.any() is masked) - full.any(out=store) - assert_(not store) - assert_(store._mask, True) - assert_(store is not masked) - - def test_argmax_argmin(self): - # Tests argmin & argmax on MaskedArrays. 
- (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d - - assert_equal(mx.argmin(), 35) - assert_equal(mX.argmin(), 35) - assert_equal(m2x.argmin(), 4) - assert_equal(m2X.argmin(), 4) - assert_equal(mx.argmax(), 28) - assert_equal(mX.argmax(), 28) - assert_equal(m2x.argmax(), 31) - assert_equal(m2X.argmax(), 31) - - assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5]) - assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4]) - assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0]) - assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0]) - - assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5, ]) - assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3]) - assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1]) - assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1]) - - def test_clip(self): - # Tests clip on MaskedArrays. - x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, - 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, - 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, - 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, - 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, - 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) - m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, - 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, - 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0]) - mx = array(x, mask=m) - clipped = mx.clip(2, 8) - assert_equal(clipped.mask, mx.mask) - assert_equal(clipped._data, x.clip(2, 8)) - assert_equal(clipped._data, mx._data.clip(2, 8)) - - def test_clip_out(self): - # gh-14140 - a = np.arange(10) - m = np.ma.MaskedArray(a, mask=[0, 1] * 5) - m.clip(0, 5, out=m) - assert_equal(m.mask, [0, 1] * 5) - - def test_compress(self): - # test compress - a = masked_array([1., 2., 3., 4., 5.], fill_value=9999) - condition = (a > 1.5) & (a < 3.5) - assert_equal(a.compress(condition), [2., 3.]) - - a[[2, 3]] = masked - b = a.compress(condition) - assert_equal(b._data, [2., 3.]) - assert_equal(b._mask, [0, 1]) - assert_equal(b.fill_value, 9999) - assert_equal(b, a[condition]) - - condition = (a < 4.) 
- b = a.compress(condition) - assert_equal(b._data, [1., 2., 3.]) - assert_equal(b._mask, [0, 0, 1]) - assert_equal(b.fill_value, 9999) - assert_equal(b, a[condition]) - - a = masked_array([[10, 20, 30], [40, 50, 60]], - mask=[[0, 0, 1], [1, 0, 0]]) - b = a.compress(a.ravel() >= 22) - assert_equal(b._data, [30, 40, 50, 60]) - assert_equal(b._mask, [1, 1, 0, 0]) - - x = np.array([3, 1, 2]) - b = a.compress(x >= 2, axis=1) - assert_equal(b._data, [[10, 30], [40, 60]]) - assert_equal(b._mask, [[0, 1], [1, 0]]) - - def test_compressed(self): - # Tests compressed - a = array([1, 2, 3, 4], mask=[0, 0, 0, 0]) - b = a.compressed() - assert_equal(b, a) - a[0] = masked - b = a.compressed() - assert_equal(b, [2, 3, 4]) - - def test_empty(self): - # Tests empty/like - datatype = [('a', int), ('b', float), ('c', '|S8')] - a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')], - dtype=datatype) - assert_equal(len(a.fill_value.item()), len(datatype)) - - b = empty_like(a) - assert_equal(b.shape, a.shape) - assert_equal(b.fill_value, a.fill_value) - - b = empty(len(a), dtype=datatype) - assert_equal(b.shape, a.shape) - assert_equal(b.fill_value, a.fill_value) - - # check empty_like mask handling - a = masked_array([1, 2, 3], mask=[False, True, False]) - b = empty_like(a) - assert_(not np.may_share_memory(a.mask, b.mask)) - b = a.view(masked_array) - assert_(np.may_share_memory(a.mask, b.mask)) - - @suppress_copy_mask_on_assignment - def test_put(self): - # Tests put. 
- d = arange(5) - n = [0, 0, 0, 1, 1] - m = make_mask(n) - x = array(d, mask=m) - assert_(x[3] is masked) - assert_(x[4] is masked) - x[[1, 4]] = [10, 40] - assert_(x[3] is masked) - assert_(x[4] is not masked) - assert_equal(x, [0, 10, 2, -1, 40]) - - x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2) - i = [0, 2, 4, 6] - x.put(i, [6, 4, 2, 0]) - assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ])) - assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]) - x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0])) - assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ]) - assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0]) - - x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2) - put(x, i, [6, 4, 2, 0]) - assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ])) - assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]) - put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0])) - assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ]) - assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0]) - - def test_put_nomask(self): - # GitHub issue 6425 - x = zeros(10) - z = array([3., -1.], mask=[False, True]) - - x.put([1, 2], z) - assert_(x[0] is not masked) - assert_equal(x[0], 0) - assert_(x[1] is not masked) - assert_equal(x[1], 3) - assert_(x[2] is masked) - assert_(x[3] is not masked) - assert_equal(x[3], 0) - - def test_put_hardmask(self): - # Tests put on hardmask - d = arange(5) - n = [0, 0, 0, 1, 1] - m = make_mask(n) - xh = array(d + 1, mask=m, hard_mask=True, copy=True) - xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5]) - assert_equal(xh._data, [3, 4, 2, 4, 5]) - - def test_putmask(self): - x = arange(6) + 1 - mx = array(x, mask=[0, 0, 0, 1, 1, 1]) - mask = [0, 0, 1, 0, 0, 1] - # w/o mask, w/o masked values - xx = x.copy() - putmask(xx, mask, 99) - assert_equal(xx, [1, 2, 99, 4, 5, 99]) - # w/ mask, w/o masked values - mxx = mx.copy() - putmask(mxx, mask, 99) - assert_equal(mxx._data, [1, 2, 99, 4, 5, 99]) - assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0]) - # w/o mask, w/ 
masked values - values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0]) - xx = x.copy() - putmask(xx, mask, values) - assert_equal(xx._data, [1, 2, 30, 4, 5, 60]) - assert_equal(xx._mask, [0, 0, 1, 0, 0, 0]) - # w/ mask, w/ masked values - mxx = mx.copy() - putmask(mxx, mask, values) - assert_equal(mxx._data, [1, 2, 30, 4, 5, 60]) - assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0]) - # w/ mask, w/ masked values + hardmask - mxx = mx.copy() - mxx.harden_mask() - putmask(mxx, mask, values) - assert_equal(mxx, [1, 2, 30, 4, 5, 60]) - - def test_ravel(self): - # Tests ravel - a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]]) - aravel = a.ravel() - assert_equal(aravel._mask.shape, aravel.shape) - a = array([0, 0], mask=[1, 1]) - aravel = a.ravel() - assert_equal(aravel._mask.shape, a.shape) - # Checks that small_mask is preserved - a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False) - assert_equal(a.ravel()._mask, [0, 0, 0, 0]) - # Test that the fill_value is preserved - a.fill_value = -99 - a.shape = (2, 2) - ar = a.ravel() - assert_equal(ar._mask, [0, 0, 0, 0]) - assert_equal(ar._data, [1, 2, 3, 4]) - assert_equal(ar.fill_value, -99) - # Test index ordering - assert_equal(a.ravel(order='C'), [1, 2, 3, 4]) - assert_equal(a.ravel(order='F'), [1, 3, 2, 4]) - - def test_reshape(self): - # Tests reshape - x = arange(4) - x[0] = masked - y = x.reshape(2, 2) - assert_equal(y.shape, (2, 2,)) - assert_equal(y._mask.shape, (2, 2,)) - assert_equal(x.shape, (4,)) - assert_equal(x._mask.shape, (4,)) - - def test_sort(self): - # Test sort - x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8) - - sortedx = sort(x) - assert_equal(sortedx._data, [1, 2, 3, 4]) - assert_equal(sortedx._mask, [0, 0, 0, 1]) - - sortedx = sort(x, endwith=False) - assert_equal(sortedx._data, [4, 1, 2, 3]) - assert_equal(sortedx._mask, [1, 0, 0, 0]) - - x.sort() - assert_equal(x._data, [1, 2, 3, 4]) - assert_equal(x._mask, [0, 0, 0, 1]) - - x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], 
dtype=np.uint8) - x.sort(endwith=False) - assert_equal(x._data, [4, 1, 2, 3]) - assert_equal(x._mask, [1, 0, 0, 0]) - - x = [1, 4, 2, 3] - sortedx = sort(x) - assert_(not isinstance(sorted, MaskedArray)) - - x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8) - sortedx = sort(x, endwith=False) - assert_equal(sortedx._data, [-2, -1, 0, 1, 2]) - x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8) - sortedx = sort(x, endwith=False) - assert_equal(sortedx._data, [1, 2, -2, -1, 0]) - assert_equal(sortedx._mask, [1, 1, 0, 0, 0]) - - def test_stable_sort(self): - x = array([1, 2, 3, 1, 2, 3], dtype=np.uint8) - expected = array([0, 3, 1, 4, 2, 5]) - computed = argsort(x, kind='stable') - assert_equal(computed, expected) - - def test_argsort_matches_sort(self): - x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8) - - for kwargs in [dict(), - dict(endwith=True), - dict(endwith=False), - dict(fill_value=2), - dict(fill_value=2, endwith=True), - dict(fill_value=2, endwith=False)]: - sortedx = sort(x, **kwargs) - argsortedx = x[argsort(x, **kwargs)] - assert_equal(sortedx._data, argsortedx._data) - assert_equal(sortedx._mask, argsortedx._mask) - - def test_sort_2d(self): - # Check sort of 2D array. 
- # 2D array w/o mask - a = masked_array([[8, 4, 1], [2, 0, 9]]) - a.sort(0) - assert_equal(a, [[2, 0, 1], [8, 4, 9]]) - a = masked_array([[8, 4, 1], [2, 0, 9]]) - a.sort(1) - assert_equal(a, [[1, 4, 8], [0, 2, 9]]) - # 2D array w/mask - a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]]) - a.sort(0) - assert_equal(a, [[2, 0, 1], [8, 4, 9]]) - assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]]) - a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]]) - a.sort(1) - assert_equal(a, [[1, 4, 8], [0, 2, 9]]) - assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]]) - # 3D - a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]], - [[1, 2, 3], [7, 8, 9], [4, 5, 6]], - [[7, 8, 9], [1, 2, 3], [4, 5, 6]], - [[4, 5, 6], [1, 2, 3], [7, 8, 9]]]) - a[a % 4 == 0] = masked - am = a.copy() - an = a.filled(99) - am.sort(0) - an.sort(0) - assert_equal(am, an) - am = a.copy() - an = a.filled(99) - am.sort(1) - an.sort(1) - assert_equal(am, an) - am = a.copy() - an = a.filled(99) - am.sort(2) - an.sort(2) - assert_equal(am, an) - - def test_sort_flexible(self): - # Test sort on structured dtype. 
- a = array( - data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)], - mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)], - dtype=[('A', int), ('B', int)]) - mask_last = array( - data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)], - mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)], - dtype=[('A', int), ('B', int)]) - mask_first = array( - data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3)], - mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0)], - dtype=[('A', int), ('B', int)]) - - test = sort(a) - assert_equal(test, mask_last) - assert_equal(test.mask, mask_last.mask) - - test = sort(a, endwith=False) - assert_equal(test, mask_first) - assert_equal(test.mask, mask_first.mask) - - # Test sort on dtype with subarray (gh-8069) - # Just check that the sort does not error, structured array subarrays - # are treated as byte strings and that leads to differing behavior - # depending on endianess and `endwith`. - dt = np.dtype([('v', int, 2)]) - a = a.view(dt) - test = sort(a) - test = sort(a, endwith=False) - - def test_argsort(self): - # Test argsort - a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0]) - assert_equal(np.argsort(a), argsort(a)) - - def test_squeeze(self): - # Check squeeze - data = masked_array([[1, 2, 3]]) - assert_equal(data.squeeze(), [1, 2, 3]) - data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]]) - assert_equal(data.squeeze(), [1, 2, 3]) - assert_equal(data.squeeze()._mask, [1, 1, 1]) - - # normal ndarrays return a view - arr = np.array([[1]]) - arr_sq = arr.squeeze() - assert_equal(arr_sq, 1) - arr_sq[...] = 2 - assert_equal(arr[0,0], 2) - - # so maskedarrays should too - m_arr = masked_array([[1]], mask=True) - m_arr_sq = m_arr.squeeze() - assert_(m_arr_sq is not np.ma.masked) - assert_equal(m_arr_sq.mask, True) - m_arr_sq[...] = 2 - assert_equal(m_arr[0,0], 2) - - def test_swapaxes(self): - # Tests swapaxes on MaskedArrays. 
- x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, - 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, - 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, - 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, - 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, - 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) - m = np.array([0, 1, 0, 1, 0, 0, - 1, 0, 1, 1, 0, 1, - 0, 0, 0, 1, 0, 1, - 0, 0, 0, 1, 1, 1, - 1, 0, 0, 1, 0, 0, - 0, 0, 1, 0, 1, 0]) - mX = array(x, mask=m).reshape(6, 6) - mXX = mX.reshape(3, 2, 2, 3) - - mXswapped = mX.swapaxes(0, 1) - assert_equal(mXswapped[-1], mX[:, -1]) - - mXXswapped = mXX.swapaxes(0, 2) - assert_equal(mXXswapped.shape, (2, 2, 3, 3)) - - def test_take(self): - # Tests take - x = masked_array([10, 20, 30, 40], [0, 1, 0, 1]) - assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1])) - assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]]) - assert_equal(x.take([[0, 1], [0, 1]]), - masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]])) - - # assert_equal crashes when passed np.ma.mask - assert_(x[1] is np.ma.masked) - assert_(x.take(1) is np.ma.masked) - - x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]]) - assert_equal(x.take([0, 2], axis=1), - array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]])) - assert_equal(take(x, [0, 2], axis=1), - array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]])) - - def test_take_masked_indices(self): - # Test take w/ masked indices - a = np.array((40, 18, 37, 9, 22)) - indices = np.arange(3)[None,:] + np.arange(5)[:, None] - mindices = array(indices, mask=(indices >= len(a))) - # No mask - test = take(a, mindices, mode='clip') - ctrl = array([[40, 18, 37], - [18, 37, 9], - [37, 9, 22], - [9, 22, 22], - [22, 22, 22]]) - assert_equal(test, ctrl) - # Masked indices - test = take(a, mindices) - ctrl = array([[40, 18, 37], - [18, 37, 9], - [37, 9, 22], - [9, 22, 40], - [22, 40, 40]]) - ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked - assert_equal(test, ctrl) - assert_equal(test.mask, ctrl.mask) - # Masked input + masked indices 
- a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0)) - test = take(a, mindices) - ctrl[0, 1] = ctrl[1, 0] = masked - assert_equal(test, ctrl) - assert_equal(test.mask, ctrl.mask) - - def test_tolist(self): - # Tests to list - # ... on 1D - x = array(np.arange(12)) - x[[1, -2]] = masked - xlist = x.tolist() - assert_(xlist[1] is None) - assert_(xlist[-2] is None) - # ... on 2D - x.shape = (3, 4) - xlist = x.tolist() - ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]] - assert_equal(xlist[0], [0, None, 2, 3]) - assert_equal(xlist[1], [4, 5, 6, 7]) - assert_equal(xlist[2], [8, 9, None, 11]) - assert_equal(xlist, ctrl) - # ... on structured array w/ masked records - x = array(list(zip([1, 2, 3], - [1.1, 2.2, 3.3], - ['one', 'two', 'thr'])), - dtype=[('a', int), ('b', float), ('c', '|S8')]) - x[-1] = masked - assert_equal(x.tolist(), - [(1, 1.1, b'one'), - (2, 2.2, b'two'), - (None, None, None)]) - # ... on structured array w/ masked fields - a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)], - dtype=[('a', int), ('b', int)]) - test = a.tolist() - assert_equal(test, [[1, None], [3, 4]]) - # ... 
on mvoid - a = a[0] - test = a.tolist() - assert_equal(test, [1, None]) - - def test_tolist_specialcase(self): - # Test mvoid.tolist: make sure we return a standard Python object - a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)]) - # w/o mask: each entry is a np.void whose elements are standard Python - for entry in a: - for item in entry.tolist(): - assert_(not isinstance(item, np.generic)) - # w/ mask: each entry is a ma.void whose elements should be - # standard Python - a.mask[0] = (0, 1) - for entry in a: - for item in entry.tolist(): - assert_(not isinstance(item, np.generic)) - - def test_toflex(self): - # Test the conversion to records - data = arange(10) - record = data.toflex() - assert_equal(record['_data'], data._data) - assert_equal(record['_mask'], data._mask) - - data[[0, 1, 2, -1]] = masked - record = data.toflex() - assert_equal(record['_data'], data._data) - assert_equal(record['_mask'], data._mask) - - ndtype = [('i', int), ('s', '|S3'), ('f', float)] - data = array([(i, s, f) for (i, s, f) in zip(np.arange(10), - 'ABCDEFGHIJKLM', - np.random.rand(10))], - dtype=ndtype) - data[[0, 1, 2, -1]] = masked - record = data.toflex() - assert_equal(record['_data'], data._data) - assert_equal(record['_mask'], data._mask) - - ndtype = np.dtype("int, (2,3)float, float") - data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10), - np.random.rand(10), - np.random.rand(10))], - dtype=ndtype) - data[[0, 1, 2, -1]] = masked - record = data.toflex() - assert_equal_records(record['_data'], data._data) - assert_equal_records(record['_mask'], data._mask) - - def test_fromflex(self): - # Test the reconstruction of a masked_array from a record - a = array([1, 2, 3]) - test = fromflex(a.toflex()) - assert_equal(test, a) - assert_equal(test.mask, a.mask) - - a = array([1, 2, 3], mask=[0, 0, 1]) - test = fromflex(a.toflex()) - assert_equal(test, a) - assert_equal(test.mask, a.mask) - - a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)], - 
dtype=[('A', int), ('B', float)]) - test = fromflex(a.toflex()) - assert_equal(test, a) - assert_equal(test.data, a.data) - - def test_arraymethod(self): - # Test a _arraymethod w/ n argument - marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0]) - control = masked_array([[1], [2], [3], [4], [5]], - mask=[0, 0, 1, 0, 0]) - assert_equal(marray.T, control) - assert_equal(marray.transpose(), control) - - assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0)) - - def test_arraymethod_0d(self): - # gh-9430 - x = np.ma.array(42, mask=True) - assert_equal(x.T.mask, x.mask) - assert_equal(x.T.data, x.data) - - def test_transpose_view(self): - x = np.ma.array([[1, 2, 3], [4, 5, 6]]) - x[0,1] = np.ma.masked - xt = x.T - - xt[1,0] = 10 - xt[0,1] = np.ma.masked - - assert_equal(x.data, xt.T.data) - assert_equal(x.mask, xt.T.mask) - - def test_diagonal_view(self): - x = np.ma.zeros((3,3)) - x[0,0] = 10 - x[1,1] = np.ma.masked - x[2,2] = 20 - xd = x.diagonal() - x[1,1] = 15 - assert_equal(xd.mask, x.diagonal().mask) - assert_equal(xd.data, x.diagonal().data) - - -class TestMaskedArrayMathMethods(object): - - def setup(self): - # Base data definition. 
- x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, - 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, - 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, - 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, - 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, - 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) - X = x.reshape(6, 6) - XX = x.reshape(3, 2, 2, 3) - - m = np.array([0, 1, 0, 1, 0, 0, - 1, 0, 1, 1, 0, 1, - 0, 0, 0, 1, 0, 1, - 0, 0, 0, 1, 1, 1, - 1, 0, 0, 1, 0, 0, - 0, 0, 1, 0, 1, 0]) - mx = array(data=x, mask=m) - mX = array(data=X, mask=m.reshape(X.shape)) - mXX = array(data=XX, mask=m.reshape(XX.shape)) - - m2 = np.array([1, 1, 0, 1, 0, 0, - 1, 1, 1, 1, 0, 1, - 0, 0, 1, 1, 0, 1, - 0, 0, 0, 1, 1, 1, - 1, 0, 0, 1, 1, 0, - 0, 0, 1, 0, 1, 1]) - m2x = array(data=x, mask=m2) - m2X = array(data=X, mask=m2.reshape(X.shape)) - m2XX = array(data=XX, mask=m2.reshape(XX.shape)) - self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) - - def test_cumsumprod(self): - # Tests cumsum & cumprod on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d - mXcp = mX.cumsum(0) - assert_equal(mXcp._data, mX.filled(0).cumsum(0)) - mXcp = mX.cumsum(1) - assert_equal(mXcp._data, mX.filled(0).cumsum(1)) - - mXcp = mX.cumprod(0) - assert_equal(mXcp._data, mX.filled(1).cumprod(0)) - mXcp = mX.cumprod(1) - assert_equal(mXcp._data, mX.filled(1).cumprod(1)) - - def test_cumsumprod_with_output(self): - # Tests cumsum/cumprod w/ output - xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) - xm[:, 0] = xm[0] = xm[-1, -1] = masked - - for funcname in ('cumsum', 'cumprod'): - npfunc = getattr(np, funcname) - xmmeth = getattr(xm, funcname) - - # A ndarray as explicit input - output = np.empty((3, 4), dtype=float) - output.fill(-9999) - result = npfunc(xm, axis=0, out=output) - # ... 
the result should be the given output - assert_(result is output) - assert_equal(result, xmmeth(axis=0, out=output)) - - output = empty((3, 4), dtype=int) - result = xmmeth(axis=0, out=output) - assert_(result is output) - - def test_ptp(self): - # Tests ptp on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d - (n, m) = X.shape - assert_equal(mx.ptp(), mx.compressed().ptp()) - rows = np.zeros(n, float) - cols = np.zeros(m, float) - for k in range(m): - cols[k] = mX[:, k].compressed().ptp() - for k in range(n): - rows[k] = mX[k].compressed().ptp() - assert_equal(mX.ptp(0), cols) - assert_equal(mX.ptp(1), rows) - - def test_add_object(self): - x = masked_array(['a', 'b'], mask=[1, 0], dtype=object) - y = x + 'x' - assert_equal(y[1], 'bx') - assert_(y.mask[0]) - - def test_sum_object(self): - # Test sum on object dtype - a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object) - assert_equal(a.sum(), 5) - a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object) - assert_equal(a.sum(axis=0), [5, 7, 9]) - - def test_prod_object(self): - # Test prod on object dtype - a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object) - assert_equal(a.prod(), 2 * 3) - a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object) - assert_equal(a.prod(axis=0), [4, 10, 18]) - - def test_meananom_object(self): - # Test mean/anom on object dtype - a = masked_array([1, 2, 3], dtype=object) - assert_equal(a.mean(), 2) - assert_equal(a.anom(), [-1, 0, 1]) - - def test_trace(self): - # Tests trace on MaskedArrays. 
- (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d - mXdiag = mX.diagonal() - assert_equal(mX.trace(), mX.diagonal().compressed().sum()) - assert_almost_equal(mX.trace(), - X.trace() - sum(mXdiag.mask * X.diagonal(), - axis=0)) - assert_equal(np.trace(mX), mX.trace()) - - # gh-5560 - arr = np.arange(2*4*4).reshape(2,4,4) - m_arr = np.ma.masked_array(arr, False) - assert_equal(arr.trace(axis1=1, axis2=2), m_arr.trace(axis1=1, axis2=2)) - - def test_dot(self): - # Tests dot on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d - fx = mx.filled(0) - r = mx.dot(mx) - assert_almost_equal(r.filled(0), fx.dot(fx)) - assert_(r.mask is nomask) - - fX = mX.filled(0) - r = mX.dot(mX) - assert_almost_equal(r.filled(0), fX.dot(fX)) - assert_(r.mask[1,3]) - r1 = empty_like(r) - mX.dot(mX, out=r1) - assert_almost_equal(r, r1) - - mYY = mXX.swapaxes(-1, -2) - fXX, fYY = mXX.filled(0), mYY.filled(0) - r = mXX.dot(mYY) - assert_almost_equal(r.filled(0), fXX.dot(fYY)) - r1 = empty_like(r) - mXX.dot(mYY, out=r1) - assert_almost_equal(r, r1) - - def test_dot_shape_mismatch(self): - # regression test - x = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]]) - y = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]]) - z = masked_array([[0,1],[3,3]]) - x.dot(y, out=z) - assert_almost_equal(z.filled(0), [[1, 0], [15, 16]]) - assert_almost_equal(z.mask, [[0, 1], [0, 0]]) - - def test_varmean_nomask(self): - # gh-5769 - foo = array([1,2,3,4], dtype='f8') - bar = array([1,2,3,4], dtype='f8') - assert_equal(type(foo.mean()), np.float64) - assert_equal(type(foo.var()), np.float64) - assert((foo.mean() == bar.mean()) is np.bool_(True)) - - # check array type is preserved and out works - foo = array(np.arange(16).reshape((4,4)), dtype='f8') - bar = empty(4, dtype='f4') - assert_equal(type(foo.mean(axis=1)), MaskedArray) - assert_equal(type(foo.var(axis=1)), MaskedArray) - assert_(foo.mean(axis=1, out=bar) is bar) - assert_(foo.var(axis=1, out=bar) is bar) - - def test_varstd(self): - # 
Tests var & std on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d - assert_almost_equal(mX.var(axis=None), mX.compressed().var()) - assert_almost_equal(mX.std(axis=None), mX.compressed().std()) - assert_almost_equal(mX.std(axis=None, ddof=1), - mX.compressed().std(ddof=1)) - assert_almost_equal(mX.var(axis=None, ddof=1), - mX.compressed().var(ddof=1)) - assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) - assert_equal(mX.var().shape, X.var().shape) - (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) - assert_almost_equal(mX.var(axis=None, ddof=2), - mX.compressed().var(ddof=2)) - assert_almost_equal(mX.std(axis=None, ddof=2), - mX.compressed().std(ddof=2)) - for k in range(6): - assert_almost_equal(mXvar1[k], mX[k].compressed().var()) - assert_almost_equal(mXvar0[k], mX[:, k].compressed().var()) - assert_almost_equal(np.sqrt(mXvar0[k]), - mX[:, k].compressed().std()) - - @pytest.mark.skipif(sys.platform=='win32' and sys.version_info < (3, 6), - reason='Fails on Python < 3.6 on Windows, gh-9671') - @suppress_copy_mask_on_assignment - def test_varstd_specialcases(self): - # Test a special case for var - nout = np.array(-1, dtype=float) - mout = array(-1, dtype=float) - - x = array(arange(10), mask=True) - for methodname in ('var', 'std'): - method = getattr(x, methodname) - assert_(method() is masked) - assert_(method(0) is masked) - assert_(method(-1) is masked) - # Using a masked array as explicit output - method(out=mout) - assert_(mout is not masked) - assert_equal(mout.mask, True) - # Using a ndarray as explicit output - method(out=nout) - assert_(np.isnan(nout)) - - x = array(arange(10), mask=True) - x[-1] = 9 - for methodname in ('var', 'std'): - method = getattr(x, methodname) - assert_(method(ddof=1) is masked) - assert_(method(0, ddof=1) is masked) - assert_(method(-1, ddof=1) is masked) - # Using a masked array as explicit output - method(out=mout, ddof=1) - assert_(mout is not masked) - assert_equal(mout.mask, True) - # Using 
a ndarray as explicit output - method(out=nout, ddof=1) - assert_(np.isnan(nout)) - - def test_varstd_ddof(self): - a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]]) - test = a.std(axis=0, ddof=0) - assert_equal(test.filled(0), [0, 0, 0]) - assert_equal(test.mask, [0, 0, 1]) - test = a.std(axis=0, ddof=1) - assert_equal(test.filled(0), [0, 0, 0]) - assert_equal(test.mask, [0, 0, 1]) - test = a.std(axis=0, ddof=2) - assert_equal(test.filled(0), [0, 0, 0]) - assert_equal(test.mask, [1, 1, 1]) - - def test_diag(self): - # Test diag - x = arange(9).reshape((3, 3)) - x[1, 1] = masked - out = np.diag(x) - assert_equal(out, [0, 4, 8]) - out = diag(x) - assert_equal(out, [0, 4, 8]) - assert_equal(out.mask, [0, 1, 0]) - out = diag(out) - control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]], - mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) - assert_equal(out, control) - - def test_axis_methods_nomask(self): - # Test the combination nomask & methods w/ axis - a = array([[1, 2, 3], [4, 5, 6]]) - - assert_equal(a.sum(0), [5, 7, 9]) - assert_equal(a.sum(-1), [6, 15]) - assert_equal(a.sum(1), [6, 15]) - - assert_equal(a.prod(0), [4, 10, 18]) - assert_equal(a.prod(-1), [6, 120]) - assert_equal(a.prod(1), [6, 120]) - - assert_equal(a.min(0), [1, 2, 3]) - assert_equal(a.min(-1), [1, 4]) - assert_equal(a.min(1), [1, 4]) - - assert_equal(a.max(0), [4, 5, 6]) - assert_equal(a.max(-1), [3, 6]) - assert_equal(a.max(1), [3, 6]) - - -class TestMaskedArrayMathMethodsComplex(object): - # Test class for miscellaneous MaskedArrays methods. - def setup(self): - # Base data definition. 
- x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928, - 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, - 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, - 6.04, 9.63, 7.712, 3.382, 4.489, 6.479j, - 7.189j, 9.645, 5.395, 4.961, 9.894, 2.893, - 7.357, 9.828, 6.272, 3.758, 6.693, 0.993j]) - X = x.reshape(6, 6) - XX = x.reshape(3, 2, 2, 3) - - m = np.array([0, 1, 0, 1, 0, 0, - 1, 0, 1, 1, 0, 1, - 0, 0, 0, 1, 0, 1, - 0, 0, 0, 1, 1, 1, - 1, 0, 0, 1, 0, 0, - 0, 0, 1, 0, 1, 0]) - mx = array(data=x, mask=m) - mX = array(data=X, mask=m.reshape(X.shape)) - mXX = array(data=XX, mask=m.reshape(XX.shape)) - - m2 = np.array([1, 1, 0, 1, 0, 0, - 1, 1, 1, 1, 0, 1, - 0, 0, 1, 1, 0, 1, - 0, 0, 0, 1, 1, 1, - 1, 0, 0, 1, 1, 0, - 0, 0, 1, 0, 1, 1]) - m2x = array(data=x, mask=m2) - m2X = array(data=X, mask=m2.reshape(X.shape)) - m2XX = array(data=XX, mask=m2.reshape(XX.shape)) - self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) - - def test_varstd(self): - # Tests var & std on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d - assert_almost_equal(mX.var(axis=None), mX.compressed().var()) - assert_almost_equal(mX.std(axis=None), mX.compressed().std()) - assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) - assert_equal(mX.var().shape, X.var().shape) - (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) - assert_almost_equal(mX.var(axis=None, ddof=2), - mX.compressed().var(ddof=2)) - assert_almost_equal(mX.std(axis=None, ddof=2), - mX.compressed().std(ddof=2)) - for k in range(6): - assert_almost_equal(mXvar1[k], mX[k].compressed().var()) - assert_almost_equal(mXvar0[k], mX[:, k].compressed().var()) - assert_almost_equal(np.sqrt(mXvar0[k]), - mX[:, k].compressed().std()) - - -class TestMaskedArrayFunctions(object): - # Test class for miscellaneous functions. 
- - def setup(self): - x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) - y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) - m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] - xm = masked_array(x, mask=m1) - ym = masked_array(y, mask=m2) - xm.set_fill_value(1e+20) - self.info = (xm, ym) - - def test_masked_where_bool(self): - x = [1, 2] - y = masked_where(False, x) - assert_equal(y, [1, 2]) - assert_equal(y[1], 2) - - def test_masked_equal_wlist(self): - x = [1, 2, 3] - mx = masked_equal(x, 3) - assert_equal(mx, x) - assert_equal(mx._mask, [0, 0, 1]) - mx = masked_not_equal(x, 3) - assert_equal(mx, x) - assert_equal(mx._mask, [1, 1, 0]) - - def test_masked_equal_fill_value(self): - x = [1, 2, 3] - mx = masked_equal(x, 3) - assert_equal(mx._mask, [0, 0, 1]) - assert_equal(mx.fill_value, 3) - - def test_masked_where_condition(self): - # Tests masking functions. - x = array([1., 2., 3., 4., 5.]) - x[2] = masked - assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2)) - assert_equal(masked_where(greater_equal(x, 2), x), - masked_greater_equal(x, 2)) - assert_equal(masked_where(less(x, 2), x), masked_less(x, 2)) - assert_equal(masked_where(less_equal(x, 2), x), - masked_less_equal(x, 2)) - assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) - assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2)) - assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) - assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), - [99, 99, 3, 4, 5]) - - def test_masked_where_oddities(self): - # Tests some generic features. 
- atest = ones((10, 10, 10), dtype=float) - btest = zeros(atest.shape, MaskType) - ctest = masked_where(btest, atest) - assert_equal(atest, ctest) - - def test_masked_where_shape_constraint(self): - a = arange(10) - with assert_raises(IndexError): - masked_equal(1, a) - test = masked_equal(a, 1) - assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]) - - def test_masked_where_structured(self): - # test that masked_where on a structured array sets a structured - # mask (see issue #2972) - a = np.zeros(10, dtype=[("A", " 6, x) - - def test_masked_otherfunctions(self): - assert_equal(masked_inside(list(range(5)), 1, 3), - [0, 199, 199, 199, 4]) - assert_equal(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]) - assert_equal(masked_inside(array(list(range(5)), - mask=[1, 0, 0, 0, 0]), 1, 3).mask, - [1, 1, 1, 1, 0]) - assert_equal(masked_outside(array(list(range(5)), - mask=[0, 1, 0, 0, 0]), 1, 3).mask, - [1, 1, 0, 0, 1]) - assert_equal(masked_equal(array(list(range(5)), - mask=[1, 0, 0, 0, 0]), 2).mask, - [1, 0, 1, 0, 0]) - assert_equal(masked_not_equal(array([2, 2, 1, 2, 1], - mask=[1, 0, 0, 0, 0]), 2).mask, - [1, 0, 1, 0, 1]) - - def test_round(self): - a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890], - mask=[0, 1, 0, 0, 0]) - assert_equal(a.round(), [1., 2., 3., 5., 6.]) - assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7]) - assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679]) - b = empty_like(a) - a.round(out=b) - assert_equal(b, [1., 2., 3., 5., 6.]) - - x = array([1., 2., 3., 4., 5.]) - c = array([1, 1, 1, 0, 0]) - x[2] = masked - z = where(c, x, -x) - assert_equal(z, [1., 2., 0., -4., -5]) - c[0] = masked - z = where(c, x, -x) - assert_equal(z, [1., 2., 0., -4., -5]) - assert_(z[0] is masked) - assert_(z[1] is not masked) - assert_(z[2] is masked) - - def test_round_with_output(self): - # Testing round with an explicit output - - xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) - xm[:, 0] = xm[0] = xm[-1, -1] = masked - - # A 
ndarray as explicit input - output = np.empty((3, 4), dtype=float) - output.fill(-9999) - result = np.round(xm, decimals=2, out=output) - # ... the result should be the given output - assert_(result is output) - assert_equal(result, xm.round(decimals=2, out=output)) - - output = empty((3, 4), dtype=float) - result = xm.round(decimals=2, out=output) - assert_(result is output) - - def test_round_with_scalar(self): - # Testing round with scalar/zero dimension input - # GH issue 2244 - a = array(1.1, mask=[False]) - assert_equal(a.round(), 1) - - a = array(1.1, mask=[True]) - assert_(a.round() is masked) - - a = array(1.1, mask=[False]) - output = np.empty(1, dtype=float) - output.fill(-9999) - a.round(out=output) - assert_equal(output, 1) - - a = array(1.1, mask=[False]) - output = array(-9999., mask=[True]) - a.round(out=output) - assert_equal(output[()], 1) - - a = array(1.1, mask=[True]) - output = array(-9999., mask=[False]) - a.round(out=output) - assert_(output[()] is masked) - - def test_identity(self): - a = identity(5) - assert_(isinstance(a, MaskedArray)) - assert_equal(a, np.identity(5)) - - def test_power(self): - x = -1.1 - assert_almost_equal(power(x, 2.), 1.21) - assert_(power(x, masked) is masked) - x = array([-1.1, -1.1, 1.1, 1.1, 0.]) - b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1]) - y = power(x, b) - assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.]) - assert_equal(y._mask, [1, 0, 0, 0, 1]) - b.mask = nomask - y = power(x, b) - assert_equal(y._mask, [1, 0, 0, 0, 1]) - z = x ** b - assert_equal(z._mask, y._mask) - assert_almost_equal(z, y) - assert_almost_equal(z._data, y._data) - x **= b - assert_equal(x._mask, y._mask) - assert_almost_equal(x, y) - assert_almost_equal(x._data, y._data) - - def test_power_with_broadcasting(self): - # Test power w/ broadcasting - a2 = np.array([[1., 2., 3.], [4., 5., 6.]]) - a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]]) - b1 = np.array([2, 4, 3]) - b2 = np.array([b1, b1]) - b2m = array(b2, 
mask=[[0, 1, 0], [0, 1, 0]]) - - ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]], - mask=[[1, 1, 0], [0, 1, 1]]) - # No broadcasting, base & exp w/ mask - test = a2m ** b2m - assert_equal(test, ctrl) - assert_equal(test.mask, ctrl.mask) - # No broadcasting, base w/ mask, exp w/o mask - test = a2m ** b2 - assert_equal(test, ctrl) - assert_equal(test.mask, a2m.mask) - # No broadcasting, base w/o mask, exp w/ mask - test = a2 ** b2m - assert_equal(test, ctrl) - assert_equal(test.mask, b2m.mask) - - ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]], - mask=[[0, 1, 0], [0, 1, 0]]) - test = b1 ** b2m - assert_equal(test, ctrl) - assert_equal(test.mask, ctrl.mask) - test = b2m ** b1 - assert_equal(test, ctrl) - assert_equal(test.mask, ctrl.mask) - - def test_where(self): - # Test the where function - x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) - y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) - m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] - xm = masked_array(x, mask=m1) - ym = masked_array(y, mask=m2) - xm.set_fill_value(1e+20) - - d = where(xm > 2, xm, -9) - assert_equal(d, [-9., -9., -9., -9., -9., 4., - -9., -9., 10., -9., -9., 3.]) - assert_equal(d._mask, xm._mask) - d = where(xm > 2, -9, ym) - assert_equal(d, [5., 0., 3., 2., -1., -9., - -9., -10., -9., 1., 0., -9.]) - assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0]) - d = where(xm > 2, xm, masked) - assert_equal(d, [-9., -9., -9., -9., -9., 4., - -9., -9., 10., -9., -9., 3.]) - tmp = xm._mask.copy() - tmp[(xm <= 2).filled(True)] = True - assert_equal(d._mask, tmp) - - ixm = xm.astype(int) - d = where(ixm > 2, ixm, masked) - assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3]) - assert_equal(d.dtype, ixm.dtype) - - def test_where_object(self): - a = np.array(None) - b = masked_array(None) - r = b.copy() - assert_equal(np.ma.where(True, a, a), r) - 
assert_equal(np.ma.where(True, b, b), r) - - def test_where_with_masked_choice(self): - x = arange(10) - x[3] = masked - c = x >= 8 - # Set False to masked - z = where(c, x, masked) - assert_(z.dtype is x.dtype) - assert_(z[3] is masked) - assert_(z[4] is masked) - assert_(z[7] is masked) - assert_(z[8] is not masked) - assert_(z[9] is not masked) - assert_equal(x, z) - # Set True to masked - z = where(c, masked, x) - assert_(z.dtype is x.dtype) - assert_(z[3] is masked) - assert_(z[4] is not masked) - assert_(z[7] is not masked) - assert_(z[8] is masked) - assert_(z[9] is masked) - - def test_where_with_masked_condition(self): - x = array([1., 2., 3., 4., 5.]) - c = array([1, 1, 1, 0, 0]) - x[2] = masked - z = where(c, x, -x) - assert_equal(z, [1., 2., 0., -4., -5]) - c[0] = masked - z = where(c, x, -x) - assert_equal(z, [1., 2., 0., -4., -5]) - assert_(z[0] is masked) - assert_(z[1] is not masked) - assert_(z[2] is masked) - - x = arange(1, 6) - x[-1] = masked - y = arange(1, 6) * 10 - y[2] = masked - c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0]) - cm = c.filled(1) - z = where(c, x, y) - zm = where(cm, x, y) - assert_equal(z, zm) - assert_(getmask(zm) is nomask) - assert_equal(zm, [1, 2, 3, 40, 50]) - z = where(c, masked, 1) - assert_equal(z, [99, 99, 99, 1, 1]) - z = where(c, 1, masked) - assert_equal(z, [99, 1, 1, 99, 99]) - - def test_where_type(self): - # Test the type conservation with where - x = np.arange(4, dtype=np.int32) - y = np.arange(4, dtype=np.float32) * 2.2 - test = where(x > 1.5, y, x).dtype - control = np.find_common_type([np.int32, np.float32], []) - assert_equal(test, control) - - def test_where_broadcast(self): - # Issue 8599 - x = np.arange(9).reshape(3, 3) - y = np.zeros(3) - core = np.where([1, 0, 1], x, y) - ma = where([1, 0, 1], x, y) - - assert_equal(core, ma) - assert_equal(core.dtype, ma.dtype) - - def test_where_structured(self): - # Issue 8600 - dt = np.dtype([('a', int), ('b', int)]) - x = np.array([(1, 2), (3, 4), (5, 6)], 
dtype=dt) - y = np.array((10, 20), dtype=dt) - core = np.where([0, 1, 1], x, y) - ma = np.where([0, 1, 1], x, y) - - assert_equal(core, ma) - assert_equal(core.dtype, ma.dtype) - - def test_where_structured_masked(self): - dt = np.dtype([('a', int), ('b', int)]) - x = np.array([(1, 2), (3, 4), (5, 6)], dtype=dt) - - ma = where([0, 1, 1], x, masked) - expected = masked_where([1, 0, 0], x) - - assert_equal(ma.dtype, expected.dtype) - assert_equal(ma, expected) - assert_equal(ma.mask, expected.mask) - - def test_choose(self): - # Test choose - choices = [[0, 1, 2, 3], [10, 11, 12, 13], - [20, 21, 22, 23], [30, 31, 32, 33]] - chosen = choose([2, 3, 1, 0], choices) - assert_equal(chosen, array([20, 31, 12, 3])) - chosen = choose([2, 4, 1, 0], choices, mode='clip') - assert_equal(chosen, array([20, 31, 12, 3])) - chosen = choose([2, 4, 1, 0], choices, mode='wrap') - assert_equal(chosen, array([20, 1, 12, 3])) - # Check with some masked indices - indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1]) - chosen = choose(indices_, choices, mode='wrap') - assert_equal(chosen, array([99, 1, 12, 99])) - assert_equal(chosen.mask, [1, 0, 0, 1]) - # Check with some masked choices - choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1], - [1, 0, 0, 0], [0, 0, 0, 0]]) - indices_ = [2, 3, 1, 0] - chosen = choose(indices_, choices, mode='wrap') - assert_equal(chosen, array([20, 31, 12, 3])) - assert_equal(chosen.mask, [1, 0, 0, 1]) - - def test_choose_with_out(self): - # Test choose with an explicit out keyword - choices = [[0, 1, 2, 3], [10, 11, 12, 13], - [20, 21, 22, 23], [30, 31, 32, 33]] - store = empty(4, dtype=int) - chosen = choose([2, 3, 1, 0], choices, out=store) - assert_equal(store, array([20, 31, 12, 3])) - assert_(store is chosen) - # Check with some masked indices + out - store = empty(4, dtype=int) - indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1]) - chosen = choose(indices_, choices, mode='wrap', out=store) - assert_equal(store, array([99, 31, 12, 99])) - 
assert_equal(store.mask, [1, 0, 0, 1]) - # Check with some masked choices + out ina ndarray ! - choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1], - [1, 0, 0, 0], [0, 0, 0, 0]]) - indices_ = [2, 3, 1, 0] - store = empty(4, dtype=int).view(ndarray) - chosen = choose(indices_, choices, mode='wrap', out=store) - assert_equal(store, array([999999, 31, 12, 999999])) - - def test_reshape(self): - a = arange(10) - a[0] = masked - # Try the default - b = a.reshape((5, 2)) - assert_equal(b.shape, (5, 2)) - assert_(b.flags['C']) - # Try w/ arguments as list instead of tuple - b = a.reshape(5, 2) - assert_equal(b.shape, (5, 2)) - assert_(b.flags['C']) - # Try w/ order - b = a.reshape((5, 2), order='F') - assert_equal(b.shape, (5, 2)) - assert_(b.flags['F']) - # Try w/ order - b = a.reshape(5, 2, order='F') - assert_equal(b.shape, (5, 2)) - assert_(b.flags['F']) - - c = np.reshape(a, (2, 5)) - assert_(isinstance(c, MaskedArray)) - assert_equal(c.shape, (2, 5)) - assert_(c[0, 0] is masked) - assert_(c.flags['C']) - - def test_make_mask_descr(self): - # Flexible - ntype = [('a', float), ('b', float)] - test = make_mask_descr(ntype) - assert_equal(test, [('a', bool), ('b', bool)]) - assert_(test is make_mask_descr(test)) - - # Standard w/ shape - ntype = (float, 2) - test = make_mask_descr(ntype) - assert_equal(test, (bool, 2)) - assert_(test is make_mask_descr(test)) - - # Standard standard - ntype = float - test = make_mask_descr(ntype) - assert_equal(test, np.dtype(bool)) - assert_(test is make_mask_descr(test)) - - # Nested - ntype = [('a', float), ('b', [('ba', float), ('bb', float)])] - test = make_mask_descr(ntype) - control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])]) - assert_equal(test, control) - assert_(test is make_mask_descr(test)) - - # Named+ shape - ntype = [('a', (float, 2))] - test = make_mask_descr(ntype) - assert_equal(test, np.dtype([('a', (bool, 2))])) - assert_(test is make_mask_descr(test)) - - # 2 names - ntype = [(('A', 'a'), 
float)] - test = make_mask_descr(ntype) - assert_equal(test, np.dtype([(('A', 'a'), bool)])) - assert_(test is make_mask_descr(test)) - - # nested boolean types should preserve identity - base_type = np.dtype([('a', int, 3)]) - base_mtype = make_mask_descr(base_type) - sub_type = np.dtype([('a', int), ('b', base_mtype)]) - test = make_mask_descr(sub_type) - assert_equal(test, np.dtype([('a', bool), ('b', [('a', bool, 3)])])) - assert_(test.fields['b'][0] is base_mtype) - - def test_make_mask(self): - # Test make_mask - # w/ a list as an input - mask = [0, 1] - test = make_mask(mask) - assert_equal(test.dtype, MaskType) - assert_equal(test, [0, 1]) - # w/ a ndarray as an input - mask = np.array([0, 1], dtype=bool) - test = make_mask(mask) - assert_equal(test.dtype, MaskType) - assert_equal(test, [0, 1]) - # w/ a flexible-type ndarray as an input - use default - mdtype = [('a', bool), ('b', bool)] - mask = np.array([(0, 0), (0, 1)], dtype=mdtype) - test = make_mask(mask) - assert_equal(test.dtype, MaskType) - assert_equal(test, [1, 1]) - # w/ a flexible-type ndarray as an input - use input dtype - mdtype = [('a', bool), ('b', bool)] - mask = np.array([(0, 0), (0, 1)], dtype=mdtype) - test = make_mask(mask, dtype=mask.dtype) - assert_equal(test.dtype, mdtype) - assert_equal(test, mask) - # w/ a flexible-type ndarray as an input - use input dtype - mdtype = [('a', float), ('b', float)] - bdtype = [('a', bool), ('b', bool)] - mask = np.array([(0, 0), (0, 1)], dtype=mdtype) - test = make_mask(mask, dtype=mask.dtype) - assert_equal(test.dtype, bdtype) - assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype)) - # Ensure this also works for void - mask = np.array((False, True), dtype='?,?')[()] - assert_(isinstance(mask, np.void)) - test = make_mask(mask, dtype=mask.dtype) - assert_equal(test, mask) - assert_(test is not mask) - mask = np.array((0, 1), dtype='i4,i4')[()] - test2 = make_mask(mask, dtype=mask.dtype) - assert_equal(test2, test) - # test that nomask is 
returned when m is nomask. - bools = [True, False] - dtypes = [MaskType, float] - msgformat = 'copy=%s, shrink=%s, dtype=%s' - for cpy, shr, dt in itertools.product(bools, bools, dtypes): - res = make_mask(nomask, copy=cpy, shrink=shr, dtype=dt) - assert_(res is nomask, msgformat % (cpy, shr, dt)) - - def test_mask_or(self): - # Initialize - mtype = [('a', bool), ('b', bool)] - mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype) - # Test using nomask as input - test = mask_or(mask, nomask) - assert_equal(test, mask) - test = mask_or(nomask, mask) - assert_equal(test, mask) - # Using False as input - test = mask_or(mask, False) - assert_equal(test, mask) - # Using another array w / the same dtype - other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype) - test = mask_or(mask, other) - control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype) - assert_equal(test, control) - # Using another array w / a different dtype - othertype = [('A', bool), ('B', bool)] - other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype) - try: - test = mask_or(mask, other) - except ValueError: - pass - # Using nested arrays - dtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] - amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype) - bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype) - cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype) - assert_equal(mask_or(amask, bmask), cntrl) - - def test_flatten_mask(self): - # Tests flatten mask - # Standard dtype - mask = np.array([0, 0, 1], dtype=bool) - assert_equal(flatten_mask(mask), mask) - # Flexible dtype - mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) - test = flatten_mask(mask) - control = np.array([0, 0, 0, 1], dtype=bool) - assert_equal(test, control) - - mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] - data = [(0, (0, 0)), (0, (0, 1))] - mask = np.array(data, dtype=mdtype) - test = flatten_mask(mask) - control = np.array([0, 0, 0, 0, 0, 1], 
dtype=bool) - assert_equal(test, control) - - def test_on_ndarray(self): - # Test functions on ndarrays - a = np.array([1, 2, 3, 4]) - m = array(a, mask=False) - test = anom(a) - assert_equal(test, m.anom()) - test = reshape(a, (2, 2)) - assert_equal(test, m.reshape(2, 2)) - - def test_compress(self): - # Test compress function on ndarray and masked array - # Address Github #2495. - arr = np.arange(8) - arr.shape = 4, 2 - cond = np.array([True, False, True, True]) - control = arr[[0, 2, 3]] - test = np.ma.compress(cond, arr, axis=0) - assert_equal(test, control) - marr = np.ma.array(arr) - test = np.ma.compress(cond, marr, axis=0) - assert_equal(test, control) - - def test_compressed(self): - # Test ma.compressed function. - # Address gh-4026 - a = np.ma.array([1, 2]) - test = np.ma.compressed(a) - assert_(type(test) is np.ndarray) - - # Test case when input data is ndarray subclass - class A(np.ndarray): - pass - - a = np.ma.array(A(shape=0)) - test = np.ma.compressed(a) - assert_(type(test) is A) - - # Test that compress flattens - test = np.ma.compressed([[1],[2]]) - assert_equal(test.ndim, 1) - test = np.ma.compressed([[[[[1]]]]]) - assert_equal(test.ndim, 1) - - # Test case when input is MaskedArray subclass - class M(MaskedArray): - pass - - test = np.ma.compressed(M(shape=(0,1,2))) - assert_equal(test.ndim, 1) - - # with .compressed() overridden - class M(MaskedArray): - def compressed(self): - return 42 - - test = np.ma.compressed(M(shape=(0,1,2))) - assert_equal(test, 42) - - def test_convolve(self): - a = masked_equal(np.arange(5), 2) - b = np.array([1, 1]) - test = np.ma.convolve(a, b) - assert_equal(test, masked_equal([0, 1, -1, -1, 7, 4], -1)) - - test = np.ma.convolve(a, b, propagate_mask=False) - assert_equal(test, masked_equal([0, 1, 1, 3, 7, 4], -1)) - - test = np.ma.convolve([1, 1], [1, 1, 1]) - assert_equal(test, masked_equal([1, 2, 2, 1], -1)) - - a = [1, 1] - b = masked_equal([1, -1, -1, 1], -1) - test = np.ma.convolve(a, b, 
propagate_mask=False) - assert_equal(test, masked_equal([1, 1, -1, 1, 1], -1)) - test = np.ma.convolve(a, b, propagate_mask=True) - assert_equal(test, masked_equal([-1, -1, -1, -1, -1], -1)) - - -class TestMaskedFields(object): - - def setup(self): - ilist = [1, 2, 3, 4, 5] - flist = [1.1, 2.2, 3.3, 4.4, 5.5] - slist = ['one', 'two', 'three', 'four', 'five'] - ddtype = [('a', int), ('b', float), ('c', '|S8')] - mdtype = [('a', bool), ('b', bool), ('c', bool)] - mask = [0, 1, 0, 0, 1] - base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) - self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype) - - def test_set_records_masks(self): - base = self.data['base'] - mdtype = self.data['mdtype'] - # Set w/ nomask or masked - base.mask = nomask - assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) - base.mask = masked - assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) - # Set w/ simple boolean - base.mask = False - assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) - base.mask = True - assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) - # Set w/ list - base.mask = [0, 0, 0, 1, 1] - assert_equal_records(base._mask, - np.array([(x, x, x) for x in [0, 0, 0, 1, 1]], - dtype=mdtype)) - - def test_set_record_element(self): - # Check setting an element of a record) - base = self.data['base'] - (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) - base[0] = (pi, pi, 'pi') - - assert_equal(base_a.dtype, int) - assert_equal(base_a._data, [3, 2, 3, 4, 5]) - - assert_equal(base_b.dtype, float) - assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5]) - - assert_equal(base_c.dtype, '|S8') - assert_equal(base_c._data, - [b'pi', b'two', b'three', b'four', b'five']) - - def test_set_record_slice(self): - base = self.data['base'] - (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) - base[:3] = (pi, pi, 'pi') - - assert_equal(base_a.dtype, int) - assert_equal(base_a._data, 
[3, 3, 3, 4, 5]) - - assert_equal(base_b.dtype, float) - assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5]) - - assert_equal(base_c.dtype, '|S8') - assert_equal(base_c._data, - [b'pi', b'pi', b'pi', b'four', b'five']) - - def test_mask_element(self): - "Check record access" - base = self.data['base'] - base[0] = masked - - for n in ('a', 'b', 'c'): - assert_equal(base[n].mask, [1, 1, 0, 0, 1]) - assert_equal(base[n]._data, base._data[n]) - - def test_getmaskarray(self): - # Test getmaskarray on flexible dtype - ndtype = [('a', int), ('b', float)] - test = empty(3, dtype=ndtype) - assert_equal(getmaskarray(test), - np.array([(0, 0), (0, 0), (0, 0)], - dtype=[('a', '|b1'), ('b', '|b1')])) - test[:] = masked - assert_equal(getmaskarray(test), - np.array([(1, 1), (1, 1), (1, 1)], - dtype=[('a', '|b1'), ('b', '|b1')])) - - def test_view(self): - # Test view w/ flexible dtype - iterator = list(zip(np.arange(10), np.random.rand(10))) - data = np.array(iterator) - a = array(iterator, dtype=[('a', float), ('b', float)]) - a.mask[0] = (1, 0) - controlmask = np.array([1] + 19 * [0], dtype=bool) - # Transform globally to simple dtype - test = a.view(float) - assert_equal(test, data.ravel()) - assert_equal(test.mask, controlmask) - # Transform globally to dty - test = a.view((float, 2)) - assert_equal(test, data) - assert_equal(test.mask, controlmask.reshape(-1, 2)) - - def test_getitem(self): - ndtype = [('a', float), ('b', float)] - a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype) - a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1], - [1, 0, 0, 0, 0, 0, 0, 0, 1, 0])), - dtype=[('a', bool), ('b', bool)]) - - def _test_index(i): - assert_equal(type(a[i]), mvoid) - assert_equal_records(a[i]._data, a._data[i]) - assert_equal_records(a[i]._mask, a._mask[i]) - - assert_equal(type(a[i, ...]), MaskedArray) - assert_equal_records(a[i,...]._data, a._data[i,...]) - assert_equal_records(a[i,...]._mask, a._mask[i,...]) - - _test_index(1) # No mask - 
_test_index(0) # One element masked - _test_index(-2) # All element masked - - def test_setitem(self): - # Issue 4866: check that one can set individual items in [record][col] - # and [col][record] order - ndtype = np.dtype([('a', float), ('b', int)]) - ma = np.ma.MaskedArray([(1.0, 1), (2.0, 2)], dtype=ndtype) - ma['a'][1] = 3.0 - assert_equal(ma['a'], np.array([1.0, 3.0])) - ma[1]['a'] = 4.0 - assert_equal(ma['a'], np.array([1.0, 4.0])) - # Issue 2403 - mdtype = np.dtype([('a', bool), ('b', bool)]) - # soft mask - control = np.array([(False, True), (True, True)], dtype=mdtype) - a = np.ma.masked_all((2,), dtype=ndtype) - a['a'][0] = 2 - assert_equal(a.mask, control) - a = np.ma.masked_all((2,), dtype=ndtype) - a[0]['a'] = 2 - assert_equal(a.mask, control) - # hard mask - control = np.array([(True, True), (True, True)], dtype=mdtype) - a = np.ma.masked_all((2,), dtype=ndtype) - a.harden_mask() - a['a'][0] = 2 - assert_equal(a.mask, control) - a = np.ma.masked_all((2,), dtype=ndtype) - a.harden_mask() - a[0]['a'] = 2 - assert_equal(a.mask, control) - - def test_setitem_scalar(self): - # 8510 - mask_0d = np.ma.masked_array(1, mask=True) - arr = np.ma.arange(3) - arr[0] = mask_0d - assert_array_equal(arr.mask, [True, False, False]) - - def test_element_len(self): - # check that len() works for mvoid (Github issue #576) - for rec in self.data['base']: - assert_equal(len(rec), len(self.data['ddtype'])) - - -class TestMaskedObjectArray(object): - - def test_getitem(self): - arr = np.ma.array([None, None]) - for dt in [float, object]: - a0 = np.eye(2).astype(dt) - a1 = np.eye(3).astype(dt) - arr[0] = a0 - arr[1] = a1 - - assert_(arr[0] is a0) - assert_(arr[1] is a1) - assert_(isinstance(arr[0,...], MaskedArray)) - assert_(isinstance(arr[1,...], MaskedArray)) - assert_(arr[0,...][()] is a0) - assert_(arr[1,...][()] is a1) - - arr[0] = np.ma.masked - - assert_(arr[1] is a1) - assert_(isinstance(arr[0,...], MaskedArray)) - assert_(isinstance(arr[1,...], MaskedArray)) - 
assert_equal(arr[0,...].mask, True) - assert_(arr[1,...][()] is a1) - - # gh-5962 - object arrays of arrays do something special - assert_equal(arr[0].data, a0) - assert_equal(arr[0].mask, True) - assert_equal(arr[0,...][()].data, a0) - assert_equal(arr[0,...][()].mask, True) - - def test_nested_ma(self): - - arr = np.ma.array([None, None]) - # set the first object to be an unmasked masked constant. A little fiddly - arr[0,...] = np.array([np.ma.masked], object)[0,...] - - # check the above line did what we were aiming for - assert_(arr.data[0] is np.ma.masked) - - # test that getitem returned the value by identity - assert_(arr[0] is np.ma.masked) - - # now mask the masked value! - arr[0] = np.ma.masked - assert_(arr[0] is np.ma.masked) - - -class TestMaskedView(object): - - def setup(self): - iterator = list(zip(np.arange(10), np.random.rand(10))) - data = np.array(iterator) - a = array(iterator, dtype=[('a', float), ('b', float)]) - a.mask[0] = (1, 0) - controlmask = np.array([1] + 19 * [0], dtype=bool) - self.data = (data, a, controlmask) - - def test_view_to_nothing(self): - (data, a, controlmask) = self.data - test = a.view() - assert_(isinstance(test, MaskedArray)) - assert_equal(test._data, a._data) - assert_equal(test._mask, a._mask) - - def test_view_to_type(self): - (data, a, controlmask) = self.data - test = a.view(np.ndarray) - assert_(not isinstance(test, MaskedArray)) - assert_equal(test, a._data) - assert_equal_records(test, data.view(a.dtype).squeeze()) - - def test_view_to_simple_dtype(self): - (data, a, controlmask) = self.data - # View globally - test = a.view(float) - assert_(isinstance(test, MaskedArray)) - assert_equal(test, data.ravel()) - assert_equal(test.mask, controlmask) - - def test_view_to_flexible_dtype(self): - (data, a, controlmask) = self.data - - test = a.view([('A', float), ('B', float)]) - assert_equal(test.mask.dtype.names, ('A', 'B')) - assert_equal(test['A'], a['a']) - assert_equal(test['B'], a['b']) - - test = 
a[0].view([('A', float), ('B', float)]) - assert_(isinstance(test, MaskedArray)) - assert_equal(test.mask.dtype.names, ('A', 'B')) - assert_equal(test['A'], a['a'][0]) - assert_equal(test['B'], a['b'][0]) - - test = a[-1].view([('A', float), ('B', float)]) - assert_(isinstance(test, MaskedArray)) - assert_equal(test.dtype.names, ('A', 'B')) - assert_equal(test['A'], a['a'][-1]) - assert_equal(test['B'], a['b'][-1]) - - def test_view_to_subdtype(self): - (data, a, controlmask) = self.data - # View globally - test = a.view((float, 2)) - assert_(isinstance(test, MaskedArray)) - assert_equal(test, data) - assert_equal(test.mask, controlmask.reshape(-1, 2)) - # View on 1 masked element - test = a[0].view((float, 2)) - assert_(isinstance(test, MaskedArray)) - assert_equal(test, data[0]) - assert_equal(test.mask, (1, 0)) - # View on 1 unmasked element - test = a[-1].view((float, 2)) - assert_(isinstance(test, MaskedArray)) - assert_equal(test, data[-1]) - - def test_view_to_dtype_and_type(self): - (data, a, controlmask) = self.data - - test = a.view((float, 2), np.recarray) - assert_equal(test, data) - assert_(isinstance(test, np.recarray)) - assert_(not isinstance(test, MaskedArray)) - - -class TestOptionalArgs(object): - def test_ndarrayfuncs(self): - # test axis arg behaves the same as ndarray (including multiple axes) - - d = np.arange(24.0).reshape((2,3,4)) - m = np.zeros(24, dtype=bool).reshape((2,3,4)) - # mask out last element of last dimension - m[:,:,-1] = True - a = np.ma.array(d, mask=m) - - def testaxis(f, a, d): - numpy_f = numpy.__getattribute__(f) - ma_f = np.ma.__getattribute__(f) - - # test axis arg - assert_equal(ma_f(a, axis=1)[...,:-1], numpy_f(d[...,:-1], axis=1)) - assert_equal(ma_f(a, axis=(0,1))[...,:-1], - numpy_f(d[...,:-1], axis=(0,1))) - - def testkeepdims(f, a, d): - numpy_f = numpy.__getattribute__(f) - ma_f = np.ma.__getattribute__(f) - - # test keepdims arg - assert_equal(ma_f(a, keepdims=True).shape, - numpy_f(d, keepdims=True).shape) - 
assert_equal(ma_f(a, keepdims=False).shape, - numpy_f(d, keepdims=False).shape) - - # test both at once - assert_equal(ma_f(a, axis=1, keepdims=True)[...,:-1], - numpy_f(d[...,:-1], axis=1, keepdims=True)) - assert_equal(ma_f(a, axis=(0,1), keepdims=True)[...,:-1], - numpy_f(d[...,:-1], axis=(0,1), keepdims=True)) - - for f in ['sum', 'prod', 'mean', 'var', 'std']: - testaxis(f, a, d) - testkeepdims(f, a, d) - - for f in ['min', 'max']: - testaxis(f, a, d) - - d = (np.arange(24).reshape((2,3,4))%2 == 0) - a = np.ma.array(d, mask=m) - for f in ['all', 'any']: - testaxis(f, a, d) - testkeepdims(f, a, d) - - def test_count(self): - # test np.ma.count specially - - d = np.arange(24.0).reshape((2,3,4)) - m = np.zeros(24, dtype=bool).reshape((2,3,4)) - m[:,0,:] = True - a = np.ma.array(d, mask=m) - - assert_equal(count(a), 16) - assert_equal(count(a, axis=1), 2*ones((2,4))) - assert_equal(count(a, axis=(0,1)), 4*ones((4,))) - assert_equal(count(a, keepdims=True), 16*ones((1,1,1))) - assert_equal(count(a, axis=1, keepdims=True), 2*ones((2,1,4))) - assert_equal(count(a, axis=(0,1), keepdims=True), 4*ones((1,1,4))) - assert_equal(count(a, axis=-2), 2*ones((2,4))) - assert_raises(ValueError, count, a, axis=(1,1)) - assert_raises(np.AxisError, count, a, axis=3) - - # check the 'nomask' path - a = np.ma.array(d, mask=nomask) - - assert_equal(count(a), 24) - assert_equal(count(a, axis=1), 3*ones((2,4))) - assert_equal(count(a, axis=(0,1)), 6*ones((4,))) - assert_equal(count(a, keepdims=True), 24*ones((1,1,1))) - assert_equal(np.ndim(count(a, keepdims=True)), 3) - assert_equal(count(a, axis=1, keepdims=True), 3*ones((2,1,4))) - assert_equal(count(a, axis=(0,1), keepdims=True), 6*ones((1,1,4))) - assert_equal(count(a, axis=-2), 3*ones((2,4))) - assert_raises(ValueError, count, a, axis=(1,1)) - assert_raises(np.AxisError, count, a, axis=3) - - # check the 'masked' singleton - assert_equal(count(np.ma.masked), 0) - - # check 0-d arrays do not allow axis > 0 - 
assert_raises(np.AxisError, count, np.ma.array(1), axis=1) - - -class TestMaskedConstant(object): - def _do_add_test(self, add): - # sanity check - assert_(add(np.ma.masked, 1) is np.ma.masked) - - # now try with a vector - vector = np.array([1, 2, 3]) - result = add(np.ma.masked, vector) - - # lots of things could go wrong here - assert_(result is not np.ma.masked) - assert_(not isinstance(result, np.ma.core.MaskedConstant)) - assert_equal(result.shape, vector.shape) - assert_equal(np.ma.getmask(result), np.ones(vector.shape, dtype=bool)) - - def test_ufunc(self): - self._do_add_test(np.add) - - def test_operator(self): - self._do_add_test(lambda a, b: a + b) - - def test_ctor(self): - m = np.ma.array(np.ma.masked) - - # most importantly, we do not want to create a new MaskedConstant - # instance - assert_(not isinstance(m, np.ma.core.MaskedConstant)) - assert_(m is not np.ma.masked) - - def test_repr(self): - # copies should not exist, but if they do, it should be obvious that - # something is wrong - assert_equal(repr(np.ma.masked), 'masked') - - # create a new instance in a weird way - masked2 = np.ma.MaskedArray.__new__(np.ma.core.MaskedConstant) - assert_not_equal(repr(masked2), 'masked') - - def test_pickle(self): - from io import BytesIO - - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - with BytesIO() as f: - pickle.dump(np.ma.masked, f, protocol=proto) - f.seek(0) - res = pickle.load(f) - assert_(res is np.ma.masked) - - def test_copy(self): - # gh-9328 - # copy is a no-op, like it is with np.True_ - assert_equal( - np.ma.masked.copy() is np.ma.masked, - np.True_.copy() is np.True_) - - def test__copy(self): - import copy - assert_( - copy.copy(np.ma.masked) is np.ma.masked) - - def test_deepcopy(self): - import copy - assert_( - copy.deepcopy(np.ma.masked) is np.ma.masked) - - def test_immutable(self): - orig = np.ma.masked - assert_raises(np.ma.core.MaskError, operator.setitem, orig, (), 1) - assert_raises(ValueError,operator.setitem, orig.data, 
(), 1) - assert_raises(ValueError, operator.setitem, orig.mask, (), False) - - view = np.ma.masked.view(np.ma.MaskedArray) - assert_raises(ValueError, operator.setitem, view, (), 1) - assert_raises(ValueError, operator.setitem, view.data, (), 1) - assert_raises(ValueError, operator.setitem, view.mask, (), False) - - def test_coercion_int(self): - a_i = np.zeros((), int) - assert_raises(MaskError, operator.setitem, a_i, (), np.ma.masked) - assert_raises(MaskError, int, np.ma.masked) - - @pytest.mark.skipif(sys.version_info.major == 3, - reason="long doesn't exist in Python 3") - def test_coercion_long(self): - assert_raises(MaskError, long, np.ma.masked) - - def test_coercion_float(self): - a_f = np.zeros((), float) - assert_warns(UserWarning, operator.setitem, a_f, (), np.ma.masked) - assert_(np.isnan(a_f[()])) - - @pytest.mark.xfail(reason="See gh-9750") - def test_coercion_unicode(self): - a_u = np.zeros((), 'U10') - a_u[()] = np.ma.masked - assert_equal(a_u[()], u'--') - - @pytest.mark.xfail(reason="See gh-9750") - def test_coercion_bytes(self): - a_b = np.zeros((), 'S10') - a_b[()] = np.ma.masked - assert_equal(a_b[()], b'--') - - def test_subclass(self): - # https://github.com/astropy/astropy/issues/6645 - class Sub(type(np.ma.masked)): pass - - a = Sub() - assert_(a is Sub()) - assert_(a is not np.ma.masked) - assert_not_equal(repr(a), 'masked') - - def test_attributes_readonly(self): - assert_raises(AttributeError, setattr, np.ma.masked, 'shape', (1,)) - assert_raises(AttributeError, setattr, np.ma.masked, 'dtype', np.int64) - - -class TestMaskedWhereAliases(object): - - # TODO: Test masked_object, masked_equal, ... 
- - def test_masked_values(self): - res = masked_values(np.array([-32768.0]), np.int16(-32768)) - assert_equal(res.mask, [True]) - - res = masked_values(np.inf, np.inf) - assert_equal(res.mask, True) - - res = np.ma.masked_values(np.inf, -np.inf) - assert_equal(res.mask, False) - - res = np.ma.masked_values([1, 2, 3, 4], 5, shrink=True) - assert_(res.mask is np.ma.nomask) - - res = np.ma.masked_values([1, 2, 3, 4], 5, shrink=False) - assert_equal(res.mask, [False] * 4) - - -def test_masked_array(): - a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0]) - assert_equal(np.argwhere(a), [[1], [3]]) - -def test_append_masked_array(): - a = np.ma.masked_equal([1,2,3], value=2) - b = np.ma.masked_equal([4,3,2], value=2) - - result = np.ma.append(a, b) - expected_data = [1, 2, 3, 4, 3, 2] - expected_mask = [False, True, False, False, False, True] - assert_array_equal(result.data, expected_data) - assert_array_equal(result.mask, expected_mask) - - a = np.ma.masked_all((2,2)) - b = np.ma.ones((3,1)) - - result = np.ma.append(a, b) - expected_data = [1] * 3 - expected_mask = [True] * 4 + [False] * 3 - assert_array_equal(result.data[-3], expected_data) - assert_array_equal(result.mask, expected_mask) - - result = np.ma.append(a, b, axis=None) - assert_array_equal(result.data[-3], expected_data) - assert_array_equal(result.mask, expected_mask) - - -def test_append_masked_array_along_axis(): - a = np.ma.masked_equal([1,2,3], value=2) - b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) - - # When `axis` is specified, `values` must have the correct shape. 
- assert_raises(ValueError, np.ma.append, a, b, axis=0) - - result = np.ma.append(a[np.newaxis,:], b, axis=0) - expected = np.ma.arange(1, 10) - expected[[1, 6]] = np.ma.masked - expected = expected.reshape((3,3)) - assert_array_equal(result.data, expected.data) - assert_array_equal(result.mask, expected.mask) - - -def test_default_fill_value_complex(): - # regression test for Python 3, where 'unicode' was not defined - assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j) - - -def test_ufunc_with_output(): - # check that giving an output argument always returns that output. - # Regression test for gh-8416. - x = array([1., 2., 3.], mask=[0, 0, 1]) - y = np.add(x, 1., out=x) - assert_(y is x) - - -def test_ufunc_with_out_varied(): - """ Test that masked arrays are immune to gh-10459 """ - # the mask of the output should not affect the result, however it is passed - a = array([ 1, 2, 3], mask=[1, 0, 0]) - b = array([10, 20, 30], mask=[1, 0, 0]) - out = array([ 0, 0, 0], mask=[0, 0, 1]) - expected = array([11, 22, 33], mask=[1, 0, 0]) - - out_pos = out.copy() - res_pos = np.add(a, b, out_pos) - - out_kw = out.copy() - res_kw = np.add(a, b, out=out_kw) - - out_tup = out.copy() - res_tup = np.add(a, b, out=(out_tup,)) - - assert_equal(res_kw.mask, expected.mask) - assert_equal(res_kw.data, expected.data) - assert_equal(res_tup.mask, expected.mask) - assert_equal(res_tup.data, expected.data) - assert_equal(res_pos.mask, expected.mask) - assert_equal(res_pos.data, expected.data) - - -def test_astype_mask_ordering(): - descr = [('v', int, 3), ('x', [('y', float)])] - x = array([ - [([1, 2, 3], (1.0,)), ([1, 2, 3], (2.0,))], - [([1, 2, 3], (3.0,)), ([1, 2, 3], (4.0,))]], dtype=descr) - x[0]['v'][0] = np.ma.masked - - x_a = x.astype(descr) - assert x_a.dtype.names == np.dtype(descr).names - assert x_a.mask.dtype.names == np.dtype(descr).names - assert_equal(x, x_a) - - assert_(x is x.astype(x.dtype, copy=False)) - assert_equal(type(x.astype(x.dtype, subok=False)), np.ndarray) 
- - x_f = x.astype(x.dtype, order='F') - assert_(x_f.flags.f_contiguous) - assert_(x_f.mask.flags.f_contiguous) - - # Also test the same indirectly, via np.array - x_a2 = np.array(x, dtype=descr, subok=True) - assert x_a2.dtype.names == np.dtype(descr).names - assert x_a2.mask.dtype.names == np.dtype(descr).names - assert_equal(x, x_a2) - - assert_(x is np.array(x, dtype=descr, copy=False, subok=True)) - - x_f2 = np.array(x, dtype=x.dtype, order='F', subok=True) - assert_(x_f2.flags.f_contiguous) - assert_(x_f2.mask.flags.f_contiguous) - - -@pytest.mark.parametrize('dt1', num_dts, ids=num_ids) -@pytest.mark.parametrize('dt2', num_dts, ids=num_ids) -@pytest.mark.filterwarnings('ignore::numpy.ComplexWarning') -def test_astype_basic(dt1, dt2): - # See gh-12070 - src = np.ma.array(ones(3, dt1), fill_value=1) - dst = src.astype(dt2) - - assert_(src.fill_value == 1) - assert_(src.dtype == dt1) - assert_(src.fill_value.dtype == dt1) - - assert_(dst.fill_value == 1) - assert_(dst.dtype == dt2) - assert_(dst.fill_value.dtype == dt2) - - assert_equal(src, dst) - - -def test_fieldless_void(): - dt = np.dtype([]) # a void dtype with no fields - x = np.empty(4, dt) - - # these arrays contain no values, so there's little to test - but this - # shouldn't crash - mx = np.ma.array(x) - assert_equal(mx.dtype, x.dtype) - assert_equal(mx.shape, x.shape) - - mx = np.ma.array(x, mask=x) - assert_equal(mx.dtype, x.dtype) - assert_equal(mx.shape, x.shape) - - -def test_mask_shape_assignment_does_not_break_masked(): - a = np.ma.masked - b = np.ma.array(1, mask=a.mask) - b.shape = (1,) - assert_equal(a.mask.shape, ()) diff --git a/venv/lib/python3.7/site-packages/numpy/ma/tests/test_deprecations.py b/venv/lib/python3.7/site-packages/numpy/ma/tests/test_deprecations.py deleted file mode 100644 index 72cc29a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/ma/tests/test_deprecations.py +++ /dev/null @@ -1,70 +0,0 @@ -"""Test deprecation and future warnings. 
- -""" -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.testing import assert_warns -from numpy.ma.testutils import assert_equal -from numpy.ma.core import MaskedArrayFutureWarning - -class TestArgsort(object): - """ gh-8701 """ - def _test_base(self, argsort, cls): - arr_0d = np.array(1).view(cls) - argsort(arr_0d) - - arr_1d = np.array([1, 2, 3]).view(cls) - argsort(arr_1d) - - # argsort has a bad default for >1d arrays - arr_2d = np.array([[1, 2], [3, 4]]).view(cls) - result = assert_warns( - np.ma.core.MaskedArrayFutureWarning, argsort, arr_2d) - assert_equal(result, argsort(arr_2d, axis=None)) - - # should be no warnings for explicitly specifying it - argsort(arr_2d, axis=None) - argsort(arr_2d, axis=-1) - - def test_function_ndarray(self): - return self._test_base(np.ma.argsort, np.ndarray) - - def test_function_maskedarray(self): - return self._test_base(np.ma.argsort, np.ma.MaskedArray) - - def test_method(self): - return self._test_base(np.ma.MaskedArray.argsort, np.ma.MaskedArray) - - -class TestMinimumMaximum(object): - def test_minimum(self): - assert_warns(DeprecationWarning, np.ma.minimum, np.ma.array([1, 2])) - - def test_maximum(self): - assert_warns(DeprecationWarning, np.ma.maximum, np.ma.array([1, 2])) - - def test_axis_default(self): - # NumPy 1.13, 2017-05-06 - - data1d = np.ma.arange(6) - data2d = data1d.reshape(2, 3) - - ma_min = np.ma.minimum.reduce - ma_max = np.ma.maximum.reduce - - # check that the default axis is still None, but warns on 2d arrays - result = assert_warns(MaskedArrayFutureWarning, ma_max, data2d) - assert_equal(result, ma_max(data2d, axis=None)) - - result = assert_warns(MaskedArrayFutureWarning, ma_min, data2d) - assert_equal(result, ma_min(data2d, axis=None)) - - # no warnings on 1d, as both new and old defaults are equivalent - result = ma_min(data1d) - assert_equal(result, ma_min(data1d, axis=None)) - assert_equal(result, ma_min(data1d, axis=0)) - - result = 
ma_max(data1d) - assert_equal(result, ma_max(data1d, axis=None)) - assert_equal(result, ma_max(data1d, axis=0)) diff --git a/venv/lib/python3.7/site-packages/numpy/ma/tests/test_extras.py b/venv/lib/python3.7/site-packages/numpy/ma/tests/test_extras.py deleted file mode 100644 index c75c478..0000000 --- a/venv/lib/python3.7/site-packages/numpy/ma/tests/test_extras.py +++ /dev/null @@ -1,1668 +0,0 @@ -# pylint: disable-msg=W0611, W0612, W0511 -"""Tests suite for MaskedArray. -Adapted from the original test_ma by Pierre Gerard-Marchant - -:author: Pierre Gerard-Marchant -:contact: pierregm_at_uga_dot_edu -:version: $Id: test_extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $ - -""" -from __future__ import division, absolute_import, print_function - -import warnings -import itertools -import pytest - -import numpy as np -from numpy.testing import ( - assert_warns, suppress_warnings - ) -from numpy.ma.testutils import ( - assert_, assert_array_equal, assert_equal, assert_almost_equal - ) -from numpy.ma.core import ( - array, arange, masked, MaskedArray, masked_array, getmaskarray, shape, - nomask, ones, zeros, count - ) -from numpy.ma.extras import ( - atleast_1d, atleast_2d, atleast_3d, mr_, dot, polyfit, cov, corrcoef, - median, average, unique, setxor1d, setdiff1d, union1d, intersect1d, in1d, - ediff1d, apply_over_axes, apply_along_axis, compress_nd, compress_rowcols, - mask_rowcols, clump_masked, clump_unmasked, flatnotmasked_contiguous, - notmasked_contiguous, notmasked_edges, masked_all, masked_all_like, isin, - diagflat, stack, vstack - ) - - -class TestGeneric(object): - # - def test_masked_all(self): - # Tests masked_all - # Standard dtype - test = masked_all((2,), dtype=float) - control = array([1, 1], mask=[1, 1], dtype=float) - assert_equal(test, control) - # Flexible dtype - dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']}) - test = masked_all((2,), dtype=dt) - control = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt) - 
assert_equal(test, control) - test = masked_all((2, 2), dtype=dt) - control = array([[(0, 0), (0, 0)], [(0, 0), (0, 0)]], - mask=[[(1, 1), (1, 1)], [(1, 1), (1, 1)]], - dtype=dt) - assert_equal(test, control) - # Nested dtype - dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])]) - test = masked_all((2,), dtype=dt) - control = array([(1, (1, 1)), (1, (1, 1))], - mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) - assert_equal(test, control) - test = masked_all((2,), dtype=dt) - control = array([(1, (1, 1)), (1, (1, 1))], - mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) - assert_equal(test, control) - test = masked_all((1, 1), dtype=dt) - control = array([[(1, (1, 1))]], mask=[[(1, (1, 1))]], dtype=dt) - assert_equal(test, control) - - def test_masked_all_like(self): - # Tests masked_all - # Standard dtype - base = array([1, 2], dtype=float) - test = masked_all_like(base) - control = array([1, 1], mask=[1, 1], dtype=float) - assert_equal(test, control) - # Flexible dtype - dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']}) - base = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt) - test = masked_all_like(base) - control = array([(10, 10), (10, 10)], mask=[(1, 1), (1, 1)], dtype=dt) - assert_equal(test, control) - # Nested dtype - dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])]) - control = array([(1, (1, 1)), (1, (1, 1))], - mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) - test = masked_all_like(control) - assert_equal(test, control) - - def check_clump(self, f): - for i in range(1, 7): - for j in range(2**i): - k = np.arange(i, dtype=int) - ja = np.full(i, j, dtype=int) - a = masked_array(2**k) - a.mask = (ja & (2**k)) != 0 - s = 0 - for sl in f(a): - s += a.data[sl].sum() - if f == clump_unmasked: - assert_equal(a.compressed().sum(), s) - else: - a.mask = ~a.mask - assert_equal(a.compressed().sum(), s) - - def test_clump_masked(self): - # Test clump_masked - a = masked_array(np.arange(10)) - a[[0, 1, 2, 6, 8, 9]] = masked - # - test = 
clump_masked(a) - control = [slice(0, 3), slice(6, 7), slice(8, 10)] - assert_equal(test, control) - - self.check_clump(clump_masked) - - def test_clump_unmasked(self): - # Test clump_unmasked - a = masked_array(np.arange(10)) - a[[0, 1, 2, 6, 8, 9]] = masked - test = clump_unmasked(a) - control = [slice(3, 6), slice(7, 8), ] - assert_equal(test, control) - - self.check_clump(clump_unmasked) - - def test_flatnotmasked_contiguous(self): - # Test flatnotmasked_contiguous - a = arange(10) - # No mask - test = flatnotmasked_contiguous(a) - assert_equal(test, [slice(0, a.size)]) - # mask of all false - a.mask = np.zeros(10, dtype=bool) - assert_equal(test, [slice(0, a.size)]) - # Some mask - a[(a < 3) | (a > 8) | (a == 5)] = masked - test = flatnotmasked_contiguous(a) - assert_equal(test, [slice(3, 5), slice(6, 9)]) - # - a[:] = masked - test = flatnotmasked_contiguous(a) - assert_equal(test, []) - - -class TestAverage(object): - # Several tests of average. Why so many ? Good point... - def test_testAverage1(self): - # Test of average. - ott = array([0., 1., 2., 3.], mask=[True, False, False, False]) - assert_equal(2.0, average(ott, axis=0)) - assert_equal(2.0, average(ott, weights=[1., 1., 2., 1.])) - result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True) - assert_equal(2.0, result) - assert_(wts == 4.0) - ott[:] = masked - assert_equal(average(ott, axis=0).mask, [True]) - ott = array([0., 1., 2., 3.], mask=[True, False, False, False]) - ott = ott.reshape(2, 2) - ott[:, 1] = masked - assert_equal(average(ott, axis=0), [2.0, 0.0]) - assert_equal(average(ott, axis=1).mask[0], [True]) - assert_equal([2., 0.], average(ott, axis=0)) - result, wts = average(ott, axis=0, returned=True) - assert_equal(wts, [1., 0.]) - - def test_testAverage2(self): - # More tests of average. 
- w1 = [0, 1, 1, 1, 1, 0] - w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] - x = arange(6, dtype=np.float_) - assert_equal(average(x, axis=0), 2.5) - assert_equal(average(x, axis=0, weights=w1), 2.5) - y = array([arange(6, dtype=np.float_), 2.0 * arange(6)]) - assert_equal(average(y, None), np.add.reduce(np.arange(6)) * 3. / 12.) - assert_equal(average(y, axis=0), np.arange(6) * 3. / 2.) - assert_equal(average(y, axis=1), - [average(x, axis=0), average(x, axis=0) * 2.0]) - assert_equal(average(y, None, weights=w2), 20. / 6.) - assert_equal(average(y, axis=0, weights=w2), - [0., 1., 2., 3., 4., 10.]) - assert_equal(average(y, axis=1), - [average(x, axis=0), average(x, axis=0) * 2.0]) - m1 = zeros(6) - m2 = [0, 0, 1, 1, 0, 0] - m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] - m4 = ones(6) - m5 = [0, 1, 1, 1, 1, 1] - assert_equal(average(masked_array(x, m1), axis=0), 2.5) - assert_equal(average(masked_array(x, m2), axis=0), 2.5) - assert_equal(average(masked_array(x, m4), axis=0).mask, [True]) - assert_equal(average(masked_array(x, m5), axis=0), 0.0) - assert_equal(count(average(masked_array(x, m4), axis=0)), 0) - z = masked_array(y, m3) - assert_equal(average(z, None), 20. / 6.) - assert_equal(average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5]) - assert_equal(average(z, axis=1), [2.5, 5.0]) - assert_equal(average(z, axis=0, weights=w2), - [0., 1., 99., 99., 4.0, 10.0]) - - def test_testAverage3(self): - # Yet more tests of average! 
- a = arange(6) - b = arange(6) * 3 - r1, w1 = average([[a, b], [b, a]], axis=1, returned=True) - assert_equal(shape(r1), shape(w1)) - assert_equal(r1.shape, w1.shape) - r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True) - assert_equal(shape(w2), shape(r2)) - r2, w2 = average(ones((2, 2, 3)), returned=True) - assert_equal(shape(w2), shape(r2)) - r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True) - assert_equal(shape(w2), shape(r2)) - a2d = array([[1, 2], [0, 4]], float) - a2dm = masked_array(a2d, [[False, False], [True, False]]) - a2da = average(a2d, axis=0) - assert_equal(a2da, [0.5, 3.0]) - a2dma = average(a2dm, axis=0) - assert_equal(a2dma, [1.0, 3.0]) - a2dma = average(a2dm, axis=None) - assert_equal(a2dma, 7. / 3.) - a2dma = average(a2dm, axis=1) - assert_equal(a2dma, [1.5, 4.0]) - - def test_onintegers_with_mask(self): - # Test average on integers with mask - a = average(array([1, 2])) - assert_equal(a, 1.5) - a = average(array([1, 2, 3, 4], mask=[False, False, True, True])) - assert_equal(a, 1.5) - - def test_complex(self): - # Test with complex data. - # (Regression test for https://github.com/numpy/numpy/issues/2684) - mask = np.array([[0, 0, 0, 1, 0], - [0, 1, 0, 0, 0]], dtype=bool) - a = masked_array([[0, 1+2j, 3+4j, 5+6j, 7+8j], - [9j, 0+1j, 2+3j, 4+5j, 7+7j]], - mask=mask) - - av = average(a) - expected = np.average(a.compressed()) - assert_almost_equal(av.real, expected.real) - assert_almost_equal(av.imag, expected.imag) - - av0 = average(a, axis=0) - expected0 = average(a.real, axis=0) + average(a.imag, axis=0)*1j - assert_almost_equal(av0.real, expected0.real) - assert_almost_equal(av0.imag, expected0.imag) - - av1 = average(a, axis=1) - expected1 = average(a.real, axis=1) + average(a.imag, axis=1)*1j - assert_almost_equal(av1.real, expected1.real) - assert_almost_equal(av1.imag, expected1.imag) - - # Test with the 'weights' argument. 
- wts = np.array([[0.5, 1.0, 2.0, 1.0, 0.5], - [1.0, 1.0, 1.0, 1.0, 1.0]]) - wav = average(a, weights=wts) - expected = np.average(a.compressed(), weights=wts[~mask]) - assert_almost_equal(wav.real, expected.real) - assert_almost_equal(wav.imag, expected.imag) - - wav0 = average(a, weights=wts, axis=0) - expected0 = (average(a.real, weights=wts, axis=0) + - average(a.imag, weights=wts, axis=0)*1j) - assert_almost_equal(wav0.real, expected0.real) - assert_almost_equal(wav0.imag, expected0.imag) - - wav1 = average(a, weights=wts, axis=1) - expected1 = (average(a.real, weights=wts, axis=1) + - average(a.imag, weights=wts, axis=1)*1j) - assert_almost_equal(wav1.real, expected1.real) - assert_almost_equal(wav1.imag, expected1.imag) - - -class TestConcatenator(object): - # Tests for mr_, the equivalent of r_ for masked arrays. - - def test_1d(self): - # Tests mr_ on 1D arrays. - assert_array_equal(mr_[1, 2, 3, 4, 5, 6], array([1, 2, 3, 4, 5, 6])) - b = ones(5) - m = [1, 0, 0, 0, 0] - d = masked_array(b, mask=m) - c = mr_[d, 0, 0, d] - assert_(isinstance(c, MaskedArray)) - assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) - assert_array_equal(c.mask, mr_[m, 0, 0, m]) - - def test_2d(self): - # Tests mr_ on 2D arrays. 
- a_1 = np.random.rand(5, 5) - a_2 = np.random.rand(5, 5) - m_1 = np.round_(np.random.rand(5, 5), 0) - m_2 = np.round_(np.random.rand(5, 5), 0) - b_1 = masked_array(a_1, mask=m_1) - b_2 = masked_array(a_2, mask=m_2) - # append columns - d = mr_['1', b_1, b_2] - assert_(d.shape == (5, 10)) - assert_array_equal(d[:, :5], b_1) - assert_array_equal(d[:, 5:], b_2) - assert_array_equal(d.mask, np.r_['1', m_1, m_2]) - d = mr_[b_1, b_2] - assert_(d.shape == (10, 5)) - assert_array_equal(d[:5,:], b_1) - assert_array_equal(d[5:,:], b_2) - assert_array_equal(d.mask, np.r_[m_1, m_2]) - - def test_masked_constant(self): - actual = mr_[np.ma.masked, 1] - assert_equal(actual.mask, [True, False]) - assert_equal(actual.data[1], 1) - - actual = mr_[[1, 2], np.ma.masked] - assert_equal(actual.mask, [False, False, True]) - assert_equal(actual.data[:2], [1, 2]) - - -class TestNotMasked(object): - # Tests notmasked_edges and notmasked_contiguous. - - def test_edges(self): - # Tests unmasked_edges - data = masked_array(np.arange(25).reshape(5, 5), - mask=[[0, 0, 1, 0, 0], - [0, 0, 0, 1, 1], - [1, 1, 0, 0, 0], - [0, 0, 0, 0, 0], - [1, 1, 1, 0, 0]],) - test = notmasked_edges(data, None) - assert_equal(test, [0, 24]) - test = notmasked_edges(data, 0) - assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)]) - assert_equal(test[1], [(3, 3, 3, 4, 4), (0, 1, 2, 3, 4)]) - test = notmasked_edges(data, 1) - assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 2, 0, 3)]) - assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 2, 4, 4, 4)]) - # - test = notmasked_edges(data.data, None) - assert_equal(test, [0, 24]) - test = notmasked_edges(data.data, 0) - assert_equal(test[0], [(0, 0, 0, 0, 0), (0, 1, 2, 3, 4)]) - assert_equal(test[1], [(4, 4, 4, 4, 4), (0, 1, 2, 3, 4)]) - test = notmasked_edges(data.data, -1) - assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 0, 0, 0)]) - assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 4, 4, 4, 4)]) - # - data[-2] = masked - test = notmasked_edges(data, 0) - assert_equal(test[0], 
[(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)]) - assert_equal(test[1], [(1, 1, 2, 4, 4), (0, 1, 2, 3, 4)]) - test = notmasked_edges(data, -1) - assert_equal(test[0], [(0, 1, 2, 4), (0, 0, 2, 3)]) - assert_equal(test[1], [(0, 1, 2, 4), (4, 2, 4, 4)]) - - def test_contiguous(self): - # Tests notmasked_contiguous - a = masked_array(np.arange(24).reshape(3, 8), - mask=[[0, 0, 0, 0, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1, 1, 1], - [0, 0, 0, 0, 0, 0, 1, 0]]) - tmp = notmasked_contiguous(a, None) - assert_equal(tmp, [ - slice(0, 4, None), - slice(16, 22, None), - slice(23, 24, None) - ]) - - tmp = notmasked_contiguous(a, 0) - assert_equal(tmp, [ - [slice(0, 1, None), slice(2, 3, None)], - [slice(0, 1, None), slice(2, 3, None)], - [slice(0, 1, None), slice(2, 3, None)], - [slice(0, 1, None), slice(2, 3, None)], - [slice(2, 3, None)], - [slice(2, 3, None)], - [], - [slice(2, 3, None)] - ]) - # - tmp = notmasked_contiguous(a, 1) - assert_equal(tmp, [ - [slice(0, 4, None)], - [], - [slice(0, 6, None), slice(7, 8, None)] - ]) - - -class TestCompressFunctions(object): - - def test_compress_nd(self): - # Tests compress_nd - x = np.array(list(range(3*4*5))).reshape(3, 4, 5) - m = np.zeros((3,4,5)).astype(bool) - m[1,1,1] = True - x = array(x, mask=m) - - # axis=None - a = compress_nd(x) - assert_equal(a, [[[ 0, 2, 3, 4], - [10, 12, 13, 14], - [15, 17, 18, 19]], - [[40, 42, 43, 44], - [50, 52, 53, 54], - [55, 57, 58, 59]]]) - - # axis=0 - a = compress_nd(x, 0) - assert_equal(a, [[[ 0, 1, 2, 3, 4], - [ 5, 6, 7, 8, 9], - [10, 11, 12, 13, 14], - [15, 16, 17, 18, 19]], - [[40, 41, 42, 43, 44], - [45, 46, 47, 48, 49], - [50, 51, 52, 53, 54], - [55, 56, 57, 58, 59]]]) - - # axis=1 - a = compress_nd(x, 1) - assert_equal(a, [[[ 0, 1, 2, 3, 4], - [10, 11, 12, 13, 14], - [15, 16, 17, 18, 19]], - [[20, 21, 22, 23, 24], - [30, 31, 32, 33, 34], - [35, 36, 37, 38, 39]], - [[40, 41, 42, 43, 44], - [50, 51, 52, 53, 54], - [55, 56, 57, 58, 59]]]) - - a2 = compress_nd(x, (1,)) - a3 = compress_nd(x, -2) - a4 = 
compress_nd(x, (-2,)) - assert_equal(a, a2) - assert_equal(a, a3) - assert_equal(a, a4) - - # axis=2 - a = compress_nd(x, 2) - assert_equal(a, [[[ 0, 2, 3, 4], - [ 5, 7, 8, 9], - [10, 12, 13, 14], - [15, 17, 18, 19]], - [[20, 22, 23, 24], - [25, 27, 28, 29], - [30, 32, 33, 34], - [35, 37, 38, 39]], - [[40, 42, 43, 44], - [45, 47, 48, 49], - [50, 52, 53, 54], - [55, 57, 58, 59]]]) - - a2 = compress_nd(x, (2,)) - a3 = compress_nd(x, -1) - a4 = compress_nd(x, (-1,)) - assert_equal(a, a2) - assert_equal(a, a3) - assert_equal(a, a4) - - # axis=(0, 1) - a = compress_nd(x, (0, 1)) - assert_equal(a, [[[ 0, 1, 2, 3, 4], - [10, 11, 12, 13, 14], - [15, 16, 17, 18, 19]], - [[40, 41, 42, 43, 44], - [50, 51, 52, 53, 54], - [55, 56, 57, 58, 59]]]) - a2 = compress_nd(x, (0, -2)) - assert_equal(a, a2) - - # axis=(1, 2) - a = compress_nd(x, (1, 2)) - assert_equal(a, [[[ 0, 2, 3, 4], - [10, 12, 13, 14], - [15, 17, 18, 19]], - [[20, 22, 23, 24], - [30, 32, 33, 34], - [35, 37, 38, 39]], - [[40, 42, 43, 44], - [50, 52, 53, 54], - [55, 57, 58, 59]]]) - - a2 = compress_nd(x, (-2, 2)) - a3 = compress_nd(x, (1, -1)) - a4 = compress_nd(x, (-2, -1)) - assert_equal(a, a2) - assert_equal(a, a3) - assert_equal(a, a4) - - # axis=(0, 2) - a = compress_nd(x, (0, 2)) - assert_equal(a, [[[ 0, 2, 3, 4], - [ 5, 7, 8, 9], - [10, 12, 13, 14], - [15, 17, 18, 19]], - [[40, 42, 43, 44], - [45, 47, 48, 49], - [50, 52, 53, 54], - [55, 57, 58, 59]]]) - - a2 = compress_nd(x, (0, -1)) - assert_equal(a, a2) - - def test_compress_rowcols(self): - # Tests compress_rowcols - x = array(np.arange(9).reshape(3, 3), - mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) - assert_equal(compress_rowcols(x), [[4, 5], [7, 8]]) - assert_equal(compress_rowcols(x, 0), [[3, 4, 5], [6, 7, 8]]) - assert_equal(compress_rowcols(x, 1), [[1, 2], [4, 5], [7, 8]]) - x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) - assert_equal(compress_rowcols(x), [[0, 2], [6, 8]]) - assert_equal(compress_rowcols(x, 0), [[0, 1, 2], [6, 7, 8]]) - 
assert_equal(compress_rowcols(x, 1), [[0, 2], [3, 5], [6, 8]]) - x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) - assert_equal(compress_rowcols(x), [[8]]) - assert_equal(compress_rowcols(x, 0), [[6, 7, 8]]) - assert_equal(compress_rowcols(x, 1,), [[2], [5], [8]]) - x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) - assert_equal(compress_rowcols(x).size, 0) - assert_equal(compress_rowcols(x, 0).size, 0) - assert_equal(compress_rowcols(x, 1).size, 0) - - def test_mask_rowcols(self): - # Tests mask_rowcols. - x = array(np.arange(9).reshape(3, 3), - mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) - assert_equal(mask_rowcols(x).mask, - [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) - assert_equal(mask_rowcols(x, 0).mask, - [[1, 1, 1], [0, 0, 0], [0, 0, 0]]) - assert_equal(mask_rowcols(x, 1).mask, - [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) - x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) - assert_equal(mask_rowcols(x).mask, - [[0, 1, 0], [1, 1, 1], [0, 1, 0]]) - assert_equal(mask_rowcols(x, 0).mask, - [[0, 0, 0], [1, 1, 1], [0, 0, 0]]) - assert_equal(mask_rowcols(x, 1).mask, - [[0, 1, 0], [0, 1, 0], [0, 1, 0]]) - x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) - assert_equal(mask_rowcols(x).mask, - [[1, 1, 1], [1, 1, 1], [1, 1, 0]]) - assert_equal(mask_rowcols(x, 0).mask, - [[1, 1, 1], [1, 1, 1], [0, 0, 0]]) - assert_equal(mask_rowcols(x, 1,).mask, - [[1, 1, 0], [1, 1, 0], [1, 1, 0]]) - x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) - assert_(mask_rowcols(x).all() is masked) - assert_(mask_rowcols(x, 0).all() is masked) - assert_(mask_rowcols(x, 1).all() is masked) - assert_(mask_rowcols(x).mask.all()) - assert_(mask_rowcols(x, 0).mask.all()) - assert_(mask_rowcols(x, 1).mask.all()) - - @pytest.mark.parametrize("axis", [None, 0, 1]) - @pytest.mark.parametrize(["func", "rowcols_axis"], - [(np.ma.mask_rows, 0), (np.ma.mask_cols, 1)]) - def test_mask_row_cols_axis_deprecation(self, axis, func, rowcols_axis): - # Test deprecation of the axis 
argument to `mask_rows` and `mask_cols` - x = array(np.arange(9).reshape(3, 3), - mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) - - with assert_warns(DeprecationWarning): - res = func(x, axis=axis) - assert_equal(res, mask_rowcols(x, rowcols_axis)) - - def test_dot(self): - # Tests dot product - n = np.arange(1, 7) - # - m = [1, 0, 0, 0, 0, 0] - a = masked_array(n, mask=m).reshape(2, 3) - b = masked_array(n, mask=m).reshape(3, 2) - c = dot(a, b, strict=True) - assert_equal(c.mask, [[1, 1], [1, 0]]) - c = dot(b, a, strict=True) - assert_equal(c.mask, [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) - c = dot(a, b, strict=False) - assert_equal(c, np.dot(a.filled(0), b.filled(0))) - c = dot(b, a, strict=False) - assert_equal(c, np.dot(b.filled(0), a.filled(0))) - # - m = [0, 0, 0, 0, 0, 1] - a = masked_array(n, mask=m).reshape(2, 3) - b = masked_array(n, mask=m).reshape(3, 2) - c = dot(a, b, strict=True) - assert_equal(c.mask, [[0, 1], [1, 1]]) - c = dot(b, a, strict=True) - assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [1, 1, 1]]) - c = dot(a, b, strict=False) - assert_equal(c, np.dot(a.filled(0), b.filled(0))) - assert_equal(c, dot(a, b)) - c = dot(b, a, strict=False) - assert_equal(c, np.dot(b.filled(0), a.filled(0))) - # - m = [0, 0, 0, 0, 0, 0] - a = masked_array(n, mask=m).reshape(2, 3) - b = masked_array(n, mask=m).reshape(3, 2) - c = dot(a, b) - assert_equal(c.mask, nomask) - c = dot(b, a) - assert_equal(c.mask, nomask) - # - a = masked_array(n, mask=[1, 0, 0, 0, 0, 0]).reshape(2, 3) - b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) - c = dot(a, b, strict=True) - assert_equal(c.mask, [[1, 1], [0, 0]]) - c = dot(a, b, strict=False) - assert_equal(c, np.dot(a.filled(0), b.filled(0))) - c = dot(b, a, strict=True) - assert_equal(c.mask, [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) - c = dot(b, a, strict=False) - assert_equal(c, np.dot(b.filled(0), a.filled(0))) - # - a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) - b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) 
- c = dot(a, b, strict=True) - assert_equal(c.mask, [[0, 0], [1, 1]]) - c = dot(a, b) - assert_equal(c, np.dot(a.filled(0), b.filled(0))) - c = dot(b, a, strict=True) - assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [0, 0, 1]]) - c = dot(b, a, strict=False) - assert_equal(c, np.dot(b.filled(0), a.filled(0))) - # - a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) - b = masked_array(n, mask=[0, 0, 1, 0, 0, 0]).reshape(3, 2) - c = dot(a, b, strict=True) - assert_equal(c.mask, [[1, 0], [1, 1]]) - c = dot(a, b, strict=False) - assert_equal(c, np.dot(a.filled(0), b.filled(0))) - c = dot(b, a, strict=True) - assert_equal(c.mask, [[0, 0, 1], [1, 1, 1], [0, 0, 1]]) - c = dot(b, a, strict=False) - assert_equal(c, np.dot(b.filled(0), a.filled(0))) - - def test_dot_returns_maskedarray(self): - # See gh-6611 - a = np.eye(3) - b = array(a) - assert_(type(dot(a, a)) is MaskedArray) - assert_(type(dot(a, b)) is MaskedArray) - assert_(type(dot(b, a)) is MaskedArray) - assert_(type(dot(b, b)) is MaskedArray) - - def test_dot_out(self): - a = array(np.eye(3)) - out = array(np.zeros((3, 3))) - res = dot(a, a, out=out) - assert_(res is out) - assert_equal(a, res) - - -class TestApplyAlongAxis(object): - # Tests 2D functions - def test_3d(self): - a = arange(12.).reshape(2, 2, 3) - - def myfunc(b): - return b[1] - - xa = apply_along_axis(myfunc, 2, a) - assert_equal(xa, [[1, 4], [7, 10]]) - - # Tests kwargs functions - def test_3d_kwargs(self): - a = arange(12).reshape(2, 2, 3) - - def myfunc(b, offset=0): - return b[1+offset] - - xa = apply_along_axis(myfunc, 2, a, offset=1) - assert_equal(xa, [[2, 5], [8, 11]]) - - -class TestApplyOverAxes(object): - # Tests apply_over_axes - def test_basic(self): - a = arange(24).reshape(2, 3, 4) - test = apply_over_axes(np.sum, a, [0, 2]) - ctrl = np.array([[[60], [92], [124]]]) - assert_equal(test, ctrl) - a[(a % 2).astype(bool)] = masked - test = apply_over_axes(np.sum, a, [0, 2]) - ctrl = np.array([[[28], [44], [60]]]) - assert_equal(test, 
ctrl) - - -class TestMedian(object): - def test_pytype(self): - r = np.ma.median([[np.inf, np.inf], [np.inf, np.inf]], axis=-1) - assert_equal(r, np.inf) - - def test_inf(self): - # test that even which computes handles inf / x = masked - r = np.ma.median(np.ma.masked_array([[np.inf, np.inf], - [np.inf, np.inf]]), axis=-1) - assert_equal(r, np.inf) - r = np.ma.median(np.ma.masked_array([[np.inf, np.inf], - [np.inf, np.inf]]), axis=None) - assert_equal(r, np.inf) - # all masked - r = np.ma.median(np.ma.masked_array([[np.inf, np.inf], - [np.inf, np.inf]], mask=True), - axis=-1) - assert_equal(r.mask, True) - r = np.ma.median(np.ma.masked_array([[np.inf, np.inf], - [np.inf, np.inf]], mask=True), - axis=None) - assert_equal(r.mask, True) - - def test_non_masked(self): - x = np.arange(9) - assert_equal(np.ma.median(x), 4.) - assert_(type(np.ma.median(x)) is not MaskedArray) - x = range(8) - assert_equal(np.ma.median(x), 3.5) - assert_(type(np.ma.median(x)) is not MaskedArray) - x = 5 - assert_equal(np.ma.median(x), 5.) 
- assert_(type(np.ma.median(x)) is not MaskedArray) - # integer - x = np.arange(9 * 8).reshape(9, 8) - assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0)) - assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1)) - assert_(np.ma.median(x, axis=1) is not MaskedArray) - # float - x = np.arange(9 * 8.).reshape(9, 8) - assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0)) - assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1)) - assert_(np.ma.median(x, axis=1) is not MaskedArray) - - def test_docstring_examples(self): - "test the examples given in the docstring of ma.median" - x = array(np.arange(8), mask=[0]*4 + [1]*4) - assert_equal(np.ma.median(x), 1.5) - assert_equal(np.ma.median(x).shape, (), "shape mismatch") - assert_(type(np.ma.median(x)) is not MaskedArray) - x = array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4) - assert_equal(np.ma.median(x), 2.5) - assert_equal(np.ma.median(x).shape, (), "shape mismatch") - assert_(type(np.ma.median(x)) is not MaskedArray) - ma_x = np.ma.median(x, axis=-1, overwrite_input=True) - assert_equal(ma_x, [2., 5.]) - assert_equal(ma_x.shape, (2,), "shape mismatch") - assert_(type(ma_x) is MaskedArray) - - def test_axis_argument_errors(self): - msg = "mask = %s, ndim = %s, axis = %s, overwrite_input = %s" - for ndmin in range(5): - for mask in [False, True]: - x = array(1, ndmin=ndmin, mask=mask) - - # Valid axis values should not raise exception - args = itertools.product(range(-ndmin, ndmin), [False, True]) - for axis, over in args: - try: - np.ma.median(x, axis=axis, overwrite_input=over) - except Exception: - raise AssertionError(msg % (mask, ndmin, axis, over)) - - # Invalid axis values should raise exception - args = itertools.product([-(ndmin + 1), ndmin], [False, True]) - for axis, over in args: - try: - np.ma.median(x, axis=axis, overwrite_input=over) - except np.AxisError: - pass - else: - raise AssertionError(msg % (mask, ndmin, axis, over)) - - def test_masked_0d(self): - # Check values - x = 
array(1, mask=False) - assert_equal(np.ma.median(x), 1) - x = array(1, mask=True) - assert_equal(np.ma.median(x), np.ma.masked) - - def test_masked_1d(self): - x = array(np.arange(5), mask=True) - assert_equal(np.ma.median(x), np.ma.masked) - assert_equal(np.ma.median(x).shape, (), "shape mismatch") - assert_(type(np.ma.median(x)) is np.ma.core.MaskedConstant) - x = array(np.arange(5), mask=False) - assert_equal(np.ma.median(x), 2.) - assert_equal(np.ma.median(x).shape, (), "shape mismatch") - assert_(type(np.ma.median(x)) is not MaskedArray) - x = array(np.arange(5), mask=[0,1,0,0,0]) - assert_equal(np.ma.median(x), 2.5) - assert_equal(np.ma.median(x).shape, (), "shape mismatch") - assert_(type(np.ma.median(x)) is not MaskedArray) - x = array(np.arange(5), mask=[0,1,1,1,1]) - assert_equal(np.ma.median(x), 0.) - assert_equal(np.ma.median(x).shape, (), "shape mismatch") - assert_(type(np.ma.median(x)) is not MaskedArray) - # integer - x = array(np.arange(5), mask=[0,1,1,0,0]) - assert_equal(np.ma.median(x), 3.) - assert_equal(np.ma.median(x).shape, (), "shape mismatch") - assert_(type(np.ma.median(x)) is not MaskedArray) - # float - x = array(np.arange(5.), mask=[0,1,1,0,0]) - assert_equal(np.ma.median(x), 3.) 
- assert_equal(np.ma.median(x).shape, (), "shape mismatch") - assert_(type(np.ma.median(x)) is not MaskedArray) - # integer - x = array(np.arange(6), mask=[0,1,1,1,1,0]) - assert_equal(np.ma.median(x), 2.5) - assert_equal(np.ma.median(x).shape, (), "shape mismatch") - assert_(type(np.ma.median(x)) is not MaskedArray) - # float - x = array(np.arange(6.), mask=[0,1,1,1,1,0]) - assert_equal(np.ma.median(x), 2.5) - assert_equal(np.ma.median(x).shape, (), "shape mismatch") - assert_(type(np.ma.median(x)) is not MaskedArray) - - def test_1d_shape_consistency(self): - assert_equal(np.ma.median(array([1,2,3],mask=[0,0,0])).shape, - np.ma.median(array([1,2,3],mask=[0,1,0])).shape ) - - def test_2d(self): - # Tests median w/ 2D - (n, p) = (101, 30) - x = masked_array(np.linspace(-1., 1., n),) - x[:10] = x[-10:] = masked - z = masked_array(np.empty((n, p), dtype=float)) - z[:, 0] = x[:] - idx = np.arange(len(x)) - for i in range(1, p): - np.random.shuffle(idx) - z[:, i] = x[idx] - assert_equal(median(z[:, 0]), 0) - assert_equal(median(z), 0) - assert_equal(median(z, axis=0), np.zeros(p)) - assert_equal(median(z.T, axis=1), np.zeros(p)) - - def test_2d_waxis(self): - # Tests median w/ 2D arrays and different axis. 
- x = masked_array(np.arange(30).reshape(10, 3)) - x[:3] = x[-3:] = masked - assert_equal(median(x), 14.5) - assert_(type(np.ma.median(x)) is not MaskedArray) - assert_equal(median(x, axis=0), [13.5, 14.5, 15.5]) - assert_(type(np.ma.median(x, axis=0)) is MaskedArray) - assert_equal(median(x, axis=1), [0, 0, 0, 10, 13, 16, 19, 0, 0, 0]) - assert_(type(np.ma.median(x, axis=1)) is MaskedArray) - assert_equal(median(x, axis=1).mask, [1, 1, 1, 0, 0, 0, 0, 1, 1, 1]) - - def test_3d(self): - # Tests median w/ 3D - x = np.ma.arange(24).reshape(3, 4, 2) - x[x % 3 == 0] = masked - assert_equal(median(x, 0), [[12, 9], [6, 15], [12, 9], [18, 15]]) - x.shape = (4, 3, 2) - assert_equal(median(x, 0), [[99, 10], [11, 99], [13, 14]]) - x = np.ma.arange(24).reshape(4, 3, 2) - x[x % 5 == 0] = masked - assert_equal(median(x, 0), [[12, 10], [8, 9], [16, 17]]) - - def test_neg_axis(self): - x = masked_array(np.arange(30).reshape(10, 3)) - x[:3] = x[-3:] = masked - assert_equal(median(x, axis=-1), median(x, axis=1)) - - def test_out_1d(self): - # integer float even odd - for v in (30, 30., 31, 31.): - x = masked_array(np.arange(v)) - x[:3] = x[-3:] = masked - out = masked_array(np.ones(())) - r = median(x, out=out) - if v == 30: - assert_equal(out, 14.5) - else: - assert_equal(out, 15.) 
- assert_(r is out) - assert_(type(r) is MaskedArray) - - def test_out(self): - # integer float even odd - for v in (40, 40., 30, 30.): - x = masked_array(np.arange(v).reshape(10, -1)) - x[:3] = x[-3:] = masked - out = masked_array(np.ones(10)) - r = median(x, axis=1, out=out) - if v == 30: - e = masked_array([0.]*3 + [10, 13, 16, 19] + [0.]*3, - mask=[True] * 3 + [False] * 4 + [True] * 3) - else: - e = masked_array([0.]*3 + [13.5, 17.5, 21.5, 25.5] + [0.]*3, - mask=[True]*3 + [False]*4 + [True]*3) - assert_equal(r, e) - assert_(r is out) - assert_(type(r) is MaskedArray) - - def test_single_non_masked_value_on_axis(self): - data = [[1., 0.], - [0., 3.], - [0., 0.]] - masked_arr = np.ma.masked_equal(data, 0) - expected = [1., 3.] - assert_array_equal(np.ma.median(masked_arr, axis=0), - expected) - - def test_nan(self): - for mask in (False, np.zeros(6, dtype=bool)): - dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]]) - dm.mask = mask - - # scalar result - r = np.ma.median(dm, axis=None) - assert_(np.isscalar(r)) - assert_array_equal(r, np.nan) - r = np.ma.median(dm.ravel(), axis=0) - assert_(np.isscalar(r)) - assert_array_equal(r, np.nan) - - r = np.ma.median(dm, axis=0) - assert_equal(type(r), MaskedArray) - assert_array_equal(r, [1, np.nan, 3]) - r = np.ma.median(dm, axis=1) - assert_equal(type(r), MaskedArray) - assert_array_equal(r, [np.nan, 2]) - r = np.ma.median(dm, axis=-1) - assert_equal(type(r), MaskedArray) - assert_array_equal(r, [np.nan, 2]) - - dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]]) - dm[:, 2] = np.ma.masked - assert_array_equal(np.ma.median(dm, axis=None), np.nan) - assert_array_equal(np.ma.median(dm, axis=0), [1, np.nan, 3]) - assert_array_equal(np.ma.median(dm, axis=1), [np.nan, 1.5]) - - def test_out_nan(self): - o = np.ma.masked_array(np.zeros((4,))) - d = np.ma.masked_array(np.ones((3, 4))) - d[2, 1] = np.nan - d[2, 2] = np.ma.masked - assert_equal(np.ma.median(d, 0, out=o), o) - o = np.ma.masked_array(np.zeros((3,))) - 
assert_equal(np.ma.median(d, 1, out=o), o) - o = np.ma.masked_array(np.zeros(())) - assert_equal(np.ma.median(d, out=o), o) - - def test_nan_behavior(self): - a = np.ma.masked_array(np.arange(24, dtype=float)) - a[::3] = np.ma.masked - a[2] = np.nan - assert_array_equal(np.ma.median(a), np.nan) - assert_array_equal(np.ma.median(a, axis=0), np.nan) - - a = np.ma.masked_array(np.arange(24, dtype=float).reshape(2, 3, 4)) - a.mask = np.arange(a.size) % 2 == 1 - aorig = a.copy() - a[1, 2, 3] = np.nan - a[1, 1, 2] = np.nan - - # no axis - assert_array_equal(np.ma.median(a), np.nan) - assert_(np.isscalar(np.ma.median(a))) - - # axis0 - b = np.ma.median(aorig, axis=0) - b[2, 3] = np.nan - b[1, 2] = np.nan - assert_equal(np.ma.median(a, 0), b) - - # axis1 - b = np.ma.median(aorig, axis=1) - b[1, 3] = np.nan - b[1, 2] = np.nan - assert_equal(np.ma.median(a, 1), b) - - # axis02 - b = np.ma.median(aorig, axis=(0, 2)) - b[1] = np.nan - b[2] = np.nan - assert_equal(np.ma.median(a, (0, 2)), b) - - def test_ambigous_fill(self): - # 255 is max value, used as filler for sort - a = np.array([[3, 3, 255], [3, 3, 255]], dtype=np.uint8) - a = np.ma.masked_array(a, mask=a == 3) - assert_array_equal(np.ma.median(a, axis=1), 255) - assert_array_equal(np.ma.median(a, axis=1).mask, False) - assert_array_equal(np.ma.median(a, axis=0), a[0]) - assert_array_equal(np.ma.median(a), 255) - - def test_special(self): - for inf in [np.inf, -np.inf]: - a = np.array([[inf, np.nan], [np.nan, np.nan]]) - a = np.ma.masked_array(a, mask=np.isnan(a)) - assert_equal(np.ma.median(a, axis=0), [inf, np.nan]) - assert_equal(np.ma.median(a, axis=1), [inf, np.nan]) - assert_equal(np.ma.median(a), inf) - - a = np.array([[np.nan, np.nan, inf], [np.nan, np.nan, inf]]) - a = np.ma.masked_array(a, mask=np.isnan(a)) - assert_array_equal(np.ma.median(a, axis=1), inf) - assert_array_equal(np.ma.median(a, axis=1).mask, False) - assert_array_equal(np.ma.median(a, axis=0), a[0]) - assert_array_equal(np.ma.median(a), inf) - - 
# no mask - a = np.array([[inf, inf], [inf, inf]]) - assert_equal(np.ma.median(a), inf) - assert_equal(np.ma.median(a, axis=0), inf) - assert_equal(np.ma.median(a, axis=1), inf) - - a = np.array([[inf, 7, -inf, -9], - [-10, np.nan, np.nan, 5], - [4, np.nan, np.nan, inf]], - dtype=np.float32) - a = np.ma.masked_array(a, mask=np.isnan(a)) - if inf > 0: - assert_equal(np.ma.median(a, axis=0), [4., 7., -inf, 5.]) - assert_equal(np.ma.median(a), 4.5) - else: - assert_equal(np.ma.median(a, axis=0), [-10., 7., -inf, -9.]) - assert_equal(np.ma.median(a), -2.5) - assert_equal(np.ma.median(a, axis=1), [-1., -2.5, inf]) - - for i in range(0, 10): - for j in range(1, 10): - a = np.array([([np.nan] * i) + ([inf] * j)] * 2) - a = np.ma.masked_array(a, mask=np.isnan(a)) - assert_equal(np.ma.median(a), inf) - assert_equal(np.ma.median(a, axis=1), inf) - assert_equal(np.ma.median(a, axis=0), - ([np.nan] * i) + [inf] * j) - - def test_empty(self): - # empty arrays - a = np.ma.masked_array(np.array([], dtype=float)) - with suppress_warnings() as w: - w.record(RuntimeWarning) - assert_array_equal(np.ma.median(a), np.nan) - assert_(w.log[0].category is RuntimeWarning) - - # multiple dimensions - a = np.ma.masked_array(np.array([], dtype=float, ndmin=3)) - # no axis - with suppress_warnings() as w: - w.record(RuntimeWarning) - warnings.filterwarnings('always', '', RuntimeWarning) - assert_array_equal(np.ma.median(a), np.nan) - assert_(w.log[0].category is RuntimeWarning) - - # axis 0 and 1 - b = np.ma.masked_array(np.array([], dtype=float, ndmin=2)) - assert_equal(np.ma.median(a, axis=0), b) - assert_equal(np.ma.median(a, axis=1), b) - - # axis 2 - b = np.ma.masked_array(np.array(np.nan, dtype=float, ndmin=2)) - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_equal(np.ma.median(a, axis=2), b) - assert_(w[0].category is RuntimeWarning) - - def test_object(self): - o = np.ma.masked_array(np.arange(7.)) - 
assert_(type(np.ma.median(o.astype(object))), float) - o[2] = np.nan - assert_(type(np.ma.median(o.astype(object))), float) - - -class TestCov(object): - - def setup(self): - self.data = array(np.random.rand(12)) - - def test_1d_without_missing(self): - # Test cov on 1D variable w/o missing values - x = self.data - assert_almost_equal(np.cov(x), cov(x)) - assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) - assert_almost_equal(np.cov(x, rowvar=False, bias=True), - cov(x, rowvar=False, bias=True)) - - def test_2d_without_missing(self): - # Test cov on 1 2D variable w/o missing values - x = self.data.reshape(3, 4) - assert_almost_equal(np.cov(x), cov(x)) - assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) - assert_almost_equal(np.cov(x, rowvar=False, bias=True), - cov(x, rowvar=False, bias=True)) - - def test_1d_with_missing(self): - # Test cov 1 1D variable w/missing values - x = self.data - x[-1] = masked - x -= x.mean() - nx = x.compressed() - assert_almost_equal(np.cov(nx), cov(x)) - assert_almost_equal(np.cov(nx, rowvar=False), cov(x, rowvar=False)) - assert_almost_equal(np.cov(nx, rowvar=False, bias=True), - cov(x, rowvar=False, bias=True)) - # - try: - cov(x, allow_masked=False) - except ValueError: - pass - # - # 2 1D variables w/ missing values - nx = x[1:-1] - assert_almost_equal(np.cov(nx, nx[::-1]), cov(x, x[::-1])) - assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False), - cov(x, x[::-1], rowvar=False)) - assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False, bias=True), - cov(x, x[::-1], rowvar=False, bias=True)) - - def test_2d_with_missing(self): - # Test cov on 2D variable w/ missing value - x = self.data - x[-1] = masked - x = x.reshape(3, 4) - valid = np.logical_not(getmaskarray(x)).astype(int) - frac = np.dot(valid, valid.T) - xf = (x - x.mean(1)[:, None]).filled(0) - assert_almost_equal(cov(x), - np.cov(xf) * (x.shape[1] - 1) / (frac - 1.)) - assert_almost_equal(cov(x, bias=True), - np.cov(xf, bias=True) * 
x.shape[1] / frac) - frac = np.dot(valid.T, valid) - xf = (x - x.mean(0)).filled(0) - assert_almost_equal(cov(x, rowvar=False), - (np.cov(xf, rowvar=False) * - (x.shape[0] - 1) / (frac - 1.))) - assert_almost_equal(cov(x, rowvar=False, bias=True), - (np.cov(xf, rowvar=False, bias=True) * - x.shape[0] / frac)) - - -class TestCorrcoef(object): - - def setup(self): - self.data = array(np.random.rand(12)) - self.data2 = array(np.random.rand(12)) - - def test_ddof(self): - # ddof raises DeprecationWarning - x, y = self.data, self.data2 - expected = np.corrcoef(x) - expected2 = np.corrcoef(x, y) - with suppress_warnings() as sup: - warnings.simplefilter("always") - assert_warns(DeprecationWarning, corrcoef, x, ddof=-1) - sup.filter(DeprecationWarning, "bias and ddof have no effect") - # ddof has no or negligible effect on the function - assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0)) - assert_almost_equal(corrcoef(x, ddof=-1), expected) - assert_almost_equal(corrcoef(x, y, ddof=-1), expected2) - assert_almost_equal(corrcoef(x, ddof=3), expected) - assert_almost_equal(corrcoef(x, y, ddof=3), expected2) - - def test_bias(self): - x, y = self.data, self.data2 - expected = np.corrcoef(x) - # bias raises DeprecationWarning - with suppress_warnings() as sup: - warnings.simplefilter("always") - assert_warns(DeprecationWarning, corrcoef, x, y, True, False) - assert_warns(DeprecationWarning, corrcoef, x, y, True, True) - assert_warns(DeprecationWarning, corrcoef, x, bias=False) - sup.filter(DeprecationWarning, "bias and ddof have no effect") - # bias has no or negligible effect on the function - assert_almost_equal(corrcoef(x, bias=1), expected) - - def test_1d_without_missing(self): - # Test cov on 1D variable w/o missing values - x = self.data - assert_almost_equal(np.corrcoef(x), corrcoef(x)) - assert_almost_equal(np.corrcoef(x, rowvar=False), - corrcoef(x, rowvar=False)) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have 
no effect") - assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), - corrcoef(x, rowvar=False, bias=True)) - - def test_2d_without_missing(self): - # Test corrcoef on 1 2D variable w/o missing values - x = self.data.reshape(3, 4) - assert_almost_equal(np.corrcoef(x), corrcoef(x)) - assert_almost_equal(np.corrcoef(x, rowvar=False), - corrcoef(x, rowvar=False)) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") - assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), - corrcoef(x, rowvar=False, bias=True)) - - def test_1d_with_missing(self): - # Test corrcoef 1 1D variable w/missing values - x = self.data - x[-1] = masked - x -= x.mean() - nx = x.compressed() - assert_almost_equal(np.corrcoef(nx), corrcoef(x)) - assert_almost_equal(np.corrcoef(nx, rowvar=False), - corrcoef(x, rowvar=False)) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") - assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True), - corrcoef(x, rowvar=False, bias=True)) - try: - corrcoef(x, allow_masked=False) - except ValueError: - pass - # 2 1D variables w/ missing values - nx = x[1:-1] - assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1])) - assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False), - corrcoef(x, x[::-1], rowvar=False)) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") - # ddof and bias have no or negligible effect on the function - assert_almost_equal(np.corrcoef(nx, nx[::-1]), - corrcoef(x, x[::-1], bias=1)) - assert_almost_equal(np.corrcoef(nx, nx[::-1]), - corrcoef(x, x[::-1], ddof=2)) - - def test_2d_with_missing(self): - # Test corrcoef on 2D variable w/ missing value - x = self.data - x[-1] = masked - x = x.reshape(3, 4) - - test = corrcoef(x) - control = np.corrcoef(x) - assert_almost_equal(test[:-1, :-1], control[:-1, :-1]) - with suppress_warnings() as sup: - 
sup.filter(DeprecationWarning, "bias and ddof have no effect") - # ddof and bias have no or negligible effect on the function - assert_almost_equal(corrcoef(x, ddof=-2)[:-1, :-1], - control[:-1, :-1]) - assert_almost_equal(corrcoef(x, ddof=3)[:-1, :-1], - control[:-1, :-1]) - assert_almost_equal(corrcoef(x, bias=1)[:-1, :-1], - control[:-1, :-1]) - - -class TestPolynomial(object): - # - def test_polyfit(self): - # Tests polyfit - # On ndarrays - x = np.random.rand(10) - y = np.random.rand(20).reshape(-1, 2) - assert_almost_equal(polyfit(x, y, 3), np.polyfit(x, y, 3)) - # ON 1D maskedarrays - x = x.view(MaskedArray) - x[0] = masked - y = y.view(MaskedArray) - y[0, 0] = y[-1, -1] = masked - # - (C, R, K, S, D) = polyfit(x, y[:, 0], 3, full=True) - (c, r, k, s, d) = np.polyfit(x[1:], y[1:, 0].compressed(), 3, - full=True) - for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): - assert_almost_equal(a, a_) - # - (C, R, K, S, D) = polyfit(x, y[:, -1], 3, full=True) - (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, -1], 3, full=True) - for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): - assert_almost_equal(a, a_) - # - (C, R, K, S, D) = polyfit(x, y, 3, full=True) - (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True) - for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): - assert_almost_equal(a, a_) - # - w = np.random.rand(10) + 1 - wo = w.copy() - xs = x[1:-1] - ys = y[1:-1] - ws = w[1:-1] - (C, R, K, S, D) = polyfit(x, y, 3, full=True, w=w) - (c, r, k, s, d) = np.polyfit(xs, ys, 3, full=True, w=ws) - assert_equal(w, wo) - for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): - assert_almost_equal(a, a_) - - def test_polyfit_with_masked_NaNs(self): - x = np.random.rand(10) - y = np.random.rand(20).reshape(-1, 2) - - x[0] = np.nan - y[-1,-1] = np.nan - x = x.view(MaskedArray) - y = y.view(MaskedArray) - x[0] = masked - y[-1,-1] = masked - - (C, R, K, S, D) = polyfit(x, y, 3, full=True) - (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True) - for 
(a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): - assert_almost_equal(a, a_) - - -class TestArraySetOps(object): - - def test_unique_onlist(self): - # Test unique on list - data = [1, 1, 1, 2, 2, 3] - test = unique(data, return_index=True, return_inverse=True) - assert_(isinstance(test[0], MaskedArray)) - assert_equal(test[0], masked_array([1, 2, 3], mask=[0, 0, 0])) - assert_equal(test[1], [0, 3, 5]) - assert_equal(test[2], [0, 0, 0, 1, 1, 2]) - - def test_unique_onmaskedarray(self): - # Test unique on masked data w/use_mask=True - data = masked_array([1, 1, 1, 2, 2, 3], mask=[0, 0, 1, 0, 1, 0]) - test = unique(data, return_index=True, return_inverse=True) - assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1])) - assert_equal(test[1], [0, 3, 5, 2]) - assert_equal(test[2], [0, 0, 3, 1, 3, 2]) - # - data.fill_value = 3 - data = masked_array(data=[1, 1, 1, 2, 2, 3], - mask=[0, 0, 1, 0, 1, 0], fill_value=3) - test = unique(data, return_index=True, return_inverse=True) - assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1])) - assert_equal(test[1], [0, 3, 5, 2]) - assert_equal(test[2], [0, 0, 3, 1, 3, 2]) - - def test_unique_allmasked(self): - # Test all masked - data = masked_array([1, 1, 1], mask=True) - test = unique(data, return_index=True, return_inverse=True) - assert_equal(test[0], masked_array([1, ], mask=[True])) - assert_equal(test[1], [0]) - assert_equal(test[2], [0, 0, 0]) - # - # Test masked - data = masked - test = unique(data, return_index=True, return_inverse=True) - assert_equal(test[0], masked_array(masked)) - assert_equal(test[1], [0]) - assert_equal(test[2], [0]) - - def test_ediff1d(self): - # Tests mediff1d - x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) - control = array([1, 1, 1, 4], mask=[1, 0, 0, 1]) - test = ediff1d(x) - assert_equal(test, control) - assert_equal(test.filled(0), control.filled(0)) - assert_equal(test.mask, control.mask) - - def test_ediff1d_tobegin(self): - # Test ediff1d w/ to_begin - 
x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) - test = ediff1d(x, to_begin=masked) - control = array([0, 1, 1, 1, 4], mask=[1, 1, 0, 0, 1]) - assert_equal(test, control) - assert_equal(test.filled(0), control.filled(0)) - assert_equal(test.mask, control.mask) - # - test = ediff1d(x, to_begin=[1, 2, 3]) - control = array([1, 2, 3, 1, 1, 1, 4], mask=[0, 0, 0, 1, 0, 0, 1]) - assert_equal(test, control) - assert_equal(test.filled(0), control.filled(0)) - assert_equal(test.mask, control.mask) - - def test_ediff1d_toend(self): - # Test ediff1d w/ to_end - x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) - test = ediff1d(x, to_end=masked) - control = array([1, 1, 1, 4, 0], mask=[1, 0, 0, 1, 1]) - assert_equal(test, control) - assert_equal(test.filled(0), control.filled(0)) - assert_equal(test.mask, control.mask) - # - test = ediff1d(x, to_end=[1, 2, 3]) - control = array([1, 1, 1, 4, 1, 2, 3], mask=[1, 0, 0, 1, 0, 0, 0]) - assert_equal(test, control) - assert_equal(test.filled(0), control.filled(0)) - assert_equal(test.mask, control.mask) - - def test_ediff1d_tobegin_toend(self): - # Test ediff1d w/ to_begin and to_end - x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) - test = ediff1d(x, to_end=masked, to_begin=masked) - control = array([0, 1, 1, 1, 4, 0], mask=[1, 1, 0, 0, 1, 1]) - assert_equal(test, control) - assert_equal(test.filled(0), control.filled(0)) - assert_equal(test.mask, control.mask) - # - test = ediff1d(x, to_end=[1, 2, 3], to_begin=masked) - control = array([0, 1, 1, 1, 4, 1, 2, 3], - mask=[1, 1, 0, 0, 1, 0, 0, 0]) - assert_equal(test, control) - assert_equal(test.filled(0), control.filled(0)) - assert_equal(test.mask, control.mask) - - def test_ediff1d_ndarray(self): - # Test ediff1d w/ a ndarray - x = np.arange(5) - test = ediff1d(x) - control = array([1, 1, 1, 1], mask=[0, 0, 0, 0]) - assert_equal(test, control) - assert_(isinstance(test, MaskedArray)) - assert_equal(test.filled(0), control.filled(0)) - assert_equal(test.mask, 
control.mask) - # - test = ediff1d(x, to_end=masked, to_begin=masked) - control = array([0, 1, 1, 1, 1, 0], mask=[1, 0, 0, 0, 0, 1]) - assert_(isinstance(test, MaskedArray)) - assert_equal(test.filled(0), control.filled(0)) - assert_equal(test.mask, control.mask) - - def test_intersect1d(self): - # Test intersect1d - x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) - y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) - test = intersect1d(x, y) - control = array([1, 3, -1], mask=[0, 0, 1]) - assert_equal(test, control) - - def test_setxor1d(self): - # Test setxor1d - a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) - b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) - test = setxor1d(a, b) - assert_equal(test, array([3, 4, 7])) - # - a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) - b = [1, 2, 3, 4, 5] - test = setxor1d(a, b) - assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1])) - # - a = array([1, 2, 3]) - b = array([6, 5, 4]) - test = setxor1d(a, b) - assert_(isinstance(test, MaskedArray)) - assert_equal(test, [1, 2, 3, 4, 5, 6]) - # - a = array([1, 8, 2, 3], mask=[0, 1, 0, 0]) - b = array([6, 5, 4, 8], mask=[0, 0, 0, 1]) - test = setxor1d(a, b) - assert_(isinstance(test, MaskedArray)) - assert_equal(test, [1, 2, 3, 4, 5, 6]) - # - assert_array_equal([], setxor1d([], [])) - - def test_isin(self): - # the tests for in1d cover most of isin's behavior - # if in1d is removed, would need to change those tests to test - # isin instead. 
- a = np.arange(24).reshape([2, 3, 4]) - mask = np.zeros([2, 3, 4]) - mask[1, 2, 0] = 1 - a = array(a, mask=mask) - b = array(data=[0, 10, 20, 30, 1, 3, 11, 22, 33], - mask=[0, 1, 0, 1, 0, 1, 0, 1, 0]) - ec = zeros((2, 3, 4), dtype=bool) - ec[0, 0, 0] = True - ec[0, 0, 1] = True - ec[0, 2, 3] = True - c = isin(a, b) - assert_(isinstance(c, MaskedArray)) - assert_array_equal(c, ec) - #compare results of np.isin to ma.isin - d = np.isin(a, b[~b.mask]) & ~a.mask - assert_array_equal(c, d) - - def test_in1d(self): - # Test in1d - a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) - b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) - test = in1d(a, b) - assert_equal(test, [True, True, True, False, True]) - # - a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1]) - b = array([1, 5, -1], mask=[0, 0, 1]) - test = in1d(a, b) - assert_equal(test, [True, True, False, True, True]) - # - assert_array_equal([], in1d([], [])) - - def test_in1d_invert(self): - # Test in1d's invert parameter - a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) - b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) - assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) - - a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1]) - b = array([1, 5, -1], mask=[0, 0, 1]) - assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) - - assert_array_equal([], in1d([], [], invert=True)) - - def test_union1d(self): - # Test union1d - a = array([1, 2, 5, 7, 5, -1], mask=[0, 0, 0, 0, 0, 1]) - b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) - test = union1d(a, b) - control = array([1, 2, 3, 4, 5, 7, -1], mask=[0, 0, 0, 0, 0, 0, 1]) - assert_equal(test, control) - - # Tests gh-10340, arguments to union1d should be - # flattened if they are not already 1D - x = array([[0, 1, 2], [3, 4, 5]], mask=[[0, 0, 0], [0, 0, 1]]) - y = array([0, 1, 2, 3, 4], mask=[0, 0, 0, 0, 1]) - ez = array([0, 1, 2, 3, 4, 5], mask=[0, 0, 0, 0, 0, 1]) - z = union1d(x, y) - assert_equal(z, ez) - # - assert_array_equal([], 
union1d([], [])) - - def test_setdiff1d(self): - # Test setdiff1d - a = array([6, 5, 4, 7, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 0, 1]) - b = array([2, 4, 3, 3, 2, 1, 5]) - test = setdiff1d(a, b) - assert_equal(test, array([6, 7, -1], mask=[0, 0, 1])) - # - a = arange(10) - b = arange(8) - assert_equal(setdiff1d(a, b), array([8, 9])) - a = array([], np.uint32, mask=[]) - assert_equal(setdiff1d(a, []).dtype, np.uint32) - - def test_setdiff1d_char_array(self): - # Test setdiff1d_charray - a = np.array(['a', 'b', 'c']) - b = np.array(['a', 'b', 's']) - assert_array_equal(setdiff1d(a, b), np.array(['c'])) - - -class TestShapeBase(object): - - def test_atleast_2d(self): - # Test atleast_2d - a = masked_array([0, 1, 2], mask=[0, 1, 0]) - b = atleast_2d(a) - assert_equal(b.shape, (1, 3)) - assert_equal(b.mask.shape, b.data.shape) - assert_equal(a.shape, (3,)) - assert_equal(a.mask.shape, a.data.shape) - assert_equal(b.mask.shape, b.data.shape) - - def test_shape_scalar(self): - # the atleast and diagflat function should work with scalars - # GitHub issue #3367 - # Additionally, the atleast functions should accept multiple scalars - # correctly - b = atleast_1d(1.0) - assert_equal(b.shape, (1,)) - assert_equal(b.mask.shape, b.shape) - assert_equal(b.data.shape, b.shape) - - b = atleast_1d(1.0, 2.0) - for a in b: - assert_equal(a.shape, (1,)) - assert_equal(a.mask.shape, a.shape) - assert_equal(a.data.shape, a.shape) - - b = atleast_2d(1.0) - assert_equal(b.shape, (1, 1)) - assert_equal(b.mask.shape, b.shape) - assert_equal(b.data.shape, b.shape) - - b = atleast_2d(1.0, 2.0) - for a in b: - assert_equal(a.shape, (1, 1)) - assert_equal(a.mask.shape, a.shape) - assert_equal(a.data.shape, a.shape) - - b = atleast_3d(1.0) - assert_equal(b.shape, (1, 1, 1)) - assert_equal(b.mask.shape, b.shape) - assert_equal(b.data.shape, b.shape) - - b = atleast_3d(1.0, 2.0) - for a in b: - assert_equal(a.shape, (1, 1, 1)) - assert_equal(a.mask.shape, a.shape) - assert_equal(a.data.shape, 
a.shape) - - - b = diagflat(1.0) - assert_equal(b.shape, (1, 1)) - assert_equal(b.mask.shape, b.data.shape) - - -class TestStack(object): - - def test_stack_1d(self): - a = masked_array([0, 1, 2], mask=[0, 1, 0]) - b = masked_array([9, 8, 7], mask=[1, 0, 0]) - - c = stack([a, b], axis=0) - assert_equal(c.shape, (2, 3)) - assert_array_equal(a.mask, c[0].mask) - assert_array_equal(b.mask, c[1].mask) - - d = vstack([a, b]) - assert_array_equal(c.data, d.data) - assert_array_equal(c.mask, d.mask) - - c = stack([a, b], axis=1) - assert_equal(c.shape, (3, 2)) - assert_array_equal(a.mask, c[:, 0].mask) - assert_array_equal(b.mask, c[:, 1].mask) - - def test_stack_masks(self): - a = masked_array([0, 1, 2], mask=True) - b = masked_array([9, 8, 7], mask=False) - - c = stack([a, b], axis=0) - assert_equal(c.shape, (2, 3)) - assert_array_equal(a.mask, c[0].mask) - assert_array_equal(b.mask, c[1].mask) - - d = vstack([a, b]) - assert_array_equal(c.data, d.data) - assert_array_equal(c.mask, d.mask) - - c = stack([a, b], axis=1) - assert_equal(c.shape, (3, 2)) - assert_array_equal(a.mask, c[:, 0].mask) - assert_array_equal(b.mask, c[:, 1].mask) - - def test_stack_nd(self): - # 2D - shp = (3, 2) - d1 = np.random.randint(0, 10, shp) - d2 = np.random.randint(0, 10, shp) - m1 = np.random.randint(0, 2, shp).astype(bool) - m2 = np.random.randint(0, 2, shp).astype(bool) - a1 = masked_array(d1, mask=m1) - a2 = masked_array(d2, mask=m2) - - c = stack([a1, a2], axis=0) - c_shp = (2,) + shp - assert_equal(c.shape, c_shp) - assert_array_equal(a1.mask, c[0].mask) - assert_array_equal(a2.mask, c[1].mask) - - c = stack([a1, a2], axis=-1) - c_shp = shp + (2,) - assert_equal(c.shape, c_shp) - assert_array_equal(a1.mask, c[..., 0].mask) - assert_array_equal(a2.mask, c[..., 1].mask) - - # 4D - shp = (3, 2, 4, 5,) - d1 = np.random.randint(0, 10, shp) - d2 = np.random.randint(0, 10, shp) - m1 = np.random.randint(0, 2, shp).astype(bool) - m2 = np.random.randint(0, 2, shp).astype(bool) - a1 = 
masked_array(d1, mask=m1) - a2 = masked_array(d2, mask=m2) - - c = stack([a1, a2], axis=0) - c_shp = (2,) + shp - assert_equal(c.shape, c_shp) - assert_array_equal(a1.mask, c[0].mask) - assert_array_equal(a2.mask, c[1].mask) - - c = stack([a1, a2], axis=-1) - c_shp = shp + (2,) - assert_equal(c.shape, c_shp) - assert_array_equal(a1.mask, c[..., 0].mask) - assert_array_equal(a2.mask, c[..., 1].mask) diff --git a/venv/lib/python3.7/site-packages/numpy/ma/tests/test_mrecords.py b/venv/lib/python3.7/site-packages/numpy/ma/tests/test_mrecords.py deleted file mode 100644 index 94e772d..0000000 --- a/venv/lib/python3.7/site-packages/numpy/ma/tests/test_mrecords.py +++ /dev/null @@ -1,495 +0,0 @@ -# pylint: disable-msg=W0611, W0612, W0511,R0201 -"""Tests suite for mrecords. - -:author: Pierre Gerard-Marchant -:contact: pierregm_at_uga_dot_edu - -""" -from __future__ import division, absolute_import, print_function - -import numpy as np -import numpy.ma as ma -from numpy import recarray -from numpy.ma import masked, nomask -from numpy.testing import temppath -from numpy.core.records import ( - fromrecords as recfromrecords, fromarrays as recfromarrays - ) -from numpy.ma.mrecords import ( - MaskedRecords, mrecarray, fromarrays, fromtextfile, fromrecords, - addfield - ) -from numpy.ma.testutils import ( - assert_, assert_equal, - assert_equal_records, - ) -from numpy.compat import pickle - - -class TestMRecords(object): - - ilist = [1, 2, 3, 4, 5] - flist = [1.1, 2.2, 3.3, 4.4, 5.5] - slist = [b'one', b'two', b'three', b'four', b'five'] - ddtype = [('a', int), ('b', float), ('c', '|S8')] - mask = [0, 1, 0, 0, 1] - base = ma.array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) - - def test_byview(self): - # Test creation by view - base = self.base - mbase = base.view(mrecarray) - assert_equal(mbase.recordmask, base.recordmask) - assert_equal_records(mbase._mask, base._mask) - assert_(isinstance(mbase._data, recarray)) - assert_equal_records(mbase._data, 
base._data.view(recarray)) - for field in ('a', 'b', 'c'): - assert_equal(base[field], mbase[field]) - assert_equal_records(mbase.view(mrecarray), mbase) - - def test_get(self): - # Tests fields retrieval - base = self.base.copy() - mbase = base.view(mrecarray) - # As fields.......... - for field in ('a', 'b', 'c'): - assert_equal(getattr(mbase, field), mbase[field]) - assert_equal(base[field], mbase[field]) - # as elements ....... - mbase_first = mbase[0] - assert_(isinstance(mbase_first, mrecarray)) - assert_equal(mbase_first.dtype, mbase.dtype) - assert_equal(mbase_first.tolist(), (1, 1.1, b'one')) - # Used to be mask, now it's recordmask - assert_equal(mbase_first.recordmask, nomask) - assert_equal(mbase_first._mask.item(), (False, False, False)) - assert_equal(mbase_first['a'], mbase['a'][0]) - mbase_last = mbase[-1] - assert_(isinstance(mbase_last, mrecarray)) - assert_equal(mbase_last.dtype, mbase.dtype) - assert_equal(mbase_last.tolist(), (None, None, None)) - # Used to be mask, now it's recordmask - assert_equal(mbase_last.recordmask, True) - assert_equal(mbase_last._mask.item(), (True, True, True)) - assert_equal(mbase_last['a'], mbase['a'][-1]) - assert_((mbase_last['a'] is masked)) - # as slice .......... - mbase_sl = mbase[:2] - assert_(isinstance(mbase_sl, mrecarray)) - assert_equal(mbase_sl.dtype, mbase.dtype) - # Used to be mask, now it's recordmask - assert_equal(mbase_sl.recordmask, [0, 1]) - assert_equal_records(mbase_sl.mask, - np.array([(False, False, False), - (True, True, True)], - dtype=mbase._mask.dtype)) - assert_equal_records(mbase_sl, base[:2].view(mrecarray)) - for field in ('a', 'b', 'c'): - assert_equal(getattr(mbase_sl, field), base[:2][field]) - - def test_set_fields(self): - # Tests setting fields. 
- base = self.base.copy() - mbase = base.view(mrecarray) - mbase = mbase.copy() - mbase.fill_value = (999999, 1e20, 'N/A') - # Change the data, the mask should be conserved - mbase.a._data[:] = 5 - assert_equal(mbase['a']._data, [5, 5, 5, 5, 5]) - assert_equal(mbase['a']._mask, [0, 1, 0, 0, 1]) - # Change the elements, and the mask will follow - mbase.a = 1 - assert_equal(mbase['a']._data, [1]*5) - assert_equal(ma.getmaskarray(mbase['a']), [0]*5) - # Use to be _mask, now it's recordmask - assert_equal(mbase.recordmask, [False]*5) - assert_equal(mbase._mask.tolist(), - np.array([(0, 0, 0), - (0, 1, 1), - (0, 0, 0), - (0, 0, 0), - (0, 1, 1)], - dtype=bool)) - # Set a field to mask ........................ - mbase.c = masked - # Use to be mask, and now it's still mask ! - assert_equal(mbase.c.mask, [1]*5) - assert_equal(mbase.c.recordmask, [1]*5) - assert_equal(ma.getmaskarray(mbase['c']), [1]*5) - assert_equal(ma.getdata(mbase['c']), [b'N/A']*5) - assert_equal(mbase._mask.tolist(), - np.array([(0, 0, 1), - (0, 1, 1), - (0, 0, 1), - (0, 0, 1), - (0, 1, 1)], - dtype=bool)) - # Set fields by slices ....................... - mbase = base.view(mrecarray).copy() - mbase.a[3:] = 5 - assert_equal(mbase.a, [1, 2, 3, 5, 5]) - assert_equal(mbase.a._mask, [0, 1, 0, 0, 0]) - mbase.b[3:] = masked - assert_equal(mbase.b, base['b']) - assert_equal(mbase.b._mask, [0, 1, 0, 1, 1]) - # Set fields globally.......................... - ndtype = [('alpha', '|S1'), ('num', int)] - data = ma.array([('a', 1), ('b', 2), ('c', 3)], dtype=ndtype) - rdata = data.view(MaskedRecords) - val = ma.array([10, 20, 30], mask=[1, 0, 0]) - - rdata['num'] = val - assert_equal(rdata.num, val) - assert_equal(rdata.num.mask, [1, 0, 0]) - - def test_set_fields_mask(self): - # Tests setting the mask of a field. - base = self.base.copy() - # This one has already a mask.... 
- mbase = base.view(mrecarray) - mbase['a'][-2] = masked - assert_equal(mbase.a, [1, 2, 3, 4, 5]) - assert_equal(mbase.a._mask, [0, 1, 0, 1, 1]) - # This one has not yet - mbase = fromarrays([np.arange(5), np.random.rand(5)], - dtype=[('a', int), ('b', float)]) - mbase['a'][-2] = masked - assert_equal(mbase.a, [0, 1, 2, 3, 4]) - assert_equal(mbase.a._mask, [0, 0, 0, 1, 0]) - - def test_set_mask(self): - base = self.base.copy() - mbase = base.view(mrecarray) - # Set the mask to True ....................... - mbase.mask = masked - assert_equal(ma.getmaskarray(mbase['b']), [1]*5) - assert_equal(mbase['a']._mask, mbase['b']._mask) - assert_equal(mbase['a']._mask, mbase['c']._mask) - assert_equal(mbase._mask.tolist(), - np.array([(1, 1, 1)]*5, dtype=bool)) - # Delete the mask ............................ - mbase.mask = nomask - assert_equal(ma.getmaskarray(mbase['c']), [0]*5) - assert_equal(mbase._mask.tolist(), - np.array([(0, 0, 0)]*5, dtype=bool)) - - def test_set_mask_fromarray(self): - base = self.base.copy() - mbase = base.view(mrecarray) - # Sets the mask w/ an array - mbase.mask = [1, 0, 0, 0, 1] - assert_equal(mbase.a.mask, [1, 0, 0, 0, 1]) - assert_equal(mbase.b.mask, [1, 0, 0, 0, 1]) - assert_equal(mbase.c.mask, [1, 0, 0, 0, 1]) - # Yay, once more ! 
- mbase.mask = [0, 0, 0, 0, 1] - assert_equal(mbase.a.mask, [0, 0, 0, 0, 1]) - assert_equal(mbase.b.mask, [0, 0, 0, 0, 1]) - assert_equal(mbase.c.mask, [0, 0, 0, 0, 1]) - - def test_set_mask_fromfields(self): - mbase = self.base.copy().view(mrecarray) - - nmask = np.array( - [(0, 1, 0), (0, 1, 0), (1, 0, 1), (1, 0, 1), (0, 0, 0)], - dtype=[('a', bool), ('b', bool), ('c', bool)]) - mbase.mask = nmask - assert_equal(mbase.a.mask, [0, 0, 1, 1, 0]) - assert_equal(mbase.b.mask, [1, 1, 0, 0, 0]) - assert_equal(mbase.c.mask, [0, 0, 1, 1, 0]) - # Reinitialize and redo - mbase.mask = False - mbase.fieldmask = nmask - assert_equal(mbase.a.mask, [0, 0, 1, 1, 0]) - assert_equal(mbase.b.mask, [1, 1, 0, 0, 0]) - assert_equal(mbase.c.mask, [0, 0, 1, 1, 0]) - - def test_set_elements(self): - base = self.base.copy() - # Set an element to mask ..................... - mbase = base.view(mrecarray).copy() - mbase[-2] = masked - assert_equal( - mbase._mask.tolist(), - np.array([(0, 0, 0), (1, 1, 1), (0, 0, 0), (1, 1, 1), (1, 1, 1)], - dtype=bool)) - # Used to be mask, now it's recordmask! - assert_equal(mbase.recordmask, [0, 1, 0, 1, 1]) - # Set slices ................................. 
- mbase = base.view(mrecarray).copy() - mbase[:2] = (5, 5, 5) - assert_equal(mbase.a._data, [5, 5, 3, 4, 5]) - assert_equal(mbase.a._mask, [0, 0, 0, 0, 1]) - assert_equal(mbase.b._data, [5., 5., 3.3, 4.4, 5.5]) - assert_equal(mbase.b._mask, [0, 0, 0, 0, 1]) - assert_equal(mbase.c._data, - [b'5', b'5', b'three', b'four', b'five']) - assert_equal(mbase.b._mask, [0, 0, 0, 0, 1]) - - mbase = base.view(mrecarray).copy() - mbase[:2] = masked - assert_equal(mbase.a._data, [1, 2, 3, 4, 5]) - assert_equal(mbase.a._mask, [1, 1, 0, 0, 1]) - assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 4.4, 5.5]) - assert_equal(mbase.b._mask, [1, 1, 0, 0, 1]) - assert_equal(mbase.c._data, - [b'one', b'two', b'three', b'four', b'five']) - assert_equal(mbase.b._mask, [1, 1, 0, 0, 1]) - - def test_setslices_hardmask(self): - # Tests setting slices w/ hardmask. - base = self.base.copy() - mbase = base.view(mrecarray) - mbase.harden_mask() - try: - mbase[-2:] = (5, 5, 5) - assert_equal(mbase.a._data, [1, 2, 3, 5, 5]) - assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 5, 5.5]) - assert_equal(mbase.c._data, - [b'one', b'two', b'three', b'5', b'five']) - assert_equal(mbase.a._mask, [0, 1, 0, 0, 1]) - assert_equal(mbase.b._mask, mbase.a._mask) - assert_equal(mbase.b._mask, mbase.c._mask) - except NotImplementedError: - # OK, not implemented yet... - pass - except AssertionError: - raise - else: - raise Exception("Flexible hard masks should be supported !") - # Not using a tuple should crash - try: - mbase[-2:] = 3 - except (NotImplementedError, TypeError): - pass - else: - raise TypeError("Should have expected a readable buffer object!") - - def test_hardmask(self): - # Test hardmask - base = self.base.copy() - mbase = base.view(mrecarray) - mbase.harden_mask() - assert_(mbase._hardmask) - mbase.mask = nomask - assert_equal_records(mbase._mask, base._mask) - mbase.soften_mask() - assert_(not mbase._hardmask) - mbase.mask = nomask - # So, the mask of a field is no longer set to nomask... 
- assert_equal_records(mbase._mask, - ma.make_mask_none(base.shape, base.dtype)) - assert_(ma.make_mask(mbase['b']._mask) is nomask) - assert_equal(mbase['a']._mask, mbase['b']._mask) - - def test_pickling(self): - # Test pickling - base = self.base.copy() - mrec = base.view(mrecarray) - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - _ = pickle.dumps(mrec, protocol=proto) - mrec_ = pickle.loads(_) - assert_equal(mrec_.dtype, mrec.dtype) - assert_equal_records(mrec_._data, mrec._data) - assert_equal(mrec_._mask, mrec._mask) - assert_equal_records(mrec_._mask, mrec._mask) - - def test_filled(self): - # Test filling the array - _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int) - _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float) - _c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8') - ddtype = [('a', int), ('b', float), ('c', '|S8')] - mrec = fromarrays([_a, _b, _c], dtype=ddtype, - fill_value=(99999, 99999., 'N/A')) - mrecfilled = mrec.filled() - assert_equal(mrecfilled['a'], np.array((1, 2, 99999), dtype=int)) - assert_equal(mrecfilled['b'], np.array((1.1, 2.2, 99999.), - dtype=float)) - assert_equal(mrecfilled['c'], np.array(('one', 'two', 'N/A'), - dtype='|S8')) - - def test_tolist(self): - # Test tolist. 
- _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int) - _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float) - _c = ma.array(['one', 'two', 'three'], mask=[1, 0, 0], dtype='|S8') - ddtype = [('a', int), ('b', float), ('c', '|S8')] - mrec = fromarrays([_a, _b, _c], dtype=ddtype, - fill_value=(99999, 99999., 'N/A')) - - assert_equal(mrec.tolist(), - [(1, 1.1, None), (2, 2.2, b'two'), - (None, None, b'three')]) - - def test_withnames(self): - # Test the creation w/ format and names - x = mrecarray(1, formats=float, names='base') - x[0]['base'] = 10 - assert_equal(x['base'][0], 10) - - def test_exotic_formats(self): - # Test that 'exotic' formats are processed properly - easy = mrecarray(1, dtype=[('i', int), ('s', '|S8'), ('f', float)]) - easy[0] = masked - assert_equal(easy.filled(1).item(), (1, b'1', 1.)) - - solo = mrecarray(1, dtype=[('f0', ' 1: - assert_(eq(np.concatenate((x, y), 1), - concatenate((xm, ym), 1))) - assert_(eq(np.add.reduce(x, 1), add.reduce(x, 1))) - assert_(eq(np.sum(x, 1), sum(x, 1))) - assert_(eq(np.product(x, 1), product(x, 1))) - - def test_testCI(self): - # Test of conversions and indexing - x1 = np.array([1, 2, 4, 3]) - x2 = array(x1, mask=[1, 0, 0, 0]) - x3 = array(x1, mask=[0, 1, 0, 1]) - x4 = array(x1) - # test conversion to strings - str(x2) # raises? - repr(x2) # raises? 
- assert_(eq(np.sort(x1), sort(x2, fill_value=0))) - # tests of indexing - assert_(type(x2[1]) is type(x1[1])) - assert_(x1[1] == x2[1]) - assert_(x2[0] is masked) - assert_(eq(x1[2], x2[2])) - assert_(eq(x1[2:5], x2[2:5])) - assert_(eq(x1[:], x2[:])) - assert_(eq(x1[1:], x3[1:])) - x1[2] = 9 - x2[2] = 9 - assert_(eq(x1, x2)) - x1[1:3] = 99 - x2[1:3] = 99 - assert_(eq(x1, x2)) - x2[1] = masked - assert_(eq(x1, x2)) - x2[1:3] = masked - assert_(eq(x1, x2)) - x2[:] = x1 - x2[1] = masked - assert_(allequal(getmask(x2), array([0, 1, 0, 0]))) - x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) - assert_(allequal(getmask(x3), array([0, 1, 1, 0]))) - x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) - assert_(allequal(getmask(x4), array([0, 1, 1, 0]))) - assert_(allequal(x4, array([1, 2, 3, 4]))) - x1 = np.arange(5) * 1.0 - x2 = masked_values(x1, 3.0) - assert_(eq(x1, x2)) - assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask)) - assert_(eq(3.0, x2.fill_value)) - x1 = array([1, 'hello', 2, 3], object) - x2 = np.array([1, 'hello', 2, 3], object) - s1 = x1[1] - s2 = x2[1] - assert_equal(type(s2), str) - assert_equal(type(s1), str) - assert_equal(s1, s2) - assert_(x1[1:1].shape == (0,)) - - def test_testCopySize(self): - # Tests of some subtle points of copying and sizing. - n = [0, 0, 1, 0, 0] - m = make_mask(n) - m2 = make_mask(m) - assert_(m is m2) - m3 = make_mask(m, copy=True) - assert_(m is not m3) - - x1 = np.arange(5) - y1 = array(x1, mask=m) - assert_(y1._data is not x1) - assert_(allequal(x1, y1._data)) - assert_(y1._mask is m) - - y1a = array(y1, copy=0) - # For copy=False, one might expect that the array would just - # passed on, i.e., that it would be "is" instead of "==". - # See gh-4043 for discussion. 
- assert_(y1a._mask.__array_interface__ == - y1._mask.__array_interface__) - - y2 = array(x1, mask=m3, copy=0) - assert_(y2._mask is m3) - assert_(y2[2] is masked) - y2[2] = 9 - assert_(y2[2] is not masked) - assert_(y2._mask is m3) - assert_(allequal(y2.mask, 0)) - - y2a = array(x1, mask=m, copy=1) - assert_(y2a._mask is not m) - assert_(y2a[2] is masked) - y2a[2] = 9 - assert_(y2a[2] is not masked) - assert_(y2a._mask is not m) - assert_(allequal(y2a.mask, 0)) - - y3 = array(x1 * 1.0, mask=m) - assert_(filled(y3).dtype is (x1 * 1.0).dtype) - - x4 = arange(4) - x4[2] = masked - y4 = resize(x4, (8,)) - assert_(eq(concatenate([x4, x4]), y4)) - assert_(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])) - y5 = repeat(x4, (2, 2, 2, 2), axis=0) - assert_(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3])) - y6 = repeat(x4, 2, axis=0) - assert_(eq(y5, y6)) - - def test_testPut(self): - # Test of put - d = arange(5) - n = [0, 0, 0, 1, 1] - m = make_mask(n) - m2 = m.copy() - x = array(d, mask=m) - assert_(x[3] is masked) - assert_(x[4] is masked) - x[[1, 4]] = [10, 40] - assert_(x._mask is m) - assert_(x[3] is masked) - assert_(x[4] is not masked) - assert_(eq(x, [0, 10, 2, -1, 40])) - - x = array(d, mask=m2, copy=True) - x.put([0, 1, 2], [-1, 100, 200]) - assert_(x._mask is not m2) - assert_(x[3] is masked) - assert_(x[4] is masked) - assert_(eq(x, [-1, 100, 200, 0, 0])) - - def test_testPut2(self): - # Test of put - d = arange(5) - x = array(d, mask=[0, 0, 0, 0, 0]) - z = array([10, 40], mask=[1, 0]) - assert_(x[2] is not masked) - assert_(x[3] is not masked) - x[2:4] = z - assert_(x[2] is masked) - assert_(x[3] is not masked) - assert_(eq(x, [0, 1, 10, 40, 4])) - - d = arange(5) - x = array(d, mask=[0, 0, 0, 0, 0]) - y = x[2:4] - z = array([10, 40], mask=[1, 0]) - assert_(x[2] is not masked) - assert_(x[3] is not masked) - y[:] = z - assert_(y[0] is masked) - assert_(y[1] is not masked) - assert_(eq(y, [10, 40])) - assert_(x[2] is masked) - assert_(x[3] is not masked) - assert_(eq(x, [0, 1, 10, 
40, 4])) - - def test_testMaPut(self): - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d - m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1] - i = np.nonzero(m)[0] - put(ym, i, zm) - assert_(all(take(ym, i, axis=0) == zm)) - - def test_testOddFeatures(self): - # Test of other odd features - x = arange(20) - x = x.reshape(4, 5) - x.flat[5] = 12 - assert_(x[1, 0] == 12) - z = x + 10j * x - assert_(eq(z.real, x)) - assert_(eq(z.imag, 10 * x)) - assert_(eq((z * conjugate(z)).real, 101 * x * x)) - z.imag[...] = 0.0 - - x = arange(10) - x[3] = masked - assert_(str(x[3]) == str(masked)) - c = x >= 8 - assert_(count(where(c, masked, masked)) == 0) - assert_(shape(where(c, masked, masked)) == c.shape) - z = where(c, x, masked) - assert_(z.dtype is x.dtype) - assert_(z[3] is masked) - assert_(z[4] is masked) - assert_(z[7] is masked) - assert_(z[8] is not masked) - assert_(z[9] is not masked) - assert_(eq(x, z)) - z = where(c, masked, x) - assert_(z.dtype is x.dtype) - assert_(z[3] is masked) - assert_(z[4] is not masked) - assert_(z[7] is not masked) - assert_(z[8] is masked) - assert_(z[9] is masked) - z = masked_where(c, x) - assert_(z.dtype is x.dtype) - assert_(z[3] is masked) - assert_(z[4] is not masked) - assert_(z[7] is not masked) - assert_(z[8] is masked) - assert_(z[9] is masked) - assert_(eq(x, z)) - x = array([1., 2., 3., 4., 5.]) - c = array([1, 1, 1, 0, 0]) - x[2] = masked - z = where(c, x, -x) - assert_(eq(z, [1., 2., 0., -4., -5])) - c[0] = masked - z = where(c, x, -x) - assert_(eq(z, [1., 2., 0., -4., -5])) - assert_(z[0] is masked) - assert_(z[1] is not masked) - assert_(z[2] is masked) - assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2))) - assert_(eq(masked_where(greater_equal(x, 2), x), - masked_greater_equal(x, 2))) - assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2))) - assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2))) - assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))) - 
assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2))) - assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))) - assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4])) - assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])) - assert_(eq(masked_inside(array(list(range(5)), - mask=[1, 0, 0, 0, 0]), 1, 3).mask, - [1, 1, 1, 1, 0])) - assert_(eq(masked_outside(array(list(range(5)), - mask=[0, 1, 0, 0, 0]), 1, 3).mask, - [1, 1, 0, 0, 1])) - assert_(eq(masked_equal(array(list(range(5)), - mask=[1, 0, 0, 0, 0]), 2).mask, - [1, 0, 1, 0, 0])) - assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1], - mask=[1, 0, 0, 0, 0]), 2).mask, - [1, 0, 1, 0, 1])) - assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), - [99, 99, 3, 4, 5])) - atest = ones((10, 10, 10), dtype=np.float32) - btest = zeros(atest.shape, MaskType) - ctest = masked_where(btest, atest) - assert_(eq(atest, ctest)) - z = choose(c, (-x, x)) - assert_(eq(z, [1., 2., 0., -4., -5])) - assert_(z[0] is masked) - assert_(z[1] is not masked) - assert_(z[2] is masked) - x = arange(6) - x[5] = masked - y = arange(6) * 10 - y[2] = masked - c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0]) - cm = c.filled(1) - z = where(c, x, y) - zm = where(cm, x, y) - assert_(eq(z, zm)) - assert_(getmask(zm) is nomask) - assert_(eq(zm, [0, 1, 2, 30, 40, 50])) - z = where(c, masked, 1) - assert_(eq(z, [99, 99, 99, 1, 1, 1])) - z = where(c, 1, masked) - assert_(eq(z, [99, 1, 1, 99, 99, 99])) - - def test_testMinMax2(self): - # Test of minimum, maximum. 
- assert_(eq(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3])) - assert_(eq(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9])) - x = arange(5) - y = arange(5) - 2 - x[3] = masked - y[0] = masked - assert_(eq(minimum(x, y), where(less(x, y), x, y))) - assert_(eq(maximum(x, y), where(greater(x, y), x, y))) - assert_(minimum.reduce(x) == 0) - assert_(maximum.reduce(x) == 4) - - def test_testTakeTransposeInnerOuter(self): - # Test of take, transpose, inner, outer products - x = arange(24) - y = np.arange(24) - x[5:6] = masked - x = x.reshape(2, 3, 4) - y = y.reshape(2, 3, 4) - assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))) - assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))) - assert_(eq(np.inner(filled(x, 0), filled(y, 0)), - inner(x, y))) - assert_(eq(np.outer(filled(x, 0), filled(y, 0)), - outer(x, y))) - y = array(['abc', 1, 'def', 2, 3], object) - y[2] = masked - t = take(y, [0, 3, 4]) - assert_(t[0] == 'abc') - assert_(t[1] == 2) - assert_(t[2] == 3) - - def test_testInplace(self): - # Test of inplace operations and rich comparisons - y = arange(10) - - x = arange(10) - xm = arange(10) - xm[2] = masked - x += 1 - assert_(eq(x, y + 1)) - xm += 1 - assert_(eq(x, y + 1)) - - x = arange(10) - xm = arange(10) - xm[2] = masked - x -= 1 - assert_(eq(x, y - 1)) - xm -= 1 - assert_(eq(xm, y - 1)) - - x = arange(10) * 1.0 - xm = arange(10) * 1.0 - xm[2] = masked - x *= 2.0 - assert_(eq(x, y * 2)) - xm *= 2.0 - assert_(eq(xm, y * 2)) - - x = arange(10) * 2 - xm = arange(10) - xm[2] = masked - x //= 2 - assert_(eq(x, y)) - xm //= 2 - assert_(eq(x, y)) - - x = arange(10) * 1.0 - xm = arange(10) * 1.0 - xm[2] = masked - x /= 2.0 - assert_(eq(x, y / 2.0)) - xm /= arange(10) - assert_(eq(xm, ones((10,)))) - - x = arange(10).astype(np.float32) - xm = arange(10) - xm[2] = masked - x += 1. 
- assert_(eq(x, y + 1.)) - - def test_testPickle(self): - # Test of pickling - x = arange(12) - x[4:10:2] = masked - x = x.reshape(4, 3) - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - s = pickle.dumps(x, protocol=proto) - y = pickle.loads(s) - assert_(eq(x, y)) - - def test_testMasked(self): - # Test of masked element - xx = arange(6) - xx[1] = masked - assert_(str(masked) == '--') - assert_(xx[1] is masked) - assert_equal(filled(xx[1], 0), 0) - - def test_testAverage1(self): - # Test of average. - ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) - assert_(eq(2.0, average(ott, axis=0))) - assert_(eq(2.0, average(ott, weights=[1., 1., 2., 1.]))) - result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True) - assert_(eq(2.0, result)) - assert_(wts == 4.0) - ott[:] = masked - assert_(average(ott, axis=0) is masked) - ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) - ott = ott.reshape(2, 2) - ott[:, 1] = masked - assert_(eq(average(ott, axis=0), [2.0, 0.0])) - assert_(average(ott, axis=1)[0] is masked) - assert_(eq([2., 0.], average(ott, axis=0))) - result, wts = average(ott, axis=0, returned=True) - assert_(eq(wts, [1., 0.])) - - def test_testAverage2(self): - # More tests of average. - w1 = [0, 1, 1, 1, 1, 0] - w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] - x = arange(6) - assert_(allclose(average(x, axis=0), 2.5)) - assert_(allclose(average(x, axis=0, weights=w1), 2.5)) - y = array([arange(6), 2.0 * arange(6)]) - assert_(allclose(average(y, None), - np.add.reduce(np.arange(6)) * 3. / 12.)) - assert_(allclose(average(y, axis=0), np.arange(6) * 3. / 2.)) - assert_(allclose(average(y, axis=1), - [average(x, axis=0), average(x, axis=0)*2.0])) - assert_(allclose(average(y, None, weights=w2), 20. 
/ 6.)) - assert_(allclose(average(y, axis=0, weights=w2), - [0., 1., 2., 3., 4., 10.])) - assert_(allclose(average(y, axis=1), - [average(x, axis=0), average(x, axis=0)*2.0])) - m1 = zeros(6) - m2 = [0, 0, 1, 1, 0, 0] - m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] - m4 = ones(6) - m5 = [0, 1, 1, 1, 1, 1] - assert_(allclose(average(masked_array(x, m1), axis=0), 2.5)) - assert_(allclose(average(masked_array(x, m2), axis=0), 2.5)) - assert_(average(masked_array(x, m4), axis=0) is masked) - assert_equal(average(masked_array(x, m5), axis=0), 0.0) - assert_equal(count(average(masked_array(x, m4), axis=0)), 0) - z = masked_array(y, m3) - assert_(allclose(average(z, None), 20. / 6.)) - assert_(allclose(average(z, axis=0), - [0., 1., 99., 99., 4.0, 7.5])) - assert_(allclose(average(z, axis=1), [2.5, 5.0])) - assert_(allclose(average(z, axis=0, weights=w2), - [0., 1., 99., 99., 4.0, 10.0])) - - a = arange(6) - b = arange(6) * 3 - r1, w1 = average([[a, b], [b, a]], axis=1, returned=True) - assert_equal(shape(r1), shape(w1)) - assert_equal(r1.shape, w1.shape) - r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True) - assert_equal(shape(w2), shape(r2)) - r2, w2 = average(ones((2, 2, 3)), returned=True) - assert_equal(shape(w2), shape(r2)) - r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True) - assert_(shape(w2) == shape(r2)) - a2d = array([[1, 2], [0, 4]], float) - a2dm = masked_array(a2d, [[0, 0], [1, 0]]) - a2da = average(a2d, axis=0) - assert_(eq(a2da, [0.5, 3.0])) - a2dma = average(a2dm, axis=0) - assert_(eq(a2dma, [1.0, 3.0])) - a2dma = average(a2dm, axis=None) - assert_(eq(a2dma, 7. 
/ 3.)) - a2dma = average(a2dm, axis=1) - assert_(eq(a2dma, [1.5, 4.0])) - - def test_testToPython(self): - assert_equal(1, int(array(1))) - assert_equal(1.0, float(array(1))) - assert_equal(1, int(array([[[1]]]))) - assert_equal(1.0, float(array([[1]]))) - assert_raises(TypeError, float, array([1, 1])) - assert_raises(ValueError, bool, array([0, 1])) - assert_raises(ValueError, bool, array([0, 0], mask=[0, 1])) - - def test_testScalarArithmetic(self): - xm = array(0, mask=1) - #TODO FIXME: Find out what the following raises a warning in r8247 - with np.errstate(divide='ignore'): - assert_((1 / array(0)).mask) - assert_((1 + xm).mask) - assert_((-xm).mask) - assert_((-xm).mask) - assert_(maximum(xm, xm).mask) - assert_(minimum(xm, xm).mask) - assert_(xm.filled().dtype is xm._data.dtype) - x = array(0, mask=0) - assert_(x.filled() == x._data) - assert_equal(str(xm), str(masked_print_option)) - - def test_testArrayMethods(self): - a = array([1, 3, 2]) - assert_(eq(a.any(), a._data.any())) - assert_(eq(a.all(), a._data.all())) - assert_(eq(a.argmax(), a._data.argmax())) - assert_(eq(a.argmin(), a._data.argmin())) - assert_(eq(a.choose(0, 1, 2, 3, 4), - a._data.choose(0, 1, 2, 3, 4))) - assert_(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))) - assert_(eq(a.conj(), a._data.conj())) - assert_(eq(a.conjugate(), a._data.conjugate())) - m = array([[1, 2], [3, 4]]) - assert_(eq(m.diagonal(), m._data.diagonal())) - assert_(eq(a.sum(), a._data.sum())) - assert_(eq(a.take([1, 2]), a._data.take([1, 2]))) - assert_(eq(m.transpose(), m._data.transpose())) - - def test_testArrayAttributes(self): - a = array([1, 3, 2]) - assert_equal(a.ndim, 1) - - def test_testAPI(self): - assert_(not [m for m in dir(np.ndarray) - if m not in dir(MaskedArray) and - not m.startswith('_')]) - - def test_testSingleElementSubscript(self): - a = array([1, 3, 2]) - b = array([1, 3, 2], mask=[1, 0, 1]) - assert_equal(a[0].shape, ()) - assert_equal(b[0].shape, ()) - assert_equal(b[1].shape, ()) - - 
-class TestUfuncs(object): - def setup(self): - self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), - array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),) - - def test_testUfuncRegression(self): - f_invalid_ignore = [ - 'sqrt', 'arctanh', 'arcsin', 'arccos', - 'arccosh', 'arctanh', 'log', 'log10', 'divide', - 'true_divide', 'floor_divide', 'remainder', 'fmod'] - for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', - 'sin', 'cos', 'tan', - 'arcsin', 'arccos', 'arctan', - 'sinh', 'cosh', 'tanh', - 'arcsinh', - 'arccosh', - 'arctanh', - 'absolute', 'fabs', 'negative', - 'floor', 'ceil', - 'logical_not', - 'add', 'subtract', 'multiply', - 'divide', 'true_divide', 'floor_divide', - 'remainder', 'fmod', 'hypot', 'arctan2', - 'equal', 'not_equal', 'less_equal', 'greater_equal', - 'less', 'greater', - 'logical_and', 'logical_or', 'logical_xor']: - try: - uf = getattr(umath, f) - except AttributeError: - uf = getattr(fromnumeric, f) - mf = getattr(np.ma, f) - args = self.d[:uf.nin] - with np.errstate(): - if f in f_invalid_ignore: - np.seterr(invalid='ignore') - if f in ['arctanh', 'log', 'log10']: - np.seterr(divide='ignore') - ur = uf(*args) - mr = mf(*args) - assert_(eq(ur.filled(0), mr.filled(0), f)) - assert_(eqmask(ur.mask, mr.mask)) - - def test_reduce(self): - a = self.d[0] - assert_(not alltrue(a, axis=0)) - assert_(sometrue(a, axis=0)) - assert_equal(sum(a[:3], axis=0), 0) - assert_equal(product(a, axis=0), 0) - - def test_minmax(self): - a = arange(1, 13).reshape(3, 4) - amask = masked_where(a < 5, a) - assert_equal(amask.max(), a.max()) - assert_equal(amask.min(), 5) - assert_((amask.max(0) == a.max(0)).all()) - assert_((amask.min(0) == [5, 6, 7, 8]).all()) - assert_(amask.max(1)[0].mask) - assert_(amask.min(1)[0].mask) - - def test_nonzero(self): - for t in "?bhilqpBHILQPfdgFDGO": - x = array([1, 0, 2, 0], mask=[0, 0, 1, 1]) - assert_(eq(nonzero(x), [0])) - - -class TestArrayMethods(object): - - def setup(self): - x = np.array([8.375, 7.545, 
8.828, 8.5, 1.757, 5.928, - 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, - 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, - 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, - 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, - 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) - X = x.reshape(6, 6) - XX = x.reshape(3, 2, 2, 3) - - m = np.array([0, 1, 0, 1, 0, 0, - 1, 0, 1, 1, 0, 1, - 0, 0, 0, 1, 0, 1, - 0, 0, 0, 1, 1, 1, - 1, 0, 0, 1, 0, 0, - 0, 0, 1, 0, 1, 0]) - mx = array(data=x, mask=m) - mX = array(data=X, mask=m.reshape(X.shape)) - mXX = array(data=XX, mask=m.reshape(XX.shape)) - - self.d = (x, X, XX, m, mx, mX, mXX) - - def test_trace(self): - (x, X, XX, m, mx, mX, mXX,) = self.d - mXdiag = mX.diagonal() - assert_equal(mX.trace(), mX.diagonal().compressed().sum()) - assert_(eq(mX.trace(), - X.trace() - sum(mXdiag.mask * X.diagonal(), - axis=0))) - - def test_clip(self): - (x, X, XX, m, mx, mX, mXX,) = self.d - clipped = mx.clip(2, 8) - assert_(eq(clipped.mask, mx.mask)) - assert_(eq(clipped._data, x.clip(2, 8))) - assert_(eq(clipped._data, mx._data.clip(2, 8))) - - def test_ptp(self): - (x, X, XX, m, mx, mX, mXX,) = self.d - (n, m) = X.shape - assert_equal(mx.ptp(), mx.compressed().ptp()) - rows = np.zeros(n, np.float_) - cols = np.zeros(m, np.float_) - for k in range(m): - cols[k] = mX[:, k].compressed().ptp() - for k in range(n): - rows[k] = mX[k].compressed().ptp() - assert_(eq(mX.ptp(0), cols)) - assert_(eq(mX.ptp(1), rows)) - - def test_swapaxes(self): - (x, X, XX, m, mx, mX, mXX,) = self.d - mXswapped = mX.swapaxes(0, 1) - assert_(eq(mXswapped[-1], mX[:, -1])) - mXXswapped = mXX.swapaxes(0, 2) - assert_equal(mXXswapped.shape, (2, 2, 3, 3)) - - def test_cumprod(self): - (x, X, XX, m, mx, mX, mXX,) = self.d - mXcp = mX.cumprod(0) - assert_(eq(mXcp._data, mX.filled(1).cumprod(0))) - mXcp = mX.cumprod(1) - assert_(eq(mXcp._data, mX.filled(1).cumprod(1))) - - def test_cumsum(self): - (x, X, XX, m, mx, mX, mXX,) = self.d - mXcp = mX.cumsum(0) - assert_(eq(mXcp._data, mX.filled(0).cumsum(0))) - 
mXcp = mX.cumsum(1) - assert_(eq(mXcp._data, mX.filled(0).cumsum(1))) - - def test_varstd(self): - (x, X, XX, m, mx, mX, mXX,) = self.d - assert_(eq(mX.var(axis=None), mX.compressed().var())) - assert_(eq(mX.std(axis=None), mX.compressed().std())) - assert_(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape)) - assert_(eq(mX.var().shape, X.var().shape)) - (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) - for k in range(6): - assert_(eq(mXvar1[k], mX[k].compressed().var())) - assert_(eq(mXvar0[k], mX[:, k].compressed().var())) - assert_(eq(np.sqrt(mXvar0[k]), - mX[:, k].compressed().std())) - - -def eqmask(m1, m2): - if m1 is nomask: - return m2 is nomask - if m2 is nomask: - return m1 is nomask - return (m1 == m2).all() diff --git a/venv/lib/python3.7/site-packages/numpy/ma/tests/test_regression.py b/venv/lib/python3.7/site-packages/numpy/ma/tests/test_regression.py deleted file mode 100644 index b83873a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/ma/tests/test_regression.py +++ /dev/null @@ -1,93 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.testing import ( - assert_, assert_array_equal, assert_allclose, suppress_warnings - ) - - -class TestRegression(object): - def test_masked_array_create(self): - # Ticket #17 - x = np.ma.masked_array([0, 1, 2, 3, 0, 4, 5, 6], - mask=[0, 0, 0, 1, 1, 1, 0, 0]) - assert_array_equal(np.ma.nonzero(x), [[1, 2, 6, 7]]) - - def test_masked_array(self): - # Ticket #61 - np.ma.array(1, mask=[1]) - - def test_mem_masked_where(self): - # Ticket #62 - from numpy.ma import masked_where, MaskType - a = np.zeros((1, 1)) - b = np.zeros(a.shape, MaskType) - c = masked_where(b, a) - a-c - - def test_masked_array_multiply(self): - # Ticket #254 - a = np.ma.zeros((4, 1)) - a[2, 0] = np.ma.masked - b = np.zeros((4, 2)) - a*b - b*a - - def test_masked_array_repeat(self): - # Ticket #271 - np.ma.array([1], mask=False).repeat(10) - - def test_masked_array_repr_unicode(self): - # 
Ticket #1256 - repr(np.ma.array(u"Unicode")) - - def test_atleast_2d(self): - # Ticket #1559 - a = np.ma.masked_array([0.0, 1.2, 3.5], mask=[False, True, False]) - b = np.atleast_2d(a) - assert_(a.mask.ndim == 1) - assert_(b.mask.ndim == 2) - - def test_set_fill_value_unicode_py3(self): - # Ticket #2733 - a = np.ma.masked_array(['a', 'b', 'c'], mask=[1, 0, 0]) - a.fill_value = 'X' - assert_(a.fill_value == 'X') - - def test_var_sets_maskedarray_scalar(self): - # Issue gh-2757 - a = np.ma.array(np.arange(5), mask=True) - mout = np.ma.array(-1, dtype=float) - a.var(out=mout) - assert_(mout._data == 0) - - def test_ddof_corrcoef(self): - # See gh-3336 - x = np.ma.masked_equal([1, 2, 3, 4, 5], 4) - y = np.array([2, 2.5, 3.1, 3, 5]) - # this test can be removed after deprecation. - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") - r0 = np.ma.corrcoef(x, y, ddof=0) - r1 = np.ma.corrcoef(x, y, ddof=1) - # ddof should not have an effect (it gets cancelled out) - assert_allclose(r0.data, r1.data) - - def test_mask_not_backmangled(self): - # See gh-10314. Test case taken from gh-3140. - a = np.ma.MaskedArray([1., 2.], mask=[False, False]) - assert_(a.mask.shape == (2,)) - b = np.tile(a, (2, 1)) - # Check that the above no longer changes a.shape to (1, 2) - assert_(a.mask.shape == (2,)) - assert_(b.shape == (2, 2)) - assert_(b.mask.shape == (2, 2)) - - def test_empty_list_on_structured(self): - # See gh-12464. Indexing with empty list should give empty result. 
- ma = np.ma.MaskedArray([(1, 1.), (2, 2.), (3, 3.)], dtype='i4,f4') - assert_array_equal(ma[[]], ma[:0]) - - def test_masked_array_tostring_fortran(self): - ma = np.ma.arange(4).reshape((2,2)) - assert_array_equal(ma.tostring(order='F'), ma.T.tostring()) diff --git a/venv/lib/python3.7/site-packages/numpy/ma/tests/test_subclassing.py b/venv/lib/python3.7/site-packages/numpy/ma/tests/test_subclassing.py deleted file mode 100644 index 440b367..0000000 --- a/venv/lib/python3.7/site-packages/numpy/ma/tests/test_subclassing.py +++ /dev/null @@ -1,351 +0,0 @@ -# pylint: disable-msg=W0611, W0612, W0511,R0201 -"""Tests suite for MaskedArray & subclassing. - -:author: Pierre Gerard-Marchant -:contact: pierregm_at_uga_dot_edu -:version: $Id: test_subclassing.py 3473 2007-10-29 15:18:13Z jarrod.millman $ - -""" -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.testing import assert_, assert_raises -from numpy.ma.testutils import assert_equal -from numpy.ma.core import ( - array, arange, masked, MaskedArray, masked_array, log, add, hypot, - divide, asarray, asanyarray, nomask - ) -# from numpy.ma.core import ( - -def assert_startswith(a, b): - # produces a better error message than assert_(a.startswith(b)) - assert_equal(a[:len(b)], b) - -class SubArray(np.ndarray): - # Defines a generic np.ndarray subclass, that stores some metadata - # in the dictionary `info`. 
- def __new__(cls,arr,info={}): - x = np.asanyarray(arr).view(cls) - x.info = info.copy() - return x - - def __array_finalize__(self, obj): - if callable(getattr(super(SubArray, self), - '__array_finalize__', None)): - super(SubArray, self).__array_finalize__(obj) - self.info = getattr(obj, 'info', {}).copy() - return - - def __add__(self, other): - result = super(SubArray, self).__add__(other) - result.info['added'] = result.info.get('added', 0) + 1 - return result - - def __iadd__(self, other): - result = super(SubArray, self).__iadd__(other) - result.info['iadded'] = result.info.get('iadded', 0) + 1 - return result - - -subarray = SubArray - - -class SubMaskedArray(MaskedArray): - """Pure subclass of MaskedArray, keeping some info on subclass.""" - def __new__(cls, info=None, **kwargs): - obj = super(SubMaskedArray, cls).__new__(cls, **kwargs) - obj._optinfo['info'] = info - return obj - - -class MSubArray(SubArray, MaskedArray): - - def __new__(cls, data, info={}, mask=nomask): - subarr = SubArray(data, info) - _data = MaskedArray.__new__(cls, data=subarr, mask=mask) - _data.info = subarr.info - return _data - - @property - def _series(self): - _view = self.view(MaskedArray) - _view._sharedmask = False - return _view - -msubarray = MSubArray - - -# Also a subclass that overrides __str__, __repr__ and __setitem__, disallowing -# setting to non-class values (and thus np.ma.core.masked_print_option) -# and overrides __array_wrap__, updating the info dict, to check that this -# doesn't get destroyed by MaskedArray._update_from. But this one also needs -# its own iterator... 
-class CSAIterator(object): - """ - Flat iterator object that uses its own setter/getter - (works around ndarray.flat not propagating subclass setters/getters - see https://github.com/numpy/numpy/issues/4564) - roughly following MaskedIterator - """ - def __init__(self, a): - self._original = a - self._dataiter = a.view(np.ndarray).flat - - def __iter__(self): - return self - - def __getitem__(self, indx): - out = self._dataiter.__getitem__(indx) - if not isinstance(out, np.ndarray): - out = out.__array__() - out = out.view(type(self._original)) - return out - - def __setitem__(self, index, value): - self._dataiter[index] = self._original._validate_input(value) - - def __next__(self): - return next(self._dataiter).__array__().view(type(self._original)) - - next = __next__ - - -class ComplicatedSubArray(SubArray): - - def __str__(self): - return 'myprefix {0} mypostfix'.format(self.view(SubArray)) - - def __repr__(self): - # Return a repr that does not start with 'name(' - return '<{0} {1}>'.format(self.__class__.__name__, self) - - def _validate_input(self, value): - if not isinstance(value, ComplicatedSubArray): - raise ValueError("Can only set to MySubArray values") - return value - - def __setitem__(self, item, value): - # validation ensures direct assignment with ndarray or - # masked_print_option will fail - super(ComplicatedSubArray, self).__setitem__( - item, self._validate_input(value)) - - def __getitem__(self, item): - # ensure getter returns our own class also for scalars - value = super(ComplicatedSubArray, self).__getitem__(item) - if not isinstance(value, np.ndarray): # scalar - value = value.__array__().view(ComplicatedSubArray) - return value - - @property - def flat(self): - return CSAIterator(self) - - @flat.setter - def flat(self, value): - y = self.ravel() - y[:] = value - - def __array_wrap__(self, obj, context=None): - obj = super(ComplicatedSubArray, self).__array_wrap__(obj, context) - if context is not None and context[0] is np.multiply: - 
obj.info['multiplied'] = obj.info.get('multiplied', 0) + 1 - - return obj - - -class TestSubclassing(object): - # Test suite for masked subclasses of ndarray. - - def setup(self): - x = np.arange(5, dtype='float') - mx = msubarray(x, mask=[0, 1, 0, 0, 0]) - self.data = (x, mx) - - def test_data_subclassing(self): - # Tests whether the subclass is kept. - x = np.arange(5) - m = [0, 0, 1, 0, 0] - xsub = SubArray(x) - xmsub = masked_array(xsub, mask=m) - assert_(isinstance(xmsub, MaskedArray)) - assert_equal(xmsub._data, xsub) - assert_(isinstance(xmsub._data, SubArray)) - - def test_maskedarray_subclassing(self): - # Tests subclassing MaskedArray - (x, mx) = self.data - assert_(isinstance(mx._data, subarray)) - - def test_masked_unary_operations(self): - # Tests masked_unary_operation - (x, mx) = self.data - with np.errstate(divide='ignore'): - assert_(isinstance(log(mx), msubarray)) - assert_equal(log(x), np.log(x)) - - def test_masked_binary_operations(self): - # Tests masked_binary_operation - (x, mx) = self.data - # Result should be a msubarray - assert_(isinstance(add(mx, mx), msubarray)) - assert_(isinstance(add(mx, x), msubarray)) - # Result should work - assert_equal(add(mx, x), mx+x) - assert_(isinstance(add(mx, mx)._data, subarray)) - assert_(isinstance(add.outer(mx, mx), msubarray)) - assert_(isinstance(hypot(mx, mx), msubarray)) - assert_(isinstance(hypot(mx, x), msubarray)) - - def test_masked_binary_operations2(self): - # Tests domained_masked_binary_operation - (x, mx) = self.data - xmx = masked_array(mx.data.__array__(), mask=mx.mask) - assert_(isinstance(divide(mx, mx), msubarray)) - assert_(isinstance(divide(mx, x), msubarray)) - assert_equal(divide(mx, mx), divide(xmx, xmx)) - - def test_attributepropagation(self): - x = array(arange(5), mask=[0]+[1]*4) - my = masked_array(subarray(x)) - ym = msubarray(x) - # - z = (my+1) - assert_(isinstance(z, MaskedArray)) - assert_(not isinstance(z, MSubArray)) - assert_(isinstance(z._data, SubArray)) - 
assert_equal(z._data.info, {}) - # - z = (ym+1) - assert_(isinstance(z, MaskedArray)) - assert_(isinstance(z, MSubArray)) - assert_(isinstance(z._data, SubArray)) - assert_(z._data.info['added'] > 0) - # Test that inplace methods from data get used (gh-4617) - ym += 1 - assert_(isinstance(ym, MaskedArray)) - assert_(isinstance(ym, MSubArray)) - assert_(isinstance(ym._data, SubArray)) - assert_(ym._data.info['iadded'] > 0) - # - ym._set_mask([1, 0, 0, 0, 1]) - assert_equal(ym._mask, [1, 0, 0, 0, 1]) - ym._series._set_mask([0, 0, 0, 0, 1]) - assert_equal(ym._mask, [0, 0, 0, 0, 1]) - # - xsub = subarray(x, info={'name':'x'}) - mxsub = masked_array(xsub) - assert_(hasattr(mxsub, 'info')) - assert_equal(mxsub.info, xsub.info) - - def test_subclasspreservation(self): - # Checks that masked_array(...,subok=True) preserves the class. - x = np.arange(5) - m = [0, 0, 1, 0, 0] - xinfo = [(i, j) for (i, j) in zip(x, m)] - xsub = MSubArray(x, mask=m, info={'xsub':xinfo}) - # - mxsub = masked_array(xsub, subok=False) - assert_(not isinstance(mxsub, MSubArray)) - assert_(isinstance(mxsub, MaskedArray)) - assert_equal(mxsub._mask, m) - # - mxsub = asarray(xsub) - assert_(not isinstance(mxsub, MSubArray)) - assert_(isinstance(mxsub, MaskedArray)) - assert_equal(mxsub._mask, m) - # - mxsub = masked_array(xsub, subok=True) - assert_(isinstance(mxsub, MSubArray)) - assert_equal(mxsub.info, xsub.info) - assert_equal(mxsub._mask, xsub._mask) - # - mxsub = asanyarray(xsub) - assert_(isinstance(mxsub, MSubArray)) - assert_equal(mxsub.info, xsub.info) - assert_equal(mxsub._mask, m) - - def test_subclass_items(self): - """test that getter and setter go via baseclass""" - x = np.arange(5) - xcsub = ComplicatedSubArray(x) - mxcsub = masked_array(xcsub, mask=[True, False, True, False, False]) - # getter should return a ComplicatedSubArray, even for single item - # first check we wrote ComplicatedSubArray correctly - assert_(isinstance(xcsub[1], ComplicatedSubArray)) - 
assert_(isinstance(xcsub[1,...], ComplicatedSubArray)) - assert_(isinstance(xcsub[1:4], ComplicatedSubArray)) - - # now that it propagates inside the MaskedArray - assert_(isinstance(mxcsub[1], ComplicatedSubArray)) - assert_(isinstance(mxcsub[1,...].data, ComplicatedSubArray)) - assert_(mxcsub[0] is masked) - assert_(isinstance(mxcsub[0,...].data, ComplicatedSubArray)) - assert_(isinstance(mxcsub[1:4].data, ComplicatedSubArray)) - - # also for flattened version (which goes via MaskedIterator) - assert_(isinstance(mxcsub.flat[1].data, ComplicatedSubArray)) - assert_(mxcsub.flat[0] is masked) - assert_(isinstance(mxcsub.flat[1:4].base, ComplicatedSubArray)) - - # setter should only work with ComplicatedSubArray input - # first check we wrote ComplicatedSubArray correctly - assert_raises(ValueError, xcsub.__setitem__, 1, x[4]) - # now that it propagates inside the MaskedArray - assert_raises(ValueError, mxcsub.__setitem__, 1, x[4]) - assert_raises(ValueError, mxcsub.__setitem__, slice(1, 4), x[1:4]) - mxcsub[1] = xcsub[4] - mxcsub[1:4] = xcsub[1:4] - # also for flattened version (which goes via MaskedIterator) - assert_raises(ValueError, mxcsub.flat.__setitem__, 1, x[4]) - assert_raises(ValueError, mxcsub.flat.__setitem__, slice(1, 4), x[1:4]) - mxcsub.flat[1] = xcsub[4] - mxcsub.flat[1:4] = xcsub[1:4] - - def test_subclass_nomask_items(self): - x = np.arange(5) - xcsub = ComplicatedSubArray(x) - mxcsub_nomask = masked_array(xcsub) - - assert_(isinstance(mxcsub_nomask[1,...].data, ComplicatedSubArray)) - assert_(isinstance(mxcsub_nomask[0,...].data, ComplicatedSubArray)) - - assert_(isinstance(mxcsub_nomask[1], ComplicatedSubArray)) - assert_(isinstance(mxcsub_nomask[0], ComplicatedSubArray)) - - def test_subclass_repr(self): - """test that repr uses the name of the subclass - and 'array' for np.ndarray""" - x = np.arange(5) - mx = masked_array(x, mask=[True, False, True, False, False]) - assert_startswith(repr(mx), 'masked_array') - xsub = SubArray(x) - mxsub = 
masked_array(xsub, mask=[True, False, True, False, False]) - assert_startswith(repr(mxsub), - 'masked_{0}(data=[--, 1, --, 3, 4]'.format(SubArray.__name__)) - - def test_subclass_str(self): - """test str with subclass that has overridden str, setitem""" - # first without override - x = np.arange(5) - xsub = SubArray(x) - mxsub = masked_array(xsub, mask=[True, False, True, False, False]) - assert_equal(str(mxsub), '[-- 1 -- 3 4]') - - xcsub = ComplicatedSubArray(x) - assert_raises(ValueError, xcsub.__setitem__, 0, - np.ma.core.masked_print_option) - mxcsub = masked_array(xcsub, mask=[True, False, True, False, False]) - assert_equal(str(mxcsub), 'myprefix [-- 1 -- 3 4] mypostfix') - - def test_pure_subclass_info_preservation(self): - # Test that ufuncs and methods conserve extra information consistently; - # see gh-7122. - arr1 = SubMaskedArray('test', data=[1,2,3,4,5,6]) - arr2 = SubMaskedArray(data=[0,1,2,3,4,5]) - diff1 = np.subtract(arr1, arr2) - assert_('info' in diff1._optinfo) - assert_(diff1._optinfo['info'] == 'test') - diff2 = arr1 - arr2 - assert_('info' in diff2._optinfo) - assert_(diff2._optinfo['info'] == 'test') diff --git a/venv/lib/python3.7/site-packages/numpy/ma/testutils.py b/venv/lib/python3.7/site-packages/numpy/ma/testutils.py deleted file mode 100644 index c0deaa9..0000000 --- a/venv/lib/python3.7/site-packages/numpy/ma/testutils.py +++ /dev/null @@ -1,290 +0,0 @@ -"""Miscellaneous functions for testing masked arrays and subclasses - -:author: Pierre Gerard-Marchant -:contact: pierregm_at_uga_dot_edu -:version: $Id: testutils.py 3529 2007-11-13 08:01:14Z jarrod.millman $ - -""" -from __future__ import division, absolute_import, print_function - -import operator - -import numpy as np -from numpy import ndarray, float_ -import numpy.core.umath as umath -import numpy.testing -from numpy.testing import ( - assert_, assert_allclose, assert_array_almost_equal_nulp, - assert_raises, build_err_msg - ) -from .core import mask_or, getmask, masked_array, 
nomask, masked, filled - -__all__masked = [ - 'almost', 'approx', 'assert_almost_equal', 'assert_array_almost_equal', - 'assert_array_approx_equal', 'assert_array_compare', - 'assert_array_equal', 'assert_array_less', 'assert_close', - 'assert_equal', 'assert_equal_records', 'assert_mask_equal', - 'assert_not_equal', 'fail_if_array_equal', - ] - -# Include some normal test functions to avoid breaking other projects who -# have mistakenly included them from this file. SciPy is one. That is -# unfortunate, as some of these functions are not intended to work with -# masked arrays. But there was no way to tell before. -from unittest import TestCase -__some__from_testing = [ - 'TestCase', 'assert_', 'assert_allclose', 'assert_array_almost_equal_nulp', - 'assert_raises' - ] - -__all__ = __all__masked + __some__from_testing - - -def approx(a, b, fill_value=True, rtol=1e-5, atol=1e-8): - """ - Returns true if all components of a and b are equal to given tolerances. - - If fill_value is True, masked values considered equal. Otherwise, - masked values are considered unequal. The relative error rtol should - be positive and << 1.0 The absolute error atol comes into play for - those elements of b that are very small or zero; it says how small a - must be also. - - """ - m = mask_or(getmask(a), getmask(b)) - d1 = filled(a) - d2 = filled(b) - if d1.dtype.char == "O" or d2.dtype.char == "O": - return np.equal(d1, d2).ravel() - x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_) - y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_) - d = np.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y)) - return d.ravel() - - -def almost(a, b, decimal=6, fill_value=True): - """ - Returns True if a and b are equal up to decimal places. - - If fill_value is True, masked values considered equal. Otherwise, - masked values are considered unequal. 
- - """ - m = mask_or(getmask(a), getmask(b)) - d1 = filled(a) - d2 = filled(b) - if d1.dtype.char == "O" or d2.dtype.char == "O": - return np.equal(d1, d2).ravel() - x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_) - y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_) - d = np.around(np.abs(x - y), decimal) <= 10.0 ** (-decimal) - return d.ravel() - - -def _assert_equal_on_sequences(actual, desired, err_msg=''): - """ - Asserts the equality of two non-array sequences. - - """ - assert_equal(len(actual), len(desired), err_msg) - for k in range(len(desired)): - assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg)) - return - - -def assert_equal_records(a, b): - """ - Asserts that two records are equal. - - Pretty crude for now. - - """ - assert_equal(a.dtype, b.dtype) - for f in a.dtype.names: - (af, bf) = (operator.getitem(a, f), operator.getitem(b, f)) - if not (af is masked) and not (bf is masked): - assert_equal(operator.getitem(a, f), operator.getitem(b, f)) - return - - -def assert_equal(actual, desired, err_msg=''): - """ - Asserts that two items are equal. - - """ - # Case #1: dictionary ..... - if isinstance(desired, dict): - if not isinstance(actual, dict): - raise AssertionError(repr(type(actual))) - assert_equal(len(actual), len(desired), err_msg) - for k, i in desired.items(): - if k not in actual: - raise AssertionError("%s not in %s" % (k, actual)) - assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg)) - return - # Case #2: lists ..... - if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): - return _assert_equal_on_sequences(actual, desired, err_msg='') - if not (isinstance(actual, ndarray) or isinstance(desired, ndarray)): - msg = build_err_msg([actual, desired], err_msg,) - if not desired == actual: - raise AssertionError(msg) - return - # Case #4. 
arrays or equivalent - if ((actual is masked) and not (desired is masked)) or \ - ((desired is masked) and not (actual is masked)): - msg = build_err_msg([actual, desired], - err_msg, header='', names=('x', 'y')) - raise ValueError(msg) - actual = np.array(actual, copy=False, subok=True) - desired = np.array(desired, copy=False, subok=True) - (actual_dtype, desired_dtype) = (actual.dtype, desired.dtype) - if actual_dtype.char == "S" and desired_dtype.char == "S": - return _assert_equal_on_sequences(actual.tolist(), - desired.tolist(), - err_msg='') - return assert_array_equal(actual, desired, err_msg) - - -def fail_if_equal(actual, desired, err_msg='',): - """ - Raises an assertion error if two items are equal. - - """ - if isinstance(desired, dict): - if not isinstance(actual, dict): - raise AssertionError(repr(type(actual))) - fail_if_equal(len(actual), len(desired), err_msg) - for k, i in desired.items(): - if k not in actual: - raise AssertionError(repr(k)) - fail_if_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg)) - return - if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): - fail_if_equal(len(actual), len(desired), err_msg) - for k in range(len(desired)): - fail_if_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg)) - return - if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray): - return fail_if_array_equal(actual, desired, err_msg) - msg = build_err_msg([actual, desired], err_msg) - if not desired != actual: - raise AssertionError(msg) - - -assert_not_equal = fail_if_equal - - -def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): - """ - Asserts that two items are almost equal. - - The test is equivalent to abs(desired-actual) < 0.5 * 10**(-decimal). 
- - """ - if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray): - return assert_array_almost_equal(actual, desired, decimal=decimal, - err_msg=err_msg, verbose=verbose) - msg = build_err_msg([actual, desired], - err_msg=err_msg, verbose=verbose) - if not round(abs(desired - actual), decimal) == 0: - raise AssertionError(msg) - - -assert_close = assert_almost_equal - - -def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='', - fill_value=True): - """ - Asserts that comparison between two masked arrays is satisfied. - - The comparison is elementwise. - - """ - # Allocate a common mask and refill - m = mask_or(getmask(x), getmask(y)) - x = masked_array(x, copy=False, mask=m, keep_mask=False, subok=False) - y = masked_array(y, copy=False, mask=m, keep_mask=False, subok=False) - if ((x is masked) and not (y is masked)) or \ - ((y is masked) and not (x is masked)): - msg = build_err_msg([x, y], err_msg=err_msg, verbose=verbose, - header=header, names=('x', 'y')) - raise ValueError(msg) - # OK, now run the basic tests on filled versions - return np.testing.assert_array_compare(comparison, - x.filled(fill_value), - y.filled(fill_value), - err_msg=err_msg, - verbose=verbose, header=header) - - -def assert_array_equal(x, y, err_msg='', verbose=True): - """ - Checks the elementwise equality of two masked arrays. - - """ - assert_array_compare(operator.__eq__, x, y, - err_msg=err_msg, verbose=verbose, - header='Arrays are not equal') - - -def fail_if_array_equal(x, y, err_msg='', verbose=True): - """ - Raises an assertion error if two masked arrays are not equal elementwise. - - """ - def compare(x, y): - return (not np.alltrue(approx(x, y))) - assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, - header='Arrays are not equal') - - -def assert_array_approx_equal(x, y, decimal=6, err_msg='', verbose=True): - """ - Checks the equality of two masked arrays, up to given number odecimals. 
- - The equality is checked elementwise. - - """ - def compare(x, y): - "Returns the result of the loose comparison between x and y)." - return approx(x, y, rtol=10. ** -decimal) - assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, - header='Arrays are not almost equal') - - -def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True): - """ - Checks the equality of two masked arrays, up to given number odecimals. - - The equality is checked elementwise. - - """ - def compare(x, y): - "Returns the result of the loose comparison between x and y)." - return almost(x, y, decimal) - assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, - header='Arrays are not almost equal') - - -def assert_array_less(x, y, err_msg='', verbose=True): - """ - Checks that x is smaller than y elementwise. - - """ - assert_array_compare(operator.__lt__, x, y, - err_msg=err_msg, verbose=verbose, - header='Arrays are not less-ordered') - - -def assert_mask_equal(m1, m2, err_msg=''): - """ - Asserts the equality of two masks. - - """ - if m1 is nomask: - assert_(m2 is nomask) - if m2 is nomask: - assert_(m1 is nomask) - assert_array_equal(m1, m2, err_msg=err_msg) diff --git a/venv/lib/python3.7/site-packages/numpy/ma/timer_comparison.py b/venv/lib/python3.7/site-packages/numpy/ma/timer_comparison.py deleted file mode 100644 index 4ad635e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/ma/timer_comparison.py +++ /dev/null @@ -1,439 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import timeit -from functools import reduce - -import numpy as np -from numpy import float_ -import numpy.core.fromnumeric as fromnumeric - -from numpy.testing import build_err_msg - -# Fixme: this does not look right. 
-np.seterr(all='ignore') - -pi = np.pi - - -class ModuleTester(object): - def __init__(self, module): - self.module = module - self.allequal = module.allequal - self.arange = module.arange - self.array = module.array - self.concatenate = module.concatenate - self.count = module.count - self.equal = module.equal - self.filled = module.filled - self.getmask = module.getmask - self.getmaskarray = module.getmaskarray - self.id = id - self.inner = module.inner - self.make_mask = module.make_mask - self.masked = module.masked - self.masked_array = module.masked_array - self.masked_values = module.masked_values - self.mask_or = module.mask_or - self.nomask = module.nomask - self.ones = module.ones - self.outer = module.outer - self.repeat = module.repeat - self.resize = module.resize - self.sort = module.sort - self.take = module.take - self.transpose = module.transpose - self.zeros = module.zeros - self.MaskType = module.MaskType - try: - self.umath = module.umath - except AttributeError: - self.umath = module.core.umath - self.testnames = [] - - def assert_array_compare(self, comparison, x, y, err_msg='', header='', - fill_value=True): - """ - Assert that a comparison of two masked arrays is satisfied elementwise. 
- - """ - xf = self.filled(x) - yf = self.filled(y) - m = self.mask_or(self.getmask(x), self.getmask(y)) - - x = self.filled(self.masked_array(xf, mask=m), fill_value) - y = self.filled(self.masked_array(yf, mask=m), fill_value) - if (x.dtype.char != "O"): - x = x.astype(float_) - if isinstance(x, np.ndarray) and x.size > 1: - x[np.isnan(x)] = 0 - elif np.isnan(x): - x = 0 - if (y.dtype.char != "O"): - y = y.astype(float_) - if isinstance(y, np.ndarray) and y.size > 1: - y[np.isnan(y)] = 0 - elif np.isnan(y): - y = 0 - try: - cond = (x.shape == () or y.shape == ()) or x.shape == y.shape - if not cond: - msg = build_err_msg([x, y], - err_msg - + '\n(shapes %s, %s mismatch)' % (x.shape, - y.shape), - header=header, - names=('x', 'y')) - assert cond, msg - val = comparison(x, y) - if m is not self.nomask and fill_value: - val = self.masked_array(val, mask=m) - if isinstance(val, bool): - cond = val - reduced = [0] - else: - reduced = val.ravel() - cond = reduced.all() - reduced = reduced.tolist() - if not cond: - match = 100-100.0*reduced.count(1)/len(reduced) - msg = build_err_msg([x, y], - err_msg - + '\n(mismatch %s%%)' % (match,), - header=header, - names=('x', 'y')) - assert cond, msg - except ValueError: - msg = build_err_msg([x, y], err_msg, header=header, names=('x', 'y')) - raise ValueError(msg) - - def assert_array_equal(self, x, y, err_msg=''): - """ - Checks the elementwise equality of two masked arrays. 
- - """ - self.assert_array_compare(self.equal, x, y, err_msg=err_msg, - header='Arrays are not equal') - - def test_0(self): - """ - Tests creation - - """ - x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) - m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - xm = self.masked_array(x, mask=m) - xm[0] - - def test_1(self): - """ - Tests creation - - """ - x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) - y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) - m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] - xm = self.masked_array(x, mask=m1) - ym = self.masked_array(y, mask=m2) - xf = np.where(m1, 1.e+20, x) - xm.set_fill_value(1.e+20) - - assert((xm-ym).filled(0).any()) - s = x.shape - assert(xm.size == reduce(lambda x, y:x*y, s)) - assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) - - for s in [(4, 3), (6, 2)]: - x.shape = s - y.shape = s - xm.shape = s - ym.shape = s - xf.shape = s - assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) - - def test_2(self): - """ - Tests conversions and indexing. - - """ - x1 = np.array([1, 2, 4, 3]) - x2 = self.array(x1, mask=[1, 0, 0, 0]) - x3 = self.array(x1, mask=[0, 1, 0, 1]) - x4 = self.array(x1) - # test conversion to strings, no errors - str(x2) - repr(x2) - # tests of indexing - assert type(x2[1]) is type(x1[1]) - assert x1[1] == x2[1] - x1[2] = 9 - x2[2] = 9 - self.assert_array_equal(x1, x2) - x1[1:3] = 99 - x2[1:3] = 99 - x2[1] = self.masked - x2[1:3] = self.masked - x2[:] = x1 - x2[1] = self.masked - x3[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0]) - x4[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0]) - x1 = np.arange(5)*1.0 - x2 = self.masked_values(x1, 3.0) - x1 = self.array([1, 'hello', 2, 3], object) - x2 = np.array([1, 'hello', 2, 3], object) - # check that no error occurs. 
- x1[1] - x2[1] - assert x1[1:1].shape == (0,) - # Tests copy-size - n = [0, 0, 1, 0, 0] - m = self.make_mask(n) - m2 = self.make_mask(m) - assert(m is m2) - m3 = self.make_mask(m, copy=1) - assert(m is not m3) - - def test_3(self): - """ - Tests resize/repeat - - """ - x4 = self.arange(4) - x4[2] = self.masked - y4 = self.resize(x4, (8,)) - assert self.allequal(self.concatenate([x4, x4]), y4) - assert self.allequal(self.getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) - y5 = self.repeat(x4, (2, 2, 2, 2), axis=0) - self.assert_array_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) - y6 = self.repeat(x4, 2, axis=0) - assert self.allequal(y5, y6) - y7 = x4.repeat((2, 2, 2, 2), axis=0) - assert self.allequal(y5, y7) - y8 = x4.repeat(2, 0) - assert self.allequal(y5, y8) - - def test_4(self): - """ - Test of take, transpose, inner, outer products. - - """ - x = self.arange(24) - y = np.arange(24) - x[5:6] = self.masked - x = x.reshape(2, 3, 4) - y = y.reshape(2, 3, 4) - assert self.allequal(np.transpose(y, (2, 0, 1)), self.transpose(x, (2, 0, 1))) - assert self.allequal(np.take(y, (2, 0, 1), 1), self.take(x, (2, 0, 1), 1)) - assert self.allequal(np.inner(self.filled(x, 0), self.filled(y, 0)), - self.inner(x, y)) - assert self.allequal(np.outer(self.filled(x, 0), self.filled(y, 0)), - self.outer(x, y)) - y = self.array(['abc', 1, 'def', 2, 3], object) - y[2] = self.masked - t = self.take(y, [0, 3, 4]) - assert t[0] == 'abc' - assert t[1] == 2 - assert t[2] == 3 - - def test_5(self): - """ - Tests inplace w/ scalar - - """ - x = self.arange(10) - y = self.arange(10) - xm = self.arange(10) - xm[2] = self.masked - x += 1 - assert self.allequal(x, y+1) - xm += 1 - assert self.allequal(xm, y+1) - - x = self.arange(10) - xm = self.arange(10) - xm[2] = self.masked - x -= 1 - assert self.allequal(x, y-1) - xm -= 1 - assert self.allequal(xm, y-1) - - x = self.arange(10)*1.0 - xm = self.arange(10)*1.0 - xm[2] = self.masked - x *= 2.0 - assert self.allequal(x, y*2) - xm *= 2.0 - assert self.allequal(xm, 
y*2) - - x = self.arange(10)*2 - xm = self.arange(10)*2 - xm[2] = self.masked - x /= 2 - assert self.allequal(x, y) - xm /= 2 - assert self.allequal(xm, y) - - x = self.arange(10)*1.0 - xm = self.arange(10)*1.0 - xm[2] = self.masked - x /= 2.0 - assert self.allequal(x, y/2.0) - xm /= self.arange(10) - self.assert_array_equal(xm, self.ones((10,))) - - x = self.arange(10).astype(float_) - xm = self.arange(10) - xm[2] = self.masked - x += 1. - assert self.allequal(x, y + 1.) - - def test_6(self): - """ - Tests inplace w/ array - - """ - x = self.arange(10, dtype=float_) - y = self.arange(10) - xm = self.arange(10, dtype=float_) - xm[2] = self.masked - m = xm.mask - a = self.arange(10, dtype=float_) - a[-1] = self.masked - x += a - xm += a - assert self.allequal(x, y+a) - assert self.allequal(xm, y+a) - assert self.allequal(xm.mask, self.mask_or(m, a.mask)) - - x = self.arange(10, dtype=float_) - xm = self.arange(10, dtype=float_) - xm[2] = self.masked - m = xm.mask - a = self.arange(10, dtype=float_) - a[-1] = self.masked - x -= a - xm -= a - assert self.allequal(x, y-a) - assert self.allequal(xm, y-a) - assert self.allequal(xm.mask, self.mask_or(m, a.mask)) - - x = self.arange(10, dtype=float_) - xm = self.arange(10, dtype=float_) - xm[2] = self.masked - m = xm.mask - a = self.arange(10, dtype=float_) - a[-1] = self.masked - x *= a - xm *= a - assert self.allequal(x, y*a) - assert self.allequal(xm, y*a) - assert self.allequal(xm.mask, self.mask_or(m, a.mask)) - - x = self.arange(10, dtype=float_) - xm = self.arange(10, dtype=float_) - xm[2] = self.masked - m = xm.mask - a = self.arange(10, dtype=float_) - a[-1] = self.masked - x /= a - xm /= a - - def test_7(self): - "Tests ufunc" - d = (self.array([1.0, 0, -1, pi/2]*2, mask=[0, 1]+[0]*6), - self.array([1.0, 0, -1, pi/2]*2, mask=[1, 0]+[0]*6),) - for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', -# 'sin', 'cos', 'tan', -# 'arcsin', 'arccos', 'arctan', -# 'sinh', 'cosh', 'tanh', -# 'arcsinh', -# 'arccosh', -# 
'arctanh', -# 'absolute', 'fabs', 'negative', -# # 'nonzero', 'around', -# 'floor', 'ceil', -# # 'sometrue', 'alltrue', -# 'logical_not', -# 'add', 'subtract', 'multiply', -# 'divide', 'true_divide', 'floor_divide', -# 'remainder', 'fmod', 'hypot', 'arctan2', -# 'equal', 'not_equal', 'less_equal', 'greater_equal', -# 'less', 'greater', -# 'logical_and', 'logical_or', 'logical_xor', - ]: - try: - uf = getattr(self.umath, f) - except AttributeError: - uf = getattr(fromnumeric, f) - mf = getattr(self.module, f) - args = d[:uf.nin] - ur = uf(*args) - mr = mf(*args) - self.assert_array_equal(ur.filled(0), mr.filled(0), f) - self.assert_array_equal(ur._mask, mr._mask) - - def test_99(self): - # test average - ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) - self.assert_array_equal(2.0, self.average(ott, axis=0)) - self.assert_array_equal(2.0, self.average(ott, weights=[1., 1., 2., 1.])) - result, wts = self.average(ott, weights=[1., 1., 2., 1.], returned=1) - self.assert_array_equal(2.0, result) - assert(wts == 4.0) - ott[:] = self.masked - assert(self.average(ott, axis=0) is self.masked) - ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) - ott = ott.reshape(2, 2) - ott[:, 1] = self.masked - self.assert_array_equal(self.average(ott, axis=0), [2.0, 0.0]) - assert(self.average(ott, axis=1)[0] is self.masked) - self.assert_array_equal([2., 0.], self.average(ott, axis=0)) - result, wts = self.average(ott, axis=0, returned=1) - self.assert_array_equal(wts, [1., 0.]) - w1 = [0, 1, 1, 1, 1, 0] - w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] - x = self.arange(6) - self.assert_array_equal(self.average(x, axis=0), 2.5) - self.assert_array_equal(self.average(x, axis=0, weights=w1), 2.5) - y = self.array([self.arange(6), 2.0*self.arange(6)]) - self.assert_array_equal(self.average(y, None), np.add.reduce(np.arange(6))*3./12.) - self.assert_array_equal(self.average(y, axis=0), np.arange(6) * 3./2.) 
- self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0]) - self.assert_array_equal(self.average(y, None, weights=w2), 20./6.) - self.assert_array_equal(self.average(y, axis=0, weights=w2), [0., 1., 2., 3., 4., 10.]) - self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0]) - m1 = self.zeros(6) - m2 = [0, 0, 1, 1, 0, 0] - m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] - m4 = self.ones(6) - m5 = [0, 1, 1, 1, 1, 1] - self.assert_array_equal(self.average(self.masked_array(x, m1), axis=0), 2.5) - self.assert_array_equal(self.average(self.masked_array(x, m2), axis=0), 2.5) - self.assert_array_equal(self.average(self.masked_array(x, m5), axis=0), 0.0) - self.assert_array_equal(self.count(self.average(self.masked_array(x, m4), axis=0)), 0) - z = self.masked_array(y, m3) - self.assert_array_equal(self.average(z, None), 20./6.) - self.assert_array_equal(self.average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5]) - self.assert_array_equal(self.average(z, axis=1), [2.5, 5.0]) - self.assert_array_equal(self.average(z, axis=0, weights=w2), [0., 1., 99., 99., 4.0, 10.0]) - - def test_A(self): - x = self.arange(24) - x[5:6] = self.masked - x = x.reshape(2, 3, 4) - - -if __name__ == '__main__': - setup_base = ("from __main__ import ModuleTester \n" - "import numpy\n" - "tester = ModuleTester(module)\n") - setup_cur = "import numpy.ma.core as module\n" + setup_base - (nrepeat, nloop) = (10, 10) - - for i in range(1, 8): - func = 'tester.test_%i()' % i - cur = timeit.Timer(func, setup_cur).repeat(nrepeat, nloop*10) - cur = np.sort(cur) - print("#%i" % i + 50*'.') - print(eval("ModuleTester.test_%i.__doc__" % i)) - print("core_current : %.3f - %.3f" % (cur[0], cur[1])) diff --git a/venv/lib/python3.7/site-packages/numpy/matlib.py b/venv/lib/python3.7/site-packages/numpy/matlib.py deleted file mode 100644 index b1b1555..0000000 --- a/venv/lib/python3.7/site-packages/numpy/matlib.py +++ 
/dev/null @@ -1,365 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.matrixlib.defmatrix import matrix, asmatrix -# need * as we're copying the numpy namespace (FIXME: this makes little sense) -from numpy import * - -__version__ = np.__version__ - -__all__ = np.__all__[:] # copy numpy namespace -__all__ += ['rand', 'randn', 'repmat'] - -def empty(shape, dtype=None, order='C'): - """Return a new matrix of given shape and type, without initializing entries. - - Parameters - ---------- - shape : int or tuple of int - Shape of the empty matrix. - dtype : data-type, optional - Desired output data-type. - order : {'C', 'F'}, optional - Whether to store multi-dimensional data in row-major - (C-style) or column-major (Fortran-style) order in - memory. - - See Also - -------- - empty_like, zeros - - Notes - ----- - `empty`, unlike `zeros`, does not set the matrix values to zero, - and may therefore be marginally faster. On the other hand, it requires - the user to manually set all the values in the array, and should be - used with caution. - - Examples - -------- - >>> import numpy.matlib - >>> np.matlib.empty((2, 2)) # filled with random data - matrix([[ 6.76425276e-320, 9.79033856e-307], # random - [ 7.39337286e-309, 3.22135945e-309]]) - >>> np.matlib.empty((2, 2), dtype=int) - matrix([[ 6600475, 0], # random - [ 6586976, 22740995]]) - - """ - return ndarray.__new__(matrix, shape, dtype, order=order) - -def ones(shape, dtype=None, order='C'): - """ - Matrix of ones. - - Return a matrix of given shape and type, filled with ones. - - Parameters - ---------- - shape : {sequence of ints, int} - Shape of the matrix - dtype : data-type, optional - The desired data-type for the matrix, default is np.float64. - order : {'C', 'F'}, optional - Whether to store matrix in C- or Fortran-contiguous order, - default is 'C'. - - Returns - ------- - out : matrix - Matrix of ones of given shape, dtype, and order. 
- - See Also - -------- - ones : Array of ones. - matlib.zeros : Zero matrix. - - Notes - ----- - If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``, - `out` becomes a single row matrix of shape ``(1,N)``. - - Examples - -------- - >>> np.matlib.ones((2,3)) - matrix([[1., 1., 1.], - [1., 1., 1.]]) - - >>> np.matlib.ones(2) - matrix([[1., 1.]]) - - """ - a = ndarray.__new__(matrix, shape, dtype, order=order) - a.fill(1) - return a - -def zeros(shape, dtype=None, order='C'): - """ - Return a matrix of given shape and type, filled with zeros. - - Parameters - ---------- - shape : int or sequence of ints - Shape of the matrix - dtype : data-type, optional - The desired data-type for the matrix, default is float. - order : {'C', 'F'}, optional - Whether to store the result in C- or Fortran-contiguous order, - default is 'C'. - - Returns - ------- - out : matrix - Zero matrix of given shape, dtype, and order. - - See Also - -------- - numpy.zeros : Equivalent array function. - matlib.ones : Return a matrix of ones. - - Notes - ----- - If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``, - `out` becomes a single row matrix of shape ``(1,N)``. - - Examples - -------- - >>> import numpy.matlib - >>> np.matlib.zeros((2, 3)) - matrix([[0., 0., 0.], - [0., 0., 0.]]) - - >>> np.matlib.zeros(2) - matrix([[0., 0.]]) - - """ - a = ndarray.__new__(matrix, shape, dtype, order=order) - a.fill(0) - return a - -def identity(n,dtype=None): - """ - Returns the square identity matrix of given size. - - Parameters - ---------- - n : int - Size of the returned identity matrix. - dtype : data-type, optional - Data-type of the output. Defaults to ``float``. - - Returns - ------- - out : matrix - `n` x `n` matrix with its main diagonal set to one, - and all other elements zero. - - See Also - -------- - numpy.identity : Equivalent array function. - matlib.eye : More general matrix identity function. 
- - Examples - -------- - >>> import numpy.matlib - >>> np.matlib.identity(3, dtype=int) - matrix([[1, 0, 0], - [0, 1, 0], - [0, 0, 1]]) - - """ - a = array([1]+n*[0], dtype=dtype) - b = empty((n, n), dtype=dtype) - b.flat = a - return b - -def eye(n,M=None, k=0, dtype=float, order='C'): - """ - Return a matrix with ones on the diagonal and zeros elsewhere. - - Parameters - ---------- - n : int - Number of rows in the output. - M : int, optional - Number of columns in the output, defaults to `n`. - k : int, optional - Index of the diagonal: 0 refers to the main diagonal, - a positive value refers to an upper diagonal, - and a negative value to a lower diagonal. - dtype : dtype, optional - Data-type of the returned matrix. - order : {'C', 'F'}, optional - Whether the output should be stored in row-major (C-style) or - column-major (Fortran-style) order in memory. - - .. versionadded:: 1.14.0 - - Returns - ------- - I : matrix - A `n` x `M` matrix where all elements are equal to zero, - except for the `k`-th diagonal, whose values are equal to one. - - See Also - -------- - numpy.eye : Equivalent array function. - identity : Square identity matrix. - - Examples - -------- - >>> import numpy.matlib - >>> np.matlib.eye(3, k=1, dtype=float) - matrix([[0., 1., 0.], - [0., 0., 1.], - [0., 0., 0.]]) - - """ - return asmatrix(np.eye(n, M=M, k=k, dtype=dtype, order=order)) - -def rand(*args): - """ - Return a matrix of random values with given shape. - - Create a matrix of the given shape and propagate it with - random samples from a uniform distribution over ``[0, 1)``. - - Parameters - ---------- - \\*args : Arguments - Shape of the output. - If given as N integers, each integer specifies the size of one - dimension. - If given as a tuple, this tuple gives the complete shape. - - Returns - ------- - out : ndarray - The matrix of random values with shape given by `\\*args`. 
- - See Also - -------- - randn, numpy.random.RandomState.rand - - Examples - -------- - >>> np.random.seed(123) - >>> import numpy.matlib - >>> np.matlib.rand(2, 3) - matrix([[0.69646919, 0.28613933, 0.22685145], - [0.55131477, 0.71946897, 0.42310646]]) - >>> np.matlib.rand((2, 3)) - matrix([[0.9807642 , 0.68482974, 0.4809319 ], - [0.39211752, 0.34317802, 0.72904971]]) - - If the first argument is a tuple, other arguments are ignored: - - >>> np.matlib.rand((2, 3), 4) - matrix([[0.43857224, 0.0596779 , 0.39804426], - [0.73799541, 0.18249173, 0.17545176]]) - - """ - if isinstance(args[0], tuple): - args = args[0] - return asmatrix(np.random.rand(*args)) - -def randn(*args): - """ - Return a random matrix with data from the "standard normal" distribution. - - `randn` generates a matrix filled with random floats sampled from a - univariate "normal" (Gaussian) distribution of mean 0 and variance 1. - - Parameters - ---------- - \\*args : Arguments - Shape of the output. - If given as N integers, each integer specifies the size of one - dimension. If given as a tuple, this tuple gives the complete shape. - - Returns - ------- - Z : matrix of floats - A matrix of floating-point samples drawn from the standard normal - distribution. - - See Also - -------- - rand, numpy.random.RandomState.randn - - Notes - ----- - For random samples from :math:`N(\\mu, \\sigma^2)`, use: - - ``sigma * np.matlib.randn(...) 
+ mu`` - - Examples - -------- - >>> np.random.seed(123) - >>> import numpy.matlib - >>> np.matlib.randn(1) - matrix([[-1.0856306]]) - >>> np.matlib.randn(1, 2, 3) - matrix([[ 0.99734545, 0.2829785 , -1.50629471], - [-0.57860025, 1.65143654, -2.42667924]]) - - Two-by-four matrix of samples from :math:`N(3, 6.25)`: - - >>> 2.5 * np.matlib.randn((2, 4)) + 3 - matrix([[1.92771843, 6.16484065, 0.83314899, 1.30278462], - [2.76322758, 6.72847407, 1.40274501, 1.8900451 ]]) - - """ - if isinstance(args[0], tuple): - args = args[0] - return asmatrix(np.random.randn(*args)) - -def repmat(a, m, n): - """ - Repeat a 0-D to 2-D array or matrix MxN times. - - Parameters - ---------- - a : array_like - The array or matrix to be repeated. - m, n : int - The number of times `a` is repeated along the first and second axes. - - Returns - ------- - out : ndarray - The result of repeating `a`. - - Examples - -------- - >>> import numpy.matlib - >>> a0 = np.array(1) - >>> np.matlib.repmat(a0, 2, 3) - array([[1, 1, 1], - [1, 1, 1]]) - - >>> a1 = np.arange(4) - >>> np.matlib.repmat(a1, 2, 2) - array([[0, 1, 2, 3, 0, 1, 2, 3], - [0, 1, 2, 3, 0, 1, 2, 3]]) - - >>> a2 = np.asmatrix(np.arange(6).reshape(2, 3)) - >>> np.matlib.repmat(a2, 2, 3) - matrix([[0, 1, 2, 0, 1, 2, 0, 1, 2], - [3, 4, 5, 3, 4, 5, 3, 4, 5], - [0, 1, 2, 0, 1, 2, 0, 1, 2], - [3, 4, 5, 3, 4, 5, 3, 4, 5]]) - - """ - a = asanyarray(a) - ndim = a.ndim - if ndim == 0: - origrows, origcols = (1, 1) - elif ndim == 1: - origrows, origcols = (1, a.shape[0]) - else: - origrows, origcols = a.shape - rows = origrows * m - cols = origcols * n - c = a.reshape(1, a.size).repeat(m, 0).reshape(rows, origcols).repeat(n, 0) - return c.reshape(rows, cols) diff --git a/venv/lib/python3.7/site-packages/numpy/matrixlib/__init__.py b/venv/lib/python3.7/site-packages/numpy/matrixlib/__init__.py deleted file mode 100644 index 777e0cd..0000000 --- a/venv/lib/python3.7/site-packages/numpy/matrixlib/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ 
-"""Sub-package containing the matrix class and related functions. - -""" -from __future__ import division, absolute_import, print_function - -from .defmatrix import * - -__all__ = defmatrix.__all__ - -from numpy._pytesttester import PytestTester -test = PytestTester(__name__) -del PytestTester diff --git a/venv/lib/python3.7/site-packages/numpy/matrixlib/defmatrix.py b/venv/lib/python3.7/site-packages/numpy/matrixlib/defmatrix.py deleted file mode 100644 index cabd413..0000000 --- a/venv/lib/python3.7/site-packages/numpy/matrixlib/defmatrix.py +++ /dev/null @@ -1,1115 +0,0 @@ -from __future__ import division, absolute_import, print_function - -__all__ = ['matrix', 'bmat', 'mat', 'asmatrix'] - -import sys -import warnings -import ast -import numpy.core.numeric as N -from numpy.core.numeric import concatenate, isscalar -from numpy.core.overrides import set_module -# While not in __all__, matrix_power used to be defined here, so we import -# it for backward compatibility. -from numpy.linalg import matrix_power - - -def _convert_from_string(data): - for char in '[]': - data = data.replace(char, '') - - rows = data.split(';') - newdata = [] - count = 0 - for row in rows: - trow = row.split(',') - newrow = [] - for col in trow: - temp = col.split() - newrow.extend(map(ast.literal_eval, temp)) - if count == 0: - Ncols = len(newrow) - elif len(newrow) != Ncols: - raise ValueError("Rows not the same size.") - count += 1 - newdata.append(newrow) - return newdata - - -@set_module('numpy') -def asmatrix(data, dtype=None): - """ - Interpret the input as a matrix. - - Unlike `matrix`, `asmatrix` does not make a copy if the input is already - a matrix or an ndarray. Equivalent to ``matrix(data, copy=False)``. - - Parameters - ---------- - data : array_like - Input data. - dtype : data-type - Data-type of the output matrix. - - Returns - ------- - mat : matrix - `data` interpreted as a matrix. 
- - Examples - -------- - >>> x = np.array([[1, 2], [3, 4]]) - - >>> m = np.asmatrix(x) - - >>> x[0,0] = 5 - - >>> m - matrix([[5, 2], - [3, 4]]) - - """ - return matrix(data, dtype=dtype, copy=False) - - -@set_module('numpy') -class matrix(N.ndarray): - """ - matrix(data, dtype=None, copy=True) - - .. note:: It is no longer recommended to use this class, even for linear - algebra. Instead use regular arrays. The class may be removed - in the future. - - Returns a matrix from an array-like object, or from a string of data. - A matrix is a specialized 2-D array that retains its 2-D nature - through operations. It has certain special operators, such as ``*`` - (matrix multiplication) and ``**`` (matrix power). - - Parameters - ---------- - data : array_like or string - If `data` is a string, it is interpreted as a matrix with commas - or spaces separating columns, and semicolons separating rows. - dtype : data-type - Data-type of the output matrix. - copy : bool - If `data` is already an `ndarray`, then this flag determines - whether the data is copied (the default), or whether a view is - constructed. - - See Also - -------- - array - - Examples - -------- - >>> a = np.matrix('1 2; 3 4') - >>> a - matrix([[1, 2], - [3, 4]]) - - >>> np.matrix([[1, 2], [3, 4]]) - matrix([[1, 2], - [3, 4]]) - - """ - __array_priority__ = 10.0 - def __new__(subtype, data, dtype=None, copy=True): - warnings.warn('the matrix subclass is not the recommended way to ' - 'represent matrices or deal with linear algebra (see ' - 'https://docs.scipy.org/doc/numpy/user/' - 'numpy-for-matlab-users.html). 
' - 'Please adjust your code to use regular ndarray.', - PendingDeprecationWarning, stacklevel=2) - if isinstance(data, matrix): - dtype2 = data.dtype - if (dtype is None): - dtype = dtype2 - if (dtype2 == dtype) and (not copy): - return data - return data.astype(dtype) - - if isinstance(data, N.ndarray): - if dtype is None: - intype = data.dtype - else: - intype = N.dtype(dtype) - new = data.view(subtype) - if intype != data.dtype: - return new.astype(intype) - if copy: return new.copy() - else: return new - - if isinstance(data, str): - data = _convert_from_string(data) - - # now convert data to an array - arr = N.array(data, dtype=dtype, copy=copy) - ndim = arr.ndim - shape = arr.shape - if (ndim > 2): - raise ValueError("matrix must be 2-dimensional") - elif ndim == 0: - shape = (1, 1) - elif ndim == 1: - shape = (1, shape[0]) - - order = 'C' - if (ndim == 2) and arr.flags.fortran: - order = 'F' - - if not (order or arr.flags.contiguous): - arr = arr.copy() - - ret = N.ndarray.__new__(subtype, shape, arr.dtype, - buffer=arr, - order=order) - return ret - - def __array_finalize__(self, obj): - self._getitem = False - if (isinstance(obj, matrix) and obj._getitem): return - ndim = self.ndim - if (ndim == 2): - return - if (ndim > 2): - newshape = tuple([x for x in self.shape if x > 1]) - ndim = len(newshape) - if ndim == 2: - self.shape = newshape - return - elif (ndim > 2): - raise ValueError("shape too large to be a matrix.") - else: - newshape = self.shape - if ndim == 0: - self.shape = (1, 1) - elif ndim == 1: - self.shape = (1, newshape[0]) - return - - def __getitem__(self, index): - self._getitem = True - - try: - out = N.ndarray.__getitem__(self, index) - finally: - self._getitem = False - - if not isinstance(out, N.ndarray): - return out - - if out.ndim == 0: - return out[()] - if out.ndim == 1: - sh = out.shape[0] - # Determine when we should have a column array - try: - n = len(index) - except Exception: - n = 0 - if n > 1 and isscalar(index[1]): - 
out.shape = (sh, 1) - else: - out.shape = (1, sh) - return out - - def __mul__(self, other): - if isinstance(other, (N.ndarray, list, tuple)) : - # This promotes 1-D vectors to row vectors - return N.dot(self, asmatrix(other)) - if isscalar(other) or not hasattr(other, '__rmul__') : - return N.dot(self, other) - return NotImplemented - - def __rmul__(self, other): - return N.dot(other, self) - - def __imul__(self, other): - self[:] = self * other - return self - - def __pow__(self, other): - return matrix_power(self, other) - - def __ipow__(self, other): - self[:] = self ** other - return self - - def __rpow__(self, other): - return NotImplemented - - def _align(self, axis): - """A convenience function for operations that need to preserve axis - orientation. - """ - if axis is None: - return self[0, 0] - elif axis==0: - return self - elif axis==1: - return self.transpose() - else: - raise ValueError("unsupported axis") - - def _collapse(self, axis): - """A convenience function for operations that want to collapse - to a scalar like _align, but are using keepdims=True - """ - if axis is None: - return self[0, 0] - else: - return self - - # Necessary because base-class tolist expects dimension - # reduction by x[0] - def tolist(self): - """ - Return the matrix as a (possibly nested) list. - - See `ndarray.tolist` for full documentation. - - See Also - -------- - ndarray.tolist - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.tolist() - [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]] - - """ - return self.__array__().tolist() - - # To preserve orientation of result... - def sum(self, axis=None, dtype=None, out=None): - """ - Returns the sum of the matrix elements, along the given axis. - - Refer to `numpy.sum` for full documentation. 
- - See Also - -------- - numpy.sum - - Notes - ----- - This is the same as `ndarray.sum`, except that where an `ndarray` would - be returned, a `matrix` object is returned instead. - - Examples - -------- - >>> x = np.matrix([[1, 2], [4, 3]]) - >>> x.sum() - 10 - >>> x.sum(axis=1) - matrix([[3], - [7]]) - >>> x.sum(axis=1, dtype='float') - matrix([[3.], - [7.]]) - >>> out = np.zeros((2, 1), dtype='float') - >>> x.sum(axis=1, dtype='float', out=np.asmatrix(out)) - matrix([[3.], - [7.]]) - - """ - return N.ndarray.sum(self, axis, dtype, out, keepdims=True)._collapse(axis) - - - # To update docstring from array to matrix... - def squeeze(self, axis=None): - """ - Return a possibly reshaped matrix. - - Refer to `numpy.squeeze` for more documentation. - - Parameters - ---------- - axis : None or int or tuple of ints, optional - Selects a subset of the single-dimensional entries in the shape. - If an axis is selected with shape entry greater than one, - an error is raised. - - Returns - ------- - squeezed : matrix - The matrix, but as a (1, N) matrix if it had shape (N, 1). - - See Also - -------- - numpy.squeeze : related function - - Notes - ----- - If `m` has a single column then that column is returned - as the single row of a matrix. Otherwise `m` is returned. - The returned matrix is always either `m` itself or a view into `m`. - Supplying an axis keyword argument will not affect the returned matrix - but it may cause an error to be raised. - - Examples - -------- - >>> c = np.matrix([[1], [2]]) - >>> c - matrix([[1], - [2]]) - >>> c.squeeze() - matrix([[1, 2]]) - >>> r = c.T - >>> r - matrix([[1, 2]]) - >>> r.squeeze() - matrix([[1, 2]]) - >>> m = np.matrix([[1, 2], [3, 4]]) - >>> m.squeeze() - matrix([[1, 2], - [3, 4]]) - - """ - return N.ndarray.squeeze(self, axis=axis) - - - # To update docstring from array to matrix... - def flatten(self, order='C'): - """ - Return a flattened copy of the matrix. 
- - All `N` elements of the matrix are placed into a single row. - - Parameters - ---------- - order : {'C', 'F', 'A', 'K'}, optional - 'C' means to flatten in row-major (C-style) order. 'F' means to - flatten in column-major (Fortran-style) order. 'A' means to - flatten in column-major order if `m` is Fortran *contiguous* in - memory, row-major order otherwise. 'K' means to flatten `m` in - the order the elements occur in memory. The default is 'C'. - - Returns - ------- - y : matrix - A copy of the matrix, flattened to a `(1, N)` matrix where `N` - is the number of elements in the original matrix. - - See Also - -------- - ravel : Return a flattened array. - flat : A 1-D flat iterator over the matrix. - - Examples - -------- - >>> m = np.matrix([[1,2], [3,4]]) - >>> m.flatten() - matrix([[1, 2, 3, 4]]) - >>> m.flatten('F') - matrix([[1, 3, 2, 4]]) - - """ - return N.ndarray.flatten(self, order=order) - - def mean(self, axis=None, dtype=None, out=None): - """ - Returns the average of the matrix elements along the given axis. - - Refer to `numpy.mean` for full documentation. - - See Also - -------- - numpy.mean - - Notes - ----- - Same as `ndarray.mean` except that, where that returns an `ndarray`, - this returns a `matrix` object. - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3, 4))) - >>> x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.mean() - 5.5 - >>> x.mean(0) - matrix([[4., 5., 6., 7.]]) - >>> x.mean(1) - matrix([[ 1.5], - [ 5.5], - [ 9.5]]) - - """ - return N.ndarray.mean(self, axis, dtype, out, keepdims=True)._collapse(axis) - - def std(self, axis=None, dtype=None, out=None, ddof=0): - """ - Return the standard deviation of the array elements along the given axis. - - Refer to `numpy.std` for full documentation. - - See Also - -------- - numpy.std - - Notes - ----- - This is the same as `ndarray.std`, except that where an `ndarray` would - be returned, a `matrix` object is returned instead. 
- - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3, 4))) - >>> x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.std() - 3.4520525295346629 # may vary - >>> x.std(0) - matrix([[ 3.26598632, 3.26598632, 3.26598632, 3.26598632]]) # may vary - >>> x.std(1) - matrix([[ 1.11803399], - [ 1.11803399], - [ 1.11803399]]) - - """ - return N.ndarray.std(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis) - - def var(self, axis=None, dtype=None, out=None, ddof=0): - """ - Returns the variance of the matrix elements, along the given axis. - - Refer to `numpy.var` for full documentation. - - See Also - -------- - numpy.var - - Notes - ----- - This is the same as `ndarray.var`, except that where an `ndarray` would - be returned, a `matrix` object is returned instead. - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3, 4))) - >>> x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.var() - 11.916666666666666 - >>> x.var(0) - matrix([[ 10.66666667, 10.66666667, 10.66666667, 10.66666667]]) # may vary - >>> x.var(1) - matrix([[1.25], - [1.25], - [1.25]]) - - """ - return N.ndarray.var(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis) - - def prod(self, axis=None, dtype=None, out=None): - """ - Return the product of the array elements over the given axis. - - Refer to `prod` for full documentation. - - See Also - -------- - prod, ndarray.prod - - Notes - ----- - Same as `ndarray.prod`, except, where that returns an `ndarray`, this - returns a `matrix` object instead. 
- - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.prod() - 0 - >>> x.prod(0) - matrix([[ 0, 45, 120, 231]]) - >>> x.prod(1) - matrix([[ 0], - [ 840], - [7920]]) - - """ - return N.ndarray.prod(self, axis, dtype, out, keepdims=True)._collapse(axis) - - def any(self, axis=None, out=None): - """ - Test whether any array element along a given axis evaluates to True. - - Refer to `numpy.any` for full documentation. - - Parameters - ---------- - axis : int, optional - Axis along which logical OR is performed - out : ndarray, optional - Output to existing array instead of creating new one, must have - same shape as expected output - - Returns - ------- - any : bool, ndarray - Returns a single bool if `axis` is ``None``; otherwise, - returns `ndarray` - - """ - return N.ndarray.any(self, axis, out, keepdims=True)._collapse(axis) - - def all(self, axis=None, out=None): - """ - Test whether all matrix elements along a given axis evaluate to True. - - Parameters - ---------- - See `numpy.all` for complete descriptions - - See Also - -------- - numpy.all - - Notes - ----- - This is the same as `ndarray.all`, but it returns a `matrix` object. - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> y = x[0]; y - matrix([[0, 1, 2, 3]]) - >>> (x == y) - matrix([[ True, True, True, True], - [False, False, False, False], - [False, False, False, False]]) - >>> (x == y).all() - False - >>> (x == y).all(0) - matrix([[False, False, False, False]]) - >>> (x == y).all(1) - matrix([[ True], - [False], - [False]]) - - """ - return N.ndarray.all(self, axis, out, keepdims=True)._collapse(axis) - - def max(self, axis=None, out=None): - """ - Return the maximum value along an axis. 
- - Parameters - ---------- - See `amax` for complete descriptions - - See Also - -------- - amax, ndarray.max - - Notes - ----- - This is the same as `ndarray.max`, but returns a `matrix` object - where `ndarray.max` would return an ndarray. - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.max() - 11 - >>> x.max(0) - matrix([[ 8, 9, 10, 11]]) - >>> x.max(1) - matrix([[ 3], - [ 7], - [11]]) - - """ - return N.ndarray.max(self, axis, out, keepdims=True)._collapse(axis) - - def argmax(self, axis=None, out=None): - """ - Indexes of the maximum values along an axis. - - Return the indexes of the first occurrences of the maximum values - along the specified axis. If axis is None, the index is for the - flattened matrix. - - Parameters - ---------- - See `numpy.argmax` for complete descriptions - - See Also - -------- - numpy.argmax - - Notes - ----- - This is the same as `ndarray.argmax`, but returns a `matrix` object - where `ndarray.argmax` would return an `ndarray`. - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.argmax() - 11 - >>> x.argmax(0) - matrix([[2, 2, 2, 2]]) - >>> x.argmax(1) - matrix([[3], - [3], - [3]]) - - """ - return N.ndarray.argmax(self, axis, out)._align(axis) - - def min(self, axis=None, out=None): - """ - Return the minimum value along an axis. - - Parameters - ---------- - See `amin` for complete descriptions. - - See Also - -------- - amin, ndarray.min - - Notes - ----- - This is the same as `ndarray.min`, but returns a `matrix` object - where `ndarray.min` would return an ndarray. 
- - Examples - -------- - >>> x = -np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, -1, -2, -3], - [ -4, -5, -6, -7], - [ -8, -9, -10, -11]]) - >>> x.min() - -11 - >>> x.min(0) - matrix([[ -8, -9, -10, -11]]) - >>> x.min(1) - matrix([[ -3], - [ -7], - [-11]]) - - """ - return N.ndarray.min(self, axis, out, keepdims=True)._collapse(axis) - - def argmin(self, axis=None, out=None): - """ - Indexes of the minimum values along an axis. - - Return the indexes of the first occurrences of the minimum values - along the specified axis. If axis is None, the index is for the - flattened matrix. - - Parameters - ---------- - See `numpy.argmin` for complete descriptions. - - See Also - -------- - numpy.argmin - - Notes - ----- - This is the same as `ndarray.argmin`, but returns a `matrix` object - where `ndarray.argmin` would return an `ndarray`. - - Examples - -------- - >>> x = -np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, -1, -2, -3], - [ -4, -5, -6, -7], - [ -8, -9, -10, -11]]) - >>> x.argmin() - 11 - >>> x.argmin(0) - matrix([[2, 2, 2, 2]]) - >>> x.argmin(1) - matrix([[3], - [3], - [3]]) - - """ - return N.ndarray.argmin(self, axis, out)._align(axis) - - def ptp(self, axis=None, out=None): - """ - Peak-to-peak (maximum - minimum) value along the given axis. - - Refer to `numpy.ptp` for full documentation. - - See Also - -------- - numpy.ptp - - Notes - ----- - Same as `ndarray.ptp`, except, where that would return an `ndarray` object, - this returns a `matrix` object. - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.ptp() - 11 - >>> x.ptp(0) - matrix([[8, 8, 8, 8]]) - >>> x.ptp(1) - matrix([[3], - [3], - [3]]) - - """ - return N.ndarray.ptp(self, axis, out)._align(axis) - - @property - def I(self): - """ - Returns the (multiplicative) inverse of invertible `self`. 
- - Parameters - ---------- - None - - Returns - ------- - ret : matrix object - If `self` is non-singular, `ret` is such that ``ret * self`` == - ``self * ret`` == ``np.matrix(np.eye(self[0,:].size)`` all return - ``True``. - - Raises - ------ - numpy.linalg.LinAlgError: Singular matrix - If `self` is singular. - - See Also - -------- - linalg.inv - - Examples - -------- - >>> m = np.matrix('[1, 2; 3, 4]'); m - matrix([[1, 2], - [3, 4]]) - >>> m.getI() - matrix([[-2. , 1. ], - [ 1.5, -0.5]]) - >>> m.getI() * m - matrix([[ 1., 0.], # may vary - [ 0., 1.]]) - - """ - M, N = self.shape - if M == N: - from numpy.dual import inv as func - else: - from numpy.dual import pinv as func - return asmatrix(func(self)) - - @property - def A(self): - """ - Return `self` as an `ndarray` object. - - Equivalent to ``np.asarray(self)``. - - Parameters - ---------- - None - - Returns - ------- - ret : ndarray - `self` as an `ndarray` - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.getA() - array([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - - """ - return self.__array__() - - @property - def A1(self): - """ - Return `self` as a flattened `ndarray`. - - Equivalent to ``np.asarray(x).ravel()`` - - Parameters - ---------- - None - - Returns - ------- - ret : ndarray - `self`, 1-D, as an `ndarray` - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.getA1() - array([ 0, 1, 2, ..., 9, 10, 11]) - - - """ - return self.__array__().ravel() - - - def ravel(self, order='C'): - """ - Return a flattened matrix. - - Refer to `numpy.ravel` for more documentation. - - Parameters - ---------- - order : {'C', 'F', 'A', 'K'}, optional - The elements of `m` are read using this index order. 
'C' means to - index the elements in C-like order, with the last axis index - changing fastest, back to the first axis index changing slowest. - 'F' means to index the elements in Fortran-like index order, with - the first index changing fastest, and the last index changing - slowest. Note that the 'C' and 'F' options take no account of the - memory layout of the underlying array, and only refer to the order - of axis indexing. 'A' means to read the elements in Fortran-like - index order if `m` is Fortran *contiguous* in memory, C-like order - otherwise. 'K' means to read the elements in the order they occur - in memory, except for reversing the data when strides are negative. - By default, 'C' index order is used. - - Returns - ------- - ret : matrix - Return the matrix flattened to shape `(1, N)` where `N` - is the number of elements in the original matrix. - A copy is made only if necessary. - - See Also - -------- - matrix.flatten : returns a similar output matrix but always a copy - matrix.flat : a flat iterator on the array. - numpy.ravel : related function which returns an ndarray - - """ - return N.ndarray.ravel(self, order=order) - - @property - def T(self): - """ - Returns the transpose of the matrix. - - Does *not* conjugate! For the complex conjugate transpose, use ``.H``. - - Parameters - ---------- - None - - Returns - ------- - ret : matrix object - The (non-conjugated) transpose of the matrix. - - See Also - -------- - transpose, getH - - Examples - -------- - >>> m = np.matrix('[1, 2; 3, 4]') - >>> m - matrix([[1, 2], - [3, 4]]) - >>> m.getT() - matrix([[1, 3], - [2, 4]]) - - """ - return self.transpose() - - @property - def H(self): - """ - Returns the (complex) conjugate transpose of `self`. - - Equivalent to ``np.transpose(self)`` if `self` is real-valued. 
- - Parameters - ---------- - None - - Returns - ------- - ret : matrix object - complex conjugate transpose of `self` - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))) - >>> z = x - 1j*x; z - matrix([[ 0. +0.j, 1. -1.j, 2. -2.j, 3. -3.j], - [ 4. -4.j, 5. -5.j, 6. -6.j, 7. -7.j], - [ 8. -8.j, 9. -9.j, 10.-10.j, 11.-11.j]]) - >>> z.getH() - matrix([[ 0. -0.j, 4. +4.j, 8. +8.j], - [ 1. +1.j, 5. +5.j, 9. +9.j], - [ 2. +2.j, 6. +6.j, 10.+10.j], - [ 3. +3.j, 7. +7.j, 11.+11.j]]) - - """ - if issubclass(self.dtype.type, N.complexfloating): - return self.transpose().conjugate() - else: - return self.transpose() - - # kept for compatibility - getT = T.fget - getA = A.fget - getA1 = A1.fget - getH = H.fget - getI = I.fget - -def _from_string(str, gdict, ldict): - rows = str.split(';') - rowtup = [] - for row in rows: - trow = row.split(',') - newrow = [] - for x in trow: - newrow.extend(x.split()) - trow = newrow - coltup = [] - for col in trow: - col = col.strip() - try: - thismat = ldict[col] - except KeyError: - try: - thismat = gdict[col] - except KeyError: - raise KeyError("%s not found" % (col,)) - - coltup.append(thismat) - rowtup.append(concatenate(coltup, axis=-1)) - return concatenate(rowtup, axis=0) - - -@set_module('numpy') -def bmat(obj, ldict=None, gdict=None): - """ - Build a matrix object from a string, nested sequence, or array. - - Parameters - ---------- - obj : str or array_like - Input data. If a string, variables in the current scope may be - referenced by name. - ldict : dict, optional - A dictionary that replaces local operands in current frame. - Ignored if `obj` is not a string or `gdict` is None. - gdict : dict, optional - A dictionary that replaces global operands in current frame. - Ignored if `obj` is not a string. - - Returns - ------- - out : matrix - Returns a matrix object, which is a specialized 2-D array. 
- - See Also - -------- - block : - A generalization of this function for N-d arrays, that returns normal - ndarrays. - - Examples - -------- - >>> A = np.mat('1 1; 1 1') - >>> B = np.mat('2 2; 2 2') - >>> C = np.mat('3 4; 5 6') - >>> D = np.mat('7 8; 9 0') - - All the following expressions construct the same block matrix: - - >>> np.bmat([[A, B], [C, D]]) - matrix([[1, 1, 2, 2], - [1, 1, 2, 2], - [3, 4, 7, 8], - [5, 6, 9, 0]]) - >>> np.bmat(np.r_[np.c_[A, B], np.c_[C, D]]) - matrix([[1, 1, 2, 2], - [1, 1, 2, 2], - [3, 4, 7, 8], - [5, 6, 9, 0]]) - >>> np.bmat('A,B; C,D') - matrix([[1, 1, 2, 2], - [1, 1, 2, 2], - [3, 4, 7, 8], - [5, 6, 9, 0]]) - - """ - if isinstance(obj, str): - if gdict is None: - # get previous frame - frame = sys._getframe().f_back - glob_dict = frame.f_globals - loc_dict = frame.f_locals - else: - glob_dict = gdict - loc_dict = ldict - - return matrix(_from_string(obj, glob_dict, loc_dict)) - - if isinstance(obj, (tuple, list)): - # [[A,B],[C,D]] - arr_rows = [] - for row in obj: - if isinstance(row, N.ndarray): # not 2-d - return matrix(concatenate(obj, axis=-1)) - else: - arr_rows.append(concatenate(row, axis=-1)) - return matrix(concatenate(arr_rows, axis=0)) - if isinstance(obj, N.ndarray): - return matrix(obj) - -mat = asmatrix diff --git a/venv/lib/python3.7/site-packages/numpy/matrixlib/setup.py b/venv/lib/python3.7/site-packages/numpy/matrixlib/setup.py deleted file mode 100644 index d0981d6..0000000 --- a/venv/lib/python3.7/site-packages/numpy/matrixlib/setup.py +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, print_function - -def configuration(parent_package='', top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('matrixlib', parent_package, top_path) - config.add_data_dir('tests') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - config = configuration(top_path='').todict() - setup(**config) diff --git 
a/venv/lib/python3.7/site-packages/numpy/matrixlib/tests/__init__.py b/venv/lib/python3.7/site-packages/numpy/matrixlib/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/numpy/matrixlib/tests/test_defmatrix.py b/venv/lib/python3.7/site-packages/numpy/matrixlib/tests/test_defmatrix.py deleted file mode 100644 index aa6e08d..0000000 --- a/venv/lib/python3.7/site-packages/numpy/matrixlib/tests/test_defmatrix.py +++ /dev/null @@ -1,460 +0,0 @@ -from __future__ import division, absolute_import, print_function - -try: - # Accessing collections abstract classes from collections - # has been deprecated since Python 3.3 - import collections.abc as collections_abc -except ImportError: - import collections as collections_abc - -import numpy as np -from numpy import matrix, asmatrix, bmat -from numpy.testing import ( - assert_, assert_equal, assert_almost_equal, assert_array_equal, - assert_array_almost_equal, assert_raises - ) -from numpy.linalg import matrix_power -from numpy.matrixlib import mat - -class TestCtor(object): - def test_basic(self): - A = np.array([[1, 2], [3, 4]]) - mA = matrix(A) - assert_(np.all(mA.A == A)) - - B = bmat("A,A;A,A") - C = bmat([[A, A], [A, A]]) - D = np.array([[1, 2, 1, 2], - [3, 4, 3, 4], - [1, 2, 1, 2], - [3, 4, 3, 4]]) - assert_(np.all(B.A == D)) - assert_(np.all(C.A == D)) - - E = np.array([[5, 6], [7, 8]]) - AEresult = matrix([[1, 2, 5, 6], [3, 4, 7, 8]]) - assert_(np.all(bmat([A, E]) == AEresult)) - - vec = np.arange(5) - mvec = matrix(vec) - assert_(mvec.shape == (1, 5)) - - def test_exceptions(self): - # Check for ValueError when called with invalid string data. 
- assert_raises(ValueError, matrix, "invalid") - - def test_bmat_nondefault_str(self): - A = np.array([[1, 2], [3, 4]]) - B = np.array([[5, 6], [7, 8]]) - Aresult = np.array([[1, 2, 1, 2], - [3, 4, 3, 4], - [1, 2, 1, 2], - [3, 4, 3, 4]]) - mixresult = np.array([[1, 2, 5, 6], - [3, 4, 7, 8], - [5, 6, 1, 2], - [7, 8, 3, 4]]) - assert_(np.all(bmat("A,A;A,A") == Aresult)) - assert_(np.all(bmat("A,A;A,A", ldict={'A':B}) == Aresult)) - assert_raises(TypeError, bmat, "A,A;A,A", gdict={'A':B}) - assert_( - np.all(bmat("A,A;A,A", ldict={'A':A}, gdict={'A':B}) == Aresult)) - b2 = bmat("A,B;C,D", ldict={'A':A,'B':B}, gdict={'C':B,'D':A}) - assert_(np.all(b2 == mixresult)) - - -class TestProperties(object): - def test_sum(self): - """Test whether matrix.sum(axis=1) preserves orientation. - Fails in NumPy <= 0.9.6.2127. - """ - M = matrix([[1, 2, 0, 0], - [3, 4, 0, 0], - [1, 2, 1, 2], - [3, 4, 3, 4]]) - sum0 = matrix([8, 12, 4, 6]) - sum1 = matrix([3, 7, 6, 14]).T - sumall = 30 - assert_array_equal(sum0, M.sum(axis=0)) - assert_array_equal(sum1, M.sum(axis=1)) - assert_equal(sumall, M.sum()) - - assert_array_equal(sum0, np.sum(M, axis=0)) - assert_array_equal(sum1, np.sum(M, axis=1)) - assert_equal(sumall, np.sum(M)) - - def test_prod(self): - x = matrix([[1, 2, 3], [4, 5, 6]]) - assert_equal(x.prod(), 720) - assert_equal(x.prod(0), matrix([[4, 10, 18]])) - assert_equal(x.prod(1), matrix([[6], [120]])) - - assert_equal(np.prod(x), 720) - assert_equal(np.prod(x, axis=0), matrix([[4, 10, 18]])) - assert_equal(np.prod(x, axis=1), matrix([[6], [120]])) - - y = matrix([0, 1, 3]) - assert_(y.prod() == 0) - - def test_max(self): - x = matrix([[1, 2, 3], [4, 5, 6]]) - assert_equal(x.max(), 6) - assert_equal(x.max(0), matrix([[4, 5, 6]])) - assert_equal(x.max(1), matrix([[3], [6]])) - - assert_equal(np.max(x), 6) - assert_equal(np.max(x, axis=0), matrix([[4, 5, 6]])) - assert_equal(np.max(x, axis=1), matrix([[3], [6]])) - - def test_min(self): - x = matrix([[1, 2, 3], [4, 5, 6]]) - 
assert_equal(x.min(), 1) - assert_equal(x.min(0), matrix([[1, 2, 3]])) - assert_equal(x.min(1), matrix([[1], [4]])) - - assert_equal(np.min(x), 1) - assert_equal(np.min(x, axis=0), matrix([[1, 2, 3]])) - assert_equal(np.min(x, axis=1), matrix([[1], [4]])) - - def test_ptp(self): - x = np.arange(4).reshape((2, 2)) - assert_(x.ptp() == 3) - assert_(np.all(x.ptp(0) == np.array([2, 2]))) - assert_(np.all(x.ptp(1) == np.array([1, 1]))) - - def test_var(self): - x = np.arange(9).reshape((3, 3)) - mx = x.view(np.matrix) - assert_equal(x.var(ddof=0), mx.var(ddof=0)) - assert_equal(x.var(ddof=1), mx.var(ddof=1)) - - def test_basic(self): - import numpy.linalg as linalg - - A = np.array([[1., 2.], - [3., 4.]]) - mA = matrix(A) - assert_(np.allclose(linalg.inv(A), mA.I)) - assert_(np.all(np.array(np.transpose(A) == mA.T))) - assert_(np.all(np.array(np.transpose(A) == mA.H))) - assert_(np.all(A == mA.A)) - - B = A + 2j*A - mB = matrix(B) - assert_(np.allclose(linalg.inv(B), mB.I)) - assert_(np.all(np.array(np.transpose(B) == mB.T))) - assert_(np.all(np.array(np.transpose(B).conj() == mB.H))) - - def test_pinv(self): - x = matrix(np.arange(6).reshape(2, 3)) - xpinv = matrix([[-0.77777778, 0.27777778], - [-0.11111111, 0.11111111], - [ 0.55555556, -0.05555556]]) - assert_almost_equal(x.I, xpinv) - - def test_comparisons(self): - A = np.arange(100).reshape(10, 10) - mA = matrix(A) - mB = matrix(A) + 0.1 - assert_(np.all(mB == A+0.1)) - assert_(np.all(mB == matrix(A+0.1))) - assert_(not np.any(mB == matrix(A-0.1))) - assert_(np.all(mA < mB)) - assert_(np.all(mA <= mB)) - assert_(np.all(mA <= mA)) - assert_(not np.any(mA < mA)) - - assert_(not np.any(mB < mA)) - assert_(np.all(mB >= mA)) - assert_(np.all(mB >= mB)) - assert_(not np.any(mB > mB)) - - assert_(np.all(mA == mA)) - assert_(not np.any(mA == mB)) - assert_(np.all(mB != mA)) - - assert_(not np.all(abs(mA) > 0)) - assert_(np.all(abs(mB > 0))) - - def test_asmatrix(self): - A = np.arange(100).reshape(10, 10) - mA = 
asmatrix(A) - A[0, 0] = -10 - assert_(A[0, 0] == mA[0, 0]) - - def test_noaxis(self): - A = matrix([[1, 0], [0, 1]]) - assert_(A.sum() == matrix(2)) - assert_(A.mean() == matrix(0.5)) - - def test_repr(self): - A = matrix([[1, 0], [0, 1]]) - assert_(repr(A) == "matrix([[1, 0],\n [0, 1]])") - - def test_make_bool_matrix_from_str(self): - A = matrix('True; True; False') - B = matrix([[True], [True], [False]]) - assert_array_equal(A, B) - -class TestCasting(object): - def test_basic(self): - A = np.arange(100).reshape(10, 10) - mA = matrix(A) - - mB = mA.copy() - O = np.ones((10, 10), np.float64) * 0.1 - mB = mB + O - assert_(mB.dtype.type == np.float64) - assert_(np.all(mA != mB)) - assert_(np.all(mB == mA+0.1)) - - mC = mA.copy() - O = np.ones((10, 10), np.complex128) - mC = mC * O - assert_(mC.dtype.type == np.complex128) - assert_(np.all(mA != mB)) - - -class TestAlgebra(object): - def test_basic(self): - import numpy.linalg as linalg - - A = np.array([[1., 2.], [3., 4.]]) - mA = matrix(A) - - B = np.identity(2) - for i in range(6): - assert_(np.allclose((mA ** i).A, B)) - B = np.dot(B, A) - - Ainv = linalg.inv(A) - B = np.identity(2) - for i in range(6): - assert_(np.allclose((mA ** -i).A, B)) - B = np.dot(B, Ainv) - - assert_(np.allclose((mA * mA).A, np.dot(A, A))) - assert_(np.allclose((mA + mA).A, (A + A))) - assert_(np.allclose((3*mA).A, (3*A))) - - mA2 = matrix(A) - mA2 *= 3 - assert_(np.allclose(mA2.A, 3*A)) - - def test_pow(self): - """Test raising a matrix to an integer power works as expected.""" - m = matrix("1. 2.; 3. 
4.") - m2 = m.copy() - m2 **= 2 - mi = m.copy() - mi **= -1 - m4 = m2.copy() - m4 **= 2 - assert_array_almost_equal(m2, m**2) - assert_array_almost_equal(m4, np.dot(m2, m2)) - assert_array_almost_equal(np.dot(mi, m), np.eye(2)) - - def test_scalar_type_pow(self): - m = matrix([[1, 2], [3, 4]]) - for scalar_t in [np.int8, np.uint8]: - two = scalar_t(2) - assert_array_almost_equal(m ** 2, m ** two) - - def test_notimplemented(self): - '''Check that 'not implemented' operations produce a failure.''' - A = matrix([[1., 2.], - [3., 4.]]) - - # __rpow__ - with assert_raises(TypeError): - 1.0**A - - # __mul__ with something not a list, ndarray, tuple, or scalar - with assert_raises(TypeError): - A*object() - - -class TestMatrixReturn(object): - def test_instance_methods(self): - a = matrix([1.0], dtype='f8') - methodargs = { - 'astype': ('intc',), - 'clip': (0.0, 1.0), - 'compress': ([1],), - 'repeat': (1,), - 'reshape': (1,), - 'swapaxes': (0, 0), - 'dot': np.array([1.0]), - } - excluded_methods = [ - 'argmin', 'choose', 'dump', 'dumps', 'fill', 'getfield', - 'getA', 'getA1', 'item', 'nonzero', 'put', 'putmask', 'resize', - 'searchsorted', 'setflags', 'setfield', 'sort', - 'partition', 'argpartition', - 'take', 'tofile', 'tolist', 'tostring', 'tobytes', 'all', 'any', - 'sum', 'argmax', 'argmin', 'min', 'max', 'mean', 'var', 'ptp', - 'prod', 'std', 'ctypes', 'itemset', - ] - for attrib in dir(a): - if attrib.startswith('_') or attrib in excluded_methods: - continue - f = getattr(a, attrib) - if isinstance(f, collections_abc.Callable): - # reset contents of a - a.astype('f8') - a.fill(1.0) - if attrib in methodargs: - args = methodargs[attrib] - else: - args = () - b = f(*args) - assert_(type(b) is matrix, "%s" % attrib) - assert_(type(a.real) is matrix) - assert_(type(a.imag) is matrix) - c, d = matrix([0.0]).nonzero() - assert_(type(c) is np.ndarray) - assert_(type(d) is np.ndarray) - - -class TestIndexing(object): - def test_basic(self): - x = asmatrix(np.zeros((3, 2), 
float)) - y = np.zeros((3, 1), float) - y[:, 0] = [0.8, 0.2, 0.3] - x[:, 1] = y > 0.5 - assert_equal(x, [[0, 1], [0, 0], [0, 0]]) - - -class TestNewScalarIndexing(object): - a = matrix([[1, 2], [3, 4]]) - - def test_dimesions(self): - a = self.a - x = a[0] - assert_equal(x.ndim, 2) - - def test_array_from_matrix_list(self): - a = self.a - x = np.array([a, a]) - assert_equal(x.shape, [2, 2, 2]) - - def test_array_to_list(self): - a = self.a - assert_equal(a.tolist(), [[1, 2], [3, 4]]) - - def test_fancy_indexing(self): - a = self.a - x = a[1, [0, 1, 0]] - assert_(isinstance(x, matrix)) - assert_equal(x, matrix([[3, 4, 3]])) - x = a[[1, 0]] - assert_(isinstance(x, matrix)) - assert_equal(x, matrix([[3, 4], [1, 2]])) - x = a[[[1], [0]], [[1, 0], [0, 1]]] - assert_(isinstance(x, matrix)) - assert_equal(x, matrix([[4, 3], [1, 2]])) - - def test_matrix_element(self): - x = matrix([[1, 2, 3], [4, 5, 6]]) - assert_equal(x[0][0], matrix([[1, 2, 3]])) - assert_equal(x[0][0].shape, (1, 3)) - assert_equal(x[0].shape, (1, 3)) - assert_equal(x[:, 0].shape, (2, 1)) - - x = matrix(0) - assert_equal(x[0, 0], 0) - assert_equal(x[0], 0) - assert_equal(x[:, 0].shape, x.shape) - - def test_scalar_indexing(self): - x = asmatrix(np.zeros((3, 2), float)) - assert_equal(x[0, 0], x[0][0]) - - def test_row_column_indexing(self): - x = asmatrix(np.eye(2)) - assert_array_equal(x[0,:], [[1, 0]]) - assert_array_equal(x[1,:], [[0, 1]]) - assert_array_equal(x[:, 0], [[1], [0]]) - assert_array_equal(x[:, 1], [[0], [1]]) - - def test_boolean_indexing(self): - A = np.arange(6) - A.shape = (3, 2) - x = asmatrix(A) - assert_array_equal(x[:, np.array([True, False])], x[:, 0]) - assert_array_equal(x[np.array([True, False, False]),:], x[0,:]) - - def test_list_indexing(self): - A = np.arange(6) - A.shape = (3, 2) - x = asmatrix(A) - assert_array_equal(x[:, [1, 0]], x[:, ::-1]) - assert_array_equal(x[[2, 1, 0],:], x[::-1,:]) - - -class TestPower(object): - def test_returntype(self): - a = np.array([[0, 1], 
[0, 0]]) - assert_(type(matrix_power(a, 2)) is np.ndarray) - a = mat(a) - assert_(type(matrix_power(a, 2)) is matrix) - - def test_list(self): - assert_array_equal(matrix_power([[0, 1], [0, 0]], 2), [[0, 0], [0, 0]]) - - -class TestShape(object): - - a = np.array([[1], [2]]) - m = matrix([[1], [2]]) - - def test_shape(self): - assert_equal(self.a.shape, (2, 1)) - assert_equal(self.m.shape, (2, 1)) - - def test_numpy_ravel(self): - assert_equal(np.ravel(self.a).shape, (2,)) - assert_equal(np.ravel(self.m).shape, (2,)) - - def test_member_ravel(self): - assert_equal(self.a.ravel().shape, (2,)) - assert_equal(self.m.ravel().shape, (1, 2)) - - def test_member_flatten(self): - assert_equal(self.a.flatten().shape, (2,)) - assert_equal(self.m.flatten().shape, (1, 2)) - - def test_numpy_ravel_order(self): - x = np.array([[1, 2, 3], [4, 5, 6]]) - assert_equal(np.ravel(x), [1, 2, 3, 4, 5, 6]) - assert_equal(np.ravel(x, order='F'), [1, 4, 2, 5, 3, 6]) - assert_equal(np.ravel(x.T), [1, 4, 2, 5, 3, 6]) - assert_equal(np.ravel(x.T, order='A'), [1, 2, 3, 4, 5, 6]) - x = matrix([[1, 2, 3], [4, 5, 6]]) - assert_equal(np.ravel(x), [1, 2, 3, 4, 5, 6]) - assert_equal(np.ravel(x, order='F'), [1, 4, 2, 5, 3, 6]) - assert_equal(np.ravel(x.T), [1, 4, 2, 5, 3, 6]) - assert_equal(np.ravel(x.T, order='A'), [1, 2, 3, 4, 5, 6]) - - def test_matrix_ravel_order(self): - x = matrix([[1, 2, 3], [4, 5, 6]]) - assert_equal(x.ravel(), [[1, 2, 3, 4, 5, 6]]) - assert_equal(x.ravel(order='F'), [[1, 4, 2, 5, 3, 6]]) - assert_equal(x.T.ravel(), [[1, 4, 2, 5, 3, 6]]) - assert_equal(x.T.ravel(order='A'), [[1, 2, 3, 4, 5, 6]]) - - def test_array_memory_sharing(self): - assert_(np.may_share_memory(self.a, self.a.ravel())) - assert_(not np.may_share_memory(self.a, self.a.flatten())) - - def test_matrix_memory_sharing(self): - assert_(np.may_share_memory(self.m, self.m.ravel())) - assert_(not np.may_share_memory(self.m, self.m.flatten())) - - def test_expand_dims_matrix(self): - # matrices are always 2d - so 
expand_dims only makes sense when the - # type is changed away from matrix. - a = np.arange(10).reshape((2, 5)).view(np.matrix) - expanded = np.expand_dims(a, axis=1) - assert_equal(expanded.ndim, 3) - assert_(not isinstance(expanded, np.matrix)) diff --git a/venv/lib/python3.7/site-packages/numpy/matrixlib/tests/test_interaction.py b/venv/lib/python3.7/site-packages/numpy/matrixlib/tests/test_interaction.py deleted file mode 100644 index 088ae3c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/matrixlib/tests/test_interaction.py +++ /dev/null @@ -1,363 +0,0 @@ -"""Tests of interaction of matrix with other parts of numpy. - -Note that tests with MaskedArray and linalg are done in separate files. -""" -from __future__ import division, absolute_import, print_function - -import pytest - -import textwrap -import warnings - -import numpy as np -from numpy.testing import (assert_, assert_equal, assert_raises, - assert_raises_regex, assert_array_equal, - assert_almost_equal, assert_array_almost_equal) - - -def test_fancy_indexing(): - # The matrix class messes with the shape. While this is always - # weird (getitem is not used, it does not have setitem nor knows - # about fancy indexing), this tests gh-3110 - # 2018-04-29: moved here from core.tests.test_index. - m = np.matrix([[1, 2], [3, 4]]) - - assert_(isinstance(m[[0, 1, 0], :], np.matrix)) - - # gh-3110. Note the transpose currently because matrices do *not* - # support dimension fixing for fancy indexing correctly. - x = np.asmatrix(np.arange(50).reshape(5, 10)) - assert_equal(x[:2, np.array(-1)], x[:2, -1].T) - - -def test_polynomial_mapdomain(): - # test that polynomial preserved matrix subtype. - # 2018-04-29: moved here from polynomial.tests.polyutils. 
- dom1 = [0, 4] - dom2 = [1, 3] - x = np.matrix([dom1, dom1]) - res = np.polynomial.polyutils.mapdomain(x, dom1, dom2) - assert_(isinstance(res, np.matrix)) - - -def test_sort_matrix_none(): - # 2018-04-29: moved here from core.tests.test_multiarray - a = np.matrix([[2, 1, 0]]) - actual = np.sort(a, axis=None) - expected = np.matrix([[0, 1, 2]]) - assert_equal(actual, expected) - assert_(type(expected) is np.matrix) - - -def test_partition_matrix_none(): - # gh-4301 - # 2018-04-29: moved here from core.tests.test_multiarray - a = np.matrix([[2, 1, 0]]) - actual = np.partition(a, 1, axis=None) - expected = np.matrix([[0, 1, 2]]) - assert_equal(actual, expected) - assert_(type(expected) is np.matrix) - - -def test_dot_scalar_and_matrix_of_objects(): - # Ticket #2469 - # 2018-04-29: moved here from core.tests.test_multiarray - arr = np.matrix([1, 2], dtype=object) - desired = np.matrix([[3, 6]], dtype=object) - assert_equal(np.dot(arr, 3), desired) - assert_equal(np.dot(3, arr), desired) - - -def test_inner_scalar_and_matrix(): - # 2018-04-29: moved here from core.tests.test_multiarray - for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': - sca = np.array(3, dtype=dt)[()] - arr = np.matrix([[1, 2], [3, 4]], dtype=dt) - desired = np.matrix([[3, 6], [9, 12]], dtype=dt) - assert_equal(np.inner(arr, sca), desired) - assert_equal(np.inner(sca, arr), desired) - - -def test_inner_scalar_and_matrix_of_objects(): - # Ticket #4482 - # 2018-04-29: moved here from core.tests.test_multiarray - arr = np.matrix([1, 2], dtype=object) - desired = np.matrix([[3, 6]], dtype=object) - assert_equal(np.inner(arr, 3), desired) - assert_equal(np.inner(3, arr), desired) - - -def test_iter_allocate_output_subtype(): - # Make sure that the subtype with priority wins - # 2018-04-29: moved here from core.tests.test_nditer, given the - # matrix specific shape test. 
- - # matrix vs ndarray - a = np.matrix([[1, 2], [3, 4]]) - b = np.arange(4).reshape(2, 2).T - i = np.nditer([a, b, None], [], - [['readonly'], ['readonly'], ['writeonly', 'allocate']]) - assert_(type(i.operands[2]) is np.matrix) - assert_(type(i.operands[2]) is not np.ndarray) - assert_equal(i.operands[2].shape, (2, 2)) - - # matrix always wants things to be 2D - b = np.arange(4).reshape(1, 2, 2) - assert_raises(RuntimeError, np.nditer, [a, b, None], [], - [['readonly'], ['readonly'], ['writeonly', 'allocate']]) - # but if subtypes are disabled, the result can still work - i = np.nditer([a, b, None], [], - [['readonly'], ['readonly'], - ['writeonly', 'allocate', 'no_subtype']]) - assert_(type(i.operands[2]) is np.ndarray) - assert_(type(i.operands[2]) is not np.matrix) - assert_equal(i.operands[2].shape, (1, 2, 2)) - - -def like_function(): - # 2018-04-29: moved here from core.tests.test_numeric - a = np.matrix([[1, 2], [3, 4]]) - for like_function in np.zeros_like, np.ones_like, np.empty_like: - b = like_function(a) - assert_(type(b) is np.matrix) - - c = like_function(a, subok=False) - assert_(type(c) is not np.matrix) - - -def test_array_astype(): - # 2018-04-29: copied here from core.tests.test_api - # subok=True passes through a matrix - a = np.matrix([[0, 1, 2], [3, 4, 5]], dtype='f4') - b = a.astype('f4', subok=True, copy=False) - assert_(a is b) - - # subok=True is default, and creates a subtype on a cast - b = a.astype('i4', copy=False) - assert_equal(a, b) - assert_equal(type(b), np.matrix) - - # subok=False never returns a matrix - b = a.astype('f4', subok=False, copy=False) - assert_equal(a, b) - assert_(not (a is b)) - assert_(type(b) is not np.matrix) - - -def test_stack(): - # 2018-04-29: copied here from core.tests.test_shape_base - # check np.matrix cannot be stacked - m = np.matrix([[1, 2], [3, 4]]) - assert_raises_regex(ValueError, 'shape too large to be a matrix', - np.stack, [m, m]) - - -def test_object_scalar_multiply(): - # Tickets #2469 and 
#4482 - # 2018-04-29: moved here from core.tests.test_ufunc - arr = np.matrix([1, 2], dtype=object) - desired = np.matrix([[3, 6]], dtype=object) - assert_equal(np.multiply(arr, 3), desired) - assert_equal(np.multiply(3, arr), desired) - - -def test_nanfunctions_matrices(): - # Check that it works and that type and - # shape are preserved - # 2018-04-29: moved here from core.tests.test_nanfunctions - mat = np.matrix(np.eye(3)) - for f in [np.nanmin, np.nanmax]: - res = f(mat, axis=0) - assert_(isinstance(res, np.matrix)) - assert_(res.shape == (1, 3)) - res = f(mat, axis=1) - assert_(isinstance(res, np.matrix)) - assert_(res.shape == (3, 1)) - res = f(mat) - assert_(np.isscalar(res)) - # check that rows of nan are dealt with for subclasses (#4628) - mat[1] = np.nan - for f in [np.nanmin, np.nanmax]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - res = f(mat, axis=0) - assert_(isinstance(res, np.matrix)) - assert_(not np.any(np.isnan(res))) - assert_(len(w) == 0) - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - res = f(mat, axis=1) - assert_(isinstance(res, np.matrix)) - assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0]) - and not np.isnan(res[2, 0])) - assert_(len(w) == 1, 'no warning raised') - assert_(issubclass(w[0].category, RuntimeWarning)) - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - res = f(mat) - assert_(np.isscalar(res)) - assert_(res != np.nan) - assert_(len(w) == 0) - - -def test_nanfunctions_matrices_general(): - # Check that it works and that type and - # shape are preserved - # 2018-04-29: moved here from core.tests.test_nanfunctions - mat = np.matrix(np.eye(3)) - for f in (np.nanargmin, np.nanargmax, np.nansum, np.nanprod, - np.nanmean, np.nanvar, np.nanstd): - res = f(mat, axis=0) - assert_(isinstance(res, np.matrix)) - assert_(res.shape == (1, 3)) - res = f(mat, axis=1) - assert_(isinstance(res, np.matrix)) - 
assert_(res.shape == (3, 1)) - res = f(mat) - assert_(np.isscalar(res)) - - for f in np.nancumsum, np.nancumprod: - res = f(mat, axis=0) - assert_(isinstance(res, np.matrix)) - assert_(res.shape == (3, 3)) - res = f(mat, axis=1) - assert_(isinstance(res, np.matrix)) - assert_(res.shape == (3, 3)) - res = f(mat) - assert_(isinstance(res, np.matrix)) - assert_(res.shape == (1, 3*3)) - - -def test_average_matrix(): - # 2018-04-29: moved here from core.tests.test_function_base. - y = np.matrix(np.random.rand(5, 5)) - assert_array_equal(y.mean(0), np.average(y, 0)) - - a = np.matrix([[1, 2], [3, 4]]) - w = np.matrix([[1, 2], [3, 4]]) - - r = np.average(a, axis=0, weights=w) - assert_equal(type(r), np.matrix) - assert_equal(r, [[2.5, 10.0/3]]) - - -def test_trapz_matrix(): - # Test to make sure matrices give the same answer as ndarrays - # 2018-04-29: moved here from core.tests.test_function_base. - x = np.linspace(0, 5) - y = x * x - r = np.trapz(y, x) - mx = np.matrix(x) - my = np.matrix(y) - mr = np.trapz(my, mx) - assert_almost_equal(mr, r) - - -def test_ediff1d_matrix(): - # 2018-04-29: moved here from core.tests.test_arraysetops. - assert(isinstance(np.ediff1d(np.matrix(1)), np.matrix)) - assert(isinstance(np.ediff1d(np.matrix(1), to_begin=1), np.matrix)) - - -def test_apply_along_axis_matrix(): - # this test is particularly malicious because matrix - # refuses to become 1d - # 2018-04-29: moved here from core.tests.test_shape_base. - def double(row): - return row * 2 - - m = np.matrix([[0, 1], [2, 3]]) - expected = np.matrix([[0, 2], [4, 6]]) - - result = np.apply_along_axis(double, 0, m) - assert_(isinstance(result, np.matrix)) - assert_array_equal(result, expected) - - result = np.apply_along_axis(double, 1, m) - assert_(isinstance(result, np.matrix)) - assert_array_equal(result, expected) - - -def test_kron_matrix(): - # 2018-04-29: moved here from core.tests.test_shape_base. 
- a = np.ones([2, 2]) - m = np.asmatrix(a) - assert_equal(type(np.kron(a, a)), np.ndarray) - assert_equal(type(np.kron(m, m)), np.matrix) - assert_equal(type(np.kron(a, m)), np.matrix) - assert_equal(type(np.kron(m, a)), np.matrix) - - -class TestConcatenatorMatrix(object): - # 2018-04-29: moved here from core.tests.test_index_tricks. - def test_matrix(self): - a = [1, 2] - b = [3, 4] - - ab_r = np.r_['r', a, b] - ab_c = np.r_['c', a, b] - - assert_equal(type(ab_r), np.matrix) - assert_equal(type(ab_c), np.matrix) - - assert_equal(np.array(ab_r), [[1, 2, 3, 4]]) - assert_equal(np.array(ab_c), [[1], [2], [3], [4]]) - - assert_raises(ValueError, lambda: np.r_['rc', a, b]) - - def test_matrix_scalar(self): - r = np.r_['r', [1, 2], 3] - assert_equal(type(r), np.matrix) - assert_equal(np.array(r), [[1, 2, 3]]) - - def test_matrix_builder(self): - a = np.array([1]) - b = np.array([2]) - c = np.array([3]) - d = np.array([4]) - actual = np.r_['a, b; c, d'] - expected = np.bmat([[a, b], [c, d]]) - - assert_equal(actual, expected) - assert_equal(type(actual), type(expected)) - - -def test_array_equal_error_message_matrix(): - # 2018-04-29: moved here from testing.tests.test_utils. - try: - assert_equal(np.array([1, 2]), np.matrix([1, 2])) - except AssertionError as e: - msg = str(e) - msg2 = msg.replace("shapes (2L,), (1L, 2L)", "shapes (2,), (1, 2)") - msg_reference = textwrap.dedent("""\ - - Arrays are not equal - - (shapes (2,), (1, 2) mismatch) - x: array([1, 2]) - y: matrix([[1, 2]])""") - try: - assert_equal(msg, msg_reference) - except AssertionError: - assert_equal(msg2, msg_reference) - else: - raise AssertionError("Did not raise") - - -def test_array_almost_equal_matrix(): - # Matrix slicing keeps things 2-D, while array does not necessarily. - # See gh-8452. - # 2018-04-29: moved here from testing.tests.test_utils. 
- m1 = np.matrix([[1., 2.]]) - m2 = np.matrix([[1., np.nan]]) - m3 = np.matrix([[1., -np.inf]]) - m4 = np.matrix([[np.nan, np.inf]]) - m5 = np.matrix([[1., 2.], [np.nan, np.inf]]) - for assert_func in assert_array_almost_equal, assert_almost_equal: - for m in m1, m2, m3, m4, m5: - assert_func(m, m) - a = np.array(m) - assert_func(a, m) - assert_func(m, a) diff --git a/venv/lib/python3.7/site-packages/numpy/matrixlib/tests/test_masked_matrix.py b/venv/lib/python3.7/site-packages/numpy/matrixlib/tests/test_masked_matrix.py deleted file mode 100644 index d3911d2..0000000 --- a/venv/lib/python3.7/site-packages/numpy/matrixlib/tests/test_masked_matrix.py +++ /dev/null @@ -1,231 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.ma.testutils import (assert_, assert_equal, assert_raises, - assert_array_equal) -from numpy.ma.core import (masked_array, masked_values, masked, allequal, - MaskType, getmask, MaskedArray, nomask, - log, add, hypot, divide) -from numpy.ma.extras import mr_ -from numpy.compat import pickle - - -class MMatrix(MaskedArray, np.matrix,): - - def __new__(cls, data, mask=nomask): - mat = np.matrix(data) - _data = MaskedArray.__new__(cls, data=mat, mask=mask) - return _data - - def __array_finalize__(self, obj): - np.matrix.__array_finalize__(self, obj) - MaskedArray.__array_finalize__(self, obj) - return - - @property - def _series(self): - _view = self.view(MaskedArray) - _view._sharedmask = False - return _view - - -class TestMaskedMatrix(object): - def test_matrix_indexing(self): - # Tests conversions and indexing - x1 = np.matrix([[1, 2, 3], [4, 3, 2]]) - x2 = masked_array(x1, mask=[[1, 0, 0], [0, 1, 0]]) - x3 = masked_array(x1, mask=[[0, 1, 0], [1, 0, 0]]) - x4 = masked_array(x1) - # test conversion to strings - str(x2) # raises? - repr(x2) # raises? 
- # tests of indexing - assert_(type(x2[1, 0]) is type(x1[1, 0])) - assert_(x1[1, 0] == x2[1, 0]) - assert_(x2[1, 1] is masked) - assert_equal(x1[0, 2], x2[0, 2]) - assert_equal(x1[0, 1:], x2[0, 1:]) - assert_equal(x1[:, 2], x2[:, 2]) - assert_equal(x1[:], x2[:]) - assert_equal(x1[1:], x3[1:]) - x1[0, 2] = 9 - x2[0, 2] = 9 - assert_equal(x1, x2) - x1[0, 1:] = 99 - x2[0, 1:] = 99 - assert_equal(x1, x2) - x2[0, 1] = masked - assert_equal(x1, x2) - x2[0, 1:] = masked - assert_equal(x1, x2) - x2[0, :] = x1[0, :] - x2[0, 1] = masked - assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]]))) - x3[1, :] = masked_array([1, 2, 3], [1, 1, 0]) - assert_(allequal(getmask(x3)[1], masked_array([1, 1, 0]))) - assert_(allequal(getmask(x3[1]), masked_array([1, 1, 0]))) - x4[1, :] = masked_array([1, 2, 3], [1, 1, 0]) - assert_(allequal(getmask(x4[1]), masked_array([1, 1, 0]))) - assert_(allequal(x4[1], masked_array([1, 2, 3]))) - x1 = np.matrix(np.arange(5) * 1.0) - x2 = masked_values(x1, 3.0) - assert_equal(x1, x2) - assert_(allequal(masked_array([0, 0, 0, 1, 0], dtype=MaskType), - x2.mask)) - assert_equal(3.0, x2.fill_value) - - def test_pickling_subbaseclass(self): - # Test pickling w/ a subclass of ndarray - a = masked_array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2) - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) - assert_equal(a_pickled._mask, a._mask) - assert_equal(a_pickled, a) - assert_(isinstance(a_pickled._data, np.matrix)) - - def test_count_mean_with_matrix(self): - m = masked_array(np.matrix([[1, 2], [3, 4]]), mask=np.zeros((2, 2))) - - assert_equal(m.count(axis=0).shape, (1, 2)) - assert_equal(m.count(axis=1).shape, (2, 1)) - - # Make sure broadcasting inside mean and var work - assert_equal(m.mean(axis=0), [[2., 3.]]) - assert_equal(m.mean(axis=1), [[1.5], [3.5]]) - - def test_flat(self): - # Test that flat can return items even for matrices [#4585, #4615] - # test simple access - 
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) - assert_equal(test.flat[1], 2) - assert_equal(test.flat[2], masked) - assert_(np.all(test.flat[0:2] == test[0, 0:2])) - # Test flat on masked_matrices - test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) - test.flat = masked_array([3, 2, 1], mask=[1, 0, 0]) - control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0]) - assert_equal(test, control) - # Test setting - test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) - testflat = test.flat - testflat[:] = testflat[[2, 1, 0]] - assert_equal(test, control) - testflat[0] = 9 - # test that matrices keep the correct shape (#4615) - a = masked_array(np.matrix(np.eye(2)), mask=0) - b = a.flat - b01 = b[:2] - assert_equal(b01.data, np.array([[1., 0.]])) - assert_equal(b01.mask, np.array([[False, False]])) - - def test_allany_onmatrices(self): - x = np.array([[0.13, 0.26, 0.90], - [0.28, 0.33, 0.63], - [0.31, 0.87, 0.70]]) - X = np.matrix(x) - m = np.array([[True, False, False], - [False, False, False], - [True, True, False]], dtype=np.bool_) - mX = masked_array(X, mask=m) - mXbig = (mX > 0.5) - mXsmall = (mX < 0.5) - - assert_(not mXbig.all()) - assert_(mXbig.any()) - assert_equal(mXbig.all(0), np.matrix([False, False, True])) - assert_equal(mXbig.all(1), np.matrix([False, False, True]).T) - assert_equal(mXbig.any(0), np.matrix([False, False, True])) - assert_equal(mXbig.any(1), np.matrix([True, True, True]).T) - - assert_(not mXsmall.all()) - assert_(mXsmall.any()) - assert_equal(mXsmall.all(0), np.matrix([True, True, False])) - assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T) - assert_equal(mXsmall.any(0), np.matrix([True, True, False])) - assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T) - - def test_compressed(self): - a = masked_array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0]) - b = a.compressed() - assert_equal(b, a) - assert_(isinstance(b, np.matrix)) - a[0, 0] = masked - b = a.compressed() - 
assert_equal(b, [[2, 3, 4]]) - - def test_ravel(self): - a = masked_array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]]) - aravel = a.ravel() - assert_equal(aravel.shape, (1, 5)) - assert_equal(aravel._mask.shape, a.shape) - - def test_view(self): - # Test view w/ flexible dtype - iterator = list(zip(np.arange(10), np.random.rand(10))) - data = np.array(iterator) - a = masked_array(iterator, dtype=[('a', float), ('b', float)]) - a.mask[0] = (1, 0) - test = a.view((float, 2), np.matrix) - assert_equal(test, data) - assert_(isinstance(test, np.matrix)) - assert_(not isinstance(test, MaskedArray)) - - -class TestSubclassing(object): - # Test suite for masked subclasses of ndarray. - - def setup(self): - x = np.arange(5, dtype='float') - mx = MMatrix(x, mask=[0, 1, 0, 0, 0]) - self.data = (x, mx) - - def test_maskedarray_subclassing(self): - # Tests subclassing MaskedArray - (x, mx) = self.data - assert_(isinstance(mx._data, np.matrix)) - - def test_masked_unary_operations(self): - # Tests masked_unary_operation - (x, mx) = self.data - with np.errstate(divide='ignore'): - assert_(isinstance(log(mx), MMatrix)) - assert_equal(log(x), np.log(x)) - - def test_masked_binary_operations(self): - # Tests masked_binary_operation - (x, mx) = self.data - # Result should be a MMatrix - assert_(isinstance(add(mx, mx), MMatrix)) - assert_(isinstance(add(mx, x), MMatrix)) - # Result should work - assert_equal(add(mx, x), mx+x) - assert_(isinstance(add(mx, mx)._data, np.matrix)) - assert_(isinstance(add.outer(mx, mx), MMatrix)) - assert_(isinstance(hypot(mx, mx), MMatrix)) - assert_(isinstance(hypot(mx, x), MMatrix)) - - def test_masked_binary_operations2(self): - # Tests domained_masked_binary_operation - (x, mx) = self.data - xmx = masked_array(mx.data.__array__(), mask=mx.mask) - assert_(isinstance(divide(mx, mx), MMatrix)) - assert_(isinstance(divide(mx, x), MMatrix)) - assert_equal(divide(mx, mx), divide(xmx, xmx)) - -class TestConcatenator(object): - # Tests for mr_, the 
equivalent of r_ for masked arrays. - - def test_matrix_builder(self): - assert_raises(np.ma.MAError, lambda: mr_['1, 2; 3, 4']) - - def test_matrix(self): - # Test consistency with unmasked version. If we ever deprecate - # matrix, this test should either still pass, or both actual and - # expected should fail to be build. - actual = mr_['r', 1, 2, 3] - expected = np.ma.array(np.r_['r', 1, 2, 3]) - assert_array_equal(actual, expected) - - # outer type is masked array, inner type is matrix - assert_equal(type(actual), type(expected)) - assert_equal(type(actual.data), type(expected.data)) diff --git a/venv/lib/python3.7/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py b/venv/lib/python3.7/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py deleted file mode 100644 index 6fc733c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py +++ /dev/null @@ -1,95 +0,0 @@ -""" Test functions for linalg module using the matrix class.""" -from __future__ import division, absolute_import, print_function - -import numpy as np - -from numpy.linalg.tests.test_linalg import ( - LinalgCase, apply_tag, TestQR as _TestQR, LinalgTestCase, - _TestNorm2D, _TestNormDoubleBase, _TestNormSingleBase, _TestNormInt64Base, - SolveCases, InvCases, EigvalsCases, EigCases, SVDCases, CondCases, - PinvCases, DetCases, LstsqCases) - - -CASES = [] - -# square test cases -CASES += apply_tag('square', [ - LinalgCase("0x0_matrix", - np.empty((0, 0), dtype=np.double).view(np.matrix), - np.empty((0, 1), dtype=np.double).view(np.matrix), - tags={'size-0'}), - LinalgCase("matrix_b_only", - np.array([[1., 2.], [3., 4.]]), - np.matrix([2., 1.]).T), - LinalgCase("matrix_a_and_b", - np.matrix([[1., 2.], [3., 4.]]), - np.matrix([2., 1.]).T), -]) - -# hermitian test-cases -CASES += apply_tag('hermitian', [ - LinalgCase("hmatrix_a_and_b", - np.matrix([[1., 2.], [2., 1.]]), - None), -]) -# No need to make generalized or strided cases for matrices. 
- - -class MatrixTestCase(LinalgTestCase): - TEST_CASES = CASES - - -class TestSolveMatrix(SolveCases, MatrixTestCase): - pass - - -class TestInvMatrix(InvCases, MatrixTestCase): - pass - - -class TestEigvalsMatrix(EigvalsCases, MatrixTestCase): - pass - - -class TestEigMatrix(EigCases, MatrixTestCase): - pass - - -class TestSVDMatrix(SVDCases, MatrixTestCase): - pass - - -class TestCondMatrix(CondCases, MatrixTestCase): - pass - - -class TestPinvMatrix(PinvCases, MatrixTestCase): - pass - - -class TestDetMatrix(DetCases, MatrixTestCase): - pass - - -class TestLstsqMatrix(LstsqCases, MatrixTestCase): - pass - - -class _TestNorm2DMatrix(_TestNorm2D): - array = np.matrix - - -class TestNormDoubleMatrix(_TestNorm2DMatrix, _TestNormDoubleBase): - pass - - -class TestNormSingleMatrix(_TestNorm2DMatrix, _TestNormSingleBase): - pass - - -class TestNormInt64Matrix(_TestNorm2DMatrix, _TestNormInt64Base): - pass - - -class TestQRMatrix(_TestQR): - array = np.matrix diff --git a/venv/lib/python3.7/site-packages/numpy/matrixlib/tests/test_multiarray.py b/venv/lib/python3.7/site-packages/numpy/matrixlib/tests/test_multiarray.py deleted file mode 100644 index 6d84bd4..0000000 --- a/venv/lib/python3.7/site-packages/numpy/matrixlib/tests/test_multiarray.py +++ /dev/null @@ -1,18 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.testing import assert_, assert_equal, assert_array_equal - -class TestView(object): - def test_type(self): - x = np.array([1, 2, 3]) - assert_(isinstance(x.view(np.matrix), np.matrix)) - - def test_keywords(self): - x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) - # We must be specific about the endianness here: - y = x.view(dtype='= 2.6. - -""" -from __future__ import division, absolute_import, print_function - -import abc -import numbers - -import numpy as np -from . 
import polyutils as pu - -__all__ = ['ABCPolyBase'] - -class ABCPolyBase(abc.ABC): - """An abstract base class for immutable series classes. - - ABCPolyBase provides the standard Python numerical methods - '+', '-', '*', '//', '%', 'divmod', '**', and '()' along with the - methods listed below. - - .. versionadded:: 1.9.0 - - Parameters - ---------- - coef : array_like - Series coefficients in order of increasing degree, i.e., - ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``, where - ``P_i`` is the basis polynomials of degree ``i``. - domain : (2,) array_like, optional - Domain to use. The interval ``[domain[0], domain[1]]`` is mapped - to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is the derived class domain. - window : (2,) array_like, optional - Window, see domain for its use. The default value is the - derived class window. - - Attributes - ---------- - coef : (N,) ndarray - Series coefficients in order of increasing degree. - domain : (2,) ndarray - Domain that is mapped to window. - window : (2,) ndarray - Window that domain is mapped to. - - Class Attributes - ---------------- - maxpower : int - Maximum power allowed, i.e., the largest number ``n`` such that - ``p(x)**n`` is allowed. This is to limit runaway polynomial size. - domain : (2,) ndarray - Default domain of the class. - window : (2,) ndarray - Default window of the class. - - """ - - # Not hashable - __hash__ = None - - # Opt out of numpy ufuncs and Python ops with ndarray subclasses. - __array_ufunc__ = None - - # Limit runaway size. 
T_n^m has degree n*m - maxpower = 100 - - @property - @abc.abstractmethod - def domain(self): - pass - - @property - @abc.abstractmethod - def window(self): - pass - - @property - @abc.abstractmethod - def nickname(self): - pass - - @property - @abc.abstractmethod - def basis_name(self): - pass - - @staticmethod - @abc.abstractmethod - def _add(c1, c2): - pass - - @staticmethod - @abc.abstractmethod - def _sub(c1, c2): - pass - - @staticmethod - @abc.abstractmethod - def _mul(c1, c2): - pass - - @staticmethod - @abc.abstractmethod - def _div(c1, c2): - pass - - @staticmethod - @abc.abstractmethod - def _pow(c, pow, maxpower=None): - pass - - @staticmethod - @abc.abstractmethod - def _val(x, c): - pass - - @staticmethod - @abc.abstractmethod - def _int(c, m, k, lbnd, scl): - pass - - @staticmethod - @abc.abstractmethod - def _der(c, m, scl): - pass - - @staticmethod - @abc.abstractmethod - def _fit(x, y, deg, rcond, full): - pass - - @staticmethod - @abc.abstractmethod - def _line(off, scl): - pass - - @staticmethod - @abc.abstractmethod - def _roots(c): - pass - - @staticmethod - @abc.abstractmethod - def _fromroots(r): - pass - - def has_samecoef(self, other): - """Check if coefficients match. - - .. versionadded:: 1.6.0 - - Parameters - ---------- - other : class instance - The other class must have the ``coef`` attribute. - - Returns - ------- - bool : boolean - True if the coefficients are the same, False otherwise. - - """ - if len(self.coef) != len(other.coef): - return False - elif not np.all(self.coef == other.coef): - return False - else: - return True - - def has_samedomain(self, other): - """Check if domains match. - - .. versionadded:: 1.6.0 - - Parameters - ---------- - other : class instance - The other class must have the ``domain`` attribute. - - Returns - ------- - bool : boolean - True if the domains are the same, False otherwise. - - """ - return np.all(self.domain == other.domain) - - def has_samewindow(self, other): - """Check if windows match. 
- - .. versionadded:: 1.6.0 - - Parameters - ---------- - other : class instance - The other class must have the ``window`` attribute. - - Returns - ------- - bool : boolean - True if the windows are the same, False otherwise. - - """ - return np.all(self.window == other.window) - - def has_sametype(self, other): - """Check if types match. - - .. versionadded:: 1.7.0 - - Parameters - ---------- - other : object - Class instance. - - Returns - ------- - bool : boolean - True if other is same class as self - - """ - return isinstance(other, self.__class__) - - def _get_coefficients(self, other): - """Interpret other as polynomial coefficients. - - The `other` argument is checked to see if it is of the same - class as self with identical domain and window. If so, - return its coefficients, otherwise return `other`. - - .. versionadded:: 1.9.0 - - Parameters - ---------- - other : anything - Object to be checked. - - Returns - ------- - coef - The coefficients of`other` if it is a compatible instance, - of ABCPolyBase, otherwise `other`. - - Raises - ------ - TypeError - When `other` is an incompatible instance of ABCPolyBase. 
- - """ - if isinstance(other, ABCPolyBase): - if not isinstance(other, self.__class__): - raise TypeError("Polynomial types differ") - elif not np.all(self.domain == other.domain): - raise TypeError("Domains differ") - elif not np.all(self.window == other.window): - raise TypeError("Windows differ") - return other.coef - return other - - def __init__(self, coef, domain=None, window=None): - [coef] = pu.as_series([coef], trim=False) - self.coef = coef - - if domain is not None: - [domain] = pu.as_series([domain], trim=False) - if len(domain) != 2: - raise ValueError("Domain has wrong number of elements.") - self.domain = domain - - if window is not None: - [window] = pu.as_series([window], trim=False) - if len(window) != 2: - raise ValueError("Window has wrong number of elements.") - self.window = window - - def __repr__(self): - format = "%s(%s, domain=%s, window=%s)" - coef = repr(self.coef)[6:-1] - domain = repr(self.domain)[6:-1] - window = repr(self.window)[6:-1] - name = self.__class__.__name__ - return format % (name, coef, domain, window) - - def __str__(self): - format = "%s(%s)" - coef = str(self.coef) - name = self.nickname - return format % (name, coef) - - @classmethod - def _repr_latex_term(cls, i, arg_str, needs_parens): - if cls.basis_name is None: - raise NotImplementedError( - "Subclasses must define either a basis name, or override " - "_repr_latex_term(i, arg_str, needs_parens)") - # since we always add parens, we don't care if the expression needs them - return "{{{basis}}}_{{{i}}}({arg_str})".format( - basis=cls.basis_name, i=i, arg_str=arg_str - ) - - @staticmethod - def _repr_latex_scalar(x): - # TODO: we're stuck with disabling math formatting until we handle - # exponents in this function - return r'\text{{{}}}'.format(x) - - def _repr_latex_(self): - # get the scaled argument string to the basis functions - off, scale = self.mapparms() - if off == 0 and scale == 1: - term = 'x' - needs_parens = False - elif scale == 1: - term = '{} + 
x'.format( - self._repr_latex_scalar(off) - ) - needs_parens = True - elif off == 0: - term = '{}x'.format( - self._repr_latex_scalar(scale) - ) - needs_parens = True - else: - term = '{} + {}x'.format( - self._repr_latex_scalar(off), - self._repr_latex_scalar(scale) - ) - needs_parens = True - - mute = r"\color{{LightGray}}{{{}}}".format - - parts = [] - for i, c in enumerate(self.coef): - # prevent duplication of + and - signs - if i == 0: - coef_str = '{}'.format(self._repr_latex_scalar(c)) - elif not isinstance(c, numbers.Real): - coef_str = ' + ({})'.format(self._repr_latex_scalar(c)) - elif not np.signbit(c): - coef_str = ' + {}'.format(self._repr_latex_scalar(c)) - else: - coef_str = ' - {}'.format(self._repr_latex_scalar(-c)) - - # produce the string for the term - term_str = self._repr_latex_term(i, term, needs_parens) - if term_str == '1': - part = coef_str - else: - part = r'{}\,{}'.format(coef_str, term_str) - - if c == 0: - part = mute(part) - - parts.append(part) - - if parts: - body = ''.join(parts) - else: - # in case somehow there are no coefficients at all - body = '0' - - return r'$x \mapsto {}$'.format(body) - - - - # Pickle and copy - - def __getstate__(self): - ret = self.__dict__.copy() - ret['coef'] = self.coef.copy() - ret['domain'] = self.domain.copy() - ret['window'] = self.window.copy() - return ret - - def __setstate__(self, dict): - self.__dict__ = dict - - # Call - - def __call__(self, arg): - off, scl = pu.mapparms(self.domain, self.window) - arg = off + scl*arg - return self._val(arg, self.coef) - - def __iter__(self): - return iter(self.coef) - - def __len__(self): - return len(self.coef) - - # Numeric properties. 
- - def __neg__(self): - return self.__class__(-self.coef, self.domain, self.window) - - def __pos__(self): - return self - - def __add__(self, other): - othercoef = self._get_coefficients(other) - try: - coef = self._add(self.coef, othercoef) - except Exception: - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __sub__(self, other): - othercoef = self._get_coefficients(other) - try: - coef = self._sub(self.coef, othercoef) - except Exception: - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __mul__(self, other): - othercoef = self._get_coefficients(other) - try: - coef = self._mul(self.coef, othercoef) - except Exception: - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __div__(self, other): - # this can be removed when python 2 support is dropped. - return self.__floordiv__(other) - - def __truediv__(self, other): - # there is no true divide if the rhs is not a Number, although it - # could return the first n elements of an infinite series. - # It is hard to see where n would come from, though. 
- if not isinstance(other, numbers.Number) or isinstance(other, bool): - form = "unsupported types for true division: '%s', '%s'" - raise TypeError(form % (type(self), type(other))) - return self.__floordiv__(other) - - def __floordiv__(self, other): - res = self.__divmod__(other) - if res is NotImplemented: - return res - return res[0] - - def __mod__(self, other): - res = self.__divmod__(other) - if res is NotImplemented: - return res - return res[1] - - def __divmod__(self, other): - othercoef = self._get_coefficients(other) - try: - quo, rem = self._div(self.coef, othercoef) - except ZeroDivisionError as e: - raise e - except Exception: - return NotImplemented - quo = self.__class__(quo, self.domain, self.window) - rem = self.__class__(rem, self.domain, self.window) - return quo, rem - - def __pow__(self, other): - coef = self._pow(self.coef, other, maxpower=self.maxpower) - res = self.__class__(coef, self.domain, self.window) - return res - - def __radd__(self, other): - try: - coef = self._add(other, self.coef) - except Exception: - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __rsub__(self, other): - try: - coef = self._sub(other, self.coef) - except Exception: - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __rmul__(self, other): - try: - coef = self._mul(other, self.coef) - except Exception: - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __rdiv__(self, other): - # set to __floordiv__ /. - return self.__rfloordiv__(other) - - def __rtruediv__(self, other): - # An instance of ABCPolyBase is not considered a - # Number. 
- return NotImplemented - - def __rfloordiv__(self, other): - res = self.__rdivmod__(other) - if res is NotImplemented: - return res - return res[0] - - def __rmod__(self, other): - res = self.__rdivmod__(other) - if res is NotImplemented: - return res - return res[1] - - def __rdivmod__(self, other): - try: - quo, rem = self._div(other, self.coef) - except ZeroDivisionError as e: - raise e - except Exception: - return NotImplemented - quo = self.__class__(quo, self.domain, self.window) - rem = self.__class__(rem, self.domain, self.window) - return quo, rem - - def __eq__(self, other): - res = (isinstance(other, self.__class__) and - np.all(self.domain == other.domain) and - np.all(self.window == other.window) and - (self.coef.shape == other.coef.shape) and - np.all(self.coef == other.coef)) - return res - - def __ne__(self, other): - return not self.__eq__(other) - - # - # Extra methods. - # - - def copy(self): - """Return a copy. - - Returns - ------- - new_series : series - Copy of self. - - """ - return self.__class__(self.coef, self.domain, self.window) - - def degree(self): - """The degree of the series. - - .. versionadded:: 1.5.0 - - Returns - ------- - degree : int - Degree of the series, one less than the number of coefficients. - - """ - return len(self) - 1 - - def cutdeg(self, deg): - """Truncate series to the given degree. - - Reduce the degree of the series to `deg` by discarding the - high order terms. If `deg` is greater than the current degree a - copy of the current series is returned. This can be useful in least - squares where the coefficients of the high degree terms may be very - small. - - .. versionadded:: 1.5.0 - - Parameters - ---------- - deg : non-negative int - The series is reduced to degree `deg` by discarding the high - order terms. The value of `deg` must be a non-negative integer. - - Returns - ------- - new_series : series - New instance of series with reduced degree. 
- - """ - return self.truncate(deg + 1) - - def trim(self, tol=0): - """Remove trailing coefficients - - Remove trailing coefficients until a coefficient is reached whose - absolute value greater than `tol` or the beginning of the series is - reached. If all the coefficients would be removed the series is set - to ``[0]``. A new series instance is returned with the new - coefficients. The current instance remains unchanged. - - Parameters - ---------- - tol : non-negative number. - All trailing coefficients less than `tol` will be removed. - - Returns - ------- - new_series : series - Contains the new set of coefficients. - - """ - coef = pu.trimcoef(self.coef, tol) - return self.__class__(coef, self.domain, self.window) - - def truncate(self, size): - """Truncate series to length `size`. - - Reduce the series to length `size` by discarding the high - degree terms. The value of `size` must be a positive integer. This - can be useful in least squares where the coefficients of the - high degree terms may be very small. - - Parameters - ---------- - size : positive int - The series is reduced to length `size` by discarding the high - degree terms. The value of `size` must be a positive integer. - - Returns - ------- - new_series : series - New instance of series with truncated coefficients. - - """ - isize = int(size) - if isize != size or isize < 1: - raise ValueError("size must be a positive integer") - if isize >= len(self.coef): - coef = self.coef - else: - coef = self.coef[:isize] - return self.__class__(coef, self.domain, self.window) - - def convert(self, domain=None, kind=None, window=None): - """Convert series to a different kind and/or domain and/or window. - - Parameters - ---------- - domain : array_like, optional - The domain of the converted series. If the value is None, - the default domain of `kind` is used. - kind : class, optional - The polynomial series type class to which the current instance - should be converted. 
If kind is None, then the class of the - current instance is used. - window : array_like, optional - The window of the converted series. If the value is None, - the default window of `kind` is used. - - Returns - ------- - new_series : series - The returned class can be of different type than the current - instance and/or have a different domain and/or different - window. - - Notes - ----- - Conversion between domains and class types can result in - numerically ill defined series. - - Examples - -------- - - """ - if kind is None: - kind = self.__class__ - if domain is None: - domain = kind.domain - if window is None: - window = kind.window - return self(kind.identity(domain, window=window)) - - def mapparms(self): - """Return the mapping parameters. - - The returned values define a linear map ``off + scl*x`` that is - applied to the input arguments before the series is evaluated. The - map depends on the ``domain`` and ``window``; if the current - ``domain`` is equal to the ``window`` the resulting map is the - identity. If the coefficients of the series instance are to be - used by themselves outside this class, then the linear function - must be substituted for the ``x`` in the standard representation of - the base polynomials. - - Returns - ------- - off, scl : float or complex - The mapping function is defined by ``off + scl*x``. - - Notes - ----- - If the current domain is the interval ``[l1, r1]`` and the window - is ``[l2, r2]``, then the linear mapping function ``L`` is - defined by the equations:: - - L(l1) = l2 - L(r1) = r2 - - """ - return pu.mapparms(self.domain, self.window) - - def integ(self, m=1, k=[], lbnd=None): - """Integrate. - - Return a series instance that is the definite integral of the - current series. - - Parameters - ---------- - m : non-negative int - The number of integrations to perform. - k : array_like - Integration constants. The first constant is applied to the - first integration, the second to the second, and so on. 
The - list of values must less than or equal to `m` in length and any - missing values are set to zero. - lbnd : Scalar - The lower bound of the definite integral. - - Returns - ------- - new_series : series - A new series representing the integral. The domain is the same - as the domain of the integrated series. - - """ - off, scl = self.mapparms() - if lbnd is None: - lbnd = 0 - else: - lbnd = off + scl*lbnd - coef = self._int(self.coef, m, k, lbnd, 1./scl) - return self.__class__(coef, self.domain, self.window) - - def deriv(self, m=1): - """Differentiate. - - Return a series instance of that is the derivative of the current - series. - - Parameters - ---------- - m : non-negative int - Find the derivative of order `m`. - - Returns - ------- - new_series : series - A new series representing the derivative. The domain is the same - as the domain of the differentiated series. - - """ - off, scl = self.mapparms() - coef = self._der(self.coef, m, scl) - return self.__class__(coef, self.domain, self.window) - - def roots(self): - """Return the roots of the series polynomial. - - Compute the roots for the series. Note that the accuracy of the - roots decrease the further outside the domain they lie. - - Returns - ------- - roots : ndarray - Array containing the roots of the series. - - """ - roots = self._roots(self.coef) - return pu.mapdomain(roots, self.window, self.domain) - - def linspace(self, n=100, domain=None): - """Return x, y values at equally spaced points in domain. - - Returns the x, y values at `n` linearly spaced points across the - domain. Here y is the value of the polynomial at the points x. By - default the domain is the same as that of the series instance. - This method is intended mostly as a plotting aid. - - .. versionadded:: 1.5.0 - - Parameters - ---------- - n : int, optional - Number of point pairs to return. The default value is 100. 
- domain : {None, array_like}, optional - If not None, the specified domain is used instead of that of - the calling instance. It should be of the form ``[beg,end]``. - The default is None which case the class domain is used. - - Returns - ------- - x, y : ndarray - x is equal to linspace(self.domain[0], self.domain[1], n) and - y is the series evaluated at element of x. - - """ - if domain is None: - domain = self.domain - x = np.linspace(domain[0], domain[1], n) - y = self(x) - return x, y - - @classmethod - def fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None, - window=None): - """Least squares fit to data. - - Return a series instance that is the least squares fit to the data - `y` sampled at `x`. The domain of the returned instance can be - specified and this will often result in a superior fit with less - chance of ill conditioning. - - Parameters - ---------- - x : array_like, shape (M,) - x-coordinates of the M sample points ``(x[i], y[i])``. - y : array_like, shape (M,) or (M, K) - y-coordinates of the sample points. Several data sets of sample - points sharing the same x-coordinates can be fitted at once by - passing in a 2D-array that contains one dataset per column. - deg : int or 1-D array_like - Degree(s) of the fitting polynomials. If `deg` is a single integer - all terms up to and including the `deg`'th term are included in the - fit. For NumPy versions >= 1.11.0 a list of integers specifying the - degrees of the terms to include may be used instead. - domain : {None, [beg, end], []}, optional - Domain to use for the returned series. If ``None``, - then a minimal domain that covers the points `x` is chosen. If - ``[]`` the class domain is used. The default value was the - class domain in NumPy 1.4 and ``None`` in later versions. - The ``[]`` option was added in numpy 1.5.0. - rcond : float, optional - Relative condition number of the fit. Singular values smaller - than this relative to the largest singular value will be - ignored. 
The default value is len(x)*eps, where eps is the - relative precision of the float type, about 2e-16 in most - cases. - full : bool, optional - Switch determining nature of return value. When it is False - (the default) just the coefficients are returned, when True - diagnostic information from the singular value decomposition is - also returned. - w : array_like, shape (M,), optional - Weights. If not None the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the - weights are chosen so that the errors of the products - ``w[i]*y[i]`` all have the same variance. The default value is - None. - - .. versionadded:: 1.5.0 - window : {[beg, end]}, optional - Window to use for the returned series. The default - value is the default class domain - - .. versionadded:: 1.6.0 - - Returns - ------- - new_series : series - A series that represents the least squares fit to the data and - has the domain and window specified in the call. If the - coefficients for the unscaled and unshifted basis polynomials are - of interest, do ``new_series.convert().coef``. - - [resid, rank, sv, rcond] : list - These values are only returned if `full` = True - - resid -- sum of squared residuals of the least squares fit - rank -- the numerical rank of the scaled Vandermonde matrix - sv -- singular values of the scaled Vandermonde matrix - rcond -- value of `rcond`. - - For more details, see `linalg.lstsq`. 
- - """ - if domain is None: - domain = pu.getdomain(x) - elif type(domain) is list and len(domain) == 0: - domain = cls.domain - - if window is None: - window = cls.window - - xnew = pu.mapdomain(x, domain, window) - res = cls._fit(xnew, y, deg, w=w, rcond=rcond, full=full) - if full: - [coef, status] = res - return cls(coef, domain=domain, window=window), status - else: - coef = res - return cls(coef, domain=domain, window=window) - - @classmethod - def fromroots(cls, roots, domain=[], window=None): - """Return series instance that has the specified roots. - - Returns a series representing the product - ``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is a - list of roots. - - Parameters - ---------- - roots : array_like - List of roots. - domain : {[], None, array_like}, optional - Domain for the resulting series. If None the domain is the - interval from the smallest root to the largest. If [] the - domain is the class domain. The default is []. - window : {None, array_like}, optional - Window for the returned series. If None the class window is - used. The default is None. - - Returns - ------- - new_series : series - Series with the specified roots. - - """ - [roots] = pu.as_series([roots], trim=False) - if domain is None: - domain = pu.getdomain(roots) - elif type(domain) is list and len(domain) == 0: - domain = cls.domain - - if window is None: - window = cls.window - - deg = len(roots) - off, scl = pu.mapparms(domain, window) - rnew = off + scl*roots - coef = cls._fromroots(rnew) / scl**deg - return cls(coef, domain=domain, window=window) - - @classmethod - def identity(cls, domain=None, window=None): - """Identity function. - - If ``p`` is the returned series, then ``p(x) == x`` for all - values of x. - - Parameters - ---------- - domain : {None, array_like}, optional - If given, the array must be of the form ``[beg, end]``, where - ``beg`` and ``end`` are the endpoints of the domain. If None is - given then the class domain is used. 
The default is None. - window : {None, array_like}, optional - If given, the resulting array must be if the form - ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of - the window. If None is given then the class window is used. The - default is None. - - Returns - ------- - new_series : series - Series of representing the identity. - - """ - if domain is None: - domain = cls.domain - if window is None: - window = cls.window - off, scl = pu.mapparms(window, domain) - coef = cls._line(off, scl) - return cls(coef, domain, window) - - @classmethod - def basis(cls, deg, domain=None, window=None): - """Series basis polynomial of degree `deg`. - - Returns the series representing the basis polynomial of degree `deg`. - - .. versionadded:: 1.7.0 - - Parameters - ---------- - deg : int - Degree of the basis polynomial for the series. Must be >= 0. - domain : {None, array_like}, optional - If given, the array must be of the form ``[beg, end]``, where - ``beg`` and ``end`` are the endpoints of the domain. If None is - given then the class domain is used. The default is None. - window : {None, array_like}, optional - If given, the resulting array must be if the form - ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of - the window. If None is given then the class window is used. The - default is None. - - Returns - ------- - new_series : series - A series with the coefficient of the `deg` term set to one and - all others zero. - - """ - if domain is None: - domain = cls.domain - if window is None: - window = cls.window - ideg = int(deg) - - if ideg != deg or ideg < 0: - raise ValueError("deg must be non-negative integer") - return cls([0]*ideg + [1], domain, window) - - @classmethod - def cast(cls, series, domain=None, window=None): - """Convert series to series of this class. 
- - The `series` is expected to be an instance of some polynomial - series of one of the types supported by by the numpy.polynomial - module, but could be some other class that supports the convert - method. - - .. versionadded:: 1.7.0 - - Parameters - ---------- - series : series - The series instance to be converted. - domain : {None, array_like}, optional - If given, the array must be of the form ``[beg, end]``, where - ``beg`` and ``end`` are the endpoints of the domain. If None is - given then the class domain is used. The default is None. - window : {None, array_like}, optional - If given, the resulting array must be if the form - ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of - the window. If None is given then the class window is used. The - default is None. - - Returns - ------- - new_series : series - A series of the same kind as the calling class and equal to - `series` when evaluated. - - See Also - -------- - convert : similar instance method - - """ - if domain is None: - domain = cls.domain - if window is None: - window = cls.window - return series.convert(domain, cls, window) diff --git a/venv/lib/python3.7/site-packages/numpy/polynomial/chebyshev.py b/venv/lib/python3.7/site-packages/numpy/polynomial/chebyshev.py deleted file mode 100644 index 0cd9c4d..0000000 --- a/venv/lib/python3.7/site-packages/numpy/polynomial/chebyshev.py +++ /dev/null @@ -1,2048 +0,0 @@ -""" -Objects for dealing with Chebyshev series. - -This module provides a number of objects (mostly functions) useful for -dealing with Chebyshev series, including a `Chebyshev` class that -encapsulates the usual arithmetic operations. (General information -on how this module represents and works with such polynomials is in the -docstring for its "parent" sub-package, `numpy.polynomial`). - -Constants ---------- -- `chebdomain` -- Chebyshev series default domain, [-1,1]. -- `chebzero` -- (Coefficients of the) Chebyshev series that evaluates - identically to 0. 
-- `chebone` -- (Coefficients of the) Chebyshev series that evaluates - identically to 1. -- `chebx` -- (Coefficients of the) Chebyshev series for the identity map, - ``f(x) = x``. - -Arithmetic ----------- -- `chebadd` -- add two Chebyshev series. -- `chebsub` -- subtract one Chebyshev series from another. -- `chebmulx` -- multiply a Chebyshev series in ``P_i(x)`` by ``x``. -- `chebmul` -- multiply two Chebyshev series. -- `chebdiv` -- divide one Chebyshev series by another. -- `chebpow` -- raise a Chebyshev series to a positive integer power. -- `chebval` -- evaluate a Chebyshev series at given points. -- `chebval2d` -- evaluate a 2D Chebyshev series at given points. -- `chebval3d` -- evaluate a 3D Chebyshev series at given points. -- `chebgrid2d` -- evaluate a 2D Chebyshev series on a Cartesian product. -- `chebgrid3d` -- evaluate a 3D Chebyshev series on a Cartesian product. - -Calculus --------- -- `chebder` -- differentiate a Chebyshev series. -- `chebint` -- integrate a Chebyshev series. - -Misc Functions --------------- -- `chebfromroots` -- create a Chebyshev series with specified roots. -- `chebroots` -- find the roots of a Chebyshev series. -- `chebvander` -- Vandermonde-like matrix for Chebyshev polynomials. -- `chebvander2d` -- Vandermonde-like matrix for 2D power series. -- `chebvander3d` -- Vandermonde-like matrix for 3D power series. -- `chebgauss` -- Gauss-Chebyshev quadrature, points and weights. -- `chebweight` -- Chebyshev weight function. -- `chebcompanion` -- symmetrized companion matrix in Chebyshev form. -- `chebfit` -- least-squares fit returning a Chebyshev series. -- `chebpts1` -- Chebyshev points of the first kind. -- `chebpts2` -- Chebyshev points of the second kind. -- `chebtrim` -- trim leading coefficients from a Chebyshev series. -- `chebline` -- Chebyshev series representing given straight line. -- `cheb2poly` -- convert a Chebyshev series to a polynomial. -- `poly2cheb` -- convert a polynomial to a Chebyshev series. 
-- `chebinterpolate` -- interpolate a function at the Chebyshev points. - -Classes -------- -- `Chebyshev` -- A Chebyshev series class. - -See also --------- -`numpy.polynomial` - -Notes ------ -The implementations of multiplication, division, integration, and -differentiation use the algebraic identities [1]_: - -.. math :: - T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\ - z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}. - -where - -.. math :: x = \\frac{z + z^{-1}}{2}. - -These identities allow a Chebyshev series to be expressed as a finite, -symmetric Laurent series. In this module, this sort of Laurent series -is referred to as a "z-series." - -References ----------- -.. [1] A. T. Benjamin, et al., "Combinatorial Trigonometry with Chebyshev - Polynomials," *Journal of Statistical Planning and Inference 14*, 2008 - (preprint: https://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4) - -""" -from __future__ import division, absolute_import, print_function - -import warnings -import numpy as np -import numpy.linalg as la -from numpy.core.multiarray import normalize_axis_index - -from . import polyutils as pu -from ._polybase import ABCPolyBase - -__all__ = [ - 'chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline', 'chebadd', - 'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow', 'chebval', - 'chebder', 'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots', - 'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1', - 'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d', - 'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion', - 'chebgauss', 'chebweight', 'chebinterpolate'] - -chebtrim = pu.trimcoef - -# -# A collection of functions for manipulating z-series. These are private -# functions and do minimal error checking. -# - -def _cseries_to_zseries(c): - """Covert Chebyshev series to z-series. - - Covert a Chebyshev series to the equivalent z-series. The result is - never an empty array. The dtype of the return is the same as that of - the input. 
No checks are run on the arguments as this routine is for - internal use. - - Parameters - ---------- - c : 1-D ndarray - Chebyshev coefficients, ordered from low to high - - Returns - ------- - zs : 1-D ndarray - Odd length symmetric z-series, ordered from low to high. - - """ - n = c.size - zs = np.zeros(2*n-1, dtype=c.dtype) - zs[n-1:] = c/2 - return zs + zs[::-1] - - -def _zseries_to_cseries(zs): - """Covert z-series to a Chebyshev series. - - Covert a z series to the equivalent Chebyshev series. The result is - never an empty array. The dtype of the return is the same as that of - the input. No checks are run on the arguments as this routine is for - internal use. - - Parameters - ---------- - zs : 1-D ndarray - Odd length symmetric z-series, ordered from low to high. - - Returns - ------- - c : 1-D ndarray - Chebyshev coefficients, ordered from low to high. - - """ - n = (zs.size + 1)//2 - c = zs[n-1:].copy() - c[1:n] *= 2 - return c - - -def _zseries_mul(z1, z2): - """Multiply two z-series. - - Multiply two z-series to produce a z-series. - - Parameters - ---------- - z1, z2 : 1-D ndarray - The arrays must be 1-D but this is not checked. - - Returns - ------- - product : 1-D ndarray - The product z-series. - - Notes - ----- - This is simply convolution. If symmetric/anti-symmetric z-series are - denoted by S/A then the following rules apply: - - S*S, A*A -> S - S*A, A*S -> A - - """ - return np.convolve(z1, z2) - - -def _zseries_div(z1, z2): - """Divide the first z-series by the second. - - Divide `z1` by `z2` and return the quotient and remainder as z-series. - Warning: this implementation only applies when both z1 and z2 have the - same symmetry, which is sufficient for present purposes. - - Parameters - ---------- - z1, z2 : 1-D ndarray - The arrays must be 1-D and have the same symmetry, but this is not - checked. - - Returns - ------- - - (quotient, remainder) : 1-D ndarrays - Quotient and remainder as z-series. 
- - Notes - ----- - This is not the same as polynomial division on account of the desired form - of the remainder. If symmetric/anti-symmetric z-series are denoted by S/A - then the following rules apply: - - S/S -> S,S - A/A -> S,A - - The restriction to types of the same symmetry could be fixed but seems like - unneeded generality. There is no natural form for the remainder in the case - where there is no symmetry. - - """ - z1 = z1.copy() - z2 = z2.copy() - lc1 = len(z1) - lc2 = len(z2) - if lc2 == 1: - z1 /= z2 - return z1, z1[:1]*0 - elif lc1 < lc2: - return z1[:1]*0, z1 - else: - dlen = lc1 - lc2 - scl = z2[0] - z2 /= scl - quo = np.empty(dlen + 1, dtype=z1.dtype) - i = 0 - j = dlen - while i < j: - r = z1[i] - quo[i] = z1[i] - quo[dlen - i] = r - tmp = r*z2 - z1[i:i+lc2] -= tmp - z1[j:j+lc2] -= tmp - i += 1 - j -= 1 - r = z1[i] - quo[i] = r - tmp = r*z2 - z1[i:i+lc2] -= tmp - quo /= scl - rem = z1[i+1:i-1+lc2].copy() - return quo, rem - - -def _zseries_der(zs): - """Differentiate a z-series. - - The derivative is with respect to x, not z. This is achieved using the - chain rule and the value of dx/dz given in the module notes. - - Parameters - ---------- - zs : z-series - The z-series to differentiate. - - Returns - ------- - derivative : z-series - The derivative - - Notes - ----- - The zseries for x (ns) has been multiplied by two in order to avoid - using floats that are incompatible with Decimal and likely other - specialized scalar types. This scaling has been compensated by - multiplying the value of zs by two also so that the two cancels in the - division. - - """ - n = len(zs)//2 - ns = np.array([-1, 0, 1], dtype=zs.dtype) - zs *= np.arange(-n, n+1)*2 - d, r = _zseries_div(zs, ns) - return d - - -def _zseries_int(zs): - """Integrate a z-series. - - The integral is with respect to x, not z. This is achieved by a change - of variable using dx/dz given in the module notes. 
- - Parameters - ---------- - zs : z-series - The z-series to integrate - - Returns - ------- - integral : z-series - The indefinite integral - - Notes - ----- - The zseries for x (ns) has been multiplied by two in order to avoid - using floats that are incompatible with Decimal and likely other - specialized scalar types. This scaling has been compensated by - dividing the resulting zs by two. - - """ - n = 1 + len(zs)//2 - ns = np.array([-1, 0, 1], dtype=zs.dtype) - zs = _zseries_mul(zs, ns) - div = np.arange(-n, n+1)*2 - zs[:n] /= div[:n] - zs[n+1:] /= div[n+1:] - zs[n] = 0 - return zs - -# -# Chebyshev series functions -# - - -def poly2cheb(pol): - """ - Convert a polynomial to a Chebyshev series. - - Convert an array representing the coefficients of a polynomial (relative - to the "standard" basis) ordered from lowest degree to highest, to an - array of the coefficients of the equivalent Chebyshev series, ordered - from lowest to highest degree. - - Parameters - ---------- - pol : array_like - 1-D array containing the polynomial coefficients - - Returns - ------- - c : ndarray - 1-D array containing the coefficients of the equivalent Chebyshev - series. - - See Also - -------- - cheb2poly - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> from numpy import polynomial as P - >>> p = P.Polynomial(range(4)) - >>> p - Polynomial([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1]) - >>> c = p.convert(kind=P.Chebyshev) - >>> c - Chebyshev([1. , 3.25, 1. , 0.75], domain=[-1., 1.], window=[-1., 1.]) - >>> P.chebyshev.poly2cheb(range(4)) - array([1. , 3.25, 1. , 0.75]) - - """ - [pol] = pu.as_series([pol]) - deg = len(pol) - 1 - res = 0 - for i in range(deg, -1, -1): - res = chebadd(chebmulx(res), pol[i]) - return res - - -def cheb2poly(c): - """ - Convert a Chebyshev series to a polynomial. 
- - Convert an array representing the coefficients of a Chebyshev series, - ordered from lowest degree to highest, to an array of the coefficients - of the equivalent polynomial (relative to the "standard" basis) ordered - from lowest to highest degree. - - Parameters - ---------- - c : array_like - 1-D array containing the Chebyshev series coefficients, ordered - from lowest order term to highest. - - Returns - ------- - pol : ndarray - 1-D array containing the coefficients of the equivalent polynomial - (relative to the "standard" basis) ordered from lowest order term - to highest. - - See Also - -------- - poly2cheb - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> from numpy import polynomial as P - >>> c = P.Chebyshev(range(4)) - >>> c - Chebyshev([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1]) - >>> p = c.convert(kind=P.Polynomial) - >>> p - Polynomial([-2., -8., 4., 12.], domain=[-1., 1.], window=[-1., 1.]) - >>> P.chebyshev.cheb2poly(range(4)) - array([-2., -8., 4., 12.]) - - """ - from .polynomial import polyadd, polysub, polymulx - - [c] = pu.as_series([c]) - n = len(c) - if n < 3: - return c - else: - c0 = c[-2] - c1 = c[-1] - # i is the current degree of c1 - for i in range(n - 1, 1, -1): - tmp = c0 - c0 = polysub(c[i - 2], c1) - c1 = polyadd(tmp, polymulx(c1)*2) - return polyadd(c0, polymulx(c1)) - - -# -# These are constant arrays are of integer type so as to be compatible -# with the widest range of other types, such as Decimal. -# - -# Chebyshev default domain. -chebdomain = np.array([-1, 1]) - -# Chebyshev coefficients representing zero. -chebzero = np.array([0]) - -# Chebyshev coefficients representing one. -chebone = np.array([1]) - -# Chebyshev coefficients representing the identity x. -chebx = np.array([0, 1]) - - -def chebline(off, scl): - """ - Chebyshev series whose graph is a straight line. 
- - - - Parameters - ---------- - off, scl : scalars - The specified line is given by ``off + scl*x``. - - Returns - ------- - y : ndarray - This module's representation of the Chebyshev series for - ``off + scl*x``. - - See Also - -------- - polyline - - Examples - -------- - >>> import numpy.polynomial.chebyshev as C - >>> C.chebline(3,2) - array([3, 2]) - >>> C.chebval(-3, C.chebline(3,2)) # should be -3 - -3.0 - - """ - if scl != 0: - return np.array([off, scl]) - else: - return np.array([off]) - - -def chebfromroots(roots): - """ - Generate a Chebyshev series with given roots. - - The function returns the coefficients of the polynomial - - .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), - - in Chebyshev form, where the `r_n` are the roots specified in `roots`. - If a zero has multiplicity n, then it must appear in `roots` n times. - For instance, if 2 is a root of multiplicity three and 3 is a root of - multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The - roots can appear in any order. - - If the returned coefficients are `c`, then - - .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x) - - The coefficient of the last term is not generally 1 for monic - polynomials in Chebyshev form. - - Parameters - ---------- - roots : array_like - Sequence containing the roots. - - Returns - ------- - out : ndarray - 1-D array of coefficients. If all roots are real then `out` is a - real array, if some of the roots are complex, then `out` is complex - even if all the coefficients in the result are real (see Examples - below). - - See Also - -------- - polyfromroots, legfromroots, lagfromroots, hermfromroots, hermefromroots - - Examples - -------- - >>> import numpy.polynomial.chebyshev as C - >>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis - array([ 0. , -0.25, 0. , 0.25]) - >>> j = complex(0,1) - >>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis - array([1.5+0.j, 0. 
+0.j, 0.5+0.j]) - - """ - return pu._fromroots(chebline, chebmul, roots) - - -def chebadd(c1, c2): - """ - Add one Chebyshev series to another. - - Returns the sum of two Chebyshev series `c1` + `c2`. The arguments - are sequences of coefficients ordered from lowest order term to - highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Chebyshev series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the Chebyshev series of their sum. - - See Also - -------- - chebsub, chebmulx, chebmul, chebdiv, chebpow - - Notes - ----- - Unlike multiplication, division, etc., the sum of two Chebyshev series - is a Chebyshev series (without having to "reproject" the result onto - the basis set) so addition, just like that of "standard" polynomials, - is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial import chebyshev as C - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> C.chebadd(c1,c2) - array([4., 4., 4.]) - - """ - return pu._add(c1, c2) - - -def chebsub(c1, c2): - """ - Subtract one Chebyshev series from another. - - Returns the difference of two Chebyshev series `c1` - `c2`. The - sequences of coefficients are from lowest order term to highest, i.e., - [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Chebyshev series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Chebyshev series coefficients representing their difference. - - See Also - -------- - chebadd, chebmulx, chebmul, chebdiv, chebpow - - Notes - ----- - Unlike multiplication, division, etc., the difference of two Chebyshev - series is a Chebyshev series (without having to "reproject" the result - onto the basis set) so subtraction, just like that of "standard" - polynomials, is simply "component-wise." 
- - Examples - -------- - >>> from numpy.polynomial import chebyshev as C - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> C.chebsub(c1,c2) - array([-2., 0., 2.]) - >>> C.chebsub(c2,c1) # -C.chebsub(c1,c2) - array([ 2., 0., -2.]) - - """ - return pu._sub(c1, c2) - - -def chebmulx(c): - """Multiply a Chebyshev series by x. - - Multiply the polynomial `c` by x, where x is the independent - variable. - - - Parameters - ---------- - c : array_like - 1-D array of Chebyshev series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the result of the multiplication. - - Notes - ----- - - .. versionadded:: 1.5.0 - - Examples - -------- - >>> from numpy.polynomial import chebyshev as C - >>> C.chebmulx([1,2,3]) - array([1. , 2.5, 1. , 1.5]) - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - # The zero series needs special treatment - if len(c) == 1 and c[0] == 0: - return c - - prd = np.empty(len(c) + 1, dtype=c.dtype) - prd[0] = c[0]*0 - prd[1] = c[0] - if len(c) > 1: - tmp = c[1:]/2 - prd[2:] = tmp - prd[0:-2] += tmp - return prd - - -def chebmul(c1, c2): - """ - Multiply one Chebyshev series by another. - - Returns the product of two Chebyshev series `c1` * `c2`. The arguments - are sequences of coefficients, from lowest order "term" to highest, - e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Chebyshev series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Chebyshev series coefficients representing their product. - - See Also - -------- - chebadd, chebsub, chebmulx, chebdiv, chebpow - - Notes - ----- - In general, the (polynomial) product of two C-series results in terms - that are not in the Chebyshev polynomial basis set. 
Thus, to express - the product as a C-series, it is typically necessary to "reproject" - the product onto said basis set, which typically produces - "unintuitive live" (but correct) results; see Examples section below. - - Examples - -------- - >>> from numpy.polynomial import chebyshev as C - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> C.chebmul(c1,c2) # multiplication requires "reprojection" - array([ 6.5, 12. , 12. , 4. , 1.5]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - z1 = _cseries_to_zseries(c1) - z2 = _cseries_to_zseries(c2) - prd = _zseries_mul(z1, z2) - ret = _zseries_to_cseries(prd) - return pu.trimseq(ret) - - -def chebdiv(c1, c2): - """ - Divide one Chebyshev series by another. - - Returns the quotient-with-remainder of two Chebyshev series - `c1` / `c2`. The arguments are sequences of coefficients from lowest - order "term" to highest, e.g., [1,2,3] represents the series - ``T_0 + 2*T_1 + 3*T_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Chebyshev series coefficients ordered from low to - high. - - Returns - ------- - [quo, rem] : ndarrays - Of Chebyshev series coefficients representing the quotient and - remainder. - - See Also - -------- - chebadd, chebsub, chemulx, chebmul, chebpow - - Notes - ----- - In general, the (polynomial) division of one C-series by another - results in quotient and remainder terms that are not in the Chebyshev - polynomial basis set. Thus, to express these results as C-series, it - is typically necessary to "reproject" the results onto said basis - set, which typically produces "unintuitive" (but correct) results; - see Examples section below. 
- - Examples - -------- - >>> from numpy.polynomial import chebyshev as C - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not - (array([3.]), array([-8., -4.])) - >>> c2 = (0,1,2,3) - >>> C.chebdiv(c2,c1) # neither "intuitive" - (array([0., 2.]), array([-2., -4.])) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if c2[-1] == 0: - raise ZeroDivisionError() - - # note: this is more efficient than `pu._div(chebmul, c1, c2)` - lc1 = len(c1) - lc2 = len(c2) - if lc1 < lc2: - return c1[:1]*0, c1 - elif lc2 == 1: - return c1/c2[-1], c1[:1]*0 - else: - z1 = _cseries_to_zseries(c1) - z2 = _cseries_to_zseries(c2) - quo, rem = _zseries_div(z1, z2) - quo = pu.trimseq(_zseries_to_cseries(quo)) - rem = pu.trimseq(_zseries_to_cseries(rem)) - return quo, rem - - -def chebpow(c, pow, maxpower=16): - """Raise a Chebyshev series to a power. - - Returns the Chebyshev series `c` raised to the power `pow`. The - argument `c` is a sequence of coefficients ordered from low to high. - i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2.`` - - Parameters - ---------- - c : array_like - 1-D array of Chebyshev series coefficients ordered from low to - high. - pow : integer - Power to which the series will be raised - maxpower : integer, optional - Maximum power allowed. This is mainly to limit growth of the series - to unmanageable size. Default is 16 - - Returns - ------- - coef : ndarray - Chebyshev series of power. - - See Also - -------- - chebadd, chebsub, chebmulx, chebmul, chebdiv - - Examples - -------- - >>> from numpy.polynomial import chebyshev as C - >>> C.chebpow([1, 2, 3, 4], 2) - array([15.5, 22. , 16. , ..., 12.5, 12. , 8. 
]) - - """ - # note: this is more efficient than `pu._pow(chebmul, c1, c2)`, as it - # avoids converting between z and c series repeatedly - - # c is a trimmed copy - [c] = pu.as_series([c]) - power = int(pow) - if power != pow or power < 0: - raise ValueError("Power must be a non-negative integer.") - elif maxpower is not None and power > maxpower: - raise ValueError("Power is too large") - elif power == 0: - return np.array([1], dtype=c.dtype) - elif power == 1: - return c - else: - # This can be made more efficient by using powers of two - # in the usual way. - zs = _cseries_to_zseries(c) - prd = zs - for i in range(2, power + 1): - prd = np.convolve(prd, zs) - return _zseries_to_cseries(prd) - - -def chebder(c, m=1, scl=1, axis=0): - """ - Differentiate a Chebyshev series. - - Returns the Chebyshev series coefficients `c` differentiated `m` times - along `axis`. At each iteration the result is multiplied by `scl` (the - scaling factor is for use in a linear change of variable). The argument - `c` is an array of coefficients from low to high degree along each - axis, e.g., [1,2,3] represents the series ``1*T_0 + 2*T_1 + 3*T_2`` - while [[1,2],[1,2]] represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + - 2*T_0(x)*T_1(y) + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is - ``y``. - - Parameters - ---------- - c : array_like - Array of Chebyshev series coefficients. If c is multidimensional - the different axis correspond to different variables with the - degree in each axis given by the corresponding index. - m : int, optional - Number of derivatives taken, must be non-negative. (Default: 1) - scl : scalar, optional - Each differentiation is multiplied by `scl`. The end result is - multiplication by ``scl**m``. This is for use in a linear change of - variable. (Default: 1) - axis : int, optional - Axis over which the derivative is taken. (Default: 0). - - .. versionadded:: 1.7.0 - - Returns - ------- - der : ndarray - Chebyshev series of the derivative. 
- - See Also - -------- - chebint - - Notes - ----- - In general, the result of differentiating a C-series needs to be - "reprojected" onto the C-series basis set. Thus, typically, the - result of this function is "unintuitive," albeit correct; see Examples - section below. - - Examples - -------- - >>> from numpy.polynomial import chebyshev as C - >>> c = (1,2,3,4) - >>> C.chebder(c) - array([14., 12., 24.]) - >>> C.chebder(c,3) - array([96.]) - >>> C.chebder(c,scl=-1) - array([-14., -12., -24.]) - >>> C.chebder(c,2,-1) - array([12., 96.]) - - """ - c = np.array(c, ndmin=1, copy=True) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - cnt = pu._deprecate_as_int(m, "the order of derivation") - iaxis = pu._deprecate_as_int(axis, "the axis") - if cnt < 0: - raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) - - if cnt == 0: - return c - - c = np.moveaxis(c, iaxis, 0) - n = len(c) - if cnt >= n: - c = c[:1]*0 - else: - for i in range(cnt): - n = n - 1 - c *= scl - der = np.empty((n,) + c.shape[1:], dtype=c.dtype) - for j in range(n, 2, -1): - der[j - 1] = (2*j)*c[j] - c[j - 2] += (j*c[j])/(j - 2) - if n > 1: - der[1] = 4*c[2] - der[0] = c[1] - c = der - c = np.moveaxis(c, 0, iaxis) - return c - - -def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0): - """ - Integrate a Chebyshev series. - - Returns the Chebyshev series coefficients `c` integrated `m` times from - `lbnd` along `axis`. At each iteration the resulting series is - **multiplied** by `scl` and an integration constant, `k`, is added. - The scaling factor is for use in a linear change of variable. ("Buyer - beware": note that, depending on what one is doing, one may want `scl` - to be the reciprocal of what one might expect; for more information, - see the Notes section below.) 
The argument `c` is an array of - coefficients from low to high degree along each axis, e.g., [1,2,3] - represents the series ``T_0 + 2*T_1 + 3*T_2`` while [[1,2],[1,2]] - represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + 2*T_0(x)*T_1(y) + - 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. - - Parameters - ---------- - c : array_like - Array of Chebyshev series coefficients. If c is multidimensional - the different axis correspond to different variables with the - degree in each axis given by the corresponding index. - m : int, optional - Order of integration, must be positive. (Default: 1) - k : {[], list, scalar}, optional - Integration constant(s). The value of the first integral at zero - is the first value in the list, the value of the second integral - at zero is the second value, etc. If ``k == []`` (the default), - all constants are set to zero. If ``m == 1``, a single scalar can - be given instead of a list. - lbnd : scalar, optional - The lower bound of the integral. (Default: 0) - scl : scalar, optional - Following each integration the result is *multiplied* by `scl` - before the integration constant is added. (Default: 1) - axis : int, optional - Axis over which the integral is taken. (Default: 0). - - .. versionadded:: 1.7.0 - - Returns - ------- - S : ndarray - C-series coefficients of the integral. - - Raises - ------ - ValueError - If ``m < 1``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or - ``np.ndim(scl) != 0``. - - See Also - -------- - chebder - - Notes - ----- - Note that the result of each integration is *multiplied* by `scl`. - Why is this important to note? Say one is making a linear change of - variable :math:`u = ax + b` in an integral relative to `x`. Then - :math:`dx = du/a`, so one will need to set `scl` equal to - :math:`1/a`- perhaps not what one would have first thought. - - Also note that, in general, the result of integrating a C-series needs - to be "reprojected" onto the C-series basis set. 
Thus, typically, - the result of this function is "unintuitive," albeit correct; see - Examples section below. - - Examples - -------- - >>> from numpy.polynomial import chebyshev as C - >>> c = (1,2,3) - >>> C.chebint(c) - array([ 0.5, -0.5, 0.5, 0.5]) - >>> C.chebint(c,3) - array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667, # may vary - 0.00625 ]) - >>> C.chebint(c, k=3) - array([ 3.5, -0.5, 0.5, 0.5]) - >>> C.chebint(c,lbnd=-2) - array([ 8.5, -0.5, 0.5, 0.5]) - >>> C.chebint(c,scl=-2) - array([-1., 1., -1., -1.]) - - """ - c = np.array(c, ndmin=1, copy=True) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - if not np.iterable(k): - k = [k] - cnt = pu._deprecate_as_int(m, "the order of integration") - iaxis = pu._deprecate_as_int(axis, "the axis") - if cnt < 0: - raise ValueError("The order of integration must be non-negative") - if len(k) > cnt: - raise ValueError("Too many integration constants") - if np.ndim(lbnd) != 0: - raise ValueError("lbnd must be a scalar.") - if np.ndim(scl) != 0: - raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) - - if cnt == 0: - return c - - c = np.moveaxis(c, iaxis, 0) - k = list(k) + [0]*(cnt - len(k)) - for i in range(cnt): - n = len(c) - c *= scl - if n == 1 and np.all(c[0] == 0): - c[0] += k[i] - else: - tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) - tmp[0] = c[0]*0 - tmp[1] = c[0] - if n > 1: - tmp[2] = c[1]/4 - for j in range(2, n): - t = c[j]/(2*j + 1) # FIXME: t never used - tmp[j + 1] = c[j]/(2*(j + 1)) - tmp[j - 1] -= c[j]/(2*(j - 1)) - tmp[0] += k[i] - chebval(lbnd, tmp) - c = tmp - c = np.moveaxis(c, 0, iaxis) - return c - - -def chebval(x, c, tensor=True): - """ - Evaluate a Chebyshev series at points x. - - If `c` is of length `n + 1`, this function returns the value: - - .. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... 
+ c_n * T_n(x) - - The parameter `x` is converted to an array only if it is a tuple or a - list, otherwise it is treated as a scalar. In either case, either `x` - or its elements must support multiplication and addition both with - themselves and with the elements of `c`. - - If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If - `c` is multidimensional, then the shape of the result depends on the - value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + - x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that - scalars have shape (,). - - Trailing zeros in the coefficients will be used in the evaluation, so - they should be avoided if efficiency is a concern. - - Parameters - ---------- - x : array_like, compatible object - If `x` is a list or tuple, it is converted to an ndarray, otherwise - it is left unchanged and treated as a scalar. In either case, `x` - or its elements must support addition and multiplication with - with themselves and with the elements of `c`. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree n are contained in c[n]. If `c` is multidimensional the - remaining indices enumerate multiple polynomials. In the two - dimensional case the coefficients may be thought of as stored in - the columns of `c`. - tensor : boolean, optional - If True, the shape of the coefficient array is extended with ones - on the right, one for each dimension of `x`. Scalars have dimension 0 - for this action. The result is that every column of coefficients in - `c` is evaluated for every element of `x`. If False, `x` is broadcast - over the columns of `c` for the evaluation. This keyword is useful - when `c` is multidimensional. The default value is True. - - .. versionadded:: 1.7.0 - - Returns - ------- - values : ndarray, algebra_like - The shape of the return value is described above. 
- - See Also - -------- - chebval2d, chebgrid2d, chebval3d, chebgrid3d - - Notes - ----- - The evaluation uses Clenshaw recursion, aka synthetic division. - - Examples - -------- - - """ - c = np.array(c, ndmin=1, copy=True) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - if isinstance(x, (tuple, list)): - x = np.asarray(x) - if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) - - if len(c) == 1: - c0 = c[0] - c1 = 0 - elif len(c) == 2: - c0 = c[0] - c1 = c[1] - else: - x2 = 2*x - c0 = c[-2] - c1 = c[-1] - for i in range(3, len(c) + 1): - tmp = c0 - c0 = c[-i] - c1 - c1 = tmp + c1*x2 - return c0 + c1*x - - -def chebval2d(x, y, c): - """ - Evaluate a 2-D Chebyshev series at points (x, y). - - This function returns the values: - - .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * T_i(x) * T_j(y) - - The parameters `x` and `y` are converted to arrays only if they are - tuples or a lists, otherwise they are treated as a scalars and they - must have the same shape after conversion. In either case, either `x` - and `y` or their elements must support multiplication and addition both - with themselves and with the elements of `c`. - - If `c` is a 1-D array a one is implicitly appended to its shape to make - it 2-D. The shape of the result will be c.shape[2:] + x.shape. - - Parameters - ---------- - x, y : array_like, compatible objects - The two dimensional series is evaluated at the points `(x, y)`, - where `x` and `y` must have the same shape. If `x` or `y` is a list - or tuple, it is first converted to an ndarray, otherwise it is left - unchanged and if it isn't an ndarray it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term - of multi-degree i,j is contained in ``c[i,j]``. If `c` has - dimension greater than 2 the remaining indices enumerate multiple - sets of coefficients. 
- - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional Chebyshev series at points formed - from pairs of corresponding values from `x` and `y`. - - See Also - -------- - chebval, chebgrid2d, chebval3d, chebgrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._valnd(chebval, c, x, y) - - -def chebgrid2d(x, y, c): - """ - Evaluate a 2-D Chebyshev series on the Cartesian product of x and y. - - This function returns the values: - - .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * T_i(a) * T_j(b), - - where the points `(a, b)` consist of all pairs formed by taking - `a` from `x` and `b` from `y`. The resulting points form a grid with - `x` in the first dimension and `y` in the second. - - The parameters `x` and `y` are converted to arrays only if they are - tuples or a lists, otherwise they are treated as a scalars. In either - case, either `x` and `y` or their elements must support multiplication - and addition both with themselves and with the elements of `c`. - - If `c` has fewer than two dimensions, ones are implicitly appended to - its shape to make it 2-D. The shape of the result will be c.shape[2:] + - x.shape + y.shape. - - Parameters - ---------- - x, y : array_like, compatible objects - The two dimensional series is evaluated at the points in the - Cartesian product of `x` and `y`. If `x` or `y` is a list or - tuple, it is first converted to an ndarray, otherwise it is left - unchanged and, if it isn't an ndarray, it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term of - multi-degree i,j is contained in `c[i,j]`. If `c` has dimension - greater than two the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional Chebyshev series at points in the - Cartesian product of `x` and `y`. 
- - See Also - -------- - chebval, chebval2d, chebval3d, chebgrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._gridnd(chebval, c, x, y) - - -def chebval3d(x, y, z, c): - """ - Evaluate a 3-D Chebyshev series at points (x, y, z). - - This function returns the values: - - .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * T_i(x) * T_j(y) * T_k(z) - - The parameters `x`, `y`, and `z` are converted to arrays only if - they are tuples or a lists, otherwise they are treated as a scalars and - they must have the same shape after conversion. In either case, either - `x`, `y`, and `z` or their elements must support multiplication and - addition both with themselves and with the elements of `c`. - - If `c` has fewer than 3 dimensions, ones are implicitly appended to its - shape to make it 3-D. The shape of the result will be c.shape[3:] + - x.shape. - - Parameters - ---------- - x, y, z : array_like, compatible object - The three dimensional series is evaluated at the points - `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If - any of `x`, `y`, or `z` is a list or tuple, it is first converted - to an ndarray, otherwise it is left unchanged and if it isn't an - ndarray it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term of - multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension - greater than 3 the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the multidimensional polynomial on points formed with - triples of corresponding values from `x`, `y`, and `z`. - - See Also - -------- - chebval, chebval2d, chebgrid2d, chebgrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._valnd(chebval, c, x, y, z) - - -def chebgrid3d(x, y, z, c): - """ - Evaluate a 3-D Chebyshev series on the Cartesian product of x, y, and z. - - This function returns the values: - - .. 
math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * T_i(a) * T_j(b) * T_k(c) - - where the points `(a, b, c)` consist of all triples formed by taking - `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form - a grid with `x` in the first dimension, `y` in the second, and `z` in - the third. - - The parameters `x`, `y`, and `z` are converted to arrays only if they - are tuples or a lists, otherwise they are treated as a scalars. In - either case, either `x`, `y`, and `z` or their elements must support - multiplication and addition both with themselves and with the elements - of `c`. - - If `c` has fewer than three dimensions, ones are implicitly appended to - its shape to make it 3-D. The shape of the result will be c.shape[3:] + - x.shape + y.shape + z.shape. - - Parameters - ---------- - x, y, z : array_like, compatible objects - The three dimensional series is evaluated at the points in the - Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a - list or tuple, it is first converted to an ndarray, otherwise it is - left unchanged and, if it isn't an ndarray, it is treated as a - scalar. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree i,j are contained in ``c[i,j]``. If `c` has dimension - greater than two the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional polynomial at points in the Cartesian - product of `x` and `y`. - - See Also - -------- - chebval, chebval2d, chebgrid2d, chebval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._gridnd(chebval, c, x, y, z) - - -def chebvander(x, deg): - """Pseudo-Vandermonde matrix of given degree. - - Returns the pseudo-Vandermonde matrix of degree `deg` and sample points - `x`. The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., i] = T_i(x), - - where `0 <= i <= deg`. 
The leading indices of `V` index the elements of - `x` and the last index is the degree of the Chebyshev polynomial. - - If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the - matrix ``V = chebvander(x, n)``, then ``np.dot(V, c)`` and - ``chebval(x, c)`` are the same up to roundoff. This equivalence is - useful both for least squares fitting and for the evaluation of a large - number of Chebyshev series of the same degree and sample points. - - Parameters - ---------- - x : array_like - Array of points. The dtype is converted to float64 or complex128 - depending on whether any of the elements are complex. If `x` is - scalar it is converted to a 1-D array. - deg : int - Degree of the resulting matrix. - - Returns - ------- - vander : ndarray - The pseudo Vandermonde matrix. The shape of the returned matrix is - ``x.shape + (deg + 1,)``, where The last index is the degree of the - corresponding Chebyshev polynomial. The dtype will be the same as - the converted `x`. - - """ - ideg = pu._deprecate_as_int(deg, "deg") - if ideg < 0: - raise ValueError("deg must be non-negative") - - x = np.array(x, copy=False, ndmin=1) + 0.0 - dims = (ideg + 1,) + x.shape - dtyp = x.dtype - v = np.empty(dims, dtype=dtyp) - # Use forward recursion to generate the entries. - v[0] = x*0 + 1 - if ideg > 0: - x2 = 2*x - v[1] = x - for i in range(2, ideg + 1): - v[i] = v[i-1]*x2 - v[i-2] - return np.moveaxis(v, 0, -1) - - -def chebvander2d(x, y, deg): - """Pseudo-Vandermonde matrix of given degrees. - - Returns the pseudo-Vandermonde matrix of degrees `deg` and sample - points `(x, y)`. The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., (deg[1] + 1)*i + j] = T_i(x) * T_j(y), - - where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of - `V` index the points `(x, y)` and the last index encodes the degrees of - the Chebyshev polynomials. 
- - If ``V = chebvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` - correspond to the elements of a 2-D coefficient array `c` of shape - (xdeg + 1, ydeg + 1) in the order - - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... - - and ``np.dot(V, c.flat)`` and ``chebval2d(x, y, c)`` will be the same - up to roundoff. This equivalence is useful both for least squares - fitting and for the evaluation of a large number of 2-D Chebyshev - series of the same degrees and sample points. - - Parameters - ---------- - x, y : array_like - Arrays of point coordinates, all of the same shape. The dtypes - will be converted to either float64 or complex128 depending on - whether any of the elements are complex. Scalars are converted to - 1-D arrays. - deg : list of ints - List of maximum degrees of the form [x_deg, y_deg]. - - Returns - ------- - vander2d : ndarray - The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg([1]+1)`. The dtype will be the same - as the converted `x` and `y`. - - See Also - -------- - chebvander, chebvander3d, chebval2d, chebval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._vander_nd_flat((chebvander, chebvander), (x, y), deg) - - -def chebvander3d(x, y, z, deg): - """Pseudo-Vandermonde matrix of given degrees. - - Returns the pseudo-Vandermonde matrix of degrees `deg` and sample - points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`, - then The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = T_i(x)*T_j(y)*T_k(z), - - where `0 <= i <= l`, `0 <= j <= m`, and `0 <= j <= n`. The leading - indices of `V` index the points `(x, y, z)` and the last index encodes - the degrees of the Chebyshev polynomials. - - If ``V = chebvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns - of `V` correspond to the elements of a 3-D coefficient array `c` of - shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order - - .. 
math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... - - and ``np.dot(V, c.flat)`` and ``chebval3d(x, y, z, c)`` will be the - same up to roundoff. This equivalence is useful both for least squares - fitting and for the evaluation of a large number of 3-D Chebyshev - series of the same degrees and sample points. - - Parameters - ---------- - x, y, z : array_like - Arrays of point coordinates, all of the same shape. The dtypes will - be converted to either float64 or complex128 depending on whether - any of the elements are complex. Scalars are converted to 1-D - arrays. - deg : list of ints - List of maximum degrees of the form [x_deg, y_deg, z_deg]. - - Returns - ------- - vander3d : ndarray - The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg([1]+1)*(deg[2]+1)`. The dtype will - be the same as the converted `x`, `y`, and `z`. - - See Also - -------- - chebvander, chebvander3d, chebval2d, chebval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._vander_nd_flat((chebvander, chebvander, chebvander), (x, y, z), deg) - - -def chebfit(x, y, deg, rcond=None, full=False, w=None): - """ - Least squares fit of Chebyshev series to data. - - Return the coefficients of a Chebyshev series of degree `deg` that is the - least squares fit to the data values `y` given at points `x`. If `y` is - 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple - fits are done, one for each column of `y`, and the resulting - coefficients are stored in the corresponding columns of a 2-D return. - The fitted polynomial(s) are in the form - - .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x), - - where `n` is `deg`. - - Parameters - ---------- - x : array_like, shape (M,) - x-coordinates of the M sample points ``(x[i], y[i])``. - y : array_like, shape (M,) or (M, K) - y-coordinates of the sample points. 
Several data sets of sample - points sharing the same x-coordinates can be fitted at once by - passing in a 2D-array that contains one dataset per column. - deg : int or 1-D array_like - Degree(s) of the fitting polynomials. If `deg` is a single integer, - all terms up to and including the `deg`'th term are included in the - fit. For NumPy versions >= 1.11.0 a list of integers specifying the - degrees of the terms to include may be used instead. - rcond : float, optional - Relative condition number of the fit. Singular values smaller than - this relative to the largest singular value will be ignored. The - default value is len(x)*eps, where eps is the relative precision of - the float type, about 2e-16 in most cases. - full : bool, optional - Switch determining nature of return value. When it is False (the - default) just the coefficients are returned, when True diagnostic - information from the singular value decomposition is also returned. - w : array_like, shape (`M`,), optional - Weights. If not None, the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the - weights are chosen so that the errors of the products ``w[i]*y[i]`` - all have the same variance. The default value is None. - - .. versionadded:: 1.5.0 - - Returns - ------- - coef : ndarray, shape (M,) or (M, K) - Chebyshev coefficients ordered from low to high. If `y` was 2-D, - the coefficients for the data in column k of `y` are in column - `k`. - - [residuals, rank, singular_values, rcond] : list - These values are only returned if `full` = True - - resid -- sum of squared residuals of the least squares fit - rank -- the numerical rank of the scaled Vandermonde matrix - sv -- singular values of the scaled Vandermonde matrix - rcond -- value of `rcond`. - - For more details, see `linalg.lstsq`. - - Warns - ----- - RankWarning - The rank of the coefficient matrix in the least-squares fit is - deficient. The warning is only raised if `full` = False. 
The - warnings can be turned off by - - >>> import warnings - >>> warnings.simplefilter('ignore', np.RankWarning) - - See Also - -------- - polyfit, legfit, lagfit, hermfit, hermefit - chebval : Evaluates a Chebyshev series. - chebvander : Vandermonde matrix of Chebyshev series. - chebweight : Chebyshev weight function. - linalg.lstsq : Computes a least-squares fit from the matrix. - scipy.interpolate.UnivariateSpline : Computes spline fits. - - Notes - ----- - The solution is the coefficients of the Chebyshev series `p` that - minimizes the sum of the weighted squared errors - - .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, - - where :math:`w_j` are the weights. This problem is solved by setting up - as the (typically) overdetermined matrix equation - - .. math:: V(x) * c = w * y, - - where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the - coefficients to be solved for, `w` are the weights, and `y` are the - observed values. This equation is then solved using the singular value - decomposition of `V`. - - If some of the singular values of `V` are so small that they are - neglected, then a `RankWarning` will be issued. This means that the - coefficient values may be poorly determined. Using a lower order fit - will usually get rid of the warning. The `rcond` parameter can also be - set to a value smaller than its default, but the resulting fit may be - spurious and have large contributions from roundoff error. - - Fits using Chebyshev series are usually better conditioned than fits - using power series, but much can depend on the distribution of the - sample points and the smoothness of the data. If the quality of the fit - is inadequate splines may be a good alternative. - - References - ---------- - .. [1] Wikipedia, "Curve fitting", - https://en.wikipedia.org/wiki/Curve_fitting - - Examples - -------- - - """ - return pu._fit(chebvander, x, y, deg, rcond, full, w) - - -def chebcompanion(c): - """Return the scaled companion matrix of c. 
- - The basis polynomials are scaled so that the companion matrix is - symmetric when `c` is a Chebyshev basis polynomial. This provides - better eigenvalue estimates than the unscaled case and for basis - polynomials the eigenvalues are guaranteed to be real if - `numpy.linalg.eigvalsh` is used to obtain them. - - Parameters - ---------- - c : array_like - 1-D array of Chebyshev series coefficients ordered from low to high - degree. - - Returns - ------- - mat : ndarray - Scaled companion matrix of dimensions (deg, deg). - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - if len(c) < 2: - raise ValueError('Series must have maximum degree of at least 1.') - if len(c) == 2: - return np.array([[-c[0]/c[1]]]) - - n = len(c) - 1 - mat = np.zeros((n, n), dtype=c.dtype) - scl = np.array([1.] + [np.sqrt(.5)]*(n-1)) - top = mat.reshape(-1)[1::n+1] - bot = mat.reshape(-1)[n::n+1] - top[0] = np.sqrt(.5) - top[1:] = 1/2 - bot[...] = top - mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5 - return mat - - -def chebroots(c): - """ - Compute the roots of a Chebyshev series. - - Return the roots (a.k.a. "zeros") of the polynomial - - .. math:: p(x) = \\sum_i c[i] * T_i(x). - - Parameters - ---------- - c : 1-D array_like - 1-D array of coefficients. - - Returns - ------- - out : ndarray - Array of the roots of the series. If all the roots are real, - then `out` is also real, otherwise it is complex. - - See Also - -------- - polyroots, legroots, lagroots, hermroots, hermeroots - - Notes - ----- - The root estimates are obtained as the eigenvalues of the companion - matrix, Roots far from the origin of the complex plane may have large - errors due to the numerical instability of the series for such - values. Roots with multiplicity greater than 1 will also show larger - errors as the value of the series near such points is relatively - insensitive to errors in the roots. 
Isolated roots near the origin can - be improved by a few iterations of Newton's method. - - The Chebyshev series basis polynomials aren't powers of `x` so the - results of this function may seem unintuitive. - - Examples - -------- - >>> import numpy.polynomial.chebyshev as cheb - >>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots - array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00]) # may vary - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - if len(c) < 2: - return np.array([], dtype=c.dtype) - if len(c) == 2: - return np.array([-c[0]/c[1]]) - - # rotated companion matrix reduces error - m = chebcompanion(c)[::-1,::-1] - r = la.eigvals(m) - r.sort() - return r - - -def chebinterpolate(func, deg, args=()): - """Interpolate a function at the Chebyshev points of the first kind. - - Returns the Chebyshev series that interpolates `func` at the Chebyshev - points of the first kind in the interval [-1, 1]. The interpolating - series tends to a minmax approximation to `func` with increasing `deg` - if the function is continuous in the interval. - - .. versionadded:: 1.14.0 - - Parameters - ---------- - func : function - The function to be approximated. It must be a function of a single - variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are - extra arguments passed in the `args` parameter. - deg : int - Degree of the interpolating polynomial - args : tuple, optional - Extra arguments to be used in the function call. Default is no extra - arguments. - - Returns - ------- - coef : ndarray, shape (deg + 1,) - Chebyshev coefficients of the interpolating series ordered from low to - high. 
- - Examples - -------- - >>> import numpy.polynomial.chebyshev as C - >>> C.chebfromfunction(lambda x: np.tanh(x) + 0.5, 8) - array([ 5.00000000e-01, 8.11675684e-01, -9.86864911e-17, - -5.42457905e-02, -2.71387850e-16, 4.51658839e-03, - 2.46716228e-17, -3.79694221e-04, -3.26899002e-16]) - - Notes - ----- - - The Chebyshev polynomials used in the interpolation are orthogonal when - sampled at the Chebyshev points of the first kind. If it is desired to - constrain some of the coefficients they can simply be set to the desired - value after the interpolation, no new interpolation or fit is needed. This - is especially useful if it is known apriori that some of coefficients are - zero. For instance, if the function is even then the coefficients of the - terms of odd degree in the result can be set to zero. - - """ - deg = np.asarray(deg) - - # check arguments. - if deg.ndim > 0 or deg.dtype.kind not in 'iu' or deg.size == 0: - raise TypeError("deg must be an int") - if deg < 0: - raise ValueError("expected deg >= 0") - - order = deg + 1 - xcheb = chebpts1(order) - yfunc = func(xcheb, *args) - m = chebvander(xcheb, deg) - c = np.dot(m.T, yfunc) - c[0] /= order - c[1:] /= 0.5*order - - return c - - -def chebgauss(deg): - """ - Gauss-Chebyshev quadrature. - - Computes the sample points and weights for Gauss-Chebyshev quadrature. - These sample points and weights will correctly integrate polynomials of - degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with - the weight function :math:`f(x) = 1/\\sqrt{1 - x^2}`. - - Parameters - ---------- - deg : int - Number of sample points and weights. It must be >= 1. - - Returns - ------- - x : ndarray - 1-D ndarray containing the sample points. - y : ndarray - 1-D ndarray containing the weights. - - Notes - ----- - - .. versionadded:: 1.7.0 - - The results have only been tested up to degree 100, higher degrees may - be problematic. 
For Gauss-Chebyshev there are closed form solutions for - the sample points and weights. If n = `deg`, then - - .. math:: x_i = \\cos(\\pi (2 i - 1) / (2 n)) - - .. math:: w_i = \\pi / n - - """ - ideg = pu._deprecate_as_int(deg, "deg") - if ideg <= 0: - raise ValueError("deg must be a positive integer") - - x = np.cos(np.pi * np.arange(1, 2*ideg, 2) / (2.0*ideg)) - w = np.ones(ideg)*(np.pi/ideg) - - return x, w - - -def chebweight(x): - """ - The weight function of the Chebyshev polynomials. - - The weight function is :math:`1/\\sqrt{1 - x^2}` and the interval of - integration is :math:`[-1, 1]`. The Chebyshev polynomials are - orthogonal, but not normalized, with respect to this weight function. - - Parameters - ---------- - x : array_like - Values at which the weight function will be computed. - - Returns - ------- - w : ndarray - The weight function at `x`. - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - w = 1./(np.sqrt(1. + x) * np.sqrt(1. - x)) - return w - - -def chebpts1(npts): - """ - Chebyshev points of the first kind. - - The Chebyshev points of the first kind are the points ``cos(x)``, - where ``x = [pi*(k + .5)/npts for k in range(npts)]``. - - Parameters - ---------- - npts : int - Number of sample points desired. - - Returns - ------- - pts : ndarray - The Chebyshev points of the first kind. - - See Also - -------- - chebpts2 - - Notes - ----- - - .. versionadded:: 1.5.0 - - """ - _npts = int(npts) - if _npts != npts: - raise ValueError("npts must be integer") - if _npts < 1: - raise ValueError("npts must be >= 1") - - x = np.linspace(-np.pi, 0, _npts, endpoint=False) + np.pi/(2*_npts) - return np.cos(x) - - -def chebpts2(npts): - """ - Chebyshev points of the second kind. - - The Chebyshev points of the second kind are the points ``cos(x)``, - where ``x = [pi*k/(npts - 1) for k in range(npts)]``. - - Parameters - ---------- - npts : int - Number of sample points desired. 
- - Returns - ------- - pts : ndarray - The Chebyshev points of the second kind. - - Notes - ----- - - .. versionadded:: 1.5.0 - - """ - _npts = int(npts) - if _npts != npts: - raise ValueError("npts must be integer") - if _npts < 2: - raise ValueError("npts must be >= 2") - - x = np.linspace(-np.pi, 0, _npts) - return np.cos(x) - - -# -# Chebyshev series class -# - -class Chebyshev(ABCPolyBase): - """A Chebyshev series class. - - The Chebyshev class provides the standard Python numerical methods - '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the - methods listed below. - - Parameters - ---------- - coef : array_like - Chebyshev coefficients in order of increasing degree, i.e., - ``(1, 2, 3)`` gives ``1*T_0(x) + 2*T_1(x) + 3*T_2(x)``. - domain : (2,) array_like, optional - Domain to use. The interval ``[domain[0], domain[1]]`` is mapped - to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [-1, 1]. - window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [-1, 1]. - - .. versionadded:: 1.6.0 - - """ - # Virtual Functions - _add = staticmethod(chebadd) - _sub = staticmethod(chebsub) - _mul = staticmethod(chebmul) - _div = staticmethod(chebdiv) - _pow = staticmethod(chebpow) - _val = staticmethod(chebval) - _int = staticmethod(chebint) - _der = staticmethod(chebder) - _fit = staticmethod(chebfit) - _line = staticmethod(chebline) - _roots = staticmethod(chebroots) - _fromroots = staticmethod(chebfromroots) - - @classmethod - def interpolate(cls, func, deg, domain=None, args=()): - """Interpolate a function at the Chebyshev points of the first kind. - - Returns the series that interpolates `func` at the Chebyshev points of - the first kind scaled and shifted to the `domain`. The resulting series - tends to a minmax approximation of `func` when the function is - continuous in the domain. - - .. 
versionadded:: 1.14.0 - - Parameters - ---------- - func : function - The function to be interpolated. It must be a function of a single - variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are - extra arguments passed in the `args` parameter. - deg : int - Degree of the interpolating polynomial. - domain : {None, [beg, end]}, optional - Domain over which `func` is interpolated. The default is None, in - which case the domain is [-1, 1]. - args : tuple, optional - Extra arguments to be used in the function call. Default is no - extra arguments. - - Returns - ------- - polynomial : Chebyshev instance - Interpolating Chebyshev instance. - - Notes - ----- - See `numpy.polynomial.chebfromfunction` for more details. - - """ - if domain is None: - domain = cls.domain - xfunc = lambda x: func(pu.mapdomain(x, cls.window, domain), *args) - coef = chebinterpolate(xfunc, deg) - return cls(coef, domain=domain) - - # Virtual properties - nickname = 'cheb' - domain = np.array(chebdomain) - window = np.array(chebdomain) - basis_name = 'T' diff --git a/venv/lib/python3.7/site-packages/numpy/polynomial/hermite.py b/venv/lib/python3.7/site-packages/numpy/polynomial/hermite.py deleted file mode 100644 index 9b1aea2..0000000 --- a/venv/lib/python3.7/site-packages/numpy/polynomial/hermite.py +++ /dev/null @@ -1,1667 +0,0 @@ -""" -Objects for dealing with Hermite series. - -This module provides a number of objects (mostly functions) useful for -dealing with Hermite series, including a `Hermite` class that -encapsulates the usual arithmetic operations. (General information -on how this module represents and works with such polynomials is in the -docstring for its "parent" sub-package, `numpy.polynomial`). - -Constants ---------- -- `hermdomain` -- Hermite series default domain, [-1,1]. -- `hermzero` -- Hermite series that evaluates identically to 0. -- `hermone` -- Hermite series that evaluates identically to 1. 
-- `hermx` -- Hermite series for the identity map, ``f(x) = x``. - -Arithmetic ----------- -- `hermadd` -- add two Hermite series. -- `hermsub` -- subtract one Hermite series from another. -- `hermmulx` -- multiply a Hermite series in ``P_i(x)`` by ``x``. -- `hermmul` -- multiply two Hermite series. -- `hermdiv` -- divide one Hermite series by another. -- `hermpow` -- raise a Hermite series to a positive integer power. -- `hermval` -- evaluate a Hermite series at given points. -- `hermval2d` -- evaluate a 2D Hermite series at given points. -- `hermval3d` -- evaluate a 3D Hermite series at given points. -- `hermgrid2d` -- evaluate a 2D Hermite series on a Cartesian product. -- `hermgrid3d` -- evaluate a 3D Hermite series on a Cartesian product. - -Calculus --------- -- `hermder` -- differentiate a Hermite series. -- `hermint` -- integrate a Hermite series. - -Misc Functions --------------- -- `hermfromroots` -- create a Hermite series with specified roots. -- `hermroots` -- find the roots of a Hermite series. -- `hermvander` -- Vandermonde-like matrix for Hermite polynomials. -- `hermvander2d` -- Vandermonde-like matrix for 2D power series. -- `hermvander3d` -- Vandermonde-like matrix for 3D power series. -- `hermgauss` -- Gauss-Hermite quadrature, points and weights. -- `hermweight` -- Hermite weight function. -- `hermcompanion` -- symmetrized companion matrix in Hermite form. -- `hermfit` -- least-squares fit returning a Hermite series. -- `hermtrim` -- trim leading coefficients from a Hermite series. -- `hermline` -- Hermite series of given straight line. -- `herm2poly` -- convert a Hermite series to a polynomial. -- `poly2herm` -- convert a polynomial to a Hermite series. - -Classes -------- -- `Hermite` -- A Hermite series class. 
- -See also --------- -`numpy.polynomial` - -""" -from __future__ import division, absolute_import, print_function - -import warnings -import numpy as np -import numpy.linalg as la -from numpy.core.multiarray import normalize_axis_index - -from . import polyutils as pu -from ._polybase import ABCPolyBase - -__all__ = [ - 'hermzero', 'hermone', 'hermx', 'hermdomain', 'hermline', 'hermadd', - 'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow', 'hermval', - 'hermder', 'hermint', 'herm2poly', 'poly2herm', 'hermfromroots', - 'hermvander', 'hermfit', 'hermtrim', 'hermroots', 'Hermite', - 'hermval2d', 'hermval3d', 'hermgrid2d', 'hermgrid3d', 'hermvander2d', - 'hermvander3d', 'hermcompanion', 'hermgauss', 'hermweight'] - -hermtrim = pu.trimcoef - - -def poly2herm(pol): - """ - poly2herm(pol) - - Convert a polynomial to a Hermite series. - - Convert an array representing the coefficients of a polynomial (relative - to the "standard" basis) ordered from lowest degree to highest, to an - array of the coefficients of the equivalent Hermite series, ordered - from lowest to highest degree. - - Parameters - ---------- - pol : array_like - 1-D array containing the polynomial coefficients - - Returns - ------- - c : ndarray - 1-D array containing the coefficients of the equivalent Hermite - series. - - See Also - -------- - herm2poly - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> from numpy.polynomial.hermite import poly2herm - >>> poly2herm(np.arange(4)) - array([1. , 2.75 , 0.5 , 0.375]) - - """ - [pol] = pu.as_series([pol]) - deg = len(pol) - 1 - res = 0 - for i in range(deg, -1, -1): - res = hermadd(hermmulx(res), pol[i]) - return res - - -def herm2poly(c): - """ - Convert a Hermite series to a polynomial. 
- - Convert an array representing the coefficients of a Hermite series, - ordered from lowest degree to highest, to an array of the coefficients - of the equivalent polynomial (relative to the "standard" basis) ordered - from lowest to highest degree. - - Parameters - ---------- - c : array_like - 1-D array containing the Hermite series coefficients, ordered - from lowest order term to highest. - - Returns - ------- - pol : ndarray - 1-D array containing the coefficients of the equivalent polynomial - (relative to the "standard" basis) ordered from lowest order term - to highest. - - See Also - -------- - poly2herm - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> from numpy.polynomial.hermite import herm2poly - >>> herm2poly([ 1. , 2.75 , 0.5 , 0.375]) - array([0., 1., 2., 3.]) - - """ - from .polynomial import polyadd, polysub, polymulx - - [c] = pu.as_series([c]) - n = len(c) - if n == 1: - return c - if n == 2: - c[1] *= 2 - return c - else: - c0 = c[-2] - c1 = c[-1] - # i is the current degree of c1 - for i in range(n - 1, 1, -1): - tmp = c0 - c0 = polysub(c[i - 2], c1*(2*(i - 1))) - c1 = polyadd(tmp, polymulx(c1)*2) - return polyadd(c0, polymulx(c1)*2) - -# -# These are constant arrays are of integer type so as to be compatible -# with the widest range of other types, such as Decimal. -# - -# Hermite -hermdomain = np.array([-1, 1]) - -# Hermite coefficients representing zero. -hermzero = np.array([0]) - -# Hermite coefficients representing one. -hermone = np.array([1]) - -# Hermite coefficients representing the identity x. -hermx = np.array([0, 1/2]) - - -def hermline(off, scl): - """ - Hermite series whose graph is a straight line. - - - - Parameters - ---------- - off, scl : scalars - The specified line is given by ``off + scl*x``. - - Returns - ------- - y : ndarray - This module's representation of the Hermite series for - ``off + scl*x``. 
- - See Also - -------- - polyline, chebline - - Examples - -------- - >>> from numpy.polynomial.hermite import hermline, hermval - >>> hermval(0,hermline(3, 2)) - 3.0 - >>> hermval(1,hermline(3, 2)) - 5.0 - - """ - if scl != 0: - return np.array([off, scl/2]) - else: - return np.array([off]) - - -def hermfromroots(roots): - """ - Generate a Hermite series with given roots. - - The function returns the coefficients of the polynomial - - .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), - - in Hermite form, where the `r_n` are the roots specified in `roots`. - If a zero has multiplicity n, then it must appear in `roots` n times. - For instance, if 2 is a root of multiplicity three and 3 is a root of - multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The - roots can appear in any order. - - If the returned coefficients are `c`, then - - .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x) - - The coefficient of the last term is not generally 1 for monic - polynomials in Hermite form. - - Parameters - ---------- - roots : array_like - Sequence containing the roots. - - Returns - ------- - out : ndarray - 1-D array of coefficients. If all roots are real then `out` is a - real array, if some of the roots are complex, then `out` is complex - even if all the coefficients in the result are real (see Examples - below). - - See Also - -------- - polyfromroots, legfromroots, lagfromroots, chebfromroots, hermefromroots - - Examples - -------- - >>> from numpy.polynomial.hermite import hermfromroots, hermval - >>> coef = hermfromroots((-1, 0, 1)) - >>> hermval((-1, 0, 1), coef) - array([0., 0., 0.]) - >>> coef = hermfromroots((-1j, 1j)) - >>> hermval((-1j, 1j), coef) - array([0.+0.j, 0.+0.j]) - - """ - return pu._fromroots(hermline, hermmul, roots) - - -def hermadd(c1, c2): - """ - Add one Hermite series to another. - - Returns the sum of two Hermite series `c1` + `c2`. 
The arguments - are sequences of coefficients ordered from lowest order term to - highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Hermite series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the Hermite series of their sum. - - See Also - -------- - hermsub, hermmulx, hermmul, hermdiv, hermpow - - Notes - ----- - Unlike multiplication, division, etc., the sum of two Hermite series - is a Hermite series (without having to "reproject" the result onto - the basis set) so addition, just like that of "standard" polynomials, - is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial.hermite import hermadd - >>> hermadd([1, 2, 3], [1, 2, 3, 4]) - array([2., 4., 6., 4.]) - - """ - return pu._add(c1, c2) - - -def hermsub(c1, c2): - """ - Subtract one Hermite series from another. - - Returns the difference of two Hermite series `c1` - `c2`. The - sequences of coefficients are from lowest order term to highest, i.e., - [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Hermite series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Hermite series coefficients representing their difference. - - See Also - -------- - hermadd, hermmulx, hermmul, hermdiv, hermpow - - Notes - ----- - Unlike multiplication, division, etc., the difference of two Hermite - series is a Hermite series (without having to "reproject" the result - onto the basis set) so subtraction, just like that of "standard" - polynomials, is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial.hermite import hermsub - >>> hermsub([1, 2, 3, 4], [1, 2, 3]) - array([0., 0., 0., 4.]) - - """ - return pu._sub(c1, c2) - - -def hermmulx(c): - """Multiply a Hermite series by x. 
- - Multiply the Hermite series `c` by x, where x is the independent - variable. - - - Parameters - ---------- - c : array_like - 1-D array of Hermite series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the result of the multiplication. - - See Also - -------- - hermadd, hermsub, hermmul, hermdiv, hermpow - - Notes - ----- - The multiplication uses the recursion relationship for Hermite - polynomials in the form - - .. math:: - - xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x)) - - Examples - -------- - >>> from numpy.polynomial.hermite import hermmulx - >>> hermmulx([1, 2, 3]) - array([2. , 6.5, 1. , 1.5]) - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - # The zero series needs special treatment - if len(c) == 1 and c[0] == 0: - return c - - prd = np.empty(len(c) + 1, dtype=c.dtype) - prd[0] = c[0]*0 - prd[1] = c[0]/2 - for i in range(1, len(c)): - prd[i + 1] = c[i]/2 - prd[i - 1] += c[i]*i - return prd - - -def hermmul(c1, c2): - """ - Multiply one Hermite series by another. - - Returns the product of two Hermite series `c1` * `c2`. The arguments - are sequences of coefficients, from lowest order "term" to highest, - e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Hermite series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Hermite series coefficients representing their product. - - See Also - -------- - hermadd, hermsub, hermmulx, hermdiv, hermpow - - Notes - ----- - In general, the (polynomial) product of two C-series results in terms - that are not in the Hermite polynomial basis set. Thus, to express - the product as a Hermite series, it is necessary to "reproject" the - product onto said basis set, which may produce "unintuitive" (but - correct) results; see Examples section below. 
- - Examples - -------- - >>> from numpy.polynomial.hermite import hermmul - >>> hermmul([1, 2, 3], [0, 1, 2]) - array([52., 29., 52., 7., 6.]) - - """ - # s1, s2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - - if len(c1) > len(c2): - c = c2 - xs = c1 - else: - c = c1 - xs = c2 - - if len(c) == 1: - c0 = c[0]*xs - c1 = 0 - elif len(c) == 2: - c0 = c[0]*xs - c1 = c[1]*xs - else: - nd = len(c) - c0 = c[-2]*xs - c1 = c[-1]*xs - for i in range(3, len(c) + 1): - tmp = c0 - nd = nd - 1 - c0 = hermsub(c[-i]*xs, c1*(2*(nd - 1))) - c1 = hermadd(tmp, hermmulx(c1)*2) - return hermadd(c0, hermmulx(c1)*2) - - -def hermdiv(c1, c2): - """ - Divide one Hermite series by another. - - Returns the quotient-with-remainder of two Hermite series - `c1` / `c2`. The arguments are sequences of coefficients from lowest - order "term" to highest, e.g., [1,2,3] represents the series - ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Hermite series coefficients ordered from low to - high. - - Returns - ------- - [quo, rem] : ndarrays - Of Hermite series coefficients representing the quotient and - remainder. - - See Also - -------- - hermadd, hermsub, hermmulx, hermmul, hermpow - - Notes - ----- - In general, the (polynomial) division of one Hermite series by another - results in quotient and remainder terms that are not in the Hermite - polynomial basis set. Thus, to express these results as a Hermite - series, it is necessary to "reproject" the results onto the Hermite - basis set, which may produce "unintuitive" (but correct) results; see - Examples section below. 
- - Examples - -------- - >>> from numpy.polynomial.hermite import hermdiv - >>> hermdiv([ 52., 29., 52., 7., 6.], [0, 1, 2]) - (array([1., 2., 3.]), array([0.])) - >>> hermdiv([ 54., 31., 52., 7., 6.], [0, 1, 2]) - (array([1., 2., 3.]), array([2., 2.])) - >>> hermdiv([ 53., 30., 52., 7., 6.], [0, 1, 2]) - (array([1., 2., 3.]), array([1., 1.])) - - """ - return pu._div(hermmul, c1, c2) - - -def hermpow(c, pow, maxpower=16): - """Raise a Hermite series to a power. - - Returns the Hermite series `c` raised to the power `pow`. The - argument `c` is a sequence of coefficients ordered from low to high. - i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` - - Parameters - ---------- - c : array_like - 1-D array of Hermite series coefficients ordered from low to - high. - pow : integer - Power to which the series will be raised - maxpower : integer, optional - Maximum power allowed. This is mainly to limit growth of the series - to unmanageable size. Default is 16 - - Returns - ------- - coef : ndarray - Hermite series of power. - - See Also - -------- - hermadd, hermsub, hermmulx, hermmul, hermdiv - - Examples - -------- - >>> from numpy.polynomial.hermite import hermpow - >>> hermpow([1, 2, 3], 2) - array([81., 52., 82., 12., 9.]) - - """ - return pu._pow(hermmul, c, pow, maxpower) - - -def hermder(c, m=1, scl=1, axis=0): - """ - Differentiate a Hermite series. - - Returns the Hermite series coefficients `c` differentiated `m` times - along `axis`. At each iteration the result is multiplied by `scl` (the - scaling factor is for use in a linear change of variable). The argument - `c` is an array of coefficients from low to high degree along each - axis, e.g., [1,2,3] represents the series ``1*H_0 + 2*H_1 + 3*H_2`` - while [[1,2],[1,2]] represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + - 2*H_0(x)*H_1(y) + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is - ``y``. - - Parameters - ---------- - c : array_like - Array of Hermite series coefficients. 
If `c` is multidimensional the - different axis correspond to different variables with the degree in - each axis given by the corresponding index. - m : int, optional - Number of derivatives taken, must be non-negative. (Default: 1) - scl : scalar, optional - Each differentiation is multiplied by `scl`. The end result is - multiplication by ``scl**m``. This is for use in a linear change of - variable. (Default: 1) - axis : int, optional - Axis over which the derivative is taken. (Default: 0). - - .. versionadded:: 1.7.0 - - Returns - ------- - der : ndarray - Hermite series of the derivative. - - See Also - -------- - hermint - - Notes - ----- - In general, the result of differentiating a Hermite series does not - resemble the same operation on a power series. Thus the result of this - function may be "unintuitive," albeit correct; see Examples section - below. - - Examples - -------- - >>> from numpy.polynomial.hermite import hermder - >>> hermder([ 1. , 0.5, 0.5, 0.5]) - array([1., 2., 3.]) - >>> hermder([-0.5, 1./2., 1./8., 1./12., 1./16.], m=2) - array([1., 2., 3.]) - - """ - c = np.array(c, ndmin=1, copy=True) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - cnt = pu._deprecate_as_int(m, "the order of derivation") - iaxis = pu._deprecate_as_int(axis, "the axis") - if cnt < 0: - raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) - - if cnt == 0: - return c - - c = np.moveaxis(c, iaxis, 0) - n = len(c) - if cnt >= n: - c = c[:1]*0 - else: - for i in range(cnt): - n = n - 1 - c *= scl - der = np.empty((n,) + c.shape[1:], dtype=c.dtype) - for j in range(n, 0, -1): - der[j - 1] = (2*j)*c[j] - c = der - c = np.moveaxis(c, 0, iaxis) - return c - - -def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): - """ - Integrate a Hermite series. - - Returns the Hermite series coefficients `c` integrated `m` times from - `lbnd` along `axis`. 
At each iteration the resulting series is - **multiplied** by `scl` and an integration constant, `k`, is added. - The scaling factor is for use in a linear change of variable. ("Buyer - beware": note that, depending on what one is doing, one may want `scl` - to be the reciprocal of what one might expect; for more information, - see the Notes section below.) The argument `c` is an array of - coefficients from low to high degree along each axis, e.g., [1,2,3] - represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] - represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) + - 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. - - Parameters - ---------- - c : array_like - Array of Hermite series coefficients. If c is multidimensional the - different axis correspond to different variables with the degree in - each axis given by the corresponding index. - m : int, optional - Order of integration, must be positive. (Default: 1) - k : {[], list, scalar}, optional - Integration constant(s). The value of the first integral at - ``lbnd`` is the first value in the list, the value of the second - integral at ``lbnd`` is the second value, etc. If ``k == []`` (the - default), all constants are set to zero. If ``m == 1``, a single - scalar can be given instead of a list. - lbnd : scalar, optional - The lower bound of the integral. (Default: 0) - scl : scalar, optional - Following each integration the result is *multiplied* by `scl` - before the integration constant is added. (Default: 1) - axis : int, optional - Axis over which the integral is taken. (Default: 0). - - .. versionadded:: 1.7.0 - - Returns - ------- - S : ndarray - Hermite series coefficients of the integral. - - Raises - ------ - ValueError - If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or - ``np.ndim(scl) != 0``. - - See Also - -------- - hermder - - Notes - ----- - Note that the result of each integration is *multiplied* by `scl`. - Why is this important to note? 
Say one is making a linear change of - variable :math:`u = ax + b` in an integral relative to `x`. Then - :math:`dx = du/a`, so one will need to set `scl` equal to - :math:`1/a` - perhaps not what one would have first thought. - - Also note that, in general, the result of integrating a C-series needs - to be "reprojected" onto the C-series basis set. Thus, typically, - the result of this function is "unintuitive," albeit correct; see - Examples section below. - - Examples - -------- - >>> from numpy.polynomial.hermite import hermint - >>> hermint([1,2,3]) # integrate once, value 0 at 0. - array([1. , 0.5, 0.5, 0.5]) - >>> hermint([1,2,3], m=2) # integrate twice, value & deriv 0 at 0 - array([-0.5 , 0.5 , 0.125 , 0.08333333, 0.0625 ]) # may vary - >>> hermint([1,2,3], k=1) # integrate once, value 1 at 0. - array([2. , 0.5, 0.5, 0.5]) - >>> hermint([1,2,3], lbnd=-1) # integrate once, value 0 at -1 - array([-2. , 0.5, 0.5, 0.5]) - >>> hermint([1,2,3], m=2, k=[1,2], lbnd=-1) - array([ 1.66666667, -0.5 , 0.125 , 0.08333333, 0.0625 ]) # may vary - - """ - c = np.array(c, ndmin=1, copy=True) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - if not np.iterable(k): - k = [k] - cnt = pu._deprecate_as_int(m, "the order of integration") - iaxis = pu._deprecate_as_int(axis, "the axis") - if cnt < 0: - raise ValueError("The order of integration must be non-negative") - if len(k) > cnt: - raise ValueError("Too many integration constants") - if np.ndim(lbnd) != 0: - raise ValueError("lbnd must be a scalar.") - if np.ndim(scl) != 0: - raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) - - if cnt == 0: - return c - - c = np.moveaxis(c, iaxis, 0) - k = list(k) + [0]*(cnt - len(k)) - for i in range(cnt): - n = len(c) - c *= scl - if n == 1 and np.all(c[0] == 0): - c[0] += k[i] - else: - tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) - tmp[0] = c[0]*0 - tmp[1] = c[0]/2 - for j in range(1, n): - tmp[j + 1] = c[j]/(2*(j + 1)) - 
tmp[0] += k[i] - hermval(lbnd, tmp) - c = tmp - c = np.moveaxis(c, 0, iaxis) - return c - - -def hermval(x, c, tensor=True): - """ - Evaluate an Hermite series at points x. - - If `c` is of length `n + 1`, this function returns the value: - - .. math:: p(x) = c_0 * H_0(x) + c_1 * H_1(x) + ... + c_n * H_n(x) - - The parameter `x` is converted to an array only if it is a tuple or a - list, otherwise it is treated as a scalar. In either case, either `x` - or its elements must support multiplication and addition both with - themselves and with the elements of `c`. - - If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If - `c` is multidimensional, then the shape of the result depends on the - value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + - x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that - scalars have shape (,). - - Trailing zeros in the coefficients will be used in the evaluation, so - they should be avoided if efficiency is a concern. - - Parameters - ---------- - x : array_like, compatible object - If `x` is a list or tuple, it is converted to an ndarray, otherwise - it is left unchanged and treated as a scalar. In either case, `x` - or its elements must support addition and multiplication with - with themselves and with the elements of `c`. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree n are contained in c[n]. If `c` is multidimensional the - remaining indices enumerate multiple polynomials. In the two - dimensional case the coefficients may be thought of as stored in - the columns of `c`. - tensor : boolean, optional - If True, the shape of the coefficient array is extended with ones - on the right, one for each dimension of `x`. Scalars have dimension 0 - for this action. The result is that every column of coefficients in - `c` is evaluated for every element of `x`. If False, `x` is broadcast - over the columns of `c` for the evaluation. 
This keyword is useful - when `c` is multidimensional. The default value is True. - - .. versionadded:: 1.7.0 - - Returns - ------- - values : ndarray, algebra_like - The shape of the return value is described above. - - See Also - -------- - hermval2d, hermgrid2d, hermval3d, hermgrid3d - - Notes - ----- - The evaluation uses Clenshaw recursion, aka synthetic division. - - Examples - -------- - >>> from numpy.polynomial.hermite import hermval - >>> coef = [1,2,3] - >>> hermval(1, coef) - 11.0 - >>> hermval([[1,2],[3,4]], coef) - array([[ 11., 51.], - [115., 203.]]) - - """ - c = np.array(c, ndmin=1, copy=False) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - if isinstance(x, (tuple, list)): - x = np.asarray(x) - if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) - - x2 = x*2 - if len(c) == 1: - c0 = c[0] - c1 = 0 - elif len(c) == 2: - c0 = c[0] - c1 = c[1] - else: - nd = len(c) - c0 = c[-2] - c1 = c[-1] - for i in range(3, len(c) + 1): - tmp = c0 - nd = nd - 1 - c0 = c[-i] - c1*(2*(nd - 1)) - c1 = tmp + c1*x2 - return c0 + c1*x2 - - -def hermval2d(x, y, c): - """ - Evaluate a 2-D Hermite series at points (x, y). - - This function returns the values: - - .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * H_i(x) * H_j(y) - - The parameters `x` and `y` are converted to arrays only if they are - tuples or a lists, otherwise they are treated as a scalars and they - must have the same shape after conversion. In either case, either `x` - and `y` or their elements must support multiplication and addition both - with themselves and with the elements of `c`. - - If `c` is a 1-D array a one is implicitly appended to its shape to make - it 2-D. The shape of the result will be c.shape[2:] + x.shape. - - Parameters - ---------- - x, y : array_like, compatible objects - The two dimensional series is evaluated at the points `(x, y)`, - where `x` and `y` must have the same shape. 
If `x` or `y` is a list - or tuple, it is first converted to an ndarray, otherwise it is left - unchanged and if it isn't an ndarray it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term - of multi-degree i,j is contained in ``c[i,j]``. If `c` has - dimension greater than two the remaining indices enumerate multiple - sets of coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional polynomial at points formed with - pairs of corresponding values from `x` and `y`. - - See Also - -------- - hermval, hermgrid2d, hermval3d, hermgrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._valnd(hermval, c, x, y) - - -def hermgrid2d(x, y, c): - """ - Evaluate a 2-D Hermite series on the Cartesian product of x and y. - - This function returns the values: - - .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b) - - where the points `(a, b)` consist of all pairs formed by taking - `a` from `x` and `b` from `y`. The resulting points form a grid with - `x` in the first dimension and `y` in the second. - - The parameters `x` and `y` are converted to arrays only if they are - tuples or a lists, otherwise they are treated as a scalars. In either - case, either `x` and `y` or their elements must support multiplication - and addition both with themselves and with the elements of `c`. - - If `c` has fewer than two dimensions, ones are implicitly appended to - its shape to make it 2-D. The shape of the result will be c.shape[2:] + - x.shape. - - Parameters - ---------- - x, y : array_like, compatible objects - The two dimensional series is evaluated at the points in the - Cartesian product of `x` and `y`. If `x` or `y` is a list or - tuple, it is first converted to an ndarray, otherwise it is left - unchanged and, if it isn't an ndarray, it is treated as a scalar. 
- c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree i,j are contained in ``c[i,j]``. If `c` has dimension - greater than two the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional polynomial at points in the Cartesian - product of `x` and `y`. - - See Also - -------- - hermval, hermval2d, hermval3d, hermgrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._gridnd(hermval, c, x, y) - - -def hermval3d(x, y, z, c): - """ - Evaluate a 3-D Hermite series at points (x, y, z). - - This function returns the values: - - .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * H_i(x) * H_j(y) * H_k(z) - - The parameters `x`, `y`, and `z` are converted to arrays only if - they are tuples or a lists, otherwise they are treated as a scalars and - they must have the same shape after conversion. In either case, either - `x`, `y`, and `z` or their elements must support multiplication and - addition both with themselves and with the elements of `c`. - - If `c` has fewer than 3 dimensions, ones are implicitly appended to its - shape to make it 3-D. The shape of the result will be c.shape[3:] + - x.shape. - - Parameters - ---------- - x, y, z : array_like, compatible object - The three dimensional series is evaluated at the points - `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If - any of `x`, `y`, or `z` is a list or tuple, it is first converted - to an ndarray, otherwise it is left unchanged and if it isn't an - ndarray it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term of - multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension - greater than 3 the remaining indices enumerate multiple sets of - coefficients. 
- - Returns - ------- - values : ndarray, compatible object - The values of the multidimensional polynomial on points formed with - triples of corresponding values from `x`, `y`, and `z`. - - See Also - -------- - hermval, hermval2d, hermgrid2d, hermgrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._valnd(hermval, c, x, y, z) - - -def hermgrid3d(x, y, z, c): - """ - Evaluate a 3-D Hermite series on the Cartesian product of x, y, and z. - - This function returns the values: - - .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * H_i(a) * H_j(b) * H_k(c) - - where the points `(a, b, c)` consist of all triples formed by taking - `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form - a grid with `x` in the first dimension, `y` in the second, and `z` in - the third. - - The parameters `x`, `y`, and `z` are converted to arrays only if they - are tuples or a lists, otherwise they are treated as a scalars. In - either case, either `x`, `y`, and `z` or their elements must support - multiplication and addition both with themselves and with the elements - of `c`. - - If `c` has fewer than three dimensions, ones are implicitly appended to - its shape to make it 3-D. The shape of the result will be c.shape[3:] + - x.shape + y.shape + z.shape. - - Parameters - ---------- - x, y, z : array_like, compatible objects - The three dimensional series is evaluated at the points in the - Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a - list or tuple, it is first converted to an ndarray, otherwise it is - left unchanged and, if it isn't an ndarray, it is treated as a - scalar. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree i,j are contained in ``c[i,j]``. If `c` has dimension - greater than two the remaining indices enumerate multiple sets of - coefficients. 
- - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional polynomial at points in the Cartesian - product of `x` and `y`. - - See Also - -------- - hermval, hermval2d, hermgrid2d, hermval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._gridnd(hermval, c, x, y, z) - - -def hermvander(x, deg): - """Pseudo-Vandermonde matrix of given degree. - - Returns the pseudo-Vandermonde matrix of degree `deg` and sample points - `x`. The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., i] = H_i(x), - - where `0 <= i <= deg`. The leading indices of `V` index the elements of - `x` and the last index is the degree of the Hermite polynomial. - - If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the - array ``V = hermvander(x, n)``, then ``np.dot(V, c)`` and - ``hermval(x, c)`` are the same up to roundoff. This equivalence is - useful both for least squares fitting and for the evaluation of a large - number of Hermite series of the same degree and sample points. - - Parameters - ---------- - x : array_like - Array of points. The dtype is converted to float64 or complex128 - depending on whether any of the elements are complex. If `x` is - scalar it is converted to a 1-D array. - deg : int - Degree of the resulting matrix. - - Returns - ------- - vander : ndarray - The pseudo-Vandermonde matrix. The shape of the returned matrix is - ``x.shape + (deg + 1,)``, where The last index is the degree of the - corresponding Hermite polynomial. The dtype will be the same as - the converted `x`. 
- - Examples - -------- - >>> from numpy.polynomial.hermite import hermvander - >>> x = np.array([-1, 0, 1]) - >>> hermvander(x, 3) - array([[ 1., -2., 2., 4.], - [ 1., 0., -2., -0.], - [ 1., 2., 2., -4.]]) - - """ - ideg = pu._deprecate_as_int(deg, "deg") - if ideg < 0: - raise ValueError("deg must be non-negative") - - x = np.array(x, copy=False, ndmin=1) + 0.0 - dims = (ideg + 1,) + x.shape - dtyp = x.dtype - v = np.empty(dims, dtype=dtyp) - v[0] = x*0 + 1 - if ideg > 0: - x2 = x*2 - v[1] = x2 - for i in range(2, ideg + 1): - v[i] = (v[i-1]*x2 - v[i-2]*(2*(i - 1))) - return np.moveaxis(v, 0, -1) - - -def hermvander2d(x, y, deg): - """Pseudo-Vandermonde matrix of given degrees. - - Returns the pseudo-Vandermonde matrix of degrees `deg` and sample - points `(x, y)`. The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., (deg[1] + 1)*i + j] = H_i(x) * H_j(y), - - where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of - `V` index the points `(x, y)` and the last index encodes the degrees of - the Hermite polynomials. - - If ``V = hermvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` - correspond to the elements of a 2-D coefficient array `c` of shape - (xdeg + 1, ydeg + 1) in the order - - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... - - and ``np.dot(V, c.flat)`` and ``hermval2d(x, y, c)`` will be the same - up to roundoff. This equivalence is useful both for least squares - fitting and for the evaluation of a large number of 2-D Hermite - series of the same degrees and sample points. - - Parameters - ---------- - x, y : array_like - Arrays of point coordinates, all of the same shape. The dtypes - will be converted to either float64 or complex128 depending on - whether any of the elements are complex. Scalars are converted to 1-D - arrays. - deg : list of ints - List of maximum degrees of the form [x_deg, y_deg]. 
- - Returns - ------- - vander2d : ndarray - The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg([1]+1)`. The dtype will be the same - as the converted `x` and `y`. - - See Also - -------- - hermvander, hermvander3d, hermval2d, hermval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._vander_nd_flat((hermvander, hermvander), (x, y), deg) - - -def hermvander3d(x, y, z, deg): - """Pseudo-Vandermonde matrix of given degrees. - - Returns the pseudo-Vandermonde matrix of degrees `deg` and sample - points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`, - then The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = H_i(x)*H_j(y)*H_k(z), - - where `0 <= i <= l`, `0 <= j <= m`, and `0 <= j <= n`. The leading - indices of `V` index the points `(x, y, z)` and the last index encodes - the degrees of the Hermite polynomials. - - If ``V = hermvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns - of `V` correspond to the elements of a 3-D coefficient array `c` of - shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order - - .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... - - and ``np.dot(V, c.flat)`` and ``hermval3d(x, y, z, c)`` will be the - same up to roundoff. This equivalence is useful both for least squares - fitting and for the evaluation of a large number of 3-D Hermite - series of the same degrees and sample points. - - Parameters - ---------- - x, y, z : array_like - Arrays of point coordinates, all of the same shape. The dtypes will - be converted to either float64 or complex128 depending on whether - any of the elements are complex. Scalars are converted to 1-D - arrays. - deg : list of ints - List of maximum degrees of the form [x_deg, y_deg, z_deg]. - - Returns - ------- - vander3d : ndarray - The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg([1]+1)*(deg[2]+1)`. 
The dtype will - be the same as the converted `x`, `y`, and `z`. - - See Also - -------- - hermvander, hermvander3d, hermval2d, hermval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._vander_nd_flat((hermvander, hermvander, hermvander), (x, y, z), deg) - - -def hermfit(x, y, deg, rcond=None, full=False, w=None): - """ - Least squares fit of Hermite series to data. - - Return the coefficients of a Hermite series of degree `deg` that is the - least squares fit to the data values `y` given at points `x`. If `y` is - 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple - fits are done, one for each column of `y`, and the resulting - coefficients are stored in the corresponding columns of a 2-D return. - The fitted polynomial(s) are in the form - - .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x), - - where `n` is `deg`. - - Parameters - ---------- - x : array_like, shape (M,) - x-coordinates of the M sample points ``(x[i], y[i])``. - y : array_like, shape (M,) or (M, K) - y-coordinates of the sample points. Several data sets of sample - points sharing the same x-coordinates can be fitted at once by - passing in a 2D-array that contains one dataset per column. - deg : int or 1-D array_like - Degree(s) of the fitting polynomials. If `deg` is a single integer - all terms up to and including the `deg`'th term are included in the - fit. For NumPy versions >= 1.11.0 a list of integers specifying the - degrees of the terms to include may be used instead. - rcond : float, optional - Relative condition number of the fit. Singular values smaller than - this relative to the largest singular value will be ignored. The - default value is len(x)*eps, where eps is the relative precision of - the float type, about 2e-16 in most cases. - full : bool, optional - Switch determining nature of return value. 
When it is False (the - default) just the coefficients are returned, when True diagnostic - information from the singular value decomposition is also returned. - w : array_like, shape (`M`,), optional - Weights. If not None, the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the - weights are chosen so that the errors of the products ``w[i]*y[i]`` - all have the same variance. The default value is None. - - Returns - ------- - coef : ndarray, shape (M,) or (M, K) - Hermite coefficients ordered from low to high. If `y` was 2-D, - the coefficients for the data in column k of `y` are in column - `k`. - - [residuals, rank, singular_values, rcond] : list - These values are only returned if `full` = True - - resid -- sum of squared residuals of the least squares fit - rank -- the numerical rank of the scaled Vandermonde matrix - sv -- singular values of the scaled Vandermonde matrix - rcond -- value of `rcond`. - - For more details, see `linalg.lstsq`. - - Warns - ----- - RankWarning - The rank of the coefficient matrix in the least-squares fit is - deficient. The warning is only raised if `full` = False. The - warnings can be turned off by - - >>> import warnings - >>> warnings.simplefilter('ignore', np.RankWarning) - - See Also - -------- - chebfit, legfit, lagfit, polyfit, hermefit - hermval : Evaluates a Hermite series. - hermvander : Vandermonde matrix of Hermite series. - hermweight : Hermite weight function - linalg.lstsq : Computes a least-squares fit from the matrix. - scipy.interpolate.UnivariateSpline : Computes spline fits. - - Notes - ----- - The solution is the coefficients of the Hermite series `p` that - minimizes the sum of the weighted squared errors - - .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, - - where the :math:`w_j` are the weights. This problem is solved by - setting up the (typically) overdetermined matrix equation - - .. 
math:: V(x) * c = w * y, - - where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the - coefficients to be solved for, `w` are the weights, `y` are the - observed values. This equation is then solved using the singular value - decomposition of `V`. - - If some of the singular values of `V` are so small that they are - neglected, then a `RankWarning` will be issued. This means that the - coefficient values may be poorly determined. Using a lower order fit - will usually get rid of the warning. The `rcond` parameter can also be - set to a value smaller than its default, but the resulting fit may be - spurious and have large contributions from roundoff error. - - Fits using Hermite series are probably most useful when the data can be - approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Hermite - weight. In that case the weight ``sqrt(w(x[i])`` should be used - together with data values ``y[i]/sqrt(w(x[i])``. The weight function is - available as `hermweight`. - - References - ---------- - .. [1] Wikipedia, "Curve fitting", - https://en.wikipedia.org/wiki/Curve_fitting - - Examples - -------- - >>> from numpy.polynomial.hermite import hermfit, hermval - >>> x = np.linspace(-10, 10) - >>> err = np.random.randn(len(x))/10 - >>> y = hermval(x, [1, 2, 3]) + err - >>> hermfit(x, y, 2) - array([1.0218, 1.9986, 2.9999]) # may vary - - """ - return pu._fit(hermvander, x, y, deg, rcond, full, w) - - -def hermcompanion(c): - """Return the scaled companion matrix of c. - - The basis polynomials are scaled so that the companion matrix is - symmetric when `c` is an Hermite basis polynomial. This provides - better eigenvalue estimates than the unscaled case and for basis - polynomials the eigenvalues are guaranteed to be real if - `numpy.linalg.eigvalsh` is used to obtain them. - - Parameters - ---------- - c : array_like - 1-D array of Hermite series coefficients ordered from low to high - degree. 
- - Returns - ------- - mat : ndarray - Scaled companion matrix of dimensions (deg, deg). - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - if len(c) < 2: - raise ValueError('Series must have maximum degree of at least 1.') - if len(c) == 2: - return np.array([[-.5*c[0]/c[1]]]) - - n = len(c) - 1 - mat = np.zeros((n, n), dtype=c.dtype) - scl = np.hstack((1., 1./np.sqrt(2.*np.arange(n - 1, 0, -1)))) - scl = np.multiply.accumulate(scl)[::-1] - top = mat.reshape(-1)[1::n+1] - bot = mat.reshape(-1)[n::n+1] - top[...] = np.sqrt(.5*np.arange(1, n)) - bot[...] = top - mat[:, -1] -= scl*c[:-1]/(2.0*c[-1]) - return mat - - -def hermroots(c): - """ - Compute the roots of a Hermite series. - - Return the roots (a.k.a. "zeros") of the polynomial - - .. math:: p(x) = \\sum_i c[i] * H_i(x). - - Parameters - ---------- - c : 1-D array_like - 1-D array of coefficients. - - Returns - ------- - out : ndarray - Array of the roots of the series. If all the roots are real, - then `out` is also real, otherwise it is complex. - - See Also - -------- - polyroots, legroots, lagroots, chebroots, hermeroots - - Notes - ----- - The root estimates are obtained as the eigenvalues of the companion - matrix, Roots far from the origin of the complex plane may have large - errors due to the numerical instability of the series for such - values. Roots with multiplicity greater than 1 will also show larger - errors as the value of the series near such points is relatively - insensitive to errors in the roots. Isolated roots near the origin can - be improved by a few iterations of Newton's method. - - The Hermite series basis polynomials aren't powers of `x` so the - results of this function may seem unintuitive. - - Examples - -------- - >>> from numpy.polynomial.hermite import hermroots, hermfromroots - >>> coef = hermfromroots([-1, 0, 1]) - >>> coef - array([0. , 0.25 , 0. 
, 0.125]) - >>> hermroots(coef) - array([-1.00000000e+00, -1.38777878e-17, 1.00000000e+00]) - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - if len(c) <= 1: - return np.array([], dtype=c.dtype) - if len(c) == 2: - return np.array([-.5*c[0]/c[1]]) - - # rotated companion matrix reduces error - m = hermcompanion(c)[::-1,::-1] - r = la.eigvals(m) - r.sort() - return r - - -def _normed_hermite_n(x, n): - """ - Evaluate a normalized Hermite polynomial. - - Compute the value of the normalized Hermite polynomial of degree ``n`` - at the points ``x``. - - - Parameters - ---------- - x : ndarray of double. - Points at which to evaluate the function - n : int - Degree of the normalized Hermite function to be evaluated. - - Returns - ------- - values : ndarray - The shape of the return value is described above. - - Notes - ----- - .. versionadded:: 1.10.0 - - This function is needed for finding the Gauss points and integration - weights for high degrees. The values of the standard Hermite functions - overflow when n >= 207. - - """ - if n == 0: - return np.full(x.shape, 1/np.sqrt(np.sqrt(np.pi))) - - c0 = 0. - c1 = 1./np.sqrt(np.sqrt(np.pi)) - nd = float(n) - for i in range(n - 1): - tmp = c0 - c0 = -c1*np.sqrt((nd - 1.)/nd) - c1 = tmp + c1*x*np.sqrt(2./nd) - nd = nd - 1.0 - return c0 + c1*x*np.sqrt(2) - - -def hermgauss(deg): - """ - Gauss-Hermite quadrature. - - Computes the sample points and weights for Gauss-Hermite quadrature. - These sample points and weights will correctly integrate polynomials of - degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]` - with the weight function :math:`f(x) = \\exp(-x^2)`. - - Parameters - ---------- - deg : int - Number of sample points and weights. It must be >= 1. - - Returns - ------- - x : ndarray - 1-D ndarray containing the sample points. - y : ndarray - 1-D ndarray containing the weights. - - Notes - ----- - - .. 
versionadded:: 1.7.0 - - The results have only been tested up to degree 100, higher degrees may - be problematic. The weights are determined by using the fact that - - .. math:: w_k = c / (H'_n(x_k) * H_{n-1}(x_k)) - - where :math:`c` is a constant independent of :math:`k` and :math:`x_k` - is the k'th root of :math:`H_n`, and then scaling the results to get - the right value when integrating 1. - - """ - ideg = pu._deprecate_as_int(deg, "deg") - if ideg <= 0: - raise ValueError("deg must be a positive integer") - - # first approximation of roots. We use the fact that the companion - # matrix is symmetric in this case in order to obtain better zeros. - c = np.array([0]*deg + [1], dtype=np.float64) - m = hermcompanion(c) - x = la.eigvalsh(m) - - # improve roots by one application of Newton - dy = _normed_hermite_n(x, ideg) - df = _normed_hermite_n(x, ideg - 1) * np.sqrt(2*ideg) - x -= dy/df - - # compute the weights. We scale the factor to avoid possible numerical - # overflow. - fm = _normed_hermite_n(x, ideg - 1) - fm /= np.abs(fm).max() - w = 1/(fm * fm) - - # for Hermite we can also symmetrize - w = (w + w[::-1])/2 - x = (x - x[::-1])/2 - - # scale w to get the right value - w *= np.sqrt(np.pi) / w.sum() - - return x, w - - -def hermweight(x): - """ - Weight function of the Hermite polynomials. - - The weight function is :math:`\\exp(-x^2)` and the interval of - integration is :math:`[-\\inf, \\inf]`. the Hermite polynomials are - orthogonal, but not normalized, with respect to this weight function. - - Parameters - ---------- - x : array_like - Values at which the weight function will be computed. - - Returns - ------- - w : ndarray - The weight function at `x`. - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - w = np.exp(-x**2) - return w - - -# -# Hermite series class -# - -class Hermite(ABCPolyBase): - """An Hermite series class. 
- - The Hermite class provides the standard Python numerical methods - '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the - attributes and methods listed in the `ABCPolyBase` documentation. - - Parameters - ---------- - coef : array_like - Hermite coefficients in order of increasing degree, i.e, - ``(1, 2, 3)`` gives ``1*H_0(x) + 2*H_1(X) + 3*H_2(x)``. - domain : (2,) array_like, optional - Domain to use. The interval ``[domain[0], domain[1]]`` is mapped - to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [-1, 1]. - window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [-1, 1]. - - .. versionadded:: 1.6.0 - - """ - # Virtual Functions - _add = staticmethod(hermadd) - _sub = staticmethod(hermsub) - _mul = staticmethod(hermmul) - _div = staticmethod(hermdiv) - _pow = staticmethod(hermpow) - _val = staticmethod(hermval) - _int = staticmethod(hermint) - _der = staticmethod(hermder) - _fit = staticmethod(hermfit) - _line = staticmethod(hermline) - _roots = staticmethod(hermroots) - _fromroots = staticmethod(hermfromroots) - - # Virtual properties - nickname = 'herm' - domain = np.array(hermdomain) - window = np.array(hermdomain) - basis_name = 'H' diff --git a/venv/lib/python3.7/site-packages/numpy/polynomial/hermite_e.py b/venv/lib/python3.7/site-packages/numpy/polynomial/hermite_e.py deleted file mode 100644 index c5a0a05..0000000 --- a/venv/lib/python3.7/site-packages/numpy/polynomial/hermite_e.py +++ /dev/null @@ -1,1661 +0,0 @@ -""" -Objects for dealing with Hermite_e series. - -This module provides a number of objects (mostly functions) useful for -dealing with Hermite_e series, including a `HermiteE` class that -encapsulates the usual arithmetic operations. (General information -on how this module represents and works with such polynomials is in the -docstring for its "parent" sub-package, `numpy.polynomial`). 
- -Constants ---------- -- `hermedomain` -- Hermite_e series default domain, [-1,1]. -- `hermezero` -- Hermite_e series that evaluates identically to 0. -- `hermeone` -- Hermite_e series that evaluates identically to 1. -- `hermex` -- Hermite_e series for the identity map, ``f(x) = x``. - -Arithmetic ----------- -- `hermeadd` -- add two Hermite_e series. -- `hermesub` -- subtract one Hermite_e series from another. -- `hermemulx` -- multiply a Hermite_e series in ``P_i(x)`` by ``x``. -- `hermemul` -- multiply two Hermite_e series. -- `hermediv` -- divide one Hermite_e series by another. -- `hermepow` -- raise a Hermite_e series to a positive integer power. -- `hermeval` -- evaluate a Hermite_e series at given points. -- `hermeval2d` -- evaluate a 2D Hermite_e series at given points. -- `hermeval3d` -- evaluate a 3D Hermite_e series at given points. -- `hermegrid2d` -- evaluate a 2D Hermite_e series on a Cartesian product. -- `hermegrid3d` -- evaluate a 3D Hermite_e series on a Cartesian product. - -Calculus --------- -- `hermeder` -- differentiate a Hermite_e series. -- `hermeint` -- integrate a Hermite_e series. - -Misc Functions --------------- -- `hermefromroots` -- create a Hermite_e series with specified roots. -- `hermeroots` -- find the roots of a Hermite_e series. -- `hermevander` -- Vandermonde-like matrix for Hermite_e polynomials. -- `hermevander2d` -- Vandermonde-like matrix for 2D power series. -- `hermevander3d` -- Vandermonde-like matrix for 3D power series. -- `hermegauss` -- Gauss-Hermite_e quadrature, points and weights. -- `hermeweight` -- Hermite_e weight function. -- `hermecompanion` -- symmetrized companion matrix in Hermite_e form. -- `hermefit` -- least-squares fit returning a Hermite_e series. -- `hermetrim` -- trim leading coefficients from a Hermite_e series. -- `hermeline` -- Hermite_e series of given straight line. -- `herme2poly` -- convert a Hermite_e series to a polynomial. 
-- `poly2herme` -- convert a polynomial to a Hermite_e series. - -Classes -------- -- `HermiteE` -- A Hermite_e series class. - -See also --------- -`numpy.polynomial` - -""" -from __future__ import division, absolute_import, print_function - -import warnings -import numpy as np -import numpy.linalg as la -from numpy.core.multiarray import normalize_axis_index - -from . import polyutils as pu -from ._polybase import ABCPolyBase - -__all__ = [ - 'hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline', - 'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv', - 'hermepow', 'hermeval', 'hermeder', 'hermeint', 'herme2poly', - 'poly2herme', 'hermefromroots', 'hermevander', 'hermefit', 'hermetrim', - 'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermegrid2d', - 'hermegrid3d', 'hermevander2d', 'hermevander3d', 'hermecompanion', - 'hermegauss', 'hermeweight'] - -hermetrim = pu.trimcoef - - -def poly2herme(pol): - """ - poly2herme(pol) - - Convert a polynomial to a Hermite series. - - Convert an array representing the coefficients of a polynomial (relative - to the "standard" basis) ordered from lowest degree to highest, to an - array of the coefficients of the equivalent Hermite series, ordered - from lowest to highest degree. - - Parameters - ---------- - pol : array_like - 1-D array containing the polynomial coefficients - - Returns - ------- - c : ndarray - 1-D array containing the coefficients of the equivalent Hermite - series. - - See Also - -------- - herme2poly - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. 
- - Examples - -------- - >>> from numpy.polynomial.hermite_e import poly2herme - >>> poly2herme(np.arange(4)) - array([ 2., 10., 2., 3.]) - - """ - [pol] = pu.as_series([pol]) - deg = len(pol) - 1 - res = 0 - for i in range(deg, -1, -1): - res = hermeadd(hermemulx(res), pol[i]) - return res - - -def herme2poly(c): - """ - Convert a Hermite series to a polynomial. - - Convert an array representing the coefficients of a Hermite series, - ordered from lowest degree to highest, to an array of the coefficients - of the equivalent polynomial (relative to the "standard" basis) ordered - from lowest to highest degree. - - Parameters - ---------- - c : array_like - 1-D array containing the Hermite series coefficients, ordered - from lowest order term to highest. - - Returns - ------- - pol : ndarray - 1-D array containing the coefficients of the equivalent polynomial - (relative to the "standard" basis) ordered from lowest order term - to highest. - - See Also - -------- - poly2herme - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> from numpy.polynomial.hermite_e import herme2poly - >>> herme2poly([ 2., 10., 2., 3.]) - array([0., 1., 2., 3.]) - - """ - from .polynomial import polyadd, polysub, polymulx - - [c] = pu.as_series([c]) - n = len(c) - if n == 1: - return c - if n == 2: - return c - else: - c0 = c[-2] - c1 = c[-1] - # i is the current degree of c1 - for i in range(n - 1, 1, -1): - tmp = c0 - c0 = polysub(c[i - 2], c1*(i - 1)) - c1 = polyadd(tmp, polymulx(c1)) - return polyadd(c0, polymulx(c1)) - -# -# These are constant arrays are of integer type so as to be compatible -# with the widest range of other types, such as Decimal. -# - -# Hermite -hermedomain = np.array([-1, 1]) - -# Hermite coefficients representing zero. -hermezero = np.array([0]) - -# Hermite coefficients representing one. 
-hermeone = np.array([1]) - -# Hermite coefficients representing the identity x. -hermex = np.array([0, 1]) - - -def hermeline(off, scl): - """ - Hermite series whose graph is a straight line. - - - - Parameters - ---------- - off, scl : scalars - The specified line is given by ``off + scl*x``. - - Returns - ------- - y : ndarray - This module's representation of the Hermite series for - ``off + scl*x``. - - See Also - -------- - polyline, chebline - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermeline - >>> from numpy.polynomial.hermite_e import hermeline, hermeval - >>> hermeval(0,hermeline(3, 2)) - 3.0 - >>> hermeval(1,hermeline(3, 2)) - 5.0 - - """ - if scl != 0: - return np.array([off, scl]) - else: - return np.array([off]) - - -def hermefromroots(roots): - """ - Generate a HermiteE series with given roots. - - The function returns the coefficients of the polynomial - - .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), - - in HermiteE form, where the `r_n` are the roots specified in `roots`. - If a zero has multiplicity n, then it must appear in `roots` n times. - For instance, if 2 is a root of multiplicity three and 3 is a root of - multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The - roots can appear in any order. - - If the returned coefficients are `c`, then - - .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x) - - The coefficient of the last term is not generally 1 for monic - polynomials in HermiteE form. - - Parameters - ---------- - roots : array_like - Sequence containing the roots. - - Returns - ------- - out : ndarray - 1-D array of coefficients. If all roots are real then `out` is a - real array, if some of the roots are complex, then `out` is complex - even if all the coefficients in the result are real (see Examples - below). 
- - See Also - -------- - polyfromroots, legfromroots, lagfromroots, hermfromroots, chebfromroots - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermefromroots, hermeval - >>> coef = hermefromroots((-1, 0, 1)) - >>> hermeval((-1, 0, 1), coef) - array([0., 0., 0.]) - >>> coef = hermefromroots((-1j, 1j)) - >>> hermeval((-1j, 1j), coef) - array([0.+0.j, 0.+0.j]) - - """ - return pu._fromroots(hermeline, hermemul, roots) - - -def hermeadd(c1, c2): - """ - Add one Hermite series to another. - - Returns the sum of two Hermite series `c1` + `c2`. The arguments - are sequences of coefficients ordered from lowest order term to - highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Hermite series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the Hermite series of their sum. - - See Also - -------- - hermesub, hermemulx, hermemul, hermediv, hermepow - - Notes - ----- - Unlike multiplication, division, etc., the sum of two Hermite series - is a Hermite series (without having to "reproject" the result onto - the basis set) so addition, just like that of "standard" polynomials, - is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermeadd - >>> hermeadd([1, 2, 3], [1, 2, 3, 4]) - array([2., 4., 6., 4.]) - - """ - return pu._add(c1, c2) - - -def hermesub(c1, c2): - """ - Subtract one Hermite series from another. - - Returns the difference of two Hermite series `c1` - `c2`. The - sequences of coefficients are from lowest order term to highest, i.e., - [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Hermite series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Hermite series coefficients representing their difference. 
- - See Also - -------- - hermeadd, hermemulx, hermemul, hermediv, hermepow - - Notes - ----- - Unlike multiplication, division, etc., the difference of two Hermite - series is a Hermite series (without having to "reproject" the result - onto the basis set) so subtraction, just like that of "standard" - polynomials, is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermesub - >>> hermesub([1, 2, 3, 4], [1, 2, 3]) - array([0., 0., 0., 4.]) - - """ - return pu._sub(c1, c2) - - -def hermemulx(c): - """Multiply a Hermite series by x. - - Multiply the Hermite series `c` by x, where x is the independent - variable. - - - Parameters - ---------- - c : array_like - 1-D array of Hermite series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the result of the multiplication. - - Notes - ----- - The multiplication uses the recursion relationship for Hermite - polynomials in the form - - .. math:: - - xP_i(x) = (P_{i + 1}(x) + iP_{i - 1}(x))) - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermemulx - >>> hermemulx([1, 2, 3]) - array([2., 7., 2., 3.]) - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - # The zero series needs special treatment - if len(c) == 1 and c[0] == 0: - return c - - prd = np.empty(len(c) + 1, dtype=c.dtype) - prd[0] = c[0]*0 - prd[1] = c[0] - for i in range(1, len(c)): - prd[i + 1] = c[i] - prd[i - 1] += c[i]*i - return prd - - -def hermemul(c1, c2): - """ - Multiply one Hermite series by another. - - Returns the product of two Hermite series `c1` * `c2`. The arguments - are sequences of coefficients, from lowest order "term" to highest, - e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Hermite series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Hermite series coefficients representing their product. 
- - See Also - -------- - hermeadd, hermesub, hermemulx, hermediv, hermepow - - Notes - ----- - In general, the (polynomial) product of two C-series results in terms - that are not in the Hermite polynomial basis set. Thus, to express - the product as a Hermite series, it is necessary to "reproject" the - product onto said basis set, which may produce "unintuitive" (but - correct) results; see Examples section below. - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermemul - >>> hermemul([1, 2, 3], [0, 1, 2]) - array([14., 15., 28., 7., 6.]) - - """ - # s1, s2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - - if len(c1) > len(c2): - c = c2 - xs = c1 - else: - c = c1 - xs = c2 - - if len(c) == 1: - c0 = c[0]*xs - c1 = 0 - elif len(c) == 2: - c0 = c[0]*xs - c1 = c[1]*xs - else: - nd = len(c) - c0 = c[-2]*xs - c1 = c[-1]*xs - for i in range(3, len(c) + 1): - tmp = c0 - nd = nd - 1 - c0 = hermesub(c[-i]*xs, c1*(nd - 1)) - c1 = hermeadd(tmp, hermemulx(c1)) - return hermeadd(c0, hermemulx(c1)) - - -def hermediv(c1, c2): - """ - Divide one Hermite series by another. - - Returns the quotient-with-remainder of two Hermite series - `c1` / `c2`. The arguments are sequences of coefficients from lowest - order "term" to highest, e.g., [1,2,3] represents the series - ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Hermite series coefficients ordered from low to - high. - - Returns - ------- - [quo, rem] : ndarrays - Of Hermite series coefficients representing the quotient and - remainder. - - See Also - -------- - hermeadd, hermesub, hermemulx, hermemul, hermepow - - Notes - ----- - In general, the (polynomial) division of one Hermite series by another - results in quotient and remainder terms that are not in the Hermite - polynomial basis set. 
Thus, to express these results as a Hermite - series, it is necessary to "reproject" the results onto the Hermite - basis set, which may produce "unintuitive" (but correct) results; see - Examples section below. - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermediv - >>> hermediv([ 14., 15., 28., 7., 6.], [0, 1, 2]) - (array([1., 2., 3.]), array([0.])) - >>> hermediv([ 15., 17., 28., 7., 6.], [0, 1, 2]) - (array([1., 2., 3.]), array([1., 2.])) - - """ - return pu._div(hermemul, c1, c2) - - -def hermepow(c, pow, maxpower=16): - """Raise a Hermite series to a power. - - Returns the Hermite series `c` raised to the power `pow`. The - argument `c` is a sequence of coefficients ordered from low to high. - i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` - - Parameters - ---------- - c : array_like - 1-D array of Hermite series coefficients ordered from low to - high. - pow : integer - Power to which the series will be raised - maxpower : integer, optional - Maximum power allowed. This is mainly to limit growth of the series - to unmanageable size. Default is 16 - - Returns - ------- - coef : ndarray - Hermite series of power. - - See Also - -------- - hermeadd, hermesub, hermemulx, hermemul, hermediv - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermepow - >>> hermepow([1, 2, 3], 2) - array([23., 28., 46., 12., 9.]) - - """ - return pu._pow(hermemul, c, pow, maxpower) - - -def hermeder(c, m=1, scl=1, axis=0): - """ - Differentiate a Hermite_e series. - - Returns the series coefficients `c` differentiated `m` times along - `axis`. At each iteration the result is multiplied by `scl` (the - scaling factor is for use in a linear change of variable). 
The argument - `c` is an array of coefficients from low to high degree along each - axis, e.g., [1,2,3] represents the series ``1*He_0 + 2*He_1 + 3*He_2`` - while [[1,2],[1,2]] represents ``1*He_0(x)*He_0(y) + 1*He_1(x)*He_0(y) - + 2*He_0(x)*He_1(y) + 2*He_1(x)*He_1(y)`` if axis=0 is ``x`` and axis=1 - is ``y``. - - Parameters - ---------- - c : array_like - Array of Hermite_e series coefficients. If `c` is multidimensional - the different axis correspond to different variables with the - degree in each axis given by the corresponding index. - m : int, optional - Number of derivatives taken, must be non-negative. (Default: 1) - scl : scalar, optional - Each differentiation is multiplied by `scl`. The end result is - multiplication by ``scl**m``. This is for use in a linear change of - variable. (Default: 1) - axis : int, optional - Axis over which the derivative is taken. (Default: 0). - - .. versionadded:: 1.7.0 - - Returns - ------- - der : ndarray - Hermite series of the derivative. - - See Also - -------- - hermeint - - Notes - ----- - In general, the result of differentiating a Hermite series does not - resemble the same operation on a power series. Thus the result of this - function may be "unintuitive," albeit correct; see Examples section - below. 
- - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermeder - >>> hermeder([ 1., 1., 1., 1.]) - array([1., 2., 3.]) - >>> hermeder([-0.25, 1., 1./2., 1./3., 1./4 ], m=2) - array([1., 2., 3.]) - - """ - c = np.array(c, ndmin=1, copy=True) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - cnt = pu._deprecate_as_int(m, "the order of derivation") - iaxis = pu._deprecate_as_int(axis, "the axis") - if cnt < 0: - raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) - - if cnt == 0: - return c - - c = np.moveaxis(c, iaxis, 0) - n = len(c) - if cnt >= n: - return c[:1]*0 - else: - for i in range(cnt): - n = n - 1 - c *= scl - der = np.empty((n,) + c.shape[1:], dtype=c.dtype) - for j in range(n, 0, -1): - der[j - 1] = j*c[j] - c = der - c = np.moveaxis(c, 0, iaxis) - return c - - -def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): - """ - Integrate a Hermite_e series. - - Returns the Hermite_e series coefficients `c` integrated `m` times from - `lbnd` along `axis`. At each iteration the resulting series is - **multiplied** by `scl` and an integration constant, `k`, is added. - The scaling factor is for use in a linear change of variable. ("Buyer - beware": note that, depending on what one is doing, one may want `scl` - to be the reciprocal of what one might expect; for more information, - see the Notes section below.) The argument `c` is an array of - coefficients from low to high degree along each axis, e.g., [1,2,3] - represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] - represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) + - 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. - - Parameters - ---------- - c : array_like - Array of Hermite_e series coefficients. If c is multidimensional - the different axis correspond to different variables with the - degree in each axis given by the corresponding index. 
- m : int, optional - Order of integration, must be positive. (Default: 1) - k : {[], list, scalar}, optional - Integration constant(s). The value of the first integral at - ``lbnd`` is the first value in the list, the value of the second - integral at ``lbnd`` is the second value, etc. If ``k == []`` (the - default), all constants are set to zero. If ``m == 1``, a single - scalar can be given instead of a list. - lbnd : scalar, optional - The lower bound of the integral. (Default: 0) - scl : scalar, optional - Following each integration the result is *multiplied* by `scl` - before the integration constant is added. (Default: 1) - axis : int, optional - Axis over which the integral is taken. (Default: 0). - - .. versionadded:: 1.7.0 - - Returns - ------- - S : ndarray - Hermite_e series coefficients of the integral. - - Raises - ------ - ValueError - If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or - ``np.ndim(scl) != 0``. - - See Also - -------- - hermeder - - Notes - ----- - Note that the result of each integration is *multiplied* by `scl`. - Why is this important to note? Say one is making a linear change of - variable :math:`u = ax + b` in an integral relative to `x`. Then - :math:`dx = du/a`, so one will need to set `scl` equal to - :math:`1/a` - perhaps not what one would have first thought. - - Also note that, in general, the result of integrating a C-series needs - to be "reprojected" onto the C-series basis set. Thus, typically, - the result of this function is "unintuitive," albeit correct; see - Examples section below. - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermeint - >>> hermeint([1, 2, 3]) # integrate once, value 0 at 0. - array([1., 1., 1., 1.]) - >>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0 - array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ]) # may vary - >>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0. 
- array([2., 1., 1., 1.]) - >>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1 - array([-1., 1., 1., 1.]) - >>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1) - array([ 1.83333333, 0. , 0.5 , 0.33333333, 0.25 ]) # may vary - - """ - c = np.array(c, ndmin=1, copy=True) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - if not np.iterable(k): - k = [k] - cnt = pu._deprecate_as_int(m, "the order of integration") - iaxis = pu._deprecate_as_int(axis, "the axis") - if cnt < 0: - raise ValueError("The order of integration must be non-negative") - if len(k) > cnt: - raise ValueError("Too many integration constants") - if np.ndim(lbnd) != 0: - raise ValueError("lbnd must be a scalar.") - if np.ndim(scl) != 0: - raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) - - if cnt == 0: - return c - - c = np.moveaxis(c, iaxis, 0) - k = list(k) + [0]*(cnt - len(k)) - for i in range(cnt): - n = len(c) - c *= scl - if n == 1 and np.all(c[0] == 0): - c[0] += k[i] - else: - tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) - tmp[0] = c[0]*0 - tmp[1] = c[0] - for j in range(1, n): - tmp[j + 1] = c[j]/(j + 1) - tmp[0] += k[i] - hermeval(lbnd, tmp) - c = tmp - c = np.moveaxis(c, 0, iaxis) - return c - - -def hermeval(x, c, tensor=True): - """ - Evaluate an HermiteE series at points x. - - If `c` is of length `n + 1`, this function returns the value: - - .. math:: p(x) = c_0 * He_0(x) + c_1 * He_1(x) + ... + c_n * He_n(x) - - The parameter `x` is converted to an array only if it is a tuple or a - list, otherwise it is treated as a scalar. In either case, either `x` - or its elements must support multiplication and addition both with - themselves and with the elements of `c`. - - If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If - `c` is multidimensional, then the shape of the result depends on the - value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + - x.shape. 
If `tensor` is false the shape will be c.shape[1:]. Note that - scalars have shape (,). - - Trailing zeros in the coefficients will be used in the evaluation, so - they should be avoided if efficiency is a concern. - - Parameters - ---------- - x : array_like, compatible object - If `x` is a list or tuple, it is converted to an ndarray, otherwise - it is left unchanged and treated as a scalar. In either case, `x` - or its elements must support addition and multiplication with - with themselves and with the elements of `c`. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree n are contained in c[n]. If `c` is multidimensional the - remaining indices enumerate multiple polynomials. In the two - dimensional case the coefficients may be thought of as stored in - the columns of `c`. - tensor : boolean, optional - If True, the shape of the coefficient array is extended with ones - on the right, one for each dimension of `x`. Scalars have dimension 0 - for this action. The result is that every column of coefficients in - `c` is evaluated for every element of `x`. If False, `x` is broadcast - over the columns of `c` for the evaluation. This keyword is useful - when `c` is multidimensional. The default value is True. - - .. versionadded:: 1.7.0 - - Returns - ------- - values : ndarray, algebra_like - The shape of the return value is described above. - - See Also - -------- - hermeval2d, hermegrid2d, hermeval3d, hermegrid3d - - Notes - ----- - The evaluation uses Clenshaw recursion, aka synthetic division. 
- - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermeval - >>> coef = [1,2,3] - >>> hermeval(1, coef) - 3.0 - >>> hermeval([[1,2],[3,4]], coef) - array([[ 3., 14.], - [31., 54.]]) - - """ - c = np.array(c, ndmin=1, copy=False) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - if isinstance(x, (tuple, list)): - x = np.asarray(x) - if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) - - if len(c) == 1: - c0 = c[0] - c1 = 0 - elif len(c) == 2: - c0 = c[0] - c1 = c[1] - else: - nd = len(c) - c0 = c[-2] - c1 = c[-1] - for i in range(3, len(c) + 1): - tmp = c0 - nd = nd - 1 - c0 = c[-i] - c1*(nd - 1) - c1 = tmp + c1*x - return c0 + c1*x - - -def hermeval2d(x, y, c): - """ - Evaluate a 2-D HermiteE series at points (x, y). - - This function returns the values: - - .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * He_i(x) * He_j(y) - - The parameters `x` and `y` are converted to arrays only if they are - tuples or a lists, otherwise they are treated as a scalars and they - must have the same shape after conversion. In either case, either `x` - and `y` or their elements must support multiplication and addition both - with themselves and with the elements of `c`. - - If `c` is a 1-D array a one is implicitly appended to its shape to make - it 2-D. The shape of the result will be c.shape[2:] + x.shape. - - Parameters - ---------- - x, y : array_like, compatible objects - The two dimensional series is evaluated at the points `(x, y)`, - where `x` and `y` must have the same shape. If `x` or `y` is a list - or tuple, it is first converted to an ndarray, otherwise it is left - unchanged and if it isn't an ndarray it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term - of multi-degree i,j is contained in ``c[i,j]``. If `c` has - dimension greater than two the remaining indices enumerate multiple - sets of coefficients. 
- - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional polynomial at points formed with - pairs of corresponding values from `x` and `y`. - - See Also - -------- - hermeval, hermegrid2d, hermeval3d, hermegrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._valnd(hermeval, c, x, y) - - -def hermegrid2d(x, y, c): - """ - Evaluate a 2-D HermiteE series on the Cartesian product of x and y. - - This function returns the values: - - .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b) - - where the points `(a, b)` consist of all pairs formed by taking - `a` from `x` and `b` from `y`. The resulting points form a grid with - `x` in the first dimension and `y` in the second. - - The parameters `x` and `y` are converted to arrays only if they are - tuples or a lists, otherwise they are treated as a scalars. In either - case, either `x` and `y` or their elements must support multiplication - and addition both with themselves and with the elements of `c`. - - If `c` has fewer than two dimensions, ones are implicitly appended to - its shape to make it 2-D. The shape of the result will be c.shape[2:] + - x.shape. - - Parameters - ---------- - x, y : array_like, compatible objects - The two dimensional series is evaluated at the points in the - Cartesian product of `x` and `y`. If `x` or `y` is a list or - tuple, it is first converted to an ndarray, otherwise it is left - unchanged and, if it isn't an ndarray, it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree i,j are contained in ``c[i,j]``. If `c` has dimension - greater than two the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional polynomial at points in the Cartesian - product of `x` and `y`. 
- - See Also - -------- - hermeval, hermeval2d, hermeval3d, hermegrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._gridnd(hermeval, c, x, y) - - -def hermeval3d(x, y, z, c): - """ - Evaluate a 3-D Hermite_e series at points (x, y, z). - - This function returns the values: - - .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * He_i(x) * He_j(y) * He_k(z) - - The parameters `x`, `y`, and `z` are converted to arrays only if - they are tuples or a lists, otherwise they are treated as a scalars and - they must have the same shape after conversion. In either case, either - `x`, `y`, and `z` or their elements must support multiplication and - addition both with themselves and with the elements of `c`. - - If `c` has fewer than 3 dimensions, ones are implicitly appended to its - shape to make it 3-D. The shape of the result will be c.shape[3:] + - x.shape. - - Parameters - ---------- - x, y, z : array_like, compatible object - The three dimensional series is evaluated at the points - `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If - any of `x`, `y`, or `z` is a list or tuple, it is first converted - to an ndarray, otherwise it is left unchanged and if it isn't an - ndarray it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term of - multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension - greater than 3 the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the multidimensional polynomial on points formed with - triples of corresponding values from `x`, `y`, and `z`. - - See Also - -------- - hermeval, hermeval2d, hermegrid2d, hermegrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._valnd(hermeval, c, x, y, z) - - -def hermegrid3d(x, y, z, c): - """ - Evaluate a 3-D HermiteE series on the Cartesian product of x, y, and z. 
- - This function returns the values: - - .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * He_i(a) * He_j(b) * He_k(c) - - where the points `(a, b, c)` consist of all triples formed by taking - `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form - a grid with `x` in the first dimension, `y` in the second, and `z` in - the third. - - The parameters `x`, `y`, and `z` are converted to arrays only if they - are tuples or a lists, otherwise they are treated as a scalars. In - either case, either `x`, `y`, and `z` or their elements must support - multiplication and addition both with themselves and with the elements - of `c`. - - If `c` has fewer than three dimensions, ones are implicitly appended to - its shape to make it 3-D. The shape of the result will be c.shape[3:] + - x.shape + y.shape + z.shape. - - Parameters - ---------- - x, y, z : array_like, compatible objects - The three dimensional series is evaluated at the points in the - Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a - list or tuple, it is first converted to an ndarray, otherwise it is - left unchanged and, if it isn't an ndarray, it is treated as a - scalar. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree i,j are contained in ``c[i,j]``. If `c` has dimension - greater than two the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional polynomial at points in the Cartesian - product of `x` and `y`. - - See Also - -------- - hermeval, hermeval2d, hermegrid2d, hermeval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._gridnd(hermeval, c, x, y, z) - - -def hermevander(x, deg): - """Pseudo-Vandermonde matrix of given degree. - - Returns the pseudo-Vandermonde matrix of degree `deg` and sample points - `x`. The pseudo-Vandermonde matrix is defined by - - .. 
math:: V[..., i] = He_i(x), - - where `0 <= i <= deg`. The leading indices of `V` index the elements of - `x` and the last index is the degree of the HermiteE polynomial. - - If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the - array ``V = hermevander(x, n)``, then ``np.dot(V, c)`` and - ``hermeval(x, c)`` are the same up to roundoff. This equivalence is - useful both for least squares fitting and for the evaluation of a large - number of HermiteE series of the same degree and sample points. - - Parameters - ---------- - x : array_like - Array of points. The dtype is converted to float64 or complex128 - depending on whether any of the elements are complex. If `x` is - scalar it is converted to a 1-D array. - deg : int - Degree of the resulting matrix. - - Returns - ------- - vander : ndarray - The pseudo-Vandermonde matrix. The shape of the returned matrix is - ``x.shape + (deg + 1,)``, where The last index is the degree of the - corresponding HermiteE polynomial. The dtype will be the same as - the converted `x`. - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermevander - >>> x = np.array([-1, 0, 1]) - >>> hermevander(x, 3) - array([[ 1., -1., 0., 2.], - [ 1., 0., -1., -0.], - [ 1., 1., 0., -2.]]) - - """ - ideg = pu._deprecate_as_int(deg, "deg") - if ideg < 0: - raise ValueError("deg must be non-negative") - - x = np.array(x, copy=False, ndmin=1) + 0.0 - dims = (ideg + 1,) + x.shape - dtyp = x.dtype - v = np.empty(dims, dtype=dtyp) - v[0] = x*0 + 1 - if ideg > 0: - v[1] = x - for i in range(2, ideg + 1): - v[i] = (v[i-1]*x - v[i-2]*(i - 1)) - return np.moveaxis(v, 0, -1) - - -def hermevander2d(x, y, deg): - """Pseudo-Vandermonde matrix of given degrees. - - Returns the pseudo-Vandermonde matrix of degrees `deg` and sample - points `(x, y)`. The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., (deg[1] + 1)*i + j] = He_i(x) * He_j(y), - - where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. 
The leading indices of - `V` index the points `(x, y)` and the last index encodes the degrees of - the HermiteE polynomials. - - If ``V = hermevander2d(x, y, [xdeg, ydeg])``, then the columns of `V` - correspond to the elements of a 2-D coefficient array `c` of shape - (xdeg + 1, ydeg + 1) in the order - - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... - - and ``np.dot(V, c.flat)`` and ``hermeval2d(x, y, c)`` will be the same - up to roundoff. This equivalence is useful both for least squares - fitting and for the evaluation of a large number of 2-D HermiteE - series of the same degrees and sample points. - - Parameters - ---------- - x, y : array_like - Arrays of point coordinates, all of the same shape. The dtypes - will be converted to either float64 or complex128 depending on - whether any of the elements are complex. Scalars are converted to - 1-D arrays. - deg : list of ints - List of maximum degrees of the form [x_deg, y_deg]. - - Returns - ------- - vander2d : ndarray - The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg([1]+1)`. The dtype will be the same - as the converted `x` and `y`. - - See Also - -------- - hermevander, hermevander3d, hermeval2d, hermeval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._vander_nd_flat((hermevander, hermevander), (x, y), deg) - - -def hermevander3d(x, y, z, deg): - """Pseudo-Vandermonde matrix of given degrees. - - Returns the pseudo-Vandermonde matrix of degrees `deg` and sample - points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`, - then Hehe pseudo-Vandermonde matrix is defined by - - .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = He_i(x)*He_j(y)*He_k(z), - - where `0 <= i <= l`, `0 <= j <= m`, and `0 <= j <= n`. The leading - indices of `V` index the points `(x, y, z)` and the last index encodes - the degrees of the HermiteE polynomials. 
- - If ``V = hermevander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns - of `V` correspond to the elements of a 3-D coefficient array `c` of - shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order - - .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... - - and ``np.dot(V, c.flat)`` and ``hermeval3d(x, y, z, c)`` will be the - same up to roundoff. This equivalence is useful both for least squares - fitting and for the evaluation of a large number of 3-D HermiteE - series of the same degrees and sample points. - - Parameters - ---------- - x, y, z : array_like - Arrays of point coordinates, all of the same shape. The dtypes will - be converted to either float64 or complex128 depending on whether - any of the elements are complex. Scalars are converted to 1-D - arrays. - deg : list of ints - List of maximum degrees of the form [x_deg, y_deg, z_deg]. - - Returns - ------- - vander3d : ndarray - The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg([1]+1)*(deg[2]+1)`. The dtype will - be the same as the converted `x`, `y`, and `z`. - - See Also - -------- - hermevander, hermevander3d, hermeval2d, hermeval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._vander_nd_flat((hermevander, hermevander, hermevander), (x, y, z), deg) - - -def hermefit(x, y, deg, rcond=None, full=False, w=None): - """ - Least squares fit of Hermite series to data. - - Return the coefficients of a HermiteE series of degree `deg` that is - the least squares fit to the data values `y` given at points `x`. If - `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D - multiple fits are done, one for each column of `y`, and the resulting - coefficients are stored in the corresponding columns of a 2-D return. - The fitted polynomial(s) are in the form - - .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x), - - where `n` is `deg`. 
- - Parameters - ---------- - x : array_like, shape (M,) - x-coordinates of the M sample points ``(x[i], y[i])``. - y : array_like, shape (M,) or (M, K) - y-coordinates of the sample points. Several data sets of sample - points sharing the same x-coordinates can be fitted at once by - passing in a 2D-array that contains one dataset per column. - deg : int or 1-D array_like - Degree(s) of the fitting polynomials. If `deg` is a single integer - all terms up to and including the `deg`'th term are included in the - fit. For NumPy versions >= 1.11.0 a list of integers specifying the - degrees of the terms to include may be used instead. - rcond : float, optional - Relative condition number of the fit. Singular values smaller than - this relative to the largest singular value will be ignored. The - default value is len(x)*eps, where eps is the relative precision of - the float type, about 2e-16 in most cases. - full : bool, optional - Switch determining nature of return value. When it is False (the - default) just the coefficients are returned, when True diagnostic - information from the singular value decomposition is also returned. - w : array_like, shape (`M`,), optional - Weights. If not None, the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the - weights are chosen so that the errors of the products ``w[i]*y[i]`` - all have the same variance. The default value is None. - - Returns - ------- - coef : ndarray, shape (M,) or (M, K) - Hermite coefficients ordered from low to high. If `y` was 2-D, - the coefficients for the data in column k of `y` are in column - `k`. - - [residuals, rank, singular_values, rcond] : list - These values are only returned if `full` = True - - resid -- sum of squared residuals of the least squares fit - rank -- the numerical rank of the scaled Vandermonde matrix - sv -- singular values of the scaled Vandermonde matrix - rcond -- value of `rcond`. - - For more details, see `linalg.lstsq`. 
- - Warns - ----- - RankWarning - The rank of the coefficient matrix in the least-squares fit is - deficient. The warning is only raised if `full` = False. The - warnings can be turned off by - - >>> import warnings - >>> warnings.simplefilter('ignore', np.RankWarning) - - See Also - -------- - chebfit, legfit, polyfit, hermfit, polyfit - hermeval : Evaluates a Hermite series. - hermevander : pseudo Vandermonde matrix of Hermite series. - hermeweight : HermiteE weight function. - linalg.lstsq : Computes a least-squares fit from the matrix. - scipy.interpolate.UnivariateSpline : Computes spline fits. - - Notes - ----- - The solution is the coefficients of the HermiteE series `p` that - minimizes the sum of the weighted squared errors - - .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, - - where the :math:`w_j` are the weights. This problem is solved by - setting up the (typically) overdetermined matrix equation - - .. math:: V(x) * c = w * y, - - where `V` is the pseudo Vandermonde matrix of `x`, the elements of `c` - are the coefficients to be solved for, and the elements of `y` are the - observed values. This equation is then solved using the singular value - decomposition of `V`. - - If some of the singular values of `V` are so small that they are - neglected, then a `RankWarning` will be issued. This means that the - coefficient values may be poorly determined. Using a lower order fit - will usually get rid of the warning. The `rcond` parameter can also be - set to a value smaller than its default, but the resulting fit may be - spurious and have large contributions from roundoff error. - - Fits using HermiteE series are probably most useful when the data can - be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the HermiteE - weight. In that case the weight ``sqrt(w(x[i])`` should be used - together with data values ``y[i]/sqrt(w(x[i])``. The weight function is - available as `hermeweight`. - - References - ---------- - .. 
[1] Wikipedia, "Curve fitting", - https://en.wikipedia.org/wiki/Curve_fitting - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermefit, hermeval - >>> x = np.linspace(-10, 10) - >>> np.random.seed(123) - >>> err = np.random.randn(len(x))/10 - >>> y = hermeval(x, [1, 2, 3]) + err - >>> hermefit(x, y, 2) - array([ 1.01690445, 1.99951418, 2.99948696]) # may vary - - """ - return pu._fit(hermevander, x, y, deg, rcond, full, w) - - -def hermecompanion(c): - """ - Return the scaled companion matrix of c. - - The basis polynomials are scaled so that the companion matrix is - symmetric when `c` is an HermiteE basis polynomial. This provides - better eigenvalue estimates than the unscaled case and for basis - polynomials the eigenvalues are guaranteed to be real if - `numpy.linalg.eigvalsh` is used to obtain them. - - Parameters - ---------- - c : array_like - 1-D array of HermiteE series coefficients ordered from low to high - degree. - - Returns - ------- - mat : ndarray - Scaled companion matrix of dimensions (deg, deg). - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - if len(c) < 2: - raise ValueError('Series must have maximum degree of at least 1.') - if len(c) == 2: - return np.array([[-c[0]/c[1]]]) - - n = len(c) - 1 - mat = np.zeros((n, n), dtype=c.dtype) - scl = np.hstack((1., 1./np.sqrt(np.arange(n - 1, 0, -1)))) - scl = np.multiply.accumulate(scl)[::-1] - top = mat.reshape(-1)[1::n+1] - bot = mat.reshape(-1)[n::n+1] - top[...] = np.sqrt(np.arange(1, n)) - bot[...] = top - mat[:, -1] -= scl*c[:-1]/c[-1] - return mat - - -def hermeroots(c): - """ - Compute the roots of a HermiteE series. - - Return the roots (a.k.a. "zeros") of the polynomial - - .. math:: p(x) = \\sum_i c[i] * He_i(x). - - Parameters - ---------- - c : 1-D array_like - 1-D array of coefficients. - - Returns - ------- - out : ndarray - Array of the roots of the series. 
If all the roots are real, - then `out` is also real, otherwise it is complex. - - See Also - -------- - polyroots, legroots, lagroots, hermroots, chebroots - - Notes - ----- - The root estimates are obtained as the eigenvalues of the companion - matrix, Roots far from the origin of the complex plane may have large - errors due to the numerical instability of the series for such - values. Roots with multiplicity greater than 1 will also show larger - errors as the value of the series near such points is relatively - insensitive to errors in the roots. Isolated roots near the origin can - be improved by a few iterations of Newton's method. - - The HermiteE series basis polynomials aren't powers of `x` so the - results of this function may seem unintuitive. - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermeroots, hermefromroots - >>> coef = hermefromroots([-1, 0, 1]) - >>> coef - array([0., 2., 0., 1.]) - >>> hermeroots(coef) - array([-1., 0., 1.]) # may vary - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - if len(c) <= 1: - return np.array([], dtype=c.dtype) - if len(c) == 2: - return np.array([-c[0]/c[1]]) - - # rotated companion matrix reduces error - m = hermecompanion(c)[::-1,::-1] - r = la.eigvals(m) - r.sort() - return r - - -def _normed_hermite_e_n(x, n): - """ - Evaluate a normalized HermiteE polynomial. - - Compute the value of the normalized HermiteE polynomial of degree ``n`` - at the points ``x``. - - - Parameters - ---------- - x : ndarray of double. - Points at which to evaluate the function - n : int - Degree of the normalized HermiteE function to be evaluated. - - Returns - ------- - values : ndarray - The shape of the return value is described above. - - Notes - ----- - .. versionadded:: 1.10.0 - - This function is needed for finding the Gauss points and integration - weights for high degrees. The values of the standard HermiteE functions - overflow when n >= 207. 
- - """ - if n == 0: - return np.full(x.shape, 1/np.sqrt(np.sqrt(2*np.pi))) - - c0 = 0. - c1 = 1./np.sqrt(np.sqrt(2*np.pi)) - nd = float(n) - for i in range(n - 1): - tmp = c0 - c0 = -c1*np.sqrt((nd - 1.)/nd) - c1 = tmp + c1*x*np.sqrt(1./nd) - nd = nd - 1.0 - return c0 + c1*x - - -def hermegauss(deg): - """ - Gauss-HermiteE quadrature. - - Computes the sample points and weights for Gauss-HermiteE quadrature. - These sample points and weights will correctly integrate polynomials of - degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]` - with the weight function :math:`f(x) = \\exp(-x^2/2)`. - - Parameters - ---------- - deg : int - Number of sample points and weights. It must be >= 1. - - Returns - ------- - x : ndarray - 1-D ndarray containing the sample points. - y : ndarray - 1-D ndarray containing the weights. - - Notes - ----- - - .. versionadded:: 1.7.0 - - The results have only been tested up to degree 100, higher degrees may - be problematic. The weights are determined by using the fact that - - .. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k)) - - where :math:`c` is a constant independent of :math:`k` and :math:`x_k` - is the k'th root of :math:`He_n`, and then scaling the results to get - the right value when integrating 1. - - """ - ideg = pu._deprecate_as_int(deg, "deg") - if ideg <= 0: - raise ValueError("deg must be a positive integer") - - # first approximation of roots. We use the fact that the companion - # matrix is symmetric in this case in order to obtain better zeros. - c = np.array([0]*deg + [1]) - m = hermecompanion(c) - x = la.eigvalsh(m) - - # improve roots by one application of Newton - dy = _normed_hermite_e_n(x, ideg) - df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg) - x -= dy/df - - # compute the weights. We scale the factor to avoid possible numerical - # overflow. 
- fm = _normed_hermite_e_n(x, ideg - 1) - fm /= np.abs(fm).max() - w = 1/(fm * fm) - - # for Hermite_e we can also symmetrize - w = (w + w[::-1])/2 - x = (x - x[::-1])/2 - - # scale w to get the right value - w *= np.sqrt(2*np.pi) / w.sum() - - return x, w - - -def hermeweight(x): - """Weight function of the Hermite_e polynomials. - - The weight function is :math:`\\exp(-x^2/2)` and the interval of - integration is :math:`[-\\inf, \\inf]`. the HermiteE polynomials are - orthogonal, but not normalized, with respect to this weight function. - - Parameters - ---------- - x : array_like - Values at which the weight function will be computed. - - Returns - ------- - w : ndarray - The weight function at `x`. - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - w = np.exp(-.5*x**2) - return w - - -# -# HermiteE series class -# - -class HermiteE(ABCPolyBase): - """An HermiteE series class. - - The HermiteE class provides the standard Python numerical methods - '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the - attributes and methods listed in the `ABCPolyBase` documentation. - - Parameters - ---------- - coef : array_like - HermiteE coefficients in order of increasing degree, i.e, - ``(1, 2, 3)`` gives ``1*He_0(x) + 2*He_1(X) + 3*He_2(x)``. - domain : (2,) array_like, optional - Domain to use. The interval ``[domain[0], domain[1]]`` is mapped - to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [-1, 1]. - window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [-1, 1]. - - .. 
versionadded:: 1.6.0 - - """ - # Virtual Functions - _add = staticmethod(hermeadd) - _sub = staticmethod(hermesub) - _mul = staticmethod(hermemul) - _div = staticmethod(hermediv) - _pow = staticmethod(hermepow) - _val = staticmethod(hermeval) - _int = staticmethod(hermeint) - _der = staticmethod(hermeder) - _fit = staticmethod(hermefit) - _line = staticmethod(hermeline) - _roots = staticmethod(hermeroots) - _fromroots = staticmethod(hermefromroots) - - # Virtual properties - nickname = 'herme' - domain = np.array(hermedomain) - window = np.array(hermedomain) - basis_name = 'He' diff --git a/venv/lib/python3.7/site-packages/numpy/polynomial/laguerre.py b/venv/lib/python3.7/site-packages/numpy/polynomial/laguerre.py deleted file mode 100644 index 538a1d4..0000000 --- a/venv/lib/python3.7/site-packages/numpy/polynomial/laguerre.py +++ /dev/null @@ -1,1618 +0,0 @@ -""" -Objects for dealing with Laguerre series. - -This module provides a number of objects (mostly functions) useful for -dealing with Laguerre series, including a `Laguerre` class that -encapsulates the usual arithmetic operations. (General information -on how this module represents and works with such polynomials is in the -docstring for its "parent" sub-package, `numpy.polynomial`). - -Constants ---------- -- `lagdomain` -- Laguerre series default domain, [-1,1]. -- `lagzero` -- Laguerre series that evaluates identically to 0. -- `lagone` -- Laguerre series that evaluates identically to 1. -- `lagx` -- Laguerre series for the identity map, ``f(x) = x``. - -Arithmetic ----------- -- `lagadd` -- add two Laguerre series. -- `lagsub` -- subtract one Laguerre series from another. -- `lagmulx` -- multiply a Laguerre series in ``P_i(x)`` by ``x``. -- `lagmul` -- multiply two Laguerre series. -- `lagdiv` -- divide one Laguerre series by another. -- `lagpow` -- raise a Laguerre series to a positive integer power. -- `lagval` -- evaluate a Laguerre series at given points. 
-- `lagval2d` -- evaluate a 2D Laguerre series at given points. -- `lagval3d` -- evaluate a 3D Laguerre series at given points. -- `laggrid2d` -- evaluate a 2D Laguerre series on a Cartesian product. -- `laggrid3d` -- evaluate a 3D Laguerre series on a Cartesian product. - -Calculus --------- -- `lagder` -- differentiate a Laguerre series. -- `lagint` -- integrate a Laguerre series. - -Misc Functions --------------- -- `lagfromroots` -- create a Laguerre series with specified roots. -- `lagroots` -- find the roots of a Laguerre series. -- `lagvander` -- Vandermonde-like matrix for Laguerre polynomials. -- `lagvander2d` -- Vandermonde-like matrix for 2D power series. -- `lagvander3d` -- Vandermonde-like matrix for 3D power series. -- `laggauss` -- Gauss-Laguerre quadrature, points and weights. -- `lagweight` -- Laguerre weight function. -- `lagcompanion` -- symmetrized companion matrix in Laguerre form. -- `lagfit` -- least-squares fit returning a Laguerre series. -- `lagtrim` -- trim leading coefficients from a Laguerre series. -- `lagline` -- Laguerre series of given straight line. -- `lag2poly` -- convert a Laguerre series to a polynomial. -- `poly2lag` -- convert a polynomial to a Laguerre series. - -Classes -------- -- `Laguerre` -- A Laguerre series class. - -See also --------- -`numpy.polynomial` - -""" -from __future__ import division, absolute_import, print_function - -import warnings -import numpy as np -import numpy.linalg as la -from numpy.core.multiarray import normalize_axis_index - -from . 
import polyutils as pu -from ._polybase import ABCPolyBase - -__all__ = [ - 'lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline', 'lagadd', - 'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagpow', 'lagval', 'lagder', - 'lagint', 'lag2poly', 'poly2lag', 'lagfromroots', 'lagvander', - 'lagfit', 'lagtrim', 'lagroots', 'Laguerre', 'lagval2d', 'lagval3d', - 'laggrid2d', 'laggrid3d', 'lagvander2d', 'lagvander3d', 'lagcompanion', - 'laggauss', 'lagweight'] - -lagtrim = pu.trimcoef - - -def poly2lag(pol): - """ - poly2lag(pol) - - Convert a polynomial to a Laguerre series. - - Convert an array representing the coefficients of a polynomial (relative - to the "standard" basis) ordered from lowest degree to highest, to an - array of the coefficients of the equivalent Laguerre series, ordered - from lowest to highest degree. - - Parameters - ---------- - pol : array_like - 1-D array containing the polynomial coefficients - - Returns - ------- - c : ndarray - 1-D array containing the coefficients of the equivalent Laguerre - series. - - See Also - -------- - lag2poly - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> from numpy.polynomial.laguerre import poly2lag - >>> poly2lag(np.arange(4)) - array([ 23., -63., 58., -18.]) - - """ - [pol] = pu.as_series([pol]) - deg = len(pol) - 1 - res = 0 - for i in range(deg, -1, -1): - res = lagadd(lagmulx(res), pol[i]) - return res - - -def lag2poly(c): - """ - Convert a Laguerre series to a polynomial. - - Convert an array representing the coefficients of a Laguerre series, - ordered from lowest degree to highest, to an array of the coefficients - of the equivalent polynomial (relative to the "standard" basis) ordered - from lowest to highest degree. - - Parameters - ---------- - c : array_like - 1-D array containing the Laguerre series coefficients, ordered - from lowest order term to highest. 
- - Returns - ------- - pol : ndarray - 1-D array containing the coefficients of the equivalent polynomial - (relative to the "standard" basis) ordered from lowest order term - to highest. - - See Also - -------- - poly2lag - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> from numpy.polynomial.laguerre import lag2poly - >>> lag2poly([ 23., -63., 58., -18.]) - array([0., 1., 2., 3.]) - - """ - from .polynomial import polyadd, polysub, polymulx - - [c] = pu.as_series([c]) - n = len(c) - if n == 1: - return c - else: - c0 = c[-2] - c1 = c[-1] - # i is the current degree of c1 - for i in range(n - 1, 1, -1): - tmp = c0 - c0 = polysub(c[i - 2], (c1*(i - 1))/i) - c1 = polyadd(tmp, polysub((2*i - 1)*c1, polymulx(c1))/i) - return polyadd(c0, polysub(c1, polymulx(c1))) - -# -# These are constant arrays are of integer type so as to be compatible -# with the widest range of other types, such as Decimal. -# - -# Laguerre -lagdomain = np.array([0, 1]) - -# Laguerre coefficients representing zero. -lagzero = np.array([0]) - -# Laguerre coefficients representing one. -lagone = np.array([1]) - -# Laguerre coefficients representing the identity x. -lagx = np.array([1, -1]) - - -def lagline(off, scl): - """ - Laguerre series whose graph is a straight line. - - - - Parameters - ---------- - off, scl : scalars - The specified line is given by ``off + scl*x``. - - Returns - ------- - y : ndarray - This module's representation of the Laguerre series for - ``off + scl*x``. - - See Also - -------- - polyline, chebline - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagline, lagval - >>> lagval(0,lagline(3, 2)) - 3.0 - >>> lagval(1,lagline(3, 2)) - 5.0 - - """ - if scl != 0: - return np.array([off + scl, -scl]) - else: - return np.array([off]) - - -def lagfromroots(roots): - """ - Generate a Laguerre series with given roots. 
- - The function returns the coefficients of the polynomial - - .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), - - in Laguerre form, where the `r_n` are the roots specified in `roots`. - If a zero has multiplicity n, then it must appear in `roots` n times. - For instance, if 2 is a root of multiplicity three and 3 is a root of - multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The - roots can appear in any order. - - If the returned coefficients are `c`, then - - .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x) - - The coefficient of the last term is not generally 1 for monic - polynomials in Laguerre form. - - Parameters - ---------- - roots : array_like - Sequence containing the roots. - - Returns - ------- - out : ndarray - 1-D array of coefficients. If all roots are real then `out` is a - real array, if some of the roots are complex, then `out` is complex - even if all the coefficients in the result are real (see Examples - below). - - See Also - -------- - polyfromroots, legfromroots, chebfromroots, hermfromroots, hermefromroots - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagfromroots, lagval - >>> coef = lagfromroots((-1, 0, 1)) - >>> lagval((-1, 0, 1), coef) - array([0., 0., 0.]) - >>> coef = lagfromroots((-1j, 1j)) - >>> lagval((-1j, 1j), coef) - array([0.+0.j, 0.+0.j]) - - """ - return pu._fromroots(lagline, lagmul, roots) - - -def lagadd(c1, c2): - """ - Add one Laguerre series to another. - - Returns the sum of two Laguerre series `c1` + `c2`. The arguments - are sequences of coefficients ordered from lowest order term to - highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Laguerre series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the Laguerre series of their sum. 
- - See Also - -------- - lagsub, lagmulx, lagmul, lagdiv, lagpow - - Notes - ----- - Unlike multiplication, division, etc., the sum of two Laguerre series - is a Laguerre series (without having to "reproject" the result onto - the basis set) so addition, just like that of "standard" polynomials, - is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagadd - >>> lagadd([1, 2, 3], [1, 2, 3, 4]) - array([2., 4., 6., 4.]) - - - """ - return pu._add(c1, c2) - - -def lagsub(c1, c2): - """ - Subtract one Laguerre series from another. - - Returns the difference of two Laguerre series `c1` - `c2`. The - sequences of coefficients are from lowest order term to highest, i.e., - [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Laguerre series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Laguerre series coefficients representing their difference. - - See Also - -------- - lagadd, lagmulx, lagmul, lagdiv, lagpow - - Notes - ----- - Unlike multiplication, division, etc., the difference of two Laguerre - series is a Laguerre series (without having to "reproject" the result - onto the basis set) so subtraction, just like that of "standard" - polynomials, is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagsub - >>> lagsub([1, 2, 3, 4], [1, 2, 3]) - array([0., 0., 0., 4.]) - - """ - return pu._sub(c1, c2) - - -def lagmulx(c): - """Multiply a Laguerre series by x. - - Multiply the Laguerre series `c` by x, where x is the independent - variable. - - - Parameters - ---------- - c : array_like - 1-D array of Laguerre series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the result of the multiplication. 
- - See Also - -------- - lagadd, lagsub, lagmul, lagdiv, lagpow - - Notes - ----- - The multiplication uses the recursion relationship for Laguerre - polynomials in the form - - .. math:: - - xP_i(x) = (-(i + 1)*P_{i + 1}(x) + (2i + 1)P_{i}(x) - iP_{i - 1}(x)) - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagmulx - >>> lagmulx([1, 2, 3]) - array([-1., -1., 11., -9.]) - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - # The zero series needs special treatment - if len(c) == 1 and c[0] == 0: - return c - - prd = np.empty(len(c) + 1, dtype=c.dtype) - prd[0] = c[0] - prd[1] = -c[0] - for i in range(1, len(c)): - prd[i + 1] = -c[i]*(i + 1) - prd[i] += c[i]*(2*i + 1) - prd[i - 1] -= c[i]*i - return prd - - -def lagmul(c1, c2): - """ - Multiply one Laguerre series by another. - - Returns the product of two Laguerre series `c1` * `c2`. The arguments - are sequences of coefficients, from lowest order "term" to highest, - e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Laguerre series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Laguerre series coefficients representing their product. - - See Also - -------- - lagadd, lagsub, lagmulx, lagdiv, lagpow - - Notes - ----- - In general, the (polynomial) product of two C-series results in terms - that are not in the Laguerre polynomial basis set. Thus, to express - the product as a Laguerre series, it is necessary to "reproject" the - product onto said basis set, which may produce "unintuitive" (but - correct) results; see Examples section below. 
- - Examples - -------- - >>> from numpy.polynomial.laguerre import lagmul - >>> lagmul([1, 2, 3], [0, 1, 2]) - array([ 8., -13., 38., -51., 36.]) - - """ - # s1, s2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - - if len(c1) > len(c2): - c = c2 - xs = c1 - else: - c = c1 - xs = c2 - - if len(c) == 1: - c0 = c[0]*xs - c1 = 0 - elif len(c) == 2: - c0 = c[0]*xs - c1 = c[1]*xs - else: - nd = len(c) - c0 = c[-2]*xs - c1 = c[-1]*xs - for i in range(3, len(c) + 1): - tmp = c0 - nd = nd - 1 - c0 = lagsub(c[-i]*xs, (c1*(nd - 1))/nd) - c1 = lagadd(tmp, lagsub((2*nd - 1)*c1, lagmulx(c1))/nd) - return lagadd(c0, lagsub(c1, lagmulx(c1))) - - -def lagdiv(c1, c2): - """ - Divide one Laguerre series by another. - - Returns the quotient-with-remainder of two Laguerre series - `c1` / `c2`. The arguments are sequences of coefficients from lowest - order "term" to highest, e.g., [1,2,3] represents the series - ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Laguerre series coefficients ordered from low to - high. - - Returns - ------- - [quo, rem] : ndarrays - Of Laguerre series coefficients representing the quotient and - remainder. - - See Also - -------- - lagadd, lagsub, lagmulx, lagmul, lagpow - - Notes - ----- - In general, the (polynomial) division of one Laguerre series by another - results in quotient and remainder terms that are not in the Laguerre - polynomial basis set. Thus, to express these results as a Laguerre - series, it is necessary to "reproject" the results onto the Laguerre - basis set, which may produce "unintuitive" (but correct) results; see - Examples section below. 
- - Examples - -------- - >>> from numpy.polynomial.laguerre import lagdiv - >>> lagdiv([ 8., -13., 38., -51., 36.], [0, 1, 2]) - (array([1., 2., 3.]), array([0.])) - >>> lagdiv([ 9., -12., 38., -51., 36.], [0, 1, 2]) - (array([1., 2., 3.]), array([1., 1.])) - - """ - return pu._div(lagmul, c1, c2) - - -def lagpow(c, pow, maxpower=16): - """Raise a Laguerre series to a power. - - Returns the Laguerre series `c` raised to the power `pow`. The - argument `c` is a sequence of coefficients ordered from low to high. - i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` - - Parameters - ---------- - c : array_like - 1-D array of Laguerre series coefficients ordered from low to - high. - pow : integer - Power to which the series will be raised - maxpower : integer, optional - Maximum power allowed. This is mainly to limit growth of the series - to unmanageable size. Default is 16 - - Returns - ------- - coef : ndarray - Laguerre series of power. - - See Also - -------- - lagadd, lagsub, lagmulx, lagmul, lagdiv - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagpow - >>> lagpow([1, 2, 3], 2) - array([ 14., -16., 56., -72., 54.]) - - """ - return pu._pow(lagmul, c, pow, maxpower) - - -def lagder(c, m=1, scl=1, axis=0): - """ - Differentiate a Laguerre series. - - Returns the Laguerre series coefficients `c` differentiated `m` times - along `axis`. At each iteration the result is multiplied by `scl` (the - scaling factor is for use in a linear change of variable). The argument - `c` is an array of coefficients from low to high degree along each - axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2`` - while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + - 2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is - ``y``. - - Parameters - ---------- - c : array_like - Array of Laguerre series coefficients. 
If `c` is multidimensional - the different axis correspond to different variables with the - degree in each axis given by the corresponding index. - m : int, optional - Number of derivatives taken, must be non-negative. (Default: 1) - scl : scalar, optional - Each differentiation is multiplied by `scl`. The end result is - multiplication by ``scl**m``. This is for use in a linear change of - variable. (Default: 1) - axis : int, optional - Axis over which the derivative is taken. (Default: 0). - - .. versionadded:: 1.7.0 - - Returns - ------- - der : ndarray - Laguerre series of the derivative. - - See Also - -------- - lagint - - Notes - ----- - In general, the result of differentiating a Laguerre series does not - resemble the same operation on a power series. Thus the result of this - function may be "unintuitive," albeit correct; see Examples section - below. - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagder - >>> lagder([ 1., 1., 1., -3.]) - array([1., 2., 3.]) - >>> lagder([ 1., 0., 0., -4., 3.], m=2) - array([1., 2., 3.]) - - """ - c = np.array(c, ndmin=1, copy=True) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - - cnt = pu._deprecate_as_int(m, "the order of derivation") - iaxis = pu._deprecate_as_int(axis, "the axis") - if cnt < 0: - raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) - - if cnt == 0: - return c - - c = np.moveaxis(c, iaxis, 0) - n = len(c) - if cnt >= n: - c = c[:1]*0 - else: - for i in range(cnt): - n = n - 1 - c *= scl - der = np.empty((n,) + c.shape[1:], dtype=c.dtype) - for j in range(n, 1, -1): - der[j - 1] = -c[j] - c[j - 1] += c[j] - der[0] = -c[1] - c = der - c = np.moveaxis(c, 0, iaxis) - return c - - -def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0): - """ - Integrate a Laguerre series. - - Returns the Laguerre series coefficients `c` integrated `m` times from - `lbnd` along `axis`. 
At each iteration the resulting series is - **multiplied** by `scl` and an integration constant, `k`, is added. - The scaling factor is for use in a linear change of variable. ("Buyer - beware": note that, depending on what one is doing, one may want `scl` - to be the reciprocal of what one might expect; for more information, - see the Notes section below.) The argument `c` is an array of - coefficients from low to high degree along each axis, e.g., [1,2,3] - represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]] - represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) + - 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. - - - Parameters - ---------- - c : array_like - Array of Laguerre series coefficients. If `c` is multidimensional - the different axis correspond to different variables with the - degree in each axis given by the corresponding index. - m : int, optional - Order of integration, must be positive. (Default: 1) - k : {[], list, scalar}, optional - Integration constant(s). The value of the first integral at - ``lbnd`` is the first value in the list, the value of the second - integral at ``lbnd`` is the second value, etc. If ``k == []`` (the - default), all constants are set to zero. If ``m == 1``, a single - scalar can be given instead of a list. - lbnd : scalar, optional - The lower bound of the integral. (Default: 0) - scl : scalar, optional - Following each integration the result is *multiplied* by `scl` - before the integration constant is added. (Default: 1) - axis : int, optional - Axis over which the integral is taken. (Default: 0). - - .. versionadded:: 1.7.0 - - Returns - ------- - S : ndarray - Laguerre series coefficients of the integral. - - Raises - ------ - ValueError - If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or - ``np.ndim(scl) != 0``. - - See Also - -------- - lagder - - Notes - ----- - Note that the result of each integration is *multiplied* by `scl`. - Why is this important to note? 
Say one is making a linear change of - variable :math:`u = ax + b` in an integral relative to `x`. Then - :math:`dx = du/a`, so one will need to set `scl` equal to - :math:`1/a` - perhaps not what one would have first thought. - - Also note that, in general, the result of integrating a C-series needs - to be "reprojected" onto the C-series basis set. Thus, typically, - the result of this function is "unintuitive," albeit correct; see - Examples section below. - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagint - >>> lagint([1,2,3]) - array([ 1., 1., 1., -3.]) - >>> lagint([1,2,3], m=2) - array([ 1., 0., 0., -4., 3.]) - >>> lagint([1,2,3], k=1) - array([ 2., 1., 1., -3.]) - >>> lagint([1,2,3], lbnd=-1) - array([11.5, 1. , 1. , -3. ]) - >>> lagint([1,2], m=2, k=[1,2], lbnd=-1) - array([ 11.16666667, -5. , -3. , 2. ]) # may vary - - """ - c = np.array(c, ndmin=1, copy=True) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - if not np.iterable(k): - k = [k] - cnt = pu._deprecate_as_int(m, "the order of integration") - iaxis = pu._deprecate_as_int(axis, "the axis") - if cnt < 0: - raise ValueError("The order of integration must be non-negative") - if len(k) > cnt: - raise ValueError("Too many integration constants") - if np.ndim(lbnd) != 0: - raise ValueError("lbnd must be a scalar.") - if np.ndim(scl) != 0: - raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) - - if cnt == 0: - return c - - c = np.moveaxis(c, iaxis, 0) - k = list(k) + [0]*(cnt - len(k)) - for i in range(cnt): - n = len(c) - c *= scl - if n == 1 and np.all(c[0] == 0): - c[0] += k[i] - else: - tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) - tmp[0] = c[0] - tmp[1] = -c[0] - for j in range(1, n): - tmp[j] += c[j] - tmp[j + 1] = -c[j] - tmp[0] += k[i] - lagval(lbnd, tmp) - c = tmp - c = np.moveaxis(c, 0, iaxis) - return c - - -def lagval(x, c, tensor=True): - """ - Evaluate a Laguerre series at points x. 
- - If `c` is of length `n + 1`, this function returns the value: - - .. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x) - - The parameter `x` is converted to an array only if it is a tuple or a - list, otherwise it is treated as a scalar. In either case, either `x` - or its elements must support multiplication and addition both with - themselves and with the elements of `c`. - - If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If - `c` is multidimensional, then the shape of the result depends on the - value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + - x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that - scalars have shape (,). - - Trailing zeros in the coefficients will be used in the evaluation, so - they should be avoided if efficiency is a concern. - - Parameters - ---------- - x : array_like, compatible object - If `x` is a list or tuple, it is converted to an ndarray, otherwise - it is left unchanged and treated as a scalar. In either case, `x` - or its elements must support addition and multiplication with - with themselves and with the elements of `c`. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree n are contained in c[n]. If `c` is multidimensional the - remaining indices enumerate multiple polynomials. In the two - dimensional case the coefficients may be thought of as stored in - the columns of `c`. - tensor : boolean, optional - If True, the shape of the coefficient array is extended with ones - on the right, one for each dimension of `x`. Scalars have dimension 0 - for this action. The result is that every column of coefficients in - `c` is evaluated for every element of `x`. If False, `x` is broadcast - over the columns of `c` for the evaluation. This keyword is useful - when `c` is multidimensional. The default value is True. - - .. 
versionadded:: 1.7.0 - - Returns - ------- - values : ndarray, algebra_like - The shape of the return value is described above. - - See Also - -------- - lagval2d, laggrid2d, lagval3d, laggrid3d - - Notes - ----- - The evaluation uses Clenshaw recursion, aka synthetic division. - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagval - >>> coef = [1,2,3] - >>> lagval(1, coef) - -0.5 - >>> lagval([[1,2],[3,4]], coef) - array([[-0.5, -4. ], - [-4.5, -2. ]]) - - """ - c = np.array(c, ndmin=1, copy=False) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - if isinstance(x, (tuple, list)): - x = np.asarray(x) - if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) - - if len(c) == 1: - c0 = c[0] - c1 = 0 - elif len(c) == 2: - c0 = c[0] - c1 = c[1] - else: - nd = len(c) - c0 = c[-2] - c1 = c[-1] - for i in range(3, len(c) + 1): - tmp = c0 - nd = nd - 1 - c0 = c[-i] - (c1*(nd - 1))/nd - c1 = tmp + (c1*((2*nd - 1) - x))/nd - return c0 + c1*(1 - x) - - -def lagval2d(x, y, c): - """ - Evaluate a 2-D Laguerre series at points (x, y). - - This function returns the values: - - .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y) - - The parameters `x` and `y` are converted to arrays only if they are - tuples or a lists, otherwise they are treated as a scalars and they - must have the same shape after conversion. In either case, either `x` - and `y` or their elements must support multiplication and addition both - with themselves and with the elements of `c`. - - If `c` is a 1-D array a one is implicitly appended to its shape to make - it 2-D. The shape of the result will be c.shape[2:] + x.shape. - - Parameters - ---------- - x, y : array_like, compatible objects - The two dimensional series is evaluated at the points `(x, y)`, - where `x` and `y` must have the same shape. 
If `x` or `y` is a list - or tuple, it is first converted to an ndarray, otherwise it is left - unchanged and if it isn't an ndarray it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term - of multi-degree i,j is contained in ``c[i,j]``. If `c` has - dimension greater than two the remaining indices enumerate multiple - sets of coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional polynomial at points formed with - pairs of corresponding values from `x` and `y`. - - See Also - -------- - lagval, laggrid2d, lagval3d, laggrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._valnd(lagval, c, x, y) - - -def laggrid2d(x, y, c): - """ - Evaluate a 2-D Laguerre series on the Cartesian product of x and y. - - This function returns the values: - - .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b) - - where the points `(a, b)` consist of all pairs formed by taking - `a` from `x` and `b` from `y`. The resulting points form a grid with - `x` in the first dimension and `y` in the second. - - The parameters `x` and `y` are converted to arrays only if they are - tuples or a lists, otherwise they are treated as a scalars. In either - case, either `x` and `y` or their elements must support multiplication - and addition both with themselves and with the elements of `c`. - - If `c` has fewer than two dimensions, ones are implicitly appended to - its shape to make it 2-D. The shape of the result will be c.shape[2:] + - x.shape + y.shape. - - Parameters - ---------- - x, y : array_like, compatible objects - The two dimensional series is evaluated at the points in the - Cartesian product of `x` and `y`. If `x` or `y` is a list or - tuple, it is first converted to an ndarray, otherwise it is left - unchanged and, if it isn't an ndarray, it is treated as a scalar. 
- c : array_like - Array of coefficients ordered so that the coefficient of the term of - multi-degree i,j is contained in `c[i,j]`. If `c` has dimension - greater than two the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional Chebyshev series at points in the - Cartesian product of `x` and `y`. - - See Also - -------- - lagval, lagval2d, lagval3d, laggrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._gridnd(lagval, c, x, y) - - -def lagval3d(x, y, z, c): - """ - Evaluate a 3-D Laguerre series at points (x, y, z). - - This function returns the values: - - .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z) - - The parameters `x`, `y`, and `z` are converted to arrays only if - they are tuples or a lists, otherwise they are treated as a scalars and - they must have the same shape after conversion. In either case, either - `x`, `y`, and `z` or their elements must support multiplication and - addition both with themselves and with the elements of `c`. - - If `c` has fewer than 3 dimensions, ones are implicitly appended to its - shape to make it 3-D. The shape of the result will be c.shape[3:] + - x.shape. - - Parameters - ---------- - x, y, z : array_like, compatible object - The three dimensional series is evaluated at the points - `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If - any of `x`, `y`, or `z` is a list or tuple, it is first converted - to an ndarray, otherwise it is left unchanged and if it isn't an - ndarray it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term of - multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension - greater than 3 the remaining indices enumerate multiple sets of - coefficients. 
- - Returns - ------- - values : ndarray, compatible object - The values of the multidimension polynomial on points formed with - triples of corresponding values from `x`, `y`, and `z`. - - See Also - -------- - lagval, lagval2d, laggrid2d, laggrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._valnd(lagval, c, x, y, z) - - -def laggrid3d(x, y, z, c): - """ - Evaluate a 3-D Laguerre series on the Cartesian product of x, y, and z. - - This function returns the values: - - .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c) - - where the points `(a, b, c)` consist of all triples formed by taking - `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form - a grid with `x` in the first dimension, `y` in the second, and `z` in - the third. - - The parameters `x`, `y`, and `z` are converted to arrays only if they - are tuples or a lists, otherwise they are treated as a scalars. In - either case, either `x`, `y`, and `z` or their elements must support - multiplication and addition both with themselves and with the elements - of `c`. - - If `c` has fewer than three dimensions, ones are implicitly appended to - its shape to make it 3-D. The shape of the result will be c.shape[3:] + - x.shape + y.shape + z.shape. - - Parameters - ---------- - x, y, z : array_like, compatible objects - The three dimensional series is evaluated at the points in the - Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a - list or tuple, it is first converted to an ndarray, otherwise it is - left unchanged and, if it isn't an ndarray, it is treated as a - scalar. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree i,j are contained in ``c[i,j]``. If `c` has dimension - greater than two the remaining indices enumerate multiple sets of - coefficients. 
- - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional polynomial at points in the Cartesian - product of `x` and `y`. - - See Also - -------- - lagval, lagval2d, laggrid2d, lagval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._gridnd(lagval, c, x, y, z) - - -def lagvander(x, deg): - """Pseudo-Vandermonde matrix of given degree. - - Returns the pseudo-Vandermonde matrix of degree `deg` and sample points - `x`. The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., i] = L_i(x) - - where `0 <= i <= deg`. The leading indices of `V` index the elements of - `x` and the last index is the degree of the Laguerre polynomial. - - If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the - array ``V = lagvander(x, n)``, then ``np.dot(V, c)`` and - ``lagval(x, c)`` are the same up to roundoff. This equivalence is - useful both for least squares fitting and for the evaluation of a large - number of Laguerre series of the same degree and sample points. - - Parameters - ---------- - x : array_like - Array of points. The dtype is converted to float64 or complex128 - depending on whether any of the elements are complex. If `x` is - scalar it is converted to a 1-D array. - deg : int - Degree of the resulting matrix. - - Returns - ------- - vander : ndarray - The pseudo-Vandermonde matrix. The shape of the returned matrix is - ``x.shape + (deg + 1,)``, where The last index is the degree of the - corresponding Laguerre polynomial. The dtype will be the same as - the converted `x`. - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagvander - >>> x = np.array([0, 1, 2]) - >>> lagvander(x, 3) - array([[ 1. , 1. , 1. , 1. ], - [ 1. , 0. , -0.5 , -0.66666667], - [ 1. , -1. , -1. 
, -0.33333333]]) - - """ - ideg = pu._deprecate_as_int(deg, "deg") - if ideg < 0: - raise ValueError("deg must be non-negative") - - x = np.array(x, copy=False, ndmin=1) + 0.0 - dims = (ideg + 1,) + x.shape - dtyp = x.dtype - v = np.empty(dims, dtype=dtyp) - v[0] = x*0 + 1 - if ideg > 0: - v[1] = 1 - x - for i in range(2, ideg + 1): - v[i] = (v[i-1]*(2*i - 1 - x) - v[i-2]*(i - 1))/i - return np.moveaxis(v, 0, -1) - - -def lagvander2d(x, y, deg): - """Pseudo-Vandermonde matrix of given degrees. - - Returns the pseudo-Vandermonde matrix of degrees `deg` and sample - points `(x, y)`. The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y), - - where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of - `V` index the points `(x, y)` and the last index encodes the degrees of - the Laguerre polynomials. - - If ``V = lagvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` - correspond to the elements of a 2-D coefficient array `c` of shape - (xdeg + 1, ydeg + 1) in the order - - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... - - and ``np.dot(V, c.flat)`` and ``lagval2d(x, y, c)`` will be the same - up to roundoff. This equivalence is useful both for least squares - fitting and for the evaluation of a large number of 2-D Laguerre - series of the same degrees and sample points. - - Parameters - ---------- - x, y : array_like - Arrays of point coordinates, all of the same shape. The dtypes - will be converted to either float64 or complex128 depending on - whether any of the elements are complex. Scalars are converted to - 1-D arrays. - deg : list of ints - List of maximum degrees of the form [x_deg, y_deg]. - - Returns - ------- - vander2d : ndarray - The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg([1]+1)`. The dtype will be the same - as the converted `x` and `y`. 
- - See Also - -------- - lagvander, lagvander3d, lagval2d, lagval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._vander_nd_flat((lagvander, lagvander), (x, y), deg) - - -def lagvander3d(x, y, z, deg): - """Pseudo-Vandermonde matrix of given degrees. - - Returns the pseudo-Vandermonde matrix of degrees `deg` and sample - points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`, - then The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z), - - where `0 <= i <= l`, `0 <= j <= m`, and `0 <= j <= n`. The leading - indices of `V` index the points `(x, y, z)` and the last index encodes - the degrees of the Laguerre polynomials. - - If ``V = lagvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns - of `V` correspond to the elements of a 3-D coefficient array `c` of - shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order - - .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... - - and ``np.dot(V, c.flat)`` and ``lagval3d(x, y, z, c)`` will be the - same up to roundoff. This equivalence is useful both for least squares - fitting and for the evaluation of a large number of 3-D Laguerre - series of the same degrees and sample points. - - Parameters - ---------- - x, y, z : array_like - Arrays of point coordinates, all of the same shape. The dtypes will - be converted to either float64 or complex128 depending on whether - any of the elements are complex. Scalars are converted to 1-D - arrays. - deg : list of ints - List of maximum degrees of the form [x_deg, y_deg, z_deg]. - - Returns - ------- - vander3d : ndarray - The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg([1]+1)*(deg[2]+1)`. The dtype will - be the same as the converted `x`, `y`, and `z`. - - See Also - -------- - lagvander, lagvander3d, lagval2d, lagval3d - - Notes - ----- - - .. 
versionadded:: 1.7.0 - - """ - return pu._vander_nd_flat((lagvander, lagvander, lagvander), (x, y, z), deg) - - -def lagfit(x, y, deg, rcond=None, full=False, w=None): - """ - Least squares fit of Laguerre series to data. - - Return the coefficients of a Laguerre series of degree `deg` that is the - least squares fit to the data values `y` given at points `x`. If `y` is - 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple - fits are done, one for each column of `y`, and the resulting - coefficients are stored in the corresponding columns of a 2-D return. - The fitted polynomial(s) are in the form - - .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x), - - where `n` is `deg`. - - Parameters - ---------- - x : array_like, shape (M,) - x-coordinates of the M sample points ``(x[i], y[i])``. - y : array_like, shape (M,) or (M, K) - y-coordinates of the sample points. Several data sets of sample - points sharing the same x-coordinates can be fitted at once by - passing in a 2D-array that contains one dataset per column. - deg : int or 1-D array_like - Degree(s) of the fitting polynomials. If `deg` is a single integer - all terms up to and including the `deg`'th term are included in the - fit. For NumPy versions >= 1.11.0 a list of integers specifying the - degrees of the terms to include may be used instead. - rcond : float, optional - Relative condition number of the fit. Singular values smaller than - this relative to the largest singular value will be ignored. The - default value is len(x)*eps, where eps is the relative precision of - the float type, about 2e-16 in most cases. - full : bool, optional - Switch determining nature of return value. When it is False (the - default) just the coefficients are returned, when True diagnostic - information from the singular value decomposition is also returned. - w : array_like, shape (`M`,), optional - Weights. 
If not None, the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the - weights are chosen so that the errors of the products ``w[i]*y[i]`` - all have the same variance. The default value is None. - - Returns - ------- - coef : ndarray, shape (M,) or (M, K) - Laguerre coefficients ordered from low to high. If `y` was 2-D, - the coefficients for the data in column k of `y` are in column - `k`. - - [residuals, rank, singular_values, rcond] : list - These values are only returned if `full` = True - - resid -- sum of squared residuals of the least squares fit - rank -- the numerical rank of the scaled Vandermonde matrix - sv -- singular values of the scaled Vandermonde matrix - rcond -- value of `rcond`. - - For more details, see `linalg.lstsq`. - - Warns - ----- - RankWarning - The rank of the coefficient matrix in the least-squares fit is - deficient. The warning is only raised if `full` = False. The - warnings can be turned off by - - >>> import warnings - >>> warnings.simplefilter('ignore', np.RankWarning) - - See Also - -------- - chebfit, legfit, polyfit, hermfit, hermefit - lagval : Evaluates a Laguerre series. - lagvander : pseudo Vandermonde matrix of Laguerre series. - lagweight : Laguerre weight function. - linalg.lstsq : Computes a least-squares fit from the matrix. - scipy.interpolate.UnivariateSpline : Computes spline fits. - - Notes - ----- - The solution is the coefficients of the Laguerre series `p` that - minimizes the sum of the weighted squared errors - - .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, - - where the :math:`w_j` are the weights. This problem is solved by - setting up as the (typically) overdetermined matrix equation - - .. math:: V(x) * c = w * y, - - where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the - coefficients to be solved for, `w` are the weights, and `y` are the - observed values. This equation is then solved using the singular value - decomposition of `V`. 
- - If some of the singular values of `V` are so small that they are - neglected, then a `RankWarning` will be issued. This means that the - coefficient values may be poorly determined. Using a lower order fit - will usually get rid of the warning. The `rcond` parameter can also be - set to a value smaller than its default, but the resulting fit may be - spurious and have large contributions from roundoff error. - - Fits using Laguerre series are probably most useful when the data can - be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Laguerre - weight. In that case the weight ``sqrt(w(x[i])`` should be used - together with data values ``y[i]/sqrt(w(x[i])``. The weight function is - available as `lagweight`. - - References - ---------- - .. [1] Wikipedia, "Curve fitting", - https://en.wikipedia.org/wiki/Curve_fitting - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagfit, lagval - >>> x = np.linspace(0, 10) - >>> err = np.random.randn(len(x))/10 - >>> y = lagval(x, [1, 2, 3]) + err - >>> lagfit(x, y, 2) - array([ 0.96971004, 2.00193749, 3.00288744]) # may vary - - """ - return pu._fit(lagvander, x, y, deg, rcond, full, w) - - -def lagcompanion(c): - """ - Return the companion matrix of c. - - The usual companion matrix of the Laguerre polynomials is already - symmetric when `c` is a basis Laguerre polynomial, so no scaling is - applied. - - Parameters - ---------- - c : array_like - 1-D array of Laguerre series coefficients ordered from low to high - degree. - - Returns - ------- - mat : ndarray - Companion matrix of dimensions (deg, deg). - - Notes - ----- - - .. 
versionadded:: 1.7.0 - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - if len(c) < 2: - raise ValueError('Series must have maximum degree of at least 1.') - if len(c) == 2: - return np.array([[1 + c[0]/c[1]]]) - - n = len(c) - 1 - mat = np.zeros((n, n), dtype=c.dtype) - top = mat.reshape(-1)[1::n+1] - mid = mat.reshape(-1)[0::n+1] - bot = mat.reshape(-1)[n::n+1] - top[...] = -np.arange(1, n) - mid[...] = 2.*np.arange(n) + 1. - bot[...] = top - mat[:, -1] += (c[:-1]/c[-1])*n - return mat - - -def lagroots(c): - """ - Compute the roots of a Laguerre series. - - Return the roots (a.k.a. "zeros") of the polynomial - - .. math:: p(x) = \\sum_i c[i] * L_i(x). - - Parameters - ---------- - c : 1-D array_like - 1-D array of coefficients. - - Returns - ------- - out : ndarray - Array of the roots of the series. If all the roots are real, - then `out` is also real, otherwise it is complex. - - See Also - -------- - polyroots, legroots, chebroots, hermroots, hermeroots - - Notes - ----- - The root estimates are obtained as the eigenvalues of the companion - matrix, Roots far from the origin of the complex plane may have large - errors due to the numerical instability of the series for such - values. Roots with multiplicity greater than 1 will also show larger - errors as the value of the series near such points is relatively - insensitive to errors in the roots. Isolated roots near the origin can - be improved by a few iterations of Newton's method. - - The Laguerre series basis polynomials aren't powers of `x` so the - results of this function may seem unintuitive. 
- - Examples - -------- - >>> from numpy.polynomial.laguerre import lagroots, lagfromroots - >>> coef = lagfromroots([0, 1, 2]) - >>> coef - array([ 2., -8., 12., -6.]) - >>> lagroots(coef) - array([-4.4408921e-16, 1.0000000e+00, 2.0000000e+00]) - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - if len(c) <= 1: - return np.array([], dtype=c.dtype) - if len(c) == 2: - return np.array([1 + c[0]/c[1]]) - - # rotated companion matrix reduces error - m = lagcompanion(c)[::-1,::-1] - r = la.eigvals(m) - r.sort() - return r - - -def laggauss(deg): - """ - Gauss-Laguerre quadrature. - - Computes the sample points and weights for Gauss-Laguerre quadrature. - These sample points and weights will correctly integrate polynomials of - degree :math:`2*deg - 1` or less over the interval :math:`[0, \\inf]` - with the weight function :math:`f(x) = \\exp(-x)`. - - Parameters - ---------- - deg : int - Number of sample points and weights. It must be >= 1. - - Returns - ------- - x : ndarray - 1-D ndarray containing the sample points. - y : ndarray - 1-D ndarray containing the weights. - - Notes - ----- - - .. versionadded:: 1.7.0 - - The results have only been tested up to degree 100 higher degrees may - be problematic. The weights are determined by using the fact that - - .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k)) - - where :math:`c` is a constant independent of :math:`k` and :math:`x_k` - is the k'th root of :math:`L_n`, and then scaling the results to get - the right value when integrating 1. - - """ - ideg = pu._deprecate_as_int(deg, "deg") - if ideg <= 0: - raise ValueError("deg must be a positive integer") - - # first approximation of roots. We use the fact that the companion - # matrix is symmetric in this case in order to obtain better zeros. - c = np.array([0]*deg + [1]) - m = lagcompanion(c) - x = la.eigvalsh(m) - - # improve roots by one application of Newton - dy = lagval(x, c) - df = lagval(x, lagder(c)) - x -= dy/df - - # compute the weights. 
We scale the factor to avoid possible numerical - # overflow. - fm = lagval(x, c[1:]) - fm /= np.abs(fm).max() - df /= np.abs(df).max() - w = 1/(fm * df) - - # scale w to get the right value, 1 in this case - w /= w.sum() - - return x, w - - -def lagweight(x): - """Weight function of the Laguerre polynomials. - - The weight function is :math:`exp(-x)` and the interval of integration - is :math:`[0, \\inf]`. The Laguerre polynomials are orthogonal, but not - normalized, with respect to this weight function. - - Parameters - ---------- - x : array_like - Values at which the weight function will be computed. - - Returns - ------- - w : ndarray - The weight function at `x`. - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - w = np.exp(-x) - return w - -# -# Laguerre series class -# - -class Laguerre(ABCPolyBase): - """A Laguerre series class. - - The Laguerre class provides the standard Python numerical methods - '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the - attributes and methods listed in the `ABCPolyBase` documentation. - - Parameters - ---------- - coef : array_like - Laguerre coefficients in order of increasing degree, i.e, - ``(1, 2, 3)`` gives ``1*L_0(x) + 2*L_1(X) + 3*L_2(x)``. - domain : (2,) array_like, optional - Domain to use. The interval ``[domain[0], domain[1]]`` is mapped - to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [0, 1]. - window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [0, 1]. - - .. 
versionadded:: 1.6.0 - - """ - # Virtual Functions - _add = staticmethod(lagadd) - _sub = staticmethod(lagsub) - _mul = staticmethod(lagmul) - _div = staticmethod(lagdiv) - _pow = staticmethod(lagpow) - _val = staticmethod(lagval) - _int = staticmethod(lagint) - _der = staticmethod(lagder) - _fit = staticmethod(lagfit) - _line = staticmethod(lagline) - _roots = staticmethod(lagroots) - _fromroots = staticmethod(lagfromroots) - - # Virtual properties - nickname = 'lag' - domain = np.array(lagdomain) - window = np.array(lagdomain) - basis_name = 'L' diff --git a/venv/lib/python3.7/site-packages/numpy/polynomial/legendre.py b/venv/lib/python3.7/site-packages/numpy/polynomial/legendre.py deleted file mode 100644 index c118247..0000000 --- a/venv/lib/python3.7/site-packages/numpy/polynomial/legendre.py +++ /dev/null @@ -1,1653 +0,0 @@ -""" -Legendre Series (:mod: `numpy.polynomial.legendre`) -=================================================== - -.. currentmodule:: numpy.polynomial.polynomial - -This module provides a number of objects (mostly functions) useful for -dealing with Legendre series, including a `Legendre` class that -encapsulates the usual arithmetic operations. (General information -on how this module represents and works with such polynomials is in the -docstring for its "parent" sub-package, `numpy.polynomial`). - -Constants ---------- - -.. autosummary:: - :toctree: generated/ - - legdomain Legendre series default domain, [-1,1]. - legzero Legendre series that evaluates identically to 0. - legone Legendre series that evaluates identically to 1. - legx Legendre series for the identity map, ``f(x) = x``. - -Arithmetic ----------- - -.. autosummary:: - :toctree: generated/ - - legadd add two Legendre series. - legsub subtract one Legendre series from another. - legmulx multiply a Legendre series in ``P_i(x)`` by ``x``. - legmul multiply two Legendre series. - legdiv divide one Legendre series by another. 
- legpow raise a Legendre series to a positive integer power. - legval evaluate a Legendre series at given points. - legval2d evaluate a 2D Legendre series at given points. - legval3d evaluate a 3D Legendre series at given points. - leggrid2d evaluate a 2D Legendre series on a Cartesian product. - leggrid3d evaluate a 3D Legendre series on a Cartesian product. - -Calculus --------- - -.. autosummary:: - :toctree: generated/ - - legder differentiate a Legendre series. - legint integrate a Legendre series. - -Misc Functions --------------- - -.. autosummary:: - :toctree: generated/ - - legfromroots create a Legendre series with specified roots. - legroots find the roots of a Legendre series. - legvander Vandermonde-like matrix for Legendre polynomials. - legvander2d Vandermonde-like matrix for 2D power series. - legvander3d Vandermonde-like matrix for 3D power series. - leggauss Gauss-Legendre quadrature, points and weights. - legweight Legendre weight function. - legcompanion symmetrized companion matrix in Legendre form. - legfit least-squares fit returning a Legendre series. - legtrim trim leading coefficients from a Legendre series. - legline Legendre series representing given straight line. - leg2poly convert a Legendre series to a polynomial. - poly2leg convert a polynomial to a Legendre series. - -Classes -------- - Legendre A Legendre series class. - -See also --------- -numpy.polynomial.polynomial -numpy.polynomial.chebyshev -numpy.polynomial.laguerre -numpy.polynomial.hermite -numpy.polynomial.hermite_e - -""" -from __future__ import division, absolute_import, print_function - -import warnings -import numpy as np -import numpy.linalg as la -from numpy.core.multiarray import normalize_axis_index - -from . 
import polyutils as pu -from ._polybase import ABCPolyBase - -__all__ = [ - 'legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd', - 'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder', - 'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander', - 'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d', - 'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion', - 'leggauss', 'legweight'] - -legtrim = pu.trimcoef - - -def poly2leg(pol): - """ - Convert a polynomial to a Legendre series. - - Convert an array representing the coefficients of a polynomial (relative - to the "standard" basis) ordered from lowest degree to highest, to an - array of the coefficients of the equivalent Legendre series, ordered - from lowest to highest degree. - - Parameters - ---------- - pol : array_like - 1-D array containing the polynomial coefficients - - Returns - ------- - c : ndarray - 1-D array containing the coefficients of the equivalent Legendre - series. - - See Also - -------- - leg2poly - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> from numpy import polynomial as P - >>> p = P.Polynomial(np.arange(4)) - >>> p - Polynomial([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1]) - >>> c = P.Legendre(P.legendre.poly2leg(p.coef)) - >>> c - Legendre([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1]) # may vary - - """ - [pol] = pu.as_series([pol]) - deg = len(pol) - 1 - res = 0 - for i in range(deg, -1, -1): - res = legadd(legmulx(res), pol[i]) - return res - - -def leg2poly(c): - """ - Convert a Legendre series to a polynomial. - - Convert an array representing the coefficients of a Legendre series, - ordered from lowest degree to highest, to an array of the coefficients - of the equivalent polynomial (relative to the "standard" basis) ordered - from lowest to highest degree. 
- - Parameters - ---------- - c : array_like - 1-D array containing the Legendre series coefficients, ordered - from lowest order term to highest. - - Returns - ------- - pol : ndarray - 1-D array containing the coefficients of the equivalent polynomial - (relative to the "standard" basis) ordered from lowest order term - to highest. - - See Also - -------- - poly2leg - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> from numpy import polynomial as P - >>> c = P.Legendre(range(4)) - >>> c - Legendre([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1]) - >>> p = c.convert(kind=P.Polynomial) - >>> p - Polynomial([-1. , -3.5, 3. , 7.5], domain=[-1., 1.], window=[-1., 1.]) - >>> P.leg2poly(range(4)) - array([-1. , -3.5, 3. , 7.5]) - - - """ - from .polynomial import polyadd, polysub, polymulx - - [c] = pu.as_series([c]) - n = len(c) - if n < 3: - return c - else: - c0 = c[-2] - c1 = c[-1] - # i is the current degree of c1 - for i in range(n - 1, 1, -1): - tmp = c0 - c0 = polysub(c[i - 2], (c1*(i - 1))/i) - c1 = polyadd(tmp, (polymulx(c1)*(2*i - 1))/i) - return polyadd(c0, polymulx(c1)) - -# -# These are constant arrays are of integer type so as to be compatible -# with the widest range of other types, such as Decimal. -# - -# Legendre -legdomain = np.array([-1, 1]) - -# Legendre coefficients representing zero. -legzero = np.array([0]) - -# Legendre coefficients representing one. -legone = np.array([1]) - -# Legendre coefficients representing the identity x. -legx = np.array([0, 1]) - - -def legline(off, scl): - """ - Legendre series whose graph is a straight line. - - - - Parameters - ---------- - off, scl : scalars - The specified line is given by ``off + scl*x``. - - Returns - ------- - y : ndarray - This module's representation of the Legendre series for - ``off + scl*x``. 
- - See Also - -------- - polyline, chebline - - Examples - -------- - >>> import numpy.polynomial.legendre as L - >>> L.legline(3,2) - array([3, 2]) - >>> L.legval(-3, L.legline(3,2)) # should be -3 - -3.0 - - """ - if scl != 0: - return np.array([off, scl]) - else: - return np.array([off]) - - -def legfromroots(roots): - """ - Generate a Legendre series with given roots. - - The function returns the coefficients of the polynomial - - .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), - - in Legendre form, where the `r_n` are the roots specified in `roots`. - If a zero has multiplicity n, then it must appear in `roots` n times. - For instance, if 2 is a root of multiplicity three and 3 is a root of - multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The - roots can appear in any order. - - If the returned coefficients are `c`, then - - .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x) - - The coefficient of the last term is not generally 1 for monic - polynomials in Legendre form. - - Parameters - ---------- - roots : array_like - Sequence containing the roots. - - Returns - ------- - out : ndarray - 1-D array of coefficients. If all roots are real then `out` is a - real array, if some of the roots are complex, then `out` is complex - even if all the coefficients in the result are real (see Examples - below). - - See Also - -------- - polyfromroots, chebfromroots, lagfromroots, hermfromroots, hermefromroots - - Examples - -------- - >>> import numpy.polynomial.legendre as L - >>> L.legfromroots((-1,0,1)) # x^3 - x relative to the standard basis - array([ 0. , -0.4, 0. , 0.4]) - >>> j = complex(0,1) - >>> L.legfromroots((-j,j)) # x^2 + 1 relative to the standard basis - array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j]) # may vary - - """ - return pu._fromroots(legline, legmul, roots) - - -def legadd(c1, c2): - """ - Add one Legendre series to another. - - Returns the sum of two Legendre series `c1` + `c2`. 
The arguments - are sequences of coefficients ordered from lowest order term to - highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Legendre series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the Legendre series of their sum. - - See Also - -------- - legsub, legmulx, legmul, legdiv, legpow - - Notes - ----- - Unlike multiplication, division, etc., the sum of two Legendre series - is a Legendre series (without having to "reproject" the result onto - the basis set) so addition, just like that of "standard" polynomials, - is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial import legendre as L - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> L.legadd(c1,c2) - array([4., 4., 4.]) - - """ - return pu._add(c1, c2) - - -def legsub(c1, c2): - """ - Subtract one Legendre series from another. - - Returns the difference of two Legendre series `c1` - `c2`. The - sequences of coefficients are from lowest order term to highest, i.e., - [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Legendre series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Legendre series coefficients representing their difference. - - See Also - -------- - legadd, legmulx, legmul, legdiv, legpow - - Notes - ----- - Unlike multiplication, division, etc., the difference of two Legendre - series is a Legendre series (without having to "reproject" the result - onto the basis set) so subtraction, just like that of "standard" - polynomials, is simply "component-wise." 
- - Examples - -------- - >>> from numpy.polynomial import legendre as L - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> L.legsub(c1,c2) - array([-2., 0., 2.]) - >>> L.legsub(c2,c1) # -C.legsub(c1,c2) - array([ 2., 0., -2.]) - - """ - return pu._sub(c1, c2) - - -def legmulx(c): - """Multiply a Legendre series by x. - - Multiply the Legendre series `c` by x, where x is the independent - variable. - - - Parameters - ---------- - c : array_like - 1-D array of Legendre series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the result of the multiplication. - - See Also - -------- - legadd, legmul, legmul, legdiv, legpow - - Notes - ----- - The multiplication uses the recursion relationship for Legendre - polynomials in the form - - .. math:: - - xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1) - - Examples - -------- - >>> from numpy.polynomial import legendre as L - >>> L.legmulx([1,2,3]) - array([ 0.66666667, 2.2, 1.33333333, 1.8]) # may vary - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - # The zero series needs special treatment - if len(c) == 1 and c[0] == 0: - return c - - prd = np.empty(len(c) + 1, dtype=c.dtype) - prd[0] = c[0]*0 - prd[1] = c[0] - for i in range(1, len(c)): - j = i + 1 - k = i - 1 - s = i + j - prd[j] = (c[i]*j)/s - prd[k] += (c[i]*i)/s - return prd - - -def legmul(c1, c2): - """ - Multiply one Legendre series by another. - - Returns the product of two Legendre series `c1` * `c2`. The arguments - are sequences of coefficients, from lowest order "term" to highest, - e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Legendre series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Legendre series coefficients representing their product. 
- - See Also - -------- - legadd, legsub, legmulx, legdiv, legpow - - Notes - ----- - In general, the (polynomial) product of two C-series results in terms - that are not in the Legendre polynomial basis set. Thus, to express - the product as a Legendre series, it is necessary to "reproject" the - product onto said basis set, which may produce "unintuitive" (but - correct) results; see Examples section below. - - Examples - -------- - >>> from numpy.polynomial import legendre as L - >>> c1 = (1,2,3) - >>> c2 = (3,2) - >>> L.legmul(c1,c2) # multiplication requires "reprojection" - array([ 4.33333333, 10.4 , 11.66666667, 3.6 ]) # may vary - - """ - # s1, s2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - - if len(c1) > len(c2): - c = c2 - xs = c1 - else: - c = c1 - xs = c2 - - if len(c) == 1: - c0 = c[0]*xs - c1 = 0 - elif len(c) == 2: - c0 = c[0]*xs - c1 = c[1]*xs - else: - nd = len(c) - c0 = c[-2]*xs - c1 = c[-1]*xs - for i in range(3, len(c) + 1): - tmp = c0 - nd = nd - 1 - c0 = legsub(c[-i]*xs, (c1*(nd - 1))/nd) - c1 = legadd(tmp, (legmulx(c1)*(2*nd - 1))/nd) - return legadd(c0, legmulx(c1)) - - -def legdiv(c1, c2): - """ - Divide one Legendre series by another. - - Returns the quotient-with-remainder of two Legendre series - `c1` / `c2`. The arguments are sequences of coefficients from lowest - order "term" to highest, e.g., [1,2,3] represents the series - ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Legendre series coefficients ordered from low to - high. - - Returns - ------- - quo, rem : ndarrays - Of Legendre series coefficients representing the quotient and - remainder. - - See Also - -------- - legadd, legsub, legmulx, legmul, legpow - - Notes - ----- - In general, the (polynomial) division of one Legendre series by another - results in quotient and remainder terms that are not in the Legendre - polynomial basis set. 
Thus, to express these results as a Legendre - series, it is necessary to "reproject" the results onto the Legendre - basis set, which may produce "unintuitive" (but correct) results; see - Examples section below. - - Examples - -------- - >>> from numpy.polynomial import legendre as L - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> L.legdiv(c1,c2) # quotient "intuitive," remainder not - (array([3.]), array([-8., -4.])) - >>> c2 = (0,1,2,3) - >>> L.legdiv(c2,c1) # neither "intuitive" - (array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852])) # may vary - - """ - return pu._div(legmul, c1, c2) - - -def legpow(c, pow, maxpower=16): - """Raise a Legendre series to a power. - - Returns the Legendre series `c` raised to the power `pow`. The - argument `c` is a sequence of coefficients ordered from low to high. - i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` - - Parameters - ---------- - c : array_like - 1-D array of Legendre series coefficients ordered from low to - high. - pow : integer - Power to which the series will be raised - maxpower : integer, optional - Maximum power allowed. This is mainly to limit growth of the series - to unmanageable size. Default is 16 - - Returns - ------- - coef : ndarray - Legendre series of power. - - See Also - -------- - legadd, legsub, legmulx, legmul, legdiv - - Examples - -------- - - """ - return pu._pow(legmul, c, pow, maxpower) - - -def legder(c, m=1, scl=1, axis=0): - """ - Differentiate a Legendre series. - - Returns the Legendre series coefficients `c` differentiated `m` times - along `axis`. At each iteration the result is multiplied by `scl` (the - scaling factor is for use in a linear change of variable). The argument - `c` is an array of coefficients from low to high degree along each - axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2`` - while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + - 2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is - ``y``. 
- - Parameters - ---------- - c : array_like - Array of Legendre series coefficients. If c is multidimensional the - different axis correspond to different variables with the degree in - each axis given by the corresponding index. - m : int, optional - Number of derivatives taken, must be non-negative. (Default: 1) - scl : scalar, optional - Each differentiation is multiplied by `scl`. The end result is - multiplication by ``scl**m``. This is for use in a linear change of - variable. (Default: 1) - axis : int, optional - Axis over which the derivative is taken. (Default: 0). - - .. versionadded:: 1.7.0 - - Returns - ------- - der : ndarray - Legendre series of the derivative. - - See Also - -------- - legint - - Notes - ----- - In general, the result of differentiating a Legendre series does not - resemble the same operation on a power series. Thus the result of this - function may be "unintuitive," albeit correct; see Examples section - below. - - Examples - -------- - >>> from numpy.polynomial import legendre as L - >>> c = (1,2,3,4) - >>> L.legder(c) - array([ 6., 9., 20.]) - >>> L.legder(c, 3) - array([60.]) - >>> L.legder(c, scl=-1) - array([ -6., -9., -20.]) - >>> L.legder(c, 2,-1) - array([ 9., 60.]) - - """ - c = np.array(c, ndmin=1, copy=True) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - cnt = pu._deprecate_as_int(m, "the order of derivation") - iaxis = pu._deprecate_as_int(axis, "the axis") - if cnt < 0: - raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) - - if cnt == 0: - return c - - c = np.moveaxis(c, iaxis, 0) - n = len(c) - if cnt >= n: - c = c[:1]*0 - else: - for i in range(cnt): - n = n - 1 - c *= scl - der = np.empty((n,) + c.shape[1:], dtype=c.dtype) - for j in range(n, 2, -1): - der[j - 1] = (2*j - 1)*c[j] - c[j - 2] += c[j] - if n > 1: - der[1] = 3*c[2] - der[0] = c[1] - c = der - c = np.moveaxis(c, 0, iaxis) - return c - - -def legint(c, m=1, k=[], lbnd=0, 
scl=1, axis=0): - """ - Integrate a Legendre series. - - Returns the Legendre series coefficients `c` integrated `m` times from - `lbnd` along `axis`. At each iteration the resulting series is - **multiplied** by `scl` and an integration constant, `k`, is added. - The scaling factor is for use in a linear change of variable. ("Buyer - beware": note that, depending on what one is doing, one may want `scl` - to be the reciprocal of what one might expect; for more information, - see the Notes section below.) The argument `c` is an array of - coefficients from low to high degree along each axis, e.g., [1,2,3] - represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]] - represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) + - 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. - - Parameters - ---------- - c : array_like - Array of Legendre series coefficients. If c is multidimensional the - different axis correspond to different variables with the degree in - each axis given by the corresponding index. - m : int, optional - Order of integration, must be positive. (Default: 1) - k : {[], list, scalar}, optional - Integration constant(s). The value of the first integral at - ``lbnd`` is the first value in the list, the value of the second - integral at ``lbnd`` is the second value, etc. If ``k == []`` (the - default), all constants are set to zero. If ``m == 1``, a single - scalar can be given instead of a list. - lbnd : scalar, optional - The lower bound of the integral. (Default: 0) - scl : scalar, optional - Following each integration the result is *multiplied* by `scl` - before the integration constant is added. (Default: 1) - axis : int, optional - Axis over which the integral is taken. (Default: 0). - - .. versionadded:: 1.7.0 - - Returns - ------- - S : ndarray - Legendre series coefficient array of the integral. - - Raises - ------ - ValueError - If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or - ``np.ndim(scl) != 0``. 
- - See Also - -------- - legder - - Notes - ----- - Note that the result of each integration is *multiplied* by `scl`. - Why is this important to note? Say one is making a linear change of - variable :math:`u = ax + b` in an integral relative to `x`. Then - :math:`dx = du/a`, so one will need to set `scl` equal to - :math:`1/a` - perhaps not what one would have first thought. - - Also note that, in general, the result of integrating a C-series needs - to be "reprojected" onto the C-series basis set. Thus, typically, - the result of this function is "unintuitive," albeit correct; see - Examples section below. - - Examples - -------- - >>> from numpy.polynomial import legendre as L - >>> c = (1,2,3) - >>> L.legint(c) - array([ 0.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary - >>> L.legint(c, 3) - array([ 1.66666667e-02, -1.78571429e-02, 4.76190476e-02, # may vary - -1.73472348e-18, 1.90476190e-02, 9.52380952e-03]) - >>> L.legint(c, k=3) - array([ 3.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary - >>> L.legint(c, lbnd=-2) - array([ 7.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary - >>> L.legint(c, scl=2) - array([ 0.66666667, 0.8 , 1.33333333, 1.2 ]) # may vary - - """ - c = np.array(c, ndmin=1, copy=True) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - if not np.iterable(k): - k = [k] - cnt = pu._deprecate_as_int(m, "the order of integration") - iaxis = pu._deprecate_as_int(axis, "the axis") - if cnt < 0: - raise ValueError("The order of integration must be non-negative") - if len(k) > cnt: - raise ValueError("Too many integration constants") - if np.ndim(lbnd) != 0: - raise ValueError("lbnd must be a scalar.") - if np.ndim(scl) != 0: - raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) - - if cnt == 0: - return c - - c = np.moveaxis(c, iaxis, 0) - k = list(k) + [0]*(cnt - len(k)) - for i in range(cnt): - n = len(c) - c *= scl - if n == 1 and np.all(c[0] == 0): - c[0] += k[i] - else: - tmp = np.empty((n + 1,) + 
c.shape[1:], dtype=c.dtype) - tmp[0] = c[0]*0 - tmp[1] = c[0] - if n > 1: - tmp[2] = c[1]/3 - for j in range(2, n): - t = c[j]/(2*j + 1) - tmp[j + 1] = t - tmp[j - 1] -= t - tmp[0] += k[i] - legval(lbnd, tmp) - c = tmp - c = np.moveaxis(c, 0, iaxis) - return c - - -def legval(x, c, tensor=True): - """ - Evaluate a Legendre series at points x. - - If `c` is of length `n + 1`, this function returns the value: - - .. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x) - - The parameter `x` is converted to an array only if it is a tuple or a - list, otherwise it is treated as a scalar. In either case, either `x` - or its elements must support multiplication and addition both with - themselves and with the elements of `c`. - - If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If - `c` is multidimensional, then the shape of the result depends on the - value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + - x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that - scalars have shape (,). - - Trailing zeros in the coefficients will be used in the evaluation, so - they should be avoided if efficiency is a concern. - - Parameters - ---------- - x : array_like, compatible object - If `x` is a list or tuple, it is converted to an ndarray, otherwise - it is left unchanged and treated as a scalar. In either case, `x` - or its elements must support addition and multiplication with - with themselves and with the elements of `c`. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree n are contained in c[n]. If `c` is multidimensional the - remaining indices enumerate multiple polynomials. In the two - dimensional case the coefficients may be thought of as stored in - the columns of `c`. - tensor : boolean, optional - If True, the shape of the coefficient array is extended with ones - on the right, one for each dimension of `x`. Scalars have dimension 0 - for this action. 
The result is that every column of coefficients in - `c` is evaluated for every element of `x`. If False, `x` is broadcast - over the columns of `c` for the evaluation. This keyword is useful - when `c` is multidimensional. The default value is True. - - .. versionadded:: 1.7.0 - - Returns - ------- - values : ndarray, algebra_like - The shape of the return value is described above. - - See Also - -------- - legval2d, leggrid2d, legval3d, leggrid3d - - Notes - ----- - The evaluation uses Clenshaw recursion, aka synthetic division. - - Examples - -------- - - """ - c = np.array(c, ndmin=1, copy=False) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - if isinstance(x, (tuple, list)): - x = np.asarray(x) - if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) - - if len(c) == 1: - c0 = c[0] - c1 = 0 - elif len(c) == 2: - c0 = c[0] - c1 = c[1] - else: - nd = len(c) - c0 = c[-2] - c1 = c[-1] - for i in range(3, len(c) + 1): - tmp = c0 - nd = nd - 1 - c0 = c[-i] - (c1*(nd - 1))/nd - c1 = tmp + (c1*x*(2*nd - 1))/nd - return c0 + c1*x - - -def legval2d(x, y, c): - """ - Evaluate a 2-D Legendre series at points (x, y). - - This function returns the values: - - .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y) - - The parameters `x` and `y` are converted to arrays only if they are - tuples or a lists, otherwise they are treated as a scalars and they - must have the same shape after conversion. In either case, either `x` - and `y` or their elements must support multiplication and addition both - with themselves and with the elements of `c`. - - If `c` is a 1-D array a one is implicitly appended to its shape to make - it 2-D. The shape of the result will be c.shape[2:] + x.shape. - - Parameters - ---------- - x, y : array_like, compatible objects - The two dimensional series is evaluated at the points `(x, y)`, - where `x` and `y` must have the same shape. 
If `x` or `y` is a list - or tuple, it is first converted to an ndarray, otherwise it is left - unchanged and if it isn't an ndarray it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term - of multi-degree i,j is contained in ``c[i,j]``. If `c` has - dimension greater than two the remaining indices enumerate multiple - sets of coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional Legendre series at points formed - from pairs of corresponding values from `x` and `y`. - - See Also - -------- - legval, leggrid2d, legval3d, leggrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._valnd(legval, c, x, y) - - -def leggrid2d(x, y, c): - """ - Evaluate a 2-D Legendre series on the Cartesian product of x and y. - - This function returns the values: - - .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b) - - where the points `(a, b)` consist of all pairs formed by taking - `a` from `x` and `b` from `y`. The resulting points form a grid with - `x` in the first dimension and `y` in the second. - - The parameters `x` and `y` are converted to arrays only if they are - tuples or a lists, otherwise they are treated as a scalars. In either - case, either `x` and `y` or their elements must support multiplication - and addition both with themselves and with the elements of `c`. - - If `c` has fewer than two dimensions, ones are implicitly appended to - its shape to make it 2-D. The shape of the result will be c.shape[2:] + - x.shape + y.shape. - - Parameters - ---------- - x, y : array_like, compatible objects - The two dimensional series is evaluated at the points in the - Cartesian product of `x` and `y`. If `x` or `y` is a list or - tuple, it is first converted to an ndarray, otherwise it is left - unchanged and, if it isn't an ndarray, it is treated as a scalar. 
- c : array_like - Array of coefficients ordered so that the coefficient of the term of - multi-degree i,j is contained in `c[i,j]`. If `c` has dimension - greater than two the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional Chebyshev series at points in the - Cartesian product of `x` and `y`. - - See Also - -------- - legval, legval2d, legval3d, leggrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._gridnd(legval, c, x, y) - - -def legval3d(x, y, z, c): - """ - Evaluate a 3-D Legendre series at points (x, y, z). - - This function returns the values: - - .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z) - - The parameters `x`, `y`, and `z` are converted to arrays only if - they are tuples or a lists, otherwise they are treated as a scalars and - they must have the same shape after conversion. In either case, either - `x`, `y`, and `z` or their elements must support multiplication and - addition both with themselves and with the elements of `c`. - - If `c` has fewer than 3 dimensions, ones are implicitly appended to its - shape to make it 3-D. The shape of the result will be c.shape[3:] + - x.shape. - - Parameters - ---------- - x, y, z : array_like, compatible object - The three dimensional series is evaluated at the points - `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If - any of `x`, `y`, or `z` is a list or tuple, it is first converted - to an ndarray, otherwise it is left unchanged and if it isn't an - ndarray it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term of - multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension - greater than 3 the remaining indices enumerate multiple sets of - coefficients. 
- - Returns - ------- - values : ndarray, compatible object - The values of the multidimensional polynomial on points formed with - triples of corresponding values from `x`, `y`, and `z`. - - See Also - -------- - legval, legval2d, leggrid2d, leggrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._valnd(legval, c, x, y, z) - - -def leggrid3d(x, y, z, c): - """ - Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z. - - This function returns the values: - - .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c) - - where the points `(a, b, c)` consist of all triples formed by taking - `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form - a grid with `x` in the first dimension, `y` in the second, and `z` in - the third. - - The parameters `x`, `y`, and `z` are converted to arrays only if they - are tuples or a lists, otherwise they are treated as a scalars. In - either case, either `x`, `y`, and `z` or their elements must support - multiplication and addition both with themselves and with the elements - of `c`. - - If `c` has fewer than three dimensions, ones are implicitly appended to - its shape to make it 3-D. The shape of the result will be c.shape[3:] + - x.shape + y.shape + z.shape. - - Parameters - ---------- - x, y, z : array_like, compatible objects - The three dimensional series is evaluated at the points in the - Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a - list or tuple, it is first converted to an ndarray, otherwise it is - left unchanged and, if it isn't an ndarray, it is treated as a - scalar. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree i,j are contained in ``c[i,j]``. If `c` has dimension - greater than two the remaining indices enumerate multiple sets of - coefficients. 
- - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional polynomial at points in the Cartesian - product of `x` and `y`. - - See Also - -------- - legval, legval2d, leggrid2d, legval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._gridnd(legval, c, x, y, z) - - -def legvander(x, deg): - """Pseudo-Vandermonde matrix of given degree. - - Returns the pseudo-Vandermonde matrix of degree `deg` and sample points - `x`. The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., i] = L_i(x) - - where `0 <= i <= deg`. The leading indices of `V` index the elements of - `x` and the last index is the degree of the Legendre polynomial. - - If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the - array ``V = legvander(x, n)``, then ``np.dot(V, c)`` and - ``legval(x, c)`` are the same up to roundoff. This equivalence is - useful both for least squares fitting and for the evaluation of a large - number of Legendre series of the same degree and sample points. - - Parameters - ---------- - x : array_like - Array of points. The dtype is converted to float64 or complex128 - depending on whether any of the elements are complex. If `x` is - scalar it is converted to a 1-D array. - deg : int - Degree of the resulting matrix. - - Returns - ------- - vander : ndarray - The pseudo-Vandermonde matrix. The shape of the returned matrix is - ``x.shape + (deg + 1,)``, where The last index is the degree of the - corresponding Legendre polynomial. The dtype will be the same as - the converted `x`. - - """ - ideg = pu._deprecate_as_int(deg, "deg") - if ideg < 0: - raise ValueError("deg must be non-negative") - - x = np.array(x, copy=False, ndmin=1) + 0.0 - dims = (ideg + 1,) + x.shape - dtyp = x.dtype - v = np.empty(dims, dtype=dtyp) - # Use forward recursion to generate the entries. This is not as accurate - # as reverse recursion in this application but it is more efficient. 
- v[0] = x*0 + 1 - if ideg > 0: - v[1] = x - for i in range(2, ideg + 1): - v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i - return np.moveaxis(v, 0, -1) - - -def legvander2d(x, y, deg): - """Pseudo-Vandermonde matrix of given degrees. - - Returns the pseudo-Vandermonde matrix of degrees `deg` and sample - points `(x, y)`. The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y), - - where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of - `V` index the points `(x, y)` and the last index encodes the degrees of - the Legendre polynomials. - - If ``V = legvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` - correspond to the elements of a 2-D coefficient array `c` of shape - (xdeg + 1, ydeg + 1) in the order - - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... - - and ``np.dot(V, c.flat)`` and ``legval2d(x, y, c)`` will be the same - up to roundoff. This equivalence is useful both for least squares - fitting and for the evaluation of a large number of 2-D Legendre - series of the same degrees and sample points. - - Parameters - ---------- - x, y : array_like - Arrays of point coordinates, all of the same shape. The dtypes - will be converted to either float64 or complex128 depending on - whether any of the elements are complex. Scalars are converted to - 1-D arrays. - deg : list of ints - List of maximum degrees of the form [x_deg, y_deg]. - - Returns - ------- - vander2d : ndarray - The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg([1]+1)`. The dtype will be the same - as the converted `x` and `y`. - - See Also - -------- - legvander, legvander3d, legval2d, legval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._vander_nd_flat((legvander, legvander), (x, y), deg) - - -def legvander3d(x, y, z, deg): - """Pseudo-Vandermonde matrix of given degrees. 
- - Returns the pseudo-Vandermonde matrix of degrees `deg` and sample - points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`, - then The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z), - - where `0 <= i <= l`, `0 <= j <= m`, and `0 <= j <= n`. The leading - indices of `V` index the points `(x, y, z)` and the last index encodes - the degrees of the Legendre polynomials. - - If ``V = legvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns - of `V` correspond to the elements of a 3-D coefficient array `c` of - shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order - - .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... - - and ``np.dot(V, c.flat)`` and ``legval3d(x, y, z, c)`` will be the - same up to roundoff. This equivalence is useful both for least squares - fitting and for the evaluation of a large number of 3-D Legendre - series of the same degrees and sample points. - - Parameters - ---------- - x, y, z : array_like - Arrays of point coordinates, all of the same shape. The dtypes will - be converted to either float64 or complex128 depending on whether - any of the elements are complex. Scalars are converted to 1-D - arrays. - deg : list of ints - List of maximum degrees of the form [x_deg, y_deg, z_deg]. - - Returns - ------- - vander3d : ndarray - The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg([1]+1)*(deg[2]+1)`. The dtype will - be the same as the converted `x`, `y`, and `z`. - - See Also - -------- - legvander, legvander3d, legval2d, legval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._vander_nd_flat((legvander, legvander, legvander), (x, y, z), deg) - - -def legfit(x, y, deg, rcond=None, full=False, w=None): - """ - Least squares fit of Legendre series to data. 
- - Return the coefficients of a Legendre series of degree `deg` that is the - least squares fit to the data values `y` given at points `x`. If `y` is - 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple - fits are done, one for each column of `y`, and the resulting - coefficients are stored in the corresponding columns of a 2-D return. - The fitted polynomial(s) are in the form - - .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x), - - where `n` is `deg`. - - Parameters - ---------- - x : array_like, shape (M,) - x-coordinates of the M sample points ``(x[i], y[i])``. - y : array_like, shape (M,) or (M, K) - y-coordinates of the sample points. Several data sets of sample - points sharing the same x-coordinates can be fitted at once by - passing in a 2D-array that contains one dataset per column. - deg : int or 1-D array_like - Degree(s) of the fitting polynomials. If `deg` is a single integer - all terms up to and including the `deg`'th term are included in the - fit. For NumPy versions >= 1.11.0 a list of integers specifying the - degrees of the terms to include may be used instead. - rcond : float, optional - Relative condition number of the fit. Singular values smaller than - this relative to the largest singular value will be ignored. The - default value is len(x)*eps, where eps is the relative precision of - the float type, about 2e-16 in most cases. - full : bool, optional - Switch determining nature of return value. When it is False (the - default) just the coefficients are returned, when True diagnostic - information from the singular value decomposition is also returned. - w : array_like, shape (`M`,), optional - Weights. If not None, the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the - weights are chosen so that the errors of the products ``w[i]*y[i]`` - all have the same variance. The default value is None. - - .. 
versionadded:: 1.5.0 - - Returns - ------- - coef : ndarray, shape (M,) or (M, K) - Legendre coefficients ordered from low to high. If `y` was - 2-D, the coefficients for the data in column k of `y` are in - column `k`. If `deg` is specified as a list, coefficients for - terms not included in the fit are set equal to zero in the - returned `coef`. - - [residuals, rank, singular_values, rcond] : list - These values are only returned if `full` = True - - resid -- sum of squared residuals of the least squares fit - rank -- the numerical rank of the scaled Vandermonde matrix - sv -- singular values of the scaled Vandermonde matrix - rcond -- value of `rcond`. - - For more details, see `linalg.lstsq`. - - Warns - ----- - RankWarning - The rank of the coefficient matrix in the least-squares fit is - deficient. The warning is only raised if `full` = False. The - warnings can be turned off by - - >>> import warnings - >>> warnings.simplefilter('ignore', np.RankWarning) - - See Also - -------- - chebfit, polyfit, lagfit, hermfit, hermefit - legval : Evaluates a Legendre series. - legvander : Vandermonde matrix of Legendre series. - legweight : Legendre weight function (= 1). - linalg.lstsq : Computes a least-squares fit from the matrix. - scipy.interpolate.UnivariateSpline : Computes spline fits. - - Notes - ----- - The solution is the coefficients of the Legendre series `p` that - minimizes the sum of the weighted squared errors - - .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, - - where :math:`w_j` are the weights. This problem is solved by setting up - as the (typically) overdetermined matrix equation - - .. math:: V(x) * c = w * y, - - where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the - coefficients to be solved for, `w` are the weights, and `y` are the - observed values. This equation is then solved using the singular value - decomposition of `V`. 
- - If some of the singular values of `V` are so small that they are - neglected, then a `RankWarning` will be issued. This means that the - coefficient values may be poorly determined. Using a lower order fit - will usually get rid of the warning. The `rcond` parameter can also be - set to a value smaller than its default, but the resulting fit may be - spurious and have large contributions from roundoff error. - - Fits using Legendre series are usually better conditioned than fits - using power series, but much can depend on the distribution of the - sample points and the smoothness of the data. If the quality of the fit - is inadequate splines may be a good alternative. - - References - ---------- - .. [1] Wikipedia, "Curve fitting", - https://en.wikipedia.org/wiki/Curve_fitting - - Examples - -------- - - """ - return pu._fit(legvander, x, y, deg, rcond, full, w) - - -def legcompanion(c): - """Return the scaled companion matrix of c. - - The basis polynomials are scaled so that the companion matrix is - symmetric when `c` is an Legendre basis polynomial. This provides - better eigenvalue estimates than the unscaled case and for basis - polynomials the eigenvalues are guaranteed to be real if - `numpy.linalg.eigvalsh` is used to obtain them. - - Parameters - ---------- - c : array_like - 1-D array of Legendre series coefficients ordered from low to high - degree. - - Returns - ------- - mat : ndarray - Scaled companion matrix of dimensions (deg, deg). - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - if len(c) < 2: - raise ValueError('Series must have maximum degree of at least 1.') - if len(c) == 2: - return np.array([[-c[0]/c[1]]]) - - n = len(c) - 1 - mat = np.zeros((n, n), dtype=c.dtype) - scl = 1./np.sqrt(2*np.arange(n) + 1) - top = mat.reshape(-1)[1::n+1] - bot = mat.reshape(-1)[n::n+1] - top[...] = np.arange(1, n)*scl[:n-1]*scl[1:n] - bot[...] 
= top - mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*(n/(2*n - 1)) - return mat - - -def legroots(c): - """ - Compute the roots of a Legendre series. - - Return the roots (a.k.a. "zeros") of the polynomial - - .. math:: p(x) = \\sum_i c[i] * L_i(x). - - Parameters - ---------- - c : 1-D array_like - 1-D array of coefficients. - - Returns - ------- - out : ndarray - Array of the roots of the series. If all the roots are real, - then `out` is also real, otherwise it is complex. - - See Also - -------- - polyroots, chebroots, lagroots, hermroots, hermeroots - - Notes - ----- - The root estimates are obtained as the eigenvalues of the companion - matrix, Roots far from the origin of the complex plane may have large - errors due to the numerical instability of the series for such values. - Roots with multiplicity greater than 1 will also show larger errors as - the value of the series near such points is relatively insensitive to - errors in the roots. Isolated roots near the origin can be improved by - a few iterations of Newton's method. - - The Legendre series basis polynomials aren't powers of ``x`` so the - results of this function may seem unintuitive. - - Examples - -------- - >>> import numpy.polynomial.legendre as leg - >>> leg.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0, all real roots - array([-0.85099543, -0.11407192, 0.51506735]) # may vary - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - if len(c) < 2: - return np.array([], dtype=c.dtype) - if len(c) == 2: - return np.array([-c[0]/c[1]]) - - # rotated companion matrix reduces error - m = legcompanion(c)[::-1,::-1] - r = la.eigvals(m) - r.sort() - return r - - -def leggauss(deg): - """ - Gauss-Legendre quadrature. - - Computes the sample points and weights for Gauss-Legendre quadrature. - These sample points and weights will correctly integrate polynomials of - degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with - the weight function :math:`f(x) = 1`. 
- - Parameters - ---------- - deg : int - Number of sample points and weights. It must be >= 1. - - Returns - ------- - x : ndarray - 1-D ndarray containing the sample points. - y : ndarray - 1-D ndarray containing the weights. - - Notes - ----- - - .. versionadded:: 1.7.0 - - The results have only been tested up to degree 100, higher degrees may - be problematic. The weights are determined by using the fact that - - .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k)) - - where :math:`c` is a constant independent of :math:`k` and :math:`x_k` - is the k'th root of :math:`L_n`, and then scaling the results to get - the right value when integrating 1. - - """ - ideg = pu._deprecate_as_int(deg, "deg") - if ideg <= 0: - raise ValueError("deg must be a positive integer") - - # first approximation of roots. We use the fact that the companion - # matrix is symmetric in this case in order to obtain better zeros. - c = np.array([0]*deg + [1]) - m = legcompanion(c) - x = la.eigvalsh(m) - - # improve roots by one application of Newton - dy = legval(x, c) - df = legval(x, legder(c)) - x -= dy/df - - # compute the weights. We scale the factor to avoid possible numerical - # overflow. - fm = legval(x, c[1:]) - fm /= np.abs(fm).max() - df /= np.abs(df).max() - w = 1/(fm * df) - - # for Legendre we can also symmetrize - w = (w + w[::-1])/2 - x = (x - x[::-1])/2 - - # scale w to get the right value - w *= 2. / w.sum() - - return x, w - - -def legweight(x): - """ - Weight function of the Legendre polynomials. - - The weight function is :math:`1` and the interval of integration is - :math:`[-1, 1]`. The Legendre polynomials are orthogonal, but not - normalized, with respect to this weight function. - - Parameters - ---------- - x : array_like - Values at which the weight function will be computed. - - Returns - ------- - w : ndarray - The weight function at `x`. - - Notes - ----- - - .. 
versionadded:: 1.7.0 - - """ - w = x*0.0 + 1.0 - return w - -# -# Legendre series class -# - -class Legendre(ABCPolyBase): - """A Legendre series class. - - The Legendre class provides the standard Python numerical methods - '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the - attributes and methods listed in the `ABCPolyBase` documentation. - - Parameters - ---------- - coef : array_like - Legendre coefficients in order of increasing degree, i.e., - ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``. - domain : (2,) array_like, optional - Domain to use. The interval ``[domain[0], domain[1]]`` is mapped - to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [-1, 1]. - window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [-1, 1]. - - .. versionadded:: 1.6.0 - - """ - # Virtual Functions - _add = staticmethod(legadd) - _sub = staticmethod(legsub) - _mul = staticmethod(legmul) - _div = staticmethod(legdiv) - _pow = staticmethod(legpow) - _val = staticmethod(legval) - _int = staticmethod(legint) - _der = staticmethod(legder) - _fit = staticmethod(legfit) - _line = staticmethod(legline) - _roots = staticmethod(legroots) - _fromroots = staticmethod(legfromroots) - - # Virtual properties - nickname = 'leg' - domain = np.array(legdomain) - window = np.array(legdomain) - basis_name = 'P' diff --git a/venv/lib/python3.7/site-packages/numpy/polynomial/polynomial.py b/venv/lib/python3.7/site-packages/numpy/polynomial/polynomial.py deleted file mode 100644 index 315ea14..0000000 --- a/venv/lib/python3.7/site-packages/numpy/polynomial/polynomial.py +++ /dev/null @@ -1,1493 +0,0 @@ -""" -Objects for dealing with polynomials. - -This module provides a number of objects (mostly functions) useful for -dealing with polynomials, including a `Polynomial` class that -encapsulates the usual arithmetic operations. 
(General information -on how this module represents and works with polynomial objects is in -the docstring for its "parent" sub-package, `numpy.polynomial`). - -Constants ---------- -- `polydomain` -- Polynomial default domain, [-1,1]. -- `polyzero` -- (Coefficients of the) "zero polynomial." -- `polyone` -- (Coefficients of the) constant polynomial 1. -- `polyx` -- (Coefficients of the) identity map polynomial, ``f(x) = x``. - -Arithmetic ----------- -- `polyadd` -- add two polynomials. -- `polysub` -- subtract one polynomial from another. -- `polymulx` -- multiply a polynomial in ``P_i(x)`` by ``x``. -- `polymul` -- multiply two polynomials. -- `polydiv` -- divide one polynomial by another. -- `polypow` -- raise a polynomial to a positive integer power. -- `polyval` -- evaluate a polynomial at given points. -- `polyval2d` -- evaluate a 2D polynomial at given points. -- `polyval3d` -- evaluate a 3D polynomial at given points. -- `polygrid2d` -- evaluate a 2D polynomial on a Cartesian product. -- `polygrid3d` -- evaluate a 3D polynomial on a Cartesian product. - -Calculus --------- -- `polyder` -- differentiate a polynomial. -- `polyint` -- integrate a polynomial. - -Misc Functions --------------- -- `polyfromroots` -- create a polynomial with specified roots. -- `polyroots` -- find the roots of a polynomial. -- `polyvalfromroots` -- evaluate a polynomial at given points from roots. -- `polyvander` -- Vandermonde-like matrix for powers. -- `polyvander2d` -- Vandermonde-like matrix for 2D power series. -- `polyvander3d` -- Vandermonde-like matrix for 3D power series. -- `polycompanion` -- companion matrix in power series form. -- `polyfit` -- least-squares fit returning a polynomial. -- `polytrim` -- trim leading coefficients from a polynomial. -- `polyline` -- polynomial representing given straight line. - -Classes -------- -- `Polynomial` -- polynomial class. 
- -See Also --------- -`numpy.polynomial` - -""" -from __future__ import division, absolute_import, print_function - -__all__ = [ - 'polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', 'polyadd', - 'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', 'polyval', - 'polyvalfromroots', 'polyder', 'polyint', 'polyfromroots', 'polyvander', - 'polyfit', 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d', - 'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d'] - -import warnings -import numpy as np -import numpy.linalg as la -from numpy.core.multiarray import normalize_axis_index - -from . import polyutils as pu -from ._polybase import ABCPolyBase - -polytrim = pu.trimcoef - -# -# These are constant arrays are of integer type so as to be compatible -# with the widest range of other types, such as Decimal. -# - -# Polynomial default domain. -polydomain = np.array([-1, 1]) - -# Polynomial coefficients representing zero. -polyzero = np.array([0]) - -# Polynomial coefficients representing one. -polyone = np.array([1]) - -# Polynomial coefficients representing the identity x. -polyx = np.array([0, 1]) - -# -# Polynomial series functions -# - - -def polyline(off, scl): - """ - Returns an array representing a linear polynomial. - - Parameters - ---------- - off, scl : scalars - The "y-intercept" and "slope" of the line, respectively. - - Returns - ------- - y : ndarray - This module's representation of the linear polynomial ``off + - scl*x``. - - See Also - -------- - chebline - - Examples - -------- - >>> from numpy.polynomial import polynomial as P - >>> P.polyline(1,-1) - array([ 1, -1]) - >>> P.polyval(1, P.polyline(1,-1)) # should be 0 - 0.0 - - """ - if scl != 0: - return np.array([off, scl]) - else: - return np.array([off]) - - -def polyfromroots(roots): - """ - Generate a monic polynomial with given roots. - - Return the coefficients of the polynomial - - .. math:: p(x) = (x - r_0) * (x - r_1) * ... 
* (x - r_n), - - where the `r_n` are the roots specified in `roots`. If a zero has - multiplicity n, then it must appear in `roots` n times. For instance, - if 2 is a root of multiplicity three and 3 is a root of multiplicity 2, - then `roots` looks something like [2, 2, 2, 3, 3]. The roots can appear - in any order. - - If the returned coefficients are `c`, then - - .. math:: p(x) = c_0 + c_1 * x + ... + x^n - - The coefficient of the last term is 1 for monic polynomials in this - form. - - Parameters - ---------- - roots : array_like - Sequence containing the roots. - - Returns - ------- - out : ndarray - 1-D array of the polynomial's coefficients If all the roots are - real, then `out` is also real, otherwise it is complex. (see - Examples below). - - See Also - -------- - chebfromroots, legfromroots, lagfromroots, hermfromroots - hermefromroots - - Notes - ----- - The coefficients are determined by multiplying together linear factors - of the form `(x - r_i)`, i.e. - - .. math:: p(x) = (x - r_0) (x - r_1) ... (x - r_n) - - where ``n == len(roots) - 1``; note that this implies that `1` is always - returned for :math:`a_n`. - - Examples - -------- - >>> from numpy.polynomial import polynomial as P - >>> P.polyfromroots((-1,0,1)) # x(x - 1)(x + 1) = x^3 - x - array([ 0., -1., 0., 1.]) - >>> j = complex(0,1) - >>> P.polyfromroots((-j,j)) # complex returned, though values are real - array([1.+0.j, 0.+0.j, 1.+0.j]) - - """ - return pu._fromroots(polyline, polymul, roots) - - -def polyadd(c1, c2): - """ - Add one polynomial to another. - - Returns the sum of two polynomials `c1` + `c2`. The arguments are - sequences of coefficients from lowest order term to highest, i.e., - [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of polynomial coefficients ordered from low to high. - - Returns - ------- - out : ndarray - The coefficient array representing their sum. 
- - See Also - -------- - polysub, polymulx, polymul, polydiv, polypow - - Examples - -------- - >>> from numpy.polynomial import polynomial as P - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> sum = P.polyadd(c1,c2); sum - array([4., 4., 4.]) - >>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2) - 28.0 - - """ - return pu._add(c1, c2) - - -def polysub(c1, c2): - """ - Subtract one polynomial from another. - - Returns the difference of two polynomials `c1` - `c2`. The arguments - are sequences of coefficients from lowest order term to highest, i.e., - [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of polynomial coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of coefficients representing their difference. - - See Also - -------- - polyadd, polymulx, polymul, polydiv, polypow - - Examples - -------- - >>> from numpy.polynomial import polynomial as P - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> P.polysub(c1,c2) - array([-2., 0., 2.]) - >>> P.polysub(c2,c1) # -P.polysub(c1,c2) - array([ 2., 0., -2.]) - - """ - return pu._sub(c1, c2) - - -def polymulx(c): - """Multiply a polynomial by x. - - Multiply the polynomial `c` by x, where x is the independent - variable. - - - Parameters - ---------- - c : array_like - 1-D array of polynomial coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the result of the multiplication. - - See Also - -------- - polyadd, polysub, polymul, polydiv, polypow - - Notes - ----- - - .. versionadded:: 1.5.0 - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - # The zero series needs special treatment - if len(c) == 1 and c[0] == 0: - return c - - prd = np.empty(len(c) + 1, dtype=c.dtype) - prd[0] = c[0]*0 - prd[1:] = c - return prd - - -def polymul(c1, c2): - """ - Multiply one polynomial by another. - - Returns the product of two polynomials `c1` * `c2`. 
The arguments are - sequences of coefficients, from lowest order term to highest, e.g., - [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2.`` - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of coefficients representing a polynomial, relative to the - "standard" basis, and ordered from lowest order term to highest. - - Returns - ------- - out : ndarray - Of the coefficients of their product. - - See Also - -------- - polyadd, polysub, polymulx, polydiv, polypow - - Examples - -------- - >>> from numpy.polynomial import polynomial as P - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> P.polymul(c1,c2) - array([ 3., 8., 14., 8., 3.]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - ret = np.convolve(c1, c2) - return pu.trimseq(ret) - - -def polydiv(c1, c2): - """ - Divide one polynomial by another. - - Returns the quotient-with-remainder of two polynomials `c1` / `c2`. - The arguments are sequences of coefficients, from lowest order term - to highest, e.g., [1,2,3] represents ``1 + 2*x + 3*x**2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of polynomial coefficients ordered from low to high. - - Returns - ------- - [quo, rem] : ndarrays - Of coefficient series representing the quotient and remainder. 
- - See Also - -------- - polyadd, polysub, polymulx, polymul, polypow - - Examples - -------- - >>> from numpy.polynomial import polynomial as P - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> P.polydiv(c1,c2) - (array([3.]), array([-8., -4.])) - >>> P.polydiv(c2,c1) - (array([ 0.33333333]), array([ 2.66666667, 1.33333333])) # may vary - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if c2[-1] == 0: - raise ZeroDivisionError() - - # note: this is more efficient than `pu._div(polymul, c1, c2)` - lc1 = len(c1) - lc2 = len(c2) - if lc1 < lc2: - return c1[:1]*0, c1 - elif lc2 == 1: - return c1/c2[-1], c1[:1]*0 - else: - dlen = lc1 - lc2 - scl = c2[-1] - c2 = c2[:-1]/scl - i = dlen - j = lc1 - 1 - while i >= 0: - c1[i:j] -= c2*c1[j] - i -= 1 - j -= 1 - return c1[j+1:]/scl, pu.trimseq(c1[:j+1]) - - -def polypow(c, pow, maxpower=None): - """Raise a polynomial to a power. - - Returns the polynomial `c` raised to the power `pow`. The argument - `c` is a sequence of coefficients ordered from low to high. i.e., - [1,2,3] is the series ``1 + 2*x + 3*x**2.`` - - Parameters - ---------- - c : array_like - 1-D array of array of series coefficients ordered from low to - high degree. - pow : integer - Power to which the series will be raised - maxpower : integer, optional - Maximum power allowed. This is mainly to limit growth of the series - to unmanageable size. Default is 16 - - Returns - ------- - coef : ndarray - Power series of power. - - See Also - -------- - polyadd, polysub, polymulx, polymul, polydiv - - Examples - -------- - >>> from numpy.polynomial import polynomial as P - >>> P.polypow([1,2,3], 2) - array([ 1., 4., 10., 12., 9.]) - - """ - # note: this is more efficient than `pu._pow(polymul, c1, c2)`, as it - # avoids calling `as_series` repeatedly - return pu._pow(np.convolve, c, pow, maxpower) - - -def polyder(c, m=1, scl=1, axis=0): - """ - Differentiate a polynomial. 
- - Returns the polynomial coefficients `c` differentiated `m` times along - `axis`. At each iteration the result is multiplied by `scl` (the - scaling factor is for use in a linear change of variable). The - argument `c` is an array of coefficients from low to high degree along - each axis, e.g., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2`` - while [[1,2],[1,2]] represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is - ``x`` and axis=1 is ``y``. - - Parameters - ---------- - c : array_like - Array of polynomial coefficients. If c is multidimensional the - different axis correspond to different variables with the degree - in each axis given by the corresponding index. - m : int, optional - Number of derivatives taken, must be non-negative. (Default: 1) - scl : scalar, optional - Each differentiation is multiplied by `scl`. The end result is - multiplication by ``scl**m``. This is for use in a linear change - of variable. (Default: 1) - axis : int, optional - Axis over which the derivative is taken. (Default: 0). - - .. versionadded:: 1.7.0 - - Returns - ------- - der : ndarray - Polynomial coefficients of the derivative. 
- - See Also - -------- - polyint - - Examples - -------- - >>> from numpy.polynomial import polynomial as P - >>> c = (1,2,3,4) # 1 + 2x + 3x**2 + 4x**3 - >>> P.polyder(c) # (d/dx)(c) = 2 + 6x + 12x**2 - array([ 2., 6., 12.]) - >>> P.polyder(c,3) # (d**3/dx**3)(c) = 24 - array([24.]) - >>> P.polyder(c,scl=-1) # (d/d(-x))(c) = -2 - 6x - 12x**2 - array([ -2., -6., -12.]) - >>> P.polyder(c,2,-1) # (d**2/d(-x)**2)(c) = 6 + 24x - array([ 6., 24.]) - - """ - c = np.array(c, ndmin=1, copy=True) - if c.dtype.char in '?bBhHiIlLqQpP': - # astype fails with NA - c = c + 0.0 - cdt = c.dtype - cnt = pu._deprecate_as_int(m, "the order of derivation") - iaxis = pu._deprecate_as_int(axis, "the axis") - if cnt < 0: - raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) - - if cnt == 0: - return c - - c = np.moveaxis(c, iaxis, 0) - n = len(c) - if cnt >= n: - c = c[:1]*0 - else: - for i in range(cnt): - n = n - 1 - c *= scl - der = np.empty((n,) + c.shape[1:], dtype=cdt) - for j in range(n, 0, -1): - der[j - 1] = j*c[j] - c = der - c = np.moveaxis(c, 0, iaxis) - return c - - -def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): - """ - Integrate a polynomial. - - Returns the polynomial coefficients `c` integrated `m` times from - `lbnd` along `axis`. At each iteration the resulting series is - **multiplied** by `scl` and an integration constant, `k`, is added. - The scaling factor is for use in a linear change of variable. ("Buyer - beware": note that, depending on what one is doing, one may want `scl` - to be the reciprocal of what one might expect; for more information, - see the Notes section below.) The argument `c` is an array of - coefficients, from low to high degree along each axis, e.g., [1,2,3] - represents the polynomial ``1 + 2*x + 3*x**2`` while [[1,2],[1,2]] - represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is ``x`` and axis=1 is - ``y``. 
- - Parameters - ---------- - c : array_like - 1-D array of polynomial coefficients, ordered from low to high. - m : int, optional - Order of integration, must be positive. (Default: 1) - k : {[], list, scalar}, optional - Integration constant(s). The value of the first integral at zero - is the first value in the list, the value of the second integral - at zero is the second value, etc. If ``k == []`` (the default), - all constants are set to zero. If ``m == 1``, a single scalar can - be given instead of a list. - lbnd : scalar, optional - The lower bound of the integral. (Default: 0) - scl : scalar, optional - Following each integration the result is *multiplied* by `scl` - before the integration constant is added. (Default: 1) - axis : int, optional - Axis over which the integral is taken. (Default: 0). - - .. versionadded:: 1.7.0 - - Returns - ------- - S : ndarray - Coefficient array of the integral. - - Raises - ------ - ValueError - If ``m < 1``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or - ``np.ndim(scl) != 0``. - - See Also - -------- - polyder - - Notes - ----- - Note that the result of each integration is *multiplied* by `scl`. Why - is this important to note? Say one is making a linear change of - variable :math:`u = ax + b` in an integral relative to `x`. Then - :math:`dx = du/a`, so one will need to set `scl` equal to - :math:`1/a` - perhaps not what one would have first thought. - - Examples - -------- - >>> from numpy.polynomial import polynomial as P - >>> c = (1,2,3) - >>> P.polyint(c) # should return array([0, 1, 1, 1]) - array([0., 1., 1., 1.]) - >>> P.polyint(c,3) # should return array([0, 0, 0, 1/6, 1/12, 1/20]) - array([ 0. , 0. , 0. 
, 0.16666667, 0.08333333, # may vary - 0.05 ]) - >>> P.polyint(c,k=3) # should return array([3, 1, 1, 1]) - array([3., 1., 1., 1.]) - >>> P.polyint(c,lbnd=-2) # should return array([6, 1, 1, 1]) - array([6., 1., 1., 1.]) - >>> P.polyint(c,scl=-2) # should return array([0, -2, -2, -2]) - array([ 0., -2., -2., -2.]) - - """ - c = np.array(c, ndmin=1, copy=True) - if c.dtype.char in '?bBhHiIlLqQpP': - # astype doesn't preserve mask attribute. - c = c + 0.0 - cdt = c.dtype - if not np.iterable(k): - k = [k] - cnt = pu._deprecate_as_int(m, "the order of integration") - iaxis = pu._deprecate_as_int(axis, "the axis") - if cnt < 0: - raise ValueError("The order of integration must be non-negative") - if len(k) > cnt: - raise ValueError("Too many integration constants") - if np.ndim(lbnd) != 0: - raise ValueError("lbnd must be a scalar.") - if np.ndim(scl) != 0: - raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) - - if cnt == 0: - return c - - k = list(k) + [0]*(cnt - len(k)) - c = np.moveaxis(c, iaxis, 0) - for i in range(cnt): - n = len(c) - c *= scl - if n == 1 and np.all(c[0] == 0): - c[0] += k[i] - else: - tmp = np.empty((n + 1,) + c.shape[1:], dtype=cdt) - tmp[0] = c[0]*0 - tmp[1] = c[0] - for j in range(1, n): - tmp[j + 1] = c[j]/(j + 1) - tmp[0] += k[i] - polyval(lbnd, tmp) - c = tmp - c = np.moveaxis(c, 0, iaxis) - return c - - -def polyval(x, c, tensor=True): - """ - Evaluate a polynomial at points x. - - If `c` is of length `n + 1`, this function returns the value - - .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n - - The parameter `x` is converted to an array only if it is a tuple or a - list, otherwise it is treated as a scalar. In either case, either `x` - or its elements must support multiplication and addition both with - themselves and with the elements of `c`. - - If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. 
If - `c` is multidimensional, then the shape of the result depends on the - value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + - x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that - scalars have shape (,). - - Trailing zeros in the coefficients will be used in the evaluation, so - they should be avoided if efficiency is a concern. - - Parameters - ---------- - x : array_like, compatible object - If `x` is a list or tuple, it is converted to an ndarray, otherwise - it is left unchanged and treated as a scalar. In either case, `x` - or its elements must support addition and multiplication with - with themselves and with the elements of `c`. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree n are contained in c[n]. If `c` is multidimensional the - remaining indices enumerate multiple polynomials. In the two - dimensional case the coefficients may be thought of as stored in - the columns of `c`. - tensor : boolean, optional - If True, the shape of the coefficient array is extended with ones - on the right, one for each dimension of `x`. Scalars have dimension 0 - for this action. The result is that every column of coefficients in - `c` is evaluated for every element of `x`. If False, `x` is broadcast - over the columns of `c` for the evaluation. This keyword is useful - when `c` is multidimensional. The default value is True. - - .. versionadded:: 1.7.0 - - Returns - ------- - values : ndarray, compatible object - The shape of the returned array is described above. - - See Also - -------- - polyval2d, polygrid2d, polyval3d, polygrid3d - - Notes - ----- - The evaluation uses Horner's method. 
- - Examples - -------- - >>> from numpy.polynomial.polynomial import polyval - >>> polyval(1, [1,2,3]) - 6.0 - >>> a = np.arange(4).reshape(2,2) - >>> a - array([[0, 1], - [2, 3]]) - >>> polyval(a, [1,2,3]) - array([[ 1., 6.], - [17., 34.]]) - >>> coef = np.arange(4).reshape(2,2) # multidimensional coefficients - >>> coef - array([[0, 1], - [2, 3]]) - >>> polyval([1,2], coef, tensor=True) - array([[2., 4.], - [4., 7.]]) - >>> polyval([1,2], coef, tensor=False) - array([2., 7.]) - - """ - c = np.array(c, ndmin=1, copy=False) - if c.dtype.char in '?bBhHiIlLqQpP': - # astype fails with NA - c = c + 0.0 - if isinstance(x, (tuple, list)): - x = np.asarray(x) - if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) - - c0 = c[-1] + x*0 - for i in range(2, len(c) + 1): - c0 = c[-i] + c0*x - return c0 - - -def polyvalfromroots(x, r, tensor=True): - """ - Evaluate a polynomial specified by its roots at points x. - - If `r` is of length `N`, this function returns the value - - .. math:: p(x) = \\prod_{n=1}^{N} (x - r_n) - - The parameter `x` is converted to an array only if it is a tuple or a - list, otherwise it is treated as a scalar. In either case, either `x` - or its elements must support multiplication and addition both with - themselves and with the elements of `r`. - - If `r` is a 1-D array, then `p(x)` will have the same shape as `x`. If `r` - is multidimensional, then the shape of the result depends on the value of - `tensor`. If `tensor is ``True`` the shape will be r.shape[1:] + x.shape; - that is, each polynomial is evaluated at every value of `x`. If `tensor` is - ``False``, the shape will be r.shape[1:]; that is, each polynomial is - evaluated only for the corresponding broadcast value of `x`. Note that - scalars have shape (,). - - .. versionadded:: 1.12 - - Parameters - ---------- - x : array_like, compatible object - If `x` is a list or tuple, it is converted to an ndarray, otherwise - it is left unchanged and treated as a scalar. 
In either case, `x` - or its elements must support addition and multiplication with - with themselves and with the elements of `r`. - r : array_like - Array of roots. If `r` is multidimensional the first index is the - root index, while the remaining indices enumerate multiple - polynomials. For instance, in the two dimensional case the roots - of each polynomial may be thought of as stored in the columns of `r`. - tensor : boolean, optional - If True, the shape of the roots array is extended with ones on the - right, one for each dimension of `x`. Scalars have dimension 0 for this - action. The result is that every column of coefficients in `r` is - evaluated for every element of `x`. If False, `x` is broadcast over the - columns of `r` for the evaluation. This keyword is useful when `r` is - multidimensional. The default value is True. - - Returns - ------- - values : ndarray, compatible object - The shape of the returned array is described above. - - See Also - -------- - polyroots, polyfromroots, polyval - - Examples - -------- - >>> from numpy.polynomial.polynomial import polyvalfromroots - >>> polyvalfromroots(1, [1,2,3]) - 0.0 - >>> a = np.arange(4).reshape(2,2) - >>> a - array([[0, 1], - [2, 3]]) - >>> polyvalfromroots(a, [-1, 0, 1]) - array([[-0., 0.], - [ 6., 24.]]) - >>> r = np.arange(-2, 2).reshape(2,2) # multidimensional coefficients - >>> r # each column of r defines one polynomial - array([[-2, -1], - [ 0, 1]]) - >>> b = [-2, 1] - >>> polyvalfromroots(b, r, tensor=True) - array([[-0., 3.], - [ 3., 0.]]) - >>> polyvalfromroots(b, r, tensor=False) - array([-0., 0.]) - """ - r = np.array(r, ndmin=1, copy=False) - if r.dtype.char in '?bBhHiIlLqQpP': - r = r.astype(np.double) - if isinstance(x, (tuple, list)): - x = np.asarray(x) - if isinstance(x, np.ndarray): - if tensor: - r = r.reshape(r.shape + (1,)*x.ndim) - elif x.ndim >= r.ndim: - raise ValueError("x.ndim must be < r.ndim when tensor == False") - return np.prod(x - r, axis=0) - - -def polyval2d(x, 
y, c): - """ - Evaluate a 2-D polynomial at points (x, y). - - This function returns the value - - .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * x^i * y^j - - The parameters `x` and `y` are converted to arrays only if they are - tuples or a lists, otherwise they are treated as a scalars and they - must have the same shape after conversion. In either case, either `x` - and `y` or their elements must support multiplication and addition both - with themselves and with the elements of `c`. - - If `c` has fewer than two dimensions, ones are implicitly appended to - its shape to make it 2-D. The shape of the result will be c.shape[2:] + - x.shape. - - Parameters - ---------- - x, y : array_like, compatible objects - The two dimensional series is evaluated at the points `(x, y)`, - where `x` and `y` must have the same shape. If `x` or `y` is a list - or tuple, it is first converted to an ndarray, otherwise it is left - unchanged and, if it isn't an ndarray, it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term - of multi-degree i,j is contained in `c[i,j]`. If `c` has - dimension greater than two the remaining indices enumerate multiple - sets of coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional polynomial at points formed with - pairs of corresponding values from `x` and `y`. - - See Also - -------- - polyval, polygrid2d, polyval3d, polygrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._valnd(polyval, c, x, y) - - -def polygrid2d(x, y, c): - """ - Evaluate a 2-D polynomial on the Cartesian product of x and y. - - This function returns the values: - - .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * a^i * b^j - - where the points `(a, b)` consist of all pairs formed by taking - `a` from `x` and `b` from `y`. The resulting points form a grid with - `x` in the first dimension and `y` in the second. 
- - The parameters `x` and `y` are converted to arrays only if they are - tuples or a lists, otherwise they are treated as a scalars. In either - case, either `x` and `y` or their elements must support multiplication - and addition both with themselves and with the elements of `c`. - - If `c` has fewer than two dimensions, ones are implicitly appended to - its shape to make it 2-D. The shape of the result will be c.shape[2:] + - x.shape + y.shape. - - Parameters - ---------- - x, y : array_like, compatible objects - The two dimensional series is evaluated at the points in the - Cartesian product of `x` and `y`. If `x` or `y` is a list or - tuple, it is first converted to an ndarray, otherwise it is left - unchanged and, if it isn't an ndarray, it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree i,j are contained in ``c[i,j]``. If `c` has dimension - greater than two the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional polynomial at points in the Cartesian - product of `x` and `y`. - - See Also - -------- - polyval, polyval2d, polyval3d, polygrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._gridnd(polyval, c, x, y) - - -def polyval3d(x, y, z, c): - """ - Evaluate a 3-D polynomial at points (x, y, z). - - This function returns the values: - - .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * x^i * y^j * z^k - - The parameters `x`, `y`, and `z` are converted to arrays only if - they are tuples or a lists, otherwise they are treated as a scalars and - they must have the same shape after conversion. In either case, either - `x`, `y`, and `z` or their elements must support multiplication and - addition both with themselves and with the elements of `c`. - - If `c` has fewer than 3 dimensions, ones are implicitly appended to its - shape to make it 3-D. 
The shape of the result will be c.shape[3:] + - x.shape. - - Parameters - ---------- - x, y, z : array_like, compatible object - The three dimensional series is evaluated at the points - `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If - any of `x`, `y`, or `z` is a list or tuple, it is first converted - to an ndarray, otherwise it is left unchanged and if it isn't an - ndarray it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term of - multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension - greater than 3 the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the multidimensional polynomial on points formed with - triples of corresponding values from `x`, `y`, and `z`. - - See Also - -------- - polyval, polyval2d, polygrid2d, polygrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._valnd(polyval, c, x, y, z) - - -def polygrid3d(x, y, z, c): - """ - Evaluate a 3-D polynomial on the Cartesian product of x, y and z. - - This function returns the values: - - .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * a^i * b^j * c^k - - where the points `(a, b, c)` consist of all triples formed by taking - `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form - a grid with `x` in the first dimension, `y` in the second, and `z` in - the third. - - The parameters `x`, `y`, and `z` are converted to arrays only if they - are tuples or a lists, otherwise they are treated as a scalars. In - either case, either `x`, `y`, and `z` or their elements must support - multiplication and addition both with themselves and with the elements - of `c`. - - If `c` has fewer than three dimensions, ones are implicitly appended to - its shape to make it 3-D. The shape of the result will be c.shape[3:] + - x.shape + y.shape + z.shape. 
- - Parameters - ---------- - x, y, z : array_like, compatible objects - The three dimensional series is evaluated at the points in the - Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a - list or tuple, it is first converted to an ndarray, otherwise it is - left unchanged and, if it isn't an ndarray, it is treated as a - scalar. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree i,j are contained in ``c[i,j]``. If `c` has dimension - greater than two the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional polynomial at points in the Cartesian - product of `x` and `y`. - - See Also - -------- - polyval, polyval2d, polygrid2d, polyval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - return pu._gridnd(polyval, c, x, y, z) - - -def polyvander(x, deg): - """Vandermonde matrix of given degree. - - Returns the Vandermonde matrix of degree `deg` and sample points - `x`. The Vandermonde matrix is defined by - - .. math:: V[..., i] = x^i, - - where `0 <= i <= deg`. The leading indices of `V` index the elements of - `x` and the last index is the power of `x`. - - If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the - matrix ``V = polyvander(x, n)``, then ``np.dot(V, c)`` and - ``polyval(x, c)`` are the same up to roundoff. This equivalence is - useful both for least squares fitting and for the evaluation of a large - number of polynomials of the same degree and sample points. - - Parameters - ---------- - x : array_like - Array of points. The dtype is converted to float64 or complex128 - depending on whether any of the elements are complex. If `x` is - scalar it is converted to a 1-D array. - deg : int - Degree of the resulting matrix. - - Returns - ------- - vander : ndarray. - The Vandermonde matrix. 
The shape of the returned matrix is - ``x.shape + (deg + 1,)``, where the last index is the power of `x`. - The dtype will be the same as the converted `x`. - - See Also - -------- - polyvander2d, polyvander3d - - """ - ideg = pu._deprecate_as_int(deg, "deg") - if ideg < 0: - raise ValueError("deg must be non-negative") - - x = np.array(x, copy=False, ndmin=1) + 0.0 - dims = (ideg + 1,) + x.shape - dtyp = x.dtype - v = np.empty(dims, dtype=dtyp) - v[0] = x*0 + 1 - if ideg > 0: - v[1] = x - for i in range(2, ideg + 1): - v[i] = v[i-1]*x - return np.moveaxis(v, 0, -1) - - -def polyvander2d(x, y, deg): - """Pseudo-Vandermonde matrix of given degrees. - - Returns the pseudo-Vandermonde matrix of degrees `deg` and sample - points `(x, y)`. The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., (deg[1] + 1)*i + j] = x^i * y^j, - - where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of - `V` index the points `(x, y)` and the last index encodes the powers of - `x` and `y`. - - If ``V = polyvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` - correspond to the elements of a 2-D coefficient array `c` of shape - (xdeg + 1, ydeg + 1) in the order - - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... - - and ``np.dot(V, c.flat)`` and ``polyval2d(x, y, c)`` will be the same - up to roundoff. This equivalence is useful both for least squares - fitting and for the evaluation of a large number of 2-D polynomials - of the same degrees and sample points. - - Parameters - ---------- - x, y : array_like - Arrays of point coordinates, all of the same shape. The dtypes - will be converted to either float64 or complex128 depending on - whether any of the elements are complex. Scalars are converted to - 1-D arrays. - deg : list of ints - List of maximum degrees of the form [x_deg, y_deg]. - - Returns - ------- - vander2d : ndarray - The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg([1]+1)`. 
The dtype will be the same - as the converted `x` and `y`. - - See Also - -------- - polyvander, polyvander3d, polyval2d, polyval3d - - """ - return pu._vander_nd_flat((polyvander, polyvander), (x, y), deg) - - -def polyvander3d(x, y, z, deg): - """Pseudo-Vandermonde matrix of given degrees. - - Returns the pseudo-Vandermonde matrix of degrees `deg` and sample - points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`, - then The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = x^i * y^j * z^k, - - where `0 <= i <= l`, `0 <= j <= m`, and `0 <= j <= n`. The leading - indices of `V` index the points `(x, y, z)` and the last index encodes - the powers of `x`, `y`, and `z`. - - If ``V = polyvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns - of `V` correspond to the elements of a 3-D coefficient array `c` of - shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order - - .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... - - and ``np.dot(V, c.flat)`` and ``polyval3d(x, y, z, c)`` will be the - same up to roundoff. This equivalence is useful both for least squares - fitting and for the evaluation of a large number of 3-D polynomials - of the same degrees and sample points. - - Parameters - ---------- - x, y, z : array_like - Arrays of point coordinates, all of the same shape. The dtypes will - be converted to either float64 or complex128 depending on whether - any of the elements are complex. Scalars are converted to 1-D - arrays. - deg : list of ints - List of maximum degrees of the form [x_deg, y_deg, z_deg]. - - Returns - ------- - vander3d : ndarray - The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg([1]+1)*(deg[2]+1)`. The dtype will - be the same as the converted `x`, `y`, and `z`. - - See Also - -------- - polyvander, polyvander3d, polyval2d, polyval3d - - Notes - ----- - - .. 
versionadded:: 1.7.0 - - """ - return pu._vander_nd_flat((polyvander, polyvander, polyvander), (x, y, z), deg) - - -def polyfit(x, y, deg, rcond=None, full=False, w=None): - """ - Least-squares fit of a polynomial to data. - - Return the coefficients of a polynomial of degree `deg` that is the - least squares fit to the data values `y` given at points `x`. If `y` is - 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple - fits are done, one for each column of `y`, and the resulting - coefficients are stored in the corresponding columns of a 2-D return. - The fitted polynomial(s) are in the form - - .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n, - - where `n` is `deg`. - - Parameters - ---------- - x : array_like, shape (`M`,) - x-coordinates of the `M` sample (data) points ``(x[i], y[i])``. - y : array_like, shape (`M`,) or (`M`, `K`) - y-coordinates of the sample points. Several sets of sample points - sharing the same x-coordinates can be (independently) fit with one - call to `polyfit` by passing in for `y` a 2-D array that contains - one data set per column. - deg : int or 1-D array_like - Degree(s) of the fitting polynomials. If `deg` is a single integer - all terms up to and including the `deg`'th term are included in the - fit. For NumPy versions >= 1.11.0 a list of integers specifying the - degrees of the terms to include may be used instead. - rcond : float, optional - Relative condition number of the fit. Singular values smaller - than `rcond`, relative to the largest singular value, will be - ignored. The default value is ``len(x)*eps``, where `eps` is the - relative precision of the platform's float type, about 2e-16 in - most cases. - full : bool, optional - Switch determining the nature of the return value. When ``False`` - (the default) just the coefficients are returned; when ``True``, - diagnostic information from the singular value decomposition (used - to solve the fit's matrix equation) is also returned. 
- w : array_like, shape (`M`,), optional - Weights. If not None, the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the - weights are chosen so that the errors of the products ``w[i]*y[i]`` - all have the same variance. The default value is None. - - .. versionadded:: 1.5.0 - - Returns - ------- - coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`) - Polynomial coefficients ordered from low to high. If `y` was 2-D, - the coefficients in column `k` of `coef` represent the polynomial - fit to the data in `y`'s `k`-th column. - - [residuals, rank, singular_values, rcond] : list - These values are only returned if `full` = True - - resid -- sum of squared residuals of the least squares fit - rank -- the numerical rank of the scaled Vandermonde matrix - sv -- singular values of the scaled Vandermonde matrix - rcond -- value of `rcond`. - - For more details, see `linalg.lstsq`. - - Raises - ------ - RankWarning - Raised if the matrix in the least-squares fit is rank deficient. - The warning is only raised if `full` == False. The warnings can - be turned off by: - - >>> import warnings - >>> warnings.simplefilter('ignore', np.RankWarning) - - See Also - -------- - chebfit, legfit, lagfit, hermfit, hermefit - polyval : Evaluates a polynomial. - polyvander : Vandermonde matrix for powers. - linalg.lstsq : Computes a least-squares fit from the matrix. - scipy.interpolate.UnivariateSpline : Computes spline fits. - - Notes - ----- - The solution is the coefficients of the polynomial `p` that minimizes - the sum of the weighted squared errors - - .. math :: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, - - where the :math:`w_j` are the weights. This problem is solved by - setting up the (typically) over-determined matrix equation: - - .. math :: V(x) * c = w * y, - - where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the - coefficients to be solved for, `w` are the weights, and `y` are the - observed values. 
This equation is then solved using the singular value - decomposition of `V`. - - If some of the singular values of `V` are so small that they are - neglected (and `full` == ``False``), a `RankWarning` will be raised. - This means that the coefficient values may be poorly determined. - Fitting to a lower order polynomial will usually get rid of the warning - (but may not be what you want, of course; if you have independent - reason(s) for choosing the degree which isn't working, you may have to: - a) reconsider those reasons, and/or b) reconsider the quality of your - data). The `rcond` parameter can also be set to a value smaller than - its default, but the resulting fit may be spurious and have large - contributions from roundoff error. - - Polynomial fits using double precision tend to "fail" at about - (polynomial) degree 20. Fits using Chebyshev or Legendre series are - generally better conditioned, but much can still depend on the - distribution of the sample points and the smoothness of the data. If - the quality of the fit is inadequate, splines may be a good - alternative. - - Examples - -------- - >>> np.random.seed(123) - >>> from numpy.polynomial import polynomial as P - >>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1] - >>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + N(0,1) "noise" - >>> c, stats = P.polyfit(x,y,3,full=True) - >>> np.random.seed(123) - >>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 
1 - array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286]) # may vary - >>> stats # note the large SSR, explaining the rather poor results - [array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316, # may vary - 0.28853036]), 1.1324274851176597e-014] - - Same thing without the added noise - - >>> y = x**3 - x - >>> c, stats = P.polyfit(x,y,3,full=True) - >>> c # c[0], c[2] should be "very close to 0", c[1] ~= -1, c[3] ~= 1 - array([-6.36925336e-18, -1.00000000e+00, -4.08053781e-16, 1.00000000e+00]) - >>> stats # note the minuscule SSR - [array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158, # may vary - 0.50443316, 0.28853036]), 1.1324274851176597e-014] - - """ - return pu._fit(polyvander, x, y, deg, rcond, full, w) - - -def polycompanion(c): - """ - Return the companion matrix of c. - - The companion matrix for power series cannot be made symmetric by - scaling the basis, so this function differs from those for the - orthogonal polynomials. - - Parameters - ---------- - c : array_like - 1-D array of polynomial coefficients ordered from low to high - degree. - - Returns - ------- - mat : ndarray - Companion matrix of dimensions (deg, deg). - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - if len(c) < 2: - raise ValueError('Series must have maximum degree of at least 1.') - if len(c) == 2: - return np.array([[-c[0]/c[1]]]) - - n = len(c) - 1 - mat = np.zeros((n, n), dtype=c.dtype) - bot = mat.reshape(-1)[n::n+1] - bot[...] = 1 - mat[:, -1] -= c[:-1]/c[-1] - return mat - - -def polyroots(c): - """ - Compute the roots of a polynomial. - - Return the roots (a.k.a. "zeros") of the polynomial - - .. math:: p(x) = \\sum_i c[i] * x^i. - - Parameters - ---------- - c : 1-D array_like - 1-D array of polynomial coefficients. - - Returns - ------- - out : ndarray - Array of the roots of the polynomial. If all the roots are real, - then `out` is also real, otherwise it is complex. 
- - See Also - -------- - chebroots - - Notes - ----- - The root estimates are obtained as the eigenvalues of the companion - matrix, Roots far from the origin of the complex plane may have large - errors due to the numerical instability of the power series for such - values. Roots with multiplicity greater than 1 will also show larger - errors as the value of the series near such points is relatively - insensitive to errors in the roots. Isolated roots near the origin can - be improved by a few iterations of Newton's method. - - Examples - -------- - >>> import numpy.polynomial.polynomial as poly - >>> poly.polyroots(poly.polyfromroots((-1,0,1))) - array([-1., 0., 1.]) - >>> poly.polyroots(poly.polyfromroots((-1,0,1))).dtype - dtype('float64') - >>> j = complex(0,1) - >>> poly.polyroots(poly.polyfromroots((-j,0,j))) - array([ 0.00000000e+00+0.j, 0.00000000e+00+1.j, 2.77555756e-17-1.j]) # may vary - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - if len(c) < 2: - return np.array([], dtype=c.dtype) - if len(c) == 2: - return np.array([-c[0]/c[1]]) - - # rotated companion matrix reduces error - m = polycompanion(c)[::-1,::-1] - r = la.eigvals(m) - r.sort() - return r - - -# -# polynomial class -# - -class Polynomial(ABCPolyBase): - """A power series class. - - The Polynomial class provides the standard Python numerical methods - '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the - attributes and methods listed in the `ABCPolyBase` documentation. - - Parameters - ---------- - coef : array_like - Polynomial coefficients in order of increasing degree, i.e., - ``(1, 2, 3)`` give ``1 + 2*x + 3*x**2``. - domain : (2,) array_like, optional - Domain to use. The interval ``[domain[0], domain[1]]`` is mapped - to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [-1, 1]. - window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [-1, 1]. - - .. 
versionadded:: 1.6.0 - - """ - # Virtual Functions - _add = staticmethod(polyadd) - _sub = staticmethod(polysub) - _mul = staticmethod(polymul) - _div = staticmethod(polydiv) - _pow = staticmethod(polypow) - _val = staticmethod(polyval) - _int = staticmethod(polyint) - _der = staticmethod(polyder) - _fit = staticmethod(polyfit) - _line = staticmethod(polyline) - _roots = staticmethod(polyroots) - _fromroots = staticmethod(polyfromroots) - - # Virtual properties - nickname = 'poly' - domain = np.array(polydomain) - window = np.array(polydomain) - basis_name = None - - @staticmethod - def _repr_latex_term(i, arg_str, needs_parens): - if needs_parens: - arg_str = r'\left({}\right)'.format(arg_str) - if i == 0: - return '1' - elif i == 1: - return arg_str - else: - return '{}^{{{}}}'.format(arg_str, i) diff --git a/venv/lib/python3.7/site-packages/numpy/polynomial/polyutils.py b/venv/lib/python3.7/site-packages/numpy/polynomial/polyutils.py deleted file mode 100644 index 5dcfa7a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/polynomial/polyutils.py +++ /dev/null @@ -1,801 +0,0 @@ -""" -Utility classes and functions for the polynomial modules. - -This module provides: error and warning objects; a polynomial base class; -and some routines used in both the `polynomial` and `chebyshev` modules. - -Error objects -------------- - -.. autosummary:: - :toctree: generated/ - - PolyError base class for this sub-package's errors. - PolyDomainError raised when domains are mismatched. - -Warning objects ---------------- - -.. autosummary:: - :toctree: generated/ - - RankWarning raised in least-squares fit for rank-deficient matrix. - -Base class ----------- - -.. autosummary:: - :toctree: generated/ - - PolyBase Obsolete base class for the polynomial classes. Do not use. - -Functions ---------- - -.. autosummary:: - :toctree: generated/ - - as_series convert list of array_likes into 1-D arrays of common type. - trimseq remove trailing zeros. 
- trimcoef remove small trailing coefficients. - getdomain return the domain appropriate for a given set of abscissae. - mapdomain maps points between domains. - mapparms parameters of the linear map between domains. - -""" -from __future__ import division, absolute_import, print_function - -import operator -import functools -import warnings - -import numpy as np - -__all__ = [ - 'RankWarning', 'PolyError', 'PolyDomainError', 'as_series', 'trimseq', - 'trimcoef', 'getdomain', 'mapdomain', 'mapparms', 'PolyBase'] - -# -# Warnings and Exceptions -# - -class RankWarning(UserWarning): - """Issued by chebfit when the design matrix is rank deficient.""" - pass - -class PolyError(Exception): - """Base class for errors in this module.""" - pass - -class PolyDomainError(PolyError): - """Issued by the generic Poly class when two domains don't match. - - This is raised when an binary operation is passed Poly objects with - different domains. - - """ - pass - -# -# Base class for all polynomial types -# - -class PolyBase(object): - """ - Base class for all polynomial types. - - Deprecated in numpy 1.9.0, use the abstract - ABCPolyBase class instead. Note that the latter - requires a number of virtual functions to be - implemented. - - """ - pass - -# -# Helper functions to convert inputs to 1-D arrays -# -def trimseq(seq): - """Remove small Poly series coefficients. - - Parameters - ---------- - seq : sequence - Sequence of Poly series coefficients. This routine fails for - empty sequences. - - Returns - ------- - series : sequence - Subsequence with trailing zeros removed. If the resulting sequence - would be empty, return the first element. The returned sequence may - or may not be a view. - - Notes - ----- - Do not lose the type info if the sequence contains unknown objects. 
- - """ - if len(seq) == 0: - return seq - else: - for i in range(len(seq) - 1, -1, -1): - if seq[i] != 0: - break - return seq[:i+1] - - -def as_series(alist, trim=True): - """ - Return argument as a list of 1-d arrays. - - The returned list contains array(s) of dtype double, complex double, or - object. A 1-d argument of shape ``(N,)`` is parsed into ``N`` arrays of - size one; a 2-d argument of shape ``(M,N)`` is parsed into ``M`` arrays - of size ``N`` (i.e., is "parsed by row"); and a higher dimensional array - raises a Value Error if it is not first reshaped into either a 1-d or 2-d - array. - - Parameters - ---------- - alist : array_like - A 1- or 2-d array_like - trim : boolean, optional - When True, trailing zeros are removed from the inputs. - When False, the inputs are passed through intact. - - Returns - ------- - [a1, a2,...] : list of 1-D arrays - A copy of the input data as a list of 1-d arrays. - - Raises - ------ - ValueError - Raised when `as_series` cannot convert its input to 1-d arrays, or at - least one of the resulting arrays is empty. - - Examples - -------- - >>> from numpy.polynomial import polyutils as pu - >>> a = np.arange(4) - >>> pu.as_series(a) - [array([0.]), array([1.]), array([2.]), array([3.])] - >>> b = np.arange(6).reshape((2,3)) - >>> pu.as_series(b) - [array([0., 1., 2.]), array([3., 4., 5.])] - - >>> pu.as_series((1, np.arange(3), np.arange(2, dtype=np.float16))) - [array([1.]), array([0., 1., 2.]), array([0., 1.])] - - >>> pu.as_series([2, [1.1, 0.]]) - [array([2.]), array([1.1])] - - >>> pu.as_series([2, [1.1, 0.]], trim=False) - [array([2.]), array([1.1, 0. 
])] - - """ - arrays = [np.array(a, ndmin=1, copy=False) for a in alist] - if min([a.size for a in arrays]) == 0: - raise ValueError("Coefficient array is empty") - if any([a.ndim != 1 for a in arrays]): - raise ValueError("Coefficient array is not 1-d") - if trim: - arrays = [trimseq(a) for a in arrays] - - if any([a.dtype == np.dtype(object) for a in arrays]): - ret = [] - for a in arrays: - if a.dtype != np.dtype(object): - tmp = np.empty(len(a), dtype=np.dtype(object)) - tmp[:] = a[:] - ret.append(tmp) - else: - ret.append(a.copy()) - else: - try: - dtype = np.common_type(*arrays) - except Exception: - raise ValueError("Coefficient arrays have no common type") - ret = [np.array(a, copy=True, dtype=dtype) for a in arrays] - return ret - - -def trimcoef(c, tol=0): - """ - Remove "small" "trailing" coefficients from a polynomial. - - "Small" means "small in absolute value" and is controlled by the - parameter `tol`; "trailing" means highest order coefficient(s), e.g., in - ``[0, 1, 1, 0, 0]`` (which represents ``0 + x + x**2 + 0*x**3 + 0*x**4``) - both the 3-rd and 4-th order coefficients would be "trimmed." - - Parameters - ---------- - c : array_like - 1-d array of coefficients, ordered from lowest order to highest. - tol : number, optional - Trailing (i.e., highest order) elements with absolute value less - than or equal to `tol` (default value is zero) are removed. - - Returns - ------- - trimmed : ndarray - 1-d array with trailing zeros removed. If the resulting series - would be empty, a series containing a single zero is returned. 
- - Raises - ------ - ValueError - If `tol` < 0 - - See Also - -------- - trimseq - - Examples - -------- - >>> from numpy.polynomial import polyutils as pu - >>> pu.trimcoef((0,0,3,0,5,0,0)) - array([0., 0., 3., 0., 5.]) - >>> pu.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed - array([0.]) - >>> i = complex(0,1) # works for complex - >>> pu.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3) - array([0.0003+0.j , 0.001 -0.001j]) - - """ - if tol < 0: - raise ValueError("tol must be non-negative") - - [c] = as_series([c]) - [ind] = np.nonzero(np.abs(c) > tol) - if len(ind) == 0: - return c[:1]*0 - else: - return c[:ind[-1] + 1].copy() - -def getdomain(x): - """ - Return a domain suitable for given abscissae. - - Find a domain suitable for a polynomial or Chebyshev series - defined at the values supplied. - - Parameters - ---------- - x : array_like - 1-d array of abscissae whose domain will be determined. - - Returns - ------- - domain : ndarray - 1-d array containing two values. If the inputs are complex, then - the two returned points are the lower left and upper right corners - of the smallest rectangle (aligned with the axes) in the complex - plane containing the points `x`. If the inputs are real, then the - two points are the ends of the smallest interval containing the - points `x`. 
- - See Also - -------- - mapparms, mapdomain - - Examples - -------- - >>> from numpy.polynomial import polyutils as pu - >>> points = np.arange(4)**2 - 5; points - array([-5, -4, -1, 4]) - >>> pu.getdomain(points) - array([-5., 4.]) - >>> c = np.exp(complex(0,1)*np.pi*np.arange(12)/6) # unit circle - >>> pu.getdomain(c) - array([-1.-1.j, 1.+1.j]) - - """ - [x] = as_series([x], trim=False) - if x.dtype.char in np.typecodes['Complex']: - rmin, rmax = x.real.min(), x.real.max() - imin, imax = x.imag.min(), x.imag.max() - return np.array((complex(rmin, imin), complex(rmax, imax))) - else: - return np.array((x.min(), x.max())) - -def mapparms(old, new): - """ - Linear map parameters between domains. - - Return the parameters of the linear map ``offset + scale*x`` that maps - `old` to `new` such that ``old[i] -> new[i]``, ``i = 0, 1``. - - Parameters - ---------- - old, new : array_like - Domains. Each domain must (successfully) convert to a 1-d array - containing precisely two values. - - Returns - ------- - offset, scale : scalars - The map ``L(x) = offset + scale*x`` maps the first domain to the - second. - - See Also - -------- - getdomain, mapdomain - - Notes - ----- - Also works for complex numbers, and thus can be used to calculate the - parameters required to map any line in the complex plane to any other - line therein. - - Examples - -------- - >>> from numpy.polynomial import polyutils as pu - >>> pu.mapparms((-1,1),(-1,1)) - (0.0, 1.0) - >>> pu.mapparms((1,-1),(-1,1)) - (-0.0, -1.0) - >>> i = complex(0,1) - >>> pu.mapparms((-i,-1),(1,i)) - ((1+1j), (1-0j)) - - """ - oldlen = old[1] - old[0] - newlen = new[1] - new[0] - off = (old[1]*new[0] - old[0]*new[1])/oldlen - scl = newlen/oldlen - return off, scl - -def mapdomain(x, old, new): - """ - Apply linear map to input points. - - The linear map ``offset + scale*x`` that maps the domain `old` to - the domain `new` is applied to the points `x`. - - Parameters - ---------- - x : array_like - Points to be mapped. 
If `x` is a subtype of ndarray the subtype - will be preserved. - old, new : array_like - The two domains that determine the map. Each must (successfully) - convert to 1-d arrays containing precisely two values. - - Returns - ------- - x_out : ndarray - Array of points of the same shape as `x`, after application of the - linear map between the two domains. - - See Also - -------- - getdomain, mapparms - - Notes - ----- - Effectively, this implements: - - .. math :: - x\\_out = new[0] + m(x - old[0]) - - where - - .. math :: - m = \\frac{new[1]-new[0]}{old[1]-old[0]} - - Examples - -------- - >>> from numpy.polynomial import polyutils as pu - >>> old_domain = (-1,1) - >>> new_domain = (0,2*np.pi) - >>> x = np.linspace(-1,1,6); x - array([-1. , -0.6, -0.2, 0.2, 0.6, 1. ]) - >>> x_out = pu.mapdomain(x, old_domain, new_domain); x_out - array([ 0. , 1.25663706, 2.51327412, 3.76991118, 5.02654825, # may vary - 6.28318531]) - >>> x - pu.mapdomain(x_out, new_domain, old_domain) - array([0., 0., 0., 0., 0., 0.]) - - Also works for complex numbers (and thus can be used to map any line in - the complex plane to any other line therein). - - >>> i = complex(0,1) - >>> old = (-1 - i, 1 + i) - >>> new = (-1 + i, 1 - i) - >>> z = np.linspace(old[0], old[1], 6); z - array([-1. -1.j , -0.6-0.6j, -0.2-0.2j, 0.2+0.2j, 0.6+0.6j, 1. +1.j ]) - >>> new_z = pu.mapdomain(z, old, new); new_z - array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j, 0.2-0.2j, 0.6-0.6j, 1.0-1.j ]) # may vary - - """ - x = np.asanyarray(x) - off, scl = mapparms(old, new) - return off + scl*x - - -def _nth_slice(i, ndim): - sl = [np.newaxis] * ndim - sl[i] = slice(None) - return tuple(sl) - - -def _vander_nd(vander_fs, points, degrees): - r""" - A generalization of the Vandermonde matrix for N dimensions - - The result is built by combining the results of 1d Vandermonde matrices, - - .. math:: - W[i_0, \ldots, i_M, j_0, \ldots, j_N] = \prod_{k=0}^N{V_k(x_k)[i_0, \ldots, i_M, j_k]} - - where - - .. 
math:: - N &= \texttt{len(points)} = \texttt{len(degrees)} = \texttt{len(vander\_fs)} \\ - M &= \texttt{points[k].ndim} \\ - V_k &= \texttt{vander\_fs[k]} \\ - x_k &= \texttt{points[k]} \\ - 0 \le j_k &\le \texttt{degrees[k]} - - Expanding the one-dimensional :math:`V_k` functions gives: - - .. math:: - W[i_0, \ldots, i_M, j_0, \ldots, j_N] = \prod_{k=0}^N{B_{k, j_k}(x_k[i_0, \ldots, i_M])} - - where :math:`B_{k,m}` is the m'th basis of the polynomial construction used along - dimension :math:`k`. For a regular polynomial, :math:`B_{k, m}(x) = P_m(x) = x^m`. - - Parameters - ---------- - vander_fs : Sequence[function(array_like, int) -> ndarray] - The 1d vander function to use for each axis, such as ``polyvander`` - points : Sequence[array_like] - Arrays of point coordinates, all of the same shape. The dtypes - will be converted to either float64 or complex128 depending on - whether any of the elements are complex. Scalars are converted to - 1-D arrays. - This must be the same length as `vander_fs`. - degrees : Sequence[int] - The maximum degree (inclusive) to use for each axis. - This must be the same length as `vander_fs`. - - Returns - ------- - vander_nd : ndarray - An array of shape ``points[0].shape + tuple(d + 1 for d in degrees)``. 
- """ - n_dims = len(vander_fs) - if n_dims != len(points): - raise ValueError( - "Expected {} dimensions of sample points, got {}".format(n_dims, len(points))) - if n_dims != len(degrees): - raise ValueError( - "Expected {} dimensions of degrees, got {}".format(n_dims, len(degrees))) - if n_dims == 0: - raise ValueError("Unable to guess a dtype or shape when no points are given") - - # convert to the same shape and type - points = tuple(np.array(tuple(points), copy=False) + 0.0) - - # produce the vandermonde matrix for each dimension, placing the last - # axis of each in an independent trailing axis of the output - vander_arrays = ( - vander_fs[i](points[i], degrees[i])[(...,) + _nth_slice(i, n_dims)] - for i in range(n_dims) - ) - - # we checked this wasn't empty already, so no `initial` needed - return functools.reduce(operator.mul, vander_arrays) - - -def _vander_nd_flat(vander_fs, points, degrees): - """ - Like `_vander_nd`, but flattens the last ``len(degrees)`` axes into a single axis - - Used to implement the public ``vanderd`` functions. - """ - v = _vander_nd(vander_fs, points, degrees) - return v.reshape(v.shape[:-len(degrees)] + (-1,)) - - -def _fromroots(line_f, mul_f, roots): - """ - Helper function used to implement the ``fromroots`` functions. - - Parameters - ---------- - line_f : function(float, float) -> ndarray - The ``line`` function, such as ``polyline`` - mul_f : function(array_like, array_like) -> ndarray - The ``mul`` function, such as ``polymul`` - roots : - See the ``fromroots`` functions for more detail - """ - if len(roots) == 0: - return np.ones(1) - else: - [roots] = as_series([roots], trim=False) - roots.sort() - p = [line_f(-r, 1) for r in roots] - n = len(p) - while n > 1: - m, r = divmod(n, 2) - tmp = [mul_f(p[i], p[i+m]) for i in range(m)] - if r: - tmp[0] = mul_f(tmp[0], p[-1]) - p = tmp - n = m - return p[0] - - -def _valnd(val_f, c, *args): - """ - Helper function used to implement the ``vald`` functions. 
- - Parameters - ---------- - val_f : function(array_like, array_like, tensor: bool) -> array_like - The ``val`` function, such as ``polyval`` - c, args : - See the ``vald`` functions for more detail - """ - try: - args = tuple(np.array(args, copy=False)) - except Exception: - # preserve the old error message - if len(args) == 2: - raise ValueError('x, y, z are incompatible') - elif len(args) == 3: - raise ValueError('x, y are incompatible') - else: - raise ValueError('ordinates are incompatible') - - it = iter(args) - x0 = next(it) - - # use tensor on only the first - c = val_f(x0, c) - for xi in it: - c = val_f(xi, c, tensor=False) - return c - - -def _gridnd(val_f, c, *args): - """ - Helper function used to implement the ``gridd`` functions. - - Parameters - ---------- - val_f : function(array_like, array_like, tensor: bool) -> array_like - The ``val`` function, such as ``polyval`` - c, args : - See the ``gridd`` functions for more detail - """ - for xi in args: - c = val_f(xi, c) - return c - - -def _div(mul_f, c1, c2): - """ - Helper function used to implement the ``div`` functions. - - Implementation uses repeated subtraction of c2 multiplied by the nth basis. - For some polynomial types, a more efficient approach may be possible. - - Parameters - ---------- - mul_f : function(array_like, array_like) -> array_like - The ``mul`` function, such as ``polymul`` - c1, c2 : - See the ``div`` functions for more detail - """ - # c1, c2 are trimmed copies - [c1, c2] = as_series([c1, c2]) - if c2[-1] == 0: - raise ZeroDivisionError() - - lc1 = len(c1) - lc2 = len(c2) - if lc1 < lc2: - return c1[:1]*0, c1 - elif lc2 == 1: - return c1/c2[-1], c1[:1]*0 - else: - quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) - rem = c1 - for i in range(lc1 - lc2, - 1, -1): - p = mul_f([0]*i + [1], c2) - q = rem[-1]/p[-1] - rem = rem[:-1] - q*p[:-1] - quo[i] = q - return quo, trimseq(rem) - - -def _add(c1, c2): - """ Helper function used to implement the ``add`` functions. 
""" - # c1, c2 are trimmed copies - [c1, c2] = as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] += c2 - ret = c1 - else: - c2[:c1.size] += c1 - ret = c2 - return trimseq(ret) - - -def _sub(c1, c2): - """ Helper function used to implement the ``sub`` functions. """ - # c1, c2 are trimmed copies - [c1, c2] = as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] -= c2 - ret = c1 - else: - c2 = -c2 - c2[:c1.size] += c1 - ret = c2 - return trimseq(ret) - - -def _fit(vander_f, x, y, deg, rcond=None, full=False, w=None): - """ - Helper function used to implement the ``fit`` functions. - - Parameters - ---------- - vander_f : function(array_like, int) -> ndarray - The 1d vander function, such as ``polyvander`` - c1, c2 : - See the ``fit`` functions for more detail - """ - x = np.asarray(x) + 0.0 - y = np.asarray(y) + 0.0 - deg = np.asarray(deg) - - # check arguments. - if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0: - raise TypeError("deg must be an int or non-empty 1-D array of int") - if deg.min() < 0: - raise ValueError("expected deg >= 0") - if x.ndim != 1: - raise TypeError("expected 1D vector for x") - if x.size == 0: - raise TypeError("expected non-empty vector for x") - if y.ndim < 1 or y.ndim > 2: - raise TypeError("expected 1D or 2D array for y") - if len(x) != len(y): - raise TypeError("expected x and y to have same length") - - if deg.ndim == 0: - lmax = deg - order = lmax + 1 - van = vander_f(x, lmax) - else: - deg = np.sort(deg) - lmax = deg[-1] - order = len(deg) - van = vander_f(x, lmax)[:, deg] - - # set up the least squares matrices in transposed form - lhs = van.T - rhs = y.T - if w is not None: - w = np.asarray(w) + 0.0 - if w.ndim != 1: - raise TypeError("expected 1D vector for w") - if len(x) != len(w): - raise TypeError("expected x and w to have same length") - # apply weights. Don't use inplace operations as they - # can cause problems with NA. 
- lhs = lhs * w - rhs = rhs * w - - # set rcond - if rcond is None: - rcond = len(x)*np.finfo(x.dtype).eps - - # Determine the norms of the design matrix columns. - if issubclass(lhs.dtype.type, np.complexfloating): - scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) - else: - scl = np.sqrt(np.square(lhs).sum(1)) - scl[scl == 0] = 1 - - # Solve the least squares problem. - c, resids, rank, s = np.linalg.lstsq(lhs.T/scl, rhs.T, rcond) - c = (c.T/scl).T - - # Expand c to include non-fitted coefficients which are set to zero - if deg.ndim > 0: - if c.ndim == 2: - cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) - else: - cc = np.zeros(lmax+1, dtype=c.dtype) - cc[deg] = c - c = cc - - # warn on rank reduction - if rank != order and not full: - msg = "The fit may be poorly conditioned" - warnings.warn(msg, RankWarning, stacklevel=2) - - if full: - return c, [resids, rank, s, rcond] - else: - return c - - -def _pow(mul_f, c, pow, maxpower): - """ - Helper function used to implement the ``pow`` functions. - - Parameters - ---------- - vander_f : function(array_like, int) -> ndarray - The 1d vander function, such as ``polyvander`` - pow, maxpower : - See the ``pow`` functions for more detail - mul_f : function(array_like, array_like) -> ndarray - The ``mul`` function, such as ``polymul`` - """ - # c is a trimmed copy - [c] = as_series([c]) - power = int(pow) - if power != pow or power < 0: - raise ValueError("Power must be a non-negative integer.") - elif maxpower is not None and power > maxpower: - raise ValueError("Power is too large") - elif power == 0: - return np.array([1], dtype=c.dtype) - elif power == 1: - return c - else: - # This can be made more efficient by using powers of two - # in the usual way. 
- prd = c - for i in range(2, power + 1): - prd = mul_f(prd, c) - return prd - - -def _deprecate_as_int(x, desc): - """ - Like `operator.index`, but emits a deprecation warning when passed a float - - Parameters - ---------- - x : int-like, or float with integral value - Value to interpret as an integer - desc : str - description to include in any error message - - Raises - ------ - TypeError : if x is a non-integral float or non-numeric - DeprecationWarning : if x is an integral float - """ - try: - return operator.index(x) - except TypeError: - # Numpy 1.17.0, 2019-03-11 - try: - ix = int(x) - except TypeError: - pass - else: - if ix == x: - warnings.warn( - "In future, this will raise TypeError, as {} will need to " - "be an integer not just an integral float." - .format(desc), - DeprecationWarning, - stacklevel=3 - ) - return ix - - raise TypeError("{} must be an integer".format(desc)) diff --git a/venv/lib/python3.7/site-packages/numpy/polynomial/setup.py b/venv/lib/python3.7/site-packages/numpy/polynomial/setup.py deleted file mode 100644 index cb59ee1..0000000 --- a/venv/lib/python3.7/site-packages/numpy/polynomial/setup.py +++ /dev/null @@ -1,11 +0,0 @@ -from __future__ import division, print_function - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('polynomial', parent_package, top_path) - config.add_data_dir('tests') - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/venv/lib/python3.7/site-packages/numpy/polynomial/tests/__init__.py b/venv/lib/python3.7/site-packages/numpy/polynomial/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_chebyshev.py b/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_chebyshev.py deleted file mode 100644 index c8d2d6d..0000000 --- 
a/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_chebyshev.py +++ /dev/null @@ -1,621 +0,0 @@ -"""Tests for chebyshev module. - -""" -from __future__ import division, absolute_import, print_function - -from functools import reduce - -import numpy as np -import numpy.polynomial.chebyshev as cheb -from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) - - -def trim(x): - return cheb.chebtrim(x, tol=1e-6) - -T0 = [1] -T1 = [0, 1] -T2 = [-1, 0, 2] -T3 = [0, -3, 0, 4] -T4 = [1, 0, -8, 0, 8] -T5 = [0, 5, 0, -20, 0, 16] -T6 = [-1, 0, 18, 0, -48, 0, 32] -T7 = [0, -7, 0, 56, 0, -112, 0, 64] -T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128] -T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256] - -Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] - - -class TestPrivate(object): - - def test__cseries_to_zseries(self): - for i in range(5): - inp = np.array([2] + [1]*i, np.double) - tgt = np.array([.5]*i + [2] + [.5]*i, np.double) - res = cheb._cseries_to_zseries(inp) - assert_equal(res, tgt) - - def test__zseries_to_cseries(self): - for i in range(5): - inp = np.array([.5]*i + [2] + [.5]*i, np.double) - tgt = np.array([2] + [1]*i, np.double) - res = cheb._zseries_to_cseries(inp) - assert_equal(res, tgt) - - -class TestConstants(object): - - def test_chebdomain(self): - assert_equal(cheb.chebdomain, [-1, 1]) - - def test_chebzero(self): - assert_equal(cheb.chebzero, [0]) - - def test_chebone(self): - assert_equal(cheb.chebone, [1]) - - def test_chebx(self): - assert_equal(cheb.chebx, [0, 1]) - - -class TestArithmetic(object): - - def test_chebadd(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] += 1 - res = cheb.chebadd([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_chebsub(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = 
np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] -= 1 - res = cheb.chebsub([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_chebmulx(self): - assert_equal(cheb.chebmulx([0]), [0]) - assert_equal(cheb.chebmulx([1]), [0, 1]) - for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i - 1) + [.5, 0, .5] - assert_equal(cheb.chebmulx(ser), tgt) - - def test_chebmul(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(i + j + 1) - tgt[i + j] += .5 - tgt[abs(i - j)] += .5 - res = cheb.chebmul([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_chebdiv(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - ci = [0]*i + [1] - cj = [0]*j + [1] - tgt = cheb.chebadd(ci, cj) - quo, rem = cheb.chebdiv(tgt, ci) - res = cheb.chebadd(cheb.chebmul(quo, ci), rem) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_chebpow(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - c = np.arange(i + 1) - tgt = reduce(cheb.chebmul, [c]*j, np.array([1])) - res = cheb.chebpow(c, j) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - -class TestEvaluation(object): - # coefficients of 1 + 2*x + 3*x**2 - c1d = np.array([2.5, 2., 1.5]) - c2d = np.einsum('i,j->ij', c1d, c1d) - c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) - - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - y = polyval(x, [1., 2., 3.]) - - def test_chebval(self): - #check empty input - assert_equal(cheb.chebval([], [1]).size, 0) - - #check normal input) - x = np.linspace(-1, 1) - y = [polyval(x, c) for c in Tlist] - for i in range(10): - msg = "At i=%d" % i - tgt = y[i] - res = cheb.chebval(x, [0]*i + [1]) - assert_almost_equal(res, tgt, err_msg=msg) - - #check that shape is preserved - for i in range(3): - dims = [2]*i - x = np.zeros(dims) - assert_equal(cheb.chebval(x, [1]).shape, dims) - assert_equal(cheb.chebval(x, [1, 
0]).shape, dims) - assert_equal(cheb.chebval(x, [1, 0, 0]).shape, dims) - - def test_chebval2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test exceptions - assert_raises(ValueError, cheb.chebval2d, x1, x2[:2], self.c2d) - - #test values - tgt = y1*y2 - res = cheb.chebval2d(x1, x2, self.c2d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = cheb.chebval2d(z, z, self.c2d) - assert_(res.shape == (2, 3)) - - def test_chebval3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test exceptions - assert_raises(ValueError, cheb.chebval3d, x1, x2, x3[:2], self.c3d) - - #test values - tgt = y1*y2*y3 - res = cheb.chebval3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = cheb.chebval3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)) - - def test_chebgrid2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j->ij', y1, y2) - res = cheb.chebgrid2d(x1, x2, self.c2d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = cheb.chebgrid2d(z, z, self.c2d) - assert_(res.shape == (2, 3)*2) - - def test_chebgrid3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j,k->ijk', y1, y2, y3) - res = cheb.chebgrid3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = cheb.chebgrid3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)*3) - - -class TestIntegral(object): - - def test_chebint(self): - # check exceptions - assert_raises(TypeError, cheb.chebint, [0], .5) - assert_raises(ValueError, cheb.chebint, [0], -1) - assert_raises(ValueError, cheb.chebint, [0], 1, [0, 0]) - assert_raises(ValueError, cheb.chebint, [0], lbnd=[0]) - assert_raises(ValueError, cheb.chebint, [0], scl=[0]) - assert_raises(TypeError, cheb.chebint, [0], axis=.5) - - # test integration of zero polynomial - for i in range(2, 5): - k = [0]*(i - 2) + [1] - res = cheb.chebint([0], 
m=i, k=k) - assert_almost_equal(res, [0, 1]) - - # check single integration with integration constant - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] - chebpol = cheb.poly2cheb(pol) - chebint = cheb.chebint(chebpol, m=1, k=[i]) - res = cheb.cheb2poly(chebint) - assert_almost_equal(trim(res), trim(tgt)) - - # check single integration with integration constant and lbnd - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - chebpol = cheb.poly2cheb(pol) - chebint = cheb.chebint(chebpol, m=1, k=[i], lbnd=-1) - assert_almost_equal(cheb.chebval(-1, chebint), i) - - # check single integration with integration constant and scaling - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] - chebpol = cheb.poly2cheb(pol) - chebint = cheb.chebint(chebpol, m=1, k=[i], scl=2) - res = cheb.cheb2poly(chebint) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with default k - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = cheb.chebint(tgt, m=1) - res = cheb.chebint(pol, m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with defined k - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = cheb.chebint(tgt, m=1, k=[k]) - res = cheb.chebint(pol, m=j, k=list(range(j))) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with lbnd - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = cheb.chebint(tgt, m=1, k=[k], lbnd=-1) - res = cheb.chebint(pol, m=j, k=list(range(j)), lbnd=-1) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with scaling - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = cheb.chebint(tgt, m=1, k=[k], scl=2) - res = cheb.chebint(pol, m=j, k=list(range(j)), scl=2) - 
assert_almost_equal(trim(res), trim(tgt)) - - def test_chebint_axis(self): - # check that axis keyword works - c2d = np.random.random((3, 4)) - - tgt = np.vstack([cheb.chebint(c) for c in c2d.T]).T - res = cheb.chebint(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([cheb.chebint(c) for c in c2d]) - res = cheb.chebint(c2d, axis=1) - assert_almost_equal(res, tgt) - - tgt = np.vstack([cheb.chebint(c, k=3) for c in c2d]) - res = cheb.chebint(c2d, k=3, axis=1) - assert_almost_equal(res, tgt) - - -class TestDerivative(object): - - def test_chebder(self): - # check exceptions - assert_raises(TypeError, cheb.chebder, [0], .5) - assert_raises(ValueError, cheb.chebder, [0], -1) - - # check that zeroth derivative does nothing - for i in range(5): - tgt = [0]*i + [1] - res = cheb.chebder(tgt, m=0) - assert_equal(trim(res), trim(tgt)) - - # check that derivation is the inverse of integration - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = cheb.chebder(cheb.chebint(tgt, m=j), m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check derivation with scaling - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = cheb.chebder(cheb.chebint(tgt, m=j, scl=2), m=j, scl=.5) - assert_almost_equal(trim(res), trim(tgt)) - - def test_chebder_axis(self): - # check that axis keyword works - c2d = np.random.random((3, 4)) - - tgt = np.vstack([cheb.chebder(c) for c in c2d.T]).T - res = cheb.chebder(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([cheb.chebder(c) for c in c2d]) - res = cheb.chebder(c2d, axis=1) - assert_almost_equal(res, tgt) - - -class TestVander(object): - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - - def test_chebvander(self): - # check for 1d x - x = np.arange(3) - v = cheb.chebvander(x, 3) - assert_(v.shape == (3, 4)) - for i in range(4): - coef = [0]*i + [1] - assert_almost_equal(v[..., i], cheb.chebval(x, coef)) - - # check for 2d x - x = np.array([[1, 2], [3, 4], 
[5, 6]]) - v = cheb.chebvander(x, 3) - assert_(v.shape == (3, 2, 4)) - for i in range(4): - coef = [0]*i + [1] - assert_almost_equal(v[..., i], cheb.chebval(x, coef)) - - def test_chebvander2d(self): - # also tests chebval2d for non-square coefficient array - x1, x2, x3 = self.x - c = np.random.random((2, 3)) - van = cheb.chebvander2d(x1, x2, [1, 2]) - tgt = cheb.chebval2d(x1, x2, c) - res = np.dot(van, c.flat) - assert_almost_equal(res, tgt) - - # check shape - van = cheb.chebvander2d([x1], [x2], [1, 2]) - assert_(van.shape == (1, 5, 6)) - - def test_chebvander3d(self): - # also tests chebval3d for non-square coefficient array - x1, x2, x3 = self.x - c = np.random.random((2, 3, 4)) - van = cheb.chebvander3d(x1, x2, x3, [1, 2, 3]) - tgt = cheb.chebval3d(x1, x2, x3, c) - res = np.dot(van, c.flat) - assert_almost_equal(res, tgt) - - # check shape - van = cheb.chebvander3d([x1], [x2], [x3], [1, 2, 3]) - assert_(van.shape == (1, 5, 24)) - - -class TestFitting(object): - - def test_chebfit(self): - def f(x): - return x*(x - 1)*(x - 2) - - def f2(x): - return x**4 + x**2 + 1 - - # Test exceptions - assert_raises(ValueError, cheb.chebfit, [1], [1], -1) - assert_raises(TypeError, cheb.chebfit, [[1]], [1], 0) - assert_raises(TypeError, cheb.chebfit, [], [1], 0) - assert_raises(TypeError, cheb.chebfit, [1], [[[1]]], 0) - assert_raises(TypeError, cheb.chebfit, [1, 2], [1], 0) - assert_raises(TypeError, cheb.chebfit, [1], [1, 2], 0) - assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1, 1]) - assert_raises(ValueError, cheb.chebfit, [1], [1], [-1,]) - assert_raises(ValueError, cheb.chebfit, [1], [1], [2, -1, 6]) - assert_raises(TypeError, cheb.chebfit, [1], [1], []) - - # Test fit - x = np.linspace(0, 2) - y = f(x) - # - coef3 = cheb.chebfit(x, y, 3) - assert_equal(len(coef3), 4) - assert_almost_equal(cheb.chebval(x, coef3), y) - coef3 = cheb.chebfit(x, y, [0, 1, 2, 3]) - assert_equal(len(coef3), 4) - 
assert_almost_equal(cheb.chebval(x, coef3), y) - # - coef4 = cheb.chebfit(x, y, 4) - assert_equal(len(coef4), 5) - assert_almost_equal(cheb.chebval(x, coef4), y) - coef4 = cheb.chebfit(x, y, [0, 1, 2, 3, 4]) - assert_equal(len(coef4), 5) - assert_almost_equal(cheb.chebval(x, coef4), y) - # check things still work if deg is not in strict increasing - coef4 = cheb.chebfit(x, y, [2, 3, 4, 1, 0]) - assert_equal(len(coef4), 5) - assert_almost_equal(cheb.chebval(x, coef4), y) - # - coef2d = cheb.chebfit(x, np.array([y, y]).T, 3) - assert_almost_equal(coef2d, np.array([coef3, coef3]).T) - coef2d = cheb.chebfit(x, np.array([y, y]).T, [0, 1, 2, 3]) - assert_almost_equal(coef2d, np.array([coef3, coef3]).T) - # test weighting - w = np.zeros_like(x) - yw = y.copy() - w[1::2] = 1 - y[0::2] = 0 - wcoef3 = cheb.chebfit(x, yw, 3, w=w) - assert_almost_equal(wcoef3, coef3) - wcoef3 = cheb.chebfit(x, yw, [0, 1, 2, 3], w=w) - assert_almost_equal(wcoef3, coef3) - # - wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, 3, w=w) - assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) - wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) - assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) - # test scaling with complex values x points whose square - # is zero when summed. - x = [1, 1j, -1, -1j] - assert_almost_equal(cheb.chebfit(x, x, 1), [0, 1]) - assert_almost_equal(cheb.chebfit(x, x, [0, 1]), [0, 1]) - # test fitting only even polynomials - x = np.linspace(-1, 1) - y = f2(x) - coef1 = cheb.chebfit(x, y, 4) - assert_almost_equal(cheb.chebval(x, coef1), y) - coef2 = cheb.chebfit(x, y, [0, 2, 4]) - assert_almost_equal(cheb.chebval(x, coef2), y) - assert_almost_equal(coef1, coef2) - - -class TestInterpolate(object): - - def f(self, x): - return x * (x - 1) * (x - 2) - - def test_raises(self): - assert_raises(ValueError, cheb.chebinterpolate, self.f, -1) - assert_raises(TypeError, cheb.chebinterpolate, self.f, 10.) 
- - def test_dimensions(self): - for deg in range(1, 5): - assert_(cheb.chebinterpolate(self.f, deg).shape == (deg + 1,)) - - def test_approximation(self): - - def powx(x, p): - return x**p - - x = np.linspace(-1, 1, 10) - for deg in range(0, 10): - for p in range(0, deg + 1): - c = cheb.chebinterpolate(powx, deg, (p,)) - assert_almost_equal(cheb.chebval(x, c), powx(x, p), decimal=12) - - -class TestCompanion(object): - - def test_raises(self): - assert_raises(ValueError, cheb.chebcompanion, []) - assert_raises(ValueError, cheb.chebcompanion, [1]) - - def test_dimensions(self): - for i in range(1, 5): - coef = [0]*i + [1] - assert_(cheb.chebcompanion(coef).shape == (i, i)) - - def test_linear_root(self): - assert_(cheb.chebcompanion([1, 2])[0, 0] == -.5) - - -class TestGauss(object): - - def test_100(self): - x, w = cheb.chebgauss(100) - - # test orthogonality. Note that the results need to be normalized, - # otherwise the huge values that can arise from fast growing - # functions like Laguerre can be very confusing. 
- v = cheb.chebvander(x, 99) - vv = np.dot(v.T * w, v) - vd = 1/np.sqrt(vv.diagonal()) - vv = vd[:, None] * vv * vd - assert_almost_equal(vv, np.eye(100)) - - # check that the integral of 1 is correct - tgt = np.pi - assert_almost_equal(w.sum(), tgt) - - -class TestMisc(object): - - def test_chebfromroots(self): - res = cheb.chebfromroots([]) - assert_almost_equal(trim(res), [1]) - for i in range(1, 5): - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) - tgt = [0]*i + [1] - res = cheb.chebfromroots(roots)*2**(i-1) - assert_almost_equal(trim(res), trim(tgt)) - - def test_chebroots(self): - assert_almost_equal(cheb.chebroots([1]), []) - assert_almost_equal(cheb.chebroots([1, 2]), [-.5]) - for i in range(2, 5): - tgt = np.linspace(-1, 1, i) - res = cheb.chebroots(cheb.chebfromroots(tgt)) - assert_almost_equal(trim(res), trim(tgt)) - - def test_chebtrim(self): - coef = [2, -1, 1, 0] - - # Test exceptions - assert_raises(ValueError, cheb.chebtrim, coef, -1) - - # Test results - assert_equal(cheb.chebtrim(coef), coef[:-1]) - assert_equal(cheb.chebtrim(coef, 1), coef[:-3]) - assert_equal(cheb.chebtrim(coef, 2), [0]) - - def test_chebline(self): - assert_equal(cheb.chebline(3, 4), [3, 4]) - - def test_cheb2poly(self): - for i in range(10): - assert_almost_equal(cheb.cheb2poly([0]*i + [1]), Tlist[i]) - - def test_poly2cheb(self): - for i in range(10): - assert_almost_equal(cheb.poly2cheb(Tlist[i]), [0]*i + [1]) - - def test_weight(self): - x = np.linspace(-1, 1, 11)[1:-1] - tgt = 1./(np.sqrt(1 + x) * np.sqrt(1 - x)) - res = cheb.chebweight(x) - assert_almost_equal(res, tgt) - - def test_chebpts1(self): - #test exceptions - assert_raises(ValueError, cheb.chebpts1, 1.5) - assert_raises(ValueError, cheb.chebpts1, 0) - - #test points - tgt = [0] - assert_almost_equal(cheb.chebpts1(1), tgt) - tgt = [-0.70710678118654746, 0.70710678118654746] - assert_almost_equal(cheb.chebpts1(2), tgt) - tgt = [-0.86602540378443871, 0, 0.86602540378443871] - 
assert_almost_equal(cheb.chebpts1(3), tgt) - tgt = [-0.9238795325, -0.3826834323, 0.3826834323, 0.9238795325] - assert_almost_equal(cheb.chebpts1(4), tgt) - - def test_chebpts2(self): - #test exceptions - assert_raises(ValueError, cheb.chebpts2, 1.5) - assert_raises(ValueError, cheb.chebpts2, 1) - - #test points - tgt = [-1, 1] - assert_almost_equal(cheb.chebpts2(2), tgt) - tgt = [-1, 0, 1] - assert_almost_equal(cheb.chebpts2(3), tgt) - tgt = [-1, -0.5, .5, 1] - assert_almost_equal(cheb.chebpts2(4), tgt) - tgt = [-1.0, -0.707106781187, 0, 0.707106781187, 1.0] - assert_almost_equal(cheb.chebpts2(5), tgt) diff --git a/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_classes.py b/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_classes.py deleted file mode 100644 index 2261f96..0000000 --- a/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_classes.py +++ /dev/null @@ -1,653 +0,0 @@ -"""Test inter-conversion of different polynomial classes. - -This tests the convert and cast methods of all the polynomial classes. 
- -""" -from __future__ import division, absolute_import, print_function - -import operator as op -from numbers import Number - -import pytest -import numpy as np -from numpy.polynomial import ( - Polynomial, Legendre, Chebyshev, Laguerre, Hermite, HermiteE) -from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) -from numpy.compat import long -from numpy.polynomial.polyutils import RankWarning - -# -# fixtures -# - -classes = ( - Polynomial, Legendre, Chebyshev, Laguerre, - Hermite, HermiteE - ) -classids = tuple(cls.__name__ for cls in classes) - -@pytest.fixture(params=classes, ids=classids) -def Poly(request): - return request.param - -# -# helper functions -# -random = np.random.random - - -def assert_poly_almost_equal(p1, p2, msg=""): - try: - assert_(np.all(p1.domain == p2.domain)) - assert_(np.all(p1.window == p2.window)) - assert_almost_equal(p1.coef, p2.coef) - except AssertionError: - msg = "Result: %s\nTarget: %s", (p1, p2) - raise AssertionError(msg) - - -# -# Test conversion methods that depend on combinations of two classes. 
-# - -Poly1 = Poly -Poly2 = Poly - - -def test_conversion(Poly1, Poly2): - x = np.linspace(0, 1, 10) - coef = random((3,)) - - d1 = Poly1.domain + random((2,))*.25 - w1 = Poly1.window + random((2,))*.25 - p1 = Poly1(coef, domain=d1, window=w1) - - d2 = Poly2.domain + random((2,))*.25 - w2 = Poly2.window + random((2,))*.25 - p2 = p1.convert(kind=Poly2, domain=d2, window=w2) - - assert_almost_equal(p2.domain, d2) - assert_almost_equal(p2.window, w2) - assert_almost_equal(p2(x), p1(x)) - - -def test_cast(Poly1, Poly2): - x = np.linspace(0, 1, 10) - coef = random((3,)) - - d1 = Poly1.domain + random((2,))*.25 - w1 = Poly1.window + random((2,))*.25 - p1 = Poly1(coef, domain=d1, window=w1) - - d2 = Poly2.domain + random((2,))*.25 - w2 = Poly2.window + random((2,))*.25 - p2 = Poly2.cast(p1, domain=d2, window=w2) - - assert_almost_equal(p2.domain, d2) - assert_almost_equal(p2.window, w2) - assert_almost_equal(p2(x), p1(x)) - - -# -# test methods that depend on one class -# - - -def test_identity(Poly): - d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 - x = np.linspace(d[0], d[1], 11) - p = Poly.identity(domain=d, window=w) - assert_equal(p.domain, d) - assert_equal(p.window, w) - assert_almost_equal(p(x), x) - - -def test_basis(Poly): - d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 - p = Poly.basis(5, domain=d, window=w) - assert_equal(p.domain, d) - assert_equal(p.window, w) - assert_equal(p.coef, [0]*5 + [1]) - - -def test_fromroots(Poly): - # check that requested roots are zeros of a polynomial - # of correct degree, domain, and window. 
- d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 - r = random((5,)) - p1 = Poly.fromroots(r, domain=d, window=w) - assert_equal(p1.degree(), len(r)) - assert_equal(p1.domain, d) - assert_equal(p1.window, w) - assert_almost_equal(p1(r), 0) - - # check that polynomial is monic - pdom = Polynomial.domain - pwin = Polynomial.window - p2 = Polynomial.cast(p1, domain=pdom, window=pwin) - assert_almost_equal(p2.coef[-1], 1) - - -def test_bad_conditioned_fit(Poly): - - x = [0., 0., 1.] - y = [1., 2., 3.] - - # check RankWarning is raised - with pytest.warns(RankWarning) as record: - Poly.fit(x, y, 2) - assert record[0].message.args[0] == "The fit may be poorly conditioned" - - -def test_fit(Poly): - - def f(x): - return x*(x - 1)*(x - 2) - x = np.linspace(0, 3) - y = f(x) - - # check default value of domain and window - p = Poly.fit(x, y, 3) - assert_almost_equal(p.domain, [0, 3]) - assert_almost_equal(p(x), y) - assert_equal(p.degree(), 3) - - # check with given domains and window - d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 - p = Poly.fit(x, y, 3, domain=d, window=w) - assert_almost_equal(p(x), y) - assert_almost_equal(p.domain, d) - assert_almost_equal(p.window, w) - p = Poly.fit(x, y, [0, 1, 2, 3], domain=d, window=w) - assert_almost_equal(p(x), y) - assert_almost_equal(p.domain, d) - assert_almost_equal(p.window, w) - - # check with class domain default - p = Poly.fit(x, y, 3, []) - assert_equal(p.domain, Poly.domain) - assert_equal(p.window, Poly.window) - p = Poly.fit(x, y, [0, 1, 2, 3], []) - assert_equal(p.domain, Poly.domain) - assert_equal(p.window, Poly.window) - - # check that fit accepts weights. 
- w = np.zeros_like(x) - z = y + random(y.shape)*.25 - w[::2] = 1 - p1 = Poly.fit(x[::2], z[::2], 3) - p2 = Poly.fit(x, z, 3, w=w) - p3 = Poly.fit(x, z, [0, 1, 2, 3], w=w) - assert_almost_equal(p1(x), p2(x)) - assert_almost_equal(p2(x), p3(x)) - - -def test_equal(Poly): - p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) - p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) - p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) - p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) - assert_(p1 == p1) - assert_(not p1 == p2) - assert_(not p1 == p3) - assert_(not p1 == p4) - - -def test_not_equal(Poly): - p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) - p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) - p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) - p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) - assert_(not p1 != p1) - assert_(p1 != p2) - assert_(p1 != p3) - assert_(p1 != p4) - - -def test_add(Poly): - # This checks commutation, not numerical correctness - c1 = list(random((4,)) + .5) - c2 = list(random((3,)) + .5) - p1 = Poly(c1) - p2 = Poly(c2) - p3 = p1 + p2 - assert_poly_almost_equal(p2 + p1, p3) - assert_poly_almost_equal(p1 + c2, p3) - assert_poly_almost_equal(c2 + p1, p3) - assert_poly_almost_equal(p1 + tuple(c2), p3) - assert_poly_almost_equal(tuple(c2) + p1, p3) - assert_poly_almost_equal(p1 + np.array(c2), p3) - assert_poly_almost_equal(np.array(c2) + p1, p3) - assert_raises(TypeError, op.add, p1, Poly([0], domain=Poly.domain + 1)) - assert_raises(TypeError, op.add, p1, Poly([0], window=Poly.window + 1)) - if Poly is Polynomial: - assert_raises(TypeError, op.add, p1, Chebyshev([0])) - else: - assert_raises(TypeError, op.add, p1, Polynomial([0])) - - -def test_sub(Poly): - # This checks commutation, not numerical correctness - c1 = list(random((4,)) + .5) - c2 = list(random((3,)) + .5) - p1 = Poly(c1) - p2 = Poly(c2) - p3 = p1 - p2 - assert_poly_almost_equal(p2 - p1, -p3) - assert_poly_almost_equal(p1 - c2, p3) - assert_poly_almost_equal(c2 - 
p1, -p3) - assert_poly_almost_equal(p1 - tuple(c2), p3) - assert_poly_almost_equal(tuple(c2) - p1, -p3) - assert_poly_almost_equal(p1 - np.array(c2), p3) - assert_poly_almost_equal(np.array(c2) - p1, -p3) - assert_raises(TypeError, op.sub, p1, Poly([0], domain=Poly.domain + 1)) - assert_raises(TypeError, op.sub, p1, Poly([0], window=Poly.window + 1)) - if Poly is Polynomial: - assert_raises(TypeError, op.sub, p1, Chebyshev([0])) - else: - assert_raises(TypeError, op.sub, p1, Polynomial([0])) - - -def test_mul(Poly): - c1 = list(random((4,)) + .5) - c2 = list(random((3,)) + .5) - p1 = Poly(c1) - p2 = Poly(c2) - p3 = p1 * p2 - assert_poly_almost_equal(p2 * p1, p3) - assert_poly_almost_equal(p1 * c2, p3) - assert_poly_almost_equal(c2 * p1, p3) - assert_poly_almost_equal(p1 * tuple(c2), p3) - assert_poly_almost_equal(tuple(c2) * p1, p3) - assert_poly_almost_equal(p1 * np.array(c2), p3) - assert_poly_almost_equal(np.array(c2) * p1, p3) - assert_poly_almost_equal(p1 * 2, p1 * Poly([2])) - assert_poly_almost_equal(2 * p1, p1 * Poly([2])) - assert_raises(TypeError, op.mul, p1, Poly([0], domain=Poly.domain + 1)) - assert_raises(TypeError, op.mul, p1, Poly([0], window=Poly.window + 1)) - if Poly is Polynomial: - assert_raises(TypeError, op.mul, p1, Chebyshev([0])) - else: - assert_raises(TypeError, op.mul, p1, Polynomial([0])) - - -def test_floordiv(Poly): - c1 = list(random((4,)) + .5) - c2 = list(random((3,)) + .5) - c3 = list(random((2,)) + .5) - p1 = Poly(c1) - p2 = Poly(c2) - p3 = Poly(c3) - p4 = p1 * p2 + p3 - c4 = list(p4.coef) - assert_poly_almost_equal(p4 // p2, p1) - assert_poly_almost_equal(p4 // c2, p1) - assert_poly_almost_equal(c4 // p2, p1) - assert_poly_almost_equal(p4 // tuple(c2), p1) - assert_poly_almost_equal(tuple(c4) // p2, p1) - assert_poly_almost_equal(p4 // np.array(c2), p1) - assert_poly_almost_equal(np.array(c4) // p2, p1) - assert_poly_almost_equal(2 // p2, Poly([0])) - assert_poly_almost_equal(p2 // 2, 0.5*p2) - assert_raises( - TypeError, 
op.floordiv, p1, Poly([0], domain=Poly.domain + 1)) - assert_raises( - TypeError, op.floordiv, p1, Poly([0], window=Poly.window + 1)) - if Poly is Polynomial: - assert_raises(TypeError, op.floordiv, p1, Chebyshev([0])) - else: - assert_raises(TypeError, op.floordiv, p1, Polynomial([0])) - - -def test_truediv(Poly): - # true division is valid only if the denominator is a Number and - # not a python bool. - p1 = Poly([1,2,3]) - p2 = p1 * 5 - - for stype in np.ScalarType: - if not issubclass(stype, Number) or issubclass(stype, bool): - continue - s = stype(5) - assert_poly_almost_equal(op.truediv(p2, s), p1) - assert_raises(TypeError, op.truediv, s, p2) - for stype in (int, long, float): - s = stype(5) - assert_poly_almost_equal(op.truediv(p2, s), p1) - assert_raises(TypeError, op.truediv, s, p2) - for stype in [complex]: - s = stype(5, 0) - assert_poly_almost_equal(op.truediv(p2, s), p1) - assert_raises(TypeError, op.truediv, s, p2) - for s in [tuple(), list(), dict(), bool(), np.array([1])]: - assert_raises(TypeError, op.truediv, p2, s) - assert_raises(TypeError, op.truediv, s, p2) - for ptype in classes: - assert_raises(TypeError, op.truediv, p2, ptype(1)) - - -def test_mod(Poly): - # This checks commutation, not numerical correctness - c1 = list(random((4,)) + .5) - c2 = list(random((3,)) + .5) - c3 = list(random((2,)) + .5) - p1 = Poly(c1) - p2 = Poly(c2) - p3 = Poly(c3) - p4 = p1 * p2 + p3 - c4 = list(p4.coef) - assert_poly_almost_equal(p4 % p2, p3) - assert_poly_almost_equal(p4 % c2, p3) - assert_poly_almost_equal(c4 % p2, p3) - assert_poly_almost_equal(p4 % tuple(c2), p3) - assert_poly_almost_equal(tuple(c4) % p2, p3) - assert_poly_almost_equal(p4 % np.array(c2), p3) - assert_poly_almost_equal(np.array(c4) % p2, p3) - assert_poly_almost_equal(2 % p2, Poly([2])) - assert_poly_almost_equal(p2 % 2, Poly([0])) - assert_raises(TypeError, op.mod, p1, Poly([0], domain=Poly.domain + 1)) - assert_raises(TypeError, op.mod, p1, Poly([0], window=Poly.window + 1)) - if 
Poly is Polynomial: - assert_raises(TypeError, op.mod, p1, Chebyshev([0])) - else: - assert_raises(TypeError, op.mod, p1, Polynomial([0])) - - -def test_divmod(Poly): - # This checks commutation, not numerical correctness - c1 = list(random((4,)) + .5) - c2 = list(random((3,)) + .5) - c3 = list(random((2,)) + .5) - p1 = Poly(c1) - p2 = Poly(c2) - p3 = Poly(c3) - p4 = p1 * p2 + p3 - c4 = list(p4.coef) - quo, rem = divmod(p4, p2) - assert_poly_almost_equal(quo, p1) - assert_poly_almost_equal(rem, p3) - quo, rem = divmod(p4, c2) - assert_poly_almost_equal(quo, p1) - assert_poly_almost_equal(rem, p3) - quo, rem = divmod(c4, p2) - assert_poly_almost_equal(quo, p1) - assert_poly_almost_equal(rem, p3) - quo, rem = divmod(p4, tuple(c2)) - assert_poly_almost_equal(quo, p1) - assert_poly_almost_equal(rem, p3) - quo, rem = divmod(tuple(c4), p2) - assert_poly_almost_equal(quo, p1) - assert_poly_almost_equal(rem, p3) - quo, rem = divmod(p4, np.array(c2)) - assert_poly_almost_equal(quo, p1) - assert_poly_almost_equal(rem, p3) - quo, rem = divmod(np.array(c4), p2) - assert_poly_almost_equal(quo, p1) - assert_poly_almost_equal(rem, p3) - quo, rem = divmod(p2, 2) - assert_poly_almost_equal(quo, 0.5*p2) - assert_poly_almost_equal(rem, Poly([0])) - quo, rem = divmod(2, p2) - assert_poly_almost_equal(quo, Poly([0])) - assert_poly_almost_equal(rem, Poly([2])) - assert_raises(TypeError, divmod, p1, Poly([0], domain=Poly.domain + 1)) - assert_raises(TypeError, divmod, p1, Poly([0], window=Poly.window + 1)) - if Poly is Polynomial: - assert_raises(TypeError, divmod, p1, Chebyshev([0])) - else: - assert_raises(TypeError, divmod, p1, Polynomial([0])) - - -def test_roots(Poly): - d = Poly.domain * 1.25 + .25 - w = Poly.window - tgt = np.linspace(d[0], d[1], 5) - res = np.sort(Poly.fromroots(tgt, domain=d, window=w).roots()) - assert_almost_equal(res, tgt) - # default domain and window - res = np.sort(Poly.fromroots(tgt).roots()) - assert_almost_equal(res, tgt) - - -def test_degree(Poly): - p 
= Poly.basis(5) - assert_equal(p.degree(), 5) - - -def test_copy(Poly): - p1 = Poly.basis(5) - p2 = p1.copy() - assert_(p1 == p2) - assert_(p1 is not p2) - assert_(p1.coef is not p2.coef) - assert_(p1.domain is not p2.domain) - assert_(p1.window is not p2.window) - - -def test_integ(Poly): - P = Polynomial - # Check defaults - p0 = Poly.cast(P([1*2, 2*3, 3*4])) - p1 = P.cast(p0.integ()) - p2 = P.cast(p0.integ(2)) - assert_poly_almost_equal(p1, P([0, 2, 3, 4])) - assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1])) - # Check with k - p0 = Poly.cast(P([1*2, 2*3, 3*4])) - p1 = P.cast(p0.integ(k=1)) - p2 = P.cast(p0.integ(2, k=[1, 1])) - assert_poly_almost_equal(p1, P([1, 2, 3, 4])) - assert_poly_almost_equal(p2, P([1, 1, 1, 1, 1])) - # Check with lbnd - p0 = Poly.cast(P([1*2, 2*3, 3*4])) - p1 = P.cast(p0.integ(lbnd=1)) - p2 = P.cast(p0.integ(2, lbnd=1)) - assert_poly_almost_equal(p1, P([-9, 2, 3, 4])) - assert_poly_almost_equal(p2, P([6, -9, 1, 1, 1])) - # Check scaling - d = 2*Poly.domain - p0 = Poly.cast(P([1*2, 2*3, 3*4]), domain=d) - p1 = P.cast(p0.integ()) - p2 = P.cast(p0.integ(2)) - assert_poly_almost_equal(p1, P([0, 2, 3, 4])) - assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1])) - - -def test_deriv(Poly): - # Check that the derivative is the inverse of integration. It is - # assumes that the integration has been checked elsewhere. 
- d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 - p1 = Poly([1, 2, 3], domain=d, window=w) - p2 = p1.integ(2, k=[1, 2]) - p3 = p1.integ(1, k=[1]) - assert_almost_equal(p2.deriv(1).coef, p3.coef) - assert_almost_equal(p2.deriv(2).coef, p1.coef) - # default domain and window - p1 = Poly([1, 2, 3]) - p2 = p1.integ(2, k=[1, 2]) - p3 = p1.integ(1, k=[1]) - assert_almost_equal(p2.deriv(1).coef, p3.coef) - assert_almost_equal(p2.deriv(2).coef, p1.coef) - - -def test_linspace(Poly): - d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 - p = Poly([1, 2, 3], domain=d, window=w) - # check default domain - xtgt = np.linspace(d[0], d[1], 20) - ytgt = p(xtgt) - xres, yres = p.linspace(20) - assert_almost_equal(xres, xtgt) - assert_almost_equal(yres, ytgt) - # check specified domain - xtgt = np.linspace(0, 2, 20) - ytgt = p(xtgt) - xres, yres = p.linspace(20, domain=[0, 2]) - assert_almost_equal(xres, xtgt) - assert_almost_equal(yres, ytgt) - - -def test_pow(Poly): - d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 - tgt = Poly([1], domain=d, window=w) - tst = Poly([1, 2, 3], domain=d, window=w) - for i in range(5): - assert_poly_almost_equal(tst**i, tgt) - tgt = tgt * tst - # default domain and window - tgt = Poly([1]) - tst = Poly([1, 2, 3]) - for i in range(5): - assert_poly_almost_equal(tst**i, tgt) - tgt = tgt * tst - # check error for invalid powers - assert_raises(ValueError, op.pow, tgt, 1.5) - assert_raises(ValueError, op.pow, tgt, -1) - - -def test_call(Poly): - P = Polynomial - d = Poly.domain - x = np.linspace(d[0], d[1], 11) - - # Check defaults - p = Poly.cast(P([1, 2, 3])) - tgt = 1 + x*(2 + 3*x) - res = p(x) - assert_almost_equal(res, tgt) - - -def test_cutdeg(Poly): - p = Poly([1, 2, 3]) - assert_raises(ValueError, p.cutdeg, .5) - assert_raises(ValueError, p.cutdeg, -1) - assert_equal(len(p.cutdeg(3)), 3) - assert_equal(len(p.cutdeg(2)), 3) - assert_equal(len(p.cutdeg(1)), 2) - 
assert_equal(len(p.cutdeg(0)), 1) - - -def test_truncate(Poly): - p = Poly([1, 2, 3]) - assert_raises(ValueError, p.truncate, .5) - assert_raises(ValueError, p.truncate, 0) - assert_equal(len(p.truncate(4)), 3) - assert_equal(len(p.truncate(3)), 3) - assert_equal(len(p.truncate(2)), 2) - assert_equal(len(p.truncate(1)), 1) - - -def test_trim(Poly): - c = [1, 1e-6, 1e-12, 0] - p = Poly(c) - assert_equal(p.trim().coef, c[:3]) - assert_equal(p.trim(1e-10).coef, c[:2]) - assert_equal(p.trim(1e-5).coef, c[:1]) - - -def test_mapparms(Poly): - # check with defaults. Should be identity. - d = Poly.domain - w = Poly.window - p = Poly([1], domain=d, window=w) - assert_almost_equal([0, 1], p.mapparms()) - # - w = 2*d + 1 - p = Poly([1], domain=d, window=w) - assert_almost_equal([1, 2], p.mapparms()) - - -def test_ufunc_override(Poly): - p = Poly([1, 2, 3]) - x = np.ones(3) - assert_raises(TypeError, np.add, p, x) - assert_raises(TypeError, np.add, x, p) - - - -class TestLatexRepr(object): - """Test the latex repr used by ipython """ - - def as_latex(self, obj): - # right now we ignore the formatting of scalars in our tests, since - # it makes them too verbose. 
Ideally, the formatting of scalars will - # be fixed such that tests below continue to pass - obj._repr_latex_scalar = lambda x: str(x) - try: - return obj._repr_latex_() - finally: - del obj._repr_latex_scalar - - def test_simple_polynomial(self): - # default input - p = Polynomial([1, 2, 3]) - assert_equal(self.as_latex(p), - r'$x \mapsto 1.0 + 2.0\,x + 3.0\,x^{2}$') - - # translated input - p = Polynomial([1, 2, 3], domain=[-2, 0]) - assert_equal(self.as_latex(p), - r'$x \mapsto 1.0 + 2.0\,\left(1.0 + x\right) + 3.0\,\left(1.0 + x\right)^{2}$') - - # scaled input - p = Polynomial([1, 2, 3], domain=[-0.5, 0.5]) - assert_equal(self.as_latex(p), - r'$x \mapsto 1.0 + 2.0\,\left(2.0x\right) + 3.0\,\left(2.0x\right)^{2}$') - - # affine input - p = Polynomial([1, 2, 3], domain=[-1, 0]) - assert_equal(self.as_latex(p), - r'$x \mapsto 1.0 + 2.0\,\left(1.0 + 2.0x\right) + 3.0\,\left(1.0 + 2.0x\right)^{2}$') - - def test_basis_func(self): - p = Chebyshev([1, 2, 3]) - assert_equal(self.as_latex(p), - r'$x \mapsto 1.0\,{T}_{0}(x) + 2.0\,{T}_{1}(x) + 3.0\,{T}_{2}(x)$') - # affine input - check no surplus parens are added - p = Chebyshev([1, 2, 3], domain=[-1, 0]) - assert_equal(self.as_latex(p), - r'$x \mapsto 1.0\,{T}_{0}(1.0 + 2.0x) + 2.0\,{T}_{1}(1.0 + 2.0x) + 3.0\,{T}_{2}(1.0 + 2.0x)$') - - def test_multichar_basis_func(self): - p = HermiteE([1, 2, 3]) - assert_equal(self.as_latex(p), - r'$x \mapsto 1.0\,{He}_{0}(x) + 2.0\,{He}_{1}(x) + 3.0\,{He}_{2}(x)$') - - -# -# Test class method that only exists for some classes -# - - -class TestInterpolate(object): - - def f(self, x): - return x * (x - 1) * (x - 2) - - def test_raises(self): - assert_raises(ValueError, Chebyshev.interpolate, self.f, -1) - assert_raises(TypeError, Chebyshev.interpolate, self.f, 10.) 
- - def test_dimensions(self): - for deg in range(1, 5): - assert_(Chebyshev.interpolate(self.f, deg).degree() == deg) - - def test_approximation(self): - - def powx(x, p): - return x**p - - x = np.linspace(0, 2, 10) - for deg in range(0, 10): - for t in range(0, deg + 1): - p = Chebyshev.interpolate(powx, deg, domain=[0, 2], args=(t,)) - assert_almost_equal(p(x), powx(x, t), decimal=12) diff --git a/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_hermite.py b/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_hermite.py deleted file mode 100644 index 271c196..0000000 --- a/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_hermite.py +++ /dev/null @@ -1,557 +0,0 @@ -"""Tests for hermite module. - -""" -from __future__ import division, absolute_import, print_function - -from functools import reduce - -import numpy as np -import numpy.polynomial.hermite as herm -from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) - -H0 = np.array([1]) -H1 = np.array([0, 2]) -H2 = np.array([-2, 0, 4]) -H3 = np.array([0, -12, 0, 8]) -H4 = np.array([12, 0, -48, 0, 16]) -H5 = np.array([0, 120, 0, -160, 0, 32]) -H6 = np.array([-120, 0, 720, 0, -480, 0, 64]) -H7 = np.array([0, -1680, 0, 3360, 0, -1344, 0, 128]) -H8 = np.array([1680, 0, -13440, 0, 13440, 0, -3584, 0, 256]) -H9 = np.array([0, 30240, 0, -80640, 0, 48384, 0, -9216, 0, 512]) - -Hlist = [H0, H1, H2, H3, H4, H5, H6, H7, H8, H9] - - -def trim(x): - return herm.hermtrim(x, tol=1e-6) - - -class TestConstants(object): - - def test_hermdomain(self): - assert_equal(herm.hermdomain, [-1, 1]) - - def test_hermzero(self): - assert_equal(herm.hermzero, [0]) - - def test_hermone(self): - assert_equal(herm.hermone, [1]) - - def test_hermx(self): - assert_equal(herm.hermx, [0, .5]) - - -class TestArithmetic(object): - x = np.linspace(-3, 3, 100) - - def test_hermadd(self): - for i in range(5): - for j in range(5): - msg 
= "At i=%d, j=%d" % (i, j) - tgt = np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] += 1 - res = herm.hermadd([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_hermsub(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] -= 1 - res = herm.hermsub([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_hermmulx(self): - assert_equal(herm.hermmulx([0]), [0]) - assert_equal(herm.hermmulx([1]), [0, .5]) - for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i - 1) + [i, 0, .5] - assert_equal(herm.hermmulx(ser), tgt) - - def test_hermmul(self): - # check values of result - for i in range(5): - pol1 = [0]*i + [1] - val1 = herm.hermval(self.x, pol1) - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - pol2 = [0]*j + [1] - val2 = herm.hermval(self.x, pol2) - pol3 = herm.hermmul(pol1, pol2) - val3 = herm.hermval(self.x, pol3) - assert_(len(pol3) == i + j + 1, msg) - assert_almost_equal(val3, val1*val2, err_msg=msg) - - def test_hermdiv(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - ci = [0]*i + [1] - cj = [0]*j + [1] - tgt = herm.hermadd(ci, cj) - quo, rem = herm.hermdiv(tgt, ci) - res = herm.hermadd(herm.hermmul(quo, ci), rem) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_hermpow(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - c = np.arange(i + 1) - tgt = reduce(herm.hermmul, [c]*j, np.array([1])) - res = herm.hermpow(c, j) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - -class TestEvaluation(object): - # coefficients of 1 + 2*x + 3*x**2 - c1d = np.array([2.5, 1., .75]) - c2d = np.einsum('i,j->ij', c1d, c1d) - c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) - - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - y = polyval(x, [1., 2., 3.]) - - def test_hermval(self): - #check empty input - 
assert_equal(herm.hermval([], [1]).size, 0) - - #check normal input) - x = np.linspace(-1, 1) - y = [polyval(x, c) for c in Hlist] - for i in range(10): - msg = "At i=%d" % i - tgt = y[i] - res = herm.hermval(x, [0]*i + [1]) - assert_almost_equal(res, tgt, err_msg=msg) - - #check that shape is preserved - for i in range(3): - dims = [2]*i - x = np.zeros(dims) - assert_equal(herm.hermval(x, [1]).shape, dims) - assert_equal(herm.hermval(x, [1, 0]).shape, dims) - assert_equal(herm.hermval(x, [1, 0, 0]).shape, dims) - - def test_hermval2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test exceptions - assert_raises(ValueError, herm.hermval2d, x1, x2[:2], self.c2d) - - #test values - tgt = y1*y2 - res = herm.hermval2d(x1, x2, self.c2d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = herm.hermval2d(z, z, self.c2d) - assert_(res.shape == (2, 3)) - - def test_hermval3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test exceptions - assert_raises(ValueError, herm.hermval3d, x1, x2, x3[:2], self.c3d) - - #test values - tgt = y1*y2*y3 - res = herm.hermval3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = herm.hermval3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)) - - def test_hermgrid2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j->ij', y1, y2) - res = herm.hermgrid2d(x1, x2, self.c2d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = herm.hermgrid2d(z, z, self.c2d) - assert_(res.shape == (2, 3)*2) - - def test_hermgrid3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j,k->ijk', y1, y2, y3) - res = herm.hermgrid3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = herm.hermgrid3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)*3) - - -class TestIntegral(object): - - def test_hermint(self): - # check 
exceptions - assert_raises(TypeError, herm.hermint, [0], .5) - assert_raises(ValueError, herm.hermint, [0], -1) - assert_raises(ValueError, herm.hermint, [0], 1, [0, 0]) - assert_raises(ValueError, herm.hermint, [0], lbnd=[0]) - assert_raises(ValueError, herm.hermint, [0], scl=[0]) - assert_raises(TypeError, herm.hermint, [0], axis=.5) - - # test integration of zero polynomial - for i in range(2, 5): - k = [0]*(i - 2) + [1] - res = herm.hermint([0], m=i, k=k) - assert_almost_equal(res, [0, .5]) - - # check single integration with integration constant - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] - hermpol = herm.poly2herm(pol) - hermint = herm.hermint(hermpol, m=1, k=[i]) - res = herm.herm2poly(hermint) - assert_almost_equal(trim(res), trim(tgt)) - - # check single integration with integration constant and lbnd - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - hermpol = herm.poly2herm(pol) - hermint = herm.hermint(hermpol, m=1, k=[i], lbnd=-1) - assert_almost_equal(herm.hermval(-1, hermint), i) - - # check single integration with integration constant and scaling - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] - hermpol = herm.poly2herm(pol) - hermint = herm.hermint(hermpol, m=1, k=[i], scl=2) - res = herm.herm2poly(hermint) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with default k - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = herm.hermint(tgt, m=1) - res = herm.hermint(pol, m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with defined k - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = herm.hermint(tgt, m=1, k=[k]) - res = herm.hermint(pol, m=j, k=list(range(j))) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with lbnd - for i in range(5): - for j in range(2, 5): - 
pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = herm.hermint(tgt, m=1, k=[k], lbnd=-1) - res = herm.hermint(pol, m=j, k=list(range(j)), lbnd=-1) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with scaling - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = herm.hermint(tgt, m=1, k=[k], scl=2) - res = herm.hermint(pol, m=j, k=list(range(j)), scl=2) - assert_almost_equal(trim(res), trim(tgt)) - - def test_hermint_axis(self): - # check that axis keyword works - c2d = np.random.random((3, 4)) - - tgt = np.vstack([herm.hermint(c) for c in c2d.T]).T - res = herm.hermint(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([herm.hermint(c) for c in c2d]) - res = herm.hermint(c2d, axis=1) - assert_almost_equal(res, tgt) - - tgt = np.vstack([herm.hermint(c, k=3) for c in c2d]) - res = herm.hermint(c2d, k=3, axis=1) - assert_almost_equal(res, tgt) - - -class TestDerivative(object): - - def test_hermder(self): - # check exceptions - assert_raises(TypeError, herm.hermder, [0], .5) - assert_raises(ValueError, herm.hermder, [0], -1) - - # check that zeroth derivative does nothing - for i in range(5): - tgt = [0]*i + [1] - res = herm.hermder(tgt, m=0) - assert_equal(trim(res), trim(tgt)) - - # check that derivation is the inverse of integration - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = herm.hermder(herm.hermint(tgt, m=j), m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check derivation with scaling - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = herm.hermder(herm.hermint(tgt, m=j, scl=2), m=j, scl=.5) - assert_almost_equal(trim(res), trim(tgt)) - - def test_hermder_axis(self): - # check that axis keyword works - c2d = np.random.random((3, 4)) - - tgt = np.vstack([herm.hermder(c) for c in c2d.T]).T - res = herm.hermder(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([herm.hermder(c) for c 
in c2d]) - res = herm.hermder(c2d, axis=1) - assert_almost_equal(res, tgt) - - -class TestVander(object): - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - - def test_hermvander(self): - # check for 1d x - x = np.arange(3) - v = herm.hermvander(x, 3) - assert_(v.shape == (3, 4)) - for i in range(4): - coef = [0]*i + [1] - assert_almost_equal(v[..., i], herm.hermval(x, coef)) - - # check for 2d x - x = np.array([[1, 2], [3, 4], [5, 6]]) - v = herm.hermvander(x, 3) - assert_(v.shape == (3, 2, 4)) - for i in range(4): - coef = [0]*i + [1] - assert_almost_equal(v[..., i], herm.hermval(x, coef)) - - def test_hermvander2d(self): - # also tests hermval2d for non-square coefficient array - x1, x2, x3 = self.x - c = np.random.random((2, 3)) - van = herm.hermvander2d(x1, x2, [1, 2]) - tgt = herm.hermval2d(x1, x2, c) - res = np.dot(van, c.flat) - assert_almost_equal(res, tgt) - - # check shape - van = herm.hermvander2d([x1], [x2], [1, 2]) - assert_(van.shape == (1, 5, 6)) - - def test_hermvander3d(self): - # also tests hermval3d for non-square coefficient array - x1, x2, x3 = self.x - c = np.random.random((2, 3, 4)) - van = herm.hermvander3d(x1, x2, x3, [1, 2, 3]) - tgt = herm.hermval3d(x1, x2, x3, c) - res = np.dot(van, c.flat) - assert_almost_equal(res, tgt) - - # check shape - van = herm.hermvander3d([x1], [x2], [x3], [1, 2, 3]) - assert_(van.shape == (1, 5, 24)) - - -class TestFitting(object): - - def test_hermfit(self): - def f(x): - return x*(x - 1)*(x - 2) - - def f2(x): - return x**4 + x**2 + 1 - - # Test exceptions - assert_raises(ValueError, herm.hermfit, [1], [1], -1) - assert_raises(TypeError, herm.hermfit, [[1]], [1], 0) - assert_raises(TypeError, herm.hermfit, [], [1], 0) - assert_raises(TypeError, herm.hermfit, [1], [[[1]]], 0) - assert_raises(TypeError, herm.hermfit, [1, 2], [1], 0) - assert_raises(TypeError, herm.hermfit, [1], [1, 2], 0) - assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, 
herm.hermfit, [1], [1], 0, w=[1, 1]) - assert_raises(ValueError, herm.hermfit, [1], [1], [-1,]) - assert_raises(ValueError, herm.hermfit, [1], [1], [2, -1, 6]) - assert_raises(TypeError, herm.hermfit, [1], [1], []) - - # Test fit - x = np.linspace(0, 2) - y = f(x) - # - coef3 = herm.hermfit(x, y, 3) - assert_equal(len(coef3), 4) - assert_almost_equal(herm.hermval(x, coef3), y) - coef3 = herm.hermfit(x, y, [0, 1, 2, 3]) - assert_equal(len(coef3), 4) - assert_almost_equal(herm.hermval(x, coef3), y) - # - coef4 = herm.hermfit(x, y, 4) - assert_equal(len(coef4), 5) - assert_almost_equal(herm.hermval(x, coef4), y) - coef4 = herm.hermfit(x, y, [0, 1, 2, 3, 4]) - assert_equal(len(coef4), 5) - assert_almost_equal(herm.hermval(x, coef4), y) - # check things still work if deg is not in strict increasing - coef4 = herm.hermfit(x, y, [2, 3, 4, 1, 0]) - assert_equal(len(coef4), 5) - assert_almost_equal(herm.hermval(x, coef4), y) - # - coef2d = herm.hermfit(x, np.array([y, y]).T, 3) - assert_almost_equal(coef2d, np.array([coef3, coef3]).T) - coef2d = herm.hermfit(x, np.array([y, y]).T, [0, 1, 2, 3]) - assert_almost_equal(coef2d, np.array([coef3, coef3]).T) - # test weighting - w = np.zeros_like(x) - yw = y.copy() - w[1::2] = 1 - y[0::2] = 0 - wcoef3 = herm.hermfit(x, yw, 3, w=w) - assert_almost_equal(wcoef3, coef3) - wcoef3 = herm.hermfit(x, yw, [0, 1, 2, 3], w=w) - assert_almost_equal(wcoef3, coef3) - # - wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, 3, w=w) - assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) - wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) - assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) - # test scaling with complex values x points whose square - # is zero when summed. 
- x = [1, 1j, -1, -1j] - assert_almost_equal(herm.hermfit(x, x, 1), [0, .5]) - assert_almost_equal(herm.hermfit(x, x, [0, 1]), [0, .5]) - # test fitting only even Legendre polynomials - x = np.linspace(-1, 1) - y = f2(x) - coef1 = herm.hermfit(x, y, 4) - assert_almost_equal(herm.hermval(x, coef1), y) - coef2 = herm.hermfit(x, y, [0, 2, 4]) - assert_almost_equal(herm.hermval(x, coef2), y) - assert_almost_equal(coef1, coef2) - - -class TestCompanion(object): - - def test_raises(self): - assert_raises(ValueError, herm.hermcompanion, []) - assert_raises(ValueError, herm.hermcompanion, [1]) - - def test_dimensions(self): - for i in range(1, 5): - coef = [0]*i + [1] - assert_(herm.hermcompanion(coef).shape == (i, i)) - - def test_linear_root(self): - assert_(herm.hermcompanion([1, 2])[0, 0] == -.25) - - -class TestGauss(object): - - def test_100(self): - x, w = herm.hermgauss(100) - - # test orthogonality. Note that the results need to be normalized, - # otherwise the huge values that can arise from fast growing - # functions like Laguerre can be very confusing. 
- v = herm.hermvander(x, 99) - vv = np.dot(v.T * w, v) - vd = 1/np.sqrt(vv.diagonal()) - vv = vd[:, None] * vv * vd - assert_almost_equal(vv, np.eye(100)) - - # check that the integral of 1 is correct - tgt = np.sqrt(np.pi) - assert_almost_equal(w.sum(), tgt) - - -class TestMisc(object): - - def test_hermfromroots(self): - res = herm.hermfromroots([]) - assert_almost_equal(trim(res), [1]) - for i in range(1, 5): - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) - pol = herm.hermfromroots(roots) - res = herm.hermval(roots, pol) - tgt = 0 - assert_(len(pol) == i + 1) - assert_almost_equal(herm.herm2poly(pol)[-1], 1) - assert_almost_equal(res, tgt) - - def test_hermroots(self): - assert_almost_equal(herm.hermroots([1]), []) - assert_almost_equal(herm.hermroots([1, 1]), [-.5]) - for i in range(2, 5): - tgt = np.linspace(-1, 1, i) - res = herm.hermroots(herm.hermfromroots(tgt)) - assert_almost_equal(trim(res), trim(tgt)) - - def test_hermtrim(self): - coef = [2, -1, 1, 0] - - # Test exceptions - assert_raises(ValueError, herm.hermtrim, coef, -1) - - # Test results - assert_equal(herm.hermtrim(coef), coef[:-1]) - assert_equal(herm.hermtrim(coef, 1), coef[:-3]) - assert_equal(herm.hermtrim(coef, 2), [0]) - - def test_hermline(self): - assert_equal(herm.hermline(3, 4), [3, 2]) - - def test_herm2poly(self): - for i in range(10): - assert_almost_equal(herm.herm2poly([0]*i + [1]), Hlist[i]) - - def test_poly2herm(self): - for i in range(10): - assert_almost_equal(herm.poly2herm(Hlist[i]), [0]*i + [1]) - - def test_weight(self): - x = np.linspace(-5, 5, 11) - tgt = np.exp(-x**2) - res = herm.hermweight(x) - assert_almost_equal(res, tgt) diff --git a/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_hermite_e.py b/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_hermite_e.py deleted file mode 100644 index 434b30e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_hermite_e.py +++ /dev/null @@ -1,558 +0,0 @@ -"""Tests for 
hermite_e module. - -""" -from __future__ import division, absolute_import, print_function - -from functools import reduce - -import numpy as np -import numpy.polynomial.hermite_e as herme -from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) - -He0 = np.array([1]) -He1 = np.array([0, 1]) -He2 = np.array([-1, 0, 1]) -He3 = np.array([0, -3, 0, 1]) -He4 = np.array([3, 0, -6, 0, 1]) -He5 = np.array([0, 15, 0, -10, 0, 1]) -He6 = np.array([-15, 0, 45, 0, -15, 0, 1]) -He7 = np.array([0, -105, 0, 105, 0, -21, 0, 1]) -He8 = np.array([105, 0, -420, 0, 210, 0, -28, 0, 1]) -He9 = np.array([0, 945, 0, -1260, 0, 378, 0, -36, 0, 1]) - -Helist = [He0, He1, He2, He3, He4, He5, He6, He7, He8, He9] - - -def trim(x): - return herme.hermetrim(x, tol=1e-6) - - -class TestConstants(object): - - def test_hermedomain(self): - assert_equal(herme.hermedomain, [-1, 1]) - - def test_hermezero(self): - assert_equal(herme.hermezero, [0]) - - def test_hermeone(self): - assert_equal(herme.hermeone, [1]) - - def test_hermex(self): - assert_equal(herme.hermex, [0, 1]) - - -class TestArithmetic(object): - x = np.linspace(-3, 3, 100) - - def test_hermeadd(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] += 1 - res = herme.hermeadd([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_hermesub(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] -= 1 - res = herme.hermesub([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_hermemulx(self): - assert_equal(herme.hermemulx([0]), [0]) - assert_equal(herme.hermemulx([1]), [0, 1]) - for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i - 1) + [i, 0, 1] - assert_equal(herme.hermemulx(ser), tgt) - - def test_hermemul(self): - # 
check values of result - for i in range(5): - pol1 = [0]*i + [1] - val1 = herme.hermeval(self.x, pol1) - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - pol2 = [0]*j + [1] - val2 = herme.hermeval(self.x, pol2) - pol3 = herme.hermemul(pol1, pol2) - val3 = herme.hermeval(self.x, pol3) - assert_(len(pol3) == i + j + 1, msg) - assert_almost_equal(val3, val1*val2, err_msg=msg) - - def test_hermediv(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - ci = [0]*i + [1] - cj = [0]*j + [1] - tgt = herme.hermeadd(ci, cj) - quo, rem = herme.hermediv(tgt, ci) - res = herme.hermeadd(herme.hermemul(quo, ci), rem) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_hermepow(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - c = np.arange(i + 1) - tgt = reduce(herme.hermemul, [c]*j, np.array([1])) - res = herme.hermepow(c, j) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - -class TestEvaluation(object): - # coefficients of 1 + 2*x + 3*x**2 - c1d = np.array([4., 2., 3.]) - c2d = np.einsum('i,j->ij', c1d, c1d) - c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) - - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - y = polyval(x, [1., 2., 3.]) - - def test_hermeval(self): - #check empty input - assert_equal(herme.hermeval([], [1]).size, 0) - - #check normal input) - x = np.linspace(-1, 1) - y = [polyval(x, c) for c in Helist] - for i in range(10): - msg = "At i=%d" % i - tgt = y[i] - res = herme.hermeval(x, [0]*i + [1]) - assert_almost_equal(res, tgt, err_msg=msg) - - #check that shape is preserved - for i in range(3): - dims = [2]*i - x = np.zeros(dims) - assert_equal(herme.hermeval(x, [1]).shape, dims) - assert_equal(herme.hermeval(x, [1, 0]).shape, dims) - assert_equal(herme.hermeval(x, [1, 0, 0]).shape, dims) - - def test_hermeval2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test exceptions - assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d) - - 
#test values - tgt = y1*y2 - res = herme.hermeval2d(x1, x2, self.c2d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = herme.hermeval2d(z, z, self.c2d) - assert_(res.shape == (2, 3)) - - def test_hermeval3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test exceptions - assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d) - - #test values - tgt = y1*y2*y3 - res = herme.hermeval3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = herme.hermeval3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)) - - def test_hermegrid2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j->ij', y1, y2) - res = herme.hermegrid2d(x1, x2, self.c2d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = herme.hermegrid2d(z, z, self.c2d) - assert_(res.shape == (2, 3)*2) - - def test_hermegrid3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j,k->ijk', y1, y2, y3) - res = herme.hermegrid3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = herme.hermegrid3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)*3) - - -class TestIntegral(object): - - def test_hermeint(self): - # check exceptions - assert_raises(TypeError, herme.hermeint, [0], .5) - assert_raises(ValueError, herme.hermeint, [0], -1) - assert_raises(ValueError, herme.hermeint, [0], 1, [0, 0]) - assert_raises(ValueError, herme.hermeint, [0], lbnd=[0]) - assert_raises(ValueError, herme.hermeint, [0], scl=[0]) - assert_raises(TypeError, herme.hermeint, [0], axis=.5) - - # test integration of zero polynomial - for i in range(2, 5): - k = [0]*(i - 2) + [1] - res = herme.hermeint([0], m=i, k=k) - assert_almost_equal(res, [0, 1]) - - # check single integration with integration constant - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] - hermepol = 
herme.poly2herme(pol) - hermeint = herme.hermeint(hermepol, m=1, k=[i]) - res = herme.herme2poly(hermeint) - assert_almost_equal(trim(res), trim(tgt)) - - # check single integration with integration constant and lbnd - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - hermepol = herme.poly2herme(pol) - hermeint = herme.hermeint(hermepol, m=1, k=[i], lbnd=-1) - assert_almost_equal(herme.hermeval(-1, hermeint), i) - - # check single integration with integration constant and scaling - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] - hermepol = herme.poly2herme(pol) - hermeint = herme.hermeint(hermepol, m=1, k=[i], scl=2) - res = herme.herme2poly(hermeint) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with default k - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = herme.hermeint(tgt, m=1) - res = herme.hermeint(pol, m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with defined k - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = herme.hermeint(tgt, m=1, k=[k]) - res = herme.hermeint(pol, m=j, k=list(range(j))) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with lbnd - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = herme.hermeint(tgt, m=1, k=[k], lbnd=-1) - res = herme.hermeint(pol, m=j, k=list(range(j)), lbnd=-1) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with scaling - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = herme.hermeint(tgt, m=1, k=[k], scl=2) - res = herme.hermeint(pol, m=j, k=list(range(j)), scl=2) - assert_almost_equal(trim(res), trim(tgt)) - - def test_hermeint_axis(self): - # check that axis keyword works - c2d = np.random.random((3, 4)) - - tgt = 
np.vstack([herme.hermeint(c) for c in c2d.T]).T - res = herme.hermeint(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([herme.hermeint(c) for c in c2d]) - res = herme.hermeint(c2d, axis=1) - assert_almost_equal(res, tgt) - - tgt = np.vstack([herme.hermeint(c, k=3) for c in c2d]) - res = herme.hermeint(c2d, k=3, axis=1) - assert_almost_equal(res, tgt) - - -class TestDerivative(object): - - def test_hermeder(self): - # check exceptions - assert_raises(TypeError, herme.hermeder, [0], .5) - assert_raises(ValueError, herme.hermeder, [0], -1) - - # check that zeroth derivative does nothing - for i in range(5): - tgt = [0]*i + [1] - res = herme.hermeder(tgt, m=0) - assert_equal(trim(res), trim(tgt)) - - # check that derivation is the inverse of integration - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = herme.hermeder(herme.hermeint(tgt, m=j), m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check derivation with scaling - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = herme.hermeder( - herme.hermeint(tgt, m=j, scl=2), m=j, scl=.5) - assert_almost_equal(trim(res), trim(tgt)) - - def test_hermeder_axis(self): - # check that axis keyword works - c2d = np.random.random((3, 4)) - - tgt = np.vstack([herme.hermeder(c) for c in c2d.T]).T - res = herme.hermeder(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([herme.hermeder(c) for c in c2d]) - res = herme.hermeder(c2d, axis=1) - assert_almost_equal(res, tgt) - - -class TestVander(object): - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - - def test_hermevander(self): - # check for 1d x - x = np.arange(3) - v = herme.hermevander(x, 3) - assert_(v.shape == (3, 4)) - for i in range(4): - coef = [0]*i + [1] - assert_almost_equal(v[..., i], herme.hermeval(x, coef)) - - # check for 2d x - x = np.array([[1, 2], [3, 4], [5, 6]]) - v = herme.hermevander(x, 3) - assert_(v.shape == (3, 2, 4)) - for i in range(4): - coef = [0]*i + 
[1] - assert_almost_equal(v[..., i], herme.hermeval(x, coef)) - - def test_hermevander2d(self): - # also tests hermeval2d for non-square coefficient array - x1, x2, x3 = self.x - c = np.random.random((2, 3)) - van = herme.hermevander2d(x1, x2, [1, 2]) - tgt = herme.hermeval2d(x1, x2, c) - res = np.dot(van, c.flat) - assert_almost_equal(res, tgt) - - # check shape - van = herme.hermevander2d([x1], [x2], [1, 2]) - assert_(van.shape == (1, 5, 6)) - - def test_hermevander3d(self): - # also tests hermeval3d for non-square coefficient array - x1, x2, x3 = self.x - c = np.random.random((2, 3, 4)) - van = herme.hermevander3d(x1, x2, x3, [1, 2, 3]) - tgt = herme.hermeval3d(x1, x2, x3, c) - res = np.dot(van, c.flat) - assert_almost_equal(res, tgt) - - # check shape - van = herme.hermevander3d([x1], [x2], [x3], [1, 2, 3]) - assert_(van.shape == (1, 5, 24)) - - -class TestFitting(object): - - def test_hermefit(self): - def f(x): - return x*(x - 1)*(x - 2) - - def f2(x): - return x**4 + x**2 + 1 - - # Test exceptions - assert_raises(ValueError, herme.hermefit, [1], [1], -1) - assert_raises(TypeError, herme.hermefit, [[1]], [1], 0) - assert_raises(TypeError, herme.hermefit, [], [1], 0) - assert_raises(TypeError, herme.hermefit, [1], [[[1]]], 0) - assert_raises(TypeError, herme.hermefit, [1, 2], [1], 0) - assert_raises(TypeError, herme.hermefit, [1], [1, 2], 0) - assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1, 1]) - assert_raises(ValueError, herme.hermefit, [1], [1], [-1,]) - assert_raises(ValueError, herme.hermefit, [1], [1], [2, -1, 6]) - assert_raises(TypeError, herme.hermefit, [1], [1], []) - - # Test fit - x = np.linspace(0, 2) - y = f(x) - # - coef3 = herme.hermefit(x, y, 3) - assert_equal(len(coef3), 4) - assert_almost_equal(herme.hermeval(x, coef3), y) - coef3 = herme.hermefit(x, y, [0, 1, 2, 3]) - assert_equal(len(coef3), 4) - assert_almost_equal(herme.hermeval(x, coef3), y) - # - coef4 = 
herme.hermefit(x, y, 4) - assert_equal(len(coef4), 5) - assert_almost_equal(herme.hermeval(x, coef4), y) - coef4 = herme.hermefit(x, y, [0, 1, 2, 3, 4]) - assert_equal(len(coef4), 5) - assert_almost_equal(herme.hermeval(x, coef4), y) - # check things still work if deg is not in strict increasing - coef4 = herme.hermefit(x, y, [2, 3, 4, 1, 0]) - assert_equal(len(coef4), 5) - assert_almost_equal(herme.hermeval(x, coef4), y) - # - coef2d = herme.hermefit(x, np.array([y, y]).T, 3) - assert_almost_equal(coef2d, np.array([coef3, coef3]).T) - coef2d = herme.hermefit(x, np.array([y, y]).T, [0, 1, 2, 3]) - assert_almost_equal(coef2d, np.array([coef3, coef3]).T) - # test weighting - w = np.zeros_like(x) - yw = y.copy() - w[1::2] = 1 - y[0::2] = 0 - wcoef3 = herme.hermefit(x, yw, 3, w=w) - assert_almost_equal(wcoef3, coef3) - wcoef3 = herme.hermefit(x, yw, [0, 1, 2, 3], w=w) - assert_almost_equal(wcoef3, coef3) - # - wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, 3, w=w) - assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) - wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) - assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) - # test scaling with complex values x points whose square - # is zero when summed. 
- x = [1, 1j, -1, -1j] - assert_almost_equal(herme.hermefit(x, x, 1), [0, 1]) - assert_almost_equal(herme.hermefit(x, x, [0, 1]), [0, 1]) - # test fitting only even Legendre polynomials - x = np.linspace(-1, 1) - y = f2(x) - coef1 = herme.hermefit(x, y, 4) - assert_almost_equal(herme.hermeval(x, coef1), y) - coef2 = herme.hermefit(x, y, [0, 2, 4]) - assert_almost_equal(herme.hermeval(x, coef2), y) - assert_almost_equal(coef1, coef2) - - -class TestCompanion(object): - - def test_raises(self): - assert_raises(ValueError, herme.hermecompanion, []) - assert_raises(ValueError, herme.hermecompanion, [1]) - - def test_dimensions(self): - for i in range(1, 5): - coef = [0]*i + [1] - assert_(herme.hermecompanion(coef).shape == (i, i)) - - def test_linear_root(self): - assert_(herme.hermecompanion([1, 2])[0, 0] == -.5) - - -class TestGauss(object): - - def test_100(self): - x, w = herme.hermegauss(100) - - # test orthogonality. Note that the results need to be normalized, - # otherwise the huge values that can arise from fast growing - # functions like Laguerre can be very confusing. 
- v = herme.hermevander(x, 99) - vv = np.dot(v.T * w, v) - vd = 1/np.sqrt(vv.diagonal()) - vv = vd[:, None] * vv * vd - assert_almost_equal(vv, np.eye(100)) - - # check that the integral of 1 is correct - tgt = np.sqrt(2*np.pi) - assert_almost_equal(w.sum(), tgt) - - -class TestMisc(object): - - def test_hermefromroots(self): - res = herme.hermefromroots([]) - assert_almost_equal(trim(res), [1]) - for i in range(1, 5): - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) - pol = herme.hermefromroots(roots) - res = herme.hermeval(roots, pol) - tgt = 0 - assert_(len(pol) == i + 1) - assert_almost_equal(herme.herme2poly(pol)[-1], 1) - assert_almost_equal(res, tgt) - - def test_hermeroots(self): - assert_almost_equal(herme.hermeroots([1]), []) - assert_almost_equal(herme.hermeroots([1, 1]), [-1]) - for i in range(2, 5): - tgt = np.linspace(-1, 1, i) - res = herme.hermeroots(herme.hermefromroots(tgt)) - assert_almost_equal(trim(res), trim(tgt)) - - def test_hermetrim(self): - coef = [2, -1, 1, 0] - - # Test exceptions - assert_raises(ValueError, herme.hermetrim, coef, -1) - - # Test results - assert_equal(herme.hermetrim(coef), coef[:-1]) - assert_equal(herme.hermetrim(coef, 1), coef[:-3]) - assert_equal(herme.hermetrim(coef, 2), [0]) - - def test_hermeline(self): - assert_equal(herme.hermeline(3, 4), [3, 4]) - - def test_herme2poly(self): - for i in range(10): - assert_almost_equal(herme.herme2poly([0]*i + [1]), Helist[i]) - - def test_poly2herme(self): - for i in range(10): - assert_almost_equal(herme.poly2herme(Helist[i]), [0]*i + [1]) - - def test_weight(self): - x = np.linspace(-5, 5, 11) - tgt = np.exp(-.5*x**2) - res = herme.hermeweight(x) - assert_almost_equal(res, tgt) diff --git a/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_laguerre.py b/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_laguerre.py deleted file mode 100644 index 4b9b286..0000000 --- a/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_laguerre.py +++ 
/dev/null @@ -1,539 +0,0 @@ -"""Tests for laguerre module. - -""" -from __future__ import division, absolute_import, print_function - -from functools import reduce - -import numpy as np -import numpy.polynomial.laguerre as lag -from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) - -L0 = np.array([1])/1 -L1 = np.array([1, -1])/1 -L2 = np.array([2, -4, 1])/2 -L3 = np.array([6, -18, 9, -1])/6 -L4 = np.array([24, -96, 72, -16, 1])/24 -L5 = np.array([120, -600, 600, -200, 25, -1])/120 -L6 = np.array([720, -4320, 5400, -2400, 450, -36, 1])/720 - -Llist = [L0, L1, L2, L3, L4, L5, L6] - - -def trim(x): - return lag.lagtrim(x, tol=1e-6) - - -class TestConstants(object): - - def test_lagdomain(self): - assert_equal(lag.lagdomain, [0, 1]) - - def test_lagzero(self): - assert_equal(lag.lagzero, [0]) - - def test_lagone(self): - assert_equal(lag.lagone, [1]) - - def test_lagx(self): - assert_equal(lag.lagx, [1, -1]) - - -class TestArithmetic(object): - x = np.linspace(-3, 3, 100) - - def test_lagadd(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] += 1 - res = lag.lagadd([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_lagsub(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] -= 1 - res = lag.lagsub([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_lagmulx(self): - assert_equal(lag.lagmulx([0]), [0]) - assert_equal(lag.lagmulx([1]), [1, -1]) - for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i - 1) + [-i, 2*i + 1, -(i + 1)] - assert_almost_equal(lag.lagmulx(ser), tgt) - - def test_lagmul(self): - # check values of result - for i in range(5): - pol1 = [0]*i + [1] - val1 = lag.lagval(self.x, pol1) - for j in range(5): - msg = "At i=%d, 
j=%d" % (i, j) - pol2 = [0]*j + [1] - val2 = lag.lagval(self.x, pol2) - pol3 = lag.lagmul(pol1, pol2) - val3 = lag.lagval(self.x, pol3) - assert_(len(pol3) == i + j + 1, msg) - assert_almost_equal(val3, val1*val2, err_msg=msg) - - def test_lagdiv(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - ci = [0]*i + [1] - cj = [0]*j + [1] - tgt = lag.lagadd(ci, cj) - quo, rem = lag.lagdiv(tgt, ci) - res = lag.lagadd(lag.lagmul(quo, ci), rem) - assert_almost_equal(trim(res), trim(tgt), err_msg=msg) - - def test_lagpow(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - c = np.arange(i + 1) - tgt = reduce(lag.lagmul, [c]*j, np.array([1])) - res = lag.lagpow(c, j) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - -class TestEvaluation(object): - # coefficients of 1 + 2*x + 3*x**2 - c1d = np.array([9., -14., 6.]) - c2d = np.einsum('i,j->ij', c1d, c1d) - c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) - - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - y = polyval(x, [1., 2., 3.]) - - def test_lagval(self): - #check empty input - assert_equal(lag.lagval([], [1]).size, 0) - - #check normal input) - x = np.linspace(-1, 1) - y = [polyval(x, c) for c in Llist] - for i in range(7): - msg = "At i=%d" % i - tgt = y[i] - res = lag.lagval(x, [0]*i + [1]) - assert_almost_equal(res, tgt, err_msg=msg) - - #check that shape is preserved - for i in range(3): - dims = [2]*i - x = np.zeros(dims) - assert_equal(lag.lagval(x, [1]).shape, dims) - assert_equal(lag.lagval(x, [1, 0]).shape, dims) - assert_equal(lag.lagval(x, [1, 0, 0]).shape, dims) - - def test_lagval2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test exceptions - assert_raises(ValueError, lag.lagval2d, x1, x2[:2], self.c2d) - - #test values - tgt = y1*y2 - res = lag.lagval2d(x1, x2, self.c2d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = lag.lagval2d(z, z, self.c2d) - assert_(res.shape == (2, 3)) - - 
def test_lagval3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test exceptions - assert_raises(ValueError, lag.lagval3d, x1, x2, x3[:2], self.c3d) - - #test values - tgt = y1*y2*y3 - res = lag.lagval3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = lag.lagval3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)) - - def test_laggrid2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j->ij', y1, y2) - res = lag.laggrid2d(x1, x2, self.c2d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = lag.laggrid2d(z, z, self.c2d) - assert_(res.shape == (2, 3)*2) - - def test_laggrid3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j,k->ijk', y1, y2, y3) - res = lag.laggrid3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = lag.laggrid3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)*3) - - -class TestIntegral(object): - - def test_lagint(self): - # check exceptions - assert_raises(TypeError, lag.lagint, [0], .5) - assert_raises(ValueError, lag.lagint, [0], -1) - assert_raises(ValueError, lag.lagint, [0], 1, [0, 0]) - assert_raises(ValueError, lag.lagint, [0], lbnd=[0]) - assert_raises(ValueError, lag.lagint, [0], scl=[0]) - assert_raises(TypeError, lag.lagint, [0], axis=.5) - - # test integration of zero polynomial - for i in range(2, 5): - k = [0]*(i - 2) + [1] - res = lag.lagint([0], m=i, k=k) - assert_almost_equal(res, [1, -1]) - - # check single integration with integration constant - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] - lagpol = lag.poly2lag(pol) - lagint = lag.lagint(lagpol, m=1, k=[i]) - res = lag.lag2poly(lagint) - assert_almost_equal(trim(res), trim(tgt)) - - # check single integration with integration constant and lbnd - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - lagpol = lag.poly2lag(pol) 
- lagint = lag.lagint(lagpol, m=1, k=[i], lbnd=-1) - assert_almost_equal(lag.lagval(-1, lagint), i) - - # check single integration with integration constant and scaling - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] - lagpol = lag.poly2lag(pol) - lagint = lag.lagint(lagpol, m=1, k=[i], scl=2) - res = lag.lag2poly(lagint) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with default k - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = lag.lagint(tgt, m=1) - res = lag.lagint(pol, m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with defined k - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = lag.lagint(tgt, m=1, k=[k]) - res = lag.lagint(pol, m=j, k=list(range(j))) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with lbnd - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = lag.lagint(tgt, m=1, k=[k], lbnd=-1) - res = lag.lagint(pol, m=j, k=list(range(j)), lbnd=-1) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with scaling - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = lag.lagint(tgt, m=1, k=[k], scl=2) - res = lag.lagint(pol, m=j, k=list(range(j)), scl=2) - assert_almost_equal(trim(res), trim(tgt)) - - def test_lagint_axis(self): - # check that axis keyword works - c2d = np.random.random((3, 4)) - - tgt = np.vstack([lag.lagint(c) for c in c2d.T]).T - res = lag.lagint(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([lag.lagint(c) for c in c2d]) - res = lag.lagint(c2d, axis=1) - assert_almost_equal(res, tgt) - - tgt = np.vstack([lag.lagint(c, k=3) for c in c2d]) - res = lag.lagint(c2d, k=3, axis=1) - assert_almost_equal(res, tgt) - - -class 
TestDerivative(object): - - def test_lagder(self): - # check exceptions - assert_raises(TypeError, lag.lagder, [0], .5) - assert_raises(ValueError, lag.lagder, [0], -1) - - # check that zeroth derivative does nothing - for i in range(5): - tgt = [0]*i + [1] - res = lag.lagder(tgt, m=0) - assert_equal(trim(res), trim(tgt)) - - # check that derivation is the inverse of integration - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = lag.lagder(lag.lagint(tgt, m=j), m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check derivation with scaling - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = lag.lagder(lag.lagint(tgt, m=j, scl=2), m=j, scl=.5) - assert_almost_equal(trim(res), trim(tgt)) - - def test_lagder_axis(self): - # check that axis keyword works - c2d = np.random.random((3, 4)) - - tgt = np.vstack([lag.lagder(c) for c in c2d.T]).T - res = lag.lagder(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([lag.lagder(c) for c in c2d]) - res = lag.lagder(c2d, axis=1) - assert_almost_equal(res, tgt) - - -class TestVander(object): - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - - def test_lagvander(self): - # check for 1d x - x = np.arange(3) - v = lag.lagvander(x, 3) - assert_(v.shape == (3, 4)) - for i in range(4): - coef = [0]*i + [1] - assert_almost_equal(v[..., i], lag.lagval(x, coef)) - - # check for 2d x - x = np.array([[1, 2], [3, 4], [5, 6]]) - v = lag.lagvander(x, 3) - assert_(v.shape == (3, 2, 4)) - for i in range(4): - coef = [0]*i + [1] - assert_almost_equal(v[..., i], lag.lagval(x, coef)) - - def test_lagvander2d(self): - # also tests lagval2d for non-square coefficient array - x1, x2, x3 = self.x - c = np.random.random((2, 3)) - van = lag.lagvander2d(x1, x2, [1, 2]) - tgt = lag.lagval2d(x1, x2, c) - res = np.dot(van, c.flat) - assert_almost_equal(res, tgt) - - # check shape - van = lag.lagvander2d([x1], [x2], [1, 2]) - assert_(van.shape == (1, 5, 6)) - - def 
test_lagvander3d(self): - # also tests lagval3d for non-square coefficient array - x1, x2, x3 = self.x - c = np.random.random((2, 3, 4)) - van = lag.lagvander3d(x1, x2, x3, [1, 2, 3]) - tgt = lag.lagval3d(x1, x2, x3, c) - res = np.dot(van, c.flat) - assert_almost_equal(res, tgt) - - # check shape - van = lag.lagvander3d([x1], [x2], [x3], [1, 2, 3]) - assert_(van.shape == (1, 5, 24)) - - -class TestFitting(object): - - def test_lagfit(self): - def f(x): - return x*(x - 1)*(x - 2) - - # Test exceptions - assert_raises(ValueError, lag.lagfit, [1], [1], -1) - assert_raises(TypeError, lag.lagfit, [[1]], [1], 0) - assert_raises(TypeError, lag.lagfit, [], [1], 0) - assert_raises(TypeError, lag.lagfit, [1], [[[1]]], 0) - assert_raises(TypeError, lag.lagfit, [1, 2], [1], 0) - assert_raises(TypeError, lag.lagfit, [1], [1, 2], 0) - assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[1, 1]) - assert_raises(ValueError, lag.lagfit, [1], [1], [-1,]) - assert_raises(ValueError, lag.lagfit, [1], [1], [2, -1, 6]) - assert_raises(TypeError, lag.lagfit, [1], [1], []) - - # Test fit - x = np.linspace(0, 2) - y = f(x) - # - coef3 = lag.lagfit(x, y, 3) - assert_equal(len(coef3), 4) - assert_almost_equal(lag.lagval(x, coef3), y) - coef3 = lag.lagfit(x, y, [0, 1, 2, 3]) - assert_equal(len(coef3), 4) - assert_almost_equal(lag.lagval(x, coef3), y) - # - coef4 = lag.lagfit(x, y, 4) - assert_equal(len(coef4), 5) - assert_almost_equal(lag.lagval(x, coef4), y) - coef4 = lag.lagfit(x, y, [0, 1, 2, 3, 4]) - assert_equal(len(coef4), 5) - assert_almost_equal(lag.lagval(x, coef4), y) - # - coef2d = lag.lagfit(x, np.array([y, y]).T, 3) - assert_almost_equal(coef2d, np.array([coef3, coef3]).T) - coef2d = lag.lagfit(x, np.array([y, y]).T, [0, 1, 2, 3]) - assert_almost_equal(coef2d, np.array([coef3, coef3]).T) - # test weighting - w = np.zeros_like(x) - yw = y.copy() - w[1::2] = 1 - y[0::2] = 0 - wcoef3 = lag.lagfit(x, yw, 3, w=w) - 
assert_almost_equal(wcoef3, coef3) - wcoef3 = lag.lagfit(x, yw, [0, 1, 2, 3], w=w) - assert_almost_equal(wcoef3, coef3) - # - wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, 3, w=w) - assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) - wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) - assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) - # test scaling with complex values x points whose square - # is zero when summed. - x = [1, 1j, -1, -1j] - assert_almost_equal(lag.lagfit(x, x, 1), [1, -1]) - assert_almost_equal(lag.lagfit(x, x, [0, 1]), [1, -1]) - - -class TestCompanion(object): - - def test_raises(self): - assert_raises(ValueError, lag.lagcompanion, []) - assert_raises(ValueError, lag.lagcompanion, [1]) - - def test_dimensions(self): - for i in range(1, 5): - coef = [0]*i + [1] - assert_(lag.lagcompanion(coef).shape == (i, i)) - - def test_linear_root(self): - assert_(lag.lagcompanion([1, 2])[0, 0] == 1.5) - - -class TestGauss(object): - - def test_100(self): - x, w = lag.laggauss(100) - - # test orthogonality. Note that the results need to be normalized, - # otherwise the huge values that can arise from fast growing - # functions like Laguerre can be very confusing. 
- v = lag.lagvander(x, 99) - vv = np.dot(v.T * w, v) - vd = 1/np.sqrt(vv.diagonal()) - vv = vd[:, None] * vv * vd - assert_almost_equal(vv, np.eye(100)) - - # check that the integral of 1 is correct - tgt = 1.0 - assert_almost_equal(w.sum(), tgt) - - -class TestMisc(object): - - def test_lagfromroots(self): - res = lag.lagfromroots([]) - assert_almost_equal(trim(res), [1]) - for i in range(1, 5): - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) - pol = lag.lagfromroots(roots) - res = lag.lagval(roots, pol) - tgt = 0 - assert_(len(pol) == i + 1) - assert_almost_equal(lag.lag2poly(pol)[-1], 1) - assert_almost_equal(res, tgt) - - def test_lagroots(self): - assert_almost_equal(lag.lagroots([1]), []) - assert_almost_equal(lag.lagroots([0, 1]), [1]) - for i in range(2, 5): - tgt = np.linspace(0, 3, i) - res = lag.lagroots(lag.lagfromroots(tgt)) - assert_almost_equal(trim(res), trim(tgt)) - - def test_lagtrim(self): - coef = [2, -1, 1, 0] - - # Test exceptions - assert_raises(ValueError, lag.lagtrim, coef, -1) - - # Test results - assert_equal(lag.lagtrim(coef), coef[:-1]) - assert_equal(lag.lagtrim(coef, 1), coef[:-3]) - assert_equal(lag.lagtrim(coef, 2), [0]) - - def test_lagline(self): - assert_equal(lag.lagline(3, 4), [7, -4]) - - def test_lag2poly(self): - for i in range(7): - assert_almost_equal(lag.lag2poly([0]*i + [1]), Llist[i]) - - def test_poly2lag(self): - for i in range(7): - assert_almost_equal(lag.poly2lag(Llist[i]), [0]*i + [1]) - - def test_weight(self): - x = np.linspace(0, 10, 11) - tgt = np.exp(-x) - res = lag.lagweight(x) - assert_almost_equal(res, tgt) diff --git a/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_legendre.py b/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_legendre.py deleted file mode 100644 index 917a7e0..0000000 --- a/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_legendre.py +++ /dev/null @@ -1,558 +0,0 @@ -"""Tests for legendre module. 
- -""" -from __future__ import division, absolute_import, print_function - -from functools import reduce - -import numpy as np -import numpy.polynomial.legendre as leg -from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) - -L0 = np.array([1]) -L1 = np.array([0, 1]) -L2 = np.array([-1, 0, 3])/2 -L3 = np.array([0, -3, 0, 5])/2 -L4 = np.array([3, 0, -30, 0, 35])/8 -L5 = np.array([0, 15, 0, -70, 0, 63])/8 -L6 = np.array([-5, 0, 105, 0, -315, 0, 231])/16 -L7 = np.array([0, -35, 0, 315, 0, -693, 0, 429])/16 -L8 = np.array([35, 0, -1260, 0, 6930, 0, -12012, 0, 6435])/128 -L9 = np.array([0, 315, 0, -4620, 0, 18018, 0, -25740, 0, 12155])/128 - -Llist = [L0, L1, L2, L3, L4, L5, L6, L7, L8, L9] - - -def trim(x): - return leg.legtrim(x, tol=1e-6) - - -class TestConstants(object): - - def test_legdomain(self): - assert_equal(leg.legdomain, [-1, 1]) - - def test_legzero(self): - assert_equal(leg.legzero, [0]) - - def test_legone(self): - assert_equal(leg.legone, [1]) - - def test_legx(self): - assert_equal(leg.legx, [0, 1]) - - -class TestArithmetic(object): - x = np.linspace(-1, 1, 100) - - def test_legadd(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] += 1 - res = leg.legadd([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_legsub(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] -= 1 - res = leg.legsub([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_legmulx(self): - assert_equal(leg.legmulx([0]), [0]) - assert_equal(leg.legmulx([1]), [0, 1]) - for i in range(1, 5): - tmp = 2*i + 1 - ser = [0]*i + [1] - tgt = [0]*(i - 1) + [i/tmp, 0, (i + 1)/tmp] - assert_equal(leg.legmulx(ser), tgt) - - def test_legmul(self): - # check values of result 
- for i in range(5): - pol1 = [0]*i + [1] - val1 = leg.legval(self.x, pol1) - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - pol2 = [0]*j + [1] - val2 = leg.legval(self.x, pol2) - pol3 = leg.legmul(pol1, pol2) - val3 = leg.legval(self.x, pol3) - assert_(len(pol3) == i + j + 1, msg) - assert_almost_equal(val3, val1*val2, err_msg=msg) - - def test_legdiv(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - ci = [0]*i + [1] - cj = [0]*j + [1] - tgt = leg.legadd(ci, cj) - quo, rem = leg.legdiv(tgt, ci) - res = leg.legadd(leg.legmul(quo, ci), rem) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_legpow(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - c = np.arange(i + 1) - tgt = reduce(leg.legmul, [c]*j, np.array([1])) - res = leg.legpow(c, j) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - -class TestEvaluation(object): - # coefficients of 1 + 2*x + 3*x**2 - c1d = np.array([2., 2., 2.]) - c2d = np.einsum('i,j->ij', c1d, c1d) - c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) - - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - y = polyval(x, [1., 2., 3.]) - - def test_legval(self): - #check empty input - assert_equal(leg.legval([], [1]).size, 0) - - #check normal input) - x = np.linspace(-1, 1) - y = [polyval(x, c) for c in Llist] - for i in range(10): - msg = "At i=%d" % i - tgt = y[i] - res = leg.legval(x, [0]*i + [1]) - assert_almost_equal(res, tgt, err_msg=msg) - - #check that shape is preserved - for i in range(3): - dims = [2]*i - x = np.zeros(dims) - assert_equal(leg.legval(x, [1]).shape, dims) - assert_equal(leg.legval(x, [1, 0]).shape, dims) - assert_equal(leg.legval(x, [1, 0, 0]).shape, dims) - - def test_legval2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test exceptions - assert_raises(ValueError, leg.legval2d, x1, x2[:2], self.c2d) - - #test values - tgt = y1*y2 - res = leg.legval2d(x1, x2, self.c2d) - assert_almost_equal(res, tgt) - 
- #test shape - z = np.ones((2, 3)) - res = leg.legval2d(z, z, self.c2d) - assert_(res.shape == (2, 3)) - - def test_legval3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test exceptions - assert_raises(ValueError, leg.legval3d, x1, x2, x3[:2], self.c3d) - - #test values - tgt = y1*y2*y3 - res = leg.legval3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = leg.legval3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)) - - def test_leggrid2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j->ij', y1, y2) - res = leg.leggrid2d(x1, x2, self.c2d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = leg.leggrid2d(z, z, self.c2d) - assert_(res.shape == (2, 3)*2) - - def test_leggrid3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j,k->ijk', y1, y2, y3) - res = leg.leggrid3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = leg.leggrid3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)*3) - - -class TestIntegral(object): - - def test_legint(self): - # check exceptions - assert_raises(TypeError, leg.legint, [0], .5) - assert_raises(ValueError, leg.legint, [0], -1) - assert_raises(ValueError, leg.legint, [0], 1, [0, 0]) - assert_raises(ValueError, leg.legint, [0], lbnd=[0]) - assert_raises(ValueError, leg.legint, [0], scl=[0]) - assert_raises(TypeError, leg.legint, [0], axis=.5) - - # test integration of zero polynomial - for i in range(2, 5): - k = [0]*(i - 2) + [1] - res = leg.legint([0], m=i, k=k) - assert_almost_equal(res, [0, 1]) - - # check single integration with integration constant - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] - legpol = leg.poly2leg(pol) - legint = leg.legint(legpol, m=1, k=[i]) - res = leg.leg2poly(legint) - assert_almost_equal(trim(res), trim(tgt)) - - # check single integration with 
integration constant and lbnd - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - legpol = leg.poly2leg(pol) - legint = leg.legint(legpol, m=1, k=[i], lbnd=-1) - assert_almost_equal(leg.legval(-1, legint), i) - - # check single integration with integration constant and scaling - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] - legpol = leg.poly2leg(pol) - legint = leg.legint(legpol, m=1, k=[i], scl=2) - res = leg.leg2poly(legint) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with default k - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = leg.legint(tgt, m=1) - res = leg.legint(pol, m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with defined k - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = leg.legint(tgt, m=1, k=[k]) - res = leg.legint(pol, m=j, k=list(range(j))) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with lbnd - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = leg.legint(tgt, m=1, k=[k], lbnd=-1) - res = leg.legint(pol, m=j, k=list(range(j)), lbnd=-1) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with scaling - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = leg.legint(tgt, m=1, k=[k], scl=2) - res = leg.legint(pol, m=j, k=list(range(j)), scl=2) - assert_almost_equal(trim(res), trim(tgt)) - - def test_legint_axis(self): - # check that axis keyword works - c2d = np.random.random((3, 4)) - - tgt = np.vstack([leg.legint(c) for c in c2d.T]).T - res = leg.legint(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([leg.legint(c) for c in c2d]) - res = leg.legint(c2d, axis=1) - assert_almost_equal(res, tgt) - - tgt = np.vstack([leg.legint(c, k=3) 
for c in c2d]) - res = leg.legint(c2d, k=3, axis=1) - assert_almost_equal(res, tgt) - - -class TestDerivative(object): - - def test_legder(self): - # check exceptions - assert_raises(TypeError, leg.legder, [0], .5) - assert_raises(ValueError, leg.legder, [0], -1) - - # check that zeroth derivative does nothing - for i in range(5): - tgt = [0]*i + [1] - res = leg.legder(tgt, m=0) - assert_equal(trim(res), trim(tgt)) - - # check that derivation is the inverse of integration - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = leg.legder(leg.legint(tgt, m=j), m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check derivation with scaling - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = leg.legder(leg.legint(tgt, m=j, scl=2), m=j, scl=.5) - assert_almost_equal(trim(res), trim(tgt)) - - def test_legder_axis(self): - # check that axis keyword works - c2d = np.random.random((3, 4)) - - tgt = np.vstack([leg.legder(c) for c in c2d.T]).T - res = leg.legder(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([leg.legder(c) for c in c2d]) - res = leg.legder(c2d, axis=1) - assert_almost_equal(res, tgt) - - -class TestVander(object): - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - - def test_legvander(self): - # check for 1d x - x = np.arange(3) - v = leg.legvander(x, 3) - assert_(v.shape == (3, 4)) - for i in range(4): - coef = [0]*i + [1] - assert_almost_equal(v[..., i], leg.legval(x, coef)) - - # check for 2d x - x = np.array([[1, 2], [3, 4], [5, 6]]) - v = leg.legvander(x, 3) - assert_(v.shape == (3, 2, 4)) - for i in range(4): - coef = [0]*i + [1] - assert_almost_equal(v[..., i], leg.legval(x, coef)) - - def test_legvander2d(self): - # also tests polyval2d for non-square coefficient array - x1, x2, x3 = self.x - c = np.random.random((2, 3)) - van = leg.legvander2d(x1, x2, [1, 2]) - tgt = leg.legval2d(x1, x2, c) - res = np.dot(van, c.flat) - assert_almost_equal(res, tgt) - - # check shape 
- van = leg.legvander2d([x1], [x2], [1, 2]) - assert_(van.shape == (1, 5, 6)) - - def test_legvander3d(self): - # also tests polyval3d for non-square coefficient array - x1, x2, x3 = self.x - c = np.random.random((2, 3, 4)) - van = leg.legvander3d(x1, x2, x3, [1, 2, 3]) - tgt = leg.legval3d(x1, x2, x3, c) - res = np.dot(van, c.flat) - assert_almost_equal(res, tgt) - - # check shape - van = leg.legvander3d([x1], [x2], [x3], [1, 2, 3]) - assert_(van.shape == (1, 5, 24)) - - -class TestFitting(object): - - def test_legfit(self): - def f(x): - return x*(x - 1)*(x - 2) - - def f2(x): - return x**4 + x**2 + 1 - - # Test exceptions - assert_raises(ValueError, leg.legfit, [1], [1], -1) - assert_raises(TypeError, leg.legfit, [[1]], [1], 0) - assert_raises(TypeError, leg.legfit, [], [1], 0) - assert_raises(TypeError, leg.legfit, [1], [[[1]]], 0) - assert_raises(TypeError, leg.legfit, [1, 2], [1], 0) - assert_raises(TypeError, leg.legfit, [1], [1, 2], 0) - assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1, 1]) - assert_raises(ValueError, leg.legfit, [1], [1], [-1,]) - assert_raises(ValueError, leg.legfit, [1], [1], [2, -1, 6]) - assert_raises(TypeError, leg.legfit, [1], [1], []) - - # Test fit - x = np.linspace(0, 2) - y = f(x) - # - coef3 = leg.legfit(x, y, 3) - assert_equal(len(coef3), 4) - assert_almost_equal(leg.legval(x, coef3), y) - coef3 = leg.legfit(x, y, [0, 1, 2, 3]) - assert_equal(len(coef3), 4) - assert_almost_equal(leg.legval(x, coef3), y) - # - coef4 = leg.legfit(x, y, 4) - assert_equal(len(coef4), 5) - assert_almost_equal(leg.legval(x, coef4), y) - coef4 = leg.legfit(x, y, [0, 1, 2, 3, 4]) - assert_equal(len(coef4), 5) - assert_almost_equal(leg.legval(x, coef4), y) - # check things still work if deg is not in strict increasing - coef4 = leg.legfit(x, y, [2, 3, 4, 1, 0]) - assert_equal(len(coef4), 5) - assert_almost_equal(leg.legval(x, coef4), y) - # - coef2d = leg.legfit(x, np.array([y, y]).T, 3) 
- assert_almost_equal(coef2d, np.array([coef3, coef3]).T) - coef2d = leg.legfit(x, np.array([y, y]).T, [0, 1, 2, 3]) - assert_almost_equal(coef2d, np.array([coef3, coef3]).T) - # test weighting - w = np.zeros_like(x) - yw = y.copy() - w[1::2] = 1 - y[0::2] = 0 - wcoef3 = leg.legfit(x, yw, 3, w=w) - assert_almost_equal(wcoef3, coef3) - wcoef3 = leg.legfit(x, yw, [0, 1, 2, 3], w=w) - assert_almost_equal(wcoef3, coef3) - # - wcoef2d = leg.legfit(x, np.array([yw, yw]).T, 3, w=w) - assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) - wcoef2d = leg.legfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) - assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) - # test scaling with complex values x points whose square - # is zero when summed. - x = [1, 1j, -1, -1j] - assert_almost_equal(leg.legfit(x, x, 1), [0, 1]) - assert_almost_equal(leg.legfit(x, x, [0, 1]), [0, 1]) - # test fitting only even Legendre polynomials - x = np.linspace(-1, 1) - y = f2(x) - coef1 = leg.legfit(x, y, 4) - assert_almost_equal(leg.legval(x, coef1), y) - coef2 = leg.legfit(x, y, [0, 2, 4]) - assert_almost_equal(leg.legval(x, coef2), y) - assert_almost_equal(coef1, coef2) - - -class TestCompanion(object): - - def test_raises(self): - assert_raises(ValueError, leg.legcompanion, []) - assert_raises(ValueError, leg.legcompanion, [1]) - - def test_dimensions(self): - for i in range(1, 5): - coef = [0]*i + [1] - assert_(leg.legcompanion(coef).shape == (i, i)) - - def test_linear_root(self): - assert_(leg.legcompanion([1, 2])[0, 0] == -.5) - - -class TestGauss(object): - - def test_100(self): - x, w = leg.leggauss(100) - - # test orthogonality. Note that the results need to be normalized, - # otherwise the huge values that can arise from fast growing - # functions like Laguerre can be very confusing. 
- v = leg.legvander(x, 99) - vv = np.dot(v.T * w, v) - vd = 1/np.sqrt(vv.diagonal()) - vv = vd[:, None] * vv * vd - assert_almost_equal(vv, np.eye(100)) - - # check that the integral of 1 is correct - tgt = 2.0 - assert_almost_equal(w.sum(), tgt) - - -class TestMisc(object): - - def test_legfromroots(self): - res = leg.legfromroots([]) - assert_almost_equal(trim(res), [1]) - for i in range(1, 5): - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) - pol = leg.legfromroots(roots) - res = leg.legval(roots, pol) - tgt = 0 - assert_(len(pol) == i + 1) - assert_almost_equal(leg.leg2poly(pol)[-1], 1) - assert_almost_equal(res, tgt) - - def test_legroots(self): - assert_almost_equal(leg.legroots([1]), []) - assert_almost_equal(leg.legroots([1, 2]), [-.5]) - for i in range(2, 5): - tgt = np.linspace(-1, 1, i) - res = leg.legroots(leg.legfromroots(tgt)) - assert_almost_equal(trim(res), trim(tgt)) - - def test_legtrim(self): - coef = [2, -1, 1, 0] - - # Test exceptions - assert_raises(ValueError, leg.legtrim, coef, -1) - - # Test results - assert_equal(leg.legtrim(coef), coef[:-1]) - assert_equal(leg.legtrim(coef, 1), coef[:-3]) - assert_equal(leg.legtrim(coef, 2), [0]) - - def test_legline(self): - assert_equal(leg.legline(3, 4), [3, 4]) - - def test_leg2poly(self): - for i in range(10): - assert_almost_equal(leg.leg2poly([0]*i + [1]), Llist[i]) - - def test_poly2leg(self): - for i in range(10): - assert_almost_equal(leg.poly2leg(Llist[i]), [0]*i + [1]) - - def test_weight(self): - x = np.linspace(-1, 1, 11) - tgt = 1. - res = leg.legweight(x) - assert_almost_equal(res, tgt) diff --git a/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_polynomial.py b/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_polynomial.py deleted file mode 100644 index 1436963..0000000 --- a/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_polynomial.py +++ /dev/null @@ -1,593 +0,0 @@ -"""Tests for polynomial module. 
- -""" -from __future__ import division, absolute_import, print_function - -from functools import reduce - -import numpy as np -import numpy.polynomial.polynomial as poly -from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - assert_warns, assert_array_equal) - - -def trim(x): - return poly.polytrim(x, tol=1e-6) - -T0 = [1] -T1 = [0, 1] -T2 = [-1, 0, 2] -T3 = [0, -3, 0, 4] -T4 = [1, 0, -8, 0, 8] -T5 = [0, 5, 0, -20, 0, 16] -T6 = [-1, 0, 18, 0, -48, 0, 32] -T7 = [0, -7, 0, 56, 0, -112, 0, 64] -T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128] -T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256] - -Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] - - -class TestConstants(object): - - def test_polydomain(self): - assert_equal(poly.polydomain, [-1, 1]) - - def test_polyzero(self): - assert_equal(poly.polyzero, [0]) - - def test_polyone(self): - assert_equal(poly.polyone, [1]) - - def test_polyx(self): - assert_equal(poly.polyx, [0, 1]) - - -class TestArithmetic(object): - - def test_polyadd(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] += 1 - res = poly.polyadd([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_polysub(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] -= 1 - res = poly.polysub([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_polymulx(self): - assert_equal(poly.polymulx([0]), [0]) - assert_equal(poly.polymulx([1]), [0, 1]) - for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i + 1) + [1] - assert_equal(poly.polymulx(ser), tgt) - - def test_polymul(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(i + j + 1) - tgt[i + j] += 1 - res = poly.polymul([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def 
test_polydiv(self): - # check zero division - assert_raises(ZeroDivisionError, poly.polydiv, [1], [0]) - - # check scalar division - quo, rem = poly.polydiv([2], [2]) - assert_equal((quo, rem), (1, 0)) - quo, rem = poly.polydiv([2, 2], [2]) - assert_equal((quo, rem), ((1, 1), 0)) - - # check rest. - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - ci = [0]*i + [1, 2] - cj = [0]*j + [1, 2] - tgt = poly.polyadd(ci, cj) - quo, rem = poly.polydiv(tgt, ci) - res = poly.polyadd(poly.polymul(quo, ci), rem) - assert_equal(res, tgt, err_msg=msg) - - def test_polypow(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - c = np.arange(i + 1) - tgt = reduce(poly.polymul, [c]*j, np.array([1])) - res = poly.polypow(c, j) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - -class TestEvaluation(object): - # coefficients of 1 + 2*x + 3*x**2 - c1d = np.array([1., 2., 3.]) - c2d = np.einsum('i,j->ij', c1d, c1d) - c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) - - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - y = poly.polyval(x, [1., 2., 3.]) - - def test_polyval(self): - #check empty input - assert_equal(poly.polyval([], [1]).size, 0) - - #check normal input) - x = np.linspace(-1, 1) - y = [x**i for i in range(5)] - for i in range(5): - tgt = y[i] - res = poly.polyval(x, [0]*i + [1]) - assert_almost_equal(res, tgt) - tgt = x*(x**2 - 1) - res = poly.polyval(x, [0, -1, 0, 1]) - assert_almost_equal(res, tgt) - - #check that shape is preserved - for i in range(3): - dims = [2]*i - x = np.zeros(dims) - assert_equal(poly.polyval(x, [1]).shape, dims) - assert_equal(poly.polyval(x, [1, 0]).shape, dims) - assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims) - - #check masked arrays are processed correctly - mask = [False, True, False] - mx = np.ma.array([1, 2, 3], mask=mask) - res = np.polyval([7, 5, 3], mx) - assert_array_equal(res.mask, mask) - - #check subtypes of ndarray are preserved - class C(np.ndarray): 
- pass - - cx = np.array([1, 2, 3]).view(C) - assert_equal(type(np.polyval([2, 3, 4], cx)), C) - - def test_polyvalfromroots(self): - # check exception for broadcasting x values over root array with - # too few dimensions - assert_raises(ValueError, poly.polyvalfromroots, - [1], [1], tensor=False) - - # check empty input - assert_equal(poly.polyvalfromroots([], [1]).size, 0) - assert_(poly.polyvalfromroots([], [1]).shape == (0,)) - - # check empty input + multidimensional roots - assert_equal(poly.polyvalfromroots([], [[1] * 5]).size, 0) - assert_(poly.polyvalfromroots([], [[1] * 5]).shape == (5, 0)) - - # check scalar input - assert_equal(poly.polyvalfromroots(1, 1), 0) - assert_(poly.polyvalfromroots(1, np.ones((3, 3))).shape == (3,)) - - # check normal input) - x = np.linspace(-1, 1) - y = [x**i for i in range(5)] - for i in range(1, 5): - tgt = y[i] - res = poly.polyvalfromroots(x, [0]*i) - assert_almost_equal(res, tgt) - tgt = x*(x - 1)*(x + 1) - res = poly.polyvalfromroots(x, [-1, 0, 1]) - assert_almost_equal(res, tgt) - - # check that shape is preserved - for i in range(3): - dims = [2]*i - x = np.zeros(dims) - assert_equal(poly.polyvalfromroots(x, [1]).shape, dims) - assert_equal(poly.polyvalfromroots(x, [1, 0]).shape, dims) - assert_equal(poly.polyvalfromroots(x, [1, 0, 0]).shape, dims) - - # check compatibility with factorization - ptest = [15, 2, -16, -2, 1] - r = poly.polyroots(ptest) - x = np.linspace(-1, 1) - assert_almost_equal(poly.polyval(x, ptest), - poly.polyvalfromroots(x, r)) - - # check multidimensional arrays of roots and values - # check tensor=False - rshape = (3, 5) - x = np.arange(-3, 2) - r = np.random.randint(-5, 5, size=rshape) - res = poly.polyvalfromroots(x, r, tensor=False) - tgt = np.empty(r.shape[1:]) - for ii in range(tgt.size): - tgt[ii] = poly.polyvalfromroots(x[ii], r[:, ii]) - assert_equal(res, tgt) - - # check tensor=True - x = np.vstack([x, 2*x]) - res = poly.polyvalfromroots(x, r, tensor=True) - tgt = np.empty(r.shape[1:] 
+ x.shape) - for ii in range(r.shape[1]): - for jj in range(x.shape[0]): - tgt[ii, jj, :] = poly.polyvalfromroots(x[jj], r[:, ii]) - assert_equal(res, tgt) - - def test_polyval2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test exceptions - assert_raises(ValueError, poly.polyval2d, x1, x2[:2], self.c2d) - - #test values - tgt = y1*y2 - res = poly.polyval2d(x1, x2, self.c2d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = poly.polyval2d(z, z, self.c2d) - assert_(res.shape == (2, 3)) - - def test_polyval3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test exceptions - assert_raises(ValueError, poly.polyval3d, x1, x2, x3[:2], self.c3d) - - #test values - tgt = y1*y2*y3 - res = poly.polyval3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = poly.polyval3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)) - - def test_polygrid2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j->ij', y1, y2) - res = poly.polygrid2d(x1, x2, self.c2d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = poly.polygrid2d(z, z, self.c2d) - assert_(res.shape == (2, 3)*2) - - def test_polygrid3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j,k->ijk', y1, y2, y3) - res = poly.polygrid3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = poly.polygrid3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)*3) - - -class TestIntegral(object): - - def test_polyint(self): - # check exceptions - assert_raises(TypeError, poly.polyint, [0], .5) - assert_raises(ValueError, poly.polyint, [0], -1) - assert_raises(ValueError, poly.polyint, [0], 1, [0, 0]) - assert_raises(ValueError, poly.polyint, [0], lbnd=[0]) - assert_raises(ValueError, poly.polyint, [0], scl=[0]) - assert_raises(TypeError, poly.polyint, [0], axis=.5) - with 
assert_warns(DeprecationWarning): - poly.polyint([1, 1], 1.) - - # test integration of zero polynomial - for i in range(2, 5): - k = [0]*(i - 2) + [1] - res = poly.polyint([0], m=i, k=k) - assert_almost_equal(res, [0, 1]) - - # check single integration with integration constant - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] - res = poly.polyint(pol, m=1, k=[i]) - assert_almost_equal(trim(res), trim(tgt)) - - # check single integration with integration constant and lbnd - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - res = poly.polyint(pol, m=1, k=[i], lbnd=-1) - assert_almost_equal(poly.polyval(-1, res), i) - - # check single integration with integration constant and scaling - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] - res = poly.polyint(pol, m=1, k=[i], scl=2) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with default k - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = poly.polyint(tgt, m=1) - res = poly.polyint(pol, m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with defined k - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = poly.polyint(tgt, m=1, k=[k]) - res = poly.polyint(pol, m=j, k=list(range(j))) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with lbnd - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = poly.polyint(tgt, m=1, k=[k], lbnd=-1) - res = poly.polyint(pol, m=j, k=list(range(j)), lbnd=-1) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with scaling - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = poly.polyint(tgt, m=1, k=[k], scl=2) - res = poly.polyint(pol, m=j, k=list(range(j)), scl=2) - 
assert_almost_equal(trim(res), trim(tgt)) - - def test_polyint_axis(self): - # check that axis keyword works - c2d = np.random.random((3, 4)) - - tgt = np.vstack([poly.polyint(c) for c in c2d.T]).T - res = poly.polyint(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([poly.polyint(c) for c in c2d]) - res = poly.polyint(c2d, axis=1) - assert_almost_equal(res, tgt) - - tgt = np.vstack([poly.polyint(c, k=3) for c in c2d]) - res = poly.polyint(c2d, k=3, axis=1) - assert_almost_equal(res, tgt) - - -class TestDerivative(object): - - def test_polyder(self): - # check exceptions - assert_raises(TypeError, poly.polyder, [0], .5) - assert_raises(ValueError, poly.polyder, [0], -1) - - # check that zeroth derivative does nothing - for i in range(5): - tgt = [0]*i + [1] - res = poly.polyder(tgt, m=0) - assert_equal(trim(res), trim(tgt)) - - # check that derivation is the inverse of integration - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = poly.polyder(poly.polyint(tgt, m=j), m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check derivation with scaling - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = poly.polyder(poly.polyint(tgt, m=j, scl=2), m=j, scl=.5) - assert_almost_equal(trim(res), trim(tgt)) - - def test_polyder_axis(self): - # check that axis keyword works - c2d = np.random.random((3, 4)) - - tgt = np.vstack([poly.polyder(c) for c in c2d.T]).T - res = poly.polyder(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([poly.polyder(c) for c in c2d]) - res = poly.polyder(c2d, axis=1) - assert_almost_equal(res, tgt) - - -class TestVander(object): - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - - def test_polyvander(self): - # check for 1d x - x = np.arange(3) - v = poly.polyvander(x, 3) - assert_(v.shape == (3, 4)) - for i in range(4): - coef = [0]*i + [1] - assert_almost_equal(v[..., i], poly.polyval(x, coef)) - - # check for 2d x - x = np.array([[1, 2], [3, 4], 
[5, 6]]) - v = poly.polyvander(x, 3) - assert_(v.shape == (3, 2, 4)) - for i in range(4): - coef = [0]*i + [1] - assert_almost_equal(v[..., i], poly.polyval(x, coef)) - - def test_polyvander2d(self): - # also tests polyval2d for non-square coefficient array - x1, x2, x3 = self.x - c = np.random.random((2, 3)) - van = poly.polyvander2d(x1, x2, [1, 2]) - tgt = poly.polyval2d(x1, x2, c) - res = np.dot(van, c.flat) - assert_almost_equal(res, tgt) - - # check shape - van = poly.polyvander2d([x1], [x2], [1, 2]) - assert_(van.shape == (1, 5, 6)) - - def test_polyvander3d(self): - # also tests polyval3d for non-square coefficient array - x1, x2, x3 = self.x - c = np.random.random((2, 3, 4)) - van = poly.polyvander3d(x1, x2, x3, [1, 2, 3]) - tgt = poly.polyval3d(x1, x2, x3, c) - res = np.dot(van, c.flat) - assert_almost_equal(res, tgt) - - # check shape - van = poly.polyvander3d([x1], [x2], [x3], [1, 2, 3]) - assert_(van.shape == (1, 5, 24)) - - -class TestCompanion(object): - - def test_raises(self): - assert_raises(ValueError, poly.polycompanion, []) - assert_raises(ValueError, poly.polycompanion, [1]) - - def test_dimensions(self): - for i in range(1, 5): - coef = [0]*i + [1] - assert_(poly.polycompanion(coef).shape == (i, i)) - - def test_linear_root(self): - assert_(poly.polycompanion([1, 2])[0, 0] == -.5) - - -class TestMisc(object): - - def test_polyfromroots(self): - res = poly.polyfromroots([]) - assert_almost_equal(trim(res), [1]) - for i in range(1, 5): - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) - tgt = Tlist[i] - res = poly.polyfromroots(roots)*2**(i-1) - assert_almost_equal(trim(res), trim(tgt)) - - def test_polyroots(self): - assert_almost_equal(poly.polyroots([1]), []) - assert_almost_equal(poly.polyroots([1, 2]), [-.5]) - for i in range(2, 5): - tgt = np.linspace(-1, 1, i) - res = poly.polyroots(poly.polyfromroots(tgt)) - assert_almost_equal(trim(res), trim(tgt)) - - def test_polyfit(self): - def f(x): - return x*(x - 1)*(x - 2) - - def f2(x): - 
return x**4 + x**2 + 1 - - # Test exceptions - assert_raises(ValueError, poly.polyfit, [1], [1], -1) - assert_raises(TypeError, poly.polyfit, [[1]], [1], 0) - assert_raises(TypeError, poly.polyfit, [], [1], 0) - assert_raises(TypeError, poly.polyfit, [1], [[[1]]], 0) - assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0) - assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0) - assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1, 1]) - assert_raises(ValueError, poly.polyfit, [1], [1], [-1,]) - assert_raises(ValueError, poly.polyfit, [1], [1], [2, -1, 6]) - assert_raises(TypeError, poly.polyfit, [1], [1], []) - - # Test fit - x = np.linspace(0, 2) - y = f(x) - # - coef3 = poly.polyfit(x, y, 3) - assert_equal(len(coef3), 4) - assert_almost_equal(poly.polyval(x, coef3), y) - coef3 = poly.polyfit(x, y, [0, 1, 2, 3]) - assert_equal(len(coef3), 4) - assert_almost_equal(poly.polyval(x, coef3), y) - # - coef4 = poly.polyfit(x, y, 4) - assert_equal(len(coef4), 5) - assert_almost_equal(poly.polyval(x, coef4), y) - coef4 = poly.polyfit(x, y, [0, 1, 2, 3, 4]) - assert_equal(len(coef4), 5) - assert_almost_equal(poly.polyval(x, coef4), y) - # - coef2d = poly.polyfit(x, np.array([y, y]).T, 3) - assert_almost_equal(coef2d, np.array([coef3, coef3]).T) - coef2d = poly.polyfit(x, np.array([y, y]).T, [0, 1, 2, 3]) - assert_almost_equal(coef2d, np.array([coef3, coef3]).T) - # test weighting - w = np.zeros_like(x) - yw = y.copy() - w[1::2] = 1 - yw[0::2] = 0 - wcoef3 = poly.polyfit(x, yw, 3, w=w) - assert_almost_equal(wcoef3, coef3) - wcoef3 = poly.polyfit(x, yw, [0, 1, 2, 3], w=w) - assert_almost_equal(wcoef3, coef3) - # - wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, 3, w=w) - assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) - wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) - assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) - # test scaling with complex values x points whose 
square - # is zero when summed. - x = [1, 1j, -1, -1j] - assert_almost_equal(poly.polyfit(x, x, 1), [0, 1]) - assert_almost_equal(poly.polyfit(x, x, [0, 1]), [0, 1]) - # test fitting only even Polyendre polynomials - x = np.linspace(-1, 1) - y = f2(x) - coef1 = poly.polyfit(x, y, 4) - assert_almost_equal(poly.polyval(x, coef1), y) - coef2 = poly.polyfit(x, y, [0, 2, 4]) - assert_almost_equal(poly.polyval(x, coef2), y) - assert_almost_equal(coef1, coef2) - - def test_polytrim(self): - coef = [2, -1, 1, 0] - - # Test exceptions - assert_raises(ValueError, poly.polytrim, coef, -1) - - # Test results - assert_equal(poly.polytrim(coef), coef[:-1]) - assert_equal(poly.polytrim(coef, 1), coef[:-3]) - assert_equal(poly.polytrim(coef, 2), [0]) - - def test_polyline(self): - assert_equal(poly.polyline(3, 4), [3, 4]) diff --git a/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_polyutils.py b/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_polyutils.py deleted file mode 100644 index 801c558..0000000 --- a/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_polyutils.py +++ /dev/null @@ -1,108 +0,0 @@ -"""Tests for polyutils module. 
- -""" -from __future__ import division, absolute_import, print_function - -import numpy as np -import numpy.polynomial.polyutils as pu -from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) - - -class TestMisc(object): - - def test_trimseq(self): - for i in range(5): - tgt = [1] - res = pu.trimseq([1] + [0]*5) - assert_equal(res, tgt) - - def test_as_series(self): - # check exceptions - assert_raises(ValueError, pu.as_series, [[]]) - assert_raises(ValueError, pu.as_series, [[[1, 2]]]) - assert_raises(ValueError, pu.as_series, [[1], ['a']]) - # check common types - types = ['i', 'd', 'O'] - for i in range(len(types)): - for j in range(i): - ci = np.ones(1, types[i]) - cj = np.ones(1, types[j]) - [resi, resj] = pu.as_series([ci, cj]) - assert_(resi.dtype.char == resj.dtype.char) - assert_(resj.dtype.char == types[i]) - - def test_trimcoef(self): - coef = [2, -1, 1, 0] - # Test exceptions - assert_raises(ValueError, pu.trimcoef, coef, -1) - # Test results - assert_equal(pu.trimcoef(coef), coef[:-1]) - assert_equal(pu.trimcoef(coef, 1), coef[:-3]) - assert_equal(pu.trimcoef(coef, 2), [0]) - - -class TestDomain(object): - - def test_getdomain(self): - # test for real values - x = [1, 10, 3, -1] - tgt = [-1, 10] - res = pu.getdomain(x) - assert_almost_equal(res, tgt) - - # test for complex values - x = [1 + 1j, 1 - 1j, 0, 2] - tgt = [-1j, 2 + 1j] - res = pu.getdomain(x) - assert_almost_equal(res, tgt) - - def test_mapdomain(self): - # test for real values - dom1 = [0, 4] - dom2 = [1, 3] - tgt = dom2 - res = pu.mapdomain(dom1, dom1, dom2) - assert_almost_equal(res, tgt) - - # test for complex values - dom1 = [0 - 1j, 2 + 1j] - dom2 = [-2, 2] - tgt = dom2 - x = dom1 - res = pu.mapdomain(x, dom1, dom2) - assert_almost_equal(res, tgt) - - # test for multidimensional arrays - dom1 = [0, 4] - dom2 = [1, 3] - tgt = np.array([dom2, dom2]) - x = np.array([dom1, dom1]) - res = pu.mapdomain(x, dom1, dom2) - assert_almost_equal(res, tgt) - - # 
test that subtypes are preserved. - class MyNDArray(np.ndarray): - pass - - dom1 = [0, 4] - dom2 = [1, 3] - x = np.array([dom1, dom1]).view(MyNDArray) - res = pu.mapdomain(x, dom1, dom2) - assert_(isinstance(res, MyNDArray)) - - def test_mapparms(self): - # test for real values - dom1 = [0, 4] - dom2 = [1, 3] - tgt = [1, .5] - res = pu. mapparms(dom1, dom2) - assert_almost_equal(res, tgt) - - # test for complex values - dom1 = [0 - 1j, 2 + 1j] - dom2 = [-2, 2] - tgt = [-1 + 1j, 1 - 1j] - res = pu.mapparms(dom1, dom2) - assert_almost_equal(res, tgt) diff --git a/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_printing.py b/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_printing.py deleted file mode 100644 index 3f12364..0000000 --- a/venv/lib/python3.7/site-packages/numpy/polynomial/tests/test_printing.py +++ /dev/null @@ -1,68 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy.polynomial as poly -from numpy.testing import assert_equal - - -class TestStr(object): - def test_polynomial_str(self): - res = str(poly.Polynomial([0, 1])) - tgt = 'poly([0. 1.])' - assert_equal(res, tgt) - - def test_chebyshev_str(self): - res = str(poly.Chebyshev([0, 1])) - tgt = 'cheb([0. 1.])' - assert_equal(res, tgt) - - def test_legendre_str(self): - res = str(poly.Legendre([0, 1])) - tgt = 'leg([0. 1.])' - assert_equal(res, tgt) - - def test_hermite_str(self): - res = str(poly.Hermite([0, 1])) - tgt = 'herm([0. 1.])' - assert_equal(res, tgt) - - def test_hermiteE_str(self): - res = str(poly.HermiteE([0, 1])) - tgt = 'herme([0. 1.])' - assert_equal(res, tgt) - - def test_laguerre_str(self): - res = str(poly.Laguerre([0, 1])) - tgt = 'lag([0. 
1.])' - assert_equal(res, tgt) - - -class TestRepr(object): - def test_polynomial_str(self): - res = repr(poly.Polynomial([0, 1])) - tgt = 'Polynomial([0., 1.], domain=[-1, 1], window=[-1, 1])' - assert_equal(res, tgt) - - def test_chebyshev_str(self): - res = repr(poly.Chebyshev([0, 1])) - tgt = 'Chebyshev([0., 1.], domain=[-1, 1], window=[-1, 1])' - assert_equal(res, tgt) - - def test_legendre_repr(self): - res = repr(poly.Legendre([0, 1])) - tgt = 'Legendre([0., 1.], domain=[-1, 1], window=[-1, 1])' - assert_equal(res, tgt) - - def test_hermite_repr(self): - res = repr(poly.Hermite([0, 1])) - tgt = 'Hermite([0., 1.], domain=[-1, 1], window=[-1, 1])' - assert_equal(res, tgt) - - def test_hermiteE_repr(self): - res = repr(poly.HermiteE([0, 1])) - tgt = 'HermiteE([0., 1.], domain=[-1, 1], window=[-1, 1])' - assert_equal(res, tgt) - - def test_laguerre_repr(self): - res = repr(poly.Laguerre([0, 1])) - tgt = 'Laguerre([0., 1.], domain=[0, 1], window=[0, 1])' - assert_equal(res, tgt) diff --git a/venv/lib/python3.7/site-packages/numpy/random/__init__.pxd b/venv/lib/python3.7/site-packages/numpy/random/__init__.pxd deleted file mode 100644 index 05e0738..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/__init__.pxd +++ /dev/null @@ -1,14 +0,0 @@ -cimport numpy as np -from libc.stdint cimport uint32_t, uint64_t - -cdef extern from "numpy/random/bitgen.h": - struct bitgen: - void *state - uint64_t (*next_uint64)(void *st) nogil - uint32_t (*next_uint32)(void *st) nogil - double (*next_double)(void *st) nogil - uint64_t (*next_raw)(void *st) nogil - - ctypedef bitgen bitgen_t - -from numpy.random._bit_generator cimport BitGenerator, SeedSequence diff --git a/venv/lib/python3.7/site-packages/numpy/random/__init__.py b/venv/lib/python3.7/site-packages/numpy/random/__init__.py deleted file mode 100644 index 1ceb5c4..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/__init__.py +++ /dev/null @@ -1,215 +0,0 @@ -""" -======================== -Random Number 
Generation -======================== - -Use ``default_rng()`` to create a `Generator` and call its methods. - -=============== ========================================================= -Generator ---------------- --------------------------------------------------------- -Generator Class implementing all of the random number distributions -default_rng Default constructor for ``Generator`` -=============== ========================================================= - -============================================= === -BitGenerator Streams that work with Generator ---------------------------------------------- --- -MT19937 -PCG64 -Philox -SFC64 -============================================= === - -============================================= === -Getting entropy to initialize a BitGenerator ---------------------------------------------- --- -SeedSequence -============================================= === - - -Legacy ------- - -For backwards compatibility with previous versions of numpy before 1.17, the -various aliases to the global `RandomState` methods are left alone and do not -use the new `Generator` API. - -==================== ========================================================= -Utility functions --------------------- --------------------------------------------------------- -random Uniformly distributed floats over ``[0, 1)`` -bytes Uniformly distributed random bytes. -permutation Randomly permute a sequence / generate a random sequence. -shuffle Randomly permute a sequence in place. -choice Random sample from 1-D array. -==================== ========================================================= - -==================== ========================================================= -Compatibility -functions - removed -in the new API --------------------- --------------------------------------------------------- -rand Uniformly distributed values. -randn Normally distributed values. -ranf Uniformly distributed floating point numbers. 
-random_integers Uniformly distributed integers in a given range. - (deprecated, use ``integers(..., closed=True)`` instead) -random_sample Alias for `random_sample` -randint Uniformly distributed integers in a given range -seed Seed the legacy random number generator. -==================== ========================================================= - -==================== ========================================================= -Univariate -distributions --------------------- --------------------------------------------------------- -beta Beta distribution over ``[0, 1]``. -binomial Binomial distribution. -chisquare :math:`\\chi^2` distribution. -exponential Exponential distribution. -f F (Fisher-Snedecor) distribution. -gamma Gamma distribution. -geometric Geometric distribution. -gumbel Gumbel distribution. -hypergeometric Hypergeometric distribution. -laplace Laplace distribution. -logistic Logistic distribution. -lognormal Log-normal distribution. -logseries Logarithmic series distribution. -negative_binomial Negative binomial distribution. -noncentral_chisquare Non-central chi-square distribution. -noncentral_f Non-central F distribution. -normal Normal / Gaussian distribution. -pareto Pareto distribution. -poisson Poisson distribution. -power Power distribution. -rayleigh Rayleigh distribution. -triangular Triangular distribution. -uniform Uniform distribution. -vonmises Von Mises circular distribution. -wald Wald (inverse Gaussian) distribution. -weibull Weibull distribution. -zipf Zipf's distribution over ranked data. -==================== ========================================================= - -==================== ========================================================== -Multivariate -distributions --------------------- ---------------------------------------------------------- -dirichlet Multivariate generalization of Beta distribution. -multinomial Multivariate generalization of the binomial distribution. 
-multivariate_normal Multivariate generalization of the normal distribution. -==================== ========================================================== - -==================== ========================================================= -Standard -distributions --------------------- --------------------------------------------------------- -standard_cauchy Standard Cauchy-Lorentz distribution. -standard_exponential Standard exponential distribution. -standard_gamma Standard Gamma distribution. -standard_normal Standard normal distribution. -standard_t Standard Student's t-distribution. -==================== ========================================================= - -==================== ========================================================= -Internal functions --------------------- --------------------------------------------------------- -get_state Get tuple representing internal state of generator. -set_state Set state of generator. -==================== ========================================================= - - -""" -from __future__ import division, absolute_import, print_function - -__all__ = [ - 'beta', - 'binomial', - 'bytes', - 'chisquare', - 'choice', - 'dirichlet', - 'exponential', - 'f', - 'gamma', - 'geometric', - 'get_state', - 'gumbel', - 'hypergeometric', - 'laplace', - 'logistic', - 'lognormal', - 'logseries', - 'multinomial', - 'multivariate_normal', - 'negative_binomial', - 'noncentral_chisquare', - 'noncentral_f', - 'normal', - 'pareto', - 'permutation', - 'poisson', - 'power', - 'rand', - 'randint', - 'randn', - 'random', - 'random_integers', - 'random_sample', - 'ranf', - 'rayleigh', - 'sample', - 'seed', - 'set_state', - 'shuffle', - 'standard_cauchy', - 'standard_exponential', - 'standard_gamma', - 'standard_normal', - 'standard_t', - 'triangular', - 'uniform', - 'vonmises', - 'wald', - 'weibull', - 'zipf', -] - -# add these for module-freeze analysis (like PyInstaller) -from . import _pickle -from . import _common -from . 
import _bounded_integers - -from ._generator import Generator, default_rng -from ._bit_generator import SeedSequence, BitGenerator -from ._mt19937 import MT19937 -from ._pcg64 import PCG64 -from ._philox import Philox -from ._sfc64 import SFC64 -from .mtrand import * - -__all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937', - 'Philox', 'PCG64', 'SFC64', 'default_rng', 'BitGenerator'] - - -def __RandomState_ctor(): - """Return a RandomState instance. - - This function exists solely to assist (un)pickling. - - Note that the state of the RandomState returned here is irrelevant, as this - function's entire purpose is to return a newly allocated RandomState whose - state pickle can set. Consequently the RandomState returned by this function - is a freshly allocated copy with a seed=0. - - See https://github.com/numpy/numpy/issues/4763 for a detailed discussion - - """ - return RandomState(seed=0) - - -from numpy._pytesttester import PytestTester -test = PytestTester(__name__) -del PytestTester diff --git a/venv/lib/python3.7/site-packages/numpy/random/_bit_generator.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/numpy/random/_bit_generator.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index ac72c9a..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/random/_bit_generator.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/random/_bit_generator.pxd b/venv/lib/python3.7/site-packages/numpy/random/_bit_generator.pxd deleted file mode 100644 index bd5e47a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/_bit_generator.pxd +++ /dev/null @@ -1,35 +0,0 @@ -cimport numpy as np -from libc.stdint cimport uint32_t, uint64_t - -cdef extern from "numpy/random/bitgen.h": - struct bitgen: - void *state - uint64_t (*next_uint64)(void *st) nogil - uint32_t (*next_uint32)(void *st) nogil - double (*next_double)(void *st) nogil - uint64_t (*next_raw)(void *st) nogil - - 
ctypedef bitgen bitgen_t - -cdef class BitGenerator(): - cdef readonly object _seed_seq - cdef readonly object lock - cdef bitgen_t _bitgen - cdef readonly object _ctypes - cdef readonly object _cffi - cdef readonly object capsule - - -cdef class SeedSequence(): - cdef readonly object entropy - cdef readonly tuple spawn_key - cdef readonly uint32_t pool_size - cdef readonly object pool - cdef readonly uint32_t n_children_spawned - - cdef mix_entropy(self, np.ndarray[np.npy_uint32, ndim=1] mixer, - np.ndarray[np.npy_uint32, ndim=1] entropy_array) - cdef get_assembled_entropy(self) - -cdef class SeedlessSequence(): - pass diff --git a/venv/lib/python3.7/site-packages/numpy/random/_bounded_integers.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/numpy/random/_bounded_integers.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 7590f09..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/random/_bounded_integers.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/random/_bounded_integers.pxd b/venv/lib/python3.7/site-packages/numpy/random/_bounded_integers.pxd deleted file mode 100644 index 7e41463..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/_bounded_integers.pxd +++ /dev/null @@ -1,29 +0,0 @@ -from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t, - int8_t, int16_t, int32_t, int64_t, intptr_t) -import numpy as np -cimport numpy as np -ctypedef np.npy_bool bool_t - -from numpy.random cimport bitgen_t - -cdef inline uint64_t _gen_mask(uint64_t max_val) nogil: - """Mask generator for use in bounded random numbers""" - # Smallest bit mask >= max - cdef uint64_t mask = max_val - mask |= mask >> 1 - mask |= mask >> 2 - mask |= mask >> 4 - mask |= mask >> 8 - mask |= mask >> 16 - mask |= mask >> 32 - return mask - -cdef object _rand_uint64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) -cdef object 
_rand_uint32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) -cdef object _rand_uint16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) -cdef object _rand_uint8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) -cdef object _rand_bool(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) -cdef object _rand_int64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) -cdef object _rand_int32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) -cdef object _rand_int16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) -cdef object _rand_int8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) diff --git a/venv/lib/python3.7/site-packages/numpy/random/_common.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/numpy/random/_common.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 757826d..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/random/_common.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/random/_common.pxd b/venv/lib/python3.7/site-packages/numpy/random/_common.pxd deleted file mode 100644 index 588f613..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/_common.pxd +++ /dev/null @@ -1,104 +0,0 @@ -#cython: language_level=3 - -from libc.stdint cimport uint32_t, uint64_t, int32_t, int64_t - -import numpy as np -cimport numpy as np - -from numpy.random cimport bitgen_t - -cdef double POISSON_LAM_MAX -cdef double LEGACY_POISSON_LAM_MAX -cdef uint64_t MAXSIZE - -cdef enum ConstraintType: - CONS_NONE - CONS_NON_NEGATIVE - CONS_POSITIVE - CONS_POSITIVE_NOT_NAN - CONS_BOUNDED_0_1 - 
CONS_BOUNDED_0_1_NOTNAN - CONS_BOUNDED_GT_0_1 - CONS_GT_1 - CONS_GTE_1 - CONS_POISSON - LEGACY_CONS_POISSON - -ctypedef ConstraintType constraint_type - -cdef object benchmark(bitgen_t *bitgen, object lock, Py_ssize_t cnt, object method) -cdef object random_raw(bitgen_t *bitgen, object lock, object size, object output) -cdef object prepare_cffi(bitgen_t *bitgen) -cdef object prepare_ctypes(bitgen_t *bitgen) -cdef int check_constraint(double val, object name, constraint_type cons) except -1 -cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1 - -cdef extern from "include/aligned_malloc.h": - cdef void *PyArray_realloc_aligned(void *p, size_t n) - cdef void *PyArray_malloc_aligned(size_t n) - cdef void *PyArray_calloc_aligned(size_t n, size_t s) - cdef void PyArray_free_aligned(void *p) - -ctypedef double (*random_double_fill)(bitgen_t *state, np.npy_intp count, double* out) nogil -ctypedef double (*random_double_0)(void *state) nogil -ctypedef double (*random_double_1)(void *state, double a) nogil -ctypedef double (*random_double_2)(void *state, double a, double b) nogil -ctypedef double (*random_double_3)(void *state, double a, double b, double c) nogil - -ctypedef double (*random_float_fill)(bitgen_t *state, np.npy_intp count, float* out) nogil -ctypedef float (*random_float_0)(bitgen_t *state) nogil -ctypedef float (*random_float_1)(bitgen_t *state, float a) nogil - -ctypedef int64_t (*random_uint_0)(void *state) nogil -ctypedef int64_t (*random_uint_d)(void *state, double a) nogil -ctypedef int64_t (*random_uint_dd)(void *state, double a, double b) nogil -ctypedef int64_t (*random_uint_di)(void *state, double a, uint64_t b) nogil -ctypedef int64_t (*random_uint_i)(void *state, int64_t a) nogil -ctypedef int64_t (*random_uint_iii)(void *state, int64_t a, int64_t b, int64_t c) nogil - -ctypedef uint32_t (*random_uint_0_32)(bitgen_t *state) nogil -ctypedef uint32_t (*random_uint_1_i_32)(bitgen_t *state, uint32_t a) nogil - 
-ctypedef int32_t (*random_int_2_i_32)(bitgen_t *state, int32_t a, int32_t b) nogil -ctypedef int64_t (*random_int_2_i)(bitgen_t *state, int64_t a, int64_t b) nogil - -cdef double kahan_sum(double *darr, np.npy_intp n) - -cdef inline double uint64_to_double(uint64_t rnd) nogil: - return (rnd >> 11) * (1.0 / 9007199254740992.0) - -cdef object double_fill(void *func, bitgen_t *state, object size, object lock, object out) - -cdef object float_fill(void *func, bitgen_t *state, object size, object lock, object out) - -cdef object float_fill_from_double(void *func, bitgen_t *state, object size, object lock, object out) - -cdef object wrap_int(object val, object bits) - -cdef np.ndarray int_to_array(object value, object name, object bits, object uint_size) - -cdef object cont(void *func, void *state, object size, object lock, int narg, - object a, object a_name, constraint_type a_constraint, - object b, object b_name, constraint_type b_constraint, - object c, object c_name, constraint_type c_constraint, - object out) - -cdef object disc(void *func, void *state, object size, object lock, - int narg_double, int narg_int64, - object a, object a_name, constraint_type a_constraint, - object b, object b_name, constraint_type b_constraint, - object c, object c_name, constraint_type c_constraint) - -cdef object cont_f(void *func, bitgen_t *state, object size, object lock, - object a, object a_name, constraint_type a_constraint, - object out) - -cdef object cont_broadcast_3(void *func, void *state, object size, object lock, - np.ndarray a_arr, object a_name, constraint_type a_constraint, - np.ndarray b_arr, object b_name, constraint_type b_constraint, - np.ndarray c_arr, object c_name, constraint_type c_constraint) - -cdef object discrete_broadcast_iii(void *func, void *state, object size, object lock, - np.ndarray a_arr, object a_name, constraint_type a_constraint, - np.ndarray b_arr, object b_name, constraint_type b_constraint, - np.ndarray c_arr, object c_name, constraint_type 
c_constraint) diff --git a/venv/lib/python3.7/site-packages/numpy/random/_examples/cffi/extending.py b/venv/lib/python3.7/site-packages/numpy/random/_examples/cffi/extending.py deleted file mode 100644 index 8440d40..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/_examples/cffi/extending.py +++ /dev/null @@ -1,40 +0,0 @@ -""" -Use cffi to access any of the underlying C functions from distributions.h -""" -import os -import numpy as np -import cffi -from .parse import parse_distributions_h -ffi = cffi.FFI() - -inc_dir = os.path.join(np.get_include(), 'numpy') - -# Basic numpy types -ffi.cdef(''' - typedef intptr_t npy_intp; - typedef unsigned char npy_bool; - -''') - -parse_distributions_h(ffi, inc_dir) - -lib = ffi.dlopen(np.random._generator.__file__) - -# Compare the distributions.h random_standard_normal_fill to -# Generator.standard_random -bit_gen = np.random.PCG64() -rng = np.random.Generator(bit_gen) -state = bit_gen.state - -interface = rng.bit_generator.cffi -n = 100 -vals_cffi = ffi.new('double[%d]' % n) -lib.random_standard_normal_fill(interface.bit_generator, n, vals_cffi) - -# reset the state -bit_gen.state = state - -vals = rng.standard_normal(n) - -for i in range(n): - assert vals[i] == vals_cffi[i] diff --git a/venv/lib/python3.7/site-packages/numpy/random/_examples/cffi/parse.py b/venv/lib/python3.7/site-packages/numpy/random/_examples/cffi/parse.py deleted file mode 100644 index 73d8646..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/_examples/cffi/parse.py +++ /dev/null @@ -1,46 +0,0 @@ -import os - - -def parse_distributions_h(ffi, inc_dir): - """ - Parse distributions.h located in inc_dir for CFFI, filling in the ffi.cdef - - Read the function declarations without the "#define ..." macros that will - be filled in when loading the library. 
- """ - - with open(os.path.join(inc_dir, 'random', 'bitgen.h')) as fid: - s = [] - for line in fid: - # massage the include file - if line.strip().startswith('#'): - continue - s.append(line) - ffi.cdef('\n'.join(s)) - - with open(os.path.join(inc_dir, 'random', 'distributions.h')) as fid: - s = [] - in_skip = 0 - for line in fid: - # massage the include file - if line.strip().startswith('#'): - continue - - # skip any inlined function definition - # which starts with 'static NPY_INLINE xxx(...) {' - # and ends with a closing '}' - if line.strip().startswith('static NPY_INLINE'): - in_skip += line.count('{') - continue - elif in_skip > 0: - in_skip += line.count('{') - in_skip -= line.count('}') - continue - - # replace defines with their value or remove them - line = line.replace('DECLDIR', '') - line = line.replace('NPY_INLINE', '') - line = line.replace('RAND_INT_TYPE', 'int64_t') - s.append(line) - ffi.cdef('\n'.join(s)) - diff --git a/venv/lib/python3.7/site-packages/numpy/random/_examples/cython/extending.pyx b/venv/lib/python3.7/site-packages/numpy/random/_examples/cython/extending.pyx deleted file mode 100644 index 7a0dfe0..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/_examples/cython/extending.pyx +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python -#cython: language_level=3 - -from libc.stdint cimport uint32_t -from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer - -import numpy as np -cimport numpy as np -cimport cython - -from numpy.random cimport bitgen_t -from numpy.random import PCG64 - -np.import_array() - - -@cython.boundscheck(False) -@cython.wraparound(False) -def uniform_mean(Py_ssize_t n): - cdef Py_ssize_t i - cdef bitgen_t *rng - cdef const char *capsule_name = "BitGenerator" - cdef double[::1] random_values - cdef np.ndarray randoms - - x = PCG64() - capsule = x.capsule - if not PyCapsule_IsValid(capsule, capsule_name): - raise ValueError("Invalid pointer to anon_func_state") - rng = 
PyCapsule_GetPointer(capsule, capsule_name) - random_values = np.empty(n) - # Best practice is to acquire the lock whenever generating random values. - # This prevents other threads from modifying the state. Acquiring the lock - # is only necessary if if the GIL is also released, as in this example. - with x.lock, nogil: - for i in range(n): - random_values[i] = rng.next_double(rng.state) - randoms = np.asarray(random_values) - return randoms.mean() - - -# This function is declared nogil so it can be used without the GIL below -cdef uint32_t bounded_uint(uint32_t lb, uint32_t ub, bitgen_t *rng) nogil: - cdef uint32_t mask, delta, val - mask = delta = ub - lb - mask |= mask >> 1 - mask |= mask >> 2 - mask |= mask >> 4 - mask |= mask >> 8 - mask |= mask >> 16 - - val = rng.next_uint32(rng.state) & mask - while val > delta: - val = rng.next_uint32(rng.state) & mask - - return lb + val - - -@cython.boundscheck(False) -@cython.wraparound(False) -def bounded_uints(uint32_t lb, uint32_t ub, Py_ssize_t n): - cdef Py_ssize_t i - cdef bitgen_t *rng - cdef uint32_t[::1] out - cdef const char *capsule_name = "BitGenerator" - - x = PCG64() - out = np.empty(n, dtype=np.uint32) - capsule = x.capsule - - if not PyCapsule_IsValid(capsule, capsule_name): - raise ValueError("Invalid pointer to anon_func_state") - rng = PyCapsule_GetPointer(capsule, capsule_name) - - with x.lock, nogil: - for i in range(n): - out[i] = bounded_uint(lb, ub, rng) - return np.asarray(out) diff --git a/venv/lib/python3.7/site-packages/numpy/random/_examples/cython/extending_distributions.pyx b/venv/lib/python3.7/site-packages/numpy/random/_examples/cython/extending_distributions.pyx deleted file mode 100644 index 1bef506..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/_examples/cython/extending_distributions.pyx +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env python -#cython: language_level=3 -""" -This file shows how the to use a BitGenerator to create a distribution. 
-""" -import numpy as np -cimport numpy as np -cimport cython -from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer -from libc.stdint cimport uint16_t, uint64_t -from numpy.random cimport bitgen_t -from numpy.random import PCG64 - - -@cython.boundscheck(False) -@cython.wraparound(False) -def uniforms(Py_ssize_t n): - """ - Create an array of `n` uniformly distributed doubles. - A 'real' distribution would want to process the values into - some non-uniform distribution - """ - cdef Py_ssize_t i - cdef bitgen_t *rng - cdef const char *capsule_name = "BitGenerator" - cdef double[::1] random_values - - x = PCG64() - capsule = x.capsule - # Optional check that the capsule if from a BitGenerator - if not PyCapsule_IsValid(capsule, capsule_name): - raise ValueError("Invalid pointer to anon_func_state") - # Cast the pointer - rng = PyCapsule_GetPointer(capsule, capsule_name) - random_values = np.empty(n, dtype='float64') - with x.lock, nogil: - for i in range(n): - # Call the function - random_values[i] = rng.next_double(rng.state) - randoms = np.asarray(random_values) - - return randoms - -# cython example 2 -@cython.boundscheck(False) -@cython.wraparound(False) -def uint10_uniforms(Py_ssize_t n): - """Uniform 10 bit integers stored as 16-bit unsigned integers""" - cdef Py_ssize_t i - cdef bitgen_t *rng - cdef const char *capsule_name = "BitGenerator" - cdef uint16_t[::1] random_values - cdef int bits_remaining - cdef int width = 10 - cdef uint64_t buff, mask = 0x3FF - - x = PCG64() - capsule = x.capsule - if not PyCapsule_IsValid(capsule, capsule_name): - raise ValueError("Invalid pointer to anon_func_state") - rng = PyCapsule_GetPointer(capsule, capsule_name) - random_values = np.empty(n, dtype='uint16') - # Best practice is to release GIL and acquire the lock - bits_remaining = 0 - with x.lock, nogil: - for i in range(n): - if bits_remaining < width: - buff = rng.next_uint64(rng.state) - random_values[i] = buff & mask - buff >>= width - - randoms = 
np.asarray(random_values) - return randoms - diff --git a/venv/lib/python3.7/site-packages/numpy/random/_examples/cython/setup.py b/venv/lib/python3.7/site-packages/numpy/random/_examples/cython/setup.py deleted file mode 100644 index 20cedc4..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/_examples/cython/setup.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env python3 -""" -Build the Cython demonstrations of low-level access to NumPy random - -Usage: python setup.py build_ext -i -""" - -import numpy as np -from distutils.core import setup -from Cython.Build import cythonize -from setuptools.extension import Extension -from os.path import join, dirname - -path = dirname(__file__) -defs = [('NPY_NO_DEPRECATED_API', 0)] - -extending = Extension("extending", - sources=[join(path, 'extending.pyx')], - include_dirs=[ - np.get_include(), - join(path, '..', '..') - ], - define_macros=defs, - ) -distributions = Extension("extending_distributions", - sources=[join(path, 'extending_distributions.pyx')], - include_dirs=[np.get_include()], - define_macros=defs, - ) - -extensions = [extending, distributions] - -setup( - ext_modules=cythonize(extensions) -) diff --git a/venv/lib/python3.7/site-packages/numpy/random/_examples/numba/extending.py b/venv/lib/python3.7/site-packages/numpy/random/_examples/numba/extending.py deleted file mode 100644 index 0d24059..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/_examples/numba/extending.py +++ /dev/null @@ -1,84 +0,0 @@ -import numpy as np -import numba as nb - -from numpy.random import PCG64 -from timeit import timeit - -bit_gen = PCG64() -next_d = bit_gen.cffi.next_double -state_addr = bit_gen.cffi.state_address - -def normals(n, state): - out = np.empty(n) - for i in range((n + 1) // 2): - x1 = 2.0 * next_d(state) - 1.0 - x2 = 2.0 * next_d(state) - 1.0 - r2 = x1 * x1 + x2 * x2 - while r2 >= 1.0 or r2 == 0.0: - x1 = 2.0 * next_d(state) - 1.0 - x2 = 2.0 * next_d(state) - 1.0 - r2 = x1 * x1 + x2 * x2 - f = 
np.sqrt(-2.0 * np.log(r2) / r2) - out[2 * i] = f * x1 - if 2 * i + 1 < n: - out[2 * i + 1] = f * x2 - return out - -# Compile using Numba -normalsj = nb.jit(normals, nopython=True) -# Must use state address not state with numba -n = 10000 - -def numbacall(): - return normalsj(n, state_addr) - -rg = np.random.Generator(PCG64()) - -def numpycall(): - return rg.normal(size=n) - -# Check that the functions work -r1 = numbacall() -r2 = numpycall() -assert r1.shape == (n,) -assert r1.shape == r2.shape - -t1 = timeit(numbacall, number=1000) -print('{:.2f} secs for {} PCG64 (Numba/PCG64) gaussian randoms'.format(t1, n)) -t2 = timeit(numpycall, number=1000) -print('{:.2f} secs for {} PCG64 (NumPy/PCG64) gaussian randoms'.format(t2, n)) - -# example 2 - -next_u32 = bit_gen.ctypes.next_uint32 -ctypes_state = bit_gen.ctypes.state - -@nb.jit(nopython=True) -def bounded_uint(lb, ub, state): - mask = delta = ub - lb - mask |= mask >> 1 - mask |= mask >> 2 - mask |= mask >> 4 - mask |= mask >> 8 - mask |= mask >> 16 - - val = next_u32(state) & mask - while val > delta: - val = next_u32(state) & mask - - return lb + val - - -print(bounded_uint(323, 2394691, ctypes_state.value)) - - -@nb.jit(nopython=True) -def bounded_uints(lb, ub, n, state): - out = np.empty(n, dtype=np.uint32) - for i in range(n): - out[i] = bounded_uint(lb, ub, state) - - -bounded_uints(323, 2394691, 10000000, ctypes_state.value) - - diff --git a/venv/lib/python3.7/site-packages/numpy/random/_examples/numba/extending_distributions.py b/venv/lib/python3.7/site-packages/numpy/random/_examples/numba/extending_distributions.py deleted file mode 100644 index 7cf8bf0..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/_examples/numba/extending_distributions.py +++ /dev/null @@ -1,67 +0,0 @@ -r""" -Building the required library in this example requires a source distribution -of NumPy or clone of the NumPy git repository since distributions.c is not -included in binary distributions. 
- -On *nix, execute in numpy/random/src/distributions - -export ${PYTHON_VERSION}=3.8 # Python version -export PYTHON_INCLUDE=#path to Python's include folder, usually \ - ${PYTHON_HOME}/include/python${PYTHON_VERSION}m -export NUMPY_INCLUDE=#path to numpy's include folder, usually \ - ${PYTHON_HOME}/lib/python${PYTHON_VERSION}/site-packages/numpy/core/include -gcc -shared -o libdistributions.so -fPIC distributions.c \ - -I${NUMPY_INCLUDE} -I${PYTHON_INCLUDE} -mv libdistributions.so ../../_examples/numba/ - -On Windows - -rem PYTHON_HOME and PYTHON_VERSION are setup dependent, this is an example -set PYTHON_HOME=c:\Anaconda -set PYTHON_VERSION=38 -cl.exe /LD .\distributions.c -DDLL_EXPORT \ - -I%PYTHON_HOME%\lib\site-packages\numpy\core\include \ - -I%PYTHON_HOME%\include %PYTHON_HOME%\libs\python%PYTHON_VERSION%.lib -move distributions.dll ../../_examples/numba/ -""" -import os - -import numba as nb -import numpy as np -from cffi import FFI - -from numpy.random import PCG64 - -ffi = FFI() -if os.path.exists('./distributions.dll'): - lib = ffi.dlopen('./distributions.dll') -elif os.path.exists('./libdistributions.so'): - lib = ffi.dlopen('./libdistributions.so') -else: - raise RuntimeError('Required DLL/so file was not found.') - -ffi.cdef(""" -double random_standard_normal(void *bitgen_state); -""") -x = PCG64() -xffi = x.cffi -bit_generator = xffi.bit_generator - -random_standard_normal = lib.random_standard_normal - - -def normals(n, bit_generator): - out = np.empty(n) - for i in range(n): - out[i] = random_standard_normal(bit_generator) - return out - - -normalsj = nb.jit(normals, nopython=True) - -# Numba requires a memory address for void * -# Can also get address from x.ctypes.bit_generator.value -bit_generator_address = int(ffi.cast('uintptr_t', bit_generator)) - -norm = normalsj(1000, bit_generator_address) -print(norm[:12]) diff --git a/venv/lib/python3.7/site-packages/numpy/random/_generator.cpython-37m-x86_64-linux-gnu.so 
b/venv/lib/python3.7/site-packages/numpy/random/_generator.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index fb0e940..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/random/_generator.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/random/_mt19937.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/numpy/random/_mt19937.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 4a17772..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/random/_mt19937.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/random/_pcg64.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/numpy/random/_pcg64.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 8941ac9..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/random/_pcg64.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/random/_philox.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/numpy/random/_philox.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 549cc1b..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/random/_philox.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/random/_pickle.py b/venv/lib/python3.7/site-packages/numpy/random/_pickle.py deleted file mode 100644 index 29ff696..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/_pickle.py +++ /dev/null @@ -1,82 +0,0 @@ -from .mtrand import RandomState -from ._philox import Philox -from ._pcg64 import PCG64 -from ._sfc64 import SFC64 - -from ._generator import Generator -from ._mt19937 import MT19937 - -BitGenerators = {'MT19937': MT19937, - 'PCG64': PCG64, - 'Philox': Philox, - 'SFC64': SFC64, - } - - -def __generator_ctor(bit_generator_name='MT19937'): - """ - Pickling helper function that returns a 
Generator object - - Parameters - ---------- - bit_generator_name: str - String containing the core BitGenerator - - Returns - ------- - rg: Generator - Generator using the named core BitGenerator - """ - if bit_generator_name in BitGenerators: - bit_generator = BitGenerators[bit_generator_name] - else: - raise ValueError(str(bit_generator_name) + ' is not a known ' - 'BitGenerator module.') - - return Generator(bit_generator()) - - -def __bit_generator_ctor(bit_generator_name='MT19937'): - """ - Pickling helper function that returns a bit generator object - - Parameters - ---------- - bit_generator_name: str - String containing the name of the BitGenerator - - Returns - ------- - bit_generator: BitGenerator - BitGenerator instance - """ - if bit_generator_name in BitGenerators: - bit_generator = BitGenerators[bit_generator_name] - else: - raise ValueError(str(bit_generator_name) + ' is not a known ' - 'BitGenerator module.') - - return bit_generator() - - -def __randomstate_ctor(bit_generator_name='MT19937'): - """ - Pickling helper function that returns a legacy RandomState-like object - - Parameters - ---------- - bit_generator_name: str - String containing the core BitGenerator - - Returns - ------- - rs: RandomState - Legacy RandomState using the named core BitGenerator - """ - if bit_generator_name in BitGenerators: - bit_generator = BitGenerators[bit_generator_name] - else: - raise ValueError(str(bit_generator_name) + ' is not a known ' - 'BitGenerator module.') - - return RandomState(bit_generator()) diff --git a/venv/lib/python3.7/site-packages/numpy/random/_sfc64.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/numpy/random/_sfc64.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 2b2077e..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/random/_sfc64.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/random/mtrand.cpython-37m-x86_64-linux-gnu.so 
b/venv/lib/python3.7/site-packages/numpy/random/mtrand.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index c3c7904..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/random/mtrand.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/random/setup.py b/venv/lib/python3.7/site-packages/numpy/random/setup.py deleted file mode 100644 index 1b093d6..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/setup.py +++ /dev/null @@ -1,135 +0,0 @@ -from __future__ import division, print_function - -import os -import platform -import sys -from os.path import join - -from numpy.distutils.system_info import platform_bits - -is_msvc = (platform.platform().startswith('Windows') and - platform.python_compiler().startswith('MS')) - - -def configuration(parent_package='', top_path=None): - from numpy.distutils.misc_util import Configuration, get_mathlibs - config = Configuration('random', parent_package, top_path) - - def generate_libraries(ext, build_dir): - config_cmd = config.get_config_cmd() - libs = get_mathlibs() - if sys.platform == 'win32': - libs.extend(['Advapi32', 'Kernel32']) - ext.libraries.extend(libs) - return None - - # enable unix large file support on 32 bit systems - # (64 bit off_t, lseek -> lseek64 etc.) 
- if sys.platform[:3] == "aix": - defs = [('_LARGE_FILES', None)] - else: - defs = [('_FILE_OFFSET_BITS', '64'), - ('_LARGEFILE_SOURCE', '1'), - ('_LARGEFILE64_SOURCE', '1')] - - defs.append(('NPY_NO_DEPRECATED_API', 0)) - config.add_data_dir('tests') - config.add_data_dir('_examples') - - EXTRA_LINK_ARGS = [] - # Math lib - EXTRA_LIBRARIES = ['m'] if os.name != 'nt' else [] - # Some bit generators exclude GCC inlining - EXTRA_COMPILE_ARGS = ['-U__GNUC_GNU_INLINE__'] - - if is_msvc and platform_bits == 32: - # 32-bit windows requires explicit sse2 option - EXTRA_COMPILE_ARGS += ['/arch:SSE2'] - elif not is_msvc: - # Some bit generators require c99 - EXTRA_COMPILE_ARGS += ['-std=c99'] - - # Use legacy integer variable sizes - LEGACY_DEFS = [('NP_RANDOM_LEGACY', '1')] - PCG64_DEFS = [] - # One can force emulated 128-bit arithmetic if one wants. - #PCG64_DEFS += [('PCG_FORCE_EMULATED_128BIT_MATH', '1')] - - for gen in ['mt19937']: - # gen.pyx, src/gen/gen.c, src/gen/gen-jump.c - config.add_extension('_{0}'.format(gen), - sources=['_{0}.c'.format(gen), - 'src/{0}/{0}.c'.format(gen), - 'src/{0}/{0}-jump.c'.format(gen)], - include_dirs=['.', 'src', join('src', gen)], - libraries=EXTRA_LIBRARIES, - extra_compile_args=EXTRA_COMPILE_ARGS, - extra_link_args=EXTRA_LINK_ARGS, - depends=['_%s.pyx' % gen], - define_macros=defs, - ) - for gen in ['philox', 'pcg64', 'sfc64']: - # gen.pyx, src/gen/gen.c - _defs = defs + PCG64_DEFS if gen == 'pcg64' else defs - config.add_extension('_{0}'.format(gen), - sources=['_{0}.c'.format(gen), - 'src/{0}/{0}.c'.format(gen)], - include_dirs=['.', 'src', join('src', gen)], - libraries=EXTRA_LIBRARIES, - extra_compile_args=EXTRA_COMPILE_ARGS, - extra_link_args=EXTRA_LINK_ARGS, - depends=['_%s.pyx' % gen, '_bit_generator.pyx', - '_bit_generator.pxd'], - define_macros=_defs, - ) - for gen in ['_common', '_bit_generator']: - # gen.pyx - config.add_extension(gen, - sources=['{0}.c'.format(gen)], - libraries=EXTRA_LIBRARIES, - 
extra_compile_args=EXTRA_COMPILE_ARGS, - extra_link_args=EXTRA_LINK_ARGS, - include_dirs=['.', 'src'], - depends=['%s.pyx' % gen, '%s.pxd' % gen,], - define_macros=defs, - ) - config.add_data_files('{0}.pxd'.format(gen)) - other_srcs = [ - 'src/distributions/logfactorial.c', - 'src/distributions/distributions.c', - 'src/distributions/random_mvhg_count.c', - 'src/distributions/random_mvhg_marginals.c', - 'src/distributions/random_hypergeometric.c', - ] - for gen in ['_generator', '_bounded_integers']: - # gen.pyx, src/distributions/distributions.c - config.add_extension(gen, - sources=['{0}.c'.format(gen)] + other_srcs, - libraries=EXTRA_LIBRARIES, - extra_compile_args=EXTRA_COMPILE_ARGS, - include_dirs=['.', 'src'], - extra_link_args=EXTRA_LINK_ARGS, - depends=['%s.pyx' % gen], - define_macros=defs, - ) - config.add_data_files('_bounded_integers.pxd') - config.add_extension('mtrand', - sources=['mtrand.c', - 'src/legacy/legacy-distributions.c', - 'src/distributions/logfactorial.c', - 'src/distributions/distributions.c'], - include_dirs=['.', 'src', 'src/legacy'], - libraries=EXTRA_LIBRARIES, - extra_compile_args=EXTRA_COMPILE_ARGS, - extra_link_args=EXTRA_LINK_ARGS, - depends=['mtrand.pyx'], - define_macros=defs + LEGACY_DEFS, - ) - config.add_data_files('__init__.pxd') - return config - - -if __name__ == '__main__': - from numpy.distutils.core import setup - - setup(configuration=configuration) diff --git a/venv/lib/python3.7/site-packages/numpy/random/tests/__init__.py b/venv/lib/python3.7/site-packages/numpy/random/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/numpy/random/tests/data/__init__.py b/venv/lib/python3.7/site-packages/numpy/random/tests/data/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/numpy/random/tests/data/mt19937-testset-1.csv b/venv/lib/python3.7/site-packages/numpy/random/tests/data/mt19937-testset-1.csv deleted file 
mode 100644 index b97bfa6..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/tests/data/mt19937-testset-1.csv +++ /dev/null @@ -1,1001 +0,0 @@ -seed, 0xdeadbeaf -0, 0xc816921f -1, 0xb3623c6d -2, 0x5fa391bb -3, 0x40178d9 -4, 0x7dcc9811 -5, 0x548eb8e6 -6, 0x92ba3125 -7, 0x65fde68d -8, 0x2f81ec95 -9, 0xbd94f7a2 -10, 0xdc4d9bcc -11, 0xa672bf13 -12, 0xb41113e -13, 0xec7e0066 -14, 0x50239372 -15, 0xd9d66b1d -16, 0xab72a161 -17, 0xddc2e29f -18, 0x7ea29ab4 -19, 0x80d141ba -20, 0xb1c7edf1 -21, 0x44d29203 -22, 0xe224d98 -23, 0x5b3e9d26 -24, 0x14fd567c -25, 0x27d98c96 -26, 0x838779fc -27, 0x92a138a -28, 0x5d08965b -29, 0x531e0ad6 -30, 0x984ee8f4 -31, 0x1ed78539 -32, 0x32bd6d8d -33, 0xc37c8516 -34, 0x9aef5c6b -35, 0x3aacd139 -36, 0xd96ed154 -37, 0x489cd1ed -38, 0x2cba4b3b -39, 0x76c6ae72 -40, 0x2dae02b9 -41, 0x52ac5fd6 -42, 0xc2b5e265 -43, 0x630e6a28 -44, 0x3f560d5d -45, 0x9315bdf3 -46, 0xf1055aba -47, 0x840e42c6 -48, 0xf2099c6b -49, 0x15ff7696 -50, 0x7948d146 -51, 0x97342961 -52, 0x7a7a21c -53, 0xc66f4fb1 -54, 0x23c4103e -55, 0xd7321f98 -56, 0xeb7efb75 -57, 0xe02490b5 -58, 0x2aa02de -59, 0x8bee0bf7 -60, 0xfc2da059 -61, 0xae835034 -62, 0x678f2075 -63, 0x6d03094b -64, 0x56455e05 -65, 0x18b32373 -66, 0x8ff0356b -67, 0x1fe442fb -68, 0x3f1ab6c3 -69, 0xb6fd21b -70, 0xfc310eb2 -71, 0xb19e9a4d -72, 0x17ddee72 -73, 0xfd534251 -74, 0x9e500564 -75, 0x9013a036 -76, 0xcf08f118 -77, 0x6b6d5969 -78, 0x3ccf1977 -79, 0x7cc11497 -80, 0x651c6ac9 -81, 0x4d6b104b -82, 0x9a28314e -83, 0x14c237be -84, 0x9cfc8d52 -85, 0x2947fad5 -86, 0xd71eff49 -87, 0x5188730e -88, 0x4b894614 -89, 0xf4fa2a34 -90, 0x42f7cc69 -91, 0x4089c9e8 -92, 0xbf0bbfe4 -93, 0x3cea65c -94, 0xc6221207 -95, 0x1bb71a8f -96, 0x54843fe7 -97, 0xbc59de4c -98, 0x79c6ee64 -99, 0x14e57a26 -100, 0x68d88fe -101, 0x2b86ef64 -102, 0x8ffff3c1 -103, 0x5bdd573f -104, 0x85671813 -105, 0xefe32ca2 -106, 0x105ded1e -107, 0x90ca2769 -108, 0xb33963ac -109, 0x363fbbc3 -110, 0x3b3763ae -111, 0x1d50ab88 -112, 0xc9ec01eb -113, 0xc8bbeada -114, 
0x5d704692 -115, 0x5fd9e40 -116, 0xe61c125 -117, 0x2fe05792 -118, 0xda8afb72 -119, 0x4cbaa653 -120, 0xdd2243df -121, 0x896fd3f5 -122, 0x5bc23db -123, 0xa1c4e807 -124, 0x57d1a24d -125, 0x66503ddc -126, 0xcf7c0838 -127, 0x19e034fc -128, 0x66807450 -129, 0xfc219b3b -130, 0xe8a843e7 -131, 0x9ce61f08 -132, 0x92b950d6 -133, 0xce955ec4 -134, 0xda0d1f0d -135, 0x960c6250 -136, 0x39552432 -137, 0xde845e84 -138, 0xff3b4b11 -139, 0x5d918e6f -140, 0xbb930df2 -141, 0x7cfb0993 -142, 0x5400e1e9 -143, 0x3bfa0954 -144, 0x7e2605fb -145, 0x11941591 -146, 0x887e6994 -147, 0xdc8bed45 -148, 0x45b3fb50 -149, 0xfbdf8358 -150, 0x41507468 -151, 0x34c87166 -152, 0x17f64d77 -153, 0x3bbaf4f8 -154, 0x4f26f37e -155, 0x4a56ebf2 -156, 0x81100f1 -157, 0x96d94eae -158, 0xca88fda5 -159, 0x2eef3a60 -160, 0x952afbd3 -161, 0x2bec88c7 -162, 0x52335c4b -163, 0x8296db8e -164, 0x4da7d00a -165, 0xc00ac899 -166, 0xadff8c72 -167, 0xbecf26cf -168, 0x8835c83c -169, 0x1d13c804 -170, 0xaa940ddc -171, 0x68222cfe -172, 0x4569c0e1 -173, 0x29077976 -174, 0x32d4a5af -175, 0xd31fcdef -176, 0xdc60682b -177, 0x7c95c368 -178, 0x75a70213 -179, 0x43021751 -180, 0x5e52e0a6 -181, 0xf7e190b5 -182, 0xee3e4bb -183, 0x2fe3b150 -184, 0xcf419c07 -185, 0x478a4570 -186, 0xe5c3ea50 -187, 0x417f30a8 -188, 0xf0cfdaa0 -189, 0xd1f7f738 -190, 0x2c70fc23 -191, 0x54fc89f9 -192, 0x444dcf01 -193, 0xec2a002d -194, 0xef0c3a88 -195, 0xde21be9 -196, 0x88ab3296 -197, 0x3028897c -198, 0x264b200b -199, 0xd8ae0706 -200, 0x9eef901a -201, 0xbd1b96e0 -202, 0xea71366c -203, 0x1465b694 -204, 0x5a794650 -205, 0x83df52d4 -206, 0x8262413d -207, 0x5bc148c0 -208, 0xe0ecd80c -209, 0x40649571 -210, 0xb4d2ee5f -211, 0xedfd7d09 -212, 0xa082e25f -213, 0xc62992d1 -214, 0xbc7e65ee -215, 0x5499cf8a -216, 0xac28f775 -217, 0x649840fb -218, 0xd4c54805 -219, 0x1d166ba6 -220, 0xbeb1171f -221, 0x45b66703 -222, 0x78c03349 -223, 0x38d2a6ff -224, 0x935cae8b -225, 0x1d07dc3f -226, 0x6c1ed365 -227, 0x579fc585 -228, 0x1320c0ec -229, 0x632757eb -230, 0xd265a397 -231, 0x70e9b6c2 -232, 
0xc81e322c -233, 0xa27153cf -234, 0x2118ba19 -235, 0x514ec400 -236, 0x2bd0ecd6 -237, 0xc3e7dae3 -238, 0xfa39355e -239, 0x48f23cc1 -240, 0xbcf75948 -241, 0x53ccc70c -242, 0x75346423 -243, 0x951181e0 -244, 0x348e90df -245, 0x14365d7f -246, 0xfbc95d7a -247, 0xdc98a9e6 -248, 0xed202df7 -249, 0xa59ec913 -250, 0x6b6e9ae2 -251, 0x1697f265 -252, 0x15d322d0 -253, 0xa2e7ee0a -254, 0x88860b7e -255, 0x455d8b9d -256, 0x2f5c59cb -257, 0xac49c9f1 -258, 0xa6a6a039 -259, 0xc057f56b -260, 0xf1ff1208 -261, 0x5eb8dc9d -262, 0xe6702509 -263, 0xe238b0ed -264, 0x5ae32e3d -265, 0xa88ebbdf -266, 0xef885ae7 -267, 0xafa6d49b -268, 0xc94499e0 -269, 0x1a196325 -270, 0x88938da3 -271, 0x14f4345 -272, 0xd8e33637 -273, 0xa3551bd5 -274, 0x73fe35c7 -275, 0x9561e94b -276, 0xd673bf68 -277, 0x16134872 -278, 0x68c42f9f -279, 0xdf7574c8 -280, 0x8809bab9 -281, 0x1432cf69 -282, 0xafb66bf1 -283, 0xc184aa7b -284, 0xedbf2007 -285, 0xbd420ce1 -286, 0x761033a0 -287, 0xff7e351f -288, 0xd6c3780e -289, 0x5844416f -290, 0xc6c0ee1c -291, 0xd2e147db -292, 0x92ac601a -293, 0x393e846b -294, 0x18196cca -295, 0x54a22be -296, 0x32bab1c4 -297, 0x60365183 -298, 0x64fa342 -299, 0xca24a493 -300, 0xd8cc8b83 -301, 0x3faf102b -302, 0x6e09bb58 -303, 0x812f0ea -304, 0x592c95d8 -305, 0xe45ea4c5 -306, 0x23aebf83 -307, 0xbd9691d4 -308, 0xf47b4baa -309, 0x4ac7b487 -310, 0xcce18803 -311, 0x3377556e -312, 0x3ff8e6b6 -313, 0x99d22063 -314, 0x23250bec -315, 0x4e1f9861 -316, 0x8554249b -317, 0x8635c2fc -318, 0xe8426e8a -319, 0x966c29d8 -320, 0x270b6082 -321, 0x3180a8a1 -322, 0xe7e1668b -323, 0x7f868dc -324, 0xcf4c17cf -325, 0xe31de4d1 -326, 0xc8c8aff4 -327, 0xae8db704 -328, 0x3c928cc2 -329, 0xe12cd48 -330, 0xb33ecd04 -331, 0xb93d7cbe -332, 0x49c69d6a -333, 0x7d3bce64 -334, 0x86bc219 -335, 0x8408233b -336, 0x44dc7479 -337, 0xdf80d538 -338, 0xf3db02c3 -339, 0xbbbd31d7 -340, 0x121281f -341, 0x7521e9a3 -342, 0x8859675a -343, 0x75aa6502 -344, 0x430ed15b -345, 0xecf0a28d -346, 0x659774fd -347, 0xd58a2311 -348, 0x512389a9 -349, 0xff65e1ff -350, 
0xb6ddf222 -351, 0xe3458895 -352, 0x8b13cd6e -353, 0xd4a22870 -354, 0xe604c50c -355, 0x27f54f26 -356, 0x8f7f422f -357, 0x9735b4cf -358, 0x414072b0 -359, 0x76a1c6d5 -360, 0xa2208c06 -361, 0x83cd0f61 -362, 0x6c4f7ead -363, 0x6553cf76 -364, 0xeffcf44 -365, 0x7f434a3f -366, 0x9dc364bd -367, 0x3cdf52ed -368, 0xad597594 -369, 0x9c3e211b -370, 0x6c04a33f -371, 0x885dafa6 -372, 0xbbdaca71 -373, 0x7ae5dd5c -374, 0x37675644 -375, 0x251853c6 -376, 0x130b086b -377, 0x143fa54b -378, 0x54cdc282 -379, 0x9faff5b3 -380, 0x502a5c8b -381, 0xd9524550 -382, 0xae221aa6 -383, 0x55cf759b -384, 0x24782da4 -385, 0xd715d815 -386, 0x250ea09a -387, 0x4e0744ac -388, 0x11e15814 -389, 0xabe5f9df -390, 0xc8146350 -391, 0xfba67d9b -392, 0x2b82e42f -393, 0xd4ea96fc -394, 0x5ffc179e -395, 0x1598bafe -396, 0x7fb6d662 -397, 0x1a12a0db -398, 0x450cee4a -399, 0x85f8e12 -400, 0xce71b594 -401, 0xd4bb1d19 -402, 0x968f379d -403, 0x54cc1d52 -404, 0x467e6066 -405, 0x7da5f9a9 -406, 0x70977034 -407, 0x49e65c4b -408, 0xd08570d1 -409, 0x7acdf60b -410, 0xdffa038b -411, 0x9ce14e4c -412, 0x107cbbf8 -413, 0xdd746ca0 -414, 0xc6370a46 -415, 0xe7f83312 -416, 0x373fa9ce -417, 0xd822a2c6 -418, 0x1d4efea6 -419, 0xc53dcadb -420, 0x9b4e898f -421, 0x71daa6bf -422, 0x7a0bc78b -423, 0xd7b86f50 -424, 0x1b8b3286 -425, 0xcf9425dd -426, 0xd5263220 -427, 0x4ea0b647 -428, 0xc767fe64 -429, 0xcfc5e67 -430, 0xcc6a2942 -431, 0xa51eff00 -432, 0x76092e1b -433, 0xf606e80f -434, 0x824b5e20 -435, 0xebb55e14 -436, 0x783d96a6 -437, 0x10696512 -438, 0x17ee510a -439, 0x3ab70a1f -440, 0xcce6b210 -441, 0x8f72f0fb -442, 0xf0610b41 -443, 0x83d01fb5 -444, 0x6b3de36 -445, 0xe4c2e84f -446, 0x9c43bb15 -447, 0xddf2905 -448, 0x7dd63556 -449, 0x3662ca09 -450, 0xfb81f35b -451, 0xc2c8a72a -452, 0x8e93c37 -453, 0xa93da2d4 -454, 0xa03af8f1 -455, 0x8d75159a -456, 0x15f010b0 -457, 0xa296ab06 -458, 0xe55962ba -459, 0xeae700a9 -460, 0xe388964a -461, 0x917f2bec -462, 0x1c203fea -463, 0x792a01ba -464, 0xa93a80ac -465, 0x9eb8a197 -466, 0x56c0bc73 -467, 0xb8f05799 -468, 
0xf429a8c8 -469, 0xb92cee42 -470, 0xf8864ec -471, 0x62f2518a -472, 0x3a7bfa3e -473, 0x12e56e6d -474, 0xd7a18313 -475, 0x41fa3899 -476, 0xa09c4956 -477, 0xebcfd94a -478, 0xc485f90b -479, 0x4391ce40 -480, 0x742a3333 -481, 0xc932f9e5 -482, 0x75c6c263 -483, 0x80937f0 -484, 0xcf21833c -485, 0x16027520 -486, 0xd42e669f -487, 0xb0f01fb7 -488, 0xb35896f1 -489, 0x763737a9 -490, 0x1bb20209 -491, 0x3551f189 -492, 0x56bc2602 -493, 0xb6eacf4 -494, 0x42ec4d11 -495, 0x245cc68 -496, 0xc27ac43b -497, 0x9d903466 -498, 0xce3f0c05 -499, 0xb708c31c -500, 0xc0fd37eb -501, 0x95938b2c -502, 0xf20175a7 -503, 0x4a86ee9b -504, 0xbe039a58 -505, 0xd41cabe7 -506, 0x83bc99ba -507, 0x761d60e1 -508, 0x7737cc2e -509, 0x2b82fc4b -510, 0x375aa401 -511, 0xfe9597a0 -512, 0x5543806a -513, 0x44f31238 -514, 0x7df31538 -515, 0x74cfa770 -516, 0x8755d881 -517, 0x1fde665a -518, 0xda8bf315 -519, 0x973d8e95 -520, 0x72205228 -521, 0x8fe59717 -522, 0x7bb90b34 -523, 0xef6ed945 -524, 0x16fd4a38 -525, 0x5db44de1 -526, 0xf09f93b3 -527, 0xe84824cc -528, 0x945bb50e -529, 0xd0be4aa5 -530, 0x47c277c2 -531, 0xd3800c28 -532, 0xac1c33ec -533, 0xd3dacce -534, 0x811c8387 -535, 0x6761b36 -536, 0x70d3882f -537, 0xd6e62e3a -538, 0xea25daa2 -539, 0xb07f39d1 -540, 0x391d89d7 -541, 0x84b6fb5e -542, 0x3dda3fca -543, 0x229e80a4 -544, 0x3d94a4b7 -545, 0x5d3d576a -546, 0xad7818a0 -547, 0xce23b03a -548, 0x7aa2079c -549, 0x9a6be555 -550, 0x83f3b34a -551, 0x1848f9d9 -552, 0xd8fefc1c -553, 0x48e6ce48 -554, 0x52e55750 -555, 0xf41a71cf -556, 0xba08e259 -557, 0xfaf06a15 -558, 0xeaaac0fb -559, 0x34f90098 -560, 0xb1dfffbb -561, 0x718daec2 -562, 0xab4dda21 -563, 0xd27cc1ee -564, 0x4aafbc4c -565, 0x356dfb4f -566, 0x83fcdfd6 -567, 0x8f0bcde0 -568, 0x4363f844 -569, 0xadc0f4d5 -570, 0x3bde994e -571, 0x3884d452 -572, 0x21876b4a -573, 0x9c985398 -574, 0xca55a226 -575, 0x3a88c583 -576, 0x916dc33c -577, 0x8f67d1d7 -578, 0x3b26a667 -579, 0xe4ddeb4b -580, 0x1a9d8c33 -581, 0x81c9b74f -582, 0x9ed1e9df -583, 0x6e61aecf -584, 0x95e95a5d -585, 0x68864ff5 -586, 
0xb8fa5b9 -587, 0x72b1b3de -588, 0x5e18a86b -589, 0xd7f2337d -590, 0xd70e0925 -591, 0xb573a4c1 -592, 0xc77b3f8a -593, 0x389b20de -594, 0x16cf6afb -595, 0xa39bd275 -596, 0xf491cf01 -597, 0x6f88a802 -598, 0x8510af05 -599, 0xe7cd549a -600, 0x8603179a -601, 0xef43f191 -602, 0xf9b64c60 -603, 0xb00254a7 -604, 0xd7c06a2d -605, 0x17e9380b -606, 0x529e727b -607, 0xaaa8fe0a -608, 0xfb64ff4c -609, 0xcd75af26 -610, 0xfb717c87 -611, 0xa0789899 -612, 0x10391ec9 -613, 0x7e9b40b3 -614, 0x18536554 -615, 0x728c05f7 -616, 0x787dca98 -617, 0xad948d1 -618, 0x44c18def -619, 0x3303f2ec -620, 0xa15acb5 -621, 0xb58d38f4 -622, 0xfe041ef8 -623, 0xd151a956 -624, 0x7b9168e8 -625, 0x5ebeca06 -626, 0x90fe95df -627, 0xf76875aa -628, 0xb2e0d664 -629, 0x2e3253b7 -630, 0x68e34469 -631, 0x1f0c2d89 -632, 0x13a34ac2 -633, 0x5ffeb841 -634, 0xe381e91c -635, 0xb8549a92 -636, 0x3f35cf1 -637, 0xda0f9dcb -638, 0xdd9828a6 -639, 0xe1428f29 -640, 0xf4db80b5 -641, 0xdac30af5 -642, 0x1af1dd17 -643, 0x9a540254 -644, 0xcab68a38 -645, 0x33560361 -646, 0x2fbf3886 -647, 0xbc785923 -648, 0xe081cd10 -649, 0x8e473356 -650, 0xd102c357 -651, 0xeea4fe48 -652, 0x248d3453 -653, 0x1da79ac -654, 0x815a65ff -655, 0x27693e76 -656, 0xb7d5af40 -657, 0x6d245d30 -658, 0x9e06fa8f -659, 0xb0570dcb -660, 0x469f0005 -661, 0x3e0ca132 -662, 0xd89bbf3 -663, 0xd61ccd47 -664, 0x6383878 -665, 0x62b5956 -666, 0x4dc83675 -667, 0x93fd8492 -668, 0x5a0091f5 -669, 0xc9f9bc3 -670, 0xa26e7778 -671, 0xeabf2d01 -672, 0xe612dc06 -673, 0x85d89ff9 -674, 0xd1763179 -675, 0xcb88947b -676, 0x9e8757a5 -677, 0xe100e85c -678, 0x904166eb -679, 0x4996243d -680, 0x4038e1cb -681, 0x2be2c63d -682, 0x77017e81 -683, 0x3b1f556b -684, 0x1c785c77 -685, 0x6869b8bd -686, 0xe1217ed4 -687, 0x4012ab2f -688, 0xc06c0d8e -689, 0x2122eb68 -690, 0xad1783fd -691, 0x5f0c80e3 -692, 0x828f7efa -693, 0x29328399 -694, 0xeadf1087 -695, 0x85dc0037 -696, 0x9691ef26 -697, 0xc0947a53 -698, 0x2a178d2a -699, 0x2a2c7e8f -700, 0x90378380 -701, 0xaad8d326 -702, 0x9cf1c3c8 -703, 0x84eccd44 -704, 
0x79e61808 -705, 0x8b3f454e -706, 0x209e6e1 -707, 0x51f88378 -708, 0xc210226f -709, 0xd982adb5 -710, 0x55d44a31 -711, 0x9817d443 -712, 0xa328c626 -713, 0x13455966 -714, 0xb8f681d3 -715, 0x2a3c713b -716, 0xc186959b -717, 0x814a74b0 -718, 0xed7bc90 -719, 0xa88d3d6d -720, 0x88a9f561 -721, 0x73aa1c0a -722, 0xdfeff404 -723, 0xec037e4b -724, 0xa5c209f0 -725, 0xb3a223b4 -726, 0x24ce3709 -727, 0x3184c790 -728, 0xa1398c62 -729, 0x2f92034e -730, 0xbb37a79a -731, 0x605287b4 -732, 0x8faa772c -733, 0x6ce56c1d -734, 0xc035fb4c -735, 0x7cf5b316 -736, 0x6502645 -737, 0xa283d810 -738, 0x778bc2f1 -739, 0xfdf99313 -740, 0x1f513265 -741, 0xbd3837e2 -742, 0x9b84a9a -743, 0x2139ce91 -744, 0x61a8e890 -745, 0xf9ff12db -746, 0xb43d2ea7 -747, 0x88532e61 -748, 0x175a6655 -749, 0x7a6c4f72 -750, 0x6dafc1b7 -751, 0x449b1459 -752, 0x514f654f -753, 0x9a6731e2 -754, 0x8632da43 -755, 0xc81b0422 -756, 0x81fe9005 -757, 0x15b79618 -758, 0xb5fa629f -759, 0x987a474f -760, 0x1c74f54e -761, 0xf9743232 -762, 0xec4b55f -763, 0x87d761e5 -764, 0xd1ad78b7 -765, 0x453d9350 -766, 0xc7a7d85 -767, 0xb2576ff5 -768, 0xcdde49b7 -769, 0x8e1f763e -770, 0x1338583e -771, 0xfd65b9dc -772, 0x4f19c4f4 -773, 0x3a52d73d -774, 0xd3509c4c -775, 0xda24fe31 -776, 0xe2de56ba -777, 0x2db5e540 -778, 0x23172734 -779, 0x4db572f -780, 0xeb941718 -781, 0x84c2649a -782, 0x3b1e5b6a -783, 0x4c9c61b9 -784, 0x3bccd11 -785, 0xb4d7b78e -786, 0x48580ae5 -787, 0xd273ab68 -788, 0x25c11615 -789, 0x470b53f6 -790, 0x329c2068 -791, 0x1693721b -792, 0xf8c9aacf -793, 0x4c3d5693 -794, 0xd778284e -795, 0xae1cb24f -796, 0x3c11d1b3 -797, 0xddd2b0c0 -798, 0x90269fa7 -799, 0x5666e0a2 -800, 0xf9f195a4 -801, 0x61d78eb2 -802, 0xada5a7c0 -803, 0xaa272fbe -804, 0xba3bae2f -805, 0xd0b70fc2 -806, 0x529f32b -807, 0xda7a3e21 -808, 0x9a776a20 -809, 0xb21f9635 -810, 0xb3acc14e -811, 0xac55f56 -812, 0x29dccf41 -813, 0x32dabdb3 -814, 0xaa032f58 -815, 0xfa406af4 -816, 0xce3c415d -817, 0xb44fb4d9 -818, 0x32248d1c -819, 0x680c6440 -820, 0xae2337b -821, 0x294cb597 -822, 
0x5bca48fe -823, 0xaef19f40 -824, 0xad60406 -825, 0x4781f090 -826, 0xfd691ffc -827, 0xb6568268 -828, 0xa56c72cb -829, 0xf8a9e0fc -830, 0x9af4fd02 -831, 0x2cd30932 -832, 0x776cefd7 -833, 0xe31f476e -834, 0x6d94a437 -835, 0xb3cab598 -836, 0xf582d13f -837, 0x3bf8759d -838, 0xc3777dc -839, 0x5e425ea8 -840, 0x1c7ff4ed -841, 0x1c2e97d1 -842, 0xc062d2b4 -843, 0x46dc80e0 -844, 0xbcdb47e6 -845, 0x32282fe0 -846, 0xaba89063 -847, 0x5e94e9bb -848, 0x3e667f78 -849, 0xea6eb21a -850, 0xe56e54e8 -851, 0xa0383510 -852, 0x6768fe2b -853, 0xb53ac3e0 -854, 0x779569a0 -855, 0xeca83c6a -856, 0x24db4d2d -857, 0x4585f696 -858, 0xf84748b2 -859, 0xf6a4dd5b -860, 0x31fb524d -861, 0x67ab39fe -862, 0x5882a899 -863, 0x9a05fcf6 -864, 0x712b5674 -865, 0xe8c6958f -866, 0x4b448bb3 -867, 0x530b9abf -868, 0xb491f491 -869, 0x98352c62 -870, 0x2d0a50e3 -871, 0xeb4384da -872, 0x36246f07 -873, 0xcbc5c1a -874, 0xae24031d -875, 0x44d11ed6 -876, 0xf07f1608 -877, 0xf296aadd -878, 0x3bcfe3be -879, 0x8fa1e7df -880, 0xfd317a6e -881, 0xe4975c44 -882, 0x15205892 -883, 0xa762d4df -884, 0xf1167365 -885, 0x6811cc00 -886, 0x8315f23 -887, 0xe045b4b1 -888, 0xa8496414 -889, 0xbed313ae -890, 0xcdae3ddb -891, 0xa9c22c9 -892, 0x275fab1a -893, 0xedd65fa -894, 0x4c188229 -895, 0x63a83e58 -896, 0x18aa9207 -897, 0xa41f2e78 -898, 0xd9f63653 -899, 0xbe2be73b -900, 0xa3364d39 -901, 0x896d5428 -902, 0xc737539e -903, 0x745a78c6 -904, 0xf0b2b042 -905, 0x510773b4 -906, 0x92ad8e37 -907, 0x27f2f8c4 -908, 0x23704cc8 -909, 0x3d95a77f -910, 0xf08587a4 -911, 0xbd696a25 -912, 0x948924f3 -913, 0x8cddb634 -914, 0xcd2a4910 -915, 0x8e0e300e -916, 0x83815a9b -917, 0x67383510 -918, 0x3c18f0d0 -919, 0xc7a7bccc -920, 0x7cc2d3a2 -921, 0x52eb2eeb -922, 0xe4a257e5 -923, 0xec76160e -924, 0x63f9ad68 -925, 0x36d0bbbf -926, 0x957bc4e4 -927, 0xc9ed90ff -928, 0x4cb6059d -929, 0x2f86eca1 -930, 0x3e3665a3 -931, 0x9b7eb6f4 -932, 0x492e7e18 -933, 0xa098aa51 -934, 0x7eb568b2 -935, 0x3fd639ba -936, 0x7bebcf1 -937, 0x99c844ad -938, 0x43cb5ec7 -939, 0x8dfbbef5 -940, 
0x5be413ff -941, 0xd93b976d -942, 0xc1c7a86d -943, 0x1f0e93d0 -944, 0x498204a2 -945, 0xe8fe832a -946, 0x2236bd7 -947, 0x89953769 -948, 0x2acc3491 -949, 0x2c4f22c6 -950, 0xd7996277 -951, 0x3bcdc349 -952, 0xfc286630 -953, 0x5f8909fd -954, 0x242677c0 -955, 0x4cb34104 -956, 0xa6ff8100 -957, 0x39ea47ec -958, 0x9bd54140 -959, 0x7502ffe8 -960, 0x7ebef8ae -961, 0x1ed8abe4 -962, 0xfaba8450 -963, 0xc197b65f -964, 0x19431455 -965, 0xe229c176 -966, 0xeb2967da -967, 0xe0c5dc05 -968, 0xa84e3227 -969, 0x10dd9e0f -970, 0xbdb70b02 -971, 0xce24808a -972, 0x423edab8 -973, 0x194caf71 -974, 0x144f150d -975, 0xf811c2d2 -976, 0xc224ee85 -977, 0x2b217a5b -978, 0xf78a5a79 -979, 0x6554a4b1 -980, 0x769582df -981, 0xf4b2cf93 -982, 0x89648483 -983, 0xb3283a3e -984, 0x82b895db -985, 0x79388ef0 -986, 0x54bc42a6 -987, 0xc4dd39d9 -988, 0x45b33b7d -989, 0x8703b2c1 -990, 0x1cc94806 -991, 0xe0f43e49 -992, 0xcaa7b6bc -993, 0x4f88e9af -994, 0x1477cce5 -995, 0x347dd115 -996, 0x36e335fa -997, 0xb93c9a31 -998, 0xaac3a175 -999, 0x68a19647 diff --git a/venv/lib/python3.7/site-packages/numpy/random/tests/data/mt19937-testset-2.csv b/venv/lib/python3.7/site-packages/numpy/random/tests/data/mt19937-testset-2.csv deleted file mode 100644 index cdb8e47..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/tests/data/mt19937-testset-2.csv +++ /dev/null @@ -1,1001 +0,0 @@ -seed, 0x0 -0, 0x7ab4ea94 -1, 0x9b561119 -2, 0x4957d02e -3, 0x7dd3fdc2 -4, 0x5affe54 -5, 0x5a01741c -6, 0x8b9e8c1f -7, 0xda5bf11a -8, 0x509226 -9, 0x64e2ea17 -10, 0x82c6dab5 -11, 0xe4302515 -12, 0x8198b873 -13, 0xc3ec9a82 -14, 0x829dff28 -15, 0x5278e44f -16, 0x994a7d2c -17, 0xf1c89398 -18, 0xaf2fddec -19, 0x22abc6ee -20, 0x963dbd43 -21, 0xc29edffb -22, 0x41c1ce07 -23, 0x9c90034d -24, 0x1f17a796 -25, 0x3833caa8 -26, 0xb8795528 -27, 0xebc595a2 -28, 0xf8f5b5dd -29, 0xc2881f72 -30, 0x18e5d3f0 -31, 0x9b19ac7a -32, 0xb9992436 -33, 0xc00052b3 -34, 0xb63f4475 -35, 0x962642d9 -36, 0x63506c10 -37, 0x2be6b127 -38, 0x569bdbc6 -39, 0x7f185e01 -40, 
0xebb55f53 -41, 0x1c30198c -42, 0x7c8d75c6 -43, 0xd3f2186b -44, 0xaca5b9b1 -45, 0xbc49ff45 -46, 0xc4a802af -47, 0x2cecd86f -48, 0x8e0da529 -49, 0x1f22b00e -50, 0x4559ea80 -51, 0x60f587d8 -52, 0x7c7460e9 -53, 0x67be0a4a -54, 0x987a0183 -55, 0x7bd30f1 -56, 0xab18c4ac -57, 0xffdbfb64 -58, 0x9ea917f9 -59, 0x1239dab7 -60, 0x38efabeb -61, 0x5da91888 -62, 0x8f49ed62 -63, 0x83f60b1e -64, 0x5950a3fc -65, 0xd8911104 -66, 0x19e8859e -67, 0x1a4d89ec -68, 0x968ca180 -69, 0x9e1b6da3 -70, 0x3d99c2c -71, 0x55f76289 -72, 0x8fa28b9e -73, 0x9fe01d33 -74, 0xdade4e38 -75, 0x1ea04290 -76, 0xa7263313 -77, 0xaafc762e -78, 0x460476d6 -79, 0x31226e12 -80, 0x451d3f05 -81, 0xd0d2764b -82, 0xd06e1ab3 -83, 0x1394e3f4 -84, 0x2fc04ea3 -85, 0x5b8401c -86, 0xebd6c929 -87, 0xe881687c -88, 0x94bdd66a -89, 0xabf85983 -90, 0x223ad12d -91, 0x2aaeeaa3 -92, 0x1f704934 -93, 0x2db2efb6 -94, 0xf49b8dfb -95, 0x5bdbbb9d -96, 0xba0cd0db -97, 0x4ec4674e -98, 0xad0129e -99, 0x7a66129b -100, 0x50d12c5e -101, 0x85b1d335 -102, 0x3efda58a -103, 0xecd886fb -104, 0x8ecadd3d -105, 0x60ebac0f -106, 0x5e10fe79 -107, 0xa84f7e5d -108, 0x43931288 -109, 0xfacf448 -110, 0x4ee01997 -111, 0xcdc0a651 -112, 0x33c87037 -113, 0x8b50fc03 -114, 0xf52aad34 -115, 0xda6cd856 -116, 0x7585bea0 -117, 0xe947c762 -118, 0x4ddff5d8 -119, 0xe0e79b3b -120, 0xb804cf09 -121, 0x84765c44 -122, 0x3ff666b4 -123, 0xe31621ad -124, 0x816f2236 -125, 0x228176bc -126, 0xfdc14904 -127, 0x635f5077 -128, 0x6981a817 -129, 0xfd9a0300 -130, 0xd3fa8a24 -131, 0xd67c1a77 -132, 0x903fe97a -133, 0xf7c4a4d5 -134, 0x109f2058 -135, 0x48ab87fe -136, 0xfd6f1928 -137, 0x707e9452 -138, 0xf327db9e -139, 0x7b80d76d -140, 0xfb6ba193 -141, 0x454a1ad0 -142, 0xe20b51e -143, 0xb774d085 -144, 0x6b1ed574 -145, 0xb1e77de4 -146, 0xe2a83b37 -147, 0x33d3176f -148, 0x2f0ca0fc -149, 0x17f51e2 -150, 0x7c1fbf55 -151, 0xf09e9cd0 -152, 0xe3d9bacd -153, 0x4244db0a -154, 0x876c09fc -155, 0x9db4fc2f -156, 0xd3771d60 -157, 0x25fc6a75 -158, 0xb309915c -159, 0xc50ee027 -160, 0xaa5b7b38 -161, 
0x4c650ded -162, 0x1acb2879 -163, 0x50db5887 -164, 0x90054847 -165, 0xfef23e5b -166, 0x2dd7b7d5 -167, 0x990b8c2e -168, 0x6001a601 -169, 0xb5d314c4 -170, 0xfbfb7bf9 -171, 0x1aba997d -172, 0x814e7304 -173, 0x989d956a -174, 0x86d5a29c -175, 0x70a9fa08 -176, 0xc4ccba87 -177, 0x7e9cb366 -178, 0xee18eb0a -179, 0x44f5be58 -180, 0x91d4af2d -181, 0x5ab6e593 -182, 0x9fd6bb4d -183, 0x85894ce -184, 0x728a2401 -185, 0xf006f6d4 -186, 0xd782741e -187, 0x842cd5bd -188, 0xfb5883aa -189, 0x7e5a471 -190, 0x83ff6965 -191, 0xc9675c6b -192, 0xb6ced3c7 -193, 0x3de6425b -194, 0x25e14db4 -195, 0x69ca3dec -196, 0x81342d13 -197, 0xd7cd8417 -198, 0x88d15e69 -199, 0xefba17c9 -200, 0x43d595e6 -201, 0x89d4cf25 -202, 0x7cae9b9b -203, 0x2242c621 -204, 0x27fc3598 -205, 0x467b1d84 -206, 0xe84d4622 -207, 0xa26bf980 -208, 0x80411010 -209, 0xe2c2bfea -210, 0xbc6ca25a -211, 0x3ddb592a -212, 0xdd46eb9e -213, 0xdfe8f657 -214, 0x2cedc974 -215, 0xf0dc546b -216, 0xd46be68f -217, 0x26d8a5aa -218, 0x76e96ba3 -219, 0x7d5b5353 -220, 0xf532237c -221, 0x6478b79 -222, 0x9b81a5e5 -223, 0x5fc68e5c -224, 0x68436e70 -225, 0x2a0043f9 -226, 0x108d523c -227, 0x7a4c32a3 -228, 0x9c84c742 -229, 0x6f813dae -230, 0xfcc5bbcc -231, 0x215b6f3a -232, 0x84cb321d -233, 0x7913a248 -234, 0xb1e6b585 -235, 0x49376b31 -236, 0x1dc896b0 -237, 0x347051ad -238, 0x5524c042 -239, 0xda0eef9d -240, 0xf2e73342 -241, 0xbeee2f9d -242, 0x7c702874 -243, 0x9eb3bd34 -244, 0x97b09700 -245, 0xcdbab1d4 -246, 0x4a2f6ed1 -247, 0x2047bda5 -248, 0x3ecc7005 -249, 0x8d0d5e67 -250, 0x40876fb5 -251, 0xb5fd2187 -252, 0xe915d8af -253, 0x9a2351c7 -254, 0xccc658ae -255, 0xebb1eddc -256, 0xc4a83671 -257, 0xffb2548f -258, 0xe4fe387a -259, 0x477aaab4 -260, 0x8475a4e4 -261, 0xf8823e46 -262, 0xe4130f71 -263, 0xbdb54482 -264, 0x98fe0462 -265, 0xf36b27b8 -266, 0xed7733da -267, 0x5f428afc -268, 0x43a3a21a -269, 0xf8370b55 -270, 0xfade1de1 -271, 0xd9a038ea -272, 0x3c69af23 -273, 0x24df7dd0 -274, 0xf66d9353 -275, 0x71d811be -276, 0xcc4d024b -277, 0xb8c30bf0 -278, 0x4198509d 
-279, 0x8b37ba36 -280, 0xa41ae29a -281, 0x8cf7799e -282, 0x5cd0136a -283, 0xa11324ef -284, 0x2f8b6d4b -285, 0x3657cf17 -286, 0x35b6873f -287, 0xee6e5bd7 -288, 0xbeeaa98 -289, 0x9ad3c581 -290, 0xe2376c3f -291, 0x738027cc -292, 0x536ac839 -293, 0xf066227 -294, 0x6c9cb0f9 -295, 0x84082ae6 -296, 0xab38ae9d -297, 0x493eade9 -298, 0xcb630b3a -299, 0x64d44250 -300, 0xe5efb557 -301, 0xea2424d9 -302, 0x11a690ba -303, 0x30a48ae4 -304, 0x58987e53 -305, 0x94ec6076 -306, 0x5d3308fa -307, 0xf1635ebb -308, 0x56a5ab90 -309, 0x2b2f2ee4 -310, 0x6f9e6483 -311, 0x8b93e327 -312, 0xa7ce140b -313, 0x4c8aa42 -314, 0x7657bb3f -315, 0xf250fd75 -316, 0x1edfcb0f -317, 0xdb42ace3 -318, 0xf8147e16 -319, 0xd1992bd -320, 0x64bb14d1 -321, 0x423e724d -322, 0x7b172f7c -323, 0x17171696 -324, 0x4acaf83b -325, 0x7a83527e -326, 0xfc980c60 -327, 0xc8b56bb -328, 0x2453f77f -329, 0x85ad1bf9 -330, 0x62a85dfe -331, 0x48238c4d -332, 0xbb3ec1eb -333, 0x4c1c039c -334, 0x1f37f571 -335, 0x98aecb63 -336, 0xc3b3ddd6 -337, 0xd22dad4 -338, 0xe49671a3 -339, 0xe3baf945 -340, 0xb9e21680 -341, 0xda562856 -342, 0xe8b88ce4 -343, 0x86f88de2 -344, 0x986faf76 -345, 0x6f0025c3 -346, 0x3fe21234 -347, 0xd8d3f729 -348, 0xc2d11c6f -349, 0xd4f9e8f -350, 0xf61a0aa -351, 0xc48bb313 -352, 0xe944e940 -353, 0xf1801b2e -354, 0x253590be -355, 0x981f069d -356, 0x891454d8 -357, 0xa4f824ad -358, 0x6dd2cc48 -359, 0x3018827e -360, 0x3fb329e6 -361, 0x65276517 -362, 0x8d2c0dd2 -363, 0xc965b48e -364, 0x85d14d90 -365, 0x5a51623c -366, 0xa9573d6a -367, 0x82d00edf -368, 0x5ed7ce07 -369, 0x1d946abc -370, 0x24fa567b -371, 0x83ef5ecc -372, 0x9001724a -373, 0xc4fe48f3 -374, 0x1e07c25c -375, 0xf4d5e65e -376, 0xb734f6e9 -377, 0x327a2df8 -378, 0x766d59b7 -379, 0x625e6b61 -380, 0xe82f32d7 -381, 0x1566c638 -382, 0x2e815871 -383, 0x606514aa -384, 0x36b7386e -385, 0xcaa8ce08 -386, 0xb453fe9c -387, 0x48574e23 -388, 0x71f0da06 -389, 0xa8a79463 -390, 0x6b590210 -391, 0x86e989db -392, 0x42899f4f -393, 0x7a654ef9 -394, 0x4c4fe932 -395, 0x77b2fd10 -396, 0xb6b4565c 
-397, 0xa2e537a3 -398, 0xef5a3dca -399, 0x41235ea8 -400, 0x95c90541 -401, 0x50ad32c4 -402, 0xc1b8e0a4 -403, 0x498e9aab -404, 0xffc965f1 -405, 0x72633485 -406, 0x3a731aef -407, 0x7cfddd0b -408, 0xb04d4129 -409, 0x184fc28e -410, 0x424369b0 -411, 0xf9ae13a1 -412, 0xaf357c8d -413, 0x7a19228e -414, 0xb46de2a8 -415, 0xeff2ac76 -416, 0xa6c9357b -417, 0x614f19c1 -418, 0x8ee1a53f -419, 0xbe1257b1 -420, 0xf72651fe -421, 0xd347c298 -422, 0x96dd2f23 -423, 0x5bb1d63e -424, 0x32e10887 -425, 0x36a144da -426, 0x9d70e791 -427, 0x5e535a25 -428, 0x214253da -429, 0x2e43dd40 -430, 0xfc0413f4 -431, 0x1f5ea409 -432, 0x1754c126 -433, 0xcdbeebbe -434, 0x1fb44a14 -435, 0xaec7926 -436, 0xb9d9a1e -437, 0x9e4a6577 -438, 0x8b1f04c5 -439, 0x19854e8a -440, 0x531080cd -441, 0xc0cbd73 -442, 0x20399d77 -443, 0x7d8e9ed5 -444, 0x66177598 -445, 0x4d18a5c2 -446, 0xe08ebf58 -447, 0xb1f9c87b -448, 0x66bedb10 -449, 0x26670d21 -450, 0x7a7892da -451, 0x69b69d86 -452, 0xd04f1d1c -453, 0xaf469625 -454, 0x7946b813 -455, 0x1ee596bd -456, 0x7f365d85 -457, 0x795b662b -458, 0x194ad02d -459, 0x5a9649b5 -460, 0x6085e278 -461, 0x2cf54550 -462, 0x9c77ea0b -463, 0x3c6ff8b -464, 0x2141cd34 -465, 0xb90bc671 -466, 0x35037c4b -467, 0xd04c0d76 -468, 0xc75bff8 -469, 0x8f52003b -470, 0xfad3d031 -471, 0x667024bc -472, 0xcb04ea36 -473, 0x3e03d587 -474, 0x2644d3a0 -475, 0xa8fe99ba -476, 0x2b9a55fc -477, 0x45c4d44a -478, 0xd059881 -479, 0xe07fcd20 -480, 0x4e22046c -481, 0x7c2cbf81 -482, 0xbf7f23de -483, 0x69d924c3 -484, 0xe53cd01 -485, 0x3879017c -486, 0xa590e558 -487, 0x263bc076 -488, 0x245465b1 -489, 0x449212c6 -490, 0x249dcb29 -491, 0x703d42d7 -492, 0x140eb9ec -493, 0xc86c5741 -494, 0x7992aa5b -495, 0xb8b76a91 -496, 0x771dac3d -497, 0x4ecd81e3 -498, 0xe5ac30b3 -499, 0xf4d7a5a6 -500, 0xac24b97 -501, 0x63494d78 -502, 0x627ffa89 -503, 0xfa4f330 -504, 0x8098a1aa -505, 0xcc0c61dc -506, 0x34749fa0 -507, 0x7f217822 -508, 0x418d6f15 -509, 0xa4b6e51e -510, 0x1036de68 -511, 0x1436986e -512, 0x44df961d -513, 0x368e4651 -514, 0x6a9e5d8c 
-515, 0x27d1597e -516, 0xa1926c62 -517, 0x8d1f2b55 -518, 0x5797eb42 -519, 0xa90f9e81 -520, 0x57547b10 -521, 0xdbbcca8e -522, 0x9edd2d86 -523, 0xbb0a7527 -524, 0x7662380c -525, 0xe7c98590 -526, 0x950fbf3f -527, 0xdc2b76b3 -528, 0x8a945102 -529, 0x3f0a1a85 -530, 0xeb215834 -531, 0xc59f2802 -532, 0xe2a4610 -533, 0x8b5a8665 -534, 0x8b2d9933 -535, 0x40a4f0bc -536, 0xaab5bc67 -537, 0x1442a69e -538, 0xdf531193 -539, 0x698d3db4 -540, 0x2d40324e -541, 0x1a25feb2 -542, 0xe8cc898f -543, 0xf12e98f5 -544, 0xc03ad34c -545, 0xf62fceff -546, 0xdd827e1e -547, 0x7d8ccb3b -548, 0xab2d6bc1 -549, 0xc323a124 -550, 0x8184a19a -551, 0xc3c4e934 -552, 0x5487424d -553, 0xd6a81a44 -554, 0x90a8689d -555, 0xe69c4c67 -556, 0xbdae02dd -557, 0x72a18a79 -558, 0x2a88e907 -559, 0x31cf4b5d -560, 0xb157772f -561, 0x206ba601 -562, 0x18529232 -563, 0x7dac90d8 -564, 0x3a5f8a09 -565, 0x9f4b64a3 -566, 0xae373af9 -567, 0x1d79447c -568, 0x2a23684b -569, 0x41fb7ba4 -570, 0x55e4bb9e -571, 0xd7619d3e -572, 0xc04e4dd8 -573, 0x8418d516 -574, 0x2b2ca585 -575, 0xfa8eedf -576, 0x5bafd977 -577, 0x31974fb0 -578, 0x9eb6697b -579, 0xc8be22f5 -580, 0x173b126a -581, 0x8809becf -582, 0x3e41efe1 -583, 0x3d6cbbb8 -584, 0x278c81d8 -585, 0xa6f08434 -586, 0xa0e6601d -587, 0x2fccd88d -588, 0x3cbc8beb -589, 0x5f65d864 -590, 0xa1ff8ddf -591, 0x609dcb7c -592, 0x4a4e1663 -593, 0xeae5531 -594, 0x962a7c85 -595, 0x1e110607 -596, 0x8c5db5d0 -597, 0xc7f2337e -598, 0xc94fcc9c -599, 0xe7f62629 -600, 0x6c9aa9f8 -601, 0x2e27fe0e -602, 0x4d0dae12 -603, 0x9eecf588 -604, 0x977ba3f2 -605, 0xed0a51af -606, 0x3f3ec633 -607, 0xc174b2ec -608, 0x590be8a9 -609, 0x4f630d18 -610, 0xf579e989 -611, 0xe2a55584 -612, 0xee11edcd -613, 0x150a4833 -614, 0xc0a0535c -615, 0xb5e00993 -616, 0xb6435700 -617, 0xa98dbff -618, 0x315716af -619, 0x94395776 -620, 0x6cbd48d9 -621, 0xab17f8fc -622, 0xa794ffb7 -623, 0x6b55e231 -624, 0x89ff5783 -625, 0x431dcb26 -626, 0x270f9bf8 -627, 0x2af1b8d0 -628, 0x881745ed -629, 0x17e1be4e -630, 0x132a0ec4 -631, 0x5712df17 -632, 
0x2dfb3334 -633, 0xf5a35519 -634, 0xcafbdac6 -635, 0x73b6189d -636, 0x10107cac -637, 0x18c1045e -638, 0xbc19bbad -639, 0x8b4f05ac -640, 0x5830d038 -641, 0x468cd98a -642, 0x5b83a201 -643, 0xf0ccdd9c -644, 0xcb20c4bd -645, 0x1ff186c9 -646, 0xcdddb47f -647, 0x5c65ce6 -648, 0xb748c580 -649, 0x23b6f262 -650, 0xe2ba8e5c -651, 0x9a164a03 -652, 0x62d3322e -653, 0x918d8b43 -654, 0x45c8b49d -655, 0xce172c6e -656, 0x23febc6 -657, 0x84fdc5b7 -658, 0xe7d1fd82 -659, 0xf0ddf3a6 -660, 0x87050436 -661, 0x13d46375 -662, 0x5b191c78 -663, 0x2cbd99c0 -664, 0x7686c7f -665, 0xcff56c84 -666, 0x7f9b4486 -667, 0xefc997fe -668, 0x984d4588 -669, 0xfa44f36a -670, 0x7a5276c1 -671, 0xcfde6176 -672, 0xcacf7b1d -673, 0xcffae9a7 -674, 0xe98848d5 -675, 0xd4346001 -676, 0xa2196cac -677, 0x217f07dc -678, 0x42d5bef -679, 0x6f2e8838 -680, 0x4677a24 -681, 0x4ad9cd54 -682, 0x43df42af -683, 0x2dde417 -684, 0xaef5acb1 -685, 0xf377f4b3 -686, 0x7d870d40 -687, 0xe53df1c2 -688, 0xaeb5be50 -689, 0x7c92eac0 -690, 0x4f00838c -691, 0x91e05e84 -692, 0x23856c80 -693, 0xc4266fa6 -694, 0x912fddb -695, 0x34d42d22 -696, 0x6c02ffa -697, 0xe47d093 -698, 0x183c55b3 -699, 0xc161d142 -700, 0x3d43ff5f -701, 0xc944a36 -702, 0x27bb9fc6 -703, 0x75c91080 -704, 0x2460d0dc -705, 0xd2174558 -706, 0x68062dbf -707, 0x778e5c6e -708, 0xa4dc9a -709, 0x7a191e69 -710, 0xc084b2ba -711, 0xbb391d2 -712, 0x88849be -713, 0x69c02714 -714, 0x69d4a389 -715, 0x8f51854d -716, 0xaf10bb82 -717, 0x4d5d1c77 -718, 0x53b53109 -719, 0xa0a92aa0 -720, 0x83ecb757 -721, 0x5325752a -722, 0x114e466e -723, 0x4b3f2780 -724, 0xa7a6a39c -725, 0x5e723357 -726, 0xa6b8be9b -727, 0x157c32ff -728, 0x8b898012 -729, 0xd7ff2b1e -730, 0x69cd8444 -731, 0x6ad8030c -732, 0xa08a49ec -733, 0xfbc055d3 -734, 0xedf17e46 -735, 0xc9526200 -736, 0x3849b88a -737, 0x2746860b -738, 0xae13d0c1 -739, 0x4f15154f -740, 0xd65c3975 -741, 0x6a377278 -742, 0x54d501f7 -743, 0x81a054ea -744, 0x143592ba -745, 0x97714ad6 -746, 0x4f9926d9 -747, 0x4f7ac56d -748, 0xe87ca939 -749, 0x58b76f6f -750, 
0x60901ad8 -751, 0x3e401bb6 -752, 0xa058468e -753, 0xc0bb14f6 -754, 0x2cb8f02a -755, 0x7c2cf756 -756, 0x34c31de5 -757, 0x9b243e83 -758, 0xa5c85ab4 -759, 0x2741e3b3 -760, 0x1249000e -761, 0x3fc4e72b -762, 0xa3e038a2 -763, 0x952dd92c -764, 0x2b821966 -765, 0xfa81b365 -766, 0x530919b9 -767, 0x4486d66f -768, 0xccf4f3c1 -769, 0xa8bddd1d -770, 0xcc295eb9 -771, 0xfccbe42f -772, 0x38bacd8d -773, 0x2261854f -774, 0x56068c62 -775, 0x9bdaeb8 -776, 0x555fa5b6 -777, 0x20fe615e -778, 0x49fb23d3 -779, 0xd093bad6 -780, 0x54919e86 -781, 0x7373eb24 -782, 0xfbaa7a98 -783, 0x5f62fb39 -784, 0xe03bc9ec -785, 0xa5074d41 -786, 0xa1cefb1 -787, 0x13912d74 -788, 0xf6421b8 -789, 0xfcb48812 -790, 0x8f1db50b -791, 0xc1654b87 -792, 0x948b43c2 -793, 0xf503ef77 -794, 0x117d891d -795, 0x5493ffa -796, 0x171313b1 -797, 0xa4b62e1e -798, 0x77454ea6 -799, 0xbea0aff0 -800, 0x13c36389 -801, 0xe3b60bac -802, 0xa176bed3 -803, 0x2863d428 -804, 0xe2314f46 -805, 0xa85cd3d4 -806, 0x7866e57 -807, 0x8f03f5bc -808, 0x239ae -809, 0x46f279fb -810, 0xcca00559 -811, 0xaa07a104 -812, 0x89123d08 -813, 0x2e6856ba -814, 0x43a9780d -815, 0x676cff25 -816, 0x6744b87d -817, 0xee260d4f -818, 0xb98d8b77 -819, 0x9b0ca455 -820, 0x659f6fe -821, 0x28d20d1c -822, 0x601f2657 -823, 0xdec3073e -824, 0x61263863 -825, 0x1a13435a -826, 0x27497d1e -827, 0x17a8458e -828, 0xdddc407d -829, 0x4bb2e8ac -830, 0x16b2aedb -831, 0x77ccd696 -832, 0x9d108fcd -833, 0x25ad233e -834, 0xaa9bc370 -835, 0xa873ab50 -836, 0xaf19c9d9 -837, 0x696e1e6b -838, 0x1fdc4bf4 -839, 0x4c2ebc81 -840, 0xde4929ed -841, 0xf4d0c10c -842, 0xb6595b76 -843, 0x75cbb1b3 -844, 0xbcb6de49 -845, 0xe23157fd -846, 0x5e596078 -847, 0xa69b0d29 -848, 0x2118a41 -849, 0x7088c16 -850, 0xc75e1e1 -851, 0x6a4af2d6 -852, 0xf19c6521 -853, 0xaff7b3b1 -854, 0x615295c7 -855, 0xbda3a8d7 -856, 0x5b5ca72e -857, 0xdad9d80f -858, 0xfa81c084 -859, 0xf4703fa -860, 0x3ca54540 -861, 0xa8961d51 -862, 0x53d1ecc2 -863, 0x808d83b6 -864, 0x68e8c48e -865, 0x89be2039 -866, 0x9088ea11 -867, 0xb8665d12 -868, 
0x91272f9 -869, 0x53dddff2 -870, 0xb7a54ab -871, 0xd2b645ca -872, 0x99fb8590 -873, 0x5315c8e -874, 0x2a913806 -875, 0x7f15eb2b -876, 0xa7f1cc5d -877, 0xbb2ee836 -878, 0xd9fafd60 -879, 0x17448d6f -880, 0x999ec436 -881, 0x482ec606 -882, 0x9b403c0e -883, 0x569eb51b -884, 0xb275d1a6 -885, 0xadd29c31 -886, 0xb7ebdb15 -887, 0xdfef3662 -888, 0x51aba6db -889, 0x6d41946d -890, 0x77bf8896 -891, 0xcafa6fab -892, 0x976ab40f -893, 0x49a6d86b -894, 0x56639e55 -895, 0x9945b996 -896, 0x81459b50 -897, 0xbce97542 -898, 0xe397c9c9 -899, 0x247a5955 -900, 0xb72b1573 -901, 0x86306f86 -902, 0x34f65dc5 -903, 0x909360c0 -904, 0xf3f696ef -905, 0xcb9faae5 -906, 0x93daecd9 -907, 0xde1af7af -908, 0x43a1f2d -909, 0x6d75cde5 -910, 0x9e412b6 -911, 0x5673fed -912, 0x16bb511a -913, 0x35ef4cca -914, 0x4e615aca -915, 0x5cdaf47a -916, 0x26676047 -917, 0x8c199325 -918, 0x2adf0cb9 -919, 0x84f2e6fd -920, 0x5e627f64 -921, 0xb7cee354 -922, 0x542ab4a6 -923, 0xe59cd83b -924, 0x89cc3f10 -925, 0x92b0f5f -926, 0xc1328370 -927, 0x8208d9f7 -928, 0x68eb00cf -929, 0xfadd4ac4 -930, 0x2517784f -931, 0x4042b99 -932, 0x75ce0230 -933, 0x97c5a1b4 -934, 0x1a97f709 -935, 0x4c62781e -936, 0xf530a83 -937, 0x75776413 -938, 0x321c7240 -939, 0x6afe4e36 -940, 0xad00a2b4 -941, 0xbc05477d -942, 0xb0911e80 -943, 0x9935b87d -944, 0xd535eec5 -945, 0x149af45e -946, 0x786934b0 -947, 0xbc13cdac -948, 0x208bfa2e -949, 0xcf4b39cc -950, 0x6ac6c172 -951, 0xbfa9a37 -952, 0x42d28db6 -953, 0x2bf1ea63 -954, 0xbed6e677 -955, 0x50325d27 -956, 0xa79d3b8b -957, 0x52448bb1 -958, 0xefaad1bd -959, 0x833a2e54 -960, 0xd9de549a -961, 0x9f59672f -962, 0x9d5f5f16 -963, 0x1c914489 -964, 0xc08fa058 -965, 0xb188698b -966, 0xdc4672b5 -967, 0x594f720e -968, 0x56ed428f -969, 0x9b0898af -970, 0x8a64d3d5 -971, 0x773308d6 -972, 0x84d62098 -973, 0x46da7cf9 -974, 0x1114eae7 -975, 0xf9f2a092 -976, 0x5363a28 -977, 0xf2db7b3a -978, 0x102c71a9 -979, 0xe8e76aaf -980, 0x77a97b3b -981, 0x77b090d -982, 0x1099620e -983, 0xa6daaae6 -984, 0x86ff4713 -985, 0xc0ef85b8 -986, 
0xf621d409 -987, 0xfd1561e2 -988, 0x4bcc687d -989, 0x596f760 -990, 0x7c8819f9 -991, 0x8cb865b8 -992, 0xadea115a -993, 0x56609348 -994, 0xb321ac14 -995, 0x1bac7db2 -996, 0x5fe6ee2 -997, 0xe9bfe072 -998, 0x15549e74 -999, 0xad8c191b diff --git a/venv/lib/python3.7/site-packages/numpy/random/tests/data/pcg64-testset-1.csv b/venv/lib/python3.7/site-packages/numpy/random/tests/data/pcg64-testset-1.csv deleted file mode 100644 index 0c8271f..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/tests/data/pcg64-testset-1.csv +++ /dev/null @@ -1,1001 +0,0 @@ -seed, 0xdeadbeaf -0, 0x60d24054e17a0698 -1, 0xd5e79d89856e4f12 -2, 0xd254972fe64bd782 -3, 0xf1e3072a53c72571 -4, 0xd7c1d7393d4115c9 -5, 0x77b75928b763e1e2 -6, 0xee6dee05190f7909 -7, 0x15f7b1c51d7fa319 -8, 0x27e44105f26ac2d7 -9, 0xcc0d88b29e5b415 -10, 0xe07b1a90c685e361 -11, 0xd2e430240de95e38 -12, 0x3260bca9a24ca9da -13, 0x9b3cf2e92385adb7 -14, 0x30b5514548271976 -15, 0xa3a1fa16c124faf9 -16, 0xf53e17e918e45bb6 -17, 0x26f19faaeb833bfc -18, 0x95e1d605730cce1b -19, 0xa7b520c5c093c1aa -20, 0x4b68c010c9b106a3 -21, 0x25e19fe91df703f0 -22, 0x898364bb0bf593cb -23, 0x5bd6ab7dbaa125db -24, 0xd1fe47f25152045c -25, 0x3bb11919addf2409 -26, 0x26a8cb7b3f54af8 -27, 0xe6a27ee11200aa24 -28, 0x7cb585ab01e22000 -29, 0x78e60028676d2ef3 -30, 0x5c32535e5a899528 -31, 0x83e8b6f8c4a46fb3 -32, 0xe56ef7668a161246 -33, 0x36dcbc15aeb73055 -34, 0x5ea247f0bd188acb -35, 0x438b547b84601a80 -36, 0x8acda2a1273e9e3d -37, 0x2b05e30a4b40c24c -38, 0xfd87236bd13af032 -39, 0x471df211d8d985ef -40, 0x18e8a5609a793292 -41, 0x46f0951fab6dc4e3 -42, 0x6c199c4e700f6795 -43, 0xf04aa16bfb7d22cb -44, 0xd763d269fbaffc89 -45, 0x9991930cefbe5c2b -46, 0xb2a11b953f824c96 -47, 0x63fd9f52172c44b0 -48, 0x183bdad907b1d848 -49, 0xe17953cddb931c52 -50, 0x515cf16726ec205a -51, 0x88c327605150711a -52, 0xc7090dd79cbc8dc3 -53, 0xcb487cedeb00a350 -54, 0xc8abf254d87b657 -55, 0xd43cc4cbfb493d1a -56, 0x8705452e5d9ed1e -57, 0xcecd11446769cf43 -58, 0xde72156c8d65bc69 -59, 
0x796a8f0f47d52ee8 -60, 0xb4c0da443917d6c3 -61, 0xe07ad7568a8e3dc3 -62, 0xc24a8da39ce6dc21 -63, 0x92b21ea80a8556eb -64, 0x572f21e531edf3af -65, 0x9b917ed56bbed198 -66, 0xe65fd8ddc5ab3d7d -67, 0xf55a80a8ec84fa18 -68, 0x18fc22e1a5227b61 -69, 0x72305dc7eeaa79d3 -70, 0x47ce58a36e7592cf -71, 0x14c6374340c0f7cc -72, 0x6f98273d4eb5a2c -73, 0x59a8702c46fe8f8a -74, 0xb67cbd8113cfe57f -75, 0xaa03c5db5f5b7690 -76, 0x3fb0f77ea4568013 -77, 0x756530990398b26e -78, 0x4c1952b2a3a6a343 -79, 0x1da15c5383074582 -80, 0xb405b21c81c274f7 -81, 0xbe664677a16788b -82, 0x9d2e37550bcee656 -83, 0x8b4589f0d9defe02 -84, 0x2935f018ee06a59 -85, 0x3834bf88be97ed11 -86, 0xa610d049cea79b6d -87, 0xd49ffc0d09a59ea9 -88, 0x4073365b76567adf -89, 0x499eefb9bb7513e2 -90, 0x74a743ee6b0138a9 -91, 0x3bf0880f2d947594 -92, 0x555d1c0498600a99 -93, 0x923b32a88ef2ffa4 -94, 0x7325411065fbedea -95, 0x9f4129ff8b79d300 -96, 0xab2b0a9b8a3785dc -97, 0x11734bdfba3a1713 -98, 0xc8333398841ba585 -99, 0xee2409cc234e6742 -100, 0xf6638e700872ecd2 -101, 0x10875300c13cd284 -102, 0x27a9bbed7c15b2d3 -103, 0x3c87f8fef31ce9bd -104, 0x92be263cd0914a95 -105, 0xa7b0f11bc742307e -106, 0x4a56f788cc1c1a3c -107, 0x4a130fa32257a48b -108, 0x5d4d9eda16e90286 -109, 0x7cc2af564844bedc -110, 0x2532867bfe7cda1a -111, 0xb1c504676611fd17 -112, 0xce8e86cfb4189aee -113, 0x99685898980d1970 -114, 0x8c3b67db23bcf1e -115, 0x73e14c93905b135f -116, 0xf0271b64ac2bd4d3 -117, 0xf4beba82f3ec1b2d -118, 0x1cdbf3ee9f210af -119, 0x2e938557c09c3ea6 -120, 0x2d314ccfa6ffd81d -121, 0x31ad47079950ade4 -122, 0x342b27547b900872 -123, 0x171b0e20b9ef1a76 -124, 0xdf10ce6318b03654 -125, 0x1d625df4aa718897 -126, 0x8712715a9f6e02ec -127, 0xb4a072da725bca3b -128, 0x19d346cb7734bd42 -129, 0xfd4281d311cb2958 -130, 0x58274c9519fc8789 -131, 0x4cacf29d885fd544 -132, 0x784b14d1c2523b80 -133, 0x2d25242131bb2373 -134, 0xcd2a5e43a7d9abf9 -135, 0x15eda3806e650ecb -136, 0xdaac5e277d764d96 -137, 0xdc5a5dd59aaa94e0 -138, 0x40d00237a46d5999 -139, 0x6205dd35a692743f -140, 0xbbd8236740361f09 
-141, 0x1625c9f4e7288bf9 -142, 0xb74f12df1479e3ce -143, 0xb2d72a51b43d7131 -144, 0xf006a324b3707c83 -145, 0x28e8ab4abe7655b8 -146, 0xfb480093ad7ab55 -147, 0x3f8abd0d6ff8d272 -148, 0xc81a94177ac26bb7 -149, 0x3cdc178307751b14 -150, 0x9de84cc2b10ba025 -151, 0x3f8ab5aefcd046e2 -152, 0x43bdb894e1ee83b2 -153, 0xe288a40f3f06ac9d -154, 0xdab62a7d04b4f30f -155, 0x49f4e20295e1a805 -156, 0x3643764805e0edef -157, 0x9449954618b6b -158, 0x6c87e0d4508e0ce0 -159, 0x3a334be688a9dd7b -160, 0xb35c39228776e499 -161, 0xc4118bfff938490e -162, 0x88cbde3dcbb034b2 -163, 0xf91b287793c417c3 -164, 0x42b15f731a59f5b3 -165, 0xffa27104bbe4814d -166, 0x1b6789d138beccde -167, 0x542c2c1440d0ceb9 -168, 0x367294504d18fa0d -169, 0xf918b60e804a1b58 -170, 0xd390964e33a9d0e3 -171, 0x23bb1be7c4030fe8 -172, 0x9731054d039a8afb -173, 0x1a6205026b9d139b -174, 0x2fa13b318254a07e -175, 0x69571de7d8520626 -176, 0x641a13d7c03332b7 -177, 0x76a6237818f7a441 -178, 0x4e77860d0c660d81 -179, 0x4441448a1c1cbdb2 -180, 0xccd7783a042046e5 -181, 0xf620d8e0805e3200 -182, 0x7de02971367fdd0c -183, 0x539c263c5914cab1 -184, 0x9c3b9ba1a87bbf08 -185, 0x6d95baa34cda215f -186, 0x2db3f83ace0bac5f -187, 0x7f5af1da2dc670a4 -188, 0xfcc098d16c891bfb -189, 0x81a33df1d7a5ab12 -190, 0x767b0f863c8e9882 -191, 0x7a92983830de483d -192, 0xfa7598c37a79ac25 -193, 0xb89b3ca42ce03053 -194, 0x457a542b8efed4f7 -195, 0x571b7737fd0eeda7 -196, 0xa0f59e524485c0a -197, 0x82dca766b7901efd -198, 0xa68243caf6a3bd5d -199, 0x1bac981c6c740e5e -200, 0xbcd51bedf9103e44 -201, 0x4e197efd3ae5a7bf -202, 0x523568efd782268b -203, 0x5ec4ef1191fef09 -204, 0xed751ed5e31c9ab -205, 0x44eac24de03e1b29 -206, 0x9237d57c011d3fb3 -207, 0xa8c6da0f7692f235 -208, 0x9f9eb6bc15d6cac7 -209, 0x34bb8e0c93427aad -210, 0x115febd738eaac4a -211, 0xa439991ed139d27a -212, 0x45c7c2633d8710a2 -213, 0x48b7475f3405a3ce -214, 0x80158497c77bd00b -215, 0x935c316a5b1657cb -216, 0x59c5d54440e9695e -217, 0x337c78c5b3d0ede2 -218, 0x8c46bb956b93790d -219, 0xbf1dd03e471d71c5 -220, 0x2d375e90a4bef583 -221, 
0xd0365428331b3790 -222, 0xfcd3969ac827ecd4 -223, 0x392fb6c580498410 -224, 0x6d6db4ceab5ea6c0 -225, 0x9bf84f1972e24786 -226, 0x798dfd820959dcc5 -227, 0x2e425095e65e8bfb -228, 0x8c1aa11536b1c9c3 -229, 0xd28e2ef9b12f6f74 -230, 0x86583bc98c8f78d2 -231, 0x489877530e3f93e7 -232, 0xb1d9430631104a15 -233, 0x1814f6098e6263bd -234, 0x8e2658a4e0d4cd53 -235, 0x5afe20e2531cdb2a -236, 0x30d02f7c4755c9bf -237, 0xe1e217cda16ed2d2 -238, 0xccb4913a42e3b791 -239, 0xfff21363ac183226 -240, 0xe788690bbda147a7 -241, 0x76905cf5917bfc6a -242, 0x2a8fa58f7916f52c -243, 0xf903c0cc0357815a -244, 0x15d20f243a4998d2 -245, 0x5b7decee5a86ea44 -246, 0x114f7fc421211185 -247, 0x328eb21715764c50 -248, 0xaffaa3f45c0678fd -249, 0x2579e6ef50378393 -250, 0x7610ab7743c19795 -251, 0xf9923d2bd101b197 -252, 0x57e42e7a62ba7e53 -253, 0x9f1dc217b4f02901 -254, 0x88a9ebd86509b234 -255, 0x867fc926aecc8591 -256, 0xaf22c1bfef04c718 -257, 0x39f701f0313f4288 -258, 0x6171ad397e6faab2 -259, 0x239bb5b9abdec4fc -260, 0xd9a591e25dd01c6e -261, 0x826dc4a75b628e49 -262, 0xf112b152c408f47 -263, 0x6843a06110f86c0 -264, 0x965e56a7185c1332 -265, 0x8d84492edbc71710 -266, 0xeee8ec111cfd1319 -267, 0xf2858e94ad98e458 -268, 0xbc9589fdf5f3a97e -269, 0xaf0ceef3bc375130 -270, 0x48f4aaf13fa75c1e -271, 0x111e9db47bee758f -272, 0xea3171df130164ba -273, 0x2a7bbe30bf827ab6 -274, 0xc516c3fdbf758c35 -275, 0xec55097754b04be5 -276, 0x374a997d52b6d3e6 -277, 0x487df5456085ffbc -278, 0x528883b84df8eafe -279, 0x805f77ab5ba26f86 -280, 0x8eb81477dc04f213 -281, 0x471ea08ec6794d72 -282, 0x69d3667ecc4d2176 -283, 0x98b7b6e295548a66 -284, 0x3877713c173f8f2 -285, 0xa00542570d0e8de3 -286, 0xf534b1bfa4033e50 -287, 0x7e1fedeac8bf6b26 -288, 0x8043f37c89628af4 -289, 0x1dd7039ec295e86d -290, 0xce9c05b763a40cc4 -291, 0x246926481e61028f -292, 0xb7cb0f1babf5893b -293, 0xefe6b777f37fc63e -294, 0xebbcabb4cb35cdcb -295, 0x39fa63cd711eeea9 -296, 0xad5d3ba7aaf30c8d -297, 0x8e9e78fe46021990 -298, 0xc7eaef6e7d5a3c62 -299, 0xefccdd5495d3f386 -300, 0x2179557ee8cfc76a -301, 
0x88a77f621f0885ce -302, 0xafda62674543d90c -303, 0xb8e6fbe2e13e56c0 -304, 0x8bfbbe26a14f9b1a -305, 0x1404f59f5851f8c3 -306, 0x1140c53a0489566d -307, 0x3edf2d138b5c3f1d -308, 0x75d6bb275d817dc -309, 0x8e660ae27107664e -310, 0x7a8021038ee303e1 -311, 0x2042ef5eefa9079f -312, 0xe3e7b90bbf6d457a -313, 0xf3f819d2bb9405b -314, 0x522e42155cae0c10 -315, 0xf5bfbb975b40e233 -316, 0x2cf82b614dd95cfa -317, 0x183ef4a96bc40e55 -318, 0x9f6e351c5ba4e752 -319, 0x37c1110683c90846 -320, 0x1d89b7a996d8a977 -321, 0x18a444f77c7cb4d9 -322, 0xd0a8a971b78dc893 -323, 0x860232fb9e6543f1 -324, 0x60b6097f51002555 -325, 0xca1e5214123e3894 -326, 0xe03fe695c95f99bb -327, 0x2c7c6779d5f03622 -328, 0xafeeee42f63055d1 -329, 0x670dde905515936a -330, 0x9a922f42b59fb094 -331, 0xddb5ff49af5a651a -332, 0xe61b04c9e58ebbf8 -333, 0x4e459dcf272e7fc4 -334, 0xd549e92c16adceeb -335, 0x7a17dba1299d4a9c -336, 0x825d756109f2b585 -337, 0xba142e61a9cb203e -338, 0xc2a19f00e9c04a30 -339, 0x2d0f8140d23d0652 -340, 0x8b866d4d4d6caaf4 -341, 0x4f11d90dd91f8217 -342, 0xf6efc37373b9e0d -343, 0x248493d6cd6a4736 -344, 0xd12b6ae74a951a3e -345, 0x56e34722070b70a7 -346, 0x22d3f201cc9fa0eb -347, 0xbfdcc320008291b7 -348, 0x1a7a6922e9204fbd -349, 0x831421e0c4945ae4 -350, 0x66316feddddf0e11 -351, 0xa8c86a1517456554 -352, 0x14a9049ad989e335 -353, 0x837022259f141ecd -354, 0xcb71793a06c261f7 -355, 0x4aeefc07ebe09a79 -356, 0x8982f15aa3b6594b -357, 0x67bccfa7ed9b0d5b -358, 0xb377463b523e9dec -359, 0x53d3d594870fecb7 -360, 0xa5274b1caec5a60a -361, 0xd6316d0cb643db39 -362, 0xabc1a9b536de88ce -363, 0xed2fdb1383d2a077 -364, 0x12319c6feb97221b -365, 0x7e0f6cd40ef47403 -366, 0x86135c84fe26dbf8 -367, 0xc96622d3fbbee19b -368, 0xe3989d8d8511573f -369, 0x42cc365554d1fdc7 -370, 0x4c1a1eb8bbce8b4f -371, 0xfc4e30e7ef2034c1 -372, 0xc490444317a91e76 -373, 0x7ccdf469ff5dc81c -374, 0xf5a0da4110cc09d7 -375, 0x505227baf34c0fb5 -376, 0xbe58737e8a35cc88 -377, 0xd449bee91b3e8c41 -378, 0x3e590e23299d0e6 -379, 0x291a7d9e0a64caf7 -380, 0xdc6fafbdfebd2293 -381, 
0x8223f1e259fe8a65 -382, 0x6186fbc9efd9e3df -383, 0xfda39b07e4007ffb -384, 0xfc19aea98574dc02 -385, 0xd0e10d354fcacd8c -386, 0xc9619916544a55a5 -387, 0xd454d50a8c8558cd -388, 0xcd94a246712d91e -389, 0x76a771f5d1231cce -390, 0xdd20cb2b7b370ee5 -391, 0xa6f4f50feca57c49 -392, 0x78c8fb431f17ab9c -393, 0x1b692b79a59b43cc -394, 0x4c45045d287da7e6 -395, 0x522132e18bf43928 -396, 0x25c458983138b41c -397, 0x2a1fb426ef229796 -398, 0x74dc324c74e5dd3d -399, 0x6df75e3eb6eb5374 -400, 0xb63f2f4f9ca25b61 -401, 0xac72286112ee54d6 -402, 0x5a966f3d0a6863c4 -403, 0x8d7046bc64a46fc2 -404, 0xa7b740fd6e3087eb -405, 0xcdbcbe0340cfcdf5 -406, 0xcb632613bf312b65 -407, 0xa91b3f2c2aac238b -408, 0xa06deb3f5ae555a3 -409, 0x29d72e1f8db69 -410, 0x2d004bae09728ea6 -411, 0xc6eee5dce0736cc1 -412, 0xa7493145500ff60f -413, 0xc4d68c4aa18ab93c -414, 0x8210c29e79d48d7f -415, 0xd0999d7889ecbef6 -416, 0x6e3bd61e66e93566 -417, 0xe6cc13d47d7d7b1f -418, 0x3d6f181f42e03979 -419, 0xbed4e14fd867604a -420, 0xbe511c84067bd86d -421, 0x49a876d89e697d38 -422, 0xc04c3dde8f889c98 -423, 0xaf293eeab0f53e3f -424, 0x9f6291dd65732cd6 -425, 0xd7811ac01de78c01 -426, 0xe385cf0261d50ec2 -427, 0x5a64134b3542bbf -428, 0xf9d1302bc6f13a68 -429, 0x5d2aabbea37d8c31 -430, 0xd9842e99a5192970 -431, 0x713eadc4cd30e837 -432, 0xb7b002fc72abb413 -433, 0x276cfeea526af1cf -434, 0x8519fe79b633a0ce -435, 0x2f0e87363705a3e2 -436, 0x9adbac0be3c371e7 -437, 0xf3f44ba899a6173c -438, 0x782d6c29618fde2b -439, 0x7f61062acec408f -440, 0x6e79cd836359258f -441, 0x5c8e9b138df5785a -442, 0xa54359c9f39a9a84 -443, 0xeec3f033135084b0 -444, 0x883ee717787a535c -445, 0x9a2422b513a73b00 -446, 0x2dd4beddcdd64a58 -447, 0x90c8a13202239c7b -448, 0x85b352ab759646d9 -449, 0x139f5cb2e46c53aa -450, 0xe1d3ba6c721c66d1 -451, 0xaa66e0edc4b60a98 -452, 0x3521275c75be29b6 -453, 0x490a5190b3edfa5d -454, 0xd2abcdd2ccb2f14e -455, 0x9d9be8bef4a5857d -456, 0xde19676f13ef7755 -457, 0xdac2fee2e42615f3 -458, 0xf4239801cb02f2ab -459, 0xaa8bf923ed91875c -460, 0x61d18a1940e4c7c0 -461, 
0x1eb6aa3d5f077a6d -462, 0xee7374c063bf29d8 -463, 0x2f0a59e34d76268d -464, 0xc92e80e17d1eb3e9 -465, 0xafd05b3ec3d2ca72 -466, 0x28a61ad8d6c497b8 -467, 0xa7094d6834ad7d47 -468, 0x57d80ea9eccbb4f -469, 0xb047e0fee6cdaf16 -470, 0x44f41b5eb48c00bb -471, 0xd6dc8e1eb9c8c9ba -472, 0x47adfd2c638c7849 -473, 0x365d63db7d526c68 -474, 0xc21cda439016135d -475, 0x14d10c3f0f98863c -476, 0xa93e56f74e037602 -477, 0x3b4e9c8915bdc9 -478, 0xb46f5ae155e54aa2 -479, 0x8e470d21ce1943e1 -480, 0x60b96301b5ba2e8d -481, 0x1b473a41d381f9ff -482, 0xabcf5a8e3269e73f -483, 0xd410f6e94fb21fa1 -484, 0x65d1a47eebf87e5e -485, 0x48eaa201c61cb843 -486, 0x212c1abc2499bfc5 -487, 0x4255ad8377d2d8d -488, 0x44caeef472010612 -489, 0xffae764524f572f2 -490, 0x78d374d20c9ee550 -491, 0x6e003206c0511cee -492, 0x7998a159145bfb82 -493, 0x921239650bda1d4d -494, 0xae05025509bcfdc5 -495, 0xc6430c980be407b4 -496, 0x78524f1744b153f1 -497, 0x84089e6f468181fe -498, 0x8d0d21d7dfb6c254 -499, 0x90bad90502a33603 -500, 0x3072a403cbd16315 -501, 0xdfadddf3f1c040c2 -502, 0x22f0b0639d9ff975 -503, 0xb49e48a4cad0765b -504, 0x95a0a04f8239709d -505, 0x56e147a24a4c481f -506, 0xacf16ef61dea4c7e -507, 0x424040afd2700de6 -508, 0xc67e8096a3c717a9 -509, 0x39f164181dd0a399 -510, 0x2449cedc1d62198c -511, 0x7a53df11a1f1a61c -512, 0x5596f1d4a3badae3 -513, 0x38ed4c822072b3d0 -514, 0xf07ef346b3fd730a -515, 0xfd349c35c3ed51fd -516, 0x2f15c9c7890f8f32 -517, 0x3b470df52b173c29 -518, 0xd31bfc8981281af7 -519, 0xbbcc9bdf561215bb -520, 0x5782fffea326574f -521, 0xb0ebdcfcc5e03290 -522, 0x7fd89d93d2b3fbef -523, 0x280ea1865d9ba2 -524, 0xe726959845b2c100 -525, 0xd0361f032cd7dbb1 -526, 0x3c65ec2028b81a22 -527, 0x5221e9b2188920bf -528, 0xeb5ab27c4125ec20 -529, 0x80a32dd48b54f0a4 -530, 0x369b5ced1012bebb -531, 0x582d35d76530bc6f -532, 0x7b50dc9b48e1e37d -533, 0x37fdfe8bbacf8dad -534, 0x7a0cb7e6e93840ea -535, 0xa1132c870be0b2ce -536, 0x9d8ac2c68267cd1a -537, 0x470969b647fa7df4 -538, 0xabcb7d8adf7e2d24 -539, 0xacdebec9bdf9eb1c -540, 0xe30f4cbf7eb6a59 -541, 
0x746673836c4df41d -542, 0x75120a6b647bb326 -543, 0x2f4eab556c3f6878 -544, 0xd84651ab05405b7a -545, 0x9e695808b9622284 -546, 0xc93b71e56aa6e1a5 -547, 0x2be7f3be4a7b7050 -548, 0x6497e910b6733241 -549, 0xcf7050dfd08076fc -550, 0x4e3cc156eca183f7 -551, 0xf801a33d9326c265 -552, 0x6aa293c8a47d40e6 -553, 0x28c429755faa6230 -554, 0x82b818651f54e7bb -555, 0xa84d726d7acdbead -556, 0x5cfa535d5774965d -557, 0x4a34b7b1cb48d53 -558, 0x86a7b5bce426de84 -559, 0xfcd2307cecdb7318 -560, 0x16dbaaa71181a038 -561, 0x88e7e8cd261c2547 -562, 0x3c09ba6d1d5ea913 -563, 0x5dd3d643734ee5b6 -564, 0x326d725fe8cbb33 -565, 0x7bcca9ca2da8e784 -566, 0x482dcf6b11d7f9a4 -567, 0x1291b605b4cd3e04 -568, 0x6988181b50e2f4a8 -569, 0x649e3c37131fc292 -570, 0x4eeb67b9e21eba54 -571, 0xc051d39073dec45f -572, 0xc99c52e110270d67 -573, 0xcb813d5d77868add -574, 0x423a5f13573e7ac0 -575, 0x231ac4cc4fe73616 -576, 0x4c22b888a6e600ea -577, 0x8059a6dc7c9e25c6 -578, 0x49f498a5b8ad22de -579, 0xf1e812cc6d1826c8 -580, 0xbbaf60abe8b11e00 -581, 0x1d31d7f4d8be9a6a -582, 0xfeadce70a9a10c14 -583, 0xb47c635bc136996a -584, 0xd88e694c8da030cb -585, 0xc41bbe132aff1364 -586, 0x34249ab18a4b0800 -587, 0xf14b5c825aa736cc -588, 0x2710be6b08df78e -589, 0x2ab56bcc9bf9e740 -590, 0x9b7f6e591b5f648 -591, 0xfb665c3772f34135 -592, 0x628a0a5d2db5d8d5 -593, 0xb3e3f251e61b5259 -594, 0x82310ae33faf1b23 -595, 0x24af8723a65cbd0b -596, 0x671c93282fc4ad97 -597, 0x6cabeaac77270cad -598, 0xef4643fe38b02b7f -599, 0x7b011549d1ac6653 -600, 0xe2af87b9fccfe89 -601, 0x36b71ad67197ac8a -602, 0xdbba55d06f2fd93b -603, 0xf571dbd764b7f7e5 -604, 0x38ea402501cdbd45 -605, 0xb8ab5b5b1bab2913 -606, 0xfab973c4d45f32bd -607, 0x9364f1717c2636b9 -608, 0xfad00f4d983e00fe -609, 0xc90c532a11aef75a -610, 0x64a6eda96e44783c -611, 0x35891f2eb84520be -612, 0x28d216080caed43 -613, 0x129629cc5bd206f6 -614, 0x22c3d39822cbb4b3 -615, 0xf1efbf4cce1eaa2b -616, 0x7070cba12524ed08 -617, 0xa7ed0be9deabf20d -618, 0x8ddb4cd6b454f76b -619, 0xb82814b1db37b63 -620, 0x418e83b36de01876 -621, 
0x9a538c7f39c6413 -622, 0xee0cd7abf8a2ecb9 -623, 0xa9222b07e95590f3 -624, 0x6296a415d68341e6 -625, 0x981e0a5a8f811929 -626, 0x4bb372d3b0de283d -627, 0xa9805b5971866e16 -628, 0xaf3b5f5183497657 -629, 0x2152b0fd23c3d9f -630, 0xb730c325b7173180 -631, 0x1e3439d231608c19 -632, 0x1c5ba6031379823c -633, 0x87f5d12d6d365cbc -634, 0xd3bc7f29614bc594 -635, 0x63102214bb391268 -636, 0x482bbd5bba648a44 -637, 0x6a23604690759dc4 -638, 0x4091d41408d3a39e -639, 0x7cd017f922101b15 -640, 0x7ce9004ac5f9231 -641, 0x978bc3d8ec7f7fdf -642, 0x5bd0c4d780580c11 -643, 0x4313c068bb040153 -644, 0x3ab7dab7bc38bf80 -645, 0x3aaf9c187728deea -646, 0x6633a4ce8efb88d9 -647, 0x7263b089878f00fc -648, 0xd0d767e96fe00eb8 -649, 0x184a7c0c01908028 -650, 0x1ebdf41e6f76e186 -651, 0xeb740ee1d0402083 -652, 0xfccf4974edb1c339 -653, 0x16e2707aa28306d -654, 0x1684f0bdb018c3a5 -655, 0x887b6b67b88aa862 -656, 0x923d7810a2bea33a -657, 0x56b3560babef5d6b -658, 0xb39a14614c54b8c6 -659, 0x33e4dc545a509fc8 -660, 0x26e21f84142da9b -661, 0xdd07598125756855 -662, 0x572d49a071d7ae0a -663, 0xba3c7e3baea28760 -664, 0x7ecdb2d714db4b61 -665, 0x1c62b4920e1b2fe2 -666, 0x71bfafb70092834a -667, 0xd710a4228f60d56a -668, 0xeb16277d4ce4e95b -669, 0x968168c90b16d3a1 -670, 0xac3439dfe8ad0062 -671, 0x5a8226f9dd5876ad -672, 0xb843affe917291b0 -673, 0xd76d1e67051f8259 -674, 0xb73a6638cce8ccde -675, 0xa0e6afd3c7295f9 -676, 0xff8857b4bbb5f4c6 -677, 0x99becf78938f0426 -678, 0xfcd17edc1e70f004 -679, 0x6223b8b23f2f50 -680, 0xca875f3e84587b4c -681, 0x7d1e81e589f87fb9 -682, 0x9eb621586aa826fc -683, 0xf46fb9ef5b9c2086 -684, 0x2882c9b7092725f3 -685, 0x5493f099bbedcd02 -686, 0x90c1ec979ffa811d -687, 0x963f765025bcc53 -688, 0x56194e3ec3d9d4e9 -689, 0x7ec4720954cac1f0 -690, 0xfab3145171af7f90 -691, 0x52a0b4e41a13b593 -692, 0x740e2d4d5909d126 -693, 0x98f5339c09c94a28 -694, 0x1700e462fe8dec76 -695, 0x3dbffc2aa4695ac3 -696, 0x5763edacabdfe2a1 -697, 0x7b5b623ce49ef21d -698, 0x30addc66f49860df -699, 0xcc7511a6c31bceda -700, 0x1b25b61ca75db43b -701, 
0x416bc4c298e59046 -702, 0x4cd11fe2d74e4649 -703, 0xb54458a9229fc978 -704, 0x8c21a27882b6ca35 -705, 0x57887c8b5e01639b -706, 0xf4e893da996680bb -707, 0x8d601297702c9c0d -708, 0x2a27904a30aa53af -709, 0x497800f6917ea8d0 -710, 0xe96db3340ada9c00 -711, 0xcc23166f14c010ee -712, 0x782690d78fa65ec9 -713, 0xf3e00d74a0878eda -714, 0xa7cbb683decca0a3 -715, 0xdd2e038e683a94aa -716, 0xe2096ff8da896ca5 -717, 0xf7c83400afdabe11 -718, 0x395b8c6f6a4086a4 -719, 0x4a164ec05bee71d4 -720, 0xe87aa5d1ca0462fe -721, 0x8dbc5aed6dff9ceb -722, 0x12120d1e9552707b -723, 0x877dca6889b3e6cd -724, 0xbd65605c01e900fb -725, 0xbd6b82c4157c3115 -726, 0x8b60282732caf78a -727, 0x279fcf5e5de9e57f -728, 0x34b34ebfb6a37eae -729, 0xd258cc1a14e03b7b -730, 0x9a528ba3db4a13fb -731, 0xffa0aea59d057746 -732, 0x27fa7f456cd37c4e -733, 0xe1117a57a6fdce63 -734, 0xdc8fc903970a1551 -735, 0x492dd104f30faf29 -736, 0x110def0959e5652b -737, 0x7f8d1997636fdd15 -738, 0xfb77b05e538a9b59 -739, 0x2e41fa35b4b01fc6 -740, 0xbc35ae69a3374085 -741, 0x192c2a681c2d9b4b -742, 0x12566b8866c189d6 -743, 0x9d88ea785c5185c8 -744, 0x30a621ad5f983c4 -745, 0x8b875efe1206f587 -746, 0x224d25c3af6e3423 -747, 0x7503e976a1ac7bcc -748, 0x3c98aa869e823859 -749, 0x3d8835304b646892 -750, 0xf6353330ff970bc2 -751, 0x8a673f5e2edb8acb -752, 0xf2fdcc53493838b9 -753, 0x85ddcd526236af16 -754, 0x60afb99814c676c5 -755, 0x32a1c2749e281ca8 -756, 0x2367a92ae3bee9ca -757, 0x219fe082703743cc -758, 0x34d8b74dc85182a9 -759, 0xdd04164c72db23f -760, 0xe293ac28fe2671a9 -761, 0x9ca7d169cbda6f45 -762, 0x705c47972b4240ed -763, 0xc10eda9eeb536209 -764, 0xc36ddacd0c94e85d -765, 0x8eb592c27e8cd0d2 -766, 0x3e815991c76e7cc4 -767, 0xac9cfce31acf7580 -768, 0xbf7a4cb31c7aee94 -769, 0x663077444aceecf6 -770, 0xe7f614ff386eb568 -771, 0x79d7a229c66912c0 -772, 0x161ed4311f63e1f3 -773, 0x308a5faeb9982ede -774, 0x7b38ddb9b7efd10 -775, 0x1e103a2589b27ecf -776, 0x67b02baf4259f27e -777, 0x868921c115ea2eee -778, 0x959791912200f71e -779, 0x4dd55f36dec10557 -780, 0xe3464d90080cb99d -781, 
0xfb2d4f6accce652f -782, 0x109900a9257d77ba -783, 0x3c4bda8e2c83684c -784, 0xc9ae040fb7f868c6 -785, 0x78098ffe994f4905 -786, 0x7a94c33eca77f0b4 -787, 0xbe6a2a95e9b5c0e8 -788, 0x797d39cf963f4837 -789, 0x8d2e249e4425d06d -790, 0x6ae2c30cd5da06f4 -791, 0x904489de762b179f -792, 0x84713e2dfb591e3b -793, 0x6405a40da3f6f51b -794, 0x976b560d663a2df1 -795, 0xed1c544784ba1e22 -796, 0xca658e995ed9344c -797, 0x2b1c6b8e4db49025 -798, 0x52b1513da528bad -799, 0x3c63406d256d9968 -800, 0x63a31ca3d423f85e -801, 0xb05a81f55789a720 -802, 0xd04412992c476c8e -803, 0x828ec2f77a150a3d -804, 0xee50926671bb60c6 -805, 0x5aa70f93e2df61b4 -806, 0x94d60fa2e8655858 -807, 0x3f5e5b770703cc7d -808, 0xc62dfb2688ca7784 -809, 0xaaf02e1e8ba89fe4 -810, 0x4ab74e0d8c047405 -811, 0x31ee04fbac6fcead -812, 0x1203b78b8228f5af -813, 0x412a70836f9aa71a -814, 0xab51cf98c03f1819 -815, 0x783a3ce9ce137f65 -816, 0x8897085b0a072cf2 -817, 0x685dd9bde8798cb -818, 0x9a1fac7b1705e2c1 -819, 0xf3e9ff98de48e9cb -820, 0x5c2d3eb1a1fbe917 -821, 0x3bda718b6b54d82e -822, 0x29f2dd18f22f0821 -823, 0xb992da1572ac3597 -824, 0xacb69e7aa14b34f7 -825, 0xcd36e3ad14f088d1 -826, 0x6aaacc96a1ec55e8 -827, 0xf8ac593f154fe68f -828, 0x18fc9cbff012339f -829, 0x2f3368ccbbb99899 -830, 0x7cec7d17f37031f7 -831, 0x96e86bfaadcb8fc2 -832, 0x74f9e7ee3d42a752 -833, 0xbd52f6c7d9b0733 -834, 0xa48e6d96bb6ce1c9 -835, 0xaefa058254b82133 -836, 0xb7a19edfd0929107 -837, 0x6160ce9125b26e26 -838, 0x6537dbbde1d2aed -839, 0xc567f9a6bec52dde -840, 0xca29fd3f22443342 -841, 0x7732aa6db6a1c476 -842, 0x8f5a4d7df6b11b3 -843, 0x76649262aa7e31e1 -844, 0x60a13eb125fbc829 -845, 0xc81e4d123dd21ac1 -846, 0x643cbb09bb72f86b -847, 0xf971a98fb25555a6 -848, 0xffa2774c66692d56 -849, 0xcb33c16c50b13ea9 -850, 0xfabf388dffda0e9b -851, 0x55d41ec12ca24b9f -852, 0x91cf693a3467e807 -853, 0x6be2c00b2c31d6dd -854, 0xc5cf513b5251ae28 -855, 0xffc4384212403dec -856, 0x45d4e1865255a69d -857, 0xfb1dcf956972086a -858, 0xcae946a55c4c55b8 -859, 0x7351ac7720e385c1 -860, 0x19aa8ffd86240254 -861, 
0x8f515ae78f4040da -862, 0x1e1ed2058de50fce -863, 0x22d006dcdb374243 -864, 0x6e0f0ede7c95b441 -865, 0x70e8aa81b53b4d25 -866, 0x998f309ea41e3814 -867, 0x89ed6598fb66f390 -868, 0xb5997dc3278060df -869, 0xb2a021eac4f7e046 -870, 0x3705b60aa2fd0768 -871, 0xfc415079ab9200e -872, 0xf2871ac4cf45ecc9 -873, 0x24bf758d2246175f -874, 0xac503dd6f8141b3 -875, 0x4e879d12d9f03b3 -876, 0x82034af8cf93b644 -877, 0x59899dd7e478a6c7 -878, 0xae90addb6eb11507 -879, 0x1524ddf76730cdef -880, 0x6fd4afd5456b1c9d -881, 0xcddb9221ea001cbc -882, 0x64ff400bbf2e8604 -883, 0x6dda10549b06ed9b -884, 0xed2c85104c261527 -885, 0xc7e09217d29929a8 -886, 0x56284df611a428b1 -887, 0x1a7608289c0a61 -888, 0x7cb63db15166ff66 -889, 0xc6013c76fcdcdc72 -890, 0x8e5dd566c7a5a676 -891, 0x5a8e8565f40d133b -892, 0xe465973455848c44 -893, 0xf92eecbfe0f3c2c0 -894, 0x7d64155d4dcc5cac -895, 0xf17595706f988dad -896, 0xd590a001a6a19c5c -897, 0x82a164475758db3d -898, 0x6b144993ea1bbe32 -899, 0x22a81a7a6e453779 -900, 0x8e8c298df1a68a73 -901, 0x78056afd6d936b4c -902, 0xaaceef0325faaf62 -903, 0xe78bb7699f82266f -904, 0x523a2d283c5a5166 -905, 0x7076d87088f6c6db -906, 0x6087dd54cff5aeb2 -907, 0x7ef82e62cb851680 -908, 0x4e8bcc8ed84d03d8 -909, 0xd12fa0361df3cfd3 -910, 0xefb89c79f8127297 -911, 0xa9af4e2fbce0b1f8 -912, 0x462136685b70331e -913, 0xe9e74c93da699b77 -914, 0x9ec69215fb11d0c3 -915, 0xc10f229939e3e111 -916, 0x3f67fa79e41d2374 -917, 0xd5e7c1a9a7185162 -918, 0xa1dcce9ec91492fe -919, 0xd4e61f0727b5d21b -920, 0xdf6cdce46551800a -921, 0xa3f256ce906982d3 -922, 0x209742a6b9ffc27 -923, 0x4006c96958526a57 -924, 0x9606aebc75a1967e -925, 0x91b9f42fb64189df -926, 0xb27119defcb938bc -927, 0x128cc7a84ba05597 -928, 0x6c3df613c62d0d30 -929, 0x3adf69d48b629ec7 -930, 0xda42ee493837b128 -931, 0xb8e770480e760bb5 -932, 0x9feb55d57c99c626 -933, 0x29812d80afdae3ed -934, 0xae4222a64276a8c7 -935, 0xe3897212a5b4ed53 -936, 0x98bedfd13886e669 -937, 0xca858675d7fc0d0e -938, 0x28a359f665354234 -939, 0xfac2ccabe4128b35 -940, 0x61373cc5d11ca180 -941, 
0x7007605a4512a87a -942, 0xe71f8eade7b30b3d -943, 0x3a9e77f9b99bd04d -944, 0x70d3e42488098866 -945, 0xd30fc159c7cd4d99 -946, 0xe4d3f6600d2e2d6f -947, 0x1088324dfa955c25 -948, 0x516437acd4764623 -949, 0x38a31abe50d0aa03 -950, 0x72e1054e9dc02ba -951, 0xe6971dd664d1a2e2 -952, 0xf6698cb095d3b702 -953, 0xad995a5a8c19bd92 -954, 0x34e53c6936f656e6 -955, 0x10de240bc07c757a -956, 0x3e3b9a6861c2bd1c -957, 0x9c0b0b97d3712ec9 -958, 0xabf1505a75043aed -959, 0xbdf93d3de3274179 -960, 0x28fa5904d3f62c28 -961, 0xc3b97b39ef6c5133 -962, 0xf2b2219225b8679d -963, 0x8be4ec0f930c0aaa -964, 0x47de5a56aa590643 -965, 0xb6f871b304129856 -966, 0x80a61c06233ab0f9 -967, 0x3ce6c3af8101b055 -968, 0x85b911708274e7d1 -969, 0x4cab65d093a488b7 -970, 0xaabc4b10661fe28e -971, 0x35b16dea64474a68 -972, 0x1d6eb5b093361223 -973, 0xc39107b92f0fe1fb -974, 0x1d09e048073c4841 -975, 0xc6a02f43aca8cb2f -976, 0xaf6613dbc7da909c -977, 0x5ac2a40c230aa756 -978, 0x33afb5e7c01c39a5 -979, 0xc7b0b20ea8b7d0ef -980, 0xdf7306c8ccb1bbea -981, 0x9710efc0c188b2a0 -982, 0xd6303eadb72c873e -983, 0xa38ca609b118f35a -984, 0x8390613065c6e535 -985, 0xdf9a0106757e431f -986, 0x8bcf77039788e143 -987, 0x6026806a986b378e -988, 0x482ff3b1394cb1dc -989, 0x2a27d0ccac9ede9c -990, 0x53c77f26e271b3ab -991, 0x1ba004cf276cf3f -992, 0xc135b0517dc81f7c -993, 0x5d137838db75e442 -994, 0x3fe505f93d1dbdd7 -995, 0x351654ae7d598294 -996, 0x173f8d182af9d84d -997, 0xf97dfcd164fe11c5 -998, 0xcda423e5ad43b290 -999, 0xa5cb380b8de10d10 diff --git a/venv/lib/python3.7/site-packages/numpy/random/tests/data/pcg64-testset-2.csv b/venv/lib/python3.7/site-packages/numpy/random/tests/data/pcg64-testset-2.csv deleted file mode 100644 index 7c13e31..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/tests/data/pcg64-testset-2.csv +++ /dev/null @@ -1,1001 +0,0 @@ -seed, 0x0 -0, 0xa30febcfd9c2825f -1, 0x4510bdf882d9d721 -2, 0xa7d3da94ecde8b8 -3, 0x43b27b61342f01d -4, 0xd0327a782cde513b -5, 0xe9aa5979a6401c4e -6, 0x9b4c7b7180edb27f -7, 0xbac0495ff8829a45 -8, 
0x8b2b01e7a1dc7fbf -9, 0xef60e8078f56bfed -10, 0xd0dbc74d4700374c -11, 0xb37868abbe90b0 -12, 0xdb7ed8bf64e6f5f0 -13, 0x89910738de7951f -14, 0xbacab307c3cfd379 -15, 0x2cf7c449d8b927a6 -16, 0xdcf94b3a16db7f0e -17, 0x8a9d33d905a8792e -18, 0x4cb9eb2014951238 -19, 0x6c353acf7b26d6f1 -20, 0x73ff53d673aa30c -21, 0x1fd10760015eca68 -22, 0xabae0aa9021eeba8 -23, 0xa5ae363a868ee2bb -24, 0x9d89e0f041de6631 -25, 0x6238b133c3991a65 -26, 0xff49267d75fef51a -27, 0xfb180656ce13c53f -28, 0xaf7fadf36128712d -29, 0xa6847fc6f339c63e -30, 0xb03e0b80d71ea5bc -31, 0x63905abcb43969af -32, 0x2295af3ee00a3bba -33, 0xb8b375b994330415 -34, 0x867d9ef1d8716a3b -35, 0x4f6c02f5601b4e18 -36, 0x7c5fb4c16c470d18 -37, 0xe3b57986b804b343 -38, 0xef1d79d212aca692 -39, 0x5b98774c8806209c -40, 0x924fc76bac38a5d1 -41, 0x5266084c412ddeed -42, 0x98240bf9b831d6a3 -43, 0x5681599e81219442 -44, 0x6441248fc2ba92bc -45, 0xe3e9051a540349ea -46, 0x3a2700034390baa3 -47, 0x9f893155b6d402bc -48, 0x158207910c6d8aef -49, 0xd5282ab7608c2cbc -50, 0xc97f4651669dee4f -51, 0x3d4750d95103ed60 -52, 0xe0614542caac1f04 -53, 0xefe5092144cfc6c -54, 0x560bc486abd7e9ae -55, 0x2678b71392daa4b8 -56, 0x734970d3dc2ba416 -57, 0xcbdbe849e51e4aaf -58, 0x3b0b5e28b491556c -59, 0xd51449ac45abd88 -60, 0x6790b59991f1b7ab -61, 0x32d1c039ff2415bc -62, 0x173b9772f24f72e0 -63, 0x9490a9ca9f883b1b -64, 0x4c775989e6214222 -65, 0xac07db37e6ee6114 -66, 0x331371b2e3f10aee -67, 0xf12e5326c21c28e4 -68, 0x5d77dc280c70d614 -69, 0x1b01bd17a2f281ec -70, 0xa10d3b5882938487 -71, 0xed5a0033c394ae8f -72, 0x70bc8ea568ea44b4 -73, 0xf4600ae77965e730 -74, 0x7ff92c0b321ce233 -75, 0x6cdbc87d0cc1d670 -76, 0x9ec64f0cf2000eb1 -77, 0xfebea50259800f68 -78, 0xf2edf9019a8fd343 -79, 0x75c584ac042e5468 -80, 0xc1fa8481d5bf9a1d -81, 0x7f57180168514ac2 -82, 0x878100716b94f81e -83, 0xc929406e3af17fd2 -84, 0x6a26e2c013e4bf4d -85, 0xbc071d8848280955 -86, 0xb60d75abbfd1bdac -87, 0xee9b76afeca9fa69 -88, 0x1d6c399d2f452810 -89, 0xbaa0bc1621e25c83 -90, 0xed6ba792f8671ba5 -91, 
0xf7ca02c2ab11d8d7 -92, 0x3c3cadadf0b21e3 -93, 0xdd1784571e864e9c -94, 0xfb2f992015157509 -95, 0xf50bb9f0d3ced743 -96, 0x261565f75c3e185f -97, 0xf8fe33b284513e60 -98, 0xe3d2d10b5e024664 -99, 0xd28717566242cf35 -100, 0x7ae07d133ac5b789 -101, 0x3b7ccaaa53ac338e -102, 0xcd480bace4871650 -103, 0xec6c78f923c080e9 -104, 0x44211d0ff8919d59 -105, 0x89f79af76d2a45fe -106, 0x71583fd8a837548b -107, 0xee57269c261511f5 -108, 0xa5ee8f3b128c5d1 -109, 0xbb64c20ed0765a17 -110, 0x9d4790ab2eeaf7e4 -111, 0x742f3db806d9e98 -112, 0xb81ec97aed6a0d1b -113, 0x41808b34f6a8a23 -114, 0xc20913af175dfd4d -115, 0x834427db263b22bb -116, 0xedd9c632e611828a -117, 0x10eac8524496f571 -118, 0xd76091b97eb00ab7 -119, 0x111298ae9fe95666 -120, 0x5824b2e2a6719c43 -121, 0x6e280ec539e934ed -122, 0xf74fd832df90083e -123, 0x8fee6d0f241c2e97 -124, 0x4244f331c2f19c3c -125, 0x3dde75a845cce97f -126, 0xe35bb8e635a9915b -127, 0x39d2943037f7932e -128, 0x1fe2d134201d0970 -129, 0x49d00b63c749b804 -130, 0x960c2942cd4e4e04 -131, 0x8dd8e009dbc0435f -132, 0xcf493495c3a055cd -133, 0x8f7b5a1c0f9fe9cd -134, 0x49d5f90374641a25 -135, 0x69b3932073d3524c -136, 0xd170603e7de84ee2 -137, 0xa062ba3ed3539948 -138, 0xf5861cc5b5d56c82 -139, 0x5e914998a30c7e76 -140, 0x8d77f2ad1503c0f1 -141, 0x980b6a9e3b4181fb -142, 0xd9299cd50694c084 -143, 0x253dc0f8f1cec4c5 -144, 0x68110fb9d1b3e695 -145, 0xe8f3120d0aabc461 -146, 0xb066e7df0dfb042 -147, 0xd29ce0f797e6b60b -148, 0x6a569bb7ca33bd42 -149, 0xd46e08b2dc2385f8 -150, 0x28c61d11d055767 -151, 0x5d73aa3d1a2bb725 -152, 0x1421191e1c14829a -153, 0xa711bfb6423df35e -154, 0x461af97a86308006 -155, 0xb3e1018ff3519367 -156, 0xf19cf866a268ef2b -157, 0x207715eac9199d1d -158, 0xdd621c410975b78c -159, 0xf390aea68683610 -160, 0x617a2d107a0047d9 -161, 0x6e05ac416e5bebf0 -162, 0x7d253e70506c1bed -163, 0xf9f96f4a7dd53810 -164, 0xc693b29cb1573f73 -165, 0x4f1146b0020ea544 -166, 0x45140608fbd40579 -167, 0xdcf57219828ce6be -168, 0xe19d58cca37b5b32 -169, 0x82bda95b2a161235 -170, 0x5823c3d8a2b6c9ba -171, 
0xfeb2e74092fdf89a -172, 0x50e1ad1abc8f869d -173, 0x2ec63d0c105eb8da -174, 0xe14e1c4845a3264a -175, 0xcff53670455eb6aa -176, 0xaafaccd24619fa3e -177, 0xf55a988486e2422a -178, 0xecfba16a90ff4d04 -179, 0xbf8d36c2f644757a -180, 0xdc56ed75a0dd6249 -181, 0x3f45023eff17c3bb -182, 0x2428bbfe90023fab -183, 0xab892c611adcb70c -184, 0xb6f13d8c0c2b9d74 -185, 0x2ac3fb11d224f2a8 -186, 0x65433dcfae2d9351 -187, 0xe906859ae4b45f82 -188, 0x8fb7f5f093d76a3b -189, 0x940dd290b5e88d1a -190, 0x31b27d21bef116e7 -191, 0x86a964e2c83b5296 -192, 0x85ffd17bc079a9e8 -193, 0x16c47c724e7ab7f1 -194, 0xfb6098a9867e7d7f -195, 0x9246fb69092c6cb2 -196, 0x1a4033572760f32 -197, 0xc5cc568a8b273b84 -198, 0xfa6f9f2fbdd44abc -199, 0x9701b8e087718ba3 -200, 0x51d6a7dcf73f8f3a -201, 0x30008172cc6a972d -202, 0xac2ab49a5ca6ac81 -203, 0x31f28ef79461e54c -204, 0x93e35a8da8cc6132 -205, 0x9a2c58beeba3d5b9 -206, 0xf6615c1de266ac39 -207, 0x127ff9f8166b766b -208, 0x7ffe380e80a69556 -209, 0xbe7d2c228e1542f7 -210, 0x2d5ebb4e50ba1746 -211, 0x63585761ae1bf684 -212, 0x1019eb5cee022fea -213, 0xb9d3540ab58da30d -214, 0x1677f4cb45620eb9 -215, 0x6524baee51783822 -216, 0xdf9f2ddcfabb0adc -217, 0x78e8acc43b287935 -218, 0xe9a1974e999222b5 -219, 0xc41324ec2291e780 -220, 0xea52abc9ecdcbc9f -221, 0x209d7bcd46ec6b04 -222, 0x12d504c09803db2e -223, 0x1200e6bf21475d81 -224, 0xde6d3c2b35fd2cfc -225, 0xa2526900ac33bd3c -226, 0x7f1f5290fc432bc5 -227, 0x29ddfb380a3d69c8 -228, 0xac79cb6942a2909d -229, 0x516996685b67a92a -230, 0xb5fc39041cb828bb -231, 0x75d9d8ca0644a276 -232, 0x81e98b76be92a3e9 -233, 0xca27888fafe12179 -234, 0x17be2ae039925765 -235, 0x9429846c0e6d0342 -236, 0x327dfd50439815e9 -237, 0xcee20cd7bc254aeb -238, 0x7d250389f453f29e -239, 0xfd1b232a85c95569 -240, 0x2ed55fac80f3e9e9 -241, 0xf6886c20417a1be7 -242, 0xcd08e61f0b0fdfde -243, 0x7b33e34da5c27bff -244, 0xd043c4b7d5603dd5 -245, 0x9a544e4c70a3b686 -246, 0xa7b60398c381f771 -247, 0xe9e7a3487c4bd4f2 -248, 0x10b58fdfe1ff112c -249, 0xd5c1c9748c0f4ceb -250, 0x61be9d09159d54ff -251, 
0x5356f51e8239f510 -252, 0xfe7889d9b202ecef -253, 0xc7fc19ca5d263d5d -254, 0x7c4c07e61dfd9f69 -255, 0x6c315fe5015f300a -256, 0xe0a5bc00039747b4 -257, 0x16397fdcf829ee80 -258, 0xb55aee80d16a5169 -259, 0xca0609944d007eea -260, 0xcc982249f65a02ce -261, 0x528161feb149c148 -262, 0xcbf08ba49b41c006 -263, 0x39af1ff0b6f14138 -264, 0x5cc036be69799aec -265, 0x6adde125b1db21c5 -266, 0x8a99d83d6b613b67 -267, 0x1cd43fca9451f74c -268, 0x682dbb26ecc96365 -269, 0x13b4be2ceb43e3 -270, 0xbe8fbc3b6f4f581e -271, 0xda148a2f4bda5719 -272, 0x239106ca3319f393 -273, 0xb42b4dde641f0dd5 -274, 0xd233cfdf4cb0af74 -275, 0xfb5919d905589afc -276, 0xd802a8860c10b66a -277, 0x6c923e1d00e7b5bc -278, 0xfacce1134f383b89 -279, 0xf9570abda7a6d553 -280, 0x80f0f9796a208f18 -281, 0xc0e1df5280951c57 -282, 0xe9f143f08257bbe0 -283, 0x79e4c6463123d588 -284, 0xdd2118583f2b1684 -285, 0xb399ff5f2329fa18 -286, 0x4b3e9ebae96f813c -287, 0xc484dbf247787384 -288, 0x921865eb97603f2c -289, 0x18063c68e257d300 -290, 0x643181f345e7fc26 -291, 0x12e0b0e8eadf9fa7 -292, 0x79e613fe73dfa354 -293, 0x6db4c59203b7217a -294, 0x6c7a0e9ba6139eaf -295, 0x9617c7ac4e3f6d97 -296, 0x1f68a7b4fb1b4b75 -297, 0xef0b7ab24944f466 -298, 0xaf1dee1f4be1bc89 -299, 0xd2e355c959f5fd8d -300, 0xe594c3fb95d96efc -301, 0x9554766ca3342906 -302, 0xa4bbdc77d12842c -303, 0xb62400211ee489a8 -304, 0x91abadaaa3bbe67c -305, 0xd371eeb91deb42bb -306, 0x883bab35cbd2b6e5 -307, 0xd030c3d9411a9041 -308, 0xff3c110a858ff000 -309, 0x59bdf5ca47d0bde7 -310, 0x2bc80fa3cdba1853 -311, 0x6444ccb652662cb8 -312, 0xc0c7e256b9e90339 -313, 0x70714ea9c9d72302 -314, 0x96a0142f9d897d27 -315, 0x209a9097c5a91ef7 -316, 0xb9e33afc5171e009 -317, 0x47b37af433a58d40 -318, 0x30cc4ffbfa831d26 -319, 0xdcea4a85ff815466 -320, 0x907d5bd027f2e5cc -321, 0x7c081f6852e04a4b -322, 0xe61950749c1d502b -323, 0x1604e937ee69834a -324, 0xb2372d952dd25309 -325, 0x53f6a5b834c72577 -326, 0x2ce7a74395e0b694 -327, 0xacbf9ab4fe91f225 -328, 0x5ce1e63d3a2bb90f -329, 0x54740da3a5ed139b -330, 0xf194ddb39f29880b -331, 
0x3305374f5d8ec08b -332, 0x831dd0164927ff4a -333, 0x625baa78e4458cf -334, 0x29d27dc0a4a71152 -335, 0xe227bae9a1401034 -336, 0xca0c209831846b2b -337, 0x8e8cc54b08b5a411 -338, 0x38f2b4acaac27db6 -339, 0x8ec88baac814e86b -340, 0x31c08e46b007bde -341, 0xb686c02722794c09 -342, 0xb77cf8fc682e3907 -343, 0xa56334e7f606f4b2 -344, 0x9c80b127bddd5f4f -345, 0x12df14834cd858bf -346, 0x3f14762a9cf5fb9f -347, 0x930a70941ef5779e -348, 0x64e96c849c30c080 -349, 0xfdf53bfba1300484 -350, 0xec7a9363c21bc616 -351, 0x26e9fd6a115ecb47 -352, 0x9707a84b5bc77fbb -353, 0xb23b2737b20d5903 -354, 0x22f4825ae80f6501 -355, 0x500644b12be6a01b -356, 0xb746645b2af082db -357, 0xe6af051f697892f8 -358, 0x577c724248a1cfc6 -359, 0x3d2b6a434c84eed3 -360, 0xd260f5efd7328314 -361, 0x95c16cc84bb3f55c -362, 0x7a01b2e4e0e80ca7 -363, 0x41930c3ce70a0935 -364, 0x1299bccf39d4e110 -365, 0x494883ba1a8a87f -366, 0x9478ecfe2d918e60 -367, 0x30ec9a5670cda8af -368, 0xf9bc877e833e2b99 -369, 0x1b83a0acfbb4a8db -370, 0x73bc1740c0d18880 -371, 0x65086ca9773cb3e1 -372, 0x3b78c3ccd63cff2e -373, 0xbfae748795acfb31 -374, 0xa4c9d5d56a15ba20 -375, 0xb9cb41721e52b71e -376, 0x1532f15d4dc47748 -377, 0x5a4d647a4b9ee632 -378, 0x8513c7c5a50898d9 -379, 0x6d3d98ccd5461b2e -380, 0xa65e99be2fe98d6 -381, 0x31abc8855334a0e5 -382, 0xf1ed22a661dca5b8 -383, 0x299e2b63229e03be -384, 0xda201a06687bce48 -385, 0xd27794b302142c55 -386, 0x642bd3e1c7898a9d -387, 0x777f1ff00afa1a87 -388, 0xd2f1c84fb3877baa -389, 0xae417583289191fd -390, 0xd641f1d88e0e2d55 -391, 0xc1f1d98fb5d18ebf -392, 0xb0f72aecdadce97b -393, 0xe9b8abc764f6018a -394, 0xd2a37cff8e890594 -395, 0x2dd70d631a528771 -396, 0xbf8ba0478c18e336 -397, 0x1630bf47f372ce0a -398, 0x6d04ea20dc3f46b8 -399, 0x6591881bf34337f2 -400, 0x33c149c7eb5b4103 -401, 0xf01a8c9857c86748 -402, 0x184348cdfc16d215 -403, 0x141168b253d2ed7 -404, 0x52aaf012ef50a6f1 -405, 0xfda1722387e16f4c -406, 0x43c30f57d6c038fa -407, 0xd4a8611f5f96d214 -408, 0x2c512ce17e987f2c -409, 0x961ce450f0fa2822 -410, 0xf55a506ec6cea9cd -411, 
0xb76d694d9c7f5ef6 -412, 0xfb029216dbd8e988 -413, 0x93162501896a0081 -414, 0xfbbbd2c5ab300f5c -415, 0xd648b6da7387d491 -416, 0xc73b4697471d9d98 -417, 0xe37412bf1c93ee76 -418, 0xa1a96d96570e6637 -419, 0x5b3ab4f82428f65c -420, 0x873d849b188aa36f -421, 0x39fbee0ffc9fa9ff -422, 0xc70d21b744d677fe -423, 0x2b8a43c23043d209 -424, 0x93c33eaa37370d16 -425, 0x8930ac1880f2b0ef -426, 0xac01d27707036af0 -427, 0xc2af3fee504343a0 -428, 0x1c1dae2ad5535d97 -429, 0x9ffc21804b76a480 -430, 0x69f903412cc13563 -431, 0x9d3c4e2759a0c47d -432, 0xb1a8f894be6302b9 -433, 0x95e1fd7951479506 -434, 0xbb9e6c03cd4ae8e3 -435, 0x85206010c9b737cf -436, 0x767e813694d6238c -437, 0x4969af329ccbb30a -438, 0x3aa9af1075aaea5c -439, 0xb1ff519e8118a993 -440, 0xb21a23a3c91180fe -441, 0x320b24582ca3fd88 -442, 0xf8ca56415fb4e453 -443, 0xabd0899c07205e77 -444, 0x87fdc7a44b4ad50f -445, 0xd75744911641a278 -446, 0x7c8c9a65df6fcb95 -447, 0x79d785e3c7a5b695 -448, 0x421e4565ba1f592f -449, 0x27f87eb2517835cf -450, 0xb62cc4297441c83e -451, 0xd817a80ac815ca6d -452, 0xad84388130df2aa8 -453, 0x5e6b1640452d6ac8 -454, 0x936285e15edce2a3 -455, 0x903bccc4969768e8 -456, 0xefc2cb7b109d3140 -457, 0x633e9dfdda2d903a -458, 0x2a2f3225925678a1 -459, 0xe07eac91a27f8547 -460, 0xe50ced40eda78cb3 -461, 0xc5b22500e1c7441 -462, 0x32becf61bca3aa72 -463, 0xa2e37c4b30671344 -464, 0xc9f1c1910f45d544 -465, 0x9b50333b2dcdf730 -466, 0x310bfd53a1684b94 -467, 0x1e1dc21e66ac6455 -468, 0x81876c2bfb1ed5a1 -469, 0xd0c54a3e25eadc7b -470, 0x3791b6fbbd5c7ba0 -471, 0x133be57356c599fc -472, 0x8d1148eb8e83fdea -473, 0x311aedba0d8b42cc -474, 0x1142ae52745f94bb -475, 0xc5f4ab2fbde8c4a3 -476, 0xd23be827b5b24f6d -477, 0x65f95194cd122715 -478, 0x4b48969d73125922 -479, 0x46f165052b8ff988 -480, 0x5c689f94b9275ff4 -481, 0x93b03823ff2d536b -482, 0x871f3775aa4e3523 -483, 0x5af829f7cc0f66a5 -484, 0xa32e05739cbeac8c -485, 0xacff1856ddace0fe -486, 0x8eeb5e7f991a5322 -487, 0x6325c2720e0dbdea -488, 0x9fb817bc4fdf5200 -489, 0x9786f0d850e43d78 -490, 0x571f76dd7f9fb77a -491, 
0x4d9e94e181cbc63f -492, 0x8bb632d3376c547a -493, 0x9cc26d9efd1c88b9 -494, 0x9c5d49579df52b0b -495, 0x6201abf7e1cda07b -496, 0x90d68f0c6c884963 -497, 0xfc5b66188ef7f561 -498, 0x6d9303cf2e0e0f95 -499, 0xd7cfcff535f5ed07 -500, 0x14d1a1228daa4ac6 -501, 0xe00ef5762f66ae50 -502, 0xf113a79471582978 -503, 0x430985281785dc7a -504, 0x31914108c206ed5 -505, 0x7ba6707b6419971c -506, 0x2ec63b033ce112e5 -507, 0xf8bcd36ced3b41e3 -508, 0xe5cf908c8010414b -509, 0xf5ee224b7c703e30 -510, 0x9a9733af0b12338b -511, 0x83e18cc00ace34f8 -512, 0xd52cff39e23008b8 -513, 0xa700578136b9c0c5 -514, 0x3fa179d32ac51f99 -515, 0xef2d5eab6d4ad380 -516, 0x709024a5abd032df -517, 0xc607c7ee349ede87 -518, 0x803d784e9731eb5f -519, 0x2ef06f4ba769282d -520, 0x4bc1dca1e9f07eb9 -521, 0x930c958a7a72f94d -522, 0x249bc8db2cc7a3bf -523, 0x3845305798f9a5d -524, 0x6f137eca9ab6f948 -525, 0xc31f5a963d31bd67 -526, 0x9d39693d5383626f -527, 0x52fb41c335a8b98e -528, 0xb79d1a29a06006ec -529, 0x7c0926a7a3eda2cc -530, 0xffdf5214406fd53e -531, 0xc6aa02a7e94282b9 -532, 0xd4a4431b4aa301ee -533, 0x4271cc0f9420d3ab -534, 0x26fccd7cc7fc2485 -535, 0x330594bb945b8d5a -536, 0x6ea8eaad12e5cb8c -537, 0x831c3467726bede3 -538, 0x31d1eb10017eaa61 -539, 0xc7aa75e41508f5cb -540, 0xde51810f0cadd0b5 -541, 0x50e5b3e73692f80b -542, 0x82107ec55636e188 -543, 0x9828ef175d843ab4 -544, 0xb8edc6a860dd421e -545, 0x25c0c138fd537ac3 -546, 0x47e72a771e8eb563 -547, 0xbb0f8c5333f4a2cc -548, 0x91750d2fb9b2d479 -549, 0xe662d8f6fe38df36 -550, 0x72a6d879fb5619f0 -551, 0x6817c7878dcbf077 -552, 0x4e7741cb484661e8 -553, 0x3b3b3ba0be5711bf -554, 0xa6989f5d25868765 -555, 0x43c276398997e4e0 -556, 0xdcbe16a94da28870 -557, 0x454936980a699c99 -558, 0xac614bfa8f0266c6 -559, 0x9174841392e213d5 -560, 0xa0e2acffc5fc9d1f -561, 0xe53a08a7a0e6521a -562, 0x2b845cf7c24172e0 -563, 0x265a4fc5f7adec0d -564, 0x1f34fbe5f1e49420 -565, 0x139181f6fb647f20 -566, 0x88c35d46e2fcd05e -567, 0x2a6d5b55903c0459 -568, 0xcea28eb621ad7bf1 -569, 0x5c9cdc13e7aaa30 -570, 0x5fe63e14746e7103 -571, 
0x7923e53d73835db9 -572, 0x376e661210bf1b06 -573, 0x5b1cab85450efdd5 -574, 0x3908dc096c70b452 -575, 0x4825e303cd1f396f -576, 0xed476bfd702957c3 -577, 0x6acc013aff5db743 -578, 0x62c80b776343d488 -579, 0x9c75edcd5b012697 -580, 0xaa053362a3b9770a -581, 0xa907e236c7c07e94 -582, 0x15b2c380451692c0 -583, 0x94f79142697bd61f -584, 0xbc657d31ea98d44f -585, 0xcbaa5e52517a1f5e -586, 0x96aa2e44a7c4a03f -587, 0x216d3c66db2b515d -588, 0x157001807e3ca88a -589, 0x52b3a596bdd3859a -590, 0xed747e7fc5e3adac -591, 0x78fd765ddb2c448d -592, 0xe53dc7299ed8614e -593, 0x75ad41fb1d7a790a -594, 0xc14f6b944b0e6cb1 -595, 0x7c314b69fce3df1c -596, 0xb56d82eb740d7abc -597, 0x5132a93c41251fdb -598, 0xe3ce35bd2a82f958 -599, 0x440571a981c722f2 -600, 0x194cdfd9f186bc9 -601, 0xb89e522a5db00939 -602, 0xad35f339f68df3c8 -603, 0xa82ab18420322293 -604, 0xaffa6df9b72b27c4 -605, 0x9615694d23beaa2c -606, 0x1d82ebe563abad91 -607, 0xab50ef65fbd94385 -608, 0x1b070dbd70a9a14 -609, 0x2ececa796abbadf0 -610, 0x6bbeafe9e81ab2a2 -611, 0x60dcd0d2a9b76914 -612, 0x1e748039ef05c33f -613, 0x6d4d17f2213ccdff -614, 0x9fa56132957bc987 -615, 0x60a17185de2428eb -616, 0xb56038ddf306479c -617, 0x3b1db5df92d06d8b -618, 0x24d1bba8bdedf580 -619, 0xbfb7e6740ebaa4d9 -620, 0xab31c4473e46f61d -621, 0x6deb3cdd8fd5869f -622, 0x23032e47746d72d6 -623, 0xa9e72d734e10f2e8 -624, 0xbffd199b6157bc23 -625, 0x29f8254df273fb62 -626, 0xb076142130ee55ec -627, 0x5b0b08374126c309 -628, 0xea4536aae979521f -629, 0xc064e7abec91a174 -630, 0x46133ef80c59d935 -631, 0xf0227e2da1b14160 -632, 0x675a76641e1af5a -633, 0x2f50a069b33d198c -634, 0x3ded5a65e1d657eb -635, 0xbb6999b020694f6b -636, 0x86b2f2b33487aed7 -637, 0x76e14e85f8bfb4cf -638, 0x38f7f1e44bd4e0db -639, 0xc1a7d41b7e80d4ae -640, 0x1dfaaf80bbceb42e -641, 0x3f51c11497720c2b -642, 0xce6da1415ddb8b80 -643, 0x7377d8bcd359b5f3 -644, 0xe077208f3f810aca -645, 0x9a06a8a2dacbffce -646, 0xca1f99156b09b735 -647, 0x2ff9a93064d91451 -648, 0x50f3ea93f351a7ef -649, 0x606fceccb07054de -650, 0x7e83d6d2f8f6685d -651, 
0x78f3995291c5d407 -652, 0xd28d2460e22d0228 -653, 0x2c5636f68a0054dd -654, 0xd9fafb1c56c8f6cb -655, 0xe39889b5f9d74464 -656, 0x1355372bf5db2cc1 -657, 0x26768426b9ac323 -658, 0x4af1dbdc1111fd89 -659, 0x66973587943b927f -660, 0xf86f5f50684dfb1d -661, 0x1247d574ff79b534 -662, 0xc8039f3259210fe2 -663, 0x79b573235c92a9f5 -664, 0x213f642d8450e2f0 -665, 0x5db7706973376566 -666, 0x6182c12e69b373d7 -667, 0x3e5ac47300aec07f -668, 0x4b5b6c57b1574376 -669, 0x6b7fcceefd56b17c -670, 0xf656c3455cb9d4b8 -671, 0x7577e2e13329721f -672, 0xf33c0c53ce956e8d -673, 0x7d0f328ee356174 -674, 0x10ec9a168088686e -675, 0x71ef1776d062dfa -676, 0xaa7b590a488a6bc4 -677, 0x38612b6dd8049a1c -678, 0x939045e36874f731 -679, 0xcb9d1d74c56d5ac9 -680, 0x54f1c1c8fef1d8ff -681, 0x3ee4b85c8c7e939e -682, 0xb9b4608e019f352c -683, 0x79d4701275d12e6a -684, 0x2632a2d9835c7f19 -685, 0x1662cd9fba293692 -686, 0xbcb70265115ee944 -687, 0xdc43fb9761468604 -688, 0xe3eec4e7d3871352 -689, 0x829531753226989d -690, 0x2748cc67f540e074 -691, 0x39c4af25d607837d -692, 0x741a243f4cb5df99 -693, 0xda1353287e18b49a -694, 0xa6735689d751ea74 -695, 0x46326d587340ce0b -696, 0xc18531df4550012b -697, 0x6f7901e05dd4b818 -698, 0xfb966afc4c001d63 -699, 0x6dc10fca67a9cfdb -700, 0xd6527ffadf0feaae -701, 0x3b900172045e25d -702, 0xb7dd594cdded6a46 -703, 0x6602aee7ec1599fc -704, 0x7fbf12f23747546a -705, 0x32e63f662bd2de0d -706, 0xedf47770b67ed641 -707, 0x331bef83481c5c2a -708, 0x8fc4256fdf05158c -709, 0x98eba48dabccf5e0 -710, 0xdbc2f2cdb7b1c154 -711, 0x7777755616517ad3 -712, 0xd473c147d2628ac1 -713, 0x861e15d1d760b5a7 -714, 0xf4d25926405ecb07 -715, 0xb7739c69effff86e -716, 0xe97fbafa6f96830c -717, 0xf13e8a334e8bede1 -718, 0xcd60010cba4ee4f9 -719, 0x1f537ac2b82e6008 -720, 0x1fda8d781a89140a -721, 0x9dc204f3f4a463f0 -722, 0x456dcd18eb56a1ab -723, 0x629957bc87bd16a1 -724, 0x2c8000ddb8c75253 -725, 0xc31dae9ec8449284 -726, 0xdac05c8baa2b691a -727, 0x21ff7be9ffa3e7ac -728, 0x844f4b5ed4ee08d0 -729, 0x651f913fd636c994 -730, 0xca3e71a2110b2d49 -731, 
0x7709bc42253ed09d -732, 0xbb164d45b6569d43 -733, 0x90ec2f040c20a112 -734, 0xfa6e77e9166f5be4 -735, 0x6b6d12c1842d587d -736, 0xfcd7ff8466e25e2a -737, 0x6a5a2ed8bd971297 -738, 0x2ec35f6bba5adcbc -739, 0xc83676e16651249a -740, 0x458f6064cefe10ba -741, 0x90d54d527e6cd028 -742, 0xa5613e88db27c388 -743, 0x331e0c7d85aa1abc -744, 0x8cee4977e210358 -745, 0xfcae379aa6cbff8e -746, 0xd1407afc97a57e86 -747, 0x1fab25c864f094ae -748, 0xd914864a63004552 -749, 0x4214d226a20f1384 -750, 0x3f4e0d80c488b715 -751, 0xc5ca2f654024b7c8 -752, 0xc1e27a124e7c821c -753, 0xd890a915ffc7918c -754, 0x22fba040ce51a9f8 -755, 0xbf61cebd8891617a -756, 0x7846609ee228e319 -757, 0x536d1854375509b8 -758, 0xbbfb45fc6e666f50 -759, 0xd85b4c0527f9d7d6 -760, 0x528cc9c7fa2a84c8 -761, 0x27a1baece647f2cb -762, 0xfddf0cb92fe09dc3 -763, 0xeb5008fe965d8d96 -764, 0x4a3307937eb2e5c8 -765, 0xd07d74c240c6c363 -766, 0x16f62290179d1bbf -767, 0xe99c9bcc9cb1ece7 -768, 0xc64f9be03c8a93be -769, 0x32659effaf666c1f -770, 0x4bb228cfb30b6672 -771, 0x98764870842068a5 -772, 0x5b12ef2d2cd8bdcc -773, 0xbc79d1c1b41f28b8 -774, 0x97a517cf3279fc9a -775, 0x34ffd46c1d4d6025 -776, 0x9c302307ee25c8f0 -777, 0x399604eed1f18a8 -778, 0x1c9b813c2043142a -779, 0x2944ea5e55267fe9 -780, 0x5a8a9f5e728ea667 -781, 0x30c8440adb804a0 -782, 0xee0e6b627099a937 -783, 0x3d50757ada3c52da -784, 0x4548916b32c813ab -785, 0x602a186fe5bf109b -786, 0xf0d440a2227ba304 -787, 0x5a10d4e0ca9ea32b -788, 0x6e5eb90da13ba64c -789, 0x4c6af8fd04241ab2 -790, 0xf9eb31d26e093006 -791, 0x5d674878839fe3ea -792, 0x1562b55b2484e47c -793, 0xa87188c099c1cb61 -794, 0xb7736b8aa02a3392 -795, 0x5f4b301125abb20f -796, 0x361d566984637f44 -797, 0x68c4b3feac8bd0c3 -798, 0x7066c634dd2503c1 -799, 0xfecbf7c9441eb6ea -800, 0xdbc26ae0fc81436b -801, 0x9ef3e2b48252e7a4 -802, 0x31a49b4c339b37c7 -803, 0xb01b2a83cf346cf4 -804, 0xc24dc2347f82fbe3 -805, 0x134cad272dcd410f -806, 0x61260742823ba59c -807, 0x53ac4c193a97c730 -808, 0x9207c9833af34b52 -809, 0xa72e7ee77078d1f5 -810, 0x2e6f6e1b05936885 -811, 
0x783b99ce5dbf9464 -812, 0xfdfeb6f0d027bb44 -813, 0x40eeb27096f92b0 -814, 0x5ef96ff5d4a4521f -815, 0x5595806ae873718a -816, 0x67d449eecf4ca1c3 -817, 0xde837ab611364f3f -818, 0x7034c24d2b139be9 -819, 0xe21166603e0a9c86 -820, 0x935694435c1f0d51 -821, 0x6cb3bec90c126088 -822, 0x4096ef662b7a9f89 -823, 0xd2d85b8d238d8c15 -824, 0xa4ea533ce3ec59b2 -825, 0x3654729d80a2db29 -826, 0x214c4cc3906d29d4 -827, 0x201c447e7588e373 -828, 0xe8b8f0ae25f683eb -829, 0x6744aaf5754e38af -830, 0xd1ffb10d6f27a061 -831, 0xe536733a7b3a6c30 -832, 0x39f0f66e47cbf2c9 -833, 0x856a9593526fde2 -834, 0x2e2a817a0098ea4b -835, 0xc5e1eeb551a0e3d3 -836, 0x3f21e2f5e2d50b2 -837, 0x906af56c66dd9f8c -838, 0x30f6dbd70329fac8 -839, 0xc443dfddf3c01a60 -840, 0x7ab85d9aa9675470 -841, 0x8c9080bd39717bfc -842, 0x4b1ccdb3c3597f6f -843, 0x74e2542d70ab5d67 -844, 0xbb3d236aad00f74 -845, 0xcf3cadf9a2804774 -846, 0xe851d9750e42bd07 -847, 0xc0ad82029b1c371f -848, 0x7ee119eb552d6c07 -849, 0xd8024049bd1d784a -850, 0xfa67a899760363 -851, 0xaa7c2f438b178197 -852, 0xc473674a47ffe064 -853, 0x539fbe3fc674c270 -854, 0xdb48484748a76f3b -855, 0xc73b2b092060d -856, 0xa1d2a15345016f5d -857, 0x4d0fe8599f9bba47 -858, 0xa0edc275e6f8f1d1 -859, 0x40590a8655bc8d72 -860, 0x35b4223161f05f75 -861, 0xa04c0c0f616752dc -862, 0x7f371ed2ca45432d -863, 0x2ff1a08f75ac6438 -864, 0xe2dc5c3682282f48 -865, 0xe1e4179fa98d9013 -866, 0x8cb083d6843a73d5 -867, 0xb4c2b5921b706854 -868, 0x738e14c0e7352445 -869, 0xcd2b646f91afd8c7 -870, 0xd5779a5b57a264fd -871, 0xc39ff855586c7d07 -872, 0x3e3f0098c631a859 -873, 0x644e02fae032110 -874, 0xa8834613c0a45278 -875, 0x69482f2c08e10657 -876, 0xe4ee475bdb87e69a -877, 0xdc1ef7b25c0d0019 -878, 0x88a3fa2be18d8744 -879, 0x60a02e0b21c5bec7 -880, 0xb6867b88aa19bc1a -881, 0xb599409affcf10eb -882, 0xaeaa1778a5e59daa -883, 0xd7a91a52c16663e3 -884, 0x93cb269affe07b1c -885, 0x841b6ced3a4ba815 -886, 0x84541768e1540a5c -887, 0xe3943c84f83b3020 -888, 0x5de366fbd7b45258 -889, 0xd787cc3bde91a661 -890, 0x814071446edecb57 -891, 
0x15d8c602a1141514 -892, 0x72f07bc8002d1d0d -893, 0x4a8bd8dc9a1f0f3e -894, 0x8723796ae0f20d35 -895, 0xda7283c2051f73b2 -896, 0x2df0cc247f90bd3b -897, 0x79a8522b968f990a -898, 0x951ede190c8b9d02 -899, 0xc512f1a5b14b018a -900, 0xf0e3ddc03b9a4259 -901, 0x8cf4a35ad312e15f -902, 0xebef28926b11094b -903, 0x5628ba687325921c -904, 0xc3aa75e57edc49c3 -905, 0xc38382fa98e762ba -906, 0x8d209e896285848e -907, 0x2c7d6adf592b4a3e -908, 0x62de48e36f8338f3 -909, 0x4a752741e00de30e -910, 0xf7855b70f1f6ec2b -911, 0xa505fa4428199e43 -912, 0xe8b6b423b826bbac -913, 0x4bd1206cf8786d05 -914, 0x6dcf040391fe3bf4 -915, 0x913f500f87e1bba3 -916, 0x5acf775aa180a5d5 -917, 0x74dd28d9432ce739 -918, 0x996c2ff2f0dc2495 -919, 0x73dbfe6c56effe4 -920, 0x56fddd25196f5e40 -921, 0xe87810158f5b7 -922, 0x7b8795e996383f1f -923, 0x9ba5ee7c777c4c82 -924, 0x17ce3908d270fe1c -925, 0x3df9e613c1aedfae -926, 0xcdd26871b32fc8e1 -927, 0xd71cb13afc633979 -928, 0x63427c8ea9b1c79e -929, 0xd070f7664d3b405d -930, 0x46f2a9e32d9fb769 -931, 0xb4c3822a45e9fe9b -932, 0x8ba30b97fe6f5ec7 -933, 0x70aa554ee2fc11f9 -934, 0xa80c99dbe0cfcfaf -935, 0x36d9250cb2d68ed -936, 0x2995e4b9e1cd1db4 -937, 0x4b3803ba57fc570f -938, 0xae3959e7d740eaa5 -939, 0xb4cbd6662adbae08 -940, 0xae46576446e8dbc4 -941, 0xc4828e008a9a8a54 -942, 0x145d7db8e6554b2f -943, 0x1b1b8916a730c371 -944, 0xdaf84b2bebe31963 -945, 0x5b59b80ef23a2403 -946, 0x9180c7e89cab6fd3 -947, 0x80e58f5411babf34 -948, 0xa06cf55185b9b005 -949, 0x13b2c798424173ad -950, 0xc510f8e706311d49 -951, 0x1f974b83b6046d3a -952, 0xae6e8e85e822d1c3 -953, 0x66f2c8dc3274a31a -954, 0x7e04dbcbf65bd377 -955, 0xabf41ede01ec20a4 -956, 0x5efa0948f6bbb2ea -957, 0xbc91c99d8592255 -958, 0xf6d6917911d86d75 -959, 0x85ce273d54e9097a -960, 0xbdfd30f2420fff92 -961, 0x8802f02f610b537c -962, 0xd1d70037ed543229 -963, 0x908aaf97f9693a46 -964, 0x1f6cfeaa0834d53a -965, 0xa453fd1648ce04d2 -966, 0x2c38bb85ebc64af9 -967, 0xd2daff551c90c4f8 -968, 0xae5a0d949797d784 -969, 0xf0974c8552ac9593 -970, 0xa10b70499f65c693 -971, 
0x39a449ebd594ddff -972, 0x8ea090f2b17b9b49 -973, 0xc592de318090fd83 -974, 0xb63e4fbc467b6912 -975, 0x57a0c1c5ce0e4dcc -976, 0xa7c517cf3d436b35 -977, 0xef6dcb0f3fad038b -978, 0xaf4fb60315b91287 -979, 0x5e0776f67304f331 -980, 0xe927753b8e6f7932 -981, 0xd3df2dd92559e304 -982, 0xdaed52aa6af44413 -983, 0x1b59f4dac1e181f8 -984, 0x4a73c2293877ef39 -985, 0xca45d0d015fe44de -986, 0x4659c8b7853735a8 -987, 0x12de6466bdf8adeb -988, 0xaeea857a09bfec15 -989, 0xcc9cf4b3c0b88a23 -990, 0xa44ae52396a5e1bf -991, 0x5847a724305d137f -992, 0x8f4d4de223956182 -993, 0x58254dfada867a8 -994, 0x900a98222c2f339e -995, 0xdb575260935d51d5 -996, 0x13fb4bfbbc0d7b53 -997, 0x62213850186bb92b -998, 0x2a34823312c00388 -999, 0x6148329042f743b0 diff --git a/venv/lib/python3.7/site-packages/numpy/random/tests/data/philox-testset-1.csv b/venv/lib/python3.7/site-packages/numpy/random/tests/data/philox-testset-1.csv deleted file mode 100644 index e448cbf..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/tests/data/philox-testset-1.csv +++ /dev/null @@ -1,1001 +0,0 @@ -seed, 0xdeadbeaf -0, 0xedc95200e2bd66a5 -1, 0x581d4e43b7682352 -2, 0x4be7278f5e373eab -3, 0xee47f17991a9e7ea -4, 0x38a7d2ae422f2e2c -5, 0xe2a6730a3b4a8a15 -6, 0x1588b7a841486442 -7, 0x13ad777246700504 -8, 0x14d157e0f5e18204 -9, 0xd87c22a7ee8c13f1 -10, 0x30cc389ce3542ba1 -11, 0xb8a53348955bb2e9 -12, 0xc08802e3c454f74f -13, 0xb444f627671a5780 -14, 0x4b6dd42b29cbf567 -15, 0x6109c7dc0bc5f7d5 -16, 0x85c954715d6b5b1e -17, 0x646178d3d9a3a5d5 -18, 0xebbde42b1cd83465 -19, 0x3d015102f6bc9c1a -20, 0x720fe2ec3798d5fd -21, 0x93120961289ceb2e -22, 0xc9207e960a56fae2 -23, 0xa7f042f31d991b98 -24, 0x5fac117415fae74b -25, 0xd0a970ba8dddc287 -26, 0x84b4e7e51b43106 -27, 0x6ad02bf525ea265f -28, 0xcdc7e5992b36ef8f -29, 0x44d4985209261d60 -30, 0x628c02d50f4b902e -31, 0xc7b1914922d1e76d -32, 0xfde99ff895cba51d -33, 0x175a0be050fa985f -34, 0x47297d3699e03228 -35, 0xccf1e9aeaa3339cd -36, 0x9fdd18ebeeaf15b1 -37, 0x7c94c9ab68747011 -38, 0x612d8ef22c1fa80f 
-39, 0x13f52b860de89ab5 -40, 0x81f264b8c139c43b -41, 0x8d017ba4ef1e85ba -42, 0x6d0556f46219951e -43, 0x8ee7b85663cf67b6 -44, 0x2432fc707645fe67 -45, 0xaf814046051e5941 -46, 0x4d432a83739ac76f -47, 0x59e5060d0983ccdd -48, 0xdd20e828b83d9b53 -49, 0x1b891800d7385f4c -50, 0x10e86a026c52ff5e -51, 0xb932f11723f7b90c -52, 0xb2413d0a1f3582d0 -53, 0xe7cd4edda65fc6b5 -54, 0x6d3808848d56593b -55, 0x192a727c3c7f47d9 -56, 0x9659d8aea5db8c16 -57, 0x4242c79fe2c77c16 -58, 0x605f90c913827cea -59, 0x53e153c8bfc2138a -60, 0xed2158fbdef5910e -61, 0xae9e6e29d4cb5060 -62, 0x7dd51afaad3b11ce -63, 0x2b9ba533d01a5453 -64, 0x7e0e9cf2b6c72c8 -65, 0x1cc8b3c7747ed147 -66, 0x9b102651e2e11b48 -67, 0x30b0b53cbaac33ea -68, 0x70c28aec39b99b85 -69, 0x5f1417ff536fdb75 -70, 0x3a1d91abd53acf58 -71, 0xba116a1772168259 -72, 0xf5369bc9bd284151 -73, 0x67bf11373bf183ca -74, 0xef0b2d44dbd33dc7 -75, 0xbfd567ee1a2953ed -76, 0x7d373f2579b5e5c6 -77, 0x756eeae7bcdd99be -78, 0x75f16eb9faa56f3b -79, 0x96d55ded2b54b9a5 -80, 0x94495191db692c24 -81, 0x32358bdd56bab38c -82, 0x3f6b64078576579 -83, 0x7177e7948bc064c9 -84, 0x2cbf23f09ba9bc91 -85, 0x9b97cc31c26645f5 -86, 0x5af2d239ff9028b1 -87, 0x316fa920e0332abe -88, 0x46535b7d1cae10a0 -89, 0x21f0a6869298022c -90, 0xf395c623b12deb14 -91, 0x8573995180675aa7 -92, 0xc3076509f4dc42d5 -93, 0x15e11e49760c6066 -94, 0xe8a6d311e67a021d -95, 0x7482f389c883339b -96, 0xda6f881573cba403 -97, 0xb110ffb847e42f07 -98, 0x2c3393140605ccf9 -99, 0xba1c8ba37d8bdc33 -100, 0x59adf43db7a86fe0 -101, 0xb4fcbf6aa585ca85 -102, 0xd794a93c18033fa6 -103, 0x6e839c01985f9d4 -104, 0x64065bf28222b2c7 -105, 0x6a6359b293fa0640 -106, 0x5ff610969e383e44 -107, 0xa8172c263f05c7f7 -108, 0x62a0172e8bd75d07 -109, 0x7be66e3c453b65ac -110, 0x6a3b8d5a14014292 -111, 0xa2583e6087450020 -112, 0xd5d3ecc480c627d2 -113, 0xa24e83f1eec8a27c -114, 0xa23febd2a99ee75a -115, 0x9a5fbf91c7310366 -116, 0x5b63156932e039b -117, 0x942af3c569908505 -118, 0x89a850f71ab6a912 -119, 0xfeadc803ac132fe9 -120, 0x67bf60e758250f3 -121, 
0x533c25103466a697 -122, 0xb7deede3482f9769 -123, 0x325e043b53bba915 -124, 0x9e8d9e7fde132006 -125, 0x6bacc6860bbc436e -126, 0xb3ea0534c42b1c53 -127, 0xb2389334db583172 -128, 0xa74b1bfbf5242ee4 -129, 0x53a487e2dc51d15c -130, 0xe5a3b538d2c7a82e -131, 0x7b6c70bb0c4cadaf -132, 0xae20791b2081df1 -133, 0xc685c12e3c61d32c -134, 0x60110e6b0286e882 -135, 0x49682119c774045c -136, 0x53dc11a3bbd072e -137, 0xbdc87c6e732d9c2d -138, 0xcc4620861ebac8fd -139, 0x7e9c3558759350cc -140, 0x157408dee34891ba -141, 0x9bcad1855b80651b -142, 0xd81b29141d636908 -143, 0x1ed041a9f319c69d -144, 0x805b2f541208b490 -145, 0x484ef3bba2eb7c66 -146, 0xb6b5e37d50a99691 -147, 0xabc26a7d9e97e85f -148, 0xcba2a3cce0417c2f -149, 0xa030dfffd701993c -150, 0x2bf2dc50582ebf33 -151, 0xd9df13dd3eb9993e -152, 0x31ca28b757232ae5 -153, 0x614562a0ccf37263 -154, 0x44d635b01725afbb -155, 0x5ae230bc9ca9cd -156, 0xb23a124eb98705c6 -157, 0x6395675444981b11 -158, 0xd97314c34119f9ca -159, 0x9de61048327dd980 -160, 0x16bac6bded819707 -161, 0xcea3700e3e84b8c7 -162, 0xaa96955e2ee9c408 -163, 0x95361dcc93b5bc99 -164, 0x306921aed3713287 -165, 0x4df87f3130cd302a -166, 0x37c451daeb6a4af5 -167, 0x8dbbe35f911d5cc1 -168, 0x518157ce61cb10f9 -169, 0x669f577aebc7b35b -170, 0x4b0a5824a8786040 -171, 0x519bc3528de379f5 -172, 0x6128012516b54e02 -173, 0x98e4f165e5e6a6dd -174, 0x6404d03618a9b882 -175, 0x15b6aeb3d9cd8dc5 -176, 0x87ed2c1bae83c35b -177, 0x8377fc0252d41278 -178, 0x843f89d257a9ba02 -179, 0xcdda696ea95d0180 -180, 0xcfc4b23a50a89def -181, 0xf37fd270d5e29902 -182, 0xafe14418f76b7efa -183, 0xf984b81577076842 -184, 0xe8c60649ccb5458d -185, 0x3b7be8e50f8ff27b -186, 0xaa7506f25cef1464 -187, 0x5e513da59f106688 -188, 0x3c585e1f21a90d91 -189, 0x1df0e2075af292a -190, 0x29fdd36d4f72795f -191, 0xb162fe6c24cb4741 -192, 0x45073a8c02bd12c4 -193, 0xcbaaa395c2106f34 -194, 0x5db3c4c6011bc21c -195, 0x1b02aac4f752e377 -196, 0xa2dfb583eb7bec5 -197, 0xfe1d728805d34bb1 -198, 0xf647fb78bb4601ec -199, 0xd17be06f0d1f51ef -200, 0x39ec97c26e3d18a0 -201, 
0xb7117c6037e142c8 -202, 0xe3a6ce6e6c71a028 -203, 0xe70a265e5db90bb2 -204, 0x24da4480530def1e -205, 0xfd82b28ce11d9a90 -206, 0x5bf61ead55074a1d -207, 0xbe9899c61dec480d -208, 0xae7d66d21e51ec9e -209, 0x384ee62c26a08419 -210, 0x6648dccb7c2f4abf -211, 0xc72aa0c2c708bdc9 -212, 0x205c5946b2b5ba71 -213, 0xd4d8d0b01890a812 -214, 0x56f185493625378d -215, 0x92f8072c81d39bd0 -216, 0xa60b3ceecb3e4979 -217, 0xfcf41d88b63b5896 -218, 0xf5a49aa845c14003 -219, 0xffcc7e99eee1e705 -220, 0xdd98312a7a43b32d -221, 0xa6339bd7730b004 -222, 0xdac7874ba7e30386 -223, 0xadf6f0b0d321c8 -224, 0x126a173ae4ffa39f -225, 0x5c854b137385c1e7 -226, 0x8173d471b1e69c00 -227, 0x23fa34de43581e27 -228, 0x343b373aef4507b1 -229, 0xa482d262b4ea919c -230, 0xf7fbef1b6f7fbba -231, 0xd8ce559487976613 -232, 0xbf3c8dd1e6ebc654 -233, 0xda41ed375451e988 -234, 0xf54906371fd4b9b3 -235, 0x5b6bb41231a04230 -236, 0x866d816482b29c17 -237, 0x11315b96941f27dc -238, 0xff95c79205c47d50 -239, 0x19c4fff96fbdac98 -240, 0xbfb1ae6e4131d0f4 -241, 0x9d20923f3cdb82c9 -242, 0x282175507c865dff -243, 0xdfd5e58a40fe29be -244, 0xedbd906ff40c8e4f -245, 0x11b04fc82614ccb3 -246, 0xeceb8afda76ae49f -247, 0xa4856913847c2cdf -248, 0x6f1425f15a627f2a -249, 0xdf144ffedf60349e -250, 0x392d7ecfd77cc65f -251, 0x72b8e2531049b2c6 -252, 0x5a7eb2bdb0ec9529 -253, 0xdcfd4306443e78c1 -254, 0x89ad67ed86cd7583 -255, 0x276b06c0779a6c8f -256, 0xb2dbb723196a0ac3 -257, 0x66c86a3b65906016 -258, 0x938348768a730b47 -259, 0x5f5282de938d1a96 -260, 0xa4d4588c4b473b1f -261, 0x8daed5962be4796f -262, 0x9dde8d796985a56e -263, 0x46be06dbd9ed9543 -264, 0xdf98286ceb9c5955 -265, 0xa1da1f52d7a7ca2b -266, 0x5a7f1449f24bbd62 -267, 0x3aedc4e324e525fd -268, 0xced62464cd0154e1 -269, 0x148fc035e7d88ce3 -270, 0x82f8878948f40d4c -271, 0x4c04d9cdd6135c17 -272, 0xdf046948d86b3b93 -273, 0x2f0dec84f403fe40 -274, 0xa61954fb71e63c0d -275, 0x616d8496f00382e8 -276, 0x162c622472746e27 -277, 0x43bcfe48731d2ceb -278, 0xff22432f9ff16d85 -279, 0xc033ed32bb0ad5a4 -280, 0x5d3717cc91c0ce09 -281, 
0x7a39a4852d251075 -282, 0x61cd73d71d6e6a6 -283, 0xe37e2ea4783ab1a5 -284, 0x60e1882162579ea8 -285, 0x9258ec33f1a88e00 -286, 0x24b32acf029f0407 -287, 0x1410fc9aea6d3fac -288, 0x6054cf2a3c71d8f7 -289, 0x82f7605157a66183 -290, 0x3b34c1c0dff9eac5 -291, 0xfebe01b6d5c61819 -292, 0x7372187c68b777f2 -293, 0xc6923812cda479f0 -294, 0x386613be41b45156 -295, 0x92cfebe8cc4014b -296, 0x8e13c4595849828b -297, 0x90e47390d412291f -298, 0x6b21a1d93d285138 -299, 0xbf5b1f5922f04b12 -300, 0x21e65d1643b3cb69 -301, 0xf7683b131948ac3c -302, 0xe5d99fc926196ed2 -303, 0x7b138debbec90116 -304, 0x8a2650a75c2c2a5c -305, 0x20689a768f9b347b -306, 0xdfa2900cfb72dc6e -307, 0x98959c3855611cc2 -308, 0x5fdb71b89596cc7c -309, 0x1c14ac5c49568c7b -310, 0x958c4293016091fe -311, 0x7484522eb0087243 -312, 0xc4018dfb34fc190f -313, 0xca638567e9888860 -314, 0x102cd4805f0c0e89 -315, 0xcc3bc438e04548f8 -316, 0xb808944bb56ea5be -317, 0xffd4778dbf945c57 -318, 0xfe42617784c0233b -319, 0x3eccbfeae9b42d3c -320, 0xd9f1b585fd0bfa60 -321, 0x5c063d1b2705d5dd -322, 0x8e8bec3519941b64 -323, 0x9e94c36cbec2a42 -324, 0x1cd19f5b64ffd3ad -325, 0x9632e3aebfc68e66 -326, 0x98960c2d9da4ae45 -327, 0xb76994b1f2bbfc1f -328, 0xca184a737d3971cc -329, 0x964d31b07183adfb -330, 0xe9e0ff351cd276d4 -331, 0xb5747c860b05bbe4 -332, 0x5549ddc3bd3862e2 -333, 0x495496677b27873b -334, 0x53910baa26e3ea18 -335, 0xaa07a07ad0a688d3 -336, 0xbb43bd1f09ecdb1e -337, 0xe2ebc105699dd84 -338, 0x6e815a2729584035 -339, 0x2caab1713b17948a -340, 0x43d39d209fa41c90 -341, 0xfe3e71089d5d1c3a -342, 0xa778646c32f81177 -343, 0x8d42bfb86e6e92d5 -344, 0x175571f70b4fcfbe -345, 0x2a66a6fe10dc3b5b -346, 0xd9545e85235ca709 -347, 0x5642781c77ced48a -348, 0x24facc40b72ccd09 -349, 0xa800fbacce33f6f8 -350, 0x675f58a0ff19fba -351, 0x35aedf57bb5cde1b -352, 0xe5535a6b63f6d068 -353, 0x84dffd0102aaa85d -354, 0x621faad65467aaa7 -355, 0x596ad85b556b112f -356, 0x837545fff8894c7a -357, 0x3d9a4ae1356bc6a6 -358, 0xcd8b7153205d4ad0 -359, 0x98afdd40f1ed09a6 -360, 0xa38b2dc55a5cf87f -361, 
0x484aecce2b6838bc -362, 0x6af05c26bdab18d9 -363, 0xf418b7399dcf2e4b -364, 0x1cfa38789b0d2445 -365, 0xfbed23c34166ee67 -366, 0x38e6820039e4912a -367, 0x1fe94911e963591e -368, 0x1291c79aee29ad70 -369, 0x65eccfc89506f963 -370, 0x7d14de3b2f55b1f6 -371, 0x82eb79c36cd2a739 -372, 0x41ffe3b75ea0def5 -373, 0x9eba9156470a51d9 -374, 0xd17c00b981db37d1 -375, 0xf688769a75601aa7 -376, 0xbcf738e9e03d571e -377, 0x14712e56df8f919b -378, 0xab14e227d156e310 -379, 0xf53d193e993e351e -380, 0x857fae46bd312141 -381, 0xc2dd71e41b639966 -382, 0x74f8b987a3d00ad1 -383, 0x5bce8526dc527981 -384, 0x94910926c172a379 -385, 0x503c45557688a9d5 -386, 0x244d03834e05807f -387, 0x6e014cbab9c7a31f -388, 0xae544c638530facf -389, 0x9b853aaaf9cbc22d -390, 0xfb42ab7024d060ed -391, 0x74cc3fba0dfd7ff2 -392, 0x24ec9e8f62144ad5 -393, 0x72f082954307bbe7 -394, 0x36feda21bbf67577 -395, 0x3222191611b832f1 -396, 0xd0584e81bcac8b0b -397, 0xdce8d793ef75e771 -398, 0x978824c6c2578fc -399, 0x6e8f77503b3c2ee4 -400, 0xc85d2d86fecf5d03 -401, 0x3d35b4a5d4d723c4 -402, 0xd3987dfd4727fff3 -403, 0xd3cde63fb6a31add -404, 0xf6699e86165bdaeb -405, 0x9d60ba158ec364c4 -406, 0x920c3c18b346bfc9 -407, 0x770fd1fdfbc236ca -408, 0x45998cfc5fc12ddd -409, 0xd74a3454e888834b -410, 0xbf2aa68081a4a28f -411, 0xea41b26a6f1da1b3 -412, 0x5560a2d24b9d5903 -413, 0xe3791f652a228d8b -414, 0x365116d3b5a8520c -415, 0xb1b2bd46528f8969 -416, 0xfcfe14943ef16ae7 -417, 0xf4d43425e8a535dc -418, 0xe6cf10a78782a7e0 -419, 0x9c7ac0de46556e3e -420, 0xc667ae0856eed9ef -421, 0x47dbb532e16f9c7e -422, 0xdf4785a5d89ee82e -423, 0xbd014925ce79dbcf -424, 0xea0d663fb58fa5be -425, 0x51af07d5cc3821fb -426, 0x27a1bdcdc4159a9d -427, 0x520c986c59b1e140 -428, 0x50b73fd9bacd5b39 -429, 0xae5240641f51e4f3 -430, 0x71faecc164ed9681 -431, 0xda95aa35529a7ee -432, 0xe25ba29b853c1c6d -433, 0x9871a925cda53735 -434, 0xde481ad8540e114d -435, 0xa2997f540e8abca0 -436, 0xc9683c5035e28185 -437, 0x1082471b57182bac -438, 0xbd3ecf0f0b788988 -439, 0xf479760776fbb342 -440, 0x3730929200d91f44 -441, 
0xc1762d79ae72809c -442, 0xfaa0a4c7b1686cb3 -443, 0xd581e6d55afdafcd -444, 0x6cf57bdfba2dcf6d -445, 0xdef79d9fe6a5bcef -446, 0x13ed376e18132bd3 -447, 0xbe67efd72defa2a -448, 0x5acc176c468966ea -449, 0x8b35b626af139187 -450, 0x446de3fac0d973ac -451, 0xe1d49e06dc890317 -452, 0x817bc3fd21fc09b7 -453, 0xb71c3958a13d5579 -454, 0x8746e010f73d7148 -455, 0x1b61c06009922e83 -456, 0xba17e62e6b092316 -457, 0x1375fa23c4db8290 -458, 0x3f071230f51245a6 -459, 0x51c99a086a61cd13 -460, 0x5f0f2ae78589e1fd -461, 0x604834e114bbbc27 -462, 0x5eb2a7a34814e9a9 -463, 0x77a6907f386bf11e -464, 0x99525de2bd407eeb -465, 0xb818348c57b3b98f -466, 0x25f5f9e702fbe78d -467, 0x8f66669e6f884473 -468, 0x1e47d46e2af4f919 -469, 0xf6a19df846476833 -470, 0xff00c67bcd06621f -471, 0xe3dfe069795d72d8 -472, 0x8affc88b2fea4d73 -473, 0x66df747e5f827168 -474, 0xf368ec338d898a0e -475, 0x9e1f1a739c5984a2 -476, 0x46a1c90e1ca32cbc -477, 0xc261bc305ed8d762 -478, 0x754d7949f7da9e72 -479, 0x4c8fbbb14ef47b17 -480, 0xccbdc67a3848d80d -481, 0x3c25e6f58bae751d -482, 0x7078b163b936d9b6 -483, 0x440e27463c134ecf -484, 0x6c83ee39f324db0f -485, 0x27cf901b22aea535 -486, 0x57262dec79a3f366 -487, 0x91db09f1dbb524fb -488, 0xd7436eefba865df2 -489, 0x16c86b0a275a3f43 -490, 0x689493e6681deaa9 -491, 0x7e1dc536c1a9ac42 -492, 0x1145beac3ac7f5cc -493, 0x3d05e211a104b2b0 -494, 0x4f9e77ced3c52f44 -495, 0x53de1369354add72 -496, 0x1fb60f835f47cdeb -497, 0x6ab36f089e40c106 -498, 0xaabffcb0d3d04c7 -499, 0xaa399686d921bd25 -500, 0x2bf8dd8b6d6fa7f0 -501, 0x1ddbf4e124329613 -502, 0x466a740241466a72 -503, 0x98d7381eb68a761 -504, 0x817691510bc4857a -505, 0x8837622c0171fe33 -506, 0xcba078873179ee16 -507, 0x13adad1ab7b75af4 -508, 0x3bac3f502428840c -509, 0xbeb3cce138de9a91 -510, 0x30ef556e40b5f0b4 -511, 0x19c22abdf3bbb108 -512, 0x977e66ea4ddc7cf -513, 0x9f4a505f223d3bf3 -514, 0x6bc3f42ac79ec87b -515, 0x31e77712158d6c23 -516, 0x6d8de4295a28af0d -517, 0xee1807dbda72adb7 -518, 0xda54140179cd038f -519, 0x715aa5cdac38e062 -520, 0x5a7e55e99a22fa16 -521, 
0xf190c36aa8edbe4f -522, 0xccadd93a82c1d044 -523, 0x7070e6d5012c3f15 -524, 0x50a83341a26c1ba5 -525, 0x11bca7cc634142e5 -526, 0x623a0d27867d8b04 -527, 0x75c18acff54fbf6e -528, 0x455ae7d933497a6f -529, 0xf624cf27d030c3d3 -530, 0x7a852716f8758bac -531, 0xe7a497ac1fa2b5b4 -532, 0xf84f097498f57562 -533, 0xc4bb392f87f65943 -534, 0x618e79a5d499fbfb -535, 0xb3c0b61d82b48b8 -536, 0x4750a10815c78ea7 -537, 0x9cf09cca3ddece69 -538, 0x2a69f1c94cc901a2 -539, 0x347a0e446e1ce86d -540, 0xb06f3a5a5ab37bb1 -541, 0x8035bd0713d591db -542, 0x539c9637042c3a1f -543, 0xd7ba4dc6b273cbd7 -544, 0x12f3f99933444f85 -545, 0x4a9517b9783fb9a4 -546, 0x6422b2ea95093bc5 -547, 0x3a5ecff0f996c2a6 -548, 0x31de504efc76a723 -549, 0x7ccb7c5233c21a9f -550, 0xc687d9e6ce4186e8 -551, 0x6e40769d6940376a -552, 0xf51207314f1f7528 -553, 0x67ee3acb190865e3 -554, 0xe08d586270588761 -555, 0xe387fa489af1a75c -556, 0x73414a52d29d8375 -557, 0x671a38191cf2a357 -558, 0xe00fb25b1aa54008 -559, 0x11a0610e22cf549b -560, 0xc90cc865d57c75be -561, 0x90d0863cc15f2b79 -562, 0x8b3e60d32ebcb856 -563, 0xb28cc55af621e04a -564, 0xcf60bd3cb2a5ab1d -565, 0x212cb5d421948f86 -566, 0xee297b96e0a3363f -567, 0x4e9392ff998760d1 -568, 0x61940c8d0105ba3e -569, 0x14ebcbae72a59a16 -570, 0xdf0f39a3d10c02af -571, 0xfc047b2b3c1c549d -572, 0x91718b5b98e3b286 -573, 0x9ea9539b1547d326 -574, 0x7a5a624a89a165e6 -575, 0x145b37dcaa8c4166 -576, 0x63814bbb90e5616c -577, 0xc4bc3ca6c38bb739 -578, 0x853c3a61ddc6626c -579, 0xa7ce8481c433829a -580, 0x8aff426941cc07b -581, 0x2dc3347ca68d8b95 -582, 0xce69f44f349e9917 -583, 0x2fa5cb8aca009b11 -584, 0xf26bb012115d9aca -585, 0xafa01c2f2d27235a -586, 0xabcba21f1b40305e -587, 0xfec20c896c0c1128 -588, 0xc5f7a71ebacadfa0 -589, 0xc8479ad14bab4eef -590, 0xad86ec9a3e7d3dc -591, 0xbbecd65292b915c5 -592, 0xb1f9e28149e67446 -593, 0x708d081c03dad352 -594, 0xaa8a84dbd1de916c -595, 0x9aa3efb29ba9480b -596, 0xd3c63969ff11443e -597, 0x1e9e9ac861315919 -598, 0x4fe227f91e66b41d -599, 0xefc0212d43d253ab -600, 0x98341437727c42d1 -601, 
0x5ea85c0fe9008adc -602, 0x7891b15faa808613 -603, 0x32db2d63989aacfd -604, 0xc92f7f28e88fd7bc -605, 0x3513545eb6549475 -606, 0x49abe0082906fbf8 -607, 0xcee1e1a6551e729c -608, 0x38556672b592a28e -609, 0xc3e61409c4ec2d45 -610, 0x96c67ce2995a0fd4 -611, 0x9b9b0cada870293 -612, 0x82d6dd5dada48037 -613, 0xeea4f415299f1706 -614, 0x371107895f152ab3 -615, 0x2f6686159f4396bb -616, 0x61005a2ff3680089 -617, 0x9d2f2cafb595e6b6 -618, 0x4a812a920f011672 -619, 0x317554d3a77385d7 -620, 0x24c01086727eb74b -621, 0xa15ff76d618a3a9e -622, 0x2121bfd983859940 -623, 0x384d11577eea8114 -624, 0xab0f4299f3c44d88 -625, 0x136fd4b07cfa14d9 -626, 0x665fe45cbfaa972a -627, 0x76c5a23398a314e9 -628, 0x5507036357ccda98 -629, 0xd9b8c5ac9dce632b -630, 0x366bc71781da6e27 -631, 0xdd2b2ba1d6be6d15 -632, 0xf33ed0d50ea6f1a6 -633, 0xf05a9b1900174c18 -634, 0x3947e1419e2787cf -635, 0x6c742b1e029637d0 -636, 0x32aba12196a0d2e8 -637, 0x1b94aab2e82e7df -638, 0x68b617db19229d6 -639, 0x6c88a95ac0a33f98 -640, 0xdc9b95fd60c2d23e -641, 0x999e6971d3afc8b3 -642, 0x7071fc6ad8b60129 -643, 0x41a8184ef62485f6 -644, 0xb68e0605c7d5e713 -645, 0x272b961a1d1bbee -646, 0x23f04e76446187b0 -647, 0x999a7a8f6d33f260 -648, 0xdbd6318df4f168d -649, 0x8f5e74c84c40711e -650, 0x8ccc6b04393a19d6 -651, 0xadcd24b782dd8d3d -652, 0x1a966b4f80ef9499 -653, 0xcb6d4f9ff5a280f0 -654, 0x8095ff2b8484018a -655, 0xbfd3389611b8e771 -656, 0x278eb670b7d12d51 -657, 0x31df54ca8d65c20f -658, 0x121c7fb38af6985e -659, 0x84fb94f38fe1d0a -660, 0x15ae8af1a6d48f02 -661, 0x8d51e4a62cba1a28 -662, 0x58e6b6b3ae0f9e42 -663, 0x9365a0a85669cc99 -664, 0xe56e92f65a2106df -665, 0x68fa299c66b428fc -666, 0x55e51bb0b0a832c6 -667, 0x48b565293f9bc494 -668, 0x73d8132b1cbabb57 -669, 0x9178ac3926c36cbc -670, 0xe2f22c7b28ea5e0f -671, 0x6af45322a99afb12 -672, 0x59072fcb486a46f4 -673, 0x166b717b08d3d8e -674, 0xd4e627a2dfacc4ab -675, 0x33dad6f2921dedaa -676, 0x4b13b806834a6704 -677, 0xe5f7971b398ed54d -678, 0x20bfae65e3e6899b -679, 0x881dab45d2b4fc98 -680, 0x6f248126b5b885be -681, 
0x7aeb39e986f9deee -682, 0xf819f9574b8c3a03 -683, 0xff3d93ed6bd9781a -684, 0x3a31e2e24a2f6385 -685, 0x7888a88f8944a5e -686, 0x4faee12f5de95537 -687, 0x7f3e4efccdb2ed67 -688, 0x91e0f2fc12593af5 -689, 0xb5be8a4b886a40d3 -690, 0x998e8288ac3a9b1b -691, 0x85c48fc8b1349e7b -692, 0xf03af25222d8fae5 -693, 0x45467e805b242c2e -694, 0xa2350db793dbebdc -695, 0xfebe5b61d2174553 -696, 0xa9a331f02c54ad0b -697, 0xe94e49a0f905aef3 -698, 0xe54b4c812b55e3da -699, 0xdc454114c6bc0278 -700, 0x99c7765ab476baa2 -701, 0xccd9590e47fdff7c -702, 0xfa2bcae7afd6cb71 -703, 0x2c1bf1a433a6f0f7 -704, 0x53882c62ff0aab28 -705, 0x80ac900f844dacc -706, 0x27ba8eb5c4a44d54 -707, 0x78f3dfb072a46004 -708, 0x34e00e6ec629edce -709, 0x5b88d19b552d1fbd -710, 0xe4df375dc79df432 -711, 0x37446312ff79c3b4 -712, 0xb72256900a95fa6d -713, 0x89f3171fbdff0bfc -714, 0xd37885b048687eba -715, 0xbb033213b283b60e -716, 0xcf10b523ee769030 -717, 0xbf8070b6cfd7bafb -718, 0xb7194da81fd1763b -719, 0xbfc303de88e68d24 -720, 0xb949c7a5aea8a072 -721, 0x844216e7bae90455 -722, 0xf1e7f20840049a33 -723, 0x96e3263ad0cae794 -724, 0x10772d51f6e9ba49 -725, 0xcea24fccae9d23b3 -726, 0xefd378add9dde040 -727, 0xba0c7c5275805976 -728, 0x2e2a04608f64fa8c -729, 0xafb42ec43aa0fa7 -730, 0x30444b84241ac465 -731, 0x19ef384bac4493ab -732, 0xfd1ac615d3ba5ab9 -733, 0x6cc781ba38643aff -734, 0x30ff27ebed875cfd -735, 0xee1a261aca97ae62 -736, 0xc5a92715202bc940 -737, 0x9e6ec76f93c657ff -738, 0x9b9fd55f55191ca5 -739, 0x654b13af008d8f03 -740, 0x1b7f030d9bd0719f -741, 0x6d622e277550cb7f -742, 0x3f8ee6b8830d0538 -743, 0x475462bcd0de190f -744, 0x21380e8a513bdbcd -745, 0x629bf3771b1bd7a4 -746, 0x3b5fd0b62c353709 -747, 0xf95634006ec3867e -748, 0x1be8bb584a6653c2 -749, 0x2e2d3cfa85320ce8 -750, 0x5b904b692252d11d -751, 0x4bfd76631d527990 -752, 0xc019571ca2bec4a0 -753, 0xf2eb730cea4cd751 -754, 0xd4571d709530191a -755, 0x3b5bd947061f5a7d -756, 0x56e2322cd2d1d1c0 -757, 0xa8830a5f62019f83 -758, 0x901d130c1b873cf3 -759, 0xb5dd29b363c61299 -760, 0xbb710bec3a17b26d -761, 
0xc0c464daca0f2328 -762, 0x4dc8055df02650f5 -763, 0x3d3cd9bbe8b957af -764, 0xdb79612c2635b828 -765, 0xe25b3a8ad8fa3040 -766, 0xd5875c563cbf236b -767, 0x46861c1c3849c9bc -768, 0xf84bf1a2814dff43 -769, 0x6d8103902e0ad5e6 -770, 0x99f51c9be8af79e5 -771, 0xb0bfa8540ff94a96 -772, 0xaf45109a4e06f7d0 -773, 0x281df3e55aea9bfc -774, 0x6a1155ca8aa40e60 -775, 0x754d32c5de1f5da -776, 0xce1eafb1c6ca916f -777, 0xc4f2185fa8577bd1 -778, 0x4a188e9bdb5501d9 -779, 0xbb14107e99bd5550 -780, 0xf0381d8425ec2962 -781, 0x213dbfffc16ec4f6 -782, 0x7a999c5a28ea65bc -783, 0x23758c2aba7709ff -784, 0xea7e4bb205e93b44 -785, 0x9c5a31e53911c658 -786, 0x7f04d0bbdc689ddc -787, 0xe3ed89ab8d78dcb3 -788, 0x73c38bfb43986210 -789, 0x740c7d787eb8e158 -790, 0x5284fafdfb3fb9ec -791, 0x2e91a58ac1fb1409 -792, 0xb94a600bf0a09af3 -793, 0x533ea4dbe07d81dd -794, 0x48c3f1a736b3c5fd -795, 0x56ae3499fa8720ce -796, 0x526f2def663ca818 -797, 0x2f085759c65665c4 -798, 0xf715f042c69e0db4 -799, 0x110889c399231e60 -800, 0x64584a244866f3a0 -801, 0xf02ec101a39405d3 -802, 0xe73cd5e9a7f17283 -803, 0xfea64869e7028234 -804, 0x97559974ad877891 -805, 0xc8695aba1dc9f2e5 -806, 0x7b62b76ffc2264ec -807, 0xf5e1df172ec5ccd -808, 0xafaeb68765e443bd -809, 0xd3870eb2e8337623 -810, 0x4f944d684138fb39 -811, 0x6977c575038916ad -812, 0x8ada1a225df95a56 -813, 0xe4044c6c58d15e54 -814, 0x4e5121366681cf2 -815, 0xcf8640b079357b0d -816, 0xcd5b157d44106fa3 -817, 0x9d7a5481279e25a1 -818, 0xe10e9db41fb4b34f -819, 0x1052607be1eadff9 -820, 0x3403d67232fe2265 -821, 0xac9358f498c34afc -822, 0x820172da0dc39c9 -823, 0xe186e91a3b826b6a -824, 0x1a838e2a40284445 -825, 0x1870b617ebd7bce6 -826, 0xcb7cba4424be1ed7 -827, 0x6a2e56e40fdf9041 -828, 0xace93bbe108f97ee -829, 0xfeb9bc74ac41ca08 -830, 0x8cb2d05b0f6a1f51 -831, 0x73792309f3fac0a9 -832, 0x2507343d431308ca -833, 0xd0ea1197be615412 -834, 0xb1870812f1d2fa94 -835, 0x6d067b6935dcd23e -836, 0xaf161014e5492c31 -837, 0xd4be0dce97064be4 -838, 0xf8edfe3fc75c20f1 -839, 0x894751dc442d2d9c -840, 0xb4a95f6a6663456c -841, 
0x74e93162e2d805db -842, 0x784bc5f3a7a2f645 -843, 0xd234d7c5b0582ea9 -844, 0x491f28d0ab6cb97c -845, 0xa79419e5cf4336c3 -846, 0x66b00141978c849 -847, 0xa7ddbd64698d563f -848, 0xefc33a4a5d97d4b2 -849, 0x95075514a65aebdc -850, 0x40eca5b3e28cd25e -851, 0x90ec7d00e9c9e35d -852, 0x63e84104d5af417a -853, 0xdaca0ea32df5744 -854, 0x7ed54f2587795881 -855, 0x5a73931760af4ee0 -856, 0x857d1a185a3081ec -857, 0x6eac2aabe67fb463 -858, 0xd1f86155d8bfc55f -859, 0x6d56398f3e7877ef -860, 0x7642f61dfc62bc17 -861, 0x1d76b12843246ffa -862, 0xde7817809b8a31d0 -863, 0xbcca9cd091198f9d -864, 0xf71ca566dddcdfd4 -865, 0xea4386ee8b61d082 -866, 0xe351729d6010bac4 -867, 0xfd685d8a49910dd6 -868, 0xa7a20ea6c686bd3 -869, 0x1cdaf82f4dbd5536 -870, 0xa3da1d1e77dda3e0 -871, 0x4f723b3818ff8b2a -872, 0x1290669eca152469 -873, 0xb54158b52d30651b -874, 0xc06b74f2c7f0fee -875, 0x7d5840bcbf702379 -876, 0x19fa4c1254a82ed -877, 0xcf5ce090ad0b38ea -878, 0xd4edd6ac9437e16d -879, 0xc6ebf25eb623b426 -880, 0xd2b6dbdf00d8fea2 -881, 0x949cf98391cc59e1 -882, 0x380a0c7d0356f7b3 -883, 0x8ffefe32465473bf -884, 0x637b6542d27c861e -885, 0x347d12ffc664ecd9 -886, 0xea66e3a0c75a6b37 -887, 0xc3aff6f34fb537a1 -888, 0x67bdf3579959bf49 -889, 0xa17a348e3a74b723 -890, 0x93c9ef26ddadd569 -891, 0x483909059a5ac0b2 -892, 0x26ec9074b56d5a0d -893, 0x6216000d9a48403a -894, 0x79b43909eab1ec05 -895, 0xe4a8e8d03649e0de -896, 0x1435d666f3ccdc08 -897, 0xb9e22ba902650a0e -898, 0x44dffcccc68b41f8 -899, 0x23e60dcc7a559a17 -900, 0x6fd1735eacd81266 -901, 0xf6bda0745ea20c8e -902, 0x85efcaefe271e07c -903, 0x9be996ee931cef42 -904, 0xe78b41c158611d64 -905, 0xd6201df605839830 -906, 0x702e8e47d2769fd3 -907, 0xb8dcf70e18cf14c -908, 0xac2690bab1bf5c17 -909, 0x92b166b71205d696 -910, 0xb0e73c795fc6df28 -911, 0x4bf2322c8b6b6f0d -912, 0xa842fbe67918cea0 -913, 0xb01a8675d9294e54 -914, 0xfbe3c94f03ca5af2 -915, 0x51a5c089600c441f -916, 0x60f0fd7512d85ded -917, 0xef3113d3bc2cadb0 -918, 0xe1ea128ade300d60 -919, 0xde413b7f8d92d746 -920, 0xfc32c6d43f47c5d8 -921, 
0x69d551d8c2b54c68 -922, 0xb9bc68c175777943 -923, 0xb9c79c687f0dae90 -924, 0xd799421ef883c06e -925, 0xbff553ca95a29a3e -926, 0xfc9ffac46bd0aca1 -927, 0x4f6c3a30c80c3e5a -928, 0x8b7245bc6dc4a0a -929, 0xaf4e191a4575ff60 -930, 0x41218c4a76b90f0b -931, 0x986052aa51b8e89b -932, 0x284b464ed5622f9 -933, 0xba6bded912626b40 -934, 0x43cad3ed7443cb5c -935, 0x21641fa95725f328 -936, 0x6d99d6d09d755822 -937, 0x8246dfa2d4838492 -938, 0xd2ee70b9056f4726 -939, 0x87db515a786fbb8b -940, 0x7c63e4c1d7786e7d -941, 0xd1a9d548f10b3e88 -942, 0xa00856475f3b74c9 -943, 0x7f1964ce67148bf4 -944, 0x446650ec71e6018c -945, 0xb1805ca07d1b6345 -946, 0x869c0a1625b7271b -947, 0x79d6da06ce2ecfe2 -948, 0xec7b3cafc5e3c85f -949, 0x1745ce21e39f2c3d -950, 0xd9a0a7af6ee97825 -951, 0x680e0e52a6e11d5c -952, 0xd86b3f344ff7f4cd -953, 0xab56af117c840b9c -954, 0x5c5404c7e333a10e -955, 0x4f1eb462f35d990d -956, 0xf857605a5644458e -957, 0x3bb87cdf09262f86 -958, 0xd57295baf6da64b -959, 0xb5993f48472f2894 -960, 0x7d1a501608c060b2 -961, 0x45fabe2d0e54adf0 -962, 0xbb41c3806afb4efe -963, 0xbfbc506049424c8 -964, 0xb7dd6b67f2203344 -965, 0x389ce52eff883b81 -966, 0xe259c55c0cf6d000 -967, 0x70fb3e3824f7d213 -968, 0x9f36d5599ed55f4b -969, 0xd14cf6f12f83c4f7 -970, 0x570a09d56aaa0b66 -971, 0x8accafd527f4598 -972, 0xa42d64c62175adfd -973, 0xddb9c6a87b6e1558 -974, 0xd80b6c69fa1cde2a -975, 0x44ebaac10082207b -976, 0xf99be8889552fa1a -977, 0x38253cd4b38b5dc5 -978, 0x85356c8b02675791 -979, 0xbf91677b2ecdcf55 -980, 0x2316cb85e93f366e -981, 0x9abf35954db6b053 -982, 0xf49f7425e086b45a -983, 0x8f5b625e074afde2 -984, 0xe0d614559791b080 -985, 0xbf7b866afab2a525 -986, 0xde89d7e1641a6412 -987, 0x1d10687d8ae5b86f -988, 0x1f034caa0e904cbd -989, 0x2086357aec8a7a2c -990, 0x22dc476b80c56e1e -991, 0xbef5a73cc0e3a493 -992, 0xddfa3829b26ed797 -993, 0x8917a87ec3d4dc78 -994, 0xfeabe390628c365e -995, 0x581b0c4f6fb2d642 -996, 0x1ef8c590adbf5b9a -997, 0x4d8e13aac0cce879 -998, 0xfe38f71e5977fad0 -999, 0x1f83a32d4adfd2ed diff --git 
a/venv/lib/python3.7/site-packages/numpy/random/tests/data/philox-testset-2.csv b/venv/lib/python3.7/site-packages/numpy/random/tests/data/philox-testset-2.csv deleted file mode 100644 index 69d24c3..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/tests/data/philox-testset-2.csv +++ /dev/null @@ -1,1001 +0,0 @@ -seed, 0x0 -0, 0x399e5b222b82fa9 -1, 0x41fd08c1f00f3bc5 -2, 0x78b8824162ee4d04 -3, 0x176747919e02739d -4, 0xfaa88f002a8d3596 -5, 0x418eb6f592e6c227 -6, 0xef83020b8344dd45 -7, 0x30a74a1a6eaa064b -8, 0x93d43bf97a490c3 -9, 0xe4ba28b442194cc -10, 0xc829083a168a8656 -11, 0x73f45d50f8e22849 -12, 0xf912db57352824cc -13, 0xf524216927b12ada -14, 0x22b7697473b1dfda -15, 0x311e2a936414b39f -16, 0xb905abfdcc425be6 -17, 0x4b14630d031eac9c -18, 0x1cf0c4ae01222bc8 -19, 0xa6c33efc6e82ef3 -20, 0x43b3576937ba0948 -21, 0x1e483d17cdde108a -22, 0x6722784cac11ac88 -23, 0xee87569a48fc45d7 -24, 0xb821dcbe74d18661 -25, 0xa5d1876ef3da1a81 -26, 0xe4121c2af72a483 -27, 0x2d747e355a52cf43 -28, 0x609059957bd03725 -29, 0xc3327244b49e16c5 -30, 0xb5ae6cb000dde769 -31, 0x774315003209017 -32, 0xa2013397ba8db605 -33, 0x73b228945dbcd957 -34, 0x801af7190375d3c0 -35, 0xae6dca29f24c9c67 -36, 0xd1cc0bcb1ca26249 -37, 0x1defa62a5bd853be -38, 0x67c2f5557fa89462 -39, 0xf1729b58122fab02 -40, 0xb67eb71949ec6c42 -41, 0x5456366ec1f8f7d7 -42, 0x44492b32eb7966f5 -43, 0xa801804159f175f1 -44, 0x5a416f23cac70d84 -45, 0x186f55293302303d -46, 0x7339d5d7b6a43639 -47, 0xfc6df38d6a566121 -48, 0xed2fe018f150b39e -49, 0x508e0b04a781fa1b -50, 0x8bee9d50f32eaf50 -51, 0x9870015d37e63cc -52, 0x93c6b12309c14f2d -53, 0xb571cf798abe93ff -54, 0x85c35a297a88ae6e -55, 0x9b1b79afe497a2ae -56, 0x1ca02e5b95d96b8d -57, 0x5bb695a666c0a94a -58, 0x4e3caf9bbab0b208 -59, 0x44a44be1a89f2dc1 -60, 0x4ff37c33445758d1 -61, 0xd0e02875322f35da -62, 0xfd449a91fb92646b -63, 0xbe0b49096b95db4d -64, 0xffa3647cad13ef5d -65, 0x75c127a61acd10c8 -66, 0xd65f697756f5f98e -67, 0x3ced84be93d94434 -68, 0x4da3095c2fc46d68 -69, 0x67564e2a771ee9ac 
-70, 0x36944775180644a9 -71, 0xf458db1c177cdb60 -72, 0x5b58406dcd034c8 -73, 0x793301a3fdab2a73 -74, 0x1c2a1a16d6db6128 -75, 0xc2dacd4ddddbe56c -76, 0x2e7d15be2301a111 -77, 0xd4f4a6341b3bcd18 -78, 0x3622996bbe6a9e3b -79, 0xaf29aa9a7d6d47da -80, 0x6d7dbb74a4cd68ae -81, 0xc260a17e0f39f841 -82, 0xdee0170f2af66f0d -83, 0xf84ae780d7b5a06e -84, 0x8326247b73f43c3a -85, 0xd44eef44b4f98b84 -86, 0x3d10aee62ec895e3 -87, 0x4f23fef01bf703b3 -88, 0xf8e50aa57d888df6 -89, 0x7da67411e3bef261 -90, 0x1d00f2769b2f96d7 -91, 0x7ef9a15b7444b84e -92, 0xcfa16436cc2b7e21 -93, 0x29ab8cfac00460ff -94, 0x23613de8608b0e70 -95, 0xb1aa0980625798a8 -96, 0xb9256fd29db7df99 -97, 0xdacf311bf3e7fa18 -98, 0xa013c8f9fada20d8 -99, 0xaf5fd4fe8230fe3e -100, 0xd3d59ca55102bc5c -101, 0x9d08e2aa5242767f -102, 0x40278fe131e83b53 -103, 0x56397d03c7c14c98 -104, 0xe874b77b119359b3 -105, 0x926a1ba4304ab19f -106, 0x1e115d5aa695a91d -107, 0xc6a459df441f2fe3 -108, 0x2ca842bc1b0b3c6a -109, 0x24c804cf8e5eed16 -110, 0x7ca00fc4a4c3ebd3 -111, 0x546af7cecc4a4ba6 -112, 0x8faae1fa18fd6e3 -113, 0x40420b0089641a6a -114, 0x88175a35d9abcb83 -115, 0xf7d746d1b8b1357c -116, 0x7dae771a651be970 -117, 0x2f6485247ee4df84 -118, 0x6883702fab2d8ec5 -119, 0xeb7eea829a67f9a6 -120, 0x60d5880b485562ed -121, 0x7d4ca3d7e41a4e7e -122, 0xbb7fef961ab8de18 -123, 0x3b92452fb810c164 -124, 0x5f4b4755348b338 -125, 0xca45a715a7539806 -126, 0xc33efd9da5399dd -127, 0x593d665a51d4aedd -128, 0x75d6b8636563036b -129, 0x7b57caa55e262082 -130, 0x4ede7427969e0dd5 -131, 0xc3f19b6f78ea00b -132, 0xeea7bab9be2181ea -133, 0x652c45fe9c420c04 -134, 0x14ba9e3d175670ee -135, 0xd2ad156ba6490474 -136, 0x4d65ae41065f614 -137, 0x6ff911c8afa28eb1 -138, 0xedc2b33588f3cb68 -139, 0x437c8bc324666a2f -140, 0x828cee25457a3f0 -141, 0x530c986091f31b9b -142, 0x2f34671e8326ade7 -143, 0x4f686a8f4d77f6da -144, 0xa4c1987083498895 -145, 0xbce5a88b672b0fb1 -146, 0x8476115a9e6a00cc -147, 0x16de18a55dd2c238 -148, 0xdf38cf4c416232bc -149, 0x2cb837924e7559f3 -150, 0xfad4727484e982ed -151, 
0x32a55d4b7801e4f -152, 0x8b9ef96804bd10a5 -153, 0xa1fd422c9b5cf2a9 -154, 0xf46ddb122eb7e442 -155, 0x6e3842547afa3b33 -156, 0x863dee1c34afe5c4 -157, 0x6a43a1935b6db171 -158, 0x1060a5c2f8145821 -159, 0xf783ec9ed34c4607 -160, 0x1da4a86bf5f8c0b0 -161, 0x4c7714041ba12af8 -162, 0x580da7010be2f192 -163, 0xad682fe795a7ea7a -164, 0x6687b6cb88a9ed2c -165, 0x3c8d4b175517cd18 -166, 0xe9247c3a524a6b6b -167, 0x337ca9cfaa02658 -168, 0xed95399481c6feec -169, 0x58726a088e606062 -170, 0xfe7588a5b4ee342a -171, 0xee434c7ed146fdee -172, 0xe2ade8b60fdc4ba5 -173, 0xd57e4c155de4eaab -174, 0xdefeae12de1137cb -175, 0xb7a276a241316ac1 -176, 0xeb838b1b1df4ca15 -177, 0x6f78965edea32f6f -178, 0x18bebd264d7a5d53 -179, 0x3641c691d77005ec -180, 0xbe70ed7efea8c24c -181, 0x33047fa8d03ca560 -182, 0x3bed0d2221ff0f87 -183, 0x23083a6ffbcf38a2 -184, 0xc23eb827073d3fa5 -185, 0xc873bb3415e9fb9b -186, 0xa4645179e54147fe -187, 0x2c72fb443f66e207 -188, 0x98084915dd89d8f4 -189, 0x88baa2de12c99037 -190, 0x85c74ab238cb795f -191, 0xe122186469ea3a26 -192, 0x4c3bba99b3249292 -193, 0x85d6845d9a015234 -194, 0x147ddd69c13e6a31 -195, 0x255f4d678c9a570b -196, 0x2d7c0c410bf962b4 -197, 0x58eb7649e0aa16ca -198, 0x9d240bf662fe0783 -199, 0x5f74f6fa32d293cc -200, 0x4928e52f0f79d9b9 -201, 0xe61c2b87146b706d -202, 0xcfcd90d100cf5431 -203, 0xf15ea8138e6aa178 -204, 0x6ab8287024f9a819 -205, 0xed8942593db74e01 -206, 0xefc00e4ec2ae36dd -207, 0xc21429fb9387f334 -208, 0xf9a3389e285a9bce -209, 0xacdee8c43aae49b3 -210, 0xefc382f02ad55c25 -211, 0x1153b50e8d406b72 -212, 0xb00d39ebcc2f89d8 -213, 0xde62f0b9831c8850 -214, 0xc076994662eef6c7 -215, 0x66f08f4752f1e3ef -216, 0x283b90619796249a -217, 0x4e4869bc4227499e -218, 0xb45ad78a49efd7ed -219, 0xffe19aa77abf5f4b -220, 0xfce11a0daf913aef -221, 0x7e4e64450d5cdceb -222, 0xe9621997cfd62762 -223, 0x4d2c9e156868081 -224, 0x4e2d96eb7cc9a08 -225, 0xda74849bba6e3bd3 -226, 0x6f4621da935e7fde -227, 0xb94b914aa0497259 -228, 0xd50d03e8b8db1563 -229, 0x1a45c1ce5dca422e -230, 0xc8d30d33276f843f -231, 
0xb57245774e4176b4 -232, 0x8d36342c05abbbb1 -233, 0x3591ad893ecf9e78 -234, 0x62f4717239ee0ac8 -235, 0x9b71148a1a1d4200 -236, 0x65f8e0f56dd94463 -237, 0x453b1fcfd4fac8c2 -238, 0x4c25e48e54a55865 -239, 0xa866baa05112ace2 -240, 0x7741d3c69c6e79c5 -241, 0x7deb375e8f4f7a8a -242, 0xc242087ede42abd8 -243, 0x2fa9d1d488750c4b -244, 0xe8940137a935d3d3 -245, 0x1dab4918ca24b2f2 -246, 0xe2368c782168fe3e -247, 0x6e8b2d1d73695909 -248, 0x70455ebea268b33e -249, 0x656a919202e28da1 -250, 0x5a5a8935647da999 -251, 0x428c6f77e118c13c -252, 0xa87aee2b675bb083 -253, 0x3873a6412b239969 -254, 0x5f72c1e91cb8a2ee -255, 0xa25af80a1beb5679 -256, 0x1af65d27c7b4abc3 -257, 0x133437060670e067 -258, 0xb1990fa39a97d32e -259, 0x724adc89ae10ed17 -260, 0x3f682a3f2363a240 -261, 0x29198f8dbd343499 -262, 0xdfaeeaa42bc51105 -263, 0x5baff3901b9480c2 -264, 0x3f760a67043e77f5 -265, 0x610fa7aa355a43ba -266, 0x394856ac09c4f7a7 -267, 0x1d9229d058aee82e -268, 0x19c674804c41aeec -269, 0x74cf12372012f4aa -270, 0xa5d89b353fa2f6ca -271, 0x697e4f672ac363dd -272, 0xde6f55ba73df5af9 -273, 0x679cf537510bd68f -274, 0x3dc916114ae9ef7e -275, 0xd7e31a66ec2ee7ba -276, 0xc21bebb968728495 -277, 0xc5e0781414e2adfd -278, 0x71147b5412ddd4bd -279, 0x3b864b410625cca9 -280, 0x433d67c0036cdc6 -281, 0x48083afa0ae20b1b -282, 0x2d80beecd64ac4e8 -283, 0x2a753c27c3a3ee3e -284, 0xb2c5e6afd1fe051a -285, 0xea677930cd66c46b -286, 0x4c3960932f92810a -287, 0xf1b367a9e527eaba -288, 0xb7d92a8a9a69a98e -289, 0x9f9ad3210bd6b453 -290, 0x817f2889db2dcbd8 -291, 0x4270a665ac15813c -292, 0x90b85353bd2be4dd -293, 0x10c0460f7b2d68d -294, 0x11cef32b94f947f5 -295, 0x3cf29ed8e7d477e8 -296, 0x793aaa9bd50599ef -297, 0xbac15d1190014aad -298, 0x987944ae80b5cb13 -299, 0x460aa51f8d57c484 -300, 0xc77df0385f97c2d3 -301, 0x92e743b7293a3822 -302, 0xbc3458bcfbcbb8c0 -303, 0xe277bcf3d04b4ed7 -304, 0xa537ae5cf1c9a31c -305, 0x95eb00d30bd8cfb2 -306, 0x6376361c24e4f2dd -307, 0x374477fe87b9ea8e -308, 0x8210f1a9a039902e -309, 0xe7628f7031321f68 -310, 0x8b8e9c0888fc1d3d -311, 
0x306be461fdc9e0ed -312, 0x510009372f9b56f5 -313, 0xa6e6fa486b7a027a -314, 0x9d3f002025203b5a -315, 0x7a46e0e81ecbef86 -316, 0x41e280c611d04df0 -317, 0xedcec10418a99e8a -318, 0x5c27b6327e0b9dbd -319, 0xa81ed2035b509f07 -320, 0x3581e855983a4cc4 -321, 0x4744594b25e9809d -322, 0xc737ac7c27fbd0ed -323, 0x1b523a307045433a -324, 0x8b4ce9171076f1d9 -325, 0x2db02d817cd5eec0 -326, 0x24a1f1229af50288 -327, 0x5550c0dcf583ff16 -328, 0x3587baaa122ec422 -329, 0xf9d3dc894229e510 -330, 0xf3100430d5cf8e87 -331, 0xc31af79862f8e2fb -332, 0xd20582063b9f3537 -333, 0xac5e90ac95fcc7ad -334, 0x107c4c704d5109d4 -335, 0xebc8628906dbfd70 -336, 0x215242776da8c531 -337, 0xa98002f1dcf08b51 -338, 0xbc3bdc07f3b09718 -339, 0x238677062495b512 -340, 0x53b4796f2a3c49e8 -341, 0x6424286467e22f0e -342, 0x14d0952a11a71bac -343, 0x2f97098149b82514 -344, 0x3777f2fdc425ad2 -345, 0xa32f2382938876d4 -346, 0xda8a39a021f20ae3 -347, 0x364361ef0a6ac32c -348, 0x4413eede008ff05a -349, 0x8dda8ace851aa327 -350, 0x4303cabbdcecd1ee -351, 0x2e69f06d74aa549f -352, 0x4797079cd4d9275c -353, 0xc7b1890917e98307 -354, 0x34031b0e822a4b4c -355, 0xfc79f76b566303ea -356, 0x77014adbe255a930 -357, 0xab6c43dd162f3be5 -358, 0xa430041f3463f6b9 -359, 0x5c191a32ada3f84a -360, 0xe8674a0781645a31 -361, 0x3a11cb667b8d0916 -362, 0xaedc73e80c39fd8a -363, 0xfde12c1b42328765 -364, 0x97abb7dcccdc1a0b -365, 0x52475c14d2167bc8 -366, 0x540e8811196d5aff -367, 0xa867e4ccdb2b4b77 -368, 0x2be04af61e5bcfb9 -369, 0x81b645102bfc5dfd -370, 0x96a52c9a66c6450f -371, 0x632ec2d136889234 -372, 0x4ed530c0b36a6c25 -373, 0x6f4851225546b75 -374, 0x2c065d6ba46a1144 -375, 0xf8a3613ff416551d -376, 0xb5f0fd60e9c971a9 -377, 0x339011a03bb4be65 -378, 0x9439f72b6995ded6 -379, 0xc1b03f3ef3b2292d -380, 0xad12fd221daab3ae -381, 0xf615b770f2cf996f -382, 0x269d0fdcb764172 -383, 0x67837025e8039256 -384, 0x6402831fc823fafa -385, 0x22854146a4abb964 -386, 0x7b5ad9b5a1bad7a8 -387, 0x67170e7beb6ac935 -388, 0xfc2d1e8e24adfaaa -389, 0x7ded4395345ff40d -390, 0x418981760a80dd07 -391, 
0xc03bef38022c1d2 -392, 0x3a11850b26eade29 -393, 0xaa56d02c7175c5f4 -394, 0xd83b7917b9bfbff5 -395, 0x3c1df2f8fa6fced3 -396, 0xf3d6e2999c0bb760 -397, 0xc66d683a59a950e3 -398, 0x8e3972a9d73ffabf -399, 0x97720a0443edffd9 -400, 0xa85f5d2fe198444a -401, 0xfc5f0458e1b0de5e -402, 0xe3973f03df632b87 -403, 0xe151073c84c594b3 -404, 0x68eb4e22e7ff8ecf -405, 0x274f36eaed7cae27 -406, 0x3b87b1eb60896b13 -407, 0xbe0b2f831442d70a -408, 0x2782ed7a48a1b328 -409, 0xb3619d890310f704 -410, 0xb03926b11b55921a -411, 0xdb46fc44aa6a0ce4 -412, 0x4b063e2ef2e9453a -413, 0xe1584f1aeec60fb5 -414, 0x7092bd6a879c5a49 -415, 0xb84e1e7c7d52b0e6 -416, 0x29d09ca48db64dfb -417, 0x8f6c4a402066e905 -418, 0x77390795eabc36b -419, 0xcc2dc2e4141cc69f -420, 0x2727f83beb9e3c7c -421, 0x1b29868619331de0 -422, 0xd38c571e192c246f -423, 0x535327479fe37b6f -424, 0xaff9ce5758617eb3 -425, 0x5658539e9288a4e4 -426, 0x8df91d87126c4c6d -427, 0xe931cf8fdba6e255 -428, 0x815dfdf25fbee9e8 -429, 0x5c61f4c7cba91697 -430, 0xdd5f5512fe2313a1 -431, 0x499dd918a92a53cd -432, 0xa7e969d007c97dfd -433, 0xb8d39c6fc81ac0bb -434, 0x1d646983def5746c -435, 0x44d4b3b17432a60c -436, 0x65664232a14db1e3 -437, 0xda8fae6433e7500b -438, 0xbe51b94ff2a3fe94 -439, 0xe9b1bd9a9098ef9f -440, 0xfe47d54176297ef5 -441, 0xb8ab99bc03bb7135 -442, 0xcfad97f608565b38 -443, 0xf05da71f6760d9c1 -444, 0xef8da40a7c70e7b -445, 0xe0465d58dbd5d138 -446, 0xb54a2d70eb1a938 -447, 0xfdd50c905958f2d8 -448, 0x3c41933c90a57d43 -449, 0x678f6d894c6ad0bb -450, 0x403e8f4582274e8 -451, 0x5cbbe975668df6b0 -452, 0x297e6520a7902f03 -453, 0x8f6dded33cd1efd7 -454, 0x8e903c97be8d783b -455, 0x10bd015577e30f77 -456, 0x3fcd69d1c36eab0c -457, 0xb45989f3ca198d3 -458, 0x507655ce02b491a9 -459, 0xa92cf99bb78602ce -460, 0xebfb82055fbc2f0f -461, 0x3334256279289b7a -462, 0xc19d2a0f740ee0ac -463, 0x8bb070dea3934905 -464, 0xa4ab57d3a8d1b3eb -465, 0xfee1b09bcacf7ff4 -466, 0xccc7fb41ceec41fa -467, 0xd4da49094eb5a74d -468, 0xed5c693770af02ed -469, 0x369dabc9bbfaa8e4 -470, 0x7eab9f360d054199 -471, 
0xe36dbebf5ee94076 -472, 0xd30840e499b23d7 -473, 0x8678e6cb545015ff -474, 0x3a47932ca0b336e -475, 0xeb7c742b6e93d6fe -476, 0x1404ea51fe5a62a9 -477, 0xa72cd49db978e288 -478, 0xfd7bada020173dcf -479, 0xc9e74fc7abe50054 -480, 0x93197847bb66808d -481, 0x25fd5f053dce5698 -482, 0xe198a9b18cc21f4 -483, 0x5cc27b1689452d5d -484, 0x8b3657af955a98dc -485, 0xc17f7584f54aa1c0 -486, 0xe821b088246b1427 -487, 0x32b5a9f6b45b6fa0 -488, 0x2aef7c315c2bae0c -489, 0xe1af8129846b705a -490, 0x4123b4c091b34614 -491, 0x6999d61ec341c073 -492, 0x14b9a8fcf86831ea -493, 0xfd4cff6548f46c9f -494, 0x350c3b7e6cc8d7d6 -495, 0x202a5047fecafcd5 -496, 0xa82509fe496bb57d -497, 0x835e4b2608b575fe -498, 0xf3abe3da919f54ec -499, 0x8705a21e2c9b8796 -500, 0xfd02d1427005c314 -501, 0xa38458faa637f49b -502, 0x61622f2360e7622a -503, 0xe89335a773c2963b -504, 0x481264b659b0e0d0 -505, 0x1e82ae94ebf62f15 -506, 0x8ea7812de49209d4 -507, 0xff963d764680584 -508, 0x418a68bef717f4af -509, 0x581f0e7621a8ab91 -510, 0x840337e9a0ec4150 -511, 0x951ef61b344be505 -512, 0xc8b1b899feb61ec2 -513, 0x8b78ca13c56f6ed9 -514, 0x3d2fd793715a946f -515, 0xf1c04fabcd0f4084 -516, 0x92b602614a9a9fcc -517, 0x7991bd7a94a65be7 -518, 0x5dead10b06cad2d7 -519, 0xda7719b33f722f06 -520, 0x9d87a722b7bff71e -521, 0xb038e479071409e9 -522, 0xf4e8bbec48054775 -523, 0x4fec2cd7a28a88ea -524, 0x839e28526aad3e56 -525, 0xd37ec57852a98bf0 -526, 0xdef2cbbe00f3a02d -527, 0x1aecfe01a9e4d801 -528, 0x59018d3c8beaf067 -529, 0x892753e6ac8bf3cd -530, 0xefdd3437023d2d1c -531, 0x447bfbd148c8cb88 -532, 0x282380221bd442b8 -533, 0xfce8658d1347384a -534, 0x60b211a7ec6bfa8 -535, 0xd21729cfcc692974 -536, 0x162087ecd5038a47 -537, 0x2b17000c4bce39d2 -538, 0x3a1f75ff6adcdce0 -539, 0x721a411d312f1a2c -540, 0x9c13b6133f66934d -541, 0xaa975d14978980e5 -542, 0x9403dbd4754203fa -543, 0x588c15762fdd643 -544, 0xdd1290f8d0ada73a -545, 0xd9b77380936103f4 -546, 0xb2e2047a356eb829 -547, 0x7019e5e7f76f7a47 -548, 0x3c29a461f62b001d -549, 0xa07dc6cfab59c116 -550, 0x9b97e278433f8eb -551, 
0x6affc714e7236588 -552, 0x36170aeb32911a73 -553, 0x4a665104d364a789 -554, 0x4be01464ec276c9c -555, 0x71bb10271a8b4ecf -556, 0xbf62e1d068bc018 -557, 0xc9ada5db2cbbb413 -558, 0x2bded75e726650e5 -559, 0x33d5a7af2f34385d -560, 0x8179c46661d85657 -561, 0x324ebcfd29267359 -562, 0xac4c9311dc9f9110 -563, 0xc14bb6a52f9f9c0 -564, 0xc430abe15e7fb9db -565, 0xf1cce5c14df91c38 -566, 0x651e3efa2c0750d3 -567, 0x38a33604a8be5c75 -568, 0x7aaf77fe7ff56a49 -569, 0xc0d1cc56bbf27706 -570, 0x887aa47324e156c6 -571, 0x12547c004b085e8d -572, 0xd86a8d6fbbbfd011 -573, 0x57c860188c92d7b4 -574, 0xcd5d3843d361b8ca -575, 0x8f586ef05a9cb3ef -576, 0x174456e1ba6267d5 -577, 0xf5dc302c62fe583c -578, 0xa349442fabcdb71 -579, 0xe5123c1a8b6fd08e -580, 0x80681552aa318593 -581, 0xb295396deaef1e31 -582, 0xabb626e0b900e32b -583, 0xf024db8d3f19c15e -584, 0x1d04bb9548e2fb6c -585, 0xd8ed2b2214936c2b -586, 0x618ca1e430a52bc9 -587, 0xccbca44a6088136b -588, 0xd0481855c8b9ccbe -589, 0x3c92a2fade28bdf7 -590, 0x855e9fefc38c0816 -591, 0x1269bbfe55a7b27c -592, 0x1d6c853d83726d43 -593, 0xc8655511cc7fcafc -594, 0x301503eb125a9b0e -595, 0xb3108e4532016b11 -596, 0xbb7ab6245da9cb3d -597, 0x18004c49116d85eb -598, 0x3480849c20f61129 -599, 0xe28f45157463937b -600, 0x8e85e61060f2ce1 -601, 0x1673da4ec589ba5e -602, 0x74b9a6bd1b194712 -603, 0xed39e147fa8b7601 -604, 0x28ce54019102ca77 -605, 0x42e0347f6d7a2f30 -606, 0xb6a908d1c4814731 -607, 0x16c3435e4e9a126d -608, 0x8880190514c1ad54 -609, 0xfffd86229a6f773c -610, 0x4f2420cdb0aa1a93 -611, 0xf8e1acb4120fc1fa -612, 0x63a8c553ab36a2f2 -613, 0x86b88cf3c0a6a190 -614, 0x44d8b2801622c792 -615, 0xf6eae14e93082ff1 -616, 0xd9ed4f5d1b8fac61 -617, 0x1808ce17f4e1f70 -618, 0x446e83ea336f262f -619, 0xc7c802b04c0917b7 -620, 0x626f45fd64968b73 -621, 0x9ffa540edc9b2c5c -622, 0xa96a1e219e486af8 -623, 0x2bb8963884e887a1 -624, 0xba7f68a5d029e3c4 -625, 0xefc45f44392d9ca0 -626, 0x98d77762503c5eab -627, 0xd89bcf62f2da627c -628, 0xa3cab8347f833151 -629, 0xa095b7595907d5c7 -630, 0x3b3041274286181 -631, 
0xb518db8919eb71fa -632, 0x187036c14fdc9a36 -633, 0xd06e28301e696f5d -634, 0xdbc71184e0c56492 -635, 0xfe51e9cae6125bfd -636, 0x3b12d17cd014df24 -637, 0x3b95e4e2c986ac1a -638, 0x29c1cce59fb2dea2 -639, 0x58c05793182a49d6 -640, 0xc016477e330d8c00 -641, 0x79ef335133ada5d -642, 0x168e2cad941203f3 -643, 0xf99d0f219d702ef0 -644, 0x655628068f8f135b -645, 0xdcdea51910ae3f92 -646, 0x8e4505039c567892 -647, 0x91a9ec7e947c89ae -648, 0x8717172530f93949 -649, 0x1c80aba9a440171a -650, 0x9c8f83f6ebe7441e -651, 0x6c05e1efea4aa7f9 -652, 0x10af696b777c01b -653, 0x5892e9d9a92fc309 -654, 0xd2ba7da71e709432 -655, 0x46378c7c3269a466 -656, 0x942c63dfe18e772c -657, 0x6245cf02ef2476f -658, 0x6f265b2759ea2aea -659, 0x5aa757f17d17f4a6 -660, 0x1ad6a3c44fa09be6 -661, 0xe861af14e7015fb8 -662, 0x86be2e7db388c77 -663, 0x5c7bba32b519e9a0 -664, 0x3feb314850c4437b -665, 0x97955add60cfb45b -666, 0xfdb536230a540bdc -667, 0xdac9d7bf6e58512e -668, 0x4894c00e474e8120 -669, 0xa1918a37739da366 -670, 0xa8097f2096532807 -671, 0x592afe50e6c5e643 -672, 0xd69050ee6dcb33dc -673, 0xa6956b262dd3c561 -674, 0x1a55c815555e63f7 -675, 0x2ec7fd37516de2bb -676, 0x8ec251d9c70e76ba -677, 0x9b76e4abafd2689 -678, 0x9ce3f5c751a57df1 -679, 0x915c4818bf287bc7 -680, 0x2293a0d1fe07c735 -681, 0x7627dcd5d5a66d3d -682, 0xb5e4f92cc49c7138 -683, 0x6fc51298731d268c -684, 0xd19800aa95441f87 -685, 0x14f70f31162fa115 -686, 0x41a3da3752936f59 -687, 0xbec0652be95652ee -688, 0x7aa4bdb1020a290f -689, 0x4382d0d9bee899ef -690, 0xe6d988ae4277d6ff -691, 0xe618088ccb2a32d1 -692, 0x411669dfaa899e90 -693, 0x234e2bf4ba76d9f -694, 0xe109fe4cb7828687 -695, 0x1fb96b5022b0b360 -696, 0x6b24ad76c061a716 -697, 0x7e1781d4d7ecee15 -698, 0xf20c2dbe82ba38ba -699, 0xeda8e8ae1d943655 -700, 0xa58d196e2a77eaec -701, 0x44564765a5995a0b -702, 0x11902fe871ecae21 -703, 0x2ea60279900e675d -704, 0x38427227c18a9a96 -705, 0xe0af01490a1b1b48 -706, 0x826f91997e057824 -707, 0x1e57308e6e50451 -708, 0xb42d469bbbfdc350 -709, 0xb9734cff1109c49b -710, 0x98967559bb9d364f -711, 
0xd6be360041907c12 -712, 0xa86a1279122a1e21 -713, 0x26f99a8527bfc698 -714, 0xfa8b85758f28f5d6 -715, 0xe3057429940806ae -716, 0x4bee2d7e84f93b2b -717, 0x948350a76ea506f4 -718, 0xa139154488045e74 -719, 0x8893579ba5e78085 -720, 0x5f21c215c6a9e397 -721, 0x456134f3a59641dc -722, 0x92c0273f8e97a9c6 -723, 0xd2936c9c3f0c6936 -724, 0xcfa4221e752c4735 -725, 0x28cd5a7457355dca -726, 0xecdfdde23d90999f -727, 0x60631b2d494d032b -728, 0xf67289df269a827f -729, 0xcbe8011ef0f5b7ef -730, 0x20eea973c70a84f5 -731, 0xbe1fd200398557ce -732, 0xd2279ee030191bba -733, 0xf2bd4291dedaf819 -734, 0xfc6d167dbe8c402 -735, 0x39ac298da5d0044b -736, 0xceac026f5f561ce -737, 0x10a5b0bdd8ad60e6 -738, 0xdeb3c626df6d4bcb -739, 0x3c128962e77ff6ca -740, 0xc786262e9c67a0e5 -741, 0x4332855b3febcdc0 -742, 0x7bda9724d1c0e020 -743, 0x6a8c93399bc4df22 -744, 0xa9b20100ac707396 -745, 0xa11a3458502c4eb5 -746, 0xb185461c60478941 -747, 0x13131d56195b7ff6 -748, 0x8d55875ddbd4aa1c -749, 0xc09b67425f469aa5 -750, 0x39e33786cc7594c4 -751, 0x75e96db8e4b08b93 -752, 0xda01cd12a3275d1e -753, 0x2c49e7822344fab5 -754, 0x9bd5f10612514ca7 -755, 0x1c801a5c828e7332 -756, 0x29797d3f4f6c7b4c -757, 0xac992715e21e4e53 -758, 0xe40e89ee887ddb37 -759, 0x15189a2b265a783b -760, 0xa854159a52af5c5 -761, 0xb9d8a5a81c12bead -762, 0x3240cdc9d59e2a58 -763, 0x1d0b872234cf8e23 -764, 0xc01224cf6ce12cff -765, 0x2601e9f3905c8663 -766, 0xd4ecf9890168d6b4 -767, 0xa45db796d89bfdd5 -768, 0x9f389406dad64ab4 -769, 0xa5a851adce43ffe3 -770, 0xd0962c41c26e5aa9 -771, 0x8a671679e48510a4 -772, 0xc196dc0924a6bfeb -773, 0x3ead661043b549cb -774, 0x51af4ca737d405ac -775, 0xf4425b5c62275fb6 -776, 0x71e69d1f818c10f5 -777, 0xacaf4af2d3c70162 -778, 0x2e1f1d4fd7524244 -779, 0xe54fdd8f388890e8 -780, 0xfda0d33e84eb2b83 -781, 0x53965c5e392b81da -782, 0x5c92288267263097 -783, 0xcac1b431c878c66c -784, 0x36c0e1cf417241c6 -785, 0x5cc4d9cd1a36bf2c -786, 0x32e4257bb5d3e470 -787, 0x4aecff904adb44fb -788, 0x4d91a8e0d1d60cac -789, 0xa3b478388385b038 -790, 0x48d955f24eba70be -791, 
0x310e4deb07f24f68 -792, 0x8853e73b1f30a5a -793, 0x278aee45c2a65c5 -794, 0xf6932eedbd62fb0b -795, 0xafb95958c82fafad -796, 0x78e807c18616c16c -797, 0xd7abadda7488ed9f -798, 0x2dd72e2572aa2ae6 -799, 0x6ec3791982c2be09 -800, 0x6865bb314fac478f -801, 0xa14dc0ce09000d1a -802, 0xb8081ad134da10f2 -803, 0xc4ac1534aa825ef5 -804, 0xd83aeb48ae2d538f -805, 0x38052027e3074be4 -806, 0xa9833e06ef136582 -807, 0x4f02d790ec9fd78 -808, 0xec2f60bc711c5bdc -809, 0x9253b0d12268e561 -810, 0xa8ac607fdd62c206 -811, 0x895e28ebc920289f -812, 0xe2fd42b154243ac7 -813, 0xc69cac2f776eee19 -814, 0xf4d4ac11db56d0dc -815, 0xa8d37049b9f39833 -816, 0x75abbf8a196c337c -817, 0xb115bb76750d27b8 -818, 0x39426d187839154 -819, 0xd488423e7f38bf83 -820, 0xbb92e0c76ecb6a62 -821, 0x3055a018ce39f4e3 -822, 0xc93fe0e907729bfb -823, 0x65985d17c5863340 -824, 0x2088ae081b2028e1 -825, 0x6e628de873314057 -826, 0x864377cccf573f0e -827, 0xae03f4c9aa63d132 -828, 0xb1db766d6404c66d -829, 0xdce5a22414a374b -830, 0x622155b777819997 -831, 0x69fe96e620371f3c -832, 0xa9c67dbc326d94fc -833, 0x932a84ae5dd43bab -834, 0xe2301a20f6c48c3f -835, 0x795d2e79c6477300 -836, 0xd8e3e631289521e7 -837, 0xae2684979002dfd6 -838, 0xc9c2392377550f89 -839, 0xa1b0c99d508ef7ec -840, 0x593aef3c5a5272ec -841, 0xe32e511a4b7162cd -842, 0xab3b81655f5a2857 -843, 0x1b535e1a0aaf053e -844, 0x5b33f56c1b6a07e2 -845, 0x782dc8cfcac4ef36 -846, 0xb3d4f256eecfd202 -847, 0xf73a6598f58c4f7e -848, 0xd5722189524870ae -849, 0x707878de6b995fc0 -850, 0xc3eb6ba73e3d7e8a -851, 0xca75c017655b75a7 -852, 0x1b29369ea3541e5f -853, 0x352e98858bdb58a3 -854, 0x1e4412d184b6b27d -855, 0x2d375ba0304b2d17 -856, 0x56c30fce69a5d08e -857, 0x6b8c2b0c06584bda -858, 0xde4dfff228c8c91f -859, 0xb7c9edd574e6287f -860, 0xf6078281c9fca2b2 -861, 0xb9b9a51de02a2f1e -862, 0xa411bef31c0103b0 -863, 0xc5facd8fc5e1d7a3 -864, 0x54e631c05ddf7359 -865, 0x815b42b3fd06c474 -866, 0xc9ac07566fda18ec -867, 0xd84ea62957bd8e15 -868, 0x5575f74b5cfd8803 -869, 0x5779a8d460c2e304 -870, 0xfd6e87e264a85587 -871, 
0xa1d674daa320b26d -872, 0x2c3c3ec64b35afc4 -873, 0x393a274ff03e6935 -874, 0x1f40ecbac52c50ea -875, 0xc3de64fa324ffc0c -876, 0x56ae828b7f9deb04 -877, 0xe7c1a77b5c1f2cb3 -878, 0xa4c4aab19ea921cc -879, 0xec164c238825822c -880, 0xa6a3304770c03b03 -881, 0x3a63641d5b1e8123 -882, 0x42677be3a54617ef -883, 0xa2680423e3a200c0 -884, 0x8b17cf75f3f37277 -885, 0xe7ce65a49242be3d -886, 0x7f85934271323e4b -887, 0xcfb0f431f79a4fab -888, 0x392e4041a8505b65 -889, 0xd3e5daf0d8b25ea6 -890, 0x9447eff675d80f53 -891, 0xea27a9d53cfaeea8 -892, 0xe3f2335945a83ba -893, 0x8875a43ce216413b -894, 0xe49941f9eabce33e -895, 0x9357c1296683a5b1 -896, 0xf0f16439e81ee701 -897, 0x3181515295ffd79a -898, 0x9d7150fffd169ed8 -899, 0x2d6a1d281e255a72 -900, 0x81bf1286fb3a92b6 -901, 0x566d3079b499e279 -902, 0xc7939ca8f047341 -903, 0xb1f8050e7c2d59f6 -904, 0x605701045e7be192 -905, 0x51b73360e8e31a1c -906, 0x9f4ad54483ba9fe0 -907, 0xd3085b8fcf69d1c8 -908, 0xc3e7475026dc5f0b -909, 0x5800f8554b157354 -910, 0x37dfdf858cfcd963 -911, 0x3a1fce05ce385072 -912, 0xf495c062645c20c3 -913, 0xdcbeec2c3492c773 -914, 0xc38f427589d1d0b4 -915, 0x681ead60216a8184 -916, 0x4bd569c40cc88c41 -917, 0x49b0d442e130b7a2 -918, 0xee349156b7d1fa3f -919, 0x2bde2d2db055135b -920, 0xc6a460d2fbcb2378 -921, 0xd0f170494ff3dbb -922, 0xb294422492528a23 -923, 0xfc95873c854e7b86 -924, 0x6c9c3ad1797bb19c -925, 0xe0c06f2aab65062d -926, 0x58e32ce0f11e3a81 -927, 0xa745fcd729ff5036 -928, 0x599b249b2fc2cdb2 -929, 0x78f23b5b0dd5b082 -930, 0x6de3e957f549ecfc -931, 0x9d0712fa6d878756 -932, 0x9076e8554e4a413a -933, 0xf3185818c0294de8 -934, 0x5de7cdf4b455b9b6 -935, 0xb15f6908ed703f7d -936, 0x98c654dfedc6818 -937, 0x120502ab0e93ae42 -938, 0x67966a98a58dc120 -939, 0x1caa0fc628989482 -940, 0xd8b2c3cd480a8625 -941, 0x85c70071b3aed671 -942, 0xff385f8473714662 -943, 0xe2868e4bf3773b63 -944, 0x96cf8019b279298e -945, 0x8511cc930bd74800 -946, 0x5312e48fdd55f5ab -947, 0xfcdae564b52df78d -948, 0x9eee48373e652176 -949, 0x953788f6bcbc56b0 -950, 0xd1a3855dbd2f6b37 -951, 
0x3ad32acf77f4d1e9 -952, 0x917c7be81b003e30 -953, 0x9ce817da1e2e9dfb -954, 0x2968983db162d44d -955, 0x1e005decef5828ad -956, 0xc38fe59d1aa4f3d5 -957, 0xf357f1710dc02f1d -958, 0x2613912a4c83ec67 -959, 0x832a11470b9a17cb -960, 0x5e85508a611f0dad -961, 0x2781131677f59d56 -962, 0xa82358d7d4b0237f -963, 0xfbf8b3cc030c3af6 -964, 0x68b2f68ac8a55adb -965, 0x3b6fcf353add0ada -966, 0xd1956049bcd15bd5 -967, 0x95b76f31c7f98b6d -968, 0x814b6690df971a84 -969, 0xdcf7959cddd819e4 -970, 0xcf8c72c5d804fc88 -971, 0x56883769c8945a22 -972, 0x1f034652f658cf46 -973, 0x41df1324cda235a1 -974, 0xeccd32524504a054 -975, 0x974e0910a04ec02c -976, 0x72104507b821f6db -977, 0x791f8d089f273044 -978, 0xe0f79a4f567f73c3 -979, 0x52fe5bea3997f024 -980, 0x5f8b9b446494f78 -981, 0xfd9f511947059190 -982, 0x3aea9dac6063bce3 -983, 0xbfdae4dfc24aee60 -984, 0xa82cdbbf0a280318 -985, 0xf460aae18d70aa9d -986, 0x997367cb204a57c4 -987, 0x616e21ab95ba05ef -988, 0x9bfc93bec116769f -989, 0x2b2ee27c37a3fa5b -990, 0xb25c6ed54006ee38 -991, 0xab04d4a5c69e69a5 -992, 0x6d2f6b45f2d8438f -993, 0x4ad2f32afc82f092 -994, 0x513d718908f709c0 -995, 0x5272aadc4fffca51 -996, 0xeb3f87e66156ef5d -997, 0xf8a3d5a46a86ba85 -998, 0xdb4548a86f27abfd -999, 0x57c05f47ff62380d diff --git a/venv/lib/python3.7/site-packages/numpy/random/tests/data/sfc64-testset-1.csv b/venv/lib/python3.7/site-packages/numpy/random/tests/data/sfc64-testset-1.csv deleted file mode 100644 index 4fffe69..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/tests/data/sfc64-testset-1.csv +++ /dev/null @@ -1,1001 +0,0 @@ -seed, 0xdeadbeaf -0, 0xa475f55fbb6bc638 -1, 0xb2d594b6c29d971c -2, 0x275bc4ece4484fb1 -3, 0x569be72d9b3492fb -4, 0x89a5bb9b206a670c -5, 0xd951bfa06afdc3f9 -6, 0x7ee2e1029d52a265 -7, 0x12ef1d4de0cb4d4c -8, 0x41658ba8f0ef0280 -9, 0x5b650c82e4fe09c5 -10, 0x638a9f3e30ec4e94 -11, 0x147487fb2ba9233e -12, 0x89ef035603d2d1fb -13, 0xe66ca57a190e6cbe -14, 0x330f673740dd61fc -15, 0xc71d3dce2f8bb34e -16, 0x3c07c39ff150b185 -17, 0x5df952b6cae8f099 -18, 
0x9f09f2b1f0ceac80 -19, 0x19598eee2d0c4c67 -20, 0x64e06483702e0ebd -21, 0xda04d1fdb545f7fa -22, 0xf2cf53b61a0c4f9b -23, 0xf0bb724ce196f66e -24, 0x71cefde55d9cf0f -25, 0x6323f62824a20048 -26, 0x1e93604680f14b4e -27, 0xd9d8fad1d4654025 -28, 0xf4ee25af2e76ca08 -29, 0x6af3325896befa98 -30, 0xad9e43abf5e04053 -31, 0xbf930e318ce09de3 -32, 0x61f9583b4f9ffe76 -33, 0x9b69d0b3d5ec8958 -34, 0xa608f250f9b2ca41 -35, 0x6fdba7073dc2bb5d -36, 0xa9d57601efea6d26 -37, 0xc24a88a994954105 -38, 0xc728b1f78d88fe5b -39, 0x88da88c2b083b3b2 -40, 0xa9e27f7303c76cfd -41, 0xc4c24608c29176eb -42, 0x5420b58466b972fd -43, 0xd2018a661b6756c8 -44, 0x7caed83d9573fc7 -45, 0x562a3d81b849a06a -46, 0x16588af120c21f2c -47, 0x658109a7e0eb4837 -48, 0x877aabb14d3822e1 -49, 0x95704c342c3745fe -50, 0xeeb8a0dc81603616 -51, 0x431bf94889290419 -52, 0xe4a9410ab92a5863 -53, 0xbc6be64ea60f12ba -54, 0x328a2da920015063 -55, 0x40f6b3bf8271ae07 -56, 0x4068ff00a0e854f8 -57, 0x1b287572ca13fa78 -58, 0xa11624a600490b99 -59, 0x4a04ef29eb7150fa -60, 0xcc9469ab5ffb739 -61, 0x99a6a9f8d95e782 -62, 0x8e90356573e7a070 -63, 0xa740b8fb415c81c4 -64, 0x47eccef67447f3da -65, 0x2c720afe3a62a49b -66, 0xe2a747f0a43eacf4 -67, 0xba063a87ab165576 -68, 0xbc1c78ed27feb5a3 -69, 0x285a19fa3974f9d -70, 0x489c61e704f5f0e3 -71, 0xf5ab04f6b03f238b -72, 0x7e25f88138a110dd -73, 0xc3d1cef3d7c1f1d1 -74, 0xc3de6ec64d0d8e00 -75, 0x73682a15b6cc5088 -76, 0x6fecbeb319163dc5 -77, 0x7e100d5defe570a1 -78, 0xad2af9af076dce57 -79, 0x3c65100e23cd3a9a -80, 0x4b442cc6cfe521bb -81, 0xe89dc50f8ab1ef75 -82, 0x8b3c6fdc2496566 -83, 0xdfc50042bc2c308c -84, 0xe39c5f158b33d2b2 -85, 0x92f6adefdfeb0ac -86, 0xdf5808a949c85b3e -87, 0x437384021c9dace9 -88, 0xa7b5ed0d3d67d8f -89, 0xe1408f8b21da3c34 -90, 0xa1bba125c1e80522 -91, 0x7611dc4710385264 -92, 0xb00a46ea84082917 -93, 0x51bf8002ffa87cef -94, 0x9bb81013e9810adc -95, 0xd28f6600013541cd -96, 0xc2ca3b1fa7791c1f -97, 0x47f9ad58f099c82c -98, 0x4d1bb9458469caf9 -99, 0xca0b165b2844257 -100, 0xc3b2e667d075dc66 -101, 
0xde22f71136a3dbb1 -102, 0x23b4e3b6f219e4c3 -103, 0x327e0db4c9782f66 -104, 0x9365506a6c7a1807 -105, 0x3e868382dedd3be7 -106, 0xff04fa6534bcaa99 -107, 0x96621a8862995305 -108, 0x81bf39cb5f8e1df7 -109, 0x79b684bb8c37af7a -110, 0xae3bc073c3cde33c -111, 0x7805674112c899ac -112, 0xd95a27995abb20f2 -113, 0x71a503c57b105c40 -114, 0x5ff00d6a73ec8acc -115, 0x12f96391d91e47c2 -116, 0xd55ca097b3bd4947 -117, 0x794d79d20468b04 -118, 0x35d814efb0d7a07d -119, 0xfa9ac9bd0aae76d3 -120, 0xa77b8a3711e175cd -121, 0xe6694fbf421f9489 -122, 0xd8f1756525a1a0aa -123, 0xe38dfa8426277433 -124, 0x16b640c269bbcd44 -125, 0x2a7a5a67ca24cfeb -126, 0x669039c28d5344b4 -127, 0x2a445ee81fd596bb -128, 0x600df94cf25607e0 -129, 0x9358561a7579abff -130, 0xee1d52ea179fc274 -131, 0x21a8b325e89d31be -132, 0x36fc0917486eec0a -133, 0x3d99f40717a6be9f -134, 0x39ac140051ca55ff -135, 0xcef7447c26711575 -136, 0xf22666870eff441d -137, 0x4a53c6134e1c7268 -138, 0xd26de518ad6bdb1b -139, 0x1a736bf75b8b0e55 -140, 0xef1523f4e6bd0219 -141, 0xb287b32fd615ad92 -142, 0x2583d6af5e841dd5 -143, 0x4b9294aae7ca670c -144, 0xf5aa4a84174f3ca9 -145, 0x886300f9e0dc6376 -146, 0x3611401e475ef130 -147, 0x69b56432b367e1ac -148, 0x30c330e9ab36b7c4 -149, 0x1e0e73079a85b8d5 -150, 0x40fdfc7a5bfaecf -151, 0xd7760f3e8e75a085 -152, 0x1cc1891f7f625313 -153, 0xeece1fe6165b4272 -154, 0xe61111b0c166a3c1 -155, 0x2f1201563312f185 -156, 0xfd10e8ecdd2a57cb -157, 0x51cdc8c9dd3a89bf -158, 0xed13cc93938b5496 -159, 0x843816129750526b -160, 0xd09995cd6819ada -161, 0x4601e778d40607df -162, 0xef9df06bd66c2ea0 -163, 0xae0bdecd3db65d69 -164, 0xbb921a3c65a4ae9a -165, 0xd66698ce8e9361be -166, 0xacdc91647b6068f4 -167, 0xe505ef68f2a5c1c0 -168, 0xd6e62fd27c6ab137 -169, 0x6a2ba2c6a4641d86 -170, 0x9c89143715c3b81 -171, 0xe408c4e00362601a -172, 0x986155cbf5d4bd9d -173, 0xb9e6831728c893a7 -174, 0xb985497c3bf88d8c -175, 0xd0d729214b727bec -176, 0x4e557f75fece38a -177, 0x6572067fdfd623ca -178, 0x178d49bb4d5cd794 -179, 0xe6baf59f60445d82 -180, 0x5607d53518e3a8d2 -181, 
0xba7931adb6ebbd61 -182, 0xe853576172611329 -183, 0xe945daff96000c44 -184, 0x565b9ba3d952a176 -185, 0xcdb54d4f88c584c8 -186, 0x482a7499bee9b5e5 -187, 0x76560dd0affe825b -188, 0x2a56221faa5ca22c -189, 0x7729be5b361f5a25 -190, 0xd6f2195795764876 -191, 0x59ef7f8f423f18c5 -192, 0x7ebefed6d02adde1 -193, 0xcfec7265329c73e5 -194, 0x4fd8606a5e59881c -195, 0x95860982ae370b73 -196, 0xdecfa33b1f902acc -197, 0xf9b8a57400b7c0a6 -198, 0xd20b822672ec857b -199, 0x4eb81084096c7364 -200, 0xe535c29a44d9b6ad -201, 0xdef8b48ebacb2e29 -202, 0x1063bc2b8ba0e915 -203, 0xe4e837fb53d76d02 -204, 0x4df935db53579fb8 -205, 0xa30a0c8053869a89 -206, 0xe891ee58a388a7b5 -207, 0x17931a0c64b8a985 -208, 0xaf2d350b494ce1b3 -209, 0x2ab9345ffbcfed82 -210, 0x7de3fe628a2592f0 -211, 0x85cf54fab8b7e79d -212, 0x42d221520edab71b -213, 0x17b695b3af36c233 -214, 0xa4ffe50fe53eb485 -215, 0x1102d242db800e4d -216, 0xc8dc01f0233b3b6 -217, 0x984a030321053d36 -218, 0x27fa8dc7b7112c0e -219, 0xba634dd8294e177f -220, 0xe67ce34b36332eb -221, 0x8f1351e1894fb41a -222, 0xb522a3048761fd30 -223, 0xc350ad9bc6729edc -224, 0xe0ed105bd3c805e1 -225, 0xa14043d2b0825aa7 -226, 0xee7779ce7fc11fdf -227, 0xc0fa8ba23a60ab25 -228, 0xb596d1ce259afbad -229, 0xaa9b8445537fdf62 -230, 0x770ab2c700762e13 -231, 0xe812f1183e40cc1 -232, 0x44bc898e57aefbbd -233, 0xdd8a871df785c996 -234, 0x88836a5e371eb36b -235, 0xb6081c9152623f27 -236, 0x895acbcd6528ca96 -237, 0xfb67e33ddfbed435 -238, 0xaf7af47d323ce26 -239, 0xe354a510c3c39b2d -240, 0x5cacdedda0672ba3 -241, 0xa440d9a2c6c22b09 -242, 0x6395099f48d64304 -243, 0xc11cf04c75f655b5 -244, 0x1c4e054d144ddb30 -245, 0x3e0c2db89d336636 -246, 0x127ecf18a5b0b9a7 -247, 0x3b50551a88ea7a73 -248, 0xbd27003e47f1f684 -249, 0xf32d657782baac9b -250, 0x727f5cabf020bc9 -251, 0x39c1c1c226197dc7 -252, 0x5552c87b35deeb69 -253, 0x64d54067b5ce493f -254, 0x3494b091fe28dda0 -255, 0xdf0278bc85ee2965 -256, 0xdef16fec25efbd66 -257, 0xe2be09f578c4ce28 -258, 0xd27a9271979d3019 -259, 0x427f6fcd71845e3 -260, 0x26b52c5f81ec142b -261, 
0x98267efc3986ad46 -262, 0x7bf4165ddb7e4374 -263, 0xd05f7996d7941010 -264, 0x3b3991de97b45f14 -265, 0x9068217fb4f27a30 -266, 0xd8fe295160afc7f3 -267, 0x8a159fab4c3bc06f -268, 0x57855506d19080b6 -269, 0x7636df6b3f2367a4 -270, 0x2844ee3abd1d5ec9 -271, 0xe5788de061f51c16 -272, 0x69e78cc9132a164 -273, 0xacd53cde6d8cd421 -274, 0xb23f3100068e91da -275, 0x4140070a47f53891 -276, 0xe4a422225a96e53a -277, 0xb82a8925a272a2ac -278, 0x7c2f9573590fe3b7 -279, 0xbaf80764db170575 -280, 0x955abffa54358368 -281, 0x355ce7460614a869 -282, 0x3700ede779a4afbf -283, 0x10a6ec01d92d68cd -284, 0x3308f5a0a4c0afef -285, 0x97b892d7601136c9 -286, 0x4955c3b941b8552e -287, 0xca85aa67e941961d -288, 0xb1859ae5db28e9d2 -289, 0x305d072ac1521fbd -290, 0xed52a868996085bb -291, 0x723bfa6a76358852 -292, 0x78d946ecd97c5fb3 -293, 0x39205b30a8e23e79 -294, 0xb927e3d086baadbe -295, 0xa18d6946136e1ff5 -296, 0xdab6f0b51c1eb5ff -297, 0xf0a640bf7a1af60c -298, 0xf0e81db09004d0d4 -299, 0xfe76cebdbe5a4dde -300, 0x2dafe9cc3decc376 -301, 0x4c871fdf1af34205 -302, 0xe79617d0c8fa893b -303, 0xee658aaad3a141f7 -304, 0xfd91aa74863e19f1 -305, 0x841b8f55c103cc22 -306, 0x22766ed65444ad5d -307, 0x56d03d1beca6c17a -308, 0x5fd4c112c92036ae -309, 0x75466ae58a5616dc -310, 0xfbf98b1081e802a9 -311, 0xdc325e957bf6d8f5 -312, 0xb08da7015ebd19b7 -313, 0xf25a9c0944f0c073 -314, 0xf4625bafa0ced718 -315, 0x4349c9e093a9e692 -316, 0x75a9ccd4dd8935cb -317, 0x7e6cf9e539361e91 -318, 0x20fdd22fb6edd475 -319, 0x5973021b57c2311f -320, 0x75392403667edc15 -321, 0xed9b2156ea70d9f1 -322, 0xf40c114db50b64a0 -323, 0xe26bb2c9eef20c62 -324, 0x409c1e3037869f03 -325, 0xcdfd71fdda3b7f91 -326, 0xa0dfae46816777d6 -327, 0xde060a8f61a8deb8 -328, 0x890e082a8b0ca4fc -329, 0xb9f2958eddf2d0db -330, 0xd17c148020d20e30 -331, 0xffdc9cc176fe7201 -332, 0xffb83d925b764c1 -333, 0x817ea639e313da8d -334, 0xa4dd335dd891ca91 -335, 0x1342d25a5e81f488 -336, 0xfa7eb9c3cf466b03 -337, 0xfe0a423d44b185d0 -338, 0x101cfd430ab96049 -339, 0x7b5d3eda9c4504b -340, 0xe20ccc006e0193f1 -341, 
0xf54ccddedebc5df0 -342, 0xc0edd142bd58f1db -343, 0x3831f40d378d2430 -344, 0x80132353f0a88289 -345, 0x688f23c419d03ef8 -346, 0x4c6837e697884066 -347, 0x699387bb2e9a3a8f -348, 0x8996f860342448d8 -349, 0xb0f80dff99bfa5cc -350, 0x3e927a7f9ea12c8e -351, 0xd7e498d1e5f9dff3 -352, 0x78ecb97bb3f864cc -353, 0x3c4ffd069a014d38 -354, 0xf8d5073a1e09b4d4 -355, 0x8717e854f9faef23 -356, 0xfbcc5478d8d0ad7 -357, 0xd3cd8b233ca274ff -358, 0x8bd8f11f79beb265 -359, 0xf64498a832d8fd0e -360, 0xb01bba75112131ec -361, 0x55572445a7869781 -362, 0x7b56622f18cb3d7a -363, 0x7f192c9e075bdb83 -364, 0xd9a112f836b83ff3 -365, 0x68673b37269653dc -366, 0xe46a9433fb6a0879 -367, 0x127d756ca4779001 -368, 0xc1378e8b1e8eab94 -369, 0x1006edb0f51d078c -370, 0xc6dd53961232d926 -371, 0x9a4aeef44038256d -372, 0xd357f4fa652d4f5f -373, 0x59f3d2cc3378598 -374, 0xe76e6207a824a7fc -375, 0x5fc5e33712ceffef -376, 0x77d24aeb0ccb1adc -377, 0x5be4b2826805659e -378, 0x257c69d787e64634 -379, 0x58dd52ca6bc727b1 -380, 0x3ab997767235ea33 -381, 0x986a2a7a966fad14 -382, 0xc900f8b27761dcc4 -383, 0x44991bdb13795700 -384, 0xe5c145a4fe733b2 -385, 0x56f041b56bffe0d3 -386, 0x5779c4fef8067996 -387, 0xa0fe8748e829532d -388, 0x840c1277d78d9dd4 -389, 0x37ebcb315432acbc -390, 0xf4bc8738433ba3be -391, 0x8b122993f2e10062 -392, 0xe1fe8481f2681ed5 -393, 0x8e23f1630d9f494a -394, 0xda24661a01b7d0b3 -395, 0x7a02942a179cee36 -396, 0xf1e08a3c09b71ac -397, 0x3dec2cc7ee0bd8fd -398, 0x1f3e480113d805d4 -399, 0xc061b973ad4e3f2c -400, 0x6bea750f17a66836 -401, 0xbc2add72eac84c25 -402, 0xcff058d3f97934ca -403, 0x54ccc30987778ec2 -404, 0x93449ec1e1469558 -405, 0xe2ff369eb0c6836 -406, 0x41c2df2d63bf8e55 -407, 0xf9302629b6c71be2 -408, 0xdd30376b8e5ab29a -409, 0x12db9e04f911d754 -410, 0x8d03d6cd359f1b97 -411, 0xe15956511abf1cee -412, 0x9b68e10e2c2fd940 -413, 0x2e28de6491c1ce53 -414, 0x52b329b72d0c109d -415, 0xc2c0b115f9da2a60 -416, 0x6ca084105271bbff -417, 0x49b92b8676058c1e -418, 0x767fc92a70f7e5a3 -419, 0x87ba4ed4b65a6aa0 -420, 0xf70b052e0a3975e9 -421, 
0x3e925c3306db9eec -422, 0x43253f1d96ac9513 -423, 0xe3e04f1a1ea454c4 -424, 0x763e3f4cc81ba0c8 -425, 0x2a2721ac69265705 -426, 0xdf3b0ac6416ea214 -427, 0xa6a6b57450f3e000 -428, 0xc3d3b1ac7dbfe6ac -429, 0xb66e5e6f7d2e4ec0 -430, 0x43c65296f98f0f04 -431, 0xdb0f6e3ff974d842 -432, 0x3d6b48e02ebb203b -433, 0xd74674ebf09d8f27 -434, 0xbe65243c58fc1200 -435, 0x55eb210a68d42625 -436, 0x87badab097dbe883 -437, 0xada3fda85a53824f -438, 0xef2791e8f48cd37a -439, 0x3fe7fceb927a641a -440, 0xd3bffd3ff031ac78 -441, 0xb94efe03da4d18fb -442, 0x162a0ad8da65ea68 -443, 0x300f234ef5b7e4a6 -444, 0xa2a8b4c77024e4fb -445, 0x5950f095ddd7b109 -446, 0xded66dd2b1bb02ba -447, 0x8ec24b7fa509bcb6 -448, 0x9bede53d924bdad6 -449, 0xa9c3f46423be1930 -450, 0x6dfc90597f8de8b4 -451, 0xb7419ebc65b434f0 -452, 0xa6596949238f58b9 -453, 0x966cbade640829b8 -454, 0x58c74877bdcbf65e -455, 0xaa103b8f89b0c453 -456, 0x219f0a86e41179a4 -457, 0x90f534fc06ddc57f -458, 0x8db7cdd644f1affa -459, 0x38f91de0167127ac -460, 0xdcd2a65e4df43daa -461, 0x3e04f34a7e01f834 -462, 0x5b237eea68007768 -463, 0x7ff4d2b015921768 -464, 0xf786b286549d3d51 -465, 0xaefa053fc2c3884c -466, 0x8e6a8ff381515d36 -467, 0x35b94f3d0a1fce3c -468, 0x165266d19e9abb64 -469, 0x1deb5caa5f9d8076 -470, 0x13ab91290c7cfe9d -471, 0x3651ca9856be3e05 -472, 0xe7b705f6e9cccc19 -473, 0xd6e7f79668c127ed -474, 0xa9faf37154896f92 -475, 0x89fbf190603e0ab1 -476, 0xb34d155a86f942d0 -477, 0xb2d4400a78bfdd76 -478, 0x7c0946aca8cfb3f0 -479, 0x7492771591c9d0e8 -480, 0xd084d95c5ca2eb28 -481, 0xb18d12bd3a6023e -482, 0xea217ed7b864d80b -483, 0xe52f69a755dd5c6f -484, 0x127133993d81c4aa -485, 0xe07188fcf1670bfb -486, 0x178fbfe668e4661d -487, 0x1c9ee14bb0cda154 -488, 0x8d043b96b6668f98 -489, 0xbc858986ec96ca2b -490, 0x7660f779d528b6b7 -491, 0xd448c6a1f74ae1d3 -492, 0x178e122cfc2a6862 -493, 0x236f000abaf2d23b -494, 0x171b27f3f0921915 -495, 0x4c3ff07652f50a70 -496, 0x18663e5e7d3a66ca -497, 0xb38c97946c750cc9 -498, 0xc5031aae6f78f909 -499, 0x4d1514e2925e95c1 -500, 0x4c2184a741dabfbb -501, 
0xfd410364edf77182 -502, 0xc228157f863ee873 -503, 0x9856fdc735cc09fc -504, 0x660496cd1e41d60e -505, 0x2edf1d7e01954c32 -506, 0xd32e94639bdd98cf -507, 0x8e153f48709a77d -508, 0x89357f332d2d6561 -509, 0x1840d512c97085e6 -510, 0x2f18d035c9e26a85 -511, 0x77b88b1448b26d5b -512, 0xc1ca6ef4cdae0799 -513, 0xcc203f9e4508165f -514, 0xeaf762fbc9e0cbbe -515, 0xc070c687f3c4a290 -516, 0xd49ed321068d5c15 -517, 0x84a55eec17ee64ee -518, 0x4d8ee685298a8871 -519, 0x9ff5f17d7e029793 -520, 0x791d7d0d62e46302 -521, 0xab218b9114e22bc6 -522, 0x4902b7ab3f7119a7 -523, 0x694930f2e29b049e -524, 0x1a3c90650848999f -525, 0x79f1b9d8499c932b -526, 0xfacb6d3d55e3c92f -527, 0x8fd8b4f25a5da9f5 -528, 0xd037dcc3a7e62ae7 -529, 0xfecf57300d8f84f4 -530, 0x32079b1e1dc12d48 -531, 0xe5f8f1e62b288f54 -532, 0x97feba3a9c108894 -533, 0xd279a51e1899a9a0 -534, 0xd68eea8e8e363fa8 -535, 0x7394cf2deeca9386 -536, 0x5f70b0c80f1dbf10 -537, 0x8d646916ed40462 -538, 0xd253bb1c8a12bbb6 -539, 0x38f399a821fbd73e -540, 0x947523a26333ac90 -541, 0xb52e90affbc52a37 -542, 0xcf899cd964654da4 -543, 0xdf66ae9cca8d99e7 -544, 0x6051478e57c21b6a -545, 0xffa7dc975af3c1da -546, 0x195c7bff2d1a8f5 -547, 0x64f12b6575cf984d -548, 0x536034cb842cf9e1 -549, 0x180f247ce5bbfad -550, 0x8ced45081b134867 -551, 0x532bbfdf426710f3 -552, 0x4747933e74c4f54d -553, 0x197a890dc4793401 -554, 0x76c7cc2bd42fae2 -555, 0xdabfd67f69675dd0 -556, 0x85c690a68cdb3197 -557, 0xe482cec89ce8f92 -558, 0x20bc9fb7797011b1 -559, 0x76dc85a2185782ad -560, 0x3df37c164422117a -561, 0x99211f5d231e0ab0 -562, 0xef7fd794a0a91f4 -563, 0x419577151915f5fe -564, 0x3ce14a0a7135dae3 -565, 0x389b57598a075d6a -566, 0x8cc2a9d51b5af9aa -567, 0xe80a9beffbd13f13 -568, 0x65e96b22ea8a54d8 -569, 0x79f38c4164138ede -570, 0xd1955846cba03d81 -571, 0x60359fe58e4f26d6 -572, 0x4ea724f585f8d13e -573, 0x316dfdbadc801a3c -574, 0x20aa29b7c6dd66fe -575, 0x65eaf83a6a008caa -576, 0x407000aff1b9e8cb -577, 0xb4d49bfb2b268c40 -578, 0xd4e6fe8a7a0f14a9 -579, 0xe34afef924e8f58e -580, 0xe377b0c891844824 -581, 
0x29c2e20c112d30c8 -582, 0x906aad1fe0c18a95 -583, 0x308385f0efbb6474 -584, 0xf23900481bf70445 -585, 0xfdfe3ade7f937a55 -586, 0xf37aae71c33c4f97 -587, 0x1c81e3775a8bed85 -588, 0x7eb5013882ce35ea -589, 0x37a1c1692495818d -590, 0x3f90ae118622a0ba -591, 0x58e4fe6fea29b037 -592, 0xd10ff1d269808825 -593, 0xbce30edb60c21bba -594, 0x123732329afd6fee -595, 0x429b4059f797d840 -596, 0x421166568a8c4be1 -597, 0x88f895c424c1bd7f -598, 0x2adaf7a7b9f781cb -599, 0xa425644b26cb698 -600, 0x8cc44d2486cc5743 -601, 0xdb9f357a33abf6ba -602, 0x1a57c4ea77a4d70c -603, 0x1dea29be75239e44 -604, 0x463141a137121a06 -605, 0x8fecfbbe0b8a9517 -606, 0x92c83984b3566123 -607, 0x3b1c69180ed28665 -608, 0x14a6073425ea8717 -609, 0x71f4c2b3283238d7 -610, 0xb3d491e3152f19f -611, 0x3a0ba3a11ebac5d2 -612, 0xddb4d1dd4c0f54ac -613, 0xdb8f36fe02414035 -614, 0x1cf5df5031b1902c -615, 0x23a20ed12ef95870 -616, 0xf113e573b2dedcbb -617, 0x308e2395cde0a9fa -618, 0xd377a22581c3a7da -619, 0xe0ced97a947a66fb -620, 0xe44f4de9cd754b00 -621, 0x2344943337d9d1bf -622, 0x4b5ae5e2ea6e749c -623, 0x9b8d2e3ef41d1c01 -624, 0x59a5a53ebbd24c6b -625, 0x4f7611bf9e8a06fb -626, 0xea38c7b61361cd06 -627, 0xf125a2bfdd2c0c7 -628, 0x2df8dcb5926b9ebb -629, 0x233e18720cc56988 -630, 0x974c61379b4aa95e -631, 0xc7fe24c1c868910b -632, 0x818fd1affc82a842 -633, 0xcee92a952a26d38e -634, 0x8962f575ebcbf43 -635, 0x7770687e3678c460 -636, 0xdfb1db4ed1298117 -637, 0xb9db54cb03d434d3 -638, 0x34aebbf2244257ad -639, 0xd836db0cb210c490 -640, 0x935daed7138957cd -641, 0x3cd914b14e7948fd -642, 0xd0472e9ed0a0f7f0 -643, 0xa9df33dca697f75e -644, 0x15e9ea259398721a -645, 0x23eeba0f970abd60 -646, 0x2217fdf8bbe99a12 -647, 0x5ea490a95717b198 -648, 0xf4e2bfc28280b639 -649, 0x9d19916072d6f05c -650, 0x5e0387cab1734c6a -651, 0x93c2c8ac26e5f01e -652, 0xb0d934354d957eb1 -653, 0xee5099a1eef3188c -654, 0x8be0abca8edc1115 -655, 0x989a60845dbf5aa3 -656, 0x181c7ed964eee892 -657, 0x49838ea07481288d -658, 0x17dbc75d66116b2e -659, 0xa4cafb7a87c0117e -660, 0xab2d0ae44cdc2e6e -661, 
0xdf802f2457e7da6 -662, 0x4b966c4b9187e124 -663, 0x62de9db6f4811e1a -664, 0x1e20485968bc62 -665, 0xe9ac288265caca94 -666, 0xc5c694d349aa8c1a -667, 0x3d67f2083d9bdf10 -668, 0x9a2468e503085486 -669, 0x9d6acd3dc152d1a3 -670, 0xca951e2aeee8df77 -671, 0x2707371af9cdd7b0 -672, 0x2347ae6a4eb5ecbd -673, 0x16abe5582cb426f -674, 0x523af4ff980bbccb -675, 0xb07a0f043e3694aa -676, 0x14d7c3da81b2de7 -677, 0xf471f1b8ac22305b -678, 0xdb087ffff9e18520 -679, 0x1a352db3574359e8 -680, 0x48d5431502cc7476 -681, 0x7c9b7e7003dfd1bf -682, 0x4f43a48aae987169 -683, 0x9a5d3eb66dedb3e9 -684, 0xa7b331af76a9f817 -685, 0xba440154b118ab2d -686, 0x64d22344ce24c9c6 -687, 0xa22377bd52bd043 -688, 0x9dfa1bb18ca6c5f7 -689, 0xdccf44a92f644c8b -690, 0xf623d0a49fd18145 -691, 0x556d5c37978e28b3 -692, 0xad96e32ce9d2bb8b -693, 0x2e479c120be52798 -694, 0x7501cf871af7b2f7 -695, 0xd02536a5d026a5b8 -696, 0x4b37ff53e76ab5a4 -697, 0xdb3a4039caaeab13 -698, 0x6cbd65e3b700c7be -699, 0x7367abd98761a147 -700, 0xf4f9ba216a35aa77 -701, 0xf88ca25ce921eb86 -702, 0xb211de082ec2cbf2 -703, 0xdd94aa46ec57e12e -704, 0xa967d74ad8210240 -705, 0xdaa1fada8cfa887 -706, 0x85901d081c4488ee -707, 0xcf67f79a699ef06 -708, 0x7f2f1f0de921ee14 -709, 0x28bc61e9d3f2328b -710, 0x3332f2963faf18e5 -711, 0x4167ac71fcf43a6 -712, 0x843c1746b0160b74 -713, 0xd9be80070c578a5e -714, 0xbd7250c9af1473e7 -715, 0x43f78afaa3647899 -716, 0x91c6b5dd715a75a5 -717, 0x29cc66c8a07bfef3 -718, 0x3f5c667311dc22be -719, 0x4f49cd47958260cd -720, 0xbef8be43d920b64e -721, 0x7a892a5f13061d8b -722, 0x9532f40125c819b1 -723, 0x924fca3045f8a564 -724, 0x9b2c6442453b0c20 -725, 0x7e21009085b8e793 -726, 0x9b98c17e17af59d2 -727, 0xba61acb73e3ae89a -728, 0xb9d61a710555c138 -729, 0xc2a425d80978974b -730, 0xa275e13592da7d67 -731, 0xe962103202d9ad0f -732, 0xbdf8367a4d6f33fd -733, 0xe59beb2f8648bdc8 -734, 0xb4c387d8fbc4ac1c -735, 0x5e3f276b63054b75 -736, 0xf27e616aa54d8464 -737, 0x3f271661d1cd7426 -738, 0x43a69dbee7502c78 -739, 0x8066fcea6df059a1 -740, 0x3c10f19409bdc993 -741, 
0x6ba6f43fb21f23e0 -742, 0x9e182d70a5bccf09 -743, 0x1520783d2a63a199 -744, 0xba1dcc0c70b9cace -745, 0x1009e1e9b1032d8 -746, 0xf632f6a95fb0315 -747, 0x48e711c7114cbfff -748, 0xef281dcec67debf7 -749, 0x33789894d6abf59b -750, 0x6c8e541fffbe7f9c -751, 0x85417f13b08e0a88 -752, 0x9a581e36d589608f -753, 0x461dca50b1befd35 -754, 0x5a3231680dde6462 -755, 0xcc57acf729780b97 -756, 0x50301efef62e1054 -757, 0x675d042cd4f6bbc9 -758, 0x1652fdd3794384c9 -759, 0x1c93bbeeb763cd4d -760, 0x44b7240c4b105242 -761, 0x4c6af2a1b606ccfb -762, 0x18fc43ece2ec1a40 -763, 0x859a5511aeae8acb -764, 0x2f56826f1996ad2f -765, 0xa8e95ce8bb363bdf -766, 0xf4da396054e50e4b -767, 0x5493865e9895883c -768, 0x768e4c8b332ac0e3 -769, 0x32195d2aa583fca5 -770, 0xf2f353f21266bc15 -771, 0x43cddf1d021307d -772, 0x6031e3aa30300e4a -773, 0x4f1298469ac6088f -774, 0x4b4d450bafac574e -775, 0x23e1cf9c0582a22b -776, 0x2e9036980db49cd0 -777, 0xe4e228b113c411b2 -778, 0x8bddcdb82b51706 -779, 0xd2a7ea8288593629 -780, 0x67fe90e98fdda61 -781, 0x7b63494dba95717b -782, 0x105625904510d782 -783, 0xdf4aa2242454e50a -784, 0x32541d6cd7d6c7e3 -785, 0x5661fb432591cf3b -786, 0xce920a5ed047bce7 -787, 0xed4178a3c96eea8f -788, 0xe378cd996e39863b -789, 0x169e1fdc8e2b05e1 -790, 0xaee1812ef7149a96 -791, 0x648571c7453d12c5 -792, 0xb7b6bc9328573c43 -793, 0xe7fb969078e270d7 -794, 0xdfc2b1b8985f6e6f -795, 0x862b6527ee39a1aa -796, 0x1ee329aea91d7882 -797, 0x20d25324f2fe704 -798, 0xbfcc47401fc3bbfd -799, 0x1515cdc8d48b2904 -800, 0xbd6eefe86284261c -801, 0x9b1f28e3b35f22ee -802, 0x842a29d35e5aecda -803, 0xf2346109ad370765 -804, 0x24d68add5a71afd9 -805, 0x4a691421613d91e2 -806, 0x60e3058b3c244051 -807, 0x79194905cdaa5de8 -808, 0xe0e2df35c01e8987 -809, 0xe29b78beffbb5e4a -810, 0xcdcdbc020218c19e -811, 0x5ae0af8c16feae43 -812, 0x8109292feeaf14fa -813, 0x34113f7508dfa521 -814, 0xc062ac163f56730a -815, 0xf1660e66ec6d4c4c -816, 0x5966c55f60151c80 -817, 0x3865ae8ec934b17 -818, 0x472a7314afb055ec -819, 0x7a24277309a44a44 -820, 0x556e02dd35d38baa -821, 
0x9849611a1bc96ec1 -822, 0xd176f5d5a8eb0843 -823, 0x44db12ec60510030 -824, 0x272e3a06a0030078 -825, 0x7c4764dbefc075ea -826, 0x910712f3735c1183 -827, 0xd49a2da74ae7aff6 -828, 0xcf9b3e6e8f776d71 -829, 0x27789fe3ec481a02 -830, 0x86659f82c6b5912b -831, 0xe044b3dbf339158c -832, 0x99d81f6bb62a37b0 -833, 0x5f5830c246fada9a -834, 0xe68abab1eeb432cb -835, 0x49c5c5ace04e104 -836, 0x1ac3871b3fc6771b -837, 0x773b39f32d070652 -838, 0x9c4138c2ae58b1f3 -839, 0xac41c63d7452ac60 -840, 0x9248826b245359e1 -841, 0x99bba1c7a64f1670 -842, 0xe0dc99ff4ebb92f2 -843, 0x113638652740f87c -844, 0xebf51e94da88cfc -845, 0x5441c344b81b2585 -846, 0xe1e69e0bc2de652a -847, 0xe9ab6d64ae42ed1e -848, 0x879af8730e305f31 -849, 0x36b9ad912c7e00d6 -850, 0x83ef5e9fca853886 -851, 0xda54d48bb20ea974 -852, 0x32c6d93aefa92aa2 -853, 0x4e887b2c3391847d -854, 0x50966e815f42b1b8 -855, 0x53411ac087832837 -856, 0x46f64fef79df4f29 -857, 0xb34aae3924cd272c -858, 0xf5ad455869a0adbe -859, 0x8351ded7144edac8 -860, 0xeb558af089677494 -861, 0x36ed71d69293a8d6 -862, 0x659f90bf5431b254 -863, 0x53349102b7519949 -864, 0x3db83e20b1713610 -865, 0x6d63f96090556254 -866, 0x4cc0467e8f45c645 -867, 0xb8840c4bd5cd4091 -868, 0xbd381463cc93d584 -869, 0x203410d878c2066d -870, 0x2ebea06213cf71c8 -871, 0x598e8fb75e3fceb4 -872, 0xdcca41ceba0fce02 -873, 0x61bf69212b56aae5 -874, 0x97eed7f70c9114fa -875, 0xf46f37a8b7a063f9 -876, 0x66c8f4ffe5bd6efa -877, 0xe43fd6efda2d4e32 -878, 0x12d6c799e5ad01de -879, 0x9ac83e7f8b709360 -880, 0xbbb7bb3c1957513d -881, 0x7f87c08d4b3796b0 -882, 0x9a7d1d74b6aa4a5c -883, 0xa4314530ff741b6f -884, 0x99a80c6b6f15fca8 -885, 0xd2fec81d6d5fc3ce -886, 0x15a98be1cc40cea -887, 0x98693eb7719366f3 -888, 0x36ccdc2a9e9d4de8 -889, 0x3c8208f63d77df25 -890, 0xca2e376e2343df6 -891, 0xcc9b17cbb54420c6 -892, 0x8724c44a64d7dcb8 -893, 0x9d00c6949ff33869 -894, 0xf4f8e584d2699372 -895, 0x88f4748cdd5a2d53 -896, 0xe215072a1205bc6d -897, 0x190934fe6d740442 -898, 0x7fac5c0ab2af106d -899, 0x1b86633a0bd84fa1 -900, 0x1293e54318492dfb -901, 
0x433324fd390f34b9 -902, 0x4c5eb2c67a44643b -903, 0x59a6e281c388b0dd -904, 0xe78e03f9c44623b7 -905, 0x91307a93c768fc3d -906, 0xde8867b004d8e3ff -907, 0xdf52c3f57b7c5862 -908, 0x993f3e1d10358a92 -909, 0x9ccb10bc3e18662d -910, 0x45093ce48a114c73 -911, 0xd59d05979d26330a -912, 0x417c0e03300119a9 -913, 0x1c336500f90cde81 -914, 0x1c8ccd29ead9b85b -915, 0xb76baf3e55d4d950 -916, 0x133ad6196c75fd7e -917, 0x34200b0cde7ed560 -918, 0x9c7c3dacb213c8d9 -919, 0xd97563c4fd9bf1b6 -920, 0x5d910e871835b6cb -921, 0x7d46c4733a16bdf9 -922, 0xe41d73194ddc87b2 -923, 0x7d3d8a0855a465a9 -924, 0x70c2a8b5d3f90c0f -925, 0x9e7565ca5dccfe12 -926, 0x2c0acb4577aa51b1 -927, 0x3d2cd211145b79c7 -928, 0x15a7b17aa6da7732 -929, 0xab44a3730c27d780 -930, 0xf008bd6c802bde3a -931, 0x82ed86ddf3619f77 -932, 0xaabe982ab15c49f9 -933, 0x9bcad8fa6d8e58a4 -934, 0x8f39ed8243718aa1 -935, 0xe9489340e03e3cb6 -936, 0xc722314f5eefb8d0 -937, 0x870e8869a436df59 -938, 0x4dae75b8087a8204 -939, 0xe1d790f6ec6e425b -940, 0xafd39ea1b1d0ed09 -941, 0xdf2c99e464ddf08f -942, 0x74936d859ab9644d -943, 0x3871302164250e73 -944, 0x764b68921e911886 -945, 0x2a1d024b26bb9d66 -946, 0x797fba43918e75b4 -947, 0x62ec6d24ccca335b -948, 0xf4bd8b951762b520 -949, 0x9d450dede9119397 -950, 0x5393a26d10f8c124 -951, 0x6b74769392896b57 -952, 0x7f61dbcc0e328581 -953, 0x64e1df3884d0d94 -954, 0xba77dcdf23738c37 -955, 0xf8e288bc0a177475 -956, 0x4a8abfd1702ecb7d -957, 0x53f22886694736a7 -958, 0x8fc982597ced3e3 -959, 0x1bc46090f820fff7 -960, 0x8bd31f965d02229f -961, 0x65cd0cb29996ee53 -962, 0x702e0f4fcf8c2e9f -963, 0x293b77bff307a9a0 -964, 0x125a986b8b305788 -965, 0x416b0eea428ebf3c -966, 0xeac85421ab0e8469 -967, 0x7f5496095019aa68 -968, 0x1a96d7afbc708e0 -969, 0xb91262e6766e01e1 -970, 0xd0a549cc4ccc6954 -971, 0x75a9a073f50c8a0d -972, 0xae275d2c1c6cd23c -973, 0xcf159b5ec5d28fd4 -974, 0x75d0838ce9b92b -975, 0xd4eddcee6dc4677f -976, 0x6a0a8ad5df6b75b8 -977, 0x6f3fd0ef0f13ecc4 -978, 0xb75a5826c1a8f8a8 -979, 0xd47098bbc7943766 -980, 0x3d4ddd62d5f23dd1 -981, 
0x760a904e4583841c -982, 0x2afeb5022b4cf1f -983, 0x66d5f653729f0a13 -984, 0x9a6a5ab62980d30f -985, 0xc332f5643bbf8d5b -986, 0x848fb702e4056a90 -987, 0xa057beaf3f9e8c5f -988, 0x6cc603e4560a6c6a -989, 0xec761811a7b23211 -990, 0xb14aa4090a82aaa5 -991, 0xe29d9d028a5b2dbb -992, 0x5564e53738d68f97 -993, 0xfabca36542eaaf3b -994, 0xb9912fcb782020a2 -995, 0xe865e01b349284fd -996, 0x540b5ff11c5f9274 -997, 0x3463f64e1e7451dc -998, 0xe15d3e2f33b735f8 -999, 0xf5433336eadef6e diff --git a/venv/lib/python3.7/site-packages/numpy/random/tests/data/sfc64-testset-2.csv b/venv/lib/python3.7/site-packages/numpy/random/tests/data/sfc64-testset-2.csv deleted file mode 100644 index 70aebd5..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/tests/data/sfc64-testset-2.csv +++ /dev/null @@ -1,1001 +0,0 @@ -seed, 0x0 -0, 0x91959e5fb96a6332 -1, 0x3c1dd8a25a7e9f21 -2, 0x657bdffc99798d9e -3, 0x1a04de320b19e022 -4, 0x65b92af0e5f3c61c -5, 0x9c84070ce8f743c0 -6, 0xbb10e573693cdb25 -7, 0xd65ea9e76b37fb6b -8, 0x503efd0e76c8ae66 -9, 0xd711dcd04c26d0f -10, 0x12f53f435814ac8c -11, 0xb392cd402cfc82bd -12, 0x461764550e06c889 -13, 0x716a48b3514e6979 -14, 0xdd0a322213c18ad7 -15, 0x6673a8ca0a05c4d7 -16, 0x2992ef333437f844 -17, 0xc4aaf7e8240b2aad -18, 0x6ab0a1af1f41474f -19, 0xb0bae400c226941d -20, 0xe5f80c2eeeab48c6 -21, 0x3832c6a93a4024bf -22, 0x280bd824fabe8368 -23, 0x66b626228321e5ff -24, 0xe0bdfba5325a307e -25, 0x3a5f65c6ef254e05 -26, 0x99ea12503cb02f94 -27, 0x5d01fd2db77d420b -28, 0x6959bf5f36b2368d -29, 0xd856e30c62b5f5be -30, 0xe33233e1d8140e66 -31, 0xb78be619d415fa8d -32, 0x4f943bb2cc63d3b -33, 0x9b1460b290952d81 -34, 0x19205d794826740e -35, 0x64617bd9d7a6a1ff -36, 0x30442124b55ea76a -37, 0xebbbc3b29d0333fc -38, 0x39235a0fe359751c -39, 0xf9629768891121aa -40, 0x32052f53f366e05a -41, 0x60cc5b412c925bc8 -42, 0xf8b7ecda1c0e5a9 -43, 0x195f036e170a2568 -44, 0xfe06d0381a9ca782 -45, 0x919d89e8b88eebbf -46, 0xa47fb30148cf0d43 -47, 0x5c983e99d5f9fd56 -48, 0xe7492cdb6a1d42cd -49, 0xf9cfe5c865b0cfd8 
-50, 0x35b653367bbc3b99 -51, 0xb1d92f6f4d4e440b -52, 0x737e1d5bd87ed9c0 -53, 0x7a880ca1498f8e17 -54, 0x687dae8494f9a3f7 -55, 0x6bae1989f441d5d7 -56, 0x71ad3fa5a9195c2e -57, 0x16b3969779f5d03 -58, 0xd1bce2ac973f15b3 -59, 0xa114b1ee2ce0dcdd -60, 0x270d75c11eb1b8d5 -61, 0xc48ffa087c0a7bc -62, 0xaaf9dc48cda9848d -63, 0x8111cf10ef6e584d -64, 0x6736df6af40ee6f4 -65, 0x1a1a111682fbf98d -66, 0xeb217658e1cb3b5d -67, 0xcaf58a8b79de9dec -68, 0x25d0ffd63c88d7a1 -69, 0x4c498cd871b7f176 -70, 0x4069a6156eb0cf3c -71, 0xdf012f12edcdd867 -72, 0x7734c0ac8edb1689 -73, 0xed6960ac53dbc245 -74, 0x305e20da8868c661 -75, 0x5f0c7a3719956f95 -76, 0x66842bbe3b28895 -77, 0xb608bc9a31eac410 -78, 0xfcb17d5529503abd -79, 0x829ae5cbc29b92ee -80, 0x17f2f0027bc24f3a -81, 0x435926c33d8f44cc -82, 0x3ab899327098dbec -83, 0xaf78573b27f8ead8 -84, 0xa8b334fabcf8dc60 -85, 0xcdf3b366a6a303db -86, 0x8da9379dd62b34c8 -87, 0xb0ba511955f264a7 -88, 0x9d72e21a644f961d -89, 0xfac28382e2e7e710 -90, 0xd457065f048410aa -91, 0x1cae57d952563969 -92, 0x5a160a6223253e03 -93, 0x2c45df736d73c8bd -94, 0x7f651ebc6ad9cec5 -95, 0x77a6be96c7d2e7e7 -96, 0x1721fb1dbfd6546a -97, 0xf73f433ecff3c997 -98, 0xed1e80f680965bfe -99, 0x6705ad67a3003b30 -100, 0xac21134efcadb9f7 -101, 0x4d2ba0a91d456ac -102, 0x59da7b59434eb52b -103, 0x26c1d070fd414b5f -104, 0xed7079ddfce83d9a -105, 0x9277d21f88e0fb7a -106, 0xfae16b9a8d53d282 -107, 0xb08a0e2e405fdf7d -108, 0x2ea20df44229d6ec -109, 0x80e4634cd3612825 -110, 0xbe62e8aeba8f8a1a -111, 0x4981209769c190fb -112, 0xcec96ef14c7e1f65 -113, 0x73fe4457b47e7b53 -114, 0x1d66300677315c31 -115, 0xe26821290498c4cc -116, 0xf6110248fd8fb1c5 -117, 0x30fd7fe32dbd8be3 -118, 0x534ec9b910a2bd72 -119, 0x8f9bfe878bbf7382 -120, 0x4f4eb5295c0c2193 -121, 0xdeb22f03a913be9e -122, 0x40f716f8e2a8886c -123, 0xc65007d0e386cdb1 -124, 0x9bdd26d92b143a14 -125, 0xf644b0b77ea44625 -126, 0x75f5a53f6b01993a -127, 0xfe803e347bf41010 -128, 0x594bff5fa17bc360 -129, 0x3551edfb450373c7 -130, 0x898f9dad433615db -131, 0x923d2406daa26d49 
-132, 0x99e07faccbc33426 -133, 0x7389f9ff4470f807 -134, 0xdc2a25957c6df90b -135, 0x33c6d8965ef3053f -136, 0x51a8f07e838f1ab -137, 0x91c5db369380274f -138, 0xc37de65ac56b207e -139, 0xfcc6d2375dde7f14 -140, 0xa4e6418bff505958 -141, 0x4b8b9f78e46953c4 -142, 0x255ab2e0f93cf278 -143, 0xdf650717af3d96ef -144, 0x2caa21cba3aae2b2 -145, 0xce7e46c6f393daa4 -146, 0x1d5b3573f9997ac7 -147, 0x5280c556e850847d -148, 0x32edc31bef920ad7 -149, 0xefaa6b0b08cf2c6 -150, 0x5151c99d97b111c5 -151, 0x35ccf4bf53d17590 -152, 0xa210d7bd8697b385 -153, 0xa9419f95738fbe61 -154, 0xdeccf93a1a4fdc90 -155, 0xd0ea3365b18e7a05 -156, 0x84122df6dcd31b9a -157, 0x33040a2125cea5f5 -158, 0xfe18306a862f6d86 -159, 0xdb97c8392e5c4457 -160, 0xc3e0fa735e80e422 -161, 0x7d106ff36467a0c1 -162, 0xb9825eecc720a76d -163, 0x7fefc6f771647081 -164, 0xf5df3f5b3977bf13 -165, 0x18fb22736d36f1e0 -166, 0xadc4637b4953abfc -167, 0x174e66d3e17974bd -168, 0xf1614c51df4db5db -169, 0x6664ecde5717b293 -170, 0xd5bc5b6839265c26 -171, 0xf6ca9ce1af3f1832 -172, 0xca696789a9d506ea -173, 0x7399c246c8f9d53 -174, 0xadf49049626417e2 -175, 0xbcd84af37d09ab91 -176, 0xbb41c177f3a3fa45 -177, 0x592becc814d55302 -178, 0xa88b4e65f6cfe5f7 -179, 0xa0a55e34ff879426 -180, 0x3c2ea6aa725b42b7 -181, 0x65ac4a407b1f9521 -182, 0xde63d53f7e88b556 -183, 0x18bc76696d015f40 -184, 0xd1363f2cd4c116a8 -185, 0x2fe859be19a48e4a -186, 0x83d6099b1415e656 -187, 0x43f2cbc1a4ee6410 -188, 0xb2eca3d3421c533d -189, 0xc52b98ea3f031f5d -190, 0xfe57eb01da07e9d1 -191, 0xf9377883537a6031 -192, 0x364030c05dac7add -193, 0x6815cb06b35d4404 -194, 0xceae2d4ce31894be -195, 0xc602bcdf6062bf6a -196, 0xc8e4bd8dcc6062e3 -197, 0x9c29e87b92a1a791 -198, 0x41e626b871ca9651 -199, 0x325c3d1fb8efbcd8 -200, 0x7dbbacf8e3419fb3 -201, 0x3602e72516bb7319 -202, 0x537a008ebd94d24b -203, 0xda7714fc9d4d161d -204, 0x1c8c73700e1b621b -205, 0x2749b80937d6c939 -206, 0x76ee6abac5b14d33 -207, 0xf18d1e92cb6a8b5c -208, 0x6ce9579d9291c721 -209, 0x60523c745a40e58 -210, 0x637f837fcc901757 -211, 0x2ff71b19661dc5b3 
-212, 0x393ab586326ad16f -213, 0xa0970ea30fe742b7 -214, 0x570222d7f27fe5ae -215, 0x3b5806d43fd38629 -216, 0x129a0ad7420180c5 -217, 0x1c4726355778d52c -218, 0x7c1459cf77656499 -219, 0xfe038a0932132069 -220, 0x4c4cc317a937483a -221, 0xa333d24067e926ba -222, 0x401d9b6ab37f6ef2 -223, 0x87ad0e491ebe4a2a -224, 0xfc02f312e72d121d -225, 0xfde715b3b99767b2 -226, 0xd111c342ba521c92 -227, 0x83b221b10879c617 -228, 0x6a1bf5c01fdf4277 -229, 0x166bfc0c3f5892ee -230, 0x4608d556d7c57856 -231, 0x8d786857c95ece49 -232, 0x2d357445a1aca4ac -233, 0x79620dae28ecd796 -234, 0x90e715dc0f2201c4 -235, 0x173b68b4c9f4b665 -236, 0x4e14d040ebac4eef -237, 0xbd25960b4b892e -238, 0x911a199db6f1989d -239, 0xfe822d7c601fd2e0 -240, 0x9b4c1d58d8223a69 -241, 0x907c1891283843b0 -242, 0xf4868bf54061c4b2 -243, 0x17f8cd1fc24efd85 -244, 0xd44253f9af14c3aa -245, 0x16d0da0cb911d43c -246, 0x3c6a46615828e79a -247, 0x498591c1138e11a5 -248, 0xcc0f26336d0d6141 -249, 0x4d3ebc873212309a -250, 0x16bad7792d5c2c6a -251, 0x474215a80b2bbd11 -252, 0x7159848abd8492fc -253, 0x359341c50973685f -254, 0x27512ee7bf784a4a -255, 0x45228ea080f70447 -256, 0x880cab616500d50e -257, 0x12fae93f9830d56e -258, 0x6744ee64348d9acd -259, 0x484dada28cd2a828 -260, 0x98491d0729e41863 -261, 0x2f15aac43c2863b0 -262, 0x5727a34d77a1da0f -263, 0xa435cebef6a62eed -264, 0xd211697d57b053b0 -265, 0x65aa757b68bd557 -266, 0xe3a1b7a2d8a3e06a -267, 0x2adf64e67252a7a9 -268, 0xadadcb75cadee276 -269, 0x7934bc57ac8d97bf -270, 0xccff0d0f412e0606 -271, 0x101a82aa3e8f3db9 -272, 0xb0f2498094b4575c -273, 0xba2561d9ef26ed8a -274, 0xfbcd1268fc3febe1 -275, 0x9aa10bb19eb152e0 -276, 0xf496217a601a6d72 -277, 0xe4be1e4f2fa91363 -278, 0x473a602bf3dd68eb -279, 0xfe8ed2a48c26f4b5 -280, 0x20e94b1a00159476 -281, 0x93e1cb1c6af86ec7 -282, 0x4fcba3898f7442ba -283, 0x5150c3a3d94891df -284, 0x91cfce6c85b033ea -285, 0x625e8a832a806491 -286, 0x28c97ba72e3ec0b2 -287, 0x8e172de217c71ea1 -288, 0x926b80216c732639 -289, 0x28b19431a649ae3d -290, 0x57c039a6e95a3795 -291, 0xfbc354182fe52718 
-292, 0x819dfd7c7d534cef -293, 0xabb4093a619ed44f -294, 0xe785b7ac6f656745 -295, 0xb647b4588b2f942f -296, 0x64cf870a14c72d27 -297, 0x6d4a4a2a0ba9b37e -298, 0x78bfb0427d7ce6b0 -299, 0x8dcc72b8bfc79ac6 -300, 0x1c14d915d5e76c99 -301, 0xaf48ddea6f096d79 -302, 0x51b39b67aa130d8 -303, 0x1aeeb39d4def06de -304, 0xd678092ffedfdd27 -305, 0x8f54787f325111d3 -306, 0xf2ca2e827beaa6bc -307, 0x339d134099e98545 -308, 0x1f6a8a7b33942e43 -309, 0x952c8065dbef669a -310, 0xe066aeb6690147f7 -311, 0xed25aa92cf58ebb6 -312, 0x7601edce215ef521 -313, 0xed1c5b396abd9434 -314, 0x4fd1e407535de9d5 -315, 0xccc8315a0d4d1441 -316, 0x85753e250bb86976 -317, 0xf232e469378761c3 -318, 0x81d691b8e9aef3c6 -319, 0x224a2f9cab0ad0e -320, 0x978f3d3e50007f4e -321, 0xd3713e6a6c0cbe60 -322, 0xcce8f1eadd41f80d -323, 0x34bda028a97d469 -324, 0x90e242fdf0f59183 -325, 0x4d749754fbc5f092 -326, 0x4399f5b7851cc87b -327, 0xcb921a5f25f6c5d7 -328, 0x120bf5d0162101 -329, 0x1304cc2aa352735a -330, 0xf7236c5d0d5d417b -331, 0xc31b320fc1654306 -332, 0xb468c6b23f3fb4e7 -333, 0xb5985b5bfaca4166 -334, 0x898285a1cd2f8375 -335, 0xa13493da372aa7c9 -336, 0x15c80c09c12634e7 -337, 0x9b765c5cc9d438bd -338, 0xee7da816a9201dcb -339, 0x92e269f73b5a248e -340, 0xa8086c5de81400ce -341, 0xe0053901853d42be -342, 0x821df32c012f433e -343, 0x17a6d69ca37387c7 -344, 0x2b10044bfba3501f -345, 0x8dfd262afc2e8515 -346, 0xd68c2c7b60226371 -347, 0xe81ac114e4416774 -348, 0x5896d60061ebc471 -349, 0xa996e3147811dbd1 -350, 0xa819c7b80ecb3661 -351, 0x982ad71b38afbc01 -352, 0xab152b65aa17b7fe -353, 0x4582bc282ef187ef -354, 0xab5a17fe8d9bc669 -355, 0x83664fa9cb0284b7 -356, 0x234c4b0091968f52 -357, 0x8ab5f51805688d37 -358, 0xe9e11186e0c53eda -359, 0x10df37ef1de2eccf -360, 0x780f1b0d52db968f -361, 0x50bd4ff292872cd5 -362, 0x51e681c265f5ad0 -363, 0x842c49660a527566 -364, 0x6e56ee026e9eda87 -365, 0x4cf39e40d8c80393 -366, 0x13e466df371f7e1f -367, 0xf2ce1799f38e028e -368, 0x833c8db7adc6ff0e -369, 0xc6e189abc2ec98f -370, 0xafebb3721283fec5 -371, 0xb49bc1eb5cc17bdc -372, 
0xf1d02e818f5e4488 -373, 0xe5e9d5b41a1dd815 -374, 0xce8aca6573b1bfe5 -375, 0x9b0a5d70e268b1d5 -376, 0xf3c0503a8358f4de -377, 0x2681605dd755669d -378, 0xea265ca7601efc70 -379, 0xa93747f0a159439f -380, 0x62a86ede78a23e50 -381, 0xac8a18935c3d063c -382, 0x729c0a298f5059f5 -383, 0xbbf195e5b54399f4 -384, 0x38aa9d551f968900 -385, 0x3b3e700c58778caa -386, 0x68e6e33c4443957a -387, 0x7c56fc13eb269815 -388, 0xaf7daca39711804a -389, 0x50fde6d10f9544b3 -390, 0xf3d37159f6f6c03d -391, 0x82d298f5c1a71685 -392, 0x478661ac54c5002c -393, 0x6053768e1a324ae0 -394, 0xde8fb4a7e56707ea -395, 0xaa2809301faa8cf4 -396, 0x690a8d49fedd0722 -397, 0xe17c481b9c217de9 -398, 0x60d1d8a2b57288e3 -399, 0x149adfaadc6b0886 -400, 0xa3c18b6eb79cd5fa -401, 0x5774e3a091af5f58 -402, 0x2acca57ff30e5712 -403, 0x94454d67367c4b0c -404, 0x581b2985ac2df5ca -405, 0x71618e50744f3e70 -406, 0x270a7f3bd9a94ae6 -407, 0x3ef81af9bb36cd7b -408, 0x8a4a2592875254aa -409, 0x704ac6086fbb414a -410, 0xda774d5d3f57414d -411, 0xe20d3358b918ae9e -412, 0x934a6b9f7b91e247 -413, 0xf91649cde87ec42c -414, 0x248cec5f9b6ced30 -415, 0x56791809fd8d64ba -416, 0xf502b2765c1395f -417, 0x6b04ec973d75aa7f -418, 0xb0339f2794bb26f -419, 0x4c524636efbaea49 -420, 0x6bbf3876e9738748 -421, 0xf686524e754e9e24 -422, 0x8dafa05a42d19cd3 -423, 0xc5f069ab2434008e -424, 0x4fd64cc713cba76 -425, 0xdbf93450c881ed5f -426, 0x492e278ebabb59a2 -427, 0x993fddfde4542642 -428, 0xecde68a72c8d4e52 -429, 0xe0760b3074c311fd -430, 0x68dc0e7e06528707 -431, 0x52b50edf49c0fdc7 -432, 0xb2bd4185c138f412 -433, 0x431496d7e1d86f3 -434, 0xa4e605b037e26c44 -435, 0x58236ae1f0aca2b5 -436, 0x26c72c420fc314d8 -437, 0x20134e982ab99a2b -438, 0x544b59b8b211374b -439, 0x1301c42f3a14d993 -440, 0x52a6ea740f763b0f -441, 0xf209d70c2bebf119 -442, 0xac66a4ebc2aa1be -443, 0x683713ed35878788 -444, 0x2b5578acec06b80c -445, 0x86428efa11c45b36 -446, 0xb49010adb17d291e -447, 0x73b686bd8664b6be -448, 0x6d28ebf57b6884cc -449, 0x9712091230ff58d9 -450, 0xc9c91f74c38b286 -451, 0x776310ac41dc008e -452, 
0x2f3739df0bf6a88e -453, 0x5792dc62b94db675 -454, 0x5715910d024b06af -455, 0xeb1dd745458da08 -456, 0xfce7b07ccfa851a7 -457, 0xc305f1e983ac368 -458, 0x485aa9519ac00bb0 -459, 0xa5354f6589fb0ea0 -460, 0x32fee02dfdbf4454 -461, 0x4d1ddc304bbefaaa -462, 0x789a270a1737e57e -463, 0x9f3072f4b1ed8156 -464, 0x4de3c00e89058120 -465, 0xb00a02529e0a86fa -466, 0x539f6f0edd845d9a -467, 0x85e578fe15a8c001 -468, 0xa12c8e1a72cce7d8 -469, 0xc6908abbc2b1828 -470, 0xcf70090774cbb38c -471, 0x3b636a6977b45d4a -472, 0xf0a731b220680b57 -473, 0x18973929f51443a8 -474, 0xe93e1fbe7eadabe -475, 0x8233730f0a6dfa02 -476, 0x66e50b6919b0ab74 -477, 0xb1aba87c97fd08a2 -478, 0xd4dffc1fbc117ad6 -479, 0x6f7fa65724b96e6a -480, 0x4bd5800dee92e0fa -481, 0xe18a959db6256da -482, 0xe53a291bc66df487 -483, 0xb7ec306a08651806 -484, 0x1847a6b80d2821e1 -485, 0xda50391283b14d39 -486, 0xacc4d3cd7cceb97a -487, 0x57f70185165b7bc6 -488, 0x302b6d597c3aaba7 -489, 0xa47f32d037eab51e -490, 0xe1509b4408abc559 -491, 0x4f30a1d7c2934157 -492, 0x2ad03e6c60b650b2 -493, 0x334d9c337b0a9064 -494, 0xc7f442821e7aac12 -495, 0xbcdeb09298694cdd -496, 0xe42402389f8f0fb4 -497, 0xe5de56af539df727 -498, 0x7017f9b2101ee240 -499, 0x1ee5e68d5b10001d -500, 0x436229051836387a -501, 0xcd532d6d6ec38fb7 -502, 0x30a66606fdf38272 -503, 0xfdaa2ab9cf798496 -504, 0x4277b4adec70e7df -505, 0x72cfc30256e0eaef -506, 0x3c3359fd9bd34917 -507, 0xb7aa89598856efb0 -508, 0xf72226f8bf299ef5 -509, 0x258c499275a4356f -510, 0x999a56bfc7f20d76 -511, 0x2b3e7432e20c18b -512, 0x2d1251332f760cb5 -513, 0x7420e0eea62157c5 -514, 0xe85c895aa27cec3d -515, 0x27a0545c7020d57c -516, 0xc68638a65b4fff0d -517, 0xfda473983a4ea747 -518, 0xd19fe65fb4c06062 -519, 0x6b1374e050ee15e4 -520, 0x80065ecd49bc4bef -521, 0x4ee655954bc838de -522, 0xe8fb777504a72299 -523, 0x86b652ea70f4bdde -524, 0xcdc9e0fbde7e4f33 -525, 0x352c0a50cd3ac56 -526, 0x4b8605d368be75dc -527, 0x1ac9ea8129efbc37 -528, 0x470325faa99f39c5 -529, 0x25dd7ef9adccf7a1 -530, 0x5ae2c7a03e965816 -531, 0xf733d2df59dacc7d -532, 
0xa05bbf0a8a1a7a70 -533, 0xe8aa3f102846ef5f -534, 0xc9b85ec49ae71789 -535, 0xb904c14ed1cb1936 -536, 0x5ae618230b5f0444 -537, 0x97987fe47b5d7467 -538, 0xabb3aca8865ca761 -539, 0x38bfdf29d4508228 -540, 0x353654f408353330 -541, 0xeb7e92930ae4ef0d -542, 0xec50f1a7ca526b96 -543, 0xd5e2dc08b5697544 -544, 0x24c7fd69d5ec32df -545, 0x6f7e1095568b8620 -546, 0x6ed9c16ca13b3c8 -547, 0xe676ef460002130f -548, 0xa3a01a3992c4b430 -549, 0xe2130406c3b1f202 -550, 0xa8f7263e2aedcd20 -551, 0xc45d71ef2e35f507 -552, 0x37155594021da7ba -553, 0x22dc94f19de73159 -554, 0x7969fc6bffc5443f -555, 0x97def7e44faa6bfe -556, 0x8b940f5e8931d71f -557, 0xd95b1dd3f1a3fdd5 -558, 0x1c83bfdca615701a -559, 0xb7fcb56279ceca6b -560, 0xd84f8950f20dcd0 -561, 0xb03343698de3cbe0 -562, 0xf64565d448d71f71 -563, 0xda52b4676e0ae662 -564, 0xda39c2c05b4ffb91 -565, 0xb35e2560421f6a85 -566, 0x1a7b108d48ac3646 -567, 0xc4e264dc390d79ed -568, 0xa10727dfd9813256 -569, 0x40d23154e720e4f7 -570, 0xd9fa7cd7e313e119 -571, 0xcbf29107859e6013 -572, 0xc357338553d940b7 -573, 0x2641b7ab0bdfcbaa -574, 0xd12f2b6060533ae7 -575, 0xd0435aa626411c56 -576, 0x44af4a488a9cec72 -577, 0xb934232ea8fa5696 -578, 0x760a8b12072b572d -579, 0xfab18f9942cfa9b3 -580, 0x5676834c1fe84d16 -581, 0x9c54e4fddb353236 -582, 0xab49edfc9551f293 -583, 0x567f1fb45a871d -584, 0x32a967c873998834 -585, 0x99240aad380ef8d1 -586, 0x7f66cbd432859a64 -587, 0x4cdc8a4658166822 -588, 0x984e3984a5766492 -589, 0xa3b2d0a3d64d3d94 -590, 0x177f667172f2affc -591, 0xb1a90607a73a303f -592, 0xe600b6c36427f878 -593, 0xf758f9834cb7f466 -594, 0x8ee9fce4a3f36449 -595, 0xcb8f11533e7da347 -596, 0xe7cf647794dabd7c -597, 0xc9d92cfe6110806 -598, 0xea1335fa9145a1ec -599, 0xbc6c29821d094552 -600, 0x37b9d6a858cc8bc3 -601, 0xf24e4c694929893e -602, 0x55d025ce2d7d0004 -603, 0xccdc69acccf4267b -604, 0xc491c04340c222eb -605, 0xba50f75ecec9befb -606, 0x1ec7bd85b8fe3bb9 -607, 0xe4de66498c59ae8a -608, 0x38aa9e912712c889 -609, 0xcee0e43c5cc31566 -610, 0x72b69aa708fc7ed -611, 0xdff70b7f6fa96679 -612, 
0xd6d71d82112aadc3 -613, 0x365177892cb78531 -614, 0xa54852b39de4f72c -615, 0x11dd5832bf16dd59 -616, 0x248a0f3369c97097 -617, 0xa14cec0260e26792 -618, 0x3517616ff142bed1 -619, 0x9b693ad39dab7636 -620, 0x739dff825e994434 -621, 0x67711e7356098c9 -622, 0xa81f8515d2fdf458 -623, 0xdac2908113fe568e -624, 0xe99944ebc6e2806a -625, 0x671728ca5b030975 -626, 0xfdad20edb2b4a789 -627, 0xedc6e466bd0369d2 -628, 0x88b5d469821f7e1b -629, 0x2eabf94049a522a5 -630, 0x247794b7a2f5a8e3 -631, 0x278942bdbe02c649 -632, 0xbe5a9a9196ab99c1 -633, 0x75955060866da1b5 -634, 0xdedcfa149273c0b5 -635, 0xdbeb7a57758f3867 -636, 0x7b9053347a2c8d5a -637, 0xa059b3f2eed338a5 -638, 0x59401a46ded3b79f -639, 0x38044ba56a6d19fb -640, 0x72c7221b4e77e779 -641, 0x526df3491a3a34da -642, 0xc3b31184ba16c0c2 -643, 0xd94c7144488624af -644, 0xcf966ee4dc373f91 -645, 0x62049e65dd416266 -646, 0x7c2adccb925bf8f -647, 0xd5fa5c22ed4ef8e1 -648, 0xd00134ebd11f2cd1 -649, 0xfbdf81767bed3634 -650, 0x62e8cc8ff66b6e26 -651, 0x3a72d6bcd4f2dcf7 -652, 0xf1cd45b1b46a86ed -653, 0x1271f98e0938bb9a -654, 0x82e6927e83dc31fa -655, 0x7b9b0e0acb67b92d -656, 0x6df503e397b2e701 -657, 0x93888f6fb561e0c3 -658, 0x393fb6069a40291 -659, 0x967a7d894cc0754d -660, 0x6e298996ad866333 -661, 0x5ff3cf5559d6ab46 -662, 0xd0d70508c40349f5 -663, 0xc64c66c0dd426b33 -664, 0x8fea340ee35c64dd -665, 0xf9cd381eb3060005 -666, 0xfcc37c2799fc0b11 -667, 0x6a37c91d65b489fa -668, 0x57231000fa0a0c9d -669, 0x55f6e292c6703f9a -670, 0xd0508ffbfa55a7a6 -671, 0x885db543276bdac8 -672, 0xc26dbe6a26b0e704 -673, 0x21f884874ebd709e -674, 0x711f0b6c8f732220 -675, 0x354d0a361eaee195 -676, 0x721344d8d30b006a -677, 0xa0e090a0d3a56f07 -678, 0x16b3d5d823a4952b -679, 0x59d7874bc9eae7b6 -680, 0x9bbb32710076455f -681, 0xd4fb22242ffabafd -682, 0xe1d4ac6770be1d89 -683, 0xb259cedebc73dc8a -684, 0x35faaa3b4246ab69 -685, 0x5d26addefdaee89 -686, 0x8e7ec350da0f3545 -687, 0xd0f316eed9f8fc79 -688, 0x98b2a52c9bf291b2 -689, 0xe4d294a8aca6a314 -690, 0x25bd554e6aa7673c -691, 0xcfde5dcba5be2a6c -692, 
0xb5e01fb48d2d2107 -693, 0xe1caf28948028536 -694, 0xd434aa0a26f3ee9b -695, 0xd17723381641b8f6 -696, 0xfe73bd1f3f3768a2 -697, 0x1cc6b1abd08d67e9 -698, 0x247e328371a28de0 -699, 0x502e7942e5a9104a -700, 0x6a030fd242eb4502 -701, 0xa2ffe02744014ce8 -702, 0x59290763b18fe04e -703, 0xcf14241564271436 -704, 0xb0fb73c3c1503aff -705, 0x94e27c622f82137a -706, 0x747a5b406ac3e1f0 -707, 0x9a914e96a732031d -708, 0x59f68c6c8f078835 -709, 0x809d012c73eb4724 -710, 0x5b3c3b73e1b37d74 -711, 0xdde60ef3ba49cdf7 -712, 0x87a14e1f9c761986 -713, 0x4109b960604522af -714, 0x122d0e1ed0eb6bb9 -715, 0xadc0d29e80bfe33 -716, 0xa25b1b44f5fc8e4e -717, 0xbab85d8a9b793f20 -718, 0x825f4cbced0e7d1e -719, 0x2d6ae8807acb37ea -720, 0x8234420adce2e39 -721, 0x4a8ad4da6b804807 -722, 0x1e19f9bc215e5245 -723, 0x1d6f4848a916dd5e -724, 0x9ac40dfcdc2d39cc -725, 0x9f3524e3086155ec -726, 0x861fffc43124b2ef -727, 0xe640e3b756396372 -728, 0x41cb0f0c5e149669 -729, 0xe0bd37e1192e4205 -730, 0x62917d3858f4ce47 -731, 0xa36e7eb4d855820a -732, 0x204b90255a3bf724 -733, 0x66ee83a0175535bc -734, 0x2c14ce7c6b0c1423 -735, 0x85d9495fa514f70d -736, 0x5a4fe45ead874dbc -737, 0xe72248dcb8cfc863 -738, 0xfc21ff2932ed98cd -739, 0xcbba1edd735b5cad -740, 0x91ddc32809679bf5 -741, 0x192cdf2c7631ea1f -742, 0xbbc451ddf2ea286f -743, 0xad9e80cae2397a64 -744, 0x6918f0119b95d0e5 -745, 0xa40379017a27d70a -746, 0x1aaeddb600e61e1 -747, 0x15afd93cbd7adda9 -748, 0x156719bc2b757ff4 -749, 0x13d9a59e2b2df49d -750, 0x9a490986eaddf0a -751, 0xef9a350f0b3eb6b4 -752, 0x5de7f6295ba4fa4d -753, 0x7f37fd087c3fdb49 -754, 0xa9fe3749d6f3f209 -755, 0x50912ac036d9bfb -756, 0x982cb4d726a441f8 -757, 0x8ca8d8af59b872d0 -758, 0x7f8adfb0ceeade8a -759, 0xdad390ec742be44 -760, 0xa637944d0045be5b -761, 0x3569a3b3af807061 -762, 0x9599da8eae14511d -763, 0xc333e8d19589b01a -764, 0xfb9b524a20b571e1 -765, 0xbd9dc8b37ce5c3e1 -766, 0x142333005fa389ac -767, 0x1368bc37cd5bcce1 -768, 0x16094907ad6ecf73 -769, 0xb32c90dbba4c1130 -770, 0x82761d97c1747dd0 -771, 0x599f9f267ae3444d -772, 
0x79ad3382994852e1 -773, 0x2511f06d9ef06e54 -774, 0xb35e6ab7d5bbddae -775, 0xfca9fa83a2988732 -776, 0x7d4350f0394ac3ba -777, 0xa52a9527bb176ea3 -778, 0xb49fa0ceb2aa8353 -779, 0x1f62e504d1468cc0 -780, 0xe1a77bfccce6efc3 -781, 0x776cdff4dc0d6797 -782, 0x56612e39b652c1f2 -783, 0x5f096a29294eda04 -784, 0x7978abc3aabd8b23 -785, 0x79dd875e0485b979 -786, 0x8a98aa4d5735d778 -787, 0xcca43940f69d2388 -788, 0xb2d4b156f144f93a -789, 0xbd528a676e9a862 -790, 0x2a394939c8e7ec5e -791, 0xb1da900c6efe4abc -792, 0x9869af479de4c034 -793, 0x78dbdfb88ac7c1db -794, 0x18cb169143088041 -795, 0xe69e5461c51a3e13 -796, 0x5389fa16ea98183c -797, 0xed7c80d1be1ea520 -798, 0x87246fc359758ced -799, 0xab323eba95fae4ed -800, 0xbc4c0dde7f8a1828 -801, 0xdb739f7955610b1a -802, 0xecd8c68c3434cc -803, 0x138c2eb88c477f44 -804, 0x28a65f96727aae41 -805, 0xdee879f2cf5629d -806, 0x684f0c90ef20070f -807, 0xa24a819ef5621800 -808, 0x8d0054f870e4fdcb -809, 0x99e8c6e695b600b -810, 0x50b705245891f7c3 -811, 0xc02eed3a6e58e51a -812, 0x443d64e95443606c -813, 0xca24959cfbd2d120 -814, 0xe072609ea48815bc -815, 0xbcc715026590315b -816, 0x3e76df24d7aa5938 -817, 0xd8ff04940d9b79ae -818, 0x54474ce790059bcd -819, 0x278390dd6aa70e81 -820, 0xf4df619fe35414e4 -821, 0x757d71270264e615 -822, 0x1e8a373699c11b23 -823, 0xef68c82046e67dd6 -824, 0xe280006599972620 -825, 0x234e095183b0f4d6 -826, 0xe3b7560ed9839749 -827, 0xcd5ec4086572332e -828, 0xc41c0d4aaa279108 -829, 0x4b9cd6126bc16a6d -830, 0x4a7252734f3e3dd0 -831, 0xb3132df156cc103a -832, 0xf9e4abbf7b64464a -833, 0xf936df27fb3c47b7 -834, 0x9142960873f6d71a -835, 0x4ba6aa3235cdb10d -836, 0x3237a2e765ba7766 -837, 0xd62f0b94c8e99e54 -838, 0x26b682f90a3ae41b -839, 0x40ad5e82072b6f81 -840, 0xd0198101f5484000 -841, 0xe4fac60ba11c332 -842, 0x472d0b0a95ef9d38 -843, 0x8512557aec5a3d8f -844, 0xef83169d3efd4de9 -845, 0x53fe89283e7a7676 -846, 0x2f50933053d69fc4 -847, 0x76f5e4362e2e53a2 -848, 0x8676fdccce28874a -849, 0x2737764c1fb1f821 -850, 0x4a6f70afc066ab55 -851, 0x27f8e151e310fca4 -852, 
0xd606960ccbe85161 -853, 0xcce51d7ddd270a32 -854, 0xb4235999794875c2 -855, 0x580084e358e884 -856, 0x2159d5e6dc8586d7 -857, 0x87bd54d8599b3ba4 -858, 0x3e9ade6a2181664 -859, 0x5e6e140406d97623 -860, 0x511545d5aa0080a2 -861, 0xf49d78ed219aac57 -862, 0xbece1f9c90b8ea87 -863, 0x1c741cac36a2c514 -864, 0x7453c141047db967 -865, 0xd751832a5037eba2 -866, 0x71370a3f30ada1f7 -867, 0x7c01cf2dcb408631 -868, 0x1052a4fbdccc0fa1 -869, 0x13d525c9df3fb6c -870, 0xa3aa8dbfee760c55 -871, 0xc0288d200f5155cf -872, 0x79f4bcd12af567c3 -873, 0x8160d163bb548755 -874, 0x5cf2995fb69fd2df -875, 0xcc98ed01396639df -876, 0xad95f1d9cfc8256e -877, 0xa3df27d9fbdbfb9d -878, 0x83e5f5dda4d52929 -879, 0x9adc05043009f55b -880, 0xdfe8329dfde1c001 -881, 0x9980ccdd5298e6a2 -882, 0x636a7bd134f6ef56 -883, 0xef5ff780c4be6ba4 -884, 0x290d71dc77a56d16 -885, 0x6d65db9ff58de1e6 -886, 0x944b063b3805a696 -887, 0xce468ca2cce33008 -888, 0x5ba1ccb840f80f48 -889, 0x28ddce36fc9ad268 -890, 0x4f77ef254d507a21 -891, 0xce9b4057fadf3ab -892, 0xb518bc68298730e6 -893, 0xd2eb5b8e2ec665b0 -894, 0xe1583303a4f87344 -895, 0x9d5a0df4fbe1bed5 -896, 0x2ba9bc03ec8cfd07 -897, 0x479ed880a96ca669 -898, 0xcedf96338324771a -899, 0x312f4fc2da41ffaa -900, 0xa0eb9cf23b5e1ed8 -901, 0xf8f88f975dc3f539 -902, 0x4a37e185d0e96e0f -903, 0xf829654a5c0b46f9 -904, 0x3909cca7a7f8c7fb -905, 0x4c2e1d66ceb45105 -906, 0xaffaa19e1db8af87 -907, 0x9ec498246bd18c76 -908, 0x21d51558edc089da -909, 0xe8984112cd1b1561 -910, 0x7de1d2cf54b0c0e1 -911, 0xa06729aed50bfb9d -912, 0xcf19f733e5db19e1 -913, 0x70edf2624ab777cd -914, 0x46685becad10e078 -915, 0x825e0f6add46785 -916, 0x66d4af3b15f70de4 -917, 0xc676614b0666b21 -918, 0x282a916c864f5cb7 -919, 0x2707283a3f512167 -920, 0x37ff3afda7461623 -921, 0xc767eb1205e4ca86 -922, 0x46b359aecc4ea25b -923, 0x67fbbb797a16dbb1 -924, 0x64fd4ba57122290e -925, 0x8acc2a8ae59d8fac -926, 0x64a49298599acc67 -927, 0xedf00de67177ce30 -928, 0x1ea9d8d7e76d2d2c -929, 0x363fcac323f70eb2 -930, 0x19e6e3ec8a9712eb -931, 0xca541e96b0961f09 -932, 
0x4d8fd34c2822ec46 -933, 0x2fdd56a50b32f705 -934, 0xaac2fcf251e3fd3 -935, 0xb0c600299e57045c -936, 0xd951ec589e909e38 -937, 0x4dc8414390cae508 -938, 0x537ef9d5e2321344 -939, 0xa57bc21fd31aa2dc -940, 0xa3a60df564183750 -941, 0xbe69a5ce2e369fb6 -942, 0x7744601f4c053ec8 -943, 0x3838452af42f2612 -944, 0xd4f0dad7115a54e9 -945, 0x629cf68d8009a624 -946, 0x2211c8fa34cb98cb -947, 0x8040b19e2213db83 -948, 0xb2a86d3ba2384fd -949, 0x4b85cec4f93f0dab -950, 0xc8d212d21ea6845d -951, 0x5b271a03a4fe2be0 -952, 0xff4f671319ad8434 -953, 0x8e615a919d5afa96 -954, 0xea7f47c53161160a -955, 0x33273930b13c6efc -956, 0x98eedda27fb59c3c -957, 0x188dc5e92e939677 -958, 0x9dbd0fa0911430f1 -959, 0x5b3dcf3fa75dfd2b -960, 0x3f03846febdb275d -961, 0x20cc24faea9e9cf6 -962, 0x854f3ac66199ff5d -963, 0x31169ac99d341e6f -964, 0xa85daed3c0bc1bbe -965, 0x64633711e71ba5dd -966, 0x530e79978dc73334 -967, 0x636f2ee6e20aef13 -968, 0xf6220f8b6d9a58fb -969, 0x425db8fa32141a7b -970, 0xac7c210f4b02be95 -971, 0x5fe8cfbe197a7754 -972, 0xfff7d40c79420ea -973, 0x5f8bab9ef4697b77 -974, 0xaf6fe54e45b23fe8 -975, 0xce79456ccc70bbce -976, 0x645ef680f48f1c00 -977, 0xa4dfac46e2028595 -978, 0x6bece4c41effc5df -979, 0xd316df886442641f -980, 0xa4f6ff994edd2a6 -981, 0x30281ae3cc49abe4 -982, 0x39acb7b663dea974 -983, 0x5e8829b01a7c06fb -984, 0x87bdb08cf027f13e -985, 0xdfa5ede784e802f6 -986, 0x46d03d55711c38cc -987, 0xa55a961fc9788306 -988, 0xbf09ded495a2e57a -989, 0xcd601b29a639cc16 -990, 0x2193ce026bfd1085 -991, 0x25ba27f3f225be13 -992, 0x6f685be82f64f2fe -993, 0xec8454108229c450 -994, 0x6e79d8d205447a44 -995, 0x9ed7b6a96b9ccd68 -996, 0xae7134b3b7f8ee37 -997, 0x66963de0e5ebcc02 -998, 0x29c8dcd0d17c423f -999, 0xfb8482c827eb90bc diff --git a/venv/lib/python3.7/site-packages/numpy/random/tests/test_direct.py b/venv/lib/python3.7/site-packages/numpy/random/tests/test_direct.py deleted file mode 100644 index 9f77f0a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/tests/test_direct.py +++ /dev/null @@ -1,425 +0,0 @@ -import 
os -from os.path import join -import sys - -import numpy as np -from numpy.testing import (assert_equal, assert_allclose, assert_array_equal, - assert_raises) -import pytest - -from numpy.random import ( - Generator, MT19937, PCG64, Philox, RandomState, SeedSequence, SFC64, - default_rng -) -from numpy.random._common import interface - -try: - import cffi # noqa: F401 - - MISSING_CFFI = False -except ImportError: - MISSING_CFFI = True - -try: - import ctypes # noqa: F401 - - MISSING_CTYPES = False -except ImportError: - MISSING_CTYPES = False - -if sys.flags.optimize > 1: - # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1 - # cffi cannot succeed - MISSING_CFFI = True - - -pwd = os.path.dirname(os.path.abspath(__file__)) - - -def assert_state_equal(actual, target): - for key in actual: - if isinstance(actual[key], dict): - assert_state_equal(actual[key], target[key]) - elif isinstance(actual[key], np.ndarray): - assert_array_equal(actual[key], target[key]) - else: - assert actual[key] == target[key] - - -def uniform32_from_uint64(x): - x = np.uint64(x) - upper = np.array(x >> np.uint64(32), dtype=np.uint32) - lower = np.uint64(0xffffffff) - lower = np.array(x & lower, dtype=np.uint32) - joined = np.column_stack([lower, upper]).ravel() - out = (joined >> np.uint32(9)) * (1.0 / 2 ** 23) - return out.astype(np.float32) - - -def uniform32_from_uint53(x): - x = np.uint64(x) >> np.uint64(16) - x = np.uint32(x & np.uint64(0xffffffff)) - out = (x >> np.uint32(9)) * (1.0 / 2 ** 23) - return out.astype(np.float32) - - -def uniform32_from_uint32(x): - return (x >> np.uint32(9)) * (1.0 / 2 ** 23) - - -def uniform32_from_uint(x, bits): - if bits == 64: - return uniform32_from_uint64(x) - elif bits == 53: - return uniform32_from_uint53(x) - elif bits == 32: - return uniform32_from_uint32(x) - else: - raise NotImplementedError - - -def uniform_from_uint(x, bits): - if bits in (64, 63, 53): - return uniform_from_uint64(x) - elif bits == 32: - return 
uniform_from_uint32(x) - - -def uniform_from_uint64(x): - return (x >> np.uint64(11)) * (1.0 / 9007199254740992.0) - - -def uniform_from_uint32(x): - out = np.empty(len(x) // 2) - for i in range(0, len(x), 2): - a = x[i] >> 5 - b = x[i + 1] >> 6 - out[i // 2] = (a * 67108864.0 + b) / 9007199254740992.0 - return out - - -def uniform_from_dsfmt(x): - return x.view(np.double) - 1.0 - - -def gauss_from_uint(x, n, bits): - if bits in (64, 63): - doubles = uniform_from_uint64(x) - elif bits == 32: - doubles = uniform_from_uint32(x) - else: # bits == 'dsfmt' - doubles = uniform_from_dsfmt(x) - gauss = [] - loc = 0 - x1 = x2 = 0.0 - while len(gauss) < n: - r2 = 2 - while r2 >= 1.0 or r2 == 0.0: - x1 = 2.0 * doubles[loc] - 1.0 - x2 = 2.0 * doubles[loc + 1] - 1.0 - r2 = x1 * x1 + x2 * x2 - loc += 2 - - f = np.sqrt(-2.0 * np.log(r2) / r2) - gauss.append(f * x2) - gauss.append(f * x1) - - return gauss[:n] - -def test_seedsequence(): - from numpy.random._bit_generator import (ISeedSequence, - ISpawnableSeedSequence, - SeedlessSeedSequence) - - s1 = SeedSequence(range(10), spawn_key=(1, 2), pool_size=6) - s1.spawn(10) - s2 = SeedSequence(**s1.state) - assert_equal(s1.state, s2.state) - assert_equal(s1.n_children_spawned, s2.n_children_spawned) - - # The interfaces cannot be instantiated themselves. 
- assert_raises(TypeError, ISeedSequence) - assert_raises(TypeError, ISpawnableSeedSequence) - dummy = SeedlessSeedSequence() - assert_raises(NotImplementedError, dummy.generate_state, 10) - assert len(dummy.spawn(10)) == 10 - - -class Base(object): - dtype = np.uint64 - data2 = data1 = {} - - @classmethod - def setup_class(cls): - cls.bit_generator = PCG64 - cls.bits = 64 - cls.dtype = np.uint64 - cls.seed_error_type = TypeError - cls.invalid_init_types = [] - cls.invalid_init_values = [] - - @classmethod - def _read_csv(cls, filename): - with open(filename) as csv: - seed = csv.readline() - seed = seed.split(',') - seed = [int(s.strip(), 0) for s in seed[1:]] - data = [] - for line in csv: - data.append(int(line.split(',')[-1].strip(), 0)) - return {'seed': seed, 'data': np.array(data, dtype=cls.dtype)} - - def test_raw(self): - bit_generator = self.bit_generator(*self.data1['seed']) - uints = bit_generator.random_raw(1000) - assert_equal(uints, self.data1['data']) - - bit_generator = self.bit_generator(*self.data1['seed']) - uints = bit_generator.random_raw() - assert_equal(uints, self.data1['data'][0]) - - bit_generator = self.bit_generator(*self.data2['seed']) - uints = bit_generator.random_raw(1000) - assert_equal(uints, self.data2['data']) - - def test_random_raw(self): - bit_generator = self.bit_generator(*self.data1['seed']) - uints = bit_generator.random_raw(output=False) - assert uints is None - uints = bit_generator.random_raw(1000, output=False) - assert uints is None - - def test_gauss_inv(self): - n = 25 - rs = RandomState(self.bit_generator(*self.data1['seed'])) - gauss = rs.standard_normal(n) - assert_allclose(gauss, - gauss_from_uint(self.data1['data'], n, self.bits)) - - rs = RandomState(self.bit_generator(*self.data2['seed'])) - gauss = rs.standard_normal(25) - assert_allclose(gauss, - gauss_from_uint(self.data2['data'], n, self.bits)) - - def test_uniform_double(self): - rs = Generator(self.bit_generator(*self.data1['seed'])) - vals = 
uniform_from_uint(self.data1['data'], self.bits) - uniforms = rs.random(len(vals)) - assert_allclose(uniforms, vals) - assert_equal(uniforms.dtype, np.float64) - - rs = Generator(self.bit_generator(*self.data2['seed'])) - vals = uniform_from_uint(self.data2['data'], self.bits) - uniforms = rs.random(len(vals)) - assert_allclose(uniforms, vals) - assert_equal(uniforms.dtype, np.float64) - - def test_uniform_float(self): - rs = Generator(self.bit_generator(*self.data1['seed'])) - vals = uniform32_from_uint(self.data1['data'], self.bits) - uniforms = rs.random(len(vals), dtype=np.float32) - assert_allclose(uniforms, vals) - assert_equal(uniforms.dtype, np.float32) - - rs = Generator(self.bit_generator(*self.data2['seed'])) - vals = uniform32_from_uint(self.data2['data'], self.bits) - uniforms = rs.random(len(vals), dtype=np.float32) - assert_allclose(uniforms, vals) - assert_equal(uniforms.dtype, np.float32) - - def test_repr(self): - rs = Generator(self.bit_generator(*self.data1['seed'])) - assert 'Generator' in repr(rs) - assert '{:#x}'.format(id(rs)).upper().replace('X', 'x') in repr(rs) - - def test_str(self): - rs = Generator(self.bit_generator(*self.data1['seed'])) - assert 'Generator' in str(rs) - assert str(self.bit_generator.__name__) in str(rs) - assert '{:#x}'.format(id(rs)).upper().replace('X', 'x') not in str(rs) - - def test_pickle(self): - import pickle - - bit_generator = self.bit_generator(*self.data1['seed']) - state = bit_generator.state - bitgen_pkl = pickle.dumps(bit_generator) - reloaded = pickle.loads(bitgen_pkl) - reloaded_state = reloaded.state - assert_array_equal(Generator(bit_generator).standard_normal(1000), - Generator(reloaded).standard_normal(1000)) - assert bit_generator is not reloaded - assert_state_equal(reloaded_state, state) - - ss = SeedSequence(100) - aa = pickle.loads(pickle.dumps(ss)) - assert_equal(ss.state, aa.state) - - def test_invalid_state_type(self): - bit_generator = self.bit_generator(*self.data1['seed']) - with 
pytest.raises(TypeError): - bit_generator.state = {'1'} - - def test_invalid_state_value(self): - bit_generator = self.bit_generator(*self.data1['seed']) - state = bit_generator.state - state['bit_generator'] = 'otherBitGenerator' - with pytest.raises(ValueError): - bit_generator.state = state - - def test_invalid_init_type(self): - bit_generator = self.bit_generator - for st in self.invalid_init_types: - with pytest.raises(TypeError): - bit_generator(*st) - - def test_invalid_init_values(self): - bit_generator = self.bit_generator - for st in self.invalid_init_values: - with pytest.raises((ValueError, OverflowError)): - bit_generator(*st) - - def test_benchmark(self): - bit_generator = self.bit_generator(*self.data1['seed']) - bit_generator._benchmark(1) - bit_generator._benchmark(1, 'double') - with pytest.raises(ValueError): - bit_generator._benchmark(1, 'int32') - - @pytest.mark.skipif(MISSING_CFFI, reason='cffi not available') - def test_cffi(self): - bit_generator = self.bit_generator(*self.data1['seed']) - cffi_interface = bit_generator.cffi - assert isinstance(cffi_interface, interface) - other_cffi_interface = bit_generator.cffi - assert other_cffi_interface is cffi_interface - - @pytest.mark.skipif(MISSING_CTYPES, reason='ctypes not available') - def test_ctypes(self): - bit_generator = self.bit_generator(*self.data1['seed']) - ctypes_interface = bit_generator.ctypes - assert isinstance(ctypes_interface, interface) - other_ctypes_interface = bit_generator.ctypes - assert other_ctypes_interface is ctypes_interface - - def test_getstate(self): - bit_generator = self.bit_generator(*self.data1['seed']) - state = bit_generator.state - alt_state = bit_generator.__getstate__() - assert_state_equal(state, alt_state) - - -class TestPhilox(Base): - @classmethod - def setup_class(cls): - cls.bit_generator = Philox - cls.bits = 64 - cls.dtype = np.uint64 - cls.data1 = cls._read_csv( - join(pwd, './data/philox-testset-1.csv')) - cls.data2 = cls._read_csv( - join(pwd, 
'./data/philox-testset-2.csv')) - cls.seed_error_type = TypeError - cls.invalid_init_types = [] - cls.invalid_init_values = [(1, None, 1), (-1,), (None, None, 2 ** 257 + 1)] - - def test_set_key(self): - bit_generator = self.bit_generator(*self.data1['seed']) - state = bit_generator.state - keyed = self.bit_generator(counter=state['state']['counter'], - key=state['state']['key']) - assert_state_equal(bit_generator.state, keyed.state) - - -class TestPCG64(Base): - @classmethod - def setup_class(cls): - cls.bit_generator = PCG64 - cls.bits = 64 - cls.dtype = np.uint64 - cls.data1 = cls._read_csv(join(pwd, './data/pcg64-testset-1.csv')) - cls.data2 = cls._read_csv(join(pwd, './data/pcg64-testset-2.csv')) - cls.seed_error_type = (ValueError, TypeError) - cls.invalid_init_types = [(3.2,), ([None],), (1, None)] - cls.invalid_init_values = [(-1,)] - - def test_advance_symmetry(self): - rs = Generator(self.bit_generator(*self.data1['seed'])) - state = rs.bit_generator.state - step = -0x9e3779b97f4a7c150000000000000000 - rs.bit_generator.advance(step) - val_neg = rs.integers(10) - rs.bit_generator.state = state - rs.bit_generator.advance(2**128 + step) - val_pos = rs.integers(10) - rs.bit_generator.state = state - rs.bit_generator.advance(10 * 2**128 + step) - val_big = rs.integers(10) - assert val_neg == val_pos - assert val_big == val_pos - - -class TestMT19937(Base): - @classmethod - def setup_class(cls): - cls.bit_generator = MT19937 - cls.bits = 32 - cls.dtype = np.uint32 - cls.data1 = cls._read_csv(join(pwd, './data/mt19937-testset-1.csv')) - cls.data2 = cls._read_csv(join(pwd, './data/mt19937-testset-2.csv')) - cls.seed_error_type = ValueError - cls.invalid_init_types = [] - cls.invalid_init_values = [(-1,)] - - def test_seed_float_array(self): - assert_raises(TypeError, self.bit_generator, np.array([np.pi])) - assert_raises(TypeError, self.bit_generator, np.array([-np.pi])) - assert_raises(TypeError, self.bit_generator, np.array([np.pi, -np.pi])) - 
assert_raises(TypeError, self.bit_generator, np.array([0, np.pi])) - assert_raises(TypeError, self.bit_generator, [np.pi]) - assert_raises(TypeError, self.bit_generator, [0, np.pi]) - - def test_state_tuple(self): - rs = Generator(self.bit_generator(*self.data1['seed'])) - bit_generator = rs.bit_generator - state = bit_generator.state - desired = rs.integers(2 ** 16) - tup = (state['bit_generator'], state['state']['key'], - state['state']['pos']) - bit_generator.state = tup - actual = rs.integers(2 ** 16) - assert_equal(actual, desired) - tup = tup + (0, 0.0) - bit_generator.state = tup - actual = rs.integers(2 ** 16) - assert_equal(actual, desired) - - -class TestSFC64(Base): - @classmethod - def setup_class(cls): - cls.bit_generator = SFC64 - cls.bits = 64 - cls.dtype = np.uint64 - cls.data1 = cls._read_csv( - join(pwd, './data/sfc64-testset-1.csv')) - cls.data2 = cls._read_csv( - join(pwd, './data/sfc64-testset-2.csv')) - cls.seed_error_type = (ValueError, TypeError) - cls.invalid_init_types = [(3.2,), ([None],), (1, None)] - cls.invalid_init_values = [(-1,)] - - -class TestDefaultRNG(object): - def test_seed(self): - for args in [(), (None,), (1234,), ([1234, 5678],)]: - rg = default_rng(*args) - assert isinstance(rg.bit_generator, PCG64) - - def test_passthrough(self): - bg = Philox() - rg = default_rng(bg) - assert rg.bit_generator is bg - rg2 = default_rng(rg) - assert rg2 is rg - assert rg2.bit_generator is bg diff --git a/venv/lib/python3.7/site-packages/numpy/random/tests/test_extending.py b/venv/lib/python3.7/site-packages/numpy/random/tests/test_extending.py deleted file mode 100644 index ce34c1b..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/tests/test_extending.py +++ /dev/null @@ -1,57 +0,0 @@ -import os, sys -import pytest -import warnings -import shutil -import subprocess - -try: - import cffi -except ImportError: - cffi = None - -if sys.flags.optimize > 1: - # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1 - 
# cffi cannot succeed - cffi = None - -try: - with warnings.catch_warnings(record=True) as w: - # numba issue gh-4733 - warnings.filterwarnings('always', '', DeprecationWarning) - import numba -except ImportError: - numba = None - -try: - import cython - from Cython.Compiler.Version import version as cython_version -except ImportError: - cython = None -else: - from distutils.version import LooseVersion - # Cython 0.29.14 is required for Python 3.8 and there are - # other fixes in the 0.29 series that are needed even for earlier - # Python versions. - # Note: keep in sync with the one in pyproject.toml - required_version = LooseVersion('0.29.14') - if LooseVersion(cython_version) < required_version: - # too old or wrong cython, skip the test - cython = None - -@pytest.mark.skipif(cython is None, reason="requires cython") -@pytest.mark.slow -def test_cython(tmp_path): - examples = os.path.join(os.path.dirname(__file__), '..', '_examples') - # CPython 3.5 and below does not handle __fspath__ well: see bpo-26027 - shutil.copytree(examples, str(tmp_path / '_examples')) - subprocess.check_call([sys.executable, 'setup.py', 'build'], - cwd=str(tmp_path / '_examples' / 'cython')) - -@pytest.mark.skipif(numba is None or cffi is None, - reason="requires numba and cffi") -def test_numba(): - from numpy.random._examples.numba import extending - -@pytest.mark.skipif(cffi is None, reason="requires cffi") -def test_cffi(): - from numpy.random._examples.cffi import extending diff --git a/venv/lib/python3.7/site-packages/numpy/random/tests/test_generator_mt19937.py b/venv/lib/python3.7/site-packages/numpy/random/tests/test_generator_mt19937.py deleted file mode 100644 index d835f16..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/tests/test_generator_mt19937.py +++ /dev/null @@ -1,2250 +0,0 @@ -import sys - -import pytest - -import numpy as np -from numpy.dual import cholesky, eigh, svd -from numpy.linalg import LinAlgError -from numpy.testing import ( - assert_, 
assert_raises, assert_equal, assert_allclose, - assert_warns, assert_no_warnings, assert_array_equal, - assert_array_almost_equal, suppress_warnings) - -from numpy.random import Generator, MT19937, SeedSequence - -random = Generator(MT19937()) - - -@pytest.fixture(scope='module', params=[True, False]) -def endpoint(request): - return request.param - - -class TestSeed(object): - def test_scalar(self): - s = Generator(MT19937(0)) - assert_equal(s.integers(1000), 479) - s = Generator(MT19937(4294967295)) - assert_equal(s.integers(1000), 324) - - def test_array(self): - s = Generator(MT19937(range(10))) - assert_equal(s.integers(1000), 465) - s = Generator(MT19937(np.arange(10))) - assert_equal(s.integers(1000), 465) - s = Generator(MT19937([0])) - assert_equal(s.integers(1000), 479) - s = Generator(MT19937([4294967295])) - assert_equal(s.integers(1000), 324) - - def test_seedsequence(self): - s = MT19937(SeedSequence(0)) - assert_equal(s.random_raw(1), 2058676884) - - def test_invalid_scalar(self): - # seed must be an unsigned 32 bit integer - assert_raises(TypeError, MT19937, -0.5) - assert_raises(ValueError, MT19937, -1) - - def test_invalid_array(self): - # seed must be an unsigned integer - assert_raises(TypeError, MT19937, [-0.5]) - assert_raises(ValueError, MT19937, [-1]) - assert_raises(ValueError, MT19937, [1, -2, 4294967296]) - - def test_noninstantized_bitgen(self): - assert_raises(ValueError, Generator, MT19937) - - -class TestBinomial(object): - def test_n_zero(self): - # Tests the corner case of n == 0 for the binomial distribution. - # binomial(0, p) should be zero for any p in [0, 1]. - # This test addresses issue #3480. - zeros = np.zeros(2, dtype='int') - for p in [0, .5, 1]: - assert_(random.binomial(0, p) == 0) - assert_array_equal(random.binomial(zeros, p), zeros) - - def test_p_is_nan(self): - # Issue #4571. 
- assert_raises(ValueError, random.binomial, 1, np.nan) - - -class TestMultinomial(object): - def test_basic(self): - random.multinomial(100, [0.2, 0.8]) - - def test_zero_probability(self): - random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0]) - - def test_int_negative_interval(self): - assert_(-5 <= random.integers(-5, -1) < -1) - x = random.integers(-5, -1, 5) - assert_(np.all(-5 <= x)) - assert_(np.all(x < -1)) - - def test_size(self): - # gh-3173 - p = [0.5, 0.5] - assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) - assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) - assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) - assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2)) - assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2)) - assert_equal(random.multinomial(1, p, np.array((2, 2))).shape, - (2, 2, 2)) - - assert_raises(TypeError, random.multinomial, 1, p, - float(1)) - - def test_invalid_prob(self): - assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2]) - assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9]) - - def test_invalid_n(self): - assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2]) - assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2]) - - def test_p_non_contiguous(self): - p = np.arange(15.) - p /= np.sum(p[1::3]) - pvals = p[1::3] - random = Generator(MT19937(1432985819)) - non_contig = random.multinomial(100, pvals=pvals) - random = Generator(MT19937(1432985819)) - contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals)) - assert_array_equal(non_contig, contig) - - -class TestMultivariateHypergeometric(object): - - def setup(self): - self.seed = 8675309 - - def test_argument_validation(self): - # Error cases... 
- - # `colors` must be a 1-d sequence - assert_raises(ValueError, random.multivariate_hypergeometric, - 10, 4) - - # Negative nsample - assert_raises(ValueError, random.multivariate_hypergeometric, - [2, 3, 4], -1) - - # Negative color - assert_raises(ValueError, random.multivariate_hypergeometric, - [-1, 2, 3], 2) - - # nsample exceeds sum(colors) - assert_raises(ValueError, random.multivariate_hypergeometric, - [2, 3, 4], 10) - - # nsample exceeds sum(colors) (edge case of empty colors) - assert_raises(ValueError, random.multivariate_hypergeometric, - [], 1) - - # Validation errors associated with very large values in colors. - assert_raises(ValueError, random.multivariate_hypergeometric, - [999999999, 101], 5, 1, 'marginals') - - int64_info = np.iinfo(np.int64) - max_int64 = int64_info.max - max_int64_index = max_int64 // int64_info.dtype.itemsize - assert_raises(ValueError, random.multivariate_hypergeometric, - [max_int64_index - 100, 101], 5, 1, 'count') - - @pytest.mark.parametrize('method', ['count', 'marginals']) - def test_edge_cases(self, method): - # Set the seed, but in fact, all the results in this test are - # deterministic, so we don't really need this. 
- random = Generator(MT19937(self.seed)) - - x = random.multivariate_hypergeometric([0, 0, 0], 0, method=method) - assert_array_equal(x, [0, 0, 0]) - - x = random.multivariate_hypergeometric([], 0, method=method) - assert_array_equal(x, []) - - x = random.multivariate_hypergeometric([], 0, size=1, method=method) - assert_array_equal(x, np.empty((1, 0), dtype=np.int64)) - - x = random.multivariate_hypergeometric([1, 2, 3], 0, method=method) - assert_array_equal(x, [0, 0, 0]) - - x = random.multivariate_hypergeometric([9, 0, 0], 3, method=method) - assert_array_equal(x, [3, 0, 0]) - - colors = [1, 1, 0, 1, 1] - x = random.multivariate_hypergeometric(colors, sum(colors), - method=method) - assert_array_equal(x, colors) - - x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3, - method=method) - assert_array_equal(x, [[3, 4, 5]]*3) - - # Cases for nsample: - # nsample < 10 - # 10 <= nsample < colors.sum()/2 - # colors.sum()/2 < nsample < colors.sum() - 10 - # colors.sum() - 10 < nsample < colors.sum() - @pytest.mark.parametrize('nsample', [8, 25, 45, 55]) - @pytest.mark.parametrize('method', ['count', 'marginals']) - @pytest.mark.parametrize('size', [5, (2, 3), 150000]) - def test_typical_cases(self, nsample, method, size): - random = Generator(MT19937(self.seed)) - - colors = np.array([10, 5, 20, 25]) - sample = random.multivariate_hypergeometric(colors, nsample, size, - method=method) - if isinstance(size, int): - expected_shape = (size,) + colors.shape - else: - expected_shape = size + colors.shape - assert_equal(sample.shape, expected_shape) - assert_((sample >= 0).all()) - assert_((sample <= colors).all()) - assert_array_equal(sample.sum(axis=-1), - np.full(size, fill_value=nsample, dtype=int)) - if isinstance(size, int) and size >= 100000: - # This sample is large enough to compare its mean to - # the expected values. 
- assert_allclose(sample.mean(axis=0), - nsample * colors / colors.sum(), - rtol=1e-3, atol=0.005) - - def test_repeatability1(self): - random = Generator(MT19937(self.seed)) - sample = random.multivariate_hypergeometric([3, 4, 5], 5, size=5, - method='count') - expected = np.array([[2, 1, 2], - [2, 1, 2], - [1, 1, 3], - [2, 0, 3], - [2, 1, 2]]) - assert_array_equal(sample, expected) - - def test_repeatability2(self): - random = Generator(MT19937(self.seed)) - sample = random.multivariate_hypergeometric([20, 30, 50], 50, - size=5, - method='marginals') - expected = np.array([[ 9, 17, 24], - [ 7, 13, 30], - [ 9, 15, 26], - [ 9, 17, 24], - [12, 14, 24]]) - assert_array_equal(sample, expected) - - def test_repeatability3(self): - random = Generator(MT19937(self.seed)) - sample = random.multivariate_hypergeometric([20, 30, 50], 12, - size=5, - method='marginals') - expected = np.array([[2, 3, 7], - [5, 3, 4], - [2, 5, 5], - [5, 3, 4], - [1, 5, 6]]) - assert_array_equal(sample, expected) - - -class TestSetState(object): - def setup(self): - self.seed = 1234567890 - self.rg = Generator(MT19937(self.seed)) - self.bit_generator = self.rg.bit_generator - self.state = self.bit_generator.state - self.legacy_state = (self.state['bit_generator'], - self.state['state']['key'], - self.state['state']['pos']) - - def test_gaussian_reset(self): - # Make sure the cached every-other-Gaussian is reset. - old = self.rg.standard_normal(size=3) - self.bit_generator.state = self.state - new = self.rg.standard_normal(size=3) - assert_(np.all(old == new)) - - def test_gaussian_reset_in_media_res(self): - # When the state is saved with a cached Gaussian, make sure the - # cached Gaussian is restored. 
- - self.rg.standard_normal() - state = self.bit_generator.state - old = self.rg.standard_normal(size=3) - self.bit_generator.state = state - new = self.rg.standard_normal(size=3) - assert_(np.all(old == new)) - - def test_negative_binomial(self): - # Ensure that the negative binomial results take floating point - # arguments without truncation. - self.rg.negative_binomial(0.5, 0.5) - - -class TestIntegers(object): - rfunc = random.integers - - # valid integer/boolean types - itype = [bool, np.int8, np.uint8, np.int16, np.uint16, - np.int32, np.uint32, np.int64, np.uint64] - - def test_unsupported_type(self, endpoint): - assert_raises(TypeError, self.rfunc, 1, endpoint=endpoint, dtype=float) - - def test_bounds_checking(self, endpoint): - for dt in self.itype: - lbnd = 0 if dt is bool else np.iinfo(dt).min - ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 - ubnd = ubnd - 1 if endpoint else ubnd - assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, - endpoint=endpoint, dtype=dt) - assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, - endpoint=endpoint, dtype=dt) - assert_raises(ValueError, self.rfunc, ubnd, lbnd, - endpoint=endpoint, dtype=dt) - assert_raises(ValueError, self.rfunc, 1, 0, endpoint=endpoint, - dtype=dt) - - assert_raises(ValueError, self.rfunc, [lbnd - 1], ubnd, - endpoint=endpoint, dtype=dt) - assert_raises(ValueError, self.rfunc, [lbnd], [ubnd + 1], - endpoint=endpoint, dtype=dt) - assert_raises(ValueError, self.rfunc, [ubnd], [lbnd], - endpoint=endpoint, dtype=dt) - assert_raises(ValueError, self.rfunc, 1, [0], - endpoint=endpoint, dtype=dt) - - def test_bounds_checking_array(self, endpoint): - for dt in self.itype: - lbnd = 0 if dt is bool else np.iinfo(dt).min - ubnd = 2 if dt is bool else np.iinfo(dt).max + (not endpoint) - - assert_raises(ValueError, self.rfunc, [lbnd - 1] * 2, [ubnd] * 2, - endpoint=endpoint, dtype=dt) - assert_raises(ValueError, self.rfunc, [lbnd] * 2, - [ubnd + 1] * 2, endpoint=endpoint, dtype=dt) - 
assert_raises(ValueError, self.rfunc, ubnd, [lbnd] * 2, - endpoint=endpoint, dtype=dt) - assert_raises(ValueError, self.rfunc, [1] * 2, 0, - endpoint=endpoint, dtype=dt) - - def test_rng_zero_and_extremes(self, endpoint): - for dt in self.itype: - lbnd = 0 if dt is bool else np.iinfo(dt).min - ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 - ubnd = ubnd - 1 if endpoint else ubnd - is_open = not endpoint - - tgt = ubnd - 1 - assert_equal(self.rfunc(tgt, tgt + is_open, size=1000, - endpoint=endpoint, dtype=dt), tgt) - assert_equal(self.rfunc([tgt], tgt + is_open, size=1000, - endpoint=endpoint, dtype=dt), tgt) - - tgt = lbnd - assert_equal(self.rfunc(tgt, tgt + is_open, size=1000, - endpoint=endpoint, dtype=dt), tgt) - assert_equal(self.rfunc(tgt, [tgt + is_open], size=1000, - endpoint=endpoint, dtype=dt), tgt) - - tgt = (lbnd + ubnd) // 2 - assert_equal(self.rfunc(tgt, tgt + is_open, size=1000, - endpoint=endpoint, dtype=dt), tgt) - assert_equal(self.rfunc([tgt], [tgt + is_open], - size=1000, endpoint=endpoint, dtype=dt), - tgt) - - def test_rng_zero_and_extremes_array(self, endpoint): - size = 1000 - for dt in self.itype: - lbnd = 0 if dt is bool else np.iinfo(dt).min - ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 - ubnd = ubnd - 1 if endpoint else ubnd - - tgt = ubnd - 1 - assert_equal(self.rfunc([tgt], [tgt + 1], - size=size, dtype=dt), tgt) - assert_equal(self.rfunc( - [tgt] * size, [tgt + 1] * size, dtype=dt), tgt) - assert_equal(self.rfunc( - [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt) - - tgt = lbnd - assert_equal(self.rfunc([tgt], [tgt + 1], - size=size, dtype=dt), tgt) - assert_equal(self.rfunc( - [tgt] * size, [tgt + 1] * size, dtype=dt), tgt) - assert_equal(self.rfunc( - [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt) - - tgt = (lbnd + ubnd) // 2 - assert_equal(self.rfunc([tgt], [tgt + 1], - size=size, dtype=dt), tgt) - assert_equal(self.rfunc( - [tgt] * size, [tgt + 1] * size, dtype=dt), tgt) - assert_equal(self.rfunc( - 
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt) - - def test_full_range(self, endpoint): - # Test for ticket #1690 - - for dt in self.itype: - lbnd = 0 if dt is bool else np.iinfo(dt).min - ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 - ubnd = ubnd - 1 if endpoint else ubnd - - try: - self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt) - except Exception as e: - raise AssertionError("No error should have been raised, " - "but one was with the following " - "message:\n\n%s" % str(e)) - - def test_full_range_array(self, endpoint): - # Test for ticket #1690 - - for dt in self.itype: - lbnd = 0 if dt is bool else np.iinfo(dt).min - ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 - ubnd = ubnd - 1 if endpoint else ubnd - - try: - self.rfunc([lbnd] * 2, [ubnd], endpoint=endpoint, dtype=dt) - except Exception as e: - raise AssertionError("No error should have been raised, " - "but one was with the following " - "message:\n\n%s" % str(e)) - - def test_in_bounds_fuzz(self, endpoint): - # Don't use fixed seed - random = Generator(MT19937()) - - for dt in self.itype[1:]: - for ubnd in [4, 8, 16]: - vals = self.rfunc(2, ubnd - endpoint, size=2 ** 16, - endpoint=endpoint, dtype=dt) - assert_(vals.max() < ubnd) - assert_(vals.min() >= 2) - - vals = self.rfunc(0, 2 - endpoint, size=2 ** 16, endpoint=endpoint, - dtype=bool) - assert_(vals.max() < 2) - assert_(vals.min() >= 0) - - def test_scalar_array_equiv(self, endpoint): - for dt in self.itype: - lbnd = 0 if dt is bool else np.iinfo(dt).min - ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 - ubnd = ubnd - 1 if endpoint else ubnd - - size = 1000 - random = Generator(MT19937(1234)) - scalar = random.integers(lbnd, ubnd, size=size, endpoint=endpoint, - dtype=dt) - - random = Generator(MT19937(1234)) - scalar_array = random.integers([lbnd], [ubnd], size=size, - endpoint=endpoint, dtype=dt) - - random = Generator(MT19937(1234)) - array = random.integers([lbnd] * size, [ubnd] * - size, size=size, endpoint=endpoint, 
dtype=dt) - assert_array_equal(scalar, scalar_array) - assert_array_equal(scalar, array) - - def test_repeatability(self, endpoint): - import hashlib - # We use a md5 hash of generated sequences of 1000 samples - # in the range [0, 6) for all but bool, where the range - # is [0, 2). Hashes are for little endian numbers. - tgt = {'bool': 'b3300e66d2bb59e493d255d47c3a6cbe', - 'int16': '39624ead49ad67e37545744024d2648b', - 'int32': '5c4810373f979336c6c0c999996e47a1', - 'int64': 'ab126c15edff26f55c50d2b7e37391ac', - 'int8': 'ba71ccaffeeeb9eeb1860f8075020b9c', - 'uint16': '39624ead49ad67e37545744024d2648b', - 'uint32': '5c4810373f979336c6c0c999996e47a1', - 'uint64': 'ab126c15edff26f55c50d2b7e37391ac', - 'uint8': 'ba71ccaffeeeb9eeb1860f8075020b9c'} - - for dt in self.itype[1:]: - random = Generator(MT19937(1234)) - - # view as little endian for hash - if sys.byteorder == 'little': - val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint, - dtype=dt) - else: - val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint, - dtype=dt).byteswap() - - res = hashlib.md5(val.view(np.int8)).hexdigest() - assert_(tgt[np.dtype(dt).name] == res) - - # bools do not depend on endianness - random = Generator(MT19937(1234)) - val = random.integers(0, 2 - endpoint, size=1000, endpoint=endpoint, - dtype=bool).view(np.int8) - res = hashlib.md5(val).hexdigest() - assert_(tgt[np.dtype(bool).name] == res) - - def test_repeatability_broadcasting(self, endpoint): - for dt in self.itype: - lbnd = 0 if dt in (bool, np.bool_) else np.iinfo(dt).min - ubnd = 2 if dt in (bool, np.bool_) else np.iinfo(dt).max + 1 - ubnd = ubnd - 1 if endpoint else ubnd - - # view as little endian for hash - random = Generator(MT19937(1234)) - val = random.integers(lbnd, ubnd, size=1000, endpoint=endpoint, - dtype=dt) - - random = Generator(MT19937(1234)) - val_bc = random.integers([lbnd] * 1000, ubnd, endpoint=endpoint, - dtype=dt) - - assert_array_equal(val, val_bc) - - random = 
Generator(MT19937(1234)) - val_bc = random.integers([lbnd] * 1000, [ubnd] * 1000, - endpoint=endpoint, dtype=dt) - - assert_array_equal(val, val_bc) - - def test_int64_uint64_broadcast_exceptions(self, endpoint): - configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)), - np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0), - (-2**63-1, -2**63-1))} - for dtype in configs: - for config in configs[dtype]: - low, high = config - high = high - endpoint - low_a = np.array([[low]*10]) - high_a = np.array([high] * 10) - assert_raises(ValueError, random.integers, low, high, - endpoint=endpoint, dtype=dtype) - assert_raises(ValueError, random.integers, low_a, high, - endpoint=endpoint, dtype=dtype) - assert_raises(ValueError, random.integers, low, high_a, - endpoint=endpoint, dtype=dtype) - assert_raises(ValueError, random.integers, low_a, high_a, - endpoint=endpoint, dtype=dtype) - - low_o = np.array([[low]*10], dtype=object) - high_o = np.array([high] * 10, dtype=object) - assert_raises(ValueError, random.integers, low_o, high, - endpoint=endpoint, dtype=dtype) - assert_raises(ValueError, random.integers, low, high_o, - endpoint=endpoint, dtype=dtype) - assert_raises(ValueError, random.integers, low_o, high_o, - endpoint=endpoint, dtype=dtype) - - def test_int64_uint64_corner_case(self, endpoint): - # When stored in Numpy arrays, `lbnd` is casted - # as np.int64, and `ubnd` is casted as np.uint64. - # Checking whether `lbnd` >= `ubnd` used to be - # done solely via direct comparison, which is incorrect - # because when Numpy tries to compare both numbers, - # it casts both to np.float64 because there is - # no integer superset of np.int64 and np.uint64. However, - # `ubnd` is too large to be represented in np.float64, - # causing it be round down to np.iinfo(np.int64).max, - # leading to a ValueError because `lbnd` now equals - # the new `ubnd`. 
- - dt = np.int64 - tgt = np.iinfo(np.int64).max - lbnd = np.int64(np.iinfo(np.int64).max) - ubnd = np.uint64(np.iinfo(np.int64).max + 1 - endpoint) - - # None of these function calls should - # generate a ValueError now. - actual = random.integers(lbnd, ubnd, endpoint=endpoint, dtype=dt) - assert_equal(actual, tgt) - - def test_respect_dtype_singleton(self, endpoint): - # See gh-7203 - for dt in self.itype: - lbnd = 0 if dt is bool else np.iinfo(dt).min - ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 - ubnd = ubnd - 1 if endpoint else ubnd - dt = np.bool_ if dt is bool else dt - - sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt) - assert_equal(sample.dtype, dt) - - for dt in (bool, int, np.compat.long): - lbnd = 0 if dt is bool else np.iinfo(dt).min - ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 - ubnd = ubnd - 1 if endpoint else ubnd - - # gh-7284: Ensure that we get Python data types - sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt) - assert not hasattr(sample, 'dtype') - assert_equal(type(sample), dt) - - def test_respect_dtype_array(self, endpoint): - # See gh-7203 - for dt in self.itype: - lbnd = 0 if dt is bool else np.iinfo(dt).min - ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 - ubnd = ubnd - 1 if endpoint else ubnd - dt = np.bool_ if dt is bool else dt - - sample = self.rfunc([lbnd], [ubnd], endpoint=endpoint, dtype=dt) - assert_equal(sample.dtype, dt) - sample = self.rfunc([lbnd] * 2, [ubnd] * 2, endpoint=endpoint, - dtype=dt) - assert_equal(sample.dtype, dt) - - def test_zero_size(self, endpoint): - # See gh-7203 - for dt in self.itype: - sample = self.rfunc(0, 0, (3, 0, 4), endpoint=endpoint, dtype=dt) - assert sample.shape == (3, 0, 4) - assert sample.dtype == dt - assert self.rfunc(0, -10, 0, endpoint=endpoint, - dtype=dt).shape == (0,) - assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, - (3, 0, 4)) - assert_equal(random.integers(0, -10, size=0).shape, (0,)) - assert_equal(random.integers(10, 10, 
size=0).shape, (0,)) - - def test_error_byteorder(self): - other_byteord_dt = 'i4' - with pytest.raises(ValueError): - random.integers(0, 200, size=10, dtype=other_byteord_dt) - - # chi2max is the maximum acceptable chi-squared value. - @pytest.mark.slow - @pytest.mark.parametrize('sample_size,high,dtype,chi2max', - [(5000000, 5, np.int8, 125.0), # p-value ~4.6e-25 - (5000000, 7, np.uint8, 150.0), # p-value ~7.7e-30 - (10000000, 2500, np.int16, 3300.0), # p-value ~3.0e-25 - (50000000, 5000, np.uint16, 6500.0), # p-value ~3.5e-25 - ]) - def test_integers_small_dtype_chisquared(self, sample_size, high, - dtype, chi2max): - # Regression test for gh-14774. - samples = random.integers(high, size=sample_size, dtype=dtype) - - values, counts = np.unique(samples, return_counts=True) - expected = sample_size / high - chi2 = ((counts - expected)**2 / expected).sum() - assert chi2 < chi2max - - -class TestRandomDist(object): - # Make sure the random distribution returns the correct value for a - # given seed - - def setup(self): - self.seed = 1234567890 - - def test_integers(self): - random = Generator(MT19937(self.seed)) - actual = random.integers(-99, 99, size=(3, 2)) - desired = np.array([[-80, -56], [41, 37], [-83, -16]]) - assert_array_equal(actual, desired) - - def test_integers_masked(self): - # Test masked rejection sampling algorithm to generate array of - # uint32 in an interval. 
- random = Generator(MT19937(self.seed)) - actual = random.integers(0, 99, size=(3, 2), dtype=np.uint32) - desired = np.array([[9, 21], [70, 68], [8, 41]], dtype=np.uint32) - assert_array_equal(actual, desired) - - def test_integers_closed(self): - random = Generator(MT19937(self.seed)) - actual = random.integers(-99, 99, size=(3, 2), endpoint=True) - desired = np.array([[-80, -56], [ 41, 38], [-83, -15]]) - assert_array_equal(actual, desired) - - def test_integers_max_int(self): - # Tests whether integers with closed=True can generate the - # maximum allowed Python int that can be converted - # into a C long. Previous implementations of this - # method have thrown an OverflowError when attempting - # to generate this integer. - actual = random.integers(np.iinfo('l').max, np.iinfo('l').max, - endpoint=True) - - desired = np.iinfo('l').max - assert_equal(actual, desired) - - def test_random(self): - random = Generator(MT19937(self.seed)) - actual = random.random((3, 2)) - desired = np.array([[0.096999199829214, 0.707517457682192], - [0.084364834598269, 0.767731206553125], - [0.665069021359413, 0.715487190596693]]) - assert_array_almost_equal(actual, desired, decimal=15) - - random = Generator(MT19937(self.seed)) - actual = random.random() - assert_array_almost_equal(actual, desired[0, 0], decimal=15) - - def test_random_float(self): - random = Generator(MT19937(self.seed)) - actual = random.random((3, 2)) - desired = np.array([[0.0969992 , 0.70751746], - [0.08436483, 0.76773121], - [0.66506902, 0.71548719]]) - assert_array_almost_equal(actual, desired, decimal=7) - - def test_random_float_scalar(self): - random = Generator(MT19937(self.seed)) - actual = random.random(dtype=np.float32) - desired = 0.0969992 - assert_array_almost_equal(actual, desired, decimal=7) - - def test_random_unsupported_type(self): - assert_raises(TypeError, random.random, dtype='int32') - - def test_choice_uniform_replace(self): - random = Generator(MT19937(self.seed)) - actual = 
random.choice(4, 4) - desired = np.array([0, 0, 2, 2], dtype=np.int64) - assert_array_equal(actual, desired) - - def test_choice_nonuniform_replace(self): - random = Generator(MT19937(self.seed)) - actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) - desired = np.array([0, 1, 0, 1], dtype=np.int64) - assert_array_equal(actual, desired) - - def test_choice_uniform_noreplace(self): - random = Generator(MT19937(self.seed)) - actual = random.choice(4, 3, replace=False) - desired = np.array([2, 0, 3], dtype=np.int64) - assert_array_equal(actual, desired) - actual = random.choice(4, 4, replace=False, shuffle=False) - desired = np.arange(4, dtype=np.int64) - assert_array_equal(actual, desired) - - def test_choice_nonuniform_noreplace(self): - random = Generator(MT19937(self.seed)) - actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1]) - desired = np.array([0, 2, 3], dtype=np.int64) - assert_array_equal(actual, desired) - - def test_choice_noninteger(self): - random = Generator(MT19937(self.seed)) - actual = random.choice(['a', 'b', 'c', 'd'], 4) - desired = np.array(['a', 'a', 'c', 'c']) - assert_array_equal(actual, desired) - - def test_choice_multidimensional_default_axis(self): - random = Generator(MT19937(self.seed)) - actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 3) - desired = np.array([[0, 1], [0, 1], [4, 5]]) - assert_array_equal(actual, desired) - - def test_choice_multidimensional_custom_axis(self): - random = Generator(MT19937(self.seed)) - actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 1, axis=1) - desired = np.array([[0], [2], [4], [6]]) - assert_array_equal(actual, desired) - - def test_choice_exceptions(self): - sample = random.choice - assert_raises(ValueError, sample, -1, 3) - assert_raises(ValueError, sample, 3., 3) - assert_raises(ValueError, sample, [], 3) - assert_raises(ValueError, sample, [1, 2, 3, 4], 3, - p=[[0.25, 0.25], [0.25, 0.25]]) - assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2]) - 
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1]) - assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4]) - assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False) - # gh-13087 - assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False) - assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False) - assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False) - assert_raises(ValueError, sample, [1, 2, 3], 2, - replace=False, p=[1, 0, 0]) - - def test_choice_return_shape(self): - p = [0.1, 0.9] - # Check scalar - assert_(np.isscalar(random.choice(2, replace=True))) - assert_(np.isscalar(random.choice(2, replace=False))) - assert_(np.isscalar(random.choice(2, replace=True, p=p))) - assert_(np.isscalar(random.choice(2, replace=False, p=p))) - assert_(np.isscalar(random.choice([1, 2], replace=True))) - assert_(random.choice([None], replace=True) is None) - a = np.array([1, 2]) - arr = np.empty(1, dtype=object) - arr[0] = a - assert_(random.choice(arr, replace=True) is a) - - # Check 0-d array - s = tuple() - assert_(not np.isscalar(random.choice(2, s, replace=True))) - assert_(not np.isscalar(random.choice(2, s, replace=False))) - assert_(not np.isscalar(random.choice(2, s, replace=True, p=p))) - assert_(not np.isscalar(random.choice(2, s, replace=False, p=p))) - assert_(not np.isscalar(random.choice([1, 2], s, replace=True))) - assert_(random.choice([None], s, replace=True).ndim == 0) - a = np.array([1, 2]) - arr = np.empty(1, dtype=object) - arr[0] = a - assert_(random.choice(arr, s, replace=True).item() is a) - - # Check multi dimensional array - s = (2, 3) - p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2] - assert_equal(random.choice(6, s, replace=True).shape, s) - assert_equal(random.choice(6, s, replace=False).shape, s) - assert_equal(random.choice(6, s, replace=True, p=p).shape, s) - assert_equal(random.choice(6, s, replace=False, p=p).shape, s) - assert_equal(random.choice(np.arange(6), s, replace=True).shape, s) - - # Check 
zero-size - assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4)) - assert_equal(random.integers(0, -10, size=0).shape, (0,)) - assert_equal(random.integers(10, 10, size=0).shape, (0,)) - assert_equal(random.choice(0, size=0).shape, (0,)) - assert_equal(random.choice([], size=(0,)).shape, (0,)) - assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape, - (3, 0, 4)) - assert_raises(ValueError, random.choice, [], 10) - - def test_choice_nan_probabilities(self): - a = np.array([42, 1, 2]) - p = [None, None, None] - assert_raises(ValueError, random.choice, a, p=p) - - def test_choice_p_non_contiguous(self): - p = np.ones(10) / 5 - p[1::2] = 3.0 - random = Generator(MT19937(self.seed)) - non_contig = random.choice(5, 3, p=p[::2]) - random = Generator(MT19937(self.seed)) - contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2])) - assert_array_equal(non_contig, contig) - - def test_choice_return_type(self): - # gh 9867 - p = np.ones(4) / 4. - actual = random.choice(4, 2) - assert actual.dtype == np.int64 - actual = random.choice(4, 2, replace=False) - assert actual.dtype == np.int64 - actual = random.choice(4, 2, p=p) - assert actual.dtype == np.int64 - actual = random.choice(4, 2, p=p, replace=False) - assert actual.dtype == np.int64 - - def test_choice_large_sample(self): - import hashlib - - choice_hash = 'd44962a0b1e92f4a3373c23222244e21' - random = Generator(MT19937(self.seed)) - actual = random.choice(10000, 5000, replace=False) - if sys.byteorder != 'little': - actual = actual.byteswap() - res = hashlib.md5(actual.view(np.int8)).hexdigest() - assert_(choice_hash == res) - - def test_bytes(self): - random = Generator(MT19937(self.seed)) - actual = random.bytes(10) - desired = b'\x86\xf0\xd4\x18\xe1\x81\t8%\xdd' - assert_equal(actual, desired) - - def test_shuffle(self): - # Test lists, arrays (of various dtypes), and multidimensional versions - # of both, c-contiguous or not: - for conv in [lambda x: np.array([]), - lambda x: x, - lambda x: 
np.asarray(x).astype(np.int8), - lambda x: np.asarray(x).astype(np.float32), - lambda x: np.asarray(x).astype(np.complex64), - lambda x: np.asarray(x).astype(object), - lambda x: [(i, i) for i in x], - lambda x: np.asarray([[i, i] for i in x]), - lambda x: np.vstack([x, x]).T, - # gh-11442 - lambda x: (np.asarray([(i, i) for i in x], - [("a", int), ("b", int)]) - .view(np.recarray)), - # gh-4270 - lambda x: np.asarray([(i, i) for i in x], - [("a", object, (1,)), - ("b", np.int32, (1,))])]: - random = Generator(MT19937(self.seed)) - alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) - random.shuffle(alist) - actual = alist - desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7]) - assert_array_equal(actual, desired) - - def test_shuffle_custom_axis(self): - random = Generator(MT19937(self.seed)) - actual = np.arange(16).reshape((4, 4)) - random.shuffle(actual, axis=1) - desired = np.array([[ 0, 3, 1, 2], - [ 4, 7, 5, 6], - [ 8, 11, 9, 10], - [12, 15, 13, 14]]) - assert_array_equal(actual, desired) - random = Generator(MT19937(self.seed)) - actual = np.arange(16).reshape((4, 4)) - random.shuffle(actual, axis=-1) - assert_array_equal(actual, desired) - - def test_shuffle_axis_nonsquare(self): - y1 = np.arange(20).reshape(2, 10) - y2 = y1.copy() - random = Generator(MT19937(self.seed)) - random.shuffle(y1, axis=1) - random = Generator(MT19937(self.seed)) - random.shuffle(y2.T) - assert_array_equal(y1, y2) - - def test_shuffle_masked(self): - # gh-3263 - a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1) - b = np.ma.masked_values(np.arange(20) % 3 - 1, -1) - a_orig = a.copy() - b_orig = b.copy() - for i in range(50): - random.shuffle(a) - assert_equal( - sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask])) - random.shuffle(b) - assert_equal( - sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask])) - - def test_shuffle_exceptions(self): - random = Generator(MT19937(self.seed)) - arr = np.arange(10) - assert_raises(np.AxisError, random.shuffle, arr, 1) - arr 
= np.arange(9).reshape((3, 3)) - assert_raises(np.AxisError, random.shuffle, arr, 3) - assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None)) - arr = [[1, 2, 3], [4, 5, 6]] - assert_raises(NotImplementedError, random.shuffle, arr, 1) - - def test_permutation(self): - random = Generator(MT19937(self.seed)) - alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] - actual = random.permutation(alist) - desired = [4, 1, 9, 8, 0, 5, 3, 6, 2, 7] - assert_array_equal(actual, desired) - - random = Generator(MT19937(self.seed)) - arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T - actual = random.permutation(arr_2d) - assert_array_equal(actual, np.atleast_2d(desired).T) - - bad_x_str = "abcd" - assert_raises(np.AxisError, random.permutation, bad_x_str) - - bad_x_float = 1.2 - assert_raises(np.AxisError, random.permutation, bad_x_float) - - random = Generator(MT19937(self.seed)) - integer_val = 10 - desired = [3, 0, 8, 7, 9, 4, 2, 5, 1, 6] - - actual = random.permutation(integer_val) - assert_array_equal(actual, desired) - - def test_permutation_custom_axis(self): - a = np.arange(16).reshape((4, 4)) - desired = np.array([[ 0, 3, 1, 2], - [ 4, 7, 5, 6], - [ 8, 11, 9, 10], - [12, 15, 13, 14]]) - random = Generator(MT19937(self.seed)) - actual = random.permutation(a, axis=1) - assert_array_equal(actual, desired) - random = Generator(MT19937(self.seed)) - actual = random.permutation(a, axis=-1) - assert_array_equal(actual, desired) - - def test_permutation_exceptions(self): - random = Generator(MT19937(self.seed)) - arr = np.arange(10) - assert_raises(np.AxisError, random.permutation, arr, 1) - arr = np.arange(9).reshape((3, 3)) - assert_raises(np.AxisError, random.permutation, arr, 3) - assert_raises(TypeError, random.permutation, arr, slice(1, 2, None)) - - def test_beta(self): - random = Generator(MT19937(self.seed)) - actual = random.beta(.1, .9, size=(3, 2)) - desired = np.array( - [[1.083029353267698e-10, 2.449965303168024e-11], - [2.397085162969853e-02, 
3.590779671820755e-08], - [2.830254190078299e-04, 1.744709918330393e-01]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_binomial(self): - random = Generator(MT19937(self.seed)) - actual = random.binomial(100.123, .456, size=(3, 2)) - desired = np.array([[42, 41], - [42, 48], - [44, 50]]) - assert_array_equal(actual, desired) - - random = Generator(MT19937(self.seed)) - actual = random.binomial(100.123, .456) - desired = 42 - assert_array_equal(actual, desired) - - def test_chisquare(self): - random = Generator(MT19937(self.seed)) - actual = random.chisquare(50, size=(3, 2)) - desired = np.array([[32.9850547060149, 39.0219480493301], - [56.2006134779419, 57.3474165711485], - [55.4243733880198, 55.4209797925213]]) - assert_array_almost_equal(actual, desired, decimal=13) - - def test_dirichlet(self): - random = Generator(MT19937(self.seed)) - alpha = np.array([51.72840233779265162, 39.74494232180943953]) - actual = random.dirichlet(alpha, size=(3, 2)) - desired = np.array([[[0.5439892869558927, 0.45601071304410745], - [0.5588917345860708, 0.4411082654139292 ]], - [[0.5632074165063435, 0.43679258349365657], - [0.54862581112627, 0.45137418887373015]], - [[0.49961831357047226, 0.5003816864295278 ], - [0.52374806183482, 0.47625193816517997]]]) - assert_array_almost_equal(actual, desired, decimal=15) - bad_alpha = np.array([5.4e-01, -1.0e-16]) - assert_raises(ValueError, random.dirichlet, bad_alpha) - - random = Generator(MT19937(self.seed)) - alpha = np.array([51.72840233779265162, 39.74494232180943953]) - actual = random.dirichlet(alpha) - assert_array_almost_equal(actual, desired[0, 0], decimal=15) - - def test_dirichlet_size(self): - # gh-3173 - p = np.array([51.72840233779265162, 39.74494232180943953]) - assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) - assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) - assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) - assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 
2, 2)) - assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2)) - assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2)) - - assert_raises(TypeError, random.dirichlet, p, float(1)) - - def test_dirichlet_bad_alpha(self): - # gh-2089 - alpha = np.array([5.4e-01, -1.0e-16]) - assert_raises(ValueError, random.dirichlet, alpha) - - def test_dirichlet_alpha_non_contiguous(self): - a = np.array([51.72840233779265162, -1.0, 39.74494232180943953]) - alpha = a[::2] - random = Generator(MT19937(self.seed)) - non_contig = random.dirichlet(alpha, size=(3, 2)) - random = Generator(MT19937(self.seed)) - contig = random.dirichlet(np.ascontiguousarray(alpha), - size=(3, 2)) - assert_array_almost_equal(non_contig, contig) - - def test_exponential(self): - random = Generator(MT19937(self.seed)) - actual = random.exponential(1.1234, size=(3, 2)) - desired = np.array([[0.098845481066258, 1.560752510746964], - [0.075730916041636, 1.769098974710777], - [1.488602544592235, 2.49684815275751 ]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_exponential_0(self): - assert_equal(random.exponential(scale=0), 0) - assert_raises(ValueError, random.exponential, scale=-0.) - - def test_f(self): - random = Generator(MT19937(self.seed)) - actual = random.f(12, 77, size=(3, 2)) - desired = np.array([[0.461720027077085, 1.100441958872451], - [1.100337455217484, 0.91421736740018 ], - [0.500811891303113, 0.826802454552058]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_gamma(self): - random = Generator(MT19937(self.seed)) - actual = random.gamma(5, 3, size=(3, 2)) - desired = np.array([[ 5.03850858902096, 7.9228656732049 ], - [18.73983605132985, 19.57961681699238], - [18.17897755150825, 18.17653912505234]]) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_gamma_0(self): - assert_equal(random.gamma(shape=0, scale=0), 0) - assert_raises(ValueError, random.gamma, shape=-0., scale=-0.) 
- - def test_geometric(self): - random = Generator(MT19937(self.seed)) - actual = random.geometric(.123456789, size=(3, 2)) - desired = np.array([[ 1, 10], - [ 1, 12], - [ 9, 10]]) - assert_array_equal(actual, desired) - - def test_geometric_exceptions(self): - assert_raises(ValueError, random.geometric, 1.1) - assert_raises(ValueError, random.geometric, [1.1] * 10) - assert_raises(ValueError, random.geometric, -0.1) - assert_raises(ValueError, random.geometric, [-0.1] * 10) - with np.errstate(invalid='ignore'): - assert_raises(ValueError, random.geometric, np.nan) - assert_raises(ValueError, random.geometric, [np.nan] * 10) - - def test_gumbel(self): - random = Generator(MT19937(self.seed)) - actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) - desired = np.array([[ 4.688397515056245, -0.289514845417841], - [ 4.981176042584683, -0.633224272589149], - [-0.055915275687488, -0.333962478257953]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_gumbel_0(self): - assert_equal(random.gumbel(scale=0), 0) - assert_raises(ValueError, random.gumbel, scale=-0.) 
- - def test_hypergeometric(self): - random = Generator(MT19937(self.seed)) - actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2)) - desired = np.array([[ 9, 9], - [ 9, 9], - [10, 9]]) - assert_array_equal(actual, desired) - - # Test nbad = 0 - actual = random.hypergeometric(5, 0, 3, size=4) - desired = np.array([3, 3, 3, 3]) - assert_array_equal(actual, desired) - - actual = random.hypergeometric(15, 0, 12, size=4) - desired = np.array([12, 12, 12, 12]) - assert_array_equal(actual, desired) - - # Test ngood = 0 - actual = random.hypergeometric(0, 5, 3, size=4) - desired = np.array([0, 0, 0, 0]) - assert_array_equal(actual, desired) - - actual = random.hypergeometric(0, 15, 12, size=4) - desired = np.array([0, 0, 0, 0]) - assert_array_equal(actual, desired) - - def test_laplace(self): - random = Generator(MT19937(self.seed)) - actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) - desired = np.array([[-3.156353949272393, 1.195863024830054], - [-3.435458081645966, 1.656882398925444], - [ 0.924824032467446, 1.251116432209336]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_laplace_0(self): - assert_equal(random.laplace(scale=0), 0) - assert_raises(ValueError, random.laplace, scale=-0.) 
- - def test_logistic(self): - random = Generator(MT19937(self.seed)) - actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) - desired = np.array([[-4.338584631510999, 1.890171436749954], - [-4.64547787337966 , 2.514545562919217], - [ 1.495389489198666, 1.967827627577474]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_lognormal(self): - random = Generator(MT19937(self.seed)) - actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) - desired = np.array([[ 0.0268252166335, 13.9534486483053], - [ 0.1204014788936, 2.2422077497792], - [ 4.2484199496128, 12.0093343977523]]) - assert_array_almost_equal(actual, desired, decimal=13) - - def test_lognormal_0(self): - assert_equal(random.lognormal(sigma=0), 1) - assert_raises(ValueError, random.lognormal, sigma=-0.) - - def test_logseries(self): - random = Generator(MT19937(self.seed)) - actual = random.logseries(p=.923456789, size=(3, 2)) - desired = np.array([[14, 17], - [3, 18], - [5, 1]]) - assert_array_equal(actual, desired) - - def test_logseries_exceptions(self): - with np.errstate(invalid='ignore'): - assert_raises(ValueError, random.logseries, np.nan) - assert_raises(ValueError, random.logseries, [np.nan] * 10) - - def test_multinomial(self): - random = Generator(MT19937(self.seed)) - actual = random.multinomial(20, [1 / 6.] 
* 6, size=(3, 2)) - desired = np.array([[[1, 5, 1, 6, 4, 3], - [4, 2, 6, 2, 4, 2]], - [[5, 3, 2, 6, 3, 1], - [4, 4, 0, 2, 3, 7]], - [[6, 3, 1, 5, 3, 2], - [5, 5, 3, 1, 2, 4]]]) - assert_array_equal(actual, desired) - - @pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"]) - def test_multivariate_normal(self, method): - random = Generator(MT19937(self.seed)) - mean = (.123456789, 10) - cov = [[1, 0], [0, 1]] - size = (3, 2) - actual = random.multivariate_normal(mean, cov, size, method=method) - desired = np.array([[[-1.747478062846581, 11.25613495182354 ], - [-0.9967333370066214, 10.342002097029821 ]], - [[ 0.7850019631242964, 11.181113712443013 ], - [ 0.8901349653255224, 8.873825399642492 ]], - [[ 0.7130260107430003, 9.551628690083056 ], - [ 0.7127098726541128, 11.991709234143173 ]]]) - - assert_array_almost_equal(actual, desired, decimal=15) - - # Check for default size, was raising deprecation warning - actual = random.multivariate_normal(mean, cov, method=method) - desired = np.array([0.233278563284287, 9.424140804347195]) - assert_array_almost_equal(actual, desired, decimal=15) - # Check that non symmetric covariance input raises exception when - # check_valid='raises' if using default svd method. 
- mean = [0, 0] - cov = [[1, 2], [1, 2]] - assert_raises(ValueError, random.multivariate_normal, mean, cov, - check_valid='raise') - - # Check that non positive-semidefinite covariance warns with - # RuntimeWarning - cov = [[1, 2], [2, 1]] - assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov) - assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov, - method='eigh') - assert_raises(LinAlgError, random.multivariate_normal, mean, cov, - method='cholesky') - - # and that it doesn't warn with RuntimeWarning check_valid='ignore' - assert_no_warnings(random.multivariate_normal, mean, cov, - check_valid='ignore') - - # and that it raises with RuntimeWarning check_valid='raises' - assert_raises(ValueError, random.multivariate_normal, mean, cov, - check_valid='raise') - assert_raises(ValueError, random.multivariate_normal, mean, cov, - check_valid='raise', method='eigh') - - cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) - with suppress_warnings() as sup: - random.multivariate_normal(mean, cov, method=method) - w = sup.record(RuntimeWarning) - assert len(w) == 0 - - mu = np.zeros(2) - cov = np.eye(2) - assert_raises(ValueError, random.multivariate_normal, mean, cov, - check_valid='other') - assert_raises(ValueError, random.multivariate_normal, - np.zeros((2, 1, 1)), cov) - assert_raises(ValueError, random.multivariate_normal, - mu, np.empty((3, 2))) - assert_raises(ValueError, random.multivariate_normal, - mu, np.eye(3)) - - def test_negative_binomial(self): - random = Generator(MT19937(self.seed)) - actual = random.negative_binomial(n=100, p=.12345, size=(3, 2)) - desired = np.array([[543, 727], - [775, 760], - [600, 674]]) - assert_array_equal(actual, desired) - - def test_negative_binomial_exceptions(self): - with np.errstate(invalid='ignore'): - assert_raises(ValueError, random.negative_binomial, 100, np.nan) - assert_raises(ValueError, random.negative_binomial, 100, - [np.nan] * 10) - - def test_noncentral_chisquare(self): - random = 
Generator(MT19937(self.seed)) - actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) - desired = np.array([[ 1.70561552362133, 15.97378184942111], - [13.71483425173724, 20.17859633310629], - [11.3615477156643 , 3.67891108738029]]) - assert_array_almost_equal(actual, desired, decimal=14) - - actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) - desired = np.array([[9.41427665607629e-04, 1.70473157518850e-04], - [1.14554372041263e+00, 1.38187755933435e-03], - [1.90659181905387e+00, 1.21772577941822e+00]]) - assert_array_almost_equal(actual, desired, decimal=14) - - random = Generator(MT19937(self.seed)) - actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) - desired = np.array([[0.82947954590419, 1.80139670767078], - [6.58720057417794, 7.00491463609814], - [6.31101879073157, 6.30982307753005]]) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_noncentral_f(self): - random = Generator(MT19937(self.seed)) - actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1, - size=(3, 2)) - desired = np.array([[0.060310671139 , 0.23866058175939], - [0.86860246709073, 0.2668510459738 ], - [0.23375780078364, 1.88922102885943]]) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_noncentral_f_nan(self): - random = Generator(MT19937(self.seed)) - actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan) - assert np.isnan(actual) - - def test_normal(self): - random = Generator(MT19937(self.seed)) - actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2)) - desired = np.array([[-3.618412914693162, 2.635726692647081], - [-2.116923463013243, 0.807460983059643], - [ 1.446547137248593, 2.485684213886024]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_normal_0(self): - assert_equal(random.normal(scale=0), 0) - assert_raises(ValueError, random.normal, scale=-0.) 
- - def test_pareto(self): - random = Generator(MT19937(self.seed)) - actual = random.pareto(a=.123456789, size=(3, 2)) - desired = np.array([[1.0394926776069018e+00, 7.7142534343505773e+04], - [7.2640150889064703e-01, 3.4650454783825594e+05], - [4.5852344481994740e+04, 6.5851383009539105e+07]]) - # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this - # matrix differs by 24 nulps. Discussion: - # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html - # Consensus is that this is probably some gcc quirk that affects - # rounding but not in any important way, so we just use a looser - # tolerance on this test: - np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) - - def test_poisson(self): - random = Generator(MT19937(self.seed)) - actual = random.poisson(lam=.123456789, size=(3, 2)) - desired = np.array([[0, 0], - [0, 0], - [0, 0]]) - assert_array_equal(actual, desired) - - def test_poisson_exceptions(self): - lambig = np.iinfo('int64').max - lamneg = -1 - assert_raises(ValueError, random.poisson, lamneg) - assert_raises(ValueError, random.poisson, [lamneg] * 10) - assert_raises(ValueError, random.poisson, lambig) - assert_raises(ValueError, random.poisson, [lambig] * 10) - with np.errstate(invalid='ignore'): - assert_raises(ValueError, random.poisson, np.nan) - assert_raises(ValueError, random.poisson, [np.nan] * 10) - - def test_power(self): - random = Generator(MT19937(self.seed)) - actual = random.power(a=.123456789, size=(3, 2)) - desired = np.array([[1.977857368842754e-09, 9.806792196620341e-02], - [2.482442984543471e-10, 1.527108843266079e-01], - [8.188283434244285e-02, 3.950547209346948e-01]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_rayleigh(self): - random = Generator(MT19937(self.seed)) - actual = random.rayleigh(scale=10, size=(3, 2)) - desired = np.array([[ 4.51734079831581, 15.6802442485758 ], - [ 4.19850651287094, 17.08718809823704], - [14.7907457708776 , 
15.85545333419775]]) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_rayleigh_0(self): - assert_equal(random.rayleigh(scale=0), 0) - assert_raises(ValueError, random.rayleigh, scale=-0.) - - def test_standard_cauchy(self): - random = Generator(MT19937(self.seed)) - actual = random.standard_cauchy(size=(3, 2)) - desired = np.array([[-1.489437778266206, -3.275389641569784], - [ 0.560102864910406, -0.680780916282552], - [-1.314912905226277, 0.295852965660225]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_standard_exponential(self): - random = Generator(MT19937(self.seed)) - actual = random.standard_exponential(size=(3, 2), method='inv') - desired = np.array([[0.102031839440643, 1.229350298474972], - [0.088137284693098, 1.459859985522667], - [1.093830802293668, 1.256977002164613]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_standard_expoential_type_error(self): - assert_raises(TypeError, random.standard_exponential, dtype=np.int32) - - def test_standard_gamma(self): - random = Generator(MT19937(self.seed)) - actual = random.standard_gamma(shape=3, size=(3, 2)) - desired = np.array([[0.62970724056362, 1.22379851271008], - [3.899412530884 , 4.12479964250139], - [3.74994102464584, 3.74929307690815]]) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_standard_gammma_scalar_float(self): - random = Generator(MT19937(self.seed)) - actual = random.standard_gamma(3, dtype=np.float32) - desired = 2.9242148399353027 - assert_array_almost_equal(actual, desired, decimal=6) - - def test_standard_gamma_float(self): - random = Generator(MT19937(self.seed)) - actual = random.standard_gamma(shape=3, size=(3, 2)) - desired = np.array([[0.62971, 1.2238 ], - [3.89941, 4.1248 ], - [3.74994, 3.74929]]) - assert_array_almost_equal(actual, desired, decimal=5) - - def test_standard_gammma_float_out(self): - actual = np.zeros((3, 2), dtype=np.float32) - random = Generator(MT19937(self.seed)) - 
random.standard_gamma(10.0, out=actual, dtype=np.float32) - desired = np.array([[10.14987, 7.87012], - [ 9.46284, 12.56832], - [13.82495, 7.81533]], dtype=np.float32) - assert_array_almost_equal(actual, desired, decimal=5) - - random = Generator(MT19937(self.seed)) - random.standard_gamma(10.0, out=actual, size=(3, 2), dtype=np.float32) - assert_array_almost_equal(actual, desired, decimal=5) - - def test_standard_gamma_unknown_type(self): - assert_raises(TypeError, random.standard_gamma, 1., - dtype='int32') - - def test_out_size_mismatch(self): - out = np.zeros(10) - assert_raises(ValueError, random.standard_gamma, 10.0, size=20, - out=out) - assert_raises(ValueError, random.standard_gamma, 10.0, size=(10, 1), - out=out) - - def test_standard_gamma_0(self): - assert_equal(random.standard_gamma(shape=0), 0) - assert_raises(ValueError, random.standard_gamma, shape=-0.) - - def test_standard_normal(self): - random = Generator(MT19937(self.seed)) - actual = random.standard_normal(size=(3, 2)) - desired = np.array([[-1.870934851846581, 1.25613495182354 ], - [-1.120190126006621, 0.342002097029821], - [ 0.661545174124296, 1.181113712443012]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_standard_normal_unsupported_type(self): - assert_raises(TypeError, random.standard_normal, dtype=np.int32) - - def test_standard_t(self): - random = Generator(MT19937(self.seed)) - actual = random.standard_t(df=10, size=(3, 2)) - desired = np.array([[-1.484666193042647, 0.30597891831161 ], - [ 1.056684299648085, -0.407312602088507], - [ 0.130704414281157, -2.038053410490321]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_triangular(self): - random = Generator(MT19937(self.seed)) - actual = random.triangular(left=5.12, mode=10.23, right=20.34, - size=(3, 2)) - desired = np.array([[ 7.86664070590917, 13.6313848513185 ], - [ 7.68152445215983, 14.36169131136546], - [13.16105603911429, 13.72341621856971]]) - assert_array_almost_equal(actual, 
desired, decimal=14) - - def test_uniform(self): - random = Generator(MT19937(self.seed)) - actual = random.uniform(low=1.23, high=10.54, size=(3, 2)) - desired = np.array([[2.13306255040998 , 7.816987531021207], - [2.015436610109887, 8.377577533009589], - [7.421792588856135, 7.891185744455209]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_uniform_range_bounds(self): - fmin = np.finfo('float').min - fmax = np.finfo('float').max - - func = random.uniform - assert_raises(OverflowError, func, -np.inf, 0) - assert_raises(OverflowError, func, 0, np.inf) - assert_raises(OverflowError, func, fmin, fmax) - assert_raises(OverflowError, func, [-np.inf], [0]) - assert_raises(OverflowError, func, [0], [np.inf]) - - # (fmax / 1e17) - fmin is within range, so this should not throw - # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX > - # DBL_MAX by increasing fmin a bit - random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17) - - def test_scalar_exception_propagation(self): - # Tests that exceptions are correctly propagated in distributions - # when called with objects that throw exceptions when converted to - # scalars. 
- # - # Regression test for gh: 8865 - - class ThrowingFloat(np.ndarray): - def __float__(self): - raise TypeError - - throwing_float = np.array(1.0).view(ThrowingFloat) - assert_raises(TypeError, random.uniform, throwing_float, - throwing_float) - - class ThrowingInteger(np.ndarray): - def __int__(self): - raise TypeError - - throwing_int = np.array(1).view(ThrowingInteger) - assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1) - - def test_vonmises(self): - random = Generator(MT19937(self.seed)) - actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) - desired = np.array([[ 1.107972248690106, 2.841536476232361], - [ 1.832602376042457, 1.945511926976032], - [-0.260147475776542, 2.058047492231698]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_vonmises_small(self): - # check infinite loop, gh-4720 - random = Generator(MT19937(self.seed)) - r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6) - assert_(np.isfinite(r).all()) - - def test_vonmises_nan(self): - random = Generator(MT19937(self.seed)) - r = random.vonmises(mu=0., kappa=np.nan) - assert_(np.isnan(r)) - - def test_wald(self): - random = Generator(MT19937(self.seed)) - actual = random.wald(mean=1.23, scale=1.54, size=(3, 2)) - desired = np.array([[0.26871721804551, 3.2233942732115 ], - [2.20328374987066, 2.40958405189353], - [2.07093587449261, 0.73073890064369]]) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_weibull(self): - random = Generator(MT19937(self.seed)) - actual = random.weibull(a=1.23, size=(3, 2)) - desired = np.array([[0.138613914769468, 1.306463419753191], - [0.111623365934763, 1.446570494646721], - [1.257145775276011, 1.914247725027957]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_weibull_0(self): - random = Generator(MT19937(self.seed)) - assert_equal(random.weibull(a=0, size=12), np.zeros(12)) - assert_raises(ValueError, random.weibull, a=-0.) 
- - def test_zipf(self): - random = Generator(MT19937(self.seed)) - actual = random.zipf(a=1.23, size=(3, 2)) - desired = np.array([[ 1, 1], - [ 10, 867], - [354, 2]]) - assert_array_equal(actual, desired) - - -class TestBroadcast(object): - # tests that functions that broadcast behave - # correctly when presented with non-scalar arguments - def setup(self): - self.seed = 123456789 - - - def test_uniform(self): - random = Generator(MT19937(self.seed)) - low = [0] - high = [1] - uniform = random.uniform - desired = np.array([0.16693771389729, 0.19635129550675, 0.75563050964095]) - - random = Generator(MT19937(self.seed)) - actual = random.uniform(low * 3, high) - assert_array_almost_equal(actual, desired, decimal=14) - - random = Generator(MT19937(self.seed)) - actual = random.uniform(low, high * 3) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_normal(self): - loc = [0] - scale = [1] - bad_scale = [-1] - random = Generator(MT19937(self.seed)) - desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097]) - - random = Generator(MT19937(self.seed)) - actual = random.normal(loc * 3, scale) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, random.normal, loc * 3, bad_scale) - - random = Generator(MT19937(self.seed)) - normal = random.normal - actual = normal(loc, scale * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, normal, loc, bad_scale * 3) - - def test_beta(self): - a = [1] - b = [2] - bad_a = [-1] - bad_b = [-2] - desired = np.array([0.18719338682602, 0.73234824491364, 0.17928615186455]) - - random = Generator(MT19937(self.seed)) - beta = random.beta - actual = beta(a * 3, b) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, beta, bad_a * 3, b) - assert_raises(ValueError, beta, a * 3, bad_b) - - random = Generator(MT19937(self.seed)) - actual = random.beta(a, b * 3) - assert_array_almost_equal(actual, desired, 
decimal=14) - - def test_exponential(self): - scale = [1] - bad_scale = [-1] - desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629]) - - random = Generator(MT19937(self.seed)) - actual = random.exponential(scale * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, random.exponential, bad_scale * 3) - - def test_standard_gamma(self): - shape = [1] - bad_shape = [-1] - desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629]) - - random = Generator(MT19937(self.seed)) - std_gamma = random.standard_gamma - actual = std_gamma(shape * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, std_gamma, bad_shape * 3) - - def test_gamma(self): - shape = [1] - scale = [2] - bad_shape = [-1] - bad_scale = [-2] - desired = np.array([1.34491986425611, 0.42760990636187, 1.4355697857258]) - - random = Generator(MT19937(self.seed)) - gamma = random.gamma - actual = gamma(shape * 3, scale) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gamma, bad_shape * 3, scale) - assert_raises(ValueError, gamma, shape * 3, bad_scale) - - random = Generator(MT19937(self.seed)) - gamma = random.gamma - actual = gamma(shape, scale * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gamma, bad_shape, scale * 3) - assert_raises(ValueError, gamma, shape, bad_scale * 3) - - def test_f(self): - dfnum = [1] - dfden = [2] - bad_dfnum = [-1] - bad_dfden = [-2] - desired = np.array([0.07765056244107, 7.72951397913186, 0.05786093891763]) - - random = Generator(MT19937(self.seed)) - f = random.f - actual = f(dfnum * 3, dfden) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, f, bad_dfnum * 3, dfden) - assert_raises(ValueError, f, dfnum * 3, bad_dfden) - - random = Generator(MT19937(self.seed)) - f = random.f - actual = f(dfnum, dfden * 3) - assert_array_almost_equal(actual, desired, decimal=14) - 
assert_raises(ValueError, f, bad_dfnum, dfden * 3) - assert_raises(ValueError, f, dfnum, bad_dfden * 3) - - def test_noncentral_f(self): - dfnum = [2] - dfden = [3] - nonc = [4] - bad_dfnum = [0] - bad_dfden = [-1] - bad_nonc = [-2] - desired = np.array([2.02434240411421, 12.91838601070124, 1.24395160354629]) - - random = Generator(MT19937(self.seed)) - nonc_f = random.noncentral_f - actual = nonc_f(dfnum * 3, dfden, nonc) - assert_array_almost_equal(actual, desired, decimal=14) - assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3))) - - assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) - assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) - assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) - - random = Generator(MT19937(self.seed)) - nonc_f = random.noncentral_f - actual = nonc_f(dfnum, dfden * 3, nonc) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) - assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) - assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) - - random = Generator(MT19937(self.seed)) - nonc_f = random.noncentral_f - actual = nonc_f(dfnum, dfden, nonc * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) - assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) - assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) - - def test_noncentral_f_small_df(self): - random = Generator(MT19937(self.seed)) - desired = np.array([0.04714867120827, 0.1239390327694]) - actual = random.noncentral_f(0.9, 0.9, 2, size=2) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_chisquare(self): - df = [1] - bad_df = [-1] - desired = np.array([0.05573640064251, 1.47220224353539, 2.9469379318589]) - - random = Generator(MT19937(self.seed)) - actual = random.chisquare(df * 3) - assert_array_almost_equal(actual, desired, decimal=14) - 
assert_raises(ValueError, random.chisquare, bad_df * 3) - - def test_noncentral_chisquare(self): - df = [1] - nonc = [2] - bad_df = [-1] - bad_nonc = [-2] - desired = np.array([0.07710766249436, 5.27829115110304, 0.630732147399]) - - random = Generator(MT19937(self.seed)) - nonc_chi = random.noncentral_chisquare - actual = nonc_chi(df * 3, nonc) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) - assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) - - random = Generator(MT19937(self.seed)) - nonc_chi = random.noncentral_chisquare - actual = nonc_chi(df, nonc * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) - assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) - - def test_standard_t(self): - df = [1] - bad_df = [-1] - desired = np.array([-1.39498829447098, -1.23058658835223, 0.17207021065983]) - - random = Generator(MT19937(self.seed)) - actual = random.standard_t(df * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, random.standard_t, bad_df * 3) - - def test_vonmises(self): - mu = [2] - kappa = [1] - bad_kappa = [-1] - desired = np.array([2.25935584988528, 2.23326261461399, -2.84152146503326]) - - random = Generator(MT19937(self.seed)) - actual = random.vonmises(mu * 3, kappa) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, random.vonmises, mu * 3, bad_kappa) - - random = Generator(MT19937(self.seed)) - actual = random.vonmises(mu, kappa * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, random.vonmises, mu, bad_kappa * 3) - - def test_pareto(self): - a = [1] - bad_a = [-1] - desired = np.array([0.95905052946317, 0.2383810889437 , 1.04988745750013]) - - random = Generator(MT19937(self.seed)) - actual = random.pareto(a * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, 
random.pareto, bad_a * 3) - - def test_weibull(self): - a = [1] - bad_a = [-1] - desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629]) - - random = Generator(MT19937(self.seed)) - actual = random.weibull(a * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, random.weibull, bad_a * 3) - - def test_power(self): - a = [1] - bad_a = [-1] - desired = np.array([0.48954864361052, 0.19249412888486, 0.51216834058807]) - - random = Generator(MT19937(self.seed)) - actual = random.power(a * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, random.power, bad_a * 3) - - def test_laplace(self): - loc = [0] - scale = [1] - bad_scale = [-1] - desired = np.array([-1.09698732625119, -0.93470271947368, 0.71592671378202]) - - random = Generator(MT19937(self.seed)) - laplace = random.laplace - actual = laplace(loc * 3, scale) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, laplace, loc * 3, bad_scale) - - random = Generator(MT19937(self.seed)) - laplace = random.laplace - actual = laplace(loc, scale * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, laplace, loc, bad_scale * 3) - - def test_gumbel(self): - loc = [0] - scale = [1] - bad_scale = [-1] - desired = np.array([1.70020068231762, 1.52054354273631, -0.34293267607081]) - - random = Generator(MT19937(self.seed)) - gumbel = random.gumbel - actual = gumbel(loc * 3, scale) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gumbel, loc * 3, bad_scale) - - random = Generator(MT19937(self.seed)) - gumbel = random.gumbel - actual = gumbel(loc, scale * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gumbel, loc, bad_scale * 3) - - def test_logistic(self): - loc = [0] - scale = [1] - bad_scale = [-1] - desired = np.array([-1.607487640433, -1.40925686003678, 1.12887112820397]) - - random = 
Generator(MT19937(self.seed)) - actual = random.logistic(loc * 3, scale) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, random.logistic, loc * 3, bad_scale) - - random = Generator(MT19937(self.seed)) - actual = random.logistic(loc, scale * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, random.logistic, loc, bad_scale * 3) - assert_equal(random.logistic(1.0, 0.0), 1.0) - - def test_lognormal(self): - mean = [0] - sigma = [1] - bad_sigma = [-1] - desired = np.array([0.67884390500697, 2.21653186290321, 1.01990310084276]) - - random = Generator(MT19937(self.seed)) - lognormal = random.lognormal - actual = lognormal(mean * 3, sigma) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, lognormal, mean * 3, bad_sigma) - - random = Generator(MT19937(self.seed)) - actual = random.lognormal(mean, sigma * 3) - assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3) - - def test_rayleigh(self): - scale = [1] - bad_scale = [-1] - desired = np.array([0.60439534475066, 0.66120048396359, 1.67873398389499]) - - random = Generator(MT19937(self.seed)) - actual = random.rayleigh(scale * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, random.rayleigh, bad_scale * 3) - - def test_wald(self): - mean = [0.5] - scale = [1] - bad_mean = [0] - bad_scale = [-2] - desired = np.array([0.38052407392905, 0.50701641508592, 0.484935249864]) - - random = Generator(MT19937(self.seed)) - actual = random.wald(mean * 3, scale) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, random.wald, bad_mean * 3, scale) - assert_raises(ValueError, random.wald, mean * 3, bad_scale) - - random = Generator(MT19937(self.seed)) - actual = random.wald(mean, scale * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, random.wald, bad_mean, scale * 3) - assert_raises(ValueError, random.wald, 
mean, bad_scale * 3) - - def test_triangular(self): - left = [1] - right = [3] - mode = [2] - bad_left_one = [3] - bad_mode_one = [4] - bad_left_two, bad_mode_two = right * 2 - desired = np.array([1.57781954604754, 1.62665986867957, 2.30090130831326]) - - random = Generator(MT19937(self.seed)) - triangular = random.triangular - actual = triangular(left * 3, mode, right) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one * 3, mode, right) - assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) - assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, - right) - - random = Generator(MT19937(self.seed)) - triangular = random.triangular - actual = triangular(left, mode * 3, right) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) - assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) - assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, - right) - - random = Generator(MT19937(self.seed)) - triangular = random.triangular - actual = triangular(left, mode, right * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) - assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) - assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, - right * 3) - - assert_raises(ValueError, triangular, 10., 0., 20.) - assert_raises(ValueError, triangular, 10., 25., 20.) - assert_raises(ValueError, triangular, 10., 10., 10.) 
- - def test_binomial(self): - n = [1] - p = [0.5] - bad_n = [-1] - bad_p_one = [-1] - bad_p_two = [1.5] - desired = np.array([0, 0, 1]) - - random = Generator(MT19937(self.seed)) - binom = random.binomial - actual = binom(n * 3, p) - assert_array_equal(actual, desired) - assert_raises(ValueError, binom, bad_n * 3, p) - assert_raises(ValueError, binom, n * 3, bad_p_one) - assert_raises(ValueError, binom, n * 3, bad_p_two) - - random = Generator(MT19937(self.seed)) - actual = random.binomial(n, p * 3) - assert_array_equal(actual, desired) - assert_raises(ValueError, binom, bad_n, p * 3) - assert_raises(ValueError, binom, n, bad_p_one * 3) - assert_raises(ValueError, binom, n, bad_p_two * 3) - - def test_negative_binomial(self): - n = [1] - p = [0.5] - bad_n = [-1] - bad_p_one = [-1] - bad_p_two = [1.5] - desired = np.array([0, 2, 1], dtype=np.int64) - - random = Generator(MT19937(self.seed)) - neg_binom = random.negative_binomial - actual = neg_binom(n * 3, p) - assert_array_equal(actual, desired) - assert_raises(ValueError, neg_binom, bad_n * 3, p) - assert_raises(ValueError, neg_binom, n * 3, bad_p_one) - assert_raises(ValueError, neg_binom, n * 3, bad_p_two) - - random = Generator(MT19937(self.seed)) - neg_binom = random.negative_binomial - actual = neg_binom(n, p * 3) - assert_array_equal(actual, desired) - assert_raises(ValueError, neg_binom, bad_n, p * 3) - assert_raises(ValueError, neg_binom, n, bad_p_one * 3) - assert_raises(ValueError, neg_binom, n, bad_p_two * 3) - - def test_poisson(self): - - lam = [1] - bad_lam_one = [-1] - desired = np.array([0, 0, 3]) - - random = Generator(MT19937(self.seed)) - max_lam = random._poisson_lam_max - bad_lam_two = [max_lam * 2] - poisson = random.poisson - actual = poisson(lam * 3) - assert_array_equal(actual, desired) - assert_raises(ValueError, poisson, bad_lam_one * 3) - assert_raises(ValueError, poisson, bad_lam_two * 3) - - def test_zipf(self): - a = [2] - bad_a = [0] - desired = np.array([1, 8, 1]) - - random = 
Generator(MT19937(self.seed)) - zipf = random.zipf - actual = zipf(a * 3) - assert_array_equal(actual, desired) - assert_raises(ValueError, zipf, bad_a * 3) - with np.errstate(invalid='ignore'): - assert_raises(ValueError, zipf, np.nan) - assert_raises(ValueError, zipf, [0, 0, np.nan]) - - def test_geometric(self): - p = [0.5] - bad_p_one = [-1] - bad_p_two = [1.5] - desired = np.array([1, 1, 3]) - - random = Generator(MT19937(self.seed)) - geometric = random.geometric - actual = geometric(p * 3) - assert_array_equal(actual, desired) - assert_raises(ValueError, geometric, bad_p_one * 3) - assert_raises(ValueError, geometric, bad_p_two * 3) - - def test_hypergeometric(self): - ngood = [1] - nbad = [2] - nsample = [2] - bad_ngood = [-1] - bad_nbad = [-2] - bad_nsample_one = [-1] - bad_nsample_two = [4] - desired = np.array([0, 0, 1]) - - random = Generator(MT19937(self.seed)) - actual = random.hypergeometric(ngood * 3, nbad, nsample) - assert_array_equal(actual, desired) - assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample) - assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample) - assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one) - assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two) - - random = Generator(MT19937(self.seed)) - actual = random.hypergeometric(ngood, nbad * 3, nsample) - assert_array_equal(actual, desired) - assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample) - assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample) - assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one) - assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two) - - random = Generator(MT19937(self.seed)) - hypergeom = random.hypergeometric - actual = hypergeom(ngood, nbad, nsample * 3) - assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, 
bad_ngood, nbad, nsample * 3) - assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) - assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) - assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) - - assert_raises(ValueError, hypergeom, -1, 10, 20) - assert_raises(ValueError, hypergeom, 10, -1, 20) - assert_raises(ValueError, hypergeom, 10, 10, -1) - assert_raises(ValueError, hypergeom, 10, 10, 25) - - # ValueError for arguments that are too big. - assert_raises(ValueError, hypergeom, 2**30, 10, 20) - assert_raises(ValueError, hypergeom, 999, 2**31, 50) - assert_raises(ValueError, hypergeom, 999, [2**29, 2**30], 1000) - - def test_logseries(self): - p = [0.5] - bad_p_one = [2] - bad_p_two = [-1] - desired = np.array([1, 1, 1]) - - random = Generator(MT19937(self.seed)) - logseries = random.logseries - actual = logseries(p * 3) - assert_array_equal(actual, desired) - assert_raises(ValueError, logseries, bad_p_one * 3) - assert_raises(ValueError, logseries, bad_p_two * 3) - - def test_multinomial(self): - random = Generator(MT19937(self.seed)) - actual = random.multinomial([5, 20], [1 / 6.] * 6, size=(3, 2)) - desired = np.array([[[0, 0, 2, 1, 2, 0], - [2, 3, 6, 4, 2, 3]], - [[1, 0, 1, 0, 2, 1], - [7, 2, 2, 1, 4, 4]], - [[0, 2, 0, 1, 2, 0], - [3, 2, 3, 3, 4, 5]]], dtype=np.int64) - assert_array_equal(actual, desired) - - random = Generator(MT19937(self.seed)) - actual = random.multinomial([5, 20], [1 / 6.] 
* 6) - desired = np.array([[0, 0, 2, 1, 2, 0], - [2, 3, 6, 4, 2, 3]], dtype=np.int64) - assert_array_equal(actual, desired) - - -class TestThread(object): - # make sure each state produces the same sequence even in threads - def setup(self): - self.seeds = range(4) - - def check_function(self, function, sz): - from threading import Thread - - out1 = np.empty((len(self.seeds),) + sz) - out2 = np.empty((len(self.seeds),) + sz) - - # threaded generation - t = [Thread(target=function, args=(Generator(MT19937(s)), o)) - for s, o in zip(self.seeds, out1)] - [x.start() for x in t] - [x.join() for x in t] - - # the same serial - for s, o in zip(self.seeds, out2): - function(Generator(MT19937(s)), o) - - # these platforms change x87 fpu precision mode in threads - if np.intp().dtype.itemsize == 4 and sys.platform == "win32": - assert_array_almost_equal(out1, out2) - else: - assert_array_equal(out1, out2) - - def test_normal(self): - def gen_random(state, out): - out[...] = state.normal(size=10000) - - self.check_function(gen_random, sz=(10000,)) - - def test_exp(self): - def gen_random(state, out): - out[...] = state.exponential(scale=np.ones((100, 1000))) - - self.check_function(gen_random, sz=(100, 1000)) - - def test_multinomial(self): - def gen_random(state, out): - out[...] = state.multinomial(10, [1 / 6.] 
* 6, size=10000) - - self.check_function(gen_random, sz=(10000, 6)) - - -# See Issue #4263 -class TestSingleEltArrayInput(object): - def setup(self): - self.argOne = np.array([2]) - self.argTwo = np.array([3]) - self.argThree = np.array([4]) - self.tgtShape = (1,) - - def test_one_arg_funcs(self): - funcs = (random.exponential, random.standard_gamma, - random.chisquare, random.standard_t, - random.pareto, random.weibull, - random.power, random.rayleigh, - random.poisson, random.zipf, - random.geometric, random.logseries) - - probfuncs = (random.geometric, random.logseries) - - for func in funcs: - if func in probfuncs: # p < 1.0 - out = func(np.array([0.5])) - - else: - out = func(self.argOne) - - assert_equal(out.shape, self.tgtShape) - - def test_two_arg_funcs(self): - funcs = (random.uniform, random.normal, - random.beta, random.gamma, - random.f, random.noncentral_chisquare, - random.vonmises, random.laplace, - random.gumbel, random.logistic, - random.lognormal, random.wald, - random.binomial, random.negative_binomial) - - probfuncs = (random.binomial, random.negative_binomial) - - for func in funcs: - if func in probfuncs: # p <= 1 - argTwo = np.array([0.5]) - - else: - argTwo = self.argTwo - - out = func(self.argOne, argTwo) - assert_equal(out.shape, self.tgtShape) - - out = func(self.argOne[0], argTwo) - assert_equal(out.shape, self.tgtShape) - - out = func(self.argOne, argTwo[0]) - assert_equal(out.shape, self.tgtShape) - - def test_integers(self, endpoint): - itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16, - np.int32, np.uint32, np.int64, np.uint64] - func = random.integers - high = np.array([1]) - low = np.array([0]) - - for dt in itype: - out = func(low, high, endpoint=endpoint, dtype=dt) - assert_equal(out.shape, self.tgtShape) - - out = func(low[0], high, endpoint=endpoint, dtype=dt) - assert_equal(out.shape, self.tgtShape) - - out = func(low, high[0], endpoint=endpoint, dtype=dt) - assert_equal(out.shape, self.tgtShape) - - def 
test_three_arg_funcs(self): - funcs = [random.noncentral_f, random.triangular, - random.hypergeometric] - - for func in funcs: - out = func(self.argOne, self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) - - out = func(self.argOne[0], self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) - - out = func(self.argOne, self.argTwo[0], self.argThree) - assert_equal(out.shape, self.tgtShape) diff --git a/venv/lib/python3.7/site-packages/numpy/random/tests/test_generator_mt19937_regressions.py b/venv/lib/python3.7/site-packages/numpy/random/tests/test_generator_mt19937_regressions.py deleted file mode 100644 index 3a937f9..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/tests/test_generator_mt19937_regressions.py +++ /dev/null @@ -1,158 +0,0 @@ -import sys -from numpy.testing import (assert_, assert_array_equal) -from numpy.compat import long -import numpy as np -import pytest -from numpy.random import Generator, MT19937 - -mt19937 = Generator(MT19937()) - - -class TestRegression(object): - - def test_VonMises_range(self): - # Make sure generated random variables are in [-pi, pi]. - # Regression test for ticket #986. 
- for mu in np.linspace(-7., 7., 5): - r = mt19937.vonmises(mu, 1, 50) - assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) - - def test_hypergeometric_range(self): - # Test for ticket #921 - assert_(np.all(mt19937.hypergeometric(3, 18, 11, size=10) < 4)) - assert_(np.all(mt19937.hypergeometric(18, 3, 11, size=10) > 0)) - - # Test for ticket #5623 - args = (2**20 - 2, 2**20 - 2, 2**20 - 2) # Check for 32-bit systems - assert_(mt19937.hypergeometric(*args) > 0) - - def test_logseries_convergence(self): - # Test for ticket #923 - N = 1000 - mt19937 = Generator(MT19937(0)) - rvsn = mt19937.logseries(0.8, size=N) - # these two frequency counts should be close to theoretical - # numbers with this large sample - # theoretical large N result is 0.49706795 - freq = np.sum(rvsn == 1) / float(N) - msg = "Frequency was %f, should be > 0.45" % freq - assert_(freq > 0.45, msg) - # theoretical large N result is 0.19882718 - freq = np.sum(rvsn == 2) / float(N) - msg = "Frequency was %f, should be < 0.23" % freq - assert_(freq < 0.23, msg) - - def test_permutation_longs(self): - mt19937 = Generator(MT19937(1234)) - a = mt19937.permutation(12) - mt19937 = Generator(MT19937(1234)) - b = mt19937.permutation(long(12)) - assert_array_equal(a, b) - - def test_shuffle_mixed_dimension(self): - # Test for trac ticket #2074 - for t in [[1, 2, 3, None], - [(1, 1), (2, 2), (3, 3), None], - [1, (2, 2), (3, 3), None], - [(1, 1), 2, 3, None]]: - mt19937 = Generator(MT19937(12345)) - shuffled = list(t) - mt19937.shuffle(shuffled) - assert_array_equal(shuffled, [t[2], t[0], t[3], t[1]]) - - def test_call_within_randomstate(self): - # Check that custom BitGenerator does not call into global state - res = np.array([1, 8, 0, 1, 5, 3, 3, 8, 1, 4]) - for i in range(3): - mt19937 = Generator(MT19937(i)) - m = Generator(MT19937(4321)) - # If m.state is not honored, the result will change - assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res) - - def test_multivariate_normal_size_types(self): 
- # Test for multivariate_normal issue with 'size' argument. - # Check that the multivariate_normal size argument can be a - # numpy integer. - mt19937.multivariate_normal([0], [[0]], size=1) - mt19937.multivariate_normal([0], [[0]], size=np.int_(1)) - mt19937.multivariate_normal([0], [[0]], size=np.int64(1)) - - def test_beta_small_parameters(self): - # Test that beta with small a and b parameters does not produce - # NaNs due to roundoff errors causing 0 / 0, gh-5851 - mt19937 = Generator(MT19937(1234567890)) - x = mt19937.beta(0.0001, 0.0001, size=100) - assert_(not np.any(np.isnan(x)), 'Nans in mt19937.beta') - - def test_choice_sum_of_probs_tolerance(self): - # The sum of probs should be 1.0 with some tolerance. - # For low precision dtypes the tolerance was too tight. - # See numpy github issue 6123. - mt19937 = Generator(MT19937(1234)) - a = [1, 2, 3] - counts = [4, 4, 2] - for dt in np.float16, np.float32, np.float64: - probs = np.array(counts, dtype=dt) / sum(counts) - c = mt19937.choice(a, p=probs) - assert_(c in a) - with pytest.raises(ValueError): - mt19937.choice(a, p=probs*0.9) - - def test_shuffle_of_array_of_different_length_strings(self): - # Test that permuting an array of different length strings - # will not cause a segfault on garbage collection - # Tests gh-7710 - mt19937 = Generator(MT19937(1234)) - - a = np.array(['a', 'a' * 1000]) - - for _ in range(100): - mt19937.shuffle(a) - - # Force Garbage Collection - should not segfault. - import gc - gc.collect() - - def test_shuffle_of_array_of_objects(self): - # Test that permuting an array of objects will not cause - # a segfault on garbage collection. - # See gh-7719 - mt19937 = Generator(MT19937(1234)) - a = np.array([np.arange(1), np.arange(4)]) - - for _ in range(1000): - mt19937.shuffle(a) - - # Force Garbage Collection - should not segfault. 
- import gc - gc.collect() - - def test_permutation_subclass(self): - class N(np.ndarray): - pass - - mt19937 = Generator(MT19937(1)) - orig = np.arange(3).view(N) - perm = mt19937.permutation(orig) - assert_array_equal(perm, np.array([2, 0, 1])) - assert_array_equal(orig, np.arange(3).view(N)) - - class M(object): - a = np.arange(5) - - def __array__(self): - return self.a - - mt19937 = Generator(MT19937(1)) - m = M() - perm = mt19937.permutation(m) - assert_array_equal(perm, np.array([4, 1, 3, 0, 2])) - assert_array_equal(m.__array__(), np.arange(5)) - - def test_gamma_0(self): - assert mt19937.standard_gamma(0.0) == 0.0 - assert_array_equal(mt19937.standard_gamma([0.0]), 0.0) - - actual = mt19937.standard_gamma([0.0], dtype='float') - expected = np.array([0.], dtype=np.float32) - assert_array_equal(actual, expected) diff --git a/venv/lib/python3.7/site-packages/numpy/random/tests/test_random.py b/venv/lib/python3.7/site-packages/numpy/random/tests/test_random.py deleted file mode 100644 index 2e2eced..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/tests/test_random.py +++ /dev/null @@ -1,1674 +0,0 @@ -from __future__ import division, absolute_import, print_function -import warnings - -import numpy as np -from numpy.testing import ( - assert_, assert_raises, assert_equal, assert_warns, - assert_no_warnings, assert_array_equal, assert_array_almost_equal, - suppress_warnings - ) -from numpy import random -import sys - - -class TestSeed(object): - def test_scalar(self): - s = np.random.RandomState(0) - assert_equal(s.randint(1000), 684) - s = np.random.RandomState(4294967295) - assert_equal(s.randint(1000), 419) - - def test_array(self): - s = np.random.RandomState(range(10)) - assert_equal(s.randint(1000), 468) - s = np.random.RandomState(np.arange(10)) - assert_equal(s.randint(1000), 468) - s = np.random.RandomState([0]) - assert_equal(s.randint(1000), 973) - s = np.random.RandomState([4294967295]) - assert_equal(s.randint(1000), 265) - - def 
test_invalid_scalar(self): - # seed must be an unsigned 32 bit integer - assert_raises(TypeError, np.random.RandomState, -0.5) - assert_raises(ValueError, np.random.RandomState, -1) - - def test_invalid_array(self): - # seed must be an unsigned 32 bit integer - assert_raises(TypeError, np.random.RandomState, [-0.5]) - assert_raises(ValueError, np.random.RandomState, [-1]) - assert_raises(ValueError, np.random.RandomState, [4294967296]) - assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296]) - assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296]) - - def test_invalid_array_shape(self): - # gh-9832 - assert_raises(ValueError, np.random.RandomState, - np.array([], dtype=np.int64)) - assert_raises(ValueError, np.random.RandomState, [[1, 2, 3]]) - assert_raises(ValueError, np.random.RandomState, [[1, 2, 3], - [4, 5, 6]]) - - -class TestBinomial(object): - def test_n_zero(self): - # Tests the corner case of n == 0 for the binomial distribution. - # binomial(0, p) should be zero for any p in [0, 1]. - # This test addresses issue #3480. - zeros = np.zeros(2, dtype='int') - for p in [0, .5, 1]: - assert_(random.binomial(0, p) == 0) - assert_array_equal(random.binomial(zeros, p), zeros) - - def test_p_is_nan(self): - # Issue #4571. 
- assert_raises(ValueError, random.binomial, 1, np.nan) - - -class TestMultinomial(object): - def test_basic(self): - random.multinomial(100, [0.2, 0.8]) - - def test_zero_probability(self): - random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0]) - - def test_int_negative_interval(self): - assert_(-5 <= random.randint(-5, -1) < -1) - x = random.randint(-5, -1, 5) - assert_(np.all(-5 <= x)) - assert_(np.all(x < -1)) - - def test_size(self): - # gh-3173 - p = [0.5, 0.5] - assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) - assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) - assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) - assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2)) - assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2)) - assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape, - (2, 2, 2)) - - assert_raises(TypeError, np.random.multinomial, 1, p, - float(1)) - - -class TestSetState(object): - def setup(self): - self.seed = 1234567890 - self.prng = random.RandomState(self.seed) - self.state = self.prng.get_state() - - def test_basic(self): - old = self.prng.tomaxint(16) - self.prng.set_state(self.state) - new = self.prng.tomaxint(16) - assert_(np.all(old == new)) - - def test_gaussian_reset(self): - # Make sure the cached every-other-Gaussian is reset. - old = self.prng.standard_normal(size=3) - self.prng.set_state(self.state) - new = self.prng.standard_normal(size=3) - assert_(np.all(old == new)) - - def test_gaussian_reset_in_media_res(self): - # When the state is saved with a cached Gaussian, make sure the - # cached Gaussian is restored. 
- - self.prng.standard_normal() - state = self.prng.get_state() - old = self.prng.standard_normal(size=3) - self.prng.set_state(state) - new = self.prng.standard_normal(size=3) - assert_(np.all(old == new)) - - def test_backwards_compatibility(self): - # Make sure we can accept old state tuples that do not have the - # cached Gaussian value. - old_state = self.state[:-2] - x1 = self.prng.standard_normal(size=16) - self.prng.set_state(old_state) - x2 = self.prng.standard_normal(size=16) - self.prng.set_state(self.state) - x3 = self.prng.standard_normal(size=16) - assert_(np.all(x1 == x2)) - assert_(np.all(x1 == x3)) - - def test_negative_binomial(self): - # Ensure that the negative binomial results take floating point - # arguments without truncation. - self.prng.negative_binomial(0.5, 0.5) - - -class TestRandint(object): - - rfunc = np.random.randint - - # valid integer/boolean types - itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16, - np.int32, np.uint32, np.int64, np.uint64] - - def test_unsupported_type(self): - assert_raises(TypeError, self.rfunc, 1, dtype=float) - - def test_bounds_checking(self): - for dt in self.itype: - lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min - ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 - assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt) - assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt) - assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt) - assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt) - - def test_rng_zero_and_extremes(self): - for dt in self.itype: - lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min - ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 - - tgt = ubnd - 1 - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) - - tgt = lbnd - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) - - tgt = (lbnd + ubnd)//2 - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) - - def test_full_range(self): - # Test for ticket 
#1690 - - for dt in self.itype: - lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min - ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 - - try: - self.rfunc(lbnd, ubnd, dtype=dt) - except Exception as e: - raise AssertionError("No error should have been raised, " - "but one was with the following " - "message:\n\n%s" % str(e)) - - def test_in_bounds_fuzz(self): - # Don't use fixed seed - np.random.seed() - - for dt in self.itype[1:]: - for ubnd in [4, 8, 16]: - vals = self.rfunc(2, ubnd, size=2**16, dtype=dt) - assert_(vals.max() < ubnd) - assert_(vals.min() >= 2) - - vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_) - - assert_(vals.max() < 2) - assert_(vals.min() >= 0) - - def test_repeatability(self): - import hashlib - # We use a md5 hash of generated sequences of 1000 samples - # in the range [0, 6) for all but bool, where the range - # is [0, 2). Hashes are for little endian numbers. - tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0', - 'int16': '1b7741b80964bb190c50d541dca1cac1', - 'int32': '4dc9fcc2b395577ebb51793e58ed1a05', - 'int64': '17db902806f448331b5a758d7d2ee672', - 'int8': '27dd30c4e08a797063dffac2490b0be6', - 'uint16': '1b7741b80964bb190c50d541dca1cac1', - 'uint32': '4dc9fcc2b395577ebb51793e58ed1a05', - 'uint64': '17db902806f448331b5a758d7d2ee672', - 'uint8': '27dd30c4e08a797063dffac2490b0be6'} - - for dt in self.itype[1:]: - np.random.seed(1234) - - # view as little endian for hash - if sys.byteorder == 'little': - val = self.rfunc(0, 6, size=1000, dtype=dt) - else: - val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap() - - res = hashlib.md5(val.view(np.int8)).hexdigest() - assert_(tgt[np.dtype(dt).name] == res) - - # bools do not depend on endianness - np.random.seed(1234) - val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8) - res = hashlib.md5(val).hexdigest() - assert_(tgt[np.dtype(bool).name] == res) - - def test_int64_uint64_corner_case(self): - # When stored in Numpy arrays, `lbnd` is casted - # as np.int64, and `ubnd` is 
casted as np.uint64. - # Checking whether `lbnd` >= `ubnd` used to be - # done solely via direct comparison, which is incorrect - # because when Numpy tries to compare both numbers, - # it casts both to np.float64 because there is - # no integer superset of np.int64 and np.uint64. However, - # `ubnd` is too large to be represented in np.float64, - # causing it be round down to np.iinfo(np.int64).max, - # leading to a ValueError because `lbnd` now equals - # the new `ubnd`. - - dt = np.int64 - tgt = np.iinfo(np.int64).max - lbnd = np.int64(np.iinfo(np.int64).max) - ubnd = np.uint64(np.iinfo(np.int64).max + 1) - - # None of these function calls should - # generate a ValueError now. - actual = np.random.randint(lbnd, ubnd, dtype=dt) - assert_equal(actual, tgt) - - def test_respect_dtype_singleton(self): - # See gh-7203 - for dt in self.itype: - lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min - ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 - - sample = self.rfunc(lbnd, ubnd, dtype=dt) - assert_equal(sample.dtype, np.dtype(dt)) - - for dt in (bool, int, np.compat.long): - lbnd = 0 if dt is bool else np.iinfo(dt).min - ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 - - # gh-7284: Ensure that we get Python data types - sample = self.rfunc(lbnd, ubnd, dtype=dt) - assert_(not hasattr(sample, 'dtype')) - assert_equal(type(sample), dt) - - -class TestRandomDist(object): - # Make sure the random distribution returns the correct value for a - # given seed - - def setup(self): - self.seed = 1234567890 - - def test_rand(self): - np.random.seed(self.seed) - actual = np.random.rand(3, 2) - desired = np.array([[0.61879477158567997, 0.59162362775974664], - [0.88868358904449662, 0.89165480011560816], - [0.4575674820298663, 0.7781880808593471]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_randn(self): - np.random.seed(self.seed) - actual = np.random.randn(3, 2) - desired = np.array([[1.34016345771863121, 1.73759122771936081], - [1.498988344300628, 
-0.2286433324536169], - [2.031033998682787, 2.17032494605655257]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_randint(self): - np.random.seed(self.seed) - actual = np.random.randint(-99, 99, size=(3, 2)) - desired = np.array([[31, 3], - [-52, 41], - [-48, -66]]) - assert_array_equal(actual, desired) - - def test_random_integers(self): - np.random.seed(self.seed) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) - actual = np.random.random_integers(-99, 99, size=(3, 2)) - assert_(len(w) == 1) - desired = np.array([[31, 3], - [-52, 41], - [-48, -66]]) - assert_array_equal(actual, desired) - - def test_random_integers_max_int(self): - # Tests whether random_integers can generate the - # maximum allowed Python int that can be converted - # into a C long. Previous implementations of this - # method have thrown an OverflowError when attempting - # to generate this integer. - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) - actual = np.random.random_integers(np.iinfo('l').max, - np.iinfo('l').max) - assert_(len(w) == 1) - - desired = np.iinfo('l').max - assert_equal(actual, desired) - - def test_random_integers_deprecated(self): - with warnings.catch_warnings(): - warnings.simplefilter("error", DeprecationWarning) - - # DeprecationWarning raised with high == None - assert_raises(DeprecationWarning, - np.random.random_integers, - np.iinfo('l').max) - - # DeprecationWarning raised with high != None - assert_raises(DeprecationWarning, - np.random.random_integers, - np.iinfo('l').max, np.iinfo('l').max) - - def test_random(self): - np.random.seed(self.seed) - actual = np.random.random((3, 2)) - desired = np.array([[0.61879477158567997, 0.59162362775974664], - [0.88868358904449662, 0.89165480011560816], - [0.4575674820298663, 0.7781880808593471]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_choice_uniform_replace(self): - np.random.seed(self.seed) - actual = np.random.choice(4, 4) 
- desired = np.array([2, 3, 2, 3]) - assert_array_equal(actual, desired) - - def test_choice_nonuniform_replace(self): - np.random.seed(self.seed) - actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) - desired = np.array([1, 1, 2, 2]) - assert_array_equal(actual, desired) - - def test_choice_uniform_noreplace(self): - np.random.seed(self.seed) - actual = np.random.choice(4, 3, replace=False) - desired = np.array([0, 1, 3]) - assert_array_equal(actual, desired) - - def test_choice_nonuniform_noreplace(self): - np.random.seed(self.seed) - actual = np.random.choice(4, 3, replace=False, - p=[0.1, 0.3, 0.5, 0.1]) - desired = np.array([2, 3, 1]) - assert_array_equal(actual, desired) - - def test_choice_noninteger(self): - np.random.seed(self.seed) - actual = np.random.choice(['a', 'b', 'c', 'd'], 4) - desired = np.array(['c', 'd', 'c', 'd']) - assert_array_equal(actual, desired) - - def test_choice_exceptions(self): - sample = np.random.choice - assert_raises(ValueError, sample, -1, 3) - assert_raises(ValueError, sample, 3., 3) - assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3) - assert_raises(ValueError, sample, [], 3) - assert_raises(ValueError, sample, [1, 2, 3, 4], 3, - p=[[0.25, 0.25], [0.25, 0.25]]) - assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2]) - assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1]) - assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4]) - assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False) - # gh-13087 - assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False) - assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False) - assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False) - assert_raises(ValueError, sample, [1, 2, 3], 2, - replace=False, p=[1, 0, 0]) - - def test_choice_return_shape(self): - p = [0.1, 0.9] - # Check scalar - assert_(np.isscalar(np.random.choice(2, replace=True))) - assert_(np.isscalar(np.random.choice(2, replace=False))) - 
assert_(np.isscalar(np.random.choice(2, replace=True, p=p))) - assert_(np.isscalar(np.random.choice(2, replace=False, p=p))) - assert_(np.isscalar(np.random.choice([1, 2], replace=True))) - assert_(np.random.choice([None], replace=True) is None) - a = np.array([1, 2]) - arr = np.empty(1, dtype=object) - arr[0] = a - assert_(np.random.choice(arr, replace=True) is a) - - # Check 0-d array - s = tuple() - assert_(not np.isscalar(np.random.choice(2, s, replace=True))) - assert_(not np.isscalar(np.random.choice(2, s, replace=False))) - assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p))) - assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p))) - assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True))) - assert_(np.random.choice([None], s, replace=True).ndim == 0) - a = np.array([1, 2]) - arr = np.empty(1, dtype=object) - arr[0] = a - assert_(np.random.choice(arr, s, replace=True).item() is a) - - # Check multi dimensional array - s = (2, 3) - p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2] - assert_equal(np.random.choice(6, s, replace=True).shape, s) - assert_equal(np.random.choice(6, s, replace=False).shape, s) - assert_equal(np.random.choice(6, s, replace=True, p=p).shape, s) - assert_equal(np.random.choice(6, s, replace=False, p=p).shape, s) - assert_equal(np.random.choice(np.arange(6), s, replace=True).shape, s) - - # Check zero-size - assert_equal(np.random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4)) - assert_equal(np.random.randint(0, -10, size=0).shape, (0,)) - assert_equal(np.random.randint(10, 10, size=0).shape, (0,)) - assert_equal(np.random.choice(0, size=0).shape, (0,)) - assert_equal(np.random.choice([], size=(0,)).shape, (0,)) - assert_equal(np.random.choice(['a', 'b'], size=(3, 0, 4)).shape, - (3, 0, 4)) - assert_raises(ValueError, np.random.choice, [], 10) - - def test_choice_nan_probabilities(self): - a = np.array([42, 1, 2]) - p = [None, None, None] - assert_raises(ValueError, np.random.choice, a, p=p) - - def 
test_bytes(self): - np.random.seed(self.seed) - actual = np.random.bytes(10) - desired = b'\x82Ui\x9e\xff\x97+Wf\xa5' - assert_equal(actual, desired) - - def test_shuffle(self): - # Test lists, arrays (of various dtypes), and multidimensional versions - # of both, c-contiguous or not: - for conv in [lambda x: np.array([]), - lambda x: x, - lambda x: np.asarray(x).astype(np.int8), - lambda x: np.asarray(x).astype(np.float32), - lambda x: np.asarray(x).astype(np.complex64), - lambda x: np.asarray(x).astype(object), - lambda x: [(i, i) for i in x], - lambda x: np.asarray([[i, i] for i in x]), - lambda x: np.vstack([x, x]).T, - # gh-11442 - lambda x: (np.asarray([(i, i) for i in x], - [("a", int), ("b", int)]) - .view(np.recarray)), - # gh-4270 - lambda x: np.asarray([(i, i) for i in x], - [("a", object), ("b", np.int32)])]: - np.random.seed(self.seed) - alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) - np.random.shuffle(alist) - actual = alist - desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3]) - assert_array_equal(actual, desired) - - def test_shuffle_masked(self): - # gh-3263 - a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1) - b = np.ma.masked_values(np.arange(20) % 3 - 1, -1) - a_orig = a.copy() - b_orig = b.copy() - for i in range(50): - np.random.shuffle(a) - assert_equal( - sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask])) - np.random.shuffle(b) - assert_equal( - sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask])) - - def test_beta(self): - np.random.seed(self.seed) - actual = np.random.beta(.1, .9, size=(3, 2)) - desired = np.array( - [[1.45341850513746058e-02, 5.31297615662868145e-04], - [1.85366619058432324e-06, 4.19214516800110563e-03], - [1.58405155108498093e-04, 1.26252891949397652e-04]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_binomial(self): - np.random.seed(self.seed) - actual = np.random.binomial(100, .456, size=(3, 2)) - desired = np.array([[37, 43], - [42, 48], - [46, 45]]) - 
assert_array_equal(actual, desired) - - def test_chisquare(self): - np.random.seed(self.seed) - actual = np.random.chisquare(50, size=(3, 2)) - desired = np.array([[63.87858175501090585, 68.68407748911370447], - [65.77116116901505904, 47.09686762438974483], - [72.3828403199695174, 74.18408615260374006]]) - assert_array_almost_equal(actual, desired, decimal=13) - - def test_dirichlet(self): - np.random.seed(self.seed) - alpha = np.array([51.72840233779265162, 39.74494232180943953]) - actual = np.random.mtrand.dirichlet(alpha, size=(3, 2)) - desired = np.array([[[0.54539444573611562, 0.45460555426388438], - [0.62345816822039413, 0.37654183177960598]], - [[0.55206000085785778, 0.44793999914214233], - [0.58964023305154301, 0.41035976694845688]], - [[0.59266909280647828, 0.40733090719352177], - [0.56974431743975207, 0.43025568256024799]]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_dirichlet_size(self): - # gh-3173 - p = np.array([51.72840233779265162, 39.74494232180943953]) - assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) - assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) - assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) - assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2)) - assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2)) - assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2)) - - assert_raises(TypeError, np.random.dirichlet, p, float(1)) - - def test_dirichlet_bad_alpha(self): - # gh-2089 - alpha = np.array([5.4e-01, -1.0e-16]) - assert_raises(ValueError, np.random.mtrand.dirichlet, alpha) - - def test_exponential(self): - np.random.seed(self.seed) - actual = np.random.exponential(1.1234, size=(3, 2)) - desired = np.array([[1.08342649775011624, 1.00607889924557314], - [2.46628830085216721, 2.49668106809923884], - [0.68717433461363442, 1.69175666993575979]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_exponential_0(self): - 
assert_equal(np.random.exponential(scale=0), 0) - assert_raises(ValueError, np.random.exponential, scale=-0.) - - def test_f(self): - np.random.seed(self.seed) - actual = np.random.f(12, 77, size=(3, 2)) - desired = np.array([[1.21975394418575878, 1.75135759791559775], - [1.44803115017146489, 1.22108959480396262], - [1.02176975757740629, 1.34431827623300415]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_gamma(self): - np.random.seed(self.seed) - actual = np.random.gamma(5, 3, size=(3, 2)) - desired = np.array([[24.60509188649287182, 28.54993563207210627], - [26.13476110204064184, 12.56988482927716078], - [31.71863275789960568, 33.30143302795922011]]) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_gamma_0(self): - assert_equal(np.random.gamma(shape=0, scale=0), 0) - assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.) - - def test_geometric(self): - np.random.seed(self.seed) - actual = np.random.geometric(.123456789, size=(3, 2)) - desired = np.array([[8, 7], - [17, 17], - [5, 12]]) - assert_array_equal(actual, desired) - - def test_gumbel(self): - np.random.seed(self.seed) - actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) - desired = np.array([[0.19591898743416816, 0.34405539668096674], - [-1.4492522252274278, -1.47374816298446865], - [1.10651090478803416, -0.69535848626236174]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_gumbel_0(self): - assert_equal(np.random.gumbel(scale=0), 0) - assert_raises(ValueError, np.random.gumbel, scale=-0.) 
- - def test_hypergeometric(self): - np.random.seed(self.seed) - actual = np.random.hypergeometric(10, 5, 14, size=(3, 2)) - desired = np.array([[10, 10], - [10, 10], - [9, 9]]) - assert_array_equal(actual, desired) - - # Test nbad = 0 - actual = np.random.hypergeometric(5, 0, 3, size=4) - desired = np.array([3, 3, 3, 3]) - assert_array_equal(actual, desired) - - actual = np.random.hypergeometric(15, 0, 12, size=4) - desired = np.array([12, 12, 12, 12]) - assert_array_equal(actual, desired) - - # Test ngood = 0 - actual = np.random.hypergeometric(0, 5, 3, size=4) - desired = np.array([0, 0, 0, 0]) - assert_array_equal(actual, desired) - - actual = np.random.hypergeometric(0, 15, 12, size=4) - desired = np.array([0, 0, 0, 0]) - assert_array_equal(actual, desired) - - def test_laplace(self): - np.random.seed(self.seed) - actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) - desired = np.array([[0.66599721112760157, 0.52829452552221945], - [3.12791959514407125, 3.18202813572992005], - [-0.05391065675859356, 1.74901336242837324]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_laplace_0(self): - assert_equal(np.random.laplace(scale=0), 0) - assert_raises(ValueError, np.random.laplace, scale=-0.) 
- - def test_logistic(self): - np.random.seed(self.seed) - actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) - desired = np.array([[1.09232835305011444, 0.8648196662399954], - [4.27818590694950185, 4.33897006346929714], - [-0.21682183359214885, 2.63373365386060332]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_lognormal(self): - np.random.seed(self.seed) - actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) - desired = np.array([[16.50698631688883822, 36.54846706092654784], - [22.67886599981281748, 0.71617561058995771], - [65.72798501792723869, 86.84341601437161273]]) - assert_array_almost_equal(actual, desired, decimal=13) - - def test_lognormal_0(self): - assert_equal(np.random.lognormal(sigma=0), 1) - assert_raises(ValueError, np.random.lognormal, sigma=-0.) - - def test_logseries(self): - np.random.seed(self.seed) - actual = np.random.logseries(p=.923456789, size=(3, 2)) - desired = np.array([[2, 2], - [6, 17], - [3, 6]]) - assert_array_equal(actual, desired) - - def test_multinomial(self): - np.random.seed(self.seed) - actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2)) - desired = np.array([[[4, 3, 5, 4, 2, 2], - [5, 2, 8, 2, 2, 1]], - [[3, 4, 3, 6, 0, 4], - [2, 1, 4, 3, 6, 4]], - [[4, 4, 2, 5, 2, 3], - [4, 3, 4, 2, 3, 4]]]) - assert_array_equal(actual, desired) - - def test_multivariate_normal(self): - np.random.seed(self.seed) - mean = (.123456789, 10) - cov = [[1, 0], [0, 1]] - size = (3, 2) - actual = np.random.multivariate_normal(mean, cov, size) - desired = np.array([[[1.463620246718631, 11.73759122771936], - [1.622445133300628, 9.771356667546383]], - [[2.154490787682787, 12.170324946056553], - [1.719909438201865, 9.230548443648306]], - [[0.689515026297799, 9.880729819607714], - [-0.023054015651998, 9.201096623542879]]]) - - assert_array_almost_equal(actual, desired, decimal=15) - - # Check for default size, was raising deprecation warning - actual = np.random.multivariate_normal(mean, 
cov) - desired = np.array([0.895289569463708, 9.17180864067987]) - assert_array_almost_equal(actual, desired, decimal=15) - - # Check that non positive-semidefinite covariance warns with - # RuntimeWarning - mean = [0, 0] - cov = [[1, 2], [2, 1]] - assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov) - - # and that it doesn't warn with RuntimeWarning check_valid='ignore' - assert_no_warnings(np.random.multivariate_normal, mean, cov, - check_valid='ignore') - - # and that it raises with RuntimeWarning check_valid='raises' - assert_raises(ValueError, np.random.multivariate_normal, mean, cov, - check_valid='raise') - - cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) - with suppress_warnings() as sup: - np.random.multivariate_normal(mean, cov) - w = sup.record(RuntimeWarning) - assert len(w) == 0 - - def test_negative_binomial(self): - np.random.seed(self.seed) - actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2)) - desired = np.array([[848, 841], - [892, 611], - [779, 647]]) - assert_array_equal(actual, desired) - - def test_noncentral_chisquare(self): - np.random.seed(self.seed) - actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) - desired = np.array([[23.91905354498517511, 13.35324692733826346], - [31.22452661329736401, 16.60047399466177254], - [5.03461598262724586, 17.94973089023519464]]) - assert_array_almost_equal(actual, desired, decimal=14) - - actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) - desired = np.array([[1.47145377828516666, 0.15052899268012659], - [0.00943803056963588, 1.02647251615666169], - [0.332334982684171, 0.15451287602753125]]) - assert_array_almost_equal(actual, desired, decimal=14) - - np.random.seed(self.seed) - actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) - desired = np.array([[9.597154162763948, 11.725484450296079], - [10.413711048138335, 3.694475922923986], - [13.484222138963087, 14.377255424602957]]) - assert_array_almost_equal(actual, 
desired, decimal=14) - - def test_noncentral_f(self): - np.random.seed(self.seed) - actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1, - size=(3, 2)) - desired = np.array([[1.40598099674926669, 0.34207973179285761], - [3.57715069265772545, 7.92632662577829805], - [0.43741599463544162, 1.1774208752428319]]) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_normal(self): - np.random.seed(self.seed) - actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2)) - desired = np.array([[2.80378370443726244, 3.59863924443872163], - [3.121433477601256, -0.33382987590723379], - [4.18552478636557357, 4.46410668111310471]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_normal_0(self): - assert_equal(np.random.normal(scale=0), 0) - assert_raises(ValueError, np.random.normal, scale=-0.) - - def test_pareto(self): - np.random.seed(self.seed) - actual = np.random.pareto(a=.123456789, size=(3, 2)) - desired = np.array( - [[2.46852460439034849e+03, 1.41286880810518346e+03], - [5.28287797029485181e+07, 6.57720981047328785e+07], - [1.40840323350391515e+02, 1.98390255135251704e+05]]) - # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this - # matrix differs by 24 nulps. 
Discussion: - # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html - # Consensus is that this is probably some gcc quirk that affects - # rounding but not in any important way, so we just use a looser - # tolerance on this test: - np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) - - def test_poisson(self): - np.random.seed(self.seed) - actual = np.random.poisson(lam=.123456789, size=(3, 2)) - desired = np.array([[0, 0], - [1, 0], - [0, 0]]) - assert_array_equal(actual, desired) - - def test_poisson_exceptions(self): - lambig = np.iinfo('l').max - lamneg = -1 - assert_raises(ValueError, np.random.poisson, lamneg) - assert_raises(ValueError, np.random.poisson, [lamneg]*10) - assert_raises(ValueError, np.random.poisson, lambig) - assert_raises(ValueError, np.random.poisson, [lambig]*10) - - def test_power(self): - np.random.seed(self.seed) - actual = np.random.power(a=.123456789, size=(3, 2)) - desired = np.array([[0.02048932883240791, 0.01424192241128213], - [0.38446073748535298, 0.39499689943484395], - [0.00177699707563439, 0.13115505880863756]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_rayleigh(self): - np.random.seed(self.seed) - actual = np.random.rayleigh(scale=10, size=(3, 2)) - desired = np.array([[13.8882496494248393, 13.383318339044731], - [20.95413364294492098, 21.08285015800712614], - [11.06066537006854311, 17.35468505778271009]]) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_rayleigh_0(self): - assert_equal(np.random.rayleigh(scale=0), 0) - assert_raises(ValueError, np.random.rayleigh, scale=-0.) 
- - def test_standard_cauchy(self): - np.random.seed(self.seed) - actual = np.random.standard_cauchy(size=(3, 2)) - desired = np.array([[0.77127660196445336, -6.55601161955910605], - [0.93582023391158309, -2.07479293013759447], - [-4.74601644297011926, 0.18338989290760804]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_standard_exponential(self): - np.random.seed(self.seed) - actual = np.random.standard_exponential(size=(3, 2)) - desired = np.array([[0.96441739162374596, 0.89556604882105506], - [2.1953785836319808, 2.22243285392490542], - [0.6116915921431676, 1.50592546727413201]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_standard_gamma(self): - np.random.seed(self.seed) - actual = np.random.standard_gamma(shape=3, size=(3, 2)) - desired = np.array([[5.50841531318455058, 6.62953470301903103], - [5.93988484943779227, 2.31044849402133989], - [7.54838614231317084, 8.012756093271868]]) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_standard_gamma_0(self): - assert_equal(np.random.standard_gamma(shape=0), 0) - assert_raises(ValueError, np.random.standard_gamma, shape=-0.) 
- - def test_standard_normal(self): - np.random.seed(self.seed) - actual = np.random.standard_normal(size=(3, 2)) - desired = np.array([[1.34016345771863121, 1.73759122771936081], - [1.498988344300628, -0.2286433324536169], - [2.031033998682787, 2.17032494605655257]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_standard_t(self): - np.random.seed(self.seed) - actual = np.random.standard_t(df=10, size=(3, 2)) - desired = np.array([[0.97140611862659965, -0.08830486548450577], - [1.36311143689505321, -0.55317463909867071], - [-0.18473749069684214, 0.61181537341755321]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_triangular(self): - np.random.seed(self.seed) - actual = np.random.triangular(left=5.12, mode=10.23, right=20.34, - size=(3, 2)) - desired = np.array([[12.68117178949215784, 12.4129206149193152], - [16.20131377335158263, 16.25692138747600524], - [11.20400690911820263, 14.4978144835829923]]) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_uniform(self): - np.random.seed(self.seed) - actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2)) - desired = np.array([[6.99097932346268003, 6.73801597444323974], - [9.50364421400426274, 9.53130618907631089], - [5.48995325769805476, 8.47493103280052118]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_uniform_range_bounds(self): - fmin = np.finfo('float').min - fmax = np.finfo('float').max - - func = np.random.uniform - assert_raises(OverflowError, func, -np.inf, 0) - assert_raises(OverflowError, func, 0, np.inf) - assert_raises(OverflowError, func, fmin, fmax) - assert_raises(OverflowError, func, [-np.inf], [0]) - assert_raises(OverflowError, func, [0], [np.inf]) - - # (fmax / 1e17) - fmin is within range, so this should not throw - # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX > - # DBL_MAX by increasing fmin a bit - np.random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17) - - def 
test_scalar_exception_propagation(self): - # Tests that exceptions are correctly propagated in distributions - # when called with objects that throw exceptions when converted to - # scalars. - # - # Regression test for gh: 8865 - - class ThrowingFloat(np.ndarray): - def __float__(self): - raise TypeError - - throwing_float = np.array(1.0).view(ThrowingFloat) - assert_raises(TypeError, np.random.uniform, throwing_float, - throwing_float) - - class ThrowingInteger(np.ndarray): - def __int__(self): - raise TypeError - - __index__ = __int__ - - throwing_int = np.array(1).view(ThrowingInteger) - assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1) - - def test_vonmises(self): - np.random.seed(self.seed) - actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) - desired = np.array([[2.28567572673902042, 2.89163838442285037], - [0.38198375564286025, 2.57638023113890746], - [1.19153771588353052, 1.83509849681825354]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_vonmises_small(self): - # check infinite loop, gh-4720 - np.random.seed(self.seed) - r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6) - np.testing.assert_(np.isfinite(r).all()) - - def test_wald(self): - np.random.seed(self.seed) - actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2)) - desired = np.array([[3.82935265715889983, 5.13125249184285526], - [0.35045403618358717, 1.50832396872003538], - [0.24124319895843183, 0.22031101461955038]]) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_weibull(self): - np.random.seed(self.seed) - actual = np.random.weibull(a=1.23, size=(3, 2)) - desired = np.array([[0.97097342648766727, 0.91422896443565516], - [1.89517770034962929, 1.91414357960479564], - [0.67057783752390987, 1.39494046635066793]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_weibull_0(self): - np.random.seed(self.seed) - assert_equal(np.random.weibull(a=0, size=12), np.zeros(12)) - 
assert_raises(ValueError, np.random.weibull, a=-0.) - - def test_zipf(self): - np.random.seed(self.seed) - actual = np.random.zipf(a=1.23, size=(3, 2)) - desired = np.array([[66, 29], - [1, 1], - [3, 13]]) - assert_array_equal(actual, desired) - - -class TestBroadcast(object): - # tests that functions that broadcast behave - # correctly when presented with non-scalar arguments - def setup(self): - self.seed = 123456789 - - def setSeed(self): - np.random.seed(self.seed) - - # TODO: Include test for randint once it can broadcast - # Can steal the test written in PR #6938 - - def test_uniform(self): - low = [0] - high = [1] - uniform = np.random.uniform - desired = np.array([0.53283302478975902, - 0.53413660089041659, - 0.50955303552646702]) - - self.setSeed() - actual = uniform(low * 3, high) - assert_array_almost_equal(actual, desired, decimal=14) - - self.setSeed() - actual = uniform(low, high * 3) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_normal(self): - loc = [0] - scale = [1] - bad_scale = [-1] - normal = np.random.normal - desired = np.array([2.2129019979039612, - 2.1283977976520019, - 1.8417114045748335]) - - self.setSeed() - actual = normal(loc * 3, scale) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, normal, loc * 3, bad_scale) - - self.setSeed() - actual = normal(loc, scale * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, normal, loc, bad_scale * 3) - - def test_beta(self): - a = [1] - b = [2] - bad_a = [-1] - bad_b = [-2] - beta = np.random.beta - desired = np.array([0.19843558305989056, - 0.075230336409423643, - 0.24976865978980844]) - - self.setSeed() - actual = beta(a * 3, b) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, beta, bad_a * 3, b) - assert_raises(ValueError, beta, a * 3, bad_b) - - self.setSeed() - actual = beta(a, b * 3) - assert_array_almost_equal(actual, desired, decimal=14) - 
assert_raises(ValueError, beta, bad_a, b * 3) - assert_raises(ValueError, beta, a, bad_b * 3) - - def test_exponential(self): - scale = [1] - bad_scale = [-1] - exponential = np.random.exponential - desired = np.array([0.76106853658845242, - 0.76386282278691653, - 0.71243813125891797]) - - self.setSeed() - actual = exponential(scale * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, exponential, bad_scale * 3) - - def test_standard_gamma(self): - shape = [1] - bad_shape = [-1] - std_gamma = np.random.standard_gamma - desired = np.array([0.76106853658845242, - 0.76386282278691653, - 0.71243813125891797]) - - self.setSeed() - actual = std_gamma(shape * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, std_gamma, bad_shape * 3) - - def test_gamma(self): - shape = [1] - scale = [2] - bad_shape = [-1] - bad_scale = [-2] - gamma = np.random.gamma - desired = np.array([1.5221370731769048, - 1.5277256455738331, - 1.4248762625178359]) - - self.setSeed() - actual = gamma(shape * 3, scale) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gamma, bad_shape * 3, scale) - assert_raises(ValueError, gamma, shape * 3, bad_scale) - - self.setSeed() - actual = gamma(shape, scale * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gamma, bad_shape, scale * 3) - assert_raises(ValueError, gamma, shape, bad_scale * 3) - - def test_f(self): - dfnum = [1] - dfden = [2] - bad_dfnum = [-1] - bad_dfden = [-2] - f = np.random.f - desired = np.array([0.80038951638264799, - 0.86768719635363512, - 2.7251095168386801]) - - self.setSeed() - actual = f(dfnum * 3, dfden) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, f, bad_dfnum * 3, dfden) - assert_raises(ValueError, f, dfnum * 3, bad_dfden) - - self.setSeed() - actual = f(dfnum, dfden * 3) - assert_array_almost_equal(actual, desired, decimal=14) - 
assert_raises(ValueError, f, bad_dfnum, dfden * 3) - assert_raises(ValueError, f, dfnum, bad_dfden * 3) - - def test_noncentral_f(self): - dfnum = [2] - dfden = [3] - nonc = [4] - bad_dfnum = [0] - bad_dfden = [-1] - bad_nonc = [-2] - nonc_f = np.random.noncentral_f - desired = np.array([9.1393943263705211, - 13.025456344595602, - 8.8018098359100545]) - - self.setSeed() - actual = nonc_f(dfnum * 3, dfden, nonc) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) - assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) - assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) - - self.setSeed() - actual = nonc_f(dfnum, dfden * 3, nonc) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) - assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) - assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) - - self.setSeed() - actual = nonc_f(dfnum, dfden, nonc * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) - assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) - assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) - - def test_noncentral_f_small_df(self): - self.setSeed() - desired = np.array([6.869638627492048, 0.785880199263955]) - actual = np.random.noncentral_f(0.9, 0.9, 2, size=2) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_chisquare(self): - df = [1] - bad_df = [-1] - chisquare = np.random.chisquare - desired = np.array([0.57022801133088286, - 0.51947702108840776, - 0.1320969254923558]) - - self.setSeed() - actual = chisquare(df * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, chisquare, bad_df * 3) - - def test_noncentral_chisquare(self): - df = [1] - nonc = [2] - bad_df = [-1] - bad_nonc = [-2] - nonc_chi = np.random.noncentral_chisquare - 
desired = np.array([9.0015599467913763, - 4.5804135049718742, - 6.0872302432834564]) - - self.setSeed() - actual = nonc_chi(df * 3, nonc) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) - assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) - - self.setSeed() - actual = nonc_chi(df, nonc * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) - assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) - - def test_standard_t(self): - df = [1] - bad_df = [-1] - t = np.random.standard_t - desired = np.array([3.0702872575217643, - 5.8560725167361607, - 1.0274791436474273]) - - self.setSeed() - actual = t(df * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, t, bad_df * 3) - - def test_vonmises(self): - mu = [2] - kappa = [1] - bad_kappa = [-1] - vonmises = np.random.vonmises - desired = np.array([2.9883443664201312, - -2.7064099483995943, - -1.8672476700665914]) - - self.setSeed() - actual = vonmises(mu * 3, kappa) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, vonmises, mu * 3, bad_kappa) - - self.setSeed() - actual = vonmises(mu, kappa * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, vonmises, mu, bad_kappa * 3) - - def test_pareto(self): - a = [1] - bad_a = [-1] - pareto = np.random.pareto - desired = np.array([1.1405622680198362, - 1.1465519762044529, - 1.0389564467453547]) - - self.setSeed() - actual = pareto(a * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, pareto, bad_a * 3) - - def test_weibull(self): - a = [1] - bad_a = [-1] - weibull = np.random.weibull - desired = np.array([0.76106853658845242, - 0.76386282278691653, - 0.71243813125891797]) - - self.setSeed() - actual = weibull(a * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, weibull, 
bad_a * 3) - - def test_power(self): - a = [1] - bad_a = [-1] - power = np.random.power - desired = np.array([0.53283302478975902, - 0.53413660089041659, - 0.50955303552646702]) - - self.setSeed() - actual = power(a * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, power, bad_a * 3) - - def test_laplace(self): - loc = [0] - scale = [1] - bad_scale = [-1] - laplace = np.random.laplace - desired = np.array([0.067921356028507157, - 0.070715642226971326, - 0.019290950698972624]) - - self.setSeed() - actual = laplace(loc * 3, scale) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, laplace, loc * 3, bad_scale) - - self.setSeed() - actual = laplace(loc, scale * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, laplace, loc, bad_scale * 3) - - def test_gumbel(self): - loc = [0] - scale = [1] - bad_scale = [-1] - gumbel = np.random.gumbel - desired = np.array([0.2730318639556768, - 0.26936705726291116, - 0.33906220393037939]) - - self.setSeed() - actual = gumbel(loc * 3, scale) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gumbel, loc * 3, bad_scale) - - self.setSeed() - actual = gumbel(loc, scale * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gumbel, loc, bad_scale * 3) - - def test_logistic(self): - loc = [0] - scale = [1] - bad_scale = [-1] - logistic = np.random.logistic - desired = np.array([0.13152135837586171, - 0.13675915696285773, - 0.038216792802833396]) - - self.setSeed() - actual = logistic(loc * 3, scale) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, logistic, loc * 3, bad_scale) - - self.setSeed() - actual = logistic(loc, scale * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, logistic, loc, bad_scale * 3) - - def test_lognormal(self): - mean = [0] - sigma = [1] - bad_sigma = [-1] - 
lognormal = np.random.lognormal - desired = np.array([9.1422086044848427, - 8.4013952870126261, - 6.3073234116578671]) - - self.setSeed() - actual = lognormal(mean * 3, sigma) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, lognormal, mean * 3, bad_sigma) - - self.setSeed() - actual = lognormal(mean, sigma * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, lognormal, mean, bad_sigma * 3) - - def test_rayleigh(self): - scale = [1] - bad_scale = [-1] - rayleigh = np.random.rayleigh - desired = np.array([1.2337491937897689, - 1.2360119924878694, - 1.1936818095781789]) - - self.setSeed() - actual = rayleigh(scale * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, rayleigh, bad_scale * 3) - - def test_wald(self): - mean = [0.5] - scale = [1] - bad_mean = [0] - bad_scale = [-2] - wald = np.random.wald - desired = np.array([0.11873681120271318, - 0.12450084820795027, - 0.9096122728408238]) - - self.setSeed() - actual = wald(mean * 3, scale) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, wald, bad_mean * 3, scale) - assert_raises(ValueError, wald, mean * 3, bad_scale) - - self.setSeed() - actual = wald(mean, scale * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, wald, bad_mean, scale * 3) - assert_raises(ValueError, wald, mean, bad_scale * 3) - assert_raises(ValueError, wald, 0.0, 1) - assert_raises(ValueError, wald, 0.5, 0.0) - - def test_triangular(self): - left = [1] - right = [3] - mode = [2] - bad_left_one = [3] - bad_mode_one = [4] - bad_left_two, bad_mode_two = right * 2 - triangular = np.random.triangular - desired = np.array([2.03339048710429, - 2.0347400359389356, - 2.0095991069536208]) - - self.setSeed() - actual = triangular(left * 3, mode, right) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one * 3, mode, 
right) - assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) - assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, - right) - - self.setSeed() - actual = triangular(left, mode * 3, right) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) - assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) - assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, - right) - - self.setSeed() - actual = triangular(left, mode, right * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) - assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) - assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, - right * 3) - - def test_binomial(self): - n = [1] - p = [0.5] - bad_n = [-1] - bad_p_one = [-1] - bad_p_two = [1.5] - binom = np.random.binomial - desired = np.array([1, 1, 1]) - - self.setSeed() - actual = binom(n * 3, p) - assert_array_equal(actual, desired) - assert_raises(ValueError, binom, bad_n * 3, p) - assert_raises(ValueError, binom, n * 3, bad_p_one) - assert_raises(ValueError, binom, n * 3, bad_p_two) - - self.setSeed() - actual = binom(n, p * 3) - assert_array_equal(actual, desired) - assert_raises(ValueError, binom, bad_n, p * 3) - assert_raises(ValueError, binom, n, bad_p_one * 3) - assert_raises(ValueError, binom, n, bad_p_two * 3) - - def test_negative_binomial(self): - n = [1] - p = [0.5] - bad_n = [-1] - bad_p_one = [-1] - bad_p_two = [1.5] - neg_binom = np.random.negative_binomial - desired = np.array([1, 0, 1]) - - self.setSeed() - actual = neg_binom(n * 3, p) - assert_array_equal(actual, desired) - assert_raises(ValueError, neg_binom, bad_n * 3, p) - assert_raises(ValueError, neg_binom, n * 3, bad_p_one) - assert_raises(ValueError, neg_binom, n * 3, bad_p_two) - - self.setSeed() - actual = neg_binom(n, p * 3) - 
assert_array_equal(actual, desired) - assert_raises(ValueError, neg_binom, bad_n, p * 3) - assert_raises(ValueError, neg_binom, n, bad_p_one * 3) - assert_raises(ValueError, neg_binom, n, bad_p_two * 3) - - def test_poisson(self): - max_lam = np.random.RandomState()._poisson_lam_max - - lam = [1] - bad_lam_one = [-1] - bad_lam_two = [max_lam * 2] - poisson = np.random.poisson - desired = np.array([1, 1, 0]) - - self.setSeed() - actual = poisson(lam * 3) - assert_array_equal(actual, desired) - assert_raises(ValueError, poisson, bad_lam_one * 3) - assert_raises(ValueError, poisson, bad_lam_two * 3) - - def test_zipf(self): - a = [2] - bad_a = [0] - zipf = np.random.zipf - desired = np.array([2, 2, 1]) - - self.setSeed() - actual = zipf(a * 3) - assert_array_equal(actual, desired) - assert_raises(ValueError, zipf, bad_a * 3) - with np.errstate(invalid='ignore'): - assert_raises(ValueError, zipf, np.nan) - assert_raises(ValueError, zipf, [0, 0, np.nan]) - - def test_geometric(self): - p = [0.5] - bad_p_one = [-1] - bad_p_two = [1.5] - geom = np.random.geometric - desired = np.array([2, 2, 2]) - - self.setSeed() - actual = geom(p * 3) - assert_array_equal(actual, desired) - assert_raises(ValueError, geom, bad_p_one * 3) - assert_raises(ValueError, geom, bad_p_two * 3) - - def test_hypergeometric(self): - ngood = [1] - nbad = [2] - nsample = [2] - bad_ngood = [-1] - bad_nbad = [-2] - bad_nsample_one = [0] - bad_nsample_two = [4] - hypergeom = np.random.hypergeometric - desired = np.array([1, 1, 1]) - - self.setSeed() - actual = hypergeom(ngood * 3, nbad, nsample) - assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample) - assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample) - assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one) - assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two) - - self.setSeed() - actual = hypergeom(ngood, nbad * 3, nsample) - 
assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample) - assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample) - assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one) - assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two) - - self.setSeed() - actual = hypergeom(ngood, nbad, nsample * 3) - assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3) - assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) - assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) - assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) - - def test_logseries(self): - p = [0.5] - bad_p_one = [2] - bad_p_two = [-1] - logseries = np.random.logseries - desired = np.array([1, 1, 1]) - - self.setSeed() - actual = logseries(p * 3) - assert_array_equal(actual, desired) - assert_raises(ValueError, logseries, bad_p_one * 3) - assert_raises(ValueError, logseries, bad_p_two * 3) - - -class TestThread(object): - # make sure each state produces the same sequence even in threads - def setup(self): - self.seeds = range(4) - - def check_function(self, function, sz): - from threading import Thread - - out1 = np.empty((len(self.seeds),) + sz) - out2 = np.empty((len(self.seeds),) + sz) - - # threaded generation - t = [Thread(target=function, args=(np.random.RandomState(s), o)) - for s, o in zip(self.seeds, out1)] - [x.start() for x in t] - [x.join() for x in t] - - # the same serial - for s, o in zip(self.seeds, out2): - function(np.random.RandomState(s), o) - - # these platforms change x87 fpu precision mode in threads - if np.intp().dtype.itemsize == 4 and sys.platform == "win32": - assert_array_almost_equal(out1, out2) - else: - assert_array_equal(out1, out2) - - def test_normal(self): - def gen_random(state, out): - out[...] 
= state.normal(size=10000) - self.check_function(gen_random, sz=(10000,)) - - def test_exp(self): - def gen_random(state, out): - out[...] = state.exponential(scale=np.ones((100, 1000))) - self.check_function(gen_random, sz=(100, 1000)) - - def test_multinomial(self): - def gen_random(state, out): - out[...] = state.multinomial(10, [1/6.]*6, size=10000) - self.check_function(gen_random, sz=(10000, 6)) - - -# See Issue #4263 -class TestSingleEltArrayInput(object): - def setup(self): - self.argOne = np.array([2]) - self.argTwo = np.array([3]) - self.argThree = np.array([4]) - self.tgtShape = (1,) - - def test_one_arg_funcs(self): - funcs = (np.random.exponential, np.random.standard_gamma, - np.random.chisquare, np.random.standard_t, - np.random.pareto, np.random.weibull, - np.random.power, np.random.rayleigh, - np.random.poisson, np.random.zipf, - np.random.geometric, np.random.logseries) - - probfuncs = (np.random.geometric, np.random.logseries) - - for func in funcs: - if func in probfuncs: # p < 1.0 - out = func(np.array([0.5])) - - else: - out = func(self.argOne) - - assert_equal(out.shape, self.tgtShape) - - def test_two_arg_funcs(self): - funcs = (np.random.uniform, np.random.normal, - np.random.beta, np.random.gamma, - np.random.f, np.random.noncentral_chisquare, - np.random.vonmises, np.random.laplace, - np.random.gumbel, np.random.logistic, - np.random.lognormal, np.random.wald, - np.random.binomial, np.random.negative_binomial) - - probfuncs = (np.random.binomial, np.random.negative_binomial) - - for func in funcs: - if func in probfuncs: # p <= 1 - argTwo = np.array([0.5]) - - else: - argTwo = self.argTwo - - out = func(self.argOne, argTwo) - assert_equal(out.shape, self.tgtShape) - - out = func(self.argOne[0], argTwo) - assert_equal(out.shape, self.tgtShape) - - out = func(self.argOne, argTwo[0]) - assert_equal(out.shape, self.tgtShape) - -# TODO: Uncomment once randint can broadcast arguments -# def test_randint(self): -# itype = [bool, np.int8, 
np.uint8, np.int16, np.uint16, -# np.int32, np.uint32, np.int64, np.uint64] -# func = np.random.randint -# high = np.array([1]) -# low = np.array([0]) -# -# for dt in itype: -# out = func(low, high, dtype=dt) -# self.assert_equal(out.shape, self.tgtShape) -# -# out = func(low[0], high, dtype=dt) -# self.assert_equal(out.shape, self.tgtShape) -# -# out = func(low, high[0], dtype=dt) -# self.assert_equal(out.shape, self.tgtShape) - - def test_three_arg_funcs(self): - funcs = [np.random.noncentral_f, np.random.triangular, - np.random.hypergeometric] - - for func in funcs: - out = func(self.argOne, self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) - - out = func(self.argOne[0], self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) - - out = func(self.argOne, self.argTwo[0], self.argThree) - assert_equal(out.shape, self.tgtShape) diff --git a/venv/lib/python3.7/site-packages/numpy/random/tests/test_randomstate.py b/venv/lib/python3.7/site-packages/numpy/random/tests/test_randomstate.py deleted file mode 100644 index c12b685..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/tests/test_randomstate.py +++ /dev/null @@ -1,1967 +0,0 @@ -import hashlib -import pickle -import sys -import warnings - -import numpy as np -import pytest -from numpy.testing import ( - assert_, assert_raises, assert_equal, assert_warns, - assert_no_warnings, assert_array_equal, assert_array_almost_equal, - suppress_warnings - ) - -from numpy.random import MT19937, PCG64 -from numpy import random - -INT_FUNCS = {'binomial': (100.0, 0.6), - 'geometric': (.5,), - 'hypergeometric': (20, 20, 10), - 'logseries': (.5,), - 'multinomial': (20, np.ones(6) / 6.0), - 'negative_binomial': (100, .5), - 'poisson': (10.0,), - 'zipf': (2,), - } - -if np.iinfo(int).max < 2**32: - # Windows and some 32-bit platforms, e.g., ARM - INT_FUNC_HASHES = {'binomial': '670e1c04223ffdbab27e08fbbad7bdba', - 'logseries': '6bd0183d2f8030c61b0d6e11aaa60caf', - 'geometric': 
'6e9df886f3e1e15a643168568d5280c0', - 'hypergeometric': '7964aa611b046aecd33063b90f4dec06', - 'multinomial': '68a0b049c16411ed0aa4aff3572431e4', - 'negative_binomial': 'dc265219eec62b4338d39f849cd36d09', - 'poisson': '7b4dce8e43552fc82701c2fa8e94dc6e', - 'zipf': 'fcd2a2095f34578723ac45e43aca48c5', - } -else: - INT_FUNC_HASHES = {'binomial': 'b5f8dcd74f172836536deb3547257b14', - 'geometric': '8814571f45c87c59699d62ccd3d6c350', - 'hypergeometric': 'bc64ae5976eac452115a16dad2dcf642', - 'logseries': '84be924b37485a27c4a98797bc88a7a4', - 'multinomial': 'ec3c7f9cf9664044bb0c6fb106934200', - 'negative_binomial': '210533b2234943591364d0117a552969', - 'poisson': '0536a8850c79da0c78defd742dccc3e0', - 'zipf': 'f2841f504dd2525cd67cdcad7561e532', - } - - -@pytest.fixture(scope='module', params=INT_FUNCS) -def int_func(request): - return (request.param, INT_FUNCS[request.param], - INT_FUNC_HASHES[request.param]) - - -def assert_mt19937_state_equal(a, b): - assert_equal(a['bit_generator'], b['bit_generator']) - assert_array_equal(a['state']['key'], b['state']['key']) - assert_array_equal(a['state']['pos'], b['state']['pos']) - assert_equal(a['has_gauss'], b['has_gauss']) - assert_equal(a['gauss'], b['gauss']) - - -class TestSeed(object): - def test_scalar(self): - s = random.RandomState(0) - assert_equal(s.randint(1000), 684) - s = random.RandomState(4294967295) - assert_equal(s.randint(1000), 419) - - def test_array(self): - s = random.RandomState(range(10)) - assert_equal(s.randint(1000), 468) - s = random.RandomState(np.arange(10)) - assert_equal(s.randint(1000), 468) - s = random.RandomState([0]) - assert_equal(s.randint(1000), 973) - s = random.RandomState([4294967295]) - assert_equal(s.randint(1000), 265) - - def test_invalid_scalar(self): - # seed must be an unsigned 32 bit integer - assert_raises(TypeError, random.RandomState, -0.5) - assert_raises(ValueError, random.RandomState, -1) - - def test_invalid_array(self): - # seed must be an unsigned 32 bit integer - 
assert_raises(TypeError, random.RandomState, [-0.5]) - assert_raises(ValueError, random.RandomState, [-1]) - assert_raises(ValueError, random.RandomState, [4294967296]) - assert_raises(ValueError, random.RandomState, [1, 2, 4294967296]) - assert_raises(ValueError, random.RandomState, [1, -2, 4294967296]) - - def test_invalid_array_shape(self): - # gh-9832 - assert_raises(ValueError, random.RandomState, np.array([], - dtype=np.int64)) - assert_raises(ValueError, random.RandomState, [[1, 2, 3]]) - assert_raises(ValueError, random.RandomState, [[1, 2, 3], - [4, 5, 6]]) - - def test_cannot_seed(self): - rs = random.RandomState(PCG64(0)) - with assert_raises(TypeError): - rs.seed(1234) - - def test_invalid_initialization(self): - assert_raises(ValueError, random.RandomState, MT19937) - - -class TestBinomial(object): - def test_n_zero(self): - # Tests the corner case of n == 0 for the binomial distribution. - # binomial(0, p) should be zero for any p in [0, 1]. - # This test addresses issue #3480. - zeros = np.zeros(2, dtype='int') - for p in [0, .5, 1]: - assert_(random.binomial(0, p) == 0) - assert_array_equal(random.binomial(zeros, p), zeros) - - def test_p_is_nan(self): - # Issue #4571. 
- assert_raises(ValueError, random.binomial, 1, np.nan) - - -class TestMultinomial(object): - def test_basic(self): - random.multinomial(100, [0.2, 0.8]) - - def test_zero_probability(self): - random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0]) - - def test_int_negative_interval(self): - assert_(-5 <= random.randint(-5, -1) < -1) - x = random.randint(-5, -1, 5) - assert_(np.all(-5 <= x)) - assert_(np.all(x < -1)) - - def test_size(self): - # gh-3173 - p = [0.5, 0.5] - assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) - assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) - assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) - assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2)) - assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2)) - assert_equal(random.multinomial(1, p, np.array((2, 2))).shape, - (2, 2, 2)) - - assert_raises(TypeError, random.multinomial, 1, p, - float(1)) - - def test_invalid_prob(self): - assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2]) - assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9]) - - def test_invalid_n(self): - assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2]) - - def test_p_non_contiguous(self): - p = np.arange(15.) - p /= np.sum(p[1::3]) - pvals = p[1::3] - random.seed(1432985819) - non_contig = random.multinomial(100, pvals=pvals) - random.seed(1432985819) - contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals)) - assert_array_equal(non_contig, contig) - - -class TestSetState(object): - def setup(self): - self.seed = 1234567890 - self.random_state = random.RandomState(self.seed) - self.state = self.random_state.get_state() - - def test_basic(self): - old = self.random_state.tomaxint(16) - self.random_state.set_state(self.state) - new = self.random_state.tomaxint(16) - assert_(np.all(old == new)) - - def test_gaussian_reset(self): - # Make sure the cached every-other-Gaussian is reset. 
- old = self.random_state.standard_normal(size=3) - self.random_state.set_state(self.state) - new = self.random_state.standard_normal(size=3) - assert_(np.all(old == new)) - - def test_gaussian_reset_in_media_res(self): - # When the state is saved with a cached Gaussian, make sure the - # cached Gaussian is restored. - - self.random_state.standard_normal() - state = self.random_state.get_state() - old = self.random_state.standard_normal(size=3) - self.random_state.set_state(state) - new = self.random_state.standard_normal(size=3) - assert_(np.all(old == new)) - - def test_backwards_compatibility(self): - # Make sure we can accept old state tuples that do not have the - # cached Gaussian value. - old_state = self.state[:-2] - x1 = self.random_state.standard_normal(size=16) - self.random_state.set_state(old_state) - x2 = self.random_state.standard_normal(size=16) - self.random_state.set_state(self.state) - x3 = self.random_state.standard_normal(size=16) - assert_(np.all(x1 == x2)) - assert_(np.all(x1 == x3)) - - def test_negative_binomial(self): - # Ensure that the negative binomial results take floating point - # arguments without truncation. 
- self.random_state.negative_binomial(0.5, 0.5) - - def test_get_state_warning(self): - rs = random.RandomState(PCG64()) - with suppress_warnings() as sup: - w = sup.record(RuntimeWarning) - state = rs.get_state() - assert_(len(w) == 1) - assert isinstance(state, dict) - assert state['bit_generator'] == 'PCG64' - - def test_invalid_legacy_state_setting(self): - state = self.random_state.get_state() - new_state = ('Unknown', ) + state[1:] - assert_raises(ValueError, self.random_state.set_state, new_state) - assert_raises(TypeError, self.random_state.set_state, - np.array(new_state, dtype=object)) - state = self.random_state.get_state(legacy=False) - del state['bit_generator'] - assert_raises(ValueError, self.random_state.set_state, state) - - def test_pickle(self): - self.random_state.seed(0) - self.random_state.random_sample(100) - self.random_state.standard_normal() - pickled = self.random_state.get_state(legacy=False) - assert_equal(pickled['has_gauss'], 1) - rs_unpick = pickle.loads(pickle.dumps(self.random_state)) - unpickled = rs_unpick.get_state(legacy=False) - assert_mt19937_state_equal(pickled, unpickled) - - def test_state_setting(self): - attr_state = self.random_state.__getstate__() - self.random_state.standard_normal() - self.random_state.__setstate__(attr_state) - state = self.random_state.get_state(legacy=False) - assert_mt19937_state_equal(attr_state, state) - - def test_repr(self): - assert repr(self.random_state).startswith('RandomState(MT19937)') - - -class TestRandint(object): - - rfunc = random.randint - - # valid integer/boolean types - itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16, - np.int32, np.uint32, np.int64, np.uint64] - - def test_unsupported_type(self): - assert_raises(TypeError, self.rfunc, 1, dtype=float) - - def test_bounds_checking(self): - for dt in self.itype: - lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min - ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 - assert_raises(ValueError, self.rfunc, lbnd - 1, 
ubnd, dtype=dt) - assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt) - assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt) - assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt) - - def test_rng_zero_and_extremes(self): - for dt in self.itype: - lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min - ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 - - tgt = ubnd - 1 - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) - - tgt = lbnd - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) - - tgt = (lbnd + ubnd)//2 - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) - - def test_full_range(self): - # Test for ticket #1690 - - for dt in self.itype: - lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min - ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 - - try: - self.rfunc(lbnd, ubnd, dtype=dt) - except Exception as e: - raise AssertionError("No error should have been raised, " - "but one was with the following " - "message:\n\n%s" % str(e)) - - def test_in_bounds_fuzz(self): - # Don't use fixed seed - random.seed() - - for dt in self.itype[1:]: - for ubnd in [4, 8, 16]: - vals = self.rfunc(2, ubnd, size=2**16, dtype=dt) - assert_(vals.max() < ubnd) - assert_(vals.min() >= 2) - - vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_) - - assert_(vals.max() < 2) - assert_(vals.min() >= 0) - - def test_repeatability(self): - # We use a md5 hash of generated sequences of 1000 samples - # in the range [0, 6) for all but bool, where the range - # is [0, 2). Hashes are for little endian numbers. 
- tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0', - 'int16': '1b7741b80964bb190c50d541dca1cac1', - 'int32': '4dc9fcc2b395577ebb51793e58ed1a05', - 'int64': '17db902806f448331b5a758d7d2ee672', - 'int8': '27dd30c4e08a797063dffac2490b0be6', - 'uint16': '1b7741b80964bb190c50d541dca1cac1', - 'uint32': '4dc9fcc2b395577ebb51793e58ed1a05', - 'uint64': '17db902806f448331b5a758d7d2ee672', - 'uint8': '27dd30c4e08a797063dffac2490b0be6'} - - for dt in self.itype[1:]: - random.seed(1234) - - # view as little endian for hash - if sys.byteorder == 'little': - val = self.rfunc(0, 6, size=1000, dtype=dt) - else: - val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap() - - res = hashlib.md5(val.view(np.int8)).hexdigest() - assert_(tgt[np.dtype(dt).name] == res) - - # bools do not depend on endianness - random.seed(1234) - val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8) - res = hashlib.md5(val).hexdigest() - assert_(tgt[np.dtype(bool).name] == res) - - def test_int64_uint64_corner_case(self): - # When stored in Numpy arrays, `lbnd` is casted - # as np.int64, and `ubnd` is casted as np.uint64. - # Checking whether `lbnd` >= `ubnd` used to be - # done solely via direct comparison, which is incorrect - # because when Numpy tries to compare both numbers, - # it casts both to np.float64 because there is - # no integer superset of np.int64 and np.uint64. However, - # `ubnd` is too large to be represented in np.float64, - # causing it be round down to np.iinfo(np.int64).max, - # leading to a ValueError because `lbnd` now equals - # the new `ubnd`. - - dt = np.int64 - tgt = np.iinfo(np.int64).max - lbnd = np.int64(np.iinfo(np.int64).max) - ubnd = np.uint64(np.iinfo(np.int64).max + 1) - - # None of these function calls should - # generate a ValueError now. 
- actual = random.randint(lbnd, ubnd, dtype=dt) - assert_equal(actual, tgt) - - def test_respect_dtype_singleton(self): - # See gh-7203 - for dt in self.itype: - lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min - ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 - - sample = self.rfunc(lbnd, ubnd, dtype=dt) - assert_equal(sample.dtype, np.dtype(dt)) - - for dt in (bool, int, np.compat.long): - lbnd = 0 if dt is bool else np.iinfo(dt).min - ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 - - # gh-7284: Ensure that we get Python data types - sample = self.rfunc(lbnd, ubnd, dtype=dt) - assert_(not hasattr(sample, 'dtype')) - assert_equal(type(sample), dt) - - -class TestRandomDist(object): - # Make sure the random distribution returns the correct value for a - # given seed - - def setup(self): - self.seed = 1234567890 - - def test_rand(self): - random.seed(self.seed) - actual = random.rand(3, 2) - desired = np.array([[0.61879477158567997, 0.59162362775974664], - [0.88868358904449662, 0.89165480011560816], - [0.4575674820298663, 0.7781880808593471]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_rand_singleton(self): - random.seed(self.seed) - actual = random.rand() - desired = 0.61879477158567997 - assert_array_almost_equal(actual, desired, decimal=15) - - def test_randn(self): - random.seed(self.seed) - actual = random.randn(3, 2) - desired = np.array([[1.34016345771863121, 1.73759122771936081], - [1.498988344300628, -0.2286433324536169], - [2.031033998682787, 2.17032494605655257]]) - assert_array_almost_equal(actual, desired, decimal=15) - - random.seed(self.seed) - actual = random.randn() - assert_array_almost_equal(actual, desired[0, 0], decimal=15) - - def test_randint(self): - random.seed(self.seed) - actual = random.randint(-99, 99, size=(3, 2)) - desired = np.array([[31, 3], - [-52, 41], - [-48, -66]]) - assert_array_equal(actual, desired) - - def test_random_integers(self): - random.seed(self.seed) - with suppress_warnings() as 
sup: - w = sup.record(DeprecationWarning) - actual = random.random_integers(-99, 99, size=(3, 2)) - assert_(len(w) == 1) - desired = np.array([[31, 3], - [-52, 41], - [-48, -66]]) - assert_array_equal(actual, desired) - - random.seed(self.seed) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) - actual = random.random_integers(198, size=(3, 2)) - assert_(len(w) == 1) - assert_array_equal(actual, desired + 100) - - def test_tomaxint(self): - random.seed(self.seed) - rs = random.RandomState(self.seed) - actual = rs.tomaxint(size=(3, 2)) - if np.iinfo(int).max == 2147483647: - desired = np.array([[1328851649, 731237375], - [1270502067, 320041495], - [1908433478, 499156889]], dtype=np.int64) - else: - desired = np.array([[5707374374421908479, 5456764827585442327], - [8196659375100692377, 8224063923314595285], - [4220315081820346526, 7177518203184491332]], - dtype=np.int64) - - assert_equal(actual, desired) - - rs.seed(self.seed) - actual = rs.tomaxint() - assert_equal(actual, desired[0, 0]) - - def test_random_integers_max_int(self): - # Tests whether random_integers can generate the - # maximum allowed Python int that can be converted - # into a C long. Previous implementations of this - # method have thrown an OverflowError when attempting - # to generate this integer. 
- with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) - actual = random.random_integers(np.iinfo('l').max, - np.iinfo('l').max) - assert_(len(w) == 1) - - desired = np.iinfo('l').max - assert_equal(actual, desired) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) - typer = np.dtype('l').type - actual = random.random_integers(typer(np.iinfo('l').max), - typer(np.iinfo('l').max)) - assert_(len(w) == 1) - assert_equal(actual, desired) - - def test_random_integers_deprecated(self): - with warnings.catch_warnings(): - warnings.simplefilter("error", DeprecationWarning) - - # DeprecationWarning raised with high == None - assert_raises(DeprecationWarning, - random.random_integers, - np.iinfo('l').max) - - # DeprecationWarning raised with high != None - assert_raises(DeprecationWarning, - random.random_integers, - np.iinfo('l').max, np.iinfo('l').max) - - def test_random_sample(self): - random.seed(self.seed) - actual = random.random_sample((3, 2)) - desired = np.array([[0.61879477158567997, 0.59162362775974664], - [0.88868358904449662, 0.89165480011560816], - [0.4575674820298663, 0.7781880808593471]]) - assert_array_almost_equal(actual, desired, decimal=15) - - random.seed(self.seed) - actual = random.random_sample() - assert_array_almost_equal(actual, desired[0, 0], decimal=15) - - def test_choice_uniform_replace(self): - random.seed(self.seed) - actual = random.choice(4, 4) - desired = np.array([2, 3, 2, 3]) - assert_array_equal(actual, desired) - - def test_choice_nonuniform_replace(self): - random.seed(self.seed) - actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) - desired = np.array([1, 1, 2, 2]) - assert_array_equal(actual, desired) - - def test_choice_uniform_noreplace(self): - random.seed(self.seed) - actual = random.choice(4, 3, replace=False) - desired = np.array([0, 1, 3]) - assert_array_equal(actual, desired) - - def test_choice_nonuniform_noreplace(self): - random.seed(self.seed) - actual = random.choice(4, 3, 
replace=False, p=[0.1, 0.3, 0.5, 0.1]) - desired = np.array([2, 3, 1]) - assert_array_equal(actual, desired) - - def test_choice_noninteger(self): - random.seed(self.seed) - actual = random.choice(['a', 'b', 'c', 'd'], 4) - desired = np.array(['c', 'd', 'c', 'd']) - assert_array_equal(actual, desired) - - def test_choice_exceptions(self): - sample = random.choice - assert_raises(ValueError, sample, -1, 3) - assert_raises(ValueError, sample, 3., 3) - assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3) - assert_raises(ValueError, sample, [], 3) - assert_raises(ValueError, sample, [1, 2, 3, 4], 3, - p=[[0.25, 0.25], [0.25, 0.25]]) - assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2]) - assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1]) - assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4]) - assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False) - # gh-13087 - assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False) - assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False) - assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False) - assert_raises(ValueError, sample, [1, 2, 3], 2, - replace=False, p=[1, 0, 0]) - - def test_choice_return_shape(self): - p = [0.1, 0.9] - # Check scalar - assert_(np.isscalar(random.choice(2, replace=True))) - assert_(np.isscalar(random.choice(2, replace=False))) - assert_(np.isscalar(random.choice(2, replace=True, p=p))) - assert_(np.isscalar(random.choice(2, replace=False, p=p))) - assert_(np.isscalar(random.choice([1, 2], replace=True))) - assert_(random.choice([None], replace=True) is None) - a = np.array([1, 2]) - arr = np.empty(1, dtype=object) - arr[0] = a - assert_(random.choice(arr, replace=True) is a) - - # Check 0-d array - s = tuple() - assert_(not np.isscalar(random.choice(2, s, replace=True))) - assert_(not np.isscalar(random.choice(2, s, replace=False))) - assert_(not np.isscalar(random.choice(2, s, replace=True, p=p))) - assert_(not 
np.isscalar(random.choice(2, s, replace=False, p=p))) - assert_(not np.isscalar(random.choice([1, 2], s, replace=True))) - assert_(random.choice([None], s, replace=True).ndim == 0) - a = np.array([1, 2]) - arr = np.empty(1, dtype=object) - arr[0] = a - assert_(random.choice(arr, s, replace=True).item() is a) - - # Check multi dimensional array - s = (2, 3) - p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2] - assert_equal(random.choice(6, s, replace=True).shape, s) - assert_equal(random.choice(6, s, replace=False).shape, s) - assert_equal(random.choice(6, s, replace=True, p=p).shape, s) - assert_equal(random.choice(6, s, replace=False, p=p).shape, s) - assert_equal(random.choice(np.arange(6), s, replace=True).shape, s) - - # Check zero-size - assert_equal(random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4)) - assert_equal(random.randint(0, -10, size=0).shape, (0,)) - assert_equal(random.randint(10, 10, size=0).shape, (0,)) - assert_equal(random.choice(0, size=0).shape, (0,)) - assert_equal(random.choice([], size=(0,)).shape, (0,)) - assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape, - (3, 0, 4)) - assert_raises(ValueError, random.choice, [], 10) - - def test_choice_nan_probabilities(self): - a = np.array([42, 1, 2]) - p = [None, None, None] - assert_raises(ValueError, random.choice, a, p=p) - - def test_choice_p_non_contiguous(self): - p = np.ones(10) / 5 - p[1::2] = 3.0 - random.seed(self.seed) - non_contig = random.choice(5, 3, p=p[::2]) - random.seed(self.seed) - contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2])) - assert_array_equal(non_contig, contig) - - def test_bytes(self): - random.seed(self.seed) - actual = random.bytes(10) - desired = b'\x82Ui\x9e\xff\x97+Wf\xa5' - assert_equal(actual, desired) - - def test_shuffle(self): - # Test lists, arrays (of various dtypes), and multidimensional versions - # of both, c-contiguous or not: - for conv in [lambda x: np.array([]), - lambda x: x, - lambda x: np.asarray(x).astype(np.int8), - lambda x: 
np.asarray(x).astype(np.float32), - lambda x: np.asarray(x).astype(np.complex64), - lambda x: np.asarray(x).astype(object), - lambda x: [(i, i) for i in x], - lambda x: np.asarray([[i, i] for i in x]), - lambda x: np.vstack([x, x]).T, - # gh-11442 - lambda x: (np.asarray([(i, i) for i in x], - [("a", int), ("b", int)]) - .view(np.recarray)), - # gh-4270 - lambda x: np.asarray([(i, i) for i in x], - [("a", object, (1,)), - ("b", np.int32, (1,))])]: - random.seed(self.seed) - alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) - random.shuffle(alist) - actual = alist - desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3]) - assert_array_equal(actual, desired) - - def test_shuffle_masked(self): - # gh-3263 - a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1) - b = np.ma.masked_values(np.arange(20) % 3 - 1, -1) - a_orig = a.copy() - b_orig = b.copy() - for i in range(50): - random.shuffle(a) - assert_equal( - sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask])) - random.shuffle(b) - assert_equal( - sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask])) - - def test_permutation(self): - random.seed(self.seed) - alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] - actual = random.permutation(alist) - desired = [0, 1, 9, 6, 2, 4, 5, 8, 7, 3] - assert_array_equal(actual, desired) - - random.seed(self.seed) - arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T - actual = random.permutation(arr_2d) - assert_array_equal(actual, np.atleast_2d(desired).T) - - random.seed(self.seed) - bad_x_str = "abcd" - assert_raises(IndexError, random.permutation, bad_x_str) - - random.seed(self.seed) - bad_x_float = 1.2 - assert_raises(IndexError, random.permutation, bad_x_float) - - integer_val = 10 - desired = [9, 0, 8, 5, 1, 3, 4, 7, 6, 2] - - random.seed(self.seed) - actual = random.permutation(integer_val) - assert_array_equal(actual, desired) - - def test_beta(self): - random.seed(self.seed) - actual = random.beta(.1, .9, size=(3, 2)) - desired = np.array( - 
[[1.45341850513746058e-02, 5.31297615662868145e-04], - [1.85366619058432324e-06, 4.19214516800110563e-03], - [1.58405155108498093e-04, 1.26252891949397652e-04]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_binomial(self): - random.seed(self.seed) - actual = random.binomial(100.123, .456, size=(3, 2)) - desired = np.array([[37, 43], - [42, 48], - [46, 45]]) - assert_array_equal(actual, desired) - - random.seed(self.seed) - actual = random.binomial(100.123, .456) - desired = 37 - assert_array_equal(actual, desired) - - def test_chisquare(self): - random.seed(self.seed) - actual = random.chisquare(50, size=(3, 2)) - desired = np.array([[63.87858175501090585, 68.68407748911370447], - [65.77116116901505904, 47.09686762438974483], - [72.3828403199695174, 74.18408615260374006]]) - assert_array_almost_equal(actual, desired, decimal=13) - - def test_dirichlet(self): - random.seed(self.seed) - alpha = np.array([51.72840233779265162, 39.74494232180943953]) - actual = random.dirichlet(alpha, size=(3, 2)) - desired = np.array([[[0.54539444573611562, 0.45460555426388438], - [0.62345816822039413, 0.37654183177960598]], - [[0.55206000085785778, 0.44793999914214233], - [0.58964023305154301, 0.41035976694845688]], - [[0.59266909280647828, 0.40733090719352177], - [0.56974431743975207, 0.43025568256024799]]]) - assert_array_almost_equal(actual, desired, decimal=15) - bad_alpha = np.array([5.4e-01, -1.0e-16]) - assert_raises(ValueError, random.dirichlet, bad_alpha) - - random.seed(self.seed) - alpha = np.array([51.72840233779265162, 39.74494232180943953]) - actual = random.dirichlet(alpha) - assert_array_almost_equal(actual, desired[0, 0], decimal=15) - - def test_dirichlet_size(self): - # gh-3173 - p = np.array([51.72840233779265162, 39.74494232180943953]) - assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) - assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) - assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) - 
assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2)) - assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2)) - assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2)) - - assert_raises(TypeError, random.dirichlet, p, float(1)) - - def test_dirichlet_bad_alpha(self): - # gh-2089 - alpha = np.array([5.4e-01, -1.0e-16]) - assert_raises(ValueError, random.dirichlet, alpha) - - def test_dirichlet_alpha_non_contiguous(self): - a = np.array([51.72840233779265162, -1.0, 39.74494232180943953]) - alpha = a[::2] - random.seed(self.seed) - non_contig = random.dirichlet(alpha, size=(3, 2)) - random.seed(self.seed) - contig = random.dirichlet(np.ascontiguousarray(alpha), - size=(3, 2)) - assert_array_almost_equal(non_contig, contig) - - def test_exponential(self): - random.seed(self.seed) - actual = random.exponential(1.1234, size=(3, 2)) - desired = np.array([[1.08342649775011624, 1.00607889924557314], - [2.46628830085216721, 2.49668106809923884], - [0.68717433461363442, 1.69175666993575979]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_exponential_0(self): - assert_equal(random.exponential(scale=0), 0) - assert_raises(ValueError, random.exponential, scale=-0.) - - def test_f(self): - random.seed(self.seed) - actual = random.f(12, 77, size=(3, 2)) - desired = np.array([[1.21975394418575878, 1.75135759791559775], - [1.44803115017146489, 1.22108959480396262], - [1.02176975757740629, 1.34431827623300415]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_gamma(self): - random.seed(self.seed) - actual = random.gamma(5, 3, size=(3, 2)) - desired = np.array([[24.60509188649287182, 28.54993563207210627], - [26.13476110204064184, 12.56988482927716078], - [31.71863275789960568, 33.30143302795922011]]) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_gamma_0(self): - assert_equal(random.gamma(shape=0, scale=0), 0) - assert_raises(ValueError, random.gamma, shape=-0., scale=-0.) 
- - def test_geometric(self): - random.seed(self.seed) - actual = random.geometric(.123456789, size=(3, 2)) - desired = np.array([[8, 7], - [17, 17], - [5, 12]]) - assert_array_equal(actual, desired) - - def test_geometric_exceptions(self): - assert_raises(ValueError, random.geometric, 1.1) - assert_raises(ValueError, random.geometric, [1.1] * 10) - assert_raises(ValueError, random.geometric, -0.1) - assert_raises(ValueError, random.geometric, [-0.1] * 10) - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - assert_raises(ValueError, random.geometric, np.nan) - assert_raises(ValueError, random.geometric, [np.nan] * 10) - - def test_gumbel(self): - random.seed(self.seed) - actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) - desired = np.array([[0.19591898743416816, 0.34405539668096674], - [-1.4492522252274278, -1.47374816298446865], - [1.10651090478803416, -0.69535848626236174]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_gumbel_0(self): - assert_equal(random.gumbel(scale=0), 0) - assert_raises(ValueError, random.gumbel, scale=-0.) 
- - def test_hypergeometric(self): - random.seed(self.seed) - actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2)) - desired = np.array([[10, 10], - [10, 10], - [9, 9]]) - assert_array_equal(actual, desired) - - # Test nbad = 0 - actual = random.hypergeometric(5, 0, 3, size=4) - desired = np.array([3, 3, 3, 3]) - assert_array_equal(actual, desired) - - actual = random.hypergeometric(15, 0, 12, size=4) - desired = np.array([12, 12, 12, 12]) - assert_array_equal(actual, desired) - - # Test ngood = 0 - actual = random.hypergeometric(0, 5, 3, size=4) - desired = np.array([0, 0, 0, 0]) - assert_array_equal(actual, desired) - - actual = random.hypergeometric(0, 15, 12, size=4) - desired = np.array([0, 0, 0, 0]) - assert_array_equal(actual, desired) - - def test_laplace(self): - random.seed(self.seed) - actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) - desired = np.array([[0.66599721112760157, 0.52829452552221945], - [3.12791959514407125, 3.18202813572992005], - [-0.05391065675859356, 1.74901336242837324]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_laplace_0(self): - assert_equal(random.laplace(scale=0), 0) - assert_raises(ValueError, random.laplace, scale=-0.) 
- - def test_logistic(self): - random.seed(self.seed) - actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) - desired = np.array([[1.09232835305011444, 0.8648196662399954], - [4.27818590694950185, 4.33897006346929714], - [-0.21682183359214885, 2.63373365386060332]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_lognormal(self): - random.seed(self.seed) - actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) - desired = np.array([[16.50698631688883822, 36.54846706092654784], - [22.67886599981281748, 0.71617561058995771], - [65.72798501792723869, 86.84341601437161273]]) - assert_array_almost_equal(actual, desired, decimal=13) - - def test_lognormal_0(self): - assert_equal(random.lognormal(sigma=0), 1) - assert_raises(ValueError, random.lognormal, sigma=-0.) - - def test_logseries(self): - random.seed(self.seed) - actual = random.logseries(p=.923456789, size=(3, 2)) - desired = np.array([[2, 2], - [6, 17], - [3, 6]]) - assert_array_equal(actual, desired) - - def test_logseries_exceptions(self): - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - assert_raises(ValueError, random.logseries, np.nan) - assert_raises(ValueError, random.logseries, [np.nan] * 10) - - def test_multinomial(self): - random.seed(self.seed) - actual = random.multinomial(20, [1 / 6.] 
* 6, size=(3, 2)) - desired = np.array([[[4, 3, 5, 4, 2, 2], - [5, 2, 8, 2, 2, 1]], - [[3, 4, 3, 6, 0, 4], - [2, 1, 4, 3, 6, 4]], - [[4, 4, 2, 5, 2, 3], - [4, 3, 4, 2, 3, 4]]]) - assert_array_equal(actual, desired) - - def test_multivariate_normal(self): - random.seed(self.seed) - mean = (.123456789, 10) - cov = [[1, 0], [0, 1]] - size = (3, 2) - actual = random.multivariate_normal(mean, cov, size) - desired = np.array([[[1.463620246718631, 11.73759122771936], - [1.622445133300628, 9.771356667546383]], - [[2.154490787682787, 12.170324946056553], - [1.719909438201865, 9.230548443648306]], - [[0.689515026297799, 9.880729819607714], - [-0.023054015651998, 9.201096623542879]]]) - - assert_array_almost_equal(actual, desired, decimal=15) - - # Check for default size, was raising deprecation warning - actual = random.multivariate_normal(mean, cov) - desired = np.array([0.895289569463708, 9.17180864067987]) - assert_array_almost_equal(actual, desired, decimal=15) - - # Check that non positive-semidefinite covariance warns with - # RuntimeWarning - mean = [0, 0] - cov = [[1, 2], [2, 1]] - assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov) - - # and that it doesn't warn with RuntimeWarning check_valid='ignore' - assert_no_warnings(random.multivariate_normal, mean, cov, - check_valid='ignore') - - # and that it raises with RuntimeWarning check_valid='raises' - assert_raises(ValueError, random.multivariate_normal, mean, cov, - check_valid='raise') - - cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) - with suppress_warnings() as sup: - random.multivariate_normal(mean, cov) - w = sup.record(RuntimeWarning) - assert len(w) == 0 - - mu = np.zeros(2) - cov = np.eye(2) - assert_raises(ValueError, random.multivariate_normal, mean, cov, - check_valid='other') - assert_raises(ValueError, random.multivariate_normal, - np.zeros((2, 1, 1)), cov) - assert_raises(ValueError, random.multivariate_normal, - mu, np.empty((3, 2))) - assert_raises(ValueError, 
random.multivariate_normal, - mu, np.eye(3)) - - def test_negative_binomial(self): - random.seed(self.seed) - actual = random.negative_binomial(n=100, p=.12345, size=(3, 2)) - desired = np.array([[848, 841], - [892, 611], - [779, 647]]) - assert_array_equal(actual, desired) - - def test_negative_binomial_exceptions(self): - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - assert_raises(ValueError, random.negative_binomial, 100, np.nan) - assert_raises(ValueError, random.negative_binomial, 100, - [np.nan] * 10) - - def test_noncentral_chisquare(self): - random.seed(self.seed) - actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) - desired = np.array([[23.91905354498517511, 13.35324692733826346], - [31.22452661329736401, 16.60047399466177254], - [5.03461598262724586, 17.94973089023519464]]) - assert_array_almost_equal(actual, desired, decimal=14) - - actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) - desired = np.array([[1.47145377828516666, 0.15052899268012659], - [0.00943803056963588, 1.02647251615666169], - [0.332334982684171, 0.15451287602753125]]) - assert_array_almost_equal(actual, desired, decimal=14) - - random.seed(self.seed) - actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) - desired = np.array([[9.597154162763948, 11.725484450296079], - [10.413711048138335, 3.694475922923986], - [13.484222138963087, 14.377255424602957]]) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_noncentral_f(self): - random.seed(self.seed) - actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1, - size=(3, 2)) - desired = np.array([[1.40598099674926669, 0.34207973179285761], - [3.57715069265772545, 7.92632662577829805], - [0.43741599463544162, 1.1774208752428319]]) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_noncentral_f_nan(self): - random.seed(self.seed) - actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan) - assert np.isnan(actual) - - def test_normal(self): - 
random.seed(self.seed) - actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2)) - desired = np.array([[2.80378370443726244, 3.59863924443872163], - [3.121433477601256, -0.33382987590723379], - [4.18552478636557357, 4.46410668111310471]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_normal_0(self): - assert_equal(random.normal(scale=0), 0) - assert_raises(ValueError, random.normal, scale=-0.) - - def test_pareto(self): - random.seed(self.seed) - actual = random.pareto(a=.123456789, size=(3, 2)) - desired = np.array( - [[2.46852460439034849e+03, 1.41286880810518346e+03], - [5.28287797029485181e+07, 6.57720981047328785e+07], - [1.40840323350391515e+02, 1.98390255135251704e+05]]) - # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this - # matrix differs by 24 nulps. Discussion: - # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html - # Consensus is that this is probably some gcc quirk that affects - # rounding but not in any important way, so we just use a looser - # tolerance on this test: - np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) - - def test_poisson(self): - random.seed(self.seed) - actual = random.poisson(lam=.123456789, size=(3, 2)) - desired = np.array([[0, 0], - [1, 0], - [0, 0]]) - assert_array_equal(actual, desired) - - def test_poisson_exceptions(self): - lambig = np.iinfo('l').max - lamneg = -1 - assert_raises(ValueError, random.poisson, lamneg) - assert_raises(ValueError, random.poisson, [lamneg] * 10) - assert_raises(ValueError, random.poisson, lambig) - assert_raises(ValueError, random.poisson, [lambig] * 10) - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - assert_raises(ValueError, random.poisson, np.nan) - assert_raises(ValueError, random.poisson, [np.nan] * 10) - - def test_power(self): - random.seed(self.seed) - actual = random.power(a=.123456789, size=(3, 2)) - desired = np.array([[0.02048932883240791, 0.01424192241128213], - 
[0.38446073748535298, 0.39499689943484395], - [0.00177699707563439, 0.13115505880863756]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_rayleigh(self): - random.seed(self.seed) - actual = random.rayleigh(scale=10, size=(3, 2)) - desired = np.array([[13.8882496494248393, 13.383318339044731], - [20.95413364294492098, 21.08285015800712614], - [11.06066537006854311, 17.35468505778271009]]) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_rayleigh_0(self): - assert_equal(random.rayleigh(scale=0), 0) - assert_raises(ValueError, random.rayleigh, scale=-0.) - - def test_standard_cauchy(self): - random.seed(self.seed) - actual = random.standard_cauchy(size=(3, 2)) - desired = np.array([[0.77127660196445336, -6.55601161955910605], - [0.93582023391158309, -2.07479293013759447], - [-4.74601644297011926, 0.18338989290760804]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_standard_exponential(self): - random.seed(self.seed) - actual = random.standard_exponential(size=(3, 2)) - desired = np.array([[0.96441739162374596, 0.89556604882105506], - [2.1953785836319808, 2.22243285392490542], - [0.6116915921431676, 1.50592546727413201]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_standard_gamma(self): - random.seed(self.seed) - actual = random.standard_gamma(shape=3, size=(3, 2)) - desired = np.array([[5.50841531318455058, 6.62953470301903103], - [5.93988484943779227, 2.31044849402133989], - [7.54838614231317084, 8.012756093271868]]) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_standard_gamma_0(self): - assert_equal(random.standard_gamma(shape=0), 0) - assert_raises(ValueError, random.standard_gamma, shape=-0.) 
- - def test_standard_normal(self): - random.seed(self.seed) - actual = random.standard_normal(size=(3, 2)) - desired = np.array([[1.34016345771863121, 1.73759122771936081], - [1.498988344300628, -0.2286433324536169], - [2.031033998682787, 2.17032494605655257]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_randn_singleton(self): - random.seed(self.seed) - actual = random.randn() - desired = np.array(1.34016345771863121) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_standard_t(self): - random.seed(self.seed) - actual = random.standard_t(df=10, size=(3, 2)) - desired = np.array([[0.97140611862659965, -0.08830486548450577], - [1.36311143689505321, -0.55317463909867071], - [-0.18473749069684214, 0.61181537341755321]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_triangular(self): - random.seed(self.seed) - actual = random.triangular(left=5.12, mode=10.23, right=20.34, - size=(3, 2)) - desired = np.array([[12.68117178949215784, 12.4129206149193152], - [16.20131377335158263, 16.25692138747600524], - [11.20400690911820263, 14.4978144835829923]]) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_uniform(self): - random.seed(self.seed) - actual = random.uniform(low=1.23, high=10.54, size=(3, 2)) - desired = np.array([[6.99097932346268003, 6.73801597444323974], - [9.50364421400426274, 9.53130618907631089], - [5.48995325769805476, 8.47493103280052118]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_uniform_range_bounds(self): - fmin = np.finfo('float').min - fmax = np.finfo('float').max - - func = random.uniform - assert_raises(OverflowError, func, -np.inf, 0) - assert_raises(OverflowError, func, 0, np.inf) - assert_raises(OverflowError, func, fmin, fmax) - assert_raises(OverflowError, func, [-np.inf], [0]) - assert_raises(OverflowError, func, [0], [np.inf]) - - # (fmax / 1e17) - fmin is within range, so this should not throw - # account for i386 extended 
precision DBL_MAX / 1e17 + DBL_MAX > - # DBL_MAX by increasing fmin a bit - random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17) - - def test_scalar_exception_propagation(self): - # Tests that exceptions are correctly propagated in distributions - # when called with objects that throw exceptions when converted to - # scalars. - # - # Regression test for gh: 8865 - - class ThrowingFloat(np.ndarray): - def __float__(self): - raise TypeError - - throwing_float = np.array(1.0).view(ThrowingFloat) - assert_raises(TypeError, random.uniform, throwing_float, - throwing_float) - - class ThrowingInteger(np.ndarray): - def __int__(self): - raise TypeError - - throwing_int = np.array(1).view(ThrowingInteger) - assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1) - - def test_vonmises(self): - random.seed(self.seed) - actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) - desired = np.array([[2.28567572673902042, 2.89163838442285037], - [0.38198375564286025, 2.57638023113890746], - [1.19153771588353052, 1.83509849681825354]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_vonmises_small(self): - # check infinite loop, gh-4720 - random.seed(self.seed) - r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6) - assert_(np.isfinite(r).all()) - - def test_vonmises_nan(self): - random.seed(self.seed) - r = random.vonmises(mu=0., kappa=np.nan) - assert_(np.isnan(r)) - - def test_wald(self): - random.seed(self.seed) - actual = random.wald(mean=1.23, scale=1.54, size=(3, 2)) - desired = np.array([[3.82935265715889983, 5.13125249184285526], - [0.35045403618358717, 1.50832396872003538], - [0.24124319895843183, 0.22031101461955038]]) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_weibull(self): - random.seed(self.seed) - actual = random.weibull(a=1.23, size=(3, 2)) - desired = np.array([[0.97097342648766727, 0.91422896443565516], - [1.89517770034962929, 1.91414357960479564], - [0.67057783752390987, 
1.39494046635066793]]) - assert_array_almost_equal(actual, desired, decimal=15) - - def test_weibull_0(self): - random.seed(self.seed) - assert_equal(random.weibull(a=0, size=12), np.zeros(12)) - assert_raises(ValueError, random.weibull, a=-0.) - - def test_zipf(self): - random.seed(self.seed) - actual = random.zipf(a=1.23, size=(3, 2)) - desired = np.array([[66, 29], - [1, 1], - [3, 13]]) - assert_array_equal(actual, desired) - - -class TestBroadcast(object): - # tests that functions that broadcast behave - # correctly when presented with non-scalar arguments - def setup(self): - self.seed = 123456789 - - def set_seed(self): - random.seed(self.seed) - - def test_uniform(self): - low = [0] - high = [1] - uniform = random.uniform - desired = np.array([0.53283302478975902, - 0.53413660089041659, - 0.50955303552646702]) - - self.set_seed() - actual = uniform(low * 3, high) - assert_array_almost_equal(actual, desired, decimal=14) - - self.set_seed() - actual = uniform(low, high * 3) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_normal(self): - loc = [0] - scale = [1] - bad_scale = [-1] - normal = random.normal - desired = np.array([2.2129019979039612, - 2.1283977976520019, - 1.8417114045748335]) - - self.set_seed() - actual = normal(loc * 3, scale) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, normal, loc * 3, bad_scale) - - self.set_seed() - actual = normal(loc, scale * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, normal, loc, bad_scale * 3) - - def test_beta(self): - a = [1] - b = [2] - bad_a = [-1] - bad_b = [-2] - beta = random.beta - desired = np.array([0.19843558305989056, - 0.075230336409423643, - 0.24976865978980844]) - - self.set_seed() - actual = beta(a * 3, b) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, beta, bad_a * 3, b) - assert_raises(ValueError, beta, a * 3, bad_b) - - self.set_seed() - actual = beta(a, b * 
3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, beta, bad_a, b * 3) - assert_raises(ValueError, beta, a, bad_b * 3) - - def test_exponential(self): - scale = [1] - bad_scale = [-1] - exponential = random.exponential - desired = np.array([0.76106853658845242, - 0.76386282278691653, - 0.71243813125891797]) - - self.set_seed() - actual = exponential(scale * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, exponential, bad_scale * 3) - - def test_standard_gamma(self): - shape = [1] - bad_shape = [-1] - std_gamma = random.standard_gamma - desired = np.array([0.76106853658845242, - 0.76386282278691653, - 0.71243813125891797]) - - self.set_seed() - actual = std_gamma(shape * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, std_gamma, bad_shape * 3) - - def test_gamma(self): - shape = [1] - scale = [2] - bad_shape = [-1] - bad_scale = [-2] - gamma = random.gamma - desired = np.array([1.5221370731769048, - 1.5277256455738331, - 1.4248762625178359]) - - self.set_seed() - actual = gamma(shape * 3, scale) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gamma, bad_shape * 3, scale) - assert_raises(ValueError, gamma, shape * 3, bad_scale) - - self.set_seed() - actual = gamma(shape, scale * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gamma, bad_shape, scale * 3) - assert_raises(ValueError, gamma, shape, bad_scale * 3) - - def test_f(self): - dfnum = [1] - dfden = [2] - bad_dfnum = [-1] - bad_dfden = [-2] - f = random.f - desired = np.array([0.80038951638264799, - 0.86768719635363512, - 2.7251095168386801]) - - self.set_seed() - actual = f(dfnum * 3, dfden) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, f, bad_dfnum * 3, dfden) - assert_raises(ValueError, f, dfnum * 3, bad_dfden) - - self.set_seed() - actual = f(dfnum, dfden * 3) - 
assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, f, bad_dfnum, dfden * 3) - assert_raises(ValueError, f, dfnum, bad_dfden * 3) - - def test_noncentral_f(self): - dfnum = [2] - dfden = [3] - nonc = [4] - bad_dfnum = [0] - bad_dfden = [-1] - bad_nonc = [-2] - nonc_f = random.noncentral_f - desired = np.array([9.1393943263705211, - 13.025456344595602, - 8.8018098359100545]) - - self.set_seed() - actual = nonc_f(dfnum * 3, dfden, nonc) - assert_array_almost_equal(actual, desired, decimal=14) - assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3))) - - assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) - assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) - assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) - - self.set_seed() - actual = nonc_f(dfnum, dfden * 3, nonc) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) - assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) - assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) - - self.set_seed() - actual = nonc_f(dfnum, dfden, nonc * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) - assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) - assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) - - def test_noncentral_f_small_df(self): - self.set_seed() - desired = np.array([6.869638627492048, 0.785880199263955]) - actual = random.noncentral_f(0.9, 0.9, 2, size=2) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_chisquare(self): - df = [1] - bad_df = [-1] - chisquare = random.chisquare - desired = np.array([0.57022801133088286, - 0.51947702108840776, - 0.1320969254923558]) - - self.set_seed() - actual = chisquare(df * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, chisquare, bad_df * 3) - - def 
test_noncentral_chisquare(self): - df = [1] - nonc = [2] - bad_df = [-1] - bad_nonc = [-2] - nonc_chi = random.noncentral_chisquare - desired = np.array([9.0015599467913763, - 4.5804135049718742, - 6.0872302432834564]) - - self.set_seed() - actual = nonc_chi(df * 3, nonc) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) - assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) - - self.set_seed() - actual = nonc_chi(df, nonc * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) - assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) - - def test_standard_t(self): - df = [1] - bad_df = [-1] - t = random.standard_t - desired = np.array([3.0702872575217643, - 5.8560725167361607, - 1.0274791436474273]) - - self.set_seed() - actual = t(df * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, t, bad_df * 3) - assert_raises(ValueError, random.standard_t, bad_df * 3) - - def test_vonmises(self): - mu = [2] - kappa = [1] - bad_kappa = [-1] - vonmises = random.vonmises - desired = np.array([2.9883443664201312, - -2.7064099483995943, - -1.8672476700665914]) - - self.set_seed() - actual = vonmises(mu * 3, kappa) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, vonmises, mu * 3, bad_kappa) - - self.set_seed() - actual = vonmises(mu, kappa * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, vonmises, mu, bad_kappa * 3) - - def test_pareto(self): - a = [1] - bad_a = [-1] - pareto = random.pareto - desired = np.array([1.1405622680198362, - 1.1465519762044529, - 1.0389564467453547]) - - self.set_seed() - actual = pareto(a * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, pareto, bad_a * 3) - assert_raises(ValueError, random.pareto, bad_a * 3) - - def test_weibull(self): - a = [1] - bad_a = [-1] - weibull = 
random.weibull - desired = np.array([0.76106853658845242, - 0.76386282278691653, - 0.71243813125891797]) - - self.set_seed() - actual = weibull(a * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, weibull, bad_a * 3) - assert_raises(ValueError, random.weibull, bad_a * 3) - - def test_power(self): - a = [1] - bad_a = [-1] - power = random.power - desired = np.array([0.53283302478975902, - 0.53413660089041659, - 0.50955303552646702]) - - self.set_seed() - actual = power(a * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, power, bad_a * 3) - assert_raises(ValueError, random.power, bad_a * 3) - - def test_laplace(self): - loc = [0] - scale = [1] - bad_scale = [-1] - laplace = random.laplace - desired = np.array([0.067921356028507157, - 0.070715642226971326, - 0.019290950698972624]) - - self.set_seed() - actual = laplace(loc * 3, scale) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, laplace, loc * 3, bad_scale) - - self.set_seed() - actual = laplace(loc, scale * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, laplace, loc, bad_scale * 3) - - def test_gumbel(self): - loc = [0] - scale = [1] - bad_scale = [-1] - gumbel = random.gumbel - desired = np.array([0.2730318639556768, - 0.26936705726291116, - 0.33906220393037939]) - - self.set_seed() - actual = gumbel(loc * 3, scale) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gumbel, loc * 3, bad_scale) - - self.set_seed() - actual = gumbel(loc, scale * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gumbel, loc, bad_scale * 3) - - def test_logistic(self): - loc = [0] - scale = [1] - bad_scale = [-1] - logistic = random.logistic - desired = np.array([0.13152135837586171, - 0.13675915696285773, - 0.038216792802833396]) - - self.set_seed() - actual = logistic(loc * 3, scale) - 
assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, logistic, loc * 3, bad_scale) - - self.set_seed() - actual = logistic(loc, scale * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, logistic, loc, bad_scale * 3) - assert_equal(random.logistic(1.0, 0.0), 1.0) - - def test_lognormal(self): - mean = [0] - sigma = [1] - bad_sigma = [-1] - lognormal = random.lognormal - desired = np.array([9.1422086044848427, - 8.4013952870126261, - 6.3073234116578671]) - - self.set_seed() - actual = lognormal(mean * 3, sigma) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, lognormal, mean * 3, bad_sigma) - assert_raises(ValueError, random.lognormal, mean * 3, bad_sigma) - - self.set_seed() - actual = lognormal(mean, sigma * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, lognormal, mean, bad_sigma * 3) - assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3) - - def test_rayleigh(self): - scale = [1] - bad_scale = [-1] - rayleigh = random.rayleigh - desired = np.array([1.2337491937897689, - 1.2360119924878694, - 1.1936818095781789]) - - self.set_seed() - actual = rayleigh(scale * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, rayleigh, bad_scale * 3) - - def test_wald(self): - mean = [0.5] - scale = [1] - bad_mean = [0] - bad_scale = [-2] - wald = random.wald - desired = np.array([0.11873681120271318, - 0.12450084820795027, - 0.9096122728408238]) - - self.set_seed() - actual = wald(mean * 3, scale) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, wald, bad_mean * 3, scale) - assert_raises(ValueError, wald, mean * 3, bad_scale) - assert_raises(ValueError, random.wald, bad_mean * 3, scale) - assert_raises(ValueError, random.wald, mean * 3, bad_scale) - - self.set_seed() - actual = wald(mean, scale * 3) - assert_array_almost_equal(actual, desired, 
decimal=14) - assert_raises(ValueError, wald, bad_mean, scale * 3) - assert_raises(ValueError, wald, mean, bad_scale * 3) - assert_raises(ValueError, wald, 0.0, 1) - assert_raises(ValueError, wald, 0.5, 0.0) - - def test_triangular(self): - left = [1] - right = [3] - mode = [2] - bad_left_one = [3] - bad_mode_one = [4] - bad_left_two, bad_mode_two = right * 2 - triangular = random.triangular - desired = np.array([2.03339048710429, - 2.0347400359389356, - 2.0095991069536208]) - - self.set_seed() - actual = triangular(left * 3, mode, right) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one * 3, mode, right) - assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) - assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, - right) - - self.set_seed() - actual = triangular(left, mode * 3, right) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) - assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) - assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, - right) - - self.set_seed() - actual = triangular(left, mode, right * 3) - assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) - assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) - assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, - right * 3) - - assert_raises(ValueError, triangular, 10., 0., 20.) - assert_raises(ValueError, triangular, 10., 25., 20.) - assert_raises(ValueError, triangular, 10., 10., 10.) 
- - def test_binomial(self): - n = [1] - p = [0.5] - bad_n = [-1] - bad_p_one = [-1] - bad_p_two = [1.5] - binom = random.binomial - desired = np.array([1, 1, 1]) - - self.set_seed() - actual = binom(n * 3, p) - assert_array_equal(actual, desired) - assert_raises(ValueError, binom, bad_n * 3, p) - assert_raises(ValueError, binom, n * 3, bad_p_one) - assert_raises(ValueError, binom, n * 3, bad_p_two) - - self.set_seed() - actual = binom(n, p * 3) - assert_array_equal(actual, desired) - assert_raises(ValueError, binom, bad_n, p * 3) - assert_raises(ValueError, binom, n, bad_p_one * 3) - assert_raises(ValueError, binom, n, bad_p_two * 3) - - def test_negative_binomial(self): - n = [1] - p = [0.5] - bad_n = [-1] - bad_p_one = [-1] - bad_p_two = [1.5] - neg_binom = random.negative_binomial - desired = np.array([1, 0, 1]) - - self.set_seed() - actual = neg_binom(n * 3, p) - assert_array_equal(actual, desired) - assert_raises(ValueError, neg_binom, bad_n * 3, p) - assert_raises(ValueError, neg_binom, n * 3, bad_p_one) - assert_raises(ValueError, neg_binom, n * 3, bad_p_two) - - self.set_seed() - actual = neg_binom(n, p * 3) - assert_array_equal(actual, desired) - assert_raises(ValueError, neg_binom, bad_n, p * 3) - assert_raises(ValueError, neg_binom, n, bad_p_one * 3) - assert_raises(ValueError, neg_binom, n, bad_p_two * 3) - - def test_poisson(self): - max_lam = random.RandomState()._poisson_lam_max - - lam = [1] - bad_lam_one = [-1] - bad_lam_two = [max_lam * 2] - poisson = random.poisson - desired = np.array([1, 1, 0]) - - self.set_seed() - actual = poisson(lam * 3) - assert_array_equal(actual, desired) - assert_raises(ValueError, poisson, bad_lam_one * 3) - assert_raises(ValueError, poisson, bad_lam_two * 3) - - def test_zipf(self): - a = [2] - bad_a = [0] - zipf = random.zipf - desired = np.array([2, 2, 1]) - - self.set_seed() - actual = zipf(a * 3) - assert_array_equal(actual, desired) - assert_raises(ValueError, zipf, bad_a * 3) - with 
np.errstate(invalid='ignore'): - assert_raises(ValueError, zipf, np.nan) - assert_raises(ValueError, zipf, [0, 0, np.nan]) - - def test_geometric(self): - p = [0.5] - bad_p_one = [-1] - bad_p_two = [1.5] - geom = random.geometric - desired = np.array([2, 2, 2]) - - self.set_seed() - actual = geom(p * 3) - assert_array_equal(actual, desired) - assert_raises(ValueError, geom, bad_p_one * 3) - assert_raises(ValueError, geom, bad_p_two * 3) - - def test_hypergeometric(self): - ngood = [1] - nbad = [2] - nsample = [2] - bad_ngood = [-1] - bad_nbad = [-2] - bad_nsample_one = [0] - bad_nsample_two = [4] - hypergeom = random.hypergeometric - desired = np.array([1, 1, 1]) - - self.set_seed() - actual = hypergeom(ngood * 3, nbad, nsample) - assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample) - assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample) - assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one) - assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two) - - self.set_seed() - actual = hypergeom(ngood, nbad * 3, nsample) - assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample) - assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample) - assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one) - assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two) - - self.set_seed() - actual = hypergeom(ngood, nbad, nsample * 3) - assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3) - assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) - assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) - assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) - - assert_raises(ValueError, hypergeom, -1, 10, 20) - assert_raises(ValueError, hypergeom, 10, -1, 20) - assert_raises(ValueError, hypergeom, 10, 10, 0) - 
assert_raises(ValueError, hypergeom, 10, 10, 25) - - def test_logseries(self): - p = [0.5] - bad_p_one = [2] - bad_p_two = [-1] - logseries = random.logseries - desired = np.array([1, 1, 1]) - - self.set_seed() - actual = logseries(p * 3) - assert_array_equal(actual, desired) - assert_raises(ValueError, logseries, bad_p_one * 3) - assert_raises(ValueError, logseries, bad_p_two * 3) - - -class TestThread(object): - # make sure each state produces the same sequence even in threads - def setup(self): - self.seeds = range(4) - - def check_function(self, function, sz): - from threading import Thread - - out1 = np.empty((len(self.seeds),) + sz) - out2 = np.empty((len(self.seeds),) + sz) - - # threaded generation - t = [Thread(target=function, args=(random.RandomState(s), o)) - for s, o in zip(self.seeds, out1)] - [x.start() for x in t] - [x.join() for x in t] - - # the same serial - for s, o in zip(self.seeds, out2): - function(random.RandomState(s), o) - - # these platforms change x87 fpu precision mode in threads - if np.intp().dtype.itemsize == 4 and sys.platform == "win32": - assert_array_almost_equal(out1, out2) - else: - assert_array_equal(out1, out2) - - def test_normal(self): - def gen_random(state, out): - out[...] = state.normal(size=10000) - - self.check_function(gen_random, sz=(10000,)) - - def test_exp(self): - def gen_random(state, out): - out[...] = state.exponential(scale=np.ones((100, 1000))) - - self.check_function(gen_random, sz=(100, 1000)) - - def test_multinomial(self): - def gen_random(state, out): - out[...] = state.multinomial(10, [1 / 6.] 
* 6, size=10000) - - self.check_function(gen_random, sz=(10000, 6)) - - -# See Issue #4263 -class TestSingleEltArrayInput(object): - def setup(self): - self.argOne = np.array([2]) - self.argTwo = np.array([3]) - self.argThree = np.array([4]) - self.tgtShape = (1,) - - def test_one_arg_funcs(self): - funcs = (random.exponential, random.standard_gamma, - random.chisquare, random.standard_t, - random.pareto, random.weibull, - random.power, random.rayleigh, - random.poisson, random.zipf, - random.geometric, random.logseries) - - probfuncs = (random.geometric, random.logseries) - - for func in funcs: - if func in probfuncs: # p < 1.0 - out = func(np.array([0.5])) - - else: - out = func(self.argOne) - - assert_equal(out.shape, self.tgtShape) - - def test_two_arg_funcs(self): - funcs = (random.uniform, random.normal, - random.beta, random.gamma, - random.f, random.noncentral_chisquare, - random.vonmises, random.laplace, - random.gumbel, random.logistic, - random.lognormal, random.wald, - random.binomial, random.negative_binomial) - - probfuncs = (random.binomial, random.negative_binomial) - - for func in funcs: - if func in probfuncs: # p <= 1 - argTwo = np.array([0.5]) - - else: - argTwo = self.argTwo - - out = func(self.argOne, argTwo) - assert_equal(out.shape, self.tgtShape) - - out = func(self.argOne[0], argTwo) - assert_equal(out.shape, self.tgtShape) - - out = func(self.argOne, argTwo[0]) - assert_equal(out.shape, self.tgtShape) - - def test_three_arg_funcs(self): - funcs = [random.noncentral_f, random.triangular, - random.hypergeometric] - - for func in funcs: - out = func(self.argOne, self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) - - out = func(self.argOne[0], self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) - - out = func(self.argOne, self.argTwo[0], self.argThree) - assert_equal(out.shape, self.tgtShape) - - -# Ensure returned array dtype is correct for platform -def test_integer_dtype(int_func): - 
random.seed(123456789) - fname, args, md5 = int_func - f = getattr(random, fname) - actual = f(*args, size=2) - assert_(actual.dtype == np.dtype('l')) - - -def test_integer_repeat(int_func): - random.seed(123456789) - fname, args, md5 = int_func - f = getattr(random, fname) - val = f(*args, size=1000000) - if sys.byteorder != 'little': - val = val.byteswap() - res = hashlib.md5(val.view(np.int8)).hexdigest() - assert_(res == md5) diff --git a/venv/lib/python3.7/site-packages/numpy/random/tests/test_randomstate_regression.py b/venv/lib/python3.7/site-packages/numpy/random/tests/test_randomstate_regression.py deleted file mode 100644 index bdc2214..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/tests/test_randomstate_regression.py +++ /dev/null @@ -1,210 +0,0 @@ -import sys - -import pytest - -from numpy.testing import ( - assert_, assert_array_equal, assert_raises, - ) -from numpy.compat import long -import numpy as np - -from numpy import random - - -class TestRegression(object): - - def test_VonMises_range(self): - # Make sure generated random variables are in [-pi, pi]. - # Regression test for ticket #986. 
- for mu in np.linspace(-7., 7., 5): - r = random.vonmises(mu, 1, 50) - assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) - - def test_hypergeometric_range(self): - # Test for ticket #921 - assert_(np.all(random.hypergeometric(3, 18, 11, size=10) < 4)) - assert_(np.all(random.hypergeometric(18, 3, 11, size=10) > 0)) - - # Test for ticket #5623 - args = [ - (2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems - ] - is_64bits = sys.maxsize > 2**32 - if is_64bits and sys.platform != 'win32': - # Check for 64-bit systems - args.append((2**40 - 2, 2**40 - 2, 2**40 - 2)) - for arg in args: - assert_(random.hypergeometric(*arg) > 0) - - def test_logseries_convergence(self): - # Test for ticket #923 - N = 1000 - random.seed(0) - rvsn = random.logseries(0.8, size=N) - # these two frequency counts should be close to theoretical - # numbers with this large sample - # theoretical large N result is 0.49706795 - freq = np.sum(rvsn == 1) / float(N) - msg = "Frequency was %f, should be > 0.45" % freq - assert_(freq > 0.45, msg) - # theoretical large N result is 0.19882718 - freq = np.sum(rvsn == 2) / float(N) - msg = "Frequency was %f, should be < 0.23" % freq - assert_(freq < 0.23, msg) - - def test_permutation_longs(self): - random.seed(1234) - a = random.permutation(12) - random.seed(1234) - b = random.permutation(long(12)) - assert_array_equal(a, b) - - def test_shuffle_mixed_dimension(self): - # Test for trac ticket #2074 - for t in [[1, 2, 3, None], - [(1, 1), (2, 2), (3, 3), None], - [1, (2, 2), (3, 3), None], - [(1, 1), 2, 3, None]]: - random.seed(12345) - shuffled = list(t) - random.shuffle(shuffled) - assert_array_equal(shuffled, [t[0], t[3], t[1], t[2]]) - - def test_call_within_randomstate(self): - # Check that custom RandomState does not call into global state - m = random.RandomState() - res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3]) - for i in range(3): - random.seed(i) - m.seed(4321) - # If m.state is not honored, the result will change - 
assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res) - - def test_multivariate_normal_size_types(self): - # Test for multivariate_normal issue with 'size' argument. - # Check that the multivariate_normal size argument can be a - # numpy integer. - random.multivariate_normal([0], [[0]], size=1) - random.multivariate_normal([0], [[0]], size=np.int_(1)) - random.multivariate_normal([0], [[0]], size=np.int64(1)) - - def test_beta_small_parameters(self): - # Test that beta with small a and b parameters does not produce - # NaNs due to roundoff errors causing 0 / 0, gh-5851 - random.seed(1234567890) - x = random.beta(0.0001, 0.0001, size=100) - assert_(not np.any(np.isnan(x)), 'Nans in random.beta') - - def test_choice_sum_of_probs_tolerance(self): - # The sum of probs should be 1.0 with some tolerance. - # For low precision dtypes the tolerance was too tight. - # See numpy github issue 6123. - random.seed(1234) - a = [1, 2, 3] - counts = [4, 4, 2] - for dt in np.float16, np.float32, np.float64: - probs = np.array(counts, dtype=dt) / sum(counts) - c = random.choice(a, p=probs) - assert_(c in a) - assert_raises(ValueError, random.choice, a, p=probs*0.9) - - def test_shuffle_of_array_of_different_length_strings(self): - # Test that permuting an array of different length strings - # will not cause a segfault on garbage collection - # Tests gh-7710 - random.seed(1234) - - a = np.array(['a', 'a' * 1000]) - - for _ in range(100): - random.shuffle(a) - - # Force Garbage Collection - should not segfault. - import gc - gc.collect() - - def test_shuffle_of_array_of_objects(self): - # Test that permuting an array of objects will not cause - # a segfault on garbage collection. - # See gh-7719 - random.seed(1234) - a = np.array([np.arange(1), np.arange(4)]) - - for _ in range(1000): - random.shuffle(a) - - # Force Garbage Collection - should not segfault. 
- import gc - gc.collect() - - def test_permutation_subclass(self): - class N(np.ndarray): - pass - - random.seed(1) - orig = np.arange(3).view(N) - perm = random.permutation(orig) - assert_array_equal(perm, np.array([0, 2, 1])) - assert_array_equal(orig, np.arange(3).view(N)) - - class M(object): - a = np.arange(5) - - def __array__(self): - return self.a - - random.seed(1) - m = M() - perm = random.permutation(m) - assert_array_equal(perm, np.array([2, 1, 4, 0, 3])) - assert_array_equal(m.__array__(), np.arange(5)) - - def test_warns_byteorder(self): - # GH 13159 - other_byteord_dt = 'i4' - with pytest.deprecated_call(match='non-native byteorder is not'): - random.randint(0, 200, size=10, dtype=other_byteord_dt) - - def test_named_argument_initialization(self): - # GH 13669 - rs1 = np.random.RandomState(123456789) - rs2 = np.random.RandomState(seed=123456789) - assert rs1.randint(0, 100) == rs2.randint(0, 100) - - def test_choice_retun_dtype(self): - # GH 9867 - c = np.random.choice(10, p=[.1]*10, size=2) - assert c.dtype == np.dtype(int) - c = np.random.choice(10, p=[.1]*10, replace=False, size=2) - assert c.dtype == np.dtype(int) - c = np.random.choice(10, size=2) - assert c.dtype == np.dtype(int) - c = np.random.choice(10, replace=False, size=2) - assert c.dtype == np.dtype(int) - - @pytest.mark.skipif(np.iinfo('l').max < 2**32, - reason='Cannot test with 32-bit C long') - def test_randint_117(self): - # GH 14189 - random.seed(0) - expected = np.array([2357136044, 2546248239, 3071714933, 3626093760, - 2588848963, 3684848379, 2340255427, 3638918503, - 1819583497, 2678185683], dtype='int64') - actual = random.randint(2**32, size=10) - assert_array_equal(actual, expected) - - def test_p_zero_stream(self): - # Regression test for gh-14522. Ensure that future versions - # generate the same variates as version 1.16. 
- np.random.seed(12345) - assert_array_equal(random.binomial(1, [0, 0.25, 0.5, 0.75, 1]), - [0, 0, 0, 1, 1]) - - def test_n_zero_stream(self): - # Regression test for gh-14522. Ensure that future versions - # generate the same variates as version 1.16. - np.random.seed(8675309) - expected = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [3, 4, 2, 3, 3, 1, 5, 3, 1, 3]]) - assert_array_equal(random.binomial([[0], [10]], 0.25, size=(2, 10)), - expected) diff --git a/venv/lib/python3.7/site-packages/numpy/random/tests/test_regression.py b/venv/lib/python3.7/site-packages/numpy/random/tests/test_regression.py deleted file mode 100644 index 509e2d5..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/tests/test_regression.py +++ /dev/null @@ -1,158 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -from numpy.testing import ( - assert_, assert_array_equal, assert_raises, - ) -from numpy import random -from numpy.compat import long -import numpy as np - - -class TestRegression(object): - - def test_VonMises_range(self): - # Make sure generated random variables are in [-pi, pi]. - # Regression test for ticket #986. 
- for mu in np.linspace(-7., 7., 5): - r = random.mtrand.vonmises(mu, 1, 50) - assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) - - def test_hypergeometric_range(self): - # Test for ticket #921 - assert_(np.all(np.random.hypergeometric(3, 18, 11, size=10) < 4)) - assert_(np.all(np.random.hypergeometric(18, 3, 11, size=10) > 0)) - - # Test for ticket #5623 - args = [ - (2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems - ] - is_64bits = sys.maxsize > 2**32 - if is_64bits and sys.platform != 'win32': - # Check for 64-bit systems - args.append((2**40 - 2, 2**40 - 2, 2**40 - 2)) - for arg in args: - assert_(np.random.hypergeometric(*arg) > 0) - - def test_logseries_convergence(self): - # Test for ticket #923 - N = 1000 - np.random.seed(0) - rvsn = np.random.logseries(0.8, size=N) - # these two frequency counts should be close to theoretical - # numbers with this large sample - # theoretical large N result is 0.49706795 - freq = np.sum(rvsn == 1) / float(N) - msg = "Frequency was %f, should be > 0.45" % freq - assert_(freq > 0.45, msg) - # theoretical large N result is 0.19882718 - freq = np.sum(rvsn == 2) / float(N) - msg = "Frequency was %f, should be < 0.23" % freq - assert_(freq < 0.23, msg) - - def test_permutation_longs(self): - np.random.seed(1234) - a = np.random.permutation(12) - np.random.seed(1234) - b = np.random.permutation(long(12)) - assert_array_equal(a, b) - - def test_shuffle_mixed_dimension(self): - # Test for trac ticket #2074 - for t in [[1, 2, 3, None], - [(1, 1), (2, 2), (3, 3), None], - [1, (2, 2), (3, 3), None], - [(1, 1), 2, 3, None]]: - np.random.seed(12345) - shuffled = list(t) - random.shuffle(shuffled) - assert_array_equal(shuffled, [t[0], t[3], t[1], t[2]]) - - def test_call_within_randomstate(self): - # Check that custom RandomState does not call into global state - m = np.random.RandomState() - res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3]) - for i in range(3): - np.random.seed(i) - m.seed(4321) - # If m.state is not 
honored, the result will change - assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res) - - def test_multivariate_normal_size_types(self): - # Test for multivariate_normal issue with 'size' argument. - # Check that the multivariate_normal size argument can be a - # numpy integer. - np.random.multivariate_normal([0], [[0]], size=1) - np.random.multivariate_normal([0], [[0]], size=np.int_(1)) - np.random.multivariate_normal([0], [[0]], size=np.int64(1)) - - def test_beta_small_parameters(self): - # Test that beta with small a and b parameters does not produce - # NaNs due to roundoff errors causing 0 / 0, gh-5851 - np.random.seed(1234567890) - x = np.random.beta(0.0001, 0.0001, size=100) - assert_(not np.any(np.isnan(x)), 'Nans in np.random.beta') - - def test_choice_sum_of_probs_tolerance(self): - # The sum of probs should be 1.0 with some tolerance. - # For low precision dtypes the tolerance was too tight. - # See numpy github issue 6123. - np.random.seed(1234) - a = [1, 2, 3] - counts = [4, 4, 2] - for dt in np.float16, np.float32, np.float64: - probs = np.array(counts, dtype=dt) / sum(counts) - c = np.random.choice(a, p=probs) - assert_(c in a) - assert_raises(ValueError, np.random.choice, a, p=probs*0.9) - - def test_shuffle_of_array_of_different_length_strings(self): - # Test that permuting an array of different length strings - # will not cause a segfault on garbage collection - # Tests gh-7710 - np.random.seed(1234) - - a = np.array(['a', 'a' * 1000]) - - for _ in range(100): - np.random.shuffle(a) - - # Force Garbage Collection - should not segfault. - import gc - gc.collect() - - def test_shuffle_of_array_of_objects(self): - # Test that permuting an array of objects will not cause - # a segfault on garbage collection. - # See gh-7719 - np.random.seed(1234) - a = np.array([np.arange(1), np.arange(4)]) - - for _ in range(1000): - np.random.shuffle(a) - - # Force Garbage Collection - should not segfault. 
- import gc - gc.collect() - - def test_permutation_subclass(self): - class N(np.ndarray): - pass - - np.random.seed(1) - orig = np.arange(3).view(N) - perm = np.random.permutation(orig) - assert_array_equal(perm, np.array([0, 2, 1])) - assert_array_equal(orig, np.arange(3).view(N)) - - class M(object): - a = np.arange(5) - - def __array__(self): - return self.a - - np.random.seed(1) - m = M() - perm = np.random.permutation(m) - assert_array_equal(perm, np.array([2, 1, 4, 0, 3])) - assert_array_equal(m.__array__(), np.arange(5)) diff --git a/venv/lib/python3.7/site-packages/numpy/random/tests/test_seed_sequence.py b/venv/lib/python3.7/site-packages/numpy/random/tests/test_seed_sequence.py deleted file mode 100644 index fe23680..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/tests/test_seed_sequence.py +++ /dev/null @@ -1,54 +0,0 @@ -import numpy as np -from numpy.testing import assert_array_equal - -from numpy.random import SeedSequence - - -def test_reference_data(): - """ Check that SeedSequence generates data the same as the C++ reference. 
- - https://gist.github.com/imneme/540829265469e673d045 - """ - inputs = [ - [3735928559, 195939070, 229505742, 305419896], - [3668361503, 4165561550, 1661411377, 3634257570], - [164546577, 4166754639, 1765190214, 1303880213], - [446610472, 3941463886, 522937693, 1882353782], - [1864922766, 1719732118, 3882010307, 1776744564], - [4141682960, 3310988675, 553637289, 902896340], - [1134851934, 2352871630, 3699409824, 2648159817], - [1240956131, 3107113773, 1283198141, 1924506131], - [2669565031, 579818610, 3042504477, 2774880435], - [2766103236, 2883057919, 4029656435, 862374500], - ] - outputs = [ - [3914649087, 576849849, 3593928901, 2229911004], - [2240804226, 3691353228, 1365957195, 2654016646], - [3562296087, 3191708229, 1147942216, 3726991905], - [1403443605, 3591372999, 1291086759, 441919183], - [1086200464, 2191331643, 560336446, 3658716651], - [3249937430, 2346751812, 847844327, 2996632307], - [2584285912, 4034195531, 3523502488, 169742686], - [959045797, 3875435559, 1886309314, 359682705], - [3978441347, 432478529, 3223635119, 138903045], - [296367413, 4262059219, 13109864, 3283683422], - ] - outputs64 = [ - [2477551240072187391, 9577394838764454085], - [15854241394484835714, 11398914698975566411], - [13708282465491374871, 16007308345579681096], - [15424829579845884309, 1898028439751125927], - [9411697742461147792, 15714068361935982142], - [10079222287618677782, 12870437757549876199], - [17326737873898640088, 729039288628699544], - [16644868984619524261, 1544825456798124994], - [1857481142255628931, 596584038813451439], - [18305404959516669237, 14103312907920476776], - ] - for seed, expected, expected64 in zip(inputs, outputs, outputs64): - expected = np.array(expected, dtype=np.uint32) - ss = SeedSequence(seed) - state = ss.generate_state(len(expected)) - assert_array_equal(state, expected) - state64 = ss.generate_state(len(expected64), dtype=np.uint64) - assert_array_equal(state64, expected64) diff --git 
a/venv/lib/python3.7/site-packages/numpy/random/tests/test_smoke.py b/venv/lib/python3.7/site-packages/numpy/random/tests/test_smoke.py deleted file mode 100644 index 58ef6a0..0000000 --- a/venv/lib/python3.7/site-packages/numpy/random/tests/test_smoke.py +++ /dev/null @@ -1,808 +0,0 @@ -import pickle -import time -from functools import partial - -import numpy as np -import pytest -from numpy.testing import assert_equal, assert_, assert_array_equal -from numpy.random import (Generator, MT19937, PCG64, Philox, SFC64) - -@pytest.fixture(scope='module', - params=(np.bool_, np.int8, np.int16, np.int32, np.int64, - np.uint8, np.uint16, np.uint32, np.uint64)) -def dtype(request): - return request.param - - -def params_0(f): - val = f() - assert_(np.isscalar(val)) - val = f(10) - assert_(val.shape == (10,)) - val = f((10, 10)) - assert_(val.shape == (10, 10)) - val = f((10, 10, 10)) - assert_(val.shape == (10, 10, 10)) - val = f(size=(5, 5)) - assert_(val.shape == (5, 5)) - - -def params_1(f, bounded=False): - a = 5.0 - b = np.arange(2.0, 12.0) - c = np.arange(2.0, 102.0).reshape((10, 10)) - d = np.arange(2.0, 1002.0).reshape((10, 10, 10)) - e = np.array([2.0, 3.0]) - g = np.arange(2.0, 12.0).reshape((1, 10, 1)) - if bounded: - a = 0.5 - b = b / (1.5 * b.max()) - c = c / (1.5 * c.max()) - d = d / (1.5 * d.max()) - e = e / (1.5 * e.max()) - g = g / (1.5 * g.max()) - - # Scalar - f(a) - # Scalar - size - f(a, size=(10, 10)) - # 1d - f(b) - # 2d - f(c) - # 3d - f(d) - # 1d size - f(b, size=10) - # 2d - size - broadcast - f(e, size=(10, 2)) - # 3d - size - f(g, size=(10, 10, 10)) - - -def comp_state(state1, state2): - identical = True - if isinstance(state1, dict): - for key in state1: - identical &= comp_state(state1[key], state2[key]) - elif type(state1) != type(state2): - identical &= type(state1) == type(state2) - else: - if (isinstance(state1, (list, tuple, np.ndarray)) and isinstance( - state2, (list, tuple, np.ndarray))): - for s1, s2 in zip(state1, state2): - 
identical &= comp_state(s1, s2) - else: - identical &= state1 == state2 - return identical - - -def warmup(rg, n=None): - if n is None: - n = 11 + np.random.randint(0, 20) - rg.standard_normal(n) - rg.standard_normal(n) - rg.standard_normal(n, dtype=np.float32) - rg.standard_normal(n, dtype=np.float32) - rg.integers(0, 2 ** 24, n, dtype=np.uint64) - rg.integers(0, 2 ** 48, n, dtype=np.uint64) - rg.standard_gamma(11.0, n) - rg.standard_gamma(11.0, n, dtype=np.float32) - rg.random(n, dtype=np.float64) - rg.random(n, dtype=np.float32) - - -class RNG(object): - @classmethod - def setup_class(cls): - # Overridden in test classes. Place holder to silence IDE noise - cls.bit_generator = PCG64 - cls.advance = None - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() - - @classmethod - def _extra_setup(cls): - cls.vec_1d = np.arange(2.0, 102.0) - cls.vec_2d = np.arange(2.0, 102.0)[None, :] - cls.mat = np.arange(2.0, 102.0, 0.01).reshape((100, 100)) - cls.seed_error = TypeError - - def _reset_state(self): - self.rg.bit_generator.state = self.initial_state - - def test_init(self): - rg = Generator(self.bit_generator()) - state = rg.bit_generator.state - rg.standard_normal(1) - rg.standard_normal(1) - rg.bit_generator.state = state - new_state = rg.bit_generator.state - assert_(comp_state(state, new_state)) - - def test_advance(self): - state = self.rg.bit_generator.state - if hasattr(self.rg.bit_generator, 'advance'): - self.rg.bit_generator.advance(self.advance) - assert_(not comp_state(state, self.rg.bit_generator.state)) - else: - bitgen_name = self.rg.bit_generator.__class__.__name__ - pytest.skip('Advance is not supported by {0}'.format(bitgen_name)) - - def test_jump(self): - state = self.rg.bit_generator.state - if hasattr(self.rg.bit_generator, 'jumped'): - bit_gen2 = self.rg.bit_generator.jumped() - jumped_state = bit_gen2.state - assert_(not 
comp_state(state, jumped_state)) - self.rg.random(2 * 3 * 5 * 7 * 11 * 13 * 17) - self.rg.bit_generator.state = state - bit_gen3 = self.rg.bit_generator.jumped() - rejumped_state = bit_gen3.state - assert_(comp_state(jumped_state, rejumped_state)) - else: - bitgen_name = self.rg.bit_generator.__class__.__name__ - if bitgen_name not in ('SFC64',): - raise AttributeError('no "jumped" in %s' % bitgen_name) - pytest.skip('Jump is not supported by {0}'.format(bitgen_name)) - - def test_uniform(self): - r = self.rg.uniform(-1.0, 0.0, size=10) - assert_(len(r) == 10) - assert_((r > -1).all()) - assert_((r <= 0).all()) - - def test_uniform_array(self): - r = self.rg.uniform(np.array([-1.0] * 10), 0.0, size=10) - assert_(len(r) == 10) - assert_((r > -1).all()) - assert_((r <= 0).all()) - r = self.rg.uniform(np.array([-1.0] * 10), - np.array([0.0] * 10), size=10) - assert_(len(r) == 10) - assert_((r > -1).all()) - assert_((r <= 0).all()) - r = self.rg.uniform(-1.0, np.array([0.0] * 10), size=10) - assert_(len(r) == 10) - assert_((r > -1).all()) - assert_((r <= 0).all()) - - def test_random(self): - assert_(len(self.rg.random(10)) == 10) - params_0(self.rg.random) - - def test_standard_normal_zig(self): - assert_(len(self.rg.standard_normal(10)) == 10) - - def test_standard_normal(self): - assert_(len(self.rg.standard_normal(10)) == 10) - params_0(self.rg.standard_normal) - - def test_standard_gamma(self): - assert_(len(self.rg.standard_gamma(10, 10)) == 10) - assert_(len(self.rg.standard_gamma(np.array([10] * 10), 10)) == 10) - params_1(self.rg.standard_gamma) - - def test_standard_exponential(self): - assert_(len(self.rg.standard_exponential(10)) == 10) - params_0(self.rg.standard_exponential) - - def test_standard_exponential_float(self): - randoms = self.rg.standard_exponential(10, dtype='float32') - assert_(len(randoms) == 10) - assert randoms.dtype == np.float32 - params_0(partial(self.rg.standard_exponential, dtype='float32')) - - def 
test_standard_exponential_float_log(self): - randoms = self.rg.standard_exponential(10, dtype='float32', - method='inv') - assert_(len(randoms) == 10) - assert randoms.dtype == np.float32 - params_0(partial(self.rg.standard_exponential, dtype='float32', - method='inv')) - - def test_standard_cauchy(self): - assert_(len(self.rg.standard_cauchy(10)) == 10) - params_0(self.rg.standard_cauchy) - - def test_standard_t(self): - assert_(len(self.rg.standard_t(10, 10)) == 10) - params_1(self.rg.standard_t) - - def test_binomial(self): - assert_(self.rg.binomial(10, .5) >= 0) - assert_(self.rg.binomial(1000, .5) >= 0) - - def test_reset_state(self): - state = self.rg.bit_generator.state - int_1 = self.rg.integers(2**31) - self.rg.bit_generator.state = state - int_2 = self.rg.integers(2**31) - assert_(int_1 == int_2) - - def test_entropy_init(self): - rg = Generator(self.bit_generator()) - rg2 = Generator(self.bit_generator()) - assert_(not comp_state(rg.bit_generator.state, - rg2.bit_generator.state)) - - def test_seed(self): - rg = Generator(self.bit_generator(*self.seed)) - rg2 = Generator(self.bit_generator(*self.seed)) - rg.random() - rg2.random() - assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) - - def test_reset_state_gauss(self): - rg = Generator(self.bit_generator(*self.seed)) - rg.standard_normal() - state = rg.bit_generator.state - n1 = rg.standard_normal(size=10) - rg2 = Generator(self.bit_generator()) - rg2.bit_generator.state = state - n2 = rg2.standard_normal(size=10) - assert_array_equal(n1, n2) - - def test_reset_state_uint32(self): - rg = Generator(self.bit_generator(*self.seed)) - rg.integers(0, 2 ** 24, 120, dtype=np.uint32) - state = rg.bit_generator.state - n1 = rg.integers(0, 2 ** 24, 10, dtype=np.uint32) - rg2 = Generator(self.bit_generator()) - rg2.bit_generator.state = state - n2 = rg2.integers(0, 2 ** 24, 10, dtype=np.uint32) - assert_array_equal(n1, n2) - - def test_reset_state_float(self): - rg = 
Generator(self.bit_generator(*self.seed)) - rg.random(dtype='float32') - state = rg.bit_generator.state - n1 = rg.random(size=10, dtype='float32') - rg2 = Generator(self.bit_generator()) - rg2.bit_generator.state = state - n2 = rg2.random(size=10, dtype='float32') - assert_((n1 == n2).all()) - - def test_shuffle(self): - original = np.arange(200, 0, -1) - permuted = self.rg.permutation(original) - assert_((original != permuted).any()) - - def test_permutation(self): - original = np.arange(200, 0, -1) - permuted = self.rg.permutation(original) - assert_((original != permuted).any()) - - def test_beta(self): - vals = self.rg.beta(2.0, 2.0, 10) - assert_(len(vals) == 10) - vals = self.rg.beta(np.array([2.0] * 10), 2.0) - assert_(len(vals) == 10) - vals = self.rg.beta(2.0, np.array([2.0] * 10)) - assert_(len(vals) == 10) - vals = self.rg.beta(np.array([2.0] * 10), np.array([2.0] * 10)) - assert_(len(vals) == 10) - vals = self.rg.beta(np.array([2.0] * 10), np.array([[2.0]] * 10)) - assert_(vals.shape == (10, 10)) - - def test_bytes(self): - vals = self.rg.bytes(10) - assert_(len(vals) == 10) - - def test_chisquare(self): - vals = self.rg.chisquare(2.0, 10) - assert_(len(vals) == 10) - params_1(self.rg.chisquare) - - def test_exponential(self): - vals = self.rg.exponential(2.0, 10) - assert_(len(vals) == 10) - params_1(self.rg.exponential) - - def test_f(self): - vals = self.rg.f(3, 1000, 10) - assert_(len(vals) == 10) - - def test_gamma(self): - vals = self.rg.gamma(3, 2, 10) - assert_(len(vals) == 10) - - def test_geometric(self): - vals = self.rg.geometric(0.5, 10) - assert_(len(vals) == 10) - params_1(self.rg.exponential, bounded=True) - - def test_gumbel(self): - vals = self.rg.gumbel(2.0, 2.0, 10) - assert_(len(vals) == 10) - - def test_laplace(self): - vals = self.rg.laplace(2.0, 2.0, 10) - assert_(len(vals) == 10) - - def test_logitic(self): - vals = self.rg.logistic(2.0, 2.0, 10) - assert_(len(vals) == 10) - - def test_logseries(self): - vals = 
self.rg.logseries(0.5, 10) - assert_(len(vals) == 10) - - def test_negative_binomial(self): - vals = self.rg.negative_binomial(10, 0.2, 10) - assert_(len(vals) == 10) - - def test_noncentral_chisquare(self): - vals = self.rg.noncentral_chisquare(10, 2, 10) - assert_(len(vals) == 10) - - def test_noncentral_f(self): - vals = self.rg.noncentral_f(3, 1000, 2, 10) - assert_(len(vals) == 10) - vals = self.rg.noncentral_f(np.array([3] * 10), 1000, 2) - assert_(len(vals) == 10) - vals = self.rg.noncentral_f(3, np.array([1000] * 10), 2) - assert_(len(vals) == 10) - vals = self.rg.noncentral_f(3, 1000, np.array([2] * 10)) - assert_(len(vals) == 10) - - def test_normal(self): - vals = self.rg.normal(10, 0.2, 10) - assert_(len(vals) == 10) - - def test_pareto(self): - vals = self.rg.pareto(3.0, 10) - assert_(len(vals) == 10) - - def test_poisson(self): - vals = self.rg.poisson(10, 10) - assert_(len(vals) == 10) - vals = self.rg.poisson(np.array([10] * 10)) - assert_(len(vals) == 10) - params_1(self.rg.poisson) - - def test_power(self): - vals = self.rg.power(0.2, 10) - assert_(len(vals) == 10) - - def test_integers(self): - vals = self.rg.integers(10, 20, 10) - assert_(len(vals) == 10) - - def test_rayleigh(self): - vals = self.rg.rayleigh(0.2, 10) - assert_(len(vals) == 10) - params_1(self.rg.rayleigh, bounded=True) - - def test_vonmises(self): - vals = self.rg.vonmises(10, 0.2, 10) - assert_(len(vals) == 10) - - def test_wald(self): - vals = self.rg.wald(1.0, 1.0, 10) - assert_(len(vals) == 10) - - def test_weibull(self): - vals = self.rg.weibull(1.0, 10) - assert_(len(vals) == 10) - - def test_zipf(self): - vals = self.rg.zipf(10, 10) - assert_(len(vals) == 10) - vals = self.rg.zipf(self.vec_1d) - assert_(len(vals) == 100) - vals = self.rg.zipf(self.vec_2d) - assert_(vals.shape == (1, 100)) - vals = self.rg.zipf(self.mat) - assert_(vals.shape == (100, 100)) - - def test_hypergeometric(self): - vals = self.rg.hypergeometric(25, 25, 20) - assert_(np.isscalar(vals)) - vals = 
self.rg.hypergeometric(np.array([25] * 10), 25, 20) - assert_(vals.shape == (10,)) - - def test_triangular(self): - vals = self.rg.triangular(-5, 0, 5) - assert_(np.isscalar(vals)) - vals = self.rg.triangular(-5, np.array([0] * 10), 5) - assert_(vals.shape == (10,)) - - def test_multivariate_normal(self): - mean = [0, 0] - cov = [[1, 0], [0, 100]] # diagonal covariance - x = self.rg.multivariate_normal(mean, cov, 5000) - assert_(x.shape == (5000, 2)) - x_zig = self.rg.multivariate_normal(mean, cov, 5000) - assert_(x.shape == (5000, 2)) - x_inv = self.rg.multivariate_normal(mean, cov, 5000) - assert_(x.shape == (5000, 2)) - assert_((x_zig != x_inv).any()) - - def test_multinomial(self): - vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3]) - assert_(vals.shape == (2,)) - vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3], size=10) - assert_(vals.shape == (10, 2)) - - def test_dirichlet(self): - s = self.rg.dirichlet((10, 5, 3), 20) - assert_(s.shape == (20, 3)) - - def test_pickle(self): - pick = pickle.dumps(self.rg) - unpick = pickle.loads(pick) - assert_((type(self.rg) == type(unpick))) - assert_(comp_state(self.rg.bit_generator.state, - unpick.bit_generator.state)) - - pick = pickle.dumps(self.rg) - unpick = pickle.loads(pick) - assert_((type(self.rg) == type(unpick))) - assert_(comp_state(self.rg.bit_generator.state, - unpick.bit_generator.state)) - - def test_seed_array(self): - if self.seed_vector_bits is None: - bitgen_name = self.bit_generator.__name__ - pytest.skip('Vector seeding is not supported by ' - '{0}'.format(bitgen_name)) - - if self.seed_vector_bits == 32: - dtype = np.uint32 - else: - dtype = np.uint64 - seed = np.array([1], dtype=dtype) - bg = self.bit_generator(seed) - state1 = bg.state - bg = self.bit_generator(1) - state2 = bg.state - assert_(comp_state(state1, state2)) - - seed = np.arange(4, dtype=dtype) - bg = self.bit_generator(seed) - state1 = bg.state - bg = self.bit_generator(seed[0]) - state2 = bg.state - assert_(not 
comp_state(state1, state2)) - - seed = np.arange(1500, dtype=dtype) - bg = self.bit_generator(seed) - state1 = bg.state - bg = self.bit_generator(seed[0]) - state2 = bg.state - assert_(not comp_state(state1, state2)) - - seed = 2 ** np.mod(np.arange(1500, dtype=dtype), - self.seed_vector_bits - 1) + 1 - bg = self.bit_generator(seed) - state1 = bg.state - bg = self.bit_generator(seed[0]) - state2 = bg.state - assert_(not comp_state(state1, state2)) - - def test_uniform_float(self): - rg = Generator(self.bit_generator(12345)) - warmup(rg) - state = rg.bit_generator.state - r1 = rg.random(11, dtype=np.float32) - rg2 = Generator(self.bit_generator()) - warmup(rg2) - rg2.bit_generator.state = state - r2 = rg2.random(11, dtype=np.float32) - assert_array_equal(r1, r2) - assert_equal(r1.dtype, np.float32) - assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) - - def test_gamma_floats(self): - rg = Generator(self.bit_generator()) - warmup(rg) - state = rg.bit_generator.state - r1 = rg.standard_gamma(4.0, 11, dtype=np.float32) - rg2 = Generator(self.bit_generator()) - warmup(rg2) - rg2.bit_generator.state = state - r2 = rg2.standard_gamma(4.0, 11, dtype=np.float32) - assert_array_equal(r1, r2) - assert_equal(r1.dtype, np.float32) - assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) - - def test_normal_floats(self): - rg = Generator(self.bit_generator()) - warmup(rg) - state = rg.bit_generator.state - r1 = rg.standard_normal(11, dtype=np.float32) - rg2 = Generator(self.bit_generator()) - warmup(rg2) - rg2.bit_generator.state = state - r2 = rg2.standard_normal(11, dtype=np.float32) - assert_array_equal(r1, r2) - assert_equal(r1.dtype, np.float32) - assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) - - def test_normal_zig_floats(self): - rg = Generator(self.bit_generator()) - warmup(rg) - state = rg.bit_generator.state - r1 = rg.standard_normal(11, dtype=np.float32) - rg2 = Generator(self.bit_generator()) - warmup(rg2) - 
rg2.bit_generator.state = state - r2 = rg2.standard_normal(11, dtype=np.float32) - assert_array_equal(r1, r2) - assert_equal(r1.dtype, np.float32) - assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) - - def test_output_fill(self): - rg = self.rg - state = rg.bit_generator.state - size = (31, 7, 97) - existing = np.empty(size) - rg.bit_generator.state = state - rg.standard_normal(out=existing) - rg.bit_generator.state = state - direct = rg.standard_normal(size=size) - assert_equal(direct, existing) - - sized = np.empty(size) - rg.bit_generator.state = state - rg.standard_normal(out=sized, size=sized.shape) - - existing = np.empty(size, dtype=np.float32) - rg.bit_generator.state = state - rg.standard_normal(out=existing, dtype=np.float32) - rg.bit_generator.state = state - direct = rg.standard_normal(size=size, dtype=np.float32) - assert_equal(direct, existing) - - def test_output_filling_uniform(self): - rg = self.rg - state = rg.bit_generator.state - size = (31, 7, 97) - existing = np.empty(size) - rg.bit_generator.state = state - rg.random(out=existing) - rg.bit_generator.state = state - direct = rg.random(size=size) - assert_equal(direct, existing) - - existing = np.empty(size, dtype=np.float32) - rg.bit_generator.state = state - rg.random(out=existing, dtype=np.float32) - rg.bit_generator.state = state - direct = rg.random(size=size, dtype=np.float32) - assert_equal(direct, existing) - - def test_output_filling_exponential(self): - rg = self.rg - state = rg.bit_generator.state - size = (31, 7, 97) - existing = np.empty(size) - rg.bit_generator.state = state - rg.standard_exponential(out=existing) - rg.bit_generator.state = state - direct = rg.standard_exponential(size=size) - assert_equal(direct, existing) - - existing = np.empty(size, dtype=np.float32) - rg.bit_generator.state = state - rg.standard_exponential(out=existing, dtype=np.float32) - rg.bit_generator.state = state - direct = rg.standard_exponential(size=size, dtype=np.float32) - 
assert_equal(direct, existing) - - def test_output_filling_gamma(self): - rg = self.rg - state = rg.bit_generator.state - size = (31, 7, 97) - existing = np.zeros(size) - rg.bit_generator.state = state - rg.standard_gamma(1.0, out=existing) - rg.bit_generator.state = state - direct = rg.standard_gamma(1.0, size=size) - assert_equal(direct, existing) - - existing = np.zeros(size, dtype=np.float32) - rg.bit_generator.state = state - rg.standard_gamma(1.0, out=existing, dtype=np.float32) - rg.bit_generator.state = state - direct = rg.standard_gamma(1.0, size=size, dtype=np.float32) - assert_equal(direct, existing) - - def test_output_filling_gamma_broadcast(self): - rg = self.rg - state = rg.bit_generator.state - size = (31, 7, 97) - mu = np.arange(97.0) + 1.0 - existing = np.zeros(size) - rg.bit_generator.state = state - rg.standard_gamma(mu, out=existing) - rg.bit_generator.state = state - direct = rg.standard_gamma(mu, size=size) - assert_equal(direct, existing) - - existing = np.zeros(size, dtype=np.float32) - rg.bit_generator.state = state - rg.standard_gamma(mu, out=existing, dtype=np.float32) - rg.bit_generator.state = state - direct = rg.standard_gamma(mu, size=size, dtype=np.float32) - assert_equal(direct, existing) - - def test_output_fill_error(self): - rg = self.rg - size = (31, 7, 97) - existing = np.empty(size) - with pytest.raises(TypeError): - rg.standard_normal(out=existing, dtype=np.float32) - with pytest.raises(ValueError): - rg.standard_normal(out=existing[::3]) - existing = np.empty(size, dtype=np.float32) - with pytest.raises(TypeError): - rg.standard_normal(out=existing, dtype=np.float64) - - existing = np.zeros(size, dtype=np.float32) - with pytest.raises(TypeError): - rg.standard_gamma(1.0, out=existing, dtype=np.float64) - with pytest.raises(ValueError): - rg.standard_gamma(1.0, out=existing[::3], dtype=np.float32) - existing = np.zeros(size, dtype=np.float64) - with pytest.raises(TypeError): - rg.standard_gamma(1.0, out=existing, 
dtype=np.float32) - with pytest.raises(ValueError): - rg.standard_gamma(1.0, out=existing[::3]) - - def test_integers_broadcast(self, dtype): - if dtype == np.bool_: - upper = 2 - lower = 0 - else: - info = np.iinfo(dtype) - upper = int(info.max) + 1 - lower = info.min - self._reset_state() - a = self.rg.integers(lower, [upper] * 10, dtype=dtype) - self._reset_state() - b = self.rg.integers([lower] * 10, upper, dtype=dtype) - assert_equal(a, b) - self._reset_state() - c = self.rg.integers(lower, upper, size=10, dtype=dtype) - assert_equal(a, c) - self._reset_state() - d = self.rg.integers(np.array( - [lower] * 10), np.array([upper], dtype=object), size=10, - dtype=dtype) - assert_equal(a, d) - self._reset_state() - e = self.rg.integers( - np.array([lower] * 10), np.array([upper] * 10), size=10, - dtype=dtype) - assert_equal(a, e) - - self._reset_state() - a = self.rg.integers(0, upper, size=10, dtype=dtype) - self._reset_state() - b = self.rg.integers([upper] * 10, dtype=dtype) - assert_equal(a, b) - - def test_integers_numpy(self, dtype): - high = np.array([1]) - low = np.array([0]) - - out = self.rg.integers(low, high, dtype=dtype) - assert out.shape == (1,) - - out = self.rg.integers(low[0], high, dtype=dtype) - assert out.shape == (1,) - - out = self.rg.integers(low, high[0], dtype=dtype) - assert out.shape == (1,) - - def test_integers_broadcast_errors(self, dtype): - if dtype == np.bool_: - upper = 2 - lower = 0 - else: - info = np.iinfo(dtype) - upper = int(info.max) + 1 - lower = info.min - with pytest.raises(ValueError): - self.rg.integers(lower, [upper + 1] * 10, dtype=dtype) - with pytest.raises(ValueError): - self.rg.integers(lower - 1, [upper] * 10, dtype=dtype) - with pytest.raises(ValueError): - self.rg.integers([lower - 1], [upper] * 10, dtype=dtype) - with pytest.raises(ValueError): - self.rg.integers([0], [0], dtype=dtype) - - -class TestMT19937(RNG): - @classmethod - def setup_class(cls): - cls.bit_generator = MT19937 - cls.advance = None - 
cls.seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 32 - cls._extra_setup() - cls.seed_error = ValueError - - def test_numpy_state(self): - nprg = np.random.RandomState() - nprg.standard_normal(99) - state = nprg.get_state() - self.rg.bit_generator.state = state - state2 = self.rg.bit_generator.state - assert_((state[1] == state2['state']['key']).all()) - assert_((state[2] == state2['state']['pos'])) - - -class TestPhilox(RNG): - @classmethod - def setup_class(cls): - cls.bit_generator = Philox - cls.advance = 2**63 + 2**31 + 2**15 + 1 - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() - - -class TestSFC64(RNG): - @classmethod - def setup_class(cls): - cls.bit_generator = SFC64 - cls.advance = None - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 192 - cls._extra_setup() - - -class TestPCG64(RNG): - @classmethod - def setup_class(cls): - cls.bit_generator = PCG64 - cls.advance = 2**63 + 2**31 + 2**15 + 1 - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() - - -class TestDefaultRNG(RNG): - @classmethod - def setup_class(cls): - # This will duplicate some tests that directly instantiate a fresh - # Generator(), but that's okay. - cls.bit_generator = PCG64 - cls.advance = 2**63 + 2**31 + 2**15 + 1 - cls.seed = [12345] - cls.rg = np.random.default_rng(*cls.seed) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() - - def test_default_is_pcg64(self): - # In order to change the default BitGenerator, we'll go through - # a deprecation cycle to move to a different function. 
- assert_(isinstance(self.rg.bit_generator, PCG64)) - - def test_seed(self): - np.random.default_rng() - np.random.default_rng(None) - np.random.default_rng(12345) - np.random.default_rng(0) - np.random.default_rng(43660444402423911716352051725018508569) - np.random.default_rng([43660444402423911716352051725018508569, - 279705150948142787361475340226491943209]) - with pytest.raises(ValueError): - np.random.default_rng(-1) - with pytest.raises(ValueError): - np.random.default_rng([12345, -1]) diff --git a/venv/lib/python3.7/site-packages/numpy/setup.py b/venv/lib/python3.7/site-packages/numpy/setup.py deleted file mode 100644 index 4ccdaee..0000000 --- a/venv/lib/python3.7/site-packages/numpy/setup.py +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, print_function - - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('numpy', parent_package, top_path) - - config.add_subpackage('compat') - config.add_subpackage('core') - config.add_subpackage('distutils') - config.add_subpackage('doc') - config.add_subpackage('f2py') - config.add_subpackage('fft') - config.add_subpackage('lib') - config.add_subpackage('linalg') - config.add_subpackage('ma') - config.add_subpackage('matrixlib') - config.add_subpackage('polynomial') - config.add_subpackage('random') - config.add_subpackage('testing') - config.add_data_dir('doc') - config.add_data_dir('tests') - config.make_config_py() # installs __config__.py - return config - -if __name__ == '__main__': - print('This is the wrong setup.py file to run') diff --git a/venv/lib/python3.7/site-packages/numpy/testing/__init__.py b/venv/lib/python3.7/site-packages/numpy/testing/__init__.py deleted file mode 100644 index a8bd4fc..0000000 --- a/venv/lib/python3.7/site-packages/numpy/testing/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -"""Common test support for all numpy test scripts. 
- -This single module should provide all the common functionality for numpy tests -in a single location, so that test scripts can just import it and work right -away. - -""" -from __future__ import division, absolute_import, print_function - -from unittest import TestCase - -from ._private.utils import * -from ._private import decorators as dec -from ._private.nosetester import ( - run_module_suite, NoseTester as Tester - ) - -__all__ = _private.utils.__all__ + ['TestCase', 'run_module_suite'] - -from numpy._pytesttester import PytestTester -test = PytestTester(__name__) -del PytestTester diff --git a/venv/lib/python3.7/site-packages/numpy/testing/_private/__init__.py b/venv/lib/python3.7/site-packages/numpy/testing/_private/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/numpy/testing/_private/decorators.py b/venv/lib/python3.7/site-packages/numpy/testing/_private/decorators.py deleted file mode 100644 index 24c4e38..0000000 --- a/venv/lib/python3.7/site-packages/numpy/testing/_private/decorators.py +++ /dev/null @@ -1,292 +0,0 @@ -""" -Decorators for labeling and modifying behavior of test objects. - -Decorators that merely return a modified version of the original -function object are straightforward. Decorators that return a new -function object need to use -:: - - nose.tools.make_decorator(original_function)(decorator) - -in returning the decorator, in order to preserve meta-data such as -function name, setup and teardown functions and so on - see -``nose.tools`` for more information. 
- -""" -from __future__ import division, absolute_import, print_function - -try: - # Accessing collections abstract classes from collections - # has been deprecated since Python 3.3 - import collections.abc as collections_abc -except ImportError: - import collections as collections_abc - -from .utils import SkipTest, assert_warns, HAS_REFCOUNT - -__all__ = ['slow', 'setastest', 'skipif', 'knownfailureif', 'deprecated', - 'parametrize', '_needs_refcount',] - - -def slow(t): - """ - Label a test as 'slow'. - - The exact definition of a slow test is obviously both subjective and - hardware-dependent, but in general any individual test that requires more - than a second or two should be labeled as slow (the whole suite consists of - thousands of tests, so even a second is significant). - - Parameters - ---------- - t : callable - The test to label as slow. - - Returns - ------- - t : callable - The decorated test `t`. - - Examples - -------- - The `numpy.testing` module includes ``import decorators as dec``. - A test can be decorated as slow like this:: - - from numpy.testing import * - - @dec.slow - def test_big(self): - print('Big, slow test') - - """ - - t.slow = True - return t - -def setastest(tf=True): - """ - Signals to nose that this function is or is not a test. - - Parameters - ---------- - tf : bool - If True, specifies that the decorated callable is a test. - If False, specifies that the decorated callable is not a test. - Default is True. - - Notes - ----- - This decorator can't use the nose namespace, because it can be - called from a non-test module. See also ``istest`` and ``nottest`` in - ``nose.tools``. 
- - Examples - -------- - `setastest` can be used in the following way:: - - from numpy.testing import dec - - @dec.setastest(False) - def func_with_test_in_name(arg1, arg2): - pass - - """ - def set_test(t): - t.__test__ = tf - return t - return set_test - -def skipif(skip_condition, msg=None): - """ - Make function raise SkipTest exception if a given condition is true. - - If the condition is a callable, it is used at runtime to dynamically - make the decision. This is useful for tests that may require costly - imports, to delay the cost until the test suite is actually executed. - - Parameters - ---------- - skip_condition : bool or callable - Flag to determine whether to skip the decorated test. - msg : str, optional - Message to give on raising a SkipTest exception. Default is None. - - Returns - ------- - decorator : function - Decorator which, when applied to a function, causes SkipTest - to be raised when `skip_condition` is True, and the function - to be called normally otherwise. - - Notes - ----- - The decorator itself is decorated with the ``nose.tools.make_decorator`` - function in order to transmit function name, and various other metadata. - - """ - - def skip_decorator(f): - # Local import to avoid a hard nose dependency and only incur the - # import time overhead at actual test-time. - import nose - - # Allow for both boolean or callable skip conditions. - if isinstance(skip_condition, collections_abc.Callable): - skip_val = lambda: skip_condition() - else: - skip_val = lambda: skip_condition - - def get_msg(func,msg=None): - """Skip message with information about function being skipped.""" - if msg is None: - out = 'Test skipped due to test condition' - else: - out = msg - - return "Skipping test: %s: %s" % (func.__name__, out) - - # We need to define *two* skippers because Python doesn't allow both - # return with value and yield inside the same function. 
- def skipper_func(*args, **kwargs): - """Skipper for normal test functions.""" - if skip_val(): - raise SkipTest(get_msg(f, msg)) - else: - return f(*args, **kwargs) - - def skipper_gen(*args, **kwargs): - """Skipper for test generators.""" - if skip_val(): - raise SkipTest(get_msg(f, msg)) - else: - for x in f(*args, **kwargs): - yield x - - # Choose the right skipper to use when building the actual decorator. - if nose.util.isgenerator(f): - skipper = skipper_gen - else: - skipper = skipper_func - - return nose.tools.make_decorator(f)(skipper) - - return skip_decorator - - -def knownfailureif(fail_condition, msg=None): - """ - Make function raise KnownFailureException exception if given condition is true. - - If the condition is a callable, it is used at runtime to dynamically - make the decision. This is useful for tests that may require costly - imports, to delay the cost until the test suite is actually executed. - - Parameters - ---------- - fail_condition : bool or callable - Flag to determine whether to mark the decorated test as a known - failure (if True) or not (if False). - msg : str, optional - Message to give on raising a KnownFailureException exception. - Default is None. - - Returns - ------- - decorator : function - Decorator, which, when applied to a function, causes - KnownFailureException to be raised when `fail_condition` is True, - and the function to be called normally otherwise. - - Notes - ----- - The decorator itself is decorated with the ``nose.tools.make_decorator`` - function in order to transmit function name, and various other metadata. - - """ - if msg is None: - msg = 'Test skipped due to known failure' - - # Allow for both boolean or callable known failure conditions. 
- if isinstance(fail_condition, collections_abc.Callable): - fail_val = lambda: fail_condition() - else: - fail_val = lambda: fail_condition - - def knownfail_decorator(f): - # Local import to avoid a hard nose dependency and only incur the - # import time overhead at actual test-time. - import nose - from .noseclasses import KnownFailureException - - def knownfailer(*args, **kwargs): - if fail_val(): - raise KnownFailureException(msg) - else: - return f(*args, **kwargs) - return nose.tools.make_decorator(f)(knownfailer) - - return knownfail_decorator - -def deprecated(conditional=True): - """ - Filter deprecation warnings while running the test suite. - - This decorator can be used to filter DeprecationWarning's, to avoid - printing them during the test suite run, while checking that the test - actually raises a DeprecationWarning. - - Parameters - ---------- - conditional : bool or callable, optional - Flag to determine whether to mark test as deprecated or not. If the - condition is a callable, it is used at runtime to dynamically make the - decision. Default is True. - - Returns - ------- - decorator : function - The `deprecated` decorator itself. - - Notes - ----- - .. versionadded:: 1.4.0 - - """ - def deprecate_decorator(f): - # Local import to avoid a hard nose dependency and only incur the - # import time overhead at actual test-time. - import nose - - def _deprecated_imp(*args, **kwargs): - # Poor man's replacement for the with statement - with assert_warns(DeprecationWarning): - f(*args, **kwargs) - - if isinstance(conditional, collections_abc.Callable): - cond = conditional() - else: - cond = conditional - if cond: - return nose.tools.make_decorator(f)(_deprecated_imp) - else: - return f - return deprecate_decorator - - -def parametrize(vars, input): - """ - Pytest compatibility class. This implements the simplest level of - pytest.mark.parametrize for use in nose as an aid in making the transition - to pytest. 
It achieves that by adding a dummy var parameter and ignoring - the doc_func parameter of the base class. It does not support variable - substitution by name, nor does it support nesting or classes. See the - pytest documentation for usage. - - .. versionadded:: 1.14.0 - - """ - from .parameterized import parameterized - - return parameterized(input) - -_needs_refcount = skipif(not HAS_REFCOUNT, "python has no sys.getrefcount") diff --git a/venv/lib/python3.7/site-packages/numpy/testing/_private/noseclasses.py b/venv/lib/python3.7/site-packages/numpy/testing/_private/noseclasses.py deleted file mode 100644 index e99bbc9..0000000 --- a/venv/lib/python3.7/site-packages/numpy/testing/_private/noseclasses.py +++ /dev/null @@ -1,366 +0,0 @@ -# These classes implement a doctest runner plugin for nose, a "known failure" -# error class, and a customized TestProgram for NumPy. - -# Because this module imports nose directly, it should not -# be used except by nosetester.py to avoid a general NumPy -# dependency on nose. 
-from __future__ import division, absolute_import, print_function - -import os -import sys -import doctest -import inspect - -import numpy -import nose -from nose.plugins import doctests as npd -from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin -from nose.plugins.base import Plugin -from nose.util import src -from .nosetester import get_package_name -from .utils import KnownFailureException, KnownFailureTest - - -# Some of the classes in this module begin with 'Numpy' to clearly distinguish -# them from the plethora of very similar names from nose/unittest/doctest - -#----------------------------------------------------------------------------- -# Modified version of the one in the stdlib, that fixes a python bug (doctests -# not found in extension modules, https://bugs.python.org/issue3158) -class NumpyDocTestFinder(doctest.DocTestFinder): - - def _from_module(self, module, object): - """ - Return true if the given object is defined in the given - module. - """ - if module is None: - return True - elif inspect.isfunction(object): - return module.__dict__ is object.__globals__ - elif inspect.isbuiltin(object): - return module.__name__ == object.__module__ - elif inspect.isclass(object): - return module.__name__ == object.__module__ - elif inspect.ismethod(object): - # This one may be a bug in cython that fails to correctly set the - # __module__ attribute of methods, but since the same error is easy - # to make by extension code writers, having this safety in place - # isn't such a bad idea - return module.__name__ == object.__self__.__class__.__module__ - elif inspect.getmodule(object) is not None: - return module is inspect.getmodule(object) - elif hasattr(object, '__module__'): - return module.__name__ == object.__module__ - elif isinstance(object, property): - return True # [XX] no way not be sure. 
- else: - raise ValueError("object must be a class or function") - - def _find(self, tests, obj, name, module, source_lines, globs, seen): - """ - Find tests for the given object and any contained objects, and - add them to `tests`. - """ - - doctest.DocTestFinder._find(self, tests, obj, name, module, - source_lines, globs, seen) - - # Below we re-run pieces of the above method with manual modifications, - # because the original code is buggy and fails to correctly identify - # doctests in extension modules. - - # Local shorthands - from inspect import ( - isroutine, isclass, ismodule, isfunction, ismethod - ) - - # Look for tests in a module's contained objects. - if ismodule(obj) and self._recurse: - for valname, val in obj.__dict__.items(): - valname1 = '%s.%s' % (name, valname) - if ( (isroutine(val) or isclass(val)) - and self._from_module(module, val)): - - self._find(tests, val, valname1, module, source_lines, - globs, seen) - - # Look for tests in a class's contained objects. - if isclass(obj) and self._recurse: - for valname, val in obj.__dict__.items(): - # Special handling for staticmethod/classmethod. - if isinstance(val, staticmethod): - val = getattr(obj, valname) - if isinstance(val, classmethod): - val = getattr(obj, valname).__func__ - - # Recurse to methods, properties, and nested classes. 
- if ((isfunction(val) or isclass(val) or - ismethod(val) or isinstance(val, property)) and - self._from_module(module, val)): - valname = '%s.%s' % (name, valname) - self._find(tests, val, valname, module, source_lines, - globs, seen) - - -# second-chance checker; if the default comparison doesn't -# pass, then see if the expected output string contains flags that -# tell us to ignore the output -class NumpyOutputChecker(doctest.OutputChecker): - def check_output(self, want, got, optionflags): - ret = doctest.OutputChecker.check_output(self, want, got, - optionflags) - if not ret: - if "#random" in want: - return True - - # it would be useful to normalize endianness so that - # bigendian machines don't fail all the tests (and there are - # actually some bigendian examples in the doctests). Let's try - # making them all little endian - got = got.replace("'>", "'<") - want = want.replace("'>", "'<") - - # try to normalize out 32 and 64 bit default int sizes - for sz in [4, 8]: - got = got.replace("'>> np.testing.nosetester.get_package_name('nonsense') - 'numpy' - - """ - - fullpath = filepath[:] - pkg_name = [] - while 'site-packages' in filepath or 'dist-packages' in filepath: - filepath, p2 = os.path.split(filepath) - if p2 in ('site-packages', 'dist-packages'): - break - pkg_name.append(p2) - - # if package name determination failed, just default to numpy/scipy - if not pkg_name: - if 'scipy' in fullpath: - return 'scipy' - else: - return 'numpy' - - # otherwise, reverse to get correct order and return - pkg_name.reverse() - - # don't include the outer egg directory - if pkg_name[0].endswith('.egg'): - pkg_name.pop(0) - - return '.'.join(pkg_name) - - -def run_module_suite(file_to_run=None, argv=None): - """ - Run a test module. - - Equivalent to calling ``$ nosetests `` from - the command line - - Parameters - ---------- - file_to_run : str, optional - Path to test module, or None. - By default, run the module from which this function is called. 
- argv : list of strings - Arguments to be passed to the nose test runner. ``argv[0]`` is - ignored. All command line arguments accepted by ``nosetests`` - will work. If it is the default value None, sys.argv is used. - - .. versionadded:: 1.9.0 - - Examples - -------- - Adding the following:: - - if __name__ == "__main__" : - run_module_suite(argv=sys.argv) - - at the end of a test module will run the tests when that module is - called in the python interpreter. - - Alternatively, calling:: - - >>> run_module_suite(file_to_run="numpy/tests/test_matlib.py") # doctest: +SKIP - - from an interpreter will run all the test routine in 'test_matlib.py'. - """ - if file_to_run is None: - f = sys._getframe(1) - file_to_run = f.f_locals.get('__file__', None) - if file_to_run is None: - raise AssertionError - - if argv is None: - argv = sys.argv + [file_to_run] - else: - argv = argv + [file_to_run] - - nose = import_nose() - from .noseclasses import KnownFailurePlugin - nose.run(argv=argv, addplugins=[KnownFailurePlugin()]) - - -class NoseTester(object): - """ - Nose test runner. - - This class is made available as numpy.testing.Tester, and a test function - is typically added to a package's __init__.py like so:: - - from numpy.testing import Tester - test = Tester().test - - Calling this test function finds and runs all tests associated with the - package and all its sub-packages. - - Attributes - ---------- - package_path : str - Full path to the package to test. - package_name : str - Name of the package to test. - - Parameters - ---------- - package : module, str or None, optional - The package to test. If a string, this should be the full path to - the package. If None (default), `package` is set to the module from - which `NoseTester` is initialized. - raise_warnings : None, str or sequence of warnings, optional - This specifies which warnings to configure as 'raise' instead - of being shown once during the test execution. 
Valid strings are: - - - "develop" : equals ``(Warning,)`` - - "release" : equals ``()``, don't raise on any warnings. - - Default is "release". - depth : int, optional - If `package` is None, then this can be used to initialize from the - module of the caller of (the caller of (...)) the code that - initializes `NoseTester`. Default of 0 means the module of the - immediate caller; higher values are useful for utility routines that - want to initialize `NoseTester` objects on behalf of other code. - - """ - def __init__(self, package=None, raise_warnings="release", depth=0, - check_fpu_mode=False): - # Back-compat: 'None' used to mean either "release" or "develop" - # depending on whether this was a release or develop version of - # numpy. Those semantics were fine for testing numpy, but not so - # helpful for downstream projects like scipy that use - # numpy.testing. (They want to set this based on whether *they* are a - # release or develop version, not whether numpy is.) So we continue to - # accept 'None' for back-compat, but it's now just an alias for the - # default "release". - if raise_warnings is None: - raise_warnings = "release" - - package_name = None - if package is None: - f = sys._getframe(1 + depth) - package_path = f.f_locals.get('__file__', None) - if package_path is None: - raise AssertionError - package_path = os.path.dirname(package_path) - package_name = f.f_locals.get('__name__', None) - elif isinstance(package, type(os)): - package_path = os.path.dirname(package.__file__) - package_name = getattr(package, '__name__', None) - else: - package_path = str(package) - - self.package_path = package_path - - # Find the package name under test; this name is used to limit coverage - # reporting (if enabled). - if package_name is None: - package_name = get_package_name(package_path) - self.package_name = package_name - - # Set to "release" in constructor in maintenance branches. 
- self.raise_warnings = raise_warnings - - # Whether to check for FPU mode changes - self.check_fpu_mode = check_fpu_mode - - def _test_argv(self, label, verbose, extra_argv): - ''' Generate argv for nosetest command - - Parameters - ---------- - label : {'fast', 'full', '', attribute identifier}, optional - see ``test`` docstring - verbose : int, optional - Verbosity value for test outputs, in the range 1-10. Default is 1. - extra_argv : list, optional - List with any extra arguments to pass to nosetests. - - Returns - ------- - argv : list - command line arguments that will be passed to nose - ''' - argv = [__file__, self.package_path, '-s'] - if label and label != 'full': - if not isinstance(label, basestring): - raise TypeError('Selection label should be a string') - if label == 'fast': - label = 'not slow' - argv += ['-A', label] - argv += ['--verbosity', str(verbose)] - - # When installing with setuptools, and also in some other cases, the - # test_*.py files end up marked +x executable. Nose, by default, does - # not run files marked with +x as they might be scripts. However, in - # our case nose only looks for test_*.py files under the package - # directory, which should be safe. 
- argv += ['--exe'] - - if extra_argv: - argv += extra_argv - return argv - - def _show_system_info(self): - nose = import_nose() - - import numpy - print("NumPy version %s" % numpy.__version__) - relaxed_strides = numpy.ones((10, 1), order="C").flags.f_contiguous - print("NumPy relaxed strides checking option:", relaxed_strides) - npdir = os.path.dirname(numpy.__file__) - print("NumPy is installed in %s" % npdir) - - if 'scipy' in self.package_name: - import scipy - print("SciPy version %s" % scipy.__version__) - spdir = os.path.dirname(scipy.__file__) - print("SciPy is installed in %s" % spdir) - - pyversion = sys.version.replace('\n', '') - print("Python version %s" % pyversion) - print("nose version %d.%d.%d" % nose.__versioninfo__) - - def _get_custom_doctester(self): - """ Return instantiated plugin for doctests - - Allows subclassing of this class to override doctester - - A return value of None means use the nose builtin doctest plugin - """ - from .noseclasses import NumpyDoctest - return NumpyDoctest() - - def prepare_test_args(self, label='fast', verbose=1, extra_argv=None, - doctests=False, coverage=False, timer=False): - """ - Run tests for module using nose. - - This method does the heavy lifting for the `test` method. It takes all - the same arguments, for details see `test`. 
- - See Also - -------- - test - - """ - # fail with nice error message if nose is not present - import_nose() - # compile argv - argv = self._test_argv(label, verbose, extra_argv) - # our way of doing coverage - if coverage: - argv += ['--cover-package=%s' % self.package_name, '--with-coverage', - '--cover-tests', '--cover-erase'] - - if timer: - if timer is True: - argv += ['--with-timer'] - elif isinstance(timer, int): - argv += ['--with-timer', '--timer-top-n', str(timer)] - - # construct list of plugins - import nose.plugins.builtin - from nose.plugins import EntryPointPluginManager - from .noseclasses import (KnownFailurePlugin, Unplugger, - FPUModeCheckPlugin) - plugins = [KnownFailurePlugin()] - plugins += [p() for p in nose.plugins.builtin.plugins] - if self.check_fpu_mode: - plugins += [FPUModeCheckPlugin()] - argv += ["--with-fpumodecheckplugin"] - try: - # External plugins (like nose-timer) - entrypoint_manager = EntryPointPluginManager() - entrypoint_manager.loadPlugins() - plugins += [p for p in entrypoint_manager.plugins] - except ImportError: - # Relies on pkg_resources, not a hard dependency - pass - - # add doctesting if required - doctest_argv = '--with-doctest' in argv - if doctests == False and doctest_argv: - doctests = True - plug = self._get_custom_doctester() - if plug is None: - # use standard doctesting - if doctests and not doctest_argv: - argv += ['--with-doctest'] - else: # custom doctesting - if doctest_argv: # in fact the unplugger would take care of this - argv.remove('--with-doctest') - plugins += [Unplugger('doctest'), plug] - if doctests: - argv += ['--with-' + plug.name] - return argv, plugins - - def test(self, label='fast', verbose=1, extra_argv=None, - doctests=False, coverage=False, raise_warnings=None, - timer=False): - """ - Run tests for module using nose. - - Parameters - ---------- - label : {'fast', 'full', '', attribute identifier}, optional - Identifies the tests to run. 
This can be a string to pass to - the nosetests executable with the '-A' option, or one of several - special values. Special values are: - - * 'fast' - the default - which corresponds to the ``nosetests -A`` - option of 'not slow'. - * 'full' - fast (as above) and slow tests as in the - 'no -A' option to nosetests - this is the same as ''. - * None or '' - run all tests. - * attribute_identifier - string passed directly to nosetests as '-A'. - - verbose : int, optional - Verbosity value for test outputs, in the range 1-10. Default is 1. - extra_argv : list, optional - List with any extra arguments to pass to nosetests. - doctests : bool, optional - If True, run doctests in module. Default is False. - coverage : bool, optional - If True, report coverage of NumPy code. Default is False. - (This requires the - `coverage module `_). - raise_warnings : None, str or sequence of warnings, optional - This specifies which warnings to configure as 'raise' instead - of being shown once during the test execution. Valid strings are: - - * "develop" : equals ``(Warning,)`` - * "release" : equals ``()``, do not raise on any warnings. - timer : bool or int, optional - Timing of individual tests with ``nose-timer`` (which needs to be - installed). If True, time tests and report on all of them. - If an integer (say ``N``), report timing results for ``N`` slowest - tests. - - Returns - ------- - result : object - Returns the result of running the tests as a - ``nose.result.TextTestResult`` object. - - Notes - ----- - Each NumPy module exposes `test` in its namespace to run all tests for it. - For example, to run all tests for numpy.lib: - - >>> np.lib.test() #doctest: +SKIP - - Examples - -------- - >>> result = np.lib.test() #doctest: +SKIP - Running unit tests for numpy.lib - ... 
- Ran 976 tests in 3.933s - - OK - - >>> result.errors #doctest: +SKIP - [] - >>> result.knownfail #doctest: +SKIP - [] - """ - - # cap verbosity at 3 because nose becomes *very* verbose beyond that - verbose = min(verbose, 3) - - from . import utils - utils.verbose = verbose - - argv, plugins = self.prepare_test_args( - label, verbose, extra_argv, doctests, coverage, timer) - - if doctests: - print("Running unit tests and doctests for %s" % self.package_name) - else: - print("Running unit tests for %s" % self.package_name) - - self._show_system_info() - - # reset doctest state on every run - import doctest - doctest.master = None - - if raise_warnings is None: - raise_warnings = self.raise_warnings - - _warn_opts = dict(develop=(Warning,), - release=()) - if isinstance(raise_warnings, basestring): - raise_warnings = _warn_opts[raise_warnings] - - with suppress_warnings("location") as sup: - # Reset the warning filters to the default state, - # so that running the tests is more repeatable. - warnings.resetwarnings() - # Set all warnings to 'warn', this is because the default 'once' - # has the bad property of possibly shadowing later warnings. - warnings.filterwarnings('always') - # Force the requested warnings to raise - for warningtype in raise_warnings: - warnings.filterwarnings('error', category=warningtype) - # Filter out annoying import messages. - sup.filter(message='Not importing directory') - sup.filter(message="numpy.dtype size changed") - sup.filter(message="numpy.ufunc size changed") - sup.filter(category=np.ModuleDeprecationWarning) - # Filter out boolean '-' deprecation messages. This allows - # older versions of scipy to test without a flood of messages. - sup.filter(message=".*boolean negative.*") - sup.filter(message=".*boolean subtract.*") - # Filter out distutils cpu warnings (could be localized to - # distutils tests). ASV has problems with top level import, - # so fetch module for suppression here. 
- with warnings.catch_warnings(): - warnings.simplefilter("always") - from ...distutils import cpuinfo - sup.filter(category=UserWarning, module=cpuinfo) - # See #7949: Filter out deprecation warnings due to the -3 flag to - # python 2 - if sys.version_info.major == 2 and sys.py3kwarning: - # This is very specific, so using the fragile module filter - # is fine - import threading - sup.filter(DeprecationWarning, - r"sys\.exc_clear\(\) not supported in 3\.x", - module=threading) - sup.filter(DeprecationWarning, message=r"in 3\.x, __setslice__") - sup.filter(DeprecationWarning, message=r"in 3\.x, __getslice__") - sup.filter(DeprecationWarning, message=r"buffer\(\) not supported in 3\.x") - sup.filter(DeprecationWarning, message=r"CObject type is not supported in 3\.x") - sup.filter(DeprecationWarning, message=r"comparing unequal types not supported in 3\.x") - # Filter out some deprecation warnings inside nose 1.3.7 when run - # on python 3.5b2. See - # https://github.com/nose-devs/nose/issues/929 - # Note: it is hard to filter based on module for sup (lineno could - # be implemented). - warnings.filterwarnings("ignore", message=".*getargspec.*", - category=DeprecationWarning, - module=r"nose\.") - - from .noseclasses import NumpyTestProgram - - t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins) - - return t.result - - def bench(self, label='fast', verbose=1, extra_argv=None): - """ - Run benchmarks for module using nose. - - Parameters - ---------- - label : {'fast', 'full', '', attribute identifier}, optional - Identifies the benchmarks to run. This can be a string to pass to - the nosetests executable with the '-A' option, or one of several - special values. Special values are: - - * 'fast' - the default - which corresponds to the ``nosetests -A`` - option of 'not slow'. - * 'full' - fast (as above) and slow benchmarks as in the - 'no -A' option to nosetests - this is the same as ''. - * None or '' - run all tests. 
- * attribute_identifier - string passed directly to nosetests as '-A'. - - verbose : int, optional - Verbosity value for benchmark outputs, in the range 1-10. Default is 1. - extra_argv : list, optional - List with any extra arguments to pass to nosetests. - - Returns - ------- - success : bool - Returns True if running the benchmarks works, False if an error - occurred. - - Notes - ----- - Benchmarks are like tests, but have names starting with "bench" instead - of "test", and can be found under the "benchmarks" sub-directory of the - module. - - Each NumPy module exposes `bench` in its namespace to run all benchmarks - for it. - - Examples - -------- - >>> success = np.lib.bench() #doctest: +SKIP - Running benchmarks for numpy.lib - ... - using 562341 items: - unique: - 0.11 - unique1d: - 0.11 - ratio: 1.0 - nUnique: 56230 == 56230 - ... - OK - - >>> success #doctest: +SKIP - True - - """ - - print("Running benchmarks for %s" % self.package_name) - self._show_system_info() - - argv = self._test_argv(label, verbose, extra_argv) - argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep] - - # import nose or make informative error - nose = import_nose() - - # get plugin to disable doctests - from .noseclasses import Unplugger - add_plugins = [Unplugger('doctest')] - - return nose.run(argv=argv, addplugins=add_plugins) - - -def _numpy_tester(): - if hasattr(np, "__version__") and ".dev0" in np.__version__: - mode = "develop" - else: - mode = "release" - return NoseTester(raise_warnings=mode, depth=1, - check_fpu_mode=True) diff --git a/venv/lib/python3.7/site-packages/numpy/testing/_private/parameterized.py b/venv/lib/python3.7/site-packages/numpy/testing/_private/parameterized.py deleted file mode 100644 index 489d8e0..0000000 --- a/venv/lib/python3.7/site-packages/numpy/testing/_private/parameterized.py +++ /dev/null @@ -1,489 +0,0 @@ -""" -tl;dr: all code code is licensed under simplified BSD, unless stated otherwise. 
- -Unless stated otherwise in the source files, all code is copyright 2010 David -Wolever . All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -EVENT SHALL OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE -OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -The views and conclusions contained in the software and documentation are those -of the authors and should not be interpreted as representing official policies, -either expressed or implied, of David Wolever. 
- -""" -import re -import sys -import inspect -import warnings -from functools import wraps -from types import MethodType as MethodType -from collections import namedtuple - -try: - from collections import OrderedDict as MaybeOrderedDict -except ImportError: - MaybeOrderedDict = dict - -from unittest import TestCase - -PY2 = sys.version_info[0] == 2 - - -if PY2: - from types import InstanceType - lzip = zip - text_type = unicode - bytes_type = str - string_types = basestring, - def make_method(func, instance, type): - return MethodType(func, instance, type) -else: - # Python 3 doesn't have an InstanceType, so just use a dummy type. - class InstanceType(): - pass - lzip = lambda *a: list(zip(*a)) - text_type = str - string_types = str, - bytes_type = bytes - def make_method(func, instance, type): - if instance is None: - return func - return MethodType(func, instance) - -_param = namedtuple("param", "args kwargs") - -class param(_param): - """ Represents a single parameter to a test case. 
- - For example:: - - >>> p = param("foo", bar=16) - >>> p - param("foo", bar=16) - >>> p.args - ('foo', ) - >>> p.kwargs - {'bar': 16} - - Intended to be used as an argument to ``@parameterized``:: - - @parameterized([ - param("foo", bar=16), - ]) - def test_stuff(foo, bar=16): - pass - """ - - def __new__(cls, *args , **kwargs): - return _param.__new__(cls, args, kwargs) - - @classmethod - def explicit(cls, args=None, kwargs=None): - """ Creates a ``param`` by explicitly specifying ``args`` and - ``kwargs``:: - - >>> param.explicit([1,2,3]) - param(*(1, 2, 3)) - >>> param.explicit(kwargs={"foo": 42}) - param(*(), **{"foo": "42"}) - """ - args = args or () - kwargs = kwargs or {} - return cls(*args, **kwargs) - - @classmethod - def from_decorator(cls, args): - """ Returns an instance of ``param()`` for ``@parameterized`` argument - ``args``:: - - >>> param.from_decorator((42, )) - param(args=(42, ), kwargs={}) - >>> param.from_decorator("foo") - param(args=("foo", ), kwargs={}) - """ - if isinstance(args, param): - return args - elif isinstance(args, string_types): - args = (args, ) - try: - return cls(*args) - except TypeError as e: - if "after * must be" not in str(e): - raise - raise TypeError( - "Parameters must be tuples, but %r is not (hint: use '(%r, )')" - %(args, args), - ) - - def __repr__(self): - return "param(*%r, **%r)" %self - - -class QuietOrderedDict(MaybeOrderedDict): - """ When OrderedDict is available, use it to make sure that the kwargs in - doc strings are consistently ordered. """ - __str__ = dict.__str__ - __repr__ = dict.__repr__ - - -def parameterized_argument_value_pairs(func, p): - """Return tuples of parameterized arguments and their values. 
- - This is useful if you are writing your own doc_func - function and need to know the values for each parameter name:: - - >>> def func(a, foo=None, bar=42, **kwargs): pass - >>> p = param(1, foo=7, extra=99) - >>> parameterized_argument_value_pairs(func, p) - [("a", 1), ("foo", 7), ("bar", 42), ("**kwargs", {"extra": 99})] - - If the function's first argument is named ``self`` then it will be - ignored:: - - >>> def func(self, a): pass - >>> p = param(1) - >>> parameterized_argument_value_pairs(func, p) - [("a", 1)] - - Additionally, empty ``*args`` or ``**kwargs`` will be ignored:: - - >>> def func(foo, *args): pass - >>> p = param(1) - >>> parameterized_argument_value_pairs(func, p) - [("foo", 1)] - >>> p = param(1, 16) - >>> parameterized_argument_value_pairs(func, p) - [("foo", 1), ("*args", (16, ))] - """ - argspec = inspect.getargspec(func) - arg_offset = 1 if argspec.args[:1] == ["self"] else 0 - - named_args = argspec.args[arg_offset:] - - result = lzip(named_args, p.args) - named_args = argspec.args[len(result) + arg_offset:] - varargs = p.args[len(result):] - - result.extend([ - (name, p.kwargs.get(name, default)) - for (name, default) - in zip(named_args, argspec.defaults or []) - ]) - - seen_arg_names = {n for (n, _) in result} - keywords = QuietOrderedDict(sorted([ - (name, p.kwargs[name]) - for name in p.kwargs - if name not in seen_arg_names - ])) - - if varargs: - result.append(("*%s" %(argspec.varargs, ), tuple(varargs))) - - if keywords: - result.append(("**%s" %(argspec.keywords, ), keywords)) - - return result - -def short_repr(x, n=64): - """ A shortened repr of ``x`` which is guaranteed to be ``unicode``:: - - >>> short_repr("foo") - u"foo" - >>> short_repr("123456789", n=4) - u"12...89" - """ - - x_repr = repr(x) - if isinstance(x_repr, bytes_type): - try: - x_repr = text_type(x_repr, "utf-8") - except UnicodeDecodeError: - x_repr = text_type(x_repr, "latin1") - if len(x_repr) > n: - x_repr = x_repr[:n//2] + "..." 
+ x_repr[len(x_repr) - n//2:] - return x_repr - -def default_doc_func(func, num, p): - if func.__doc__ is None: - return None - - all_args_with_values = parameterized_argument_value_pairs(func, p) - - # Assumes that the function passed is a bound method. - descs = ["%s=%s" %(n, short_repr(v)) for n, v in all_args_with_values] - - # The documentation might be a multiline string, so split it - # and just work with the first string, ignoring the period - # at the end if there is one. - first, nl, rest = func.__doc__.lstrip().partition("\n") - suffix = "" - if first.endswith("."): - suffix = "." - first = first[:-1] - args = "%s[with %s]" %(len(first) and " " or "", ", ".join(descs)) - return "".join([first.rstrip(), args, suffix, nl, rest]) - -def default_name_func(func, num, p): - base_name = func.__name__ - name_suffix = "_%s" %(num, ) - if len(p.args) > 0 and isinstance(p.args[0], string_types): - name_suffix += "_" + parameterized.to_safe_name(p.args[0]) - return base_name + name_suffix - - -# force nose for numpy purposes. -_test_runner_override = 'nose' -_test_runner_guess = False -_test_runners = set(["unittest", "unittest2", "nose", "nose2", "pytest"]) -_test_runner_aliases = { - "_pytest": "pytest", -} - -def set_test_runner(name): - global _test_runner_override - if name not in _test_runners: - raise TypeError( - "Invalid test runner: %r (must be one of: %s)" - %(name, ", ".join(_test_runners)), - ) - _test_runner_override = name - -def detect_runner(): - """ Guess which test runner we're using by traversing the stack and looking - for the first matching module. This *should* be reasonably safe, as - it's done during test disocvery where the test runner should be the - stack frame immediately outside. 
""" - if _test_runner_override is not None: - return _test_runner_override - global _test_runner_guess - if _test_runner_guess is False: - stack = inspect.stack() - for record in reversed(stack): - frame = record[0] - module = frame.f_globals.get("__name__").partition(".")[0] - if module in _test_runner_aliases: - module = _test_runner_aliases[module] - if module in _test_runners: - _test_runner_guess = module - break - if record[1].endswith("python2.6/unittest.py"): - _test_runner_guess = "unittest" - break - else: - _test_runner_guess = None - return _test_runner_guess - -class parameterized(object): - """ Parameterize a test case:: - - class TestInt(object): - @parameterized([ - ("A", 10), - ("F", 15), - param("10", 42, base=42) - ]) - def test_int(self, input, expected, base=16): - actual = int(input, base=base) - assert_equal(actual, expected) - - @parameterized([ - (2, 3, 5) - (3, 5, 8), - ]) - def test_add(a, b, expected): - assert_equal(a + b, expected) - """ - - def __init__(self, input, doc_func=None): - self.get_input = self.input_as_callable(input) - self.doc_func = doc_func or default_doc_func - - def __call__(self, test_func): - self.assert_not_in_testcase_subclass() - - @wraps(test_func) - def wrapper(test_self=None): - test_cls = test_self and type(test_self) - if test_self is not None: - if issubclass(test_cls, InstanceType): - raise TypeError(( - "@parameterized can't be used with old-style classes, but " - "%r has an old-style class. Consider using a new-style " - "class, or '@parameterized.expand' " - "(see http://stackoverflow.com/q/54867/71522 for more " - "information on old-style classes)." 
- ) %(test_self, )) - - original_doc = wrapper.__doc__ - for num, args in enumerate(wrapper.parameterized_input): - p = param.from_decorator(args) - unbound_func, nose_tuple = self.param_as_nose_tuple(test_self, test_func, num, p) - try: - wrapper.__doc__ = nose_tuple[0].__doc__ - # Nose uses `getattr(instance, test_func.__name__)` to get - # a method bound to the test instance (as opposed to a - # method bound to the instance of the class created when - # tests were being enumerated). Set a value here to make - # sure nose can get the correct test method. - if test_self is not None: - setattr(test_cls, test_func.__name__, unbound_func) - yield nose_tuple - finally: - if test_self is not None: - delattr(test_cls, test_func.__name__) - wrapper.__doc__ = original_doc - wrapper.parameterized_input = self.get_input() - wrapper.parameterized_func = test_func - test_func.__name__ = "_parameterized_original_%s" %(test_func.__name__, ) - return wrapper - - def param_as_nose_tuple(self, test_self, func, num, p): - nose_func = wraps(func)(lambda *args: func(*args[:-1], **args[-1])) - nose_func.__doc__ = self.doc_func(func, num, p) - # Track the unbound function because we need to setattr the unbound - # function onto the class for nose to work (see comments above), and - # Python 3 doesn't let us pull the function out of a bound method. - unbound_func = nose_func - if test_self is not None: - # Under nose on Py2 we need to return an unbound method to make - # sure that the `self` in the method is properly shared with the - # `self` used in `setUp` and `tearDown`. But only there. Everyone - # else needs a bound method. 
- func_self = ( - None if PY2 and detect_runner() == "nose" else - test_self - ) - nose_func = make_method(nose_func, func_self, type(test_self)) - return unbound_func, (nose_func, ) + p.args + (p.kwargs or {}, ) - - def assert_not_in_testcase_subclass(self): - parent_classes = self._terrible_magic_get_defining_classes() - if any(issubclass(cls, TestCase) for cls in parent_classes): - raise Exception("Warning: '@parameterized' tests won't work " - "inside subclasses of 'TestCase' - use " - "'@parameterized.expand' instead.") - - def _terrible_magic_get_defining_classes(self): - """ Returns the set of parent classes of the class currently being defined. - Will likely only work if called from the ``parameterized`` decorator. - This function is entirely @brandon_rhodes's fault, as he suggested - the implementation: http://stackoverflow.com/a/8793684/71522 - """ - stack = inspect.stack() - if len(stack) <= 4: - return [] - frame = stack[4] - code_context = frame[4] and frame[4][0].strip() - if not (code_context and code_context.startswith("class ")): - return [] - _, _, parents = code_context.partition("(") - parents, _, _ = parents.partition(")") - return eval("[" + parents + "]", frame[0].f_globals, frame[0].f_locals) - - @classmethod - def input_as_callable(cls, input): - if callable(input): - return lambda: cls.check_input_values(input()) - input_values = cls.check_input_values(input) - return lambda: input_values - - @classmethod - def check_input_values(cls, input_values): - # Explicitly convert non-list inputs to a list so that: - # 1. A helpful exception will be raised if they aren't iterable, and - # 2. 
Generators are unwrapped exactly once (otherwise `nosetests - # --processes=n` has issues; see: - # https://github.com/wolever/nose-parameterized/pull/31) - if not isinstance(input_values, list): - input_values = list(input_values) - return [ param.from_decorator(p) for p in input_values ] - - @classmethod - def expand(cls, input, name_func=None, doc_func=None, **legacy): - """ A "brute force" method of parameterizing test cases. Creates new - test cases and injects them into the namespace that the wrapped - function is being defined in. Useful for parameterizing tests in - subclasses of 'UnitTest', where Nose test generators don't work. - - >>> @parameterized.expand([("foo", 1, 2)]) - ... def test_add1(name, input, expected): - ... actual = add1(input) - ... assert_equal(actual, expected) - ... - >>> locals() - ... 'test_add1_foo_0': ... - >>> - """ - - if "testcase_func_name" in legacy: - warnings.warn("testcase_func_name= is deprecated; use name_func=", - DeprecationWarning, stacklevel=2) - if not name_func: - name_func = legacy["testcase_func_name"] - - if "testcase_func_doc" in legacy: - warnings.warn("testcase_func_doc= is deprecated; use doc_func=", - DeprecationWarning, stacklevel=2) - if not doc_func: - doc_func = legacy["testcase_func_doc"] - - doc_func = doc_func or default_doc_func - name_func = name_func or default_name_func - - def parameterized_expand_wrapper(f, instance=None): - stack = inspect.stack() - frame = stack[1] - frame_locals = frame[0].f_locals - - parameters = cls.input_as_callable(input)() - for num, p in enumerate(parameters): - name = name_func(f, num, p) - frame_locals[name] = cls.param_as_standalone_func(p, f, name) - frame_locals[name].__doc__ = doc_func(f, num, p) - - f.__test__ = False - return parameterized_expand_wrapper - - @classmethod - def param_as_standalone_func(cls, p, func, name): - @wraps(func) - def standalone_func(*a): - return func(*(a + p.args), **p.kwargs) - standalone_func.__name__ = name - - # place_as is used 
by py.test to determine what source file should be - # used for this test. - standalone_func.place_as = func - - # Remove __wrapped__ because py.test will try to look at __wrapped__ - # to determine which parameters should be used with this test case, - # and obviously we don't need it to do any parameterization. - try: - del standalone_func.__wrapped__ - except AttributeError: - pass - return standalone_func - - @classmethod - def to_safe_name(cls, s): - return str(re.sub("[^a-zA-Z0-9_]+", "_", s)) diff --git a/venv/lib/python3.7/site-packages/numpy/testing/_private/utils.py b/venv/lib/python3.7/site-packages/numpy/testing/_private/utils.py deleted file mode 100644 index d00538c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/testing/_private/utils.py +++ /dev/null @@ -1,2499 +0,0 @@ -""" -Utility function to facilitate testing. - -""" -from __future__ import division, absolute_import, print_function - -import os -import sys -import platform -import re -import gc -import operator -import warnings -from functools import partial, wraps -import shutil -import contextlib -from tempfile import mkdtemp, mkstemp -from unittest.case import SkipTest -from warnings import WarningMessage -import pprint - -from numpy.core import( - intp, float32, empty, arange, array_repr, ndarray, isnat, array) -import numpy.__config__ - -if sys.version_info[0] >= 3: - from io import StringIO -else: - from StringIO import StringIO - -__all__ = [ - 'assert_equal', 'assert_almost_equal', 'assert_approx_equal', - 'assert_array_equal', 'assert_array_less', 'assert_string_equal', - 'assert_array_almost_equal', 'assert_raises', 'build_err_msg', - 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal', - 'raises', 'rundocs', 'runstring', 'verbose', 'measure', - 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex', - 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings', - 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings', - 'SkipTest', 
'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY', - 'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare', - '_assert_valid_refcount', '_gen_alignment_data', 'assert_no_gc_cycles', - 'break_cycles', 'HAS_LAPACK64' - ] - - -class KnownFailureException(Exception): - '''Raise this exception to mark a test as a known failing test.''' - pass - - -KnownFailureTest = KnownFailureException # backwards compat -verbose = 0 - -IS_PYPY = platform.python_implementation() == 'PyPy' -HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None -HAS_LAPACK64 = hasattr(numpy.__config__, 'lapack_ilp64_opt_info') - - -def import_nose(): - """ Import nose only when needed. - """ - nose_is_good = True - minimum_nose_version = (1, 0, 0) - try: - import nose - except ImportError: - nose_is_good = False - else: - if nose.__versioninfo__ < minimum_nose_version: - nose_is_good = False - - if not nose_is_good: - msg = ('Need nose >= %d.%d.%d for tests - see ' - 'https://nose.readthedocs.io' % - minimum_nose_version) - raise ImportError(msg) - - return nose - - -def assert_(val, msg=''): - """ - Assert that works in release mode. - Accepts callable msg to allow deferring evaluation until failure. - - The Python built-in ``assert`` does not work when executing code in - optimized mode (the ``-O`` flag) - no byte-code is generated for it. - - For documentation on usage, refer to the Python documentation. - - """ - __tracebackhide__ = True # Hide traceback for py.test - if not val: - try: - smsg = msg() - except TypeError: - smsg = msg - raise AssertionError(smsg) - - -def gisnan(x): - """like isnan, but always raise an error if type not supported instead of - returning a TypeError object. - - Notes - ----- - isnan and other ufunc sometimes return a NotImplementedType object instead - of raising any exception. This function is a wrapper to make sure an - exception is always raised. 
- - This should be removed once this problem is solved at the Ufunc level.""" - from numpy.core import isnan - st = isnan(x) - if isinstance(st, type(NotImplemented)): - raise TypeError("isnan not supported for this type") - return st - - -def gisfinite(x): - """like isfinite, but always raise an error if type not supported instead of - returning a TypeError object. - - Notes - ----- - isfinite and other ufunc sometimes return a NotImplementedType object instead - of raising any exception. This function is a wrapper to make sure an - exception is always raised. - - This should be removed once this problem is solved at the Ufunc level.""" - from numpy.core import isfinite, errstate - with errstate(invalid='ignore'): - st = isfinite(x) - if isinstance(st, type(NotImplemented)): - raise TypeError("isfinite not supported for this type") - return st - - -def gisinf(x): - """like isinf, but always raise an error if type not supported instead of - returning a TypeError object. - - Notes - ----- - isinf and other ufunc sometimes return a NotImplementedType object instead - of raising any exception. This function is a wrapper to make sure an - exception is always raised. - - This should be removed once this problem is solved at the Ufunc level.""" - from numpy.core import isinf, errstate - with errstate(invalid='ignore'): - st = isinf(x) - if isinstance(st, type(NotImplemented)): - raise TypeError("isinf not supported for this type") - return st - - -if os.name == 'nt': - # Code "stolen" from enthought/debug/memusage.py - def GetPerformanceAttributes(object, counter, instance=None, - inum=-1, format=None, machine=None): - # NOTE: Many counters require 2 samples to give accurate results, - # including "% Processor Time" (as by definition, at any instant, a - # thread's CPU usage is either 0 or 100). To read counters like this, - # you should copy this function, but keep the counter open, and call - # CollectQueryData() each time you need to know. 
- # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp (dead link) - # My older explanation for this was that the "AddCounter" process forced - # the CPU to 100%, but the above makes more sense :) - import win32pdh - if format is None: - format = win32pdh.PDH_FMT_LONG - path = win32pdh.MakeCounterPath( (machine, object, instance, None, inum, counter)) - hq = win32pdh.OpenQuery() - try: - hc = win32pdh.AddCounter(hq, path) - try: - win32pdh.CollectQueryData(hq) - type, val = win32pdh.GetFormattedCounterValue(hc, format) - return val - finally: - win32pdh.RemoveCounter(hc) - finally: - win32pdh.CloseQuery(hq) - - def memusage(processName="python", instance=0): - # from win32pdhutil, part of the win32all package - import win32pdh - return GetPerformanceAttributes("Process", "Virtual Bytes", - processName, instance, - win32pdh.PDH_FMT_LONG, None) -elif sys.platform[:5] == 'linux': - - def memusage(_proc_pid_stat='/proc/%s/stat' % (os.getpid())): - """ - Return virtual memory size in bytes of the running python. - - """ - try: - f = open(_proc_pid_stat, 'r') - l = f.readline().split(' ') - f.close() - return int(l[22]) - except Exception: - return -else: - def memusage(): - """ - Return memory usage of running python. [Not implemented] - - """ - raise NotImplementedError - - -if sys.platform[:5] == 'linux': - def jiffies(_proc_pid_stat='/proc/%s/stat' % (os.getpid()), - _load_time=[]): - """ - Return number of jiffies elapsed. - - Return number of jiffies (1/100ths of a second) that this - process has been scheduled in user mode. See man 5 proc. - - """ - import time - if not _load_time: - _load_time.append(time.time()) - try: - f = open(_proc_pid_stat, 'r') - l = f.readline().split(' ') - f.close() - return int(l[13]) - except Exception: - return int(100*(time.time()-_load_time[0])) -else: - # os.getpid is not in all platforms available. - # Using time is safe but inaccurate, especially when process - # was suspended or sleeping. 
- def jiffies(_load_time=[]): - """ - Return number of jiffies elapsed. - - Return number of jiffies (1/100ths of a second) that this - process has been scheduled in user mode. See man 5 proc. - - """ - import time - if not _load_time: - _load_time.append(time.time()) - return int(100*(time.time()-_load_time[0])) - - -def build_err_msg(arrays, err_msg, header='Items are not equal:', - verbose=True, names=('ACTUAL', 'DESIRED'), precision=8): - msg = ['\n' + header] - if err_msg: - if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header): - msg = [msg[0] + ' ' + err_msg] - else: - msg.append(err_msg) - if verbose: - for i, a in enumerate(arrays): - - if isinstance(a, ndarray): - # precision argument is only needed if the objects are ndarrays - r_func = partial(array_repr, precision=precision) - else: - r_func = repr - - try: - r = r_func(a) - except Exception as exc: - r = '[repr failed for <{}>: {}]'.format(type(a).__name__, exc) - if r.count('\n') > 3: - r = '\n'.join(r.splitlines()[:3]) - r += '...' - msg.append(' %s: %s' % (names[i], r)) - return '\n'.join(msg) - - -def assert_equal(actual, desired, err_msg='', verbose=True): - """ - Raises an AssertionError if two objects are not equal. - - Given two objects (scalars, lists, tuples, dictionaries or numpy arrays), - check that all elements of these objects are equal. An exception is raised - at the first conflicting values. - - When one of `actual` and `desired` is a scalar and the other is array_like, - the function checks that each element of the array_like object is equal to - the scalar. - - This function handles NaN comparisons as if NaN was a "normal" number. - That is, no assertion is raised if both objects have NaNs in the same - positions. This is in contrast to the IEEE standard on NaNs, which says - that NaN compared to anything must return False. - - Parameters - ---------- - actual : array_like - The object to check. - desired : array_like - The expected object. 
- err_msg : str, optional - The error message to be printed in case of failure. - verbose : bool, optional - If True, the conflicting values are appended to the error message. - - Raises - ------ - AssertionError - If actual and desired are not equal. - - Examples - -------- - >>> np.testing.assert_equal([4,5], [4,6]) - Traceback (most recent call last): - ... - AssertionError: - Items are not equal: - item=1 - ACTUAL: 5 - DESIRED: 6 - - The following comparison does not raise an exception. There are NaNs - in the inputs, but they are in the same positions. - - >>> np.testing.assert_equal(np.array([1.0, 2.0, np.nan]), [1, 2, np.nan]) - - """ - __tracebackhide__ = True # Hide traceback for py.test - if isinstance(desired, dict): - if not isinstance(actual, dict): - raise AssertionError(repr(type(actual))) - assert_equal(len(actual), len(desired), err_msg, verbose) - for k, i in desired.items(): - if k not in actual: - raise AssertionError(repr(k)) - assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg), verbose) - return - if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): - assert_equal(len(actual), len(desired), err_msg, verbose) - for k in range(len(desired)): - assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg), verbose) - return - from numpy.core import ndarray, isscalar, signbit - from numpy.lib import iscomplexobj, real, imag - if isinstance(actual, ndarray) or isinstance(desired, ndarray): - return assert_array_equal(actual, desired, err_msg, verbose) - msg = build_err_msg([actual, desired], err_msg, verbose=verbose) - - # Handle complex numbers: separate into real/imag to handle - # nan/inf/negative zero correctly - # XXX: catch ValueError for subclasses of ndarray where iscomplex fail - try: - usecomplex = iscomplexobj(actual) or iscomplexobj(desired) - except (ValueError, TypeError): - usecomplex = False - - if usecomplex: - if iscomplexobj(actual): - actualr = real(actual) - actuali = imag(actual) - else: 
- actualr = actual - actuali = 0 - if iscomplexobj(desired): - desiredr = real(desired) - desiredi = imag(desired) - else: - desiredr = desired - desiredi = 0 - try: - assert_equal(actualr, desiredr) - assert_equal(actuali, desiredi) - except AssertionError: - raise AssertionError(msg) - - # isscalar test to check cases such as [np.nan] != np.nan - if isscalar(desired) != isscalar(actual): - raise AssertionError(msg) - - try: - isdesnat = isnat(desired) - isactnat = isnat(actual) - dtypes_match = array(desired).dtype.type == array(actual).dtype.type - if isdesnat and isactnat: - # If both are NaT (and have the same dtype -- datetime or - # timedelta) they are considered equal. - if dtypes_match: - return - else: - raise AssertionError(msg) - - except (TypeError, ValueError, NotImplementedError): - pass - - # Inf/nan/negative zero handling - try: - isdesnan = gisnan(desired) - isactnan = gisnan(actual) - if isdesnan and isactnan: - return # both nan, so equal - - # handle signed zero specially for floats - array_actual = array(actual) - array_desired = array(desired) - if (array_actual.dtype.char in 'Mm' or - array_desired.dtype.char in 'Mm'): - # version 1.18 - # until this version, gisnan failed for datetime64 and timedelta64. - # Now it succeeds but comparison to scalar with a different type - # emits a DeprecationWarning. 
- # Avoid that by skipping the next check - raise NotImplementedError('cannot compare to a scalar ' - 'with a different type') - - if desired == 0 and actual == 0: - if not signbit(desired) == signbit(actual): - raise AssertionError(msg) - - except (TypeError, ValueError, NotImplementedError): - pass - - try: - # Explicitly use __eq__ for comparison, gh-2552 - if not (desired == actual): - raise AssertionError(msg) - - except (DeprecationWarning, FutureWarning) as e: - # this handles the case when the two types are not even comparable - if 'elementwise == comparison' in e.args[0]: - raise AssertionError(msg) - else: - raise - - -def print_assert_equal(test_string, actual, desired): - """ - Test if two objects are equal, and print an error message if test fails. - - The test is performed with ``actual == desired``. - - Parameters - ---------- - test_string : str - The message supplied to AssertionError. - actual : object - The object to test for equality against `desired`. - desired : object - The expected result. - - Examples - -------- - >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1]) - >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2]) - Traceback (most recent call last): - ... - AssertionError: Test XYZ of func xyz failed - ACTUAL: - [0, 1] - DESIRED: - [0, 2] - - """ - __tracebackhide__ = True # Hide traceback for py.test - import pprint - - if not (actual == desired): - msg = StringIO() - msg.write(test_string) - msg.write(' failed\nACTUAL: \n') - pprint.pprint(actual, msg) - msg.write('DESIRED: \n') - pprint.pprint(desired, msg) - raise AssertionError(msg.getvalue()) - - -def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True): - """ - Raises an AssertionError if two items are not equal up to desired - precision. - - .. 
note:: It is recommended to use one of `assert_allclose`, - `assert_array_almost_equal_nulp` or `assert_array_max_ulp` - instead of this function for more consistent floating point - comparisons. - - The test verifies that the elements of ``actual`` and ``desired`` satisfy. - - ``abs(desired-actual) < 1.5 * 10**(-decimal)`` - - That is a looser test than originally documented, but agrees with what the - actual implementation in `assert_array_almost_equal` did up to rounding - vagaries. An exception is raised at conflicting values. For ndarrays this - delegates to assert_array_almost_equal - - Parameters - ---------- - actual : array_like - The object to check. - desired : array_like - The expected object. - decimal : int, optional - Desired precision, default is 7. - err_msg : str, optional - The error message to be printed in case of failure. - verbose : bool, optional - If True, the conflicting values are appended to the error message. - - Raises - ------ - AssertionError - If actual and desired are not equal up to specified precision. - - See Also - -------- - assert_allclose: Compare two array_like objects for equality with desired - relative and/or absolute precision. - assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal - - Examples - -------- - >>> import numpy.testing as npt - >>> npt.assert_almost_equal(2.3333333333333, 2.33333334) - >>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10) - Traceback (most recent call last): - ... - AssertionError: - Arrays are not almost equal to 10 decimals - ACTUAL: 2.3333333333333 - DESIRED: 2.33333334 - - >>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]), - ... np.array([1.0,2.33333334]), decimal=9) - Traceback (most recent call last): - ... - AssertionError: - Arrays are not almost equal to 9 decimals - Mismatch: 50% - Max absolute difference: 6.66669964e-09 - Max relative difference: 2.85715698e-09 - x: array([1. , 2.333333333]) - y: array([1. 
, 2.33333334]) - - """ - __tracebackhide__ = True # Hide traceback for py.test - from numpy.core import ndarray - from numpy.lib import iscomplexobj, real, imag - - # Handle complex numbers: separate into real/imag to handle - # nan/inf/negative zero correctly - # XXX: catch ValueError for subclasses of ndarray where iscomplex fail - try: - usecomplex = iscomplexobj(actual) or iscomplexobj(desired) - except ValueError: - usecomplex = False - - def _build_err_msg(): - header = ('Arrays are not almost equal to %d decimals' % decimal) - return build_err_msg([actual, desired], err_msg, verbose=verbose, - header=header) - - if usecomplex: - if iscomplexobj(actual): - actualr = real(actual) - actuali = imag(actual) - else: - actualr = actual - actuali = 0 - if iscomplexobj(desired): - desiredr = real(desired) - desiredi = imag(desired) - else: - desiredr = desired - desiredi = 0 - try: - assert_almost_equal(actualr, desiredr, decimal=decimal) - assert_almost_equal(actuali, desiredi, decimal=decimal) - except AssertionError: - raise AssertionError(_build_err_msg()) - - if isinstance(actual, (ndarray, tuple, list)) \ - or isinstance(desired, (ndarray, tuple, list)): - return assert_array_almost_equal(actual, desired, decimal, err_msg) - try: - # If one of desired/actual is not finite, handle it specially here: - # check that both are nan if any is a nan, and test for equality - # otherwise - if not (gisfinite(desired) and gisfinite(actual)): - if gisnan(desired) or gisnan(actual): - if not (gisnan(desired) and gisnan(actual)): - raise AssertionError(_build_err_msg()) - else: - if not desired == actual: - raise AssertionError(_build_err_msg()) - return - except (NotImplementedError, TypeError): - pass - if abs(desired - actual) >= 1.5 * 10.0**(-decimal): - raise AssertionError(_build_err_msg()) - - -def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True): - """ - Raises an AssertionError if two items are not equal up to significant - digits. - - .. 
note:: It is recommended to use one of `assert_allclose`, - `assert_array_almost_equal_nulp` or `assert_array_max_ulp` - instead of this function for more consistent floating point - comparisons. - - Given two numbers, check that they are approximately equal. - Approximately equal is defined as the number of significant digits - that agree. - - Parameters - ---------- - actual : scalar - The object to check. - desired : scalar - The expected object. - significant : int, optional - Desired precision, default is 7. - err_msg : str, optional - The error message to be printed in case of failure. - verbose : bool, optional - If True, the conflicting values are appended to the error message. - - Raises - ------ - AssertionError - If actual and desired are not equal up to specified precision. - - See Also - -------- - assert_allclose: Compare two array_like objects for equality with desired - relative and/or absolute precision. - assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal - - Examples - -------- - >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20) - >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20, - ... significant=8) - >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20, - ... significant=8) - Traceback (most recent call last): - ... 
- AssertionError: - Items are not equal to 8 significant digits: - ACTUAL: 1.234567e-21 - DESIRED: 1.2345672e-21 - - the evaluated condition that raises the exception is - - >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1) - True - - """ - __tracebackhide__ = True # Hide traceback for py.test - import numpy as np - - (actual, desired) = map(float, (actual, desired)) - if desired == actual: - return - # Normalized the numbers to be in range (-10.0,10.0) - # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual)))))) - with np.errstate(invalid='ignore'): - scale = 0.5*(np.abs(desired) + np.abs(actual)) - scale = np.power(10, np.floor(np.log10(scale))) - try: - sc_desired = desired/scale - except ZeroDivisionError: - sc_desired = 0.0 - try: - sc_actual = actual/scale - except ZeroDivisionError: - sc_actual = 0.0 - msg = build_err_msg( - [actual, desired], err_msg, - header='Items are not equal to %d significant digits:' % significant, - verbose=verbose) - try: - # If one of desired/actual is not finite, handle it specially here: - # check that both are nan if any is a nan, and test for equality - # otherwise - if not (gisfinite(desired) and gisfinite(actual)): - if gisnan(desired) or gisnan(actual): - if not (gisnan(desired) and gisnan(actual)): - raise AssertionError(msg) - else: - if not desired == actual: - raise AssertionError(msg) - return - except (TypeError, NotImplementedError): - pass - if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)): - raise AssertionError(msg) - - -def assert_array_compare(comparison, x, y, err_msg='', verbose=True, - header='', precision=6, equal_nan=True, - equal_inf=True): - __tracebackhide__ = True # Hide traceback for py.test - from numpy.core import array, array2string, isnan, inf, bool_, errstate, all, max, object_ - - x = array(x, copy=False, subok=True) - y = array(y, copy=False, subok=True) - - # original array for output formatting - ox, oy = x, y - - def isnumber(x): - 
return x.dtype.char in '?bhilqpBHILQPefdgFDG' - - def istime(x): - return x.dtype.char in "Mm" - - def func_assert_same_pos(x, y, func=isnan, hasval='nan'): - """Handling nan/inf. - - Combine results of running func on x and y, checking that they are True - at the same locations. - - """ - x_id = func(x) - y_id = func(y) - # We include work-arounds here to handle three types of slightly - # pathological ndarray subclasses: - # (1) all() on `masked` array scalars can return masked arrays, so we - # use != True - # (2) __eq__ on some ndarray subclasses returns Python booleans - # instead of element-wise comparisons, so we cast to bool_() and - # use isinstance(..., bool) checks - # (3) subclasses with bare-bones __array_function__ implementations may - # not implement np.all(), so favor using the .all() method - # We are not committed to supporting such subclasses, but it's nice to - # support them if possible. - if bool_(x_id == y_id).all() != True: - msg = build_err_msg([x, y], - err_msg + '\nx and y %s location mismatch:' - % (hasval), verbose=verbose, header=header, - names=('x', 'y'), precision=precision) - raise AssertionError(msg) - # If there is a scalar, then here we know the array has the same - # flag as it everywhere, so we should return the scalar flag. 
- if isinstance(x_id, bool) or x_id.ndim == 0: - return bool_(x_id) - elif isinstance(x_id, bool) or y_id.ndim == 0: - return bool_(y_id) - else: - return y_id - - try: - cond = (x.shape == () or y.shape == ()) or x.shape == y.shape - if not cond: - msg = build_err_msg([x, y], - err_msg - + '\n(shapes %s, %s mismatch)' % (x.shape, - y.shape), - verbose=verbose, header=header, - names=('x', 'y'), precision=precision) - raise AssertionError(msg) - - flagged = bool_(False) - if isnumber(x) and isnumber(y): - if equal_nan: - flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan') - - if equal_inf: - flagged |= func_assert_same_pos(x, y, - func=lambda xy: xy == +inf, - hasval='+inf') - flagged |= func_assert_same_pos(x, y, - func=lambda xy: xy == -inf, - hasval='-inf') - - elif istime(x) and istime(y): - # If one is datetime64 and the other timedelta64 there is no point - if equal_nan and x.dtype.type == y.dtype.type: - flagged = func_assert_same_pos(x, y, func=isnat, hasval="NaT") - - if flagged.ndim > 0: - x, y = x[~flagged], y[~flagged] - # Only do the comparison if actual values are left - if x.size == 0: - return - elif flagged: - # no sense doing comparison if everything is flagged. - return - - val = comparison(x, y) - - if isinstance(val, bool): - cond = val - reduced = array([val]) - else: - reduced = val.ravel() - cond = reduced.all() - - # The below comparison is a hack to ensure that fully masked - # results, for which val.ravel().all() returns np.ma.masked, - # do not trigger a failure (np.ma.masked != True evaluates as - # np.ma.masked, which is falsy). 
- if cond != True: - n_mismatch = reduced.size - reduced.sum(dtype=intp) - n_elements = flagged.size if flagged.ndim != 0 else reduced.size - percent_mismatch = 100 * n_mismatch / n_elements - remarks = [ - 'Mismatched elements: {} / {} ({:.3g}%)'.format( - n_mismatch, n_elements, percent_mismatch)] - - with errstate(invalid='ignore', divide='ignore'): - # ignore errors for non-numeric types - with contextlib.suppress(TypeError): - error = abs(x - y) - max_abs_error = max(error) - if getattr(error, 'dtype', object_) == object_: - remarks.append('Max absolute difference: ' - + str(max_abs_error)) - else: - remarks.append('Max absolute difference: ' - + array2string(max_abs_error)) - - # note: this definition of relative error matches that one - # used by assert_allclose (found in np.isclose) - # Filter values where the divisor would be zero - nonzero = bool_(y != 0) - if all(~nonzero): - max_rel_error = array(inf) - else: - max_rel_error = max(error[nonzero] / abs(y[nonzero])) - if getattr(error, 'dtype', object_) == object_: - remarks.append('Max relative difference: ' - + str(max_rel_error)) - else: - remarks.append('Max relative difference: ' - + array2string(max_rel_error)) - - err_msg += '\n' + '\n'.join(remarks) - msg = build_err_msg([ox, oy], err_msg, - verbose=verbose, header=header, - names=('x', 'y'), precision=precision) - raise AssertionError(msg) - except ValueError: - import traceback - efmt = traceback.format_exc() - header = 'error during assertion:\n\n%s\n\n%s' % (efmt, header) - - msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header, - names=('x', 'y'), precision=precision) - raise ValueError(msg) - - -def assert_array_equal(x, y, err_msg='', verbose=True): - """ - Raises an AssertionError if two array_like objects are not equal. - - Given two array_like objects, check that the shape is equal and all - elements of these objects are equal (but see the Notes for the special - handling of a scalar). 
An exception is raised at shape mismatch or - conflicting values. In contrast to the standard usage in numpy, NaNs - are compared like numbers, no assertion is raised if both objects have - NaNs in the same positions. - - The usual caution for verifying equality with floating point numbers is - advised. - - Parameters - ---------- - x : array_like - The actual object to check. - y : array_like - The desired, expected object. - err_msg : str, optional - The error message to be printed in case of failure. - verbose : bool, optional - If True, the conflicting values are appended to the error message. - - Raises - ------ - AssertionError - If actual and desired objects are not equal. - - See Also - -------- - assert_allclose: Compare two array_like objects for equality with desired - relative and/or absolute precision. - assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal - - Notes - ----- - When one of `x` and `y` is a scalar and the other is array_like, the - function checks that each element of the array_like object is equal to - the scalar. - - Examples - -------- - The first assert does not raise an exception: - - >>> np.testing.assert_array_equal([1.0,2.33333,np.nan], - ... [np.exp(0),2.33333, np.nan]) - - Assert fails with numerical imprecision with floats: - - >>> np.testing.assert_array_equal([1.0,np.pi,np.nan], - ... [1, np.sqrt(np.pi)**2, np.nan]) - Traceback (most recent call last): - ... - AssertionError: - Arrays are not equal - Mismatch: 33.3% - Max absolute difference: 4.4408921e-16 - Max relative difference: 1.41357986e-16 - x: array([1. , 3.141593, nan]) - y: array([1. , 3.141593, nan]) - - Use `assert_allclose` or one of the nulp (number of floating point values) - functions for these cases instead: - - >>> np.testing.assert_allclose([1.0,np.pi,np.nan], - ... [1, np.sqrt(np.pi)**2, np.nan], - ... rtol=1e-10, atol=0) - - As mentioned in the Notes section, `assert_array_equal` has special - handling for scalars. 
Here the test checks that each value in `x` is 3: - - >>> x = np.full((2, 5), fill_value=3) - >>> np.testing.assert_array_equal(x, 3) - - """ - __tracebackhide__ = True # Hide traceback for py.test - assert_array_compare(operator.__eq__, x, y, err_msg=err_msg, - verbose=verbose, header='Arrays are not equal') - - -def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True): - """ - Raises an AssertionError if two objects are not equal up to desired - precision. - - .. note:: It is recommended to use one of `assert_allclose`, - `assert_array_almost_equal_nulp` or `assert_array_max_ulp` - instead of this function for more consistent floating point - comparisons. - - The test verifies identical shapes and that the elements of ``actual`` and - ``desired`` satisfy. - - ``abs(desired-actual) < 1.5 * 10**(-decimal)`` - - That is a looser test than originally documented, but agrees with what the - actual implementation did up to rounding vagaries. An exception is raised - at shape mismatch or conflicting values. In contrast to the standard usage - in numpy, NaNs are compared like numbers, no assertion is raised if both - objects have NaNs in the same positions. - - Parameters - ---------- - x : array_like - The actual object to check. - y : array_like - The desired, expected object. - decimal : int, optional - Desired precision, default is 6. - err_msg : str, optional - The error message to be printed in case of failure. - verbose : bool, optional - If True, the conflicting values are appended to the error message. - - Raises - ------ - AssertionError - If actual and desired are not equal up to specified precision. - - See Also - -------- - assert_allclose: Compare two array_like objects for equality with desired - relative and/or absolute precision. - assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal - - Examples - -------- - the first assert does not raise an exception - - >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan], - ... 
[1.0,2.333,np.nan]) - - >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], - ... [1.0,2.33339,np.nan], decimal=5) - Traceback (most recent call last): - ... - AssertionError: - Arrays are not almost equal to 5 decimals - Mismatch: 33.3% - Max absolute difference: 6.e-05 - Max relative difference: 2.57136612e-05 - x: array([1. , 2.33333, nan]) - y: array([1. , 2.33339, nan]) - - >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], - ... [1.0,2.33333, 5], decimal=5) - Traceback (most recent call last): - ... - AssertionError: - Arrays are not almost equal to 5 decimals - x and y nan location mismatch: - x: array([1. , 2.33333, nan]) - y: array([1. , 2.33333, 5. ]) - - """ - __tracebackhide__ = True # Hide traceback for py.test - from numpy.core import number, float_, result_type, array - from numpy.core.numerictypes import issubdtype - from numpy.core.fromnumeric import any as npany - - def compare(x, y): - try: - if npany(gisinf(x)) or npany( gisinf(y)): - xinfid = gisinf(x) - yinfid = gisinf(y) - if not (xinfid == yinfid).all(): - return False - # if one item, x and y is +- inf - if x.size == y.size == 1: - return x == y - x = x[~xinfid] - y = y[~yinfid] - except (TypeError, NotImplementedError): - pass - - # make sure y is an inexact type to avoid abs(MIN_INT); will cause - # casting of x later. - dtype = result_type(y, 1.) - y = array(y, dtype=dtype, copy=False, subok=True) - z = abs(x - y) - - if not issubdtype(z.dtype, number): - z = z.astype(float_) # handle object arrays - - return z < 1.5 * 10.0**(-decimal) - - assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, - header=('Arrays are not almost equal to %d decimals' % decimal), - precision=decimal) - - -def assert_array_less(x, y, err_msg='', verbose=True): - """ - Raises an AssertionError if two array_like objects are not ordered by less - than. 
- - Given two array_like objects, check that the shape is equal and all - elements of the first object are strictly smaller than those of the - second object. An exception is raised at shape mismatch or incorrectly - ordered values. Shape mismatch does not raise if an object has zero - dimension. In contrast to the standard usage in numpy, NaNs are - compared, no assertion is raised if both objects have NaNs in the same - positions. - - - - Parameters - ---------- - x : array_like - The smaller object to check. - y : array_like - The larger object to compare. - err_msg : string - The error message to be printed in case of failure. - verbose : bool - If True, the conflicting values are appended to the error message. - - Raises - ------ - AssertionError - If actual and desired objects are not equal. - - See Also - -------- - assert_array_equal: tests objects for equality - assert_array_almost_equal: test objects for equality up to precision - - - - Examples - -------- - >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan]) - >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan]) - Traceback (most recent call last): - ... - AssertionError: - Arrays are not less-ordered - Mismatch: 33.3% - Max absolute difference: 1. - Max relative difference: 0.5 - x: array([ 1., 1., nan]) - y: array([ 1., 2., nan]) - - >>> np.testing.assert_array_less([1.0, 4.0], 3) - Traceback (most recent call last): - ... - AssertionError: - Arrays are not less-ordered - Mismatch: 50% - Max absolute difference: 2. - Max relative difference: 0.66666667 - x: array([1., 4.]) - y: array(3) - - >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4]) - Traceback (most recent call last): - ... 
- AssertionError: - Arrays are not less-ordered - (shapes (3,), (1,) mismatch) - x: array([1., 2., 3.]) - y: array([4]) - - """ - __tracebackhide__ = True # Hide traceback for py.test - assert_array_compare(operator.__lt__, x, y, err_msg=err_msg, - verbose=verbose, - header='Arrays are not less-ordered', - equal_inf=False) - - -def runstring(astr, dict): - exec(astr, dict) - - -def assert_string_equal(actual, desired): - """ - Test if two strings are equal. - - If the given strings are equal, `assert_string_equal` does nothing. - If they are not equal, an AssertionError is raised, and the diff - between the strings is shown. - - Parameters - ---------- - actual : str - The string to test for equality against the expected string. - desired : str - The expected string. - - Examples - -------- - >>> np.testing.assert_string_equal('abc', 'abc') - >>> np.testing.assert_string_equal('abc', 'abcd') - Traceback (most recent call last): - File "", line 1, in - ... - AssertionError: Differences in strings: - - abc+ abcd? + - - """ - # delay import of difflib to reduce startup time - __tracebackhide__ = True # Hide traceback for py.test - import difflib - - if not isinstance(actual, str): - raise AssertionError(repr(type(actual))) - if not isinstance(desired, str): - raise AssertionError(repr(type(desired))) - if desired == actual: - return - - diff = list(difflib.Differ().compare(actual.splitlines(True), desired.splitlines(True))) - diff_list = [] - while diff: - d1 = diff.pop(0) - if d1.startswith(' '): - continue - if d1.startswith('- '): - l = [d1] - d2 = diff.pop(0) - if d2.startswith('? '): - l.append(d2) - d2 = diff.pop(0) - if not d2.startswith('+ '): - raise AssertionError(repr(d2)) - l.append(d2) - if diff: - d3 = diff.pop(0) - if d3.startswith('? 
'): - l.append(d3) - else: - diff.insert(0, d3) - if d2[2:] == d1[2:]: - continue - diff_list.extend(l) - continue - raise AssertionError(repr(d1)) - if not diff_list: - return - msg = 'Differences in strings:\n%s' % (''.join(diff_list)).rstrip() - if actual != desired: - raise AssertionError(msg) - - -def rundocs(filename=None, raise_on_error=True): - """ - Run doctests found in the given file. - - By default `rundocs` raises an AssertionError on failure. - - Parameters - ---------- - filename : str - The path to the file for which the doctests are run. - raise_on_error : bool - Whether to raise an AssertionError when a doctest fails. Default is - True. - - Notes - ----- - The doctests can be run by the user/developer by adding the ``doctests`` - argument to the ``test()`` call. For example, to run all tests (including - doctests) for `numpy.lib`: - - >>> np.lib.test(doctests=True) # doctest: +SKIP - """ - from numpy.compat import npy_load_module - import doctest - if filename is None: - f = sys._getframe(1) - filename = f.f_globals['__file__'] - name = os.path.splitext(os.path.basename(filename))[0] - m = npy_load_module(name, filename) - - tests = doctest.DocTestFinder().find(m) - runner = doctest.DocTestRunner(verbose=False) - - msg = [] - if raise_on_error: - out = lambda s: msg.append(s) - else: - out = None - - for test in tests: - runner.run(test, out=out) - - if runner.failures > 0 and raise_on_error: - raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg)) - - -def raises(*args): - """Decorator to check for raised exceptions. - - The decorated test function must raise one of the passed exceptions to - pass. If you want to test many assertions about exceptions in a single - test, you may want to use `assert_raises` instead. - - .. warning:: - This decorator is nose specific, do not use it if you are using a - different test framework. - - Parameters - ---------- - args : exceptions - The test passes if any of the passed exceptions is raised. 
- - Raises - ------ - AssertionError - - Examples - -------- - - Usage:: - - @raises(TypeError, ValueError) - def test_raises_type_error(): - raise TypeError("This test passes") - - @raises(Exception) - def test_that_fails_by_passing(): - pass - - """ - nose = import_nose() - return nose.tools.raises(*args) - -# -# assert_raises and assert_raises_regex are taken from unittest. -# -import unittest - - -class _Dummy(unittest.TestCase): - def nop(self): - pass - -_d = _Dummy('nop') - -def assert_raises(*args, **kwargs): - """ - assert_raises(exception_class, callable, *args, **kwargs) - assert_raises(exception_class) - - Fail unless an exception of class exception_class is thrown - by callable when invoked with arguments args and keyword - arguments kwargs. If a different type of exception is - thrown, it will not be caught, and the test case will be - deemed to have suffered an error, exactly as for an - unexpected exception. - - Alternatively, `assert_raises` can be used as a context manager: - - >>> from numpy.testing import assert_raises - >>> with assert_raises(ZeroDivisionError): - ... 1 / 0 - - is equivalent to - - >>> def div(x, y): - ... return x / y - >>> assert_raises(ZeroDivisionError, div, 1, 0) - - """ - __tracebackhide__ = True # Hide traceback for py.test - return _d.assertRaises(*args,**kwargs) - - -def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs): - """ - assert_raises_regex(exception_class, expected_regexp, callable, *args, - **kwargs) - assert_raises_regex(exception_class, expected_regexp) - - Fail unless an exception of class exception_class and with message that - matches expected_regexp is thrown by callable when invoked with arguments - args and keyword arguments kwargs. - - Alternatively, can be used as a context manager like `assert_raises`. - - Name of this function adheres to Python 3.2+ reference, but should work in - all versions down to 2.6. - - Notes - ----- - .. 
versionadded:: 1.9.0 - - """ - __tracebackhide__ = True # Hide traceback for py.test - - if sys.version_info.major >= 3: - funcname = _d.assertRaisesRegex - else: - # Only present in Python 2.7, missing from unittest in 2.6 - funcname = _d.assertRaisesRegexp - - return funcname(exception_class, expected_regexp, *args, **kwargs) - - -def decorate_methods(cls, decorator, testmatch=None): - """ - Apply a decorator to all methods in a class matching a regular expression. - - The given decorator is applied to all public methods of `cls` that are - matched by the regular expression `testmatch` - (``testmatch.search(methodname)``). Methods that are private, i.e. start - with an underscore, are ignored. - - Parameters - ---------- - cls : class - Class whose methods to decorate. - decorator : function - Decorator to apply to methods - testmatch : compiled regexp or str, optional - The regular expression. Default value is None, in which case the - nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``) - is used. - If `testmatch` is a string, it is compiled to a regular expression - first. - - """ - if testmatch is None: - testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep) - else: - testmatch = re.compile(testmatch) - cls_attr = cls.__dict__ - - # delayed import to reduce startup time - from inspect import isfunction - - methods = [_m for _m in cls_attr.values() if isfunction(_m)] - for function in methods: - try: - if hasattr(function, 'compat_func_name'): - funcname = function.compat_func_name - else: - funcname = function.__name__ - except AttributeError: - # not a function - continue - if testmatch.search(funcname) and not funcname.startswith('_'): - setattr(cls, funcname, decorator(function)) - return - - -def measure(code_str, times=1, label=None): - """ - Return elapsed time for executing code in the namespace of the caller. - - The supplied code string is compiled with the Python builtin ``compile``. 
- The precision of the timing is 10 milli-seconds. If the code will execute - fast on this timescale, it can be executed many times to get reasonable - timing accuracy. - - Parameters - ---------- - code_str : str - The code to be timed. - times : int, optional - The number of times the code is executed. Default is 1. The code is - only compiled once. - label : str, optional - A label to identify `code_str` with. This is passed into ``compile`` - as the second argument (for run-time error messages). - - Returns - ------- - elapsed : float - Total elapsed time in seconds for executing `code_str` `times` times. - - Examples - -------- - >>> times = 10 - >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)', times=times) - >>> print("Time for a single execution : ", etime / times, "s") # doctest: +SKIP - Time for a single execution : 0.005 s - - """ - frame = sys._getframe(1) - locs, globs = frame.f_locals, frame.f_globals - - code = compile(code_str, - 'Test name: %s ' % label, - 'exec') - i = 0 - elapsed = jiffies() - while i < times: - i += 1 - exec(code, globs, locs) - elapsed = jiffies() - elapsed - return 0.01*elapsed - - -def _assert_valid_refcount(op): - """ - Check that ufuncs don't mishandle refcount of object `1`. - Used in a few regression tests. - """ - if not HAS_REFCOUNT: - return True - import numpy as np, gc - - b = np.arange(100*100).reshape(100, 100) - c = b - i = 1 - - gc.disable() - try: - rc = sys.getrefcount(i) - for j in range(15): - d = op(b, c) - assert_(sys.getrefcount(i) >= rc) - finally: - gc.enable() - del d # for pyflakes - - -def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, - err_msg='', verbose=True): - """ - Raises an AssertionError if two objects are not equal up to desired - tolerance. - - The test is equivalent to ``allclose(actual, desired, rtol, atol)`` (note - that ``allclose`` has different default values). 
It compares the difference - between `actual` and `desired` to ``atol + rtol * abs(desired)``. - - .. versionadded:: 1.5.0 - - Parameters - ---------- - actual : array_like - Array obtained. - desired : array_like - Array desired. - rtol : float, optional - Relative tolerance. - atol : float, optional - Absolute tolerance. - equal_nan : bool, optional. - If True, NaNs will compare equal. - err_msg : str, optional - The error message to be printed in case of failure. - verbose : bool, optional - If True, the conflicting values are appended to the error message. - - Raises - ------ - AssertionError - If actual and desired are not equal up to specified precision. - - See Also - -------- - assert_array_almost_equal_nulp, assert_array_max_ulp - - Examples - -------- - >>> x = [1e-5, 1e-3, 1e-1] - >>> y = np.arccos(np.cos(x)) - >>> np.testing.assert_allclose(x, y, rtol=1e-5, atol=0) - - """ - __tracebackhide__ = True # Hide traceback for py.test - import numpy as np - - def compare(x, y): - return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol, - equal_nan=equal_nan) - - actual, desired = np.asanyarray(actual), np.asanyarray(desired) - header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol) - assert_array_compare(compare, actual, desired, err_msg=str(err_msg), - verbose=verbose, header=header, equal_nan=equal_nan) - - -def assert_array_almost_equal_nulp(x, y, nulp=1): - """ - Compare two arrays relatively to their spacing. - - This is a relatively robust method to compare two arrays whose amplitude - is variable. - - Parameters - ---------- - x, y : array_like - Input arrays. - nulp : int, optional - The maximum number of unit in the last place for tolerance (see Notes). - Default is 1. - - Returns - ------- - None - - Raises - ------ - AssertionError - If the spacing between `x` and `y` for one or more elements is larger - than `nulp`. 
- - See Also - -------- - assert_array_max_ulp : Check that all items of arrays differ in at most - N Units in the Last Place. - spacing : Return the distance between x and the nearest adjacent number. - - Notes - ----- - An assertion is raised if the following condition is not met:: - - abs(x - y) <= nulps * spacing(maximum(abs(x), abs(y))) - - Examples - -------- - >>> x = np.array([1., 1e-10, 1e-20]) - >>> eps = np.finfo(x.dtype).eps - >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x) - - >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x) - Traceback (most recent call last): - ... - AssertionError: X and Y are not equal to 1 ULP (max is 2) - - """ - __tracebackhide__ = True # Hide traceback for py.test - import numpy as np - ax = np.abs(x) - ay = np.abs(y) - ref = nulp * np.spacing(np.where(ax > ay, ax, ay)) - if not np.all(np.abs(x-y) <= ref): - if np.iscomplexobj(x) or np.iscomplexobj(y): - msg = "X and Y are not equal to %d ULP" % nulp - else: - max_nulp = np.max(nulp_diff(x, y)) - msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp) - raise AssertionError(msg) - - -def assert_array_max_ulp(a, b, maxulp=1, dtype=None): - """ - Check that all items of arrays differ in at most N Units in the Last Place. - - Parameters - ---------- - a, b : array_like - Input arrays to be compared. - maxulp : int, optional - The maximum number of units in the last place that elements of `a` and - `b` can differ. Default is 1. - dtype : dtype, optional - Data-type to convert `a` and `b` to if given. Default is None. - - Returns - ------- - ret : ndarray - Array containing number of representable floating point numbers between - items in `a` and `b`. - - Raises - ------ - AssertionError - If one or more elements differ by more than `maxulp`. - - See Also - -------- - assert_array_almost_equal_nulp : Compare two arrays relatively to their - spacing. 
- - Examples - -------- - >>> a = np.linspace(0., 1., 100) - >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a))) - - """ - __tracebackhide__ = True # Hide traceback for py.test - import numpy as np - ret = nulp_diff(a, b, dtype) - if not np.all(ret <= maxulp): - raise AssertionError("Arrays are not almost equal up to %g ULP" % - maxulp) - return ret - - -def nulp_diff(x, y, dtype=None): - """For each item in x and y, return the number of representable floating - points between them. - - Parameters - ---------- - x : array_like - first input array - y : array_like - second input array - dtype : dtype, optional - Data-type to convert `x` and `y` to if given. Default is None. - - Returns - ------- - nulp : array_like - number of representable floating point numbers between each item in x - and y. - - Examples - -------- - # By definition, epsilon is the smallest number such as 1 + eps != 1, so - # there should be exactly one ULP between 1 and 1 + eps - >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps) - 1.0 - """ - import numpy as np - if dtype: - x = np.array(x, dtype=dtype) - y = np.array(y, dtype=dtype) - else: - x = np.array(x) - y = np.array(y) - - t = np.common_type(x, y) - if np.iscomplexobj(x) or np.iscomplexobj(y): - raise NotImplementedError("_nulp not implemented for complex array") - - x = np.array(x, dtype=t) - y = np.array(y, dtype=t) - - if not x.shape == y.shape: - raise ValueError("x and y do not have the same shape: %s - %s" % - (x.shape, y.shape)) - - def _diff(rx, ry, vdt): - diff = np.array(rx-ry, dtype=vdt) - return np.abs(diff) - - rx = integer_repr(x) - ry = integer_repr(y) - return _diff(rx, ry, t) - - -def _integer_repr(x, vdt, comp): - # Reinterpret binary representation of the float as sign-magnitude: - # take into account two-complement representation - # See also - # https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/ - rx = x.view(vdt) - if not (rx.size == 1): - rx[rx < 0] = comp - rx[rx < 
0] - else: - if rx < 0: - rx = comp - rx - - return rx - - -def integer_repr(x): - """Return the signed-magnitude interpretation of the binary representation of - x.""" - import numpy as np - if x.dtype == np.float16: - return _integer_repr(x, np.int16, np.int16(-2**15)) - elif x.dtype == np.float32: - return _integer_repr(x, np.int32, np.int32(-2**31)) - elif x.dtype == np.float64: - return _integer_repr(x, np.int64, np.int64(-2**63)) - else: - raise ValueError("Unsupported dtype %s" % x.dtype) - - -@contextlib.contextmanager -def _assert_warns_context(warning_class, name=None): - __tracebackhide__ = True # Hide traceback for py.test - with suppress_warnings() as sup: - l = sup.record(warning_class) - yield - if not len(l) > 0: - name_str = " when calling %s" % name if name is not None else "" - raise AssertionError("No warning raised" + name_str) - - -def assert_warns(warning_class, *args, **kwargs): - """ - Fail unless the given callable throws the specified warning. - - A warning of class warning_class should be thrown by the callable when - invoked with arguments args and keyword arguments kwargs. - If a different type of warning is thrown, it will not be caught. - - If called with all arguments other than the warning class omitted, may be - used as a context manager: - - with assert_warns(SomeWarning): - do_something() - - The ability to be used as a context manager is new in NumPy v1.11.0. - - .. versionadded:: 1.4.0 - - Parameters - ---------- - warning_class : class - The class defining the warning that `func` is expected to throw. - func : callable - The callable to test. - \\*args : Arguments - Arguments passed to `func`. - \\*\\*kwargs : Kwargs - Keyword arguments passed to `func`. - - Returns - ------- - The value returned by `func`. 
- - """ - if not args: - return _assert_warns_context(warning_class) - - func = args[0] - args = args[1:] - with _assert_warns_context(warning_class, name=func.__name__): - return func(*args, **kwargs) - - -@contextlib.contextmanager -def _assert_no_warnings_context(name=None): - __tracebackhide__ = True # Hide traceback for py.test - with warnings.catch_warnings(record=True) as l: - warnings.simplefilter('always') - yield - if len(l) > 0: - name_str = " when calling %s" % name if name is not None else "" - raise AssertionError("Got warnings%s: %s" % (name_str, l)) - - -def assert_no_warnings(*args, **kwargs): - """ - Fail if the given callable produces any warnings. - - If called with all arguments omitted, may be used as a context manager: - - with assert_no_warnings(): - do_something() - - The ability to be used as a context manager is new in NumPy v1.11.0. - - .. versionadded:: 1.7.0 - - Parameters - ---------- - func : callable - The callable to test. - \\*args : Arguments - Arguments passed to `func`. - \\*\\*kwargs : Kwargs - Keyword arguments passed to `func`. - - Returns - ------- - The value returned by `func`. 
- - """ - if not args: - return _assert_no_warnings_context() - - func = args[0] - args = args[1:] - with _assert_no_warnings_context(name=func.__name__): - return func(*args, **kwargs) - - -def _gen_alignment_data(dtype=float32, type='binary', max_size=24): - """ - generator producing data with different alignment and offsets - to test simd vectorization - - Parameters - ---------- - dtype : dtype - data type to produce - type : string - 'unary': create data for unary operations, creates one input - and output array - 'binary': create data for unary operations, creates two input - and output array - max_size : integer - maximum size of data to produce - - Returns - ------- - if type is 'unary' yields one output, one input array and a message - containing information on the data - if type is 'binary' yields one output array, two input array and a message - containing information on the data - - """ - ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s' - bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s' - for o in range(3): - for s in range(o + 2, max(o + 3, max_size)): - if type == 'unary': - inp = lambda: arange(s, dtype=dtype)[o:] - out = empty((s,), dtype=dtype)[o:] - yield out, inp(), ufmt % (o, o, s, dtype, 'out of place') - d = inp() - yield d, d, ufmt % (o, o, s, dtype, 'in place') - yield out[1:], inp()[:-1], ufmt % \ - (o + 1, o, s - 1, dtype, 'out of place') - yield out[:-1], inp()[1:], ufmt % \ - (o, o + 1, s - 1, dtype, 'out of place') - yield inp()[:-1], inp()[1:], ufmt % \ - (o, o + 1, s - 1, dtype, 'aliased') - yield inp()[1:], inp()[:-1], ufmt % \ - (o + 1, o, s - 1, dtype, 'aliased') - if type == 'binary': - inp1 = lambda: arange(s, dtype=dtype)[o:] - inp2 = lambda: arange(s, dtype=dtype)[o:] - out = empty((s,), dtype=dtype)[o:] - yield out, inp1(), inp2(), bfmt % \ - (o, o, o, s, dtype, 'out of place') - d = inp1() - yield d, d, inp2(), bfmt % \ - (o, o, o, s, dtype, 'in place1') - d = inp2() - yield d, inp1(), d, bfmt % \ - (o, o, o, 
s, dtype, 'in place2') - yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \ - (o + 1, o, o, s - 1, dtype, 'out of place') - yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \ - (o, o + 1, o, s - 1, dtype, 'out of place') - yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \ - (o, o, o + 1, s - 1, dtype, 'out of place') - yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \ - (o + 1, o, o, s - 1, dtype, 'aliased') - yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \ - (o, o + 1, o, s - 1, dtype, 'aliased') - yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \ - (o, o, o + 1, s - 1, dtype, 'aliased') - - -class IgnoreException(Exception): - "Ignoring this exception due to disabled feature" - pass - - -@contextlib.contextmanager -def tempdir(*args, **kwargs): - """Context manager to provide a temporary test folder. - - All arguments are passed as this to the underlying tempfile.mkdtemp - function. - - """ - tmpdir = mkdtemp(*args, **kwargs) - try: - yield tmpdir - finally: - shutil.rmtree(tmpdir) - - -@contextlib.contextmanager -def temppath(*args, **kwargs): - """Context manager for temporary files. - - Context manager that returns the path to a closed temporary file. Its - parameters are the same as for tempfile.mkstemp and are passed directly - to that function. The underlying file is removed when the context is - exited, so it should be closed at that time. - - Windows does not allow a temporary file to be opened if it is already - open, so the underlying file must be closed after opening before it - can be opened again. - - """ - fd, path = mkstemp(*args, **kwargs) - os.close(fd) - try: - yield path - finally: - os.remove(path) - - -class clear_and_catch_warnings(warnings.catch_warnings): - """ Context manager that resets warning registry for catching warnings - - Warnings can be slippery, because, whenever a warning is triggered, Python - adds a ``__warningregistry__`` member to the *calling* module. 
This makes - it impossible to retrigger the warning in this module, whatever you put in - the warnings filters. This context manager accepts a sequence of `modules` - as a keyword argument to its constructor and: - - * stores and removes any ``__warningregistry__`` entries in given `modules` - on entry; - * resets ``__warningregistry__`` to its previous state on exit. - - This makes it possible to trigger any warning afresh inside the context - manager without disturbing the state of warnings outside. - - For compatibility with Python 3.0, please consider all arguments to be - keyword-only. - - Parameters - ---------- - record : bool, optional - Specifies whether warnings should be captured by a custom - implementation of ``warnings.showwarning()`` and be appended to a list - returned by the context manager. Otherwise None is returned by the - context manager. The objects appended to the list are arguments whose - attributes mirror the arguments to ``showwarning()``. - modules : sequence, optional - Sequence of modules for which to reset warnings registry on entry and - restore on exit. To work correctly, all 'ignore' filters should - filter by one of these modules. - - Examples - -------- - >>> import warnings - >>> with np.testing.clear_and_catch_warnings( - ... modules=[np.core.fromnumeric]): - ... warnings.simplefilter('always') - ... warnings.filterwarnings('ignore', module='np.core.fromnumeric') - ... # do something that raises a warning but ignore those in - ... 
# np.core.fromnumeric - """ - class_modules = () - - def __init__(self, record=False, modules=()): - self.modules = set(modules).union(self.class_modules) - self._warnreg_copies = {} - super(clear_and_catch_warnings, self).__init__(record=record) - - def __enter__(self): - for mod in self.modules: - if hasattr(mod, '__warningregistry__'): - mod_reg = mod.__warningregistry__ - self._warnreg_copies[mod] = mod_reg.copy() - mod_reg.clear() - return super(clear_and_catch_warnings, self).__enter__() - - def __exit__(self, *exc_info): - super(clear_and_catch_warnings, self).__exit__(*exc_info) - for mod in self.modules: - if hasattr(mod, '__warningregistry__'): - mod.__warningregistry__.clear() - if mod in self._warnreg_copies: - mod.__warningregistry__.update(self._warnreg_copies[mod]) - - -class suppress_warnings(object): - """ - Context manager and decorator doing much the same as - ``warnings.catch_warnings``. - - However, it also provides a filter mechanism to work around - https://bugs.python.org/issue4180. - - This bug causes Python before 3.4 to not reliably show warnings again - after they have been ignored once (even within catch_warnings). It - means that no "ignore" filter can be used easily, since following - tests might need to see the warning. Additionally it allows easier - specificity for testing warnings and can be nested. - - Parameters - ---------- - forwarding_rule : str, optional - One of "always", "once", "module", or "location". Analogous to - the usual warnings module filter mode, it is useful to reduce - noise mostly on the outmost level. Unsuppressed and unrecorded - warnings will be forwarded based on this rule. Defaults to "always". - "location" is equivalent to the warnings "default", match by exact - location the warning warning originated from. - - Notes - ----- - Filters added inside the context manager will be discarded again - when leaving it. Upon entering all filters defined outside a - context will be applied automatically. 
- - When a recording filter is added, matching warnings are stored in the - ``log`` attribute as well as in the list returned by ``record``. - - If filters are added and the ``module`` keyword is given, the - warning registry of this module will additionally be cleared when - applying it, entering the context, or exiting it. This could cause - warnings to appear a second time after leaving the context if they - were configured to be printed once (default) and were already - printed before the context was entered. - - Nesting this context manager will work as expected when the - forwarding rule is "always" (default). Unfiltered and unrecorded - warnings will be passed out and be matched by the outer level. - On the outmost level they will be printed (or caught by another - warnings context). The forwarding rule argument can modify this - behaviour. - - Like ``catch_warnings`` this context manager is not threadsafe. - - Examples - -------- - - With a context manager:: - - with np.testing.suppress_warnings() as sup: - sup.filter(DeprecationWarning, "Some text") - sup.filter(module=np.ma.core) - log = sup.record(FutureWarning, "Does this occur?") - command_giving_warnings() - # The FutureWarning was given once, the filtered warnings were - # ignored. 
All other warnings abide outside settings (may be - # printed/error) - assert_(len(log) == 1) - assert_(len(sup.log) == 1) # also stored in log attribute - - Or as a decorator:: - - sup = np.testing.suppress_warnings() - sup.filter(module=np.ma.core) # module must match exactly - @sup - def some_function(): - # do something which causes a warning in np.ma.core - pass - """ - def __init__(self, forwarding_rule="always"): - self._entered = False - - # Suppressions are either instance or defined inside one with block: - self._suppressions = [] - - if forwarding_rule not in {"always", "module", "once", "location"}: - raise ValueError("unsupported forwarding rule.") - self._forwarding_rule = forwarding_rule - - def _clear_registries(self): - if hasattr(warnings, "_filters_mutated"): - # clearing the registry should not be necessary on new pythons, - # instead the filters should be mutated. - warnings._filters_mutated() - return - # Simply clear the registry, this should normally be harmless, - # note that on new pythons it would be invalidated anyway. 
- for module in self._tmp_modules: - if hasattr(module, "__warningregistry__"): - module.__warningregistry__.clear() - - def _filter(self, category=Warning, message="", module=None, record=False): - if record: - record = [] # The log where to store warnings - else: - record = None - if self._entered: - if module is None: - warnings.filterwarnings( - "always", category=category, message=message) - else: - module_regex = module.__name__.replace('.', r'\.') + '$' - warnings.filterwarnings( - "always", category=category, message=message, - module=module_regex) - self._tmp_modules.add(module) - self._clear_registries() - - self._tmp_suppressions.append( - (category, message, re.compile(message, re.I), module, record)) - else: - self._suppressions.append( - (category, message, re.compile(message, re.I), module, record)) - - return record - - def filter(self, category=Warning, message="", module=None): - """ - Add a new suppressing filter or apply it if the state is entered. - - Parameters - ---------- - category : class, optional - Warning class to filter - message : string, optional - Regular expression matching the warning message. - module : module, optional - Module to filter for. Note that the module (and its file) - must match exactly and cannot be a submodule. This may make - it unreliable for external modules. - - Notes - ----- - When added within a context, filters are only added inside - the context and will be forgotten when the context is exited. - """ - self._filter(category=category, message=message, module=module, - record=False) - - def record(self, category=Warning, message="", module=None): - """ - Append a new recording filter or apply it if the state is entered. - - All warnings matching will be appended to the ``log`` attribute. - - Parameters - ---------- - category : class, optional - Warning class to filter - message : string, optional - Regular expression matching the warning message. - module : module, optional - Module to filter for. 
Note that the module (and its file) - must match exactly and cannot be a submodule. This may make - it unreliable for external modules. - - Returns - ------- - log : list - A list which will be filled with all matched warnings. - - Notes - ----- - When added within a context, filters are only added inside - the context and will be forgotten when the context is exited. - """ - return self._filter(category=category, message=message, module=module, - record=True) - - def __enter__(self): - if self._entered: - raise RuntimeError("cannot enter suppress_warnings twice.") - - self._orig_show = warnings.showwarning - self._filters = warnings.filters - warnings.filters = self._filters[:] - - self._entered = True - self._tmp_suppressions = [] - self._tmp_modules = set() - self._forwarded = set() - - self.log = [] # reset global log (no need to keep same list) - - for cat, mess, _, mod, log in self._suppressions: - if log is not None: - del log[:] # clear the log - if mod is None: - warnings.filterwarnings( - "always", category=cat, message=mess) - else: - module_regex = mod.__name__.replace('.', r'\.') + '$' - warnings.filterwarnings( - "always", category=cat, message=mess, - module=module_regex) - self._tmp_modules.add(mod) - warnings.showwarning = self._showwarning - self._clear_registries() - - return self - - def __exit__(self, *exc_info): - warnings.showwarning = self._orig_show - warnings.filters = self._filters - self._clear_registries() - self._entered = False - del self._orig_show - del self._filters - - def _showwarning(self, message, category, filename, lineno, - *args, **kwargs): - use_warnmsg = kwargs.pop("use_warnmsg", None) - for cat, _, pattern, mod, rec in ( - self._suppressions + self._tmp_suppressions)[::-1]: - if (issubclass(category, cat) and - pattern.match(message.args[0]) is not None): - if mod is None: - # Message and category match, either recorded or ignored - if rec is not None: - msg = WarningMessage(message, category, filename, - lineno, 
**kwargs) - self.log.append(msg) - rec.append(msg) - return - # Use startswith, because warnings strips the c or o from - # .pyc/.pyo files. - elif mod.__file__.startswith(filename): - # The message and module (filename) match - if rec is not None: - msg = WarningMessage(message, category, filename, - lineno, **kwargs) - self.log.append(msg) - rec.append(msg) - return - - # There is no filter in place, so pass to the outside handler - # unless we should only pass it once - if self._forwarding_rule == "always": - if use_warnmsg is None: - self._orig_show(message, category, filename, lineno, - *args, **kwargs) - else: - self._orig_showmsg(use_warnmsg) - return - - if self._forwarding_rule == "once": - signature = (message.args, category) - elif self._forwarding_rule == "module": - signature = (message.args, category, filename) - elif self._forwarding_rule == "location": - signature = (message.args, category, filename, lineno) - - if signature in self._forwarded: - return - self._forwarded.add(signature) - if use_warnmsg is None: - self._orig_show(message, category, filename, lineno, *args, - **kwargs) - else: - self._orig_showmsg(use_warnmsg) - - def __call__(self, func): - """ - Function decorator to apply certain suppressions to a whole - function. 
- """ - @wraps(func) - def new_func(*args, **kwargs): - with self: - return func(*args, **kwargs) - - return new_func - - -@contextlib.contextmanager -def _assert_no_gc_cycles_context(name=None): - __tracebackhide__ = True # Hide traceback for py.test - - # not meaningful to test if there is no refcounting - if not HAS_REFCOUNT: - yield - return - - assert_(gc.isenabled()) - gc.disable() - gc_debug = gc.get_debug() - try: - for i in range(100): - if gc.collect() == 0: - break - else: - raise RuntimeError( - "Unable to fully collect garbage - perhaps a __del__ method is " - "creating more reference cycles?") - - gc.set_debug(gc.DEBUG_SAVEALL) - yield - # gc.collect returns the number of unreachable objects in cycles that - # were found -- we are checking that no cycles were created in the context - n_objects_in_cycles = gc.collect() - objects_in_cycles = gc.garbage[:] - finally: - del gc.garbage[:] - gc.set_debug(gc_debug) - gc.enable() - - if n_objects_in_cycles: - name_str = " when calling %s" % name if name is not None else "" - raise AssertionError( - "Reference cycles were found{}: {} objects were collected, " - "of which {} are shown below:{}" - .format( - name_str, - n_objects_in_cycles, - len(objects_in_cycles), - ''.join( - "\n {} object with id={}:\n {}".format( - type(o).__name__, - id(o), - pprint.pformat(o).replace('\n', '\n ') - ) for o in objects_in_cycles - ) - ) - ) - - -def assert_no_gc_cycles(*args, **kwargs): - """ - Fail if the given callable produces any reference cycles. - - If called with all arguments omitted, may be used as a context manager: - - with assert_no_gc_cycles(): - do_something() - - .. versionadded:: 1.15.0 - - Parameters - ---------- - func : callable - The callable to test. - \\*args : Arguments - Arguments passed to `func`. - \\*\\*kwargs : Kwargs - Keyword arguments passed to `func`. - - Returns - ------- - Nothing. The result is deliberately discarded to ensure that all cycles - are found. 
- - """ - if not args: - return _assert_no_gc_cycles_context() - - func = args[0] - args = args[1:] - with _assert_no_gc_cycles_context(name=func.__name__): - func(*args, **kwargs) - -def break_cycles(): - """ - Break reference cycles by calling gc.collect - Objects can call other objects' methods (for instance, another object's - __del__) inside their own __del__. On PyPy, the interpreter only runs - between calls to gc.collect, so multiple calls are needed to completely - release all cycles. - """ - - gc.collect() - if IS_PYPY: - # interpreter runs now, to call deleted objects' __del__ methods - gc.collect() - # one more, just to make sure - gc.collect() - - -def requires_memory(free_bytes): - """Decorator to skip a test if not enough memory is available""" - import pytest - - def decorator(func): - @wraps(func) - def wrapper(*a, **kw): - msg = check_free_memory(free_bytes) - if msg is not None: - pytest.skip(msg) - - try: - return func(*a, **kw) - except MemoryError: - # Probably ran out of memory regardless: don't regard as failure - pytest.xfail("MemoryError raised") - - return wrapper - - return decorator - - -def check_free_memory(free_bytes): - """ - Check whether `free_bytes` amount of memory is currently free. - Returns: None if enough memory available, otherwise error message - """ - env_var = 'NPY_AVAILABLE_MEM' - env_value = os.environ.get(env_var) - if env_value is not None: - try: - mem_free = _parse_size(env_value) - except ValueError as exc: - raise ValueError('Invalid environment variable {}: {!s}'.format( - env_var, exc)) - - msg = ('{0} GB memory required, but environment variable ' - 'NPY_AVAILABLE_MEM={1} set'.format( - free_bytes/1e9, env_value)) - else: - mem_free = _get_mem_available() - - if mem_free is None: - msg = ("Could not determine available memory; set NPY_AVAILABLE_MEM " - "environment variable (e.g. 
NPY_AVAILABLE_MEM=16GB) to run " - "the test.") - mem_free = -1 - else: - msg = '{0} GB memory required, but {1} GB available'.format( - free_bytes/1e9, mem_free/1e9) - - return msg if mem_free < free_bytes else None - - -def _parse_size(size_str): - """Convert memory size strings ('12 GB' etc.) to float""" - suffixes = {'': 1, 'b': 1, - 'k': 1000, 'm': 1000**2, 'g': 1000**3, 't': 1000**4, - 'kb': 1000, 'mb': 1000**2, 'gb': 1000**3, 'tb': 1000**4, - 'kib': 1024, 'mib': 1024**2, 'gib': 1024**3, 'tib': 1024**4} - - size_re = re.compile(r'^\s*(\d+|\d+\.\d+)\s*({0})\s*$'.format( - '|'.join(suffixes.keys())), re.I) - - m = size_re.match(size_str.lower()) - if not m or m.group(2) not in suffixes: - raise ValueError("value {!r} not a valid size".format(size_str)) - return int(float(m.group(1)) * suffixes[m.group(2)]) - - -def _get_mem_available(): - """Return available memory in bytes, or None if unknown.""" - try: - import psutil - return psutil.virtual_memory().available - except (ImportError, AttributeError): - pass - - if sys.platform.startswith('linux'): - info = {} - with open('/proc/meminfo', 'r') as f: - for line in f: - p = line.split() - info[p[0].strip(':').lower()] = int(p[1]) * 1024 - - if 'memavailable' in info: - # Linux >= 3.14 - return info['memavailable'] - else: - return info['memfree'] + info['cached'] - - return None - - -def _no_tracing(func): - """ - Decorator to temporarily turn off tracing for the duration of a test. 
- Needed in tests that check refcounting, otherwise the tracing itself - influences the refcounts - """ - if not hasattr(sys, 'gettrace'): - return func - else: - @wraps(func) - def wrapper(*args, **kwargs): - original_trace = sys.gettrace() - try: - sys.settrace(None) - return func(*args, **kwargs) - finally: - sys.settrace(original_trace) - return wrapper - diff --git a/venv/lib/python3.7/site-packages/numpy/testing/print_coercion_tables.py b/venv/lib/python3.7/site-packages/numpy/testing/print_coercion_tables.py deleted file mode 100644 index 72b22ce..0000000 --- a/venv/lib/python3.7/site-packages/numpy/testing/print_coercion_tables.py +++ /dev/null @@ -1,93 +0,0 @@ -#!/usr/bin/env python -"""Prints type-coercion tables for the built-in NumPy types - -""" -from __future__ import division, absolute_import, print_function - -import numpy as np - -# Generic object that can be added, but doesn't do anything else -class GenericObject(object): - def __init__(self, v): - self.v = v - - def __add__(self, other): - return self - - def __radd__(self, other): - return self - - dtype = np.dtype('O') - -def print_cancast_table(ntypes): - print('X', end=' ') - for char in ntypes: - print(char, end=' ') - print() - for row in ntypes: - print(row, end=' ') - for col in ntypes: - print(int(np.can_cast(row, col)), end=' ') - print() - -def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, use_promote_types=False): - print('+', end=' ') - for char in ntypes: - print(char, end=' ') - print() - for row in ntypes: - if row == 'O': - rowtype = GenericObject - else: - rowtype = np.obj2sctype(row) - - print(row, end=' ') - for col in ntypes: - if col == 'O': - coltype = GenericObject - else: - coltype = np.obj2sctype(col) - try: - if firstarray: - rowvalue = np.array([rowtype(inputfirstvalue)], dtype=rowtype) - else: - rowvalue = rowtype(inputfirstvalue) - colvalue = coltype(inputsecondvalue) - if use_promote_types: - char = np.promote_types(rowvalue.dtype, 
colvalue.dtype).char - else: - value = np.add(rowvalue, colvalue) - if isinstance(value, np.ndarray): - char = value.dtype.char - else: - char = np.dtype(type(value)).char - except ValueError: - char = '!' - except OverflowError: - char = '@' - except TypeError: - char = '#' - print(char, end=' ') - print() - - -if __name__ == '__main__': - print("can cast") - print_cancast_table(np.typecodes['All']) - print() - print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'") - print() - print("scalar + scalar") - print_coercion_table(np.typecodes['All'], 0, 0, False) - print() - print("scalar + neg scalar") - print_coercion_table(np.typecodes['All'], 0, -1, False) - print() - print("array + scalar") - print_coercion_table(np.typecodes['All'], 0, 0, True) - print() - print("array + neg scalar") - print_coercion_table(np.typecodes['All'], 0, -1, True) - print() - print("promote_types") - print_coercion_table(np.typecodes['All'], 0, 0, False, True) diff --git a/venv/lib/python3.7/site-packages/numpy/testing/setup.py b/venv/lib/python3.7/site-packages/numpy/testing/setup.py deleted file mode 100644 index 7c3f2fb..0000000 --- a/venv/lib/python3.7/site-packages/numpy/testing/setup.py +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, print_function - - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('testing', parent_package, top_path) - - config.add_subpackage('_private') - config.add_data_dir('tests') - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(maintainer="NumPy Developers", - maintainer_email="numpy-dev@numpy.org", - description="NumPy test module", - url="https://www.numpy.org", - license="NumPy License (BSD Style)", - configuration=configuration, - ) diff --git a/venv/lib/python3.7/site-packages/numpy/testing/tests/__init__.py 
b/venv/lib/python3.7/site-packages/numpy/testing/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/numpy/testing/tests/test_decorators.py b/venv/lib/python3.7/site-packages/numpy/testing/tests/test_decorators.py deleted file mode 100644 index c029bf9..0000000 --- a/venv/lib/python3.7/site-packages/numpy/testing/tests/test_decorators.py +++ /dev/null @@ -1,216 +0,0 @@ -""" -Test the decorators from ``testing.decorators``. - -""" -from __future__ import division, absolute_import, print_function - -import warnings -import pytest - -from numpy.testing import ( - assert_, assert_raises, dec, SkipTest, KnownFailureException, - ) - - -try: - with warnings.catch_warnings(): - warnings.simplefilter("always") - import nose # noqa: F401 -except ImportError: - HAVE_NOSE = False -else: - HAVE_NOSE = True - - -@pytest.mark.skipif(not HAVE_NOSE, reason="Needs nose") -class TestNoseDecorators(object): - # These tests are run in a class for simplicity while still - # getting a report on each, skipped or success. 
- - class DidntSkipException(Exception): - pass - - def test_slow(self): - @dec.slow - def slow_func(x, y, z): - pass - - assert_(slow_func.slow) - - def test_setastest(self): - @dec.setastest() - def f_default(a): - pass - - @dec.setastest(True) - def f_istest(a): - pass - - @dec.setastest(False) - def f_isnottest(a): - pass - - assert_(f_default.__test__) - assert_(f_istest.__test__) - assert_(not f_isnottest.__test__) - - def test_skip_functions_hardcoded(self): - @dec.skipif(True) - def f1(x): - raise self.DidntSkipException - - try: - f1('a') - except self.DidntSkipException: - raise Exception('Failed to skip') - except SkipTest().__class__: - pass - - @dec.skipif(False) - def f2(x): - raise self.DidntSkipException - - try: - f2('a') - except self.DidntSkipException: - pass - except SkipTest().__class__: - raise Exception('Skipped when not expected to') - - def test_skip_functions_callable(self): - def skip_tester(): - return skip_flag == 'skip me!' - - @dec.skipif(skip_tester) - def f1(x): - raise self.DidntSkipException - - try: - skip_flag = 'skip me!' - f1('a') - except self.DidntSkipException: - raise Exception('Failed to skip') - except SkipTest().__class__: - pass - - @dec.skipif(skip_tester) - def f2(x): - raise self.DidntSkipException - - try: - skip_flag = 'five is right out!' 
- f2('a') - except self.DidntSkipException: - pass - except SkipTest().__class__: - raise Exception('Skipped when not expected to') - - def test_skip_generators_hardcoded(self): - @dec.knownfailureif(True, "This test is known to fail") - def g1(x): - for i in range(x): - yield i - - try: - for j in g1(10): - pass - except KnownFailureException().__class__: - pass - else: - raise Exception('Failed to mark as known failure') - - @dec.knownfailureif(False, "This test is NOT known to fail") - def g2(x): - for i in range(x): - yield i - raise self.DidntSkipException('FAIL') - - try: - for j in g2(10): - pass - except KnownFailureException().__class__: - raise Exception('Marked incorrectly as known failure') - except self.DidntSkipException: - pass - - def test_skip_generators_callable(self): - def skip_tester(): - return skip_flag == 'skip me!' - - @dec.knownfailureif(skip_tester, "This test is known to fail") - def g1(x): - for i in range(x): - yield i - - try: - skip_flag = 'skip me!' - for j in g1(10): - pass - except KnownFailureException().__class__: - pass - else: - raise Exception('Failed to mark as known failure') - - @dec.knownfailureif(skip_tester, "This test is NOT known to fail") - def g2(x): - for i in range(x): - yield i - raise self.DidntSkipException('FAIL') - - try: - skip_flag = 'do not skip' - for j in g2(10): - pass - except KnownFailureException().__class__: - raise Exception('Marked incorrectly as known failure') - except self.DidntSkipException: - pass - - def test_deprecated(self): - @dec.deprecated(True) - def non_deprecated_func(): - pass - - @dec.deprecated() - def deprecated_func(): - import warnings - warnings.warn("TEST: deprecated func", DeprecationWarning) - - @dec.deprecated() - def deprecated_func2(): - import warnings - warnings.warn("AHHHH") - raise ValueError - - @dec.deprecated() - def deprecated_func3(): - import warnings - warnings.warn("AHHHH") - - # marked as deprecated, but does not raise DeprecationWarning - 
assert_raises(AssertionError, non_deprecated_func) - # should be silent - deprecated_func() - with warnings.catch_warnings(record=True): - warnings.simplefilter("always") # do not propagate unrelated warnings - # fails if deprecated decorator just disables test. See #1453. - assert_raises(ValueError, deprecated_func2) - # warning is not a DeprecationWarning - assert_raises(AssertionError, deprecated_func3) - - def test_parametrize(self): - # dec.parametrize assumes that it is being run by nose. Because - # we are running under pytest, we need to explicitly check the - # results. - @dec.parametrize('base, power, expected', - [(1, 1, 1), - (2, 1, 2), - (2, 2, 4)]) - def check_parametrize(base, power, expected): - assert_(base**power == expected) - - count = 0 - for test in check_parametrize(): - test[0](*test[1:]) - count += 1 - assert_(count == 3) diff --git a/venv/lib/python3.7/site-packages/numpy/testing/tests/test_doctesting.py b/venv/lib/python3.7/site-packages/numpy/testing/tests/test_doctesting.py deleted file mode 100644 index b77cd93..0000000 --- a/venv/lib/python3.7/site-packages/numpy/testing/tests/test_doctesting.py +++ /dev/null @@ -1,59 +0,0 @@ -""" Doctests for NumPy-specific nose/doctest modifications - -""" -from __future__ import division, absolute_import, print_function - -#FIXME: None of these tests is run, because 'check' is not a recognized -# testing prefix. - -# try the #random directive on the output line -def check_random_directive(): - ''' - >>> 2+2 - #random: may vary on your system - ''' - -# check the implicit "import numpy as np" -def check_implicit_np(): - ''' - >>> np.array([1,2,3]) - array([1, 2, 3]) - ''' - -# there's some extraneous whitespace around the correct responses -def check_whitespace_enabled(): - ''' - # whitespace after the 3 - >>> 1+2 - 3 - - # whitespace before the 7 - >>> 3+4 - 7 - ''' - -def check_empty_output(): - """ Check that no output does not cause an error. 
- - This is related to nose bug 445; the numpy plugin changed the - doctest-result-variable default and therefore hit this bug: - http://code.google.com/p/python-nose/issues/detail?id=445 - - >>> a = 10 - """ - -def check_skip(): - """ Check skip directive - - The test below should not run - - >>> 1/0 #doctest: +SKIP - """ - - -if __name__ == '__main__': - # Run tests outside numpy test rig - import nose - from numpy.testing.noseclasses import NumpyDoctest - argv = ['', __file__, '--with-numpydoctest'] - nose.core.TestProgram(argv=argv, addplugins=[NumpyDoctest()]) diff --git a/venv/lib/python3.7/site-packages/numpy/testing/tests/test_utils.py b/venv/lib/python3.7/site-packages/numpy/testing/tests/test_utils.py deleted file mode 100644 index 1e3f527..0000000 --- a/venv/lib/python3.7/site-packages/numpy/testing/tests/test_utils.py +++ /dev/null @@ -1,1597 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import warnings -import sys -import os -import itertools -import textwrap -import pytest -import weakref - -import numpy as np -from numpy.testing import ( - assert_equal, assert_array_equal, assert_almost_equal, - assert_array_almost_equal, assert_array_less, build_err_msg, raises, - assert_raises, assert_warns, assert_no_warnings, assert_allclose, - assert_approx_equal, assert_array_almost_equal_nulp, assert_array_max_ulp, - clear_and_catch_warnings, suppress_warnings, assert_string_equal, assert_, - tempdir, temppath, assert_no_gc_cycles, HAS_REFCOUNT - ) -from numpy.core.overrides import ARRAY_FUNCTION_ENABLED - - -class _GenericTest(object): - - def _test_equal(self, a, b): - self._assert_func(a, b) - - def _test_not_equal(self, a, b): - with assert_raises(AssertionError): - self._assert_func(a, b) - - def test_array_rank1_eq(self): - """Test two equal array of rank 1 are found equal.""" - a = np.array([1, 2]) - b = np.array([1, 2]) - - self._test_equal(a, b) - - def test_array_rank1_noteq(self): - """Test two different array of rank 
1 are found not equal.""" - a = np.array([1, 2]) - b = np.array([2, 2]) - - self._test_not_equal(a, b) - - def test_array_rank2_eq(self): - """Test two equal array of rank 2 are found equal.""" - a = np.array([[1, 2], [3, 4]]) - b = np.array([[1, 2], [3, 4]]) - - self._test_equal(a, b) - - def test_array_diffshape(self): - """Test two arrays with different shapes are found not equal.""" - a = np.array([1, 2]) - b = np.array([[1, 2], [1, 2]]) - - self._test_not_equal(a, b) - - def test_objarray(self): - """Test object arrays.""" - a = np.array([1, 1], dtype=object) - self._test_equal(a, 1) - - def test_array_likes(self): - self._test_equal([1, 2, 3], (1, 2, 3)) - - -class TestArrayEqual(_GenericTest): - - def setup(self): - self._assert_func = assert_array_equal - - def test_generic_rank1(self): - """Test rank 1 array for all dtypes.""" - def foo(t): - a = np.empty(2, t) - a.fill(1) - b = a.copy() - c = a.copy() - c.fill(0) - self._test_equal(a, b) - self._test_not_equal(c, b) - - # Test numeric types and object - for t in '?bhilqpBHILQPfdgFDG': - foo(t) - - # Test strings - for t in ['S1', 'U1']: - foo(t) - - def test_0_ndim_array(self): - x = np.array(473963742225900817127911193656584771) - y = np.array(18535119325151578301457182298393896) - assert_raises(AssertionError, self._assert_func, x, y) - - y = x - self._assert_func(x, y) - - x = np.array(43) - y = np.array(10) - assert_raises(AssertionError, self._assert_func, x, y) - - y = x - self._assert_func(x, y) - - def test_generic_rank3(self): - """Test rank 3 array for all dtypes.""" - def foo(t): - a = np.empty((4, 2, 3), t) - a.fill(1) - b = a.copy() - c = a.copy() - c.fill(0) - self._test_equal(a, b) - self._test_not_equal(c, b) - - # Test numeric types and object - for t in '?bhilqpBHILQPfdgFDG': - foo(t) - - # Test strings - for t in ['S1', 'U1']: - foo(t) - - def test_nan_array(self): - """Test arrays with nan values in them.""" - a = np.array([1, 2, np.nan]) - b = np.array([1, 2, np.nan]) - - 
self._test_equal(a, b) - - c = np.array([1, 2, 3]) - self._test_not_equal(c, b) - - def test_string_arrays(self): - """Test two arrays with different shapes are found not equal.""" - a = np.array(['floupi', 'floupa']) - b = np.array(['floupi', 'floupa']) - - self._test_equal(a, b) - - c = np.array(['floupipi', 'floupa']) - - self._test_not_equal(c, b) - - def test_recarrays(self): - """Test record arrays.""" - a = np.empty(2, [('floupi', float), ('floupa', float)]) - a['floupi'] = [1, 2] - a['floupa'] = [1, 2] - b = a.copy() - - self._test_equal(a, b) - - c = np.empty(2, [('floupipi', float), ('floupa', float)]) - c['floupipi'] = a['floupi'].copy() - c['floupa'] = a['floupa'].copy() - - with suppress_warnings() as sup: - l = sup.record(FutureWarning, message="elementwise == ") - self._test_not_equal(c, b) - assert_equal(len(l), 1) - - def test_masked_nan_inf(self): - # Regression test for gh-11121 - a = np.ma.MaskedArray([3., 4., 6.5], mask=[False, True, False]) - b = np.array([3., np.nan, 6.5]) - self._test_equal(a, b) - self._test_equal(b, a) - a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, False, False]) - b = np.array([np.inf, 4., 6.5]) - self._test_equal(a, b) - self._test_equal(b, a) - - def test_subclass_that_overrides_eq(self): - # While we cannot guarantee testing functions will always work for - # subclasses, the tests should ideally rely only on subclasses having - # comparison operators, not on them being able to store booleans - # (which, e.g., astropy Quantity cannot usefully do). See gh-8452. 
- class MyArray(np.ndarray): - def __eq__(self, other): - return bool(np.equal(self, other).all()) - - def __ne__(self, other): - return not self == other - - a = np.array([1., 2.]).view(MyArray) - b = np.array([2., 3.]).view(MyArray) - assert_(type(a == a), bool) - assert_(a == a) - assert_(a != b) - self._test_equal(a, a) - self._test_not_equal(a, b) - self._test_not_equal(b, a) - - @pytest.mark.skipif( - not ARRAY_FUNCTION_ENABLED, reason='requires __array_function__') - def test_subclass_that_does_not_implement_npall(self): - class MyArray(np.ndarray): - def __array_function__(self, *args, **kwargs): - return NotImplemented - - a = np.array([1., 2.]).view(MyArray) - b = np.array([2., 3.]).view(MyArray) - with assert_raises(TypeError): - np.all(a) - self._test_equal(a, a) - self._test_not_equal(a, b) - self._test_not_equal(b, a) - - -class TestBuildErrorMessage(object): - - def test_build_err_msg_defaults(self): - x = np.array([1.00001, 2.00002, 3.00003]) - y = np.array([1.00002, 2.00003, 3.00004]) - err_msg = 'There is a mismatch' - - a = build_err_msg([x, y], err_msg) - b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([' - '1.00001, 2.00002, 3.00003])\n DESIRED: array([1.00002, ' - '2.00003, 3.00004])') - assert_equal(a, b) - - def test_build_err_msg_no_verbose(self): - x = np.array([1.00001, 2.00002, 3.00003]) - y = np.array([1.00002, 2.00003, 3.00004]) - err_msg = 'There is a mismatch' - - a = build_err_msg([x, y], err_msg, verbose=False) - b = '\nItems are not equal: There is a mismatch' - assert_equal(a, b) - - def test_build_err_msg_custom_names(self): - x = np.array([1.00001, 2.00002, 3.00003]) - y = np.array([1.00002, 2.00003, 3.00004]) - err_msg = 'There is a mismatch' - - a = build_err_msg([x, y], err_msg, names=('FOO', 'BAR')) - b = ('\nItems are not equal: There is a mismatch\n FOO: array([' - '1.00001, 2.00002, 3.00003])\n BAR: array([1.00002, 2.00003, ' - '3.00004])') - assert_equal(a, b) - - def 
test_build_err_msg_custom_precision(self): - x = np.array([1.000000001, 2.00002, 3.00003]) - y = np.array([1.000000002, 2.00003, 3.00004]) - err_msg = 'There is a mismatch' - - a = build_err_msg([x, y], err_msg, precision=10) - b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([' - '1.000000001, 2.00002 , 3.00003 ])\n DESIRED: array([' - '1.000000002, 2.00003 , 3.00004 ])') - assert_equal(a, b) - - -class TestEqual(TestArrayEqual): - - def setup(self): - self._assert_func = assert_equal - - def test_nan_items(self): - self._assert_func(np.nan, np.nan) - self._assert_func([np.nan], [np.nan]) - self._test_not_equal(np.nan, [np.nan]) - self._test_not_equal(np.nan, 1) - - def test_inf_items(self): - self._assert_func(np.inf, np.inf) - self._assert_func([np.inf], [np.inf]) - self._test_not_equal(np.inf, [np.inf]) - - def test_datetime(self): - self._test_equal( - np.datetime64("2017-01-01", "s"), - np.datetime64("2017-01-01", "s") - ) - self._test_equal( - np.datetime64("2017-01-01", "s"), - np.datetime64("2017-01-01", "m") - ) - - # gh-10081 - self._test_not_equal( - np.datetime64("2017-01-01", "s"), - np.datetime64("2017-01-02", "s") - ) - self._test_not_equal( - np.datetime64("2017-01-01", "s"), - np.datetime64("2017-01-02", "m") - ) - - def test_nat_items(self): - # not a datetime - nadt_no_unit = np.datetime64("NaT") - nadt_s = np.datetime64("NaT", "s") - nadt_d = np.datetime64("NaT", "ns") - # not a timedelta - natd_no_unit = np.timedelta64("NaT") - natd_s = np.timedelta64("NaT", "s") - natd_d = np.timedelta64("NaT", "ns") - - dts = [nadt_no_unit, nadt_s, nadt_d] - tds = [natd_no_unit, natd_s, natd_d] - for a, b in itertools.product(dts, dts): - self._assert_func(a, b) - self._assert_func([a], [b]) - self._test_not_equal([a], b) - - for a, b in itertools.product(tds, tds): - self._assert_func(a, b) - self._assert_func([a], [b]) - self._test_not_equal([a], b) - - for a, b in itertools.product(tds, dts): - self._test_not_equal(a, b) - 
self._test_not_equal(a, [b]) - self._test_not_equal([a], [b]) - self._test_not_equal([a], np.datetime64("2017-01-01", "s")) - self._test_not_equal([b], np.datetime64("2017-01-01", "s")) - self._test_not_equal([a], np.timedelta64(123, "s")) - self._test_not_equal([b], np.timedelta64(123, "s")) - - def test_non_numeric(self): - self._assert_func('ab', 'ab') - self._test_not_equal('ab', 'abb') - - def test_complex_item(self): - self._assert_func(complex(1, 2), complex(1, 2)) - self._assert_func(complex(1, np.nan), complex(1, np.nan)) - self._test_not_equal(complex(1, np.nan), complex(1, 2)) - self._test_not_equal(complex(np.nan, 1), complex(1, np.nan)) - self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2)) - - def test_negative_zero(self): - self._test_not_equal(np.PZERO, np.NZERO) - - def test_complex(self): - x = np.array([complex(1, 2), complex(1, np.nan)]) - y = np.array([complex(1, 2), complex(1, 2)]) - self._assert_func(x, x) - self._test_not_equal(x, y) - - def test_error_message(self): - with pytest.raises(AssertionError) as exc_info: - self._assert_func(np.array([1, 2]), np.array([[1, 2]])) - msg = str(exc_info.value) - msg2 = msg.replace("shapes (2L,), (1L, 2L)", "shapes (2,), (1, 2)") - msg_reference = textwrap.dedent("""\ - - Arrays are not equal - - (shapes (2,), (1, 2) mismatch) - x: array([1, 2]) - y: array([[1, 2]])""") - - try: - assert_equal(msg, msg_reference) - except AssertionError: - assert_equal(msg2, msg_reference) - - def test_object(self): - #gh-12942 - import datetime - a = np.array([datetime.datetime(2000, 1, 1), - datetime.datetime(2000, 1, 2)]) - self._test_not_equal(a, a[::-1]) - - -class TestArrayAlmostEqual(_GenericTest): - - def setup(self): - self._assert_func = assert_array_almost_equal - - def test_closeness(self): - # Note that in the course of time we ended up with - # `abs(x - y) < 1.5 * 10**(-decimal)` - # instead of the previously documented - # `abs(x - y) < 0.5 * 10**(-decimal)` - # so this check serves to 
preserve the wrongness. - - # test scalars - self._assert_func(1.499999, 0.0, decimal=0) - assert_raises(AssertionError, - lambda: self._assert_func(1.5, 0.0, decimal=0)) - - # test arrays - self._assert_func([1.499999], [0.0], decimal=0) - assert_raises(AssertionError, - lambda: self._assert_func([1.5], [0.0], decimal=0)) - - def test_simple(self): - x = np.array([1234.2222]) - y = np.array([1234.2223]) - - self._assert_func(x, y, decimal=3) - self._assert_func(x, y, decimal=4) - assert_raises(AssertionError, - lambda: self._assert_func(x, y, decimal=5)) - - def test_nan(self): - anan = np.array([np.nan]) - aone = np.array([1]) - ainf = np.array([np.inf]) - self._assert_func(anan, anan) - assert_raises(AssertionError, - lambda: self._assert_func(anan, aone)) - assert_raises(AssertionError, - lambda: self._assert_func(anan, ainf)) - assert_raises(AssertionError, - lambda: self._assert_func(ainf, anan)) - - def test_inf(self): - a = np.array([[1., 2.], [3., 4.]]) - b = a.copy() - a[0, 0] = np.inf - assert_raises(AssertionError, - lambda: self._assert_func(a, b)) - b[0, 0] = -np.inf - assert_raises(AssertionError, - lambda: self._assert_func(a, b)) - - def test_subclass(self): - a = np.array([[1., 2.], [3., 4.]]) - b = np.ma.masked_array([[1., 2.], [0., 4.]], - [[False, False], [True, False]]) - self._assert_func(a, b) - self._assert_func(b, a) - self._assert_func(b, b) - - # Test fully masked as well (see gh-11123). - a = np.ma.MaskedArray(3.5, mask=True) - b = np.array([3., 4., 6.5]) - self._test_equal(a, b) - self._test_equal(b, a) - a = np.ma.masked - b = np.array([3., 4., 6.5]) - self._test_equal(a, b) - self._test_equal(b, a) - a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True]) - b = np.array([1., 2., 3.]) - self._test_equal(a, b) - self._test_equal(b, a) - a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True]) - b = np.array(1.) 
- self._test_equal(a, b) - self._test_equal(b, a) - - def test_subclass_that_cannot_be_bool(self): - # While we cannot guarantee testing functions will always work for - # subclasses, the tests should ideally rely only on subclasses having - # comparison operators, not on them being able to store booleans - # (which, e.g., astropy Quantity cannot usefully do). See gh-8452. - class MyArray(np.ndarray): - def __eq__(self, other): - return super(MyArray, self).__eq__(other).view(np.ndarray) - - def __lt__(self, other): - return super(MyArray, self).__lt__(other).view(np.ndarray) - - def all(self, *args, **kwargs): - raise NotImplementedError - - a = np.array([1., 2.]).view(MyArray) - self._assert_func(a, a) - - -class TestAlmostEqual(_GenericTest): - - def setup(self): - self._assert_func = assert_almost_equal - - def test_closeness(self): - # Note that in the course of time we ended up with - # `abs(x - y) < 1.5 * 10**(-decimal)` - # instead of the previously documented - # `abs(x - y) < 0.5 * 10**(-decimal)` - # so this check serves to preserve the wrongness. 
- - # test scalars - self._assert_func(1.499999, 0.0, decimal=0) - assert_raises(AssertionError, - lambda: self._assert_func(1.5, 0.0, decimal=0)) - - # test arrays - self._assert_func([1.499999], [0.0], decimal=0) - assert_raises(AssertionError, - lambda: self._assert_func([1.5], [0.0], decimal=0)) - - def test_nan_item(self): - self._assert_func(np.nan, np.nan) - assert_raises(AssertionError, - lambda: self._assert_func(np.nan, 1)) - assert_raises(AssertionError, - lambda: self._assert_func(np.nan, np.inf)) - assert_raises(AssertionError, - lambda: self._assert_func(np.inf, np.nan)) - - def test_inf_item(self): - self._assert_func(np.inf, np.inf) - self._assert_func(-np.inf, -np.inf) - assert_raises(AssertionError, - lambda: self._assert_func(np.inf, 1)) - assert_raises(AssertionError, - lambda: self._assert_func(-np.inf, np.inf)) - - def test_simple_item(self): - self._test_not_equal(1, 2) - - def test_complex_item(self): - self._assert_func(complex(1, 2), complex(1, 2)) - self._assert_func(complex(1, np.nan), complex(1, np.nan)) - self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan)) - self._test_not_equal(complex(1, np.nan), complex(1, 2)) - self._test_not_equal(complex(np.nan, 1), complex(1, np.nan)) - self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2)) - - def test_complex(self): - x = np.array([complex(1, 2), complex(1, np.nan)]) - z = np.array([complex(1, 2), complex(np.nan, 1)]) - y = np.array([complex(1, 2), complex(1, 2)]) - self._assert_func(x, x) - self._test_not_equal(x, y) - self._test_not_equal(x, z) - - def test_error_message(self): - """Check the message is formatted correctly for the decimal value. 
- Also check the message when input includes inf or nan (gh12200)""" - x = np.array([1.00000000001, 2.00000000002, 3.00003]) - y = np.array([1.00000000002, 2.00000000003, 3.00004]) - - # Test with a different amount of decimal digits - with pytest.raises(AssertionError) as exc_info: - self._assert_func(x, y, decimal=12) - msgs = str(exc_info.value).split('\n') - assert_equal(msgs[3], 'Mismatched elements: 3 / 3 (100%)') - assert_equal(msgs[4], 'Max absolute difference: 1.e-05') - assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06') - assert_equal( - msgs[6], - ' x: array([1.00000000001, 2.00000000002, 3.00003 ])') - assert_equal( - msgs[7], - ' y: array([1.00000000002, 2.00000000003, 3.00004 ])') - - # With the default value of decimal digits, only the 3rd element - # differs. Note that we only check for the formatting of the arrays - # themselves. - with pytest.raises(AssertionError) as exc_info: - self._assert_func(x, y) - msgs = str(exc_info.value).split('\n') - assert_equal(msgs[3], 'Mismatched elements: 1 / 3 (33.3%)') - assert_equal(msgs[4], 'Max absolute difference: 1.e-05') - assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06') - assert_equal(msgs[6], ' x: array([1. , 2. , 3.00003])') - assert_equal(msgs[7], ' y: array([1. , 2. 
, 3.00004])') - - # Check the error message when input includes inf - x = np.array([np.inf, 0]) - y = np.array([np.inf, 1]) - with pytest.raises(AssertionError) as exc_info: - self._assert_func(x, y) - msgs = str(exc_info.value).split('\n') - assert_equal(msgs[3], 'Mismatched elements: 1 / 2 (50%)') - assert_equal(msgs[4], 'Max absolute difference: 1.') - assert_equal(msgs[5], 'Max relative difference: 1.') - assert_equal(msgs[6], ' x: array([inf, 0.])') - assert_equal(msgs[7], ' y: array([inf, 1.])') - - # Check the error message when dividing by zero - x = np.array([1, 2]) - y = np.array([0, 0]) - with pytest.raises(AssertionError) as exc_info: - self._assert_func(x, y) - msgs = str(exc_info.value).split('\n') - assert_equal(msgs[3], 'Mismatched elements: 2 / 2 (100%)') - assert_equal(msgs[4], 'Max absolute difference: 2') - assert_equal(msgs[5], 'Max relative difference: inf') - - def test_error_message_2(self): - """Check the message is formatted correctly when either x or y is a scalar.""" - x = 2 - y = np.ones(20) - with pytest.raises(AssertionError) as exc_info: - self._assert_func(x, y) - msgs = str(exc_info.value).split('\n') - assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)') - assert_equal(msgs[4], 'Max absolute difference: 1.') - assert_equal(msgs[5], 'Max relative difference: 1.') - - y = 2 - x = np.ones(20) - with pytest.raises(AssertionError) as exc_info: - self._assert_func(x, y) - msgs = str(exc_info.value).split('\n') - assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)') - assert_equal(msgs[4], 'Max absolute difference: 1.') - assert_equal(msgs[5], 'Max relative difference: 0.5') - - def test_subclass_that_cannot_be_bool(self): - # While we cannot guarantee testing functions will always work for - # subclasses, the tests should ideally rely only on subclasses having - # comparison operators, not on them being able to store booleans - # (which, e.g., astropy Quantity cannot usefully do). See gh-8452. 
- class MyArray(np.ndarray): - def __eq__(self, other): - return super(MyArray, self).__eq__(other).view(np.ndarray) - - def __lt__(self, other): - return super(MyArray, self).__lt__(other).view(np.ndarray) - - def all(self, *args, **kwargs): - raise NotImplementedError - - a = np.array([1., 2.]).view(MyArray) - self._assert_func(a, a) - - -class TestApproxEqual(object): - - def setup(self): - self._assert_func = assert_approx_equal - - def test_simple_arrays(self): - x = np.array([1234.22]) - y = np.array([1234.23]) - - self._assert_func(x, y, significant=5) - self._assert_func(x, y, significant=6) - assert_raises(AssertionError, - lambda: self._assert_func(x, y, significant=7)) - - def test_simple_items(self): - x = 1234.22 - y = 1234.23 - - self._assert_func(x, y, significant=4) - self._assert_func(x, y, significant=5) - self._assert_func(x, y, significant=6) - assert_raises(AssertionError, - lambda: self._assert_func(x, y, significant=7)) - - def test_nan_array(self): - anan = np.array(np.nan) - aone = np.array(1) - ainf = np.array(np.inf) - self._assert_func(anan, anan) - assert_raises(AssertionError, lambda: self._assert_func(anan, aone)) - assert_raises(AssertionError, lambda: self._assert_func(anan, ainf)) - assert_raises(AssertionError, lambda: self._assert_func(ainf, anan)) - - def test_nan_items(self): - anan = np.array(np.nan) - aone = np.array(1) - ainf = np.array(np.inf) - self._assert_func(anan, anan) - assert_raises(AssertionError, lambda: self._assert_func(anan, aone)) - assert_raises(AssertionError, lambda: self._assert_func(anan, ainf)) - assert_raises(AssertionError, lambda: self._assert_func(ainf, anan)) - - -class TestArrayAssertLess(object): - - def setup(self): - self._assert_func = assert_array_less - - def test_simple_arrays(self): - x = np.array([1.1, 2.2]) - y = np.array([1.2, 2.3]) - - self._assert_func(x, y) - assert_raises(AssertionError, lambda: self._assert_func(y, x)) - - y = np.array([1.0, 2.3]) - - assert_raises(AssertionError, 
lambda: self._assert_func(x, y)) - assert_raises(AssertionError, lambda: self._assert_func(y, x)) - - def test_rank2(self): - x = np.array([[1.1, 2.2], [3.3, 4.4]]) - y = np.array([[1.2, 2.3], [3.4, 4.5]]) - - self._assert_func(x, y) - assert_raises(AssertionError, lambda: self._assert_func(y, x)) - - y = np.array([[1.0, 2.3], [3.4, 4.5]]) - - assert_raises(AssertionError, lambda: self._assert_func(x, y)) - assert_raises(AssertionError, lambda: self._assert_func(y, x)) - - def test_rank3(self): - x = np.ones(shape=(2, 2, 2)) - y = np.ones(shape=(2, 2, 2))+1 - - self._assert_func(x, y) - assert_raises(AssertionError, lambda: self._assert_func(y, x)) - - y[0, 0, 0] = 0 - - assert_raises(AssertionError, lambda: self._assert_func(x, y)) - assert_raises(AssertionError, lambda: self._assert_func(y, x)) - - def test_simple_items(self): - x = 1.1 - y = 2.2 - - self._assert_func(x, y) - assert_raises(AssertionError, lambda: self._assert_func(y, x)) - - y = np.array([2.2, 3.3]) - - self._assert_func(x, y) - assert_raises(AssertionError, lambda: self._assert_func(y, x)) - - y = np.array([1.0, 3.3]) - - assert_raises(AssertionError, lambda: self._assert_func(x, y)) - - def test_nan_noncompare(self): - anan = np.array(np.nan) - aone = np.array(1) - ainf = np.array(np.inf) - self._assert_func(anan, anan) - assert_raises(AssertionError, lambda: self._assert_func(aone, anan)) - assert_raises(AssertionError, lambda: self._assert_func(anan, aone)) - assert_raises(AssertionError, lambda: self._assert_func(anan, ainf)) - assert_raises(AssertionError, lambda: self._assert_func(ainf, anan)) - - def test_nan_noncompare_array(self): - x = np.array([1.1, 2.2, 3.3]) - anan = np.array(np.nan) - - assert_raises(AssertionError, lambda: self._assert_func(x, anan)) - assert_raises(AssertionError, lambda: self._assert_func(anan, x)) - - x = np.array([1.1, 2.2, np.nan]) - - assert_raises(AssertionError, lambda: self._assert_func(x, anan)) - assert_raises(AssertionError, lambda: 
self._assert_func(anan, x)) - - y = np.array([1.0, 2.0, np.nan]) - - self._assert_func(y, x) - assert_raises(AssertionError, lambda: self._assert_func(x, y)) - - def test_inf_compare(self): - aone = np.array(1) - ainf = np.array(np.inf) - - self._assert_func(aone, ainf) - self._assert_func(-ainf, aone) - self._assert_func(-ainf, ainf) - assert_raises(AssertionError, lambda: self._assert_func(ainf, aone)) - assert_raises(AssertionError, lambda: self._assert_func(aone, -ainf)) - assert_raises(AssertionError, lambda: self._assert_func(ainf, ainf)) - assert_raises(AssertionError, lambda: self._assert_func(ainf, -ainf)) - assert_raises(AssertionError, lambda: self._assert_func(-ainf, -ainf)) - - def test_inf_compare_array(self): - x = np.array([1.1, 2.2, np.inf]) - ainf = np.array(np.inf) - - assert_raises(AssertionError, lambda: self._assert_func(x, ainf)) - assert_raises(AssertionError, lambda: self._assert_func(ainf, x)) - assert_raises(AssertionError, lambda: self._assert_func(x, -ainf)) - assert_raises(AssertionError, lambda: self._assert_func(-x, -ainf)) - assert_raises(AssertionError, lambda: self._assert_func(-ainf, -x)) - self._assert_func(-ainf, x) - - -@pytest.mark.skip(reason="The raises decorator depends on Nose") -class TestRaises(object): - - def setup(self): - class MyException(Exception): - pass - - self.e = MyException - - def raises_exception(self, e): - raise e - - def does_not_raise_exception(self): - pass - - def test_correct_catch(self): - raises(self.e)(self.raises_exception)(self.e) # raises? - - def test_wrong_exception(self): - try: - raises(self.e)(self.raises_exception)(RuntimeError) # raises? - except RuntimeError: - return - else: - raise AssertionError("should have caught RuntimeError") - - def test_catch_no_raise(self): - try: - raises(self.e)(self.does_not_raise_exception)() # raises? 
- except AssertionError: - return - else: - raise AssertionError("should have raised an AssertionError") - - -class TestWarns(object): - - def test_warn(self): - def f(): - warnings.warn("yo") - return 3 - - before_filters = sys.modules['warnings'].filters[:] - assert_equal(assert_warns(UserWarning, f), 3) - after_filters = sys.modules['warnings'].filters - - assert_raises(AssertionError, assert_no_warnings, f) - assert_equal(assert_no_warnings(lambda x: x, 1), 1) - - # Check that the warnings state is unchanged - assert_equal(before_filters, after_filters, - "assert_warns does not preserver warnings state") - - def test_context_manager(self): - - before_filters = sys.modules['warnings'].filters[:] - with assert_warns(UserWarning): - warnings.warn("yo") - after_filters = sys.modules['warnings'].filters - - def no_warnings(): - with assert_no_warnings(): - warnings.warn("yo") - - assert_raises(AssertionError, no_warnings) - assert_equal(before_filters, after_filters, - "assert_warns does not preserver warnings state") - - def test_warn_wrong_warning(self): - def f(): - warnings.warn("yo", DeprecationWarning) - - failed = False - with warnings.catch_warnings(): - warnings.simplefilter("error", DeprecationWarning) - try: - # Should raise a DeprecationWarning - assert_warns(UserWarning, f) - failed = True - except DeprecationWarning: - pass - - if failed: - raise AssertionError("wrong warning caught by assert_warn") - - -class TestAssertAllclose(object): - - def test_simple(self): - x = 1e-3 - y = 1e-9 - - assert_allclose(x, y, atol=1) - assert_raises(AssertionError, assert_allclose, x, y) - - a = np.array([x, y, x, y]) - b = np.array([x, y, x, x]) - - assert_allclose(a, b, atol=1) - assert_raises(AssertionError, assert_allclose, a, b) - - b[-1] = y * (1 + 1e-8) - assert_allclose(a, b) - assert_raises(AssertionError, assert_allclose, a, b, rtol=1e-9) - - assert_allclose(6, 10, rtol=0.5) - assert_raises(AssertionError, assert_allclose, 10, 6, rtol=0.5) - - def 
test_min_int(self): - a = np.array([np.iinfo(np.int_).min], dtype=np.int_) - # Should not raise: - assert_allclose(a, a) - - def test_report_fail_percentage(self): - a = np.array([1, 1, 1, 1]) - b = np.array([1, 1, 1, 2]) - - with pytest.raises(AssertionError) as exc_info: - assert_allclose(a, b) - msg = str(exc_info.value) - assert_('Mismatched elements: 1 / 4 (25%)\n' - 'Max absolute difference: 1\n' - 'Max relative difference: 0.5' in msg) - - def test_equal_nan(self): - a = np.array([np.nan]) - b = np.array([np.nan]) - # Should not raise: - assert_allclose(a, b, equal_nan=True) - - def test_not_equal_nan(self): - a = np.array([np.nan]) - b = np.array([np.nan]) - assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False) - - def test_equal_nan_default(self): - # Make sure equal_nan default behavior remains unchanged. (All - # of these functions use assert_array_compare under the hood.) - # None of these should raise. - a = np.array([np.nan]) - b = np.array([np.nan]) - assert_array_equal(a, b) - assert_array_almost_equal(a, b) - assert_array_less(a, b) - assert_allclose(a, b) - - def test_report_max_relative_error(self): - a = np.array([0, 1]) - b = np.array([0, 2]) - - with pytest.raises(AssertionError) as exc_info: - assert_allclose(a, b) - msg = str(exc_info.value) - assert_('Max relative difference: 0.5' in msg) - - -class TestArrayAlmostEqualNulp(object): - - def test_float64_pass(self): - # The number of units of least precision - # In this case, use a few places above the lowest level (ie nulp=1) - nulp = 5 - x = np.linspace(-20, 20, 50, dtype=np.float64) - x = 10**x - x = np.r_[-x, x] - - # Addition - eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp/2. - assert_array_almost_equal_nulp(x, y, nulp) - - # Subtraction - epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp/2. 
- assert_array_almost_equal_nulp(x, y, nulp) - - def test_float64_fail(self): - nulp = 5 - x = np.linspace(-20, 20, 50, dtype=np.float64) - x = 10**x - x = np.r_[-x, x] - - eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp*2. - assert_raises(AssertionError, assert_array_almost_equal_nulp, - x, y, nulp) - - epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp*2. - assert_raises(AssertionError, assert_array_almost_equal_nulp, - x, y, nulp) - - def test_float32_pass(self): - nulp = 5 - x = np.linspace(-20, 20, 50, dtype=np.float32) - x = 10**x - x = np.r_[-x, x] - - eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp/2. - assert_array_almost_equal_nulp(x, y, nulp) - - epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp/2. - assert_array_almost_equal_nulp(x, y, nulp) - - def test_float32_fail(self): - nulp = 5 - x = np.linspace(-20, 20, 50, dtype=np.float32) - x = 10**x - x = np.r_[-x, x] - - eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp*2. - assert_raises(AssertionError, assert_array_almost_equal_nulp, - x, y, nulp) - - epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp*2. - assert_raises(AssertionError, assert_array_almost_equal_nulp, - x, y, nulp) - - def test_float16_pass(self): - nulp = 5 - x = np.linspace(-4, 4, 10, dtype=np.float16) - x = 10**x - x = np.r_[-x, x] - - eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp/2. - assert_array_almost_equal_nulp(x, y, nulp) - - epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp/2. - assert_array_almost_equal_nulp(x, y, nulp) - - def test_float16_fail(self): - nulp = 5 - x = np.linspace(-4, 4, 10, dtype=np.float16) - x = 10**x - x = np.r_[-x, x] - - eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp*2. - assert_raises(AssertionError, assert_array_almost_equal_nulp, - x, y, nulp) - - epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp*2. 
- assert_raises(AssertionError, assert_array_almost_equal_nulp, - x, y, nulp) - - def test_complex128_pass(self): - nulp = 5 - x = np.linspace(-20, 20, 50, dtype=np.float64) - x = 10**x - x = np.r_[-x, x] - xi = x + x*1j - - eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp/2. - assert_array_almost_equal_nulp(xi, x + y*1j, nulp) - assert_array_almost_equal_nulp(xi, y + x*1j, nulp) - # The test condition needs to be at least a factor of sqrt(2) smaller - # because the real and imaginary parts both change - y = x + x*eps*nulp/4. - assert_array_almost_equal_nulp(xi, y + y*1j, nulp) - - epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp/2. - assert_array_almost_equal_nulp(xi, x + y*1j, nulp) - assert_array_almost_equal_nulp(xi, y + x*1j, nulp) - y = x - x*epsneg*nulp/4. - assert_array_almost_equal_nulp(xi, y + y*1j, nulp) - - def test_complex128_fail(self): - nulp = 5 - x = np.linspace(-20, 20, 50, dtype=np.float64) - x = 10**x - x = np.r_[-x, x] - xi = x + x*1j - - eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp*2. - assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, x + y*1j, nulp) - assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, y + x*1j, nulp) - # The test condition needs to be at least a factor of sqrt(2) smaller - # because the real and imaginary parts both change - y = x + x*eps*nulp - assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, y + y*1j, nulp) - - epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp*2. - assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, x + y*1j, nulp) - assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, y + x*1j, nulp) - y = x - x*epsneg*nulp - assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, y + y*1j, nulp) - - def test_complex64_pass(self): - nulp = 5 - x = np.linspace(-20, 20, 50, dtype=np.float32) - x = 10**x - x = np.r_[-x, x] - xi = x + x*1j - - eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp/2. 
- assert_array_almost_equal_nulp(xi, x + y*1j, nulp) - assert_array_almost_equal_nulp(xi, y + x*1j, nulp) - y = x + x*eps*nulp/4. - assert_array_almost_equal_nulp(xi, y + y*1j, nulp) - - epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp/2. - assert_array_almost_equal_nulp(xi, x + y*1j, nulp) - assert_array_almost_equal_nulp(xi, y + x*1j, nulp) - y = x - x*epsneg*nulp/4. - assert_array_almost_equal_nulp(xi, y + y*1j, nulp) - - def test_complex64_fail(self): - nulp = 5 - x = np.linspace(-20, 20, 50, dtype=np.float32) - x = 10**x - x = np.r_[-x, x] - xi = x + x*1j - - eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp*2. - assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, x + y*1j, nulp) - assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, y + x*1j, nulp) - y = x + x*eps*nulp - assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, y + y*1j, nulp) - - epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp*2. - assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, x + y*1j, nulp) - assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, y + x*1j, nulp) - y = x - x*epsneg*nulp - assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, y + y*1j, nulp) - - -class TestULP(object): - - def test_equal(self): - x = np.random.randn(10) - assert_array_max_ulp(x, x, maxulp=0) - - def test_single(self): - # Generate 1 + small deviation, check that adding eps gives a few UNL - x = np.ones(10).astype(np.float32) - x += 0.01 * np.random.randn(10).astype(np.float32) - eps = np.finfo(np.float32).eps - assert_array_max_ulp(x, x+eps, maxulp=20) - - def test_double(self): - # Generate 1 + small deviation, check that adding eps gives a few UNL - x = np.ones(10).astype(np.float64) - x += 0.01 * np.random.randn(10).astype(np.float64) - eps = np.finfo(np.float64).eps - assert_array_max_ulp(x, x+eps, maxulp=200) - - def test_inf(self): - for dt in [np.float32, np.float64]: - inf = 
np.array([np.inf]).astype(dt) - big = np.array([np.finfo(dt).max]) - assert_array_max_ulp(inf, big, maxulp=200) - - def test_nan(self): - # Test that nan is 'far' from small, tiny, inf, max and min - for dt in [np.float32, np.float64]: - if dt == np.float32: - maxulp = 1e6 - else: - maxulp = 1e12 - inf = np.array([np.inf]).astype(dt) - nan = np.array([np.nan]).astype(dt) - big = np.array([np.finfo(dt).max]) - tiny = np.array([np.finfo(dt).tiny]) - zero = np.array([np.PZERO]).astype(dt) - nzero = np.array([np.NZERO]).astype(dt) - assert_raises(AssertionError, - lambda: assert_array_max_ulp(nan, inf, - maxulp=maxulp)) - assert_raises(AssertionError, - lambda: assert_array_max_ulp(nan, big, - maxulp=maxulp)) - assert_raises(AssertionError, - lambda: assert_array_max_ulp(nan, tiny, - maxulp=maxulp)) - assert_raises(AssertionError, - lambda: assert_array_max_ulp(nan, zero, - maxulp=maxulp)) - assert_raises(AssertionError, - lambda: assert_array_max_ulp(nan, nzero, - maxulp=maxulp)) - - -class TestStringEqual(object): - def test_simple(self): - assert_string_equal("hello", "hello") - assert_string_equal("hello\nmultiline", "hello\nmultiline") - - with pytest.raises(AssertionError) as exc_info: - assert_string_equal("foo\nbar", "hello\nbar") - msg = str(exc_info.value) - assert_equal(msg, "Differences in strings:\n- foo\n+ hello") - - assert_raises(AssertionError, - lambda: assert_string_equal("foo", "hello")) - - def test_regex(self): - assert_string_equal("a+*b", "a+*b") - - assert_raises(AssertionError, - lambda: assert_string_equal("aaa", "a+b")) - - -def assert_warn_len_equal(mod, n_in_context, py34=None, py37=None): - try: - mod_warns = mod.__warningregistry__ - except AttributeError: - # the lack of a __warningregistry__ - # attribute means that no warning has - # occurred; this can be triggered in - # a parallel test scenario, while in - # a serial test scenario an initial - # warning (and therefore the attribute) - # are always created first - mod_warns = {} - - 
num_warns = len(mod_warns) - # Python 3.4 appears to clear any pre-existing warnings of the same type, - # when raising warnings inside a catch_warnings block. So, there is a - # warning generated by the tests within the context manager, but no - # previous warnings. - if 'version' in mod_warns: - # Python 3 adds a 'version' entry to the registry, - # do not count it. - num_warns -= 1 - - # Behavior of warnings is Python version dependent. Adjust the - # expected result to compensate. In particular, Python 3.7 does - # not make an entry for ignored warnings. - if sys.version_info[:2] >= (3, 7): - if py37 is not None: - n_in_context = py37 - elif sys.version_info[:2] >= (3, 4): - if py34 is not None: - n_in_context = py34 - assert_equal(num_warns, n_in_context) - -def test_warn_len_equal_call_scenarios(): - # assert_warn_len_equal is called under - # varying circumstances depending on serial - # vs. parallel test scenarios; this test - # simply aims to probe both code paths and - # check that no assertion is uncaught - - # parallel scenario -- no warning issued yet - class mod(object): - pass - - mod_inst = mod() - - assert_warn_len_equal(mod=mod_inst, - n_in_context=0) - - # serial test scenario -- the __warningregistry__ - # attribute should be present - class mod(object): - def __init__(self): - self.__warningregistry__ = {'warning1':1, - 'warning2':2} - - mod_inst = mod() - assert_warn_len_equal(mod=mod_inst, - n_in_context=2) - - -def _get_fresh_mod(): - # Get this module, with warning registry empty - my_mod = sys.modules[__name__] - try: - my_mod.__warningregistry__.clear() - except AttributeError: - # will not have a __warningregistry__ unless warning has been - # raised in the module at some point - pass - return my_mod - - -def test_clear_and_catch_warnings(): - # Initial state of module, no warnings - my_mod = _get_fresh_mod() - assert_equal(getattr(my_mod, '__warningregistry__', {}), {}) - with clear_and_catch_warnings(modules=[my_mod]): - 
warnings.simplefilter('ignore') - warnings.warn('Some warning') - assert_equal(my_mod.__warningregistry__, {}) - # Without specified modules, don't clear warnings during context - # Python 3.7 catch_warnings doesn't make an entry for 'ignore'. - with clear_and_catch_warnings(): - warnings.simplefilter('ignore') - warnings.warn('Some warning') - assert_warn_len_equal(my_mod, 1, py37=0) - # Confirm that specifying module keeps old warning, does not add new - with clear_and_catch_warnings(modules=[my_mod]): - warnings.simplefilter('ignore') - warnings.warn('Another warning') - assert_warn_len_equal(my_mod, 1, py37=0) - # Another warning, no module spec does add to warnings dict, except on - # Python 3.4 (see comments in `assert_warn_len_equal`) - # Python 3.7 catch_warnings doesn't make an entry for 'ignore'. - with clear_and_catch_warnings(): - warnings.simplefilter('ignore') - warnings.warn('Another warning') - assert_warn_len_equal(my_mod, 2, py34=1, py37=0) - - -def test_suppress_warnings_module(): - # Initial state of module, no warnings - my_mod = _get_fresh_mod() - assert_equal(getattr(my_mod, '__warningregistry__', {}), {}) - - def warn_other_module(): - # Apply along axis is implemented in python; stacklevel=2 means - # we end up inside its module, not ours. - def warn(arr): - warnings.warn("Some warning 2", stacklevel=2) - return arr - np.apply_along_axis(warn, 0, [0]) - - # Test module based warning suppression: - assert_warn_len_equal(my_mod, 0) - with suppress_warnings() as sup: - sup.record(UserWarning) - # suppress warning from other module (may have .pyc ending), - # if apply_along_axis is moved, had to be changed. 
- sup.filter(module=np.lib.shape_base) - warnings.warn("Some warning") - warn_other_module() - # Check that the suppression did test the file correctly (this module - # got filtered) - assert_equal(len(sup.log), 1) - assert_equal(sup.log[0].message.args[0], "Some warning") - assert_warn_len_equal(my_mod, 0, py37=0) - sup = suppress_warnings() - # Will have to be changed if apply_along_axis is moved: - sup.filter(module=my_mod) - with sup: - warnings.warn('Some warning') - assert_warn_len_equal(my_mod, 0) - # And test repeat works: - sup.filter(module=my_mod) - with sup: - warnings.warn('Some warning') - assert_warn_len_equal(my_mod, 0) - - # Without specified modules, don't clear warnings during context - # Python 3.7 does not add ignored warnings. - with suppress_warnings(): - warnings.simplefilter('ignore') - warnings.warn('Some warning') - assert_warn_len_equal(my_mod, 1, py37=0) - -def test_suppress_warnings_type(): - # Initial state of module, no warnings - my_mod = _get_fresh_mod() - assert_equal(getattr(my_mod, '__warningregistry__', {}), {}) - - # Test module based warning suppression: - with suppress_warnings() as sup: - sup.filter(UserWarning) - warnings.warn('Some warning') - assert_warn_len_equal(my_mod, 0) - sup = suppress_warnings() - sup.filter(UserWarning) - with sup: - warnings.warn('Some warning') - assert_warn_len_equal(my_mod, 0) - # And test repeat works: - sup.filter(module=my_mod) - with sup: - warnings.warn('Some warning') - assert_warn_len_equal(my_mod, 0) - - # Without specified modules, don't clear warnings during context - # Python 3.7 does not add ignored warnings. 
- with suppress_warnings(): - warnings.simplefilter('ignore') - warnings.warn('Some warning') - assert_warn_len_equal(my_mod, 1, py37=0) - - -def test_suppress_warnings_decorate_no_record(): - sup = suppress_warnings() - sup.filter(UserWarning) - - @sup - def warn(category): - warnings.warn('Some warning', category) - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - warn(UserWarning) # should be supppressed - warn(RuntimeWarning) - assert_equal(len(w), 1) - - -def test_suppress_warnings_record(): - sup = suppress_warnings() - log1 = sup.record() - - with sup: - log2 = sup.record(message='Some other warning 2') - sup.filter(message='Some warning') - warnings.warn('Some warning') - warnings.warn('Some other warning') - warnings.warn('Some other warning 2') - - assert_equal(len(sup.log), 2) - assert_equal(len(log1), 1) - assert_equal(len(log2),1) - assert_equal(log2[0].message.args[0], 'Some other warning 2') - - # Do it again, with the same context to see if some warnings survived: - with sup: - log2 = sup.record(message='Some other warning 2') - sup.filter(message='Some warning') - warnings.warn('Some warning') - warnings.warn('Some other warning') - warnings.warn('Some other warning 2') - - assert_equal(len(sup.log), 2) - assert_equal(len(log1), 1) - assert_equal(len(log2), 1) - assert_equal(log2[0].message.args[0], 'Some other warning 2') - - # Test nested: - with suppress_warnings() as sup: - sup.record() - with suppress_warnings() as sup2: - sup2.record(message='Some warning') - warnings.warn('Some warning') - warnings.warn('Some other warning') - assert_equal(len(sup2.log), 1) - assert_equal(len(sup.log), 1) - - -def test_suppress_warnings_forwarding(): - def warn_other_module(): - # Apply along axis is implemented in python; stacklevel=2 means - # we end up inside its module, not ours. 
- def warn(arr): - warnings.warn("Some warning", stacklevel=2) - return arr - np.apply_along_axis(warn, 0, [0]) - - with suppress_warnings() as sup: - sup.record() - with suppress_warnings("always"): - for i in range(2): - warnings.warn("Some warning") - - assert_equal(len(sup.log), 2) - - with suppress_warnings() as sup: - sup.record() - with suppress_warnings("location"): - for i in range(2): - warnings.warn("Some warning") - warnings.warn("Some warning") - - assert_equal(len(sup.log), 2) - - with suppress_warnings() as sup: - sup.record() - with suppress_warnings("module"): - for i in range(2): - warnings.warn("Some warning") - warnings.warn("Some warning") - warn_other_module() - - assert_equal(len(sup.log), 2) - - with suppress_warnings() as sup: - sup.record() - with suppress_warnings("once"): - for i in range(2): - warnings.warn("Some warning") - warnings.warn("Some other warning") - warn_other_module() - - assert_equal(len(sup.log), 2) - - -def test_tempdir(): - with tempdir() as tdir: - fpath = os.path.join(tdir, 'tmp') - with open(fpath, 'w'): - pass - assert_(not os.path.isdir(tdir)) - - raised = False - try: - with tempdir() as tdir: - raise ValueError() - except ValueError: - raised = True - assert_(raised) - assert_(not os.path.isdir(tdir)) - - -def test_temppath(): - with temppath() as fpath: - with open(fpath, 'w'): - pass - assert_(not os.path.isfile(fpath)) - - raised = False - try: - with temppath() as fpath: - raise ValueError() - except ValueError: - raised = True - assert_(raised) - assert_(not os.path.isfile(fpath)) - - -class my_cacw(clear_and_catch_warnings): - - class_modules = (sys.modules[__name__],) - - -def test_clear_and_catch_warnings_inherit(): - # Test can subclass and add default modules - my_mod = _get_fresh_mod() - with my_cacw(): - warnings.simplefilter('ignore') - warnings.warn('Some warning') - assert_equal(my_mod.__warningregistry__, {}) - - -@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") -class 
TestAssertNoGcCycles(object): - """ Test assert_no_gc_cycles """ - def test_passes(self): - def no_cycle(): - b = [] - b.append([]) - return b - - with assert_no_gc_cycles(): - no_cycle() - - assert_no_gc_cycles(no_cycle) - - def test_asserts(self): - def make_cycle(): - a = [] - a.append(a) - a.append(a) - return a - - with assert_raises(AssertionError): - with assert_no_gc_cycles(): - make_cycle() - - with assert_raises(AssertionError): - assert_no_gc_cycles(make_cycle) - - @pytest.mark.slow - def test_fails(self): - """ - Test that in cases where the garbage cannot be collected, we raise an - error, instead of hanging forever trying to clear it. - """ - - class ReferenceCycleInDel(object): - """ - An object that not only contains a reference cycle, but creates new - cycles whenever it's garbage-collected and its __del__ runs - """ - make_cycle = True - - def __init__(self): - self.cycle = self - - def __del__(self): - # break the current cycle so that `self` can be freed - self.cycle = None - - if ReferenceCycleInDel.make_cycle: - # but create a new one so that the garbage collector has more - # work to do. - ReferenceCycleInDel() - - try: - w = weakref.ref(ReferenceCycleInDel()) - try: - with assert_raises(RuntimeError): - # this will be unable to get a baseline empty garbage - assert_no_gc_cycles(lambda: None) - except AssertionError: - # the above test is only necessary if the GC actually tried to free - # our object anyway, which python 2.7 does not. - if w() is not None: - pytest.skip("GC does not call __del__ on cyclic objects") - raise - - finally: - # make sure that we stop creating reference cycles - ReferenceCycleInDel.make_cycle = False diff --git a/venv/lib/python3.7/site-packages/numpy/testing/utils.py b/venv/lib/python3.7/site-packages/numpy/testing/utils.py deleted file mode 100644 index 975f6ad..0000000 --- a/venv/lib/python3.7/site-packages/numpy/testing/utils.py +++ /dev/null @@ -1,30 +0,0 @@ -""" -Back compatibility utils module. 
It will import the appropriate -set of tools - -""" -from __future__ import division, absolute_import, print_function - -import warnings - -# 2018-04-04, numpy 1.15.0 ImportWarning -# 2019-09-18, numpy 1.18.0 DeprecatonWarning (changed) -warnings.warn("Importing from numpy.testing.utils is deprecated " - "since 1.15.0, import from numpy.testing instead.", - DeprecationWarning, stacklevel=2) - -from ._private.utils import * - -__all__ = [ - 'assert_equal', 'assert_almost_equal', 'assert_approx_equal', - 'assert_array_equal', 'assert_array_less', 'assert_string_equal', - 'assert_array_almost_equal', 'assert_raises', 'build_err_msg', - 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal', - 'raises', 'rundocs', 'runstring', 'verbose', 'measure', - 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex', - 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings', - 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings', - 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY', - 'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare', - '_assert_valid_refcount', '_gen_alignment_data', 'assert_no_gc_cycles' - ] diff --git a/venv/lib/python3.7/site-packages/numpy/tests/__init__.py b/venv/lib/python3.7/site-packages/numpy/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/numpy/tests/test_ctypeslib.py b/venv/lib/python3.7/site-packages/numpy/tests/test_ctypeslib.py deleted file mode 100644 index 521208c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/tests/test_ctypeslib.py +++ /dev/null @@ -1,367 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import pytest -import weakref - -import numpy as np -from numpy.ctypeslib import ndpointer, load_library, as_array -from numpy.distutils.misc_util import get_shared_lib_extension -from numpy.testing import assert_, assert_array_equal, assert_raises, assert_equal - -try: - 
import ctypes -except ImportError: - ctypes = None -else: - cdll = None - test_cdll = None - if hasattr(sys, 'gettotalrefcount'): - try: - cdll = load_library('_multiarray_umath_d', np.core._multiarray_umath.__file__) - except OSError: - pass - try: - test_cdll = load_library('_multiarray_tests', np.core._multiarray_tests.__file__) - except OSError: - pass - if cdll is None: - cdll = load_library('_multiarray_umath', np.core._multiarray_umath.__file__) - if test_cdll is None: - test_cdll = load_library('_multiarray_tests', np.core._multiarray_tests.__file__) - - c_forward_pointer = test_cdll.forward_pointer - - -@pytest.mark.skipif(ctypes is None, - reason="ctypes not available in this python") -@pytest.mark.skipif(sys.platform == 'cygwin', - reason="Known to fail on cygwin") -class TestLoadLibrary(object): - def test_basic(self): - try: - # Should succeed - load_library('_multiarray_umath', np.core._multiarray_umath.__file__) - except ImportError as e: - msg = ("ctypes is not available on this python: skipping the test" - " (import error was: %s)" % str(e)) - print(msg) - - def test_basic2(self): - # Regression for #801: load_library with a full library name - # (including extension) does not work. 
- try: - try: - so = get_shared_lib_extension(is_python_ext=True) - # Should succeed - load_library('_multiarray_umath%s' % so, np.core._multiarray_umath.__file__) - except ImportError: - print("No distutils available, skipping test.") - except ImportError as e: - msg = ("ctypes is not available on this python: skipping the test" - " (import error was: %s)" % str(e)) - print(msg) - - -class TestNdpointer(object): - def test_dtype(self): - dt = np.intc - p = ndpointer(dtype=dt) - assert_(p.from_param(np.array([1], dt))) - dt = 'i4') - p = ndpointer(dtype=dt) - p.from_param(np.array([1], dt)) - assert_raises(TypeError, p.from_param, - np.array([1], dt.newbyteorder('swap'))) - dtnames = ['x', 'y'] - dtformats = [np.intc, np.float64] - dtdescr = {'names': dtnames, 'formats': dtformats} - dt = np.dtype(dtdescr) - p = ndpointer(dtype=dt) - assert_(p.from_param(np.zeros((10,), dt))) - samedt = np.dtype(dtdescr) - p = ndpointer(dtype=samedt) - assert_(p.from_param(np.zeros((10,), dt))) - dt2 = np.dtype(dtdescr, align=True) - if dt.itemsize != dt2.itemsize: - assert_raises(TypeError, p.from_param, np.zeros((10,), dt2)) - else: - assert_(p.from_param(np.zeros((10,), dt2))) - - def test_ndim(self): - p = ndpointer(ndim=0) - assert_(p.from_param(np.array(1))) - assert_raises(TypeError, p.from_param, np.array([1])) - p = ndpointer(ndim=1) - assert_raises(TypeError, p.from_param, np.array(1)) - assert_(p.from_param(np.array([1]))) - p = ndpointer(ndim=2) - assert_(p.from_param(np.array([[1]]))) - - def test_shape(self): - p = ndpointer(shape=(1, 2)) - assert_(p.from_param(np.array([[1, 2]]))) - assert_raises(TypeError, p.from_param, np.array([[1], [2]])) - p = ndpointer(shape=()) - assert_(p.from_param(np.array(1))) - - def test_flags(self): - x = np.array([[1, 2], [3, 4]], order='F') - p = ndpointer(flags='FORTRAN') - assert_(p.from_param(x)) - p = ndpointer(flags='CONTIGUOUS') - assert_raises(TypeError, p.from_param, x) - p = ndpointer(flags=x.flags.num) - 
assert_(p.from_param(x)) - assert_raises(TypeError, p.from_param, np.array([[1, 2], [3, 4]])) - - def test_cache(self): - assert_(ndpointer(dtype=np.float64) is ndpointer(dtype=np.float64)) - - # shapes are normalized - assert_(ndpointer(shape=2) is ndpointer(shape=(2,))) - - # 1.12 <= v < 1.16 had a bug that made these fail - assert_(ndpointer(shape=2) is not ndpointer(ndim=2)) - assert_(ndpointer(ndim=2) is not ndpointer(shape=2)) - -@pytest.mark.skipif(ctypes is None, - reason="ctypes not available on this python installation") -class TestNdpointerCFunc(object): - def test_arguments(self): - """ Test that arguments are coerced from arrays """ - c_forward_pointer.restype = ctypes.c_void_p - c_forward_pointer.argtypes = (ndpointer(ndim=2),) - - c_forward_pointer(np.zeros((2, 3))) - # too many dimensions - assert_raises( - ctypes.ArgumentError, c_forward_pointer, np.zeros((2, 3, 4))) - - @pytest.mark.parametrize( - 'dt', [ - float, - np.dtype(dict( - formats=['u2') - ct = np.ctypeslib.as_ctypes_type(dt) - assert_equal(ct, ctypes.c_uint16.__ctype_be__) - - dt = np.dtype('u2') - ct = np.ctypeslib.as_ctypes_type(dt) - assert_equal(ct, ctypes.c_uint16) - - def test_subarray(self): - dt = np.dtype((np.int32, (2, 3))) - ct = np.ctypeslib.as_ctypes_type(dt) - assert_equal(ct, 2 * (3 * ctypes.c_int32)) - - def test_structure(self): - dt = np.dtype([ - ('a', np.uint16), - ('b', np.uint32), - ]) - - ct = np.ctypeslib.as_ctypes_type(dt) - assert_(issubclass(ct, ctypes.Structure)) - assert_equal(ctypes.sizeof(ct), dt.itemsize) - assert_equal(ct._fields_, [ - ('a', ctypes.c_uint16), - ('b', ctypes.c_uint32), - ]) - - def test_structure_aligned(self): - dt = np.dtype([ - ('a', np.uint16), - ('b', np.uint32), - ], align=True) - - ct = np.ctypeslib.as_ctypes_type(dt) - assert_(issubclass(ct, ctypes.Structure)) - assert_equal(ctypes.sizeof(ct), dt.itemsize) - assert_equal(ct._fields_, [ - ('a', ctypes.c_uint16), - ('', ctypes.c_char * 2), # padding - ('b', ctypes.c_uint32), - ]) - 
- def test_union(self): - dt = np.dtype(dict( - names=['a', 'b'], - offsets=[0, 0], - formats=[np.uint16, np.uint32] - )) - - ct = np.ctypeslib.as_ctypes_type(dt) - assert_(issubclass(ct, ctypes.Union)) - assert_equal(ctypes.sizeof(ct), dt.itemsize) - assert_equal(ct._fields_, [ - ('a', ctypes.c_uint16), - ('b', ctypes.c_uint32), - ]) - - def test_padded_union(self): - dt = np.dtype(dict( - names=['a', 'b'], - offsets=[0, 0], - formats=[np.uint16, np.uint32], - itemsize=5, - )) - - ct = np.ctypeslib.as_ctypes_type(dt) - assert_(issubclass(ct, ctypes.Union)) - assert_equal(ctypes.sizeof(ct), dt.itemsize) - assert_equal(ct._fields_, [ - ('a', ctypes.c_uint16), - ('b', ctypes.c_uint32), - ('', ctypes.c_char * 5), # padding - ]) - - def test_overlapping(self): - dt = np.dtype(dict( - names=['a', 'b'], - offsets=[0, 2], - formats=[np.uint32, np.uint32] - )) - assert_raises(NotImplementedError, np.ctypeslib.as_ctypes_type, dt) diff --git a/venv/lib/python3.7/site-packages/numpy/tests/test_matlib.py b/venv/lib/python3.7/site-packages/numpy/tests/test_matlib.py deleted file mode 100644 index 38a7e39..0000000 --- a/venv/lib/python3.7/site-packages/numpy/tests/test_matlib.py +++ /dev/null @@ -1,68 +0,0 @@ -from __future__ import division, absolute_import, print_function - -# As we are testing matrices, we ignore its PendingDeprecationWarnings -try: - import pytest - pytestmark = pytest.mark.filterwarnings( - 'ignore:the matrix subclass is not:PendingDeprecationWarning') -except ImportError: - pass - -import numpy as np -import numpy.matlib -from numpy.testing import assert_array_equal, assert_ - -def test_empty(): - x = numpy.matlib.empty((2,)) - assert_(isinstance(x, np.matrix)) - assert_(x.shape, (1, 2)) - -def test_ones(): - assert_array_equal(numpy.matlib.ones((2, 3)), - np.matrix([[ 1., 1., 1.], - [ 1., 1., 1.]])) - - assert_array_equal(numpy.matlib.ones(2), np.matrix([[ 1., 1.]])) - -def test_zeros(): - assert_array_equal(numpy.matlib.zeros((2, 3)), - np.matrix([[ 0., 
0., 0.], - [ 0., 0., 0.]])) - - assert_array_equal(numpy.matlib.zeros(2), np.matrix([[ 0., 0.]])) - -def test_identity(): - x = numpy.matlib.identity(2, dtype=int) - assert_array_equal(x, np.matrix([[1, 0], [0, 1]])) - -def test_eye(): - xc = numpy.matlib.eye(3, k=1, dtype=int) - assert_array_equal(xc, np.matrix([[ 0, 1, 0], - [ 0, 0, 1], - [ 0, 0, 0]])) - assert xc.flags.c_contiguous - assert not xc.flags.f_contiguous - - xf = numpy.matlib.eye(3, 4, dtype=int, order='F') - assert_array_equal(xf, np.matrix([[ 1, 0, 0, 0], - [ 0, 1, 0, 0], - [ 0, 0, 1, 0]])) - assert not xf.flags.c_contiguous - assert xf.flags.f_contiguous - -def test_rand(): - x = numpy.matlib.rand(3) - # check matrix type, array would have shape (3,) - assert_(x.ndim == 2) - -def test_randn(): - x = np.matlib.randn(3) - # check matrix type, array would have shape (3,) - assert_(x.ndim == 2) - -def test_repmat(): - a1 = np.arange(4) - x = numpy.matlib.repmat(a1, 2, 2) - y = np.array([[0, 1, 2, 3, 0, 1, 2, 3], - [0, 1, 2, 3, 0, 1, 2, 3]]) - assert_array_equal(x, y) diff --git a/venv/lib/python3.7/site-packages/numpy/tests/test_numpy_version.py b/venv/lib/python3.7/site-packages/numpy/tests/test_numpy_version.py deleted file mode 100644 index 7fac8fd..0000000 --- a/venv/lib/python3.7/site-packages/numpy/tests/test_numpy_version.py +++ /dev/null @@ -1,19 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import re - -import numpy as np -from numpy.testing import assert_ - - -def test_valid_numpy_version(): - # Verify that the numpy version is a valid one (no .post suffix or other - # nonsense). See gh-6431 for an issue caused by an invalid version. 
- version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(|a[0-9]|b[0-9]|rc[0-9])" - dev_suffix = r"(\.dev0\+([0-9a-f]{7}|Unknown))" - if np.version.release: - res = re.match(version_pattern, np.__version__) - else: - res = re.match(version_pattern + dev_suffix, np.__version__) - - assert_(res is not None, np.__version__) diff --git a/venv/lib/python3.7/site-packages/numpy/tests/test_public_api.py b/venv/lib/python3.7/site-packages/numpy/tests/test_public_api.py deleted file mode 100644 index 0484bb8..0000000 --- a/venv/lib/python3.7/site-packages/numpy/tests/test_public_api.py +++ /dev/null @@ -1,490 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import subprocess -import pkgutil -import types -import importlib -import warnings - -import numpy as np -import numpy -import pytest - -try: - import ctypes -except ImportError: - ctypes = None - - -def check_dir(module, module_name=None): - """Returns a mapping of all objects with the wrong __module__ attribute.""" - if module_name is None: - module_name = module.__name__ - results = {} - for name in dir(module): - item = getattr(module, name) - if (hasattr(item, '__module__') and hasattr(item, '__name__') - and item.__module__ != module_name): - results[name] = item.__module__ + '.' 
+ item.__name__ - return results - - -@pytest.mark.skipif( - sys.version_info[0] < 3, - reason="NumPy exposes slightly different functions on Python 2") -def test_numpy_namespace(): - # None of these objects are publicly documented to be part of the main - # NumPy namespace (some are useful though, others need to be cleaned up) - undocumented = { - 'Tester': 'numpy.testing._private.nosetester.NoseTester', - '_add_newdoc_ufunc': 'numpy.core._multiarray_umath._add_newdoc_ufunc', - 'add_docstring': 'numpy.core._multiarray_umath.add_docstring', - 'add_newdoc': 'numpy.core.function_base.add_newdoc', - 'add_newdoc_ufunc': 'numpy.core._multiarray_umath._add_newdoc_ufunc', - 'byte_bounds': 'numpy.lib.utils.byte_bounds', - 'compare_chararrays': 'numpy.core._multiarray_umath.compare_chararrays', - 'deprecate': 'numpy.lib.utils.deprecate', - 'deprecate_with_doc': 'numpy.lib.utils.', - 'disp': 'numpy.lib.function_base.disp', - 'fastCopyAndTranspose': 'numpy.core._multiarray_umath._fastCopyAndTranspose', - 'get_array_wrap': 'numpy.lib.shape_base.get_array_wrap', - 'get_include': 'numpy.lib.utils.get_include', - 'int_asbuffer': 'numpy.core._multiarray_umath.int_asbuffer', - 'mafromtxt': 'numpy.lib.npyio.mafromtxt', - 'ndfromtxt': 'numpy.lib.npyio.ndfromtxt', - 'recfromcsv': 'numpy.lib.npyio.recfromcsv', - 'recfromtxt': 'numpy.lib.npyio.recfromtxt', - 'safe_eval': 'numpy.lib.utils.safe_eval', - 'set_string_function': 'numpy.core.arrayprint.set_string_function', - 'show_config': 'numpy.__config__.show', - 'who': 'numpy.lib.utils.who', - } - # These built-in types are re-exported by numpy. 
- builtins = { - 'bool': 'builtins.bool', - 'complex': 'builtins.complex', - 'float': 'builtins.float', - 'int': 'builtins.int', - 'long': 'builtins.int', - 'object': 'builtins.object', - 'str': 'builtins.str', - 'unicode': 'builtins.str', - } - whitelist = dict(undocumented, **builtins) - bad_results = check_dir(np) - # pytest gives better error messages with the builtin assert than with - # assert_equal - assert bad_results == whitelist - - -@pytest.mark.parametrize('name', ['testing', 'Tester']) -def test_import_lazy_import(name): - """Make sure we can actually use the modules we lazy load. - - While not exported as part of the public API, it was accessible. With the - use of __getattr__ and __dir__, this isn't always true It can happen that - an infinite recursion may happen. - - This is the only way I found that would force the failure to appear on the - badly implemented code. - - We also test for the presence of the lazily imported modules in dir - - """ - exe = (sys.executable, '-c', "import numpy; numpy." + name) - result = subprocess.check_output(exe) - assert not result - - # Make sure they are still in the __dir__ - assert name in dir(np) - - -def test_numpy_linalg(): - bad_results = check_dir(np.linalg) - assert bad_results == {} - - -def test_numpy_fft(): - bad_results = check_dir(np.fft) - assert bad_results == {} - - -@pytest.mark.skipif(ctypes is None, - reason="ctypes not available in this python") -def test_NPY_NO_EXPORT(): - cdll = ctypes.CDLL(np.core._multiarray_tests.__file__) - # Make sure an arbitrary NPY_NO_EXPORT function is actually hidden - f = getattr(cdll, 'test_not_exported', None) - assert f is None, ("'test_not_exported' is mistakenly exported, " - "NPY_NO_EXPORT does not work") - - -# Historically NumPy has not used leading underscores for private submodules -# much. This has resulted in lots of things that look like public modules -# (i.e. 
things that can be imported as `import numpy.somesubmodule.somefile`), -# but were never intended to be public. The PUBLIC_MODULES list contains -# modules that are either public because they were meant to be, or because they -# contain public functions/objects that aren't present in any other namespace -# for whatever reason and therefore should be treated as public. -# -# The PRIVATE_BUT_PRESENT_MODULES list contains modules that look public (lack -# of underscores) but should not be used. For many of those modules the -# current status is fine. For others it may make sense to work on making them -# private, to clean up our public API and avoid confusion. -PUBLIC_MODULES = ['numpy.' + s for s in [ - "ctypeslib", - "distutils", - "distutils.cpuinfo", - "distutils.exec_command", - "distutils.misc_util", - "distutils.log", - "distutils.system_info", - "doc", - "doc.basics", - "doc.broadcasting", - "doc.byteswapping", - "doc.constants", - "doc.creation", - "doc.dispatch", - "doc.glossary", - "doc.indexing", - "doc.internals", - "doc.misc", - "doc.structured_arrays", - "doc.subclassing", - "doc.ufuncs", - "dual", - "f2py", - "fft", - "lib", - "lib.format", # was this meant to be public? - "lib.mixins", - "lib.recfunctions", - "lib.scimath", - "linalg", - "ma", - "ma.extras", - "ma.mrecords", - "matlib", - "polynomial", - "polynomial.chebyshev", - "polynomial.hermite", - "polynomial.hermite_e", - "polynomial.laguerre", - "polynomial.legendre", - "polynomial.polynomial", - "polynomial.polyutils", - "random", - "testing", - "version", -]] - - -PUBLIC_ALIASED_MODULES = [ - "numpy.char", - "numpy.emath", - "numpy.rec", -] - - -PRIVATE_BUT_PRESENT_MODULES = ['numpy.' 
+ s for s in [ - "compat", - "compat.py3k", - "conftest", - "core", - "core.arrayprint", - "core.defchararray", - "core.einsumfunc", - "core.fromnumeric", - "core.function_base", - "core.getlimits", - "core.machar", - "core.memmap", - "core.multiarray", - "core.numeric", - "core.numerictypes", - "core.overrides", - "core.records", - "core.shape_base", - "core.umath", - "core.umath_tests", - "distutils.ccompiler", - "distutils.command", - "distutils.command.autodist", - "distutils.command.bdist_rpm", - "distutils.command.build", - "distutils.command.build_clib", - "distutils.command.build_ext", - "distutils.command.build_py", - "distutils.command.build_scripts", - "distutils.command.build_src", - "distutils.command.config", - "distutils.command.config_compiler", - "distutils.command.develop", - "distutils.command.egg_info", - "distutils.command.install", - "distutils.command.install_clib", - "distutils.command.install_data", - "distutils.command.install_headers", - "distutils.command.sdist", - "distutils.compat", - "distutils.conv_template", - "distutils.core", - "distutils.extension", - "distutils.fcompiler", - "distutils.fcompiler.absoft", - "distutils.fcompiler.compaq", - "distutils.fcompiler.environment", - "distutils.fcompiler.g95", - "distutils.fcompiler.gnu", - "distutils.fcompiler.hpux", - "distutils.fcompiler.ibm", - "distutils.fcompiler.intel", - "distutils.fcompiler.lahey", - "distutils.fcompiler.mips", - "distutils.fcompiler.nag", - "distutils.fcompiler.none", - "distutils.fcompiler.pathf95", - "distutils.fcompiler.pg", - "distutils.fcompiler.sun", - "distutils.fcompiler.vast", - "distutils.from_template", - "distutils.intelccompiler", - "distutils.lib2def", - "distutils.line_endings", - "distutils.mingw32ccompiler", - "distutils.msvccompiler", - "distutils.npy_pkg_config", - "distutils.numpy_distribution", - "distutils.pathccompiler", - "distutils.unixccompiler", - "f2py.auxfuncs", - "f2py.capi_maps", - "f2py.cb_rules", - "f2py.cfuncs", - 
"f2py.common_rules", - "f2py.crackfortran", - "f2py.diagnose", - "f2py.f2py2e", - "f2py.f2py_testing", - "f2py.f90mod_rules", - "f2py.func2subr", - "f2py.rules", - "f2py.use_rules", - "fft.helper", - "lib.arraypad", - "lib.arraysetops", - "lib.arrayterator", - "lib.financial", - "lib.function_base", - "lib.histograms", - "lib.index_tricks", - "lib.nanfunctions", - "lib.npyio", - "lib.polynomial", - "lib.shape_base", - "lib.stride_tricks", - "lib.twodim_base", - "lib.type_check", - "lib.ufunclike", - "lib.user_array", # note: not in np.lib, but probably should just be deleted - "lib.utils", - "linalg.lapack_lite", - "linalg.linalg", - "ma.bench", - "ma.core", - "ma.testutils", - "ma.timer_comparison", - "matrixlib", - "matrixlib.defmatrix", - "random.mtrand", - "testing.print_coercion_tables", - "testing.utils", -]] - - -def is_unexpected(name): - """Check if this needs to be considered.""" - if '._' in name or '.tests' in name or '.setup' in name: - return False - - if name in PUBLIC_MODULES: - return False - - if name in PUBLIC_ALIASED_MODULES: - return False - - if name in PRIVATE_BUT_PRESENT_MODULES: - return False - - return True - - -# These are present in a directory with an __init__.py but cannot be imported -# code_generators/ isn't installed, but present for an inplace build -SKIP_LIST = [ - "numpy.core.code_generators", - "numpy.core.code_generators.genapi", - "numpy.core.code_generators.generate_umath", - "numpy.core.code_generators.ufunc_docstrings", - "numpy.core.code_generators.generate_numpy_api", - "numpy.core.code_generators.generate_ufunc_api", - "numpy.core.code_generators.numpy_api", - "numpy.core.cversions", - "numpy.core.generate_numpy_api", - "numpy.distutils.msvc9compiler", -] - - -def test_all_modules_are_expected(): - """ - Test that we don't add anything that looks like a new public module by - accident. Check is based on filenames. 
- """ - - modnames = [] - for _, modname, ispkg in pkgutil.walk_packages(path=np.__path__, - prefix=np.__name__ + '.', - onerror=None): - if is_unexpected(modname) and modname not in SKIP_LIST: - # We have a name that is new. If that's on purpose, add it to - # PUBLIC_MODULES. We don't expect to have to add anything to - # PRIVATE_BUT_PRESENT_MODULES. Use an underscore in the name! - modnames.append(modname) - - if modnames: - raise AssertionError("Found unexpected modules: {}".format(modnames)) - - -# Stuff that clearly shouldn't be in the API and is detected by the next test -# below -SKIP_LIST_2 = [ - 'numpy.math', - 'numpy.distutils.log.sys', - 'numpy.distutils.system_info.copy', - 'numpy.distutils.system_info.distutils', - 'numpy.distutils.system_info.log', - 'numpy.distutils.system_info.os', - 'numpy.distutils.system_info.platform', - 'numpy.distutils.system_info.re', - 'numpy.distutils.system_info.shutil', - 'numpy.distutils.system_info.subprocess', - 'numpy.distutils.system_info.sys', - 'numpy.distutils.system_info.tempfile', - 'numpy.distutils.system_info.textwrap', - 'numpy.distutils.system_info.warnings', - 'numpy.doc.constants.re', - 'numpy.doc.constants.textwrap', - 'numpy.lib.emath', - 'numpy.lib.math', - 'numpy.matlib.char', - 'numpy.matlib.rec', - 'numpy.matlib.emath', - 'numpy.matlib.math', - 'numpy.matlib.linalg', - 'numpy.matlib.fft', - 'numpy.matlib.random', - 'numpy.matlib.ctypeslib', - 'numpy.matlib.ma', -] - - -def test_all_modules_are_expected_2(): - """ - Method checking all objects. The pkgutil-based method in - `test_all_modules_are_expected` does not catch imports into a namespace, - only filenames. So this test is more thorough, and checks this like: - - import .lib.scimath as emath - - To check if something in a module is (effectively) public, one can check if - there's anything in that namespace that's a public function/object but is - not exposed in a higher-level namespace. 
For example for a `numpy.lib` - submodule:: - - mod = np.lib.mixins - for obj in mod.__all__: - if obj in np.__all__: - continue - elif obj in np.lib.__all__: - continue - - else: - print(obj) - - """ - - def find_unexpected_members(mod_name): - members = [] - module = importlib.import_module(mod_name) - if hasattr(module, '__all__'): - objnames = module.__all__ - else: - objnames = dir(module) - - for objname in objnames: - if not objname.startswith('_'): - fullobjname = mod_name + '.' + objname - if isinstance(getattr(module, objname), types.ModuleType): - if is_unexpected(fullobjname): - if fullobjname not in SKIP_LIST_2: - members.append(fullobjname) - - return members - - unexpected_members = find_unexpected_members("numpy") - for modname in PUBLIC_MODULES: - unexpected_members.extend(find_unexpected_members(modname)) - - if unexpected_members: - raise AssertionError("Found unexpected object(s) that look like " - "modules: {}".format(unexpected_members)) - - -def test_api_importable(): - """ - Check that all submodules listed higher up in this file can be imported - - Note that if a PRIVATE_BUT_PRESENT_MODULES entry goes missing, it may - simply need to be removed from the list (deprecation may or may not be - needed - apply common sense). 
- """ - def check_importable(module_name): - try: - importlib.import_module(module_name) - except (ImportError, AttributeError): - return False - - return True - - module_names = [] - for module_name in PUBLIC_MODULES: - if not check_importable(module_name): - module_names.append(module_name) - - if module_names: - raise AssertionError("Modules in the public API that cannot be " - "imported: {}".format(module_names)) - - for module_name in PUBLIC_ALIASED_MODULES: - try: - eval(module_name) - except AttributeError: - module_names.append(module_name) - - if module_names: - raise AssertionError("Modules in the public API that were not " - "found: {}".format(module_names)) - - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', category=DeprecationWarning) - warnings.filterwarnings('always', category=ImportWarning) - for module_name in PRIVATE_BUT_PRESENT_MODULES: - if not check_importable(module_name): - module_names.append(module_name) - - if module_names: - raise AssertionError("Modules that are not really public but looked " - "public and can not be imported: " - "{}".format(module_names)) diff --git a/venv/lib/python3.7/site-packages/numpy/tests/test_reloading.py b/venv/lib/python3.7/site-packages/numpy/tests/test_reloading.py deleted file mode 100644 index e378d14..0000000 --- a/venv/lib/python3.7/site-packages/numpy/tests/test_reloading.py +++ /dev/null @@ -1,38 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys - -from numpy.testing import assert_raises, assert_, assert_equal -from numpy.compat import pickle - -if sys.version_info[:2] >= (3, 4): - from importlib import reload -else: - from imp import reload - -def test_numpy_reloading(): - # gh-7844. Also check that relevant globals retain their identity. 
- import numpy as np - import numpy._globals - - _NoValue = np._NoValue - VisibleDeprecationWarning = np.VisibleDeprecationWarning - ModuleDeprecationWarning = np.ModuleDeprecationWarning - - reload(np) - assert_(_NoValue is np._NoValue) - assert_(ModuleDeprecationWarning is np.ModuleDeprecationWarning) - assert_(VisibleDeprecationWarning is np.VisibleDeprecationWarning) - - assert_raises(RuntimeError, reload, numpy._globals) - reload(np) - assert_(_NoValue is np._NoValue) - assert_(ModuleDeprecationWarning is np.ModuleDeprecationWarning) - assert_(VisibleDeprecationWarning is np.VisibleDeprecationWarning) - -def test_novalue(): - import numpy as np - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - assert_equal(repr(np._NoValue), '') - assert_(pickle.loads(pickle.dumps(np._NoValue, - protocol=proto)) is np._NoValue) diff --git a/venv/lib/python3.7/site-packages/numpy/tests/test_scripts.py b/venv/lib/python3.7/site-packages/numpy/tests/test_scripts.py deleted file mode 100644 index e42dc25..0000000 --- a/venv/lib/python3.7/site-packages/numpy/tests/test_scripts.py +++ /dev/null @@ -1,49 +0,0 @@ -""" Test scripts - -Test that we can run executable scripts that have been installed with numpy. -""" -from __future__ import division, print_function, absolute_import - -import sys -import os -import pytest -from os.path import join as pathjoin, isfile, dirname -import subprocess - -import numpy as np -from numpy.compat.py3k import basestring -from numpy.testing import assert_, assert_equal - -is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py')) - - -def find_f2py_commands(): - if sys.platform == 'win32': - exe_dir = dirname(sys.executable) - if exe_dir.endswith('Scripts'): # virtualenv - return [os.path.join(exe_dir, 'f2py')] - else: - return [os.path.join(exe_dir, "Scripts", 'f2py')] - else: - # Three scripts are installed in Unix-like systems: - # 'f2py', 'f2py{major}', and 'f2py{major.minor}'. 
For example, - # if installed with python3.7 the scripts would be named - # 'f2py', 'f2py3', and 'f2py3.7'. - version = sys.version_info - major = str(version.major) - minor = str(version.minor) - return ['f2py', 'f2py' + major, 'f2py' + major + '.' + minor] - - -@pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace") -@pytest.mark.xfail(reason="Test is unreliable") -@pytest.mark.parametrize('f2py_cmd', find_f2py_commands()) -def test_f2py(f2py_cmd): - # test that we can run f2py script - stdout = subprocess.check_output([f2py_cmd, '-v']) - assert_equal(stdout.strip(), b'2') - - -def test_pep338(): - stdout = subprocess.check_output([sys.executable, '-mnumpy.f2py', '-v']) - assert_equal(stdout.strip(), b'2') diff --git a/venv/lib/python3.7/site-packages/numpy/tests/test_warnings.py b/venv/lib/python3.7/site-packages/numpy/tests/test_warnings.py deleted file mode 100644 index f5560a0..0000000 --- a/venv/lib/python3.7/site-packages/numpy/tests/test_warnings.py +++ /dev/null @@ -1,78 +0,0 @@ -""" -Tests which scan for certain occurrences in the code, they may not find -all of these occurrences but should catch almost all. 
-""" -from __future__ import division, absolute_import, print_function - -import sys -import pytest - -if sys.version_info >= (3, 4): - from pathlib import Path - import ast - import tokenize - import numpy - - class ParseCall(ast.NodeVisitor): - def __init__(self): - self.ls = [] - - def visit_Attribute(self, node): - ast.NodeVisitor.generic_visit(self, node) - self.ls.append(node.attr) - - def visit_Name(self, node): - self.ls.append(node.id) - - - class FindFuncs(ast.NodeVisitor): - def __init__(self, filename): - super().__init__() - self.__filename = filename - - def visit_Call(self, node): - p = ParseCall() - p.visit(node.func) - ast.NodeVisitor.generic_visit(self, node) - - if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings': - if node.args[0].s == "ignore": - raise AssertionError( - "ignore filter should not be used; found in " - "{} on line {}".format(self.__filename, node.lineno)) - - if p.ls[-1] == 'warn' and ( - len(p.ls) == 1 or p.ls[-2] == 'warnings'): - - if "testing/tests/test_warnings.py" == self.__filename: - # This file - return - - # See if stacklevel exists: - if len(node.args) == 3: - return - args = {kw.arg for kw in node.keywords} - if "stacklevel" in args: - return - raise AssertionError( - "warnings should have an appropriate stacklevel; found in " - "{} on line {}".format(self.__filename, node.lineno)) - - - @pytest.mark.slow - def test_warning_calls(): - # combined "ignore" and stacklevel error - base = Path(numpy.__file__).parent - - for path in base.rglob("*.py"): - if base / "testing" in path.parents: - continue - if path == base / "__init__.py": - continue - if path == base / "random" / "__init__.py": - continue - # use tokenize to auto-detect encoding on systems where no - # default encoding is defined (e.g. 
LANG='C') - with tokenize.open(str(path)) as file: - tree = ast.parse(file.read()) - FindFuncs(path).visit(tree) diff --git a/venv/lib/python3.7/site-packages/numpy/version.py b/venv/lib/python3.7/site-packages/numpy/version.py deleted file mode 100644 index 41ee83e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/version.py +++ /dev/null @@ -1,12 +0,0 @@ - -# THIS FILE IS GENERATED FROM NUMPY SETUP.PY -# -# To compare versions robustly, use `numpy.lib.NumpyVersion` -short_version = '1.18.2' -version = '1.18.2' -full_version = '1.18.2' -git_revision = 'df256d0d2f3bc6833699529824781c58f9c6e697' -release = True - -if not release: - version = full_version diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/EGG-INFO/PKG-INFO b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/EGG-INFO/PKG-INFO deleted file mode 100644 index 0b410a2..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/EGG-INFO/PKG-INFO +++ /dev/null @@ -1,73 +0,0 @@ -Metadata-Version: 1.2 -Name: pip -Version: 19.0.3 -Summary: The PyPA recommended tool for installing Python packages. -Home-page: https://pip.pypa.io/ -Author: The pip developers -Author-email: pypa-dev@groups.google.com -License: MIT -Description: pip - The Python Package Installer - ================================== - - .. image:: https://img.shields.io/pypi/v/pip.svg - :target: https://pypi.org/project/pip/ - - .. image:: https://readthedocs.org/projects/pip/badge/?version=latest - :target: https://pip.pypa.io/en/latest - - pip is the `package installer`_ for Python. You can use pip to install packages from the `Python Package Index`_ and other indexes. 
- - Please take a look at our documentation for how to install and use pip: - - * `Installation`_ - * `Usage`_ - * `Release notes`_ - - If you find bugs, need help, or want to talk to the developers please use our mailing lists or chat rooms: - - * `Issue tracking`_ - * `Discourse channel`_ - * `User IRC`_ - - If you want to get involved head over to GitHub to get the source code and feel free to jump on the developer mailing lists and chat rooms: - - * `GitHub page`_ - * `Dev mailing list`_ - * `Dev IRC`_ - - Code of Conduct - --------------- - - Everyone interacting in the pip project's codebases, issue trackers, chat - rooms, and mailing lists is expected to follow the `PyPA Code of Conduct`_. - - .. _package installer: https://packaging.python.org/en/latest/current/ - .. _Python Package Index: https://pypi.org - .. _Installation: https://pip.pypa.io/en/stable/installing.html - .. _Usage: https://pip.pypa.io/en/stable/ - .. _Release notes: https://pip.pypa.io/en/stable/news.html - .. _GitHub page: https://github.com/pypa/pip - .. _Issue tracking: https://github.com/pypa/pip/issues - .. _Discourse channel: https://discuss.python.org/c/packaging - .. _Dev mailing list: https://groups.google.com/forum/#!forum/pypa-dev - .. _User IRC: https://webchat.freenode.net/?channels=%23pypa - .. _Dev IRC: https://webchat.freenode.net/?channels=%23pypa-dev - .. 
_PyPA Code of Conduct: https://www.pypa.io/en/latest/code-of-conduct/ - -Keywords: distutils easy_install egg setuptools wheel virtualenv -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Topic :: Software Development :: Build Tools -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Programming Language :: Python :: Implementation :: CPython -Classifier: Programming Language :: Python :: Implementation :: PyPy -Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.* diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/EGG-INFO/SOURCES.txt b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/EGG-INFO/SOURCES.txt deleted file mode 100644 index eb4810d..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/EGG-INFO/SOURCES.txt +++ /dev/null @@ -1,391 +0,0 @@ -AUTHORS.txt -LICENSE.txt -MANIFEST.in -NEWS.rst -README.rst -pyproject.toml -setup.cfg -setup.py -docs/pip_sphinxext.py -docs/html/conf.py -docs/html/cookbook.rst -docs/html/index.rst -docs/html/installing.rst -docs/html/logic.rst -docs/html/news.rst -docs/html/quickstart.rst -docs/html/usage.rst -docs/html/user_guide.rst -docs/html/development/configuration.rst -docs/html/development/contributing.rst -docs/html/development/getting-started.rst -docs/html/development/index.rst -docs/html/development/release-process.rst -docs/html/development/vendoring-policy.rst -docs/html/reference/index.rst -docs/html/reference/pip.rst -docs/html/reference/pip_check.rst -docs/html/reference/pip_config.rst 
-docs/html/reference/pip_download.rst -docs/html/reference/pip_freeze.rst -docs/html/reference/pip_hash.rst -docs/html/reference/pip_install.rst -docs/html/reference/pip_list.rst -docs/html/reference/pip_search.rst -docs/html/reference/pip_show.rst -docs/html/reference/pip_uninstall.rst -docs/html/reference/pip_wheel.rst -docs/man/index.rst -docs/man/commands/check.rst -docs/man/commands/config.rst -docs/man/commands/download.rst -docs/man/commands/freeze.rst -docs/man/commands/hash.rst -docs/man/commands/help.rst -docs/man/commands/install.rst -docs/man/commands/list.rst -docs/man/commands/search.rst -docs/man/commands/show.rst -docs/man/commands/uninstall.rst -docs/man/commands/wheel.rst -src/pip/__init__.py -src/pip/__main__.py -src/pip.egg-info/PKG-INFO -src/pip.egg-info/SOURCES.txt -src/pip.egg-info/dependency_links.txt -src/pip.egg-info/entry_points.txt -src/pip.egg-info/not-zip-safe -src/pip.egg-info/top_level.txt -src/pip/_internal/__init__.py -src/pip/_internal/build_env.py -src/pip/_internal/cache.py -src/pip/_internal/configuration.py -src/pip/_internal/download.py -src/pip/_internal/exceptions.py -src/pip/_internal/index.py -src/pip/_internal/locations.py -src/pip/_internal/pep425tags.py -src/pip/_internal/pyproject.py -src/pip/_internal/resolve.py -src/pip/_internal/wheel.py -src/pip/_internal/cli/__init__.py -src/pip/_internal/cli/autocompletion.py -src/pip/_internal/cli/base_command.py -src/pip/_internal/cli/cmdoptions.py -src/pip/_internal/cli/main_parser.py -src/pip/_internal/cli/parser.py -src/pip/_internal/cli/status_codes.py -src/pip/_internal/commands/__init__.py -src/pip/_internal/commands/check.py -src/pip/_internal/commands/completion.py -src/pip/_internal/commands/configuration.py -src/pip/_internal/commands/download.py -src/pip/_internal/commands/freeze.py -src/pip/_internal/commands/hash.py -src/pip/_internal/commands/help.py -src/pip/_internal/commands/install.py -src/pip/_internal/commands/list.py -src/pip/_internal/commands/search.py 
-src/pip/_internal/commands/show.py -src/pip/_internal/commands/uninstall.py -src/pip/_internal/commands/wheel.py -src/pip/_internal/models/__init__.py -src/pip/_internal/models/candidate.py -src/pip/_internal/models/format_control.py -src/pip/_internal/models/index.py -src/pip/_internal/models/link.py -src/pip/_internal/operations/__init__.py -src/pip/_internal/operations/check.py -src/pip/_internal/operations/freeze.py -src/pip/_internal/operations/prepare.py -src/pip/_internal/req/__init__.py -src/pip/_internal/req/constructors.py -src/pip/_internal/req/req_file.py -src/pip/_internal/req/req_install.py -src/pip/_internal/req/req_set.py -src/pip/_internal/req/req_tracker.py -src/pip/_internal/req/req_uninstall.py -src/pip/_internal/utils/__init__.py -src/pip/_internal/utils/appdirs.py -src/pip/_internal/utils/compat.py -src/pip/_internal/utils/deprecation.py -src/pip/_internal/utils/encoding.py -src/pip/_internal/utils/filesystem.py -src/pip/_internal/utils/glibc.py -src/pip/_internal/utils/hashes.py -src/pip/_internal/utils/logging.py -src/pip/_internal/utils/misc.py -src/pip/_internal/utils/models.py -src/pip/_internal/utils/outdated.py -src/pip/_internal/utils/packaging.py -src/pip/_internal/utils/setuptools_build.py -src/pip/_internal/utils/temp_dir.py -src/pip/_internal/utils/typing.py -src/pip/_internal/utils/ui.py -src/pip/_internal/vcs/__init__.py -src/pip/_internal/vcs/bazaar.py -src/pip/_internal/vcs/git.py -src/pip/_internal/vcs/mercurial.py -src/pip/_internal/vcs/subversion.py -src/pip/_vendor/README.rst -src/pip/_vendor/__init__.py -src/pip/_vendor/appdirs.LICENSE.txt -src/pip/_vendor/appdirs.py -src/pip/_vendor/distro.LICENSE -src/pip/_vendor/distro.py -src/pip/_vendor/ipaddress.LICENSE -src/pip/_vendor/ipaddress.py -src/pip/_vendor/pyparsing.LICENSE -src/pip/_vendor/pyparsing.py -src/pip/_vendor/retrying.LICENSE -src/pip/_vendor/retrying.py -src/pip/_vendor/six.LICENSE -src/pip/_vendor/six.py -src/pip/_vendor/vendor.txt 
-src/pip/_vendor/cachecontrol/LICENSE.txt -src/pip/_vendor/cachecontrol/__init__.py -src/pip/_vendor/cachecontrol/_cmd.py -src/pip/_vendor/cachecontrol/adapter.py -src/pip/_vendor/cachecontrol/cache.py -src/pip/_vendor/cachecontrol/compat.py -src/pip/_vendor/cachecontrol/controller.py -src/pip/_vendor/cachecontrol/filewrapper.py -src/pip/_vendor/cachecontrol/heuristics.py -src/pip/_vendor/cachecontrol/serialize.py -src/pip/_vendor/cachecontrol/wrapper.py -src/pip/_vendor/cachecontrol/caches/__init__.py -src/pip/_vendor/cachecontrol/caches/file_cache.py -src/pip/_vendor/cachecontrol/caches/redis_cache.py -src/pip/_vendor/certifi/LICENSE -src/pip/_vendor/certifi/__init__.py -src/pip/_vendor/certifi/__main__.py -src/pip/_vendor/certifi/cacert.pem -src/pip/_vendor/certifi/core.py -src/pip/_vendor/chardet/LICENSE -src/pip/_vendor/chardet/__init__.py -src/pip/_vendor/chardet/big5freq.py -src/pip/_vendor/chardet/big5prober.py -src/pip/_vendor/chardet/chardistribution.py -src/pip/_vendor/chardet/charsetgroupprober.py -src/pip/_vendor/chardet/charsetprober.py -src/pip/_vendor/chardet/codingstatemachine.py -src/pip/_vendor/chardet/compat.py -src/pip/_vendor/chardet/cp949prober.py -src/pip/_vendor/chardet/enums.py -src/pip/_vendor/chardet/escprober.py -src/pip/_vendor/chardet/escsm.py -src/pip/_vendor/chardet/eucjpprober.py -src/pip/_vendor/chardet/euckrfreq.py -src/pip/_vendor/chardet/euckrprober.py -src/pip/_vendor/chardet/euctwfreq.py -src/pip/_vendor/chardet/euctwprober.py -src/pip/_vendor/chardet/gb2312freq.py -src/pip/_vendor/chardet/gb2312prober.py -src/pip/_vendor/chardet/hebrewprober.py -src/pip/_vendor/chardet/jisfreq.py -src/pip/_vendor/chardet/jpcntx.py -src/pip/_vendor/chardet/langbulgarianmodel.py -src/pip/_vendor/chardet/langcyrillicmodel.py -src/pip/_vendor/chardet/langgreekmodel.py -src/pip/_vendor/chardet/langhebrewmodel.py -src/pip/_vendor/chardet/langhungarianmodel.py -src/pip/_vendor/chardet/langthaimodel.py -src/pip/_vendor/chardet/langturkishmodel.py 
-src/pip/_vendor/chardet/latin1prober.py -src/pip/_vendor/chardet/mbcharsetprober.py -src/pip/_vendor/chardet/mbcsgroupprober.py -src/pip/_vendor/chardet/mbcssm.py -src/pip/_vendor/chardet/sbcharsetprober.py -src/pip/_vendor/chardet/sbcsgroupprober.py -src/pip/_vendor/chardet/sjisprober.py -src/pip/_vendor/chardet/universaldetector.py -src/pip/_vendor/chardet/utf8prober.py -src/pip/_vendor/chardet/version.py -src/pip/_vendor/chardet/cli/__init__.py -src/pip/_vendor/chardet/cli/chardetect.py -src/pip/_vendor/colorama/LICENSE.txt -src/pip/_vendor/colorama/__init__.py -src/pip/_vendor/colorama/ansi.py -src/pip/_vendor/colorama/ansitowin32.py -src/pip/_vendor/colorama/initialise.py -src/pip/_vendor/colorama/win32.py -src/pip/_vendor/colorama/winterm.py -src/pip/_vendor/distlib/LICENSE.txt -src/pip/_vendor/distlib/__init__.py -src/pip/_vendor/distlib/compat.py -src/pip/_vendor/distlib/database.py -src/pip/_vendor/distlib/index.py -src/pip/_vendor/distlib/locators.py -src/pip/_vendor/distlib/manifest.py -src/pip/_vendor/distlib/markers.py -src/pip/_vendor/distlib/metadata.py -src/pip/_vendor/distlib/resources.py -src/pip/_vendor/distlib/scripts.py -src/pip/_vendor/distlib/t32.exe -src/pip/_vendor/distlib/t64.exe -src/pip/_vendor/distlib/util.py -src/pip/_vendor/distlib/version.py -src/pip/_vendor/distlib/w32.exe -src/pip/_vendor/distlib/w64.exe -src/pip/_vendor/distlib/wheel.py -src/pip/_vendor/distlib/_backport/__init__.py -src/pip/_vendor/distlib/_backport/misc.py -src/pip/_vendor/distlib/_backport/shutil.py -src/pip/_vendor/distlib/_backport/sysconfig.cfg -src/pip/_vendor/distlib/_backport/sysconfig.py -src/pip/_vendor/distlib/_backport/tarfile.py -src/pip/_vendor/html5lib/LICENSE -src/pip/_vendor/html5lib/__init__.py -src/pip/_vendor/html5lib/_ihatexml.py -src/pip/_vendor/html5lib/_inputstream.py -src/pip/_vendor/html5lib/_tokenizer.py -src/pip/_vendor/html5lib/_utils.py -src/pip/_vendor/html5lib/constants.py -src/pip/_vendor/html5lib/html5parser.py 
-src/pip/_vendor/html5lib/serializer.py -src/pip/_vendor/html5lib/_trie/__init__.py -src/pip/_vendor/html5lib/_trie/_base.py -src/pip/_vendor/html5lib/_trie/datrie.py -src/pip/_vendor/html5lib/_trie/py.py -src/pip/_vendor/html5lib/filters/__init__.py -src/pip/_vendor/html5lib/filters/alphabeticalattributes.py -src/pip/_vendor/html5lib/filters/base.py -src/pip/_vendor/html5lib/filters/inject_meta_charset.py -src/pip/_vendor/html5lib/filters/lint.py -src/pip/_vendor/html5lib/filters/optionaltags.py -src/pip/_vendor/html5lib/filters/sanitizer.py -src/pip/_vendor/html5lib/filters/whitespace.py -src/pip/_vendor/html5lib/treeadapters/__init__.py -src/pip/_vendor/html5lib/treeadapters/genshi.py -src/pip/_vendor/html5lib/treeadapters/sax.py -src/pip/_vendor/html5lib/treebuilders/__init__.py -src/pip/_vendor/html5lib/treebuilders/base.py -src/pip/_vendor/html5lib/treebuilders/dom.py -src/pip/_vendor/html5lib/treebuilders/etree.py -src/pip/_vendor/html5lib/treebuilders/etree_lxml.py -src/pip/_vendor/html5lib/treewalkers/__init__.py -src/pip/_vendor/html5lib/treewalkers/base.py -src/pip/_vendor/html5lib/treewalkers/dom.py -src/pip/_vendor/html5lib/treewalkers/etree.py -src/pip/_vendor/html5lib/treewalkers/etree_lxml.py -src/pip/_vendor/html5lib/treewalkers/genshi.py -src/pip/_vendor/idna/LICENSE.rst -src/pip/_vendor/idna/__init__.py -src/pip/_vendor/idna/codec.py -src/pip/_vendor/idna/compat.py -src/pip/_vendor/idna/core.py -src/pip/_vendor/idna/idnadata.py -src/pip/_vendor/idna/intranges.py -src/pip/_vendor/idna/package_data.py -src/pip/_vendor/idna/uts46data.py -src/pip/_vendor/lockfile/LICENSE -src/pip/_vendor/lockfile/__init__.py -src/pip/_vendor/lockfile/linklockfile.py -src/pip/_vendor/lockfile/mkdirlockfile.py -src/pip/_vendor/lockfile/pidlockfile.py -src/pip/_vendor/lockfile/sqlitelockfile.py -src/pip/_vendor/lockfile/symlinklockfile.py -src/pip/_vendor/msgpack/COPYING -src/pip/_vendor/msgpack/__init__.py -src/pip/_vendor/msgpack/_version.py 
-src/pip/_vendor/msgpack/exceptions.py -src/pip/_vendor/msgpack/fallback.py -src/pip/_vendor/packaging/LICENSE -src/pip/_vendor/packaging/LICENSE.APACHE -src/pip/_vendor/packaging/LICENSE.BSD -src/pip/_vendor/packaging/__about__.py -src/pip/_vendor/packaging/__init__.py -src/pip/_vendor/packaging/_compat.py -src/pip/_vendor/packaging/_structures.py -src/pip/_vendor/packaging/markers.py -src/pip/_vendor/packaging/requirements.py -src/pip/_vendor/packaging/specifiers.py -src/pip/_vendor/packaging/utils.py -src/pip/_vendor/packaging/version.py -src/pip/_vendor/pep517/LICENSE -src/pip/_vendor/pep517/__init__.py -src/pip/_vendor/pep517/_in_process.py -src/pip/_vendor/pep517/build.py -src/pip/_vendor/pep517/check.py -src/pip/_vendor/pep517/colorlog.py -src/pip/_vendor/pep517/compat.py -src/pip/_vendor/pep517/envbuild.py -src/pip/_vendor/pep517/wrappers.py -src/pip/_vendor/pkg_resources/LICENSE -src/pip/_vendor/pkg_resources/__init__.py -src/pip/_vendor/pkg_resources/py31compat.py -src/pip/_vendor/progress/LICENSE -src/pip/_vendor/progress/__init__.py -src/pip/_vendor/progress/bar.py -src/pip/_vendor/progress/counter.py -src/pip/_vendor/progress/helpers.py -src/pip/_vendor/progress/spinner.py -src/pip/_vendor/pytoml/LICENSE -src/pip/_vendor/pytoml/__init__.py -src/pip/_vendor/pytoml/core.py -src/pip/_vendor/pytoml/parser.py -src/pip/_vendor/pytoml/test.py -src/pip/_vendor/pytoml/utils.py -src/pip/_vendor/pytoml/writer.py -src/pip/_vendor/requests/LICENSE -src/pip/_vendor/requests/__init__.py -src/pip/_vendor/requests/__version__.py -src/pip/_vendor/requests/_internal_utils.py -src/pip/_vendor/requests/adapters.py -src/pip/_vendor/requests/api.py -src/pip/_vendor/requests/auth.py -src/pip/_vendor/requests/certs.py -src/pip/_vendor/requests/compat.py -src/pip/_vendor/requests/cookies.py -src/pip/_vendor/requests/exceptions.py -src/pip/_vendor/requests/help.py -src/pip/_vendor/requests/hooks.py -src/pip/_vendor/requests/models.py -src/pip/_vendor/requests/packages.py 
-src/pip/_vendor/requests/sessions.py -src/pip/_vendor/requests/status_codes.py -src/pip/_vendor/requests/structures.py -src/pip/_vendor/requests/utils.py -src/pip/_vendor/urllib3/LICENSE.txt -src/pip/_vendor/urllib3/__init__.py -src/pip/_vendor/urllib3/_collections.py -src/pip/_vendor/urllib3/connection.py -src/pip/_vendor/urllib3/connectionpool.py -src/pip/_vendor/urllib3/exceptions.py -src/pip/_vendor/urllib3/fields.py -src/pip/_vendor/urllib3/filepost.py -src/pip/_vendor/urllib3/poolmanager.py -src/pip/_vendor/urllib3/request.py -src/pip/_vendor/urllib3/response.py -src/pip/_vendor/urllib3/contrib/__init__.py -src/pip/_vendor/urllib3/contrib/_appengine_environ.py -src/pip/_vendor/urllib3/contrib/appengine.py -src/pip/_vendor/urllib3/contrib/ntlmpool.py -src/pip/_vendor/urllib3/contrib/pyopenssl.py -src/pip/_vendor/urllib3/contrib/securetransport.py -src/pip/_vendor/urllib3/contrib/socks.py -src/pip/_vendor/urllib3/contrib/_securetransport/__init__.py -src/pip/_vendor/urllib3/contrib/_securetransport/bindings.py -src/pip/_vendor/urllib3/contrib/_securetransport/low_level.py -src/pip/_vendor/urllib3/packages/__init__.py -src/pip/_vendor/urllib3/packages/six.py -src/pip/_vendor/urllib3/packages/backports/__init__.py -src/pip/_vendor/urllib3/packages/backports/makefile.py -src/pip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py -src/pip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py -src/pip/_vendor/urllib3/util/__init__.py -src/pip/_vendor/urllib3/util/connection.py -src/pip/_vendor/urllib3/util/queue.py -src/pip/_vendor/urllib3/util/request.py -src/pip/_vendor/urllib3/util/response.py -src/pip/_vendor/urllib3/util/retry.py -src/pip/_vendor/urllib3/util/ssl_.py -src/pip/_vendor/urllib3/util/timeout.py -src/pip/_vendor/urllib3/util/url.py -src/pip/_vendor/urllib3/util/wait.py -src/pip/_vendor/webencodings/LICENSE -src/pip/_vendor/webencodings/__init__.py -src/pip/_vendor/webencodings/labels.py -src/pip/_vendor/webencodings/mklabels.py 
-src/pip/_vendor/webencodings/tests.py -src/pip/_vendor/webencodings/x_user_defined.py \ No newline at end of file diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/EGG-INFO/dependency_links.txt b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/EGG-INFO/dependency_links.txt deleted file mode 100644 index 8b13789..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/EGG-INFO/dependency_links.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/EGG-INFO/entry_points.txt b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/EGG-INFO/entry_points.txt deleted file mode 100644 index f5809cb..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/EGG-INFO/entry_points.txt +++ /dev/null @@ -1,5 +0,0 @@ -[console_scripts] -pip = pip._internal:main -pip3 = pip._internal:main -pip3.7 = pip._internal:main - diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/EGG-INFO/not-zip-safe b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/EGG-INFO/not-zip-safe deleted file mode 100644 index 8b13789..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/EGG-INFO/not-zip-safe +++ /dev/null @@ -1 +0,0 @@ - diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/EGG-INFO/top_level.txt b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/EGG-INFO/top_level.txt deleted file mode 100644 index a1b589e..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/EGG-INFO/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/__init__.py deleted file mode 100644 index f48c1ca..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "19.0.3" diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/__main__.py 
b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/__main__.py deleted file mode 100644 index 0c223f8..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/__main__.py +++ /dev/null @@ -1,19 +0,0 @@ -from __future__ import absolute_import - -import os -import sys - -# If we are running from a wheel, add the wheel to sys.path -# This allows the usage python pip-*.whl/pip install pip-*.whl -if __package__ == '': - # __file__ is pip-*.whl/pip/__main__.py - # first dirname call strips of '/__main__.py', second strips off '/pip' - # Resulting path is the name of the wheel itself - # Add that to sys.path so we can import pip - path = os.path.dirname(os.path.dirname(__file__)) - sys.path.insert(0, path) - -from pip._internal import main as _main # isort:skip # noqa - -if __name__ == '__main__': - sys.exit(_main()) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/__init__.py deleted file mode 100644 index 276124d..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/__init__.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python -from __future__ import absolute_import - -import locale -import logging -import os -import warnings - -import sys - -# 2016-06-17 barry@debian.org: urllib3 1.14 added optional support for socks, -# but if invoked (i.e. imported), it will issue a warning to stderr if socks -# isn't available. requests unconditionally imports urllib3's socks contrib -# module, triggering this warning. The warning breaks DEP-8 tests (because of -# the stderr output) and is just plain annoying in normal usage. I don't want -# to add socks as yet another dependency for pip, nor do I want to allow-stder -# in the DEP-8 tests, so just suppress the warning. pdb tells me this has to -# be done before the import of pip.vcs. 
-from pip._vendor.urllib3.exceptions import DependencyWarning -warnings.filterwarnings("ignore", category=DependencyWarning) # noqa - -# We want to inject the use of SecureTransport as early as possible so that any -# references or sessions or what have you are ensured to have it, however we -# only want to do this in the case that we're running on macOS and the linked -# OpenSSL is too old to handle TLSv1.2 -try: - import ssl -except ImportError: - pass -else: - # Checks for OpenSSL 1.0.1 on MacOS - if sys.platform == "darwin" and ssl.OPENSSL_VERSION_NUMBER < 0x1000100f: - try: - from pip._vendor.urllib3.contrib import securetransport - except (ImportError, OSError): - pass - else: - securetransport.inject_into_urllib3() - -from pip._internal.cli.autocompletion import autocomplete -from pip._internal.cli.main_parser import parse_command -from pip._internal.commands import commands_dict -from pip._internal.exceptions import PipError -from pip._internal.utils import deprecation -from pip._internal.vcs import git, mercurial, subversion, bazaar # noqa -from pip._vendor.urllib3.exceptions import InsecureRequestWarning - -logger = logging.getLogger(__name__) - -# Hide the InsecureRequestWarning from urllib3 -warnings.filterwarnings("ignore", category=InsecureRequestWarning) - - -def main(args=None): - if args is None: - args = sys.argv[1:] - - # Configure our deprecation warnings to be sent through loggers - deprecation.install_warning_logger() - - autocomplete() - - try: - cmd_name, cmd_args = parse_command(args) - except PipError as exc: - sys.stderr.write("ERROR: %s" % exc) - sys.stderr.write(os.linesep) - sys.exit(1) - - # Needed for locale.getpreferredencoding(False) to work - # in pip._internal.utils.encoding.auto_decode - try: - locale.setlocale(locale.LC_ALL, '') - except locale.Error as e: - # setlocale can apparently crash if locale are uninitialized - logger.debug("Ignoring error %s when setting locale", e) - command = 
commands_dict[cmd_name](isolated=("--isolated" in cmd_args)) - return command.main(cmd_args) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/build_env.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/build_env.py deleted file mode 100644 index d744cc7..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/build_env.py +++ /dev/null @@ -1,215 +0,0 @@ -"""Build Environment used for isolation during sdist building -""" - -import logging -import os -import sys -import textwrap -from collections import OrderedDict -from distutils.sysconfig import get_python_lib -from sysconfig import get_paths - -from pip._vendor.pkg_resources import Requirement, VersionConflict, WorkingSet - -from pip import __file__ as pip_location -from pip._internal.utils.misc import call_subprocess -from pip._internal.utils.temp_dir import TempDirectory -from pip._internal.utils.typing import MYPY_CHECK_RUNNING -from pip._internal.utils.ui import open_spinner - -if MYPY_CHECK_RUNNING: - from typing import Tuple, Set, Iterable, Optional, List # noqa: F401 - from pip._internal.index import PackageFinder # noqa: F401 - -logger = logging.getLogger(__name__) - - -class _Prefix: - - def __init__(self, path): - # type: (str) -> None - self.path = path - self.setup = False - self.bin_dir = get_paths( - 'nt' if os.name == 'nt' else 'posix_prefix', - vars={'base': path, 'platbase': path} - )['scripts'] - # Note: prefer distutils' sysconfig to get the - # library paths so PyPy is correctly supported. 
- purelib = get_python_lib(plat_specific=False, prefix=path) - platlib = get_python_lib(plat_specific=True, prefix=path) - if purelib == platlib: - self.lib_dirs = [purelib] - else: - self.lib_dirs = [purelib, platlib] - - -class BuildEnvironment(object): - """Creates and manages an isolated environment to install build deps - """ - - def __init__(self): - # type: () -> None - self._temp_dir = TempDirectory(kind="build-env") - self._temp_dir.create() - - self._prefixes = OrderedDict(( - (name, _Prefix(os.path.join(self._temp_dir.path, name))) - for name in ('normal', 'overlay') - )) - - self._bin_dirs = [] # type: List[str] - self._lib_dirs = [] # type: List[str] - for prefix in reversed(list(self._prefixes.values())): - self._bin_dirs.append(prefix.bin_dir) - self._lib_dirs.extend(prefix.lib_dirs) - - # Customize site to: - # - ensure .pth files are honored - # - prevent access to system site packages - system_sites = { - os.path.normcase(site) for site in ( - get_python_lib(plat_specific=False), - get_python_lib(plat_specific=True), - ) - } - self._site_dir = os.path.join(self._temp_dir.path, 'site') - if not os.path.exists(self._site_dir): - os.mkdir(self._site_dir) - with open(os.path.join(self._site_dir, 'sitecustomize.py'), 'w') as fp: - fp.write(textwrap.dedent( - ''' - import os, site, sys - - # First, drop system-sites related paths. - original_sys_path = sys.path[:] - known_paths = set() - for path in {system_sites!r}: - site.addsitedir(path, known_paths=known_paths) - system_paths = set( - os.path.normcase(path) - for path in sys.path[len(original_sys_path):] - ) - original_sys_path = [ - path for path in original_sys_path - if os.path.normcase(path) not in system_paths - ] - sys.path = original_sys_path - - # Second, add lib directories. - # ensuring .pth file are processed. 
- for path in {lib_dirs!r}: - assert not path in sys.path - site.addsitedir(path) - ''' - ).format(system_sites=system_sites, lib_dirs=self._lib_dirs)) - - def __enter__(self): - self._save_env = { - name: os.environ.get(name, None) - for name in ('PATH', 'PYTHONNOUSERSITE', 'PYTHONPATH') - } - - path = self._bin_dirs[:] - old_path = self._save_env['PATH'] - if old_path: - path.extend(old_path.split(os.pathsep)) - - pythonpath = [self._site_dir] - - os.environ.update({ - 'PATH': os.pathsep.join(path), - 'PYTHONNOUSERSITE': '1', - 'PYTHONPATH': os.pathsep.join(pythonpath), - }) - - def __exit__(self, exc_type, exc_val, exc_tb): - for varname, old_value in self._save_env.items(): - if old_value is None: - os.environ.pop(varname, None) - else: - os.environ[varname] = old_value - - def cleanup(self): - # type: () -> None - self._temp_dir.cleanup() - - def check_requirements(self, reqs): - # type: (Iterable[str]) -> Tuple[Set[Tuple[str, str]], Set[str]] - """Return 2 sets: - - conflicting requirements: set of (installed, wanted) reqs tuples - - missing requirements: set of reqs - """ - missing = set() - conflicting = set() - if reqs: - ws = WorkingSet(self._lib_dirs) - for req in reqs: - try: - if ws.find(Requirement.parse(req)) is None: - missing.add(req) - except VersionConflict as e: - conflicting.add((str(e.args[0].as_requirement()), - str(e.args[1]))) - return conflicting, missing - - def install_requirements( - self, - finder, # type: PackageFinder - requirements, # type: Iterable[str] - prefix_as_string, # type: str - message # type: Optional[str] - ): - # type: (...) 
-> None - prefix = self._prefixes[prefix_as_string] - assert not prefix.setup - prefix.setup = True - if not requirements: - return - args = [ - sys.executable, os.path.dirname(pip_location), 'install', - '--ignore-installed', '--no-user', '--prefix', prefix.path, - '--no-warn-script-location', - ] # type: List[str] - if logger.getEffectiveLevel() <= logging.DEBUG: - args.append('-v') - for format_control in ('no_binary', 'only_binary'): - formats = getattr(finder.format_control, format_control) - args.extend(('--' + format_control.replace('_', '-'), - ','.join(sorted(formats or {':none:'})))) - if finder.index_urls: - args.extend(['-i', finder.index_urls[0]]) - for extra_index in finder.index_urls[1:]: - args.extend(['--extra-index-url', extra_index]) - else: - args.append('--no-index') - for link in finder.find_links: - args.extend(['--find-links', link]) - for _, host, _ in finder.secure_origins: - args.extend(['--trusted-host', host]) - if finder.allow_all_prereleases: - args.append('--pre') - args.append('--') - args.extend(requirements) - with open_spinner(message) as spinner: - call_subprocess(args, show_stdout=False, spinner=spinner) - - -class NoOpBuildEnvironment(BuildEnvironment): - """A no-op drop-in replacement for BuildEnvironment - """ - - def __init__(self): - pass - - def __enter__(self): - pass - - def __exit__(self, exc_type, exc_val, exc_tb): - pass - - def cleanup(self): - pass - - def install_requirements(self, finder, requirements, prefix, message): - raise NotImplementedError() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cache.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cache.py deleted file mode 100644 index eb295c4..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cache.py +++ /dev/null @@ -1,224 +0,0 @@ -"""Cache Management -""" - -import errno -import hashlib -import logging -import os - -from pip._vendor.packaging.utils import 
canonicalize_name - -from pip._internal.download import path_to_url -from pip._internal.models.link import Link -from pip._internal.utils.compat import expanduser -from pip._internal.utils.temp_dir import TempDirectory -from pip._internal.utils.typing import MYPY_CHECK_RUNNING -from pip._internal.wheel import InvalidWheelFilename, Wheel - -if MYPY_CHECK_RUNNING: - from typing import Optional, Set, List, Any # noqa: F401 - from pip._internal.index import FormatControl # noqa: F401 - -logger = logging.getLogger(__name__) - - -class Cache(object): - """An abstract class - provides cache directories for data from links - - - :param cache_dir: The root of the cache. - :param format_control: An object of FormatControl class to limit - binaries being read from the cache. - :param allowed_formats: which formats of files the cache should store. - ('binary' and 'source' are the only allowed values) - """ - - def __init__(self, cache_dir, format_control, allowed_formats): - # type: (str, FormatControl, Set[str]) -> None - super(Cache, self).__init__() - self.cache_dir = expanduser(cache_dir) if cache_dir else None - self.format_control = format_control - self.allowed_formats = allowed_formats - - _valid_formats = {"source", "binary"} - assert self.allowed_formats.union(_valid_formats) == _valid_formats - - def _get_cache_path_parts(self, link): - # type: (Link) -> List[str] - """Get parts of part that must be os.path.joined with cache_dir - """ - - # We want to generate an url to use as our cache key, we don't want to - # just re-use the URL because it might have other items in the fragment - # and we don't care about those. 
- key_parts = [link.url_without_fragment] - if link.hash_name is not None and link.hash is not None: - key_parts.append("=".join([link.hash_name, link.hash])) - key_url = "#".join(key_parts) - - # Encode our key url with sha224, we'll use this because it has similar - # security properties to sha256, but with a shorter total output (and - # thus less secure). However the differences don't make a lot of - # difference for our use case here. - hashed = hashlib.sha224(key_url.encode()).hexdigest() - - # We want to nest the directories some to prevent having a ton of top - # level directories where we might run out of sub directories on some - # FS. - parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]] - - return parts - - def _get_candidates(self, link, package_name): - # type: (Link, Optional[str]) -> List[Any] - can_not_cache = ( - not self.cache_dir or - not package_name or - not link - ) - if can_not_cache: - return [] - - canonical_name = canonicalize_name(package_name) - formats = self.format_control.get_allowed_formats( - canonical_name - ) - if not self.allowed_formats.intersection(formats): - return [] - - root = self.get_path_for_link(link) - try: - return os.listdir(root) - except OSError as err: - if err.errno in {errno.ENOENT, errno.ENOTDIR}: - return [] - raise - - def get_path_for_link(self, link): - # type: (Link) -> str - """Return a directory to store cached items in for link. - """ - raise NotImplementedError() - - def get(self, link, package_name): - # type: (Link, Optional[str]) -> Link - """Returns a link to a cached item if it exists, otherwise returns the - passed link. - """ - raise NotImplementedError() - - def _link_for_candidate(self, link, candidate): - # type: (Link, str) -> Link - root = self.get_path_for_link(link) - path = os.path.join(root, candidate) - - return Link(path_to_url(path)) - - def cleanup(self): - # type: () -> None - pass - - -class SimpleWheelCache(Cache): - """A cache of wheels for future installs. 
- """ - - def __init__(self, cache_dir, format_control): - # type: (str, FormatControl) -> None - super(SimpleWheelCache, self).__init__( - cache_dir, format_control, {"binary"} - ) - - def get_path_for_link(self, link): - # type: (Link) -> str - """Return a directory to store cached wheels for link - - Because there are M wheels for any one sdist, we provide a directory - to cache them in, and then consult that directory when looking up - cache hits. - - We only insert things into the cache if they have plausible version - numbers, so that we don't contaminate the cache with things that were - not unique. E.g. ./package might have dozens of installs done for it - and build a version of 0.0...and if we built and cached a wheel, we'd - end up using the same wheel even if the source has been edited. - - :param link: The link of the sdist for which this will cache wheels. - """ - parts = self._get_cache_path_parts(link) - - # Store wheels within the root cache_dir - return os.path.join(self.cache_dir, "wheels", *parts) - - def get(self, link, package_name): - # type: (Link, Optional[str]) -> Link - candidates = [] - - for wheel_name in self._get_candidates(link, package_name): - try: - wheel = Wheel(wheel_name) - except InvalidWheelFilename: - continue - if not wheel.supported(): - # Built for a different python/arch/etc - continue - candidates.append((wheel.support_index_min(), wheel_name)) - - if not candidates: - return link - - return self._link_for_candidate(link, min(candidates)[1]) - - -class EphemWheelCache(SimpleWheelCache): - """A SimpleWheelCache that creates it's own temporary cache directory - """ - - def __init__(self, format_control): - # type: (FormatControl) -> None - self._temp_dir = TempDirectory(kind="ephem-wheel-cache") - self._temp_dir.create() - - super(EphemWheelCache, self).__init__( - self._temp_dir.path, format_control - ) - - def cleanup(self): - # type: () -> None - self._temp_dir.cleanup() - - -class WheelCache(Cache): - """Wraps 
EphemWheelCache and SimpleWheelCache into a single Cache - - This Cache allows for gracefully degradation, using the ephem wheel cache - when a certain link is not found in the simple wheel cache first. - """ - - def __init__(self, cache_dir, format_control): - # type: (str, FormatControl) -> None - super(WheelCache, self).__init__( - cache_dir, format_control, {'binary'} - ) - self._wheel_cache = SimpleWheelCache(cache_dir, format_control) - self._ephem_cache = EphemWheelCache(format_control) - - def get_path_for_link(self, link): - # type: (Link) -> str - return self._wheel_cache.get_path_for_link(link) - - def get_ephem_path_for_link(self, link): - # type: (Link) -> str - return self._ephem_cache.get_path_for_link(link) - - def get(self, link, package_name): - # type: (Link, Optional[str]) -> Link - retval = self._wheel_cache.get(link, package_name) - if retval is link: - retval = self._ephem_cache.get(link, package_name) - return retval - - def cleanup(self): - # type: () -> None - self._wheel_cache.cleanup() - self._ephem_cache.cleanup() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cli/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cli/__init__.py deleted file mode 100644 index e589bb9..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cli/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -"""Subpackage containing all of pip's command line interface related code -""" - -# This file intentionally does not import submodules diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cli/autocompletion.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cli/autocompletion.py deleted file mode 100644 index 0a04199..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cli/autocompletion.py +++ /dev/null @@ -1,152 +0,0 @@ -"""Logic that powers autocompletion installed by ``pip completion``. 
-""" - -import optparse -import os -import sys - -from pip._internal.cli.main_parser import create_main_parser -from pip._internal.commands import commands_dict, get_summaries -from pip._internal.utils.misc import get_installed_distributions - - -def autocomplete(): - """Entry Point for completion of main and subcommand options. - """ - # Don't complete if user hasn't sourced bash_completion file. - if 'PIP_AUTO_COMPLETE' not in os.environ: - return - cwords = os.environ['COMP_WORDS'].split()[1:] - cword = int(os.environ['COMP_CWORD']) - try: - current = cwords[cword - 1] - except IndexError: - current = '' - - subcommands = [cmd for cmd, summary in get_summaries()] - options = [] - # subcommand - try: - subcommand_name = [w for w in cwords if w in subcommands][0] - except IndexError: - subcommand_name = None - - parser = create_main_parser() - # subcommand options - if subcommand_name: - # special case: 'help' subcommand has no options - if subcommand_name == 'help': - sys.exit(1) - # special case: list locally installed dists for show and uninstall - should_list_installed = ( - subcommand_name in ['show', 'uninstall'] and - not current.startswith('-') - ) - if should_list_installed: - installed = [] - lc = current.lower() - for dist in get_installed_distributions(local_only=True): - if dist.key.startswith(lc) and dist.key not in cwords[1:]: - installed.append(dist.key) - # if there are no dists installed, fall back to option completion - if installed: - for dist in installed: - print(dist) - sys.exit(1) - - subcommand = commands_dict[subcommand_name]() - - for opt in subcommand.parser.option_list_all: - if opt.help != optparse.SUPPRESS_HELP: - for opt_str in opt._long_opts + opt._short_opts: - options.append((opt_str, opt.nargs)) - - # filter out previously specified options from available options - prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]] - options = [(x, v) for (x, v) in options if x not in prev_opts] - # filter options by current input - 
options = [(k, v) for k, v in options if k.startswith(current)] - # get completion type given cwords and available subcommand options - completion_type = get_path_completion_type( - cwords, cword, subcommand.parser.option_list_all, - ) - # get completion files and directories if ``completion_type`` is - # ````, ```` or ```` - if completion_type: - options = auto_complete_paths(current, completion_type) - options = ((opt, 0) for opt in options) - for option in options: - opt_label = option[0] - # append '=' to options which require args - if option[1] and option[0][:2] == "--": - opt_label += '=' - print(opt_label) - else: - # show main parser options only when necessary - - opts = [i.option_list for i in parser.option_groups] - opts.append(parser.option_list) - opts = (o for it in opts for o in it) - if current.startswith('-'): - for opt in opts: - if opt.help != optparse.SUPPRESS_HELP: - subcommands += opt._long_opts + opt._short_opts - else: - # get completion type given cwords and all available options - completion_type = get_path_completion_type(cwords, cword, opts) - if completion_type: - subcommands = auto_complete_paths(current, completion_type) - - print(' '.join([x for x in subcommands if x.startswith(current)])) - sys.exit(1) - - -def get_path_completion_type(cwords, cword, opts): - """Get the type of path completion (``file``, ``dir``, ``path`` or None) - - :param cwords: same as the environmental variable ``COMP_WORDS`` - :param cword: same as the environmental variable ``COMP_CWORD`` - :param opts: The available options to check - :return: path completion type (``file``, ``dir``, ``path`` or None) - """ - if cword < 2 or not cwords[cword - 2].startswith('-'): - return - for opt in opts: - if opt.help == optparse.SUPPRESS_HELP: - continue - for o in str(opt).split('/'): - if cwords[cword - 2].split('=')[0] == o: - if not opt.metavar or any( - x in ('path', 'file', 'dir') - for x in opt.metavar.split('/')): - return opt.metavar - - -def 
auto_complete_paths(current, completion_type): - """If ``completion_type`` is ``file`` or ``path``, list all regular files - and directories starting with ``current``; otherwise only list directories - starting with ``current``. - - :param current: The word to be completed - :param completion_type: path completion type(`file`, `path` or `dir`)i - :return: A generator of regular files and/or directories - """ - directory, filename = os.path.split(current) - current_path = os.path.abspath(directory) - # Don't complete paths if they can't be accessed - if not os.access(current_path, os.R_OK): - return - filename = os.path.normcase(filename) - # list all files that start with ``filename`` - file_list = (x for x in os.listdir(current_path) - if os.path.normcase(x).startswith(filename)) - for f in file_list: - opt = os.path.join(current_path, f) - comp_file = os.path.normcase(os.path.join(directory, f)) - # complete regular files when there is not ```` after option - # complete directories when there is ````, ```` or - # ````after option - if completion_type != 'dir' and os.path.isfile(opt): - yield comp_file - elif os.path.isdir(opt): - yield os.path.join(comp_file, '') diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cli/base_command.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cli/base_command.py deleted file mode 100644 index 3ceea49..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cli/base_command.py +++ /dev/null @@ -1,341 +0,0 @@ -"""Base Command class, and related routines""" -from __future__ import absolute_import, print_function - -import logging -import logging.config -import optparse -import os -import platform -import sys -import traceback - -from pip._internal.cli import cmdoptions -from pip._internal.cli.parser import ( - ConfigOptionParser, UpdatingDefaultsHelpFormatter, -) -from pip._internal.cli.status_codes import ( - ERROR, PREVIOUS_BUILD_DIR_ERROR, SUCCESS, 
UNKNOWN_ERROR, - VIRTUALENV_NOT_FOUND, -) -from pip._internal.download import PipSession -from pip._internal.exceptions import ( - BadCommand, CommandError, InstallationError, PreviousBuildDirError, - UninstallationError, -) -from pip._internal.index import PackageFinder -from pip._internal.locations import running_under_virtualenv -from pip._internal.req.constructors import ( - install_req_from_editable, install_req_from_line, -) -from pip._internal.req.req_file import parse_requirements -from pip._internal.utils.deprecation import deprecated -from pip._internal.utils.logging import BrokenStdoutLoggingError, setup_logging -from pip._internal.utils.misc import ( - get_prog, normalize_path, redact_password_from_url, -) -from pip._internal.utils.outdated import pip_version_check -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from typing import Optional, List, Tuple, Any # noqa: F401 - from optparse import Values # noqa: F401 - from pip._internal.cache import WheelCache # noqa: F401 - from pip._internal.req.req_set import RequirementSet # noqa: F401 - -__all__ = ['Command'] - -logger = logging.getLogger(__name__) - - -class Command(object): - name = None # type: Optional[str] - usage = None # type: Optional[str] - hidden = False # type: bool - ignore_require_venv = False # type: bool - - def __init__(self, isolated=False): - # type: (bool) -> None - parser_kw = { - 'usage': self.usage, - 'prog': '%s %s' % (get_prog(), self.name), - 'formatter': UpdatingDefaultsHelpFormatter(), - 'add_help_option': False, - 'name': self.name, - 'description': self.__doc__, - 'isolated': isolated, - } - - self.parser = ConfigOptionParser(**parser_kw) - - # Commands should add options to this option group - optgroup_name = '%s Options' % self.name.capitalize() - self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name) - - # Add the general options - gen_opts = cmdoptions.make_option_group( - cmdoptions.general_group, - self.parser, - ) - 
self.parser.add_option_group(gen_opts) - - def run(self, options, args): - # type: (Values, List[Any]) -> Any - raise NotImplementedError - - def _build_session(self, options, retries=None, timeout=None): - # type: (Values, Optional[int], Optional[int]) -> PipSession - session = PipSession( - cache=( - normalize_path(os.path.join(options.cache_dir, "http")) - if options.cache_dir else None - ), - retries=retries if retries is not None else options.retries, - insecure_hosts=options.trusted_hosts, - ) - - # Handle custom ca-bundles from the user - if options.cert: - session.verify = options.cert - - # Handle SSL client certificate - if options.client_cert: - session.cert = options.client_cert - - # Handle timeouts - if options.timeout or timeout: - session.timeout = ( - timeout if timeout is not None else options.timeout - ) - - # Handle configured proxies - if options.proxy: - session.proxies = { - "http": options.proxy, - "https": options.proxy, - } - - # Determine if we can prompt the user for authentication or not - session.auth.prompting = not options.no_input - - return session - - def parse_args(self, args): - # type: (List[str]) -> Tuple - # factored out for testability - return self.parser.parse_args(args) - - def main(self, args): - # type: (List[str]) -> int - options, args = self.parse_args(args) - - # Set verbosity so that it can be used elsewhere. - self.verbosity = options.verbose - options.quiet - - level_number = setup_logging( - verbosity=self.verbosity, - no_color=options.no_color, - user_log_file=options.log, - ) - - if sys.version_info[:2] == (3, 4): - deprecated( - "Python 3.4 support has been deprecated. pip 19.1 will be the " - "last one supporting it. Please upgrade your Python as Python " - "3.4 won't be maintained after March 2019 (cf PEP 429).", - replacement=None, - gone_in='19.2', - ) - elif sys.version_info[:2] == (2, 7): - message = ( - "A future version of pip will drop support for Python 2.7." 
- ) - if platform.python_implementation() == "CPython": - message = ( - "Python 2.7 will reach the end of its life on January " - "1st, 2020. Please upgrade your Python as Python 2.7 " - "won't be maintained after that date. " - ) + message - deprecated(message, replacement=None, gone_in=None) - - # TODO: Try to get these passing down from the command? - # without resorting to os.environ to hold these. - # This also affects isolated builds and it should. - - if options.no_input: - os.environ['PIP_NO_INPUT'] = '1' - - if options.exists_action: - os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action) - - if options.require_venv and not self.ignore_require_venv: - # If a venv is required check if it can really be found - if not running_under_virtualenv(): - logger.critical( - 'Could not find an activated virtualenv (required).' - ) - sys.exit(VIRTUALENV_NOT_FOUND) - - try: - status = self.run(options, args) - # FIXME: all commands should return an exit status - # and when it is done, isinstance is not needed anymore - if isinstance(status, int): - return status - except PreviousBuildDirError as exc: - logger.critical(str(exc)) - logger.debug('Exception information:', exc_info=True) - - return PREVIOUS_BUILD_DIR_ERROR - except (InstallationError, UninstallationError, BadCommand) as exc: - logger.critical(str(exc)) - logger.debug('Exception information:', exc_info=True) - - return ERROR - except CommandError as exc: - logger.critical('ERROR: %s', exc) - logger.debug('Exception information:', exc_info=True) - - return ERROR - except BrokenStdoutLoggingError: - # Bypass our logger and write any remaining messages to stderr - # because stdout no longer works. 
- print('ERROR: Pipe to stdout was broken', file=sys.stderr) - if level_number <= logging.DEBUG: - traceback.print_exc(file=sys.stderr) - - return ERROR - except KeyboardInterrupt: - logger.critical('Operation cancelled by user') - logger.debug('Exception information:', exc_info=True) - - return ERROR - except BaseException: - logger.critical('Exception:', exc_info=True) - - return UNKNOWN_ERROR - finally: - allow_version_check = ( - # Does this command have the index_group options? - hasattr(options, "no_index") and - # Is this command allowed to perform this check? - not (options.disable_pip_version_check or options.no_index) - ) - # Check if we're using the latest version of pip available - if allow_version_check: - session = self._build_session( - options, - retries=0, - timeout=min(5, options.timeout) - ) - with session: - pip_version_check(session, options) - - # Shutdown the logging module - logging.shutdown() - - return SUCCESS - - -class RequirementCommand(Command): - - @staticmethod - def populate_requirement_set(requirement_set, # type: RequirementSet - args, # type: List[str] - options, # type: Values - finder, # type: PackageFinder - session, # type: PipSession - name, # type: str - wheel_cache # type: Optional[WheelCache] - ): - # type: (...) -> None - """ - Marshal cmd line args into a requirement set. 
- """ - # NOTE: As a side-effect, options.require_hashes and - # requirement_set.require_hashes may be updated - - for filename in options.constraints: - for req_to_add in parse_requirements( - filename, - constraint=True, finder=finder, options=options, - session=session, wheel_cache=wheel_cache): - req_to_add.is_direct = True - requirement_set.add_requirement(req_to_add) - - for req in args: - req_to_add = install_req_from_line( - req, None, isolated=options.isolated_mode, - use_pep517=options.use_pep517, - wheel_cache=wheel_cache - ) - req_to_add.is_direct = True - requirement_set.add_requirement(req_to_add) - - for req in options.editables: - req_to_add = install_req_from_editable( - req, - isolated=options.isolated_mode, - use_pep517=options.use_pep517, - wheel_cache=wheel_cache - ) - req_to_add.is_direct = True - requirement_set.add_requirement(req_to_add) - - for filename in options.requirements: - for req_to_add in parse_requirements( - filename, - finder=finder, options=options, session=session, - wheel_cache=wheel_cache, - use_pep517=options.use_pep517): - req_to_add.is_direct = True - requirement_set.add_requirement(req_to_add) - # If --require-hashes was a line in a requirements file, tell - # RequirementSet about it: - requirement_set.require_hashes = options.require_hashes - - if not (args or options.editables or options.requirements): - opts = {'name': name} - if options.find_links: - raise CommandError( - 'You must give at least one requirement to %(name)s ' - '(maybe you meant "pip %(name)s %(links)s"?)' % - dict(opts, links=' '.join(options.find_links))) - else: - raise CommandError( - 'You must give at least one requirement to %(name)s ' - '(see "pip help %(name)s")' % opts) - - def _build_package_finder( - self, - options, # type: Values - session, # type: PipSession - platform=None, # type: Optional[str] - python_versions=None, # type: Optional[List[str]] - abi=None, # type: Optional[str] - implementation=None # type: Optional[str] - ): - # 
type: (...) -> PackageFinder - """ - Create a package finder appropriate to this requirement command. - """ - index_urls = [options.index_url] + options.extra_index_urls - if options.no_index: - logger.debug( - 'Ignoring indexes: %s', - ','.join(redact_password_from_url(url) for url in index_urls), - ) - index_urls = [] - - return PackageFinder( - find_links=options.find_links, - format_control=options.format_control, - index_urls=index_urls, - trusted_hosts=options.trusted_hosts, - allow_all_prereleases=options.pre, - session=session, - platform=platform, - versions=python_versions, - abi=abi, - implementation=implementation, - prefer_binary=options.prefer_binary, - ) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cli/cmdoptions.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cli/cmdoptions.py deleted file mode 100644 index 5cf5ee9..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cli/cmdoptions.py +++ /dev/null @@ -1,809 +0,0 @@ -""" -shared options and groups - -The principle here is to define options once, but *not* instantiate them -globally. One reason being that options with action='append' can carry state -between parses. pip parses general options twice internally, and shouldn't -pass on state. To be consistent, all options will follow this design. 
- -""" -from __future__ import absolute_import - -import textwrap -import warnings -from distutils.util import strtobool -from functools import partial -from optparse import SUPPRESS_HELP, Option, OptionGroup - -from pip._internal.exceptions import CommandError -from pip._internal.locations import USER_CACHE_DIR, src_prefix -from pip._internal.models.format_control import FormatControl -from pip._internal.models.index import PyPI -from pip._internal.utils.hashes import STRONG_HASHES -from pip._internal.utils.typing import MYPY_CHECK_RUNNING -from pip._internal.utils.ui import BAR_TYPES - -if MYPY_CHECK_RUNNING: - from typing import Any, Callable, Dict, List, Optional, Union # noqa: F401 - from optparse import OptionParser, Values # noqa: F401 - from pip._internal.cli.parser import ConfigOptionParser # noqa: F401 - - -def raise_option_error(parser, option, msg): - """ - Raise an option parsing error using parser.error(). - - Args: - parser: an OptionParser instance. - option: an Option instance. - msg: the error text. - """ - msg = '{} error: {}'.format(option, msg) - msg = textwrap.fill(' '.join(msg.split())) - parser.error(msg) - - -def make_option_group(group, parser): - # type: (Dict[str, Any], ConfigOptionParser) -> OptionGroup - """ - Return an OptionGroup object - group -- assumed to be dict with 'name' and 'options' keys - parser -- an optparse Parser - """ - option_group = OptionGroup(parser, group['name']) - for option in group['options']: - option_group.add_option(option()) - return option_group - - -def check_install_build_global(options, check_options=None): - # type: (Values, Optional[Values]) -> None - """Disable wheels if per-setup.py call options are set. - - :param options: The OptionParser options to update. - :param check_options: The options to check, if not supplied defaults to - options. 
- """ - if check_options is None: - check_options = options - - def getname(n): - return getattr(check_options, n, None) - names = ["build_options", "global_options", "install_options"] - if any(map(getname, names)): - control = options.format_control - control.disallow_binaries() - warnings.warn( - 'Disabling all use of wheels due to the use of --build-options ' - '/ --global-options / --install-options.', stacklevel=2, - ) - - -def check_dist_restriction(options, check_target=False): - # type: (Values, bool) -> None - """Function for determining if custom platform options are allowed. - - :param options: The OptionParser options. - :param check_target: Whether or not to check if --target is being used. - """ - dist_restriction_set = any([ - options.python_version, - options.platform, - options.abi, - options.implementation, - ]) - - binary_only = FormatControl(set(), {':all:'}) - sdist_dependencies_allowed = ( - options.format_control != binary_only and - not options.ignore_dependencies - ) - - # Installations or downloads using dist restrictions must not combine - # source distributions and dist-specific wheels, as they are not - # gauranteed to be locally compatible. - if dist_restriction_set and sdist_dependencies_allowed: - raise CommandError( - "When restricting platform and interpreter constraints using " - "--python-version, --platform, --abi, or --implementation, " - "either --no-deps must be set, or --only-binary=:all: must be " - "set and --no-binary must not be set (or must be set to " - ":none:)." 
- ) - - if check_target: - if dist_restriction_set and not options.target_dir: - raise CommandError( - "Can not use any platform or abi specific options unless " - "installing via '--target'" - ) - - -########### -# options # -########### - -help_ = partial( - Option, - '-h', '--help', - dest='help', - action='help', - help='Show help.', -) # type: Callable[..., Option] - -isolated_mode = partial( - Option, - "--isolated", - dest="isolated_mode", - action="store_true", - default=False, - help=( - "Run pip in an isolated mode, ignoring environment variables and user " - "configuration." - ), -) # type: Callable[..., Option] - -require_virtualenv = partial( - Option, - # Run only if inside a virtualenv, bail if not. - '--require-virtualenv', '--require-venv', - dest='require_venv', - action='store_true', - default=False, - help=SUPPRESS_HELP -) # type: Callable[..., Option] - -verbose = partial( - Option, - '-v', '--verbose', - dest='verbose', - action='count', - default=0, - help='Give more output. Option is additive, and can be used up to 3 times.' -) # type: Callable[..., Option] - -no_color = partial( - Option, - '--no-color', - dest='no_color', - action='store_true', - default=False, - help="Suppress colored output", -) # type: Callable[..., Option] - -version = partial( - Option, - '-V', '--version', - dest='version', - action='store_true', - help='Show version and exit.', -) # type: Callable[..., Option] - -quiet = partial( - Option, - '-q', '--quiet', - dest='quiet', - action='count', - default=0, - help=( - 'Give less output. Option is additive, and can be used up to 3' - ' times (corresponding to WARNING, ERROR, and CRITICAL logging' - ' levels).' 
- ), -) # type: Callable[..., Option] - -progress_bar = partial( - Option, - '--progress-bar', - dest='progress_bar', - type='choice', - choices=list(BAR_TYPES.keys()), - default='on', - help=( - 'Specify type of progress to be displayed [' + - '|'.join(BAR_TYPES.keys()) + '] (default: %default)' - ), -) # type: Callable[..., Option] - -log = partial( - Option, - "--log", "--log-file", "--local-log", - dest="log", - metavar="path", - help="Path to a verbose appending log." -) # type: Callable[..., Option] - -no_input = partial( - Option, - # Don't ask for input - '--no-input', - dest='no_input', - action='store_true', - default=False, - help=SUPPRESS_HELP -) # type: Callable[..., Option] - -proxy = partial( - Option, - '--proxy', - dest='proxy', - type='str', - default='', - help="Specify a proxy in the form [user:passwd@]proxy.server:port." -) # type: Callable[..., Option] - -retries = partial( - Option, - '--retries', - dest='retries', - type='int', - default=5, - help="Maximum number of retries each connection should attempt " - "(default %default times).", -) # type: Callable[..., Option] - -timeout = partial( - Option, - '--timeout', '--default-timeout', - metavar='sec', - dest='timeout', - type='float', - default=15, - help='Set the socket timeout (default %default seconds).', -) # type: Callable[..., Option] - -skip_requirements_regex = partial( - Option, - # A regex to be used to skip requirements - '--skip-requirements-regex', - dest='skip_requirements_regex', - type='str', - default='', - help=SUPPRESS_HELP, -) # type: Callable[..., Option] - - -def exists_action(): - # type: () -> Option - return Option( - # Option when path already exist - '--exists-action', - dest='exists_action', - type='choice', - choices=['s', 'i', 'w', 'b', 'a'], - default=[], - action='append', - metavar='action', - help="Default action when a path already exists: " - "(s)witch, (i)gnore, (w)ipe, (b)ackup, (a)bort).", - ) - - -cert = partial( - Option, - '--cert', - dest='cert', - 
type='str', - metavar='path', - help="Path to alternate CA bundle.", -) # type: Callable[..., Option] - -client_cert = partial( - Option, - '--client-cert', - dest='client_cert', - type='str', - default=None, - metavar='path', - help="Path to SSL client certificate, a single file containing the " - "private key and the certificate in PEM format.", -) # type: Callable[..., Option] - -index_url = partial( - Option, - '-i', '--index-url', '--pypi-url', - dest='index_url', - metavar='URL', - default=PyPI.simple_url, - help="Base URL of Python Package Index (default %default). " - "This should point to a repository compliant with PEP 503 " - "(the simple repository API) or a local directory laid out " - "in the same format.", -) # type: Callable[..., Option] - - -def extra_index_url(): - return Option( - '--extra-index-url', - dest='extra_index_urls', - metavar='URL', - action='append', - default=[], - help="Extra URLs of package indexes to use in addition to " - "--index-url. Should follow the same rules as " - "--index-url.", - ) - - -no_index = partial( - Option, - '--no-index', - dest='no_index', - action='store_true', - default=False, - help='Ignore package index (only looking at --find-links URLs instead).', -) # type: Callable[..., Option] - - -def find_links(): - # type: () -> Option - return Option( - '-f', '--find-links', - dest='find_links', - action='append', - default=[], - metavar='url', - help="If a url or path to an html file, then parse for links to " - "archives. 
If a local path or file:// url that's a directory, " - "then look for archives in the directory listing.", - ) - - -def trusted_host(): - # type: () -> Option - return Option( - "--trusted-host", - dest="trusted_hosts", - action="append", - metavar="HOSTNAME", - default=[], - help="Mark this host as trusted, even though it does not have valid " - "or any HTTPS.", - ) - - -def constraints(): - # type: () -> Option - return Option( - '-c', '--constraint', - dest='constraints', - action='append', - default=[], - metavar='file', - help='Constrain versions using the given constraints file. ' - 'This option can be used multiple times.' - ) - - -def requirements(): - # type: () -> Option - return Option( - '-r', '--requirement', - dest='requirements', - action='append', - default=[], - metavar='file', - help='Install from the given requirements file. ' - 'This option can be used multiple times.' - ) - - -def editable(): - # type: () -> Option - return Option( - '-e', '--editable', - dest='editables', - action='append', - default=[], - metavar='path/url', - help=('Install a project in editable mode (i.e. setuptools ' - '"develop mode") from a local project path or a VCS url.'), - ) - - -src = partial( - Option, - '--src', '--source', '--source-dir', '--source-directory', - dest='src_dir', - metavar='dir', - default=src_prefix, - help='Directory to check out editable projects into. ' - 'The default in a virtualenv is "/src". ' - 'The default for global installs is "/src".' 
-) # type: Callable[..., Option] - - -def _get_format_control(values, option): - # type: (Values, Option) -> Any - """Get a format_control object.""" - return getattr(values, option.dest) - - -def _handle_no_binary(option, opt_str, value, parser): - # type: (Option, str, str, OptionParser) -> None - existing = _get_format_control(parser.values, option) - FormatControl.handle_mutual_excludes( - value, existing.no_binary, existing.only_binary, - ) - - -def _handle_only_binary(option, opt_str, value, parser): - # type: (Option, str, str, OptionParser) -> None - existing = _get_format_control(parser.values, option) - FormatControl.handle_mutual_excludes( - value, existing.only_binary, existing.no_binary, - ) - - -def no_binary(): - # type: () -> Option - format_control = FormatControl(set(), set()) - return Option( - "--no-binary", dest="format_control", action="callback", - callback=_handle_no_binary, type="str", - default=format_control, - help="Do not use binary packages. Can be supplied multiple times, and " - "each time adds to the existing value. Accepts either :all: to " - "disable all binary packages, :none: to empty the set, or one or " - "more package names with commas between them. Note that some " - "packages are tricky to compile and may fail to install when " - "this option is used on them.", - ) - - -def only_binary(): - # type: () -> Option - format_control = FormatControl(set(), set()) - return Option( - "--only-binary", dest="format_control", action="callback", - callback=_handle_only_binary, type="str", - default=format_control, - help="Do not use source packages. Can be supplied multiple times, and " - "each time adds to the existing value. Accepts either :all: to " - "disable all source packages, :none: to empty the set, or one or " - "more package names with commas between them. 
Packages without " - "binary distributions will fail to install when this option is " - "used on them.", - ) - - -platform = partial( - Option, - '--platform', - dest='platform', - metavar='platform', - default=None, - help=("Only use wheels compatible with . " - "Defaults to the platform of the running system."), -) # type: Callable[..., Option] - - -python_version = partial( - Option, - '--python-version', - dest='python_version', - metavar='python_version', - default=None, - help=("Only use wheels compatible with Python " - "interpreter version . If not specified, then the " - "current system interpreter minor version is used. A major " - "version (e.g. '2') can be specified to match all " - "minor revs of that major version. A minor version " - "(e.g. '34') can also be specified."), -) # type: Callable[..., Option] - - -implementation = partial( - Option, - '--implementation', - dest='implementation', - metavar='implementation', - default=None, - help=("Only use wheels compatible with Python " - "implementation , e.g. 'pp', 'jy', 'cp', " - " or 'ip'. If not specified, then the current " - "interpreter implementation is used. Use 'py' to force " - "implementation-agnostic wheels."), -) # type: Callable[..., Option] - - -abi = partial( - Option, - '--abi', - dest='abi', - metavar='abi', - default=None, - help=("Only use wheels compatible with Python " - "abi , e.g. 'pypy_41'. If not specified, then the " - "current interpreter abi tag is used. Generally " - "you will need to specify --implementation, " - "--platform, and --python-version when using " - "this option."), -) # type: Callable[..., Option] - - -def prefer_binary(): - # type: () -> Option - return Option( - "--prefer-binary", - dest="prefer_binary", - action="store_true", - default=False, - help="Prefer older binary packages over newer source packages." 
- ) - - -cache_dir = partial( - Option, - "--cache-dir", - dest="cache_dir", - default=USER_CACHE_DIR, - metavar="dir", - help="Store the cache data in ." -) # type: Callable[..., Option] - - -def no_cache_dir_callback(option, opt, value, parser): - """ - Process a value provided for the --no-cache-dir option. - - This is an optparse.Option callback for the --no-cache-dir option. - """ - # The value argument will be None if --no-cache-dir is passed via the - # command-line, since the option doesn't accept arguments. However, - # the value can be non-None if the option is triggered e.g. by an - # environment variable, like PIP_NO_CACHE_DIR=true. - if value is not None: - # Then parse the string value to get argument error-checking. - try: - strtobool(value) - except ValueError as exc: - raise_option_error(parser, option=option, msg=str(exc)) - - # Originally, setting PIP_NO_CACHE_DIR to a value that strtobool() - # converted to 0 (like "false" or "no") caused cache_dir to be disabled - # rather than enabled (logic would say the latter). Thus, we disable - # the cache directory not just on values that parse to True, but (for - # backwards compatibility reasons) also on values that parse to False. - # In other words, always set it to False if the option is provided in - # some (valid) form. - parser.values.cache_dir = False - - -no_cache = partial( - Option, - "--no-cache-dir", - dest="cache_dir", - action="callback", - callback=no_cache_dir_callback, - help="Disable the cache.", -) # type: Callable[..., Option] - -no_deps = partial( - Option, - '--no-deps', '--no-dependencies', - dest='ignore_dependencies', - action='store_true', - default=False, - help="Don't install package dependencies.", -) # type: Callable[..., Option] - -build_dir = partial( - Option, - '-b', '--build', '--build-dir', '--build-directory', - dest='build_dir', - metavar='dir', - help='Directory to unpack packages into and build in. 
Note that ' - 'an initial build still takes place in a temporary directory. ' - 'The location of temporary directories can be controlled by setting ' - 'the TMPDIR environment variable (TEMP on Windows) appropriately. ' - 'When passed, build directories are not cleaned in case of failures.' -) # type: Callable[..., Option] - -ignore_requires_python = partial( - Option, - '--ignore-requires-python', - dest='ignore_requires_python', - action='store_true', - help='Ignore the Requires-Python information.' -) # type: Callable[..., Option] - -no_build_isolation = partial( - Option, - '--no-build-isolation', - dest='build_isolation', - action='store_false', - default=True, - help='Disable isolation when building a modern source distribution. ' - 'Build dependencies specified by PEP 518 must be already installed ' - 'if this option is used.' -) # type: Callable[..., Option] - - -def no_use_pep517_callback(option, opt, value, parser): - """ - Process a value provided for the --no-use-pep517 option. - - This is an optparse.Option callback for the no_use_pep517 option. - """ - # Since --no-use-pep517 doesn't accept arguments, the value argument - # will be None if --no-use-pep517 is passed via the command-line. - # However, the value can be non-None if the option is triggered e.g. - # by an environment variable, for example "PIP_NO_USE_PEP517=true". - if value is not None: - msg = """A value was passed for --no-use-pep517, - probably using either the PIP_NO_USE_PEP517 environment variable - or the "no-use-pep517" config file option. Use an appropriate value - of the PIP_USE_PEP517 environment variable or the "use-pep517" - config file option instead. - """ - raise_option_error(parser, option=option, msg=msg) - - # Otherwise, --no-use-pep517 was passed via the command-line. 
- parser.values.use_pep517 = False - - -use_pep517 = partial( - Option, - '--use-pep517', - dest='use_pep517', - action='store_true', - default=None, - help='Use PEP 517 for building source distributions ' - '(use --no-use-pep517 to force legacy behaviour).' -) # type: Any - -no_use_pep517 = partial( - Option, - '--no-use-pep517', - dest='use_pep517', - action='callback', - callback=no_use_pep517_callback, - default=None, - help=SUPPRESS_HELP -) # type: Any - -install_options = partial( - Option, - '--install-option', - dest='install_options', - action='append', - metavar='options', - help="Extra arguments to be supplied to the setup.py install " - "command (use like --install-option=\"--install-scripts=/usr/local/" - "bin\"). Use multiple --install-option options to pass multiple " - "options to setup.py install. If you are using an option with a " - "directory path, be sure to use absolute path.", -) # type: Callable[..., Option] - -global_options = partial( - Option, - '--global-option', - dest='global_options', - action='append', - metavar='options', - help="Extra global options to be supplied to the setup.py " - "call before the install command.", -) # type: Callable[..., Option] - -no_clean = partial( - Option, - '--no-clean', - action='store_true', - default=False, - help="Don't clean up build directories." -) # type: Callable[..., Option] - -pre = partial( - Option, - '--pre', - action='store_true', - default=False, - help="Include pre-release and development versions. By default, " - "pip only finds stable versions.", -) # type: Callable[..., Option] - -disable_pip_version_check = partial( - Option, - "--disable-pip-version-check", - dest="disable_pip_version_check", - action="store_true", - default=False, - help="Don't periodically check PyPI to determine whether a new version " - "of pip is available for download. 
Implied with --no-index.", -) # type: Callable[..., Option] - - -# Deprecated, Remove later -always_unzip = partial( - Option, - '-Z', '--always-unzip', - dest='always_unzip', - action='store_true', - help=SUPPRESS_HELP, -) # type: Callable[..., Option] - - -def _merge_hash(option, opt_str, value, parser): - # type: (Option, str, str, OptionParser) -> None - """Given a value spelled "algo:digest", append the digest to a list - pointed to in a dict by the algo name.""" - if not parser.values.hashes: - parser.values.hashes = {} # type: ignore - try: - algo, digest = value.split(':', 1) - except ValueError: - parser.error('Arguments to %s must be a hash name ' - 'followed by a value, like --hash=sha256:abcde...' % - opt_str) - if algo not in STRONG_HASHES: - parser.error('Allowed hash algorithms for %s are %s.' % - (opt_str, ', '.join(STRONG_HASHES))) - parser.values.hashes.setdefault(algo, []).append(digest) - - -hash = partial( - Option, - '--hash', - # Hash values eventually end up in InstallRequirement.hashes due to - # __dict__ copying in process_line(). - dest='hashes', - action='callback', - callback=_merge_hash, - type='string', - help="Verify that the package's archive matches this " - 'hash before installing. Example: --hash=sha256:abcdef...', -) # type: Callable[..., Option] - - -require_hashes = partial( - Option, - '--require-hashes', - dest='require_hashes', - action='store_true', - default=False, - help='Require a hash to check each requirement against, for ' - 'repeatable installs. 
This option is implied when any package in a ' - 'requirements file has a --hash option.', -) # type: Callable[..., Option] - - -########## -# groups # -########## - -general_group = { - 'name': 'General Options', - 'options': [ - help_, - isolated_mode, - require_virtualenv, - verbose, - version, - quiet, - log, - no_input, - proxy, - retries, - timeout, - skip_requirements_regex, - exists_action, - trusted_host, - cert, - client_cert, - cache_dir, - no_cache, - disable_pip_version_check, - no_color, - ] -} # type: Dict[str, Any] - -index_group = { - 'name': 'Package Index Options', - 'options': [ - index_url, - extra_index_url, - no_index, - find_links, - ] -} # type: Dict[str, Any] diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cli/main_parser.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cli/main_parser.py deleted file mode 100644 index b17c749..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cli/main_parser.py +++ /dev/null @@ -1,104 +0,0 @@ -"""A single place for constructing and exposing the main parser -""" - -import os -import sys - -from pip import __version__ -from pip._internal.cli import cmdoptions -from pip._internal.cli.parser import ( - ConfigOptionParser, UpdatingDefaultsHelpFormatter, -) -from pip._internal.commands import ( - commands_dict, get_similar_commands, get_summaries, -) -from pip._internal.exceptions import CommandError -from pip._internal.utils.misc import get_prog -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from typing import Tuple, List # noqa: F401 - - -__all__ = ["create_main_parser", "parse_command"] - - -def create_main_parser(): - # type: () -> ConfigOptionParser - """Creates and returns the main parser for pip's CLI - """ - - parser_kw = { - 'usage': '\n%prog [options]', - 'add_help_option': False, - 'formatter': UpdatingDefaultsHelpFormatter(), - 'name': 'global', - 'prog': get_prog(), - } - 
- parser = ConfigOptionParser(**parser_kw) - parser.disable_interspersed_args() - - pip_pkg_dir = os.path.abspath(os.path.join( - os.path.dirname(__file__), "..", "..", - )) - parser.version = 'pip %s from %s (python %s)' % ( - __version__, pip_pkg_dir, sys.version[:3], - ) - - # add the general options - gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser) - parser.add_option_group(gen_opts) - - # so the help formatter knows - parser.main = True # type: ignore - - # create command listing for description - command_summaries = get_summaries() - description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries] - parser.description = '\n'.join(description) - - return parser - - -def parse_command(args): - # type: (List[str]) -> Tuple[str, List[str]] - parser = create_main_parser() - - # Note: parser calls disable_interspersed_args(), so the result of this - # call is to split the initial args into the general options before the - # subcommand and everything else. 
- # For example: - # args: ['--timeout=5', 'install', '--user', 'INITools'] - # general_options: ['--timeout==5'] - # args_else: ['install', '--user', 'INITools'] - general_options, args_else = parser.parse_args(args) - - # --version - if general_options.version: - sys.stdout.write(parser.version) # type: ignore - sys.stdout.write(os.linesep) - sys.exit() - - # pip || pip help -> print_help() - if not args_else or (args_else[0] == 'help' and len(args_else) == 1): - parser.print_help() - sys.exit() - - # the subcommand name - cmd_name = args_else[0] - - if cmd_name not in commands_dict: - guess = get_similar_commands(cmd_name) - - msg = ['unknown command "%s"' % cmd_name] - if guess: - msg.append('maybe you meant "%s"' % guess) - - raise CommandError(' - '.join(msg)) - - # all the args without the subcommand - cmd_args = args[:] - cmd_args.remove(cmd_name) - - return cmd_name, cmd_args diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cli/parser.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cli/parser.py deleted file mode 100644 index e1eaac4..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cli/parser.py +++ /dev/null @@ -1,261 +0,0 @@ -"""Base option parser setup""" -from __future__ import absolute_import - -import logging -import optparse -import sys -import textwrap -from distutils.util import strtobool - -from pip._vendor.six import string_types - -from pip._internal.cli.status_codes import UNKNOWN_ERROR -from pip._internal.configuration import Configuration, ConfigurationError -from pip._internal.utils.compat import get_terminal_size - -logger = logging.getLogger(__name__) - - -class PrettyHelpFormatter(optparse.IndentedHelpFormatter): - """A prettier/less verbose help formatter for optparse.""" - - def __init__(self, *args, **kwargs): - # help position must be aligned with __init__.parseopts.description - kwargs['max_help_position'] = 30 - kwargs['indent_increment'] = 1 
- kwargs['width'] = get_terminal_size()[0] - 2 - optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs) - - def format_option_strings(self, option): - return self._format_option_strings(option, ' <%s>', ', ') - - def _format_option_strings(self, option, mvarfmt=' <%s>', optsep=', '): - """ - Return a comma-separated list of option strings and metavars. - - :param option: tuple of (short opt, long opt), e.g: ('-f', '--format') - :param mvarfmt: metavar format string - evaluated as mvarfmt % metavar - :param optsep: separator - """ - opts = [] - - if option._short_opts: - opts.append(option._short_opts[0]) - if option._long_opts: - opts.append(option._long_opts[0]) - if len(opts) > 1: - opts.insert(1, optsep) - - if option.takes_value(): - metavar = option.metavar or option.dest.lower() - opts.append(mvarfmt % metavar.lower()) - - return ''.join(opts) - - def format_heading(self, heading): - if heading == 'Options': - return '' - return heading + ':\n' - - def format_usage(self, usage): - """ - Ensure there is only one newline between usage and the first heading - if there is no description. 
- """ - msg = '\nUsage: %s\n' % self.indent_lines(textwrap.dedent(usage), " ") - return msg - - def format_description(self, description): - # leave full control over description to us - if description: - if hasattr(self.parser, 'main'): - label = 'Commands' - else: - label = 'Description' - # some doc strings have initial newlines, some don't - description = description.lstrip('\n') - # some doc strings have final newlines and spaces, some don't - description = description.rstrip() - # dedent, then reindent - description = self.indent_lines(textwrap.dedent(description), " ") - description = '%s:\n%s\n' % (label, description) - return description - else: - return '' - - def format_epilog(self, epilog): - # leave full control over epilog to us - if epilog: - return epilog - else: - return '' - - def indent_lines(self, text, indent): - new_lines = [indent + line for line in text.split('\n')] - return "\n".join(new_lines) - - -class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter): - """Custom help formatter for use in ConfigOptionParser. - - This is updates the defaults before expanding them, allowing - them to show up correctly in the help listing. 
- """ - - def expand_default(self, option): - if self.parser is not None: - self.parser._update_defaults(self.parser.defaults) - return optparse.IndentedHelpFormatter.expand_default(self, option) - - -class CustomOptionParser(optparse.OptionParser): - - def insert_option_group(self, idx, *args, **kwargs): - """Insert an OptionGroup at a given position.""" - group = self.add_option_group(*args, **kwargs) - - self.option_groups.pop() - self.option_groups.insert(idx, group) - - return group - - @property - def option_list_all(self): - """Get a list of all options, including those in option groups.""" - res = self.option_list[:] - for i in self.option_groups: - res.extend(i.option_list) - - return res - - -class ConfigOptionParser(CustomOptionParser): - """Custom option parser which updates its defaults by checking the - configuration files and environmental variables""" - - def __init__(self, *args, **kwargs): - self.name = kwargs.pop('name') - - isolated = kwargs.pop("isolated", False) - self.config = Configuration(isolated) - - assert self.name - optparse.OptionParser.__init__(self, *args, **kwargs) - - def check_default(self, option, key, val): - try: - return option.check_value(key, val) - except optparse.OptionValueError as exc: - print("An error occurred during configuration: %s" % exc) - sys.exit(3) - - def _get_ordered_configuration_items(self): - # Configuration gives keys in an unordered manner. Order them. 
- override_order = ["global", self.name, ":env:"] - - # Pool the options into different groups - section_items = {name: [] for name in override_order} - for section_key, val in self.config.items(): - # ignore empty values - if not val: - logger.debug( - "Ignoring configuration key '%s' as it's value is empty.", - section_key - ) - continue - - section, key = section_key.split(".", 1) - if section in override_order: - section_items[section].append((key, val)) - - # Yield each group in their override order - for section in override_order: - for key, val in section_items[section]: - yield key, val - - def _update_defaults(self, defaults): - """Updates the given defaults with values from the config files and - the environ. Does a little special handling for certain types of - options (lists).""" - - # Accumulate complex default state. - self.values = optparse.Values(self.defaults) - late_eval = set() - # Then set the options with those values - for key, val in self._get_ordered_configuration_items(): - # '--' because configuration supports only long names - option = self.get_option('--' + key) - - # Ignore options not present in this parser. E.g. non-globals put - # in [global] by users that want them to apply to all applicable - # commands. 
- if option is None: - continue - - if option.action in ('store_true', 'store_false', 'count'): - try: - val = strtobool(val) - except ValueError: - error_msg = invalid_config_error_message( - option.action, key, val - ) - self.error(error_msg) - - elif option.action == 'append': - val = val.split() - val = [self.check_default(option, key, v) for v in val] - elif option.action == 'callback': - late_eval.add(option.dest) - opt_str = option.get_opt_string() - val = option.convert_value(opt_str, val) - # From take_action - args = option.callback_args or () - kwargs = option.callback_kwargs or {} - option.callback(option, opt_str, val, self, *args, **kwargs) - else: - val = self.check_default(option, key, val) - - defaults[option.dest] = val - - for key in late_eval: - defaults[key] = getattr(self.values, key) - self.values = None - return defaults - - def get_default_values(self): - """Overriding to make updating the defaults after instantiation of - the option parser possible, _update_defaults() does the dirty work.""" - if not self.process_default_values: - # Old, pre-Optik 1.5 behaviour. 
- return optparse.Values(self.defaults) - - # Load the configuration, or error out in case of an error - try: - self.config.load() - except ConfigurationError as err: - self.exit(UNKNOWN_ERROR, str(err)) - - defaults = self._update_defaults(self.defaults.copy()) # ours - for option in self._get_all_options(): - default = defaults.get(option.dest) - if isinstance(default, string_types): - opt_str = option.get_opt_string() - defaults[option.dest] = option.check_value(opt_str, default) - return optparse.Values(defaults) - - def error(self, msg): - self.print_usage(sys.stderr) - self.exit(UNKNOWN_ERROR, "%s\n" % msg) - - -def invalid_config_error_message(action, key, val): - """Returns a better error message when invalid configuration option - is provided.""" - if action in ('store_true', 'store_false'): - return ("{0} is not a valid value for {1} option, " - "please specify a boolean value like yes/no, " - "true/false or 1/0 instead.").format(val, key) - - return ("{0} is not a valid value for {1} option, " - "please specify a numerical value like 1/0 " - "instead.").format(val, key) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cli/status_codes.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cli/status_codes.py deleted file mode 100644 index 275360a..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cli/status_codes.py +++ /dev/null @@ -1,8 +0,0 @@ -from __future__ import absolute_import - -SUCCESS = 0 -ERROR = 1 -UNKNOWN_ERROR = 2 -VIRTUALENV_NOT_FOUND = 3 -PREVIOUS_BUILD_DIR_ERROR = 4 -NO_MATCHES_FOUND = 23 diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/__init__.py deleted file mode 100644 index c7d1da3..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/__init__.py +++ /dev/null @@ -1,79 +0,0 @@ -""" 
-Package containing all pip commands -""" -from __future__ import absolute_import - -from pip._internal.commands.completion import CompletionCommand -from pip._internal.commands.configuration import ConfigurationCommand -from pip._internal.commands.download import DownloadCommand -from pip._internal.commands.freeze import FreezeCommand -from pip._internal.commands.hash import HashCommand -from pip._internal.commands.help import HelpCommand -from pip._internal.commands.list import ListCommand -from pip._internal.commands.check import CheckCommand -from pip._internal.commands.search import SearchCommand -from pip._internal.commands.show import ShowCommand -from pip._internal.commands.install import InstallCommand -from pip._internal.commands.uninstall import UninstallCommand -from pip._internal.commands.wheel import WheelCommand - -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from typing import List, Type # noqa: F401 - from pip._internal.cli.base_command import Command # noqa: F401 - -commands_order = [ - InstallCommand, - DownloadCommand, - UninstallCommand, - FreezeCommand, - ListCommand, - ShowCommand, - CheckCommand, - ConfigurationCommand, - SearchCommand, - WheelCommand, - HashCommand, - CompletionCommand, - HelpCommand, -] # type: List[Type[Command]] - -commands_dict = {c.name: c for c in commands_order} - - -def get_summaries(ordered=True): - """Yields sorted (command name, command summary) tuples.""" - - if ordered: - cmditems = _sort_commands(commands_dict, commands_order) - else: - cmditems = commands_dict.items() - - for name, command_class in cmditems: - yield (name, command_class.summary) - - -def get_similar_commands(name): - """Command name auto-correct.""" - from difflib import get_close_matches - - name = name.lower() - - close_commands = get_close_matches(name, commands_dict.keys()) - - if close_commands: - return close_commands[0] - else: - return False - - -def _sort_commands(cmddict, order): - def 
keyfn(key): - try: - return order.index(key[1]) - except ValueError: - # unordered items should come last - return 0xff - - return sorted(cmddict.items(), key=keyfn) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/check.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/check.py deleted file mode 100644 index 801cecc..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/check.py +++ /dev/null @@ -1,41 +0,0 @@ -import logging - -from pip._internal.cli.base_command import Command -from pip._internal.operations.check import ( - check_package_set, create_package_set_from_installed, -) - -logger = logging.getLogger(__name__) - - -class CheckCommand(Command): - """Verify installed packages have compatible dependencies.""" - name = 'check' - usage = """ - %prog [options]""" - summary = 'Verify installed packages have compatible dependencies.' - - def run(self, options, args): - package_set, parsing_probs = create_package_set_from_installed() - missing, conflicting = check_package_set(package_set) - - for project_name in missing: - version = package_set[project_name].version - for dependency in missing[project_name]: - logger.info( - "%s %s requires %s, which is not installed.", - project_name, version, dependency[0], - ) - - for project_name in conflicting: - version = package_set[project_name].version - for dep_name, dep_version, req in conflicting[project_name]: - logger.info( - "%s %s has requirement %s, but you have %s %s.", - project_name, version, req, dep_name, dep_version, - ) - - if missing or conflicting or parsing_probs: - return 1 - else: - logger.info("No broken requirements found.") diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/completion.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/completion.py deleted file mode 100644 index 2fcdd39..0000000 --- 
a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/completion.py +++ /dev/null @@ -1,94 +0,0 @@ -from __future__ import absolute_import - -import sys -import textwrap - -from pip._internal.cli.base_command import Command -from pip._internal.utils.misc import get_prog - -BASE_COMPLETION = """ -# pip %(shell)s completion start%(script)s# pip %(shell)s completion end -""" - -COMPLETION_SCRIPTS = { - 'bash': """ - _pip_completion() - { - COMPREPLY=( $( COMP_WORDS="${COMP_WORDS[*]}" \\ - COMP_CWORD=$COMP_CWORD \\ - PIP_AUTO_COMPLETE=1 $1 ) ) - } - complete -o default -F _pip_completion %(prog)s - """, - 'zsh': """ - function _pip_completion { - local words cword - read -Ac words - read -cn cword - reply=( $( COMP_WORDS="$words[*]" \\ - COMP_CWORD=$(( cword-1 )) \\ - PIP_AUTO_COMPLETE=1 $words[1] ) ) - } - compctl -K _pip_completion %(prog)s - """, - 'fish': """ - function __fish_complete_pip - set -lx COMP_WORDS (commandline -o) "" - set -lx COMP_CWORD ( \\ - math (contains -i -- (commandline -t) $COMP_WORDS)-1 \\ - ) - set -lx PIP_AUTO_COMPLETE 1 - string split \\ -- (eval $COMP_WORDS[1]) - end - complete -fa "(__fish_complete_pip)" -c %(prog)s - """, -} - - -class CompletionCommand(Command): - """A helper command to be used for command completion.""" - name = 'completion' - summary = 'A helper command used for command completion.' 
- ignore_require_venv = True - - def __init__(self, *args, **kw): - super(CompletionCommand, self).__init__(*args, **kw) - - cmd_opts = self.cmd_opts - - cmd_opts.add_option( - '--bash', '-b', - action='store_const', - const='bash', - dest='shell', - help='Emit completion code for bash') - cmd_opts.add_option( - '--zsh', '-z', - action='store_const', - const='zsh', - dest='shell', - help='Emit completion code for zsh') - cmd_opts.add_option( - '--fish', '-f', - action='store_const', - const='fish', - dest='shell', - help='Emit completion code for fish') - - self.parser.insert_option_group(0, cmd_opts) - - def run(self, options, args): - """Prints the completion code of the given shell""" - shells = COMPLETION_SCRIPTS.keys() - shell_options = ['--' + shell for shell in sorted(shells)] - if options.shell in shells: - script = textwrap.dedent( - COMPLETION_SCRIPTS.get(options.shell, '') % { - 'prog': get_prog(), - } - ) - print(BASE_COMPLETION % {'script': script, 'shell': options.shell}) - else: - sys.stderr.write( - 'ERROR: You must pass %s\n' % ' or '.join(shell_options) - ) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/configuration.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/configuration.py deleted file mode 100644 index 826c08d..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/configuration.py +++ /dev/null @@ -1,227 +0,0 @@ -import logging -import os -import subprocess - -from pip._internal.cli.base_command import Command -from pip._internal.cli.status_codes import ERROR, SUCCESS -from pip._internal.configuration import Configuration, kinds -from pip._internal.exceptions import PipError -from pip._internal.locations import venv_config_file -from pip._internal.utils.misc import get_prog - -logger = logging.getLogger(__name__) - - -class ConfigurationCommand(Command): - """Manage local and global configuration. 
- - Subcommands: - - list: List the active configuration (or from the file specified) - edit: Edit the configuration file in an editor - get: Get the value associated with name - set: Set the name=value - unset: Unset the value associated with name - - If none of --user, --global and --venv are passed, a virtual - environment configuration file is used if one is active and the file - exists. Otherwise, all modifications happen on the to the user file by - default. - """ - - name = 'config' - usage = """ - %prog [] list - %prog [] [--editor ] edit - - %prog [] get name - %prog [] set name value - %prog [] unset name - """ - - summary = "Manage local and global configuration." - - def __init__(self, *args, **kwargs): - super(ConfigurationCommand, self).__init__(*args, **kwargs) - - self.configuration = None - - self.cmd_opts.add_option( - '--editor', - dest='editor', - action='store', - default=None, - help=( - 'Editor to use to edit the file. Uses VISUAL or EDITOR ' - 'environment variables if not provided.' 
- ) - ) - - self.cmd_opts.add_option( - '--global', - dest='global_file', - action='store_true', - default=False, - help='Use the system-wide configuration file only' - ) - - self.cmd_opts.add_option( - '--user', - dest='user_file', - action='store_true', - default=False, - help='Use the user configuration file only' - ) - - self.cmd_opts.add_option( - '--venv', - dest='venv_file', - action='store_true', - default=False, - help='Use the virtualenv configuration file only' - ) - - self.parser.insert_option_group(0, self.cmd_opts) - - def run(self, options, args): - handlers = { - "list": self.list_values, - "edit": self.open_in_editor, - "get": self.get_name, - "set": self.set_name_value, - "unset": self.unset_name - } - - # Determine action - if not args or args[0] not in handlers: - logger.error("Need an action ({}) to perform.".format( - ", ".join(sorted(handlers))) - ) - return ERROR - - action = args[0] - - # Determine which configuration files are to be loaded - # Depends on whether the command is modifying. - try: - load_only = self._determine_file( - options, need_value=(action in ["get", "set", "unset", "edit"]) - ) - except PipError as e: - logger.error(e.args[0]) - return ERROR - - # Load a new configuration - self.configuration = Configuration( - isolated=options.isolated_mode, load_only=load_only - ) - self.configuration.load() - - # Error handling happens here, not in the action-handlers. - try: - handlers[action](options, args[1:]) - except PipError as e: - logger.error(e.args[0]) - return ERROR - - return SUCCESS - - def _determine_file(self, options, need_value): - file_options = { - kinds.USER: options.user_file, - kinds.GLOBAL: options.global_file, - kinds.VENV: options.venv_file - } - - if sum(file_options.values()) == 0: - if not need_value: - return None - # Default to user, unless there's a virtualenv file. 
- elif os.path.exists(venv_config_file): - return kinds.VENV - else: - return kinds.USER - elif sum(file_options.values()) == 1: - # There's probably a better expression for this. - return [key for key in file_options if file_options[key]][0] - - raise PipError( - "Need exactly one file to operate upon " - "(--user, --venv, --global) to perform." - ) - - def list_values(self, options, args): - self._get_n_args(args, "list", n=0) - - for key, value in sorted(self.configuration.items()): - logger.info("%s=%r", key, value) - - def get_name(self, options, args): - key = self._get_n_args(args, "get [name]", n=1) - value = self.configuration.get_value(key) - - logger.info("%s", value) - - def set_name_value(self, options, args): - key, value = self._get_n_args(args, "set [name] [value]", n=2) - self.configuration.set_value(key, value) - - self._save_configuration() - - def unset_name(self, options, args): - key = self._get_n_args(args, "unset [name]", n=1) - self.configuration.unset_value(key) - - self._save_configuration() - - def open_in_editor(self, options, args): - editor = self._determine_editor(options) - - fname = self.configuration.get_file_to_edit() - if fname is None: - raise PipError("Could not determine appropriate file.") - - try: - subprocess.check_call([editor, fname]) - except subprocess.CalledProcessError as e: - raise PipError( - "Editor Subprocess exited with exit code {}" - .format(e.returncode) - ) - - def _get_n_args(self, args, example, n): - """Helper to make sure the command got the right number of arguments - """ - if len(args) != n: - msg = ( - 'Got unexpected number of arguments, expected {}. ' - '(example: "{} config {}")' - ).format(n, get_prog(), example) - raise PipError(msg) - - if n == 1: - return args[0] - else: - return args - - def _save_configuration(self): - # We successfully ran a modifying command. Need to save the - # configuration. 
- try: - self.configuration.save() - except Exception: - logger.error( - "Unable to save configuration. Please report this as a bug.", - exc_info=1 - ) - raise PipError("Internal Error.") - - def _determine_editor(self, options): - if options.editor is not None: - return options.editor - elif "VISUAL" in os.environ: - return os.environ["VISUAL"] - elif "EDITOR" in os.environ: - return os.environ["EDITOR"] - else: - raise PipError("Could not determine editor to use.") diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/download.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/download.py deleted file mode 100644 index a57e4bc..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/download.py +++ /dev/null @@ -1,176 +0,0 @@ -from __future__ import absolute_import - -import logging -import os - -from pip._internal.cli import cmdoptions -from pip._internal.cli.base_command import RequirementCommand -from pip._internal.operations.prepare import RequirementPreparer -from pip._internal.req import RequirementSet -from pip._internal.req.req_tracker import RequirementTracker -from pip._internal.resolve import Resolver -from pip._internal.utils.filesystem import check_path_owner -from pip._internal.utils.misc import ensure_dir, normalize_path -from pip._internal.utils.temp_dir import TempDirectory - -logger = logging.getLogger(__name__) - - -class DownloadCommand(RequirementCommand): - """ - Download packages from: - - - PyPI (and other indexes) using requirement specifiers. - - VCS project urls. - - Local project directories. - - Local or remote source archives. - - pip also supports downloading from "requirements files", which provide - an easy way to specify a whole environment to be downloaded. - """ - name = 'download' - - usage = """ - %prog [options] [package-index-options] ... - %prog [options] -r [package-index-options] ... - %prog [options] ... 
- %prog [options] ... - %prog [options] ...""" - - summary = 'Download packages.' - - def __init__(self, *args, **kw): - super(DownloadCommand, self).__init__(*args, **kw) - - cmd_opts = self.cmd_opts - - cmd_opts.add_option(cmdoptions.constraints()) - cmd_opts.add_option(cmdoptions.requirements()) - cmd_opts.add_option(cmdoptions.build_dir()) - cmd_opts.add_option(cmdoptions.no_deps()) - cmd_opts.add_option(cmdoptions.global_options()) - cmd_opts.add_option(cmdoptions.no_binary()) - cmd_opts.add_option(cmdoptions.only_binary()) - cmd_opts.add_option(cmdoptions.prefer_binary()) - cmd_opts.add_option(cmdoptions.src()) - cmd_opts.add_option(cmdoptions.pre()) - cmd_opts.add_option(cmdoptions.no_clean()) - cmd_opts.add_option(cmdoptions.require_hashes()) - cmd_opts.add_option(cmdoptions.progress_bar()) - cmd_opts.add_option(cmdoptions.no_build_isolation()) - cmd_opts.add_option(cmdoptions.use_pep517()) - cmd_opts.add_option(cmdoptions.no_use_pep517()) - - cmd_opts.add_option( - '-d', '--dest', '--destination-dir', '--destination-directory', - dest='download_dir', - metavar='dir', - default=os.curdir, - help=("Download packages into ."), - ) - - cmd_opts.add_option(cmdoptions.platform()) - cmd_opts.add_option(cmdoptions.python_version()) - cmd_opts.add_option(cmdoptions.implementation()) - cmd_opts.add_option(cmdoptions.abi()) - - index_opts = cmdoptions.make_option_group( - cmdoptions.index_group, - self.parser, - ) - - self.parser.insert_option_group(0, index_opts) - self.parser.insert_option_group(0, cmd_opts) - - def run(self, options, args): - options.ignore_installed = True - # editable doesn't really make sense for `pip download`, but the bowels - # of the RequirementSet code require that property. 
- options.editables = [] - - if options.python_version: - python_versions = [options.python_version] - else: - python_versions = None - - cmdoptions.check_dist_restriction(options) - - options.src_dir = os.path.abspath(options.src_dir) - options.download_dir = normalize_path(options.download_dir) - - ensure_dir(options.download_dir) - - with self._build_session(options) as session: - finder = self._build_package_finder( - options=options, - session=session, - platform=options.platform, - python_versions=python_versions, - abi=options.abi, - implementation=options.implementation, - ) - build_delete = (not (options.no_clean or options.build_dir)) - if options.cache_dir and not check_path_owner(options.cache_dir): - logger.warning( - "The directory '%s' or its parent directory is not owned " - "by the current user and caching wheels has been " - "disabled. check the permissions and owner of that " - "directory. If executing pip with sudo, you may want " - "sudo's -H flag.", - options.cache_dir, - ) - options.cache_dir = None - - with RequirementTracker() as req_tracker, TempDirectory( - options.build_dir, delete=build_delete, kind="download" - ) as directory: - - requirement_set = RequirementSet( - require_hashes=options.require_hashes, - ) - self.populate_requirement_set( - requirement_set, - args, - options, - finder, - session, - self.name, - None - ) - - preparer = RequirementPreparer( - build_dir=directory.path, - src_dir=options.src_dir, - download_dir=options.download_dir, - wheel_download_dir=None, - progress_bar=options.progress_bar, - build_isolation=options.build_isolation, - req_tracker=req_tracker, - ) - - resolver = Resolver( - preparer=preparer, - finder=finder, - session=session, - wheel_cache=None, - use_user_site=False, - upgrade_strategy="to-satisfy-only", - force_reinstall=False, - ignore_dependencies=options.ignore_dependencies, - ignore_requires_python=False, - ignore_installed=True, - isolated=options.isolated_mode, - ) - 
resolver.resolve(requirement_set) - - downloaded = ' '.join([ - req.name for req in requirement_set.successfully_downloaded - ]) - if downloaded: - logger.info('Successfully downloaded %s', downloaded) - - # Clean up - if not options.no_clean: - requirement_set.cleanup_files() - - return requirement_set diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/freeze.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/freeze.py deleted file mode 100644 index dc9c53a..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/freeze.py +++ /dev/null @@ -1,96 +0,0 @@ -from __future__ import absolute_import - -import sys - -from pip._internal.cache import WheelCache -from pip._internal.cli.base_command import Command -from pip._internal.models.format_control import FormatControl -from pip._internal.operations.freeze import freeze -from pip._internal.utils.compat import stdlib_pkgs - -DEV_PKGS = {'pip', 'setuptools', 'distribute', 'wheel'} - - -class FreezeCommand(Command): - """ - Output installed packages in requirements format. - - packages are listed in a case-insensitive sorted order. - """ - name = 'freeze' - usage = """ - %prog [options]""" - summary = 'Output installed packages in requirements format.' - log_streams = ("ext://sys.stderr", "ext://sys.stderr") - - def __init__(self, *args, **kw): - super(FreezeCommand, self).__init__(*args, **kw) - - self.cmd_opts.add_option( - '-r', '--requirement', - dest='requirements', - action='append', - default=[], - metavar='file', - help="Use the order in the given requirements file and its " - "comments when generating output. 
This option can be " - "used multiple times.") - self.cmd_opts.add_option( - '-f', '--find-links', - dest='find_links', - action='append', - default=[], - metavar='URL', - help='URL for finding packages, which will be added to the ' - 'output.') - self.cmd_opts.add_option( - '-l', '--local', - dest='local', - action='store_true', - default=False, - help='If in a virtualenv that has global access, do not output ' - 'globally-installed packages.') - self.cmd_opts.add_option( - '--user', - dest='user', - action='store_true', - default=False, - help='Only output packages installed in user-site.') - self.cmd_opts.add_option( - '--all', - dest='freeze_all', - action='store_true', - help='Do not skip these packages in the output:' - ' %s' % ', '.join(DEV_PKGS)) - self.cmd_opts.add_option( - '--exclude-editable', - dest='exclude_editable', - action='store_true', - help='Exclude editable package from output.') - - self.parser.insert_option_group(0, self.cmd_opts) - - def run(self, options, args): - format_control = FormatControl(set(), set()) - wheel_cache = WheelCache(options.cache_dir, format_control) - skip = set(stdlib_pkgs) - if not options.freeze_all: - skip.update(DEV_PKGS) - - freeze_kwargs = dict( - requirement=options.requirements, - find_links=options.find_links, - local_only=options.local, - user_only=options.user, - skip_regex=options.skip_requirements_regex, - isolated=options.isolated_mode, - wheel_cache=wheel_cache, - skip=skip, - exclude_editable=options.exclude_editable, - ) - - try: - for line in freeze(**freeze_kwargs): - sys.stdout.write(line + '\n') - finally: - wheel_cache.cleanup() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/hash.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/hash.py deleted file mode 100644 index 423440e..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/hash.py +++ /dev/null @@ -1,57 +0,0 @@ -from __future__ 
import absolute_import - -import hashlib -import logging -import sys - -from pip._internal.cli.base_command import Command -from pip._internal.cli.status_codes import ERROR -from pip._internal.utils.hashes import FAVORITE_HASH, STRONG_HASHES -from pip._internal.utils.misc import read_chunks - -logger = logging.getLogger(__name__) - - -class HashCommand(Command): - """ - Compute a hash of a local package archive. - - These can be used with --hash in a requirements file to do repeatable - installs. - - """ - name = 'hash' - usage = '%prog [options] ...' - summary = 'Compute hashes of package archives.' - ignore_require_venv = True - - def __init__(self, *args, **kw): - super(HashCommand, self).__init__(*args, **kw) - self.cmd_opts.add_option( - '-a', '--algorithm', - dest='algorithm', - choices=STRONG_HASHES, - action='store', - default=FAVORITE_HASH, - help='The hash algorithm to use: one of %s' % - ', '.join(STRONG_HASHES)) - self.parser.insert_option_group(0, self.cmd_opts) - - def run(self, options, args): - if not args: - self.parser.print_usage(sys.stderr) - return ERROR - - algorithm = options.algorithm - for path in args: - logger.info('%s:\n--hash=%s:%s', - path, algorithm, _hash_of_file(path, algorithm)) - - -def _hash_of_file(path, algorithm): - """Return the hash digest of a file.""" - with open(path, 'rb') as archive: - hash = hashlib.new(algorithm) - for chunk in read_chunks(archive): - hash.update(chunk) - return hash.hexdigest() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/help.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/help.py deleted file mode 100644 index 49a81cb..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/help.py +++ /dev/null @@ -1,37 +0,0 @@ -from __future__ import absolute_import - -from pip._internal.cli.base_command import Command -from pip._internal.cli.status_codes import SUCCESS -from pip._internal.exceptions 
import CommandError - - -class HelpCommand(Command): - """Show help for commands""" - name = 'help' - usage = """ - %prog """ - summary = 'Show help for commands.' - ignore_require_venv = True - - def run(self, options, args): - from pip._internal.commands import commands_dict, get_similar_commands - - try: - # 'pip help' with no args is handled by pip.__init__.parseopt() - cmd_name = args[0] # the command we need help for - except IndexError: - return SUCCESS - - if cmd_name not in commands_dict: - guess = get_similar_commands(cmd_name) - - msg = ['unknown command "%s"' % cmd_name] - if guess: - msg.append('maybe you meant "%s"' % guess) - - raise CommandError(' - '.join(msg)) - - command = commands_dict[cmd_name]() - command.parser.print_help() - - return SUCCESS diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/install.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/install.py deleted file mode 100644 index 1c244d2..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/install.py +++ /dev/null @@ -1,566 +0,0 @@ -from __future__ import absolute_import - -import errno -import logging -import operator -import os -import shutil -from optparse import SUPPRESS_HELP - -from pip._vendor import pkg_resources - -from pip._internal.cache import WheelCache -from pip._internal.cli import cmdoptions -from pip._internal.cli.base_command import RequirementCommand -from pip._internal.cli.status_codes import ERROR -from pip._internal.exceptions import ( - CommandError, InstallationError, PreviousBuildDirError, -) -from pip._internal.locations import distutils_scheme, virtualenv_no_global -from pip._internal.operations.check import check_install_conflicts -from pip._internal.operations.prepare import RequirementPreparer -from pip._internal.req import RequirementSet, install_given_reqs -from pip._internal.req.req_tracker import RequirementTracker -from 
pip._internal.resolve import Resolver -from pip._internal.utils.filesystem import check_path_owner -from pip._internal.utils.misc import ( - ensure_dir, get_installed_version, - protect_pip_from_modification_on_windows, -) -from pip._internal.utils.temp_dir import TempDirectory -from pip._internal.wheel import WheelBuilder - -logger = logging.getLogger(__name__) - - -class InstallCommand(RequirementCommand): - """ - Install packages from: - - - PyPI (and other indexes) using requirement specifiers. - - VCS project urls. - - Local project directories. - - Local or remote source archives. - - pip also supports installing from "requirements files", which provide - an easy way to specify a whole environment to be installed. - """ - name = 'install' - - usage = """ - %prog [options] [package-index-options] ... - %prog [options] -r [package-index-options] ... - %prog [options] [-e] ... - %prog [options] [-e] ... - %prog [options] ...""" - - summary = 'Install packages.' - - def __init__(self, *args, **kw): - super(InstallCommand, self).__init__(*args, **kw) - - cmd_opts = self.cmd_opts - - cmd_opts.add_option(cmdoptions.requirements()) - cmd_opts.add_option(cmdoptions.constraints()) - cmd_opts.add_option(cmdoptions.no_deps()) - cmd_opts.add_option(cmdoptions.pre()) - - cmd_opts.add_option(cmdoptions.editable()) - cmd_opts.add_option( - '-t', '--target', - dest='target_dir', - metavar='dir', - default=None, - help='Install packages into . ' - 'By default this will not replace existing files/folders in ' - '. Use --upgrade to replace existing packages in ' - 'with new versions.' - ) - cmd_opts.add_option(cmdoptions.platform()) - cmd_opts.add_option(cmdoptions.python_version()) - cmd_opts.add_option(cmdoptions.implementation()) - cmd_opts.add_option(cmdoptions.abi()) - - cmd_opts.add_option( - '--user', - dest='use_user_site', - action='store_true', - help="Install to the Python user install directory for your " - "platform. 
Typically ~/.local/, or %APPDATA%\\Python on " - "Windows. (See the Python documentation for site.USER_BASE " - "for full details.)") - cmd_opts.add_option( - '--no-user', - dest='use_user_site', - action='store_false', - help=SUPPRESS_HELP) - cmd_opts.add_option( - '--root', - dest='root_path', - metavar='dir', - default=None, - help="Install everything relative to this alternate root " - "directory.") - cmd_opts.add_option( - '--prefix', - dest='prefix_path', - metavar='dir', - default=None, - help="Installation prefix where lib, bin and other top-level " - "folders are placed") - - cmd_opts.add_option(cmdoptions.build_dir()) - - cmd_opts.add_option(cmdoptions.src()) - - cmd_opts.add_option( - '-U', '--upgrade', - dest='upgrade', - action='store_true', - help='Upgrade all specified packages to the newest available ' - 'version. The handling of dependencies depends on the ' - 'upgrade-strategy used.' - ) - - cmd_opts.add_option( - '--upgrade-strategy', - dest='upgrade_strategy', - default='only-if-needed', - choices=['only-if-needed', 'eager'], - help='Determines how dependency upgrading should be handled ' - '[default: %default]. ' - '"eager" - dependencies are upgraded regardless of ' - 'whether the currently installed version satisfies the ' - 'requirements of the upgraded package(s). ' - '"only-if-needed" - are upgraded only when they do not ' - 'satisfy the requirements of the upgraded package(s).' 
- ) - - cmd_opts.add_option( - '--force-reinstall', - dest='force_reinstall', - action='store_true', - help='Reinstall all packages even if they are already ' - 'up-to-date.') - - cmd_opts.add_option( - '-I', '--ignore-installed', - dest='ignore_installed', - action='store_true', - help='Ignore the installed packages (reinstalling instead).') - - cmd_opts.add_option(cmdoptions.ignore_requires_python()) - cmd_opts.add_option(cmdoptions.no_build_isolation()) - cmd_opts.add_option(cmdoptions.use_pep517()) - cmd_opts.add_option(cmdoptions.no_use_pep517()) - - cmd_opts.add_option(cmdoptions.install_options()) - cmd_opts.add_option(cmdoptions.global_options()) - - cmd_opts.add_option( - "--compile", - action="store_true", - dest="compile", - default=True, - help="Compile Python source files to bytecode", - ) - - cmd_opts.add_option( - "--no-compile", - action="store_false", - dest="compile", - help="Do not compile Python source files to bytecode", - ) - - cmd_opts.add_option( - "--no-warn-script-location", - action="store_false", - dest="warn_script_location", - default=True, - help="Do not warn when installing scripts outside PATH", - ) - cmd_opts.add_option( - "--no-warn-conflicts", - action="store_false", - dest="warn_about_conflicts", - default=True, - help="Do not warn about broken dependencies", - ) - - cmd_opts.add_option(cmdoptions.no_binary()) - cmd_opts.add_option(cmdoptions.only_binary()) - cmd_opts.add_option(cmdoptions.prefer_binary()) - cmd_opts.add_option(cmdoptions.no_clean()) - cmd_opts.add_option(cmdoptions.require_hashes()) - cmd_opts.add_option(cmdoptions.progress_bar()) - - index_opts = cmdoptions.make_option_group( - cmdoptions.index_group, - self.parser, - ) - - self.parser.insert_option_group(0, index_opts) - self.parser.insert_option_group(0, cmd_opts) - - def run(self, options, args): - cmdoptions.check_install_build_global(options) - upgrade_strategy = "to-satisfy-only" - if options.upgrade: - upgrade_strategy = options.upgrade_strategy - - if 
options.build_dir: - options.build_dir = os.path.abspath(options.build_dir) - - cmdoptions.check_dist_restriction(options, check_target=True) - - if options.python_version: - python_versions = [options.python_version] - else: - python_versions = None - - options.src_dir = os.path.abspath(options.src_dir) - install_options = options.install_options or [] - if options.use_user_site: - if options.prefix_path: - raise CommandError( - "Can not combine '--user' and '--prefix' as they imply " - "different installation locations" - ) - if virtualenv_no_global(): - raise InstallationError( - "Can not perform a '--user' install. User site-packages " - "are not visible in this virtualenv." - ) - install_options.append('--user') - install_options.append('--prefix=') - - target_temp_dir = TempDirectory(kind="target") - if options.target_dir: - options.ignore_installed = True - options.target_dir = os.path.abspath(options.target_dir) - if (os.path.exists(options.target_dir) and not - os.path.isdir(options.target_dir)): - raise CommandError( - "Target path exists but is not a directory, will not " - "continue." - ) - - # Create a target directory for using with the target option - target_temp_dir.create() - install_options.append('--home=' + target_temp_dir.path) - - global_options = options.global_options or [] - - with self._build_session(options) as session: - finder = self._build_package_finder( - options=options, - session=session, - platform=options.platform, - python_versions=python_versions, - abi=options.abi, - implementation=options.implementation, - ) - build_delete = (not (options.no_clean or options.build_dir)) - wheel_cache = WheelCache(options.cache_dir, options.format_control) - - if options.cache_dir and not check_path_owner(options.cache_dir): - logger.warning( - "The directory '%s' or its parent directory is not owned " - "by the current user and caching wheels has been " - "disabled. check the permissions and owner of that " - "directory. 
If executing pip with sudo, you may want " - "sudo's -H flag.", - options.cache_dir, - ) - options.cache_dir = None - - with RequirementTracker() as req_tracker, TempDirectory( - options.build_dir, delete=build_delete, kind="install" - ) as directory: - requirement_set = RequirementSet( - require_hashes=options.require_hashes, - check_supported_wheels=not options.target_dir, - ) - - try: - self.populate_requirement_set( - requirement_set, args, options, finder, session, - self.name, wheel_cache - ) - preparer = RequirementPreparer( - build_dir=directory.path, - src_dir=options.src_dir, - download_dir=None, - wheel_download_dir=None, - progress_bar=options.progress_bar, - build_isolation=options.build_isolation, - req_tracker=req_tracker, - ) - - resolver = Resolver( - preparer=preparer, - finder=finder, - session=session, - wheel_cache=wheel_cache, - use_user_site=options.use_user_site, - upgrade_strategy=upgrade_strategy, - force_reinstall=options.force_reinstall, - ignore_dependencies=options.ignore_dependencies, - ignore_requires_python=options.ignore_requires_python, - ignore_installed=options.ignore_installed, - isolated=options.isolated_mode, - use_pep517=options.use_pep517 - ) - resolver.resolve(requirement_set) - - protect_pip_from_modification_on_windows( - modifying_pip=requirement_set.has_requirement("pip") - ) - - # Consider legacy and PEP517-using requirements separately - legacy_requirements = [] - pep517_requirements = [] - for req in requirement_set.requirements.values(): - if req.use_pep517: - pep517_requirements.append(req) - else: - legacy_requirements.append(req) - - # We don't build wheels for legacy requirements if we - # don't have wheel installed or we don't have a cache dir - try: - import wheel # noqa: F401 - build_legacy = bool(options.cache_dir) - except ImportError: - build_legacy = False - - wb = WheelBuilder( - finder, preparer, wheel_cache, - build_options=[], global_options=[], - ) - - # Always build PEP 517 requirements - 
build_failures = wb.build( - pep517_requirements, - session=session, autobuilding=True - ) - - if build_legacy: - # We don't care about failures building legacy - # requirements, as we'll fall through to a direct - # install for those. - wb.build( - legacy_requirements, - session=session, autobuilding=True - ) - - # If we're using PEP 517, we cannot do a direct install - # so we fail here. - if build_failures: - raise InstallationError( - "Could not build wheels for {} which use" - " PEP 517 and cannot be installed directly".format( - ", ".join(r.name for r in build_failures))) - - to_install = resolver.get_installation_order( - requirement_set - ) - - # Consistency Checking of the package set we're installing. - should_warn_about_conflicts = ( - not options.ignore_dependencies and - options.warn_about_conflicts - ) - if should_warn_about_conflicts: - self._warn_about_conflicts(to_install) - - # Don't warn about script install locations if - # --target has been specified - warn_script_location = options.warn_script_location - if options.target_dir: - warn_script_location = False - - installed = install_given_reqs( - to_install, - install_options, - global_options, - root=options.root_path, - home=target_temp_dir.path, - prefix=options.prefix_path, - pycompile=options.compile, - warn_script_location=warn_script_location, - use_user_site=options.use_user_site, - ) - - lib_locations = get_lib_location_guesses( - user=options.use_user_site, - home=target_temp_dir.path, - root=options.root_path, - prefix=options.prefix_path, - isolated=options.isolated_mode, - ) - working_set = pkg_resources.WorkingSet(lib_locations) - - reqs = sorted(installed, key=operator.attrgetter('name')) - items = [] - for req in reqs: - item = req.name - try: - installed_version = get_installed_version( - req.name, working_set=working_set - ) - if installed_version: - item += '-' + installed_version - except Exception: - pass - items.append(item) - installed = ' '.join(items) - if installed: - 
logger.info('Successfully installed %s', installed) - except EnvironmentError as error: - show_traceback = (self.verbosity >= 1) - - message = create_env_error_message( - error, show_traceback, options.use_user_site, - ) - logger.error(message, exc_info=show_traceback) - - return ERROR - except PreviousBuildDirError: - options.no_clean = True - raise - finally: - # Clean up - if not options.no_clean: - requirement_set.cleanup_files() - wheel_cache.cleanup() - - if options.target_dir: - self._handle_target_dir( - options.target_dir, target_temp_dir, options.upgrade - ) - return requirement_set - - def _handle_target_dir(self, target_dir, target_temp_dir, upgrade): - ensure_dir(target_dir) - - # Checking both purelib and platlib directories for installed - # packages to be moved to target directory - lib_dir_list = [] - - with target_temp_dir: - # Checking both purelib and platlib directories for installed - # packages to be moved to target directory - scheme = distutils_scheme('', home=target_temp_dir.path) - purelib_dir = scheme['purelib'] - platlib_dir = scheme['platlib'] - data_dir = scheme['data'] - - if os.path.exists(purelib_dir): - lib_dir_list.append(purelib_dir) - if os.path.exists(platlib_dir) and platlib_dir != purelib_dir: - lib_dir_list.append(platlib_dir) - if os.path.exists(data_dir): - lib_dir_list.append(data_dir) - - for lib_dir in lib_dir_list: - for item in os.listdir(lib_dir): - if lib_dir == data_dir: - ddir = os.path.join(data_dir, item) - if any(s.startswith(ddir) for s in lib_dir_list[:-1]): - continue - target_item_dir = os.path.join(target_dir, item) - if os.path.exists(target_item_dir): - if not upgrade: - logger.warning( - 'Target directory %s already exists. Specify ' - '--upgrade to force replacement.', - target_item_dir - ) - continue - if os.path.islink(target_item_dir): - logger.warning( - 'Target directory %s already exists and is ' - 'a link. 
Pip will not automatically replace ' - 'links, please remove if replacement is ' - 'desired.', - target_item_dir - ) - continue - if os.path.isdir(target_item_dir): - shutil.rmtree(target_item_dir) - else: - os.remove(target_item_dir) - - shutil.move( - os.path.join(lib_dir, item), - target_item_dir - ) - - def _warn_about_conflicts(self, to_install): - try: - package_set, _dep_info = check_install_conflicts(to_install) - except Exception: - logger.error("Error checking for conflicts.", exc_info=True) - return - missing, conflicting = _dep_info - - # NOTE: There is some duplication here from pip check - for project_name in missing: - version = package_set[project_name][0] - for dependency in missing[project_name]: - logger.critical( - "%s %s requires %s, which is not installed.", - project_name, version, dependency[1], - ) - - for project_name in conflicting: - version = package_set[project_name][0] - for dep_name, dep_version, req in conflicting[project_name]: - logger.critical( - "%s %s has requirement %s, but you'll have %s %s which is " - "incompatible.", - project_name, version, req, dep_name, dep_version, - ) - - -def get_lib_location_guesses(*args, **kwargs): - scheme = distutils_scheme('', *args, **kwargs) - return [scheme['purelib'], scheme['platlib']] - - -def create_env_error_message(error, show_traceback, using_user_site): - """Format an error message for an EnvironmentError - - It may occur anytime during the execution of the install command. 
- """ - parts = [] - - # Mention the error if we are not going to show a traceback - parts.append("Could not install packages due to an EnvironmentError") - if not show_traceback: - parts.append(": ") - parts.append(str(error)) - else: - parts.append(".") - - # Spilt the error indication from a helper message (if any) - parts[-1] += "\n" - - # Suggest useful actions to the user: - # (1) using user site-packages or (2) verifying the permissions - if error.errno == errno.EACCES: - user_option_part = "Consider using the `--user` option" - permissions_part = "Check the permissions" - - if not using_user_site: - parts.extend([ - user_option_part, " or ", - permissions_part.lower(), - ]) - else: - parts.append(permissions_part) - parts.append(".\n") - - return "".join(parts).strip() + "\n" diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/list.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/list.py deleted file mode 100644 index a640274..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/list.py +++ /dev/null @@ -1,301 +0,0 @@ -from __future__ import absolute_import - -import json -import logging - -from pip._vendor import six -from pip._vendor.six.moves import zip_longest - -from pip._internal.cli import cmdoptions -from pip._internal.cli.base_command import Command -from pip._internal.exceptions import CommandError -from pip._internal.index import PackageFinder -from pip._internal.utils.misc import ( - dist_is_editable, get_installed_distributions, -) -from pip._internal.utils.packaging import get_installer - -logger = logging.getLogger(__name__) - - -class ListCommand(Command): - """ - List installed packages, including editables. - - Packages are listed in a case-insensitive sorted order. - """ - name = 'list' - usage = """ - %prog [options]""" - summary = 'List installed packages.' 
- - def __init__(self, *args, **kw): - super(ListCommand, self).__init__(*args, **kw) - - cmd_opts = self.cmd_opts - - cmd_opts.add_option( - '-o', '--outdated', - action='store_true', - default=False, - help='List outdated packages') - cmd_opts.add_option( - '-u', '--uptodate', - action='store_true', - default=False, - help='List uptodate packages') - cmd_opts.add_option( - '-e', '--editable', - action='store_true', - default=False, - help='List editable projects.') - cmd_opts.add_option( - '-l', '--local', - action='store_true', - default=False, - help=('If in a virtualenv that has global access, do not list ' - 'globally-installed packages.'), - ) - self.cmd_opts.add_option( - '--user', - dest='user', - action='store_true', - default=False, - help='Only output packages installed in user-site.') - - cmd_opts.add_option( - '--pre', - action='store_true', - default=False, - help=("Include pre-release and development versions. By default, " - "pip only finds stable versions."), - ) - - cmd_opts.add_option( - '--format', - action='store', - dest='list_format', - default="columns", - choices=('columns', 'freeze', 'json'), - help="Select the output format among: columns (default), freeze, " - "or json", - ) - - cmd_opts.add_option( - '--not-required', - action='store_true', - dest='not_required', - help="List packages that are not dependencies of " - "installed packages.", - ) - - cmd_opts.add_option( - '--exclude-editable', - action='store_false', - dest='include_editable', - help='Exclude editable package from output.', - ) - cmd_opts.add_option( - '--include-editable', - action='store_true', - dest='include_editable', - help='Include editable package from output.', - default=True, - ) - index_opts = cmdoptions.make_option_group( - cmdoptions.index_group, self.parser - ) - - self.parser.insert_option_group(0, index_opts) - self.parser.insert_option_group(0, cmd_opts) - - def _build_package_finder(self, options, index_urls, session): - """ - Create a package finder 
appropriate to this list command. - """ - return PackageFinder( - find_links=options.find_links, - index_urls=index_urls, - allow_all_prereleases=options.pre, - trusted_hosts=options.trusted_hosts, - session=session, - ) - - def run(self, options, args): - if options.outdated and options.uptodate: - raise CommandError( - "Options --outdated and --uptodate cannot be combined.") - - packages = get_installed_distributions( - local_only=options.local, - user_only=options.user, - editables_only=options.editable, - include_editables=options.include_editable, - ) - - # get_not_required must be called firstly in order to find and - # filter out all dependencies correctly. Otherwise a package - # can't be identified as requirement because some parent packages - # could be filtered out before. - if options.not_required: - packages = self.get_not_required(packages, options) - - if options.outdated: - packages = self.get_outdated(packages, options) - elif options.uptodate: - packages = self.get_uptodate(packages, options) - - self.output_package_listing(packages, options) - - def get_outdated(self, packages, options): - return [ - dist for dist in self.iter_packages_latest_infos(packages, options) - if dist.latest_version > dist.parsed_version - ] - - def get_uptodate(self, packages, options): - return [ - dist for dist in self.iter_packages_latest_infos(packages, options) - if dist.latest_version == dist.parsed_version - ] - - def get_not_required(self, packages, options): - dep_keys = set() - for dist in packages: - dep_keys.update(requirement.key for requirement in dist.requires()) - return {pkg for pkg in packages if pkg.key not in dep_keys} - - def iter_packages_latest_infos(self, packages, options): - index_urls = [options.index_url] + options.extra_index_urls - if options.no_index: - logger.debug('Ignoring indexes: %s', ','.join(index_urls)) - index_urls = [] - - with self._build_session(options) as session: - finder = self._build_package_finder(options, index_urls, 
session) - - for dist in packages: - typ = 'unknown' - all_candidates = finder.find_all_candidates(dist.key) - if not options.pre: - # Remove prereleases - all_candidates = [candidate for candidate in all_candidates - if not candidate.version.is_prerelease] - - if not all_candidates: - continue - best_candidate = max(all_candidates, - key=finder._candidate_sort_key) - remote_version = best_candidate.version - if best_candidate.location.is_wheel: - typ = 'wheel' - else: - typ = 'sdist' - # This is dirty but makes the rest of the code much cleaner - dist.latest_version = remote_version - dist.latest_filetype = typ - yield dist - - def output_package_listing(self, packages, options): - packages = sorted( - packages, - key=lambda dist: dist.project_name.lower(), - ) - if options.list_format == 'columns' and packages: - data, header = format_for_columns(packages, options) - self.output_package_listing_columns(data, header) - elif options.list_format == 'freeze': - for dist in packages: - if options.verbose >= 1: - logger.info("%s==%s (%s)", dist.project_name, - dist.version, dist.location) - else: - logger.info("%s==%s", dist.project_name, dist.version) - elif options.list_format == 'json': - logger.info(format_for_json(packages, options)) - - def output_package_listing_columns(self, data, header): - # insert the header first: we need to know the size of column names - if len(data) > 0: - data.insert(0, header) - - pkg_strings, sizes = tabulate(data) - - # Create and add a separator. 
- if len(data) > 0: - pkg_strings.insert(1, " ".join(map(lambda x: '-' * x, sizes))) - - for val in pkg_strings: - logger.info(val) - - -def tabulate(vals): - # From pfmoore on GitHub: - # https://github.com/pypa/pip/issues/3651#issuecomment-216932564 - assert len(vals) > 0 - - sizes = [0] * max(len(x) for x in vals) - for row in vals: - sizes = [max(s, len(str(c))) for s, c in zip_longest(sizes, row)] - - result = [] - for row in vals: - display = " ".join([str(c).ljust(s) if c is not None else '' - for s, c in zip_longest(sizes, row)]) - result.append(display) - - return result, sizes - - -def format_for_columns(pkgs, options): - """ - Convert the package data into something usable - by output_package_listing_columns. - """ - running_outdated = options.outdated - # Adjust the header for the `pip list --outdated` case. - if running_outdated: - header = ["Package", "Version", "Latest", "Type"] - else: - header = ["Package", "Version"] - - data = [] - if options.verbose >= 1 or any(dist_is_editable(x) for x in pkgs): - header.append("Location") - if options.verbose >= 1: - header.append("Installer") - - for proj in pkgs: - # if we're working on the 'outdated' list, separate out the - # latest_version and type - row = [proj.project_name, proj.version] - - if running_outdated: - row.append(proj.latest_version) - row.append(proj.latest_filetype) - - if options.verbose >= 1 or dist_is_editable(proj): - row.append(proj.location) - if options.verbose >= 1: - row.append(get_installer(proj)) - - data.append(row) - - return data, header - - -def format_for_json(packages, options): - data = [] - for dist in packages: - info = { - 'name': dist.project_name, - 'version': six.text_type(dist.version), - } - if options.verbose >= 1: - info['location'] = dist.location - info['installer'] = get_installer(dist) - if options.outdated: - info['latest_version'] = six.text_type(dist.latest_version) - info['latest_filetype'] = dist.latest_filetype - data.append(info) - return 
json.dumps(data) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/search.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/search.py deleted file mode 100644 index c157a31..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/search.py +++ /dev/null @@ -1,135 +0,0 @@ -from __future__ import absolute_import - -import logging -import sys -import textwrap -from collections import OrderedDict - -from pip._vendor import pkg_resources -from pip._vendor.packaging.version import parse as parse_version -# NOTE: XMLRPC Client is not annotated in typeshed as on 2017-07-17, which is -# why we ignore the type on this import -from pip._vendor.six.moves import xmlrpc_client # type: ignore - -from pip._internal.cli.base_command import Command -from pip._internal.cli.status_codes import NO_MATCHES_FOUND, SUCCESS -from pip._internal.download import PipXmlrpcTransport -from pip._internal.exceptions import CommandError -from pip._internal.models.index import PyPI -from pip._internal.utils.compat import get_terminal_size -from pip._internal.utils.logging import indent_log - -logger = logging.getLogger(__name__) - - -class SearchCommand(Command): - """Search for PyPI packages whose name or summary contains .""" - name = 'search' - usage = """ - %prog [options] """ - summary = 'Search PyPI for packages.' 
- ignore_require_venv = True - - def __init__(self, *args, **kw): - super(SearchCommand, self).__init__(*args, **kw) - self.cmd_opts.add_option( - '-i', '--index', - dest='index', - metavar='URL', - default=PyPI.pypi_url, - help='Base URL of Python Package Index (default %default)') - - self.parser.insert_option_group(0, self.cmd_opts) - - def run(self, options, args): - if not args: - raise CommandError('Missing required argument (search query).') - query = args - pypi_hits = self.search(query, options) - hits = transform_hits(pypi_hits) - - terminal_width = None - if sys.stdout.isatty(): - terminal_width = get_terminal_size()[0] - - print_results(hits, terminal_width=terminal_width) - if pypi_hits: - return SUCCESS - return NO_MATCHES_FOUND - - def search(self, query, options): - index_url = options.index - with self._build_session(options) as session: - transport = PipXmlrpcTransport(index_url, session) - pypi = xmlrpc_client.ServerProxy(index_url, transport) - hits = pypi.search({'name': query, 'summary': query}, 'or') - return hits - - -def transform_hits(hits): - """ - The list from pypi is really a list of versions. We want a list of - packages with the list of versions stored inline. This converts the - list from pypi into one we can use. 
- """ - packages = OrderedDict() - for hit in hits: - name = hit['name'] - summary = hit['summary'] - version = hit['version'] - - if name not in packages.keys(): - packages[name] = { - 'name': name, - 'summary': summary, - 'versions': [version], - } - else: - packages[name]['versions'].append(version) - - # if this is the highest version, replace summary and score - if version == highest_version(packages[name]['versions']): - packages[name]['summary'] = summary - - return list(packages.values()) - - -def print_results(hits, name_column_width=None, terminal_width=None): - if not hits: - return - if name_column_width is None: - name_column_width = max([ - len(hit['name']) + len(highest_version(hit.get('versions', ['-']))) - for hit in hits - ]) + 4 - - installed_packages = [p.project_name for p in pkg_resources.working_set] - for hit in hits: - name = hit['name'] - summary = hit['summary'] or '' - latest = highest_version(hit.get('versions', ['-'])) - if terminal_width is not None: - target_width = terminal_width - name_column_width - 5 - if target_width > 10: - # wrap and indent summary to fit terminal - summary = textwrap.wrap(summary, target_width) - summary = ('\n' + ' ' * (name_column_width + 3)).join(summary) - - line = '%-*s - %s' % (name_column_width, - '%s (%s)' % (name, latest), summary) - try: - logger.info(line) - if name in installed_packages: - dist = pkg_resources.get_distribution(name) - with indent_log(): - if dist.version == latest: - logger.info('INSTALLED: %s (latest)', dist.version) - else: - logger.info('INSTALLED: %s', dist.version) - logger.info('LATEST: %s', latest) - except UnicodeEncodeError: - pass - - -def highest_version(versions): - return max(versions, key=parse_version) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/show.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/show.py deleted file mode 100644 index f92c9bc..0000000 --- 
a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/show.py +++ /dev/null @@ -1,168 +0,0 @@ -from __future__ import absolute_import - -import logging -import os -from email.parser import FeedParser # type: ignore - -from pip._vendor import pkg_resources -from pip._vendor.packaging.utils import canonicalize_name - -from pip._internal.cli.base_command import Command -from pip._internal.cli.status_codes import ERROR, SUCCESS - -logger = logging.getLogger(__name__) - - -class ShowCommand(Command): - """ - Show information about one or more installed packages. - - The output is in RFC-compliant mail header format. - """ - name = 'show' - usage = """ - %prog [options] ...""" - summary = 'Show information about installed packages.' - ignore_require_venv = True - - def __init__(self, *args, **kw): - super(ShowCommand, self).__init__(*args, **kw) - self.cmd_opts.add_option( - '-f', '--files', - dest='files', - action='store_true', - default=False, - help='Show the full list of installed files for each package.') - - self.parser.insert_option_group(0, self.cmd_opts) - - def run(self, options, args): - if not args: - logger.warning('ERROR: Please provide a package name or names.') - return ERROR - query = args - - results = search_packages_info(query) - if not print_results( - results, list_files=options.files, verbose=options.verbose): - return ERROR - return SUCCESS - - -def search_packages_info(query): - """ - Gather details from installed distributions. Print distribution name, - version, location, and installed files. Installed files requires a - pip generated 'installed-files.txt' in the distributions '.egg-info' - directory. 
- """ - installed = {} - for p in pkg_resources.working_set: - installed[canonicalize_name(p.project_name)] = p - - query_names = [canonicalize_name(name) for name in query] - - for dist in [installed[pkg] for pkg in query_names if pkg in installed]: - package = { - 'name': dist.project_name, - 'version': dist.version, - 'location': dist.location, - 'requires': [dep.project_name for dep in dist.requires()], - } - file_list = None - metadata = None - if isinstance(dist, pkg_resources.DistInfoDistribution): - # RECORDs should be part of .dist-info metadatas - if dist.has_metadata('RECORD'): - lines = dist.get_metadata_lines('RECORD') - paths = [l.split(',')[0] for l in lines] - paths = [os.path.join(dist.location, p) for p in paths] - file_list = [os.path.relpath(p, dist.location) for p in paths] - - if dist.has_metadata('METADATA'): - metadata = dist.get_metadata('METADATA') - else: - # Otherwise use pip's log for .egg-info's - if dist.has_metadata('installed-files.txt'): - paths = dist.get_metadata_lines('installed-files.txt') - paths = [os.path.join(dist.egg_info, p) for p in paths] - file_list = [os.path.relpath(p, dist.location) for p in paths] - - if dist.has_metadata('PKG-INFO'): - metadata = dist.get_metadata('PKG-INFO') - - if dist.has_metadata('entry_points.txt'): - entry_points = dist.get_metadata_lines('entry_points.txt') - package['entry_points'] = entry_points - - if dist.has_metadata('INSTALLER'): - for line in dist.get_metadata_lines('INSTALLER'): - if line.strip(): - package['installer'] = line.strip() - break - - # @todo: Should pkg_resources.Distribution have a - # `get_pkg_info` method? 
- feed_parser = FeedParser() - feed_parser.feed(metadata) - pkg_info_dict = feed_parser.close() - for key in ('metadata-version', 'summary', - 'home-page', 'author', 'author-email', 'license'): - package[key] = pkg_info_dict.get(key) - - # It looks like FeedParser cannot deal with repeated headers - classifiers = [] - for line in metadata.splitlines(): - if line.startswith('Classifier: '): - classifiers.append(line[len('Classifier: '):]) - package['classifiers'] = classifiers - - if file_list: - package['files'] = sorted(file_list) - yield package - - -def print_results(distributions, list_files=False, verbose=False): - """ - Print the informations from installed distributions found. - """ - results_printed = False - for i, dist in enumerate(distributions): - results_printed = True - if i > 0: - logger.info("---") - - name = dist.get('name', '') - required_by = [ - pkg.project_name for pkg in pkg_resources.working_set - if name in [required.name for required in pkg.requires()] - ] - - logger.info("Name: %s", name) - logger.info("Version: %s", dist.get('version', '')) - logger.info("Summary: %s", dist.get('summary', '')) - logger.info("Home-page: %s", dist.get('home-page', '')) - logger.info("Author: %s", dist.get('author', '')) - logger.info("Author-email: %s", dist.get('author-email', '')) - logger.info("License: %s", dist.get('license', '')) - logger.info("Location: %s", dist.get('location', '')) - logger.info("Requires: %s", ', '.join(dist.get('requires', []))) - logger.info("Required-by: %s", ', '.join(required_by)) - - if verbose: - logger.info("Metadata-Version: %s", - dist.get('metadata-version', '')) - logger.info("Installer: %s", dist.get('installer', '')) - logger.info("Classifiers:") - for classifier in dist.get('classifiers', []): - logger.info(" %s", classifier) - logger.info("Entry-points:") - for entry in dist.get('entry_points', []): - logger.info(" %s", entry.strip()) - if list_files: - logger.info("Files:") - for line in dist.get('files', []): - 
logger.info(" %s", line.strip()) - if "files" not in dist: - logger.info("Cannot locate installed-files.txt") - return results_printed diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/uninstall.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/uninstall.py deleted file mode 100644 index 0cd6f54..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/uninstall.py +++ /dev/null @@ -1,78 +0,0 @@ -from __future__ import absolute_import - -from pip._vendor.packaging.utils import canonicalize_name - -from pip._internal.cli.base_command import Command -from pip._internal.exceptions import InstallationError -from pip._internal.req import parse_requirements -from pip._internal.req.constructors import install_req_from_line -from pip._internal.utils.misc import protect_pip_from_modification_on_windows - - -class UninstallCommand(Command): - """ - Uninstall packages. - - pip is able to uninstall most installed packages. Known exceptions are: - - - Pure distutils packages installed with ``python setup.py install``, which - leave behind no metadata to determine what files were installed. - - Script wrappers installed by ``python setup.py develop``. - """ - name = 'uninstall' - usage = """ - %prog [options] ... - %prog [options] -r ...""" - summary = 'Uninstall packages.' - - def __init__(self, *args, **kw): - super(UninstallCommand, self).__init__(*args, **kw) - self.cmd_opts.add_option( - '-r', '--requirement', - dest='requirements', - action='append', - default=[], - metavar='file', - help='Uninstall all the packages listed in the given requirements ' - 'file. 
This option can be used multiple times.', - ) - self.cmd_opts.add_option( - '-y', '--yes', - dest='yes', - action='store_true', - help="Don't ask for confirmation of uninstall deletions.") - - self.parser.insert_option_group(0, self.cmd_opts) - - def run(self, options, args): - with self._build_session(options) as session: - reqs_to_uninstall = {} - for name in args: - req = install_req_from_line( - name, isolated=options.isolated_mode, - ) - if req.name: - reqs_to_uninstall[canonicalize_name(req.name)] = req - for filename in options.requirements: - for req in parse_requirements( - filename, - options=options, - session=session): - if req.name: - reqs_to_uninstall[canonicalize_name(req.name)] = req - if not reqs_to_uninstall: - raise InstallationError( - 'You must give at least one requirement to %(name)s (see ' - '"pip help %(name)s")' % dict(name=self.name) - ) - - protect_pip_from_modification_on_windows( - modifying_pip="pip" in reqs_to_uninstall - ) - - for req in reqs_to_uninstall.values(): - uninstall_pathset = req.uninstall( - auto_confirm=options.yes, verbose=self.verbosity > 0, - ) - if uninstall_pathset: - uninstall_pathset.commit() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/wheel.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/wheel.py deleted file mode 100644 index cd72a3d..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/commands/wheel.py +++ /dev/null @@ -1,186 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import - -import logging -import os - -from pip._internal.cache import WheelCache -from pip._internal.cli import cmdoptions -from pip._internal.cli.base_command import RequirementCommand -from pip._internal.exceptions import CommandError, PreviousBuildDirError -from pip._internal.operations.prepare import RequirementPreparer -from pip._internal.req import RequirementSet -from pip._internal.req.req_tracker import 
RequirementTracker -from pip._internal.resolve import Resolver -from pip._internal.utils.temp_dir import TempDirectory -from pip._internal.wheel import WheelBuilder - -logger = logging.getLogger(__name__) - - -class WheelCommand(RequirementCommand): - """ - Build Wheel archives for your requirements and dependencies. - - Wheel is a built-package format, and offers the advantage of not - recompiling your software during every install. For more details, see the - wheel docs: https://wheel.readthedocs.io/en/latest/ - - Requirements: setuptools>=0.8, and wheel. - - 'pip wheel' uses the bdist_wheel setuptools extension from the wheel - package to build individual wheels. - - """ - - name = 'wheel' - usage = """ - %prog [options] ... - %prog [options] -r ... - %prog [options] [-e] ... - %prog [options] [-e] ... - %prog [options] ...""" - - summary = 'Build wheels from your requirements.' - - def __init__(self, *args, **kw): - super(WheelCommand, self).__init__(*args, **kw) - - cmd_opts = self.cmd_opts - - cmd_opts.add_option( - '-w', '--wheel-dir', - dest='wheel_dir', - metavar='dir', - default=os.curdir, - help=("Build wheels into , where the default is the " - "current working directory."), - ) - cmd_opts.add_option(cmdoptions.no_binary()) - cmd_opts.add_option(cmdoptions.only_binary()) - cmd_opts.add_option(cmdoptions.prefer_binary()) - cmd_opts.add_option( - '--build-option', - dest='build_options', - metavar='options', - action='append', - help="Extra arguments to be supplied to 'setup.py bdist_wheel'.", - ) - cmd_opts.add_option(cmdoptions.no_build_isolation()) - cmd_opts.add_option(cmdoptions.use_pep517()) - cmd_opts.add_option(cmdoptions.no_use_pep517()) - cmd_opts.add_option(cmdoptions.constraints()) - cmd_opts.add_option(cmdoptions.editable()) - cmd_opts.add_option(cmdoptions.requirements()) - cmd_opts.add_option(cmdoptions.src()) - cmd_opts.add_option(cmdoptions.ignore_requires_python()) - cmd_opts.add_option(cmdoptions.no_deps()) - 
cmd_opts.add_option(cmdoptions.build_dir()) - cmd_opts.add_option(cmdoptions.progress_bar()) - - cmd_opts.add_option( - '--global-option', - dest='global_options', - action='append', - metavar='options', - help="Extra global options to be supplied to the setup.py " - "call before the 'bdist_wheel' command.") - - cmd_opts.add_option( - '--pre', - action='store_true', - default=False, - help=("Include pre-release and development versions. By default, " - "pip only finds stable versions."), - ) - - cmd_opts.add_option(cmdoptions.no_clean()) - cmd_opts.add_option(cmdoptions.require_hashes()) - - index_opts = cmdoptions.make_option_group( - cmdoptions.index_group, - self.parser, - ) - - self.parser.insert_option_group(0, index_opts) - self.parser.insert_option_group(0, cmd_opts) - - def run(self, options, args): - cmdoptions.check_install_build_global(options) - - index_urls = [options.index_url] + options.extra_index_urls - if options.no_index: - logger.debug('Ignoring indexes: %s', ','.join(index_urls)) - index_urls = [] - - if options.build_dir: - options.build_dir = os.path.abspath(options.build_dir) - - options.src_dir = os.path.abspath(options.src_dir) - - with self._build_session(options) as session: - finder = self._build_package_finder(options, session) - build_delete = (not (options.no_clean or options.build_dir)) - wheel_cache = WheelCache(options.cache_dir, options.format_control) - - with RequirementTracker() as req_tracker, TempDirectory( - options.build_dir, delete=build_delete, kind="wheel" - ) as directory: - - requirement_set = RequirementSet( - require_hashes=options.require_hashes, - ) - - try: - self.populate_requirement_set( - requirement_set, args, options, finder, session, - self.name, wheel_cache - ) - - preparer = RequirementPreparer( - build_dir=directory.path, - src_dir=options.src_dir, - download_dir=None, - wheel_download_dir=options.wheel_dir, - progress_bar=options.progress_bar, - build_isolation=options.build_isolation, - 
req_tracker=req_tracker, - ) - - resolver = Resolver( - preparer=preparer, - finder=finder, - session=session, - wheel_cache=wheel_cache, - use_user_site=False, - upgrade_strategy="to-satisfy-only", - force_reinstall=False, - ignore_dependencies=options.ignore_dependencies, - ignore_requires_python=options.ignore_requires_python, - ignore_installed=True, - isolated=options.isolated_mode, - use_pep517=options.use_pep517 - ) - resolver.resolve(requirement_set) - - # build wheels - wb = WheelBuilder( - finder, preparer, wheel_cache, - build_options=options.build_options or [], - global_options=options.global_options or [], - no_clean=options.no_clean, - ) - build_failures = wb.build( - requirement_set.requirements.values(), session=session, - ) - if len(build_failures) != 0: - raise CommandError( - "Failed to build one or more wheels" - ) - except PreviousBuildDirError: - options.no_clean = True - raise - finally: - if not options.no_clean: - requirement_set.cleanup_files() - wheel_cache.cleanup() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/configuration.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/configuration.py deleted file mode 100644 index fe6df9b..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/configuration.py +++ /dev/null @@ -1,387 +0,0 @@ -"""Configuration management setup - -Some terminology: -- name - As written in config files. 
-- value - Value associated with a name -- key - Name combined with it's section (section.name) -- variant - A single word describing where the configuration key-value pair came from -""" - -import locale -import logging -import os - -from pip._vendor import six -from pip._vendor.six.moves import configparser - -from pip._internal.exceptions import ( - ConfigurationError, ConfigurationFileCouldNotBeLoaded, -) -from pip._internal.locations import ( - legacy_config_file, new_config_file, running_under_virtualenv, - site_config_files, venv_config_file, -) -from pip._internal.utils.misc import ensure_dir, enum -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from typing import ( # noqa: F401 - Any, Dict, Iterable, List, NewType, Optional, Tuple - ) - - RawConfigParser = configparser.RawConfigParser # Shorthand - Kind = NewType("Kind", str) - -logger = logging.getLogger(__name__) - - -# NOTE: Maybe use the optionx attribute to normalize keynames. -def _normalize_name(name): - # type: (str) -> str - """Make a name consistent regardless of source (environment or file) - """ - name = name.lower().replace('_', '-') - if name.startswith('--'): - name = name[2:] # only prefer long opts - return name - - -def _disassemble_key(name): - # type: (str) -> List[str] - return name.split(".", 1) - - -# The kinds of configurations there are. -kinds = enum( - USER="user", # User Specific - GLOBAL="global", # System Wide - VENV="venv", # Virtual Environment Specific - ENV="env", # from PIP_CONFIG_FILE - ENV_VAR="env-var", # from Environment Variables -) - - -class Configuration(object): - """Handles management of configuration. - - Provides an interface to accessing and managing configuration files. - - This class converts provides an API that takes "section.key-name" style - keys and stores the value associated with it as "key-name" under the - section "section". 
- - This allows for a clean interface wherein the both the section and the - key-name are preserved in an easy to manage form in the configuration files - and the data stored is also nice. - """ - - def __init__(self, isolated, load_only=None): - # type: (bool, Kind) -> None - super(Configuration, self).__init__() - - _valid_load_only = [kinds.USER, kinds.GLOBAL, kinds.VENV, None] - if load_only not in _valid_load_only: - raise ConfigurationError( - "Got invalid value for load_only - should be one of {}".format( - ", ".join(map(repr, _valid_load_only[:-1])) - ) - ) - self.isolated = isolated # type: bool - self.load_only = load_only # type: Optional[Kind] - - # The order here determines the override order. - self._override_order = [ - kinds.GLOBAL, kinds.USER, kinds.VENV, kinds.ENV, kinds.ENV_VAR - ] - - self._ignore_env_names = ["version", "help"] - - # Because we keep track of where we got the data from - self._parsers = { - variant: [] for variant in self._override_order - } # type: Dict[Kind, List[Tuple[str, RawConfigParser]]] - self._config = { - variant: {} for variant in self._override_order - } # type: Dict[Kind, Dict[str, Any]] - self._modified_parsers = [] # type: List[Tuple[str, RawConfigParser]] - - def load(self): - # type: () -> None - """Loads configuration from configuration files and environment - """ - self._load_config_files() - if not self.isolated: - self._load_environment_vars() - - def get_file_to_edit(self): - # type: () -> Optional[str] - """Returns the file with highest priority in configuration - """ - assert self.load_only is not None, \ - "Need to be specified a file to be editing" - - try: - return self._get_parser_to_modify()[0] - except IndexError: - return None - - def items(self): - # type: () -> Iterable[Tuple[str, Any]] - """Returns key-value pairs like dict.items() representing the loaded - configuration - """ - return self._dictionary.items() - - def get_value(self, key): - # type: (str) -> Any - """Get a value from the 
configuration. - """ - try: - return self._dictionary[key] - except KeyError: - raise ConfigurationError("No such key - {}".format(key)) - - def set_value(self, key, value): - # type: (str, Any) -> None - """Modify a value in the configuration. - """ - self._ensure_have_load_only() - - fname, parser = self._get_parser_to_modify() - - if parser is not None: - section, name = _disassemble_key(key) - - # Modify the parser and the configuration - if not parser.has_section(section): - parser.add_section(section) - parser.set(section, name, value) - - self._config[self.load_only][key] = value - self._mark_as_modified(fname, parser) - - def unset_value(self, key): - # type: (str) -> None - """Unset a value in the configuration. - """ - self._ensure_have_load_only() - - if key not in self._config[self.load_only]: - raise ConfigurationError("No such key - {}".format(key)) - - fname, parser = self._get_parser_to_modify() - - if parser is not None: - section, name = _disassemble_key(key) - - # Remove the key in the parser - modified_something = False - if parser.has_section(section): - # Returns whether the option was removed or not - modified_something = parser.remove_option(section, name) - - if modified_something: - # name removed from parser, section may now be empty - section_iter = iter(parser.items(section)) - try: - val = six.next(section_iter) - except StopIteration: - val = None - - if val is None: - parser.remove_section(section) - - self._mark_as_modified(fname, parser) - else: - raise ConfigurationError( - "Fatal Internal error [id=1]. Please report as a bug." - ) - - del self._config[self.load_only][key] - - def save(self): - # type: () -> None - """Save the currentin-memory state. - """ - self._ensure_have_load_only() - - for fname, parser in self._modified_parsers: - logger.info("Writing to %s", fname) - - # Ensure directory exists. 
- ensure_dir(os.path.dirname(fname)) - - with open(fname, "w") as f: - parser.write(f) # type: ignore - - # - # Private routines - # - - def _ensure_have_load_only(self): - # type: () -> None - if self.load_only is None: - raise ConfigurationError("Needed a specific file to be modifying.") - logger.debug("Will be working with %s variant only", self.load_only) - - @property - def _dictionary(self): - # type: () -> Dict[str, Any] - """A dictionary representing the loaded configuration. - """ - # NOTE: Dictionaries are not populated if not loaded. So, conditionals - # are not needed here. - retval = {} - - for variant in self._override_order: - retval.update(self._config[variant]) - - return retval - - def _load_config_files(self): - # type: () -> None - """Loads configuration from configuration files - """ - config_files = dict(self._iter_config_files()) - if config_files[kinds.ENV][0:1] == [os.devnull]: - logger.debug( - "Skipping loading configuration files due to " - "environment's PIP_CONFIG_FILE being os.devnull" - ) - return - - for variant, files in config_files.items(): - for fname in files: - # If there's specific variant set in `load_only`, load only - # that variant, not the others. 
- if self.load_only is not None and variant != self.load_only: - logger.debug( - "Skipping file '%s' (variant: %s)", fname, variant - ) - continue - - parser = self._load_file(variant, fname) - - # Keeping track of the parsers used - self._parsers[variant].append((fname, parser)) - - def _load_file(self, variant, fname): - # type: (Kind, str) -> RawConfigParser - logger.debug("For variant '%s', will try loading '%s'", variant, fname) - parser = self._construct_parser(fname) - - for section in parser.sections(): - items = parser.items(section) - self._config[variant].update(self._normalized_keys(section, items)) - - return parser - - def _construct_parser(self, fname): - # type: (str) -> RawConfigParser - parser = configparser.RawConfigParser() - # If there is no such file, don't bother reading it but create the - # parser anyway, to hold the data. - # Doing this is useful when modifying and saving files, where we don't - # need to construct a parser. - if os.path.exists(fname): - try: - parser.read(fname) - except UnicodeDecodeError: - # See https://github.com/pypa/pip/issues/4963 - raise ConfigurationFileCouldNotBeLoaded( - reason="contains invalid {} characters".format( - locale.getpreferredencoding(False) - ), - fname=fname, - ) - except configparser.Error as error: - # See https://github.com/pypa/pip/issues/4893 - raise ConfigurationFileCouldNotBeLoaded(error=error) - return parser - - def _load_environment_vars(self): - # type: () -> None - """Loads configuration from environment variables - """ - self._config[kinds.ENV_VAR].update( - self._normalized_keys(":env:", self._get_environ_vars()) - ) - - def _normalized_keys(self, section, items): - # type: (str, Iterable[Tuple[str, Any]]) -> Dict[str, Any] - """Normalizes items to construct a dictionary with normalized keys. - - This routine is where the names become keys and are made the same - regardless of source - configuration files or environment. 
- """ - normalized = {} - for name, val in items: - key = section + "." + _normalize_name(name) - normalized[key] = val - return normalized - - def _get_environ_vars(self): - # type: () -> Iterable[Tuple[str, str]] - """Returns a generator with all environmental vars with prefix PIP_""" - for key, val in os.environ.items(): - should_be_yielded = ( - key.startswith("PIP_") and - key[4:].lower() not in self._ignore_env_names - ) - if should_be_yielded: - yield key[4:].lower(), val - - # XXX: This is patched in the tests. - def _iter_config_files(self): - # type: () -> Iterable[Tuple[Kind, List[str]]] - """Yields variant and configuration files associated with it. - - This should be treated like items of a dictionary. - """ - # SMELL: Move the conditions out of this function - - # environment variables have the lowest priority - config_file = os.environ.get('PIP_CONFIG_FILE', None) - if config_file is not None: - yield kinds.ENV, [config_file] - else: - yield kinds.ENV, [] - - # at the base we have any global configuration - yield kinds.GLOBAL, list(site_config_files) - - # per-user configuration next - should_load_user_config = not self.isolated and not ( - config_file and os.path.exists(config_file) - ) - if should_load_user_config: - # The legacy config file is overridden by the new config file - yield kinds.USER, [legacy_config_file, new_config_file] - - # finally virtualenv configuration first trumping others - if running_under_virtualenv(): - yield kinds.VENV, [venv_config_file] - - def _get_parser_to_modify(self): - # type: () -> Tuple[str, RawConfigParser] - # Determine which parser to modify - parsers = self._parsers[self.load_only] - if not parsers: - # This should not happen if everything works correctly. - raise ConfigurationError( - "Fatal Internal error [id=2]. Please report as a bug." - ) - - # Use the highest priority parser. - return parsers[-1] - - # XXX: This is patched in the tests. 
- def _mark_as_modified(self, fname, parser): - # type: (str, RawConfigParser) -> None - file_parser_tuple = (fname, parser) - if file_parser_tuple not in self._modified_parsers: - self._modified_parsers.append(file_parser_tuple) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/download.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/download.py deleted file mode 100644 index 2bbe176..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/download.py +++ /dev/null @@ -1,971 +0,0 @@ -from __future__ import absolute_import - -import cgi -import email.utils -import getpass -import json -import logging -import mimetypes -import os -import platform -import re -import shutil -import sys - -from pip._vendor import requests, six, urllib3 -from pip._vendor.cachecontrol import CacheControlAdapter -from pip._vendor.cachecontrol.caches import FileCache -from pip._vendor.lockfile import LockError -from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter -from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth -from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response -from pip._vendor.requests.structures import CaseInsensitiveDict -from pip._vendor.requests.utils import get_netrc_auth -# NOTE: XMLRPC Client is not annotated in typeshed as on 2017-07-17, which is -# why we ignore the type on this import -from pip._vendor.six.moves import xmlrpc_client # type: ignore -from pip._vendor.six.moves.urllib import parse as urllib_parse -from pip._vendor.six.moves.urllib import request as urllib_request -from pip._vendor.urllib3.util import IS_PYOPENSSL - -import pip -from pip._internal.exceptions import HashMismatch, InstallationError -from pip._internal.locations import write_delete_marker_file -from pip._internal.models.index import PyPI -from pip._internal.utils.encoding import auto_decode -from pip._internal.utils.filesystem import check_path_owner -from 
pip._internal.utils.glibc import libc_ver -from pip._internal.utils.logging import indent_log -from pip._internal.utils.misc import ( - ARCHIVE_EXTENSIONS, ask_path_exists, backup_dir, call_subprocess, consume, - display_path, format_size, get_installed_version, rmtree, - split_auth_from_netloc, splitext, unpack_file, -) -from pip._internal.utils.setuptools_build import SETUPTOOLS_SHIM -from pip._internal.utils.temp_dir import TempDirectory -from pip._internal.utils.typing import MYPY_CHECK_RUNNING -from pip._internal.utils.ui import DownloadProgressProvider -from pip._internal.vcs import vcs - -if MYPY_CHECK_RUNNING: - from typing import ( # noqa: F401 - Optional, Tuple, Dict, IO, Text, Union - ) - from pip._internal.models.link import Link # noqa: F401 - from pip._internal.utils.hashes import Hashes # noqa: F401 - from pip._internal.vcs import AuthInfo # noqa: F401 - -try: - import ssl # noqa -except ImportError: - ssl = None - -HAS_TLS = (ssl is not None) or IS_PYOPENSSL - -__all__ = ['get_file_content', - 'is_url', 'url_to_path', 'path_to_url', - 'is_archive_file', 'unpack_vcs_link', - 'unpack_file_url', 'is_vcs_url', 'is_file_url', - 'unpack_http_url', 'unpack_url'] - - -logger = logging.getLogger(__name__) - - -def user_agent(): - """ - Return a string representing the user agent. 
- """ - data = { - "installer": {"name": "pip", "version": pip.__version__}, - "python": platform.python_version(), - "implementation": { - "name": platform.python_implementation(), - }, - } - - if data["implementation"]["name"] == 'CPython': - data["implementation"]["version"] = platform.python_version() - elif data["implementation"]["name"] == 'PyPy': - if sys.pypy_version_info.releaselevel == 'final': - pypy_version_info = sys.pypy_version_info[:3] - else: - pypy_version_info = sys.pypy_version_info - data["implementation"]["version"] = ".".join( - [str(x) for x in pypy_version_info] - ) - elif data["implementation"]["name"] == 'Jython': - # Complete Guess - data["implementation"]["version"] = platform.python_version() - elif data["implementation"]["name"] == 'IronPython': - # Complete Guess - data["implementation"]["version"] = platform.python_version() - - if sys.platform.startswith("linux"): - from pip._vendor import distro - distro_infos = dict(filter( - lambda x: x[1], - zip(["name", "version", "id"], distro.linux_distribution()), - )) - libc = dict(filter( - lambda x: x[1], - zip(["lib", "version"], libc_ver()), - )) - if libc: - distro_infos["libc"] = libc - if distro_infos: - data["distro"] = distro_infos - - if sys.platform.startswith("darwin") and platform.mac_ver()[0]: - data["distro"] = {"name": "macOS", "version": platform.mac_ver()[0]} - - if platform.system(): - data.setdefault("system", {})["name"] = platform.system() - - if platform.release(): - data.setdefault("system", {})["release"] = platform.release() - - if platform.machine(): - data["cpu"] = platform.machine() - - if HAS_TLS: - data["openssl_version"] = ssl.OPENSSL_VERSION - - setuptools_version = get_installed_version("setuptools") - if setuptools_version is not None: - data["setuptools_version"] = setuptools_version - - return "{data[installer][name]}/{data[installer][version]} {json}".format( - data=data, - json=json.dumps(data, separators=(",", ":"), sort_keys=True), - ) - - -class 
MultiDomainBasicAuth(AuthBase): - - def __init__(self, prompting=True): - # type: (bool) -> None - self.prompting = prompting - self.passwords = {} # type: Dict[str, AuthInfo] - - def __call__(self, req): - parsed = urllib_parse.urlparse(req.url) - - # Split the credentials from the netloc. - netloc, url_user_password = split_auth_from_netloc(parsed.netloc) - - # Set the url of the request to the url without any credentials - req.url = urllib_parse.urlunparse(parsed[:1] + (netloc,) + parsed[2:]) - - # Use any stored credentials that we have for this netloc - username, password = self.passwords.get(netloc, (None, None)) - - # Use the credentials embedded in the url if we have none stored - if username is None: - username, password = url_user_password - - # Get creds from netrc if we still don't have them - if username is None and password is None: - netrc_auth = get_netrc_auth(req.url) - username, password = netrc_auth if netrc_auth else (None, None) - - if username or password: - # Store the username and password - self.passwords[netloc] = (username, password) - - # Send the basic auth with this request - req = HTTPBasicAuth(username or "", password or "")(req) - - # Attach a hook to handle 401 responses - req.register_hook("response", self.handle_401) - - return req - - def handle_401(self, resp, **kwargs): - # We only care about 401 responses, anything else we want to just - # pass through the actual response - if resp.status_code != 401: - return resp - - # We are not able to prompt the user so simply return the response - if not self.prompting: - return resp - - parsed = urllib_parse.urlparse(resp.url) - - # Prompt the user for a new username and password - username = six.moves.input("User for %s: " % parsed.netloc) - password = getpass.getpass("Password: ") - - # Store the new username and password to use for future requests - if username or password: - self.passwords[parsed.netloc] = (username, password) - - # Consume content and release the original 
connection to allow our new - # request to reuse the same one. - resp.content - resp.raw.release_conn() - - # Add our new username and password to the request - req = HTTPBasicAuth(username or "", password or "")(resp.request) - req.register_hook("response", self.warn_on_401) - - # Send our new request - new_resp = resp.connection.send(req, **kwargs) - new_resp.history.append(resp) - - return new_resp - - def warn_on_401(self, resp, **kwargs): - # warn user that they provided incorrect credentials - if resp.status_code == 401: - logger.warning('401 Error, Credentials not correct for %s', - resp.request.url) - - -class LocalFSAdapter(BaseAdapter): - - def send(self, request, stream=None, timeout=None, verify=None, cert=None, - proxies=None): - pathname = url_to_path(request.url) - - resp = Response() - resp.status_code = 200 - resp.url = request.url - - try: - stats = os.stat(pathname) - except OSError as exc: - resp.status_code = 404 - resp.raw = exc - else: - modified = email.utils.formatdate(stats.st_mtime, usegmt=True) - content_type = mimetypes.guess_type(pathname)[0] or "text/plain" - resp.headers = CaseInsensitiveDict({ - "Content-Type": content_type, - "Content-Length": stats.st_size, - "Last-Modified": modified, - }) - - resp.raw = open(pathname, "rb") - resp.close = resp.raw.close - - return resp - - def close(self): - pass - - -class SafeFileCache(FileCache): - """ - A file based cache which is safe to use even when the target directory may - not be accessible or writable. - """ - - def __init__(self, *args, **kwargs): - super(SafeFileCache, self).__init__(*args, **kwargs) - - # Check to ensure that the directory containing our cache directory - # is owned by the user current executing pip. If it does not exist - # we will check the parent directory until we find one that does exist. - # If it is not owned by the user executing pip then we will disable - # the cache and log a warning. 
- if not check_path_owner(self.directory): - logger.warning( - "The directory '%s' or its parent directory is not owned by " - "the current user and the cache has been disabled. Please " - "check the permissions and owner of that directory. If " - "executing pip with sudo, you may want sudo's -H flag.", - self.directory, - ) - - # Set our directory to None to disable the Cache - self.directory = None - - def get(self, *args, **kwargs): - # If we don't have a directory, then the cache should be a no-op. - if self.directory is None: - return - - try: - return super(SafeFileCache, self).get(*args, **kwargs) - except (LockError, OSError, IOError): - # We intentionally silence this error, if we can't access the cache - # then we can just skip caching and process the request as if - # caching wasn't enabled. - pass - - def set(self, *args, **kwargs): - # If we don't have a directory, then the cache should be a no-op. - if self.directory is None: - return - - try: - return super(SafeFileCache, self).set(*args, **kwargs) - except (LockError, OSError, IOError): - # We intentionally silence this error, if we can't access the cache - # then we can just skip caching and process the request as if - # caching wasn't enabled. - pass - - def delete(self, *args, **kwargs): - # If we don't have a directory, then the cache should be a no-op. - if self.directory is None: - return - - try: - return super(SafeFileCache, self).delete(*args, **kwargs) - except (LockError, OSError, IOError): - # We intentionally silence this error, if we can't access the cache - # then we can just skip caching and process the request as if - # caching wasn't enabled. 
- pass - - -class InsecureHTTPAdapter(HTTPAdapter): - - def cert_verify(self, conn, url, verify, cert): - conn.cert_reqs = 'CERT_NONE' - conn.ca_certs = None - - -class PipSession(requests.Session): - - timeout = None # type: Optional[int] - - def __init__(self, *args, **kwargs): - retries = kwargs.pop("retries", 0) - cache = kwargs.pop("cache", None) - insecure_hosts = kwargs.pop("insecure_hosts", []) - - super(PipSession, self).__init__(*args, **kwargs) - - # Attach our User Agent to the request - self.headers["User-Agent"] = user_agent() - - # Attach our Authentication handler to the session - self.auth = MultiDomainBasicAuth() - - # Create our urllib3.Retry instance which will allow us to customize - # how we handle retries. - retries = urllib3.Retry( - # Set the total number of retries that a particular request can - # have. - total=retries, - - # A 503 error from PyPI typically means that the Fastly -> Origin - # connection got interrupted in some way. A 503 error in general - # is typically considered a transient error so we'll go ahead and - # retry it. - # A 500 may indicate transient error in Amazon S3 - # A 520 or 527 - may indicate transient error in CloudFlare - status_forcelist=[500, 503, 520, 527], - - # Add a small amount of back off between failed requests in - # order to prevent hammering the service. - backoff_factor=0.25, - ) - - # We want to _only_ cache responses on securely fetched origins. We do - # this because we can't validate the response of an insecurely fetched - # origin, and we don't want someone to be able to poison the cache and - # require manual eviction from the cache to fix it. - if cache: - secure_adapter = CacheControlAdapter( - cache=SafeFileCache(cache, use_dir_lock=True), - max_retries=retries, - ) - else: - secure_adapter = HTTPAdapter(max_retries=retries) - - # Our Insecure HTTPAdapter disables HTTPS validation. 
It does not - # support caching (see above) so we'll use it for all http:// URLs as - # well as any https:// host that we've marked as ignoring TLS errors - # for. - insecure_adapter = InsecureHTTPAdapter(max_retries=retries) - - self.mount("https://", secure_adapter) - self.mount("http://", insecure_adapter) - - # Enable file:// urls - self.mount("file://", LocalFSAdapter()) - - # We want to use a non-validating adapter for any requests which are - # deemed insecure. - for host in insecure_hosts: - self.mount("https://{}/".format(host), insecure_adapter) - - def request(self, method, url, *args, **kwargs): - # Allow setting a default timeout on a session - kwargs.setdefault("timeout", self.timeout) - - # Dispatch the actual request - return super(PipSession, self).request(method, url, *args, **kwargs) - - -def get_file_content(url, comes_from=None, session=None): - # type: (str, Optional[str], Optional[PipSession]) -> Tuple[str, Text] - """Gets the content of a file; it may be a filename, file: URL, or - http: URL. Returns (location, content). Content is unicode. - - :param url: File path or url. - :param comes_from: Origin description of requirements. - :param session: Instance of pip.download.PipSession. 
- """ - if session is None: - raise TypeError( - "get_file_content() missing 1 required keyword argument: 'session'" - ) - - match = _scheme_re.search(url) - if match: - scheme = match.group(1).lower() - if (scheme == 'file' and comes_from and - comes_from.startswith('http')): - raise InstallationError( - 'Requirements file %s references URL %s, which is local' - % (comes_from, url)) - if scheme == 'file': - path = url.split(':', 1)[1] - path = path.replace('\\', '/') - match = _url_slash_drive_re.match(path) - if match: - path = match.group(1) + ':' + path.split('|', 1)[1] - path = urllib_parse.unquote(path) - if path.startswith('/'): - path = '/' + path.lstrip('/') - url = path - else: - # FIXME: catch some errors - resp = session.get(url) - resp.raise_for_status() - return resp.url, resp.text - try: - with open(url, 'rb') as f: - content = auto_decode(f.read()) - except IOError as exc: - raise InstallationError( - 'Could not open requirements file: %s' % str(exc) - ) - return url, content - - -_scheme_re = re.compile(r'^(http|https|file):', re.I) -_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I) - - -def is_url(name): - # type: (Union[str, Text]) -> bool - """Returns true if the name looks like a URL""" - if ':' not in name: - return False - scheme = name.split(':', 1)[0].lower() - return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes - - -def url_to_path(url): - # type: (str) -> str - """ - Convert a file: URL to a path. - """ - assert url.startswith('file:'), ( - "You can only turn file: urls into filenames (not %r)" % url) - - _, netloc, path, _, _ = urllib_parse.urlsplit(url) - - # if we have a UNC path, prepend UNC share notation - if netloc: - netloc = '\\\\' + netloc - - path = urllib_request.url2pathname(netloc + path) - return path - - -def path_to_url(path): - # type: (Union[str, Text]) -> str - """ - Convert a path to a file: URL. The path will be made absolute and have - quoted path parts. 
- """ - path = os.path.normpath(os.path.abspath(path)) - url = urllib_parse.urljoin('file:', urllib_request.pathname2url(path)) - return url - - -def is_archive_file(name): - # type: (str) -> bool - """Return True if `name` is a considered as an archive file.""" - ext = splitext(name)[1].lower() - if ext in ARCHIVE_EXTENSIONS: - return True - return False - - -def unpack_vcs_link(link, location): - vcs_backend = _get_used_vcs_backend(link) - vcs_backend.unpack(location) - - -def _get_used_vcs_backend(link): - for backend in vcs.backends: - if link.scheme in backend.schemes: - vcs_backend = backend(link.url) - return vcs_backend - - -def is_vcs_url(link): - # type: (Link) -> bool - return bool(_get_used_vcs_backend(link)) - - -def is_file_url(link): - # type: (Link) -> bool - return link.url.lower().startswith('file:') - - -def is_dir_url(link): - # type: (Link) -> bool - """Return whether a file:// Link points to a directory. - - ``link`` must not have any other scheme but file://. Call is_file_url() - first. - - """ - link_path = url_to_path(link.url_without_fragment) - return os.path.isdir(link_path) - - -def _progress_indicator(iterable, *args, **kwargs): - return iterable - - -def _download_url( - resp, # type: Response - link, # type: Link - content_file, # type: IO - hashes, # type: Hashes - progress_bar # type: str -): - # type: (...) -> None - try: - total_length = int(resp.headers['content-length']) - except (ValueError, KeyError, TypeError): - total_length = 0 - - cached_resp = getattr(resp, "from_cache", False) - if logger.getEffectiveLevel() > logging.INFO: - show_progress = False - elif cached_resp: - show_progress = False - elif total_length > (40 * 1000): - show_progress = True - elif not total_length: - show_progress = True - else: - show_progress = False - - show_url = link.show_url - - def resp_read(chunk_size): - try: - # Special case for urllib3. 
- for chunk in resp.raw.stream( - chunk_size, - # We use decode_content=False here because we don't - # want urllib3 to mess with the raw bytes we get - # from the server. If we decompress inside of - # urllib3 then we cannot verify the checksum - # because the checksum will be of the compressed - # file. This breakage will only occur if the - # server adds a Content-Encoding header, which - # depends on how the server was configured: - # - Some servers will notice that the file isn't a - # compressible file and will leave the file alone - # and with an empty Content-Encoding - # - Some servers will notice that the file is - # already compressed and will leave the file - # alone and will add a Content-Encoding: gzip - # header - # - Some servers won't notice anything at all and - # will take a file that's already been compressed - # and compress it again and set the - # Content-Encoding: gzip header - # - # By setting this not to decode automatically we - # hope to eliminate problems with the second case. - decode_content=False): - yield chunk - except AttributeError: - # Standard file-like object. 
- while True: - chunk = resp.raw.read(chunk_size) - if not chunk: - break - yield chunk - - def written_chunks(chunks): - for chunk in chunks: - content_file.write(chunk) - yield chunk - - progress_indicator = _progress_indicator - - if link.netloc == PyPI.netloc: - url = show_url - else: - url = link.url_without_fragment - - if show_progress: # We don't show progress on cached responses - progress_indicator = DownloadProgressProvider(progress_bar, - max=total_length) - if total_length: - logger.info("Downloading %s (%s)", url, format_size(total_length)) - else: - logger.info("Downloading %s", url) - elif cached_resp: - logger.info("Using cached %s", url) - else: - logger.info("Downloading %s", url) - - logger.debug('Downloading from URL %s', link) - - downloaded_chunks = written_chunks( - progress_indicator( - resp_read(CONTENT_CHUNK_SIZE), - CONTENT_CHUNK_SIZE - ) - ) - if hashes: - hashes.check_against_chunks(downloaded_chunks) - else: - consume(downloaded_chunks) - - -def _copy_file(filename, location, link): - copy = True - download_location = os.path.join(location, link.filename) - if os.path.exists(download_location): - response = ask_path_exists( - 'The file %s exists. 
(i)gnore, (w)ipe, (b)ackup, (a)abort' % - display_path(download_location), ('i', 'w', 'b', 'a')) - if response == 'i': - copy = False - elif response == 'w': - logger.warning('Deleting %s', display_path(download_location)) - os.remove(download_location) - elif response == 'b': - dest_file = backup_dir(download_location) - logger.warning( - 'Backing up %s to %s', - display_path(download_location), - display_path(dest_file), - ) - shutil.move(download_location, dest_file) - elif response == 'a': - sys.exit(-1) - if copy: - shutil.copy(filename, download_location) - logger.info('Saved %s', display_path(download_location)) - - -def unpack_http_url( - link, # type: Link - location, # type: str - download_dir=None, # type: Optional[str] - session=None, # type: Optional[PipSession] - hashes=None, # type: Optional[Hashes] - progress_bar="on" # type: str -): - # type: (...) -> None - if session is None: - raise TypeError( - "unpack_http_url() missing 1 required keyword argument: 'session'" - ) - - with TempDirectory(kind="unpack") as temp_dir: - # If a download dir is specified, is the file already downloaded there? - already_downloaded_path = None - if download_dir: - already_downloaded_path = _check_download_dir(link, - download_dir, - hashes) - - if already_downloaded_path: - from_path = already_downloaded_path - content_type = mimetypes.guess_type(from_path)[0] - else: - # let's download to a tmp dir - from_path, content_type = _download_http_url(link, - session, - temp_dir.path, - hashes, - progress_bar) - - # unpack the archive to the build dir location. 
even when only - # downloading archives, they have to be unpacked to parse dependencies - unpack_file(from_path, location, content_type, link) - - # a download dir is specified; let's copy the archive there - if download_dir and not already_downloaded_path: - _copy_file(from_path, download_dir, link) - - if not already_downloaded_path: - os.unlink(from_path) - - -def unpack_file_url( - link, # type: Link - location, # type: str - download_dir=None, # type: Optional[str] - hashes=None # type: Optional[Hashes] -): - # type: (...) -> None - """Unpack link into location. - - If download_dir is provided and link points to a file, make a copy - of the link file inside download_dir. - """ - link_path = url_to_path(link.url_without_fragment) - - # If it's a url to a local directory - if is_dir_url(link): - if os.path.isdir(location): - rmtree(location) - shutil.copytree(link_path, location, symlinks=True) - if download_dir: - logger.info('Link is a directory, ignoring download_dir') - return - - # If --require-hashes is off, `hashes` is either empty, the - # link's embedded hash, or MissingHashes; it is required to - # match. If --require-hashes is on, we are satisfied by any - # hash in `hashes` matching: a URL-based or an option-based - # one; no internet-sourced hash will be in `hashes`. - if hashes: - hashes.check_against_path(link_path) - - # If a download dir is specified, is the file already there and valid? - already_downloaded_path = None - if download_dir: - already_downloaded_path = _check_download_dir(link, - download_dir, - hashes) - - if already_downloaded_path: - from_path = already_downloaded_path - else: - from_path = link_path - - content_type = mimetypes.guess_type(from_path)[0] - - # unpack the archive to the build dir location. 
even when only downloading - # archives, they have to be unpacked to parse dependencies - unpack_file(from_path, location, content_type, link) - - # a download dir is specified and not already downloaded - if download_dir and not already_downloaded_path: - _copy_file(from_path, download_dir, link) - - -def _copy_dist_from_dir(link_path, location): - """Copy distribution files in `link_path` to `location`. - - Invoked when user requests to install a local directory. E.g.: - - pip install . - pip install ~/dev/git-repos/python-prompt-toolkit - - """ - - # Note: This is currently VERY SLOW if you have a lot of data in the - # directory, because it copies everything with `shutil.copytree`. - # What it should really do is build an sdist and install that. - # See https://github.com/pypa/pip/issues/2195 - - if os.path.isdir(location): - rmtree(location) - - # build an sdist - setup_py = 'setup.py' - sdist_args = [sys.executable] - sdist_args.append('-c') - sdist_args.append(SETUPTOOLS_SHIM % setup_py) - sdist_args.append('sdist') - sdist_args += ['--dist-dir', location] - logger.info('Running setup.py sdist for %s', link_path) - - with indent_log(): - call_subprocess(sdist_args, cwd=link_path, show_stdout=False) - - # unpack sdist into `location` - sdist = os.path.join(location, os.listdir(location)[0]) - logger.info('Unpacking sdist %s into %s', sdist, location) - unpack_file(sdist, location, content_type=None, link=None) - - -class PipXmlrpcTransport(xmlrpc_client.Transport): - """Provide a `xmlrpclib.Transport` implementation via a `PipSession` - object. 
- """ - - def __init__(self, index_url, session, use_datetime=False): - xmlrpc_client.Transport.__init__(self, use_datetime) - index_parts = urllib_parse.urlparse(index_url) - self._scheme = index_parts.scheme - self._session = session - - def request(self, host, handler, request_body, verbose=False): - parts = (self._scheme, host, handler, None, None, None) - url = urllib_parse.urlunparse(parts) - try: - headers = {'Content-Type': 'text/xml'} - response = self._session.post(url, data=request_body, - headers=headers, stream=True) - response.raise_for_status() - self.verbose = verbose - return self.parse_response(response.raw) - except requests.HTTPError as exc: - logger.critical( - "HTTP error %s while getting %s", - exc.response.status_code, url, - ) - raise - - -def unpack_url( - link, # type: Optional[Link] - location, # type: Optional[str] - download_dir=None, # type: Optional[str] - only_download=False, # type: bool - session=None, # type: Optional[PipSession] - hashes=None, # type: Optional[Hashes] - progress_bar="on" # type: str -): - # type: (...) -> None - """Unpack link. - If link is a VCS link: - if only_download, export into download_dir and ignore location - else unpack into location - for other types of link: - - unpack into location - - if download_dir, copy the file into download_dir - - if only_download, mark location for deletion - - :param hashes: A Hashes object, one of whose embedded hashes must match, - or HashMismatch will be raised. If the Hashes is empty, no matches are - required, and unhashable types of requirements (like VCS ones, which - would ordinarily raise HashUnsupported) are allowed. 
- """ - # non-editable vcs urls - if is_vcs_url(link): - unpack_vcs_link(link, location) - - # file urls - elif is_file_url(link): - unpack_file_url(link, location, download_dir, hashes=hashes) - - # http urls - else: - if session is None: - session = PipSession() - - unpack_http_url( - link, - location, - download_dir, - session, - hashes=hashes, - progress_bar=progress_bar - ) - if only_download: - write_delete_marker_file(location) - - -def _download_http_url( - link, # type: Link - session, # type: PipSession - temp_dir, # type: str - hashes, # type: Hashes - progress_bar # type: str -): - # type: (...) -> Tuple[str, str] - """Download link url into temp_dir using provided session""" - target_url = link.url.split('#', 1)[0] - try: - resp = session.get( - target_url, - # We use Accept-Encoding: identity here because requests - # defaults to accepting compressed responses. This breaks in - # a variety of ways depending on how the server is configured. - # - Some servers will notice that the file isn't a compressible - # file and will leave the file alone and with an empty - # Content-Encoding - # - Some servers will notice that the file is already - # compressed and will leave the file alone and will add a - # Content-Encoding: gzip header - # - Some servers won't notice anything at all and will take - # a file that's already been compressed and compress it again - # and set the Content-Encoding: gzip header - # By setting this to request only the identity encoding We're - # hoping to eliminate the third case. Hopefully there does not - # exist a server which when given a file will notice it is - # already compressed and that you're not asking for a - # compressed file and will then decompress it before sending - # because if that's the case I don't think it'll ever be - # possible to make this work. 
- headers={"Accept-Encoding": "identity"}, - stream=True, - ) - resp.raise_for_status() - except requests.HTTPError as exc: - logger.critical( - "HTTP error %s while getting %s", exc.response.status_code, link, - ) - raise - - content_type = resp.headers.get('content-type', '') - filename = link.filename # fallback - # Have a look at the Content-Disposition header for a better guess - content_disposition = resp.headers.get('content-disposition') - if content_disposition: - type, params = cgi.parse_header(content_disposition) - # We use ``or`` here because we don't want to use an "empty" value - # from the filename param. - filename = params.get('filename') or filename - ext = splitext(filename)[1] - if not ext: - ext = mimetypes.guess_extension(content_type) - if ext: - filename += ext - if not ext and link.url != resp.url: - ext = os.path.splitext(resp.url)[1] - if ext: - filename += ext - file_path = os.path.join(temp_dir, filename) - with open(file_path, 'wb') as content_file: - _download_url(resp, link, content_file, hashes, progress_bar) - return file_path, content_type - - -def _check_download_dir(link, download_dir, hashes): - # type: (Link, str, Hashes) -> Optional[str] - """ Check download_dir for previously downloaded file with correct hash - If a correct file is found return its path else None - """ - download_path = os.path.join(download_dir, link.filename) - if os.path.exists(download_path): - # If already downloaded, does its hash match? - logger.info('File was already downloaded %s', download_path) - if hashes: - try: - hashes.check_against_path(download_path) - except HashMismatch: - logger.warning( - 'Previously-downloaded file %s has bad hash. 
' - 'Re-downloading.', - download_path - ) - os.unlink(download_path) - return None - return download_path - return None diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/exceptions.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/exceptions.py deleted file mode 100644 index 38ceeea..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/exceptions.py +++ /dev/null @@ -1,274 +0,0 @@ -"""Exceptions used throughout package""" -from __future__ import absolute_import - -from itertools import chain, groupby, repeat - -from pip._vendor.six import iteritems - -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from typing import Optional # noqa: F401 - from pip._internal.req.req_install import InstallRequirement # noqa: F401 - - -class PipError(Exception): - """Base pip exception""" - - -class ConfigurationError(PipError): - """General exception in configuration""" - - -class InstallationError(PipError): - """General exception during installation""" - - -class UninstallationError(PipError): - """General exception during uninstallation""" - - -class DistributionNotFound(InstallationError): - """Raised when a distribution cannot be found to satisfy a requirement""" - - -class RequirementsFileParseError(InstallationError): - """Raised when a general error occurs parsing a requirements file line.""" - - -class BestVersionAlreadyInstalled(PipError): - """Raised when the most up-to-date version of a package is already - installed.""" - - -class BadCommand(PipError): - """Raised when virtualenv or a command is not found""" - - -class CommandError(PipError): - """Raised when there is an error in command-line arguments""" - - -class PreviousBuildDirError(PipError): - """Raised when there's a previous conflicting build directory""" - - -class InvalidWheelFilename(InstallationError): - """Invalid wheel filename.""" - - -class UnsupportedWheel(InstallationError): - 
"""Unsupported wheel.""" - - -class HashErrors(InstallationError): - """Multiple HashError instances rolled into one for reporting""" - - def __init__(self): - self.errors = [] - - def append(self, error): - self.errors.append(error) - - def __str__(self): - lines = [] - self.errors.sort(key=lambda e: e.order) - for cls, errors_of_cls in groupby(self.errors, lambda e: e.__class__): - lines.append(cls.head) - lines.extend(e.body() for e in errors_of_cls) - if lines: - return '\n'.join(lines) - - def __nonzero__(self): - return bool(self.errors) - - def __bool__(self): - return self.__nonzero__() - - -class HashError(InstallationError): - """ - A failure to verify a package against known-good hashes - - :cvar order: An int sorting hash exception classes by difficulty of - recovery (lower being harder), so the user doesn't bother fretting - about unpinned packages when he has deeper issues, like VCS - dependencies, to deal with. Also keeps error reports in a - deterministic order. - :cvar head: A section heading for display above potentially many - exceptions of this kind - :ivar req: The InstallRequirement that triggered this error. This is - pasted on after the exception is instantiated, because it's not - typically available earlier. - - """ - req = None # type: Optional[InstallRequirement] - head = '' - - def body(self): - """Return a summary of me for display under the heading. - - This default implementation simply prints a description of the - triggering requirement. - - :param req: The InstallRequirement that provoked this error, with - populate_link() having already been called - - """ - return ' %s' % self._requirement_name() - - def __str__(self): - return '%s\n%s' % (self.head, self.body()) - - def _requirement_name(self): - """Return a description of the requirement that triggered me. 
- - This default implementation returns long description of the req, with - line numbers - - """ - return str(self.req) if self.req else 'unknown package' - - -class VcsHashUnsupported(HashError): - """A hash was provided for a version-control-system-based requirement, but - we don't have a method for hashing those.""" - - order = 0 - head = ("Can't verify hashes for these requirements because we don't " - "have a way to hash version control repositories:") - - -class DirectoryUrlHashUnsupported(HashError): - """A hash was provided for a version-control-system-based requirement, but - we don't have a method for hashing those.""" - - order = 1 - head = ("Can't verify hashes for these file:// requirements because they " - "point to directories:") - - -class HashMissing(HashError): - """A hash was needed for a requirement but is absent.""" - - order = 2 - head = ('Hashes are required in --require-hashes mode, but they are ' - 'missing from some requirements. Here is a list of those ' - 'requirements along with the hashes their downloaded archives ' - 'actually had. Add lines like these to your requirements files to ' - 'prevent tampering. (If you did not enable --require-hashes ' - 'manually, note that it turns on automatically when any package ' - 'has a hash.)') - - def __init__(self, gotten_hash): - """ - :param gotten_hash: The hash of the (possibly malicious) archive we - just downloaded - """ - self.gotten_hash = gotten_hash - - def body(self): - # Dodge circular import. - from pip._internal.utils.hashes import FAVORITE_HASH - - package = None - if self.req: - # In the case of URL-based requirements, display the original URL - # seen in the requirements file rather than the package name, - # so the output can be directly copied into the requirements file. - package = (self.req.original_link if self.req.original_link - # In case someone feeds something downright stupid - # to InstallRequirement's constructor. 
- else getattr(self.req, 'req', None)) - return ' %s --hash=%s:%s' % (package or 'unknown package', - FAVORITE_HASH, - self.gotten_hash) - - -class HashUnpinned(HashError): - """A requirement had a hash specified but was not pinned to a specific - version.""" - - order = 3 - head = ('In --require-hashes mode, all requirements must have their ' - 'versions pinned with ==. These do not:') - - -class HashMismatch(HashError): - """ - Distribution file hash values don't match. - - :ivar package_name: The name of the package that triggered the hash - mismatch. Feel free to write to this after the exception is raise to - improve its error message. - - """ - order = 4 - head = ('THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS ' - 'FILE. If you have updated the package versions, please update ' - 'the hashes. Otherwise, examine the package contents carefully; ' - 'someone may have tampered with them.') - - def __init__(self, allowed, gots): - """ - :param allowed: A dict of algorithm names pointing to lists of allowed - hex digests - :param gots: A dict of algorithm names pointing to hashes we - actually got from the files under suspicion - """ - self.allowed = allowed - self.gots = gots - - def body(self): - return ' %s:\n%s' % (self._requirement_name(), - self._hash_comparison()) - - def _hash_comparison(self): - """ - Return a comparison of actual and expected hash values. - - Example:: - - Expected sha256 abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcde - or 123451234512345123451234512345123451234512345 - Got bcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdef - - """ - def hash_then_or(hash_name): - # For now, all the decent hashes have 6-char names, so we can get - # away with hard-coding space literals. 
- return chain([hash_name], repeat(' or')) - - lines = [] - for hash_name, expecteds in iteritems(self.allowed): - prefix = hash_then_or(hash_name) - lines.extend((' Expected %s %s' % (next(prefix), e)) - for e in expecteds) - lines.append(' Got %s\n' % - self.gots[hash_name].hexdigest()) - prefix = ' or' - return '\n'.join(lines) - - -class UnsupportedPythonVersion(InstallationError): - """Unsupported python version according to Requires-Python package - metadata.""" - - -class ConfigurationFileCouldNotBeLoaded(ConfigurationError): - """When there are errors while loading a configuration file - """ - - def __init__(self, reason="could not be loaded", fname=None, error=None): - super(ConfigurationFileCouldNotBeLoaded, self).__init__(error) - self.reason = reason - self.fname = fname - self.error = error - - def __str__(self): - if self.fname is not None: - message_part = " in {}.".format(self.fname) - else: - assert self.error is not None - message_part = ".\n{}\n".format(self.error.message) - return "Configuration file {}{}".format(self.reason, message_part) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/index.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/index.py deleted file mode 100644 index 9eda3a3..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/index.py +++ /dev/null @@ -1,990 +0,0 @@ -"""Routines related to PyPI, indexes""" -from __future__ import absolute_import - -import cgi -import itertools -import logging -import mimetypes -import os -import posixpath -import re -import sys -from collections import namedtuple - -from pip._vendor import html5lib, requests, six -from pip._vendor.distlib.compat import unescape -from pip._vendor.packaging import specifiers -from pip._vendor.packaging.utils import canonicalize_name -from pip._vendor.packaging.version import parse as parse_version -from pip._vendor.requests.exceptions import RetryError, SSLError -from 
pip._vendor.six.moves.urllib import parse as urllib_parse -from pip._vendor.six.moves.urllib import request as urllib_request - -from pip._internal.download import HAS_TLS, is_url, path_to_url, url_to_path -from pip._internal.exceptions import ( - BestVersionAlreadyInstalled, DistributionNotFound, InvalidWheelFilename, - UnsupportedWheel, -) -from pip._internal.models.candidate import InstallationCandidate -from pip._internal.models.format_control import FormatControl -from pip._internal.models.index import PyPI -from pip._internal.models.link import Link -from pip._internal.pep425tags import get_supported -from pip._internal.utils.compat import ipaddress -from pip._internal.utils.logging import indent_log -from pip._internal.utils.misc import ( - ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS, WHEEL_EXTENSION, normalize_path, - redact_password_from_url, -) -from pip._internal.utils.packaging import check_requires_python -from pip._internal.utils.typing import MYPY_CHECK_RUNNING -from pip._internal.wheel import Wheel - -if MYPY_CHECK_RUNNING: - from logging import Logger # noqa: F401 - from typing import ( # noqa: F401 - Tuple, Optional, Any, List, Union, Callable, Set, Sequence, - Iterable, MutableMapping - ) - from pip._vendor.packaging.version import _BaseVersion # noqa: F401 - from pip._vendor.requests import Response # noqa: F401 - from pip._internal.req import InstallRequirement # noqa: F401 - from pip._internal.download import PipSession # noqa: F401 - - SecureOrigin = Tuple[str, str, Optional[str]] - BuildTag = Tuple[Any, ...] # either emply tuple or Tuple[int, str] - CandidateSortingKey = Tuple[int, _BaseVersion, BuildTag, Optional[int]] - -__all__ = ['FormatControl', 'PackageFinder'] - - -SECURE_ORIGINS = [ - # protocol, hostname, port - # Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC) - ("https", "*", "*"), - ("*", "localhost", "*"), - ("*", "127.0.0.0/8", "*"), - ("*", "::1/128", "*"), - ("file", "*", None), - # ssh is always secure. 
- ("ssh", "*", "*"), -] # type: List[SecureOrigin] - - -logger = logging.getLogger(__name__) - - -def _match_vcs_scheme(url): - # type: (str) -> Optional[str] - """Look for VCS schemes in the URL. - - Returns the matched VCS scheme, or None if there's no match. - """ - from pip._internal.vcs import VcsSupport - for scheme in VcsSupport.schemes: - if url.lower().startswith(scheme) and url[len(scheme)] in '+:': - return scheme - return None - - -def _is_url_like_archive(url): - # type: (str) -> bool - """Return whether the URL looks like an archive. - """ - filename = Link(url).filename - for bad_ext in ARCHIVE_EXTENSIONS: - if filename.endswith(bad_ext): - return True - return False - - -class _NotHTML(Exception): - def __init__(self, content_type, request_desc): - # type: (str, str) -> None - super(_NotHTML, self).__init__(content_type, request_desc) - self.content_type = content_type - self.request_desc = request_desc - - -def _ensure_html_header(response): - # type: (Response) -> None - """Check the Content-Type header to ensure the response contains HTML. - - Raises `_NotHTML` if the content type is not text/html. - """ - content_type = response.headers.get("Content-Type", "") - if not content_type.lower().startswith("text/html"): - raise _NotHTML(content_type, response.request.method) - - -class _NotHTTP(Exception): - pass - - -def _ensure_html_response(url, session): - # type: (str, PipSession) -> None - """Send a HEAD request to the URL, and ensure the response contains HTML. - - Raises `_NotHTTP` if the URL is not available for a HEAD request, or - `_NotHTML` if the content type is not text/html. 
- """ - scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url) - if scheme not in {'http', 'https'}: - raise _NotHTTP() - - resp = session.head(url, allow_redirects=True) - resp.raise_for_status() - - _ensure_html_header(resp) - - -def _get_html_response(url, session): - # type: (str, PipSession) -> Response - """Access an HTML page with GET, and return the response. - - This consists of three parts: - - 1. If the URL looks suspiciously like an archive, send a HEAD first to - check the Content-Type is HTML, to avoid downloading a large file. - Raise `_NotHTTP` if the content type cannot be determined, or - `_NotHTML` if it is not HTML. - 2. Actually perform the request. Raise HTTP exceptions on network failures. - 3. Check the Content-Type header to make sure we got HTML, and raise - `_NotHTML` otherwise. - """ - if _is_url_like_archive(url): - _ensure_html_response(url, session=session) - - logger.debug('Getting page %s', url) - - resp = session.get( - url, - headers={ - "Accept": "text/html", - # We don't want to blindly returned cached data for - # /simple/, because authors generally expecting that - # twine upload && pip install will function, but if - # they've done a pip install in the last ~10 minutes - # it won't. Thus by setting this to zero we will not - # blindly use any cached data, however the benefit of - # using max-age=0 instead of no-cache, is that we will - # still support conditional requests, so we will still - # minimize traffic sent in cases where the page hasn't - # changed at all, we will just always incur the round - # trip for the conditional GET now instead of only - # once per 10 minutes. - # For more information, please see pypa/pip#5670. - "Cache-Control": "max-age=0", - }, - ) - resp.raise_for_status() - - # The check for archives above only works if the url ends with - # something that looks like an archive. However that is not a - # requirement of an url. 
Unless we issue a HEAD request on every - # url we cannot know ahead of time for sure if something is HTML - # or not. However we can check after we've downloaded it. - _ensure_html_header(resp) - - return resp - - -def _handle_get_page_fail( - link, # type: Link - reason, # type: Union[str, Exception] - meth=None # type: Optional[Callable[..., None]] -): - # type: (...) -> None - if meth is None: - meth = logger.debug - meth("Could not fetch URL %s: %s - skipping", link, reason) - - -def _get_html_page(link, session=None): - # type: (Link, Optional[PipSession]) -> Optional[HTMLPage] - if session is None: - raise TypeError( - "_get_html_page() missing 1 required keyword argument: 'session'" - ) - - url = link.url.split('#', 1)[0] - - # Check for VCS schemes that do not support lookup as web pages. - vcs_scheme = _match_vcs_scheme(url) - if vcs_scheme: - logger.debug('Cannot look at %s URL %s', vcs_scheme, link) - return None - - # Tack index.html onto file:// URLs that point to directories - scheme, _, path, _, _, _ = urllib_parse.urlparse(url) - if (scheme == 'file' and os.path.isdir(urllib_request.url2pathname(path))): - # add trailing slash if not present so urljoin doesn't trim - # final segment - if not url.endswith('/'): - url += '/' - url = urllib_parse.urljoin(url, 'index.html') - logger.debug(' file: URL is directory, getting %s', url) - - try: - resp = _get_html_response(url, session=session) - except _NotHTTP as exc: - logger.debug( - 'Skipping page %s because it looks like an archive, and cannot ' - 'be checked by HEAD.', link, - ) - except _NotHTML as exc: - logger.debug( - 'Skipping page %s because the %s request got Content-Type: %s', - link, exc.request_desc, exc.content_type, - ) - except requests.HTTPError as exc: - _handle_get_page_fail(link, exc) - except RetryError as exc: - _handle_get_page_fail(link, exc) - except SSLError as exc: - reason = "There was a problem confirming the ssl certificate: " - reason += str(exc) - 
_handle_get_page_fail(link, reason, meth=logger.info) - except requests.ConnectionError as exc: - _handle_get_page_fail(link, "connection error: %s" % exc) - except requests.Timeout: - _handle_get_page_fail(link, "timed out") - else: - return HTMLPage(resp.content, resp.url, resp.headers) - return None - - -class PackageFinder(object): - """This finds packages. - - This is meant to match easy_install's technique for looking for - packages, by reading pages and looking for appropriate links. - """ - - def __init__( - self, - find_links, # type: List[str] - index_urls, # type: List[str] - allow_all_prereleases=False, # type: bool - trusted_hosts=None, # type: Optional[Iterable[str]] - session=None, # type: Optional[PipSession] - format_control=None, # type: Optional[FormatControl] - platform=None, # type: Optional[str] - versions=None, # type: Optional[List[str]] - abi=None, # type: Optional[str] - implementation=None, # type: Optional[str] - prefer_binary=False # type: bool - ): - # type: (...) -> None - """Create a PackageFinder. - - :param format_control: A FormatControl object or None. Used to control - the selection of source packages / binary packages when consulting - the index and links. - :param platform: A string or None. If None, searches for packages - that are supported by the current system. Otherwise, will find - packages that can be built on the platform passed in. These - packages will only be downloaded for distribution: they will - not be built locally. - :param versions: A list of strings or None. This is passed directly - to pep425tags.py in the get_supported() method. - :param abi: A string or None. This is passed directly - to pep425tags.py in the get_supported() method. - :param implementation: A string or None. This is passed directly - to pep425tags.py in the get_supported() method. - """ - if session is None: - raise TypeError( - "PackageFinder() missing 1 required keyword argument: " - "'session'" - ) - - # Build find_links. 
If an argument starts with ~, it may be - # a local file relative to a home directory. So try normalizing - # it and if it exists, use the normalized version. - # This is deliberately conservative - it might be fine just to - # blindly normalize anything starting with a ~... - self.find_links = [] # type: List[str] - for link in find_links: - if link.startswith('~'): - new_link = normalize_path(link) - if os.path.exists(new_link): - link = new_link - self.find_links.append(link) - - self.index_urls = index_urls - - # These are boring links that have already been logged somehow: - self.logged_links = set() # type: Set[Link] - - self.format_control = format_control or FormatControl(set(), set()) - - # Domains that we won't emit warnings for when not using HTTPS - self.secure_origins = [ - ("*", host, "*") - for host in (trusted_hosts if trusted_hosts else []) - ] # type: List[SecureOrigin] - - # Do we want to allow _all_ pre-releases? - self.allow_all_prereleases = allow_all_prereleases - - # The Session we'll use to make requests - self.session = session - - # The valid tags to check potential found wheel candidates against - self.valid_tags = get_supported( - versions=versions, - platform=platform, - abi=abi, - impl=implementation, - ) - - # Do we prefer old, but valid, binary dist over new source dist - self.prefer_binary = prefer_binary - - # If we don't have TLS enabled, then WARN if anyplace we're looking - # relies on TLS. - if not HAS_TLS: - for link in itertools.chain(self.index_urls, self.find_links): - parsed = urllib_parse.urlparse(link) - if parsed.scheme == "https": - logger.warning( - "pip is configured with locations that require " - "TLS/SSL, however the ssl module in Python is not " - "available." 
- ) - break - - def get_formatted_locations(self): - # type: () -> str - lines = [] - if self.index_urls and self.index_urls != [PyPI.simple_url]: - lines.append( - "Looking in indexes: {}".format(", ".join( - redact_password_from_url(url) for url in self.index_urls)) - ) - if self.find_links: - lines.append( - "Looking in links: {}".format(", ".join(self.find_links)) - ) - return "\n".join(lines) - - @staticmethod - def _sort_locations(locations, expand_dir=False): - # type: (Sequence[str], bool) -> Tuple[List[str], List[str]] - """ - Sort locations into "files" (archives) and "urls", and return - a pair of lists (files,urls) - """ - files = [] - urls = [] - - # puts the url for the given file path into the appropriate list - def sort_path(path): - url = path_to_url(path) - if mimetypes.guess_type(url, strict=False)[0] == 'text/html': - urls.append(url) - else: - files.append(url) - - for url in locations: - - is_local_path = os.path.exists(url) - is_file_url = url.startswith('file:') - - if is_local_path or is_file_url: - if is_local_path: - path = url - else: - path = url_to_path(url) - if os.path.isdir(path): - if expand_dir: - path = os.path.realpath(path) - for item in os.listdir(path): - sort_path(os.path.join(path, item)) - elif is_file_url: - urls.append(url) - else: - logger.warning( - "Path '{0}' is ignored: " - "it is a directory.".format(path), - ) - elif os.path.isfile(path): - sort_path(path) - else: - logger.warning( - "Url '%s' is ignored: it is neither a file " - "nor a directory.", url, - ) - elif is_url(url): - # Only add url with clear scheme - urls.append(url) - else: - logger.warning( - "Url '%s' is ignored. It is either a non-existing " - "path or lacks a specific scheme.", url, - ) - - return files, urls - - def _candidate_sort_key(self, candidate): - # type: (InstallationCandidate) -> CandidateSortingKey - """ - Function used to generate link sort key for link tuples. - The greater the return value, the more preferred it is. 
- If not finding wheels, then sorted by version only. - If finding wheels, then the sort order is by version, then: - 1. existing installs - 2. wheels ordered via Wheel.support_index_min(self.valid_tags) - 3. source archives - If prefer_binary was set, then all wheels are sorted above sources. - Note: it was considered to embed this logic into the Link - comparison operators, but then different sdist links - with the same version, would have to be considered equal - """ - support_num = len(self.valid_tags) - build_tag = tuple() # type: BuildTag - binary_preference = 0 - if candidate.location.is_wheel: - # can raise InvalidWheelFilename - wheel = Wheel(candidate.location.filename) - if not wheel.supported(self.valid_tags): - raise UnsupportedWheel( - "%s is not a supported wheel for this platform. It " - "can't be sorted." % wheel.filename - ) - if self.prefer_binary: - binary_preference = 1 - pri = -(wheel.support_index_min(self.valid_tags)) - if wheel.build_tag is not None: - match = re.match(r'^(\d+)(.*)$', wheel.build_tag) - build_tag_groups = match.groups() - build_tag = (int(build_tag_groups[0]), build_tag_groups[1]) - else: # sdist - pri = -(support_num) - return (binary_preference, candidate.version, build_tag, pri) - - def _validate_secure_origin(self, logger, location): - # type: (Logger, Link) -> bool - # Determine if this url used a secure transport mechanism - parsed = urllib_parse.urlparse(str(location)) - origin = (parsed.scheme, parsed.hostname, parsed.port) - - # The protocol to use to see if the protocol matches. - # Don't count the repository type as part of the protocol: in - # cases such as "git+ssh", only use "ssh". (I.e., Only verify against - # the last scheme.) - protocol = origin[0].rsplit('+', 1)[-1] - - # Determine if our origin is a secure origin by looking through our - # hardcoded list of secure origins, as well as any additional ones - # configured on this PackageFinder instance. 
- for secure_origin in (SECURE_ORIGINS + self.secure_origins): - if protocol != secure_origin[0] and secure_origin[0] != "*": - continue - - try: - # We need to do this decode dance to ensure that we have a - # unicode object, even on Python 2.x. - addr = ipaddress.ip_address( - origin[1] - if ( - isinstance(origin[1], six.text_type) or - origin[1] is None - ) - else origin[1].decode("utf8") - ) - network = ipaddress.ip_network( - secure_origin[1] - if isinstance(secure_origin[1], six.text_type) - # setting secure_origin[1] to proper Union[bytes, str] - # creates problems in other places - else secure_origin[1].decode("utf8") # type: ignore - ) - except ValueError: - # We don't have both a valid address or a valid network, so - # we'll check this origin against hostnames. - if (origin[1] and - origin[1].lower() != secure_origin[1].lower() and - secure_origin[1] != "*"): - continue - else: - # We have a valid address and network, so see if the address - # is contained within the network. - if addr not in network: - continue - - # Check to see if the port patches - if (origin[2] != secure_origin[2] and - secure_origin[2] != "*" and - secure_origin[2] is not None): - continue - - # If we've gotten here, then this origin matches the current - # secure origin and we should return True - return True - - # If we've gotten to this point, then the origin isn't secure and we - # will not accept it as a valid location to search. We will however - # log a warning that we are ignoring it. - logger.warning( - "The repository located at %s is not a trusted or secure host and " - "is being ignored. 
If this repository is available via HTTPS we " - "recommend you use HTTPS instead, otherwise you may silence " - "this warning and allow it anyway with '--trusted-host %s'.", - parsed.hostname, - parsed.hostname, - ) - - return False - - def _get_index_urls_locations(self, project_name): - # type: (str) -> List[str] - """Returns the locations found via self.index_urls - - Checks the url_name on the main (first in the list) index and - use this url_name to produce all locations - """ - - def mkurl_pypi_url(url): - loc = posixpath.join( - url, - urllib_parse.quote(canonicalize_name(project_name))) - # For maximum compatibility with easy_install, ensure the path - # ends in a trailing slash. Although this isn't in the spec - # (and PyPI can handle it without the slash) some other index - # implementations might break if they relied on easy_install's - # behavior. - if not loc.endswith('/'): - loc = loc + '/' - return loc - - return [mkurl_pypi_url(url) for url in self.index_urls] - - def find_all_candidates(self, project_name): - # type: (str) -> List[Optional[InstallationCandidate]] - """Find all available InstallationCandidate for project_name - - This checks index_urls and find_links. - All versions found are returned as an InstallationCandidate list. - - See _link_package_versions for details on which files are accepted - """ - index_locations = self._get_index_urls_locations(project_name) - index_file_loc, index_url_loc = self._sort_locations(index_locations) - fl_file_loc, fl_url_loc = self._sort_locations( - self.find_links, expand_dir=True, - ) - - file_locations = (Link(url) for url in itertools.chain( - index_file_loc, fl_file_loc, - )) - - # We trust every url that the user has given us whether it was given - # via --index-url or --find-links. - # We want to filter out any thing which does not have a secure origin. 
- url_locations = [ - link for link in itertools.chain( - (Link(url) for url in index_url_loc), - (Link(url) for url in fl_url_loc), - ) - if self._validate_secure_origin(logger, link) - ] - - logger.debug('%d location(s) to search for versions of %s:', - len(url_locations), project_name) - - for location in url_locations: - logger.debug('* %s', location) - - canonical_name = canonicalize_name(project_name) - formats = self.format_control.get_allowed_formats(canonical_name) - search = Search(project_name, canonical_name, formats) - find_links_versions = self._package_versions( - # We trust every directly linked archive in find_links - (Link(url, '-f') for url in self.find_links), - search - ) - - page_versions = [] - for page in self._get_pages(url_locations, project_name): - logger.debug('Analyzing links from page %s', page.url) - with indent_log(): - page_versions.extend( - self._package_versions(page.iter_links(), search) - ) - - file_versions = self._package_versions(file_locations, search) - if file_versions: - file_versions.sort(reverse=True) - logger.debug( - 'Local files found: %s', - ', '.join([ - url_to_path(candidate.location.url) - for candidate in file_versions - ]) - ) - - # This is an intentional priority ordering - return file_versions + find_links_versions + page_versions - - def find_requirement(self, req, upgrade): - # type: (InstallRequirement, bool) -> Optional[Link] - """Try to find a Link matching req - - Expects req, an InstallRequirement and upgrade, a boolean - Returns a Link if found, - Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise - """ - all_candidates = self.find_all_candidates(req.name) - - # Filter out anything which doesn't match our specifier - compatible_versions = set( - req.specifier.filter( - # We turn the version object into a str here because otherwise - # when we're debundled but setuptools isn't, Python will see - # packaging.version.Version and - # pkg_resources._vendor.packaging.version.Version as 
different - # types. This way we'll use a str as a common data interchange - # format. If we stop using the pkg_resources provided specifier - # and start using our own, we can drop the cast to str(). - [str(c.version) for c in all_candidates], - prereleases=( - self.allow_all_prereleases - if self.allow_all_prereleases else None - ), - ) - ) - applicable_candidates = [ - # Again, converting to str to deal with debundling. - c for c in all_candidates if str(c.version) in compatible_versions - ] - - if applicable_candidates: - best_candidate = max(applicable_candidates, - key=self._candidate_sort_key) - else: - best_candidate = None - - if req.satisfied_by is not None: - installed_version = parse_version(req.satisfied_by.version) - else: - installed_version = None - - if installed_version is None and best_candidate is None: - logger.critical( - 'Could not find a version that satisfies the requirement %s ' - '(from versions: %s)', - req, - ', '.join( - sorted( - {str(c.version) for c in all_candidates}, - key=parse_version, - ) - ) - ) - - raise DistributionNotFound( - 'No matching distribution found for %s' % req - ) - - best_installed = False - if installed_version and ( - best_candidate is None or - best_candidate.version <= installed_version): - best_installed = True - - if not upgrade and installed_version is not None: - if best_installed: - logger.debug( - 'Existing installed version (%s) is most up-to-date and ' - 'satisfies requirement', - installed_version, - ) - else: - logger.debug( - 'Existing installed version (%s) satisfies requirement ' - '(most up-to-date version is %s)', - installed_version, - best_candidate.version, - ) - return None - - if best_installed: - # We have an existing version, and its the best version - logger.debug( - 'Installed version (%s) is most up-to-date (past versions: ' - '%s)', - installed_version, - ', '.join(sorted(compatible_versions, key=parse_version)) or - "none", - ) - raise BestVersionAlreadyInstalled - - logger.debug( 
- 'Using version %s (newest of versions: %s)', - best_candidate.version, - ', '.join(sorted(compatible_versions, key=parse_version)) - ) - return best_candidate.location - - def _get_pages(self, locations, project_name): - # type: (Iterable[Link], str) -> Iterable[HTMLPage] - """ - Yields (page, page_url) from the given locations, skipping - locations that have errors. - """ - seen = set() # type: Set[Link] - for location in locations: - if location in seen: - continue - seen.add(location) - - page = _get_html_page(location, session=self.session) - if page is None: - continue - - yield page - - _py_version_re = re.compile(r'-py([123]\.?[0-9]?)$') - - def _sort_links(self, links): - # type: (Iterable[Link]) -> List[Link] - """ - Returns elements of links in order, non-egg links first, egg links - second, while eliminating duplicates - """ - eggs, no_eggs = [], [] - seen = set() # type: Set[Link] - for link in links: - if link not in seen: - seen.add(link) - if link.egg_fragment: - eggs.append(link) - else: - no_eggs.append(link) - return no_eggs + eggs - - def _package_versions( - self, - links, # type: Iterable[Link] - search # type: Search - ): - # type: (...) 
-> List[Optional[InstallationCandidate]] - result = [] - for link in self._sort_links(links): - v = self._link_package_versions(link, search) - if v is not None: - result.append(v) - return result - - def _log_skipped_link(self, link, reason): - # type: (Link, str) -> None - if link not in self.logged_links: - logger.debug('Skipping link %s; %s', link, reason) - self.logged_links.add(link) - - def _link_package_versions(self, link, search): - # type: (Link, Search) -> Optional[InstallationCandidate] - """Return an InstallationCandidate or None""" - version = None - if link.egg_fragment: - egg_info = link.egg_fragment - ext = link.ext - else: - egg_info, ext = link.splitext() - if not ext: - self._log_skipped_link(link, 'not a file') - return None - if ext not in SUPPORTED_EXTENSIONS: - self._log_skipped_link( - link, 'unsupported archive format: %s' % ext, - ) - return None - if "binary" not in search.formats and ext == WHEEL_EXTENSION: - self._log_skipped_link( - link, 'No binaries permitted for %s' % search.supplied, - ) - return None - if "macosx10" in link.path and ext == '.zip': - self._log_skipped_link(link, 'macosx10 one') - return None - if ext == WHEEL_EXTENSION: - try: - wheel = Wheel(link.filename) - except InvalidWheelFilename: - self._log_skipped_link(link, 'invalid wheel filename') - return None - if canonicalize_name(wheel.name) != search.canonical: - self._log_skipped_link( - link, 'wrong project name (not %s)' % search.supplied) - return None - - if not wheel.supported(self.valid_tags): - self._log_skipped_link( - link, 'it is not compatible with this Python') - return None - - version = wheel.version - - # This should be up by the search.ok_binary check, but see issue 2700. 
- if "source" not in search.formats and ext != WHEEL_EXTENSION: - self._log_skipped_link( - link, 'No sources permitted for %s' % search.supplied, - ) - return None - - if not version: - version = _egg_info_matches(egg_info, search.canonical) - if not version: - self._log_skipped_link( - link, 'Missing project version for %s' % search.supplied) - return None - - match = self._py_version_re.search(version) - if match: - version = version[:match.start()] - py_version = match.group(1) - if py_version != sys.version[:3]: - self._log_skipped_link( - link, 'Python version is incorrect') - return None - try: - support_this_python = check_requires_python(link.requires_python) - except specifiers.InvalidSpecifier: - logger.debug("Package %s has an invalid Requires-Python entry: %s", - link.filename, link.requires_python) - support_this_python = True - - if not support_this_python: - logger.debug("The package %s is incompatible with the python " - "version in use. Acceptable python versions are: %s", - link, link.requires_python) - return None - logger.debug('Found link %s, version: %s', link, version) - - return InstallationCandidate(search.supplied, version, link) - - -def _find_name_version_sep(egg_info, canonical_name): - # type: (str, str) -> int - """Find the separator's index based on the package's canonical name. - - `egg_info` must be an egg info string for the given package, and - `canonical_name` must be the package's canonical name. - - This function is needed since the canonicalized name does not necessarily - have the same length as the egg info's name part. An example:: - - >>> egg_info = 'foo__bar-1.0' - >>> canonical_name = 'foo-bar' - >>> _find_name_version_sep(egg_info, canonical_name) - 8 - """ - # Project name and version must be separated by one single dash. Find all - # occurrences of dashes; if the string in front of it matches the canonical - # name, this is the one separating the name and version parts. 
- for i, c in enumerate(egg_info): - if c != "-": - continue - if canonicalize_name(egg_info[:i]) == canonical_name: - return i - raise ValueError("{} does not match {}".format(egg_info, canonical_name)) - - -def _egg_info_matches(egg_info, canonical_name): - # type: (str, str) -> Optional[str] - """Pull the version part out of a string. - - :param egg_info: The string to parse. E.g. foo-2.1 - :param canonical_name: The canonicalized name of the package this - belongs to. - """ - try: - version_start = _find_name_version_sep(egg_info, canonical_name) + 1 - except ValueError: - return None - version = egg_info[version_start:] - if not version: - return None - return version - - -def _determine_base_url(document, page_url): - """Determine the HTML document's base URL. - - This looks for a ```` tag in the HTML document. If present, its href - attribute denotes the base URL of anchor tags in the document. If there is - no such tag (or if it does not have a valid href attribute), the HTML - file's URL is used as the base URL. - - :param document: An HTML document representation. The current - implementation expects the result of ``html5lib.parse()``. - :param page_url: The URL of the HTML document. - """ - for base in document.findall(".//base"): - href = base.get("href") - if href is not None: - return href - return page_url - - -def _get_encoding_from_headers(headers): - """Determine if we have any encoding information in our headers. - """ - if headers and "Content-Type" in headers: - content_type, params = cgi.parse_header(headers["Content-Type"]) - if "charset" in params: - return params['charset'] - return None - - -_CLEAN_LINK_RE = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I) - - -def _clean_link(url): - # type: (str) -> str - """Makes sure a link is fully encoded. 
That is, if a ' ' shows up in - the link, it will be rewritten to %20 (while not over-quoting - % or other characters).""" - return _CLEAN_LINK_RE.sub(lambda match: '%%%2x' % ord(match.group(0)), url) - - -class HTMLPage(object): - """Represents one page, along with its URL""" - - def __init__(self, content, url, headers=None): - # type: (bytes, str, MutableMapping[str, str]) -> None - self.content = content - self.url = url - self.headers = headers - - def __str__(self): - return redact_password_from_url(self.url) - - def iter_links(self): - # type: () -> Iterable[Link] - """Yields all links in the page""" - document = html5lib.parse( - self.content, - transport_encoding=_get_encoding_from_headers(self.headers), - namespaceHTMLElements=False, - ) - base_url = _determine_base_url(document, self.url) - for anchor in document.findall(".//a"): - if anchor.get("href"): - href = anchor.get("href") - url = _clean_link(urllib_parse.urljoin(base_url, href)) - pyrequire = anchor.get('data-requires-python') - pyrequire = unescape(pyrequire) if pyrequire else None - yield Link(url, self.url, requires_python=pyrequire) - - -Search = namedtuple('Search', 'supplied canonical formats') -"""Capture key aspects of a search. - -:attribute supplied: The user supplied package. -:attribute canonical: The canonical package name. -:attribute formats: The formats allowed for this package. Should be a set - with 'binary' or 'source' or both in it. 
-""" diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/locations.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/locations.py deleted file mode 100644 index c6e2a3e..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/locations.py +++ /dev/null @@ -1,211 +0,0 @@ -"""Locations where we look for configs, install stuff, etc""" -from __future__ import absolute_import - -import os -import os.path -import platform -import site -import sys -import sysconfig -from distutils import sysconfig as distutils_sysconfig -from distutils.command.install import SCHEME_KEYS # type: ignore - -from pip._internal.utils import appdirs -from pip._internal.utils.compat import WINDOWS, expanduser -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from typing import Any, Union, Dict, List, Optional # noqa: F401 - - -# Application Directories -USER_CACHE_DIR = appdirs.user_cache_dir("pip") - - -DELETE_MARKER_MESSAGE = '''\ -This file is placed here by pip to indicate the source was put -here by pip. - -Once this package is successfully installed this source code will be -deleted (unless you remove this file). -''' -PIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt' - - -def write_delete_marker_file(directory): - # type: (str) -> None - """ - Write the pip delete marker file into this directory. - """ - filepath = os.path.join(directory, PIP_DELETE_MARKER_FILENAME) - with open(filepath, 'w') as marker_fp: - marker_fp.write(DELETE_MARKER_MESSAGE) - - -def running_under_virtualenv(): - # type: () -> bool - """ - Return True if we're running inside a virtualenv, False otherwise. - - """ - if hasattr(sys, 'real_prefix'): - return True - elif sys.prefix != getattr(sys, "base_prefix", sys.prefix): - return True - - return False - - -def virtualenv_no_global(): - # type: () -> bool - """ - Return True if in a venv and no system site packages. 
- """ - # this mirrors the logic in virtualenv.py for locating the - # no-global-site-packages.txt file - site_mod_dir = os.path.dirname(os.path.abspath(site.__file__)) - no_global_file = os.path.join(site_mod_dir, 'no-global-site-packages.txt') - if running_under_virtualenv() and os.path.isfile(no_global_file): - return True - else: - return False - - -if running_under_virtualenv(): - src_prefix = os.path.join(sys.prefix, 'src') -else: - # FIXME: keep src in cwd for now (it is not a temporary folder) - try: - src_prefix = os.path.join(os.getcwd(), 'src') - except OSError: - # In case the current working directory has been renamed or deleted - sys.exit( - "The folder you are executing pip from can no longer be found." - ) - -# under macOS + virtualenv sys.prefix is not properly resolved -# it is something like /path/to/python/bin/.. -# Note: using realpath due to tmp dirs on OSX being symlinks -src_prefix = os.path.abspath(src_prefix) - -# FIXME doesn't account for venv linked to global site-packages - -site_packages = sysconfig.get_path("purelib") # type: Optional[str] - -# This is because of a bug in PyPy's sysconfig module, see -# https://bitbucket.org/pypy/pypy/issues/2506/sysconfig-returns-incorrect-paths -# for more information. -if platform.python_implementation().lower() == "pypy": - site_packages = distutils_sysconfig.get_python_lib() -try: - # Use getusersitepackages if this is present, as it ensures that the - # value is initialised properly. - user_site = site.getusersitepackages() -except AttributeError: - user_site = site.USER_SITE -user_dir = expanduser('~') -if WINDOWS: - bin_py = os.path.join(sys.prefix, 'Scripts') - bin_user = os.path.join(user_site, 'Scripts') - # buildout uses 'bin' on Windows too? 
- if not os.path.exists(bin_py): - bin_py = os.path.join(sys.prefix, 'bin') - bin_user = os.path.join(user_site, 'bin') - - config_basename = 'pip.ini' - - legacy_storage_dir = os.path.join(user_dir, 'pip') - legacy_config_file = os.path.join( - legacy_storage_dir, - config_basename, - ) -else: - bin_py = os.path.join(sys.prefix, 'bin') - bin_user = os.path.join(user_site, 'bin') - - config_basename = 'pip.conf' - - legacy_storage_dir = os.path.join(user_dir, '.pip') - legacy_config_file = os.path.join( - legacy_storage_dir, - config_basename, - ) - # Forcing to use /usr/local/bin for standard macOS framework installs - # Also log to ~/Library/Logs/ for use with the Console.app log viewer - if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/': - bin_py = '/usr/local/bin' - -site_config_files = [ - os.path.join(path, config_basename) - for path in appdirs.site_config_dirs('pip') -] - -venv_config_file = os.path.join(sys.prefix, config_basename) -new_config_file = os.path.join(appdirs.user_config_dir("pip"), config_basename) - - -def distutils_scheme(dist_name, user=False, home=None, root=None, - isolated=False, prefix=None): - # type:(str, bool, str, str, bool, str) -> dict - """ - Return a distutils install scheme - """ - from distutils.dist import Distribution - - scheme = {} - - if isolated: - extra_dist_args = {"script_args": ["--no-user-cfg"]} - else: - extra_dist_args = {} - dist_args = {'name': dist_name} # type: Dict[str, Union[str, List[str]]] - dist_args.update(extra_dist_args) - - d = Distribution(dist_args) - # Ignoring, typeshed issue reported python/typeshed/issues/2567 - d.parse_config_files() - # NOTE: Ignoring type since mypy can't find attributes on 'Command' - i = d.get_command_obj('install', create=True) # type: Any - assert i is not None - # NOTE: setting user or home has the side-effect of creating the home dir - # or user base for installations during finalize_options() - # ideally, we'd prefer a scheme class that has no 
side-effects. - assert not (user and prefix), "user={} prefix={}".format(user, prefix) - i.user = user or i.user - if user: - i.prefix = "" - i.prefix = prefix or i.prefix - i.home = home or i.home - i.root = root or i.root - i.finalize_options() - for key in SCHEME_KEYS: - scheme[key] = getattr(i, 'install_' + key) - - # install_lib specified in setup.cfg should install *everything* - # into there (i.e. it takes precedence over both purelib and - # platlib). Note, i.install_lib is *always* set after - # finalize_options(); we only want to override here if the user - # has explicitly requested it hence going back to the config - - # Ignoring, typeshed issue reported python/typeshed/issues/2567 - if 'install_lib' in d.get_option_dict('install'): # type: ignore - scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib)) - - if running_under_virtualenv(): - scheme['headers'] = os.path.join( - sys.prefix, - 'include', - 'site', - 'python' + sys.version[:3], - dist_name, - ) - - if root is not None: - path_no_drive = os.path.splitdrive( - os.path.abspath(scheme["headers"]))[1] - scheme["headers"] = os.path.join( - root, - path_no_drive[1:], - ) - - return scheme diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/models/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/models/__init__.py deleted file mode 100644 index 7855226..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/models/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -"""A package that contains models that represent entities. 
-""" diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/models/candidate.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/models/candidate.py deleted file mode 100644 index 4475458..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/models/candidate.py +++ /dev/null @@ -1,31 +0,0 @@ -from pip._vendor.packaging.version import parse as parse_version - -from pip._internal.utils.models import KeyBasedCompareMixin -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from pip._vendor.packaging.version import _BaseVersion # noqa: F401 - from pip._internal.models.link import Link # noqa: F401 - from typing import Any, Union # noqa: F401 - - -class InstallationCandidate(KeyBasedCompareMixin): - """Represents a potential "candidate" for installation. - """ - - def __init__(self, project, version, location): - # type: (Any, str, Link) -> None - self.project = project - self.version = parse_version(version) # type: _BaseVersion - self.location = location - - super(InstallationCandidate, self).__init__( - key=(self.project, self.version, self.location), - defining_class=InstallationCandidate - ) - - def __repr__(self): - # type: () -> str - return "".format( - self.project, self.version, self.location, - ) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/models/format_control.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/models/format_control.py deleted file mode 100644 index 971a391..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/models/format_control.py +++ /dev/null @@ -1,73 +0,0 @@ -from pip._vendor.packaging.utils import canonicalize_name - -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from typing import Optional, Set, FrozenSet # noqa: F401 - - -class FormatControl(object): - """Helper for managing formats from which a 
package can be installed. - """ - - def __init__(self, no_binary=None, only_binary=None): - # type: (Optional[Set], Optional[Set]) -> None - if no_binary is None: - no_binary = set() - if only_binary is None: - only_binary = set() - - self.no_binary = no_binary - self.only_binary = only_binary - - def __eq__(self, other): - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - return "{}({}, {})".format( - self.__class__.__name__, - self.no_binary, - self.only_binary - ) - - @staticmethod - def handle_mutual_excludes(value, target, other): - # type: (str, Optional[Set], Optional[Set]) -> None - new = value.split(',') - while ':all:' in new: - other.clear() - target.clear() - target.add(':all:') - del new[:new.index(':all:') + 1] - # Without a none, we want to discard everything as :all: covers it - if ':none:' not in new: - return - for name in new: - if name == ':none:': - target.clear() - continue - name = canonicalize_name(name) - other.discard(name) - target.add(name) - - def get_allowed_formats(self, canonical_name): - # type: (str) -> FrozenSet - result = {"binary", "source"} - if canonical_name in self.only_binary: - result.discard('source') - elif canonical_name in self.no_binary: - result.discard('binary') - elif ':all:' in self.only_binary: - result.discard('source') - elif ':all:' in self.no_binary: - result.discard('binary') - return frozenset(result) - - def disallow_binaries(self): - # type: () -> None - self.handle_mutual_excludes( - ':all:', self.no_binary, self.only_binary, - ) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/models/index.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/models/index.py deleted file mode 100644 index ead1efb..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/models/index.py +++ /dev/null @@ -1,31 +0,0 @@ -from pip._vendor.six.moves.urllib import parse as 
urllib_parse - - -class PackageIndex(object): - """Represents a Package Index and provides easier access to endpoints - """ - - def __init__(self, url, file_storage_domain): - # type: (str, str) -> None - super(PackageIndex, self).__init__() - self.url = url - self.netloc = urllib_parse.urlsplit(url).netloc - self.simple_url = self._url_for_path('simple') - self.pypi_url = self._url_for_path('pypi') - - # This is part of a temporary hack used to block installs of PyPI - # packages which depend on external urls only necessary until PyPI can - # block such packages themselves - self.file_storage_domain = file_storage_domain - - def _url_for_path(self, path): - # type: (str) -> str - return urllib_parse.urljoin(self.url, path) - - -PyPI = PackageIndex( - 'https://pypi.org/', file_storage_domain='files.pythonhosted.org' -) -TestPyPI = PackageIndex( - 'https://test.pypi.org/', file_storage_domain='test-files.pythonhosted.org' -) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/models/link.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/models/link.py deleted file mode 100644 index ad2f93e..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/models/link.py +++ /dev/null @@ -1,163 +0,0 @@ -import posixpath -import re - -from pip._vendor.six.moves.urllib import parse as urllib_parse - -from pip._internal.download import path_to_url -from pip._internal.utils.misc import ( - WHEEL_EXTENSION, redact_password_from_url, splitext, -) -from pip._internal.utils.models import KeyBasedCompareMixin -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from typing import Optional, Tuple, Union, Text # noqa: F401 - from pip._internal.index import HTMLPage # noqa: F401 - - -class Link(KeyBasedCompareMixin): - """Represents a parsed link from a Package Index's simple URL - """ - - def __init__(self, url, comes_from=None, requires_python=None): - # type: (str, 
Optional[Union[str, HTMLPage]], Optional[str]) -> None - """ - url: - url of the resource pointed to (href of the link) - comes_from: - instance of HTMLPage where the link was found, or string. - requires_python: - String containing the `Requires-Python` metadata field, specified - in PEP 345. This may be specified by a data-requires-python - attribute in the HTML link tag, as described in PEP 503. - """ - - # url can be a UNC windows share - if url.startswith('\\\\'): - url = path_to_url(url) - - self.url = url - self.comes_from = comes_from - self.requires_python = requires_python if requires_python else None - - super(Link, self).__init__( - key=(self.url), - defining_class=Link - ) - - def __str__(self): - if self.requires_python: - rp = ' (requires-python:%s)' % self.requires_python - else: - rp = '' - if self.comes_from: - return '%s (from %s)%s' % (redact_password_from_url(self.url), - self.comes_from, rp) - else: - return redact_password_from_url(str(self.url)) - - def __repr__(self): - return '' % self - - @property - def filename(self): - # type: () -> str - _, netloc, path, _, _ = urllib_parse.urlsplit(self.url) - name = posixpath.basename(path.rstrip('/')) or netloc - name = urllib_parse.unquote(name) - assert name, ('URL %r produced no filename' % self.url) - return name - - @property - def scheme(self): - # type: () -> str - return urllib_parse.urlsplit(self.url)[0] - - @property - def netloc(self): - # type: () -> str - return urllib_parse.urlsplit(self.url)[1] - - @property - def path(self): - # type: () -> str - return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2]) - - def splitext(self): - # type: () -> Tuple[str, str] - return splitext(posixpath.basename(self.path.rstrip('/'))) - - @property - def ext(self): - # type: () -> str - return self.splitext()[1] - - @property - def url_without_fragment(self): - # type: () -> str - scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url) - return urllib_parse.urlunsplit((scheme, 
netloc, path, query, None)) - - _egg_fragment_re = re.compile(r'[#&]egg=([^&]*)') - - @property - def egg_fragment(self): - # type: () -> Optional[str] - match = self._egg_fragment_re.search(self.url) - if not match: - return None - return match.group(1) - - _subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)') - - @property - def subdirectory_fragment(self): - # type: () -> Optional[str] - match = self._subdirectory_fragment_re.search(self.url) - if not match: - return None - return match.group(1) - - _hash_re = re.compile( - r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)' - ) - - @property - def hash(self): - # type: () -> Optional[str] - match = self._hash_re.search(self.url) - if match: - return match.group(2) - return None - - @property - def hash_name(self): - # type: () -> Optional[str] - match = self._hash_re.search(self.url) - if match: - return match.group(1) - return None - - @property - def show_url(self): - # type: () -> Optional[str] - return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0]) - - @property - def is_wheel(self): - # type: () -> bool - return self.ext == WHEEL_EXTENSION - - @property - def is_artifact(self): - # type: () -> bool - """ - Determines if this points to an actual artifact (e.g. a tarball) or if - it points to an "abstract" thing like a path or a VCS location. 
- """ - from pip._internal.vcs import vcs - - if self.scheme in vcs.all_schemes: - return False - - return True diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/operations/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/operations/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/operations/check.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/operations/check.py deleted file mode 100644 index 0b56eda..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/operations/check.py +++ /dev/null @@ -1,155 +0,0 @@ -"""Validation of dependencies of packages -""" - -import logging -from collections import namedtuple - -from pip._vendor.packaging.utils import canonicalize_name -from pip._vendor.pkg_resources import RequirementParseError - -from pip._internal.operations.prepare import make_abstract_dist -from pip._internal.utils.misc import get_installed_distributions -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -logger = logging.getLogger(__name__) - -if MYPY_CHECK_RUNNING: - from pip._internal.req.req_install import InstallRequirement # noqa: F401 - from typing import ( # noqa: F401 - Any, Callable, Dict, Optional, Set, Tuple, List - ) - - # Shorthands - PackageSet = Dict[str, 'PackageDetails'] - Missing = Tuple[str, Any] - Conflicting = Tuple[str, str, Any] - - MissingDict = Dict[str, List[Missing]] - ConflictingDict = Dict[str, List[Conflicting]] - CheckResult = Tuple[MissingDict, ConflictingDict] - -PackageDetails = namedtuple('PackageDetails', ['version', 'requires']) - - -def create_package_set_from_installed(**kwargs): - # type: (**Any) -> Tuple[PackageSet, bool] - """Converts a list of distributions into a PackageSet. 
- """ - # Default to using all packages installed on the system - if kwargs == {}: - kwargs = {"local_only": False, "skip": ()} - - package_set = {} - problems = False - for dist in get_installed_distributions(**kwargs): - name = canonicalize_name(dist.project_name) - try: - package_set[name] = PackageDetails(dist.version, dist.requires()) - except RequirementParseError as e: - # Don't crash on broken metadata - logging.warning("Error parsing requirements for %s: %s", name, e) - problems = True - return package_set, problems - - -def check_package_set(package_set, should_ignore=None): - # type: (PackageSet, Optional[Callable[[str], bool]]) -> CheckResult - """Check if a package set is consistent - - If should_ignore is passed, it should be a callable that takes a - package name and returns a boolean. - """ - if should_ignore is None: - def should_ignore(name): - return False - - missing = dict() - conflicting = dict() - - for package_name in package_set: - # Info about dependencies of package_name - missing_deps = set() # type: Set[Missing] - conflicting_deps = set() # type: Set[Conflicting] - - if should_ignore(package_name): - continue - - for req in package_set[package_name].requires: - name = canonicalize_name(req.project_name) # type: str - - # Check if it's missing - if name not in package_set: - missed = True - if req.marker is not None: - missed = req.marker.evaluate() - if missed: - missing_deps.add((name, req)) - continue - - # Check if there's a conflict - version = package_set[name].version # type: str - if not req.specifier.contains(version, prereleases=True): - conflicting_deps.add((name, version, req)) - - if missing_deps: - missing[package_name] = sorted(missing_deps, key=str) - if conflicting_deps: - conflicting[package_name] = sorted(conflicting_deps, key=str) - - return missing, conflicting - - -def check_install_conflicts(to_install): - # type: (List[InstallRequirement]) -> Tuple[PackageSet, CheckResult] - """For checking if the dependency graph 
would be consistent after \ - installing given requirements - """ - # Start from the current state - package_set, _ = create_package_set_from_installed() - # Install packages - would_be_installed = _simulate_installation_of(to_install, package_set) - - # Only warn about directly-dependent packages; create a whitelist of them - whitelist = _create_whitelist(would_be_installed, package_set) - - return ( - package_set, - check_package_set( - package_set, should_ignore=lambda name: name not in whitelist - ) - ) - - -def _simulate_installation_of(to_install, package_set): - # type: (List[InstallRequirement], PackageSet) -> Set[str] - """Computes the version of packages after installing to_install. - """ - - # Keep track of packages that were installed - installed = set() - - # Modify it as installing requirement_set would (assuming no errors) - for inst_req in to_install: - dist = make_abstract_dist(inst_req).dist() - name = canonicalize_name(dist.key) - package_set[name] = PackageDetails(dist.version, dist.requires()) - - installed.add(name) - - return installed - - -def _create_whitelist(would_be_installed, package_set): - # type: (Set[str], PackageSet) -> Set[str] - packages_affected = set(would_be_installed) - - for package_name in package_set: - if package_name in packages_affected: - continue - - for req in package_set[package_name].requires: - if canonicalize_name(req.name) in packages_affected: - packages_affected.add(package_name) - break - - return packages_affected diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/operations/freeze.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/operations/freeze.py deleted file mode 100644 index 388bb73..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/operations/freeze.py +++ /dev/null @@ -1,247 +0,0 @@ -from __future__ import absolute_import - -import collections -import logging -import os -import re - -from pip._vendor import six 
-from pip._vendor.packaging.utils import canonicalize_name -from pip._vendor.pkg_resources import RequirementParseError - -from pip._internal.exceptions import BadCommand, InstallationError -from pip._internal.req.constructors import ( - install_req_from_editable, install_req_from_line, -) -from pip._internal.req.req_file import COMMENT_RE -from pip._internal.utils.misc import ( - dist_is_editable, get_installed_distributions, -) -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from typing import ( # noqa: F401 - Iterator, Optional, List, Container, Set, Dict, Tuple, Iterable, Union - ) - from pip._internal.cache import WheelCache # noqa: F401 - from pip._vendor.pkg_resources import ( # noqa: F401 - Distribution, Requirement - ) - - RequirementInfo = Tuple[Optional[Union[str, Requirement]], bool, List[str]] - - -logger = logging.getLogger(__name__) - - -def freeze( - requirement=None, # type: Optional[List[str]] - find_links=None, # type: Optional[List[str]] - local_only=None, # type: Optional[bool] - user_only=None, # type: Optional[bool] - skip_regex=None, # type: Optional[str] - isolated=False, # type: bool - wheel_cache=None, # type: Optional[WheelCache] - exclude_editable=False, # type: bool - skip=() # type: Container[str] -): - # type: (...) 
-> Iterator[str] - find_links = find_links or [] - skip_match = None - - if skip_regex: - skip_match = re.compile(skip_regex).search - - for link in find_links: - yield '-f %s' % link - installations = {} # type: Dict[str, FrozenRequirement] - for dist in get_installed_distributions(local_only=local_only, - skip=(), - user_only=user_only): - try: - req = FrozenRequirement.from_dist(dist) - except RequirementParseError: - logger.warning( - "Could not parse requirement: %s", - dist.project_name - ) - continue - if exclude_editable and req.editable: - continue - installations[req.name] = req - - if requirement: - # the options that don't get turned into an InstallRequirement - # should only be emitted once, even if the same option is in multiple - # requirements files, so we need to keep track of what has been emitted - # so that we don't emit it again if it's seen again - emitted_options = set() # type: Set[str] - # keep track of which files a requirement is in so that we can - # give an accurate warning if a requirement appears multiple times. 
- req_files = collections.defaultdict(list) # type: Dict[str, List[str]] - for req_file_path in requirement: - with open(req_file_path) as req_file: - for line in req_file: - if (not line.strip() or - line.strip().startswith('#') or - (skip_match and skip_match(line)) or - line.startswith(( - '-r', '--requirement', - '-Z', '--always-unzip', - '-f', '--find-links', - '-i', '--index-url', - '--pre', - '--trusted-host', - '--process-dependency-links', - '--extra-index-url'))): - line = line.rstrip() - if line not in emitted_options: - emitted_options.add(line) - yield line - continue - - if line.startswith('-e') or line.startswith('--editable'): - if line.startswith('-e'): - line = line[2:].strip() - else: - line = line[len('--editable'):].strip().lstrip('=') - line_req = install_req_from_editable( - line, - isolated=isolated, - wheel_cache=wheel_cache, - ) - else: - line_req = install_req_from_line( - COMMENT_RE.sub('', line).strip(), - isolated=isolated, - wheel_cache=wheel_cache, - ) - - if not line_req.name: - logger.info( - "Skipping line in requirement file [%s] because " - "it's not clear what it would install: %s", - req_file_path, line.strip(), - ) - logger.info( - " (add #egg=PackageName to the URL to avoid" - " this warning)" - ) - elif line_req.name not in installations: - # either it's not installed, or it is installed - # but has been processed already - if not req_files[line_req.name]: - logger.warning( - "Requirement file [%s] contains %s, but " - "package %r is not installed", - req_file_path, - COMMENT_RE.sub('', line).strip(), line_req.name - ) - else: - req_files[line_req.name].append(req_file_path) - else: - yield str(installations[line_req.name]).rstrip() - del installations[line_req.name] - req_files[line_req.name].append(req_file_path) - - # Warn about requirements that were included multiple times (in a - # single requirements file or in different requirements files). 
- for name, files in six.iteritems(req_files): - if len(files) > 1: - logger.warning("Requirement %s included multiple times [%s]", - name, ', '.join(sorted(set(files)))) - - yield( - '## The following requirements were added by ' - 'pip freeze:' - ) - for installation in sorted( - installations.values(), key=lambda x: x.name.lower()): - if canonicalize_name(installation.name) not in skip: - yield str(installation).rstrip() - - -def get_requirement_info(dist): - # type: (Distribution) -> RequirementInfo - """ - Compute and return values (req, editable, comments) for use in - FrozenRequirement.from_dist(). - """ - if not dist_is_editable(dist): - return (None, False, []) - - location = os.path.normcase(os.path.abspath(dist.location)) - - from pip._internal.vcs import vcs, RemoteNotFoundError - vc_type = vcs.get_backend_type(location) - - if not vc_type: - req = dist.as_requirement() - logger.debug( - 'No VCS found for editable requirement {!r} in: {!r}', req, - location, - ) - comments = [ - '# Editable install with no version control ({})'.format(req) - ] - return (location, True, comments) - - try: - req = vc_type.get_src_requirement(location, dist.project_name) - except RemoteNotFoundError: - req = dist.as_requirement() - comments = [ - '# Editable {} install with no remote ({})'.format( - vc_type.__name__, req, - ) - ] - return (location, True, comments) - - except BadCommand: - logger.warning( - 'cannot determine version of editable source in %s ' - '(%s command not found in path)', - location, - vc_type.name, - ) - return (None, True, []) - - except InstallationError as exc: - logger.warning( - "Error when trying to get requirement for VCS system %s, " - "falling back to uneditable format", exc - ) - else: - if req is not None: - return (req, True, []) - - logger.warning( - 'Could not determine repository location of %s', location - ) - comments = ['## !! 
Could not determine repository location'] - - return (None, False, comments) - - -class FrozenRequirement(object): - def __init__(self, name, req, editable, comments=()): - # type: (str, Union[str, Requirement], bool, Iterable[str]) -> None - self.name = name - self.req = req - self.editable = editable - self.comments = comments - - @classmethod - def from_dist(cls, dist): - # type: (Distribution) -> FrozenRequirement - req, editable, comments = get_requirement_info(dist) - if req is None: - req = dist.as_requirement() - - return cls(dist.project_name, req, editable, comments=comments) - - def __str__(self): - req = self.req - if self.editable: - req = '-e %s' % req - return '\n'.join(list(self.comments) + [str(req)]) + '\n' diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/operations/prepare.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/operations/prepare.py deleted file mode 100644 index 4f31dd5..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/operations/prepare.py +++ /dev/null @@ -1,413 +0,0 @@ -"""Prepares a distribution for installation -""" - -import logging -import os - -from pip._vendor import pkg_resources, requests - -from pip._internal.build_env import BuildEnvironment -from pip._internal.download import ( - is_dir_url, is_file_url, is_vcs_url, unpack_url, url_to_path, -) -from pip._internal.exceptions import ( - DirectoryUrlHashUnsupported, HashUnpinned, InstallationError, - PreviousBuildDirError, VcsHashUnsupported, -) -from pip._internal.utils.compat import expanduser -from pip._internal.utils.hashes import MissingHashes -from pip._internal.utils.logging import indent_log -from pip._internal.utils.misc import display_path, normalize_path -from pip._internal.utils.typing import MYPY_CHECK_RUNNING -from pip._internal.vcs import vcs - -if MYPY_CHECK_RUNNING: - from typing import Any, Optional # noqa: F401 - from pip._internal.req.req_install import 
InstallRequirement # noqa: F401 - from pip._internal.index import PackageFinder # noqa: F401 - from pip._internal.download import PipSession # noqa: F401 - from pip._internal.req.req_tracker import RequirementTracker # noqa: F401 - -logger = logging.getLogger(__name__) - - -def make_abstract_dist(req): - # type: (InstallRequirement) -> DistAbstraction - """Factory to make an abstract dist object. - - Preconditions: Either an editable req with a source_dir, or satisfied_by or - a wheel link, or a non-editable req with a source_dir. - - :return: A concrete DistAbstraction. - """ - if req.editable: - return IsSDist(req) - elif req.link and req.link.is_wheel: - return IsWheel(req) - else: - return IsSDist(req) - - -class DistAbstraction(object): - """Abstracts out the wheel vs non-wheel Resolver.resolve() logic. - - The requirements for anything installable are as follows: - - we must be able to determine the requirement name - (or we can't correctly handle the non-upgrade case). - - we must be able to generate a list of run-time dependencies - without installing any additional packages (or we would - have to either burn time by doing temporary isolated installs - or alternatively violate pips 'don't start installing unless - all requirements are available' rule - neither of which are - desirable). - - for packages with setup requirements, we must also be able - to determine their requirements without installing additional - packages (for the same reason as run-time dependencies) - - we must be able to create a Distribution object exposing the - above metadata. 
- """ - - def __init__(self, req): - # type: (InstallRequirement) -> None - self.req = req # type: InstallRequirement - - def dist(self): - # type: () -> Any - """Return a setuptools Dist object.""" - raise NotImplementedError - - def prep_for_dist(self, finder, build_isolation): - # type: (PackageFinder, bool) -> Any - """Ensure that we can get a Dist for this requirement.""" - raise NotImplementedError - - -class IsWheel(DistAbstraction): - - def dist(self): - # type: () -> pkg_resources.Distribution - return list(pkg_resources.find_distributions( - self.req.source_dir))[0] - - def prep_for_dist(self, finder, build_isolation): - # type: (PackageFinder, bool) -> Any - # FIXME:https://github.com/pypa/pip/issues/1112 - pass - - -class IsSDist(DistAbstraction): - - def dist(self): - return self.req.get_dist() - - def prep_for_dist(self, finder, build_isolation): - # type: (PackageFinder, bool) -> None - # Prepare for building. We need to: - # 1. Load pyproject.toml (if it exists) - # 2. Set up the build environment - - self.req.load_pyproject_toml() - should_isolate = self.req.use_pep517 and build_isolation - - def _raise_conflicts(conflicting_with, conflicting_reqs): - raise InstallationError( - "Some build dependencies for %s conflict with %s: %s." % ( - self.req, conflicting_with, ', '.join( - '%s is incompatible with %s' % (installed, wanted) - for installed, wanted in sorted(conflicting)))) - - if should_isolate: - # Isolate in a BuildEnvironment and install the build-time - # requirements. 
- self.req.build_env = BuildEnvironment() - self.req.build_env.install_requirements( - finder, self.req.pyproject_requires, 'overlay', - "Installing build dependencies" - ) - conflicting, missing = self.req.build_env.check_requirements( - self.req.requirements_to_check - ) - if conflicting: - _raise_conflicts("PEP 517/518 supported requirements", - conflicting) - if missing: - logger.warning( - "Missing build requirements in pyproject.toml for %s.", - self.req, - ) - logger.warning( - "The project does not specify a build backend, and " - "pip cannot fall back to setuptools without %s.", - " and ".join(map(repr, sorted(missing))) - ) - # Install any extra build dependencies that the backend requests. - # This must be done in a second pass, as the pyproject.toml - # dependencies must be installed before we can call the backend. - with self.req.build_env: - # We need to have the env active when calling the hook. - self.req.spin_message = "Getting requirements to build wheel" - reqs = self.req.pep517_backend.get_requires_for_build_wheel() - conflicting, missing = self.req.build_env.check_requirements(reqs) - if conflicting: - _raise_conflicts("the backend dependencies", conflicting) - self.req.build_env.install_requirements( - finder, missing, 'normal', - "Installing backend dependencies" - ) - - self.req.prepare_metadata() - self.req.assert_source_matches_version() - - -class Installed(DistAbstraction): - - def dist(self): - # type: () -> pkg_resources.Distribution - return self.req.satisfied_by - - def prep_for_dist(self, finder, build_isolation): - # type: (PackageFinder, bool) -> Any - pass - - -class RequirementPreparer(object): - """Prepares a Requirement - """ - - def __init__( - self, - build_dir, # type: str - download_dir, # type: Optional[str] - src_dir, # type: str - wheel_download_dir, # type: Optional[str] - progress_bar, # type: str - build_isolation, # type: bool - req_tracker # type: RequirementTracker - ): - # type: (...) 
-> None - super(RequirementPreparer, self).__init__() - - self.src_dir = src_dir - self.build_dir = build_dir - self.req_tracker = req_tracker - - # Where still packed archives should be written to. If None, they are - # not saved, and are deleted immediately after unpacking. - self.download_dir = download_dir - - # Where still-packed .whl files should be written to. If None, they are - # written to the download_dir parameter. Separate to download_dir to - # permit only keeping wheel archives for pip wheel. - if wheel_download_dir: - wheel_download_dir = normalize_path(wheel_download_dir) - self.wheel_download_dir = wheel_download_dir - - # NOTE - # download_dir and wheel_download_dir overlap semantically and may - # be combined if we're willing to have non-wheel archives present in - # the wheelhouse output by 'pip wheel'. - - self.progress_bar = progress_bar - - # Is build isolation allowed? - self.build_isolation = build_isolation - - @property - def _download_should_save(self): - # type: () -> bool - # TODO: Modify to reduce indentation needed - if self.download_dir: - self.download_dir = expanduser(self.download_dir) - if os.path.exists(self.download_dir): - return True - else: - logger.critical('Could not find download directory') - raise InstallationError( - "Could not find or access download directory '%s'" - % display_path(self.download_dir)) - return False - - def prepare_linked_requirement( - self, - req, # type: InstallRequirement - session, # type: PipSession - finder, # type: PackageFinder - upgrade_allowed, # type: bool - require_hashes # type: bool - ): - # type: (...) 
-> DistAbstraction - """Prepare a requirement that would be obtained from req.link - """ - # TODO: Breakup into smaller functions - if req.link and req.link.scheme == 'file': - path = url_to_path(req.link.url) - logger.info('Processing %s', display_path(path)) - else: - logger.info('Collecting %s', req) - - with indent_log(): - # @@ if filesystem packages are not marked - # editable in a req, a non deterministic error - # occurs when the script attempts to unpack the - # build directory - req.ensure_has_source_dir(self.build_dir) - # If a checkout exists, it's unwise to keep going. version - # inconsistencies are logged later, but do not fail the - # installation. - # FIXME: this won't upgrade when there's an existing - # package unpacked in `req.source_dir` - # package unpacked in `req.source_dir` - if os.path.exists(os.path.join(req.source_dir, 'setup.py')): - raise PreviousBuildDirError( - "pip can't proceed with requirements '%s' due to a" - " pre-existing build directory (%s). This is " - "likely due to a previous installation that failed" - ". pip is being responsible and not assuming it " - "can delete this. Please delete it and try again." - % (req, req.source_dir) - ) - req.populate_link(finder, upgrade_allowed, require_hashes) - - # We can't hit this spot and have populate_link return None. - # req.satisfied_by is None here (because we're - # guarded) and upgrade has no impact except when satisfied_by - # is not None. - # Then inside find_requirement existing_applicable -> False - # If no new versions are found, DistributionNotFound is raised, - # otherwise a result is guaranteed. - assert req.link - link = req.link - - # Now that we have the real link, we can tell what kind of - # requirements we have and raise some more informative errors - # than otherwise. (For example, we can raise VcsHashUnsupported - # for a VCS URL rather than HashMissing.) 
- if require_hashes: - # We could check these first 2 conditions inside - # unpack_url and save repetition of conditions, but then - # we would report less-useful error messages for - # unhashable requirements, complaining that there's no - # hash provided. - if is_vcs_url(link): - raise VcsHashUnsupported() - elif is_file_url(link) and is_dir_url(link): - raise DirectoryUrlHashUnsupported() - if not req.original_link and not req.is_pinned: - # Unpinned packages are asking for trouble when a new - # version is uploaded. This isn't a security check, but - # it saves users a surprising hash mismatch in the - # future. - # - # file:/// URLs aren't pinnable, so don't complain - # about them not being pinned. - raise HashUnpinned() - - hashes = req.hashes(trust_internet=not require_hashes) - if require_hashes and not hashes: - # Known-good hashes are missing for this requirement, so - # shim it with a facade object that will provoke hash - # computation and then raise a HashMissing exception - # showing the user what the hash should be. - hashes = MissingHashes() - - try: - download_dir = self.download_dir - # We always delete unpacked sdists after pip ran. - autodelete_unpacked = True - if req.link.is_wheel and self.wheel_download_dir: - # when doing 'pip wheel` we download wheels to a - # dedicated dir. - download_dir = self.wheel_download_dir - if req.link.is_wheel: - if download_dir: - # When downloading, we only unpack wheels to get - # metadata. - autodelete_unpacked = True - else: - # When installing a wheel, we use the unpacked - # wheel. 
- autodelete_unpacked = False - unpack_url( - req.link, req.source_dir, - download_dir, autodelete_unpacked, - session=session, hashes=hashes, - progress_bar=self.progress_bar - ) - except requests.HTTPError as exc: - logger.critical( - 'Could not install requirement %s because of error %s', - req, - exc, - ) - raise InstallationError( - 'Could not install requirement %s because of HTTP ' - 'error %s for URL %s' % - (req, exc, req.link) - ) - abstract_dist = make_abstract_dist(req) - with self.req_tracker.track(req): - abstract_dist.prep_for_dist(finder, self.build_isolation) - if self._download_should_save: - # Make a .zip of the source_dir we already created. - if req.link.scheme in vcs.all_schemes: - req.archive(self.download_dir) - return abstract_dist - - def prepare_editable_requirement( - self, - req, # type: InstallRequirement - require_hashes, # type: bool - use_user_site, # type: bool - finder # type: PackageFinder - ): - # type: (...) -> DistAbstraction - """Prepare an editable requirement - """ - assert req.editable, "cannot prepare a non-editable req as editable" - - logger.info('Obtaining %s', req) - - with indent_log(): - if require_hashes: - raise InstallationError( - 'The editable requirement %s cannot be installed when ' - 'requiring hashes, because there is no single file to ' - 'hash.' 
% req - ) - req.ensure_has_source_dir(self.src_dir) - req.update_editable(not self._download_should_save) - - abstract_dist = make_abstract_dist(req) - with self.req_tracker.track(req): - abstract_dist.prep_for_dist(finder, self.build_isolation) - - if self._download_should_save: - req.archive(self.download_dir) - req.check_if_exists(use_user_site) - - return abstract_dist - - def prepare_installed_requirement(self, req, require_hashes, skip_reason): - # type: (InstallRequirement, bool, Optional[str]) -> DistAbstraction - """Prepare an already-installed requirement - """ - assert req.satisfied_by, "req should have been satisfied but isn't" - assert skip_reason is not None, ( - "did not get skip reason skipped but req.satisfied_by " - "is set to %r" % (req.satisfied_by,) - ) - logger.info( - 'Requirement %s: %s (%s)', - skip_reason, req, req.satisfied_by.version - ) - with indent_log(): - if require_hashes: - logger.debug( - 'Since it is already installed, we are trusting this ' - 'package without checking its hash. To ensure a ' - 'completely repeatable environment, install into an ' - 'empty virtualenv.' 
- ) - abstract_dist = Installed(req) - - return abstract_dist diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/pep425tags.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/pep425tags.py deleted file mode 100644 index 1e782d1..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/pep425tags.py +++ /dev/null @@ -1,381 +0,0 @@ -"""Generate and work with PEP 425 Compatibility Tags.""" -from __future__ import absolute_import - -import distutils.util -import logging -import platform -import re -import sys -import sysconfig -import warnings -from collections import OrderedDict - -import pip._internal.utils.glibc -from pip._internal.utils.compat import get_extension_suffixes -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from typing import ( # noqa: F401 - Tuple, Callable, List, Optional, Union, Dict - ) - - Pep425Tag = Tuple[str, str, str] - -logger = logging.getLogger(__name__) - -_osx_arch_pat = re.compile(r'(.+)_(\d+)_(\d+)_(.+)') - - -def get_config_var(var): - # type: (str) -> Optional[str] - try: - return sysconfig.get_config_var(var) - except IOError as e: # Issue #1074 - warnings.warn("{}".format(e), RuntimeWarning) - return None - - -def get_abbr_impl(): - # type: () -> str - """Return abbreviated implementation name.""" - if hasattr(sys, 'pypy_version_info'): - pyimpl = 'pp' - elif sys.platform.startswith('java'): - pyimpl = 'jy' - elif sys.platform == 'cli': - pyimpl = 'ip' - else: - pyimpl = 'cp' - return pyimpl - - -def get_impl_ver(): - # type: () -> str - """Return implementation version.""" - impl_ver = get_config_var("py_version_nodot") - if not impl_ver or get_abbr_impl() == 'pp': - impl_ver = ''.join(map(str, get_impl_version_info())) - return impl_ver - - -def get_impl_version_info(): - # type: () -> Tuple[int, ...] 
- """Return sys.version_info-like tuple for use in decrementing the minor - version.""" - if get_abbr_impl() == 'pp': - # as per https://github.com/pypa/pip/issues/2882 - # attrs exist only on pypy - return (sys.version_info[0], - sys.pypy_version_info.major, # type: ignore - sys.pypy_version_info.minor) # type: ignore - else: - return sys.version_info[0], sys.version_info[1] - - -def get_impl_tag(): - # type: () -> str - """ - Returns the Tag for this specific implementation. - """ - return "{}{}".format(get_abbr_impl(), get_impl_ver()) - - -def get_flag(var, fallback, expected=True, warn=True): - # type: (str, Callable[..., bool], Union[bool, int], bool) -> bool - """Use a fallback method for determining SOABI flags if the needed config - var is unset or unavailable.""" - val = get_config_var(var) - if val is None: - if warn: - logger.debug("Config variable '%s' is unset, Python ABI tag may " - "be incorrect", var) - return fallback() - return val == expected - - -def get_abi_tag(): - # type: () -> Optional[str] - """Return the ABI tag based on SOABI (if available) or emulate SOABI - (CPython 2, PyPy).""" - soabi = get_config_var('SOABI') - impl = get_abbr_impl() - if not soabi and impl in {'cp', 'pp'} and hasattr(sys, 'maxunicode'): - d = '' - m = '' - u = '' - if get_flag('Py_DEBUG', - lambda: hasattr(sys, 'gettotalrefcount'), - warn=(impl == 'cp')): - d = 'd' - if get_flag('WITH_PYMALLOC', - lambda: impl == 'cp', - warn=(impl == 'cp')): - m = 'm' - if get_flag('Py_UNICODE_SIZE', - lambda: sys.maxunicode == 0x10ffff, - expected=4, - warn=(impl == 'cp' and - sys.version_info < (3, 3))) \ - and sys.version_info < (3, 3): - u = 'u' - abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u) - elif soabi and soabi.startswith('cpython-'): - abi = 'cp' + soabi.split('-')[1] - elif soabi: - abi = soabi.replace('.', '_').replace('-', '_') - else: - abi = None - return abi - - -def _is_running_32bit(): - # type: () -> bool - return sys.maxsize == 2147483647 - - -def 
get_platform(): - # type: () -> str - """Return our platform name 'win32', 'linux_x86_64'""" - if sys.platform == 'darwin': - # distutils.util.get_platform() returns the release based on the value - # of MACOSX_DEPLOYMENT_TARGET on which Python was built, which may - # be significantly older than the user's current machine. - release, _, machine = platform.mac_ver() - split_ver = release.split('.') - - if machine == "x86_64" and _is_running_32bit(): - machine = "i386" - elif machine == "ppc64" and _is_running_32bit(): - machine = "ppc" - - return 'macosx_{}_{}_{}'.format(split_ver[0], split_ver[1], machine) - - # XXX remove distutils dependency - result = distutils.util.get_platform().replace('.', '_').replace('-', '_') - if result == "linux_x86_64" and _is_running_32bit(): - # 32 bit Python program (running on a 64 bit Linux): pip should only - # install and run 32 bit compiled extensions in that case. - result = "linux_i686" - - return result - - -def is_manylinux1_compatible(): - # type: () -> bool - # Only Linux, and only x86-64 / i686 - if get_platform() not in {"linux_x86_64", "linux_i686"}: - return False - - # Check for presence of _manylinux module - try: - import _manylinux - return bool(_manylinux.manylinux1_compatible) - except (ImportError, AttributeError): - # Fall through to heuristic check below - pass - - # Check glibc version. CentOS 5 uses glibc 2.5. - return pip._internal.utils.glibc.have_compatible_glibc(2, 5) - - -def is_manylinux2010_compatible(): - # type: () -> bool - # Only Linux, and only x86-64 / i686 - if get_platform() not in {"linux_x86_64", "linux_i686"}: - return False - - # Check for presence of _manylinux module - try: - import _manylinux - return bool(_manylinux.manylinux2010_compatible) - except (ImportError, AttributeError): - # Fall through to heuristic check below - pass - - # Check glibc version. CentOS 6 uses glibc 2.12. 
- return pip._internal.utils.glibc.have_compatible_glibc(2, 12) - - -def get_darwin_arches(major, minor, machine): - # type: (int, int, str) -> List[str] - """Return a list of supported arches (including group arches) for - the given major, minor and machine architecture of an macOS machine. - """ - arches = [] - - def _supports_arch(major, minor, arch): - # type: (int, int, str) -> bool - # Looking at the application support for macOS versions in the chart - # provided by https://en.wikipedia.org/wiki/OS_X#Versions it appears - # our timeline looks roughly like: - # - # 10.0 - Introduces ppc support. - # 10.4 - Introduces ppc64, i386, and x86_64 support, however the ppc64 - # and x86_64 support is CLI only, and cannot be used for GUI - # applications. - # 10.5 - Extends ppc64 and x86_64 support to cover GUI applications. - # 10.6 - Drops support for ppc64 - # 10.7 - Drops support for ppc - # - # Given that we do not know if we're installing a CLI or a GUI - # application, we must be conservative and assume it might be a GUI - # application and behave as if ppc64 and x86_64 support did not occur - # until 10.5. - # - # Note: The above information is taken from the "Application support" - # column in the chart not the "Processor support" since I believe - # that we care about what instruction sets an application can use - # not which processors the OS supports. 
- if arch == 'ppc': - return (major, minor) <= (10, 5) - if arch == 'ppc64': - return (major, minor) == (10, 5) - if arch == 'i386': - return (major, minor) >= (10, 4) - if arch == 'x86_64': - return (major, minor) >= (10, 5) - if arch in groups: - for garch in groups[arch]: - if _supports_arch(major, minor, garch): - return True - return False - - groups = OrderedDict([ - ("fat", ("i386", "ppc")), - ("intel", ("x86_64", "i386")), - ("fat64", ("x86_64", "ppc64")), - ("fat32", ("x86_64", "i386", "ppc")), - ]) # type: Dict[str, Tuple[str, ...]] - - if _supports_arch(major, minor, machine): - arches.append(machine) - - for garch in groups: - if machine in groups[garch] and _supports_arch(major, minor, garch): - arches.append(garch) - - arches.append('universal') - - return arches - - -def get_all_minor_versions_as_strings(version_info): - # type: (Tuple[int, ...]) -> List[str] - versions = [] - major = version_info[:-1] - # Support all previous minor Python versions. - for minor in range(version_info[-1], -1, -1): - versions.append(''.join(map(str, major + (minor,)))) - return versions - - -def get_supported( - versions=None, # type: Optional[List[str]] - noarch=False, # type: bool - platform=None, # type: Optional[str] - impl=None, # type: Optional[str] - abi=None # type: Optional[str] -): - # type: (...) -> List[Pep425Tag] - """Return a list of supported tags for each version specified in - `versions`. - - :param versions: a list of string versions, of the form ["33", "32"], - or None. The first version will be assumed to support our ABI. - :param platform: specify the exact platform you want valid - tags for, or None. If None, use the local system platform. - :param impl: specify the exact implementation you want valid - tags for, or None. If None, use the local interpreter impl. - :param abi: specify the exact abi you want valid - tags for, or None. If None, use the local interpreter abi. 
- """ - supported = [] - - # Versions must be given with respect to the preference - if versions is None: - version_info = get_impl_version_info() - versions = get_all_minor_versions_as_strings(version_info) - - impl = impl or get_abbr_impl() - - abis = [] # type: List[str] - - abi = abi or get_abi_tag() - if abi: - abis[0:0] = [abi] - - abi3s = set() - for suffix in get_extension_suffixes(): - if suffix.startswith('.abi'): - abi3s.add(suffix.split('.', 2)[1]) - - abis.extend(sorted(list(abi3s))) - - abis.append('none') - - if not noarch: - arch = platform or get_platform() - arch_prefix, arch_sep, arch_suffix = arch.partition('_') - if arch.startswith('macosx'): - # support macosx-10.6-intel on macosx-10.9-x86_64 - match = _osx_arch_pat.match(arch) - if match: - name, major, minor, actual_arch = match.groups() - tpl = '{}_{}_%i_%s'.format(name, major) - arches = [] - for m in reversed(range(int(minor) + 1)): - for a in get_darwin_arches(int(major), m, actual_arch): - arches.append(tpl % (m, a)) - else: - # arch pattern didn't match (?!) - arches = [arch] - elif arch_prefix == 'manylinux2010': - # manylinux1 wheels run on most manylinux2010 systems with the - # exception of wheels depending on ncurses. 
PEP 571 states - # manylinux1 wheels should be considered manylinux2010 wheels: - # https://www.python.org/dev/peps/pep-0571/#backwards-compatibility-with-manylinux1-wheels - arches = [arch, 'manylinux1' + arch_sep + arch_suffix] - elif platform is None: - arches = [] - if is_manylinux2010_compatible(): - arches.append('manylinux2010' + arch_sep + arch_suffix) - if is_manylinux1_compatible(): - arches.append('manylinux1' + arch_sep + arch_suffix) - arches.append(arch) - else: - arches = [arch] - - # Current version, current API (built specifically for our Python): - for abi in abis: - for arch in arches: - supported.append(('%s%s' % (impl, versions[0]), abi, arch)) - - # abi3 modules compatible with older version of Python - for version in versions[1:]: - # abi3 was introduced in Python 3.2 - if version in {'31', '30'}: - break - for abi in abi3s: # empty set if not Python 3 - for arch in arches: - supported.append(("%s%s" % (impl, version), abi, arch)) - - # Has binaries, does not use the Python API: - for arch in arches: - supported.append(('py%s' % (versions[0][0]), 'none', arch)) - - # No abi / arch, but requires our implementation: - supported.append(('%s%s' % (impl, versions[0]), 'none', 'any')) - # Tagged specifically as being cross-version compatible - # (with just the major version specified) - supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any')) - - # No abi / arch, generic Python - for i, version in enumerate(versions): - supported.append(('py%s' % (version,), 'none', 'any')) - if i == 0: - supported.append(('py%s' % (version[0]), 'none', 'any')) - - return supported - - -implementation_tag = get_impl_tag() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/pyproject.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/pyproject.py deleted file mode 100644 index 8d739a6..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/pyproject.py +++ /dev/null @@ -1,171 
+0,0 @@ -from __future__ import absolute_import - -import io -import os -import sys - -from pip._vendor import pytoml, six - -from pip._internal.exceptions import InstallationError -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from typing import Any, Tuple, Optional, List # noqa: F401 - - -def _is_list_of_str(obj): - # type: (Any) -> bool - return ( - isinstance(obj, list) and - all(isinstance(item, six.string_types) for item in obj) - ) - - -def make_pyproject_path(setup_py_dir): - # type: (str) -> str - path = os.path.join(setup_py_dir, 'pyproject.toml') - - # Python2 __file__ should not be unicode - if six.PY2 and isinstance(path, six.text_type): - path = path.encode(sys.getfilesystemencoding()) - - return path - - -def load_pyproject_toml( - use_pep517, # type: Optional[bool] - pyproject_toml, # type: str - setup_py, # type: str - req_name # type: str -): - # type: (...) -> Optional[Tuple[List[str], str, List[str]]] - """Load the pyproject.toml file. - - Parameters: - use_pep517 - Has the user requested PEP 517 processing? None - means the user hasn't explicitly specified. 
- pyproject_toml - Location of the project's pyproject.toml file - setup_py - Location of the project's setup.py file - req_name - The name of the requirement we're processing (for - error reporting) - - Returns: - None if we should use the legacy code path, otherwise a tuple - ( - requirements from pyproject.toml, - name of PEP 517 backend, - requirements we should check are installed after setting - up the build environment - ) - """ - has_pyproject = os.path.isfile(pyproject_toml) - has_setup = os.path.isfile(setup_py) - - if has_pyproject: - with io.open(pyproject_toml, encoding="utf-8") as f: - pp_toml = pytoml.load(f) - build_system = pp_toml.get("build-system") - else: - build_system = None - - # The following cases must use PEP 517 - # We check for use_pep517 being non-None and falsey because that means - # the user explicitly requested --no-use-pep517. The value 0 as - # opposed to False can occur when the value is provided via an - # environment variable or config file option (due to the quirk of - # strtobool() returning an integer in pip's configuration code). - if has_pyproject and not has_setup: - if use_pep517 is not None and not use_pep517: - raise InstallationError( - "Disabling PEP 517 processing is invalid: " - "project does not have a setup.py" - ) - use_pep517 = True - elif build_system and "build-backend" in build_system: - if use_pep517 is not None and not use_pep517: - raise InstallationError( - "Disabling PEP 517 processing is invalid: " - "project specifies a build backend of {} " - "in pyproject.toml".format( - build_system["build-backend"] - ) - ) - use_pep517 = True - - # If we haven't worked out whether to use PEP 517 yet, - # and the user hasn't explicitly stated a preference, - # we do so if the project has a pyproject.toml file. - elif use_pep517 is None: - use_pep517 = has_pyproject - - # At this point, we know whether we're going to use PEP 517. 
- assert use_pep517 is not None - - # If we're using the legacy code path, there is nothing further - # for us to do here. - if not use_pep517: - return None - - if build_system is None: - # Either the user has a pyproject.toml with no build-system - # section, or the user has no pyproject.toml, but has opted in - # explicitly via --use-pep517. - # In the absence of any explicit backend specification, we - # assume the setuptools backend that most closely emulates the - # traditional direct setup.py execution, and require wheel and - # a version of setuptools that supports that backend. - - build_system = { - "requires": ["setuptools>=40.8.0", "wheel"], - "build-backend": "setuptools.build_meta:__legacy__", - } - - # If we're using PEP 517, we have build system information (either - # from pyproject.toml, or defaulted by the code above). - # Note that at this point, we do not know if the user has actually - # specified a backend, though. - assert build_system is not None - - # Ensure that the build-system section in pyproject.toml conforms - # to PEP 518. - error_template = ( - "{package} has a pyproject.toml file that does not comply " - "with PEP 518: {reason}" - ) - - # Specifying the build-system table but not the requires key is invalid - if "requires" not in build_system: - raise InstallationError( - error_template.format(package=req_name, reason=( - "it has a 'build-system' table but not " - "'build-system.requires' which is mandatory in the table" - )) - ) - - # Error out if requires is not a list of strings - requires = build_system["requires"] - if not _is_list_of_str(requires): - raise InstallationError(error_template.format( - package=req_name, - reason="'build-system.requires' is not a list of strings.", - )) - - backend = build_system.get("build-backend") - check = [] # type: List[str] - if backend is None: - # If the user didn't specify a backend, we assume they want to use - # the setuptools backend. 
But we can't be sure they have included - # a version of setuptools which supplies the backend, or wheel - # (which is needed by the backend) in their requirements. So we - # make a note to check that those requirements are present once - # we have set up the environment. - # This is quite a lot of work to check for a very specific case. But - # the problem is, that case is potentially quite common - projects that - # adopted PEP 518 early for the ability to specify requirements to - # execute setup.py, but never considered needing to mention the build - # tools themselves. The original PEP 518 code had a similar check (but - # implemented in a different way). - backend = "setuptools.build_meta:__legacy__" - check = ["setuptools>=40.8.0", "wheel"] - - return (requires, backend, check) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/req/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/req/__init__.py deleted file mode 100644 index 5e4eb92..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/req/__init__.py +++ /dev/null @@ -1,77 +0,0 @@ -from __future__ import absolute_import - -import logging - -from .req_install import InstallRequirement -from .req_set import RequirementSet -from .req_file import parse_requirements -from pip._internal.utils.logging import indent_log -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from typing import List, Sequence # noqa: F401 - -__all__ = [ - "RequirementSet", "InstallRequirement", - "parse_requirements", "install_given_reqs", -] - -logger = logging.getLogger(__name__) - - -def install_given_reqs( - to_install, # type: List[InstallRequirement] - install_options, # type: List[str] - global_options=(), # type: Sequence[str] - *args, **kwargs -): - # type: (...) -> List[InstallRequirement] - """ - Install everything in the given list. 
- - (to be called after having downloaded and unpacked the packages) - """ - - if to_install: - logger.info( - 'Installing collected packages: %s', - ', '.join([req.name for req in to_install]), - ) - - with indent_log(): - for requirement in to_install: - if requirement.conflicts_with: - logger.info( - 'Found existing installation: %s', - requirement.conflicts_with, - ) - with indent_log(): - uninstalled_pathset = requirement.uninstall( - auto_confirm=True - ) - try: - requirement.install( - install_options, - global_options, - *args, - **kwargs - ) - except Exception: - should_rollback = ( - requirement.conflicts_with and - not requirement.install_succeeded - ) - # if install did not succeed, rollback previous uninstall - if should_rollback: - uninstalled_pathset.rollback() - raise - else: - should_commit = ( - requirement.conflicts_with and - requirement.install_succeeded - ) - if should_commit: - uninstalled_pathset.commit() - requirement.remove_temporary_source() - - return to_install diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/req/constructors.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/req/constructors.py deleted file mode 100644 index 1eed1dd..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/req/constructors.py +++ /dev/null @@ -1,339 +0,0 @@ -"""Backing implementation for InstallRequirement's various constructors - -The idea here is that these formed a major chunk of InstallRequirement's size -so, moving them and support code dedicated to them outside of that class -helps creates for better understandability for the rest of the code. - -These are meant to be used elsewhere within pip to create instances of -InstallRequirement. 
-""" - -import logging -import os -import re - -from pip._vendor.packaging.markers import Marker -from pip._vendor.packaging.requirements import InvalidRequirement, Requirement -from pip._vendor.packaging.specifiers import Specifier -from pip._vendor.pkg_resources import RequirementParseError, parse_requirements - -from pip._internal.download import ( - is_archive_file, is_url, path_to_url, url_to_path, -) -from pip._internal.exceptions import InstallationError -from pip._internal.models.index import PyPI, TestPyPI -from pip._internal.models.link import Link -from pip._internal.pyproject import make_pyproject_path -from pip._internal.req.req_install import InstallRequirement -from pip._internal.utils.misc import is_installable_dir -from pip._internal.utils.typing import MYPY_CHECK_RUNNING -from pip._internal.vcs import vcs -from pip._internal.wheel import Wheel - -if MYPY_CHECK_RUNNING: - from typing import ( # noqa: F401 - Optional, Tuple, Set, Any, Union, Text, Dict, - ) - from pip._internal.cache import WheelCache # noqa: F401 - - -__all__ = [ - "install_req_from_editable", "install_req_from_line", - "parse_editable" -] - -logger = logging.getLogger(__name__) -operators = Specifier._operators.keys() - - -def _strip_extras(path): - # type: (str) -> Tuple[str, Optional[str]] - m = re.match(r'^(.+)(\[[^\]]+\])$', path) - extras = None - if m: - path_no_extras = m.group(1) - extras = m.group(2) - else: - path_no_extras = path - - return path_no_extras, extras - - -def parse_editable(editable_req): - # type: (str) -> Tuple[Optional[str], str, Optional[Set[str]]] - """Parses an editable requirement into: - - a requirement name - - an URL - - extras - - editable options - Accepted requirements: - svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir - .[some_extra] - """ - - url = editable_req - - # If a file path is specified with extras, strip off the extras. 
- url_no_extras, extras = _strip_extras(url) - - if os.path.isdir(url_no_extras): - if not os.path.exists(os.path.join(url_no_extras, 'setup.py')): - msg = ( - 'File "setup.py" not found. Directory cannot be installed ' - 'in editable mode: {}'.format(os.path.abspath(url_no_extras)) - ) - pyproject_path = make_pyproject_path(url_no_extras) - if os.path.isfile(pyproject_path): - msg += ( - '\n(A "pyproject.toml" file was found, but editable ' - 'mode currently requires a setup.py based build.)' - ) - raise InstallationError(msg) - - # Treating it as code that has already been checked out - url_no_extras = path_to_url(url_no_extras) - - if url_no_extras.lower().startswith('file:'): - package_name = Link(url_no_extras).egg_fragment - if extras: - return ( - package_name, - url_no_extras, - Requirement("placeholder" + extras.lower()).extras, - ) - else: - return package_name, url_no_extras, None - - for version_control in vcs: - if url.lower().startswith('%s:' % version_control): - url = '%s+%s' % (version_control, url) - break - - if '+' not in url: - raise InstallationError( - '%s should either be a path to a local project or a VCS url ' - 'beginning with svn+, git+, hg+, or bzr+' % - editable_req - ) - - vc_type = url.split('+', 1)[0].lower() - - if not vcs.get_backend(vc_type): - error_message = 'For --editable=%s only ' % editable_req + \ - ', '.join([backend.name + '+URL' for backend in vcs.backends]) + \ - ' is currently supported' - raise InstallationError(error_message) - - package_name = Link(url).egg_fragment - if not package_name: - raise InstallationError( - "Could not detect requirement name for '%s', please specify one " - "with #egg=your_package_name" % editable_req - ) - return package_name, url, None - - -def deduce_helpful_msg(req): - # type: (str) -> str - """Returns helpful msg in case requirements file does not exist, - or cannot be parsed. - - :params req: Requirements file path - """ - msg = "" - if os.path.exists(req): - msg = " It does exist." 
- # Try to parse and check if it is a requirements file. - try: - with open(req, 'r') as fp: - # parse first line only - next(parse_requirements(fp.read())) - msg += " The argument you provided " + \ - "(%s) appears to be a" % (req) + \ - " requirements file. If that is the" + \ - " case, use the '-r' flag to install" + \ - " the packages specified within it." - except RequirementParseError: - logger.debug("Cannot parse '%s' as requirements \ - file" % (req), exc_info=True) - else: - msg += " File '%s' does not exist." % (req) - return msg - - -# ---- The actual constructors follow ---- - - -def install_req_from_editable( - editable_req, # type: str - comes_from=None, # type: Optional[str] - use_pep517=None, # type: Optional[bool] - isolated=False, # type: bool - options=None, # type: Optional[Dict[str, Any]] - wheel_cache=None, # type: Optional[WheelCache] - constraint=False # type: bool -): - # type: (...) -> InstallRequirement - name, url, extras_override = parse_editable(editable_req) - if url.startswith('file:'): - source_dir = url_to_path(url) - else: - source_dir = None - - if name is not None: - try: - req = Requirement(name) - except InvalidRequirement: - raise InstallationError("Invalid requirement: '%s'" % name) - else: - req = None - return InstallRequirement( - req, comes_from, source_dir=source_dir, - editable=True, - link=Link(url), - constraint=constraint, - use_pep517=use_pep517, - isolated=isolated, - options=options if options else {}, - wheel_cache=wheel_cache, - extras=extras_override or (), - ) - - -def install_req_from_line( - name, # type: str - comes_from=None, # type: Optional[Union[str, InstallRequirement]] - use_pep517=None, # type: Optional[bool] - isolated=False, # type: bool - options=None, # type: Optional[Dict[str, Any]] - wheel_cache=None, # type: Optional[WheelCache] - constraint=False # type: bool -): - # type: (...) 
-> InstallRequirement - """Creates an InstallRequirement from a name, which might be a - requirement, directory containing 'setup.py', filename, or URL. - """ - if is_url(name): - marker_sep = '; ' - else: - marker_sep = ';' - if marker_sep in name: - name, markers_as_string = name.split(marker_sep, 1) - markers_as_string = markers_as_string.strip() - if not markers_as_string: - markers = None - else: - markers = Marker(markers_as_string) - else: - markers = None - name = name.strip() - req_as_string = None - path = os.path.normpath(os.path.abspath(name)) - link = None - extras_as_string = None - - if is_url(name): - link = Link(name) - else: - p, extras_as_string = _strip_extras(path) - looks_like_dir = os.path.isdir(p) and ( - os.path.sep in name or - (os.path.altsep is not None and os.path.altsep in name) or - name.startswith('.') - ) - if looks_like_dir: - if not is_installable_dir(p): - raise InstallationError( - "Directory %r is not installable. Neither 'setup.py' " - "nor 'pyproject.toml' found." % name - ) - link = Link(path_to_url(p)) - elif is_archive_file(p): - if not os.path.isfile(p): - logger.warning( - 'Requirement %r looks like a filename, but the ' - 'file does not exist', - name - ) - link = Link(path_to_url(p)) - - # it's a local file, dir, or url - if link: - # Handle relative file URLs - if link.scheme == 'file' and re.search(r'\.\./', link.url): - link = Link( - path_to_url(os.path.normpath(os.path.abspath(link.path)))) - # wheel file - if link.is_wheel: - wheel = Wheel(link.filename) # can raise InvalidWheelFilename - req_as_string = "%s==%s" % (wheel.name, wheel.version) - else: - # set the req to the egg fragment. 
when it's not there, this - # will become an 'unnamed' requirement - req_as_string = link.egg_fragment - - # a requirement specifier - else: - req_as_string = name - - if extras_as_string: - extras = Requirement("placeholder" + extras_as_string.lower()).extras - else: - extras = () - if req_as_string is not None: - try: - req = Requirement(req_as_string) - except InvalidRequirement: - if os.path.sep in req_as_string: - add_msg = "It looks like a path." - add_msg += deduce_helpful_msg(req_as_string) - elif ('=' in req_as_string and - not any(op in req_as_string for op in operators)): - add_msg = "= is not a valid operator. Did you mean == ?" - else: - add_msg = "" - raise InstallationError( - "Invalid requirement: '%s'\n%s" % (req_as_string, add_msg) - ) - else: - req = None - - return InstallRequirement( - req, comes_from, link=link, markers=markers, - use_pep517=use_pep517, isolated=isolated, - options=options if options else {}, - wheel_cache=wheel_cache, - constraint=constraint, - extras=extras, - ) - - -def install_req_from_req_string( - req_string, # type: str - comes_from=None, # type: Optional[InstallRequirement] - isolated=False, # type: bool - wheel_cache=None, # type: Optional[WheelCache] - use_pep517=None # type: Optional[bool] -): - # type: (...) 
-> InstallRequirement - try: - req = Requirement(req_string) - except InvalidRequirement: - raise InstallationError("Invalid requirement: '%s'" % req) - - domains_not_allowed = [ - PyPI.file_storage_domain, - TestPyPI.file_storage_domain, - ] - if req.url and comes_from.link.netloc in domains_not_allowed: - # Explicitly disallow pypi packages that depend on external urls - raise InstallationError( - "Packages installed from PyPI cannot depend on packages " - "which are not also hosted on PyPI.\n" - "%s depends on %s " % (comes_from.name, req) - ) - - return InstallRequirement( - req, comes_from, isolated=isolated, wheel_cache=wheel_cache, - use_pep517=use_pep517 - ) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/req/req_file.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/req/req_file.py deleted file mode 100644 index 726f2f6..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/req/req_file.py +++ /dev/null @@ -1,382 +0,0 @@ -""" -Requirements file parsing -""" - -from __future__ import absolute_import - -import optparse -import os -import re -import shlex -import sys - -from pip._vendor.six.moves import filterfalse -from pip._vendor.six.moves.urllib import parse as urllib_parse - -from pip._internal.cli import cmdoptions -from pip._internal.download import get_file_content -from pip._internal.exceptions import RequirementsFileParseError -from pip._internal.req.constructors import ( - install_req_from_editable, install_req_from_line, -) -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from typing import ( # noqa: F401 - Iterator, Tuple, Optional, List, Callable, Text - ) - from pip._internal.req import InstallRequirement # noqa: F401 - from pip._internal.cache import WheelCache # noqa: F401 - from pip._internal.index import PackageFinder # noqa: F401 - from pip._internal.download import PipSession # noqa: F401 - - ReqFileLines = 
Iterator[Tuple[int, Text]] - -__all__ = ['parse_requirements'] - -SCHEME_RE = re.compile(r'^(http|https|file):', re.I) -COMMENT_RE = re.compile(r'(^|\s)+#.*$') - -# Matches environment variable-style values in '${MY_VARIABLE_1}' with the -# variable name consisting of only uppercase letters, digits or the '_' -# (underscore). This follows the POSIX standard defined in IEEE Std 1003.1, -# 2013 Edition. -ENV_VAR_RE = re.compile(r'(?P\$\{(?P[A-Z0-9_]+)\})') - -SUPPORTED_OPTIONS = [ - cmdoptions.constraints, - cmdoptions.editable, - cmdoptions.requirements, - cmdoptions.no_index, - cmdoptions.index_url, - cmdoptions.find_links, - cmdoptions.extra_index_url, - cmdoptions.always_unzip, - cmdoptions.no_binary, - cmdoptions.only_binary, - cmdoptions.pre, - cmdoptions.trusted_host, - cmdoptions.require_hashes, -] # type: List[Callable[..., optparse.Option]] - -# options to be passed to requirements -SUPPORTED_OPTIONS_REQ = [ - cmdoptions.install_options, - cmdoptions.global_options, - cmdoptions.hash, -] # type: List[Callable[..., optparse.Option]] - -# the 'dest' string values -SUPPORTED_OPTIONS_REQ_DEST = [str(o().dest) for o in SUPPORTED_OPTIONS_REQ] - - -def parse_requirements( - filename, # type: str - finder=None, # type: Optional[PackageFinder] - comes_from=None, # type: Optional[str] - options=None, # type: Optional[optparse.Values] - session=None, # type: Optional[PipSession] - constraint=False, # type: bool - wheel_cache=None, # type: Optional[WheelCache] - use_pep517=None # type: Optional[bool] -): - # type: (...) -> Iterator[InstallRequirement] - """Parse a requirements file and yield InstallRequirement instances. - - :param filename: Path or url of requirements file. - :param finder: Instance of pip.index.PackageFinder. - :param comes_from: Origin description of requirements. - :param options: cli options. - :param session: Instance of pip.download.PipSession. - :param constraint: If true, parsing a constraint file rather than - requirements file. 
- :param wheel_cache: Instance of pip.wheel.WheelCache - :param use_pep517: Value of the --use-pep517 option. - """ - if session is None: - raise TypeError( - "parse_requirements() missing 1 required keyword argument: " - "'session'" - ) - - _, content = get_file_content( - filename, comes_from=comes_from, session=session - ) - - lines_enum = preprocess(content, options) - - for line_number, line in lines_enum: - req_iter = process_line(line, filename, line_number, finder, - comes_from, options, session, wheel_cache, - use_pep517=use_pep517, constraint=constraint) - for req in req_iter: - yield req - - -def preprocess(content, options): - # type: (Text, Optional[optparse.Values]) -> ReqFileLines - """Split, filter, and join lines, and return a line iterator - - :param content: the content of the requirements file - :param options: cli options - """ - lines_enum = enumerate(content.splitlines(), start=1) # type: ReqFileLines - lines_enum = join_lines(lines_enum) - lines_enum = ignore_comments(lines_enum) - lines_enum = skip_regex(lines_enum, options) - lines_enum = expand_env_variables(lines_enum) - return lines_enum - - -def process_line( - line, # type: Text - filename, # type: str - line_number, # type: int - finder=None, # type: Optional[PackageFinder] - comes_from=None, # type: Optional[str] - options=None, # type: Optional[optparse.Values] - session=None, # type: Optional[PipSession] - wheel_cache=None, # type: Optional[WheelCache] - use_pep517=None, # type: Optional[bool] - constraint=False # type: bool -): - # type: (...) -> Iterator[InstallRequirement] - """Process a single requirements line; This can result in creating/yielding - requirements, or updating the finder. - - For lines that contain requirements, the only options that have an effect - are from SUPPORTED_OPTIONS_REQ, and they are scoped to the - requirement. Other options from SUPPORTED_OPTIONS may be present, but are - ignored. 
- - For lines that do not contain requirements, the only options that have an - effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may - be present, but are ignored. These lines may contain multiple options - (although our docs imply only one is supported), and all our parsed and - affect the finder. - - :param constraint: If True, parsing a constraints file. - :param options: OptionParser options that we may update - """ - parser = build_parser(line) - defaults = parser.get_default_values() - defaults.index_url = None - if finder: - defaults.format_control = finder.format_control - args_str, options_str = break_args_options(line) - # Prior to 2.7.3, shlex cannot deal with unicode entries - if sys.version_info < (2, 7, 3): - # https://github.com/python/mypy/issues/1174 - options_str = options_str.encode('utf8') # type: ignore - # https://github.com/python/mypy/issues/1174 - opts, _ = parser.parse_args( - shlex.split(options_str), defaults) # type: ignore - - # preserve for the nested code path - line_comes_from = '%s %s (line %s)' % ( - '-c' if constraint else '-r', filename, line_number, - ) - - # yield a line requirement - if args_str: - isolated = options.isolated_mode if options else False - if options: - cmdoptions.check_install_build_global(options, opts) - # get the options that apply to requirements - req_options = {} - for dest in SUPPORTED_OPTIONS_REQ_DEST: - if dest in opts.__dict__ and opts.__dict__[dest]: - req_options[dest] = opts.__dict__[dest] - yield install_req_from_line( - args_str, line_comes_from, constraint=constraint, - use_pep517=use_pep517, - isolated=isolated, options=req_options, wheel_cache=wheel_cache - ) - - # yield an editable requirement - elif opts.editables: - isolated = options.isolated_mode if options else False - yield install_req_from_editable( - opts.editables[0], comes_from=line_comes_from, - use_pep517=use_pep517, - constraint=constraint, isolated=isolated, wheel_cache=wheel_cache - ) - - # parse a nested 
requirements file - elif opts.requirements or opts.constraints: - if opts.requirements: - req_path = opts.requirements[0] - nested_constraint = False - else: - req_path = opts.constraints[0] - nested_constraint = True - # original file is over http - if SCHEME_RE.search(filename): - # do a url join so relative paths work - req_path = urllib_parse.urljoin(filename, req_path) - # original file and nested file are paths - elif not SCHEME_RE.search(req_path): - # do a join so relative paths work - req_path = os.path.join(os.path.dirname(filename), req_path) - # TODO: Why not use `comes_from='-r {} (line {})'` here as well? - parsed_reqs = parse_requirements( - req_path, finder, comes_from, options, session, - constraint=nested_constraint, wheel_cache=wheel_cache - ) - for req in parsed_reqs: - yield req - - # percolate hash-checking option upward - elif opts.require_hashes: - options.require_hashes = opts.require_hashes - - # set finder options - elif finder: - if opts.index_url: - finder.index_urls = [opts.index_url] - if opts.no_index is True: - finder.index_urls = [] - if opts.extra_index_urls: - finder.index_urls.extend(opts.extra_index_urls) - if opts.find_links: - # FIXME: it would be nice to keep track of the source - # of the find_links: support a find-links local path - # relative to a requirements file. - value = opts.find_links[0] - req_dir = os.path.dirname(os.path.abspath(filename)) - relative_to_reqs_file = os.path.join(req_dir, value) - if os.path.exists(relative_to_reqs_file): - value = relative_to_reqs_file - finder.find_links.append(value) - if opts.pre: - finder.allow_all_prereleases = True - if opts.trusted_hosts: - finder.secure_origins.extend( - ("*", host, "*") for host in opts.trusted_hosts) - - -def break_args_options(line): - # type: (Text) -> Tuple[str, Text] - """Break up the line into an args and options string. We only want to shlex - (and then optparse) the options, not the args. args can contain markers - which are corrupted by shlex. 
- """ - tokens = line.split(' ') - args = [] - options = tokens[:] - for token in tokens: - if token.startswith('-') or token.startswith('--'): - break - else: - args.append(token) - options.pop(0) - return ' '.join(args), ' '.join(options) # type: ignore - - -def build_parser(line): - # type: (Text) -> optparse.OptionParser - """ - Return a parser for parsing requirement lines - """ - parser = optparse.OptionParser(add_help_option=False) - - option_factories = SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ - for option_factory in option_factories: - option = option_factory() - parser.add_option(option) - - # By default optparse sys.exits on parsing errors. We want to wrap - # that in our own exception. - def parser_exit(self, msg): - # add offending line - msg = 'Invalid requirement: %s\n%s' % (line, msg) - raise RequirementsFileParseError(msg) - # NOTE: mypy disallows assigning to a method - # https://github.com/python/mypy/issues/2427 - parser.exit = parser_exit # type: ignore - - return parser - - -def join_lines(lines_enum): - # type: (ReqFileLines) -> ReqFileLines - """Joins a line ending in '\' with the previous line (except when following - comments). The joined line takes on the index of the first line. - """ - primary_line_number = None - new_line = [] # type: List[Text] - for line_number, line in lines_enum: - if not line.endswith('\\') or COMMENT_RE.match(line): - if COMMENT_RE.match(line): - # this ensures comments are always matched later - line = ' ' + line - if new_line: - new_line.append(line) - yield primary_line_number, ''.join(new_line) - new_line = [] - else: - yield line_number, line - else: - if not new_line: - primary_line_number = line_number - new_line.append(line.strip('\\')) - - # last line contains \ - if new_line: - yield primary_line_number, ''.join(new_line) - - # TODO: handle space after '\'. - - -def ignore_comments(lines_enum): - # type: (ReqFileLines) -> ReqFileLines - """ - Strips comments and filter empty lines. 
- """ - for line_number, line in lines_enum: - line = COMMENT_RE.sub('', line) - line = line.strip() - if line: - yield line_number, line - - -def skip_regex(lines_enum, options): - # type: (ReqFileLines, Optional[optparse.Values]) -> ReqFileLines - """ - Skip lines that match '--skip-requirements-regex' pattern - - Note: the regex pattern is only built once - """ - skip_regex = options.skip_requirements_regex if options else None - if skip_regex: - pattern = re.compile(skip_regex) - lines_enum = filterfalse(lambda e: pattern.search(e[1]), lines_enum) - return lines_enum - - -def expand_env_variables(lines_enum): - # type: (ReqFileLines) -> ReqFileLines - """Replace all environment variables that can be retrieved via `os.getenv`. - - The only allowed format for environment variables defined in the - requirement file is `${MY_VARIABLE_1}` to ensure two things: - - 1. Strings that contain a `$` aren't accidentally (partially) expanded. - 2. Ensure consistency across platforms for requirement files. - - These points are the result of a discusssion on the `github pull - request #3514 `_. - - Valid characters in variable names follow the `POSIX standard - `_ and are limited - to uppercase letter, digits and the `_` (underscore). 
- """ - for line_number, line in lines_enum: - for env_var, var_name in ENV_VAR_RE.findall(line): - value = os.getenv(var_name) - if not value: - continue - - line = line.replace(env_var, value) - - yield line_number, line diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/req/req_install.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/req/req_install.py deleted file mode 100644 index a4834b0..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/req/req_install.py +++ /dev/null @@ -1,1021 +0,0 @@ -from __future__ import absolute_import - -import logging -import os -import shutil -import sys -import sysconfig -import zipfile -from distutils.util import change_root - -from pip._vendor import pkg_resources, six -from pip._vendor.packaging.requirements import Requirement -from pip._vendor.packaging.utils import canonicalize_name -from pip._vendor.packaging.version import Version -from pip._vendor.packaging.version import parse as parse_version -from pip._vendor.pep517.wrappers import Pep517HookCaller - -from pip._internal import wheel -from pip._internal.build_env import NoOpBuildEnvironment -from pip._internal.exceptions import InstallationError -from pip._internal.locations import ( - PIP_DELETE_MARKER_FILENAME, running_under_virtualenv, -) -from pip._internal.models.link import Link -from pip._internal.pyproject import load_pyproject_toml, make_pyproject_path -from pip._internal.req.req_uninstall import UninstallPathSet -from pip._internal.utils.compat import native_str -from pip._internal.utils.hashes import Hashes -from pip._internal.utils.logging import indent_log -from pip._internal.utils.misc import ( - _make_build_dir, ask_path_exists, backup_dir, call_subprocess, - display_path, dist_in_site_packages, dist_in_usersite, ensure_dir, - get_installed_version, redact_password_from_url, rmtree, -) -from pip._internal.utils.packaging import get_metadata -from 
pip._internal.utils.setuptools_build import SETUPTOOLS_SHIM -from pip._internal.utils.temp_dir import TempDirectory -from pip._internal.utils.typing import MYPY_CHECK_RUNNING -from pip._internal.utils.ui import open_spinner -from pip._internal.vcs import vcs -from pip._internal.wheel import move_wheel_files - -if MYPY_CHECK_RUNNING: - from typing import ( # noqa: F401 - Optional, Iterable, List, Union, Any, Text, Sequence, Dict - ) - from pip._internal.build_env import BuildEnvironment # noqa: F401 - from pip._internal.cache import WheelCache # noqa: F401 - from pip._internal.index import PackageFinder # noqa: F401 - from pip._vendor.pkg_resources import Distribution # noqa: F401 - from pip._vendor.packaging.specifiers import SpecifierSet # noqa: F401 - from pip._vendor.packaging.markers import Marker # noqa: F401 - - -logger = logging.getLogger(__name__) - - -class InstallRequirement(object): - """ - Represents something that may be installed later on, may have information - about where to fetch the relavant requirement and also contains logic for - installing the said requirement. - """ - - def __init__( - self, - req, # type: Optional[Requirement] - comes_from, # type: Optional[Union[str, InstallRequirement]] - source_dir=None, # type: Optional[str] - editable=False, # type: bool - link=None, # type: Optional[Link] - update=True, # type: bool - markers=None, # type: Optional[Marker] - use_pep517=None, # type: Optional[bool] - isolated=False, # type: bool - options=None, # type: Optional[Dict[str, Any]] - wheel_cache=None, # type: Optional[WheelCache] - constraint=False, # type: bool - extras=() # type: Iterable[str] - ): - # type: (...) 
-> None - assert req is None or isinstance(req, Requirement), req - self.req = req - self.comes_from = comes_from - self.constraint = constraint - if source_dir is not None: - self.source_dir = os.path.normpath(os.path.abspath(source_dir)) - else: - self.source_dir = None - self.editable = editable - - self._wheel_cache = wheel_cache - if link is None and req and req.url: - # PEP 508 URL requirement - link = Link(req.url) - self.link = self.original_link = link - - if extras: - self.extras = extras - elif req: - self.extras = { - pkg_resources.safe_extra(extra) for extra in req.extras - } - else: - self.extras = set() - if markers is None and req: - markers = req.marker - self.markers = markers - - self._egg_info_path = None # type: Optional[str] - # This holds the pkg_resources.Distribution object if this requirement - # is already available: - self.satisfied_by = None - # This hold the pkg_resources.Distribution object if this requirement - # conflicts with another installed distribution: - self.conflicts_with = None - # Temporary build location - self._temp_build_dir = TempDirectory(kind="req-build") - # Used to store the global directory where the _temp_build_dir should - # have been created. Cf _correct_build_location method. - self._ideal_build_dir = None # type: Optional[str] - # True if the editable should be updated: - self.update = update - # Set to True after successful installation - self.install_succeeded = None # type: Optional[bool] - # UninstallPathSet of uninstalled distribution (for possible rollback) - self.uninstalled_pathset = None - self.options = options if options else {} - # Set to True after successful preparation of this requirement - self.prepared = False - self.is_direct = False - - self.isolated = isolated - self.build_env = NoOpBuildEnvironment() # type: BuildEnvironment - - # For PEP 517, the directory where we request the project metadata - # gets stored. 
We need this to pass to build_wheel, so the backend - # can ensure that the wheel matches the metadata (see the PEP for - # details). - self.metadata_directory = None # type: Optional[str] - - # The static build requirements (from pyproject.toml) - self.pyproject_requires = None # type: Optional[List[str]] - - # Build requirements that we will check are available - self.requirements_to_check = [] # type: List[str] - - # The PEP 517 backend we should use to build the project - self.pep517_backend = None # type: Optional[Pep517HookCaller] - - # Are we using PEP 517 for this requirement? - # After pyproject.toml has been loaded, the only valid values are True - # and False. Before loading, None is valid (meaning "use the default"). - # Setting an explicit value before loading pyproject.toml is supported, - # but after loading this flag should be treated as read only. - self.use_pep517 = use_pep517 - - def __str__(self): - if self.req: - s = str(self.req) - if self.link: - s += ' from %s' % redact_password_from_url(self.link.url) - elif self.link: - s = redact_password_from_url(self.link.url) - else: - s = '' - if self.satisfied_by is not None: - s += ' in %s' % display_path(self.satisfied_by.location) - if self.comes_from: - if isinstance(self.comes_from, six.string_types): - comes_from = self.comes_from - else: - comes_from = self.comes_from.from_path() - if comes_from: - s += ' (from %s)' % comes_from - return s - - def __repr__(self): - return '<%s object: %s editable=%r>' % ( - self.__class__.__name__, str(self), self.editable) - - def populate_link(self, finder, upgrade, require_hashes): - # type: (PackageFinder, bool, bool) -> None - """Ensure that if a link can be found for this, that it is found. - - Note that self.link may still be None - if Upgrade is False and the - requirement is already installed. 
- - If require_hashes is True, don't use the wheel cache, because cached - wheels, always built locally, have different hashes than the files - downloaded from the index server and thus throw false hash mismatches. - Furthermore, cached wheels at present have undeterministic contents due - to file modification times. - """ - if self.link is None: - self.link = finder.find_requirement(self, upgrade) - if self._wheel_cache is not None and not require_hashes: - old_link = self.link - self.link = self._wheel_cache.get(self.link, self.name) - if old_link != self.link: - logger.debug('Using cached wheel link: %s', self.link) - - # Things that are valid for all kinds of requirements? - @property - def name(self): - # type: () -> Optional[str] - if self.req is None: - return None - return native_str(pkg_resources.safe_name(self.req.name)) - - @property - def specifier(self): - # type: () -> SpecifierSet - return self.req.specifier - - @property - def is_pinned(self): - # type: () -> bool - """Return whether I am pinned to an exact version. - - For example, some-package==1.2 is pinned; some-package>1.2 is not. - """ - specifiers = self.specifier - return (len(specifiers) == 1 and - next(iter(specifiers)).operator in {'==', '==='}) - - @property - def installed_version(self): - return get_installed_version(self.name) - - def match_markers(self, extras_requested=None): - # type: (Optional[Iterable[str]]) -> bool - if not extras_requested: - # Provide an extra to safely evaluate the markers - # without matching any extra - extras_requested = ('',) - if self.markers is not None: - return any( - self.markers.evaluate({'extra': extra}) - for extra in extras_requested) - else: - return True - - @property - def has_hash_options(self): - # type: () -> bool - """Return whether any known-good hashes are specified as options. - - These activate --require-hashes mode; hashes specified as part of a - URL do not. 
- - """ - return bool(self.options.get('hashes', {})) - - def hashes(self, trust_internet=True): - # type: (bool) -> Hashes - """Return a hash-comparer that considers my option- and URL-based - hashes to be known-good. - - Hashes in URLs--ones embedded in the requirements file, not ones - downloaded from an index server--are almost peers with ones from - flags. They satisfy --require-hashes (whether it was implicitly or - explicitly activated) but do not activate it. md5 and sha224 are not - allowed in flags, which should nudge people toward good algos. We - always OR all hashes together, even ones from URLs. - - :param trust_internet: Whether to trust URL-based (#md5=...) hashes - downloaded from the internet, as by populate_link() - - """ - good_hashes = self.options.get('hashes', {}).copy() - link = self.link if trust_internet else self.original_link - if link and link.hash: - good_hashes.setdefault(link.hash_name, []).append(link.hash) - return Hashes(good_hashes) - - def from_path(self): - # type: () -> Optional[str] - """Format a nice indicator to show where this "comes from" - """ - if self.req is None: - return None - s = str(self.req) - if self.comes_from: - if isinstance(self.comes_from, six.string_types): - comes_from = self.comes_from - else: - comes_from = self.comes_from.from_path() - if comes_from: - s += '->' + comes_from - return s - - def build_location(self, build_dir): - # type: (str) -> Optional[str] - assert build_dir is not None - if self._temp_build_dir.path is not None: - return self._temp_build_dir.path - if self.req is None: - # for requirement via a path to a directory: the name of the - # package is not available yet so we create a temp directory - # Once run_egg_info will have run, we'll be able - # to fix it via _correct_build_location - # Some systems have /tmp as a symlink which confuses custom - # builds (such as numpy). Thus, we ensure that the real path - # is returned. 
- self._temp_build_dir.create() - self._ideal_build_dir = build_dir - - return self._temp_build_dir.path - if self.editable: - name = self.name.lower() - else: - name = self.name - # FIXME: Is there a better place to create the build_dir? (hg and bzr - # need this) - if not os.path.exists(build_dir): - logger.debug('Creating directory %s', build_dir) - _make_build_dir(build_dir) - return os.path.join(build_dir, name) - - def _correct_build_location(self): - # type: () -> None - """Move self._temp_build_dir to self._ideal_build_dir/self.req.name - - For some requirements (e.g. a path to a directory), the name of the - package is not available until we run egg_info, so the build_location - will return a temporary directory and store the _ideal_build_dir. - - This is only called by self.run_egg_info to fix the temporary build - directory. - """ - if self.source_dir is not None: - return - assert self.req is not None - assert self._temp_build_dir.path - assert (self._ideal_build_dir is not None and - self._ideal_build_dir.path) # type: ignore - old_location = self._temp_build_dir.path - self._temp_build_dir.path = None - - new_location = self.build_location(self._ideal_build_dir) - if os.path.exists(new_location): - raise InstallationError( - 'A package already exists in %s; please remove it to continue' - % display_path(new_location)) - logger.debug( - 'Moving package %s from %s to new location %s', - self, display_path(old_location), display_path(new_location), - ) - shutil.move(old_location, new_location) - self._temp_build_dir.path = new_location - self._ideal_build_dir = None - self.source_dir = os.path.normpath(os.path.abspath(new_location)) - self._egg_info_path = None - - # Correct the metadata directory, if it exists - if self.metadata_directory: - old_meta = self.metadata_directory - rel = os.path.relpath(old_meta, start=old_location) - new_meta = os.path.join(new_location, rel) - new_meta = os.path.normpath(os.path.abspath(new_meta)) - 
self.metadata_directory = new_meta - - def remove_temporary_source(self): - # type: () -> None - """Remove the source files from this requirement, if they are marked - for deletion""" - if self.source_dir and os.path.exists( - os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME)): - logger.debug('Removing source in %s', self.source_dir) - rmtree(self.source_dir) - self.source_dir = None - self._temp_build_dir.cleanup() - self.build_env.cleanup() - - def check_if_exists(self, use_user_site): - # type: (bool) -> bool - """Find an installed distribution that satisfies or conflicts - with this requirement, and set self.satisfied_by or - self.conflicts_with appropriately. - """ - if self.req is None: - return False - try: - # get_distribution() will resolve the entire list of requirements - # anyway, and we've already determined that we need the requirement - # in question, so strip the marker so that we don't try to - # evaluate it. - no_marker = Requirement(str(self.req)) - no_marker.marker = None - self.satisfied_by = pkg_resources.get_distribution(str(no_marker)) - if self.editable and self.satisfied_by: - self.conflicts_with = self.satisfied_by - # when installing editables, nothing pre-existing should ever - # satisfy - self.satisfied_by = None - return True - except pkg_resources.DistributionNotFound: - return False - except pkg_resources.VersionConflict: - existing_dist = pkg_resources.get_distribution( - self.req.name - ) - if use_user_site: - if dist_in_usersite(existing_dist): - self.conflicts_with = existing_dist - elif (running_under_virtualenv() and - dist_in_site_packages(existing_dist)): - raise InstallationError( - "Will not install to the user site because it will " - "lack sys.path precedence to %s in %s" % - (existing_dist.project_name, existing_dist.location) - ) - else: - self.conflicts_with = existing_dist - return True - - # Things valid for wheels - @property - def is_wheel(self): - # type: () -> bool - if not self.link: - return False - 
return self.link.is_wheel - - def move_wheel_files( - self, - wheeldir, # type: str - root=None, # type: Optional[str] - home=None, # type: Optional[str] - prefix=None, # type: Optional[str] - warn_script_location=True, # type: bool - use_user_site=False, # type: bool - pycompile=True # type: bool - ): - # type: (...) -> None - move_wheel_files( - self.name, self.req, wheeldir, - user=use_user_site, - home=home, - root=root, - prefix=prefix, - pycompile=pycompile, - isolated=self.isolated, - warn_script_location=warn_script_location, - ) - - # Things valid for sdists - @property - def setup_py_dir(self): - # type: () -> str - return os.path.join( - self.source_dir, - self.link and self.link.subdirectory_fragment or '') - - @property - def setup_py(self): - # type: () -> str - assert self.source_dir, "No source dir for %s" % self - - setup_py = os.path.join(self.setup_py_dir, 'setup.py') - - # Python2 __file__ should not be unicode - if six.PY2 and isinstance(setup_py, six.text_type): - setup_py = setup_py.encode(sys.getfilesystemencoding()) - - return setup_py - - @property - def pyproject_toml(self): - # type: () -> str - assert self.source_dir, "No source dir for %s" % self - - return make_pyproject_path(self.setup_py_dir) - - def load_pyproject_toml(self): - # type: () -> None - """Load the pyproject.toml file. - - After calling this routine, all of the attributes related to PEP 517 - processing for this requirement have been set. In particular, the - use_pep517 attribute can be used to determine whether we should - follow the PEP 517 or legacy (setup.py) code path. 
- """ - pep517_data = load_pyproject_toml( - self.use_pep517, - self.pyproject_toml, - self.setup_py, - str(self) - ) - - if pep517_data is None: - self.use_pep517 = False - else: - self.use_pep517 = True - requires, backend, check = pep517_data - self.requirements_to_check = check - self.pyproject_requires = requires - self.pep517_backend = Pep517HookCaller(self.setup_py_dir, backend) - - # Use a custom function to call subprocesses - self.spin_message = "" - - def runner(cmd, cwd=None, extra_environ=None): - with open_spinner(self.spin_message) as spinner: - call_subprocess( - cmd, - cwd=cwd, - extra_environ=extra_environ, - show_stdout=False, - spinner=spinner - ) - self.spin_message = "" - - self.pep517_backend._subprocess_runner = runner - - def prepare_metadata(self): - # type: () -> None - """Ensure that project metadata is available. - - Under PEP 517, call the backend hook to prepare the metadata. - Under legacy processing, call setup.py egg-info. - """ - assert self.source_dir - - with indent_log(): - if self.use_pep517: - self.prepare_pep517_metadata() - else: - self.run_egg_info() - - if not self.req: - if isinstance(parse_version(self.metadata["Version"]), Version): - op = "==" - else: - op = "===" - self.req = Requirement( - "".join([ - self.metadata["Name"], - op, - self.metadata["Version"], - ]) - ) - self._correct_build_location() - else: - metadata_name = canonicalize_name(self.metadata["Name"]) - if canonicalize_name(self.req.name) != metadata_name: - logger.warning( - 'Generating metadata for package %s ' - 'produced metadata for project name %s. 
Fix your ' - '#egg=%s fragments.', - self.name, metadata_name, self.name - ) - self.req = Requirement(metadata_name) - - def prepare_pep517_metadata(self): - # type: () -> None - assert self.pep517_backend is not None - - metadata_dir = os.path.join( - self.setup_py_dir, - 'pip-wheel-metadata' - ) - ensure_dir(metadata_dir) - - with self.build_env: - # Note that Pep517HookCaller implements a fallback for - # prepare_metadata_for_build_wheel, so we don't have to - # consider the possibility that this hook doesn't exist. - backend = self.pep517_backend - self.spin_message = "Preparing wheel metadata" - distinfo_dir = backend.prepare_metadata_for_build_wheel( - metadata_dir - ) - - self.metadata_directory = os.path.join(metadata_dir, distinfo_dir) - - def run_egg_info(self): - # type: () -> None - if self.name: - logger.debug( - 'Running setup.py (path:%s) egg_info for package %s', - self.setup_py, self.name, - ) - else: - logger.debug( - 'Running setup.py (path:%s) egg_info for package from %s', - self.setup_py, self.link, - ) - script = SETUPTOOLS_SHIM % self.setup_py - base_cmd = [sys.executable, '-c', script] - if self.isolated: - base_cmd += ["--no-user-cfg"] - egg_info_cmd = base_cmd + ['egg_info'] - # We can't put the .egg-info files at the root, because then the - # source code will be mistaken for an installed egg, causing - # problems - if self.editable: - egg_base_option = [] # type: List[str] - else: - egg_info_dir = os.path.join(self.setup_py_dir, 'pip-egg-info') - ensure_dir(egg_info_dir) - egg_base_option = ['--egg-base', 'pip-egg-info'] - with self.build_env: - call_subprocess( - egg_info_cmd + egg_base_option, - cwd=self.setup_py_dir, - show_stdout=False, - command_desc='python setup.py egg_info') - - @property - def egg_info_path(self): - # type: () -> str - if self._egg_info_path is None: - if self.editable: - base = self.source_dir - else: - base = os.path.join(self.setup_py_dir, 'pip-egg-info') - filenames = os.listdir(base) - if self.editable: - 
filenames = [] - for root, dirs, files in os.walk(base): - for dir in vcs.dirnames: - if dir in dirs: - dirs.remove(dir) - # Iterate over a copy of ``dirs``, since mutating - # a list while iterating over it can cause trouble. - # (See https://github.com/pypa/pip/pull/462.) - for dir in list(dirs): - # Don't search in anything that looks like a virtualenv - # environment - if ( - os.path.lexists( - os.path.join(root, dir, 'bin', 'python') - ) or - os.path.exists( - os.path.join( - root, dir, 'Scripts', 'Python.exe' - ) - )): - dirs.remove(dir) - # Also don't search through tests - elif dir == 'test' or dir == 'tests': - dirs.remove(dir) - filenames.extend([os.path.join(root, dir) - for dir in dirs]) - filenames = [f for f in filenames if f.endswith('.egg-info')] - - if not filenames: - raise InstallationError( - "Files/directories not found in %s" % base - ) - # if we have more than one match, we pick the toplevel one. This - # can easily be the case if there is a dist folder which contains - # an extracted tarball for testing purposes. 
- if len(filenames) > 1: - filenames.sort( - key=lambda x: x.count(os.path.sep) + - (os.path.altsep and x.count(os.path.altsep) or 0) - ) - self._egg_info_path = os.path.join(base, filenames[0]) - return self._egg_info_path - - @property - def metadata(self): - if not hasattr(self, '_metadata'): - self._metadata = get_metadata(self.get_dist()) - - return self._metadata - - def get_dist(self): - # type: () -> Distribution - """Return a pkg_resources.Distribution for this requirement""" - if self.metadata_directory: - base_dir, distinfo = os.path.split(self.metadata_directory) - metadata = pkg_resources.PathMetadata( - base_dir, self.metadata_directory - ) - dist_name = os.path.splitext(distinfo)[0] - typ = pkg_resources.DistInfoDistribution - else: - egg_info = self.egg_info_path.rstrip(os.path.sep) - base_dir = os.path.dirname(egg_info) - metadata = pkg_resources.PathMetadata(base_dir, egg_info) - dist_name = os.path.splitext(os.path.basename(egg_info))[0] - # https://github.com/python/mypy/issues/1174 - typ = pkg_resources.Distribution # type: ignore - - return typ( - base_dir, - project_name=dist_name, - metadata=metadata, - ) - - def assert_source_matches_version(self): - # type: () -> None - assert self.source_dir - version = self.metadata['version'] - if self.req.specifier and version not in self.req.specifier: - logger.warning( - 'Requested %s, but installing version %s', - self, - version, - ) - else: - logger.debug( - 'Source in %s has version %s, which satisfies requirement %s', - display_path(self.source_dir), - version, - self, - ) - - # For both source distributions and editables - def ensure_has_source_dir(self, parent_dir): - # type: (str) -> str - """Ensure that a source_dir is set. - - This will create a temporary build dir if the name of the requirement - isn't known yet. - - :param parent_dir: The ideal pip parent_dir for the source_dir. - Generally src_dir for editables and build_dir for sdists. 
- :return: self.source_dir - """ - if self.source_dir is None: - self.source_dir = self.build_location(parent_dir) - return self.source_dir - - # For editable installations - def install_editable( - self, - install_options, # type: List[str] - global_options=(), # type: Sequence[str] - prefix=None # type: Optional[str] - ): - # type: (...) -> None - logger.info('Running setup.py develop for %s', self.name) - - if self.isolated: - global_options = list(global_options) + ["--no-user-cfg"] - - if prefix: - prefix_param = ['--prefix={}'.format(prefix)] - install_options = list(install_options) + prefix_param - - with indent_log(): - # FIXME: should we do --install-headers here too? - with self.build_env: - call_subprocess( - [ - sys.executable, - '-c', - SETUPTOOLS_SHIM % self.setup_py - ] + - list(global_options) + - ['develop', '--no-deps'] + - list(install_options), - - cwd=self.setup_py_dir, - show_stdout=False, - ) - - self.install_succeeded = True - - def update_editable(self, obtain=True): - # type: (bool) -> None - if not self.link: - logger.debug( - "Cannot update repository at %s; repository location is " - "unknown", - self.source_dir, - ) - return - assert self.editable - assert self.source_dir - if self.link.scheme == 'file': - # Static paths don't get updated - return - assert '+' in self.link.url, "bad url: %r" % self.link.url - if not self.update: - return - vc_type, url = self.link.url.split('+', 1) - backend = vcs.get_backend(vc_type) - if backend: - vcs_backend = backend(self.link.url) - if obtain: - vcs_backend.obtain(self.source_dir) - else: - vcs_backend.export(self.source_dir) - else: - assert 0, ( - 'Unexpected version control type (in %s): %s' - % (self.link, vc_type)) - - # Top-level Actions - def uninstall(self, auto_confirm=False, verbose=False, - use_user_site=False): - # type: (bool, bool, bool) -> Optional[UninstallPathSet] - """ - Uninstall the distribution currently satisfying this requirement. 
- - Prompts before removing or modifying files unless - ``auto_confirm`` is True. - - Refuses to delete or modify files outside of ``sys.prefix`` - - thus uninstallation within a virtual environment can only - modify that virtual environment, even if the virtualenv is - linked to global site-packages. - - """ - if not self.check_if_exists(use_user_site): - logger.warning("Skipping %s as it is not installed.", self.name) - return None - dist = self.satisfied_by or self.conflicts_with - - uninstalled_pathset = UninstallPathSet.from_dist(dist) - uninstalled_pathset.remove(auto_confirm, verbose) - return uninstalled_pathset - - def _clean_zip_name(self, name, prefix): # only used by archive. - assert name.startswith(prefix + os.path.sep), ( - "name %r doesn't start with prefix %r" % (name, prefix) - ) - name = name[len(prefix) + 1:] - name = name.replace(os.path.sep, '/') - return name - - def _get_archive_name(self, path, parentdir, rootdir): - # type: (str, str, str) -> str - path = os.path.join(parentdir, path) - name = self._clean_zip_name(path, rootdir) - return self.name + '/' + name - - # TODO: Investigate if this should be kept in InstallRequirement - # Seems to be used only when VCS + downloads - def archive(self, build_dir): - # type: (str) -> None - assert self.source_dir - create_archive = True - archive_name = '%s-%s.zip' % (self.name, self.metadata["version"]) - archive_path = os.path.join(build_dir, archive_name) - if os.path.exists(archive_path): - response = ask_path_exists( - 'The file %s exists. 
(i)gnore, (w)ipe, (b)ackup, (a)bort ' % - display_path(archive_path), ('i', 'w', 'b', 'a')) - if response == 'i': - create_archive = False - elif response == 'w': - logger.warning('Deleting %s', display_path(archive_path)) - os.remove(archive_path) - elif response == 'b': - dest_file = backup_dir(archive_path) - logger.warning( - 'Backing up %s to %s', - display_path(archive_path), - display_path(dest_file), - ) - shutil.move(archive_path, dest_file) - elif response == 'a': - sys.exit(-1) - if create_archive: - zip = zipfile.ZipFile( - archive_path, 'w', zipfile.ZIP_DEFLATED, - allowZip64=True - ) - dir = os.path.normcase(os.path.abspath(self.setup_py_dir)) - for dirpath, dirnames, filenames in os.walk(dir): - if 'pip-egg-info' in dirnames: - dirnames.remove('pip-egg-info') - for dirname in dirnames: - dir_arcname = self._get_archive_name(dirname, - parentdir=dirpath, - rootdir=dir) - zipdir = zipfile.ZipInfo(dir_arcname + '/') - zipdir.external_attr = 0x1ED << 16 # 0o755 - zip.writestr(zipdir, '') - for filename in filenames: - if filename == PIP_DELETE_MARKER_FILENAME: - continue - file_arcname = self._get_archive_name(filename, - parentdir=dirpath, - rootdir=dir) - filename = os.path.join(dirpath, filename) - zip.write(filename, file_arcname) - zip.close() - logger.info('Saved %s', display_path(archive_path)) - - def install( - self, - install_options, # type: List[str] - global_options=None, # type: Optional[Sequence[str]] - root=None, # type: Optional[str] - home=None, # type: Optional[str] - prefix=None, # type: Optional[str] - warn_script_location=True, # type: bool - use_user_site=False, # type: bool - pycompile=True # type: bool - ): - # type: (...) 
-> None - global_options = global_options if global_options is not None else [] - if self.editable: - self.install_editable( - install_options, global_options, prefix=prefix, - ) - return - if self.is_wheel: - version = wheel.wheel_version(self.source_dir) - wheel.check_compatibility(version, self.name) - - self.move_wheel_files( - self.source_dir, root=root, prefix=prefix, home=home, - warn_script_location=warn_script_location, - use_user_site=use_user_site, pycompile=pycompile, - ) - self.install_succeeded = True - return - - # Extend the list of global and install options passed on to - # the setup.py call with the ones from the requirements file. - # Options specified in requirements file override those - # specified on the command line, since the last option given - # to setup.py is the one that is used. - global_options = list(global_options) + \ - self.options.get('global_options', []) - install_options = list(install_options) + \ - self.options.get('install_options', []) - - if self.isolated: - # https://github.com/python/mypy/issues/1174 - global_options = global_options + ["--no-user-cfg"] # type: ignore - - with TempDirectory(kind="record") as temp_dir: - record_filename = os.path.join(temp_dir.path, 'install-record.txt') - install_args = self.get_install_args( - global_options, record_filename, root, prefix, pycompile, - ) - msg = 'Running setup.py install for %s' % (self.name,) - with open_spinner(msg) as spinner: - with indent_log(): - with self.build_env: - call_subprocess( - install_args + install_options, - cwd=self.setup_py_dir, - show_stdout=False, - spinner=spinner, - ) - - if not os.path.exists(record_filename): - logger.debug('Record file %s not found', record_filename) - return - self.install_succeeded = True - - def prepend_root(path): - if root is None or not os.path.isabs(path): - return path - else: - return change_root(root, path) - - with open(record_filename) as f: - for line in f: - directory = os.path.dirname(line) - if 
directory.endswith('.egg-info'): - egg_info_dir = prepend_root(directory) - break - else: - logger.warning( - 'Could not find .egg-info directory in install record' - ' for %s', - self, - ) - # FIXME: put the record somewhere - # FIXME: should this be an error? - return - new_lines = [] - with open(record_filename) as f: - for line in f: - filename = line.strip() - if os.path.isdir(filename): - filename += os.path.sep - new_lines.append( - os.path.relpath(prepend_root(filename), egg_info_dir) - ) - new_lines.sort() - ensure_dir(egg_info_dir) - inst_files_path = os.path.join(egg_info_dir, 'installed-files.txt') - with open(inst_files_path, 'w') as f: - f.write('\n'.join(new_lines) + '\n') - - def get_install_args( - self, - global_options, # type: Sequence[str] - record_filename, # type: str - root, # type: Optional[str] - prefix, # type: Optional[str] - pycompile # type: bool - ): - # type: (...) -> List[str] - install_args = [sys.executable, "-u"] - install_args.append('-c') - install_args.append(SETUPTOOLS_SHIM % self.setup_py) - install_args += list(global_options) + \ - ['install', '--record', record_filename] - install_args += ['--single-version-externally-managed'] - - if root is not None: - install_args += ['--root', root] - if prefix is not None: - install_args += ['--prefix', prefix] - - if pycompile: - install_args += ["--compile"] - else: - install_args += ["--no-compile"] - - if running_under_virtualenv(): - py_ver_str = 'python' + sysconfig.get_python_version() - install_args += ['--install-headers', - os.path.join(sys.prefix, 'include', 'site', - py_ver_str, self.name)] - - return install_args diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/req/req_set.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/req/req_set.py deleted file mode 100644 index d1410e9..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/req/req_set.py +++ /dev/null @@ -1,197 +0,0 @@ -from 
__future__ import absolute_import - -import logging -from collections import OrderedDict - -from pip._internal.exceptions import InstallationError -from pip._internal.utils.logging import indent_log -from pip._internal.utils.typing import MYPY_CHECK_RUNNING -from pip._internal.wheel import Wheel - -if MYPY_CHECK_RUNNING: - from typing import Optional, List, Tuple, Dict, Iterable # noqa: F401 - from pip._internal.req.req_install import InstallRequirement # noqa: F401 - - -logger = logging.getLogger(__name__) - - -class RequirementSet(object): - - def __init__(self, require_hashes=False, check_supported_wheels=True): - # type: (bool, bool) -> None - """Create a RequirementSet. - """ - - self.requirements = OrderedDict() # type: Dict[str, InstallRequirement] # noqa: E501 - self.require_hashes = require_hashes - self.check_supported_wheels = check_supported_wheels - - # Mapping of alias: real_name - self.requirement_aliases = {} # type: Dict[str, str] - self.unnamed_requirements = [] # type: List[InstallRequirement] - self.successfully_downloaded = [] # type: List[InstallRequirement] - self.reqs_to_cleanup = [] # type: List[InstallRequirement] - - def __str__(self): - reqs = [req for req in self.requirements.values() - if not req.comes_from] - reqs.sort(key=lambda req: req.name.lower()) - return ' '.join([str(req.req) for req in reqs]) - - def __repr__(self): - reqs = [req for req in self.requirements.values()] - reqs.sort(key=lambda req: req.name.lower()) - reqs_str = ', '.join([str(req.req) for req in reqs]) - return ('<%s object; %d requirement(s): %s>' - % (self.__class__.__name__, len(reqs), reqs_str)) - - def add_requirement( - self, - install_req, # type: InstallRequirement - parent_req_name=None, # type: Optional[str] - extras_requested=None # type: Optional[Iterable[str]] - ): - # type: (...) -> Tuple[List[InstallRequirement], Optional[InstallRequirement]] # noqa: E501 - """Add install_req as a requirement to install. 
- - :param parent_req_name: The name of the requirement that needed this - added. The name is used because when multiple unnamed requirements - resolve to the same name, we could otherwise end up with dependency - links that point outside the Requirements set. parent_req must - already be added. Note that None implies that this is a user - supplied requirement, vs an inferred one. - :param extras_requested: an iterable of extras used to evaluate the - environment markers. - :return: Additional requirements to scan. That is either [] if - the requirement is not applicable, or [install_req] if the - requirement is applicable and has just been added. - """ - name = install_req.name - - # If the markers do not match, ignore this requirement. - if not install_req.match_markers(extras_requested): - logger.info( - "Ignoring %s: markers '%s' don't match your environment", - name, install_req.markers, - ) - return [], None - - # If the wheel is not supported, raise an error. - # Should check this after filtering out based on environment markers to - # allow specifying different wheels based on the environment/OS, in a - # single requirements file. - if install_req.link and install_req.link.is_wheel: - wheel = Wheel(install_req.link.filename) - if self.check_supported_wheels and not wheel.supported(): - raise InstallationError( - "%s is not a supported wheel on this platform." % - wheel.filename - ) - - # This next bit is really a sanity check. - assert install_req.is_direct == (parent_req_name is None), ( - "a direct req shouldn't have a parent and also, " - "a non direct req should have a parent" - ) - - # Unnamed requirements are scanned again and the requirement won't be - # added as a dependency until after scanning. 
- if not name: - # url or path requirement w/o an egg fragment - self.unnamed_requirements.append(install_req) - return [install_req], None - - try: - existing_req = self.get_requirement(name) - except KeyError: - existing_req = None - - has_conflicting_requirement = ( - parent_req_name is None and - existing_req and - not existing_req.constraint and - existing_req.extras == install_req.extras and - existing_req.req.specifier != install_req.req.specifier - ) - if has_conflicting_requirement: - raise InstallationError( - "Double requirement given: %s (already in %s, name=%r)" - % (install_req, existing_req, name) - ) - - # When no existing requirement exists, add the requirement as a - # dependency and it will be scanned again after. - if not existing_req: - self.requirements[name] = install_req - # FIXME: what about other normalizations? E.g., _ vs. -? - if name.lower() != name: - self.requirement_aliases[name.lower()] = name - # We'd want to rescan this requirements later - return [install_req], install_req - - # Assume there's no need to scan, and that we've already - # encountered this for scanning. - if install_req.constraint or not existing_req.constraint: - return [], existing_req - - does_not_satisfy_constraint = ( - install_req.link and - not ( - existing_req.link and - install_req.link.path == existing_req.link.path - ) - ) - if does_not_satisfy_constraint: - self.reqs_to_cleanup.append(install_req) - raise InstallationError( - "Could not satisfy constraints for '%s': " - "installation from path or url cannot be " - "constrained to a version" % name, - ) - # If we're now installing a constraint, mark the existing - # object for real installation. - existing_req.constraint = False - existing_req.extras = tuple(sorted( - set(existing_req.extras) | set(install_req.extras) - )) - logger.debug( - "Setting %s extras to: %s", - existing_req, existing_req.extras, - ) - # Return the existing requirement for addition to the parent and - # scanning again. 
- return [existing_req], existing_req - - def has_requirement(self, project_name): - # type: (str) -> bool - name = project_name.lower() - if (name in self.requirements and - not self.requirements[name].constraint or - name in self.requirement_aliases and - not self.requirements[self.requirement_aliases[name]].constraint): - return True - return False - - @property - def has_requirements(self): - # type: () -> List[InstallRequirement] - return list(req for req in self.requirements.values() if not - req.constraint) or self.unnamed_requirements - - def get_requirement(self, project_name): - # type: (str) -> InstallRequirement - for name in project_name, project_name.lower(): - if name in self.requirements: - return self.requirements[name] - if name in self.requirement_aliases: - return self.requirements[self.requirement_aliases[name]] - raise KeyError("No project with the name %r" % project_name) - - def cleanup_files(self): - # type: () -> None - """Clean up files, remove builds.""" - logger.debug('Cleaning up...') - with indent_log(): - for req in self.reqs_to_cleanup: - req.remove_temporary_source() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/req/req_tracker.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/req/req_tracker.py deleted file mode 100644 index 82e084a..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/req/req_tracker.py +++ /dev/null @@ -1,88 +0,0 @@ -from __future__ import absolute_import - -import contextlib -import errno -import hashlib -import logging -import os - -from pip._internal.utils.temp_dir import TempDirectory -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from typing import Set, Iterator # noqa: F401 - from pip._internal.req.req_install import InstallRequirement # noqa: F401 - from pip._internal.models.link import Link # noqa: F401 - -logger = logging.getLogger(__name__) - - -class 
RequirementTracker(object): - - def __init__(self): - # type: () -> None - self._root = os.environ.get('PIP_REQ_TRACKER') - if self._root is None: - self._temp_dir = TempDirectory(delete=False, kind='req-tracker') - self._temp_dir.create() - self._root = os.environ['PIP_REQ_TRACKER'] = self._temp_dir.path - logger.debug('Created requirements tracker %r', self._root) - else: - self._temp_dir = None - logger.debug('Re-using requirements tracker %r', self._root) - self._entries = set() # type: Set[InstallRequirement] - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.cleanup() - - def _entry_path(self, link): - # type: (Link) -> str - hashed = hashlib.sha224(link.url_without_fragment.encode()).hexdigest() - return os.path.join(self._root, hashed) - - def add(self, req): - # type: (InstallRequirement) -> None - link = req.link - info = str(req) - entry_path = self._entry_path(link) - try: - with open(entry_path) as fp: - # Error, these's already a build in progress. 
- raise LookupError('%s is already being built: %s' - % (link, fp.read())) - except IOError as e: - if e.errno != errno.ENOENT: - raise - assert req not in self._entries - with open(entry_path, 'w') as fp: - fp.write(info) - self._entries.add(req) - logger.debug('Added %s to build tracker %r', req, self._root) - - def remove(self, req): - # type: (InstallRequirement) -> None - link = req.link - self._entries.remove(req) - os.unlink(self._entry_path(link)) - logger.debug('Removed %s from build tracker %r', req, self._root) - - def cleanup(self): - # type: () -> None - for req in set(self._entries): - self.remove(req) - remove = self._temp_dir is not None - if remove: - self._temp_dir.cleanup() - logger.debug('%s build tracker %r', - 'Removed' if remove else 'Cleaned', - self._root) - - @contextlib.contextmanager - def track(self, req): - # type: (InstallRequirement) -> Iterator[None] - self.add(req) - yield - self.remove(req) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/req/req_uninstall.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/req/req_uninstall.py deleted file mode 100644 index c80959e..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/req/req_uninstall.py +++ /dev/null @@ -1,596 +0,0 @@ -from __future__ import absolute_import - -import csv -import functools -import logging -import os -import sys -import sysconfig - -from pip._vendor import pkg_resources - -from pip._internal.exceptions import UninstallationError -from pip._internal.locations import bin_py, bin_user -from pip._internal.utils.compat import WINDOWS, cache_from_source, uses_pycache -from pip._internal.utils.logging import indent_log -from pip._internal.utils.misc import ( - FakeFile, ask, dist_in_usersite, dist_is_local, egg_link_path, is_local, - normalize_path, renames, rmtree, -) -from pip._internal.utils.temp_dir import AdjacentTempDirectory, TempDirectory - -logger = logging.getLogger(__name__) - - 
-def _script_names(dist, script_name, is_gui): - """Create the fully qualified name of the files created by - {console,gui}_scripts for the given ``dist``. - Returns the list of file names - """ - if dist_in_usersite(dist): - bin_dir = bin_user - else: - bin_dir = bin_py - exe_name = os.path.join(bin_dir, script_name) - paths_to_remove = [exe_name] - if WINDOWS: - paths_to_remove.append(exe_name + '.exe') - paths_to_remove.append(exe_name + '.exe.manifest') - if is_gui: - paths_to_remove.append(exe_name + '-script.pyw') - else: - paths_to_remove.append(exe_name + '-script.py') - return paths_to_remove - - -def _unique(fn): - @functools.wraps(fn) - def unique(*args, **kw): - seen = set() - for item in fn(*args, **kw): - if item not in seen: - seen.add(item) - yield item - return unique - - -@_unique -def uninstallation_paths(dist): - """ - Yield all the uninstallation paths for dist based on RECORD-without-.py[co] - - Yield paths to all the files in RECORD. For each .py file in RECORD, add - the .pyc and .pyo in the same directory. - - UninstallPathSet.add() takes care of the __pycache__ .py[co]. - """ - r = csv.reader(FakeFile(dist.get_metadata_lines('RECORD'))) - for row in r: - path = os.path.join(dist.location, row[0]) - yield path - if path.endswith('.py'): - dn, fn = os.path.split(path) - base = fn[:-3] - path = os.path.join(dn, base + '.pyc') - yield path - path = os.path.join(dn, base + '.pyo') - yield path - - -def compact(paths): - """Compact a path set to contain the minimal number of paths - necessary to contain all paths in the set. 
If /a/path/ and - /a/path/to/a/file.txt are both in the set, leave only the - shorter path.""" - - sep = os.path.sep - short_paths = set() - for path in sorted(paths, key=len): - should_skip = any( - path.startswith(shortpath.rstrip("*")) and - path[len(shortpath.rstrip("*").rstrip(sep))] == sep - for shortpath in short_paths - ) - if not should_skip: - short_paths.add(path) - return short_paths - - -def compress_for_rename(paths): - """Returns a set containing the paths that need to be renamed. - - This set may include directories when the original sequence of paths - included every file on disk. - """ - case_map = dict((os.path.normcase(p), p) for p in paths) - remaining = set(case_map) - unchecked = sorted(set(os.path.split(p)[0] - for p in case_map.values()), key=len) - wildcards = set() - - def norm_join(*a): - return os.path.normcase(os.path.join(*a)) - - for root in unchecked: - if any(os.path.normcase(root).startswith(w) - for w in wildcards): - # This directory has already been handled. - continue - - all_files = set() - all_subdirs = set() - for dirname, subdirs, files in os.walk(root): - all_subdirs.update(norm_join(root, dirname, d) - for d in subdirs) - all_files.update(norm_join(root, dirname, f) - for f in files) - # If all the files we found are in our remaining set of files to - # remove, then remove them from the latter set and add a wildcard - # for the directory. - if not (all_files - remaining): - remaining.difference_update(all_files) - wildcards.add(root + os.sep) - - return set(map(case_map.__getitem__, remaining)) | wildcards - - -def compress_for_output_listing(paths): - """Returns a tuple of 2 sets of which paths to display to user - - The first set contains paths that would be deleted. Files of a package - are not added and the top-level directory of the package has a '*' added - at the end - to signify that all it's contents are removed. - - The second set contains files that would have been skipped in the above - folders. 
- """ - - will_remove = list(paths) - will_skip = set() - - # Determine folders and files - folders = set() - files = set() - for path in will_remove: - if path.endswith(".pyc"): - continue - if path.endswith("__init__.py") or ".dist-info" in path: - folders.add(os.path.dirname(path)) - files.add(path) - - _normcased_files = set(map(os.path.normcase, files)) - - folders = compact(folders) - - # This walks the tree using os.walk to not miss extra folders - # that might get added. - for folder in folders: - for dirpath, _, dirfiles in os.walk(folder): - for fname in dirfiles: - if fname.endswith(".pyc"): - continue - - file_ = os.path.join(dirpath, fname) - if (os.path.isfile(file_) and - os.path.normcase(file_) not in _normcased_files): - # We are skipping this file. Add it to the set. - will_skip.add(file_) - - will_remove = files | { - os.path.join(folder, "*") for folder in folders - } - - return will_remove, will_skip - - -class StashedUninstallPathSet(object): - """A set of file rename operations to stash files while - tentatively uninstalling them.""" - def __init__(self): - # Mapping from source file root to [Adjacent]TempDirectory - # for files under that directory. - self._save_dirs = {} - # (old path, new path) tuples for each move that may need - # to be undone. - self._moves = [] - - def _get_directory_stash(self, path): - """Stashes a directory. - - Directories are stashed adjacent to their original location if - possible, or else moved/copied into the user's temp dir.""" - - try: - save_dir = AdjacentTempDirectory(path) - save_dir.create() - except OSError: - save_dir = TempDirectory(kind="uninstall") - save_dir.create() - self._save_dirs[os.path.normcase(path)] = save_dir - - return save_dir.path - - def _get_file_stash(self, path): - """Stashes a file. 
- - If no root has been provided, one will be created for the directory - in the user's temp directory.""" - path = os.path.normcase(path) - head, old_head = os.path.dirname(path), None - save_dir = None - - while head != old_head: - try: - save_dir = self._save_dirs[head] - break - except KeyError: - pass - head, old_head = os.path.dirname(head), head - else: - # Did not find any suitable root - head = os.path.dirname(path) - save_dir = TempDirectory(kind='uninstall') - save_dir.create() - self._save_dirs[head] = save_dir - - relpath = os.path.relpath(path, head) - if relpath and relpath != os.path.curdir: - return os.path.join(save_dir.path, relpath) - return save_dir.path - - def stash(self, path): - """Stashes the directory or file and returns its new location. - """ - if os.path.isdir(path): - new_path = self._get_directory_stash(path) - else: - new_path = self._get_file_stash(path) - - self._moves.append((path, new_path)) - if os.path.isdir(path) and os.path.isdir(new_path): - # If we're moving a directory, we need to - # remove the destination first or else it will be - # moved to inside the existing directory. - # We just created new_path ourselves, so it will - # be removable. 
- os.rmdir(new_path) - renames(path, new_path) - return new_path - - def commit(self): - """Commits the uninstall by removing stashed files.""" - for _, save_dir in self._save_dirs.items(): - save_dir.cleanup() - self._moves = [] - self._save_dirs = {} - - def rollback(self): - """Undoes the uninstall by moving stashed files back.""" - for p in self._moves: - logging.info("Moving to %s\n from %s", *p) - - for new_path, path in self._moves: - try: - logger.debug('Replacing %s from %s', new_path, path) - if os.path.isfile(new_path): - os.unlink(new_path) - elif os.path.isdir(new_path): - rmtree(new_path) - renames(path, new_path) - except OSError as ex: - logger.error("Failed to restore %s", new_path) - logger.debug("Exception: %s", ex) - - self.commit() - - @property - def can_rollback(self): - return bool(self._moves) - - -class UninstallPathSet(object): - """A set of file paths to be removed in the uninstallation of a - requirement.""" - def __init__(self, dist): - self.paths = set() - self._refuse = set() - self.pth = {} - self.dist = dist - self._moved_paths = StashedUninstallPathSet() - - def _permitted(self, path): - """ - Return True if the given path is one we are permitted to - remove/modify, False otherwise. 
- - """ - return is_local(path) - - def add(self, path): - head, tail = os.path.split(path) - - # we normalize the head to resolve parent directory symlinks, but not - # the tail, since we only want to uninstall symlinks, not their targets - path = os.path.join(normalize_path(head), os.path.normcase(tail)) - - if not os.path.exists(path): - return - if self._permitted(path): - self.paths.add(path) - else: - self._refuse.add(path) - - # __pycache__ files can show up after 'installed-files.txt' is created, - # due to imports - if os.path.splitext(path)[1] == '.py' and uses_pycache: - self.add(cache_from_source(path)) - - def add_pth(self, pth_file, entry): - pth_file = normalize_path(pth_file) - if self._permitted(pth_file): - if pth_file not in self.pth: - self.pth[pth_file] = UninstallPthEntries(pth_file) - self.pth[pth_file].add(entry) - else: - self._refuse.add(pth_file) - - def remove(self, auto_confirm=False, verbose=False): - """Remove paths in ``self.paths`` with confirmation (unless - ``auto_confirm`` is True).""" - - if not self.paths: - logger.info( - "Can't uninstall '%s'. 
No files were found to uninstall.", - self.dist.project_name, - ) - return - - dist_name_version = ( - self.dist.project_name + "-" + self.dist.version - ) - logger.info('Uninstalling %s:', dist_name_version) - - with indent_log(): - if auto_confirm or self._allowed_to_proceed(verbose): - moved = self._moved_paths - - for_rename = compress_for_rename(self.paths) - - for path in sorted(compact(for_rename)): - moved.stash(path) - logger.debug('Removing file or directory %s', path) - - for pth in self.pth.values(): - pth.remove() - - logger.info('Successfully uninstalled %s', dist_name_version) - - def _allowed_to_proceed(self, verbose): - """Display which files would be deleted and prompt for confirmation - """ - - def _display(msg, paths): - if not paths: - return - - logger.info(msg) - with indent_log(): - for path in sorted(compact(paths)): - logger.info(path) - - if not verbose: - will_remove, will_skip = compress_for_output_listing(self.paths) - else: - # In verbose mode, display all the files that are going to be - # deleted. - will_remove = list(self.paths) - will_skip = set() - - _display('Would remove:', will_remove) - _display('Would not remove (might be manually added):', will_skip) - _display('Would not remove (outside of prefix):', self._refuse) - if verbose: - _display('Will actually move:', compress_for_rename(self.paths)) - - return ask('Proceed (y/n)? 
', ('y', 'n')) == 'y' - - def rollback(self): - """Rollback the changes previously made by remove().""" - if not self._moved_paths.can_rollback: - logger.error( - "Can't roll back %s; was not uninstalled", - self.dist.project_name, - ) - return False - logger.info('Rolling back uninstall of %s', self.dist.project_name) - self._moved_paths.rollback() - for pth in self.pth.values(): - pth.rollback() - - def commit(self): - """Remove temporary save dir: rollback will no longer be possible.""" - self._moved_paths.commit() - - @classmethod - def from_dist(cls, dist): - dist_path = normalize_path(dist.location) - if not dist_is_local(dist): - logger.info( - "Not uninstalling %s at %s, outside environment %s", - dist.key, - dist_path, - sys.prefix, - ) - return cls(dist) - - if dist_path in {p for p in {sysconfig.get_path("stdlib"), - sysconfig.get_path("platstdlib")} - if p}: - logger.info( - "Not uninstalling %s at %s, as it is in the standard library.", - dist.key, - dist_path, - ) - return cls(dist) - - paths_to_remove = cls(dist) - develop_egg_link = egg_link_path(dist) - develop_egg_link_egg_info = '{}.egg-info'.format( - pkg_resources.to_filename(dist.project_name)) - egg_info_exists = dist.egg_info and os.path.exists(dist.egg_info) - # Special case for distutils installed package - distutils_egg_info = getattr(dist._provider, 'path', None) - - # Uninstall cases order do matter as in the case of 2 installs of the - # same package, pip needs to uninstall the currently detected version - if (egg_info_exists and dist.egg_info.endswith('.egg-info') and - not dist.egg_info.endswith(develop_egg_link_egg_info)): - # if dist.egg_info.endswith(develop_egg_link_egg_info), we - # are in fact in the develop_egg_link case - paths_to_remove.add(dist.egg_info) - if dist.has_metadata('installed-files.txt'): - for installed_file in dist.get_metadata( - 'installed-files.txt').splitlines(): - path = os.path.normpath( - os.path.join(dist.egg_info, installed_file) - ) - 
paths_to_remove.add(path) - # FIXME: need a test for this elif block - # occurs with --single-version-externally-managed/--record outside - # of pip - elif dist.has_metadata('top_level.txt'): - if dist.has_metadata('namespace_packages.txt'): - namespaces = dist.get_metadata('namespace_packages.txt') - else: - namespaces = [] - for top_level_pkg in [ - p for p - in dist.get_metadata('top_level.txt').splitlines() - if p and p not in namespaces]: - path = os.path.join(dist.location, top_level_pkg) - paths_to_remove.add(path) - paths_to_remove.add(path + '.py') - paths_to_remove.add(path + '.pyc') - paths_to_remove.add(path + '.pyo') - - elif distutils_egg_info: - raise UninstallationError( - "Cannot uninstall {!r}. It is a distutils installed project " - "and thus we cannot accurately determine which files belong " - "to it which would lead to only a partial uninstall.".format( - dist.project_name, - ) - ) - - elif dist.location.endswith('.egg'): - # package installed by easy_install - # We cannot match on dist.egg_name because it can slightly vary - # i.e. 
setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg - paths_to_remove.add(dist.location) - easy_install_egg = os.path.split(dist.location)[1] - easy_install_pth = os.path.join(os.path.dirname(dist.location), - 'easy-install.pth') - paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg) - - elif egg_info_exists and dist.egg_info.endswith('.dist-info'): - for path in uninstallation_paths(dist): - paths_to_remove.add(path) - - elif develop_egg_link: - # develop egg - with open(develop_egg_link, 'r') as fh: - link_pointer = os.path.normcase(fh.readline().strip()) - assert (link_pointer == dist.location), ( - 'Egg-link %s does not match installed location of %s ' - '(at %s)' % (link_pointer, dist.project_name, dist.location) - ) - paths_to_remove.add(develop_egg_link) - easy_install_pth = os.path.join(os.path.dirname(develop_egg_link), - 'easy-install.pth') - paths_to_remove.add_pth(easy_install_pth, dist.location) - - else: - logger.debug( - 'Not sure how to uninstall: %s - Check: %s', - dist, dist.location, - ) - - # find distutils scripts= scripts - if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'): - for script in dist.metadata_listdir('scripts'): - if dist_in_usersite(dist): - bin_dir = bin_user - else: - bin_dir = bin_py - paths_to_remove.add(os.path.join(bin_dir, script)) - if WINDOWS: - paths_to_remove.add(os.path.join(bin_dir, script) + '.bat') - - # find console_scripts - _scripts_to_remove = [] - console_scripts = dist.get_entry_map(group='console_scripts') - for name in console_scripts.keys(): - _scripts_to_remove.extend(_script_names(dist, name, False)) - # find gui_scripts - gui_scripts = dist.get_entry_map(group='gui_scripts') - for name in gui_scripts.keys(): - _scripts_to_remove.extend(_script_names(dist, name, True)) - - for s in _scripts_to_remove: - paths_to_remove.add(s) - - return paths_to_remove - - -class UninstallPthEntries(object): - def __init__(self, pth_file): - if not os.path.isfile(pth_file): - raise 
UninstallationError( - "Cannot remove entries from nonexistent file %s" % pth_file - ) - self.file = pth_file - self.entries = set() - self._saved_lines = None - - def add(self, entry): - entry = os.path.normcase(entry) - # On Windows, os.path.normcase converts the entry to use - # backslashes. This is correct for entries that describe absolute - # paths outside of site-packages, but all the others use forward - # slashes. - if WINDOWS and not os.path.splitdrive(entry)[0]: - entry = entry.replace('\\', '/') - self.entries.add(entry) - - def remove(self): - logger.debug('Removing pth entries from %s:', self.file) - with open(self.file, 'rb') as fh: - # windows uses '\r\n' with py3k, but uses '\n' with py2.x - lines = fh.readlines() - self._saved_lines = lines - if any(b'\r\n' in line for line in lines): - endline = '\r\n' - else: - endline = '\n' - # handle missing trailing newline - if lines and not lines[-1].endswith(endline.encode("utf-8")): - lines[-1] = lines[-1] + endline.encode("utf-8") - for entry in self.entries: - try: - logger.debug('Removing entry: %s', entry) - lines.remove((entry + endline).encode("utf-8")) - except ValueError: - pass - with open(self.file, 'wb') as fh: - fh.writelines(lines) - - def rollback(self): - if self._saved_lines is None: - logger.error( - 'Cannot roll back changes to %s, none were made', self.file - ) - return False - logger.debug('Rolling %s back to previous state', self.file) - with open(self.file, 'wb') as fh: - fh.writelines(self._saved_lines) - return True diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/resolve.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/resolve.py deleted file mode 100644 index 33f572f..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/resolve.py +++ /dev/null @@ -1,393 +0,0 @@ -"""Dependency Resolution - -The dependency resolution in pip is performed as follows: - -for top-level requirements: - a. 
only one spec allowed per project, regardless of conflicts or not. - otherwise a "double requirement" exception is raised - b. they override sub-dependency requirements. -for sub-dependencies - a. "first found, wins" (where the order is breadth first) -""" - -import logging -from collections import defaultdict -from itertools import chain - -from pip._internal.exceptions import ( - BestVersionAlreadyInstalled, DistributionNotFound, HashError, HashErrors, - UnsupportedPythonVersion, -) -from pip._internal.req.constructors import install_req_from_req_string -from pip._internal.utils.logging import indent_log -from pip._internal.utils.misc import dist_in_usersite, ensure_dir -from pip._internal.utils.packaging import check_dist_requires_python -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from typing import Optional, DefaultDict, List, Set # noqa: F401 - from pip._internal.download import PipSession # noqa: F401 - from pip._internal.req.req_install import InstallRequirement # noqa: F401 - from pip._internal.index import PackageFinder # noqa: F401 - from pip._internal.req.req_set import RequirementSet # noqa: F401 - from pip._internal.operations.prepare import ( # noqa: F401 - DistAbstraction, RequirementPreparer - ) - from pip._internal.cache import WheelCache # noqa: F401 - -logger = logging.getLogger(__name__) - - -class Resolver(object): - """Resolves which packages need to be installed/uninstalled to perform \ - the requested operation without breaking the requirements of any package. 
- """ - - _allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"} - - def __init__( - self, - preparer, # type: RequirementPreparer - session, # type: PipSession - finder, # type: PackageFinder - wheel_cache, # type: Optional[WheelCache] - use_user_site, # type: bool - ignore_dependencies, # type: bool - ignore_installed, # type: bool - ignore_requires_python, # type: bool - force_reinstall, # type: bool - isolated, # type: bool - upgrade_strategy, # type: str - use_pep517=None # type: Optional[bool] - ): - # type: (...) -> None - super(Resolver, self).__init__() - assert upgrade_strategy in self._allowed_strategies - - self.preparer = preparer - self.finder = finder - self.session = session - - # NOTE: This would eventually be replaced with a cache that can give - # information about both sdist and wheels transparently. - self.wheel_cache = wheel_cache - - # This is set in resolve - self.require_hashes = None # type: Optional[bool] - - self.upgrade_strategy = upgrade_strategy - self.force_reinstall = force_reinstall - self.isolated = isolated - self.ignore_dependencies = ignore_dependencies - self.ignore_installed = ignore_installed - self.ignore_requires_python = ignore_requires_python - self.use_user_site = use_user_site - self.use_pep517 = use_pep517 - - self._discovered_dependencies = \ - defaultdict(list) # type: DefaultDict[str, List] - - def resolve(self, requirement_set): - # type: (RequirementSet) -> None - """Resolve what operations need to be done - - As a side-effect of this method, the packages (and their dependencies) - are downloaded, unpacked and prepared for installation. This - preparation is done by ``pip.operations.prepare``. - - Once PyPI has static dependency metadata available, it would be - possible to move the preparation to become a step separated from - dependency resolution. 
- """ - # make the wheelhouse - if self.preparer.wheel_download_dir: - ensure_dir(self.preparer.wheel_download_dir) - - # If any top-level requirement has a hash specified, enter - # hash-checking mode, which requires hashes from all. - root_reqs = ( - requirement_set.unnamed_requirements + - list(requirement_set.requirements.values()) - ) - self.require_hashes = ( - requirement_set.require_hashes or - any(req.has_hash_options for req in root_reqs) - ) - - # Display where finder is looking for packages - locations = self.finder.get_formatted_locations() - if locations: - logger.info(locations) - - # Actually prepare the files, and collect any exceptions. Most hash - # exceptions cannot be checked ahead of time, because - # req.populate_link() needs to be called before we can make decisions - # based on link type. - discovered_reqs = [] # type: List[InstallRequirement] - hash_errors = HashErrors() - for req in chain(root_reqs, discovered_reqs): - try: - discovered_reqs.extend( - self._resolve_one(requirement_set, req) - ) - except HashError as exc: - exc.req = req - hash_errors.append(exc) - - if hash_errors: - raise hash_errors - - def _is_upgrade_allowed(self, req): - # type: (InstallRequirement) -> bool - if self.upgrade_strategy == "to-satisfy-only": - return False - elif self.upgrade_strategy == "eager": - return True - else: - assert self.upgrade_strategy == "only-if-needed" - return req.is_direct - - def _set_req_to_reinstall(self, req): - # type: (InstallRequirement) -> None - """ - Set a requirement to be installed. - """ - # Don't uninstall the conflict if doing a user install and the - # conflict is not a user install. - if not self.use_user_site or dist_in_usersite(req.satisfied_by): - req.conflicts_with = req.satisfied_by - req.satisfied_by = None - - # XXX: Stop passing requirement_set for options - def _check_skip_installed(self, req_to_install): - # type: (InstallRequirement) -> Optional[str] - """Check if req_to_install should be skipped. 
- - This will check if the req is installed, and whether we should upgrade - or reinstall it, taking into account all the relevant user options. - - After calling this req_to_install will only have satisfied_by set to - None if the req_to_install is to be upgraded/reinstalled etc. Any - other value will be a dist recording the current thing installed that - satisfies the requirement. - - Note that for vcs urls and the like we can't assess skipping in this - routine - we simply identify that we need to pull the thing down, - then later on it is pulled down and introspected to assess upgrade/ - reinstalls etc. - - :return: A text reason for why it was skipped, or None. - """ - if self.ignore_installed: - return None - - req_to_install.check_if_exists(self.use_user_site) - if not req_to_install.satisfied_by: - return None - - if self.force_reinstall: - self._set_req_to_reinstall(req_to_install) - return None - - if not self._is_upgrade_allowed(req_to_install): - if self.upgrade_strategy == "only-if-needed": - return 'already satisfied, skipping upgrade' - return 'already satisfied' - - # Check for the possibility of an upgrade. For link-based - # requirements we have to pull the tree down and inspect to assess - # the version #, so it's handled way down. - if not req_to_install.link: - try: - self.finder.find_requirement(req_to_install, upgrade=True) - except BestVersionAlreadyInstalled: - # Then the best version is installed. - return 'already up-to-date' - except DistributionNotFound: - # No distribution found, so we squash the error. It will - # be raised later when we re-try later to do the install. - # Why don't we just raise here? - pass - - self._set_req_to_reinstall(req_to_install) - return None - - def _get_abstract_dist_for(self, req): - # type: (InstallRequirement) -> DistAbstraction - """Takes a InstallRequirement and returns a single AbstractDist \ - representing a prepared variant of the same. 
- """ - assert self.require_hashes is not None, ( - "require_hashes should have been set in Resolver.resolve()" - ) - - if req.editable: - return self.preparer.prepare_editable_requirement( - req, self.require_hashes, self.use_user_site, self.finder, - ) - - # satisfied_by is only evaluated by calling _check_skip_installed, - # so it must be None here. - assert req.satisfied_by is None - skip_reason = self._check_skip_installed(req) - - if req.satisfied_by: - return self.preparer.prepare_installed_requirement( - req, self.require_hashes, skip_reason - ) - - upgrade_allowed = self._is_upgrade_allowed(req) - abstract_dist = self.preparer.prepare_linked_requirement( - req, self.session, self.finder, upgrade_allowed, - self.require_hashes - ) - - # NOTE - # The following portion is for determining if a certain package is - # going to be re-installed/upgraded or not and reporting to the user. - # This should probably get cleaned up in a future refactor. - - # req.req is only avail after unpack for URL - # pkgs repeat check_if_exists to uninstall-on-upgrade - # (#14) - if not self.ignore_installed: - req.check_if_exists(self.use_user_site) - - if req.satisfied_by: - should_modify = ( - self.upgrade_strategy != "to-satisfy-only" or - self.force_reinstall or - self.ignore_installed or - req.link.scheme == 'file' - ) - if should_modify: - self._set_req_to_reinstall(req) - else: - logger.info( - 'Requirement already satisfied (use --upgrade to upgrade):' - ' %s', req, - ) - - return abstract_dist - - def _resolve_one( - self, - requirement_set, # type: RequirementSet - req_to_install # type: InstallRequirement - ): - # type: (...) -> List[InstallRequirement] - """Prepare a single requirements file. - - :return: A list of additional InstallRequirements to also install. 
- """ - # Tell user what we are doing for this requirement: - # obtain (editable), skipping, processing (local url), collecting - # (remote url or package name) - if req_to_install.constraint or req_to_install.prepared: - return [] - - req_to_install.prepared = True - - # register tmp src for cleanup in case something goes wrong - requirement_set.reqs_to_cleanup.append(req_to_install) - - abstract_dist = self._get_abstract_dist_for(req_to_install) - - # Parse and return dependencies - dist = abstract_dist.dist() - try: - check_dist_requires_python(dist) - except UnsupportedPythonVersion as err: - if self.ignore_requires_python: - logger.warning(err.args[0]) - else: - raise - - more_reqs = [] # type: List[InstallRequirement] - - def add_req(subreq, extras_requested): - sub_install_req = install_req_from_req_string( - str(subreq), - req_to_install, - isolated=self.isolated, - wheel_cache=self.wheel_cache, - use_pep517=self.use_pep517 - ) - parent_req_name = req_to_install.name - to_scan_again, add_to_parent = requirement_set.add_requirement( - sub_install_req, - parent_req_name=parent_req_name, - extras_requested=extras_requested, - ) - if parent_req_name and add_to_parent: - self._discovered_dependencies[parent_req_name].append( - add_to_parent - ) - more_reqs.extend(to_scan_again) - - with indent_log(): - # We add req_to_install before its dependencies, so that we - # can refer to it when adding dependencies. 
- if not requirement_set.has_requirement(req_to_install.name): - # 'unnamed' requirements will get added here - req_to_install.is_direct = True - requirement_set.add_requirement( - req_to_install, parent_req_name=None, - ) - - if not self.ignore_dependencies: - if req_to_install.extras: - logger.debug( - "Installing extra requirements: %r", - ','.join(req_to_install.extras), - ) - missing_requested = sorted( - set(req_to_install.extras) - set(dist.extras) - ) - for missing in missing_requested: - logger.warning( - '%s does not provide the extra \'%s\'', - dist, missing - ) - - available_requested = sorted( - set(dist.extras) & set(req_to_install.extras) - ) - for subreq in dist.requires(available_requested): - add_req(subreq, extras_requested=available_requested) - - if not req_to_install.editable and not req_to_install.satisfied_by: - # XXX: --no-install leads this to report 'Successfully - # downloaded' for only non-editable reqs, even though we took - # action on them. - requirement_set.successfully_downloaded.append(req_to_install) - - return more_reqs - - def get_installation_order(self, req_set): - # type: (RequirementSet) -> List[InstallRequirement] - """Create the installation order. - - The installation order is topological - requirements are installed - before the requiring thing. We break cycles at an arbitrary point, - and make no other guarantees. - """ - # The current implementation, which we may change at any point - # installs the user specified things in the order given, except when - # dependencies must come earlier to achieve topological order. 
- order = [] - ordered_reqs = set() # type: Set[InstallRequirement] - - def schedule(req): - if req.satisfied_by or req in ordered_reqs: - return - if req.constraint: - return - ordered_reqs.add(req) - for dep in self._discovered_dependencies[req.name]: - schedule(dep) - order.append(req) - - for install_req in req_set.requirements.values(): - schedule(install_req) - return order diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/appdirs.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/appdirs.py deleted file mode 100644 index 9af9fa7..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/appdirs.py +++ /dev/null @@ -1,270 +0,0 @@ -""" -This code was taken from https://github.com/ActiveState/appdirs and modified -to suit our purposes. -""" -from __future__ import absolute_import - -import os -import sys - -from pip._vendor.six import PY2, text_type - -from pip._internal.utils.compat import WINDOWS, expanduser -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from typing import ( # noqa: F401 - List, Union - ) - - -def user_cache_dir(appname): - # type: (str) -> str - r""" - Return full path to the user-specific cache dir for this application. - - "appname" is the name of application. - - Typical user cache directories are: - macOS: ~/Library/Caches/ - Unix: ~/.cache/ (XDG default) - Windows: C:\Users\\AppData\Local\\Cache - - On Windows the only suggestion in the MSDN docs is that local settings go - in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the - non-roaming app data dir (the default returned by `user_data_dir`). 
Apps - typically put cache data somewhere *under* the given dir here. Some - examples: - ...\Mozilla\Firefox\Profiles\\Cache - ...\Acme\SuperApp\Cache\1.0 - - OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. - """ - if WINDOWS: - # Get the base path - path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) - - # When using Python 2, return paths as bytes on Windows like we do on - # other operating systems. See helper function docs for more details. - if PY2 and isinstance(path, text_type): - path = _win_path_to_bytes(path) - - # Add our app name and Cache directory to it - path = os.path.join(path, appname, "Cache") - elif sys.platform == "darwin": - # Get the base path - path = expanduser("~/Library/Caches") - - # Add our app name to it - path = os.path.join(path, appname) - else: - # Get the base path - path = os.getenv("XDG_CACHE_HOME", expanduser("~/.cache")) - - # Add our app name to it - path = os.path.join(path, appname) - - return path - - -def user_data_dir(appname, roaming=False): - # type: (str, bool) -> str - r""" - Return full path to the user-specific data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - - for a discussion of issues. - - Typical user data directories are: - macOS: ~/Library/Application Support/ - if it exists, else ~/.config/ - Unix: ~/.local/share/ # or in - $XDG_DATA_HOME, if defined - Win XP (not roaming): C:\Documents and Settings\\ ... - ...Application Data\ - Win XP (roaming): C:\Documents and Settings\\Local ... - ...Settings\Application Data\ - Win 7 (not roaming): C:\\Users\\AppData\Local\ - Win 7 (roaming): C:\\Users\\AppData\Roaming\ - - For Unix, we follow the XDG spec and support $XDG_DATA_HOME. 
- That means, by default "~/.local/share/". - """ - if WINDOWS: - const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA" - path = os.path.join(os.path.normpath(_get_win_folder(const)), appname) - elif sys.platform == "darwin": - path = os.path.join( - expanduser('~/Library/Application Support/'), - appname, - ) if os.path.isdir(os.path.join( - expanduser('~/Library/Application Support/'), - appname, - ) - ) else os.path.join( - expanduser('~/.config/'), - appname, - ) - else: - path = os.path.join( - os.getenv('XDG_DATA_HOME', expanduser("~/.local/share")), - appname, - ) - - return path - - -def user_config_dir(appname, roaming=True): - # type: (str, bool) -> str - """Return full path to the user-specific config dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "roaming" (boolean, default True) can be set False to not use the - Windows roaming appdata directory. That means that for users on a - Windows network setup for roaming profiles, this user data will be - sync'd on login. See - - for a discussion of issues. - - Typical user data directories are: - macOS: same as user_data_dir - Unix: ~/.config/ - Win *: same as user_data_dir - - For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. - That means, by default "~/.config/". - """ - if WINDOWS: - path = user_data_dir(appname, roaming=roaming) - elif sys.platform == "darwin": - path = user_data_dir(appname) - else: - path = os.getenv('XDG_CONFIG_HOME', expanduser("~/.config")) - path = os.path.join(path, appname) - - return path - - -# for the discussion regarding site_config_dirs locations -# see -def site_config_dirs(appname): - # type: (str) -> List[str] - r"""Return a list of potential user-shared config dirs for this application. - - "appname" is the name of application. 
- - Typical user config directories are: - macOS: /Library/Application Support// - Unix: /etc or $XDG_CONFIG_DIRS[i]// for each value in - $XDG_CONFIG_DIRS - Win XP: C:\Documents and Settings\All Users\Application ... - ...Data\\ - Vista: (Fail! "C:\ProgramData" is a hidden *system* directory - on Vista.) - Win 7: Hidden, but writeable on Win 7: - C:\ProgramData\\ - """ - if WINDOWS: - path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) - pathlist = [os.path.join(path, appname)] - elif sys.platform == 'darwin': - pathlist = [os.path.join('/Library/Application Support', appname)] - else: - # try looking in $XDG_CONFIG_DIRS - xdg_config_dirs = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg') - if xdg_config_dirs: - pathlist = [ - os.path.join(expanduser(x), appname) - for x in xdg_config_dirs.split(os.pathsep) - ] - else: - pathlist = [] - - # always look in /etc directly as well - pathlist.append('/etc') - - return pathlist - - -# -- Windows support functions -- - -def _get_win_folder_from_registry(csidl_name): - # type: (str) -> str - """ - This is a fallback technique at best. I'm not sure if using the - registry for this guarantees us the correct answer for all CSIDL_* - names. - """ - import _winreg - - shell_folder_name = { - "CSIDL_APPDATA": "AppData", - "CSIDL_COMMON_APPDATA": "Common AppData", - "CSIDL_LOCAL_APPDATA": "Local AppData", - }[csidl_name] - - key = _winreg.OpenKey( - _winreg.HKEY_CURRENT_USER, - r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" - ) - directory, _type = _winreg.QueryValueEx(key, shell_folder_name) - return directory - - -def _get_win_folder_with_ctypes(csidl_name): - # type: (str) -> str - csidl_const = { - "CSIDL_APPDATA": 26, - "CSIDL_COMMON_APPDATA": 35, - "CSIDL_LOCAL_APPDATA": 28, - }[csidl_name] - - buf = ctypes.create_unicode_buffer(1024) - ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) - - # Downgrade to short path name if have highbit chars. See - # . 
- has_high_char = False - for c in buf: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - buf2 = ctypes.create_unicode_buffer(1024) - if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): - buf = buf2 - - return buf.value - - -if WINDOWS: - try: - import ctypes - _get_win_folder = _get_win_folder_with_ctypes - except ImportError: - _get_win_folder = _get_win_folder_from_registry - - -def _win_path_to_bytes(path): - """Encode Windows paths to bytes. Only used on Python 2. - - Motivation is to be consistent with other operating systems where paths - are also returned as bytes. This avoids problems mixing bytes and Unicode - elsewhere in the codebase. For more details and discussion see - . - - If encoding using ASCII and MBCS fails, return the original Unicode path. - """ - for encoding in ('ASCII', 'MBCS'): - try: - return path.encode(encoding) - except (UnicodeEncodeError, LookupError): - pass - return path diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/compat.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/compat.py deleted file mode 100644 index 2d8b3bf..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/compat.py +++ /dev/null @@ -1,264 +0,0 @@ -"""Stuff that differs in different Python versions and platform -distributions.""" -from __future__ import absolute_import, division - -import codecs -import locale -import logging -import os -import shutil -import sys - -from pip._vendor.six import text_type - -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from typing import Tuple, Text # noqa: F401 - -try: - import ipaddress -except ImportError: - try: - from pip._vendor import ipaddress # type: ignore - except ImportError: - import ipaddr as ipaddress # type: ignore - ipaddress.ip_address = ipaddress.IPAddress # type: ignore - ipaddress.ip_network = ipaddress.IPNetwork # type: ignore - - 
-__all__ = [ - "ipaddress", "uses_pycache", "console_to_str", "native_str", - "get_path_uid", "stdlib_pkgs", "WINDOWS", "samefile", "get_terminal_size", - "get_extension_suffixes", -] - - -logger = logging.getLogger(__name__) - -if sys.version_info >= (3, 4): - uses_pycache = True - from importlib.util import cache_from_source -else: - import imp - - try: - cache_from_source = imp.cache_from_source # type: ignore - except AttributeError: - # does not use __pycache__ - cache_from_source = None - - uses_pycache = cache_from_source is not None - - -if sys.version_info >= (3, 5): - backslashreplace_decode = "backslashreplace" -else: - # In version 3.4 and older, backslashreplace exists - # but does not support use for decoding. - # We implement our own replace handler for this - # situation, so that we can consistently use - # backslash replacement for all versions. - def backslashreplace_decode_fn(err): - raw_bytes = (err.object[i] for i in range(err.start, err.end)) - if sys.version_info[0] == 2: - # Python 2 gave us characters - convert to numeric bytes - raw_bytes = (ord(b) for b in raw_bytes) - return u"".join(u"\\x%x" % c for c in raw_bytes), err.end - codecs.register_error( - "backslashreplace_decode", - backslashreplace_decode_fn, - ) - backslashreplace_decode = "backslashreplace_decode" - - -def console_to_str(data): - # type: (bytes) -> Text - """Return a string, safe for output, of subprocess output. - - We assume the data is in the locale preferred encoding. - If it won't decode properly, we warn the user but decode as - best we can. - - We also ensure that the output can be safely written to - standard output without encoding errors. - """ - - # First, get the encoding we assume. 
This is the preferred - # encoding for the locale, unless that is not found, or - # it is ASCII, in which case assume UTF-8 - encoding = locale.getpreferredencoding() - if (not encoding) or codecs.lookup(encoding).name == "ascii": - encoding = "utf-8" - - # Now try to decode the data - if we fail, warn the user and - # decode with replacement. - try: - decoded_data = data.decode(encoding) - except UnicodeDecodeError: - logger.warning( - "Subprocess output does not appear to be encoded as %s", - encoding, - ) - decoded_data = data.decode(encoding, errors=backslashreplace_decode) - - # Make sure we can print the output, by encoding it to the output - # encoding with replacement of unencodable characters, and then - # decoding again. - # We use stderr's encoding because it's less likely to be - # redirected and if we don't find an encoding we skip this - # step (on the assumption that output is wrapped by something - # that won't fail). - # The double getattr is to deal with the possibility that we're - # being called in a situation where sys.__stderr__ doesn't exist, - # or doesn't have an encoding attribute. Neither of these cases - # should occur in normal pip use, but there's no harm in checking - # in case people use pip in (unsupported) unusual situations. 
- output_encoding = getattr(getattr(sys, "__stderr__", None), - "encoding", None) - - if output_encoding: - output_encoded = decoded_data.encode( - output_encoding, - errors="backslashreplace" - ) - decoded_data = output_encoded.decode(output_encoding) - - return decoded_data - - -if sys.version_info >= (3,): - def native_str(s, replace=False): - # type: (str, bool) -> str - if isinstance(s, bytes): - return s.decode('utf-8', 'replace' if replace else 'strict') - return s - -else: - def native_str(s, replace=False): - # type: (str, bool) -> str - # Replace is ignored -- unicode to UTF-8 can't fail - if isinstance(s, text_type): - return s.encode('utf-8') - return s - - -def get_path_uid(path): - # type: (str) -> int - """ - Return path's uid. - - Does not follow symlinks: - https://github.com/pypa/pip/pull/935#discussion_r5307003 - - Placed this function in compat due to differences on AIX and - Jython, that should eventually go away. - - :raises OSError: When path is a symlink or can't be read. - """ - if hasattr(os, 'O_NOFOLLOW'): - fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW) - file_uid = os.fstat(fd).st_uid - os.close(fd) - else: # AIX and Jython - # WARNING: time of check vulnerability, but best we can do w/o NOFOLLOW - if not os.path.islink(path): - # older versions of Jython don't have `os.fstat` - file_uid = os.stat(path).st_uid - else: - # raise OSError for parity with os.O_NOFOLLOW above - raise OSError( - "%s is a symlink; Will not return uid for symlinks" % path - ) - return file_uid - - -if sys.version_info >= (3, 4): - from importlib.machinery import EXTENSION_SUFFIXES - - def get_extension_suffixes(): - return EXTENSION_SUFFIXES -else: - from imp import get_suffixes - - def get_extension_suffixes(): - return [suffix[0] for suffix in get_suffixes()] - - -def expanduser(path): - # type: (str) -> str - """ - Expand ~ and ~user constructions. 
- - Includes a workaround for https://bugs.python.org/issue14768 - """ - expanded = os.path.expanduser(path) - if path.startswith('~/') and expanded.startswith('//'): - expanded = expanded[1:] - return expanded - - -# packages in the stdlib that may have installation metadata, but should not be -# considered 'installed'. this theoretically could be determined based on -# dist.location (py27:`sysconfig.get_paths()['stdlib']`, -# py26:sysconfig.get_config_vars('LIBDEST')), but fear platform variation may -# make this ineffective, so hard-coding -stdlib_pkgs = {"python", "wsgiref", "argparse"} - - -# windows detection, covers cpython and ironpython -WINDOWS = (sys.platform.startswith("win") or - (sys.platform == 'cli' and os.name == 'nt')) - - -def samefile(file1, file2): - # type: (str, str) -> bool - """Provide an alternative for os.path.samefile on Windows/Python2""" - if hasattr(os.path, 'samefile'): - return os.path.samefile(file1, file2) - else: - path1 = os.path.normcase(os.path.abspath(file1)) - path2 = os.path.normcase(os.path.abspath(file2)) - return path1 == path2 - - -if hasattr(shutil, 'get_terminal_size'): - def get_terminal_size(): - # type: () -> Tuple[int, int] - """ - Returns a tuple (x, y) representing the width(x) and the height(y) - in characters of the terminal window. - """ - return tuple(shutil.get_terminal_size()) # type: ignore -else: - def get_terminal_size(): - # type: () -> Tuple[int, int] - """ - Returns a tuple (x, y) representing the width(x) and the height(y) - in characters of the terminal window. 
- """ - def ioctl_GWINSZ(fd): - try: - import fcntl - import termios - import struct - cr = struct.unpack_from( - 'hh', - fcntl.ioctl(fd, termios.TIOCGWINSZ, '12345678') - ) - except Exception: - return None - if cr == (0, 0): - return None - return cr - cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2) - if not cr: - try: - fd = os.open(os.ctermid(), os.O_RDONLY) - cr = ioctl_GWINSZ(fd) - os.close(fd) - except Exception: - pass - if not cr: - cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80)) - return int(cr[1]), int(cr[0]) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/deprecation.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/deprecation.py deleted file mode 100644 index 0beaf74..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/deprecation.py +++ /dev/null @@ -1,90 +0,0 @@ -""" -A module that implements tooling to enable easy warnings about deprecations. -""" -from __future__ import absolute_import - -import logging -import warnings - -from pip._vendor.packaging.version import parse - -from pip import __version__ as current_version -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from typing import Any, Optional # noqa: F401 - - -class PipDeprecationWarning(Warning): - pass - - -_original_showwarning = None # type: Any - - -# Warnings <-> Logging Integration -def _showwarning(message, category, filename, lineno, file=None, line=None): - if file is not None: - if _original_showwarning is not None: - _original_showwarning( - message, category, filename, lineno, file, line, - ) - elif issubclass(category, PipDeprecationWarning): - # We use a specially named logger which will handle all of the - # deprecation messages for pip. 
- logger = logging.getLogger("pip._internal.deprecations") - logger.warning(message) - else: - _original_showwarning( - message, category, filename, lineno, file, line, - ) - - -def install_warning_logger(): - # type: () -> None - # Enable our Deprecation Warnings - warnings.simplefilter("default", PipDeprecationWarning, append=True) - - global _original_showwarning - - if _original_showwarning is None: - _original_showwarning = warnings.showwarning - warnings.showwarning = _showwarning - - -def deprecated(reason, replacement, gone_in, issue=None): - # type: (str, Optional[str], Optional[str], Optional[int]) -> None - """Helper to deprecate existing functionality. - - reason: - Textual reason shown to the user about why this functionality has - been deprecated. - replacement: - Textual suggestion shown to the user about what alternative - functionality they can use. - gone_in: - The version of pip does this functionality should get removed in. - Raises errors if pip's current version is greater than or equal to - this. - issue: - Issue number on the tracker that would serve as a useful place for - users to find related discussion and provide feedback. - - Always pass replacement, gone_in and issue as keyword arguments for clarity - at the call site. - """ - - # Construct a nice message. - # This is purposely eagerly formatted as we want it to appear as if someone - # typed this entire message out. - message = "DEPRECATION: " + reason - if replacement is not None: - message += " A possible replacement is {}.".format(replacement) - if issue is not None: - url = "https://github.com/pypa/pip/issues/" + str(issue) - message += " You can find discussion regarding this at {}.".format(url) - - # Raise as an error if it has to be removed. 
- if gone_in is not None and parse(current_version) >= parse(gone_in): - raise PipDeprecationWarning(message) - warnings.warn(message, category=PipDeprecationWarning, stacklevel=2) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/encoding.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/encoding.py deleted file mode 100644 index d36defa..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/encoding.py +++ /dev/null @@ -1,39 +0,0 @@ -import codecs -import locale -import re -import sys - -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from typing import List, Tuple, Text # noqa: F401 - -BOMS = [ - (codecs.BOM_UTF8, 'utf8'), - (codecs.BOM_UTF16, 'utf16'), - (codecs.BOM_UTF16_BE, 'utf16-be'), - (codecs.BOM_UTF16_LE, 'utf16-le'), - (codecs.BOM_UTF32, 'utf32'), - (codecs.BOM_UTF32_BE, 'utf32-be'), - (codecs.BOM_UTF32_LE, 'utf32-le'), -] # type: List[Tuple[bytes, Text]] - -ENCODING_RE = re.compile(br'coding[:=]\s*([-\w.]+)') - - -def auto_decode(data): - # type: (bytes) -> Text - """Check a bytes string for a BOM to correctly detect the encoding - - Fallback to locale.getpreferredencoding(False) like open() on Python3""" - for bom, encoding in BOMS: - if data.startswith(bom): - return data[len(bom):].decode(encoding) - # Lets check the first two lines as in PEP263 - for line in data.split(b'\n')[:2]: - if line[0:1] == b'#' and ENCODING_RE.search(line): - encoding = ENCODING_RE.search(line).groups()[0].decode('ascii') - return data.decode(encoding) - return data.decode( - locale.getpreferredencoding(False) or sys.getdefaultencoding(), - ) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/filesystem.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/filesystem.py deleted file mode 100644 index 1e6b033..0000000 --- 
a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/filesystem.py +++ /dev/null @@ -1,30 +0,0 @@ -import os -import os.path - -from pip._internal.utils.compat import get_path_uid - - -def check_path_owner(path): - # type: (str) -> bool - # If we don't have a way to check the effective uid of this process, then - # we'll just assume that we own the directory. - if not hasattr(os, "geteuid"): - return True - - previous = None - while path != previous: - if os.path.lexists(path): - # Check if path is writable by current user. - if os.geteuid() == 0: - # Special handling for root user in order to handle properly - # cases where users use sudo without -H flag. - try: - path_uid = get_path_uid(path) - except OSError: - return False - return path_uid == 0 - else: - return os.access(path, os.W_OK) - else: - previous, path = path, os.path.dirname(path) - return False # assume we don't own the path diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/glibc.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/glibc.py deleted file mode 100644 index 8a51f69..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/glibc.py +++ /dev/null @@ -1,93 +0,0 @@ -from __future__ import absolute_import - -import ctypes -import re -import warnings - -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from typing import Optional, Tuple # noqa: F401 - - -def glibc_version_string(): - # type: () -> Optional[str] - "Returns glibc version string, or None if not using glibc." - - # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen - # manpage says, "If filename is NULL, then the returned handle is for the - # main program". This way we can let the linker do the work to figure out - # which libc our process is actually using. 
- process_namespace = ctypes.CDLL(None) - try: - gnu_get_libc_version = process_namespace.gnu_get_libc_version - except AttributeError: - # Symbol doesn't exist -> therefore, we are not linked to - # glibc. - return None - - # Call gnu_get_libc_version, which returns a string like "2.5" - gnu_get_libc_version.restype = ctypes.c_char_p - version_str = gnu_get_libc_version() - # py2 / py3 compatibility: - if not isinstance(version_str, str): - version_str = version_str.decode("ascii") - - return version_str - - -# Separated out from have_compatible_glibc for easier unit testing -def check_glibc_version(version_str, required_major, minimum_minor): - # type: (str, int, int) -> bool - # Parse string and check against requested version. - # - # We use a regexp instead of str.split because we want to discard any - # random junk that might come after the minor version -- this might happen - # in patched/forked versions of glibc (e.g. Linaro's version of glibc - # uses version strings like "2.20-2014.11"). See gh-3588. - m = re.match(r"(?P[0-9]+)\.(?P[0-9]+)", version_str) - if not m: - warnings.warn("Expected glibc version with 2 components major.minor," - " got: %s" % version_str, RuntimeWarning) - return False - return (int(m.group("major")) == required_major and - int(m.group("minor")) >= minimum_minor) - - -def have_compatible_glibc(required_major, minimum_minor): - # type: (int, int) -> bool - version_str = glibc_version_string() # type: Optional[str] - if version_str is None: - return False - return check_glibc_version(version_str, required_major, minimum_minor) - - -# platform.libc_ver regularly returns completely nonsensical glibc -# versions. E.g. 
on my computer, platform says: -# -# ~$ python2.7 -c 'import platform; print(platform.libc_ver())' -# ('glibc', '2.7') -# ~$ python3.5 -c 'import platform; print(platform.libc_ver())' -# ('glibc', '2.9') -# -# But the truth is: -# -# ~$ ldd --version -# ldd (Debian GLIBC 2.22-11) 2.22 -# -# This is unfortunate, because it means that the linehaul data on libc -# versions that was generated by pip 8.1.2 and earlier is useless and -# misleading. Solution: instead of using platform, use our code that actually -# works. -def libc_ver(): - # type: () -> Tuple[str, str] - """Try to determine the glibc version - - Returns a tuple of strings (lib, version) which default to empty strings - in case the lookup fails. - """ - glibc_version = glibc_version_string() - if glibc_version is None: - return ("", "") - else: - return ("glibc", glibc_version) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/hashes.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/hashes.py deleted file mode 100644 index c6df7a1..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/hashes.py +++ /dev/null @@ -1,115 +0,0 @@ -from __future__ import absolute_import - -import hashlib - -from pip._vendor.six import iteritems, iterkeys, itervalues - -from pip._internal.exceptions import ( - HashMismatch, HashMissing, InstallationError, -) -from pip._internal.utils.misc import read_chunks -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from typing import ( # noqa: F401 - Dict, List, BinaryIO, NoReturn, Iterator - ) - from pip._vendor.six import PY3 - if PY3: - from hashlib import _Hash # noqa: F401 - else: - from hashlib import _hash as _Hash # noqa: F401 - - -# The recommended hash algo of the moment. Change this whenever the state of -# the art changes; it won't hurt backward compatibility. 
-FAVORITE_HASH = 'sha256' - - -# Names of hashlib algorithms allowed by the --hash option and ``pip hash`` -# Currently, those are the ones at least as collision-resistant as sha256. -STRONG_HASHES = ['sha256', 'sha384', 'sha512'] - - -class Hashes(object): - """A wrapper that builds multiple hashes at once and checks them against - known-good values - - """ - def __init__(self, hashes=None): - # type: (Dict[str, List[str]]) -> None - """ - :param hashes: A dict of algorithm names pointing to lists of allowed - hex digests - """ - self._allowed = {} if hashes is None else hashes - - def check_against_chunks(self, chunks): - # type: (Iterator[bytes]) -> None - """Check good hashes against ones built from iterable of chunks of - data. - - Raise HashMismatch if none match. - - """ - gots = {} - for hash_name in iterkeys(self._allowed): - try: - gots[hash_name] = hashlib.new(hash_name) - except (ValueError, TypeError): - raise InstallationError('Unknown hash name: %s' % hash_name) - - for chunk in chunks: - for hash in itervalues(gots): - hash.update(chunk) - - for hash_name, got in iteritems(gots): - if got.hexdigest() in self._allowed[hash_name]: - return - self._raise(gots) - - def _raise(self, gots): - # type: (Dict[str, _Hash]) -> NoReturn - raise HashMismatch(self._allowed, gots) - - def check_against_file(self, file): - # type: (BinaryIO) -> None - """Check good hashes against a file-like object - - Raise HashMismatch if none match. 
- - """ - return self.check_against_chunks(read_chunks(file)) - - def check_against_path(self, path): - # type: (str) -> None - with open(path, 'rb') as file: - return self.check_against_file(file) - - def __nonzero__(self): - # type: () -> bool - """Return whether I know any known-good hashes.""" - return bool(self._allowed) - - def __bool__(self): - # type: () -> bool - return self.__nonzero__() - - -class MissingHashes(Hashes): - """A workalike for Hashes used when we're missing a hash for a requirement - - It computes the actual hash of the requirement and raises a HashMissing - exception showing it to the user. - - """ - def __init__(self): - # type: () -> None - """Don't offer the ``hashes`` kwarg.""" - # Pass our favorite hash in to generate a "gotten hash". With the - # empty list, it will never match, so an error will always raise. - super(MissingHashes, self).__init__(hashes={FAVORITE_HASH: []}) - - def _raise(self, gots): - # type: (Dict[str, _Hash]) -> NoReturn - raise HashMissing(gots[FAVORITE_HASH].hexdigest()) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/logging.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/logging.py deleted file mode 100644 index 579d696..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/logging.py +++ /dev/null @@ -1,318 +0,0 @@ -from __future__ import absolute_import - -import contextlib -import errno -import logging -import logging.handlers -import os -import sys - -from pip._vendor.six import PY2 - -from pip._internal.utils.compat import WINDOWS -from pip._internal.utils.misc import ensure_dir - -try: - import threading -except ImportError: - import dummy_threading as threading # type: ignore - - -try: - from pip._vendor import colorama -# Lots of different errors can come from this, including SystemError and -# ImportError. 
-except Exception: - colorama = None - - -_log_state = threading.local() -_log_state.indentation = 0 - - -class BrokenStdoutLoggingError(Exception): - """ - Raised if BrokenPipeError occurs for the stdout stream while logging. - """ - pass - - -# BrokenPipeError does not exist in Python 2 and, in addition, manifests -# differently in Windows and non-Windows. -if WINDOWS: - # In Windows, a broken pipe can show up as EINVAL rather than EPIPE: - # https://bugs.python.org/issue19612 - # https://bugs.python.org/issue30418 - if PY2: - def _is_broken_pipe_error(exc_class, exc): - """See the docstring for non-Windows Python 3 below.""" - return (exc_class is IOError and - exc.errno in (errno.EINVAL, errno.EPIPE)) - else: - # In Windows, a broken pipe IOError became OSError in Python 3. - def _is_broken_pipe_error(exc_class, exc): - """See the docstring for non-Windows Python 3 below.""" - return ((exc_class is BrokenPipeError) or # noqa: F821 - (exc_class is OSError and - exc.errno in (errno.EINVAL, errno.EPIPE))) -elif PY2: - def _is_broken_pipe_error(exc_class, exc): - """See the docstring for non-Windows Python 3 below.""" - return (exc_class is IOError and exc.errno == errno.EPIPE) -else: - # Then we are in the non-Windows Python 3 case. - def _is_broken_pipe_error(exc_class, exc): - """ - Return whether an exception is a broken pipe error. - - Args: - exc_class: an exception class. - exc: an exception instance. - """ - return (exc_class is BrokenPipeError) # noqa: F821 - - -@contextlib.contextmanager -def indent_log(num=2): - """ - A context manager which will cause the log output to be indented for any - log messages emitted inside it. - """ - _log_state.indentation += num - try: - yield - finally: - _log_state.indentation -= num - - -def get_indentation(): - return getattr(_log_state, 'indentation', 0) - - -class IndentingFormatter(logging.Formatter): - def __init__(self, *args, **kwargs): - """ - A logging.Formatter obeying containing indent_log contexts. 
- - :param add_timestamp: A bool indicating output lines should be prefixed - with their record's timestamp. - """ - self.add_timestamp = kwargs.pop("add_timestamp", False) - super(IndentingFormatter, self).__init__(*args, **kwargs) - - def format(self, record): - """ - Calls the standard formatter, but will indent all of the log messages - by our current indentation level. - """ - formatted = super(IndentingFormatter, self).format(record) - prefix = '' - if self.add_timestamp: - prefix = self.formatTime(record, "%Y-%m-%dT%H:%M:%S ") - prefix += " " * get_indentation() - formatted = "".join([ - prefix + line - for line in formatted.splitlines(True) - ]) - return formatted - - -def _color_wrap(*colors): - def wrapped(inp): - return "".join(list(colors) + [inp, colorama.Style.RESET_ALL]) - return wrapped - - -class ColorizedStreamHandler(logging.StreamHandler): - - # Don't build up a list of colors if we don't have colorama - if colorama: - COLORS = [ - # This needs to be in order from highest logging level to lowest. - (logging.ERROR, _color_wrap(colorama.Fore.RED)), - (logging.WARNING, _color_wrap(colorama.Fore.YELLOW)), - ] - else: - COLORS = [] - - def __init__(self, stream=None, no_color=None): - logging.StreamHandler.__init__(self, stream) - self._no_color = no_color - - if WINDOWS and colorama: - self.stream = colorama.AnsiToWin32(self.stream) - - def _using_stdout(self): - """ - Return whether the handler is using sys.stdout. - """ - if WINDOWS and colorama: - # Then self.stream is an AnsiToWin32 object. 
- return self.stream.wrapped is sys.stdout - - return self.stream is sys.stdout - - def should_color(self): - # Don't colorize things if we do not have colorama or if told not to - if not colorama or self._no_color: - return False - - real_stream = ( - self.stream if not isinstance(self.stream, colorama.AnsiToWin32) - else self.stream.wrapped - ) - - # If the stream is a tty we should color it - if hasattr(real_stream, "isatty") and real_stream.isatty(): - return True - - # If we have an ANSI term we should color it - if os.environ.get("TERM") == "ANSI": - return True - - # If anything else we should not color it - return False - - def format(self, record): - msg = logging.StreamHandler.format(self, record) - - if self.should_color(): - for level, color in self.COLORS: - if record.levelno >= level: - msg = color(msg) - break - - return msg - - # The logging module says handleError() can be customized. - def handleError(self, record): - exc_class, exc = sys.exc_info()[:2] - # If a broken pipe occurred while calling write() or flush() on the - # stdout stream in logging's Handler.emit(), then raise our special - # exception so we can handle it in main() instead of logging the - # broken pipe error and continuing. - if (exc_class and self._using_stdout() and - _is_broken_pipe_error(exc_class, exc)): - raise BrokenStdoutLoggingError() - - return super(ColorizedStreamHandler, self).handleError(record) - - -class BetterRotatingFileHandler(logging.handlers.RotatingFileHandler): - - def _open(self): - ensure_dir(os.path.dirname(self.baseFilename)) - return logging.handlers.RotatingFileHandler._open(self) - - -class MaxLevelFilter(logging.Filter): - - def __init__(self, level): - self.level = level - - def filter(self, record): - return record.levelno < self.level - - -def setup_logging(verbosity, no_color, user_log_file): - """Configures and sets up all of the logging - - Returns the requested logging level, as its integer value. 
- """ - - # Determine the level to be logging at. - if verbosity >= 1: - level = "DEBUG" - elif verbosity == -1: - level = "WARNING" - elif verbosity == -2: - level = "ERROR" - elif verbosity <= -3: - level = "CRITICAL" - else: - level = "INFO" - - level_number = getattr(logging, level) - - # The "root" logger should match the "console" level *unless* we also need - # to log to a user log file. - include_user_log = user_log_file is not None - if include_user_log: - additional_log_file = user_log_file - root_level = "DEBUG" - else: - additional_log_file = "/dev/null" - root_level = level - - # Disable any logging besides WARNING unless we have DEBUG level logging - # enabled for vendored libraries. - vendored_log_level = "WARNING" if level in ["INFO", "ERROR"] else "DEBUG" - - # Shorthands for clarity - log_streams = { - "stdout": "ext://sys.stdout", - "stderr": "ext://sys.stderr", - } - handler_classes = { - "stream": "pip._internal.utils.logging.ColorizedStreamHandler", - "file": "pip._internal.utils.logging.BetterRotatingFileHandler", - } - - logging.config.dictConfig({ - "version": 1, - "disable_existing_loggers": False, - "filters": { - "exclude_warnings": { - "()": "pip._internal.utils.logging.MaxLevelFilter", - "level": logging.WARNING, - }, - }, - "formatters": { - "indent": { - "()": IndentingFormatter, - "format": "%(message)s", - }, - "indent_with_timestamp": { - "()": IndentingFormatter, - "format": "%(message)s", - "add_timestamp": True, - }, - }, - "handlers": { - "console": { - "level": level, - "class": handler_classes["stream"], - "no_color": no_color, - "stream": log_streams["stdout"], - "filters": ["exclude_warnings"], - "formatter": "indent", - }, - "console_errors": { - "level": "WARNING", - "class": handler_classes["stream"], - "no_color": no_color, - "stream": log_streams["stderr"], - "formatter": "indent", - }, - "user_log": { - "level": "DEBUG", - "class": handler_classes["file"], - "filename": additional_log_file, - "delay": True, - 
"formatter": "indent_with_timestamp", - }, - }, - "root": { - "level": root_level, - "handlers": ["console", "console_errors"] + ( - ["user_log"] if include_user_log else [] - ), - }, - "loggers": { - "pip._vendor": { - "level": vendored_log_level - } - }, - }) - - return level_number diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/misc.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/misc.py deleted file mode 100644 index 84605ee..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/misc.py +++ /dev/null @@ -1,1040 +0,0 @@ -from __future__ import absolute_import - -import contextlib -import errno -import io -import locale -# we have a submodule named 'logging' which would shadow this if we used the -# regular name: -import logging as std_logging -import os -import posixpath -import re -import shutil -import stat -import subprocess -import sys -import tarfile -import zipfile -from collections import deque - -from pip._vendor import pkg_resources -# NOTE: retrying is not annotated in typeshed as on 2017-07-17, which is -# why we ignore the type on this import. 
-from pip._vendor.retrying import retry # type: ignore -from pip._vendor.six import PY2 -from pip._vendor.six.moves import input -from pip._vendor.six.moves.urllib import parse as urllib_parse -from pip._vendor.six.moves.urllib.parse import unquote as urllib_unquote - -from pip._internal.exceptions import CommandError, InstallationError -from pip._internal.locations import ( - running_under_virtualenv, site_packages, user_site, virtualenv_no_global, - write_delete_marker_file, -) -from pip._internal.utils.compat import ( - WINDOWS, console_to_str, expanduser, stdlib_pkgs, -) -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if PY2: - from io import BytesIO as StringIO -else: - from io import StringIO - -if MYPY_CHECK_RUNNING: - from typing import ( # noqa: F401 - Optional, Tuple, Iterable, List, Match, Union, Any, Mapping, Text, - AnyStr, Container - ) - from pip._vendor.pkg_resources import Distribution # noqa: F401 - from pip._internal.models.link import Link # noqa: F401 - from pip._internal.utils.ui import SpinnerInterface # noqa: F401 - - -__all__ = ['rmtree', 'display_path', 'backup_dir', - 'ask', 'splitext', - 'format_size', 'is_installable_dir', - 'is_svn_page', 'file_contents', - 'split_leading_dir', 'has_leading_dir', - 'normalize_path', - 'renames', 'get_prog', - 'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess', - 'captured_stdout', 'ensure_dir', - 'ARCHIVE_EXTENSIONS', 'SUPPORTED_EXTENSIONS', 'WHEEL_EXTENSION', - 'get_installed_version', 'remove_auth_from_url'] - - -logger = std_logging.getLogger(__name__) - -WHEEL_EXTENSION = '.whl' -BZ2_EXTENSIONS = ('.tar.bz2', '.tbz') -XZ_EXTENSIONS = ('.tar.xz', '.txz', '.tlz', '.tar.lz', '.tar.lzma') -ZIP_EXTENSIONS = ('.zip', WHEEL_EXTENSION) -TAR_EXTENSIONS = ('.tar.gz', '.tgz', '.tar') -ARCHIVE_EXTENSIONS = ( - ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS) -SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS - -try: - import bz2 # noqa - SUPPORTED_EXTENSIONS += 
BZ2_EXTENSIONS -except ImportError: - logger.debug('bz2 module is not available') - -try: - # Only for Python 3.3+ - import lzma # noqa - SUPPORTED_EXTENSIONS += XZ_EXTENSIONS -except ImportError: - logger.debug('lzma module is not available') - - -def ensure_dir(path): - # type: (AnyStr) -> None - """os.path.makedirs without EEXIST.""" - try: - os.makedirs(path) - except OSError as e: - if e.errno != errno.EEXIST: - raise - - -def get_prog(): - # type: () -> str - try: - prog = os.path.basename(sys.argv[0]) - if prog in ('__main__.py', '-c'): - return "%s -m pip" % sys.executable - else: - return prog - except (AttributeError, TypeError, IndexError): - pass - return 'pip' - - -# Retry every half second for up to 3 seconds -@retry(stop_max_delay=3000, wait_fixed=500) -def rmtree(dir, ignore_errors=False): - # type: (str, bool) -> None - shutil.rmtree(dir, ignore_errors=ignore_errors, - onerror=rmtree_errorhandler) - - -def rmtree_errorhandler(func, path, exc_info): - """On Windows, the files in .svn are read-only, so when rmtree() tries to - remove them, an exception is thrown. We catch that here, remove the - read-only attribute, and hopefully continue without problems.""" - # if file type currently read only - if os.stat(path).st_mode & stat.S_IREAD: - # convert to read/write - os.chmod(path, stat.S_IWRITE) - # use the original function to repeat the operation - func(path) - return - else: - raise - - -def display_path(path): - # type: (Union[str, Text]) -> str - """Gives the display value for a given path, making it relative to cwd - if possible.""" - path = os.path.normcase(os.path.abspath(path)) - if sys.version_info[0] == 2: - path = path.decode(sys.getfilesystemencoding(), 'replace') - path = path.encode(sys.getdefaultencoding(), 'replace') - if path.startswith(os.getcwd() + os.path.sep): - path = '.' 
+ path[len(os.getcwd()):] - return path - - -def backup_dir(dir, ext='.bak'): - # type: (str, str) -> str - """Figure out the name of a directory to back up the given dir to - (adding .bak, .bak2, etc)""" - n = 1 - extension = ext - while os.path.exists(dir + extension): - n += 1 - extension = ext + str(n) - return dir + extension - - -def ask_path_exists(message, options): - # type: (str, Iterable[str]) -> str - for action in os.environ.get('PIP_EXISTS_ACTION', '').split(): - if action in options: - return action - return ask(message, options) - - -def ask(message, options): - # type: (str, Iterable[str]) -> str - """Ask the message interactively, with the given possible responses""" - while 1: - if os.environ.get('PIP_NO_INPUT'): - raise Exception( - 'No input was expected ($PIP_NO_INPUT set); question: %s' % - message - ) - response = input(message) - response = response.strip().lower() - if response not in options: - print( - 'Your response (%r) was not one of the expected responses: ' - '%s' % (response, ', '.join(options)) - ) - else: - return response - - -def format_size(bytes): - # type: (float) -> str - if bytes > 1000 * 1000: - return '%.1fMB' % (bytes / 1000.0 / 1000) - elif bytes > 10 * 1000: - return '%ikB' % (bytes / 1000) - elif bytes > 1000: - return '%.1fkB' % (bytes / 1000.0) - else: - return '%ibytes' % bytes - - -def is_installable_dir(path): - # type: (str) -> bool - """Is path is a directory containing setup.py or pyproject.toml? 
- """ - if not os.path.isdir(path): - return False - setup_py = os.path.join(path, 'setup.py') - if os.path.isfile(setup_py): - return True - pyproject_toml = os.path.join(path, 'pyproject.toml') - if os.path.isfile(pyproject_toml): - return True - return False - - -def is_svn_page(html): - # type: (Union[str, Text]) -> Optional[Match[Union[str, Text]]] - """ - Returns true if the page appears to be the index page of an svn repository - """ - return (re.search(r'[^<]*Revision \d+:', html) and - re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I)) - - -def file_contents(filename): - # type: (str) -> Text - with open(filename, 'rb') as fp: - return fp.read().decode('utf-8') - - -def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE): - """Yield pieces of data from a file-like object until EOF.""" - while True: - chunk = file.read(size) - if not chunk: - break - yield chunk - - -def split_leading_dir(path): - # type: (Union[str, Text]) -> List[Union[str, Text]] - path = path.lstrip('/').lstrip('\\') - if '/' in path and (('\\' in path and path.find('/') < path.find('\\')) or - '\\' not in path): - return path.split('/', 1) - elif '\\' in path: - return path.split('\\', 1) - else: - return [path, ''] - - -def has_leading_dir(paths): - # type: (Iterable[Union[str, Text]]) -> bool - """Returns true if all the paths have the same leading path name - (i.e., everything is in one subdirectory in an archive)""" - common_prefix = None - for path in paths: - prefix, rest = split_leading_dir(path) - if not prefix: - return False - elif common_prefix is None: - common_prefix = prefix - elif prefix != common_prefix: - return False - return True - - -def normalize_path(path, resolve_symlinks=True): - # type: (str, bool) -> str - """ - Convert a path to its canonical, case-normalized, absolute version. 
- - """ - path = expanduser(path) - if resolve_symlinks: - path = os.path.realpath(path) - else: - path = os.path.abspath(path) - return os.path.normcase(path) - - -def splitext(path): - # type: (str) -> Tuple[str, str] - """Like os.path.splitext, but take off .tar too""" - base, ext = posixpath.splitext(path) - if base.lower().endswith('.tar'): - ext = base[-4:] + ext - base = base[:-4] - return base, ext - - -def renames(old, new): - # type: (str, str) -> None - """Like os.renames(), but handles renaming across devices.""" - # Implementation borrowed from os.renames(). - head, tail = os.path.split(new) - if head and tail and not os.path.exists(head): - os.makedirs(head) - - shutil.move(old, new) - - head, tail = os.path.split(old) - if head and tail: - try: - os.removedirs(head) - except OSError: - pass - - -def is_local(path): - # type: (str) -> bool - """ - Return True if path is within sys.prefix, if we're running in a virtualenv. - - If we're not in a virtualenv, all paths are considered "local." - - """ - if not running_under_virtualenv(): - return True - return normalize_path(path).startswith(normalize_path(sys.prefix)) - - -def dist_is_local(dist): - # type: (Distribution) -> bool - """ - Return True if given Distribution object is installed locally - (i.e. within current virtualenv). - - Always True if we're not in a virtualenv. - - """ - return is_local(dist_location(dist)) - - -def dist_in_usersite(dist): - # type: (Distribution) -> bool - """ - Return True if given Distribution is installed in user site. - """ - norm_path = normalize_path(dist_location(dist)) - return norm_path.startswith(normalize_path(user_site)) - - -def dist_in_site_packages(dist): - # type: (Distribution) -> bool - """ - Return True if given Distribution is installed in - sysconfig.get_python_lib(). 
- """ - return normalize_path( - dist_location(dist) - ).startswith(normalize_path(site_packages)) - - -def dist_is_editable(dist): - # type: (Distribution) -> bool - """ - Return True if given Distribution is an editable install. - """ - for path_item in sys.path: - egg_link = os.path.join(path_item, dist.project_name + '.egg-link') - if os.path.isfile(egg_link): - return True - return False - - -def get_installed_distributions(local_only=True, - skip=stdlib_pkgs, - include_editables=True, - editables_only=False, - user_only=False): - # type: (bool, Container[str], bool, bool, bool) -> List[Distribution] - """ - Return a list of installed Distribution objects. - - If ``local_only`` is True (default), only return installations - local to the current virtualenv, if in a virtualenv. - - ``skip`` argument is an iterable of lower-case project names to - ignore; defaults to stdlib_pkgs - - If ``include_editables`` is False, don't report editables. - - If ``editables_only`` is True , only report editables. - - If ``user_only`` is True , only report installations in the user - site directory. - - """ - if local_only: - local_test = dist_is_local - else: - def local_test(d): - return True - - if include_editables: - def editable_test(d): - return True - else: - def editable_test(d): - return not dist_is_editable(d) - - if editables_only: - def editables_only_test(d): - return dist_is_editable(d) - else: - def editables_only_test(d): - return True - - if user_only: - user_test = dist_in_usersite - else: - def user_test(d): - return True - - # because of pkg_resources vendoring, mypy cannot find stub in typeshed - return [d for d in pkg_resources.working_set # type: ignore - if local_test(d) and - d.key not in skip and - editable_test(d) and - editables_only_test(d) and - user_test(d) - ] - - -def egg_link_path(dist): - # type: (Distribution) -> Optional[str] - """ - Return the path for the .egg-link file if it exists, otherwise, None. 
- - There's 3 scenarios: - 1) not in a virtualenv - try to find in site.USER_SITE, then site_packages - 2) in a no-global virtualenv - try to find in site_packages - 3) in a yes-global virtualenv - try to find in site_packages, then site.USER_SITE - (don't look in global location) - - For #1 and #3, there could be odd cases, where there's an egg-link in 2 - locations. - - This method will just return the first one found. - """ - sites = [] - if running_under_virtualenv(): - if virtualenv_no_global(): - sites.append(site_packages) - else: - sites.append(site_packages) - if user_site: - sites.append(user_site) - else: - if user_site: - sites.append(user_site) - sites.append(site_packages) - - for site in sites: - egglink = os.path.join(site, dist.project_name) + '.egg-link' - if os.path.isfile(egglink): - return egglink - return None - - -def dist_location(dist): - # type: (Distribution) -> str - """ - Get the site-packages location of this distribution. Generally - this is dist.location, except in the case of develop-installed - packages, where dist.location is the source code location, and we - want to know where the egg-link file is. - - """ - egg_link = egg_link_path(dist) - if egg_link: - return egg_link - return dist.location - - -def current_umask(): - """Get the current umask which involves having to set it temporarily.""" - mask = os.umask(0) - os.umask(mask) - return mask - - -def unzip_file(filename, location, flatten=True): - # type: (str, str, bool) -> None - """ - Unzip the file (with path `filename`) to the destination `location`. All - files are written based on system defaults and umask (i.e. permissions are - not preserved), except that regular file members with any execute - permissions (user, group, or world) have "chmod +x" applied after being - written. Note that for windows, any execute changes using os.chmod are - no-ops per the python docs. 
- """ - ensure_dir(location) - zipfp = open(filename, 'rb') - try: - zip = zipfile.ZipFile(zipfp, allowZip64=True) - leading = has_leading_dir(zip.namelist()) and flatten - for info in zip.infolist(): - name = info.filename - fn = name - if leading: - fn = split_leading_dir(name)[1] - fn = os.path.join(location, fn) - dir = os.path.dirname(fn) - if fn.endswith('/') or fn.endswith('\\'): - # A directory - ensure_dir(fn) - else: - ensure_dir(dir) - # Don't use read() to avoid allocating an arbitrarily large - # chunk of memory for the file's content - fp = zip.open(name) - try: - with open(fn, 'wb') as destfp: - shutil.copyfileobj(fp, destfp) - finally: - fp.close() - mode = info.external_attr >> 16 - # if mode and regular file and any execute permissions for - # user/group/world? - if mode and stat.S_ISREG(mode) and mode & 0o111: - # make dest file have execute for user/group/world - # (chmod +x) no-op on windows per python docs - os.chmod(fn, (0o777 - current_umask() | 0o111)) - finally: - zipfp.close() - - -def untar_file(filename, location): - # type: (str, str) -> None - """ - Untar the file (with path `filename`) to the destination `location`. - All files are written based on system defaults and umask (i.e. permissions - are not preserved), except that regular file members with any execute - permissions (user, group, or world) have "chmod +x" applied after being - written. Note that for windows, any execute changes using os.chmod are - no-ops per the python docs. 
- """ - ensure_dir(location) - if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'): - mode = 'r:gz' - elif filename.lower().endswith(BZ2_EXTENSIONS): - mode = 'r:bz2' - elif filename.lower().endswith(XZ_EXTENSIONS): - mode = 'r:xz' - elif filename.lower().endswith('.tar'): - mode = 'r' - else: - logger.warning( - 'Cannot determine compression type for file %s', filename, - ) - mode = 'r:*' - tar = tarfile.open(filename, mode) - try: - leading = has_leading_dir([ - member.name for member in tar.getmembers() - ]) - for member in tar.getmembers(): - fn = member.name - if leading: - # https://github.com/python/mypy/issues/1174 - fn = split_leading_dir(fn)[1] # type: ignore - path = os.path.join(location, fn) - if member.isdir(): - ensure_dir(path) - elif member.issym(): - try: - # https://github.com/python/typeshed/issues/2673 - tar._extract_member(member, path) # type: ignore - except Exception as exc: - # Some corrupt tar files seem to produce this - # (specifically bad symlinks) - logger.warning( - 'In the tar file %s the member %s is invalid: %s', - filename, member.name, exc, - ) - continue - else: - try: - fp = tar.extractfile(member) - except (KeyError, AttributeError) as exc: - # Some corrupt tar files seem to produce this - # (specifically bad symlinks) - logger.warning( - 'In the tar file %s the member %s is invalid: %s', - filename, member.name, exc, - ) - continue - ensure_dir(os.path.dirname(path)) - with open(path, 'wb') as destfp: - shutil.copyfileobj(fp, destfp) - fp.close() - # Update the timestamp (useful for cython compiled files) - # https://github.com/python/typeshed/issues/2673 - tar.utime(member, path) # type: ignore - # member have any execute permissions for user/group/world? 
- if member.mode & 0o111: - # make dest file have execute for user/group/world - # no-op on windows per python docs - os.chmod(path, (0o777 - current_umask() | 0o111)) - finally: - tar.close() - - -def unpack_file( - filename, # type: str - location, # type: str - content_type, # type: Optional[str] - link # type: Optional[Link] -): - # type: (...) -> None - filename = os.path.realpath(filename) - if (content_type == 'application/zip' or - filename.lower().endswith(ZIP_EXTENSIONS) or - zipfile.is_zipfile(filename)): - unzip_file( - filename, - location, - flatten=not filename.endswith('.whl') - ) - elif (content_type == 'application/x-gzip' or - tarfile.is_tarfile(filename) or - filename.lower().endswith( - TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)): - untar_file(filename, location) - elif (content_type and content_type.startswith('text/html') and - is_svn_page(file_contents(filename))): - # We don't really care about this - from pip._internal.vcs.subversion import Subversion - Subversion('svn+' + link.url).unpack(location) - else: - # FIXME: handle? - # FIXME: magic signatures? - logger.critical( - 'Cannot unpack file %s (downloaded from %s, content-type: %s); ' - 'cannot detect archive format', - filename, location, content_type, - ) - raise InstallationError( - 'Cannot determine archive format of %s' % location - ) - - -def call_subprocess( - cmd, # type: List[str] - show_stdout=True, # type: bool - cwd=None, # type: Optional[str] - on_returncode='raise', # type: str - extra_ok_returncodes=None, # type: Optional[Iterable[int]] - command_desc=None, # type: Optional[str] - extra_environ=None, # type: Optional[Mapping[str, Any]] - unset_environ=None, # type: Optional[Iterable[str]] - spinner=None # type: Optional[SpinnerInterface] -): - # type: (...) -> Optional[Text] - """ - Args: - extra_ok_returncodes: an iterable of integer return codes that are - acceptable, in addition to 0. Defaults to None, which means []. 
- unset_environ: an iterable of environment variable names to unset - prior to calling subprocess.Popen(). - """ - if extra_ok_returncodes is None: - extra_ok_returncodes = [] - if unset_environ is None: - unset_environ = [] - # This function's handling of subprocess output is confusing and I - # previously broke it terribly, so as penance I will write a long comment - # explaining things. - # - # The obvious thing that affects output is the show_stdout= - # kwarg. show_stdout=True means, let the subprocess write directly to our - # stdout. Even though it is nominally the default, it is almost never used - # inside pip (and should not be used in new code without a very good - # reason); as of 2016-02-22 it is only used in a few places inside the VCS - # wrapper code. Ideally we should get rid of it entirely, because it - # creates a lot of complexity here for a rarely used feature. - # - # Most places in pip set show_stdout=False. What this means is: - # - We connect the child stdout to a pipe, which we read. - # - By default, we hide the output but show a spinner -- unless the - # subprocess exits with an error, in which case we show the output. - # - If the --verbose option was passed (= loglevel is DEBUG), then we show - # the output unconditionally. (But in this case we don't want to show - # the output a second time if it turns out that there was an error.) - # - # stderr is always merged with stdout (even if show_stdout=True). 
- if show_stdout: - stdout = None - else: - stdout = subprocess.PIPE - if command_desc is None: - cmd_parts = [] - for part in cmd: - if ' ' in part or '\n' in part or '"' in part or "'" in part: - part = '"%s"' % part.replace('"', '\\"') - cmd_parts.append(part) - command_desc = ' '.join(cmd_parts) - logger.debug("Running command %s", command_desc) - env = os.environ.copy() - if extra_environ: - env.update(extra_environ) - for name in unset_environ: - env.pop(name, None) - try: - proc = subprocess.Popen( - cmd, stderr=subprocess.STDOUT, stdin=subprocess.PIPE, - stdout=stdout, cwd=cwd, env=env, - ) - proc.stdin.close() - except Exception as exc: - logger.critical( - "Error %s while executing command %s", exc, command_desc, - ) - raise - all_output = [] - if stdout is not None: - while True: - line = console_to_str(proc.stdout.readline()) - if not line: - break - line = line.rstrip() - all_output.append(line + '\n') - if logger.getEffectiveLevel() <= std_logging.DEBUG: - # Show the line immediately - logger.debug(line) - else: - # Update the spinner - if spinner is not None: - spinner.spin() - try: - proc.wait() - finally: - if proc.stdout: - proc.stdout.close() - if spinner is not None: - if proc.returncode: - spinner.finish("error") - else: - spinner.finish("done") - if proc.returncode and proc.returncode not in extra_ok_returncodes: - if on_returncode == 'raise': - if (logger.getEffectiveLevel() > std_logging.DEBUG and - not show_stdout): - logger.info( - 'Complete output from command %s:', command_desc, - ) - logger.info( - ''.join(all_output) + - '\n----------------------------------------' - ) - raise InstallationError( - 'Command "%s" failed with error code %s in %s' - % (command_desc, proc.returncode, cwd)) - elif on_returncode == 'warn': - logger.warning( - 'Command "%s" had error code %s in %s', - command_desc, proc.returncode, cwd, - ) - elif on_returncode == 'ignore': - pass - else: - raise ValueError('Invalid value: on_returncode=%s' % - 
repr(on_returncode)) - if not show_stdout: - return ''.join(all_output) - return None - - -def read_text_file(filename): - # type: (str) -> str - """Return the contents of *filename*. - - Try to decode the file contents with utf-8, the preferred system encoding - (e.g., cp1252 on some Windows machines), and latin1, in that order. - Decoding a byte string with latin1 will never raise an error. In the worst - case, the returned string will contain some garbage characters. - - """ - with open(filename, 'rb') as fp: - data = fp.read() - - encodings = ['utf-8', locale.getpreferredencoding(False), 'latin1'] - for enc in encodings: - try: - # https://github.com/python/mypy/issues/1174 - data = data.decode(enc) # type: ignore - except UnicodeDecodeError: - continue - break - - assert not isinstance(data, bytes) # Latin1 should have worked. - return data - - -def _make_build_dir(build_dir): - os.makedirs(build_dir) - write_delete_marker_file(build_dir) - - -class FakeFile(object): - """Wrap a list of lines in an object with readline() to make - ConfigParser happy.""" - def __init__(self, lines): - self._gen = (l for l in lines) - - def readline(self): - try: - try: - return next(self._gen) - except NameError: - return self._gen.next() - except StopIteration: - return '' - - def __iter__(self): - return self._gen - - -class StreamWrapper(StringIO): - - @classmethod - def from_stream(cls, orig_stream): - cls.orig_stream = orig_stream - return cls() - - # compileall.compile_dir() needs stdout.encoding to print to stdout - @property - def encoding(self): - return self.orig_stream.encoding - - -@contextlib.contextmanager -def captured_output(stream_name): - """Return a context manager used by captured_stdout/stdin/stderr - that temporarily replaces the sys stream *stream_name* with a StringIO. - - Taken from Lib/support/__init__.py in the CPython repo. 
- """ - orig_stdout = getattr(sys, stream_name) - setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout)) - try: - yield getattr(sys, stream_name) - finally: - setattr(sys, stream_name, orig_stdout) - - -def captured_stdout(): - """Capture the output of sys.stdout: - - with captured_stdout() as stdout: - print('hello') - self.assertEqual(stdout.getvalue(), 'hello\n') - - Taken from Lib/support/__init__.py in the CPython repo. - """ - return captured_output('stdout') - - -def captured_stderr(): - """ - See captured_stdout(). - """ - return captured_output('stderr') - - -class cached_property(object): - """A property that is only computed once per instance and then replaces - itself with an ordinary attribute. Deleting the attribute resets the - property. - - Source: https://github.com/bottlepy/bottle/blob/0.11.5/bottle.py#L175 - """ - - def __init__(self, func): - self.__doc__ = getattr(func, '__doc__') - self.func = func - - def __get__(self, obj, cls): - if obj is None: - # We're being accessed from the class itself, not from an object - return self - value = obj.__dict__[self.func.__name__] = self.func(obj) - return value - - -def get_installed_version(dist_name, working_set=None): - """Get the installed version of dist_name avoiding pkg_resources cache""" - # Create a requirement that we'll look for inside of setuptools. - req = pkg_resources.Requirement.parse(dist_name) - - if working_set is None: - # We want to avoid having this cached, so we need to construct a new - # working set each time. - working_set = pkg_resources.WorkingSet() - - # Get the installed distribution from our working set - dist = working_set.find(req) - - # Check to see if we got an installed distribution or not, if we did - # we want to return it's version. 
- return dist.version if dist else None - - -def consume(iterator): - """Consume an iterable at C speed.""" - deque(iterator, maxlen=0) - - -# Simulates an enum -def enum(*sequential, **named): - enums = dict(zip(sequential, range(len(sequential))), **named) - reverse = {value: key for key, value in enums.items()} - enums['reverse_mapping'] = reverse - return type('Enum', (), enums) - - -def make_vcs_requirement_url(repo_url, rev, project_name, subdir=None): - """ - Return the URL for a VCS requirement. - - Args: - repo_url: the remote VCS url, with any needed VCS prefix (e.g. "git+"). - project_name: the (unescaped) project name. - """ - egg_project_name = pkg_resources.to_filename(project_name) - req = '{}@{}#egg={}'.format(repo_url, rev, egg_project_name) - if subdir: - req += '&subdirectory={}'.format(subdir) - - return req - - -def split_auth_from_netloc(netloc): - """ - Parse out and remove the auth information from a netloc. - - Returns: (netloc, (username, password)). - """ - if '@' not in netloc: - return netloc, (None, None) - - # Split from the right because that's how urllib.parse.urlsplit() - # behaves if more than one @ is present (which can be checked using - # the password attribute of urlsplit()'s return value). - auth, netloc = netloc.rsplit('@', 1) - if ':' in auth: - # Split from the left because that's how urllib.parse.urlsplit() - # behaves if more than one : is present (which again can be checked - # using the password attribute of the return value) - user_pass = auth.split(':', 1) - else: - user_pass = auth, None - - user_pass = tuple( - None if x is None else urllib_unquote(x) for x in user_pass - ) - - return netloc, user_pass - - -def redact_netloc(netloc): - # type: (str) -> str - """ - Replace the password in a netloc with "****", if it exists. - - For example, "user:pass@example.com" returns "user:****@example.com". 
- """ - netloc, (user, password) = split_auth_from_netloc(netloc) - if user is None: - return netloc - password = '' if password is None else ':****' - return '{user}{password}@{netloc}'.format(user=urllib_parse.quote(user), - password=password, - netloc=netloc) - - -def _transform_url(url, transform_netloc): - purl = urllib_parse.urlsplit(url) - netloc = transform_netloc(purl.netloc) - # stripped url - url_pieces = ( - purl.scheme, netloc, purl.path, purl.query, purl.fragment - ) - surl = urllib_parse.urlunsplit(url_pieces) - return surl - - -def _get_netloc(netloc): - return split_auth_from_netloc(netloc)[0] - - -def remove_auth_from_url(url): - # type: (str) -> str - # Return a copy of url with 'username:password@' removed. - # username/pass params are passed to subversion through flags - # and are not recognized in the url. - return _transform_url(url, _get_netloc) - - -def redact_password_from_url(url): - # type: (str) -> str - """Replace the password in a given url with ****.""" - return _transform_url(url, redact_netloc) - - -def protect_pip_from_modification_on_windows(modifying_pip): - """Protection of pip.exe from modification on Windows - - On Windows, any operation modifying pip should be run as: - python -m pip ... 
- """ - pip_names = [ - "pip.exe", - "pip{}.exe".format(sys.version_info[0]), - "pip{}.{}.exe".format(*sys.version_info[:2]) - ] - - # See https://github.com/pypa/pip/issues/1299 for more discussion - should_show_use_python_msg = ( - modifying_pip and - WINDOWS and - os.path.basename(sys.argv[0]) in pip_names - ) - - if should_show_use_python_msg: - new_command = [ - sys.executable, "-m", "pip" - ] + sys.argv[1:] - raise CommandError( - 'To modify pip, please run the following command:\n{}' - .format(" ".join(new_command)) - ) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/models.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/models.py deleted file mode 100644 index d5cb80a..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/models.py +++ /dev/null @@ -1,40 +0,0 @@ -"""Utilities for defining models -""" - -import operator - - -class KeyBasedCompareMixin(object): - """Provides comparision capabilities that is based on a key - """ - - def __init__(self, key, defining_class): - self._compare_key = key - self._defining_class = defining_class - - def __hash__(self): - return hash(self._compare_key) - - def __lt__(self, other): - return self._compare(other, operator.__lt__) - - def __le__(self, other): - return self._compare(other, operator.__le__) - - def __gt__(self, other): - return self._compare(other, operator.__gt__) - - def __ge__(self, other): - return self._compare(other, operator.__ge__) - - def __eq__(self, other): - return self._compare(other, operator.__eq__) - - def __ne__(self, other): - return self._compare(other, operator.__ne__) - - def _compare(self, other, method): - if not isinstance(other, self._defining_class): - return NotImplemented - - return method(self._compare_key, other._compare_key) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/outdated.py 
b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/outdated.py deleted file mode 100644 index 37c47a4..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/outdated.py +++ /dev/null @@ -1,164 +0,0 @@ -from __future__ import absolute_import - -import datetime -import json -import logging -import os.path -import sys - -from pip._vendor import lockfile, pkg_resources -from pip._vendor.packaging import version as packaging_version - -from pip._internal.index import PackageFinder -from pip._internal.utils.compat import WINDOWS -from pip._internal.utils.filesystem import check_path_owner -from pip._internal.utils.misc import ensure_dir, get_installed_version -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - import optparse # noqa: F401 - from typing import Any, Dict # noqa: F401 - from pip._internal.download import PipSession # noqa: F401 - - -SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ" - - -logger = logging.getLogger(__name__) - - -class SelfCheckState(object): - def __init__(self, cache_dir): - # type: (str) -> None - self.state = {} # type: Dict[str, Any] - self.statefile_path = None - - # Try to load the existing state - if cache_dir: - self.statefile_path = os.path.join(cache_dir, "selfcheck.json") - try: - with open(self.statefile_path) as statefile: - self.state = json.load(statefile)[sys.prefix] - except (IOError, ValueError, KeyError): - # Explicitly suppressing exceptions, since we don't want to - # error out if the cache file is invalid. - pass - - def save(self, pypi_version, current_time): - # type: (str, datetime.datetime) -> None - # If we do not have a path to cache in, don't bother saving. 
- if not self.statefile_path: - return - - # Check to make sure that we own the directory - if not check_path_owner(os.path.dirname(self.statefile_path)): - return - - # Now that we've ensured the directory is owned by this user, we'll go - # ahead and make sure that all our directories are created. - ensure_dir(os.path.dirname(self.statefile_path)) - - # Attempt to write out our version check file - with lockfile.LockFile(self.statefile_path): - if os.path.exists(self.statefile_path): - with open(self.statefile_path) as statefile: - state = json.load(statefile) - else: - state = {} - - state[sys.prefix] = { - "last_check": current_time.strftime(SELFCHECK_DATE_FMT), - "pypi_version": pypi_version, - } - - with open(self.statefile_path, "w") as statefile: - json.dump(state, statefile, sort_keys=True, - separators=(",", ":")) - - -def was_installed_by_pip(pkg): - # type: (str) -> bool - """Checks whether pkg was installed by pip - - This is used not to display the upgrade message when pip is in fact - installed by system package manager, such as dnf on Fedora. - """ - try: - dist = pkg_resources.get_distribution(pkg) - return (dist.has_metadata('INSTALLER') and - 'pip' in dist.get_metadata_lines('INSTALLER')) - except pkg_resources.DistributionNotFound: - return False - - -def pip_version_check(session, options): - # type: (PipSession, optparse.Values) -> None - """Check for an update for pip. - - Limit the frequency of checks to once per week. State is stored either in - the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix - of the pip script path. 
- """ - installed_version = get_installed_version("pip") - if not installed_version: - return - - pip_version = packaging_version.parse(installed_version) - pypi_version = None - - try: - state = SelfCheckState(cache_dir=options.cache_dir) - - current_time = datetime.datetime.utcnow() - # Determine if we need to refresh the state - if "last_check" in state.state and "pypi_version" in state.state: - last_check = datetime.datetime.strptime( - state.state["last_check"], - SELFCHECK_DATE_FMT - ) - if (current_time - last_check).total_seconds() < 7 * 24 * 60 * 60: - pypi_version = state.state["pypi_version"] - - # Refresh the version if we need to or just see if we need to warn - if pypi_version is None: - # Lets use PackageFinder to see what the latest pip version is - finder = PackageFinder( - find_links=options.find_links, - index_urls=[options.index_url] + options.extra_index_urls, - allow_all_prereleases=False, # Explicitly set to False - trusted_hosts=options.trusted_hosts, - session=session, - ) - all_candidates = finder.find_all_candidates("pip") - if not all_candidates: - return - pypi_version = str( - max(all_candidates, key=lambda c: c.version).version - ) - - # save that we've performed a check - state.save(pypi_version, current_time) - - remote_version = packaging_version.parse(pypi_version) - - # Determine if our pypi_version is older - if (pip_version < remote_version and - pip_version.base_version != remote_version.base_version and - was_installed_by_pip('pip')): - # Advise "python -m pip" on Windows to avoid issues - # with overwriting pip.exe. 
- if WINDOWS: - pip_cmd = "python -m pip" - else: - pip_cmd = "pip" - logger.warning( - "You are using pip version %s, however version %s is " - "available.\nYou should consider upgrading via the " - "'%s install --upgrade pip' command.", - pip_version, pypi_version, pip_cmd - ) - except Exception: - logger.debug( - "There was an error checking the latest version of pip", - exc_info=True, - ) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/packaging.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/packaging.py deleted file mode 100644 index 7aaf7b5..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/packaging.py +++ /dev/null @@ -1,85 +0,0 @@ -from __future__ import absolute_import - -import logging -import sys -from email.parser import FeedParser - -from pip._vendor import pkg_resources -from pip._vendor.packaging import specifiers, version - -from pip._internal import exceptions -from pip._internal.utils.misc import display_path -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from typing import Optional # noqa: F401 - from email.message import Message # noqa: F401 - from pip._vendor.pkg_resources import Distribution # noqa: F401 - - -logger = logging.getLogger(__name__) - - -def check_requires_python(requires_python): - # type: (Optional[str]) -> bool - """ - Check if the python version in use match the `requires_python` specifier. - - Returns `True` if the version of python in use matches the requirement. - Returns `False` if the version of python in use does not matches the - requirement. - - Raises an InvalidSpecifier if `requires_python` have an invalid format. 
- """ - if requires_python is None: - # The package provides no information - return True - requires_python_specifier = specifiers.SpecifierSet(requires_python) - - # We only use major.minor.micro - python_version = version.parse('.'.join(map(str, sys.version_info[:3]))) - return python_version in requires_python_specifier - - -def get_metadata(dist): - # type: (Distribution) -> Message - if (isinstance(dist, pkg_resources.DistInfoDistribution) and - dist.has_metadata('METADATA')): - metadata = dist.get_metadata('METADATA') - elif dist.has_metadata('PKG-INFO'): - metadata = dist.get_metadata('PKG-INFO') - else: - logger.warning("No metadata found in %s", display_path(dist.location)) - metadata = '' - - feed_parser = FeedParser() - feed_parser.feed(metadata) - return feed_parser.close() - - -def check_dist_requires_python(dist): - pkg_info_dict = get_metadata(dist) - requires_python = pkg_info_dict.get('Requires-Python') - try: - if not check_requires_python(requires_python): - raise exceptions.UnsupportedPythonVersion( - "%s requires Python '%s' but the running Python is %s" % ( - dist.project_name, - requires_python, - '.'.join(map(str, sys.version_info[:3])),) - ) - except specifiers.InvalidSpecifier as e: - logger.warning( - "Package %s has an invalid Requires-Python entry %s - %s", - dist.project_name, requires_python, e, - ) - return - - -def get_installer(dist): - # type: (Distribution) -> str - if dist.has_metadata('INSTALLER'): - for line in dist.get_metadata_lines('INSTALLER'): - if line.strip(): - return line.strip() - return '' diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/setuptools_build.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/setuptools_build.py deleted file mode 100644 index 03973e9..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/setuptools_build.py +++ /dev/null @@ -1,8 +0,0 @@ -# Shim to wrap setup.py invocation with setuptools 
-SETUPTOOLS_SHIM = ( - "import setuptools, tokenize;__file__=%r;" - "f=getattr(tokenize, 'open', open)(__file__);" - "code=f.read().replace('\\r\\n', '\\n');" - "f.close();" - "exec(compile(code, __file__, 'exec'))" -) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/temp_dir.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/temp_dir.py deleted file mode 100644 index 2c81ad5..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/temp_dir.py +++ /dev/null @@ -1,155 +0,0 @@ -from __future__ import absolute_import - -import errno -import itertools -import logging -import os.path -import tempfile - -from pip._internal.utils.misc import rmtree - -logger = logging.getLogger(__name__) - - -class TempDirectory(object): - """Helper class that owns and cleans up a temporary directory. - - This class can be used as a context manager or as an OO representation of a - temporary directory. - - Attributes: - path - Location to the created temporary directory or None - delete - Whether the directory should be deleted when exiting - (when used as a contextmanager) - - Methods: - create() - Creates a temporary directory and stores its path in the path - attribute. - cleanup() - Deletes the temporary directory and sets path attribute to None - - When used as a context manager, a temporary directory is created on - entering the context and, if the delete attribute is True, on exiting the - context the created directory is deleted. - """ - - def __init__(self, path=None, delete=None, kind="temp"): - super(TempDirectory, self).__init__() - - if path is None and delete is None: - # If we were not given an explicit directory, and we were not given - # an explicit delete option, then we'll default to deleting. 
- delete = True - - self.path = path - self.delete = delete - self.kind = kind - - def __repr__(self): - return "<{} {!r}>".format(self.__class__.__name__, self.path) - - def __enter__(self): - self.create() - return self - - def __exit__(self, exc, value, tb): - if self.delete: - self.cleanup() - - def create(self): - """Create a temporary directory and store its path in self.path - """ - if self.path is not None: - logger.debug( - "Skipped creation of temporary directory: {}".format(self.path) - ) - return - # We realpath here because some systems have their default tmpdir - # symlinked to another directory. This tends to confuse build - # scripts, so we canonicalize the path by traversing potential - # symlinks here. - self.path = os.path.realpath( - tempfile.mkdtemp(prefix="pip-{}-".format(self.kind)) - ) - logger.debug("Created temporary directory: {}".format(self.path)) - - def cleanup(self): - """Remove the temporary directory created and reset state - """ - if self.path is not None and os.path.exists(self.path): - rmtree(self.path) - self.path = None - - -class AdjacentTempDirectory(TempDirectory): - """Helper class that creates a temporary directory adjacent to a real one. - - Attributes: - original - The original directory to create a temp directory for. - path - After calling create() or entering, contains the full - path to the temporary directory. - delete - Whether the directory should be deleted when exiting - (when used as a contextmanager) - - """ - # The characters that may be used to name the temp directory - # We always prepend a ~ and then rotate through these until - # a usable name is found. 
- # pkg_resources raises a different error for .dist-info folder - # with leading '-' and invalid metadata - LEADING_CHARS = "-~.=%0123456789" - - def __init__(self, original, delete=None): - super(AdjacentTempDirectory, self).__init__(delete=delete) - self.original = original.rstrip('/\\') - - @classmethod - def _generate_names(cls, name): - """Generates a series of temporary names. - - The algorithm replaces the leading characters in the name - with ones that are valid filesystem characters, but are not - valid package names (for both Python and pip definitions of - package). - """ - for i in range(1, len(name)): - for candidate in itertools.combinations_with_replacement( - cls.LEADING_CHARS, i - 1): - new_name = '~' + ''.join(candidate) + name[i:] - if new_name != name: - yield new_name - - # If we make it this far, we will have to make a longer name - for i in range(len(cls.LEADING_CHARS)): - for candidate in itertools.combinations_with_replacement( - cls.LEADING_CHARS, i): - new_name = '~' + ''.join(candidate) + name - if new_name != name: - yield new_name - - def create(self): - root, name = os.path.split(self.original) - for candidate in self._generate_names(name): - path = os.path.join(root, candidate) - try: - os.mkdir(path) - except OSError as ex: - # Continue if the name exists already - if ex.errno != errno.EEXIST: - raise - else: - self.path = os.path.realpath(path) - break - - if not self.path: - # Final fallback on the default behavior. 
- self.path = os.path.realpath( - tempfile.mkdtemp(prefix="pip-{}-".format(self.kind)) - ) - logger.debug("Created temporary directory: {}".format(self.path)) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/typing.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/typing.py deleted file mode 100644 index e085cdf..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/typing.py +++ /dev/null @@ -1,29 +0,0 @@ -"""For neatly implementing static typing in pip. - -`mypy` - the static type analysis tool we use - uses the `typing` module, which -provides core functionality fundamental to mypy's functioning. - -Generally, `typing` would be imported at runtime and used in that fashion - -it acts as a no-op at runtime and does not have any run-time overhead by -design. - -As it turns out, `typing` is not vendorable - it uses separate sources for -Python 2/Python 3. Thus, this codebase can not expect it to be present. -To work around this, mypy allows the typing import to be behind a False-y -optional to prevent it from running at runtime and type-comments can be used -to remove the need for the types to be accessible directly during runtime. - -This module provides the False-y guard in a nicely named fashion so that a -curious maintainer can reach here to read this. - -In pip, all static-typing related imports should be guarded as follows: - - from pip._internal.utils.typing import MYPY_CHECK_RUNNING - - if MYPY_CHECK_RUNNING: - from typing import ... 
# noqa: F401 - -Ref: https://github.com/python/mypy/issues/3216 -""" - -MYPY_CHECK_RUNNING = False diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/ui.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/ui.py deleted file mode 100644 index 433675d..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/utils/ui.py +++ /dev/null @@ -1,441 +0,0 @@ -from __future__ import absolute_import, division - -import contextlib -import itertools -import logging -import sys -import time -from signal import SIGINT, default_int_handler, signal - -from pip._vendor import six -from pip._vendor.progress.bar import ( - Bar, ChargingBar, FillingCirclesBar, FillingSquaresBar, IncrementalBar, - ShadyBar, -) -from pip._vendor.progress.helpers import HIDE_CURSOR, SHOW_CURSOR, WritelnMixin -from pip._vendor.progress.spinner import Spinner - -from pip._internal.utils.compat import WINDOWS -from pip._internal.utils.logging import get_indentation -from pip._internal.utils.misc import format_size -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from typing import Any, Iterator, IO # noqa: F401 - -try: - from pip._vendor import colorama -# Lots of different errors can come from this, including SystemError and -# ImportError. -except Exception: - colorama = None - -logger = logging.getLogger(__name__) - - -def _select_progress_class(preferred, fallback): - encoding = getattr(preferred.file, "encoding", None) - - # If we don't know what encoding this file is in, then we'll just assume - # that it doesn't support unicode and use the ASCII bar. - if not encoding: - return fallback - - # Collect all of the possible characters we want to use with the preferred - # bar. 
- characters = [ - getattr(preferred, "empty_fill", six.text_type()), - getattr(preferred, "fill", six.text_type()), - ] - characters += list(getattr(preferred, "phases", [])) - - # Try to decode the characters we're using for the bar using the encoding - # of the given file, if this works then we'll assume that we can use the - # fancier bar and if not we'll fall back to the plaintext bar. - try: - six.text_type().join(characters).encode(encoding) - except UnicodeEncodeError: - return fallback - else: - return preferred - - -_BaseBar = _select_progress_class(IncrementalBar, Bar) # type: Any - - -class InterruptibleMixin(object): - """ - Helper to ensure that self.finish() gets called on keyboard interrupt. - - This allows downloads to be interrupted without leaving temporary state - (like hidden cursors) behind. - - This class is similar to the progress library's existing SigIntMixin - helper, but as of version 1.2, that helper has the following problems: - - 1. It calls sys.exit(). - 2. It discards the existing SIGINT handler completely. - 3. It leaves its own handler in place even after an uninterrupted finish, - which will have unexpected delayed effects if the user triggers an - unrelated keyboard interrupt some time after a progress-displaying - download has already completed, for example. - """ - - def __init__(self, *args, **kwargs): - """ - Save the original SIGINT handler for later. - """ - super(InterruptibleMixin, self).__init__(*args, **kwargs) - - self.original_handler = signal(SIGINT, self.handle_sigint) - - # If signal() returns None, the previous handler was not installed from - # Python, and we cannot restore it. This probably should not happen, - # but if it does, we must restore something sensible instead, at least. - # The least bad option should be Python's default SIGINT handler, which - # just raises KeyboardInterrupt. 
- if self.original_handler is None: - self.original_handler = default_int_handler - - def finish(self): - """ - Restore the original SIGINT handler after finishing. - - This should happen regardless of whether the progress display finishes - normally, or gets interrupted. - """ - super(InterruptibleMixin, self).finish() - signal(SIGINT, self.original_handler) - - def handle_sigint(self, signum, frame): - """ - Call self.finish() before delegating to the original SIGINT handler. - - This handler should only be in place while the progress display is - active. - """ - self.finish() - self.original_handler(signum, frame) - - -class SilentBar(Bar): - - def update(self): - pass - - -class BlueEmojiBar(IncrementalBar): - - suffix = "%(percent)d%%" - bar_prefix = " " - bar_suffix = " " - phases = (u"\U0001F539", u"\U0001F537", u"\U0001F535") # type: Any - - -class DownloadProgressMixin(object): - - def __init__(self, *args, **kwargs): - super(DownloadProgressMixin, self).__init__(*args, **kwargs) - self.message = (" " * (get_indentation() + 2)) + self.message - - @property - def downloaded(self): - return format_size(self.index) - - @property - def download_speed(self): - # Avoid zero division errors... - if self.avg == 0.0: - return "..." - return format_size(1 / self.avg) + "/s" - - @property - def pretty_eta(self): - if self.eta: - return "eta %s" % self.eta_td - return "" - - def iter(self, it, n=1): - for x in it: - yield x - self.next(n) - self.finish() - - -class WindowsMixin(object): - - def __init__(self, *args, **kwargs): - # The Windows terminal does not support the hide/show cursor ANSI codes - # even with colorama. So we'll ensure that hide_cursor is False on - # Windows. - # This call neds to go before the super() call, so that hide_cursor - # is set in time. The base progress bar class writes the "hide cursor" - # code to the terminal in its init, so if we don't set this soon - # enough, we get a "hide" with no corresponding "show"... 
- if WINDOWS and self.hide_cursor: - self.hide_cursor = False - - super(WindowsMixin, self).__init__(*args, **kwargs) - - # Check if we are running on Windows and we have the colorama module, - # if we do then wrap our file with it. - if WINDOWS and colorama: - self.file = colorama.AnsiToWin32(self.file) - # The progress code expects to be able to call self.file.isatty() - # but the colorama.AnsiToWin32() object doesn't have that, so we'll - # add it. - self.file.isatty = lambda: self.file.wrapped.isatty() - # The progress code expects to be able to call self.file.flush() - # but the colorama.AnsiToWin32() object doesn't have that, so we'll - # add it. - self.file.flush = lambda: self.file.wrapped.flush() - - -class BaseDownloadProgressBar(WindowsMixin, InterruptibleMixin, - DownloadProgressMixin): - - file = sys.stdout - message = "%(percent)d%%" - suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s" - -# NOTE: The "type: ignore" comments on the following classes are there to -# work around https://github.com/python/typing/issues/241 - - -class DefaultDownloadProgressBar(BaseDownloadProgressBar, - _BaseBar): - pass - - -class DownloadSilentBar(BaseDownloadProgressBar, SilentBar): # type: ignore - pass - - -class DownloadIncrementalBar(BaseDownloadProgressBar, # type: ignore - IncrementalBar): - pass - - -class DownloadChargingBar(BaseDownloadProgressBar, # type: ignore - ChargingBar): - pass - - -class DownloadShadyBar(BaseDownloadProgressBar, ShadyBar): # type: ignore - pass - - -class DownloadFillingSquaresBar(BaseDownloadProgressBar, # type: ignore - FillingSquaresBar): - pass - - -class DownloadFillingCirclesBar(BaseDownloadProgressBar, # type: ignore - FillingCirclesBar): - pass - - -class DownloadBlueEmojiProgressBar(BaseDownloadProgressBar, # type: ignore - BlueEmojiBar): - pass - - -class DownloadProgressSpinner(WindowsMixin, InterruptibleMixin, - DownloadProgressMixin, WritelnMixin, Spinner): - - file = sys.stdout - suffix = "%(downloaded)s 
%(download_speed)s" - - def next_phase(self): - if not hasattr(self, "_phaser"): - self._phaser = itertools.cycle(self.phases) - return next(self._phaser) - - def update(self): - message = self.message % self - phase = self.next_phase() - suffix = self.suffix % self - line = ''.join([ - message, - " " if message else "", - phase, - " " if suffix else "", - suffix, - ]) - - self.writeln(line) - - -BAR_TYPES = { - "off": (DownloadSilentBar, DownloadSilentBar), - "on": (DefaultDownloadProgressBar, DownloadProgressSpinner), - "ascii": (DownloadIncrementalBar, DownloadProgressSpinner), - "pretty": (DownloadFillingCirclesBar, DownloadProgressSpinner), - "emoji": (DownloadBlueEmojiProgressBar, DownloadProgressSpinner) -} - - -def DownloadProgressProvider(progress_bar, max=None): - if max is None or max == 0: - return BAR_TYPES[progress_bar][1]().iter - else: - return BAR_TYPES[progress_bar][0](max=max).iter - - -################################################################ -# Generic "something is happening" spinners -# -# We don't even try using progress.spinner.Spinner here because it's actually -# simpler to reimplement from scratch than to coerce their code into doing -# what we need. -################################################################ - -@contextlib.contextmanager -def hidden_cursor(file): - # type: (IO) -> Iterator[None] - # The Windows terminal does not support the hide/show cursor ANSI codes, - # even via colorama. So don't even try. - if WINDOWS: - yield - # We don't want to clutter the output with control characters if we're - # writing to a file, or if the user is running with --quiet. 
- # See https://github.com/pypa/pip/issues/3418 - elif not file.isatty() or logger.getEffectiveLevel() > logging.INFO: - yield - else: - file.write(HIDE_CURSOR) - try: - yield - finally: - file.write(SHOW_CURSOR) - - -class RateLimiter(object): - def __init__(self, min_update_interval_seconds): - # type: (float) -> None - self._min_update_interval_seconds = min_update_interval_seconds - self._last_update = 0 # type: float - - def ready(self): - # type: () -> bool - now = time.time() - delta = now - self._last_update - return delta >= self._min_update_interval_seconds - - def reset(self): - # type: () -> None - self._last_update = time.time() - - -class SpinnerInterface(object): - def spin(self): - # type: () -> None - raise NotImplementedError() - - def finish(self, final_status): - # type: (str) -> None - raise NotImplementedError() - - -class InteractiveSpinner(SpinnerInterface): - def __init__(self, message, file=None, spin_chars="-\\|/", - # Empirically, 8 updates/second looks nice - min_update_interval_seconds=0.125): - self._message = message - if file is None: - file = sys.stdout - self._file = file - self._rate_limiter = RateLimiter(min_update_interval_seconds) - self._finished = False - - self._spin_cycle = itertools.cycle(spin_chars) - - self._file.write(" " * get_indentation() + self._message + " ... 
") - self._width = 0 - - def _write(self, status): - assert not self._finished - # Erase what we wrote before by backspacing to the beginning, writing - # spaces to overwrite the old text, and then backspacing again - backup = "\b" * self._width - self._file.write(backup + " " * self._width + backup) - # Now we have a blank slate to add our status - self._file.write(status) - self._width = len(status) - self._file.flush() - self._rate_limiter.reset() - - def spin(self): - # type: () -> None - if self._finished: - return - if not self._rate_limiter.ready(): - return - self._write(next(self._spin_cycle)) - - def finish(self, final_status): - # type: (str) -> None - if self._finished: - return - self._write(final_status) - self._file.write("\n") - self._file.flush() - self._finished = True - - -# Used for dumb terminals, non-interactive installs (no tty), etc. -# We still print updates occasionally (once every 60 seconds by default) to -# act as a keep-alive for systems like Travis-CI that take lack-of-output as -# an indication that a task has frozen. 
-class NonInteractiveSpinner(SpinnerInterface): - def __init__(self, message, min_update_interval_seconds=60): - # type: (str, float) -> None - self._message = message - self._finished = False - self._rate_limiter = RateLimiter(min_update_interval_seconds) - self._update("started") - - def _update(self, status): - assert not self._finished - self._rate_limiter.reset() - logger.info("%s: %s", self._message, status) - - def spin(self): - # type: () -> None - if self._finished: - return - if not self._rate_limiter.ready(): - return - self._update("still running...") - - def finish(self, final_status): - # type: (str) -> None - if self._finished: - return - self._update("finished with status '%s'" % (final_status,)) - self._finished = True - - -@contextlib.contextmanager -def open_spinner(message): - # type: (str) -> Iterator[SpinnerInterface] - # Interactive spinner goes directly to sys.stdout rather than being routed - # through the logging system, but it acts like it has level INFO, - # i.e. it's only displayed if we're at level INFO or better. - # Non-interactive spinner goes through the logging system, so it is always - # in sync with logging configuration. 
- if sys.stdout.isatty() and logger.getEffectiveLevel() <= logging.INFO: - spinner = InteractiveSpinner(message) # type: SpinnerInterface - else: - spinner = NonInteractiveSpinner(message) - try: - with hidden_cursor(sys.stdout): - yield spinner - except KeyboardInterrupt: - spinner.finish("canceled") - raise - except Exception: - spinner.finish("error") - raise - else: - spinner.finish("done") diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/vcs/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/vcs/__init__.py deleted file mode 100644 index 9cba764..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/vcs/__init__.py +++ /dev/null @@ -1,534 +0,0 @@ -"""Handles all VCS (version control) support""" -from __future__ import absolute_import - -import errno -import logging -import os -import shutil -import sys - -from pip._vendor.six.moves.urllib import parse as urllib_parse - -from pip._internal.exceptions import BadCommand -from pip._internal.utils.misc import ( - display_path, backup_dir, call_subprocess, rmtree, ask_path_exists, -) -from pip._internal.utils.typing import MYPY_CHECK_RUNNING - -if MYPY_CHECK_RUNNING: - from typing import ( # noqa: F401 - Any, Dict, Iterable, List, Mapping, Optional, Text, Tuple, Type - ) - from pip._internal.utils.ui import SpinnerInterface # noqa: F401 - - AuthInfo = Tuple[Optional[str], Optional[str]] - -__all__ = ['vcs'] - - -logger = logging.getLogger(__name__) - - -class RemoteNotFoundError(Exception): - pass - - -class RevOptions(object): - - """ - Encapsulates a VCS-specific revision to install, along with any VCS - install options. - - Instances of this class should be treated as if immutable. - """ - - def __init__(self, vcs, rev=None, extra_args=None): - # type: (VersionControl, Optional[str], Optional[List[str]]) -> None - """ - Args: - vcs: a VersionControl object. - rev: the name of the revision to install. 
- extra_args: a list of extra options. - """ - if extra_args is None: - extra_args = [] - - self.extra_args = extra_args - self.rev = rev - self.vcs = vcs - - def __repr__(self): - return '<RevOptions {}: rev={!r}>'.format(self.vcs.name, self.rev) - - @property - def arg_rev(self): - # type: () -> Optional[str] - if self.rev is None: - return self.vcs.default_arg_rev - - return self.rev - - def to_args(self): - # type: () -> List[str] - """ - Return the VCS-specific command arguments. - """ - args = [] # type: List[str] - rev = self.arg_rev - if rev is not None: - args += self.vcs.get_base_rev_args(rev) - args += self.extra_args - - return args - - def to_display(self): - # type: () -> str - if not self.rev: - return '' - - return ' (to revision {})'.format(self.rev) - - def make_new(self, rev): - # type: (str) -> RevOptions - """ - Make a copy of the current instance, but with a new rev. - - Args: - rev: the name of the revision for the new object. - """ - return self.vcs.make_rev_options(rev, extra_args=self.extra_args) - - -class VcsSupport(object): - _registry = {} # type: Dict[str, Type[VersionControl]] - schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn'] - - def __init__(self): - # type: () -> None - # Register more schemes with urlparse for various version control - # systems - urllib_parse.uses_netloc.extend(self.schemes) - # Python >= 2.7.4, 3.3 doesn't have uses_fragment - if getattr(urllib_parse, 'uses_fragment', None): - urllib_parse.uses_fragment.extend(self.schemes) - super(VcsSupport, self).__init__() - - def __iter__(self): - return self._registry.__iter__() - - @property - def backends(self): - # type: () -> List[Type[VersionControl]] - return list(self._registry.values()) - - @property - def dirnames(self): - # type: () -> List[str] - return [backend.dirname for backend in self.backends] - - @property - def all_schemes(self): - # type: () -> List[str] - schemes = [] # type: List[str] - for backend in self.backends: - 
schemes.extend(backend.schemes) - return schemes - - def register(self, cls): - # type: (Type[VersionControl]) -> None - if not hasattr(cls, 'name'): - logger.warning('Cannot register VCS %s', cls.__name__) - return - if cls.name not in self._registry: - self._registry[cls.name] = cls - logger.debug('Registered VCS backend: %s', cls.name) - - def unregister(self, cls=None, name=None): - # type: (Optional[Type[VersionControl]], Optional[str]) -> None - if name in self._registry: - del self._registry[name] - elif cls in self._registry.values(): - del self._registry[cls.name] - else: - logger.warning('Cannot unregister because no class or name given') - - def get_backend_type(self, location): - # type: (str) -> Optional[Type[VersionControl]] - """ - Return the type of the version control backend if found at given - location, e.g. vcs.get_backend_type('/path/to/vcs/checkout') - """ - for vc_type in self._registry.values(): - if vc_type.controls_location(location): - logger.debug('Determine that %s uses VCS: %s', - location, vc_type.name) - return vc_type - return None - - def get_backend(self, name): - # type: (str) -> Optional[Type[VersionControl]] - name = name.lower() - if name in self._registry: - return self._registry[name] - return None - - -vcs = VcsSupport() - - -class VersionControl(object): - name = '' - dirname = '' - repo_name = '' - # List of supported schemes for this Version Control - schemes = () # type: Tuple[str, ...] - # Iterable of environment variable names to pass to call_subprocess(). - unset_environ = () # type: Tuple[str, ...] - default_arg_rev = None # type: Optional[str] - - def __init__(self, url=None, *args, **kwargs): - self.url = url - super(VersionControl, self).__init__(*args, **kwargs) - - def get_base_rev_args(self, rev): - """ - Return the base revision arguments for a vcs command. - - Args: - rev: the name of a revision to install. Cannot be None. 
- """ - raise NotImplementedError - - def make_rev_options(self, rev=None, extra_args=None): - # type: (Optional[str], Optional[List[str]]) -> RevOptions - """ - Return a RevOptions object. - - Args: - rev: the name of a revision to install. - extra_args: a list of extra options. - """ - return RevOptions(self, rev, extra_args=extra_args) - - @classmethod - def _is_local_repository(cls, repo): - # type: (str) -> bool - """ - posix absolute paths start with os.path.sep, - win32 ones start with drive (like c:\\folder) - """ - drive, tail = os.path.splitdrive(repo) - return repo.startswith(os.path.sep) or bool(drive) - - def export(self, location): - """ - Export the repository at the url to the destination location - i.e. only download the files, without vcs informations - """ - raise NotImplementedError - - def get_netloc_and_auth(self, netloc, scheme): - """ - Parse the repository URL's netloc, and return the new netloc to use - along with auth information. - - Args: - netloc: the original repository URL netloc. - scheme: the repository URL's scheme without the vcs prefix. - - This is mainly for the Subversion class to override, so that auth - information can be provided via the --username and --password options - instead of through the URL. For other subclasses like Git without - such an option, auth information must stay in the URL. - - Returns: (netloc, (username, password)). - """ - return netloc, (None, None) - - def get_url_rev_and_auth(self, url): - # type: (str) -> Tuple[str, Optional[str], AuthInfo] - """ - Parse the repository URL to use, and return the URL, revision, - and auth info to use. - - Returns: (url, rev, (username, password)). - """ - scheme, netloc, path, query, frag = urllib_parse.urlsplit(url) - if '+' not in scheme: - raise ValueError( - "Sorry, {!r} is a malformed VCS url. " - "The format is <vcs>+<protocol>://<url>, " - "e.g. svn+http://myrepo/svn/MyApp#egg=MyApp".format(url) - ) - # Remove the vcs prefix. 
- scheme = scheme.split('+', 1)[1] - netloc, user_pass = self.get_netloc_and_auth(netloc, scheme) - rev = None - if '@' in path: - path, rev = path.rsplit('@', 1) - url = urllib_parse.urlunsplit((scheme, netloc, path, query, '')) - return url, rev, user_pass - - def make_rev_args(self, username, password): - """ - Return the RevOptions "extra arguments" to use in obtain(). - """ - return [] - - def get_url_rev_options(self, url): - # type: (str) -> Tuple[str, RevOptions] - """ - Return the URL and RevOptions object to use in obtain() and in - some cases export(), as a tuple (url, rev_options). - """ - url, rev, user_pass = self.get_url_rev_and_auth(url) - username, password = user_pass - extra_args = self.make_rev_args(username, password) - rev_options = self.make_rev_options(rev, extra_args=extra_args) - - return url, rev_options - - def normalize_url(self, url): - # type: (str) -> str - """ - Normalize a URL for comparison by unquoting it and removing any - trailing slash. - """ - return urllib_parse.unquote(url).rstrip('/') - - def compare_urls(self, url1, url2): - # type: (str, str) -> bool - """ - Compare two repo URLs for identity, ignoring incidental differences. - """ - return (self.normalize_url(url1) == self.normalize_url(url2)) - - def fetch_new(self, dest, url, rev_options): - """ - Fetch a revision from a repository, in the case that this is the - first fetch from the repository. - - Args: - dest: the directory to fetch the repository to. - rev_options: a RevOptions object. - """ - raise NotImplementedError - - def switch(self, dest, url, rev_options): - """ - Switch the repo at ``dest`` to point to ``URL``. - - Args: - rev_options: a RevOptions object. - """ - raise NotImplementedError - - def update(self, dest, url, rev_options): - """ - Update an already-existing repo to the given ``rev_options``. - - Args: - rev_options: a RevOptions object. 
- """ - raise NotImplementedError - - def is_commit_id_equal(self, dest, name): - """ - Return whether the id of the current commit equals the given name. - - Args: - dest: the repository directory. - name: a string name. - """ - raise NotImplementedError - - def obtain(self, dest): - # type: (str) -> None - """ - Install or update in editable mode the package represented by this - VersionControl object. - - Args: - dest: the repository directory in which to install or update. - """ - url, rev_options = self.get_url_rev_options(self.url) - - if not os.path.exists(dest): - self.fetch_new(dest, url, rev_options) - return - - rev_display = rev_options.to_display() - if self.is_repository_directory(dest): - existing_url = self.get_remote_url(dest) - if self.compare_urls(existing_url, url): - logger.debug( - '%s in %s exists, and has correct URL (%s)', - self.repo_name.title(), - display_path(dest), - url, - ) - if not self.is_commit_id_equal(dest, rev_options.rev): - logger.info( - 'Updating %s %s%s', - display_path(dest), - self.repo_name, - rev_display, - ) - self.update(dest, url, rev_options) - else: - logger.info('Skipping because already up-to-date.') - return - - logger.warning( - '%s %s in %s exists with URL %s', - self.name, - self.repo_name, - display_path(dest), - existing_url, - ) - prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ', - ('s', 'i', 'w', 'b')) - else: - logger.warning( - 'Directory %s already exists, and is not a %s %s.', - dest, - self.name, - self.repo_name, - ) - # https://github.com/python/mypy/issues/1174 - prompt = ('(i)gnore, (w)ipe, (b)ackup ', # type: ignore - ('i', 'w', 'b')) - - logger.warning( - 'The plan is to install the %s repository %s', - self.name, - url, - ) - response = ask_path_exists('What to do? 
%s' % prompt[0], prompt[1]) - - if response == 'a': - sys.exit(-1) - - if response == 'w': - logger.warning('Deleting %s', display_path(dest)) - rmtree(dest) - self.fetch_new(dest, url, rev_options) - return - - if response == 'b': - dest_dir = backup_dir(dest) - logger.warning( - 'Backing up %s to %s', display_path(dest), dest_dir, - ) - shutil.move(dest, dest_dir) - self.fetch_new(dest, url, rev_options) - return - - # Do nothing if the response is "i". - if response == 's': - logger.info( - 'Switching %s %s to %s%s', - self.repo_name, - display_path(dest), - url, - rev_display, - ) - self.switch(dest, url, rev_options) - - def unpack(self, location): - # type: (str) -> None - """ - Clean up current location and download the url repository - (and vcs infos) into location - """ - if os.path.exists(location): - rmtree(location) - self.obtain(location) - - @classmethod - def get_src_requirement(cls, location, project_name): - """ - Return a string representing the requirement needed to - redownload the files currently present in location, something - like: - {repository_url}@{revision}#egg={project_name}-{version_identifier} - """ - raise NotImplementedError - - @classmethod - def get_remote_url(cls, location): - """ - Return the url used at location - - Raises RemoteNotFoundError if the repository does not have a remote - url configured. - """ - raise NotImplementedError - - @classmethod - def get_revision(cls, location): - """ - Return the current commit id of the files at the given location. - """ - raise NotImplementedError - - @classmethod - def run_command( - cls, - cmd, # type: List[str] - show_stdout=True, # type: bool - cwd=None, # type: Optional[str] - on_returncode='raise', # type: str - extra_ok_returncodes=None, # type: Optional[Iterable[int]] - command_desc=None, # type: Optional[str] - extra_environ=None, # type: Optional[Mapping[str, Any]] - spinner=None # type: Optional[SpinnerInterface] - ): - # type: (...) 
-> Optional[Text] - """ - Run a VCS subcommand - This is simply a wrapper around call_subprocess that adds the VCS - command name, and checks that the VCS is available - """ - cmd = [cls.name] + cmd - try: - return call_subprocess(cmd, show_stdout, cwd, - on_returncode=on_returncode, - extra_ok_returncodes=extra_ok_returncodes, - command_desc=command_desc, - extra_environ=extra_environ, - unset_environ=cls.unset_environ, - spinner=spinner) - except OSError as e: - # errno.ENOENT = no such file or directory - # In other words, the VCS executable isn't available - if e.errno == errno.ENOENT: - raise BadCommand( - 'Cannot find command %r - do you have ' - '%r installed and in your ' - 'PATH?' % (cls.name, cls.name)) - else: - raise # re-raise exception if a different error occurred - - @classmethod - def is_repository_directory(cls, path): - # type: (str) -> bool - """ - Return whether a directory path is a repository directory. - """ - logger.debug('Checking in %s for %s (%s)...', - path, cls.dirname, cls.name) - return os.path.exists(os.path.join(path, cls.dirname)) - - @classmethod - def controls_location(cls, location): - # type: (str) -> bool - """ - Check if a location is controlled by the vcs. - It is meant to be overridden to implement smarter detection - mechanisms for specific vcs. - - This can do more than is_repository_directory() alone. For example, - the Git override checks that Git is actually available. 
- """ - return cls.is_repository_directory(location) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/vcs/bazaar.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/vcs/bazaar.py deleted file mode 100644 index 4c6ac79..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/vcs/bazaar.py +++ /dev/null @@ -1,114 +0,0 @@ -from __future__ import absolute_import - -import logging -import os - -from pip._vendor.six.moves.urllib import parse as urllib_parse - -from pip._internal.download import path_to_url -from pip._internal.utils.misc import ( - display_path, make_vcs_requirement_url, rmtree, -) -from pip._internal.utils.temp_dir import TempDirectory -from pip._internal.vcs import VersionControl, vcs - -logger = logging.getLogger(__name__) - - -class Bazaar(VersionControl): - name = 'bzr' - dirname = '.bzr' - repo_name = 'branch' - schemes = ( - 'bzr', 'bzr+http', 'bzr+https', 'bzr+ssh', 'bzr+sftp', 'bzr+ftp', - 'bzr+lp', - ) - - def __init__(self, url=None, *args, **kwargs): - super(Bazaar, self).__init__(url, *args, **kwargs) - # This is only needed for python <2.7.5 - # Register lp but do not expose as a scheme to support bzr+lp. 
- if getattr(urllib_parse, 'uses_fragment', None): - urllib_parse.uses_fragment.extend(['lp']) - - def get_base_rev_args(self, rev): - return ['-r', rev] - - def export(self, location): - """ - Export the Bazaar repository at the url to the destination location - """ - # Remove the location to make sure Bazaar can export it correctly - if os.path.exists(location): - rmtree(location) - - with TempDirectory(kind="export") as temp_dir: - self.unpack(temp_dir.path) - - self.run_command( - ['export', location], - cwd=temp_dir.path, show_stdout=False, - ) - - def fetch_new(self, dest, url, rev_options): - rev_display = rev_options.to_display() - logger.info( - 'Checking out %s%s to %s', - url, - rev_display, - display_path(dest), - ) - cmd_args = ['branch', '-q'] + rev_options.to_args() + [url, dest] - self.run_command(cmd_args) - - def switch(self, dest, url, rev_options): - self.run_command(['switch', url], cwd=dest) - - def update(self, dest, url, rev_options): - cmd_args = ['pull', '-q'] + rev_options.to_args() - self.run_command(cmd_args, cwd=dest) - - def get_url_rev_and_auth(self, url): - # hotfix the URL scheme after removing bzr+ from bzr+ssh:// readd it - url, rev, user_pass = super(Bazaar, self).get_url_rev_and_auth(url) - if url.startswith('ssh://'): - url = 'bzr+' + url - return url, rev, user_pass - - @classmethod - def get_remote_url(cls, location): - urls = cls.run_command(['info'], show_stdout=False, cwd=location) - for line in urls.splitlines(): - line = line.strip() - for x in ('checkout of branch: ', - 'parent branch: '): - if line.startswith(x): - repo = line.split(x)[1] - if cls._is_local_repository(repo): - return path_to_url(repo) - return repo - return None - - @classmethod - def get_revision(cls, location): - revision = cls.run_command( - ['revno'], show_stdout=False, cwd=location, - ) - return revision.splitlines()[-1] - - @classmethod - def get_src_requirement(cls, location, project_name): - repo = cls.get_remote_url(location) - if not repo: - 
return None - if not repo.lower().startswith('bzr:'): - repo = 'bzr+' + repo - current_rev = cls.get_revision(location) - return make_vcs_requirement_url(repo, current_rev, project_name) - - def is_commit_id_equal(self, dest, name): - """Always assume the versions don't match""" - return False - - -vcs.register(Bazaar) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/vcs/git.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/vcs/git.py deleted file mode 100644 index dd2bd61..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/vcs/git.py +++ /dev/null @@ -1,369 +0,0 @@ -from __future__ import absolute_import - -import logging -import os.path -import re - -from pip._vendor.packaging.version import parse as parse_version -from pip._vendor.six.moves.urllib import parse as urllib_parse -from pip._vendor.six.moves.urllib import request as urllib_request - -from pip._internal.exceptions import BadCommand -from pip._internal.utils.compat import samefile -from pip._internal.utils.misc import ( - display_path, make_vcs_requirement_url, redact_password_from_url, -) -from pip._internal.utils.temp_dir import TempDirectory -from pip._internal.vcs import RemoteNotFoundError, VersionControl, vcs - -urlsplit = urllib_parse.urlsplit -urlunsplit = urllib_parse.urlunsplit - - -logger = logging.getLogger(__name__) - - -HASH_REGEX = re.compile('[a-fA-F0-9]{40}') - - -def looks_like_hash(sha): - return bool(HASH_REGEX.match(sha)) - - -class Git(VersionControl): - name = 'git' - dirname = '.git' - repo_name = 'clone' - schemes = ( - 'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file', - ) - # Prevent the user's environment variables from interfering with pip: - # https://github.com/pypa/pip/issues/1130 - unset_environ = ('GIT_DIR', 'GIT_WORK_TREE') - default_arg_rev = 'HEAD' - - def __init__(self, url=None, *args, **kwargs): - - # Works around an apparent Git bug - # (see 
https://article.gmane.org/gmane.comp.version-control.git/146500) - if url: - scheme, netloc, path, query, fragment = urlsplit(url) - if scheme.endswith('file'): - initial_slashes = path[:-len(path.lstrip('/'))] - newpath = ( - initial_slashes + - urllib_request.url2pathname(path) - .replace('\\', '/').lstrip('/') - ) - url = urlunsplit((scheme, netloc, newpath, query, fragment)) - after_plus = scheme.find('+') + 1 - url = scheme[:after_plus] + urlunsplit( - (scheme[after_plus:], netloc, newpath, query, fragment), - ) - - super(Git, self).__init__(url, *args, **kwargs) - - def get_base_rev_args(self, rev): - return [rev] - - def get_git_version(self): - VERSION_PFX = 'git version ' - version = self.run_command(['version'], show_stdout=False) - if version.startswith(VERSION_PFX): - version = version[len(VERSION_PFX):].split()[0] - else: - version = '' - # get first 3 positions of the git version becasue - # on windows it is x.y.z.windows.t, and this parses as - # LegacyVersion which always smaller than a Version. - version = '.'.join(version.split('.')[:3]) - return parse_version(version) - - def get_current_branch(self, location): - """ - Return the current branch, or None if HEAD isn't at a branch - (e.g. detached HEAD). - """ - # git-symbolic-ref exits with empty stdout if "HEAD" is a detached - # HEAD rather than a symbolic ref. In addition, the -q causes the - # command to exit with status code 1 instead of 128 in this case - # and to suppress the message to stderr. 
- args = ['symbolic-ref', '-q', 'HEAD'] - output = self.run_command( - args, extra_ok_returncodes=(1, ), show_stdout=False, cwd=location, - ) - ref = output.strip() - - if ref.startswith('refs/heads/'): - return ref[len('refs/heads/'):] - - return None - - def export(self, location): - """Export the Git repository at the url to the destination location""" - if not location.endswith('/'): - location = location + '/' - - with TempDirectory(kind="export") as temp_dir: - self.unpack(temp_dir.path) - self.run_command( - ['checkout-index', '-a', '-f', '--prefix', location], - show_stdout=False, cwd=temp_dir.path - ) - - def get_revision_sha(self, dest, rev): - """ - Return (sha_or_none, is_branch), where sha_or_none is a commit hash - if the revision names a remote branch or tag, otherwise None. - - Args: - dest: the repository directory. - rev: the revision name. - """ - # Pass rev to pre-filter the list. - output = self.run_command(['show-ref', rev], cwd=dest, - show_stdout=False, on_returncode='ignore') - refs = {} - for line in output.strip().splitlines(): - try: - sha, ref = line.split() - except ValueError: - # Include the offending line to simplify troubleshooting if - # this error ever occurs. - raise ValueError('unexpected show-ref line: {!r}'.format(line)) - - refs[ref] = sha - - branch_ref = 'refs/remotes/origin/{}'.format(rev) - tag_ref = 'refs/tags/{}'.format(rev) - - sha = refs.get(branch_ref) - if sha is not None: - return (sha, True) - - sha = refs.get(tag_ref) - - return (sha, False) - - def resolve_revision(self, dest, url, rev_options): - """ - Resolve a revision to a new RevOptions object with the SHA1 of the - branch, tag, or ref if found. - - Args: - rev_options: a RevOptions object. 
- """ - rev = rev_options.arg_rev - sha, is_branch = self.get_revision_sha(dest, rev) - - if sha is not None: - rev_options = rev_options.make_new(sha) - rev_options.branch_name = rev if is_branch else None - - return rev_options - - # Do not show a warning for the common case of something that has - # the form of a Git commit hash. - if not looks_like_hash(rev): - logger.warning( - "Did not find branch or tag '%s', assuming revision or ref.", - rev, - ) - - if not rev.startswith('refs/'): - return rev_options - - # If it looks like a ref, we have to fetch it explicitly. - self.run_command( - ['fetch', '-q', url] + rev_options.to_args(), - cwd=dest, - ) - # Change the revision to the SHA of the ref we fetched - sha = self.get_revision(dest, rev='FETCH_HEAD') - rev_options = rev_options.make_new(sha) - - return rev_options - - def is_commit_id_equal(self, dest, name): - """ - Return whether the current commit hash equals the given name. - - Args: - dest: the repository directory. - name: a string name. - """ - if not name: - # Then avoid an unnecessary subprocess call. - return False - - return self.get_revision(dest) == name - - def fetch_new(self, dest, url, rev_options): - rev_display = rev_options.to_display() - logger.info( - 'Cloning %s%s to %s', redact_password_from_url(url), - rev_display, display_path(dest), - ) - self.run_command(['clone', '-q', url, dest]) - - if rev_options.rev: - # Then a specific revision was requested. - rev_options = self.resolve_revision(dest, url, rev_options) - branch_name = getattr(rev_options, 'branch_name', None) - if branch_name is None: - # Only do a checkout if the current commit id doesn't match - # the requested revision. - if not self.is_commit_id_equal(dest, rev_options.rev): - cmd_args = ['checkout', '-q'] + rev_options.to_args() - self.run_command(cmd_args, cwd=dest) - elif self.get_current_branch(dest) != branch_name: - # Then a specific branch was requested, and that branch - # is not yet checked out. 
- track_branch = 'origin/{}'.format(branch_name) - cmd_args = [ - 'checkout', '-b', branch_name, '--track', track_branch, - ] - self.run_command(cmd_args, cwd=dest) - - #: repo may contain submodules - self.update_submodules(dest) - - def switch(self, dest, url, rev_options): - self.run_command(['config', 'remote.origin.url', url], cwd=dest) - cmd_args = ['checkout', '-q'] + rev_options.to_args() - self.run_command(cmd_args, cwd=dest) - - self.update_submodules(dest) - - def update(self, dest, url, rev_options): - # First fetch changes from the default remote - if self.get_git_version() >= parse_version('1.9.0'): - # fetch tags in addition to everything else - self.run_command(['fetch', '-q', '--tags'], cwd=dest) - else: - self.run_command(['fetch', '-q'], cwd=dest) - # Then reset to wanted revision (maybe even origin/master) - rev_options = self.resolve_revision(dest, url, rev_options) - cmd_args = ['reset', '--hard', '-q'] + rev_options.to_args() - self.run_command(cmd_args, cwd=dest) - #: update submodules - self.update_submodules(dest) - - @classmethod - def get_remote_url(cls, location): - """ - Return URL of the first remote encountered. - - Raises RemoteNotFoundError if the repository does not have a remote - url configured. - """ - # We need to pass 1 for extra_ok_returncodes since the command - # exits with return code 1 if there are no matching lines. 
- stdout = cls.run_command( - ['config', '--get-regexp', r'remote\..*\.url'], - extra_ok_returncodes=(1, ), show_stdout=False, cwd=location, - ) - remotes = stdout.splitlines() - try: - found_remote = remotes[0] - except IndexError: - raise RemoteNotFoundError - - for remote in remotes: - if remote.startswith('remote.origin.url '): - found_remote = remote - break - url = found_remote.split(' ')[1] - return url.strip() - - @classmethod - def get_revision(cls, location, rev=None): - if rev is None: - rev = 'HEAD' - current_rev = cls.run_command( - ['rev-parse', rev], show_stdout=False, cwd=location, - ) - return current_rev.strip() - - @classmethod - def _get_subdirectory(cls, location): - """Return the relative path of setup.py to the git repo root.""" - # find the repo root - git_dir = cls.run_command(['rev-parse', '--git-dir'], - show_stdout=False, cwd=location).strip() - if not os.path.isabs(git_dir): - git_dir = os.path.join(location, git_dir) - root_dir = os.path.join(git_dir, '..') - # find setup.py - orig_location = location - while not os.path.exists(os.path.join(location, 'setup.py')): - last_location = location - location = os.path.dirname(location) - if location == last_location: - # We've traversed up to the root of the filesystem without - # finding setup.py - logger.warning( - "Could not find setup.py for directory %s (tried all " - "parent directories)", - orig_location, - ) - return None - # relative path of setup.py to repo root - if samefile(root_dir, location): - return None - return os.path.relpath(location, root_dir) - - @classmethod - def get_src_requirement(cls, location, project_name): - repo = cls.get_remote_url(location) - if not repo.lower().startswith('git:'): - repo = 'git+' + repo - current_rev = cls.get_revision(location) - subdir = cls._get_subdirectory(location) - req = make_vcs_requirement_url(repo, current_rev, project_name, - subdir=subdir) - - return req - - def get_url_rev_and_auth(self, url): - """ - Prefixes stub URLs like 
'user@hostname:user/repo.git' with 'ssh://'. - That's required because although they use SSH they sometimes don't - work with a ssh:// scheme (e.g. GitHub). But we need a scheme for - parsing. Hence we remove it again afterwards and return it as a stub. - """ - if '://' not in url: - assert 'file:' not in url - url = url.replace('git+', 'git+ssh://') - url, rev, user_pass = super(Git, self).get_url_rev_and_auth(url) - url = url.replace('ssh://', '') - else: - url, rev, user_pass = super(Git, self).get_url_rev_and_auth(url) - - return url, rev, user_pass - - def update_submodules(self, location): - if not os.path.exists(os.path.join(location, '.gitmodules')): - return - self.run_command( - ['submodule', 'update', '--init', '--recursive', '-q'], - cwd=location, - ) - - @classmethod - def controls_location(cls, location): - if super(Git, cls).controls_location(location): - return True - try: - r = cls.run_command(['rev-parse'], - cwd=location, - show_stdout=False, - on_returncode='ignore') - return not r - except BadCommand: - logger.debug("could not determine if %s is under git control " - "because git is not available", location) - return False - - -vcs.register(Git) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/vcs/mercurial.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/vcs/mercurial.py deleted file mode 100644 index 26e75de..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/vcs/mercurial.py +++ /dev/null @@ -1,103 +0,0 @@ -from __future__ import absolute_import - -import logging -import os - -from pip._vendor.six.moves import configparser - -from pip._internal.download import path_to_url -from pip._internal.utils.misc import display_path, make_vcs_requirement_url -from pip._internal.utils.temp_dir import TempDirectory -from pip._internal.vcs import VersionControl, vcs - -logger = logging.getLogger(__name__) - - -class Mercurial(VersionControl): - name = 'hg' - dirname = 
'.hg' - repo_name = 'clone' - schemes = ('hg', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http') - - def get_base_rev_args(self, rev): - return [rev] - - def export(self, location): - """Export the Hg repository at the url to the destination location""" - with TempDirectory(kind="export") as temp_dir: - self.unpack(temp_dir.path) - - self.run_command( - ['archive', location], show_stdout=False, cwd=temp_dir.path - ) - - def fetch_new(self, dest, url, rev_options): - rev_display = rev_options.to_display() - logger.info( - 'Cloning hg %s%s to %s', - url, - rev_display, - display_path(dest), - ) - self.run_command(['clone', '--noupdate', '-q', url, dest]) - cmd_args = ['update', '-q'] + rev_options.to_args() - self.run_command(cmd_args, cwd=dest) - - def switch(self, dest, url, rev_options): - repo_config = os.path.join(dest, self.dirname, 'hgrc') - config = configparser.SafeConfigParser() - try: - config.read(repo_config) - config.set('paths', 'default', url) - with open(repo_config, 'w') as config_file: - config.write(config_file) - except (OSError, configparser.NoSectionError) as exc: - logger.warning( - 'Could not switch Mercurial repository to %s: %s', url, exc, - ) - else: - cmd_args = ['update', '-q'] + rev_options.to_args() - self.run_command(cmd_args, cwd=dest) - - def update(self, dest, url, rev_options): - self.run_command(['pull', '-q'], cwd=dest) - cmd_args = ['update', '-q'] + rev_options.to_args() - self.run_command(cmd_args, cwd=dest) - - @classmethod - def get_remote_url(cls, location): - url = cls.run_command( - ['showconfig', 'paths.default'], - show_stdout=False, cwd=location).strip() - if cls._is_local_repository(url): - url = path_to_url(url) - return url.strip() - - @classmethod - def get_revision(cls, location): - current_revision = cls.run_command( - ['parents', '--template={rev}'], - show_stdout=False, cwd=location).strip() - return current_revision - - @classmethod - def get_revision_hash(cls, location): - current_rev_hash = 
cls.run_command( - ['parents', '--template={node}'], - show_stdout=False, cwd=location).strip() - return current_rev_hash - - @classmethod - def get_src_requirement(cls, location, project_name): - repo = cls.get_remote_url(location) - if not repo.lower().startswith('hg:'): - repo = 'hg+' + repo - current_rev_hash = cls.get_revision_hash(location) - return make_vcs_requirement_url(repo, current_rev_hash, project_name) - - def is_commit_id_equal(self, dest, name): - """Always assume the versions don't match""" - return False - - -vcs.register(Mercurial) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/vcs/subversion.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/vcs/subversion.py deleted file mode 100644 index 42ac5ac..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/vcs/subversion.py +++ /dev/null @@ -1,200 +0,0 @@ -from __future__ import absolute_import - -import logging -import os -import re - -from pip._internal.utils.logging import indent_log -from pip._internal.utils.misc import ( - display_path, make_vcs_requirement_url, rmtree, split_auth_from_netloc, -) -from pip._internal.vcs import VersionControl, vcs - -_svn_xml_url_re = re.compile('url="([^"]+)"') -_svn_rev_re = re.compile(r'committed-rev="(\d+)"') -_svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"') -_svn_info_xml_url_re = re.compile(r'<url>(.*)</url>') - - -logger = logging.getLogger(__name__) - - -class Subversion(VersionControl): - name = 'svn' - dirname = '.svn' - repo_name = 'checkout' - schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https', 'svn+svn') - - def get_base_rev_args(self, rev): - return ['-r', rev] - - def export(self, location): - """Export the svn repository at the url to the destination location""" - url, rev_options = self.get_url_rev_options(self.url) - - logger.info('Exporting svn repository %s to %s', url, location) - with indent_log(): - if os.path.exists(location): - # Subversion 
doesn't like to check out over an existing - # directory --force fixes this, but was only added in svn 1.5 - rmtree(location) - cmd_args = ['export'] + rev_options.to_args() + [url, location] - self.run_command(cmd_args, show_stdout=False) - - def fetch_new(self, dest, url, rev_options): - rev_display = rev_options.to_display() - logger.info( - 'Checking out %s%s to %s', - url, - rev_display, - display_path(dest), - ) - cmd_args = ['checkout', '-q'] + rev_options.to_args() + [url, dest] - self.run_command(cmd_args) - - def switch(self, dest, url, rev_options): - cmd_args = ['switch'] + rev_options.to_args() + [url, dest] - self.run_command(cmd_args) - - def update(self, dest, url, rev_options): - cmd_args = ['update'] + rev_options.to_args() + [dest] - self.run_command(cmd_args) - - @classmethod - def get_revision(cls, location): - """ - Return the maximum revision for all files under a given location - """ - # Note: taken from setuptools.command.egg_info - revision = 0 - - for base, dirs, files in os.walk(location): - if cls.dirname not in dirs: - dirs[:] = [] - continue # no sense walking uncontrolled subdirs - dirs.remove(cls.dirname) - entries_fn = os.path.join(base, cls.dirname, 'entries') - if not os.path.exists(entries_fn): - # FIXME: should we warn? - continue - - dirurl, localrev = cls._get_svn_url_rev(base) - - if base == location: - base = dirurl + '/' # save the root url - elif not dirurl or not dirurl.startswith(base): - dirs[:] = [] - continue # not part of the same svn tree, skip it - revision = max(revision, localrev) - return revision - - def get_netloc_and_auth(self, netloc, scheme): - """ - This override allows the auth information to be passed to svn via the - --username and --password options instead of via the URL. - """ - if scheme == 'ssh': - # The --username and --password options can't be used for - # svn+ssh URLs, so keep the auth information in the URL. 
- return super(Subversion, self).get_netloc_and_auth( - netloc, scheme) - - return split_auth_from_netloc(netloc) - - def get_url_rev_and_auth(self, url): - # hotfix the URL scheme after removing svn+ from svn+ssh:// readd it - url, rev, user_pass = super(Subversion, self).get_url_rev_and_auth(url) - if url.startswith('ssh://'): - url = 'svn+' + url - return url, rev, user_pass - - def make_rev_args(self, username, password): - extra_args = [] - if username: - extra_args += ['--username', username] - if password: - extra_args += ['--password', password] - - return extra_args - - @classmethod - def get_remote_url(cls, location): - # In cases where the source is in a subdirectory, not alongside - # setup.py we have to look up in the location until we find a real - # setup.py - orig_location = location - while not os.path.exists(os.path.join(location, 'setup.py')): - last_location = location - location = os.path.dirname(location) - if location == last_location: - # We've traversed up to the root of the filesystem without - # finding setup.py - logger.warning( - "Could not find setup.py for directory %s (tried all " - "parent directories)", - orig_location, - ) - return None - - return cls._get_svn_url_rev(location)[0] - - @classmethod - def _get_svn_url_rev(cls, location): - from pip._internal.exceptions import InstallationError - - entries_path = os.path.join(location, cls.dirname, 'entries') - if os.path.exists(entries_path): - with open(entries_path) as f: - data = f.read() - else: # subversion >= 1.7 does not have the 'entries' file - data = '' - - if (data.startswith('8') or - data.startswith('9') or - data.startswith('10')): - data = list(map(str.splitlines, data.split('\n\x0c\n'))) - del data[0][0] # get rid of the '8' - url = data[0][3] - revs = [int(d[9]) for d in data if len(d) > 9 and d[9]] + [0] - elif data.startswith('<?xml'): - match = _svn_xml_url_re.search(data) - if not match: - raise ValueError('Badly formatted data: %r' % data) - url = 
match.group(1) # get repository URL - revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0] - else: - try: - # subversion >= 1.7 - xml = cls.run_command( - ['info', '--xml', location], - show_stdout=False, - ) - url = _svn_info_xml_url_re.search(xml).group(1) - revs = [ - int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml) - ] - except InstallationError: - url, revs = None, [] - - if revs: - rev = max(revs) - else: - rev = 0 - - return url, rev - - @classmethod - def get_src_requirement(cls, location, project_name): - repo = cls.get_remote_url(location) - if repo is None: - return None - repo = 'svn+' + repo - rev = cls.get_revision(location) - return make_vcs_requirement_url(repo, rev, project_name) - - def is_commit_id_equal(self, dest, name): - """Always assume the versions don't match""" - return False - - -vcs.register(Subversion) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/wheel.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/wheel.py deleted file mode 100644 index 67bcc7f..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/wheel.py +++ /dev/null @@ -1,1095 +0,0 @@ -""" -Support for installing and building the "wheel" binary package format. 
-""" -from __future__ import absolute_import - -import collections -import compileall -import csv -import hashlib -import logging -import os.path -import re -import shutil -import stat -import sys -import warnings -from base64 import urlsafe_b64encode -from email.parser import Parser - -from pip._vendor import pkg_resources -from pip._vendor.distlib.scripts import ScriptMaker -from pip._vendor.packaging.utils import canonicalize_name -from pip._vendor.six import StringIO - -from pip._internal import pep425tags -from pip._internal.download import path_to_url, unpack_url -from pip._internal.exceptions import ( - InstallationError, InvalidWheelFilename, UnsupportedWheel, -) -from pip._internal.locations import ( - PIP_DELETE_MARKER_FILENAME, distutils_scheme, -) -from pip._internal.models.link import Link -from pip._internal.utils.logging import indent_log -from pip._internal.utils.misc import ( - call_subprocess, captured_stdout, ensure_dir, read_chunks, -) -from pip._internal.utils.setuptools_build import SETUPTOOLS_SHIM -from pip._internal.utils.temp_dir import TempDirectory -from pip._internal.utils.typing import MYPY_CHECK_RUNNING -from pip._internal.utils.ui import open_spinner - -if MYPY_CHECK_RUNNING: - from typing import ( # noqa: F401 - Dict, List, Optional, Sequence, Mapping, Tuple, IO, Text, Any, - Union, Iterable - ) - from pip._vendor.packaging.requirements import Requirement # noqa: F401 - from pip._internal.req.req_install import InstallRequirement # noqa: F401 - from pip._internal.download import PipSession # noqa: F401 - from pip._internal.index import FormatControl, PackageFinder # noqa: F401 - from pip._internal.operations.prepare import ( # noqa: F401 - RequirementPreparer - ) - from pip._internal.cache import WheelCache # noqa: F401 - from pip._internal.pep425tags import Pep425Tag # noqa: F401 - - InstalledCSVRow = Tuple[str, ...] 
- - -VERSION_COMPATIBLE = (1, 0) - - -logger = logging.getLogger(__name__) - - -def normpath(src, p): - return os.path.relpath(src, p).replace(os.path.sep, '/') - - -def rehash(path, blocksize=1 << 20): - # type: (str, int) -> Tuple[str, str] - """Return (hash, length) for path using hashlib.sha256()""" - h = hashlib.sha256() - length = 0 - with open(path, 'rb') as f: - for block in read_chunks(f, size=blocksize): - length += len(block) - h.update(block) - digest = 'sha256=' + urlsafe_b64encode( - h.digest() - ).decode('latin1').rstrip('=') - # unicode/str python2 issues - return (digest, str(length)) # type: ignore - - -def open_for_csv(name, mode): - # type: (str, Text) -> IO - if sys.version_info[0] < 3: - nl = {} # type: Dict[str, Any] - bin = 'b' - else: - nl = {'newline': ''} # type: Dict[str, Any] - bin = '' - return open(name, mode + bin, **nl) - - -def replace_python_tag(wheelname, new_tag): - # type: (str, str) -> str - """Replace the Python tag in a wheel file name with a new value. - """ - parts = wheelname.split('-') - parts[-3] = new_tag - return '-'.join(parts) - - -def fix_script(path): - # type: (str) -> Optional[bool] - """Replace #!python with #!/path/to/python - Return True if file was changed.""" - # XXX RECORD hashes will need to be updated - if os.path.isfile(path): - with open(path, 'rb') as script: - firstline = script.readline() - if not firstline.startswith(b'#!python'): - return False - exename = sys.executable.encode(sys.getfilesystemencoding()) - firstline = b'#!' + exename + os.linesep.encode("ascii") - rest = script.read() - with open(path, 'wb') as script: - script.write(firstline) - script.write(rest) - return True - return None - - -dist_info_re = re.compile(r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>.+?))?) - \.dist-info$""", re.VERBOSE) - - -def root_is_purelib(name, wheeldir): - # type: (str, str) -> bool - """ - Return True if the extracted wheel in wheeldir should go into purelib. 
- """ - name_folded = name.replace("-", "_") - for item in os.listdir(wheeldir): - match = dist_info_re.match(item) - if match and match.group('name') == name_folded: - with open(os.path.join(wheeldir, item, 'WHEEL')) as wheel: - for line in wheel: - line = line.lower().rstrip() - if line == "root-is-purelib: true": - return True - return False - - -def get_entrypoints(filename): - # type: (str) -> Tuple[Dict[str, str], Dict[str, str]] - if not os.path.exists(filename): - return {}, {} - - # This is done because you can pass a string to entry_points wrappers which - # means that they may or may not be valid INI files. The attempt here is to - # strip leading and trailing whitespace in order to make them valid INI - # files. - with open(filename) as fp: - data = StringIO() - for line in fp: - data.write(line.strip()) - data.write("\n") - data.seek(0) - - # get the entry points and then the script names - entry_points = pkg_resources.EntryPoint.parse_map(data) - console = entry_points.get('console_scripts', {}) - gui = entry_points.get('gui_scripts', {}) - - def _split_ep(s): - """get the string representation of EntryPoint, remove space and split - on '='""" - return str(s).replace(" ", "").split("=") - - # convert the EntryPoint objects into strings with module:function - console = dict(_split_ep(v) for v in console.values()) - gui = dict(_split_ep(v) for v in gui.values()) - return console, gui - - -def message_about_scripts_not_on_PATH(scripts): - # type: (Sequence[str]) -> Optional[str] - """Determine if any scripts are not on PATH and format a warning. - - Returns a warning message if one or more scripts are not on PATH, - otherwise None. 
- """ - if not scripts: - return None - - # Group scripts by the path they were installed in - grouped_by_dir = collections.defaultdict(set) # type: Dict[str, set] - for destfile in scripts: - parent_dir = os.path.dirname(destfile) - script_name = os.path.basename(destfile) - grouped_by_dir[parent_dir].add(script_name) - - # We don't want to warn for directories that are on PATH. - not_warn_dirs = [ - os.path.normcase(i).rstrip(os.sep) for i in - os.environ.get("PATH", "").split(os.pathsep) - ] - # If an executable sits with sys.executable, we don't warn for it. - # This covers the case of venv invocations without activating the venv. - not_warn_dirs.append(os.path.normcase(os.path.dirname(sys.executable))) - warn_for = { - parent_dir: scripts for parent_dir, scripts in grouped_by_dir.items() - if os.path.normcase(parent_dir) not in not_warn_dirs - } - if not warn_for: - return None - - # Format a message - msg_lines = [] - for parent_dir, scripts in warn_for.items(): - scripts = sorted(scripts) - if len(scripts) == 1: - start_text = "script {} is".format(scripts[0]) - else: - start_text = "scripts {} are".format( - ", ".join(scripts[:-1]) + " and " + scripts[-1] - ) - - msg_lines.append( - "The {} installed in '{}' which is not on PATH." - .format(start_text, parent_dir) - ) - - last_line_fmt = ( - "Consider adding {} to PATH or, if you prefer " - "to suppress this warning, use --no-warn-script-location." - ) - if len(msg_lines) == 1: - msg_lines.append(last_line_fmt.format("this directory")) - else: - msg_lines.append(last_line_fmt.format("these directories")) - - # Returns the formatted multiline message - return "\n".join(msg_lines) - - -def sorted_outrows(outrows): - # type: (Iterable[InstalledCSVRow]) -> List[InstalledCSVRow] - """ - Return the given rows of a RECORD file in sorted order. - - Each row is a 3-tuple (path, hash, size) and corresponds to a record of - a RECORD file (see PEP 376 and PEP 427 for details). 
For the rows - passed to this function, the size can be an integer as an int or string, - or the empty string. - """ - # Normally, there should only be one row per path, in which case the - # second and third elements don't come into play when sorting. - # However, in cases in the wild where a path might happen to occur twice, - # we don't want the sort operation to trigger an error (but still want - # determinism). Since the third element can be an int or string, we - # coerce each element to a string to avoid a TypeError in this case. - # For additional background, see-- - # https://github.com/pypa/pip/issues/5868 - return sorted(outrows, key=lambda row: tuple(str(x) for x in row)) - - -def get_csv_rows_for_installed( - old_csv_rows, # type: Iterable[List[str]] - installed, # type: Dict[str, str] - changed, # type: set - generated, # type: List[str] - lib_dir, # type: str -): - # type: (...) -> List[InstalledCSVRow] - """ - :param installed: A map from archive RECORD path to installation RECORD - path. - """ - installed_rows = [] # type: List[InstalledCSVRow] - for row in old_csv_rows: - if len(row) > 3: - logger.warning( - 'RECORD line has more than three elements: {}'.format(row) - ) - # Make a copy because we are mutating the row. 
- row = list(row) - old_path = row[0] - new_path = installed.pop(old_path, old_path) - row[0] = new_path - if new_path in changed: - digest, length = rehash(new_path) - row[1] = digest - row[2] = length - installed_rows.append(tuple(row)) - for f in generated: - digest, length = rehash(f) - installed_rows.append((normpath(f, lib_dir), digest, str(length))) - for f in installed: - installed_rows.append((installed[f], '', '')) - return installed_rows - - -def move_wheel_files( - name, # type: str - req, # type: Requirement - wheeldir, # type: str - user=False, # type: bool - home=None, # type: Optional[str] - root=None, # type: Optional[str] - pycompile=True, # type: bool - scheme=None, # type: Optional[Mapping[str, str]] - isolated=False, # type: bool - prefix=None, # type: Optional[str] - warn_script_location=True # type: bool -): - # type: (...) -> None - """Install a wheel""" - # TODO: Investigate and break this up. - # TODO: Look into moving this into a dedicated class for representing an - # installation. - - if not scheme: - scheme = distutils_scheme( - name, user=user, home=home, root=root, isolated=isolated, - prefix=prefix, - ) - - if root_is_purelib(name, wheeldir): - lib_dir = scheme['purelib'] - else: - lib_dir = scheme['platlib'] - - info_dir = [] # type: List[str] - data_dirs = [] - source = wheeldir.rstrip(os.path.sep) + os.path.sep - - # Record details of the files moved - # installed = files copied from the wheel to the destination - # changed = files changed while installing (scripts #! 
line typically) - # generated = files newly generated during the install (script wrappers) - installed = {} # type: Dict[str, str] - changed = set() - generated = [] # type: List[str] - - # Compile all of the pyc files that we're going to be installing - if pycompile: - with captured_stdout() as stdout: - with warnings.catch_warnings(): - warnings.filterwarnings('ignore') - compileall.compile_dir(source, force=True, quiet=True) - logger.debug(stdout.getvalue()) - - def record_installed(srcfile, destfile, modified=False): - """Map archive RECORD paths to installation RECORD paths.""" - oldpath = normpath(srcfile, wheeldir) - newpath = normpath(destfile, lib_dir) - installed[oldpath] = newpath - if modified: - changed.add(destfile) - - def clobber(source, dest, is_base, fixer=None, filter=None): - ensure_dir(dest) # common for the 'include' path - - for dir, subdirs, files in os.walk(source): - basedir = dir[len(source):].lstrip(os.path.sep) - destdir = os.path.join(dest, basedir) - if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'): - continue - for s in subdirs: - destsubdir = os.path.join(dest, basedir, s) - if is_base and basedir == '' and destsubdir.endswith('.data'): - data_dirs.append(s) - continue - elif (is_base and - s.endswith('.dist-info') and - canonicalize_name(s).startswith( - canonicalize_name(req.name))): - assert not info_dir, ('Multiple .dist-info directories: ' + - destsubdir + ', ' + - ', '.join(info_dir)) - info_dir.append(destsubdir) - for f in files: - # Skip unwanted files - if filter and filter(f): - continue - srcfile = os.path.join(dir, f) - destfile = os.path.join(dest, basedir, f) - # directory creation is lazy and after the file filtering above - # to ensure we don't install empty dirs; empty dirs can't be - # uninstalled. - ensure_dir(destdir) - - # copyfile (called below) truncates the destination if it - # exists and then writes the new contents. 
This is fine in most - # cases, but can cause a segfault if pip has loaded a shared - # object (e.g. from pyopenssl through its vendored urllib3) - # Since the shared object is mmap'd an attempt to call a - # symbol in it will then cause a segfault. Unlinking the file - # allows writing of new contents while allowing the process to - # continue to use the old copy. - if os.path.exists(destfile): - os.unlink(destfile) - - # We use copyfile (not move, copy, or copy2) to be extra sure - # that we are not moving directories over (copyfile fails for - # directories) as well as to ensure that we are not copying - # over any metadata because we want more control over what - # metadata we actually copy over. - shutil.copyfile(srcfile, destfile) - - # Copy over the metadata for the file, currently this only - # includes the atime and mtime. - st = os.stat(srcfile) - if hasattr(os, "utime"): - os.utime(destfile, (st.st_atime, st.st_mtime)) - - # If our file is executable, then make our destination file - # executable. 
- if os.access(srcfile, os.X_OK): - st = os.stat(srcfile) - permissions = ( - st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH - ) - os.chmod(destfile, permissions) - - changed = False - if fixer: - changed = fixer(destfile) - record_installed(srcfile, destfile, changed) - - clobber(source, lib_dir, True) - - assert info_dir, "%s .dist-info directory not found" % req - - # Get the defined entry points - ep_file = os.path.join(info_dir[0], 'entry_points.txt') - console, gui = get_entrypoints(ep_file) - - def is_entrypoint_wrapper(name): - # EP, EP.exe and EP-script.py are scripts generated for - # entry point EP by setuptools - if name.lower().endswith('.exe'): - matchname = name[:-4] - elif name.lower().endswith('-script.py'): - matchname = name[:-10] - elif name.lower().endswith(".pya"): - matchname = name[:-4] - else: - matchname = name - # Ignore setuptools-generated scripts - return (matchname in console or matchname in gui) - - for datadir in data_dirs: - fixer = None - filter = None - for subdir in os.listdir(os.path.join(wheeldir, datadir)): - fixer = None - if subdir == 'scripts': - fixer = fix_script - filter = is_entrypoint_wrapper - source = os.path.join(wheeldir, datadir, subdir) - dest = scheme[subdir] - clobber(source, dest, False, fixer=fixer, filter=filter) - - maker = ScriptMaker(None, scheme['scripts']) - - # Ensure old scripts are overwritten. - # See https://github.com/pypa/pip/issues/1800 - maker.clobber = True - - # Ensure we don't generate any variants for scripts because this is almost - # never what somebody wants. - # See https://bitbucket.org/pypa/distlib/issue/35/ - maker.variants = {''} - - # This is required because otherwise distlib creates scripts that are not - # executable. - # See https://bitbucket.org/pypa/distlib/issue/32/ - maker.set_mode = True - - # Simplify the script and fix the fact that the default script swallows - # every single stack trace. 
- # See https://bitbucket.org/pypa/distlib/issue/34/ - # See https://bitbucket.org/pypa/distlib/issue/33/ - def _get_script_text(entry): - if entry.suffix is None: - raise InstallationError( - "Invalid script entry point: %s for req: %s - A callable " - "suffix is required. Cf https://packaging.python.org/en/" - "latest/distributing.html#console-scripts for more " - "information." % (entry, req) - ) - return maker.script_template % { - "module": entry.prefix, - "import_name": entry.suffix.split(".")[0], - "func": entry.suffix, - } - # ignore type, because mypy disallows assigning to a method, - # see https://github.com/python/mypy/issues/2427 - maker._get_script_text = _get_script_text # type: ignore - maker.script_template = r"""# -*- coding: utf-8 -*- -import re -import sys - -from %(module)s import %(import_name)s - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(%(func)s()) -""" - - # Special case pip and setuptools to generate versioned wrappers - # - # The issue is that some projects (specifically, pip and setuptools) use - # code in setup.py to create "versioned" entry points - pip2.7 on Python - # 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into - # the wheel metadata at build time, and so if the wheel is installed with - # a *different* version of Python the entry points will be wrong. The - # correct fix for this is to enhance the metadata to be able to describe - # such versioned entry points, but that won't happen till Metadata 2.0 is - # available. - # In the meantime, projects using versioned entry points will either have - # incorrect versioned entry points, or they will not be able to distribute - # "universal" wheels (i.e., they will need a wheel per Python version). - # - # Because setuptools and pip are bundled with _ensurepip and virtualenv, - # we need to use universal wheels. 
So, as a stopgap until Metadata 2.0, we - # override the versioned entry points in the wheel and generate the - # correct ones. This code is purely a short-term measure until Metadata 2.0 - # is available. - # - # To add the level of hack in this section of code, in order to support - # ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment - # variable which will control which version scripts get installed. - # - # ENSUREPIP_OPTIONS=altinstall - # - Only pipX.Y and easy_install-X.Y will be generated and installed - # ENSUREPIP_OPTIONS=install - # - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note - # that this option is technically if ENSUREPIP_OPTIONS is set and is - # not altinstall - # DEFAULT - # - The default behavior is to install pip, pipX, pipX.Y, easy_install - # and easy_install-X.Y. - pip_script = console.pop('pip', None) - if pip_script: - if "ENSUREPIP_OPTIONS" not in os.environ: - spec = 'pip = ' + pip_script - generated.extend(maker.make(spec)) - - if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall": - spec = 'pip%s = %s' % (sys.version[:1], pip_script) - generated.extend(maker.make(spec)) - - spec = 'pip%s = %s' % (sys.version[:3], pip_script) - generated.extend(maker.make(spec)) - # Delete any other versioned pip entry points - pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)] - for k in pip_ep: - del console[k] - easy_install_script = console.pop('easy_install', None) - if easy_install_script: - if "ENSUREPIP_OPTIONS" not in os.environ: - spec = 'easy_install = ' + easy_install_script - generated.extend(maker.make(spec)) - - spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script) - generated.extend(maker.make(spec)) - # Delete any other versioned easy_install entry points - easy_install_ep = [ - k for k in console if re.match(r'easy_install(-\d\.\d)?$', k) - ] - for k in easy_install_ep: - del console[k] - - # Generate the console and GUI entry points specified in the wheel 
- if len(console) > 0: - generated_console_scripts = maker.make_multiple( - ['%s = %s' % kv for kv in console.items()] - ) - generated.extend(generated_console_scripts) - - if warn_script_location: - msg = message_about_scripts_not_on_PATH(generated_console_scripts) - if msg is not None: - logger.warning(msg) - - if len(gui) > 0: - generated.extend( - maker.make_multiple( - ['%s = %s' % kv for kv in gui.items()], - {'gui': True} - ) - ) - - # Record pip as the installer - installer = os.path.join(info_dir[0], 'INSTALLER') - temp_installer = os.path.join(info_dir[0], 'INSTALLER.pip') - with open(temp_installer, 'wb') as installer_file: - installer_file.write(b'pip\n') - shutil.move(temp_installer, installer) - generated.append(installer) - - # Record details of all files installed - record = os.path.join(info_dir[0], 'RECORD') - temp_record = os.path.join(info_dir[0], 'RECORD.pip') - with open_for_csv(record, 'r') as record_in: - with open_for_csv(temp_record, 'w+') as record_out: - reader = csv.reader(record_in) - outrows = get_csv_rows_for_installed( - reader, installed=installed, changed=changed, - generated=generated, lib_dir=lib_dir, - ) - writer = csv.writer(record_out) - # Sort to simplify testing. - for row in sorted_outrows(outrows): - writer.writerow(row) - shutil.move(temp_record, record) - - -def wheel_version(source_dir): - # type: (Optional[str]) -> Optional[Tuple[int, ...]] - """ - Return the Wheel-Version of an extracted wheel, if possible. - - Otherwise, return None if we couldn't parse / extract it. 
- """ - try: - dist = [d for d in pkg_resources.find_on_path(None, source_dir)][0] - - wheel_data = dist.get_metadata('WHEEL') - wheel_data = Parser().parsestr(wheel_data) - - version = wheel_data['Wheel-Version'].strip() - version = tuple(map(int, version.split('.'))) - return version - except Exception: - return None - - -def check_compatibility(version, name): - # type: (Optional[Tuple[int, ...]], str) -> None - """ - Raises errors or warns if called with an incompatible Wheel-Version. - - Pip should refuse to install a Wheel-Version that's a major series - ahead of what it's compatible with (e.g 2.0 > 1.1); and warn when - installing a version only minor version ahead (e.g 1.2 > 1.1). - - version: a 2-tuple representing a Wheel-Version (Major, Minor) - name: name of wheel or package to raise exception about - - :raises UnsupportedWheel: when an incompatible Wheel-Version is given - """ - if not version: - raise UnsupportedWheel( - "%s is in an unsupported or invalid wheel" % name - ) - if version[0] > VERSION_COMPATIBLE[0]: - raise UnsupportedWheel( - "%s's Wheel-Version (%s) is not compatible with this version " - "of pip" % (name, '.'.join(map(str, version))) - ) - elif version > VERSION_COMPATIBLE: - logger.warning( - 'Installing from a newer Wheel-Version (%s)', - '.'.join(map(str, version)), - ) - - -class Wheel(object): - """A wheel file""" - - # TODO: Maybe move the class into the models sub-package - # TODO: Maybe move the install code into this class - - wheel_file_re = re.compile( - r"""^(?P<namever>(?P<name>.+?)-(?P<ver>.*?)) - ((-(?P<build>\d[^-]*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?) - \.whl|\.dist-info)$""", - re.VERBOSE - ) - - def __init__(self, filename): - # type: (str) -> None - """ - :raises InvalidWheelFilename: when the filename is invalid for a wheel - """ - wheel_info = self.wheel_file_re.match(filename) - if not wheel_info: - raise InvalidWheelFilename( - "%s is not a valid wheel filename." 
% filename - ) - self.filename = filename - self.name = wheel_info.group('name').replace('_', '-') - # we'll assume "_" means "-" due to wheel naming scheme - # (https://github.com/pypa/pip/issues/1150) - self.version = wheel_info.group('ver').replace('_', '-') - self.build_tag = wheel_info.group('build') - self.pyversions = wheel_info.group('pyver').split('.') - self.abis = wheel_info.group('abi').split('.') - self.plats = wheel_info.group('plat').split('.') - - # All the tag combinations from this file - self.file_tags = { - (x, y, z) for x in self.pyversions - for y in self.abis for z in self.plats - } - - def support_index_min(self, tags=None): - # type: (Optional[List[Pep425Tag]]) -> Optional[int] - """ - Return the lowest index that one of the wheel's file_tag combinations - achieves in the supported_tags list e.g. if there are 8 supported tags, - and one of the file tags is first in the list, then return 0. Returns - None is the wheel is not supported. - """ - if tags is None: # for mock - tags = pep425tags.get_supported() - indexes = [tags.index(c) for c in self.file_tags if c in tags] - return min(indexes) if indexes else None - - def supported(self, tags=None): - # type: (Optional[List[Pep425Tag]]) -> bool - """Is this wheel supported on this system?""" - if tags is None: # for mock - tags = pep425tags.get_supported() - return bool(set(tags).intersection(self.file_tags)) - - -def _contains_egg_info( - s, _egg_info_re=re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)): - """Determine whether the string looks like an egg_info. - - :param s: The string to parse. E.g. foo-2.1 - """ - return bool(_egg_info_re.search(s)) - - -def should_use_ephemeral_cache( - req, # type: InstallRequirement - format_control, # type: FormatControl - autobuilding, # type: bool - cache_available # type: bool -): - # type: (...) -> Optional[bool] - """ - Return whether to build an InstallRequirement object using the - ephemeral cache. 
- - :param cache_available: whether a cache directory is available for the - autobuilding=True case. - - :return: True or False to build the requirement with ephem_cache=True - or False, respectively; or None not to build the requirement. - """ - if req.constraint: - return None - if req.is_wheel: - if not autobuilding: - logger.info( - 'Skipping %s, due to already being wheel.', req.name, - ) - return None - if not autobuilding: - return False - - if req.editable or not req.source_dir: - return None - - if req.link and not req.link.is_artifact: - # VCS checkout. Build wheel just for this run. - return True - - if "binary" not in format_control.get_allowed_formats( - canonicalize_name(req.name)): - logger.info( - "Skipping bdist_wheel for %s, due to binaries " - "being disabled for it.", req.name, - ) - return None - - link = req.link - base, ext = link.splitext() - if cache_available and _contains_egg_info(base): - return False - - # Otherwise, build the wheel just for this run using the ephemeral - # cache since we are either in the case of e.g. a local directory, or - # no cache directory is available to use. - return True - - -def format_command( - command_args, # type: List[str] - command_output, # type: str -): - # type: (...) -> str - """ - Format command information for logging. - """ - text = 'Command arguments: {}\n'.format(command_args) - - if not command_output: - text += 'Command output: None' - elif logger.getEffectiveLevel() > logging.DEBUG: - text += 'Command output: [use --verbose to show]' - else: - if not command_output.endswith('\n'): - command_output += '\n' - text += ( - 'Command output:\n{}' - '-----------------------------------------' - ).format(command_output) - - return text - - -def get_legacy_build_wheel_path( - names, # type: List[str] - temp_dir, # type: str - req, # type: InstallRequirement - command_args, # type: List[str] - command_output, # type: str -): - # type: (...) 
-> Optional[str] - """ - Return the path to the wheel in the temporary build directory. - """ - # Sort for determinism. - names = sorted(names) - if not names: - msg = ( - 'Legacy build of wheel for {!r} created no files.\n' - ).format(req.name) - msg += format_command(command_args, command_output) - logger.warning(msg) - return None - - if len(names) > 1: - msg = ( - 'Legacy build of wheel for {!r} created more than one file.\n' - 'Filenames (choosing first): {}\n' - ).format(req.name, names) - msg += format_command(command_args, command_output) - logger.warning(msg) - - return os.path.join(temp_dir, names[0]) - - -class WheelBuilder(object): - """Build wheels from a RequirementSet.""" - - def __init__( - self, - finder, # type: PackageFinder - preparer, # type: RequirementPreparer - wheel_cache, # type: WheelCache - build_options=None, # type: Optional[List[str]] - global_options=None, # type: Optional[List[str]] - no_clean=False # type: bool - ): - # type: (...) -> None - self.finder = finder - self.preparer = preparer - self.wheel_cache = wheel_cache - - self._wheel_dir = preparer.wheel_download_dir - - self.build_options = build_options or [] - self.global_options = global_options or [] - self.no_clean = no_clean - - def _build_one(self, req, output_dir, python_tag=None): - """Build one wheel. - - :return: The filename of the built wheel, or None if the build failed. 
- """ - # Install build deps into temporary directory (PEP 518) - with req.build_env: - return self._build_one_inside_env(req, output_dir, - python_tag=python_tag) - - def _build_one_inside_env(self, req, output_dir, python_tag=None): - with TempDirectory(kind="wheel") as temp_dir: - if req.use_pep517: - builder = self._build_one_pep517 - else: - builder = self._build_one_legacy - wheel_path = builder(req, temp_dir.path, python_tag=python_tag) - if wheel_path is not None: - wheel_name = os.path.basename(wheel_path) - dest_path = os.path.join(output_dir, wheel_name) - try: - shutil.move(wheel_path, dest_path) - logger.info('Stored in directory: %s', output_dir) - return dest_path - except Exception: - pass - # Ignore return, we can't do anything else useful. - self._clean_one(req) - return None - - def _base_setup_args(self, req): - # NOTE: Eventually, we'd want to also -S to the flags here, when we're - # isolating. Currently, it breaks Python in virtualenvs, because it - # relies on site.py to find parts of the standard library outside the - # virtualenv. - return [ - sys.executable, '-u', '-c', - SETUPTOOLS_SHIM % req.setup_py - ] + list(self.global_options) - - def _build_one_pep517(self, req, tempd, python_tag=None): - """Build one InstallRequirement using the PEP 517 build process. - - Returns path to wheel if successfully built. Otherwise, returns None. - """ - assert req.metadata_directory is not None - try: - req.spin_message = 'Building wheel for %s (PEP 517)' % (req.name,) - logger.debug('Destination directory: %s', tempd) - wheel_name = req.pep517_backend.build_wheel( - tempd, - metadata_directory=req.metadata_directory - ) - if python_tag: - # General PEP 517 backends don't necessarily support - # a "--python-tag" option, so we rename the wheel - # file directly. 
- new_name = replace_python_tag(wheel_name, python_tag) - os.rename( - os.path.join(tempd, wheel_name), - os.path.join(tempd, new_name) - ) - # Reassign to simplify the return at the end of function - wheel_name = new_name - except Exception: - logger.error('Failed building wheel for %s', req.name) - return None - return os.path.join(tempd, wheel_name) - - def _build_one_legacy(self, req, tempd, python_tag=None): - """Build one InstallRequirement using the "legacy" build process. - - Returns path to wheel if successfully built. Otherwise, returns None. - """ - base_args = self._base_setup_args(req) - - spin_message = 'Building wheel for %s (setup.py)' % (req.name,) - with open_spinner(spin_message) as spinner: - logger.debug('Destination directory: %s', tempd) - wheel_args = base_args + ['bdist_wheel', '-d', tempd] \ - + self.build_options - - if python_tag is not None: - wheel_args += ["--python-tag", python_tag] - - try: - output = call_subprocess(wheel_args, cwd=req.setup_py_dir, - show_stdout=False, spinner=spinner) - except Exception: - spinner.finish("error") - logger.error('Failed building wheel for %s', req.name) - return None - names = os.listdir(tempd) - wheel_path = get_legacy_build_wheel_path( - names=names, - temp_dir=tempd, - req=req, - command_args=wheel_args, - command_output=output, - ) - return wheel_path - - def _clean_one(self, req): - base_args = self._base_setup_args(req) - - logger.info('Running setup.py clean for %s', req.name) - clean_args = base_args + ['clean', '--all'] - try: - call_subprocess(clean_args, cwd=req.source_dir, show_stdout=False) - return True - except Exception: - logger.error('Failed cleaning build dir for %s', req.name) - return False - - def build( - self, - requirements, # type: Iterable[InstallRequirement] - session, # type: PipSession - autobuilding=False # type: bool - ): - # type: (...) -> List[InstallRequirement] - """Build wheels. 
- - :param unpack: If True, replace the sdist we built from with the - newly built wheel, in preparation for installation. - :return: True if all the wheels built correctly. - """ - buildset = [] - format_control = self.finder.format_control - # Whether a cache directory is available for autobuilding=True. - cache_available = bool(self._wheel_dir or self.wheel_cache.cache_dir) - - for req in requirements: - ephem_cache = should_use_ephemeral_cache( - req, format_control=format_control, autobuilding=autobuilding, - cache_available=cache_available, - ) - if ephem_cache is None: - continue - - buildset.append((req, ephem_cache)) - - if not buildset: - return [] - - # Is any wheel build not using the ephemeral cache? - if any(not ephem_cache for _, ephem_cache in buildset): - have_directory_for_build = self._wheel_dir or ( - autobuilding and self.wheel_cache.cache_dir - ) - assert have_directory_for_build - - # TODO by @pradyunsg - # Should break up this method into 2 separate methods. - - # Build the wheels. - logger.info( - 'Building wheels for collected packages: %s', - ', '.join([req.name for (req, _) in buildset]), - ) - _cache = self.wheel_cache # shorter name - with indent_log(): - build_success, build_failure = [], [] - for req, ephem in buildset: - python_tag = None - if autobuilding: - python_tag = pep425tags.implementation_tag - if ephem: - output_dir = _cache.get_ephem_path_for_link(req.link) - else: - output_dir = _cache.get_path_for_link(req.link) - try: - ensure_dir(output_dir) - except OSError as e: - logger.warning("Building wheel for %s failed: %s", - req.name, e) - build_failure.append(req) - continue - else: - output_dir = self._wheel_dir - wheel_file = self._build_one( - req, output_dir, - python_tag=python_tag, - ) - if wheel_file: - build_success.append(req) - if autobuilding: - # XXX: This is mildly duplicative with prepare_files, - # but not close enough to pull out to a single common - # method. 
- # The code below assumes temporary source dirs - - # prevent it doing bad things. - if req.source_dir and not os.path.exists(os.path.join( - req.source_dir, PIP_DELETE_MARKER_FILENAME)): - raise AssertionError( - "bad source dir - missing marker") - # Delete the source we built the wheel from - req.remove_temporary_source() - # set the build directory again - name is known from - # the work prepare_files did. - req.source_dir = req.build_location( - self.preparer.build_dir - ) - # Update the link for this. - req.link = Link(path_to_url(wheel_file)) - assert req.link.is_wheel - # extract the wheel into the dir - unpack_url( - req.link, req.source_dir, None, False, - session=session, - ) - else: - build_failure.append(req) - - # notify success/failure - if build_success: - logger.info( - 'Successfully built %s', - ' '.join([req.name for req in build_success]), - ) - if build_failure: - logger.info( - 'Failed to build %s', - ' '.join([req.name for req in build_failure]), - ) - # Return a list of requirements that failed to build - return build_failure diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/__init__.py deleted file mode 100644 index b919b54..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/__init__.py +++ /dev/null @@ -1,111 +0,0 @@ -""" -pip._vendor is for vendoring dependencies of pip to prevent needing pip to -depend on something external. - -Files inside of pip._vendor should be considered immutable and should only be -updated to versions from upstream. -""" -from __future__ import absolute_import - -import glob -import os.path -import sys - -# Downstream redistributors which have debundled our dependencies should also -# patch this value to be true. This will trigger the additional patching -# to cause things like "six" to be available as pip. 
-DEBUNDLED = False - -# By default, look in this directory for a bunch of .whl files which we will -# add to the beginning of sys.path before attempting to import anything. This -# is done to support downstream re-distributors like Debian and Fedora who -# wish to create their own Wheels for our dependencies to aid in debundling. -WHEEL_DIR = os.path.abspath(os.path.dirname(__file__)) - - -# Define a small helper function to alias our vendored modules to the real ones -# if the vendored ones do not exist. This idea of this was taken from -# https://github.com/kennethreitz/requests/pull/2567. -def vendored(modulename): - vendored_name = "{0}.{1}".format(__name__, modulename) - - try: - __import__(vendored_name, globals(), locals(), level=0) - except ImportError: - try: - __import__(modulename, globals(), locals(), level=0) - except ImportError: - # We can just silently allow import failures to pass here. If we - # got to this point it means that ``import pip._vendor.whatever`` - # failed and so did ``import whatever``. Since we're importing this - # upfront in an attempt to alias imports, not erroring here will - # just mean we get a regular import error whenever pip *actually* - # tries to import one of these modules to use it, which actually - # gives us a better error message than we would have otherwise - # gotten. - pass - else: - sys.modules[vendored_name] = sys.modules[modulename] - base, head = vendored_name.rsplit(".", 1) - setattr(sys.modules[base], head, sys.modules[modulename]) - - -# If we're operating in a debundled setup, then we want to go ahead and trigger -# the aliasing of our vendored libraries as well as looking for wheels to add -# to our sys.path. This will cause all of this code to be a no-op typically -# however downstream redistributors can enable it in a consistent way across -# all platforms. -if DEBUNDLED: - # Actually look inside of WHEEL_DIR to find .whl files and add them to the - # front of our sys.path. 
- sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path - - # Actually alias all of our vendored dependencies. - vendored("cachecontrol") - vendored("colorama") - vendored("distlib") - vendored("distro") - vendored("html5lib") - vendored("lockfile") - vendored("six") - vendored("six.moves") - vendored("six.moves.urllib") - vendored("six.moves.urllib.parse") - vendored("packaging") - vendored("packaging.version") - vendored("packaging.specifiers") - vendored("pep517") - vendored("pkg_resources") - vendored("progress") - vendored("pytoml") - vendored("retrying") - vendored("requests") - vendored("requests.packages") - vendored("requests.packages.urllib3") - vendored("requests.packages.urllib3._collections") - vendored("requests.packages.urllib3.connection") - vendored("requests.packages.urllib3.connectionpool") - vendored("requests.packages.urllib3.contrib") - vendored("requests.packages.urllib3.contrib.ntlmpool") - vendored("requests.packages.urllib3.contrib.pyopenssl") - vendored("requests.packages.urllib3.exceptions") - vendored("requests.packages.urllib3.fields") - vendored("requests.packages.urllib3.filepost") - vendored("requests.packages.urllib3.packages") - vendored("requests.packages.urllib3.packages.ordered_dict") - vendored("requests.packages.urllib3.packages.six") - vendored("requests.packages.urllib3.packages.ssl_match_hostname") - vendored("requests.packages.urllib3.packages.ssl_match_hostname." 
- "_implementation") - vendored("requests.packages.urllib3.poolmanager") - vendored("requests.packages.urllib3.request") - vendored("requests.packages.urllib3.response") - vendored("requests.packages.urllib3.util") - vendored("requests.packages.urllib3.util.connection") - vendored("requests.packages.urllib3.util.request") - vendored("requests.packages.urllib3.util.response") - vendored("requests.packages.urllib3.util.retry") - vendored("requests.packages.urllib3.util.ssl_") - vendored("requests.packages.urllib3.util.timeout") - vendored("requests.packages.urllib3.util.url") - vendored("urllib3") diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/appdirs.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/appdirs.py deleted file mode 100644 index 2bd3911..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/appdirs.py +++ /dev/null @@ -1,604 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# Copyright (c) 2005-2010 ActiveState Software Inc. -# Copyright (c) 2013 Eddy Petrișor - -"""Utilities for determining application-specific dirs. - -See <http://github.com/ActiveState/appdirs> for details and usage. -""" -# Dev Notes: -# - MSDN on where to store app data files: -# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120 -# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html -# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html - -__version_info__ = (1, 4, 3) -__version__ = '.'.join(map(str, __version_info__)) - - -import sys -import os - -PY3 = sys.version_info[0] == 3 - -if PY3: - unicode = str - -if sys.platform.startswith('java'): - import platform - os_name = platform.java_ver()[3][0] - if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc. - system = 'win32' - elif os_name.startswith('Mac'): # "Mac OS X", etc. 
- system = 'darwin' - else: # "Linux", "SunOS", "FreeBSD", etc. - # Setting this to "linux2" is not ideal, but only Windows or Mac - # are actually checked for and the rest of the module expects - # *sys.platform* style strings. - system = 'linux2' -else: - system = sys.platform - - - -def user_data_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> - for a discussion of issues. - - Typical user data directories are: - Mac OS X: ~/Library/Application Support/<AppName> - Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined - Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName> - Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName> - Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName> - Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName> - - For Unix, we follow the XDG spec and support $XDG_DATA_HOME. - That means, by default "~/.local/share/<AppName>". 
- """ - if system == "win32": - if appauthor is None: - appauthor = appname - const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA" - path = os.path.normpath(_get_win_folder(const)) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - elif system == 'darwin': - path = os.path.expanduser('~/Library/Application Support/') - if appname: - path = os.path.join(path, appname) - else: - path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def site_data_dir(appname=None, appauthor=None, version=None, multipath=False): - r"""Return full path to the user-shared data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "multipath" is an optional parameter only applicable to *nix - which indicates that the entire list of data dirs should be - returned. By default, the first item from XDG_DATA_DIRS is - returned, or '/usr/local/share/<AppName>', - if XDG_DATA_DIRS is not set - - Typical site data directories are: - Mac OS X: /Library/Application Support/<AppName> - Unix: /usr/local/share/<AppName> or /usr/share/<AppName> - Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName> - Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) 
- Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7. - - For Unix, this is using the $XDG_DATA_DIRS[0] default. - - WARNING: Do not use this on Windows. See the Vista-Fail note above for why. - """ - if system == "win32": - if appauthor is None: - appauthor = appname - path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - elif system == 'darwin': - path = os.path.expanduser('/Library/Application Support') - if appname: - path = os.path.join(path, appname) - else: - # XDG default for $XDG_DATA_DIRS - # only first, if multipath is False - path = os.getenv('XDG_DATA_DIRS', - os.pathsep.join(['/usr/local/share', '/usr/share'])) - pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] - if appname: - if version: - appname = os.path.join(appname, version) - pathlist = [os.sep.join([x, appname]) for x in pathlist] - - if multipath: - path = os.pathsep.join(pathlist) - else: - path = pathlist[0] - return path - - if appname and version: - path = os.path.join(path, version) - return path - - -def user_config_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific config dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. 
- "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> - for a discussion of issues. - - Typical user config directories are: - Mac OS X: same as user_data_dir - Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined - Win *: same as user_data_dir - - For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. - That means, by default "~/.config/<AppName>". - """ - if system in ["win32", "darwin"]: - path = user_data_dir(appname, appauthor, None, roaming) - else: - path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def site_config_dir(appname=None, appauthor=None, version=None, multipath=False): - r"""Return full path to the user-shared data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "multipath" is an optional parameter only applicable to *nix - which indicates that the entire list of config dirs should be - returned. 
By default, the first item from XDG_CONFIG_DIRS is - returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set - - Typical site config directories are: - Mac OS X: same as site_data_dir - Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in - $XDG_CONFIG_DIRS - Win *: same as site_data_dir - Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) - - For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False - - WARNING: Do not use this on Windows. See the Vista-Fail note above for why. - """ - if system in ["win32", "darwin"]: - path = site_data_dir(appname, appauthor) - if appname and version: - path = os.path.join(path, version) - else: - # XDG default for $XDG_CONFIG_DIRS - # only first, if multipath is False - path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg') - pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] - if appname: - if version: - appname = os.path.join(appname, version) - pathlist = [os.sep.join([x, appname]) for x in pathlist] - - if multipath: - path = os.pathsep.join(pathlist) - else: - path = pathlist[0] - return path - - -def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True): - r"""Return full path to the user-specific cache dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "opinion" (boolean) can be False to disable the appending of - "Cache" to the base app data dir for Windows. 
See - discussion below. - - Typical user cache directories are: - Mac OS X: ~/Library/Caches/<AppName> - Unix: ~/.cache/<AppName> (XDG default) - Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache - Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache - - On Windows the only suggestion in the MSDN docs is that local settings go in - the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming - app data dir (the default returned by `user_data_dir` above). Apps typically - put cache data somewhere *under* the given dir here. Some examples: - ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache - ...\Acme\SuperApp\Cache\1.0 - OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. - This can be disabled with the `opinion=False` option. - """ - if system == "win32": - if appauthor is None: - appauthor = appname - path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - if opinion: - path = os.path.join(path, "Cache") - elif system == 'darwin': - path = os.path.expanduser('~/Library/Caches') - if appname: - path = os.path.join(path, appname) - else: - path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def user_state_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific state dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. 
- "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> - for a discussion of issues. - - Typical user state directories are: - Mac OS X: same as user_data_dir - Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined - Win *: same as user_data_dir - - For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state> - to extend the XDG spec and support $XDG_STATE_HOME. - - That means, by default "~/.local/state/<AppName>". - """ - if system in ["win32", "darwin"]: - path = user_data_dir(appname, appauthor, None, roaming) - else: - path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def user_log_dir(appname=None, appauthor=None, version=None, opinion=True): - r"""Return full path to the user-specific log dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". 
- Only applied when appname is present. - "opinion" (boolean) can be False to disable the appending of - "Logs" to the base app data dir for Windows, and "log" to the - base cache dir for Unix. See discussion below. - - Typical user log directories are: - Mac OS X: ~/Library/Logs/<AppName> - Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined - Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs - Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs - - On Windows the only suggestion in the MSDN docs is that local settings - go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in - examples of what some windows apps use for a logs dir.) - - OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA` - value for Windows and appends "log" to the user cache dir for Unix. - This can be disabled with the `opinion=False` option. - """ - if system == "darwin": - path = os.path.join( - os.path.expanduser('~/Library/Logs'), - appname) - elif system == "win32": - path = user_data_dir(appname, appauthor, version) - version = False - if opinion: - path = os.path.join(path, "Logs") - else: - path = user_cache_dir(appname, appauthor, version) - version = False - if opinion: - path = os.path.join(path, "log") - if appname and version: - path = os.path.join(path, version) - return path - - -class AppDirs(object): - """Convenience wrapper for getting application dirs.""" - def __init__(self, appname=None, appauthor=None, version=None, - roaming=False, multipath=False): - self.appname = appname - self.appauthor = appauthor - self.version = version - self.roaming = roaming - self.multipath = multipath - - @property - def user_data_dir(self): - return user_data_dir(self.appname, self.appauthor, - version=self.version, roaming=self.roaming) - - @property - def site_data_dir(self): - return site_data_dir(self.appname, self.appauthor, - version=self.version, multipath=self.multipath) - - 
@property - def user_config_dir(self): - return user_config_dir(self.appname, self.appauthor, - version=self.version, roaming=self.roaming) - - @property - def site_config_dir(self): - return site_config_dir(self.appname, self.appauthor, - version=self.version, multipath=self.multipath) - - @property - def user_cache_dir(self): - return user_cache_dir(self.appname, self.appauthor, - version=self.version) - - @property - def user_state_dir(self): - return user_state_dir(self.appname, self.appauthor, - version=self.version) - - @property - def user_log_dir(self): - return user_log_dir(self.appname, self.appauthor, - version=self.version) - - -#---- internal support stuff - -def _get_win_folder_from_registry(csidl_name): - """This is a fallback technique at best. I'm not sure if using the - registry for this guarantees us the correct answer for all CSIDL_* - names. - """ - if PY3: - import winreg as _winreg - else: - import _winreg - - shell_folder_name = { - "CSIDL_APPDATA": "AppData", - "CSIDL_COMMON_APPDATA": "Common AppData", - "CSIDL_LOCAL_APPDATA": "Local AppData", - }[csidl_name] - - key = _winreg.OpenKey( - _winreg.HKEY_CURRENT_USER, - r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" - ) - dir, type = _winreg.QueryValueEx(key, shell_folder_name) - return dir - - -def _get_win_folder_with_pywin32(csidl_name): - from win32com.shell import shellcon, shell - dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0) - # Try to make this a unicode path because SHGetFolderPath does - # not return unicode strings when there is unicode data in the - # path. - try: - dir = unicode(dir) - - # Downgrade to short path name if have highbit chars. See - # <http://bugs.activestate.com/show_bug.cgi?id=85099>. 
- has_high_char = False - for c in dir: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - try: - import win32api - dir = win32api.GetShortPathName(dir) - except ImportError: - pass - except UnicodeError: - pass - return dir - - -def _get_win_folder_with_ctypes(csidl_name): - import ctypes - - csidl_const = { - "CSIDL_APPDATA": 26, - "CSIDL_COMMON_APPDATA": 35, - "CSIDL_LOCAL_APPDATA": 28, - }[csidl_name] - - buf = ctypes.create_unicode_buffer(1024) - ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) - - # Downgrade to short path name if have highbit chars. See - # <http://bugs.activestate.com/show_bug.cgi?id=85099>. - has_high_char = False - for c in buf: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - buf2 = ctypes.create_unicode_buffer(1024) - if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): - buf = buf2 - - return buf.value - -def _get_win_folder_with_jna(csidl_name): - import array - from com.sun import jna - from com.sun.jna.platform import win32 - - buf_size = win32.WinDef.MAX_PATH * 2 - buf = array.zeros('c', buf_size) - shell = win32.Shell32.INSTANCE - shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf) - dir = jna.Native.toString(buf.tostring()).rstrip("\0") - - # Downgrade to short path name if have highbit chars. See - # <http://bugs.activestate.com/show_bug.cgi?id=85099>. 
- has_high_char = False - for c in dir: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - buf = array.zeros('c', buf_size) - kernel = win32.Kernel32.INSTANCE - if kernel.GetShortPathName(dir, buf, buf_size): - dir = jna.Native.toString(buf.tostring()).rstrip("\0") - - return dir - -if system == "win32": - try: - from ctypes import windll - _get_win_folder = _get_win_folder_with_ctypes - except ImportError: - try: - import com.sun.jna - _get_win_folder = _get_win_folder_with_jna - except ImportError: - _get_win_folder = _get_win_folder_from_registry - - -#---- self test code - -if __name__ == "__main__": - appname = "MyApp" - appauthor = "MyCompany" - - props = ("user_data_dir", - "user_config_dir", - "user_cache_dir", - "user_state_dir", - "user_log_dir", - "site_data_dir", - "site_config_dir") - - print("-- app dirs %s --" % __version__) - - print("-- app dirs (with optional 'version')") - dirs = AppDirs(appname, appauthor, version="1.0") - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (without optional 'version')") - dirs = AppDirs(appname, appauthor) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (without optional 'appauthor')") - dirs = AppDirs(appname) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (with disabled 'appauthor')") - dirs = AppDirs(appname, appauthor=False) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/__init__.py deleted file mode 100644 index 8fdee66..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -"""CacheControl import Interface. 
- -Make it easy to import from cachecontrol without long namespaces. -""" -__author__ = "Eric Larson" -__email__ = "eric@ionrock.org" -__version__ = "0.12.5" - -from .wrapper import CacheControl -from .adapter import CacheControlAdapter -from .controller import CacheController diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/_cmd.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/_cmd.py deleted file mode 100644 index f1e0ad9..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/_cmd.py +++ /dev/null @@ -1,57 +0,0 @@ -import logging - -from pip._vendor import requests - -from pip._vendor.cachecontrol.adapter import CacheControlAdapter -from pip._vendor.cachecontrol.cache import DictCache -from pip._vendor.cachecontrol.controller import logger - -from argparse import ArgumentParser - - -def setup_logging(): - logger.setLevel(logging.DEBUG) - handler = logging.StreamHandler() - logger.addHandler(handler) - - -def get_session(): - adapter = CacheControlAdapter( - DictCache(), cache_etags=True, serializer=None, heuristic=None - ) - sess = requests.Session() - sess.mount("http://", adapter) - sess.mount("https://", adapter) - - sess.cache_controller = adapter.controller - return sess - - -def get_args(): - parser = ArgumentParser() - parser.add_argument("url", help="The URL to try and cache") - return parser.parse_args() - - -def main(args=None): - args = get_args() - sess = get_session() - - # Make a request to get a response - resp = sess.get(args.url) - - # Turn on logging - setup_logging() - - # try setting the cache - sess.cache_controller.cache_response(resp.request, resp.raw) - - # Now try to get it - if sess.cache_controller.cached_request(resp.request): - print("Cached!") - else: - print("Not cached :(") - - -if __name__ == "__main__": - main() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/adapter.py 
b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/adapter.py deleted file mode 100644 index 780eb28..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/adapter.py +++ /dev/null @@ -1,133 +0,0 @@ -import types -import functools -import zlib - -from pip._vendor.requests.adapters import HTTPAdapter - -from .controller import CacheController -from .cache import DictCache -from .filewrapper import CallbackFileWrapper - - -class CacheControlAdapter(HTTPAdapter): - invalidating_methods = {"PUT", "DELETE"} - - def __init__( - self, - cache=None, - cache_etags=True, - controller_class=None, - serializer=None, - heuristic=None, - cacheable_methods=None, - *args, - **kw - ): - super(CacheControlAdapter, self).__init__(*args, **kw) - self.cache = cache or DictCache() - self.heuristic = heuristic - self.cacheable_methods = cacheable_methods or ("GET",) - - controller_factory = controller_class or CacheController - self.controller = controller_factory( - self.cache, cache_etags=cache_etags, serializer=serializer - ) - - def send(self, request, cacheable_methods=None, **kw): - """ - Send a request. Use the request information to see if it - exists in the cache and cache the response if we need to and can. - """ - cacheable = cacheable_methods or self.cacheable_methods - if request.method in cacheable: - try: - cached_response = self.controller.cached_request(request) - except zlib.error: - cached_response = None - if cached_response: - return self.build_response(request, cached_response, from_cache=True) - - # check for etags and add headers if appropriate - request.headers.update(self.controller.conditional_headers(request)) - - resp = super(CacheControlAdapter, self).send(request, **kw) - - return resp - - def build_response( - self, request, response, from_cache=False, cacheable_methods=None - ): - """ - Build a response by making a request or using the cache. 
- - This will end up calling send and returning a potentially - cached response - """ - cacheable = cacheable_methods or self.cacheable_methods - if not from_cache and request.method in cacheable: - # Check for any heuristics that might update headers - # before trying to cache. - if self.heuristic: - response = self.heuristic.apply(response) - - # apply any expiration heuristics - if response.status == 304: - # We must have sent an ETag request. This could mean - # that we've been expired already or that we simply - # have an etag. In either case, we want to try and - # update the cache if that is the case. - cached_response = self.controller.update_cached_response( - request, response - ) - - if cached_response is not response: - from_cache = True - - # We are done with the server response, read a - # possible response body (compliant servers will - # not return one, but we cannot be 100% sure) and - # release the connection back to the pool. - response.read(decode_content=False) - response.release_conn() - - response = cached_response - - # We always cache the 301 responses - elif response.status == 301: - self.controller.cache_response(request, response) - else: - # Wrap the response file with a wrapper that will cache the - # response when the stream has been consumed. - response._fp = CallbackFileWrapper( - response._fp, - functools.partial( - self.controller.cache_response, request, response - ), - ) - if response.chunked: - super_update_chunk_length = response._update_chunk_length - - def _update_chunk_length(self): - super_update_chunk_length() - if self.chunk_left == 0: - self._fp._close() - - response._update_chunk_length = types.MethodType( - _update_chunk_length, response - ) - - resp = super(CacheControlAdapter, self).build_response(request, response) - - # See if we should invalidate the cache. 
- if request.method in self.invalidating_methods and resp.ok: - cache_url = self.controller.cache_url(request.url) - self.cache.delete(cache_url) - - # Give the request a from_cache attr to let people use it - resp.from_cache = from_cache - - return resp - - def close(self): - self.cache.close() - super(CacheControlAdapter, self).close() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/cache.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/cache.py deleted file mode 100644 index 94e0773..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/cache.py +++ /dev/null @@ -1,39 +0,0 @@ -""" -The cache object API for implementing caches. The default is a thread -safe in-memory dictionary. -""" -from threading import Lock - - -class BaseCache(object): - - def get(self, key): - raise NotImplementedError() - - def set(self, key, value): - raise NotImplementedError() - - def delete(self, key): - raise NotImplementedError() - - def close(self): - pass - - -class DictCache(BaseCache): - - def __init__(self, init_dict=None): - self.lock = Lock() - self.data = init_dict or {} - - def get(self, key): - return self.data.get(key, None) - - def set(self, key, value): - with self.lock: - self.data.update({key: value}) - - def delete(self, key): - with self.lock: - if key in self.data: - self.data.pop(key) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/caches/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/caches/__init__.py deleted file mode 100644 index 0e1658f..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/caches/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .file_cache import FileCache # noqa -from .redis_cache import RedisCache # noqa diff --git 
a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/caches/file_cache.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/caches/file_cache.py deleted file mode 100644 index 1ba0080..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/caches/file_cache.py +++ /dev/null @@ -1,146 +0,0 @@ -import hashlib -import os -from textwrap import dedent - -from ..cache import BaseCache -from ..controller import CacheController - -try: - FileNotFoundError -except NameError: - # py2.X - FileNotFoundError = (IOError, OSError) - - -def _secure_open_write(filename, fmode): - # We only want to write to this file, so open it in write only mode - flags = os.O_WRONLY - - # os.O_CREAT | os.O_EXCL will fail if the file already exists, so we only - # will open *new* files. - # We specify this because we want to ensure that the mode we pass is the - # mode of the file. - flags |= os.O_CREAT | os.O_EXCL - - # Do not follow symlinks to prevent someone from making a symlink that - # we follow and insecurely open a cache file. - if hasattr(os, "O_NOFOLLOW"): - flags |= os.O_NOFOLLOW - - # On Windows we'll mark this file as binary - if hasattr(os, "O_BINARY"): - flags |= os.O_BINARY - - # Before we open our file, we want to delete any existing file that is - # there - try: - os.remove(filename) - except (IOError, OSError): - # The file must not exist already, so we can just skip ahead to opening - pass - - # Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a - # race condition happens between the os.remove and this line, that an - # error will be raised. Because we utilize a lockfile this should only - # happen if someone is attempting to attack us. 
- fd = os.open(filename, flags, fmode) - try: - return os.fdopen(fd, "wb") - - except: - # An error occurred wrapping our FD in a file object - os.close(fd) - raise - - -class FileCache(BaseCache): - - def __init__( - self, - directory, - forever=False, - filemode=0o0600, - dirmode=0o0700, - use_dir_lock=None, - lock_class=None, - ): - - if use_dir_lock is not None and lock_class is not None: - raise ValueError("Cannot use use_dir_lock and lock_class together") - - try: - from pip._vendor.lockfile import LockFile - from pip._vendor.lockfile.mkdirlockfile import MkdirLockFile - except ImportError: - notice = dedent( - """ - NOTE: In order to use the FileCache you must have - lockfile installed. You can install it via pip: - pip install lockfile - """ - ) - raise ImportError(notice) - - else: - if use_dir_lock: - lock_class = MkdirLockFile - - elif lock_class is None: - lock_class = LockFile - - self.directory = directory - self.forever = forever - self.filemode = filemode - self.dirmode = dirmode - self.lock_class = lock_class - - @staticmethod - def encode(x): - return hashlib.sha224(x.encode()).hexdigest() - - def _fn(self, name): - # NOTE: This method should not change as some may depend on it. 
- # See: https://github.com/ionrock/cachecontrol/issues/63 - hashed = self.encode(name) - parts = list(hashed[:5]) + [hashed] - return os.path.join(self.directory, *parts) - - def get(self, key): - name = self._fn(key) - try: - with open(name, "rb") as fh: - return fh.read() - - except FileNotFoundError: - return None - - def set(self, key, value): - name = self._fn(key) - - # Make sure the directory exists - try: - os.makedirs(os.path.dirname(name), self.dirmode) - except (IOError, OSError): - pass - - with self.lock_class(name) as lock: - # Write our actual file - with _secure_open_write(lock.path, self.filemode) as fh: - fh.write(value) - - def delete(self, key): - name = self._fn(key) - if not self.forever: - try: - os.remove(name) - except FileNotFoundError: - pass - - -def url_to_file_path(url, filecache): - """Return the file cache path based on the URL. - - This does not ensure the file exists! - """ - key = CacheController.cache_url(url) - return filecache._fn(key) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/caches/redis_cache.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/caches/redis_cache.py deleted file mode 100644 index ed705ce..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/caches/redis_cache.py +++ /dev/null @@ -1,33 +0,0 @@ -from __future__ import division - -from datetime import datetime -from pip._vendor.cachecontrol.cache import BaseCache - - -class RedisCache(BaseCache): - - def __init__(self, conn): - self.conn = conn - - def get(self, key): - return self.conn.get(key) - - def set(self, key, value, expires=None): - if not expires: - self.conn.set(key, value) - else: - expires = expires - datetime.utcnow() - self.conn.setex(key, int(expires.total_seconds()), value) - - def delete(self, key): - self.conn.delete(key) - - def clear(self): - """Helper for clearing all the keys in a database. 
Use with - caution!""" - for key in self.conn.keys(): - self.conn.delete(key) - - def close(self): - """Redis uses connection pooling, no need to close the connection.""" - pass diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/compat.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/compat.py deleted file mode 100644 index 33b5aed..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/compat.py +++ /dev/null @@ -1,29 +0,0 @@ -try: - from urllib.parse import urljoin -except ImportError: - from urlparse import urljoin - - -try: - import cPickle as pickle -except ImportError: - import pickle - - -# Handle the case where the requests module has been patched to not have -# urllib3 bundled as part of its source. -try: - from pip._vendor.requests.packages.urllib3.response import HTTPResponse -except ImportError: - from pip._vendor.urllib3.response import HTTPResponse - -try: - from pip._vendor.requests.packages.urllib3.util import is_fp_closed -except ImportError: - from pip._vendor.urllib3.util import is_fp_closed - -# Replicate some six behaviour -try: - text_type = unicode -except NameError: - text_type = str diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/controller.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/controller.py deleted file mode 100644 index 1b2b943..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/controller.py +++ /dev/null @@ -1,367 +0,0 @@ -""" -The httplib2 algorithms ported for use with requests. 
-""" -import logging -import re -import calendar -import time -from email.utils import parsedate_tz - -from pip._vendor.requests.structures import CaseInsensitiveDict - -from .cache import DictCache -from .serialize import Serializer - - -logger = logging.getLogger(__name__) - -URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?") - - -def parse_uri(uri): - """Parses a URI using the regex given in Appendix B of RFC 3986. - - (scheme, authority, path, query, fragment) = parse_uri(uri) - """ - groups = URI.match(uri).groups() - return (groups[1], groups[3], groups[4], groups[6], groups[8]) - - -class CacheController(object): - """An interface to see if request should cached or not. - """ - - def __init__( - self, cache=None, cache_etags=True, serializer=None, status_codes=None - ): - self.cache = cache or DictCache() - self.cache_etags = cache_etags - self.serializer = serializer or Serializer() - self.cacheable_status_codes = status_codes or (200, 203, 300, 301) - - @classmethod - def _urlnorm(cls, uri): - """Normalize the URL to create a safe key for the cache""" - (scheme, authority, path, query, fragment) = parse_uri(uri) - if not scheme or not authority: - raise Exception("Only absolute URIs are allowed. uri = %s" % uri) - - scheme = scheme.lower() - authority = authority.lower() - - if not path: - path = "/" - - # Could do syntax based normalization of the URI before - # computing the digest. See Section 6.2.2 of Std 66. 
- request_uri = query and "?".join([path, query]) or path - defrag_uri = scheme + "://" + authority + request_uri - - return defrag_uri - - @classmethod - def cache_url(cls, uri): - return cls._urlnorm(uri) - - def parse_cache_control(self, headers): - known_directives = { - # https://tools.ietf.org/html/rfc7234#section-5.2 - "max-age": (int, True), - "max-stale": (int, False), - "min-fresh": (int, True), - "no-cache": (None, False), - "no-store": (None, False), - "no-transform": (None, False), - "only-if-cached": (None, False), - "must-revalidate": (None, False), - "public": (None, False), - "private": (None, False), - "proxy-revalidate": (None, False), - "s-maxage": (int, True), - } - - cc_headers = headers.get("cache-control", headers.get("Cache-Control", "")) - - retval = {} - - for cc_directive in cc_headers.split(","): - if not cc_directive.strip(): - continue - - parts = cc_directive.split("=", 1) - directive = parts[0].strip() - - try: - typ, required = known_directives[directive] - except KeyError: - logger.debug("Ignoring unknown cache-control directive: %s", directive) - continue - - if not typ or not required: - retval[directive] = None - if typ: - try: - retval[directive] = typ(parts[1].strip()) - except IndexError: - if required: - logger.debug( - "Missing value for cache-control " "directive: %s", - directive, - ) - except ValueError: - logger.debug( - "Invalid value for cache-control directive " "%s, must be %s", - directive, - typ.__name__, - ) - - return retval - - def cached_request(self, request): - """ - Return a cached response if it exists in the cache, otherwise - return False. 
- """ - cache_url = self.cache_url(request.url) - logger.debug('Looking up "%s" in the cache', cache_url) - cc = self.parse_cache_control(request.headers) - - # Bail out if the request insists on fresh data - if "no-cache" in cc: - logger.debug('Request header has "no-cache", cache bypassed') - return False - - if "max-age" in cc and cc["max-age"] == 0: - logger.debug('Request header has "max_age" as 0, cache bypassed') - return False - - # Request allows serving from the cache, let's see if we find something - cache_data = self.cache.get(cache_url) - if cache_data is None: - logger.debug("No cache entry available") - return False - - # Check whether it can be deserialized - resp = self.serializer.loads(request, cache_data) - if not resp: - logger.warning("Cache entry deserialization failed, entry ignored") - return False - - # If we have a cached 301, return it immediately. We don't - # need to test our response for other headers b/c it is - # intrinsically "cacheable" as it is Permanent. - # See: - # https://tools.ietf.org/html/rfc7231#section-6.4.2 - # - # Client can try to refresh the value by repeating the request - # with cache busting headers as usual (ie no-cache). - if resp.status == 301: - msg = ( - 'Returning cached "301 Moved Permanently" response ' - "(ignoring date and etag information)" - ) - logger.debug(msg) - return resp - - headers = CaseInsensitiveDict(resp.headers) - if not headers or "date" not in headers: - if "etag" not in headers: - # Without date or etag, the cached response can never be used - # and should be deleted. - logger.debug("Purging cached response: no date or etag") - self.cache.delete(cache_url) - logger.debug("Ignoring cached response: no date") - return False - - now = time.time() - date = calendar.timegm(parsedate_tz(headers["date"])) - current_age = max(0, now - date) - logger.debug("Current age based on date: %i", current_age) - - # TODO: There is an assumption that the result will be a - # urllib3 response object. 
This may not be best since we - # could probably avoid instantiating or constructing the - # response until we know we need it. - resp_cc = self.parse_cache_control(headers) - - # determine freshness - freshness_lifetime = 0 - - # Check the max-age pragma in the cache control header - if "max-age" in resp_cc: - freshness_lifetime = resp_cc["max-age"] - logger.debug("Freshness lifetime from max-age: %i", freshness_lifetime) - - # If there isn't a max-age, check for an expires header - elif "expires" in headers: - expires = parsedate_tz(headers["expires"]) - if expires is not None: - expire_time = calendar.timegm(expires) - date - freshness_lifetime = max(0, expire_time) - logger.debug("Freshness lifetime from expires: %i", freshness_lifetime) - - # Determine if we are setting freshness limit in the - # request. Note, this overrides what was in the response. - if "max-age" in cc: - freshness_lifetime = cc["max-age"] - logger.debug( - "Freshness lifetime from request max-age: %i", freshness_lifetime - ) - - if "min-fresh" in cc: - min_fresh = cc["min-fresh"] - # adjust our current age by our min fresh - current_age += min_fresh - logger.debug("Adjusted current age from min-fresh: %i", current_age) - - # Return entry if it is fresh enough - if freshness_lifetime > current_age: - logger.debug('The response is "fresh", returning cached response') - logger.debug("%i > %i", freshness_lifetime, current_age) - return resp - - # we're not fresh. 
If we don't have an Etag, clear it out - if "etag" not in headers: - logger.debug('The cached response is "stale" with no etag, purging') - self.cache.delete(cache_url) - - # return the original handler - return False - - def conditional_headers(self, request): - cache_url = self.cache_url(request.url) - resp = self.serializer.loads(request, self.cache.get(cache_url)) - new_headers = {} - - if resp: - headers = CaseInsensitiveDict(resp.headers) - - if "etag" in headers: - new_headers["If-None-Match"] = headers["ETag"] - - if "last-modified" in headers: - new_headers["If-Modified-Since"] = headers["Last-Modified"] - - return new_headers - - def cache_response(self, request, response, body=None, status_codes=None): - """ - Algorithm for caching requests. - - This assumes a requests Response object. - """ - # From httplib2: Don't cache 206's since we aren't going to - # handle byte range requests - cacheable_status_codes = status_codes or self.cacheable_status_codes - if response.status not in cacheable_status_codes: - logger.debug( - "Status code %s not in %s", response.status, cacheable_status_codes - ) - return - - response_headers = CaseInsensitiveDict(response.headers) - - # If we've been given a body, our response has a Content-Length, that - # Content-Length is valid then we can check to see if the body we've - # been given matches the expected size, and if it doesn't we'll just - # skip trying to cache it. 
- if ( - body is not None - and "content-length" in response_headers - and response_headers["content-length"].isdigit() - and int(response_headers["content-length"]) != len(body) - ): - return - - cc_req = self.parse_cache_control(request.headers) - cc = self.parse_cache_control(response_headers) - - cache_url = self.cache_url(request.url) - logger.debug('Updating cache with response from "%s"', cache_url) - - # Delete it from the cache if we happen to have it stored there - no_store = False - if "no-store" in cc: - no_store = True - logger.debug('Response header has "no-store"') - if "no-store" in cc_req: - no_store = True - logger.debug('Request header has "no-store"') - if no_store and self.cache.get(cache_url): - logger.debug('Purging existing cache entry to honor "no-store"') - self.cache.delete(cache_url) - if no_store: - return - - # If we've been given an etag, then keep the response - if self.cache_etags and "etag" in response_headers: - logger.debug("Caching due to etag") - self.cache.set( - cache_url, self.serializer.dumps(request, response, body=body) - ) - - # Add to the cache any 301s. We do this before looking that - # the Date headers. - elif response.status == 301: - logger.debug("Caching permanant redirect") - self.cache.set(cache_url, self.serializer.dumps(request, response)) - - # Add to the cache if the response headers demand it. If there - # is no date header then we can't do anything about expiring - # the cache. - elif "date" in response_headers: - # cache when there is a max-age > 0 - if "max-age" in cc and cc["max-age"] > 0: - logger.debug("Caching b/c date exists and max-age > 0") - self.cache.set( - cache_url, self.serializer.dumps(request, response, body=body) - ) - - # If the request can expire, it means we should cache it - # in the meantime. 
- elif "expires" in response_headers: - if response_headers["expires"]: - logger.debug("Caching b/c of expires header") - self.cache.set( - cache_url, self.serializer.dumps(request, response, body=body) - ) - - def update_cached_response(self, request, response): - """On a 304 we will get a new set of headers that we want to - update our cached value with, assuming we have one. - - This should only ever be called when we've sent an ETag and - gotten a 304 as the response. - """ - cache_url = self.cache_url(request.url) - - cached_response = self.serializer.loads(request, self.cache.get(cache_url)) - - if not cached_response: - # we didn't have a cached response - return response - - # Lets update our headers with the headers from the new request: - # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1 - # - # The server isn't supposed to send headers that would make - # the cached body invalid. But... just in case, we'll be sure - # to strip out ones we know that might be problmatic due to - # typical assumptions. 
- excluded_headers = ["content-length"] - - cached_response.headers.update( - dict( - (k, v) - for k, v in response.headers.items() - if k.lower() not in excluded_headers - ) - ) - - # we want a 200 b/c we have content via the cache - cached_response.status = 200 - - # update our cache - self.cache.set(cache_url, self.serializer.dumps(request, cached_response)) - - return cached_response diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/filewrapper.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/filewrapper.py deleted file mode 100644 index 30ed4c5..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/filewrapper.py +++ /dev/null @@ -1,80 +0,0 @@ -from io import BytesIO - - -class CallbackFileWrapper(object): - """ - Small wrapper around a fp object which will tee everything read into a - buffer, and when that file is closed it will execute a callback with the - contents of that buffer. - - All attributes are proxied to the underlying file object. - - This class uses members with a double underscore (__) leading prefix so as - not to accidentally shadow an attribute. - """ - - def __init__(self, fp, callback): - self.__buf = BytesIO() - self.__fp = fp - self.__callback = callback - - def __getattr__(self, name): - # The vaguaries of garbage collection means that self.__fp is - # not always set. By using __getattribute__ and the private - # name[0] allows looking up the attribute value and raising an - # AttributeError when it doesn't exist. This stop thigns from - # infinitely recursing calls to getattr in the case where - # self.__fp hasn't been set. 
- # - # [0] https://docs.python.org/2/reference/expressions.html#atom-identifiers - fp = self.__getattribute__("_CallbackFileWrapper__fp") - return getattr(fp, name) - - def __is_fp_closed(self): - try: - return self.__fp.fp is None - - except AttributeError: - pass - - try: - return self.__fp.closed - - except AttributeError: - pass - - # We just don't cache it then. - # TODO: Add some logging here... - return False - - def _close(self): - if self.__callback: - self.__callback(self.__buf.getvalue()) - - # We assign this to None here, because otherwise we can get into - # really tricky problems where the CPython interpreter dead locks - # because the callback is holding a reference to something which - # has a __del__ method. Setting this to None breaks the cycle - # and allows the garbage collector to do it's thing normally. - self.__callback = None - - def read(self, amt=None): - data = self.__fp.read(amt) - self.__buf.write(data) - if self.__is_fp_closed(): - self._close() - - return data - - def _safe_read(self, amt): - data = self.__fp._safe_read(amt) - if amt == 2 and data == b"\r\n": - # urllib executes this read to toss the CRLF at the end - # of the chunk. 
- return data - - self.__buf.write(data) - if self.__is_fp_closed(): - self._close() - - return data diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/heuristics.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/heuristics.py deleted file mode 100644 index 6c0e979..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/heuristics.py +++ /dev/null @@ -1,135 +0,0 @@ -import calendar -import time - -from email.utils import formatdate, parsedate, parsedate_tz - -from datetime import datetime, timedelta - -TIME_FMT = "%a, %d %b %Y %H:%M:%S GMT" - - -def expire_after(delta, date=None): - date = date or datetime.utcnow() - return date + delta - - -def datetime_to_header(dt): - return formatdate(calendar.timegm(dt.timetuple())) - - -class BaseHeuristic(object): - - def warning(self, response): - """ - Return a valid 1xx warning header value describing the cache - adjustments. - - The response is provided too allow warnings like 113 - http://tools.ietf.org/html/rfc7234#section-5.5.4 where we need - to explicitly say response is over 24 hours old. - """ - return '110 - "Response is Stale"' - - def update_headers(self, response): - """Update the response headers with any new headers. - - NOTE: This SHOULD always include some Warning header to - signify that the response was cached by the client, not - by way of the provided headers. - """ - return {} - - def apply(self, response): - updated_headers = self.update_headers(response) - - if updated_headers: - response.headers.update(updated_headers) - warning_header_value = self.warning(response) - if warning_header_value is not None: - response.headers.update({"Warning": warning_header_value}) - - return response - - -class OneDayCache(BaseHeuristic): - """ - Cache the response by providing an expires 1 day in the - future. 
- """ - - def update_headers(self, response): - headers = {} - - if "expires" not in response.headers: - date = parsedate(response.headers["date"]) - expires = expire_after(timedelta(days=1), date=datetime(*date[:6])) - headers["expires"] = datetime_to_header(expires) - headers["cache-control"] = "public" - return headers - - -class ExpiresAfter(BaseHeuristic): - """ - Cache **all** requests for a defined time period. - """ - - def __init__(self, **kw): - self.delta = timedelta(**kw) - - def update_headers(self, response): - expires = expire_after(self.delta) - return {"expires": datetime_to_header(expires), "cache-control": "public"} - - def warning(self, response): - tmpl = "110 - Automatically cached for %s. Response might be stale" - return tmpl % self.delta - - -class LastModified(BaseHeuristic): - """ - If there is no Expires header already, fall back on Last-Modified - using the heuristic from - http://tools.ietf.org/html/rfc7234#section-4.2.2 - to calculate a reasonable value. - - Firefox also does something like this per - https://developer.mozilla.org/en-US/docs/Web/HTTP/Caching_FAQ - http://lxr.mozilla.org/mozilla-release/source/netwerk/protocol/http/nsHttpResponseHead.cpp#397 - Unlike mozilla we limit this to 24-hr. 
- """ - cacheable_by_default_statuses = { - 200, 203, 204, 206, 300, 301, 404, 405, 410, 414, 501 - } - - def update_headers(self, resp): - headers = resp.headers - - if "expires" in headers: - return {} - - if "cache-control" in headers and headers["cache-control"] != "public": - return {} - - if resp.status not in self.cacheable_by_default_statuses: - return {} - - if "date" not in headers or "last-modified" not in headers: - return {} - - date = calendar.timegm(parsedate_tz(headers["date"])) - last_modified = parsedate(headers["last-modified"]) - if date is None or last_modified is None: - return {} - - now = time.time() - current_age = max(0, now - date) - delta = date - calendar.timegm(last_modified) - freshness_lifetime = max(0, min(delta / 10, 24 * 3600)) - if freshness_lifetime <= current_age: - return {} - - expires = date + freshness_lifetime - return {"expires": time.strftime(TIME_FMT, time.gmtime(expires))} - - def warning(self, resp): - return None diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/serialize.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/serialize.py deleted file mode 100644 index ec43ff2..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/serialize.py +++ /dev/null @@ -1,186 +0,0 @@ -import base64 -import io -import json -import zlib - -from pip._vendor import msgpack -from pip._vendor.requests.structures import CaseInsensitiveDict - -from .compat import HTTPResponse, pickle, text_type - - -def _b64_decode_bytes(b): - return base64.b64decode(b.encode("ascii")) - - -def _b64_decode_str(s): - return _b64_decode_bytes(s).decode("utf8") - - -class Serializer(object): - - def dumps(self, request, response, body=None): - response_headers = CaseInsensitiveDict(response.headers) - - if body is None: - body = response.read(decode_content=False) - - # NOTE: 99% sure this is dead code. 
I'm only leaving it - # here b/c I don't have a test yet to prove - # it. Basically, before using - # `cachecontrol.filewrapper.CallbackFileWrapper`, - # this made an effort to reset the file handle. The - # `CallbackFileWrapper` short circuits this code by - # setting the body as the content is consumed, the - # result being a `body` argument is *always* passed - # into cache_response, and in turn, - # `Serializer.dump`. - response._fp = io.BytesIO(body) - - # NOTE: This is all a bit weird, but it's really important that on - # Python 2.x these objects are unicode and not str, even when - # they contain only ascii. The problem here is that msgpack - # understands the difference between unicode and bytes and we - # have it set to differentiate between them, however Python 2 - # doesn't know the difference. Forcing these to unicode will be - # enough to have msgpack know the difference. - data = { - u"response": { - u"body": body, - u"headers": dict( - (text_type(k), text_type(v)) for k, v in response.headers.items() - ), - u"status": response.status, - u"version": response.version, - u"reason": text_type(response.reason), - u"strict": response.strict, - u"decode_content": response.decode_content, - } - } - - # Construct our vary headers - data[u"vary"] = {} - if u"vary" in response_headers: - varied_headers = response_headers[u"vary"].split(",") - for header in varied_headers: - header = text_type(header).strip() - header_value = request.headers.get(header, None) - if header_value is not None: - header_value = text_type(header_value) - data[u"vary"][header] = header_value - - return b",".join([b"cc=4", msgpack.dumps(data, use_bin_type=True)]) - - def loads(self, request, data): - # Short circuit if we've been given an empty set of data - if not data: - return - - # Determine what version of the serializer the data was serialized - # with - try: - ver, data = data.split(b",", 1) - except ValueError: - ver = b"cc=0" - - # Make sure that our "ver" is actually a 
version and isn't a false - # positive from a , being in the data stream. - if ver[:3] != b"cc=": - data = ver + data - ver = b"cc=0" - - # Get the version number out of the cc=N - ver = ver.split(b"=", 1)[-1].decode("ascii") - - # Dispatch to the actual load method for the given version - try: - return getattr(self, "_loads_v{}".format(ver))(request, data) - - except AttributeError: - # This is a version we don't have a loads function for, so we'll - # just treat it as a miss and return None - return - - def prepare_response(self, request, cached): - """Verify our vary headers match and construct a real urllib3 - HTTPResponse object. - """ - # Special case the '*' Vary value as it means we cannot actually - # determine if the cached response is suitable for this request. - if "*" in cached.get("vary", {}): - return - - # Ensure that the Vary headers for the cached response match our - # request - for header, value in cached.get("vary", {}).items(): - if request.headers.get(header, None) != value: - return - - body_raw = cached["response"].pop("body") - - headers = CaseInsensitiveDict(data=cached["response"]["headers"]) - if headers.get("transfer-encoding", "") == "chunked": - headers.pop("transfer-encoding") - - cached["response"]["headers"] = headers - - try: - body = io.BytesIO(body_raw) - except TypeError: - # This can happen if cachecontrol serialized to v1 format (pickle) - # using Python 2. A Python 2 str(byte string) will be unpickled as - # a Python 3 str (unicode string), which will cause the above to - # fail with: - # - # TypeError: 'str' does not support the buffer interface - body = io.BytesIO(body_raw.encode("utf8")) - - return HTTPResponse(body=body, preload_content=False, **cached["response"]) - - def _loads_v0(self, request, data): - # The original legacy cache data. This doesn't contain enough - # information to construct everything we need, so we'll treat this as - # a miss. 
- return - - def _loads_v1(self, request, data): - try: - cached = pickle.loads(data) - except ValueError: - return - - return self.prepare_response(request, cached) - - def _loads_v2(self, request, data): - try: - cached = json.loads(zlib.decompress(data).decode("utf8")) - except (ValueError, zlib.error): - return - - # We need to decode the items that we've base64 encoded - cached["response"]["body"] = _b64_decode_bytes(cached["response"]["body"]) - cached["response"]["headers"] = dict( - (_b64_decode_str(k), _b64_decode_str(v)) - for k, v in cached["response"]["headers"].items() - ) - cached["response"]["reason"] = _b64_decode_str(cached["response"]["reason"]) - cached["vary"] = dict( - (_b64_decode_str(k), _b64_decode_str(v) if v is not None else v) - for k, v in cached["vary"].items() - ) - - return self.prepare_response(request, cached) - - def _loads_v3(self, request, data): - # Due to Python 2 encoding issues, it's impossible to know for sure - # exactly how to load v3 entries, thus we'll treat these as a miss so - # that they get rewritten out as v4 entries. 
- return - - def _loads_v4(self, request, data): - try: - cached = msgpack.loads(data, encoding="utf-8") - except ValueError: - return - - return self.prepare_response(request, cached) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/wrapper.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/wrapper.py deleted file mode 100644 index 265bfc8..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/cachecontrol/wrapper.py +++ /dev/null @@ -1,29 +0,0 @@ -from .adapter import CacheControlAdapter -from .cache import DictCache - - -def CacheControl( - sess, - cache=None, - cache_etags=True, - serializer=None, - heuristic=None, - controller_class=None, - adapter_class=None, - cacheable_methods=None, -): - - cache = cache or DictCache() - adapter_class = adapter_class or CacheControlAdapter - adapter = adapter_class( - cache, - cache_etags=cache_etags, - serializer=serializer, - heuristic=heuristic, - controller_class=controller_class, - cacheable_methods=cacheable_methods, - ) - sess.mount("http://", adapter) - sess.mount("https://", adapter) - - return sess diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/certifi/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/certifi/__init__.py deleted file mode 100644 index ef71f3a..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/certifi/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .core import where - -__version__ = "2018.11.29" diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/certifi/__main__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/certifi/__main__.py deleted file mode 100644 index ae2aff5..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/certifi/__main__.py +++ /dev/null @@ -1,2 +0,0 @@ -from pip._vendor.certifi import where -print(where()) diff 
--git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/certifi/cacert.pem b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/certifi/cacert.pem deleted file mode 100644 index db68797..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/certifi/cacert.pem +++ /dev/null @@ -1,4512 +0,0 @@ - -# Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA -# Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA -# Label: "GlobalSign Root CA" -# Serial: 4835703278459707669005204 -# MD5 Fingerprint: 3e:45:52:15:09:51:92:e1:b7:5d:37:9f:b1:87:29:8a -# SHA1 Fingerprint: b1:bc:96:8b:d4:f4:9d:62:2a:a8:9a:81:f2:15:01:52:a4:1d:82:9c -# SHA256 Fingerprint: eb:d4:10:40:e4:bb:3e:c7:42:c9:e3:81:d3:1e:f2:a4:1a:48:b6:68:5c:96:e7:ce:f3:c1:df:6c:d4:33:1c:99 ------BEGIN CERTIFICATE----- -MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG -A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv -b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw -MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i -YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT -aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ -jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp -xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp -1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG -snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ -U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8 -9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E -BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B -AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz -yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE -38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP -AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad 
-DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME -HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== ------END CERTIFICATE----- - -# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2 -# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2 -# Label: "GlobalSign Root CA - R2" -# Serial: 4835703278459682885658125 -# MD5 Fingerprint: 94:14:77:7e:3e:5e:fd:8f:30:bd:41:b0:cf:e7:d0:30 -# SHA1 Fingerprint: 75:e0:ab:b6:13:85:12:27:1c:04:f8:5f:dd:de:38:e4:b7:24:2e:fe -# SHA256 Fingerprint: ca:42:dd:41:74:5f:d0:b8:1e:b9:02:36:2c:f9:d8:bf:71:9d:a1:bd:1b:1e:fc:94:6f:5b:4c:99:f4:2c:1b:9e ------BEGIN CERTIFICATE----- -MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G -A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp -Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1 -MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG -A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI -hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL -v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8 -eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq -tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd -C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa -zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB -mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH -V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n -bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG -3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs -J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO -291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS -ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd -AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 -TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== ------END CERTIFICATE----- - -# Issuer: 
CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only -# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only -# Label: "Verisign Class 3 Public Primary Certification Authority - G3" -# Serial: 206684696279472310254277870180966723415 -# MD5 Fingerprint: cd:68:b6:a7:c7:c4:ce:75:e0:1d:4f:57:44:61:92:09 -# SHA1 Fingerprint: 13:2d:0d:45:53:4b:69:97:cd:b2:d5:c3:39:e2:55:76:60:9b:5c:c6 -# SHA256 Fingerprint: eb:04:cf:5e:b1:f3:9a:fa:76:2f:2b:b1:20:f2:96:cb:a5:20:c1:b9:7d:b1:58:95:65:b8:1c:b9:a1:7b:72:44 ------BEGIN CERTIFICATE----- -MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQsw -CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl -cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu -LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT -aWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp -dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD -VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT -aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ -bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu -IENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg -LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMu6nFL8eB8aHm8b -N3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1EUGO+i2t -KmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGu -kxUccLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBm -CC+Vk7+qRy+oRpfwEuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJ -Xwzw3sJ2zq/3avL6QaaiMxTJ5Xpj055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWu -imi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAERSWwauSCPc/L8my/uRan2Te -2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5fj267Cz3qWhMe 
-DGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC -/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565p -F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt -TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== ------END CERTIFICATE----- - -# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited -# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited -# Label: "Entrust.net Premium 2048 Secure Server CA" -# Serial: 946069240 -# MD5 Fingerprint: ee:29:31:bc:32:7e:9a:e6:e8:b5:f7:51:b4:34:71:90 -# SHA1 Fingerprint: 50:30:06:09:1d:97:d4:f5:ae:39:f7:cb:e7:92:7d:7d:65:2d:34:31 -# SHA256 Fingerprint: 6d:c4:71:72:e0:1c:bc:b0:bf:62:58:0d:89:5f:e2:b8:ac:9a:d4:f8:73:80:1e:0c:10:b9:c8:37:d2:1e:b1:77 ------BEGIN CERTIFICATE----- -MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML -RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp -bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5 -IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp -ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0yOTA3 -MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3 -LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp -YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG -A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp -MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq -K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe -sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX -MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT -XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/ -HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH 
-4QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV -HQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJKoZIhvcNAQEFBQADggEBADub -j1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPyT/4xmf3IDExo -U8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf -zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5b -u/8j72gZyxKTJ1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+ -bYQLCIt+jerXmCHG8+c8eS9enNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/Er -fF6adulZkMV8gzURZVE= ------END CERTIFICATE----- - -# Issuer: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust -# Subject: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust -# Label: "Baltimore CyberTrust Root" -# Serial: 33554617 -# MD5 Fingerprint: ac:b6:94:a5:9c:17:e0:d7:91:52:9b:b1:97:06:a6:e4 -# SHA1 Fingerprint: d4:de:20:d0:5e:66:fc:53:fe:1a:50:88:2c:78:db:28:52:ca:e4:74 -# SHA256 Fingerprint: 16:af:57:a9:f6:76:b0:ab:12:60:95:aa:5e:ba:de:f2:2a:b3:11:19:d6:44:ac:95:cd:4b:93:db:f3:f2:6a:eb ------BEGIN CERTIFICATE----- -MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ -RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD -VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX -DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y -ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy -VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr -mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr -IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK -mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu -XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy -dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye -jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1 -BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3 -DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92 -9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx 
-jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0 -Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz -ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS -R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp ------END CERTIFICATE----- - -# Issuer: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network -# Subject: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network -# Label: "AddTrust External Root" -# Serial: 1 -# MD5 Fingerprint: 1d:35:54:04:85:78:b0:3f:42:42:4d:bf:20:73:0a:3f -# SHA1 Fingerprint: 02:fa:f3:e2:91:43:54:68:60:78:57:69:4d:f5:e4:5b:68:85:18:68 -# SHA256 Fingerprint: 68:7f:a4:51:38:22:78:ff:f0:c8:b1:1f:8d:43:d5:76:67:1c:6e:b2:bc:ea:b4:13:fb:83:d9:65:d0:6d:2f:f2 ------BEGIN CERTIFICATE----- -MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEU -MBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFs -IFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290 -MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFowbzELMAkGA1UEBhMCU0Ux -FDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRUcnVzdCBFeHRlcm5h -bCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0EgUm9v -dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvt -H7xsD821+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9 -uMq/NzgtHj6RQa1wVsfwTz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzX -mk6vBbOmcZSccbNQYArHE504B4YCqOmoaSYYkKtMsE8jqzpPhNjfzp/haW+710LX -a0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy2xSoRcRdKn23tNbE7qzN -E0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv77+ldU9U0 -WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYD -VR0PBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0 -Jvf6xCZU7wO94CTLVBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRU -cnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsx -IjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENBIFJvb3SCAQEwDQYJKoZIhvcN -AQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZlj7DYd7usQWxH 
-YINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5 -6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvC -Nr4TDea9Y355e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEX -c4g/VhsxOBi0cQ+azcgOno4uG+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5a -mnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ= ------END CERTIFICATE----- - -# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. -# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. -# Label: "Entrust Root Certification Authority" -# Serial: 1164660820 -# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4 -# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9 -# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c ------BEGIN CERTIFICATE----- -MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC -VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0 -Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW -KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl -cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw -NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw -NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy -ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV -BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ -KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo -Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4 -4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9 -KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI -rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi -94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB 
-sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi -gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo -kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE -vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA -A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t -O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua -AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP -9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/ -eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m -0vdXcDazv/wor3ElhVsT/h5/WrQ8 ------END CERTIFICATE----- - -# Issuer: CN=GeoTrust Global CA O=GeoTrust Inc. -# Subject: CN=GeoTrust Global CA O=GeoTrust Inc. -# Label: "GeoTrust Global CA" -# Serial: 144470 -# MD5 Fingerprint: f7:75:ab:29:fb:51:4e:b7:77:5e:ff:05:3c:99:8e:f5 -# SHA1 Fingerprint: de:28:f4:a4:ff:e5:b9:2f:a3:c5:03:d1:a3:49:a7:f9:96:2a:82:12 -# SHA256 Fingerprint: ff:85:6a:2d:25:1d:cd:88:d3:66:56:f4:50:12:67:98:cf:ab:aa:de:40:79:9c:72:2d:e4:d2:b5:db:36:a7:3a ------BEGIN CERTIFICATE----- -MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVT -MRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9i -YWwgQ0EwHhcNMDIwNTIxMDQwMDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQG -EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMSR2VvVHJ1c3Qg -R2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2swYYzD9 -9BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjoBbdq -fnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDv -iS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU -1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+ -bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5aszPeE4uwc2hGKceeoW -MPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTA -ephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVkDBF9qn1l -uMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKIn 
-Z57QzxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfS -tQWVYrmm3ok9Nns4d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcF -PseKUgzbFbS9bZvlxrFUaKnjaZC2mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Un -hw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6pXE0zX5IJL4hmXXeXxx12E6nV -5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvmMw== ------END CERTIFICATE----- - -# Issuer: CN=GeoTrust Universal CA O=GeoTrust Inc. -# Subject: CN=GeoTrust Universal CA O=GeoTrust Inc. -# Label: "GeoTrust Universal CA" -# Serial: 1 -# MD5 Fingerprint: 92:65:58:8b:a2:1a:31:72:73:68:5c:b4:a5:7a:07:48 -# SHA1 Fingerprint: e6:21:f3:35:43:79:05:9a:4b:68:30:9d:8a:2f:74:22:15:87:ec:79 -# SHA256 Fingerprint: a0:45:9b:9f:63:b2:25:59:f5:fa:5d:4c:6d:b3:f9:f7:2f:f1:93:42:03:35:78:f0:73:bf:1d:1b:46:cb:b9:12 ------BEGIN CERTIFICATE----- -MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEW -MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVy -c2FsIENBMB4XDTA0MDMwNDA1MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UE -BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xHjAcBgNVBAMTFUdlb1RydXN0 -IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKYV -VaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9tJPi8 -cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTT -QjOgNB0eRXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFh -F7em6fgemdtzbvQKoiFs7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2v -c7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d8Lsrlh/eezJS/R27tQahsiFepdaVaH/w -mZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7VqnJNk22CDtucvc+081xd -VHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3CgaRr0BHdCX -teGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZ -f9hBZ3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfRe -Bi9Fi1jUIxaS5BZuKGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+ -nhutxx9z3SxPGWX9f5NAEC7S8O08ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB -/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0XG0D08DYj3rWMB8GA1UdIwQY -MBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG 
-9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc -aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fX -IwjhmF7DWgh2qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzyn -ANXH/KttgCJwpQzgXQQpAvvLoJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0z -uzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsKxr2EoyNB3tZ3b4XUhRxQ4K5RirqN -Pnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxFKyDuSN/n3QmOGKja -QI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2DFKW -koRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9 -ER/frslKxfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQt -DF4JbAiXfKM9fJP/P6EUp8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/Sfuvm -bJxPgWp6ZKy7PtXny3YuxadIwVyQD8vIP/rmMuGNG2+k5o7Y+SlIis5z/iw= ------END CERTIFICATE----- - -# Issuer: CN=GeoTrust Universal CA 2 O=GeoTrust Inc. -# Subject: CN=GeoTrust Universal CA 2 O=GeoTrust Inc. -# Label: "GeoTrust Universal CA 2" -# Serial: 1 -# MD5 Fingerprint: 34:fc:b8:d0:36:db:9e:14:b3:c2:f2:db:8f:e4:94:c7 -# SHA1 Fingerprint: 37:9a:19:7b:41:85:45:35:0c:a6:03:69:f3:3c:2e:af:47:4f:20:79 -# SHA256 Fingerprint: a0:23:4f:3b:c8:52:7c:a5:62:8e:ec:81:ad:5d:69:89:5d:a5:68:0d:c9:1d:1c:b8:47:7f:33:f8:78:b9:5b:0b ------BEGIN CERTIFICATE----- -MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEW -MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVy -c2FsIENBIDIwHhcNMDQwMzA0MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYD -VQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1 -c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC -AQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0DE81 -WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUG -FF+3Qs17j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdq -XbboW0W63MOhBW9Wjo8QJqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxL -se4YuU6W3Nx2/zu+z18DwPw76L5GG//aQMJS9/7jOvdqdzXQ2o3rXhhqMcceujwb -KNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2WP0+GfPtDCapkzj4T8Fd -IgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP20gaXT73 
-y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRt -hAAnZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgoc -QIgfksILAAX/8sgCSqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4 -Lt1ZrtmhN79UNdxzMk+MBB4zsslG8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNV -HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAfBgNV -HSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8EBAMCAYYwDQYJ -KoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z -dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQ -L1EuxBRa3ugZ4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgr -Fg5fNuH8KrUwJM/gYwx7WBr+mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSo -ag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpqA1Ihn0CoZ1Dy81of398j9tx4TuaY -T1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpgY+RdM4kX2TGq2tbz -GDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiPpm8m -1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJV -OCiNUW7dFGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH -6aLcr34YEoP9VhdBLtUpgn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwX -QMAJKOSLakhT2+zNVVXxxvjpoixMptEmX36vWkzaH6byHCx+rgIW0lbQL1dTR+iS ------END CERTIFICATE----- - -# Issuer: CN=AAA Certificate Services O=Comodo CA Limited -# Subject: CN=AAA Certificate Services O=Comodo CA Limited -# Label: "Comodo AAA Services root" -# Serial: 1 -# MD5 Fingerprint: 49:79:04:b0:eb:87:19:ac:47:b0:bc:11:51:9b:74:d0 -# SHA1 Fingerprint: d1:eb:23:a4:6d:17:d6:8f:d9:25:64:c2:f1:f1:60:17:64:d8:e3:49 -# SHA256 Fingerprint: d7:a7:a0:fb:5d:7e:27:31:d7:71:e9:48:4e:bc:de:f7:1d:5f:0c:3e:0a:29:48:78:2b:c8:3e:e0:ea:69:9e:f4 ------BEGIN CERTIFICATE----- -MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb -MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow -GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj -YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL -MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE 
-BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM -GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP -ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua -BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe -3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4 -YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR -rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm -ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU -oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF -MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v -QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t -b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF -AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q -GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz -Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2 -G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi -l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3 -smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== ------END CERTIFICATE----- - -# Issuer: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority -# Subject: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority -# Label: "QuoVadis Root CA" -# Serial: 985026699 -# MD5 Fingerprint: 27:de:36:fe:72:b7:00:03:00:9d:f4:f0:1e:6c:04:24 -# SHA1 Fingerprint: de:3f:40:bd:50:93:d3:9b:6c:60:f6:da:bc:07:62:01:00:89:76:c9 -# SHA256 Fingerprint: a4:5e:de:3b:bb:f0:9c:8a:e1:5c:72:ef:c0:72:68:d6:93:a2:1c:99:6f:d5:1e:67:ca:07:94:60:fd:6d:88:73 ------BEGIN CERTIFICATE----- -MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJC -TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0 -aWZpY2F0aW9uIEF1dGhvcml0eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0 
-aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAzMTkxODMzMzNaFw0yMTAzMTcxODMz -MzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUw -IwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQDEyVR -dW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG -9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Yp -li4kVEAkOPcahdxYTMukJ0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2D -rOpm2RgbaIr1VxqYuvXtdj182d6UajtLF8HVj71lODqV0D1VNk7feVcxKh7YWWVJ -WCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeLYzcS19Dsw3sgQUSj7cug -F+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWenAScOospU -xbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCC -Ak4wPQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVv -dmFkaXNvZmZzaG9yZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREw -ggENMIIBCQYJKwYBBAG+WAABMIH7MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNl -IG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmljYXRlIGJ5IGFueSBwYXJ0eSBh -c3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJsZSBzdGFuZGFy -ZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh -Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYI -KwYBBQUHAgEWFmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3T -KbkGGew5Oanwl4Rqy+/fMIGuBgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rq -y+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1p -dGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYD -VQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6tlCL -MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSk -fnIYj9lofFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf8 -7C9TqnN7Az10buYWnuulLsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1R -cHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2xgI4JVrmcGmD+XcHXetwReNDWXcG31a0y -mQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi5upZIof4l/UO/erMkqQW -xFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi5nrQNiOK -SnQ2+Q== ------END CERTIFICATE----- - -# Issuer: CN=QuoVadis Root CA 2 O=QuoVadis Limited -# Subject: CN=QuoVadis Root CA 2 O=QuoVadis Limited -# 
Label: "QuoVadis Root CA 2" -# Serial: 1289 -# MD5 Fingerprint: 5e:39:7b:dd:f8:ba:ec:82:e9:ac:62:ba:0c:54:00:2b -# SHA1 Fingerprint: ca:3a:fb:cf:12:40:36:4b:44:b2:16:20:88:80:48:39:19:93:7c:f7 -# SHA256 Fingerprint: 85:a0:dd:7d:d7:20:ad:b7:ff:05:f8:3d:54:2b:20:9d:c7:ff:45:28:f7:d6:77:b1:83:89:fe:a5:e5:c4:9e:86 ------BEGIN CERTIFICATE----- -MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x -GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv -b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV -BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W -YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa -GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg -Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J -WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB -rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp -+ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1 -ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i -Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz -PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og -/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH -oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI -yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud -EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2 -A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL -MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT -ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f -BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn -g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl -fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K -WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha -B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc 
-hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR -TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD -mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z -ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y -4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza -8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u ------END CERTIFICATE----- - -# Issuer: CN=QuoVadis Root CA 3 O=QuoVadis Limited -# Subject: CN=QuoVadis Root CA 3 O=QuoVadis Limited -# Label: "QuoVadis Root CA 3" -# Serial: 1478 -# MD5 Fingerprint: 31:85:3c:62:94:97:63:b9:aa:fd:89:4e:af:6f:e0:cf -# SHA1 Fingerprint: 1f:49:14:f7:d8:74:95:1d:dd:ae:02:c0:be:fd:3a:2d:82:75:51:85 -# SHA256 Fingerprint: 18:f1:fc:7f:20:5d:f8:ad:dd:eb:7f:e0:07:dd:57:e3:af:37:5a:9c:4d:8d:73:54:6b:f4:f1:fe:d1:e1:8d:35 ------BEGIN CERTIFICATE----- -MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x -GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv -b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV -BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W -YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM -V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB -4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr -H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd -8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv -vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT -mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe -btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc -T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt -WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ -c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A -4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD -VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG 
-CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0 -aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 -aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu -dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw -czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G -A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC -TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg -Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0 -7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem -d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd -+LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B -4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN -t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x -DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57 -k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s -zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j -Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT -mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK -4SVhM7JZG+Ju1zdXtg2pEto= ------END CERTIFICATE----- - -# Issuer: O=SECOM Trust.net OU=Security Communication RootCA1 -# Subject: O=SECOM Trust.net OU=Security Communication RootCA1 -# Label: "Security Communication Root CA" -# Serial: 0 -# MD5 Fingerprint: f1:bc:63:6a:54:e0:b5:27:f5:cd:e7:1a:e3:4d:6e:4a -# SHA1 Fingerprint: 36:b1:2b:49:f9:81:9e:d7:4c:9e:bc:38:0f:c6:56:8f:5d:ac:b2:f7 -# SHA256 Fingerprint: e7:5e:72:ed:9f:56:0e:ec:6e:b4:80:00:73:a4:3f:c3:ad:19:19:5a:39:22:82:01:78:95:97:4a:99:02:6b:6c ------BEGIN CERTIFICATE----- -MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEY -MBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21t -dW5pY2F0aW9uIFJvb3RDQTEwHhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5 -WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYD 
-VQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEwggEiMA0GCSqGSIb3 -DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw8yl8 -9f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJ -DKaVv0uMDPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9 -Ms+k2Y7CI9eNqPPYJayX5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/N -QV3Is00qVUarH9oe4kA92819uZKAnDfdDJZkndwi92SL32HeFZRSFaB9UslLqCHJ -xrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2JChzAgMBAAGjPzA9MB0G -A1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYwDwYDVR0T -AQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vG -kl3g0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfr -Uj94nK9NrvjVT8+amCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5 -Bw+SUEmK3TGXX8npN6o7WWWXlDLJs58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJU -JRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ6rBK+1YWc26sTfcioU+tHXot -RSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAiFL39vmwLAw== ------END CERTIFICATE----- - -# Issuer: CN=Sonera Class2 CA O=Sonera -# Subject: CN=Sonera Class2 CA O=Sonera -# Label: "Sonera Class 2 Root CA" -# Serial: 29 -# MD5 Fingerprint: a3:ec:75:0f:2e:88:df:fa:48:01:4e:0b:5c:48:6f:fb -# SHA1 Fingerprint: 37:f7:6d:e6:07:7c:90:c5:b1:3e:93:1a:b7:41:10:b4:f2:e4:9a:27 -# SHA256 Fingerprint: 79:08:b4:03:14:c1:38:10:0b:51:8d:07:35:80:7f:fb:fc:f8:51:8a:00:95:33:71:05:ba:38:6b:15:3d:d9:27 ------BEGIN CERTIFICATE----- -MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEP -MA0GA1UEChMGU29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAx -MDQwNjA3Mjk0MFoXDTIxMDQwNjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNV -BAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJhIENsYXNzMiBDQTCCASIwDQYJKoZI -hvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3/Ei9vX+ALTU74W+o -Z6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybTdXnt -5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s -3TmVToMGf+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2Ej -vOr7nQKV0ba5cTppCD8PtOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu 
-8nYybieDwnPz3BjotJPqdURrBGAgcVeHnfO+oJAjPYok4doh28MCAwEAAaMzMDEw -DwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITTXjwwCwYDVR0PBAQDAgEG -MA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt0jSv9zil -zqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/ -3DEIcbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvD -FNr450kkkdAdavphOe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6 -Tk6ezAyNlNzZRZxe7EJQY670XcSxEtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2 -ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLHllpwrN9M ------END CERTIFICATE----- - -# Issuer: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com -# Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com -# Label: "XRamp Global CA Root" -# Serial: 107108908803651509692980124233745014957 -# MD5 Fingerprint: a1:0b:44:b3:ca:10:d8:00:6e:9d:0f:d8:0f:92:0a:d1 -# SHA1 Fingerprint: b8:01:86:d1:eb:9c:86:a5:41:04:cf:30:54:f3:4c:52:b7:e5:58:c6 -# SHA256 Fingerprint: ce:cd:dc:90:50:99:d8:da:df:c5:b1:d2:09:b7:37:cb:e2:c1:8c:fb:2c:10:c0:ff:0b:cf:0d:32:86:fc:1a:a2 ------BEGIN CERTIFICATE----- -MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB -gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk -MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY -UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx -NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3 -dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy -dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB -dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6 -38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP -KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q -DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4 -qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa -JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi 
-PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P -BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs -jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0 -eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD -ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR -vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt -qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa -IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy -i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ -O+7ETPTsJ3xCwnR8gooJybQDJbw= ------END CERTIFICATE----- - -# Issuer: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority -# Subject: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority -# Label: "Go Daddy Class 2 CA" -# Serial: 0 -# MD5 Fingerprint: 91:de:06:25:ab:da:fd:32:17:0c:bb:25:17:2a:84:67 -# SHA1 Fingerprint: 27:96:ba:e6:3f:18:01:e2:77:26:1b:a0:d7:77:70:02:8f:20:ee:e4 -# SHA256 Fingerprint: c3:84:6b:f2:4b:9e:93:ca:64:27:4c:0e:c6:7c:1e:cc:5e:02:4f:fc:ac:d2:d7:40:19:35:0e:81:fe:54:6a:e4 ------BEGIN CERTIFICATE----- -MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh -MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE -YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3 -MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo -ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg -MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN -ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA -PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w -wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi -EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY -avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+ -YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE 
-sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h -/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5 -IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj -YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD -ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy -OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P -TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ -HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER -dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf -ReYNnyicsbkqWletNw+vHX/bvZ8= ------END CERTIFICATE----- - -# Issuer: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority -# Subject: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority -# Label: "Starfield Class 2 CA" -# Serial: 0 -# MD5 Fingerprint: 32:4a:4b:bb:c8:63:69:9b:be:74:9a:c6:dd:1d:46:24 -# SHA1 Fingerprint: ad:7e:1c:28:b0:64:ef:8f:60:03:40:20:14:c3:d0:e3:37:0e:b5:8a -# SHA256 Fingerprint: 14:65:fa:20:53:97:b8:76:fa:a6:f0:a9:95:8e:55:90:e4:0f:cc:7f:aa:4f:b7:c2:c8:67:75:21:fb:5f:b6:58 ------BEGIN CERTIFICATE----- -MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl -MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp -U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw -NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE -ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp -ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3 -DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf -8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN -+lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0 -X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa -K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA -1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G 
-A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR -zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0 -YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD -bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w -DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3 -L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D -eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl -xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp -VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY -WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q= ------END CERTIFICATE----- - -# Issuer: O=Government Root Certification Authority -# Subject: O=Government Root Certification Authority -# Label: "Taiwan GRCA" -# Serial: 42023070807708724159991140556527066870 -# MD5 Fingerprint: 37:85:44:53:32:45:1f:20:f0:f3:95:e1:25:c4:43:4e -# SHA1 Fingerprint: f4:8b:11:bf:de:ab:be:94:54:20:71:e6:41:de:6b:be:88:2b:40:b9 -# SHA256 Fingerprint: 76:00:29:5e:ef:e8:5b:9e:1f:d6:24:db:76:06:2a:aa:ae:59:81:8a:54:d2:77:4c:d4:c0:b2:c0:11:31:e1:b3 ------BEGIN CERTIFICATE----- -MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/ -MQswCQYDVQQGEwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmlj -YXRpb24gQXV0aG9yaXR5MB4XDTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1ow -PzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dvdmVybm1lbnQgUm9vdCBDZXJ0aWZp -Y2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB -AJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qNw8XR -IePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1q -gQdW8or5BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKy -yhwOeYHWtXBiCAEuTk8O1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAts -F/tnyMKtsc2AtJfcdgEWFelq16TheEfOhtX7MfP6Mb40qij7cEwdScevLJ1tZqa2 -jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wovJ5pGfaENda1UhhXcSTvx -ls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7Q3hub/FC 
-VGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHK -YS1tB6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoH -EgKXTiCQ8P8NHuJBO9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThN -Xo+EHWbNxWCWtFJaBYmOlXqYwZE8lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1Ud -DgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNVHRMEBTADAQH/MDkGBGcqBwAE -MTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg209yewDL7MTqK -UWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ -TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyf -qzvS/3WXy6TjZwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaK -ZEk9GhiHkASfQlK3T8v+R0F2Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFE -JPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlUD7gsL0u8qV1bYH+Mh6XgUmMqvtg7 -hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6QzDxARvBMB1uUO07+1 -EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+HbkZ6Mm -nD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WX -udpVBrkk7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44Vbnz -ssQwmSNOXfJIoRIM3BKQCZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDe -LMDDav7v3Aun+kbfYNucpllQdSNpc5Oy+fwC00fmcc4QAu4njIT/rEUNE1yDMuAl -pYYsfPQS ------END CERTIFICATE----- - -# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com -# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com -# Label: "DigiCert Assured ID Root CA" -# Serial: 17154717934120587862167794914071425081 -# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72 -# SHA1 Fingerprint: 05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43 -# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c ------BEGIN CERTIFICATE----- -MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl -MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 -d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv -b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG 
-EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl -cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi -MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c -JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP -mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+ -wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4 -VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/ -AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB -AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW -BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun -pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC -dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf -fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm -NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx -H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe -+o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== ------END CERTIFICATE----- - -# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com -# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com -# Label: "DigiCert Global Root CA" -# Serial: 10944719598952040374951832963794454346 -# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e -# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36 -# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61 ------BEGIN CERTIFICATE----- -MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh -MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 -d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD -QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT -MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j 
-b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG -9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB -CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97 -nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt -43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P -T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4 -gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO -BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR -TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw -DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr -hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg -06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF -PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls -YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk -CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= ------END CERTIFICATE----- - -# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com -# Subject: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com -# Label: "DigiCert High Assurance EV Root CA" -# Serial: 3553400076410547919724730734378100087 -# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a -# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25 -# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf ------BEGIN CERTIFICATE----- -MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs -MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 -d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j -ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL -MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3 -LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug 
-RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm -+9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW -PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM -xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB -Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3 -hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg -EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF -MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA -FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec -nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z -eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF -hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2 -Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe -vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep -+OkuE6N36B9K ------END CERTIFICATE----- - -# Issuer: CN=Class 2 Primary CA O=Certplus -# Subject: CN=Class 2 Primary CA O=Certplus -# Label: "Certplus Class 2 Primary CA" -# Serial: 177770208045934040241468760488327595043 -# MD5 Fingerprint: 88:2c:8c:52:b8:a2:3c:f3:f7:bb:03:ea:ae:ac:42:0b -# SHA1 Fingerprint: 74:20:74:41:72:9c:dd:92:ec:79:31:d8:23:10:8d:c2:81:92:e2:bb -# SHA256 Fingerprint: 0f:99:3c:8a:ef:97:ba:af:56:87:14:0e:d5:9a:d1:82:1b:b4:af:ac:f0:aa:9a:58:b5:d5:7a:33:8a:3a:fb:cb ------BEGIN CERTIFICATE----- -MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAw -PTELMAkGA1UEBhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFz -cyAyIFByaW1hcnkgQ0EwHhcNOTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9 -MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2VydHBsdXMxGzAZBgNVBAMTEkNsYXNz -IDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANxQ -ltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR5aiR -VhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyL -kcAbmXuZVg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCd 
-EgETjdyAYveVqUSISnFOYFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yas -H7WLO7dDWWuwJKZtkIvEcupdM5i3y95ee++U8Rs+yskhwcWYAqqi9lt3m/V+llU0 -HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRMECDAGAQH/AgEKMAsGA1Ud -DwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJYIZIAYb4 -QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMu -Y29tL0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/ -AN9WM2K191EBkOvDP9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8 -yfFC82x/xXp8HVGIutIKPidd3i1RTtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMR -FcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+7UCmnYR0ObncHoUW2ikbhiMA -ybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW//1IMwrh3KWB -kJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7 -l7+ijrRU ------END CERTIFICATE----- - -# Issuer: CN=DST Root CA X3 O=Digital Signature Trust Co. -# Subject: CN=DST Root CA X3 O=Digital Signature Trust Co. -# Label: "DST Root CA X3" -# Serial: 91299735575339953335919266965803778155 -# MD5 Fingerprint: 41:03:52:dc:0f:f7:50:1b:16:f0:02:8e:ba:6f:45:c5 -# SHA1 Fingerprint: da:c9:02:4f:54:d8:f6:df:94:93:5f:b1:73:26:38:ca:6a:d7:7c:13 -# SHA256 Fingerprint: 06:87:26:03:31:a7:24:03:d9:09:f1:05:e6:9b:cf:0d:32:e1:bd:24:93:ff:c6:d9:20:6d:11:bc:d6:77:07:39 ------BEGIN CERTIFICATE----- -MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/ -MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT -DkRTVCBSb290IENBIFgzMB4XDTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVow -PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD -Ew5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB -AN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmTrE4O -rz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEq -OLl5CjH9UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9b -xiqKqy69cK3FCxolkHRyxXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw -7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40dutolucbY38EVAjqr2m7xPi71XAicPNaD -aeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV 
-HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQMA0GCSqG -SIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69 -ikugdB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXr -AvHRAosZy5Q6XkjEGB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZz -R8srzJmwN0jP41ZL9c8PDHIyh8bwRLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5 -JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubSfZGL+T0yjWW06XyxV3bqxbYo -Ob8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ ------END CERTIFICATE----- - -# Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG -# Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG -# Label: "SwissSign Gold CA - G2" -# Serial: 13492815561806991280 -# MD5 Fingerprint: 24:77:d9:a8:91:d1:3b:fa:88:2d:c2:ff:f8:cd:33:93 -# SHA1 Fingerprint: d8:c5:38:8a:b7:30:1b:1b:6e:d4:7a:e6:45:25:3a:6f:9f:1a:27:61 -# SHA256 Fingerprint: 62:dd:0b:e9:b9:f5:0a:16:3e:a0:f8:e7:5c:05:3b:1e:ca:57:ea:55:c8:68:8f:64:7c:68:81:f2:c8:35:7b:95 ------BEGIN CERTIFICATE----- -MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV -BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln -biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF -MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT -d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC -CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8 -76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+ -bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c -6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE -emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd -MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt -MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y -MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y -FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi -aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM -gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB 
-qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7 -lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn -8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov -L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6 -45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO -UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5 -O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC -bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv -GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a -77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC -hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3 -92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp -Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w -ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt -Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ ------END CERTIFICATE----- - -# Issuer: CN=SwissSign Silver CA - G2 O=SwissSign AG -# Subject: CN=SwissSign Silver CA - G2 O=SwissSign AG -# Label: "SwissSign Silver CA - G2" -# Serial: 5700383053117599563 -# MD5 Fingerprint: e0:06:a1:c9:7d:cf:c9:fc:0d:c0:56:75:96:d8:62:13 -# SHA1 Fingerprint: 9b:aa:e5:9f:56:ee:21:cb:43:5a:be:25:93:df:a7:f0:40:d1:1d:cb -# SHA256 Fingerprint: be:6c:4d:a2:bb:b9:ba:59:b6:f3:93:97:68:37:42:46:c3:c0:05:99:3f:a9:8f:02:0d:1d:ed:be:d4:8a:81:d5 ------BEGIN CERTIFICATE----- -MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UE -BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWdu -IFNpbHZlciBDQSAtIEcyMB4XDTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0Nlow -RzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMY -U3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A -MIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644N0Mv -Fz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7br -YT7QbNHm+/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieF 
-nbAVlDLaYQ1HTWBCrpJH6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH -6ATK72oxh9TAtvmUcXtnZLi2kUpCe2UuMGoM9ZDulebyzYLs2aFK7PayS+VFheZt -eJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5hqAaEuSh6XzjZG6k4sIN/ -c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5FZGkECwJ -MoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRH -HTBsROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTf -jNFusB3hB48IHpmccelM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb6 -5i/4z3GcRm25xBWNOHkDRUjvxF3XCO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOB -rDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU -F6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRBtjpbO8tFnb0c -wpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 -cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIB -AHPGgeAn0i0P4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShp -WJHckRE1qTodvBqlYJ7YH39FkWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9 -xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L3XWgwF15kIwb4FDm3jH+mHtwX6WQ -2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx/uNncqCxv1yL5PqZ -IseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFaDGi8 -aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2X -em1ZqSqPe97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQR -dAtq/gsD/KNVV4n+SsuuWxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/ -OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJDIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+ -hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ubDgEj8Z+7fNzcbBGXJbLy -tGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u ------END CERTIFICATE----- - -# Issuer: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc. -# Subject: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc. 
-# Label: "GeoTrust Primary Certification Authority" -# Serial: 32798226551256963324313806436981982369 -# MD5 Fingerprint: 02:26:c3:01:5e:08:30:37:43:a9:d0:7d:cf:37:e6:bf -# SHA1 Fingerprint: 32:3c:11:8e:1b:f7:b8:b6:52:54:e2:e2:10:0d:d6:02:90:37:f0:96 -# SHA256 Fingerprint: 37:d5:10:06:c5:12:ea:ab:62:64:21:f1:ec:8c:92:01:3f:c5:f8:2a:e9:8e:e5:33:eb:46:19:b8:de:b4:d0:6c ------BEGIN CERTIFICATE----- -MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBY -MQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMo -R2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEx -MjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgxCzAJBgNVBAYTAlVTMRYwFAYDVQQK -Ew1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQcmltYXJ5IENlcnRp -ZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC -AQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9 -AWbK7hWNb6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjA -ZIVcFU2Ix7e64HXprQU9nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE0 -7e9GceBrAqg1cmuXm2bgyxx5X9gaBGgeRwLmnWDiNpcB3841kt++Z8dtd1k7j53W -kBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGttm/81w7a4DSwDRp35+MI -mO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G -A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJ -KoZIhvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ1 -6CePbJC/kRYkRj5KTs4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl -4b7UVXGYNTq+k+qurUKykG/g/CFNNWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6K -oKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHaFloxt/m0cYASSJlyc1pZU8Fj -UjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG1riR/aYNKxoU -AT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk= ------END CERTIFICATE----- - -# Issuer: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. - For authorized use only -# Subject: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. 
- For authorized use only -# Label: "thawte Primary Root CA" -# Serial: 69529181992039203566298953787712940909 -# MD5 Fingerprint: 8c:ca:dc:0b:22:ce:f5:be:72:ac:41:1a:11:a8:d8:12 -# SHA1 Fingerprint: 91:c6:d6:ee:3e:8a:c8:63:84:e5:48:c2:99:29:5c:75:6c:81:7b:81 -# SHA256 Fingerprint: 8d:72:2f:81:a9:c1:13:c0:79:1d:f1:36:a2:96:6d:b2:6c:95:0a:97:1d:b4:6b:41:99:f4:ea:54:b7:8b:fb:9f ------BEGIN CERTIFICATE----- -MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB -qTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf -Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw -MDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV -BAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw -NzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j -LjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG -A1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl -IG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG -SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs -W0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta -3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk -6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6 -Sk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J -NqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA -MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP -r87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU -DW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz -YJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX -xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2 -/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/ -LHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7 -jVaMaA== ------END CERTIFICATE----- - -# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. 
OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only -# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only -# Label: "VeriSign Class 3 Public Primary Certification Authority - G5" -# Serial: 33037644167568058970164719475676101450 -# MD5 Fingerprint: cb:17:e4:31:67:3e:e2:09:fe:45:57:93:f3:0a:fa:1c -# SHA1 Fingerprint: 4e:b6:d5:78:49:9b:1c:cf:5f:58:1e:ad:56:be:3d:9b:67:44:a5:e5 -# SHA256 Fingerprint: 9a:cf:ab:7e:43:c8:d8:80:d0:6b:26:2a:94:de:ee:e4:b4:65:99:89:c3:d0:ca:f1:9b:af:64:05:e4:1a:b7:df ------BEGIN CERTIFICATE----- -MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCB -yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL -ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJp -U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxW -ZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0 -aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCByjEL -MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW -ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2ln -biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp -U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y -aXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvJAgIKXo1 -nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKzj/i5Vbex -t0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIz -SdhDY2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQG -BO+QueQA5N06tRn/Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+ -rCpSx4/VBEnkjWNHiDxpg8v+R70rfk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/ -NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E -BAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEwHzAH -BgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy -aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKv 
-MzEzMA0GCSqGSIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzE -p6B4Eq1iDkVwZMXnl2YtmAl+X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y -5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKEKQsTb47bDN0lAtukixlE0kF6BWlK -WE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiCKm0oHw0LxOXnGiYZ -4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vEZV8N -hnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq ------END CERTIFICATE----- - -# Issuer: CN=SecureTrust CA O=SecureTrust Corporation -# Subject: CN=SecureTrust CA O=SecureTrust Corporation -# Label: "SecureTrust CA" -# Serial: 17199774589125277788362757014266862032 -# MD5 Fingerprint: dc:32:c3:a7:6d:25:57:c7:68:09:9d:ea:2d:a9:a2:d1 -# SHA1 Fingerprint: 87:82:c6:c3:04:35:3b:cf:d2:96:92:d2:59:3e:7d:44:d9:34:ff:11 -# SHA256 Fingerprint: f1:c1:b5:0a:e5:a2:0d:d8:03:0e:c9:f6:bc:24:82:3d:d3:67:b5:25:57:59:b4:e7:1b:61:fc:e9:f7:37:5d:73 ------BEGIN CERTIFICATE----- -MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI -MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x -FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz -MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv -cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN -AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz -Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO -0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao -wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj -7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS -8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT -BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB -/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg -JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC -NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3 -6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/ 
-3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm -D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS -CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR -3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= ------END CERTIFICATE----- - -# Issuer: CN=Secure Global CA O=SecureTrust Corporation -# Subject: CN=Secure Global CA O=SecureTrust Corporation -# Label: "Secure Global CA" -# Serial: 9751836167731051554232119481456978597 -# MD5 Fingerprint: cf:f4:27:0d:d4:ed:dc:65:16:49:6d:3d:da:bf:6e:de -# SHA1 Fingerprint: 3a:44:73:5a:e5:81:90:1f:24:86:61:46:1e:3b:9c:c4:5f:f5:3a:1b -# SHA256 Fingerprint: 42:00:f5:04:3a:c8:59:0e:bb:52:7d:20:9e:d1:50:30:29:fb:cb:d4:1c:a1:b5:06:ec:27:f1:5a:de:7d:ac:69 ------BEGIN CERTIFICATE----- -MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK -MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x -GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx -MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg -Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG -SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ -iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa -/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ -jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI -HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7 -sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w -gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF -MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw -KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG -AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L -URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO -H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm -I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY 
-iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc -f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW ------END CERTIFICATE----- - -# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited -# Subject: CN=COMODO Certification Authority O=COMODO CA Limited -# Label: "COMODO Certification Authority" -# Serial: 104350513648249232941998508985834464573 -# MD5 Fingerprint: 5c:48:dc:f7:42:72:ec:56:94:6d:1c:cc:71:35:80:75 -# SHA1 Fingerprint: 66:31:bf:9e:f7:4f:9e:b6:c9:d5:a6:0c:ba:6a:be:d1:f7:bd:ef:7b -# SHA256 Fingerprint: 0c:2c:d6:3d:f7:80:6f:a3:99:ed:e8:09:11:6b:57:5b:f8:79:89:f0:65:18:f9:80:8c:86:05:03:17:8b:af:66 ------BEGIN CERTIFICATE----- -MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB -gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G -A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV -BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw -MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl -YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P -RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0 -aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3 -UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI -2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8 -Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp -+2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+ -DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O -nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW -/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g -PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u -QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY -SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv -IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ 
-RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4 -zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd -BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB -ZQ== ------END CERTIFICATE----- - -# Issuer: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C. -# Subject: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C. -# Label: "Network Solutions Certificate Authority" -# Serial: 116697915152937497490437556386812487904 -# MD5 Fingerprint: d3:f3:a6:16:c0:fa:6b:1d:59:b1:2d:96:4d:0e:11:2e -# SHA1 Fingerprint: 74:f8:a3:c3:ef:e7:b3:90:06:4b:83:90:3c:21:64:60:20:e5:df:ce -# SHA256 Fingerprint: 15:f0:ba:00:a3:ac:7a:f3:ac:88:4c:07:2b:10:11:a0:77:bd:77:c0:97:f4:01:64:b2:f8:59:8a:bd:83:86:0c ------BEGIN CERTIFICATE----- -MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi -MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu -MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp -dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV -UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO -ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG -SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz -c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP -OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl -mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF -BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4 -qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw -gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB -BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu -bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp -dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8 -6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/ -h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH 
-/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv -wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN -pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey ------END CERTIFICATE----- - -# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited -# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited -# Label: "COMODO ECC Certification Authority" -# Serial: 41578283867086692638256921589707938090 -# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23 -# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11 -# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7 ------BEGIN CERTIFICATE----- -MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL -MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE -BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT -IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw -MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy -ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N -T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv -biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR -FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J -cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW -BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ -BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm -fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv -GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= ------END CERTIFICATE----- - -# Issuer: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed -# Subject: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed -# Label: "OISTE WISeKey Global Root GA CA" -# Serial: 86718877871133159090080555911823548314 -# MD5 
Fingerprint: bc:6c:51:33:a7:e9:d3:66:63:54:15:72:1b:21:92:93 -# SHA1 Fingerprint: 59:22:a1:e1:5a:ea:16:35:21:f8:98:39:6a:46:46:b0:44:1b:0f:a9 -# SHA256 Fingerprint: 41:c9:23:86:6a:b4:ca:d6:b7:ad:57:80:81:58:2e:02:07:97:a6:cb:df:4f:ff:78:ce:83:96:b3:89:37:d7:f5 ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCB -ijELMAkGA1UEBhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHly -aWdodCAoYykgMjAwNTEiMCAGA1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNl -ZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwgUm9vdCBHQSBDQTAeFw0w -NTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYDVQQGEwJDSDEQMA4G -A1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIwIAYD -VQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBX -SVNlS2V5IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEAy0+zAJs9Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxR -VVuuk+g3/ytr6dTqvirdqFEr12bDYVxgAsj1znJ7O7jyTmUIms2kahnBAbtzptf2 -w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbDd50kc3vkDIzh2TbhmYsF -mQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ/yxViJGg -4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t9 -4B3RLoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYw -DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQw -EAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOx -SPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vImMMkQyh2I+3QZH4VFvbBsUfk2 -ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4+vg1YFkCExh8 -vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa -hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZi -Fj4A4xylNoEYokxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ -/L7fCg0= ------END CERTIFICATE----- - -# Issuer: CN=Certigna O=Dhimyotis -# Subject: CN=Certigna O=Dhimyotis -# Label: "Certigna" -# Serial: 18364802974209362175 -# MD5 Fingerprint: ab:57:a6:5b:7d:42:82:19:b5:d8:58:26:28:5e:fd:ff -# SHA1 Fingerprint: b1:2e:13:63:45:86:a4:6f:1a:b2:60:68:37:58:2d:c4:ac:fd:94:97 -# SHA256 
Fingerprint: e3:b6:a2:db:2e:d7:ce:48:84:2f:7a:c5:32:41:c7:b7:1d:54:14:4b:fb:40:c1:1f:3f:1d:0b:42:f5:ee:a1:2d ------BEGIN CERTIFICATE----- -MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV -BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X -DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ -BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3 -DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4 -QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny -gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw -zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q -130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2 -JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw -DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw -ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT -AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj -AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG -9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h -bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc -fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu -HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w -t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw -WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== ------END CERTIFICATE----- - -# Issuer: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center -# Subject: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center -# Label: "Deutsche Telekom Root CA 2" -# Serial: 38 -# MD5 Fingerprint: 74:01:4a:91:b1:08:c4:58:ce:47:cd:f0:dd:11:53:08 -# SHA1 Fingerprint: 85:a4:08:c0:9c:19:3e:5d:51:58:7d:cd:d6:13:30:fd:8c:de:37:bf -# SHA256 Fingerprint: b6:19:1a:50:d0:c3:97:7f:7d:a9:9b:cd:aa:c8:6a:22:7d:ae:b9:67:9e:c7:0b:a3:b0:c9:d9:22:71:c1:70:d3 ------BEGIN CERTIFICATE----- 
-MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEc -MBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2Vj -IFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENB -IDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5MjM1OTAwWjBxMQswCQYDVQQGEwJE -RTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxl -U2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290 -IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEU -ha88EOQ5bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhC -QN/Po7qCWWqSG6wcmtoIKyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1Mjwr -rFDa1sPeg5TKqAyZMg4ISFZbavva4VhYAUlfckE8FQYBjl2tqriTtM2e66foai1S -NNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aKSe5TBY8ZTNXeWHmb0moc -QqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTVjlsB9WoH -txa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAP -BgNVHRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOC -AQEAlGRZrTlk5ynrE/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756Abrsp -tJh6sTtU6zkXR34ajgv8HzFZMQSyzhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpa -IzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8rZ7/gFnkm0W09juwzTkZmDLl -6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4Gdyd1Lx+4ivn+ -xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU -Cm26OWMohpLzGITY+9HPBVZkVw== ------END CERTIFICATE----- - -# Issuer: CN=Cybertrust Global Root O=Cybertrust, Inc -# Subject: CN=Cybertrust Global Root O=Cybertrust, Inc -# Label: "Cybertrust Global Root" -# Serial: 4835703278459682877484360 -# MD5 Fingerprint: 72:e4:4a:87:e3:69:40:80:77:ea:bc:e3:f4:ff:f0:e1 -# SHA1 Fingerprint: 5f:43:e5:b1:bf:f8:78:8c:ac:1c:c7:ca:4a:9a:c6:22:2b:cc:34:c6 -# SHA256 Fingerprint: 96:0a:df:00:63:e9:63:56:75:0c:29:65:dd:0a:08:67:da:0b:9c:bd:6e:77:71:4a:ea:fb:23:49:ab:39:3d:a3 ------BEGIN CERTIFICATE----- -MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYG -A1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2Jh -bCBSb290MB4XDTA2MTIxNTA4MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UE 
-ChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBS -b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+Mi8vRRQZhP/8NN5 -7CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW0ozS -J8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2y -HLtgwEZLAfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iP -t3sMpTjr3kfb1V05/Iin89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNz -FtApD0mpSPCzqrdsxacwOUBdrsTiXSZT8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAY -XSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/ -MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2MDSgMqAw -hi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3Js -MB8GA1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUA -A4IBAQBW7wojoFROlZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMj -Wqd8BfP9IjsO0QbE2zZMcwSO5bAi5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUx -XOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2hO0j9n0Hq0V+09+zv+mKts2o -omcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+TX3EJIrduPuoc -A06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW -WL1WMRJOEcgh4LMRkWXbtKaIOM5V ------END CERTIFICATE----- - -# Issuer: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority -# Subject: O=Chunghwa Telecom Co., Ltd. 
OU=ePKI Root Certification Authority -# Label: "ePKI Root Certification Authority" -# Serial: 28956088682735189655030529057352760477 -# MD5 Fingerprint: 1b:2e:00:ca:26:06:90:3d:ad:fe:6f:15:68:d3:6b:b3 -# SHA1 Fingerprint: 67:65:0d:f1:7e:8e:7e:5b:82:40:a4:f4:56:4b:cf:e2:3d:69:c6:f0 -# SHA256 Fingerprint: c0:a6:f4:dc:63:a2:4b:fd:cf:54:ef:2a:6a:08:2a:0a:72:de:35:80:3e:2f:f5:ff:52:7a:e5:d8:72:06:df:d5 ------BEGIN CERTIFICATE----- -MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe -MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0 -ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe -Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw -IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL -SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF -AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH -SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh -ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X -DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1 -TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ -fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA -sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU -WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS -nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH -dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip -NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC -AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF -MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH -ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB -uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl -PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP -JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/ 
-gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2 -j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6 -5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB -o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS -/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z -Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE -W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D -hNQ+IIX3Sj0rnP0qCglN6oH4EZw= ------END CERTIFICATE----- - -# Issuer: O=certSIGN OU=certSIGN ROOT CA -# Subject: O=certSIGN OU=certSIGN ROOT CA -# Label: "certSIGN ROOT CA" -# Serial: 35210227249154 -# MD5 Fingerprint: 18:98:c0:d6:e9:3a:fc:f9:b0:f5:0c:f7:4b:01:44:17 -# SHA1 Fingerprint: fa:b7:ee:36:97:26:62:fb:2d:b0:2a:f6:bf:03:fd:e8:7c:4b:2f:9b -# SHA256 Fingerprint: ea:a9:62:c4:fa:4a:6b:af:eb:e4:15:19:6d:35:1c:cd:88:8d:4f:53:f3:fa:8a:e6:d7:c4:66:a9:4e:60:42:bb ------BEGIN CERTIFICATE----- -MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT -AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD -QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP -MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC -ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do -0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ -UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d -RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ -OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv -JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C -AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O -BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ -LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY -MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ -44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I 
-Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw -i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN -9u6wWk5JRFRYX0KD ------END CERTIFICATE----- - -# Issuer: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only -# Subject: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only -# Label: "GeoTrust Primary Certification Authority - G3" -# Serial: 28809105769928564313984085209975885599 -# MD5 Fingerprint: b5:e8:34:36:c9:10:44:58:48:70:6d:2e:83:d4:b8:05 -# SHA1 Fingerprint: 03:9e:ed:b8:0b:e7:a0:3c:69:53:89:3b:20:d2:d9:32:3a:4c:2a:fd -# SHA256 Fingerprint: b4:78:b8:12:25:0d:f8:78:63:5c:2a:a7:ec:7d:15:5e:aa:62:5e:e8:29:16:e2:cd:29:43:61:88:6c:d1:fb:d4 ------BEGIN CERTIFICATE----- -MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCB -mDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsT -MChjKSAyMDA4IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25s -eTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhv -cml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIzNTk1OVowgZgxCzAJ -BgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg -MjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0 -BgNVBAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg -LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz -+uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5jK/BGvESyiaHAKAxJcCGVn2TAppMSAmUm -hsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdEc5IiaacDiGydY8hS2pgn -5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3CIShwiP/W -JmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exAL -DmKudlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZC -huOl1UcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw -HQYDVR0OBBYEFMR5yo6hTgMdHNxr2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IB -AQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9cr5HqQ6XErhK8WTTOd8lNNTB 
-zU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbEAp7aDHdlDkQN -kv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD -AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUH -SJsMC8tJP33st/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2G -spki4cErx5z481+oghLrGREt ------END CERTIFICATE----- - -# Issuer: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only -# Subject: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only -# Label: "thawte Primary Root CA - G2" -# Serial: 71758320672825410020661621085256472406 -# MD5 Fingerprint: 74:9d:ea:60:24:c4:fd:22:53:3e:cc:3a:72:d9:29:4f -# SHA1 Fingerprint: aa:db:bc:22:23:8f:c4:01:a1:27:bb:38:dd:f4:1d:db:08:9e:f0:12 -# SHA256 Fingerprint: a4:31:0d:50:af:18:a6:44:71:90:37:2a:86:af:af:8b:95:1f:fb:43:1d:83:7f:1e:56:88:b4:59:71:ed:15:57 ------BEGIN CERTIFICATE----- -MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDEL -MAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMp -IDIwMDcgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAi -BgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMjAeFw0wNzExMDUwMDAw -MDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh -d3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBGb3Ig -YXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9v -dCBDQSAtIEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/ -BebfowJPDQfGAFG6DAJSLSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6 -papu+7qzcMBniKI11KOasf2twu8x+qi58/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8E -BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUmtgAMADna3+FGO6Lts6K -DPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUNG4k8VIZ3 -KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41ox -XZ3Krr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg== ------END CERTIFICATE----- - -# Issuer: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. 
- For authorized use only -# Subject: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only -# Label: "thawte Primary Root CA - G3" -# Serial: 127614157056681299805556476275995414779 -# MD5 Fingerprint: fb:1b:5d:43:8a:94:cd:44:c6:76:f2:43:4b:47:e7:31 -# SHA1 Fingerprint: f1:8b:53:8d:1b:e9:03:b6:a6:f0:56:43:5b:17:15:89:ca:f3:6b:f2 -# SHA256 Fingerprint: 4b:03:f4:58:07:ad:70:f2:1b:fc:2c:ae:71:c9:fd:e4:60:4c:06:4c:f5:ff:b6:86:ba:e5:db:aa:d7:fd:d3:4c ------BEGIN CERTIFICATE----- -MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCB -rjELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf -Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw -MDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNV -BAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0wODA0MDIwMDAwMDBa -Fw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhhd3Rl -LCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9u -MTgwNgYDVQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXpl -ZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEcz -MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsr8nLPvb2FvdeHsbnndm -gcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2AtP0LMqmsywCPLLEHd5N/8 -YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC+BsUa0Lf -b1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS9 -9irY7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2S -zhkGcuYMXDhpxwTWvGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUk -OQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV -HQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJKoZIhvcNAQELBQADggEBABpA -2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweKA3rD6z8KLFIW -oCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu -t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7c -KUGRIjxpp7sC8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fM 
-m7v/OeZWYdMKp8RcTGB7BXcmer/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZu -MdRAGmI0Nj81Aa6sY6A= ------END CERTIFICATE----- - -# Issuer: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only -# Subject: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only -# Label: "GeoTrust Primary Certification Authority - G2" -# Serial: 80682863203381065782177908751794619243 -# MD5 Fingerprint: 01:5e:d8:6b:bd:6f:3d:8e:a1:31:f8:12:e0:98:73:6a -# SHA1 Fingerprint: 8d:17:84:d5:37:f3:03:7d:ec:70:fe:57:8b:51:9a:99:e6:10:d7:b0 -# SHA256 Fingerprint: 5e:db:7a:c4:3b:82:a0:6a:87:61:e8:d7:be:49:79:eb:f2:61:1f:7d:d7:9b:f9:1c:1c:6b:56:6a:21:9e:d7:66 ------BEGIN CERTIFICATE----- -MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDEL -MAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChj -KSAyMDA3IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2 -MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 -eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1OVowgZgxCzAJBgNV -BAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykgMjAw -NyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNV -BAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBH -MjB2MBAGByqGSM49AgEGBSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcL -So17VDs6bl8VAsBQps8lL33KSLjHUGMcKiEIfJo22Av+0SbFWDEwKCXzXV2juLal -tJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO -BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+EVXVMAoG -CCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGT -qQ7mndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBucz -rD6ogRLQy7rQkgu2npaqBA+K ------END CERTIFICATE----- - -# Issuer: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only -# Subject: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. 
OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only -# Label: "VeriSign Universal Root Certification Authority" -# Serial: 85209574734084581917763752644031726877 -# MD5 Fingerprint: 8e:ad:b5:01:aa:4d:81:e4:8c:1d:d1:e1:14:00:95:19 -# SHA1 Fingerprint: 36:79:ca:35:66:87:72:30:4d:30:a5:fb:87:3b:0f:a7:7b:b7:0d:54 -# SHA256 Fingerprint: 23:99:56:11:27:a5:71:25:de:8c:ef:ea:61:0d:df:2f:a0:78:b5:c8:06:7f:4e:82:82:90:bf:b8:60:e8:4b:3c ------BEGIN CERTIFICATE----- -MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCB -vTELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL -ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJp -U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MTgwNgYDVQQDEy9W -ZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe -Fw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJVUzEX -MBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0 -IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9y -IGF1dGhvcml6ZWQgdXNlIG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNh -bCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF -AAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj1mCOkdeQmIN65lgZOIzF -9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGPMiJhgsWH -H26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+H -LL729fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN -/BMReYTtXlT2NJ8IAfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPT -rJ9VAMf2CGqUuV/c4DPxhGD5WycRtPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1Ud -EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0GCCsGAQUFBwEMBGEwX6FdoFsw -WTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2Oa8PPgGrUSBgs -exkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud -DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4 -sAPmLGd75JR3Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+ -seQxIcaBlVZaDrHC1LGmWazxY8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz 
-4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTxP/jgdFcrGJ2BtMQo2pSXpXDrrB2+ -BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+PwGZsY6rp2aQW9IHR -lRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4mJO3 -7M2CYfE45k+XmCpajQ== ------END CERTIFICATE----- - -# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only -# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only -# Label: "VeriSign Class 3 Public Primary Certification Authority - G4" -# Serial: 63143484348153506665311985501458640051 -# MD5 Fingerprint: 3a:52:e1:e7:fd:6f:3a:e3:6f:f3:6f:99:1b:f9:22:41 -# SHA1 Fingerprint: 22:d5:d8:df:8f:02:31:d1:8d:f7:9d:b7:cf:8a:2d:64:c9:3f:6c:3a -# SHA256 Fingerprint: 69:dd:d7:ea:90:bb:57:c9:3e:13:5d:c8:5e:a6:fc:d5:48:0b:60:32:39:bd:c4:54:fc:75:8b:2a:26:cf:7f:79 ------BEGIN CERTIFICATE----- -MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjEL -MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW -ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2ln -biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp -U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y -aXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjELMAkG -A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJp -U2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwg -SW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2ln -biBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5 -IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8Utpkmw4tXNherJI9/gHm -GUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGzrl0Bp3ve -fLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUw -AwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJ -aW1hZ2UvZ2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYj 
-aHR0cDovL2xvZ28udmVyaXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMW -kf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMDA2gAMGUCMGYhDBgmYFo4e1ZC -4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIxAJw9SDkjOVga -FRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA== ------END CERTIFICATE----- - -# Issuer: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services) -# Subject: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services) -# Label: "NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny" -# Serial: 80544274841616 -# MD5 Fingerprint: c5:a1:b7:ff:73:dd:d6:d7:34:32:18:df:fc:3c:ad:88 -# SHA1 Fingerprint: 06:08:3f:59:3f:15:a1:04:a0:69:a4:6b:a9:03:d0:06:b7:97:09:91 -# SHA256 Fingerprint: 6c:61:da:c3:a2:de:f0:31:50:6b:e0:36:d2:a6:fe:40:19:94:fb:d1:3d:f9:c8:d4:66:59:92:74:c4:46:ec:98 ------BEGIN CERTIFICATE----- -MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG -EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3 -MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl -cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR -dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB -pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM -b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm -aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz -IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT -lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz -AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5 -VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG -ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2 -BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG 
-AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M -U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh -bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C -+C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC -bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F -uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2 -XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= ------END CERTIFICATE----- - -# Issuer: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden -# Subject: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden -# Label: "Staat der Nederlanden Root CA - G2" -# Serial: 10000012 -# MD5 Fingerprint: 7c:a5:0f:f8:5b:9a:7d:6d:30:ae:54:5a:e3:42:a2:8a -# SHA1 Fingerprint: 59:af:82:79:91:86:c7:b4:75:07:cb:cf:03:57:46:eb:04:dd:b7:16 -# SHA256 Fingerprint: 66:8c:83:94:7d:a6:3b:72:4b:ec:e1:74:3c:31:a0:e6:ae:d0:db:8e:c5:b3:1b:e3:77:bb:78:4f:91:b6:71:6f ------BEGIN CERTIFICATE----- -MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO -TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh -dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oX -DTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl -ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv -b3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ5291 -qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8Sp -uOUfiUtnvWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPU -Z5uW6M7XxgpT0GtJlvOjCwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvE -pMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiile7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp -5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCROME4HYYEhLoaJXhena/M -UGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpICT0ugpTN -GmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy -5V6548r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv -6q012iDTiIJh8BIitrzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEK 
-eN5KzlW/HdXZt1bv8Hb/C3m1r737qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6 -B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMBAAGjgZcwgZQwDwYDVR0TAQH/ -BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcCARYxaHR0cDov -L3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV -HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqG -SIb3DQEBCwUAA4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLyS -CZa59sCrI2AGeYwRTlHSeYAz+51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen -5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwjf/ST7ZwaUb7dRUG/kSS0H4zpX897 -IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaNkqbG9AclVMwWVxJK -gnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfkCpYL -+63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxL -vJxxcypFURmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkm -bEgeqmiSBeGCc1qb3AdbCG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvk -N1trSt8sV4pAWja63XVECDdCcAz+3F4hoKOKwJCcaNpQ5kUQR3i2TtJlycM33+FC -Y7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoVIPVVYpbtbZNQvOSqeK3Z -ywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm66+KAQ== ------END CERTIFICATE----- - -# Issuer: CN=Hongkong Post Root CA 1 O=Hongkong Post -# Subject: CN=Hongkong Post Root CA 1 O=Hongkong Post -# Label: "Hongkong Post Root CA 1" -# Serial: 1000 -# MD5 Fingerprint: a8:0d:6f:39:78:b9:43:6d:77:42:6d:98:5a:cc:23:ca -# SHA1 Fingerprint: d6:da:a8:20:8d:09:d2:15:4d:24:b5:2f:cb:34:6e:b2:58:b2:8a:58 -# SHA256 Fingerprint: f9:e6:7d:33:6c:51:00:2a:c0:54:c6:32:02:2d:66:dd:a2:e7:e3:ff:f1:0a:d0:61:ed:31:d8:bb:b4:10:cf:b2 ------BEGIN CERTIFICATE----- -MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsx -FjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3Qg -Um9vdCBDQSAxMB4XDTAzMDUxNTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkG -A1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdr -b25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC -AQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1ApzQ 
-jVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEn -PzlTCeqrauh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjh -ZY4bXSNmO7ilMlHIhqqhqZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9 -nnV0ttgCXjqQesBCNnLsak3c78QA3xMYV18meMjWCnl3v/evt3a5pQuEF10Q6m/h -q5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNVHRMBAf8ECDAGAQH/AgED -MA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7ih9legYsC -mEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI3 -7piol7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clB -oiMBdDhViw+5LmeiIAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJs -EhTkYY2sEJCehFC78JZvRZ+K88psT/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpO -fMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilTc4afU9hDDl3WY4JxHYB0yvbi -AmvZWg== ------END CERTIFICATE----- - -# Issuer: CN=SecureSign RootCA11 O=Japan Certification Services, Inc. -# Subject: CN=SecureSign RootCA11 O=Japan Certification Services, Inc. -# Label: "SecureSign RootCA11" -# Serial: 1 -# MD5 Fingerprint: b7:52:74:e2:92:b4:80:93:f2:75:e4:cc:d7:f2:ea:26 -# SHA1 Fingerprint: 3b:c4:9f:48:f8:f3:73:a0:9c:1e:bd:f8:5b:b1:c3:65:c7:d8:11:b3 -# SHA256 Fingerprint: bf:0f:ee:fb:9e:3a:58:1a:d5:f9:e9:db:75:89:98:57:43:d2:61:08:5c:4d:31:4f:6f:5d:72:59:aa:42:16:12 ------BEGIN CERTIFICATE----- -MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDEr -MCkGA1UEChMiSmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoG -A1UEAxMTU2VjdXJlU2lnbiBSb290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0 -MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSswKQYDVQQKEyJKYXBhbiBDZXJ0aWZp -Y2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1cmVTaWduIFJvb3RD -QTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvLTJsz -i1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8 -h9uuywGOwvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOV -MdrAG/LuYpmGYz+/3ZMqg6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9 -UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rPO7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni -8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitAbpSACW22s293bzUIUPsC 
-h8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZXt94wDgYD -VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB -AKChOBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xm -KbabfSVSSUOrTC4rbnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQ -X5Ucv+2rIrVls4W6ng+4reV6G4pQOh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWr -QbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01y8hSyn+B/tlr0/cR7SXf+Of5 -pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061lgeLKBObjBmN -QSdJQO7e5iNEOdyhIta6A/I= ------END CERTIFICATE----- - -# Issuer: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. -# Subject: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. -# Label: "Microsec e-Szigno Root CA 2009" -# Serial: 14014712776195784473 -# MD5 Fingerprint: f8:49:f4:03:bc:44:2d:83:be:48:69:7d:29:64:fc:b1 -# SHA1 Fingerprint: 89:df:74:fe:5c:f4:0f:4a:80:f9:e3:37:7d:54:da:91:e1:01:31:8e -# SHA256 Fingerprint: 3c:5f:81:fe:a5:fa:b8:2c:64:bf:a2:ea:ec:af:cd:e8:e0:77:fc:86:20:a7:ca:e5:37:16:3d:f3:6e:db:f3:78 ------BEGIN CERTIFICATE----- -MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD -VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0 -ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G -CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y -OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx -FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp -Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o -dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP -kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc -cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U -fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7 -N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC -xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1 -+rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G 
-A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM -Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG -SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h -mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk -ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 -tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c -2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t -HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW ------END CERTIFICATE----- - -# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 -# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 -# Label: "GlobalSign Root CA - R3" -# Serial: 4835703278459759426209954 -# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28 -# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad -# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b ------BEGIN CERTIFICATE----- -MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G -A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp -Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4 -MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG -A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI -hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8 -RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT -gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm -KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd -QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ -XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw -DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o -LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU -RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp 
-jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK -6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX -mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs -Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH -WD9f ------END CERTIFICATE----- - -# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 -# Subject: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 -# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068" -# Serial: 6047274297262753887 -# MD5 Fingerprint: 73:3a:74:7a:ec:bb:a3:96:a6:c2:e4:e2:c8:9b:c0:c3 -# SHA1 Fingerprint: ae:c5:fb:3f:c8:e1:bf:c4:e5:4f:03:07:5a:9a:e8:00:b7:f7:b6:fa -# SHA256 Fingerprint: 04:04:80:28:bf:1f:28:64:d4:8f:9a:d4:d8:32:94:36:6a:82:88:56:55:3f:3b:14:30:3f:90:14:7f:5d:40:ef ------BEGIN CERTIFICATE----- -MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UE -BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h -cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEy -MzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg -Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi -MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9 -thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM -cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG -L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i -NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h -X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b -m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy -Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja -EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T -KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF -6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh -OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1UdEwEB/wQIMAYBAf8CAQEwDgYD 
-VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNHDhpkLzCBpgYD -VR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp -cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBv -ACAAZABlACAAbABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBl -AGwAbwBuAGEAIAAwADgAMAAxADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF -661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx51tkljYyGOylMnfX40S2wBEqgLk9 -am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qkR71kMrv2JYSiJ0L1 -ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaPT481 -PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS -3a/DTg4fJl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5k -SeTy36LssUzAKh3ntLFlosS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF -3dvd6qJ2gHN99ZwExEWN57kci57q13XRcrHedUTnQn3iV2t93Jm8PYMo6oCTjcVM -ZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoRsaS8I8nkvof/uZS2+F0g -StRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTDKCOM/icz -Q0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQB -jLMi6Et8Vcad+qMUu2WFbm5PEn4KPJ2V ------END CERTIFICATE----- - -# Issuer: CN=Izenpe.com O=IZENPE S.A. -# Subject: CN=Izenpe.com O=IZENPE S.A. 
-# Label: "Izenpe.com" -# Serial: 917563065490389241595536686991402621 -# MD5 Fingerprint: a6:b0:cd:85:80:da:5c:50:34:a3:39:90:2f:55:67:73 -# SHA1 Fingerprint: 2f:78:3d:25:52:18:a7:4a:65:39:71:b5:2c:a2:9c:45:15:6f:e9:19 -# SHA256 Fingerprint: 25:30:cc:8e:98:32:15:02:ba:d9:6f:9b:1f:ba:1b:09:9e:2d:29:9e:0f:45:48:bb:91:4f:36:3b:c0:d4:53:1f ------BEGIN CERTIFICATE----- -MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4 -MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6 -ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD -VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j -b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq -scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO -xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H -LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX -uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD -yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+ -JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q -rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN -BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L -hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB -QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+ -HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu -Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg -QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB -BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx -MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC -AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA -A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb -laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56 -awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo 
-JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw -LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT -VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk -LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb -UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/ -QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+ -naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls -QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== ------END CERTIFICATE----- - -# Issuer: CN=Chambers of Commerce Root - 2008 O=AC Camerfirma S.A. -# Subject: CN=Chambers of Commerce Root - 2008 O=AC Camerfirma S.A. -# Label: "Chambers of Commerce Root - 2008" -# Serial: 11806822484801597146 -# MD5 Fingerprint: 5e:80:9e:84:5a:0e:65:0b:17:02:f3:55:18:2a:3e:d7 -# SHA1 Fingerprint: 78:6a:74:ac:76:ab:14:7f:9c:6a:30:50:ba:9e:a8:7e:fe:9a:ce:3c -# SHA256 Fingerprint: 06:3e:4a:fa:c4:91:df:d3:32:f3:08:9b:85:42:e9:46:17:d8:93:d7:fe:94:4e:10:a7:93:7e:e2:9d:96:93:c0 ------BEGIN CERTIFICATE----- -MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYD -VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0 -IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3 -MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xKTAnBgNVBAMTIENoYW1iZXJz -IG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEyMjk1MFoXDTM4MDcz -MTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBj -dXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIw -EAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEp -MCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0G -CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW9 -28sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKAXuFixrYp4YFs8r/lfTJq -VKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorjh40G072Q -DuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR -5gN/ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfL 
-ZEFHcpOrUMPrCXZkNNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05a -Sd+pZgvMPMZ4fKecHePOjlO+Bd5gD2vlGts/4+EhySnB8esHnFIbAURRPHsl18Tl -UlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331lubKgdaX8ZSD6e2wsWsSaR6s -+12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ0wlf2eOKNcx5 -Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj -ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAx -hduub+84Mxh2EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNV -HQ4EFgQU+SSsD7K1+HnA+mCIG8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1 -+HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpN -YWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29t -L2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVy -ZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAt -IDIwMDiCCQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRV -HSAAMCowKAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20w -DQYJKoZIhvcNAQEFBQADggIBAJASryI1wqM58C7e6bXpeHxIvj99RZJe6dqxGfwW -PJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH3qLPaYRgM+gQDROpI9CF -5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbURWpGqOt1 -glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaH -FoI6M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2 -pSB7+R5KBWIBpih1YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MD -xvbxrN8y8NmBGuScvfaAFPDRLLmF9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QG -tjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcKzBIKinmwPQN/aUv0NCB9szTq -jktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvGnrDQWzilm1De -fhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg -OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZ -d0jQ ------END CERTIFICATE----- - -# Issuer: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A. -# Subject: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A. 
-# Label: "Global Chambersign Root - 2008" -# Serial: 14541511773111788494 -# MD5 Fingerprint: 9e:80:ff:78:01:0c:2e:c1:36:bd:fe:96:90:6e:08:f3 -# SHA1 Fingerprint: 4a:bd:ee:ec:95:0d:35:9c:89:ae:c7:52:a1:2c:5b:29:f6:d6:aa:0c -# SHA256 Fingerprint: 13:63:35:43:93:34:a7:69:80:16:a0:d3:24:de:72:28:4e:07:9d:7b:52:20:bb:8f:bd:74:78:16:ee:be:ba:ca ------BEGIN CERTIFICATE----- -MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYD -VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0 -IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3 -MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD -aGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMxNDBaFw0zODA3MzEx -MjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3Vy -cmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAG -A1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAl -BgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZI -hvcNAQEBBQADggIPADCCAgoCggIBAMDfVtPkOpt2RbQT2//BthmLN0EYlVJH6xed -KYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXfXjaOcNFccUMd2drvXNL7 -G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0ZJJ0YPP2 -zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4 -ddPB/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyG -HoiMvvKRhI9lNNgATH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2 -Id3UwD2ln58fQ1DJu7xsepeY7s2MH/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3V -yJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfeOx2YItaswTXbo6Al/3K1dh3e -beksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSFHTynyQbehP9r -6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh -wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsog -zCtLkykPAgMBAAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQW -BBS5CcqcHtvTbDprru1U8VuTBjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDpr -ru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UEBhMCRVUxQzBBBgNVBAcTOk1hZHJp -ZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJmaXJtYS5jb20vYWRk 
-cmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJmaXJt -YSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiC -CQDJzdPp1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCow -KAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZI -hvcNAQEFBQADggIBAICIf3DekijZBZRG/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZ -UohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6ReAJ3spED8IXDneRRXoz -X1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/sdZ7LoR/x -fxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVz -a2Mg9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yyd -Yhz2rXzdpjEetrHHfoUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMd -SqlapskD7+3056huirRXhOukP9DuqqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9O -AP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETrP3iZ8ntxPjzxmKfFGBI/5rso -M0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVqc5iJWzouE4ge -v8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z -09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B ------END CERTIFICATE----- - -# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. -# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. 
-# Label: "Go Daddy Root Certificate Authority - G2" -# Serial: 0 -# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01 -# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b -# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da ------BEGIN CERTIFICATE----- -MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx -EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT -EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp -ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz -NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH -EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE -AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw -DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD -E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH -/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy -DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh -GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR -tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA -AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE -FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX -WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu -9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr -gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo -2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO -LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI -4uJEvlz36hz1 ------END CERTIFICATE----- - -# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. -# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
-# Label: "Starfield Root Certificate Authority - G2" -# Serial: 0 -# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96 -# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e -# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5 ------BEGIN CERTIFICATE----- -MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx -EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT -HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs -ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw -MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 -b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj -aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp -Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC -ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg -nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1 -HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N -Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN -dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0 -HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO -BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G -CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU -sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3 -4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg -8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K -pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1 -mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 ------END CERTIFICATE----- - -# Issuer: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. -# Subject: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
-# Label: "Starfield Services Root Certificate Authority - G2" -# Serial: 0 -# MD5 Fingerprint: 17:35:74:af:7b:61:1c:eb:f4:f9:3c:e2:ee:40:f9:a2 -# SHA1 Fingerprint: 92:5a:8f:8d:2c:6d:04:e0:66:5f:59:6a:ff:22:d8:63:e8:25:6f:3f -# SHA256 Fingerprint: 56:8d:69:05:a2:c8:87:08:a4:b3:02:51:90:ed:cf:ed:b1:97:4a:60:6a:13:c6:e5:29:0f:cb:2a:e6:3e:da:b5 ------BEGIN CERTIFICATE----- -MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx -EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT -HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs -ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 -MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD -VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy -ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy -dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI -hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p -OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2 -8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K -Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe -hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk -6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw -DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q -AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI -bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB -ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z -qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd -iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn -0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN -sSi6 ------END CERTIFICATE----- - -# Issuer: CN=AffirmTrust Commercial O=AffirmTrust -# Subject: CN=AffirmTrust Commercial O=AffirmTrust -# Label: "AffirmTrust Commercial" -# Serial: 8608355977964138876 -# MD5 Fingerprint: 
82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7 -# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7 -# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7 ------BEGIN CERTIFICATE----- -MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE -BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz -dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL -MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp -cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC -AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP -Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr -ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL -MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1 -yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr -VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/ -nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ -KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG -XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj -vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt -Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g -N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC -nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= ------END CERTIFICATE----- - -# Issuer: CN=AffirmTrust Networking O=AffirmTrust -# Subject: CN=AffirmTrust Networking O=AffirmTrust -# Label: "AffirmTrust Networking" -# Serial: 8957382827206547757 -# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f -# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f -# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b ------BEGIN CERTIFICATE----- 
-MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE -BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz -dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL -MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp -cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC -AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y -YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua -kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL -QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp -6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG -yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i -QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ -KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO -tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu -QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ -Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u -olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48 -x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= ------END CERTIFICATE----- - -# Issuer: CN=AffirmTrust Premium O=AffirmTrust -# Subject: CN=AffirmTrust Premium O=AffirmTrust -# Label: "AffirmTrust Premium" -# Serial: 7893706540734352110 -# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57 -# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27 -# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a ------BEGIN CERTIFICATE----- -MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE -BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz -dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG -A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U -cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf 
-qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ -JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ -+jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS -s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5 -HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7 -70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG -V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S -qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S -5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia -C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX -OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE -FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ -BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2 -KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg -Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B -8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ -MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc -0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ -u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF -u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH -YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8 -GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO -RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e -KeC2uAloGRwYQw== ------END CERTIFICATE----- - -# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust -# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust -# Label: "AffirmTrust Premium ECC" -# Serial: 8401224907861490260 -# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d -# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb -# SHA256 Fingerprint: 
bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23 ------BEGIN CERTIFICATE----- -MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC -VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ -cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ -BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt -VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D -0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9 -ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G -A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G -A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs -aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I -flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ== ------END CERTIFICATE----- - -# Issuer: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority -# Subject: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. 
OU=Certum Certification Authority -# Label: "Certum Trusted Network CA" -# Serial: 279744 -# MD5 Fingerprint: d5:e9:81:40:c5:18:69:fc:46:2c:89:75:62:0f:aa:78 -# SHA1 Fingerprint: 07:e0:32:e0:20:b7:2c:3f:19:2f:06:28:a2:59:3a:19:a7:0f:06:9e -# SHA256 Fingerprint: 5c:58:46:8d:55:f5:8e:49:7e:74:39:82:d2:b5:00:10:b6:d1:65:37:4a:cf:83:a7:d4:a3:2d:b7:68:c4:40:8e ------BEGIN CERTIFICATE----- -MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM -MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D -ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU -cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3 -WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg -Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw -IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B -AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH -UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM -TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU -BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM -kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x -AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV -HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV -HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y -sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL -I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8 -J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY -VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI -03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= ------END CERTIFICATE----- - -# Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA -# Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA -# Label: "TWCA Root Certification Authority" -# Serial: 1 -# MD5 Fingerprint: 
aa:08:8f:f6:f9:7b:b7:f2:b1:a7:1e:9b:ea:ea:bd:79 -# SHA1 Fingerprint: cf:9e:87:6d:d3:eb:fc:42:26:97:a3:b5:a3:7a:a0:76:a9:06:23:48 -# SHA256 Fingerprint: bf:d8:8f:e1:10:1c:41:ae:3e:80:1b:f8:be:56:35:0e:e9:ba:d1:a6:b9:bd:51:5e:dc:5c:6d:5b:87:11:ac:44 ------BEGIN CERTIFICATE----- -MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES -MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU -V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz -WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO -LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm -aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB -AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE -AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH -K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX -RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z -rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx -3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV -HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq -hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC -MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls -XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D -lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn -aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ -YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== ------END CERTIFICATE----- - -# Issuer: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2 -# Subject: O=SECOM Trust Systems CO.,LTD. 
OU=Security Communication RootCA2 -# Label: "Security Communication RootCA2" -# Serial: 0 -# MD5 Fingerprint: 6c:39:7d:a4:0e:55:59:b2:3f:d6:41:b1:12:50:de:43 -# SHA1 Fingerprint: 5f:3b:8c:f2:f8:10:b3:7d:78:b4:ce:ec:19:19:c3:73:34:b9:c7:74 -# SHA256 Fingerprint: 51:3b:2c:ec:b8:10:d4:cd:e5:dd:85:39:1a:df:c6:c2:dd:60:d8:7b:b7:36:d2:b5:21:48:4a:a4:7a:0e:be:f6 ------BEGIN CERTIFICATE----- -MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl -MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe -U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX -DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy -dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj -YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV -OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr -zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM -VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ -hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO -ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw -awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs -OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3 -DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF -coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc -okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8 -t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy -1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/ -SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 ------END CERTIFICATE----- - -# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority -# Subject: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. 
Authority -# Label: "Hellenic Academic and Research Institutions RootCA 2011" -# Serial: 0 -# MD5 Fingerprint: 73:9f:4c:4b:73:5b:79:e9:fa:ba:1c:ef:6e:cb:d5:c9 -# SHA1 Fingerprint: fe:45:65:9b:79:03:5b:98:a1:61:b5:51:2e:ac:da:58:09:48:22:4d -# SHA256 Fingerprint: bc:10:4f:15:a4:8b:e7:09:dc:a5:42:a7:e1:d4:b9:df:6f:05:45:27:e8:02:ea:a9:2d:59:54:44:25:8a:fe:71 ------BEGIN CERTIFICATE----- -MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1Ix -RDBCBgNVBAoTO0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1 -dGlvbnMgQ2VydC4gQXV0aG9yaXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1p -YyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIFJvb3RDQSAyMDExMB4XDTExMTIw -NjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYTAkdSMUQwQgYDVQQK -EztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIENl -cnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl -c2VhcmNoIEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEB -BQADggEPADCCAQoCggEBAKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPz -dYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJ -fel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa71HFK9+WXesyHgLacEns -bgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u8yBRQlqD -75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSP -FEDH3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNV -HRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp -5dgTBCPuQSUwRwYDVR0eBEAwPqA8MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQu -b3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQub3JnMA0GCSqGSIb3DQEBBQUA -A4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVtXdMiKahsog2p -6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 -TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7 -dIsXRSZMFpGD/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8Acys -Nnq/onN694/BtZqhFLKPM58N7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXI -l7WdmplNsDz4SgCbZN2fOUvRJ9e4 ------END CERTIFICATE----- - -# Issuer: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967 -# Subject: CN=Actalis 
Authentication Root CA O=Actalis S.p.A./03358520967 -# Label: "Actalis Authentication Root CA" -# Serial: 6271844772424770508 -# MD5 Fingerprint: 69:c1:0d:4f:07:a3:1b:c3:fe:56:3d:04:bc:11:f6:a6 -# SHA1 Fingerprint: f3:73:b3:87:06:5a:28:84:8a:f2:f3:4a:ce:19:2b:dd:c7:8e:9c:ac -# SHA256 Fingerprint: 55:92:60:84:ec:96:3a:64:b9:6e:2a:be:01:ce:0b:a8:6a:64:fb:fe:bc:c7:aa:b5:af:c1:55:b3:7f:d7:60:66 ------BEGIN CERTIFICATE----- -MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE -BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w -MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 -IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC -SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1 -ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB -MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv -UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX -4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9 -KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/ -gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb -rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ -51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F -be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe -KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F -v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn -fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7 -jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz -ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt -ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL -e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70 -jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz -WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V 
-SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j -pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX -X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok -fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R -K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU -ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU -LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT -LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== ------END CERTIFICATE----- - -# Issuer: O=Trustis Limited OU=Trustis FPS Root CA -# Subject: O=Trustis Limited OU=Trustis FPS Root CA -# Label: "Trustis FPS Root CA" -# Serial: 36053640375399034304724988975563710553 -# MD5 Fingerprint: 30:c9:e7:1e:6b:e6:14:eb:65:b2:16:69:20:31:67:4d -# SHA1 Fingerprint: 3b:c0:38:0b:33:c3:f6:a6:0c:86:15:22:93:d9:df:f5:4b:81:c0:04 -# SHA256 Fingerprint: c1:b4:82:99:ab:a5:20:8f:e9:63:0a:ce:55:ca:68:a0:3e:da:5a:51:9c:88:02:a0:d3:a6:73:be:8f:8e:55:7d ------BEGIN CERTIFICATE----- -MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBF -MQswCQYDVQQGEwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQL -ExNUcnVzdGlzIEZQUyBSb290IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTEx -MzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNVBAoTD1RydXN0aXMgTGltaXRlZDEc -MBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQRUN+ -AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihH -iTHcDnlkH5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjj -vSkCqPoc4Vu5g6hBSLwacY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA -0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zto3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlB -OrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEAAaNTMFEwDwYDVR0TAQH/ -BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAdBgNVHQ4E -FgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01 -GX2cGE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmW 
-zaD+vkAMXBJV+JOCyinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP4 -1BIy+Q7DsdwyhEQsb8tGD+pmQQ9P8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZE -f1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHVl/9D7S3B2l0pKoU/rGXuhg8F -jZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYliB6XzCGcKQEN -ZetX2fNXlrtIzYE= ------END CERTIFICATE----- - -# Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 -# Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 -# Label: "Buypass Class 2 Root CA" -# Serial: 2 -# MD5 Fingerprint: 46:a7:d2:fe:45:fb:64:5a:a8:59:90:9b:78:44:9b:29 -# SHA1 Fingerprint: 49:0a:75:74:de:87:0a:47:fe:58:ee:f6:c7:6b:eb:c6:0b:12:40:99 -# SHA256 Fingerprint: 9a:11:40:25:19:7c:5b:b9:5d:94:e6:3d:55:cd:43:79:08:47:b6:46:b2:3c:df:11:ad:a4:a0:0e:ff:15:fb:48 ------BEGIN CERTIFICATE----- -MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd -MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg -Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow -TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw -HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB -BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr -6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV -L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91 -1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx -MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ -QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB -arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr -Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi -FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS -P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN -9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP -AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz -uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h 
-9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s -A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t -OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo -+fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7 -KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2 -DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us -H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ -I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7 -5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h -3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz -Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA= ------END CERTIFICATE----- - -# Issuer: CN=Buypass Class 3 Root CA O=Buypass AS-983163327 -# Subject: CN=Buypass Class 3 Root CA O=Buypass AS-983163327 -# Label: "Buypass Class 3 Root CA" -# Serial: 2 -# MD5 Fingerprint: 3d:3b:18:9e:2c:64:5a:e8:d5:88:ce:0e:f9:37:c2:ec -# SHA1 Fingerprint: da:fa:f7:fa:66:84:ec:06:8f:14:50:bd:c7:c2:81:a5:bc:a9:64:57 -# SHA256 Fingerprint: ed:f7:eb:bc:a2:7a:2a:38:4d:38:7b:7d:40:10:c6:66:e2:ed:b4:84:3e:4c:29:b4:ae:1d:5b:93:32:e6:b2:4d ------BEGIN CERTIFICATE----- -MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd -MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg -Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow -TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw -HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB -BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y -ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E -N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9 -tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX -0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c -/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X -KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY 
-zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS -O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D -34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP -K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3 -AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv -Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj -QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV -cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS -IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2 -HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa -O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv -033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u -dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE -kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41 -3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD -u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq -4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc= ------END CERTIFICATE----- - -# Issuer: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center -# Subject: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center -# Label: "T-TeleSec GlobalRoot Class 3" -# Serial: 1 -# MD5 Fingerprint: ca:fb:40:a8:4e:39:92:8a:1d:fe:8e:2f:c4:27:ea:ef -# SHA1 Fingerprint: 55:a6:72:3e:cb:f2:ec:cd:c3:23:74:70:19:9d:2a:be:11:e3:81:d1 -# SHA256 Fingerprint: fd:73:da:d3:1c:64:4f:f1:b4:3b:ef:0c:cd:da:96:71:0b:9c:d9:87:5e:ca:7e:31:70:7a:f3:e9:6d:52:2b:bd ------BEGIN CERTIFICATE----- -MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx -KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd -BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl -YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1 
-OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy -aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 -ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G -CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN -8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/ -RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4 -hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5 -ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM -EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj -QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1 -A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy -WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ -1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30 -6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT -91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml -e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p -TpPDpFQUWw== ------END CERTIFICATE----- - -# Issuer: CN=EE Certification Centre Root CA O=AS Sertifitseerimiskeskus -# Subject: CN=EE Certification Centre Root CA O=AS Sertifitseerimiskeskus -# Label: "EE Certification Centre Root CA" -# Serial: 112324828676200291871926431888494945866 -# MD5 Fingerprint: 43:5e:88:d4:7d:1a:4a:7e:fd:84:2e:52:eb:01:d4:6f -# SHA1 Fingerprint: c9:a8:b9:e7:55:80:5e:58:e3:53:77:a7:25:eb:af:c3:7b:27:cc:d7 -# SHA256 Fingerprint: 3e:84:ba:43:42:90:85:16:e7:75:73:c0:99:2f:09:79:ca:08:4e:46:85:68:1f:f1:95:cc:ba:8a:22:9b:8a:76 ------BEGIN CERTIFICATE----- -MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1 -MQswCQYDVQQGEwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1 -czEoMCYGA1UEAwwfRUUgQ2VydGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYG -CSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIwMTAxMDMwMTAxMDMwWhgPMjAzMDEy -MTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlBUyBTZXJ0aWZpdHNl 
-ZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRyZSBS -b290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEB -AQUAA4IBDwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUy -euuOF0+W2Ap7kaJjbMeMTC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvO -bntl8jixwKIy72KyaOBhU8E2lf/slLo2rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIw -WFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw93X2PaRka9ZP585ArQ/d -MtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtNP2MbRMNE -1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYD -VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/ -zQas8fElyalL1BSZMEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYB -BQUHAwMGCCsGAQUFBwMEBggrBgEFBQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEF -BQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+RjxY6hUFaTlrg4wCQiZrxTFGGV -v9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqMlIpPnTX/dqQG -E5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u -uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIW -iAYLtqZLICjU3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/v -GVCJYMzpJJUPwssd8m92kMfMdcGWxZ0= ------END CERTIFICATE----- - -# Issuer: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH -# Subject: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH -# Label: "D-TRUST Root Class 3 CA 2 2009" -# Serial: 623603 -# MD5 Fingerprint: cd:e0:25:69:8d:47:ac:9c:89:35:90:f7:fd:51:3d:2f -# SHA1 Fingerprint: 58:e8:ab:b0:36:15:33:fb:80:f7:9b:1b:6d:29:d3:ff:8d:5f:00:f0 -# SHA256 Fingerprint: 49:e7:a4:42:ac:f0:ea:62:87:05:00:54:b5:25:64:b6:50:e4:f4:9e:42:e3:48:d6:aa:38:e0:39:e9:57:b1:c1 ------BEGIN CERTIFICATE----- -MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF -MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD -bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha -ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM -HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB -BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03 
-UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42 -tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R -ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM -lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp -/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G -A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G -A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj -dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy -MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl -cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js -L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL -BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni -acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 -o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K -zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8 -PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y -Johw1+qRzT65ysCQblrGXnRl11z+o+I= ------END CERTIFICATE----- - -# Issuer: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH -# Subject: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH -# Label: "D-TRUST Root Class 3 CA 2 EV 2009" -# Serial: 623604 -# MD5 Fingerprint: aa:c6:43:2c:5e:2d:cd:c4:34:c0:50:4f:11:02:4f:b6 -# SHA1 Fingerprint: 96:c9:1b:0b:95:b4:10:98:42:fa:d0:d8:22:79:fe:60:fa:b9:16:83 -# SHA256 Fingerprint: ee:c5:49:6b:98:8c:e9:86:25:b9:34:09:2e:ec:29:08:be:d0:b0:f3:16:c2:d4:73:0c:84:ea:f1:f3:d3:48:81 ------BEGIN CERTIFICATE----- -MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF -MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD -bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw -NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV -BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI 
-hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn -ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0 -3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z -qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR -p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8 -HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw -ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea -HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw -Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh -c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E -RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt -dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku -Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp -3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05 -nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF -CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na -xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX -KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1 ------END CERTIFICATE----- - -# Issuer: CN=CA Disig Root R2 O=Disig a.s. -# Subject: CN=CA Disig Root R2 O=Disig a.s. 
-# Label: "CA Disig Root R2" -# Serial: 10572350602393338211 -# MD5 Fingerprint: 26:01:fb:d8:27:a7:17:9a:45:54:38:1a:43:01:3b:03 -# SHA1 Fingerprint: b5:61:eb:ea:a4:de:e4:25:4b:69:1a:98:a5:57:47:c2:34:c7:d9:71 -# SHA256 Fingerprint: e2:3d:4a:03:6d:7b:70:e9:f5:95:b1:42:20:79:d2:b9:1e:df:bb:1f:b6:51:a0:63:3e:aa:8a:9d:c5:f8:07:03 ------BEGIN CERTIFICATE----- -MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV -BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu -MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy -MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx -EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw -ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe -NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH -PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I -x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe -QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR -yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO -QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912 -H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ -QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD -i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs -nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1 -rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud -DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI -hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM -tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf -GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb -lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka -+elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal -TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i 
-nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3 -gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr -G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os -zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x -L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL ------END CERTIFICATE----- - -# Issuer: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV -# Subject: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV -# Label: "ACCVRAIZ1" -# Serial: 6828503384748696800 -# MD5 Fingerprint: d0:a0:5a:ee:05:b6:09:94:21:a1:7d:f1:b2:29:82:02 -# SHA1 Fingerprint: 93:05:7a:88:15:c6:4f:ce:88:2f:fa:91:16:52:28:78:bc:53:64:17 -# SHA256 Fingerprint: 9a:6e:c0:12:e1:a7:da:9d:be:34:19:4d:47:8a:d7:c0:db:18:22:fb:07:1d:f1:29:81:49:6e:d1:04:38:41:13 ------BEGIN CERTIFICATE----- -MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UE -AwwJQUNDVlJBSVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQsw -CQYDVQQGEwJFUzAeFw0xMTA1MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQ -BgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwHUEtJQUNDVjENMAsGA1UECgwEQUND -VjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCb -qau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gMjmoY -HtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWo -G2ioPej0RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpA -lHPrzg5XPAOBOp0KoVdDaaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhr -IA8wKFSVf+DuzgpmndFALW4ir50awQUZ0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/ -0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDGWuzndN9wrqODJerWx5eH -k6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs78yM2x/47 -4KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMO -m3WR5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpa -cXpkatcnYGMN285J9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPl -uUsXQA+xtrn13k/c4LOsOxFwYIRKQ26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYI -KwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRwOi8vd3d3LmFjY3YuZXMvZmls -ZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEuY3J0MB8GCCsG 
-AQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2 -VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeT -VfZW6oHlNsyMHj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIG -CCsGAQUFBwICMIIBFB6CARAAQQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUA -cgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBhAO0AegAgAGQAZQAgAGwAYQAgAEEA -QwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUAYwBuAG8AbABvAGcA -7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBjAHQA -cgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAA -QwBQAFMAIABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUA -czAwBggrBgEFBQcCARYkaHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2Mu -aHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRt -aW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2MV9kZXIuY3JsMA4GA1Ud -DwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZIhvcNAQEF -BQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdp -D70ER9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gU -JyCpZET/LtZ1qmxNYEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+m -AM/EKXMRNt6GGT6d7hmKG9Ww7Y49nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepD -vV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJTS+xJlsndQAJxGJ3KQhfnlms -tn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3sCPdK6jT2iWH -7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h -I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szA -h1xA2syVP1XgNce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xF -d3+YJ5oyXSrjhO7FmGYvliAd3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2H -pPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3pEfbRD0tVNEYqi4Y7 ------END CERTIFICATE----- - -# Issuer: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA -# Subject: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA -# Label: "TWCA Global Root CA" -# Serial: 3262 -# MD5 Fingerprint: f9:03:7e:cf:e6:9e:3c:73:7a:2a:90:07:69:ff:2b:96 -# SHA1 Fingerprint: 9c:bb:48:53:f6:a4:f6:d3:52:a4:e8:32:52:55:60:13:f5:ad:af:65 -# SHA256 Fingerprint: 
59:76:90:07:f7:68:5d:0f:cd:50:87:2f:9f:95:d5:75:5a:5b:2b:45:7d:81:f3:69:2b:61:0a:98:67:2f:0e:1b ------BEGIN CERTIFICATE----- -MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx -EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT -VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5 -NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT -B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG -SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF -10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz -0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh -MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH -zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc -46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2 -yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi -laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP -oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA -BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE -qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm -4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB -/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL -1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn -LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF -H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo -RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+ -nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh -15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW -6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW -nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j -wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz -aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy -KwbQBM0= ------END 
CERTIFICATE----- - -# Issuer: CN=TeliaSonera Root CA v1 O=TeliaSonera -# Subject: CN=TeliaSonera Root CA v1 O=TeliaSonera -# Label: "TeliaSonera Root CA v1" -# Serial: 199041966741090107964904287217786801558 -# MD5 Fingerprint: 37:41:49:1b:18:56:9a:26:f5:ad:c2:66:fb:40:a5:4c -# SHA1 Fingerprint: 43:13:bb:96:f1:d5:86:9b:c1:4e:6a:92:f6:cf:f6:34:69:87:82:37 -# SHA256 Fingerprint: dd:69:36:fe:21:f8:f0:77:c1:23:a1:a5:21:c1:22:24:f7:22:55:b7:3e:03:a7:26:06:93:e8:a2:4b:0f:a3:89 ------BEGIN CERTIFICATE----- -MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw -NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv -b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD -VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2 -MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F -VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1 -7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X -Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+ -/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs -81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm -dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe -Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu -sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4 -pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs -slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ -arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD -VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG -9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl -dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx -0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj -TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed -Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7 
-Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI -OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7 -vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW -t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn -HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx -SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= ------END CERTIFICATE----- - -# Issuer: CN=E-Tugra Certification Authority O=E-Tu\u011fra EBG Bili\u015fim Teknolojileri ve Hizmetleri A.\u015e. OU=E-Tugra Sertifikasyon Merkezi -# Subject: CN=E-Tugra Certification Authority O=E-Tu\u011fra EBG Bili\u015fim Teknolojileri ve Hizmetleri A.\u015e. OU=E-Tugra Sertifikasyon Merkezi -# Label: "E-Tugra Certification Authority" -# Serial: 7667447206703254355 -# MD5 Fingerprint: b8:a1:03:63:b0:bd:21:71:70:8a:6f:13:3a:bb:79:49 -# SHA1 Fingerprint: 51:c6:e7:08:49:06:6e:f3:92:d4:5c:a0:0d:6d:a3:62:8f:c3:52:39 -# SHA256 Fingerprint: b0:bf:d5:2b:b0:d7:d9:bd:92:bf:5d:4d:c1:3d:a2:55:c0:2c:54:2f:37:83:65:ea:89:39:11:f5:5e:55:f2:3c ------BEGIN CERTIFICATE----- -MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNV -BAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBC -aWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNV -BAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQDDB9FLVR1 -Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMwNTEyMDk0OFoXDTIz -MDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+ -BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhp -em1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN -ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 -MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4vU/kwVRHoViVF56C/UY -B4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vdhQd2h8y/L5VMzH2nPbxH -D5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5KCKpbknSF -Q9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEo 
-q1+gElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3D -k14opz8n8Y4e0ypQBaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcH -fC425lAcP9tDJMW/hkd5s3kc91r0E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsut -dEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gzrt48Ue7LE3wBf4QOXVGUnhMM -ti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAqjqFGOjGY5RH8 -zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn -rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUX -U8u3Zg5mTPj5dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6 -Jyr+zE7S6E5UMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5 -XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAF -Nzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAKkEh47U6YA5n+KGCR -HTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jOXKqY -GwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c -77NCR807VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3 -+GbHeJAAFS6LrVE1Uweoa2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WK -vJUawSg5TB9D0pH0clmKuVb8P7Sd2nCcdlqMQ1DujjByTd//SffGqWfZbawCEeI6 -FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEVKV0jq9BgoRJP3vQXzTLl -yb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gTDx4JnW2P -AJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpD -y4Q08ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8d -NL/+I5c30jn6PQ0GC7TbO6Orb1wdtn7os4I07QZcJA== ------END CERTIFICATE----- - -# Issuer: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center -# Subject: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center -# Label: "T-TeleSec GlobalRoot Class 2" -# Serial: 1 -# MD5 Fingerprint: 2b:9b:9e:e4:7b:6c:1f:00:72:1a:cc:c1:77:79:df:6a -# SHA1 Fingerprint: 59:0d:2d:7d:88:4f:40:2e:61:7e:a5:62:32:17:65:cf:17:d8:94:e9 -# SHA256 Fingerprint: 91:e2:f5:78:8d:58:10:eb:a7:ba:58:73:7d:e1:54:8a:8e:ca:cd:01:45:98:bc:0b:14:3e:04:1b:17:05:25:52 ------BEGIN CERTIFICATE----- 
-MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx -KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd -BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl -YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1 -OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy -aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 -ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G -CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd -AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC -FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi -1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq -jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ -wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj -QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/ -WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy -NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC -uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw -IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6 -g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN -9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP -BSeOE6Fuwg== ------END CERTIFICATE----- - -# Issuer: CN=Atos TrustedRoot 2011 O=Atos -# Subject: CN=Atos TrustedRoot 2011 O=Atos -# Label: "Atos TrustedRoot 2011" -# Serial: 6643877497813316402 -# MD5 Fingerprint: ae:b9:c4:32:4b:ac:7f:5d:66:cc:77:94:bb:2a:77:56 -# SHA1 Fingerprint: 2b:b1:f5:3e:55:0c:1d:c5:f1:d4:e6:b7:6a:46:4b:55:06:02:ac:21 -# SHA256 Fingerprint: f3:56:be:a2:44:b7:a9:1e:b3:5d:53:ca:9a:d7:86:4a:ce:01:8e:2d:35:d5:f8:f9:6d:df:68:a6:f4:1a:a4:74 ------BEGIN CERTIFICATE----- -MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE -AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG 
-EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM -FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC -REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp -Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM -VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+ -SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ -4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L -cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi -eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV -HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG -A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3 -DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j -vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP -DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc -maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D -lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv -KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed ------END CERTIFICATE----- - -# Issuer: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited -# Subject: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited -# Label: "QuoVadis Root CA 1 G3" -# Serial: 687049649626669250736271037606554624078720034195 -# MD5 Fingerprint: a4:bc:5b:3f:fe:37:9a:fa:64:f0:e2:fa:05:3d:0b:ab -# SHA1 Fingerprint: 1b:8e:ea:57:96:29:1a:c9:39:ea:b8:0a:81:1a:73:73:c0:93:79:67 -# SHA256 Fingerprint: 8a:86:6f:d1:b2:76:b5:7e:57:8e:92:1c:65:82:8a:2b:ed:58:e9:f2:f2:88:05:41:34:b7:f1:f4:bf:c9:cc:74 ------BEGIN CERTIFICATE----- -MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL -BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc -BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00 -MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM -aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG 
-SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV -wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe -rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341 -68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh -4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp -UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o -abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc -3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G -KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt -hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO -Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt -zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB -BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD -ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC -MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2 -cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN -qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5 -YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv -b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2 -8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k -NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj -ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp -q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt -nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD ------END CERTIFICATE----- - -# Issuer: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited -# Subject: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited -# Label: "QuoVadis Root CA 2 G3" -# Serial: 390156079458959257446133169266079962026824725800 -# MD5 Fingerprint: af:0c:86:6e:bf:40:2d:7f:0b:3e:12:50:ba:12:3d:06 -# SHA1 Fingerprint: 09:3c:61:f3:8b:8b:dc:7d:55:df:75:38:02:05:00:e1:25:f5:c8:36 -# SHA256 Fingerprint: 
8f:e4:fb:0a:f9:3a:4d:0d:67:db:0b:eb:b2:3e:37:c7:1b:f3:25:dc:bc:dd:24:0e:a0:4d:af:58:b4:7e:18:40 ------BEGIN CERTIFICATE----- -MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL -BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc -BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00 -MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM -aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG -SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf -qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW -n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym -c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+ -O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1 -o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j -IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq -IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz -8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh -vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l -7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG -cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB -BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD -ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66 -AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC -roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga -W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n -lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE -+V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV -csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd -dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg -KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM -HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4 
-WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M ------END CERTIFICATE----- - -# Issuer: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited -# Subject: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited -# Label: "QuoVadis Root CA 3 G3" -# Serial: 268090761170461462463995952157327242137089239581 -# MD5 Fingerprint: df:7d:b9:ad:54:6f:68:a1:df:89:57:03:97:43:b0:d7 -# SHA1 Fingerprint: 48:12:bd:92:3c:a8:c4:39:06:e7:30:6d:27:96:e6:a4:cf:22:2e:7d -# SHA256 Fingerprint: 88:ef:81:de:20:2e:b0:18:45:2e:43:f8:64:72:5c:ea:5f:bd:1f:c2:d9:d2:05:73:07:09:c5:d8:b8:69:0f:46 ------BEGIN CERTIFICATE----- -MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL -BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc -BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00 -MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM -aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG -SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR -/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu -FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR -U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c -ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR -FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k -A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw -eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl -sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp -VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q -A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+ -ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB -BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD -ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px -KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI -FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv 
-oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg -u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP -0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf -3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl -8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+ -DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN -PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/ -ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0 ------END CERTIFICATE----- - -# Issuer: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com -# Subject: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com -# Label: "DigiCert Assured ID Root G2" -# Serial: 15385348160840213938643033620894905419 -# MD5 Fingerprint: 92:38:b9:f8:63:24:82:65:2c:57:33:e6:fe:81:8f:9d -# SHA1 Fingerprint: a1:4b:48:d9:43:ee:0a:0e:40:90:4f:3c:e0:a4:c0:91:93:51:5d:3f -# SHA256 Fingerprint: 7d:05:eb:b6:82:33:9f:8c:94:51:ee:09:4e:eb:fe:fa:79:53:a1:14:ed:b2:f4:49:49:45:2f:ab:7d:2f:c1:85 ------BEGIN CERTIFICATE----- -MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl -MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 -d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv -b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG -EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl -cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi -MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA -n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc -biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp -EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA -bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu -YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB -AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW 
-BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI -QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I -0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni -lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9 -B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv -ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo -IhNzbM8m9Yop5w== ------END CERTIFICATE----- - -# Issuer: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com -# Subject: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com -# Label: "DigiCert Assured ID Root G3" -# Serial: 15459312981008553731928384953135426796 -# MD5 Fingerprint: 7c:7f:65:31:0c:81:df:8d:ba:3e:99:e2:5c:ad:6e:fb -# SHA1 Fingerprint: f5:17:a2:4f:9a:48:c6:c9:f8:a2:00:26:9f:dc:0f:48:2c:ab:30:89 -# SHA256 Fingerprint: 7e:37:cb:8b:4c:47:09:0c:ab:36:55:1b:a6:f4:5d:b8:40:68:0f:ba:16:6a:95:2d:b1:00:71:7f:43:05:3f:c2 ------BEGIN CERTIFICATE----- -MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw -CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu -ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg -RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV -UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu -Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq -hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf -Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q -RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ -BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD -AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY -JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv -6pZjamVFkpUBtA== ------END CERTIFICATE----- - -# Issuer: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com -# Subject: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com -# Label: "DigiCert Global 
Root G2" -# Serial: 4293743540046975378534879503202253541 -# MD5 Fingerprint: e4:a6:8a:c8:54:ac:52:42:46:0a:fd:72:48:1b:2a:44 -# SHA1 Fingerprint: df:3c:24:f9:bf:d6:66:76:1b:26:80:73:fe:06:d1:cc:8d:4f:82:a4 -# SHA256 Fingerprint: cb:3c:cb:b7:60:31:e5:e0:13:8f:8d:d3:9a:23:f9:de:47:ff:c3:5e:43:c1:14:4c:ea:27:d4:6a:5a:b1:cb:5f ------BEGIN CERTIFICATE----- -MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh -MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 -d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH -MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT -MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j -b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG -9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI -2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx -1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ -q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz -tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ -vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP -BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV -5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY -1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4 -NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG -Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91 -8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe -pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl -MrY= ------END CERTIFICATE----- - -# Issuer: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com -# Subject: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com -# Label: "DigiCert Global Root G3" -# Serial: 7089244469030293291760083333884364146 -# MD5 Fingerprint: f5:5d:a4:50:a5:fb:28:7e:1e:0f:0d:cc:96:57:56:ca -# SHA1 Fingerprint: 
7e:04:de:89:6a:3e:66:6d:00:e6:87:d3:3f:fa:d9:3b:e8:3d:34:9e -# SHA256 Fingerprint: 31:ad:66:48:f8:10:41:38:c7:38:f3:9e:a4:32:01:33:39:3e:3a:18:cc:02:29:6e:f9:7c:2a:c9:ef:67:31:d0 ------BEGIN CERTIFICATE----- -MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw -CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu -ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe -Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw -EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x -IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF -K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG -fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO -Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd -BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx -AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/ -oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8 -sycX ------END CERTIFICATE----- - -# Issuer: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com -# Subject: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com -# Label: "DigiCert Trusted Root G4" -# Serial: 7451500558977370777930084869016614236 -# MD5 Fingerprint: 78:f2:fc:aa:60:1f:2f:b4:eb:c9:37:ba:53:2e:75:49 -# SHA1 Fingerprint: dd:fb:16:cd:49:31:c9:73:a2:03:7d:3f:c8:3a:4d:7d:77:5d:05:e4 -# SHA256 Fingerprint: 55:2f:7b:dc:f1:a7:af:9e:6c:e6:72:01:7f:4f:12:ab:f7:72:40:c7:8e:76:1a:c2:03:d1:d9:d2:0a:c8:99:88 ------BEGIN CERTIFICATE----- -MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi -MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 -d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg -RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV -UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu -Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG 
-SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y -ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If -xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV -ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO -DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ -jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/ -CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi -EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM -fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY -uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK -chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t -9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB -hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD -ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2 -SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd -+SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc -fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa -sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N -cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N -0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie -4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI -r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1 -/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm -gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+ ------END CERTIFICATE----- - -# Issuer: CN=COMODO RSA Certification Authority O=COMODO CA Limited -# Subject: CN=COMODO RSA Certification Authority O=COMODO CA Limited -# Label: "COMODO RSA Certification Authority" -# Serial: 101909084537582093308941363524873193117 -# MD5 Fingerprint: 1b:31:b0:71:40:36:cc:14:36:91:ad:c4:3e:fd:ec:18 -# SHA1 Fingerprint: af:e5:d2:44:a8:d1:19:42:30:ff:47:9f:e2:f8:97:bb:cd:7a:8c:b4 -# SHA256 
Fingerprint: 52:f0:e1:c4:e5:8e:c6:29:29:1b:60:31:7f:07:46:71:b8:5d:7e:a8:0d:5b:07:27:34:63:53:4b:32:b4:02:34 ------BEGIN CERTIFICATE----- -MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB -hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G -A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV -BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5 -MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT -EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR -Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh -dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR -6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X -pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC -9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV -/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf -Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z -+pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w -qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah -SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC -u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf -Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq -crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E -FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB -/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl -wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM -4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV -2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna -FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ -CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK -boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke -jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL 
-S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb -QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl -0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB -NVOFBkpdn627G190 ------END CERTIFICATE----- - -# Issuer: CN=USERTrust RSA Certification Authority O=The USERTRUST Network -# Subject: CN=USERTrust RSA Certification Authority O=The USERTRUST Network -# Label: "USERTrust RSA Certification Authority" -# Serial: 2645093764781058787591871645665788717 -# MD5 Fingerprint: 1b:fe:69:d1:91:b7:19:33:a3:72:a8:0f:e1:55:e5:b5 -# SHA1 Fingerprint: 2b:8f:1b:57:33:0d:bb:a2:d0:7a:6c:51:f7:0e:e9:0d:da:b9:ad:8e -# SHA256 Fingerprint: e7:93:c9:b0:2f:d8:aa:13:e2:1c:31:22:8a:cc:b0:81:19:64:3b:74:9c:89:89:64:b1:74:6d:46:c3:d4:cb:d2 ------BEGIN CERTIFICATE----- -MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB -iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl -cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV -BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw -MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV -BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU -aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy -dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK -AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B -3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY -tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/ -Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2 -VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT -79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6 -c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT -Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l -c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee -UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE 
-Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd -BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G -A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF -Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO -VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3 -ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs -8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR -iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze -Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ -XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/ -qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB -VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB -L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG -jjxDah2nGN59PRbxYvnKkKj9 ------END CERTIFICATE----- - -# Issuer: CN=USERTrust ECC Certification Authority O=The USERTRUST Network -# Subject: CN=USERTrust ECC Certification Authority O=The USERTRUST Network -# Label: "USERTrust ECC Certification Authority" -# Serial: 123013823720199481456569720443997572134 -# MD5 Fingerprint: fa:68:bc:d9:b5:7f:ad:fd:c9:1d:06:83:28:cc:24:c1 -# SHA1 Fingerprint: d1:cb:ca:5d:b2:d5:2a:7f:69:3b:67:4d:e5:f0:5a:1d:0c:95:7d:f0 -# SHA256 Fingerprint: 4f:f4:60:d5:4b:9c:86:da:bf:bc:fc:57:12:e0:40:0d:2b:ed:3f:bc:4d:4f:bd:aa:86:e0:6a:dc:d2:a9:ad:7a ------BEGIN CERTIFICATE----- -MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL -MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl -eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT -JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx -MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT -Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg -VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm -aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo 
-I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng -o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G -A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD -VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB -zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW -RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= ------END CERTIFICATE----- - -# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 -# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 -# Label: "GlobalSign ECC Root CA - R4" -# Serial: 14367148294922964480859022125800977897474 -# MD5 Fingerprint: 20:f0:27:68:d1:7e:a0:9d:0e:e6:2a:ca:df:5c:89:8e -# SHA1 Fingerprint: 69:69:56:2e:40:80:f4:24:a1:e7:19:9f:14:ba:f3:ee:58:ab:6a:bb -# SHA256 Fingerprint: be:c9:49:11:c2:95:56:76:db:6c:0a:55:09:86:d7:6e:3b:a0:05:66:7c:44:2c:97:62:b4:fb:b7:73:de:22:8c ------BEGIN CERTIFICATE----- -MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEk -MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpH -bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX -DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD -QSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprlOQcJ -FspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAw -DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61F -uOJAf/sKbvu+M8k8o4TVMAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGX -kPoUVy0D7O48027KqGx2vKLeuwIgJ6iFJzWbVsaj8kfSt24bAgAXqmemFZHe+pTs -ewv4n4Q= ------END CERTIFICATE----- - -# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 -# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 -# Label: "GlobalSign ECC Root CA - R5" -# Serial: 32785792099990507226680698011560947931244 -# MD5 Fingerprint: 9f:ad:3b:1c:02:1e:8a:ba:17:74:38:81:0c:a2:bc:08 -# SHA1 Fingerprint: 
1f:24:c6:30:cd:a4:18:ef:20:69:ff:ad:4f:dd:5f:46:3a:1b:69:aa -# SHA256 Fingerprint: 17:9f:bc:14:8a:3d:d0:0f:d2:4e:a1:34:58:cc:43:bf:a7:f5:9c:81:82:d7:83:a5:13:f6:eb:ec:10:0c:89:24 ------BEGIN CERTIFICATE----- -MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk -MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH -bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX -DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD -QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu -MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc -8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke -hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD -VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI -KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg -515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO -xwy8p2Fp8fc74SrL+SvzZpA3 ------END CERTIFICATE----- - -# Issuer: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden -# Subject: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden -# Label: "Staat der Nederlanden Root CA - G3" -# Serial: 10003001 -# MD5 Fingerprint: 0b:46:67:07:db:10:2f:19:8c:35:50:60:d1:0b:f4:37 -# SHA1 Fingerprint: d8:eb:6b:41:51:92:59:e0:f3:e7:85:00:c0:3d:b6:88:97:c9:ee:fc -# SHA256 Fingerprint: 3c:4f:b0:b9:5a:b8:b3:00:32:f4:32:b8:6f:53:5f:e1:72:c1:85:d0:fd:39:86:58:37:cf:36:18:7f:a6:f4:28 ------BEGIN CERTIFICATE----- -MIIFdDCCA1ygAwIBAgIEAJiiOTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO -TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh -dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEczMB4XDTEzMTExNDExMjg0MloX -DTI4MTExMzIzMDAwMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl -ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv -b3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4yolQP -cPssXFnrbMSkUeiFKrPMSjTysF/zDsccPVMeiAho2G89rcKezIJnByeHaHE6n3WW 
-IkYFsO2tx1ueKt6c/DrGlaf1F2cY5y9JCAxcz+bMNO14+1Cx3Gsy8KL+tjzk7FqX -xz8ecAgwoNzFs21v0IJyEavSgWhZghe3eJJg+szeP4TrjTgzkApyI/o1zCZxMdFy -KJLZWyNtZrVtB0LrpjPOktvA9mxjeM3KTj215VKb8b475lRgsGYeCasH/lSJEULR -9yS6YHgamPfJEf0WwTUaVHXvQ9Plrk7O53vDxk5hUUurmkVLoR9BvUhTFXFkC4az -5S6+zqQbwSmEorXLCCN2QyIkHxcE1G6cxvx/K2Ya7Irl1s9N9WMJtxU51nus6+N8 -6U78dULI7ViVDAZCopz35HCz33JvWjdAidiFpNfxC95DGdRKWCyMijmev4SH8RY7 -Ngzp07TKbBlBUgmhHbBqv4LvcFEhMtwFdozL92TkA1CvjJFnq8Xy7ljY3r735zHP -bMk7ccHViLVlvMDoFxcHErVc0qsgk7TmgoNwNsXNo42ti+yjwUOH5kPiNL6VizXt -BznaqB16nzaeErAMZRKQFWDZJkBE41ZgpRDUajz9QdwOWke275dhdU/Z/seyHdTt -XUmzqWrLZoQT1Vyg3N9udwbRcXXIV2+vD3dbAgMBAAGjQjBAMA8GA1UdEwEB/wQF -MAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRUrfrHkleuyjWcLhL75Lpd -INyUVzANBgkqhkiG9w0BAQsFAAOCAgEAMJmdBTLIXg47mAE6iqTnB/d6+Oea31BD -U5cqPco8R5gu4RV78ZLzYdqQJRZlwJ9UXQ4DO1t3ApyEtg2YXzTdO2PCwyiBwpwp -LiniyMMB8jPqKqrMCQj3ZWfGzd/TtiunvczRDnBfuCPRy5FOCvTIeuXZYzbB1N/8 -Ipf3YF3qKS9Ysr1YvY2WTxB1v0h7PVGHoTx0IsL8B3+A3MSs/mrBcDCw6Y5p4ixp -gZQJut3+TcCDjJRYwEYgr5wfAvg1VUkvRtTA8KCWAg8zxXHzniN9lLf9OtMJgwYh -/WA9rjLA0u6NpvDntIJ8CsxwyXmA+P5M9zWEGYox+wrZ13+b8KKaa8MFSu1BYBQw -0aoRQm7TIwIEC8Zl3d1Sd9qBa7Ko+gE4uZbqKmxnl4mUnrzhVNXkanjvSr0rmj1A -fsbAddJu+2gw7OyLnflJNZoaLNmzlTnVHpL3prllL+U9bTpITAjc5CgSKL59NVzq -4BZ+Extq1z7XnvwtdbLBFNUjA9tbbws+eC8N3jONFrdI54OagQ97wUNNVQQXOEpR -1VmiiXTTn74eS9fGbbeIJG9gkaSChVtWQbzQRKtqE77RLFi3EjNYsjdj3BP1lB0/ -QFH1T/U67cjF68IeHRaVesd+QnGTbksVtzDfqu1XhUisHWrdOWnk4Xl4vs4Fv6EM -94B7IWcnMFk= ------END CERTIFICATE----- - -# Issuer: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden -# Subject: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden -# Label: "Staat der Nederlanden EV Root CA" -# Serial: 10000013 -# MD5 Fingerprint: fc:06:af:7b:e8:1a:f1:9a:b4:e8:d2:70:1f:c0:f5:ba -# SHA1 Fingerprint: 76:e2:7e:c1:4f:db:82:c1:c0:a6:75:b5:05:be:3d:29:b4:ed:db:bb -# SHA256 Fingerprint: 4d:24:91:41:4c:fe:95:67:46:ec:4c:ef:a6:cf:6f:72:e2:8a:13:29:43:2f:9d:8a:90:7a:c4:cb:5d:ad:c1:5a ------BEGIN CERTIFICATE----- 
-MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJO -TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFh -dCBkZXIgTmVkZXJsYW5kZW4gRVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0y -MjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5MMR4wHAYDVQQKDBVTdGFhdCBkZXIg -TmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRlcmxhbmRlbiBFViBS -b290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkkSzrS -M4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nC -UiY4iKTWO0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3d -Z//BYY1jTw+bbRcwJu+r0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46p -rfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13l -pJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gVXJrm0w912fxBmJc+qiXb -j5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr08C+eKxC -KFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS -/ZbV0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0X -cgOPvZuM5l5Tnrmd74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH -1vI4gnPah1vlPNOePqc7nvQDs/nxfRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrP -px9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB -/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwaivsnuL8wbqg7 -MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI -eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u -2dfOWBfoqSmuc0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHS -v4ilf0X8rLiltTMMgsT7B/Zq5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTC -wPTxGfARKbalGAKb12NMcIxHowNDXLldRqANb/9Zjr7dn3LDWyvfjFvO5QxGbJKy -CqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tNf1zuacpzEPuKqf2e -vTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi5Dp6 -Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIa -Gl6I6lD4WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeL -eG9QgkRQP2YGiqtDhFZKDyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8 -FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGyeUN51q1veieQA6TqJIc/2b3Z6fJfUEkc -7uzXLg== ------END CERTIFICATE----- - -# Issuer: CN=IdenTrust Commercial Root CA 1 
O=IdenTrust -# Subject: CN=IdenTrust Commercial Root CA 1 O=IdenTrust -# Label: "IdenTrust Commercial Root CA 1" -# Serial: 13298821034946342390520003877796839426 -# MD5 Fingerprint: b3:3e:77:73:75:ee:a0:d3:e3:7e:49:63:49:59:bb:c7 -# SHA1 Fingerprint: df:71:7e:aa:4a:d9:4e:c9:55:84:99:60:2d:48:de:5f:bc:f0:3a:25 -# SHA256 Fingerprint: 5d:56:49:9b:e4:d2:e0:8b:cf:ca:d0:8a:3e:38:72:3d:50:50:3b:de:70:69:48:e4:2f:55:60:30:19:e5:28:ae ------BEGIN CERTIFICATE----- -MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK -MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu -VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw -MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw -JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG -SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT -3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU -+ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp -S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1 -bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi -T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL -vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK -Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK -dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT -c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv -l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N -iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB -/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD -ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH -6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt -LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93 -nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3 -+wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK 
-W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT -AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq -l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG -4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ -mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A -7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H ------END CERTIFICATE----- - -# Issuer: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust -# Subject: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust -# Label: "IdenTrust Public Sector Root CA 1" -# Serial: 13298821034946342390521976156843933698 -# MD5 Fingerprint: 37:06:a5:b0:fc:89:9d:ba:f4:6b:8c:1a:64:cd:d5:ba -# SHA1 Fingerprint: ba:29:41:60:77:98:3f:f4:f3:ef:f2:31:05:3b:2e:ea:6d:4d:45:fd -# SHA256 Fingerprint: 30:d0:89:5a:9a:44:8a:26:20:91:63:55:22:d1:f5:20:10:b5:86:7a:ca:e1:2c:78:ef:95:8f:d4:f4:38:9f:2f ------BEGIN CERTIFICATE----- -MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN -MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu -VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN -MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0 -MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi -MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7 -ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy -RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS -bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF -/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R -3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw -EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy -9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V -GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ -2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV -WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD 
-W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ -BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN -AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj -t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV -DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9 -TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G -lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW -mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df -WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5 -+bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ -tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA -GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv -8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c ------END CERTIFICATE----- - -# Issuer: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only -# Subject: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. 
- for authorized use only -# Label: "Entrust Root Certification Authority - G2" -# Serial: 1246989352 -# MD5 Fingerprint: 4b:e2:c9:91:96:65:0c:f4:0e:5a:93:92:a0:0a:fe:b2 -# SHA1 Fingerprint: 8c:f4:27:fd:79:0c:3a:d1:66:06:8d:e8:1e:57:ef:bb:93:22:72:d4 -# SHA256 Fingerprint: 43:df:57:74:b0:3e:7f:ef:5f:e4:0d:93:1a:7b:ed:f1:bb:2e:6b:42:73:8c:4e:6d:38:41:10:3d:3a:a7:f3:39 ------BEGIN CERTIFICATE----- -MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC -VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50 -cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs -IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz -dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy -NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu -dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt -dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0 -aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj -YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T -RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN -cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW -wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1 -U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0 -jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP -BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN -BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/ -jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ -Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v -1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R -nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH -VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g== ------END CERTIFICATE----- - -# Issuer: CN=Entrust Root Certification Authority - EC1 O=Entrust, 
Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only -# Subject: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only -# Label: "Entrust Root Certification Authority - EC1" -# Serial: 51543124481930649114116133369 -# MD5 Fingerprint: b6:7e:1d:f0:58:c5:49:6c:24:3b:3d:ed:98:18:ed:bc -# SHA1 Fingerprint: 20:d8:06:40:df:9b:25:f5:12:25:3a:11:ea:f7:59:8a:eb:14:b5:47 -# SHA256 Fingerprint: 02:ed:0e:b2:8c:14:da:45:16:5c:56:67:91:70:0d:64:51:d7:fb:56:f0:b2:ab:1d:3b:8e:b0:70:e5:6e:df:f5 ------BEGIN CERTIFICATE----- -MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG -A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3 -d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu -dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq -RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy -MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD -VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0 -L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g -Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD -ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi -A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt -ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH -Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O -BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC -R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX -hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G ------END CERTIFICATE----- - -# Issuer: CN=CFCA EV ROOT O=China Financial Certification Authority -# Subject: CN=CFCA EV ROOT O=China Financial Certification Authority -# Label: "CFCA EV ROOT" -# Serial: 407555286 -# MD5 Fingerprint: 74:e1:b6:ed:26:7a:7a:44:30:33:94:ab:7b:27:81:30 -# SHA1 Fingerprint: 
e2:b8:29:4b:55:84:ab:6b:58:c2:90:46:6c:ac:3f:b8:39:8f:84:83 -# SHA256 Fingerprint: 5c:c3:d7:8e:4e:1d:5e:45:54:7a:04:e6:87:3e:64:f9:0c:f9:53:6d:1c:cc:2e:f8:00:f3:55:c4:c5:fd:70:fd ------BEGIN CERTIFICATE----- -MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD -TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y -aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx -MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j -aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP -T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03 -sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL -TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5 -/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp -7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz -EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt -hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP -a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot -aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg -TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV -PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv -cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL -tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd -BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB -ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT -ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL -jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS -ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy -P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19 -xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d -Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN -5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe 
-/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z -AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ -5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su ------END CERTIFICATE----- - -# Issuer: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903 -# Subject: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903 -# Label: "Certinomis - Root CA" -# Serial: 1 -# MD5 Fingerprint: 14:0a:fd:8d:a8:28:b5:38:69:db:56:7e:61:22:03:3f -# SHA1 Fingerprint: 9d:70:bb:01:a5:a4:a0:18:11:2e:f7:1c:01:b9:32:c5:34:e7:88:a8 -# SHA256 Fingerprint: 2a:99:f5:bc:11:74:b7:3c:bb:1d:62:08:84:e0:1c:34:e5:1c:cb:39:78:da:12:5f:0e:33:26:88:83:bf:41:58 ------BEGIN CERTIFICATE----- -MIIFkjCCA3qgAwIBAgIBATANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJGUjET -MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxHTAb -BgNVBAMTFENlcnRpbm9taXMgLSBSb290IENBMB4XDTEzMTAyMTA5MTcxOFoXDTMz -MTAyMTA5MTcxOFowWjELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMx -FzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMR0wGwYDVQQDExRDZXJ0aW5vbWlzIC0g -Um9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANTMCQosP5L2 -fxSeC5yaah1AMGT9qt8OHgZbn1CF6s2Nq0Nn3rD6foCWnoR4kkjW4znuzuRZWJfl -LieY6pOod5tK8O90gC3rMB+12ceAnGInkYjwSond3IjmFPnVAy//ldu9n+ws+hQV -WZUKxkd8aRi5pwP5ynapz8dvtF4F/u7BUrJ1Mofs7SlmO/NKFoL21prbcpjp3vDF -TKWrteoB4owuZH9kb/2jJZOLyKIOSY008B/sWEUuNKqEUL3nskoTuLAPrjhdsKkb -5nPJWqHZZkCqqU2mNAKthH6yI8H7KsZn9DS2sJVqM09xRLWtwHkziOC/7aOgFLSc -CbAK42C++PhmiM1b8XcF4LVzbsF9Ri6OSyemzTUK/eVNfaoqoynHWmgE6OXWk6Ri -wsXm9E/G+Z8ajYJJGYrKWUM66A0ywfRMEwNvbqY/kXPLynNvEiCL7sCCeN5LLsJJ -wx3tFvYk9CcbXFcx3FXuqB5vbKziRcxXV4p1VxngtViZSTYxPDMBbRZKzbgqg4SG -m/lg0h9tkQPTYKbVPZrdd5A9NaSfD171UkRpucC63M9933zZxKyGIjK8e2uR73r4 -F2iw4lNVYC2vPsKD2NkJK/DAZNuHi5HMkesE/Xa0lZrmFAYb1TQdvtj/dBxThZng -WVJKYe2InmtJiUZ+IFrZ50rlau7SZRFDAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIB -BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTvkUz1pcMw6C8I6tNxIqSSaHh0 -2TAfBgNVHSMEGDAWgBTvkUz1pcMw6C8I6tNxIqSSaHh02TANBgkqhkiG9w0BAQsF 
-AAOCAgEAfj1U2iJdGlg+O1QnurrMyOMaauo++RLrVl89UM7g6kgmJs95Vn6RHJk/ -0KGRHCwPT5iVWVO90CLYiF2cN/z7ZMF4jIuaYAnq1fohX9B0ZedQxb8uuQsLrbWw -F6YSjNRieOpWauwK0kDDPAUwPk2Ut59KA9N9J0u2/kTO+hkzGm2kQtHdzMjI1xZS -g081lLMSVX3l4kLr5JyTCcBMWwerx20RoFAXlCOotQqSD7J6wWAsOMwaplv/8gzj -qh8c3LigkyfeY+N/IZ865Z764BNqdeuWXGKRlI5nU7aJ+BIJy29SWwNyhlCVCNSN -h4YVH5Uk2KRvms6knZtt0rJ2BobGVgjF6wnaNsIbW0G+YSrjcOa4pvi2WsS9Iff/ -ql+hbHY5ZtbqTFXhADObE5hjyW/QASAJN1LnDE8+zbz1X5YnpyACleAu6AdBBR8V -btaw5BngDwKTACdyxYvRVB9dSsNAl35VpnzBMwQUAR1JIGkLGZOdblgi90AMRgwj -Y/M50n92Uaf0yKHxDHYiI0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ -8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nMcyrDflOR1m749fPH0FFNjkulW+YZFzvW -gQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVrhkIGuUE= ------END CERTIFICATE----- - -# Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed -# Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed -# Label: "OISTE WISeKey Global Root GB CA" -# Serial: 157768595616588414422159278966750757568 -# MD5 Fingerprint: a4:eb:b9:61:28:2e:b7:2f:98:b0:35:26:90:99:51:1d -# SHA1 Fingerprint: 0f:f9:40:76:18:d3:d7:6a:4b:98:f0:a8:35:9e:0c:fd:27:ac:cc:ed -# SHA256 Fingerprint: 6b:9c:08:e8:6e:b0:f7:67:cf:ad:65:cd:98:b6:21:49:e5:49:4a:67:f5:84:5e:7b:d1:ed:01:9f:27:b8:6b:d6 ------BEGIN CERTIFICATE----- -MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt -MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg -Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i -YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x -CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG -b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh -bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3 -HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx -WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX -1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk 
-u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P -99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r -M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw -AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB -BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh -cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5 -gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO -ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf -aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic -Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM= ------END CERTIFICATE----- - -# Issuer: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. -# Subject: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. -# Label: "SZAFIR ROOT CA2" -# Serial: 357043034767186914217277344587386743377558296292 -# MD5 Fingerprint: 11:64:c1:89:b0:24:b1:8c:b1:07:7e:89:9e:51:9e:99 -# SHA1 Fingerprint: e2:52:fa:95:3f:ed:db:24:60:bd:6e:28:f3:9c:cc:cf:5e:b3:3f:de -# SHA256 Fingerprint: a1:33:9d:33:28:1a:0b:56:e5:57:d3:d3:2b:1c:e7:f9:36:7e:b0:94:bd:5f:a7:2a:7e:50:04:c8:de:d7:ca:fe ------BEGIN CERTIFICATE----- -MIIDcjCCAlqgAwIBAgIUPopdB+xV0jLVt+O2XwHrLdzk1uQwDQYJKoZIhvcNAQEL -BQAwUTELMAkGA1UEBhMCUEwxKDAmBgNVBAoMH0tyYWpvd2EgSXpiYSBSb3psaWN6 -ZW5pb3dhIFMuQS4xGDAWBgNVBAMMD1NaQUZJUiBST09UIENBMjAeFw0xNTEwMTkw -NzQzMzBaFw0zNTEwMTkwNzQzMzBaMFExCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9L -cmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRgwFgYDVQQDDA9TWkFGSVIg -Uk9PVCBDQTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3vD5QqEvN -QLXOYeeWyrSh2gwisPq1e3YAd4wLz32ohswmUeQgPYUM1ljj5/QqGJ3a0a4m7utT -3PSQ1hNKDJA8w/Ta0o4NkjrcsbH/ON7Dui1fgLkCvUqdGw+0w8LBZwPd3BucPbOw -3gAeqDRHu5rr/gsUvTaE2g0gv/pby6kWIK05YO4vdbbnl5z5Pv1+TW9NL++IDWr6 -3fE9biCloBK0TXC5ztdyO4mTp4CEHCdJckm1/zuVnsHMyAHs6A6KCpbns6aH5db5 -BSsNl0BwPLqsdVqc1U2dAgrSS5tmS0YHF2Wtn2yIANwiieDhZNRnvDF5YTy7ykHN -XGoAyDw4jlivAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD 
-AgEGMB0GA1UdDgQWBBQuFqlKGLXLzPVvUPMjX/hd56zwyDANBgkqhkiG9w0BAQsF -AAOCAQEAtXP4A9xZWx126aMqe5Aosk3AM0+qmrHUuOQn/6mWmc5G4G18TKI4pAZw -8PRBEew/R40/cof5O/2kbytTAOD/OblqBw7rHRz2onKQy4I9EYKL0rufKq8h5mOG -nXkZ7/e7DDWQw4rtTw/1zBLZpD67oPwglV9PJi8RI4NOdQcPv5vRtB3pEAT+ymCP -oky4rc/hkA/NrgrHXXu3UNLUYfrVFdvXn4dRVOul4+vJhaAlIDf7js4MNIThPIGy -d05DpYhfhmehPea0XGG2Ptv+tyjFogeutcrKjSoS75ftwjCkySp6+/NNIxuZMzSg -LvWpCz/UXeHPhJ/iGcJfitYgHuNztw== ------END CERTIFICATE----- - -# Issuer: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority -# Subject: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority -# Label: "Certum Trusted Network CA 2" -# Serial: 44979900017204383099463764357512596969 -# MD5 Fingerprint: 6d:46:9e:d9:25:6d:08:23:5b:5e:74:7d:1e:27:db:f2 -# SHA1 Fingerprint: d3:dd:48:3e:2b:bf:4c:05:e8:af:10:f5:fa:76:26:cf:d3:dc:30:92 -# SHA256 Fingerprint: b6:76:f2:ed:da:e8:77:5c:d3:6c:b0:f6:3c:d1:d4:60:39:61:f4:9e:62:65:ba:01:3a:2f:03:07:b6:d0:b8:04 ------BEGIN CERTIFICATE----- -MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCB -gDELMAkGA1UEBhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMu -QS4xJzAlBgNVBAsTHkNlcnR1bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIG -A1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29yayBDQSAyMCIYDzIwMTExMDA2MDgz -OTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQTDEiMCAGA1UEChMZ -VW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRp -ZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3 -b3JrIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWA -DGSdhhuWZGc/IjoedQF97/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn -0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+oCgCXhVqqndwpyeI1B+twTUrWwbNWuKFB -OJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40bRr5HMNUuctHFY9rnY3lE -fktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2puTRZCr+E -Sv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1m -o130GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02i 
-sx7QBlrd9pPPV3WZ9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOW -OZV7bIBaTxNyxtd9KXpEulKkKtVBRgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgez -Tv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pyehizKV/Ma5ciSixqClnrDvFAS -adgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vMBhBgu4M1t15n -3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD -AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMC -AQYwDQYJKoZIhvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQ -F/xlhMcQSZDe28cmk4gmb3DWAl45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTf -CVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuAL55MYIR4PSFk1vtBHxgP58l1cb29 -XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMoclm2q8KMZiYcdywm -djWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tMpkT/ -WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jb -AoJnwTnbw3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksq -P/ujmv5zMnHCnsZy4YpoJ/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Ko -b7a6bINDd82Kkhehnlt4Fj1F4jNy3eFmypnTycUm/Q1oBEauttmbjL4ZvrHG8hnj -XALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLXis7VmFxWlgPF7ncGNf/P -5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7zAYspsbi -DrW5viSP ------END CERTIFICATE----- - -# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority -# Subject: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. 
Authority -# Label: "Hellenic Academic and Research Institutions RootCA 2015" -# Serial: 0 -# MD5 Fingerprint: ca:ff:e2:db:03:d9:cb:4b:e9:0f:ad:84:fd:7b:18:ce -# SHA1 Fingerprint: 01:0c:06:95:a6:98:19:14:ff:bf:5f:c6:b0:b6:95:ea:29:e9:12:a6 -# SHA256 Fingerprint: a0:40:92:9a:02:ce:53:b4:ac:f4:f2:ff:c6:98:1c:e4:49:6f:75:5e:6d:45:fe:0b:2a:69:2b:cd:52:52:3f:36 ------BEGIN CERTIFICATE----- -MIIGCzCCA/OgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMCR1Ix -DzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5k -IFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNVBAMT -N0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgUm9v -dENBIDIwMTUwHhcNMTUwNzA3MTAxMTIxWhcNNDAwNjMwMTAxMTIxWjCBpjELMAkG -A1UEBhMCR1IxDzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNh -ZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkx -QDA+BgNVBAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1 -dGlvbnMgUm9vdENBIDIwMTUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC -AQDC+Kk/G4n8PDwEXT2QNrCROnk8ZlrvbTkBSRq0t89/TSNTt5AA4xMqKKYx8ZEA -4yjsriFBzh/a/X0SWwGDD7mwX5nh8hKDgE0GPt+sr+ehiGsxr/CL0BgzuNtFajT0 -AoAkKAoCFZVedioNmToUW/bLy1O8E00BiDeUJRtCvCLYjqOWXjrZMts+6PAQZe10 -4S+nfK8nNLspfZu2zwnI5dMK/IhlZXQK3HMcXM1AsRzUtoSMTFDPaI6oWa7CJ06C -ojXdFPQf/7J31Ycvqm59JCfnxssm5uX+Zwdj2EUN3TpZZTlYepKZcj2chF6IIbjV -9Cz82XBST3i4vTwri5WY9bPRaM8gFH5MXF/ni+X1NYEZN9cRCLdmvtNKzoNXADrD -gfgXy5I2XdGj2HUb4Ysn6npIQf1FGQatJ5lOwXBH3bWfgVMS5bGMSF0xQxfjjMZ6 -Y5ZLKTBOhE5iGV48zpeQpX8B653g+IuJ3SWYPZK2fu/Z8VFRfS0myGlZYeCsargq -NhEEelC9MoS+L9xy1dcdFkfkR2YgP/SWxa+OAXqlD3pk9Q0Yh9muiNX6hME6wGko -LfINaFGq46V3xqSQDqE3izEjR8EJCOtu93ib14L8hCCZSRm2Ekax+0VVFqmjZayc -Bw/qa9wfLgZy7IaIEuQt218FL+TwA9MmM+eAws1CoRc0CwIDAQABo0IwQDAPBgNV -HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUcRVnyMjJvXVd -ctA4GGqd83EkVAswDQYJKoZIhvcNAQELBQADggIBAHW7bVRLqhBYRjTyYtcWNl0I -XtVsyIe9tC5G8jH4fOpCtZMWVdyhDBKg2mF+D1hYc2Ryx+hFjtyp8iY/xnmMsVMI -M4GwVhO+5lFc2JsKT0ucVlMC6U/2DWDqTUJV6HwbISHTGzrMd/K4kPFox/la/vot 
-9L/J9UUbzjgQKjeKeaO04wlshYaT/4mWJ3iBj2fjRnRUjtkNaeJK9E10A/+yd+2V -Z5fkscWrv2oj6NSU4kQoYsRL4vDY4ilrGnB+JGGTe08DMiUNRSQrlrRGar9KC/ea -j8GsGsVn82800vpzY4zvFrCopEYq+OsS7HK07/grfoxSwIuEVPkvPuNVqNxmsdnh -X9izjFk0WaSrT2y7HxjbdavYy5LNlDhhDgcGH0tGEPEVvo2FXDtKK4F5D7Rpn0lQ -l033DlZdwJVqwjbDG2jJ9SrcR5q+ss7FJej6A7na+RZukYT1HCjI/CbM1xyQVqdf -bzoEvM14iQuODy+jqk+iGxI9FghAD/FGTNeqewjBCvVtJ94Cj8rDtSvK6evIIVM4 -pcw72Hc3MKJP2W/R8kCtQXoXxdZKNYm3QdV8hn9VTYNKpXMgwDqvkPGaJI7ZjnHK -e7iG2rKPmT4dEw0SEe7Uq/DpFXYC5ODfqiAeW2GFZECpkJcNrVPSWh2HagCXZWK0 -vm9qp/UsQu0yrbYhnr68 ------END CERTIFICATE----- - -# Issuer: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority -# Subject: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority -# Label: "Hellenic Academic and Research Institutions ECC RootCA 2015" -# Serial: 0 -# MD5 Fingerprint: 81:e5:b4:17:eb:c2:f5:e1:4b:0d:41:7b:49:92:fe:ef -# SHA1 Fingerprint: 9f:f1:71:8d:92:d5:9a:f3:7d:74:97:b4:bc:6f:84:68:0b:ba:b6:66 -# SHA256 Fingerprint: 44:b5:45:aa:8a:25:e6:5a:73:ca:15:dc:27:fc:36:d2:4c:1c:b9:95:3a:06:65:39:b1:15:82:dc:48:7b:48:33 ------BEGIN CERTIFICATE----- -MIICwzCCAkqgAwIBAgIBADAKBggqhkjOPQQDAjCBqjELMAkGA1UEBhMCR1IxDzAN -BgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl -c2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxRDBCBgNVBAMTO0hl -bGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgRUNDIFJv -b3RDQSAyMDE1MB4XDTE1MDcwNzEwMzcxMloXDTQwMDYzMDEwMzcxMlowgaoxCzAJ -BgNVBAYTAkdSMQ8wDQYDVQQHEwZBdGhlbnMxRDBCBgNVBAoTO0hlbGxlbmljIEFj -YWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9yaXR5 -MUQwQgYDVQQDEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0 -dXRpb25zIEVDQyBSb290Q0EgMjAxNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJKg -QehLgoRc4vgxEZmGZE4JJS+dQS8KrjVPdJWyUWRrjWvmP3CV8AVER6ZyOFB2lQJa -jq4onvktTpnvLEhvTCUp6NFxW98dwXU3tNf6e3pCnGoKVlp8aQuqgAkkbH7BRqNC 
-MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFLQi -C4KZJAEOnLvkDv2/+5cgk5kqMAoGCCqGSM49BAMCA2cAMGQCMGfOFmI4oqxiRaep -lSTAGiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7Sof -TUwJCA3sS61kFyjndc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR ------END CERTIFICATE----- - -# Issuer: CN=ISRG Root X1 O=Internet Security Research Group -# Subject: CN=ISRG Root X1 O=Internet Security Research Group -# Label: "ISRG Root X1" -# Serial: 172886928669790476064670243504169061120 -# MD5 Fingerprint: 0c:d2:f9:e0:da:17:73:e9:ed:86:4d:a5:e3:70:e7:4e -# SHA1 Fingerprint: ca:bd:2a:79:a1:07:6a:31:f2:1d:25:36:35:cb:03:9d:43:29:a5:e8 -# SHA256 Fingerprint: 96:bc:ec:06:26:49:76:f3:74:60:77:9a:cf:28:c5:a7:cf:e8:a3:c0:aa:e1:1a:8f:fc:ee:05:c0:bd:df:08:c6 ------BEGIN CERTIFICATE----- -MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw -TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh -cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4 -WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu -ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY -MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc -h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+ -0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U -A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW -T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH -B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC -B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv -KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn -OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn -jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw -qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI -rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV -HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq 
-hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL -ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ -3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK -NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5 -ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur -TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC -jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc -oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq -4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA -mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d -emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= ------END CERTIFICATE----- - -# Issuer: O=FNMT-RCM OU=AC RAIZ FNMT-RCM -# Subject: O=FNMT-RCM OU=AC RAIZ FNMT-RCM -# Label: "AC RAIZ FNMT-RCM" -# Serial: 485876308206448804701554682760554759 -# MD5 Fingerprint: e2:09:04:b4:d3:bd:d1:a0:14:fd:1a:d2:47:c4:57:1d -# SHA1 Fingerprint: ec:50:35:07:b2:15:c4:95:62:19:e2:a8:9a:5b:42:99:2c:4c:2c:20 -# SHA256 Fingerprint: eb:c5:57:0c:29:01:8c:4d:67:b1:aa:12:7b:af:12:f7:03:b4:61:1e:bc:17:b7:da:b5:57:38:94:17:9b:93:fa ------BEGIN CERTIFICATE----- -MIIFgzCCA2ugAwIBAgIPXZONMGc2yAYdGsdUhGkHMA0GCSqGSIb3DQEBCwUAMDsx -CzAJBgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJ -WiBGTk1ULVJDTTAeFw0wODEwMjkxNTU5NTZaFw0zMDAxMDEwMDAwMDBaMDsxCzAJ -BgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJWiBG -Tk1ULVJDTTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALpxgHpMhm5/ -yBNtwMZ9HACXjywMI7sQmkCpGreHiPibVmr75nuOi5KOpyVdWRHbNi63URcfqQgf -BBckWKo3Shjf5TnUV/3XwSyRAZHiItQDwFj8d0fsjz50Q7qsNI1NOHZnjrDIbzAz -WHFctPVrbtQBULgTfmxKo0nRIBnuvMApGGWn3v7v3QqQIecaZ5JCEJhfTzC8PhxF -tBDXaEAUwED653cXeuYLj2VbPNmaUtu1vZ5Gzz3rkQUCwJaydkxNEJY7kvqcfw+Z -374jNUUeAlz+taibmSXaXvMiwzn15Cou08YfxGyqxRxqAQVKL9LFwag0Jl1mpdIC -IfkYtwb1TplvqKtMUejPUBjFd8g5CSxJkjKZqLsXF3mwWsXmo8RZZUc1g16p6DUL 
-mbvkzSDGm0oGObVo/CK67lWMK07q87Hj/LaZmtVC+nFNCM+HHmpxffnTtOmlcYF7 -wk5HlqX2doWjKI/pgG6BU6VtX7hI+cL5NqYuSf+4lsKMB7ObiFj86xsc3i1w4peS -MKGJ47xVqCfWS+2QrYv6YyVZLag13cqXM7zlzced0ezvXg5KkAYmY6252TUtB7p2 -ZSysV4999AeU14ECll2jB0nVetBX+RvnU0Z1qrB5QstocQjpYL05ac70r8NWQMet -UqIJ5G+GR4of6ygnXYMgrwTJbFaai0b1AgMBAAGjgYMwgYAwDwYDVR0TAQH/BAUw -AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFPd9xf3E6Jobd2Sn9R2gzL+H -YJptMD4GA1UdIAQ3MDUwMwYEVR0gADArMCkGCCsGAQUFBwIBFh1odHRwOi8vd3d3 -LmNlcnQuZm5tdC5lcy9kcGNzLzANBgkqhkiG9w0BAQsFAAOCAgEAB5BK3/MjTvDD -nFFlm5wioooMhfNzKWtN/gHiqQxjAb8EZ6WdmF/9ARP67Jpi6Yb+tmLSbkyU+8B1 -RXxlDPiyN8+sD8+Nb/kZ94/sHvJwnvDKuO+3/3Y3dlv2bojzr2IyIpMNOmqOFGYM -LVN0V2Ue1bLdI4E7pWYjJ2cJj+F3qkPNZVEI7VFY/uY5+ctHhKQV8Xa7pO6kO8Rf -77IzlhEYt8llvhjho6Tc+hj507wTmzl6NLrTQfv6MooqtyuGC2mDOL7Nii4LcK2N -JpLuHvUBKwrZ1pebbuCoGRw6IYsMHkCtA+fdZn71uSANA+iW+YJF1DngoABd15jm -fZ5nc8OaKveri6E6FO80vFIOiZiaBECEHX5FaZNXzuvO+FB8TxxuBEOb+dY7Ixjp -6o7RTUaN8Tvkasq6+yO3m/qZASlaWFot4/nUbQ4mrcFuNLwy+AwF+mWj2zs3gyLp -1txyM/1d8iC9djwj2ij3+RvrWWTV3F9yfiD8zYm1kGdNYno/Tq0dwzn+evQoFt9B -9kiABdcPUXmsEKvU7ANm5mqwujGSQkBqvjrTcuFqN1W8rB2Vt2lh8kORdOag0wok -RqEIr9baRRmW1FMdW4R58MD3R++Lj8UGrp1MYp3/RgT408m2ECVAdf4WqslKYIYv -uu8wd+RU4riEmViAqhOLUTpPSPaLtrM= ------END CERTIFICATE----- - -# Issuer: CN=Amazon Root CA 1 O=Amazon -# Subject: CN=Amazon Root CA 1 O=Amazon -# Label: "Amazon Root CA 1" -# Serial: 143266978916655856878034712317230054538369994 -# MD5 Fingerprint: 43:c6:bf:ae:ec:fe:ad:2f:18:c6:88:68:30:fc:c8:e6 -# SHA1 Fingerprint: 8d:a7:f9:65:ec:5e:fc:37:91:0f:1c:6e:59:fd:c1:cc:6a:6e:de:16 -# SHA256 Fingerprint: 8e:cd:e6:88:4f:3d:87:b1:12:5b:a3:1a:c3:fc:b1:3d:70:16:de:7f:57:cc:90:4f:e1:cb:97:c6:ae:98:19:6e ------BEGIN CERTIFICATE----- -MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF -ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 -b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL -MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv 
-b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj -ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM -9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw -IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6 -VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L -93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm -jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC -AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA -A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI -U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs -N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv -o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU -5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy -rqXRfboQnoZsG4q5WTP468SQvvG5 ------END CERTIFICATE----- - -# Issuer: CN=Amazon Root CA 2 O=Amazon -# Subject: CN=Amazon Root CA 2 O=Amazon -# Label: "Amazon Root CA 2" -# Serial: 143266982885963551818349160658925006970653239 -# MD5 Fingerprint: c8:e5:8d:ce:a8:42:e2:7a:c0:2a:5c:7c:9e:26:bf:66 -# SHA1 Fingerprint: 5a:8c:ef:45:d7:a6:98:59:76:7a:8c:8b:44:96:b5:78:cf:47:4b:1a -# SHA256 Fingerprint: 1b:a5:b2:aa:8c:65:40:1a:82:96:01:18:f8:0b:ec:4f:62:30:4d:83:ce:c4:71:3a:19:c3:9c:01:1e:a4:6d:b4 ------BEGIN CERTIFICATE----- -MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwF -ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 -b24gUm9vdCBDQSAyMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTEL -MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv -b3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2Wny2cSkxK -gXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4kHbZ -W0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg -1dKmSYXpN+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K -8nu+NQWpEjTj82R0Yiw9AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r 
-2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvdfLC6HM783k81ds8P+HgfajZRRidhW+me -z/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAExkv8LV/SasrlX6avvDXbR -8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSSbtqDT6Zj -mUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz -7Mt0Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6 -+XUyo05f7O0oYtlNc/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI -0u1ufm8/0i2BWSlmy5A5lREedCf+3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMB -Af8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSwDPBMMPQFWAJI/TPlUq9LhONm -UjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oAA7CXDpO8Wqj2 -LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY -+gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kS -k5Nrp+gvU5LEYFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl -7uxMMne0nxrpS10gxdr9HIcWxkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygm -btmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQgj9sAq+uEjonljYE1x2igGOpm/Hl -urR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbWaQbLU8uz/mtBzUF+ -fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoVYh63 -n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE -76KlXIx3KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H -9jVlpNMKVv/1F2Rs76giJUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT -4PsJYGw= ------END CERTIFICATE----- - -# Issuer: CN=Amazon Root CA 3 O=Amazon -# Subject: CN=Amazon Root CA 3 O=Amazon -# Label: "Amazon Root CA 3" -# Serial: 143266986699090766294700635381230934788665930 -# MD5 Fingerprint: a0:d4:ef:0b:f7:b5:d8:49:95:2a:ec:f5:c4:fc:81:87 -# SHA1 Fingerprint: 0d:44:dd:8c:3c:8c:1a:1a:58:75:64:81:e9:0f:2e:2a:ff:b3:d2:6e -# SHA256 Fingerprint: 18:ce:6c:fe:7b:f1:4e:60:b2:e3:47:b8:df:e8:68:cb:31:d0:2e:bb:3a:da:27:15:69:f5:03:43:b4:6d:b3:a4 ------BEGIN CERTIFICATE----- -MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5 -MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g -Um9vdCBDQSAzMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG 
-A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg -Q0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZBf8ANm+gBG1bG8lKl -ui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjrZt6j -QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSr -ttvXBp43rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkr -BqWTrBqYaGFy+uGh0PsceGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteM -YyRIHN8wfdVoOw== ------END CERTIFICATE----- - -# Issuer: CN=Amazon Root CA 4 O=Amazon -# Subject: CN=Amazon Root CA 4 O=Amazon -# Label: "Amazon Root CA 4" -# Serial: 143266989758080763974105200630763877849284878 -# MD5 Fingerprint: 89:bc:27:d5:eb:17:8d:06:6a:69:d5:fd:89:47:b4:cd -# SHA1 Fingerprint: f6:10:84:07:d6:f8:bb:67:98:0c:c2:e2:44:c2:eb:ae:1c:ef:63:be -# SHA256 Fingerprint: e3:5d:28:41:9e:d0:20:25:cf:a6:90:38:cd:62:39:62:45:8d:a5:c6:95:fb:de:a3:c2:2b:0b:fb:25:89:70:92 ------BEGIN CERTIFICATE----- -MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5 -MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g -Um9vdCBDQSA0MB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG -A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg -Q0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN/sGKe0uoe0ZLY7Bi -9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri83Bk -M6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB -/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WB -MAoGCCqGSM49BAMDA2gAMGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlw -CkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1AE47xDqUEpHJWEadIRNyp4iciuRMStuW -1KyLa2tJElMzrdfkviT8tQp21KW8EA== ------END CERTIFICATE----- - -# Issuer: CN=LuxTrust Global Root 2 O=LuxTrust S.A. -# Subject: CN=LuxTrust Global Root 2 O=LuxTrust S.A. 
-# Label: "LuxTrust Global Root 2" -# Serial: 59914338225734147123941058376788110305822489521 -# MD5 Fingerprint: b2:e1:09:00:61:af:f7:f1:91:6f:c4:ad:8d:5e:3b:7c -# SHA1 Fingerprint: 1e:0e:56:19:0a:d1:8b:25:98:b2:04:44:ff:66:8a:04:17:99:5f:3f -# SHA256 Fingerprint: 54:45:5f:71:29:c2:0b:14:47:c4:18:f9:97:16:8f:24:c5:8f:c5:02:3b:f5:da:5b:e2:eb:6e:1d:d8:90:2e:d5 ------BEGIN CERTIFICATE----- -MIIFwzCCA6ugAwIBAgIUCn6m30tEntpqJIWe5rgV0xZ/u7EwDQYJKoZIhvcNAQEL -BQAwRjELMAkGA1UEBhMCTFUxFjAUBgNVBAoMDUx1eFRydXN0IFMuQS4xHzAdBgNV -BAMMFkx1eFRydXN0IEdsb2JhbCBSb290IDIwHhcNMTUwMzA1MTMyMTU3WhcNMzUw -MzA1MTMyMTU3WjBGMQswCQYDVQQGEwJMVTEWMBQGA1UECgwNTHV4VHJ1c3QgUy5B -LjEfMB0GA1UEAwwWTHV4VHJ1c3QgR2xvYmFsIFJvb3QgMjCCAiIwDQYJKoZIhvcN -AQEBBQADggIPADCCAgoCggIBANeFl78RmOnwYoNMPIf5U2o3C/IPPIfOb9wmKb3F -ibrJgz337spbxm1Jc7TJRqMbNBM/wYlFV/TZsfs2ZUv7COJIcRHIbjuend+JZTem -hfY7RBi2xjcwYkSSl2l9QjAk5A0MiWtj3sXh306pFGxT4GHO9hcvHTy95iJMHZP1 -EMShduxq3sVs35a0VkBCwGKSMKEtFZSg0iAGCW5qbeXrt77U8PEVfIvmTroTzEsn -Xpk8F12PgX8zPU/TPxvsXD/wPEx1bvKm1Z3aLQdjAsZy6ZS8TEmVT4hSyNvoaYL4 -zDRbIvCGp4m9SAptZoFtyMhk+wHh9OHe2Z7d21vUKpkmFRseTJIpgp7VkoGSQXAZ -96Tlk0u8d2cx3Rz9MXANF5kM+Qw5GSoXtTBxVdUPrljhPS80m8+f9niFwpN6cj5m -j5wWEWCPnolvZ77gR1o7DJpni89Gxq44o/KnvObWhWszJHAiS8sIm7vI+AIpHb4g -DEa/a4ebsypmQjVGbKq6rfmYe+lQVRQxv7HaLe2ArWgk+2mr2HETMOZns4dA/Yl+ -8kPREd8vZS9kzl8UubG/Mb2HeFpZZYiq/FkySIbWTLkpS5XTdvN3JW1CHDiDTf2j -X5t/Lax5Gw5CMZdjpPuKadUiDTSQMC6otOBttpSsvItO13D8xTiOZCXhTTmQzsmH -hFhxAgMBAAGjgagwgaUwDwYDVR0TAQH/BAUwAwEB/zBCBgNVHSAEOzA5MDcGByuB -KwEBAQowLDAqBggrBgEFBQcCARYeaHR0cHM6Ly9yZXBvc2l0b3J5Lmx1eHRydXN0 -Lmx1MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBT/GCh2+UgFLKGu8SsbK7JT -+Et8szAdBgNVHQ4EFgQU/xgodvlIBSyhrvErGyuyU/hLfLMwDQYJKoZIhvcNAQEL -BQADggIBAGoZFO1uecEsh9QNcH7X9njJCwROxLHOk3D+sFTAMs2ZMGQXvw/l4jP9 -BzZAcg4atmpZ1gDlaCDdLnINH2pkMSCEfUmmWjfrRcmF9dTHF5kH5ptV5AzoqbTO -jFu1EVzPig4N1qx3gf4ynCSecs5U89BvolbW7MM3LGVYvlcAGvI1+ut7MV3CwRI9 -loGIlonBWVx65n9wNOeD4rHh4bhY79SV5GCc8JaXcozrhAIuZY+kt9J/Z93I055c 
-qqmkoCUUBpvsT34tC38ddfEz2O3OuHVtPlu5mB0xDVbYQw8wkbIEa91WvpWAVWe+ -2M2D2RjuLg+GLZKecBPs3lHJQ3gCpU3I+V/EkVhGFndadKpAvAefMLmx9xIX3eP/ -JEAdemrRTxgKqpAd60Ae36EeRJIQmvKN4dFLRp7oRUKX6kWZ8+xm1QL68qZKJKre -zrnK+T+Tb/mjuuqlPpmt/f97mfVl7vBZKGfXkJWkE4SphMHozs51k2MavDzq1WQf -LSoSOcbDWjLtR5EWDrw4wVDej8oqkDQc7kGUnF4ZLvhFSZl0kbAEb+MEWrGrKqv+ -x9CWttrhSmQGbmBNvUJO/3jaJMobtNeWOWyu8Q6qp31IiyBMz2TWuJdGsE7RKlY6 -oJO9r4Ak4Ap+58rVyuiFVdw2KuGUaJPHZnJED4AhMmwlxyOAgwrr ------END CERTIFICATE----- - -# Issuer: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM -# Subject: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM -# Label: "TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1" -# Serial: 1 -# MD5 Fingerprint: dc:00:81:dc:69:2f:3e:2f:b0:3b:f6:3d:5a:91:8e:49 -# SHA1 Fingerprint: 31:43:64:9b:ec:ce:27:ec:ed:3a:3f:0b:8f:0d:e4:e8:91:dd:ee:ca -# SHA256 Fingerprint: 46:ed:c3:68:90:46:d5:3a:45:3f:b3:10:4a:b8:0d:ca:ec:65:8b:26:60:ea:16:29:dd:7e:86:79:90:64:87:16 ------BEGIN CERTIFICATE----- -MIIEYzCCA0ugAwIBAgIBATANBgkqhkiG9w0BAQsFADCB0jELMAkGA1UEBhMCVFIx -GDAWBgNVBAcTD0dlYnplIC0gS29jYWVsaTFCMEAGA1UEChM5VHVya2l5ZSBCaWxp -bXNlbCB2ZSBUZWtub2xvamlrIEFyYXN0aXJtYSBLdXJ1bXUgLSBUVUJJVEFLMS0w -KwYDVQQLEyRLYW11IFNlcnRpZmlrYXN5b24gTWVya2V6aSAtIEthbXUgU00xNjA0 -BgNVBAMTLVRVQklUQUsgS2FtdSBTTSBTU0wgS29rIFNlcnRpZmlrYXNpIC0gU3Vy -dW0gMTAeFw0xMzExMjUwODI1NTVaFw00MzEwMjUwODI1NTVaMIHSMQswCQYDVQQG -EwJUUjEYMBYGA1UEBxMPR2ViemUgLSBLb2NhZWxpMUIwQAYDVQQKEzlUdXJraXll -IEJpbGltc2VsIHZlIFRla25vbG9qaWsgQXJhc3Rpcm1hIEt1cnVtdSAtIFRVQklU -QUsxLTArBgNVBAsTJEthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppIC0gS2FtdSBT -TTE2MDQGA1UEAxMtVFVCSVRBSyBLYW11IFNNIFNTTCBLb2sgU2VydGlmaWthc2kg -LSBTdXJ1bSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr3UwM6q7 -a9OZLBI3hNmNe5eA027n/5tQlT6QlVZC1xl8JoSNkvoBHToP4mQ4t4y86Ij5iySr 
-LqP1N+RAjhgleYN1Hzv/bKjFxlb4tO2KRKOrbEz8HdDc72i9z+SqzvBV96I01INr -N3wcwv61A+xXzry0tcXtAA9TNypN9E8Mg/uGz8v+jE69h/mniyFXnHrfA2eJLJ2X -YacQuFWQfw4tJzh03+f92k4S400VIgLI4OD8D62K18lUUMw7D8oWgITQUVbDjlZ/ -iSIzL+aFCr2lqBs23tPcLG07xxO9WSMs5uWk99gL7eqQQESolbuT1dCANLZGeA4f -AJNG4e7p+exPFwIDAQABo0IwQDAdBgNVHQ4EFgQUZT/HiobGPN08VFw1+DrtUgxH -V8gwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL -BQADggEBACo/4fEyjq7hmFxLXs9rHmoJ0iKpEsdeV31zVmSAhHqT5Am5EM2fKifh -AHe+SMg1qIGf5LgsyX8OsNJLN13qudULXjS99HMpw+0mFZx+CFOKWI3QSyjfwbPf -IPP54+M638yclNhOT8NrF7f3cuitZjO1JVOr4PhMqZ398g26rrnZqsZr+ZO7rqu4 -lzwDGrpDxpa5RXI4s6ehlj2Re37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c -8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0jq5Rm+K37DwhuJi1/FwcJsoz7UMCf -lo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM= ------END CERTIFICATE----- - -# Issuer: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. -# Subject: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. -# Label: "GDCA TrustAUTH R5 ROOT" -# Serial: 9009899650740120186 -# MD5 Fingerprint: 63:cc:d9:3d:34:35:5c:6f:53:a3:e2:08:70:48:1f:b4 -# SHA1 Fingerprint: 0f:36:38:5b:81:1a:25:c3:9b:31:4e:83:ca:e9:34:66:70:cc:74:b4 -# SHA256 Fingerprint: bf:ff:8f:d0:44:33:48:7d:6a:8a:a6:0c:1a:29:76:7a:9f:c2:bb:b0:5e:42:0f:71:3a:13:b9:92:89:1d:38:93 ------BEGIN CERTIFICATE----- -MIIFiDCCA3CgAwIBAgIIfQmX/vBH6nowDQYJKoZIhvcNAQELBQAwYjELMAkGA1UE -BhMCQ04xMjAwBgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZ -IENPLixMVEQuMR8wHQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMB4XDTE0 -MTEyNjA1MTMxNVoXDTQwMTIzMTE1NTk1OVowYjELMAkGA1UEBhMCQ04xMjAwBgNV -BAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZIENPLixMVEQuMR8w -HQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMIICIjANBgkqhkiG9w0BAQEF -AAOCAg8AMIICCgKCAgEA2aMW8Mh0dHeb7zMNOwZ+Vfy1YI92hhJCfVZmPoiC7XJj -Dp6L3TQsAlFRwxn9WVSEyfFrs0yw6ehGXTjGoqcuEVe6ghWinI9tsJlKCvLriXBj -TnnEt1u9ol2x8kECK62pOqPseQrsXzrj/e+APK00mxqriCZ7VqKChh/rNYmDf1+u -KU49tm7srsHwJ5uu4/Ts765/94Y9cnrrpftZTqfrlYwiOXnhLQiPzLyRuEH3FMEj 
-qcOtmkVEs7LXLM3GKeJQEK5cy4KOFxg2fZfmiJqwTTQJ9Cy5WmYqsBebnh52nUpm -MUHfP/vFBu8btn4aRjb3ZGM74zkYI+dndRTVdVeSN72+ahsmUPI2JgaQxXABZG12 -ZuGR224HwGGALrIuL4xwp9E7PLOR5G62xDtw8mySlwnNR30YwPO7ng/Wi64HtloP -zgsMR6flPri9fcebNaBhlzpBdRfMK5Z3KpIhHtmVdiBnaM8Nvd/WHwlqmuLMc3Gk -L30SgLdTMEZeS1SZD2fJpcjyIMGC7J0R38IC+xo70e0gmu9lZJIQDSri3nDxGGeC -jGHeuLzRL5z7D9Ar7Rt2ueQ5Vfj4oR24qoAATILnsn8JuLwwoC8N9VKejveSswoA -HQBUlwbgsQfZxw9cZX08bVlX5O2ljelAU58VS6Bx9hoh49pwBiFYFIeFd3mqgnkC -AwEAAaNCMEAwHQYDVR0OBBYEFOLJQJ9NzuiaoXzPDj9lxSmIahlRMA8GA1UdEwEB -/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQDRSVfg -p8xoWLoBDysZzY2wYUWsEe1jUGn4H3++Fo/9nesLqjJHdtJnJO29fDMylyrHBYZm -DRd9FBUb1Ov9H5r2XpdptxolpAqzkT9fNqyL7FeoPueBihhXOYV0GkLH6VsTX4/5 -COmSdI31R9KrO9b7eGZONn356ZLpBN79SWP8bfsUcZNnL0dKt7n/HipzcEYwv1ry -L3ml4Y0M2fmyYzeMN2WFcGpcWwlyua1jPLHd+PwyvzeG5LuOmCd+uh8W4XAR8gPf -JWIyJyYYMoSf/wA6E7qaTfRPuBRwIrHKK5DOKcFw9C+df/KQHtZa37dG/OaG+svg -IHZ6uqbL9XzeYqWxi+7egmaKTjowHz+Ay60nugxe19CxVsp3cbK1daFQqUBDF8Io -2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV -09tL7ECQ8s1uV9JiDnxXk7Gnbc2dg7sq5+W2O3FYrf3RRbxake5TFW/TRQl1brqQ -XR4EzzffHqhmsYzmIGrv/EhOdJhCrylvLmrH+33RZjEizIYAfmaDDEL0vTSSwxrq -T8p+ck0LcIymSLumoRT2+1hEmRSuqguTaaApJUqlyyvdimYHFngVV3Eb7PVHhPOe -MTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g== ------END CERTIFICATE----- - -# Issuer: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority -# Subject: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority -# Label: "TrustCor RootCert CA-1" -# Serial: 15752444095811006489 -# MD5 Fingerprint: 6e:85:f1:dc:1a:00:d3:22:d5:b2:b2:ac:6b:37:05:45 -# SHA1 Fingerprint: ff:bd:cd:e7:82:c8:43:5e:3c:6f:26:86:5c:ca:a8:3a:45:5b:c3:0a -# SHA256 Fingerprint: d4:0e:9c:86:cd:8f:e4:68:c1:77:69:59:f4:9e:a7:74:fa:54:86:84:b6:c4:06:f3:90:92:61:f4:dc:e2:57:5c ------BEGIN CERTIFICATE----- -MIIEMDCCAxigAwIBAgIJANqb7HHzA7AZMA0GCSqGSIb3DQEBCwUAMIGkMQswCQYD -VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk -MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U -cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRydXN0Q29y -IFJvb3RDZXJ0IENBLTEwHhcNMTYwMjA0MTIzMjE2WhcNMjkxMjMxMTcyMzE2WjCB -pDELMAkGA1UEBhMCUEExDzANBgNVBAgMBlBhbmFtYTEUMBIGA1UEBwwLUGFuYW1h -IENpdHkxJDAiBgNVBAoMG1RydXN0Q29yIFN5c3RlbXMgUy4gZGUgUi5MLjEnMCUG -A1UECwweVHJ1c3RDb3IgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR8wHQYDVQQDDBZU -cnVzdENvciBSb290Q2VydCBDQS0xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB -CgKCAQEAv463leLCJhJrMxnHQFgKq1mqjQCj/IDHUHuO1CAmujIS2CNUSSUQIpid -RtLByZ5OGy4sDjjzGiVoHKZaBeYei0i/mJZ0PmnK6bV4pQa81QBeCQryJ3pS/C3V -seq0iWEk8xoT26nPUu0MJLq5nux+AHT6k61sKZKuUbS701e/s/OojZz0JEsq1pme -9J7+wH5COucLlVPat2gOkEz7cD+PSiyU8ybdY2mplNgQTsVHCJCZGxdNuWxu72CV -EY4hgLW9oHPY0LJ3xEXqWib7ZnZ2+AYfYW0PVcWDtxBWcgYHpfOxGgMFZA6dWorW -hnAbJN7+KIor0Gqw/Hqi3LJ5DotlDwIDAQABo2MwYTAdBgNVHQ4EFgQU7mtJPHo/ -DeOxCbeKyKsZn3MzUOcwHwYDVR0jBBgwFoAU7mtJPHo/DeOxCbeKyKsZn3MzUOcw -DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQAD -ggEBACUY1JGPE+6PHh0RU9otRCkZoB5rMZ5NDp6tPVxBb5UrJKF5mDo4Nvu7Zp5I -/5CQ7z3UuJu0h3U/IJvOcs+hVcFNZKIZBqEHMwwLKeXx6quj7LUKdJDHfXLy11yf -ke+Ri7fc7Waiz45mO7yfOgLgJ90WmMCV1Aqk5IGadZQ1nJBfiDcGrVmVCrDRZ9MZ -yonnMlo2HD6CqFqTvsbQZJG2z9m2GM/bftJlo6bEjhcxwft+dtvTheNYsnd6djts -L1Ac59v2Z3kf9YKVmgenFK+P3CghZwnS1k1aHBkcjndcw5QkPTJrS37UeJSDvjdN -zl/HHk484IkzlQsPpTLWPFp5LBk= ------END CERTIFICATE----- - -# Issuer: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority -# Subject: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority -# Label: "TrustCor RootCert CA-2" -# Serial: 2711694510199101698 -# MD5 Fingerprint: a2:e1:f8:18:0b:ba:45:d5:c7:41:2a:bb:37:52:45:64 -# SHA1 Fingerprint: b8:be:6d:cb:56:f1:55:b9:63:d4:12:ca:4e:06:34:c7:94:b2:1c:c0 -# SHA256 Fingerprint: 07:53:e9:40:37:8c:1b:d5:e3:83:6e:39:5d:ae:a5:cb:83:9e:50:46:f1:bd:0e:ae:19:51:cf:10:fe:c7:c9:65 ------BEGIN CERTIFICATE----- -MIIGLzCCBBegAwIBAgIIJaHfyjPLWQIwDQYJKoZIhvcNAQELBQAwgaQxCzAJBgNV -BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw -IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy -dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEfMB0GA1UEAwwWVHJ1c3RDb3Ig -Um9vdENlcnQgQ0EtMjAeFw0xNjAyMDQxMjMyMjNaFw0zNDEyMzExNzI2MzlaMIGk -MQswCQYDVQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEg -Q2l0eTEkMCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYD -VQQLDB5UcnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRy -dXN0Q29yIFJvb3RDZXJ0IENBLTIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK -AoICAQCnIG7CKqJiJJWQdsg4foDSq8GbZQWU9MEKENUCrO2fk8eHyLAnK0IMPQo+ -QVqedd2NyuCb7GgypGmSaIwLgQ5WoD4a3SwlFIIvl9NkRvRUqdw6VC0xK5mC8tkq -1+9xALgxpL56JAfDQiDyitSSBBtlVkxs1Pu2YVpHI7TYabS3OtB0PAx1oYxOdqHp -2yqlO/rOsP9+aij9JxzIsekp8VduZLTQwRVtDr4uDkbIXvRR/u8OYzo7cbrPb1nK -DOObXUm4TOJXsZiKQlecdu/vvdFoqNL0Cbt3Nb4lggjEFixEIFapRBF37120Hape -az6LMvYHL1cEksr1/p3C6eizjkxLAjHZ5DxIgif3GIJ2SDpxsROhOdUuxTTCHWKF -3wP+TfSvPd9cW436cOGlfifHhi5qjxLGhF5DUVCcGZt45vz27Ud+ez1m7xMTiF88 -oWP7+ayHNZ/zgp6kPwqcMWmLmaSISo5uZk3vFsQPeSghYA2FFn3XVDjxklb9tTNM -g9zXEJ9L/cb4Qr26fHMC4P99zVvh1Kxhe1fVSntb1IVYJ12/+CtgrKAmrhQhJ8Z3 -mjOAPF5GP/fDsaOGM8boXg25NSyqRsGFAnWAoOsk+xWq5Gd/bnc/9ASKL3x74xdh -8N0JqSDIvgmk0H5Ew7IwSjiqqewYmgeCK9u4nBit2uBGF6zPXQIDAQABo2MwYTAd -BgNVHQ4EFgQU2f4hQG6UnrybPZx9mCAZ5YwwYrIwHwYDVR0jBBgwFoAU2f4hQG6U -nrybPZx9mCAZ5YwwYrIwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYw 
-DQYJKoZIhvcNAQELBQADggIBAJ5Fngw7tu/hOsh80QA9z+LqBrWyOrsGS2h60COX -dKcs8AjYeVrXWoSK2BKaG9l9XE1wxaX5q+WjiYndAfrs3fnpkpfbsEZC89NiqpX+ -MWcUaViQCqoL7jcjx1BRtPV+nuN79+TMQjItSQzL/0kMmx40/W5ulop5A7Zv2wnL -/V9lFDfhOPXzYRZY5LVtDQsEGz9QLX+zx3oaFoBg+Iof6Rsqxvm6ARppv9JYx1RX -CI/hOWB3S6xZhBqI8d3LT3jX5+EzLfzuQfogsL7L9ziUwOHQhQ+77Sxzq+3+knYa -ZH9bDTMJBzN7Bj8RpFxwPIXAz+OQqIN3+tvmxYxoZxBnpVIt8MSZj3+/0WvitUfW -2dCFmU2Umw9Lje4AWkcdEQOsQRivh7dvDDqPys/cA8GiCcjl/YBeyGBCARsaU1q7 -N6a3vLqE6R5sGtRk2tRD/pOLS/IseRYQ1JMLiI+h2IYURpFHmygk71dSTlxCnKr3 -Sewn6EAes6aJInKc9Q0ztFijMDvd1GpUk74aTfOTlPf8hAs/hCBcNANExdqtvArB -As8e5ZTZ845b2EzwnexhF7sUMlQMAimTHpKG9n/v55IFDlndmQguLvqcAFLTxWYp -5KeXRKQOKIETNcX2b2TmQcTVL8w0RSXPQQCWPUouwpaYT05KnJe32x+SMsj/D1Fu -1uwJ ------END CERTIFICATE----- - -# Issuer: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority -# Subject: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority -# Label: "TrustCor ECA-1" -# Serial: 9548242946988625984 -# MD5 Fingerprint: 27:92:23:1d:0a:f5:40:7c:e9:e6:6b:9d:d8:f5:e7:6c -# SHA1 Fingerprint: 58:d1:df:95:95:67:6b:63:c0:f0:5b:1c:17:4d:8b:84:0b:c8:78:bd -# SHA256 Fingerprint: 5a:88:5d:b1:9c:01:d9:12:c5:75:93:88:93:8c:af:bb:df:03:1a:b2:d4:8e:91:ee:15:58:9b:42:97:1d:03:9c ------BEGIN CERTIFICATE----- -MIIEIDCCAwigAwIBAgIJAISCLF8cYtBAMA0GCSqGSIb3DQEBCwUAMIGcMQswCQYD -VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk -MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U -cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxFzAVBgNVBAMMDlRydXN0Q29y -IEVDQS0xMB4XDTE2MDIwNDEyMzIzM1oXDTI5MTIzMTE3MjgwN1owgZwxCzAJBgNV -BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw -IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy -dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEXMBUGA1UEAwwOVHJ1c3RDb3Ig -RUNBLTEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPj+ARtZ+odnbb -3w9U73NjKYKtR8aja+3+XzP4Q1HpGjORMRegdMTUpwHmspI+ap3tDvl0mEDTPwOA 
-BoJA6LHip1GnHYMma6ve+heRK9jGrB6xnhkB1Zem6g23xFUfJ3zSCNV2HykVh0A5 -3ThFEXXQmqc04L/NyFIduUd+Dbi7xgz2c1cWWn5DkR9VOsZtRASqnKmcp0yJF4Ou -owReUoCLHhIlERnXDH19MURB6tuvsBzvgdAsxZohmz3tQjtQJvLsznFhBmIhVE5/ -wZ0+fyCMgMsq2JdiyIMzkX2woloPV+g7zPIlstR8L+xNxqE6FXrntl019fZISjZF -ZtS6mFjBAgMBAAGjYzBhMB0GA1UdDgQWBBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAf -BgNVHSMEGDAWgBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAPBgNVHRMBAf8EBTADAQH/ -MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAQEABT41XBVwm8nHc2Fv -civUwo/yQ10CzsSUuZQRg2dd4mdsdXa/uwyqNsatR5Nj3B5+1t4u/ukZMjgDfxT2 -AHMsWbEhBuH7rBiVDKP/mZb3Kyeb1STMHd3BOuCYRLDE5D53sXOpZCz2HAF8P11F -hcCF5yWPldwX8zyfGm6wyuMdKulMY/okYWLW2n62HGz1Ah3UKt1VkOsqEUc8Ll50 -soIipX1TH0XsJ5F95yIW6MBoNtjG8U+ARDL54dHRHareqKucBK+tIA5kmE2la8BI -WJZpTdwHjFGTot+fDz2LYLSCjaoITmJF4PkL0uDgPFveXHEnJcLmA4GLEFPjx1Wi -tJ/X5g== ------END CERTIFICATE----- - -# Issuer: CN=SSL.com Root Certification Authority RSA O=SSL Corporation -# Subject: CN=SSL.com Root Certification Authority RSA O=SSL Corporation -# Label: "SSL.com Root Certification Authority RSA" -# Serial: 8875640296558310041 -# MD5 Fingerprint: 86:69:12:c0:70:f1:ec:ac:ac:c2:d5:bc:a5:5b:a1:29 -# SHA1 Fingerprint: b7:ab:33:08:d1:ea:44:77:ba:14:80:12:5a:6f:bd:a9:36:49:0c:bb -# SHA256 Fingerprint: 85:66:6a:56:2e:e0:be:5c:e9:25:c1:d8:89:0a:6f:76:a8:7e:c1:6d:4d:7d:5f:29:ea:74:19:cf:20:12:3b:69 ------BEGIN CERTIFICATE----- -MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UE -BhMCVVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQK -DA9TU0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZp -Y2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYwMjEyMTczOTM5WhcNNDEwMjEyMTcz -OTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv -dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv -bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcN -AQEBBQADggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2R -xFdHaxh3a3by/ZPkPQ/CFp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aX 
-qhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcC -C52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/geoeOy3ZExqysdBP+lSgQ3 -6YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkpk8zruFvh -/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrF -YD3ZfBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93E -JNyAKoFBbZQ+yODJgUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVc -US4cK38acijnALXRdMbX5J+tB5O2UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8 -ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi81xtZPCvM8hnIk2snYxnP/Okm -+Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4sbE6x/c+cCbqi -M+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV -HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4G -A1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGV -cpNxJK1ok1iOMq8bs3AD/CUrdIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBc -Hadm47GUBwwyOabqG7B52B2ccETjit3E+ZUfijhDPwGFpUenPUayvOUiaPd7nNgs -PgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAslu1OJD7OAUN5F7kR/ -q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjqerQ0 -cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jr -a6x+3uxjMxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90I -H37hVZkLId6Tngr75qNJvTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/Y -K9f1JmzJBjSWFupwWRoyeXkLtoh/D1JIPb9s2KJELtFOt3JY04kTlf5Eq/jXixtu -nLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406ywKBjYZC6VWg3dGq2ktuf -oYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NIWuuA8ShY -Ic2wBlX7Jz9TkHCpBB5XJ7k= ------END CERTIFICATE----- - -# Issuer: CN=SSL.com Root Certification Authority ECC O=SSL Corporation -# Subject: CN=SSL.com Root Certification Authority ECC O=SSL Corporation -# Label: "SSL.com Root Certification Authority ECC" -# Serial: 8495723813297216424 -# MD5 Fingerprint: 2e:da:e4:39:7f:9c:8f:37:d1:70:9f:26:17:51:3a:8e -# SHA1 Fingerprint: c3:19:7c:39:24:e6:54:af:1b:c4:ab:20:95:7a:e2:c3:0e:13:02:6a -# SHA256 Fingerprint: 
34:17:bb:06:cc:60:07:da:1b:96:1c:92:0b:8a:b4:ce:3f:ad:82:0e:4a:a3:0b:9a:cb:c4:a7:4e:bd:ce:bc:65 ------BEGIN CERTIFICATE----- -MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMC -VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T -U0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0 -aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNDAzWhcNNDEwMjEyMTgxNDAz -WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hvdXN0 -b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNvbSBS -b290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB -BAAiA2IABEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI -7Z4INcgn64mMU1jrYor+8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPg -CemB+vNH06NjMGEwHQYDVR0OBBYEFILRhXMw5zUE044CkvvlpNHEIejNMA8GA1Ud -EwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTTjgKS++Wk0cQh6M0wDgYD -VR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCWe+0F+S8T -kdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+ -gA0z5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl ------END CERTIFICATE----- - -# Issuer: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation -# Subject: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation -# Label: "SSL.com EV Root Certification Authority RSA R2" -# Serial: 6248227494352943350 -# MD5 Fingerprint: e1:1e:31:58:1a:ae:54:53:02:f6:17:6a:11:7b:4d:95 -# SHA1 Fingerprint: 74:3a:f0:52:9b:d0:32:a0:f4:4a:83:cd:d4:ba:a9:7b:7c:2e:c4:9a -# SHA256 Fingerprint: 2e:7b:f1:6c:c2:24:85:a7:bb:e2:aa:86:96:75:07:61:b0:ae:39:be:3b:2f:e9:d0:cc:6d:4e:f7:34:91:42:5c ------BEGIN CERTIFICATE----- -MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNV -BAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UE -CgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2Vy -dGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMB4XDTE3MDUzMTE4MTQzN1oXDTQy -MDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4G -A1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQD 
-DC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy -MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvq -M0fNTPl9fb69LT3w23jhhqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssuf -OePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7wcXHswxzpY6IXFJ3vG2fThVUCAtZJycxa -4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTOZw+oz12WGQvE43LrrdF9 -HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+B6KjBSYR -aZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcA -b9ZhCBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQ -Gp8hLH94t2S42Oim9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQV -PWKchjgGAGYS5Fl2WlPAApiiECtoRHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMO -pgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+SlmJuwgUHfbSguPvuUCYHBBXtSu -UDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48+qvWBkofZ6aY -MBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV -HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa4 -9QaAJadz20ZpqJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBW -s47LCp1Jjr+kxJG7ZhcFUZh1++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5 -Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nxY/hoLVUE0fKNsKTPvDxeH3jnpaAg -cLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2GguDKBAdRUNf/ktUM -79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDzOFSz -/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXt -ll9ldDz7CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEm -Kf7GUmG6sXP/wwyc5WxqlD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKK -QbNmC1r7fSOl8hqw/96bg5Qu0T/fkreRrwU7ZcegbLHNYhLDkBvjJc40vG93drEQ -w/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1hlMYegouCRw2n5H9gooi -S9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX9hwJ1C07 -mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w== ------END CERTIFICATE----- - -# Issuer: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation -# Subject: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation -# Label: "SSL.com EV Root Certification Authority ECC" -# Serial: 3182246526754555285 -# MD5 Fingerprint: 
59:53:22:65:83:42:01:54:c0:ce:42:b9:5a:7c:f2:90 -# SHA1 Fingerprint: 4c:dd:51:a3:d1:f5:20:32:14:b0:c6:c5:32:23:03:91:c7:46:42:6d -# SHA256 Fingerprint: 22:a2:c1:f7:bd:ed:70:4c:c1:e7:01:b5:f4:08:c3:10:88:0f:e9:56:b5:de:2a:4a:44:f9:9c:87:3a:25:a7:c8 ------BEGIN CERTIFICATE----- -MIIClDCCAhqgAwIBAgIILCmcWxbtBZUwCgYIKoZIzj0EAwIwfzELMAkGA1UEBhMC -VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T -U0wgQ29ycG9yYXRpb24xNDAyBgNVBAMMK1NTTC5jb20gRVYgUm9vdCBDZXJ0aWZp -Y2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNTIzWhcNNDEwMjEyMTgx -NTIzWjB/MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv -dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrU1NMLmNv -bSBFViBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49 -AgEGBSuBBAAiA2IABKoSR5CYG/vvw0AHgyBO8TCCogbR8pKGYfL2IWjKAMTH6kMA -VIbc/R/fALhBYlzccBYy3h+Z1MzFB8gIH2EWB1E9fVwHU+M1OIzfzZ/ZLg1Kthku -WnBaBu2+8KGwytAJKaNjMGEwHQYDVR0OBBYEFFvKXuXe0oGqzagtZFG22XKbl+ZP -MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUW8pe5d7SgarNqC1kUbbZcpuX -5k8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2gAMGUCMQCK5kCJN+vp1RPZ -ytRrJPOwPYdGWBrssd9v+1a6cGvHOMzosYxPD/fxZ3YOg9AeUY8CMD32IygmTMZg -h5Mmm7I1HrrW9zzRHM76JTymGoEVW/MSD2zuZYrJh6j5B+BimoxcSg== ------END CERTIFICATE----- - -# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6 -# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6 -# Label: "GlobalSign Root CA - R6" -# Serial: 1417766617973444989252670301619537 -# MD5 Fingerprint: 4f:dd:07:e4:d4:22:64:39:1e:0c:37:42:ea:d1:c6:ae -# SHA1 Fingerprint: 80:94:64:0e:b5:a7:a1:ca:11:9c:1f:dd:d5:9f:81:02:63:a7:fb:d1 -# SHA256 Fingerprint: 2c:ab:ea:fe:37:d0:6c:a2:2a:ba:73:91:c0:03:3d:25:98:29:52:c4:53:64:73:49:76:3a:3a:b5:ad:6c:cf:69 ------BEGIN CERTIFICATE----- -MIIFgzCCA2ugAwIBAgIORea7A4Mzw4VlSOb/RVEwDQYJKoZIhvcNAQEMBQAwTDEg -MB4GA1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjYxEzARBgNVBAoTCkdsb2Jh -bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTQxMjEwMDAwMDAwWhcNMzQx -MjEwMDAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSNjET 
-MBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCAiIwDQYJ -KoZIhvcNAQEBBQADggIPADCCAgoCggIBAJUH6HPKZvnsFMp7PPcNCPG0RQssgrRI -xutbPK6DuEGSMxSkb3/pKszGsIhrxbaJ0cay/xTOURQh7ErdG1rG1ofuTToVBu1k -ZguSgMpE3nOUTvOniX9PeGMIyBJQbUJmL025eShNUhqKGoC3GYEOfsSKvGRMIRxD -aNc9PIrFsmbVkJq3MQbFvuJtMgamHvm566qjuL++gmNQ0PAYid/kD3n16qIfKtJw -LnvnvJO7bVPiSHyMEAc4/2ayd2F+4OqMPKq0pPbzlUoSB239jLKJz9CgYXfIWHSw -1CM69106yqLbnQneXUQtkPGBzVeS+n68UARjNN9rkxi+azayOeSsJDa38O+2HBNX -k7besvjihbdzorg1qkXy4J02oW9UivFyVm4uiMVRQkQVlO6jxTiWm05OWgtH8wY2 -SXcwvHE35absIQh1/OZhFj931dmRl4QKbNQCTXTAFO39OfuD8l4UoQSwC+n+7o/h -bguyCLNhZglqsQY6ZZZZwPA1/cnaKI0aEYdwgQqomnUdnjqGBQCe24DWJfncBZ4n -WUx2OVvq+aWh2IMP0f/fMBH5hc8zSPXKbWQULHpYT9NLCEnFlWQaYw55PfWzjMpY -rZxCRXluDocZXFSxZba/jJvcE+kNb7gu3GduyYsRtYQUigAZcIN5kZeR1Bonvzce -MgfYFGM8KEyvAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTAD -AQH/MB0GA1UdDgQWBBSubAWjkxPioufi1xzWx/B/yGdToDAfBgNVHSMEGDAWgBSu -bAWjkxPioufi1xzWx/B/yGdToDANBgkqhkiG9w0BAQwFAAOCAgEAgyXt6NH9lVLN -nsAEoJFp5lzQhN7craJP6Ed41mWYqVuoPId8AorRbrcWc+ZfwFSY1XS+wc3iEZGt -Ixg93eFyRJa0lV7Ae46ZeBZDE1ZXs6KzO7V33EByrKPrmzU+sQghoefEQzd5Mr61 -55wsTLxDKZmOMNOsIeDjHfrYBzN2VAAiKrlNIC5waNrlU/yDXNOd8v9EDERm8tLj -vUYAGm0CuiVdjaExUd1URhxN25mW7xocBFymFe944Hn+Xds+qkxV/ZoVqW/hpvvf -cDDpw+5CRu3CkwWJ+n1jez/QcYF8AOiYrg54NMMl+68KnyBr3TsTjxKM4kEaSHpz -oHdpx7Zcf4LIHv5YGygrqGytXm3ABdJ7t+uA/iU3/gKbaKxCXcPu9czc8FB10jZp -nOZ7BN9uBmm23goJSFmH63sUYHpkqmlD75HHTOwY3WzvUy2MmeFe8nI+z1TIvWfs -pA9MRf/TuTAjB0yPEL+GltmZWrSZVxykzLsViVO6LAUP5MSeGbEYNNVMnbrt9x+v -JJUEeKgDu+6B5dpffItKoZB0JaezPkvILFa9x8jvOOJckvB595yEunQtYQEgfn7R -8k8HWV+LLUNS60YMlOH1Zkd5d9VUWx+tJDfLRVpOoERIyNiwmcUVhAn21klJwGW4 -5hpxbqCo8YLoRT5s1gLXCmeDBVrJpBA= ------END CERTIFICATE----- - -# Issuer: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed -# Subject: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed -# Label: "OISTE WISeKey Global Root GC CA" -# Serial: 44084345621038548146064804565436152554 -# MD5 Fingerprint: 
a9:d6:b9:2d:2f:93:64:f8:a5:69:ca:91:e9:68:07:23 -# SHA1 Fingerprint: e0:11:84:5e:34:de:be:88:81:b9:9c:f6:16:26:d1:96:1f:c3:b9:31 -# SHA256 Fingerprint: 85:60:f9:1c:36:24:da:ba:95:70:b5:fe:a0:db:e3:6f:f1:1a:83:23:be:94:86:85:4f:b3:f3:4a:55:71:19:8d ------BEGIN CERTIFICATE----- -MIICaTCCAe+gAwIBAgIQISpWDK7aDKtARb8roi066jAKBggqhkjOPQQDAzBtMQsw -CQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUgRm91 -bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwg -Um9vdCBHQyBDQTAeFw0xNzA1MDkwOTQ4MzRaFw00MjA1MDkwOTU4MzNaMG0xCzAJ -BgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBGb3Vu -ZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2JhbCBS -b290IEdDIENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAETOlQwMYPchi82PG6s4ni -eUqjFqdrVCTbUf/q9Akkwwsin8tqJ4KBDdLArzHkdIJuyiXZjHWd8dvQmqJLIX4W -p2OQ0jnUsYd4XxiWD1AbNTcPasbc2RNNpI6QN+a9WzGRo1QwUjAOBgNVHQ8BAf8E -BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUSIcUrOPDnpBgOtfKie7T -rYy0UGYwEAYJKwYBBAGCNxUBBAMCAQAwCgYIKoZIzj0EAwMDaAAwZQIwJsdpW9zV -57LnyAyMjMPdeYwbY9XJUpROTYJKcx6ygISpJcBMWm1JKWB4E+J+SOtkAjEA2zQg -Mgj/mkkCtojeFK9dbJlxjRo/i9fgojaGHAeCOnZT/cKi7e97sIBPWA9LUzm9 ------END CERTIFICATE----- - -# Issuer: CN=GTS Root R1 O=Google Trust Services LLC -# Subject: CN=GTS Root R1 O=Google Trust Services LLC -# Label: "GTS Root R1" -# Serial: 146587175971765017618439757810265552097 -# MD5 Fingerprint: 82:1a:ef:d4:d2:4a:f2:9f:e2:3d:97:06:14:70:72:85 -# SHA1 Fingerprint: e1:c9:50:e6:ef:22:f8:4c:56:45:72:8b:92:20:60:d7:d5:a7:a3:e8 -# SHA256 Fingerprint: 2a:57:54:71:e3:13:40:bc:21:58:1c:bd:2c:f1:3e:15:84:63:20:3e:ce:94:bc:f9:d3:cc:19:6b:f0:9a:54:72 ------BEGIN CERTIFICATE----- -MIIFWjCCA0KgAwIBAgIQbkepxUtHDA3sM9CJuRz04TANBgkqhkiG9w0BAQwFADBH -MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM -QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIy -MDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNl -cnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEB 
-AQUAA4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaM -f/vo27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vX -mX7wCl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7 -zUjwTcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0P -fyblqAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtc -vfaHszVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4 -Zor8Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUsp -zBmkMiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOO -Rc92wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYW -k70paDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+ -DVrNVjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgF -lQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV -HQ4EFgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBADiW -Cu49tJYeX++dnAsznyvgyv3SjgofQXSlfKqE1OXyHuY3UjKcC9FhHb8owbZEKTV1 -d5iyfNm9dKyKaOOpMQkpAWBz40d8U6iQSifvS9efk+eCNs6aaAyC58/UEBZvXw6Z -XPYfcX3v73svfuo21pdwCxXu11xWajOl40k4DLh9+42FpLFZXvRq4d2h9mREruZR -gyFmxhE+885H7pwoHyXa/6xmld01D1zvICxi/ZG6qcz8WpyTgYMpl0p8WnK0OdC3 -d8t5/Wk6kjftbjhlRn7pYL15iJdfOBL07q9bgsiG1eGZbYwE8na6SfZu6W0eX6Dv -J4J2QPim01hcDyxC2kLGe4g0x8HYRZvBPsVhHdljUEn2NIVq4BjFbkerQUIpm/Zg -DdIx02OYI5NaAIFItO/Nis3Jz5nu2Z6qNuFoS3FJFDYoOj0dzpqPJeaAcWErtXvM -+SUWgeExX6GjfhaknBZqlxi9dnKlC54dNuYvoS++cJEPqOba+MSSQGwlfnuzCdyy -F62ARPBopY+Udf90WuioAnwMCeKpSwughQtiue+hMZL77/ZRBIls6Kl0obsXs7X9 -SQ98POyDGCBDTtWTurQ0sR8WNh8M5mQ5Fkzc4P4dyKliPUDqysU0ArSuiYgzNdws -E3PYJ/HQcu51OyLemGhmW/HGY0dVHLqlCFF1pkgl ------END CERTIFICATE----- - -# Issuer: CN=GTS Root R2 O=Google Trust Services LLC -# Subject: CN=GTS Root R2 O=Google Trust Services LLC -# Label: "GTS Root R2" -# Serial: 146587176055767053814479386953112547951 -# MD5 Fingerprint: 44:ed:9a:0e:a4:09:3b:00:f2:ae:4c:a3:c6:61:b0:8b -# SHA1 Fingerprint: d2:73:96:2a:2a:5e:39:9f:73:3f:e1:c7:1e:64:3f:03:38:34:fc:4d -# SHA256 Fingerprint: 
c4:5d:7b:b0:8e:6d:67:e6:2e:42:35:11:0b:56:4e:5f:78:fd:92:ef:05:8c:84:0a:ea:4e:64:55:d7:58:5c:60 ------BEGIN CERTIFICATE----- -MIIFWjCCA0KgAwIBAgIQbkepxlqz5yDFMJo/aFLybzANBgkqhkiG9w0BAQwFADBH -MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM -QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIy -MDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNl -cnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEB -AQUAA4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3Lv -CvptnfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3Kg -GjSY6Dlo7JUle3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9Bu -XvAuMC6C/Pq8tBcKSOWIm8Wba96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOd -re7kRXuJVfeKH2JShBKzwkCX44ofR5GmdFrS+LFjKBC4swm4VndAoiaYecb+3yXu -PuWgf9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7MkogwTZq9TwtImoS1 -mKPV+3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJGr61K -8YzodDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqj -x5RWIr9qS34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsR -nTKaG73VululycslaVNVJ1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0 -kzCqgc7dGtxRcw1PcOnlthYhGXmy5okLdWTK1au8CcEYof/UVKGFPP0UJAOyh9Ok -twIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV -HQ4EFgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQADggIBALZp -8KZ3/p7uC4Gt4cCpx/k1HUCCq+YEtN/L9x0Pg/B+E02NjO7jMyLDOfxA325BS0JT -vhaI8dI4XsRomRyYUpOM52jtG2pzegVATX9lO9ZY8c6DR2Dj/5epnGB3GFW1fgiT -z9D2PGcDFWEJ+YF59exTpJ/JjwGLc8R3dtyDovUMSRqodt6Sm2T4syzFJ9MHwAiA -pJiS4wGWAqoC7o87xdFtCjMwc3i5T1QWvwsHoaRc5svJXISPD+AVdyx+Jn7axEvb -pxZ3B7DNdehyQtaVhJ2Gg/LkkM0JR9SLA3DaWsYDQvTtN6LwG1BUSw7YhN4ZKJmB -R64JGz9I0cNv4rBgF/XuIwKl2gBbbZCr7qLpGzvpx0QnRY5rn/WkhLx3+WuXrD5R -RaIRpsyF7gpo8j5QOHokYh4XIDdtak23CZvJ/KRY9bb7nE4Yu5UC56GtmwfuNmsk -0jmGwZODUNKBRqhfYlcsu2xkiAhu7xNUX90txGdj08+JN7+dIPT7eoOboB6BAFDC -5AwiWVIQ7UNWhwD4FFKnHYuTjKJNRn8nxnGbJN7k2oaLDX5rIMHAnuFl2GqjpuiF -izoHCBy69Y9Vmhh1fuXsgWbRIXOhNUQLgD1bnF5vKheW0YMjiGZt5obicDIvUiLn 
-yOd/xCxgXS/Dr55FBcOEArf9LAhST4Ldo/DUhgkC ------END CERTIFICATE----- - -# Issuer: CN=GTS Root R3 O=Google Trust Services LLC -# Subject: CN=GTS Root R3 O=Google Trust Services LLC -# Label: "GTS Root R3" -# Serial: 146587176140553309517047991083707763997 -# MD5 Fingerprint: 1a:79:5b:6b:04:52:9c:5d:c7:74:33:1b:25:9a:f9:25 -# SHA1 Fingerprint: 30:d4:24:6f:07:ff:db:91:89:8a:0b:e9:49:66:11:eb:8c:5e:46:e5 -# SHA256 Fingerprint: 15:d5:b8:77:46:19:ea:7d:54:ce:1c:a6:d0:b0:c4:03:e0:37:a9:17:f1:31:e8:a0:4e:1e:6b:7a:71:ba:bc:e5 ------BEGIN CERTIFICATE----- -MIICDDCCAZGgAwIBAgIQbkepx2ypcyRAiQ8DVd2NHTAKBggqhkjOPQQDAzBHMQsw -CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU -MBIGA1UEAxMLR1RTIFJvb3QgUjMwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw -MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp -Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjOPQIBBgUrgQQA -IgNiAAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout -736GjOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2A -DDL24CejQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud -DgQWBBTB8Sa6oC2uhYHP0/EqEr24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEAgFuk -fCPAlaUs3L6JbyO5o91lAFJekazInXJ0glMLfalAvWhgxeG4VDvBNhcl2MG9AjEA -njWSdIUlUfUk7GRSJFClH9voy8l27OyCbvWFGFPouOOaKaqW04MjyaR7YbPMAuhd ------END CERTIFICATE----- - -# Issuer: CN=GTS Root R4 O=Google Trust Services LLC -# Subject: CN=GTS Root R4 O=Google Trust Services LLC -# Label: "GTS Root R4" -# Serial: 146587176229350439916519468929765261721 -# MD5 Fingerprint: 5d:b6:6a:c4:60:17:24:6a:1a:99:a8:4b:ee:5e:b4:26 -# SHA1 Fingerprint: 2a:1d:60:27:d9:4a:b1:0a:1c:4d:91:5c:cd:33:a0:cb:3e:2d:54:cb -# SHA256 Fingerprint: 71:cc:a5:39:1f:9e:79:4b:04:80:25:30:b3:63:e1:21:da:8a:30:43:bb:26:66:2f:ea:4d:ca:7f:c9:51:a4:bd ------BEGIN CERTIFICATE----- -MIICCjCCAZGgAwIBAgIQbkepyIuUtui7OyrYorLBmTAKBggqhkjOPQQDAzBHMQsw -CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU -MBIGA1UEAxMLR1RTIFJvb3QgUjQwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw 
-MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp -Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjOPQIBBgUrgQQA -IgNiAATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzu -hXyiQHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/l -xKvRHYqjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud -DgQWBBSATNbrdP9JNqPV2Py1PsVq8JQdjDAKBggqhkjOPQQDAwNnADBkAjBqUFJ0 -CMRw3J5QdCHojXohw0+WbhXRIjVhLfoIN+4Zba3bssx9BzT1YBkstTTZbyACMANx -sbqjYAuG7ZoIapVon+Kz4ZNkfF6Tpt95LY2F45TPI11xzPKwTdb+mciUqXWi4w== ------END CERTIFICATE----- - -# Issuer: CN=UCA Global G2 Root O=UniTrust -# Subject: CN=UCA Global G2 Root O=UniTrust -# Label: "UCA Global G2 Root" -# Serial: 124779693093741543919145257850076631279 -# MD5 Fingerprint: 80:fe:f0:c4:4a:f0:5c:62:32:9f:1c:ba:78:a9:50:f8 -# SHA1 Fingerprint: 28:f9:78:16:19:7a:ff:18:25:18:aa:44:fe:c1:a0:ce:5c:b6:4c:8a -# SHA256 Fingerprint: 9b:ea:11:c9:76:fe:01:47:64:c1:be:56:a6:f9:14:b5:a5:60:31:7a:bd:99:88:39:33:82:e5:16:1a:a0:49:3c ------BEGIN CERTIFICATE----- -MIIFRjCCAy6gAwIBAgIQXd+x2lqj7V2+WmUgZQOQ7zANBgkqhkiG9w0BAQsFADA9 -MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxGzAZBgNVBAMMElVDQSBH -bG9iYWwgRzIgUm9vdDAeFw0xNjAzMTEwMDAwMDBaFw00MDEyMzEwMDAwMDBaMD0x -CzAJBgNVBAYTAkNOMREwDwYDVQQKDAhVbmlUcnVzdDEbMBkGA1UEAwwSVUNBIEds -b2JhbCBHMiBSb290MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxeYr -b3zvJgUno4Ek2m/LAfmZmqkywiKHYUGRO8vDaBsGxUypK8FnFyIdK+35KYmToni9 -kmugow2ifsqTs6bRjDXVdfkX9s9FxeV67HeToI8jrg4aA3++1NDtLnurRiNb/yzm -VHqUwCoV8MmNsHo7JOHXaOIxPAYzRrZUEaalLyJUKlgNAQLx+hVRZ2zA+te2G3/R -VogvGjqNO7uCEeBHANBSh6v7hn4PJGtAnTRnvI3HLYZveT6OqTwXS3+wmeOwcWDc -C/Vkw85DvG1xudLeJ1uK6NjGruFZfc8oLTW4lVYa8bJYS7cSN8h8s+1LgOGN+jIj -tm+3SJUIsUROhYw6AlQgL9+/V087OpAh18EmNVQg7Mc/R+zvWr9LesGtOxdQXGLY -D0tK3Cv6brxzks3sx1DoQZbXqX5t2Okdj4q1uViSukqSKwxW/YDrCPBeKW4bHAyv -j5OJrdu9o54hyokZ7N+1wxrrFv54NkzWbtA+FxyQF2smuvt6L78RHBgOLXMDj6Dl -NaBa4kx1HXHhOThTeEDMg5PXCp6dW4+K5OXgSORIskfNTip1KnvyIvbJvgmRlld6 
-iIis7nCs+dwp4wwcOxJORNanTrAmyPPZGpeRaOrvjUYG0lZFWJo8DA+DuAUlwznP -O6Q0ibd5Ei9Hxeepl2n8pndntd978XplFeRhVmUCAwEAAaNCMEAwDgYDVR0PAQH/ -BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFIHEjMz15DD/pQwIX4wV -ZyF0Ad/fMA0GCSqGSIb3DQEBCwUAA4ICAQATZSL1jiutROTL/7lo5sOASD0Ee/oj -L3rtNtqyzm325p7lX1iPyzcyochltq44PTUbPrw7tgTQvPlJ9Zv3hcU2tsu8+Mg5 -1eRfB70VVJd0ysrtT7q6ZHafgbiERUlMjW+i67HM0cOU2kTC5uLqGOiiHycFutfl -1qnN3e92mI0ADs0b+gO3joBYDic/UvuUospeZcnWhNq5NXHzJsBPd+aBJ9J3O5oU -b3n09tDh05S60FdRvScFDcH9yBIw7m+NESsIndTUv4BFFJqIRNow6rSn4+7vW4LV -PtateJLbXDzz2K36uGt/xDYotgIVilQsnLAXc47QN6MUPJiVAAwpBVueSUmxX8fj -y88nZY41F7dXyDDZQVu5FLbowg+UMaeUmMxq67XhJ/UQqAHojhJi6IjMtX9Gl8Cb -EGY4GjZGXyJoPd/JxhMnq1MGrKI8hgZlb7F+sSlEmqO6SWkoaY/X5V+tBIZkbxqg -DMUIYs6Ao9Dz7GjevjPHF1t/gMRMTLGmhIrDO7gJzRSBuhjjVFc2/tsvfEehOjPI -+Vg7RE+xygKJBJYoaMVLuCaJu9YzL1DV/pqJuhgyklTGW+Cd+V7lDSKb9triyCGy -YiGqhkCyLmTTX8jjfhFnRR8F/uOi77Oos/N9j/gMHyIfLXC0uAE0djAA5SN4p1bX -UB+K+wb1whnw0A== ------END CERTIFICATE----- - -# Issuer: CN=UCA Extended Validation Root O=UniTrust -# Subject: CN=UCA Extended Validation Root O=UniTrust -# Label: "UCA Extended Validation Root" -# Serial: 106100277556486529736699587978573607008 -# MD5 Fingerprint: a1:f3:5f:43:c6:34:9b:da:bf:8c:7e:05:53:ad:96:e2 -# SHA1 Fingerprint: a3:a1:b0:6f:24:61:23:4a:e3:36:a5:c2:37:fc:a6:ff:dd:f0:d7:3a -# SHA256 Fingerprint: d4:3a:f9:b3:54:73:75:5c:96:84:fc:06:d7:d8:cb:70:ee:5c:28:e7:73:fb:29:4e:b4:1e:e7:17:22:92:4d:24 ------BEGIN CERTIFICATE----- -MIIFWjCCA0KgAwIBAgIQT9Irj/VkyDOeTzRYZiNwYDANBgkqhkiG9w0BAQsFADBH -MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNVBAMMHFVDQSBF -eHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwHhcNMTUwMzEzMDAwMDAwWhcNMzgxMjMx -MDAwMDAwWjBHMQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNV -BAMMHFVDQSBFeHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwggIiMA0GCSqGSIb3DQEB -AQUAA4ICDwAwggIKAoICAQCpCQcoEwKwmeBkqh5DFnpzsZGgdT6o+uM4AHrsiWog -D4vFsJszA1qGxliG1cGFu0/GnEBNyr7uaZa4rYEwmnySBesFK5pI0Lh2PpbIILvS -sPGP2KxFRv+qZ2C0d35qHzwaUnoEPQc8hQ2E0B92CvdqFN9y4zR8V05WAT558aop 
-O2z6+I9tTcg1367r3CTueUWnhbYFiN6IXSV8l2RnCdm/WhUFhvMJHuxYMjMR83dk -sHYf5BA1FxvyDrFspCqjc/wJHx4yGVMR59mzLC52LqGj3n5qiAno8geK+LLNEOfi -c0CTuwjRP+H8C5SzJe98ptfRr5//lpr1kXuYC3fUfugH0mK1lTnj8/FtDw5lhIpj -VMWAtuCeS31HJqcBCF3RiJ7XwzJE+oJKCmhUfzhTA8ykADNkUVkLo4KRel7sFsLz -KuZi2irbWWIQJUoqgQtHB0MGcIfS+pMRKXpITeuUx3BNr2fVUbGAIAEBtHoIppB/ -TuDvB0GHr2qlXov7z1CymlSvw4m6WC31MJixNnI5fkkE/SmnTHnkBVfblLkWU41G -sx2VYVdWf6/wFlthWG82UBEL2KwrlRYaDh8IzTY0ZRBiZtWAXxQgXy0MoHgKaNYs -1+lvK9JKBZP8nm9rZ/+I8U6laUpSNwXqxhaN0sSZ0YIrO7o1dfdRUVjzyAfd5LQD -fwIDAQABo0IwQDAdBgNVHQ4EFgQU2XQ65DA9DfcS3H5aBZ8eNJr34RQwDwYDVR0T -AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBADaN -l8xCFWQpN5smLNb7rhVpLGsaGvdftvkHTFnq88nIua7Mui563MD1sC3AO6+fcAUR -ap8lTwEpcOPlDOHqWnzcSbvBHiqB9RZLcpHIojG5qtr8nR/zXUACE/xOHAbKsxSQ -VBcZEhrxH9cMaVr2cXj0lH2RC47skFSOvG+hTKv8dGT9cZr4QQehzZHkPJrgmzI5 -c6sq1WnIeJEmMX3ixzDx/BR4dxIOE/TdFpS/S2d7cFOFyrC78zhNLJA5wA3CXWvp -4uXViI3WLL+rG761KIcSF3Ru/H38j9CHJrAb+7lsq+KePRXBOy5nAliRn+/4Qh8s -t2j1da3Ptfb/EX3C8CSlrdP6oDyp+l3cpaDvRKS+1ujl5BOWF3sGPjLtx7dCvHaj -2GU4Kzg1USEODm8uNBNA4StnDG1KQTAYI1oyVZnJF+A83vbsea0rWBmirSwiGpWO -vpaQXUJXxPkUAzUrHC1RVwinOt4/5Mi0A3PCwSaAuwtCH60NryZy2sy+s6ODWA2C -xR9GUeOcGMyNm43sSet1UNWMKFnKdDTajAshqx7qG+XH/RU+wBeq+yNuJkbL+vmx -cmtpzyKEC2IPrNkZAJSidjzULZrtBJ4tBmIQN1IchXIbJ+XMxjHsN+xjWZsLHXbM -fjKaiJUINlK73nZfdklJrX+9ZSCyycErdhh2n1ax ------END CERTIFICATE----- - -# Issuer: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036 -# Subject: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036 -# Label: "Certigna Root CA" -# Serial: 269714418870597844693661054334862075617 -# MD5 Fingerprint: 0e:5c:30:62:27:eb:5b:bc:d7:ae:62:ba:e9:d5:df:77 -# SHA1 Fingerprint: 2d:0d:52:14:ff:9e:ad:99:24:01:74:20:47:6e:6c:85:27:27:f5:43 -# SHA256 Fingerprint: d4:8d:3d:23:ee:db:50:a4:59:e5:51:97:60:1c:27:77:4b:9d:7b:18:c9:4d:5a:05:95:11:a1:02:50:b9:31:68 ------BEGIN CERTIFICATE----- -MIIGWzCCBEOgAwIBAgIRAMrpG4nxVQMNo+ZBbcTjpuEwDQYJKoZIhvcNAQELBQAw 
-WjELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczEcMBoGA1UECwwTMDAw -MiA0ODE0NjMwODEwMDAzNjEZMBcGA1UEAwwQQ2VydGlnbmEgUm9vdCBDQTAeFw0x -MzEwMDEwODMyMjdaFw0zMzEwMDEwODMyMjdaMFoxCzAJBgNVBAYTAkZSMRIwEAYD -VQQKDAlEaGlteW90aXMxHDAaBgNVBAsMEzAwMDIgNDgxNDYzMDgxMDAwMzYxGTAX -BgNVBAMMEENlcnRpZ25hIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw -ggIKAoICAQDNGDllGlmx6mQWDoyUJJV8g9PFOSbcDO8WV43X2KyjQn+Cyu3NW9sO -ty3tRQgXstmzy9YXUnIo245Onoq2C/mehJpNdt4iKVzSs9IGPjA5qXSjklYcoW9M -CiBtnyN6tMbaLOQdLNyzKNAT8kxOAkmhVECe5uUFoC2EyP+YbNDrihqECB63aCPu -I9Vwzm1RaRDuoXrC0SIxwoKF0vJVdlB8JXrJhFwLrN1CTivngqIkicuQstDuI7pm -TLtipPlTWmR7fJj6o0ieD5Wupxj0auwuA0Wv8HT4Ks16XdG+RCYyKfHx9WzMfgIh -C59vpD++nVPiz32pLHxYGpfhPTc3GGYo0kDFUYqMwy3OU4gkWGQwFsWq4NYKpkDf -ePb1BHxpE4S80dGnBs8B92jAqFe7OmGtBIyT46388NtEbVncSVmurJqZNjBBe3Yz -IoejwpKGbvlw7q6Hh5UbxHq9MfPU0uWZ/75I7HX1eBYdpnDBfzwboZL7z8g81sWT -Co/1VTp2lc5ZmIoJlXcymoO6LAQ6l73UL77XbJuiyn1tJslV1c/DeVIICZkHJC1k -JWumIWmbat10TWuXekG9qxf5kBdIjzb5LdXF2+6qhUVB+s06RbFo5jZMm5BX7CO5 -hwjCxAnxl4YqKE3idMDaxIzb3+KhF1nOJFl0Mdp//TBt2dzhauH8XwIDAQABo4IB -GjCCARYwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE -FBiHVuBud+4kNTxOc5of1uHieX4rMB8GA1UdIwQYMBaAFBiHVuBud+4kNTxOc5of -1uHieX4rMEQGA1UdIAQ9MDswOQYEVR0gADAxMC8GCCsGAQUFBwIBFiNodHRwczov -L3d3d3cuY2VydGlnbmEuZnIvYXV0b3JpdGVzLzBtBgNVHR8EZjBkMC+gLaArhilo -dHRwOi8vY3JsLmNlcnRpZ25hLmZyL2NlcnRpZ25hcm9vdGNhLmNybDAxoC+gLYYr -aHR0cDovL2NybC5kaGlteW90aXMuY29tL2NlcnRpZ25hcm9vdGNhLmNybDANBgkq -hkiG9w0BAQsFAAOCAgEAlLieT/DjlQgi581oQfccVdV8AOItOoldaDgvUSILSo3L -6btdPrtcPbEo/uRTVRPPoZAbAh1fZkYJMyjhDSSXcNMQH+pkV5a7XdrnxIxPTGRG -HVyH41neQtGbqH6mid2PHMkwgu07nM3A6RngatgCdTer9zQoKJHyBApPNeNgJgH6 -0BGM+RFq7q89w1DTj18zeTyGqHNFkIwgtnJzFyO+B2XleJINugHA64wcZr+shncB -lA2c5uk5jR+mUYyZDDl34bSb+hxnV29qao6pK0xXeXpXIs/NX2NGjVxZOob4Mkdi -o2cNGJHc+6Zr9UhhcyNZjgKnvETq9Emd8VRY+WCv2hikLyhF3HqgiIZd8zvn/yk1 -gPxkQ5Tm4xxvvq0OKmOZK8l+hfZx6AYDlf7ej0gcWtSS6Cvu5zHbugRqh5jnxV/v -faci9wHYTfmJ0A6aBVmknpjZbyvKcL5kwlWj9Omvw5Ip3IgWJJk8jSaYtlu3zM63 
-Nwf9JtmYhST/WSMDmu2dnajkXjjO11INb9I/bbEFa0nOipFGc/T2L/Coc3cOZayh -jWZSaX5LaAzHHjcng6WMxwLkFM1JAbBzs/3GkDpv0mztO+7skb6iQ12LAEpmJURw -3kAP+HwV96LOPNdeE4yBFxgX0b3xdxA61GU5wSesVywlVP+i2k+KYTlerj1KjL0= ------END CERTIFICATE----- diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/certifi/core.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/certifi/core.py deleted file mode 100644 index 2d02ea4..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/certifi/core.py +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -""" -certifi.py -~~~~~~~~~~ - -This module returns the installation location of cacert.pem. -""" -import os - - -def where(): - f = os.path.dirname(__file__) - - return os.path.join(f, 'cacert.pem') - - -if __name__ == '__main__': - print(where()) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/__init__.py deleted file mode 100644 index 0f9f820..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - - -from .compat import PY2, PY3 -from .universaldetector import UniversalDetector -from .version import __version__, VERSION - - -def detect(byte_str): - """ - Detect the encoding of the given byte string. - - :param byte_str: The byte sequence to examine. - :type byte_str: ``bytes`` or ``bytearray`` - """ - if not isinstance(byte_str, bytearray): - if not isinstance(byte_str, bytes): - raise TypeError('Expected object of type bytes or bytearray, got: ' - '{0}'.format(type(byte_str))) - else: - byte_str = bytearray(byte_str) - detector = UniversalDetector() - detector.feed(byte_str) - return detector.close() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/big5freq.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/big5freq.py deleted file mode 100644 index 38f3251..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/big5freq.py +++ /dev/null @@ -1,386 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Communicator client code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. 
-# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -# Big5 frequency table -# by Taiwan's Mandarin Promotion Council -# <http://www.edu.tw:81/mandr/> -# -# 128 --> 0.42261 -# 256 --> 0.57851 -# 512 --> 0.74851 -# 1024 --> 0.89384 -# 2048 --> 0.97583 -# -# Ideal Distribution Ratio = 0.74851/(1-0.74851) =2.98 -# Random Distribution Ration = 512/(5401-512)=0.105 -# -# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR - -BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75 - -#Char to FreqOrder table -BIG5_TABLE_SIZE = 5376 - -BIG5_CHAR_TO_FREQ_ORDER = ( - 1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16 -3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32 -1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48 - 63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64 -3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80 -4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96 -5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112 - 630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128 - 179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144 - 995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160 -2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176 
-1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192 -3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208 - 706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224 -1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240 -3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256 -2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272 - 437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288 -3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304 -1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320 -5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336 - 266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352 -5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368 -1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384 - 32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400 - 188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416 -3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432 -3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448 - 324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464 -2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480 -2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496 - 314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512 - 287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528 -3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544 
-1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560 -1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576 -1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592 -2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608 - 265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624 -4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640 -1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656 -5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672 -2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688 - 383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704 - 98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720 - 523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736 - 710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752 -5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768 - 379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784 -1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800 - 585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816 - 690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832 -5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848 -1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864 - 544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880 -3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896 -4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 
970,2264,1748,1966,2082,4225, # 912 -3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928 - 279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944 - 610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960 -1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976 -4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992 -3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008 -3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024 -2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040 -5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056 -3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072 -5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088 -1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104 -2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120 -1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136 - 78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152 -1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168 -4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184 -3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200 - 534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216 - 165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232 - 626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248 -2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264 -5122, 611,1156, 
854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280 -1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296 -2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312 -1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328 -1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344 -5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360 -5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376 -5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392 -3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408 -4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424 -4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440 -2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456 -5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472 -3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488 - 598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504 -5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520 -5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536 -1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552 -2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568 -3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584 -4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600 -5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616 -3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 
622,1300,1180,1388,1562, # 1632 -4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648 -1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664 -1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680 -4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696 -1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712 - 240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728 -1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744 -1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760 -3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776 - 619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792 -5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808 -2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824 -1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840 -1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856 -5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872 - 829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888 -4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904 - 375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920 -2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936 - 444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952 -1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968 -1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984 - 730,1515, 
184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000 -4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016 -4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032 -1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048 -3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064 -5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080 -5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096 -1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112 -2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128 -1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144 -3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160 -2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176 -3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192 -2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208 -4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224 -4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240 -3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256 - 97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272 -3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288 - 424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304 -3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320 -4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336 -3722,3604, 
815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352 -1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368 -5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384 - 199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400 -5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416 -1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432 - 391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448 -4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464 -4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480 - 397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496 -2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512 -2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528 -3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544 -1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560 -4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576 -2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592 -1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608 -1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624 -2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640 -3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656 -1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672 -5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688 
-1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704 -4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720 -1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736 - 135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752 -1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768 -4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784 -4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800 -2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816 -1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832 -4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848 - 660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864 -5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880 -2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896 -3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912 -4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928 - 790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944 -5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960 -5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976 -1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992 -4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008 -4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024 -2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040 -3874,5370,5371, 12,2668, 45, 
976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056 -3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072 -2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088 -1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104 -4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120 -3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136 -3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152 -2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168 -4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184 -5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200 -3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216 -2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232 -3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248 -1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264 -2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280 -3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296 -4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312 -2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328 -2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344 -5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360 -1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376 -2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392 -1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 
413,1714, # 3408 -3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424 -4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440 -2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456 -3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472 -3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488 -2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504 -4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520 -2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536 -3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552 -4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568 -5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584 -3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600 - 194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616 -1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632 -4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648 -1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664 -4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680 -5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696 - 510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712 -5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728 -5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744 -2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760 -3252,2308, 
271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776 -2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792 -2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808 - 681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824 -1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840 -4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856 -3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872 -3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888 - 838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904 -2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920 - 625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936 -2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952 -4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968 -1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984 -4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000 -1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016 -3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032 - 574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048 -3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064 -5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080 -5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096 -3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112 
-3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128 -1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144 -2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160 -5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176 -1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192 -1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208 -3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224 - 919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240 -1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256 -4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272 -5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288 -2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304 -3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320 - 516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336 -1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352 -2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368 -2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384 -5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400 -5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416 -5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432 -2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448 -2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464 
-1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480 -4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496 -3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512 -3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528 -4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544 -4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560 -2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576 -2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592 -5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608 -4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624 -5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640 -4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656 - 502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672 - 121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688 -1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704 -3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720 -4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736 -1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752 -5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768 -2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784 -2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800 -3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816 
-5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832 -1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848 -3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864 -5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880 -1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896 -5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912 -2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928 -3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944 -2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960 -3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976 -3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992 -3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008 -4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024 - 803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040 -2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056 -4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072 -3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088 -5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104 -1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120 -5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136 - 425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152 -1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168 - 479,5740,5741, 
832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184 -4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200 -1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216 -4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232 -1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248 - 433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264 -3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280 -4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296 -5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312 - 938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328 -3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344 - 890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360 -2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376 -) - diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/big5prober.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/big5prober.py deleted file mode 100644 index 98f9970..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/big5prober.py +++ /dev/null @@ -1,47 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Communicator client code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. 
-# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .mbcharsetprober import MultiByteCharSetProber -from .codingstatemachine import CodingStateMachine -from .chardistribution import Big5DistributionAnalysis -from .mbcssm import BIG5_SM_MODEL - - -class Big5Prober(MultiByteCharSetProber): - def __init__(self): - super(Big5Prober, self).__init__() - self.coding_sm = CodingStateMachine(BIG5_SM_MODEL) - self.distribution_analyzer = Big5DistributionAnalysis() - self.reset() - - @property - def charset_name(self): - return "Big5" - - @property - def language(self): - return "Chinese" diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/chardistribution.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/chardistribution.py deleted file mode 100644 index c0395f4..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/chardistribution.py +++ /dev/null @@ -1,233 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Communicator client code. 
-# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .euctwfreq import (EUCTW_CHAR_TO_FREQ_ORDER, EUCTW_TABLE_SIZE, - EUCTW_TYPICAL_DISTRIBUTION_RATIO) -from .euckrfreq import (EUCKR_CHAR_TO_FREQ_ORDER, EUCKR_TABLE_SIZE, - EUCKR_TYPICAL_DISTRIBUTION_RATIO) -from .gb2312freq import (GB2312_CHAR_TO_FREQ_ORDER, GB2312_TABLE_SIZE, - GB2312_TYPICAL_DISTRIBUTION_RATIO) -from .big5freq import (BIG5_CHAR_TO_FREQ_ORDER, BIG5_TABLE_SIZE, - BIG5_TYPICAL_DISTRIBUTION_RATIO) -from .jisfreq import (JIS_CHAR_TO_FREQ_ORDER, JIS_TABLE_SIZE, - JIS_TYPICAL_DISTRIBUTION_RATIO) - - -class CharDistributionAnalysis(object): - ENOUGH_DATA_THRESHOLD = 1024 - SURE_YES = 0.99 - SURE_NO = 0.01 - MINIMUM_DATA_THRESHOLD = 3 - - def __init__(self): - # Mapping table to get frequency order from char order (get from - # GetOrder()) - self._char_to_freq_order = None - self._table_size = None # Size of above table - # This is a constant value which varies from language to language, - # used 
in calculating confidence. See - # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html - # for further detail. - self.typical_distribution_ratio = None - self._done = None - self._total_chars = None - self._freq_chars = None - self.reset() - - def reset(self): - """reset analyser, clear any state""" - # If this flag is set to True, detection is done and conclusion has - # been made - self._done = False - self._total_chars = 0 # Total characters encountered - # The number of characters whose frequency order is less than 512 - self._freq_chars = 0 - - def feed(self, char, char_len): - """feed a character with known length""" - if char_len == 2: - # we only care about 2-bytes character in our distribution analysis - order = self.get_order(char) - else: - order = -1 - if order >= 0: - self._total_chars += 1 - # order is valid - if order < self._table_size: - if 512 > self._char_to_freq_order[order]: - self._freq_chars += 1 - - def get_confidence(self): - """return confidence based on existing data""" - # if we didn't receive any character in our consideration range, - # return negative answer - if self._total_chars <= 0 or self._freq_chars <= self.MINIMUM_DATA_THRESHOLD: - return self.SURE_NO - - if self._total_chars != self._freq_chars: - r = (self._freq_chars / ((self._total_chars - self._freq_chars) - * self.typical_distribution_ratio)) - if r < self.SURE_YES: - return r - - # normalize confidence (we don't want to be 100% sure) - return self.SURE_YES - - def got_enough_data(self): - # It is not necessary to receive all data to draw conclusion. - # For charset detection, certain amount of data is enough - return self._total_chars > self.ENOUGH_DATA_THRESHOLD - - def get_order(self, byte_str): - # We do not handle characters based on the original encoding string, - # but convert this encoding string to a number, here called order. - # This allows multiple encodings of a language to share one frequency - # table. 
- return -1 - - -class EUCTWDistributionAnalysis(CharDistributionAnalysis): - def __init__(self): - super(EUCTWDistributionAnalysis, self).__init__() - self._char_to_freq_order = EUCTW_CHAR_TO_FREQ_ORDER - self._table_size = EUCTW_TABLE_SIZE - self.typical_distribution_ratio = EUCTW_TYPICAL_DISTRIBUTION_RATIO - - def get_order(self, byte_str): - # for euc-TW encoding, we are interested - # first byte range: 0xc4 -- 0xfe - # second byte range: 0xa1 -- 0xfe - # no validation needed here. State machine has done that - first_char = byte_str[0] - if first_char >= 0xC4: - return 94 * (first_char - 0xC4) + byte_str[1] - 0xA1 - else: - return -1 - - -class EUCKRDistributionAnalysis(CharDistributionAnalysis): - def __init__(self): - super(EUCKRDistributionAnalysis, self).__init__() - self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER - self._table_size = EUCKR_TABLE_SIZE - self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO - - def get_order(self, byte_str): - # for euc-KR encoding, we are interested - # first byte range: 0xb0 -- 0xfe - # second byte range: 0xa1 -- 0xfe - # no validation needed here. State machine has done that - first_char = byte_str[0] - if first_char >= 0xB0: - return 94 * (first_char - 0xB0) + byte_str[1] - 0xA1 - else: - return -1 - - -class GB2312DistributionAnalysis(CharDistributionAnalysis): - def __init__(self): - super(GB2312DistributionAnalysis, self).__init__() - self._char_to_freq_order = GB2312_CHAR_TO_FREQ_ORDER - self._table_size = GB2312_TABLE_SIZE - self.typical_distribution_ratio = GB2312_TYPICAL_DISTRIBUTION_RATIO - - def get_order(self, byte_str): - # for GB2312 encoding, we are interested - # first byte range: 0xb0 -- 0xfe - # second byte range: 0xa1 -- 0xfe - # no validation needed here. 
State machine has done that - first_char, second_char = byte_str[0], byte_str[1] - if (first_char >= 0xB0) and (second_char >= 0xA1): - return 94 * (first_char - 0xB0) + second_char - 0xA1 - else: - return -1 - - -class Big5DistributionAnalysis(CharDistributionAnalysis): - def __init__(self): - super(Big5DistributionAnalysis, self).__init__() - self._char_to_freq_order = BIG5_CHAR_TO_FREQ_ORDER - self._table_size = BIG5_TABLE_SIZE - self.typical_distribution_ratio = BIG5_TYPICAL_DISTRIBUTION_RATIO - - def get_order(self, byte_str): - # for big5 encoding, we are interested - # first byte range: 0xa4 -- 0xfe - # second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe - # no validation needed here. State machine has done that - first_char, second_char = byte_str[0], byte_str[1] - if first_char >= 0xA4: - if second_char >= 0xA1: - return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63 - else: - return 157 * (first_char - 0xA4) + second_char - 0x40 - else: - return -1 - - -class SJISDistributionAnalysis(CharDistributionAnalysis): - def __init__(self): - super(SJISDistributionAnalysis, self).__init__() - self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER - self._table_size = JIS_TABLE_SIZE - self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO - - def get_order(self, byte_str): - # for sjis encoding, we are interested - # first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe - # second byte range: 0x40 -- 0x7e, 0x81 -- oxfe - # no validation needed here. 
State machine has done that - first_char, second_char = byte_str[0], byte_str[1] - if (first_char >= 0x81) and (first_char <= 0x9F): - order = 188 * (first_char - 0x81) - elif (first_char >= 0xE0) and (first_char <= 0xEF): - order = 188 * (first_char - 0xE0 + 31) - else: - return -1 - order = order + second_char - 0x40 - if second_char > 0x7F: - order = -1 - return order - - -class EUCJPDistributionAnalysis(CharDistributionAnalysis): - def __init__(self): - super(EUCJPDistributionAnalysis, self).__init__() - self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER - self._table_size = JIS_TABLE_SIZE - self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO - - def get_order(self, byte_str): - # for euc-JP encoding, we are interested - # first byte range: 0xa0 -- 0xfe - # second byte range: 0xa1 -- 0xfe - # no validation needed here. State machine has done that - char = byte_str[0] - if char >= 0xA0: - return 94 * (char - 0xA1) + byte_str[1] - 0xa1 - else: - return -1 diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/charsetgroupprober.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/charsetgroupprober.py deleted file mode 100644 index 8b3738e..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/charsetgroupprober.py +++ /dev/null @@ -1,106 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Communicator client code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. 
-# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .enums import ProbingState -from .charsetprober import CharSetProber - - -class CharSetGroupProber(CharSetProber): - def __init__(self, lang_filter=None): - super(CharSetGroupProber, self).__init__(lang_filter=lang_filter) - self._active_num = 0 - self.probers = [] - self._best_guess_prober = None - - def reset(self): - super(CharSetGroupProber, self).reset() - self._active_num = 0 - for prober in self.probers: - if prober: - prober.reset() - prober.active = True - self._active_num += 1 - self._best_guess_prober = None - - @property - def charset_name(self): - if not self._best_guess_prober: - self.get_confidence() - if not self._best_guess_prober: - return None - return self._best_guess_prober.charset_name - - @property - def language(self): - if not self._best_guess_prober: - self.get_confidence() - if not self._best_guess_prober: - return None - return self._best_guess_prober.language - - def feed(self, byte_str): - for prober in self.probers: - if not prober: - continue - if not prober.active: - continue - state = prober.feed(byte_str) - if not state: - continue - if state == ProbingState.FOUND_IT: - 
self._best_guess_prober = prober - return self.state - elif state == ProbingState.NOT_ME: - prober.active = False - self._active_num -= 1 - if self._active_num <= 0: - self._state = ProbingState.NOT_ME - return self.state - return self.state - - def get_confidence(self): - state = self.state - if state == ProbingState.FOUND_IT: - return 0.99 - elif state == ProbingState.NOT_ME: - return 0.01 - best_conf = 0.0 - self._best_guess_prober = None - for prober in self.probers: - if not prober: - continue - if not prober.active: - self.logger.debug('%s not active', prober.charset_name) - continue - conf = prober.get_confidence() - self.logger.debug('%s %s confidence = %s', prober.charset_name, prober.language, conf) - if best_conf < conf: - best_conf = conf - self._best_guess_prober = prober - if not self._best_guess_prober: - return 0.0 - return best_conf diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/charsetprober.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/charsetprober.py deleted file mode 100644 index eac4e59..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/charsetprober.py +++ /dev/null @@ -1,145 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Universal charset detector code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 2001 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# Shy Shalom - original C code -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. 
-# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -import logging -import re - -from .enums import ProbingState - - -class CharSetProber(object): - - SHORTCUT_THRESHOLD = 0.95 - - def __init__(self, lang_filter=None): - self._state = None - self.lang_filter = lang_filter - self.logger = logging.getLogger(__name__) - - def reset(self): - self._state = ProbingState.DETECTING - - @property - def charset_name(self): - return None - - def feed(self, buf): - pass - - @property - def state(self): - return self._state - - def get_confidence(self): - return 0.0 - - @staticmethod - def filter_high_byte_only(buf): - buf = re.sub(b'([\x00-\x7F])+', b' ', buf) - return buf - - @staticmethod - def filter_international_words(buf): - """ - We define three types of bytes: - alphabet: english alphabets [a-zA-Z] - international: international characters [\x80-\xFF] - marker: everything else [^a-zA-Z\x80-\xFF] - - The input buffer can be thought to contain a series of words delimited - by markers. This function works to filter all words that contain at - least one international character. All contiguous sequences of markers - are replaced by a single space ascii character. - - This filter applies to all scripts which do not use English characters. - """ - filtered = bytearray() - - # This regex expression filters out only words that have at-least one - # international character. The word may include one marker character at - # the end. 
- words = re.findall(b'[a-zA-Z]*[\x80-\xFF]+[a-zA-Z]*[^a-zA-Z\x80-\xFF]?', - buf) - - for word in words: - filtered.extend(word[:-1]) - - # If the last character in the word is a marker, replace it with a - # space as markers shouldn't affect our analysis (they are used - # similarly across all languages and may thus have similar - # frequencies). - last_char = word[-1:] - if not last_char.isalpha() and last_char < b'\x80': - last_char = b' ' - filtered.extend(last_char) - - return filtered - - @staticmethod - def filter_with_english_letters(buf): - """ - Returns a copy of ``buf`` that retains only the sequences of English - alphabet and high byte characters that are not between <> characters. - Also retains English alphabet and high byte characters immediately - before occurrences of >. - - This filter can be applied to all scripts which contain both English - characters and extended ASCII characters, but is currently only used by - ``Latin1Prober``. - """ - filtered = bytearray() - in_tag = False - prev = 0 - - for curr in range(len(buf)): - # Slice here to get bytes instead of an int with Python 3 - buf_char = buf[curr:curr + 1] - # Check if we're coming out of or entering an HTML tag - if buf_char == b'>': - in_tag = False - elif buf_char == b'<': - in_tag = True - - # If current character is not extended-ASCII and not alphabetic... - if buf_char < b'\x80' and not buf_char.isalpha(): - # ...and we're not in a tag - if curr > prev and not in_tag: - # Keep everything after last non-extended-ASCII, - # non-alphabetic character - filtered.extend(buf[prev:curr]) - # Output a space to delimit stretch we kept - filtered.extend(b' ') - prev = curr + 1 - - # If we're not in a tag... 
- if not in_tag: - # Keep everything after last non-extended-ASCII, non-alphabetic - # character - filtered.extend(buf[prev:]) - - return filtered diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/cli/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/cli/__init__.py deleted file mode 100644 index 8b13789..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/cli/__init__.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/cli/chardetect.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/cli/chardetect.py deleted file mode 100644 index c61136b..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/cli/chardetect.py +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env python -""" -Script which takes one or more file paths and reports on their detected -encodings - -Example:: - - % chardetect somefile someotherfile - somefile: windows-1252 with confidence 0.5 - someotherfile: ascii with confidence 1.0 - -If no paths are provided, it takes its input from stdin. - -""" - -from __future__ import absolute_import, print_function, unicode_literals - -import argparse -import sys - -from pip._vendor.chardet import __version__ -from pip._vendor.chardet.compat import PY2 -from pip._vendor.chardet.universaldetector import UniversalDetector - - -def description_of(lines, name='stdin'): - """ - Return a string describing the probable encoding of a file or - list of strings. - - :param lines: The lines to get the encoding of. - :type lines: Iterable of bytes - :param name: Name of file or collection of lines - :type name: str - """ - u = UniversalDetector() - for line in lines: - line = bytearray(line) - u.feed(line) - # shortcut out of the loop to save reading further - particularly useful if we read a BOM. 
- if u.done: - break - u.close() - result = u.result - if PY2: - name = name.decode(sys.getfilesystemencoding(), 'ignore') - if result['encoding']: - return '{0}: {1} with confidence {2}'.format(name, result['encoding'], - result['confidence']) - else: - return '{0}: no result'.format(name) - - -def main(argv=None): - """ - Handles command line arguments and gets things started. - - :param argv: List of arguments, as if specified on the command-line. - If None, ``sys.argv[1:]`` is used instead. - :type argv: list of str - """ - # Get command line arguments - parser = argparse.ArgumentParser( - description="Takes one or more file paths and reports their detected \ - encodings") - parser.add_argument('input', - help='File whose encoding we would like to determine. \ - (default: stdin)', - type=argparse.FileType('rb'), nargs='*', - default=[sys.stdin if PY2 else sys.stdin.buffer]) - parser.add_argument('--version', action='version', - version='%(prog)s {0}'.format(__version__)) - args = parser.parse_args(argv) - - for f in args.input: - if f.isatty(): - print("You are running chardetect interactively. Press " + - "CTRL-D twice at the start of a blank line to signal the " + - "end of your input. If you want help, run chardetect " + - "--help\n", file=sys.stderr) - print(description_of(f, f.name)) - - -if __name__ == '__main__': - main() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/codingstatemachine.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/codingstatemachine.py deleted file mode 100644 index 68fba44..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/codingstatemachine.py +++ /dev/null @@ -1,88 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is mozilla.org code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. 
-# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -import logging - -from .enums import MachineState - - -class CodingStateMachine(object): - """ - A state machine to verify a byte sequence for a particular encoding. For - each byte the detector receives, it will feed that byte to every active - state machine available, one byte at a time. The state machine changes its - state based on its previous state and the byte it receives. There are 3 - states in a state machine that are of interest to an auto-detector: - - START state: This is the state to start with, or a legal byte sequence - (i.e. a valid code point) for character has been identified. - - ME state: This indicates that the state machine identified a byte sequence - that is specific to the charset it is designed for and that - there is no other possible encoding which can contain this byte - sequence. This will to lead to an immediate positive answer for - the detector. - - ERROR state: This indicates the state machine identified an illegal byte - sequence for that encoding. 
This will lead to an immediate - negative answer for this encoding. Detector will exclude this - encoding from consideration from here on. - """ - def __init__(self, sm): - self._model = sm - self._curr_byte_pos = 0 - self._curr_char_len = 0 - self._curr_state = None - self.logger = logging.getLogger(__name__) - self.reset() - - def reset(self): - self._curr_state = MachineState.START - - def next_state(self, c): - # for each byte we get its class - # if it is first byte, we also get byte length - byte_class = self._model['class_table'][c] - if self._curr_state == MachineState.START: - self._curr_byte_pos = 0 - self._curr_char_len = self._model['char_len_table'][byte_class] - # from byte's class and state_table, we get its next state - curr_state = (self._curr_state * self._model['class_factor'] - + byte_class) - self._curr_state = self._model['state_table'][curr_state] - self._curr_byte_pos += 1 - return self._curr_state - - def get_current_charlen(self): - return self._curr_char_len - - def get_coding_state_machine(self): - return self._model['name'] - - @property - def language(self): - return self._model['language'] diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/compat.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/compat.py deleted file mode 100644 index ddd7468..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/compat.py +++ /dev/null @@ -1,34 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# Contributor(s): -# Dan Blanchard -# Ian Cordasco -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. 
-# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -import sys - - -if sys.version_info < (3, 0): - PY2 = True - PY3 = False - base_str = (str, unicode) - text_type = unicode -else: - PY2 = False - PY3 = True - base_str = (bytes, str) - text_type = str diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/cp949prober.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/cp949prober.py deleted file mode 100644 index efd793a..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/cp949prober.py +++ /dev/null @@ -1,49 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is mozilla.org code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .chardistribution import EUCKRDistributionAnalysis -from .codingstatemachine import CodingStateMachine -from .mbcharsetprober import MultiByteCharSetProber -from .mbcssm import CP949_SM_MODEL - - -class CP949Prober(MultiByteCharSetProber): - def __init__(self): - super(CP949Prober, self).__init__() - self.coding_sm = CodingStateMachine(CP949_SM_MODEL) - # NOTE: CP949 is a superset of EUC-KR, so the distribution should be - # not different. - self.distribution_analyzer = EUCKRDistributionAnalysis() - self.reset() - - @property - def charset_name(self): - return "CP949" - - @property - def language(self): - return "Korean" diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/enums.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/enums.py deleted file mode 100644 index 0451207..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/enums.py +++ /dev/null @@ -1,76 +0,0 @@ -""" -All of the Enums that are used throughout the chardet package. - -:author: Dan Blanchard (dan.blanchard@gmail.com) -""" - - -class InputState(object): - """ - This enum represents the different states a universal detector can be in. - """ - PURE_ASCII = 0 - ESC_ASCII = 1 - HIGH_BYTE = 2 - - -class LanguageFilter(object): - """ - This enum represents the different language filters we can apply to a - ``UniversalDetector``. 
- """ - CHINESE_SIMPLIFIED = 0x01 - CHINESE_TRADITIONAL = 0x02 - JAPANESE = 0x04 - KOREAN = 0x08 - NON_CJK = 0x10 - ALL = 0x1F - CHINESE = CHINESE_SIMPLIFIED | CHINESE_TRADITIONAL - CJK = CHINESE | JAPANESE | KOREAN - - -class ProbingState(object): - """ - This enum represents the different states a prober can be in. - """ - DETECTING = 0 - FOUND_IT = 1 - NOT_ME = 2 - - -class MachineState(object): - """ - This enum represents the different states a state machine can be in. - """ - START = 0 - ERROR = 1 - ITS_ME = 2 - - -class SequenceLikelihood(object): - """ - This enum represents the likelihood of a character following the previous one. - """ - NEGATIVE = 0 - UNLIKELY = 1 - LIKELY = 2 - POSITIVE = 3 - - @classmethod - def get_num_categories(cls): - """:returns: The number of likelihood categories in the enum.""" - return 4 - - -class CharacterCategory(object): - """ - This enum represents the different categories language models for - ``SingleByteCharsetProber`` put characters into. - - Anything less than CONTROL is considered a letter. - """ - UNDEFINED = 255 - LINE_BREAK = 254 - SYMBOL = 253 - DIGIT = 252 - CONTROL = 251 diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/escprober.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/escprober.py deleted file mode 100644 index c70493f..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/escprober.py +++ /dev/null @@ -1,101 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is mozilla.org code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. 
-# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .charsetprober import CharSetProber -from .codingstatemachine import CodingStateMachine -from .enums import LanguageFilter, ProbingState, MachineState -from .escsm import (HZ_SM_MODEL, ISO2022CN_SM_MODEL, ISO2022JP_SM_MODEL, - ISO2022KR_SM_MODEL) - - -class EscCharSetProber(CharSetProber): - """ - This CharSetProber uses a "code scheme" approach for detecting encodings, - whereby easily recognizable escape or shift sequences are relied on to - identify these encodings. 
- """ - - def __init__(self, lang_filter=None): - super(EscCharSetProber, self).__init__(lang_filter=lang_filter) - self.coding_sm = [] - if self.lang_filter & LanguageFilter.CHINESE_SIMPLIFIED: - self.coding_sm.append(CodingStateMachine(HZ_SM_MODEL)) - self.coding_sm.append(CodingStateMachine(ISO2022CN_SM_MODEL)) - if self.lang_filter & LanguageFilter.JAPANESE: - self.coding_sm.append(CodingStateMachine(ISO2022JP_SM_MODEL)) - if self.lang_filter & LanguageFilter.KOREAN: - self.coding_sm.append(CodingStateMachine(ISO2022KR_SM_MODEL)) - self.active_sm_count = None - self._detected_charset = None - self._detected_language = None - self._state = None - self.reset() - - def reset(self): - super(EscCharSetProber, self).reset() - for coding_sm in self.coding_sm: - if not coding_sm: - continue - coding_sm.active = True - coding_sm.reset() - self.active_sm_count = len(self.coding_sm) - self._detected_charset = None - self._detected_language = None - - @property - def charset_name(self): - return self._detected_charset - - @property - def language(self): - return self._detected_language - - def get_confidence(self): - if self._detected_charset: - return 0.99 - else: - return 0.00 - - def feed(self, byte_str): - for c in byte_str: - for coding_sm in self.coding_sm: - if not coding_sm or not coding_sm.active: - continue - coding_state = coding_sm.next_state(c) - if coding_state == MachineState.ERROR: - coding_sm.active = False - self.active_sm_count -= 1 - if self.active_sm_count <= 0: - self._state = ProbingState.NOT_ME - return self.state - elif coding_state == MachineState.ITS_ME: - self._state = ProbingState.FOUND_IT - self._detected_charset = coding_sm.get_coding_state_machine() - self._detected_language = coding_sm.language - return self.state - - return self.state diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/escsm.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/escsm.py deleted file mode 100644 
index 0069523..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/escsm.py +++ /dev/null @@ -1,246 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is mozilla.org code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .enums import MachineState - -HZ_CLS = ( -1,0,0,0,0,0,0,0, # 00 - 07 -0,0,0,0,0,0,0,0, # 08 - 0f -0,0,0,0,0,0,0,0, # 10 - 17 -0,0,0,1,0,0,0,0, # 18 - 1f -0,0,0,0,0,0,0,0, # 20 - 27 -0,0,0,0,0,0,0,0, # 28 - 2f -0,0,0,0,0,0,0,0, # 30 - 37 -0,0,0,0,0,0,0,0, # 38 - 3f -0,0,0,0,0,0,0,0, # 40 - 47 -0,0,0,0,0,0,0,0, # 48 - 4f -0,0,0,0,0,0,0,0, # 50 - 57 -0,0,0,0,0,0,0,0, # 58 - 5f -0,0,0,0,0,0,0,0, # 60 - 67 -0,0,0,0,0,0,0,0, # 68 - 6f -0,0,0,0,0,0,0,0, # 70 - 77 -0,0,0,4,0,5,2,0, # 78 - 7f -1,1,1,1,1,1,1,1, # 80 - 87 -1,1,1,1,1,1,1,1, # 88 - 8f -1,1,1,1,1,1,1,1, # 90 - 97 -1,1,1,1,1,1,1,1, # 98 - 9f -1,1,1,1,1,1,1,1, # a0 - a7 -1,1,1,1,1,1,1,1, # a8 - af -1,1,1,1,1,1,1,1, # b0 - b7 -1,1,1,1,1,1,1,1, # b8 - bf -1,1,1,1,1,1,1,1, # c0 - c7 -1,1,1,1,1,1,1,1, # c8 - cf -1,1,1,1,1,1,1,1, # d0 - d7 -1,1,1,1,1,1,1,1, # d8 - df -1,1,1,1,1,1,1,1, # e0 - e7 -1,1,1,1,1,1,1,1, # e8 - ef -1,1,1,1,1,1,1,1, # f0 - f7 -1,1,1,1,1,1,1,1, # f8 - ff -) - -HZ_ST = ( -MachineState.START,MachineState.ERROR, 3,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,# 00-07 -MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 08-0f -MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START, 4,MachineState.ERROR,# 10-17 - 5,MachineState.ERROR, 6,MachineState.ERROR, 5, 5, 4,MachineState.ERROR,# 18-1f - 4,MachineState.ERROR, 4, 4, 4,MachineState.ERROR, 4,MachineState.ERROR,# 20-27 - 
4,MachineState.ITS_ME,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 28-2f -) - -HZ_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0) - -HZ_SM_MODEL = {'class_table': HZ_CLS, - 'class_factor': 6, - 'state_table': HZ_ST, - 'char_len_table': HZ_CHAR_LEN_TABLE, - 'name': "HZ-GB-2312", - 'language': 'Chinese'} - -ISO2022CN_CLS = ( -2,0,0,0,0,0,0,0, # 00 - 07 -0,0,0,0,0,0,0,0, # 08 - 0f -0,0,0,0,0,0,0,0, # 10 - 17 -0,0,0,1,0,0,0,0, # 18 - 1f -0,0,0,0,0,0,0,0, # 20 - 27 -0,3,0,0,0,0,0,0, # 28 - 2f -0,0,0,0,0,0,0,0, # 30 - 37 -0,0,0,0,0,0,0,0, # 38 - 3f -0,0,0,4,0,0,0,0, # 40 - 47 -0,0,0,0,0,0,0,0, # 48 - 4f -0,0,0,0,0,0,0,0, # 50 - 57 -0,0,0,0,0,0,0,0, # 58 - 5f -0,0,0,0,0,0,0,0, # 60 - 67 -0,0,0,0,0,0,0,0, # 68 - 6f -0,0,0,0,0,0,0,0, # 70 - 77 -0,0,0,0,0,0,0,0, # 78 - 7f -2,2,2,2,2,2,2,2, # 80 - 87 -2,2,2,2,2,2,2,2, # 88 - 8f -2,2,2,2,2,2,2,2, # 90 - 97 -2,2,2,2,2,2,2,2, # 98 - 9f -2,2,2,2,2,2,2,2, # a0 - a7 -2,2,2,2,2,2,2,2, # a8 - af -2,2,2,2,2,2,2,2, # b0 - b7 -2,2,2,2,2,2,2,2, # b8 - bf -2,2,2,2,2,2,2,2, # c0 - c7 -2,2,2,2,2,2,2,2, # c8 - cf -2,2,2,2,2,2,2,2, # d0 - d7 -2,2,2,2,2,2,2,2, # d8 - df -2,2,2,2,2,2,2,2, # e0 - e7 -2,2,2,2,2,2,2,2, # e8 - ef -2,2,2,2,2,2,2,2, # f0 - f7 -2,2,2,2,2,2,2,2, # f8 - ff -) - -ISO2022CN_ST = ( -MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 00-07 -MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 08-0f -MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 10-17 -MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 4,MachineState.ERROR,# 18-1f 
-MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 20-27 - 5, 6,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 28-2f -MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 30-37 -MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,# 38-3f -) - -ISO2022CN_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0) - -ISO2022CN_SM_MODEL = {'class_table': ISO2022CN_CLS, - 'class_factor': 9, - 'state_table': ISO2022CN_ST, - 'char_len_table': ISO2022CN_CHAR_LEN_TABLE, - 'name': "ISO-2022-CN", - 'language': 'Chinese'} - -ISO2022JP_CLS = ( -2,0,0,0,0,0,0,0, # 00 - 07 -0,0,0,0,0,0,2,2, # 08 - 0f -0,0,0,0,0,0,0,0, # 10 - 17 -0,0,0,1,0,0,0,0, # 18 - 1f -0,0,0,0,7,0,0,0, # 20 - 27 -3,0,0,0,0,0,0,0, # 28 - 2f -0,0,0,0,0,0,0,0, # 30 - 37 -0,0,0,0,0,0,0,0, # 38 - 3f -6,0,4,0,8,0,0,0, # 40 - 47 -0,9,5,0,0,0,0,0, # 48 - 4f -0,0,0,0,0,0,0,0, # 50 - 57 -0,0,0,0,0,0,0,0, # 58 - 5f -0,0,0,0,0,0,0,0, # 60 - 67 -0,0,0,0,0,0,0,0, # 68 - 6f -0,0,0,0,0,0,0,0, # 70 - 77 -0,0,0,0,0,0,0,0, # 78 - 7f -2,2,2,2,2,2,2,2, # 80 - 87 -2,2,2,2,2,2,2,2, # 88 - 8f -2,2,2,2,2,2,2,2, # 90 - 97 -2,2,2,2,2,2,2,2, # 98 - 9f -2,2,2,2,2,2,2,2, # a0 - a7 -2,2,2,2,2,2,2,2, # a8 - af -2,2,2,2,2,2,2,2, # b0 - b7 -2,2,2,2,2,2,2,2, # b8 - bf -2,2,2,2,2,2,2,2, # c0 - c7 -2,2,2,2,2,2,2,2, # c8 - cf -2,2,2,2,2,2,2,2, # d0 - d7 -2,2,2,2,2,2,2,2, # d8 - df -2,2,2,2,2,2,2,2, # e0 - e7 -2,2,2,2,2,2,2,2, # e8 - ef -2,2,2,2,2,2,2,2, # f0 - f7 -2,2,2,2,2,2,2,2, # f8 - ff -) - -ISO2022JP_ST = ( -MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 00-07 
-MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 08-0f -MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 10-17 -MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,# 18-1f -MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 4,MachineState.ERROR,MachineState.ERROR,# 20-27 -MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 6,MachineState.ITS_ME,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,# 28-2f -MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,# 30-37 -MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 38-3f -MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.START,# 40-47 -) - -ISO2022JP_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0) - -ISO2022JP_SM_MODEL = {'class_table': ISO2022JP_CLS, - 'class_factor': 10, - 'state_table': ISO2022JP_ST, - 'char_len_table': ISO2022JP_CHAR_LEN_TABLE, - 'name': "ISO-2022-JP", - 'language': 'Japanese'} - -ISO2022KR_CLS = ( -2,0,0,0,0,0,0,0, # 00 - 07 -0,0,0,0,0,0,0,0, # 08 - 0f -0,0,0,0,0,0,0,0, # 10 - 17 -0,0,0,1,0,0,0,0, # 18 - 1f -0,0,0,0,3,0,0,0, # 20 - 27 -0,4,0,0,0,0,0,0, # 28 - 2f -0,0,0,0,0,0,0,0, # 30 - 37 -0,0,0,0,0,0,0,0, # 38 - 3f -0,0,0,5,0,0,0,0, # 40 - 47 -0,0,0,0,0,0,0,0, # 48 - 4f -0,0,0,0,0,0,0,0, # 50 - 57 -0,0,0,0,0,0,0,0, # 58 - 5f -0,0,0,0,0,0,0,0, # 60 - 67 -0,0,0,0,0,0,0,0, # 68 - 6f -0,0,0,0,0,0,0,0, # 70 - 77 -0,0,0,0,0,0,0,0, # 78 - 7f 
-2,2,2,2,2,2,2,2, # 80 - 87 -2,2,2,2,2,2,2,2, # 88 - 8f -2,2,2,2,2,2,2,2, # 90 - 97 -2,2,2,2,2,2,2,2, # 98 - 9f -2,2,2,2,2,2,2,2, # a0 - a7 -2,2,2,2,2,2,2,2, # a8 - af -2,2,2,2,2,2,2,2, # b0 - b7 -2,2,2,2,2,2,2,2, # b8 - bf -2,2,2,2,2,2,2,2, # c0 - c7 -2,2,2,2,2,2,2,2, # c8 - cf -2,2,2,2,2,2,2,2, # d0 - d7 -2,2,2,2,2,2,2,2, # d8 - df -2,2,2,2,2,2,2,2, # e0 - e7 -2,2,2,2,2,2,2,2, # e8 - ef -2,2,2,2,2,2,2,2, # f0 - f7 -2,2,2,2,2,2,2,2, # f8 - ff -) - -ISO2022KR_ST = ( -MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,# 00-07 -MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 08-0f -MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 4,MachineState.ERROR,MachineState.ERROR,# 10-17 -MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 18-1f -MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 20-27 -) - -ISO2022KR_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0) - -ISO2022KR_SM_MODEL = {'class_table': ISO2022KR_CLS, - 'class_factor': 6, - 'state_table': ISO2022KR_ST, - 'char_len_table': ISO2022KR_CHAR_LEN_TABLE, - 'name': "ISO-2022-KR", - 'language': 'Korean'} - - diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/eucjpprober.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/eucjpprober.py deleted file mode 100644 index 20ce8f7..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/eucjpprober.py +++ /dev/null @@ -1,92 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is mozilla.org code. 
-# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .enums import ProbingState, MachineState -from .mbcharsetprober import MultiByteCharSetProber -from .codingstatemachine import CodingStateMachine -from .chardistribution import EUCJPDistributionAnalysis -from .jpcntx import EUCJPContextAnalysis -from .mbcssm import EUCJP_SM_MODEL - - -class EUCJPProber(MultiByteCharSetProber): - def __init__(self): - super(EUCJPProber, self).__init__() - self.coding_sm = CodingStateMachine(EUCJP_SM_MODEL) - self.distribution_analyzer = EUCJPDistributionAnalysis() - self.context_analyzer = EUCJPContextAnalysis() - self.reset() - - def reset(self): - super(EUCJPProber, self).reset() - self.context_analyzer.reset() - - @property - def charset_name(self): - return "EUC-JP" - - @property - def language(self): - return "Japanese" - - def feed(self, byte_str): - for i in range(len(byte_str)): - # PY3K: byte_str is a byte array, so byte_str[i] is an int, not a byte - coding_state 
= self.coding_sm.next_state(byte_str[i]) - if coding_state == MachineState.ERROR: - self.logger.debug('%s %s prober hit error at byte %s', - self.charset_name, self.language, i) - self._state = ProbingState.NOT_ME - break - elif coding_state == MachineState.ITS_ME: - self._state = ProbingState.FOUND_IT - break - elif coding_state == MachineState.START: - char_len = self.coding_sm.get_current_charlen() - if i == 0: - self._last_char[1] = byte_str[0] - self.context_analyzer.feed(self._last_char, char_len) - self.distribution_analyzer.feed(self._last_char, char_len) - else: - self.context_analyzer.feed(byte_str[i - 1:i + 1], - char_len) - self.distribution_analyzer.feed(byte_str[i - 1:i + 1], - char_len) - - self._last_char[0] = byte_str[-1] - - if self.state == ProbingState.DETECTING: - if (self.context_analyzer.got_enough_data() and - (self.get_confidence() > self.SHORTCUT_THRESHOLD)): - self._state = ProbingState.FOUND_IT - - return self.state - - def get_confidence(self): - context_conf = self.context_analyzer.get_confidence() - distrib_conf = self.distribution_analyzer.get_confidence() - return max(context_conf, distrib_conf) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/euckrfreq.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/euckrfreq.py deleted file mode 100644 index b68078c..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/euckrfreq.py +++ /dev/null @@ -1,195 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Communicator client code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. 
-# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -# Sampling from about 20M text materials include literature and computer technology - -# 128 --> 0.79 -# 256 --> 0.92 -# 512 --> 0.986 -# 1024 --> 0.99944 -# 2048 --> 0.99999 -# -# Idea Distribution Ratio = 0.98653 / (1-0.98653) = 73.24 -# Random Distribution Ration = 512 / (2350-512) = 0.279. 
-# -# Typical Distribution Ratio - -EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0 - -EUCKR_TABLE_SIZE = 2352 - -# Char to FreqOrder table , -EUCKR_CHAR_TO_FREQ_ORDER = ( - 13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87, -1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398, -1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734, - 945,1400,1735, 47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739, - 116, 987, 813,1401, 683, 75,1204, 145,1740,1741,1742,1743, 16, 847, 667, 622, - 708,1744,1745,1746, 966, 787, 304, 129,1747, 60, 820, 123, 676,1748,1749,1750, -1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856, - 344,1763,1764,1765,1766, 89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205, - 709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779, -1780, 337, 751,1058, 28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782, 19, -1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567, -1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797, -1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802, -1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899, - 885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818, -1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409, -1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697, -1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770, -1412,1837,1838, 39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723, - 544,1023,1081, 869, 91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416, -1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300, - 119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083, - 893,1853,1854,1855, 367, 809, 487, 
671,1856, 663,1857,1858, 956, 471, 306, 857, -1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871, - 282, 96, 574,1872, 502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420, -1421, 268,1877,1422,1878,1879,1880, 308,1881, 2, 537,1882,1883,1215,1884,1885, - 127, 791,1886,1273,1423,1887, 34, 336, 404, 643,1888, 571, 654, 894, 840,1889, - 0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893, -1894,1123, 48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317, -1899, 694,1900, 909, 734,1424, 572, 866,1425, 691, 85, 524,1010, 543, 394, 841, -1901,1902,1903,1026,1904,1905,1906,1907,1908,1909, 30, 451, 651, 988, 310,1910, -1911,1426, 810,1216, 93,1912,1913,1277,1217,1914, 858, 759, 45, 58, 181, 610, - 269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375, -1919, 359,1920, 687,1921, 822,1922, 293,1923,1924, 40, 662, 118, 692, 29, 939, - 887, 640, 482, 174,1925, 69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870, - 217, 854,1163, 823,1927,1928,1929,1930, 834,1931, 78,1932, 859,1933,1063,1934, -1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888, -1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950, -1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065, -1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002, -1283,1222,1960,1961,1962,1963, 36, 383, 228, 753, 247, 454,1964, 876, 678,1965, -1966,1284, 126, 464, 490, 835, 136, 672, 529, 940,1088,1435, 473,1967,1968, 467, - 50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285, - 639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971, 7, - 103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979, -1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985, - 818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994, -1995, 
560, 223,1287, 98, 8, 189, 650, 978,1288,1996,1437,1997, 17, 345, 250, - 423, 277, 234, 512, 226, 97, 289, 42, 167,1998, 201,1999,2000, 843, 836, 824, - 532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003, -2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008, 71,1440, 745, - 619, 688,2009, 829,2010,2011, 147,2012, 33, 948,2013,2014, 74, 224,2015, 61, - 191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023, -2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591, 52, 724, 246,2031,2032, -2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912, -2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224, - 719,1170, 959, 440, 437, 534, 84, 388, 480,1131, 159, 220, 198, 679,2044,1012, - 819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050, -2051,2052,2053, 59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681, - 499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414, -1444,2064,2065, 41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068, -2069,1292,2070,2071,1445,2072,1446,2073,2074, 55, 588, 66,1447, 271,1092,2075, -1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850, -2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606, -2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449, -1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452, - 949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112, -2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121, -2122,2123,2124, 720, 701,1297, 992,1457, 927,1004,2125,2126,2127,2128,2129,2130, - 22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174, 73,1096, 231, 274, - 962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139, -2141,2142,2143,2144, 11, 374, 844,2145, 154,1232, 
46,1461,2146, 838, 830, 721, -1233, 106,2147, 90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298, -2150,1462, 761, 565,2151, 686,2152, 649,2153, 72, 173,2154, 460, 415,2155,1463, -2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747, -2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177, 23, 530, 285, -2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187, -2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193, 10, -2194, 613, 424,2195, 979, 108, 449, 589, 27, 172, 81,1031, 80, 774, 281, 350, -1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201, -2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972, -2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219, -2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233, -2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242, -2243, 521, 486, 548,2244,2245,2246,1473,1300, 53, 549, 137, 875, 76, 158,2247, -1301,1474, 469, 396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178, -1475,2249, 82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255, -2256, 18, 450, 206,2257, 290, 292,1142,2258, 511, 162, 99, 346, 164, 735,2259, -1476,1477, 4, 554, 343, 798,1099,2260,1100,2261, 43, 171,1303, 139, 215,2262, -2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702, -1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272, 67,2273, - 295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541, -2282,2283,2284,2285,2286, 70, 852,1071,2287,2288,2289,2290, 21, 56, 509, 117, - 432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187, -2294,1046,1479,2295, 340,2296, 63,1047, 230,2297,2298,1305, 763,1306, 101, 800, - 808, 494,2299,2300,2301, 903,2302, 37,1072, 14, 5,2303, 79, 675,2304, 312, 
-2305,2306,2307,2308,2309,1480, 6,1307,2310,2311,2312, 1, 470, 35, 24, 229, -2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964,2314, 259,2315, - 501, 380,2316,2317, 83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484, -2320,2321,2322,2323,2324,2325,1485,2326,2327, 128, 57, 68, 261,1048, 211, 170, -1240, 31,2328, 51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335, - 425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601, -1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395, -2351,1490,1491, 62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354, -1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476, -2361,2362, 332, 12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035, - 416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498, -2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310, -1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389, -2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504, -1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505, -2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145, -1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624, - 593, 921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700, -2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221, -2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377, - 644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448, - 915, 489,2449,1514,1184,2450,2451, 515, 64, 427, 495,2452, 583,2453, 483, 485, -1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705, -1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465, - 291,2466,1318,1105,2467,1517, 365,2468, 355, 
951,1244,2469,1319,2470, 631,2471, -2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997, -2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 684,2485,2486, - 797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187, 65,2494, - 434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771, - 585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323, -2499,2500, 49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491, - 95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510, - 161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519, -2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532, -2533, 25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199, - 704, 504, 468, 758, 657,1528, 196, 44, 839,1246, 272, 750,2543, 765, 862,2544, -2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247, -1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441, - 249,1075,2556,2557,2558, 466, 743,2559,2560,2561, 92, 514, 426, 420, 526,2562, -2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362, -2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583, -2584,1532, 54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465, - 3, 458, 9, 38,2588, 107, 110, 890, 209, 26, 737, 498,2589,1534,2590, 431, - 202, 88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151, - 974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596, -2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601, 94, 175, 197, 406, -2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611, -2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619, -1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628, 
-2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042, - 670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256 -) - diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/euckrprober.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/euckrprober.py deleted file mode 100644 index 345a060..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/euckrprober.py +++ /dev/null @@ -1,47 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is mozilla.org code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .mbcharsetprober import MultiByteCharSetProber -from .codingstatemachine import CodingStateMachine -from .chardistribution import EUCKRDistributionAnalysis -from .mbcssm import EUCKR_SM_MODEL - - -class EUCKRProber(MultiByteCharSetProber): - def __init__(self): - super(EUCKRProber, self).__init__() - self.coding_sm = CodingStateMachine(EUCKR_SM_MODEL) - self.distribution_analyzer = EUCKRDistributionAnalysis() - self.reset() - - @property - def charset_name(self): - return "EUC-KR" - - @property - def language(self): - return "Korean" diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/euctwfreq.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/euctwfreq.py deleted file mode 100644 index ed7a995..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/euctwfreq.py +++ /dev/null @@ -1,387 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Communicator client code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. 
-# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -# EUCTW frequency table -# Converted from big5 work -# by Taiwan's Mandarin Promotion Council -# <http:#www.edu.tw:81/mandr/> - -# 128 --> 0.42261 -# 256 --> 0.57851 -# 512 --> 0.74851 -# 1024 --> 0.89384 -# 2048 --> 0.97583 -# -# Idea Distribution Ratio = 0.74851/(1-0.74851) =2.98 -# Random Distribution Ration = 512/(5401-512)=0.105 -# -# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR - -EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75 - -# Char to FreqOrder table , -EUCTW_TABLE_SIZE = 5376 - -EUCTW_CHAR_TO_FREQ_ORDER = ( - 1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742 -3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758 -1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774 - 63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790 -3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806 -4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822 -7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838 - 630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854 - 179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870 - 995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886 -2495, 90,2707,1338, 663, 11, 
906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902 -1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918 -3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934 - 706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950 -1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966 -3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982 -2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998 - 437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014 -3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030 -1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046 -7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062 - 266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078 -7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094 -1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110 - 32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126 - 188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142 -3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158 -3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174 - 324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190 -2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206 -2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222 - 314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238 - 287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254 
-3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270 -1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286 -1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302 -1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318 -2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334 - 265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350 -4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366 -1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382 -7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398 -2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414 - 383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430 - 98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446 - 523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462 - 710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478 -7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494 - 379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510 -1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526 - 585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542 - 690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558 -7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574 -1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590 - 544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606 -3072,7382,3420,2856,7383,2053, 973, 886,2080, 
143,4325,7384,7385, 157,3886, 496, # 3622 -4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638 -3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654 - 279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670 - 610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686 -1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702 -4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718 -3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734 -3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750 -2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766 -7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782 -3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798 -7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814 -1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830 -2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846 -1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862 - 78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878 -1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894 -4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910 -3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926 - 534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942 - 165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958 - 626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974 
-2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990 -7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006 -1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022 -2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038 -1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054 -1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070 -7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086 -7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102 -7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118 -3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134 -4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150 -1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166 -7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182 -2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198 -7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214 -3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230 -3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246 -7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262 -2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278 -7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294 - 862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310 -4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326 -2683, 733, 40,1632,1106,2865, 345,4113, 
841,2524, 230,4364,2984,1846,3259,3428, # 4342 -7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358 -3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374 -2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390 -2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406 - 294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422 -2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438 -1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454 -1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470 -2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486 -1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502 -7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518 -7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534 -2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550 -4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566 -1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582 -7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598 - 829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614 -4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630 - 375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646 -2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662 - 444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678 -1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 
4694 -1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710 - 730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726 -3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742 -3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758 -1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774 -3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790 -7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806 -7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822 -1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838 -2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854 -1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870 -3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886 -2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902 -3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918 -2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934 -4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950 -4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966 -3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982 - 97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998 -3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014 - 424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030 -3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046 -3929,3338,7575, 
513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062 -3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078 -1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094 -7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110 - 199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126 -7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142 -1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158 - 391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174 -4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190 -3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206 - 397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222 -2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238 -2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254 -3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270 -1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286 -4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302 -2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318 -1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334 -1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350 -2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366 -3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382 -1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398 -7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 
95,1504,3946, 723,4159,3141, # 5414 -1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430 -4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446 -1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462 - 135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478 -1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494 -3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510 -3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526 -2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542 -1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558 -4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574 - 660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590 -7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606 -2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622 -3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638 -4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654 - 790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670 -7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686 -7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702 -1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718 -4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734 -3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750 -2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766 
-3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782 -3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798 -2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814 -1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830 -4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846 -3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862 -3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878 -2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894 -4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910 -7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926 -3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942 -2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958 -3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974 -1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990 -2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006 -3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022 -4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038 -2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054 -2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070 -7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086 -1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102 -2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118 -1618,3357,2999,1886, 
944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134 -3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150 -4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166 -2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182 -3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198 -3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214 -2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230 -4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246 -2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262 -3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278 -4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294 -7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310 -3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326 - 194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342 -1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358 -4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374 -1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390 -4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406 -7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422 - 510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438 -7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454 -2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470 -1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 
136,1468,1479, 672, # 6486 -1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502 -3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518 - 509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534 - 552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550 - 478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566 -3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582 -2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598 - 751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614 -7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630 -1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646 -3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662 -7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678 -1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694 -7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710 -4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726 -1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742 -2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758 -2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774 -4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790 - 802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806 - 809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822 -3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838 
-3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854 -1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870 -2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886 -7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902 -1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918 -1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934 -3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950 - 919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966 -1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982 -4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998 -7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014 -2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030 -3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046 - 516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062 -1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078 -2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094 -2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110 -7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126 -7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142 -7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158 -2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174 -2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190 
-1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206 -4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222 -3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238 -3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254 -4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270 -4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286 -2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302 -2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318 -7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334 -4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350 -7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366 -2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382 -1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398 -3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414 -4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430 -2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446 - 120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462 -2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478 -1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494 -2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510 -2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526 -4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542 
-7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558 -1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574 -3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590 -7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606 -1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622 -8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638 -2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654 -8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670 -2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686 -2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702 -8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718 -8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734 -8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750 - 408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766 -8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782 -4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798 -3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814 -8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830 -1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846 -8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862 - 425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878 -1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894 - 479,8058,8059, 
832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910 -4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926 -1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942 -4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958 -1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974 - 433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990 -3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006 -4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022 -8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038 - 938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054 -3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070 - 890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086 -2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102 -) - diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/euctwprober.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/euctwprober.py deleted file mode 100644 index 35669cc..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/euctwprober.py +++ /dev/null @@ -1,46 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is mozilla.org code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. 
-# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .mbcharsetprober import MultiByteCharSetProber -from .codingstatemachine import CodingStateMachine -from .chardistribution import EUCTWDistributionAnalysis -from .mbcssm import EUCTW_SM_MODEL - -class EUCTWProber(MultiByteCharSetProber): - def __init__(self): - super(EUCTWProber, self).__init__() - self.coding_sm = CodingStateMachine(EUCTW_SM_MODEL) - self.distribution_analyzer = EUCTWDistributionAnalysis() - self.reset() - - @property - def charset_name(self): - return "EUC-TW" - - @property - def language(self): - return "Taiwan" diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/gb2312freq.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/gb2312freq.py deleted file mode 100644 index 697837b..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/gb2312freq.py +++ /dev/null @@ -1,283 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Communicator client code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. 
-# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -# GB2312 most frequently used character table -# -# Char to FreqOrder table , from hz6763 - -# 512 --> 0.79 -- 0.79 -# 1024 --> 0.92 -- 0.13 -# 2048 --> 0.98 -- 0.06 -# 6768 --> 1.00 -- 0.02 -# -# Ideal Distribution Ratio = 0.79135/(1-0.79135) = 3.79 -# Random Distribution Ration = 512 / (3755 - 512) = 0.157 -# -# Typical Distribution Ratio about 25% of Ideal one, still much higher that RDR - -GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9 - -GB2312_TABLE_SIZE = 3760 - -GB2312_CHAR_TO_FREQ_ORDER = ( -1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205, -2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842, -2204, 869,4207, 970,2678,5626,2944,2956,1479,4048, 514,3595, 588,1346,2820,3409, - 249,4088,1746,1873,2047,1774, 581,1813, 358,1174,3590,1014,1561,4844,2245, 670, -1636,3112, 889,1286, 953, 556,2327,3060,1290,3141, 613, 185,3477,1367, 850,3820, -1715,2428,2642,2303,2732,3041,2562,2648,3566,3946,1349, 388,3098,2091,1360,3585, - 152,1687,1539, 
738,1559, 59,1232,2925,2267,1388,1249,1741,1679,2960, 151,1566, -1125,1352,4271, 924,4296, 385,3166,4459, 310,1245,2850, 70,3285,2729,3534,3575, -2398,3298,3466,1960,2265, 217,3647, 864,1909,2084,4401,2773,1010,3269,5152, 853, -3051,3121,1244,4251,1895, 364,1499,1540,2313,1180,3655,2268, 562, 715,2417,3061, - 544, 336,3768,2380,1752,4075, 950, 280,2425,4382, 183,2759,3272, 333,4297,2155, -1688,2356,1444,1039,4540, 736,1177,3349,2443,2368,2144,2225, 565, 196,1482,3406, - 927,1335,4147, 692, 878,1311,1653,3911,3622,1378,4200,1840,2969,3149,2126,1816, -2534,1546,2393,2760, 737,2494, 13, 447, 245,2747, 38,2765,2129,2589,1079, 606, - 360, 471,3755,2890, 404, 848, 699,1785,1236, 370,2221,1023,3746,2074,2026,2023, -2388,1581,2119, 812,1141,3091,2536,1519, 804,2053, 406,1596,1090, 784, 548,4414, -1806,2264,2936,1100, 343,4114,5096, 622,3358, 743,3668,1510,1626,5020,3567,2513, -3195,4115,5627,2489,2991, 24,2065,2697,1087,2719, 48,1634, 315, 68, 985,2052, - 198,2239,1347,1107,1439, 597,2366,2172, 871,3307, 919,2487,2790,1867, 236,2570, -1413,3794, 906,3365,3381,1701,1982,1818,1524,2924,1205, 616,2586,2072,2004, 575, - 253,3099, 32,1365,1182, 197,1714,2454,1201, 554,3388,3224,2748, 756,2587, 250, -2567,1507,1517,3529,1922,2761,2337,3416,1961,1677,2452,2238,3153, 615, 911,1506, -1474,2495,1265,1906,2749,3756,3280,2161, 898,2714,1759,3450,2243,2444, 563, 26, -3286,2266,3769,3344,2707,3677, 611,1402, 531,1028,2871,4548,1375, 261,2948, 835, -1190,4134, 353, 840,2684,1900,3082,1435,2109,1207,1674, 329,1872,2781,4055,2686, -2104, 608,3318,2423,2957,2768,1108,3739,3512,3271,3985,2203,1771,3520,1418,2054, -1681,1153, 225,1627,2929, 162,2050,2511,3687,1954, 124,1859,2431,1684,3032,2894, - 585,4805,3969,2869,2704,2088,2032,2095,3656,2635,4362,2209, 256, 518,2042,2105, -3777,3657, 643,2298,1148,1779, 190, 989,3544, 414, 11,2135,2063,2979,1471, 403, -3678, 126, 770,1563, 671,2499,3216,2877, 600,1179, 307,2805,4937,1268,1297,2694, - 252,4032,1448,1494,1331,1394, 127,2256, 
222,1647,1035,1481,3056,1915,1048, 873, -3651, 210, 33,1608,2516, 200,1520, 415, 102, 0,3389,1287, 817, 91,3299,2940, - 836,1814, 549,2197,1396,1669,2987,3582,2297,2848,4528,1070, 687, 20,1819, 121, -1552,1364,1461,1968,2617,3540,2824,2083, 177, 948,4938,2291, 110,4549,2066, 648, -3359,1755,2110,2114,4642,4845,1693,3937,3308,1257,1869,2123, 208,1804,3159,2992, -2531,2549,3361,2418,1350,2347,2800,2568,1291,2036,2680, 72, 842,1990, 212,1233, -1154,1586, 75,2027,3410,4900,1823,1337,2710,2676, 728,2810,1522,3026,4995, 157, - 755,1050,4022, 710, 785,1936,2194,2085,1406,2777,2400, 150,1250,4049,1206, 807, -1910, 534, 529,3309,1721,1660, 274, 39,2827, 661,2670,1578, 925,3248,3815,1094, -4278,4901,4252, 41,1150,3747,2572,2227,4501,3658,4902,3813,3357,3617,2884,2258, - 887, 538,4187,3199,1294,2439,3042,2329,2343,2497,1255, 107, 543,1527, 521,3478, -3568, 194,5062, 15, 961,3870,1241,1192,2664, 66,5215,3260,2111,1295,1127,2152, -3805,4135, 901,1164,1976, 398,1278, 530,1460, 748, 904,1054,1966,1426, 53,2909, - 509, 523,2279,1534, 536,1019, 239,1685, 460,2353, 673,1065,2401,3600,4298,2272, -1272,2363, 284,1753,3679,4064,1695, 81, 815,2677,2757,2731,1386, 859, 500,4221, -2190,2566, 757,1006,2519,2068,1166,1455, 337,2654,3203,1863,1682,1914,3025,1252, -1409,1366, 847, 714,2834,2038,3209, 964,2970,1901, 885,2553,1078,1756,3049, 301, -1572,3326, 688,2130,1996,2429,1805,1648,2930,3421,2750,3652,3088, 262,1158,1254, - 389,1641,1812, 526,1719, 923,2073,1073,1902, 468, 489,4625,1140, 857,2375,3070, -3319,2863, 380, 116,1328,2693,1161,2244, 273,1212,1884,2769,3011,1775,1142, 461, -3066,1200,2147,2212, 790, 702,2695,4222,1601,1058, 434,2338,5153,3640, 67,2360, -4099,2502, 618,3472,1329, 416,1132, 830,2782,1807,2653,3211,3510,1662, 192,2124, - 296,3979,1739,1611,3684, 23, 118, 324, 446,1239,1225, 293,2520,3814,3795,2535, -3116, 17,1074, 467,2692,2201, 387,2922, 45,1326,3055,1645,3659,2817, 958, 243, -1903,2320,1339,2825,1784,3289, 356, 576, 865,2315,2381,3377,3916,1088,3122,1713, -1655, 
935, 628,4689,1034,1327, 441, 800, 720, 894,1979,2183,1528,5289,2702,1071, -4046,3572,2399,1571,3281, 79, 761,1103, 327, 134, 758,1899,1371,1615, 879, 442, - 215,2605,2579, 173,2048,2485,1057,2975,3317,1097,2253,3801,4263,1403,1650,2946, - 814,4968,3487,1548,2644,1567,1285, 2, 295,2636, 97, 946,3576, 832, 141,4257, -3273, 760,3821,3521,3156,2607, 949,1024,1733,1516,1803,1920,2125,2283,2665,3180, -1501,2064,3560,2171,1592, 803,3518,1416, 732,3897,4258,1363,1362,2458, 119,1427, - 602,1525,2608,1605,1639,3175, 694,3064, 10, 465, 76,2000,4846,4208, 444,3781, -1619,3353,2206,1273,3796, 740,2483, 320,1723,2377,3660,2619,1359,1137,1762,1724, -2345,2842,1850,1862, 912, 821,1866, 612,2625,1735,2573,3369,1093, 844, 89, 937, - 930,1424,3564,2413,2972,1004,3046,3019,2011, 711,3171,1452,4178, 428, 801,1943, - 432, 445,2811, 206,4136,1472, 730, 349, 73, 397,2802,2547, 998,1637,1167, 789, - 396,3217, 154,1218, 716,1120,1780,2819,4826,1931,3334,3762,2139,1215,2627, 552, -3664,3628,3232,1405,2383,3111,1356,2652,3577,3320,3101,1703, 640,1045,1370,1246, -4996, 371,1575,2436,1621,2210, 984,4033,1734,2638, 16,4529, 663,2755,3255,1451, -3917,2257,1253,1955,2234,1263,2951, 214,1229, 617, 485, 359,1831,1969, 473,2310, - 750,2058, 165, 80,2864,2419, 361,4344,2416,2479,1134, 796,3726,1266,2943, 860, -2715, 938, 390,2734,1313,1384, 248, 202, 877,1064,2854, 522,3907, 279,1602, 297, -2357, 395,3740, 137,2075, 944,4089,2584,1267,3802, 62,1533,2285, 178, 176, 780, -2440, 201,3707, 590, 478,1560,4354,2117,1075, 30, 74,4643,4004,1635,1441,2745, - 776,2596, 238,1077,1692,1912,2844, 605, 499,1742,3947, 241,3053, 980,1749, 936, -2640,4511,2582, 515,1543,2162,5322,2892,2993, 890,2148,1924, 665,1827,3581,1032, - 968,3163, 339,1044,1896, 270, 583,1791,1720,4367,1194,3488,3669, 43,2523,1657, - 163,2167, 290,1209,1622,3378, 550, 634,2508,2510, 695,2634,2384,2512,1476,1414, - 220,1469,2341,2138,2852,3183,2900,4939,2865,3502,1211,3680, 854,3227,1299,2976, -3172, 186,2998,1459, 443,1067,3251,1495, 
321,1932,3054, 909, 753,1410,1828, 436, -2441,1119,1587,3164,2186,1258, 227, 231,1425,1890,3200,3942, 247, 959, 725,5254, -2741, 577,2158,2079, 929, 120, 174, 838,2813, 591,1115, 417,2024, 40,3240,1536, -1037, 291,4151,2354, 632,1298,2406,2500,3535,1825,1846,3451, 205,1171, 345,4238, - 18,1163, 811, 685,2208,1217, 425,1312,1508,1175,4308,2552,1033, 587,1381,3059, -2984,3482, 340,1316,4023,3972, 792,3176, 519, 777,4690, 918, 933,4130,2981,3741, - 90,3360,2911,2200,5184,4550, 609,3079,2030, 272,3379,2736, 363,3881,1130,1447, - 286, 779, 357,1169,3350,3137,1630,1220,2687,2391, 747,1277,3688,2618,2682,2601, -1156,3196,5290,4034,3102,1689,3596,3128, 874, 219,2783, 798, 508,1843,2461, 269, -1658,1776,1392,1913,2983,3287,2866,2159,2372, 829,4076, 46,4253,2873,1889,1894, - 915,1834,1631,2181,2318, 298, 664,2818,3555,2735, 954,3228,3117, 527,3511,2173, - 681,2712,3033,2247,2346,3467,1652, 155,2164,3382, 113,1994, 450, 899, 494, 994, -1237,2958,1875,2336,1926,3727, 545,1577,1550, 633,3473, 204,1305,3072,2410,1956, -2471, 707,2134, 841,2195,2196,2663,3843,1026,4940, 990,3252,4997, 368,1092, 437, -3212,3258,1933,1829, 675,2977,2893, 412, 943,3723,4644,3294,3283,2230,2373,5154, -2389,2241,2661,2323,1404,2524, 593, 787, 677,3008,1275,2059, 438,2709,2609,2240, -2269,2246,1446, 36,1568,1373,3892,1574,2301,1456,3962, 693,2276,5216,2035,1143, -2720,1919,1797,1811,2763,4137,2597,1830,1699,1488,1198,2090, 424,1694, 312,3634, -3390,4179,3335,2252,1214, 561,1059,3243,2295,2561, 975,5155,2321,2751,3772, 472, -1537,3282,3398,1047,2077,2348,2878,1323,3340,3076, 690,2906, 51, 369, 170,3541, -1060,2187,2688,3670,2541,1083,1683, 928,3918, 459, 109,4427, 599,3744,4286, 143, -2101,2730,2490, 82,1588,3036,2121, 281,1860, 477,4035,1238,2812,3020,2716,3312, -1530,2188,2055,1317, 843, 636,1808,1173,3495, 649, 181,1002, 147,3641,1159,2414, -3750,2289,2795, 813,3123,2610,1136,4368, 5,3391,4541,2174, 420, 429,1728, 754, -1228,2115,2219, 347,2223,2733, 735,1518,3003,2355,3134,1764,3948,3329,1888,2424, 
-1001,1234,1972,3321,3363,1672,1021,1450,1584, 226, 765, 655,2526,3404,3244,2302, -3665, 731, 594,2184, 319,1576, 621, 658,2656,4299,2099,3864,1279,2071,2598,2739, - 795,3086,3699,3908,1707,2352,2402,1382,3136,2475,1465,4847,3496,3865,1085,3004, -2591,1084, 213,2287,1963,3565,2250, 822, 793,4574,3187,1772,1789,3050, 595,1484, -1959,2770,1080,2650, 456, 422,2996, 940,3322,4328,4345,3092,2742, 965,2784, 739, -4124, 952,1358,2498,2949,2565, 332,2698,2378, 660,2260,2473,4194,3856,2919, 535, -1260,2651,1208,1428,1300,1949,1303,2942, 433,2455,2450,1251,1946, 614,1269, 641, -1306,1810,2737,3078,2912, 564,2365,1419,1415,1497,4460,2367,2185,1379,3005,1307, -3218,2175,1897,3063, 682,1157,4040,4005,1712,1160,1941,1399, 394, 402,2952,1573, -1151,2986,2404, 862, 299,2033,1489,3006, 346, 171,2886,3401,1726,2932, 168,2533, - 47,2507,1030,3735,1145,3370,1395,1318,1579,3609,4560,2857,4116,1457,2529,1965, - 504,1036,2690,2988,2405, 745,5871, 849,2397,2056,3081, 863,2359,3857,2096, 99, -1397,1769,2300,4428,1643,3455,1978,1757,3718,1440, 35,4879,3742,1296,4228,2280, - 160,5063,1599,2013, 166, 520,3479,1646,3345,3012, 490,1937,1545,1264,2182,2505, -1096,1188,1369,1436,2421,1667,2792,2460,1270,2122, 727,3167,2143, 806,1706,1012, -1800,3037, 960,2218,1882, 805, 139,2456,1139,1521, 851,1052,3093,3089, 342,2039, - 744,5097,1468,1502,1585,2087, 223, 939, 326,2140,2577, 892,2481,1623,4077, 982, -3708, 135,2131, 87,2503,3114,2326,1106, 876,1616, 547,2997,2831,2093,3441,4530, -4314, 9,3256,4229,4148, 659,1462,1986,1710,2046,2913,2231,4090,4880,5255,3392, -3274,1368,3689,4645,1477, 705,3384,3635,1068,1529,2941,1458,3782,1509, 100,1656, -2548, 718,2339, 408,1590,2780,3548,1838,4117,3719,1345,3530, 717,3442,2778,3220, -2898,1892,4590,3614,3371,2043,1998,1224,3483, 891, 635, 584,2559,3355, 733,1766, -1729,1172,3789,1891,2307, 781,2982,2271,1957,1580,5773,2633,2005,4195,3097,1535, -3213,1189,1934,5693,3262, 586,3118,1324,1598, 517,1564,2217,1868,1893,4445,3728, 
-2703,3139,1526,1787,1992,3882,2875,1549,1199,1056,2224,1904,2711,5098,4287, 338, -1993,3129,3489,2689,1809,2815,1997, 957,1855,3898,2550,3275,3057,1105,1319, 627, -1505,1911,1883,3526, 698,3629,3456,1833,1431, 746, 77,1261,2017,2296,1977,1885, - 125,1334,1600, 525,1798,1109,2222,1470,1945, 559,2236,1186,3443,2476,1929,1411, -2411,3135,1777,3372,2621,1841,1613,3229, 668,1430,1839,2643,2916, 195,1989,2671, -2358,1387, 629,3205,2293,5256,4439, 123,1310, 888,1879,4300,3021,3605,1003,1162, -3192,2910,2010, 140,2395,2859, 55,1082,2012,2901, 662, 419,2081,1438, 680,2774, -4654,3912,1620,1731,1625,5035,4065,2328, 512,1344, 802,5443,2163,2311,2537, 524, -3399, 98,1155,2103,1918,2606,3925,2816,1393,2465,1504,3773,2177,3963,1478,4346, - 180,1113,4655,3461,2028,1698, 833,2696,1235,1322,1594,4408,3623,3013,3225,2040, -3022, 541,2881, 607,3632,2029,1665,1219, 639,1385,1686,1099,2803,3231,1938,3188, -2858, 427, 676,2772,1168,2025, 454,3253,2486,3556, 230,1950, 580, 791,1991,1280, -1086,1974,2034, 630, 257,3338,2788,4903,1017, 86,4790, 966,2789,1995,1696,1131, - 259,3095,4188,1308, 179,1463,5257, 289,4107,1248, 42,3413,1725,2288, 896,1947, - 774,4474,4254, 604,3430,4264, 392,2514,2588, 452, 237,1408,3018, 988,4531,1970, -3034,3310, 540,2370,1562,1288,2990, 502,4765,1147, 4,1853,2708, 207, 294,2814, -4078,2902,2509, 684, 34,3105,3532,2551, 644, 709,2801,2344, 573,1727,3573,3557, -2021,1081,3100,4315,2100,3681, 199,2263,1837,2385, 146,3484,1195,2776,3949, 997, -1939,3973,1008,1091,1202,1962,1847,1149,4209,5444,1076, 493, 117,5400,2521, 972, -1490,2934,1796,4542,2374,1512,2933,2657, 413,2888,1135,2762,2314,2156,1355,2369, - 766,2007,2527,2170,3124,2491,2593,2632,4757,2437, 234,3125,3591,1898,1750,1376, -1942,3468,3138, 570,2127,2145,3276,4131, 962, 132,1445,4196, 19, 941,3624,3480, -3366,1973,1374,4461,3431,2629, 283,2415,2275, 808,2887,3620,2112,2563,1353,3610, - 955,1089,3103,1053, 96, 88,4097, 823,3808,1583, 399, 292,4091,3313, 421,1128, - 642,4006, 903,2539,1877,2082, 596, 
29,4066,1790, 722,2157, 130, 995,1569, 769, -1485, 464, 513,2213, 288,1923,1101,2453,4316, 133, 486,2445, 50, 625, 487,2207, - 57, 423, 481,2962, 159,3729,1558, 491, 303, 482, 501, 240,2837, 112,3648,2392, -1783, 362, 8,3433,3422, 610,2793,3277,1390,1284,1654, 21,3823, 734, 367, 623, - 193, 287, 374,1009,1483, 816, 476, 313,2255,2340,1262,2150,2899,1146,2581, 782, -2116,1659,2018,1880, 255,3586,3314,1110,2867,2137,2564, 986,2767,5185,2006, 650, - 158, 926, 762, 881,3157,2717,2362,3587, 306,3690,3245,1542,3077,2427,1691,2478, -2118,2985,3490,2438, 539,2305, 983, 129,1754, 355,4201,2386, 827,2923, 104,1773, -2838,2771, 411,2905,3919, 376, 767, 122,1114, 828,2422,1817,3506, 266,3460,1007, -1609,4998, 945,2612,4429,2274, 726,1247,1964,2914,2199,2070,4002,4108, 657,3323, -1422, 579, 455,2764,4737,1222,2895,1670, 824,1223,1487,2525, 558, 861,3080, 598, -2659,2515,1967, 752,2583,2376,2214,4180, 977, 704,2464,4999,2622,4109,1210,2961, - 819,1541, 142,2284, 44, 418, 457,1126,3730,4347,4626,1644,1876,3671,1864, 302, -1063,5694, 624, 723,1984,3745,1314,1676,2488,1610,1449,3558,3569,2166,2098, 409, -1011,2325,3704,2306, 818,1732,1383,1824,1844,3757, 999,2705,3497,1216,1423,2683, -2426,2954,2501,2726,2229,1475,2554,5064,1971,1794,1666,2014,1343, 783, 724, 191, -2434,1354,2220,5065,1763,2752,2472,4152, 131, 175,2885,3434, 92,1466,4920,2616, -3871,3872,3866, 128,1551,1632, 669,1854,3682,4691,4125,1230, 188,2973,3290,1302, -1213, 560,3266, 917, 763,3909,3249,1760, 868,1958, 764,1782,2097, 145,2277,3774, -4462, 64,1491,3062, 971,2132,3606,2442, 221,1226,1617, 218, 323,1185,3207,3147, - 571, 619,1473,1005,1744,2281, 449,1887,2396,3685, 275, 375,3816,1743,3844,3731, - 845,1983,2350,4210,1377, 773, 967,3499,3052,3743,2725,4007,1697,1022,3943,1464, -3264,2855,2722,1952,1029,2839,2467, 84,4383,2215, 820,1391,2015,2448,3672, 377, -1948,2168, 797,2545,3536,2578,2645, 94,2874,1678, 405,1259,3071, 771, 546,1315, - 470,1243,3083, 895,2468, 981, 969,2037, 846,4181, 653,1276,2928, 14,2594, 
557, -3007,2474, 156, 902,1338,1740,2574, 537,2518, 973,2282,2216,2433,1928, 138,2903, -1293,2631,1612, 646,3457, 839,2935, 111, 496,2191,2847, 589,3186, 149,3994,2060, -4031,2641,4067,3145,1870, 37,3597,2136,1025,2051,3009,3383,3549,1121,1016,3261, -1301, 251,2446,2599,2153, 872,3246, 637, 334,3705, 831, 884, 921,3065,3140,4092, -2198,1944, 246,2964, 108,2045,1152,1921,2308,1031, 203,3173,4170,1907,3890, 810, -1401,2003,1690, 506, 647,1242,2828,1761,1649,3208,2249,1589,3709,2931,5156,1708, - 498, 666,2613, 834,3817,1231, 184,2851,1124, 883,3197,2261,3710,1765,1553,2658, -1178,2639,2351, 93,1193, 942,2538,2141,4402, 235,1821, 870,1591,2192,1709,1871, -3341,1618,4126,2595,2334, 603, 651, 69, 701, 268,2662,3411,2555,1380,1606, 503, - 448, 254,2371,2646, 574,1187,2309,1770, 322,2235,1292,1801, 305, 566,1133, 229, -2067,2057, 706, 167, 483,2002,2672,3295,1820,3561,3067, 316, 378,2746,3452,1112, - 136,1981, 507,1651,2917,1117, 285,4591, 182,2580,3522,1304, 335,3303,1835,2504, -1795,1792,2248, 674,1018,2106,2449,1857,2292,2845, 976,3047,1781,2600,2727,1389, -1281, 52,3152, 153, 265,3950, 672,3485,3951,4463, 430,1183, 365, 278,2169, 27, -1407,1336,2304, 209,1340,1730,2202,1852,2403,2883, 979,1737,1062, 631,2829,2542, -3876,2592, 825,2086,2226,3048,3625, 352,1417,3724, 542, 991, 431,1351,3938,1861, -2294, 826,1361,2927,3142,3503,1738, 463,2462,2723, 582,1916,1595,2808, 400,3845, -3891,2868,3621,2254, 58,2492,1123, 910,2160,2614,1372,1603,1196,1072,3385,1700, -3267,1980, 696, 480,2430, 920, 799,1570,2920,1951,2041,4047,2540,1321,4223,2469, -3562,2228,1271,2602, 401,2833,3351,2575,5157, 907,2312,1256, 410, 263,3507,1582, - 996, 678,1849,2316,1480, 908,3545,2237, 703,2322, 667,1826,2849,1531,2604,2999, -2407,3146,2151,2630,1786,3711, 469,3542, 497,3899,2409, 858, 837,4446,3393,1274, - 786, 620,1845,2001,3311, 484, 308,3367,1204,1815,3691,2332,1532,2557,1842,2020, -2724,1927,2333,4440, 567, 22,1673,2728,4475,1987,1858,1144,1597, 101,1832,3601, - 12, 974,3783,4391, 951,1412, 
1,3720, 453,4608,4041, 528,1041,1027,3230,2628, -1129, 875,1051,3291,1203,2262,1069,2860,2799,2149,2615,3278, 144,1758,3040, 31, - 475,1680, 366,2685,3184, 311,1642,4008,2466,5036,1593,1493,2809, 216,1420,1668, - 233, 304,2128,3284, 232,1429,1768,1040,2008,3407,2740,2967,2543, 242,2133, 778, -1565,2022,2620, 505,2189,2756,1098,2273, 372,1614, 708, 553,2846,2094,2278, 169, -3626,2835,4161, 228,2674,3165, 809,1454,1309, 466,1705,1095, 900,3423, 880,2667, -3751,5258,2317,3109,2571,4317,2766,1503,1342, 866,4447,1118, 63,2076, 314,1881, -1348,1061, 172, 978,3515,1747, 532, 511,3970, 6, 601, 905,2699,3300,1751, 276, -1467,3725,2668, 65,4239,2544,2779,2556,1604, 578,2451,1802, 992,2331,2624,1320, -3446, 713,1513,1013, 103,2786,2447,1661, 886,1702, 916, 654,3574,2031,1556, 751, -2178,2821,2179,1498,1538,2176, 271, 914,2251,2080,1325, 638,1953,2937,3877,2432, -2754, 95,3265,1716, 260,1227,4083, 775, 106,1357,3254, 426,1607, 555,2480, 772, -1985, 244,2546, 474, 495,1046,2611,1851,2061, 71,2089,1675,2590, 742,3758,2843, -3222,1433, 267,2180,2576,2826,2233,2092,3913,2435, 956,1745,3075, 856,2113,1116, - 451, 3,1988,2896,1398, 993,2463,1878,2049,1341,2718,2721,2870,2108, 712,2904, -4363,2753,2324, 277,2872,2349,2649, 384, 987, 435, 691,3000, 922, 164,3939, 652, -1500,1184,4153,2482,3373,2165,4848,2335,3775,3508,3154,2806,2830,1554,2102,1664, -2530,1434,2408, 893,1547,2623,3447,2832,2242,2532,3169,2856,3223,2078, 49,3770, -3469, 462, 318, 656,2259,3250,3069, 679,1629,2758, 344,1138,1104,3120,1836,1283, -3115,2154,1437,4448, 934, 759,1999, 794,2862,1038, 533,2560,1722,2342, 855,2626, -1197,1663,4476,3127, 85,4240,2528, 25,1111,1181,3673, 407,3470,4561,2679,2713, - 768,1925,2841,3986,1544,1165, 932, 373,1240,2146,1930,2673, 721,4766, 354,4333, - 391,2963, 187, 61,3364,1442,1102, 330,1940,1767, 341,3809,4118, 393,2496,2062, -2211, 105, 331, 300, 439, 913,1332, 626, 379,3304,1557, 328, 689,3952, 309,1555, - 931, 317,2517,3027, 325, 569, 686,2107,3084, 60,1042,1333,2794, 
264,3177,4014, -1628, 258,3712, 7,4464,1176,1043,1778, 683, 114,1975, 78,1492, 383,1886, 510, - 386, 645,5291,2891,2069,3305,4138,3867,2939,2603,2493,1935,1066,1848,3588,1015, -1282,1289,4609, 697,1453,3044,2666,3611,1856,2412, 54, 719,1330, 568,3778,2459, -1748, 788, 492, 551,1191,1000, 488,3394,3763, 282,1799, 348,2016,1523,3155,2390, -1049, 382,2019,1788,1170, 729,2968,3523, 897,3926,2785,2938,3292, 350,2319,3238, -1718,1717,2655,3453,3143,4465, 161,2889,2980,2009,1421, 56,1908,1640,2387,2232, -1917,1874,2477,4921, 148, 83,3438, 592,4245,2882,1822,1055, 741, 115,1496,1624, - 381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189, - 852,1221,1400,1486, 882,2299,4036, 351, 28,1122, 700,6479,6480,6481,6482,6483, #last 512 -) - diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/gb2312prober.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/gb2312prober.py deleted file mode 100644 index 8446d2d..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/gb2312prober.py +++ /dev/null @@ -1,46 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is mozilla.org code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .mbcharsetprober import MultiByteCharSetProber -from .codingstatemachine import CodingStateMachine -from .chardistribution import GB2312DistributionAnalysis -from .mbcssm import GB2312_SM_MODEL - -class GB2312Prober(MultiByteCharSetProber): - def __init__(self): - super(GB2312Prober, self).__init__() - self.coding_sm = CodingStateMachine(GB2312_SM_MODEL) - self.distribution_analyzer = GB2312DistributionAnalysis() - self.reset() - - @property - def charset_name(self): - return "GB2312" - - @property - def language(self): - return "Chinese" diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/hebrewprober.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/hebrewprober.py deleted file mode 100644 index b0e1bf4..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/hebrewprober.py +++ /dev/null @@ -1,292 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Universal charset detector code. -# -# The Initial Developer of the Original Code is -# Shy Shalom -# Portions created by the Initial Developer are Copyright (C) 2005 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. 
-# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .charsetprober import CharSetProber -from .enums import ProbingState - -# This prober doesn't actually recognize a language or a charset. -# It is a helper prober for the use of the Hebrew model probers - -### General ideas of the Hebrew charset recognition ### -# -# Four main charsets exist in Hebrew: -# "ISO-8859-8" - Visual Hebrew -# "windows-1255" - Logical Hebrew -# "ISO-8859-8-I" - Logical Hebrew -# "x-mac-hebrew" - ?? Logical Hebrew ?? -# -# Both "ISO" charsets use a completely identical set of code points, whereas -# "windows-1255" and "x-mac-hebrew" are two different proper supersets of -# these code points. windows-1255 defines additional characters in the range -# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific -# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6. -# x-mac-hebrew defines similar additional code points but with a different -# mapping. -# -# As far as an average Hebrew text with no diacritics is concerned, all four -# charsets are identical with respect to code points. Meaning that for the -# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters -# (including final letters). -# -# The dominant difference between these charsets is their directionality. -# "Visual" directionality means that the text is ordered as if the renderer is -# not aware of a BIDI rendering algorithm. 
The renderer sees the text and -# draws it from left to right. The text itself when ordered naturally is read -# backwards. A buffer of Visual Hebrew generally looks like so: -# "[last word of first line spelled backwards] [whole line ordered backwards -# and spelled backwards] [first word of first line spelled backwards] -# [end of line] [last word of second line] ... etc' " -# adding punctuation marks, numbers and English text to visual text is -# naturally also "visual" and from left to right. -# -# "Logical" directionality means the text is ordered "naturally" according to -# the order it is read. It is the responsibility of the renderer to display -# the text from right to left. A BIDI algorithm is used to place general -# punctuation marks, numbers and English text in the text. -# -# Texts in x-mac-hebrew are almost impossible to find on the Internet. From -# what little evidence I could find, it seems that its general directionality -# is Logical. -# -# To sum up all of the above, the Hebrew probing mechanism knows about two -# charsets: -# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are -# backwards while line order is natural. For charset recognition purposes -# the line order is unimportant (In fact, for this implementation, even -# word order is unimportant). -# Logical Hebrew - "windows-1255" - normal, naturally ordered text. -# -# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be -# specifically identified. -# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew -# that contain special punctuation marks or diacritics is displayed with -# some unconverted characters showing as question marks. This problem might -# be corrected using another model prober for x-mac-hebrew. Due to the fact -# that x-mac-hebrew texts are so rare, writing another model prober isn't -# worth the effort and performance hit. 
-# -#### The Prober #### -# -# The prober is divided between two SBCharSetProbers and a HebrewProber, -# all of which are managed, created, fed data, inquired and deleted by the -# SBCSGroupProber. The two SBCharSetProbers identify that the text is in -# fact some kind of Hebrew, Logical or Visual. The final decision about which -# one is it is made by the HebrewProber by combining final-letter scores -# with the scores of the two SBCharSetProbers to produce a final answer. -# -# The SBCSGroupProber is responsible for stripping the original text of HTML -# tags, English characters, numbers, low-ASCII punctuation characters, spaces -# and new lines. It reduces any sequence of such characters to a single space. -# The buffer fed to each prober in the SBCS group prober is pure text in -# high-ASCII. -# The two SBCharSetProbers (model probers) share the same language model: -# Win1255Model. -# The first SBCharSetProber uses the model normally as any other -# SBCharSetProber does, to recognize windows-1255, upon which this model was -# built. The second SBCharSetProber is told to make the pair-of-letter -# lookup in the language model backwards. This in practice exactly simulates -# a visual Hebrew model using the windows-1255 logical Hebrew model. -# -# The HebrewProber is not using any language model. All it does is look for -# final-letter evidence suggesting the text is either logical Hebrew or visual -# Hebrew. Disjointed from the model probers, the results of the HebrewProber -# alone are meaningless. HebrewProber always returns 0.00 as confidence -# since it never identifies a charset by itself. Instead, the pointer to the -# HebrewProber is passed to the model probers as a helper "Name Prober". -# When the Group prober receives a positive identification from any prober, -# it asks for the name of the charset identified. If the prober queried is a -# Hebrew model prober, the model prober forwards the call to the -# HebrewProber to make the final decision. 
In the HebrewProber, the -# decision is made according to the final-letters scores maintained and Both -# model probers scores. The answer is returned in the form of the name of the -# charset identified, either "windows-1255" or "ISO-8859-8". - -class HebrewProber(CharSetProber): - # windows-1255 / ISO-8859-8 code points of interest - FINAL_KAF = 0xea - NORMAL_KAF = 0xeb - FINAL_MEM = 0xed - NORMAL_MEM = 0xee - FINAL_NUN = 0xef - NORMAL_NUN = 0xf0 - FINAL_PE = 0xf3 - NORMAL_PE = 0xf4 - FINAL_TSADI = 0xf5 - NORMAL_TSADI = 0xf6 - - # Minimum Visual vs Logical final letter score difference. - # If the difference is below this, don't rely solely on the final letter score - # distance. - MIN_FINAL_CHAR_DISTANCE = 5 - - # Minimum Visual vs Logical model score difference. - # If the difference is below this, don't rely at all on the model score - # distance. - MIN_MODEL_DISTANCE = 0.01 - - VISUAL_HEBREW_NAME = "ISO-8859-8" - LOGICAL_HEBREW_NAME = "windows-1255" - - def __init__(self): - super(HebrewProber, self).__init__() - self._final_char_logical_score = None - self._final_char_visual_score = None - self._prev = None - self._before_prev = None - self._logical_prober = None - self._visual_prober = None - self.reset() - - def reset(self): - self._final_char_logical_score = 0 - self._final_char_visual_score = 0 - # The two last characters seen in the previous buffer, - # mPrev and mBeforePrev are initialized to space in order to simulate - # a word delimiter at the beginning of the data - self._prev = ' ' - self._before_prev = ' ' - # These probers are owned by the group prober. 
- - def set_model_probers(self, logicalProber, visualProber): - self._logical_prober = logicalProber - self._visual_prober = visualProber - - def is_final(self, c): - return c in [self.FINAL_KAF, self.FINAL_MEM, self.FINAL_NUN, - self.FINAL_PE, self.FINAL_TSADI] - - def is_non_final(self, c): - # The normal Tsadi is not a good Non-Final letter due to words like - # 'lechotet' (to chat) containing an apostrophe after the tsadi. This - # apostrophe is converted to a space in FilterWithoutEnglishLetters - # causing the Non-Final tsadi to appear at an end of a word even - # though this is not the case in the original text. - # The letters Pe and Kaf rarely display a related behavior of not being - # a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak' - # for example legally end with a Non-Final Pe or Kaf. However, the - # benefit of these letters as Non-Final letters outweighs the damage - # since these words are quite rare. - return c in [self.NORMAL_KAF, self.NORMAL_MEM, - self.NORMAL_NUN, self.NORMAL_PE] - - def feed(self, byte_str): - # Final letter analysis for logical-visual decision. - # Look for evidence that the received buffer is either logical Hebrew - # or visual Hebrew. - # The following cases are checked: - # 1) A word longer than 1 letter, ending with a final letter. This is - # an indication that the text is laid out "naturally" since the - # final letter really appears at the end. +1 for logical score. - # 2) A word longer than 1 letter, ending with a Non-Final letter. In - # normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi, - # should not end with the Non-Final form of that letter. Exceptions - # to this rule are mentioned above in isNonFinal(). This is an - # indication that the text is laid out backwards. +1 for visual - # score - # 3) A word longer than 1 letter, starting with a final letter. Final - # letters should not appear at the beginning of a word. This is an - # indication that the text is laid out backwards. 
+1 for visual - # score. - # - # The visual score and logical score are accumulated throughout the - # text and are finally checked against each other in GetCharSetName(). - # No checking for final letters in the middle of words is done since - # that case is not an indication for either Logical or Visual text. - # - # We automatically filter out all 7-bit characters (replace them with - # spaces) so the word boundary detection works properly. [MAP] - - if self.state == ProbingState.NOT_ME: - # Both model probers say it's not them. No reason to continue. - return ProbingState.NOT_ME - - byte_str = self.filter_high_byte_only(byte_str) - - for cur in byte_str: - if cur == ' ': - # We stand on a space - a word just ended - if self._before_prev != ' ': - # next-to-last char was not a space so self._prev is not a - # 1 letter word - if self.is_final(self._prev): - # case (1) [-2:not space][-1:final letter][cur:space] - self._final_char_logical_score += 1 - elif self.is_non_final(self._prev): - # case (2) [-2:not space][-1:Non-Final letter][ - # cur:space] - self._final_char_visual_score += 1 - else: - # Not standing on a space - if ((self._before_prev == ' ') and - (self.is_final(self._prev)) and (cur != ' ')): - # case (3) [-2:space][-1:final letter][cur:not space] - self._final_char_visual_score += 1 - self._before_prev = self._prev - self._prev = cur - - # Forever detecting, till the end or until both model probers return - # ProbingState.NOT_ME (handled above) - return ProbingState.DETECTING - - @property - def charset_name(self): - # Make the decision: is it Logical or Visual? - # If the final letter score distance is dominant enough, rely on it. - finalsub = self._final_char_logical_score - self._final_char_visual_score - if finalsub >= self.MIN_FINAL_CHAR_DISTANCE: - return self.LOGICAL_HEBREW_NAME - if finalsub <= -self.MIN_FINAL_CHAR_DISTANCE: - return self.VISUAL_HEBREW_NAME - - # It's not dominant enough, try to rely on the model scores instead. 
- modelsub = (self._logical_prober.get_confidence() - - self._visual_prober.get_confidence()) - if modelsub > self.MIN_MODEL_DISTANCE: - return self.LOGICAL_HEBREW_NAME - if modelsub < -self.MIN_MODEL_DISTANCE: - return self.VISUAL_HEBREW_NAME - - # Still no good, back to final letter distance, maybe it'll save the - # day. - if finalsub < 0.0: - return self.VISUAL_HEBREW_NAME - - # (finalsub > 0 - Logical) or (don't know what to do) default to - # Logical. - return self.LOGICAL_HEBREW_NAME - - @property - def language(self): - return 'Hebrew' - - @property - def state(self): - # Remain active as long as any of the model probers are active. - if (self._logical_prober.state == ProbingState.NOT_ME) and \ - (self._visual_prober.state == ProbingState.NOT_ME): - return ProbingState.NOT_ME - return ProbingState.DETECTING diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/jisfreq.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/jisfreq.py deleted file mode 100644 index 83fc082..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/jisfreq.py +++ /dev/null @@ -1,325 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Communicator client code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. 
-# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -# Sampling from about 20M text materials include literature and computer technology -# -# Japanese frequency table, applied to both S-JIS and EUC-JP -# They are sorted in order. - -# 128 --> 0.77094 -# 256 --> 0.85710 -# 512 --> 0.92635 -# 1024 --> 0.97130 -# 2048 --> 0.99431 -# -# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58 -# Random Distribution Ration = 512 / (2965+62+83+86-512) = 0.191 -# -# Typical Distribution Ratio, 25% of IDR - -JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0 - -# Char to FreqOrder table , -JIS_TABLE_SIZE = 4368 - -JIS_CHAR_TO_FREQ_ORDER = ( - 40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16 -3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32 -1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48 -2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64 -2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80 -5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96 -1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112 -5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128 -5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144 -5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160 
-5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176 -5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192 -5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208 -1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224 -1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240 -1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256 -2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272 -3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288 -3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304 - 4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320 - 12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336 -1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352 - 109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368 -5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384 - 271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400 - 32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416 - 43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432 - 280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448 - 54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464 -5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480 -5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496 -5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512 -4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528 
-5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544 -5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560 -5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576 -5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592 -5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608 -5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624 -5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640 -5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656 -5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672 -3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688 -5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704 -5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720 -5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736 -5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752 -5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768 -5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784 -5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800 -5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816 -5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832 -5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848 -5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864 -5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880 
-5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896 -5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912 -5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928 -5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944 -5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960 -5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976 -5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992 -5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008 -5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024 -5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040 -5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056 -5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072 -5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088 -5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104 -5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120 -5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136 -5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152 -5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168 -5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184 -5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200 -5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216 -5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232 
-5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248 -5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264 -5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280 -5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296 -6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312 -6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328 -6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344 -6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360 -6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376 -6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392 -6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408 -6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424 -4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440 - 854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456 - 665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472 -1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488 -1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504 - 896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520 -3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536 -3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552 - 804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568 -3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584 -3696,4624, 
548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600 - 586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616 -2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632 - 277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648 -3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664 -1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680 - 380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696 -1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712 - 850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728 -2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744 -2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760 -2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776 -2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792 -1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808 -1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824 -1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840 -1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856 -2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872 -1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888 -2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904 -1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920 -1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936 -1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 
702,3938,1606,1017,1398,6129, # 1952 -1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968 -1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984 -1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000 - 606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016 - 684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032 -1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048 -2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064 -2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080 -2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096 -3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112 -3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128 - 884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144 -3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160 -1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176 - 861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192 -2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208 -1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224 - 576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240 -3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256 -4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272 -2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288 -1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304 
-2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320 -1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336 - 385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352 - 178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368 -1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384 -2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400 -2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416 -2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432 -3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448 -1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464 -2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480 - 359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496 - 837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512 - 855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528 -1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544 -2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560 - 633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576 -1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592 -1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608 - 353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624 -1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640 -1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656 -1643,4330, 806,1126, 229, 
591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672 - 764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688 -2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704 - 278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720 -2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736 -3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752 -2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768 -1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784 -6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800 -1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816 -2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832 -1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848 - 470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864 - 72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880 -3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896 -3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912 -1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928 -1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944 -1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960 -1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976 - 123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992 - 913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008 -2671,2117,3949,4649, 487,1825,2220,6150,2915, 
448,2348,1073,6151,2397,1707, 130, # 3024 - 900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040 -3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056 -2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072 - 423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088 -1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104 -2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120 - 220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136 -1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152 - 745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168 -4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184 -2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200 -1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216 - 666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232 -1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248 -2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264 - 376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280 -6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296 -1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312 -1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328 -2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344 -3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360 - 914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376 
-3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392 -1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408 - 674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424 -1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440 - 199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456 -3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472 - 370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488 -2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504 - 414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520 -4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536 -2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552 -1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568 -1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584 -1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600 - 166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616 -1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632 -3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648 -1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664 -3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680 - 264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696 - 543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712 - 983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728 -2452, 156, 796,1629,1486,2223,2055, 
694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744 -1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760 - 867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776 -1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792 - 894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808 -1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824 - 530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840 - 839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856 - 480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872 -1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888 -1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904 -2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920 -4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936 - 227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952 -1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968 - 328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984 -1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000 -3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016 -1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032 -2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048 -2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064 -1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080 -1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 
858,1679,1897,1719, # 4096 -2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112 - 455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128 -2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144 -1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160 -1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176 -1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192 -1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208 -3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224 -2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240 -2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256 - 575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272 -3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288 -3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304 -1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320 -2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336 -1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352 -2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512 -) - - diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/jpcntx.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/jpcntx.py deleted file mode 100644 index 20044e4..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/jpcntx.py +++ /dev/null @@ -1,233 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original 
Code is Mozilla Communicator client code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - - -# This is hiragana 2-char sequence table, the number in each cell represents its frequency category -jp2CharContext = ( -(0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1), -(2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4), -(0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2), -(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4), 
-(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), -(0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4), -(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), -(0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3), -(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), -(0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4), -(1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4), -(0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3), -(0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3), -(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3), -(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4), 
-(0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3), -(2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4), -(0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3), -(0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5), -(0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3), -(2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5), -(0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4), -(1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4), -(0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3), -(0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3), -(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3), 
-(0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5), -(0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4), -(0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5), -(0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3), -(0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4), -(0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4), -(0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4), -(0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1), -(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0), -(1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3), -(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0), 
-(0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3), -(0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3), -(0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5), -(0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4), -(2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5), -(0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3), -(0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3), -(0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3), -(0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3), -(0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4), -(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4), 
-(0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2), -(0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3), -(0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3), -(0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3), -(0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3), -(0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4), -(0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3), -(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4), -(0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3), -(0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3), -(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4), 
-(0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4), -(0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3), -(2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4), -(0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4), -(0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3), -(0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4), -(0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4), -(1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4), -(0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3), -(0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2), -(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2), 
-(0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3), -(0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3), -(0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5), -(0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3), -(0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4), -(1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4), -(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4), -(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), -(0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3), -(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1), -(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2), 
-(0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3), -(0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1), -) - -class JapaneseContextAnalysis(object): - NUM_OF_CATEGORY = 6 - DONT_KNOW = -1 - ENOUGH_REL_THRESHOLD = 100 - MAX_REL_THRESHOLD = 1000 - MINIMUM_DATA_THRESHOLD = 4 - - def __init__(self): - self._total_rel = None - self._rel_sample = None - self._need_to_skip_char_num = None - self._last_char_order = None - self._done = None - self.reset() - - def reset(self): - self._total_rel = 0 # total sequence received - # category counters, each integer counts sequence in its category - self._rel_sample = [0] * self.NUM_OF_CATEGORY - # if last byte in current buffer is not the last byte of a character, - # we need to know how many bytes to skip in next buffer - self._need_to_skip_char_num = 0 - self._last_char_order = -1 # The order of previous char - # If this flag is set to True, detection is done and conclusion has - # been made - self._done = False - - def feed(self, byte_str, num_bytes): - if self._done: - return - - # The buffer we got is byte oriented, and a character may span in more than one - # buffers. In case the last one or two byte in last buffer is not - # complete, we record how many byte needed to complete that character - # and skip these bytes here. We can choose to record those bytes as - # well and analyse the character once it is complete, but since a - # character will not make much difference, by simply skipping - # this character will simply our logic and improve performance. 
- i = self._need_to_skip_char_num - while i < num_bytes: - order, char_len = self.get_order(byte_str[i:i + 2]) - i += char_len - if i > num_bytes: - self._need_to_skip_char_num = i - num_bytes - self._last_char_order = -1 - else: - if (order != -1) and (self._last_char_order != -1): - self._total_rel += 1 - if self._total_rel > self.MAX_REL_THRESHOLD: - self._done = True - break - self._rel_sample[jp2CharContext[self._last_char_order][order]] += 1 - self._last_char_order = order - - def got_enough_data(self): - return self._total_rel > self.ENOUGH_REL_THRESHOLD - - def get_confidence(self): - # This is just one way to calculate confidence. It works well for me. - if self._total_rel > self.MINIMUM_DATA_THRESHOLD: - return (self._total_rel - self._rel_sample[0]) / self._total_rel - else: - return self.DONT_KNOW - - def get_order(self, byte_str): - return -1, 1 - -class SJISContextAnalysis(JapaneseContextAnalysis): - def __init__(self): - super(SJISContextAnalysis, self).__init__() - self._charset_name = "SHIFT_JIS" - - @property - def charset_name(self): - return self._charset_name - - def get_order(self, byte_str): - if not byte_str: - return -1, 1 - # find out current char's byte length - first_char = byte_str[0] - if (0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC): - char_len = 2 - if (first_char == 0x87) or (0xFA <= first_char <= 0xFC): - self._charset_name = "CP932" - else: - char_len = 1 - - # return its order if it is hiragana - if len(byte_str) > 1: - second_char = byte_str[1] - if (first_char == 202) and (0x9F <= second_char <= 0xF1): - return second_char - 0x9F, char_len - - return -1, char_len - -class EUCJPContextAnalysis(JapaneseContextAnalysis): - def get_order(self, byte_str): - if not byte_str: - return -1, 1 - # find out current char's byte length - first_char = byte_str[0] - if (first_char == 0x8E) or (0xA1 <= first_char <= 0xFE): - char_len = 2 - elif first_char == 0x8F: - char_len = 3 - else: - char_len = 1 - - # return its order if 
it is hiragana - if len(byte_str) > 1: - second_char = byte_str[1] - if (first_char == 0xA4) and (0xA1 <= second_char <= 0xF3): - return second_char - 0xA1, char_len - - return -1, char_len - - diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/langbulgarianmodel.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/langbulgarianmodel.py deleted file mode 100644 index 2aa4fb2..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/langbulgarianmodel.py +++ /dev/null @@ -1,228 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Communicator client code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -# 255: Control characters that usually does not exist in any text -# 254: Carriage/Return -# 253: symbol (punctuation) that does not belong to word -# 252: 0 - 9 - -# Character Mapping Table: -# this table is modified base on win1251BulgarianCharToOrderMap, so -# only number <64 is sure valid - -Latin5_BulgarianCharToOrderMap = ( -255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 -255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 -253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 -252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 -253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40 -110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50 -253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60 -116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70 -194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, # 80 -210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, # 90 - 81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238, # a0 - 31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # b0 - 39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56, # c0 - 1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # d0 - 7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16, # e0 - 62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253, # f0 -) - -win1251BulgarianCharToOrderMap = ( -255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 -255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 
-253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 -252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 -253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40 -110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50 -253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60 -116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70 -206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220, # 80 -221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229, # 90 - 88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240, # a0 - 73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250, # b0 - 31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # c0 - 39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56, # d0 - 1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # e0 - 7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16, # f0 -) - -# Model Table: -# total sequences: 100% -# first 512 sequences: 96.9392% -# first 1024 sequences:3.0618% -# rest sequences: 0.2992% -# negative sequences: 0.0020% -BulgarianLangModel = ( -0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3, -3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2, -3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1, -0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, -3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0, -0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, -3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0, -0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0, -0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0, -0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1, 
-3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0, -0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1, -3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0, -0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, -3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0, -0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1, -3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0, -1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, -3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0, -0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1, -3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0, -0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, -3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0, -0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, -3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0, -0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, -3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1, -0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, -3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0, -0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, -3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0, -0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, -3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0, -0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0, -0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0, 
-3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, -3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, -3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, -3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0, -0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, -3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3, -2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1, -3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1, -3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2, -1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0, -3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1, -1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0, -2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2, -2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0, -3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2, -1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0, -2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2, -2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0, -3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2, -1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0, 
-2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2, -2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0, -2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2, -1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0, -2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2, -1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0, -3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2, -1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0, -3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1, -1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0, -2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1, -1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0, -2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2, -1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0, -2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1, -1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0, -3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0, -1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2, -1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1, -2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2, -1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0, -2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2, -1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0, -1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1, -0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0, -1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2, -1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0, 
-2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1, -1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0, -1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1, -0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0, -1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1, -0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0, -2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1, -0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0, -2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0, -1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1, -0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0, -0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0, -1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1, -1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0, -1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0, -2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, -) - -Latin5BulgarianModel = { - 'char_to_order_map': Latin5_BulgarianCharToOrderMap, - 'precedence_matrix': BulgarianLangModel, - 'typical_positive_ratio': 0.969392, - 'keep_english_letter': False, - 'charset_name': "ISO-8859-5", - 'language': 'Bulgairan', -} - 
-Win1251BulgarianModel = { - 'char_to_order_map': win1251BulgarianCharToOrderMap, - 'precedence_matrix': BulgarianLangModel, - 'typical_positive_ratio': 0.969392, - 'keep_english_letter': False, - 'charset_name': "windows-1251", - 'language': 'Bulgarian', -} diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/langcyrillicmodel.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/langcyrillicmodel.py deleted file mode 100644 index e5f9a1f..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/langcyrillicmodel.py +++ /dev/null @@ -1,333 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Communicator client code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -# KOI8-R language model -# Character Mapping Table: -KOI8R_char_to_order_map = ( -255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 -255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 -253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 -252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 -253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40 -155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50 -253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60 - 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70 -191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, # 80 -207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, # 90 -223,224,225, 68,226,227,228,229,230,231,232,233,234,235,236,237, # a0 -238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, # b0 - 27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1, # c0 - 15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54, # d0 - 59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34, # e0 - 35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0 -) - -win1251_char_to_order_map = ( -255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 -255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 -253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 -252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 -253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40 -155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50 -253, 71,172, 66,173, 65,174, 
76,175, 64,176,177, 77, 72,178, 69, # 60 - 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70 -191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, -207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, -223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238, -239,240,241,242,243,244,245,246, 68,247,248,249,250,251,252,253, - 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35, - 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43, - 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15, - 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16, -) - -latin5_char_to_order_map = ( -255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 -255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 -253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 -252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 -253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40 -155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50 -253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60 - 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70 -191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, -207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, -223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238, - 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35, - 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43, - 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15, - 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16, -239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255, -) - -macCyrillic_char_to_order_map = ( -255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 -255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 
-253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 -252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 -253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40 -155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50 -253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60 - 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70 - 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35, - 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43, -191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, -207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, -223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238, -239,240,241,242,243,244,245,246,247,248,249,250,251,252, 68, 16, - 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15, - 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255, -) - -IBM855_char_to_order_map = ( -255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 -255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 -253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 -252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 -253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40 -155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50 -253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60 - 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70 -191,192,193,194, 68,195,196,197,198,199,200,201,202,203,204,205, -206,207,208,209,210,211,212,213,214,215,216,217, 27, 59, 54, 70, - 3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46,218,219, -220,221,222,223,224, 26, 55, 4, 42,225,226,227,228, 23, 60,229, -230,231,232,233,234,235, 11, 36,236,237,238,239,240,241,242,243, - 8, 49, 12, 38, 5, 31, 1, 34, 15,244,245,246,247, 35, 16,248, - 43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 
61,249, -250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255, -) - -IBM866_char_to_order_map = ( -255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 -255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 -253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 -252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 -253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40 -155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50 -253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60 - 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70 - 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35, - 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43, - 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15, -191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, -207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, -223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238, - 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16, -239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255, -) - -# Model Table: -# total sequences: 100% -# first 512 sequences: 97.6601% -# first 1024 sequences: 2.3389% -# rest sequences: 0.1237% -# negative sequences: 0.0009% -RussianLangModel = ( -0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3, -3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2, -3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0, -0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,2,2,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,2,3,3,1,0, -0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,2,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1, -0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1, 
-0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,3,3,3,3,3,2,2,2,3,1,3,3,1,3,3,3,3,2,2,3,0,2,2,2,3,3,2,1,0, -0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, -3,3,3,3,3,3,2,3,3,3,3,3,2,2,3,2,3,3,3,2,1,2,2,0,1,2,2,2,2,2,2,0, -0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, -3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,3,0,2,2,3,3,2,1,2,0, -0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0, -3,3,3,3,3,3,2,3,3,1,2,3,2,2,3,2,3,3,3,3,2,2,3,0,3,2,2,3,1,1,1,0, -0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,3,3,3,3,2,2,2,0,3,3,3,2,2,2,2,0, -0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,2,3,2,2,0,1,3,2,1,2,2,1,0, -0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, -3,3,3,3,3,3,3,3,3,3,3,2,1,1,3,0,1,1,1,1,2,1,1,0,2,2,2,1,2,0,1,0, -0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,3,3,3,2,3,3,2,2,2,2,1,3,2,3,2,3,2,1,2,2,0,1,1,2,1,2,1,2,0, -0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,2,3,3,3,2,2,2,2,0,2,2,2,2,3,1,1,0, -0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, -3,2,3,2,2,3,3,3,3,3,3,3,3,3,1,3,2,0,0,3,3,3,3,2,3,3,3,3,2,3,2,0, -0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,3,3,3,3,3,2,2,3,3,0,2,1,0,3,2,3,2,3,0,0,1,2,0,0,1,0,1,2,1,1,0, -0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,0,3,0,2,3,3,3,3,2,3,3,3,3,1,2,2,0,0,2,3,2,2,2,3,2,3,2,2,3,0,0, -0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,2,3,0,2,3,2,3,0,1,2,3,3,2,0,2,3,0,0,2,3,2,2,0,1,3,1,3,2,2,1,0, -0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,1,3,0,2,3,3,3,3,3,3,3,3,2,1,3,2,0,0,2,2,3,3,3,2,3,3,0,2,2,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,3,3,3,2,2,3,3,2,2,2,3,3,0,0,1,1,1,1,1,2,0,0,1,1,1,1,0,1,0, 
-0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,3,3,3,2,2,3,3,3,3,3,3,3,0,3,2,3,3,2,3,2,0,2,1,0,1,1,0,1,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, -3,3,3,3,3,3,2,3,3,3,2,2,2,2,3,1,3,2,3,1,1,2,1,0,2,2,2,2,1,3,1,0, -0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, -2,2,3,3,3,3,3,1,2,2,1,3,1,0,3,0,0,3,0,0,0,1,1,0,1,2,1,0,0,0,0,0, -0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,2,2,1,1,3,3,3,2,2,1,2,2,3,1,1,2,0,0,2,2,1,3,0,0,2,1,1,2,1,1,0, -0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,2,3,3,3,3,1,2,2,2,1,2,1,3,3,1,1,2,1,2,1,2,2,0,2,0,0,1,1,0,1,0, -0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,3,3,3,3,3,2,1,3,2,2,3,2,0,3,2,0,3,0,1,0,1,1,0,0,1,1,1,1,0,1,0, -0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,2,3,3,3,2,2,2,3,3,1,2,1,2,1,0,1,0,1,1,0,1,0,0,2,1,1,1,0,1,0, -0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, -3,1,1,2,1,2,3,3,2,2,1,2,2,3,0,2,1,0,0,2,2,3,2,1,2,2,2,2,2,3,1,0, -0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,3,3,1,1,0,1,1,2,2,1,1,3,0,0,1,3,1,1,1,0,0,0,1,0,1,1,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,1,3,3,3,2,0,0,0,2,1,0,1,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,0,1,0,0,2,3,2,2,2,1,2,2,2,1,2,1,0,0,1,1,1,0,2,0,1,1,1,0,0,1,1, -1,0,0,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0, -2,3,3,3,3,0,0,0,0,1,0,0,0,0,3,0,1,2,1,0,0,0,0,0,0,0,1,1,0,0,1,1, -1,0,1,0,1,2,0,0,1,1,2,1,0,1,1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,0, -2,2,3,2,2,2,3,1,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,0,1,0,1,1,1,0,2,1, -1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,0,1,1,0, -3,3,3,2,2,2,2,3,2,2,1,1,2,2,2,2,1,1,3,1,2,1,2,0,0,1,1,0,1,0,2,1, -1,1,1,1,1,2,1,0,1,1,1,1,0,1,0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,1,1,0, -2,0,0,1,0,3,2,2,2,2,1,2,1,2,1,2,0,0,0,2,1,2,2,1,1,2,2,0,1,1,0,2, 
-1,1,1,1,1,0,1,1,1,2,1,1,1,2,1,0,1,2,1,1,1,1,0,1,1,1,0,0,1,0,0,1, -1,3,2,2,2,1,1,1,2,3,0,0,0,0,2,0,2,2,1,0,0,0,0,0,0,1,0,0,0,0,1,1, -1,0,1,1,0,1,0,1,1,0,1,1,0,2,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0, -2,3,2,3,2,1,2,2,2,2,1,0,0,0,2,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,2,1, -1,1,2,1,0,2,0,0,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,0,1,0,0,0,0,0, -3,0,0,1,0,2,2,2,3,2,2,2,2,2,2,2,0,0,0,2,1,2,1,1,1,2,2,0,0,0,1,2, -1,1,1,1,1,0,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1, -2,3,2,3,3,2,0,1,1,1,0,0,1,0,2,0,1,1,3,1,0,0,0,0,0,0,0,1,0,0,2,1, -1,1,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0, -2,3,3,3,3,1,2,2,2,2,0,1,1,0,2,1,1,1,2,1,0,1,1,0,0,1,0,1,0,0,2,0, -0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,3,3,3,2,0,0,1,1,2,2,1,0,0,2,0,1,1,3,0,0,1,0,0,0,0,0,1,0,1,2,1, -1,1,2,0,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,0,0,0,0,0,1,0,1,1,0, -1,3,2,3,2,1,0,0,2,2,2,0,1,0,2,0,1,1,1,0,1,0,0,0,3,0,1,1,0,0,2,1, -1,1,1,0,1,1,0,0,0,0,1,1,0,1,0,0,2,1,1,0,1,0,0,0,1,0,1,0,0,1,1,0, -3,1,2,1,1,2,2,2,2,2,2,1,2,2,1,1,0,0,0,2,2,2,0,0,0,1,2,1,0,1,0,1, -2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,2,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1, -3,0,0,0,0,2,0,1,1,1,1,1,1,1,0,1,0,0,0,1,1,1,0,1,0,1,1,0,0,1,0,1, -1,1,0,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1, -1,3,3,2,2,0,0,0,2,2,0,0,0,1,2,0,1,1,2,0,0,0,0,0,0,0,0,1,0,0,2,1, -0,1,1,0,0,1,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0, -2,3,2,3,2,0,0,0,0,1,1,0,0,0,2,0,2,0,2,0,0,0,0,0,1,0,0,1,0,0,1,1, -1,1,2,0,1,2,1,0,1,1,2,1,1,1,1,1,2,1,1,0,1,0,0,1,1,1,1,1,0,1,1,0, -1,3,2,2,2,1,0,0,2,2,1,0,1,2,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1, -0,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0, -1,0,0,1,0,2,3,1,2,2,2,2,2,2,1,1,0,0,0,1,0,1,0,2,1,1,1,0,0,0,0,1, -1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0, -2,0,2,0,0,1,0,3,2,1,2,1,2,2,0,1,0,0,0,2,1,0,0,2,1,1,1,1,0,2,0,2, -2,1,1,1,1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,1, -1,2,2,2,2,1,0,0,1,0,0,0,0,0,2,0,1,1,1,1,0,0,0,0,1,0,1,2,0,0,2,0, 
-1,0,1,1,1,2,1,0,1,0,1,1,0,0,1,0,1,1,1,0,1,0,0,0,1,0,0,1,0,1,1,0, -2,1,2,2,2,0,3,0,1,1,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1, -0,0,0,1,1,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0, -1,2,2,3,2,2,0,0,1,1,2,0,1,2,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1, -0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0, -2,2,1,1,2,1,2,2,2,2,2,1,2,2,0,1,0,0,0,1,2,2,2,1,2,1,1,1,1,1,2,1, -1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,0,1, -1,2,2,2,2,0,1,0,2,2,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0, -0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0, -0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -1,2,2,2,2,0,0,0,2,2,2,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1, -0,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -1,2,2,2,2,0,0,0,0,1,0,0,1,1,2,0,0,0,0,1,0,1,0,0,1,0,0,2,0,0,0,1, -0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0, -1,2,2,2,1,1,2,0,2,1,1,1,1,0,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1, -0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0, -1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0, -0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0, -1,0,0,0,0,2,0,1,2,1,0,1,1,1,0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1, -0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1, -2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, -1,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0, -2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, -1,1,1,0,1,0,1,0,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0, -1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, -1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,0, -0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0, -) - -Koi8rModel = { - 'char_to_order_map': KOI8R_char_to_order_map, - 
'precedence_matrix': RussianLangModel, - 'typical_positive_ratio': 0.976601, - 'keep_english_letter': False, - 'charset_name': "KOI8-R", - 'language': 'Russian', -} - -Win1251CyrillicModel = { - 'char_to_order_map': win1251_char_to_order_map, - 'precedence_matrix': RussianLangModel, - 'typical_positive_ratio': 0.976601, - 'keep_english_letter': False, - 'charset_name': "windows-1251", - 'language': 'Russian', -} - -Latin5CyrillicModel = { - 'char_to_order_map': latin5_char_to_order_map, - 'precedence_matrix': RussianLangModel, - 'typical_positive_ratio': 0.976601, - 'keep_english_letter': False, - 'charset_name': "ISO-8859-5", - 'language': 'Russian', -} - -MacCyrillicModel = { - 'char_to_order_map': macCyrillic_char_to_order_map, - 'precedence_matrix': RussianLangModel, - 'typical_positive_ratio': 0.976601, - 'keep_english_letter': False, - 'charset_name': "MacCyrillic", - 'language': 'Russian', -} - -Ibm866Model = { - 'char_to_order_map': IBM866_char_to_order_map, - 'precedence_matrix': RussianLangModel, - 'typical_positive_ratio': 0.976601, - 'keep_english_letter': False, - 'charset_name': "IBM866", - 'language': 'Russian', -} - -Ibm855Model = { - 'char_to_order_map': IBM855_char_to_order_map, - 'precedence_matrix': RussianLangModel, - 'typical_positive_ratio': 0.976601, - 'keep_english_letter': False, - 'charset_name': "IBM855", - 'language': 'Russian', -} diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/langgreekmodel.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/langgreekmodel.py deleted file mode 100644 index 5332221..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/langgreekmodel.py +++ /dev/null @@ -1,225 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Communicator client code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. 
-# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -# 255: Control characters that usually does not exist in any text -# 254: Carriage/Return -# 253: symbol (punctuation) that does not belong to word -# 252: 0 - 9 - -# Character Mapping Table: -Latin7_char_to_order_map = ( -255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 -255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 -253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 -252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 -253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40 - 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50 -253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60 - 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70 -255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80 -255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90 -253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0 
-253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0 -110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0 - 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0 -124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0 - 9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0 -) - -win1253_char_to_order_map = ( -255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 -255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 -253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 -252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 -253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40 - 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50 -253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60 - 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70 -255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80 -255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90 -253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0 -253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0 -110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0 - 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0 -124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0 - 9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0 -) - -# Model Table: -# total sequences: 100% -# first 512 sequences: 98.2851% -# first 1024 sequences:1.7001% -# rest sequences: 0.0359% -# negative sequences: 0.0148% -GreekLangModel = ( -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0, -3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
-0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0, -2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0, -0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0, -2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0, -0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0, -2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, -0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0, -2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0, -0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0, -2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0, -0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0, -3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
-0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0, -3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0, -2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0, -2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0, -0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0, -0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0, -0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0, 
-0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2, -0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0, -0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2, -0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0, -0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2, -0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2, -0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0, -0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2, -0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0, -0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0, -0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, -0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0, -0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0, -0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, -0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0, -0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2, -0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0, -0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
-0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2, -0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0, -0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2, -0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0, -0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2, -0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0, -0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1, -0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0, -0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2, -0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0, -0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2, -0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2, -0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0, -0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0, -0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1, -0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, -0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0, -0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0, -0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
-0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -) - -Latin7GreekModel = { - 'char_to_order_map': Latin7_char_to_order_map, - 'precedence_matrix': GreekLangModel, - 'typical_positive_ratio': 0.982851, - 'keep_english_letter': False, - 'charset_name': "ISO-8859-7", - 'language': 'Greek', -} - -Win1253GreekModel = { - 'char_to_order_map': win1253_char_to_order_map, - 'precedence_matrix': GreekLangModel, - 'typical_positive_ratio': 0.982851, - 'keep_english_letter': False, - 'charset_name': "windows-1253", - 'language': 'Greek', -} diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/langhebrewmodel.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/langhebrewmodel.py deleted file mode 100644 index 58f4c87..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/langhebrewmodel.py +++ /dev/null @@ -1,200 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Universal charset detector code. -# -# The Initial Developer of the Original Code is -# Simon Montagu -# Portions created by the Initial Developer are Copyright (C) 2005 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# Shy Shalom - original C code -# Shoshannah Forbes - original C code (?) -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. 
-# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -# 255: Control characters that usually does not exist in any text -# 254: Carriage/Return -# 253: symbol (punctuation) that does not belong to word -# 252: 0 - 9 - -# Windows-1255 language model -# Character Mapping Table: -WIN1255_CHAR_TO_ORDER_MAP = ( -255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 -255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 -253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 -252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 -253, 69, 91, 79, 80, 92, 89, 97, 90, 68,111,112, 82, 73, 95, 85, # 40 - 78,121, 86, 71, 67,102,107, 84,114,103,115,253,253,253,253,253, # 50 -253, 50, 74, 60, 61, 42, 76, 70, 64, 53,105, 93, 56, 65, 54, 49, # 60 - 66,110, 51, 43, 44, 63, 81, 77, 98, 75,108,253,253,253,253,253, # 70 -124,202,203,204,205, 40, 58,206,207,208,209,210,211,212,213,214, -215, 83, 52, 47, 46, 72, 32, 94,216,113,217,109,218,219,220,221, - 34,116,222,118,100,223,224,117,119,104,125,225,226, 87, 99,227, -106,122,123,228, 55,229,230,101,231,232,120,233, 48, 39, 57,234, - 30, 59, 41, 88, 33, 37, 36, 31, 29, 35,235, 62, 28,236,126,237, -238, 38, 45,239,240,241,242,243,127,244,245,246,247,248,249,250, - 9, 8, 20, 16, 3, 2, 24, 14, 22, 1, 25, 15, 4, 11, 6, 23, - 12, 19, 13, 26, 18, 27, 21, 17, 7, 10, 5,251,252,128, 96,253, -) - -# Model Table: -# total sequences: 100% -# first 512 sequences: 98.4004% -# first 1024 sequences: 
1.5981% -# rest sequences: 0.087% -# negative sequences: 0.0015% -HEBREW_LANG_MODEL = ( -0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0, -3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1, -3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2, -1,2,1,2,1,2,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, -3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2, -1,2,1,3,1,1,0,0,2,0,0,0,1,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, -3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,1,2,2,1,3, -1,2,1,1,2,2,0,0,2,2,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1,0, -3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,2,2,2,3,2, -1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, -3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,3,2,2,3,2,2,2,1,2,2,2,2, -1,2,1,1,2,2,0,1,2,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0, -3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,0,2,2,2,2,2, -0,2,0,2,2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, -3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,0,2,2,2, -0,2,1,2,2,2,0,0,2,1,0,0,0,0,1,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0, -3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,2,1,2,3,2,2,2, -1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0, -3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,2,0,2, -0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,2,0,0,1,0, -3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,2,2,3,2,1,2,1,1,1, -0,1,1,1,1,1,3,0,1,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, -3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,0,0,1,0,0,1,0,0,0,0, -0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2, -0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, -3,3,3,3,3,3,3,3,3,2,3,3,3,2,1,2,3,3,2,3,3,3,3,2,3,2,1,2,0,2,1,2, -0,2,0,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0, 
-3,3,3,3,3,3,3,3,3,2,3,3,3,1,2,2,3,3,2,3,2,3,2,2,3,1,2,2,0,2,2,2, -0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,1,0, -3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,2,2,3,3,3,3,1,3,2,2,2, -0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, -3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,2,3,2,2,2,1,2,2,0,2,2,2,2, -0,2,0,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, -3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,1,3,2,3,3,2,3,3,2,2,1,2,2,2,2,2,2, -0,2,1,2,1,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0, -3,3,3,3,3,3,2,3,2,3,3,2,3,3,3,3,2,3,2,3,3,3,3,3,2,2,2,2,2,2,2,1, -0,2,0,1,2,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, -3,3,3,3,3,3,3,3,3,2,1,2,3,3,3,3,3,3,3,2,3,2,3,2,1,2,3,0,2,1,2,2, -0,2,1,1,2,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,0, -3,3,3,3,3,3,3,3,3,2,3,3,3,3,2,1,3,1,2,2,2,1,2,3,3,1,2,1,2,2,2,2, -0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0, -3,3,3,3,3,3,3,3,3,3,0,2,3,3,3,1,3,3,3,1,2,2,2,2,1,1,2,2,2,2,2,2, -0,2,0,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, -3,3,3,3,3,3,2,3,3,3,2,2,3,3,3,2,1,2,3,2,3,2,2,2,2,1,2,1,1,1,2,2, -0,2,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, -3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,1,0,0,0,0,0, -1,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,3,3,2,3,3,2,3,1,2,2,2,2,3,2,3,1,1,2,2,1,2,2,1,1,0,2,2,2,2, -0,1,0,1,2,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, -3,0,0,1,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,0, -0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,0,1,0,1,0,1,1,0,1,1,0,0,0,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0, -0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, -3,2,2,1,2,2,2,2,2,2,2,1,2,2,1,2,2,1,1,1,1,1,1,1,1,2,1,1,0,3,3,3, -0,3,0,2,2,2,2,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, 
-2,2,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,1,2,2,2,1,1,1,2,0,1, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,0,2,2,0,0,0,0,0,0, -0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,3,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,1,0,2,1,0, -0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, -0,3,1,1,2,2,2,2,2,1,2,2,2,1,1,2,2,2,2,2,2,2,1,2,2,1,0,1,1,1,1,0, -0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,2,1,1,1,1,2,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0, -0,0,2,0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,1,0,0, -2,1,1,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,1,2,1,2,1,1,1,1,0,0,0,0, -0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -1,2,1,2,2,2,2,2,2,2,2,2,2,1,2,1,2,1,1,2,1,1,1,2,1,2,1,2,0,1,0,1, -0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,3,1,2,2,2,1,2,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,2,1,2,1,1,0,1,0,1, -0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,1,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2, -0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, -3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,1,1,1,1,1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,2,0,1,1,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,0,0, -0,1,1,1,2,1,2,2,2,0,2,0,2,0,1,1,2,1,1,1,1,2,1,0,1,1,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, -1,0,1,0,0,0,0,0,1,0,1,2,2,0,1,0,0,1,1,2,2,1,2,0,2,0,0,0,1,2,0,1, -2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,2,0,2,1,2,0,2,0,0,1,1,1,1,1,1,0,1,0,0,0,1,0,0,1, 
-2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,1,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,1,2,2,0,0,1,0,0,0,1,0,0,1, -1,1,2,1,0,1,1,1,0,1,0,1,1,1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,2,1, -0,2,0,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,1,0,0,1,0,1,1,1,1,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,1,0,0,0,1,1,0,1, -2,0,1,0,1,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,1,0,1,1,1,0,1,0,0,1,1,2,1,1,2,0,1,0,0,0,1,1,0,1, -1,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,0,0,2,1,1,2,0,2,0,0,0,1,1,0,1, -1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,2,2,1,2,1,1,0,1,0,0,0,1,1,0,1, -2,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,0,1, -1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,2,1,1,1,0,2,1,1,0,0,0,2,1,0,1, -1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,0,2,1,1,0,1,0,0,0,1,1,0,1, -2,2,1,1,1,0,1,1,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,0,1,2,1,0,2,0,0,0,1,1,0,1, -2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0, 
-0,1,0,0,2,0,2,1,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,1, -1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,2,1,1,1,1,1,0,1,0,0,0,0,1,0,1, -0,1,1,1,2,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,1,2,1,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,1,1,0,0, -) - -Win1255HebrewModel = { - 'char_to_order_map': WIN1255_CHAR_TO_ORDER_MAP, - 'precedence_matrix': HEBREW_LANG_MODEL, - 'typical_positive_ratio': 0.984004, - 'keep_english_letter': False, - 'charset_name': "windows-1255", - 'language': 'Hebrew', -} diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/langhungarianmodel.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/langhungarianmodel.py deleted file mode 100644 index bb7c095..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/langhungarianmodel.py +++ /dev/null @@ -1,225 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Communicator client code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. 
-# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -# 255: Control characters that usually does not exist in any text -# 254: Carriage/Return -# 253: symbol (punctuation) that does not belong to word -# 252: 0 - 9 - -# Character Mapping Table: -Latin2_HungarianCharToOrderMap = ( -255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 -255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 -253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 -252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 -253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47, - 46, 71, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253, -253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8, - 23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253, -159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174, -175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, -191,192,193,194,195,196,197, 75,198,199,200,201,202,203,204,205, - 79,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220, -221, 51, 81,222, 78,223,224,225,226, 44,227,228,229, 61,230,231, -232,233,234, 58,235, 66, 59,236,237,238, 60, 69, 63,239,240,241, - 82, 14, 74,242, 70, 80,243, 72,244, 15, 83, 77, 84, 30, 76, 85, -245,246,247, 25, 73, 42, 24,248,249,250, 31, 56, 29,251,252,253, -) - -win1250HungarianCharToOrderMap = ( -255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 
-255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 -253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 -252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 -253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47, - 46, 72, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253, -253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8, - 23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253, -161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176, -177,178,179,180, 78,181, 69,182,183,184,185,186,187,188,189,190, -191,192,193,194,195,196,197, 76,198,199,200,201,202,203,204,205, - 81,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220, -221, 51, 83,222, 80,223,224,225,226, 44,227,228,229, 61,230,231, -232,233,234, 58,235, 66, 59,236,237,238, 60, 70, 63,239,240,241, - 84, 14, 75,242, 71, 82,243, 73,244, 15, 85, 79, 86, 30, 77, 87, -245,246,247, 25, 74, 42, 24,248,249,250, 31, 56, 29,251,252,253, -) - -# Model Table: -# total sequences: 100% -# first 512 sequences: 94.7368% -# first 1024 sequences:5.2623% -# rest sequences: 0.8894% -# negative sequences: 0.0009% -HungarianLangModel = ( -0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, -3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,2,3,3,1,1,2,2,2,2,2,1,2, -3,2,2,3,3,3,3,3,2,3,3,3,3,3,3,1,2,3,3,3,3,2,3,3,1,1,3,3,0,1,1,1, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0, -3,2,1,3,3,3,3,3,2,3,3,3,3,3,1,1,2,3,3,3,3,3,3,3,1,1,3,2,0,1,1,1, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, -3,3,3,3,3,3,3,3,3,3,3,1,1,2,3,3,3,1,3,3,3,3,3,1,3,3,2,2,0,3,2,3, -0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0, -3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,3,3,2,3,3,2,2,3,2,3,2,0,3,2,2, -0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0, -3,3,3,3,3,3,2,3,3,3,3,3,2,3,3,3,1,2,3,2,2,3,1,2,3,3,2,2,0,3,3,3, -0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, 
-3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,3,2,3,3,3,3,2,3,3,3,3,0,2,3,2, -0,0,0,1,1,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, -3,3,3,3,3,3,3,3,3,3,3,1,1,1,3,3,2,1,3,2,2,3,2,1,3,2,2,1,0,3,3,1, -0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, -3,2,2,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,3,2,2,3,1,1,3,2,0,1,1,1, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, -3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,1,3,3,3,3,3,2,2,1,3,3,3,0,1,1,2, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0, -3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,2,0,3,2,3, -0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,1,0, -3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,1,3,2,2,2,3,1,1,3,3,1,1,0,3,3,2, -0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, -3,3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,2,3,3,3,3,3,1,2,3,2,2,0,2,2,2, -0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, -3,3,3,2,2,2,3,1,3,3,2,2,1,3,3,3,1,1,3,1,2,3,2,3,2,2,2,1,0,2,2,2, -0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0, -3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,2,2,3,2,1,0,3,2,0,1,1,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,1,0,3,3,3,3,0,2,3,0,0,2,1,0,1,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,3,3,3,2,2,3,3,2,2,2,2,3,3,0,1,2,3,2,3,2,2,3,2,1,2,0,2,2,2, -0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0, -3,3,3,3,3,3,1,2,3,3,3,2,1,2,3,3,2,2,2,3,2,3,3,1,3,3,1,1,0,2,3,2, -0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, -3,3,3,1,2,2,2,2,3,3,3,1,1,1,3,3,1,1,3,1,1,3,2,1,2,3,1,1,0,2,2,2, -0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, -3,3,3,2,1,2,1,1,3,3,1,1,1,1,3,3,1,1,2,2,1,2,1,1,2,2,1,1,0,2,2,1, -0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, -3,3,3,1,1,2,1,1,3,3,1,0,1,1,3,3,2,0,1,1,2,3,1,0,2,2,1,0,0,1,3,2, -0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, 
-3,2,1,3,3,3,3,3,1,2,3,2,3,3,2,1,1,3,2,3,2,1,2,2,0,1,2,1,0,0,1,1, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, -3,3,3,3,2,2,2,2,3,1,2,2,1,1,3,3,0,3,2,1,2,3,2,1,3,3,1,1,0,2,1,3, -0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, -3,3,3,2,2,2,3,2,3,3,3,2,1,1,3,3,1,1,1,2,2,3,2,3,2,2,2,1,0,2,2,1, -0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, -1,0,0,3,3,3,3,3,0,0,3,3,2,3,0,0,0,2,3,3,1,0,1,2,0,0,1,1,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,1,2,3,3,3,3,3,1,2,3,3,2,2,1,1,0,3,3,2,2,1,2,2,1,0,2,2,0,1,1,1, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,2,2,1,3,1,2,3,3,2,2,1,1,2,2,1,1,1,1,3,2,1,1,1,1,2,1,0,1,2,1, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0, -2,3,3,1,1,1,1,1,3,3,3,0,1,1,3,3,1,1,1,1,1,2,2,0,3,1,1,2,0,2,1,1, -0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, -3,1,0,1,2,1,2,2,0,1,2,3,1,2,0,0,0,2,1,1,1,1,1,2,0,0,1,1,0,0,0,0, -1,2,1,2,2,2,1,2,1,2,0,2,0,2,2,1,1,2,1,1,2,1,1,1,0,1,0,0,0,1,1,0, -1,1,1,2,3,2,3,3,0,1,2,2,3,1,0,1,0,2,1,2,2,0,1,1,0,0,1,1,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -1,0,0,3,3,2,2,1,0,0,3,2,3,2,0,0,0,1,1,3,0,0,1,1,0,0,2,1,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,1,1,2,2,3,3,1,0,1,3,2,3,1,1,1,0,1,1,1,1,1,3,1,0,0,2,2,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,1,1,1,2,2,2,1,0,1,2,3,3,2,0,0,0,2,1,1,1,2,1,1,1,0,1,1,1,0,0,0, -1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,2,1,1,1,1,1,1,0,1,1,1,0,0,1,1, -3,2,2,1,0,0,1,1,2,2,0,3,0,1,2,1,1,0,0,1,1,1,0,1,1,1,1,0,2,1,1,1, -2,2,1,1,1,2,1,2,1,1,1,1,1,1,1,2,1,1,1,2,3,1,1,1,1,1,1,1,1,1,0,1, -2,3,3,0,1,0,0,0,3,3,1,0,0,1,2,2,1,0,0,0,0,2,0,0,1,1,1,0,2,1,1,1, -2,1,1,1,1,1,1,2,1,1,0,1,1,0,1,1,1,0,1,2,1,1,0,1,1,1,1,1,1,1,0,1, -2,3,3,0,1,0,0,0,2,2,0,0,0,0,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,1,0, -2,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1, 
-3,2,2,0,1,0,1,0,2,3,2,0,0,1,2,2,1,0,0,1,1,1,0,0,2,1,0,1,2,2,1,1, -2,1,1,1,1,1,1,2,1,1,1,1,1,1,0,2,1,0,1,1,0,1,1,1,0,1,1,2,1,1,0,1, -2,2,2,0,0,1,0,0,2,2,1,1,0,0,2,1,1,0,0,0,1,2,0,0,2,1,0,0,2,1,1,1, -2,1,1,1,1,2,1,2,1,1,1,2,2,1,1,2,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1, -1,2,3,0,0,0,1,0,3,2,1,0,0,1,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,2,1, -1,1,0,0,0,1,0,1,1,1,1,1,2,0,0,1,0,0,0,2,0,0,1,1,1,1,1,1,1,1,0,1, -3,0,0,2,1,2,2,1,0,0,2,1,2,2,0,0,0,2,1,1,1,0,1,1,0,0,1,1,2,0,0,0, -1,2,1,2,2,1,1,2,1,2,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,0,0,1, -1,3,2,0,0,0,1,0,2,2,2,0,0,0,2,2,1,0,0,0,0,3,1,1,1,1,0,0,2,1,1,1, -2,1,0,1,1,1,0,1,1,1,1,1,1,1,0,2,1,0,0,1,0,1,1,0,1,1,1,1,1,1,0,1, -2,3,2,0,0,0,1,0,2,2,0,0,0,0,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,1,0, -2,1,1,1,1,2,1,2,1,2,0,1,1,1,0,2,1,1,1,2,1,1,1,1,0,1,1,1,1,1,0,1, -3,1,1,2,2,2,3,2,1,1,2,2,1,1,0,1,0,2,2,1,1,1,1,1,0,0,1,1,0,1,1,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,2,2,0,0,0,0,0,2,2,0,0,0,0,2,2,1,0,0,0,1,1,0,0,1,2,0,0,2,1,1,1, -2,2,1,1,1,2,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,1,1,0,1,2,1,1,1,0,1, -1,0,0,1,2,3,2,1,0,0,2,0,1,1,0,0,0,1,1,1,1,0,1,1,0,0,1,0,0,0,0,0, -1,2,1,2,1,2,1,1,1,2,0,2,1,1,1,0,1,2,0,0,1,1,1,0,0,0,0,0,0,0,0,0, -2,3,2,0,0,0,0,0,1,1,2,1,0,0,1,1,1,0,0,0,0,2,0,0,1,1,0,0,2,1,1,1, -2,1,1,1,1,1,1,2,1,0,1,1,1,1,0,2,1,1,1,1,1,1,0,1,0,1,1,1,1,1,0,1, -1,2,2,0,1,1,1,0,2,2,2,0,0,0,3,2,1,0,0,0,1,1,0,0,1,1,0,1,1,1,0,0, -1,1,0,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,0,0,1,1,1,0,1,0,1, -2,1,0,2,1,1,2,2,1,1,2,1,1,1,0,0,0,1,1,0,1,1,1,1,0,0,1,1,1,0,0,0, -1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,1,0, -1,2,3,0,0,0,1,0,2,2,0,0,0,0,2,2,0,0,0,0,0,1,0,0,1,0,0,0,2,0,1,0, -2,1,1,1,1,1,0,2,0,0,0,1,2,1,1,1,1,0,1,2,0,1,0,1,0,1,1,1,0,1,0,1, -2,2,2,0,0,0,1,0,2,1,2,0,0,0,1,1,2,0,0,0,0,1,0,0,1,1,0,0,2,1,0,1, -2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1, -1,2,2,0,0,0,1,0,2,2,2,0,0,0,1,1,0,0,0,0,0,1,1,0,2,0,0,1,1,1,0,1, -1,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,0,0,1,1,0,1,0,1,1,1,1,1,0,0,0,1, 
-1,0,0,1,0,1,2,1,0,0,1,1,1,2,0,0,0,1,1,0,1,0,1,1,0,0,1,0,0,0,0,0, -0,2,1,2,1,1,1,1,1,2,0,2,0,1,1,0,1,2,1,0,1,1,1,0,0,0,0,0,0,1,0,0, -2,1,1,0,1,2,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,2,1,0,1, -2,2,1,1,1,1,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,0,1,0,1,1,1,1,1,0,1, -1,2,2,0,0,0,0,0,1,1,0,0,0,0,2,1,0,0,0,0,0,2,0,0,2,2,0,0,2,0,0,1, -2,1,1,1,1,1,1,1,0,1,1,0,1,1,0,1,0,0,0,1,1,1,1,0,0,1,1,1,1,0,0,1, -1,1,2,0,0,3,1,0,2,1,1,1,0,0,1,1,1,0,0,0,1,1,0,0,0,1,0,0,1,0,1,0, -1,2,1,0,1,1,1,2,1,1,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,1,0,0,0,1,0,0, -2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,2,0,0,0, -2,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,1,0,1, -2,1,1,1,2,1,1,1,0,1,1,2,1,0,0,0,0,1,1,1,1,0,1,0,0,0,0,1,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -1,1,0,1,1,1,1,1,0,0,1,1,2,1,0,0,0,1,1,0,0,0,1,1,0,0,1,0,1,0,0,0, -1,2,1,1,1,1,1,1,1,1,0,1,0,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0, -2,0,0,0,1,1,1,1,0,0,1,1,0,0,0,0,0,1,1,1,2,0,0,1,0,0,1,0,1,0,0,0, -0,1,1,1,1,1,1,1,1,2,0,1,1,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0, -1,0,0,1,1,1,1,1,0,0,2,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0, -0,1,1,1,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,0, -1,0,0,1,1,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, -0,1,1,1,1,1,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0, -0,0,0,1,0,0,0,0,0,0,1,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,1,1,1,0,1,0,0,1,1,0,1,0,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0, -2,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,0,1,0,0,1,0,1,0,1,1,1,0,0,1,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -1,0,0,1,1,1,1,0,0,0,1,1,1,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0, -0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0, -) - -Latin2HungarianModel = { - 'char_to_order_map': Latin2_HungarianCharToOrderMap, - 'precedence_matrix': HungarianLangModel, - 'typical_positive_ratio': 0.947368, - 'keep_english_letter': True, - 'charset_name': "ISO-8859-2", - 'language': 'Hungarian', -} - 
-Win1250HungarianModel = { - 'char_to_order_map': win1250HungarianCharToOrderMap, - 'precedence_matrix': HungarianLangModel, - 'typical_positive_ratio': 0.947368, - 'keep_english_letter': True, - 'charset_name': "windows-1250", - 'language': 'Hungarian', -} diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/langthaimodel.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/langthaimodel.py deleted file mode 100644 index 15f94c2..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/langthaimodel.py +++ /dev/null @@ -1,199 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Communicator client code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -# 255: Control characters that usually does not exist in any text -# 254: Carriage/Return -# 253: symbol (punctuation) that does not belong to word -# 252: 0 - 9 - -# The following result for thai was collected from a limited sample (1M). - -# Character Mapping Table: -TIS620CharToOrderMap = ( -255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 -255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 -253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 -252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 -253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40 -188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50 -253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60 - 96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70 -209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222, -223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235, -236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57, - 49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54, - 45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63, - 22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244, - 11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247, - 68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253, -) - -# Model Table: -# total sequences: 100% -# first 512 sequences: 92.6386% -# first 1024 sequences:7.3177% -# rest sequences: 1.0230% -# negative sequences: 0.0436% -ThaiLangModel = ( -0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3, -0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2, 
-3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3, -0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1, -3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2, -3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1, -3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2, -3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1, -3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1, -3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0, -3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1, -2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1, -3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1, -0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0, -3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1, -0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0, -3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2, -1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0, -3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3, -3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0, -1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2, -0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0, -2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3, -0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0, -3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1, -2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0, -3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2, -0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2, -3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0, -3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0, -2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2, 
-3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1, -2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1, -3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1, -3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0, -3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1, -3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, -3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1, -3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1, -1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2, -0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, -3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3, -0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1, -3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0, -3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1, -1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0, -3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1, -3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0, -0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2, -0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, -0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0, -0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0, -1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
-3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1, -1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1, -3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1, -0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, -0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0, -0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0, -3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0, -3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0, -0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1, -0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0, -0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1, -0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1, -0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0, -0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1, -0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0, -3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0, -0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0, -0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0, -3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1, -2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1, -0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
-3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0, -3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0, -0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0, -2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0, -1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3, -1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0, -1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, -1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0, -2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0, -2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0, 
-1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -) - -TIS620ThaiModel = { - 'char_to_order_map': TIS620CharToOrderMap, - 'precedence_matrix': ThaiLangModel, - 'typical_positive_ratio': 0.926386, - 'keep_english_letter': False, - 'charset_name': "TIS-620", - 'language': 'Thai', -} diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/langturkishmodel.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/langturkishmodel.py deleted file mode 100644 index a427a45..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/langturkishmodel.py +++ /dev/null @@ -1,193 +0,0 @@ -# -*- coding: utf-8 -*- -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Communicator client code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# Özgür Baskın - Turkish Language Model -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -# 255: Control characters that usually does not exist in any text -# 254: Carriage/Return -# 253: symbol (punctuation) that does not belong to word -# 252: 0 - 9 - -# Character Mapping Table: -Latin5_TurkishCharToOrderMap = ( -255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, -255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, -255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, -255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, -255, 23, 37, 47, 39, 29, 52, 36, 45, 53, 60, 16, 49, 20, 46, 42, - 48, 69, 44, 35, 31, 51, 38, 62, 65, 43, 56,255,255,255,255,255, -255, 1, 21, 28, 12, 2, 18, 27, 25, 3, 24, 10, 5, 13, 4, 15, - 26, 64, 7, 8, 9, 14, 32, 57, 58, 11, 22,255,255,255,255,255, -180,179,178,177,176,175,174,173,172,171,170,169,168,167,166,165, -164,163,162,161,160,159,101,158,157,156,155,154,153,152,151,106, -150,149,148,147,146,145,144,100,143,142,141,140,139,138,137,136, - 94, 80, 93,135,105,134,133, 63,132,131,130,129,128,127,126,125, -124,104, 73, 99, 79, 85,123, 54,122, 98, 92,121,120, 91,103,119, - 68,118,117, 97,116,115, 50, 90,114,113,112,111, 55, 41, 40, 86, - 89, 70, 59, 78, 71, 82, 88, 33, 77, 66, 84, 83,110, 75, 61, 96, - 30, 67,109, 74, 87,102, 34, 95, 81,108, 76, 72, 17, 6, 19,107, -) - -TurkishLangModel = ( -3,2,3,3,3,1,3,3,3,3,3,3,3,3,2,1,1,3,3,1,3,3,0,3,3,3,3,3,0,3,1,3, -3,2,1,0,0,1,1,0,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,2,2,0,0,1,0,0,1, -3,2,2,3,3,0,3,3,3,3,3,3,3,2,3,1,0,3,3,1,3,3,0,3,3,3,3,3,0,3,0,3, -3,1,1,0,1,0,1,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,2,2,0,0,0,1,0,1, -3,3,2,3,3,0,3,3,3,3,3,3,3,2,3,1,1,3,3,0,3,3,1,2,3,3,3,3,0,3,0,3, -3,1,1,0,0,0,1,0,0,0,0,1,1,0,1,2,1,0,0,0,1,0,0,0,0,2,0,0,0,0,0,1, 
-3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,1,3,3,2,0,3,2,1,2,2,1,3,3,0,0,0,2, -2,2,0,1,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,1,0,0,1, -3,3,3,2,3,3,1,2,3,3,3,3,3,3,3,1,3,2,1,0,3,2,0,1,2,3,3,2,1,0,0,2, -2,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,2,0,0,0, -1,0,1,3,3,1,3,3,3,3,3,3,3,1,2,0,0,2,3,0,2,3,0,0,2,2,2,3,0,3,0,1, -2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,0,3,2,0,2,3,2,3,3,1,0,0,2, -3,2,0,0,1,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,1,1,1,0,2,0,0,1, -3,3,3,2,3,3,2,3,3,3,3,2,3,3,3,0,3,3,0,0,2,1,0,0,2,3,2,2,0,0,0,2, -2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,0,1,0,2,0,0,1, -3,3,3,2,3,3,3,3,3,3,3,2,3,3,3,0,3,2,0,1,3,2,1,1,3,2,3,2,1,0,0,2, -2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, -3,3,3,2,3,3,3,3,3,3,3,2,3,3,3,0,3,2,2,0,2,3,0,0,2,2,2,2,0,0,0,2, -3,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,2,0,1,0,0,0, -3,3,3,3,3,3,3,2,2,2,2,3,2,3,3,0,3,3,1,1,2,2,0,0,2,2,3,2,0,0,1,3, -0,3,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1, -3,3,3,2,3,3,3,2,1,2,2,3,2,3,3,0,3,2,0,0,1,1,0,1,1,2,1,2,0,0,0,1, -0,3,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,0,0, -3,3,3,2,3,3,2,3,2,2,2,3,3,3,3,1,3,1,1,0,3,2,1,1,3,3,2,3,1,0,0,1, -1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,2,0,0,1, -3,2,2,3,3,0,3,3,3,3,3,3,3,2,2,1,0,3,3,1,3,3,0,1,3,3,2,3,0,3,0,3, -2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0, -2,2,2,3,3,0,3,3,3,3,3,3,3,3,3,0,0,3,2,0,3,3,0,3,2,3,3,3,0,3,1,3, -2,0,0,0,0,0,0,0,0,0,0,1,0,1,2,0,1,0,0,0,0,0,0,0,2,2,0,0,1,0,0,1, -3,3,3,1,2,3,3,1,0,0,1,0,0,3,3,2,3,0,0,2,0,0,2,0,2,0,0,0,2,0,2,0, -0,3,1,0,1,0,0,0,2,2,1,0,1,1,2,1,2,2,2,0,2,1,1,0,0,0,2,0,0,0,0,0, -1,2,1,3,3,0,3,3,3,3,3,2,3,0,0,0,0,2,3,0,2,3,1,0,2,3,1,3,0,3,0,2, -3,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,1,3,3,2,2,3,2,2,0,1,2,3,0,1,2,1,0,1,0,0,0,1,0,2,2,0,0,0,1, -1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0, 
-3,3,3,1,3,3,1,1,3,3,1,1,3,3,1,0,2,1,2,0,2,1,0,0,1,1,2,1,0,0,0,2, -2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,1,0,2,1,3,0,0,2,0,0,3,3,0,3,0,0,1,0,1,2,0,0,1,1,2,2,0,1,0, -0,1,2,1,1,0,1,0,1,1,1,1,1,0,1,1,1,2,2,1,2,0,1,0,0,0,0,0,0,1,0,0, -3,3,3,2,3,2,3,3,0,2,2,2,3,3,3,0,3,0,0,0,2,2,0,1,2,1,1,1,0,0,0,1, -0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0, -3,3,3,3,3,3,2,1,2,2,3,3,3,3,2,0,2,0,0,0,2,2,0,0,2,1,3,3,0,0,1,1, -1,1,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0, -1,1,2,3,3,0,3,3,3,3,3,3,2,2,0,2,0,2,3,2,3,2,2,2,2,2,2,2,1,3,2,3, -2,0,2,1,2,2,2,2,1,1,2,2,1,2,2,1,2,0,0,2,1,1,0,2,1,0,0,1,0,0,0,1, -2,3,3,1,1,1,0,1,1,1,2,3,2,1,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0, -0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,2,2,2,3,2,3,2,2,1,3,3,3,0,2,1,2,0,2,1,0,0,1,1,1,1,1,0,0,1, -2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,2,0,1,0,0,0, -3,3,3,2,3,3,3,3,3,2,3,1,2,3,3,1,2,0,0,0,0,0,0,0,3,2,1,1,0,0,0,0, -2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0, -3,3,3,2,2,3,3,2,1,1,1,1,1,3,3,0,3,1,0,0,1,1,0,0,3,1,2,1,0,0,0,0, -0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0, -3,3,3,2,2,3,2,2,2,3,2,1,1,3,3,0,3,0,0,0,0,1,0,0,3,1,1,2,0,0,0,1, -1,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, -1,1,1,3,3,0,3,3,3,3,3,2,2,2,1,2,0,2,1,2,2,1,1,0,1,2,2,2,2,2,2,2, -0,0,2,1,2,1,2,1,0,1,1,3,1,2,1,1,2,0,0,2,0,1,0,1,0,1,0,0,0,1,0,1, -3,3,3,1,3,3,3,0,1,1,0,2,2,3,1,0,3,0,0,0,1,0,0,0,1,0,0,1,0,1,0,0, -1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,2,0,0,2,2,1,0,0,1,0,0,3,3,1,3,0,0,1,1,0,2,0,3,0,0,0,2,0,1,1, -0,1,2,0,1,2,2,0,2,2,2,2,1,0,2,1,1,0,2,0,2,1,2,0,0,0,0,0,0,0,0,0, -3,3,3,1,3,2,3,2,0,2,2,2,1,3,2,0,2,1,2,0,1,2,0,0,1,0,2,2,0,0,0,2, -1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0, -3,3,3,0,3,3,1,1,2,3,1,0,3,2,3,0,3,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0, -1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
-0,0,0,3,3,0,3,3,2,3,3,2,2,0,0,0,0,1,2,0,1,3,0,0,0,3,1,1,0,3,0,2, -2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,1,2,2,1,0,3,1,1,1,1,3,3,2,3,0,0,1,0,1,2,0,2,2,0,2,2,0,2,1, -0,2,2,1,1,1,1,0,2,1,1,0,1,1,1,1,2,1,2,1,2,0,1,0,1,0,0,0,0,0,0,0, -3,3,3,0,1,1,3,0,0,1,1,0,0,2,2,0,3,0,0,1,1,0,1,0,0,0,0,0,2,0,0,0, -0,3,1,0,1,0,1,0,2,0,0,1,0,1,0,1,1,1,2,1,1,0,2,0,0,0,0,0,0,0,0,0, -3,3,3,0,2,0,2,0,1,1,1,0,0,3,3,0,2,0,0,1,0,0,2,1,1,0,1,0,1,0,1,0, -0,2,0,1,2,0,2,0,2,1,1,0,1,0,2,1,1,0,2,1,1,0,1,0,0,0,1,1,0,0,0,0, -3,2,3,0,1,0,0,0,0,0,0,0,0,1,2,0,1,0,0,1,0,0,1,0,0,0,0,0,2,0,0,0, -0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,2,1,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,0,0,2,3,0,0,1,0,1,0,2,3,2,3,0,0,1,3,0,2,1,0,0,0,0,2,0,1,0, -0,2,1,0,0,1,1,0,2,1,0,0,1,0,0,1,1,0,1,1,2,0,1,0,0,0,0,1,0,0,0,0, -3,2,2,0,0,1,1,0,0,0,0,0,0,3,1,1,1,0,0,0,0,0,1,0,0,0,0,0,2,0,1,0, -0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0, -0,0,0,3,3,0,2,3,2,2,1,2,2,1,1,2,0,1,3,2,2,2,0,0,2,2,0,0,0,1,2,1, -3,0,2,1,1,0,1,1,1,0,1,2,2,2,1,1,2,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0, -0,1,1,2,3,0,3,3,3,2,2,2,2,1,0,1,0,1,0,1,2,2,0,0,2,2,1,3,1,1,2,1, -0,0,1,1,2,0,1,1,0,0,1,2,0,2,1,1,2,0,0,1,0,0,0,1,0,1,0,1,0,0,0,0, -3,3,2,0,0,3,1,0,0,0,0,0,0,3,2,1,2,0,0,1,0,0,2,0,0,0,0,0,2,0,1,0, -0,2,1,1,0,0,1,0,1,2,0,0,1,1,0,0,2,1,1,1,1,0,2,0,0,0,0,0,0,0,0,0, -3,3,2,0,0,1,0,0,0,0,1,0,0,3,3,2,2,0,0,1,0,0,2,0,1,0,0,0,2,0,1,0, -0,0,1,1,0,0,2,0,2,1,0,0,1,1,2,1,2,0,2,1,2,1,1,1,0,0,1,1,0,0,0,0, -3,3,2,0,0,2,2,0,0,0,1,1,0,2,2,1,3,1,0,1,0,1,2,0,0,0,0,0,1,0,1,0, -0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,2,0,0,0,1,0,0,1,0,0,2,3,1,2,0,0,1,0,0,2,0,0,0,1,0,2,0,2,0, -0,1,1,2,2,1,2,0,2,1,1,0,0,1,1,0,1,1,1,1,2,1,1,0,0,0,0,0,0,0,0,0, -3,3,3,0,2,1,2,1,0,0,1,1,0,3,3,1,2,0,0,1,0,0,2,0,2,0,1,1,2,0,0,0, -0,0,1,1,1,1,2,0,1,1,0,1,1,1,1,0,0,0,1,1,1,0,1,0,0,0,1,0,0,0,0,0, -3,3,3,0,2,2,3,2,0,0,1,0,0,2,3,1,0,0,0,0,0,0,2,0,2,0,0,0,2,0,0,0, -0,1,1,0,0,0,1,0,0,1,0,1,1,0,1,0,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0, 
-3,2,3,0,0,0,0,0,0,0,1,0,0,2,2,2,2,0,0,1,0,0,2,0,0,0,0,0,2,0,1,0, -0,0,2,1,1,0,1,0,2,1,1,0,0,1,1,2,1,0,2,0,2,0,1,0,0,0,2,0,0,0,0,0, -0,0,0,2,2,0,2,1,1,1,1,2,2,0,0,1,0,1,0,0,1,3,0,0,0,0,1,0,0,2,1,0, -0,0,1,0,1,0,0,0,0,0,2,1,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0, -2,0,0,2,3,0,2,3,1,2,2,0,2,0,0,2,0,2,1,1,1,2,1,0,0,1,2,1,1,2,1,0, -1,0,2,0,1,0,1,1,0,0,2,2,1,2,1,1,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0, -3,3,3,0,2,1,2,0,0,0,1,0,0,3,2,0,1,0,0,1,0,0,2,0,0,0,1,2,1,0,1,0, -0,0,0,0,1,0,1,0,0,1,0,0,0,0,1,0,1,0,1,1,1,0,1,0,0,0,0,0,0,0,0,0, -0,0,0,2,2,0,2,2,1,1,0,1,1,1,1,1,0,0,1,2,1,1,1,0,1,0,0,0,1,1,1,1, -0,0,2,1,0,1,1,1,0,1,1,2,1,2,1,1,2,0,1,1,2,1,0,2,0,0,0,0,0,0,0,0, -3,2,2,0,0,2,0,0,0,0,0,0,0,2,2,0,2,0,0,1,0,0,2,0,0,0,0,0,2,0,0,0, -0,2,1,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0, -0,0,0,3,2,0,2,2,0,1,1,0,1,0,0,1,0,0,0,1,0,1,0,0,0,0,0,1,0,0,0,0, -2,0,1,0,1,0,1,1,0,0,1,2,0,1,0,1,1,0,0,1,0,1,0,2,0,0,0,0,0,0,0,0, -2,2,2,0,1,1,0,0,0,1,0,0,0,1,2,0,1,0,0,1,0,0,1,0,0,0,0,1,2,0,1,0, -0,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0, -2,2,2,2,1,0,1,1,1,0,0,0,0,1,2,0,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0, -1,1,2,0,1,0,0,0,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,1, -0,0,1,2,2,0,2,1,2,1,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,0,0,0,1,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0, -2,2,2,0,0,0,1,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, -0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -2,2,2,0,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
-0,0,1,0,0,0,0,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -) - -Latin5TurkishModel = { - 'char_to_order_map': Latin5_TurkishCharToOrderMap, - 'precedence_matrix': TurkishLangModel, - 'typical_positive_ratio': 0.970290, - 'keep_english_letter': True, - 'charset_name': "ISO-8859-9", - 'language': 'Turkish', -} diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/latin1prober.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/latin1prober.py deleted file mode 100644 index 7d1e8c2..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/latin1prober.py +++ /dev/null @@ -1,145 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Universal charset detector code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 2001 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# Shy Shalom - original C code -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .charsetprober import CharSetProber -from .enums import ProbingState - -FREQ_CAT_NUM = 4 - -UDF = 0 # undefined -OTH = 1 # other -ASC = 2 # ascii capital letter -ASS = 3 # ascii small letter -ACV = 4 # accent capital vowel -ACO = 5 # accent capital other -ASV = 6 # accent small vowel -ASO = 7 # accent small other -CLASS_NUM = 8 # total classes - -Latin1_CharToClass = ( - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07 - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17 - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27 - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37 - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F - OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47 - ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F - ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57 - ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F - OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67 - ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F - ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77 - ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F - OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH, # 80 - 87 - OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF, # 88 - 8F - UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 90 - 97 - OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO, # 98 - 9F - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A0 - A7 - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A8 - AF - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7 - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B8 - BF - ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO, # C0 - C7 - ACV, ACV, ACV, ACV, ACV, 
ACV, ACV, ACV, # C8 - CF - ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH, # D0 - D7 - ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO, # D8 - DF - ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO, # E0 - E7 - ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # E8 - EF - ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH, # F0 - F7 - ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO, # F8 - FF -) - -# 0 : illegal -# 1 : very unlikely -# 2 : normal -# 3 : very likely -Latin1ClassModel = ( -# UDF OTH ASC ASS ACV ACO ASV ASO - 0, 0, 0, 0, 0, 0, 0, 0, # UDF - 0, 3, 3, 3, 3, 3, 3, 3, # OTH - 0, 3, 3, 3, 3, 3, 3, 3, # ASC - 0, 3, 3, 3, 1, 1, 3, 3, # ASS - 0, 3, 3, 3, 1, 2, 1, 2, # ACV - 0, 3, 3, 3, 3, 3, 3, 3, # ACO - 0, 3, 1, 3, 1, 1, 1, 3, # ASV - 0, 3, 1, 3, 1, 1, 3, 3, # ASO -) - - -class Latin1Prober(CharSetProber): - def __init__(self): - super(Latin1Prober, self).__init__() - self._last_char_class = None - self._freq_counter = None - self.reset() - - def reset(self): - self._last_char_class = OTH - self._freq_counter = [0] * FREQ_CAT_NUM - CharSetProber.reset(self) - - @property - def charset_name(self): - return "ISO-8859-1" - - @property - def language(self): - return "" - - def feed(self, byte_str): - byte_str = self.filter_with_english_letters(byte_str) - for c in byte_str: - char_class = Latin1_CharToClass[c] - freq = Latin1ClassModel[(self._last_char_class * CLASS_NUM) - + char_class] - if freq == 0: - self._state = ProbingState.NOT_ME - break - self._freq_counter[freq] += 1 - self._last_char_class = char_class - - return self.state - - def get_confidence(self): - if self.state == ProbingState.NOT_ME: - return 0.01 - - total = sum(self._freq_counter) - if total < 0.01: - confidence = 0.0 - else: - confidence = ((self._freq_counter[3] - self._freq_counter[1] * 20.0) - / total) - if confidence < 0.0: - confidence = 0.0 - # lower the confidence of latin1 so that other more accurate - # detector can take priority. 
- confidence = confidence * 0.73 - return confidence diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/mbcharsetprober.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/mbcharsetprober.py deleted file mode 100644 index 6256ecf..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/mbcharsetprober.py +++ /dev/null @@ -1,91 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Universal charset detector code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 2001 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# Shy Shalom - original C code -# Proofpoint, Inc. -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .charsetprober import CharSetProber -from .enums import ProbingState, MachineState - - -class MultiByteCharSetProber(CharSetProber): - """ - MultiByteCharSetProber - """ - - def __init__(self, lang_filter=None): - super(MultiByteCharSetProber, self).__init__(lang_filter=lang_filter) - self.distribution_analyzer = None - self.coding_sm = None - self._last_char = [0, 0] - - def reset(self): - super(MultiByteCharSetProber, self).reset() - if self.coding_sm: - self.coding_sm.reset() - if self.distribution_analyzer: - self.distribution_analyzer.reset() - self._last_char = [0, 0] - - @property - def charset_name(self): - raise NotImplementedError - - @property - def language(self): - raise NotImplementedError - - def feed(self, byte_str): - for i in range(len(byte_str)): - coding_state = self.coding_sm.next_state(byte_str[i]) - if coding_state == MachineState.ERROR: - self.logger.debug('%s %s prober hit error at byte %s', - self.charset_name, self.language, i) - self._state = ProbingState.NOT_ME - break - elif coding_state == MachineState.ITS_ME: - self._state = ProbingState.FOUND_IT - break - elif coding_state == MachineState.START: - char_len = self.coding_sm.get_current_charlen() - if i == 0: - self._last_char[1] = byte_str[0] - self.distribution_analyzer.feed(self._last_char, char_len) - else: - self.distribution_analyzer.feed(byte_str[i - 1:i + 1], - char_len) - - self._last_char[0] = byte_str[-1] - - if self.state == ProbingState.DETECTING: - if (self.distribution_analyzer.got_enough_data() and - (self.get_confidence() > self.SHORTCUT_THRESHOLD)): - self._state = ProbingState.FOUND_IT - - return self.state - - def get_confidence(self): - return 
self.distribution_analyzer.get_confidence() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/mbcsgroupprober.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/mbcsgroupprober.py deleted file mode 100644 index 530abe7..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/mbcsgroupprober.py +++ /dev/null @@ -1,54 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Universal charset detector code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 2001 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# Shy Shalom - original C code -# Proofpoint, Inc. -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .charsetgroupprober import CharSetGroupProber -from .utf8prober import UTF8Prober -from .sjisprober import SJISProber -from .eucjpprober import EUCJPProber -from .gb2312prober import GB2312Prober -from .euckrprober import EUCKRProber -from .cp949prober import CP949Prober -from .big5prober import Big5Prober -from .euctwprober import EUCTWProber - - -class MBCSGroupProber(CharSetGroupProber): - def __init__(self, lang_filter=None): - super(MBCSGroupProber, self).__init__(lang_filter=lang_filter) - self.probers = [ - UTF8Prober(), - SJISProber(), - EUCJPProber(), - GB2312Prober(), - EUCKRProber(), - CP949Prober(), - Big5Prober(), - EUCTWProber() - ] - self.reset() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/mbcssm.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/mbcssm.py deleted file mode 100644 index 8360d0f..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/mbcssm.py +++ /dev/null @@ -1,572 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is mozilla.org code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. 
-# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .enums import MachineState - -# BIG5 - -BIG5_CLS = ( - 1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as legal value - 1,1,1,1,1,1,0,0, # 08 - 0f - 1,1,1,1,1,1,1,1, # 10 - 17 - 1,1,1,0,1,1,1,1, # 18 - 1f - 1,1,1,1,1,1,1,1, # 20 - 27 - 1,1,1,1,1,1,1,1, # 28 - 2f - 1,1,1,1,1,1,1,1, # 30 - 37 - 1,1,1,1,1,1,1,1, # 38 - 3f - 2,2,2,2,2,2,2,2, # 40 - 47 - 2,2,2,2,2,2,2,2, # 48 - 4f - 2,2,2,2,2,2,2,2, # 50 - 57 - 2,2,2,2,2,2,2,2, # 58 - 5f - 2,2,2,2,2,2,2,2, # 60 - 67 - 2,2,2,2,2,2,2,2, # 68 - 6f - 2,2,2,2,2,2,2,2, # 70 - 77 - 2,2,2,2,2,2,2,1, # 78 - 7f - 4,4,4,4,4,4,4,4, # 80 - 87 - 4,4,4,4,4,4,4,4, # 88 - 8f - 4,4,4,4,4,4,4,4, # 90 - 97 - 4,4,4,4,4,4,4,4, # 98 - 9f - 4,3,3,3,3,3,3,3, # a0 - a7 - 3,3,3,3,3,3,3,3, # a8 - af - 3,3,3,3,3,3,3,3, # b0 - b7 - 3,3,3,3,3,3,3,3, # b8 - bf - 3,3,3,3,3,3,3,3, # c0 - c7 - 3,3,3,3,3,3,3,3, # c8 - cf - 3,3,3,3,3,3,3,3, # d0 - d7 - 3,3,3,3,3,3,3,3, # d8 - df - 3,3,3,3,3,3,3,3, # e0 - e7 - 3,3,3,3,3,3,3,3, # e8 - ef - 3,3,3,3,3,3,3,3, # f0 - f7 - 3,3,3,3,3,3,3,0 # f8 - ff -) - -BIG5_ST = ( - MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07 - MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,#08-0f - 
MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START#10-17 -) - -BIG5_CHAR_LEN_TABLE = (0, 1, 1, 2, 0) - -BIG5_SM_MODEL = {'class_table': BIG5_CLS, - 'class_factor': 5, - 'state_table': BIG5_ST, - 'char_len_table': BIG5_CHAR_LEN_TABLE, - 'name': 'Big5'} - -# CP949 - -CP949_CLS = ( - 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0, # 00 - 0f - 1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1, # 10 - 1f - 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 20 - 2f - 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 30 - 3f - 1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4, # 40 - 4f - 4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 50 - 5f - 1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5, # 60 - 6f - 5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 70 - 7f - 0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 80 - 8f - 6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 90 - 9f - 6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8, # a0 - af - 7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7, # b0 - bf - 7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2, # c0 - cf - 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # d0 - df - 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # e0 - ef - 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0, # f0 - ff -) - -CP949_ST = ( -#cls= 0 1 2 3 4 5 6 7 8 9 # previous state = - MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START, 4, 5,MachineState.ERROR, 6, # MachineState.START - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, # MachineState.ERROR - MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME, # MachineState.ITS_ME - MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 3 - 
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 4 - MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 5 - MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 6 -) - -CP949_CHAR_LEN_TABLE = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2) - -CP949_SM_MODEL = {'class_table': CP949_CLS, - 'class_factor': 10, - 'state_table': CP949_ST, - 'char_len_table': CP949_CHAR_LEN_TABLE, - 'name': 'CP949'} - -# EUC-JP - -EUCJP_CLS = ( - 4,4,4,4,4,4,4,4, # 00 - 07 - 4,4,4,4,4,4,5,5, # 08 - 0f - 4,4,4,4,4,4,4,4, # 10 - 17 - 4,4,4,5,4,4,4,4, # 18 - 1f - 4,4,4,4,4,4,4,4, # 20 - 27 - 4,4,4,4,4,4,4,4, # 28 - 2f - 4,4,4,4,4,4,4,4, # 30 - 37 - 4,4,4,4,4,4,4,4, # 38 - 3f - 4,4,4,4,4,4,4,4, # 40 - 47 - 4,4,4,4,4,4,4,4, # 48 - 4f - 4,4,4,4,4,4,4,4, # 50 - 57 - 4,4,4,4,4,4,4,4, # 58 - 5f - 4,4,4,4,4,4,4,4, # 60 - 67 - 4,4,4,4,4,4,4,4, # 68 - 6f - 4,4,4,4,4,4,4,4, # 70 - 77 - 4,4,4,4,4,4,4,4, # 78 - 7f - 5,5,5,5,5,5,5,5, # 80 - 87 - 5,5,5,5,5,5,1,3, # 88 - 8f - 5,5,5,5,5,5,5,5, # 90 - 97 - 5,5,5,5,5,5,5,5, # 98 - 9f - 5,2,2,2,2,2,2,2, # a0 - a7 - 2,2,2,2,2,2,2,2, # a8 - af - 2,2,2,2,2,2,2,2, # b0 - b7 - 2,2,2,2,2,2,2,2, # b8 - bf - 2,2,2,2,2,2,2,2, # c0 - c7 - 2,2,2,2,2,2,2,2, # c8 - cf - 2,2,2,2,2,2,2,2, # d0 - d7 - 2,2,2,2,2,2,2,2, # d8 - df - 0,0,0,0,0,0,0,0, # e0 - e7 - 0,0,0,0,0,0,0,0, # e8 - ef - 0,0,0,0,0,0,0,0, # f0 - f7 - 0,0,0,0,0,0,0,5 # f8 - ff -) - -EUCJP_ST = ( - 3, 4, 3, 5,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07 - 
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f - MachineState.ITS_ME,MachineState.ITS_ME,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17 - MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 3,MachineState.ERROR,#18-1f - 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START#20-27 -) - -EUCJP_CHAR_LEN_TABLE = (2, 2, 2, 3, 1, 0) - -EUCJP_SM_MODEL = {'class_table': EUCJP_CLS, - 'class_factor': 6, - 'state_table': EUCJP_ST, - 'char_len_table': EUCJP_CHAR_LEN_TABLE, - 'name': 'EUC-JP'} - -# EUC-KR - -EUCKR_CLS = ( - 1,1,1,1,1,1,1,1, # 00 - 07 - 1,1,1,1,1,1,0,0, # 08 - 0f - 1,1,1,1,1,1,1,1, # 10 - 17 - 1,1,1,0,1,1,1,1, # 18 - 1f - 1,1,1,1,1,1,1,1, # 20 - 27 - 1,1,1,1,1,1,1,1, # 28 - 2f - 1,1,1,1,1,1,1,1, # 30 - 37 - 1,1,1,1,1,1,1,1, # 38 - 3f - 1,1,1,1,1,1,1,1, # 40 - 47 - 1,1,1,1,1,1,1,1, # 48 - 4f - 1,1,1,1,1,1,1,1, # 50 - 57 - 1,1,1,1,1,1,1,1, # 58 - 5f - 1,1,1,1,1,1,1,1, # 60 - 67 - 1,1,1,1,1,1,1,1, # 68 - 6f - 1,1,1,1,1,1,1,1, # 70 - 77 - 1,1,1,1,1,1,1,1, # 78 - 7f - 0,0,0,0,0,0,0,0, # 80 - 87 - 0,0,0,0,0,0,0,0, # 88 - 8f - 0,0,0,0,0,0,0,0, # 90 - 97 - 0,0,0,0,0,0,0,0, # 98 - 9f - 0,2,2,2,2,2,2,2, # a0 - a7 - 2,2,2,2,2,3,3,3, # a8 - af - 2,2,2,2,2,2,2,2, # b0 - b7 - 2,2,2,2,2,2,2,2, # b8 - bf - 2,2,2,2,2,2,2,2, # c0 - c7 - 2,3,2,2,2,2,2,2, # c8 - cf - 2,2,2,2,2,2,2,2, # d0 - d7 - 2,2,2,2,2,2,2,2, # d8 - df - 2,2,2,2,2,2,2,2, # e0 - e7 - 2,2,2,2,2,2,2,2, # e8 - ef - 2,2,2,2,2,2,2,2, # f0 - f7 - 2,2,2,2,2,2,2,0 # f8 - ff -) - -EUCKR_ST = ( - MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07 - 
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #08-0f -) - -EUCKR_CHAR_LEN_TABLE = (0, 1, 2, 0) - -EUCKR_SM_MODEL = {'class_table': EUCKR_CLS, - 'class_factor': 4, - 'state_table': EUCKR_ST, - 'char_len_table': EUCKR_CHAR_LEN_TABLE, - 'name': 'EUC-KR'} - -# EUC-TW - -EUCTW_CLS = ( - 2,2,2,2,2,2,2,2, # 00 - 07 - 2,2,2,2,2,2,0,0, # 08 - 0f - 2,2,2,2,2,2,2,2, # 10 - 17 - 2,2,2,0,2,2,2,2, # 18 - 1f - 2,2,2,2,2,2,2,2, # 20 - 27 - 2,2,2,2,2,2,2,2, # 28 - 2f - 2,2,2,2,2,2,2,2, # 30 - 37 - 2,2,2,2,2,2,2,2, # 38 - 3f - 2,2,2,2,2,2,2,2, # 40 - 47 - 2,2,2,2,2,2,2,2, # 48 - 4f - 2,2,2,2,2,2,2,2, # 50 - 57 - 2,2,2,2,2,2,2,2, # 58 - 5f - 2,2,2,2,2,2,2,2, # 60 - 67 - 2,2,2,2,2,2,2,2, # 68 - 6f - 2,2,2,2,2,2,2,2, # 70 - 77 - 2,2,2,2,2,2,2,2, # 78 - 7f - 0,0,0,0,0,0,0,0, # 80 - 87 - 0,0,0,0,0,0,6,0, # 88 - 8f - 0,0,0,0,0,0,0,0, # 90 - 97 - 0,0,0,0,0,0,0,0, # 98 - 9f - 0,3,4,4,4,4,4,4, # a0 - a7 - 5,5,1,1,1,1,1,1, # a8 - af - 1,1,1,1,1,1,1,1, # b0 - b7 - 1,1,1,1,1,1,1,1, # b8 - bf - 1,1,3,1,3,3,3,3, # c0 - c7 - 3,3,3,3,3,3,3,3, # c8 - cf - 3,3,3,3,3,3,3,3, # d0 - d7 - 3,3,3,3,3,3,3,3, # d8 - df - 3,3,3,3,3,3,3,3, # e0 - e7 - 3,3,3,3,3,3,3,3, # e8 - ef - 3,3,3,3,3,3,3,3, # f0 - f7 - 3,3,3,3,3,3,3,0 # f8 - ff -) - -EUCTW_ST = ( - MachineState.ERROR,MachineState.ERROR,MachineState.START, 3, 3, 3, 4,MachineState.ERROR,#00-07 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f - MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.ERROR,#10-17 - MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f - 
5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,#20-27 - MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f -) - -EUCTW_CHAR_LEN_TABLE = (0, 0, 1, 2, 2, 2, 3) - -EUCTW_SM_MODEL = {'class_table': EUCTW_CLS, - 'class_factor': 7, - 'state_table': EUCTW_ST, - 'char_len_table': EUCTW_CHAR_LEN_TABLE, - 'name': 'x-euc-tw'} - -# GB2312 - -GB2312_CLS = ( - 1,1,1,1,1,1,1,1, # 00 - 07 - 1,1,1,1,1,1,0,0, # 08 - 0f - 1,1,1,1,1,1,1,1, # 10 - 17 - 1,1,1,0,1,1,1,1, # 18 - 1f - 1,1,1,1,1,1,1,1, # 20 - 27 - 1,1,1,1,1,1,1,1, # 28 - 2f - 3,3,3,3,3,3,3,3, # 30 - 37 - 3,3,1,1,1,1,1,1, # 38 - 3f - 2,2,2,2,2,2,2,2, # 40 - 47 - 2,2,2,2,2,2,2,2, # 48 - 4f - 2,2,2,2,2,2,2,2, # 50 - 57 - 2,2,2,2,2,2,2,2, # 58 - 5f - 2,2,2,2,2,2,2,2, # 60 - 67 - 2,2,2,2,2,2,2,2, # 68 - 6f - 2,2,2,2,2,2,2,2, # 70 - 77 - 2,2,2,2,2,2,2,4, # 78 - 7f - 5,6,6,6,6,6,6,6, # 80 - 87 - 6,6,6,6,6,6,6,6, # 88 - 8f - 6,6,6,6,6,6,6,6, # 90 - 97 - 6,6,6,6,6,6,6,6, # 98 - 9f - 6,6,6,6,6,6,6,6, # a0 - a7 - 6,6,6,6,6,6,6,6, # a8 - af - 6,6,6,6,6,6,6,6, # b0 - b7 - 6,6,6,6,6,6,6,6, # b8 - bf - 6,6,6,6,6,6,6,6, # c0 - c7 - 6,6,6,6,6,6,6,6, # c8 - cf - 6,6,6,6,6,6,6,6, # d0 - d7 - 6,6,6,6,6,6,6,6, # d8 - df - 6,6,6,6,6,6,6,6, # e0 - e7 - 6,6,6,6,6,6,6,6, # e8 - ef - 6,6,6,6,6,6,6,6, # f0 - f7 - 6,6,6,6,6,6,6,0 # f8 - ff -) - -GB2312_ST = ( - MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, 3,MachineState.ERROR,#00-07 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f - MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,#10-17 - 
4,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f - MachineState.ERROR,MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#20-27 - MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f -) - -# To be accurate, the length of class 6 can be either 2 or 4. -# But it is not necessary to discriminate between the two since -# it is used for frequency analysis only, and we are validating -# each code range there as well. So it is safe to set it to be -# 2 here. -GB2312_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 1, 2) - -GB2312_SM_MODEL = {'class_table': GB2312_CLS, - 'class_factor': 7, - 'state_table': GB2312_ST, - 'char_len_table': GB2312_CHAR_LEN_TABLE, - 'name': 'GB2312'} - -# Shift_JIS - -SJIS_CLS = ( - 1,1,1,1,1,1,1,1, # 00 - 07 - 1,1,1,1,1,1,0,0, # 08 - 0f - 1,1,1,1,1,1,1,1, # 10 - 17 - 1,1,1,0,1,1,1,1, # 18 - 1f - 1,1,1,1,1,1,1,1, # 20 - 27 - 1,1,1,1,1,1,1,1, # 28 - 2f - 1,1,1,1,1,1,1,1, # 30 - 37 - 1,1,1,1,1,1,1,1, # 38 - 3f - 2,2,2,2,2,2,2,2, # 40 - 47 - 2,2,2,2,2,2,2,2, # 48 - 4f - 2,2,2,2,2,2,2,2, # 50 - 57 - 2,2,2,2,2,2,2,2, # 58 - 5f - 2,2,2,2,2,2,2,2, # 60 - 67 - 2,2,2,2,2,2,2,2, # 68 - 6f - 2,2,2,2,2,2,2,2, # 70 - 77 - 2,2,2,2,2,2,2,1, # 78 - 7f - 3,3,3,3,3,2,2,3, # 80 - 87 - 3,3,3,3,3,3,3,3, # 88 - 8f - 3,3,3,3,3,3,3,3, # 90 - 97 - 3,3,3,3,3,3,3,3, # 98 - 9f - #0xa0 is illegal in sjis encoding, but some pages does - #contain such byte. We need to be more error forgiven. 
- 2,2,2,2,2,2,2,2, # a0 - a7 - 2,2,2,2,2,2,2,2, # a8 - af - 2,2,2,2,2,2,2,2, # b0 - b7 - 2,2,2,2,2,2,2,2, # b8 - bf - 2,2,2,2,2,2,2,2, # c0 - c7 - 2,2,2,2,2,2,2,2, # c8 - cf - 2,2,2,2,2,2,2,2, # d0 - d7 - 2,2,2,2,2,2,2,2, # d8 - df - 3,3,3,3,3,3,3,3, # e0 - e7 - 3,3,3,3,3,4,4,4, # e8 - ef - 3,3,3,3,3,3,3,3, # f0 - f7 - 3,3,3,3,3,0,0,0) # f8 - ff - - -SJIS_ST = ( - MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f - MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START #10-17 -) - -SJIS_CHAR_LEN_TABLE = (0, 1, 1, 2, 0, 0) - -SJIS_SM_MODEL = {'class_table': SJIS_CLS, - 'class_factor': 6, - 'state_table': SJIS_ST, - 'char_len_table': SJIS_CHAR_LEN_TABLE, - 'name': 'Shift_JIS'} - -# UCS2-BE - -UCS2BE_CLS = ( - 0,0,0,0,0,0,0,0, # 00 - 07 - 0,0,1,0,0,2,0,0, # 08 - 0f - 0,0,0,0,0,0,0,0, # 10 - 17 - 0,0,0,3,0,0,0,0, # 18 - 1f - 0,0,0,0,0,0,0,0, # 20 - 27 - 0,3,3,3,3,3,0,0, # 28 - 2f - 0,0,0,0,0,0,0,0, # 30 - 37 - 0,0,0,0,0,0,0,0, # 38 - 3f - 0,0,0,0,0,0,0,0, # 40 - 47 - 0,0,0,0,0,0,0,0, # 48 - 4f - 0,0,0,0,0,0,0,0, # 50 - 57 - 0,0,0,0,0,0,0,0, # 58 - 5f - 0,0,0,0,0,0,0,0, # 60 - 67 - 0,0,0,0,0,0,0,0, # 68 - 6f - 0,0,0,0,0,0,0,0, # 70 - 77 - 0,0,0,0,0,0,0,0, # 78 - 7f - 0,0,0,0,0,0,0,0, # 80 - 87 - 0,0,0,0,0,0,0,0, # 88 - 8f - 0,0,0,0,0,0,0,0, # 90 - 97 - 0,0,0,0,0,0,0,0, # 98 - 9f - 0,0,0,0,0,0,0,0, # a0 - a7 - 0,0,0,0,0,0,0,0, # a8 - af - 0,0,0,0,0,0,0,0, # b0 - b7 - 0,0,0,0,0,0,0,0, # b8 - bf - 0,0,0,0,0,0,0,0, # c0 - c7 - 0,0,0,0,0,0,0,0, # c8 - cf - 0,0,0,0,0,0,0,0, # d0 - d7 - 0,0,0,0,0,0,0,0, # d8 - df - 0,0,0,0,0,0,0,0, # e0 - e7 - 0,0,0,0,0,0,0,0, # e8 - ef - 0,0,0,0,0,0,0,0, # f0 - f7 - 0,0,0,0,0,0,4,5 # f8 - ff 
-) - -UCS2BE_ST = ( - 5, 7, 7,MachineState.ERROR, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f - MachineState.ITS_ME,MachineState.ITS_ME, 6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,#10-17 - 6, 6, 6, 6, 6,MachineState.ITS_ME, 6, 6,#18-1f - 6, 6, 6, 6, 5, 7, 7,MachineState.ERROR,#20-27 - 5, 8, 6, 6,MachineState.ERROR, 6, 6, 6,#28-2f - 6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #30-37 -) - -UCS2BE_CHAR_LEN_TABLE = (2, 2, 2, 0, 2, 2) - -UCS2BE_SM_MODEL = {'class_table': UCS2BE_CLS, - 'class_factor': 6, - 'state_table': UCS2BE_ST, - 'char_len_table': UCS2BE_CHAR_LEN_TABLE, - 'name': 'UTF-16BE'} - -# UCS2-LE - -UCS2LE_CLS = ( - 0,0,0,0,0,0,0,0, # 00 - 07 - 0,0,1,0,0,2,0,0, # 08 - 0f - 0,0,0,0,0,0,0,0, # 10 - 17 - 0,0,0,3,0,0,0,0, # 18 - 1f - 0,0,0,0,0,0,0,0, # 20 - 27 - 0,3,3,3,3,3,0,0, # 28 - 2f - 0,0,0,0,0,0,0,0, # 30 - 37 - 0,0,0,0,0,0,0,0, # 38 - 3f - 0,0,0,0,0,0,0,0, # 40 - 47 - 0,0,0,0,0,0,0,0, # 48 - 4f - 0,0,0,0,0,0,0,0, # 50 - 57 - 0,0,0,0,0,0,0,0, # 58 - 5f - 0,0,0,0,0,0,0,0, # 60 - 67 - 0,0,0,0,0,0,0,0, # 68 - 6f - 0,0,0,0,0,0,0,0, # 70 - 77 - 0,0,0,0,0,0,0,0, # 78 - 7f - 0,0,0,0,0,0,0,0, # 80 - 87 - 0,0,0,0,0,0,0,0, # 88 - 8f - 0,0,0,0,0,0,0,0, # 90 - 97 - 0,0,0,0,0,0,0,0, # 98 - 9f - 0,0,0,0,0,0,0,0, # a0 - a7 - 0,0,0,0,0,0,0,0, # a8 - af - 0,0,0,0,0,0,0,0, # b0 - b7 - 0,0,0,0,0,0,0,0, # b8 - bf - 0,0,0,0,0,0,0,0, # c0 - c7 - 0,0,0,0,0,0,0,0, # c8 - cf - 0,0,0,0,0,0,0,0, # d0 - d7 - 0,0,0,0,0,0,0,0, # d8 - df - 0,0,0,0,0,0,0,0, # e0 - e7 - 0,0,0,0,0,0,0,0, # e8 - ef - 0,0,0,0,0,0,0,0, # f0 - f7 - 0,0,0,0,0,0,4,5 # f8 - ff -) - -UCS2LE_ST = ( - 6, 6, 7, 6, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07 - 
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f - MachineState.ITS_ME,MachineState.ITS_ME, 5, 5, 5,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#10-17 - 5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR, 6, 6,#18-1f - 7, 6, 8, 8, 5, 5, 5,MachineState.ERROR,#20-27 - 5, 5, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5,#28-2f - 5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR,MachineState.START,MachineState.START #30-37 -) - -UCS2LE_CHAR_LEN_TABLE = (2, 2, 2, 2, 2, 2) - -UCS2LE_SM_MODEL = {'class_table': UCS2LE_CLS, - 'class_factor': 6, - 'state_table': UCS2LE_ST, - 'char_len_table': UCS2LE_CHAR_LEN_TABLE, - 'name': 'UTF-16LE'} - -# UTF-8 - -UTF8_CLS = ( - 1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value - 1,1,1,1,1,1,0,0, # 08 - 0f - 1,1,1,1,1,1,1,1, # 10 - 17 - 1,1,1,0,1,1,1,1, # 18 - 1f - 1,1,1,1,1,1,1,1, # 20 - 27 - 1,1,1,1,1,1,1,1, # 28 - 2f - 1,1,1,1,1,1,1,1, # 30 - 37 - 1,1,1,1,1,1,1,1, # 38 - 3f - 1,1,1,1,1,1,1,1, # 40 - 47 - 1,1,1,1,1,1,1,1, # 48 - 4f - 1,1,1,1,1,1,1,1, # 50 - 57 - 1,1,1,1,1,1,1,1, # 58 - 5f - 1,1,1,1,1,1,1,1, # 60 - 67 - 1,1,1,1,1,1,1,1, # 68 - 6f - 1,1,1,1,1,1,1,1, # 70 - 77 - 1,1,1,1,1,1,1,1, # 78 - 7f - 2,2,2,2,3,3,3,3, # 80 - 87 - 4,4,4,4,4,4,4,4, # 88 - 8f - 4,4,4,4,4,4,4,4, # 90 - 97 - 4,4,4,4,4,4,4,4, # 98 - 9f - 5,5,5,5,5,5,5,5, # a0 - a7 - 5,5,5,5,5,5,5,5, # a8 - af - 5,5,5,5,5,5,5,5, # b0 - b7 - 5,5,5,5,5,5,5,5, # b8 - bf - 0,0,6,6,6,6,6,6, # c0 - c7 - 6,6,6,6,6,6,6,6, # c8 - cf - 6,6,6,6,6,6,6,6, # d0 - d7 - 6,6,6,6,6,6,6,6, # d8 - df - 7,8,8,8,8,8,8,8, # e0 - e7 - 8,8,8,8,8,9,8,8, # e8 - ef - 10,11,11,11,11,11,11,11, # f0 - f7 - 12,13,13,13,14,15,0,0 # f8 - ff -) - -UTF8_ST = ( - MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 12, 10,#00-07 - 9, 11, 8, 7, 6, 5, 4, 3,#08-0f - 
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f - MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#20-27 - MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#28-2f - MachineState.ERROR,MachineState.ERROR, 5, 5, 5, 5,MachineState.ERROR,MachineState.ERROR,#30-37 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#38-3f - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5, 5,MachineState.ERROR,MachineState.ERROR,#40-47 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#48-4f - MachineState.ERROR,MachineState.ERROR, 7, 7, 7, 7,MachineState.ERROR,MachineState.ERROR,#50-57 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#58-5f - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 7, 7,MachineState.ERROR,MachineState.ERROR,#60-67 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#68-6f - MachineState.ERROR,MachineState.ERROR, 9, 9, 9, 9,MachineState.ERROR,MachineState.ERROR,#70-77 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#78-7f - 
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 9,MachineState.ERROR,MachineState.ERROR,#80-87 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#88-8f - MachineState.ERROR,MachineState.ERROR, 12, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,#90-97 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#98-9f - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 12,MachineState.ERROR,MachineState.ERROR,#a0-a7 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#a8-af - MachineState.ERROR,MachineState.ERROR, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b0-b7 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b8-bf - MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,#c0-c7 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR #c8-cf -) - -UTF8_CHAR_LEN_TABLE = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6) - -UTF8_SM_MODEL = {'class_table': UTF8_CLS, - 'class_factor': 16, - 'state_table': UTF8_ST, - 'char_len_table': UTF8_CHAR_LEN_TABLE, - 'name': 'UTF-8'} diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/sbcharsetprober.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/sbcharsetprober.py deleted file mode 100644 index 0adb51d..0000000 --- 
a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/sbcharsetprober.py +++ /dev/null @@ -1,132 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Universal charset detector code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 2001 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# Shy Shalom - original C code -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .charsetprober import CharSetProber -from .enums import CharacterCategory, ProbingState, SequenceLikelihood - - -class SingleByteCharSetProber(CharSetProber): - SAMPLE_SIZE = 64 - SB_ENOUGH_REL_THRESHOLD = 1024 # 0.25 * SAMPLE_SIZE^2 - POSITIVE_SHORTCUT_THRESHOLD = 0.95 - NEGATIVE_SHORTCUT_THRESHOLD = 0.05 - - def __init__(self, model, reversed=False, name_prober=None): - super(SingleByteCharSetProber, self).__init__() - self._model = model - # TRUE if we need to reverse every pair in the model lookup - self._reversed = reversed - # Optional auxiliary prober for name decision - self._name_prober = name_prober - self._last_order = None - self._seq_counters = None - self._total_seqs = None - self._total_char = None - self._freq_char = None - self.reset() - - def reset(self): - super(SingleByteCharSetProber, self).reset() - # char order of last character - self._last_order = 255 - self._seq_counters = [0] * SequenceLikelihood.get_num_categories() - self._total_seqs = 0 - self._total_char = 0 - # characters that fall in our sampling range - self._freq_char = 0 - - @property - def charset_name(self): - if self._name_prober: - return self._name_prober.charset_name - else: - return self._model['charset_name'] - - @property - def language(self): - if self._name_prober: - return self._name_prober.language - else: - return self._model.get('language') - - def feed(self, byte_str): - if not self._model['keep_english_letter']: - byte_str = self.filter_international_words(byte_str) - if not byte_str: - return self.state - char_to_order_map = self._model['char_to_order_map'] - for i, c in enumerate(byte_str): - # XXX: Order is in range 1-64, so one would think we want 0-63 here, - # 
but that leads to 27 more test failures than before. - order = char_to_order_map[c] - # XXX: This was SYMBOL_CAT_ORDER before, with a value of 250, but - # CharacterCategory.SYMBOL is actually 253, so we use CONTROL - # to make it closer to the original intent. The only difference - # is whether or not we count digits and control characters for - # _total_char purposes. - if order < CharacterCategory.CONTROL: - self._total_char += 1 - if order < self.SAMPLE_SIZE: - self._freq_char += 1 - if self._last_order < self.SAMPLE_SIZE: - self._total_seqs += 1 - if not self._reversed: - i = (self._last_order * self.SAMPLE_SIZE) + order - model = self._model['precedence_matrix'][i] - else: # reverse the order of the letters in the lookup - i = (order * self.SAMPLE_SIZE) + self._last_order - model = self._model['precedence_matrix'][i] - self._seq_counters[model] += 1 - self._last_order = order - - charset_name = self._model['charset_name'] - if self.state == ProbingState.DETECTING: - if self._total_seqs > self.SB_ENOUGH_REL_THRESHOLD: - confidence = self.get_confidence() - if confidence > self.POSITIVE_SHORTCUT_THRESHOLD: - self.logger.debug('%s confidence = %s, we have a winner', - charset_name, confidence) - self._state = ProbingState.FOUND_IT - elif confidence < self.NEGATIVE_SHORTCUT_THRESHOLD: - self.logger.debug('%s confidence = %s, below negative ' - 'shortcut threshhold %s', charset_name, - confidence, - self.NEGATIVE_SHORTCUT_THRESHOLD) - self._state = ProbingState.NOT_ME - - return self.state - - def get_confidence(self): - r = 0.01 - if self._total_seqs > 0: - r = ((1.0 * self._seq_counters[SequenceLikelihood.POSITIVE]) / - self._total_seqs / self._model['typical_positive_ratio']) - r = r * self._freq_char / self._total_char - if r >= 1.0: - r = 0.99 - return r diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/sbcsgroupprober.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/sbcsgroupprober.py deleted 
file mode 100644 index 98e95dc..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/sbcsgroupprober.py +++ /dev/null @@ -1,73 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Universal charset detector code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 2001 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# Shy Shalom - original C code -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .charsetgroupprober import CharSetGroupProber -from .sbcharsetprober import SingleByteCharSetProber -from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel, - Latin5CyrillicModel, MacCyrillicModel, - Ibm866Model, Ibm855Model) -from .langgreekmodel import Latin7GreekModel, Win1253GreekModel -from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel -# from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel -from .langthaimodel import TIS620ThaiModel -from .langhebrewmodel import Win1255HebrewModel -from .hebrewprober import HebrewProber -from .langturkishmodel import Latin5TurkishModel - - -class SBCSGroupProber(CharSetGroupProber): - def __init__(self): - super(SBCSGroupProber, self).__init__() - self.probers = [ - SingleByteCharSetProber(Win1251CyrillicModel), - SingleByteCharSetProber(Koi8rModel), - SingleByteCharSetProber(Latin5CyrillicModel), - SingleByteCharSetProber(MacCyrillicModel), - SingleByteCharSetProber(Ibm866Model), - SingleByteCharSetProber(Ibm855Model), - SingleByteCharSetProber(Latin7GreekModel), - SingleByteCharSetProber(Win1253GreekModel), - SingleByteCharSetProber(Latin5BulgarianModel), - SingleByteCharSetProber(Win1251BulgarianModel), - # TODO: Restore Hungarian encodings (iso-8859-2 and windows-1250) - # after we retrain model. 
- # SingleByteCharSetProber(Latin2HungarianModel), - # SingleByteCharSetProber(Win1250HungarianModel), - SingleByteCharSetProber(TIS620ThaiModel), - SingleByteCharSetProber(Latin5TurkishModel), - ] - hebrew_prober = HebrewProber() - logical_hebrew_prober = SingleByteCharSetProber(Win1255HebrewModel, - False, hebrew_prober) - visual_hebrew_prober = SingleByteCharSetProber(Win1255HebrewModel, True, - hebrew_prober) - hebrew_prober.set_model_probers(logical_hebrew_prober, visual_hebrew_prober) - self.probers.extend([hebrew_prober, logical_hebrew_prober, - visual_hebrew_prober]) - - self.reset() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/sjisprober.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/sjisprober.py deleted file mode 100644 index 9e29623..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/sjisprober.py +++ /dev/null @@ -1,92 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is mozilla.org code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .mbcharsetprober import MultiByteCharSetProber -from .codingstatemachine import CodingStateMachine -from .chardistribution import SJISDistributionAnalysis -from .jpcntx import SJISContextAnalysis -from .mbcssm import SJIS_SM_MODEL -from .enums import ProbingState, MachineState - - -class SJISProber(MultiByteCharSetProber): - def __init__(self): - super(SJISProber, self).__init__() - self.coding_sm = CodingStateMachine(SJIS_SM_MODEL) - self.distribution_analyzer = SJISDistributionAnalysis() - self.context_analyzer = SJISContextAnalysis() - self.reset() - - def reset(self): - super(SJISProber, self).reset() - self.context_analyzer.reset() - - @property - def charset_name(self): - return self.context_analyzer.charset_name - - @property - def language(self): - return "Japanese" - - def feed(self, byte_str): - for i in range(len(byte_str)): - coding_state = self.coding_sm.next_state(byte_str[i]) - if coding_state == MachineState.ERROR: - self.logger.debug('%s %s prober hit error at byte %s', - self.charset_name, self.language, i) - self._state = ProbingState.NOT_ME - break - elif coding_state == MachineState.ITS_ME: - self._state = ProbingState.FOUND_IT - break - elif coding_state == MachineState.START: - char_len = self.coding_sm.get_current_charlen() - if i == 0: - self._last_char[1] = byte_str[0] - self.context_analyzer.feed(self._last_char[2 - char_len:], - char_len) - self.distribution_analyzer.feed(self._last_char, char_len) - else: - self.context_analyzer.feed(byte_str[i + 1 - char_len:i + 3 - - char_len], char_len) - self.distribution_analyzer.feed(byte_str[i - 1:i + 1], - char_len) - - self._last_char[0] = byte_str[-1] - - if self.state == ProbingState.DETECTING: - 
if (self.context_analyzer.got_enough_data() and - (self.get_confidence() > self.SHORTCUT_THRESHOLD)): - self._state = ProbingState.FOUND_IT - - return self.state - - def get_confidence(self): - context_conf = self.context_analyzer.get_confidence() - distrib_conf = self.distribution_analyzer.get_confidence() - return max(context_conf, distrib_conf) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/universaldetector.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/universaldetector.py deleted file mode 100644 index 7b4e92d..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/universaldetector.py +++ /dev/null @@ -1,286 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Universal charset detector code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 2001 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# Shy Shalom - original C code -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### -""" -Module containing the UniversalDetector detector class, which is the primary -class a user of ``chardet`` should use. - -:author: Mark Pilgrim (initial port to Python) -:author: Shy Shalom (original C code) -:author: Dan Blanchard (major refactoring for 3.0) -:author: Ian Cordasco -""" - - -import codecs -import logging -import re - -from .charsetgroupprober import CharSetGroupProber -from .enums import InputState, LanguageFilter, ProbingState -from .escprober import EscCharSetProber -from .latin1prober import Latin1Prober -from .mbcsgroupprober import MBCSGroupProber -from .sbcsgroupprober import SBCSGroupProber - - -class UniversalDetector(object): - """ - The ``UniversalDetector`` class underlies the ``chardet.detect`` function - and coordinates all of the different charset probers. - - To get a ``dict`` containing an encoding and its confidence, you can simply - run: - - .. 
code:: - - u = UniversalDetector() - u.feed(some_bytes) - u.close() - detected = u.result - - """ - - MINIMUM_THRESHOLD = 0.20 - HIGH_BYTE_DETECTOR = re.compile(b'[\x80-\xFF]') - ESC_DETECTOR = re.compile(b'(\033|~{)') - WIN_BYTE_DETECTOR = re.compile(b'[\x80-\x9F]') - ISO_WIN_MAP = {'iso-8859-1': 'Windows-1252', - 'iso-8859-2': 'Windows-1250', - 'iso-8859-5': 'Windows-1251', - 'iso-8859-6': 'Windows-1256', - 'iso-8859-7': 'Windows-1253', - 'iso-8859-8': 'Windows-1255', - 'iso-8859-9': 'Windows-1254', - 'iso-8859-13': 'Windows-1257'} - - def __init__(self, lang_filter=LanguageFilter.ALL): - self._esc_charset_prober = None - self._charset_probers = [] - self.result = None - self.done = None - self._got_data = None - self._input_state = None - self._last_char = None - self.lang_filter = lang_filter - self.logger = logging.getLogger(__name__) - self._has_win_bytes = None - self.reset() - - def reset(self): - """ - Reset the UniversalDetector and all of its probers back to their - initial states. This is called by ``__init__``, so you only need to - call this directly in between analyses of different documents. - """ - self.result = {'encoding': None, 'confidence': 0.0, 'language': None} - self.done = False - self._got_data = False - self._has_win_bytes = False - self._input_state = InputState.PURE_ASCII - self._last_char = b'' - if self._esc_charset_prober: - self._esc_charset_prober.reset() - for prober in self._charset_probers: - prober.reset() - - def feed(self, byte_str): - """ - Takes a chunk of a document and feeds it through all of the relevant - charset probers. - - After calling ``feed``, you can check the value of the ``done`` - attribute to see if you need to continue feeding the - ``UniversalDetector`` more data, or if it has made a prediction - (in the ``result`` attribute). - - .. note:: - You should always call ``close`` when you're done feeding in your - document if ``done`` is not already ``True``. 
- """ - if self.done: - return - - if not len(byte_str): - return - - if not isinstance(byte_str, bytearray): - byte_str = bytearray(byte_str) - - # First check for known BOMs, since these are guaranteed to be correct - if not self._got_data: - # If the data starts with BOM, we know it is UTF - if byte_str.startswith(codecs.BOM_UTF8): - # EF BB BF UTF-8 with BOM - self.result = {'encoding': "UTF-8-SIG", - 'confidence': 1.0, - 'language': ''} - elif byte_str.startswith((codecs.BOM_UTF32_LE, - codecs.BOM_UTF32_BE)): - # FF FE 00 00 UTF-32, little-endian BOM - # 00 00 FE FF UTF-32, big-endian BOM - self.result = {'encoding': "UTF-32", - 'confidence': 1.0, - 'language': ''} - elif byte_str.startswith(b'\xFE\xFF\x00\x00'): - # FE FF 00 00 UCS-4, unusual octet order BOM (3412) - self.result = {'encoding': "X-ISO-10646-UCS-4-3412", - 'confidence': 1.0, - 'language': ''} - elif byte_str.startswith(b'\x00\x00\xFF\xFE'): - # 00 00 FF FE UCS-4, unusual octet order BOM (2143) - self.result = {'encoding': "X-ISO-10646-UCS-4-2143", - 'confidence': 1.0, - 'language': ''} - elif byte_str.startswith((codecs.BOM_LE, codecs.BOM_BE)): - # FF FE UTF-16, little endian BOM - # FE FF UTF-16, big endian BOM - self.result = {'encoding': "UTF-16", - 'confidence': 1.0, - 'language': ''} - - self._got_data = True - if self.result['encoding'] is not None: - self.done = True - return - - # If none of those matched and we've only see ASCII so far, check - # for high bytes and escape sequences - if self._input_state == InputState.PURE_ASCII: - if self.HIGH_BYTE_DETECTOR.search(byte_str): - self._input_state = InputState.HIGH_BYTE - elif self._input_state == InputState.PURE_ASCII and \ - self.ESC_DETECTOR.search(self._last_char + byte_str): - self._input_state = InputState.ESC_ASCII - - self._last_char = byte_str[-1:] - - # If we've seen escape sequences, use the EscCharSetProber, which - # uses a simple state machine to check for known escape sequences in - # HZ and ISO-2022 encodings, since those 
are the only encodings that - # use such sequences. - if self._input_state == InputState.ESC_ASCII: - if not self._esc_charset_prober: - self._esc_charset_prober = EscCharSetProber(self.lang_filter) - if self._esc_charset_prober.feed(byte_str) == ProbingState.FOUND_IT: - self.result = {'encoding': - self._esc_charset_prober.charset_name, - 'confidence': - self._esc_charset_prober.get_confidence(), - 'language': - self._esc_charset_prober.language} - self.done = True - # If we've seen high bytes (i.e., those with values greater than 127), - # we need to do more complicated checks using all our multi-byte and - # single-byte probers that are left. The single-byte probers - # use character bigram distributions to determine the encoding, whereas - # the multi-byte probers use a combination of character unigram and - # bigram distributions. - elif self._input_state == InputState.HIGH_BYTE: - if not self._charset_probers: - self._charset_probers = [MBCSGroupProber(self.lang_filter)] - # If we're checking non-CJK encodings, use single-byte prober - if self.lang_filter & LanguageFilter.NON_CJK: - self._charset_probers.append(SBCSGroupProber()) - self._charset_probers.append(Latin1Prober()) - for prober in self._charset_probers: - if prober.feed(byte_str) == ProbingState.FOUND_IT: - self.result = {'encoding': prober.charset_name, - 'confidence': prober.get_confidence(), - 'language': prober.language} - self.done = True - break - if self.WIN_BYTE_DETECTOR.search(byte_str): - self._has_win_bytes = True - - def close(self): - """ - Stop analyzing the current document and come up with a final - prediction. - - :returns: The ``result`` attribute, a ``dict`` with the keys - `encoding`, `confidence`, and `language`. 
- """ - # Don't bother with checks if we're already done - if self.done: - return self.result - self.done = True - - if not self._got_data: - self.logger.debug('no data received!') - - # Default to ASCII if it is all we've seen so far - elif self._input_state == InputState.PURE_ASCII: - self.result = {'encoding': 'ascii', - 'confidence': 1.0, - 'language': ''} - - # If we have seen non-ASCII, return the best that met MINIMUM_THRESHOLD - elif self._input_state == InputState.HIGH_BYTE: - prober_confidence = None - max_prober_confidence = 0.0 - max_prober = None - for prober in self._charset_probers: - if not prober: - continue - prober_confidence = prober.get_confidence() - if prober_confidence > max_prober_confidence: - max_prober_confidence = prober_confidence - max_prober = prober - if max_prober and (max_prober_confidence > self.MINIMUM_THRESHOLD): - charset_name = max_prober.charset_name - lower_charset_name = max_prober.charset_name.lower() - confidence = max_prober.get_confidence() - # Use Windows encoding name instead of ISO-8859 if we saw any - # extra Windows-specific bytes - if lower_charset_name.startswith('iso-8859'): - if self._has_win_bytes: - charset_name = self.ISO_WIN_MAP.get(lower_charset_name, - charset_name) - self.result = {'encoding': charset_name, - 'confidence': confidence, - 'language': max_prober.language} - - # Log all prober confidences if none met MINIMUM_THRESHOLD - if self.logger.getEffectiveLevel() == logging.DEBUG: - if self.result['encoding'] is None: - self.logger.debug('no probers hit minimum threshold') - for group_prober in self._charset_probers: - if not group_prober: - continue - if isinstance(group_prober, CharSetGroupProber): - for prober in group_prober.probers: - self.logger.debug('%s %s confidence = %s', - prober.charset_name, - prober.language, - prober.get_confidence()) - else: - self.logger.debug('%s %s confidence = %s', - prober.charset_name, - prober.language, - prober.get_confidence()) - return self.result diff 
--git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/utf8prober.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/utf8prober.py deleted file mode 100644 index 6c3196c..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/utf8prober.py +++ /dev/null @@ -1,82 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is mozilla.org code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .charsetprober import CharSetProber -from .enums import ProbingState, MachineState -from .codingstatemachine import CodingStateMachine -from .mbcssm import UTF8_SM_MODEL - - - -class UTF8Prober(CharSetProber): - ONE_CHAR_PROB = 0.5 - - def __init__(self): - super(UTF8Prober, self).__init__() - self.coding_sm = CodingStateMachine(UTF8_SM_MODEL) - self._num_mb_chars = None - self.reset() - - def reset(self): - super(UTF8Prober, self).reset() - self.coding_sm.reset() - self._num_mb_chars = 0 - - @property - def charset_name(self): - return "utf-8" - - @property - def language(self): - return "" - - def feed(self, byte_str): - for c in byte_str: - coding_state = self.coding_sm.next_state(c) - if coding_state == MachineState.ERROR: - self._state = ProbingState.NOT_ME - break - elif coding_state == MachineState.ITS_ME: - self._state = ProbingState.FOUND_IT - break - elif coding_state == MachineState.START: - if self.coding_sm.get_current_charlen() >= 2: - self._num_mb_chars += 1 - - if self.state == ProbingState.DETECTING: - if self.get_confidence() > self.SHORTCUT_THRESHOLD: - self._state = ProbingState.FOUND_IT - - return self.state - - def get_confidence(self): - unlike = 0.99 - if self._num_mb_chars < 6: - unlike *= self.ONE_CHAR_PROB ** self._num_mb_chars - return 1.0 - unlike - else: - return unlike diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/version.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/version.py deleted file mode 100644 index bb2a34a..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/version.py +++ /dev/null @@ -1,9 +0,0 @@ -""" -This module exists 
only to simplify retrieving the version number of chardet -from within setup.py and from chardet subpackages. - -:author: Dan Blanchard (dan.blanchard@gmail.com) -""" - -__version__ = "3.0.4" -VERSION = __version__.split('.') diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/colorama/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/colorama/__init__.py deleted file mode 100644 index 2a3bf47..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/colorama/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. -from .initialise import init, deinit, reinit, colorama_text -from .ansi import Fore, Back, Style, Cursor -from .ansitowin32 import AnsiToWin32 - -__version__ = '0.4.1' diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/colorama/ansi.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/colorama/ansi.py deleted file mode 100644 index 7877658..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/colorama/ansi.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. -''' -This module generates ANSI character codes to printing colors to terminals. -See: http://en.wikipedia.org/wiki/ANSI_escape_code -''' - -CSI = '\033[' -OSC = '\033]' -BEL = '\007' - - -def code_to_chars(code): - return CSI + str(code) + 'm' - -def set_title(title): - return OSC + '2;' + title + BEL - -def clear_screen(mode=2): - return CSI + str(mode) + 'J' - -def clear_line(mode=2): - return CSI + str(mode) + 'K' - - -class AnsiCodes(object): - def __init__(self): - # the subclasses declare class attributes which are numbers. 
- # Upon instantiation we define instance attributes, which are the same - # as the class attributes but wrapped with the ANSI escape sequence - for name in dir(self): - if not name.startswith('_'): - value = getattr(self, name) - setattr(self, name, code_to_chars(value)) - - -class AnsiCursor(object): - def UP(self, n=1): - return CSI + str(n) + 'A' - def DOWN(self, n=1): - return CSI + str(n) + 'B' - def FORWARD(self, n=1): - return CSI + str(n) + 'C' - def BACK(self, n=1): - return CSI + str(n) + 'D' - def POS(self, x=1, y=1): - return CSI + str(y) + ';' + str(x) + 'H' - - -class AnsiFore(AnsiCodes): - BLACK = 30 - RED = 31 - GREEN = 32 - YELLOW = 33 - BLUE = 34 - MAGENTA = 35 - CYAN = 36 - WHITE = 37 - RESET = 39 - - # These are fairly well supported, but not part of the standard. - LIGHTBLACK_EX = 90 - LIGHTRED_EX = 91 - LIGHTGREEN_EX = 92 - LIGHTYELLOW_EX = 93 - LIGHTBLUE_EX = 94 - LIGHTMAGENTA_EX = 95 - LIGHTCYAN_EX = 96 - LIGHTWHITE_EX = 97 - - -class AnsiBack(AnsiCodes): - BLACK = 40 - RED = 41 - GREEN = 42 - YELLOW = 43 - BLUE = 44 - MAGENTA = 45 - CYAN = 46 - WHITE = 47 - RESET = 49 - - # These are fairly well supported, but not part of the standard. - LIGHTBLACK_EX = 100 - LIGHTRED_EX = 101 - LIGHTGREEN_EX = 102 - LIGHTYELLOW_EX = 103 - LIGHTBLUE_EX = 104 - LIGHTMAGENTA_EX = 105 - LIGHTCYAN_EX = 106 - LIGHTWHITE_EX = 107 - - -class AnsiStyle(AnsiCodes): - BRIGHT = 1 - DIM = 2 - NORMAL = 22 - RESET_ALL = 0 - -Fore = AnsiFore() -Back = AnsiBack() -Style = AnsiStyle() -Cursor = AnsiCursor() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/colorama/ansitowin32.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/colorama/ansitowin32.py deleted file mode 100644 index 359c92b..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/colorama/ansitowin32.py +++ /dev/null @@ -1,257 +0,0 @@ -# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
-import re -import sys -import os - -from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style -from .winterm import WinTerm, WinColor, WinStyle -from .win32 import windll, winapi_test - - -winterm = None -if windll is not None: - winterm = WinTerm() - - -class StreamWrapper(object): - ''' - Wraps a stream (such as stdout), acting as a transparent proxy for all - attribute access apart from method 'write()', which is delegated to our - Converter instance. - ''' - def __init__(self, wrapped, converter): - # double-underscore everything to prevent clashes with names of - # attributes on the wrapped stream object. - self.__wrapped = wrapped - self.__convertor = converter - - def __getattr__(self, name): - return getattr(self.__wrapped, name) - - def __enter__(self, *args, **kwargs): - # special method lookup bypasses __getattr__/__getattribute__, see - # https://stackoverflow.com/questions/12632894/why-doesnt-getattr-work-with-exit - # thus, contextlib magic methods are not proxied via __getattr__ - return self.__wrapped.__enter__(*args, **kwargs) - - def __exit__(self, *args, **kwargs): - return self.__wrapped.__exit__(*args, **kwargs) - - def write(self, text): - self.__convertor.write(text) - - def isatty(self): - stream = self.__wrapped - if 'PYCHARM_HOSTED' in os.environ: - if stream is not None and (stream is sys.__stdout__ or stream is sys.__stderr__): - return True - try: - stream_isatty = stream.isatty - except AttributeError: - return False - else: - return stream_isatty() - - @property - def closed(self): - stream = self.__wrapped - try: - return stream.closed - except AttributeError: - return True - - -class AnsiToWin32(object): - ''' - Implements a 'write()' method which, on Windows, will strip ANSI character - sequences from the text, and if outputting to a tty, will convert them into - win32 function calls. 
- ''' - ANSI_CSI_RE = re.compile('\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?') # Control Sequence Introducer - ANSI_OSC_RE = re.compile('\001?\033\\]((?:.|;)*?)(\x07)\002?') # Operating System Command - - def __init__(self, wrapped, convert=None, strip=None, autoreset=False): - # The wrapped stream (normally sys.stdout or sys.stderr) - self.wrapped = wrapped - - # should we reset colors to defaults after every .write() - self.autoreset = autoreset - - # create the proxy wrapping our output stream - self.stream = StreamWrapper(wrapped, self) - - on_windows = os.name == 'nt' - # We test if the WinAPI works, because even if we are on Windows - # we may be using a terminal that doesn't support the WinAPI - # (e.g. Cygwin Terminal). In this case it's up to the terminal - # to support the ANSI codes. - conversion_supported = on_windows and winapi_test() - - # should we strip ANSI sequences from our output? - if strip is None: - strip = conversion_supported or (not self.stream.closed and not self.stream.isatty()) - self.strip = strip - - # should we should convert ANSI sequences into win32 calls? - if convert is None: - convert = conversion_supported and not self.stream.closed and self.stream.isatty() - self.convert = convert - - # dict of ansi codes to win32 functions and parameters - self.win32_calls = self.get_win32_calls() - - # are we wrapping stderr? - self.on_stderr = self.wrapped is sys.stderr - - def should_wrap(self): - ''' - True if this class is actually needed. If false, then the output - stream will not be affected, nor will win32 calls be issued, so - wrapping stdout is not actually required. 
This will generally be - False on non-Windows platforms, unless optional functionality like - autoreset has been requested using kwargs to init() - ''' - return self.convert or self.strip or self.autoreset - - def get_win32_calls(self): - if self.convert and winterm: - return { - AnsiStyle.RESET_ALL: (winterm.reset_all, ), - AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT), - AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL), - AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL), - AnsiFore.BLACK: (winterm.fore, WinColor.BLACK), - AnsiFore.RED: (winterm.fore, WinColor.RED), - AnsiFore.GREEN: (winterm.fore, WinColor.GREEN), - AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW), - AnsiFore.BLUE: (winterm.fore, WinColor.BLUE), - AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA), - AnsiFore.CYAN: (winterm.fore, WinColor.CYAN), - AnsiFore.WHITE: (winterm.fore, WinColor.GREY), - AnsiFore.RESET: (winterm.fore, ), - AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True), - AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True), - AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True), - AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True), - AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True), - AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True), - AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True), - AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True), - AnsiBack.BLACK: (winterm.back, WinColor.BLACK), - AnsiBack.RED: (winterm.back, WinColor.RED), - AnsiBack.GREEN: (winterm.back, WinColor.GREEN), - AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW), - AnsiBack.BLUE: (winterm.back, WinColor.BLUE), - AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA), - AnsiBack.CYAN: (winterm.back, WinColor.CYAN), - AnsiBack.WHITE: (winterm.back, WinColor.GREY), - AnsiBack.RESET: (winterm.back, ), - AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True), - AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True), - 
AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True), - AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True), - AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True), - AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True), - AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True), - AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True), - } - return dict() - - def write(self, text): - if self.strip or self.convert: - self.write_and_convert(text) - else: - self.wrapped.write(text) - self.wrapped.flush() - if self.autoreset: - self.reset_all() - - - def reset_all(self): - if self.convert: - self.call_win32('m', (0,)) - elif not self.strip and not self.stream.closed: - self.wrapped.write(Style.RESET_ALL) - - - def write_and_convert(self, text): - ''' - Write the given text to our wrapped stream, stripping any ANSI - sequences from the text, and optionally converting them into win32 - calls. - ''' - cursor = 0 - text = self.convert_osc(text) - for match in self.ANSI_CSI_RE.finditer(text): - start, end = match.span() - self.write_plain_text(text, cursor, start) - self.convert_ansi(*match.groups()) - cursor = end - self.write_plain_text(text, cursor, len(text)) - - - def write_plain_text(self, text, start, end): - if start < end: - self.wrapped.write(text[start:end]) - self.wrapped.flush() - - - def convert_ansi(self, paramstring, command): - if self.convert: - params = self.extract_params(command, paramstring) - self.call_win32(command, params) - - - def extract_params(self, command, paramstring): - if command in 'Hf': - params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';')) - while len(params) < 2: - # defaults: - params = params + (1,) - else: - params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0) - if len(params) == 0: - # defaults: - if command in 'JKm': - params = (0,) - elif command in 'ABCD': - params = (1,) - - return params - - - def call_win32(self, command, params): - if 
command == 'm': - for param in params: - if param in self.win32_calls: - func_args = self.win32_calls[param] - func = func_args[0] - args = func_args[1:] - kwargs = dict(on_stderr=self.on_stderr) - func(*args, **kwargs) - elif command in 'J': - winterm.erase_screen(params[0], on_stderr=self.on_stderr) - elif command in 'K': - winterm.erase_line(params[0], on_stderr=self.on_stderr) - elif command in 'Hf': # cursor position - absolute - winterm.set_cursor_position(params, on_stderr=self.on_stderr) - elif command in 'ABCD': # cursor position - relative - n = params[0] - # A - up, B - down, C - forward, D - back - x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command] - winterm.cursor_adjust(x, y, on_stderr=self.on_stderr) - - - def convert_osc(self, text): - for match in self.ANSI_OSC_RE.finditer(text): - start, end = match.span() - text = text[:start] + text[end:] - paramstring, command = match.groups() - if command in '\x07': # \x07 = BEL - params = paramstring.split(";") - # 0 - change title and icon (we will only change title) - # 1 - change icon (we don't support this) - # 2 - change title - if params[0] in '02': - winterm.set_title(params[1]) - return text diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/colorama/initialise.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/colorama/initialise.py deleted file mode 100644 index 430d066..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/colorama/initialise.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
-import atexit -import contextlib -import sys - -from .ansitowin32 import AnsiToWin32 - - -orig_stdout = None -orig_stderr = None - -wrapped_stdout = None -wrapped_stderr = None - -atexit_done = False - - -def reset_all(): - if AnsiToWin32 is not None: # Issue #74: objects might become None at exit - AnsiToWin32(orig_stdout).reset_all() - - -def init(autoreset=False, convert=None, strip=None, wrap=True): - - if not wrap and any([autoreset, convert, strip]): - raise ValueError('wrap=False conflicts with any other arg=True') - - global wrapped_stdout, wrapped_stderr - global orig_stdout, orig_stderr - - orig_stdout = sys.stdout - orig_stderr = sys.stderr - - if sys.stdout is None: - wrapped_stdout = None - else: - sys.stdout = wrapped_stdout = \ - wrap_stream(orig_stdout, convert, strip, autoreset, wrap) - if sys.stderr is None: - wrapped_stderr = None - else: - sys.stderr = wrapped_stderr = \ - wrap_stream(orig_stderr, convert, strip, autoreset, wrap) - - global atexit_done - if not atexit_done: - atexit.register(reset_all) - atexit_done = True - - -def deinit(): - if orig_stdout is not None: - sys.stdout = orig_stdout - if orig_stderr is not None: - sys.stderr = orig_stderr - - -@contextlib.contextmanager -def colorama_text(*args, **kwargs): - init(*args, **kwargs) - try: - yield - finally: - deinit() - - -def reinit(): - if wrapped_stdout is not None: - sys.stdout = wrapped_stdout - if wrapped_stderr is not None: - sys.stderr = wrapped_stderr - - -def wrap_stream(stream, convert, strip, autoreset, wrap): - if wrap: - wrapper = AnsiToWin32(stream, - convert=convert, strip=strip, autoreset=autoreset) - if wrapper.should_wrap(): - stream = wrapper.stream - return stream diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/colorama/win32.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/colorama/win32.py deleted file mode 100644 index c2d8360..0000000 --- 
a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/colorama/win32.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. - -# from winbase.h -STDOUT = -11 -STDERR = -12 - -try: - import ctypes - from ctypes import LibraryLoader - windll = LibraryLoader(ctypes.WinDLL) - from ctypes import wintypes -except (AttributeError, ImportError): - windll = None - SetConsoleTextAttribute = lambda *_: None - winapi_test = lambda *_: None -else: - from ctypes import byref, Structure, c_char, POINTER - - COORD = wintypes._COORD - - class CONSOLE_SCREEN_BUFFER_INFO(Structure): - """struct in wincon.h.""" - _fields_ = [ - ("dwSize", COORD), - ("dwCursorPosition", COORD), - ("wAttributes", wintypes.WORD), - ("srWindow", wintypes.SMALL_RECT), - ("dwMaximumWindowSize", COORD), - ] - def __str__(self): - return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % ( - self.dwSize.Y, self.dwSize.X - , self.dwCursorPosition.Y, self.dwCursorPosition.X - , self.wAttributes - , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right - , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X - ) - - _GetStdHandle = windll.kernel32.GetStdHandle - _GetStdHandle.argtypes = [ - wintypes.DWORD, - ] - _GetStdHandle.restype = wintypes.HANDLE - - _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo - _GetConsoleScreenBufferInfo.argtypes = [ - wintypes.HANDLE, - POINTER(CONSOLE_SCREEN_BUFFER_INFO), - ] - _GetConsoleScreenBufferInfo.restype = wintypes.BOOL - - _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute - _SetConsoleTextAttribute.argtypes = [ - wintypes.HANDLE, - wintypes.WORD, - ] - _SetConsoleTextAttribute.restype = wintypes.BOOL - - _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition - _SetConsoleCursorPosition.argtypes = [ - wintypes.HANDLE, - COORD, - ] - _SetConsoleCursorPosition.restype = wintypes.BOOL - - _FillConsoleOutputCharacterA = 
windll.kernel32.FillConsoleOutputCharacterA - _FillConsoleOutputCharacterA.argtypes = [ - wintypes.HANDLE, - c_char, - wintypes.DWORD, - COORD, - POINTER(wintypes.DWORD), - ] - _FillConsoleOutputCharacterA.restype = wintypes.BOOL - - _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute - _FillConsoleOutputAttribute.argtypes = [ - wintypes.HANDLE, - wintypes.WORD, - wintypes.DWORD, - COORD, - POINTER(wintypes.DWORD), - ] - _FillConsoleOutputAttribute.restype = wintypes.BOOL - - _SetConsoleTitleW = windll.kernel32.SetConsoleTitleW - _SetConsoleTitleW.argtypes = [ - wintypes.LPCWSTR - ] - _SetConsoleTitleW.restype = wintypes.BOOL - - def _winapi_test(handle): - csbi = CONSOLE_SCREEN_BUFFER_INFO() - success = _GetConsoleScreenBufferInfo( - handle, byref(csbi)) - return bool(success) - - def winapi_test(): - return any(_winapi_test(h) for h in - (_GetStdHandle(STDOUT), _GetStdHandle(STDERR))) - - def GetConsoleScreenBufferInfo(stream_id=STDOUT): - handle = _GetStdHandle(stream_id) - csbi = CONSOLE_SCREEN_BUFFER_INFO() - success = _GetConsoleScreenBufferInfo( - handle, byref(csbi)) - return csbi - - def SetConsoleTextAttribute(stream_id, attrs): - handle = _GetStdHandle(stream_id) - return _SetConsoleTextAttribute(handle, attrs) - - def SetConsoleCursorPosition(stream_id, position, adjust=True): - position = COORD(*position) - # If the position is out of range, do nothing. - if position.Y <= 0 or position.X <= 0: - return - # Adjust for Windows' SetConsoleCursorPosition: - # 1. being 0-based, while ANSI is 1-based. - # 2. expecting (x,y), while ANSI uses (y,x). 
- adjusted_position = COORD(position.Y - 1, position.X - 1) - if adjust: - # Adjust for viewport's scroll position - sr = GetConsoleScreenBufferInfo(STDOUT).srWindow - adjusted_position.Y += sr.Top - adjusted_position.X += sr.Left - # Resume normal processing - handle = _GetStdHandle(stream_id) - return _SetConsoleCursorPosition(handle, adjusted_position) - - def FillConsoleOutputCharacter(stream_id, char, length, start): - handle = _GetStdHandle(stream_id) - char = c_char(char.encode()) - length = wintypes.DWORD(length) - num_written = wintypes.DWORD(0) - # Note that this is hard-coded for ANSI (vs wide) bytes. - success = _FillConsoleOutputCharacterA( - handle, char, length, start, byref(num_written)) - return num_written.value - - def FillConsoleOutputAttribute(stream_id, attr, length, start): - ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )''' - handle = _GetStdHandle(stream_id) - attribute = wintypes.WORD(attr) - length = wintypes.DWORD(length) - num_written = wintypes.DWORD(0) - # Note that this is hard-coded for ANSI (vs wide) bytes. - return _FillConsoleOutputAttribute( - handle, attribute, length, start, byref(num_written)) - - def SetConsoleTitle(title): - return _SetConsoleTitleW(title) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/colorama/winterm.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/colorama/winterm.py deleted file mode 100644 index 0fdb4ec..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/colorama/winterm.py +++ /dev/null @@ -1,169 +0,0 @@ -# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. -from . 
import win32 - - -# from wincon.h -class WinColor(object): - BLACK = 0 - BLUE = 1 - GREEN = 2 - CYAN = 3 - RED = 4 - MAGENTA = 5 - YELLOW = 6 - GREY = 7 - -# from wincon.h -class WinStyle(object): - NORMAL = 0x00 # dim text, dim background - BRIGHT = 0x08 # bright text, dim background - BRIGHT_BACKGROUND = 0x80 # dim text, bright background - -class WinTerm(object): - - def __init__(self): - self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes - self.set_attrs(self._default) - self._default_fore = self._fore - self._default_back = self._back - self._default_style = self._style - # In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style. - # So that LIGHT_EX colors and BRIGHT style do not clobber each other, - # we track them separately, since LIGHT_EX is overwritten by Fore/Back - # and BRIGHT is overwritten by Style codes. - self._light = 0 - - def get_attrs(self): - return self._fore + self._back * 16 + (self._style | self._light) - - def set_attrs(self, value): - self._fore = value & 7 - self._back = (value >> 4) & 7 - self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND) - - def reset_all(self, on_stderr=None): - self.set_attrs(self._default) - self.set_console(attrs=self._default) - self._light = 0 - - def fore(self, fore=None, light=False, on_stderr=False): - if fore is None: - fore = self._default_fore - self._fore = fore - # Emulate LIGHT_EX with BRIGHT Style - if light: - self._light |= WinStyle.BRIGHT - else: - self._light &= ~WinStyle.BRIGHT - self.set_console(on_stderr=on_stderr) - - def back(self, back=None, light=False, on_stderr=False): - if back is None: - back = self._default_back - self._back = back - # Emulate LIGHT_EX with BRIGHT_BACKGROUND Style - if light: - self._light |= WinStyle.BRIGHT_BACKGROUND - else: - self._light &= ~WinStyle.BRIGHT_BACKGROUND - self.set_console(on_stderr=on_stderr) - - def style(self, style=None, on_stderr=False): - if style is None: - style = self._default_style - 
self._style = style - self.set_console(on_stderr=on_stderr) - - def set_console(self, attrs=None, on_stderr=False): - if attrs is None: - attrs = self.get_attrs() - handle = win32.STDOUT - if on_stderr: - handle = win32.STDERR - win32.SetConsoleTextAttribute(handle, attrs) - - def get_position(self, handle): - position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition - # Because Windows coordinates are 0-based, - # and win32.SetConsoleCursorPosition expects 1-based. - position.X += 1 - position.Y += 1 - return position - - def set_cursor_position(self, position=None, on_stderr=False): - if position is None: - # I'm not currently tracking the position, so there is no default. - # position = self.get_position() - return - handle = win32.STDOUT - if on_stderr: - handle = win32.STDERR - win32.SetConsoleCursorPosition(handle, position) - - def cursor_adjust(self, x, y, on_stderr=False): - handle = win32.STDOUT - if on_stderr: - handle = win32.STDERR - position = self.get_position(handle) - adjusted_position = (position.Y + y, position.X + x) - win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False) - - def erase_screen(self, mode=0, on_stderr=False): - # 0 should clear from the cursor to the end of the screen. - # 1 should clear from the cursor to the beginning of the screen. 
- # 2 should clear the entire screen, and move cursor to (1,1) - handle = win32.STDOUT - if on_stderr: - handle = win32.STDERR - csbi = win32.GetConsoleScreenBufferInfo(handle) - # get the number of character cells in the current buffer - cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y - # get number of character cells before current cursor position - cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X - if mode == 0: - from_coord = csbi.dwCursorPosition - cells_to_erase = cells_in_screen - cells_before_cursor - elif mode == 1: - from_coord = win32.COORD(0, 0) - cells_to_erase = cells_before_cursor - elif mode == 2: - from_coord = win32.COORD(0, 0) - cells_to_erase = cells_in_screen - else: - # invalid mode - return - # fill the entire screen with blanks - win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) - # now set the buffer's attributes accordingly - win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord) - if mode == 2: - # put the cursor where needed - win32.SetConsoleCursorPosition(handle, (1, 1)) - - def erase_line(self, mode=0, on_stderr=False): - # 0 should clear from the cursor to the end of the line. - # 1 should clear from the cursor to the beginning of the line. - # 2 should clear the entire line. 
- handle = win32.STDOUT - if on_stderr: - handle = win32.STDERR - csbi = win32.GetConsoleScreenBufferInfo(handle) - if mode == 0: - from_coord = csbi.dwCursorPosition - cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X - elif mode == 1: - from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) - cells_to_erase = csbi.dwCursorPosition.X - elif mode == 2: - from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) - cells_to_erase = csbi.dwSize.X - else: - # invalid mode - return - # fill the entire screen with blanks - win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) - # now set the buffer's attributes accordingly - win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord) - - def set_title(self, title): - win32.SetConsoleTitle(title) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/__init__.py deleted file mode 100644 index a786b4d..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012-2017 Vinay Sajip. -# Licensed to the Python Software Foundation under a contributor agreement. -# See LICENSE.txt and CONTRIBUTORS.txt. 
-# -import logging - -__version__ = '0.2.8' - -class DistlibException(Exception): - pass - -try: - from logging import NullHandler -except ImportError: # pragma: no cover - class NullHandler(logging.Handler): - def handle(self, record): pass - def emit(self, record): pass - def createLock(self): self.lock = None - -logger = logging.getLogger(__name__) -logger.addHandler(NullHandler()) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/_backport/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/_backport/__init__.py deleted file mode 100644 index f7dbf4c..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/_backport/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -"""Modules copied from Python 3 standard libraries, for internal use only. - -Individual classes and functions are found in d2._backport.misc. Intended -usage is to always import things missing from 3.1 from that module: the -built-in/stdlib objects will be used if found. -""" diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/_backport/misc.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/_backport/misc.py deleted file mode 100644 index cfb318d..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/_backport/misc.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012 The Python Software Foundation. -# See LICENSE.txt and CONTRIBUTORS.txt. 
-# -"""Backports for individual classes and functions.""" - -import os -import sys - -__all__ = ['cache_from_source', 'callable', 'fsencode'] - - -try: - from imp import cache_from_source -except ImportError: - def cache_from_source(py_file, debug=__debug__): - ext = debug and 'c' or 'o' - return py_file + ext - - -try: - callable = callable -except NameError: - from collections import Callable - - def callable(obj): - return isinstance(obj, Callable) - - -try: - fsencode = os.fsencode -except AttributeError: - def fsencode(filename): - if isinstance(filename, bytes): - return filename - elif isinstance(filename, str): - return filename.encode(sys.getfilesystemencoding()) - else: - raise TypeError("expect bytes or str, not %s" % - type(filename).__name__) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/_backport/shutil.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/_backport/shutil.py deleted file mode 100644 index 159e49e..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/_backport/shutil.py +++ /dev/null @@ -1,761 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012 The Python Software Foundation. -# See LICENSE.txt and CONTRIBUTORS.txt. -# -"""Utility functions for copying and archiving files and directory trees. - -XXX The functions here don't copy the resource fork or other metadata on Mac. - -""" - -import os -import sys -import stat -from os.path import abspath -import fnmatch -import collections -import errno -from . 
import tarfile - -try: - import bz2 - _BZ2_SUPPORTED = True -except ImportError: - _BZ2_SUPPORTED = False - -try: - from pwd import getpwnam -except ImportError: - getpwnam = None - -try: - from grp import getgrnam -except ImportError: - getgrnam = None - -__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2", - "copytree", "move", "rmtree", "Error", "SpecialFileError", - "ExecError", "make_archive", "get_archive_formats", - "register_archive_format", "unregister_archive_format", - "get_unpack_formats", "register_unpack_format", - "unregister_unpack_format", "unpack_archive", "ignore_patterns"] - -class Error(EnvironmentError): - pass - -class SpecialFileError(EnvironmentError): - """Raised when trying to do a kind of operation (e.g. copying) which is - not supported on a special file (e.g. a named pipe)""" - -class ExecError(EnvironmentError): - """Raised when a command could not be executed""" - -class ReadError(EnvironmentError): - """Raised when an archive cannot be read""" - -class RegistryError(Exception): - """Raised when a registry operation with the archiving - and unpacking registries fails""" - - -try: - WindowsError -except NameError: - WindowsError = None - -def copyfileobj(fsrc, fdst, length=16*1024): - """copy data from file-like object fsrc to file-like object fdst""" - while 1: - buf = fsrc.read(length) - if not buf: - break - fdst.write(buf) - -def _samefile(src, dst): - # Macintosh, Unix. - if hasattr(os.path, 'samefile'): - try: - return os.path.samefile(src, dst) - except OSError: - return False - - # All other platforms: check for same pathname. 
- return (os.path.normcase(os.path.abspath(src)) == - os.path.normcase(os.path.abspath(dst))) - -def copyfile(src, dst): - """Copy data from src to dst""" - if _samefile(src, dst): - raise Error("`%s` and `%s` are the same file" % (src, dst)) - - for fn in [src, dst]: - try: - st = os.stat(fn) - except OSError: - # File most likely does not exist - pass - else: - # XXX What about other special files? (sockets, devices...) - if stat.S_ISFIFO(st.st_mode): - raise SpecialFileError("`%s` is a named pipe" % fn) - - with open(src, 'rb') as fsrc: - with open(dst, 'wb') as fdst: - copyfileobj(fsrc, fdst) - -def copymode(src, dst): - """Copy mode bits from src to dst""" - if hasattr(os, 'chmod'): - st = os.stat(src) - mode = stat.S_IMODE(st.st_mode) - os.chmod(dst, mode) - -def copystat(src, dst): - """Copy all stat info (mode bits, atime, mtime, flags) from src to dst""" - st = os.stat(src) - mode = stat.S_IMODE(st.st_mode) - if hasattr(os, 'utime'): - os.utime(dst, (st.st_atime, st.st_mtime)) - if hasattr(os, 'chmod'): - os.chmod(dst, mode) - if hasattr(os, 'chflags') and hasattr(st, 'st_flags'): - try: - os.chflags(dst, st.st_flags) - except OSError as why: - if (not hasattr(errno, 'EOPNOTSUPP') or - why.errno != errno.EOPNOTSUPP): - raise - -def copy(src, dst): - """Copy data and mode bits ("cp src dst"). - - The destination may be a directory. - - """ - if os.path.isdir(dst): - dst = os.path.join(dst, os.path.basename(src)) - copyfile(src, dst) - copymode(src, dst) - -def copy2(src, dst): - """Copy data and all stat info ("cp -p src dst"). - - The destination may be a directory. - - """ - if os.path.isdir(dst): - dst = os.path.join(dst, os.path.basename(src)) - copyfile(src, dst) - copystat(src, dst) - -def ignore_patterns(*patterns): - """Function that can be used as copytree() ignore parameter. 
- - Patterns is a sequence of glob-style patterns - that are used to exclude files""" - def _ignore_patterns(path, names): - ignored_names = [] - for pattern in patterns: - ignored_names.extend(fnmatch.filter(names, pattern)) - return set(ignored_names) - return _ignore_patterns - -def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2, - ignore_dangling_symlinks=False): - """Recursively copy a directory tree. - - The destination directory must not already exist. - If exception(s) occur, an Error is raised with a list of reasons. - - If the optional symlinks flag is true, symbolic links in the - source tree result in symbolic links in the destination tree; if - it is false, the contents of the files pointed to by symbolic - links are copied. If the file pointed by the symlink doesn't - exist, an exception will be added in the list of errors raised in - an Error exception at the end of the copy process. - - You can set the optional ignore_dangling_symlinks flag to true if you - want to silence this exception. Notice that this has no effect on - platforms that don't support os.symlink. - - The optional ignore argument is a callable. If given, it - is called with the `src` parameter, which is the directory - being visited by copytree(), and `names` which is the list of - `src` contents, as returned by os.listdir(): - - callable(src, names) -> ignored_names - - Since copytree() is called recursively, the callable will be - called once for each directory that is copied. It returns a - list of names relative to the `src` directory that should - not be copied. - - The optional copy_function argument is a callable that will be used - to copy each file. It will be called with the source path and the - destination path as arguments. By default, copy2() is used, but any - function that supports the same signature (like copy()) can be used. 
- - """ - names = os.listdir(src) - if ignore is not None: - ignored_names = ignore(src, names) - else: - ignored_names = set() - - os.makedirs(dst) - errors = [] - for name in names: - if name in ignored_names: - continue - srcname = os.path.join(src, name) - dstname = os.path.join(dst, name) - try: - if os.path.islink(srcname): - linkto = os.readlink(srcname) - if symlinks: - os.symlink(linkto, dstname) - else: - # ignore dangling symlink if the flag is on - if not os.path.exists(linkto) and ignore_dangling_symlinks: - continue - # otherwise let the copy occurs. copy2 will raise an error - copy_function(srcname, dstname) - elif os.path.isdir(srcname): - copytree(srcname, dstname, symlinks, ignore, copy_function) - else: - # Will raise a SpecialFileError for unsupported file types - copy_function(srcname, dstname) - # catch the Error from the recursive copytree so that we can - # continue with other files - except Error as err: - errors.extend(err.args[0]) - except EnvironmentError as why: - errors.append((srcname, dstname, str(why))) - try: - copystat(src, dst) - except OSError as why: - if WindowsError is not None and isinstance(why, WindowsError): - # Copying file access times may fail on Windows - pass - else: - errors.extend((src, dst, str(why))) - if errors: - raise Error(errors) - -def rmtree(path, ignore_errors=False, onerror=None): - """Recursively delete a directory tree. - - If ignore_errors is set, errors are ignored; otherwise, if onerror - is set, it is called to handle the error with arguments (func, - path, exc_info) where func is os.listdir, os.remove, or os.rmdir; - path is the argument to that function that caused it to fail; and - exc_info is a tuple returned by sys.exc_info(). If ignore_errors - is false and onerror is None, an exception is raised. 
- - """ - if ignore_errors: - def onerror(*args): - pass - elif onerror is None: - def onerror(*args): - raise - try: - if os.path.islink(path): - # symlinks to directories are forbidden, see bug #1669 - raise OSError("Cannot call rmtree on a symbolic link") - except OSError: - onerror(os.path.islink, path, sys.exc_info()) - # can't continue even if onerror hook returns - return - names = [] - try: - names = os.listdir(path) - except os.error: - onerror(os.listdir, path, sys.exc_info()) - for name in names: - fullname = os.path.join(path, name) - try: - mode = os.lstat(fullname).st_mode - except os.error: - mode = 0 - if stat.S_ISDIR(mode): - rmtree(fullname, ignore_errors, onerror) - else: - try: - os.remove(fullname) - except os.error: - onerror(os.remove, fullname, sys.exc_info()) - try: - os.rmdir(path) - except os.error: - onerror(os.rmdir, path, sys.exc_info()) - - -def _basename(path): - # A basename() variant which first strips the trailing slash, if present. - # Thus we always get the last component of the path, even for directories. - return os.path.basename(path.rstrip(os.path.sep)) - -def move(src, dst): - """Recursively move a file or directory to another location. This is - similar to the Unix "mv" command. - - If the destination is a directory or a symlink to a directory, the source - is moved inside the directory. The destination path must not already - exist. - - If the destination already exists but is not a directory, it may be - overwritten depending on os.rename() semantics. - - If the destination is on our current filesystem, then rename() is used. - Otherwise, src is copied to the destination and then removed. - A lot more could be done here... A look at a mv.c shows a lot of - the issues this implementation glosses over. - - """ - real_dst = dst - if os.path.isdir(dst): - if _samefile(src, dst): - # We might be on a case insensitive filesystem, - # perform the rename anyway. 
- os.rename(src, dst) - return - - real_dst = os.path.join(dst, _basename(src)) - if os.path.exists(real_dst): - raise Error("Destination path '%s' already exists" % real_dst) - try: - os.rename(src, real_dst) - except OSError: - if os.path.isdir(src): - if _destinsrc(src, dst): - raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst)) - copytree(src, real_dst, symlinks=True) - rmtree(src) - else: - copy2(src, real_dst) - os.unlink(src) - -def _destinsrc(src, dst): - src = abspath(src) - dst = abspath(dst) - if not src.endswith(os.path.sep): - src += os.path.sep - if not dst.endswith(os.path.sep): - dst += os.path.sep - return dst.startswith(src) - -def _get_gid(name): - """Returns a gid, given a group name.""" - if getgrnam is None or name is None: - return None - try: - result = getgrnam(name) - except KeyError: - result = None - if result is not None: - return result[2] - return None - -def _get_uid(name): - """Returns an uid, given a user name.""" - if getpwnam is None or name is None: - return None - try: - result = getpwnam(name) - except KeyError: - result = None - if result is not None: - return result[2] - return None - -def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0, - owner=None, group=None, logger=None): - """Create a (possibly compressed) tar file from all the files under - 'base_dir'. - - 'compress' must be "gzip" (the default), "bzip2", or None. - - 'owner' and 'group' can be used to define an owner and a group for the - archive that is being built. If not provided, the current owner and group - will be used. - - The output tar file will be named 'base_name' + ".tar", possibly plus - the appropriate compression extension (".gz", or ".bz2"). - - Returns the output filename. 
- """ - tar_compression = {'gzip': 'gz', None: ''} - compress_ext = {'gzip': '.gz'} - - if _BZ2_SUPPORTED: - tar_compression['bzip2'] = 'bz2' - compress_ext['bzip2'] = '.bz2' - - # flags for compression program, each element of list will be an argument - if compress is not None and compress not in compress_ext: - raise ValueError("bad value for 'compress', or compression format not " - "supported : {0}".format(compress)) - - archive_name = base_name + '.tar' + compress_ext.get(compress, '') - archive_dir = os.path.dirname(archive_name) - - if not os.path.exists(archive_dir): - if logger is not None: - logger.info("creating %s", archive_dir) - if not dry_run: - os.makedirs(archive_dir) - - # creating the tarball - if logger is not None: - logger.info('Creating tar archive') - - uid = _get_uid(owner) - gid = _get_gid(group) - - def _set_uid_gid(tarinfo): - if gid is not None: - tarinfo.gid = gid - tarinfo.gname = group - if uid is not None: - tarinfo.uid = uid - tarinfo.uname = owner - return tarinfo - - if not dry_run: - tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress]) - try: - tar.add(base_dir, filter=_set_uid_gid) - finally: - tar.close() - - return archive_name - -def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False): - # XXX see if we want to keep an external call here - if verbose: - zipoptions = "-r" - else: - zipoptions = "-rq" - from distutils.errors import DistutilsExecError - from distutils.spawn import spawn - try: - spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run) - except DistutilsExecError: - # XXX really should distinguish between "couldn't find - # external 'zip' command" and "zip failed". - raise ExecError("unable to create zip file '%s': " - "could neither import the 'zipfile' module nor " - "find a standalone zip utility") % zip_filename - -def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None): - """Create a zip file from all the files under 'base_dir'. 
- - The output zip file will be named 'base_name' + ".zip". Uses either the - "zipfile" Python module (if available) or the InfoZIP "zip" utility - (if installed and found on the default search path). If neither tool is - available, raises ExecError. Returns the name of the output zip - file. - """ - zip_filename = base_name + ".zip" - archive_dir = os.path.dirname(base_name) - - if not os.path.exists(archive_dir): - if logger is not None: - logger.info("creating %s", archive_dir) - if not dry_run: - os.makedirs(archive_dir) - - # If zipfile module is not available, try spawning an external 'zip' - # command. - try: - import zipfile - except ImportError: - zipfile = None - - if zipfile is None: - _call_external_zip(base_dir, zip_filename, verbose, dry_run) - else: - if logger is not None: - logger.info("creating '%s' and adding '%s' to it", - zip_filename, base_dir) - - if not dry_run: - zip = zipfile.ZipFile(zip_filename, "w", - compression=zipfile.ZIP_DEFLATED) - - for dirpath, dirnames, filenames in os.walk(base_dir): - for name in filenames: - path = os.path.normpath(os.path.join(dirpath, name)) - if os.path.isfile(path): - zip.write(path, path) - if logger is not None: - logger.info("adding '%s'", path) - zip.close() - - return zip_filename - -_ARCHIVE_FORMATS = { - 'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"), - 'bztar': (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"), - 'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"), - 'zip': (_make_zipfile, [], "ZIP file"), - } - -if _BZ2_SUPPORTED: - _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')], - "bzip2'ed tar-file") - -def get_archive_formats(): - """Returns a list of supported formats for archiving and unarchiving. 
- - Each element of the returned sequence is a tuple (name, description) - """ - formats = [(name, registry[2]) for name, registry in - _ARCHIVE_FORMATS.items()] - formats.sort() - return formats - -def register_archive_format(name, function, extra_args=None, description=''): - """Registers an archive format. - - name is the name of the format. function is the callable that will be - used to create archives. If provided, extra_args is a sequence of - (name, value) tuples that will be passed as arguments to the callable. - description can be provided to describe the format, and will be returned - by the get_archive_formats() function. - """ - if extra_args is None: - extra_args = [] - if not isinstance(function, collections.Callable): - raise TypeError('The %s object is not callable' % function) - if not isinstance(extra_args, (tuple, list)): - raise TypeError('extra_args needs to be a sequence') - for element in extra_args: - if not isinstance(element, (tuple, list)) or len(element) !=2: - raise TypeError('extra_args elements are : (arg_name, value)') - - _ARCHIVE_FORMATS[name] = (function, extra_args, description) - -def unregister_archive_format(name): - del _ARCHIVE_FORMATS[name] - -def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0, - dry_run=0, owner=None, group=None, logger=None): - """Create an archive file (eg. zip or tar). - - 'base_name' is the name of the file to create, minus any format-specific - extension; 'format' is the archive format: one of "zip", "tar", "bztar" - or "gztar". - - 'root_dir' is a directory that will be the root directory of the - archive; ie. we typically chdir into 'root_dir' before creating the - archive. 'base_dir' is the directory where we start archiving from; - ie. 'base_dir' will be the common prefix of all files and - directories in the archive. 'root_dir' and 'base_dir' both default - to the current directory. Returns the name of the archive file. 
- - 'owner' and 'group' are used when creating a tar archive. By default, - uses the current owner and group. - """ - save_cwd = os.getcwd() - if root_dir is not None: - if logger is not None: - logger.debug("changing into '%s'", root_dir) - base_name = os.path.abspath(base_name) - if not dry_run: - os.chdir(root_dir) - - if base_dir is None: - base_dir = os.curdir - - kwargs = {'dry_run': dry_run, 'logger': logger} - - try: - format_info = _ARCHIVE_FORMATS[format] - except KeyError: - raise ValueError("unknown archive format '%s'" % format) - - func = format_info[0] - for arg, val in format_info[1]: - kwargs[arg] = val - - if format != 'zip': - kwargs['owner'] = owner - kwargs['group'] = group - - try: - filename = func(base_name, base_dir, **kwargs) - finally: - if root_dir is not None: - if logger is not None: - logger.debug("changing back to '%s'", save_cwd) - os.chdir(save_cwd) - - return filename - - -def get_unpack_formats(): - """Returns a list of supported formats for unpacking. - - Each element of the returned sequence is a tuple - (name, extensions, description) - """ - formats = [(name, info[0], info[3]) for name, info in - _UNPACK_FORMATS.items()] - formats.sort() - return formats - -def _check_unpack_options(extensions, function, extra_args): - """Checks what gets registered as an unpacker.""" - # first make sure no other unpacker is registered for this extension - existing_extensions = {} - for name, info in _UNPACK_FORMATS.items(): - for ext in info[0]: - existing_extensions[ext] = name - - for extension in extensions: - if extension in existing_extensions: - msg = '%s is already registered for "%s"' - raise RegistryError(msg % (extension, - existing_extensions[extension])) - - if not isinstance(function, collections.Callable): - raise TypeError('The registered function must be a callable') - - -def register_unpack_format(name, extensions, function, extra_args=None, - description=''): - """Registers an unpack format. 
- - `name` is the name of the format. `extensions` is a list of extensions - corresponding to the format. - - `function` is the callable that will be - used to unpack archives. The callable will receive archives to unpack. - If it's unable to handle an archive, it needs to raise a ReadError - exception. - - If provided, `extra_args` is a sequence of - (name, value) tuples that will be passed as arguments to the callable. - description can be provided to describe the format, and will be returned - by the get_unpack_formats() function. - """ - if extra_args is None: - extra_args = [] - _check_unpack_options(extensions, function, extra_args) - _UNPACK_FORMATS[name] = extensions, function, extra_args, description - -def unregister_unpack_format(name): - """Removes the pack format from the registry.""" - del _UNPACK_FORMATS[name] - -def _ensure_directory(path): - """Ensure that the parent directory of `path` exists""" - dirname = os.path.dirname(path) - if not os.path.isdir(dirname): - os.makedirs(dirname) - -def _unpack_zipfile(filename, extract_dir): - """Unpack zip `filename` to `extract_dir` - """ - try: - import zipfile - except ImportError: - raise ReadError('zlib not supported, cannot unpack this archive.') - - if not zipfile.is_zipfile(filename): - raise ReadError("%s is not a zip file" % filename) - - zip = zipfile.ZipFile(filename) - try: - for info in zip.infolist(): - name = info.filename - - # don't extract absolute paths or ones with .. in them - if name.startswith('/') or '..' 
in name: - continue - - target = os.path.join(extract_dir, *name.split('/')) - if not target: - continue - - _ensure_directory(target) - if not name.endswith('/'): - # file - data = zip.read(info.filename) - f = open(target, 'wb') - try: - f.write(data) - finally: - f.close() - del data - finally: - zip.close() - -def _unpack_tarfile(filename, extract_dir): - """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir` - """ - try: - tarobj = tarfile.open(filename) - except tarfile.TarError: - raise ReadError( - "%s is not a compressed or uncompressed tar file" % filename) - try: - tarobj.extractall(extract_dir) - finally: - tarobj.close() - -_UNPACK_FORMATS = { - 'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"), - 'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"), - 'zip': (['.zip'], _unpack_zipfile, [], "ZIP file") - } - -if _BZ2_SUPPORTED: - _UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [], - "bzip2'ed tar-file") - -def _find_unpack_format(filename): - for name, info in _UNPACK_FORMATS.items(): - for extension in info[0]: - if filename.endswith(extension): - return name - return None - -def unpack_archive(filename, extract_dir=None, format=None): - """Unpack an archive. - - `filename` is the name of the archive. - - `extract_dir` is the name of the target directory, where the archive - is unpacked. If not provided, the current working directory is used. - - `format` is the archive format: one of "zip", "tar", or "gztar". Or any - other registered format. If not provided, unpack_archive will use the - filename extension and see if an unpacker was registered for that - extension. - - In case none is found, a ValueError is raised. 
- """ - if extract_dir is None: - extract_dir = os.getcwd() - - if format is not None: - try: - format_info = _UNPACK_FORMATS[format] - except KeyError: - raise ValueError("Unknown unpack format '{0}'".format(format)) - - func = format_info[1] - func(filename, extract_dir, **dict(format_info[2])) - else: - # we need to look at the registered unpackers supported extensions - format = _find_unpack_format(filename) - if format is None: - raise ReadError("Unknown archive format '{0}'".format(filename)) - - func = _UNPACK_FORMATS[format][1] - kwargs = dict(_UNPACK_FORMATS[format][2]) - func(filename, extract_dir, **kwargs) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/_backport/sysconfig.cfg b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/_backport/sysconfig.cfg deleted file mode 100644 index 1746bd0..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/_backport/sysconfig.cfg +++ /dev/null @@ -1,84 +0,0 @@ -[posix_prefix] -# Configuration directories. Some of these come straight out of the -# configure script. They are for implementing the other variables, not to -# be used directly in [resource_locations]. 
-confdir = /etc -datadir = /usr/share -libdir = /usr/lib -statedir = /var -# User resource directory -local = ~/.local/{distribution.name} - -stdlib = {base}/lib/python{py_version_short} -platstdlib = {platbase}/lib/python{py_version_short} -purelib = {base}/lib/python{py_version_short}/site-packages -platlib = {platbase}/lib/python{py_version_short}/site-packages -include = {base}/include/python{py_version_short}{abiflags} -platinclude = {platbase}/include/python{py_version_short}{abiflags} -data = {base} - -[posix_home] -stdlib = {base}/lib/python -platstdlib = {base}/lib/python -purelib = {base}/lib/python -platlib = {base}/lib/python -include = {base}/include/python -platinclude = {base}/include/python -scripts = {base}/bin -data = {base} - -[nt] -stdlib = {base}/Lib -platstdlib = {base}/Lib -purelib = {base}/Lib/site-packages -platlib = {base}/Lib/site-packages -include = {base}/Include -platinclude = {base}/Include -scripts = {base}/Scripts -data = {base} - -[os2] -stdlib = {base}/Lib -platstdlib = {base}/Lib -purelib = {base}/Lib/site-packages -platlib = {base}/Lib/site-packages -include = {base}/Include -platinclude = {base}/Include -scripts = {base}/Scripts -data = {base} - -[os2_home] -stdlib = {userbase}/lib/python{py_version_short} -platstdlib = {userbase}/lib/python{py_version_short} -purelib = {userbase}/lib/python{py_version_short}/site-packages -platlib = {userbase}/lib/python{py_version_short}/site-packages -include = {userbase}/include/python{py_version_short} -scripts = {userbase}/bin -data = {userbase} - -[nt_user] -stdlib = {userbase}/Python{py_version_nodot} -platstdlib = {userbase}/Python{py_version_nodot} -purelib = {userbase}/Python{py_version_nodot}/site-packages -platlib = {userbase}/Python{py_version_nodot}/site-packages -include = {userbase}/Python{py_version_nodot}/Include -scripts = {userbase}/Scripts -data = {userbase} - -[posix_user] -stdlib = {userbase}/lib/python{py_version_short} -platstdlib = 
{userbase}/lib/python{py_version_short} -purelib = {userbase}/lib/python{py_version_short}/site-packages -platlib = {userbase}/lib/python{py_version_short}/site-packages -include = {userbase}/include/python{py_version_short} -scripts = {userbase}/bin -data = {userbase} - -[osx_framework_user] -stdlib = {userbase}/lib/python -platstdlib = {userbase}/lib/python -purelib = {userbase}/lib/python/site-packages -platlib = {userbase}/lib/python/site-packages -include = {userbase}/include -scripts = {userbase}/bin -data = {userbase} diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/_backport/sysconfig.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/_backport/sysconfig.py deleted file mode 100644 index 1df3aba..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/_backport/sysconfig.py +++ /dev/null @@ -1,788 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012 The Python Software Foundation. -# See LICENSE.txt and CONTRIBUTORS.txt. 
-# -"""Access to Python's configuration information.""" - -import codecs -import os -import re -import sys -from os.path import pardir, realpath -try: - import configparser -except ImportError: - import ConfigParser as configparser - - -__all__ = [ - 'get_config_h_filename', - 'get_config_var', - 'get_config_vars', - 'get_makefile_filename', - 'get_path', - 'get_path_names', - 'get_paths', - 'get_platform', - 'get_python_version', - 'get_scheme_names', - 'parse_config_h', -] - - -def _safe_realpath(path): - try: - return realpath(path) - except OSError: - return path - - -if sys.executable: - _PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable)) -else: - # sys.executable can be empty if argv[0] has been changed and Python is - # unable to retrieve the real program name - _PROJECT_BASE = _safe_realpath(os.getcwd()) - -if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower(): - _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir)) -# PC/VS7.1 -if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower(): - _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir)) -# PC/AMD64 -if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower(): - _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir)) - - -def is_python_build(): - for fn in ("Setup.dist", "Setup.local"): - if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)): - return True - return False - -_PYTHON_BUILD = is_python_build() - -_cfg_read = False - -def _ensure_cfg_read(): - global _cfg_read - if not _cfg_read: - from ..resources import finder - backport_package = __name__.rsplit('.', 1)[0] - _finder = finder(backport_package) - _cfgfile = _finder.find('sysconfig.cfg') - assert _cfgfile, 'sysconfig.cfg exists' - with _cfgfile.as_stream() as s: - _SCHEMES.readfp(s) - if _PYTHON_BUILD: - for scheme in ('posix_prefix', 'posix_home'): - _SCHEMES.set(scheme, 'include', '{srcdir}/Include') - _SCHEMES.set(scheme, 
'platinclude', '{projectbase}/.') - - _cfg_read = True - - -_SCHEMES = configparser.RawConfigParser() -_VAR_REPL = re.compile(r'\{([^{]*?)\}') - -def _expand_globals(config): - _ensure_cfg_read() - if config.has_section('globals'): - globals = config.items('globals') - else: - globals = tuple() - - sections = config.sections() - for section in sections: - if section == 'globals': - continue - for option, value in globals: - if config.has_option(section, option): - continue - config.set(section, option, value) - config.remove_section('globals') - - # now expanding local variables defined in the cfg file - # - for section in config.sections(): - variables = dict(config.items(section)) - - def _replacer(matchobj): - name = matchobj.group(1) - if name in variables: - return variables[name] - return matchobj.group(0) - - for option, value in config.items(section): - config.set(section, option, _VAR_REPL.sub(_replacer, value)) - -#_expand_globals(_SCHEMES) - - # FIXME don't rely on sys.version here, its format is an implementation detail - # of CPython, use sys.version_info or sys.hexversion -_PY_VERSION = sys.version.split()[0] -_PY_VERSION_SHORT = sys.version[:3] -_PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2] -_PREFIX = os.path.normpath(sys.prefix) -_EXEC_PREFIX = os.path.normpath(sys.exec_prefix) -_CONFIG_VARS = None -_USER_BASE = None - - -def _subst_vars(path, local_vars): - """In the string `path`, replace tokens like {some.thing} with the - corresponding value from the map `local_vars`. - - If there is no corresponding value, leave the token unchanged. 
- """ - def _replacer(matchobj): - name = matchobj.group(1) - if name in local_vars: - return local_vars[name] - elif name in os.environ: - return os.environ[name] - return matchobj.group(0) - return _VAR_REPL.sub(_replacer, path) - - -def _extend_dict(target_dict, other_dict): - target_keys = target_dict.keys() - for key, value in other_dict.items(): - if key in target_keys: - continue - target_dict[key] = value - - -def _expand_vars(scheme, vars): - res = {} - if vars is None: - vars = {} - _extend_dict(vars, get_config_vars()) - - for key, value in _SCHEMES.items(scheme): - if os.name in ('posix', 'nt'): - value = os.path.expanduser(value) - res[key] = os.path.normpath(_subst_vars(value, vars)) - return res - - -def format_value(value, vars): - def _replacer(matchobj): - name = matchobj.group(1) - if name in vars: - return vars[name] - return matchobj.group(0) - return _VAR_REPL.sub(_replacer, value) - - -def _get_default_scheme(): - if os.name == 'posix': - # the default scheme for posix is posix_prefix - return 'posix_prefix' - return os.name - - -def _getuserbase(): - env_base = os.environ.get("PYTHONUSERBASE", None) - - def joinuser(*args): - return os.path.expanduser(os.path.join(*args)) - - # what about 'os2emx', 'riscos' ? - if os.name == "nt": - base = os.environ.get("APPDATA") or "~" - if env_base: - return env_base - else: - return joinuser(base, "Python") - - if sys.platform == "darwin": - framework = get_config_var("PYTHONFRAMEWORK") - if framework: - if env_base: - return env_base - else: - return joinuser("~", "Library", framework, "%d.%d" % - sys.version_info[:2]) - - if env_base: - return env_base - else: - return joinuser("~", ".local") - - -def _parse_makefile(filename, vars=None): - """Parse a Makefile-style file. - - A dictionary containing name/value pairs is returned. If an - optional dictionary is passed in as the second argument, it is - used instead of a new dictionary. 
- """ - # Regexes needed for parsing Makefile (and similar syntaxes, - # like old-style Setup files). - _variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)") - _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)") - _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}") - - if vars is None: - vars = {} - done = {} - notdone = {} - - with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f: - lines = f.readlines() - - for line in lines: - if line.startswith('#') or line.strip() == '': - continue - m = _variable_rx.match(line) - if m: - n, v = m.group(1, 2) - v = v.strip() - # `$$' is a literal `$' in make - tmpv = v.replace('$$', '') - - if "$" in tmpv: - notdone[n] = v - else: - try: - v = int(v) - except ValueError: - # insert literal `$' - done[n] = v.replace('$$', '$') - else: - done[n] = v - - # do variable interpolation here - variables = list(notdone.keys()) - - # Variables with a 'PY_' prefix in the makefile. These need to - # be made available without that prefix through sysconfig. - # Special care is needed to ensure that variable expansion works, even - # if the expansion uses the name without a prefix. 
- renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS') - - while len(variables) > 0: - for name in tuple(variables): - value = notdone[name] - m = _findvar1_rx.search(value) or _findvar2_rx.search(value) - if m is not None: - n = m.group(1) - found = True - if n in done: - item = str(done[n]) - elif n in notdone: - # get it on a subsequent round - found = False - elif n in os.environ: - # do it like make: fall back to environment - item = os.environ[n] - - elif n in renamed_variables: - if (name.startswith('PY_') and - name[3:] in renamed_variables): - item = "" - - elif 'PY_' + n in notdone: - found = False - - else: - item = str(done['PY_' + n]) - - else: - done[n] = item = "" - - if found: - after = value[m.end():] - value = value[:m.start()] + item + after - if "$" in after: - notdone[name] = value - else: - try: - value = int(value) - except ValueError: - done[name] = value.strip() - else: - done[name] = value - variables.remove(name) - - if (name.startswith('PY_') and - name[3:] in renamed_variables): - - name = name[3:] - if name not in done: - done[name] = value - - else: - # bogus variable reference (e.g. 
"prefix=$/opt/python"); - # just drop it since we can't deal - done[name] = value - variables.remove(name) - - # strip spurious spaces - for k, v in done.items(): - if isinstance(v, str): - done[k] = v.strip() - - # save the results in the global dictionary - vars.update(done) - return vars - - -def get_makefile_filename(): - """Return the path of the Makefile.""" - if _PYTHON_BUILD: - return os.path.join(_PROJECT_BASE, "Makefile") - if hasattr(sys, 'abiflags'): - config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags) - else: - config_dir_name = 'config' - return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile') - - -def _init_posix(vars): - """Initialize the module as appropriate for POSIX systems.""" - # load the installed Makefile: - makefile = get_makefile_filename() - try: - _parse_makefile(makefile, vars) - except IOError as e: - msg = "invalid Python installation: unable to open %s" % makefile - if hasattr(e, "strerror"): - msg = msg + " (%s)" % e.strerror - raise IOError(msg) - # load the installed pyconfig.h: - config_h = get_config_h_filename() - try: - with open(config_h) as f: - parse_config_h(f, vars) - except IOError as e: - msg = "invalid Python installation: unable to open %s" % config_h - if hasattr(e, "strerror"): - msg = msg + " (%s)" % e.strerror - raise IOError(msg) - # On AIX, there are wrong paths to the linker scripts in the Makefile - # -- these paths are relative to the Python source, but when installed - # the scripts are in another directory. 
- if _PYTHON_BUILD: - vars['LDSHARED'] = vars['BLDSHARED'] - - -def _init_non_posix(vars): - """Initialize the module as appropriate for NT""" - # set basic install directories - vars['LIBDEST'] = get_path('stdlib') - vars['BINLIBDEST'] = get_path('platstdlib') - vars['INCLUDEPY'] = get_path('include') - vars['SO'] = '.pyd' - vars['EXE'] = '.exe' - vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT - vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable)) - -# -# public APIs -# - - -def parse_config_h(fp, vars=None): - """Parse a config.h-style file. - - A dictionary containing name/value pairs is returned. If an - optional dictionary is passed in as the second argument, it is - used instead of a new dictionary. - """ - if vars is None: - vars = {} - define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n") - undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n") - - while True: - line = fp.readline() - if not line: - break - m = define_rx.match(line) - if m: - n, v = m.group(1, 2) - try: - v = int(v) - except ValueError: - pass - vars[n] = v - else: - m = undef_rx.match(line) - if m: - vars[m.group(1)] = 0 - return vars - - -def get_config_h_filename(): - """Return the path of pyconfig.h.""" - if _PYTHON_BUILD: - if os.name == "nt": - inc_dir = os.path.join(_PROJECT_BASE, "PC") - else: - inc_dir = _PROJECT_BASE - else: - inc_dir = get_path('platinclude') - return os.path.join(inc_dir, 'pyconfig.h') - - -def get_scheme_names(): - """Return a tuple containing the schemes names.""" - return tuple(sorted(_SCHEMES.sections())) - - -def get_path_names(): - """Return a tuple containing the paths names.""" - # xxx see if we want a static list - return _SCHEMES.options('posix_prefix') - - -def get_paths(scheme=_get_default_scheme(), vars=None, expand=True): - """Return a mapping containing an install scheme. - - ``scheme`` is the install scheme name. If not provided, it will - return the default scheme for the current platform. 
- """ - _ensure_cfg_read() - if expand: - return _expand_vars(scheme, vars) - else: - return dict(_SCHEMES.items(scheme)) - - -def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True): - """Return a path corresponding to the scheme. - - ``scheme`` is the install scheme name. - """ - return get_paths(scheme, vars, expand)[name] - - -def get_config_vars(*args): - """With no arguments, return a dictionary of all configuration - variables relevant for the current platform. - - On Unix, this means every variable defined in Python's installed Makefile; - On Windows and Mac OS it's a much smaller set. - - With arguments, return a list of values that result from looking up - each argument in the configuration variable dictionary. - """ - global _CONFIG_VARS - if _CONFIG_VARS is None: - _CONFIG_VARS = {} - # Normalized versions of prefix and exec_prefix are handy to have; - # in fact, these are the standard versions used most places in the - # distutils2 module. - _CONFIG_VARS['prefix'] = _PREFIX - _CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX - _CONFIG_VARS['py_version'] = _PY_VERSION - _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT - _CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2] - _CONFIG_VARS['base'] = _PREFIX - _CONFIG_VARS['platbase'] = _EXEC_PREFIX - _CONFIG_VARS['projectbase'] = _PROJECT_BASE - try: - _CONFIG_VARS['abiflags'] = sys.abiflags - except AttributeError: - # sys.abiflags may not be defined on all platforms. - _CONFIG_VARS['abiflags'] = '' - - if os.name in ('nt', 'os2'): - _init_non_posix(_CONFIG_VARS) - if os.name == 'posix': - _init_posix(_CONFIG_VARS) - # Setting 'userbase' is done below the call to the - # init function to enable using 'get_config_var' in - # the init-function. 
- if sys.version >= '2.6': - _CONFIG_VARS['userbase'] = _getuserbase() - - if 'srcdir' not in _CONFIG_VARS: - _CONFIG_VARS['srcdir'] = _PROJECT_BASE - else: - _CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir']) - - # Convert srcdir into an absolute path if it appears necessary. - # Normally it is relative to the build directory. However, during - # testing, for example, we might be running a non-installed python - # from a different directory. - if _PYTHON_BUILD and os.name == "posix": - base = _PROJECT_BASE - try: - cwd = os.getcwd() - except OSError: - cwd = None - if (not os.path.isabs(_CONFIG_VARS['srcdir']) and - base != cwd): - # srcdir is relative and we are not in the same directory - # as the executable. Assume executable is in the build - # directory and make srcdir absolute. - srcdir = os.path.join(base, _CONFIG_VARS['srcdir']) - _CONFIG_VARS['srcdir'] = os.path.normpath(srcdir) - - if sys.platform == 'darwin': - kernel_version = os.uname()[2] # Kernel version (8.4.3) - major_version = int(kernel_version.split('.')[0]) - - if major_version < 8: - # On Mac OS X before 10.4, check if -arch and -isysroot - # are in CFLAGS or LDFLAGS and remove them if they are. - # This is needed when building extensions on a 10.3 system - # using a universal build of python. - for key in ('LDFLAGS', 'BASECFLAGS', - # a number of derived variables. These need to be - # patched up as well. - 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): - flags = _CONFIG_VARS[key] - flags = re.sub(r'-arch\s+\w+\s', ' ', flags) - flags = re.sub('-isysroot [^ \t]*', ' ', flags) - _CONFIG_VARS[key] = flags - else: - # Allow the user to override the architecture flags using - # an environment variable. - # NOTE: This name was introduced by Apple in OSX 10.5 and - # is used by several scripting languages distributed with - # that OS release. - if 'ARCHFLAGS' in os.environ: - arch = os.environ['ARCHFLAGS'] - for key in ('LDFLAGS', 'BASECFLAGS', - # a number of derived variables. 
These need to be - # patched up as well. - 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): - - flags = _CONFIG_VARS[key] - flags = re.sub(r'-arch\s+\w+\s', ' ', flags) - flags = flags + ' ' + arch - _CONFIG_VARS[key] = flags - - # If we're on OSX 10.5 or later and the user tries to - # compiles an extension using an SDK that is not present - # on the current machine it is better to not use an SDK - # than to fail. - # - # The major usecase for this is users using a Python.org - # binary installer on OSX 10.6: that installer uses - # the 10.4u SDK, but that SDK is not installed by default - # when you install Xcode. - # - CFLAGS = _CONFIG_VARS.get('CFLAGS', '') - m = re.search(r'-isysroot\s+(\S+)', CFLAGS) - if m is not None: - sdk = m.group(1) - if not os.path.exists(sdk): - for key in ('LDFLAGS', 'BASECFLAGS', - # a number of derived variables. These need to be - # patched up as well. - 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): - - flags = _CONFIG_VARS[key] - flags = re.sub(r'-isysroot\s+\S+(\s|$)', ' ', flags) - _CONFIG_VARS[key] = flags - - if args: - vals = [] - for name in args: - vals.append(_CONFIG_VARS.get(name)) - return vals - else: - return _CONFIG_VARS - - -def get_config_var(name): - """Return the value of a single variable using the dictionary returned by - 'get_config_vars()'. - - Equivalent to get_config_vars().get(name) - """ - return get_config_vars().get(name) - - -def get_platform(): - """Return a string that identifies the current platform. - - This is used mainly to distinguish platform-specific build directories and - platform-specific built distributions. Typically includes the OS name - and version and the architecture (as supplied by 'os.uname()'), - although the exact information included depends on the OS; eg. for IRIX - the architecture isn't particularly important (IRIX only runs on SGI - hardware), but for Linux the kernel version isn't particularly - important. - - Examples of returned values: - linux-i586 - linux-alpha (?) 
- solaris-2.6-sun4u - irix-5.3 - irix64-6.2 - - Windows will return one of: - win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc) - win-ia64 (64bit Windows on Itanium) - win32 (all others - specifically, sys.platform is returned) - - For other non-POSIX platforms, currently just returns 'sys.platform'. - """ - if os.name == 'nt': - # sniff sys.version for architecture. - prefix = " bit (" - i = sys.version.find(prefix) - if i == -1: - return sys.platform - j = sys.version.find(")", i) - look = sys.version[i+len(prefix):j].lower() - if look == 'amd64': - return 'win-amd64' - if look == 'itanium': - return 'win-ia64' - return sys.platform - - if os.name != "posix" or not hasattr(os, 'uname'): - # XXX what about the architecture? NT is Intel or Alpha, - # Mac OS is M68k or PPC, etc. - return sys.platform - - # Try to distinguish various flavours of Unix - osname, host, release, version, machine = os.uname() - - # Convert the OS name to lowercase, remove '/' characters - # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh") - osname = osname.lower().replace('/', '') - machine = machine.replace(' ', '_') - machine = machine.replace('/', '-') - - if osname[:5] == "linux": - # At least on Linux/Intel, 'machine' is the processor -- - # i386, etc. - # XXX what about Alpha, SPARC, etc? - return "%s-%s" % (osname, machine) - elif osname[:5] == "sunos": - if release[0] >= "5": # SunOS 5 == Solaris 2 - osname = "solaris" - release = "%d.%s" % (int(release[0]) - 3, release[2:]) - # fall through to standard osname-release-machine representation - elif osname[:4] == "irix": # could be "irix64"! 
- return "%s-%s" % (osname, release) - elif osname[:3] == "aix": - return "%s-%s.%s" % (osname, version, release) - elif osname[:6] == "cygwin": - osname = "cygwin" - rel_re = re.compile(r'[\d.]+') - m = rel_re.match(release) - if m: - release = m.group() - elif osname[:6] == "darwin": - # - # For our purposes, we'll assume that the system version from - # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set - # to. This makes the compatibility story a bit more sane because the - # machine is going to compile and link as if it were - # MACOSX_DEPLOYMENT_TARGET. - cfgvars = get_config_vars() - macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET') - - if True: - # Always calculate the release of the running machine, - # needed to determine if we can build fat binaries or not. - - macrelease = macver - # Get the system version. Reading this plist is a documented - # way to get the system version (see the documentation for - # the Gestalt Manager) - try: - f = open('/System/Library/CoreServices/SystemVersion.plist') - except IOError: - # We're on a plain darwin box, fall back to the default - # behaviour. - pass - else: - try: - m = re.search(r'<key>ProductUserVisibleVersion</key>\s*' - r'<string>(.*?)</string>', f.read()) - finally: - f.close() - if m is not None: - macrelease = '.'.join(m.group(1).split('.')[:2]) - # else: fall back to the default behaviour - - if not macver: - macver = macrelease - - if macver: - release = macver - osname = "macosx" - - if ((macrelease + '.') >= '10.4.' and - '-arch' in get_config_vars().get('CFLAGS', '').strip()): - # The universal build will build fat binaries, but not on - # systems before 10.4 - # - # Try to detect 4-way universal builds, those have machine-type - # 'universal' instead of 'fat'. 
- - machine = 'fat' - cflags = get_config_vars().get('CFLAGS') - - archs = re.findall(r'-arch\s+(\S+)', cflags) - archs = tuple(sorted(set(archs))) - - if len(archs) == 1: - machine = archs[0] - elif archs == ('i386', 'ppc'): - machine = 'fat' - elif archs == ('i386', 'x86_64'): - machine = 'intel' - elif archs == ('i386', 'ppc', 'x86_64'): - machine = 'fat3' - elif archs == ('ppc64', 'x86_64'): - machine = 'fat64' - elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'): - machine = 'universal' - else: - raise ValueError( - "Don't know machine value for archs=%r" % (archs,)) - - elif machine == 'i386': - # On OSX the machine type returned by uname is always the - # 32-bit variant, even if the executable architecture is - # the 64-bit variant - if sys.maxsize >= 2**32: - machine = 'x86_64' - - elif machine in ('PowerPC', 'Power_Macintosh'): - # Pick a sane name for the PPC architecture. - # See 'i386' case - if sys.maxsize >= 2**32: - machine = 'ppc64' - else: - machine = 'ppc' - - return "%s-%s-%s" % (osname, release, machine) - - -def get_python_version(): - return _PY_VERSION_SHORT - - -def _print_dict(title, data): - for index, (key, value) in enumerate(sorted(data.items())): - if index == 0: - print('%s: ' % (title)) - print('\t%s = "%s"' % (key, value)) - - -def _main(): - """Display all information sysconfig detains.""" - print('Platform: "%s"' % get_platform()) - print('Python version: "%s"' % get_python_version()) - print('Current installation scheme: "%s"' % _get_default_scheme()) - print() - _print_dict('Paths', get_paths()) - print() - _print_dict('Variables', get_config_vars()) - - -if __name__ == '__main__': - _main() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/_backport/tarfile.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/_backport/tarfile.py deleted file mode 100644 index d66d856..0000000 --- 
a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/_backport/tarfile.py +++ /dev/null @@ -1,2607 +0,0 @@ -#------------------------------------------------------------------- -# tarfile.py -#------------------------------------------------------------------- -# Copyright (C) 2002 Lars Gustaebel <lars@gustaebel.de> -# All rights reserved. -# -# Permission is hereby granted, free of charge, to any person -# obtaining a copy of this software and associated documentation -# files (the "Software"), to deal in the Software without -# restriction, including without limitation the rights to use, -# copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following -# conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. -# -from __future__ import print_function - -"""Read from and write to tar format archives. -""" - -__version__ = "$Revision$" - -version = "0.9.0" -__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)" -__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $" -__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $" -__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend." 
- -#--------- -# Imports -#--------- -import sys -import os -import stat -import errno -import time -import struct -import copy -import re - -try: - import grp, pwd -except ImportError: - grp = pwd = None - -# os.symlink on Windows prior to 6.0 raises NotImplementedError -symlink_exception = (AttributeError, NotImplementedError) -try: - # WindowsError (1314) will be raised if the caller does not hold the - # SeCreateSymbolicLinkPrivilege privilege - symlink_exception += (WindowsError,) -except NameError: - pass - -# from tarfile import * -__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"] - -if sys.version_info[0] < 3: - import __builtin__ as builtins -else: - import builtins - -_open = builtins.open # Since 'open' is TarFile.open - -#--------------------------------------------------------- -# tar constants -#--------------------------------------------------------- -NUL = b"\0" # the null character -BLOCKSIZE = 512 # length of processing blocks -RECORDSIZE = BLOCKSIZE * 20 # length of records -GNU_MAGIC = b"ustar \0" # magic gnu tar string -POSIX_MAGIC = b"ustar\x0000" # magic posix tar string - -LENGTH_NAME = 100 # maximum length of a filename -LENGTH_LINK = 100 # maximum length of a linkname -LENGTH_PREFIX = 155 # maximum length of the prefix field - -REGTYPE = b"0" # regular file -AREGTYPE = b"\0" # regular file -LNKTYPE = b"1" # link (inside tarfile) -SYMTYPE = b"2" # symbolic link -CHRTYPE = b"3" # character special device -BLKTYPE = b"4" # block special device -DIRTYPE = b"5" # directory -FIFOTYPE = b"6" # fifo special device -CONTTYPE = b"7" # contiguous file - -GNUTYPE_LONGNAME = b"L" # GNU tar longname -GNUTYPE_LONGLINK = b"K" # GNU tar longlink -GNUTYPE_SPARSE = b"S" # GNU tar sparse file - -XHDTYPE = b"x" # POSIX.1-2001 extended header -XGLTYPE = b"g" # POSIX.1-2001 global header -SOLARIS_XHDTYPE = b"X" # Solaris extended header - -USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format -GNU_FORMAT = 1 # GNU tar format -PAX_FORMAT = 2 # POSIX.1-2001 
(pax) format -DEFAULT_FORMAT = GNU_FORMAT - -#--------------------------------------------------------- -# tarfile constants -#--------------------------------------------------------- -# File types that tarfile supports: -SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE, - SYMTYPE, DIRTYPE, FIFOTYPE, - CONTTYPE, CHRTYPE, BLKTYPE, - GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, - GNUTYPE_SPARSE) - -# File types that will be treated as a regular file. -REGULAR_TYPES = (REGTYPE, AREGTYPE, - CONTTYPE, GNUTYPE_SPARSE) - -# File types that are part of the GNU tar format. -GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, - GNUTYPE_SPARSE) - -# Fields from a pax header that override a TarInfo attribute. -PAX_FIELDS = ("path", "linkpath", "size", "mtime", - "uid", "gid", "uname", "gname") - -# Fields from a pax header that are affected by hdrcharset. -PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname")) - -# Fields in a pax header that are numbers, all other fields -# are treated as strings. -PAX_NUMBER_FIELDS = { - "atime": float, - "ctime": float, - "mtime": float, - "uid": int, - "gid": int, - "size": int -} - -#--------------------------------------------------------- -# Bits used in the mode field, values in octal. 
-#--------------------------------------------------------- -S_IFLNK = 0o120000 # symbolic link -S_IFREG = 0o100000 # regular file -S_IFBLK = 0o060000 # block device -S_IFDIR = 0o040000 # directory -S_IFCHR = 0o020000 # character device -S_IFIFO = 0o010000 # fifo - -TSUID = 0o4000 # set UID on execution -TSGID = 0o2000 # set GID on execution -TSVTX = 0o1000 # reserved - -TUREAD = 0o400 # read by owner -TUWRITE = 0o200 # write by owner -TUEXEC = 0o100 # execute/search by owner -TGREAD = 0o040 # read by group -TGWRITE = 0o020 # write by group -TGEXEC = 0o010 # execute/search by group -TOREAD = 0o004 # read by other -TOWRITE = 0o002 # write by other -TOEXEC = 0o001 # execute/search by other - -#--------------------------------------------------------- -# initialization -#--------------------------------------------------------- -if os.name in ("nt", "ce"): - ENCODING = "utf-8" -else: - ENCODING = sys.getfilesystemencoding() - -#--------------------------------------------------------- -# Some useful functions -#--------------------------------------------------------- - -def stn(s, length, encoding, errors): - """Convert a string to a null-terminated bytes object. - """ - s = s.encode(encoding, errors) - return s[:length] + (length - len(s)) * NUL - -def nts(s, encoding, errors): - """Convert a null-terminated bytes object to a string. - """ - p = s.find(b"\0") - if p != -1: - s = s[:p] - return s.decode(encoding, errors) - -def nti(s): - """Convert a number field to a python number. - """ - # There are two possible encodings for a number field, see - # itn() below. - if s[0] != chr(0o200): - try: - n = int(nts(s, "ascii", "strict") or "0", 8) - except ValueError: - raise InvalidHeaderError("invalid header") - else: - n = 0 - for i in range(len(s) - 1): - n <<= 8 - n += ord(s[i + 1]) - return n - -def itn(n, digits=8, format=DEFAULT_FORMAT): - """Convert a python number to a number field. 
- """ - # POSIX 1003.1-1988 requires numbers to be encoded as a string of - # octal digits followed by a null-byte, this allows values up to - # (8**(digits-1))-1. GNU tar allows storing numbers greater than - # that if necessary. A leading 0o200 byte indicates this particular - # encoding, the following digits-1 bytes are a big-endian - # representation. This allows values up to (256**(digits-1))-1. - if 0 <= n < 8 ** (digits - 1): - s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL - else: - if format != GNU_FORMAT or n >= 256 ** (digits - 1): - raise ValueError("overflow in number field") - - if n < 0: - # XXX We mimic GNU tar's behaviour with negative numbers, - # this could raise OverflowError. - n = struct.unpack("L", struct.pack("l", n))[0] - - s = bytearray() - for i in range(digits - 1): - s.insert(0, n & 0o377) - n >>= 8 - s.insert(0, 0o200) - return s - -def calc_chksums(buf): - """Calculate the checksum for a member's header by summing up all - characters except for the chksum field which is treated as if - it was filled with spaces. According to the GNU tar sources, - some tars (Sun and NeXT) calculate chksum with signed char, - which will be different if there are chars in the buffer with - the high bit set. So we calculate two checksums, unsigned and - signed. - """ - unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512])) - signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512])) - return unsigned_chksum, signed_chksum - -def copyfileobj(src, dst, length=None): - """Copy length bytes from fileobj src to fileobj dst. - If length is None, copy the entire content. 
- """ - if length == 0: - return - if length is None: - while True: - buf = src.read(16*1024) - if not buf: - break - dst.write(buf) - return - - BUFSIZE = 16 * 1024 - blocks, remainder = divmod(length, BUFSIZE) - for b in range(blocks): - buf = src.read(BUFSIZE) - if len(buf) < BUFSIZE: - raise IOError("end of file reached") - dst.write(buf) - - if remainder != 0: - buf = src.read(remainder) - if len(buf) < remainder: - raise IOError("end of file reached") - dst.write(buf) - return - -filemode_table = ( - ((S_IFLNK, "l"), - (S_IFREG, "-"), - (S_IFBLK, "b"), - (S_IFDIR, "d"), - (S_IFCHR, "c"), - (S_IFIFO, "p")), - - ((TUREAD, "r"),), - ((TUWRITE, "w"),), - ((TUEXEC|TSUID, "s"), - (TSUID, "S"), - (TUEXEC, "x")), - - ((TGREAD, "r"),), - ((TGWRITE, "w"),), - ((TGEXEC|TSGID, "s"), - (TSGID, "S"), - (TGEXEC, "x")), - - ((TOREAD, "r"),), - ((TOWRITE, "w"),), - ((TOEXEC|TSVTX, "t"), - (TSVTX, "T"), - (TOEXEC, "x")) -) - -def filemode(mode): - """Convert a file's mode to a string of the form - -rwxrwxrwx. 
- Used by TarFile.list() - """ - perm = [] - for table in filemode_table: - for bit, char in table: - if mode & bit == bit: - perm.append(char) - break - else: - perm.append("-") - return "".join(perm) - -class TarError(Exception): - """Base exception.""" - pass -class ExtractError(TarError): - """General exception for extract errors.""" - pass -class ReadError(TarError): - """Exception for unreadable tar archives.""" - pass -class CompressionError(TarError): - """Exception for unavailable compression methods.""" - pass -class StreamError(TarError): - """Exception for unsupported operations on stream-like TarFiles.""" - pass -class HeaderError(TarError): - """Base exception for header errors.""" - pass -class EmptyHeaderError(HeaderError): - """Exception for empty headers.""" - pass -class TruncatedHeaderError(HeaderError): - """Exception for truncated headers.""" - pass -class EOFHeaderError(HeaderError): - """Exception for end of file headers.""" - pass -class InvalidHeaderError(HeaderError): - """Exception for invalid headers.""" - pass -class SubsequentHeaderError(HeaderError): - """Exception for missing and invalid extended headers.""" - pass - -#--------------------------- -# internal stream interface -#--------------------------- -class _LowLevelFile(object): - """Low-level file object. Supports reading and writing. - It is used instead of a regular file object for streaming - access. - """ - - def __init__(self, name, mode): - mode = { - "r": os.O_RDONLY, - "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC, - }[mode] - if hasattr(os, "O_BINARY"): - mode |= os.O_BINARY - self.fd = os.open(name, mode, 0o666) - - def close(self): - os.close(self.fd) - - def read(self, size): - return os.read(self.fd, size) - - def write(self, s): - os.write(self.fd, s) - -class _Stream(object): - """Class that serves as an adapter between TarFile and - a stream-like object. The stream-like object only - needs to have a read() or write() method and is accessed - blockwise. 
Use of gzip or bzip2 compression is possible. - A stream-like object could be for example: sys.stdin, - sys.stdout, a socket, a tape device etc. - - _Stream is intended to be used only internally. - """ - - def __init__(self, name, mode, comptype, fileobj, bufsize): - """Construct a _Stream object. - """ - self._extfileobj = True - if fileobj is None: - fileobj = _LowLevelFile(name, mode) - self._extfileobj = False - - if comptype == '*': - # Enable transparent compression detection for the - # stream interface - fileobj = _StreamProxy(fileobj) - comptype = fileobj.getcomptype() - - self.name = name or "" - self.mode = mode - self.comptype = comptype - self.fileobj = fileobj - self.bufsize = bufsize - self.buf = b"" - self.pos = 0 - self.closed = False - - try: - if comptype == "gz": - try: - import zlib - except ImportError: - raise CompressionError("zlib module is not available") - self.zlib = zlib - self.crc = zlib.crc32(b"") - if mode == "r": - self._init_read_gz() - else: - self._init_write_gz() - - if comptype == "bz2": - try: - import bz2 - except ImportError: - raise CompressionError("bz2 module is not available") - if mode == "r": - self.dbuf = b"" - self.cmp = bz2.BZ2Decompressor() - else: - self.cmp = bz2.BZ2Compressor() - except: - if not self._extfileobj: - self.fileobj.close() - self.closed = True - raise - - def __del__(self): - if hasattr(self, "closed") and not self.closed: - self.close() - - def _init_write_gz(self): - """Initialize for writing with gzip compression. - """ - self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED, - -self.zlib.MAX_WBITS, - self.zlib.DEF_MEM_LEVEL, - 0) - timestamp = struct.pack("<L", int(time.time())) - self.__write(b"\037\213\010\010" + timestamp + b"\002\377") - if self.name.endswith(".gz"): - self.name = self.name[:-3] - # RFC1952 says we must use ISO-8859-1 for the FNAME field. - self.__write(self.name.encode("iso-8859-1", "replace") + NUL) - - def write(self, s): - """Write string s to the stream. 
- """ - if self.comptype == "gz": - self.crc = self.zlib.crc32(s, self.crc) - self.pos += len(s) - if self.comptype != "tar": - s = self.cmp.compress(s) - self.__write(s) - - def __write(self, s): - """Write string s to the stream if a whole new block - is ready to be written. - """ - self.buf += s - while len(self.buf) > self.bufsize: - self.fileobj.write(self.buf[:self.bufsize]) - self.buf = self.buf[self.bufsize:] - - def close(self): - """Close the _Stream object. No operation should be - done on it afterwards. - """ - if self.closed: - return - - if self.mode == "w" and self.comptype != "tar": - self.buf += self.cmp.flush() - - if self.mode == "w" and self.buf: - self.fileobj.write(self.buf) - self.buf = b"" - if self.comptype == "gz": - # The native zlib crc is an unsigned 32-bit integer, but - # the Python wrapper implicitly casts that to a signed C - # long. So, on a 32-bit box self.crc may "look negative", - # while the same crc on a 64-bit box may "look positive". - # To avoid irksome warnings from the `struct` module, force - # it to look positive on all boxes. - self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff)) - self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF)) - - if not self._extfileobj: - self.fileobj.close() - - self.closed = True - - def _init_read_gz(self): - """Initialize for reading a gzip compressed fileobj. 
- """ - self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS) - self.dbuf = b"" - - # taken from gzip.GzipFile with some alterations - if self.__read(2) != b"\037\213": - raise ReadError("not a gzip file") - if self.__read(1) != b"\010": - raise CompressionError("unsupported compression method") - - flag = ord(self.__read(1)) - self.__read(6) - - if flag & 4: - xlen = ord(self.__read(1)) + 256 * ord(self.__read(1)) - self.read(xlen) - if flag & 8: - while True: - s = self.__read(1) - if not s or s == NUL: - break - if flag & 16: - while True: - s = self.__read(1) - if not s or s == NUL: - break - if flag & 2: - self.__read(2) - - def tell(self): - """Return the stream's file pointer position. - """ - return self.pos - - def seek(self, pos=0): - """Set the stream's file pointer to pos. Negative seeking - is forbidden. - """ - if pos - self.pos >= 0: - blocks, remainder = divmod(pos - self.pos, self.bufsize) - for i in range(blocks): - self.read(self.bufsize) - self.read(remainder) - else: - raise StreamError("seeking backwards is not allowed") - return self.pos - - def read(self, size=None): - """Return the next size number of bytes from the stream. - If size is not defined, return all bytes of the stream - up to EOF. - """ - if size is None: - t = [] - while True: - buf = self._read(self.bufsize) - if not buf: - break - t.append(buf) - buf = "".join(t) - else: - buf = self._read(size) - self.pos += len(buf) - return buf - - def _read(self, size): - """Return size bytes from the stream. - """ - if self.comptype == "tar": - return self.__read(size) - - c = len(self.dbuf) - while c < size: - buf = self.__read(self.bufsize) - if not buf: - break - try: - buf = self.cmp.decompress(buf) - except IOError: - raise ReadError("invalid compressed data") - self.dbuf += buf - c += len(buf) - buf = self.dbuf[:size] - self.dbuf = self.dbuf[size:] - return buf - - def __read(self, size): - """Return size bytes from stream. 
If internal buffer is empty, - read another block from the stream. - """ - c = len(self.buf) - while c < size: - buf = self.fileobj.read(self.bufsize) - if not buf: - break - self.buf += buf - c += len(buf) - buf = self.buf[:size] - self.buf = self.buf[size:] - return buf -# class _Stream - -class _StreamProxy(object): - """Small proxy class that enables transparent compression - detection for the Stream interface (mode 'r|*'). - """ - - def __init__(self, fileobj): - self.fileobj = fileobj - self.buf = self.fileobj.read(BLOCKSIZE) - - def read(self, size): - self.read = self.fileobj.read - return self.buf - - def getcomptype(self): - if self.buf.startswith(b"\037\213\010"): - return "gz" - if self.buf.startswith(b"BZh91"): - return "bz2" - return "tar" - - def close(self): - self.fileobj.close() -# class StreamProxy - -class _BZ2Proxy(object): - """Small proxy class that enables external file object - support for "r:bz2" and "w:bz2" modes. This is actually - a workaround for a limitation in bz2 module's BZ2File - class which (unlike gzip.GzipFile) has no support for - a file object argument. 
- """ - - blocksize = 16 * 1024 - - def __init__(self, fileobj, mode): - self.fileobj = fileobj - self.mode = mode - self.name = getattr(self.fileobj, "name", None) - self.init() - - def init(self): - import bz2 - self.pos = 0 - if self.mode == "r": - self.bz2obj = bz2.BZ2Decompressor() - self.fileobj.seek(0) - self.buf = b"" - else: - self.bz2obj = bz2.BZ2Compressor() - - def read(self, size): - x = len(self.buf) - while x < size: - raw = self.fileobj.read(self.blocksize) - if not raw: - break - data = self.bz2obj.decompress(raw) - self.buf += data - x += len(data) - - buf = self.buf[:size] - self.buf = self.buf[size:] - self.pos += len(buf) - return buf - - def seek(self, pos): - if pos < self.pos: - self.init() - self.read(pos - self.pos) - - def tell(self): - return self.pos - - def write(self, data): - self.pos += len(data) - raw = self.bz2obj.compress(data) - self.fileobj.write(raw) - - def close(self): - if self.mode == "w": - raw = self.bz2obj.flush() - self.fileobj.write(raw) -# class _BZ2Proxy - -#------------------------ -# Extraction file object -#------------------------ -class _FileInFile(object): - """A thin wrapper around an existing file object that - provides a part of its data as an individual file - object. - """ - - def __init__(self, fileobj, offset, size, blockinfo=None): - self.fileobj = fileobj - self.offset = offset - self.size = size - self.position = 0 - - if blockinfo is None: - blockinfo = [(0, size)] - - # Construct a map with data and zero blocks. 
- self.map_index = 0 - self.map = [] - lastpos = 0 - realpos = self.offset - for offset, size in blockinfo: - if offset > lastpos: - self.map.append((False, lastpos, offset, None)) - self.map.append((True, offset, offset + size, realpos)) - realpos += size - lastpos = offset + size - if lastpos < self.size: - self.map.append((False, lastpos, self.size, None)) - - def seekable(self): - if not hasattr(self.fileobj, "seekable"): - # XXX gzip.GzipFile and bz2.BZ2File - return True - return self.fileobj.seekable() - - def tell(self): - """Return the current file position. - """ - return self.position - - def seek(self, position): - """Seek to a position in the file. - """ - self.position = position - - def read(self, size=None): - """Read data from the file. - """ - if size is None: - size = self.size - self.position - else: - size = min(size, self.size - self.position) - - buf = b"" - while size > 0: - while True: - data, start, stop, offset = self.map[self.map_index] - if start <= self.position < stop: - break - else: - self.map_index += 1 - if self.map_index == len(self.map): - self.map_index = 0 - length = min(size, stop - self.position) - if data: - self.fileobj.seek(offset + (self.position - start)) - buf += self.fileobj.read(length) - else: - buf += NUL * length - size -= length - self.position += length - return buf -#class _FileInFile - - -class ExFileObject(object): - """File-like object for reading an archive member. - Is returned by TarFile.extractfile(). 
- """ - blocksize = 1024 - - def __init__(self, tarfile, tarinfo): - self.fileobj = _FileInFile(tarfile.fileobj, - tarinfo.offset_data, - tarinfo.size, - tarinfo.sparse) - self.name = tarinfo.name - self.mode = "r" - self.closed = False - self.size = tarinfo.size - - self.position = 0 - self.buffer = b"" - - def readable(self): - return True - - def writable(self): - return False - - def seekable(self): - return self.fileobj.seekable() - - def read(self, size=None): - """Read at most size bytes from the file. If size is not - present or None, read all data until EOF is reached. - """ - if self.closed: - raise ValueError("I/O operation on closed file") - - buf = b"" - if self.buffer: - if size is None: - buf = self.buffer - self.buffer = b"" - else: - buf = self.buffer[:size] - self.buffer = self.buffer[size:] - - if size is None: - buf += self.fileobj.read() - else: - buf += self.fileobj.read(size - len(buf)) - - self.position += len(buf) - return buf - - # XXX TextIOWrapper uses the read1() method. - read1 = read - - def readline(self, size=-1): - """Read one entire line from the file. If size is present - and non-negative, return a string with at most that - size, which may be an incomplete line. - """ - if self.closed: - raise ValueError("I/O operation on closed file") - - pos = self.buffer.find(b"\n") + 1 - if pos == 0: - # no newline found. - while True: - buf = self.fileobj.read(self.blocksize) - self.buffer += buf - if not buf or b"\n" in buf: - pos = self.buffer.find(b"\n") + 1 - if pos == 0: - # no newline found. - pos = len(self.buffer) - break - - if size != -1: - pos = min(size, pos) - - buf = self.buffer[:pos] - self.buffer = self.buffer[pos:] - self.position += len(buf) - return buf - - def readlines(self): - """Return a list with all remaining lines. - """ - result = [] - while True: - line = self.readline() - if not line: break - result.append(line) - return result - - def tell(self): - """Return the current file position. 
- """ - if self.closed: - raise ValueError("I/O operation on closed file") - - return self.position - - def seek(self, pos, whence=os.SEEK_SET): - """Seek to a position in the file. - """ - if self.closed: - raise ValueError("I/O operation on closed file") - - if whence == os.SEEK_SET: - self.position = min(max(pos, 0), self.size) - elif whence == os.SEEK_CUR: - if pos < 0: - self.position = max(self.position + pos, 0) - else: - self.position = min(self.position + pos, self.size) - elif whence == os.SEEK_END: - self.position = max(min(self.size + pos, self.size), 0) - else: - raise ValueError("Invalid argument") - - self.buffer = b"" - self.fileobj.seek(self.position) - - def close(self): - """Close the file object. - """ - self.closed = True - - def __iter__(self): - """Get an iterator over the file's lines. - """ - while True: - line = self.readline() - if not line: - break - yield line -#class ExFileObject - -#------------------ -# Exported Classes -#------------------ -class TarInfo(object): - """Informational class which holds the details about an - archive member given by a tar header block. - TarInfo objects are returned by TarFile.getmember(), - TarFile.getmembers() and TarFile.gettarinfo() and are - usually created internally. - """ - - __slots__ = ("name", "mode", "uid", "gid", "size", "mtime", - "chksum", "type", "linkname", "uname", "gname", - "devmajor", "devminor", - "offset", "offset_data", "pax_headers", "sparse", - "tarfile", "_sparse_structs", "_link_target") - - def __init__(self, name=""): - """Construct a TarInfo object. name is the optional name - of the member. 
- """ - self.name = name # member name - self.mode = 0o644 # file permissions - self.uid = 0 # user id - self.gid = 0 # group id - self.size = 0 # file size - self.mtime = 0 # modification time - self.chksum = 0 # header checksum - self.type = REGTYPE # member type - self.linkname = "" # link name - self.uname = "" # user name - self.gname = "" # group name - self.devmajor = 0 # device major number - self.devminor = 0 # device minor number - - self.offset = 0 # the tar header starts here - self.offset_data = 0 # the file's data starts here - - self.sparse = None # sparse member information - self.pax_headers = {} # pax header information - - # In pax headers the "name" and "linkname" field are called - # "path" and "linkpath". - def _getpath(self): - return self.name - def _setpath(self, name): - self.name = name - path = property(_getpath, _setpath) - - def _getlinkpath(self): - return self.linkname - def _setlinkpath(self, linkname): - self.linkname = linkname - linkpath = property(_getlinkpath, _setlinkpath) - - def __repr__(self): - return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self)) - - def get_info(self): - """Return the TarInfo's attributes as a dictionary. - """ - info = { - "name": self.name, - "mode": self.mode & 0o7777, - "uid": self.uid, - "gid": self.gid, - "size": self.size, - "mtime": self.mtime, - "chksum": self.chksum, - "type": self.type, - "linkname": self.linkname, - "uname": self.uname, - "gname": self.gname, - "devmajor": self.devmajor, - "devminor": self.devminor - } - - if info["type"] == DIRTYPE and not info["name"].endswith("/"): - info["name"] += "/" - - return info - - def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"): - """Return a tar header as a string of 512 byte blocks. 
- """ - info = self.get_info() - - if format == USTAR_FORMAT: - return self.create_ustar_header(info, encoding, errors) - elif format == GNU_FORMAT: - return self.create_gnu_header(info, encoding, errors) - elif format == PAX_FORMAT: - return self.create_pax_header(info, encoding) - else: - raise ValueError("invalid format") - - def create_ustar_header(self, info, encoding, errors): - """Return the object as a ustar header block. - """ - info["magic"] = POSIX_MAGIC - - if len(info["linkname"]) > LENGTH_LINK: - raise ValueError("linkname is too long") - - if len(info["name"]) > LENGTH_NAME: - info["prefix"], info["name"] = self._posix_split_name(info["name"]) - - return self._create_header(info, USTAR_FORMAT, encoding, errors) - - def create_gnu_header(self, info, encoding, errors): - """Return the object as a GNU header block sequence. - """ - info["magic"] = GNU_MAGIC - - buf = b"" - if len(info["linkname"]) > LENGTH_LINK: - buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors) - - if len(info["name"]) > LENGTH_NAME: - buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors) - - return buf + self._create_header(info, GNU_FORMAT, encoding, errors) - - def create_pax_header(self, info, encoding): - """Return the object as a ustar header block. If it cannot be - represented this way, prepend a pax extended header sequence - with supplement information. - """ - info["magic"] = POSIX_MAGIC - pax_headers = self.pax_headers.copy() - - # Test string fields for values that exceed the field length or cannot - # be represented in ASCII encoding. - for name, hname, length in ( - ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK), - ("uname", "uname", 32), ("gname", "gname", 32)): - - if hname in pax_headers: - # The pax header has priority. - continue - - # Try to encode the string as ASCII. 
- try: - info[name].encode("ascii", "strict") - except UnicodeEncodeError: - pax_headers[hname] = info[name] - continue - - if len(info[name]) > length: - pax_headers[hname] = info[name] - - # Test number fields for values that exceed the field limit or values - # that like to be stored as float. - for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)): - if name in pax_headers: - # The pax header has priority. Avoid overflow. - info[name] = 0 - continue - - val = info[name] - if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float): - pax_headers[name] = str(val) - info[name] = 0 - - # Create a pax extended header if necessary. - if pax_headers: - buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding) - else: - buf = b"" - - return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace") - - @classmethod - def create_pax_global_header(cls, pax_headers): - """Return the object as a pax global header block sequence. - """ - return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8") - - def _posix_split_name(self, name): - """Split a name longer than 100 chars into a prefix - and a name part. - """ - prefix = name[:LENGTH_PREFIX + 1] - while prefix and prefix[-1] != "/": - prefix = prefix[:-1] - - name = name[len(prefix):] - prefix = prefix[:-1] - - if not prefix or len(name) > LENGTH_NAME: - raise ValueError("name is too long") - return prefix, name - - @staticmethod - def _create_header(info, format, encoding, errors): - """Return a header block. info is a dictionary with file - information, format must be one of the *_FORMAT constants. 
- """ - parts = [ - stn(info.get("name", ""), 100, encoding, errors), - itn(info.get("mode", 0) & 0o7777, 8, format), - itn(info.get("uid", 0), 8, format), - itn(info.get("gid", 0), 8, format), - itn(info.get("size", 0), 12, format), - itn(info.get("mtime", 0), 12, format), - b" ", # checksum field - info.get("type", REGTYPE), - stn(info.get("linkname", ""), 100, encoding, errors), - info.get("magic", POSIX_MAGIC), - stn(info.get("uname", ""), 32, encoding, errors), - stn(info.get("gname", ""), 32, encoding, errors), - itn(info.get("devmajor", 0), 8, format), - itn(info.get("devminor", 0), 8, format), - stn(info.get("prefix", ""), 155, encoding, errors) - ] - - buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts)) - chksum = calc_chksums(buf[-BLOCKSIZE:])[0] - buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:] - return buf - - @staticmethod - def _create_payload(payload): - """Return the string payload filled with zero bytes - up to the next 512 byte border. - """ - blocks, remainder = divmod(len(payload), BLOCKSIZE) - if remainder > 0: - payload += (BLOCKSIZE - remainder) * NUL - return payload - - @classmethod - def _create_gnu_long_header(cls, name, type, encoding, errors): - """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence - for name. - """ - name = name.encode(encoding, errors) + NUL - - info = {} - info["name"] = "././@LongLink" - info["type"] = type - info["size"] = len(name) - info["magic"] = GNU_MAGIC - - # create extended header + name blocks. - return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \ - cls._create_payload(name) - - @classmethod - def _create_pax_generic_header(cls, pax_headers, type, encoding): - """Return a POSIX.1-2008 extended or global header sequence - that contains a list of keyword, value pairs. The values - must be strings. - """ - # Check if one of the fields contains surrogate characters and thereby - # forces hdrcharset=BINARY, see _proc_pax() for more information. 
- binary = False - for keyword, value in pax_headers.items(): - try: - value.encode("utf8", "strict") - except UnicodeEncodeError: - binary = True - break - - records = b"" - if binary: - # Put the hdrcharset field at the beginning of the header. - records += b"21 hdrcharset=BINARY\n" - - for keyword, value in pax_headers.items(): - keyword = keyword.encode("utf8") - if binary: - # Try to restore the original byte representation of `value'. - # Needless to say, that the encoding must match the string. - value = value.encode(encoding, "surrogateescape") - else: - value = value.encode("utf8") - - l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n' - n = p = 0 - while True: - n = l + len(str(p)) - if n == p: - break - p = n - records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n" - - # We use a hardcoded "././@PaxHeader" name like star does - # instead of the one that POSIX recommends. - info = {} - info["name"] = "././@PaxHeader" - info["type"] = type - info["size"] = len(records) - info["magic"] = POSIX_MAGIC - - # Create pax header + record blocks. - return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \ - cls._create_payload(records) - - @classmethod - def frombuf(cls, buf, encoding, errors): - """Construct a TarInfo object from a 512 byte bytes object. 
- """ - if len(buf) == 0: - raise EmptyHeaderError("empty header") - if len(buf) != BLOCKSIZE: - raise TruncatedHeaderError("truncated header") - if buf.count(NUL) == BLOCKSIZE: - raise EOFHeaderError("end of file header") - - chksum = nti(buf[148:156]) - if chksum not in calc_chksums(buf): - raise InvalidHeaderError("bad checksum") - - obj = cls() - obj.name = nts(buf[0:100], encoding, errors) - obj.mode = nti(buf[100:108]) - obj.uid = nti(buf[108:116]) - obj.gid = nti(buf[116:124]) - obj.size = nti(buf[124:136]) - obj.mtime = nti(buf[136:148]) - obj.chksum = chksum - obj.type = buf[156:157] - obj.linkname = nts(buf[157:257], encoding, errors) - obj.uname = nts(buf[265:297], encoding, errors) - obj.gname = nts(buf[297:329], encoding, errors) - obj.devmajor = nti(buf[329:337]) - obj.devminor = nti(buf[337:345]) - prefix = nts(buf[345:500], encoding, errors) - - # Old V7 tar format represents a directory as a regular - # file with a trailing slash. - if obj.type == AREGTYPE and obj.name.endswith("/"): - obj.type = DIRTYPE - - # The old GNU sparse format occupies some of the unused - # space in the buffer for up to 4 sparse structures. - # Save the them for later processing in _proc_sparse(). - if obj.type == GNUTYPE_SPARSE: - pos = 386 - structs = [] - for i in range(4): - try: - offset = nti(buf[pos:pos + 12]) - numbytes = nti(buf[pos + 12:pos + 24]) - except ValueError: - break - structs.append((offset, numbytes)) - pos += 24 - isextended = bool(buf[482]) - origsize = nti(buf[483:495]) - obj._sparse_structs = (structs, isextended, origsize) - - # Remove redundant slashes from directories. - if obj.isdir(): - obj.name = obj.name.rstrip("/") - - # Reconstruct a ustar longname. - if prefix and obj.type not in GNU_TYPES: - obj.name = prefix + "/" + obj.name - return obj - - @classmethod - def fromtarfile(cls, tarfile): - """Return the next TarInfo object from TarFile object - tarfile. 
- """ - buf = tarfile.fileobj.read(BLOCKSIZE) - obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors) - obj.offset = tarfile.fileobj.tell() - BLOCKSIZE - return obj._proc_member(tarfile) - - #-------------------------------------------------------------------------- - # The following are methods that are called depending on the type of a - # member. The entry point is _proc_member() which can be overridden in a - # subclass to add custom _proc_*() methods. A _proc_*() method MUST - # implement the following - # operations: - # 1. Set self.offset_data to the position where the data blocks begin, - # if there is data that follows. - # 2. Set tarfile.offset to the position where the next member's header will - # begin. - # 3. Return self or another valid TarInfo object. - def _proc_member(self, tarfile): - """Choose the right processing method depending on - the type and call it. - """ - if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK): - return self._proc_gnulong(tarfile) - elif self.type == GNUTYPE_SPARSE: - return self._proc_sparse(tarfile) - elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE): - return self._proc_pax(tarfile) - else: - return self._proc_builtin(tarfile) - - def _proc_builtin(self, tarfile): - """Process a builtin type or an unknown type which - will be treated as a regular file. - """ - self.offset_data = tarfile.fileobj.tell() - offset = self.offset_data - if self.isreg() or self.type not in SUPPORTED_TYPES: - # Skip the following data blocks. - offset += self._block(self.size) - tarfile.offset = offset - - # Patch the TarInfo object with saved global - # header information. - self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors) - - return self - - def _proc_gnulong(self, tarfile): - """Process the blocks that hold a GNU longname - or longlink member. - """ - buf = tarfile.fileobj.read(self._block(self.size)) - - # Fetch the next header and process it. 
- try: - next = self.fromtarfile(tarfile) - except HeaderError: - raise SubsequentHeaderError("missing or bad subsequent header") - - # Patch the TarInfo object from the next header with - # the longname information. - next.offset = self.offset - if self.type == GNUTYPE_LONGNAME: - next.name = nts(buf, tarfile.encoding, tarfile.errors) - elif self.type == GNUTYPE_LONGLINK: - next.linkname = nts(buf, tarfile.encoding, tarfile.errors) - - return next - - def _proc_sparse(self, tarfile): - """Process a GNU sparse header plus extra headers. - """ - # We already collected some sparse structures in frombuf(). - structs, isextended, origsize = self._sparse_structs - del self._sparse_structs - - # Collect sparse structures from extended header blocks. - while isextended: - buf = tarfile.fileobj.read(BLOCKSIZE) - pos = 0 - for i in range(21): - try: - offset = nti(buf[pos:pos + 12]) - numbytes = nti(buf[pos + 12:pos + 24]) - except ValueError: - break - if offset and numbytes: - structs.append((offset, numbytes)) - pos += 24 - isextended = bool(buf[504]) - self.sparse = structs - - self.offset_data = tarfile.fileobj.tell() - tarfile.offset = self.offset_data + self._block(self.size) - self.size = origsize - return self - - def _proc_pax(self, tarfile): - """Process an extended or global header as described in - POSIX.1-2008. - """ - # Read the header information. - buf = tarfile.fileobj.read(self._block(self.size)) - - # A pax header stores supplemental information for either - # the following file (extended) or all following files - # (global). - if self.type == XGLTYPE: - pax_headers = tarfile.pax_headers - else: - pax_headers = tarfile.pax_headers.copy() - - # Check if the pax header contains a hdrcharset field. This tells us - # the encoding of the path, linkpath, uname and gname fields. Normally, - # these fields are UTF-8 encoded but since POSIX.1-2008 tar - # implementations are allowed to store them as raw binary strings if - # the translation to UTF-8 fails. 
- match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf) - if match is not None: - pax_headers["hdrcharset"] = match.group(1).decode("utf8") - - # For the time being, we don't care about anything other than "BINARY". - # The only other value that is currently allowed by the standard is - # "ISO-IR 10646 2000 UTF-8" in other words UTF-8. - hdrcharset = pax_headers.get("hdrcharset") - if hdrcharset == "BINARY": - encoding = tarfile.encoding - else: - encoding = "utf8" - - # Parse pax header information. A record looks like that: - # "%d %s=%s\n" % (length, keyword, value). length is the size - # of the complete record including the length field itself and - # the newline. keyword and value are both UTF-8 encoded strings. - regex = re.compile(br"(\d+) ([^=]+)=") - pos = 0 - while True: - match = regex.match(buf, pos) - if not match: - break - - length, keyword = match.groups() - length = int(length) - value = buf[match.end(2) + 1:match.start(1) + length - 1] - - # Normally, we could just use "utf8" as the encoding and "strict" - # as the error handler, but we better not take the risk. For - # example, GNU tar <= 1.23 is known to store filenames it cannot - # translate to UTF-8 as raw strings (unfortunately without a - # hdrcharset=BINARY header). - # We first try the strict standard encoding, and if that fails we - # fall back on the user's encoding and error handler. - keyword = self._decode_pax_field(keyword, "utf8", "utf8", - tarfile.errors) - if keyword in PAX_NAME_FIELDS: - value = self._decode_pax_field(value, encoding, tarfile.encoding, - tarfile.errors) - else: - value = self._decode_pax_field(value, "utf8", "utf8", - tarfile.errors) - - pax_headers[keyword] = value - pos += length - - # Fetch the next header. - try: - next = self.fromtarfile(tarfile) - except HeaderError: - raise SubsequentHeaderError("missing or bad subsequent header") - - # Process GNU sparse information. - if "GNU.sparse.map" in pax_headers: - # GNU extended sparse format version 0.1. 
- self._proc_gnusparse_01(next, pax_headers) - - elif "GNU.sparse.size" in pax_headers: - # GNU extended sparse format version 0.0. - self._proc_gnusparse_00(next, pax_headers, buf) - - elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0": - # GNU extended sparse format version 1.0. - self._proc_gnusparse_10(next, pax_headers, tarfile) - - if self.type in (XHDTYPE, SOLARIS_XHDTYPE): - # Patch the TarInfo object with the extended header info. - next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors) - next.offset = self.offset - - if "size" in pax_headers: - # If the extended header replaces the size field, - # we need to recalculate the offset where the next - # header starts. - offset = next.offset_data - if next.isreg() or next.type not in SUPPORTED_TYPES: - offset += next._block(next.size) - tarfile.offset = offset - - return next - - def _proc_gnusparse_00(self, next, pax_headers, buf): - """Process a GNU tar extended sparse header, version 0.0. - """ - offsets = [] - for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf): - offsets.append(int(match.group(1))) - numbytes = [] - for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf): - numbytes.append(int(match.group(1))) - next.sparse = list(zip(offsets, numbytes)) - - def _proc_gnusparse_01(self, next, pax_headers): - """Process a GNU tar extended sparse header, version 0.1. - """ - sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")] - next.sparse = list(zip(sparse[::2], sparse[1::2])) - - def _proc_gnusparse_10(self, next, pax_headers, tarfile): - """Process a GNU tar extended sparse header, version 1.0. 
- """ - fields = None - sparse = [] - buf = tarfile.fileobj.read(BLOCKSIZE) - fields, buf = buf.split(b"\n", 1) - fields = int(fields) - while len(sparse) < fields * 2: - if b"\n" not in buf: - buf += tarfile.fileobj.read(BLOCKSIZE) - number, buf = buf.split(b"\n", 1) - sparse.append(int(number)) - next.offset_data = tarfile.fileobj.tell() - next.sparse = list(zip(sparse[::2], sparse[1::2])) - - def _apply_pax_info(self, pax_headers, encoding, errors): - """Replace fields with supplemental information from a previous - pax extended or global header. - """ - for keyword, value in pax_headers.items(): - if keyword == "GNU.sparse.name": - setattr(self, "path", value) - elif keyword == "GNU.sparse.size": - setattr(self, "size", int(value)) - elif keyword == "GNU.sparse.realsize": - setattr(self, "size", int(value)) - elif keyword in PAX_FIELDS: - if keyword in PAX_NUMBER_FIELDS: - try: - value = PAX_NUMBER_FIELDS[keyword](value) - except ValueError: - value = 0 - if keyword == "path": - value = value.rstrip("/") - setattr(self, keyword, value) - - self.pax_headers = pax_headers.copy() - - def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors): - """Decode a single field from a pax record. - """ - try: - return value.decode(encoding, "strict") - except UnicodeDecodeError: - return value.decode(fallback_encoding, fallback_errors) - - def _block(self, count): - """Round up a byte count by BLOCKSIZE and return it, - e.g. _block(834) => 1024. 
- """ - blocks, remainder = divmod(count, BLOCKSIZE) - if remainder: - blocks += 1 - return blocks * BLOCKSIZE - - def isreg(self): - return self.type in REGULAR_TYPES - def isfile(self): - return self.isreg() - def isdir(self): - return self.type == DIRTYPE - def issym(self): - return self.type == SYMTYPE - def islnk(self): - return self.type == LNKTYPE - def ischr(self): - return self.type == CHRTYPE - def isblk(self): - return self.type == BLKTYPE - def isfifo(self): - return self.type == FIFOTYPE - def issparse(self): - return self.sparse is not None - def isdev(self): - return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE) -# class TarInfo - -class TarFile(object): - """The TarFile Class provides an interface to tar archives. - """ - - debug = 0 # May be set from 0 (no msgs) to 3 (all msgs) - - dereference = False # If true, add content of linked file to the - # tar file, else the link. - - ignore_zeros = False # If true, skips empty or invalid blocks and - # continues processing. - - errorlevel = 1 # If 0, fatal errors only appear in debug - # messages (if debug >= 0). If > 0, errors - # are passed to the caller as exceptions. - - format = DEFAULT_FORMAT # The format to use when creating an archive. - - encoding = ENCODING # Encoding for 8-bit character strings. - - errors = None # Error handler for unicode conversion. - - tarinfo = TarInfo # The default TarInfo class to use. - - fileobject = ExFileObject # The default ExFileObject class to use. - - def __init__(self, name=None, mode="r", fileobj=None, format=None, - tarinfo=None, dereference=None, ignore_zeros=None, encoding=None, - errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None): - """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to - read from an existing archive, 'a' to append data to an existing - file or 'w' to create a new file overwriting an existing one. `mode' - defaults to 'r'. - If `fileobj' is given, it is used for reading or writing data. 
If it - can be determined, `mode' is overridden by `fileobj's mode. - `fileobj' is not closed, when TarFile is closed. - """ - if len(mode) > 1 or mode not in "raw": - raise ValueError("mode must be 'r', 'a' or 'w'") - self.mode = mode - self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode] - - if not fileobj: - if self.mode == "a" and not os.path.exists(name): - # Create nonexistent files in append mode. - self.mode = "w" - self._mode = "wb" - fileobj = bltn_open(name, self._mode) - self._extfileobj = False - else: - if name is None and hasattr(fileobj, "name"): - name = fileobj.name - if hasattr(fileobj, "mode"): - self._mode = fileobj.mode - self._extfileobj = True - self.name = os.path.abspath(name) if name else None - self.fileobj = fileobj - - # Init attributes. - if format is not None: - self.format = format - if tarinfo is not None: - self.tarinfo = tarinfo - if dereference is not None: - self.dereference = dereference - if ignore_zeros is not None: - self.ignore_zeros = ignore_zeros - if encoding is not None: - self.encoding = encoding - self.errors = errors - - if pax_headers is not None and self.format == PAX_FORMAT: - self.pax_headers = pax_headers - else: - self.pax_headers = {} - - if debug is not None: - self.debug = debug - if errorlevel is not None: - self.errorlevel = errorlevel - - # Init datastructures. - self.closed = False - self.members = [] # list of members as TarInfo objects - self._loaded = False # flag if all members have been read - self.offset = self.fileobj.tell() - # current position in the archive file - self.inodes = {} # dictionary caching the inodes of - # archive members already added - - try: - if self.mode == "r": - self.firstmember = None - self.firstmember = self.next() - - if self.mode == "a": - # Move to the end of the archive, - # before the first empty block. 
- while True: - self.fileobj.seek(self.offset) - try: - tarinfo = self.tarinfo.fromtarfile(self) - self.members.append(tarinfo) - except EOFHeaderError: - self.fileobj.seek(self.offset) - break - except HeaderError as e: - raise ReadError(str(e)) - - if self.mode in "aw": - self._loaded = True - - if self.pax_headers: - buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy()) - self.fileobj.write(buf) - self.offset += len(buf) - except: - if not self._extfileobj: - self.fileobj.close() - self.closed = True - raise - - #-------------------------------------------------------------------------- - # Below are the classmethods which act as alternate constructors to the - # TarFile class. The open() method is the only one that is needed for - # public use; it is the "super"-constructor and is able to select an - # adequate "sub"-constructor for a particular compression using the mapping - # from OPEN_METH. - # - # This concept allows one to subclass TarFile without losing the comfort of - # the super-constructor. A sub-constructor is registered and made available - # by adding it to the mapping in OPEN_METH. - - @classmethod - def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs): - """Open a tar archive for reading, writing or appending. Return - an appropriate TarFile class. 
- - mode: - 'r' or 'r:*' open for reading with transparent compression - 'r:' open for reading exclusively uncompressed - 'r:gz' open for reading with gzip compression - 'r:bz2' open for reading with bzip2 compression - 'a' or 'a:' open for appending, creating the file if necessary - 'w' or 'w:' open for writing without compression - 'w:gz' open for writing with gzip compression - 'w:bz2' open for writing with bzip2 compression - - 'r|*' open a stream of tar blocks with transparent compression - 'r|' open an uncompressed stream of tar blocks for reading - 'r|gz' open a gzip compressed stream of tar blocks - 'r|bz2' open a bzip2 compressed stream of tar blocks - 'w|' open an uncompressed stream for writing - 'w|gz' open a gzip compressed stream for writing - 'w|bz2' open a bzip2 compressed stream for writing - """ - - if not name and not fileobj: - raise ValueError("nothing to open") - - if mode in ("r", "r:*"): - # Find out which *open() is appropriate for opening the file. - for comptype in cls.OPEN_METH: - func = getattr(cls, cls.OPEN_METH[comptype]) - if fileobj is not None: - saved_pos = fileobj.tell() - try: - return func(name, "r", fileobj, **kwargs) - except (ReadError, CompressionError) as e: - if fileobj is not None: - fileobj.seek(saved_pos) - continue - raise ReadError("file could not be opened successfully") - - elif ":" in mode: - filemode, comptype = mode.split(":", 1) - filemode = filemode or "r" - comptype = comptype or "tar" - - # Select the *open() function according to - # given compression. 
- if comptype in cls.OPEN_METH: - func = getattr(cls, cls.OPEN_METH[comptype]) - else: - raise CompressionError("unknown compression type %r" % comptype) - return func(name, filemode, fileobj, **kwargs) - - elif "|" in mode: - filemode, comptype = mode.split("|", 1) - filemode = filemode or "r" - comptype = comptype or "tar" - - if filemode not in "rw": - raise ValueError("mode must be 'r' or 'w'") - - stream = _Stream(name, filemode, comptype, fileobj, bufsize) - try: - t = cls(name, filemode, stream, **kwargs) - except: - stream.close() - raise - t._extfileobj = False - return t - - elif mode in "aw": - return cls.taropen(name, mode, fileobj, **kwargs) - - raise ValueError("undiscernible mode") - - @classmethod - def taropen(cls, name, mode="r", fileobj=None, **kwargs): - """Open uncompressed tar archive name for reading or writing. - """ - if len(mode) > 1 or mode not in "raw": - raise ValueError("mode must be 'r', 'a' or 'w'") - return cls(name, mode, fileobj, **kwargs) - - @classmethod - def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): - """Open gzip compressed tar archive name for reading or writing. - Appending is not allowed. - """ - if len(mode) > 1 or mode not in "rw": - raise ValueError("mode must be 'r' or 'w'") - - try: - import gzip - gzip.GzipFile - except (ImportError, AttributeError): - raise CompressionError("gzip module is not available") - - extfileobj = fileobj is not None - try: - fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj) - t = cls.taropen(name, mode, fileobj, **kwargs) - except IOError: - if not extfileobj and fileobj is not None: - fileobj.close() - if fileobj is None: - raise - raise ReadError("not a gzip file") - except: - if not extfileobj and fileobj is not None: - fileobj.close() - raise - t._extfileobj = extfileobj - return t - - @classmethod - def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): - """Open bzip2 compressed tar archive name for reading or writing. 
- Appending is not allowed. - """ - if len(mode) > 1 or mode not in "rw": - raise ValueError("mode must be 'r' or 'w'.") - - try: - import bz2 - except ImportError: - raise CompressionError("bz2 module is not available") - - if fileobj is not None: - fileobj = _BZ2Proxy(fileobj, mode) - else: - fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel) - - try: - t = cls.taropen(name, mode, fileobj, **kwargs) - except (IOError, EOFError): - fileobj.close() - raise ReadError("not a bzip2 file") - t._extfileobj = False - return t - - # All *open() methods are registered here. - OPEN_METH = { - "tar": "taropen", # uncompressed tar - "gz": "gzopen", # gzip compressed tar - "bz2": "bz2open" # bzip2 compressed tar - } - - #-------------------------------------------------------------------------- - # The public methods which TarFile provides: - - def close(self): - """Close the TarFile. In write-mode, two finishing zero blocks are - appended to the archive. - """ - if self.closed: - return - - if self.mode in "aw": - self.fileobj.write(NUL * (BLOCKSIZE * 2)) - self.offset += (BLOCKSIZE * 2) - # fill up the end with zero-blocks - # (like option -b20 for tar does) - blocks, remainder = divmod(self.offset, RECORDSIZE) - if remainder > 0: - self.fileobj.write(NUL * (RECORDSIZE - remainder)) - - if not self._extfileobj: - self.fileobj.close() - self.closed = True - - def getmember(self, name): - """Return a TarInfo object for member `name'. If `name' can not be - found in the archive, KeyError is raised. If a member occurs more - than once in the archive, its last occurrence is assumed to be the - most up-to-date version. - """ - tarinfo = self._getmember(name) - if tarinfo is None: - raise KeyError("filename %r not found" % name) - return tarinfo - - def getmembers(self): - """Return the members of the archive as a list of TarInfo objects. The - list has the same order as the members in the archive. 
- """ - self._check() - if not self._loaded: # if we want to obtain a list of - self._load() # all members, we first have to - # scan the whole archive. - return self.members - - def getnames(self): - """Return the members of the archive as a list of their names. It has - the same order as the list returned by getmembers(). - """ - return [tarinfo.name for tarinfo in self.getmembers()] - - def gettarinfo(self, name=None, arcname=None, fileobj=None): - """Create a TarInfo object for either the file `name' or the file - object `fileobj' (using os.fstat on its file descriptor). You can - modify some of the TarInfo's attributes before you add it using - addfile(). If given, `arcname' specifies an alternative name for the - file in the archive. - """ - self._check("aw") - - # When fileobj is given, replace name by - # fileobj's real name. - if fileobj is not None: - name = fileobj.name - - # Building the name of the member in the archive. - # Backward slashes are converted to forward slashes, - # Absolute paths are turned to relative paths. - if arcname is None: - arcname = name - drv, arcname = os.path.splitdrive(arcname) - arcname = arcname.replace(os.sep, "/") - arcname = arcname.lstrip("/") - - # Now, fill the TarInfo object with - # information specific for the file. - tarinfo = self.tarinfo() - tarinfo.tarfile = self - - # Use os.stat or os.lstat, depending on platform - # and if symlinks shall be resolved. - if fileobj is None: - if hasattr(os, "lstat") and not self.dereference: - statres = os.lstat(name) - else: - statres = os.stat(name) - else: - statres = os.fstat(fileobj.fileno()) - linkname = "" - - stmd = statres.st_mode - if stat.S_ISREG(stmd): - inode = (statres.st_ino, statres.st_dev) - if not self.dereference and statres.st_nlink > 1 and \ - inode in self.inodes and arcname != self.inodes[inode]: - # Is it a hardlink to an already - # archived file? - type = LNKTYPE - linkname = self.inodes[inode] - else: - # The inode is added only if its valid. 
- # For win32 it is always 0. - type = REGTYPE - if inode[0]: - self.inodes[inode] = arcname - elif stat.S_ISDIR(stmd): - type = DIRTYPE - elif stat.S_ISFIFO(stmd): - type = FIFOTYPE - elif stat.S_ISLNK(stmd): - type = SYMTYPE - linkname = os.readlink(name) - elif stat.S_ISCHR(stmd): - type = CHRTYPE - elif stat.S_ISBLK(stmd): - type = BLKTYPE - else: - return None - - # Fill the TarInfo object with all - # information we can get. - tarinfo.name = arcname - tarinfo.mode = stmd - tarinfo.uid = statres.st_uid - tarinfo.gid = statres.st_gid - if type == REGTYPE: - tarinfo.size = statres.st_size - else: - tarinfo.size = 0 - tarinfo.mtime = statres.st_mtime - tarinfo.type = type - tarinfo.linkname = linkname - if pwd: - try: - tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0] - except KeyError: - pass - if grp: - try: - tarinfo.gname = grp.getgrgid(tarinfo.gid)[0] - except KeyError: - pass - - if type in (CHRTYPE, BLKTYPE): - if hasattr(os, "major") and hasattr(os, "minor"): - tarinfo.devmajor = os.major(statres.st_rdev) - tarinfo.devminor = os.minor(statres.st_rdev) - return tarinfo - - def list(self, verbose=True): - """Print a table of contents to sys.stdout. If `verbose' is False, only - the names of the members are printed. If it is True, an `ls -l'-like - output is produced. 
- """ - self._check() - - for tarinfo in self: - if verbose: - print(filemode(tarinfo.mode), end=' ') - print("%s/%s" % (tarinfo.uname or tarinfo.uid, - tarinfo.gname or tarinfo.gid), end=' ') - if tarinfo.ischr() or tarinfo.isblk(): - print("%10s" % ("%d,%d" \ - % (tarinfo.devmajor, tarinfo.devminor)), end=' ') - else: - print("%10d" % tarinfo.size, end=' ') - print("%d-%02d-%02d %02d:%02d:%02d" \ - % time.localtime(tarinfo.mtime)[:6], end=' ') - - print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ') - - if verbose: - if tarinfo.issym(): - print("->", tarinfo.linkname, end=' ') - if tarinfo.islnk(): - print("link to", tarinfo.linkname, end=' ') - print() - - def add(self, name, arcname=None, recursive=True, exclude=None, filter=None): - """Add the file `name' to the archive. `name' may be any type of file - (directory, fifo, symbolic link, etc.). If given, `arcname' - specifies an alternative name for the file in the archive. - Directories are added recursively by default. This can be avoided by - setting `recursive' to False. `exclude' is a function that should - return True for each filename to be excluded. `filter' is a function - that expects a TarInfo object argument and returns the changed - TarInfo object, if it returns None the TarInfo object will be - excluded from the archive. - """ - self._check("aw") - - if arcname is None: - arcname = name - - # Exclude pathnames. - if exclude is not None: - import warnings - warnings.warn("use the filter argument instead", - DeprecationWarning, 2) - if exclude(name): - self._dbg(2, "tarfile: Excluded %r" % name) - return - - # Skip if somebody tries to archive the archive... - if self.name is not None and os.path.abspath(name) == self.name: - self._dbg(2, "tarfile: Skipped %r" % name) - return - - self._dbg(1, name) - - # Create a TarInfo object from the file. 
- tarinfo = self.gettarinfo(name, arcname) - - if tarinfo is None: - self._dbg(1, "tarfile: Unsupported type %r" % name) - return - - # Change or exclude the TarInfo object. - if filter is not None: - tarinfo = filter(tarinfo) - if tarinfo is None: - self._dbg(2, "tarfile: Excluded %r" % name) - return - - # Append the tar header and data to the archive. - if tarinfo.isreg(): - f = bltn_open(name, "rb") - self.addfile(tarinfo, f) - f.close() - - elif tarinfo.isdir(): - self.addfile(tarinfo) - if recursive: - for f in os.listdir(name): - self.add(os.path.join(name, f), os.path.join(arcname, f), - recursive, exclude, filter=filter) - - else: - self.addfile(tarinfo) - - def addfile(self, tarinfo, fileobj=None): - """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is - given, tarinfo.size bytes are read from it and added to the archive. - You can create TarInfo objects using gettarinfo(). - On Windows platforms, `fileobj' should always be opened with mode - 'rb' to avoid irritation about the file size. - """ - self._check("aw") - - tarinfo = copy.copy(tarinfo) - - buf = tarinfo.tobuf(self.format, self.encoding, self.errors) - self.fileobj.write(buf) - self.offset += len(buf) - - # If there's data to follow, append it. - if fileobj is not None: - copyfileobj(fileobj, self.fileobj, tarinfo.size) - blocks, remainder = divmod(tarinfo.size, BLOCKSIZE) - if remainder > 0: - self.fileobj.write(NUL * (BLOCKSIZE - remainder)) - blocks += 1 - self.offset += blocks * BLOCKSIZE - - self.members.append(tarinfo) - - def extractall(self, path=".", members=None): - """Extract all members from the archive to the current working - directory and set owner, modification time and permissions on - directories afterwards. `path' specifies a different directory - to extract to. `members' is optional and must be a subset of the - list returned by getmembers(). 
- """ - directories = [] - - if members is None: - members = self - - for tarinfo in members: - if tarinfo.isdir(): - # Extract directories with a safe mode. - directories.append(tarinfo) - tarinfo = copy.copy(tarinfo) - tarinfo.mode = 0o700 - # Do not set_attrs directories, as we will do that further down - self.extract(tarinfo, path, set_attrs=not tarinfo.isdir()) - - # Reverse sort directories. - directories.sort(key=lambda a: a.name) - directories.reverse() - - # Set correct owner, mtime and filemode on directories. - for tarinfo in directories: - dirpath = os.path.join(path, tarinfo.name) - try: - self.chown(tarinfo, dirpath) - self.utime(tarinfo, dirpath) - self.chmod(tarinfo, dirpath) - except ExtractError as e: - if self.errorlevel > 1: - raise - else: - self._dbg(1, "tarfile: %s" % e) - - def extract(self, member, path="", set_attrs=True): - """Extract a member from the archive to the current working directory, - using its full name. Its file information is extracted as accurately - as possible. `member' may be a filename or a TarInfo object. You can - specify a different directory using `path'. File attributes (owner, - mtime, mode) are set unless `set_attrs' is False. - """ - self._check("r") - - if isinstance(member, str): - tarinfo = self.getmember(member) - else: - tarinfo = member - - # Prepare the link target for makelink(). - if tarinfo.islnk(): - tarinfo._link_target = os.path.join(path, tarinfo.linkname) - - try: - self._extract_member(tarinfo, os.path.join(path, tarinfo.name), - set_attrs=set_attrs) - except EnvironmentError as e: - if self.errorlevel > 0: - raise - else: - if e.filename is None: - self._dbg(1, "tarfile: %s" % e.strerror) - else: - self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename)) - except ExtractError as e: - if self.errorlevel > 1: - raise - else: - self._dbg(1, "tarfile: %s" % e) - - def extractfile(self, member): - """Extract a member from the archive as a file object. 
`member' may be - a filename or a TarInfo object. If `member' is a regular file, a - file-like object is returned. If `member' is a link, a file-like - object is constructed from the link's target. If `member' is none of - the above, None is returned. - The file-like object is read-only and provides the following - methods: read(), readline(), readlines(), seek() and tell() - """ - self._check("r") - - if isinstance(member, str): - tarinfo = self.getmember(member) - else: - tarinfo = member - - if tarinfo.isreg(): - return self.fileobject(self, tarinfo) - - elif tarinfo.type not in SUPPORTED_TYPES: - # If a member's type is unknown, it is treated as a - # regular file. - return self.fileobject(self, tarinfo) - - elif tarinfo.islnk() or tarinfo.issym(): - if isinstance(self.fileobj, _Stream): - # A small but ugly workaround for the case that someone tries - # to extract a (sym)link as a file-object from a non-seekable - # stream of tar blocks. - raise StreamError("cannot extract (sym)link as file object") - else: - # A (sym)link's file object is its target's file object. - return self.extractfile(self._find_link_target(tarinfo)) - else: - # If there's no data associated with the member (directory, chrdev, - # blkdev, etc.), return None instead of a file object. - return None - - def _extract_member(self, tarinfo, targetpath, set_attrs=True): - """Extract the TarInfo object tarinfo to a physical - file called targetpath. - """ - # Fetch the TarInfo object for the given name - # and build the destination pathname, replacing - # forward slashes to platform specific separators. - targetpath = targetpath.rstrip("/") - targetpath = targetpath.replace("/", os.sep) - - # Create all upper directories. - upperdirs = os.path.dirname(targetpath) - if upperdirs and not os.path.exists(upperdirs): - # Create directories that are not part of the archive with - # default permissions. 
- os.makedirs(upperdirs) - - if tarinfo.islnk() or tarinfo.issym(): - self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname)) - else: - self._dbg(1, tarinfo.name) - - if tarinfo.isreg(): - self.makefile(tarinfo, targetpath) - elif tarinfo.isdir(): - self.makedir(tarinfo, targetpath) - elif tarinfo.isfifo(): - self.makefifo(tarinfo, targetpath) - elif tarinfo.ischr() or tarinfo.isblk(): - self.makedev(tarinfo, targetpath) - elif tarinfo.islnk() or tarinfo.issym(): - self.makelink(tarinfo, targetpath) - elif tarinfo.type not in SUPPORTED_TYPES: - self.makeunknown(tarinfo, targetpath) - else: - self.makefile(tarinfo, targetpath) - - if set_attrs: - self.chown(tarinfo, targetpath) - if not tarinfo.issym(): - self.chmod(tarinfo, targetpath) - self.utime(tarinfo, targetpath) - - #-------------------------------------------------------------------------- - # Below are the different file methods. They are called via - # _extract_member() when extract() is called. They can be replaced in a - # subclass to implement other functionality. - - def makedir(self, tarinfo, targetpath): - """Make a directory called targetpath. - """ - try: - # Use a safe mode for the directory, the real mode is set - # later in _extract_member(). - os.mkdir(targetpath, 0o700) - except EnvironmentError as e: - if e.errno != errno.EEXIST: - raise - - def makefile(self, tarinfo, targetpath): - """Make a file called targetpath. - """ - source = self.fileobj - source.seek(tarinfo.offset_data) - target = bltn_open(targetpath, "wb") - if tarinfo.sparse is not None: - for offset, size in tarinfo.sparse: - target.seek(offset) - copyfileobj(source, target, size) - else: - copyfileobj(source, target, tarinfo.size) - target.seek(tarinfo.size) - target.truncate() - target.close() - - def makeunknown(self, tarinfo, targetpath): - """Make a file from a TarInfo object with an unknown type - at targetpath. 
- """ - self.makefile(tarinfo, targetpath) - self._dbg(1, "tarfile: Unknown file type %r, " \ - "extracted as regular file." % tarinfo.type) - - def makefifo(self, tarinfo, targetpath): - """Make a fifo called targetpath. - """ - if hasattr(os, "mkfifo"): - os.mkfifo(targetpath) - else: - raise ExtractError("fifo not supported by system") - - def makedev(self, tarinfo, targetpath): - """Make a character or block device called targetpath. - """ - if not hasattr(os, "mknod") or not hasattr(os, "makedev"): - raise ExtractError("special devices not supported by system") - - mode = tarinfo.mode - if tarinfo.isblk(): - mode |= stat.S_IFBLK - else: - mode |= stat.S_IFCHR - - os.mknod(targetpath, mode, - os.makedev(tarinfo.devmajor, tarinfo.devminor)) - - def makelink(self, tarinfo, targetpath): - """Make a (symbolic) link called targetpath. If it cannot be created - (platform limitation), we try to make a copy of the referenced file - instead of a link. - """ - try: - # For systems that support symbolic and hard links. - if tarinfo.issym(): - os.symlink(tarinfo.linkname, targetpath) - else: - # See extract(). - if os.path.exists(tarinfo._link_target): - os.link(tarinfo._link_target, targetpath) - else: - self._extract_member(self._find_link_target(tarinfo), - targetpath) - except symlink_exception: - if tarinfo.issym(): - linkpath = os.path.join(os.path.dirname(tarinfo.name), - tarinfo.linkname) - else: - linkpath = tarinfo.linkname - else: - try: - self._extract_member(self._find_link_target(tarinfo), - targetpath) - except KeyError: - raise ExtractError("unable to resolve link inside archive") - - def chown(self, tarinfo, targetpath): - """Set owner of targetpath according to tarinfo. - """ - if pwd and hasattr(os, "geteuid") and os.geteuid() == 0: - # We have to be root to do so. 
- try: - g = grp.getgrnam(tarinfo.gname)[2] - except KeyError: - g = tarinfo.gid - try: - u = pwd.getpwnam(tarinfo.uname)[2] - except KeyError: - u = tarinfo.uid - try: - if tarinfo.issym() and hasattr(os, "lchown"): - os.lchown(targetpath, u, g) - else: - if sys.platform != "os2emx": - os.chown(targetpath, u, g) - except EnvironmentError as e: - raise ExtractError("could not change owner") - - def chmod(self, tarinfo, targetpath): - """Set file permissions of targetpath according to tarinfo. - """ - if hasattr(os, 'chmod'): - try: - os.chmod(targetpath, tarinfo.mode) - except EnvironmentError as e: - raise ExtractError("could not change mode") - - def utime(self, tarinfo, targetpath): - """Set modification time of targetpath according to tarinfo. - """ - if not hasattr(os, 'utime'): - return - try: - os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime)) - except EnvironmentError as e: - raise ExtractError("could not change modification time") - - #-------------------------------------------------------------------------- - def next(self): - """Return the next member of the archive as a TarInfo object, when - TarFile is opened for reading. Return None if there is no more - available. - """ - self._check("ra") - if self.firstmember is not None: - m = self.firstmember - self.firstmember = None - return m - - # Read the next block. 
- self.fileobj.seek(self.offset) - tarinfo = None - while True: - try: - tarinfo = self.tarinfo.fromtarfile(self) - except EOFHeaderError as e: - if self.ignore_zeros: - self._dbg(2, "0x%X: %s" % (self.offset, e)) - self.offset += BLOCKSIZE - continue - except InvalidHeaderError as e: - if self.ignore_zeros: - self._dbg(2, "0x%X: %s" % (self.offset, e)) - self.offset += BLOCKSIZE - continue - elif self.offset == 0: - raise ReadError(str(e)) - except EmptyHeaderError: - if self.offset == 0: - raise ReadError("empty file") - except TruncatedHeaderError as e: - if self.offset == 0: - raise ReadError(str(e)) - except SubsequentHeaderError as e: - raise ReadError(str(e)) - break - - if tarinfo is not None: - self.members.append(tarinfo) - else: - self._loaded = True - - return tarinfo - - #-------------------------------------------------------------------------- - # Little helper methods: - - def _getmember(self, name, tarinfo=None, normalize=False): - """Find an archive member by name from bottom to top. - If tarinfo is given, it is used as the starting point. - """ - # Ensure that all members have been loaded. - members = self.getmembers() - - # Limit the member search list up to tarinfo. - if tarinfo is not None: - members = members[:members.index(tarinfo)] - - if normalize: - name = os.path.normpath(name) - - for member in reversed(members): - if normalize: - member_name = os.path.normpath(member.name) - else: - member_name = member.name - - if name == member_name: - return member - - def _load(self): - """Read through the entire archive file and look for readable - members. - """ - while True: - tarinfo = self.next() - if tarinfo is None: - break - self._loaded = True - - def _check(self, mode=None): - """Check if TarFile is still open, and if the operation's mode - corresponds to TarFile's mode. 
- """ - if self.closed: - raise IOError("%s is closed" % self.__class__.__name__) - if mode is not None and self.mode not in mode: - raise IOError("bad operation for mode %r" % self.mode) - - def _find_link_target(self, tarinfo): - """Find the target member of a symlink or hardlink member in the - archive. - """ - if tarinfo.issym(): - # Always search the entire archive. - linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname - limit = None - else: - # Search the archive before the link, because a hard link is - # just a reference to an already archived file. - linkname = tarinfo.linkname - limit = tarinfo - - member = self._getmember(linkname, tarinfo=limit, normalize=True) - if member is None: - raise KeyError("linkname %r not found" % linkname) - return member - - def __iter__(self): - """Provide an iterator object. - """ - if self._loaded: - return iter(self.members) - else: - return TarIter(self) - - def _dbg(self, level, msg): - """Write debugging output to sys.stderr. - """ - if level <= self.debug: - print(msg, file=sys.stderr) - - def __enter__(self): - self._check() - return self - - def __exit__(self, type, value, traceback): - if type is None: - self.close() - else: - # An exception occurred. We must not call close() because - # it would try to write end-of-archive blocks and padding. - if not self._extfileobj: - self.fileobj.close() - self.closed = True -# class TarFile - -class TarIter(object): - """Iterator Class. - - for tarinfo in TarFile(...): - suite... - """ - - def __init__(self, tarfile): - """Construct a TarIter object. - """ - self.tarfile = tarfile - self.index = 0 - def __iter__(self): - """Return iterator object. - """ - return self - - def __next__(self): - """Return the next item using TarFile's next() method. - When all members have been read, set TarFile as _loaded. 
- """ - # Fix for SF #1100429: Under rare circumstances it can - # happen that getmembers() is called during iteration, - # which will cause TarIter to stop prematurely. - if not self.tarfile._loaded: - tarinfo = self.tarfile.next() - if not tarinfo: - self.tarfile._loaded = True - raise StopIteration - else: - try: - tarinfo = self.tarfile.members[self.index] - except IndexError: - raise StopIteration - self.index += 1 - return tarinfo - - next = __next__ # for Python 2.x - -#-------------------- -# exported functions -#-------------------- -def is_tarfile(name): - """Return True if name points to a tar archive that we - are able to handle, else return False. - """ - try: - t = open(name) - t.close() - return True - except TarError: - return False - -bltn_open = open -open = TarFile.open diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/compat.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/compat.py deleted file mode 100644 index ff328c8..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/compat.py +++ /dev/null @@ -1,1120 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2013-2017 Vinay Sajip. -# Licensed to the Python Software Foundation under a contributor agreement. -# See LICENSE.txt and CONTRIBUTORS.txt. 
-# -from __future__ import absolute_import - -import os -import re -import sys - -try: - import ssl -except ImportError: # pragma: no cover - ssl = None - -if sys.version_info[0] < 3: # pragma: no cover - from StringIO import StringIO - string_types = basestring, - text_type = unicode - from types import FileType as file_type - import __builtin__ as builtins - import ConfigParser as configparser - from ._backport import shutil - from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit - from urllib import (urlretrieve, quote as _quote, unquote, url2pathname, - pathname2url, ContentTooShortError, splittype) - - def quote(s): - if isinstance(s, unicode): - s = s.encode('utf-8') - return _quote(s) - - import urllib2 - from urllib2 import (Request, urlopen, URLError, HTTPError, - HTTPBasicAuthHandler, HTTPPasswordMgr, - HTTPHandler, HTTPRedirectHandler, - build_opener) - if ssl: - from urllib2 import HTTPSHandler - import httplib - import xmlrpclib - import Queue as queue - from HTMLParser import HTMLParser - import htmlentitydefs - raw_input = raw_input - from itertools import ifilter as filter - from itertools import ifilterfalse as filterfalse - - _userprog = None - def splituser(host): - """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" - global _userprog - if _userprog is None: - import re - _userprog = re.compile('^(.*)@(.*)$') - - match = _userprog.match(host) - if match: return match.group(1, 2) - return None, host - -else: # pragma: no cover - from io import StringIO - string_types = str, - text_type = str - from io import TextIOWrapper as file_type - import builtins - import configparser - import shutil - from urllib.parse import (urlparse, urlunparse, urljoin, splituser, quote, - unquote, urlsplit, urlunsplit, splittype) - from urllib.request import (urlopen, urlretrieve, Request, url2pathname, - pathname2url, - HTTPBasicAuthHandler, HTTPPasswordMgr, - HTTPHandler, HTTPRedirectHandler, - build_opener) - if ssl: 
- from urllib.request import HTTPSHandler - from urllib.error import HTTPError, URLError, ContentTooShortError - import http.client as httplib - import urllib.request as urllib2 - import xmlrpc.client as xmlrpclib - import queue - from html.parser import HTMLParser - import html.entities as htmlentitydefs - raw_input = input - from itertools import filterfalse - filter = filter - -try: - from ssl import match_hostname, CertificateError -except ImportError: # pragma: no cover - class CertificateError(ValueError): - pass - - - def _dnsname_match(dn, hostname, max_wildcards=1): - """Matching according to RFC 6125, section 6.4.3 - - http://tools.ietf.org/html/rfc6125#section-6.4.3 - """ - pats = [] - if not dn: - return False - - parts = dn.split('.') - leftmost, remainder = parts[0], parts[1:] - - wildcards = leftmost.count('*') - if wildcards > max_wildcards: - # Issue #17980: avoid denials of service by refusing more - # than one wildcard per fragment. A survey of established - # policy among SSL implementations showed it to be a - # reasonable choice. - raise CertificateError( - "too many wildcards in certificate DNS name: " + repr(dn)) - - # speed up common case w/o wildcards - if not wildcards: - return dn.lower() == hostname.lower() - - # RFC 6125, section 6.4.3, subitem 1. - # The client SHOULD NOT attempt to match a presented identifier in which - # the wildcard character comprises a label other than the left-most label. - if leftmost == '*': - # When '*' is a fragment by itself, it matches a non-empty dotless - # fragment. - pats.append('[^.]+') - elif leftmost.startswith('xn--') or hostname.startswith('xn--'): - # RFC 6125, section 6.4.3, subitem 3. - # The client SHOULD NOT attempt to match a presented identifier - # where the wildcard character is embedded within an A-label or - # U-label of an internationalized domain name. - pats.append(re.escape(leftmost)) - else: - # Otherwise, '*' matches any dotless string, e.g. 
www* - pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) - - # add the remaining fragments, ignore any wildcards - for frag in remainder: - pats.append(re.escape(frag)) - - pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) - return pat.match(hostname) - - - def match_hostname(cert, hostname): - """Verify that *cert* (in decoded format as returned by - SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 - rules are followed, but IP addresses are not accepted for *hostname*. - - CertificateError is raised on failure. On success, the function - returns nothing. - """ - if not cert: - raise ValueError("empty or no certificate, match_hostname needs a " - "SSL socket or SSL context with either " - "CERT_OPTIONAL or CERT_REQUIRED") - dnsnames = [] - san = cert.get('subjectAltName', ()) - for key, value in san: - if key == 'DNS': - if _dnsname_match(value, hostname): - return - dnsnames.append(value) - if not dnsnames: - # The subject is only checked when there is no dNSName entry - # in subjectAltName - for sub in cert.get('subject', ()): - for key, value in sub: - # XXX according to RFC 2818, the most specific Common Name - # must be used. 
- if key == 'commonName': - if _dnsname_match(value, hostname): - return - dnsnames.append(value) - if len(dnsnames) > 1: - raise CertificateError("hostname %r " - "doesn't match either of %s" - % (hostname, ', '.join(map(repr, dnsnames)))) - elif len(dnsnames) == 1: - raise CertificateError("hostname %r " - "doesn't match %r" - % (hostname, dnsnames[0])) - else: - raise CertificateError("no appropriate commonName or " - "subjectAltName fields were found") - - -try: - from types import SimpleNamespace as Container -except ImportError: # pragma: no cover - class Container(object): - """ - A generic container for when multiple values need to be returned - """ - def __init__(self, **kwargs): - self.__dict__.update(kwargs) - - -try: - from shutil import which -except ImportError: # pragma: no cover - # Implementation from Python 3.3 - def which(cmd, mode=os.F_OK | os.X_OK, path=None): - """Given a command, mode, and a PATH string, return the path which - conforms to the given mode on the PATH, or None if there is no such - file. - - `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result - of os.environ.get("PATH"), or can be overridden with a custom search - path. - - """ - # Check that a given file can be accessed with the correct mode. - # Additionally check that `file` is not a directory, as on Windows - # directories pass the os.access check. - def _access_check(fn, mode): - return (os.path.exists(fn) and os.access(fn, mode) - and not os.path.isdir(fn)) - - # If we're given a path with a directory part, look it up directly rather - # than referring to PATH directories. This includes checking relative to the - # current directory, e.g. ./script - if os.path.dirname(cmd): - if _access_check(cmd, mode): - return cmd - return None - - if path is None: - path = os.environ.get("PATH", os.defpath) - if not path: - return None - path = path.split(os.pathsep) - - if sys.platform == "win32": - # The current directory takes precedence on Windows. 
- if not os.curdir in path: - path.insert(0, os.curdir) - - # PATHEXT is necessary to check on Windows. - pathext = os.environ.get("PATHEXT", "").split(os.pathsep) - # See if the given file matches any of the expected path extensions. - # This will allow us to short circuit when given "python.exe". - # If it does match, only test that one, otherwise we have to try - # others. - if any(cmd.lower().endswith(ext.lower()) for ext in pathext): - files = [cmd] - else: - files = [cmd + ext for ext in pathext] - else: - # On other platforms you don't have things like PATHEXT to tell you - # what file suffixes are executable, so just pass on cmd as-is. - files = [cmd] - - seen = set() - for dir in path: - normdir = os.path.normcase(dir) - if not normdir in seen: - seen.add(normdir) - for thefile in files: - name = os.path.join(dir, thefile) - if _access_check(name, mode): - return name - return None - - -# ZipFile is a context manager in 2.7, but not in 2.6 - -from zipfile import ZipFile as BaseZipFile - -if hasattr(BaseZipFile, '__enter__'): # pragma: no cover - ZipFile = BaseZipFile -else: # pragma: no cover - from zipfile import ZipExtFile as BaseZipExtFile - - class ZipExtFile(BaseZipExtFile): - def __init__(self, base): - self.__dict__.update(base.__dict__) - - def __enter__(self): - return self - - def __exit__(self, *exc_info): - self.close() - # return None, so if an exception occurred, it will propagate - - class ZipFile(BaseZipFile): - def __enter__(self): - return self - - def __exit__(self, *exc_info): - self.close() - # return None, so if an exception occurred, it will propagate - - def open(self, *args, **kwargs): - base = BaseZipFile.open(self, *args, **kwargs) - return ZipExtFile(base) - -try: - from platform import python_implementation -except ImportError: # pragma: no cover - def python_implementation(): - """Return a string identifying the Python implementation.""" - if 'PyPy' in sys.version: - return 'PyPy' - if os.name == 'java': - return 'Jython' - if 
sys.version.startswith('IronPython'): - return 'IronPython' - return 'CPython' - -try: - import sysconfig -except ImportError: # pragma: no cover - from ._backport import sysconfig - -try: - callable = callable -except NameError: # pragma: no cover - from collections import Callable - - def callable(obj): - return isinstance(obj, Callable) - - -try: - fsencode = os.fsencode - fsdecode = os.fsdecode -except AttributeError: # pragma: no cover - # Issue #99: on some systems (e.g. containerised), - # sys.getfilesystemencoding() returns None, and we need a real value, - # so fall back to utf-8. From the CPython 2.7 docs relating to Unix and - # sys.getfilesystemencoding(): the return value is "the user’s preference - # according to the result of nl_langinfo(CODESET), or None if the - # nl_langinfo(CODESET) failed." - _fsencoding = sys.getfilesystemencoding() or 'utf-8' - if _fsencoding == 'mbcs': - _fserrors = 'strict' - else: - _fserrors = 'surrogateescape' - - def fsencode(filename): - if isinstance(filename, bytes): - return filename - elif isinstance(filename, text_type): - return filename.encode(_fsencoding, _fserrors) - else: - raise TypeError("expect bytes or str, not %s" % - type(filename).__name__) - - def fsdecode(filename): - if isinstance(filename, text_type): - return filename - elif isinstance(filename, bytes): - return filename.decode(_fsencoding, _fserrors) - else: - raise TypeError("expect bytes or str, not %s" % - type(filename).__name__) - -try: - from tokenize import detect_encoding -except ImportError: # pragma: no cover - from codecs import BOM_UTF8, lookup - import re - - cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)") - - def _get_normal_name(orig_enc): - """Imitates get_normal_name in tokenizer.c.""" - # Only care about the first 12 characters. 
- enc = orig_enc[:12].lower().replace("_", "-") - if enc == "utf-8" or enc.startswith("utf-8-"): - return "utf-8" - if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ - enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): - return "iso-8859-1" - return orig_enc - - def detect_encoding(readline): - """ - The detect_encoding() function is used to detect the encoding that should - be used to decode a Python source file. It requires one argument, readline, - in the same way as the tokenize() generator. - - It will call readline a maximum of twice, and return the encoding used - (as a string) and a list of any lines (left as bytes) it has read in. - - It detects the encoding from the presence of a utf-8 bom or an encoding - cookie as specified in pep-0263. If both a bom and a cookie are present, - but disagree, a SyntaxError will be raised. If the encoding cookie is an - invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, - 'utf-8-sig' is returned. - - If no encoding is specified, then the default of 'utf-8' will be returned. - """ - try: - filename = readline.__self__.name - except AttributeError: - filename = None - bom_found = False - encoding = None - default = 'utf-8' - def read_or_stop(): - try: - return readline() - except StopIteration: - return b'' - - def find_cookie(line): - try: - # Decode as UTF-8. Either the line is an encoding declaration, - # in which case it should be pure ASCII, or it must be UTF-8 - # per default encoding. 
- line_string = line.decode('utf-8') - except UnicodeDecodeError: - msg = "invalid or missing encoding declaration" - if filename is not None: - msg = '{} for {!r}'.format(msg, filename) - raise SyntaxError(msg) - - matches = cookie_re.findall(line_string) - if not matches: - return None - encoding = _get_normal_name(matches[0]) - try: - codec = lookup(encoding) - except LookupError: - # This behaviour mimics the Python interpreter - if filename is None: - msg = "unknown encoding: " + encoding - else: - msg = "unknown encoding for {!r}: {}".format(filename, - encoding) - raise SyntaxError(msg) - - if bom_found: - if codec.name != 'utf-8': - # This behaviour mimics the Python interpreter - if filename is None: - msg = 'encoding problem: utf-8' - else: - msg = 'encoding problem for {!r}: utf-8'.format(filename) - raise SyntaxError(msg) - encoding += '-sig' - return encoding - - first = read_or_stop() - if first.startswith(BOM_UTF8): - bom_found = True - first = first[3:] - default = 'utf-8-sig' - if not first: - return default, [] - - encoding = find_cookie(first) - if encoding: - return encoding, [first] - - second = read_or_stop() - if not second: - return default, [first] - - encoding = find_cookie(second) - if encoding: - return encoding, [first, second] - - return default, [first, second] - -# For converting & <-> & etc. 
-try: - from html import escape -except ImportError: - from cgi import escape -if sys.version_info[:2] < (3, 4): - unescape = HTMLParser().unescape -else: - from html import unescape - -try: - from collections import ChainMap -except ImportError: # pragma: no cover - from collections import MutableMapping - - try: - from reprlib import recursive_repr as _recursive_repr - except ImportError: - def _recursive_repr(fillvalue='...'): - ''' - Decorator to make a repr function return fillvalue for a recursive - call - ''' - - def decorating_function(user_function): - repr_running = set() - - def wrapper(self): - key = id(self), get_ident() - if key in repr_running: - return fillvalue - repr_running.add(key) - try: - result = user_function(self) - finally: - repr_running.discard(key) - return result - - # Can't use functools.wraps() here because of bootstrap issues - wrapper.__module__ = getattr(user_function, '__module__') - wrapper.__doc__ = getattr(user_function, '__doc__') - wrapper.__name__ = getattr(user_function, '__name__') - wrapper.__annotations__ = getattr(user_function, '__annotations__', {}) - return wrapper - - return decorating_function - - class ChainMap(MutableMapping): - ''' A ChainMap groups multiple dicts (or other mappings) together - to create a single, updateable view. - - The underlying mappings are stored in a list. That list is public and can - accessed or updated using the *maps* attribute. There is no other state. - - Lookups search the underlying mappings successively until a key is found. - In contrast, writes, updates, and deletions only operate on the first - mapping. - - ''' - - def __init__(self, *maps): - '''Initialize a ChainMap by setting *maps* to the given mappings. - If no mappings are provided, a single empty dictionary is used. 
- - ''' - self.maps = list(maps) or [{}] # always at least one map - - def __missing__(self, key): - raise KeyError(key) - - def __getitem__(self, key): - for mapping in self.maps: - try: - return mapping[key] # can't use 'key in mapping' with defaultdict - except KeyError: - pass - return self.__missing__(key) # support subclasses that define __missing__ - - def get(self, key, default=None): - return self[key] if key in self else default - - def __len__(self): - return len(set().union(*self.maps)) # reuses stored hash values if possible - - def __iter__(self): - return iter(set().union(*self.maps)) - - def __contains__(self, key): - return any(key in m for m in self.maps) - - def __bool__(self): - return any(self.maps) - - @_recursive_repr() - def __repr__(self): - return '{0.__class__.__name__}({1})'.format( - self, ', '.join(map(repr, self.maps))) - - @classmethod - def fromkeys(cls, iterable, *args): - 'Create a ChainMap with a single dict created from the iterable.' - return cls(dict.fromkeys(iterable, *args)) - - def copy(self): - 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' - return self.__class__(self.maps[0].copy(), *self.maps[1:]) - - __copy__ = copy - - def new_child(self): # like Django's Context.push() - 'New ChainMap with a new dict followed by all previous maps.' - return self.__class__({}, *self.maps) - - @property - def parents(self): # like Django's Context.pop() - 'New ChainMap from maps[1:].' - return self.__class__(*self.maps[1:]) - - def __setitem__(self, key, value): - self.maps[0][key] = value - - def __delitem__(self, key): - try: - del self.maps[0][key] - except KeyError: - raise KeyError('Key not found in the first mapping: {!r}'.format(key)) - - def popitem(self): - 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' 
- try: - return self.maps[0].popitem() - except KeyError: - raise KeyError('No keys found in the first mapping.') - - def pop(self, key, *args): - 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' - try: - return self.maps[0].pop(key, *args) - except KeyError: - raise KeyError('Key not found in the first mapping: {!r}'.format(key)) - - def clear(self): - 'Clear maps[0], leaving maps[1:] intact.' - self.maps[0].clear() - -try: - from importlib.util import cache_from_source # Python >= 3.4 -except ImportError: # pragma: no cover - try: - from imp import cache_from_source - except ImportError: # pragma: no cover - def cache_from_source(path, debug_override=None): - assert path.endswith('.py') - if debug_override is None: - debug_override = __debug__ - if debug_override: - suffix = 'c' - else: - suffix = 'o' - return path + suffix - -try: - from collections import OrderedDict -except ImportError: # pragma: no cover -## {{{ http://code.activestate.com/recipes/576693/ (r9) -# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy. -# Passes Python2.7's test suite and incorporates all the latest updates. - try: - from thread import get_ident as _get_ident - except ImportError: - from dummy_thread import get_ident as _get_ident - - try: - from _abcoll import KeysView, ValuesView, ItemsView - except ImportError: - pass - - - class OrderedDict(dict): - 'Dictionary that remembers insertion order' - # An inherited dict maps keys to values. - # The inherited dict provides __getitem__, __len__, __contains__, and get. - # The remaining methods are order-aware. - # Big-O running times for all methods are the same as for regular dictionaries. - - # The internal self.__map dictionary maps keys to links in a doubly linked list. - # The circular doubly linked list starts and ends with a sentinel element. - # The sentinel element never gets deleted (this simplifies the algorithm). 
- # Each link is stored as a list of length three: [PREV, NEXT, KEY]. - - def __init__(self, *args, **kwds): - '''Initialize an ordered dictionary. Signature is the same as for - regular dictionaries, but keyword arguments are not recommended - because their insertion order is arbitrary. - - ''' - if len(args) > 1: - raise TypeError('expected at most 1 arguments, got %d' % len(args)) - try: - self.__root - except AttributeError: - self.__root = root = [] # sentinel node - root[:] = [root, root, None] - self.__map = {} - self.__update(*args, **kwds) - - def __setitem__(self, key, value, dict_setitem=dict.__setitem__): - 'od.__setitem__(i, y) <==> od[i]=y' - # Setting a new item creates a new link which goes at the end of the linked - # list, and the inherited dictionary is updated with the new key/value pair. - if key not in self: - root = self.__root - last = root[0] - last[1] = root[0] = self.__map[key] = [last, root, key] - dict_setitem(self, key, value) - - def __delitem__(self, key, dict_delitem=dict.__delitem__): - 'od.__delitem__(y) <==> del od[y]' - # Deleting an existing item uses self.__map to find the link which is - # then removed by updating the links in the predecessor and successor nodes. - dict_delitem(self, key) - link_prev, link_next, key = self.__map.pop(key) - link_prev[1] = link_next - link_next[0] = link_prev - - def __iter__(self): - 'od.__iter__() <==> iter(od)' - root = self.__root - curr = root[1] - while curr is not root: - yield curr[2] - curr = curr[1] - - def __reversed__(self): - 'od.__reversed__() <==> reversed(od)' - root = self.__root - curr = root[0] - while curr is not root: - yield curr[2] - curr = curr[0] - - def clear(self): - 'od.clear() -> None. Remove all items from od.' 
- try: - for node in self.__map.itervalues(): - del node[:] - root = self.__root - root[:] = [root, root, None] - self.__map.clear() - except AttributeError: - pass - dict.clear(self) - - def popitem(self, last=True): - '''od.popitem() -> (k, v), return and remove a (key, value) pair. - Pairs are returned in LIFO order if last is true or FIFO order if false. - - ''' - if not self: - raise KeyError('dictionary is empty') - root = self.__root - if last: - link = root[0] - link_prev = link[0] - link_prev[1] = root - root[0] = link_prev - else: - link = root[1] - link_next = link[1] - root[1] = link_next - link_next[0] = root - key = link[2] - del self.__map[key] - value = dict.pop(self, key) - return key, value - - # -- the following methods do not depend on the internal structure -- - - def keys(self): - 'od.keys() -> list of keys in od' - return list(self) - - def values(self): - 'od.values() -> list of values in od' - return [self[key] for key in self] - - def items(self): - 'od.items() -> list of (key, value) pairs in od' - return [(key, self[key]) for key in self] - - def iterkeys(self): - 'od.iterkeys() -> an iterator over the keys in od' - return iter(self) - - def itervalues(self): - 'od.itervalues -> an iterator over the values in od' - for k in self: - yield self[k] - - def iteritems(self): - 'od.iteritems -> an iterator over the (key, value) items in od' - for k in self: - yield (k, self[k]) - - def update(*args, **kwds): - '''od.update(E, **F) -> None. Update od from dict/iterable E and F. 
- - If E is a dict instance, does: for k in E: od[k] = E[k] - If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] - Or if E is an iterable of items, does: for k, v in E: od[k] = v - In either case, this is followed by: for k, v in F.items(): od[k] = v - - ''' - if len(args) > 2: - raise TypeError('update() takes at most 2 positional ' - 'arguments (%d given)' % (len(args),)) - elif not args: - raise TypeError('update() takes at least 1 argument (0 given)') - self = args[0] - # Make progressively weaker assumptions about "other" - other = () - if len(args) == 2: - other = args[1] - if isinstance(other, dict): - for key in other: - self[key] = other[key] - elif hasattr(other, 'keys'): - for key in other.keys(): - self[key] = other[key] - else: - for key, value in other: - self[key] = value - for key, value in kwds.items(): - self[key] = value - - __update = update # let subclasses override update without breaking __init__ - - __marker = object() - - def pop(self, key, default=__marker): - '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. - If key is not found, d is returned if given, otherwise KeyError is raised. - - ''' - if key in self: - result = self[key] - del self[key] - return result - if default is self.__marker: - raise KeyError(key) - return default - - def setdefault(self, key, default=None): - 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' - if key in self: - return self[key] - self[key] = default - return default - - def __repr__(self, _repr_running=None): - 'od.__repr__() <==> repr(od)' - if not _repr_running: _repr_running = {} - call_key = id(self), _get_ident() - if call_key in _repr_running: - return '...' 
- _repr_running[call_key] = 1 - try: - if not self: - return '%s()' % (self.__class__.__name__,) - return '%s(%r)' % (self.__class__.__name__, self.items()) - finally: - del _repr_running[call_key] - - def __reduce__(self): - 'Return state information for pickling' - items = [[k, self[k]] for k in self] - inst_dict = vars(self).copy() - for k in vars(OrderedDict()): - inst_dict.pop(k, None) - if inst_dict: - return (self.__class__, (items,), inst_dict) - return self.__class__, (items,) - - def copy(self): - 'od.copy() -> a shallow copy of od' - return self.__class__(self) - - @classmethod - def fromkeys(cls, iterable, value=None): - '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S - and values equal to v (which defaults to None). - - ''' - d = cls() - for key in iterable: - d[key] = value - return d - - def __eq__(self, other): - '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive - while comparison to a regular mapping is order-insensitive. - - ''' - if isinstance(other, OrderedDict): - return len(self)==len(other) and self.items() == other.items() - return dict.__eq__(self, other) - - def __ne__(self, other): - return not self == other - - # -- the following methods are only used in Python 2.7 -- - - def viewkeys(self): - "od.viewkeys() -> a set-like object providing a view on od's keys" - return KeysView(self) - - def viewvalues(self): - "od.viewvalues() -> an object providing a view on od's values" - return ValuesView(self) - - def viewitems(self): - "od.viewitems() -> a set-like object providing a view on od's items" - return ItemsView(self) - -try: - from logging.config import BaseConfigurator, valid_ident -except ImportError: # pragma: no cover - IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I) - - - def valid_ident(s): - m = IDENTIFIER.match(s) - if not m: - raise ValueError('Not a valid Python identifier: %r' % s) - return True - - - # The ConvertingXXX classes are wrappers around standard Python containers, - # and 
they serve to convert any suitable values in the container. The - # conversion converts base dicts, lists and tuples to their wrapped - # equivalents, whereas strings which match a conversion format are converted - # appropriately. - # - # Each wrapper should have a configurator attribute holding the actual - # configurator to use for conversion. - - class ConvertingDict(dict): - """A converting dictionary wrapper.""" - - def __getitem__(self, key): - value = dict.__getitem__(self, key) - result = self.configurator.convert(value) - #If the converted value is different, save for next time - if value is not result: - self[key] = result - if type(result) in (ConvertingDict, ConvertingList, - ConvertingTuple): - result.parent = self - result.key = key - return result - - def get(self, key, default=None): - value = dict.get(self, key, default) - result = self.configurator.convert(value) - #If the converted value is different, save for next time - if value is not result: - self[key] = result - if type(result) in (ConvertingDict, ConvertingList, - ConvertingTuple): - result.parent = self - result.key = key - return result - - def pop(self, key, default=None): - value = dict.pop(self, key, default) - result = self.configurator.convert(value) - if value is not result: - if type(result) in (ConvertingDict, ConvertingList, - ConvertingTuple): - result.parent = self - result.key = key - return result - - class ConvertingList(list): - """A converting list wrapper.""" - def __getitem__(self, key): - value = list.__getitem__(self, key) - result = self.configurator.convert(value) - #If the converted value is different, save for next time - if value is not result: - self[key] = result - if type(result) in (ConvertingDict, ConvertingList, - ConvertingTuple): - result.parent = self - result.key = key - return result - - def pop(self, idx=-1): - value = list.pop(self, idx) - result = self.configurator.convert(value) - if value is not result: - if type(result) in (ConvertingDict, 
ConvertingList, - ConvertingTuple): - result.parent = self - return result - - class ConvertingTuple(tuple): - """A converting tuple wrapper.""" - def __getitem__(self, key): - value = tuple.__getitem__(self, key) - result = self.configurator.convert(value) - if value is not result: - if type(result) in (ConvertingDict, ConvertingList, - ConvertingTuple): - result.parent = self - result.key = key - return result - - class BaseConfigurator(object): - """ - The configurator base class which defines some useful defaults. - """ - - CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$') - - WORD_PATTERN = re.compile(r'^\s*(\w+)\s*') - DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*') - INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*') - DIGIT_PATTERN = re.compile(r'^\d+$') - - value_converters = { - 'ext' : 'ext_convert', - 'cfg' : 'cfg_convert', - } - - # We might want to use a different one, e.g. importlib - importer = staticmethod(__import__) - - def __init__(self, config): - self.config = ConvertingDict(config) - self.config.configurator = self - - def resolve(self, s): - """ - Resolve strings to objects using standard import and attribute - syntax. - """ - name = s.split('.') - used = name.pop(0) - try: - found = self.importer(used) - for frag in name: - used += '.' 
+ frag - try: - found = getattr(found, frag) - except AttributeError: - self.importer(used) - found = getattr(found, frag) - return found - except ImportError: - e, tb = sys.exc_info()[1:] - v = ValueError('Cannot resolve %r: %s' % (s, e)) - v.__cause__, v.__traceback__ = e, tb - raise v - - def ext_convert(self, value): - """Default converter for the ext:// protocol.""" - return self.resolve(value) - - def cfg_convert(self, value): - """Default converter for the cfg:// protocol.""" - rest = value - m = self.WORD_PATTERN.match(rest) - if m is None: - raise ValueError("Unable to convert %r" % value) - else: - rest = rest[m.end():] - d = self.config[m.groups()[0]] - #print d, rest - while rest: - m = self.DOT_PATTERN.match(rest) - if m: - d = d[m.groups()[0]] - else: - m = self.INDEX_PATTERN.match(rest) - if m: - idx = m.groups()[0] - if not self.DIGIT_PATTERN.match(idx): - d = d[idx] - else: - try: - n = int(idx) # try as number first (most likely) - d = d[n] - except TypeError: - d = d[idx] - if m: - rest = rest[m.end():] - else: - raise ValueError('Unable to convert ' - '%r at %r' % (value, rest)) - #rest should be empty - return d - - def convert(self, value): - """ - Convert values to an appropriate type. dicts, lists and tuples are - replaced by their converting alternatives. Strings are checked to - see if they have a conversion format and are converted if they do. 
- """ - if not isinstance(value, ConvertingDict) and isinstance(value, dict): - value = ConvertingDict(value) - value.configurator = self - elif not isinstance(value, ConvertingList) and isinstance(value, list): - value = ConvertingList(value) - value.configurator = self - elif not isinstance(value, ConvertingTuple) and\ - isinstance(value, tuple): - value = ConvertingTuple(value) - value.configurator = self - elif isinstance(value, string_types): - m = self.CONVERT_PATTERN.match(value) - if m: - d = m.groupdict() - prefix = d['prefix'] - converter = self.value_converters.get(prefix, None) - if converter: - suffix = d['suffix'] - converter = getattr(self, converter) - value = converter(suffix) - return value - - def configure_custom(self, config): - """Configure an object with a user-supplied factory.""" - c = config.pop('()') - if not callable(c): - c = self.resolve(c) - props = config.pop('.', None) - # Check for valid identifiers - kwargs = dict([(k, config[k]) for k in config if valid_ident(k)]) - result = c(**kwargs) - if props: - for name, value in props.items(): - setattr(result, name, value) - return result - - def as_tuple(self, value): - """Utility function which converts lists to tuples.""" - if isinstance(value, list): - value = tuple(value) - return value diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/database.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/database.py deleted file mode 100644 index b13cdac..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/database.py +++ /dev/null @@ -1,1339 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012-2017 The Python Software Foundation. -# See LICENSE.txt and CONTRIBUTORS.txt. 
-# -"""PEP 376 implementation.""" - -from __future__ import unicode_literals - -import base64 -import codecs -import contextlib -import hashlib -import logging -import os -import posixpath -import sys -import zipimport - -from . import DistlibException, resources -from .compat import StringIO -from .version import get_scheme, UnsupportedVersionError -from .metadata import (Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME, - LEGACY_METADATA_FILENAME) -from .util import (parse_requirement, cached_property, parse_name_and_version, - read_exports, write_exports, CSVReader, CSVWriter) - - -__all__ = ['Distribution', 'BaseInstalledDistribution', - 'InstalledDistribution', 'EggInfoDistribution', - 'DistributionPath'] - - -logger = logging.getLogger(__name__) - -EXPORTS_FILENAME = 'pydist-exports.json' -COMMANDS_FILENAME = 'pydist-commands.json' - -DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED', - 'RESOURCES', EXPORTS_FILENAME, 'SHARED') - -DISTINFO_EXT = '.dist-info' - - -class _Cache(object): - """ - A simple cache mapping names and .dist-info paths to distributions - """ - def __init__(self): - """ - Initialise an instance. There is normally one for each DistributionPath. - """ - self.name = {} - self.path = {} - self.generated = False - - def clear(self): - """ - Clear the cache, setting it to its initial state. - """ - self.name.clear() - self.path.clear() - self.generated = False - - def add(self, dist): - """ - Add a distribution to the cache. - :param dist: The distribution to add. - """ - if dist.path not in self.path: - self.path[dist.path] = dist - self.name.setdefault(dist.key, []).append(dist) - - -class DistributionPath(object): - """ - Represents a set of distributions installed on a path (typically sys.path). - """ - def __init__(self, path=None, include_egg=False): - """ - Create an instance from a path, optionally including legacy (distutils/ - setuptools/distribute) distributions. 
- :param path: The path to use, as a list of directories. If not specified, - sys.path is used. - :param include_egg: If True, this instance will look for and return legacy - distributions as well as those based on PEP 376. - """ - if path is None: - path = sys.path - self.path = path - self._include_dist = True - self._include_egg = include_egg - - self._cache = _Cache() - self._cache_egg = _Cache() - self._cache_enabled = True - self._scheme = get_scheme('default') - - def _get_cache_enabled(self): - return self._cache_enabled - - def _set_cache_enabled(self, value): - self._cache_enabled = value - - cache_enabled = property(_get_cache_enabled, _set_cache_enabled) - - def clear_cache(self): - """ - Clears the internal cache. - """ - self._cache.clear() - self._cache_egg.clear() - - - def _yield_distributions(self): - """ - Yield .dist-info and/or .egg(-info) distributions. - """ - # We need to check if we've seen some resources already, because on - # some Linux systems (e.g. some Debian/Ubuntu variants) there are - # symlinks which alias other files in the environment. 
- seen = set() - for path in self.path: - finder = resources.finder_for_path(path) - if finder is None: - continue - r = finder.find('') - if not r or not r.is_container: - continue - rset = sorted(r.resources) - for entry in rset: - r = finder.find(entry) - if not r or r.path in seen: - continue - if self._include_dist and entry.endswith(DISTINFO_EXT): - possible_filenames = [METADATA_FILENAME, - WHEEL_METADATA_FILENAME, - LEGACY_METADATA_FILENAME] - for metadata_filename in possible_filenames: - metadata_path = posixpath.join(entry, metadata_filename) - pydist = finder.find(metadata_path) - if pydist: - break - else: - continue - - with contextlib.closing(pydist.as_stream()) as stream: - metadata = Metadata(fileobj=stream, scheme='legacy') - logger.debug('Found %s', r.path) - seen.add(r.path) - yield new_dist_class(r.path, metadata=metadata, - env=self) - elif self._include_egg and entry.endswith(('.egg-info', - '.egg')): - logger.debug('Found %s', r.path) - seen.add(r.path) - yield old_dist_class(r.path, self) - - def _generate_cache(self): - """ - Scan the path for distributions and populate the cache with - those that are found. - """ - gen_dist = not self._cache.generated - gen_egg = self._include_egg and not self._cache_egg.generated - if gen_dist or gen_egg: - for dist in self._yield_distributions(): - if isinstance(dist, InstalledDistribution): - self._cache.add(dist) - else: - self._cache_egg.add(dist) - - if gen_dist: - self._cache.generated = True - if gen_egg: - self._cache_egg.generated = True - - @classmethod - def distinfo_dirname(cls, name, version): - """ - The *name* and *version* parameters are converted into their - filename-escaped form, i.e. any ``'-'`` characters are replaced - with ``'_'`` other than the one in ``'dist-info'`` and the one - separating the name from the version number. - - :parameter name: is converted to a standard distribution name by replacing - any runs of non- alphanumeric characters with a single - ``'-'``. 
- :type name: string - :parameter version: is converted to a standard version string. Spaces - become dots, and all other non-alphanumeric characters - (except dots) become dashes, with runs of multiple - dashes condensed to a single dash. - :type version: string - :returns: directory name - :rtype: string""" - name = name.replace('-', '_') - return '-'.join([name, version]) + DISTINFO_EXT - - def get_distributions(self): - """ - Provides an iterator that looks for distributions and returns - :class:`InstalledDistribution` or - :class:`EggInfoDistribution` instances for each one of them. - - :rtype: iterator of :class:`InstalledDistribution` and - :class:`EggInfoDistribution` instances - """ - if not self._cache_enabled: - for dist in self._yield_distributions(): - yield dist - else: - self._generate_cache() - - for dist in self._cache.path.values(): - yield dist - - if self._include_egg: - for dist in self._cache_egg.path.values(): - yield dist - - def get_distribution(self, name): - """ - Looks for a named distribution on the path. - - This function only returns the first result found, as no more than one - value is expected. If nothing is found, ``None`` is returned. - - :rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution` - or ``None`` - """ - result = None - name = name.lower() - if not self._cache_enabled: - for dist in self._yield_distributions(): - if dist.key == name: - result = dist - break - else: - self._generate_cache() - - if name in self._cache.name: - result = self._cache.name[name][0] - elif self._include_egg and name in self._cache_egg.name: - result = self._cache_egg.name[name][0] - return result - - def provides_distribution(self, name, version=None): - """ - Iterates over all distributions to find which distributions provide *name*. - If a *version* is provided, it will be used to filter the results. - - This function only returns the first result found, since no more than - one values are expected. 
If the directory is not found, returns ``None``. - - :parameter version: a version specifier that indicates the version - required, conforming to the format in ``PEP-345`` - - :type name: string - :type version: string - """ - matcher = None - if version is not None: - try: - matcher = self._scheme.matcher('%s (%s)' % (name, version)) - except ValueError: - raise DistlibException('invalid name or version: %r, %r' % - (name, version)) - - for dist in self.get_distributions(): - # We hit a problem on Travis where enum34 was installed and doesn't - # have a provides attribute ... - if not hasattr(dist, 'provides'): - logger.debug('No "provides": %s', dist) - else: - provided = dist.provides - - for p in provided: - p_name, p_ver = parse_name_and_version(p) - if matcher is None: - if p_name == name: - yield dist - break - else: - if p_name == name and matcher.match(p_ver): - yield dist - break - - def get_file_path(self, name, relative_path): - """ - Return the path to a resource file. - """ - dist = self.get_distribution(name) - if dist is None: - raise LookupError('no distribution named %r found' % name) - return dist.get_resource_path(relative_path) - - def get_exported_entries(self, category, name=None): - """ - Return all of the exported entries in a particular category. - - :param category: The category to search for entries. - :param name: If specified, only entries with that name are returned. - """ - for dist in self.get_distributions(): - r = dist.exports - if category in r: - d = r[category] - if name is not None: - if name in d: - yield d[name] - else: - for v in d.values(): - yield v - - -class Distribution(object): - """ - A base class for distributions, whether installed or from indexes. - Either way, it must have some metadata, so that's all that's needed - for construction. - """ - - build_time_dependency = False - """ - Set to True if it's known to be only a build-time dependency (i.e. - not needed after installation). 
- """ - - requested = False - """A boolean that indicates whether the ``REQUESTED`` metadata file is - present (in other words, whether the package was installed by user - request or it was installed as a dependency).""" - - def __init__(self, metadata): - """ - Initialise an instance. - :param metadata: The instance of :class:`Metadata` describing this - distribution. - """ - self.metadata = metadata - self.name = metadata.name - self.key = self.name.lower() # for case-insensitive comparisons - self.version = metadata.version - self.locator = None - self.digest = None - self.extras = None # additional features requested - self.context = None # environment marker overrides - self.download_urls = set() - self.digests = {} - - @property - def source_url(self): - """ - The source archive download URL for this distribution. - """ - return self.metadata.source_url - - download_url = source_url # Backward compatibility - - @property - def name_and_version(self): - """ - A utility property which displays the name and version in parentheses. - """ - return '%s (%s)' % (self.name, self.version) - - @property - def provides(self): - """ - A set of distribution names and versions provided by this distribution. - :return: A set of "name (version)" strings. 
- """ - plist = self.metadata.provides - s = '%s (%s)' % (self.name, self.version) - if s not in plist: - plist.append(s) - return plist - - def _get_requirements(self, req_attr): - md = self.metadata - logger.debug('Getting requirements from metadata %r', md.todict()) - reqts = getattr(md, req_attr) - return set(md.get_requirements(reqts, extras=self.extras, - env=self.context)) - - @property - def run_requires(self): - return self._get_requirements('run_requires') - - @property - def meta_requires(self): - return self._get_requirements('meta_requires') - - @property - def build_requires(self): - return self._get_requirements('build_requires') - - @property - def test_requires(self): - return self._get_requirements('test_requires') - - @property - def dev_requires(self): - return self._get_requirements('dev_requires') - - def matches_requirement(self, req): - """ - Say if this instance matches (fulfills) a requirement. - :param req: The requirement to match. - :rtype req: str - :return: True if it matches, else False. 
- """ - # Requirement may contain extras - parse to lose those - # from what's passed to the matcher - r = parse_requirement(req) - scheme = get_scheme(self.metadata.scheme) - try: - matcher = scheme.matcher(r.requirement) - except UnsupportedVersionError: - # XXX compat-mode if cannot read the version - logger.warning('could not read version %r - using name only', - req) - name = req.split()[0] - matcher = scheme.matcher(name) - - name = matcher.key # case-insensitive - - result = False - for p in self.provides: - p_name, p_ver = parse_name_and_version(p) - if p_name != name: - continue - try: - result = matcher.match(p_ver) - break - except UnsupportedVersionError: - pass - return result - - def __repr__(self): - """ - Return a textual representation of this instance, - """ - if self.source_url: - suffix = ' [%s]' % self.source_url - else: - suffix = '' - return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix) - - def __eq__(self, other): - """ - See if this distribution is the same as another. - :param other: The distribution to compare with. To be equal to one - another. distributions must have the same type, name, - version and source_url. - :return: True if it is the same, else False. - """ - if type(other) is not type(self): - result = False - else: - result = (self.name == other.name and - self.version == other.version and - self.source_url == other.source_url) - return result - - def __hash__(self): - """ - Compute hash in a way which matches the equality test. - """ - return hash(self.name) + hash(self.version) + hash(self.source_url) - - -class BaseInstalledDistribution(Distribution): - """ - This is the base class for installed distributions (whether PEP 376 or - legacy). - """ - - hasher = None - - def __init__(self, metadata, path, env=None): - """ - Initialise an instance. - :param metadata: An instance of :class:`Metadata` which describes the - distribution. 
This will normally have been initialised - from a metadata file in the ``path``. - :param path: The path of the ``.dist-info`` or ``.egg-info`` - directory for the distribution. - :param env: This is normally the :class:`DistributionPath` - instance where this distribution was found. - """ - super(BaseInstalledDistribution, self).__init__(metadata) - self.path = path - self.dist_path = env - - def get_hash(self, data, hasher=None): - """ - Get the hash of some data, using a particular hash algorithm, if - specified. - - :param data: The data to be hashed. - :type data: bytes - :param hasher: The name of a hash implementation, supported by hashlib, - or ``None``. Examples of valid values are ``'sha1'``, - ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and - ``'sha512'``. If no hasher is specified, the ``hasher`` - attribute of the :class:`InstalledDistribution` instance - is used. If the hasher is determined to be ``None``, MD5 - is used as the hashing algorithm. - :returns: The hash of the data. If a hasher was explicitly specified, - the returned hash will be prefixed with the specified hasher - followed by '='. - :rtype: str - """ - if hasher is None: - hasher = self.hasher - if hasher is None: - hasher = hashlib.md5 - prefix = '' - else: - hasher = getattr(hashlib, hasher) - prefix = '%s=' % self.hasher - digest = hasher(data).digest() - digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii') - return '%s%s' % (prefix, digest) - - -class InstalledDistribution(BaseInstalledDistribution): - """ - Created with the *path* of the ``.dist-info`` directory provided to the - constructor. It reads the metadata contained in ``pydist.json`` when it is - instantiated., or uses a passed in Metadata instance (useful for when - dry-run mode is being used). 
- """ - - hasher = 'sha256' - - def __init__(self, path, metadata=None, env=None): - self.modules = [] - self.finder = finder = resources.finder_for_path(path) - if finder is None: - raise ValueError('finder unavailable for %s' % path) - if env and env._cache_enabled and path in env._cache.path: - metadata = env._cache.path[path].metadata - elif metadata is None: - r = finder.find(METADATA_FILENAME) - # Temporary - for Wheel 0.23 support - if r is None: - r = finder.find(WHEEL_METADATA_FILENAME) - # Temporary - for legacy support - if r is None: - r = finder.find('METADATA') - if r is None: - raise ValueError('no %s found in %s' % (METADATA_FILENAME, - path)) - with contextlib.closing(r.as_stream()) as stream: - metadata = Metadata(fileobj=stream, scheme='legacy') - - super(InstalledDistribution, self).__init__(metadata, path, env) - - if env and env._cache_enabled: - env._cache.add(self) - - r = finder.find('REQUESTED') - self.requested = r is not None - p = os.path.join(path, 'top_level.txt') - if os.path.exists(p): - with open(p, 'rb') as f: - data = f.read() - self.modules = data.splitlines() - - def __repr__(self): - return '<InstalledDistribution %r %s at %r>' % ( - self.name, self.version, self.path) - - def __str__(self): - return "%s %s" % (self.name, self.version) - - def _get_records(self): - """ - Get the list of installed files for the distribution - :return: A list of tuples of path, hash and size. Note that hash and - size might be ``None`` for some entries. The path is exactly - as stored in the file (which is as in PEP 376). 
- """ - results = [] - r = self.get_distinfo_resource('RECORD') - with contextlib.closing(r.as_stream()) as stream: - with CSVReader(stream=stream) as record_reader: - # Base location is parent dir of .dist-info dir - #base_location = os.path.dirname(self.path) - #base_location = os.path.abspath(base_location) - for row in record_reader: - missing = [None for i in range(len(row), 3)] - path, checksum, size = row + missing - #if not os.path.isabs(path): - # path = path.replace('/', os.sep) - # path = os.path.join(base_location, path) - results.append((path, checksum, size)) - return results - - @cached_property - def exports(self): - """ - Return the information exported by this distribution. - :return: A dictionary of exports, mapping an export category to a dict - of :class:`ExportEntry` instances describing the individual - export entries, and keyed by name. - """ - result = {} - r = self.get_distinfo_resource(EXPORTS_FILENAME) - if r: - result = self.read_exports() - return result - - def read_exports(self): - """ - Read exports data from a file in .ini format. - - :return: A dictionary of exports, mapping an export category to a list - of :class:`ExportEntry` instances describing the individual - export entries. - """ - result = {} - r = self.get_distinfo_resource(EXPORTS_FILENAME) - if r: - with contextlib.closing(r.as_stream()) as stream: - result = read_exports(stream) - return result - - def write_exports(self, exports): - """ - Write a dictionary of exports to a file in .ini format. - :param exports: A dictionary of exports, mapping an export category to - a list of :class:`ExportEntry` instances describing the - individual export entries. - """ - rf = self.get_distinfo_file(EXPORTS_FILENAME) - with open(rf, 'w') as f: - write_exports(exports, f) - - def get_resource_path(self, relative_path): - """ - NOTE: This API may change in the future. - - Return the absolute path to a resource file with the given relative - path. 
- - :param relative_path: The path, relative to .dist-info, of the resource - of interest. - :return: The absolute path where the resource is to be found. - """ - r = self.get_distinfo_resource('RESOURCES') - with contextlib.closing(r.as_stream()) as stream: - with CSVReader(stream=stream) as resources_reader: - for relative, destination in resources_reader: - if relative == relative_path: - return destination - raise KeyError('no resource file with relative path %r ' - 'is installed' % relative_path) - - def list_installed_files(self): - """ - Iterates over the ``RECORD`` entries and returns a tuple - ``(path, hash, size)`` for each line. - - :returns: iterator of (path, hash, size) - """ - for result in self._get_records(): - yield result - - def write_installed_files(self, paths, prefix, dry_run=False): - """ - Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any - existing ``RECORD`` file is silently overwritten. - - prefix is used to determine when to write absolute paths. 
- """ - prefix = os.path.join(prefix, '') - base = os.path.dirname(self.path) - base_under_prefix = base.startswith(prefix) - base = os.path.join(base, '') - record_path = self.get_distinfo_file('RECORD') - logger.info('creating %s', record_path) - if dry_run: - return None - with CSVWriter(record_path) as writer: - for path in paths: - if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')): - # do not put size and hash, as in PEP-376 - hash_value = size = '' - else: - size = '%d' % os.path.getsize(path) - with open(path, 'rb') as fp: - hash_value = self.get_hash(fp.read()) - if path.startswith(base) or (base_under_prefix and - path.startswith(prefix)): - path = os.path.relpath(path, base) - writer.writerow((path, hash_value, size)) - - # add the RECORD file itself - if record_path.startswith(base): - record_path = os.path.relpath(record_path, base) - writer.writerow((record_path, '', '')) - return record_path - - def check_installed_files(self): - """ - Checks that the hashes and sizes of the files in ``RECORD`` are - matched by the files themselves. Returns a (possibly empty) list of - mismatches. Each entry in the mismatch list will be a tuple consisting - of the path, 'exists', 'size' or 'hash' according to what didn't match - (existence is checked first, then size, then hash), the expected - value and the actual value. 
- """ - mismatches = [] - base = os.path.dirname(self.path) - record_path = self.get_distinfo_file('RECORD') - for path, hash_value, size in self.list_installed_files(): - if not os.path.isabs(path): - path = os.path.join(base, path) - if path == record_path: - continue - if not os.path.exists(path): - mismatches.append((path, 'exists', True, False)) - elif os.path.isfile(path): - actual_size = str(os.path.getsize(path)) - if size and actual_size != size: - mismatches.append((path, 'size', size, actual_size)) - elif hash_value: - if '=' in hash_value: - hasher = hash_value.split('=', 1)[0] - else: - hasher = None - - with open(path, 'rb') as f: - actual_hash = self.get_hash(f.read(), hasher) - if actual_hash != hash_value: - mismatches.append((path, 'hash', hash_value, actual_hash)) - return mismatches - - @cached_property - def shared_locations(self): - """ - A dictionary of shared locations whose keys are in the set 'prefix', - 'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'. - The corresponding value is the absolute path of that category for - this distribution, and takes into account any paths selected by the - user at installation time (e.g. via command-line arguments). In the - case of the 'namespace' key, this would be a list of absolute paths - for the roots of namespace packages in this distribution. - - The first time this property is accessed, the relevant information is - read from the SHARED file in the .dist-info directory. - """ - result = {} - shared_path = os.path.join(self.path, 'SHARED') - if os.path.isfile(shared_path): - with codecs.open(shared_path, 'r', encoding='utf-8') as f: - lines = f.read().splitlines() - for line in lines: - key, value = line.split('=', 1) - if key == 'namespace': - result.setdefault(key, []).append(value) - else: - result[key] = value - return result - - def write_shared_locations(self, paths, dry_run=False): - """ - Write shared location information to the SHARED file in .dist-info. 
- :param paths: A dictionary as described in the documentation for - :meth:`shared_locations`. - :param dry_run: If True, the action is logged but no file is actually - written. - :return: The path of the file written to. - """ - shared_path = os.path.join(self.path, 'SHARED') - logger.info('creating %s', shared_path) - if dry_run: - return None - lines = [] - for key in ('prefix', 'lib', 'headers', 'scripts', 'data'): - path = paths[key] - if os.path.isdir(paths[key]): - lines.append('%s=%s' % (key, path)) - for ns in paths.get('namespace', ()): - lines.append('namespace=%s' % ns) - - with codecs.open(shared_path, 'w', encoding='utf-8') as f: - f.write('\n'.join(lines)) - return shared_path - - def get_distinfo_resource(self, path): - if path not in DIST_FILES: - raise DistlibException('invalid path for a dist-info file: ' - '%r at %r' % (path, self.path)) - finder = resources.finder_for_path(self.path) - if finder is None: - raise DistlibException('Unable to get a finder for %s' % self.path) - return finder.find(path) - - def get_distinfo_file(self, path): - """ - Returns a path located under the ``.dist-info`` directory. Returns a - string representing the path. - - :parameter path: a ``'/'``-separated path relative to the - ``.dist-info`` directory or an absolute path; - If *path* is an absolute path and doesn't start - with the ``.dist-info`` directory path, - a :class:`DistlibException` is raised - :type path: str - :rtype: str - """ - # Check if it is an absolute path # XXX use relpath, add tests - if path.find(os.sep) >= 0: - # it's an absolute path? 
- distinfo_dirname, path = path.split(os.sep)[-2:] - if distinfo_dirname != self.path.split(os.sep)[-1]: - raise DistlibException( - 'dist-info file %r does not belong to the %r %s ' - 'distribution' % (path, self.name, self.version)) - - # The file must be relative - if path not in DIST_FILES: - raise DistlibException('invalid path for a dist-info file: ' - '%r at %r' % (path, self.path)) - - return os.path.join(self.path, path) - - def list_distinfo_files(self): - """ - Iterates over the ``RECORD`` entries and returns paths for each line if - the path is pointing to a file located in the ``.dist-info`` directory - or one of its subdirectories. - - :returns: iterator of paths - """ - base = os.path.dirname(self.path) - for path, checksum, size in self._get_records(): - # XXX add separator or use real relpath algo - if not os.path.isabs(path): - path = os.path.join(base, path) - if path.startswith(self.path): - yield path - - def __eq__(self, other): - return (isinstance(other, InstalledDistribution) and - self.path == other.path) - - # See http://docs.python.org/reference/datamodel#object.__hash__ - __hash__ = object.__hash__ - - -class EggInfoDistribution(BaseInstalledDistribution): - """Created with the *path* of the ``.egg-info`` directory or file provided - to the constructor. 
It reads the metadata contained in the file itself, or - if the given path happens to be a directory, the metadata is read from the - file ``PKG-INFO`` under that directory.""" - - requested = True # as we have no way of knowing, assume it was - shared_locations = {} - - def __init__(self, path, env=None): - def set_name_and_version(s, n, v): - s.name = n - s.key = n.lower() # for case-insensitive comparisons - s.version = v - - self.path = path - self.dist_path = env - if env and env._cache_enabled and path in env._cache_egg.path: - metadata = env._cache_egg.path[path].metadata - set_name_and_version(self, metadata.name, metadata.version) - else: - metadata = self._get_metadata(path) - - # Need to be set before caching - set_name_and_version(self, metadata.name, metadata.version) - - if env and env._cache_enabled: - env._cache_egg.add(self) - super(EggInfoDistribution, self).__init__(metadata, path, env) - - def _get_metadata(self, path): - requires = None - - def parse_requires_data(data): - """Create a list of dependencies from a requires.txt file. - - *data*: the contents of a setuptools-produced requires.txt file. - """ - reqs = [] - lines = data.splitlines() - for line in lines: - line = line.strip() - if line.startswith('['): - logger.warning('Unexpected line: quitting requirement scan: %r', - line) - break - r = parse_requirement(line) - if not r: - logger.warning('Not recognised as a requirement: %r', line) - continue - if r.extras: - logger.warning('extra requirements in requires.txt are ' - 'not supported') - if not r.constraints: - reqs.append(r.name) - else: - cons = ', '.join('%s%s' % c for c in r.constraints) - reqs.append('%s (%s)' % (r.name, cons)) - return reqs - - def parse_requires_path(req_path): - """Create a list of dependencies from a requires.txt file. - - *req_path*: the path to a setuptools-produced requires.txt file. 
- """ - - reqs = [] - try: - with codecs.open(req_path, 'r', 'utf-8') as fp: - reqs = parse_requires_data(fp.read()) - except IOError: - pass - return reqs - - tl_path = tl_data = None - if path.endswith('.egg'): - if os.path.isdir(path): - p = os.path.join(path, 'EGG-INFO') - meta_path = os.path.join(p, 'PKG-INFO') - metadata = Metadata(path=meta_path, scheme='legacy') - req_path = os.path.join(p, 'requires.txt') - tl_path = os.path.join(p, 'top_level.txt') - requires = parse_requires_path(req_path) - else: - # FIXME handle the case where zipfile is not available - zipf = zipimport.zipimporter(path) - fileobj = StringIO( - zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8')) - metadata = Metadata(fileobj=fileobj, scheme='legacy') - try: - data = zipf.get_data('EGG-INFO/requires.txt') - tl_data = zipf.get_data('EGG-INFO/top_level.txt').decode('utf-8') - requires = parse_requires_data(data.decode('utf-8')) - except IOError: - requires = None - elif path.endswith('.egg-info'): - if os.path.isdir(path): - req_path = os.path.join(path, 'requires.txt') - requires = parse_requires_path(req_path) - path = os.path.join(path, 'PKG-INFO') - tl_path = os.path.join(path, 'top_level.txt') - metadata = Metadata(path=path, scheme='legacy') - else: - raise DistlibException('path must end with .egg-info or .egg, ' - 'got %r' % path) - - if requires: - metadata.add_requirements(requires) - # look for top-level modules in top_level.txt, if present - if tl_data is None: - if tl_path is not None and os.path.exists(tl_path): - with open(tl_path, 'rb') as f: - tl_data = f.read().decode('utf-8') - if not tl_data: - tl_data = [] - else: - tl_data = tl_data.splitlines() - self.modules = tl_data - return metadata - - def __repr__(self): - return '<EggInfoDistribution %r %s at %r>' % ( - self.name, self.version, self.path) - - def __str__(self): - return "%s %s" % (self.name, self.version) - - def check_installed_files(self): - """ - Checks that the hashes and sizes of the files in ``RECORD`` 
are - matched by the files themselves. Returns a (possibly empty) list of - mismatches. Each entry in the mismatch list will be a tuple consisting - of the path, 'exists', 'size' or 'hash' according to what didn't match - (existence is checked first, then size, then hash), the expected - value and the actual value. - """ - mismatches = [] - record_path = os.path.join(self.path, 'installed-files.txt') - if os.path.exists(record_path): - for path, _, _ in self.list_installed_files(): - if path == record_path: - continue - if not os.path.exists(path): - mismatches.append((path, 'exists', True, False)) - return mismatches - - def list_installed_files(self): - """ - Iterates over the ``installed-files.txt`` entries and returns a tuple - ``(path, hash, size)`` for each line. - - :returns: a list of (path, hash, size) - """ - - def _md5(path): - f = open(path, 'rb') - try: - content = f.read() - finally: - f.close() - return hashlib.md5(content).hexdigest() - - def _size(path): - return os.stat(path).st_size - - record_path = os.path.join(self.path, 'installed-files.txt') - result = [] - if os.path.exists(record_path): - with codecs.open(record_path, 'r', encoding='utf-8') as f: - for line in f: - line = line.strip() - p = os.path.normpath(os.path.join(self.path, line)) - # "./" is present as a marker between installed files - # and installation metadata files - if not os.path.exists(p): - logger.warning('Non-existent file: %s', p) - if p.endswith(('.pyc', '.pyo')): - continue - #otherwise fall through and fail - if not os.path.isdir(p): - result.append((p, _md5(p), _size(p))) - result.append((record_path, None, None)) - return result - - def list_distinfo_files(self, absolute=False): - """ - Iterates over the ``installed-files.txt`` entries and returns paths for - each line if the path is pointing to a file located in the - ``.egg-info`` directory or one of its subdirectories. 
- - :parameter absolute: If *absolute* is ``True``, each returned path is - transformed into a local absolute path. Otherwise the - raw value from ``installed-files.txt`` is returned. - :type absolute: boolean - :returns: iterator of paths - """ - record_path = os.path.join(self.path, 'installed-files.txt') - if os.path.exists(record_path): - skip = True - with codecs.open(record_path, 'r', encoding='utf-8') as f: - for line in f: - line = line.strip() - if line == './': - skip = False - continue - if not skip: - p = os.path.normpath(os.path.join(self.path, line)) - if p.startswith(self.path): - if absolute: - yield p - else: - yield line - - def __eq__(self, other): - return (isinstance(other, EggInfoDistribution) and - self.path == other.path) - - # See http://docs.python.org/reference/datamodel#object.__hash__ - __hash__ = object.__hash__ - -new_dist_class = InstalledDistribution -old_dist_class = EggInfoDistribution - - -class DependencyGraph(object): - """ - Represents a dependency graph between distributions. - - The dependency relationships are stored in an ``adjacency_list`` that maps - distributions to a list of ``(other, label)`` tuples where ``other`` - is a distribution and the edge is labeled with ``label`` (i.e. the version - specifier, if such was provided). Also, for more efficient traversal, for - every distribution ``x``, a list of predecessors is kept in - ``reverse_list[x]``. An edge from distribution ``a`` to - distribution ``b`` means that ``a`` depends on ``b``. If any missing - dependencies are found, they are stored in ``missing``, which is a - dictionary that maps distributions to a list of requirements that were not - provided by any other distributions. - """ - - def __init__(self): - self.adjacency_list = {} - self.reverse_list = {} - self.missing = {} - - def add_distribution(self, distribution): - """Add the *distribution* to the graph. 
- - :type distribution: :class:`distutils2.database.InstalledDistribution` - or :class:`distutils2.database.EggInfoDistribution` - """ - self.adjacency_list[distribution] = [] - self.reverse_list[distribution] = [] - #self.missing[distribution] = [] - - def add_edge(self, x, y, label=None): - """Add an edge from distribution *x* to distribution *y* with the given - *label*. - - :type x: :class:`distutils2.database.InstalledDistribution` or - :class:`distutils2.database.EggInfoDistribution` - :type y: :class:`distutils2.database.InstalledDistribution` or - :class:`distutils2.database.EggInfoDistribution` - :type label: ``str`` or ``None`` - """ - self.adjacency_list[x].append((y, label)) - # multiple edges are allowed, so be careful - if x not in self.reverse_list[y]: - self.reverse_list[y].append(x) - - def add_missing(self, distribution, requirement): - """ - Add a missing *requirement* for the given *distribution*. - - :type distribution: :class:`distutils2.database.InstalledDistribution` - or :class:`distutils2.database.EggInfoDistribution` - :type requirement: ``str`` - """ - logger.debug('%s missing %r', distribution, requirement) - self.missing.setdefault(distribution, []).append(requirement) - - def _repr_dist(self, dist): - return '%s %s' % (dist.name, dist.version) - - def repr_node(self, dist, level=1): - """Prints only a subgraph""" - output = [self._repr_dist(dist)] - for other, label in self.adjacency_list[dist]: - dist = self._repr_dist(other) - if label is not None: - dist = '%s [%s]' % (dist, label) - output.append(' ' * level + str(dist)) - suboutput = self.repr_node(other, level + 1) - subs = suboutput.split('\n') - output.extend(subs[1:]) - return '\n'.join(output) - - def to_dot(self, f, skip_disconnected=True): - """Writes a DOT output for the graph to the provided file *f*. - - If *skip_disconnected* is set to ``True``, then all distributions - that are not dependent on any other distribution are skipped. 
- - :type f: has to support ``file``-like operations - :type skip_disconnected: ``bool`` - """ - disconnected = [] - - f.write("digraph dependencies {\n") - for dist, adjs in self.adjacency_list.items(): - if len(adjs) == 0 and not skip_disconnected: - disconnected.append(dist) - for other, label in adjs: - if not label is None: - f.write('"%s" -> "%s" [label="%s"]\n' % - (dist.name, other.name, label)) - else: - f.write('"%s" -> "%s"\n' % (dist.name, other.name)) - if not skip_disconnected and len(disconnected) > 0: - f.write('subgraph disconnected {\n') - f.write('label = "Disconnected"\n') - f.write('bgcolor = red\n') - - for dist in disconnected: - f.write('"%s"' % dist.name) - f.write('\n') - f.write('}\n') - f.write('}\n') - - def topological_sort(self): - """ - Perform a topological sort of the graph. - :return: A tuple, the first element of which is a topologically sorted - list of distributions, and the second element of which is a - list of distributions that cannot be sorted because they have - circular dependencies and so form a cycle. - """ - result = [] - # Make a shallow copy of the adjacency list - alist = {} - for k, v in self.adjacency_list.items(): - alist[k] = v[:] - while True: - # See what we can remove in this run - to_remove = [] - for k, v in list(alist.items())[:]: - if not v: - to_remove.append(k) - del alist[k] - if not to_remove: - # What's left in alist (if anything) is a cycle. 
- break - # Remove from the adjacency list of others - for k, v in alist.items(): - alist[k] = [(d, r) for d, r in v if d not in to_remove] - logger.debug('Moving to result: %s', - ['%s (%s)' % (d.name, d.version) for d in to_remove]) - result.extend(to_remove) - return result, list(alist.keys()) - - def __repr__(self): - """Representation of the graph""" - output = [] - for dist, adjs in self.adjacency_list.items(): - output.append(self.repr_node(dist)) - return '\n'.join(output) - - -def make_graph(dists, scheme='default'): - """Makes a dependency graph from the given distributions. - - :parameter dists: a list of distributions - :type dists: list of :class:`distutils2.database.InstalledDistribution` and - :class:`distutils2.database.EggInfoDistribution` instances - :rtype: a :class:`DependencyGraph` instance - """ - scheme = get_scheme(scheme) - graph = DependencyGraph() - provided = {} # maps names to lists of (version, dist) tuples - - # first, build the graph and find out what's provided - for dist in dists: - graph.add_distribution(dist) - - for p in dist.provides: - name, version = parse_name_and_version(p) - logger.debug('Add to provided: %s, %s, %s', name, version, dist) - provided.setdefault(name, []).append((version, dist)) - - # now make the edges - for dist in dists: - requires = (dist.run_requires | dist.meta_requires | - dist.build_requires | dist.dev_requires) - for req in requires: - try: - matcher = scheme.matcher(req) - except UnsupportedVersionError: - # XXX compat-mode if cannot read the version - logger.warning('could not read version %r - using name only', - req) - name = req.split()[0] - matcher = scheme.matcher(name) - - name = matcher.key # case-insensitive - - matched = False - if name in provided: - for version, provider in provided[name]: - try: - match = matcher.match(version) - except UnsupportedVersionError: - match = False - - if match: - graph.add_edge(dist, provider, req) - matched = True - break - if not matched: - 
graph.add_missing(dist, req) - return graph - - -def get_dependent_dists(dists, dist): - """Recursively generate a list of distributions from *dists* that are - dependent on *dist*. - - :param dists: a list of distributions - :param dist: a distribution, member of *dists* for which we are interested - """ - if dist not in dists: - raise DistlibException('given distribution %r is not a member ' - 'of the list' % dist.name) - graph = make_graph(dists) - - dep = [dist] # dependent distributions - todo = graph.reverse_list[dist] # list of nodes we should inspect - - while todo: - d = todo.pop() - dep.append(d) - for succ in graph.reverse_list[d]: - if succ not in dep: - todo.append(succ) - - dep.pop(0) # remove dist from dep, was there to prevent infinite loops - return dep - - -def get_required_dists(dists, dist): - """Recursively generate a list of distributions from *dists* that are - required by *dist*. - - :param dists: a list of distributions - :param dist: a distribution, member of *dists* for which we are interested - """ - if dist not in dists: - raise DistlibException('given distribution %r is not a member ' - 'of the list' % dist.name) - graph = make_graph(dists) - - req = [] # required distributions - todo = graph.adjacency_list[dist] # list of nodes we should inspect - - while todo: - d = todo.pop()[0] - req.append(d) - for pred in graph.adjacency_list[d]: - if pred not in req: - todo.append(pred) - - return req - - -def make_dist(name, version, **kwargs): - """ - A convenience method for making a dist given just a name and version. 
- """ - summary = kwargs.pop('summary', 'Placeholder for summary') - md = Metadata(**kwargs) - md.name = name - md.version = version - md.summary = summary or 'Placeholder for summary' - return Distribution(md) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/index.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/index.py deleted file mode 100644 index 2406be2..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/index.py +++ /dev/null @@ -1,516 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2013 Vinay Sajip. -# Licensed to the Python Software Foundation under a contributor agreement. -# See LICENSE.txt and CONTRIBUTORS.txt. -# -import hashlib -import logging -import os -import shutil -import subprocess -import tempfile -try: - from threading import Thread -except ImportError: - from dummy_threading import Thread - -from . import DistlibException -from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr, - urlparse, build_opener, string_types) -from .util import cached_property, zip_dir, ServerProxy - -logger = logging.getLogger(__name__) - -DEFAULT_INDEX = 'https://pypi.python.org/pypi' -DEFAULT_REALM = 'pypi' - -class PackageIndex(object): - """ - This class represents a package index compatible with PyPI, the Python - Package Index. - """ - - boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$' - - def __init__(self, url=None): - """ - Initialise an instance. - - :param url: The URL of the index. If not specified, the URL for PyPI is - used. 
- """ - self.url = url or DEFAULT_INDEX - self.read_configuration() - scheme, netloc, path, params, query, frag = urlparse(self.url) - if params or query or frag or scheme not in ('http', 'https'): - raise DistlibException('invalid repository: %s' % self.url) - self.password_handler = None - self.ssl_verifier = None - self.gpg = None - self.gpg_home = None - with open(os.devnull, 'w') as sink: - # Use gpg by default rather than gpg2, as gpg2 insists on - # prompting for passwords - for s in ('gpg', 'gpg2'): - try: - rc = subprocess.check_call([s, '--version'], stdout=sink, - stderr=sink) - if rc == 0: - self.gpg = s - break - except OSError: - pass - - def _get_pypirc_command(self): - """ - Get the distutils command for interacting with PyPI configurations. - :return: the command. - """ - from distutils.core import Distribution - from distutils.config import PyPIRCCommand - d = Distribution() - return PyPIRCCommand(d) - - def read_configuration(self): - """ - Read the PyPI access configuration as supported by distutils, getting - PyPI to do the actual work. This populates ``username``, ``password``, - ``realm`` and ``url`` attributes from the configuration. - """ - # get distutils to do the work - c = self._get_pypirc_command() - c.repository = self.url - cfg = c._read_pypirc() - self.username = cfg.get('username') - self.password = cfg.get('password') - self.realm = cfg.get('realm', 'pypi') - self.url = cfg.get('repository', self.url) - - def save_configuration(self): - """ - Save the PyPI access configuration. You must have set ``username`` and - ``password`` attributes before calling this method. - - Again, distutils is used to do the actual work. - """ - self.check_credentials() - # get distutils to do the work - c = self._get_pypirc_command() - c._store_pypirc(self.username, self.password) - - def check_credentials(self): - """ - Check that ``username`` and ``password`` have been set, and raise an - exception if not. 
- """ - if self.username is None or self.password is None: - raise DistlibException('username and password must be set') - pm = HTTPPasswordMgr() - _, netloc, _, _, _, _ = urlparse(self.url) - pm.add_password(self.realm, netloc, self.username, self.password) - self.password_handler = HTTPBasicAuthHandler(pm) - - def register(self, metadata): - """ - Register a distribution on PyPI, using the provided metadata. - - :param metadata: A :class:`Metadata` instance defining at least a name - and version number for the distribution to be - registered. - :return: The HTTP response received from PyPI upon submission of the - request. - """ - self.check_credentials() - metadata.validate() - d = metadata.todict() - d[':action'] = 'verify' - request = self.encode_request(d.items(), []) - response = self.send_request(request) - d[':action'] = 'submit' - request = self.encode_request(d.items(), []) - return self.send_request(request) - - def _reader(self, name, stream, outbuf): - """ - Thread runner for reading lines of from a subprocess into a buffer. - - :param name: The logical name of the stream (used for logging only). - :param stream: The stream to read from. This will typically a pipe - connected to the output stream of a subprocess. - :param outbuf: The list to append the read lines to. - """ - while True: - s = stream.readline() - if not s: - break - s = s.decode('utf-8').rstrip() - outbuf.append(s) - logger.debug('%s: %s' % (name, s)) - stream.close() - - def get_sign_command(self, filename, signer, sign_password, - keystore=None): - """ - Return a suitable command for signing a file. - - :param filename: The pathname to the file to be signed. - :param signer: The identifier of the signer of the file. - :param sign_password: The passphrase for the signer's - private key used for signing. - :param keystore: The path to a directory which contains the keys - used in verification. If not specified, the - instance's ``gpg_home`` attribute is used instead. 
- :return: The signing command as a list suitable to be - passed to :class:`subprocess.Popen`. - """ - cmd = [self.gpg, '--status-fd', '2', '--no-tty'] - if keystore is None: - keystore = self.gpg_home - if keystore: - cmd.extend(['--homedir', keystore]) - if sign_password is not None: - cmd.extend(['--batch', '--passphrase-fd', '0']) - td = tempfile.mkdtemp() - sf = os.path.join(td, os.path.basename(filename) + '.asc') - cmd.extend(['--detach-sign', '--armor', '--local-user', - signer, '--output', sf, filename]) - logger.debug('invoking: %s', ' '.join(cmd)) - return cmd, sf - - def run_command(self, cmd, input_data=None): - """ - Run a command in a child process , passing it any input data specified. - - :param cmd: The command to run. - :param input_data: If specified, this must be a byte string containing - data to be sent to the child process. - :return: A tuple consisting of the subprocess' exit code, a list of - lines read from the subprocess' ``stdout``, and a list of - lines read from the subprocess' ``stderr``. - """ - kwargs = { - 'stdout': subprocess.PIPE, - 'stderr': subprocess.PIPE, - } - if input_data is not None: - kwargs['stdin'] = subprocess.PIPE - stdout = [] - stderr = [] - p = subprocess.Popen(cmd, **kwargs) - # We don't use communicate() here because we may need to - # get clever with interacting with the command - t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout)) - t1.start() - t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr)) - t2.start() - if input_data is not None: - p.stdin.write(input_data) - p.stdin.close() - - p.wait() - t1.join() - t2.join() - return p.returncode, stdout, stderr - - def sign_file(self, filename, signer, sign_password, keystore=None): - """ - Sign a file. - - :param filename: The pathname to the file to be signed. - :param signer: The identifier of the signer of the file. - :param sign_password: The passphrase for the signer's - private key used for signing. 
- :param keystore: The path to a directory which contains the keys - used in signing. If not specified, the instance's - ``gpg_home`` attribute is used instead. - :return: The absolute pathname of the file where the signature is - stored. - """ - cmd, sig_file = self.get_sign_command(filename, signer, sign_password, - keystore) - rc, stdout, stderr = self.run_command(cmd, - sign_password.encode('utf-8')) - if rc != 0: - raise DistlibException('sign command failed with error ' - 'code %s' % rc) - return sig_file - - def upload_file(self, metadata, filename, signer=None, sign_password=None, - filetype='sdist', pyversion='source', keystore=None): - """ - Upload a release file to the index. - - :param metadata: A :class:`Metadata` instance defining at least a name - and version number for the file to be uploaded. - :param filename: The pathname of the file to be uploaded. - :param signer: The identifier of the signer of the file. - :param sign_password: The passphrase for the signer's - private key used for signing. - :param filetype: The type of the file being uploaded. This is the - distutils command which produced that file, e.g. - ``sdist`` or ``bdist_wheel``. - :param pyversion: The version of Python which the release relates - to. For code compatible with any Python, this would - be ``source``, otherwise it would be e.g. ``3.2``. - :param keystore: The path to a directory which contains the keys - used in signing. If not specified, the instance's - ``gpg_home`` attribute is used instead. - :return: The HTTP response received from PyPI upon submission of the - request. 
- """ - self.check_credentials() - if not os.path.exists(filename): - raise DistlibException('not found: %s' % filename) - metadata.validate() - d = metadata.todict() - sig_file = None - if signer: - if not self.gpg: - logger.warning('no signing program available - not signed') - else: - sig_file = self.sign_file(filename, signer, sign_password, - keystore) - with open(filename, 'rb') as f: - file_data = f.read() - md5_digest = hashlib.md5(file_data).hexdigest() - sha256_digest = hashlib.sha256(file_data).hexdigest() - d.update({ - ':action': 'file_upload', - 'protocol_version': '1', - 'filetype': filetype, - 'pyversion': pyversion, - 'md5_digest': md5_digest, - 'sha256_digest': sha256_digest, - }) - files = [('content', os.path.basename(filename), file_data)] - if sig_file: - with open(sig_file, 'rb') as f: - sig_data = f.read() - files.append(('gpg_signature', os.path.basename(sig_file), - sig_data)) - shutil.rmtree(os.path.dirname(sig_file)) - request = self.encode_request(d.items(), files) - return self.send_request(request) - - def upload_documentation(self, metadata, doc_dir): - """ - Upload documentation to the index. - - :param metadata: A :class:`Metadata` instance defining at least a name - and version number for the documentation to be - uploaded. - :param doc_dir: The pathname of the directory which contains the - documentation. This should be the directory that - contains the ``index.html`` for the documentation. - :return: The HTTP response received from PyPI upon submission of the - request. 
- """ - self.check_credentials() - if not os.path.isdir(doc_dir): - raise DistlibException('not a directory: %r' % doc_dir) - fn = os.path.join(doc_dir, 'index.html') - if not os.path.exists(fn): - raise DistlibException('not found: %r' % fn) - metadata.validate() - name, version = metadata.name, metadata.version - zip_data = zip_dir(doc_dir).getvalue() - fields = [(':action', 'doc_upload'), - ('name', name), ('version', version)] - files = [('content', name, zip_data)] - request = self.encode_request(fields, files) - return self.send_request(request) - - def get_verify_command(self, signature_filename, data_filename, - keystore=None): - """ - Return a suitable command for verifying a file. - - :param signature_filename: The pathname to the file containing the - signature. - :param data_filename: The pathname to the file containing the - signed data. - :param keystore: The path to a directory which contains the keys - used in verification. If not specified, the - instance's ``gpg_home`` attribute is used instead. - :return: The verifying command as a list suitable to be - passed to :class:`subprocess.Popen`. - """ - cmd = [self.gpg, '--status-fd', '2', '--no-tty'] - if keystore is None: - keystore = self.gpg_home - if keystore: - cmd.extend(['--homedir', keystore]) - cmd.extend(['--verify', signature_filename, data_filename]) - logger.debug('invoking: %s', ' '.join(cmd)) - return cmd - - def verify_signature(self, signature_filename, data_filename, - keystore=None): - """ - Verify a signature for a file. - - :param signature_filename: The pathname to the file containing the - signature. - :param data_filename: The pathname to the file containing the - signed data. - :param keystore: The path to a directory which contains the keys - used in verification. If not specified, the - instance's ``gpg_home`` attribute is used instead. - :return: True if the signature was verified, else False. 
- """ - if not self.gpg: - raise DistlibException('verification unavailable because gpg ' - 'unavailable') - cmd = self.get_verify_command(signature_filename, data_filename, - keystore) - rc, stdout, stderr = self.run_command(cmd) - if rc not in (0, 1): - raise DistlibException('verify command failed with error ' - 'code %s' % rc) - return rc == 0 - - def download_file(self, url, destfile, digest=None, reporthook=None): - """ - This is a convenience method for downloading a file from an URL. - Normally, this will be a file from the index, though currently - no check is made for this (i.e. a file can be downloaded from - anywhere). - - The method is just like the :func:`urlretrieve` function in the - standard library, except that it allows digest computation to be - done during download and checking that the downloaded data - matched any expected value. - - :param url: The URL of the file to be downloaded (assumed to be - available via an HTTP GET request). - :param destfile: The pathname where the downloaded file is to be - saved. - :param digest: If specified, this must be a (hasher, value) - tuple, where hasher is the algorithm used (e.g. - ``'md5'``) and ``value`` is the expected value. - :param reporthook: The same as for :func:`urlretrieve` in the - standard library. - """ - if digest is None: - digester = None - logger.debug('No digest specified') - else: - if isinstance(digest, (list, tuple)): - hasher, digest = digest - else: - hasher = 'md5' - digester = getattr(hashlib, hasher)() - logger.debug('Digest specified: %s' % digest) - # The following code is equivalent to urlretrieve. - # We need to do it this way so that we can compute the - # digest of the file as we go. 
- with open(destfile, 'wb') as dfp: - # addinfourl is not a context manager on 2.x - # so we have to use try/finally - sfp = self.send_request(Request(url)) - try: - headers = sfp.info() - blocksize = 8192 - size = -1 - read = 0 - blocknum = 0 - if "content-length" in headers: - size = int(headers["Content-Length"]) - if reporthook: - reporthook(blocknum, blocksize, size) - while True: - block = sfp.read(blocksize) - if not block: - break - read += len(block) - dfp.write(block) - if digester: - digester.update(block) - blocknum += 1 - if reporthook: - reporthook(blocknum, blocksize, size) - finally: - sfp.close() - - # check that we got the whole file, if we can - if size >= 0 and read < size: - raise DistlibException( - 'retrieval incomplete: got only %d out of %d bytes' - % (read, size)) - # if we have a digest, it must match. - if digester: - actual = digester.hexdigest() - if digest != actual: - raise DistlibException('%s digest mismatch for %s: expected ' - '%s, got %s' % (hasher, destfile, - digest, actual)) - logger.debug('Digest verified: %s', digest) - - def send_request(self, req): - """ - Send a standard library :class:`Request` to PyPI and return its - response. - - :param req: The request to send. - :return: The HTTP response from PyPI (a standard library HTTPResponse). - """ - handlers = [] - if self.password_handler: - handlers.append(self.password_handler) - if self.ssl_verifier: - handlers.append(self.ssl_verifier) - opener = build_opener(*handlers) - return opener.open(req) - - def encode_request(self, fields, files): - """ - Encode fields and files for posting to an HTTP server. - - :param fields: The fields to send as a list of (fieldname, value) - tuples. - :param files: The files to send as a list of (fieldname, filename, - file_bytes) tuple. 
- """ - # Adapted from packaging, which in turn was adapted from - # http://code.activestate.com/recipes/146306 - - parts = [] - boundary = self.boundary - for k, values in fields: - if not isinstance(values, (list, tuple)): - values = [values] - - for v in values: - parts.extend(( - b'--' + boundary, - ('Content-Disposition: form-data; name="%s"' % - k).encode('utf-8'), - b'', - v.encode('utf-8'))) - for key, filename, value in files: - parts.extend(( - b'--' + boundary, - ('Content-Disposition: form-data; name="%s"; filename="%s"' % - (key, filename)).encode('utf-8'), - b'', - value)) - - parts.extend((b'--' + boundary + b'--', b'')) - - body = b'\r\n'.join(parts) - ct = b'multipart/form-data; boundary=' + boundary - headers = { - 'Content-type': ct, - 'Content-length': str(len(body)) - } - return Request(self.url, body, headers) - - def search(self, terms, operator=None): - if isinstance(terms, string_types): - terms = {'name': terms} - rpc_proxy = ServerProxy(self.url, timeout=3.0) - try: - return rpc_proxy.search(terms, operator or 'and') - finally: - rpc_proxy('close')() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/locators.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/locators.py deleted file mode 100644 index 5c655c3..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/locators.py +++ /dev/null @@ -1,1295 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012-2015 Vinay Sajip. -# Licensed to the Python Software Foundation under a contributor agreement. -# See LICENSE.txt and CONTRIBUTORS.txt. -# - -import gzip -from io import BytesIO -import json -import logging -import os -import posixpath -import re -try: - import threading -except ImportError: # pragma: no cover - import dummy_threading as threading -import zlib - -from . 
import DistlibException -from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url, - queue, quote, unescape, string_types, build_opener, - HTTPRedirectHandler as BaseRedirectHandler, text_type, - Request, HTTPError, URLError) -from .database import Distribution, DistributionPath, make_dist -from .metadata import Metadata, MetadataInvalidError -from .util import (cached_property, parse_credentials, ensure_slash, - split_filename, get_project_data, parse_requirement, - parse_name_and_version, ServerProxy, normalize_name) -from .version import get_scheme, UnsupportedVersionError -from .wheel import Wheel, is_compatible - -logger = logging.getLogger(__name__) - -HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)') -CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I) -HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml') -DEFAULT_INDEX = 'https://pypi.python.org/pypi' - -def get_all_distribution_names(url=None): - """ - Return all distribution names known by an index. - :param url: The URL of the index. - :return: A list of all known distribution names. - """ - if url is None: - url = DEFAULT_INDEX - client = ServerProxy(url, timeout=3.0) - try: - return client.list_packages() - finally: - client('close')() - -class RedirectHandler(BaseRedirectHandler): - """ - A class to work around a bug in some Python 3.2.x releases. - """ - # There's a bug in the base version for some 3.2.x - # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header - # returns e.g. /abc, it bails because it says the scheme '' - # is bogus, when actually it should use the request's - # URL for the scheme. See Python issue #13696. - def http_error_302(self, req, fp, code, msg, headers): - # Some servers (incorrectly) return multiple Location headers - # (so probably same goes for URI). Use first header. 
- newurl = None - for key in ('location', 'uri'): - if key in headers: - newurl = headers[key] - break - if newurl is None: # pragma: no cover - return - urlparts = urlparse(newurl) - if urlparts.scheme == '': - newurl = urljoin(req.get_full_url(), newurl) - if hasattr(headers, 'replace_header'): - headers.replace_header(key, newurl) - else: - headers[key] = newurl - return BaseRedirectHandler.http_error_302(self, req, fp, code, msg, - headers) - - http_error_301 = http_error_303 = http_error_307 = http_error_302 - -class Locator(object): - """ - A base class for locators - things that locate distributions. - """ - source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz') - binary_extensions = ('.egg', '.exe', '.whl') - excluded_extensions = ('.pdf',) - - # A list of tags indicating which wheels you want to match. The default - # value of None matches against the tags compatible with the running - # Python. If you want to match other values, set wheel_tags on a locator - # instance to a list of tuples (pyver, abi, arch) which you want to match. - wheel_tags = None - - downloadable_extensions = source_extensions + ('.whl',) - - def __init__(self, scheme='default'): - """ - Initialise an instance. - :param scheme: Because locators look for most recent versions, they - need to know the version scheme to use. This specifies - the current PEP-recommended scheme - use ``'legacy'`` - if you need to support existing distributions on PyPI. - """ - self._cache = {} - self.scheme = scheme - # Because of bugs in some of the handlers on some of the platforms, - # we use our own opener rather than just using urlopen. - self.opener = build_opener(RedirectHandler()) - # If get_project() is called from locate(), the matcher instance - # is set from the requirement passed to locate(). See issue #18 for - # why this can be useful to know. - self.matcher = None - self.errors = queue.Queue() - - def get_errors(self): - """ - Return any errors which have occurred. 
- """ - result = [] - while not self.errors.empty(): # pragma: no cover - try: - e = self.errors.get(False) - result.append(e) - except self.errors.Empty: - continue - self.errors.task_done() - return result - - def clear_errors(self): - """ - Clear any errors which may have been logged. - """ - # Just get the errors and throw them away - self.get_errors() - - def clear_cache(self): - self._cache.clear() - - def _get_scheme(self): - return self._scheme - - def _set_scheme(self, value): - self._scheme = value - - scheme = property(_get_scheme, _set_scheme) - - def _get_project(self, name): - """ - For a given project, get a dictionary mapping available versions to Distribution - instances. - - This should be implemented in subclasses. - - If called from a locate() request, self.matcher will be set to a - matcher for the requirement to satisfy, otherwise it will be None. - """ - raise NotImplementedError('Please implement in the subclass') - - def get_distribution_names(self): - """ - Return all the distribution names known to this locator. - """ - raise NotImplementedError('Please implement in the subclass') - - def get_project(self, name): - """ - For a given project, get a dictionary mapping available versions to Distribution - instances. - - This calls _get_project to do all the work, and just implements a caching layer on top. - """ - if self._cache is None: # pragma: no cover - result = self._get_project(name) - elif name in self._cache: - result = self._cache[name] - else: - self.clear_errors() - result = self._get_project(name) - self._cache[name] = result - return result - - def score_url(self, url): - """ - Give an url a score which can be used to choose preferred URLs - for a given project release. 
- """ - t = urlparse(url) - basename = posixpath.basename(t.path) - compatible = True - is_wheel = basename.endswith('.whl') - is_downloadable = basename.endswith(self.downloadable_extensions) - if is_wheel: - compatible = is_compatible(Wheel(basename), self.wheel_tags) - return (t.scheme == 'https', 'pypi.python.org' in t.netloc, - is_downloadable, is_wheel, compatible, basename) - - def prefer_url(self, url1, url2): - """ - Choose one of two URLs where both are candidates for distribution - archives for the same version of a distribution (for example, - .tar.gz vs. zip). - - The current implementation favours https:// URLs over http://, archives - from PyPI over those from other locations, wheel compatibility (if a - wheel) and then the archive name. - """ - result = url2 - if url1: - s1 = self.score_url(url1) - s2 = self.score_url(url2) - if s1 > s2: - result = url1 - if result != url2: - logger.debug('Not replacing %r with %r', url1, url2) - else: - logger.debug('Replacing %r with %r', url1, url2) - return result - - def split_filename(self, filename, project_name): - """ - Attempt to split a filename in project name, version and Python version. - """ - return split_filename(filename, project_name) - - def convert_url_to_download_info(self, url, project_name): - """ - See if a URL is a candidate for a download URL for a project (the URL - has typically been scraped from an HTML page). - - If it is, a dictionary is returned with keys "name", "version", - "filename" and "url"; otherwise, None is returned. 
- """ - def same_project(name1, name2): - return normalize_name(name1) == normalize_name(name2) - - result = None - scheme, netloc, path, params, query, frag = urlparse(url) - if frag.lower().startswith('egg='): # pragma: no cover - logger.debug('%s: version hint in fragment: %r', - project_name, frag) - m = HASHER_HASH.match(frag) - if m: - algo, digest = m.groups() - else: - algo, digest = None, None - origpath = path - if path and path[-1] == '/': # pragma: no cover - path = path[:-1] - if path.endswith('.whl'): - try: - wheel = Wheel(path) - if not is_compatible(wheel, self.wheel_tags): - logger.debug('Wheel not compatible: %s', path) - else: - if project_name is None: - include = True - else: - include = same_project(wheel.name, project_name) - if include: - result = { - 'name': wheel.name, - 'version': wheel.version, - 'filename': wheel.filename, - 'url': urlunparse((scheme, netloc, origpath, - params, query, '')), - 'python-version': ', '.join( - ['.'.join(list(v[2:])) for v in wheel.pyver]), - } - except Exception as e: # pragma: no cover - logger.warning('invalid path for wheel: %s', path) - elif not path.endswith(self.downloadable_extensions): # pragma: no cover - logger.debug('Not downloadable: %s', path) - else: # downloadable extension - path = filename = posixpath.basename(path) - for ext in self.downloadable_extensions: - if path.endswith(ext): - path = path[:-len(ext)] - t = self.split_filename(path, project_name) - if not t: # pragma: no cover - logger.debug('No match for project/version: %s', path) - else: - name, version, pyver = t - if not project_name or same_project(project_name, name): - result = { - 'name': name, - 'version': version, - 'filename': filename, - 'url': urlunparse((scheme, netloc, origpath, - params, query, '')), - #'packagetype': 'sdist', - } - if pyver: # pragma: no cover - result['python-version'] = pyver - break - if result and algo: - result['%s_digest' % algo] = digest - return result - - def _get_digest(self, info): - 
""" - Get a digest from a dictionary by looking at keys of the form - 'algo_digest'. - - Returns a 2-tuple (algo, digest) if found, else None. Currently - looks only for SHA256, then MD5. - """ - result = None - for algo in ('sha256', 'md5'): - key = '%s_digest' % algo - if key in info: - result = (algo, info[key]) - break - return result - - def _update_version_data(self, result, info): - """ - Update a result dictionary (the final result from _get_project) with a - dictionary for a specific version, which typically holds information - gleaned from a filename or URL for an archive for the distribution. - """ - name = info.pop('name') - version = info.pop('version') - if version in result: - dist = result[version] - md = dist.metadata - else: - dist = make_dist(name, version, scheme=self.scheme) - md = dist.metadata - dist.digest = digest = self._get_digest(info) - url = info['url'] - result['digests'][url] = digest - if md.source_url != info['url']: - md.source_url = self.prefer_url(md.source_url, url) - result['urls'].setdefault(version, set()).add(url) - dist.locator = self - result[version] = dist - - def locate(self, requirement, prereleases=False): - """ - Find the most recent distribution which matches the given - requirement. - - :param requirement: A requirement of the form 'foo (1.0)' or perhaps - 'foo (>= 1.0, < 2.0, != 1.3)' - :param prereleases: If ``True``, allow pre-release versions - to be located. Otherwise, pre-release versions - are not returned. - :return: A :class:`Distribution` instance, or ``None`` if no such - distribution could be located. 
- """ - result = None - r = parse_requirement(requirement) - if r is None: # pragma: no cover - raise DistlibException('Not a valid requirement: %r' % requirement) - scheme = get_scheme(self.scheme) - self.matcher = matcher = scheme.matcher(r.requirement) - logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__) - versions = self.get_project(r.name) - if len(versions) > 2: # urls and digests keys are present - # sometimes, versions are invalid - slist = [] - vcls = matcher.version_class - for k in versions: - if k in ('urls', 'digests'): - continue - try: - if not matcher.match(k): - logger.debug('%s did not match %r', matcher, k) - else: - if prereleases or not vcls(k).is_prerelease: - slist.append(k) - else: - logger.debug('skipping pre-release ' - 'version %s of %s', k, matcher.name) - except Exception: # pragma: no cover - logger.warning('error matching %s with %r', matcher, k) - pass # slist.append(k) - if len(slist) > 1: - slist = sorted(slist, key=scheme.key) - if slist: - logger.debug('sorted list: %s', slist) - version = slist[-1] - result = versions[version] - if result: - if r.extras: - result.extras = r.extras - result.download_urls = versions.get('urls', {}).get(version, set()) - d = {} - sd = versions.get('digests', {}) - for url in result.download_urls: - if url in sd: # pragma: no cover - d[url] = sd[url] - result.digests = d - self.matcher = None - return result - - -class PyPIRPCLocator(Locator): - """ - This locator uses XML-RPC to locate distributions. It therefore - cannot be used with simple mirrors (that only mirror file content). - """ - def __init__(self, url, **kwargs): - """ - Initialise an instance. - - :param url: The URL to use for XML-RPC. - :param kwargs: Passed to the superclass constructor. - """ - super(PyPIRPCLocator, self).__init__(**kwargs) - self.base_url = url - self.client = ServerProxy(url, timeout=3.0) - - def get_distribution_names(self): - """ - Return all the distribution names known to this locator. 
- """ - return set(self.client.list_packages()) - - def _get_project(self, name): - result = {'urls': {}, 'digests': {}} - versions = self.client.package_releases(name, True) - for v in versions: - urls = self.client.release_urls(name, v) - data = self.client.release_data(name, v) - metadata = Metadata(scheme=self.scheme) - metadata.name = data['name'] - metadata.version = data['version'] - metadata.license = data.get('license') - metadata.keywords = data.get('keywords', []) - metadata.summary = data.get('summary') - dist = Distribution(metadata) - if urls: - info = urls[0] - metadata.source_url = info['url'] - dist.digest = self._get_digest(info) - dist.locator = self - result[v] = dist - for info in urls: - url = info['url'] - digest = self._get_digest(info) - result['urls'].setdefault(v, set()).add(url) - result['digests'][url] = digest - return result - -class PyPIJSONLocator(Locator): - """ - This locator uses PyPI's JSON interface. It's very limited in functionality - and probably not worth using. - """ - def __init__(self, url, **kwargs): - super(PyPIJSONLocator, self).__init__(**kwargs) - self.base_url = ensure_slash(url) - - def get_distribution_names(self): - """ - Return all the distribution names known to this locator. 
- """ - raise NotImplementedError('Not available from this locator') - - def _get_project(self, name): - result = {'urls': {}, 'digests': {}} - url = urljoin(self.base_url, '%s/json' % quote(name)) - try: - resp = self.opener.open(url) - data = resp.read().decode() # for now - d = json.loads(data) - md = Metadata(scheme=self.scheme) - data = d['info'] - md.name = data['name'] - md.version = data['version'] - md.license = data.get('license') - md.keywords = data.get('keywords', []) - md.summary = data.get('summary') - dist = Distribution(md) - dist.locator = self - urls = d['urls'] - result[md.version] = dist - for info in d['urls']: - url = info['url'] - dist.download_urls.add(url) - dist.digests[url] = self._get_digest(info) - result['urls'].setdefault(md.version, set()).add(url) - result['digests'][url] = self._get_digest(info) - # Now get other releases - for version, infos in d['releases'].items(): - if version == md.version: - continue # already done - omd = Metadata(scheme=self.scheme) - omd.name = md.name - omd.version = version - odist = Distribution(omd) - odist.locator = self - result[version] = odist - for info in infos: - url = info['url'] - odist.download_urls.add(url) - odist.digests[url] = self._get_digest(info) - result['urls'].setdefault(version, set()).add(url) - result['digests'][url] = self._get_digest(info) -# for info in urls: -# md.source_url = info['url'] -# dist.digest = self._get_digest(info) -# dist.locator = self -# for info in urls: -# url = info['url'] -# result['urls'].setdefault(md.version, set()).add(url) -# result['digests'][url] = self._get_digest(info) - except Exception as e: - self.errors.put(text_type(e)) - logger.exception('JSON fetch failed: %s', e) - return result - - -class Page(object): - """ - This class represents a scraped HTML page. 
- """ - # The following slightly hairy-looking regex just looks for the contents of - # an anchor link, which has an attribute "href" either immediately preceded - # or immediately followed by a "rel" attribute. The attribute values can be - # declared with double quotes, single quotes or no quotes - which leads to - # the length of the expression. - _href = re.compile(""" -(rel\\s*=\\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\\s\n]*))\\s+)? -href\\s*=\\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\\s\n]*)) -(\\s+rel\\s*=\\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\\s\n]*)))? -""", re.I | re.S | re.X) - _base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S) - - def __init__(self, data, url): - """ - Initialise an instance with the Unicode page contents and the URL they - came from. - """ - self.data = data - self.base_url = self.url = url - m = self._base.search(self.data) - if m: - self.base_url = m.group(1) - - _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I) - - @cached_property - def links(self): - """ - Return the URLs of all the links on a page together with information - about their "rel" attribute, for determining which ones to treat as - downloads and which ones to queue for further scraping. - """ - def clean(url): - "Tidy up an URL." 
- scheme, netloc, path, params, query, frag = urlparse(url) - return urlunparse((scheme, netloc, quote(path), - params, query, frag)) - - result = set() - for match in self._href.finditer(self.data): - d = match.groupdict('') - rel = (d['rel1'] or d['rel2'] or d['rel3'] or - d['rel4'] or d['rel5'] or d['rel6']) - url = d['url1'] or d['url2'] or d['url3'] - url = urljoin(self.base_url, url) - url = unescape(url) - url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url) - result.add((url, rel)) - # We sort the result, hoping to bring the most recent versions - # to the front - result = sorted(result, key=lambda t: t[0], reverse=True) - return result - - -class SimpleScrapingLocator(Locator): - """ - A locator which scrapes HTML pages to locate downloads for a distribution. - This runs multiple threads to do the I/O; performance is at least as good - as pip's PackageFinder, which works in an analogous fashion. - """ - - # These are used to deal with various Content-Encoding schemes. - decoders = { - 'deflate': zlib.decompress, - 'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(d)).read(), - 'none': lambda b: b, - } - - def __init__(self, url, timeout=None, num_workers=10, **kwargs): - """ - Initialise an instance. - :param url: The root URL to use for scraping. - :param timeout: The timeout, in seconds, to be applied to requests. - This defaults to ``None`` (no timeout specified). - :param num_workers: The number of worker threads you want to do I/O, - This defaults to 10. - :param kwargs: Passed to the superclass. - """ - super(SimpleScrapingLocator, self).__init__(**kwargs) - self.base_url = ensure_slash(url) - self.timeout = timeout - self._page_cache = {} - self._seen = set() - self._to_fetch = queue.Queue() - self._bad_hosts = set() - self.skip_externals = False - self.num_workers = num_workers - self._lock = threading.RLock() - # See issue #45: we need to be resilient when the locator is used - # in a thread, e.g. with concurrent.futures. 
We can't use self._lock - # as it is for coordinating our internal threads - the ones created - # in _prepare_threads. - self._gplock = threading.RLock() - self.platform_check = False # See issue #112 - - def _prepare_threads(self): - """ - Threads are created only when get_project is called, and terminate - before it returns. They are there primarily to parallelise I/O (i.e. - fetching web pages). - """ - self._threads = [] - for i in range(self.num_workers): - t = threading.Thread(target=self._fetch) - t.setDaemon(True) - t.start() - self._threads.append(t) - - def _wait_threads(self): - """ - Tell all the threads to terminate (by sending a sentinel value) and - wait for them to do so. - """ - # Note that you need two loops, since you can't say which - # thread will get each sentinel - for t in self._threads: - self._to_fetch.put(None) # sentinel - for t in self._threads: - t.join() - self._threads = [] - - def _get_project(self, name): - result = {'urls': {}, 'digests': {}} - with self._gplock: - self.result = result - self.project_name = name - url = urljoin(self.base_url, '%s/' % quote(name)) - self._seen.clear() - self._page_cache.clear() - self._prepare_threads() - try: - logger.debug('Queueing %s', url) - self._to_fetch.put(url) - self._to_fetch.join() - finally: - self._wait_threads() - del self.result - return result - - platform_dependent = re.compile(r'\b(linux_(i\d86|x86_64|arm\w+)|' - r'win(32|_amd64)|macosx_?\d+)\b', re.I) - - def _is_platform_dependent(self, url): - """ - Does an URL refer to a platform-specific download? - """ - return self.platform_dependent.search(url) - - def _process_download(self, url): - """ - See if an URL is a suitable download for a project. - - If it is, register information in the result dictionary (for - _get_project) about the specific version it's for. - - Note that the return value isn't actually used other than as a boolean - value. 
- """ - if self.platform_check and self._is_platform_dependent(url): - info = None - else: - info = self.convert_url_to_download_info(url, self.project_name) - logger.debug('process_download: %s -> %s', url, info) - if info: - with self._lock: # needed because self.result is shared - self._update_version_data(self.result, info) - return info - - def _should_queue(self, link, referrer, rel): - """ - Determine whether a link URL from a referring page and with a - particular "rel" attribute should be queued for scraping. - """ - scheme, netloc, path, _, _, _ = urlparse(link) - if path.endswith(self.source_extensions + self.binary_extensions + - self.excluded_extensions): - result = False - elif self.skip_externals and not link.startswith(self.base_url): - result = False - elif not referrer.startswith(self.base_url): - result = False - elif rel not in ('homepage', 'download'): - result = False - elif scheme not in ('http', 'https', 'ftp'): - result = False - elif self._is_platform_dependent(link): - result = False - else: - host = netloc.split(':', 1)[0] - if host.lower() == 'localhost': - result = False - else: - result = True - logger.debug('should_queue: %s (%s) from %s -> %s', link, rel, - referrer, result) - return result - - def _fetch(self): - """ - Get a URL to fetch from the work queue, get the HTML page, examine its - links for download candidates and candidates for further scraping. - - This is a handy method to run in a thread. - """ - while True: - url = self._to_fetch.get() - try: - if url: - page = self.get_page(url) - if page is None: # e.g. after an error - continue - for link, rel in page.links: - if link not in self._seen: - try: - self._seen.add(link) - if (not self._process_download(link) and - self._should_queue(link, url, rel)): - logger.debug('Queueing %s from %s', link, url) - self._to_fetch.put(link) - except MetadataInvalidError: # e.g. 
invalid versions - pass - except Exception as e: # pragma: no cover - self.errors.put(text_type(e)) - finally: - # always do this, to avoid hangs :-) - self._to_fetch.task_done() - if not url: - #logger.debug('Sentinel seen, quitting.') - break - - def get_page(self, url): - """ - Get the HTML for an URL, possibly from an in-memory cache. - - XXX TODO Note: this cache is never actually cleared. It's assumed that - the data won't get stale over the lifetime of a locator instance (not - necessarily true for the default_locator). - """ - # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api - scheme, netloc, path, _, _, _ = urlparse(url) - if scheme == 'file' and os.path.isdir(url2pathname(path)): - url = urljoin(ensure_slash(url), 'index.html') - - if url in self._page_cache: - result = self._page_cache[url] - logger.debug('Returning %s from cache: %s', url, result) - else: - host = netloc.split(':', 1)[0] - result = None - if host in self._bad_hosts: - logger.debug('Skipping %s due to bad host %s', url, host) - else: - req = Request(url, headers={'Accept-encoding': 'identity'}) - try: - logger.debug('Fetching %s', url) - resp = self.opener.open(req, timeout=self.timeout) - logger.debug('Fetched %s', url) - headers = resp.info() - content_type = headers.get('Content-Type', '') - if HTML_CONTENT_TYPE.match(content_type): - final_url = resp.geturl() - data = resp.read() - encoding = headers.get('Content-Encoding') - if encoding: - decoder = self.decoders[encoding] # fail if not found - data = decoder(data) - encoding = 'utf-8' - m = CHARSET.search(content_type) - if m: - encoding = m.group(1) - try: - data = data.decode(encoding) - except UnicodeError: # pragma: no cover - data = data.decode('latin-1') # fallback - result = Page(data, final_url) - self._page_cache[final_url] = result - except HTTPError as e: - if e.code != 404: - logger.exception('Fetch failed: %s: %s', url, e) - except URLError as e: # pragma: no cover - logger.exception('Fetch 
failed: %s: %s', url, e) - with self._lock: - self._bad_hosts.add(host) - except Exception as e: # pragma: no cover - logger.exception('Fetch failed: %s: %s', url, e) - finally: - self._page_cache[url] = result # even if None (failure) - return result - - _distname_re = re.compile('<a href=[^>]*>([^<]+)<') - - def get_distribution_names(self): - """ - Return all the distribution names known to this locator. - """ - result = set() - page = self.get_page(self.base_url) - if not page: - raise DistlibException('Unable to get %s' % self.base_url) - for match in self._distname_re.finditer(page.data): - result.add(match.group(1)) - return result - -class DirectoryLocator(Locator): - """ - This class locates distributions in a directory tree. - """ - - def __init__(self, path, **kwargs): - """ - Initialise an instance. - :param path: The root of the directory tree to search. - :param kwargs: Passed to the superclass constructor, - except for: - * recursive - if True (the default), subdirectories are - recursed into. If False, only the top-level directory - is searched, - """ - self.recursive = kwargs.pop('recursive', True) - super(DirectoryLocator, self).__init__(**kwargs) - path = os.path.abspath(path) - if not os.path.isdir(path): # pragma: no cover - raise DistlibException('Not a directory: %r' % path) - self.base_dir = path - - def should_include(self, filename, parent): - """ - Should a filename be considered as a candidate for a distribution - archive? As well as the filename, the directory which contains it - is provided, though not used by the current implementation. 
- """ - return filename.endswith(self.downloadable_extensions) - - def _get_project(self, name): - result = {'urls': {}, 'digests': {}} - for root, dirs, files in os.walk(self.base_dir): - for fn in files: - if self.should_include(fn, root): - fn = os.path.join(root, fn) - url = urlunparse(('file', '', - pathname2url(os.path.abspath(fn)), - '', '', '')) - info = self.convert_url_to_download_info(url, name) - if info: - self._update_version_data(result, info) - if not self.recursive: - break - return result - - def get_distribution_names(self): - """ - Return all the distribution names known to this locator. - """ - result = set() - for root, dirs, files in os.walk(self.base_dir): - for fn in files: - if self.should_include(fn, root): - fn = os.path.join(root, fn) - url = urlunparse(('file', '', - pathname2url(os.path.abspath(fn)), - '', '', '')) - info = self.convert_url_to_download_info(url, None) - if info: - result.add(info['name']) - if not self.recursive: - break - return result - -class JSONLocator(Locator): - """ - This locator uses special extended metadata (not available on PyPI) and is - the basis of performant dependency resolution in distlib. Other locators - require archive downloads before dependencies can be determined! As you - might imagine, that can be slow. - """ - def get_distribution_names(self): - """ - Return all the distribution names known to this locator. 
- """ - raise NotImplementedError('Not available from this locator') - - def _get_project(self, name): - result = {'urls': {}, 'digests': {}} - data = get_project_data(name) - if data: - for info in data.get('files', []): - if info['ptype'] != 'sdist' or info['pyversion'] != 'source': - continue - # We don't store summary in project metadata as it makes - # the data bigger for no benefit during dependency - # resolution - dist = make_dist(data['name'], info['version'], - summary=data.get('summary', - 'Placeholder for summary'), - scheme=self.scheme) - md = dist.metadata - md.source_url = info['url'] - # TODO SHA256 digest - if 'digest' in info and info['digest']: - dist.digest = ('md5', info['digest']) - md.dependencies = info.get('requirements', {}) - dist.exports = info.get('exports', {}) - result[dist.version] = dist - result['urls'].setdefault(dist.version, set()).add(info['url']) - return result - -class DistPathLocator(Locator): - """ - This locator finds installed distributions in a path. It can be useful for - adding to an :class:`AggregatingLocator`. - """ - def __init__(self, distpath, **kwargs): - """ - Initialise an instance. - - :param distpath: A :class:`DistributionPath` instance to search. - """ - super(DistPathLocator, self).__init__(**kwargs) - assert isinstance(distpath, DistributionPath) - self.distpath = distpath - - def _get_project(self, name): - dist = self.distpath.get_distribution(name) - if dist is None: - result = {'urls': {}, 'digests': {}} - else: - result = { - dist.version: dist, - 'urls': {dist.version: set([dist.source_url])}, - 'digests': {dist.version: set([None])} - } - return result - - -class AggregatingLocator(Locator): - """ - This class allows you to chain and/or merge a list of locators. - """ - def __init__(self, *locators, **kwargs): - """ - Initialise an instance. - - :param locators: The list of locators to search. 
- :param kwargs: Passed to the superclass constructor, - except for: - * merge - if False (the default), the first successful - search from any of the locators is returned. If True, - the results from all locators are merged (this can be - slow). - """ - self.merge = kwargs.pop('merge', False) - self.locators = locators - super(AggregatingLocator, self).__init__(**kwargs) - - def clear_cache(self): - super(AggregatingLocator, self).clear_cache() - for locator in self.locators: - locator.clear_cache() - - def _set_scheme(self, value): - self._scheme = value - for locator in self.locators: - locator.scheme = value - - scheme = property(Locator.scheme.fget, _set_scheme) - - def _get_project(self, name): - result = {} - for locator in self.locators: - d = locator.get_project(name) - if d: - if self.merge: - files = result.get('urls', {}) - digests = result.get('digests', {}) - # next line could overwrite result['urls'], result['digests'] - result.update(d) - df = result.get('urls') - if files and df: - for k, v in files.items(): - if k in df: - df[k] |= v - else: - df[k] = v - dd = result.get('digests') - if digests and dd: - dd.update(digests) - else: - # See issue #18. If any dists are found and we're looking - # for specific constraints, we only return something if - # a match is found. For example, if a DirectoryLocator - # returns just foo (1.0) while we're looking for - # foo (>= 2.0), we'll pretend there was nothing there so - # that subsequent locators can be queried. Otherwise we - # would just return foo (1.0) which would then lead to a - # failure to find foo (>= 2.0), because other locators - # weren't searched. Note that this only matters when - # merge=False. - if self.matcher is None: - found = True - else: - found = False - for k in d: - if self.matcher.match(k): - found = True - break - if found: - result = d - break - return result - - def get_distribution_names(self): - """ - Return all the distribution names known to this locator. 
- """ - result = set() - for locator in self.locators: - try: - result |= locator.get_distribution_names() - except NotImplementedError: - pass - return result - - -# We use a legacy scheme simply because most of the dists on PyPI use legacy -# versions which don't conform to PEP 426 / PEP 440. -default_locator = AggregatingLocator( - JSONLocator(), - SimpleScrapingLocator('https://pypi.python.org/simple/', - timeout=3.0), - scheme='legacy') - -locate = default_locator.locate - -NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*' - r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$') - -class DependencyFinder(object): - """ - Locate dependencies for distributions. - """ - - def __init__(self, locator=None): - """ - Initialise an instance, using the specified locator - to locate distributions. - """ - self.locator = locator or default_locator - self.scheme = get_scheme(self.locator.scheme) - - def add_distribution(self, dist): - """ - Add a distribution to the finder. This will update internal information - about who provides what. - :param dist: The distribution to add. - """ - logger.debug('adding distribution %s', dist) - name = dist.key - self.dists_by_name[name] = dist - self.dists[(name, dist.version)] = dist - for p in dist.provides: - name, version = parse_name_and_version(p) - logger.debug('Add to provided: %s, %s, %s', name, version, dist) - self.provided.setdefault(name, set()).add((version, dist)) - - def remove_distribution(self, dist): - """ - Remove a distribution from the finder. This will update internal - information about who provides what. - :param dist: The distribution to remove. 
- """ - logger.debug('removing distribution %s', dist) - name = dist.key - del self.dists_by_name[name] - del self.dists[(name, dist.version)] - for p in dist.provides: - name, version = parse_name_and_version(p) - logger.debug('Remove from provided: %s, %s, %s', name, version, dist) - s = self.provided[name] - s.remove((version, dist)) - if not s: - del self.provided[name] - - def get_matcher(self, reqt): - """ - Get a version matcher for a requirement. - :param reqt: The requirement - :type reqt: str - :return: A version matcher (an instance of - :class:`distlib.version.Matcher`). - """ - try: - matcher = self.scheme.matcher(reqt) - except UnsupportedVersionError: # pragma: no cover - # XXX compat-mode if cannot read the version - name = reqt.split()[0] - matcher = self.scheme.matcher(name) - return matcher - - def find_providers(self, reqt): - """ - Find the distributions which can fulfill a requirement. - - :param reqt: The requirement. - :type reqt: str - :return: A set of distribution which can fulfill the requirement. - """ - matcher = self.get_matcher(reqt) - name = matcher.key # case-insensitive - result = set() - provided = self.provided - if name in provided: - for version, provider in provided[name]: - try: - match = matcher.match(version) - except UnsupportedVersionError: - match = False - - if match: - result.add(provider) - break - return result - - def try_to_replace(self, provider, other, problems): - """ - Attempt to replace one provider with another. This is typically used - when resolving dependencies from multiple sources, e.g. A requires - (B >= 1.0) while C requires (B >= 1.1). - - For successful replacement, ``provider`` must meet all the requirements - which ``other`` fulfills. - - :param provider: The provider we are trying to replace with. - :param other: The provider we're trying to replace. - :param problems: If False is returned, this will contain what - problems prevented replacement. 
This is currently - a tuple of the literal string 'cantreplace', - ``provider``, ``other`` and the set of requirements - that ``provider`` couldn't fulfill. - :return: True if we can replace ``other`` with ``provider``, else - False. - """ - rlist = self.reqts[other] - unmatched = set() - for s in rlist: - matcher = self.get_matcher(s) - if not matcher.match(provider.version): - unmatched.add(s) - if unmatched: - # can't replace other with provider - problems.add(('cantreplace', provider, other, - frozenset(unmatched))) - result = False - else: - # can replace other with provider - self.remove_distribution(other) - del self.reqts[other] - for s in rlist: - self.reqts.setdefault(provider, set()).add(s) - self.add_distribution(provider) - result = True - return result - - def find(self, requirement, meta_extras=None, prereleases=False): - """ - Find a distribution and all distributions it depends on. - - :param requirement: The requirement specifying the distribution to - find, or a Distribution instance. - :param meta_extras: A list of meta extras such as :test:, :build: and - so on. - :param prereleases: If ``True``, allow pre-release versions to be - returned - otherwise, don't return prereleases - unless they're all that's available. - - Return a set of :class:`Distribution` instances and a set of - problems. - - The distributions returned should be such that they have the - :attr:`required` attribute set to ``True`` if they were - from the ``requirement`` passed to ``find()``, and they have the - :attr:`build_time_dependency` attribute set to ``True`` unless they - are post-installation dependencies of the ``requirement``. - - The problems should be a tuple consisting of the string - ``'unsatisfied'`` and the requirement which couldn't be satisfied - by any distribution known to the locator. 
- """ - - self.provided = {} - self.dists = {} - self.dists_by_name = {} - self.reqts = {} - - meta_extras = set(meta_extras or []) - if ':*:' in meta_extras: - meta_extras.remove(':*:') - # :meta: and :run: are implicitly included - meta_extras |= set([':test:', ':build:', ':dev:']) - - if isinstance(requirement, Distribution): - dist = odist = requirement - logger.debug('passed %s as requirement', odist) - else: - dist = odist = self.locator.locate(requirement, - prereleases=prereleases) - if dist is None: - raise DistlibException('Unable to locate %r' % requirement) - logger.debug('located %s', odist) - dist.requested = True - problems = set() - todo = set([dist]) - install_dists = set([odist]) - while todo: - dist = todo.pop() - name = dist.key # case-insensitive - if name not in self.dists_by_name: - self.add_distribution(dist) - else: - #import pdb; pdb.set_trace() - other = self.dists_by_name[name] - if other != dist: - self.try_to_replace(dist, other, problems) - - ireqts = dist.run_requires | dist.meta_requires - sreqts = dist.build_requires - ereqts = set() - if meta_extras and dist in install_dists: - for key in ('test', 'build', 'dev'): - e = ':%s:' % key - if e in meta_extras: - ereqts |= getattr(dist, '%s_requires' % key) - all_reqts = ireqts | sreqts | ereqts - for r in all_reqts: - providers = self.find_providers(r) - if not providers: - logger.debug('No providers found for %r', r) - provider = self.locator.locate(r, prereleases=prereleases) - # If no provider is found and we didn't consider - # prereleases, consider them now. 
- if provider is None and not prereleases: - provider = self.locator.locate(r, prereleases=True) - if provider is None: - logger.debug('Cannot satisfy %r', r) - problems.add(('unsatisfied', r)) - else: - n, v = provider.key, provider.version - if (n, v) not in self.dists: - todo.add(provider) - providers.add(provider) - if r in ireqts and dist in install_dists: - install_dists.add(provider) - logger.debug('Adding %s to install_dists', - provider.name_and_version) - for p in providers: - name = p.key - if name not in self.dists_by_name: - self.reqts.setdefault(p, set()).add(r) - else: - other = self.dists_by_name[name] - if other != p: - # see if other can be replaced by p - self.try_to_replace(p, other, problems) - - dists = set(self.dists.values()) - for dist in dists: - dist.build_time_dependency = dist not in install_dists - if dist.build_time_dependency: - logger.debug('%s is a build-time dependency only.', - dist.name_and_version) - logger.debug('find done for %s', odist) - return dists, problems diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/manifest.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/manifest.py deleted file mode 100644 index ca0fe44..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/manifest.py +++ /dev/null @@ -1,393 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012-2013 Python Software Foundation. -# See LICENSE.txt and CONTRIBUTORS.txt. -# -""" -Class representing the list of files in a distribution. - -Equivalent to distutils.filelist, but fixes some problems. -""" -import fnmatch -import logging -import os -import re -import sys - -from . 
import DistlibException -from .compat import fsdecode -from .util import convert_path - - -__all__ = ['Manifest'] - -logger = logging.getLogger(__name__) - -# a \ followed by some spaces + EOL -_COLLAPSE_PATTERN = re.compile('\\\\w*\n', re.M) -_COMMENTED_LINE = re.compile('#.*?(?=\n)|\n(?=$)', re.M | re.S) - -# -# Due to the different results returned by fnmatch.translate, we need -# to do slightly different processing for Python 2.7 and 3.2 ... this needed -# to be brought in for Python 3.6 onwards. -# -_PYTHON_VERSION = sys.version_info[:2] - -class Manifest(object): - """A list of files built by on exploring the filesystem and filtered by - applying various patterns to what we find there. - """ - - def __init__(self, base=None): - """ - Initialise an instance. - - :param base: The base directory to explore under. - """ - self.base = os.path.abspath(os.path.normpath(base or os.getcwd())) - self.prefix = self.base + os.sep - self.allfiles = None - self.files = set() - - # - # Public API - # - - def findall(self): - """Find all files under the base and set ``allfiles`` to the absolute - pathnames of files found. - """ - from stat import S_ISREG, S_ISDIR, S_ISLNK - - self.allfiles = allfiles = [] - root = self.base - stack = [root] - pop = stack.pop - push = stack.append - - while stack: - root = pop() - names = os.listdir(root) - - for name in names: - fullname = os.path.join(root, name) - - # Avoid excess stat calls -- just one will do, thank you! - stat = os.stat(fullname) - mode = stat.st_mode - if S_ISREG(mode): - allfiles.append(fsdecode(fullname)) - elif S_ISDIR(mode) and not S_ISLNK(mode): - push(fullname) - - def add(self, item): - """ - Add a file to the manifest. - - :param item: The pathname to add. This can be relative to the base. - """ - if not item.startswith(self.prefix): - item = os.path.join(self.base, item) - self.files.add(os.path.normpath(item)) - - def add_many(self, items): - """ - Add a list of files to the manifest. 
- - :param items: The pathnames to add. These can be relative to the base. - """ - for item in items: - self.add(item) - - def sorted(self, wantdirs=False): - """ - Return sorted files in directory order - """ - - def add_dir(dirs, d): - dirs.add(d) - logger.debug('add_dir added %s', d) - if d != self.base: - parent, _ = os.path.split(d) - assert parent not in ('', '/') - add_dir(dirs, parent) - - result = set(self.files) # make a copy! - if wantdirs: - dirs = set() - for f in result: - add_dir(dirs, os.path.dirname(f)) - result |= dirs - return [os.path.join(*path_tuple) for path_tuple in - sorted(os.path.split(path) for path in result)] - - def clear(self): - """Clear all collected files.""" - self.files = set() - self.allfiles = [] - - def process_directive(self, directive): - """ - Process a directive which either adds some files from ``allfiles`` to - ``files``, or removes some files from ``files``. - - :param directive: The directive to process. This should be in a format - compatible with distutils ``MANIFEST.in`` files: - - http://docs.python.org/distutils/sourcedist.html#commands - """ - # Parse the line: split it up, make sure the right number of words - # is there, and return the relevant words. 'action' is always - # defined: it's the first word of the line. Which of the other - # three are defined depends on the action; it'll be either - # patterns, (dir and patterns), or (dirpattern). - action, patterns, thedir, dirpattern = self._parse_directive(directive) - - # OK, now we know that the action is valid and we have the - # right number of words on the line for that action -- so we - # can proceed with minimal error-checking. 
- if action == 'include': - for pattern in patterns: - if not self._include_pattern(pattern, anchor=True): - logger.warning('no files found matching %r', pattern) - - elif action == 'exclude': - for pattern in patterns: - found = self._exclude_pattern(pattern, anchor=True) - #if not found: - # logger.warning('no previously-included files ' - # 'found matching %r', pattern) - - elif action == 'global-include': - for pattern in patterns: - if not self._include_pattern(pattern, anchor=False): - logger.warning('no files found matching %r ' - 'anywhere in distribution', pattern) - - elif action == 'global-exclude': - for pattern in patterns: - found = self._exclude_pattern(pattern, anchor=False) - #if not found: - # logger.warning('no previously-included files ' - # 'matching %r found anywhere in ' - # 'distribution', pattern) - - elif action == 'recursive-include': - for pattern in patterns: - if not self._include_pattern(pattern, prefix=thedir): - logger.warning('no files found matching %r ' - 'under directory %r', pattern, thedir) - - elif action == 'recursive-exclude': - for pattern in patterns: - found = self._exclude_pattern(pattern, prefix=thedir) - #if not found: - # logger.warning('no previously-included files ' - # 'matching %r found under directory %r', - # pattern, thedir) - - elif action == 'graft': - if not self._include_pattern(None, prefix=dirpattern): - logger.warning('no directories found matching %r', - dirpattern) - - elif action == 'prune': - if not self._exclude_pattern(None, prefix=dirpattern): - logger.warning('no previously-included directories found ' - 'matching %r', dirpattern) - else: # pragma: no cover - # This should never happen, as it should be caught in - # _parse_template_line - raise DistlibException( - 'invalid action %r' % action) - - # - # Private API - # - - def _parse_directive(self, directive): - """ - Validate a directive. - :param directive: The directive to validate. 
- :return: A tuple of action, patterns, thedir, dir_patterns - """ - words = directive.split() - if len(words) == 1 and words[0] not in ('include', 'exclude', - 'global-include', - 'global-exclude', - 'recursive-include', - 'recursive-exclude', - 'graft', 'prune'): - # no action given, let's use the default 'include' - words.insert(0, 'include') - - action = words[0] - patterns = thedir = dir_pattern = None - - if action in ('include', 'exclude', - 'global-include', 'global-exclude'): - if len(words) < 2: - raise DistlibException( - '%r expects <pattern1> <pattern2> ...' % action) - - patterns = [convert_path(word) for word in words[1:]] - - elif action in ('recursive-include', 'recursive-exclude'): - if len(words) < 3: - raise DistlibException( - '%r expects <dir> <pattern1> <pattern2> ...' % action) - - thedir = convert_path(words[1]) - patterns = [convert_path(word) for word in words[2:]] - - elif action in ('graft', 'prune'): - if len(words) != 2: - raise DistlibException( - '%r expects a single <dir_pattern>' % action) - - dir_pattern = convert_path(words[1]) - - else: - raise DistlibException('unknown action %r' % action) - - return action, patterns, thedir, dir_pattern - - def _include_pattern(self, pattern, anchor=True, prefix=None, - is_regex=False): - """Select strings (presumably filenames) from 'self.files' that - match 'pattern', a Unix-style wildcard (glob) pattern. - - Patterns are not quite the same as implemented by the 'fnmatch' - module: '*' and '?' match non-special characters, where "special" - is platform-dependent: slash on Unix; colon, slash, and backslash on - DOS/Windows; and colon on Mac OS. - - If 'anchor' is true (the default), then the pattern match is more - stringent: "*.py" will match "foo.py" but not "foo/bar.py". If - 'anchor' is false, both of these will match. - - If 'prefix' is supplied, then only filenames starting with 'prefix' - (itself a pattern) and ending with 'pattern', with anything in between - them, will match. 
'anchor' is ignored in this case. - - If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and - 'pattern' is assumed to be either a string containing a regex or a - regex object -- no translation is done, the regex is just compiled - and used as-is. - - Selected strings will be added to self.files. - - Return True if files are found. - """ - # XXX docstring lying about what the special chars are? - found = False - pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex) - - # delayed loading of allfiles list - if self.allfiles is None: - self.findall() - - for name in self.allfiles: - if pattern_re.search(name): - self.files.add(name) - found = True - return found - - def _exclude_pattern(self, pattern, anchor=True, prefix=None, - is_regex=False): - """Remove strings (presumably filenames) from 'files' that match - 'pattern'. - - Other parameters are the same as for 'include_pattern()', above. - The list 'self.files' is modified in place. Return True if files are - found. - - This API is public to allow e.g. exclusion of SCM subdirs, e.g. when - packaging source distributions - """ - found = False - pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex) - for f in list(self.files): - if pattern_re.search(f): - self.files.remove(f) - found = True - return found - - def _translate_pattern(self, pattern, anchor=True, prefix=None, - is_regex=False): - """Translate a shell-like wildcard pattern to a compiled regular - expression. - - Return the compiled regex. If 'is_regex' true, - then 'pattern' is directly compiled to a regex (if it's a string) - or just returned as-is (assumes it's a regex object). 
- """ - if is_regex: - if isinstance(pattern, str): - return re.compile(pattern) - else: - return pattern - - if _PYTHON_VERSION > (3, 2): - # ditch start and end characters - start, _, end = self._glob_to_re('_').partition('_') - - if pattern: - pattern_re = self._glob_to_re(pattern) - if _PYTHON_VERSION > (3, 2): - assert pattern_re.startswith(start) and pattern_re.endswith(end) - else: - pattern_re = '' - - base = re.escape(os.path.join(self.base, '')) - if prefix is not None: - # ditch end of pattern character - if _PYTHON_VERSION <= (3, 2): - empty_pattern = self._glob_to_re('') - prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)] - else: - prefix_re = self._glob_to_re(prefix) - assert prefix_re.startswith(start) and prefix_re.endswith(end) - prefix_re = prefix_re[len(start): len(prefix_re) - len(end)] - sep = os.sep - if os.sep == '\\': - sep = r'\\' - if _PYTHON_VERSION <= (3, 2): - pattern_re = '^' + base + sep.join((prefix_re, - '.*' + pattern_re)) - else: - pattern_re = pattern_re[len(start): len(pattern_re) - len(end)] - pattern_re = r'%s%s%s%s.*%s%s' % (start, base, prefix_re, sep, - pattern_re, end) - else: # no prefix -- respect anchor flag - if anchor: - if _PYTHON_VERSION <= (3, 2): - pattern_re = '^' + base + pattern_re - else: - pattern_re = r'%s%s%s' % (start, base, pattern_re[len(start):]) - - return re.compile(pattern_re) - - def _glob_to_re(self, pattern): - """Translate a shell-like glob pattern to a regular expression. - - Return a string containing the regex. Differs from - 'fnmatch.translate()' in that '*' does not match "special characters" - (which are platform-specific). - """ - pattern_re = fnmatch.translate(pattern) - - # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which - # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix, - # and by extension they shouldn't match such "special characters" under - # any OS. 
So change all non-escaped dots in the RE to match any - # character except the special characters (currently: just os.sep). - sep = os.sep - if os.sep == '\\': - # we're using a regex to manipulate a regex, so we need - # to escape the backslash twice - sep = r'\\\\' - escaped = r'\1[^%s]' % sep - pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re) - return pattern_re diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/markers.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/markers.py deleted file mode 100644 index ee1f3e2..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/markers.py +++ /dev/null @@ -1,131 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012-2017 Vinay Sajip. -# Licensed to the Python Software Foundation under a contributor agreement. -# See LICENSE.txt and CONTRIBUTORS.txt. -# -""" -Parser for the environment markers micro-language defined in PEP 508. -""" - -# Note: In PEP 345, the micro-language was Python compatible, so the ast -# module could be used to parse it. However, PEP 508 introduced operators such -# as ~= and === which aren't in Python, necessitating a different approach. - -import os -import sys -import platform -import re - -from .compat import python_implementation, urlparse, string_types -from .util import in_venv, parse_marker - -__all__ = ['interpret'] - -def _is_literal(o): - if not isinstance(o, string_types) or not o: - return False - return o[0] in '\'"' - -class Evaluator(object): - """ - This class is used to evaluate marker expessions. 
- """ - - operations = { - '==': lambda x, y: x == y, - '===': lambda x, y: x == y, - '~=': lambda x, y: x == y or x > y, - '!=': lambda x, y: x != y, - '<': lambda x, y: x < y, - '<=': lambda x, y: x == y or x < y, - '>': lambda x, y: x > y, - '>=': lambda x, y: x == y or x > y, - 'and': lambda x, y: x and y, - 'or': lambda x, y: x or y, - 'in': lambda x, y: x in y, - 'not in': lambda x, y: x not in y, - } - - def evaluate(self, expr, context): - """ - Evaluate a marker expression returned by the :func:`parse_requirement` - function in the specified context. - """ - if isinstance(expr, string_types): - if expr[0] in '\'"': - result = expr[1:-1] - else: - if expr not in context: - raise SyntaxError('unknown variable: %s' % expr) - result = context[expr] - else: - assert isinstance(expr, dict) - op = expr['op'] - if op not in self.operations: - raise NotImplementedError('op not implemented: %s' % op) - elhs = expr['lhs'] - erhs = expr['rhs'] - if _is_literal(expr['lhs']) and _is_literal(expr['rhs']): - raise SyntaxError('invalid comparison: %s %s %s' % (elhs, op, erhs)) - - lhs = self.evaluate(elhs, context) - rhs = self.evaluate(erhs, context) - result = self.operations[op](lhs, rhs) - return result - -def default_context(): - def format_full_version(info): - version = '%s.%s.%s' % (info.major, info.minor, info.micro) - kind = info.releaselevel - if kind != 'final': - version += kind[0] + str(info.serial) - return version - - if hasattr(sys, 'implementation'): - implementation_version = format_full_version(sys.implementation.version) - implementation_name = sys.implementation.name - else: - implementation_version = '0' - implementation_name = '' - - result = { - 'implementation_name': implementation_name, - 'implementation_version': implementation_version, - 'os_name': os.name, - 'platform_machine': platform.machine(), - 'platform_python_implementation': platform.python_implementation(), - 'platform_release': platform.release(), - 'platform_system': 
platform.system(), - 'platform_version': platform.version(), - 'platform_in_venv': str(in_venv()), - 'python_full_version': platform.python_version(), - 'python_version': platform.python_version()[:3], - 'sys_platform': sys.platform, - } - return result - -DEFAULT_CONTEXT = default_context() -del default_context - -evaluator = Evaluator() - -def interpret(marker, execution_context=None): - """ - Interpret a marker and return a result depending on environment. - - :param marker: The marker to interpret. - :type marker: str - :param execution_context: The context used for name lookup. - :type execution_context: mapping - """ - try: - expr, rest = parse_marker(marker) - except Exception as e: - raise SyntaxError('Unable to interpret marker syntax: %s: %s' % (marker, e)) - if rest and rest[0] != '#': - raise SyntaxError('unexpected trailing data in marker: %s: %s' % (marker, rest)) - context = dict(DEFAULT_CONTEXT) - if execution_context: - context.update(execution_context) - return evaluator.evaluate(expr, context) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/metadata.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/metadata.py deleted file mode 100644 index 77eed7f..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/metadata.py +++ /dev/null @@ -1,1094 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012 The Python Software Foundation. -# See LICENSE.txt and CONTRIBUTORS.txt. -# -"""Implementation of the Metadata for Python packages PEPs. - -Supports all metadata formats (1.0, 1.1, 1.2, and 2.0 experimental). -""" -from __future__ import unicode_literals - -import codecs -from email import message_from_file -import json -import logging -import re - - -from . 
import DistlibException, __version__ -from .compat import StringIO, string_types, text_type -from .markers import interpret -from .util import extract_by_key, get_extras -from .version import get_scheme, PEP440_VERSION_RE - -logger = logging.getLogger(__name__) - - -class MetadataMissingError(DistlibException): - """A required metadata is missing""" - - -class MetadataConflictError(DistlibException): - """Attempt to read or write metadata fields that are conflictual.""" - - -class MetadataUnrecognizedVersionError(DistlibException): - """Unknown metadata version number.""" - - -class MetadataInvalidError(DistlibException): - """A metadata value is invalid""" - -# public API of this module -__all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION'] - -# Encoding used for the PKG-INFO files -PKG_INFO_ENCODING = 'utf-8' - -# preferred version. Hopefully will be changed -# to 1.2 once PEP 345 is supported everywhere -PKG_INFO_PREFERRED_VERSION = '1.1' - -_LINE_PREFIX_1_2 = re.compile('\n \\|') -_LINE_PREFIX_PRE_1_2 = re.compile('\n ') -_241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', - 'Summary', 'Description', - 'Keywords', 'Home-page', 'Author', 'Author-email', - 'License') - -_314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', - 'Supported-Platform', 'Summary', 'Description', - 'Keywords', 'Home-page', 'Author', 'Author-email', - 'License', 'Classifier', 'Download-URL', 'Obsoletes', - 'Provides', 'Requires') - -_314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier', - 'Download-URL') - -_345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', - 'Supported-Platform', 'Summary', 'Description', - 'Keywords', 'Home-page', 'Author', 'Author-email', - 'Maintainer', 'Maintainer-email', 'License', - 'Classifier', 'Download-URL', 'Obsoletes-Dist', - 'Project-URL', 'Provides-Dist', 'Requires-Dist', - 'Requires-Python', 'Requires-External') - -_345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python', - 
'Obsoletes-Dist', 'Requires-External', 'Maintainer', - 'Maintainer-email', 'Project-URL') - -_426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', - 'Supported-Platform', 'Summary', 'Description', - 'Keywords', 'Home-page', 'Author', 'Author-email', - 'Maintainer', 'Maintainer-email', 'License', - 'Classifier', 'Download-URL', 'Obsoletes-Dist', - 'Project-URL', 'Provides-Dist', 'Requires-Dist', - 'Requires-Python', 'Requires-External', 'Private-Version', - 'Obsoleted-By', 'Setup-Requires-Dist', 'Extension', - 'Provides-Extra') - -_426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By', - 'Setup-Requires-Dist', 'Extension') - -# See issue #106: Sometimes 'Requires' occurs wrongly in the metadata. Include -# it in the tuple literal below to allow it (for now) -_566_FIELDS = _426_FIELDS + ('Description-Content-Type', 'Requires') - -_566_MARKERS = ('Description-Content-Type',) - -_ALL_FIELDS = set() -_ALL_FIELDS.update(_241_FIELDS) -_ALL_FIELDS.update(_314_FIELDS) -_ALL_FIELDS.update(_345_FIELDS) -_ALL_FIELDS.update(_426_FIELDS) -_ALL_FIELDS.update(_566_FIELDS) - -EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''') - - -def _version2fieldlist(version): - if version == '1.0': - return _241_FIELDS - elif version == '1.1': - return _314_FIELDS - elif version == '1.2': - return _345_FIELDS - elif version in ('1.3', '2.1'): - return _345_FIELDS + _566_FIELDS - elif version == '2.0': - return _426_FIELDS - raise MetadataUnrecognizedVersionError(version) - - -def _best_version(fields): - """Detect the best version depending on the fields used.""" - def _has_marker(keys, markers): - for marker in markers: - if marker in keys: - return True - return False - - keys = [] - for key, value in fields.items(): - if value in ([], 'UNKNOWN', None): - continue - keys.append(key) - - possible_versions = ['1.0', '1.1', '1.2', '1.3', '2.0', '2.1'] - - # first let's try to see if a field is not part of one of the version - for key in keys: - if key not 
in _241_FIELDS and '1.0' in possible_versions: - possible_versions.remove('1.0') - logger.debug('Removed 1.0 due to %s', key) - if key not in _314_FIELDS and '1.1' in possible_versions: - possible_versions.remove('1.1') - logger.debug('Removed 1.1 due to %s', key) - if key not in _345_FIELDS and '1.2' in possible_versions: - possible_versions.remove('1.2') - logger.debug('Removed 1.2 due to %s', key) - if key not in _566_FIELDS and '1.3' in possible_versions: - possible_versions.remove('1.3') - logger.debug('Removed 1.3 due to %s', key) - if key not in _566_FIELDS and '2.1' in possible_versions: - if key != 'Description': # In 2.1, description allowed after headers - possible_versions.remove('2.1') - logger.debug('Removed 2.1 due to %s', key) - if key not in _426_FIELDS and '2.0' in possible_versions: - possible_versions.remove('2.0') - logger.debug('Removed 2.0 due to %s', key) - - # possible_version contains qualified versions - if len(possible_versions) == 1: - return possible_versions[0] # found ! 
- elif len(possible_versions) == 0: - logger.debug('Out of options - unknown metadata set: %s', fields) - raise MetadataConflictError('Unknown metadata set') - - # let's see if one unique marker is found - is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS) - is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS) - is_2_1 = '2.1' in possible_versions and _has_marker(keys, _566_MARKERS) - is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS) - if int(is_1_1) + int(is_1_2) + int(is_2_1) + int(is_2_0) > 1: - raise MetadataConflictError('You used incompatible 1.1/1.2/2.0/2.1 fields') - - # we have the choice, 1.0, or 1.2, or 2.0 - # - 1.0 has a broken Summary field but works with all tools - # - 1.1 is to avoid - # - 1.2 fixes Summary but has little adoption - # - 2.0 adds more features and is very new - if not is_1_1 and not is_1_2 and not is_2_1 and not is_2_0: - # we couldn't find any specific marker - if PKG_INFO_PREFERRED_VERSION in possible_versions: - return PKG_INFO_PREFERRED_VERSION - if is_1_1: - return '1.1' - if is_1_2: - return '1.2' - if is_2_1: - return '2.1' - - return '2.0' - -_ATTR2FIELD = { - 'metadata_version': 'Metadata-Version', - 'name': 'Name', - 'version': 'Version', - 'platform': 'Platform', - 'supported_platform': 'Supported-Platform', - 'summary': 'Summary', - 'description': 'Description', - 'keywords': 'Keywords', - 'home_page': 'Home-page', - 'author': 'Author', - 'author_email': 'Author-email', - 'maintainer': 'Maintainer', - 'maintainer_email': 'Maintainer-email', - 'license': 'License', - 'classifier': 'Classifier', - 'download_url': 'Download-URL', - 'obsoletes_dist': 'Obsoletes-Dist', - 'provides_dist': 'Provides-Dist', - 'requires_dist': 'Requires-Dist', - 'setup_requires_dist': 'Setup-Requires-Dist', - 'requires_python': 'Requires-Python', - 'requires_external': 'Requires-External', - 'requires': 'Requires', - 'provides': 'Provides', - 'obsoletes': 'Obsoletes', - 'project_url': 
'Project-URL', - 'private_version': 'Private-Version', - 'obsoleted_by': 'Obsoleted-By', - 'extension': 'Extension', - 'provides_extra': 'Provides-Extra', -} - -_PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist') -_VERSIONS_FIELDS = ('Requires-Python',) -_VERSION_FIELDS = ('Version',) -_LISTFIELDS = ('Platform', 'Classifier', 'Obsoletes', - 'Requires', 'Provides', 'Obsoletes-Dist', - 'Provides-Dist', 'Requires-Dist', 'Requires-External', - 'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist', - 'Provides-Extra', 'Extension') -_LISTTUPLEFIELDS = ('Project-URL',) - -_ELEMENTSFIELD = ('Keywords',) - -_UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description') - -_MISSING = object() - -_FILESAFE = re.compile('[^A-Za-z0-9.]+') - - -def _get_name_and_version(name, version, for_filename=False): - """Return the distribution name with version. - - If for_filename is true, return a filename-escaped form.""" - if for_filename: - # For both name and version any runs of non-alphanumeric or '.' - # characters are replaced with a single '-'. Additionally any - # spaces in the version string become '.' - name = _FILESAFE.sub('-', name) - version = _FILESAFE.sub('-', version.replace(' ', '.')) - return '%s-%s' % (name, version) - - -class LegacyMetadata(object): - """The legacy metadata of a release. - - Supports versions 1.0, 1.1 and 1.2 (auto-detected). 
You can - instantiate the class with one of these arguments (or none): - - *path*, the path to a metadata file - - *fileobj* give a file-like object with metadata as content - - *mapping* is a dict-like object - - *scheme* is a version scheme name - """ - # TODO document the mapping API and UNKNOWN default key - - def __init__(self, path=None, fileobj=None, mapping=None, - scheme='default'): - if [path, fileobj, mapping].count(None) < 2: - raise TypeError('path, fileobj and mapping are exclusive') - self._fields = {} - self.requires_files = [] - self._dependencies = None - self.scheme = scheme - if path is not None: - self.read(path) - elif fileobj is not None: - self.read_file(fileobj) - elif mapping is not None: - self.update(mapping) - self.set_metadata_version() - - def set_metadata_version(self): - self._fields['Metadata-Version'] = _best_version(self._fields) - - def _write_field(self, fileobj, name, value): - fileobj.write('%s: %s\n' % (name, value)) - - def __getitem__(self, name): - return self.get(name) - - def __setitem__(self, name, value): - return self.set(name, value) - - def __delitem__(self, name): - field_name = self._convert_name(name) - try: - del self._fields[field_name] - except KeyError: - raise KeyError(name) - - def __contains__(self, name): - return (name in self._fields or - self._convert_name(name) in self._fields) - - def _convert_name(self, name): - if name in _ALL_FIELDS: - return name - name = name.replace('-', '_').lower() - return _ATTR2FIELD.get(name, name) - - def _default_value(self, name): - if name in _LISTFIELDS or name in _ELEMENTSFIELD: - return [] - return 'UNKNOWN' - - def _remove_line_prefix(self, value): - if self.metadata_version in ('1.0', '1.1'): - return _LINE_PREFIX_PRE_1_2.sub('\n', value) - else: - return _LINE_PREFIX_1_2.sub('\n', value) - - def __getattr__(self, name): - if name in _ATTR2FIELD: - return self[name] - raise AttributeError(name) - - # - # Public API - # - -# dependencies = 
property(_get_dependencies, _set_dependencies) - - def get_fullname(self, filesafe=False): - """Return the distribution name with version. - - If filesafe is true, return a filename-escaped form.""" - return _get_name_and_version(self['Name'], self['Version'], filesafe) - - def is_field(self, name): - """return True if name is a valid metadata key""" - name = self._convert_name(name) - return name in _ALL_FIELDS - - def is_multi_field(self, name): - name = self._convert_name(name) - return name in _LISTFIELDS - - def read(self, filepath): - """Read the metadata values from a file path.""" - fp = codecs.open(filepath, 'r', encoding='utf-8') - try: - self.read_file(fp) - finally: - fp.close() - - def read_file(self, fileob): - """Read the metadata values from a file object.""" - msg = message_from_file(fileob) - self._fields['Metadata-Version'] = msg['metadata-version'] - - # When reading, get all the fields we can - for field in _ALL_FIELDS: - if field not in msg: - continue - if field in _LISTFIELDS: - # we can have multiple lines - values = msg.get_all(field) - if field in _LISTTUPLEFIELDS and values is not None: - values = [tuple(value.split(',')) for value in values] - self.set(field, values) - else: - # single line - value = msg[field] - if value is not None and value != 'UNKNOWN': - self.set(field, value) - # logger.debug('Attempting to set metadata for %s', self) - # self.set_metadata_version() - - def write(self, filepath, skip_unknown=False): - """Write the metadata fields to filepath.""" - fp = codecs.open(filepath, 'w', encoding='utf-8') - try: - self.write_file(fp, skip_unknown) - finally: - fp.close() - - def write_file(self, fileobject, skip_unknown=False): - """Write the PKG-INFO format data to a file object.""" - self.set_metadata_version() - - for field in _version2fieldlist(self['Metadata-Version']): - values = self.get(field) - if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']): - continue - if field in _ELEMENTSFIELD: - 
self._write_field(fileobject, field, ','.join(values)) - continue - if field not in _LISTFIELDS: - if field == 'Description': - if self.metadata_version in ('1.0', '1.1'): - values = values.replace('\n', '\n ') - else: - values = values.replace('\n', '\n |') - values = [values] - - if field in _LISTTUPLEFIELDS: - values = [','.join(value) for value in values] - - for value in values: - self._write_field(fileobject, field, value) - - def update(self, other=None, **kwargs): - """Set metadata values from the given iterable `other` and kwargs. - - Behavior is like `dict.update`: If `other` has a ``keys`` method, - they are looped over and ``self[key]`` is assigned ``other[key]``. - Else, ``other`` is an iterable of ``(key, value)`` iterables. - - Keys that don't match a metadata field or that have an empty value are - dropped. - """ - def _set(key, value): - if key in _ATTR2FIELD and value: - self.set(self._convert_name(key), value) - - if not other: - # other is None or empty container - pass - elif hasattr(other, 'keys'): - for k in other.keys(): - _set(k, other[k]) - else: - for k, v in other: - _set(k, v) - - if kwargs: - for k, v in kwargs.items(): - _set(k, v) - - def set(self, name, value): - """Control then set a metadata field.""" - name = self._convert_name(name) - - if ((name in _ELEMENTSFIELD or name == 'Platform') and - not isinstance(value, (list, tuple))): - if isinstance(value, string_types): - value = [v.strip() for v in value.split(',')] - else: - value = [] - elif (name in _LISTFIELDS and - not isinstance(value, (list, tuple))): - if isinstance(value, string_types): - value = [value] - else: - value = [] - - if logger.isEnabledFor(logging.WARNING): - project_name = self['Name'] - - scheme = get_scheme(self.scheme) - if name in _PREDICATE_FIELDS and value is not None: - for v in value: - # check that the values are valid - if not scheme.is_valid_matcher(v.split(';')[0]): - logger.warning( - "'%s': '%s' is not valid (field '%s')", - project_name, v, 
name) - # FIXME this rejects UNKNOWN, is that right? - elif name in _VERSIONS_FIELDS and value is not None: - if not scheme.is_valid_constraint_list(value): - logger.warning("'%s': '%s' is not a valid version (field '%s')", - project_name, value, name) - elif name in _VERSION_FIELDS and value is not None: - if not scheme.is_valid_version(value): - logger.warning("'%s': '%s' is not a valid version (field '%s')", - project_name, value, name) - - if name in _UNICODEFIELDS: - if name == 'Description': - value = self._remove_line_prefix(value) - - self._fields[name] = value - - def get(self, name, default=_MISSING): - """Get a metadata field.""" - name = self._convert_name(name) - if name not in self._fields: - if default is _MISSING: - default = self._default_value(name) - return default - if name in _UNICODEFIELDS: - value = self._fields[name] - return value - elif name in _LISTFIELDS: - value = self._fields[name] - if value is None: - return [] - res = [] - for val in value: - if name not in _LISTTUPLEFIELDS: - res.append(val) - else: - # That's for Project-URL - res.append((val[0], val[1])) - return res - - elif name in _ELEMENTSFIELD: - value = self._fields[name] - if isinstance(value, string_types): - return value.split(',') - return self._fields[name] - - def check(self, strict=False): - """Check if the metadata is compliant. 
If strict is True then raise if - no Name or Version are provided""" - self.set_metadata_version() - - # XXX should check the versions (if the file was loaded) - missing, warnings = [], [] - - for attr in ('Name', 'Version'): # required by PEP 345 - if attr not in self: - missing.append(attr) - - if strict and missing != []: - msg = 'missing required metadata: %s' % ', '.join(missing) - raise MetadataMissingError(msg) - - for attr in ('Home-page', 'Author'): - if attr not in self: - missing.append(attr) - - # checking metadata 1.2 (XXX needs to check 1.1, 1.0) - if self['Metadata-Version'] != '1.2': - return missing, warnings - - scheme = get_scheme(self.scheme) - - def are_valid_constraints(value): - for v in value: - if not scheme.is_valid_matcher(v.split(';')[0]): - return False - return True - - for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints), - (_VERSIONS_FIELDS, - scheme.is_valid_constraint_list), - (_VERSION_FIELDS, - scheme.is_valid_version)): - for field in fields: - value = self.get(field, None) - if value is not None and not controller(value): - warnings.append("Wrong value for '%s': %s" % (field, value)) - - return missing, warnings - - def todict(self, skip_missing=False): - """Return fields as a dict. - - Field names will be converted to use the underscore-lowercase style - instead of hyphen-mixed case (i.e. home_page instead of Home-page). 
- """ - self.set_metadata_version() - - mapping_1_0 = ( - ('metadata_version', 'Metadata-Version'), - ('name', 'Name'), - ('version', 'Version'), - ('summary', 'Summary'), - ('home_page', 'Home-page'), - ('author', 'Author'), - ('author_email', 'Author-email'), - ('license', 'License'), - ('description', 'Description'), - ('keywords', 'Keywords'), - ('platform', 'Platform'), - ('classifiers', 'Classifier'), - ('download_url', 'Download-URL'), - ) - - data = {} - for key, field_name in mapping_1_0: - if not skip_missing or field_name in self._fields: - data[key] = self[field_name] - - if self['Metadata-Version'] == '1.2': - mapping_1_2 = ( - ('requires_dist', 'Requires-Dist'), - ('requires_python', 'Requires-Python'), - ('requires_external', 'Requires-External'), - ('provides_dist', 'Provides-Dist'), - ('obsoletes_dist', 'Obsoletes-Dist'), - ('project_url', 'Project-URL'), - ('maintainer', 'Maintainer'), - ('maintainer_email', 'Maintainer-email'), - ) - for key, field_name in mapping_1_2: - if not skip_missing or field_name in self._fields: - if key != 'project_url': - data[key] = self[field_name] - else: - data[key] = [','.join(u) for u in self[field_name]] - - elif self['Metadata-Version'] == '1.1': - mapping_1_1 = ( - ('provides', 'Provides'), - ('requires', 'Requires'), - ('obsoletes', 'Obsoletes'), - ) - for key, field_name in mapping_1_1: - if not skip_missing or field_name in self._fields: - data[key] = self[field_name] - - return data - - def add_requirements(self, requirements): - if self['Metadata-Version'] == '1.1': - # we can't have 1.1 metadata *and* Setuptools requires - for field in ('Obsoletes', 'Requires', 'Provides'): - if field in self: - del self[field] - self['Requires-Dist'] += requirements - - # Mapping API - # TODO could add iter* variants - - def keys(self): - return list(_version2fieldlist(self['Metadata-Version'])) - - def __iter__(self): - for key in self.keys(): - yield key - - def values(self): - return [self[key] for key in 
self.keys()] - - def items(self): - return [(key, self[key]) for key in self.keys()] - - def __repr__(self): - return '<%s %s %s>' % (self.__class__.__name__, self.name, - self.version) - - -METADATA_FILENAME = 'pydist.json' -WHEEL_METADATA_FILENAME = 'metadata.json' -LEGACY_METADATA_FILENAME = 'METADATA' - - -class Metadata(object): - """ - The metadata of a release. This implementation uses 2.0 (JSON) - metadata where possible. If not possible, it wraps a LegacyMetadata - instance which handles the key-value metadata format. - """ - - METADATA_VERSION_MATCHER = re.compile(r'^\d+(\.\d+)*$') - - NAME_MATCHER = re.compile('^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$', re.I) - - VERSION_MATCHER = PEP440_VERSION_RE - - SUMMARY_MATCHER = re.compile('.{1,2047}') - - METADATA_VERSION = '2.0' - - GENERATOR = 'distlib (%s)' % __version__ - - MANDATORY_KEYS = { - 'name': (), - 'version': (), - 'summary': ('legacy',), - } - - INDEX_KEYS = ('name version license summary description author ' - 'author_email keywords platform home_page classifiers ' - 'download_url') - - DEPENDENCY_KEYS = ('extras run_requires test_requires build_requires ' - 'dev_requires provides meta_requires obsoleted_by ' - 'supports_environments') - - SYNTAX_VALIDATORS = { - 'metadata_version': (METADATA_VERSION_MATCHER, ()), - 'name': (NAME_MATCHER, ('legacy',)), - 'version': (VERSION_MATCHER, ('legacy',)), - 'summary': (SUMMARY_MATCHER, ('legacy',)), - } - - __slots__ = ('_legacy', '_data', 'scheme') - - def __init__(self, path=None, fileobj=None, mapping=None, - scheme='default'): - if [path, fileobj, mapping].count(None) < 2: - raise TypeError('path, fileobj and mapping are exclusive') - self._legacy = None - self._data = None - self.scheme = scheme - #import pdb; pdb.set_trace() - if mapping is not None: - try: - self._validate_mapping(mapping, scheme) - self._data = mapping - except MetadataUnrecognizedVersionError: - self._legacy = LegacyMetadata(mapping=mapping, scheme=scheme) - self.validate() - else: - 
data = None - if path: - with open(path, 'rb') as f: - data = f.read() - elif fileobj: - data = fileobj.read() - if data is None: - # Initialised with no args - to be added - self._data = { - 'metadata_version': self.METADATA_VERSION, - 'generator': self.GENERATOR, - } - else: - if not isinstance(data, text_type): - data = data.decode('utf-8') - try: - self._data = json.loads(data) - self._validate_mapping(self._data, scheme) - except ValueError: - # Note: MetadataUnrecognizedVersionError does not - # inherit from ValueError (it's a DistlibException, - # which should not inherit from ValueError). - # The ValueError comes from the json.load - if that - # succeeds and we get a validation error, we want - # that to propagate - self._legacy = LegacyMetadata(fileobj=StringIO(data), - scheme=scheme) - self.validate() - - common_keys = set(('name', 'version', 'license', 'keywords', 'summary')) - - none_list = (None, list) - none_dict = (None, dict) - - mapped_keys = { - 'run_requires': ('Requires-Dist', list), - 'build_requires': ('Setup-Requires-Dist', list), - 'dev_requires': none_list, - 'test_requires': none_list, - 'meta_requires': none_list, - 'extras': ('Provides-Extra', list), - 'modules': none_list, - 'namespaces': none_list, - 'exports': none_dict, - 'commands': none_dict, - 'classifiers': ('Classifier', list), - 'source_url': ('Download-URL', None), - 'metadata_version': ('Metadata-Version', None), - } - - del none_list, none_dict - - def __getattribute__(self, key): - common = object.__getattribute__(self, 'common_keys') - mapped = object.__getattribute__(self, 'mapped_keys') - if key in mapped: - lk, maker = mapped[key] - if self._legacy: - if lk is None: - result = None if maker is None else maker() - else: - result = self._legacy.get(lk) - else: - value = None if maker is None else maker() - if key not in ('commands', 'exports', 'modules', 'namespaces', - 'classifiers'): - result = self._data.get(key, value) - else: - # special cases for PEP 459 - sentinel 
= object() - result = sentinel - d = self._data.get('extensions') - if d: - if key == 'commands': - result = d.get('python.commands', value) - elif key == 'classifiers': - d = d.get('python.details') - if d: - result = d.get(key, value) - else: - d = d.get('python.exports') - if not d: - d = self._data.get('python.exports') - if d: - result = d.get(key, value) - if result is sentinel: - result = value - elif key not in common: - result = object.__getattribute__(self, key) - elif self._legacy: - result = self._legacy.get(key) - else: - result = self._data.get(key) - return result - - def _validate_value(self, key, value, scheme=None): - if key in self.SYNTAX_VALIDATORS: - pattern, exclusions = self.SYNTAX_VALIDATORS[key] - if (scheme or self.scheme) not in exclusions: - m = pattern.match(value) - if not m: - raise MetadataInvalidError("'%s' is an invalid value for " - "the '%s' property" % (value, - key)) - - def __setattr__(self, key, value): - self._validate_value(key, value) - common = object.__getattribute__(self, 'common_keys') - mapped = object.__getattribute__(self, 'mapped_keys') - if key in mapped: - lk, _ = mapped[key] - if self._legacy: - if lk is None: - raise NotImplementedError - self._legacy[lk] = value - elif key not in ('commands', 'exports', 'modules', 'namespaces', - 'classifiers'): - self._data[key] = value - else: - # special cases for PEP 459 - d = self._data.setdefault('extensions', {}) - if key == 'commands': - d['python.commands'] = value - elif key == 'classifiers': - d = d.setdefault('python.details', {}) - d[key] = value - else: - d = d.setdefault('python.exports', {}) - d[key] = value - elif key not in common: - object.__setattr__(self, key, value) - else: - if key == 'keywords': - if isinstance(value, string_types): - value = value.strip() - if value: - value = value.split() - else: - value = [] - if self._legacy: - self._legacy[key] = value - else: - self._data[key] = value - - @property - def name_and_version(self): - return 
_get_name_and_version(self.name, self.version, True) - - @property - def provides(self): - if self._legacy: - result = self._legacy['Provides-Dist'] - else: - result = self._data.setdefault('provides', []) - s = '%s (%s)' % (self.name, self.version) - if s not in result: - result.append(s) - return result - - @provides.setter - def provides(self, value): - if self._legacy: - self._legacy['Provides-Dist'] = value - else: - self._data['provides'] = value - - def get_requirements(self, reqts, extras=None, env=None): - """ - Base method to get dependencies, given a set of extras - to satisfy and an optional environment context. - :param reqts: A list of sometimes-wanted dependencies, - perhaps dependent on extras and environment. - :param extras: A list of optional components being requested. - :param env: An optional environment for marker evaluation. - """ - if self._legacy: - result = reqts - else: - result = [] - extras = get_extras(extras or [], self.extras) - for d in reqts: - if 'extra' not in d and 'environment' not in d: - # unconditional - include = True - else: - if 'extra' not in d: - # Not extra-dependent - only environment-dependent - include = True - else: - include = d.get('extra') in extras - if include: - # Not excluded because of extras, check environment - marker = d.get('environment') - if marker: - include = interpret(marker, env) - if include: - result.extend(d['requires']) - for key in ('build', 'dev', 'test'): - e = ':%s:' % key - if e in extras: - extras.remove(e) - # A recursive call, but it should terminate since 'test' - # has been removed from the extras - reqts = self._data.get('%s_requires' % key, []) - result.extend(self.get_requirements(reqts, extras=extras, - env=env)) - return result - - @property - def dictionary(self): - if self._legacy: - return self._from_legacy() - return self._data - - @property - def dependencies(self): - if self._legacy: - raise NotImplementedError - else: - return extract_by_key(self._data, 
self.DEPENDENCY_KEYS) - - @dependencies.setter - def dependencies(self, value): - if self._legacy: - raise NotImplementedError - else: - self._data.update(value) - - def _validate_mapping(self, mapping, scheme): - if mapping.get('metadata_version') != self.METADATA_VERSION: - raise MetadataUnrecognizedVersionError() - missing = [] - for key, exclusions in self.MANDATORY_KEYS.items(): - if key not in mapping: - if scheme not in exclusions: - missing.append(key) - if missing: - msg = 'Missing metadata items: %s' % ', '.join(missing) - raise MetadataMissingError(msg) - for k, v in mapping.items(): - self._validate_value(k, v, scheme) - - def validate(self): - if self._legacy: - missing, warnings = self._legacy.check(True) - if missing or warnings: - logger.warning('Metadata: missing: %s, warnings: %s', - missing, warnings) - else: - self._validate_mapping(self._data, self.scheme) - - def todict(self): - if self._legacy: - return self._legacy.todict(True) - else: - result = extract_by_key(self._data, self.INDEX_KEYS) - return result - - def _from_legacy(self): - assert self._legacy and not self._data - result = { - 'metadata_version': self.METADATA_VERSION, - 'generator': self.GENERATOR, - } - lmd = self._legacy.todict(True) # skip missing ones - for k in ('name', 'version', 'license', 'summary', 'description', - 'classifier'): - if k in lmd: - if k == 'classifier': - nk = 'classifiers' - else: - nk = k - result[nk] = lmd[k] - kw = lmd.get('Keywords', []) - if kw == ['']: - kw = [] - result['keywords'] = kw - keys = (('requires_dist', 'run_requires'), - ('setup_requires_dist', 'build_requires')) - for ok, nk in keys: - if ok in lmd and lmd[ok]: - result[nk] = [{'requires': lmd[ok]}] - result['provides'] = self.provides - author = {} - maintainer = {} - return result - - LEGACY_MAPPING = { - 'name': 'Name', - 'version': 'Version', - 'license': 'License', - 'summary': 'Summary', - 'description': 'Description', - 'classifiers': 'Classifier', - } - - def _to_legacy(self): 
- def process_entries(entries): - reqts = set() - for e in entries: - extra = e.get('extra') - env = e.get('environment') - rlist = e['requires'] - for r in rlist: - if not env and not extra: - reqts.add(r) - else: - marker = '' - if extra: - marker = 'extra == "%s"' % extra - if env: - if marker: - marker = '(%s) and %s' % (env, marker) - else: - marker = env - reqts.add(';'.join((r, marker))) - return reqts - - assert self._data and not self._legacy - result = LegacyMetadata() - nmd = self._data - for nk, ok in self.LEGACY_MAPPING.items(): - if nk in nmd: - result[ok] = nmd[nk] - r1 = process_entries(self.run_requires + self.meta_requires) - r2 = process_entries(self.build_requires + self.dev_requires) - if self.extras: - result['Provides-Extra'] = sorted(self.extras) - result['Requires-Dist'] = sorted(r1) - result['Setup-Requires-Dist'] = sorted(r2) - # TODO: other fields such as contacts - return result - - def write(self, path=None, fileobj=None, legacy=False, skip_unknown=True): - if [path, fileobj].count(None) != 1: - raise ValueError('Exactly one of path and fileobj is needed') - self.validate() - if legacy: - if self._legacy: - legacy_md = self._legacy - else: - legacy_md = self._to_legacy() - if path: - legacy_md.write(path, skip_unknown=skip_unknown) - else: - legacy_md.write_file(fileobj, skip_unknown=skip_unknown) - else: - if self._legacy: - d = self._from_legacy() - else: - d = self._data - if fileobj: - json.dump(d, fileobj, ensure_ascii=True, indent=2, - sort_keys=True) - else: - with codecs.open(path, 'w', 'utf-8') as f: - json.dump(d, f, ensure_ascii=True, indent=2, - sort_keys=True) - - def add_requirements(self, requirements): - if self._legacy: - self._legacy.add_requirements(requirements) - else: - run_requires = self._data.setdefault('run_requires', []) - always = None - for entry in run_requires: - if 'environment' not in entry and 'extra' not in entry: - always = entry - break - if always is None: - always = { 'requires': requirements } - 
run_requires.insert(0, always) - else: - rset = set(always['requires']) | set(requirements) - always['requires'] = sorted(rset) - - def __repr__(self): - name = self.name or '(no name)' - version = self.version or 'no version' - return '<%s %s %s (%s)>' % (self.__class__.__name__, - self.metadata_version, name, version) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/resources.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/resources.py deleted file mode 100644 index 1884016..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/resources.py +++ /dev/null @@ -1,355 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2013-2017 Vinay Sajip. -# Licensed to the Python Software Foundation under a contributor agreement. -# See LICENSE.txt and CONTRIBUTORS.txt. -# -from __future__ import unicode_literals - -import bisect -import io -import logging -import os -import pkgutil -import shutil -import sys -import types -import zipimport - -from . import DistlibException -from .util import cached_property, get_cache_base, path_to_cache_dir, Cache - -logger = logging.getLogger(__name__) - - -cache = None # created when needed - - -class ResourceCache(Cache): - def __init__(self, base=None): - if base is None: - # Use native string to avoid issues on 2.x: see Python #20140. - base = os.path.join(get_cache_base(), str('resource-cache')) - super(ResourceCache, self).__init__(base) - - def is_stale(self, resource, path): - """ - Is the cache stale for the given resource? - - :param resource: The :class:`Resource` being cached. - :param path: The path of the resource in the cache. - :return: True if the cache is stale. - """ - # Cache invalidation is a hard problem :-) - return True - - def get(self, resource): - """ - Get a resource into the cache, - - :param resource: A :class:`Resource` instance. - :return: The pathname of the resource in the cache. 
- """ - prefix, path = resource.finder.get_cache_info(resource) - if prefix is None: - result = path - else: - result = os.path.join(self.base, self.prefix_to_dir(prefix), path) - dirname = os.path.dirname(result) - if not os.path.isdir(dirname): - os.makedirs(dirname) - if not os.path.exists(result): - stale = True - else: - stale = self.is_stale(resource, path) - if stale: - # write the bytes of the resource to the cache location - with open(result, 'wb') as f: - f.write(resource.bytes) - return result - - -class ResourceBase(object): - def __init__(self, finder, name): - self.finder = finder - self.name = name - - -class Resource(ResourceBase): - """ - A class representing an in-package resource, such as a data file. This is - not normally instantiated by user code, but rather by a - :class:`ResourceFinder` which manages the resource. - """ - is_container = False # Backwards compatibility - - def as_stream(self): - """ - Get the resource as a stream. - - This is not a property to make it obvious that it returns a new stream - each time. - """ - return self.finder.get_stream(self) - - @cached_property - def file_path(self): - global cache - if cache is None: - cache = ResourceCache() - return cache.get(self) - - @cached_property - def bytes(self): - return self.finder.get_bytes(self) - - @cached_property - def size(self): - return self.finder.get_size(self) - - -class ResourceContainer(ResourceBase): - is_container = True # Backwards compatibility - - @cached_property - def resources(self): - return self.finder.get_resources(self) - - -class ResourceFinder(object): - """ - Resource finder for file system resources. 
- """ - - if sys.platform.startswith('java'): - skipped_extensions = ('.pyc', '.pyo', '.class') - else: - skipped_extensions = ('.pyc', '.pyo') - - def __init__(self, module): - self.module = module - self.loader = getattr(module, '__loader__', None) - self.base = os.path.dirname(getattr(module, '__file__', '')) - - def _adjust_path(self, path): - return os.path.realpath(path) - - def _make_path(self, resource_name): - # Issue #50: need to preserve type of path on Python 2.x - # like os.path._get_sep - if isinstance(resource_name, bytes): # should only happen on 2.x - sep = b'/' - else: - sep = '/' - parts = resource_name.split(sep) - parts.insert(0, self.base) - result = os.path.join(*parts) - return self._adjust_path(result) - - def _find(self, path): - return os.path.exists(path) - - def get_cache_info(self, resource): - return None, resource.path - - def find(self, resource_name): - path = self._make_path(resource_name) - if not self._find(path): - result = None - else: - if self._is_directory(path): - result = ResourceContainer(self, resource_name) - else: - result = Resource(self, resource_name) - result.path = path - return result - - def get_stream(self, resource): - return open(resource.path, 'rb') - - def get_bytes(self, resource): - with open(resource.path, 'rb') as f: - return f.read() - - def get_size(self, resource): - return os.path.getsize(resource.path) - - def get_resources(self, resource): - def allowed(f): - return (f != '__pycache__' and not - f.endswith(self.skipped_extensions)) - return set([f for f in os.listdir(resource.path) if allowed(f)]) - - def is_container(self, resource): - return self._is_directory(resource.path) - - _is_directory = staticmethod(os.path.isdir) - - def iterator(self, resource_name): - resource = self.find(resource_name) - if resource is not None: - todo = [resource] - while todo: - resource = todo.pop(0) - yield resource - if resource.is_container: - rname = resource.name - for name in resource.resources: - if not 
rname: - new_name = name - else: - new_name = '/'.join([rname, name]) - child = self.find(new_name) - if child.is_container: - todo.append(child) - else: - yield child - - -class ZipResourceFinder(ResourceFinder): - """ - Resource finder for resources in .zip files. - """ - def __init__(self, module): - super(ZipResourceFinder, self).__init__(module) - archive = self.loader.archive - self.prefix_len = 1 + len(archive) - # PyPy doesn't have a _files attr on zipimporter, and you can't set one - if hasattr(self.loader, '_files'): - self._files = self.loader._files - else: - self._files = zipimport._zip_directory_cache[archive] - self.index = sorted(self._files) - - def _adjust_path(self, path): - return path - - def _find(self, path): - path = path[self.prefix_len:] - if path in self._files: - result = True - else: - if path and path[-1] != os.sep: - path = path + os.sep - i = bisect.bisect(self.index, path) - try: - result = self.index[i].startswith(path) - except IndexError: - result = False - if not result: - logger.debug('_find failed: %r %r', path, self.loader.prefix) - else: - logger.debug('_find worked: %r %r', path, self.loader.prefix) - return result - - def get_cache_info(self, resource): - prefix = self.loader.archive - path = resource.path[1 + len(prefix):] - return prefix, path - - def get_bytes(self, resource): - return self.loader.get_data(resource.path) - - def get_stream(self, resource): - return io.BytesIO(self.get_bytes(resource)) - - def get_size(self, resource): - path = resource.path[self.prefix_len:] - return self._files[path][3] - - def get_resources(self, resource): - path = resource.path[self.prefix_len:] - if path and path[-1] != os.sep: - path += os.sep - plen = len(path) - result = set() - i = bisect.bisect(self.index, path) - while i < len(self.index): - if not self.index[i].startswith(path): - break - s = self.index[i][plen:] - result.add(s.split(os.sep, 1)[0]) # only immediate children - i += 1 - return result - - def 
_is_directory(self, path): - path = path[self.prefix_len:] - if path and path[-1] != os.sep: - path += os.sep - i = bisect.bisect(self.index, path) - try: - result = self.index[i].startswith(path) - except IndexError: - result = False - return result - -_finder_registry = { - type(None): ResourceFinder, - zipimport.zipimporter: ZipResourceFinder -} - -try: - # In Python 3.6, _frozen_importlib -> _frozen_importlib_external - try: - import _frozen_importlib_external as _fi - except ImportError: - import _frozen_importlib as _fi - _finder_registry[_fi.SourceFileLoader] = ResourceFinder - _finder_registry[_fi.FileFinder] = ResourceFinder - del _fi -except (ImportError, AttributeError): - pass - - -def register_finder(loader, finder_maker): - _finder_registry[type(loader)] = finder_maker - -_finder_cache = {} - - -def finder(package): - """ - Return a resource finder for a package. - :param package: The name of the package. - :return: A :class:`ResourceFinder` instance for the package. - """ - if package in _finder_cache: - result = _finder_cache[package] - else: - if package not in sys.modules: - __import__(package) - module = sys.modules[package] - path = getattr(module, '__path__', None) - if path is None: - raise DistlibException('You cannot get a finder for a module, ' - 'only for a package') - loader = getattr(module, '__loader__', None) - finder_maker = _finder_registry.get(type(loader)) - if finder_maker is None: - raise DistlibException('Unable to locate finder for %r' % package) - result = finder_maker(module) - _finder_cache[package] = result - return result - - -_dummy_module = types.ModuleType(str('__dummy__')) - - -def finder_for_path(path): - """ - Return a resource finder for a path, which should represent a container. - - :param path: The path. - :return: A :class:`ResourceFinder` instance for the path. 
- """ - result = None - # calls any path hooks, gets importer into cache - pkgutil.get_importer(path) - loader = sys.path_importer_cache.get(path) - finder = _finder_registry.get(type(loader)) - if finder: - module = _dummy_module - module.__file__ = os.path.join(path, '') - module.__loader__ = loader - result = finder(module) - return result diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/scripts.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/scripts.py deleted file mode 100644 index 8e22cb9..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/scripts.py +++ /dev/null @@ -1,417 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2013-2015 Vinay Sajip. -# Licensed to the Python Software Foundation under a contributor agreement. -# See LICENSE.txt and CONTRIBUTORS.txt. -# -from io import BytesIO -import logging -import os -import re -import struct -import sys - -from .compat import sysconfig, detect_encoding, ZipFile -from .resources import finder -from .util import (FileOperator, get_export_entry, convert_path, - get_executable, in_venv) - -logger = logging.getLogger(__name__) - -_DEFAULT_MANIFEST = ''' -<?xml version="1.0" encoding="UTF-8" standalone="yes"?> -<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0"> - <assemblyIdentity version="1.0.0.0" - processorArchitecture="X86" - name="%s" - type="win32"/> - - <!-- Identify the application security requirements. 
--> - <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3"> - <security> - <requestedPrivileges> - <requestedExecutionLevel level="asInvoker" uiAccess="false"/> - </requestedPrivileges> - </security> - </trustInfo> -</assembly>'''.strip() - -# check if Python is called on the first line with this expression -FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$') -SCRIPT_TEMPLATE = r'''# -*- coding: utf-8 -*- -if __name__ == '__main__': - import sys, re - - def _resolve(module, func): - __import__(module) - mod = sys.modules[module] - parts = func.split('.') - result = getattr(mod, parts.pop(0)) - for p in parts: - result = getattr(result, p) - return result - - try: - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - - func = _resolve('%(module)s', '%(func)s') - rc = func() # None interpreted as 0 - except Exception as e: # only supporting Python >= 2.6 - sys.stderr.write('%%s\n' %% e) - rc = 1 - sys.exit(rc) -''' - - -def _enquote_executable(executable): - if ' ' in executable: - # make sure we quote only the executable in case of env - # for example /usr/bin/env "/dir with spaces/bin/jython" - # instead of "/usr/bin/env /dir with spaces/bin/jython" - # otherwise whole - if executable.startswith('/usr/bin/env '): - env, _executable = executable.split(' ', 1) - if ' ' in _executable and not _executable.startswith('"'): - executable = '%s "%s"' % (env, _executable) - else: - if not executable.startswith('"'): - executable = '"%s"' % executable - return executable - - -class ScriptMaker(object): - """ - A class to copy or create scripts from source scripts or callable - specifications. 
- """ - script_template = SCRIPT_TEMPLATE - - executable = None # for shebangs - - def __init__(self, source_dir, target_dir, add_launchers=True, - dry_run=False, fileop=None): - self.source_dir = source_dir - self.target_dir = target_dir - self.add_launchers = add_launchers - self.force = False - self.clobber = False - # It only makes sense to set mode bits on POSIX. - self.set_mode = (os.name == 'posix') or (os.name == 'java' and - os._name == 'posix') - self.variants = set(('', 'X.Y')) - self._fileop = fileop or FileOperator(dry_run) - - self._is_nt = os.name == 'nt' or ( - os.name == 'java' and os._name == 'nt') - - def _get_alternate_executable(self, executable, options): - if options.get('gui', False) and self._is_nt: # pragma: no cover - dn, fn = os.path.split(executable) - fn = fn.replace('python', 'pythonw') - executable = os.path.join(dn, fn) - return executable - - if sys.platform.startswith('java'): # pragma: no cover - def _is_shell(self, executable): - """ - Determine if the specified executable is a script - (contains a #! line) - """ - try: - with open(executable) as fp: - return fp.read(2) == '#!' - except (OSError, IOError): - logger.warning('Failed to open %s', executable) - return False - - def _fix_jython_executable(self, executable): - if self._is_shell(executable): - # Workaround for Jython is not needed on Linux systems. - import java - - if java.lang.System.getProperty('os.name') == 'Linux': - return executable - elif executable.lower().endswith('jython.exe'): - # Use wrapper exe for Jython on Windows - return executable - return '/usr/bin/env %s' % executable - - def _build_shebang(self, executable, post_interp): - """ - Build a shebang line. In the simple case (on Windows, or a shebang line - which is not too long or contains spaces) use a simple formulation for - the shebang. Otherwise, use /bin/sh as the executable, with a contrived - shebang which allows the script to run either under Python or sh, using - suitable quoting. 
Thanks to Harald Nordgren for his input. - - See also: http://www.in-ulm.de/~mascheck/various/shebang/#length - https://hg.mozilla.org/mozilla-central/file/tip/mach - """ - if os.name != 'posix': - simple_shebang = True - else: - # Add 3 for '#!' prefix and newline suffix. - shebang_length = len(executable) + len(post_interp) + 3 - if sys.platform == 'darwin': - max_shebang_length = 512 - else: - max_shebang_length = 127 - simple_shebang = ((b' ' not in executable) and - (shebang_length <= max_shebang_length)) - - if simple_shebang: - result = b'#!' + executable + post_interp + b'\n' - else: - result = b'#!/bin/sh\n' - result += b"'''exec' " + executable + post_interp + b' "$0" "$@"\n' - result += b"' '''" - return result - - def _get_shebang(self, encoding, post_interp=b'', options=None): - enquote = True - if self.executable: - executable = self.executable - enquote = False # assume this will be taken care of - elif not sysconfig.is_python_build(): - executable = get_executable() - elif in_venv(): # pragma: no cover - executable = os.path.join(sysconfig.get_path('scripts'), - 'python%s' % sysconfig.get_config_var('EXE')) - else: # pragma: no cover - executable = os.path.join( - sysconfig.get_config_var('BINDIR'), - 'python%s%s' % (sysconfig.get_config_var('VERSION'), - sysconfig.get_config_var('EXE'))) - if options: - executable = self._get_alternate_executable(executable, options) - - if sys.platform.startswith('java'): # pragma: no cover - executable = self._fix_jython_executable(executable) - # Normalise case for Windows - executable = os.path.normcase(executable) - # If the user didn't specify an executable, it may be necessary to - # cater for executable paths with spaces (not uncommon on Windows) - if enquote: - executable = _enquote_executable(executable) - # Issue #51: don't use fsencode, since we later try to - # check that the shebang is decodable using utf-8. 
- executable = executable.encode('utf-8') - # in case of IronPython, play safe and enable frames support - if (sys.platform == 'cli' and '-X:Frames' not in post_interp - and '-X:FullFrames' not in post_interp): # pragma: no cover - post_interp += b' -X:Frames' - shebang = self._build_shebang(executable, post_interp) - # Python parser starts to read a script using UTF-8 until - # it gets a #coding:xxx cookie. The shebang has to be the - # first line of a file, the #coding:xxx cookie cannot be - # written before. So the shebang has to be decodable from - # UTF-8. - try: - shebang.decode('utf-8') - except UnicodeDecodeError: # pragma: no cover - raise ValueError( - 'The shebang (%r) is not decodable from utf-8' % shebang) - # If the script is encoded to a custom encoding (use a - # #coding:xxx cookie), the shebang has to be decodable from - # the script encoding too. - if encoding != 'utf-8': - try: - shebang.decode(encoding) - except UnicodeDecodeError: # pragma: no cover - raise ValueError( - 'The shebang (%r) is not decodable ' - 'from the script encoding (%r)' % (shebang, encoding)) - return shebang - - def _get_script_text(self, entry): - return self.script_template % dict(module=entry.prefix, - func=entry.suffix) - - manifest = _DEFAULT_MANIFEST - - def get_manifest(self, exename): - base = os.path.basename(exename) - return self.manifest % base - - def _write_script(self, names, shebang, script_bytes, filenames, ext): - use_launcher = self.add_launchers and self._is_nt - linesep = os.linesep.encode('utf-8') - if not shebang.endswith(linesep): - shebang += linesep - if not use_launcher: - script_bytes = shebang + script_bytes - else: # pragma: no cover - if ext == 'py': - launcher = self._get_launcher('t') - else: - launcher = self._get_launcher('w') - stream = BytesIO() - with ZipFile(stream, 'w') as zf: - zf.writestr('__main__.py', script_bytes) - zip_data = stream.getvalue() - script_bytes = launcher + shebang + zip_data - for name in names: - outname = 
    def _write_script(self, names, shebang, script_bytes, filenames, ext):
        """
        Write the script bytes (with shebang, and on Windows optionally
        wrapped in a launcher exe) to each name in *names* under the
        target directory, appending the paths written to *filenames*.
        """
        use_launcher = self.add_launchers and self._is_nt
        linesep = os.linesep.encode('utf-8')
        if not shebang.endswith(linesep):
            shebang += linesep
        if not use_launcher:
            script_bytes = shebang + script_bytes
        else:  # pragma: no cover
            # 't' = console launcher, 'w' = GUI (windowed) launcher.
            if ext == 'py':
                launcher = self._get_launcher('t')
            else:
                launcher = self._get_launcher('w')
            # Launcher exes execute an appended zip containing __main__.py.
            stream = BytesIO()
            with ZipFile(stream, 'w') as zf:
                zf.writestr('__main__.py', script_bytes)
            zip_data = stream.getvalue()
            script_bytes = launcher + shebang + zip_data
        for name in names:
            outname = os.path.join(self.target_dir, name)
            if use_launcher:  # pragma: no cover
                n, e = os.path.splitext(outname)
                if e.startswith('.py'):
                    outname = n
                outname = '%s.exe' % outname
                try:
                    self._fileop.write_binary_file(outname, script_bytes)
                except Exception:
                    # Failed writing an executable - it might be in use.
                    logger.warning('Failed to write executable - trying to '
                                   'use .deleteme logic')
                    dfname = '%s.deleteme' % outname
                    if os.path.exists(dfname):
                        os.remove(dfname)       # Not allowed to fail here
                    os.rename(outname, dfname)  # nor here
                    self._fileop.write_binary_file(outname, script_bytes)
                    logger.debug('Able to replace executable using '
                                 '.deleteme logic')
                    try:
                        os.remove(dfname)
                    except Exception:
                        pass    # still in use - ignore error
            else:
                if self._is_nt and not outname.endswith('.' + ext):  # pragma: no cover
                    outname = '%s.%s' % (outname, ext)
                if os.path.exists(outname) and not self.clobber:
                    logger.warning('Skipping existing file %s', outname)
                    continue
                self._fileop.write_binary_file(outname, script_bytes)
                if self.set_mode:
                    self._fileop.set_executable_mode([outname])
            filenames.append(outname)

    def _make_script(self, entry, filenames, options=None):
        """
        Generate a wrapper script for an export entry, under every
        requested naming variant ('', 'X', 'X.Y').
        """
        post_interp = b''
        if options:
            args = options.get('interpreter_args', [])
            if args:
                args = ' %s' % ' '.join(args)
                post_interp = args.encode('utf-8')
        shebang = self._get_shebang('utf-8', post_interp, options=options)
        script = self._get_script_text(entry).encode('utf-8')
        name = entry.name
        scriptnames = set()
        if '' in self.variants:
            scriptnames.add(name)
        if 'X' in self.variants:
            scriptnames.add('%s%s' % (name, sys.version[0]))
        if 'X.Y' in self.variants:
            scriptnames.add('%s-%s' % (name, sys.version[:3]))
        if options and options.get('gui', False):
            ext = 'pyw'
        else:
            ext = 'py'
        self._write_script(scriptnames, shebang, script, filenames, ext)

    def _copy_script(self, script, filenames):
        """
        Copy a source script to the target directory, rewriting its
        shebang line if the first line invokes Python.
        """
        adjust = False
        script = os.path.join(self.source_dir, convert_path(script))
        outname = os.path.join(self.target_dir, os.path.basename(script))
        if not self.force and not self._fileop.newer(script, outname):
            logger.debug('not copying %s (up-to-date)', script)
            return

        # Always open the file, but ignore failures in dry-run mode --
        # that way, we'll get accurate feedback if we can read the
        # script.
        try:
            f = open(script, 'rb')
        except IOError:  # pragma: no cover
            if not self.dry_run:
                raise
            f = None
        else:
            first_line = f.readline()
            if not first_line:  # pragma: no cover
                logger.warning('%s: %s is an empty file (skipping)',
                               self.get_command_name(), script)
                return

            match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
            if match:
                adjust = True
                post_interp = match.group(1) or b''

        if not adjust:
            if f:
                f.close()
            self._fileop.copy_file(script, outname)
            if self.set_mode:
                self._fileop.set_executable_mode([outname])
            filenames.append(outname)
        else:
            logger.info('copying and adjusting %s -> %s', script,
                        self.target_dir)
            if not self._fileop.dry_run:
                encoding, lines = detect_encoding(f.readline)
                f.seek(0)
                shebang = self._get_shebang(encoding, post_interp)
                if b'pythonw' in first_line:  # pragma: no cover
                    ext = 'pyw'
                else:
                    ext = 'py'
                n = os.path.basename(outname)
                self._write_script([n], shebang, f.read(), filenames, ext)
            if f:
                f.close()

    @property
    def dry_run(self):
        # Delegated to the file operator, which actually suppresses writes.
        return self._fileop.dry_run

    @dry_run.setter
    def dry_run(self, value):
        self._fileop.dry_run = value

    if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'):  # pragma: no cover
        # Executable launcher support.
        # Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/

        def _get_launcher(self, kind):
            # 'P' is the size of a pointer; 8 bytes means a 64-bit build.
            if struct.calcsize('P') == 8:   # 64-bit
                bits = '64'
            else:
                bits = '32'
            name = '%s%s.exe' % (kind, bits)
            # Issue 31: don't hardcode an absolute package name, but
            # determine it relative to the current package
            distlib_package = __name__.rsplit('.', 1)[0]
            result = finder(distlib_package).find(name).bytes
            return result

    # Public API follows

    def make(self, specification, options=None):
        """
        Make a script.

        :param specification: The specification, which is either a valid export
                              entry specification (to make a script from a
                              callable) or a filename (to make a script by
                              copying from a source location).
        :param options: A dictionary of options controlling script generation.
        :return: A list of all absolute pathnames written to.
        """
        filenames = []
        entry = get_export_entry(specification)
        if entry is None:
            self._copy_script(specification, filenames)
        else:
            self._make_script(entry, filenames, options=options)
        return filenames
    def make_multiple(self, specifications, options=None):
        """
        Take a list of specifications and make scripts from them.

        :param specifications: A list of specifications.
        :param options: A dictionary of options controlling script generation.
        :return: A list of all absolute pathnames written to.
        """
        filenames = []
        for specification in specifications:
            filenames.extend(self.make(specification, options))
        return filenames
#
# Requirement parsing code as per PEP 508
#

IDENTIFIER = re.compile(r'^([\w\.-]+)\s*')
VERSION_IDENTIFIER = re.compile(r'^([\w\.*+-]+)\s*')
COMPARE_OP = re.compile(r'^(<=?|>=?|={2,3}|[~!]=)\s*')
MARKER_OP = re.compile(r'^((<=?)|(>=?)|={2,3}|[~!]=|in|not\s+in)\s*')
OR = re.compile(r'^or\b\s*')
AND = re.compile(r'^and\b\s*')
NON_SPACE = re.compile(r'(\S+)\s*')
STRING_CHUNK = re.compile(r'([\s\w\.{}()*+#:;,/?!~`@$%^&=|<>\[\]-]+)')


def parse_marker(marker_string):
    """
    Parse a marker string and return a dictionary containing a marker expression.

    The dictionary will contain keys "op", "lhs" and "rhs" for non-terminals in
    the expression grammar, or strings. A string contained in quotes is to be
    interpreted as a literal string, and a string not contained in quotes is a
    variable (such as os_name).
    """
    # Recursive-descent parser. Each helper consumes input from the front
    # of ``remaining`` and returns (parsed-value, unconsumed-remainder).
    def marker_var(remaining):
        # either identifier, or literal string
        m = IDENTIFIER.match(remaining)
        if m:
            result = m.groups()[0]
            remaining = remaining[m.end():]
        elif not remaining:
            raise SyntaxError('unexpected end of input')
        else:
            q = remaining[0]
            if q not in '\'"':
                raise SyntaxError('invalid expression: %s' % remaining)
            # ``oq`` is the "other" quote char, allowed inside the literal.
            oq = '\'"'.replace(q, '')
            remaining = remaining[1:]
            parts = [q]
            while remaining:
                # either a string chunk, or oq, or q to terminate
                if remaining[0] == q:
                    break
                elif remaining[0] == oq:
                    parts.append(oq)
                    remaining = remaining[1:]
                else:
                    m = STRING_CHUNK.match(remaining)
                    if not m:
                        raise SyntaxError('error in string literal: %s' % remaining)
                    parts.append(m.groups()[0])
                    remaining = remaining[m.end():]
            else:
                s = ''.join(parts)
                raise SyntaxError('unterminated string: %s' % s)
            parts.append(q)
            result = ''.join(parts)
            remaining = remaining[1:].lstrip() # skip past closing quote
        return result, remaining

    def marker_expr(remaining):
        if remaining and remaining[0] == '(':
            result, remaining = marker(remaining[1:].lstrip())
            if remaining[0] != ')':
                raise SyntaxError('unterminated parenthesis: %s' % remaining)
            remaining = remaining[1:].lstrip()
        else:
            lhs, remaining = marker_var(remaining)
            while remaining:
                m = MARKER_OP.match(remaining)
                if not m:
                    break
                op = m.groups()[0]
                remaining = remaining[m.end():]
                rhs, remaining = marker_var(remaining)
                lhs = {'op': op, 'lhs': lhs, 'rhs': rhs}
            result = lhs
        return result, remaining

    def marker_and(remaining):
        # 'and' binds tighter than 'or', so it sits below marker().
        lhs, remaining = marker_expr(remaining)
        while remaining:
            m = AND.match(remaining)
            if not m:
                break
            remaining = remaining[m.end():]
            rhs, remaining = marker_expr(remaining)
            lhs = {'op': 'and', 'lhs': lhs, 'rhs': rhs}
        return lhs, remaining

    def marker(remaining):
        lhs, remaining = marker_and(remaining)
        while remaining:
            m = OR.match(remaining)
            if not m:
                break
            remaining = remaining[m.end():]
            rhs, remaining = marker_and(remaining)
            lhs = {'op': 'or', 'lhs': lhs, 'rhs': rhs}
        return lhs, remaining

    return marker(marker_string)


def parse_requirement(req):
    """
    Parse a requirement passed in as a string. Return a Container
    whose attributes contain the various parts of the requirement.
    """
    remaining = req.strip()
    # Blank lines and comments parse to None rather than raising.
    if not remaining or remaining.startswith('#'):
        return None
    m = IDENTIFIER.match(remaining)
    if not m:
        raise SyntaxError('name expected: %s' % remaining)
    distname = m.groups()[0]
    remaining = remaining[m.end():]
    extras = mark_expr = versions = uri = None
    if remaining and remaining[0] == '[':
        # Parse the comma-separated extras list inside [...].
        i = remaining.find(']', 1)
        if i < 0:
            raise SyntaxError('unterminated extra: %s' % remaining)
        s = remaining[1:i]
        remaining = remaining[i + 1:].lstrip()
        extras = []
        while s:
            m = IDENTIFIER.match(s)
            if not m:
                raise SyntaxError('malformed extra: %s' % s)
            extras.append(m.groups()[0])
            s = s[m.end():]
            if not s:
                break
            if s[0] != ',':
                raise SyntaxError('comma expected in extras: %s' % s)
            s = s[1:].lstrip()
        if not extras:
            extras = None
    if remaining:
        if remaining[0] == '@':
            # it's a URI
            remaining = remaining[1:].lstrip()
            m = NON_SPACE.match(remaining)
            if not m:
                raise SyntaxError('invalid URI: %s' % remaining)
            uri = m.groups()[0]
            t = urlparse(uri)
            # there are issues with Python and URL parsing, so this test
            # is a bit crude. See bpo-20271, bpo-23505. Python doesn't
            # always parse invalid URLs correctly - it should raise
            # exceptions for malformed URLs
            if not (t.scheme and t.netloc):
                raise SyntaxError('Invalid URL: %s' % uri)
            remaining = remaining[m.end():].lstrip()
        else:

            def get_versions(ver_remaining):
                """
                Return a list of operator, version tuples if any are
                specified, else None.
                """
                m = COMPARE_OP.match(ver_remaining)
                versions = None
                if m:
                    versions = []
                    while True:
                        op = m.groups()[0]
                        ver_remaining = ver_remaining[m.end():]
                        m = VERSION_IDENTIFIER.match(ver_remaining)
                        if not m:
                            raise SyntaxError('invalid version: %s' % ver_remaining)
                        v = m.groups()[0]
                        versions.append((op, v))
                        ver_remaining = ver_remaining[m.end():]
                        if not ver_remaining or ver_remaining[0] != ',':
                            break
                        ver_remaining = ver_remaining[1:].lstrip()
                        m = COMPARE_OP.match(ver_remaining)
                        if not m:
                            raise SyntaxError('invalid constraint: %s' % ver_remaining)
                    if not versions:
                        versions = None
                return versions, ver_remaining

            if remaining[0] != '(':
                versions, remaining = get_versions(remaining)
            else:
                i = remaining.find(')', 1)
                if i < 0:
                    raise SyntaxError('unterminated parenthesis: %s' % remaining)
                s = remaining[1:i]
                remaining = remaining[i + 1:].lstrip()
                # As a special diversion from PEP 508, allow a version number
                # a.b.c in parentheses as a synonym for ~= a.b.c (because this
                # is allowed in earlier PEPs)
                if COMPARE_OP.match(s):
                    versions, _ = get_versions(s)
                else:
                    m = VERSION_IDENTIFIER.match(s)
                    if not m:
                        raise SyntaxError('invalid constraint: %s' % s)
                    v = m.groups()[0]
                    s = s[m.end():].lstrip()
                    if s:
                        raise SyntaxError('invalid constraint: %s' % s)
                    versions = [('~=', v)]

    if remaining:
        # A trailing environment marker is introduced by ';'.
        if remaining[0] != ';':
            raise SyntaxError('invalid requirement: %s' % remaining)
        remaining = remaining[1:].lstrip()

        mark_expr, remaining = parse_marker(remaining)

    if remaining and remaining[0] != '#':
        raise SyntaxError('unexpected trailing data: %s' % remaining)

    if not versions:
        rs = distname
    else:
        rs = '%s %s' % (distname, ', '.join(['%s %s' % con for con in versions]))
    return Container(name=distname, extras=extras, constraints=versions,
                     marker=mark_expr, url=uri, requirement=rs)
def get_resources_dests(resources_root, rules):
    """Find destinations for resources files.

    :param resources_root: Root directory the (base, suffix, dest) glob
                           rules are resolved against.
    :param rules: Iterable of (base, suffix, dest) triples; a dest of
                  None removes previously-matched entries.
    :return: Mapping of resource file (relative to the root) to its
             destination path.
    """

    def get_rel_path(root, path):
        # normalizes and returns a lstripped-/-separated path
        root = root.replace(os.path.sep, '/')
        path = path.replace(os.path.sep, '/')
        assert path.startswith(root)
        return path[len(root):].lstrip('/')

    destinations = {}
    for base, suffix, dest in rules:
        prefix = os.path.join(resources_root, base)
        for abs_base in iglob(prefix):
            abs_glob = os.path.join(abs_base, suffix)
            for abs_path in iglob(abs_glob):
                resource_file = get_rel_path(resources_root, abs_path)
                if dest is None:    # remove the entry if it was here
                    destinations.pop(resource_file, None)
                else:
                    rel_path = get_rel_path(abs_base, abs_path)
                    rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
                    destinations[resource_file] = rel_dest + '/' + rel_path
    return destinations


def in_venv():
    """Return True if running inside a virtualenv or PEP 405 venv."""
    if hasattr(sys, 'real_prefix'):
        # virtualenv venvs
        result = True
    else:
        # PEP 405 venvs
        result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
    return result


def get_executable():
    """Return the normalised, decoded path of the running interpreter."""
# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
# changes to the stub launcher mean that sys.executable always points
# to the stub on OS X
#    if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
#                                     in os.environ):
#        result = os.environ['__PYVENV_LAUNCHER__']
#    else:
#        result = sys.executable
#    return result
    result = os.path.normcase(sys.executable)
    if not isinstance(result, text_type):
        result = fsdecode(result)
    return result


def proceed(prompt, allowed_chars, error_prompt=None, default=None):
    """
    Prompt the user until the first character of their answer is in
    *allowed_chars*; return that character (lower-cased).
    """
    p = prompt
    while True:
        s = raw_input(p)
        p = prompt
        if not s and default:
            s = default
        if s:
            c = s[0].lower()
            if c in allowed_chars:
                break
            if error_prompt:
                p = '%c: %s\n%s' % (c, error_prompt, prompt)
    return c


def extract_by_key(d, keys):
    """Return a new dict with only *keys* (a list or space-separated string)."""
    if isinstance(keys, string_types):
        keys = keys.split()
    result = {}
    for key in keys:
        if key in d:
            result[key] = d[key]
    return result

def read_exports(stream):
    """
    Read an exports mapping (group -> name -> ExportEntry) from *stream*,
    accepting either the JSON metadata format or the legacy ini format.
    """
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getreader('utf-8')(stream)
    # Try to load as JSON, falling back on legacy format
    data = stream.read()
    stream = StringIO(data)
    try:
        jdata = json.load(stream)
        result = jdata['extensions']['python.exports']['exports']
        for group, entries in result.items():
            for k, v in entries.items():
                s = '%s = %s' % (k, v)
                entry = get_export_entry(s)
                assert entry is not None
                entries[k] = entry
        return result
    except Exception:
        stream.seek(0, 0)

    def read_stream(cp, stream):
        # read_file is the Python 3 spelling; readfp the Python 2 one.
        if hasattr(cp, 'read_file'):
            cp.read_file(stream)
        else:
            cp.readfp(stream)

    cp = configparser.ConfigParser()
    try:
        read_stream(cp, stream)
    except configparser.MissingSectionHeaderError:
        # Possibly an indented legacy file; dedent and retry.
        stream.close()
        data = textwrap.dedent(data)
        stream = StringIO(data)
        read_stream(cp, stream)

    result = {}
    for key in cp.sections():
        result[key] = entries = {}
        for name, value in cp.items(key):
            s = '%s = %s' % (name, value)
            entry = get_export_entry(s)
            assert entry is not None
            entries[name] = entry
    return result


def write_exports(exports, stream):
    """Write an exports mapping to *stream* in ini format."""
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getwriter('utf-8')(stream)
    cp = configparser.ConfigParser()
    for k, v in exports.items():
        # TODO check k, v for valid values
        cp.add_section(k)
        for entry in v.values():
            if entry.suffix is None:
                s = entry.prefix
            else:
                s = '%s:%s' % (entry.prefix, entry.suffix)
            if entry.flags:
                s = '%s [%s]' % (s, ', '.join(entry.flags))
            cp.set(k, entry.name, s)
    cp.write(stream)


@contextlib.contextmanager
def tempdir():
    """Context manager yielding a temporary directory, removed on exit."""
    td = tempfile.mkdtemp()
    try:
        yield td
    finally:
        shutil.rmtree(td)

@contextlib.contextmanager
def chdir(d):
    """Context manager that runs its body with *d* as the working directory."""
    cwd = os.getcwd()
    try:
        os.chdir(d)
        yield
    finally:
        os.chdir(cwd)


@contextlib.contextmanager
def socket_timeout(seconds=15):
    """Context manager that temporarily sets the default socket timeout."""
    cto = socket.getdefaulttimeout()
    try:
        socket.setdefaulttimeout(seconds)
        yield
    finally:
        socket.setdefaulttimeout(cto)
class cached_property(object):
    """
    Non-data descriptor that computes an attribute's value once per
    instance. On first access the wrapped function is called and its
    result stored on the instance under the same name, shadowing the
    descriptor for all subsequent lookups.
    """
    def __init__(self, func):
        self.func = func

    def __get__(self, obj, cls=None):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        result = self.func(obj)
        # Cache on the instance; later lookups never reach this descriptor.
        object.__setattr__(obj, self.func.__name__, result)
        return result

def convert_path(pathname):
    """Return 'pathname' as a name that will work on the native filesystem.

    The path is split on '/' and put back together again using the current
    directory separator. Needed because filenames in the setup script are
    always supplied in Unix style, and have to be converted to the local
    convention before we can actually use them in the filesystem. Raises
    ValueError on non-Unix-ish systems if 'pathname' either starts or
    ends with a slash.
    """
    # On Unix-like systems the path is already in native form.
    if os.sep == '/':
        return pathname
    if not pathname:
        return pathname
    if pathname.startswith('/'):
        raise ValueError("path '%s' cannot be absolute" % pathname)
    if pathname.endswith('/'):
        raise ValueError("path '%s' cannot end with '/'" % pathname)

    # Drop any '.' components, then rejoin with the native separator.
    components = [c for c in pathname.split('/') if c != os.curdir]
    if not components:
        return os.curdir
    return os.path.join(*components)
    def newer(self, source, target):
        """Tell if the target is newer than the source.

        Returns true if 'source' exists and is more recently modified than
        'target', or if 'source' exists and 'target' doesn't.

        Returns false if both exist and 'target' is the same age or younger
        than 'source'. Raise DistlibException if 'source' does not exist.

        Note that this test is not very accurate: files created in the same
        second will have the same "age".
        """
        if not os.path.exists(source):
            raise DistlibException("file '%r' does not exist" %
                                   os.path.abspath(source))
        if not os.path.exists(target):
            return True

        return os.stat(source).st_mtime > os.stat(target).st_mtime

    def copy_file(self, infile, outfile, check=True):
        """Copy a file respecting dry-run and force flags.
        """
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying %s to %s', infile, outfile)
        if not self.dry_run:
            msg = None
            if check:
                # Refuse to clobber symlinks or non-regular files.
                if os.path.islink(outfile):
                    msg = '%s is a symlink' % outfile
                elif os.path.exists(outfile) and not os.path.isfile(outfile):
                    msg = '%s is a non-regular file' % outfile
            if msg:
                raise ValueError(msg + ' which would be overwritten')
            shutil.copyfile(infile, outfile)
        self.record_as_written(outfile)

    def copy_stream(self, instream, outfile, encoding=None):
        """Copy the contents of *instream* to the file *outfile*."""
        assert not os.path.isdir(outfile)
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying stream %s to %s', instream, outfile)
        if not self.dry_run:
            if encoding is None:
                outstream = open(outfile, 'wb')
            else:
                outstream = codecs.open(outfile, 'w', encoding=encoding)
            try:
                shutil.copyfileobj(instream, outstream)
            finally:
                outstream.close()
        self.record_as_written(outfile)

    def write_binary_file(self, path, data):
        """Write *data* bytes to *path*, replacing any existing file."""
        self.ensure_dir(os.path.dirname(path))
        if not self.dry_run:
            if os.path.exists(path):
                os.remove(path)
            with open(path, 'wb') as f:
                f.write(data)
        self.record_as_written(path)

    def write_text_file(self, path, data, encoding):
        """Encode *data* with *encoding* and write it to *path*."""
        self.write_binary_file(path, data.encode(encoding))

    def set_mode(self, bits, mask, files):
        """OR *bits* into each file's mode, then AND with *mask* (POSIX only)."""
        if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
            # Set the executable bits (owner, group, and world) on
            # all the files specified.
            for f in files:
                if self.dry_run:
                    logger.info("changing mode of %s", f)
                else:
                    mode = (os.stat(f).st_mode | bits) & mask
                    logger.info("changing mode of %s to %o", f, mode)
                    os.chmod(f, mode)

    set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)

    def ensure_dir(self, path):
        """Create *path* (and any missing ancestors), recording if enabled."""
        path = os.path.abspath(path)
        if path not in self.ensured and not os.path.exists(path):
            self.ensured.add(path)
            d, f = os.path.split(path)
            self.ensure_dir(d)
            logger.info('Creating %s' % path)
            if not self.dry_run:
                os.mkdir(path)
            if self.record:
                self.dirs_created.add(path)

    def byte_compile(self, path, optimize=False, force=False, prefix=None, hashed_invalidation=False):
        """
        Byte-compile *path* to its cache location and return the .pyc path.

        :param optimize: Compile with optimization if true.
        :param force: Recompile even if the cached file is up to date.
        :param prefix: If given, stripped from *path* for diagnostics.
        :param hashed_invalidation: Use PEP 552 hash-based invalidation
                                    where the interpreter supports it.
        """
        dpath = cache_from_source(path, not optimize)
        logger.info('Byte-compiling %s to %s', path, dpath)
        if not self.dry_run:
            if force or self.newer(path, dpath):
                if not prefix:
                    diagpath = None
                else:
                    assert path.startswith(prefix)
                    diagpath = path[len(prefix):]
            compile_kwargs = {}
            if hashed_invalidation and hasattr(py_compile, 'PycInvalidationMode'):
                compile_kwargs['invalidation_mode'] = py_compile.PycInvalidationMode.CHECKED_HASH
            py_compile.compile(path, dpath, diagpath, True, **compile_kwargs)     # raise error
        self.record_as_written(dpath)
        return dpath

    def ensure_removed(self, path):
        """Remove a file, link or directory tree if it exists."""
        if os.path.exists(path):
            if os.path.isdir(path) and not os.path.islink(path):
                logger.debug('Removing directory tree at %s', path)
                if not self.dry_run:
                    shutil.rmtree(path)
                if self.record:
                    if path in self.dirs_created:
                        self.dirs_created.remove(path)
            else:
                if os.path.islink(path):
                    s = 'link'
                else:
                    s = 'file'
                logger.debug('Removing %s %s', s, path)
                if not self.dry_run:
                    os.remove(path)
                if self.record:
                    if path in self.files_written:
                        self.files_written.remove(path)
break - parent = os.path.dirname(path) - if parent == path: - break - path = parent - return result - - def commit(self): - """ - Commit recorded changes, turn off recording, return - changes. - """ - assert self.record - result = self.files_written, self.dirs_created - self._init_record() - return result - - def rollback(self): - if not self.dry_run: - for f in list(self.files_written): - if os.path.exists(f): - os.remove(f) - # dirs should all be empty now, except perhaps for - # __pycache__ subdirs - # reverse so that subdirs appear before their parents - dirs = sorted(self.dirs_created, reverse=True) - for d in dirs: - flist = os.listdir(d) - if flist: - assert flist == ['__pycache__'] - sd = os.path.join(d, flist[0]) - os.rmdir(sd) - os.rmdir(d) # should fail if non-empty - self._init_record() - -def resolve(module_name, dotted_path): - if module_name in sys.modules: - mod = sys.modules[module_name] - else: - mod = __import__(module_name) - if dotted_path is None: - result = mod - else: - parts = dotted_path.split('.') - result = getattr(mod, parts.pop(0)) - for p in parts: - result = getattr(result, p) - return result - - -class ExportEntry(object): - def __init__(self, name, prefix, suffix, flags): - self.name = name - self.prefix = prefix - self.suffix = suffix - self.flags = flags - - @cached_property - def value(self): - return resolve(self.prefix, self.suffix) - - def __repr__(self): # pragma: no cover - return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix, - self.suffix, self.flags) - - def __eq__(self, other): - if not isinstance(other, ExportEntry): - result = False - else: - result = (self.name == other.name and - self.prefix == other.prefix and - self.suffix == other.suffix and - self.flags == other.flags) - return result - - __hash__ = object.__hash__ - - -ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+) - \s*=\s*(?P<callable>(\w+)([:\.]\w+)*) - \s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])? 
- ''', re.VERBOSE) - -def get_export_entry(specification): - m = ENTRY_RE.search(specification) - if not m: - result = None - if '[' in specification or ']' in specification: - raise DistlibException("Invalid specification " - "'%s'" % specification) - else: - d = m.groupdict() - name = d['name'] - path = d['callable'] - colons = path.count(':') - if colons == 0: - prefix, suffix = path, None - else: - if colons != 1: - raise DistlibException("Invalid specification " - "'%s'" % specification) - prefix, suffix = path.split(':') - flags = d['flags'] - if flags is None: - if '[' in specification or ']' in specification: - raise DistlibException("Invalid specification " - "'%s'" % specification) - flags = [] - else: - flags = [f.strip() for f in flags.split(',')] - result = ExportEntry(name, prefix, suffix, flags) - return result - - -def get_cache_base(suffix=None): - """ - Return the default base location for distlib caches. If the directory does - not exist, it is created. Use the suffix provided for the base directory, - and default to '.distlib' if it isn't provided. - - On Windows, if LOCALAPPDATA is defined in the environment, then it is - assumed to be a directory, and will be the parent directory of the result. - On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home - directory - using os.expanduser('~') - will be the parent directory of - the result. - - The result is just the directory '.distlib' in the parent directory as - determined above, or with the name specified with ``suffix``. 
- """ - if suffix is None: - suffix = '.distlib' - if os.name == 'nt' and 'LOCALAPPDATA' in os.environ: - result = os.path.expandvars('$localappdata') - else: - # Assume posix, or old Windows - result = os.path.expanduser('~') - # we use 'isdir' instead of 'exists', because we want to - # fail if there's a file with that name - if os.path.isdir(result): - usable = os.access(result, os.W_OK) - if not usable: - logger.warning('Directory exists but is not writable: %s', result) - else: - try: - os.makedirs(result) - usable = True - except OSError: - logger.warning('Unable to create %s', result, exc_info=True) - usable = False - if not usable: - result = tempfile.mkdtemp() - logger.warning('Default location unusable, using %s', result) - return os.path.join(result, suffix) - - -def path_to_cache_dir(path): - """ - Convert an absolute path to a directory name for use in a cache. - - The algorithm used is: - - #. On Windows, any ``':'`` in the drive is replaced with ``'---'``. - #. Any occurrence of ``os.sep`` is replaced with ``'--'``. - #. ``'.cache'`` is appended. 
- """ - d, p = os.path.splitdrive(os.path.abspath(path)) - if d: - d = d.replace(':', '---') - p = p.replace(os.sep, '--') - return d + p + '.cache' - - -def ensure_slash(s): - if not s.endswith('/'): - return s + '/' - return s - - -def parse_credentials(netloc): - username = password = None - if '@' in netloc: - prefix, netloc = netloc.split('@', 1) - if ':' not in prefix: - username = prefix - else: - username, password = prefix.split(':', 1) - return username, password, netloc - - -def get_process_umask(): - result = os.umask(0o22) - os.umask(result) - return result - -def is_string_sequence(seq): - result = True - i = None - for i, s in enumerate(seq): - if not isinstance(s, string_types): - result = False - break - assert i is not None - return result - -PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-' - '([a-z0-9_.+-]+)', re.I) -PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)') - - -def split_filename(filename, project_name=None): - """ - Extract name, version, python version from a filename (no extension) - - Return name, version, pyver or None - """ - result = None - pyver = None - filename = unquote(filename).replace(' ', '-') - m = PYTHON_VERSION.search(filename) - if m: - pyver = m.group(1) - filename = filename[:m.start()] - if project_name and len(filename) > len(project_name) + 1: - m = re.match(re.escape(project_name) + r'\b', filename) - if m: - n = m.end() - result = filename[:n], filename[n + 1:], pyver - if result is None: - m = PROJECT_NAME_AND_VERSION.match(filename) - if m: - result = m.group(1), m.group(3), pyver - return result - -# Allow spaces in name because of legacy dists like "Twisted Core" -NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*' - r'\(\s*(?P<ver>[^\s)]+)\)$') - -def parse_name_and_version(p): - """ - A utility method used to get name and version from a string. - - From e.g. a Provides-Dist value. - - :param p: A value in a form 'foo (1.0)' - :return: The name and version as a tuple. 
- """ - m = NAME_VERSION_RE.match(p) - if not m: - raise DistlibException('Ill-formed name/version string: \'%s\'' % p) - d = m.groupdict() - return d['name'].strip().lower(), d['ver'] - -def get_extras(requested, available): - result = set() - requested = set(requested or []) - available = set(available or []) - if '*' in requested: - requested.remove('*') - result |= available - for r in requested: - if r == '-': - result.add(r) - elif r.startswith('-'): - unwanted = r[1:] - if unwanted not in available: - logger.warning('undeclared extra: %s' % unwanted) - if unwanted in result: - result.remove(unwanted) - else: - if r not in available: - logger.warning('undeclared extra: %s' % r) - result.add(r) - return result -# -# Extended metadata functionality -# - -def _get_external_data(url): - result = {} - try: - # urlopen might fail if it runs into redirections, - # because of Python issue #13696. Fixed in locators - # using a custom redirect handler. - resp = urlopen(url) - headers = resp.info() - ct = headers.get('Content-Type') - if not ct.startswith('application/json'): - logger.debug('Unexpected response for JSON request: %s', ct) - else: - reader = codecs.getreader('utf-8')(resp) - #data = reader.read().decode('utf-8') - #result = json.loads(data) - result = json.load(reader) - except Exception as e: - logger.exception('Failed to get external data for %s: %s', url, e) - return result - -_external_data_base_url = 'https://www.red-dove.com/pypi/projects/' - -def get_project_data(name): - url = '%s/%s/project.json' % (name[0].upper(), name) - url = urljoin(_external_data_base_url, url) - result = _get_external_data(url) - return result - -def get_package_data(name, version): - url = '%s/%s/package-%s.json' % (name[0].upper(), name, version) - url = urljoin(_external_data_base_url, url) - return _get_external_data(url) - - -class Cache(object): - """ - A class implementing a cache for resources that need to live in the file system - e.g. shared libraries. 
This class was moved from resources to here because it - could be used by other modules, e.g. the wheel module. - """ - - def __init__(self, base): - """ - Initialise an instance. - - :param base: The base directory where the cache should be located. - """ - # we use 'isdir' instead of 'exists', because we want to - # fail if there's a file with that name - if not os.path.isdir(base): # pragma: no cover - os.makedirs(base) - if (os.stat(base).st_mode & 0o77) != 0: - logger.warning('Directory \'%s\' is not private', base) - self.base = os.path.abspath(os.path.normpath(base)) - - def prefix_to_dir(self, prefix): - """ - Converts a resource prefix to a directory name in the cache. - """ - return path_to_cache_dir(prefix) - - def clear(self): - """ - Clear the cache. - """ - not_removed = [] - for fn in os.listdir(self.base): - fn = os.path.join(self.base, fn) - try: - if os.path.islink(fn) or os.path.isfile(fn): - os.remove(fn) - elif os.path.isdir(fn): - shutil.rmtree(fn) - except Exception: - not_removed.append(fn) - return not_removed - - -class EventMixin(object): - """ - A very simple publish/subscribe system. - """ - def __init__(self): - self._subscribers = {} - - def add(self, event, subscriber, append=True): - """ - Add a subscriber for an event. - - :param event: The name of an event. - :param subscriber: The subscriber to be added (and called when the - event is published). - :param append: Whether to append or prepend the subscriber to an - existing subscriber list for the event. - """ - subs = self._subscribers - if event not in subs: - subs[event] = deque([subscriber]) - else: - sq = subs[event] - if append: - sq.append(subscriber) - else: - sq.appendleft(subscriber) - - def remove(self, event, subscriber): - """ - Remove a subscriber for an event. - - :param event: The name of an event. - :param subscriber: The subscriber to be removed. 
- """ - subs = self._subscribers - if event not in subs: - raise ValueError('No subscribers: %r' % event) - subs[event].remove(subscriber) - - def get_subscribers(self, event): - """ - Return an iterator for the subscribers for an event. - :param event: The event to return subscribers for. - """ - return iter(self._subscribers.get(event, ())) - - def publish(self, event, *args, **kwargs): - """ - Publish a event and return a list of values returned by its - subscribers. - - :param event: The event to publish. - :param args: The positional arguments to pass to the event's - subscribers. - :param kwargs: The keyword arguments to pass to the event's - subscribers. - """ - result = [] - for subscriber in self.get_subscribers(event): - try: - value = subscriber(event, *args, **kwargs) - except Exception: - logger.exception('Exception during event publication') - value = None - result.append(value) - logger.debug('publish %s: args = %s, kwargs = %s, result = %s', - event, args, kwargs, result) - return result - -# -# Simple sequencing -# -class Sequencer(object): - def __init__(self): - self._preds = {} - self._succs = {} - self._nodes = set() # nodes with no preds/succs - - def add_node(self, node): - self._nodes.add(node) - - def remove_node(self, node, edges=False): - if node in self._nodes: - self._nodes.remove(node) - if edges: - for p in set(self._preds.get(node, ())): - self.remove(p, node) - for s in set(self._succs.get(node, ())): - self.remove(node, s) - # Remove empties - for k, v in list(self._preds.items()): - if not v: - del self._preds[k] - for k, v in list(self._succs.items()): - if not v: - del self._succs[k] - - def add(self, pred, succ): - assert pred != succ - self._preds.setdefault(succ, set()).add(pred) - self._succs.setdefault(pred, set()).add(succ) - - def remove(self, pred, succ): - assert pred != succ - try: - preds = self._preds[succ] - succs = self._succs[pred] - except KeyError: # pragma: no cover - raise ValueError('%r not a successor of 
anything' % succ) - try: - preds.remove(pred) - succs.remove(succ) - except KeyError: # pragma: no cover - raise ValueError('%r not a successor of %r' % (succ, pred)) - - def is_step(self, step): - return (step in self._preds or step in self._succs or - step in self._nodes) - - def get_steps(self, final): - if not self.is_step(final): - raise ValueError('Unknown: %r' % final) - result = [] - todo = [] - seen = set() - todo.append(final) - while todo: - step = todo.pop(0) - if step in seen: - # if a step was already seen, - # move it to the end (so it will appear earlier - # when reversed on return) ... but not for the - # final step, as that would be confusing for - # users - if step != final: - result.remove(step) - result.append(step) - else: - seen.add(step) - result.append(step) - preds = self._preds.get(step, ()) - todo.extend(preds) - return reversed(result) - - @property - def strong_connections(self): - #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm - index_counter = [0] - stack = [] - lowlinks = {} - index = {} - result = [] - - graph = self._succs - - def strongconnect(node): - # set the depth index for this node to the smallest unused index - index[node] = index_counter[0] - lowlinks[node] = index_counter[0] - index_counter[0] += 1 - stack.append(node) - - # Consider successors - try: - successors = graph[node] - except Exception: - successors = [] - for successor in successors: - if successor not in lowlinks: - # Successor has not yet been visited - strongconnect(successor) - lowlinks[node] = min(lowlinks[node],lowlinks[successor]) - elif successor in stack: - # the successor is in the stack and hence in the current - # strongly connected component (SCC) - lowlinks[node] = min(lowlinks[node],index[successor]) - - # If `node` is a root node, pop the stack and generate an SCC - if lowlinks[node] == index[node]: - connected_component = [] - - while True: - successor = stack.pop() - connected_component.append(successor) - 
if successor == node: break - component = tuple(connected_component) - # storing the result - result.append(component) - - for node in graph: - if node not in lowlinks: - strongconnect(node) - - return result - - @property - def dot(self): - result = ['digraph G {'] - for succ in self._preds: - preds = self._preds[succ] - for pred in preds: - result.append(' %s -> %s;' % (pred, succ)) - for node in self._nodes: - result.append(' %s;' % node) - result.append('}') - return '\n'.join(result) - -# -# Unarchiving functionality for zip, tar, tgz, tbz, whl -# - -ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip', - '.tgz', '.tbz', '.whl') - -def unarchive(archive_filename, dest_dir, format=None, check=True): - - def check_path(path): - if not isinstance(path, text_type): - path = path.decode('utf-8') - p = os.path.abspath(os.path.join(dest_dir, path)) - if not p.startswith(dest_dir) or p[plen] != os.sep: - raise ValueError('path outside destination: %r' % p) - - dest_dir = os.path.abspath(dest_dir) - plen = len(dest_dir) - archive = None - if format is None: - if archive_filename.endswith(('.zip', '.whl')): - format = 'zip' - elif archive_filename.endswith(('.tar.gz', '.tgz')): - format = 'tgz' - mode = 'r:gz' - elif archive_filename.endswith(('.tar.bz2', '.tbz')): - format = 'tbz' - mode = 'r:bz2' - elif archive_filename.endswith('.tar'): - format = 'tar' - mode = 'r' - else: # pragma: no cover - raise ValueError('Unknown format for %r' % archive_filename) - try: - if format == 'zip': - archive = ZipFile(archive_filename, 'r') - if check: - names = archive.namelist() - for name in names: - check_path(name) - else: - archive = tarfile.open(archive_filename, mode) - if check: - names = archive.getnames() - for name in names: - check_path(name) - if format != 'zip' and sys.version_info[0] < 3: - # See Python issue 17153. 
If the dest path contains Unicode, - # tarfile extraction fails on Python 2.x if a member path name - # contains non-ASCII characters - it leads to an implicit - # bytes -> unicode conversion using ASCII to decode. - for tarinfo in archive.getmembers(): - if not isinstance(tarinfo.name, text_type): - tarinfo.name = tarinfo.name.decode('utf-8') - archive.extractall(dest_dir) - - finally: - if archive: - archive.close() - - -def zip_dir(directory): - """zip a directory tree into a BytesIO object""" - result = io.BytesIO() - dlen = len(directory) - with ZipFile(result, "w") as zf: - for root, dirs, files in os.walk(directory): - for name in files: - full = os.path.join(root, name) - rel = root[dlen:] - dest = os.path.join(rel, name) - zf.write(full, dest) - return result - -# -# Simple progress bar -# - -UNITS = ('', 'K', 'M', 'G','T','P') - - -class Progress(object): - unknown = 'UNKNOWN' - - def __init__(self, minval=0, maxval=100): - assert maxval is None or maxval >= minval - self.min = self.cur = minval - self.max = maxval - self.started = None - self.elapsed = 0 - self.done = False - - def update(self, curval): - assert self.min <= curval - assert self.max is None or curval <= self.max - self.cur = curval - now = time.time() - if self.started is None: - self.started = now - else: - self.elapsed = now - self.started - - def increment(self, incr): - assert incr >= 0 - self.update(self.cur + incr) - - def start(self): - self.update(self.min) - return self - - def stop(self): - if self.max is not None: - self.update(self.max) - self.done = True - - @property - def maximum(self): - return self.unknown if self.max is None else self.max - - @property - def percentage(self): - if self.done: - result = '100 %' - elif self.max is None: - result = ' ?? 
%' - else: - v = 100.0 * (self.cur - self.min) / (self.max - self.min) - result = '%3d %%' % v - return result - - def format_duration(self, duration): - if (duration <= 0) and self.max is None or self.cur == self.min: - result = '??:??:??' - #elif duration < 1: - # result = '--:--:--' - else: - result = time.strftime('%H:%M:%S', time.gmtime(duration)) - return result - - @property - def ETA(self): - if self.done: - prefix = 'Done' - t = self.elapsed - #import pdb; pdb.set_trace() - else: - prefix = 'ETA ' - if self.max is None: - t = -1 - elif self.elapsed == 0 or (self.cur == self.min): - t = 0 - else: - #import pdb; pdb.set_trace() - t = float(self.max - self.min) - t /= self.cur - self.min - t = (t - 1) * self.elapsed - return '%s: %s' % (prefix, self.format_duration(t)) - - @property - def speed(self): - if self.elapsed == 0: - result = 0.0 - else: - result = (self.cur - self.min) / self.elapsed - for unit in UNITS: - if result < 1000: - break - result /= 1000.0 - return '%d %sB/s' % (result, unit) - -# -# Glob functionality -# - -RICH_GLOB = re.compile(r'\{([^}]*)\}') -_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]') -_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$') - - -def iglob(path_glob): - """Extended globbing function that supports ** and {opt1,opt2,opt3}.""" - if _CHECK_RECURSIVE_GLOB.search(path_glob): - msg = """invalid glob %r: recursive glob "**" must be used alone""" - raise ValueError(msg % path_glob) - if _CHECK_MISMATCH_SET.search(path_glob): - msg = """invalid glob %r: mismatching set marker '{' or '}'""" - raise ValueError(msg % path_glob) - return _iglob(path_glob) - - -def _iglob(path_glob): - rich_path_glob = RICH_GLOB.split(path_glob, 1) - if len(rich_path_glob) > 1: - assert len(rich_path_glob) == 3, rich_path_glob - prefix, set, suffix = rich_path_glob - for item in set.split(','): - for path in _iglob(''.join((prefix, item, suffix))): - yield path - else: - if '**' not in path_glob: - for item in 
std_iglob(path_glob): - yield item - else: - prefix, radical = path_glob.split('**', 1) - if prefix == '': - prefix = '.' - if radical == '': - radical = '*' - else: - # we support both - radical = radical.lstrip('/') - radical = radical.lstrip('\\') - for path, dir, files in os.walk(prefix): - path = os.path.normpath(path) - for fn in _iglob(os.path.join(path, radical)): - yield fn - -if ssl: - from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname, - CertificateError) - - -# -# HTTPSConnection which verifies certificates/matches domains -# - - class HTTPSConnection(httplib.HTTPSConnection): - ca_certs = None # set this to the path to the certs file (.pem) - check_domain = True # only used if ca_certs is not None - - # noinspection PyPropertyAccess - def connect(self): - sock = socket.create_connection((self.host, self.port), self.timeout) - if getattr(self, '_tunnel_host', False): - self.sock = sock - self._tunnel() - - if not hasattr(ssl, 'SSLContext'): - # For 2.x - if self.ca_certs: - cert_reqs = ssl.CERT_REQUIRED - else: - cert_reqs = ssl.CERT_NONE - self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, - cert_reqs=cert_reqs, - ssl_version=ssl.PROTOCOL_SSLv23, - ca_certs=self.ca_certs) - else: # pragma: no cover - context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) - context.options |= ssl.OP_NO_SSLv2 - if self.cert_file: - context.load_cert_chain(self.cert_file, self.key_file) - kwargs = {} - if self.ca_certs: - context.verify_mode = ssl.CERT_REQUIRED - context.load_verify_locations(cafile=self.ca_certs) - if getattr(ssl, 'HAS_SNI', False): - kwargs['server_hostname'] = self.host - self.sock = context.wrap_socket(sock, **kwargs) - if self.ca_certs and self.check_domain: - try: - match_hostname(self.sock.getpeercert(), self.host) - logger.debug('Host verified: %s', self.host) - except CertificateError: # pragma: no cover - self.sock.shutdown(socket.SHUT_RDWR) - self.sock.close() - raise - - class HTTPSHandler(BaseHTTPSHandler): - def 
__init__(self, ca_certs, check_domain=True): - BaseHTTPSHandler.__init__(self) - self.ca_certs = ca_certs - self.check_domain = check_domain - - def _conn_maker(self, *args, **kwargs): - """ - This is called to create a connection instance. Normally you'd - pass a connection class to do_open, but it doesn't actually check for - a class, and just expects a callable. As long as we behave just as a - constructor would have, we should be OK. If it ever changes so that - we *must* pass a class, we'll create an UnsafeHTTPSConnection class - which just sets check_domain to False in the class definition, and - choose which one to pass to do_open. - """ - result = HTTPSConnection(*args, **kwargs) - if self.ca_certs: - result.ca_certs = self.ca_certs - result.check_domain = self.check_domain - return result - - def https_open(self, req): - try: - return self.do_open(self._conn_maker, req) - except URLError as e: - if 'certificate verify failed' in str(e.reason): - raise CertificateError('Unable to verify server certificate ' - 'for %s' % req.host) - else: - raise - - # - # To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The- - # Middle proxy using HTTP listens on port 443, or an index mistakenly serves - # HTML containing a http://xyz link when it should be https://xyz), - # you can use the following handler class, which does not allow HTTP traffic. - # - # It works by inheriting from HTTPHandler - so build_opener won't add a - # handler for HTTP itself. 
- # - class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler): - def http_open(self, req): - raise URLError('Unexpected HTTP request on what should be a secure ' - 'connection: %s' % req) - -# -# XML-RPC with timeouts -# - -_ver_info = sys.version_info[:2] - -if _ver_info == (2, 6): - class HTTP(httplib.HTTP): - def __init__(self, host='', port=None, **kwargs): - if port == 0: # 0 means use port 0, not the default port - port = None - self._setup(self._connection_class(host, port, **kwargs)) - - - if ssl: - class HTTPS(httplib.HTTPS): - def __init__(self, host='', port=None, **kwargs): - if port == 0: # 0 means use port 0, not the default port - port = None - self._setup(self._connection_class(host, port, **kwargs)) - - -class Transport(xmlrpclib.Transport): - def __init__(self, timeout, use_datetime=0): - self.timeout = timeout - xmlrpclib.Transport.__init__(self, use_datetime) - - def make_connection(self, host): - h, eh, x509 = self.get_host_info(host) - if _ver_info == (2, 6): - result = HTTP(h, timeout=self.timeout) - else: - if not self._connection or host != self._connection[0]: - self._extra_headers = eh - self._connection = host, httplib.HTTPConnection(h) - result = self._connection[1] - return result - -if ssl: - class SafeTransport(xmlrpclib.SafeTransport): - def __init__(self, timeout, use_datetime=0): - self.timeout = timeout - xmlrpclib.SafeTransport.__init__(self, use_datetime) - - def make_connection(self, host): - h, eh, kwargs = self.get_host_info(host) - if not kwargs: - kwargs = {} - kwargs['timeout'] = self.timeout - if _ver_info == (2, 6): - result = HTTPS(host, None, **kwargs) - else: - if not self._connection or host != self._connection[0]: - self._extra_headers = eh - self._connection = host, httplib.HTTPSConnection(h, None, - **kwargs) - result = self._connection[1] - return result - - -class ServerProxy(xmlrpclib.ServerProxy): - def __init__(self, uri, **kwargs): - self.timeout = timeout = kwargs.pop('timeout', None) - # The above classes only 
come into play if a timeout - # is specified - if timeout is not None: - scheme, _ = splittype(uri) - use_datetime = kwargs.get('use_datetime', 0) - if scheme == 'https': - tcls = SafeTransport - else: - tcls = Transport - kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime) - self.transport = t - xmlrpclib.ServerProxy.__init__(self, uri, **kwargs) - -# -# CSV functionality. This is provided because on 2.x, the csv module can't -# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files. -# - -def _csv_open(fn, mode, **kwargs): - if sys.version_info[0] < 3: - mode += 'b' - else: - kwargs['newline'] = '' - # Python 3 determines encoding from locale. Force 'utf-8' - # file encoding to match other forced utf-8 encoding - kwargs['encoding'] = 'utf-8' - return open(fn, mode, **kwargs) - - -class CSVBase(object): - defaults = { - 'delimiter': str(','), # The strs are used because we need native - 'quotechar': str('"'), # str in the csv API (2.x won't take - 'lineterminator': str('\n') # Unicode) - } - - def __enter__(self): - return self - - def __exit__(self, *exc_info): - self.stream.close() - - -class CSVReader(CSVBase): - def __init__(self, **kwargs): - if 'stream' in kwargs: - stream = kwargs['stream'] - if sys.version_info[0] >= 3: - # needs to be a text stream - stream = codecs.getreader('utf-8')(stream) - self.stream = stream - else: - self.stream = _csv_open(kwargs['path'], 'r') - self.reader = csv.reader(self.stream, **self.defaults) - - def __iter__(self): - return self - - def next(self): - result = next(self.reader) - if sys.version_info[0] < 3: - for i, item in enumerate(result): - if not isinstance(item, text_type): - result[i] = item.decode('utf-8') - return result - - __next__ = next - -class CSVWriter(CSVBase): - def __init__(self, fn, **kwargs): - self.stream = _csv_open(fn, 'w') - self.writer = csv.writer(self.stream, **self.defaults) - - def writerow(self, row): - if sys.version_info[0] < 3: - r = [] - for item in row: 
- if isinstance(item, text_type): - item = item.encode('utf-8') - r.append(item) - row = r - self.writer.writerow(row) - -# -# Configurator functionality -# - -class Configurator(BaseConfigurator): - - value_converters = dict(BaseConfigurator.value_converters) - value_converters['inc'] = 'inc_convert' - - def __init__(self, config, base=None): - super(Configurator, self).__init__(config) - self.base = base or os.getcwd() - - def configure_custom(self, config): - def convert(o): - if isinstance(o, (list, tuple)): - result = type(o)([convert(i) for i in o]) - elif isinstance(o, dict): - if '()' in o: - result = self.configure_custom(o) - else: - result = {} - for k in o: - result[k] = convert(o[k]) - else: - result = self.convert(o) - return result - - c = config.pop('()') - if not callable(c): - c = self.resolve(c) - props = config.pop('.', None) - # Check for valid identifiers - args = config.pop('[]', ()) - if args: - args = tuple([convert(o) for o in args]) - items = [(k, convert(config[k])) for k in config if valid_ident(k)] - kwargs = dict(items) - result = c(*args, **kwargs) - if props: - for n, v in props.items(): - setattr(result, n, convert(v)) - return result - - def __getitem__(self, key): - result = self.config[key] - if isinstance(result, dict) and '()' in result: - self.config[key] = result = self.configure_custom(result) - return result - - def inc_convert(self, value): - """Default converter for the inc:// protocol.""" - if not os.path.isabs(value): - value = os.path.join(self.base, value) - with codecs.open(value, 'r', encoding='utf-8') as f: - result = json.load(f) - return result - - -class SubprocessMixin(object): - """ - Mixin for running subprocesses and capturing their output - """ - def __init__(self, verbose=False, progress=None): - self.verbose = verbose - self.progress = progress - - def reader(self, stream, context): - """ - Read lines from a subprocess' output stream and either pass to a progress - callable (if specified) or write 
progress information to sys.stderr. - """ - progress = self.progress - verbose = self.verbose - while True: - s = stream.readline() - if not s: - break - if progress is not None: - progress(s, context) - else: - if not verbose: - sys.stderr.write('.') - else: - sys.stderr.write(s.decode('utf-8')) - sys.stderr.flush() - stream.close() - - def run_command(self, cmd, **kwargs): - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, **kwargs) - t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout')) - t1.start() - t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr')) - t2.start() - p.wait() - t1.join() - t2.join() - if self.progress is not None: - self.progress('done.', 'main') - elif self.verbose: - sys.stderr.write('done.\n') - return p - - -def normalize_name(name): - """Normalize a python package name a la PEP 503""" - # https://www.python.org/dev/peps/pep-0503/#normalized-names - return re.sub('[-_.]+', '-', name).lower() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/version.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/version.py deleted file mode 100644 index 3eebe18..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/version.py +++ /dev/null @@ -1,736 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012-2017 The Python Software Foundation. -# See LICENSE.txt and CONTRIBUTORS.txt. -# -""" -Implementation of a flexible versioning scheme providing support for PEP-440, -setuptools-compatible and semantic versioning. 
-""" - -import logging -import re - -from .compat import string_types -from .util import parse_requirement - -__all__ = ['NormalizedVersion', 'NormalizedMatcher', - 'LegacyVersion', 'LegacyMatcher', - 'SemanticVersion', 'SemanticMatcher', - 'UnsupportedVersionError', 'get_scheme'] - -logger = logging.getLogger(__name__) - - -class UnsupportedVersionError(ValueError): - """This is an unsupported version.""" - pass - - -class Version(object): - def __init__(self, s): - self._string = s = s.strip() - self._parts = parts = self.parse(s) - assert isinstance(parts, tuple) - assert len(parts) > 0 - - def parse(self, s): - raise NotImplementedError('please implement in a subclass') - - def _check_compatible(self, other): - if type(self) != type(other): - raise TypeError('cannot compare %r and %r' % (self, other)) - - def __eq__(self, other): - self._check_compatible(other) - return self._parts == other._parts - - def __ne__(self, other): - return not self.__eq__(other) - - def __lt__(self, other): - self._check_compatible(other) - return self._parts < other._parts - - def __gt__(self, other): - return not (self.__lt__(other) or self.__eq__(other)) - - def __le__(self, other): - return self.__lt__(other) or self.__eq__(other) - - def __ge__(self, other): - return self.__gt__(other) or self.__eq__(other) - - # See http://docs.python.org/reference/datamodel#object.__hash__ - def __hash__(self): - return hash(self._parts) - - def __repr__(self): - return "%s('%s')" % (self.__class__.__name__, self._string) - - def __str__(self): - return self._string - - @property - def is_prerelease(self): - raise NotImplementedError('Please implement in subclasses.') - - -class Matcher(object): - version_class = None - - # value is either a callable or the name of a method - _operators = { - '<': lambda v, c, p: v < c, - '>': lambda v, c, p: v > c, - '<=': lambda v, c, p: v == c or v < c, - '>=': lambda v, c, p: v == c or v > c, - '==': lambda v, c, p: v == c, - '===': lambda v, c, p: v == 
c, - # by default, compatible => >=. - '~=': lambda v, c, p: v == c or v > c, - '!=': lambda v, c, p: v != c, - } - - # this is a method only to support alternative implementations - # via overriding - def parse_requirement(self, s): - return parse_requirement(s) - - def __init__(self, s): - if self.version_class is None: - raise ValueError('Please specify a version class') - self._string = s = s.strip() - r = self.parse_requirement(s) - if not r: - raise ValueError('Not valid: %r' % s) - self.name = r.name - self.key = self.name.lower() # for case-insensitive comparisons - clist = [] - if r.constraints: - # import pdb; pdb.set_trace() - for op, s in r.constraints: - if s.endswith('.*'): - if op not in ('==', '!='): - raise ValueError('\'.*\' not allowed for ' - '%r constraints' % op) - # Could be a partial version (e.g. for '2.*') which - # won't parse as a version, so keep it as a string - vn, prefix = s[:-2], True - # Just to check that vn is a valid version - self.version_class(vn) - else: - # Should parse as a version, so we can create an - # instance for the comparison - vn, prefix = self.version_class(s), False - clist.append((op, vn, prefix)) - self._parts = tuple(clist) - - def match(self, version): - """ - Check if the provided version matches the constraints. - - :param version: The version to match against this instance. - :type version: String or :class:`Version` instance. 
- """ - if isinstance(version, string_types): - version = self.version_class(version) - for operator, constraint, prefix in self._parts: - f = self._operators.get(operator) - if isinstance(f, string_types): - f = getattr(self, f) - if not f: - msg = ('%r not implemented ' - 'for %s' % (operator, self.__class__.__name__)) - raise NotImplementedError(msg) - if not f(version, constraint, prefix): - return False - return True - - @property - def exact_version(self): - result = None - if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='): - result = self._parts[0][1] - return result - - def _check_compatible(self, other): - if type(self) != type(other) or self.name != other.name: - raise TypeError('cannot compare %s and %s' % (self, other)) - - def __eq__(self, other): - self._check_compatible(other) - return self.key == other.key and self._parts == other._parts - - def __ne__(self, other): - return not self.__eq__(other) - - # See http://docs.python.org/reference/datamodel#object.__hash__ - def __hash__(self): - return hash(self.key) + hash(self._parts) - - def __repr__(self): - return "%s(%r)" % (self.__class__.__name__, self._string) - - def __str__(self): - return self._string - - -PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?' - r'(\.(post)(\d+))?(\.(dev)(\d+))?' 
- r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$') - - -def _pep_440_key(s): - s = s.strip() - m = PEP440_VERSION_RE.match(s) - if not m: - raise UnsupportedVersionError('Not a valid version: %s' % s) - groups = m.groups() - nums = tuple(int(v) for v in groups[1].split('.')) - while len(nums) > 1 and nums[-1] == 0: - nums = nums[:-1] - - if not groups[0]: - epoch = 0 - else: - epoch = int(groups[0]) - pre = groups[4:6] - post = groups[7:9] - dev = groups[10:12] - local = groups[13] - if pre == (None, None): - pre = () - else: - pre = pre[0], int(pre[1]) - if post == (None, None): - post = () - else: - post = post[0], int(post[1]) - if dev == (None, None): - dev = () - else: - dev = dev[0], int(dev[1]) - if local is None: - local = () - else: - parts = [] - for part in local.split('.'): - # to ensure that numeric compares as > lexicographic, avoid - # comparing them directly, but encode a tuple which ensures - # correct sorting - if part.isdigit(): - part = (1, int(part)) - else: - part = (0, part) - parts.append(part) - local = tuple(parts) - if not pre: - # either before pre-release, or final release and after - if not post and dev: - # before pre-release - pre = ('a', -1) # to sort before a0 - else: - pre = ('z',) # to sort after all pre-releases - # now look at the state of post and dev. - if not post: - post = ('_',) # sort before 'a' - if not dev: - dev = ('final',) - - #print('%s -> %s' % (s, m.groups())) - return epoch, nums, pre, post, dev, local - - -_normalized_key = _pep_440_key - - -class NormalizedVersion(Version): - """A rational version. 
- - Good: - 1.2 # equivalent to "1.2.0" - 1.2.0 - 1.2a1 - 1.2.3a2 - 1.2.3b1 - 1.2.3c1 - 1.2.3.4 - TODO: fill this out - - Bad: - 1 # minimum two numbers - 1.2a # release level must have a release serial - 1.2.3b - """ - def parse(self, s): - result = _normalized_key(s) - # _normalized_key loses trailing zeroes in the release - # clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0 - # However, PEP 440 prefix matching needs it: for example, - # (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0). - m = PEP440_VERSION_RE.match(s) # must succeed - groups = m.groups() - self._release_clause = tuple(int(v) for v in groups[1].split('.')) - return result - - PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev']) - - @property - def is_prerelease(self): - return any(t[0] in self.PREREL_TAGS for t in self._parts if t) - - -def _match_prefix(x, y): - x = str(x) - y = str(y) - if x == y: - return True - if not x.startswith(y): - return False - n = len(y) - return x[n] == '.' - - -class NormalizedMatcher(Matcher): - version_class = NormalizedVersion - - # value is either a callable or the name of a method - _operators = { - '~=': '_match_compatible', - '<': '_match_lt', - '>': '_match_gt', - '<=': '_match_le', - '>=': '_match_ge', - '==': '_match_eq', - '===': '_match_arbitrary', - '!=': '_match_ne', - } - - def _adjust_local(self, version, constraint, prefix): - if prefix: - strip_local = '+' not in constraint and version._parts[-1] - else: - # both constraint and version are - # NormalizedVersion instances. - # If constraint does not have a local component, - # ensure the version doesn't, either. 
- strip_local = not constraint._parts[-1] and version._parts[-1] - if strip_local: - s = version._string.split('+', 1)[0] - version = self.version_class(s) - return version, constraint - - def _match_lt(self, version, constraint, prefix): - version, constraint = self._adjust_local(version, constraint, prefix) - if version >= constraint: - return False - release_clause = constraint._release_clause - pfx = '.'.join([str(i) for i in release_clause]) - return not _match_prefix(version, pfx) - - def _match_gt(self, version, constraint, prefix): - version, constraint = self._adjust_local(version, constraint, prefix) - if version <= constraint: - return False - release_clause = constraint._release_clause - pfx = '.'.join([str(i) for i in release_clause]) - return not _match_prefix(version, pfx) - - def _match_le(self, version, constraint, prefix): - version, constraint = self._adjust_local(version, constraint, prefix) - return version <= constraint - - def _match_ge(self, version, constraint, prefix): - version, constraint = self._adjust_local(version, constraint, prefix) - return version >= constraint - - def _match_eq(self, version, constraint, prefix): - version, constraint = self._adjust_local(version, constraint, prefix) - if not prefix: - result = (version == constraint) - else: - result = _match_prefix(version, constraint) - return result - - def _match_arbitrary(self, version, constraint, prefix): - return str(version) == str(constraint) - - def _match_ne(self, version, constraint, prefix): - version, constraint = self._adjust_local(version, constraint, prefix) - if not prefix: - result = (version != constraint) - else: - result = not _match_prefix(version, constraint) - return result - - def _match_compatible(self, version, constraint, prefix): - version, constraint = self._adjust_local(version, constraint, prefix) - if version == constraint: - return True - if version < constraint: - return False -# if not prefix: -# return True - release_clause = 
constraint._release_clause - if len(release_clause) > 1: - release_clause = release_clause[:-1] - pfx = '.'.join([str(i) for i in release_clause]) - return _match_prefix(version, pfx) - -_REPLACEMENTS = ( - (re.compile('[.+-]$'), ''), # remove trailing puncts - (re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start - (re.compile('^[.-]'), ''), # remove leading puncts - (re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses - (re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion) - (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'), # remove leading v(ersion) - (re.compile('[.]{2,}'), '.'), # multiple runs of '.' - (re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha - (re.compile(r'\b(pre-alpha|prealpha)\b'), - 'pre.alpha'), # standardise - (re.compile(r'\(beta\)$'), 'beta'), # remove parentheses -) - -_SUFFIX_REPLACEMENTS = ( - (re.compile('^[:~._+-]+'), ''), # remove leading puncts - (re.compile('[,*")([\\]]'), ''), # remove unwanted chars - (re.compile('[~:+_ -]'), '.'), # replace illegal chars - (re.compile('[.]{2,}'), '.'), # multiple runs of '.' - (re.compile(r'\.$'), ''), # trailing '.' -) - -_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)') - - -def _suggest_semantic_version(s): - """ - Try to suggest a semantic form for a version for which - _suggest_normalized_version couldn't come up with anything. - """ - result = s.strip().lower() - for pat, repl in _REPLACEMENTS: - result = pat.sub(repl, result) - if not result: - result = '0.0.0' - - # Now look for numeric prefix, and separate it out from - # the rest. 
- #import pdb; pdb.set_trace() - m = _NUMERIC_PREFIX.match(result) - if not m: - prefix = '0.0.0' - suffix = result - else: - prefix = m.groups()[0].split('.') - prefix = [int(i) for i in prefix] - while len(prefix) < 3: - prefix.append(0) - if len(prefix) == 3: - suffix = result[m.end():] - else: - suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():] - prefix = prefix[:3] - prefix = '.'.join([str(i) for i in prefix]) - suffix = suffix.strip() - if suffix: - #import pdb; pdb.set_trace() - # massage the suffix. - for pat, repl in _SUFFIX_REPLACEMENTS: - suffix = pat.sub(repl, suffix) - - if not suffix: - result = prefix - else: - sep = '-' if 'dev' in suffix else '+' - result = prefix + sep + suffix - if not is_semver(result): - result = None - return result - - -def _suggest_normalized_version(s): - """Suggest a normalized version close to the given version string. - - If you have a version string that isn't rational (i.e. NormalizedVersion - doesn't like it) then you might be able to get an equivalent (or close) - rational version from this function. - - This does a number of simple normalizations to the given string, based - on observation of versions currently in use on PyPI. Given a dump of - those version during PyCon 2009, 4287 of them: - - 2312 (53.93%) match NormalizedVersion without change - with the automatic suggestion - - 3474 (81.04%) match when using this suggestion method - - @param s {str} An irrational version string. - @returns A rational version string, or None, if couldn't determine one. 
- """ - try: - _normalized_key(s) - return s # already rational - except UnsupportedVersionError: - pass - - rs = s.lower() - - # part of this could use maketrans - for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'), - ('beta', 'b'), ('rc', 'c'), ('-final', ''), - ('-pre', 'c'), - ('-release', ''), ('.release', ''), ('-stable', ''), - ('+', '.'), ('_', '.'), (' ', ''), ('.final', ''), - ('final', '')): - rs = rs.replace(orig, repl) - - # if something ends with dev or pre, we add a 0 - rs = re.sub(r"pre$", r"pre0", rs) - rs = re.sub(r"dev$", r"dev0", rs) - - # if we have something like "b-2" or "a.2" at the end of the - # version, that is probably beta, alpha, etc - # let's remove the dash or dot - rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs) - - # 1.0-dev-r371 -> 1.0.dev371 - # 0.1-dev-r79 -> 0.1.dev79 - rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs) - - # Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1 - rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs) - - # Clean: v0.3, v1.0 - if rs.startswith('v'): - rs = rs[1:] - - # Clean leading '0's on numbers. - #TODO: unintended side-effect on, e.g., "2003.05.09" - # PyPI stats: 77 (~2%) better - rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs) - - # Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers - # zero. 
- # PyPI stats: 245 (7.56%) better - rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs) - - # the 'dev-rNNN' tag is a dev tag - rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs) - - # clean the - when used as a pre delimiter - rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs) - - # a terminal "dev" or "devel" can be changed into ".dev0" - rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs) - - # a terminal "dev" can be changed into ".dev0" - rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs) - - # a terminal "final" or "stable" can be removed - rs = re.sub(r"(final|stable)$", "", rs) - - # The 'r' and the '-' tags are post release tags - # 0.4a1.r10 -> 0.4a1.post10 - # 0.9.33-17222 -> 0.9.33.post17222 - # 0.9.33-r17222 -> 0.9.33.post17222 - rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs) - - # Clean 'r' instead of 'dev' usage: - # 0.9.33+r17222 -> 0.9.33.dev17222 - # 1.0dev123 -> 1.0.dev123 - # 1.0.git123 -> 1.0.dev123 - # 1.0.bzr123 -> 1.0.dev123 - # 0.1a0dev.123 -> 0.1a0.dev123 - # PyPI stats: ~150 (~4%) better - rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs) - - # Clean '.pre' (normalized from '-pre' above) instead of 'c' usage: - # 0.2.pre1 -> 0.2c1 - # 0.2-c1 -> 0.2c1 - # 1.0preview123 -> 1.0c123 - # PyPI stats: ~21 (0.62%) better - rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs) - - # Tcl/Tk uses "px" for their post release markers - rs = re.sub(r"p(\d+)$", r".post\1", rs) - - try: - _normalized_key(rs) - except UnsupportedVersionError: - rs = None - return rs - -# -# Legacy version processing (distribute-compatible) -# - -_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I) -_VERSION_REPLACE = { - 'pre': 'c', - 'preview': 'c', - '-': 'final-', - 'rc': 'c', - 'dev': '@', - '': None, - '.': None, -} - - -def _legacy_key(s): - def get_parts(s): - result = [] - for p in _VERSION_PART.split(s.lower()): - p = _VERSION_REPLACE.get(p, p) - if p: - if '0' <= p[:1] <= '9': - p = p.zfill(8) - else: - p = '*' + p - result.append(p) - result.append('*final') 
- return result - - result = [] - for p in get_parts(s): - if p.startswith('*'): - if p < '*final': - while result and result[-1] == '*final-': - result.pop() - while result and result[-1] == '00000000': - result.pop() - result.append(p) - return tuple(result) - - -class LegacyVersion(Version): - def parse(self, s): - return _legacy_key(s) - - @property - def is_prerelease(self): - result = False - for x in self._parts: - if (isinstance(x, string_types) and x.startswith('*') and - x < '*final'): - result = True - break - return result - - -class LegacyMatcher(Matcher): - version_class = LegacyVersion - - _operators = dict(Matcher._operators) - _operators['~='] = '_match_compatible' - - numeric_re = re.compile(r'^(\d+(\.\d+)*)') - - def _match_compatible(self, version, constraint, prefix): - if version < constraint: - return False - m = self.numeric_re.match(str(constraint)) - if not m: - logger.warning('Cannot compute compatible match for version %s ' - ' and constraint %s', version, constraint) - return True - s = m.groups()[0] - if '.' in s: - s = s.rsplit('.', 1)[0] - return _match_prefix(version, s) - -# -# Semantic versioning -# - -_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)' - r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?' 
- r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I) - - -def is_semver(s): - return _SEMVER_RE.match(s) - - -def _semantic_key(s): - def make_tuple(s, absent): - if s is None: - result = (absent,) - else: - parts = s[1:].split('.') - # We can't compare ints and strings on Python 3, so fudge it - # by zero-filling numeric values so simulate a numeric comparison - result = tuple([p.zfill(8) if p.isdigit() else p for p in parts]) - return result - - m = is_semver(s) - if not m: - raise UnsupportedVersionError(s) - groups = m.groups() - major, minor, patch = [int(i) for i in groups[:3]] - # choose the '|' and '*' so that versions sort correctly - pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*') - return (major, minor, patch), pre, build - - -class SemanticVersion(Version): - def parse(self, s): - return _semantic_key(s) - - @property - def is_prerelease(self): - return self._parts[1][0] != '|' - - -class SemanticMatcher(Matcher): - version_class = SemanticVersion - - -class VersionScheme(object): - def __init__(self, key, matcher, suggester=None): - self.key = key - self.matcher = matcher - self.suggester = suggester - - def is_valid_version(self, s): - try: - self.matcher.version_class(s) - result = True - except UnsupportedVersionError: - result = False - return result - - def is_valid_matcher(self, s): - try: - self.matcher(s) - result = True - except UnsupportedVersionError: - result = False - return result - - def is_valid_constraint_list(self, s): - """ - Used for processing some metadata fields - """ - return self.is_valid_matcher('dummy_name (%s)' % s) - - def suggest(self, s): - if self.suggester is None: - result = None - else: - result = self.suggester(s) - return result - -_SCHEMES = { - 'normalized': VersionScheme(_normalized_key, NormalizedMatcher, - _suggest_normalized_version), - 'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda self, s: s), - 'semantic': VersionScheme(_semantic_key, SemanticMatcher, - _suggest_semantic_version), -} - 
-_SCHEMES['default'] = _SCHEMES['normalized'] - - -def get_scheme(name): - if name not in _SCHEMES: - raise ValueError('unknown scheme name: %r' % name) - return _SCHEMES[name] diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/w32.exe b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/w32.exe deleted file mode 100644 index 732215a..0000000 Binary files a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/w32.exe and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/w64.exe b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/w64.exe deleted file mode 100644 index c41bd0a..0000000 Binary files a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/w64.exe and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/wheel.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/wheel.py deleted file mode 100644 index b04bfae..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/wheel.py +++ /dev/null @@ -1,988 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2013-2017 Vinay Sajip. -# Licensed to the Python Software Foundation under a contributor agreement. -# See LICENSE.txt and CONTRIBUTORS.txt. -# -from __future__ import unicode_literals - -import base64 -import codecs -import datetime -import distutils.util -from email import message_from_file -import hashlib -import imp -import json -import logging -import os -import posixpath -import re -import shutil -import sys -import tempfile -import zipfile - -from . 
import __version__, DistlibException -from .compat import sysconfig, ZipFile, fsdecode, text_type, filter -from .database import InstalledDistribution -from .metadata import Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME -from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache, - cached_property, get_cache_base, read_exports, tempdir) -from .version import NormalizedVersion, UnsupportedVersionError - -logger = logging.getLogger(__name__) - -cache = None # created when needed - -if hasattr(sys, 'pypy_version_info'): # pragma: no cover - IMP_PREFIX = 'pp' -elif sys.platform.startswith('java'): # pragma: no cover - IMP_PREFIX = 'jy' -elif sys.platform == 'cli': # pragma: no cover - IMP_PREFIX = 'ip' -else: - IMP_PREFIX = 'cp' - -VER_SUFFIX = sysconfig.get_config_var('py_version_nodot') -if not VER_SUFFIX: # pragma: no cover - VER_SUFFIX = '%s%s' % sys.version_info[:2] -PYVER = 'py' + VER_SUFFIX -IMPVER = IMP_PREFIX + VER_SUFFIX - -ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_') - -ABI = sysconfig.get_config_var('SOABI') -if ABI and ABI.startswith('cpython-'): - ABI = ABI.replace('cpython-', 'cp') -else: - def _derive_abi(): - parts = ['cp', VER_SUFFIX] - if sysconfig.get_config_var('Py_DEBUG'): - parts.append('d') - if sysconfig.get_config_var('WITH_PYMALLOC'): - parts.append('m') - if sysconfig.get_config_var('Py_UNICODE_SIZE') == 4: - parts.append('u') - return ''.join(parts) - ABI = _derive_abi() - del _derive_abi - -FILENAME_RE = re.compile(r''' -(?P<nm>[^-]+) --(?P<vn>\d+[^-]*) -(-(?P<bn>\d+[^-]*))? 
--(?P<py>\w+\d+(\.\w+\d+)*) --(?P<bi>\w+) --(?P<ar>\w+(\.\w+)*) -\.whl$ -''', re.IGNORECASE | re.VERBOSE) - -NAME_VERSION_RE = re.compile(r''' -(?P<nm>[^-]+) --(?P<vn>\d+[^-]*) -(-(?P<bn>\d+[^-]*))?$ -''', re.IGNORECASE | re.VERBOSE) - -SHEBANG_RE = re.compile(br'\s*#![^\r\n]*') -SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$') -SHEBANG_PYTHON = b'#!python' -SHEBANG_PYTHONW = b'#!pythonw' - -if os.sep == '/': - to_posix = lambda o: o -else: - to_posix = lambda o: o.replace(os.sep, '/') - - -class Mounter(object): - def __init__(self): - self.impure_wheels = {} - self.libs = {} - - def add(self, pathname, extensions): - self.impure_wheels[pathname] = extensions - self.libs.update(extensions) - - def remove(self, pathname): - extensions = self.impure_wheels.pop(pathname) - for k, v in extensions: - if k in self.libs: - del self.libs[k] - - def find_module(self, fullname, path=None): - if fullname in self.libs: - result = self - else: - result = None - return result - - def load_module(self, fullname): - if fullname in sys.modules: - result = sys.modules[fullname] - else: - if fullname not in self.libs: - raise ImportError('unable to find extension for %s' % fullname) - result = imp.load_dynamic(fullname, self.libs[fullname]) - result.__loader__ = self - parts = fullname.rsplit('.', 1) - if len(parts) > 1: - result.__package__ = parts[0] - return result - -_hook = Mounter() - - -class Wheel(object): - """ - Class to build and install from Wheel files (PEP 427). - """ - - wheel_version = (1, 1) - hash_kind = 'sha256' - - def __init__(self, filename=None, sign=False, verify=False): - """ - Initialise an instance using a (valid) filename. 
- """ - self.sign = sign - self.should_verify = verify - self.buildver = '' - self.pyver = [PYVER] - self.abi = ['none'] - self.arch = ['any'] - self.dirname = os.getcwd() - if filename is None: - self.name = 'dummy' - self.version = '0.1' - self._filename = self.filename - else: - m = NAME_VERSION_RE.match(filename) - if m: - info = m.groupdict('') - self.name = info['nm'] - # Reinstate the local version separator - self.version = info['vn'].replace('_', '-') - self.buildver = info['bn'] - self._filename = self.filename - else: - dirname, filename = os.path.split(filename) - m = FILENAME_RE.match(filename) - if not m: - raise DistlibException('Invalid name or ' - 'filename: %r' % filename) - if dirname: - self.dirname = os.path.abspath(dirname) - self._filename = filename - info = m.groupdict('') - self.name = info['nm'] - self.version = info['vn'] - self.buildver = info['bn'] - self.pyver = info['py'].split('.') - self.abi = info['bi'].split('.') - self.arch = info['ar'].split('.') - - @property - def filename(self): - """ - Build and return a filename from the various components. 
- """ - if self.buildver: - buildver = '-' + self.buildver - else: - buildver = '' - pyver = '.'.join(self.pyver) - abi = '.'.join(self.abi) - arch = '.'.join(self.arch) - # replace - with _ as a local version separator - version = self.version.replace('-', '_') - return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver, - pyver, abi, arch) - - @property - def exists(self): - path = os.path.join(self.dirname, self.filename) - return os.path.isfile(path) - - @property - def tags(self): - for pyver in self.pyver: - for abi in self.abi: - for arch in self.arch: - yield pyver, abi, arch - - @cached_property - def metadata(self): - pathname = os.path.join(self.dirname, self.filename) - name_ver = '%s-%s' % (self.name, self.version) - info_dir = '%s.dist-info' % name_ver - wrapper = codecs.getreader('utf-8') - with ZipFile(pathname, 'r') as zf: - wheel_metadata = self.get_wheel_metadata(zf) - wv = wheel_metadata['Wheel-Version'].split('.', 1) - file_version = tuple([int(i) for i in wv]) - if file_version < (1, 1): - fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME, 'METADATA'] - else: - fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME] - result = None - for fn in fns: - try: - metadata_filename = posixpath.join(info_dir, fn) - with zf.open(metadata_filename) as bf: - wf = wrapper(bf) - result = Metadata(fileobj=wf) - if result: - break - except KeyError: - pass - if not result: - raise ValueError('Invalid wheel, because metadata is ' - 'missing: looked in %s' % ', '.join(fns)) - return result - - def get_wheel_metadata(self, zf): - name_ver = '%s-%s' % (self.name, self.version) - info_dir = '%s.dist-info' % name_ver - metadata_filename = posixpath.join(info_dir, 'WHEEL') - with zf.open(metadata_filename) as bf: - wf = codecs.getreader('utf-8')(bf) - message = message_from_file(wf) - return dict(message) - - @cached_property - def info(self): - pathname = os.path.join(self.dirname, self.filename) - with ZipFile(pathname, 'r') as zf: - result = 
self.get_wheel_metadata(zf) - return result - - def process_shebang(self, data): - m = SHEBANG_RE.match(data) - if m: - end = m.end() - shebang, data_after_shebang = data[:end], data[end:] - # Preserve any arguments after the interpreter - if b'pythonw' in shebang.lower(): - shebang_python = SHEBANG_PYTHONW - else: - shebang_python = SHEBANG_PYTHON - m = SHEBANG_DETAIL_RE.match(shebang) - if m: - args = b' ' + m.groups()[-1] - else: - args = b'' - shebang = shebang_python + args - data = shebang + data_after_shebang - else: - cr = data.find(b'\r') - lf = data.find(b'\n') - if cr < 0 or cr > lf: - term = b'\n' - else: - if data[cr:cr + 2] == b'\r\n': - term = b'\r\n' - else: - term = b'\r' - data = SHEBANG_PYTHON + term + data - return data - - def get_hash(self, data, hash_kind=None): - if hash_kind is None: - hash_kind = self.hash_kind - try: - hasher = getattr(hashlib, hash_kind) - except AttributeError: - raise DistlibException('Unsupported hash algorithm: %r' % hash_kind) - result = hasher(data).digest() - result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii') - return hash_kind, result - - def write_record(self, records, record_path, base): - records = list(records) # make a copy for sorting - p = to_posix(os.path.relpath(record_path, base)) - records.append((p, '', '')) - records.sort() - with CSVWriter(record_path) as writer: - for row in records: - writer.writerow(row) - - def write_records(self, info, libdir, archive_paths): - records = [] - distinfo, info_dir = info - hasher = getattr(hashlib, self.hash_kind) - for ap, p in archive_paths: - with open(p, 'rb') as f: - data = f.read() - digest = '%s=%s' % self.get_hash(data) - size = os.path.getsize(p) - records.append((ap, digest, size)) - - p = os.path.join(distinfo, 'RECORD') - self.write_record(records, p, libdir) - ap = to_posix(os.path.join(info_dir, 'RECORD')) - archive_paths.append((ap, p)) - - def build_zip(self, pathname, archive_paths): - with ZipFile(pathname, 'w', 
zipfile.ZIP_DEFLATED) as zf: - for ap, p in archive_paths: - logger.debug('Wrote %s to %s in wheel', p, ap) - zf.write(p, ap) - - def build(self, paths, tags=None, wheel_version=None): - """ - Build a wheel from files in specified paths, and use any specified tags - when determining the name of the wheel. - """ - if tags is None: - tags = {} - - libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0] - if libkey == 'platlib': - is_pure = 'false' - default_pyver = [IMPVER] - default_abi = [ABI] - default_arch = [ARCH] - else: - is_pure = 'true' - default_pyver = [PYVER] - default_abi = ['none'] - default_arch = ['any'] - - self.pyver = tags.get('pyver', default_pyver) - self.abi = tags.get('abi', default_abi) - self.arch = tags.get('arch', default_arch) - - libdir = paths[libkey] - - name_ver = '%s-%s' % (self.name, self.version) - data_dir = '%s.data' % name_ver - info_dir = '%s.dist-info' % name_ver - - archive_paths = [] - - # First, stuff which is not in site-packages - for key in ('data', 'headers', 'scripts'): - if key not in paths: - continue - path = paths[key] - if os.path.isdir(path): - for root, dirs, files in os.walk(path): - for fn in files: - p = fsdecode(os.path.join(root, fn)) - rp = os.path.relpath(p, path) - ap = to_posix(os.path.join(data_dir, key, rp)) - archive_paths.append((ap, p)) - if key == 'scripts' and not p.endswith('.exe'): - with open(p, 'rb') as f: - data = f.read() - data = self.process_shebang(data) - with open(p, 'wb') as f: - f.write(data) - - # Now, stuff which is in site-packages, other than the - # distinfo stuff. 
- path = libdir - distinfo = None - for root, dirs, files in os.walk(path): - if root == path: - # At the top level only, save distinfo for later - # and skip it for now - for i, dn in enumerate(dirs): - dn = fsdecode(dn) - if dn.endswith('.dist-info'): - distinfo = os.path.join(root, dn) - del dirs[i] - break - assert distinfo, '.dist-info directory expected, not found' - - for fn in files: - # comment out next suite to leave .pyc files in - if fsdecode(fn).endswith(('.pyc', '.pyo')): - continue - p = os.path.join(root, fn) - rp = to_posix(os.path.relpath(p, path)) - archive_paths.append((rp, p)) - - # Now distinfo. Assumed to be flat, i.e. os.listdir is enough. - files = os.listdir(distinfo) - for fn in files: - if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'): - p = fsdecode(os.path.join(distinfo, fn)) - ap = to_posix(os.path.join(info_dir, fn)) - archive_paths.append((ap, p)) - - wheel_metadata = [ - 'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version), - 'Generator: distlib %s' % __version__, - 'Root-Is-Purelib: %s' % is_pure, - ] - for pyver, abi, arch in self.tags: - wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch)) - p = os.path.join(distinfo, 'WHEEL') - with open(p, 'w') as f: - f.write('\n'.join(wheel_metadata)) - ap = to_posix(os.path.join(info_dir, 'WHEEL')) - archive_paths.append((ap, p)) - - # Now, at last, RECORD. - # Paths in here are archive paths - nothing else makes sense. - self.write_records((distinfo, info_dir), libdir, archive_paths) - # Now, ready to build the zip file - pathname = os.path.join(self.dirname, self.filename) - self.build_zip(pathname, archive_paths) - return pathname - - def install(self, paths, maker, **kwargs): - """ - Install a wheel to the specified paths. If kwarg ``warner`` is - specified, it should be a callable, which will be called with two - tuples indicating the wheel version of this software and the wheel - version in the file, if there is a discrepancy in the versions. 
- This can be used to issue any warnings to raise any exceptions. - If kwarg ``lib_only`` is True, only the purelib/platlib files are - installed, and the headers, scripts, data and dist-info metadata are - not written. If kwarg ``bytecode_hashed_invalidation`` is True, written - bytecode will try to use file-hash based invalidation (PEP-552) on - supported interpreter versions (CPython 2.7+). - - The return value is a :class:`InstalledDistribution` instance unless - ``options.lib_only`` is True, in which case the return value is ``None``. - """ - - dry_run = maker.dry_run - warner = kwargs.get('warner') - lib_only = kwargs.get('lib_only', False) - bc_hashed_invalidation = kwargs.get('bytecode_hashed_invalidation', False) - - pathname = os.path.join(self.dirname, self.filename) - name_ver = '%s-%s' % (self.name, self.version) - data_dir = '%s.data' % name_ver - info_dir = '%s.dist-info' % name_ver - - metadata_name = posixpath.join(info_dir, METADATA_FILENAME) - wheel_metadata_name = posixpath.join(info_dir, 'WHEEL') - record_name = posixpath.join(info_dir, 'RECORD') - - wrapper = codecs.getreader('utf-8') - - with ZipFile(pathname, 'r') as zf: - with zf.open(wheel_metadata_name) as bwf: - wf = wrapper(bwf) - message = message_from_file(wf) - wv = message['Wheel-Version'].split('.', 1) - file_version = tuple([int(i) for i in wv]) - if (file_version != self.wheel_version) and warner: - warner(self.wheel_version, file_version) - - if message['Root-Is-Purelib'] == 'true': - libdir = paths['purelib'] - else: - libdir = paths['platlib'] - - records = {} - with zf.open(record_name) as bf: - with CSVReader(stream=bf) as reader: - for row in reader: - p = row[0] - records[p] = row - - data_pfx = posixpath.join(data_dir, '') - info_pfx = posixpath.join(info_dir, '') - script_pfx = posixpath.join(data_dir, 'scripts', '') - - # make a new instance rather than a copy of maker's, - # as we mutate it - fileop = FileOperator(dry_run=dry_run) - fileop.record = True # so we can 
rollback if needed - - bc = not sys.dont_write_bytecode # Double negatives. Lovely! - - outfiles = [] # for RECORD writing - - # for script copying/shebang processing - workdir = tempfile.mkdtemp() - # set target dir later - # we default add_launchers to False, as the - # Python Launcher should be used instead - maker.source_dir = workdir - maker.target_dir = None - try: - for zinfo in zf.infolist(): - arcname = zinfo.filename - if isinstance(arcname, text_type): - u_arcname = arcname - else: - u_arcname = arcname.decode('utf-8') - # The signature file won't be in RECORD, - # and we don't currently don't do anything with it - if u_arcname.endswith('/RECORD.jws'): - continue - row = records[u_arcname] - if row[2] and str(zinfo.file_size) != row[2]: - raise DistlibException('size mismatch for ' - '%s' % u_arcname) - if row[1]: - kind, value = row[1].split('=', 1) - with zf.open(arcname) as bf: - data = bf.read() - _, digest = self.get_hash(data, kind) - if digest != value: - raise DistlibException('digest mismatch for ' - '%s' % arcname) - - if lib_only and u_arcname.startswith((info_pfx, data_pfx)): - logger.debug('lib_only: skipping %s', u_arcname) - continue - is_script = (u_arcname.startswith(script_pfx) - and not u_arcname.endswith('.exe')) - - if u_arcname.startswith(data_pfx): - _, where, rp = u_arcname.split('/', 2) - outfile = os.path.join(paths[where], convert_path(rp)) - else: - # meant for site-packages. 
- if u_arcname in (wheel_metadata_name, record_name): - continue - outfile = os.path.join(libdir, convert_path(u_arcname)) - if not is_script: - with zf.open(arcname) as bf: - fileop.copy_stream(bf, outfile) - outfiles.append(outfile) - # Double check the digest of the written file - if not dry_run and row[1]: - with open(outfile, 'rb') as bf: - data = bf.read() - _, newdigest = self.get_hash(data, kind) - if newdigest != digest: - raise DistlibException('digest mismatch ' - 'on write for ' - '%s' % outfile) - if bc and outfile.endswith('.py'): - try: - pyc = fileop.byte_compile(outfile, - hashed_invalidation=bc_hashed_invalidation) - outfiles.append(pyc) - except Exception: - # Don't give up if byte-compilation fails, - # but log it and perhaps warn the user - logger.warning('Byte-compilation failed', - exc_info=True) - else: - fn = os.path.basename(convert_path(arcname)) - workname = os.path.join(workdir, fn) - with zf.open(arcname) as bf: - fileop.copy_stream(bf, workname) - - dn, fn = os.path.split(outfile) - maker.target_dir = dn - filenames = maker.make(fn) - fileop.set_executable_mode(filenames) - outfiles.extend(filenames) - - if lib_only: - logger.debug('lib_only: returning None') - dist = None - else: - # Generate scripts - - # Try to get pydist.json so we can see if there are - # any commands to generate. If this fails (e.g. because - # of a legacy wheel), log a warning but don't give up. 
- commands = None - file_version = self.info['Wheel-Version'] - if file_version == '1.0': - # Use legacy info - ep = posixpath.join(info_dir, 'entry_points.txt') - try: - with zf.open(ep) as bwf: - epdata = read_exports(bwf) - commands = {} - for key in ('console', 'gui'): - k = '%s_scripts' % key - if k in epdata: - commands['wrap_%s' % key] = d = {} - for v in epdata[k].values(): - s = '%s:%s' % (v.prefix, v.suffix) - if v.flags: - s += ' %s' % v.flags - d[v.name] = s - except Exception: - logger.warning('Unable to read legacy script ' - 'metadata, so cannot generate ' - 'scripts') - else: - try: - with zf.open(metadata_name) as bwf: - wf = wrapper(bwf) - commands = json.load(wf).get('extensions') - if commands: - commands = commands.get('python.commands') - except Exception: - logger.warning('Unable to read JSON metadata, so ' - 'cannot generate scripts') - if commands: - console_scripts = commands.get('wrap_console', {}) - gui_scripts = commands.get('wrap_gui', {}) - if console_scripts or gui_scripts: - script_dir = paths.get('scripts', '') - if not os.path.isdir(script_dir): - raise ValueError('Valid script path not ' - 'specified') - maker.target_dir = script_dir - for k, v in console_scripts.items(): - script = '%s = %s' % (k, v) - filenames = maker.make(script) - fileop.set_executable_mode(filenames) - - if gui_scripts: - options = {'gui': True } - for k, v in gui_scripts.items(): - script = '%s = %s' % (k, v) - filenames = maker.make(script, options) - fileop.set_executable_mode(filenames) - - p = os.path.join(libdir, info_dir) - dist = InstalledDistribution(p) - - # Write SHARED - paths = dict(paths) # don't change passed in dict - del paths['purelib'] - del paths['platlib'] - paths['lib'] = libdir - p = dist.write_shared_locations(paths, dry_run) - if p: - outfiles.append(p) - - # Write RECORD - dist.write_installed_files(outfiles, paths['prefix'], - dry_run) - return dist - except Exception: # pragma: no cover - logger.exception('installation failed.') 
- fileop.rollback() - raise - finally: - shutil.rmtree(workdir) - - def _get_dylib_cache(self): - global cache - if cache is None: - # Use native string to avoid issues on 2.x: see Python #20140. - base = os.path.join(get_cache_base(), str('dylib-cache'), - sys.version[:3]) - cache = Cache(base) - return cache - - def _get_extensions(self): - pathname = os.path.join(self.dirname, self.filename) - name_ver = '%s-%s' % (self.name, self.version) - info_dir = '%s.dist-info' % name_ver - arcname = posixpath.join(info_dir, 'EXTENSIONS') - wrapper = codecs.getreader('utf-8') - result = [] - with ZipFile(pathname, 'r') as zf: - try: - with zf.open(arcname) as bf: - wf = wrapper(bf) - extensions = json.load(wf) - cache = self._get_dylib_cache() - prefix = cache.prefix_to_dir(pathname) - cache_base = os.path.join(cache.base, prefix) - if not os.path.isdir(cache_base): - os.makedirs(cache_base) - for name, relpath in extensions.items(): - dest = os.path.join(cache_base, convert_path(relpath)) - if not os.path.exists(dest): - extract = True - else: - file_time = os.stat(dest).st_mtime - file_time = datetime.datetime.fromtimestamp(file_time) - info = zf.getinfo(relpath) - wheel_time = datetime.datetime(*info.date_time) - extract = wheel_time > file_time - if extract: - zf.extract(relpath, cache_base) - result.append((name, dest)) - except KeyError: - pass - return result - - def is_compatible(self): - """ - Determine if a wheel is compatible with the running system. - """ - return is_compatible(self) - - def is_mountable(self): - """ - Determine if a wheel is asserted as mountable by its metadata. - """ - return True # for now - metadata details TBD - - def mount(self, append=False): - pathname = os.path.abspath(os.path.join(self.dirname, self.filename)) - if not self.is_compatible(): - msg = 'Wheel %s not compatible with this Python.' % pathname - raise DistlibException(msg) - if not self.is_mountable(): - msg = 'Wheel %s is marked as not mountable.' 
% pathname - raise DistlibException(msg) - if pathname in sys.path: - logger.debug('%s already in path', pathname) - else: - if append: - sys.path.append(pathname) - else: - sys.path.insert(0, pathname) - extensions = self._get_extensions() - if extensions: - if _hook not in sys.meta_path: - sys.meta_path.append(_hook) - _hook.add(pathname, extensions) - - def unmount(self): - pathname = os.path.abspath(os.path.join(self.dirname, self.filename)) - if pathname not in sys.path: - logger.debug('%s not in path', pathname) - else: - sys.path.remove(pathname) - if pathname in _hook.impure_wheels: - _hook.remove(pathname) - if not _hook.impure_wheels: - if _hook in sys.meta_path: - sys.meta_path.remove(_hook) - - def verify(self): - pathname = os.path.join(self.dirname, self.filename) - name_ver = '%s-%s' % (self.name, self.version) - data_dir = '%s.data' % name_ver - info_dir = '%s.dist-info' % name_ver - - metadata_name = posixpath.join(info_dir, METADATA_FILENAME) - wheel_metadata_name = posixpath.join(info_dir, 'WHEEL') - record_name = posixpath.join(info_dir, 'RECORD') - - wrapper = codecs.getreader('utf-8') - - with ZipFile(pathname, 'r') as zf: - with zf.open(wheel_metadata_name) as bwf: - wf = wrapper(bwf) - message = message_from_file(wf) - wv = message['Wheel-Version'].split('.', 1) - file_version = tuple([int(i) for i in wv]) - # TODO version verification - - records = {} - with zf.open(record_name) as bf: - with CSVReader(stream=bf) as reader: - for row in reader: - p = row[0] - records[p] = row - - for zinfo in zf.infolist(): - arcname = zinfo.filename - if isinstance(arcname, text_type): - u_arcname = arcname - else: - u_arcname = arcname.decode('utf-8') - if '..' 
in u_arcname: - raise DistlibException('invalid entry in ' - 'wheel: %r' % u_arcname) - - # The signature file won't be in RECORD, - # and we don't currently don't do anything with it - if u_arcname.endswith('/RECORD.jws'): - continue - row = records[u_arcname] - if row[2] and str(zinfo.file_size) != row[2]: - raise DistlibException('size mismatch for ' - '%s' % u_arcname) - if row[1]: - kind, value = row[1].split('=', 1) - with zf.open(arcname) as bf: - data = bf.read() - _, digest = self.get_hash(data, kind) - if digest != value: - raise DistlibException('digest mismatch for ' - '%s' % arcname) - - def update(self, modifier, dest_dir=None, **kwargs): - """ - Update the contents of a wheel in a generic way. The modifier should - be a callable which expects a dictionary argument: its keys are - archive-entry paths, and its values are absolute filesystem paths - where the contents the corresponding archive entries can be found. The - modifier is free to change the contents of the files pointed to, add - new entries and remove entries, before returning. This method will - extract the entire contents of the wheel to a temporary location, call - the modifier, and then use the passed (and possibly updated) - dictionary to write a new wheel. If ``dest_dir`` is specified, the new - wheel is written there -- otherwise, the original wheel is overwritten. - - The modifier should return True if it updated the wheel, else False. - This method returns the same value the modifier returns. 
- """ - - def get_version(path_map, info_dir): - version = path = None - key = '%s/%s' % (info_dir, METADATA_FILENAME) - if key not in path_map: - key = '%s/PKG-INFO' % info_dir - if key in path_map: - path = path_map[key] - version = Metadata(path=path).version - return version, path - - def update_version(version, path): - updated = None - try: - v = NormalizedVersion(version) - i = version.find('-') - if i < 0: - updated = '%s+1' % version - else: - parts = [int(s) for s in version[i + 1:].split('.')] - parts[-1] += 1 - updated = '%s+%s' % (version[:i], - '.'.join(str(i) for i in parts)) - except UnsupportedVersionError: - logger.debug('Cannot update non-compliant (PEP-440) ' - 'version %r', version) - if updated: - md = Metadata(path=path) - md.version = updated - legacy = not path.endswith(METADATA_FILENAME) - md.write(path=path, legacy=legacy) - logger.debug('Version updated from %r to %r', version, - updated) - - pathname = os.path.join(self.dirname, self.filename) - name_ver = '%s-%s' % (self.name, self.version) - info_dir = '%s.dist-info' % name_ver - record_name = posixpath.join(info_dir, 'RECORD') - with tempdir() as workdir: - with ZipFile(pathname, 'r') as zf: - path_map = {} - for zinfo in zf.infolist(): - arcname = zinfo.filename - if isinstance(arcname, text_type): - u_arcname = arcname - else: - u_arcname = arcname.decode('utf-8') - if u_arcname == record_name: - continue - if '..' in u_arcname: - raise DistlibException('invalid entry in ' - 'wheel: %r' % u_arcname) - zf.extract(zinfo, workdir) - path = os.path.join(workdir, convert_path(u_arcname)) - path_map[u_arcname] = path - - # Remember the version. - original_version, _ = get_version(path_map, info_dir) - # Files extracted. Call the modifier. - modified = modifier(path_map, **kwargs) - if modified: - # Something changed - need to build a new wheel. 
- current_version, path = get_version(path_map, info_dir) - if current_version and (current_version == original_version): - # Add or update local version to signify changes. - update_version(current_version, path) - # Decide where the new wheel goes. - if dest_dir is None: - fd, newpath = tempfile.mkstemp(suffix='.whl', - prefix='wheel-update-', - dir=workdir) - os.close(fd) - else: - if not os.path.isdir(dest_dir): - raise DistlibException('Not a directory: %r' % dest_dir) - newpath = os.path.join(dest_dir, self.filename) - archive_paths = list(path_map.items()) - distinfo = os.path.join(workdir, info_dir) - info = distinfo, info_dir - self.write_records(info, workdir, archive_paths) - self.build_zip(newpath, archive_paths) - if dest_dir is None: - shutil.copyfile(newpath, pathname) - return modified - -def compatible_tags(): - """ - Return (pyver, abi, arch) tuples compatible with this Python. - """ - versions = [VER_SUFFIX] - major = VER_SUFFIX[0] - for minor in range(sys.version_info[1] - 1, - 1, -1): - versions.append(''.join([major, str(minor)])) - - abis = [] - for suffix, _, _ in imp.get_suffixes(): - if suffix.startswith('.abi'): - abis.append(suffix.split('.', 2)[1]) - abis.sort() - if ABI != 'none': - abis.insert(0, ABI) - abis.append('none') - result = [] - - arches = [ARCH] - if sys.platform == 'darwin': - m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH) - if m: - name, major, minor, arch = m.groups() - minor = int(minor) - matches = [arch] - if arch in ('i386', 'ppc'): - matches.append('fat') - if arch in ('i386', 'ppc', 'x86_64'): - matches.append('fat3') - if arch in ('ppc64', 'x86_64'): - matches.append('fat64') - if arch in ('i386', 'x86_64'): - matches.append('intel') - if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'): - matches.append('universal') - while minor >= 0: - for match in matches: - s = '%s_%s_%s_%s' % (name, major, minor, match) - if s != ARCH: # already there - arches.append(s) - minor -= 1 - - # Most specific - our Python 
version, ABI and arch - for abi in abis: - for arch in arches: - result.append((''.join((IMP_PREFIX, versions[0])), abi, arch)) - - # where no ABI / arch dependency, but IMP_PREFIX dependency - for i, version in enumerate(versions): - result.append((''.join((IMP_PREFIX, version)), 'none', 'any')) - if i == 0: - result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any')) - - # no IMP_PREFIX, ABI or arch dependency - for i, version in enumerate(versions): - result.append((''.join(('py', version)), 'none', 'any')) - if i == 0: - result.append((''.join(('py', version[0])), 'none', 'any')) - return set(result) - - -COMPATIBLE_TAGS = compatible_tags() - -del compatible_tags - - -def is_compatible(wheel, tags=None): - if not isinstance(wheel, Wheel): - wheel = Wheel(wheel) # assume it's a filename - result = False - if tags is None: - tags = COMPATIBLE_TAGS - for ver, abi, arch in tags: - if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch: - result = True - break - return result diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distro.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distro.py deleted file mode 100644 index aa4defc..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distro.py +++ /dev/null @@ -1,1197 +0,0 @@ -# Copyright 2015,2016,2017 Nir Cohen -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -""" -The ``distro`` package (``distro`` stands for Linux Distribution) provides -information about the Linux distribution it runs on, such as a reliable -machine-readable distro ID, or version information. - -It is a renewed alternative implementation for Python's original -:py:func:`platform.linux_distribution` function, but it provides much more -functionality. An alternative implementation became necessary because Python -3.5 deprecated this function, and Python 3.7 is expected to remove it -altogether. Its predecessor function :py:func:`platform.dist` was already -deprecated since Python 2.6 and is also expected to be removed in Python 3.7. -Still, there are many cases in which access to OS distribution information -is needed. See `Python issue 1322 <https://bugs.python.org/issue1322>`_ for -more information. -""" - -import os -import re -import sys -import json -import shlex -import logging -import argparse -import subprocess - - -_UNIXCONFDIR = os.environ.get('UNIXCONFDIR', '/etc') -_OS_RELEASE_BASENAME = 'os-release' - -#: Translation table for normalizing the "ID" attribute defined in os-release -#: files, for use by the :func:`distro.id` method. -#: -#: * Key: Value as defined in the os-release file, translated to lower case, -#: with blanks translated to underscores. -#: -#: * Value: Normalized value. -NORMALIZED_OS_ID = {} - -#: Translation table for normalizing the "Distributor ID" attribute returned by -#: the lsb_release command, for use by the :func:`distro.id` method. -#: -#: * Key: Value as returned by the lsb_release command, translated to lower -#: case, with blanks translated to underscores. -#: -#: * Value: Normalized value. 
-NORMALIZED_LSB_ID = { - 'enterpriseenterprise': 'oracle', # Oracle Enterprise Linux - 'redhatenterpriseworkstation': 'rhel', # RHEL 6, 7 Workstation - 'redhatenterpriseserver': 'rhel', # RHEL 6, 7 Server -} - -#: Translation table for normalizing the distro ID derived from the file name -#: of distro release files, for use by the :func:`distro.id` method. -#: -#: * Key: Value as derived from the file name of a distro release file, -#: translated to lower case, with blanks translated to underscores. -#: -#: * Value: Normalized value. -NORMALIZED_DISTRO_ID = { - 'redhat': 'rhel', # RHEL 6.x, 7.x -} - -# Pattern for content of distro release file (reversed) -_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile( - r'(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)') - -# Pattern for base file name of distro release file -_DISTRO_RELEASE_BASENAME_PATTERN = re.compile( - r'(\w+)[-_](release|version)$') - -# Base file names to be ignored when searching for distro release file -_DISTRO_RELEASE_IGNORE_BASENAMES = ( - 'debian_version', - 'lsb-release', - 'oem-release', - _OS_RELEASE_BASENAME, - 'system-release' -) - - -def linux_distribution(full_distribution_name=True): - """ - Return information about the current OS distribution as a tuple - ``(id_name, version, codename)`` with items as follows: - - * ``id_name``: If *full_distribution_name* is false, the result of - :func:`distro.id`. Otherwise, the result of :func:`distro.name`. - - * ``version``: The result of :func:`distro.version`. - - * ``codename``: The result of :func:`distro.codename`. - - The interface of this function is compatible with the original - :py:func:`platform.linux_distribution` function, supporting a subset of - its parameters. 
- - The data it returns may not exactly be the same, because it uses more data - sources than the original function, and that may lead to different data if - the OS distribution is not consistent across multiple data sources it - provides (there are indeed such distributions ...). - - Another reason for differences is the fact that the :func:`distro.id` - method normalizes the distro ID string to a reliable machine-readable value - for a number of popular OS distributions. - """ - return _distro.linux_distribution(full_distribution_name) - - -def id(): - """ - Return the distro ID of the current distribution, as a - machine-readable string. - - For a number of OS distributions, the returned distro ID value is - *reliable*, in the sense that it is documented and that it does not change - across releases of the distribution. - - This package maintains the following reliable distro ID values: - - ============== ========================================= - Distro ID Distribution - ============== ========================================= - "ubuntu" Ubuntu - "debian" Debian - "rhel" RedHat Enterprise Linux - "centos" CentOS - "fedora" Fedora - "sles" SUSE Linux Enterprise Server - "opensuse" openSUSE - "amazon" Amazon Linux - "arch" Arch Linux - "cloudlinux" CloudLinux OS - "exherbo" Exherbo Linux - "gentoo" GenToo Linux - "ibm_powerkvm" IBM PowerKVM - "kvmibm" KVM for IBM z Systems - "linuxmint" Linux Mint - "mageia" Mageia - "mandriva" Mandriva Linux - "parallels" Parallels - "pidora" Pidora - "raspbian" Raspbian - "oracle" Oracle Linux (and Oracle Enterprise Linux) - "scientific" Scientific Linux - "slackware" Slackware - "xenserver" XenServer - "openbsd" OpenBSD - "netbsd" NetBSD - "freebsd" FreeBSD - ============== ========================================= - - If you have a need to get distros for reliable IDs added into this set, - or if you find that the :func:`distro.id` function returns a different - distro ID for one of the listed distros, please create an issue 
in the - `distro issue tracker`_. - - **Lookup hierarchy and transformations:** - - First, the ID is obtained from the following sources, in the specified - order. The first available and non-empty value is used: - - * the value of the "ID" attribute of the os-release file, - - * the value of the "Distributor ID" attribute returned by the lsb_release - command, - - * the first part of the file name of the distro release file, - - The so determined ID value then passes the following transformations, - before it is returned by this method: - - * it is translated to lower case, - - * blanks (which should not be there anyway) are translated to underscores, - - * a normalization of the ID is performed, based upon - `normalization tables`_. The purpose of this normalization is to ensure - that the ID is as reliable as possible, even across incompatible changes - in the OS distributions. A common reason for an incompatible change is - the addition of an os-release file, or the addition of the lsb_release - command, with ID values that differ from what was previously determined - from the distro release file name. - """ - return _distro.id() - - -def name(pretty=False): - """ - Return the name of the current OS distribution, as a human-readable - string. - - If *pretty* is false, the name is returned without version or codename. - (e.g. "CentOS Linux") - - If *pretty* is true, the version and codename are appended. - (e.g. "CentOS Linux 7.1.1503 (Core)") - - **Lookup hierarchy:** - - The name is obtained from the following sources, in the specified order. - The first available and non-empty value is used: - - * If *pretty* is false: - - - the value of the "NAME" attribute of the os-release file, - - - the value of the "Distributor ID" attribute returned by the lsb_release - command, - - - the value of the "<name>" field of the distro release file. 
- - * If *pretty* is true: - - - the value of the "PRETTY_NAME" attribute of the os-release file, - - - the value of the "Description" attribute returned by the lsb_release - command, - - - the value of the "<name>" field of the distro release file, appended - with the value of the pretty version ("<version_id>" and "<codename>" - fields) of the distro release file, if available. - """ - return _distro.name(pretty) - - -def version(pretty=False, best=False): - """ - Return the version of the current OS distribution, as a human-readable - string. - - If *pretty* is false, the version is returned without codename (e.g. - "7.0"). - - If *pretty* is true, the codename in parenthesis is appended, if the - codename is non-empty (e.g. "7.0 (Maipo)"). - - Some distributions provide version numbers with different precisions in - the different sources of distribution information. Examining the different - sources in a fixed priority order does not always yield the most precise - version (e.g. for Debian 8.2, or CentOS 7.1). - - The *best* parameter can be used to control the approach for the returned - version: - - If *best* is false, the first non-empty version number in priority order of - the examined sources is returned. - - If *best* is true, the most precise version number out of all examined - sources is returned. - - **Lookup hierarchy:** - - In all cases, the version number is obtained from the following sources. - If *best* is false, this order represents the priority order: - - * the value of the "VERSION_ID" attribute of the os-release file, - * the value of the "Release" attribute returned by the lsb_release - command, - * the version number parsed from the "<version_id>" field of the first line - of the distro release file, - * the version number parsed from the "PRETTY_NAME" attribute of the - os-release file, if it follows the format of the distro release files. 
- * the version number parsed from the "Description" attribute returned by - the lsb_release command, if it follows the format of the distro release - files. - """ - return _distro.version(pretty, best) - - -def version_parts(best=False): - """ - Return the version of the current OS distribution as a tuple - ``(major, minor, build_number)`` with items as follows: - - * ``major``: The result of :func:`distro.major_version`. - - * ``minor``: The result of :func:`distro.minor_version`. - - * ``build_number``: The result of :func:`distro.build_number`. - - For a description of the *best* parameter, see the :func:`distro.version` - method. - """ - return _distro.version_parts(best) - - -def major_version(best=False): - """ - Return the major version of the current OS distribution, as a string, - if provided. - Otherwise, the empty string is returned. The major version is the first - part of the dot-separated version string. - - For a description of the *best* parameter, see the :func:`distro.version` - method. - """ - return _distro.major_version(best) - - -def minor_version(best=False): - """ - Return the minor version of the current OS distribution, as a string, - if provided. - Otherwise, the empty string is returned. The minor version is the second - part of the dot-separated version string. - - For a description of the *best* parameter, see the :func:`distro.version` - method. - """ - return _distro.minor_version(best) - - -def build_number(best=False): - """ - Return the build number of the current OS distribution, as a string, - if provided. - Otherwise, the empty string is returned. The build number is the third part - of the dot-separated version string. - - For a description of the *best* parameter, see the :func:`distro.version` - method. 
- """ - return _distro.build_number(best) - - -def like(): - """ - Return a space-separated list of distro IDs of distributions that are - closely related to the current OS distribution in regards to packaging - and programming interfaces, for example distributions the current - distribution is a derivative from. - - **Lookup hierarchy:** - - This information item is only provided by the os-release file. - For details, see the description of the "ID_LIKE" attribute in the - `os-release man page - <http://www.freedesktop.org/software/systemd/man/os-release.html>`_. - """ - return _distro.like() - - -def codename(): - """ - Return the codename for the release of the current OS distribution, - as a string. - - If the distribution does not have a codename, an empty string is returned. - - Note that the returned codename is not always really a codename. For - example, openSUSE returns "x86_64". This function does not handle such - cases in any special way and just returns the string it finds, if any. - - **Lookup hierarchy:** - - * the codename within the "VERSION" attribute of the os-release file, if - provided, - - * the value of the "Codename" attribute returned by the lsb_release - command, - - * the value of the "<codename>" field of the distro release file. - """ - return _distro.codename() - - -def info(pretty=False, best=False): - """ - Return certain machine-readable information items about the current OS - distribution in a dictionary, as shown in the following example: - - .. sourcecode:: python - - { - 'id': 'rhel', - 'version': '7.0', - 'version_parts': { - 'major': '7', - 'minor': '0', - 'build_number': '' - }, - 'like': 'fedora', - 'codename': 'Maipo' - } - - The dictionary structure and keys are always the same, regardless of which - information items are available in the underlying data sources. The values - for the various keys are as follows: - - * ``id``: The result of :func:`distro.id`. - - * ``version``: The result of :func:`distro.version`. 
- - * ``version_parts -> major``: The result of :func:`distro.major_version`. - - * ``version_parts -> minor``: The result of :func:`distro.minor_version`. - - * ``version_parts -> build_number``: The result of - :func:`distro.build_number`. - - * ``like``: The result of :func:`distro.like`. - - * ``codename``: The result of :func:`distro.codename`. - - For a description of the *pretty* and *best* parameters, see the - :func:`distro.version` method. - """ - return _distro.info(pretty, best) - - -def os_release_info(): - """ - Return a dictionary containing key-value pairs for the information items - from the os-release file data source of the current OS distribution. - - See `os-release file`_ for details about these information items. - """ - return _distro.os_release_info() - - -def lsb_release_info(): - """ - Return a dictionary containing key-value pairs for the information items - from the lsb_release command data source of the current OS distribution. - - See `lsb_release command output`_ for details about these information - items. - """ - return _distro.lsb_release_info() - - -def distro_release_info(): - """ - Return a dictionary containing key-value pairs for the information items - from the distro release file data source of the current OS distribution. - - See `distro release file`_ for details about these information items. - """ - return _distro.distro_release_info() - - -def uname_info(): - """ - Return a dictionary containing key-value pairs for the information items - from the distro release file data source of the current OS distribution. - """ - return _distro.uname_info() - - -def os_release_attr(attribute): - """ - Return a single named information item from the os-release file data source - of the current OS distribution. - - Parameters: - - * ``attribute`` (string): Key of the information item. - - Returns: - - * (string): Value of the information item, if the item exists. - The empty string, if the item does not exist. 
- - See `os-release file`_ for details about these information items. - """ - return _distro.os_release_attr(attribute) - - -def lsb_release_attr(attribute): - """ - Return a single named information item from the lsb_release command output - data source of the current OS distribution. - - Parameters: - - * ``attribute`` (string): Key of the information item. - - Returns: - - * (string): Value of the information item, if the item exists. - The empty string, if the item does not exist. - - See `lsb_release command output`_ for details about these information - items. - """ - return _distro.lsb_release_attr(attribute) - - -def distro_release_attr(attribute): - """ - Return a single named information item from the distro release file - data source of the current OS distribution. - - Parameters: - - * ``attribute`` (string): Key of the information item. - - Returns: - - * (string): Value of the information item, if the item exists. - The empty string, if the item does not exist. - - See `distro release file`_ for details about these information items. - """ - return _distro.distro_release_attr(attribute) - - -def uname_attr(attribute): - """ - Return a single named information item from the distro release file - data source of the current OS distribution. - - Parameters: - - * ``attribute`` (string): Key of the information item. - - Returns: - - * (string): Value of the information item, if the item exists. - The empty string, if the item does not exist. - """ - return _distro.uname_attr(attribute) - - -class cached_property(object): - """A version of @property which caches the value. On access, it calls the - underlying function and sets the value in `__dict__` so future accesses - will not re-call the property. 
- """ - def __init__(self, f): - self._fname = f.__name__ - self._f = f - - def __get__(self, obj, owner): - assert obj is not None, 'call {} on an instance'.format(self._fname) - ret = obj.__dict__[self._fname] = self._f(obj) - return ret - - -class LinuxDistribution(object): - """ - Provides information about a OS distribution. - - This package creates a private module-global instance of this class with - default initialization arguments, that is used by the - `consolidated accessor functions`_ and `single source accessor functions`_. - By using default initialization arguments, that module-global instance - returns data about the current OS distribution (i.e. the distro this - package runs on). - - Normally, it is not necessary to create additional instances of this class. - However, in situations where control is needed over the exact data sources - that are used, instances of this class can be created with a specific - distro release file, or a specific os-release file, or without invoking the - lsb_release command. - """ - - def __init__(self, - include_lsb=True, - os_release_file='', - distro_release_file='', - include_uname=True): - """ - The initialization method of this class gathers information from the - available data sources, and stores that in private instance attributes. - Subsequent access to the information items uses these private instance - attributes, so that the data sources are read only once. - - Parameters: - - * ``include_lsb`` (bool): Controls whether the - `lsb_release command output`_ is included as a data source. - - If the lsb_release command is not available in the program execution - path, the data source for the lsb_release command will be empty. - - * ``os_release_file`` (string): The path name of the - `os-release file`_ that is to be used as a data source. - - An empty string (the default) will cause the default path name to - be used (see `os-release file`_ for details). 
- - If the specified or defaulted os-release file does not exist, the - data source for the os-release file will be empty. - - * ``distro_release_file`` (string): The path name of the - `distro release file`_ that is to be used as a data source. - - An empty string (the default) will cause a default search algorithm - to be used (see `distro release file`_ for details). - - If the specified distro release file does not exist, or if no default - distro release file can be found, the data source for the distro - release file will be empty. - - * ``include_name`` (bool): Controls whether uname command output is - included as a data source. If the uname command is not available in - the program execution path the data source for the uname command will - be empty. - - Public instance attributes: - - * ``os_release_file`` (string): The path name of the - `os-release file`_ that is actually used as a data source. The - empty string if no distro release file is used as a data source. - - * ``distro_release_file`` (string): The path name of the - `distro release file`_ that is actually used as a data source. The - empty string if no distro release file is used as a data source. - - * ``include_lsb`` (bool): The result of the ``include_lsb`` parameter. - This controls whether the lsb information will be loaded. - - * ``include_uname`` (bool): The result of the ``include_uname`` - parameter. This controls whether the uname information will - be loaded. - - Raises: - - * :py:exc:`IOError`: Some I/O issue with an os-release file or distro - release file. - - * :py:exc:`subprocess.CalledProcessError`: The lsb_release command had - some issue (other than not being available in the program execution - path). - - * :py:exc:`UnicodeError`: A data source has unexpected characters or - uses an unexpected encoding. 
- """ - self.os_release_file = os_release_file or \ - os.path.join(_UNIXCONFDIR, _OS_RELEASE_BASENAME) - self.distro_release_file = distro_release_file or '' # updated later - self.include_lsb = include_lsb - self.include_uname = include_uname - - def __repr__(self): - """Return repr of all info - """ - return \ - "LinuxDistribution(" \ - "os_release_file={self.os_release_file!r}, " \ - "distro_release_file={self.distro_release_file!r}, " \ - "include_lsb={self.include_lsb!r}, " \ - "include_uname={self.include_uname!r}, " \ - "_os_release_info={self._os_release_info!r}, " \ - "_lsb_release_info={self._lsb_release_info!r}, " \ - "_distro_release_info={self._distro_release_info!r}, " \ - "_uname_info={self._uname_info!r})".format( - self=self) - - def linux_distribution(self, full_distribution_name=True): - """ - Return information about the OS distribution that is compatible - with Python's :func:`platform.linux_distribution`, supporting a subset - of its parameters. - - For details, see :func:`distro.linux_distribution`. - """ - return ( - self.name() if full_distribution_name else self.id(), - self.version(), - self.codename() - ) - - def id(self): - """Return the distro ID of the OS distribution, as a string. - - For details, see :func:`distro.id`. - """ - def normalize(distro_id, table): - distro_id = distro_id.lower().replace(' ', '_') - return table.get(distro_id, distro_id) - - distro_id = self.os_release_attr('id') - if distro_id: - return normalize(distro_id, NORMALIZED_OS_ID) - - distro_id = self.lsb_release_attr('distributor_id') - if distro_id: - return normalize(distro_id, NORMALIZED_LSB_ID) - - distro_id = self.distro_release_attr('id') - if distro_id: - return normalize(distro_id, NORMALIZED_DISTRO_ID) - - distro_id = self.uname_attr('id') - if distro_id: - return normalize(distro_id, NORMALIZED_DISTRO_ID) - - return '' - - def name(self, pretty=False): - """ - Return the name of the OS distribution, as a string. 
- - For details, see :func:`distro.name`. - """ - name = self.os_release_attr('name') \ - or self.lsb_release_attr('distributor_id') \ - or self.distro_release_attr('name') \ - or self.uname_attr('name') - if pretty: - name = self.os_release_attr('pretty_name') \ - or self.lsb_release_attr('description') - if not name: - name = self.distro_release_attr('name') \ - or self.uname_attr('name') - version = self.version(pretty=True) - if version: - name = name + ' ' + version - return name or '' - - def version(self, pretty=False, best=False): - """ - Return the version of the OS distribution, as a string. - - For details, see :func:`distro.version`. - """ - versions = [ - self.os_release_attr('version_id'), - self.lsb_release_attr('release'), - self.distro_release_attr('version_id'), - self._parse_distro_release_content( - self.os_release_attr('pretty_name')).get('version_id', ''), - self._parse_distro_release_content( - self.lsb_release_attr('description')).get('version_id', ''), - self.uname_attr('release') - ] - version = '' - if best: - # This algorithm uses the last version in priority order that has - # the best precision. If the versions are not in conflict, that - # does not matter; otherwise, using the last one instead of the - # first one might be considered a surprise. - for v in versions: - if v.count(".") > version.count(".") or version == '': - version = v - else: - for v in versions: - if v != '': - version = v - break - if pretty and version and self.codename(): - version = u'{0} ({1})'.format(version, self.codename()) - return version - - def version_parts(self, best=False): - """ - Return the version of the OS distribution, as a tuple of version - numbers. - - For details, see :func:`distro.version_parts`. 
- """ - version_str = self.version(best=best) - if version_str: - version_regex = re.compile(r'(\d+)\.?(\d+)?\.?(\d+)?') - matches = version_regex.match(version_str) - if matches: - major, minor, build_number = matches.groups() - return major, minor or '', build_number or '' - return '', '', '' - - def major_version(self, best=False): - """ - Return the major version number of the current distribution. - - For details, see :func:`distro.major_version`. - """ - return self.version_parts(best)[0] - - def minor_version(self, best=False): - """ - Return the minor version number of the current distribution. - - For details, see :func:`distro.minor_version`. - """ - return self.version_parts(best)[1] - - def build_number(self, best=False): - """ - Return the build number of the current distribution. - - For details, see :func:`distro.build_number`. - """ - return self.version_parts(best)[2] - - def like(self): - """ - Return the IDs of distributions that are like the OS distribution. - - For details, see :func:`distro.like`. - """ - return self.os_release_attr('id_like') or '' - - def codename(self): - """ - Return the codename of the OS distribution. - - For details, see :func:`distro.codename`. - """ - return self.os_release_attr('codename') \ - or self.lsb_release_attr('codename') \ - or self.distro_release_attr('codename') \ - or '' - - def info(self, pretty=False, best=False): - """ - Return certain machine-readable information about the OS - distribution. - - For details, see :func:`distro.info`. - """ - return dict( - id=self.id(), - version=self.version(pretty, best), - version_parts=dict( - major=self.major_version(best), - minor=self.minor_version(best), - build_number=self.build_number(best) - ), - like=self.like(), - codename=self.codename(), - ) - - def os_release_info(self): - """ - Return a dictionary containing key-value pairs for the information - items from the os-release file data source of the OS distribution. 
- - For details, see :func:`distro.os_release_info`. - """ - return self._os_release_info - - def lsb_release_info(self): - """ - Return a dictionary containing key-value pairs for the information - items from the lsb_release command data source of the OS - distribution. - - For details, see :func:`distro.lsb_release_info`. - """ - return self._lsb_release_info - - def distro_release_info(self): - """ - Return a dictionary containing key-value pairs for the information - items from the distro release file data source of the OS - distribution. - - For details, see :func:`distro.distro_release_info`. - """ - return self._distro_release_info - - def uname_info(self): - """ - Return a dictionary containing key-value pairs for the information - items from the uname command data source of the OS distribution. - - For details, see :func:`distro.uname_info`. - """ - - def os_release_attr(self, attribute): - """ - Return a single named information item from the os-release file data - source of the OS distribution. - - For details, see :func:`distro.os_release_attr`. - """ - return self._os_release_info.get(attribute, '') - - def lsb_release_attr(self, attribute): - """ - Return a single named information item from the lsb_release command - output data source of the OS distribution. - - For details, see :func:`distro.lsb_release_attr`. - """ - return self._lsb_release_info.get(attribute, '') - - def distro_release_attr(self, attribute): - """ - Return a single named information item from the distro release file - data source of the OS distribution. - - For details, see :func:`distro.distro_release_attr`. - """ - return self._distro_release_info.get(attribute, '') - - def uname_attr(self, attribute): - """ - Return a single named information item from the uname command - output data source of the OS distribution. - - For details, see :func:`distro.uname_release_attr`. 
- """ - return self._uname_info.get(attribute, '') - - @cached_property - def _os_release_info(self): - """ - Get the information items from the specified os-release file. - - Returns: - A dictionary containing all information items. - """ - if os.path.isfile(self.os_release_file): - with open(self.os_release_file) as release_file: - return self._parse_os_release_content(release_file) - return {} - - @staticmethod - def _parse_os_release_content(lines): - """ - Parse the lines of an os-release file. - - Parameters: - - * lines: Iterable through the lines in the os-release file. - Each line must be a unicode string or a UTF-8 encoded byte - string. - - Returns: - A dictionary containing all information items. - """ - props = {} - lexer = shlex.shlex(lines, posix=True) - lexer.whitespace_split = True - - # The shlex module defines its `wordchars` variable using literals, - # making it dependent on the encoding of the Python source file. - # In Python 2.6 and 2.7, the shlex source file is encoded in - # 'iso-8859-1', and the `wordchars` variable is defined as a byte - # string. This causes a UnicodeDecodeError to be raised when the - # parsed content is a unicode object. The following fix resolves that - # (... but it should be fixed in shlex...): - if sys.version_info[0] == 2 and isinstance(lexer.wordchars, bytes): - lexer.wordchars = lexer.wordchars.decode('iso-8859-1') - - tokens = list(lexer) - for token in tokens: - # At this point, all shell-like parsing has been done (i.e. 
- # comments processed, quotes and backslash escape sequences - # processed, multi-line values assembled, trailing newlines - # stripped, etc.), so the tokens are now either: - # * variable assignments: var=value - # * commands or their arguments (not allowed in os-release) - if '=' in token: - k, v = token.split('=', 1) - if isinstance(v, bytes): - v = v.decode('utf-8') - props[k.lower()] = v - if k == 'VERSION': - # this handles cases in which the codename is in - # the `(CODENAME)` (rhel, centos, fedora) format - # or in the `, CODENAME` format (Ubuntu). - codename = re.search(r'(\(\D+\))|,(\s+)?\D+', v) - if codename: - codename = codename.group() - codename = codename.strip('()') - codename = codename.strip(',') - codename = codename.strip() - # codename appears within paranthese. - props['codename'] = codename - else: - props['codename'] = '' - else: - # Ignore any tokens that are not variable assignments - pass - return props - - @cached_property - def _lsb_release_info(self): - """ - Get the information items from the lsb_release command output. - - Returns: - A dictionary containing all information items. - """ - if not self.include_lsb: - return {} - with open(os.devnull, 'w') as devnull: - try: - cmd = ('lsb_release', '-a') - stdout = subprocess.check_output(cmd, stderr=devnull) - except OSError: # Command not found - return {} - content = stdout.decode(sys.getfilesystemencoding()).splitlines() - return self._parse_lsb_release_content(content) - - @staticmethod - def _parse_lsb_release_content(lines): - """ - Parse the output of the lsb_release command. - - Parameters: - - * lines: Iterable through the lines of the lsb_release output. - Each line must be a unicode string or a UTF-8 encoded byte - string. - - Returns: - A dictionary containing all information items. - """ - props = {} - for line in lines: - kv = line.strip('\n').split(':', 1) - if len(kv) != 2: - # Ignore lines without colon. 
- continue - k, v = kv - props.update({k.replace(' ', '_').lower(): v.strip()}) - return props - - @cached_property - def _uname_info(self): - with open(os.devnull, 'w') as devnull: - try: - cmd = ('uname', '-rs') - stdout = subprocess.check_output(cmd, stderr=devnull) - except OSError: - return {} - content = stdout.decode(sys.getfilesystemencoding()).splitlines() - return self._parse_uname_content(content) - - @staticmethod - def _parse_uname_content(lines): - props = {} - match = re.search(r'^([^\s]+)\s+([\d\.]+)', lines[0].strip()) - if match: - name, version = match.groups() - - # This is to prevent the Linux kernel version from - # appearing as the 'best' version on otherwise - # identifiable distributions. - if name == 'Linux': - return {} - props['id'] = name.lower() - props['name'] = name - props['release'] = version - return props - - @cached_property - def _distro_release_info(self): - """ - Get the information items from the specified distro release file. - - Returns: - A dictionary containing all information items. - """ - if self.distro_release_file: - # If it was specified, we use it and parse what we can, even if - # its file name or content does not match the expected pattern. - distro_info = self._parse_distro_release_file( - self.distro_release_file) - basename = os.path.basename(self.distro_release_file) - # The file name pattern for user-specified distro release files - # is somewhat more tolerant (compared to when searching for the - # file), because we want to use what was specified as best as - # possible. - match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) - if match: - distro_info['id'] = match.group(1) - return distro_info - else: - try: - basenames = os.listdir(_UNIXCONFDIR) - # We sort for repeatability in cases where there are multiple - # distro specific files; e.g. CentOS, Oracle, Enterprise all - # containing `redhat-release` on top of their own. 
- basenames.sort() - except OSError: - # This may occur when /etc is not readable but we can't be - # sure about the *-release files. Check common entries of - # /etc for information. If they turn out to not be there the - # error is handled in `_parse_distro_release_file()`. - basenames = ['SuSE-release', - 'arch-release', - 'base-release', - 'centos-release', - 'fedora-release', - 'gentoo-release', - 'mageia-release', - 'mandrake-release', - 'mandriva-release', - 'mandrivalinux-release', - 'manjaro-release', - 'oracle-release', - 'redhat-release', - 'sl-release', - 'slackware-version'] - for basename in basenames: - if basename in _DISTRO_RELEASE_IGNORE_BASENAMES: - continue - match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) - if match: - filepath = os.path.join(_UNIXCONFDIR, basename) - distro_info = self._parse_distro_release_file(filepath) - if 'name' in distro_info: - # The name is always present if the pattern matches - self.distro_release_file = filepath - distro_info['id'] = match.group(1) - return distro_info - return {} - - def _parse_distro_release_file(self, filepath): - """ - Parse a distro release file. - - Parameters: - - * filepath: Path name of the distro release file. - - Returns: - A dictionary containing all information items. - """ - try: - with open(filepath) as fp: - # Only parse the first line. For instance, on SLES there - # are multiple lines. We don't want them... - return self._parse_distro_release_content(fp.readline()) - except (OSError, IOError): - # Ignore not being able to read a specific, seemingly version - # related file. - # See https://github.com/nir0s/distro/issues/162 - return {} - - @staticmethod - def _parse_distro_release_content(line): - """ - Parse a line from a distro release file. - - Parameters: - * line: Line from the distro release file. Must be a unicode string - or a UTF-8 encoded byte string. - - Returns: - A dictionary containing all information items. 
- """ - if isinstance(line, bytes): - line = line.decode('utf-8') - matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match( - line.strip()[::-1]) - distro_info = {} - if matches: - # regexp ensures non-None - distro_info['name'] = matches.group(3)[::-1] - if matches.group(2): - distro_info['version_id'] = matches.group(2)[::-1] - if matches.group(1): - distro_info['codename'] = matches.group(1)[::-1] - elif line: - distro_info['name'] = line.strip() - return distro_info - - -_distro = LinuxDistribution() - - -def main(): - logger = logging.getLogger(__name__) - logger.setLevel(logging.DEBUG) - logger.addHandler(logging.StreamHandler(sys.stdout)) - - parser = argparse.ArgumentParser(description="OS distro info tool") - parser.add_argument( - '--json', - '-j', - help="Output in machine readable format", - action="store_true") - args = parser.parse_args() - - if args.json: - logger.info(json.dumps(info(), indent=4, sort_keys=True)) - else: - logger.info('Name: %s', name(pretty=True)) - distribution_version = version(pretty=True) - logger.info('Version: %s', distribution_version) - distribution_codename = codename() - logger.info('Codename: %s', distribution_codename) - - -if __name__ == '__main__': - main() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/__init__.py deleted file mode 100644 index 0491234..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/__init__.py +++ /dev/null @@ -1,35 +0,0 @@ -""" -HTML parsing library based on the `WHATWG HTML specification -<https://whatwg.org/html>`_. The parser is designed to be compatible with -existing HTML found in the wild and implements well-defined error recovery that -is largely compatible with modern desktop web browsers. 
- -Example usage:: - - from pip._vendor import html5lib - with open("my_document.html", "rb") as f: - tree = html5lib.parse(f) - -For convenience, this module re-exports the following names: - -* :func:`~.html5parser.parse` -* :func:`~.html5parser.parseFragment` -* :class:`~.html5parser.HTMLParser` -* :func:`~.treebuilders.getTreeBuilder` -* :func:`~.treewalkers.getTreeWalker` -* :func:`~.serializer.serialize` -""" - -from __future__ import absolute_import, division, unicode_literals - -from .html5parser import HTMLParser, parse, parseFragment -from .treebuilders import getTreeBuilder -from .treewalkers import getTreeWalker -from .serializer import serialize - -__all__ = ["HTMLParser", "parse", "parseFragment", "getTreeBuilder", - "getTreeWalker", "serialize"] - -# this has to be at the top level, see how setup.py parses this -#: Distribution version number. -__version__ = "1.0.1" diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/_ihatexml.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/_ihatexml.py deleted file mode 100644 index 4c77717..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/_ihatexml.py +++ /dev/null @@ -1,288 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -import re -import warnings - -from .constants import DataLossWarning - -baseChar = """ -[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] | -[#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] | -[#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] | -[#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 | -[#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] | -[#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] | -[#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] | -[#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] | -[#x04EE-#x04F5] 
| [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 | -[#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] | -[#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] | -[#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D | -[#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] | -[#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] | -[#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] | -[#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] | -[#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] | -[#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] | -[#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 | -[#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] | -[#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] | -[#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] | -[#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] | -[#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] | -[#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] | -[#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] | -[#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] | -[#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] | -[#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] | -[#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A | -#x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 | -#x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] | -#x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] | -[#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] | -[#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C | -#x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 | -[#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | 
[#x116D-#x116E] | -[#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] | -[#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 | -[#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] | -[#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B | -#x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE | -[#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] | -[#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 | -[#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] | -[#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]""" - -ideographic = """[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]""" - -combiningCharacter = """ -[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] | -[#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 | -[#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] | -[#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] | -#x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] | -[#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] | -[#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 | -#x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] | -[#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC | -[#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] | -#x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] | -[#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] | -[#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] | -[#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] | -[#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] | -[#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] | -#x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 | -[#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] | -#x0F35 | 
#x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] | -[#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] | -[#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] | -#x3099 | #x309A""" - -digit = """ -[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] | -[#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] | -[#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] | -[#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]""" - -extender = """ -#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 | -#[#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]""" - -letter = " | ".join([baseChar, ideographic]) - -# Without the -name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter, - extender]) -nameFirst = " | ".join([letter, "_"]) - -reChar = re.compile(r"#x([\d|A-F]{4,4})") -reCharRange = re.compile(r"\[#x([\d|A-F]{4,4})-#x([\d|A-F]{4,4})\]") - - -def charStringToList(chars): - charRanges = [item.strip() for item in chars.split(" | ")] - rv = [] - for item in charRanges: - foundMatch = False - for regexp in (reChar, reCharRange): - match = regexp.match(item) - if match is not None: - rv.append([hexToInt(item) for item in match.groups()]) - if len(rv[-1]) == 1: - rv[-1] = rv[-1] * 2 - foundMatch = True - break - if not foundMatch: - assert len(item) == 1 - - rv.append([ord(item)] * 2) - rv = normaliseCharList(rv) - return rv - - -def normaliseCharList(charList): - charList = sorted(charList) - for item in charList: - assert item[1] >= item[0] - rv = [] - i = 0 - while i < len(charList): - j = 1 - rv.append(charList[i]) - while i + j < len(charList) and charList[i + j][0] <= rv[-1][1] + 1: - rv[-1][1] = charList[i + j][1] - j += 1 - i += j - return rv - -# We don't really support characters above the BMP :( -max_unicode = int("FFFF", 16) - - -def missingRanges(charList): - rv = [] - if charList[0] != 0: - rv.append([0, charList[0][0] - 1]) - for i, item in 
enumerate(charList[:-1]): - rv.append([item[1] + 1, charList[i + 1][0] - 1]) - if charList[-1][1] != max_unicode: - rv.append([charList[-1][1] + 1, max_unicode]) - return rv - - -def listToRegexpStr(charList): - rv = [] - for item in charList: - if item[0] == item[1]: - rv.append(escapeRegexp(chr(item[0]))) - else: - rv.append(escapeRegexp(chr(item[0])) + "-" + - escapeRegexp(chr(item[1]))) - return "[%s]" % "".join(rv) - - -def hexToInt(hex_str): - return int(hex_str, 16) - - -def escapeRegexp(string): - specialCharacters = (".", "^", "$", "*", "+", "?", "{", "}", - "[", "]", "|", "(", ")", "-") - for char in specialCharacters: - string = string.replace(char, "\\" + char) - - return string - -# output from the above -nonXmlNameBMPRegexp = re.compile('[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u
0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]') # noqa - -nonXmlNameFirstBMPRegexp = 
re.compile('[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3040\u3095-\u30a0\u30fb-\u31
04\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]') # noqa - -# Simpler things -nonPubidCharRegexp = re.compile("[^\x20\x0D\x0Aa-zA-Z0-9\\-'()+,./:=?;!*#@$_%]") - - -class InfosetFilter(object): - replacementRegexp = re.compile(r"U[\dA-F]{5,5}") - - def __init__(self, - dropXmlnsLocalName=False, - dropXmlnsAttrNs=False, - preventDoubleDashComments=False, - preventDashAtCommentEnd=False, - replaceFormFeedCharacters=True, - preventSingleQuotePubid=False): - - self.dropXmlnsLocalName = dropXmlnsLocalName - self.dropXmlnsAttrNs = dropXmlnsAttrNs - - self.preventDoubleDashComments = preventDoubleDashComments - self.preventDashAtCommentEnd = preventDashAtCommentEnd - - self.replaceFormFeedCharacters = replaceFormFeedCharacters - - self.preventSingleQuotePubid = preventSingleQuotePubid - - self.replaceCache = {} - - def coerceAttribute(self, name, namespace=None): - if self.dropXmlnsLocalName and name.startswith("xmlns:"): - warnings.warn("Attributes cannot begin with xmlns", DataLossWarning) - return None - elif (self.dropXmlnsAttrNs and - namespace == "http://www.w3.org/2000/xmlns/"): - warnings.warn("Attributes cannot be in the xml namespace", DataLossWarning) - return None - else: - return self.toXmlName(name) - - def coerceElement(self, name): - return self.toXmlName(name) - - def coerceComment(self, data): - if self.preventDoubleDashComments: - while "--" in data: - warnings.warn("Comments cannot contain adjacent dashes", DataLossWarning) - data = data.replace("--", "- -") - if data.endswith("-"): - warnings.warn("Comments cannot end in a dash", DataLossWarning) - data += " " - return data - - def coerceCharacters(self, data): - if self.replaceFormFeedCharacters: - for _ in range(data.count("\x0C")): - warnings.warn("Text cannot contain U+000C", DataLossWarning) - data = data.replace("\x0C", " ") - # Other non-xml characters - return data - - def coercePubid(self, data): - dataOutput = data - for char in nonPubidCharRegexp.findall(data): - warnings.warn("Coercing non-XML 
pubid", DataLossWarning) - replacement = self.getReplacementCharacter(char) - dataOutput = dataOutput.replace(char, replacement) - if self.preventSingleQuotePubid and dataOutput.find("'") >= 0: - warnings.warn("Pubid cannot contain single quote", DataLossWarning) - dataOutput = dataOutput.replace("'", self.getReplacementCharacter("'")) - return dataOutput - - def toXmlName(self, name): - nameFirst = name[0] - nameRest = name[1:] - m = nonXmlNameFirstBMPRegexp.match(nameFirst) - if m: - warnings.warn("Coercing non-XML name", DataLossWarning) - nameFirstOutput = self.getReplacementCharacter(nameFirst) - else: - nameFirstOutput = nameFirst - - nameRestOutput = nameRest - replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest)) - for char in replaceChars: - warnings.warn("Coercing non-XML name", DataLossWarning) - replacement = self.getReplacementCharacter(char) - nameRestOutput = nameRestOutput.replace(char, replacement) - return nameFirstOutput + nameRestOutput - - def getReplacementCharacter(self, char): - if char in self.replaceCache: - replacement = self.replaceCache[char] - else: - replacement = self.escapeChar(char) - return replacement - - def fromXmlName(self, name): - for item in set(self.replacementRegexp.findall(name)): - name = name.replace(item, self.unescapeChar(item)) - return name - - def escapeChar(self, char): - replacement = "U%05X" % ord(char) - self.replaceCache[char] = replacement - return replacement - - def unescapeChar(self, charcode): - return chr(int(charcode[1:], 16)) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/_inputstream.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/_inputstream.py deleted file mode 100644 index a65e55f..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/_inputstream.py +++ /dev/null @@ -1,923 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from pip._vendor.six import 
text_type, binary_type -from pip._vendor.six.moves import http_client, urllib - -import codecs -import re - -from pip._vendor import webencodings - -from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase -from .constants import _ReparseException -from . import _utils - -from io import StringIO - -try: - from io import BytesIO -except ImportError: - BytesIO = StringIO - -# Non-unicode versions of constants for use in the pre-parser -spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters]) -asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters]) -asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase]) -spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"]) - - -invalid_unicode_no_surrogate = "[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]" # noqa - -if _utils.supports_lone_surrogates: - # Use one extra step of indirection and create surrogates with - # eval. Not using this indirection would introduce an illegal - # unicode literal on platforms not supporting such lone - # surrogates. 
- assert invalid_unicode_no_surrogate[-1] == "]" and invalid_unicode_no_surrogate.count("]") == 1 - invalid_unicode_re = re.compile(invalid_unicode_no_surrogate[:-1] + - eval('"\\uD800-\\uDFFF"') + # pylint:disable=eval-used - "]") -else: - invalid_unicode_re = re.compile(invalid_unicode_no_surrogate) - -non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE, - 0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF, - 0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE, - 0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF, - 0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE, - 0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF, - 0x10FFFE, 0x10FFFF]) - -ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005C\u005B-\u0060\u007B-\u007E]") - -# Cache for charsUntil() -charsUntilRegEx = {} - - -class BufferedStream(object): - """Buffering for streams that do not have buffering of their own - - The buffer is implemented as a list of chunks on the assumption that - joining many strings will be slow since it is O(n**2) - """ - - def __init__(self, stream): - self.stream = stream - self.buffer = [] - self.position = [-1, 0] # chunk number, offset - - def tell(self): - pos = 0 - for chunk in self.buffer[:self.position[0]]: - pos += len(chunk) - pos += self.position[1] - return pos - - def seek(self, pos): - assert pos <= self._bufferedBytes() - offset = pos - i = 0 - while len(self.buffer[i]) < offset: - offset -= len(self.buffer[i]) - i += 1 - self.position = [i, offset] - - def read(self, bytes): - if not self.buffer: - return self._readStream(bytes) - elif (self.position[0] == len(self.buffer) and - self.position[1] == len(self.buffer[-1])): - return self._readStream(bytes) - else: - return self._readFromBuffer(bytes) - - def _bufferedBytes(self): - return sum([len(item) for item in self.buffer]) - - def _readStream(self, bytes): - data = self.stream.read(bytes) - self.buffer.append(data) - self.position[0] += 1 - self.position[1] = len(data) - return data - - 
def _readFromBuffer(self, bytes): - remainingBytes = bytes - rv = [] - bufferIndex = self.position[0] - bufferOffset = self.position[1] - while bufferIndex < len(self.buffer) and remainingBytes != 0: - assert remainingBytes > 0 - bufferedData = self.buffer[bufferIndex] - - if remainingBytes <= len(bufferedData) - bufferOffset: - bytesToRead = remainingBytes - self.position = [bufferIndex, bufferOffset + bytesToRead] - else: - bytesToRead = len(bufferedData) - bufferOffset - self.position = [bufferIndex, len(bufferedData)] - bufferIndex += 1 - rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead]) - remainingBytes -= bytesToRead - - bufferOffset = 0 - - if remainingBytes: - rv.append(self._readStream(remainingBytes)) - - return b"".join(rv) - - -def HTMLInputStream(source, **kwargs): - # Work around Python bug #20007: read(0) closes the connection. - # http://bugs.python.org/issue20007 - if (isinstance(source, http_client.HTTPResponse) or - # Also check for addinfourl wrapping HTTPResponse - (isinstance(source, urllib.response.addbase) and - isinstance(source.fp, http_client.HTTPResponse))): - isUnicode = False - elif hasattr(source, "read"): - isUnicode = isinstance(source.read(0), text_type) - else: - isUnicode = isinstance(source, text_type) - - if isUnicode: - encodings = [x for x in kwargs if x.endswith("_encoding")] - if encodings: - raise TypeError("Cannot set an encoding with a unicode input, set %r" % encodings) - - return HTMLUnicodeInputStream(source, **kwargs) - else: - return HTMLBinaryInputStream(source, **kwargs) - - -class HTMLUnicodeInputStream(object): - """Provides a unicode stream of characters to the HTMLTokenizer. - - This class takes care of character encoding and removing or replacing - incorrect byte-sequences and also provides column and line tracking. - - """ - - _defaultChunkSize = 10240 - - def __init__(self, source): - """Initialises the HTMLInputStream. 
- - HTMLInputStream(source, [encoding]) -> Normalized stream from source - for use by html5lib. - - source can be either a file-object, local filename or a string. - - The optional encoding parameter must be a string that indicates - the encoding. If specified, that encoding will be used, - regardless of any BOM or later declaration (such as in a meta - element) - - """ - - if not _utils.supports_lone_surrogates: - # Such platforms will have already checked for such - # surrogate errors, so no need to do this checking. - self.reportCharacterErrors = None - elif len("\U0010FFFF") == 1: - self.reportCharacterErrors = self.characterErrorsUCS4 - else: - self.reportCharacterErrors = self.characterErrorsUCS2 - - # List of where new lines occur - self.newLines = [0] - - self.charEncoding = (lookupEncoding("utf-8"), "certain") - self.dataStream = self.openStream(source) - - self.reset() - - def reset(self): - self.chunk = "" - self.chunkSize = 0 - self.chunkOffset = 0 - self.errors = [] - - # number of (complete) lines in previous chunks - self.prevNumLines = 0 - # number of columns in the last line of the previous chunk - self.prevNumCols = 0 - - # Deal with CR LF and surrogates split over chunk boundaries - self._bufferedCharacter = None - - def openStream(self, source): - """Produces a file object from source. - - source can be either a file object, local filename or a string. 
- - """ - # Already a file object - if hasattr(source, 'read'): - stream = source - else: - stream = StringIO(source) - - return stream - - def _position(self, offset): - chunk = self.chunk - nLines = chunk.count('\n', 0, offset) - positionLine = self.prevNumLines + nLines - lastLinePos = chunk.rfind('\n', 0, offset) - if lastLinePos == -1: - positionColumn = self.prevNumCols + offset - else: - positionColumn = offset - (lastLinePos + 1) - return (positionLine, positionColumn) - - def position(self): - """Returns (line, col) of the current position in the stream.""" - line, col = self._position(self.chunkOffset) - return (line + 1, col) - - def char(self): - """ Read one character from the stream or queue if available. Return - EOF when EOF is reached. - """ - # Read a new chunk from the input stream if necessary - if self.chunkOffset >= self.chunkSize: - if not self.readChunk(): - return EOF - - chunkOffset = self.chunkOffset - char = self.chunk[chunkOffset] - self.chunkOffset = chunkOffset + 1 - - return char - - def readChunk(self, chunkSize=None): - if chunkSize is None: - chunkSize = self._defaultChunkSize - - self.prevNumLines, self.prevNumCols = self._position(self.chunkSize) - - self.chunk = "" - self.chunkSize = 0 - self.chunkOffset = 0 - - data = self.dataStream.read(chunkSize) - - # Deal with CR LF and surrogates broken across chunks - if self._bufferedCharacter: - data = self._bufferedCharacter + data - self._bufferedCharacter = None - elif not data: - # We have no more data, bye-bye stream - return False - - if len(data) > 1: - lastv = ord(data[-1]) - if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF: - self._bufferedCharacter = data[-1] - data = data[:-1] - - if self.reportCharacterErrors: - self.reportCharacterErrors(data) - - # Replace invalid characters - data = data.replace("\r\n", "\n") - data = data.replace("\r", "\n") - - self.chunk = data - self.chunkSize = len(data) - - return True - - def characterErrorsUCS4(self, data): - for _ in 
range(len(invalid_unicode_re.findall(data))): - self.errors.append("invalid-codepoint") - - def characterErrorsUCS2(self, data): - # Someone picked the wrong compile option - # You lose - skip = False - for match in invalid_unicode_re.finditer(data): - if skip: - continue - codepoint = ord(match.group()) - pos = match.start() - # Pretty sure there should be endianness issues here - if _utils.isSurrogatePair(data[pos:pos + 2]): - # We have a surrogate pair! - char_val = _utils.surrogatePairToCodepoint(data[pos:pos + 2]) - if char_val in non_bmp_invalid_codepoints: - self.errors.append("invalid-codepoint") - skip = True - elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and - pos == len(data) - 1): - self.errors.append("invalid-codepoint") - else: - skip = False - self.errors.append("invalid-codepoint") - - def charsUntil(self, characters, opposite=False): - """ Returns a string of characters from the stream up to but not - including any character in 'characters' or EOF. 'characters' must be - a container that supports the 'in' method and iteration over its - characters. 
- """ - - # Use a cache of regexps to find the required characters - try: - chars = charsUntilRegEx[(characters, opposite)] - except KeyError: - if __debug__: - for c in characters: - assert(ord(c) < 128) - regex = "".join(["\\x%02x" % ord(c) for c in characters]) - if not opposite: - regex = "^%s" % regex - chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex) - - rv = [] - - while True: - # Find the longest matching prefix - m = chars.match(self.chunk, self.chunkOffset) - if m is None: - # If nothing matched, and it wasn't because we ran out of chunk, - # then stop - if self.chunkOffset != self.chunkSize: - break - else: - end = m.end() - # If not the whole chunk matched, return everything - # up to the part that didn't match - if end != self.chunkSize: - rv.append(self.chunk[self.chunkOffset:end]) - self.chunkOffset = end - break - # If the whole remainder of the chunk matched, - # use it all and read the next chunk - rv.append(self.chunk[self.chunkOffset:]) - if not self.readChunk(): - # Reached EOF - break - - r = "".join(rv) - return r - - def unget(self, char): - # Only one character is allowed to be ungotten at once - it must - # be consumed again before any further call to unget - if char is not None: - if self.chunkOffset == 0: - # unget is called quite rarely, so it's a good idea to do - # more work here if it saves a bit of work in the frequently - # called char and charsUntil. - # So, just prepend the ungotten character onto the current - # chunk: - self.chunk = char + self.chunk - self.chunkSize += 1 - else: - self.chunkOffset -= 1 - assert self.chunk[self.chunkOffset] == char - - -class HTMLBinaryInputStream(HTMLUnicodeInputStream): - """Provides a unicode stream of characters to the HTMLTokenizer. - - This class takes care of character encoding and removing or replacing - incorrect byte-sequences and also provides column and line tracking. 
- - """ - - def __init__(self, source, override_encoding=None, transport_encoding=None, - same_origin_parent_encoding=None, likely_encoding=None, - default_encoding="windows-1252", useChardet=True): - """Initialises the HTMLInputStream. - - HTMLInputStream(source, [encoding]) -> Normalized stream from source - for use by html5lib. - - source can be either a file-object, local filename or a string. - - The optional encoding parameter must be a string that indicates - the encoding. If specified, that encoding will be used, - regardless of any BOM or later declaration (such as in a meta - element) - - """ - # Raw Stream - for unicode objects this will encode to utf-8 and set - # self.charEncoding as appropriate - self.rawStream = self.openStream(source) - - HTMLUnicodeInputStream.__init__(self, self.rawStream) - - # Encoding Information - # Number of bytes to use when looking for a meta element with - # encoding information - self.numBytesMeta = 1024 - # Number of bytes to use when using detecting encoding using chardet - self.numBytesChardet = 100 - # Things from args - self.override_encoding = override_encoding - self.transport_encoding = transport_encoding - self.same_origin_parent_encoding = same_origin_parent_encoding - self.likely_encoding = likely_encoding - self.default_encoding = default_encoding - - # Determine encoding - self.charEncoding = self.determineEncoding(useChardet) - assert self.charEncoding[0] is not None - - # Call superclass - self.reset() - - def reset(self): - self.dataStream = self.charEncoding[0].codec_info.streamreader(self.rawStream, 'replace') - HTMLUnicodeInputStream.reset(self) - - def openStream(self, source): - """Produces a file object from source. - - source can be either a file object, local filename or a string. 
- - """ - # Already a file object - if hasattr(source, 'read'): - stream = source - else: - stream = BytesIO(source) - - try: - stream.seek(stream.tell()) - except: # pylint:disable=bare-except - stream = BufferedStream(stream) - - return stream - - def determineEncoding(self, chardet=True): - # BOMs take precedence over everything - # This will also read past the BOM if present - charEncoding = self.detectBOM(), "certain" - if charEncoding[0] is not None: - return charEncoding - - # If we've been overriden, we've been overriden - charEncoding = lookupEncoding(self.override_encoding), "certain" - if charEncoding[0] is not None: - return charEncoding - - # Now check the transport layer - charEncoding = lookupEncoding(self.transport_encoding), "certain" - if charEncoding[0] is not None: - return charEncoding - - # Look for meta elements with encoding information - charEncoding = self.detectEncodingMeta(), "tentative" - if charEncoding[0] is not None: - return charEncoding - - # Parent document encoding - charEncoding = lookupEncoding(self.same_origin_parent_encoding), "tentative" - if charEncoding[0] is not None and not charEncoding[0].name.startswith("utf-16"): - return charEncoding - - # "likely" encoding - charEncoding = lookupEncoding(self.likely_encoding), "tentative" - if charEncoding[0] is not None: - return charEncoding - - # Guess with chardet, if available - if chardet: - try: - from pip._vendor.chardet.universaldetector import UniversalDetector - except ImportError: - pass - else: - buffers = [] - detector = UniversalDetector() - while not detector.done: - buffer = self.rawStream.read(self.numBytesChardet) - assert isinstance(buffer, bytes) - if not buffer: - break - buffers.append(buffer) - detector.feed(buffer) - detector.close() - encoding = lookupEncoding(detector.result['encoding']) - self.rawStream.seek(0) - if encoding is not None: - return encoding, "tentative" - - # Try the default encoding - charEncoding = lookupEncoding(self.default_encoding), 
"tentative" - if charEncoding[0] is not None: - return charEncoding - - # Fallback to html5lib's default if even that hasn't worked - return lookupEncoding("windows-1252"), "tentative" - - def changeEncoding(self, newEncoding): - assert self.charEncoding[1] != "certain" - newEncoding = lookupEncoding(newEncoding) - if newEncoding is None: - return - if newEncoding.name in ("utf-16be", "utf-16le"): - newEncoding = lookupEncoding("utf-8") - assert newEncoding is not None - elif newEncoding == self.charEncoding[0]: - self.charEncoding = (self.charEncoding[0], "certain") - else: - self.rawStream.seek(0) - self.charEncoding = (newEncoding, "certain") - self.reset() - raise _ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding)) - - def detectBOM(self): - """Attempts to detect at BOM at the start of the stream. If - an encoding can be determined from the BOM return the name of the - encoding otherwise return None""" - bomDict = { - codecs.BOM_UTF8: 'utf-8', - codecs.BOM_UTF16_LE: 'utf-16le', codecs.BOM_UTF16_BE: 'utf-16be', - codecs.BOM_UTF32_LE: 'utf-32le', codecs.BOM_UTF32_BE: 'utf-32be' - } - - # Go to beginning of file and read in 4 bytes - string = self.rawStream.read(4) - assert isinstance(string, bytes) - - # Try detecting the BOM using bytes from the string - encoding = bomDict.get(string[:3]) # UTF-8 - seek = 3 - if not encoding: - # Need to detect UTF-32 before UTF-16 - encoding = bomDict.get(string) # UTF-32 - seek = 4 - if not encoding: - encoding = bomDict.get(string[:2]) # UTF-16 - seek = 2 - - # Set the read position past the BOM if one was found, otherwise - # set it to the start of the stream - if encoding: - self.rawStream.seek(seek) - return lookupEncoding(encoding) - else: - self.rawStream.seek(0) - return None - - def detectEncodingMeta(self): - """Report the encoding declared by the meta element - """ - buffer = self.rawStream.read(self.numBytesMeta) - assert isinstance(buffer, bytes) - parser = 
EncodingParser(buffer) - self.rawStream.seek(0) - encoding = parser.getEncoding() - - if encoding is not None and encoding.name in ("utf-16be", "utf-16le"): - encoding = lookupEncoding("utf-8") - - return encoding - - -class EncodingBytes(bytes): - """String-like object with an associated position and various extra methods - If the position is ever greater than the string length then an exception is - raised""" - def __new__(self, value): - assert isinstance(value, bytes) - return bytes.__new__(self, value.lower()) - - def __init__(self, value): - # pylint:disable=unused-argument - self._position = -1 - - def __iter__(self): - return self - - def __next__(self): - p = self._position = self._position + 1 - if p >= len(self): - raise StopIteration - elif p < 0: - raise TypeError - return self[p:p + 1] - - def next(self): - # Py2 compat - return self.__next__() - - def previous(self): - p = self._position - if p >= len(self): - raise StopIteration - elif p < 0: - raise TypeError - self._position = p = p - 1 - return self[p:p + 1] - - def setPosition(self, position): - if self._position >= len(self): - raise StopIteration - self._position = position - - def getPosition(self): - if self._position >= len(self): - raise StopIteration - if self._position >= 0: - return self._position - else: - return None - - position = property(getPosition, setPosition) - - def getCurrentByte(self): - return self[self.position:self.position + 1] - - currentByte = property(getCurrentByte) - - def skip(self, chars=spaceCharactersBytes): - """Skip past a list of characters""" - p = self.position # use property for the error-checking - while p < len(self): - c = self[p:p + 1] - if c not in chars: - self._position = p - return c - p += 1 - self._position = p - return None - - def skipUntil(self, chars): - p = self.position - while p < len(self): - c = self[p:p + 1] - if c in chars: - self._position = p - return c - p += 1 - self._position = p - return None - - def matchBytes(self, bytes): - 
"""Look for a sequence of bytes at the start of a string. If the bytes - are found return True and advance the position to the byte after the - match. Otherwise return False and leave the position alone""" - p = self.position - data = self[p:p + len(bytes)] - rv = data.startswith(bytes) - if rv: - self.position += len(bytes) - return rv - - def jumpTo(self, bytes): - """Look for the next sequence of bytes matching a given sequence. If - a match is found advance the position to the last byte of the match""" - newPosition = self[self.position:].find(bytes) - if newPosition > -1: - # XXX: This is ugly, but I can't see a nicer way to fix this. - if self._position == -1: - self._position = 0 - self._position += (newPosition + len(bytes) - 1) - return True - else: - raise StopIteration - - -class EncodingParser(object): - """Mini parser for detecting character encoding from meta elements""" - - def __init__(self, data): - """string - the data to work on for encoding detection""" - self.data = EncodingBytes(data) - self.encoding = None - - def getEncoding(self): - methodDispatch = ( - (b"<!--", self.handleComment), - (b"<meta", self.handleMeta), - (b"</", self.handlePossibleEndTag), - (b"<!", self.handleOther), - (b"<?", self.handleOther), - (b"<", self.handlePossibleStartTag)) - for _ in self.data: - keepParsing = True - for key, method in methodDispatch: - if self.data.matchBytes(key): - try: - keepParsing = method() - break - except StopIteration: - keepParsing = False - break - if not keepParsing: - break - - return self.encoding - - def handleComment(self): - """Skip over comments""" - return self.data.jumpTo(b"-->") - - def handleMeta(self): - if self.data.currentByte not in spaceCharactersBytes: - # if we have <meta not followed by a space so just keep going - return True - # We have a valid meta element we want to search for attributes - hasPragma = False - pendingEncoding = None - while True: - # Try to find the next attribute after the current position - attr = 
self.getAttribute() - if attr is None: - return True - else: - if attr[0] == b"http-equiv": - hasPragma = attr[1] == b"content-type" - if hasPragma and pendingEncoding is not None: - self.encoding = pendingEncoding - return False - elif attr[0] == b"charset": - tentativeEncoding = attr[1] - codec = lookupEncoding(tentativeEncoding) - if codec is not None: - self.encoding = codec - return False - elif attr[0] == b"content": - contentParser = ContentAttrParser(EncodingBytes(attr[1])) - tentativeEncoding = contentParser.parse() - if tentativeEncoding is not None: - codec = lookupEncoding(tentativeEncoding) - if codec is not None: - if hasPragma: - self.encoding = codec - return False - else: - pendingEncoding = codec - - def handlePossibleStartTag(self): - return self.handlePossibleTag(False) - - def handlePossibleEndTag(self): - next(self.data) - return self.handlePossibleTag(True) - - def handlePossibleTag(self, endTag): - data = self.data - if data.currentByte not in asciiLettersBytes: - # If the next byte is not an ascii letter either ignore this - # fragment (possible start tag case) or treat it according to - # handleOther - if endTag: - data.previous() - self.handleOther() - return True - - c = data.skipUntil(spacesAngleBrackets) - if c == b"<": - # return to the first step in the overall "two step" algorithm - # reprocessing the < byte - data.previous() - else: - # Read all attributes - attr = self.getAttribute() - while attr is not None: - attr = self.getAttribute() - return True - - def handleOther(self): - return self.data.jumpTo(b">") - - def getAttribute(self): - """Return a name,value pair for the next attribute in the stream, - if one is found, or None""" - data = self.data - # Step 1 (skip chars) - c = data.skip(spaceCharactersBytes | frozenset([b"/"])) - assert c is None or len(c) == 1 - # Step 2 - if c in (b">", None): - return None - # Step 3 - attrName = [] - attrValue = [] - # Step 4 attribute name - while True: - if c == b"=" and attrName: - 
break - elif c in spaceCharactersBytes: - # Step 6! - c = data.skip() - break - elif c in (b"/", b">"): - return b"".join(attrName), b"" - elif c in asciiUppercaseBytes: - attrName.append(c.lower()) - elif c is None: - return None - else: - attrName.append(c) - # Step 5 - c = next(data) - # Step 7 - if c != b"=": - data.previous() - return b"".join(attrName), b"" - # Step 8 - next(data) - # Step 9 - c = data.skip() - # Step 10 - if c in (b"'", b'"'): - # 10.1 - quoteChar = c - while True: - # 10.2 - c = next(data) - # 10.3 - if c == quoteChar: - next(data) - return b"".join(attrName), b"".join(attrValue) - # 10.4 - elif c in asciiUppercaseBytes: - attrValue.append(c.lower()) - # 10.5 - else: - attrValue.append(c) - elif c == b">": - return b"".join(attrName), b"" - elif c in asciiUppercaseBytes: - attrValue.append(c.lower()) - elif c is None: - return None - else: - attrValue.append(c) - # Step 11 - while True: - c = next(data) - if c in spacesAngleBrackets: - return b"".join(attrName), b"".join(attrValue) - elif c in asciiUppercaseBytes: - attrValue.append(c.lower()) - elif c is None: - return None - else: - attrValue.append(c) - - -class ContentAttrParser(object): - def __init__(self, data): - assert isinstance(data, bytes) - self.data = data - - def parse(self): - try: - # Check if the attr name is charset - # otherwise return - self.data.jumpTo(b"charset") - self.data.position += 1 - self.data.skip() - if not self.data.currentByte == b"=": - # If there is no = sign keep looking for attrs - return None - self.data.position += 1 - self.data.skip() - # Look for an encoding between matching quote marks - if self.data.currentByte in (b'"', b"'"): - quoteMark = self.data.currentByte - self.data.position += 1 - oldPosition = self.data.position - if self.data.jumpTo(quoteMark): - return self.data[oldPosition:self.data.position] - else: - return None - else: - # Unquoted value - oldPosition = self.data.position - try: - self.data.skipUntil(spaceCharactersBytes) - return 
self.data[oldPosition:self.data.position] - except StopIteration: - # Return the whole remaining value - return self.data[oldPosition:] - except StopIteration: - return None - - -def lookupEncoding(encoding): - """Return the python codec name corresponding to an encoding or None if the - string doesn't correspond to a valid encoding.""" - if isinstance(encoding, binary_type): - try: - encoding = encoding.decode("ascii") - except UnicodeDecodeError: - return None - - if encoding is not None: - try: - return webencodings.lookup(encoding) - except AttributeError: - return None - else: - return None diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/_tokenizer.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/_tokenizer.py deleted file mode 100644 index 178f6e7..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/_tokenizer.py +++ /dev/null @@ -1,1721 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from pip._vendor.six import unichr as chr - -from collections import deque - -from .constants import spaceCharacters -from .constants import entities -from .constants import asciiLetters, asciiUpper2Lower -from .constants import digits, hexDigits, EOF -from .constants import tokenTypes, tagTokenTypes -from .constants import replacementCharacters - -from ._inputstream import HTMLInputStream - -from ._trie import Trie - -entitiesTrie = Trie(entities) - - -class HTMLTokenizer(object): - """ This class takes care of tokenizing HTML. - - * self.currentToken - Holds the token that is currently being processed. - - * self.state - Holds a reference to the method to be invoked... XXX - - * self.stream - Points to HTMLInputStream object. 
- """ - - def __init__(self, stream, parser=None, **kwargs): - - self.stream = HTMLInputStream(stream, **kwargs) - self.parser = parser - - # Setup the initial tokenizer state - self.escapeFlag = False - self.lastFourChars = [] - self.state = self.dataState - self.escape = False - - # The current token being created - self.currentToken = None - super(HTMLTokenizer, self).__init__() - - def __iter__(self): - """ This is where the magic happens. - - We do our usually processing through the states and when we have a token - to return we yield the token which pauses processing until the next token - is requested. - """ - self.tokenQueue = deque([]) - # Start processing. When EOF is reached self.state will return False - # instead of True and the loop will terminate. - while self.state(): - while self.stream.errors: - yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)} - while self.tokenQueue: - yield self.tokenQueue.popleft() - - def consumeNumberEntity(self, isHex): - """This function returns either U+FFFD or the character based on the - decimal or hexadecimal representation. It also discards ";" if present. - If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked. - """ - - allowed = digits - radix = 10 - if isHex: - allowed = hexDigits - radix = 16 - - charStack = [] - - # Consume all the characters that are in range while making sure we - # don't hit an EOF. - c = self.stream.char() - while c in allowed and c is not EOF: - charStack.append(c) - c = self.stream.char() - - # Convert the set of characters consumed to an int. 
- charAsInt = int("".join(charStack), radix) - - # Certain characters get replaced with others - if charAsInt in replacementCharacters: - char = replacementCharacters[charAsInt] - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "illegal-codepoint-for-numeric-entity", - "datavars": {"charAsInt": charAsInt}}) - elif ((0xD800 <= charAsInt <= 0xDFFF) or - (charAsInt > 0x10FFFF)): - char = "\uFFFD" - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "illegal-codepoint-for-numeric-entity", - "datavars": {"charAsInt": charAsInt}}) - else: - # Should speed up this check somehow (e.g. move the set to a constant) - if ((0x0001 <= charAsInt <= 0x0008) or - (0x000E <= charAsInt <= 0x001F) or - (0x007F <= charAsInt <= 0x009F) or - (0xFDD0 <= charAsInt <= 0xFDEF) or - charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE, - 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE, - 0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, - 0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE, - 0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE, - 0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE, - 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE, - 0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, - 0xFFFFF, 0x10FFFE, 0x10FFFF])): - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": - "illegal-codepoint-for-numeric-entity", - "datavars": {"charAsInt": charAsInt}}) - try: - # Try/except needed as UCS-2 Python builds' unichar only works - # within the BMP. - char = chr(charAsInt) - except ValueError: - v = charAsInt - 0x10000 - char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF)) - - # Discard the ; if present. Otherwise, put it back on the queue and - # invoke parseError on parser. 
- if c != ";": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "numeric-entity-without-semicolon"}) - self.stream.unget(c) - - return char - - def consumeEntity(self, allowedChar=None, fromAttribute=False): - # Initialise to the default output for when no entity is matched - output = "&" - - charStack = [self.stream.char()] - if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&") or - (allowedChar is not None and allowedChar == charStack[0])): - self.stream.unget(charStack[0]) - - elif charStack[0] == "#": - # Read the next character to see if it's hex or decimal - hex = False - charStack.append(self.stream.char()) - if charStack[-1] in ("x", "X"): - hex = True - charStack.append(self.stream.char()) - - # charStack[-1] should be the first digit - if (hex and charStack[-1] in hexDigits) \ - or (not hex and charStack[-1] in digits): - # At least one digit found, so consume the whole number - self.stream.unget(charStack[-1]) - output = self.consumeNumberEntity(hex) - else: - # No digits found - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "expected-numeric-entity"}) - self.stream.unget(charStack.pop()) - output = "&" + "".join(charStack) - - else: - # At this point in the process might have named entity. Entities - # are stored in the global variable "entities". - # - # Consume characters and compare to these to a substring of the - # entity names in the list until the substring no longer matches. - while (charStack[-1] is not EOF): - if not entitiesTrie.has_keys_with_prefix("".join(charStack)): - break - charStack.append(self.stream.char()) - - # At this point we have a string that starts with some characters - # that may match an entity - # Try to find the longest entity the string will match to take care - # of ¬i for instance. 
- try: - entityName = entitiesTrie.longest_prefix("".join(charStack[:-1])) - entityLength = len(entityName) - except KeyError: - entityName = None - - if entityName is not None: - if entityName[-1] != ";": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "named-entity-without-semicolon"}) - if (entityName[-1] != ";" and fromAttribute and - (charStack[entityLength] in asciiLetters or - charStack[entityLength] in digits or - charStack[entityLength] == "=")): - self.stream.unget(charStack.pop()) - output = "&" + "".join(charStack) - else: - output = entities[entityName] - self.stream.unget(charStack.pop()) - output += "".join(charStack[entityLength:]) - else: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-named-entity"}) - self.stream.unget(charStack.pop()) - output = "&" + "".join(charStack) - - if fromAttribute: - self.currentToken["data"][-1][1] += output - else: - if output in spaceCharacters: - tokenType = "SpaceCharacters" - else: - tokenType = "Characters" - self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output}) - - def processEntityInAttribute(self, allowedChar): - """This method replaces the need for "entityInAttributeValueState". - """ - self.consumeEntity(allowedChar=allowedChar, fromAttribute=True) - - def emitCurrentToken(self): - """This method is a generic handler for emitting the tags. It also sets - the state to "data" because that's what's needed after a token has been - emitted. 
- """ - token = self.currentToken - # Add token to the queue to be yielded - if (token["type"] in tagTokenTypes): - token["name"] = token["name"].translate(asciiUpper2Lower) - if token["type"] == tokenTypes["EndTag"]: - if token["data"]: - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "attributes-in-end-tag"}) - if token["selfClosing"]: - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "self-closing-flag-on-end-tag"}) - self.tokenQueue.append(token) - self.state = self.dataState - - # Below are the various tokenizer states worked out. - def dataState(self): - data = self.stream.char() - if data == "&": - self.state = self.entityDataState - elif data == "<": - self.state = self.tagOpenState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "\u0000"}) - elif data is EOF: - # Tokenization ends. - return False - elif data in spaceCharacters: - # Directly after emitting a token you switch back to the "data - # state". At that point spaceCharacters are important so they are - # emitted separately. - self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data": - data + self.stream.charsUntil(spaceCharacters, True)}) - # No need to update lastFourChars here, since the first space will - # have already been appended to lastFourChars and will have broken - # any <!-- or --> sequences - else: - chars = self.stream.charsUntil(("&", "<", "\u0000")) - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": - data + chars}) - return True - - def entityDataState(self): - self.consumeEntity() - self.state = self.dataState - return True - - def rcdataState(self): - data = self.stream.char() - if data == "&": - self.state = self.characterReferenceInRcdata - elif data == "<": - self.state = self.rcdataLessThanSignState - elif data == EOF: - # Tokenization ends. 
- return False - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "\uFFFD"}) - elif data in spaceCharacters: - # Directly after emitting a token you switch back to the "data - # state". At that point spaceCharacters are important so they are - # emitted separately. - self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data": - data + self.stream.charsUntil(spaceCharacters, True)}) - # No need to update lastFourChars here, since the first space will - # have already been appended to lastFourChars and will have broken - # any <!-- or --> sequences - else: - chars = self.stream.charsUntil(("&", "<", "\u0000")) - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": - data + chars}) - return True - - def characterReferenceInRcdata(self): - self.consumeEntity() - self.state = self.rcdataState - return True - - def rawtextState(self): - data = self.stream.char() - if data == "<": - self.state = self.rawtextLessThanSignState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "\uFFFD"}) - elif data == EOF: - # Tokenization ends. - return False - else: - chars = self.stream.charsUntil(("<", "\u0000")) - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": - data + chars}) - return True - - def scriptDataState(self): - data = self.stream.char() - if data == "<": - self.state = self.scriptDataLessThanSignState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "\uFFFD"}) - elif data == EOF: - # Tokenization ends. 
- return False - else: - chars = self.stream.charsUntil(("<", "\u0000")) - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": - data + chars}) - return True - - def plaintextState(self): - data = self.stream.char() - if data == EOF: - # Tokenization ends. - return False - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "\uFFFD"}) - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": - data + self.stream.charsUntil("\u0000")}) - return True - - def tagOpenState(self): - data = self.stream.char() - if data == "!": - self.state = self.markupDeclarationOpenState - elif data == "/": - self.state = self.closeTagOpenState - elif data in asciiLetters: - self.currentToken = {"type": tokenTypes["StartTag"], - "name": data, "data": [], - "selfClosing": False, - "selfClosingAcknowledged": False} - self.state = self.tagNameState - elif data == ">": - # XXX In theory it could be something besides a tag name. But - # do we really care? - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-tag-name-but-got-right-bracket"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"}) - self.state = self.dataState - elif data == "?": - # XXX In theory it could be something besides a tag name. But - # do we really care? 
- self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-tag-name-but-got-question-mark"}) - self.stream.unget(data) - self.state = self.bogusCommentState - else: - # XXX - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-tag-name"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) - self.stream.unget(data) - self.state = self.dataState - return True - - def closeTagOpenState(self): - data = self.stream.char() - if data in asciiLetters: - self.currentToken = {"type": tokenTypes["EndTag"], "name": data, - "data": [], "selfClosing": False} - self.state = self.tagNameState - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-closing-tag-but-got-right-bracket"}) - self.state = self.dataState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-closing-tag-but-got-eof"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) - self.state = self.dataState - else: - # XXX data can be _'_... 
- self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-closing-tag-but-got-char", - "datavars": {"data": data}}) - self.stream.unget(data) - self.state = self.bogusCommentState - return True - - def tagNameState(self): - data = self.stream.char() - if data in spaceCharacters: - self.state = self.beforeAttributeNameState - elif data == ">": - self.emitCurrentToken() - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-tag-name"}) - self.state = self.dataState - elif data == "/": - self.state = self.selfClosingStartTagState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["name"] += "\uFFFD" - else: - self.currentToken["name"] += data - # (Don't use charsUntil here, because tag names are - # very short and it's faster to not do anything fancy) - return True - - def rcdataLessThanSignState(self): - data = self.stream.char() - if data == "/": - self.temporaryBuffer = "" - self.state = self.rcdataEndTagOpenState - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) - self.stream.unget(data) - self.state = self.rcdataState - return True - - def rcdataEndTagOpenState(self): - data = self.stream.char() - if data in asciiLetters: - self.temporaryBuffer += data - self.state = self.rcdataEndTagNameState - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) - self.stream.unget(data) - self.state = self.rcdataState - return True - - def rcdataEndTagNameState(self): - appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() - data = self.stream.char() - if data in spaceCharacters and appropriate: - self.currentToken = {"type": tokenTypes["EndTag"], - "name": self.temporaryBuffer, - "data": [], "selfClosing": False} - self.state = self.beforeAttributeNameState - elif data == "/" and appropriate: - self.currentToken = 
{"type": tokenTypes["EndTag"], - "name": self.temporaryBuffer, - "data": [], "selfClosing": False} - self.state = self.selfClosingStartTagState - elif data == ">" and appropriate: - self.currentToken = {"type": tokenTypes["EndTag"], - "name": self.temporaryBuffer, - "data": [], "selfClosing": False} - self.emitCurrentToken() - self.state = self.dataState - elif data in asciiLetters: - self.temporaryBuffer += data - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "</" + self.temporaryBuffer}) - self.stream.unget(data) - self.state = self.rcdataState - return True - - def rawtextLessThanSignState(self): - data = self.stream.char() - if data == "/": - self.temporaryBuffer = "" - self.state = self.rawtextEndTagOpenState - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) - self.stream.unget(data) - self.state = self.rawtextState - return True - - def rawtextEndTagOpenState(self): - data = self.stream.char() - if data in asciiLetters: - self.temporaryBuffer += data - self.state = self.rawtextEndTagNameState - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) - self.stream.unget(data) - self.state = self.rawtextState - return True - - def rawtextEndTagNameState(self): - appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() - data = self.stream.char() - if data in spaceCharacters and appropriate: - self.currentToken = {"type": tokenTypes["EndTag"], - "name": self.temporaryBuffer, - "data": [], "selfClosing": False} - self.state = self.beforeAttributeNameState - elif data == "/" and appropriate: - self.currentToken = {"type": tokenTypes["EndTag"], - "name": self.temporaryBuffer, - "data": [], "selfClosing": False} - self.state = self.selfClosingStartTagState - elif data == ">" and appropriate: - self.currentToken = {"type": tokenTypes["EndTag"], - "name": self.temporaryBuffer, - "data": [], "selfClosing": False} - self.emitCurrentToken() 
- self.state = self.dataState - elif data in asciiLetters: - self.temporaryBuffer += data - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "</" + self.temporaryBuffer}) - self.stream.unget(data) - self.state = self.rawtextState - return True - - def scriptDataLessThanSignState(self): - data = self.stream.char() - if data == "/": - self.temporaryBuffer = "" - self.state = self.scriptDataEndTagOpenState - elif data == "!": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"}) - self.state = self.scriptDataEscapeStartState - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) - self.stream.unget(data) - self.state = self.scriptDataState - return True - - def scriptDataEndTagOpenState(self): - data = self.stream.char() - if data in asciiLetters: - self.temporaryBuffer += data - self.state = self.scriptDataEndTagNameState - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) - self.stream.unget(data) - self.state = self.scriptDataState - return True - - def scriptDataEndTagNameState(self): - appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() - data = self.stream.char() - if data in spaceCharacters and appropriate: - self.currentToken = {"type": tokenTypes["EndTag"], - "name": self.temporaryBuffer, - "data": [], "selfClosing": False} - self.state = self.beforeAttributeNameState - elif data == "/" and appropriate: - self.currentToken = {"type": tokenTypes["EndTag"], - "name": self.temporaryBuffer, - "data": [], "selfClosing": False} - self.state = self.selfClosingStartTagState - elif data == ">" and appropriate: - self.currentToken = {"type": tokenTypes["EndTag"], - "name": self.temporaryBuffer, - "data": [], "selfClosing": False} - self.emitCurrentToken() - self.state = self.dataState - elif data in asciiLetters: - self.temporaryBuffer += data - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], - 
"data": "</" + self.temporaryBuffer}) - self.stream.unget(data) - self.state = self.scriptDataState - return True - - def scriptDataEscapeStartState(self): - data = self.stream.char() - if data == "-": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) - self.state = self.scriptDataEscapeStartDashState - else: - self.stream.unget(data) - self.state = self.scriptDataState - return True - - def scriptDataEscapeStartDashState(self): - data = self.stream.char() - if data == "-": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) - self.state = self.scriptDataEscapedDashDashState - else: - self.stream.unget(data) - self.state = self.scriptDataState - return True - - def scriptDataEscapedState(self): - data = self.stream.char() - if data == "-": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) - self.state = self.scriptDataEscapedDashState - elif data == "<": - self.state = self.scriptDataEscapedLessThanSignState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "\uFFFD"}) - elif data == EOF: - self.state = self.dataState - else: - chars = self.stream.charsUntil(("<", "-", "\u0000")) - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": - data + chars}) - return True - - def scriptDataEscapedDashState(self): - data = self.stream.char() - if data == "-": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) - self.state = self.scriptDataEscapedDashDashState - elif data == "<": - self.state = self.scriptDataEscapedLessThanSignState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "\uFFFD"}) - self.state = self.scriptDataEscapedState - elif data == EOF: - self.state = self.dataState - else: - 
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) - self.state = self.scriptDataEscapedState - return True - - def scriptDataEscapedDashDashState(self): - data = self.stream.char() - if data == "-": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) - elif data == "<": - self.state = self.scriptDataEscapedLessThanSignState - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"}) - self.state = self.scriptDataState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "\uFFFD"}) - self.state = self.scriptDataEscapedState - elif data == EOF: - self.state = self.dataState - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) - self.state = self.scriptDataEscapedState - return True - - def scriptDataEscapedLessThanSignState(self): - data = self.stream.char() - if data == "/": - self.temporaryBuffer = "" - self.state = self.scriptDataEscapedEndTagOpenState - elif data in asciiLetters: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data}) - self.temporaryBuffer = data - self.state = self.scriptDataDoubleEscapeStartState - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) - self.stream.unget(data) - self.state = self.scriptDataEscapedState - return True - - def scriptDataEscapedEndTagOpenState(self): - data = self.stream.char() - if data in asciiLetters: - self.temporaryBuffer = data - self.state = self.scriptDataEscapedEndTagNameState - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) - self.stream.unget(data) - self.state = self.scriptDataEscapedState - return True - - def scriptDataEscapedEndTagNameState(self): - appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() - data = self.stream.char() 
- if data in spaceCharacters and appropriate: - self.currentToken = {"type": tokenTypes["EndTag"], - "name": self.temporaryBuffer, - "data": [], "selfClosing": False} - self.state = self.beforeAttributeNameState - elif data == "/" and appropriate: - self.currentToken = {"type": tokenTypes["EndTag"], - "name": self.temporaryBuffer, - "data": [], "selfClosing": False} - self.state = self.selfClosingStartTagState - elif data == ">" and appropriate: - self.currentToken = {"type": tokenTypes["EndTag"], - "name": self.temporaryBuffer, - "data": [], "selfClosing": False} - self.emitCurrentToken() - self.state = self.dataState - elif data in asciiLetters: - self.temporaryBuffer += data - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "</" + self.temporaryBuffer}) - self.stream.unget(data) - self.state = self.scriptDataEscapedState - return True - - def scriptDataDoubleEscapeStartState(self): - data = self.stream.char() - if data in (spaceCharacters | frozenset(("/", ">"))): - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) - if self.temporaryBuffer.lower() == "script": - self.state = self.scriptDataDoubleEscapedState - else: - self.state = self.scriptDataEscapedState - elif data in asciiLetters: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) - self.temporaryBuffer += data - else: - self.stream.unget(data) - self.state = self.scriptDataEscapedState - return True - - def scriptDataDoubleEscapedState(self): - data = self.stream.char() - if data == "-": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) - self.state = self.scriptDataDoubleEscapedDashState - elif data == "<": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) - self.state = self.scriptDataDoubleEscapedLessThanSignState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.tokenQueue.append({"type": 
tokenTypes["Characters"], - "data": "\uFFFD"}) - elif data == EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-script-in-script"}) - self.state = self.dataState - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) - return True - - def scriptDataDoubleEscapedDashState(self): - data = self.stream.char() - if data == "-": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) - self.state = self.scriptDataDoubleEscapedDashDashState - elif data == "<": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) - self.state = self.scriptDataDoubleEscapedLessThanSignState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "\uFFFD"}) - self.state = self.scriptDataDoubleEscapedState - elif data == EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-script-in-script"}) - self.state = self.dataState - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) - self.state = self.scriptDataDoubleEscapedState - return True - - def scriptDataDoubleEscapedDashDashState(self): - data = self.stream.char() - if data == "-": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) - elif data == "<": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) - self.state = self.scriptDataDoubleEscapedLessThanSignState - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"}) - self.state = self.scriptDataState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "\uFFFD"}) - self.state = self.scriptDataDoubleEscapedState - elif data == EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], 
"data": - "eof-in-script-in-script"}) - self.state = self.dataState - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) - self.state = self.scriptDataDoubleEscapedState - return True - - def scriptDataDoubleEscapedLessThanSignState(self): - data = self.stream.char() - if data == "/": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"}) - self.temporaryBuffer = "" - self.state = self.scriptDataDoubleEscapeEndState - else: - self.stream.unget(data) - self.state = self.scriptDataDoubleEscapedState - return True - - def scriptDataDoubleEscapeEndState(self): - data = self.stream.char() - if data in (spaceCharacters | frozenset(("/", ">"))): - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) - if self.temporaryBuffer.lower() == "script": - self.state = self.scriptDataEscapedState - else: - self.state = self.scriptDataDoubleEscapedState - elif data in asciiLetters: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) - self.temporaryBuffer += data - else: - self.stream.unget(data) - self.state = self.scriptDataDoubleEscapedState - return True - - def beforeAttributeNameState(self): - data = self.stream.char() - if data in spaceCharacters: - self.stream.charsUntil(spaceCharacters, True) - elif data in asciiLetters: - self.currentToken["data"].append([data, ""]) - self.state = self.attributeNameState - elif data == ">": - self.emitCurrentToken() - elif data == "/": - self.state = self.selfClosingStartTagState - elif data in ("'", '"', "=", "<"): - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "invalid-character-in-attribute-name"}) - self.currentToken["data"].append([data, ""]) - self.state = self.attributeNameState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"].append(["\uFFFD", ""]) - self.state = self.attributeNameState - elif data is EOF: - 
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-attribute-name-but-got-eof"}) - self.state = self.dataState - else: - self.currentToken["data"].append([data, ""]) - self.state = self.attributeNameState - return True - - def attributeNameState(self): - data = self.stream.char() - leavingThisState = True - emitToken = False - if data == "=": - self.state = self.beforeAttributeValueState - elif data in asciiLetters: - self.currentToken["data"][-1][0] += data +\ - self.stream.charsUntil(asciiLetters, True) - leavingThisState = False - elif data == ">": - # XXX If we emit here the attributes are converted to a dict - # without being checked and when the code below runs we error - # because data is a dict not a list - emitToken = True - elif data in spaceCharacters: - self.state = self.afterAttributeNameState - elif data == "/": - self.state = self.selfClosingStartTagState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"][-1][0] += "\uFFFD" - leavingThisState = False - elif data in ("'", '"', "<"): - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": - "invalid-character-in-attribute-name"}) - self.currentToken["data"][-1][0] += data - leavingThisState = False - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "eof-in-attribute-name"}) - self.state = self.dataState - else: - self.currentToken["data"][-1][0] += data - leavingThisState = False - - if leavingThisState: - # Attributes are not dropped at this stage. That happens when the - # start tag token is emitted so values can still be safely appended - # to attributes, but we do want to report the parse error in time. 
- self.currentToken["data"][-1][0] = ( - self.currentToken["data"][-1][0].translate(asciiUpper2Lower)) - for name, _ in self.currentToken["data"][:-1]: - if self.currentToken["data"][-1][0] == name: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "duplicate-attribute"}) - break - # XXX Fix for above XXX - if emitToken: - self.emitCurrentToken() - return True - - def afterAttributeNameState(self): - data = self.stream.char() - if data in spaceCharacters: - self.stream.charsUntil(spaceCharacters, True) - elif data == "=": - self.state = self.beforeAttributeValueState - elif data == ">": - self.emitCurrentToken() - elif data in asciiLetters: - self.currentToken["data"].append([data, ""]) - self.state = self.attributeNameState - elif data == "/": - self.state = self.selfClosingStartTagState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"].append(["\uFFFD", ""]) - self.state = self.attributeNameState - elif data in ("'", '"', "<"): - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "invalid-character-after-attribute-name"}) - self.currentToken["data"].append([data, ""]) - self.state = self.attributeNameState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-end-of-tag-but-got-eof"}) - self.state = self.dataState - else: - self.currentToken["data"].append([data, ""]) - self.state = self.attributeNameState - return True - - def beforeAttributeValueState(self): - data = self.stream.char() - if data in spaceCharacters: - self.stream.charsUntil(spaceCharacters, True) - elif data == "\"": - self.state = self.attributeValueDoubleQuotedState - elif data == "&": - self.state = self.attributeValueUnQuotedState - self.stream.unget(data) - elif data == "'": - self.state = self.attributeValueSingleQuotedState - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - 
"expected-attribute-value-but-got-right-bracket"}) - self.emitCurrentToken() - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"][-1][1] += "\uFFFD" - self.state = self.attributeValueUnQuotedState - elif data in ("=", "<", "`"): - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "equals-in-unquoted-attribute-value"}) - self.currentToken["data"][-1][1] += data - self.state = self.attributeValueUnQuotedState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-attribute-value-but-got-eof"}) - self.state = self.dataState - else: - self.currentToken["data"][-1][1] += data - self.state = self.attributeValueUnQuotedState - return True - - def attributeValueDoubleQuotedState(self): - data = self.stream.char() - if data == "\"": - self.state = self.afterAttributeValueState - elif data == "&": - self.processEntityInAttribute('"') - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"][-1][1] += "\uFFFD" - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-attribute-value-double-quote"}) - self.state = self.dataState - else: - self.currentToken["data"][-1][1] += data +\ - self.stream.charsUntil(("\"", "&", "\u0000")) - return True - - def attributeValueSingleQuotedState(self): - data = self.stream.char() - if data == "'": - self.state = self.afterAttributeValueState - elif data == "&": - self.processEntityInAttribute("'") - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"][-1][1] += "\uFFFD" - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-attribute-value-single-quote"}) - self.state = self.dataState - else: - self.currentToken["data"][-1][1] += 
data +\ - self.stream.charsUntil(("'", "&", "\u0000")) - return True - - def attributeValueUnQuotedState(self): - data = self.stream.char() - if data in spaceCharacters: - self.state = self.beforeAttributeNameState - elif data == "&": - self.processEntityInAttribute(">") - elif data == ">": - self.emitCurrentToken() - elif data in ('"', "'", "=", "<", "`"): - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-character-in-unquoted-attribute-value"}) - self.currentToken["data"][-1][1] += data - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"][-1][1] += "\uFFFD" - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-attribute-value-no-quotes"}) - self.state = self.dataState - else: - self.currentToken["data"][-1][1] += data + self.stream.charsUntil( - frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters) - return True - - def afterAttributeValueState(self): - data = self.stream.char() - if data in spaceCharacters: - self.state = self.beforeAttributeNameState - elif data == ">": - self.emitCurrentToken() - elif data == "/": - self.state = self.selfClosingStartTagState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-EOF-after-attribute-value"}) - self.stream.unget(data) - self.state = self.dataState - else: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-character-after-attribute-value"}) - self.stream.unget(data) - self.state = self.beforeAttributeNameState - return True - - def selfClosingStartTagState(self): - data = self.stream.char() - if data == ">": - self.currentToken["selfClosing"] = True - self.emitCurrentToken() - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": - "unexpected-EOF-after-solidus-in-tag"}) - self.stream.unget(data) - self.state = 
self.dataState - else: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-character-after-solidus-in-tag"}) - self.stream.unget(data) - self.state = self.beforeAttributeNameState - return True - - def bogusCommentState(self): - # Make a new comment token and give it as value all the characters - # until the first > or EOF (charsUntil checks for EOF automatically) - # and emit it. - data = self.stream.charsUntil(">") - data = data.replace("\u0000", "\uFFFD") - self.tokenQueue.append( - {"type": tokenTypes["Comment"], "data": data}) - - # Eat the character directly after the bogus comment which is either a - # ">" or an EOF. - self.stream.char() - self.state = self.dataState - return True - - def markupDeclarationOpenState(self): - charStack = [self.stream.char()] - if charStack[-1] == "-": - charStack.append(self.stream.char()) - if charStack[-1] == "-": - self.currentToken = {"type": tokenTypes["Comment"], "data": ""} - self.state = self.commentStartState - return True - elif charStack[-1] in ('d', 'D'): - matched = True - for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'), - ('y', 'Y'), ('p', 'P'), ('e', 'E')): - charStack.append(self.stream.char()) - if charStack[-1] not in expected: - matched = False - break - if matched: - self.currentToken = {"type": tokenTypes["Doctype"], - "name": "", - "publicId": None, "systemId": None, - "correct": True} - self.state = self.doctypeState - return True - elif (charStack[-1] == "[" and - self.parser is not None and - self.parser.tree.openElements and - self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace): - matched = True - for expected in ["C", "D", "A", "T", "A", "["]: - charStack.append(self.stream.char()) - if charStack[-1] != expected: - matched = False - break - if matched: - self.state = self.cdataSectionState - return True - - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-dashes-or-doctype"}) - - while charStack: - 
self.stream.unget(charStack.pop()) - self.state = self.bogusCommentState - return True - - def commentStartState(self): - data = self.stream.char() - if data == "-": - self.state = self.commentStartDashState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"] += "\uFFFD" - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "incorrect-comment"}) - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-comment"}) - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.currentToken["data"] += data - self.state = self.commentState - return True - - def commentStartDashState(self): - data = self.stream.char() - if data == "-": - self.state = self.commentEndState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"] += "-\uFFFD" - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "incorrect-comment"}) - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-comment"}) - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.currentToken["data"] += "-" + data - self.state = self.commentState - return True - - def commentState(self): - data = self.stream.char() - if data == "-": - self.state = self.commentEndDashState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"] += "\uFFFD" - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "eof-in-comment"}) - self.tokenQueue.append(self.currentToken) - 
self.state = self.dataState - else: - self.currentToken["data"] += data + \ - self.stream.charsUntil(("-", "\u0000")) - return True - - def commentEndDashState(self): - data = self.stream.char() - if data == "-": - self.state = self.commentEndState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"] += "-\uFFFD" - self.state = self.commentState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-comment-end-dash"}) - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.currentToken["data"] += "-" + data - self.state = self.commentState - return True - - def commentEndState(self): - data = self.stream.char() - if data == ">": - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"] += "--\uFFFD" - self.state = self.commentState - elif data == "!": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-bang-after-double-dash-in-comment"}) - self.state = self.commentEndBangState - elif data == "-": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-dash-after-double-dash-in-comment"}) - self.currentToken["data"] += data - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-comment-double-dash"}) - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - # XXX - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-char-in-comment"}) - self.currentToken["data"] += "--" + data - self.state = self.commentState - return True - - def commentEndBangState(self): - data = self.stream.char() - if data == ">": - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data == 
"-": - self.currentToken["data"] += "--!" - self.state = self.commentEndDashState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"] += "--!\uFFFD" - self.state = self.commentState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-comment-end-bang-state"}) - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.currentToken["data"] += "--!" + data - self.state = self.commentState - return True - - def doctypeState(self): - data = self.stream.char() - if data in spaceCharacters: - self.state = self.beforeDoctypeNameState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-doctype-name-but-got-eof"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "need-space-after-doctype"}) - self.stream.unget(data) - self.state = self.beforeDoctypeNameState - return True - - def beforeDoctypeNameState(self): - data = self.stream.char() - if data in spaceCharacters: - pass - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-doctype-name-but-got-right-bracket"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["name"] = "\uFFFD" - self.state = self.doctypeNameState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-doctype-name-but-got-eof"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.currentToken["name"] = data - self.state = self.doctypeNameState 
- return True - - def doctypeNameState(self): - data = self.stream.char() - if data in spaceCharacters: - self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) - self.state = self.afterDoctypeNameState - elif data == ">": - self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["name"] += "\uFFFD" - self.state = self.doctypeNameState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-doctype-name"}) - self.currentToken["correct"] = False - self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.currentToken["name"] += data - return True - - def afterDoctypeNameState(self): - data = self.stream.char() - if data in spaceCharacters: - pass - elif data == ">": - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data is EOF: - self.currentToken["correct"] = False - self.stream.unget(data) - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-doctype"}) - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - if data in ("p", "P"): - matched = True - for expected in (("u", "U"), ("b", "B"), ("l", "L"), - ("i", "I"), ("c", "C")): - data = self.stream.char() - if data not in expected: - matched = False - break - if matched: - self.state = self.afterDoctypePublicKeywordState - return True - elif data in ("s", "S"): - matched = True - for expected in (("y", "Y"), ("s", "S"), ("t", "T"), - ("e", "E"), ("m", "M")): - data = self.stream.char() - if data not in expected: - matched = False - break - if matched: - self.state = self.afterDoctypeSystemKeywordState - return True - - # 
All the characters read before the current 'data' will be - # [a-zA-Z], so they're garbage in the bogus doctype and can be - # discarded; only the latest character might be '>' or EOF - # and needs to be ungetted - self.stream.unget(data) - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-space-or-right-bracket-in-doctype", "datavars": - {"data": data}}) - self.currentToken["correct"] = False - self.state = self.bogusDoctypeState - - return True - - def afterDoctypePublicKeywordState(self): - data = self.stream.char() - if data in spaceCharacters: - self.state = self.beforeDoctypePublicIdentifierState - elif data in ("'", '"'): - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-char-in-doctype"}) - self.stream.unget(data) - self.state = self.beforeDoctypePublicIdentifierState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.stream.unget(data) - self.state = self.beforeDoctypePublicIdentifierState - return True - - def beforeDoctypePublicIdentifierState(self): - data = self.stream.char() - if data in spaceCharacters: - pass - elif data == "\"": - self.currentToken["publicId"] = "" - self.state = self.doctypePublicIdentifierDoubleQuotedState - elif data == "'": - self.currentToken["publicId"] = "" - self.state = self.doctypePublicIdentifierSingleQuotedState - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-end-of-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - 
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-char-in-doctype"}) - self.currentToken["correct"] = False - self.state = self.bogusDoctypeState - return True - - def doctypePublicIdentifierDoubleQuotedState(self): - data = self.stream.char() - if data == "\"": - self.state = self.afterDoctypePublicIdentifierState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["publicId"] += "\uFFFD" - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-end-of-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.currentToken["publicId"] += data - return True - - def doctypePublicIdentifierSingleQuotedState(self): - data = self.stream.char() - if data == "'": - self.state = self.afterDoctypePublicIdentifierState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["publicId"] += "\uFFFD" - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-end-of-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.currentToken["publicId"] += data - return True - - def afterDoctypePublicIdentifierState(self): - data = self.stream.char() - if data in spaceCharacters: - self.state = 
self.betweenDoctypePublicAndSystemIdentifiersState - elif data == ">": - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data == '"': - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-char-in-doctype"}) - self.currentToken["systemId"] = "" - self.state = self.doctypeSystemIdentifierDoubleQuotedState - elif data == "'": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-char-in-doctype"}) - self.currentToken["systemId"] = "" - self.state = self.doctypeSystemIdentifierSingleQuotedState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-char-in-doctype"}) - self.currentToken["correct"] = False - self.state = self.bogusDoctypeState - return True - - def betweenDoctypePublicAndSystemIdentifiersState(self): - data = self.stream.char() - if data in spaceCharacters: - pass - elif data == ">": - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data == '"': - self.currentToken["systemId"] = "" - self.state = self.doctypeSystemIdentifierDoubleQuotedState - elif data == "'": - self.currentToken["systemId"] = "" - self.state = self.doctypeSystemIdentifierSingleQuotedState - elif data == EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-char-in-doctype"}) - self.currentToken["correct"] = False - self.state = self.bogusDoctypeState - return True - - def afterDoctypeSystemKeywordState(self): - data = self.stream.char() - if data in spaceCharacters: - self.state 
= self.beforeDoctypeSystemIdentifierState - elif data in ("'", '"'): - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-char-in-doctype"}) - self.stream.unget(data) - self.state = self.beforeDoctypeSystemIdentifierState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.stream.unget(data) - self.state = self.beforeDoctypeSystemIdentifierState - return True - - def beforeDoctypeSystemIdentifierState(self): - data = self.stream.char() - if data in spaceCharacters: - pass - elif data == "\"": - self.currentToken["systemId"] = "" - self.state = self.doctypeSystemIdentifierDoubleQuotedState - elif data == "'": - self.currentToken["systemId"] = "" - self.state = self.doctypeSystemIdentifierSingleQuotedState - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-char-in-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-char-in-doctype"}) - self.currentToken["correct"] = False - self.state = self.bogusDoctypeState - return True - - def doctypeSystemIdentifierDoubleQuotedState(self): - data = self.stream.char() - if data == "\"": - self.state = self.afterDoctypeSystemIdentifierState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["systemId"] += "\uFFFD" - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - 
"unexpected-end-of-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.currentToken["systemId"] += data - return True - - def doctypeSystemIdentifierSingleQuotedState(self): - data = self.stream.char() - if data == "'": - self.state = self.afterDoctypeSystemIdentifierState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["systemId"] += "\uFFFD" - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-end-of-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.currentToken["systemId"] += data - return True - - def afterDoctypeSystemIdentifierState(self): - data = self.stream.char() - if data in spaceCharacters: - pass - elif data == ">": - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-char-in-doctype"}) - self.state = self.bogusDoctypeState - return True - - def bogusDoctypeState(self): - data = self.stream.char() - if data == ">": - self.tokenQueue.append(self.currentToken) - self.state = 
self.dataState - elif data is EOF: - # XXX EMIT - self.stream.unget(data) - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - pass - return True - - def cdataSectionState(self): - data = [] - while True: - data.append(self.stream.charsUntil("]")) - data.append(self.stream.charsUntil(">")) - char = self.stream.char() - if char == EOF: - break - else: - assert char == ">" - if data[-1][-2:] == "]]": - data[-1] = data[-1][:-2] - break - else: - data.append(char) - - data = "".join(data) # pylint:disable=redefined-variable-type - # Deal with null here rather than in the parser - nullCount = data.count("\u0000") - if nullCount > 0: - for _ in range(nullCount): - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - data = data.replace("\u0000", "\uFFFD") - if data: - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": data}) - self.state = self.dataState - return True diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/_trie/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/_trie/__init__.py deleted file mode 100644 index a5ba4bf..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/_trie/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from .py import Trie as PyTrie - -Trie = PyTrie - -# pylint:disable=wrong-import-position -try: - from .datrie import Trie as DATrie -except ImportError: - pass -else: - Trie = DATrie -# pylint:enable=wrong-import-position diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/_trie/_base.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/_trie/_base.py deleted file mode 100644 index a1158bb..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/_trie/_base.py +++ /dev/null @@ -1,37 
+0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from collections import Mapping - - -class Trie(Mapping): - """Abstract base class for tries""" - - def keys(self, prefix=None): - # pylint:disable=arguments-differ - keys = super(Trie, self).keys() - - if prefix is None: - return set(keys) - - return {x for x in keys if x.startswith(prefix)} - - def has_keys_with_prefix(self, prefix): - for key in self.keys(): - if key.startswith(prefix): - return True - - return False - - def longest_prefix(self, prefix): - if prefix in self: - return prefix - - for i in range(1, len(prefix) + 1): - if prefix[:-i] in self: - return prefix[:-i] - - raise KeyError(prefix) - - def longest_prefix_item(self, prefix): - lprefix = self.longest_prefix(prefix) - return (lprefix, self[lprefix]) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/_trie/datrie.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/_trie/datrie.py deleted file mode 100644 index e2e5f86..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/_trie/datrie.py +++ /dev/null @@ -1,44 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from datrie import Trie as DATrie -from pip._vendor.six import text_type - -from ._base import Trie as ABCTrie - - -class Trie(ABCTrie): - def __init__(self, data): - chars = set() - for key in data.keys(): - if not isinstance(key, text_type): - raise TypeError("All keys must be strings") - for char in key: - chars.add(char) - - self._data = DATrie("".join(chars)) - for key, value in data.items(): - self._data[key] = value - - def __contains__(self, key): - return key in self._data - - def __len__(self): - return len(self._data) - - def __iter__(self): - raise NotImplementedError() - - def __getitem__(self, key): - return self._data[key] - - def keys(self, prefix=None): - return self._data.keys(prefix) - - def has_keys_with_prefix(self, 
prefix): - return self._data.has_keys_with_prefix(prefix) - - def longest_prefix(self, prefix): - return self._data.longest_prefix(prefix) - - def longest_prefix_item(self, prefix): - return self._data.longest_prefix_item(prefix) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/_trie/py.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/_trie/py.py deleted file mode 100644 index c178b21..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/_trie/py.py +++ /dev/null @@ -1,67 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals -from pip._vendor.six import text_type - -from bisect import bisect_left - -from ._base import Trie as ABCTrie - - -class Trie(ABCTrie): - def __init__(self, data): - if not all(isinstance(x, text_type) for x in data.keys()): - raise TypeError("All keys must be strings") - - self._data = data - self._keys = sorted(data.keys()) - self._cachestr = "" - self._cachepoints = (0, len(data)) - - def __contains__(self, key): - return key in self._data - - def __len__(self): - return len(self._data) - - def __iter__(self): - return iter(self._data) - - def __getitem__(self, key): - return self._data[key] - - def keys(self, prefix=None): - if prefix is None or prefix == "" or not self._keys: - return set(self._keys) - - if prefix.startswith(self._cachestr): - lo, hi = self._cachepoints - start = i = bisect_left(self._keys, prefix, lo, hi) - else: - start = i = bisect_left(self._keys, prefix) - - keys = set() - if start == len(self._keys): - return keys - - while self._keys[i].startswith(prefix): - keys.add(self._keys[i]) - i += 1 - - self._cachestr = prefix - self._cachepoints = (start, i) - - return keys - - def has_keys_with_prefix(self, prefix): - if prefix in self._data: - return True - - if prefix.startswith(self._cachestr): - lo, hi = self._cachepoints - i = bisect_left(self._keys, prefix, lo, hi) - else: - i = 
bisect_left(self._keys, prefix) - - if i == len(self._keys): - return False - - return self._keys[i].startswith(prefix) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/_utils.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/_utils.py deleted file mode 100644 index 0703afb..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/_utils.py +++ /dev/null @@ -1,124 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from types import ModuleType - -from pip._vendor.six import text_type - -try: - import xml.etree.cElementTree as default_etree -except ImportError: - import xml.etree.ElementTree as default_etree - - -__all__ = ["default_etree", "MethodDispatcher", "isSurrogatePair", - "surrogatePairToCodepoint", "moduleFactoryFactory", - "supports_lone_surrogates"] - - -# Platforms not supporting lone surrogates (\uD800-\uDFFF) should be -# caught by the below test. In general this would be any platform -# using UTF-16 as its encoding of unicode strings, such as -# Jython. This is because UTF-16 itself is based on the use of such -# surrogates, and there is no mechanism to further escape such -# escapes. 
-try: - _x = eval('"\\uD800"') # pylint:disable=eval-used - if not isinstance(_x, text_type): - # We need this with u"" because of http://bugs.jython.org/issue2039 - _x = eval('u"\\uD800"') # pylint:disable=eval-used - assert isinstance(_x, text_type) -except: # pylint:disable=bare-except - supports_lone_surrogates = False -else: - supports_lone_surrogates = True - - -class MethodDispatcher(dict): - """Dict with 2 special properties: - - On initiation, keys that are lists, sets or tuples are converted to - multiple keys so accessing any one of the items in the original - list-like object returns the matching value - - md = MethodDispatcher({("foo", "bar"):"baz"}) - md["foo"] == "baz" - - A default value which can be set through the default attribute. - """ - - def __init__(self, items=()): - # Using _dictEntries instead of directly assigning to self is about - # twice as fast. Please do careful performance testing before changing - # anything here. - _dictEntries = [] - for name, value in items: - if isinstance(name, (list, tuple, frozenset, set)): - for item in name: - _dictEntries.append((item, value)) - else: - _dictEntries.append((name, value)) - dict.__init__(self, _dictEntries) - assert len(self) == len(_dictEntries) - self.default = None - - def __getitem__(self, key): - return dict.get(self, key, self.default) - - -# Some utility functions to deal with weirdness around UCS2 vs UCS4 -# python builds - -def isSurrogatePair(data): - return (len(data) == 2 and - ord(data[0]) >= 0xD800 and ord(data[0]) <= 0xDBFF and - ord(data[1]) >= 0xDC00 and ord(data[1]) <= 0xDFFF) - - -def surrogatePairToCodepoint(data): - char_val = (0x10000 + (ord(data[0]) - 0xD800) * 0x400 + - (ord(data[1]) - 0xDC00)) - return char_val - -# Module Factory Factory (no, this isn't Java, I know) -# Here to stop this being duplicated all over the place. 
- - -def moduleFactoryFactory(factory): - moduleCache = {} - - def moduleFactory(baseModule, *args, **kwargs): - if isinstance(ModuleType.__name__, type("")): - name = "_%s_factory" % baseModule.__name__ - else: - name = b"_%s_factory" % baseModule.__name__ - - kwargs_tuple = tuple(kwargs.items()) - - try: - return moduleCache[name][args][kwargs_tuple] - except KeyError: - mod = ModuleType(name) - objs = factory(baseModule, *args, **kwargs) - mod.__dict__.update(objs) - if "name" not in moduleCache: - moduleCache[name] = {} - if "args" not in moduleCache[name]: - moduleCache[name][args] = {} - if "kwargs" not in moduleCache[name][args]: - moduleCache[name][args][kwargs_tuple] = {} - moduleCache[name][args][kwargs_tuple] = mod - return mod - - return moduleFactory - - -def memoize(func): - cache = {} - - def wrapped(*args, **kwargs): - key = (tuple(args), tuple(kwargs.items())) - if key not in cache: - cache[key] = func(*args, **kwargs) - return cache[key] - - return wrapped diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/constants.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/constants.py deleted file mode 100644 index 1ff8041..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/constants.py +++ /dev/null @@ -1,2947 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -import string - -EOF = None - -E = { - "null-character": - "Null character in input stream, replaced with U+FFFD.", - "invalid-codepoint": - "Invalid codepoint in stream.", - "incorrectly-placed-solidus": - "Solidus (/) incorrectly placed in tag.", - "incorrect-cr-newline-entity": - "Incorrect CR newline entity, replaced with LF.", - "illegal-windows-1252-entity": - "Entity used with illegal number (windows-1252 reference).", - "cant-convert-numeric-entity": - "Numeric entity couldn't be converted to character " - "(codepoint U+%(charAsInt)08x).", - 
"illegal-codepoint-for-numeric-entity": - "Numeric entity represents an illegal codepoint: " - "U+%(charAsInt)08x.", - "numeric-entity-without-semicolon": - "Numeric entity didn't end with ';'.", - "expected-numeric-entity-but-got-eof": - "Numeric entity expected. Got end of file instead.", - "expected-numeric-entity": - "Numeric entity expected but none found.", - "named-entity-without-semicolon": - "Named entity didn't end with ';'.", - "expected-named-entity": - "Named entity expected. Got none.", - "attributes-in-end-tag": - "End tag contains unexpected attributes.", - 'self-closing-flag-on-end-tag': - "End tag contains unexpected self-closing flag.", - "expected-tag-name-but-got-right-bracket": - "Expected tag name. Got '>' instead.", - "expected-tag-name-but-got-question-mark": - "Expected tag name. Got '?' instead. (HTML doesn't " - "support processing instructions.)", - "expected-tag-name": - "Expected tag name. Got something else instead", - "expected-closing-tag-but-got-right-bracket": - "Expected closing tag. Got '>' instead. Ignoring '</>'.", - "expected-closing-tag-but-got-eof": - "Expected closing tag. Unexpected end of file.", - "expected-closing-tag-but-got-char": - "Expected closing tag. Unexpected character '%(data)s' found.", - "eof-in-tag-name": - "Unexpected end of file in the tag name.", - "expected-attribute-name-but-got-eof": - "Unexpected end of file. Expected attribute name instead.", - "eof-in-attribute-name": - "Unexpected end of file in attribute name.", - "invalid-character-in-attribute-name": - "Invalid character in attribute name", - "duplicate-attribute": - "Dropped duplicate attribute on tag.", - "expected-end-of-tag-name-but-got-eof": - "Unexpected end of file. Expected = or end of tag.", - "expected-attribute-value-but-got-eof": - "Unexpected end of file. Expected attribute value.", - "expected-attribute-value-but-got-right-bracket": - "Expected attribute value. 
Got '>' instead.", - 'equals-in-unquoted-attribute-value': - "Unexpected = in unquoted attribute", - 'unexpected-character-in-unquoted-attribute-value': - "Unexpected character in unquoted attribute", - "invalid-character-after-attribute-name": - "Unexpected character after attribute name.", - "unexpected-character-after-attribute-value": - "Unexpected character after attribute value.", - "eof-in-attribute-value-double-quote": - "Unexpected end of file in attribute value (\").", - "eof-in-attribute-value-single-quote": - "Unexpected end of file in attribute value (').", - "eof-in-attribute-value-no-quotes": - "Unexpected end of file in attribute value.", - "unexpected-EOF-after-solidus-in-tag": - "Unexpected end of file in tag. Expected >", - "unexpected-character-after-solidus-in-tag": - "Unexpected character after / in tag. Expected >", - "expected-dashes-or-doctype": - "Expected '--' or 'DOCTYPE'. Not found.", - "unexpected-bang-after-double-dash-in-comment": - "Unexpected ! after -- in comment", - "unexpected-space-after-double-dash-in-comment": - "Unexpected space after -- in comment", - "incorrect-comment": - "Incorrect comment.", - "eof-in-comment": - "Unexpected end of file in comment.", - "eof-in-comment-end-dash": - "Unexpected end of file in comment (-)", - "unexpected-dash-after-double-dash-in-comment": - "Unexpected '-' after '--' found in comment.", - "eof-in-comment-double-dash": - "Unexpected end of file in comment (--).", - "eof-in-comment-end-space-state": - "Unexpected end of file in comment.", - "eof-in-comment-end-bang-state": - "Unexpected end of file in comment.", - "unexpected-char-in-comment": - "Unexpected character in comment found.", - "need-space-after-doctype": - "No space after literal string 'DOCTYPE'.", - "expected-doctype-name-but-got-right-bracket": - "Unexpected > character. Expected DOCTYPE name.", - "expected-doctype-name-but-got-eof": - "Unexpected end of file. 
Expected DOCTYPE name.", - "eof-in-doctype-name": - "Unexpected end of file in DOCTYPE name.", - "eof-in-doctype": - "Unexpected end of file in DOCTYPE.", - "expected-space-or-right-bracket-in-doctype": - "Expected space or '>'. Got '%(data)s'", - "unexpected-end-of-doctype": - "Unexpected end of DOCTYPE.", - "unexpected-char-in-doctype": - "Unexpected character in DOCTYPE.", - "eof-in-innerhtml": - "XXX innerHTML EOF", - "unexpected-doctype": - "Unexpected DOCTYPE. Ignored.", - "non-html-root": - "html needs to be the first start tag.", - "expected-doctype-but-got-eof": - "Unexpected End of file. Expected DOCTYPE.", - "unknown-doctype": - "Erroneous DOCTYPE.", - "expected-doctype-but-got-chars": - "Unexpected non-space characters. Expected DOCTYPE.", - "expected-doctype-but-got-start-tag": - "Unexpected start tag (%(name)s). Expected DOCTYPE.", - "expected-doctype-but-got-end-tag": - "Unexpected end tag (%(name)s). Expected DOCTYPE.", - "end-tag-after-implied-root": - "Unexpected end tag (%(name)s) after the (implied) root element.", - "expected-named-closing-tag-but-got-eof": - "Unexpected end of file. Expected end tag (%(name)s).", - "two-heads-are-not-better-than-one": - "Unexpected start tag head in existing head. Ignored.", - "unexpected-end-tag": - "Unexpected end tag (%(name)s). Ignored.", - "unexpected-start-tag-out-of-my-head": - "Unexpected start tag (%(name)s) that can be in head. Moved.", - "unexpected-start-tag": - "Unexpected start tag (%(name)s).", - "missing-end-tag": - "Missing end tag (%(name)s).", - "missing-end-tags": - "Missing end tags (%(name)s).", - "unexpected-start-tag-implies-end-tag": - "Unexpected start tag (%(startName)s) " - "implies end tag (%(endName)s).", - "unexpected-start-tag-treated-as": - "Unexpected start tag (%(originalName)s). Treated as %(newName)s.", - "deprecated-tag": - "Unexpected start tag %(name)s. Don't use it!", - "unexpected-start-tag-ignored": - "Unexpected start tag %(name)s. 
Ignored.", - "expected-one-end-tag-but-got-another": - "Unexpected end tag (%(gotName)s). " - "Missing end tag (%(expectedName)s).", - "end-tag-too-early": - "End tag (%(name)s) seen too early. Expected other end tag.", - "end-tag-too-early-named": - "Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s).", - "end-tag-too-early-ignored": - "End tag (%(name)s) seen too early. Ignored.", - "adoption-agency-1.1": - "End tag (%(name)s) violates step 1, " - "paragraph 1 of the adoption agency algorithm.", - "adoption-agency-1.2": - "End tag (%(name)s) violates step 1, " - "paragraph 2 of the adoption agency algorithm.", - "adoption-agency-1.3": - "End tag (%(name)s) violates step 1, " - "paragraph 3 of the adoption agency algorithm.", - "adoption-agency-4.4": - "End tag (%(name)s) violates step 4, " - "paragraph 4 of the adoption agency algorithm.", - "unexpected-end-tag-treated-as": - "Unexpected end tag (%(originalName)s). Treated as %(newName)s.", - "no-end-tag": - "This element (%(name)s) has no end tag.", - "unexpected-implied-end-tag-in-table": - "Unexpected implied end tag (%(name)s) in the table phase.", - "unexpected-implied-end-tag-in-table-body": - "Unexpected implied end tag (%(name)s) in the table body phase.", - "unexpected-char-implies-table-voodoo": - "Unexpected non-space characters in " - "table context caused voodoo mode.", - "unexpected-hidden-input-in-table": - "Unexpected input with type hidden in table context.", - "unexpected-form-in-table": - "Unexpected form in table context.", - "unexpected-start-tag-implies-table-voodoo": - "Unexpected start tag (%(name)s) in " - "table context caused voodoo mode.", - "unexpected-end-tag-implies-table-voodoo": - "Unexpected end tag (%(name)s) in " - "table context caused voodoo mode.", - "unexpected-cell-in-table-body": - "Unexpected table cell start tag (%(name)s) " - "in the table body phase.", - "unexpected-cell-end-tag": - "Got table cell end tag (%(name)s) " - "while required end tags are 
missing.", - "unexpected-end-tag-in-table-body": - "Unexpected end tag (%(name)s) in the table body phase. Ignored.", - "unexpected-implied-end-tag-in-table-row": - "Unexpected implied end tag (%(name)s) in the table row phase.", - "unexpected-end-tag-in-table-row": - "Unexpected end tag (%(name)s) in the table row phase. Ignored.", - "unexpected-select-in-select": - "Unexpected select start tag in the select phase " - "treated as select end tag.", - "unexpected-input-in-select": - "Unexpected input start tag in the select phase.", - "unexpected-start-tag-in-select": - "Unexpected start tag token (%(name)s in the select phase. " - "Ignored.", - "unexpected-end-tag-in-select": - "Unexpected end tag (%(name)s) in the select phase. Ignored.", - "unexpected-table-element-start-tag-in-select-in-table": - "Unexpected table element start tag (%(name)s) in the select in table phase.", - "unexpected-table-element-end-tag-in-select-in-table": - "Unexpected table element end tag (%(name)s) in the select in table phase.", - "unexpected-char-after-body": - "Unexpected non-space characters in the after body phase.", - "unexpected-start-tag-after-body": - "Unexpected start tag token (%(name)s)" - " in the after body phase.", - "unexpected-end-tag-after-body": - "Unexpected end tag token (%(name)s)" - " in the after body phase.", - "unexpected-char-in-frameset": - "Unexpected characters in the frameset phase. Characters ignored.", - "unexpected-start-tag-in-frameset": - "Unexpected start tag token (%(name)s)" - " in the frameset phase. Ignored.", - "unexpected-frameset-in-frameset-innerhtml": - "Unexpected end tag token (frameset) " - "in the frameset phase (innerHTML).", - "unexpected-end-tag-in-frameset": - "Unexpected end tag token (%(name)s)" - " in the frameset phase. Ignored.", - "unexpected-char-after-frameset": - "Unexpected non-space characters in the " - "after frameset phase. 
Ignored.", - "unexpected-start-tag-after-frameset": - "Unexpected start tag (%(name)s)" - " in the after frameset phase. Ignored.", - "unexpected-end-tag-after-frameset": - "Unexpected end tag (%(name)s)" - " in the after frameset phase. Ignored.", - "unexpected-end-tag-after-body-innerhtml": - "Unexpected end tag after body(innerHtml)", - "expected-eof-but-got-char": - "Unexpected non-space characters. Expected end of file.", - "expected-eof-but-got-start-tag": - "Unexpected start tag (%(name)s)" - ". Expected end of file.", - "expected-eof-but-got-end-tag": - "Unexpected end tag (%(name)s)" - ". Expected end of file.", - "eof-in-table": - "Unexpected end of file. Expected table content.", - "eof-in-select": - "Unexpected end of file. Expected select content.", - "eof-in-frameset": - "Unexpected end of file. Expected frameset content.", - "eof-in-script-in-script": - "Unexpected end of file. Expected script content.", - "eof-in-foreign-lands": - "Unexpected end of file. Expected foreign content", - "non-void-element-with-trailing-solidus": - "Trailing solidus not allowed on element %(name)s", - "unexpected-html-element-in-foreign-content": - "Element %(name)s not allowed in a non-html context", - "unexpected-end-tag-before-html": - "Unexpected end tag (%(name)s) before html.", - "unexpected-inhead-noscript-tag": - "Element %(name)s not allowed in a inhead-noscript context", - "eof-in-head-noscript": - "Unexpected end of file. Expected inhead-noscript content", - "char-in-head-noscript": - "Unexpected non-space character. 
Expected inhead-noscript content", - "XXX-undefined-error": - "Undefined error (this sucks and should be fixed)", -} - -namespaces = { - "html": "http://www.w3.org/1999/xhtml", - "mathml": "http://www.w3.org/1998/Math/MathML", - "svg": "http://www.w3.org/2000/svg", - "xlink": "http://www.w3.org/1999/xlink", - "xml": "http://www.w3.org/XML/1998/namespace", - "xmlns": "http://www.w3.org/2000/xmlns/" -} - -scopingElements = frozenset([ - (namespaces["html"], "applet"), - (namespaces["html"], "caption"), - (namespaces["html"], "html"), - (namespaces["html"], "marquee"), - (namespaces["html"], "object"), - (namespaces["html"], "table"), - (namespaces["html"], "td"), - (namespaces["html"], "th"), - (namespaces["mathml"], "mi"), - (namespaces["mathml"], "mo"), - (namespaces["mathml"], "mn"), - (namespaces["mathml"], "ms"), - (namespaces["mathml"], "mtext"), - (namespaces["mathml"], "annotation-xml"), - (namespaces["svg"], "foreignObject"), - (namespaces["svg"], "desc"), - (namespaces["svg"], "title"), -]) - -formattingElements = frozenset([ - (namespaces["html"], "a"), - (namespaces["html"], "b"), - (namespaces["html"], "big"), - (namespaces["html"], "code"), - (namespaces["html"], "em"), - (namespaces["html"], "font"), - (namespaces["html"], "i"), - (namespaces["html"], "nobr"), - (namespaces["html"], "s"), - (namespaces["html"], "small"), - (namespaces["html"], "strike"), - (namespaces["html"], "strong"), - (namespaces["html"], "tt"), - (namespaces["html"], "u") -]) - -specialElements = frozenset([ - (namespaces["html"], "address"), - (namespaces["html"], "applet"), - (namespaces["html"], "area"), - (namespaces["html"], "article"), - (namespaces["html"], "aside"), - (namespaces["html"], "base"), - (namespaces["html"], "basefont"), - (namespaces["html"], "bgsound"), - (namespaces["html"], "blockquote"), - (namespaces["html"], "body"), - (namespaces["html"], "br"), - (namespaces["html"], "button"), - (namespaces["html"], "caption"), - (namespaces["html"], "center"), - 
(namespaces["html"], "col"), - (namespaces["html"], "colgroup"), - (namespaces["html"], "command"), - (namespaces["html"], "dd"), - (namespaces["html"], "details"), - (namespaces["html"], "dir"), - (namespaces["html"], "div"), - (namespaces["html"], "dl"), - (namespaces["html"], "dt"), - (namespaces["html"], "embed"), - (namespaces["html"], "fieldset"), - (namespaces["html"], "figure"), - (namespaces["html"], "footer"), - (namespaces["html"], "form"), - (namespaces["html"], "frame"), - (namespaces["html"], "frameset"), - (namespaces["html"], "h1"), - (namespaces["html"], "h2"), - (namespaces["html"], "h3"), - (namespaces["html"], "h4"), - (namespaces["html"], "h5"), - (namespaces["html"], "h6"), - (namespaces["html"], "head"), - (namespaces["html"], "header"), - (namespaces["html"], "hr"), - (namespaces["html"], "html"), - (namespaces["html"], "iframe"), - # Note that image is commented out in the spec as "this isn't an - # element that can end up on the stack, so it doesn't matter," - (namespaces["html"], "image"), - (namespaces["html"], "img"), - (namespaces["html"], "input"), - (namespaces["html"], "isindex"), - (namespaces["html"], "li"), - (namespaces["html"], "link"), - (namespaces["html"], "listing"), - (namespaces["html"], "marquee"), - (namespaces["html"], "menu"), - (namespaces["html"], "meta"), - (namespaces["html"], "nav"), - (namespaces["html"], "noembed"), - (namespaces["html"], "noframes"), - (namespaces["html"], "noscript"), - (namespaces["html"], "object"), - (namespaces["html"], "ol"), - (namespaces["html"], "p"), - (namespaces["html"], "param"), - (namespaces["html"], "plaintext"), - (namespaces["html"], "pre"), - (namespaces["html"], "script"), - (namespaces["html"], "section"), - (namespaces["html"], "select"), - (namespaces["html"], "style"), - (namespaces["html"], "table"), - (namespaces["html"], "tbody"), - (namespaces["html"], "td"), - (namespaces["html"], "textarea"), - (namespaces["html"], "tfoot"), - (namespaces["html"], "th"), - 
(namespaces["html"], "thead"), - (namespaces["html"], "title"), - (namespaces["html"], "tr"), - (namespaces["html"], "ul"), - (namespaces["html"], "wbr"), - (namespaces["html"], "xmp"), - (namespaces["svg"], "foreignObject") -]) - -htmlIntegrationPointElements = frozenset([ - (namespaces["mathml"], "annotation-xml"), - (namespaces["svg"], "foreignObject"), - (namespaces["svg"], "desc"), - (namespaces["svg"], "title") -]) - -mathmlTextIntegrationPointElements = frozenset([ - (namespaces["mathml"], "mi"), - (namespaces["mathml"], "mo"), - (namespaces["mathml"], "mn"), - (namespaces["mathml"], "ms"), - (namespaces["mathml"], "mtext") -]) - -adjustSVGAttributes = { - "attributename": "attributeName", - "attributetype": "attributeType", - "basefrequency": "baseFrequency", - "baseprofile": "baseProfile", - "calcmode": "calcMode", - "clippathunits": "clipPathUnits", - "contentscripttype": "contentScriptType", - "contentstyletype": "contentStyleType", - "diffuseconstant": "diffuseConstant", - "edgemode": "edgeMode", - "externalresourcesrequired": "externalResourcesRequired", - "filterres": "filterRes", - "filterunits": "filterUnits", - "glyphref": "glyphRef", - "gradienttransform": "gradientTransform", - "gradientunits": "gradientUnits", - "kernelmatrix": "kernelMatrix", - "kernelunitlength": "kernelUnitLength", - "keypoints": "keyPoints", - "keysplines": "keySplines", - "keytimes": "keyTimes", - "lengthadjust": "lengthAdjust", - "limitingconeangle": "limitingConeAngle", - "markerheight": "markerHeight", - "markerunits": "markerUnits", - "markerwidth": "markerWidth", - "maskcontentunits": "maskContentUnits", - "maskunits": "maskUnits", - "numoctaves": "numOctaves", - "pathlength": "pathLength", - "patterncontentunits": "patternContentUnits", - "patterntransform": "patternTransform", - "patternunits": "patternUnits", - "pointsatx": "pointsAtX", - "pointsaty": "pointsAtY", - "pointsatz": "pointsAtZ", - "preservealpha": "preserveAlpha", - "preserveaspectratio": 
"preserveAspectRatio", - "primitiveunits": "primitiveUnits", - "refx": "refX", - "refy": "refY", - "repeatcount": "repeatCount", - "repeatdur": "repeatDur", - "requiredextensions": "requiredExtensions", - "requiredfeatures": "requiredFeatures", - "specularconstant": "specularConstant", - "specularexponent": "specularExponent", - "spreadmethod": "spreadMethod", - "startoffset": "startOffset", - "stddeviation": "stdDeviation", - "stitchtiles": "stitchTiles", - "surfacescale": "surfaceScale", - "systemlanguage": "systemLanguage", - "tablevalues": "tableValues", - "targetx": "targetX", - "targety": "targetY", - "textlength": "textLength", - "viewbox": "viewBox", - "viewtarget": "viewTarget", - "xchannelselector": "xChannelSelector", - "ychannelselector": "yChannelSelector", - "zoomandpan": "zoomAndPan" -} - -adjustMathMLAttributes = {"definitionurl": "definitionURL"} - -adjustForeignAttributes = { - "xlink:actuate": ("xlink", "actuate", namespaces["xlink"]), - "xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]), - "xlink:href": ("xlink", "href", namespaces["xlink"]), - "xlink:role": ("xlink", "role", namespaces["xlink"]), - "xlink:show": ("xlink", "show", namespaces["xlink"]), - "xlink:title": ("xlink", "title", namespaces["xlink"]), - "xlink:type": ("xlink", "type", namespaces["xlink"]), - "xml:base": ("xml", "base", namespaces["xml"]), - "xml:lang": ("xml", "lang", namespaces["xml"]), - "xml:space": ("xml", "space", namespaces["xml"]), - "xmlns": (None, "xmlns", namespaces["xmlns"]), - "xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"]) -} - -unadjustForeignAttributes = dict([((ns, local), qname) for qname, (prefix, local, ns) in - adjustForeignAttributes.items()]) - -spaceCharacters = frozenset([ - "\t", - "\n", - "\u000C", - " ", - "\r" -]) - -tableInsertModeElements = frozenset([ - "table", - "tbody", - "tfoot", - "thead", - "tr" -]) - -asciiLowercase = frozenset(string.ascii_lowercase) -asciiUppercase = frozenset(string.ascii_uppercase) -asciiLetters = 
frozenset(string.ascii_letters) -digits = frozenset(string.digits) -hexDigits = frozenset(string.hexdigits) - -asciiUpper2Lower = dict([(ord(c), ord(c.lower())) - for c in string.ascii_uppercase]) - -# Heading elements need to be ordered -headingElements = ( - "h1", - "h2", - "h3", - "h4", - "h5", - "h6" -) - -voidElements = frozenset([ - "base", - "command", - "event-source", - "link", - "meta", - "hr", - "br", - "img", - "embed", - "param", - "area", - "col", - "input", - "source", - "track" -]) - -cdataElements = frozenset(['title', 'textarea']) - -rcdataElements = frozenset([ - 'style', - 'script', - 'xmp', - 'iframe', - 'noembed', - 'noframes', - 'noscript' -]) - -booleanAttributes = { - "": frozenset(["irrelevant", "itemscope"]), - "style": frozenset(["scoped"]), - "img": frozenset(["ismap"]), - "audio": frozenset(["autoplay", "controls"]), - "video": frozenset(["autoplay", "controls"]), - "script": frozenset(["defer", "async"]), - "details": frozenset(["open"]), - "datagrid": frozenset(["multiple", "disabled"]), - "command": frozenset(["hidden", "disabled", "checked", "default"]), - "hr": frozenset(["noshade"]), - "menu": frozenset(["autosubmit"]), - "fieldset": frozenset(["disabled", "readonly"]), - "option": frozenset(["disabled", "readonly", "selected"]), - "optgroup": frozenset(["disabled", "readonly"]), - "button": frozenset(["disabled", "autofocus"]), - "input": frozenset(["disabled", "readonly", "required", "autofocus", "checked", "ismap"]), - "select": frozenset(["disabled", "readonly", "autofocus", "multiple"]), - "output": frozenset(["disabled", "readonly"]), - "iframe": frozenset(["seamless"]), -} - -# entitiesWindows1252 has to be _ordered_ and needs to have an index. It -# therefore can't be a frozenset. 
-entitiesWindows1252 = ( - 8364, # 0x80 0x20AC EURO SIGN - 65533, # 0x81 UNDEFINED - 8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK - 402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK - 8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK - 8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS - 8224, # 0x86 0x2020 DAGGER - 8225, # 0x87 0x2021 DOUBLE DAGGER - 710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT - 8240, # 0x89 0x2030 PER MILLE SIGN - 352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON - 8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK - 338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE - 65533, # 0x8D UNDEFINED - 381, # 0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON - 65533, # 0x8F UNDEFINED - 65533, # 0x90 UNDEFINED - 8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK - 8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK - 8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK - 8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK - 8226, # 0x95 0x2022 BULLET - 8211, # 0x96 0x2013 EN DASH - 8212, # 0x97 0x2014 EM DASH - 732, # 0x98 0x02DC SMALL TILDE - 8482, # 0x99 0x2122 TRADE MARK SIGN - 353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON - 8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK - 339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE - 65533, # 0x9D UNDEFINED - 382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON - 376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS -) - -xmlEntities = frozenset(['lt;', 'gt;', 'amp;', 'apos;', 'quot;']) - -entities = { - "AElig": "\xc6", - "AElig;": "\xc6", - "AMP": "&", - "AMP;": "&", - "Aacute": "\xc1", - "Aacute;": "\xc1", - "Abreve;": "\u0102", - "Acirc": "\xc2", - "Acirc;": "\xc2", - "Acy;": "\u0410", - "Afr;": "\U0001d504", - "Agrave": "\xc0", - "Agrave;": "\xc0", - "Alpha;": "\u0391", - "Amacr;": "\u0100", - "And;": "\u2a53", - "Aogon;": "\u0104", - "Aopf;": "\U0001d538", - "ApplyFunction;": "\u2061", - "Aring": "\xc5", - "Aring;": "\xc5", - "Ascr;": "\U0001d49c", - "Assign;": "\u2254", - "Atilde": "\xc3", - "Atilde;": 
"\xc3", - "Auml": "\xc4", - "Auml;": "\xc4", - "Backslash;": "\u2216", - "Barv;": "\u2ae7", - "Barwed;": "\u2306", - "Bcy;": "\u0411", - "Because;": "\u2235", - "Bernoullis;": "\u212c", - "Beta;": "\u0392", - "Bfr;": "\U0001d505", - "Bopf;": "\U0001d539", - "Breve;": "\u02d8", - "Bscr;": "\u212c", - "Bumpeq;": "\u224e", - "CHcy;": "\u0427", - "COPY": "\xa9", - "COPY;": "\xa9", - "Cacute;": "\u0106", - "Cap;": "\u22d2", - "CapitalDifferentialD;": "\u2145", - "Cayleys;": "\u212d", - "Ccaron;": "\u010c", - "Ccedil": "\xc7", - "Ccedil;": "\xc7", - "Ccirc;": "\u0108", - "Cconint;": "\u2230", - "Cdot;": "\u010a", - "Cedilla;": "\xb8", - "CenterDot;": "\xb7", - "Cfr;": "\u212d", - "Chi;": "\u03a7", - "CircleDot;": "\u2299", - "CircleMinus;": "\u2296", - "CirclePlus;": "\u2295", - "CircleTimes;": "\u2297", - "ClockwiseContourIntegral;": "\u2232", - "CloseCurlyDoubleQuote;": "\u201d", - "CloseCurlyQuote;": "\u2019", - "Colon;": "\u2237", - "Colone;": "\u2a74", - "Congruent;": "\u2261", - "Conint;": "\u222f", - "ContourIntegral;": "\u222e", - "Copf;": "\u2102", - "Coproduct;": "\u2210", - "CounterClockwiseContourIntegral;": "\u2233", - "Cross;": "\u2a2f", - "Cscr;": "\U0001d49e", - "Cup;": "\u22d3", - "CupCap;": "\u224d", - "DD;": "\u2145", - "DDotrahd;": "\u2911", - "DJcy;": "\u0402", - "DScy;": "\u0405", - "DZcy;": "\u040f", - "Dagger;": "\u2021", - "Darr;": "\u21a1", - "Dashv;": "\u2ae4", - "Dcaron;": "\u010e", - "Dcy;": "\u0414", - "Del;": "\u2207", - "Delta;": "\u0394", - "Dfr;": "\U0001d507", - "DiacriticalAcute;": "\xb4", - "DiacriticalDot;": "\u02d9", - "DiacriticalDoubleAcute;": "\u02dd", - "DiacriticalGrave;": "`", - "DiacriticalTilde;": "\u02dc", - "Diamond;": "\u22c4", - "DifferentialD;": "\u2146", - "Dopf;": "\U0001d53b", - "Dot;": "\xa8", - "DotDot;": "\u20dc", - "DotEqual;": "\u2250", - "DoubleContourIntegral;": "\u222f", - "DoubleDot;": "\xa8", - "DoubleDownArrow;": "\u21d3", - "DoubleLeftArrow;": "\u21d0", - "DoubleLeftRightArrow;": "\u21d4", - 
"DoubleLeftTee;": "\u2ae4", - "DoubleLongLeftArrow;": "\u27f8", - "DoubleLongLeftRightArrow;": "\u27fa", - "DoubleLongRightArrow;": "\u27f9", - "DoubleRightArrow;": "\u21d2", - "DoubleRightTee;": "\u22a8", - "DoubleUpArrow;": "\u21d1", - "DoubleUpDownArrow;": "\u21d5", - "DoubleVerticalBar;": "\u2225", - "DownArrow;": "\u2193", - "DownArrowBar;": "\u2913", - "DownArrowUpArrow;": "\u21f5", - "DownBreve;": "\u0311", - "DownLeftRightVector;": "\u2950", - "DownLeftTeeVector;": "\u295e", - "DownLeftVector;": "\u21bd", - "DownLeftVectorBar;": "\u2956", - "DownRightTeeVector;": "\u295f", - "DownRightVector;": "\u21c1", - "DownRightVectorBar;": "\u2957", - "DownTee;": "\u22a4", - "DownTeeArrow;": "\u21a7", - "Downarrow;": "\u21d3", - "Dscr;": "\U0001d49f", - "Dstrok;": "\u0110", - "ENG;": "\u014a", - "ETH": "\xd0", - "ETH;": "\xd0", - "Eacute": "\xc9", - "Eacute;": "\xc9", - "Ecaron;": "\u011a", - "Ecirc": "\xca", - "Ecirc;": "\xca", - "Ecy;": "\u042d", - "Edot;": "\u0116", - "Efr;": "\U0001d508", - "Egrave": "\xc8", - "Egrave;": "\xc8", - "Element;": "\u2208", - "Emacr;": "\u0112", - "EmptySmallSquare;": "\u25fb", - "EmptyVerySmallSquare;": "\u25ab", - "Eogon;": "\u0118", - "Eopf;": "\U0001d53c", - "Epsilon;": "\u0395", - "Equal;": "\u2a75", - "EqualTilde;": "\u2242", - "Equilibrium;": "\u21cc", - "Escr;": "\u2130", - "Esim;": "\u2a73", - "Eta;": "\u0397", - "Euml": "\xcb", - "Euml;": "\xcb", - "Exists;": "\u2203", - "ExponentialE;": "\u2147", - "Fcy;": "\u0424", - "Ffr;": "\U0001d509", - "FilledSmallSquare;": "\u25fc", - "FilledVerySmallSquare;": "\u25aa", - "Fopf;": "\U0001d53d", - "ForAll;": "\u2200", - "Fouriertrf;": "\u2131", - "Fscr;": "\u2131", - "GJcy;": "\u0403", - "GT": ">", - "GT;": ">", - "Gamma;": "\u0393", - "Gammad;": "\u03dc", - "Gbreve;": "\u011e", - "Gcedil;": "\u0122", - "Gcirc;": "\u011c", - "Gcy;": "\u0413", - "Gdot;": "\u0120", - "Gfr;": "\U0001d50a", - "Gg;": "\u22d9", - "Gopf;": "\U0001d53e", - "GreaterEqual;": "\u2265", - "GreaterEqualLess;": 
"\u22db", - "GreaterFullEqual;": "\u2267", - "GreaterGreater;": "\u2aa2", - "GreaterLess;": "\u2277", - "GreaterSlantEqual;": "\u2a7e", - "GreaterTilde;": "\u2273", - "Gscr;": "\U0001d4a2", - "Gt;": "\u226b", - "HARDcy;": "\u042a", - "Hacek;": "\u02c7", - "Hat;": "^", - "Hcirc;": "\u0124", - "Hfr;": "\u210c", - "HilbertSpace;": "\u210b", - "Hopf;": "\u210d", - "HorizontalLine;": "\u2500", - "Hscr;": "\u210b", - "Hstrok;": "\u0126", - "HumpDownHump;": "\u224e", - "HumpEqual;": "\u224f", - "IEcy;": "\u0415", - "IJlig;": "\u0132", - "IOcy;": "\u0401", - "Iacute": "\xcd", - "Iacute;": "\xcd", - "Icirc": "\xce", - "Icirc;": "\xce", - "Icy;": "\u0418", - "Idot;": "\u0130", - "Ifr;": "\u2111", - "Igrave": "\xcc", - "Igrave;": "\xcc", - "Im;": "\u2111", - "Imacr;": "\u012a", - "ImaginaryI;": "\u2148", - "Implies;": "\u21d2", - "Int;": "\u222c", - "Integral;": "\u222b", - "Intersection;": "\u22c2", - "InvisibleComma;": "\u2063", - "InvisibleTimes;": "\u2062", - "Iogon;": "\u012e", - "Iopf;": "\U0001d540", - "Iota;": "\u0399", - "Iscr;": "\u2110", - "Itilde;": "\u0128", - "Iukcy;": "\u0406", - "Iuml": "\xcf", - "Iuml;": "\xcf", - "Jcirc;": "\u0134", - "Jcy;": "\u0419", - "Jfr;": "\U0001d50d", - "Jopf;": "\U0001d541", - "Jscr;": "\U0001d4a5", - "Jsercy;": "\u0408", - "Jukcy;": "\u0404", - "KHcy;": "\u0425", - "KJcy;": "\u040c", - "Kappa;": "\u039a", - "Kcedil;": "\u0136", - "Kcy;": "\u041a", - "Kfr;": "\U0001d50e", - "Kopf;": "\U0001d542", - "Kscr;": "\U0001d4a6", - "LJcy;": "\u0409", - "LT": "<", - "LT;": "<", - "Lacute;": "\u0139", - "Lambda;": "\u039b", - "Lang;": "\u27ea", - "Laplacetrf;": "\u2112", - "Larr;": "\u219e", - "Lcaron;": "\u013d", - "Lcedil;": "\u013b", - "Lcy;": "\u041b", - "LeftAngleBracket;": "\u27e8", - "LeftArrow;": "\u2190", - "LeftArrowBar;": "\u21e4", - "LeftArrowRightArrow;": "\u21c6", - "LeftCeiling;": "\u2308", - "LeftDoubleBracket;": "\u27e6", - "LeftDownTeeVector;": "\u2961", - "LeftDownVector;": "\u21c3", - "LeftDownVectorBar;": "\u2959", - 
"LeftFloor;": "\u230a", - "LeftRightArrow;": "\u2194", - "LeftRightVector;": "\u294e", - "LeftTee;": "\u22a3", - "LeftTeeArrow;": "\u21a4", - "LeftTeeVector;": "\u295a", - "LeftTriangle;": "\u22b2", - "LeftTriangleBar;": "\u29cf", - "LeftTriangleEqual;": "\u22b4", - "LeftUpDownVector;": "\u2951", - "LeftUpTeeVector;": "\u2960", - "LeftUpVector;": "\u21bf", - "LeftUpVectorBar;": "\u2958", - "LeftVector;": "\u21bc", - "LeftVectorBar;": "\u2952", - "Leftarrow;": "\u21d0", - "Leftrightarrow;": "\u21d4", - "LessEqualGreater;": "\u22da", - "LessFullEqual;": "\u2266", - "LessGreater;": "\u2276", - "LessLess;": "\u2aa1", - "LessSlantEqual;": "\u2a7d", - "LessTilde;": "\u2272", - "Lfr;": "\U0001d50f", - "Ll;": "\u22d8", - "Lleftarrow;": "\u21da", - "Lmidot;": "\u013f", - "LongLeftArrow;": "\u27f5", - "LongLeftRightArrow;": "\u27f7", - "LongRightArrow;": "\u27f6", - "Longleftarrow;": "\u27f8", - "Longleftrightarrow;": "\u27fa", - "Longrightarrow;": "\u27f9", - "Lopf;": "\U0001d543", - "LowerLeftArrow;": "\u2199", - "LowerRightArrow;": "\u2198", - "Lscr;": "\u2112", - "Lsh;": "\u21b0", - "Lstrok;": "\u0141", - "Lt;": "\u226a", - "Map;": "\u2905", - "Mcy;": "\u041c", - "MediumSpace;": "\u205f", - "Mellintrf;": "\u2133", - "Mfr;": "\U0001d510", - "MinusPlus;": "\u2213", - "Mopf;": "\U0001d544", - "Mscr;": "\u2133", - "Mu;": "\u039c", - "NJcy;": "\u040a", - "Nacute;": "\u0143", - "Ncaron;": "\u0147", - "Ncedil;": "\u0145", - "Ncy;": "\u041d", - "NegativeMediumSpace;": "\u200b", - "NegativeThickSpace;": "\u200b", - "NegativeThinSpace;": "\u200b", - "NegativeVeryThinSpace;": "\u200b", - "NestedGreaterGreater;": "\u226b", - "NestedLessLess;": "\u226a", - "NewLine;": "\n", - "Nfr;": "\U0001d511", - "NoBreak;": "\u2060", - "NonBreakingSpace;": "\xa0", - "Nopf;": "\u2115", - "Not;": "\u2aec", - "NotCongruent;": "\u2262", - "NotCupCap;": "\u226d", - "NotDoubleVerticalBar;": "\u2226", - "NotElement;": "\u2209", - "NotEqual;": "\u2260", - "NotEqualTilde;": "\u2242\u0338", - "NotExists;": 
"\u2204", - "NotGreater;": "\u226f", - "NotGreaterEqual;": "\u2271", - "NotGreaterFullEqual;": "\u2267\u0338", - "NotGreaterGreater;": "\u226b\u0338", - "NotGreaterLess;": "\u2279", - "NotGreaterSlantEqual;": "\u2a7e\u0338", - "NotGreaterTilde;": "\u2275", - "NotHumpDownHump;": "\u224e\u0338", - "NotHumpEqual;": "\u224f\u0338", - "NotLeftTriangle;": "\u22ea", - "NotLeftTriangleBar;": "\u29cf\u0338", - "NotLeftTriangleEqual;": "\u22ec", - "NotLess;": "\u226e", - "NotLessEqual;": "\u2270", - "NotLessGreater;": "\u2278", - "NotLessLess;": "\u226a\u0338", - "NotLessSlantEqual;": "\u2a7d\u0338", - "NotLessTilde;": "\u2274", - "NotNestedGreaterGreater;": "\u2aa2\u0338", - "NotNestedLessLess;": "\u2aa1\u0338", - "NotPrecedes;": "\u2280", - "NotPrecedesEqual;": "\u2aaf\u0338", - "NotPrecedesSlantEqual;": "\u22e0", - "NotReverseElement;": "\u220c", - "NotRightTriangle;": "\u22eb", - "NotRightTriangleBar;": "\u29d0\u0338", - "NotRightTriangleEqual;": "\u22ed", - "NotSquareSubset;": "\u228f\u0338", - "NotSquareSubsetEqual;": "\u22e2", - "NotSquareSuperset;": "\u2290\u0338", - "NotSquareSupersetEqual;": "\u22e3", - "NotSubset;": "\u2282\u20d2", - "NotSubsetEqual;": "\u2288", - "NotSucceeds;": "\u2281", - "NotSucceedsEqual;": "\u2ab0\u0338", - "NotSucceedsSlantEqual;": "\u22e1", - "NotSucceedsTilde;": "\u227f\u0338", - "NotSuperset;": "\u2283\u20d2", - "NotSupersetEqual;": "\u2289", - "NotTilde;": "\u2241", - "NotTildeEqual;": "\u2244", - "NotTildeFullEqual;": "\u2247", - "NotTildeTilde;": "\u2249", - "NotVerticalBar;": "\u2224", - "Nscr;": "\U0001d4a9", - "Ntilde": "\xd1", - "Ntilde;": "\xd1", - "Nu;": "\u039d", - "OElig;": "\u0152", - "Oacute": "\xd3", - "Oacute;": "\xd3", - "Ocirc": "\xd4", - "Ocirc;": "\xd4", - "Ocy;": "\u041e", - "Odblac;": "\u0150", - "Ofr;": "\U0001d512", - "Ograve": "\xd2", - "Ograve;": "\xd2", - "Omacr;": "\u014c", - "Omega;": "\u03a9", - "Omicron;": "\u039f", - "Oopf;": "\U0001d546", - "OpenCurlyDoubleQuote;": "\u201c", - "OpenCurlyQuote;": "\u2018", 
- "Or;": "\u2a54", - "Oscr;": "\U0001d4aa", - "Oslash": "\xd8", - "Oslash;": "\xd8", - "Otilde": "\xd5", - "Otilde;": "\xd5", - "Otimes;": "\u2a37", - "Ouml": "\xd6", - "Ouml;": "\xd6", - "OverBar;": "\u203e", - "OverBrace;": "\u23de", - "OverBracket;": "\u23b4", - "OverParenthesis;": "\u23dc", - "PartialD;": "\u2202", - "Pcy;": "\u041f", - "Pfr;": "\U0001d513", - "Phi;": "\u03a6", - "Pi;": "\u03a0", - "PlusMinus;": "\xb1", - "Poincareplane;": "\u210c", - "Popf;": "\u2119", - "Pr;": "\u2abb", - "Precedes;": "\u227a", - "PrecedesEqual;": "\u2aaf", - "PrecedesSlantEqual;": "\u227c", - "PrecedesTilde;": "\u227e", - "Prime;": "\u2033", - "Product;": "\u220f", - "Proportion;": "\u2237", - "Proportional;": "\u221d", - "Pscr;": "\U0001d4ab", - "Psi;": "\u03a8", - "QUOT": "\"", - "QUOT;": "\"", - "Qfr;": "\U0001d514", - "Qopf;": "\u211a", - "Qscr;": "\U0001d4ac", - "RBarr;": "\u2910", - "REG": "\xae", - "REG;": "\xae", - "Racute;": "\u0154", - "Rang;": "\u27eb", - "Rarr;": "\u21a0", - "Rarrtl;": "\u2916", - "Rcaron;": "\u0158", - "Rcedil;": "\u0156", - "Rcy;": "\u0420", - "Re;": "\u211c", - "ReverseElement;": "\u220b", - "ReverseEquilibrium;": "\u21cb", - "ReverseUpEquilibrium;": "\u296f", - "Rfr;": "\u211c", - "Rho;": "\u03a1", - "RightAngleBracket;": "\u27e9", - "RightArrow;": "\u2192", - "RightArrowBar;": "\u21e5", - "RightArrowLeftArrow;": "\u21c4", - "RightCeiling;": "\u2309", - "RightDoubleBracket;": "\u27e7", - "RightDownTeeVector;": "\u295d", - "RightDownVector;": "\u21c2", - "RightDownVectorBar;": "\u2955", - "RightFloor;": "\u230b", - "RightTee;": "\u22a2", - "RightTeeArrow;": "\u21a6", - "RightTeeVector;": "\u295b", - "RightTriangle;": "\u22b3", - "RightTriangleBar;": "\u29d0", - "RightTriangleEqual;": "\u22b5", - "RightUpDownVector;": "\u294f", - "RightUpTeeVector;": "\u295c", - "RightUpVector;": "\u21be", - "RightUpVectorBar;": "\u2954", - "RightVector;": "\u21c0", - "RightVectorBar;": "\u2953", - "Rightarrow;": "\u21d2", - "Ropf;": "\u211d", - 
"RoundImplies;": "\u2970", - "Rrightarrow;": "\u21db", - "Rscr;": "\u211b", - "Rsh;": "\u21b1", - "RuleDelayed;": "\u29f4", - "SHCHcy;": "\u0429", - "SHcy;": "\u0428", - "SOFTcy;": "\u042c", - "Sacute;": "\u015a", - "Sc;": "\u2abc", - "Scaron;": "\u0160", - "Scedil;": "\u015e", - "Scirc;": "\u015c", - "Scy;": "\u0421", - "Sfr;": "\U0001d516", - "ShortDownArrow;": "\u2193", - "ShortLeftArrow;": "\u2190", - "ShortRightArrow;": "\u2192", - "ShortUpArrow;": "\u2191", - "Sigma;": "\u03a3", - "SmallCircle;": "\u2218", - "Sopf;": "\U0001d54a", - "Sqrt;": "\u221a", - "Square;": "\u25a1", - "SquareIntersection;": "\u2293", - "SquareSubset;": "\u228f", - "SquareSubsetEqual;": "\u2291", - "SquareSuperset;": "\u2290", - "SquareSupersetEqual;": "\u2292", - "SquareUnion;": "\u2294", - "Sscr;": "\U0001d4ae", - "Star;": "\u22c6", - "Sub;": "\u22d0", - "Subset;": "\u22d0", - "SubsetEqual;": "\u2286", - "Succeeds;": "\u227b", - "SucceedsEqual;": "\u2ab0", - "SucceedsSlantEqual;": "\u227d", - "SucceedsTilde;": "\u227f", - "SuchThat;": "\u220b", - "Sum;": "\u2211", - "Sup;": "\u22d1", - "Superset;": "\u2283", - "SupersetEqual;": "\u2287", - "Supset;": "\u22d1", - "THORN": "\xde", - "THORN;": "\xde", - "TRADE;": "\u2122", - "TSHcy;": "\u040b", - "TScy;": "\u0426", - "Tab;": "\t", - "Tau;": "\u03a4", - "Tcaron;": "\u0164", - "Tcedil;": "\u0162", - "Tcy;": "\u0422", - "Tfr;": "\U0001d517", - "Therefore;": "\u2234", - "Theta;": "\u0398", - "ThickSpace;": "\u205f\u200a", - "ThinSpace;": "\u2009", - "Tilde;": "\u223c", - "TildeEqual;": "\u2243", - "TildeFullEqual;": "\u2245", - "TildeTilde;": "\u2248", - "Topf;": "\U0001d54b", - "TripleDot;": "\u20db", - "Tscr;": "\U0001d4af", - "Tstrok;": "\u0166", - "Uacute": "\xda", - "Uacute;": "\xda", - "Uarr;": "\u219f", - "Uarrocir;": "\u2949", - "Ubrcy;": "\u040e", - "Ubreve;": "\u016c", - "Ucirc": "\xdb", - "Ucirc;": "\xdb", - "Ucy;": "\u0423", - "Udblac;": "\u0170", - "Ufr;": "\U0001d518", - "Ugrave": "\xd9", - "Ugrave;": "\xd9", - "Umacr;": 
"\u016a", - "UnderBar;": "_", - "UnderBrace;": "\u23df", - "UnderBracket;": "\u23b5", - "UnderParenthesis;": "\u23dd", - "Union;": "\u22c3", - "UnionPlus;": "\u228e", - "Uogon;": "\u0172", - "Uopf;": "\U0001d54c", - "UpArrow;": "\u2191", - "UpArrowBar;": "\u2912", - "UpArrowDownArrow;": "\u21c5", - "UpDownArrow;": "\u2195", - "UpEquilibrium;": "\u296e", - "UpTee;": "\u22a5", - "UpTeeArrow;": "\u21a5", - "Uparrow;": "\u21d1", - "Updownarrow;": "\u21d5", - "UpperLeftArrow;": "\u2196", - "UpperRightArrow;": "\u2197", - "Upsi;": "\u03d2", - "Upsilon;": "\u03a5", - "Uring;": "\u016e", - "Uscr;": "\U0001d4b0", - "Utilde;": "\u0168", - "Uuml": "\xdc", - "Uuml;": "\xdc", - "VDash;": "\u22ab", - "Vbar;": "\u2aeb", - "Vcy;": "\u0412", - "Vdash;": "\u22a9", - "Vdashl;": "\u2ae6", - "Vee;": "\u22c1", - "Verbar;": "\u2016", - "Vert;": "\u2016", - "VerticalBar;": "\u2223", - "VerticalLine;": "|", - "VerticalSeparator;": "\u2758", - "VerticalTilde;": "\u2240", - "VeryThinSpace;": "\u200a", - "Vfr;": "\U0001d519", - "Vopf;": "\U0001d54d", - "Vscr;": "\U0001d4b1", - "Vvdash;": "\u22aa", - "Wcirc;": "\u0174", - "Wedge;": "\u22c0", - "Wfr;": "\U0001d51a", - "Wopf;": "\U0001d54e", - "Wscr;": "\U0001d4b2", - "Xfr;": "\U0001d51b", - "Xi;": "\u039e", - "Xopf;": "\U0001d54f", - "Xscr;": "\U0001d4b3", - "YAcy;": "\u042f", - "YIcy;": "\u0407", - "YUcy;": "\u042e", - "Yacute": "\xdd", - "Yacute;": "\xdd", - "Ycirc;": "\u0176", - "Ycy;": "\u042b", - "Yfr;": "\U0001d51c", - "Yopf;": "\U0001d550", - "Yscr;": "\U0001d4b4", - "Yuml;": "\u0178", - "ZHcy;": "\u0416", - "Zacute;": "\u0179", - "Zcaron;": "\u017d", - "Zcy;": "\u0417", - "Zdot;": "\u017b", - "ZeroWidthSpace;": "\u200b", - "Zeta;": "\u0396", - "Zfr;": "\u2128", - "Zopf;": "\u2124", - "Zscr;": "\U0001d4b5", - "aacute": "\xe1", - "aacute;": "\xe1", - "abreve;": "\u0103", - "ac;": "\u223e", - "acE;": "\u223e\u0333", - "acd;": "\u223f", - "acirc": "\xe2", - "acirc;": "\xe2", - "acute": "\xb4", - "acute;": "\xb4", - "acy;": "\u0430", - 
"aelig": "\xe6", - "aelig;": "\xe6", - "af;": "\u2061", - "afr;": "\U0001d51e", - "agrave": "\xe0", - "agrave;": "\xe0", - "alefsym;": "\u2135", - "aleph;": "\u2135", - "alpha;": "\u03b1", - "amacr;": "\u0101", - "amalg;": "\u2a3f", - "amp": "&", - "amp;": "&", - "and;": "\u2227", - "andand;": "\u2a55", - "andd;": "\u2a5c", - "andslope;": "\u2a58", - "andv;": "\u2a5a", - "ang;": "\u2220", - "ange;": "\u29a4", - "angle;": "\u2220", - "angmsd;": "\u2221", - "angmsdaa;": "\u29a8", - "angmsdab;": "\u29a9", - "angmsdac;": "\u29aa", - "angmsdad;": "\u29ab", - "angmsdae;": "\u29ac", - "angmsdaf;": "\u29ad", - "angmsdag;": "\u29ae", - "angmsdah;": "\u29af", - "angrt;": "\u221f", - "angrtvb;": "\u22be", - "angrtvbd;": "\u299d", - "angsph;": "\u2222", - "angst;": "\xc5", - "angzarr;": "\u237c", - "aogon;": "\u0105", - "aopf;": "\U0001d552", - "ap;": "\u2248", - "apE;": "\u2a70", - "apacir;": "\u2a6f", - "ape;": "\u224a", - "apid;": "\u224b", - "apos;": "'", - "approx;": "\u2248", - "approxeq;": "\u224a", - "aring": "\xe5", - "aring;": "\xe5", - "ascr;": "\U0001d4b6", - "ast;": "*", - "asymp;": "\u2248", - "asympeq;": "\u224d", - "atilde": "\xe3", - "atilde;": "\xe3", - "auml": "\xe4", - "auml;": "\xe4", - "awconint;": "\u2233", - "awint;": "\u2a11", - "bNot;": "\u2aed", - "backcong;": "\u224c", - "backepsilon;": "\u03f6", - "backprime;": "\u2035", - "backsim;": "\u223d", - "backsimeq;": "\u22cd", - "barvee;": "\u22bd", - "barwed;": "\u2305", - "barwedge;": "\u2305", - "bbrk;": "\u23b5", - "bbrktbrk;": "\u23b6", - "bcong;": "\u224c", - "bcy;": "\u0431", - "bdquo;": "\u201e", - "becaus;": "\u2235", - "because;": "\u2235", - "bemptyv;": "\u29b0", - "bepsi;": "\u03f6", - "bernou;": "\u212c", - "beta;": "\u03b2", - "beth;": "\u2136", - "between;": "\u226c", - "bfr;": "\U0001d51f", - "bigcap;": "\u22c2", - "bigcirc;": "\u25ef", - "bigcup;": "\u22c3", - "bigodot;": "\u2a00", - "bigoplus;": "\u2a01", - "bigotimes;": "\u2a02", - "bigsqcup;": "\u2a06", - "bigstar;": "\u2605", - 
"bigtriangledown;": "\u25bd", - "bigtriangleup;": "\u25b3", - "biguplus;": "\u2a04", - "bigvee;": "\u22c1", - "bigwedge;": "\u22c0", - "bkarow;": "\u290d", - "blacklozenge;": "\u29eb", - "blacksquare;": "\u25aa", - "blacktriangle;": "\u25b4", - "blacktriangledown;": "\u25be", - "blacktriangleleft;": "\u25c2", - "blacktriangleright;": "\u25b8", - "blank;": "\u2423", - "blk12;": "\u2592", - "blk14;": "\u2591", - "blk34;": "\u2593", - "block;": "\u2588", - "bne;": "=\u20e5", - "bnequiv;": "\u2261\u20e5", - "bnot;": "\u2310", - "bopf;": "\U0001d553", - "bot;": "\u22a5", - "bottom;": "\u22a5", - "bowtie;": "\u22c8", - "boxDL;": "\u2557", - "boxDR;": "\u2554", - "boxDl;": "\u2556", - "boxDr;": "\u2553", - "boxH;": "\u2550", - "boxHD;": "\u2566", - "boxHU;": "\u2569", - "boxHd;": "\u2564", - "boxHu;": "\u2567", - "boxUL;": "\u255d", - "boxUR;": "\u255a", - "boxUl;": "\u255c", - "boxUr;": "\u2559", - "boxV;": "\u2551", - "boxVH;": "\u256c", - "boxVL;": "\u2563", - "boxVR;": "\u2560", - "boxVh;": "\u256b", - "boxVl;": "\u2562", - "boxVr;": "\u255f", - "boxbox;": "\u29c9", - "boxdL;": "\u2555", - "boxdR;": "\u2552", - "boxdl;": "\u2510", - "boxdr;": "\u250c", - "boxh;": "\u2500", - "boxhD;": "\u2565", - "boxhU;": "\u2568", - "boxhd;": "\u252c", - "boxhu;": "\u2534", - "boxminus;": "\u229f", - "boxplus;": "\u229e", - "boxtimes;": "\u22a0", - "boxuL;": "\u255b", - "boxuR;": "\u2558", - "boxul;": "\u2518", - "boxur;": "\u2514", - "boxv;": "\u2502", - "boxvH;": "\u256a", - "boxvL;": "\u2561", - "boxvR;": "\u255e", - "boxvh;": "\u253c", - "boxvl;": "\u2524", - "boxvr;": "\u251c", - "bprime;": "\u2035", - "breve;": "\u02d8", - "brvbar": "\xa6", - "brvbar;": "\xa6", - "bscr;": "\U0001d4b7", - "bsemi;": "\u204f", - "bsim;": "\u223d", - "bsime;": "\u22cd", - "bsol;": "\\", - "bsolb;": "\u29c5", - "bsolhsub;": "\u27c8", - "bull;": "\u2022", - "bullet;": "\u2022", - "bump;": "\u224e", - "bumpE;": "\u2aae", - "bumpe;": "\u224f", - "bumpeq;": "\u224f", - "cacute;": "\u0107", - "cap;": 
"\u2229", - "capand;": "\u2a44", - "capbrcup;": "\u2a49", - "capcap;": "\u2a4b", - "capcup;": "\u2a47", - "capdot;": "\u2a40", - "caps;": "\u2229\ufe00", - "caret;": "\u2041", - "caron;": "\u02c7", - "ccaps;": "\u2a4d", - "ccaron;": "\u010d", - "ccedil": "\xe7", - "ccedil;": "\xe7", - "ccirc;": "\u0109", - "ccups;": "\u2a4c", - "ccupssm;": "\u2a50", - "cdot;": "\u010b", - "cedil": "\xb8", - "cedil;": "\xb8", - "cemptyv;": "\u29b2", - "cent": "\xa2", - "cent;": "\xa2", - "centerdot;": "\xb7", - "cfr;": "\U0001d520", - "chcy;": "\u0447", - "check;": "\u2713", - "checkmark;": "\u2713", - "chi;": "\u03c7", - "cir;": "\u25cb", - "cirE;": "\u29c3", - "circ;": "\u02c6", - "circeq;": "\u2257", - "circlearrowleft;": "\u21ba", - "circlearrowright;": "\u21bb", - "circledR;": "\xae", - "circledS;": "\u24c8", - "circledast;": "\u229b", - "circledcirc;": "\u229a", - "circleddash;": "\u229d", - "cire;": "\u2257", - "cirfnint;": "\u2a10", - "cirmid;": "\u2aef", - "cirscir;": "\u29c2", - "clubs;": "\u2663", - "clubsuit;": "\u2663", - "colon;": ":", - "colone;": "\u2254", - "coloneq;": "\u2254", - "comma;": ",", - "commat;": "@", - "comp;": "\u2201", - "compfn;": "\u2218", - "complement;": "\u2201", - "complexes;": "\u2102", - "cong;": "\u2245", - "congdot;": "\u2a6d", - "conint;": "\u222e", - "copf;": "\U0001d554", - "coprod;": "\u2210", - "copy": "\xa9", - "copy;": "\xa9", - "copysr;": "\u2117", - "crarr;": "\u21b5", - "cross;": "\u2717", - "cscr;": "\U0001d4b8", - "csub;": "\u2acf", - "csube;": "\u2ad1", - "csup;": "\u2ad0", - "csupe;": "\u2ad2", - "ctdot;": "\u22ef", - "cudarrl;": "\u2938", - "cudarrr;": "\u2935", - "cuepr;": "\u22de", - "cuesc;": "\u22df", - "cularr;": "\u21b6", - "cularrp;": "\u293d", - "cup;": "\u222a", - "cupbrcap;": "\u2a48", - "cupcap;": "\u2a46", - "cupcup;": "\u2a4a", - "cupdot;": "\u228d", - "cupor;": "\u2a45", - "cups;": "\u222a\ufe00", - "curarr;": "\u21b7", - "curarrm;": "\u293c", - "curlyeqprec;": "\u22de", - "curlyeqsucc;": "\u22df", - "curlyvee;": 
"\u22ce", - "curlywedge;": "\u22cf", - "curren": "\xa4", - "curren;": "\xa4", - "curvearrowleft;": "\u21b6", - "curvearrowright;": "\u21b7", - "cuvee;": "\u22ce", - "cuwed;": "\u22cf", - "cwconint;": "\u2232", - "cwint;": "\u2231", - "cylcty;": "\u232d", - "dArr;": "\u21d3", - "dHar;": "\u2965", - "dagger;": "\u2020", - "daleth;": "\u2138", - "darr;": "\u2193", - "dash;": "\u2010", - "dashv;": "\u22a3", - "dbkarow;": "\u290f", - "dblac;": "\u02dd", - "dcaron;": "\u010f", - "dcy;": "\u0434", - "dd;": "\u2146", - "ddagger;": "\u2021", - "ddarr;": "\u21ca", - "ddotseq;": "\u2a77", - "deg": "\xb0", - "deg;": "\xb0", - "delta;": "\u03b4", - "demptyv;": "\u29b1", - "dfisht;": "\u297f", - "dfr;": "\U0001d521", - "dharl;": "\u21c3", - "dharr;": "\u21c2", - "diam;": "\u22c4", - "diamond;": "\u22c4", - "diamondsuit;": "\u2666", - "diams;": "\u2666", - "die;": "\xa8", - "digamma;": "\u03dd", - "disin;": "\u22f2", - "div;": "\xf7", - "divide": "\xf7", - "divide;": "\xf7", - "divideontimes;": "\u22c7", - "divonx;": "\u22c7", - "djcy;": "\u0452", - "dlcorn;": "\u231e", - "dlcrop;": "\u230d", - "dollar;": "$", - "dopf;": "\U0001d555", - "dot;": "\u02d9", - "doteq;": "\u2250", - "doteqdot;": "\u2251", - "dotminus;": "\u2238", - "dotplus;": "\u2214", - "dotsquare;": "\u22a1", - "doublebarwedge;": "\u2306", - "downarrow;": "\u2193", - "downdownarrows;": "\u21ca", - "downharpoonleft;": "\u21c3", - "downharpoonright;": "\u21c2", - "drbkarow;": "\u2910", - "drcorn;": "\u231f", - "drcrop;": "\u230c", - "dscr;": "\U0001d4b9", - "dscy;": "\u0455", - "dsol;": "\u29f6", - "dstrok;": "\u0111", - "dtdot;": "\u22f1", - "dtri;": "\u25bf", - "dtrif;": "\u25be", - "duarr;": "\u21f5", - "duhar;": "\u296f", - "dwangle;": "\u29a6", - "dzcy;": "\u045f", - "dzigrarr;": "\u27ff", - "eDDot;": "\u2a77", - "eDot;": "\u2251", - "eacute": "\xe9", - "eacute;": "\xe9", - "easter;": "\u2a6e", - "ecaron;": "\u011b", - "ecir;": "\u2256", - "ecirc": "\xea", - "ecirc;": "\xea", - "ecolon;": "\u2255", - "ecy;": 
"\u044d", - "edot;": "\u0117", - "ee;": "\u2147", - "efDot;": "\u2252", - "efr;": "\U0001d522", - "eg;": "\u2a9a", - "egrave": "\xe8", - "egrave;": "\xe8", - "egs;": "\u2a96", - "egsdot;": "\u2a98", - "el;": "\u2a99", - "elinters;": "\u23e7", - "ell;": "\u2113", - "els;": "\u2a95", - "elsdot;": "\u2a97", - "emacr;": "\u0113", - "empty;": "\u2205", - "emptyset;": "\u2205", - "emptyv;": "\u2205", - "emsp13;": "\u2004", - "emsp14;": "\u2005", - "emsp;": "\u2003", - "eng;": "\u014b", - "ensp;": "\u2002", - "eogon;": "\u0119", - "eopf;": "\U0001d556", - "epar;": "\u22d5", - "eparsl;": "\u29e3", - "eplus;": "\u2a71", - "epsi;": "\u03b5", - "epsilon;": "\u03b5", - "epsiv;": "\u03f5", - "eqcirc;": "\u2256", - "eqcolon;": "\u2255", - "eqsim;": "\u2242", - "eqslantgtr;": "\u2a96", - "eqslantless;": "\u2a95", - "equals;": "=", - "equest;": "\u225f", - "equiv;": "\u2261", - "equivDD;": "\u2a78", - "eqvparsl;": "\u29e5", - "erDot;": "\u2253", - "erarr;": "\u2971", - "escr;": "\u212f", - "esdot;": "\u2250", - "esim;": "\u2242", - "eta;": "\u03b7", - "eth": "\xf0", - "eth;": "\xf0", - "euml": "\xeb", - "euml;": "\xeb", - "euro;": "\u20ac", - "excl;": "!", - "exist;": "\u2203", - "expectation;": "\u2130", - "exponentiale;": "\u2147", - "fallingdotseq;": "\u2252", - "fcy;": "\u0444", - "female;": "\u2640", - "ffilig;": "\ufb03", - "fflig;": "\ufb00", - "ffllig;": "\ufb04", - "ffr;": "\U0001d523", - "filig;": "\ufb01", - "fjlig;": "fj", - "flat;": "\u266d", - "fllig;": "\ufb02", - "fltns;": "\u25b1", - "fnof;": "\u0192", - "fopf;": "\U0001d557", - "forall;": "\u2200", - "fork;": "\u22d4", - "forkv;": "\u2ad9", - "fpartint;": "\u2a0d", - "frac12": "\xbd", - "frac12;": "\xbd", - "frac13;": "\u2153", - "frac14": "\xbc", - "frac14;": "\xbc", - "frac15;": "\u2155", - "frac16;": "\u2159", - "frac18;": "\u215b", - "frac23;": "\u2154", - "frac25;": "\u2156", - "frac34": "\xbe", - "frac34;": "\xbe", - "frac35;": "\u2157", - "frac38;": "\u215c", - "frac45;": "\u2158", - "frac56;": "\u215a", - 
"frac58;": "\u215d", - "frac78;": "\u215e", - "frasl;": "\u2044", - "frown;": "\u2322", - "fscr;": "\U0001d4bb", - "gE;": "\u2267", - "gEl;": "\u2a8c", - "gacute;": "\u01f5", - "gamma;": "\u03b3", - "gammad;": "\u03dd", - "gap;": "\u2a86", - "gbreve;": "\u011f", - "gcirc;": "\u011d", - "gcy;": "\u0433", - "gdot;": "\u0121", - "ge;": "\u2265", - "gel;": "\u22db", - "geq;": "\u2265", - "geqq;": "\u2267", - "geqslant;": "\u2a7e", - "ges;": "\u2a7e", - "gescc;": "\u2aa9", - "gesdot;": "\u2a80", - "gesdoto;": "\u2a82", - "gesdotol;": "\u2a84", - "gesl;": "\u22db\ufe00", - "gesles;": "\u2a94", - "gfr;": "\U0001d524", - "gg;": "\u226b", - "ggg;": "\u22d9", - "gimel;": "\u2137", - "gjcy;": "\u0453", - "gl;": "\u2277", - "glE;": "\u2a92", - "gla;": "\u2aa5", - "glj;": "\u2aa4", - "gnE;": "\u2269", - "gnap;": "\u2a8a", - "gnapprox;": "\u2a8a", - "gne;": "\u2a88", - "gneq;": "\u2a88", - "gneqq;": "\u2269", - "gnsim;": "\u22e7", - "gopf;": "\U0001d558", - "grave;": "`", - "gscr;": "\u210a", - "gsim;": "\u2273", - "gsime;": "\u2a8e", - "gsiml;": "\u2a90", - "gt": ">", - "gt;": ">", - "gtcc;": "\u2aa7", - "gtcir;": "\u2a7a", - "gtdot;": "\u22d7", - "gtlPar;": "\u2995", - "gtquest;": "\u2a7c", - "gtrapprox;": "\u2a86", - "gtrarr;": "\u2978", - "gtrdot;": "\u22d7", - "gtreqless;": "\u22db", - "gtreqqless;": "\u2a8c", - "gtrless;": "\u2277", - "gtrsim;": "\u2273", - "gvertneqq;": "\u2269\ufe00", - "gvnE;": "\u2269\ufe00", - "hArr;": "\u21d4", - "hairsp;": "\u200a", - "half;": "\xbd", - "hamilt;": "\u210b", - "hardcy;": "\u044a", - "harr;": "\u2194", - "harrcir;": "\u2948", - "harrw;": "\u21ad", - "hbar;": "\u210f", - "hcirc;": "\u0125", - "hearts;": "\u2665", - "heartsuit;": "\u2665", - "hellip;": "\u2026", - "hercon;": "\u22b9", - "hfr;": "\U0001d525", - "hksearow;": "\u2925", - "hkswarow;": "\u2926", - "hoarr;": "\u21ff", - "homtht;": "\u223b", - "hookleftarrow;": "\u21a9", - "hookrightarrow;": "\u21aa", - "hopf;": "\U0001d559", - "horbar;": "\u2015", - "hscr;": "\U0001d4bd", - 
"hslash;": "\u210f", - "hstrok;": "\u0127", - "hybull;": "\u2043", - "hyphen;": "\u2010", - "iacute": "\xed", - "iacute;": "\xed", - "ic;": "\u2063", - "icirc": "\xee", - "icirc;": "\xee", - "icy;": "\u0438", - "iecy;": "\u0435", - "iexcl": "\xa1", - "iexcl;": "\xa1", - "iff;": "\u21d4", - "ifr;": "\U0001d526", - "igrave": "\xec", - "igrave;": "\xec", - "ii;": "\u2148", - "iiiint;": "\u2a0c", - "iiint;": "\u222d", - "iinfin;": "\u29dc", - "iiota;": "\u2129", - "ijlig;": "\u0133", - "imacr;": "\u012b", - "image;": "\u2111", - "imagline;": "\u2110", - "imagpart;": "\u2111", - "imath;": "\u0131", - "imof;": "\u22b7", - "imped;": "\u01b5", - "in;": "\u2208", - "incare;": "\u2105", - "infin;": "\u221e", - "infintie;": "\u29dd", - "inodot;": "\u0131", - "int;": "\u222b", - "intcal;": "\u22ba", - "integers;": "\u2124", - "intercal;": "\u22ba", - "intlarhk;": "\u2a17", - "intprod;": "\u2a3c", - "iocy;": "\u0451", - "iogon;": "\u012f", - "iopf;": "\U0001d55a", - "iota;": "\u03b9", - "iprod;": "\u2a3c", - "iquest": "\xbf", - "iquest;": "\xbf", - "iscr;": "\U0001d4be", - "isin;": "\u2208", - "isinE;": "\u22f9", - "isindot;": "\u22f5", - "isins;": "\u22f4", - "isinsv;": "\u22f3", - "isinv;": "\u2208", - "it;": "\u2062", - "itilde;": "\u0129", - "iukcy;": "\u0456", - "iuml": "\xef", - "iuml;": "\xef", - "jcirc;": "\u0135", - "jcy;": "\u0439", - "jfr;": "\U0001d527", - "jmath;": "\u0237", - "jopf;": "\U0001d55b", - "jscr;": "\U0001d4bf", - "jsercy;": "\u0458", - "jukcy;": "\u0454", - "kappa;": "\u03ba", - "kappav;": "\u03f0", - "kcedil;": "\u0137", - "kcy;": "\u043a", - "kfr;": "\U0001d528", - "kgreen;": "\u0138", - "khcy;": "\u0445", - "kjcy;": "\u045c", - "kopf;": "\U0001d55c", - "kscr;": "\U0001d4c0", - "lAarr;": "\u21da", - "lArr;": "\u21d0", - "lAtail;": "\u291b", - "lBarr;": "\u290e", - "lE;": "\u2266", - "lEg;": "\u2a8b", - "lHar;": "\u2962", - "lacute;": "\u013a", - "laemptyv;": "\u29b4", - "lagran;": "\u2112", - "lambda;": "\u03bb", - "lang;": "\u27e8", - "langd;": 
"\u2991", - "langle;": "\u27e8", - "lap;": "\u2a85", - "laquo": "\xab", - "laquo;": "\xab", - "larr;": "\u2190", - "larrb;": "\u21e4", - "larrbfs;": "\u291f", - "larrfs;": "\u291d", - "larrhk;": "\u21a9", - "larrlp;": "\u21ab", - "larrpl;": "\u2939", - "larrsim;": "\u2973", - "larrtl;": "\u21a2", - "lat;": "\u2aab", - "latail;": "\u2919", - "late;": "\u2aad", - "lates;": "\u2aad\ufe00", - "lbarr;": "\u290c", - "lbbrk;": "\u2772", - "lbrace;": "{", - "lbrack;": "[", - "lbrke;": "\u298b", - "lbrksld;": "\u298f", - "lbrkslu;": "\u298d", - "lcaron;": "\u013e", - "lcedil;": "\u013c", - "lceil;": "\u2308", - "lcub;": "{", - "lcy;": "\u043b", - "ldca;": "\u2936", - "ldquo;": "\u201c", - "ldquor;": "\u201e", - "ldrdhar;": "\u2967", - "ldrushar;": "\u294b", - "ldsh;": "\u21b2", - "le;": "\u2264", - "leftarrow;": "\u2190", - "leftarrowtail;": "\u21a2", - "leftharpoondown;": "\u21bd", - "leftharpoonup;": "\u21bc", - "leftleftarrows;": "\u21c7", - "leftrightarrow;": "\u2194", - "leftrightarrows;": "\u21c6", - "leftrightharpoons;": "\u21cb", - "leftrightsquigarrow;": "\u21ad", - "leftthreetimes;": "\u22cb", - "leg;": "\u22da", - "leq;": "\u2264", - "leqq;": "\u2266", - "leqslant;": "\u2a7d", - "les;": "\u2a7d", - "lescc;": "\u2aa8", - "lesdot;": "\u2a7f", - "lesdoto;": "\u2a81", - "lesdotor;": "\u2a83", - "lesg;": "\u22da\ufe00", - "lesges;": "\u2a93", - "lessapprox;": "\u2a85", - "lessdot;": "\u22d6", - "lesseqgtr;": "\u22da", - "lesseqqgtr;": "\u2a8b", - "lessgtr;": "\u2276", - "lesssim;": "\u2272", - "lfisht;": "\u297c", - "lfloor;": "\u230a", - "lfr;": "\U0001d529", - "lg;": "\u2276", - "lgE;": "\u2a91", - "lhard;": "\u21bd", - "lharu;": "\u21bc", - "lharul;": "\u296a", - "lhblk;": "\u2584", - "ljcy;": "\u0459", - "ll;": "\u226a", - "llarr;": "\u21c7", - "llcorner;": "\u231e", - "llhard;": "\u296b", - "lltri;": "\u25fa", - "lmidot;": "\u0140", - "lmoust;": "\u23b0", - "lmoustache;": "\u23b0", - "lnE;": "\u2268", - "lnap;": "\u2a89", - "lnapprox;": "\u2a89", - "lne;": 
"\u2a87", - "lneq;": "\u2a87", - "lneqq;": "\u2268", - "lnsim;": "\u22e6", - "loang;": "\u27ec", - "loarr;": "\u21fd", - "lobrk;": "\u27e6", - "longleftarrow;": "\u27f5", - "longleftrightarrow;": "\u27f7", - "longmapsto;": "\u27fc", - "longrightarrow;": "\u27f6", - "looparrowleft;": "\u21ab", - "looparrowright;": "\u21ac", - "lopar;": "\u2985", - "lopf;": "\U0001d55d", - "loplus;": "\u2a2d", - "lotimes;": "\u2a34", - "lowast;": "\u2217", - "lowbar;": "_", - "loz;": "\u25ca", - "lozenge;": "\u25ca", - "lozf;": "\u29eb", - "lpar;": "(", - "lparlt;": "\u2993", - "lrarr;": "\u21c6", - "lrcorner;": "\u231f", - "lrhar;": "\u21cb", - "lrhard;": "\u296d", - "lrm;": "\u200e", - "lrtri;": "\u22bf", - "lsaquo;": "\u2039", - "lscr;": "\U0001d4c1", - "lsh;": "\u21b0", - "lsim;": "\u2272", - "lsime;": "\u2a8d", - "lsimg;": "\u2a8f", - "lsqb;": "[", - "lsquo;": "\u2018", - "lsquor;": "\u201a", - "lstrok;": "\u0142", - "lt": "<", - "lt;": "<", - "ltcc;": "\u2aa6", - "ltcir;": "\u2a79", - "ltdot;": "\u22d6", - "lthree;": "\u22cb", - "ltimes;": "\u22c9", - "ltlarr;": "\u2976", - "ltquest;": "\u2a7b", - "ltrPar;": "\u2996", - "ltri;": "\u25c3", - "ltrie;": "\u22b4", - "ltrif;": "\u25c2", - "lurdshar;": "\u294a", - "luruhar;": "\u2966", - "lvertneqq;": "\u2268\ufe00", - "lvnE;": "\u2268\ufe00", - "mDDot;": "\u223a", - "macr": "\xaf", - "macr;": "\xaf", - "male;": "\u2642", - "malt;": "\u2720", - "maltese;": "\u2720", - "map;": "\u21a6", - "mapsto;": "\u21a6", - "mapstodown;": "\u21a7", - "mapstoleft;": "\u21a4", - "mapstoup;": "\u21a5", - "marker;": "\u25ae", - "mcomma;": "\u2a29", - "mcy;": "\u043c", - "mdash;": "\u2014", - "measuredangle;": "\u2221", - "mfr;": "\U0001d52a", - "mho;": "\u2127", - "micro": "\xb5", - "micro;": "\xb5", - "mid;": "\u2223", - "midast;": "*", - "midcir;": "\u2af0", - "middot": "\xb7", - "middot;": "\xb7", - "minus;": "\u2212", - "minusb;": "\u229f", - "minusd;": "\u2238", - "minusdu;": "\u2a2a", - "mlcp;": "\u2adb", - "mldr;": "\u2026", - "mnplus;": 
"\u2213", - "models;": "\u22a7", - "mopf;": "\U0001d55e", - "mp;": "\u2213", - "mscr;": "\U0001d4c2", - "mstpos;": "\u223e", - "mu;": "\u03bc", - "multimap;": "\u22b8", - "mumap;": "\u22b8", - "nGg;": "\u22d9\u0338", - "nGt;": "\u226b\u20d2", - "nGtv;": "\u226b\u0338", - "nLeftarrow;": "\u21cd", - "nLeftrightarrow;": "\u21ce", - "nLl;": "\u22d8\u0338", - "nLt;": "\u226a\u20d2", - "nLtv;": "\u226a\u0338", - "nRightarrow;": "\u21cf", - "nVDash;": "\u22af", - "nVdash;": "\u22ae", - "nabla;": "\u2207", - "nacute;": "\u0144", - "nang;": "\u2220\u20d2", - "nap;": "\u2249", - "napE;": "\u2a70\u0338", - "napid;": "\u224b\u0338", - "napos;": "\u0149", - "napprox;": "\u2249", - "natur;": "\u266e", - "natural;": "\u266e", - "naturals;": "\u2115", - "nbsp": "\xa0", - "nbsp;": "\xa0", - "nbump;": "\u224e\u0338", - "nbumpe;": "\u224f\u0338", - "ncap;": "\u2a43", - "ncaron;": "\u0148", - "ncedil;": "\u0146", - "ncong;": "\u2247", - "ncongdot;": "\u2a6d\u0338", - "ncup;": "\u2a42", - "ncy;": "\u043d", - "ndash;": "\u2013", - "ne;": "\u2260", - "neArr;": "\u21d7", - "nearhk;": "\u2924", - "nearr;": "\u2197", - "nearrow;": "\u2197", - "nedot;": "\u2250\u0338", - "nequiv;": "\u2262", - "nesear;": "\u2928", - "nesim;": "\u2242\u0338", - "nexist;": "\u2204", - "nexists;": "\u2204", - "nfr;": "\U0001d52b", - "ngE;": "\u2267\u0338", - "nge;": "\u2271", - "ngeq;": "\u2271", - "ngeqq;": "\u2267\u0338", - "ngeqslant;": "\u2a7e\u0338", - "nges;": "\u2a7e\u0338", - "ngsim;": "\u2275", - "ngt;": "\u226f", - "ngtr;": "\u226f", - "nhArr;": "\u21ce", - "nharr;": "\u21ae", - "nhpar;": "\u2af2", - "ni;": "\u220b", - "nis;": "\u22fc", - "nisd;": "\u22fa", - "niv;": "\u220b", - "njcy;": "\u045a", - "nlArr;": "\u21cd", - "nlE;": "\u2266\u0338", - "nlarr;": "\u219a", - "nldr;": "\u2025", - "nle;": "\u2270", - "nleftarrow;": "\u219a", - "nleftrightarrow;": "\u21ae", - "nleq;": "\u2270", - "nleqq;": "\u2266\u0338", - "nleqslant;": "\u2a7d\u0338", - "nles;": "\u2a7d\u0338", - "nless;": "\u226e", - 
"nlsim;": "\u2274", - "nlt;": "\u226e", - "nltri;": "\u22ea", - "nltrie;": "\u22ec", - "nmid;": "\u2224", - "nopf;": "\U0001d55f", - "not": "\xac", - "not;": "\xac", - "notin;": "\u2209", - "notinE;": "\u22f9\u0338", - "notindot;": "\u22f5\u0338", - "notinva;": "\u2209", - "notinvb;": "\u22f7", - "notinvc;": "\u22f6", - "notni;": "\u220c", - "notniva;": "\u220c", - "notnivb;": "\u22fe", - "notnivc;": "\u22fd", - "npar;": "\u2226", - "nparallel;": "\u2226", - "nparsl;": "\u2afd\u20e5", - "npart;": "\u2202\u0338", - "npolint;": "\u2a14", - "npr;": "\u2280", - "nprcue;": "\u22e0", - "npre;": "\u2aaf\u0338", - "nprec;": "\u2280", - "npreceq;": "\u2aaf\u0338", - "nrArr;": "\u21cf", - "nrarr;": "\u219b", - "nrarrc;": "\u2933\u0338", - "nrarrw;": "\u219d\u0338", - "nrightarrow;": "\u219b", - "nrtri;": "\u22eb", - "nrtrie;": "\u22ed", - "nsc;": "\u2281", - "nsccue;": "\u22e1", - "nsce;": "\u2ab0\u0338", - "nscr;": "\U0001d4c3", - "nshortmid;": "\u2224", - "nshortparallel;": "\u2226", - "nsim;": "\u2241", - "nsime;": "\u2244", - "nsimeq;": "\u2244", - "nsmid;": "\u2224", - "nspar;": "\u2226", - "nsqsube;": "\u22e2", - "nsqsupe;": "\u22e3", - "nsub;": "\u2284", - "nsubE;": "\u2ac5\u0338", - "nsube;": "\u2288", - "nsubset;": "\u2282\u20d2", - "nsubseteq;": "\u2288", - "nsubseteqq;": "\u2ac5\u0338", - "nsucc;": "\u2281", - "nsucceq;": "\u2ab0\u0338", - "nsup;": "\u2285", - "nsupE;": "\u2ac6\u0338", - "nsupe;": "\u2289", - "nsupset;": "\u2283\u20d2", - "nsupseteq;": "\u2289", - "nsupseteqq;": "\u2ac6\u0338", - "ntgl;": "\u2279", - "ntilde": "\xf1", - "ntilde;": "\xf1", - "ntlg;": "\u2278", - "ntriangleleft;": "\u22ea", - "ntrianglelefteq;": "\u22ec", - "ntriangleright;": "\u22eb", - "ntrianglerighteq;": "\u22ed", - "nu;": "\u03bd", - "num;": "#", - "numero;": "\u2116", - "numsp;": "\u2007", - "nvDash;": "\u22ad", - "nvHarr;": "\u2904", - "nvap;": "\u224d\u20d2", - "nvdash;": "\u22ac", - "nvge;": "\u2265\u20d2", - "nvgt;": ">\u20d2", - "nvinfin;": "\u29de", - "nvlArr;": 
"\u2902", - "nvle;": "\u2264\u20d2", - "nvlt;": "<\u20d2", - "nvltrie;": "\u22b4\u20d2", - "nvrArr;": "\u2903", - "nvrtrie;": "\u22b5\u20d2", - "nvsim;": "\u223c\u20d2", - "nwArr;": "\u21d6", - "nwarhk;": "\u2923", - "nwarr;": "\u2196", - "nwarrow;": "\u2196", - "nwnear;": "\u2927", - "oS;": "\u24c8", - "oacute": "\xf3", - "oacute;": "\xf3", - "oast;": "\u229b", - "ocir;": "\u229a", - "ocirc": "\xf4", - "ocirc;": "\xf4", - "ocy;": "\u043e", - "odash;": "\u229d", - "odblac;": "\u0151", - "odiv;": "\u2a38", - "odot;": "\u2299", - "odsold;": "\u29bc", - "oelig;": "\u0153", - "ofcir;": "\u29bf", - "ofr;": "\U0001d52c", - "ogon;": "\u02db", - "ograve": "\xf2", - "ograve;": "\xf2", - "ogt;": "\u29c1", - "ohbar;": "\u29b5", - "ohm;": "\u03a9", - "oint;": "\u222e", - "olarr;": "\u21ba", - "olcir;": "\u29be", - "olcross;": "\u29bb", - "oline;": "\u203e", - "olt;": "\u29c0", - "omacr;": "\u014d", - "omega;": "\u03c9", - "omicron;": "\u03bf", - "omid;": "\u29b6", - "ominus;": "\u2296", - "oopf;": "\U0001d560", - "opar;": "\u29b7", - "operp;": "\u29b9", - "oplus;": "\u2295", - "or;": "\u2228", - "orarr;": "\u21bb", - "ord;": "\u2a5d", - "order;": "\u2134", - "orderof;": "\u2134", - "ordf": "\xaa", - "ordf;": "\xaa", - "ordm": "\xba", - "ordm;": "\xba", - "origof;": "\u22b6", - "oror;": "\u2a56", - "orslope;": "\u2a57", - "orv;": "\u2a5b", - "oscr;": "\u2134", - "oslash": "\xf8", - "oslash;": "\xf8", - "osol;": "\u2298", - "otilde": "\xf5", - "otilde;": "\xf5", - "otimes;": "\u2297", - "otimesas;": "\u2a36", - "ouml": "\xf6", - "ouml;": "\xf6", - "ovbar;": "\u233d", - "par;": "\u2225", - "para": "\xb6", - "para;": "\xb6", - "parallel;": "\u2225", - "parsim;": "\u2af3", - "parsl;": "\u2afd", - "part;": "\u2202", - "pcy;": "\u043f", - "percnt;": "%", - "period;": ".", - "permil;": "\u2030", - "perp;": "\u22a5", - "pertenk;": "\u2031", - "pfr;": "\U0001d52d", - "phi;": "\u03c6", - "phiv;": "\u03d5", - "phmmat;": "\u2133", - "phone;": "\u260e", - "pi;": "\u03c0", - "pitchfork;": 
"\u22d4", - "piv;": "\u03d6", - "planck;": "\u210f", - "planckh;": "\u210e", - "plankv;": "\u210f", - "plus;": "+", - "plusacir;": "\u2a23", - "plusb;": "\u229e", - "pluscir;": "\u2a22", - "plusdo;": "\u2214", - "plusdu;": "\u2a25", - "pluse;": "\u2a72", - "plusmn": "\xb1", - "plusmn;": "\xb1", - "plussim;": "\u2a26", - "plustwo;": "\u2a27", - "pm;": "\xb1", - "pointint;": "\u2a15", - "popf;": "\U0001d561", - "pound": "\xa3", - "pound;": "\xa3", - "pr;": "\u227a", - "prE;": "\u2ab3", - "prap;": "\u2ab7", - "prcue;": "\u227c", - "pre;": "\u2aaf", - "prec;": "\u227a", - "precapprox;": "\u2ab7", - "preccurlyeq;": "\u227c", - "preceq;": "\u2aaf", - "precnapprox;": "\u2ab9", - "precneqq;": "\u2ab5", - "precnsim;": "\u22e8", - "precsim;": "\u227e", - "prime;": "\u2032", - "primes;": "\u2119", - "prnE;": "\u2ab5", - "prnap;": "\u2ab9", - "prnsim;": "\u22e8", - "prod;": "\u220f", - "profalar;": "\u232e", - "profline;": "\u2312", - "profsurf;": "\u2313", - "prop;": "\u221d", - "propto;": "\u221d", - "prsim;": "\u227e", - "prurel;": "\u22b0", - "pscr;": "\U0001d4c5", - "psi;": "\u03c8", - "puncsp;": "\u2008", - "qfr;": "\U0001d52e", - "qint;": "\u2a0c", - "qopf;": "\U0001d562", - "qprime;": "\u2057", - "qscr;": "\U0001d4c6", - "quaternions;": "\u210d", - "quatint;": "\u2a16", - "quest;": "?", - "questeq;": "\u225f", - "quot": "\"", - "quot;": "\"", - "rAarr;": "\u21db", - "rArr;": "\u21d2", - "rAtail;": "\u291c", - "rBarr;": "\u290f", - "rHar;": "\u2964", - "race;": "\u223d\u0331", - "racute;": "\u0155", - "radic;": "\u221a", - "raemptyv;": "\u29b3", - "rang;": "\u27e9", - "rangd;": "\u2992", - "range;": "\u29a5", - "rangle;": "\u27e9", - "raquo": "\xbb", - "raquo;": "\xbb", - "rarr;": "\u2192", - "rarrap;": "\u2975", - "rarrb;": "\u21e5", - "rarrbfs;": "\u2920", - "rarrc;": "\u2933", - "rarrfs;": "\u291e", - "rarrhk;": "\u21aa", - "rarrlp;": "\u21ac", - "rarrpl;": "\u2945", - "rarrsim;": "\u2974", - "rarrtl;": "\u21a3", - "rarrw;": "\u219d", - "ratail;": "\u291a", - 
"ratio;": "\u2236", - "rationals;": "\u211a", - "rbarr;": "\u290d", - "rbbrk;": "\u2773", - "rbrace;": "}", - "rbrack;": "]", - "rbrke;": "\u298c", - "rbrksld;": "\u298e", - "rbrkslu;": "\u2990", - "rcaron;": "\u0159", - "rcedil;": "\u0157", - "rceil;": "\u2309", - "rcub;": "}", - "rcy;": "\u0440", - "rdca;": "\u2937", - "rdldhar;": "\u2969", - "rdquo;": "\u201d", - "rdquor;": "\u201d", - "rdsh;": "\u21b3", - "real;": "\u211c", - "realine;": "\u211b", - "realpart;": "\u211c", - "reals;": "\u211d", - "rect;": "\u25ad", - "reg": "\xae", - "reg;": "\xae", - "rfisht;": "\u297d", - "rfloor;": "\u230b", - "rfr;": "\U0001d52f", - "rhard;": "\u21c1", - "rharu;": "\u21c0", - "rharul;": "\u296c", - "rho;": "\u03c1", - "rhov;": "\u03f1", - "rightarrow;": "\u2192", - "rightarrowtail;": "\u21a3", - "rightharpoondown;": "\u21c1", - "rightharpoonup;": "\u21c0", - "rightleftarrows;": "\u21c4", - "rightleftharpoons;": "\u21cc", - "rightrightarrows;": "\u21c9", - "rightsquigarrow;": "\u219d", - "rightthreetimes;": "\u22cc", - "ring;": "\u02da", - "risingdotseq;": "\u2253", - "rlarr;": "\u21c4", - "rlhar;": "\u21cc", - "rlm;": "\u200f", - "rmoust;": "\u23b1", - "rmoustache;": "\u23b1", - "rnmid;": "\u2aee", - "roang;": "\u27ed", - "roarr;": "\u21fe", - "robrk;": "\u27e7", - "ropar;": "\u2986", - "ropf;": "\U0001d563", - "roplus;": "\u2a2e", - "rotimes;": "\u2a35", - "rpar;": ")", - "rpargt;": "\u2994", - "rppolint;": "\u2a12", - "rrarr;": "\u21c9", - "rsaquo;": "\u203a", - "rscr;": "\U0001d4c7", - "rsh;": "\u21b1", - "rsqb;": "]", - "rsquo;": "\u2019", - "rsquor;": "\u2019", - "rthree;": "\u22cc", - "rtimes;": "\u22ca", - "rtri;": "\u25b9", - "rtrie;": "\u22b5", - "rtrif;": "\u25b8", - "rtriltri;": "\u29ce", - "ruluhar;": "\u2968", - "rx;": "\u211e", - "sacute;": "\u015b", - "sbquo;": "\u201a", - "sc;": "\u227b", - "scE;": "\u2ab4", - "scap;": "\u2ab8", - "scaron;": "\u0161", - "sccue;": "\u227d", - "sce;": "\u2ab0", - "scedil;": "\u015f", - "scirc;": "\u015d", - "scnE;": "\u2ab6", - 
"scnap;": "\u2aba", - "scnsim;": "\u22e9", - "scpolint;": "\u2a13", - "scsim;": "\u227f", - "scy;": "\u0441", - "sdot;": "\u22c5", - "sdotb;": "\u22a1", - "sdote;": "\u2a66", - "seArr;": "\u21d8", - "searhk;": "\u2925", - "searr;": "\u2198", - "searrow;": "\u2198", - "sect": "\xa7", - "sect;": "\xa7", - "semi;": ";", - "seswar;": "\u2929", - "setminus;": "\u2216", - "setmn;": "\u2216", - "sext;": "\u2736", - "sfr;": "\U0001d530", - "sfrown;": "\u2322", - "sharp;": "\u266f", - "shchcy;": "\u0449", - "shcy;": "\u0448", - "shortmid;": "\u2223", - "shortparallel;": "\u2225", - "shy": "\xad", - "shy;": "\xad", - "sigma;": "\u03c3", - "sigmaf;": "\u03c2", - "sigmav;": "\u03c2", - "sim;": "\u223c", - "simdot;": "\u2a6a", - "sime;": "\u2243", - "simeq;": "\u2243", - "simg;": "\u2a9e", - "simgE;": "\u2aa0", - "siml;": "\u2a9d", - "simlE;": "\u2a9f", - "simne;": "\u2246", - "simplus;": "\u2a24", - "simrarr;": "\u2972", - "slarr;": "\u2190", - "smallsetminus;": "\u2216", - "smashp;": "\u2a33", - "smeparsl;": "\u29e4", - "smid;": "\u2223", - "smile;": "\u2323", - "smt;": "\u2aaa", - "smte;": "\u2aac", - "smtes;": "\u2aac\ufe00", - "softcy;": "\u044c", - "sol;": "/", - "solb;": "\u29c4", - "solbar;": "\u233f", - "sopf;": "\U0001d564", - "spades;": "\u2660", - "spadesuit;": "\u2660", - "spar;": "\u2225", - "sqcap;": "\u2293", - "sqcaps;": "\u2293\ufe00", - "sqcup;": "\u2294", - "sqcups;": "\u2294\ufe00", - "sqsub;": "\u228f", - "sqsube;": "\u2291", - "sqsubset;": "\u228f", - "sqsubseteq;": "\u2291", - "sqsup;": "\u2290", - "sqsupe;": "\u2292", - "sqsupset;": "\u2290", - "sqsupseteq;": "\u2292", - "squ;": "\u25a1", - "square;": "\u25a1", - "squarf;": "\u25aa", - "squf;": "\u25aa", - "srarr;": "\u2192", - "sscr;": "\U0001d4c8", - "ssetmn;": "\u2216", - "ssmile;": "\u2323", - "sstarf;": "\u22c6", - "star;": "\u2606", - "starf;": "\u2605", - "straightepsilon;": "\u03f5", - "straightphi;": "\u03d5", - "strns;": "\xaf", - "sub;": "\u2282", - "subE;": "\u2ac5", - "subdot;": "\u2abd", - 
"sube;": "\u2286", - "subedot;": "\u2ac3", - "submult;": "\u2ac1", - "subnE;": "\u2acb", - "subne;": "\u228a", - "subplus;": "\u2abf", - "subrarr;": "\u2979", - "subset;": "\u2282", - "subseteq;": "\u2286", - "subseteqq;": "\u2ac5", - "subsetneq;": "\u228a", - "subsetneqq;": "\u2acb", - "subsim;": "\u2ac7", - "subsub;": "\u2ad5", - "subsup;": "\u2ad3", - "succ;": "\u227b", - "succapprox;": "\u2ab8", - "succcurlyeq;": "\u227d", - "succeq;": "\u2ab0", - "succnapprox;": "\u2aba", - "succneqq;": "\u2ab6", - "succnsim;": "\u22e9", - "succsim;": "\u227f", - "sum;": "\u2211", - "sung;": "\u266a", - "sup1": "\xb9", - "sup1;": "\xb9", - "sup2": "\xb2", - "sup2;": "\xb2", - "sup3": "\xb3", - "sup3;": "\xb3", - "sup;": "\u2283", - "supE;": "\u2ac6", - "supdot;": "\u2abe", - "supdsub;": "\u2ad8", - "supe;": "\u2287", - "supedot;": "\u2ac4", - "suphsol;": "\u27c9", - "suphsub;": "\u2ad7", - "suplarr;": "\u297b", - "supmult;": "\u2ac2", - "supnE;": "\u2acc", - "supne;": "\u228b", - "supplus;": "\u2ac0", - "supset;": "\u2283", - "supseteq;": "\u2287", - "supseteqq;": "\u2ac6", - "supsetneq;": "\u228b", - "supsetneqq;": "\u2acc", - "supsim;": "\u2ac8", - "supsub;": "\u2ad4", - "supsup;": "\u2ad6", - "swArr;": "\u21d9", - "swarhk;": "\u2926", - "swarr;": "\u2199", - "swarrow;": "\u2199", - "swnwar;": "\u292a", - "szlig": "\xdf", - "szlig;": "\xdf", - "target;": "\u2316", - "tau;": "\u03c4", - "tbrk;": "\u23b4", - "tcaron;": "\u0165", - "tcedil;": "\u0163", - "tcy;": "\u0442", - "tdot;": "\u20db", - "telrec;": "\u2315", - "tfr;": "\U0001d531", - "there4;": "\u2234", - "therefore;": "\u2234", - "theta;": "\u03b8", - "thetasym;": "\u03d1", - "thetav;": "\u03d1", - "thickapprox;": "\u2248", - "thicksim;": "\u223c", - "thinsp;": "\u2009", - "thkap;": "\u2248", - "thksim;": "\u223c", - "thorn": "\xfe", - "thorn;": "\xfe", - "tilde;": "\u02dc", - "times": "\xd7", - "times;": "\xd7", - "timesb;": "\u22a0", - "timesbar;": "\u2a31", - "timesd;": "\u2a30", - "tint;": "\u222d", - "toea;": 
"\u2928", - "top;": "\u22a4", - "topbot;": "\u2336", - "topcir;": "\u2af1", - "topf;": "\U0001d565", - "topfork;": "\u2ada", - "tosa;": "\u2929", - "tprime;": "\u2034", - "trade;": "\u2122", - "triangle;": "\u25b5", - "triangledown;": "\u25bf", - "triangleleft;": "\u25c3", - "trianglelefteq;": "\u22b4", - "triangleq;": "\u225c", - "triangleright;": "\u25b9", - "trianglerighteq;": "\u22b5", - "tridot;": "\u25ec", - "trie;": "\u225c", - "triminus;": "\u2a3a", - "triplus;": "\u2a39", - "trisb;": "\u29cd", - "tritime;": "\u2a3b", - "trpezium;": "\u23e2", - "tscr;": "\U0001d4c9", - "tscy;": "\u0446", - "tshcy;": "\u045b", - "tstrok;": "\u0167", - "twixt;": "\u226c", - "twoheadleftarrow;": "\u219e", - "twoheadrightarrow;": "\u21a0", - "uArr;": "\u21d1", - "uHar;": "\u2963", - "uacute": "\xfa", - "uacute;": "\xfa", - "uarr;": "\u2191", - "ubrcy;": "\u045e", - "ubreve;": "\u016d", - "ucirc": "\xfb", - "ucirc;": "\xfb", - "ucy;": "\u0443", - "udarr;": "\u21c5", - "udblac;": "\u0171", - "udhar;": "\u296e", - "ufisht;": "\u297e", - "ufr;": "\U0001d532", - "ugrave": "\xf9", - "ugrave;": "\xf9", - "uharl;": "\u21bf", - "uharr;": "\u21be", - "uhblk;": "\u2580", - "ulcorn;": "\u231c", - "ulcorner;": "\u231c", - "ulcrop;": "\u230f", - "ultri;": "\u25f8", - "umacr;": "\u016b", - "uml": "\xa8", - "uml;": "\xa8", - "uogon;": "\u0173", - "uopf;": "\U0001d566", - "uparrow;": "\u2191", - "updownarrow;": "\u2195", - "upharpoonleft;": "\u21bf", - "upharpoonright;": "\u21be", - "uplus;": "\u228e", - "upsi;": "\u03c5", - "upsih;": "\u03d2", - "upsilon;": "\u03c5", - "upuparrows;": "\u21c8", - "urcorn;": "\u231d", - "urcorner;": "\u231d", - "urcrop;": "\u230e", - "uring;": "\u016f", - "urtri;": "\u25f9", - "uscr;": "\U0001d4ca", - "utdot;": "\u22f0", - "utilde;": "\u0169", - "utri;": "\u25b5", - "utrif;": "\u25b4", - "uuarr;": "\u21c8", - "uuml": "\xfc", - "uuml;": "\xfc", - "uwangle;": "\u29a7", - "vArr;": "\u21d5", - "vBar;": "\u2ae8", - "vBarv;": "\u2ae9", - "vDash;": "\u22a8", - 
"vangrt;": "\u299c", - "varepsilon;": "\u03f5", - "varkappa;": "\u03f0", - "varnothing;": "\u2205", - "varphi;": "\u03d5", - "varpi;": "\u03d6", - "varpropto;": "\u221d", - "varr;": "\u2195", - "varrho;": "\u03f1", - "varsigma;": "\u03c2", - "varsubsetneq;": "\u228a\ufe00", - "varsubsetneqq;": "\u2acb\ufe00", - "varsupsetneq;": "\u228b\ufe00", - "varsupsetneqq;": "\u2acc\ufe00", - "vartheta;": "\u03d1", - "vartriangleleft;": "\u22b2", - "vartriangleright;": "\u22b3", - "vcy;": "\u0432", - "vdash;": "\u22a2", - "vee;": "\u2228", - "veebar;": "\u22bb", - "veeeq;": "\u225a", - "vellip;": "\u22ee", - "verbar;": "|", - "vert;": "|", - "vfr;": "\U0001d533", - "vltri;": "\u22b2", - "vnsub;": "\u2282\u20d2", - "vnsup;": "\u2283\u20d2", - "vopf;": "\U0001d567", - "vprop;": "\u221d", - "vrtri;": "\u22b3", - "vscr;": "\U0001d4cb", - "vsubnE;": "\u2acb\ufe00", - "vsubne;": "\u228a\ufe00", - "vsupnE;": "\u2acc\ufe00", - "vsupne;": "\u228b\ufe00", - "vzigzag;": "\u299a", - "wcirc;": "\u0175", - "wedbar;": "\u2a5f", - "wedge;": "\u2227", - "wedgeq;": "\u2259", - "weierp;": "\u2118", - "wfr;": "\U0001d534", - "wopf;": "\U0001d568", - "wp;": "\u2118", - "wr;": "\u2240", - "wreath;": "\u2240", - "wscr;": "\U0001d4cc", - "xcap;": "\u22c2", - "xcirc;": "\u25ef", - "xcup;": "\u22c3", - "xdtri;": "\u25bd", - "xfr;": "\U0001d535", - "xhArr;": "\u27fa", - "xharr;": "\u27f7", - "xi;": "\u03be", - "xlArr;": "\u27f8", - "xlarr;": "\u27f5", - "xmap;": "\u27fc", - "xnis;": "\u22fb", - "xodot;": "\u2a00", - "xopf;": "\U0001d569", - "xoplus;": "\u2a01", - "xotime;": "\u2a02", - "xrArr;": "\u27f9", - "xrarr;": "\u27f6", - "xscr;": "\U0001d4cd", - "xsqcup;": "\u2a06", - "xuplus;": "\u2a04", - "xutri;": "\u25b3", - "xvee;": "\u22c1", - "xwedge;": "\u22c0", - "yacute": "\xfd", - "yacute;": "\xfd", - "yacy;": "\u044f", - "ycirc;": "\u0177", - "ycy;": "\u044b", - "yen": "\xa5", - "yen;": "\xa5", - "yfr;": "\U0001d536", - "yicy;": "\u0457", - "yopf;": "\U0001d56a", - "yscr;": "\U0001d4ce", - "yucy;": 
"\u044e", - "yuml": "\xff", - "yuml;": "\xff", - "zacute;": "\u017a", - "zcaron;": "\u017e", - "zcy;": "\u0437", - "zdot;": "\u017c", - "zeetrf;": "\u2128", - "zeta;": "\u03b6", - "zfr;": "\U0001d537", - "zhcy;": "\u0436", - "zigrarr;": "\u21dd", - "zopf;": "\U0001d56b", - "zscr;": "\U0001d4cf", - "zwj;": "\u200d", - "zwnj;": "\u200c", -} - -replacementCharacters = { - 0x0: "\uFFFD", - 0x0d: "\u000D", - 0x80: "\u20AC", - 0x81: "\u0081", - 0x82: "\u201A", - 0x83: "\u0192", - 0x84: "\u201E", - 0x85: "\u2026", - 0x86: "\u2020", - 0x87: "\u2021", - 0x88: "\u02C6", - 0x89: "\u2030", - 0x8A: "\u0160", - 0x8B: "\u2039", - 0x8C: "\u0152", - 0x8D: "\u008D", - 0x8E: "\u017D", - 0x8F: "\u008F", - 0x90: "\u0090", - 0x91: "\u2018", - 0x92: "\u2019", - 0x93: "\u201C", - 0x94: "\u201D", - 0x95: "\u2022", - 0x96: "\u2013", - 0x97: "\u2014", - 0x98: "\u02DC", - 0x99: "\u2122", - 0x9A: "\u0161", - 0x9B: "\u203A", - 0x9C: "\u0153", - 0x9D: "\u009D", - 0x9E: "\u017E", - 0x9F: "\u0178", -} - -tokenTypes = { - "Doctype": 0, - "Characters": 1, - "SpaceCharacters": 2, - "StartTag": 3, - "EndTag": 4, - "EmptyTag": 5, - "Comment": 6, - "ParseError": 7 -} - -tagTokenTypes = frozenset([tokenTypes["StartTag"], tokenTypes["EndTag"], - tokenTypes["EmptyTag"]]) - - -prefixes = dict([(v, k) for k, v in namespaces.items()]) -prefixes["http://www.w3.org/1998/Math/MathML"] = "math" - - -class DataLossWarning(UserWarning): - """Raised when the current tree is unable to represent the input data""" - pass - - -class _ReparseException(Exception): - pass diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/filters/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/filters/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/filters/alphabeticalattributes.py 
b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/filters/alphabeticalattributes.py deleted file mode 100644 index 5ba926e..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/filters/alphabeticalattributes.py +++ /dev/null @@ -1,29 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from . import base - -from collections import OrderedDict - - -def _attr_key(attr): - """Return an appropriate key for an attribute for sorting - - Attributes have a namespace that can be either ``None`` or a string. We - can't compare the two because they're different types, so we convert - ``None`` to an empty string first. - - """ - return (attr[0][0] or ''), attr[0][1] - - -class Filter(base.Filter): - """Alphabetizes attributes for elements""" - def __iter__(self): - for token in base.Filter.__iter__(self): - if token["type"] in ("StartTag", "EmptyTag"): - attrs = OrderedDict() - for name, value in sorted(token["data"].items(), - key=_attr_key): - attrs[name] = value - token["data"] = attrs - yield token diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/filters/base.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/filters/base.py deleted file mode 100644 index c7dbaed..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/filters/base.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - - -class Filter(object): - def __init__(self, source): - self.source = source - - def __iter__(self): - return iter(self.source) - - def __getattr__(self, name): - return getattr(self.source, name) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/filters/inject_meta_charset.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/filters/inject_meta_charset.py deleted file mode 100644 index 
aefb5c8..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/filters/inject_meta_charset.py +++ /dev/null @@ -1,73 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from . import base - - -class Filter(base.Filter): - """Injects ``<meta charset=ENCODING>`` tag into head of document""" - def __init__(self, source, encoding): - """Creates a Filter - - :arg source: the source token stream - - :arg encoding: the encoding to set - - """ - base.Filter.__init__(self, source) - self.encoding = encoding - - def __iter__(self): - state = "pre_head" - meta_found = (self.encoding is None) - pending = [] - - for token in base.Filter.__iter__(self): - type = token["type"] - if type == "StartTag": - if token["name"].lower() == "head": - state = "in_head" - - elif type == "EmptyTag": - if token["name"].lower() == "meta": - # replace charset with actual encoding - has_http_equiv_content_type = False - for (namespace, name), value in token["data"].items(): - if namespace is not None: - continue - elif name.lower() == 'charset': - token["data"][(namespace, name)] = self.encoding - meta_found = True - break - elif name == 'http-equiv' and value.lower() == 'content-type': - has_http_equiv_content_type = True - else: - if has_http_equiv_content_type and (None, "content") in token["data"]: - token["data"][(None, "content")] = 'text/html; charset=%s' % self.encoding - meta_found = True - - elif token["name"].lower() == "head" and not meta_found: - # insert meta into empty head - yield {"type": "StartTag", "name": "head", - "data": token["data"]} - yield {"type": "EmptyTag", "name": "meta", - "data": {(None, "charset"): self.encoding}} - yield {"type": "EndTag", "name": "head"} - meta_found = True - continue - - elif type == "EndTag": - if token["name"].lower() == "head" and pending: - # insert meta into head (if necessary) and flush pending queue - yield pending.pop(0) - if not meta_found: - yield {"type": "EmptyTag", 
"name": "meta", - "data": {(None, "charset"): self.encoding}} - while pending: - yield pending.pop(0) - meta_found = True - state = "post_head" - - if state == "in_head": - pending.append(token) - else: - yield token diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/filters/lint.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/filters/lint.py deleted file mode 100644 index fcc07ee..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/filters/lint.py +++ /dev/null @@ -1,93 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from pip._vendor.six import text_type - -from . import base -from ..constants import namespaces, voidElements - -from ..constants import spaceCharacters -spaceCharacters = "".join(spaceCharacters) - - -class Filter(base.Filter): - """Lints the token stream for errors - - If it finds any errors, it'll raise an ``AssertionError``. - - """ - def __init__(self, source, require_matching_tags=True): - """Creates a Filter - - :arg source: the source token stream - - :arg require_matching_tags: whether or not to require matching tags - - """ - super(Filter, self).__init__(source) - self.require_matching_tags = require_matching_tags - - def __iter__(self): - open_elements = [] - for token in base.Filter.__iter__(self): - type = token["type"] - if type in ("StartTag", "EmptyTag"): - namespace = token["namespace"] - name = token["name"] - assert namespace is None or isinstance(namespace, text_type) - assert namespace != "" - assert isinstance(name, text_type) - assert name != "" - assert isinstance(token["data"], dict) - if (not namespace or namespace == namespaces["html"]) and name in voidElements: - assert type == "EmptyTag" - else: - assert type == "StartTag" - if type == "StartTag" and self.require_matching_tags: - open_elements.append((namespace, name)) - for (namespace, name), value in token["data"].items(): - assert 
namespace is None or isinstance(namespace, text_type) - assert namespace != "" - assert isinstance(name, text_type) - assert name != "" - assert isinstance(value, text_type) - - elif type == "EndTag": - namespace = token["namespace"] - name = token["name"] - assert namespace is None or isinstance(namespace, text_type) - assert namespace != "" - assert isinstance(name, text_type) - assert name != "" - if (not namespace or namespace == namespaces["html"]) and name in voidElements: - assert False, "Void element reported as EndTag token: %(tag)s" % {"tag": name} - elif self.require_matching_tags: - start = open_elements.pop() - assert start == (namespace, name) - - elif type == "Comment": - data = token["data"] - assert isinstance(data, text_type) - - elif type in ("Characters", "SpaceCharacters"): - data = token["data"] - assert isinstance(data, text_type) - assert data != "" - if type == "SpaceCharacters": - assert data.strip(spaceCharacters) == "" - - elif type == "Doctype": - name = token["name"] - assert name is None or isinstance(name, text_type) - assert token["publicId"] is None or isinstance(name, text_type) - assert token["systemId"] is None or isinstance(name, text_type) - - elif type == "Entity": - assert isinstance(token["name"], text_type) - - elif type == "SerializerError": - assert isinstance(token["data"], text_type) - - else: - assert False, "Unknown token type: %(type)s" % {"type": type} - - yield token diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/filters/optionaltags.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/filters/optionaltags.py deleted file mode 100644 index 4a86501..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/filters/optionaltags.py +++ /dev/null @@ -1,207 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from . 
import base - - -class Filter(base.Filter): - """Removes optional tags from the token stream""" - def slider(self): - previous1 = previous2 = None - for token in self.source: - if previous1 is not None: - yield previous2, previous1, token - previous2 = previous1 - previous1 = token - if previous1 is not None: - yield previous2, previous1, None - - def __iter__(self): - for previous, token, next in self.slider(): - type = token["type"] - if type == "StartTag": - if (token["data"] or - not self.is_optional_start(token["name"], previous, next)): - yield token - elif type == "EndTag": - if not self.is_optional_end(token["name"], next): - yield token - else: - yield token - - def is_optional_start(self, tagname, previous, next): - type = next and next["type"] or None - if tagname in 'html': - # An html element's start tag may be omitted if the first thing - # inside the html element is not a space character or a comment. - return type not in ("Comment", "SpaceCharacters") - elif tagname == 'head': - # A head element's start tag may be omitted if the first thing - # inside the head element is an element. - # XXX: we also omit the start tag if the head element is empty - if type in ("StartTag", "EmptyTag"): - return True - elif type == "EndTag": - return next["name"] == "head" - elif tagname == 'body': - # A body element's start tag may be omitted if the first thing - # inside the body element is not a space character or a comment, - # except if the first thing inside the body element is a script - # or style element and the node immediately preceding the body - # element is a head element whose end tag has been omitted. - if type in ("Comment", "SpaceCharacters"): - return False - elif type == "StartTag": - # XXX: we do not look at the preceding event, so we never omit - # the body element's start tag if it's followed by a script or - # a style element. 
- return next["name"] not in ('script', 'style') - else: - return True - elif tagname == 'colgroup': - # A colgroup element's start tag may be omitted if the first thing - # inside the colgroup element is a col element, and if the element - # is not immediately preceded by another colgroup element whose - # end tag has been omitted. - if type in ("StartTag", "EmptyTag"): - # XXX: we do not look at the preceding event, so instead we never - # omit the colgroup element's end tag when it is immediately - # followed by another colgroup element. See is_optional_end. - return next["name"] == "col" - else: - return False - elif tagname == 'tbody': - # A tbody element's start tag may be omitted if the first thing - # inside the tbody element is a tr element, and if the element is - # not immediately preceded by a tbody, thead, or tfoot element - # whose end tag has been omitted. - if type == "StartTag": - # omit the thead and tfoot elements' end tag when they are - # immediately followed by a tbody element. See is_optional_end. - if previous and previous['type'] == 'EndTag' and \ - previous['name'] in ('tbody', 'thead', 'tfoot'): - return False - return next["name"] == 'tr' - else: - return False - return False - - def is_optional_end(self, tagname, next): - type = next and next["type"] or None - if tagname in ('html', 'head', 'body'): - # An html element's end tag may be omitted if the html element - # is not immediately followed by a space character or a comment. - return type not in ("Comment", "SpaceCharacters") - elif tagname in ('li', 'optgroup', 'tr'): - # A li element's end tag may be omitted if the li element is - # immediately followed by another li element or if there is - # no more content in the parent element. - # An optgroup element's end tag may be omitted if the optgroup - # element is immediately followed by another optgroup element, - # or if there is no more content in the parent element. 
- # A tr element's end tag may be omitted if the tr element is - # immediately followed by another tr element, or if there is - # no more content in the parent element. - if type == "StartTag": - return next["name"] == tagname - else: - return type == "EndTag" or type is None - elif tagname in ('dt', 'dd'): - # A dt element's end tag may be omitted if the dt element is - # immediately followed by another dt element or a dd element. - # A dd element's end tag may be omitted if the dd element is - # immediately followed by another dd element or a dt element, - # or if there is no more content in the parent element. - if type == "StartTag": - return next["name"] in ('dt', 'dd') - elif tagname == 'dd': - return type == "EndTag" or type is None - else: - return False - elif tagname == 'p': - # A p element's end tag may be omitted if the p element is - # immediately followed by an address, article, aside, - # blockquote, datagrid, dialog, dir, div, dl, fieldset, - # footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu, - # nav, ol, p, pre, section, table, or ul, element, or if - # there is no more content in the parent element. - if type in ("StartTag", "EmptyTag"): - return next["name"] in ('address', 'article', 'aside', - 'blockquote', 'datagrid', 'dialog', - 'dir', 'div', 'dl', 'fieldset', 'footer', - 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', - 'header', 'hr', 'menu', 'nav', 'ol', - 'p', 'pre', 'section', 'table', 'ul') - else: - return type == "EndTag" or type is None - elif tagname == 'option': - # An option element's end tag may be omitted if the option - # element is immediately followed by another option element, - # or if it is immediately followed by an <code>optgroup</code> - # element, or if there is no more content in the parent - # element. 
- if type == "StartTag": - return next["name"] in ('option', 'optgroup') - else: - return type == "EndTag" or type is None - elif tagname in ('rt', 'rp'): - # An rt element's end tag may be omitted if the rt element is - # immediately followed by an rt or rp element, or if there is - # no more content in the parent element. - # An rp element's end tag may be omitted if the rp element is - # immediately followed by an rt or rp element, or if there is - # no more content in the parent element. - if type == "StartTag": - return next["name"] in ('rt', 'rp') - else: - return type == "EndTag" or type is None - elif tagname == 'colgroup': - # A colgroup element's end tag may be omitted if the colgroup - # element is not immediately followed by a space character or - # a comment. - if type in ("Comment", "SpaceCharacters"): - return False - elif type == "StartTag": - # XXX: we also look for an immediately following colgroup - # element. See is_optional_start. - return next["name"] != 'colgroup' - else: - return True - elif tagname in ('thead', 'tbody'): - # A thead element's end tag may be omitted if the thead element - # is immediately followed by a tbody or tfoot element. - # A tbody element's end tag may be omitted if the tbody element - # is immediately followed by a tbody or tfoot element, or if - # there is no more content in the parent element. - # A tfoot element's end tag may be omitted if the tfoot element - # is immediately followed by a tbody element, or if there is no - # more content in the parent element. - # XXX: we never omit the end tag when the following element is - # a tbody. See is_optional_start. - if type == "StartTag": - return next["name"] in ['tbody', 'tfoot'] - elif tagname == 'tbody': - return type == "EndTag" or type is None - else: - return False - elif tagname == 'tfoot': - # A tfoot element's end tag may be omitted if the tfoot element - # is immediately followed by a tbody element, or if there is no - # more content in the parent element. 
- # XXX: we never omit the end tag when the following element is - # a tbody. See is_optional_start. - if type == "StartTag": - return next["name"] == 'tbody' - else: - return type == "EndTag" or type is None - elif tagname in ('td', 'th'): - # A td element's end tag may be omitted if the td element is - # immediately followed by a td or th element, or if there is - # no more content in the parent element. - # A th element's end tag may be omitted if the th element is - # immediately followed by a td or th element, or if there is - # no more content in the parent element. - if type == "StartTag": - return next["name"] in ('td', 'th') - else: - return type == "EndTag" or type is None - return False diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/filters/sanitizer.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/filters/sanitizer.py deleted file mode 100644 index af8e77b..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/filters/sanitizer.py +++ /dev/null @@ -1,896 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -import re -from xml.sax.saxutils import escape, unescape - -from pip._vendor.six.moves import urllib_parse as urlparse - -from . 
import base -from ..constants import namespaces, prefixes - -__all__ = ["Filter"] - - -allowed_elements = frozenset(( - (namespaces['html'], 'a'), - (namespaces['html'], 'abbr'), - (namespaces['html'], 'acronym'), - (namespaces['html'], 'address'), - (namespaces['html'], 'area'), - (namespaces['html'], 'article'), - (namespaces['html'], 'aside'), - (namespaces['html'], 'audio'), - (namespaces['html'], 'b'), - (namespaces['html'], 'big'), - (namespaces['html'], 'blockquote'), - (namespaces['html'], 'br'), - (namespaces['html'], 'button'), - (namespaces['html'], 'canvas'), - (namespaces['html'], 'caption'), - (namespaces['html'], 'center'), - (namespaces['html'], 'cite'), - (namespaces['html'], 'code'), - (namespaces['html'], 'col'), - (namespaces['html'], 'colgroup'), - (namespaces['html'], 'command'), - (namespaces['html'], 'datagrid'), - (namespaces['html'], 'datalist'), - (namespaces['html'], 'dd'), - (namespaces['html'], 'del'), - (namespaces['html'], 'details'), - (namespaces['html'], 'dfn'), - (namespaces['html'], 'dialog'), - (namespaces['html'], 'dir'), - (namespaces['html'], 'div'), - (namespaces['html'], 'dl'), - (namespaces['html'], 'dt'), - (namespaces['html'], 'em'), - (namespaces['html'], 'event-source'), - (namespaces['html'], 'fieldset'), - (namespaces['html'], 'figcaption'), - (namespaces['html'], 'figure'), - (namespaces['html'], 'footer'), - (namespaces['html'], 'font'), - (namespaces['html'], 'form'), - (namespaces['html'], 'header'), - (namespaces['html'], 'h1'), - (namespaces['html'], 'h2'), - (namespaces['html'], 'h3'), - (namespaces['html'], 'h4'), - (namespaces['html'], 'h5'), - (namespaces['html'], 'h6'), - (namespaces['html'], 'hr'), - (namespaces['html'], 'i'), - (namespaces['html'], 'img'), - (namespaces['html'], 'input'), - (namespaces['html'], 'ins'), - (namespaces['html'], 'keygen'), - (namespaces['html'], 'kbd'), - (namespaces['html'], 'label'), - (namespaces['html'], 'legend'), - (namespaces['html'], 'li'), - (namespaces['html'], 
'm'), - (namespaces['html'], 'map'), - (namespaces['html'], 'menu'), - (namespaces['html'], 'meter'), - (namespaces['html'], 'multicol'), - (namespaces['html'], 'nav'), - (namespaces['html'], 'nextid'), - (namespaces['html'], 'ol'), - (namespaces['html'], 'output'), - (namespaces['html'], 'optgroup'), - (namespaces['html'], 'option'), - (namespaces['html'], 'p'), - (namespaces['html'], 'pre'), - (namespaces['html'], 'progress'), - (namespaces['html'], 'q'), - (namespaces['html'], 's'), - (namespaces['html'], 'samp'), - (namespaces['html'], 'section'), - (namespaces['html'], 'select'), - (namespaces['html'], 'small'), - (namespaces['html'], 'sound'), - (namespaces['html'], 'source'), - (namespaces['html'], 'spacer'), - (namespaces['html'], 'span'), - (namespaces['html'], 'strike'), - (namespaces['html'], 'strong'), - (namespaces['html'], 'sub'), - (namespaces['html'], 'sup'), - (namespaces['html'], 'table'), - (namespaces['html'], 'tbody'), - (namespaces['html'], 'td'), - (namespaces['html'], 'textarea'), - (namespaces['html'], 'time'), - (namespaces['html'], 'tfoot'), - (namespaces['html'], 'th'), - (namespaces['html'], 'thead'), - (namespaces['html'], 'tr'), - (namespaces['html'], 'tt'), - (namespaces['html'], 'u'), - (namespaces['html'], 'ul'), - (namespaces['html'], 'var'), - (namespaces['html'], 'video'), - (namespaces['mathml'], 'maction'), - (namespaces['mathml'], 'math'), - (namespaces['mathml'], 'merror'), - (namespaces['mathml'], 'mfrac'), - (namespaces['mathml'], 'mi'), - (namespaces['mathml'], 'mmultiscripts'), - (namespaces['mathml'], 'mn'), - (namespaces['mathml'], 'mo'), - (namespaces['mathml'], 'mover'), - (namespaces['mathml'], 'mpadded'), - (namespaces['mathml'], 'mphantom'), - (namespaces['mathml'], 'mprescripts'), - (namespaces['mathml'], 'mroot'), - (namespaces['mathml'], 'mrow'), - (namespaces['mathml'], 'mspace'), - (namespaces['mathml'], 'msqrt'), - (namespaces['mathml'], 'mstyle'), - (namespaces['mathml'], 'msub'), - (namespaces['mathml'], 
'msubsup'), - (namespaces['mathml'], 'msup'), - (namespaces['mathml'], 'mtable'), - (namespaces['mathml'], 'mtd'), - (namespaces['mathml'], 'mtext'), - (namespaces['mathml'], 'mtr'), - (namespaces['mathml'], 'munder'), - (namespaces['mathml'], 'munderover'), - (namespaces['mathml'], 'none'), - (namespaces['svg'], 'a'), - (namespaces['svg'], 'animate'), - (namespaces['svg'], 'animateColor'), - (namespaces['svg'], 'animateMotion'), - (namespaces['svg'], 'animateTransform'), - (namespaces['svg'], 'clipPath'), - (namespaces['svg'], 'circle'), - (namespaces['svg'], 'defs'), - (namespaces['svg'], 'desc'), - (namespaces['svg'], 'ellipse'), - (namespaces['svg'], 'font-face'), - (namespaces['svg'], 'font-face-name'), - (namespaces['svg'], 'font-face-src'), - (namespaces['svg'], 'g'), - (namespaces['svg'], 'glyph'), - (namespaces['svg'], 'hkern'), - (namespaces['svg'], 'linearGradient'), - (namespaces['svg'], 'line'), - (namespaces['svg'], 'marker'), - (namespaces['svg'], 'metadata'), - (namespaces['svg'], 'missing-glyph'), - (namespaces['svg'], 'mpath'), - (namespaces['svg'], 'path'), - (namespaces['svg'], 'polygon'), - (namespaces['svg'], 'polyline'), - (namespaces['svg'], 'radialGradient'), - (namespaces['svg'], 'rect'), - (namespaces['svg'], 'set'), - (namespaces['svg'], 'stop'), - (namespaces['svg'], 'svg'), - (namespaces['svg'], 'switch'), - (namespaces['svg'], 'text'), - (namespaces['svg'], 'title'), - (namespaces['svg'], 'tspan'), - (namespaces['svg'], 'use'), -)) - -allowed_attributes = frozenset(( - # HTML attributes - (None, 'abbr'), - (None, 'accept'), - (None, 'accept-charset'), - (None, 'accesskey'), - (None, 'action'), - (None, 'align'), - (None, 'alt'), - (None, 'autocomplete'), - (None, 'autofocus'), - (None, 'axis'), - (None, 'background'), - (None, 'balance'), - (None, 'bgcolor'), - (None, 'bgproperties'), - (None, 'border'), - (None, 'bordercolor'), - (None, 'bordercolordark'), - (None, 'bordercolorlight'), - (None, 'bottompadding'), - (None, 
'cellpadding'), - (None, 'cellspacing'), - (None, 'ch'), - (None, 'challenge'), - (None, 'char'), - (None, 'charoff'), - (None, 'choff'), - (None, 'charset'), - (None, 'checked'), - (None, 'cite'), - (None, 'class'), - (None, 'clear'), - (None, 'color'), - (None, 'cols'), - (None, 'colspan'), - (None, 'compact'), - (None, 'contenteditable'), - (None, 'controls'), - (None, 'coords'), - (None, 'data'), - (None, 'datafld'), - (None, 'datapagesize'), - (None, 'datasrc'), - (None, 'datetime'), - (None, 'default'), - (None, 'delay'), - (None, 'dir'), - (None, 'disabled'), - (None, 'draggable'), - (None, 'dynsrc'), - (None, 'enctype'), - (None, 'end'), - (None, 'face'), - (None, 'for'), - (None, 'form'), - (None, 'frame'), - (None, 'galleryimg'), - (None, 'gutter'), - (None, 'headers'), - (None, 'height'), - (None, 'hidefocus'), - (None, 'hidden'), - (None, 'high'), - (None, 'href'), - (None, 'hreflang'), - (None, 'hspace'), - (None, 'icon'), - (None, 'id'), - (None, 'inputmode'), - (None, 'ismap'), - (None, 'keytype'), - (None, 'label'), - (None, 'leftspacing'), - (None, 'lang'), - (None, 'list'), - (None, 'longdesc'), - (None, 'loop'), - (None, 'loopcount'), - (None, 'loopend'), - (None, 'loopstart'), - (None, 'low'), - (None, 'lowsrc'), - (None, 'max'), - (None, 'maxlength'), - (None, 'media'), - (None, 'method'), - (None, 'min'), - (None, 'multiple'), - (None, 'name'), - (None, 'nohref'), - (None, 'noshade'), - (None, 'nowrap'), - (None, 'open'), - (None, 'optimum'), - (None, 'pattern'), - (None, 'ping'), - (None, 'point-size'), - (None, 'poster'), - (None, 'pqg'), - (None, 'preload'), - (None, 'prompt'), - (None, 'radiogroup'), - (None, 'readonly'), - (None, 'rel'), - (None, 'repeat-max'), - (None, 'repeat-min'), - (None, 'replace'), - (None, 'required'), - (None, 'rev'), - (None, 'rightspacing'), - (None, 'rows'), - (None, 'rowspan'), - (None, 'rules'), - (None, 'scope'), - (None, 'selected'), - (None, 'shape'), - (None, 'size'), - (None, 'span'), - (None, 'src'), - 
(None, 'start'), - (None, 'step'), - (None, 'style'), - (None, 'summary'), - (None, 'suppress'), - (None, 'tabindex'), - (None, 'target'), - (None, 'template'), - (None, 'title'), - (None, 'toppadding'), - (None, 'type'), - (None, 'unselectable'), - (None, 'usemap'), - (None, 'urn'), - (None, 'valign'), - (None, 'value'), - (None, 'variable'), - (None, 'volume'), - (None, 'vspace'), - (None, 'vrml'), - (None, 'width'), - (None, 'wrap'), - (namespaces['xml'], 'lang'), - # MathML attributes - (None, 'actiontype'), - (None, 'align'), - (None, 'columnalign'), - (None, 'columnalign'), - (None, 'columnalign'), - (None, 'columnlines'), - (None, 'columnspacing'), - (None, 'columnspan'), - (None, 'depth'), - (None, 'display'), - (None, 'displaystyle'), - (None, 'equalcolumns'), - (None, 'equalrows'), - (None, 'fence'), - (None, 'fontstyle'), - (None, 'fontweight'), - (None, 'frame'), - (None, 'height'), - (None, 'linethickness'), - (None, 'lspace'), - (None, 'mathbackground'), - (None, 'mathcolor'), - (None, 'mathvariant'), - (None, 'mathvariant'), - (None, 'maxsize'), - (None, 'minsize'), - (None, 'other'), - (None, 'rowalign'), - (None, 'rowalign'), - (None, 'rowalign'), - (None, 'rowlines'), - (None, 'rowspacing'), - (None, 'rowspan'), - (None, 'rspace'), - (None, 'scriptlevel'), - (None, 'selection'), - (None, 'separator'), - (None, 'stretchy'), - (None, 'width'), - (None, 'width'), - (namespaces['xlink'], 'href'), - (namespaces['xlink'], 'show'), - (namespaces['xlink'], 'type'), - # SVG attributes - (None, 'accent-height'), - (None, 'accumulate'), - (None, 'additive'), - (None, 'alphabetic'), - (None, 'arabic-form'), - (None, 'ascent'), - (None, 'attributeName'), - (None, 'attributeType'), - (None, 'baseProfile'), - (None, 'bbox'), - (None, 'begin'), - (None, 'by'), - (None, 'calcMode'), - (None, 'cap-height'), - (None, 'class'), - (None, 'clip-path'), - (None, 'color'), - (None, 'color-rendering'), - (None, 'content'), - (None, 'cx'), - (None, 'cy'), - (None, 'd'), - 
(None, 'dx'), - (None, 'dy'), - (None, 'descent'), - (None, 'display'), - (None, 'dur'), - (None, 'end'), - (None, 'fill'), - (None, 'fill-opacity'), - (None, 'fill-rule'), - (None, 'font-family'), - (None, 'font-size'), - (None, 'font-stretch'), - (None, 'font-style'), - (None, 'font-variant'), - (None, 'font-weight'), - (None, 'from'), - (None, 'fx'), - (None, 'fy'), - (None, 'g1'), - (None, 'g2'), - (None, 'glyph-name'), - (None, 'gradientUnits'), - (None, 'hanging'), - (None, 'height'), - (None, 'horiz-adv-x'), - (None, 'horiz-origin-x'), - (None, 'id'), - (None, 'ideographic'), - (None, 'k'), - (None, 'keyPoints'), - (None, 'keySplines'), - (None, 'keyTimes'), - (None, 'lang'), - (None, 'marker-end'), - (None, 'marker-mid'), - (None, 'marker-start'), - (None, 'markerHeight'), - (None, 'markerUnits'), - (None, 'markerWidth'), - (None, 'mathematical'), - (None, 'max'), - (None, 'min'), - (None, 'name'), - (None, 'offset'), - (None, 'opacity'), - (None, 'orient'), - (None, 'origin'), - (None, 'overline-position'), - (None, 'overline-thickness'), - (None, 'panose-1'), - (None, 'path'), - (None, 'pathLength'), - (None, 'points'), - (None, 'preserveAspectRatio'), - (None, 'r'), - (None, 'refX'), - (None, 'refY'), - (None, 'repeatCount'), - (None, 'repeatDur'), - (None, 'requiredExtensions'), - (None, 'requiredFeatures'), - (None, 'restart'), - (None, 'rotate'), - (None, 'rx'), - (None, 'ry'), - (None, 'slope'), - (None, 'stemh'), - (None, 'stemv'), - (None, 'stop-color'), - (None, 'stop-opacity'), - (None, 'strikethrough-position'), - (None, 'strikethrough-thickness'), - (None, 'stroke'), - (None, 'stroke-dasharray'), - (None, 'stroke-dashoffset'), - (None, 'stroke-linecap'), - (None, 'stroke-linejoin'), - (None, 'stroke-miterlimit'), - (None, 'stroke-opacity'), - (None, 'stroke-width'), - (None, 'systemLanguage'), - (None, 'target'), - (None, 'text-anchor'), - (None, 'to'), - (None, 'transform'), - (None, 'type'), - (None, 'u1'), - (None, 'u2'), - (None, 
'underline-position'), - (None, 'underline-thickness'), - (None, 'unicode'), - (None, 'unicode-range'), - (None, 'units-per-em'), - (None, 'values'), - (None, 'version'), - (None, 'viewBox'), - (None, 'visibility'), - (None, 'width'), - (None, 'widths'), - (None, 'x'), - (None, 'x-height'), - (None, 'x1'), - (None, 'x2'), - (namespaces['xlink'], 'actuate'), - (namespaces['xlink'], 'arcrole'), - (namespaces['xlink'], 'href'), - (namespaces['xlink'], 'role'), - (namespaces['xlink'], 'show'), - (namespaces['xlink'], 'title'), - (namespaces['xlink'], 'type'), - (namespaces['xml'], 'base'), - (namespaces['xml'], 'lang'), - (namespaces['xml'], 'space'), - (None, 'y'), - (None, 'y1'), - (None, 'y2'), - (None, 'zoomAndPan'), -)) - -attr_val_is_uri = frozenset(( - (None, 'href'), - (None, 'src'), - (None, 'cite'), - (None, 'action'), - (None, 'longdesc'), - (None, 'poster'), - (None, 'background'), - (None, 'datasrc'), - (None, 'dynsrc'), - (None, 'lowsrc'), - (None, 'ping'), - (namespaces['xlink'], 'href'), - (namespaces['xml'], 'base'), -)) - -svg_attr_val_allows_ref = frozenset(( - (None, 'clip-path'), - (None, 'color-profile'), - (None, 'cursor'), - (None, 'fill'), - (None, 'filter'), - (None, 'marker'), - (None, 'marker-start'), - (None, 'marker-mid'), - (None, 'marker-end'), - (None, 'mask'), - (None, 'stroke'), -)) - -svg_allow_local_href = frozenset(( - (None, 'altGlyph'), - (None, 'animate'), - (None, 'animateColor'), - (None, 'animateMotion'), - (None, 'animateTransform'), - (None, 'cursor'), - (None, 'feImage'), - (None, 'filter'), - (None, 'linearGradient'), - (None, 'pattern'), - (None, 'radialGradient'), - (None, 'textpath'), - (None, 'tref'), - (None, 'set'), - (None, 'use') -)) - -allowed_css_properties = frozenset(( - 'azimuth', - 'background-color', - 'border-bottom-color', - 'border-collapse', - 'border-color', - 'border-left-color', - 'border-right-color', - 'border-top-color', - 'clear', - 'color', - 'cursor', - 'direction', - 'display', - 'elevation', 
- 'float', - 'font', - 'font-family', - 'font-size', - 'font-style', - 'font-variant', - 'font-weight', - 'height', - 'letter-spacing', - 'line-height', - 'overflow', - 'pause', - 'pause-after', - 'pause-before', - 'pitch', - 'pitch-range', - 'richness', - 'speak', - 'speak-header', - 'speak-numeral', - 'speak-punctuation', - 'speech-rate', - 'stress', - 'text-align', - 'text-decoration', - 'text-indent', - 'unicode-bidi', - 'vertical-align', - 'voice-family', - 'volume', - 'white-space', - 'width', -)) - -allowed_css_keywords = frozenset(( - 'auto', - 'aqua', - 'black', - 'block', - 'blue', - 'bold', - 'both', - 'bottom', - 'brown', - 'center', - 'collapse', - 'dashed', - 'dotted', - 'fuchsia', - 'gray', - 'green', - '!important', - 'italic', - 'left', - 'lime', - 'maroon', - 'medium', - 'none', - 'navy', - 'normal', - 'nowrap', - 'olive', - 'pointer', - 'purple', - 'red', - 'right', - 'solid', - 'silver', - 'teal', - 'top', - 'transparent', - 'underline', - 'white', - 'yellow', -)) - -allowed_svg_properties = frozenset(( - 'fill', - 'fill-opacity', - 'fill-rule', - 'stroke', - 'stroke-width', - 'stroke-linecap', - 'stroke-linejoin', - 'stroke-opacity', -)) - -allowed_protocols = frozenset(( - 'ed2k', - 'ftp', - 'http', - 'https', - 'irc', - 'mailto', - 'news', - 'gopher', - 'nntp', - 'telnet', - 'webcal', - 'xmpp', - 'callto', - 'feed', - 'urn', - 'aim', - 'rsync', - 'tag', - 'ssh', - 'sftp', - 'rtsp', - 'afs', - 'data', -)) - -allowed_content_types = frozenset(( - 'image/png', - 'image/jpeg', - 'image/gif', - 'image/webp', - 'image/bmp', - 'text/plain', -)) - - -data_content_type = re.compile(r''' - ^ - # Match a content type <application>/<type> - (?P<content_type>[-a-zA-Z0-9.]+/[-a-zA-Z0-9.]+) - # Match any character set and encoding - (?:(?:;charset=(?:[-a-zA-Z0-9]+)(?:;(?:base64))?) - |(?:;(?:base64))?(?:;charset=(?:[-a-zA-Z0-9]+))?) 
- # Assume the rest is data - ,.* - $ - ''', - re.VERBOSE) - - -class Filter(base.Filter): - """Sanitizes token stream of XHTML+MathML+SVG and of inline style attributes""" - def __init__(self, - source, - allowed_elements=allowed_elements, - allowed_attributes=allowed_attributes, - allowed_css_properties=allowed_css_properties, - allowed_css_keywords=allowed_css_keywords, - allowed_svg_properties=allowed_svg_properties, - allowed_protocols=allowed_protocols, - allowed_content_types=allowed_content_types, - attr_val_is_uri=attr_val_is_uri, - svg_attr_val_allows_ref=svg_attr_val_allows_ref, - svg_allow_local_href=svg_allow_local_href): - """Creates a Filter - - :arg allowed_elements: set of elements to allow--everything else will - be escaped - - :arg allowed_attributes: set of attributes to allow in - elements--everything else will be stripped - - :arg allowed_css_properties: set of CSS properties to allow--everything - else will be stripped - - :arg allowed_css_keywords: set of CSS keywords to allow--everything - else will be stripped - - :arg allowed_svg_properties: set of SVG properties to allow--everything - else will be removed - - :arg allowed_protocols: set of allowed protocols for URIs - - :arg allowed_content_types: set of allowed content types for ``data`` URIs. 
- - :arg attr_val_is_uri: set of attributes that have URI values--values - that have a scheme not listed in ``allowed_protocols`` are removed - - :arg svg_attr_val_allows_ref: set of SVG attributes that can have - references - - :arg svg_allow_local_href: set of SVG elements that can have local - hrefs--these are removed - - """ - super(Filter, self).__init__(source) - self.allowed_elements = allowed_elements - self.allowed_attributes = allowed_attributes - self.allowed_css_properties = allowed_css_properties - self.allowed_css_keywords = allowed_css_keywords - self.allowed_svg_properties = allowed_svg_properties - self.allowed_protocols = allowed_protocols - self.allowed_content_types = allowed_content_types - self.attr_val_is_uri = attr_val_is_uri - self.svg_attr_val_allows_ref = svg_attr_val_allows_ref - self.svg_allow_local_href = svg_allow_local_href - - def __iter__(self): - for token in base.Filter.__iter__(self): - token = self.sanitize_token(token) - if token: - yield token - - # Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and - # stripping out all attributes not in ALLOWED_ATTRIBUTES. Style attributes - # are parsed, and a restricted set, specified by ALLOWED_CSS_PROPERTIES and - # ALLOWED_CSS_KEYWORDS, are allowed through. attributes in ATTR_VAL_IS_URI - # are scanned, and only URI schemes specified in ALLOWED_PROTOCOLS are - # allowed. 
- # - # sanitize_html('<script> do_nasty_stuff() </script>') - # => <script> do_nasty_stuff() </script> - # sanitize_html('<a href="javascript: sucker();">Click here for $100</a>') - # => <a>Click here for $100</a> - def sanitize_token(self, token): - - # accommodate filters which use token_type differently - token_type = token["type"] - if token_type in ("StartTag", "EndTag", "EmptyTag"): - name = token["name"] - namespace = token["namespace"] - if ((namespace, name) in self.allowed_elements or - (namespace is None and - (namespaces["html"], name) in self.allowed_elements)): - return self.allowed_token(token) - else: - return self.disallowed_token(token) - elif token_type == "Comment": - pass - else: - return token - - def allowed_token(self, token): - if "data" in token: - attrs = token["data"] - attr_names = set(attrs.keys()) - - # Remove forbidden attributes - for to_remove in (attr_names - self.allowed_attributes): - del token["data"][to_remove] - attr_names.remove(to_remove) - - # Remove attributes with disallowed URL values - for attr in (attr_names & self.attr_val_is_uri): - assert attr in attrs - # I don't have a clue where this regexp comes from or why it matches those - # characters, nor why we call unescape. I just know it's always been here. - # Should you be worried by this comment in a sanitizer? Yes. On the other hand, all - # this will do is remove *more* than it otherwise would. 
- val_unescaped = re.sub("[`\x00-\x20\x7f-\xa0\\s]+", '', - unescape(attrs[attr])).lower() - # remove replacement characters from unescaped characters - val_unescaped = val_unescaped.replace("\ufffd", "") - try: - uri = urlparse.urlparse(val_unescaped) - except ValueError: - uri = None - del attrs[attr] - if uri and uri.scheme: - if uri.scheme not in self.allowed_protocols: - del attrs[attr] - if uri.scheme == 'data': - m = data_content_type.match(uri.path) - if not m: - del attrs[attr] - elif m.group('content_type') not in self.allowed_content_types: - del attrs[attr] - - for attr in self.svg_attr_val_allows_ref: - if attr in attrs: - attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)', - ' ', - unescape(attrs[attr])) - if (token["name"] in self.svg_allow_local_href and - (namespaces['xlink'], 'href') in attrs and re.search(r'^\s*[^#\s].*', - attrs[(namespaces['xlink'], 'href')])): - del attrs[(namespaces['xlink'], 'href')] - if (None, 'style') in attrs: - attrs[(None, 'style')] = self.sanitize_css(attrs[(None, 'style')]) - token["data"] = attrs - return token - - def disallowed_token(self, token): - token_type = token["type"] - if token_type == "EndTag": - token["data"] = "</%s>" % token["name"] - elif token["data"]: - assert token_type in ("StartTag", "EmptyTag") - attrs = [] - for (ns, name), v in token["data"].items(): - attrs.append(' %s="%s"' % (name if ns is None else "%s:%s" % (prefixes[ns], name), escape(v))) - token["data"] = "<%s%s>" % (token["name"], ''.join(attrs)) - else: - token["data"] = "<%s>" % token["name"] - if token.get("selfClosing"): - token["data"] = token["data"][:-1] + "/>" - - token["type"] = "Characters" - - del token["name"] - return token - - def sanitize_css(self, style): - # disallow urls - style = re.compile(r'url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style) - - # gauntlet - if not re.match(r"""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): - return '' - if not re.match(r"^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", 
style): - return '' - - clean = [] - for prop, value in re.findall(r"([-\w]+)\s*:\s*([^:;]*)", style): - if not value: - continue - if prop.lower() in self.allowed_css_properties: - clean.append(prop + ': ' + value + ';') - elif prop.split('-')[0].lower() in ['background', 'border', 'margin', - 'padding']: - for keyword in value.split(): - if keyword not in self.allowed_css_keywords and \ - not re.match(r"^(#[0-9a-fA-F]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword): # noqa - break - else: - clean.append(prop + ': ' + value + ';') - elif prop.lower() in self.allowed_svg_properties: - clean.append(prop + ': ' + value + ';') - - return ' '.join(clean) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/filters/whitespace.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/filters/whitespace.py deleted file mode 100644 index 0d12584..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/filters/whitespace.py +++ /dev/null @@ -1,38 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -import re - -from . 
import base -from ..constants import rcdataElements, spaceCharacters -spaceCharacters = "".join(spaceCharacters) - -SPACES_REGEX = re.compile("[%s]+" % spaceCharacters) - - -class Filter(base.Filter): - """Collapses whitespace except in pre, textarea, and script elements""" - spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements)) - - def __iter__(self): - preserve = 0 - for token in base.Filter.__iter__(self): - type = token["type"] - if type == "StartTag" \ - and (preserve or token["name"] in self.spacePreserveElements): - preserve += 1 - - elif type == "EndTag" and preserve: - preserve -= 1 - - elif not preserve and type == "SpaceCharacters" and token["data"]: - # Test on token["data"] above to not introduce spaces where there were not - token["data"] = " " - - elif not preserve and type == "Characters": - token["data"] = collapse_spaces(token["data"]) - - yield token - - -def collapse_spaces(text): - return SPACES_REGEX.sub(' ', text) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/html5parser.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/html5parser.py deleted file mode 100644 index ae41a13..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/html5parser.py +++ /dev/null @@ -1,2791 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals -from pip._vendor.six import with_metaclass, viewkeys - -import types -from collections import OrderedDict - -from . import _inputstream -from . import _tokenizer - -from . import treebuilders -from .treebuilders.base import Marker - -from . 
import _utils -from .constants import ( - spaceCharacters, asciiUpper2Lower, - specialElements, headingElements, cdataElements, rcdataElements, - tokenTypes, tagTokenTypes, - namespaces, - htmlIntegrationPointElements, mathmlTextIntegrationPointElements, - adjustForeignAttributes as adjustForeignAttributesMap, - adjustMathMLAttributes, adjustSVGAttributes, - E, - _ReparseException -) - - -def parse(doc, treebuilder="etree", namespaceHTMLElements=True, **kwargs): - """Parse an HTML document as a string or file-like object into a tree - - :arg doc: the document to parse as a string or file-like object - - :arg treebuilder: the treebuilder to use when parsing - - :arg namespaceHTMLElements: whether or not to namespace HTML elements - - :returns: parsed tree - - Example: - - >>> from html5lib.html5parser import parse - >>> parse('<html><body><p>This is a doc</p></body></html>') - <Element u'{http://www.w3.org/1999/xhtml}html' at 0x7feac4909db0> - - """ - tb = treebuilders.getTreeBuilder(treebuilder) - p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements) - return p.parse(doc, **kwargs) - - -def parseFragment(doc, container="div", treebuilder="etree", namespaceHTMLElements=True, **kwargs): - """Parse an HTML fragment as a string or file-like object into a tree - - :arg doc: the fragment to parse as a string or file-like object - - :arg container: the container context to parse the fragment in - - :arg treebuilder: the treebuilder to use when parsing - - :arg namespaceHTMLElements: whether or not to namespace HTML elements - - :returns: parsed tree - - Example: - - >>> from html5lib.html5libparser import parseFragment - >>> parseFragment('<b>this is a fragment</b>') - <Element u'DOCUMENT_FRAGMENT' at 0x7feac484b090> - - """ - tb = treebuilders.getTreeBuilder(treebuilder) - p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements) - return p.parseFragment(doc, container=container, **kwargs) - - -def method_decorator_metaclass(function): - class 
Decorated(type): - def __new__(meta, classname, bases, classDict): - for attributeName, attribute in classDict.items(): - if isinstance(attribute, types.FunctionType): - attribute = function(attribute) - - classDict[attributeName] = attribute - return type.__new__(meta, classname, bases, classDict) - return Decorated - - -class HTMLParser(object): - """HTML parser - - Generates a tree structure from a stream of (possibly malformed) HTML. - - """ - - def __init__(self, tree=None, strict=False, namespaceHTMLElements=True, debug=False): - """ - :arg tree: a treebuilder class controlling the type of tree that will be - returned. Built in treebuilders can be accessed through - html5lib.treebuilders.getTreeBuilder(treeType) - - :arg strict: raise an exception when a parse error is encountered - - :arg namespaceHTMLElements: whether or not to namespace HTML elements - - :arg debug: whether or not to enable debug mode which logs things - - Example: - - >>> from html5lib.html5parser import HTMLParser - >>> parser = HTMLParser() # generates parser with etree builder - >>> parser = HTMLParser('lxml', strict=True) # generates parser with lxml builder which is strict - - """ - - # Raise an exception on the first error encountered - self.strict = strict - - if tree is None: - tree = treebuilders.getTreeBuilder("etree") - self.tree = tree(namespaceHTMLElements) - self.errors = [] - - self.phases = dict([(name, cls(self, self.tree)) for name, cls in - getPhases(debug).items()]) - - def _parse(self, stream, innerHTML=False, container="div", scripting=False, **kwargs): - - self.innerHTMLMode = innerHTML - self.container = container - self.scripting = scripting - self.tokenizer = _tokenizer.HTMLTokenizer(stream, parser=self, **kwargs) - self.reset() - - try: - self.mainLoop() - except _ReparseException: - self.reset() - self.mainLoop() - - def reset(self): - self.tree.reset() - self.firstStartTag = False - self.errors = [] - self.log = [] # only used with debug mode - # "quirks" / 
"limited quirks" / "no quirks" - self.compatMode = "no quirks" - - if self.innerHTMLMode: - self.innerHTML = self.container.lower() - - if self.innerHTML in cdataElements: - self.tokenizer.state = self.tokenizer.rcdataState - elif self.innerHTML in rcdataElements: - self.tokenizer.state = self.tokenizer.rawtextState - elif self.innerHTML == 'plaintext': - self.tokenizer.state = self.tokenizer.plaintextState - else: - # state already is data state - # self.tokenizer.state = self.tokenizer.dataState - pass - self.phase = self.phases["beforeHtml"] - self.phase.insertHtmlElement() - self.resetInsertionMode() - else: - self.innerHTML = False # pylint:disable=redefined-variable-type - self.phase = self.phases["initial"] - - self.lastPhase = None - - self.beforeRCDataPhase = None - - self.framesetOK = True - - @property - def documentEncoding(self): - """Name of the character encoding that was used to decode the input stream, or - :obj:`None` if that is not determined yet - - """ - if not hasattr(self, 'tokenizer'): - return None - return self.tokenizer.stream.charEncoding[0].name - - def isHTMLIntegrationPoint(self, element): - if (element.name == "annotation-xml" and - element.namespace == namespaces["mathml"]): - return ("encoding" in element.attributes and - element.attributes["encoding"].translate( - asciiUpper2Lower) in - ("text/html", "application/xhtml+xml")) - else: - return (element.namespace, element.name) in htmlIntegrationPointElements - - def isMathMLTextIntegrationPoint(self, element): - return (element.namespace, element.name) in mathmlTextIntegrationPointElements - - def mainLoop(self): - CharactersToken = tokenTypes["Characters"] - SpaceCharactersToken = tokenTypes["SpaceCharacters"] - StartTagToken = tokenTypes["StartTag"] - EndTagToken = tokenTypes["EndTag"] - CommentToken = tokenTypes["Comment"] - DoctypeToken = tokenTypes["Doctype"] - ParseErrorToken = tokenTypes["ParseError"] - - for token in self.normalizedTokens(): - prev_token = None - new_token 
= token - while new_token is not None: - prev_token = new_token - currentNode = self.tree.openElements[-1] if self.tree.openElements else None - currentNodeNamespace = currentNode.namespace if currentNode else None - currentNodeName = currentNode.name if currentNode else None - - type = new_token["type"] - - if type == ParseErrorToken: - self.parseError(new_token["data"], new_token.get("datavars", {})) - new_token = None - else: - if (len(self.tree.openElements) == 0 or - currentNodeNamespace == self.tree.defaultNamespace or - (self.isMathMLTextIntegrationPoint(currentNode) and - ((type == StartTagToken and - token["name"] not in frozenset(["mglyph", "malignmark"])) or - type in (CharactersToken, SpaceCharactersToken))) or - (currentNodeNamespace == namespaces["mathml"] and - currentNodeName == "annotation-xml" and - type == StartTagToken and - token["name"] == "svg") or - (self.isHTMLIntegrationPoint(currentNode) and - type in (StartTagToken, CharactersToken, SpaceCharactersToken))): - phase = self.phase - else: - phase = self.phases["inForeignContent"] - - if type == CharactersToken: - new_token = phase.processCharacters(new_token) - elif type == SpaceCharactersToken: - new_token = phase.processSpaceCharacters(new_token) - elif type == StartTagToken: - new_token = phase.processStartTag(new_token) - elif type == EndTagToken: - new_token = phase.processEndTag(new_token) - elif type == CommentToken: - new_token = phase.processComment(new_token) - elif type == DoctypeToken: - new_token = phase.processDoctype(new_token) - - if (type == StartTagToken and prev_token["selfClosing"] and - not prev_token["selfClosingAcknowledged"]): - self.parseError("non-void-element-with-trailing-solidus", - {"name": prev_token["name"]}) - - # When the loop finishes it's EOF - reprocess = True - phases = [] - while reprocess: - phases.append(self.phase) - reprocess = self.phase.processEOF() - if reprocess: - assert self.phase not in phases - - def normalizedTokens(self): - for token in 
self.tokenizer: - yield self.normalizeToken(token) - - def parse(self, stream, *args, **kwargs): - """Parse a HTML document into a well-formed tree - - :arg stream: a file-like object or string containing the HTML to be parsed - - The optional encoding parameter must be a string that indicates - the encoding. If specified, that encoding will be used, - regardless of any BOM or later declaration (such as in a meta - element). - - :arg scripting: treat noscript elements as if JavaScript was turned on - - :returns: parsed tree - - Example: - - >>> from html5lib.html5parser import HTMLParser - >>> parser = HTMLParser() - >>> parser.parse('<html><body><p>This is a doc</p></body></html>') - <Element u'{http://www.w3.org/1999/xhtml}html' at 0x7feac4909db0> - - """ - self._parse(stream, False, None, *args, **kwargs) - return self.tree.getDocument() - - def parseFragment(self, stream, *args, **kwargs): - """Parse a HTML fragment into a well-formed tree fragment - - :arg container: name of the element we're setting the innerHTML - property if set to None, default to 'div' - - :arg stream: a file-like object or string containing the HTML to be parsed - - The optional encoding parameter must be a string that indicates - the encoding. If specified, that encoding will be used, - regardless of any BOM or later declaration (such as in a meta - element) - - :arg scripting: treat noscript elements as if JavaScript was turned on - - :returns: parsed tree - - Example: - - >>> from html5lib.html5libparser import HTMLParser - >>> parser = HTMLParser() - >>> parser.parseFragment('<b>this is a fragment</b>') - <Element u'DOCUMENT_FRAGMENT' at 0x7feac484b090> - - """ - self._parse(stream, True, *args, **kwargs) - return self.tree.getFragment() - - def parseError(self, errorcode="XXX-undefined-error", datavars=None): - # XXX The idea is to make errorcode mandatory. 
- if datavars is None: - datavars = {} - self.errors.append((self.tokenizer.stream.position(), errorcode, datavars)) - if self.strict: - raise ParseError(E[errorcode] % datavars) - - def normalizeToken(self, token): - # HTML5 specific normalizations to the token stream - if token["type"] == tokenTypes["StartTag"]: - raw = token["data"] - token["data"] = OrderedDict(raw) - if len(raw) > len(token["data"]): - # we had some duplicated attribute, fix so first wins - token["data"].update(raw[::-1]) - - return token - - def adjustMathMLAttributes(self, token): - adjust_attributes(token, adjustMathMLAttributes) - - def adjustSVGAttributes(self, token): - adjust_attributes(token, adjustSVGAttributes) - - def adjustForeignAttributes(self, token): - adjust_attributes(token, adjustForeignAttributesMap) - - def reparseTokenNormal(self, token): - # pylint:disable=unused-argument - self.parser.phase() - - def resetInsertionMode(self): - # The name of this method is mostly historical. (It's also used in the - # specification.) 
- last = False - newModes = { - "select": "inSelect", - "td": "inCell", - "th": "inCell", - "tr": "inRow", - "tbody": "inTableBody", - "thead": "inTableBody", - "tfoot": "inTableBody", - "caption": "inCaption", - "colgroup": "inColumnGroup", - "table": "inTable", - "head": "inBody", - "body": "inBody", - "frameset": "inFrameset", - "html": "beforeHead" - } - for node in self.tree.openElements[::-1]: - nodeName = node.name - new_phase = None - if node == self.tree.openElements[0]: - assert self.innerHTML - last = True - nodeName = self.innerHTML - # Check for conditions that should only happen in the innerHTML - # case - if nodeName in ("select", "colgroup", "head", "html"): - assert self.innerHTML - - if not last and node.namespace != self.tree.defaultNamespace: - continue - - if nodeName in newModes: - new_phase = self.phases[newModes[nodeName]] - break - elif last: - new_phase = self.phases["inBody"] - break - - self.phase = new_phase - - def parseRCDataRawtext(self, token, contentType): - # Generic RCDATA/RAWTEXT Parsing algorithm - assert contentType in ("RAWTEXT", "RCDATA") - - self.tree.insertElement(token) - - if contentType == "RAWTEXT": - self.tokenizer.state = self.tokenizer.rawtextState - else: - self.tokenizer.state = self.tokenizer.rcdataState - - self.originalPhase = self.phase - - self.phase = self.phases["text"] - - -@_utils.memoize -def getPhases(debug): - def log(function): - """Logger that records which phase processes each token""" - type_names = dict((value, key) for key, value in - tokenTypes.items()) - - def wrapped(self, *args, **kwargs): - if function.__name__.startswith("process") and len(args) > 0: - token = args[0] - try: - info = {"type": type_names[token['type']]} - except: - raise - if token['type'] in tagTokenTypes: - info["name"] = token['name'] - - self.parser.log.append((self.parser.tokenizer.state.__name__, - self.parser.phase.__class__.__name__, - self.__class__.__name__, - function.__name__, - info)) - return function(self, 
*args, **kwargs) - else: - return function(self, *args, **kwargs) - return wrapped - - def getMetaclass(use_metaclass, metaclass_func): - if use_metaclass: - return method_decorator_metaclass(metaclass_func) - else: - return type - - # pylint:disable=unused-argument - class Phase(with_metaclass(getMetaclass(debug, log))): - """Base class for helper object that implements each phase of processing - """ - - def __init__(self, parser, tree): - self.parser = parser - self.tree = tree - - def processEOF(self): - raise NotImplementedError - - def processComment(self, token): - # For most phases the following is correct. Where it's not it will be - # overridden. - self.tree.insertComment(token, self.tree.openElements[-1]) - - def processDoctype(self, token): - self.parser.parseError("unexpected-doctype") - - def processCharacters(self, token): - self.tree.insertText(token["data"]) - - def processSpaceCharacters(self, token): - self.tree.insertText(token["data"]) - - def processStartTag(self, token): - return self.startTagHandler[token["name"]](token) - - def startTagHtml(self, token): - if not self.parser.firstStartTag and token["name"] == "html": - self.parser.parseError("non-html-root") - # XXX Need a check here to see if the first start tag token emitted is - # this token... If it's not, invoke self.parser.parseError(). 
- for attr, value in token["data"].items(): - if attr not in self.tree.openElements[0].attributes: - self.tree.openElements[0].attributes[attr] = value - self.parser.firstStartTag = False - - def processEndTag(self, token): - return self.endTagHandler[token["name"]](token) - - class InitialPhase(Phase): - def processSpaceCharacters(self, token): - pass - - def processComment(self, token): - self.tree.insertComment(token, self.tree.document) - - def processDoctype(self, token): - name = token["name"] - publicId = token["publicId"] - systemId = token["systemId"] - correct = token["correct"] - - if (name != "html" or publicId is not None or - systemId is not None and systemId != "about:legacy-compat"): - self.parser.parseError("unknown-doctype") - - if publicId is None: - publicId = "" - - self.tree.insertDoctype(token) - - if publicId != "": - publicId = publicId.translate(asciiUpper2Lower) - - if (not correct or token["name"] != "html" or - publicId.startswith( - ("+//silmaril//dtd html pro v0r11 19970101//", - "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", - "-//as//dtd html 3.0 aswedit + extensions//", - "-//ietf//dtd html 2.0 level 1//", - "-//ietf//dtd html 2.0 level 2//", - "-//ietf//dtd html 2.0 strict level 1//", - "-//ietf//dtd html 2.0 strict level 2//", - "-//ietf//dtd html 2.0 strict//", - "-//ietf//dtd html 2.0//", - "-//ietf//dtd html 2.1e//", - "-//ietf//dtd html 3.0//", - "-//ietf//dtd html 3.2 final//", - "-//ietf//dtd html 3.2//", - "-//ietf//dtd html 3//", - "-//ietf//dtd html level 0//", - "-//ietf//dtd html level 1//", - "-//ietf//dtd html level 2//", - "-//ietf//dtd html level 3//", - "-//ietf//dtd html strict level 0//", - "-//ietf//dtd html strict level 1//", - "-//ietf//dtd html strict level 2//", - "-//ietf//dtd html strict level 3//", - "-//ietf//dtd html strict//", - "-//ietf//dtd html//", - "-//metrius//dtd metrius presentational//", - "-//microsoft//dtd internet explorer 2.0 html strict//", - "-//microsoft//dtd internet explorer 
2.0 html//", - "-//microsoft//dtd internet explorer 2.0 tables//", - "-//microsoft//dtd internet explorer 3.0 html strict//", - "-//microsoft//dtd internet explorer 3.0 html//", - "-//microsoft//dtd internet explorer 3.0 tables//", - "-//netscape comm. corp.//dtd html//", - "-//netscape comm. corp.//dtd strict html//", - "-//o'reilly and associates//dtd html 2.0//", - "-//o'reilly and associates//dtd html extended 1.0//", - "-//o'reilly and associates//dtd html extended relaxed 1.0//", - "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", - "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", - "-//spyglass//dtd html 2.0 extended//", - "-//sq//dtd html 2.0 hotmetal + extensions//", - "-//sun microsystems corp.//dtd hotjava html//", - "-//sun microsystems corp.//dtd hotjava strict html//", - "-//w3c//dtd html 3 1995-03-24//", - "-//w3c//dtd html 3.2 draft//", - "-//w3c//dtd html 3.2 final//", - "-//w3c//dtd html 3.2//", - "-//w3c//dtd html 3.2s draft//", - "-//w3c//dtd html 4.0 frameset//", - "-//w3c//dtd html 4.0 transitional//", - "-//w3c//dtd html experimental 19960712//", - "-//w3c//dtd html experimental 970421//", - "-//w3c//dtd w3 html//", - "-//w3o//dtd w3 html 3.0//", - "-//webtechs//dtd mozilla html 2.0//", - "-//webtechs//dtd mozilla html//")) or - publicId in ("-//w3o//dtd w3 html strict 3.0//en//", - "-/w3c/dtd html 4.0 transitional/en", - "html") or - publicId.startswith( - ("-//w3c//dtd html 4.01 frameset//", - "-//w3c//dtd html 4.01 transitional//")) and - systemId is None or - systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"): - self.parser.compatMode = "quirks" - elif (publicId.startswith( - ("-//w3c//dtd xhtml 1.0 frameset//", - "-//w3c//dtd xhtml 1.0 transitional//")) or - publicId.startswith( - ("-//w3c//dtd html 4.01 frameset//", - "-//w3c//dtd html 4.01 transitional//")) and - systemId is not None): - self.parser.compatMode = "limited quirks" - - 
self.parser.phase = self.parser.phases["beforeHtml"] - - def anythingElse(self): - self.parser.compatMode = "quirks" - self.parser.phase = self.parser.phases["beforeHtml"] - - def processCharacters(self, token): - self.parser.parseError("expected-doctype-but-got-chars") - self.anythingElse() - return token - - def processStartTag(self, token): - self.parser.parseError("expected-doctype-but-got-start-tag", - {"name": token["name"]}) - self.anythingElse() - return token - - def processEndTag(self, token): - self.parser.parseError("expected-doctype-but-got-end-tag", - {"name": token["name"]}) - self.anythingElse() - return token - - def processEOF(self): - self.parser.parseError("expected-doctype-but-got-eof") - self.anythingElse() - return True - - class BeforeHtmlPhase(Phase): - # helper methods - def insertHtmlElement(self): - self.tree.insertRoot(impliedTagToken("html", "StartTag")) - self.parser.phase = self.parser.phases["beforeHead"] - - # other - def processEOF(self): - self.insertHtmlElement() - return True - - def processComment(self, token): - self.tree.insertComment(token, self.tree.document) - - def processSpaceCharacters(self, token): - pass - - def processCharacters(self, token): - self.insertHtmlElement() - return token - - def processStartTag(self, token): - if token["name"] == "html": - self.parser.firstStartTag = True - self.insertHtmlElement() - return token - - def processEndTag(self, token): - if token["name"] not in ("head", "body", "html", "br"): - self.parser.parseError("unexpected-end-tag-before-html", - {"name": token["name"]}) - else: - self.insertHtmlElement() - return token - - class BeforeHeadPhase(Phase): - def __init__(self, parser, tree): - Phase.__init__(self, parser, tree) - - self.startTagHandler = _utils.MethodDispatcher([ - ("html", self.startTagHtml), - ("head", self.startTagHead) - ]) - self.startTagHandler.default = self.startTagOther - - self.endTagHandler = _utils.MethodDispatcher([ - (("head", "body", "html", "br"), 
self.endTagImplyHead) - ]) - self.endTagHandler.default = self.endTagOther - - def processEOF(self): - self.startTagHead(impliedTagToken("head", "StartTag")) - return True - - def processSpaceCharacters(self, token): - pass - - def processCharacters(self, token): - self.startTagHead(impliedTagToken("head", "StartTag")) - return token - - def startTagHtml(self, token): - return self.parser.phases["inBody"].processStartTag(token) - - def startTagHead(self, token): - self.tree.insertElement(token) - self.tree.headPointer = self.tree.openElements[-1] - self.parser.phase = self.parser.phases["inHead"] - - def startTagOther(self, token): - self.startTagHead(impliedTagToken("head", "StartTag")) - return token - - def endTagImplyHead(self, token): - self.startTagHead(impliedTagToken("head", "StartTag")) - return token - - def endTagOther(self, token): - self.parser.parseError("end-tag-after-implied-root", - {"name": token["name"]}) - - class InHeadPhase(Phase): - def __init__(self, parser, tree): - Phase.__init__(self, parser, tree) - - self.startTagHandler = _utils.MethodDispatcher([ - ("html", self.startTagHtml), - ("title", self.startTagTitle), - (("noframes", "style"), self.startTagNoFramesStyle), - ("noscript", self.startTagNoscript), - ("script", self.startTagScript), - (("base", "basefont", "bgsound", "command", "link"), - self.startTagBaseLinkCommand), - ("meta", self.startTagMeta), - ("head", self.startTagHead) - ]) - self.startTagHandler.default = self.startTagOther - - self.endTagHandler = _utils.MethodDispatcher([ - ("head", self.endTagHead), - (("br", "html", "body"), self.endTagHtmlBodyBr) - ]) - self.endTagHandler.default = self.endTagOther - - # the real thing - def processEOF(self): - self.anythingElse() - return True - - def processCharacters(self, token): - self.anythingElse() - return token - - def startTagHtml(self, token): - return self.parser.phases["inBody"].processStartTag(token) - - def startTagHead(self, token): - 
self.parser.parseError("two-heads-are-not-better-than-one") - - def startTagBaseLinkCommand(self, token): - self.tree.insertElement(token) - self.tree.openElements.pop() - token["selfClosingAcknowledged"] = True - - def startTagMeta(self, token): - self.tree.insertElement(token) - self.tree.openElements.pop() - token["selfClosingAcknowledged"] = True - - attributes = token["data"] - if self.parser.tokenizer.stream.charEncoding[1] == "tentative": - if "charset" in attributes: - self.parser.tokenizer.stream.changeEncoding(attributes["charset"]) - elif ("content" in attributes and - "http-equiv" in attributes and - attributes["http-equiv"].lower() == "content-type"): - # Encoding it as UTF-8 here is a hack, as really we should pass - # the abstract Unicode string, and just use the - # ContentAttrParser on that, but using UTF-8 allows all chars - # to be encoded and as a ASCII-superset works. - data = _inputstream.EncodingBytes(attributes["content"].encode("utf-8")) - parser = _inputstream.ContentAttrParser(data) - codec = parser.parse() - self.parser.tokenizer.stream.changeEncoding(codec) - - def startTagTitle(self, token): - self.parser.parseRCDataRawtext(token, "RCDATA") - - def startTagNoFramesStyle(self, token): - # Need to decide whether to implement the scripting-disabled case - self.parser.parseRCDataRawtext(token, "RAWTEXT") - - def startTagNoscript(self, token): - if self.parser.scripting: - self.parser.parseRCDataRawtext(token, "RAWTEXT") - else: - self.tree.insertElement(token) - self.parser.phase = self.parser.phases["inHeadNoscript"] - - def startTagScript(self, token): - self.tree.insertElement(token) - self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState - self.parser.originalPhase = self.parser.phase - self.parser.phase = self.parser.phases["text"] - - def startTagOther(self, token): - self.anythingElse() - return token - - def endTagHead(self, token): - node = self.parser.tree.openElements.pop() - assert node.name == "head", "Expected 
head got %s" % node.name - self.parser.phase = self.parser.phases["afterHead"] - - def endTagHtmlBodyBr(self, token): - self.anythingElse() - return token - - def endTagOther(self, token): - self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) - - def anythingElse(self): - self.endTagHead(impliedTagToken("head")) - - class InHeadNoscriptPhase(Phase): - def __init__(self, parser, tree): - Phase.__init__(self, parser, tree) - - self.startTagHandler = _utils.MethodDispatcher([ - ("html", self.startTagHtml), - (("basefont", "bgsound", "link", "meta", "noframes", "style"), self.startTagBaseLinkCommand), - (("head", "noscript"), self.startTagHeadNoscript), - ]) - self.startTagHandler.default = self.startTagOther - - self.endTagHandler = _utils.MethodDispatcher([ - ("noscript", self.endTagNoscript), - ("br", self.endTagBr), - ]) - self.endTagHandler.default = self.endTagOther - - def processEOF(self): - self.parser.parseError("eof-in-head-noscript") - self.anythingElse() - return True - - def processComment(self, token): - return self.parser.phases["inHead"].processComment(token) - - def processCharacters(self, token): - self.parser.parseError("char-in-head-noscript") - self.anythingElse() - return token - - def processSpaceCharacters(self, token): - return self.parser.phases["inHead"].processSpaceCharacters(token) - - def startTagHtml(self, token): - return self.parser.phases["inBody"].processStartTag(token) - - def startTagBaseLinkCommand(self, token): - return self.parser.phases["inHead"].processStartTag(token) - - def startTagHeadNoscript(self, token): - self.parser.parseError("unexpected-start-tag", {"name": token["name"]}) - - def startTagOther(self, token): - self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]}) - self.anythingElse() - return token - - def endTagNoscript(self, token): - node = self.parser.tree.openElements.pop() - assert node.name == "noscript", "Expected noscript got %s" % node.name - self.parser.phase 
= self.parser.phases["inHead"] - - def endTagBr(self, token): - self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]}) - self.anythingElse() - return token - - def endTagOther(self, token): - self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) - - def anythingElse(self): - # Caller must raise parse error first! - self.endTagNoscript(impliedTagToken("noscript")) - - class AfterHeadPhase(Phase): - def __init__(self, parser, tree): - Phase.__init__(self, parser, tree) - - self.startTagHandler = _utils.MethodDispatcher([ - ("html", self.startTagHtml), - ("body", self.startTagBody), - ("frameset", self.startTagFrameset), - (("base", "basefont", "bgsound", "link", "meta", "noframes", "script", - "style", "title"), - self.startTagFromHead), - ("head", self.startTagHead) - ]) - self.startTagHandler.default = self.startTagOther - self.endTagHandler = _utils.MethodDispatcher([(("body", "html", "br"), - self.endTagHtmlBodyBr)]) - self.endTagHandler.default = self.endTagOther - - def processEOF(self): - self.anythingElse() - return True - - def processCharacters(self, token): - self.anythingElse() - return token - - def startTagHtml(self, token): - return self.parser.phases["inBody"].processStartTag(token) - - def startTagBody(self, token): - self.parser.framesetOK = False - self.tree.insertElement(token) - self.parser.phase = self.parser.phases["inBody"] - - def startTagFrameset(self, token): - self.tree.insertElement(token) - self.parser.phase = self.parser.phases["inFrameset"] - - def startTagFromHead(self, token): - self.parser.parseError("unexpected-start-tag-out-of-my-head", - {"name": token["name"]}) - self.tree.openElements.append(self.tree.headPointer) - self.parser.phases["inHead"].processStartTag(token) - for node in self.tree.openElements[::-1]: - if node.name == "head": - self.tree.openElements.remove(node) - break - - def startTagHead(self, token): - self.parser.parseError("unexpected-start-tag", {"name": 
token["name"]}) - - def startTagOther(self, token): - self.anythingElse() - return token - - def endTagHtmlBodyBr(self, token): - self.anythingElse() - return token - - def endTagOther(self, token): - self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) - - def anythingElse(self): - self.tree.insertElement(impliedTagToken("body", "StartTag")) - self.parser.phase = self.parser.phases["inBody"] - self.parser.framesetOK = True - - class InBodyPhase(Phase): - # http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody - # the really-really-really-very crazy mode - def __init__(self, parser, tree): - Phase.__init__(self, parser, tree) - - # Set this to the default handler - self.processSpaceCharacters = self.processSpaceCharactersNonPre - - self.startTagHandler = _utils.MethodDispatcher([ - ("html", self.startTagHtml), - (("base", "basefont", "bgsound", "command", "link", "meta", - "script", "style", "title"), - self.startTagProcessInHead), - ("body", self.startTagBody), - ("frameset", self.startTagFrameset), - (("address", "article", "aside", "blockquote", "center", "details", - "dir", "div", "dl", "fieldset", "figcaption", "figure", - "footer", "header", "hgroup", "main", "menu", "nav", "ol", "p", - "section", "summary", "ul"), - self.startTagCloseP), - (headingElements, self.startTagHeading), - (("pre", "listing"), self.startTagPreListing), - ("form", self.startTagForm), - (("li", "dd", "dt"), self.startTagListItem), - ("plaintext", self.startTagPlaintext), - ("a", self.startTagA), - (("b", "big", "code", "em", "font", "i", "s", "small", "strike", - "strong", "tt", "u"), self.startTagFormatting), - ("nobr", self.startTagNobr), - ("button", self.startTagButton), - (("applet", "marquee", "object"), self.startTagAppletMarqueeObject), - ("xmp", self.startTagXmp), - ("table", self.startTagTable), - (("area", "br", "embed", "img", "keygen", "wbr"), - self.startTagVoidFormatting), - (("param", "source", "track"), self.startTagParamSource), - 
("input", self.startTagInput), - ("hr", self.startTagHr), - ("image", self.startTagImage), - ("isindex", self.startTagIsIndex), - ("textarea", self.startTagTextarea), - ("iframe", self.startTagIFrame), - ("noscript", self.startTagNoscript), - (("noembed", "noframes"), self.startTagRawtext), - ("select", self.startTagSelect), - (("rp", "rt"), self.startTagRpRt), - (("option", "optgroup"), self.startTagOpt), - (("math"), self.startTagMath), - (("svg"), self.startTagSvg), - (("caption", "col", "colgroup", "frame", "head", - "tbody", "td", "tfoot", "th", "thead", - "tr"), self.startTagMisplaced) - ]) - self.startTagHandler.default = self.startTagOther - - self.endTagHandler = _utils.MethodDispatcher([ - ("body", self.endTagBody), - ("html", self.endTagHtml), - (("address", "article", "aside", "blockquote", "button", "center", - "details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure", - "footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre", - "section", "summary", "ul"), self.endTagBlock), - ("form", self.endTagForm), - ("p", self.endTagP), - (("dd", "dt", "li"), self.endTagListItem), - (headingElements, self.endTagHeading), - (("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small", - "strike", "strong", "tt", "u"), self.endTagFormatting), - (("applet", "marquee", "object"), self.endTagAppletMarqueeObject), - ("br", self.endTagBr), - ]) - self.endTagHandler.default = self.endTagOther - - def isMatchingFormattingElement(self, node1, node2): - return (node1.name == node2.name and - node1.namespace == node2.namespace and - node1.attributes == node2.attributes) - - # helper - def addFormattingElement(self, token): - self.tree.insertElement(token) - element = self.tree.openElements[-1] - - matchingElements = [] - for node in self.tree.activeFormattingElements[::-1]: - if node is Marker: - break - elif self.isMatchingFormattingElement(node, element): - matchingElements.append(node) - - assert len(matchingElements) <= 3 - if 
len(matchingElements) == 3: - self.tree.activeFormattingElements.remove(matchingElements[-1]) - self.tree.activeFormattingElements.append(element) - - # the real deal - def processEOF(self): - allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td", - "tfoot", "th", "thead", "tr", "body", - "html")) - for node in self.tree.openElements[::-1]: - if node.name not in allowed_elements: - self.parser.parseError("expected-closing-tag-but-got-eof") - break - # Stop parsing - - def processSpaceCharactersDropNewline(self, token): - # Sometimes (start of <pre>, <listing>, and <textarea> blocks) we - # want to drop leading newlines - data = token["data"] - self.processSpaceCharacters = self.processSpaceCharactersNonPre - if (data.startswith("\n") and - self.tree.openElements[-1].name in ("pre", "listing", "textarea") and - not self.tree.openElements[-1].hasContent()): - data = data[1:] - if data: - self.tree.reconstructActiveFormattingElements() - self.tree.insertText(data) - - def processCharacters(self, token): - if token["data"] == "\u0000": - # The tokenizer should always emit null on its own - return - self.tree.reconstructActiveFormattingElements() - self.tree.insertText(token["data"]) - # This must be bad for performance - if (self.parser.framesetOK and - any([char not in spaceCharacters - for char in token["data"]])): - self.parser.framesetOK = False - - def processSpaceCharactersNonPre(self, token): - self.tree.reconstructActiveFormattingElements() - self.tree.insertText(token["data"]) - - def startTagProcessInHead(self, token): - return self.parser.phases["inHead"].processStartTag(token) - - def startTagBody(self, token): - self.parser.parseError("unexpected-start-tag", {"name": "body"}) - if (len(self.tree.openElements) == 1 or - self.tree.openElements[1].name != "body"): - assert self.parser.innerHTML - else: - self.parser.framesetOK = False - for attr, value in token["data"].items(): - if attr not in self.tree.openElements[1].attributes: - 
self.tree.openElements[1].attributes[attr] = value - - def startTagFrameset(self, token): - self.parser.parseError("unexpected-start-tag", {"name": "frameset"}) - if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"): - assert self.parser.innerHTML - elif not self.parser.framesetOK: - pass - else: - if self.tree.openElements[1].parent: - self.tree.openElements[1].parent.removeChild(self.tree.openElements[1]) - while self.tree.openElements[-1].name != "html": - self.tree.openElements.pop() - self.tree.insertElement(token) - self.parser.phase = self.parser.phases["inFrameset"] - - def startTagCloseP(self, token): - if self.tree.elementInScope("p", variant="button"): - self.endTagP(impliedTagToken("p")) - self.tree.insertElement(token) - - def startTagPreListing(self, token): - if self.tree.elementInScope("p", variant="button"): - self.endTagP(impliedTagToken("p")) - self.tree.insertElement(token) - self.parser.framesetOK = False - self.processSpaceCharacters = self.processSpaceCharactersDropNewline - - def startTagForm(self, token): - if self.tree.formPointer: - self.parser.parseError("unexpected-start-tag", {"name": "form"}) - else: - if self.tree.elementInScope("p", variant="button"): - self.endTagP(impliedTagToken("p")) - self.tree.insertElement(token) - self.tree.formPointer = self.tree.openElements[-1] - - def startTagListItem(self, token): - self.parser.framesetOK = False - - stopNamesMap = {"li": ["li"], - "dt": ["dt", "dd"], - "dd": ["dt", "dd"]} - stopNames = stopNamesMap[token["name"]] - for node in reversed(self.tree.openElements): - if node.name in stopNames: - self.parser.phase.processEndTag( - impliedTagToken(node.name, "EndTag")) - break - if (node.nameTuple in specialElements and - node.name not in ("address", "div", "p")): - break - - if self.tree.elementInScope("p", variant="button"): - self.parser.phase.processEndTag( - impliedTagToken("p", "EndTag")) - - self.tree.insertElement(token) - - def startTagPlaintext(self, 
token): - if self.tree.elementInScope("p", variant="button"): - self.endTagP(impliedTagToken("p")) - self.tree.insertElement(token) - self.parser.tokenizer.state = self.parser.tokenizer.plaintextState - - def startTagHeading(self, token): - if self.tree.elementInScope("p", variant="button"): - self.endTagP(impliedTagToken("p")) - if self.tree.openElements[-1].name in headingElements: - self.parser.parseError("unexpected-start-tag", {"name": token["name"]}) - self.tree.openElements.pop() - self.tree.insertElement(token) - - def startTagA(self, token): - afeAElement = self.tree.elementInActiveFormattingElements("a") - if afeAElement: - self.parser.parseError("unexpected-start-tag-implies-end-tag", - {"startName": "a", "endName": "a"}) - self.endTagFormatting(impliedTagToken("a")) - if afeAElement in self.tree.openElements: - self.tree.openElements.remove(afeAElement) - if afeAElement in self.tree.activeFormattingElements: - self.tree.activeFormattingElements.remove(afeAElement) - self.tree.reconstructActiveFormattingElements() - self.addFormattingElement(token) - - def startTagFormatting(self, token): - self.tree.reconstructActiveFormattingElements() - self.addFormattingElement(token) - - def startTagNobr(self, token): - self.tree.reconstructActiveFormattingElements() - if self.tree.elementInScope("nobr"): - self.parser.parseError("unexpected-start-tag-implies-end-tag", - {"startName": "nobr", "endName": "nobr"}) - self.processEndTag(impliedTagToken("nobr")) - # XXX Need tests that trigger the following - self.tree.reconstructActiveFormattingElements() - self.addFormattingElement(token) - - def startTagButton(self, token): - if self.tree.elementInScope("button"): - self.parser.parseError("unexpected-start-tag-implies-end-tag", - {"startName": "button", "endName": "button"}) - self.processEndTag(impliedTagToken("button")) - return token - else: - self.tree.reconstructActiveFormattingElements() - self.tree.insertElement(token) - self.parser.framesetOK = False - - def 
startTagAppletMarqueeObject(self, token): - self.tree.reconstructActiveFormattingElements() - self.tree.insertElement(token) - self.tree.activeFormattingElements.append(Marker) - self.parser.framesetOK = False - - def startTagXmp(self, token): - if self.tree.elementInScope("p", variant="button"): - self.endTagP(impliedTagToken("p")) - self.tree.reconstructActiveFormattingElements() - self.parser.framesetOK = False - self.parser.parseRCDataRawtext(token, "RAWTEXT") - - def startTagTable(self, token): - if self.parser.compatMode != "quirks": - if self.tree.elementInScope("p", variant="button"): - self.processEndTag(impliedTagToken("p")) - self.tree.insertElement(token) - self.parser.framesetOK = False - self.parser.phase = self.parser.phases["inTable"] - - def startTagVoidFormatting(self, token): - self.tree.reconstructActiveFormattingElements() - self.tree.insertElement(token) - self.tree.openElements.pop() - token["selfClosingAcknowledged"] = True - self.parser.framesetOK = False - - def startTagInput(self, token): - framesetOK = self.parser.framesetOK - self.startTagVoidFormatting(token) - if ("type" in token["data"] and - token["data"]["type"].translate(asciiUpper2Lower) == "hidden"): - # input type=hidden doesn't change framesetOK - self.parser.framesetOK = framesetOK - - def startTagParamSource(self, token): - self.tree.insertElement(token) - self.tree.openElements.pop() - token["selfClosingAcknowledged"] = True - - def startTagHr(self, token): - if self.tree.elementInScope("p", variant="button"): - self.endTagP(impliedTagToken("p")) - self.tree.insertElement(token) - self.tree.openElements.pop() - token["selfClosingAcknowledged"] = True - self.parser.framesetOK = False - - def startTagImage(self, token): - # No really... 
- self.parser.parseError("unexpected-start-tag-treated-as", - {"originalName": "image", "newName": "img"}) - self.processStartTag(impliedTagToken("img", "StartTag", - attributes=token["data"], - selfClosing=token["selfClosing"])) - - def startTagIsIndex(self, token): - self.parser.parseError("deprecated-tag", {"name": "isindex"}) - if self.tree.formPointer: - return - form_attrs = {} - if "action" in token["data"]: - form_attrs["action"] = token["data"]["action"] - self.processStartTag(impliedTagToken("form", "StartTag", - attributes=form_attrs)) - self.processStartTag(impliedTagToken("hr", "StartTag")) - self.processStartTag(impliedTagToken("label", "StartTag")) - # XXX Localization ... - if "prompt" in token["data"]: - prompt = token["data"]["prompt"] - else: - prompt = "This is a searchable index. Enter search keywords: " - self.processCharacters( - {"type": tokenTypes["Characters"], "data": prompt}) - attributes = token["data"].copy() - if "action" in attributes: - del attributes["action"] - if "prompt" in attributes: - del attributes["prompt"] - attributes["name"] = "isindex" - self.processStartTag(impliedTagToken("input", "StartTag", - attributes=attributes, - selfClosing=token["selfClosing"])) - self.processEndTag(impliedTagToken("label")) - self.processStartTag(impliedTagToken("hr", "StartTag")) - self.processEndTag(impliedTagToken("form")) - - def startTagTextarea(self, token): - self.tree.insertElement(token) - self.parser.tokenizer.state = self.parser.tokenizer.rcdataState - self.processSpaceCharacters = self.processSpaceCharactersDropNewline - self.parser.framesetOK = False - - def startTagIFrame(self, token): - self.parser.framesetOK = False - self.startTagRawtext(token) - - def startTagNoscript(self, token): - if self.parser.scripting: - self.startTagRawtext(token) - else: - self.startTagOther(token) - - def startTagRawtext(self, token): - """iframe, noembed noframes, noscript(if scripting enabled)""" - self.parser.parseRCDataRawtext(token, "RAWTEXT") 
- - def startTagOpt(self, token): - if self.tree.openElements[-1].name == "option": - self.parser.phase.processEndTag(impliedTagToken("option")) - self.tree.reconstructActiveFormattingElements() - self.parser.tree.insertElement(token) - - def startTagSelect(self, token): - self.tree.reconstructActiveFormattingElements() - self.tree.insertElement(token) - self.parser.framesetOK = False - if self.parser.phase in (self.parser.phases["inTable"], - self.parser.phases["inCaption"], - self.parser.phases["inColumnGroup"], - self.parser.phases["inTableBody"], - self.parser.phases["inRow"], - self.parser.phases["inCell"]): - self.parser.phase = self.parser.phases["inSelectInTable"] - else: - self.parser.phase = self.parser.phases["inSelect"] - - def startTagRpRt(self, token): - if self.tree.elementInScope("ruby"): - self.tree.generateImpliedEndTags() - if self.tree.openElements[-1].name != "ruby": - self.parser.parseError() - self.tree.insertElement(token) - - def startTagMath(self, token): - self.tree.reconstructActiveFormattingElements() - self.parser.adjustMathMLAttributes(token) - self.parser.adjustForeignAttributes(token) - token["namespace"] = namespaces["mathml"] - self.tree.insertElement(token) - # Need to get the parse error right for the case where the token - # has a namespace not equal to the xmlns attribute - if token["selfClosing"]: - self.tree.openElements.pop() - token["selfClosingAcknowledged"] = True - - def startTagSvg(self, token): - self.tree.reconstructActiveFormattingElements() - self.parser.adjustSVGAttributes(token) - self.parser.adjustForeignAttributes(token) - token["namespace"] = namespaces["svg"] - self.tree.insertElement(token) - # Need to get the parse error right for the case where the token - # has a namespace not equal to the xmlns attribute - if token["selfClosing"]: - self.tree.openElements.pop() - token["selfClosingAcknowledged"] = True - - def startTagMisplaced(self, token): - """ Elements that should be children of other elements that 
have a - different insertion mode; here they are ignored - "caption", "col", "colgroup", "frame", "frameset", "head", - "option", "optgroup", "tbody", "td", "tfoot", "th", "thead", - "tr", "noscript" - """ - self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]}) - - def startTagOther(self, token): - self.tree.reconstructActiveFormattingElements() - self.tree.insertElement(token) - - def endTagP(self, token): - if not self.tree.elementInScope("p", variant="button"): - self.startTagCloseP(impliedTagToken("p", "StartTag")) - self.parser.parseError("unexpected-end-tag", {"name": "p"}) - self.endTagP(impliedTagToken("p", "EndTag")) - else: - self.tree.generateImpliedEndTags("p") - if self.tree.openElements[-1].name != "p": - self.parser.parseError("unexpected-end-tag", {"name": "p"}) - node = self.tree.openElements.pop() - while node.name != "p": - node = self.tree.openElements.pop() - - def endTagBody(self, token): - if not self.tree.elementInScope("body"): - self.parser.parseError() - return - elif self.tree.openElements[-1].name != "body": - for node in self.tree.openElements[2:]: - if node.name not in frozenset(("dd", "dt", "li", "optgroup", - "option", "p", "rp", "rt", - "tbody", "td", "tfoot", - "th", "thead", "tr", "body", - "html")): - # Not sure this is the correct name for the parse error - self.parser.parseError( - "expected-one-end-tag-but-got-another", - {"gotName": "body", "expectedName": node.name}) - break - self.parser.phase = self.parser.phases["afterBody"] - - def endTagHtml(self, token): - # We repeat the test for the body end tag token being ignored here - if self.tree.elementInScope("body"): - self.endTagBody(impliedTagToken("body")) - return token - - def endTagBlock(self, token): - # Put us back in the right whitespace handling mode - if token["name"] == "pre": - self.processSpaceCharacters = self.processSpaceCharactersNonPre - inScope = self.tree.elementInScope(token["name"]) - if inScope: - 
self.tree.generateImpliedEndTags() - if self.tree.openElements[-1].name != token["name"]: - self.parser.parseError("end-tag-too-early", {"name": token["name"]}) - if inScope: - node = self.tree.openElements.pop() - while node.name != token["name"]: - node = self.tree.openElements.pop() - - def endTagForm(self, token): - node = self.tree.formPointer - self.tree.formPointer = None - if node is None or not self.tree.elementInScope(node): - self.parser.parseError("unexpected-end-tag", - {"name": "form"}) - else: - self.tree.generateImpliedEndTags() - if self.tree.openElements[-1] != node: - self.parser.parseError("end-tag-too-early-ignored", - {"name": "form"}) - self.tree.openElements.remove(node) - - def endTagListItem(self, token): - if token["name"] == "li": - variant = "list" - else: - variant = None - if not self.tree.elementInScope(token["name"], variant=variant): - self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) - else: - self.tree.generateImpliedEndTags(exclude=token["name"]) - if self.tree.openElements[-1].name != token["name"]: - self.parser.parseError( - "end-tag-too-early", - {"name": token["name"]}) - node = self.tree.openElements.pop() - while node.name != token["name"]: - node = self.tree.openElements.pop() - - def endTagHeading(self, token): - for item in headingElements: - if self.tree.elementInScope(item): - self.tree.generateImpliedEndTags() - break - if self.tree.openElements[-1].name != token["name"]: - self.parser.parseError("end-tag-too-early", {"name": token["name"]}) - - for item in headingElements: - if self.tree.elementInScope(item): - item = self.tree.openElements.pop() - while item.name not in headingElements: - item = self.tree.openElements.pop() - break - - def endTagFormatting(self, token): - """The much-feared adoption agency algorithm""" - # http://svn.whatwg.org/webapps/complete.html#adoptionAgency revision 7867 - # XXX Better parseError messages appreciated. 
- - # Step 1 - outerLoopCounter = 0 - - # Step 2 - while outerLoopCounter < 8: - - # Step 3 - outerLoopCounter += 1 - - # Step 4: - - # Let the formatting element be the last element in - # the list of active formatting elements that: - # - is between the end of the list and the last scope - # marker in the list, if any, or the start of the list - # otherwise, and - # - has the same tag name as the token. - formattingElement = self.tree.elementInActiveFormattingElements( - token["name"]) - if (not formattingElement or - (formattingElement in self.tree.openElements and - not self.tree.elementInScope(formattingElement.name))): - # If there is no such node, then abort these steps - # and instead act as described in the "any other - # end tag" entry below. - self.endTagOther(token) - return - - # Otherwise, if there is such a node, but that node is - # not in the stack of open elements, then this is a - # parse error; remove the element from the list, and - # abort these steps. - elif formattingElement not in self.tree.openElements: - self.parser.parseError("adoption-agency-1.2", {"name": token["name"]}) - self.tree.activeFormattingElements.remove(formattingElement) - return - - # Otherwise, if there is such a node, and that node is - # also in the stack of open elements, but the element - # is not in scope, then this is a parse error; ignore - # the token, and abort these steps. - elif not self.tree.elementInScope(formattingElement.name): - self.parser.parseError("adoption-agency-4.4", {"name": token["name"]}) - return - - # Otherwise, there is a formatting element and that - # element is in the stack and is in scope. If the - # element is not the current node, this is a parse - # error. In any case, proceed with the algorithm as - # written in the following steps. 
- else: - if formattingElement != self.tree.openElements[-1]: - self.parser.parseError("adoption-agency-1.3", {"name": token["name"]}) - - # Step 5: - - # Let the furthest block be the topmost node in the - # stack of open elements that is lower in the stack - # than the formatting element, and is an element in - # the special category. There might not be one. - afeIndex = self.tree.openElements.index(formattingElement) - furthestBlock = None - for element in self.tree.openElements[afeIndex:]: - if element.nameTuple in specialElements: - furthestBlock = element - break - - # Step 6: - - # If there is no furthest block, then the UA must - # first pop all the nodes from the bottom of the stack - # of open elements, from the current node up to and - # including the formatting element, then remove the - # formatting element from the list of active - # formatting elements, and finally abort these steps. - if furthestBlock is None: - element = self.tree.openElements.pop() - while element != formattingElement: - element = self.tree.openElements.pop() - self.tree.activeFormattingElements.remove(element) - return - - # Step 7 - commonAncestor = self.tree.openElements[afeIndex - 1] - - # Step 8: - # The bookmark is supposed to help us identify where to reinsert - # nodes in step 15. We have to ensure that we reinsert nodes after - # the node before the active formatting element. 
Note the bookmark - # can move in step 9.7 - bookmark = self.tree.activeFormattingElements.index(formattingElement) - - # Step 9 - lastNode = node = furthestBlock - innerLoopCounter = 0 - - index = self.tree.openElements.index(node) - while innerLoopCounter < 3: - innerLoopCounter += 1 - # Node is element before node in open elements - index -= 1 - node = self.tree.openElements[index] - if node not in self.tree.activeFormattingElements: - self.tree.openElements.remove(node) - continue - # Step 9.6 - if node == formattingElement: - break - # Step 9.7 - if lastNode == furthestBlock: - bookmark = self.tree.activeFormattingElements.index(node) + 1 - # Step 9.8 - clone = node.cloneNode() - # Replace node with clone - self.tree.activeFormattingElements[ - self.tree.activeFormattingElements.index(node)] = clone - self.tree.openElements[ - self.tree.openElements.index(node)] = clone - node = clone - # Step 9.9 - # Remove lastNode from its parents, if any - if lastNode.parent: - lastNode.parent.removeChild(lastNode) - node.appendChild(lastNode) - # Step 9.10 - lastNode = node - - # Step 10 - # Foster parent lastNode if commonAncestor is a - # table, tbody, tfoot, thead, or tr we need to foster - # parent the lastNode - if lastNode.parent: - lastNode.parent.removeChild(lastNode) - - if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")): - parent, insertBefore = self.tree.getTableMisnestedNodePosition() - parent.insertBefore(lastNode, insertBefore) - else: - commonAncestor.appendChild(lastNode) - - # Step 11 - clone = formattingElement.cloneNode() - - # Step 12 - furthestBlock.reparentChildren(clone) - - # Step 13 - furthestBlock.appendChild(clone) - - # Step 14 - self.tree.activeFormattingElements.remove(formattingElement) - self.tree.activeFormattingElements.insert(bookmark, clone) - - # Step 15 - self.tree.openElements.remove(formattingElement) - self.tree.openElements.insert( - self.tree.openElements.index(furthestBlock) + 1, clone) - - def 
endTagAppletMarqueeObject(self, token): - if self.tree.elementInScope(token["name"]): - self.tree.generateImpliedEndTags() - if self.tree.openElements[-1].name != token["name"]: - self.parser.parseError("end-tag-too-early", {"name": token["name"]}) - - if self.tree.elementInScope(token["name"]): - element = self.tree.openElements.pop() - while element.name != token["name"]: - element = self.tree.openElements.pop() - self.tree.clearActiveFormattingElements() - - def endTagBr(self, token): - self.parser.parseError("unexpected-end-tag-treated-as", - {"originalName": "br", "newName": "br element"}) - self.tree.reconstructActiveFormattingElements() - self.tree.insertElement(impliedTagToken("br", "StartTag")) - self.tree.openElements.pop() - - def endTagOther(self, token): - for node in self.tree.openElements[::-1]: - if node.name == token["name"]: - self.tree.generateImpliedEndTags(exclude=token["name"]) - if self.tree.openElements[-1].name != token["name"]: - self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) - while self.tree.openElements.pop() != node: - pass - break - else: - if node.nameTuple in specialElements: - self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) - break - - class TextPhase(Phase): - def __init__(self, parser, tree): - Phase.__init__(self, parser, tree) - self.startTagHandler = _utils.MethodDispatcher([]) - self.startTagHandler.default = self.startTagOther - self.endTagHandler = _utils.MethodDispatcher([ - ("script", self.endTagScript)]) - self.endTagHandler.default = self.endTagOther - - def processCharacters(self, token): - self.tree.insertText(token["data"]) - - def processEOF(self): - self.parser.parseError("expected-named-closing-tag-but-got-eof", - {"name": self.tree.openElements[-1].name}) - self.tree.openElements.pop() - self.parser.phase = self.parser.originalPhase - return True - - def startTagOther(self, token): - assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name'] - 
- def endTagScript(self, token): - node = self.tree.openElements.pop() - assert node.name == "script" - self.parser.phase = self.parser.originalPhase - # The rest of this method is all stuff that only happens if - # document.write works - - def endTagOther(self, token): - self.tree.openElements.pop() - self.parser.phase = self.parser.originalPhase - - class InTablePhase(Phase): - # http://www.whatwg.org/specs/web-apps/current-work/#in-table - def __init__(self, parser, tree): - Phase.__init__(self, parser, tree) - self.startTagHandler = _utils.MethodDispatcher([ - ("html", self.startTagHtml), - ("caption", self.startTagCaption), - ("colgroup", self.startTagColgroup), - ("col", self.startTagCol), - (("tbody", "tfoot", "thead"), self.startTagRowGroup), - (("td", "th", "tr"), self.startTagImplyTbody), - ("table", self.startTagTable), - (("style", "script"), self.startTagStyleScript), - ("input", self.startTagInput), - ("form", self.startTagForm) - ]) - self.startTagHandler.default = self.startTagOther - - self.endTagHandler = _utils.MethodDispatcher([ - ("table", self.endTagTable), - (("body", "caption", "col", "colgroup", "html", "tbody", "td", - "tfoot", "th", "thead", "tr"), self.endTagIgnore) - ]) - self.endTagHandler.default = self.endTagOther - - # helper methods - def clearStackToTableContext(self): - # "clear the stack back to a table context" - while self.tree.openElements[-1].name not in ("table", "html"): - # self.parser.parseError("unexpected-implied-end-tag-in-table", - # {"name": self.tree.openElements[-1].name}) - self.tree.openElements.pop() - # When the current node is <html> it's an innerHTML case - - # processing methods - def processEOF(self): - if self.tree.openElements[-1].name != "html": - self.parser.parseError("eof-in-table") - else: - assert self.parser.innerHTML - # Stop parsing - - def processSpaceCharacters(self, token): - originalPhase = self.parser.phase - self.parser.phase = self.parser.phases["inTableText"] - 
self.parser.phase.originalPhase = originalPhase - self.parser.phase.processSpaceCharacters(token) - - def processCharacters(self, token): - originalPhase = self.parser.phase - self.parser.phase = self.parser.phases["inTableText"] - self.parser.phase.originalPhase = originalPhase - self.parser.phase.processCharacters(token) - - def insertText(self, token): - # If we get here there must be at least one non-whitespace character - # Do the table magic! - self.tree.insertFromTable = True - self.parser.phases["inBody"].processCharacters(token) - self.tree.insertFromTable = False - - def startTagCaption(self, token): - self.clearStackToTableContext() - self.tree.activeFormattingElements.append(Marker) - self.tree.insertElement(token) - self.parser.phase = self.parser.phases["inCaption"] - - def startTagColgroup(self, token): - self.clearStackToTableContext() - self.tree.insertElement(token) - self.parser.phase = self.parser.phases["inColumnGroup"] - - def startTagCol(self, token): - self.startTagColgroup(impliedTagToken("colgroup", "StartTag")) - return token - - def startTagRowGroup(self, token): - self.clearStackToTableContext() - self.tree.insertElement(token) - self.parser.phase = self.parser.phases["inTableBody"] - - def startTagImplyTbody(self, token): - self.startTagRowGroup(impliedTagToken("tbody", "StartTag")) - return token - - def startTagTable(self, token): - self.parser.parseError("unexpected-start-tag-implies-end-tag", - {"startName": "table", "endName": "table"}) - self.parser.phase.processEndTag(impliedTagToken("table")) - if not self.parser.innerHTML: - return token - - def startTagStyleScript(self, token): - return self.parser.phases["inHead"].processStartTag(token) - - def startTagInput(self, token): - if ("type" in token["data"] and - token["data"]["type"].translate(asciiUpper2Lower) == "hidden"): - self.parser.parseError("unexpected-hidden-input-in-table") - self.tree.insertElement(token) - # XXX associate with form - self.tree.openElements.pop() - 
else: - self.startTagOther(token) - - def startTagForm(self, token): - self.parser.parseError("unexpected-form-in-table") - if self.tree.formPointer is None: - self.tree.insertElement(token) - self.tree.formPointer = self.tree.openElements[-1] - self.tree.openElements.pop() - - def startTagOther(self, token): - self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]}) - # Do the table magic! - self.tree.insertFromTable = True - self.parser.phases["inBody"].processStartTag(token) - self.tree.insertFromTable = False - - def endTagTable(self, token): - if self.tree.elementInScope("table", variant="table"): - self.tree.generateImpliedEndTags() - if self.tree.openElements[-1].name != "table": - self.parser.parseError("end-tag-too-early-named", - {"gotName": "table", - "expectedName": self.tree.openElements[-1].name}) - while self.tree.openElements[-1].name != "table": - self.tree.openElements.pop() - self.tree.openElements.pop() - self.parser.resetInsertionMode() - else: - # innerHTML case - assert self.parser.innerHTML - self.parser.parseError() - - def endTagIgnore(self, token): - self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) - - def endTagOther(self, token): - self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]}) - # Do the table magic! 
- self.tree.insertFromTable = True - self.parser.phases["inBody"].processEndTag(token) - self.tree.insertFromTable = False - - class InTableTextPhase(Phase): - def __init__(self, parser, tree): - Phase.__init__(self, parser, tree) - self.originalPhase = None - self.characterTokens = [] - - def flushCharacters(self): - data = "".join([item["data"] for item in self.characterTokens]) - if any([item not in spaceCharacters for item in data]): - token = {"type": tokenTypes["Characters"], "data": data} - self.parser.phases["inTable"].insertText(token) - elif data: - self.tree.insertText(data) - self.characterTokens = [] - - def processComment(self, token): - self.flushCharacters() - self.parser.phase = self.originalPhase - return token - - def processEOF(self): - self.flushCharacters() - self.parser.phase = self.originalPhase - return True - - def processCharacters(self, token): - if token["data"] == "\u0000": - return - self.characterTokens.append(token) - - def processSpaceCharacters(self, token): - # pretty sure we should never reach here - self.characterTokens.append(token) - # assert False - - def processStartTag(self, token): - self.flushCharacters() - self.parser.phase = self.originalPhase - return token - - def processEndTag(self, token): - self.flushCharacters() - self.parser.phase = self.originalPhase - return token - - class InCaptionPhase(Phase): - # http://www.whatwg.org/specs/web-apps/current-work/#in-caption - def __init__(self, parser, tree): - Phase.__init__(self, parser, tree) - - self.startTagHandler = _utils.MethodDispatcher([ - ("html", self.startTagHtml), - (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th", - "thead", "tr"), self.startTagTableElement) - ]) - self.startTagHandler.default = self.startTagOther - - self.endTagHandler = _utils.MethodDispatcher([ - ("caption", self.endTagCaption), - ("table", self.endTagTable), - (("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th", - "thead", "tr"), self.endTagIgnore) - ]) - 
self.endTagHandler.default = self.endTagOther - - def ignoreEndTagCaption(self): - return not self.tree.elementInScope("caption", variant="table") - - def processEOF(self): - self.parser.phases["inBody"].processEOF() - - def processCharacters(self, token): - return self.parser.phases["inBody"].processCharacters(token) - - def startTagTableElement(self, token): - self.parser.parseError() - # XXX Have to duplicate logic here to find out if the tag is ignored - ignoreEndTag = self.ignoreEndTagCaption() - self.parser.phase.processEndTag(impliedTagToken("caption")) - if not ignoreEndTag: - return token - - def startTagOther(self, token): - return self.parser.phases["inBody"].processStartTag(token) - - def endTagCaption(self, token): - if not self.ignoreEndTagCaption(): - # AT this code is quite similar to endTagTable in "InTable" - self.tree.generateImpliedEndTags() - if self.tree.openElements[-1].name != "caption": - self.parser.parseError("expected-one-end-tag-but-got-another", - {"gotName": "caption", - "expectedName": self.tree.openElements[-1].name}) - while self.tree.openElements[-1].name != "caption": - self.tree.openElements.pop() - self.tree.openElements.pop() - self.tree.clearActiveFormattingElements() - self.parser.phase = self.parser.phases["inTable"] - else: - # innerHTML case - assert self.parser.innerHTML - self.parser.parseError() - - def endTagTable(self, token): - self.parser.parseError() - ignoreEndTag = self.ignoreEndTagCaption() - self.parser.phase.processEndTag(impliedTagToken("caption")) - if not ignoreEndTag: - return token - - def endTagIgnore(self, token): - self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) - - def endTagOther(self, token): - return self.parser.phases["inBody"].processEndTag(token) - - class InColumnGroupPhase(Phase): - # http://www.whatwg.org/specs/web-apps/current-work/#in-column - - def __init__(self, parser, tree): - Phase.__init__(self, parser, tree) - - self.startTagHandler = _utils.MethodDispatcher([ 
- ("html", self.startTagHtml), - ("col", self.startTagCol) - ]) - self.startTagHandler.default = self.startTagOther - - self.endTagHandler = _utils.MethodDispatcher([ - ("colgroup", self.endTagColgroup), - ("col", self.endTagCol) - ]) - self.endTagHandler.default = self.endTagOther - - def ignoreEndTagColgroup(self): - return self.tree.openElements[-1].name == "html" - - def processEOF(self): - if self.tree.openElements[-1].name == "html": - assert self.parser.innerHTML - return - else: - ignoreEndTag = self.ignoreEndTagColgroup() - self.endTagColgroup(impliedTagToken("colgroup")) - if not ignoreEndTag: - return True - - def processCharacters(self, token): - ignoreEndTag = self.ignoreEndTagColgroup() - self.endTagColgroup(impliedTagToken("colgroup")) - if not ignoreEndTag: - return token - - def startTagCol(self, token): - self.tree.insertElement(token) - self.tree.openElements.pop() - token["selfClosingAcknowledged"] = True - - def startTagOther(self, token): - ignoreEndTag = self.ignoreEndTagColgroup() - self.endTagColgroup(impliedTagToken("colgroup")) - if not ignoreEndTag: - return token - - def endTagColgroup(self, token): - if self.ignoreEndTagColgroup(): - # innerHTML case - assert self.parser.innerHTML - self.parser.parseError() - else: - self.tree.openElements.pop() - self.parser.phase = self.parser.phases["inTable"] - - def endTagCol(self, token): - self.parser.parseError("no-end-tag", {"name": "col"}) - - def endTagOther(self, token): - ignoreEndTag = self.ignoreEndTagColgroup() - self.endTagColgroup(impliedTagToken("colgroup")) - if not ignoreEndTag: - return token - - class InTableBodyPhase(Phase): - # http://www.whatwg.org/specs/web-apps/current-work/#in-table0 - def __init__(self, parser, tree): - Phase.__init__(self, parser, tree) - self.startTagHandler = _utils.MethodDispatcher([ - ("html", self.startTagHtml), - ("tr", self.startTagTr), - (("td", "th"), self.startTagTableCell), - (("caption", "col", "colgroup", "tbody", "tfoot", "thead"), - 
self.startTagTableOther) - ]) - self.startTagHandler.default = self.startTagOther - - self.endTagHandler = _utils.MethodDispatcher([ - (("tbody", "tfoot", "thead"), self.endTagTableRowGroup), - ("table", self.endTagTable), - (("body", "caption", "col", "colgroup", "html", "td", "th", - "tr"), self.endTagIgnore) - ]) - self.endTagHandler.default = self.endTagOther - - # helper methods - def clearStackToTableBodyContext(self): - while self.tree.openElements[-1].name not in ("tbody", "tfoot", - "thead", "html"): - # self.parser.parseError("unexpected-implied-end-tag-in-table", - # {"name": self.tree.openElements[-1].name}) - self.tree.openElements.pop() - if self.tree.openElements[-1].name == "html": - assert self.parser.innerHTML - - # the rest - def processEOF(self): - self.parser.phases["inTable"].processEOF() - - def processSpaceCharacters(self, token): - return self.parser.phases["inTable"].processSpaceCharacters(token) - - def processCharacters(self, token): - return self.parser.phases["inTable"].processCharacters(token) - - def startTagTr(self, token): - self.clearStackToTableBodyContext() - self.tree.insertElement(token) - self.parser.phase = self.parser.phases["inRow"] - - def startTagTableCell(self, token): - self.parser.parseError("unexpected-cell-in-table-body", - {"name": token["name"]}) - self.startTagTr(impliedTagToken("tr", "StartTag")) - return token - - def startTagTableOther(self, token): - # XXX AT Any ideas on how to share this with endTagTable? 
- if (self.tree.elementInScope("tbody", variant="table") or - self.tree.elementInScope("thead", variant="table") or - self.tree.elementInScope("tfoot", variant="table")): - self.clearStackToTableBodyContext() - self.endTagTableRowGroup( - impliedTagToken(self.tree.openElements[-1].name)) - return token - else: - # innerHTML case - assert self.parser.innerHTML - self.parser.parseError() - - def startTagOther(self, token): - return self.parser.phases["inTable"].processStartTag(token) - - def endTagTableRowGroup(self, token): - if self.tree.elementInScope(token["name"], variant="table"): - self.clearStackToTableBodyContext() - self.tree.openElements.pop() - self.parser.phase = self.parser.phases["inTable"] - else: - self.parser.parseError("unexpected-end-tag-in-table-body", - {"name": token["name"]}) - - def endTagTable(self, token): - if (self.tree.elementInScope("tbody", variant="table") or - self.tree.elementInScope("thead", variant="table") or - self.tree.elementInScope("tfoot", variant="table")): - self.clearStackToTableBodyContext() - self.endTagTableRowGroup( - impliedTagToken(self.tree.openElements[-1].name)) - return token - else: - # innerHTML case - assert self.parser.innerHTML - self.parser.parseError() - - def endTagIgnore(self, token): - self.parser.parseError("unexpected-end-tag-in-table-body", - {"name": token["name"]}) - - def endTagOther(self, token): - return self.parser.phases["inTable"].processEndTag(token) - - class InRowPhase(Phase): - # http://www.whatwg.org/specs/web-apps/current-work/#in-row - def __init__(self, parser, tree): - Phase.__init__(self, parser, tree) - self.startTagHandler = _utils.MethodDispatcher([ - ("html", self.startTagHtml), - (("td", "th"), self.startTagTableCell), - (("caption", "col", "colgroup", "tbody", "tfoot", "thead", - "tr"), self.startTagTableOther) - ]) - self.startTagHandler.default = self.startTagOther - - self.endTagHandler = _utils.MethodDispatcher([ - ("tr", self.endTagTr), - ("table", self.endTagTable), - 
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup), - (("body", "caption", "col", "colgroup", "html", "td", "th"), - self.endTagIgnore) - ]) - self.endTagHandler.default = self.endTagOther - - # helper methods (XXX unify this with other table helper methods) - def clearStackToTableRowContext(self): - while self.tree.openElements[-1].name not in ("tr", "html"): - self.parser.parseError("unexpected-implied-end-tag-in-table-row", - {"name": self.tree.openElements[-1].name}) - self.tree.openElements.pop() - - def ignoreEndTagTr(self): - return not self.tree.elementInScope("tr", variant="table") - - # the rest - def processEOF(self): - self.parser.phases["inTable"].processEOF() - - def processSpaceCharacters(self, token): - return self.parser.phases["inTable"].processSpaceCharacters(token) - - def processCharacters(self, token): - return self.parser.phases["inTable"].processCharacters(token) - - def startTagTableCell(self, token): - self.clearStackToTableRowContext() - self.tree.insertElement(token) - self.parser.phase = self.parser.phases["inCell"] - self.tree.activeFormattingElements.append(Marker) - - def startTagTableOther(self, token): - ignoreEndTag = self.ignoreEndTagTr() - self.endTagTr(impliedTagToken("tr")) - # XXX how are we sure it's always ignored in the innerHTML case? - if not ignoreEndTag: - return token - - def startTagOther(self, token): - return self.parser.phases["inTable"].processStartTag(token) - - def endTagTr(self, token): - if not self.ignoreEndTagTr(): - self.clearStackToTableRowContext() - self.tree.openElements.pop() - self.parser.phase = self.parser.phases["inTableBody"] - else: - # innerHTML case - assert self.parser.innerHTML - self.parser.parseError() - - def endTagTable(self, token): - ignoreEndTag = self.ignoreEndTagTr() - self.endTagTr(impliedTagToken("tr")) - # Reprocess the current tag if the tr end tag was not ignored - # XXX how are we sure it's always ignored in the innerHTML case? 
- if not ignoreEndTag: - return token - - def endTagTableRowGroup(self, token): - if self.tree.elementInScope(token["name"], variant="table"): - self.endTagTr(impliedTagToken("tr")) - return token - else: - self.parser.parseError() - - def endTagIgnore(self, token): - self.parser.parseError("unexpected-end-tag-in-table-row", - {"name": token["name"]}) - - def endTagOther(self, token): - return self.parser.phases["inTable"].processEndTag(token) - - class InCellPhase(Phase): - # http://www.whatwg.org/specs/web-apps/current-work/#in-cell - def __init__(self, parser, tree): - Phase.__init__(self, parser, tree) - self.startTagHandler = _utils.MethodDispatcher([ - ("html", self.startTagHtml), - (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th", - "thead", "tr"), self.startTagTableOther) - ]) - self.startTagHandler.default = self.startTagOther - - self.endTagHandler = _utils.MethodDispatcher([ - (("td", "th"), self.endTagTableCell), - (("body", "caption", "col", "colgroup", "html"), self.endTagIgnore), - (("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply) - ]) - self.endTagHandler.default = self.endTagOther - - # helper - def closeCell(self): - if self.tree.elementInScope("td", variant="table"): - self.endTagTableCell(impliedTagToken("td")) - elif self.tree.elementInScope("th", variant="table"): - self.endTagTableCell(impliedTagToken("th")) - - # the rest - def processEOF(self): - self.parser.phases["inBody"].processEOF() - - def processCharacters(self, token): - return self.parser.phases["inBody"].processCharacters(token) - - def startTagTableOther(self, token): - if (self.tree.elementInScope("td", variant="table") or - self.tree.elementInScope("th", variant="table")): - self.closeCell() - return token - else: - # innerHTML case - assert self.parser.innerHTML - self.parser.parseError() - - def startTagOther(self, token): - return self.parser.phases["inBody"].processStartTag(token) - - def endTagTableCell(self, token): - if 
self.tree.elementInScope(token["name"], variant="table"): - self.tree.generateImpliedEndTags(token["name"]) - if self.tree.openElements[-1].name != token["name"]: - self.parser.parseError("unexpected-cell-end-tag", - {"name": token["name"]}) - while True: - node = self.tree.openElements.pop() - if node.name == token["name"]: - break - else: - self.tree.openElements.pop() - self.tree.clearActiveFormattingElements() - self.parser.phase = self.parser.phases["inRow"] - else: - self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) - - def endTagIgnore(self, token): - self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) - - def endTagImply(self, token): - if self.tree.elementInScope(token["name"], variant="table"): - self.closeCell() - return token - else: - # sometimes innerHTML case - self.parser.parseError() - - def endTagOther(self, token): - return self.parser.phases["inBody"].processEndTag(token) - - class InSelectPhase(Phase): - def __init__(self, parser, tree): - Phase.__init__(self, parser, tree) - - self.startTagHandler = _utils.MethodDispatcher([ - ("html", self.startTagHtml), - ("option", self.startTagOption), - ("optgroup", self.startTagOptgroup), - ("select", self.startTagSelect), - (("input", "keygen", "textarea"), self.startTagInput), - ("script", self.startTagScript) - ]) - self.startTagHandler.default = self.startTagOther - - self.endTagHandler = _utils.MethodDispatcher([ - ("option", self.endTagOption), - ("optgroup", self.endTagOptgroup), - ("select", self.endTagSelect) - ]) - self.endTagHandler.default = self.endTagOther - - # http://www.whatwg.org/specs/web-apps/current-work/#in-select - def processEOF(self): - if self.tree.openElements[-1].name != "html": - self.parser.parseError("eof-in-select") - else: - assert self.parser.innerHTML - - def processCharacters(self, token): - if token["data"] == "\u0000": - return - self.tree.insertText(token["data"]) - - def startTagOption(self, token): - # We need to imply 
</option> if <option> is the current node. - if self.tree.openElements[-1].name == "option": - self.tree.openElements.pop() - self.tree.insertElement(token) - - def startTagOptgroup(self, token): - if self.tree.openElements[-1].name == "option": - self.tree.openElements.pop() - if self.tree.openElements[-1].name == "optgroup": - self.tree.openElements.pop() - self.tree.insertElement(token) - - def startTagSelect(self, token): - self.parser.parseError("unexpected-select-in-select") - self.endTagSelect(impliedTagToken("select")) - - def startTagInput(self, token): - self.parser.parseError("unexpected-input-in-select") - if self.tree.elementInScope("select", variant="select"): - self.endTagSelect(impliedTagToken("select")) - return token - else: - assert self.parser.innerHTML - - def startTagScript(self, token): - return self.parser.phases["inHead"].processStartTag(token) - - def startTagOther(self, token): - self.parser.parseError("unexpected-start-tag-in-select", - {"name": token["name"]}) - - def endTagOption(self, token): - if self.tree.openElements[-1].name == "option": - self.tree.openElements.pop() - else: - self.parser.parseError("unexpected-end-tag-in-select", - {"name": "option"}) - - def endTagOptgroup(self, token): - # </optgroup> implicitly closes <option> - if (self.tree.openElements[-1].name == "option" and - self.tree.openElements[-2].name == "optgroup"): - self.tree.openElements.pop() - # It also closes </optgroup> - if self.tree.openElements[-1].name == "optgroup": - self.tree.openElements.pop() - # But nothing else - else: - self.parser.parseError("unexpected-end-tag-in-select", - {"name": "optgroup"}) - - def endTagSelect(self, token): - if self.tree.elementInScope("select", variant="select"): - node = self.tree.openElements.pop() - while node.name != "select": - node = self.tree.openElements.pop() - self.parser.resetInsertionMode() - else: - # innerHTML case - assert self.parser.innerHTML - self.parser.parseError() - - def endTagOther(self, 
token): - self.parser.parseError("unexpected-end-tag-in-select", - {"name": token["name"]}) - - class InSelectInTablePhase(Phase): - def __init__(self, parser, tree): - Phase.__init__(self, parser, tree) - - self.startTagHandler = _utils.MethodDispatcher([ - (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"), - self.startTagTable) - ]) - self.startTagHandler.default = self.startTagOther - - self.endTagHandler = _utils.MethodDispatcher([ - (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"), - self.endTagTable) - ]) - self.endTagHandler.default = self.endTagOther - - def processEOF(self): - self.parser.phases["inSelect"].processEOF() - - def processCharacters(self, token): - return self.parser.phases["inSelect"].processCharacters(token) - - def startTagTable(self, token): - self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]}) - self.endTagOther(impliedTagToken("select")) - return token - - def startTagOther(self, token): - return self.parser.phases["inSelect"].processStartTag(token) - - def endTagTable(self, token): - self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]}) - if self.tree.elementInScope(token["name"], variant="table"): - self.endTagOther(impliedTagToken("select")) - return token - - def endTagOther(self, token): - return self.parser.phases["inSelect"].processEndTag(token) - - class InForeignContentPhase(Phase): - breakoutElements = frozenset(["b", "big", "blockquote", "body", "br", - "center", "code", "dd", "div", "dl", "dt", - "em", "embed", "h1", "h2", "h3", - "h4", "h5", "h6", "head", "hr", "i", "img", - "li", "listing", "menu", "meta", "nobr", - "ol", "p", "pre", "ruby", "s", "small", - "span", "strong", "strike", "sub", "sup", - "table", "tt", "u", "ul", "var"]) - - def __init__(self, parser, tree): - Phase.__init__(self, parser, tree) - - def adjustSVGTagNames(self, token): - replacements = {"altglyph": "altGlyph", - 
"altglyphdef": "altGlyphDef", - "altglyphitem": "altGlyphItem", - "animatecolor": "animateColor", - "animatemotion": "animateMotion", - "animatetransform": "animateTransform", - "clippath": "clipPath", - "feblend": "feBlend", - "fecolormatrix": "feColorMatrix", - "fecomponenttransfer": "feComponentTransfer", - "fecomposite": "feComposite", - "feconvolvematrix": "feConvolveMatrix", - "fediffuselighting": "feDiffuseLighting", - "fedisplacementmap": "feDisplacementMap", - "fedistantlight": "feDistantLight", - "feflood": "feFlood", - "fefunca": "feFuncA", - "fefuncb": "feFuncB", - "fefuncg": "feFuncG", - "fefuncr": "feFuncR", - "fegaussianblur": "feGaussianBlur", - "feimage": "feImage", - "femerge": "feMerge", - "femergenode": "feMergeNode", - "femorphology": "feMorphology", - "feoffset": "feOffset", - "fepointlight": "fePointLight", - "fespecularlighting": "feSpecularLighting", - "fespotlight": "feSpotLight", - "fetile": "feTile", - "feturbulence": "feTurbulence", - "foreignobject": "foreignObject", - "glyphref": "glyphRef", - "lineargradient": "linearGradient", - "radialgradient": "radialGradient", - "textpath": "textPath"} - - if token["name"] in replacements: - token["name"] = replacements[token["name"]] - - def processCharacters(self, token): - if token["data"] == "\u0000": - token["data"] = "\uFFFD" - elif (self.parser.framesetOK and - any(char not in spaceCharacters for char in token["data"])): - self.parser.framesetOK = False - Phase.processCharacters(self, token) - - def processStartTag(self, token): - currentNode = self.tree.openElements[-1] - if (token["name"] in self.breakoutElements or - (token["name"] == "font" and - set(token["data"].keys()) & set(["color", "face", "size"]))): - self.parser.parseError("unexpected-html-element-in-foreign-content", - {"name": token["name"]}) - while (self.tree.openElements[-1].namespace != - self.tree.defaultNamespace and - not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and - not 
self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])): - self.tree.openElements.pop() - return token - - else: - if currentNode.namespace == namespaces["mathml"]: - self.parser.adjustMathMLAttributes(token) - elif currentNode.namespace == namespaces["svg"]: - self.adjustSVGTagNames(token) - self.parser.adjustSVGAttributes(token) - self.parser.adjustForeignAttributes(token) - token["namespace"] = currentNode.namespace - self.tree.insertElement(token) - if token["selfClosing"]: - self.tree.openElements.pop() - token["selfClosingAcknowledged"] = True - - def processEndTag(self, token): - nodeIndex = len(self.tree.openElements) - 1 - node = self.tree.openElements[-1] - if node.name.translate(asciiUpper2Lower) != token["name"]: - self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) - - while True: - if node.name.translate(asciiUpper2Lower) == token["name"]: - # XXX this isn't in the spec but it seems necessary - if self.parser.phase == self.parser.phases["inTableText"]: - self.parser.phase.flushCharacters() - self.parser.phase = self.parser.phase.originalPhase - while self.tree.openElements.pop() != node: - assert self.tree.openElements - new_token = None - break - nodeIndex -= 1 - - node = self.tree.openElements[nodeIndex] - if node.namespace != self.tree.defaultNamespace: - continue - else: - new_token = self.parser.phase.processEndTag(token) - break - return new_token - - class AfterBodyPhase(Phase): - def __init__(self, parser, tree): - Phase.__init__(self, parser, tree) - - self.startTagHandler = _utils.MethodDispatcher([ - ("html", self.startTagHtml) - ]) - self.startTagHandler.default = self.startTagOther - - self.endTagHandler = _utils.MethodDispatcher([("html", self.endTagHtml)]) - self.endTagHandler.default = self.endTagOther - - def processEOF(self): - # Stop parsing - pass - - def processComment(self, token): - # This is needed because data is to be appended to the <html> element - # here and not to whatever is currently 
open. - self.tree.insertComment(token, self.tree.openElements[0]) - - def processCharacters(self, token): - self.parser.parseError("unexpected-char-after-body") - self.parser.phase = self.parser.phases["inBody"] - return token - - def startTagHtml(self, token): - return self.parser.phases["inBody"].processStartTag(token) - - def startTagOther(self, token): - self.parser.parseError("unexpected-start-tag-after-body", - {"name": token["name"]}) - self.parser.phase = self.parser.phases["inBody"] - return token - - def endTagHtml(self, name): - if self.parser.innerHTML: - self.parser.parseError("unexpected-end-tag-after-body-innerhtml") - else: - self.parser.phase = self.parser.phases["afterAfterBody"] - - def endTagOther(self, token): - self.parser.parseError("unexpected-end-tag-after-body", - {"name": token["name"]}) - self.parser.phase = self.parser.phases["inBody"] - return token - - class InFramesetPhase(Phase): - # http://www.whatwg.org/specs/web-apps/current-work/#in-frameset - def __init__(self, parser, tree): - Phase.__init__(self, parser, tree) - - self.startTagHandler = _utils.MethodDispatcher([ - ("html", self.startTagHtml), - ("frameset", self.startTagFrameset), - ("frame", self.startTagFrame), - ("noframes", self.startTagNoframes) - ]) - self.startTagHandler.default = self.startTagOther - - self.endTagHandler = _utils.MethodDispatcher([ - ("frameset", self.endTagFrameset) - ]) - self.endTagHandler.default = self.endTagOther - - def processEOF(self): - if self.tree.openElements[-1].name != "html": - self.parser.parseError("eof-in-frameset") - else: - assert self.parser.innerHTML - - def processCharacters(self, token): - self.parser.parseError("unexpected-char-in-frameset") - - def startTagFrameset(self, token): - self.tree.insertElement(token) - - def startTagFrame(self, token): - self.tree.insertElement(token) - self.tree.openElements.pop() - - def startTagNoframes(self, token): - return self.parser.phases["inBody"].processStartTag(token) - - def 
startTagOther(self, token): - self.parser.parseError("unexpected-start-tag-in-frameset", - {"name": token["name"]}) - - def endTagFrameset(self, token): - if self.tree.openElements[-1].name == "html": - # innerHTML case - self.parser.parseError("unexpected-frameset-in-frameset-innerhtml") - else: - self.tree.openElements.pop() - if (not self.parser.innerHTML and - self.tree.openElements[-1].name != "frameset"): - # If we're not in innerHTML mode and the current node is not a - # "frameset" element (anymore) then switch. - self.parser.phase = self.parser.phases["afterFrameset"] - - def endTagOther(self, token): - self.parser.parseError("unexpected-end-tag-in-frameset", - {"name": token["name"]}) - - class AfterFramesetPhase(Phase): - # http://www.whatwg.org/specs/web-apps/current-work/#after3 - def __init__(self, parser, tree): - Phase.__init__(self, parser, tree) - - self.startTagHandler = _utils.MethodDispatcher([ - ("html", self.startTagHtml), - ("noframes", self.startTagNoframes) - ]) - self.startTagHandler.default = self.startTagOther - - self.endTagHandler = _utils.MethodDispatcher([ - ("html", self.endTagHtml) - ]) - self.endTagHandler.default = self.endTagOther - - def processEOF(self): - # Stop parsing - pass - - def processCharacters(self, token): - self.parser.parseError("unexpected-char-after-frameset") - - def startTagNoframes(self, token): - return self.parser.phases["inHead"].processStartTag(token) - - def startTagOther(self, token): - self.parser.parseError("unexpected-start-tag-after-frameset", - {"name": token["name"]}) - - def endTagHtml(self, token): - self.parser.phase = self.parser.phases["afterAfterFrameset"] - - def endTagOther(self, token): - self.parser.parseError("unexpected-end-tag-after-frameset", - {"name": token["name"]}) - - class AfterAfterBodyPhase(Phase): - def __init__(self, parser, tree): - Phase.__init__(self, parser, tree) - - self.startTagHandler = _utils.MethodDispatcher([ - ("html", self.startTagHtml) - ]) - 
self.startTagHandler.default = self.startTagOther - - def processEOF(self): - pass - - def processComment(self, token): - self.tree.insertComment(token, self.tree.document) - - def processSpaceCharacters(self, token): - return self.parser.phases["inBody"].processSpaceCharacters(token) - - def processCharacters(self, token): - self.parser.parseError("expected-eof-but-got-char") - self.parser.phase = self.parser.phases["inBody"] - return token - - def startTagHtml(self, token): - return self.parser.phases["inBody"].processStartTag(token) - - def startTagOther(self, token): - self.parser.parseError("expected-eof-but-got-start-tag", - {"name": token["name"]}) - self.parser.phase = self.parser.phases["inBody"] - return token - - def processEndTag(self, token): - self.parser.parseError("expected-eof-but-got-end-tag", - {"name": token["name"]}) - self.parser.phase = self.parser.phases["inBody"] - return token - - class AfterAfterFramesetPhase(Phase): - def __init__(self, parser, tree): - Phase.__init__(self, parser, tree) - - self.startTagHandler = _utils.MethodDispatcher([ - ("html", self.startTagHtml), - ("noframes", self.startTagNoFrames) - ]) - self.startTagHandler.default = self.startTagOther - - def processEOF(self): - pass - - def processComment(self, token): - self.tree.insertComment(token, self.tree.document) - - def processSpaceCharacters(self, token): - return self.parser.phases["inBody"].processSpaceCharacters(token) - - def processCharacters(self, token): - self.parser.parseError("expected-eof-but-got-char") - - def startTagHtml(self, token): - return self.parser.phases["inBody"].processStartTag(token) - - def startTagNoFrames(self, token): - return self.parser.phases["inHead"].processStartTag(token) - - def startTagOther(self, token): - self.parser.parseError("expected-eof-but-got-start-tag", - {"name": token["name"]}) - - def processEndTag(self, token): - self.parser.parseError("expected-eof-but-got-end-tag", - {"name": token["name"]}) - # 
pylint:enable=unused-argument - - return { - "initial": InitialPhase, - "beforeHtml": BeforeHtmlPhase, - "beforeHead": BeforeHeadPhase, - "inHead": InHeadPhase, - "inHeadNoscript": InHeadNoscriptPhase, - "afterHead": AfterHeadPhase, - "inBody": InBodyPhase, - "text": TextPhase, - "inTable": InTablePhase, - "inTableText": InTableTextPhase, - "inCaption": InCaptionPhase, - "inColumnGroup": InColumnGroupPhase, - "inTableBody": InTableBodyPhase, - "inRow": InRowPhase, - "inCell": InCellPhase, - "inSelect": InSelectPhase, - "inSelectInTable": InSelectInTablePhase, - "inForeignContent": InForeignContentPhase, - "afterBody": AfterBodyPhase, - "inFrameset": InFramesetPhase, - "afterFrameset": AfterFramesetPhase, - "afterAfterBody": AfterAfterBodyPhase, - "afterAfterFrameset": AfterAfterFramesetPhase, - # XXX after after frameset - } - - -def adjust_attributes(token, replacements): - needs_adjustment = viewkeys(token['data']) & viewkeys(replacements) - if needs_adjustment: - token['data'] = OrderedDict((replacements.get(k, k), v) - for k, v in token['data'].items()) - - -def impliedTagToken(name, type="EndTag", attributes=None, - selfClosing=False): - if attributes is None: - attributes = {} - return {"type": tokenTypes[type], "name": name, "data": attributes, - "selfClosing": selfClosing} - - -class ParseError(Exception): - """Error in parsed document""" - pass diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/serializer.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/serializer.py deleted file mode 100644 index 53f4d44..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/serializer.py +++ /dev/null @@ -1,409 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals -from pip._vendor.six import text_type - -import re - -from codecs import register_error, xmlcharrefreplace_errors - -from .constants import voidElements, booleanAttributes, 
spaceCharacters -from .constants import rcdataElements, entities, xmlEntities -from . import treewalkers, _utils -from xml.sax.saxutils import escape - -_quoteAttributeSpecChars = "".join(spaceCharacters) + "\"'=<>`" -_quoteAttributeSpec = re.compile("[" + _quoteAttributeSpecChars + "]") -_quoteAttributeLegacy = re.compile("[" + _quoteAttributeSpecChars + - "\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n" - "\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15" - "\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" - "\x20\x2f\x60\xa0\u1680\u180e\u180f\u2000" - "\u2001\u2002\u2003\u2004\u2005\u2006\u2007" - "\u2008\u2009\u200a\u2028\u2029\u202f\u205f" - "\u3000]") - - -_encode_entity_map = {} -_is_ucs4 = len("\U0010FFFF") == 1 -for k, v in list(entities.items()): - # skip multi-character entities - if ((_is_ucs4 and len(v) > 1) or - (not _is_ucs4 and len(v) > 2)): - continue - if v != "&": - if len(v) == 2: - v = _utils.surrogatePairToCodepoint(v) - else: - v = ord(v) - if v not in _encode_entity_map or k.islower(): - # prefer < over < and similarly for &, >, etc. 
- _encode_entity_map[v] = k - - -def htmlentityreplace_errors(exc): - if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)): - res = [] - codepoints = [] - skip = False - for i, c in enumerate(exc.object[exc.start:exc.end]): - if skip: - skip = False - continue - index = i + exc.start - if _utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]): - codepoint = _utils.surrogatePairToCodepoint(exc.object[index:index + 2]) - skip = True - else: - codepoint = ord(c) - codepoints.append(codepoint) - for cp in codepoints: - e = _encode_entity_map.get(cp) - if e: - res.append("&") - res.append(e) - if not e.endswith(";"): - res.append(";") - else: - res.append("&#x%s;" % (hex(cp)[2:])) - return ("".join(res), exc.end) - else: - return xmlcharrefreplace_errors(exc) - - -register_error("htmlentityreplace", htmlentityreplace_errors) - - -def serialize(input, tree="etree", encoding=None, **serializer_opts): - """Serializes the input token stream using the specified treewalker - - :arg input: the token stream to serialize - - :arg tree: the treewalker to use - - :arg encoding: the encoding to use - - :arg serializer_opts: any options to pass to the - :py:class:`html5lib.serializer.HTMLSerializer` that gets created - - :returns: the tree serialized as a string - - Example: - - >>> from html5lib.html5parser import parse - >>> from html5lib.serializer import serialize - >>> token_stream = parse('<html><body><p>Hi!</p></body></html>') - >>> serialize(token_stream, omit_optional_tags=False) - '<html><head></head><body><p>Hi!</p></body></html>' - - """ - # XXX: Should we cache this? 
- walker = treewalkers.getTreeWalker(tree) - s = HTMLSerializer(**serializer_opts) - return s.render(walker(input), encoding) - - -class HTMLSerializer(object): - - # attribute quoting options - quote_attr_values = "legacy" # be secure by default - quote_char = '"' - use_best_quote_char = True - - # tag syntax options - omit_optional_tags = True - minimize_boolean_attributes = True - use_trailing_solidus = False - space_before_trailing_solidus = True - - # escaping options - escape_lt_in_attrs = False - escape_rcdata = False - resolve_entities = True - - # miscellaneous options - alphabetical_attributes = False - inject_meta_charset = True - strip_whitespace = False - sanitize = False - - options = ("quote_attr_values", "quote_char", "use_best_quote_char", - "omit_optional_tags", "minimize_boolean_attributes", - "use_trailing_solidus", "space_before_trailing_solidus", - "escape_lt_in_attrs", "escape_rcdata", "resolve_entities", - "alphabetical_attributes", "inject_meta_charset", - "strip_whitespace", "sanitize") - - def __init__(self, **kwargs): - """Initialize HTMLSerializer - - :arg inject_meta_charset: Whether or not to inject the meta charset. - - Defaults to ``True``. - - :arg quote_attr_values: Whether to quote attribute values that don't - require quoting per legacy browser behavior (``"legacy"``), when - required by the standard (``"spec"``), or always (``"always"``). - - Defaults to ``"legacy"``. - - :arg quote_char: Use given quote character for attribute quoting. - - Defaults to ``"`` which will use double quotes unless attribute - value contains a double quote, in which case single quotes are - used. - - :arg escape_lt_in_attrs: Whether or not to escape ``<`` in attribute - values. - - Defaults to ``False``. - - :arg escape_rcdata: Whether to escape characters that need to be - escaped within normal elements within rcdata elements such as - style. - - Defaults to ``False``. 
- - :arg resolve_entities: Whether to resolve named character entities that - appear in the source tree. The XML predefined entities < > - & " ' are unaffected by this setting. - - Defaults to ``True``. - - :arg strip_whitespace: Whether to remove semantically meaningless - whitespace. (This compresses all whitespace to a single space - except within ``pre``.) - - Defaults to ``False``. - - :arg minimize_boolean_attributes: Shortens boolean attributes to give - just the attribute value, for example:: - - <input disabled="disabled"> - - becomes:: - - <input disabled> - - Defaults to ``True``. - - :arg use_trailing_solidus: Includes a close-tag slash at the end of the - start tag of void elements (empty elements whose end tag is - forbidden). E.g. ``<hr/>``. - - Defaults to ``False``. - - :arg space_before_trailing_solidus: Places a space immediately before - the closing slash in a tag using a trailing solidus. E.g. - ``<hr />``. Requires ``use_trailing_solidus=True``. - - Defaults to ``True``. - - :arg sanitize: Strip all unsafe or unknown constructs from output. - See :py:class:`html5lib.filters.sanitizer.Filter`. - - Defaults to ``False``. - - :arg omit_optional_tags: Omit start/end tags that are optional. - - Defaults to ``True``. - - :arg alphabetical_attributes: Reorder attributes to be in alphabetical order. - - Defaults to ``False``. 
- - """ - unexpected_args = frozenset(kwargs) - frozenset(self.options) - if len(unexpected_args) > 0: - raise TypeError("__init__() got an unexpected keyword argument '%s'" % next(iter(unexpected_args))) - if 'quote_char' in kwargs: - self.use_best_quote_char = False - for attr in self.options: - setattr(self, attr, kwargs.get(attr, getattr(self, attr))) - self.errors = [] - self.strict = False - - def encode(self, string): - assert(isinstance(string, text_type)) - if self.encoding: - return string.encode(self.encoding, "htmlentityreplace") - else: - return string - - def encodeStrict(self, string): - assert(isinstance(string, text_type)) - if self.encoding: - return string.encode(self.encoding, "strict") - else: - return string - - def serialize(self, treewalker, encoding=None): - # pylint:disable=too-many-nested-blocks - self.encoding = encoding - in_cdata = False - self.errors = [] - - if encoding and self.inject_meta_charset: - from .filters.inject_meta_charset import Filter - treewalker = Filter(treewalker, encoding) - # Alphabetical attributes is here under the assumption that none of - # the later filters add or change order of attributes; it needs to be - # before the sanitizer so escaped elements come out correctly - if self.alphabetical_attributes: - from .filters.alphabeticalattributes import Filter - treewalker = Filter(treewalker) - # WhitespaceFilter should be used before OptionalTagFilter - # for maximum efficiently of this latter filter - if self.strip_whitespace: - from .filters.whitespace import Filter - treewalker = Filter(treewalker) - if self.sanitize: - from .filters.sanitizer import Filter - treewalker = Filter(treewalker) - if self.omit_optional_tags: - from .filters.optionaltags import Filter - treewalker = Filter(treewalker) - - for token in treewalker: - type = token["type"] - if type == "Doctype": - doctype = "<!DOCTYPE %s" % token["name"] - - if token["publicId"]: - doctype += ' PUBLIC "%s"' % token["publicId"] - elif 
token["systemId"]: - doctype += " SYSTEM" - if token["systemId"]: - if token["systemId"].find('"') >= 0: - if token["systemId"].find("'") >= 0: - self.serializeError("System identifer contains both single and double quote characters") - quote_char = "'" - else: - quote_char = '"' - doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char) - - doctype += ">" - yield self.encodeStrict(doctype) - - elif type in ("Characters", "SpaceCharacters"): - if type == "SpaceCharacters" or in_cdata: - if in_cdata and token["data"].find("</") >= 0: - self.serializeError("Unexpected </ in CDATA") - yield self.encode(token["data"]) - else: - yield self.encode(escape(token["data"])) - - elif type in ("StartTag", "EmptyTag"): - name = token["name"] - yield self.encodeStrict("<%s" % name) - if name in rcdataElements and not self.escape_rcdata: - in_cdata = True - elif in_cdata: - self.serializeError("Unexpected child element of a CDATA element") - for (_, attr_name), attr_value in token["data"].items(): - # TODO: Add namespace support here - k = attr_name - v = attr_value - yield self.encodeStrict(' ') - - yield self.encodeStrict(k) - if not self.minimize_boolean_attributes or \ - (k not in booleanAttributes.get(name, tuple()) and - k not in booleanAttributes.get("", tuple())): - yield self.encodeStrict("=") - if self.quote_attr_values == "always" or len(v) == 0: - quote_attr = True - elif self.quote_attr_values == "spec": - quote_attr = _quoteAttributeSpec.search(v) is not None - elif self.quote_attr_values == "legacy": - quote_attr = _quoteAttributeLegacy.search(v) is not None - else: - raise ValueError("quote_attr_values must be one of: " - "'always', 'spec', or 'legacy'") - v = v.replace("&", "&") - if self.escape_lt_in_attrs: - v = v.replace("<", "<") - if quote_attr: - quote_char = self.quote_char - if self.use_best_quote_char: - if "'" in v and '"' not in v: - quote_char = '"' - elif '"' in v and "'" not in v: - quote_char = "'" - if quote_char == "'": - v = 
v.replace("'", "'") - else: - v = v.replace('"', """) - yield self.encodeStrict(quote_char) - yield self.encode(v) - yield self.encodeStrict(quote_char) - else: - yield self.encode(v) - if name in voidElements and self.use_trailing_solidus: - if self.space_before_trailing_solidus: - yield self.encodeStrict(" /") - else: - yield self.encodeStrict("/") - yield self.encode(">") - - elif type == "EndTag": - name = token["name"] - if name in rcdataElements: - in_cdata = False - elif in_cdata: - self.serializeError("Unexpected child element of a CDATA element") - yield self.encodeStrict("</%s>" % name) - - elif type == "Comment": - data = token["data"] - if data.find("--") >= 0: - self.serializeError("Comment contains --") - yield self.encodeStrict("<!--%s-->" % token["data"]) - - elif type == "Entity": - name = token["name"] - key = name + ";" - if key not in entities: - self.serializeError("Entity %s not recognized" % name) - if self.resolve_entities and key not in xmlEntities: - data = entities[key] - else: - data = "&%s;" % name - yield self.encodeStrict(data) - - else: - self.serializeError(token["data"]) - - def render(self, treewalker, encoding=None): - """Serializes the stream from the treewalker into a string - - :arg treewalker: the treewalker to serialize - - :arg encoding: the string encoding to use - - :returns: the serialized tree - - Example: - - >>> from html5lib import parse, getTreeWalker - >>> from html5lib.serializer import HTMLSerializer - >>> token_stream = parse('<html><body>Hi!</body></html>') - >>> walker = getTreeWalker('etree') - >>> serializer = HTMLSerializer(omit_optional_tags=False) - >>> serializer.render(walker(token_stream)) - '<html><head></head><body>Hi!</body></html>' - - """ - if encoding: - return b"".join(list(self.serialize(treewalker, encoding))) - else: - return "".join(list(self.serialize(treewalker))) - - def serializeError(self, data="XXX ERROR MESSAGE NEEDED"): - # XXX The idea is to make data mandatory. 
- self.errors.append(data) - if self.strict: - raise SerializeError - - -class SerializeError(Exception): - """Error in serialized tree""" - pass diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treeadapters/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treeadapters/__init__.py deleted file mode 100644 index 7ef5959..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treeadapters/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -"""Tree adapters let you convert from one tree structure to another - -Example: - -.. code-block:: python - - from pip._vendor import html5lib - from pip._vendor.html5lib.treeadapters import genshi - - doc = '<html><body>Hi!</body></html>' - treebuilder = html5lib.getTreeBuilder('etree') - parser = html5lib.HTMLParser(tree=treebuilder) - tree = parser.parse(doc) - TreeWalker = html5lib.getTreeWalker('etree') - - genshi_tree = genshi.to_genshi(TreeWalker(tree)) - -""" -from __future__ import absolute_import, division, unicode_literals - -from . import sax - -__all__ = ["sax"] - -try: - from . 
import genshi # noqa -except ImportError: - pass -else: - __all__.append("genshi") diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treeadapters/genshi.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treeadapters/genshi.py deleted file mode 100644 index 61d5fb6..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treeadapters/genshi.py +++ /dev/null @@ -1,54 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from genshi.core import QName, Attrs -from genshi.core import START, END, TEXT, COMMENT, DOCTYPE - - -def to_genshi(walker): - """Convert a tree to a genshi tree - - :arg walker: the treewalker to use to walk the tree to convert it - - :returns: generator of genshi nodes - - """ - text = [] - for token in walker: - type = token["type"] - if type in ("Characters", "SpaceCharacters"): - text.append(token["data"]) - elif text: - yield TEXT, "".join(text), (None, -1, -1) - text = [] - - if type in ("StartTag", "EmptyTag"): - if token["namespace"]: - name = "{%s}%s" % (token["namespace"], token["name"]) - else: - name = token["name"] - attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value) - for attr, value in token["data"].items()]) - yield (START, (QName(name), attrs), (None, -1, -1)) - if type == "EmptyTag": - type = "EndTag" - - if type == "EndTag": - if token["namespace"]: - name = "{%s}%s" % (token["namespace"], token["name"]) - else: - name = token["name"] - - yield END, QName(name), (None, -1, -1) - - elif type == "Comment": - yield COMMENT, token["data"], (None, -1, -1) - - elif type == "Doctype": - yield DOCTYPE, (token["name"], token["publicId"], - token["systemId"]), (None, -1, -1) - - else: - pass # FIXME: What to do? 
- - if text: - yield TEXT, "".join(text), (None, -1, -1) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treeadapters/sax.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treeadapters/sax.py deleted file mode 100644 index f4ccea5..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treeadapters/sax.py +++ /dev/null @@ -1,50 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from xml.sax.xmlreader import AttributesNSImpl - -from ..constants import adjustForeignAttributes, unadjustForeignAttributes - -prefix_mapping = {} -for prefix, localName, namespace in adjustForeignAttributes.values(): - if prefix is not None: - prefix_mapping[prefix] = namespace - - -def to_sax(walker, handler): - """Call SAX-like content handler based on treewalker walker - - :arg walker: the treewalker to use to walk the tree to convert it - - :arg handler: SAX handler to use - - """ - handler.startDocument() - for prefix, namespace in prefix_mapping.items(): - handler.startPrefixMapping(prefix, namespace) - - for token in walker: - type = token["type"] - if type == "Doctype": - continue - elif type in ("StartTag", "EmptyTag"): - attrs = AttributesNSImpl(token["data"], - unadjustForeignAttributes) - handler.startElementNS((token["namespace"], token["name"]), - token["name"], - attrs) - if type == "EmptyTag": - handler.endElementNS((token["namespace"], token["name"]), - token["name"]) - elif type == "EndTag": - handler.endElementNS((token["namespace"], token["name"]), - token["name"]) - elif type in ("Characters", "SpaceCharacters"): - handler.characters(token["data"]) - elif type == "Comment": - pass - else: - assert False, "Unknown token type" - - for prefix, namespace in prefix_mapping.items(): - handler.endPrefixMapping(prefix) - handler.endDocument() diff --git 
a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treebuilders/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treebuilders/__init__.py deleted file mode 100644 index d44447e..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treebuilders/__init__.py +++ /dev/null @@ -1,88 +0,0 @@ -"""A collection of modules for building different kinds of trees from HTML -documents. - -To create a treebuilder for a new type of tree, you need to do -implement several things: - -1. A set of classes for various types of elements: Document, Doctype, Comment, - Element. These must implement the interface of ``base.treebuilders.Node`` - (although comment nodes have a different signature for their constructor, - see ``treebuilders.etree.Comment``) Textual content may also be implemented - as another node type, or not, as your tree implementation requires. - -2. A treebuilder object (called ``TreeBuilder`` by convention) that inherits - from ``treebuilders.base.TreeBuilder``. This has 4 required attributes: - - * ``documentClass`` - the class to use for the bottommost node of a document - * ``elementClass`` - the class to use for HTML Elements - * ``commentClass`` - the class to use for comments - * ``doctypeClass`` - the class to use for doctypes - - It also has one required method: - - * ``getDocument`` - Returns the root node of the complete document tree - -3. 
If you wish to run the unit tests, you must also create a ``testSerializer`` - method on your treebuilder which accepts a node and returns a string - containing Node and its children serialized according to the format used in - the unittests - -""" - -from __future__ import absolute_import, division, unicode_literals - -from .._utils import default_etree - -treeBuilderCache = {} - - -def getTreeBuilder(treeType, implementation=None, **kwargs): - """Get a TreeBuilder class for various types of trees with built-in support - - :arg treeType: the name of the tree type required (case-insensitive). Supported - values are: - - * "dom" - A generic builder for DOM implementations, defaulting to a - xml.dom.minidom based implementation. - * "etree" - A generic builder for tree implementations exposing an - ElementTree-like interface, defaulting to xml.etree.cElementTree if - available and xml.etree.ElementTree if not. - * "lxml" - A etree-based builder for lxml.etree, handling limitations - of lxml's implementation. - - :arg implementation: (Currently applies to the "etree" and "dom" tree - types). A module implementing the tree type e.g. xml.etree.ElementTree - or xml.etree.cElementTree. - - :arg kwargs: Any additional options to pass to the TreeBuilder when - creating it. - - Example: - - >>> from html5lib.treebuilders import getTreeBuilder - >>> builder = getTreeBuilder('etree') - - """ - - treeType = treeType.lower() - if treeType not in treeBuilderCache: - if treeType == "dom": - from . import dom - # Come up with a sane default (pref. from the stdlib) - if implementation is None: - from xml.dom import minidom - implementation = minidom - # NEVER cache here, caching is done in the dom submodule - return dom.getDomModule(implementation, **kwargs).TreeBuilder - elif treeType == "lxml": - from . import etree_lxml - treeBuilderCache[treeType] = etree_lxml.TreeBuilder - elif treeType == "etree": - from . 
import etree - if implementation is None: - implementation = default_etree - # NEVER cache here, caching is done in the etree submodule - return etree.getETreeModule(implementation, **kwargs).TreeBuilder - else: - raise ValueError("""Unrecognised treebuilder "%s" """ % treeType) - return treeBuilderCache.get(treeType) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treebuilders/base.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treebuilders/base.py deleted file mode 100644 index 73973db..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treebuilders/base.py +++ /dev/null @@ -1,417 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals -from pip._vendor.six import text_type - -from ..constants import scopingElements, tableInsertModeElements, namespaces - -# The scope markers are inserted when entering object elements, -# marquees, table cells, and table captions, and are used to prevent formatting -# from "leaking" into tables, object elements, and marquees. 
-Marker = None - -listElementsMap = { - None: (frozenset(scopingElements), False), - "button": (frozenset(scopingElements | set([(namespaces["html"], "button")])), False), - "list": (frozenset(scopingElements | set([(namespaces["html"], "ol"), - (namespaces["html"], "ul")])), False), - "table": (frozenset([(namespaces["html"], "html"), - (namespaces["html"], "table")]), False), - "select": (frozenset([(namespaces["html"], "optgroup"), - (namespaces["html"], "option")]), True) -} - - -class Node(object): - """Represents an item in the tree""" - def __init__(self, name): - """Creates a Node - - :arg name: The tag name associated with the node - - """ - # The tag name assocaited with the node - self.name = name - # The parent of the current node (or None for the document node) - self.parent = None - # The value of the current node (applies to text nodes and comments) - self.value = None - # A dict holding name -> value pairs for attributes of the node - self.attributes = {} - # A list of child nodes of the current node. This must include all - # elements but not necessarily other node types. - self.childNodes = [] - # A list of miscellaneous flags that can be set on the node. - self._flags = [] - - def __str__(self): - attributesStr = " ".join(["%s=\"%s\"" % (name, value) - for name, value in - self.attributes.items()]) - if attributesStr: - return "<%s %s>" % (self.name, attributesStr) - else: - return "<%s>" % (self.name) - - def __repr__(self): - return "<%s>" % (self.name) - - def appendChild(self, node): - """Insert node as a child of the current node - - :arg node: the node to insert - - """ - raise NotImplementedError - - def insertText(self, data, insertBefore=None): - """Insert data as text in the current node, positioned before the - start of node insertBefore or to the end of the node's text. 
- - :arg data: the data to insert - - :arg insertBefore: True if you want to insert the text before the node - and False if you want to insert it after the node - - """ - raise NotImplementedError - - def insertBefore(self, node, refNode): - """Insert node as a child of the current node, before refNode in the - list of child nodes. Raises ValueError if refNode is not a child of - the current node - - :arg node: the node to insert - - :arg refNode: the child node to insert the node before - - """ - raise NotImplementedError - - def removeChild(self, node): - """Remove node from the children of the current node - - :arg node: the child node to remove - - """ - raise NotImplementedError - - def reparentChildren(self, newParent): - """Move all the children of the current node to newParent. - This is needed so that trees that don't store text as nodes move the - text in the correct way - - :arg newParent: the node to move all this node's children to - - """ - # XXX - should this method be made more general? - for child in self.childNodes: - newParent.appendChild(child) - self.childNodes = [] - - def cloneNode(self): - """Return a shallow copy of the current node i.e. 
a node with the same - name and attributes but with no parent or child nodes - """ - raise NotImplementedError - - def hasContent(self): - """Return true if the node has children or text, false otherwise - """ - raise NotImplementedError - - -class ActiveFormattingElements(list): - def append(self, node): - equalCount = 0 - if node != Marker: - for element in self[::-1]: - if element == Marker: - break - if self.nodesEqual(element, node): - equalCount += 1 - if equalCount == 3: - self.remove(element) - break - list.append(self, node) - - def nodesEqual(self, node1, node2): - if not node1.nameTuple == node2.nameTuple: - return False - - if not node1.attributes == node2.attributes: - return False - - return True - - -class TreeBuilder(object): - """Base treebuilder implementation - - * documentClass - the class to use for the bottommost node of a document - * elementClass - the class to use for HTML Elements - * commentClass - the class to use for comments - * doctypeClass - the class to use for doctypes - - """ - # pylint:disable=not-callable - - # Document class - documentClass = None - - # The class to use for creating a node - elementClass = None - - # The class to use for creating comments - commentClass = None - - # The class to use for creating doctypes - doctypeClass = None - - # Fragment class - fragmentClass = None - - def __init__(self, namespaceHTMLElements): - """Create a TreeBuilder - - :arg namespaceHTMLElements: whether or not to namespace HTML elements - - """ - if namespaceHTMLElements: - self.defaultNamespace = "http://www.w3.org/1999/xhtml" - else: - self.defaultNamespace = None - self.reset() - - def reset(self): - self.openElements = [] - self.activeFormattingElements = ActiveFormattingElements() - - # XXX - rename these to headElement, formElement - self.headPointer = None - self.formPointer = None - - self.insertFromTable = False - - self.document = self.documentClass() - - def elementInScope(self, target, variant=None): - - # If we pass a 
node in we match that. if we pass a string - # match any node with that name - exactNode = hasattr(target, "nameTuple") - if not exactNode: - if isinstance(target, text_type): - target = (namespaces["html"], target) - assert isinstance(target, tuple) - - listElements, invert = listElementsMap[variant] - - for node in reversed(self.openElements): - if exactNode and node == target: - return True - elif not exactNode and node.nameTuple == target: - return True - elif (invert ^ (node.nameTuple in listElements)): - return False - - assert False # We should never reach this point - - def reconstructActiveFormattingElements(self): - # Within this algorithm the order of steps described in the - # specification is not quite the same as the order of steps in the - # code. It should still do the same though. - - # Step 1: stop the algorithm when there's nothing to do. - if not self.activeFormattingElements: - return - - # Step 2 and step 3: we start with the last element. So i is -1. - i = len(self.activeFormattingElements) - 1 - entry = self.activeFormattingElements[i] - if entry == Marker or entry in self.openElements: - return - - # Step 6 - while entry != Marker and entry not in self.openElements: - if i == 0: - # This will be reset to 0 below - i = -1 - break - i -= 1 - # Step 5: let entry be one earlier in the list. 
- entry = self.activeFormattingElements[i] - - while True: - # Step 7 - i += 1 - - # Step 8 - entry = self.activeFormattingElements[i] - clone = entry.cloneNode() # Mainly to get a new copy of the attributes - - # Step 9 - element = self.insertElement({"type": "StartTag", - "name": clone.name, - "namespace": clone.namespace, - "data": clone.attributes}) - - # Step 10 - self.activeFormattingElements[i] = element - - # Step 11 - if element == self.activeFormattingElements[-1]: - break - - def clearActiveFormattingElements(self): - entry = self.activeFormattingElements.pop() - while self.activeFormattingElements and entry != Marker: - entry = self.activeFormattingElements.pop() - - def elementInActiveFormattingElements(self, name): - """Check if an element exists between the end of the active - formatting elements and the last marker. If it does, return it, else - return false""" - - for item in self.activeFormattingElements[::-1]: - # Check for Marker first because if it's a Marker it doesn't have a - # name attribute. 
- if item == Marker: - break - elif item.name == name: - return item - return False - - def insertRoot(self, token): - element = self.createElement(token) - self.openElements.append(element) - self.document.appendChild(element) - - def insertDoctype(self, token): - name = token["name"] - publicId = token["publicId"] - systemId = token["systemId"] - - doctype = self.doctypeClass(name, publicId, systemId) - self.document.appendChild(doctype) - - def insertComment(self, token, parent=None): - if parent is None: - parent = self.openElements[-1] - parent.appendChild(self.commentClass(token["data"])) - - def createElement(self, token): - """Create an element but don't insert it anywhere""" - name = token["name"] - namespace = token.get("namespace", self.defaultNamespace) - element = self.elementClass(name, namespace) - element.attributes = token["data"] - return element - - def _getInsertFromTable(self): - return self._insertFromTable - - def _setInsertFromTable(self, value): - """Switch the function used to insert an element from the - normal one to the misnested table one and back again""" - self._insertFromTable = value - if value: - self.insertElement = self.insertElementTable - else: - self.insertElement = self.insertElementNormal - - insertFromTable = property(_getInsertFromTable, _setInsertFromTable) - - def insertElementNormal(self, token): - name = token["name"] - assert isinstance(name, text_type), "Element %s not unicode" % name - namespace = token.get("namespace", self.defaultNamespace) - element = self.elementClass(name, namespace) - element.attributes = token["data"] - self.openElements[-1].appendChild(element) - self.openElements.append(element) - return element - - def insertElementTable(self, token): - """Create an element and insert it into the tree""" - element = self.createElement(token) - if self.openElements[-1].name not in tableInsertModeElements: - return self.insertElementNormal(token) - else: - # We should be in the InTable mode. 
This means we want to do - # special magic element rearranging - parent, insertBefore = self.getTableMisnestedNodePosition() - if insertBefore is None: - parent.appendChild(element) - else: - parent.insertBefore(element, insertBefore) - self.openElements.append(element) - return element - - def insertText(self, data, parent=None): - """Insert text data.""" - if parent is None: - parent = self.openElements[-1] - - if (not self.insertFromTable or (self.insertFromTable and - self.openElements[-1].name - not in tableInsertModeElements)): - parent.insertText(data) - else: - # We should be in the InTable mode. This means we want to do - # special magic element rearranging - parent, insertBefore = self.getTableMisnestedNodePosition() - parent.insertText(data, insertBefore) - - def getTableMisnestedNodePosition(self): - """Get the foster parent element, and sibling to insert before - (or None) when inserting a misnested table node""" - # The foster parent element is the one which comes before the most - # recently opened table element - # XXX - this is really inelegant - lastTable = None - fosterParent = None - insertBefore = None - for elm in self.openElements[::-1]: - if elm.name == "table": - lastTable = elm - break - if lastTable: - # XXX - we should really check that this parent is actually a - # node here - if lastTable.parent: - fosterParent = lastTable.parent - insertBefore = lastTable - else: - fosterParent = self.openElements[ - self.openElements.index(lastTable) - 1] - else: - fosterParent = self.openElements[0] - return fosterParent, insertBefore - - def generateImpliedEndTags(self, exclude=None): - name = self.openElements[-1].name - # XXX td, th and tr are not actually needed - if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt")) and - name != exclude): - self.openElements.pop() - # XXX This is not entirely what the specification says. We should - # investigate it more closely. 
- self.generateImpliedEndTags(exclude) - - def getDocument(self): - """Return the final tree""" - return self.document - - def getFragment(self): - """Return the final fragment""" - # assert self.innerHTML - fragment = self.fragmentClass() - self.openElements[0].reparentChildren(fragment) - return fragment - - def testSerializer(self, node): - """Serialize the subtree of node in the format required by unit tests - - :arg node: the node from which to start serializing - - """ - raise NotImplementedError diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treebuilders/dom.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treebuilders/dom.py deleted file mode 100644 index dcfac22..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treebuilders/dom.py +++ /dev/null @@ -1,236 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - - -from collections import MutableMapping -from xml.dom import minidom, Node -import weakref - -from . import base -from .. 
import constants -from ..constants import namespaces -from .._utils import moduleFactoryFactory - - -def getDomBuilder(DomImplementation): - Dom = DomImplementation - - class AttrList(MutableMapping): - def __init__(self, element): - self.element = element - - def __iter__(self): - return iter(self.element.attributes.keys()) - - def __setitem__(self, name, value): - if isinstance(name, tuple): - raise NotImplementedError - else: - attr = self.element.ownerDocument.createAttribute(name) - attr.value = value - self.element.attributes[name] = attr - - def __len__(self): - return len(self.element.attributes) - - def items(self): - return list(self.element.attributes.items()) - - def values(self): - return list(self.element.attributes.values()) - - def __getitem__(self, name): - if isinstance(name, tuple): - raise NotImplementedError - else: - return self.element.attributes[name].value - - def __delitem__(self, name): - if isinstance(name, tuple): - raise NotImplementedError - else: - del self.element.attributes[name] - - class NodeBuilder(base.Node): - def __init__(self, element): - base.Node.__init__(self, element.nodeName) - self.element = element - - namespace = property(lambda self: hasattr(self.element, "namespaceURI") and - self.element.namespaceURI or None) - - def appendChild(self, node): - node.parent = self - self.element.appendChild(node.element) - - def insertText(self, data, insertBefore=None): - text = self.element.ownerDocument.createTextNode(data) - if insertBefore: - self.element.insertBefore(text, insertBefore.element) - else: - self.element.appendChild(text) - - def insertBefore(self, node, refNode): - self.element.insertBefore(node.element, refNode.element) - node.parent = self - - def removeChild(self, node): - if node.element.parentNode == self.element: - self.element.removeChild(node.element) - node.parent = None - - def reparentChildren(self, newParent): - while self.element.hasChildNodes(): - child = self.element.firstChild - 
self.element.removeChild(child) - newParent.element.appendChild(child) - self.childNodes = [] - - def getAttributes(self): - return AttrList(self.element) - - def setAttributes(self, attributes): - if attributes: - for name, value in list(attributes.items()): - if isinstance(name, tuple): - if name[0] is not None: - qualifiedName = (name[0] + ":" + name[1]) - else: - qualifiedName = name[1] - self.element.setAttributeNS(name[2], qualifiedName, - value) - else: - self.element.setAttribute( - name, value) - attributes = property(getAttributes, setAttributes) - - def cloneNode(self): - return NodeBuilder(self.element.cloneNode(False)) - - def hasContent(self): - return self.element.hasChildNodes() - - def getNameTuple(self): - if self.namespace is None: - return namespaces["html"], self.name - else: - return self.namespace, self.name - - nameTuple = property(getNameTuple) - - class TreeBuilder(base.TreeBuilder): # pylint:disable=unused-variable - def documentClass(self): - self.dom = Dom.getDOMImplementation().createDocument(None, None, None) - return weakref.proxy(self) - - def insertDoctype(self, token): - name = token["name"] - publicId = token["publicId"] - systemId = token["systemId"] - - domimpl = Dom.getDOMImplementation() - doctype = domimpl.createDocumentType(name, publicId, systemId) - self.document.appendChild(NodeBuilder(doctype)) - if Dom == minidom: - doctype.ownerDocument = self.dom - - def elementClass(self, name, namespace=None): - if namespace is None and self.defaultNamespace is None: - node = self.dom.createElement(name) - else: - node = self.dom.createElementNS(namespace, name) - - return NodeBuilder(node) - - def commentClass(self, data): - return NodeBuilder(self.dom.createComment(data)) - - def fragmentClass(self): - return NodeBuilder(self.dom.createDocumentFragment()) - - def appendChild(self, node): - self.dom.appendChild(node.element) - - def testSerializer(self, element): - return testSerializer(element) - - def getDocument(self): - return 
self.dom - - def getFragment(self): - return base.TreeBuilder.getFragment(self).element - - def insertText(self, data, parent=None): - data = data - if parent != self: - base.TreeBuilder.insertText(self, data, parent) - else: - # HACK: allow text nodes as children of the document node - if hasattr(self.dom, '_child_node_types'): - # pylint:disable=protected-access - if Node.TEXT_NODE not in self.dom._child_node_types: - self.dom._child_node_types = list(self.dom._child_node_types) - self.dom._child_node_types.append(Node.TEXT_NODE) - self.dom.appendChild(self.dom.createTextNode(data)) - - implementation = DomImplementation - name = None - - def testSerializer(element): - element.normalize() - rv = [] - - def serializeElement(element, indent=0): - if element.nodeType == Node.DOCUMENT_TYPE_NODE: - if element.name: - if element.publicId or element.systemId: - publicId = element.publicId or "" - systemId = element.systemId or "" - rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" % - (' ' * indent, element.name, publicId, systemId)) - else: - rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, element.name)) - else: - rv.append("|%s<!DOCTYPE >" % (' ' * indent,)) - elif element.nodeType == Node.DOCUMENT_NODE: - rv.append("#document") - elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE: - rv.append("#document-fragment") - elif element.nodeType == Node.COMMENT_NODE: - rv.append("|%s<!-- %s -->" % (' ' * indent, element.nodeValue)) - elif element.nodeType == Node.TEXT_NODE: - rv.append("|%s\"%s\"" % (' ' * indent, element.nodeValue)) - else: - if (hasattr(element, "namespaceURI") and - element.namespaceURI is not None): - name = "%s %s" % (constants.prefixes[element.namespaceURI], - element.nodeName) - else: - name = element.nodeName - rv.append("|%s<%s>" % (' ' * indent, name)) - if element.hasAttributes(): - attributes = [] - for i in range(len(element.attributes)): - attr = element.attributes.item(i) - name = attr.nodeName - value = attr.value - ns = attr.namespaceURI - if ns: - 
name = "%s %s" % (constants.prefixes[ns], attr.localName) - else: - name = attr.nodeName - attributes.append((name, value)) - - for name, value in sorted(attributes): - rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) - indent += 2 - for child in element.childNodes: - serializeElement(child, indent) - serializeElement(element, 0) - - return "\n".join(rv) - - return locals() - - -# The actual means to get a module! -getDomModule = moduleFactoryFactory(getDomBuilder) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treebuilders/etree.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treebuilders/etree.py deleted file mode 100644 index 0dedf44..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treebuilders/etree.py +++ /dev/null @@ -1,340 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals -# pylint:disable=protected-access - -from pip._vendor.six import text_type - -import re - -from . import base -from .. import _ihatexml -from .. 
import constants -from ..constants import namespaces -from .._utils import moduleFactoryFactory - -tag_regexp = re.compile("{([^}]*)}(.*)") - - -def getETreeBuilder(ElementTreeImplementation, fullTree=False): - ElementTree = ElementTreeImplementation - ElementTreeCommentType = ElementTree.Comment("asd").tag - - class Element(base.Node): - def __init__(self, name, namespace=None): - self._name = name - self._namespace = namespace - self._element = ElementTree.Element(self._getETreeTag(name, - namespace)) - if namespace is None: - self.nameTuple = namespaces["html"], self._name - else: - self.nameTuple = self._namespace, self._name - self.parent = None - self._childNodes = [] - self._flags = [] - - def _getETreeTag(self, name, namespace): - if namespace is None: - etree_tag = name - else: - etree_tag = "{%s}%s" % (namespace, name) - return etree_tag - - def _setName(self, name): - self._name = name - self._element.tag = self._getETreeTag(self._name, self._namespace) - - def _getName(self): - return self._name - - name = property(_getName, _setName) - - def _setNamespace(self, namespace): - self._namespace = namespace - self._element.tag = self._getETreeTag(self._name, self._namespace) - - def _getNamespace(self): - return self._namespace - - namespace = property(_getNamespace, _setNamespace) - - def _getAttributes(self): - return self._element.attrib - - def _setAttributes(self, attributes): - # Delete existing attributes first - # XXX - there may be a better way to do this... 
- for key in list(self._element.attrib.keys()): - del self._element.attrib[key] - for key, value in attributes.items(): - if isinstance(key, tuple): - name = "{%s}%s" % (key[2], key[1]) - else: - name = key - self._element.set(name, value) - - attributes = property(_getAttributes, _setAttributes) - - def _getChildNodes(self): - return self._childNodes - - def _setChildNodes(self, value): - del self._element[:] - self._childNodes = [] - for element in value: - self.insertChild(element) - - childNodes = property(_getChildNodes, _setChildNodes) - - def hasContent(self): - """Return true if the node has children or text""" - return bool(self._element.text or len(self._element)) - - def appendChild(self, node): - self._childNodes.append(node) - self._element.append(node._element) - node.parent = self - - def insertBefore(self, node, refNode): - index = list(self._element).index(refNode._element) - self._element.insert(index, node._element) - node.parent = self - - def removeChild(self, node): - self._childNodes.remove(node) - self._element.remove(node._element) - node.parent = None - - def insertText(self, data, insertBefore=None): - if not(len(self._element)): - if not self._element.text: - self._element.text = "" - self._element.text += data - elif insertBefore is None: - # Insert the text as the tail of the last child element - if not self._element[-1].tail: - self._element[-1].tail = "" - self._element[-1].tail += data - else: - # Insert the text before the specified node - children = list(self._element) - index = children.index(insertBefore._element) - if index > 0: - if not self._element[index - 1].tail: - self._element[index - 1].tail = "" - self._element[index - 1].tail += data - else: - if not self._element.text: - self._element.text = "" - self._element.text += data - - def cloneNode(self): - element = type(self)(self.name, self.namespace) - for name, value in self.attributes.items(): - element.attributes[name] = value - return element - - def 
reparentChildren(self, newParent): - if newParent.childNodes: - newParent.childNodes[-1]._element.tail += self._element.text - else: - if not newParent._element.text: - newParent._element.text = "" - if self._element.text is not None: - newParent._element.text += self._element.text - self._element.text = "" - base.Node.reparentChildren(self, newParent) - - class Comment(Element): - def __init__(self, data): - # Use the superclass constructor to set all properties on the - # wrapper element - self._element = ElementTree.Comment(data) - self.parent = None - self._childNodes = [] - self._flags = [] - - def _getData(self): - return self._element.text - - def _setData(self, value): - self._element.text = value - - data = property(_getData, _setData) - - class DocumentType(Element): - def __init__(self, name, publicId, systemId): - Element.__init__(self, "<!DOCTYPE>") - self._element.text = name - self.publicId = publicId - self.systemId = systemId - - def _getPublicId(self): - return self._element.get("publicId", "") - - def _setPublicId(self, value): - if value is not None: - self._element.set("publicId", value) - - publicId = property(_getPublicId, _setPublicId) - - def _getSystemId(self): - return self._element.get("systemId", "") - - def _setSystemId(self, value): - if value is not None: - self._element.set("systemId", value) - - systemId = property(_getSystemId, _setSystemId) - - class Document(Element): - def __init__(self): - Element.__init__(self, "DOCUMENT_ROOT") - - class DocumentFragment(Element): - def __init__(self): - Element.__init__(self, "DOCUMENT_FRAGMENT") - - def testSerializer(element): - rv = [] - - def serializeElement(element, indent=0): - if not(hasattr(element, "tag")): - element = element.getroot() - if element.tag == "<!DOCTYPE>": - if element.get("publicId") or element.get("systemId"): - publicId = element.get("publicId") or "" - systemId = element.get("systemId") or "" - rv.append("""<!DOCTYPE %s "%s" "%s">""" % - (element.text, publicId, 
systemId)) - else: - rv.append("<!DOCTYPE %s>" % (element.text,)) - elif element.tag == "DOCUMENT_ROOT": - rv.append("#document") - if element.text is not None: - rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text)) - if element.tail is not None: - raise TypeError("Document node cannot have tail") - if hasattr(element, "attrib") and len(element.attrib): - raise TypeError("Document node cannot have attributes") - elif element.tag == ElementTreeCommentType: - rv.append("|%s<!-- %s -->" % (' ' * indent, element.text)) - else: - assert isinstance(element.tag, text_type), \ - "Expected unicode, got %s, %s" % (type(element.tag), element.tag) - nsmatch = tag_regexp.match(element.tag) - - if nsmatch is None: - name = element.tag - else: - ns, name = nsmatch.groups() - prefix = constants.prefixes[ns] - name = "%s %s" % (prefix, name) - rv.append("|%s<%s>" % (' ' * indent, name)) - - if hasattr(element, "attrib"): - attributes = [] - for name, value in element.attrib.items(): - nsmatch = tag_regexp.match(name) - if nsmatch is not None: - ns, name = nsmatch.groups() - prefix = constants.prefixes[ns] - attr_string = "%s %s" % (prefix, name) - else: - attr_string = name - attributes.append((attr_string, value)) - - for name, value in sorted(attributes): - rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) - if element.text: - rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text)) - indent += 2 - for child in element: - serializeElement(child, indent) - if element.tail: - rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail)) - serializeElement(element, 0) - - return "\n".join(rv) - - def tostring(element): # pylint:disable=unused-variable - """Serialize an element and its child nodes to a string""" - rv = [] - filter = _ihatexml.InfosetFilter() - - def serializeElement(element): - if isinstance(element, ElementTree.ElementTree): - element = element.getroot() - - if element.tag == "<!DOCTYPE>": - if element.get("publicId") or element.get("systemId"): - 
publicId = element.get("publicId") or "" - systemId = element.get("systemId") or "" - rv.append("""<!DOCTYPE %s PUBLIC "%s" "%s">""" % - (element.text, publicId, systemId)) - else: - rv.append("<!DOCTYPE %s>" % (element.text,)) - elif element.tag == "DOCUMENT_ROOT": - if element.text is not None: - rv.append(element.text) - if element.tail is not None: - raise TypeError("Document node cannot have tail") - if hasattr(element, "attrib") and len(element.attrib): - raise TypeError("Document node cannot have attributes") - - for child in element: - serializeElement(child) - - elif element.tag == ElementTreeCommentType: - rv.append("<!--%s-->" % (element.text,)) - else: - # This is assumed to be an ordinary element - if not element.attrib: - rv.append("<%s>" % (filter.fromXmlName(element.tag),)) - else: - attr = " ".join(["%s=\"%s\"" % ( - filter.fromXmlName(name), value) - for name, value in element.attrib.items()]) - rv.append("<%s %s>" % (element.tag, attr)) - if element.text: - rv.append(element.text) - - for child in element: - serializeElement(child) - - rv.append("</%s>" % (element.tag,)) - - if element.tail: - rv.append(element.tail) - - serializeElement(element) - - return "".join(rv) - - class TreeBuilder(base.TreeBuilder): # pylint:disable=unused-variable - documentClass = Document - doctypeClass = DocumentType - elementClass = Element - commentClass = Comment - fragmentClass = DocumentFragment - implementation = ElementTreeImplementation - - def testSerializer(self, element): - return testSerializer(element) - - def getDocument(self): - if fullTree: - return self.document._element - else: - if self.defaultNamespace is not None: - return self.document._element.find( - "{%s}html" % self.defaultNamespace) - else: - return self.document._element.find("html") - - def getFragment(self): - return base.TreeBuilder.getFragment(self)._element - - return locals() - - -getETreeModule = moduleFactoryFactory(getETreeBuilder) diff --git 
a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treebuilders/etree_lxml.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treebuilders/etree_lxml.py deleted file mode 100644 index ca12a99..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treebuilders/etree_lxml.py +++ /dev/null @@ -1,366 +0,0 @@ -"""Module for supporting the lxml.etree library. The idea here is to use as much -of the native library as possible, without using fragile hacks like custom element -names that break between releases. The downside of this is that we cannot represent -all possible trees; specifically the following are known to cause problems: - -Text or comments as siblings of the root element -Docypes with no name - -When any of these things occur, we emit a DataLossWarning -""" - -from __future__ import absolute_import, division, unicode_literals -# pylint:disable=protected-access - -import warnings -import re -import sys - -from . import base -from ..constants import DataLossWarning -from .. import constants -from . import etree as etree_builders -from .. 
import _ihatexml - -import lxml.etree as etree - - -fullTree = True -tag_regexp = re.compile("{([^}]*)}(.*)") - -comment_type = etree.Comment("asd").tag - - -class DocumentType(object): - def __init__(self, name, publicId, systemId): - self.name = name - self.publicId = publicId - self.systemId = systemId - - -class Document(object): - def __init__(self): - self._elementTree = None - self._childNodes = [] - - def appendChild(self, element): - self._elementTree.getroot().addnext(element._element) - - def _getChildNodes(self): - return self._childNodes - - childNodes = property(_getChildNodes) - - -def testSerializer(element): - rv = [] - infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True) - - def serializeElement(element, indent=0): - if not hasattr(element, "tag"): - if hasattr(element, "getroot"): - # Full tree case - rv.append("#document") - if element.docinfo.internalDTD: - if not (element.docinfo.public_id or - element.docinfo.system_url): - dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name - else: - dtd_str = """<!DOCTYPE %s "%s" "%s">""" % ( - element.docinfo.root_name, - element.docinfo.public_id, - element.docinfo.system_url) - rv.append("|%s%s" % (' ' * (indent + 2), dtd_str)) - next_element = element.getroot() - while next_element.getprevious() is not None: - next_element = next_element.getprevious() - while next_element is not None: - serializeElement(next_element, indent + 2) - next_element = next_element.getnext() - elif isinstance(element, str) or isinstance(element, bytes): - # Text in a fragment - assert isinstance(element, str) or sys.version_info[0] == 2 - rv.append("|%s\"%s\"" % (' ' * indent, element)) - else: - # Fragment case - rv.append("#document-fragment") - for next_element in element: - serializeElement(next_element, indent + 2) - elif element.tag == comment_type: - rv.append("|%s<!-- %s -->" % (' ' * indent, element.text)) - if hasattr(element, "tail") and element.tail: - rv.append("|%s\"%s\"" % (' ' * indent, 
element.tail)) - else: - assert isinstance(element, etree._Element) - nsmatch = etree_builders.tag_regexp.match(element.tag) - if nsmatch is not None: - ns = nsmatch.group(1) - tag = nsmatch.group(2) - prefix = constants.prefixes[ns] - rv.append("|%s<%s %s>" % (' ' * indent, prefix, - infosetFilter.fromXmlName(tag))) - else: - rv.append("|%s<%s>" % (' ' * indent, - infosetFilter.fromXmlName(element.tag))) - - if hasattr(element, "attrib"): - attributes = [] - for name, value in element.attrib.items(): - nsmatch = tag_regexp.match(name) - if nsmatch is not None: - ns, name = nsmatch.groups() - name = infosetFilter.fromXmlName(name) - prefix = constants.prefixes[ns] - attr_string = "%s %s" % (prefix, name) - else: - attr_string = infosetFilter.fromXmlName(name) - attributes.append((attr_string, value)) - - for name, value in sorted(attributes): - rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) - - if element.text: - rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text)) - indent += 2 - for child in element: - serializeElement(child, indent) - if hasattr(element, "tail") and element.tail: - rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail)) - serializeElement(element, 0) - - return "\n".join(rv) - - -def tostring(element): - """Serialize an element and its child nodes to a string""" - rv = [] - - def serializeElement(element): - if not hasattr(element, "tag"): - if element.docinfo.internalDTD: - if element.docinfo.doctype: - dtd_str = element.docinfo.doctype - else: - dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name - rv.append(dtd_str) - serializeElement(element.getroot()) - - elif element.tag == comment_type: - rv.append("<!--%s-->" % (element.text,)) - - else: - # This is assumed to be an ordinary element - if not element.attrib: - rv.append("<%s>" % (element.tag,)) - else: - attr = " ".join(["%s=\"%s\"" % (name, value) - for name, value in element.attrib.items()]) - rv.append("<%s %s>" % (element.tag, attr)) - if element.text: - 
rv.append(element.text) - - for child in element: - serializeElement(child) - - rv.append("</%s>" % (element.tag,)) - - if hasattr(element, "tail") and element.tail: - rv.append(element.tail) - - serializeElement(element) - - return "".join(rv) - - -class TreeBuilder(base.TreeBuilder): - documentClass = Document - doctypeClass = DocumentType - elementClass = None - commentClass = None - fragmentClass = Document - implementation = etree - - def __init__(self, namespaceHTMLElements, fullTree=False): - builder = etree_builders.getETreeModule(etree, fullTree=fullTree) - infosetFilter = self.infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True) - self.namespaceHTMLElements = namespaceHTMLElements - - class Attributes(dict): - def __init__(self, element, value=None): - if value is None: - value = {} - self._element = element - dict.__init__(self, value) # pylint:disable=non-parent-init-called - for key, value in self.items(): - if isinstance(key, tuple): - name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1])) - else: - name = infosetFilter.coerceAttribute(key) - self._element._element.attrib[name] = value - - def __setitem__(self, key, value): - dict.__setitem__(self, key, value) - if isinstance(key, tuple): - name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1])) - else: - name = infosetFilter.coerceAttribute(key) - self._element._element.attrib[name] = value - - class Element(builder.Element): - def __init__(self, name, namespace): - name = infosetFilter.coerceElement(name) - builder.Element.__init__(self, name, namespace=namespace) - self._attributes = Attributes(self) - - def _setName(self, name): - self._name = infosetFilter.coerceElement(name) - self._element.tag = self._getETreeTag( - self._name, self._namespace) - - def _getName(self): - return infosetFilter.fromXmlName(self._name) - - name = property(_getName, _setName) - - def _getAttributes(self): - return self._attributes - - def _setAttributes(self, attributes): - 
self._attributes = Attributes(self, attributes) - - attributes = property(_getAttributes, _setAttributes) - - def insertText(self, data, insertBefore=None): - data = infosetFilter.coerceCharacters(data) - builder.Element.insertText(self, data, insertBefore) - - def appendChild(self, child): - builder.Element.appendChild(self, child) - - class Comment(builder.Comment): - def __init__(self, data): - data = infosetFilter.coerceComment(data) - builder.Comment.__init__(self, data) - - def _setData(self, data): - data = infosetFilter.coerceComment(data) - self._element.text = data - - def _getData(self): - return self._element.text - - data = property(_getData, _setData) - - self.elementClass = Element - self.commentClass = Comment - # self.fragmentClass = builder.DocumentFragment - base.TreeBuilder.__init__(self, namespaceHTMLElements) - - def reset(self): - base.TreeBuilder.reset(self) - self.insertComment = self.insertCommentInitial - self.initial_comments = [] - self.doctype = None - - def testSerializer(self, element): - return testSerializer(element) - - def getDocument(self): - if fullTree: - return self.document._elementTree - else: - return self.document._elementTree.getroot() - - def getFragment(self): - fragment = [] - element = self.openElements[0]._element - if element.text: - fragment.append(element.text) - fragment.extend(list(element)) - if element.tail: - fragment.append(element.tail) - return fragment - - def insertDoctype(self, token): - name = token["name"] - publicId = token["publicId"] - systemId = token["systemId"] - - if not name: - warnings.warn("lxml cannot represent empty doctype", DataLossWarning) - self.doctype = None - else: - coercedName = self.infosetFilter.coerceElement(name) - if coercedName != name: - warnings.warn("lxml cannot represent non-xml doctype", DataLossWarning) - - doctype = self.doctypeClass(coercedName, publicId, systemId) - self.doctype = doctype - - def insertCommentInitial(self, data, parent=None): - assert parent is 
None or parent is self.document - assert self.document._elementTree is None - self.initial_comments.append(data) - - def insertCommentMain(self, data, parent=None): - if (parent == self.document and - self.document._elementTree.getroot()[-1].tag == comment_type): - warnings.warn("lxml cannot represent adjacent comments beyond the root elements", DataLossWarning) - super(TreeBuilder, self).insertComment(data, parent) - - def insertRoot(self, token): - # Because of the way libxml2 works, it doesn't seem to be possible to - # alter information like the doctype after the tree has been parsed. - # Therefore we need to use the built-in parser to create our initial - # tree, after which we can add elements like normal - docStr = "" - if self.doctype: - assert self.doctype.name - docStr += "<!DOCTYPE %s" % self.doctype.name - if (self.doctype.publicId is not None or - self.doctype.systemId is not None): - docStr += (' PUBLIC "%s" ' % - (self.infosetFilter.coercePubid(self.doctype.publicId or ""))) - if self.doctype.systemId: - sysid = self.doctype.systemId - if sysid.find("'") >= 0 and sysid.find('"') >= 0: - warnings.warn("DOCTYPE system cannot contain single and double quotes", DataLossWarning) - sysid = sysid.replace("'", 'U00027') - if sysid.find("'") >= 0: - docStr += '"%s"' % sysid - else: - docStr += "'%s'" % sysid - else: - docStr += "''" - docStr += ">" - if self.doctype.name != token["name"]: - warnings.warn("lxml cannot represent doctype with a different name to the root element", DataLossWarning) - docStr += "<THIS_SHOULD_NEVER_APPEAR_PUBLICLY/>" - root = etree.fromstring(docStr) - - # Append the initial comments: - for comment_token in self.initial_comments: - comment = self.commentClass(comment_token["data"]) - root.addprevious(comment._element) - - # Create the root document and add the ElementTree to it - self.document = self.documentClass() - self.document._elementTree = root.getroottree() - - # Give the root element the right name - name = token["name"] - 
namespace = token.get("namespace", self.defaultNamespace) - if namespace is None: - etree_tag = name - else: - etree_tag = "{%s}%s" % (namespace, name) - root.tag = etree_tag - - # Add the root element to the internal child/open data structures - root_element = self.elementClass(name, namespace) - root_element._element = root - self.document._childNodes.append(root_element) - self.openElements.append(root_element) - - # Reset to the default insert comment function - self.insertComment = self.insertCommentMain diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treewalkers/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treewalkers/__init__.py deleted file mode 100644 index 9bec207..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treewalkers/__init__.py +++ /dev/null @@ -1,154 +0,0 @@ -"""A collection of modules for iterating through different kinds of -tree, generating tokens identical to those produced by the tokenizer -module. - -To create a tree walker for a new type of tree, you need to do -implement a tree walker object (called TreeWalker by convention) that -implements a 'serialize' method taking a tree as sole argument and -returning an iterator generating tokens. -""" - -from __future__ import absolute_import, division, unicode_literals - -from .. import constants -from .._utils import default_etree - -__all__ = ["getTreeWalker", "pprint"] - -treeWalkerCache = {} - - -def getTreeWalker(treeType, implementation=None, **kwargs): - """Get a TreeWalker class for various types of tree with built-in support - - :arg str treeType: the name of the tree type required (case-insensitive). - Supported values are: - - * "dom": The xml.dom.minidom DOM implementation - * "etree": A generic walker for tree implementations exposing an - elementtree-like interface (known to work with ElementTree, - cElementTree and lxml.etree). 
- * "lxml": Optimized walker for lxml.etree - * "genshi": a Genshi stream - - :arg implementation: A module implementing the tree type e.g. - xml.etree.ElementTree or cElementTree (Currently applies to the "etree" - tree type only). - - :arg kwargs: keyword arguments passed to the etree walker--for other - walkers, this has no effect - - :returns: a TreeWalker class - - """ - - treeType = treeType.lower() - if treeType not in treeWalkerCache: - if treeType == "dom": - from . import dom - treeWalkerCache[treeType] = dom.TreeWalker - elif treeType == "genshi": - from . import genshi - treeWalkerCache[treeType] = genshi.TreeWalker - elif treeType == "lxml": - from . import etree_lxml - treeWalkerCache[treeType] = etree_lxml.TreeWalker - elif treeType == "etree": - from . import etree - if implementation is None: - implementation = default_etree - # XXX: NEVER cache here, caching is done in the etree submodule - return etree.getETreeModule(implementation, **kwargs).TreeWalker - return treeWalkerCache.get(treeType) - - -def concatenateCharacterTokens(tokens): - pendingCharacters = [] - for token in tokens: - type = token["type"] - if type in ("Characters", "SpaceCharacters"): - pendingCharacters.append(token["data"]) - else: - if pendingCharacters: - yield {"type": "Characters", "data": "".join(pendingCharacters)} - pendingCharacters = [] - yield token - if pendingCharacters: - yield {"type": "Characters", "data": "".join(pendingCharacters)} - - -def pprint(walker): - """Pretty printer for tree walkers - - Takes a TreeWalker instance and pretty prints the output of walking the tree. 
- - :arg walker: a TreeWalker instance - - """ - output = [] - indent = 0 - for token in concatenateCharacterTokens(walker): - type = token["type"] - if type in ("StartTag", "EmptyTag"): - # tag name - if token["namespace"] and token["namespace"] != constants.namespaces["html"]: - if token["namespace"] in constants.prefixes: - ns = constants.prefixes[token["namespace"]] - else: - ns = token["namespace"] - name = "%s %s" % (ns, token["name"]) - else: - name = token["name"] - output.append("%s<%s>" % (" " * indent, name)) - indent += 2 - # attributes (sorted for consistent ordering) - attrs = token["data"] - for (namespace, localname), value in sorted(attrs.items()): - if namespace: - if namespace in constants.prefixes: - ns = constants.prefixes[namespace] - else: - ns = namespace - name = "%s %s" % (ns, localname) - else: - name = localname - output.append("%s%s=\"%s\"" % (" " * indent, name, value)) - # self-closing - if type == "EmptyTag": - indent -= 2 - - elif type == "EndTag": - indent -= 2 - - elif type == "Comment": - output.append("%s<!-- %s -->" % (" " * indent, token["data"])) - - elif type == "Doctype": - if token["name"]: - if token["publicId"]: - output.append("""%s<!DOCTYPE %s "%s" "%s">""" % - (" " * indent, - token["name"], - token["publicId"], - token["systemId"] if token["systemId"] else "")) - elif token["systemId"]: - output.append("""%s<!DOCTYPE %s "" "%s">""" % - (" " * indent, - token["name"], - token["systemId"])) - else: - output.append("%s<!DOCTYPE %s>" % (" " * indent, - token["name"])) - else: - output.append("%s<!DOCTYPE >" % (" " * indent,)) - - elif type == "Characters": - output.append("%s\"%s\"" % (" " * indent, token["data"])) - - elif type == "SpaceCharacters": - assert False, "concatenateCharacterTokens should have got rid of all Space tokens" - - else: - raise ValueError("Unknown token type, %s" % type) - - return "\n".join(output) diff --git 
a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treewalkers/base.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treewalkers/base.py deleted file mode 100644 index 80c474c..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treewalkers/base.py +++ /dev/null @@ -1,252 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from xml.dom import Node -from ..constants import namespaces, voidElements, spaceCharacters - -__all__ = ["DOCUMENT", "DOCTYPE", "TEXT", "ELEMENT", "COMMENT", "ENTITY", "UNKNOWN", - "TreeWalker", "NonRecursiveTreeWalker"] - -DOCUMENT = Node.DOCUMENT_NODE -DOCTYPE = Node.DOCUMENT_TYPE_NODE -TEXT = Node.TEXT_NODE -ELEMENT = Node.ELEMENT_NODE -COMMENT = Node.COMMENT_NODE -ENTITY = Node.ENTITY_NODE -UNKNOWN = "<#UNKNOWN#>" - -spaceCharacters = "".join(spaceCharacters) - - -class TreeWalker(object): - """Walks a tree yielding tokens - - Tokens are dicts that all have a ``type`` field specifying the type of the - token. 
- - """ - def __init__(self, tree): - """Creates a TreeWalker - - :arg tree: the tree to walk - - """ - self.tree = tree - - def __iter__(self): - raise NotImplementedError - - def error(self, msg): - """Generates an error token with the given message - - :arg msg: the error message - - :returns: SerializeError token - - """ - return {"type": "SerializeError", "data": msg} - - def emptyTag(self, namespace, name, attrs, hasChildren=False): - """Generates an EmptyTag token - - :arg namespace: the namespace of the token--can be ``None`` - - :arg name: the name of the element - - :arg attrs: the attributes of the element as a dict - - :arg hasChildren: whether or not to yield a SerializationError because - this tag shouldn't have children - - :returns: EmptyTag token - - """ - yield {"type": "EmptyTag", "name": name, - "namespace": namespace, - "data": attrs} - if hasChildren: - yield self.error("Void element has children") - - def startTag(self, namespace, name, attrs): - """Generates a StartTag token - - :arg namespace: the namespace of the token--can be ``None`` - - :arg name: the name of the element - - :arg attrs: the attributes of the element as a dict - - :returns: StartTag token - - """ - return {"type": "StartTag", - "name": name, - "namespace": namespace, - "data": attrs} - - def endTag(self, namespace, name): - """Generates an EndTag token - - :arg namespace: the namespace of the token--can be ``None`` - - :arg name: the name of the element - - :returns: EndTag token - - """ - return {"type": "EndTag", - "name": name, - "namespace": namespace} - - def text(self, data): - """Generates SpaceCharacters and Characters tokens - - Depending on what's in the data, this generates one or more - ``SpaceCharacters`` and ``Characters`` tokens. 
- - For example: - - >>> from html5lib.treewalkers.base import TreeWalker - >>> # Give it an empty tree just so it instantiates - >>> walker = TreeWalker([]) - >>> list(walker.text('')) - [] - >>> list(walker.text(' ')) - [{u'data': ' ', u'type': u'SpaceCharacters'}] - >>> list(walker.text(' abc ')) # doctest: +NORMALIZE_WHITESPACE - [{u'data': ' ', u'type': u'SpaceCharacters'}, - {u'data': u'abc', u'type': u'Characters'}, - {u'data': u' ', u'type': u'SpaceCharacters'}] - - :arg data: the text data - - :returns: one or more ``SpaceCharacters`` and ``Characters`` tokens - - """ - data = data - middle = data.lstrip(spaceCharacters) - left = data[:len(data) - len(middle)] - if left: - yield {"type": "SpaceCharacters", "data": left} - data = middle - middle = data.rstrip(spaceCharacters) - right = data[len(middle):] - if middle: - yield {"type": "Characters", "data": middle} - if right: - yield {"type": "SpaceCharacters", "data": right} - - def comment(self, data): - """Generates a Comment token - - :arg data: the comment - - :returns: Comment token - - """ - return {"type": "Comment", "data": data} - - def doctype(self, name, publicId=None, systemId=None): - """Generates a Doctype token - - :arg name: - - :arg publicId: - - :arg systemId: - - :returns: the Doctype token - - """ - return {"type": "Doctype", - "name": name, - "publicId": publicId, - "systemId": systemId} - - def entity(self, name): - """Generates an Entity token - - :arg name: the entity name - - :returns: an Entity token - - """ - return {"type": "Entity", "name": name} - - def unknown(self, nodeType): - """Handles unknown node types""" - return self.error("Unknown node type: " + nodeType) - - -class NonRecursiveTreeWalker(TreeWalker): - def getNodeDetails(self, node): - raise NotImplementedError - - def getFirstChild(self, node): - raise NotImplementedError - - def getNextSibling(self, node): - raise NotImplementedError - - def getParentNode(self, node): - raise NotImplementedError - - def 
__iter__(self): - currentNode = self.tree - while currentNode is not None: - details = self.getNodeDetails(currentNode) - type, details = details[0], details[1:] - hasChildren = False - - if type == DOCTYPE: - yield self.doctype(*details) - - elif type == TEXT: - for token in self.text(*details): - yield token - - elif type == ELEMENT: - namespace, name, attributes, hasChildren = details - if (not namespace or namespace == namespaces["html"]) and name in voidElements: - for token in self.emptyTag(namespace, name, attributes, - hasChildren): - yield token - hasChildren = False - else: - yield self.startTag(namespace, name, attributes) - - elif type == COMMENT: - yield self.comment(details[0]) - - elif type == ENTITY: - yield self.entity(details[0]) - - elif type == DOCUMENT: - hasChildren = True - - else: - yield self.unknown(details[0]) - - if hasChildren: - firstChild = self.getFirstChild(currentNode) - else: - firstChild = None - - if firstChild is not None: - currentNode = firstChild - else: - while currentNode is not None: - details = self.getNodeDetails(currentNode) - type, details = details[0], details[1:] - if type == ELEMENT: - namespace, name, attributes, hasChildren = details - if (namespace and namespace != namespaces["html"]) or name not in voidElements: - yield self.endTag(namespace, name) - if self.tree is currentNode: - currentNode = None - break - nextSibling = self.getNextSibling(currentNode) - if nextSibling is not None: - currentNode = nextSibling - break - else: - currentNode = self.getParentNode(currentNode) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treewalkers/dom.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treewalkers/dom.py deleted file mode 100644 index b0c89b0..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treewalkers/dom.py +++ /dev/null @@ -1,43 +0,0 @@ -from __future__ import absolute_import, division, 
unicode_literals - -from xml.dom import Node - -from . import base - - -class TreeWalker(base.NonRecursiveTreeWalker): - def getNodeDetails(self, node): - if node.nodeType == Node.DOCUMENT_TYPE_NODE: - return base.DOCTYPE, node.name, node.publicId, node.systemId - - elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE): - return base.TEXT, node.nodeValue - - elif node.nodeType == Node.ELEMENT_NODE: - attrs = {} - for attr in list(node.attributes.keys()): - attr = node.getAttributeNode(attr) - if attr.namespaceURI: - attrs[(attr.namespaceURI, attr.localName)] = attr.value - else: - attrs[(None, attr.name)] = attr.value - return (base.ELEMENT, node.namespaceURI, node.nodeName, - attrs, node.hasChildNodes()) - - elif node.nodeType == Node.COMMENT_NODE: - return base.COMMENT, node.nodeValue - - elif node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE): - return (base.DOCUMENT,) - - else: - return base.UNKNOWN, node.nodeType - - def getFirstChild(self, node): - return node.firstChild - - def getNextSibling(self, node): - return node.nextSibling - - def getParentNode(self, node): - return node.parentNode diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treewalkers/etree.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treewalkers/etree.py deleted file mode 100644 index 95fc0c1..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treewalkers/etree.py +++ /dev/null @@ -1,130 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from collections import OrderedDict -import re - -from pip._vendor.six import string_types - -from . 
import base -from .._utils import moduleFactoryFactory - -tag_regexp = re.compile("{([^}]*)}(.*)") - - -def getETreeBuilder(ElementTreeImplementation): - ElementTree = ElementTreeImplementation - ElementTreeCommentType = ElementTree.Comment("asd").tag - - class TreeWalker(base.NonRecursiveTreeWalker): # pylint:disable=unused-variable - """Given the particular ElementTree representation, this implementation, - to avoid using recursion, returns "nodes" as tuples with the following - content: - - 1. The current element - - 2. The index of the element relative to its parent - - 3. A stack of ancestor elements - - 4. A flag "text", "tail" or None to indicate if the current node is a - text node; either the text or tail of the current element (1) - """ - def getNodeDetails(self, node): - if isinstance(node, tuple): # It might be the root Element - elt, _, _, flag = node - if flag in ("text", "tail"): - return base.TEXT, getattr(elt, flag) - else: - node = elt - - if not(hasattr(node, "tag")): - node = node.getroot() - - if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"): - return (base.DOCUMENT,) - - elif node.tag == "<!DOCTYPE>": - return (base.DOCTYPE, node.text, - node.get("publicId"), node.get("systemId")) - - elif node.tag == ElementTreeCommentType: - return base.COMMENT, node.text - - else: - assert isinstance(node.tag, string_types), type(node.tag) - # This is assumed to be an ordinary element - match = tag_regexp.match(node.tag) - if match: - namespace, tag = match.groups() - else: - namespace = None - tag = node.tag - attrs = OrderedDict() - for name, value in list(node.attrib.items()): - match = tag_regexp.match(name) - if match: - attrs[(match.group(1), match.group(2))] = value - else: - attrs[(None, name)] = value - return (base.ELEMENT, namespace, tag, - attrs, len(node) or node.text) - - def getFirstChild(self, node): - if isinstance(node, tuple): - element, key, parents, flag = node - else: - element, key, parents, flag = node, None, [], None - - if 
flag in ("text", "tail"): - return None - else: - if element.text: - return element, key, parents, "text" - elif len(element): - parents.append(element) - return element[0], 0, parents, None - else: - return None - - def getNextSibling(self, node): - if isinstance(node, tuple): - element, key, parents, flag = node - else: - return None - - if flag == "text": - if len(element): - parents.append(element) - return element[0], 0, parents, None - else: - return None - else: - if element.tail and flag != "tail": - return element, key, parents, "tail" - elif key < len(parents[-1]) - 1: - return parents[-1][key + 1], key + 1, parents, None - else: - return None - - def getParentNode(self, node): - if isinstance(node, tuple): - element, key, parents, flag = node - else: - return None - - if flag == "text": - if not parents: - return element - else: - return element, key, parents, None - else: - parent = parents.pop() - if not parents: - return parent - else: - assert list(parents[-1]).count(parent) == 1 - return parent, list(parents[-1]).index(parent), parents, None - - return locals() - -getETreeModule = moduleFactoryFactory(getETreeBuilder) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treewalkers/etree_lxml.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treewalkers/etree_lxml.py deleted file mode 100644 index e81ddf3..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treewalkers/etree_lxml.py +++ /dev/null @@ -1,213 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals -from pip._vendor.six import text_type - -from lxml import etree -from ..treebuilders.etree import tag_regexp - -from . import base - -from .. 
import _ihatexml - - -def ensure_str(s): - if s is None: - return None - elif isinstance(s, text_type): - return s - else: - return s.decode("ascii", "strict") - - -class Root(object): - def __init__(self, et): - self.elementtree = et - self.children = [] - - try: - if et.docinfo.internalDTD: - self.children.append(Doctype(self, - ensure_str(et.docinfo.root_name), - ensure_str(et.docinfo.public_id), - ensure_str(et.docinfo.system_url))) - except AttributeError: - pass - - try: - node = et.getroot() - except AttributeError: - node = et - - while node.getprevious() is not None: - node = node.getprevious() - while node is not None: - self.children.append(node) - node = node.getnext() - - self.text = None - self.tail = None - - def __getitem__(self, key): - return self.children[key] - - def getnext(self): - return None - - def __len__(self): - return 1 - - -class Doctype(object): - def __init__(self, root_node, name, public_id, system_id): - self.root_node = root_node - self.name = name - self.public_id = public_id - self.system_id = system_id - - self.text = None - self.tail = None - - def getnext(self): - return self.root_node.children[1] - - -class FragmentRoot(Root): - def __init__(self, children): - self.children = [FragmentWrapper(self, child) for child in children] - self.text = self.tail = None - - def getnext(self): - return None - - -class FragmentWrapper(object): - def __init__(self, fragment_root, obj): - self.root_node = fragment_root - self.obj = obj - if hasattr(self.obj, 'text'): - self.text = ensure_str(self.obj.text) - else: - self.text = None - if hasattr(self.obj, 'tail'): - self.tail = ensure_str(self.obj.tail) - else: - self.tail = None - - def __getattr__(self, name): - return getattr(self.obj, name) - - def getnext(self): - siblings = self.root_node.children - idx = siblings.index(self) - if idx < len(siblings) - 1: - return siblings[idx + 1] - else: - return None - - def __getitem__(self, key): - return self.obj[key] - - def __bool__(self): - 
return bool(self.obj) - - def getparent(self): - return None - - def __str__(self): - return str(self.obj) - - def __unicode__(self): - return str(self.obj) - - def __len__(self): - return len(self.obj) - - -class TreeWalker(base.NonRecursiveTreeWalker): - def __init__(self, tree): - # pylint:disable=redefined-variable-type - if isinstance(tree, list): - self.fragmentChildren = set(tree) - tree = FragmentRoot(tree) - else: - self.fragmentChildren = set() - tree = Root(tree) - base.NonRecursiveTreeWalker.__init__(self, tree) - self.filter = _ihatexml.InfosetFilter() - - def getNodeDetails(self, node): - if isinstance(node, tuple): # Text node - node, key = node - assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key - return base.TEXT, ensure_str(getattr(node, key)) - - elif isinstance(node, Root): - return (base.DOCUMENT,) - - elif isinstance(node, Doctype): - return base.DOCTYPE, node.name, node.public_id, node.system_id - - elif isinstance(node, FragmentWrapper) and not hasattr(node, "tag"): - return base.TEXT, ensure_str(node.obj) - - elif node.tag == etree.Comment: - return base.COMMENT, ensure_str(node.text) - - elif node.tag == etree.Entity: - return base.ENTITY, ensure_str(node.text)[1:-1] # strip &; - - else: - # This is assumed to be an ordinary element - match = tag_regexp.match(ensure_str(node.tag)) - if match: - namespace, tag = match.groups() - else: - namespace = None - tag = ensure_str(node.tag) - attrs = {} - for name, value in list(node.attrib.items()): - name = ensure_str(name) - value = ensure_str(value) - match = tag_regexp.match(name) - if match: - attrs[(match.group(1), match.group(2))] = value - else: - attrs[(None, name)] = value - return (base.ELEMENT, namespace, self.filter.fromXmlName(tag), - attrs, len(node) > 0 or node.text) - - def getFirstChild(self, node): - assert not isinstance(node, tuple), "Text nodes have no children" - - assert len(node) or node.text, "Node has no children" - if node.text: - return 
(node, "text") - else: - return node[0] - - def getNextSibling(self, node): - if isinstance(node, tuple): # Text node - node, key = node - assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key - if key == "text": - # XXX: we cannot use a "bool(node) and node[0] or None" construct here - # because node[0] might evaluate to False if it has no child element - if len(node): - return node[0] - else: - return None - else: # tail - return node.getnext() - - return (node, "tail") if node.tail else node.getnext() - - def getParentNode(self, node): - if isinstance(node, tuple): # Text node - node, key = node - assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key - if key == "text": - return node - # else: fallback to "normal" processing - elif node in self.fragmentChildren: - return None - - return node.getparent() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treewalkers/genshi.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treewalkers/genshi.py deleted file mode 100644 index 7483be2..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treewalkers/genshi.py +++ /dev/null @@ -1,69 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from genshi.core import QName -from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT -from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT - -from . import base - -from ..constants import voidElements, namespaces - - -class TreeWalker(base.TreeWalker): - def __iter__(self): - # Buffer the events so we can pass in the following one - previous = None - for event in self.tree: - if previous is not None: - for token in self.tokens(previous, event): - yield token - previous = event - - # Don't forget the final event! 
- if previous is not None: - for token in self.tokens(previous, None): - yield token - - def tokens(self, event, next): - kind, data, _ = event - if kind == START: - tag, attribs = data - name = tag.localname - namespace = tag.namespace - converted_attribs = {} - for k, v in attribs: - if isinstance(k, QName): - converted_attribs[(k.namespace, k.localname)] = v - else: - converted_attribs[(None, k)] = v - - if namespace == namespaces["html"] and name in voidElements: - for token in self.emptyTag(namespace, name, converted_attribs, - not next or next[0] != END or - next[1] != tag): - yield token - else: - yield self.startTag(namespace, name, converted_attribs) - - elif kind == END: - name = data.localname - namespace = data.namespace - if namespace != namespaces["html"] or name not in voidElements: - yield self.endTag(namespace, name) - - elif kind == COMMENT: - yield self.comment(data) - - elif kind == TEXT: - for token in self.text(data): - yield token - - elif kind == DOCTYPE: - yield self.doctype(*data) - - elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS, - START_CDATA, END_CDATA, PI): - pass - - else: - yield self.unknown(kind) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/idna/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/idna/__init__.py deleted file mode 100644 index 847bf93..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/idna/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .package_data import __version__ -from .core import * diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/idna/codec.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/idna/codec.py deleted file mode 100644 index 98c65ea..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/idna/codec.py +++ /dev/null @@ -1,118 +0,0 @@ -from .core import encode, decode, alabel, ulabel, IDNAError -import codecs -import re - 
-_unicode_dots_re = re.compile(u'[\u002e\u3002\uff0e\uff61]') - -class Codec(codecs.Codec): - - def encode(self, data, errors='strict'): - - if errors != 'strict': - raise IDNAError("Unsupported error handling \"{0}\"".format(errors)) - - if not data: - return "", 0 - - return encode(data), len(data) - - def decode(self, data, errors='strict'): - - if errors != 'strict': - raise IDNAError("Unsupported error handling \"{0}\"".format(errors)) - - if not data: - return u"", 0 - - return decode(data), len(data) - -class IncrementalEncoder(codecs.BufferedIncrementalEncoder): - def _buffer_encode(self, data, errors, final): - if errors != 'strict': - raise IDNAError("Unsupported error handling \"{0}\"".format(errors)) - - if not data: - return ("", 0) - - labels = _unicode_dots_re.split(data) - trailing_dot = u'' - if labels: - if not labels[-1]: - trailing_dot = '.' - del labels[-1] - elif not final: - # Keep potentially unfinished label until the next call - del labels[-1] - if labels: - trailing_dot = '.' - - result = [] - size = 0 - for label in labels: - result.append(alabel(label)) - if size: - size += 1 - size += len(label) - - # Join with U+002E - result = ".".join(result) + trailing_dot - size += len(trailing_dot) - return (result, size) - -class IncrementalDecoder(codecs.BufferedIncrementalDecoder): - def _buffer_decode(self, data, errors, final): - if errors != 'strict': - raise IDNAError("Unsupported error handling \"{0}\"".format(errors)) - - if not data: - return (u"", 0) - - # IDNA allows decoding to operate on Unicode strings, too. - if isinstance(data, unicode): - labels = _unicode_dots_re.split(data) - else: - # Must be ASCII string - data = str(data) - unicode(data, "ascii") - labels = data.split(".") - - trailing_dot = u'' - if labels: - if not labels[-1]: - trailing_dot = u'.' - del labels[-1] - elif not final: - # Keep potentially unfinished label until the next call - del labels[-1] - if labels: - trailing_dot = u'.' 
- - result = [] - size = 0 - for label in labels: - result.append(ulabel(label)) - if size: - size += 1 - size += len(label) - - result = u".".join(result) + trailing_dot - size += len(trailing_dot) - return (result, size) - - -class StreamWriter(Codec, codecs.StreamWriter): - pass - -class StreamReader(Codec, codecs.StreamReader): - pass - -def getregentry(): - return codecs.CodecInfo( - name='idna', - encode=Codec().encode, - decode=Codec().decode, - incrementalencoder=IncrementalEncoder, - incrementaldecoder=IncrementalDecoder, - streamwriter=StreamWriter, - streamreader=StreamReader, - ) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/idna/compat.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/idna/compat.py deleted file mode 100644 index 4d47f33..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/idna/compat.py +++ /dev/null @@ -1,12 +0,0 @@ -from .core import * -from .codec import * - -def ToASCII(label): - return encode(label) - -def ToUnicode(label): - return decode(label) - -def nameprep(s): - raise NotImplementedError("IDNA 2008 does not utilise nameprep protocol") - diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/idna/core.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/idna/core.py deleted file mode 100644 index 104624a..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/idna/core.py +++ /dev/null @@ -1,396 +0,0 @@ -from . 
import idnadata -import bisect -import unicodedata -import re -import sys -from .intranges import intranges_contain - -_virama_combining_class = 9 -_alabel_prefix = b'xn--' -_unicode_dots_re = re.compile(u'[\u002e\u3002\uff0e\uff61]') - -if sys.version_info[0] == 3: - unicode = str - unichr = chr - -class IDNAError(UnicodeError): - """ Base exception for all IDNA-encoding related problems """ - pass - - -class IDNABidiError(IDNAError): - """ Exception when bidirectional requirements are not satisfied """ - pass - - -class InvalidCodepoint(IDNAError): - """ Exception when a disallowed or unallocated codepoint is used """ - pass - - -class InvalidCodepointContext(IDNAError): - """ Exception when the codepoint is not valid in the context it is used """ - pass - - -def _combining_class(cp): - v = unicodedata.combining(unichr(cp)) - if v == 0: - if not unicodedata.name(unichr(cp)): - raise ValueError("Unknown character in unicodedata") - return v - -def _is_script(cp, script): - return intranges_contain(ord(cp), idnadata.scripts[script]) - -def _punycode(s): - return s.encode('punycode') - -def _unot(s): - return 'U+{0:04X}'.format(s) - - -def valid_label_length(label): - - if len(label) > 63: - return False - return True - - -def valid_string_length(label, trailing_dot): - - if len(label) > (254 if trailing_dot else 253): - return False - return True - - -def check_bidi(label, check_ltr=False): - - # Bidi rules should only be applied if string contains RTL characters - bidi_label = False - for (idx, cp) in enumerate(label, 1): - direction = unicodedata.bidirectional(cp) - if direction == '': - # String likely comes from a newer version of Unicode - raise IDNABidiError('Unknown directionality in label {0} at position {1}'.format(repr(label), idx)) - if direction in ['R', 'AL', 'AN']: - bidi_label = True - if not bidi_label and not check_ltr: - return True - - # Bidi rule 1 - direction = unicodedata.bidirectional(label[0]) - if direction in ['R', 'AL']: - rtl = True - 
elif direction == 'L': - rtl = False - else: - raise IDNABidiError('First codepoint in label {0} must be directionality L, R or AL'.format(repr(label))) - - valid_ending = False - number_type = False - for (idx, cp) in enumerate(label, 1): - direction = unicodedata.bidirectional(cp) - - if rtl: - # Bidi rule 2 - if not direction in ['R', 'AL', 'AN', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']: - raise IDNABidiError('Invalid direction for codepoint at position {0} in a right-to-left label'.format(idx)) - # Bidi rule 3 - if direction in ['R', 'AL', 'EN', 'AN']: - valid_ending = True - elif direction != 'NSM': - valid_ending = False - # Bidi rule 4 - if direction in ['AN', 'EN']: - if not number_type: - number_type = direction - else: - if number_type != direction: - raise IDNABidiError('Can not mix numeral types in a right-to-left label') - else: - # Bidi rule 5 - if not direction in ['L', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']: - raise IDNABidiError('Invalid direction for codepoint at position {0} in a left-to-right label'.format(idx)) - # Bidi rule 6 - if direction in ['L', 'EN']: - valid_ending = True - elif direction != 'NSM': - valid_ending = False - - if not valid_ending: - raise IDNABidiError('Label ends with illegal codepoint directionality') - - return True - - -def check_initial_combiner(label): - - if unicodedata.category(label[0])[0] == 'M': - raise IDNAError('Label begins with an illegal combining character') - return True - - -def check_hyphen_ok(label): - - if label[2:4] == '--': - raise IDNAError('Label has disallowed hyphens in 3rd and 4th position') - if label[0] == '-' or label[-1] == '-': - raise IDNAError('Label must not start or end with a hyphen') - return True - - -def check_nfc(label): - - if unicodedata.normalize('NFC', label) != label: - raise IDNAError('Label must be in Normalization Form C') - - -def valid_contextj(label, pos): - - cp_value = ord(label[pos]) - - if cp_value == 0x200c: - - if pos > 0: - if _combining_class(ord(label[pos 
- 1])) == _virama_combining_class: - return True - - ok = False - for i in range(pos-1, -1, -1): - joining_type = idnadata.joining_types.get(ord(label[i])) - if joining_type == ord('T'): - continue - if joining_type in [ord('L'), ord('D')]: - ok = True - break - - if not ok: - return False - - ok = False - for i in range(pos+1, len(label)): - joining_type = idnadata.joining_types.get(ord(label[i])) - if joining_type == ord('T'): - continue - if joining_type in [ord('R'), ord('D')]: - ok = True - break - return ok - - if cp_value == 0x200d: - - if pos > 0: - if _combining_class(ord(label[pos - 1])) == _virama_combining_class: - return True - return False - - else: - - return False - - -def valid_contexto(label, pos, exception=False): - - cp_value = ord(label[pos]) - - if cp_value == 0x00b7: - if 0 < pos < len(label)-1: - if ord(label[pos - 1]) == 0x006c and ord(label[pos + 1]) == 0x006c: - return True - return False - - elif cp_value == 0x0375: - if pos < len(label)-1 and len(label) > 1: - return _is_script(label[pos + 1], 'Greek') - return False - - elif cp_value == 0x05f3 or cp_value == 0x05f4: - if pos > 0: - return _is_script(label[pos - 1], 'Hebrew') - return False - - elif cp_value == 0x30fb: - for cp in label: - if cp == u'\u30fb': - continue - if _is_script(cp, 'Hiragana') or _is_script(cp, 'Katakana') or _is_script(cp, 'Han'): - return True - return False - - elif 0x660 <= cp_value <= 0x669: - for cp in label: - if 0x6f0 <= ord(cp) <= 0x06f9: - return False - return True - - elif 0x6f0 <= cp_value <= 0x6f9: - for cp in label: - if 0x660 <= ord(cp) <= 0x0669: - return False - return True - - -def check_label(label): - - if isinstance(label, (bytes, bytearray)): - label = label.decode('utf-8') - if len(label) == 0: - raise IDNAError('Empty Label') - - check_nfc(label) - check_hyphen_ok(label) - check_initial_combiner(label) - - for (pos, cp) in enumerate(label): - cp_value = ord(cp) - if intranges_contain(cp_value, idnadata.codepoint_classes['PVALID']): - 
continue - elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTJ']): - try: - if not valid_contextj(label, pos): - raise InvalidCodepointContext('Joiner {0} not allowed at position {1} in {2}'.format( - _unot(cp_value), pos+1, repr(label))) - except ValueError: - raise IDNAError('Unknown codepoint adjacent to joiner {0} at position {1} in {2}'.format( - _unot(cp_value), pos+1, repr(label))) - elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTO']): - if not valid_contexto(label, pos): - raise InvalidCodepointContext('Codepoint {0} not allowed at position {1} in {2}'.format(_unot(cp_value), pos+1, repr(label))) - else: - raise InvalidCodepoint('Codepoint {0} at position {1} of {2} not allowed'.format(_unot(cp_value), pos+1, repr(label))) - - check_bidi(label) - - -def alabel(label): - - try: - label = label.encode('ascii') - ulabel(label) - if not valid_label_length(label): - raise IDNAError('Label too long') - return label - except UnicodeEncodeError: - pass - - if not label: - raise IDNAError('No Input') - - label = unicode(label) - check_label(label) - label = _punycode(label) - label = _alabel_prefix + label - - if not valid_label_length(label): - raise IDNAError('Label too long') - - return label - - -def ulabel(label): - - if not isinstance(label, (bytes, bytearray)): - try: - label = label.encode('ascii') - except UnicodeEncodeError: - check_label(label) - return label - - label = label.lower() - if label.startswith(_alabel_prefix): - label = label[len(_alabel_prefix):] - else: - check_label(label) - return label.decode('ascii') - - label = label.decode('punycode') - check_label(label) - return label - - -def uts46_remap(domain, std3_rules=True, transitional=False): - """Re-map the characters in the string according to UTS46 processing.""" - from .uts46data import uts46data - output = u"" - try: - for pos, char in enumerate(domain): - code_point = ord(char) - uts46row = uts46data[code_point if code_point < 256 else - 
bisect.bisect_left(uts46data, (code_point, "Z")) - 1] - status = uts46row[1] - replacement = uts46row[2] if len(uts46row) == 3 else None - if (status == "V" or - (status == "D" and not transitional) or - (status == "3" and not std3_rules and replacement is None)): - output += char - elif replacement is not None and (status == "M" or - (status == "3" and not std3_rules) or - (status == "D" and transitional)): - output += replacement - elif status != "I": - raise IndexError() - return unicodedata.normalize("NFC", output) - except IndexError: - raise InvalidCodepoint( - "Codepoint {0} not allowed at position {1} in {2}".format( - _unot(code_point), pos + 1, repr(domain))) - - -def encode(s, strict=False, uts46=False, std3_rules=False, transitional=False): - - if isinstance(s, (bytes, bytearray)): - s = s.decode("ascii") - if uts46: - s = uts46_remap(s, std3_rules, transitional) - trailing_dot = False - result = [] - if strict: - labels = s.split('.') - else: - labels = _unicode_dots_re.split(s) - if not labels or labels == ['']: - raise IDNAError('Empty domain') - if labels[-1] == '': - del labels[-1] - trailing_dot = True - for label in labels: - s = alabel(label) - if s: - result.append(s) - else: - raise IDNAError('Empty label') - if trailing_dot: - result.append(b'') - s = b'.'.join(result) - if not valid_string_length(s, trailing_dot): - raise IDNAError('Domain too long') - return s - - -def decode(s, strict=False, uts46=False, std3_rules=False): - - if isinstance(s, (bytes, bytearray)): - s = s.decode("ascii") - if uts46: - s = uts46_remap(s, std3_rules, False) - trailing_dot = False - result = [] - if not strict: - labels = _unicode_dots_re.split(s) - else: - labels = s.split(u'.') - if not labels or labels == ['']: - raise IDNAError('Empty domain') - if not labels[-1]: - del labels[-1] - trailing_dot = True - for label in labels: - s = ulabel(label) - if s: - result.append(s) - else: - raise IDNAError('Empty label') - if trailing_dot: - result.append(u'') - 
return u'.'.join(result) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/idna/idnadata.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/idna/idnadata.py deleted file mode 100644 index a80c959..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/idna/idnadata.py +++ /dev/null @@ -1,1979 +0,0 @@ -# This file is automatically generated by tools/idna-data - -__version__ = "11.0.0" -scripts = { - 'Greek': ( - 0x37000000374, - 0x37500000378, - 0x37a0000037e, - 0x37f00000380, - 0x38400000385, - 0x38600000387, - 0x3880000038b, - 0x38c0000038d, - 0x38e000003a2, - 0x3a3000003e2, - 0x3f000000400, - 0x1d2600001d2b, - 0x1d5d00001d62, - 0x1d6600001d6b, - 0x1dbf00001dc0, - 0x1f0000001f16, - 0x1f1800001f1e, - 0x1f2000001f46, - 0x1f4800001f4e, - 0x1f5000001f58, - 0x1f5900001f5a, - 0x1f5b00001f5c, - 0x1f5d00001f5e, - 0x1f5f00001f7e, - 0x1f8000001fb5, - 0x1fb600001fc5, - 0x1fc600001fd4, - 0x1fd600001fdc, - 0x1fdd00001ff0, - 0x1ff200001ff5, - 0x1ff600001fff, - 0x212600002127, - 0xab650000ab66, - 0x101400001018f, - 0x101a0000101a1, - 0x1d2000001d246, - ), - 'Han': ( - 0x2e8000002e9a, - 0x2e9b00002ef4, - 0x2f0000002fd6, - 0x300500003006, - 0x300700003008, - 0x30210000302a, - 0x30380000303c, - 0x340000004db6, - 0x4e0000009ff0, - 0xf9000000fa6e, - 0xfa700000fada, - 0x200000002a6d7, - 0x2a7000002b735, - 0x2b7400002b81e, - 0x2b8200002cea2, - 0x2ceb00002ebe1, - 0x2f8000002fa1e, - ), - 'Hebrew': ( - 0x591000005c8, - 0x5d0000005eb, - 0x5ef000005f5, - 0xfb1d0000fb37, - 0xfb380000fb3d, - 0xfb3e0000fb3f, - 0xfb400000fb42, - 0xfb430000fb45, - 0xfb460000fb50, - ), - 'Hiragana': ( - 0x304100003097, - 0x309d000030a0, - 0x1b0010001b11f, - 0x1f2000001f201, - ), - 'Katakana': ( - 0x30a1000030fb, - 0x30fd00003100, - 0x31f000003200, - 0x32d0000032ff, - 0x330000003358, - 0xff660000ff70, - 0xff710000ff9e, - 0x1b0000001b001, - ), -} -joining_types = { - 0x600: 85, - 0x601: 85, - 0x602: 85, - 0x603: 85, - 0x604: 85, - 0x605: 85, 
- 0x608: 85, - 0x60b: 85, - 0x620: 68, - 0x621: 85, - 0x622: 82, - 0x623: 82, - 0x624: 82, - 0x625: 82, - 0x626: 68, - 0x627: 82, - 0x628: 68, - 0x629: 82, - 0x62a: 68, - 0x62b: 68, - 0x62c: 68, - 0x62d: 68, - 0x62e: 68, - 0x62f: 82, - 0x630: 82, - 0x631: 82, - 0x632: 82, - 0x633: 68, - 0x634: 68, - 0x635: 68, - 0x636: 68, - 0x637: 68, - 0x638: 68, - 0x639: 68, - 0x63a: 68, - 0x63b: 68, - 0x63c: 68, - 0x63d: 68, - 0x63e: 68, - 0x63f: 68, - 0x640: 67, - 0x641: 68, - 0x642: 68, - 0x643: 68, - 0x644: 68, - 0x645: 68, - 0x646: 68, - 0x647: 68, - 0x648: 82, - 0x649: 68, - 0x64a: 68, - 0x66e: 68, - 0x66f: 68, - 0x671: 82, - 0x672: 82, - 0x673: 82, - 0x674: 85, - 0x675: 82, - 0x676: 82, - 0x677: 82, - 0x678: 68, - 0x679: 68, - 0x67a: 68, - 0x67b: 68, - 0x67c: 68, - 0x67d: 68, - 0x67e: 68, - 0x67f: 68, - 0x680: 68, - 0x681: 68, - 0x682: 68, - 0x683: 68, - 0x684: 68, - 0x685: 68, - 0x686: 68, - 0x687: 68, - 0x688: 82, - 0x689: 82, - 0x68a: 82, - 0x68b: 82, - 0x68c: 82, - 0x68d: 82, - 0x68e: 82, - 0x68f: 82, - 0x690: 82, - 0x691: 82, - 0x692: 82, - 0x693: 82, - 0x694: 82, - 0x695: 82, - 0x696: 82, - 0x697: 82, - 0x698: 82, - 0x699: 82, - 0x69a: 68, - 0x69b: 68, - 0x69c: 68, - 0x69d: 68, - 0x69e: 68, - 0x69f: 68, - 0x6a0: 68, - 0x6a1: 68, - 0x6a2: 68, - 0x6a3: 68, - 0x6a4: 68, - 0x6a5: 68, - 0x6a6: 68, - 0x6a7: 68, - 0x6a8: 68, - 0x6a9: 68, - 0x6aa: 68, - 0x6ab: 68, - 0x6ac: 68, - 0x6ad: 68, - 0x6ae: 68, - 0x6af: 68, - 0x6b0: 68, - 0x6b1: 68, - 0x6b2: 68, - 0x6b3: 68, - 0x6b4: 68, - 0x6b5: 68, - 0x6b6: 68, - 0x6b7: 68, - 0x6b8: 68, - 0x6b9: 68, - 0x6ba: 68, - 0x6bb: 68, - 0x6bc: 68, - 0x6bd: 68, - 0x6be: 68, - 0x6bf: 68, - 0x6c0: 82, - 0x6c1: 68, - 0x6c2: 68, - 0x6c3: 82, - 0x6c4: 82, - 0x6c5: 82, - 0x6c6: 82, - 0x6c7: 82, - 0x6c8: 82, - 0x6c9: 82, - 0x6ca: 82, - 0x6cb: 82, - 0x6cc: 68, - 0x6cd: 82, - 0x6ce: 68, - 0x6cf: 82, - 0x6d0: 68, - 0x6d1: 68, - 0x6d2: 82, - 0x6d3: 82, - 0x6d5: 82, - 0x6dd: 85, - 0x6ee: 82, - 0x6ef: 82, - 0x6fa: 68, - 0x6fb: 68, - 0x6fc: 68, - 0x6ff: 
68, - 0x70f: 84, - 0x710: 82, - 0x712: 68, - 0x713: 68, - 0x714: 68, - 0x715: 82, - 0x716: 82, - 0x717: 82, - 0x718: 82, - 0x719: 82, - 0x71a: 68, - 0x71b: 68, - 0x71c: 68, - 0x71d: 68, - 0x71e: 82, - 0x71f: 68, - 0x720: 68, - 0x721: 68, - 0x722: 68, - 0x723: 68, - 0x724: 68, - 0x725: 68, - 0x726: 68, - 0x727: 68, - 0x728: 82, - 0x729: 68, - 0x72a: 82, - 0x72b: 68, - 0x72c: 82, - 0x72d: 68, - 0x72e: 68, - 0x72f: 82, - 0x74d: 82, - 0x74e: 68, - 0x74f: 68, - 0x750: 68, - 0x751: 68, - 0x752: 68, - 0x753: 68, - 0x754: 68, - 0x755: 68, - 0x756: 68, - 0x757: 68, - 0x758: 68, - 0x759: 82, - 0x75a: 82, - 0x75b: 82, - 0x75c: 68, - 0x75d: 68, - 0x75e: 68, - 0x75f: 68, - 0x760: 68, - 0x761: 68, - 0x762: 68, - 0x763: 68, - 0x764: 68, - 0x765: 68, - 0x766: 68, - 0x767: 68, - 0x768: 68, - 0x769: 68, - 0x76a: 68, - 0x76b: 82, - 0x76c: 82, - 0x76d: 68, - 0x76e: 68, - 0x76f: 68, - 0x770: 68, - 0x771: 82, - 0x772: 68, - 0x773: 82, - 0x774: 82, - 0x775: 68, - 0x776: 68, - 0x777: 68, - 0x778: 82, - 0x779: 82, - 0x77a: 68, - 0x77b: 68, - 0x77c: 68, - 0x77d: 68, - 0x77e: 68, - 0x77f: 68, - 0x7ca: 68, - 0x7cb: 68, - 0x7cc: 68, - 0x7cd: 68, - 0x7ce: 68, - 0x7cf: 68, - 0x7d0: 68, - 0x7d1: 68, - 0x7d2: 68, - 0x7d3: 68, - 0x7d4: 68, - 0x7d5: 68, - 0x7d6: 68, - 0x7d7: 68, - 0x7d8: 68, - 0x7d9: 68, - 0x7da: 68, - 0x7db: 68, - 0x7dc: 68, - 0x7dd: 68, - 0x7de: 68, - 0x7df: 68, - 0x7e0: 68, - 0x7e1: 68, - 0x7e2: 68, - 0x7e3: 68, - 0x7e4: 68, - 0x7e5: 68, - 0x7e6: 68, - 0x7e7: 68, - 0x7e8: 68, - 0x7e9: 68, - 0x7ea: 68, - 0x7fa: 67, - 0x840: 82, - 0x841: 68, - 0x842: 68, - 0x843: 68, - 0x844: 68, - 0x845: 68, - 0x846: 82, - 0x847: 82, - 0x848: 68, - 0x849: 82, - 0x84a: 68, - 0x84b: 68, - 0x84c: 68, - 0x84d: 68, - 0x84e: 68, - 0x84f: 68, - 0x850: 68, - 0x851: 68, - 0x852: 68, - 0x853: 68, - 0x854: 82, - 0x855: 68, - 0x856: 85, - 0x857: 85, - 0x858: 85, - 0x860: 68, - 0x861: 85, - 0x862: 68, - 0x863: 68, - 0x864: 68, - 0x865: 68, - 0x866: 85, - 0x867: 82, - 0x868: 68, - 0x869: 82, - 0x86a: 82, - 
0x8a0: 68, - 0x8a1: 68, - 0x8a2: 68, - 0x8a3: 68, - 0x8a4: 68, - 0x8a5: 68, - 0x8a6: 68, - 0x8a7: 68, - 0x8a8: 68, - 0x8a9: 68, - 0x8aa: 82, - 0x8ab: 82, - 0x8ac: 82, - 0x8ad: 85, - 0x8ae: 82, - 0x8af: 68, - 0x8b0: 68, - 0x8b1: 82, - 0x8b2: 82, - 0x8b3: 68, - 0x8b4: 68, - 0x8b6: 68, - 0x8b7: 68, - 0x8b8: 68, - 0x8b9: 82, - 0x8ba: 68, - 0x8bb: 68, - 0x8bc: 68, - 0x8bd: 68, - 0x8e2: 85, - 0x1806: 85, - 0x1807: 68, - 0x180a: 67, - 0x180e: 85, - 0x1820: 68, - 0x1821: 68, - 0x1822: 68, - 0x1823: 68, - 0x1824: 68, - 0x1825: 68, - 0x1826: 68, - 0x1827: 68, - 0x1828: 68, - 0x1829: 68, - 0x182a: 68, - 0x182b: 68, - 0x182c: 68, - 0x182d: 68, - 0x182e: 68, - 0x182f: 68, - 0x1830: 68, - 0x1831: 68, - 0x1832: 68, - 0x1833: 68, - 0x1834: 68, - 0x1835: 68, - 0x1836: 68, - 0x1837: 68, - 0x1838: 68, - 0x1839: 68, - 0x183a: 68, - 0x183b: 68, - 0x183c: 68, - 0x183d: 68, - 0x183e: 68, - 0x183f: 68, - 0x1840: 68, - 0x1841: 68, - 0x1842: 68, - 0x1843: 68, - 0x1844: 68, - 0x1845: 68, - 0x1846: 68, - 0x1847: 68, - 0x1848: 68, - 0x1849: 68, - 0x184a: 68, - 0x184b: 68, - 0x184c: 68, - 0x184d: 68, - 0x184e: 68, - 0x184f: 68, - 0x1850: 68, - 0x1851: 68, - 0x1852: 68, - 0x1853: 68, - 0x1854: 68, - 0x1855: 68, - 0x1856: 68, - 0x1857: 68, - 0x1858: 68, - 0x1859: 68, - 0x185a: 68, - 0x185b: 68, - 0x185c: 68, - 0x185d: 68, - 0x185e: 68, - 0x185f: 68, - 0x1860: 68, - 0x1861: 68, - 0x1862: 68, - 0x1863: 68, - 0x1864: 68, - 0x1865: 68, - 0x1866: 68, - 0x1867: 68, - 0x1868: 68, - 0x1869: 68, - 0x186a: 68, - 0x186b: 68, - 0x186c: 68, - 0x186d: 68, - 0x186e: 68, - 0x186f: 68, - 0x1870: 68, - 0x1871: 68, - 0x1872: 68, - 0x1873: 68, - 0x1874: 68, - 0x1875: 68, - 0x1876: 68, - 0x1877: 68, - 0x1878: 68, - 0x1880: 85, - 0x1881: 85, - 0x1882: 85, - 0x1883: 85, - 0x1884: 85, - 0x1885: 84, - 0x1886: 84, - 0x1887: 68, - 0x1888: 68, - 0x1889: 68, - 0x188a: 68, - 0x188b: 68, - 0x188c: 68, - 0x188d: 68, - 0x188e: 68, - 0x188f: 68, - 0x1890: 68, - 0x1891: 68, - 0x1892: 68, - 0x1893: 68, - 0x1894: 68, - 0x1895: 68, - 
0x1896: 68, - 0x1897: 68, - 0x1898: 68, - 0x1899: 68, - 0x189a: 68, - 0x189b: 68, - 0x189c: 68, - 0x189d: 68, - 0x189e: 68, - 0x189f: 68, - 0x18a0: 68, - 0x18a1: 68, - 0x18a2: 68, - 0x18a3: 68, - 0x18a4: 68, - 0x18a5: 68, - 0x18a6: 68, - 0x18a7: 68, - 0x18a8: 68, - 0x18aa: 68, - 0x200c: 85, - 0x200d: 67, - 0x202f: 85, - 0x2066: 85, - 0x2067: 85, - 0x2068: 85, - 0x2069: 85, - 0xa840: 68, - 0xa841: 68, - 0xa842: 68, - 0xa843: 68, - 0xa844: 68, - 0xa845: 68, - 0xa846: 68, - 0xa847: 68, - 0xa848: 68, - 0xa849: 68, - 0xa84a: 68, - 0xa84b: 68, - 0xa84c: 68, - 0xa84d: 68, - 0xa84e: 68, - 0xa84f: 68, - 0xa850: 68, - 0xa851: 68, - 0xa852: 68, - 0xa853: 68, - 0xa854: 68, - 0xa855: 68, - 0xa856: 68, - 0xa857: 68, - 0xa858: 68, - 0xa859: 68, - 0xa85a: 68, - 0xa85b: 68, - 0xa85c: 68, - 0xa85d: 68, - 0xa85e: 68, - 0xa85f: 68, - 0xa860: 68, - 0xa861: 68, - 0xa862: 68, - 0xa863: 68, - 0xa864: 68, - 0xa865: 68, - 0xa866: 68, - 0xa867: 68, - 0xa868: 68, - 0xa869: 68, - 0xa86a: 68, - 0xa86b: 68, - 0xa86c: 68, - 0xa86d: 68, - 0xa86e: 68, - 0xa86f: 68, - 0xa870: 68, - 0xa871: 68, - 0xa872: 76, - 0xa873: 85, - 0x10ac0: 68, - 0x10ac1: 68, - 0x10ac2: 68, - 0x10ac3: 68, - 0x10ac4: 68, - 0x10ac5: 82, - 0x10ac6: 85, - 0x10ac7: 82, - 0x10ac8: 85, - 0x10ac9: 82, - 0x10aca: 82, - 0x10acb: 85, - 0x10acc: 85, - 0x10acd: 76, - 0x10ace: 82, - 0x10acf: 82, - 0x10ad0: 82, - 0x10ad1: 82, - 0x10ad2: 82, - 0x10ad3: 68, - 0x10ad4: 68, - 0x10ad5: 68, - 0x10ad6: 68, - 0x10ad7: 76, - 0x10ad8: 68, - 0x10ad9: 68, - 0x10ada: 68, - 0x10adb: 68, - 0x10adc: 68, - 0x10add: 82, - 0x10ade: 68, - 0x10adf: 68, - 0x10ae0: 68, - 0x10ae1: 82, - 0x10ae2: 85, - 0x10ae3: 85, - 0x10ae4: 82, - 0x10aeb: 68, - 0x10aec: 68, - 0x10aed: 68, - 0x10aee: 68, - 0x10aef: 82, - 0x10b80: 68, - 0x10b81: 82, - 0x10b82: 68, - 0x10b83: 82, - 0x10b84: 82, - 0x10b85: 82, - 0x10b86: 68, - 0x10b87: 68, - 0x10b88: 68, - 0x10b89: 82, - 0x10b8a: 68, - 0x10b8b: 68, - 0x10b8c: 82, - 0x10b8d: 68, - 0x10b8e: 82, - 0x10b8f: 82, - 0x10b90: 68, - 0x10b91: 
82, - 0x10ba9: 82, - 0x10baa: 82, - 0x10bab: 82, - 0x10bac: 82, - 0x10bad: 68, - 0x10bae: 68, - 0x10baf: 85, - 0x10d00: 76, - 0x10d01: 68, - 0x10d02: 68, - 0x10d03: 68, - 0x10d04: 68, - 0x10d05: 68, - 0x10d06: 68, - 0x10d07: 68, - 0x10d08: 68, - 0x10d09: 68, - 0x10d0a: 68, - 0x10d0b: 68, - 0x10d0c: 68, - 0x10d0d: 68, - 0x10d0e: 68, - 0x10d0f: 68, - 0x10d10: 68, - 0x10d11: 68, - 0x10d12: 68, - 0x10d13: 68, - 0x10d14: 68, - 0x10d15: 68, - 0x10d16: 68, - 0x10d17: 68, - 0x10d18: 68, - 0x10d19: 68, - 0x10d1a: 68, - 0x10d1b: 68, - 0x10d1c: 68, - 0x10d1d: 68, - 0x10d1e: 68, - 0x10d1f: 68, - 0x10d20: 68, - 0x10d21: 68, - 0x10d22: 82, - 0x10d23: 68, - 0x10f30: 68, - 0x10f31: 68, - 0x10f32: 68, - 0x10f33: 82, - 0x10f34: 68, - 0x10f35: 68, - 0x10f36: 68, - 0x10f37: 68, - 0x10f38: 68, - 0x10f39: 68, - 0x10f3a: 68, - 0x10f3b: 68, - 0x10f3c: 68, - 0x10f3d: 68, - 0x10f3e: 68, - 0x10f3f: 68, - 0x10f40: 68, - 0x10f41: 68, - 0x10f42: 68, - 0x10f43: 68, - 0x10f44: 68, - 0x10f45: 85, - 0x10f51: 68, - 0x10f52: 68, - 0x10f53: 68, - 0x10f54: 82, - 0x110bd: 85, - 0x110cd: 85, - 0x1e900: 68, - 0x1e901: 68, - 0x1e902: 68, - 0x1e903: 68, - 0x1e904: 68, - 0x1e905: 68, - 0x1e906: 68, - 0x1e907: 68, - 0x1e908: 68, - 0x1e909: 68, - 0x1e90a: 68, - 0x1e90b: 68, - 0x1e90c: 68, - 0x1e90d: 68, - 0x1e90e: 68, - 0x1e90f: 68, - 0x1e910: 68, - 0x1e911: 68, - 0x1e912: 68, - 0x1e913: 68, - 0x1e914: 68, - 0x1e915: 68, - 0x1e916: 68, - 0x1e917: 68, - 0x1e918: 68, - 0x1e919: 68, - 0x1e91a: 68, - 0x1e91b: 68, - 0x1e91c: 68, - 0x1e91d: 68, - 0x1e91e: 68, - 0x1e91f: 68, - 0x1e920: 68, - 0x1e921: 68, - 0x1e922: 68, - 0x1e923: 68, - 0x1e924: 68, - 0x1e925: 68, - 0x1e926: 68, - 0x1e927: 68, - 0x1e928: 68, - 0x1e929: 68, - 0x1e92a: 68, - 0x1e92b: 68, - 0x1e92c: 68, - 0x1e92d: 68, - 0x1e92e: 68, - 0x1e92f: 68, - 0x1e930: 68, - 0x1e931: 68, - 0x1e932: 68, - 0x1e933: 68, - 0x1e934: 68, - 0x1e935: 68, - 0x1e936: 68, - 0x1e937: 68, - 0x1e938: 68, - 0x1e939: 68, - 0x1e93a: 68, - 0x1e93b: 68, - 0x1e93c: 68, - 0x1e93d: 68, 
- 0x1e93e: 68, - 0x1e93f: 68, - 0x1e940: 68, - 0x1e941: 68, - 0x1e942: 68, - 0x1e943: 68, -} -codepoint_classes = { - 'PVALID': ( - 0x2d0000002e, - 0x300000003a, - 0x610000007b, - 0xdf000000f7, - 0xf800000100, - 0x10100000102, - 0x10300000104, - 0x10500000106, - 0x10700000108, - 0x1090000010a, - 0x10b0000010c, - 0x10d0000010e, - 0x10f00000110, - 0x11100000112, - 0x11300000114, - 0x11500000116, - 0x11700000118, - 0x1190000011a, - 0x11b0000011c, - 0x11d0000011e, - 0x11f00000120, - 0x12100000122, - 0x12300000124, - 0x12500000126, - 0x12700000128, - 0x1290000012a, - 0x12b0000012c, - 0x12d0000012e, - 0x12f00000130, - 0x13100000132, - 0x13500000136, - 0x13700000139, - 0x13a0000013b, - 0x13c0000013d, - 0x13e0000013f, - 0x14200000143, - 0x14400000145, - 0x14600000147, - 0x14800000149, - 0x14b0000014c, - 0x14d0000014e, - 0x14f00000150, - 0x15100000152, - 0x15300000154, - 0x15500000156, - 0x15700000158, - 0x1590000015a, - 0x15b0000015c, - 0x15d0000015e, - 0x15f00000160, - 0x16100000162, - 0x16300000164, - 0x16500000166, - 0x16700000168, - 0x1690000016a, - 0x16b0000016c, - 0x16d0000016e, - 0x16f00000170, - 0x17100000172, - 0x17300000174, - 0x17500000176, - 0x17700000178, - 0x17a0000017b, - 0x17c0000017d, - 0x17e0000017f, - 0x18000000181, - 0x18300000184, - 0x18500000186, - 0x18800000189, - 0x18c0000018e, - 0x19200000193, - 0x19500000196, - 0x1990000019c, - 0x19e0000019f, - 0x1a1000001a2, - 0x1a3000001a4, - 0x1a5000001a6, - 0x1a8000001a9, - 0x1aa000001ac, - 0x1ad000001ae, - 0x1b0000001b1, - 0x1b4000001b5, - 0x1b6000001b7, - 0x1b9000001bc, - 0x1bd000001c4, - 0x1ce000001cf, - 0x1d0000001d1, - 0x1d2000001d3, - 0x1d4000001d5, - 0x1d6000001d7, - 0x1d8000001d9, - 0x1da000001db, - 0x1dc000001de, - 0x1df000001e0, - 0x1e1000001e2, - 0x1e3000001e4, - 0x1e5000001e6, - 0x1e7000001e8, - 0x1e9000001ea, - 0x1eb000001ec, - 0x1ed000001ee, - 0x1ef000001f1, - 0x1f5000001f6, - 0x1f9000001fa, - 0x1fb000001fc, - 0x1fd000001fe, - 0x1ff00000200, - 0x20100000202, - 0x20300000204, - 0x20500000206, - 
0x20700000208, - 0x2090000020a, - 0x20b0000020c, - 0x20d0000020e, - 0x20f00000210, - 0x21100000212, - 0x21300000214, - 0x21500000216, - 0x21700000218, - 0x2190000021a, - 0x21b0000021c, - 0x21d0000021e, - 0x21f00000220, - 0x22100000222, - 0x22300000224, - 0x22500000226, - 0x22700000228, - 0x2290000022a, - 0x22b0000022c, - 0x22d0000022e, - 0x22f00000230, - 0x23100000232, - 0x2330000023a, - 0x23c0000023d, - 0x23f00000241, - 0x24200000243, - 0x24700000248, - 0x2490000024a, - 0x24b0000024c, - 0x24d0000024e, - 0x24f000002b0, - 0x2b9000002c2, - 0x2c6000002d2, - 0x2ec000002ed, - 0x2ee000002ef, - 0x30000000340, - 0x34200000343, - 0x3460000034f, - 0x35000000370, - 0x37100000372, - 0x37300000374, - 0x37700000378, - 0x37b0000037e, - 0x39000000391, - 0x3ac000003cf, - 0x3d7000003d8, - 0x3d9000003da, - 0x3db000003dc, - 0x3dd000003de, - 0x3df000003e0, - 0x3e1000003e2, - 0x3e3000003e4, - 0x3e5000003e6, - 0x3e7000003e8, - 0x3e9000003ea, - 0x3eb000003ec, - 0x3ed000003ee, - 0x3ef000003f0, - 0x3f3000003f4, - 0x3f8000003f9, - 0x3fb000003fd, - 0x43000000460, - 0x46100000462, - 0x46300000464, - 0x46500000466, - 0x46700000468, - 0x4690000046a, - 0x46b0000046c, - 0x46d0000046e, - 0x46f00000470, - 0x47100000472, - 0x47300000474, - 0x47500000476, - 0x47700000478, - 0x4790000047a, - 0x47b0000047c, - 0x47d0000047e, - 0x47f00000480, - 0x48100000482, - 0x48300000488, - 0x48b0000048c, - 0x48d0000048e, - 0x48f00000490, - 0x49100000492, - 0x49300000494, - 0x49500000496, - 0x49700000498, - 0x4990000049a, - 0x49b0000049c, - 0x49d0000049e, - 0x49f000004a0, - 0x4a1000004a2, - 0x4a3000004a4, - 0x4a5000004a6, - 0x4a7000004a8, - 0x4a9000004aa, - 0x4ab000004ac, - 0x4ad000004ae, - 0x4af000004b0, - 0x4b1000004b2, - 0x4b3000004b4, - 0x4b5000004b6, - 0x4b7000004b8, - 0x4b9000004ba, - 0x4bb000004bc, - 0x4bd000004be, - 0x4bf000004c0, - 0x4c2000004c3, - 0x4c4000004c5, - 0x4c6000004c7, - 0x4c8000004c9, - 0x4ca000004cb, - 0x4cc000004cd, - 0x4ce000004d0, - 0x4d1000004d2, - 0x4d3000004d4, - 0x4d5000004d6, - 
0x4d7000004d8, - 0x4d9000004da, - 0x4db000004dc, - 0x4dd000004de, - 0x4df000004e0, - 0x4e1000004e2, - 0x4e3000004e4, - 0x4e5000004e6, - 0x4e7000004e8, - 0x4e9000004ea, - 0x4eb000004ec, - 0x4ed000004ee, - 0x4ef000004f0, - 0x4f1000004f2, - 0x4f3000004f4, - 0x4f5000004f6, - 0x4f7000004f8, - 0x4f9000004fa, - 0x4fb000004fc, - 0x4fd000004fe, - 0x4ff00000500, - 0x50100000502, - 0x50300000504, - 0x50500000506, - 0x50700000508, - 0x5090000050a, - 0x50b0000050c, - 0x50d0000050e, - 0x50f00000510, - 0x51100000512, - 0x51300000514, - 0x51500000516, - 0x51700000518, - 0x5190000051a, - 0x51b0000051c, - 0x51d0000051e, - 0x51f00000520, - 0x52100000522, - 0x52300000524, - 0x52500000526, - 0x52700000528, - 0x5290000052a, - 0x52b0000052c, - 0x52d0000052e, - 0x52f00000530, - 0x5590000055a, - 0x56000000587, - 0x58800000589, - 0x591000005be, - 0x5bf000005c0, - 0x5c1000005c3, - 0x5c4000005c6, - 0x5c7000005c8, - 0x5d0000005eb, - 0x5ef000005f3, - 0x6100000061b, - 0x62000000640, - 0x64100000660, - 0x66e00000675, - 0x679000006d4, - 0x6d5000006dd, - 0x6df000006e9, - 0x6ea000006f0, - 0x6fa00000700, - 0x7100000074b, - 0x74d000007b2, - 0x7c0000007f6, - 0x7fd000007fe, - 0x8000000082e, - 0x8400000085c, - 0x8600000086b, - 0x8a0000008b5, - 0x8b6000008be, - 0x8d3000008e2, - 0x8e300000958, - 0x96000000964, - 0x96600000970, - 0x97100000984, - 0x9850000098d, - 0x98f00000991, - 0x993000009a9, - 0x9aa000009b1, - 0x9b2000009b3, - 0x9b6000009ba, - 0x9bc000009c5, - 0x9c7000009c9, - 0x9cb000009cf, - 0x9d7000009d8, - 0x9e0000009e4, - 0x9e6000009f2, - 0x9fc000009fd, - 0x9fe000009ff, - 0xa0100000a04, - 0xa0500000a0b, - 0xa0f00000a11, - 0xa1300000a29, - 0xa2a00000a31, - 0xa3200000a33, - 0xa3500000a36, - 0xa3800000a3a, - 0xa3c00000a3d, - 0xa3e00000a43, - 0xa4700000a49, - 0xa4b00000a4e, - 0xa5100000a52, - 0xa5c00000a5d, - 0xa6600000a76, - 0xa8100000a84, - 0xa8500000a8e, - 0xa8f00000a92, - 0xa9300000aa9, - 0xaaa00000ab1, - 0xab200000ab4, - 0xab500000aba, - 0xabc00000ac6, - 0xac700000aca, - 0xacb00000ace, - 
0xad000000ad1, - 0xae000000ae4, - 0xae600000af0, - 0xaf900000b00, - 0xb0100000b04, - 0xb0500000b0d, - 0xb0f00000b11, - 0xb1300000b29, - 0xb2a00000b31, - 0xb3200000b34, - 0xb3500000b3a, - 0xb3c00000b45, - 0xb4700000b49, - 0xb4b00000b4e, - 0xb5600000b58, - 0xb5f00000b64, - 0xb6600000b70, - 0xb7100000b72, - 0xb8200000b84, - 0xb8500000b8b, - 0xb8e00000b91, - 0xb9200000b96, - 0xb9900000b9b, - 0xb9c00000b9d, - 0xb9e00000ba0, - 0xba300000ba5, - 0xba800000bab, - 0xbae00000bba, - 0xbbe00000bc3, - 0xbc600000bc9, - 0xbca00000bce, - 0xbd000000bd1, - 0xbd700000bd8, - 0xbe600000bf0, - 0xc0000000c0d, - 0xc0e00000c11, - 0xc1200000c29, - 0xc2a00000c3a, - 0xc3d00000c45, - 0xc4600000c49, - 0xc4a00000c4e, - 0xc5500000c57, - 0xc5800000c5b, - 0xc6000000c64, - 0xc6600000c70, - 0xc8000000c84, - 0xc8500000c8d, - 0xc8e00000c91, - 0xc9200000ca9, - 0xcaa00000cb4, - 0xcb500000cba, - 0xcbc00000cc5, - 0xcc600000cc9, - 0xcca00000cce, - 0xcd500000cd7, - 0xcde00000cdf, - 0xce000000ce4, - 0xce600000cf0, - 0xcf100000cf3, - 0xd0000000d04, - 0xd0500000d0d, - 0xd0e00000d11, - 0xd1200000d45, - 0xd4600000d49, - 0xd4a00000d4f, - 0xd5400000d58, - 0xd5f00000d64, - 0xd6600000d70, - 0xd7a00000d80, - 0xd8200000d84, - 0xd8500000d97, - 0xd9a00000db2, - 0xdb300000dbc, - 0xdbd00000dbe, - 0xdc000000dc7, - 0xdca00000dcb, - 0xdcf00000dd5, - 0xdd600000dd7, - 0xdd800000de0, - 0xde600000df0, - 0xdf200000df4, - 0xe0100000e33, - 0xe3400000e3b, - 0xe4000000e4f, - 0xe5000000e5a, - 0xe8100000e83, - 0xe8400000e85, - 0xe8700000e89, - 0xe8a00000e8b, - 0xe8d00000e8e, - 0xe9400000e98, - 0xe9900000ea0, - 0xea100000ea4, - 0xea500000ea6, - 0xea700000ea8, - 0xeaa00000eac, - 0xead00000eb3, - 0xeb400000eba, - 0xebb00000ebe, - 0xec000000ec5, - 0xec600000ec7, - 0xec800000ece, - 0xed000000eda, - 0xede00000ee0, - 0xf0000000f01, - 0xf0b00000f0c, - 0xf1800000f1a, - 0xf2000000f2a, - 0xf3500000f36, - 0xf3700000f38, - 0xf3900000f3a, - 0xf3e00000f43, - 0xf4400000f48, - 0xf4900000f4d, - 0xf4e00000f52, - 0xf5300000f57, - 0xf5800000f5c, - 
0xf5d00000f69, - 0xf6a00000f6d, - 0xf7100000f73, - 0xf7400000f75, - 0xf7a00000f81, - 0xf8200000f85, - 0xf8600000f93, - 0xf9400000f98, - 0xf9900000f9d, - 0xf9e00000fa2, - 0xfa300000fa7, - 0xfa800000fac, - 0xfad00000fb9, - 0xfba00000fbd, - 0xfc600000fc7, - 0x10000000104a, - 0x10500000109e, - 0x10d0000010fb, - 0x10fd00001100, - 0x120000001249, - 0x124a0000124e, - 0x125000001257, - 0x125800001259, - 0x125a0000125e, - 0x126000001289, - 0x128a0000128e, - 0x1290000012b1, - 0x12b2000012b6, - 0x12b8000012bf, - 0x12c0000012c1, - 0x12c2000012c6, - 0x12c8000012d7, - 0x12d800001311, - 0x131200001316, - 0x13180000135b, - 0x135d00001360, - 0x138000001390, - 0x13a0000013f6, - 0x14010000166d, - 0x166f00001680, - 0x16810000169b, - 0x16a0000016eb, - 0x16f1000016f9, - 0x17000000170d, - 0x170e00001715, - 0x172000001735, - 0x174000001754, - 0x17600000176d, - 0x176e00001771, - 0x177200001774, - 0x1780000017b4, - 0x17b6000017d4, - 0x17d7000017d8, - 0x17dc000017de, - 0x17e0000017ea, - 0x18100000181a, - 0x182000001879, - 0x1880000018ab, - 0x18b0000018f6, - 0x19000000191f, - 0x19200000192c, - 0x19300000193c, - 0x19460000196e, - 0x197000001975, - 0x1980000019ac, - 0x19b0000019ca, - 0x19d0000019da, - 0x1a0000001a1c, - 0x1a2000001a5f, - 0x1a6000001a7d, - 0x1a7f00001a8a, - 0x1a9000001a9a, - 0x1aa700001aa8, - 0x1ab000001abe, - 0x1b0000001b4c, - 0x1b5000001b5a, - 0x1b6b00001b74, - 0x1b8000001bf4, - 0x1c0000001c38, - 0x1c4000001c4a, - 0x1c4d00001c7e, - 0x1cd000001cd3, - 0x1cd400001cfa, - 0x1d0000001d2c, - 0x1d2f00001d30, - 0x1d3b00001d3c, - 0x1d4e00001d4f, - 0x1d6b00001d78, - 0x1d7900001d9b, - 0x1dc000001dfa, - 0x1dfb00001e00, - 0x1e0100001e02, - 0x1e0300001e04, - 0x1e0500001e06, - 0x1e0700001e08, - 0x1e0900001e0a, - 0x1e0b00001e0c, - 0x1e0d00001e0e, - 0x1e0f00001e10, - 0x1e1100001e12, - 0x1e1300001e14, - 0x1e1500001e16, - 0x1e1700001e18, - 0x1e1900001e1a, - 0x1e1b00001e1c, - 0x1e1d00001e1e, - 0x1e1f00001e20, - 0x1e2100001e22, - 0x1e2300001e24, - 0x1e2500001e26, - 0x1e2700001e28, - 0x1e2900001e2a, 
- 0x1e2b00001e2c, - 0x1e2d00001e2e, - 0x1e2f00001e30, - 0x1e3100001e32, - 0x1e3300001e34, - 0x1e3500001e36, - 0x1e3700001e38, - 0x1e3900001e3a, - 0x1e3b00001e3c, - 0x1e3d00001e3e, - 0x1e3f00001e40, - 0x1e4100001e42, - 0x1e4300001e44, - 0x1e4500001e46, - 0x1e4700001e48, - 0x1e4900001e4a, - 0x1e4b00001e4c, - 0x1e4d00001e4e, - 0x1e4f00001e50, - 0x1e5100001e52, - 0x1e5300001e54, - 0x1e5500001e56, - 0x1e5700001e58, - 0x1e5900001e5a, - 0x1e5b00001e5c, - 0x1e5d00001e5e, - 0x1e5f00001e60, - 0x1e6100001e62, - 0x1e6300001e64, - 0x1e6500001e66, - 0x1e6700001e68, - 0x1e6900001e6a, - 0x1e6b00001e6c, - 0x1e6d00001e6e, - 0x1e6f00001e70, - 0x1e7100001e72, - 0x1e7300001e74, - 0x1e7500001e76, - 0x1e7700001e78, - 0x1e7900001e7a, - 0x1e7b00001e7c, - 0x1e7d00001e7e, - 0x1e7f00001e80, - 0x1e8100001e82, - 0x1e8300001e84, - 0x1e8500001e86, - 0x1e8700001e88, - 0x1e8900001e8a, - 0x1e8b00001e8c, - 0x1e8d00001e8e, - 0x1e8f00001e90, - 0x1e9100001e92, - 0x1e9300001e94, - 0x1e9500001e9a, - 0x1e9c00001e9e, - 0x1e9f00001ea0, - 0x1ea100001ea2, - 0x1ea300001ea4, - 0x1ea500001ea6, - 0x1ea700001ea8, - 0x1ea900001eaa, - 0x1eab00001eac, - 0x1ead00001eae, - 0x1eaf00001eb0, - 0x1eb100001eb2, - 0x1eb300001eb4, - 0x1eb500001eb6, - 0x1eb700001eb8, - 0x1eb900001eba, - 0x1ebb00001ebc, - 0x1ebd00001ebe, - 0x1ebf00001ec0, - 0x1ec100001ec2, - 0x1ec300001ec4, - 0x1ec500001ec6, - 0x1ec700001ec8, - 0x1ec900001eca, - 0x1ecb00001ecc, - 0x1ecd00001ece, - 0x1ecf00001ed0, - 0x1ed100001ed2, - 0x1ed300001ed4, - 0x1ed500001ed6, - 0x1ed700001ed8, - 0x1ed900001eda, - 0x1edb00001edc, - 0x1edd00001ede, - 0x1edf00001ee0, - 0x1ee100001ee2, - 0x1ee300001ee4, - 0x1ee500001ee6, - 0x1ee700001ee8, - 0x1ee900001eea, - 0x1eeb00001eec, - 0x1eed00001eee, - 0x1eef00001ef0, - 0x1ef100001ef2, - 0x1ef300001ef4, - 0x1ef500001ef6, - 0x1ef700001ef8, - 0x1ef900001efa, - 0x1efb00001efc, - 0x1efd00001efe, - 0x1eff00001f08, - 0x1f1000001f16, - 0x1f2000001f28, - 0x1f3000001f38, - 0x1f4000001f46, - 0x1f5000001f58, - 0x1f6000001f68, - 0x1f7000001f71, - 
0x1f7200001f73, - 0x1f7400001f75, - 0x1f7600001f77, - 0x1f7800001f79, - 0x1f7a00001f7b, - 0x1f7c00001f7d, - 0x1fb000001fb2, - 0x1fb600001fb7, - 0x1fc600001fc7, - 0x1fd000001fd3, - 0x1fd600001fd8, - 0x1fe000001fe3, - 0x1fe400001fe8, - 0x1ff600001ff7, - 0x214e0000214f, - 0x218400002185, - 0x2c3000002c5f, - 0x2c6100002c62, - 0x2c6500002c67, - 0x2c6800002c69, - 0x2c6a00002c6b, - 0x2c6c00002c6d, - 0x2c7100002c72, - 0x2c7300002c75, - 0x2c7600002c7c, - 0x2c8100002c82, - 0x2c8300002c84, - 0x2c8500002c86, - 0x2c8700002c88, - 0x2c8900002c8a, - 0x2c8b00002c8c, - 0x2c8d00002c8e, - 0x2c8f00002c90, - 0x2c9100002c92, - 0x2c9300002c94, - 0x2c9500002c96, - 0x2c9700002c98, - 0x2c9900002c9a, - 0x2c9b00002c9c, - 0x2c9d00002c9e, - 0x2c9f00002ca0, - 0x2ca100002ca2, - 0x2ca300002ca4, - 0x2ca500002ca6, - 0x2ca700002ca8, - 0x2ca900002caa, - 0x2cab00002cac, - 0x2cad00002cae, - 0x2caf00002cb0, - 0x2cb100002cb2, - 0x2cb300002cb4, - 0x2cb500002cb6, - 0x2cb700002cb8, - 0x2cb900002cba, - 0x2cbb00002cbc, - 0x2cbd00002cbe, - 0x2cbf00002cc0, - 0x2cc100002cc2, - 0x2cc300002cc4, - 0x2cc500002cc6, - 0x2cc700002cc8, - 0x2cc900002cca, - 0x2ccb00002ccc, - 0x2ccd00002cce, - 0x2ccf00002cd0, - 0x2cd100002cd2, - 0x2cd300002cd4, - 0x2cd500002cd6, - 0x2cd700002cd8, - 0x2cd900002cda, - 0x2cdb00002cdc, - 0x2cdd00002cde, - 0x2cdf00002ce0, - 0x2ce100002ce2, - 0x2ce300002ce5, - 0x2cec00002ced, - 0x2cee00002cf2, - 0x2cf300002cf4, - 0x2d0000002d26, - 0x2d2700002d28, - 0x2d2d00002d2e, - 0x2d3000002d68, - 0x2d7f00002d97, - 0x2da000002da7, - 0x2da800002daf, - 0x2db000002db7, - 0x2db800002dbf, - 0x2dc000002dc7, - 0x2dc800002dcf, - 0x2dd000002dd7, - 0x2dd800002ddf, - 0x2de000002e00, - 0x2e2f00002e30, - 0x300500003008, - 0x302a0000302e, - 0x303c0000303d, - 0x304100003097, - 0x30990000309b, - 0x309d0000309f, - 0x30a1000030fb, - 0x30fc000030ff, - 0x310500003130, - 0x31a0000031bb, - 0x31f000003200, - 0x340000004db6, - 0x4e0000009ff0, - 0xa0000000a48d, - 0xa4d00000a4fe, - 0xa5000000a60d, - 0xa6100000a62c, - 0xa6410000a642, - 
0xa6430000a644, - 0xa6450000a646, - 0xa6470000a648, - 0xa6490000a64a, - 0xa64b0000a64c, - 0xa64d0000a64e, - 0xa64f0000a650, - 0xa6510000a652, - 0xa6530000a654, - 0xa6550000a656, - 0xa6570000a658, - 0xa6590000a65a, - 0xa65b0000a65c, - 0xa65d0000a65e, - 0xa65f0000a660, - 0xa6610000a662, - 0xa6630000a664, - 0xa6650000a666, - 0xa6670000a668, - 0xa6690000a66a, - 0xa66b0000a66c, - 0xa66d0000a670, - 0xa6740000a67e, - 0xa67f0000a680, - 0xa6810000a682, - 0xa6830000a684, - 0xa6850000a686, - 0xa6870000a688, - 0xa6890000a68a, - 0xa68b0000a68c, - 0xa68d0000a68e, - 0xa68f0000a690, - 0xa6910000a692, - 0xa6930000a694, - 0xa6950000a696, - 0xa6970000a698, - 0xa6990000a69a, - 0xa69b0000a69c, - 0xa69e0000a6e6, - 0xa6f00000a6f2, - 0xa7170000a720, - 0xa7230000a724, - 0xa7250000a726, - 0xa7270000a728, - 0xa7290000a72a, - 0xa72b0000a72c, - 0xa72d0000a72e, - 0xa72f0000a732, - 0xa7330000a734, - 0xa7350000a736, - 0xa7370000a738, - 0xa7390000a73a, - 0xa73b0000a73c, - 0xa73d0000a73e, - 0xa73f0000a740, - 0xa7410000a742, - 0xa7430000a744, - 0xa7450000a746, - 0xa7470000a748, - 0xa7490000a74a, - 0xa74b0000a74c, - 0xa74d0000a74e, - 0xa74f0000a750, - 0xa7510000a752, - 0xa7530000a754, - 0xa7550000a756, - 0xa7570000a758, - 0xa7590000a75a, - 0xa75b0000a75c, - 0xa75d0000a75e, - 0xa75f0000a760, - 0xa7610000a762, - 0xa7630000a764, - 0xa7650000a766, - 0xa7670000a768, - 0xa7690000a76a, - 0xa76b0000a76c, - 0xa76d0000a76e, - 0xa76f0000a770, - 0xa7710000a779, - 0xa77a0000a77b, - 0xa77c0000a77d, - 0xa77f0000a780, - 0xa7810000a782, - 0xa7830000a784, - 0xa7850000a786, - 0xa7870000a789, - 0xa78c0000a78d, - 0xa78e0000a790, - 0xa7910000a792, - 0xa7930000a796, - 0xa7970000a798, - 0xa7990000a79a, - 0xa79b0000a79c, - 0xa79d0000a79e, - 0xa79f0000a7a0, - 0xa7a10000a7a2, - 0xa7a30000a7a4, - 0xa7a50000a7a6, - 0xa7a70000a7a8, - 0xa7a90000a7aa, - 0xa7af0000a7b0, - 0xa7b50000a7b6, - 0xa7b70000a7b8, - 0xa7b90000a7ba, - 0xa7f70000a7f8, - 0xa7fa0000a828, - 0xa8400000a874, - 0xa8800000a8c6, - 0xa8d00000a8da, - 0xa8e00000a8f8, - 
0xa8fb0000a8fc, - 0xa8fd0000a92e, - 0xa9300000a954, - 0xa9800000a9c1, - 0xa9cf0000a9da, - 0xa9e00000a9ff, - 0xaa000000aa37, - 0xaa400000aa4e, - 0xaa500000aa5a, - 0xaa600000aa77, - 0xaa7a0000aac3, - 0xaadb0000aade, - 0xaae00000aaf0, - 0xaaf20000aaf7, - 0xab010000ab07, - 0xab090000ab0f, - 0xab110000ab17, - 0xab200000ab27, - 0xab280000ab2f, - 0xab300000ab5b, - 0xab600000ab66, - 0xabc00000abeb, - 0xabec0000abee, - 0xabf00000abfa, - 0xac000000d7a4, - 0xfa0e0000fa10, - 0xfa110000fa12, - 0xfa130000fa15, - 0xfa1f0000fa20, - 0xfa210000fa22, - 0xfa230000fa25, - 0xfa270000fa2a, - 0xfb1e0000fb1f, - 0xfe200000fe30, - 0xfe730000fe74, - 0x100000001000c, - 0x1000d00010027, - 0x100280001003b, - 0x1003c0001003e, - 0x1003f0001004e, - 0x100500001005e, - 0x10080000100fb, - 0x101fd000101fe, - 0x102800001029d, - 0x102a0000102d1, - 0x102e0000102e1, - 0x1030000010320, - 0x1032d00010341, - 0x103420001034a, - 0x103500001037b, - 0x103800001039e, - 0x103a0000103c4, - 0x103c8000103d0, - 0x104280001049e, - 0x104a0000104aa, - 0x104d8000104fc, - 0x1050000010528, - 0x1053000010564, - 0x1060000010737, - 0x1074000010756, - 0x1076000010768, - 0x1080000010806, - 0x1080800010809, - 0x1080a00010836, - 0x1083700010839, - 0x1083c0001083d, - 0x1083f00010856, - 0x1086000010877, - 0x108800001089f, - 0x108e0000108f3, - 0x108f4000108f6, - 0x1090000010916, - 0x109200001093a, - 0x10980000109b8, - 0x109be000109c0, - 0x10a0000010a04, - 0x10a0500010a07, - 0x10a0c00010a14, - 0x10a1500010a18, - 0x10a1900010a36, - 0x10a3800010a3b, - 0x10a3f00010a40, - 0x10a6000010a7d, - 0x10a8000010a9d, - 0x10ac000010ac8, - 0x10ac900010ae7, - 0x10b0000010b36, - 0x10b4000010b56, - 0x10b6000010b73, - 0x10b8000010b92, - 0x10c0000010c49, - 0x10cc000010cf3, - 0x10d0000010d28, - 0x10d3000010d3a, - 0x10f0000010f1d, - 0x10f2700010f28, - 0x10f3000010f51, - 0x1100000011047, - 0x1106600011070, - 0x1107f000110bb, - 0x110d0000110e9, - 0x110f0000110fa, - 0x1110000011135, - 0x1113600011140, - 0x1114400011147, - 0x1115000011174, - 0x1117600011177, - 
0x11180000111c5, - 0x111c9000111cd, - 0x111d0000111db, - 0x111dc000111dd, - 0x1120000011212, - 0x1121300011238, - 0x1123e0001123f, - 0x1128000011287, - 0x1128800011289, - 0x1128a0001128e, - 0x1128f0001129e, - 0x1129f000112a9, - 0x112b0000112eb, - 0x112f0000112fa, - 0x1130000011304, - 0x113050001130d, - 0x1130f00011311, - 0x1131300011329, - 0x1132a00011331, - 0x1133200011334, - 0x113350001133a, - 0x1133b00011345, - 0x1134700011349, - 0x1134b0001134e, - 0x1135000011351, - 0x1135700011358, - 0x1135d00011364, - 0x113660001136d, - 0x1137000011375, - 0x114000001144b, - 0x114500001145a, - 0x1145e0001145f, - 0x11480000114c6, - 0x114c7000114c8, - 0x114d0000114da, - 0x11580000115b6, - 0x115b8000115c1, - 0x115d8000115de, - 0x1160000011641, - 0x1164400011645, - 0x116500001165a, - 0x11680000116b8, - 0x116c0000116ca, - 0x117000001171b, - 0x1171d0001172c, - 0x117300001173a, - 0x118000001183b, - 0x118c0000118ea, - 0x118ff00011900, - 0x11a0000011a3f, - 0x11a4700011a48, - 0x11a5000011a84, - 0x11a8600011a9a, - 0x11a9d00011a9e, - 0x11ac000011af9, - 0x11c0000011c09, - 0x11c0a00011c37, - 0x11c3800011c41, - 0x11c5000011c5a, - 0x11c7200011c90, - 0x11c9200011ca8, - 0x11ca900011cb7, - 0x11d0000011d07, - 0x11d0800011d0a, - 0x11d0b00011d37, - 0x11d3a00011d3b, - 0x11d3c00011d3e, - 0x11d3f00011d48, - 0x11d5000011d5a, - 0x11d6000011d66, - 0x11d6700011d69, - 0x11d6a00011d8f, - 0x11d9000011d92, - 0x11d9300011d99, - 0x11da000011daa, - 0x11ee000011ef7, - 0x120000001239a, - 0x1248000012544, - 0x130000001342f, - 0x1440000014647, - 0x1680000016a39, - 0x16a4000016a5f, - 0x16a6000016a6a, - 0x16ad000016aee, - 0x16af000016af5, - 0x16b0000016b37, - 0x16b4000016b44, - 0x16b5000016b5a, - 0x16b6300016b78, - 0x16b7d00016b90, - 0x16e6000016e80, - 0x16f0000016f45, - 0x16f5000016f7f, - 0x16f8f00016fa0, - 0x16fe000016fe2, - 0x17000000187f2, - 0x1880000018af3, - 0x1b0000001b11f, - 0x1b1700001b2fc, - 0x1bc000001bc6b, - 0x1bc700001bc7d, - 0x1bc800001bc89, - 0x1bc900001bc9a, - 0x1bc9d0001bc9f, - 0x1da000001da37, - 
0x1da3b0001da6d, - 0x1da750001da76, - 0x1da840001da85, - 0x1da9b0001daa0, - 0x1daa10001dab0, - 0x1e0000001e007, - 0x1e0080001e019, - 0x1e01b0001e022, - 0x1e0230001e025, - 0x1e0260001e02b, - 0x1e8000001e8c5, - 0x1e8d00001e8d7, - 0x1e9220001e94b, - 0x1e9500001e95a, - 0x200000002a6d7, - 0x2a7000002b735, - 0x2b7400002b81e, - 0x2b8200002cea2, - 0x2ceb00002ebe1, - ), - 'CONTEXTJ': ( - 0x200c0000200e, - ), - 'CONTEXTO': ( - 0xb7000000b8, - 0x37500000376, - 0x5f3000005f5, - 0x6600000066a, - 0x6f0000006fa, - 0x30fb000030fc, - ), -} diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/idna/intranges.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/idna/intranges.py deleted file mode 100644 index fa8a735..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/idna/intranges.py +++ /dev/null @@ -1,53 +0,0 @@ -""" -Given a list of integers, made up of (hopefully) a small number of long runs -of consecutive integers, compute a representation of the form -((start1, end1), (start2, end2) ...). Then answer the question "was x present -in the original list?" in time O(log(# runs)). -""" - -import bisect - -def intranges_from_list(list_): - """Represent a list of integers as a sequence of ranges: - ((start_0, end_0), (start_1, end_1), ...), such that the original - integers are exactly those x such that start_i <= x < end_i for some i. - - Ranges are encoded as single integers (start << 32 | end), not as tuples. 
- """ - - sorted_list = sorted(list_) - ranges = [] - last_write = -1 - for i in range(len(sorted_list)): - if i+1 < len(sorted_list): - if sorted_list[i] == sorted_list[i+1]-1: - continue - current_range = sorted_list[last_write+1:i+1] - ranges.append(_encode_range(current_range[0], current_range[-1] + 1)) - last_write = i - - return tuple(ranges) - -def _encode_range(start, end): - return (start << 32) | end - -def _decode_range(r): - return (r >> 32), (r & ((1 << 32) - 1)) - - -def intranges_contain(int_, ranges): - """Determine if `int_` falls into one of the ranges in `ranges`.""" - tuple_ = _encode_range(int_, 0) - pos = bisect.bisect_left(ranges, tuple_) - # we could be immediately ahead of a tuple (start, end) - # with start < int_ <= end - if pos > 0: - left, right = _decode_range(ranges[pos-1]) - if left <= int_ < right: - return True - # or we could be immediately behind a tuple (int_, end) - if pos < len(ranges): - left, _ = _decode_range(ranges[pos]) - if left == int_: - return True - return False diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/idna/package_data.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/idna/package_data.py deleted file mode 100644 index 257e898..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/idna/package_data.py +++ /dev/null @@ -1,2 +0,0 @@ -__version__ = '2.8' - diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/idna/uts46data.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/idna/uts46data.py deleted file mode 100644 index a68ed4c..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/idna/uts46data.py +++ /dev/null @@ -1,8205 +0,0 @@ -# This file is automatically generated by tools/idna-data -# vim: set fileencoding=utf-8 : - -"""IDNA Mapping Table from UTS46.""" - - -__version__ = "11.0.0" -def _seg_0(): - return [ - (0x0, '3'), - (0x1, '3'), - (0x2, '3'), - (0x3, 
'3'), - (0x4, '3'), - (0x5, '3'), - (0x6, '3'), - (0x7, '3'), - (0x8, '3'), - (0x9, '3'), - (0xA, '3'), - (0xB, '3'), - (0xC, '3'), - (0xD, '3'), - (0xE, '3'), - (0xF, '3'), - (0x10, '3'), - (0x11, '3'), - (0x12, '3'), - (0x13, '3'), - (0x14, '3'), - (0x15, '3'), - (0x16, '3'), - (0x17, '3'), - (0x18, '3'), - (0x19, '3'), - (0x1A, '3'), - (0x1B, '3'), - (0x1C, '3'), - (0x1D, '3'), - (0x1E, '3'), - (0x1F, '3'), - (0x20, '3'), - (0x21, '3'), - (0x22, '3'), - (0x23, '3'), - (0x24, '3'), - (0x25, '3'), - (0x26, '3'), - (0x27, '3'), - (0x28, '3'), - (0x29, '3'), - (0x2A, '3'), - (0x2B, '3'), - (0x2C, '3'), - (0x2D, 'V'), - (0x2E, 'V'), - (0x2F, '3'), - (0x30, 'V'), - (0x31, 'V'), - (0x32, 'V'), - (0x33, 'V'), - (0x34, 'V'), - (0x35, 'V'), - (0x36, 'V'), - (0x37, 'V'), - (0x38, 'V'), - (0x39, 'V'), - (0x3A, '3'), - (0x3B, '3'), - (0x3C, '3'), - (0x3D, '3'), - (0x3E, '3'), - (0x3F, '3'), - (0x40, '3'), - (0x41, 'M', u'a'), - (0x42, 'M', u'b'), - (0x43, 'M', u'c'), - (0x44, 'M', u'd'), - (0x45, 'M', u'e'), - (0x46, 'M', u'f'), - (0x47, 'M', u'g'), - (0x48, 'M', u'h'), - (0x49, 'M', u'i'), - (0x4A, 'M', u'j'), - (0x4B, 'M', u'k'), - (0x4C, 'M', u'l'), - (0x4D, 'M', u'm'), - (0x4E, 'M', u'n'), - (0x4F, 'M', u'o'), - (0x50, 'M', u'p'), - (0x51, 'M', u'q'), - (0x52, 'M', u'r'), - (0x53, 'M', u's'), - (0x54, 'M', u't'), - (0x55, 'M', u'u'), - (0x56, 'M', u'v'), - (0x57, 'M', u'w'), - (0x58, 'M', u'x'), - (0x59, 'M', u'y'), - (0x5A, 'M', u'z'), - (0x5B, '3'), - (0x5C, '3'), - (0x5D, '3'), - (0x5E, '3'), - (0x5F, '3'), - (0x60, '3'), - (0x61, 'V'), - (0x62, 'V'), - (0x63, 'V'), - ] - -def _seg_1(): - return [ - (0x64, 'V'), - (0x65, 'V'), - (0x66, 'V'), - (0x67, 'V'), - (0x68, 'V'), - (0x69, 'V'), - (0x6A, 'V'), - (0x6B, 'V'), - (0x6C, 'V'), - (0x6D, 'V'), - (0x6E, 'V'), - (0x6F, 'V'), - (0x70, 'V'), - (0x71, 'V'), - (0x72, 'V'), - (0x73, 'V'), - (0x74, 'V'), - (0x75, 'V'), - (0x76, 'V'), - (0x77, 'V'), - (0x78, 'V'), - (0x79, 'V'), - (0x7A, 'V'), - (0x7B, '3'), - (0x7C, '3'), - 
(0x7D, '3'), - (0x7E, '3'), - (0x7F, '3'), - (0x80, 'X'), - (0x81, 'X'), - (0x82, 'X'), - (0x83, 'X'), - (0x84, 'X'), - (0x85, 'X'), - (0x86, 'X'), - (0x87, 'X'), - (0x88, 'X'), - (0x89, 'X'), - (0x8A, 'X'), - (0x8B, 'X'), - (0x8C, 'X'), - (0x8D, 'X'), - (0x8E, 'X'), - (0x8F, 'X'), - (0x90, 'X'), - (0x91, 'X'), - (0x92, 'X'), - (0x93, 'X'), - (0x94, 'X'), - (0x95, 'X'), - (0x96, 'X'), - (0x97, 'X'), - (0x98, 'X'), - (0x99, 'X'), - (0x9A, 'X'), - (0x9B, 'X'), - (0x9C, 'X'), - (0x9D, 'X'), - (0x9E, 'X'), - (0x9F, 'X'), - (0xA0, '3', u' '), - (0xA1, 'V'), - (0xA2, 'V'), - (0xA3, 'V'), - (0xA4, 'V'), - (0xA5, 'V'), - (0xA6, 'V'), - (0xA7, 'V'), - (0xA8, '3', u' ̈'), - (0xA9, 'V'), - (0xAA, 'M', u'a'), - (0xAB, 'V'), - (0xAC, 'V'), - (0xAD, 'I'), - (0xAE, 'V'), - (0xAF, '3', u' Ì„'), - (0xB0, 'V'), - (0xB1, 'V'), - (0xB2, 'M', u'2'), - (0xB3, 'M', u'3'), - (0xB4, '3', u' Ì'), - (0xB5, 'M', u'μ'), - (0xB6, 'V'), - (0xB7, 'V'), - (0xB8, '3', u' ̧'), - (0xB9, 'M', u'1'), - (0xBA, 'M', u'o'), - (0xBB, 'V'), - (0xBC, 'M', u'1â„4'), - (0xBD, 'M', u'1â„2'), - (0xBE, 'M', u'3â„4'), - (0xBF, 'V'), - (0xC0, 'M', u'à'), - (0xC1, 'M', u'á'), - (0xC2, 'M', u'â'), - (0xC3, 'M', u'ã'), - (0xC4, 'M', u'ä'), - (0xC5, 'M', u'Ã¥'), - (0xC6, 'M', u'æ'), - (0xC7, 'M', u'ç'), - ] - -def _seg_2(): - return [ - (0xC8, 'M', u'è'), - (0xC9, 'M', u'é'), - (0xCA, 'M', u'ê'), - (0xCB, 'M', u'ë'), - (0xCC, 'M', u'ì'), - (0xCD, 'M', u'í'), - (0xCE, 'M', u'î'), - (0xCF, 'M', u'ï'), - (0xD0, 'M', u'ð'), - (0xD1, 'M', u'ñ'), - (0xD2, 'M', u'ò'), - (0xD3, 'M', u'ó'), - (0xD4, 'M', u'ô'), - (0xD5, 'M', u'õ'), - (0xD6, 'M', u'ö'), - (0xD7, 'V'), - (0xD8, 'M', u'ø'), - (0xD9, 'M', u'ù'), - (0xDA, 'M', u'ú'), - (0xDB, 'M', u'û'), - (0xDC, 'M', u'ü'), - (0xDD, 'M', u'ý'), - (0xDE, 'M', u'þ'), - (0xDF, 'D', u'ss'), - (0xE0, 'V'), - (0xE1, 'V'), - (0xE2, 'V'), - (0xE3, 'V'), - (0xE4, 'V'), - (0xE5, 'V'), - (0xE6, 'V'), - (0xE7, 'V'), - (0xE8, 'V'), - (0xE9, 'V'), - (0xEA, 'V'), - (0xEB, 'V'), - (0xEC, 'V'), - 
(0xED, 'V'), - (0xEE, 'V'), - (0xEF, 'V'), - (0xF0, 'V'), - (0xF1, 'V'), - (0xF2, 'V'), - (0xF3, 'V'), - (0xF4, 'V'), - (0xF5, 'V'), - (0xF6, 'V'), - (0xF7, 'V'), - (0xF8, 'V'), - (0xF9, 'V'), - (0xFA, 'V'), - (0xFB, 'V'), - (0xFC, 'V'), - (0xFD, 'V'), - (0xFE, 'V'), - (0xFF, 'V'), - (0x100, 'M', u'Ä'), - (0x101, 'V'), - (0x102, 'M', u'ă'), - (0x103, 'V'), - (0x104, 'M', u'Ä…'), - (0x105, 'V'), - (0x106, 'M', u'ć'), - (0x107, 'V'), - (0x108, 'M', u'ĉ'), - (0x109, 'V'), - (0x10A, 'M', u'Ä‹'), - (0x10B, 'V'), - (0x10C, 'M', u'Ä'), - (0x10D, 'V'), - (0x10E, 'M', u'Ä'), - (0x10F, 'V'), - (0x110, 'M', u'Ä‘'), - (0x111, 'V'), - (0x112, 'M', u'Ä“'), - (0x113, 'V'), - (0x114, 'M', u'Ä•'), - (0x115, 'V'), - (0x116, 'M', u'Ä—'), - (0x117, 'V'), - (0x118, 'M', u'Ä™'), - (0x119, 'V'), - (0x11A, 'M', u'Ä›'), - (0x11B, 'V'), - (0x11C, 'M', u'Ä'), - (0x11D, 'V'), - (0x11E, 'M', u'ÄŸ'), - (0x11F, 'V'), - (0x120, 'M', u'Ä¡'), - (0x121, 'V'), - (0x122, 'M', u'Ä£'), - (0x123, 'V'), - (0x124, 'M', u'Ä¥'), - (0x125, 'V'), - (0x126, 'M', u'ħ'), - (0x127, 'V'), - (0x128, 'M', u'Ä©'), - (0x129, 'V'), - (0x12A, 'M', u'Ä«'), - (0x12B, 'V'), - ] - -def _seg_3(): - return [ - (0x12C, 'M', u'Ä­'), - (0x12D, 'V'), - (0x12E, 'M', u'į'), - (0x12F, 'V'), - (0x130, 'M', u'i̇'), - (0x131, 'V'), - (0x132, 'M', u'ij'), - (0x134, 'M', u'ĵ'), - (0x135, 'V'), - (0x136, 'M', u'Ä·'), - (0x137, 'V'), - (0x139, 'M', u'ĺ'), - (0x13A, 'V'), - (0x13B, 'M', u'ļ'), - (0x13C, 'V'), - (0x13D, 'M', u'ľ'), - (0x13E, 'V'), - (0x13F, 'M', u'l·'), - (0x141, 'M', u'Å‚'), - (0x142, 'V'), - (0x143, 'M', u'Å„'), - (0x144, 'V'), - (0x145, 'M', u'ņ'), - (0x146, 'V'), - (0x147, 'M', u'ň'), - (0x148, 'V'), - (0x149, 'M', u'ʼn'), - (0x14A, 'M', u'Å‹'), - (0x14B, 'V'), - (0x14C, 'M', u'Å'), - (0x14D, 'V'), - (0x14E, 'M', u'Å'), - (0x14F, 'V'), - (0x150, 'M', u'Å‘'), - (0x151, 'V'), - (0x152, 'M', u'Å“'), - (0x153, 'V'), - (0x154, 'M', u'Å•'), - (0x155, 'V'), - (0x156, 'M', u'Å—'), - (0x157, 'V'), - (0x158, 'M', u'Å™'), - (0x159, 
'V'), - (0x15A, 'M', u'Å›'), - (0x15B, 'V'), - (0x15C, 'M', u'Å'), - (0x15D, 'V'), - (0x15E, 'M', u'ÅŸ'), - (0x15F, 'V'), - (0x160, 'M', u'Å¡'), - (0x161, 'V'), - (0x162, 'M', u'Å£'), - (0x163, 'V'), - (0x164, 'M', u'Å¥'), - (0x165, 'V'), - (0x166, 'M', u'ŧ'), - (0x167, 'V'), - (0x168, 'M', u'Å©'), - (0x169, 'V'), - (0x16A, 'M', u'Å«'), - (0x16B, 'V'), - (0x16C, 'M', u'Å­'), - (0x16D, 'V'), - (0x16E, 'M', u'ů'), - (0x16F, 'V'), - (0x170, 'M', u'ű'), - (0x171, 'V'), - (0x172, 'M', u'ų'), - (0x173, 'V'), - (0x174, 'M', u'ŵ'), - (0x175, 'V'), - (0x176, 'M', u'Å·'), - (0x177, 'V'), - (0x178, 'M', u'ÿ'), - (0x179, 'M', u'ź'), - (0x17A, 'V'), - (0x17B, 'M', u'ż'), - (0x17C, 'V'), - (0x17D, 'M', u'ž'), - (0x17E, 'V'), - (0x17F, 'M', u's'), - (0x180, 'V'), - (0x181, 'M', u'É“'), - (0x182, 'M', u'ƃ'), - (0x183, 'V'), - (0x184, 'M', u'Æ…'), - (0x185, 'V'), - (0x186, 'M', u'É”'), - (0x187, 'M', u'ƈ'), - (0x188, 'V'), - (0x189, 'M', u'É–'), - (0x18A, 'M', u'É—'), - (0x18B, 'M', u'ÆŒ'), - (0x18C, 'V'), - (0x18E, 'M', u'Ç'), - (0x18F, 'M', u'É™'), - (0x190, 'M', u'É›'), - (0x191, 'M', u'Æ’'), - (0x192, 'V'), - (0x193, 'M', u'É '), - ] - -def _seg_4(): - return [ - (0x194, 'M', u'É£'), - (0x195, 'V'), - (0x196, 'M', u'É©'), - (0x197, 'M', u'ɨ'), - (0x198, 'M', u'Æ™'), - (0x199, 'V'), - (0x19C, 'M', u'ɯ'), - (0x19D, 'M', u'ɲ'), - (0x19E, 'V'), - (0x19F, 'M', u'ɵ'), - (0x1A0, 'M', u'Æ¡'), - (0x1A1, 'V'), - (0x1A2, 'M', u'Æ£'), - (0x1A3, 'V'), - (0x1A4, 'M', u'Æ¥'), - (0x1A5, 'V'), - (0x1A6, 'M', u'Ê€'), - (0x1A7, 'M', u'ƨ'), - (0x1A8, 'V'), - (0x1A9, 'M', u'ʃ'), - (0x1AA, 'V'), - (0x1AC, 'M', u'Æ­'), - (0x1AD, 'V'), - (0x1AE, 'M', u'ʈ'), - (0x1AF, 'M', u'Æ°'), - (0x1B0, 'V'), - (0x1B1, 'M', u'ÊŠ'), - (0x1B2, 'M', u'Ê‹'), - (0x1B3, 'M', u'Æ´'), - (0x1B4, 'V'), - (0x1B5, 'M', u'ƶ'), - (0x1B6, 'V'), - (0x1B7, 'M', u'Ê’'), - (0x1B8, 'M', u'ƹ'), - (0x1B9, 'V'), - (0x1BC, 'M', u'ƽ'), - (0x1BD, 'V'), - (0x1C4, 'M', u'dž'), - (0x1C7, 'M', u'lj'), - (0x1CA, 'M', u'nj'), - (0x1CD, 'M', 
u'ÇŽ'), - (0x1CE, 'V'), - (0x1CF, 'M', u'Ç'), - (0x1D0, 'V'), - (0x1D1, 'M', u'Ç’'), - (0x1D2, 'V'), - (0x1D3, 'M', u'Ç”'), - (0x1D4, 'V'), - (0x1D5, 'M', u'Ç–'), - (0x1D6, 'V'), - (0x1D7, 'M', u'ǘ'), - (0x1D8, 'V'), - (0x1D9, 'M', u'Çš'), - (0x1DA, 'V'), - (0x1DB, 'M', u'Çœ'), - (0x1DC, 'V'), - (0x1DE, 'M', u'ÇŸ'), - (0x1DF, 'V'), - (0x1E0, 'M', u'Ç¡'), - (0x1E1, 'V'), - (0x1E2, 'M', u'Ç£'), - (0x1E3, 'V'), - (0x1E4, 'M', u'Ç¥'), - (0x1E5, 'V'), - (0x1E6, 'M', u'ǧ'), - (0x1E7, 'V'), - (0x1E8, 'M', u'Ç©'), - (0x1E9, 'V'), - (0x1EA, 'M', u'Ç«'), - (0x1EB, 'V'), - (0x1EC, 'M', u'Ç­'), - (0x1ED, 'V'), - (0x1EE, 'M', u'ǯ'), - (0x1EF, 'V'), - (0x1F1, 'M', u'dz'), - (0x1F4, 'M', u'ǵ'), - (0x1F5, 'V'), - (0x1F6, 'M', u'Æ•'), - (0x1F7, 'M', u'Æ¿'), - (0x1F8, 'M', u'ǹ'), - (0x1F9, 'V'), - (0x1FA, 'M', u'Ç»'), - (0x1FB, 'V'), - (0x1FC, 'M', u'ǽ'), - (0x1FD, 'V'), - (0x1FE, 'M', u'Ç¿'), - (0x1FF, 'V'), - (0x200, 'M', u'È'), - (0x201, 'V'), - (0x202, 'M', u'ȃ'), - (0x203, 'V'), - (0x204, 'M', u'È…'), - (0x205, 'V'), - (0x206, 'M', u'ȇ'), - (0x207, 'V'), - (0x208, 'M', u'ȉ'), - (0x209, 'V'), - (0x20A, 'M', u'È‹'), - (0x20B, 'V'), - (0x20C, 'M', u'È'), - ] - -def _seg_5(): - return [ - (0x20D, 'V'), - (0x20E, 'M', u'È'), - (0x20F, 'V'), - (0x210, 'M', u'È‘'), - (0x211, 'V'), - (0x212, 'M', u'È“'), - (0x213, 'V'), - (0x214, 'M', u'È•'), - (0x215, 'V'), - (0x216, 'M', u'È—'), - (0x217, 'V'), - (0x218, 'M', u'È™'), - (0x219, 'V'), - (0x21A, 'M', u'È›'), - (0x21B, 'V'), - (0x21C, 'M', u'È'), - (0x21D, 'V'), - (0x21E, 'M', u'ÈŸ'), - (0x21F, 'V'), - (0x220, 'M', u'Æž'), - (0x221, 'V'), - (0x222, 'M', u'È£'), - (0x223, 'V'), - (0x224, 'M', u'È¥'), - (0x225, 'V'), - (0x226, 'M', u'ȧ'), - (0x227, 'V'), - (0x228, 'M', u'È©'), - (0x229, 'V'), - (0x22A, 'M', u'È«'), - (0x22B, 'V'), - (0x22C, 'M', u'È­'), - (0x22D, 'V'), - (0x22E, 'M', u'ȯ'), - (0x22F, 'V'), - (0x230, 'M', u'ȱ'), - (0x231, 'V'), - (0x232, 'M', u'ȳ'), - (0x233, 'V'), - (0x23A, 'M', u'â±¥'), - (0x23B, 'M', u'ȼ'), - (0x23C, 
'V'), - (0x23D, 'M', u'Æš'), - (0x23E, 'M', u'ⱦ'), - (0x23F, 'V'), - (0x241, 'M', u'É‚'), - (0x242, 'V'), - (0x243, 'M', u'Æ€'), - (0x244, 'M', u'ʉ'), - (0x245, 'M', u'ÊŒ'), - (0x246, 'M', u'ɇ'), - (0x247, 'V'), - (0x248, 'M', u'ɉ'), - (0x249, 'V'), - (0x24A, 'M', u'É‹'), - (0x24B, 'V'), - (0x24C, 'M', u'É'), - (0x24D, 'V'), - (0x24E, 'M', u'É'), - (0x24F, 'V'), - (0x2B0, 'M', u'h'), - (0x2B1, 'M', u'ɦ'), - (0x2B2, 'M', u'j'), - (0x2B3, 'M', u'r'), - (0x2B4, 'M', u'ɹ'), - (0x2B5, 'M', u'É»'), - (0x2B6, 'M', u'Ê'), - (0x2B7, 'M', u'w'), - (0x2B8, 'M', u'y'), - (0x2B9, 'V'), - (0x2D8, '3', u' ̆'), - (0x2D9, '3', u' ̇'), - (0x2DA, '3', u' ÌŠ'), - (0x2DB, '3', u' ̨'), - (0x2DC, '3', u' ̃'), - (0x2DD, '3', u' Ì‹'), - (0x2DE, 'V'), - (0x2E0, 'M', u'É£'), - (0x2E1, 'M', u'l'), - (0x2E2, 'M', u's'), - (0x2E3, 'M', u'x'), - (0x2E4, 'M', u'Ê•'), - (0x2E5, 'V'), - (0x340, 'M', u'Ì€'), - (0x341, 'M', u'Ì'), - (0x342, 'V'), - (0x343, 'M', u'Ì“'), - (0x344, 'M', u'̈Ì'), - (0x345, 'M', u'ι'), - (0x346, 'V'), - (0x34F, 'I'), - (0x350, 'V'), - (0x370, 'M', u'ͱ'), - (0x371, 'V'), - (0x372, 'M', u'ͳ'), - (0x373, 'V'), - (0x374, 'M', u'ʹ'), - (0x375, 'V'), - (0x376, 'M', u'Í·'), - (0x377, 'V'), - ] - -def _seg_6(): - return [ - (0x378, 'X'), - (0x37A, '3', u' ι'), - (0x37B, 'V'), - (0x37E, '3', u';'), - (0x37F, 'M', u'ϳ'), - (0x380, 'X'), - (0x384, '3', u' Ì'), - (0x385, '3', u' ̈Ì'), - (0x386, 'M', u'ά'), - (0x387, 'M', u'·'), - (0x388, 'M', u'έ'), - (0x389, 'M', u'ή'), - (0x38A, 'M', u'ί'), - (0x38B, 'X'), - (0x38C, 'M', u'ÏŒ'), - (0x38D, 'X'), - (0x38E, 'M', u'Ï'), - (0x38F, 'M', u'ÏŽ'), - (0x390, 'V'), - (0x391, 'M', u'α'), - (0x392, 'M', u'β'), - (0x393, 'M', u'γ'), - (0x394, 'M', u'δ'), - (0x395, 'M', u'ε'), - (0x396, 'M', u'ζ'), - (0x397, 'M', u'η'), - (0x398, 'M', u'θ'), - (0x399, 'M', u'ι'), - (0x39A, 'M', u'κ'), - (0x39B, 'M', u'λ'), - (0x39C, 'M', u'μ'), - (0x39D, 'M', u'ν'), - (0x39E, 'M', u'ξ'), - (0x39F, 'M', u'ο'), - (0x3A0, 'M', u'Ï€'), - (0x3A1, 'M', u'Ï'), - (0x3A2, 
'X'), - (0x3A3, 'M', u'σ'), - (0x3A4, 'M', u'Ï„'), - (0x3A5, 'M', u'Ï…'), - (0x3A6, 'M', u'φ'), - (0x3A7, 'M', u'χ'), - (0x3A8, 'M', u'ψ'), - (0x3A9, 'M', u'ω'), - (0x3AA, 'M', u'ÏŠ'), - (0x3AB, 'M', u'Ï‹'), - (0x3AC, 'V'), - (0x3C2, 'D', u'σ'), - (0x3C3, 'V'), - (0x3CF, 'M', u'Ï—'), - (0x3D0, 'M', u'β'), - (0x3D1, 'M', u'θ'), - (0x3D2, 'M', u'Ï…'), - (0x3D3, 'M', u'Ï'), - (0x3D4, 'M', u'Ï‹'), - (0x3D5, 'M', u'φ'), - (0x3D6, 'M', u'Ï€'), - (0x3D7, 'V'), - (0x3D8, 'M', u'Ï™'), - (0x3D9, 'V'), - (0x3DA, 'M', u'Ï›'), - (0x3DB, 'V'), - (0x3DC, 'M', u'Ï'), - (0x3DD, 'V'), - (0x3DE, 'M', u'ÏŸ'), - (0x3DF, 'V'), - (0x3E0, 'M', u'Ï¡'), - (0x3E1, 'V'), - (0x3E2, 'M', u'Ï£'), - (0x3E3, 'V'), - (0x3E4, 'M', u'Ï¥'), - (0x3E5, 'V'), - (0x3E6, 'M', u'ϧ'), - (0x3E7, 'V'), - (0x3E8, 'M', u'Ï©'), - (0x3E9, 'V'), - (0x3EA, 'M', u'Ï«'), - (0x3EB, 'V'), - (0x3EC, 'M', u'Ï­'), - (0x3ED, 'V'), - (0x3EE, 'M', u'ϯ'), - (0x3EF, 'V'), - (0x3F0, 'M', u'κ'), - (0x3F1, 'M', u'Ï'), - (0x3F2, 'M', u'σ'), - (0x3F3, 'V'), - (0x3F4, 'M', u'θ'), - (0x3F5, 'M', u'ε'), - (0x3F6, 'V'), - (0x3F7, 'M', u'ϸ'), - (0x3F8, 'V'), - (0x3F9, 'M', u'σ'), - (0x3FA, 'M', u'Ï»'), - (0x3FB, 'V'), - (0x3FD, 'M', u'Í»'), - (0x3FE, 'M', u'ͼ'), - (0x3FF, 'M', u'ͽ'), - (0x400, 'M', u'Ñ'), - (0x401, 'M', u'Ñ‘'), - (0x402, 'M', u'Ñ’'), - ] - -def _seg_7(): - return [ - (0x403, 'M', u'Ñ“'), - (0x404, 'M', u'Ñ”'), - (0x405, 'M', u'Ñ•'), - (0x406, 'M', u'Ñ–'), - (0x407, 'M', u'Ñ—'), - (0x408, 'M', u'ј'), - (0x409, 'M', u'Ñ™'), - (0x40A, 'M', u'Ñš'), - (0x40B, 'M', u'Ñ›'), - (0x40C, 'M', u'Ñœ'), - (0x40D, 'M', u'Ñ'), - (0x40E, 'M', u'Ñž'), - (0x40F, 'M', u'ÑŸ'), - (0x410, 'M', u'а'), - (0x411, 'M', u'б'), - (0x412, 'M', u'в'), - (0x413, 'M', u'г'), - (0x414, 'M', u'д'), - (0x415, 'M', u'е'), - (0x416, 'M', u'ж'), - (0x417, 'M', u'з'), - (0x418, 'M', u'и'), - (0x419, 'M', u'й'), - (0x41A, 'M', u'к'), - (0x41B, 'M', u'л'), - (0x41C, 'M', u'м'), - (0x41D, 'M', u'н'), - (0x41E, 'M', u'о'), - (0x41F, 'M', u'п'), - (0x420, 'M', 
u'Ñ€'), - (0x421, 'M', u'Ñ'), - (0x422, 'M', u'Ñ‚'), - (0x423, 'M', u'у'), - (0x424, 'M', u'Ñ„'), - (0x425, 'M', u'Ñ…'), - (0x426, 'M', u'ц'), - (0x427, 'M', u'ч'), - (0x428, 'M', u'ш'), - (0x429, 'M', u'щ'), - (0x42A, 'M', u'ÑŠ'), - (0x42B, 'M', u'Ñ‹'), - (0x42C, 'M', u'ÑŒ'), - (0x42D, 'M', u'Ñ'), - (0x42E, 'M', u'ÑŽ'), - (0x42F, 'M', u'Ñ'), - (0x430, 'V'), - (0x460, 'M', u'Ñ¡'), - (0x461, 'V'), - (0x462, 'M', u'Ñ£'), - (0x463, 'V'), - (0x464, 'M', u'Ñ¥'), - (0x465, 'V'), - (0x466, 'M', u'ѧ'), - (0x467, 'V'), - (0x468, 'M', u'Ñ©'), - (0x469, 'V'), - (0x46A, 'M', u'Ñ«'), - (0x46B, 'V'), - (0x46C, 'M', u'Ñ­'), - (0x46D, 'V'), - (0x46E, 'M', u'ѯ'), - (0x46F, 'V'), - (0x470, 'M', u'ѱ'), - (0x471, 'V'), - (0x472, 'M', u'ѳ'), - (0x473, 'V'), - (0x474, 'M', u'ѵ'), - (0x475, 'V'), - (0x476, 'M', u'Ñ·'), - (0x477, 'V'), - (0x478, 'M', u'ѹ'), - (0x479, 'V'), - (0x47A, 'M', u'Ñ»'), - (0x47B, 'V'), - (0x47C, 'M', u'ѽ'), - (0x47D, 'V'), - (0x47E, 'M', u'Ñ¿'), - (0x47F, 'V'), - (0x480, 'M', u'Ò'), - (0x481, 'V'), - (0x48A, 'M', u'Ò‹'), - (0x48B, 'V'), - (0x48C, 'M', u'Ò'), - (0x48D, 'V'), - (0x48E, 'M', u'Ò'), - (0x48F, 'V'), - (0x490, 'M', u'Ò‘'), - (0x491, 'V'), - (0x492, 'M', u'Ò“'), - (0x493, 'V'), - (0x494, 'M', u'Ò•'), - (0x495, 'V'), - (0x496, 'M', u'Ò—'), - (0x497, 'V'), - (0x498, 'M', u'Ò™'), - (0x499, 'V'), - (0x49A, 'M', u'Ò›'), - (0x49B, 'V'), - (0x49C, 'M', u'Ò'), - (0x49D, 'V'), - ] - -def _seg_8(): - return [ - (0x49E, 'M', u'ÒŸ'), - (0x49F, 'V'), - (0x4A0, 'M', u'Ò¡'), - (0x4A1, 'V'), - (0x4A2, 'M', u'Ò£'), - (0x4A3, 'V'), - (0x4A4, 'M', u'Ò¥'), - (0x4A5, 'V'), - (0x4A6, 'M', u'Ò§'), - (0x4A7, 'V'), - (0x4A8, 'M', u'Ò©'), - (0x4A9, 'V'), - (0x4AA, 'M', u'Ò«'), - (0x4AB, 'V'), - (0x4AC, 'M', u'Ò­'), - (0x4AD, 'V'), - (0x4AE, 'M', u'Ò¯'), - (0x4AF, 'V'), - (0x4B0, 'M', u'Ò±'), - (0x4B1, 'V'), - (0x4B2, 'M', u'Ò³'), - (0x4B3, 'V'), - (0x4B4, 'M', u'Òµ'), - (0x4B5, 'V'), - (0x4B6, 'M', u'Ò·'), - (0x4B7, 'V'), - (0x4B8, 'M', u'Ò¹'), - (0x4B9, 'V'), - (0x4BA, 'M', 
u'Ò»'), - (0x4BB, 'V'), - (0x4BC, 'M', u'Ò½'), - (0x4BD, 'V'), - (0x4BE, 'M', u'Ò¿'), - (0x4BF, 'V'), - (0x4C0, 'X'), - (0x4C1, 'M', u'Ó‚'), - (0x4C2, 'V'), - (0x4C3, 'M', u'Ó„'), - (0x4C4, 'V'), - (0x4C5, 'M', u'Ó†'), - (0x4C6, 'V'), - (0x4C7, 'M', u'Óˆ'), - (0x4C8, 'V'), - (0x4C9, 'M', u'ÓŠ'), - (0x4CA, 'V'), - (0x4CB, 'M', u'ÓŒ'), - (0x4CC, 'V'), - (0x4CD, 'M', u'ÓŽ'), - (0x4CE, 'V'), - (0x4D0, 'M', u'Ó‘'), - (0x4D1, 'V'), - (0x4D2, 'M', u'Ó“'), - (0x4D3, 'V'), - (0x4D4, 'M', u'Ó•'), - (0x4D5, 'V'), - (0x4D6, 'M', u'Ó—'), - (0x4D7, 'V'), - (0x4D8, 'M', u'Ó™'), - (0x4D9, 'V'), - (0x4DA, 'M', u'Ó›'), - (0x4DB, 'V'), - (0x4DC, 'M', u'Ó'), - (0x4DD, 'V'), - (0x4DE, 'M', u'ÓŸ'), - (0x4DF, 'V'), - (0x4E0, 'M', u'Ó¡'), - (0x4E1, 'V'), - (0x4E2, 'M', u'Ó£'), - (0x4E3, 'V'), - (0x4E4, 'M', u'Ó¥'), - (0x4E5, 'V'), - (0x4E6, 'M', u'Ó§'), - (0x4E7, 'V'), - (0x4E8, 'M', u'Ó©'), - (0x4E9, 'V'), - (0x4EA, 'M', u'Ó«'), - (0x4EB, 'V'), - (0x4EC, 'M', u'Ó­'), - (0x4ED, 'V'), - (0x4EE, 'M', u'Ó¯'), - (0x4EF, 'V'), - (0x4F0, 'M', u'Ó±'), - (0x4F1, 'V'), - (0x4F2, 'M', u'Ó³'), - (0x4F3, 'V'), - (0x4F4, 'M', u'Óµ'), - (0x4F5, 'V'), - (0x4F6, 'M', u'Ó·'), - (0x4F7, 'V'), - (0x4F8, 'M', u'Ó¹'), - (0x4F9, 'V'), - (0x4FA, 'M', u'Ó»'), - (0x4FB, 'V'), - (0x4FC, 'M', u'Ó½'), - (0x4FD, 'V'), - (0x4FE, 'M', u'Ó¿'), - (0x4FF, 'V'), - (0x500, 'M', u'Ô'), - (0x501, 'V'), - (0x502, 'M', u'Ôƒ'), - ] - -def _seg_9(): - return [ - (0x503, 'V'), - (0x504, 'M', u'Ô…'), - (0x505, 'V'), - (0x506, 'M', u'Ô‡'), - (0x507, 'V'), - (0x508, 'M', u'Ô‰'), - (0x509, 'V'), - (0x50A, 'M', u'Ô‹'), - (0x50B, 'V'), - (0x50C, 'M', u'Ô'), - (0x50D, 'V'), - (0x50E, 'M', u'Ô'), - (0x50F, 'V'), - (0x510, 'M', u'Ô‘'), - (0x511, 'V'), - (0x512, 'M', u'Ô“'), - (0x513, 'V'), - (0x514, 'M', u'Ô•'), - (0x515, 'V'), - (0x516, 'M', u'Ô—'), - (0x517, 'V'), - (0x518, 'M', u'Ô™'), - (0x519, 'V'), - (0x51A, 'M', u'Ô›'), - (0x51B, 'V'), - (0x51C, 'M', u'Ô'), - (0x51D, 'V'), - (0x51E, 'M', u'ÔŸ'), - (0x51F, 'V'), - (0x520, 'M', 
u'Ô¡'), - (0x521, 'V'), - (0x522, 'M', u'Ô£'), - (0x523, 'V'), - (0x524, 'M', u'Ô¥'), - (0x525, 'V'), - (0x526, 'M', u'Ô§'), - (0x527, 'V'), - (0x528, 'M', u'Ô©'), - (0x529, 'V'), - (0x52A, 'M', u'Ô«'), - (0x52B, 'V'), - (0x52C, 'M', u'Ô­'), - (0x52D, 'V'), - (0x52E, 'M', u'Ô¯'), - (0x52F, 'V'), - (0x530, 'X'), - (0x531, 'M', u'Õ¡'), - (0x532, 'M', u'Õ¢'), - (0x533, 'M', u'Õ£'), - (0x534, 'M', u'Õ¤'), - (0x535, 'M', u'Õ¥'), - (0x536, 'M', u'Õ¦'), - (0x537, 'M', u'Õ§'), - (0x538, 'M', u'Õ¨'), - (0x539, 'M', u'Õ©'), - (0x53A, 'M', u'Õª'), - (0x53B, 'M', u'Õ«'), - (0x53C, 'M', u'Õ¬'), - (0x53D, 'M', u'Õ­'), - (0x53E, 'M', u'Õ®'), - (0x53F, 'M', u'Õ¯'), - (0x540, 'M', u'Õ°'), - (0x541, 'M', u'Õ±'), - (0x542, 'M', u'Õ²'), - (0x543, 'M', u'Õ³'), - (0x544, 'M', u'Õ´'), - (0x545, 'M', u'Õµ'), - (0x546, 'M', u'Õ¶'), - (0x547, 'M', u'Õ·'), - (0x548, 'M', u'Õ¸'), - (0x549, 'M', u'Õ¹'), - (0x54A, 'M', u'Õº'), - (0x54B, 'M', u'Õ»'), - (0x54C, 'M', u'Õ¼'), - (0x54D, 'M', u'Õ½'), - (0x54E, 'M', u'Õ¾'), - (0x54F, 'M', u'Õ¿'), - (0x550, 'M', u'Ö€'), - (0x551, 'M', u'Ö'), - (0x552, 'M', u'Ö‚'), - (0x553, 'M', u'Öƒ'), - (0x554, 'M', u'Ö„'), - (0x555, 'M', u'Ö…'), - (0x556, 'M', u'Ö†'), - (0x557, 'X'), - (0x559, 'V'), - (0x587, 'M', u'Õ¥Ö‚'), - (0x588, 'V'), - (0x58B, 'X'), - (0x58D, 'V'), - (0x590, 'X'), - (0x591, 'V'), - (0x5C8, 'X'), - (0x5D0, 'V'), - (0x5EB, 'X'), - (0x5EF, 'V'), - (0x5F5, 'X'), - (0x606, 'V'), - (0x61C, 'X'), - (0x61E, 'V'), - ] - -def _seg_10(): - return [ - (0x675, 'M', u'اٴ'), - (0x676, 'M', u'وٴ'), - (0x677, 'M', u'Û‡Ù´'), - (0x678, 'M', u'يٴ'), - (0x679, 'V'), - (0x6DD, 'X'), - (0x6DE, 'V'), - (0x70E, 'X'), - (0x710, 'V'), - (0x74B, 'X'), - (0x74D, 'V'), - (0x7B2, 'X'), - (0x7C0, 'V'), - (0x7FB, 'X'), - (0x7FD, 'V'), - (0x82E, 'X'), - (0x830, 'V'), - (0x83F, 'X'), - (0x840, 'V'), - (0x85C, 'X'), - (0x85E, 'V'), - (0x85F, 'X'), - (0x860, 'V'), - (0x86B, 'X'), - (0x8A0, 'V'), - (0x8B5, 'X'), - (0x8B6, 'V'), - (0x8BE, 'X'), - (0x8D3, 'V'), - (0x8E2, 'X'), - 
(0x8E3, 'V'), - (0x958, 'M', u'क़'), - (0x959, 'M', u'ख़'), - (0x95A, 'M', u'ग़'), - (0x95B, 'M', u'ज़'), - (0x95C, 'M', u'ड़'), - (0x95D, 'M', u'ढ़'), - (0x95E, 'M', u'फ़'), - (0x95F, 'M', u'य़'), - (0x960, 'V'), - (0x984, 'X'), - (0x985, 'V'), - (0x98D, 'X'), - (0x98F, 'V'), - (0x991, 'X'), - (0x993, 'V'), - (0x9A9, 'X'), - (0x9AA, 'V'), - (0x9B1, 'X'), - (0x9B2, 'V'), - (0x9B3, 'X'), - (0x9B6, 'V'), - (0x9BA, 'X'), - (0x9BC, 'V'), - (0x9C5, 'X'), - (0x9C7, 'V'), - (0x9C9, 'X'), - (0x9CB, 'V'), - (0x9CF, 'X'), - (0x9D7, 'V'), - (0x9D8, 'X'), - (0x9DC, 'M', u'ড়'), - (0x9DD, 'M', u'ঢ়'), - (0x9DE, 'X'), - (0x9DF, 'M', u'য়'), - (0x9E0, 'V'), - (0x9E4, 'X'), - (0x9E6, 'V'), - (0x9FF, 'X'), - (0xA01, 'V'), - (0xA04, 'X'), - (0xA05, 'V'), - (0xA0B, 'X'), - (0xA0F, 'V'), - (0xA11, 'X'), - (0xA13, 'V'), - (0xA29, 'X'), - (0xA2A, 'V'), - (0xA31, 'X'), - (0xA32, 'V'), - (0xA33, 'M', u'ਲ਼'), - (0xA34, 'X'), - (0xA35, 'V'), - (0xA36, 'M', u'ਸ਼'), - (0xA37, 'X'), - (0xA38, 'V'), - (0xA3A, 'X'), - (0xA3C, 'V'), - (0xA3D, 'X'), - (0xA3E, 'V'), - (0xA43, 'X'), - (0xA47, 'V'), - (0xA49, 'X'), - (0xA4B, 'V'), - (0xA4E, 'X'), - (0xA51, 'V'), - (0xA52, 'X'), - (0xA59, 'M', u'ਖ਼'), - (0xA5A, 'M', u'ਗ਼'), - (0xA5B, 'M', u'ਜ਼'), - ] - -def _seg_11(): - return [ - (0xA5C, 'V'), - (0xA5D, 'X'), - (0xA5E, 'M', u'ਫ਼'), - (0xA5F, 'X'), - (0xA66, 'V'), - (0xA77, 'X'), - (0xA81, 'V'), - (0xA84, 'X'), - (0xA85, 'V'), - (0xA8E, 'X'), - (0xA8F, 'V'), - (0xA92, 'X'), - (0xA93, 'V'), - (0xAA9, 'X'), - (0xAAA, 'V'), - (0xAB1, 'X'), - (0xAB2, 'V'), - (0xAB4, 'X'), - (0xAB5, 'V'), - (0xABA, 'X'), - (0xABC, 'V'), - (0xAC6, 'X'), - (0xAC7, 'V'), - (0xACA, 'X'), - (0xACB, 'V'), - (0xACE, 'X'), - (0xAD0, 'V'), - (0xAD1, 'X'), - (0xAE0, 'V'), - (0xAE4, 'X'), - (0xAE6, 'V'), - (0xAF2, 'X'), - (0xAF9, 'V'), - (0xB00, 'X'), - (0xB01, 'V'), - (0xB04, 'X'), - (0xB05, 'V'), - (0xB0D, 'X'), - (0xB0F, 'V'), - (0xB11, 'X'), - (0xB13, 'V'), - (0xB29, 'X'), - (0xB2A, 'V'), - (0xB31, 'X'), - (0xB32, 'V'), - (0xB34, 
'X'), - (0xB35, 'V'), - (0xB3A, 'X'), - (0xB3C, 'V'), - (0xB45, 'X'), - (0xB47, 'V'), - (0xB49, 'X'), - (0xB4B, 'V'), - (0xB4E, 'X'), - (0xB56, 'V'), - (0xB58, 'X'), - (0xB5C, 'M', u'ଡ଼'), - (0xB5D, 'M', u'ଢ଼'), - (0xB5E, 'X'), - (0xB5F, 'V'), - (0xB64, 'X'), - (0xB66, 'V'), - (0xB78, 'X'), - (0xB82, 'V'), - (0xB84, 'X'), - (0xB85, 'V'), - (0xB8B, 'X'), - (0xB8E, 'V'), - (0xB91, 'X'), - (0xB92, 'V'), - (0xB96, 'X'), - (0xB99, 'V'), - (0xB9B, 'X'), - (0xB9C, 'V'), - (0xB9D, 'X'), - (0xB9E, 'V'), - (0xBA0, 'X'), - (0xBA3, 'V'), - (0xBA5, 'X'), - (0xBA8, 'V'), - (0xBAB, 'X'), - (0xBAE, 'V'), - (0xBBA, 'X'), - (0xBBE, 'V'), - (0xBC3, 'X'), - (0xBC6, 'V'), - (0xBC9, 'X'), - (0xBCA, 'V'), - (0xBCE, 'X'), - (0xBD0, 'V'), - (0xBD1, 'X'), - (0xBD7, 'V'), - (0xBD8, 'X'), - (0xBE6, 'V'), - (0xBFB, 'X'), - (0xC00, 'V'), - (0xC0D, 'X'), - (0xC0E, 'V'), - (0xC11, 'X'), - (0xC12, 'V'), - ] - -def _seg_12(): - return [ - (0xC29, 'X'), - (0xC2A, 'V'), - (0xC3A, 'X'), - (0xC3D, 'V'), - (0xC45, 'X'), - (0xC46, 'V'), - (0xC49, 'X'), - (0xC4A, 'V'), - (0xC4E, 'X'), - (0xC55, 'V'), - (0xC57, 'X'), - (0xC58, 'V'), - (0xC5B, 'X'), - (0xC60, 'V'), - (0xC64, 'X'), - (0xC66, 'V'), - (0xC70, 'X'), - (0xC78, 'V'), - (0xC8D, 'X'), - (0xC8E, 'V'), - (0xC91, 'X'), - (0xC92, 'V'), - (0xCA9, 'X'), - (0xCAA, 'V'), - (0xCB4, 'X'), - (0xCB5, 'V'), - (0xCBA, 'X'), - (0xCBC, 'V'), - (0xCC5, 'X'), - (0xCC6, 'V'), - (0xCC9, 'X'), - (0xCCA, 'V'), - (0xCCE, 'X'), - (0xCD5, 'V'), - (0xCD7, 'X'), - (0xCDE, 'V'), - (0xCDF, 'X'), - (0xCE0, 'V'), - (0xCE4, 'X'), - (0xCE6, 'V'), - (0xCF0, 'X'), - (0xCF1, 'V'), - (0xCF3, 'X'), - (0xD00, 'V'), - (0xD04, 'X'), - (0xD05, 'V'), - (0xD0D, 'X'), - (0xD0E, 'V'), - (0xD11, 'X'), - (0xD12, 'V'), - (0xD45, 'X'), - (0xD46, 'V'), - (0xD49, 'X'), - (0xD4A, 'V'), - (0xD50, 'X'), - (0xD54, 'V'), - (0xD64, 'X'), - (0xD66, 'V'), - (0xD80, 'X'), - (0xD82, 'V'), - (0xD84, 'X'), - (0xD85, 'V'), - (0xD97, 'X'), - (0xD9A, 'V'), - (0xDB2, 'X'), - (0xDB3, 'V'), - (0xDBC, 'X'), - (0xDBD, 
'V'), - (0xDBE, 'X'), - (0xDC0, 'V'), - (0xDC7, 'X'), - (0xDCA, 'V'), - (0xDCB, 'X'), - (0xDCF, 'V'), - (0xDD5, 'X'), - (0xDD6, 'V'), - (0xDD7, 'X'), - (0xDD8, 'V'), - (0xDE0, 'X'), - (0xDE6, 'V'), - (0xDF0, 'X'), - (0xDF2, 'V'), - (0xDF5, 'X'), - (0xE01, 'V'), - (0xE33, 'M', u'à¹à¸²'), - (0xE34, 'V'), - (0xE3B, 'X'), - (0xE3F, 'V'), - (0xE5C, 'X'), - (0xE81, 'V'), - (0xE83, 'X'), - (0xE84, 'V'), - (0xE85, 'X'), - (0xE87, 'V'), - (0xE89, 'X'), - (0xE8A, 'V'), - (0xE8B, 'X'), - (0xE8D, 'V'), - (0xE8E, 'X'), - (0xE94, 'V'), - ] - -def _seg_13(): - return [ - (0xE98, 'X'), - (0xE99, 'V'), - (0xEA0, 'X'), - (0xEA1, 'V'), - (0xEA4, 'X'), - (0xEA5, 'V'), - (0xEA6, 'X'), - (0xEA7, 'V'), - (0xEA8, 'X'), - (0xEAA, 'V'), - (0xEAC, 'X'), - (0xEAD, 'V'), - (0xEB3, 'M', u'à»àº²'), - (0xEB4, 'V'), - (0xEBA, 'X'), - (0xEBB, 'V'), - (0xEBE, 'X'), - (0xEC0, 'V'), - (0xEC5, 'X'), - (0xEC6, 'V'), - (0xEC7, 'X'), - (0xEC8, 'V'), - (0xECE, 'X'), - (0xED0, 'V'), - (0xEDA, 'X'), - (0xEDC, 'M', u'ຫນ'), - (0xEDD, 'M', u'ຫມ'), - (0xEDE, 'V'), - (0xEE0, 'X'), - (0xF00, 'V'), - (0xF0C, 'M', u'་'), - (0xF0D, 'V'), - (0xF43, 'M', u'གྷ'), - (0xF44, 'V'), - (0xF48, 'X'), - (0xF49, 'V'), - (0xF4D, 'M', u'ཌྷ'), - (0xF4E, 'V'), - (0xF52, 'M', u'དྷ'), - (0xF53, 'V'), - (0xF57, 'M', u'བྷ'), - (0xF58, 'V'), - (0xF5C, 'M', u'ཛྷ'), - (0xF5D, 'V'), - (0xF69, 'M', u'ཀྵ'), - (0xF6A, 'V'), - (0xF6D, 'X'), - (0xF71, 'V'), - (0xF73, 'M', u'ཱི'), - (0xF74, 'V'), - (0xF75, 'M', u'ཱུ'), - (0xF76, 'M', u'ྲྀ'), - (0xF77, 'M', u'ྲཱྀ'), - (0xF78, 'M', u'ླྀ'), - (0xF79, 'M', u'ླཱྀ'), - (0xF7A, 'V'), - (0xF81, 'M', u'ཱྀ'), - (0xF82, 'V'), - (0xF93, 'M', u'ྒྷ'), - (0xF94, 'V'), - (0xF98, 'X'), - (0xF99, 'V'), - (0xF9D, 'M', u'ྜྷ'), - (0xF9E, 'V'), - (0xFA2, 'M', u'ྡྷ'), - (0xFA3, 'V'), - (0xFA7, 'M', u'ྦྷ'), - (0xFA8, 'V'), - (0xFAC, 'M', u'ྫྷ'), - (0xFAD, 'V'), - (0xFB9, 'M', u'à¾à¾µ'), - (0xFBA, 'V'), - (0xFBD, 'X'), - (0xFBE, 'V'), - (0xFCD, 'X'), - (0xFCE, 'V'), - (0xFDB, 'X'), - (0x1000, 'V'), - (0x10A0, 'X'), - 
(0x10C7, 'M', u'â´§'), - (0x10C8, 'X'), - (0x10CD, 'M', u'â´­'), - (0x10CE, 'X'), - (0x10D0, 'V'), - (0x10FC, 'M', u'ნ'), - (0x10FD, 'V'), - (0x115F, 'X'), - (0x1161, 'V'), - (0x1249, 'X'), - (0x124A, 'V'), - (0x124E, 'X'), - (0x1250, 'V'), - (0x1257, 'X'), - (0x1258, 'V'), - (0x1259, 'X'), - (0x125A, 'V'), - (0x125E, 'X'), - (0x1260, 'V'), - (0x1289, 'X'), - (0x128A, 'V'), - ] - -def _seg_14(): - return [ - (0x128E, 'X'), - (0x1290, 'V'), - (0x12B1, 'X'), - (0x12B2, 'V'), - (0x12B6, 'X'), - (0x12B8, 'V'), - (0x12BF, 'X'), - (0x12C0, 'V'), - (0x12C1, 'X'), - (0x12C2, 'V'), - (0x12C6, 'X'), - (0x12C8, 'V'), - (0x12D7, 'X'), - (0x12D8, 'V'), - (0x1311, 'X'), - (0x1312, 'V'), - (0x1316, 'X'), - (0x1318, 'V'), - (0x135B, 'X'), - (0x135D, 'V'), - (0x137D, 'X'), - (0x1380, 'V'), - (0x139A, 'X'), - (0x13A0, 'V'), - (0x13F6, 'X'), - (0x13F8, 'M', u'á°'), - (0x13F9, 'M', u'á±'), - (0x13FA, 'M', u'á²'), - (0x13FB, 'M', u'á³'), - (0x13FC, 'M', u'á´'), - (0x13FD, 'M', u'áµ'), - (0x13FE, 'X'), - (0x1400, 'V'), - (0x1680, 'X'), - (0x1681, 'V'), - (0x169D, 'X'), - (0x16A0, 'V'), - (0x16F9, 'X'), - (0x1700, 'V'), - (0x170D, 'X'), - (0x170E, 'V'), - (0x1715, 'X'), - (0x1720, 'V'), - (0x1737, 'X'), - (0x1740, 'V'), - (0x1754, 'X'), - (0x1760, 'V'), - (0x176D, 'X'), - (0x176E, 'V'), - (0x1771, 'X'), - (0x1772, 'V'), - (0x1774, 'X'), - (0x1780, 'V'), - (0x17B4, 'X'), - (0x17B6, 'V'), - (0x17DE, 'X'), - (0x17E0, 'V'), - (0x17EA, 'X'), - (0x17F0, 'V'), - (0x17FA, 'X'), - (0x1800, 'V'), - (0x1806, 'X'), - (0x1807, 'V'), - (0x180B, 'I'), - (0x180E, 'X'), - (0x1810, 'V'), - (0x181A, 'X'), - (0x1820, 'V'), - (0x1879, 'X'), - (0x1880, 'V'), - (0x18AB, 'X'), - (0x18B0, 'V'), - (0x18F6, 'X'), - (0x1900, 'V'), - (0x191F, 'X'), - (0x1920, 'V'), - (0x192C, 'X'), - (0x1930, 'V'), - (0x193C, 'X'), - (0x1940, 'V'), - (0x1941, 'X'), - (0x1944, 'V'), - (0x196E, 'X'), - (0x1970, 'V'), - (0x1975, 'X'), - (0x1980, 'V'), - (0x19AC, 'X'), - (0x19B0, 'V'), - (0x19CA, 'X'), - (0x19D0, 'V'), - (0x19DB, 'X'), 
- (0x19DE, 'V'), - (0x1A1C, 'X'), - (0x1A1E, 'V'), - (0x1A5F, 'X'), - (0x1A60, 'V'), - (0x1A7D, 'X'), - (0x1A7F, 'V'), - (0x1A8A, 'X'), - (0x1A90, 'V'), - ] - -def _seg_15(): - return [ - (0x1A9A, 'X'), - (0x1AA0, 'V'), - (0x1AAE, 'X'), - (0x1AB0, 'V'), - (0x1ABF, 'X'), - (0x1B00, 'V'), - (0x1B4C, 'X'), - (0x1B50, 'V'), - (0x1B7D, 'X'), - (0x1B80, 'V'), - (0x1BF4, 'X'), - (0x1BFC, 'V'), - (0x1C38, 'X'), - (0x1C3B, 'V'), - (0x1C4A, 'X'), - (0x1C4D, 'V'), - (0x1C80, 'M', u'в'), - (0x1C81, 'M', u'д'), - (0x1C82, 'M', u'о'), - (0x1C83, 'M', u'Ñ'), - (0x1C84, 'M', u'Ñ‚'), - (0x1C86, 'M', u'ÑŠ'), - (0x1C87, 'M', u'Ñ£'), - (0x1C88, 'M', u'ꙋ'), - (0x1C89, 'X'), - (0x1CC0, 'V'), - (0x1CC8, 'X'), - (0x1CD0, 'V'), - (0x1CFA, 'X'), - (0x1D00, 'V'), - (0x1D2C, 'M', u'a'), - (0x1D2D, 'M', u'æ'), - (0x1D2E, 'M', u'b'), - (0x1D2F, 'V'), - (0x1D30, 'M', u'd'), - (0x1D31, 'M', u'e'), - (0x1D32, 'M', u'Ç'), - (0x1D33, 'M', u'g'), - (0x1D34, 'M', u'h'), - (0x1D35, 'M', u'i'), - (0x1D36, 'M', u'j'), - (0x1D37, 'M', u'k'), - (0x1D38, 'M', u'l'), - (0x1D39, 'M', u'm'), - (0x1D3A, 'M', u'n'), - (0x1D3B, 'V'), - (0x1D3C, 'M', u'o'), - (0x1D3D, 'M', u'È£'), - (0x1D3E, 'M', u'p'), - (0x1D3F, 'M', u'r'), - (0x1D40, 'M', u't'), - (0x1D41, 'M', u'u'), - (0x1D42, 'M', u'w'), - (0x1D43, 'M', u'a'), - (0x1D44, 'M', u'É'), - (0x1D45, 'M', u'É‘'), - (0x1D46, 'M', u'á´‚'), - (0x1D47, 'M', u'b'), - (0x1D48, 'M', u'd'), - (0x1D49, 'M', u'e'), - (0x1D4A, 'M', u'É™'), - (0x1D4B, 'M', u'É›'), - (0x1D4C, 'M', u'Éœ'), - (0x1D4D, 'M', u'g'), - (0x1D4E, 'V'), - (0x1D4F, 'M', u'k'), - (0x1D50, 'M', u'm'), - (0x1D51, 'M', u'Å‹'), - (0x1D52, 'M', u'o'), - (0x1D53, 'M', u'É”'), - (0x1D54, 'M', u'á´–'), - (0x1D55, 'M', u'á´—'), - (0x1D56, 'M', u'p'), - (0x1D57, 'M', u't'), - (0x1D58, 'M', u'u'), - (0x1D59, 'M', u'á´'), - (0x1D5A, 'M', u'ɯ'), - (0x1D5B, 'M', u'v'), - (0x1D5C, 'M', u'á´¥'), - (0x1D5D, 'M', u'β'), - (0x1D5E, 'M', u'γ'), - (0x1D5F, 'M', u'δ'), - (0x1D60, 'M', u'φ'), - (0x1D61, 'M', u'χ'), - (0x1D62, 
'M', u'i'), - (0x1D63, 'M', u'r'), - (0x1D64, 'M', u'u'), - (0x1D65, 'M', u'v'), - (0x1D66, 'M', u'β'), - (0x1D67, 'M', u'γ'), - (0x1D68, 'M', u'Ï'), - (0x1D69, 'M', u'φ'), - (0x1D6A, 'M', u'χ'), - (0x1D6B, 'V'), - (0x1D78, 'M', u'н'), - (0x1D79, 'V'), - (0x1D9B, 'M', u'É’'), - (0x1D9C, 'M', u'c'), - (0x1D9D, 'M', u'É•'), - (0x1D9E, 'M', u'ð'), - ] - -def _seg_16(): - return [ - (0x1D9F, 'M', u'Éœ'), - (0x1DA0, 'M', u'f'), - (0x1DA1, 'M', u'ÉŸ'), - (0x1DA2, 'M', u'É¡'), - (0x1DA3, 'M', u'É¥'), - (0x1DA4, 'M', u'ɨ'), - (0x1DA5, 'M', u'É©'), - (0x1DA6, 'M', u'ɪ'), - (0x1DA7, 'M', u'áµ»'), - (0x1DA8, 'M', u'Ê'), - (0x1DA9, 'M', u'É­'), - (0x1DAA, 'M', u'ᶅ'), - (0x1DAB, 'M', u'ÊŸ'), - (0x1DAC, 'M', u'ɱ'), - (0x1DAD, 'M', u'É°'), - (0x1DAE, 'M', u'ɲ'), - (0x1DAF, 'M', u'ɳ'), - (0x1DB0, 'M', u'É´'), - (0x1DB1, 'M', u'ɵ'), - (0x1DB2, 'M', u'ɸ'), - (0x1DB3, 'M', u'Ê‚'), - (0x1DB4, 'M', u'ʃ'), - (0x1DB5, 'M', u'Æ«'), - (0x1DB6, 'M', u'ʉ'), - (0x1DB7, 'M', u'ÊŠ'), - (0x1DB8, 'M', u'á´œ'), - (0x1DB9, 'M', u'Ê‹'), - (0x1DBA, 'M', u'ÊŒ'), - (0x1DBB, 'M', u'z'), - (0x1DBC, 'M', u'Ê'), - (0x1DBD, 'M', u'Ê‘'), - (0x1DBE, 'M', u'Ê’'), - (0x1DBF, 'M', u'θ'), - (0x1DC0, 'V'), - (0x1DFA, 'X'), - (0x1DFB, 'V'), - (0x1E00, 'M', u'á¸'), - (0x1E01, 'V'), - (0x1E02, 'M', u'ḃ'), - (0x1E03, 'V'), - (0x1E04, 'M', u'ḅ'), - (0x1E05, 'V'), - (0x1E06, 'M', u'ḇ'), - (0x1E07, 'V'), - (0x1E08, 'M', u'ḉ'), - (0x1E09, 'V'), - (0x1E0A, 'M', u'ḋ'), - (0x1E0B, 'V'), - (0x1E0C, 'M', u'á¸'), - (0x1E0D, 'V'), - (0x1E0E, 'M', u'á¸'), - (0x1E0F, 'V'), - (0x1E10, 'M', u'ḑ'), - (0x1E11, 'V'), - (0x1E12, 'M', u'ḓ'), - (0x1E13, 'V'), - (0x1E14, 'M', u'ḕ'), - (0x1E15, 'V'), - (0x1E16, 'M', u'ḗ'), - (0x1E17, 'V'), - (0x1E18, 'M', u'ḙ'), - (0x1E19, 'V'), - (0x1E1A, 'M', u'ḛ'), - (0x1E1B, 'V'), - (0x1E1C, 'M', u'á¸'), - (0x1E1D, 'V'), - (0x1E1E, 'M', u'ḟ'), - (0x1E1F, 'V'), - (0x1E20, 'M', u'ḡ'), - (0x1E21, 'V'), - (0x1E22, 'M', u'ḣ'), - (0x1E23, 'V'), - (0x1E24, 'M', u'ḥ'), - (0x1E25, 'V'), - (0x1E26, 'M', u'ḧ'), - 
(0x1E27, 'V'), - (0x1E28, 'M', u'ḩ'), - (0x1E29, 'V'), - (0x1E2A, 'M', u'ḫ'), - (0x1E2B, 'V'), - (0x1E2C, 'M', u'ḭ'), - (0x1E2D, 'V'), - (0x1E2E, 'M', u'ḯ'), - (0x1E2F, 'V'), - (0x1E30, 'M', u'ḱ'), - (0x1E31, 'V'), - (0x1E32, 'M', u'ḳ'), - (0x1E33, 'V'), - (0x1E34, 'M', u'ḵ'), - (0x1E35, 'V'), - (0x1E36, 'M', u'ḷ'), - (0x1E37, 'V'), - (0x1E38, 'M', u'ḹ'), - (0x1E39, 'V'), - (0x1E3A, 'M', u'ḻ'), - (0x1E3B, 'V'), - (0x1E3C, 'M', u'ḽ'), - (0x1E3D, 'V'), - (0x1E3E, 'M', u'ḿ'), - (0x1E3F, 'V'), - ] - -def _seg_17(): - return [ - (0x1E40, 'M', u'á¹'), - (0x1E41, 'V'), - (0x1E42, 'M', u'ṃ'), - (0x1E43, 'V'), - (0x1E44, 'M', u'á¹…'), - (0x1E45, 'V'), - (0x1E46, 'M', u'ṇ'), - (0x1E47, 'V'), - (0x1E48, 'M', u'ṉ'), - (0x1E49, 'V'), - (0x1E4A, 'M', u'ṋ'), - (0x1E4B, 'V'), - (0x1E4C, 'M', u'á¹'), - (0x1E4D, 'V'), - (0x1E4E, 'M', u'á¹'), - (0x1E4F, 'V'), - (0x1E50, 'M', u'ṑ'), - (0x1E51, 'V'), - (0x1E52, 'M', u'ṓ'), - (0x1E53, 'V'), - (0x1E54, 'M', u'ṕ'), - (0x1E55, 'V'), - (0x1E56, 'M', u'á¹—'), - (0x1E57, 'V'), - (0x1E58, 'M', u'á¹™'), - (0x1E59, 'V'), - (0x1E5A, 'M', u'á¹›'), - (0x1E5B, 'V'), - (0x1E5C, 'M', u'á¹'), - (0x1E5D, 'V'), - (0x1E5E, 'M', u'ṟ'), - (0x1E5F, 'V'), - (0x1E60, 'M', u'ṡ'), - (0x1E61, 'V'), - (0x1E62, 'M', u'á¹£'), - (0x1E63, 'V'), - (0x1E64, 'M', u'á¹¥'), - (0x1E65, 'V'), - (0x1E66, 'M', u'ṧ'), - (0x1E67, 'V'), - (0x1E68, 'M', u'ṩ'), - (0x1E69, 'V'), - (0x1E6A, 'M', u'ṫ'), - (0x1E6B, 'V'), - (0x1E6C, 'M', u'á¹­'), - (0x1E6D, 'V'), - (0x1E6E, 'M', u'ṯ'), - (0x1E6F, 'V'), - (0x1E70, 'M', u'á¹±'), - (0x1E71, 'V'), - (0x1E72, 'M', u'á¹³'), - (0x1E73, 'V'), - (0x1E74, 'M', u'á¹µ'), - (0x1E75, 'V'), - (0x1E76, 'M', u'á¹·'), - (0x1E77, 'V'), - (0x1E78, 'M', u'á¹¹'), - (0x1E79, 'V'), - (0x1E7A, 'M', u'á¹»'), - (0x1E7B, 'V'), - (0x1E7C, 'M', u'á¹½'), - (0x1E7D, 'V'), - (0x1E7E, 'M', u'ṿ'), - (0x1E7F, 'V'), - (0x1E80, 'M', u'áº'), - (0x1E81, 'V'), - (0x1E82, 'M', u'ẃ'), - (0x1E83, 'V'), - (0x1E84, 'M', u'ẅ'), - (0x1E85, 'V'), - (0x1E86, 'M', u'ẇ'), - (0x1E87, 
'V'), - (0x1E88, 'M', u'ẉ'), - (0x1E89, 'V'), - (0x1E8A, 'M', u'ẋ'), - (0x1E8B, 'V'), - (0x1E8C, 'M', u'áº'), - (0x1E8D, 'V'), - (0x1E8E, 'M', u'áº'), - (0x1E8F, 'V'), - (0x1E90, 'M', u'ẑ'), - (0x1E91, 'V'), - (0x1E92, 'M', u'ẓ'), - (0x1E93, 'V'), - (0x1E94, 'M', u'ẕ'), - (0x1E95, 'V'), - (0x1E9A, 'M', u'aʾ'), - (0x1E9B, 'M', u'ṡ'), - (0x1E9C, 'V'), - (0x1E9E, 'M', u'ss'), - (0x1E9F, 'V'), - (0x1EA0, 'M', u'ạ'), - (0x1EA1, 'V'), - (0x1EA2, 'M', u'ả'), - (0x1EA3, 'V'), - (0x1EA4, 'M', u'ấ'), - (0x1EA5, 'V'), - (0x1EA6, 'M', u'ầ'), - (0x1EA7, 'V'), - (0x1EA8, 'M', u'ẩ'), - ] - -def _seg_18(): - return [ - (0x1EA9, 'V'), - (0x1EAA, 'M', u'ẫ'), - (0x1EAB, 'V'), - (0x1EAC, 'M', u'ậ'), - (0x1EAD, 'V'), - (0x1EAE, 'M', u'ắ'), - (0x1EAF, 'V'), - (0x1EB0, 'M', u'ằ'), - (0x1EB1, 'V'), - (0x1EB2, 'M', u'ẳ'), - (0x1EB3, 'V'), - (0x1EB4, 'M', u'ẵ'), - (0x1EB5, 'V'), - (0x1EB6, 'M', u'ặ'), - (0x1EB7, 'V'), - (0x1EB8, 'M', u'ẹ'), - (0x1EB9, 'V'), - (0x1EBA, 'M', u'ẻ'), - (0x1EBB, 'V'), - (0x1EBC, 'M', u'ẽ'), - (0x1EBD, 'V'), - (0x1EBE, 'M', u'ế'), - (0x1EBF, 'V'), - (0x1EC0, 'M', u'á»'), - (0x1EC1, 'V'), - (0x1EC2, 'M', u'ể'), - (0x1EC3, 'V'), - (0x1EC4, 'M', u'á»…'), - (0x1EC5, 'V'), - (0x1EC6, 'M', u'ệ'), - (0x1EC7, 'V'), - (0x1EC8, 'M', u'ỉ'), - (0x1EC9, 'V'), - (0x1ECA, 'M', u'ị'), - (0x1ECB, 'V'), - (0x1ECC, 'M', u'á»'), - (0x1ECD, 'V'), - (0x1ECE, 'M', u'á»'), - (0x1ECF, 'V'), - (0x1ED0, 'M', u'ố'), - (0x1ED1, 'V'), - (0x1ED2, 'M', u'ồ'), - (0x1ED3, 'V'), - (0x1ED4, 'M', u'ổ'), - (0x1ED5, 'V'), - (0x1ED6, 'M', u'á»—'), - (0x1ED7, 'V'), - (0x1ED8, 'M', u'á»™'), - (0x1ED9, 'V'), - (0x1EDA, 'M', u'á»›'), - (0x1EDB, 'V'), - (0x1EDC, 'M', u'á»'), - (0x1EDD, 'V'), - (0x1EDE, 'M', u'ở'), - (0x1EDF, 'V'), - (0x1EE0, 'M', u'ỡ'), - (0x1EE1, 'V'), - (0x1EE2, 'M', u'ợ'), - (0x1EE3, 'V'), - (0x1EE4, 'M', u'ụ'), - (0x1EE5, 'V'), - (0x1EE6, 'M', u'ủ'), - (0x1EE7, 'V'), - (0x1EE8, 'M', u'ứ'), - (0x1EE9, 'V'), - (0x1EEA, 'M', u'ừ'), - (0x1EEB, 'V'), - (0x1EEC, 'M', u'á»­'), - (0x1EED, 'V'), 
- (0x1EEE, 'M', u'ữ'), - (0x1EEF, 'V'), - (0x1EF0, 'M', u'á»±'), - (0x1EF1, 'V'), - (0x1EF2, 'M', u'ỳ'), - (0x1EF3, 'V'), - (0x1EF4, 'M', u'ỵ'), - (0x1EF5, 'V'), - (0x1EF6, 'M', u'á»·'), - (0x1EF7, 'V'), - (0x1EF8, 'M', u'ỹ'), - (0x1EF9, 'V'), - (0x1EFA, 'M', u'á»»'), - (0x1EFB, 'V'), - (0x1EFC, 'M', u'ỽ'), - (0x1EFD, 'V'), - (0x1EFE, 'M', u'ỿ'), - (0x1EFF, 'V'), - (0x1F08, 'M', u'á¼€'), - (0x1F09, 'M', u'á¼'), - (0x1F0A, 'M', u'ἂ'), - (0x1F0B, 'M', u'ἃ'), - (0x1F0C, 'M', u'ἄ'), - (0x1F0D, 'M', u'á¼…'), - (0x1F0E, 'M', u'ἆ'), - (0x1F0F, 'M', u'ἇ'), - (0x1F10, 'V'), - (0x1F16, 'X'), - (0x1F18, 'M', u'á¼'), - (0x1F19, 'M', u'ἑ'), - (0x1F1A, 'M', u'á¼’'), - ] - -def _seg_19(): - return [ - (0x1F1B, 'M', u'ἓ'), - (0x1F1C, 'M', u'á¼”'), - (0x1F1D, 'M', u'ἕ'), - (0x1F1E, 'X'), - (0x1F20, 'V'), - (0x1F28, 'M', u'á¼ '), - (0x1F29, 'M', u'ἡ'), - (0x1F2A, 'M', u'á¼¢'), - (0x1F2B, 'M', u'á¼£'), - (0x1F2C, 'M', u'ἤ'), - (0x1F2D, 'M', u'á¼¥'), - (0x1F2E, 'M', u'ἦ'), - (0x1F2F, 'M', u'ἧ'), - (0x1F30, 'V'), - (0x1F38, 'M', u'á¼°'), - (0x1F39, 'M', u'á¼±'), - (0x1F3A, 'M', u'á¼²'), - (0x1F3B, 'M', u'á¼³'), - (0x1F3C, 'M', u'á¼´'), - (0x1F3D, 'M', u'á¼µ'), - (0x1F3E, 'M', u'ἶ'), - (0x1F3F, 'M', u'á¼·'), - (0x1F40, 'V'), - (0x1F46, 'X'), - (0x1F48, 'M', u'á½€'), - (0x1F49, 'M', u'á½'), - (0x1F4A, 'M', u'ὂ'), - (0x1F4B, 'M', u'ὃ'), - (0x1F4C, 'M', u'ὄ'), - (0x1F4D, 'M', u'á½…'), - (0x1F4E, 'X'), - (0x1F50, 'V'), - (0x1F58, 'X'), - (0x1F59, 'M', u'ὑ'), - (0x1F5A, 'X'), - (0x1F5B, 'M', u'ὓ'), - (0x1F5C, 'X'), - (0x1F5D, 'M', u'ὕ'), - (0x1F5E, 'X'), - (0x1F5F, 'M', u'á½—'), - (0x1F60, 'V'), - (0x1F68, 'M', u'á½ '), - (0x1F69, 'M', u'ὡ'), - (0x1F6A, 'M', u'á½¢'), - (0x1F6B, 'M', u'á½£'), - (0x1F6C, 'M', u'ὤ'), - (0x1F6D, 'M', u'á½¥'), - (0x1F6E, 'M', u'ὦ'), - (0x1F6F, 'M', u'ὧ'), - (0x1F70, 'V'), - (0x1F71, 'M', u'ά'), - (0x1F72, 'V'), - (0x1F73, 'M', u'έ'), - (0x1F74, 'V'), - (0x1F75, 'M', u'ή'), - (0x1F76, 'V'), - (0x1F77, 'M', u'ί'), - (0x1F78, 'V'), - (0x1F79, 'M', u'ÏŒ'), - (0x1F7A, 
'V'), - (0x1F7B, 'M', u'Ï'), - (0x1F7C, 'V'), - (0x1F7D, 'M', u'ÏŽ'), - (0x1F7E, 'X'), - (0x1F80, 'M', u'ἀι'), - (0x1F81, 'M', u'á¼Î¹'), - (0x1F82, 'M', u'ἂι'), - (0x1F83, 'M', u'ἃι'), - (0x1F84, 'M', u'ἄι'), - (0x1F85, 'M', u'ἅι'), - (0x1F86, 'M', u'ἆι'), - (0x1F87, 'M', u'ἇι'), - (0x1F88, 'M', u'ἀι'), - (0x1F89, 'M', u'á¼Î¹'), - (0x1F8A, 'M', u'ἂι'), - (0x1F8B, 'M', u'ἃι'), - (0x1F8C, 'M', u'ἄι'), - (0x1F8D, 'M', u'ἅι'), - (0x1F8E, 'M', u'ἆι'), - (0x1F8F, 'M', u'ἇι'), - (0x1F90, 'M', u'ἠι'), - (0x1F91, 'M', u'ἡι'), - (0x1F92, 'M', u'ἢι'), - (0x1F93, 'M', u'ἣι'), - (0x1F94, 'M', u'ἤι'), - (0x1F95, 'M', u'ἥι'), - (0x1F96, 'M', u'ἦι'), - (0x1F97, 'M', u'ἧι'), - (0x1F98, 'M', u'ἠι'), - (0x1F99, 'M', u'ἡι'), - (0x1F9A, 'M', u'ἢι'), - (0x1F9B, 'M', u'ἣι'), - (0x1F9C, 'M', u'ἤι'), - (0x1F9D, 'M', u'ἥι'), - (0x1F9E, 'M', u'ἦι'), - (0x1F9F, 'M', u'ἧι'), - (0x1FA0, 'M', u'ὠι'), - (0x1FA1, 'M', u'ὡι'), - (0x1FA2, 'M', u'ὢι'), - (0x1FA3, 'M', u'ὣι'), - ] - -def _seg_20(): - return [ - (0x1FA4, 'M', u'ὤι'), - (0x1FA5, 'M', u'ὥι'), - (0x1FA6, 'M', u'ὦι'), - (0x1FA7, 'M', u'ὧι'), - (0x1FA8, 'M', u'ὠι'), - (0x1FA9, 'M', u'ὡι'), - (0x1FAA, 'M', u'ὢι'), - (0x1FAB, 'M', u'ὣι'), - (0x1FAC, 'M', u'ὤι'), - (0x1FAD, 'M', u'ὥι'), - (0x1FAE, 'M', u'ὦι'), - (0x1FAF, 'M', u'ὧι'), - (0x1FB0, 'V'), - (0x1FB2, 'M', u'ὰι'), - (0x1FB3, 'M', u'αι'), - (0x1FB4, 'M', u'άι'), - (0x1FB5, 'X'), - (0x1FB6, 'V'), - (0x1FB7, 'M', u'ᾶι'), - (0x1FB8, 'M', u'á¾°'), - (0x1FB9, 'M', u'á¾±'), - (0x1FBA, 'M', u'á½°'), - (0x1FBB, 'M', u'ά'), - (0x1FBC, 'M', u'αι'), - (0x1FBD, '3', u' Ì“'), - (0x1FBE, 'M', u'ι'), - (0x1FBF, '3', u' Ì“'), - (0x1FC0, '3', u' Í‚'), - (0x1FC1, '3', u' ̈͂'), - (0x1FC2, 'M', u'ὴι'), - (0x1FC3, 'M', u'ηι'), - (0x1FC4, 'M', u'ήι'), - (0x1FC5, 'X'), - (0x1FC6, 'V'), - (0x1FC7, 'M', u'ῆι'), - (0x1FC8, 'M', u'á½²'), - (0x1FC9, 'M', u'έ'), - (0x1FCA, 'M', u'á½´'), - (0x1FCB, 'M', u'ή'), - (0x1FCC, 'M', u'ηι'), - (0x1FCD, '3', u' Ì“Ì€'), - (0x1FCE, '3', u' Ì“Ì'), - (0x1FCF, '3', u' Ì“Í‚'), - 
(0x1FD0, 'V'), - (0x1FD3, 'M', u'Î'), - (0x1FD4, 'X'), - (0x1FD6, 'V'), - (0x1FD8, 'M', u'á¿'), - (0x1FD9, 'M', u'á¿‘'), - (0x1FDA, 'M', u'ὶ'), - (0x1FDB, 'M', u'ί'), - (0x1FDC, 'X'), - (0x1FDD, '3', u' ̔̀'), - (0x1FDE, '3', u' Ì”Ì'), - (0x1FDF, '3', u' ̔͂'), - (0x1FE0, 'V'), - (0x1FE3, 'M', u'ΰ'), - (0x1FE4, 'V'), - (0x1FE8, 'M', u'á¿ '), - (0x1FE9, 'M', u'á¿¡'), - (0x1FEA, 'M', u'ὺ'), - (0x1FEB, 'M', u'Ï'), - (0x1FEC, 'M', u'á¿¥'), - (0x1FED, '3', u' ̈̀'), - (0x1FEE, '3', u' ̈Ì'), - (0x1FEF, '3', u'`'), - (0x1FF0, 'X'), - (0x1FF2, 'M', u'ὼι'), - (0x1FF3, 'M', u'ωι'), - (0x1FF4, 'M', u'ώι'), - (0x1FF5, 'X'), - (0x1FF6, 'V'), - (0x1FF7, 'M', u'ῶι'), - (0x1FF8, 'M', u'ὸ'), - (0x1FF9, 'M', u'ÏŒ'), - (0x1FFA, 'M', u'á½¼'), - (0x1FFB, 'M', u'ÏŽ'), - (0x1FFC, 'M', u'ωι'), - (0x1FFD, '3', u' Ì'), - (0x1FFE, '3', u' Ì”'), - (0x1FFF, 'X'), - (0x2000, '3', u' '), - (0x200B, 'I'), - (0x200C, 'D', u''), - (0x200E, 'X'), - (0x2010, 'V'), - (0x2011, 'M', u'â€'), - (0x2012, 'V'), - (0x2017, '3', u' ̳'), - (0x2018, 'V'), - (0x2024, 'X'), - (0x2027, 'V'), - (0x2028, 'X'), - (0x202F, '3', u' '), - (0x2030, 'V'), - (0x2033, 'M', u'′′'), - (0x2034, 'M', u'′′′'), - (0x2035, 'V'), - (0x2036, 'M', u'‵‵'), - (0x2037, 'M', u'‵‵‵'), - ] - -def _seg_21(): - return [ - (0x2038, 'V'), - (0x203C, '3', u'!!'), - (0x203D, 'V'), - (0x203E, '3', u' Ì…'), - (0x203F, 'V'), - (0x2047, '3', u'??'), - (0x2048, '3', u'?!'), - (0x2049, '3', u'!?'), - (0x204A, 'V'), - (0x2057, 'M', u'′′′′'), - (0x2058, 'V'), - (0x205F, '3', u' '), - (0x2060, 'I'), - (0x2061, 'X'), - (0x2064, 'I'), - (0x2065, 'X'), - (0x2070, 'M', u'0'), - (0x2071, 'M', u'i'), - (0x2072, 'X'), - (0x2074, 'M', u'4'), - (0x2075, 'M', u'5'), - (0x2076, 'M', u'6'), - (0x2077, 'M', u'7'), - (0x2078, 'M', u'8'), - (0x2079, 'M', u'9'), - (0x207A, '3', u'+'), - (0x207B, 'M', u'−'), - (0x207C, '3', u'='), - (0x207D, '3', u'('), - (0x207E, '3', u')'), - (0x207F, 'M', u'n'), - (0x2080, 'M', u'0'), - (0x2081, 'M', u'1'), - (0x2082, 'M', u'2'), - 
(0x2083, 'M', u'3'), - (0x2084, 'M', u'4'), - (0x2085, 'M', u'5'), - (0x2086, 'M', u'6'), - (0x2087, 'M', u'7'), - (0x2088, 'M', u'8'), - (0x2089, 'M', u'9'), - (0x208A, '3', u'+'), - (0x208B, 'M', u'−'), - (0x208C, '3', u'='), - (0x208D, '3', u'('), - (0x208E, '3', u')'), - (0x208F, 'X'), - (0x2090, 'M', u'a'), - (0x2091, 'M', u'e'), - (0x2092, 'M', u'o'), - (0x2093, 'M', u'x'), - (0x2094, 'M', u'É™'), - (0x2095, 'M', u'h'), - (0x2096, 'M', u'k'), - (0x2097, 'M', u'l'), - (0x2098, 'M', u'm'), - (0x2099, 'M', u'n'), - (0x209A, 'M', u'p'), - (0x209B, 'M', u's'), - (0x209C, 'M', u't'), - (0x209D, 'X'), - (0x20A0, 'V'), - (0x20A8, 'M', u'rs'), - (0x20A9, 'V'), - (0x20C0, 'X'), - (0x20D0, 'V'), - (0x20F1, 'X'), - (0x2100, '3', u'a/c'), - (0x2101, '3', u'a/s'), - (0x2102, 'M', u'c'), - (0x2103, 'M', u'°c'), - (0x2104, 'V'), - (0x2105, '3', u'c/o'), - (0x2106, '3', u'c/u'), - (0x2107, 'M', u'É›'), - (0x2108, 'V'), - (0x2109, 'M', u'°f'), - (0x210A, 'M', u'g'), - (0x210B, 'M', u'h'), - (0x210F, 'M', u'ħ'), - (0x2110, 'M', u'i'), - (0x2112, 'M', u'l'), - (0x2114, 'V'), - (0x2115, 'M', u'n'), - (0x2116, 'M', u'no'), - (0x2117, 'V'), - (0x2119, 'M', u'p'), - (0x211A, 'M', u'q'), - (0x211B, 'M', u'r'), - (0x211E, 'V'), - (0x2120, 'M', u'sm'), - (0x2121, 'M', u'tel'), - (0x2122, 'M', u'tm'), - (0x2123, 'V'), - (0x2124, 'M', u'z'), - (0x2125, 'V'), - (0x2126, 'M', u'ω'), - (0x2127, 'V'), - (0x2128, 'M', u'z'), - (0x2129, 'V'), - ] - -def _seg_22(): - return [ - (0x212A, 'M', u'k'), - (0x212B, 'M', u'Ã¥'), - (0x212C, 'M', u'b'), - (0x212D, 'M', u'c'), - (0x212E, 'V'), - (0x212F, 'M', u'e'), - (0x2131, 'M', u'f'), - (0x2132, 'X'), - (0x2133, 'M', u'm'), - (0x2134, 'M', u'o'), - (0x2135, 'M', u'×'), - (0x2136, 'M', u'ב'), - (0x2137, 'M', u'×’'), - (0x2138, 'M', u'ד'), - (0x2139, 'M', u'i'), - (0x213A, 'V'), - (0x213B, 'M', u'fax'), - (0x213C, 'M', u'Ï€'), - (0x213D, 'M', u'γ'), - (0x213F, 'M', u'Ï€'), - (0x2140, 'M', u'∑'), - (0x2141, 'V'), - (0x2145, 'M', u'd'), - (0x2147, 'M', 
u'e'), - (0x2148, 'M', u'i'), - (0x2149, 'M', u'j'), - (0x214A, 'V'), - (0x2150, 'M', u'1â„7'), - (0x2151, 'M', u'1â„9'), - (0x2152, 'M', u'1â„10'), - (0x2153, 'M', u'1â„3'), - (0x2154, 'M', u'2â„3'), - (0x2155, 'M', u'1â„5'), - (0x2156, 'M', u'2â„5'), - (0x2157, 'M', u'3â„5'), - (0x2158, 'M', u'4â„5'), - (0x2159, 'M', u'1â„6'), - (0x215A, 'M', u'5â„6'), - (0x215B, 'M', u'1â„8'), - (0x215C, 'M', u'3â„8'), - (0x215D, 'M', u'5â„8'), - (0x215E, 'M', u'7â„8'), - (0x215F, 'M', u'1â„'), - (0x2160, 'M', u'i'), - (0x2161, 'M', u'ii'), - (0x2162, 'M', u'iii'), - (0x2163, 'M', u'iv'), - (0x2164, 'M', u'v'), - (0x2165, 'M', u'vi'), - (0x2166, 'M', u'vii'), - (0x2167, 'M', u'viii'), - (0x2168, 'M', u'ix'), - (0x2169, 'M', u'x'), - (0x216A, 'M', u'xi'), - (0x216B, 'M', u'xii'), - (0x216C, 'M', u'l'), - (0x216D, 'M', u'c'), - (0x216E, 'M', u'd'), - (0x216F, 'M', u'm'), - (0x2170, 'M', u'i'), - (0x2171, 'M', u'ii'), - (0x2172, 'M', u'iii'), - (0x2173, 'M', u'iv'), - (0x2174, 'M', u'v'), - (0x2175, 'M', u'vi'), - (0x2176, 'M', u'vii'), - (0x2177, 'M', u'viii'), - (0x2178, 'M', u'ix'), - (0x2179, 'M', u'x'), - (0x217A, 'M', u'xi'), - (0x217B, 'M', u'xii'), - (0x217C, 'M', u'l'), - (0x217D, 'M', u'c'), - (0x217E, 'M', u'd'), - (0x217F, 'M', u'm'), - (0x2180, 'V'), - (0x2183, 'X'), - (0x2184, 'V'), - (0x2189, 'M', u'0â„3'), - (0x218A, 'V'), - (0x218C, 'X'), - (0x2190, 'V'), - (0x222C, 'M', u'∫∫'), - (0x222D, 'M', u'∫∫∫'), - (0x222E, 'V'), - (0x222F, 'M', u'∮∮'), - (0x2230, 'M', u'∮∮∮'), - (0x2231, 'V'), - (0x2260, '3'), - (0x2261, 'V'), - (0x226E, '3'), - (0x2270, 'V'), - (0x2329, 'M', u'〈'), - (0x232A, 'M', u'〉'), - (0x232B, 'V'), - (0x2427, 'X'), - (0x2440, 'V'), - (0x244B, 'X'), - (0x2460, 'M', u'1'), - (0x2461, 'M', u'2'), - ] - -def _seg_23(): - return [ - (0x2462, 'M', u'3'), - (0x2463, 'M', u'4'), - (0x2464, 'M', u'5'), - (0x2465, 'M', u'6'), - (0x2466, 'M', u'7'), - (0x2467, 'M', u'8'), - (0x2468, 'M', u'9'), - (0x2469, 'M', u'10'), - (0x246A, 'M', u'11'), - (0x246B, 'M', 
u'12'), - (0x246C, 'M', u'13'), - (0x246D, 'M', u'14'), - (0x246E, 'M', u'15'), - (0x246F, 'M', u'16'), - (0x2470, 'M', u'17'), - (0x2471, 'M', u'18'), - (0x2472, 'M', u'19'), - (0x2473, 'M', u'20'), - (0x2474, '3', u'(1)'), - (0x2475, '3', u'(2)'), - (0x2476, '3', u'(3)'), - (0x2477, '3', u'(4)'), - (0x2478, '3', u'(5)'), - (0x2479, '3', u'(6)'), - (0x247A, '3', u'(7)'), - (0x247B, '3', u'(8)'), - (0x247C, '3', u'(9)'), - (0x247D, '3', u'(10)'), - (0x247E, '3', u'(11)'), - (0x247F, '3', u'(12)'), - (0x2480, '3', u'(13)'), - (0x2481, '3', u'(14)'), - (0x2482, '3', u'(15)'), - (0x2483, '3', u'(16)'), - (0x2484, '3', u'(17)'), - (0x2485, '3', u'(18)'), - (0x2486, '3', u'(19)'), - (0x2487, '3', u'(20)'), - (0x2488, 'X'), - (0x249C, '3', u'(a)'), - (0x249D, '3', u'(b)'), - (0x249E, '3', u'(c)'), - (0x249F, '3', u'(d)'), - (0x24A0, '3', u'(e)'), - (0x24A1, '3', u'(f)'), - (0x24A2, '3', u'(g)'), - (0x24A3, '3', u'(h)'), - (0x24A4, '3', u'(i)'), - (0x24A5, '3', u'(j)'), - (0x24A6, '3', u'(k)'), - (0x24A7, '3', u'(l)'), - (0x24A8, '3', u'(m)'), - (0x24A9, '3', u'(n)'), - (0x24AA, '3', u'(o)'), - (0x24AB, '3', u'(p)'), - (0x24AC, '3', u'(q)'), - (0x24AD, '3', u'(r)'), - (0x24AE, '3', u'(s)'), - (0x24AF, '3', u'(t)'), - (0x24B0, '3', u'(u)'), - (0x24B1, '3', u'(v)'), - (0x24B2, '3', u'(w)'), - (0x24B3, '3', u'(x)'), - (0x24B4, '3', u'(y)'), - (0x24B5, '3', u'(z)'), - (0x24B6, 'M', u'a'), - (0x24B7, 'M', u'b'), - (0x24B8, 'M', u'c'), - (0x24B9, 'M', u'd'), - (0x24BA, 'M', u'e'), - (0x24BB, 'M', u'f'), - (0x24BC, 'M', u'g'), - (0x24BD, 'M', u'h'), - (0x24BE, 'M', u'i'), - (0x24BF, 'M', u'j'), - (0x24C0, 'M', u'k'), - (0x24C1, 'M', u'l'), - (0x24C2, 'M', u'm'), - (0x24C3, 'M', u'n'), - (0x24C4, 'M', u'o'), - (0x24C5, 'M', u'p'), - (0x24C6, 'M', u'q'), - (0x24C7, 'M', u'r'), - (0x24C8, 'M', u's'), - (0x24C9, 'M', u't'), - (0x24CA, 'M', u'u'), - (0x24CB, 'M', u'v'), - (0x24CC, 'M', u'w'), - (0x24CD, 'M', u'x'), - (0x24CE, 'M', u'y'), - (0x24CF, 'M', u'z'), - (0x24D0, 'M', u'a'), 
- (0x24D1, 'M', u'b'), - (0x24D2, 'M', u'c'), - (0x24D3, 'M', u'd'), - (0x24D4, 'M', u'e'), - (0x24D5, 'M', u'f'), - (0x24D6, 'M', u'g'), - (0x24D7, 'M', u'h'), - (0x24D8, 'M', u'i'), - ] - -def _seg_24(): - return [ - (0x24D9, 'M', u'j'), - (0x24DA, 'M', u'k'), - (0x24DB, 'M', u'l'), - (0x24DC, 'M', u'm'), - (0x24DD, 'M', u'n'), - (0x24DE, 'M', u'o'), - (0x24DF, 'M', u'p'), - (0x24E0, 'M', u'q'), - (0x24E1, 'M', u'r'), - (0x24E2, 'M', u's'), - (0x24E3, 'M', u't'), - (0x24E4, 'M', u'u'), - (0x24E5, 'M', u'v'), - (0x24E6, 'M', u'w'), - (0x24E7, 'M', u'x'), - (0x24E8, 'M', u'y'), - (0x24E9, 'M', u'z'), - (0x24EA, 'M', u'0'), - (0x24EB, 'V'), - (0x2A0C, 'M', u'∫∫∫∫'), - (0x2A0D, 'V'), - (0x2A74, '3', u'::='), - (0x2A75, '3', u'=='), - (0x2A76, '3', u'==='), - (0x2A77, 'V'), - (0x2ADC, 'M', u'â«Ì¸'), - (0x2ADD, 'V'), - (0x2B74, 'X'), - (0x2B76, 'V'), - (0x2B96, 'X'), - (0x2B98, 'V'), - (0x2BC9, 'X'), - (0x2BCA, 'V'), - (0x2BFF, 'X'), - (0x2C00, 'M', u'â°°'), - (0x2C01, 'M', u'â°±'), - (0x2C02, 'M', u'â°²'), - (0x2C03, 'M', u'â°³'), - (0x2C04, 'M', u'â°´'), - (0x2C05, 'M', u'â°µ'), - (0x2C06, 'M', u'â°¶'), - (0x2C07, 'M', u'â°·'), - (0x2C08, 'M', u'â°¸'), - (0x2C09, 'M', u'â°¹'), - (0x2C0A, 'M', u'â°º'), - (0x2C0B, 'M', u'â°»'), - (0x2C0C, 'M', u'â°¼'), - (0x2C0D, 'M', u'â°½'), - (0x2C0E, 'M', u'â°¾'), - (0x2C0F, 'M', u'â°¿'), - (0x2C10, 'M', u'â±€'), - (0x2C11, 'M', u'â±'), - (0x2C12, 'M', u'ⱂ'), - (0x2C13, 'M', u'ⱃ'), - (0x2C14, 'M', u'ⱄ'), - (0x2C15, 'M', u'â±…'), - (0x2C16, 'M', u'ⱆ'), - (0x2C17, 'M', u'ⱇ'), - (0x2C18, 'M', u'ⱈ'), - (0x2C19, 'M', u'ⱉ'), - (0x2C1A, 'M', u'ⱊ'), - (0x2C1B, 'M', u'ⱋ'), - (0x2C1C, 'M', u'ⱌ'), - (0x2C1D, 'M', u'â±'), - (0x2C1E, 'M', u'ⱎ'), - (0x2C1F, 'M', u'â±'), - (0x2C20, 'M', u'â±'), - (0x2C21, 'M', u'ⱑ'), - (0x2C22, 'M', u'â±’'), - (0x2C23, 'M', u'ⱓ'), - (0x2C24, 'M', u'â±”'), - (0x2C25, 'M', u'ⱕ'), - (0x2C26, 'M', u'â±–'), - (0x2C27, 'M', u'â±—'), - (0x2C28, 'M', u'ⱘ'), - (0x2C29, 'M', u'â±™'), - (0x2C2A, 'M', u'ⱚ'), - (0x2C2B, 'M', 
u'â±›'), - (0x2C2C, 'M', u'ⱜ'), - (0x2C2D, 'M', u'â±'), - (0x2C2E, 'M', u'ⱞ'), - (0x2C2F, 'X'), - (0x2C30, 'V'), - (0x2C5F, 'X'), - (0x2C60, 'M', u'ⱡ'), - (0x2C61, 'V'), - (0x2C62, 'M', u'É«'), - (0x2C63, 'M', u'áµ½'), - (0x2C64, 'M', u'ɽ'), - (0x2C65, 'V'), - (0x2C67, 'M', u'ⱨ'), - (0x2C68, 'V'), - (0x2C69, 'M', u'ⱪ'), - (0x2C6A, 'V'), - (0x2C6B, 'M', u'ⱬ'), - (0x2C6C, 'V'), - (0x2C6D, 'M', u'É‘'), - (0x2C6E, 'M', u'ɱ'), - (0x2C6F, 'M', u'É'), - (0x2C70, 'M', u'É’'), - ] - -def _seg_25(): - return [ - (0x2C71, 'V'), - (0x2C72, 'M', u'â±³'), - (0x2C73, 'V'), - (0x2C75, 'M', u'ⱶ'), - (0x2C76, 'V'), - (0x2C7C, 'M', u'j'), - (0x2C7D, 'M', u'v'), - (0x2C7E, 'M', u'È¿'), - (0x2C7F, 'M', u'É€'), - (0x2C80, 'M', u'â²'), - (0x2C81, 'V'), - (0x2C82, 'M', u'ⲃ'), - (0x2C83, 'V'), - (0x2C84, 'M', u'â²…'), - (0x2C85, 'V'), - (0x2C86, 'M', u'ⲇ'), - (0x2C87, 'V'), - (0x2C88, 'M', u'ⲉ'), - (0x2C89, 'V'), - (0x2C8A, 'M', u'ⲋ'), - (0x2C8B, 'V'), - (0x2C8C, 'M', u'â²'), - (0x2C8D, 'V'), - (0x2C8E, 'M', u'â²'), - (0x2C8F, 'V'), - (0x2C90, 'M', u'ⲑ'), - (0x2C91, 'V'), - (0x2C92, 'M', u'ⲓ'), - (0x2C93, 'V'), - (0x2C94, 'M', u'ⲕ'), - (0x2C95, 'V'), - (0x2C96, 'M', u'â²—'), - (0x2C97, 'V'), - (0x2C98, 'M', u'â²™'), - (0x2C99, 'V'), - (0x2C9A, 'M', u'â²›'), - (0x2C9B, 'V'), - (0x2C9C, 'M', u'â²'), - (0x2C9D, 'V'), - (0x2C9E, 'M', u'ⲟ'), - (0x2C9F, 'V'), - (0x2CA0, 'M', u'ⲡ'), - (0x2CA1, 'V'), - (0x2CA2, 'M', u'â²£'), - (0x2CA3, 'V'), - (0x2CA4, 'M', u'â²¥'), - (0x2CA5, 'V'), - (0x2CA6, 'M', u'ⲧ'), - (0x2CA7, 'V'), - (0x2CA8, 'M', u'ⲩ'), - (0x2CA9, 'V'), - (0x2CAA, 'M', u'ⲫ'), - (0x2CAB, 'V'), - (0x2CAC, 'M', u'â²­'), - (0x2CAD, 'V'), - (0x2CAE, 'M', u'ⲯ'), - (0x2CAF, 'V'), - (0x2CB0, 'M', u'â²±'), - (0x2CB1, 'V'), - (0x2CB2, 'M', u'â²³'), - (0x2CB3, 'V'), - (0x2CB4, 'M', u'â²µ'), - (0x2CB5, 'V'), - (0x2CB6, 'M', u'â²·'), - (0x2CB7, 'V'), - (0x2CB8, 'M', u'â²¹'), - (0x2CB9, 'V'), - (0x2CBA, 'M', u'â²»'), - (0x2CBB, 'V'), - (0x2CBC, 'M', u'â²½'), - (0x2CBD, 'V'), - (0x2CBE, 'M', u'ⲿ'), - 
(0x2CBF, 'V'), - (0x2CC0, 'M', u'â³'), - (0x2CC1, 'V'), - (0x2CC2, 'M', u'ⳃ'), - (0x2CC3, 'V'), - (0x2CC4, 'M', u'â³…'), - (0x2CC5, 'V'), - (0x2CC6, 'M', u'ⳇ'), - (0x2CC7, 'V'), - (0x2CC8, 'M', u'ⳉ'), - (0x2CC9, 'V'), - (0x2CCA, 'M', u'ⳋ'), - (0x2CCB, 'V'), - (0x2CCC, 'M', u'â³'), - (0x2CCD, 'V'), - (0x2CCE, 'M', u'â³'), - (0x2CCF, 'V'), - (0x2CD0, 'M', u'ⳑ'), - (0x2CD1, 'V'), - (0x2CD2, 'M', u'ⳓ'), - (0x2CD3, 'V'), - (0x2CD4, 'M', u'ⳕ'), - (0x2CD5, 'V'), - (0x2CD6, 'M', u'â³—'), - (0x2CD7, 'V'), - (0x2CD8, 'M', u'â³™'), - (0x2CD9, 'V'), - (0x2CDA, 'M', u'â³›'), - ] - -def _seg_26(): - return [ - (0x2CDB, 'V'), - (0x2CDC, 'M', u'â³'), - (0x2CDD, 'V'), - (0x2CDE, 'M', u'ⳟ'), - (0x2CDF, 'V'), - (0x2CE0, 'M', u'ⳡ'), - (0x2CE1, 'V'), - (0x2CE2, 'M', u'â³£'), - (0x2CE3, 'V'), - (0x2CEB, 'M', u'ⳬ'), - (0x2CEC, 'V'), - (0x2CED, 'M', u'â³®'), - (0x2CEE, 'V'), - (0x2CF2, 'M', u'â³³'), - (0x2CF3, 'V'), - (0x2CF4, 'X'), - (0x2CF9, 'V'), - (0x2D26, 'X'), - (0x2D27, 'V'), - (0x2D28, 'X'), - (0x2D2D, 'V'), - (0x2D2E, 'X'), - (0x2D30, 'V'), - (0x2D68, 'X'), - (0x2D6F, 'M', u'ⵡ'), - (0x2D70, 'V'), - (0x2D71, 'X'), - (0x2D7F, 'V'), - (0x2D97, 'X'), - (0x2DA0, 'V'), - (0x2DA7, 'X'), - (0x2DA8, 'V'), - (0x2DAF, 'X'), - (0x2DB0, 'V'), - (0x2DB7, 'X'), - (0x2DB8, 'V'), - (0x2DBF, 'X'), - (0x2DC0, 'V'), - (0x2DC7, 'X'), - (0x2DC8, 'V'), - (0x2DCF, 'X'), - (0x2DD0, 'V'), - (0x2DD7, 'X'), - (0x2DD8, 'V'), - (0x2DDF, 'X'), - (0x2DE0, 'V'), - (0x2E4F, 'X'), - (0x2E80, 'V'), - (0x2E9A, 'X'), - (0x2E9B, 'V'), - (0x2E9F, 'M', u'æ¯'), - (0x2EA0, 'V'), - (0x2EF3, 'M', u'龟'), - (0x2EF4, 'X'), - (0x2F00, 'M', u'一'), - (0x2F01, 'M', u'丨'), - (0x2F02, 'M', u'丶'), - (0x2F03, 'M', u'丿'), - (0x2F04, 'M', u'ä¹™'), - (0x2F05, 'M', u'亅'), - (0x2F06, 'M', u'二'), - (0x2F07, 'M', u'亠'), - (0x2F08, 'M', u'人'), - (0x2F09, 'M', u'å„¿'), - (0x2F0A, 'M', u'å…¥'), - (0x2F0B, 'M', u'å…«'), - (0x2F0C, 'M', u'冂'), - (0x2F0D, 'M', u'冖'), - (0x2F0E, 'M', u'冫'), - (0x2F0F, 'M', u'几'), - (0x2F10, 'M', u'凵'), - (0x2F11, 
'M', u'刀'), - (0x2F12, 'M', u'力'), - (0x2F13, 'M', u'勹'), - (0x2F14, 'M', u'匕'), - (0x2F15, 'M', u'匚'), - (0x2F16, 'M', u'匸'), - (0x2F17, 'M', u'å'), - (0x2F18, 'M', u'åœ'), - (0x2F19, 'M', u'å©'), - (0x2F1A, 'M', u'厂'), - (0x2F1B, 'M', u'厶'), - (0x2F1C, 'M', u'åˆ'), - (0x2F1D, 'M', u'å£'), - (0x2F1E, 'M', u'å›—'), - (0x2F1F, 'M', u'土'), - (0x2F20, 'M', u'士'), - (0x2F21, 'M', u'夂'), - (0x2F22, 'M', u'夊'), - (0x2F23, 'M', u'夕'), - (0x2F24, 'M', u'大'), - (0x2F25, 'M', u'女'), - (0x2F26, 'M', u'å­'), - (0x2F27, 'M', u'宀'), - (0x2F28, 'M', u'寸'), - (0x2F29, 'M', u'å°'), - (0x2F2A, 'M', u'å°¢'), - (0x2F2B, 'M', u'å°¸'), - (0x2F2C, 'M', u'å±®'), - (0x2F2D, 'M', u'å±±'), - ] - -def _seg_27(): - return [ - (0x2F2E, 'M', u'å·›'), - (0x2F2F, 'M', u'å·¥'), - (0x2F30, 'M', u'å·±'), - (0x2F31, 'M', u'å·¾'), - (0x2F32, 'M', u'å¹²'), - (0x2F33, 'M', u'幺'), - (0x2F34, 'M', u'广'), - (0x2F35, 'M', u'å»´'), - (0x2F36, 'M', u'廾'), - (0x2F37, 'M', u'弋'), - (0x2F38, 'M', u'弓'), - (0x2F39, 'M', u'å½'), - (0x2F3A, 'M', u'彡'), - (0x2F3B, 'M', u'å½³'), - (0x2F3C, 'M', u'心'), - (0x2F3D, 'M', u'戈'), - (0x2F3E, 'M', u'戶'), - (0x2F3F, 'M', u'手'), - (0x2F40, 'M', u'支'), - (0x2F41, 'M', u'æ”´'), - (0x2F42, 'M', u'æ–‡'), - (0x2F43, 'M', u'æ–—'), - (0x2F44, 'M', u'æ–¤'), - (0x2F45, 'M', u'æ–¹'), - (0x2F46, 'M', u'æ— '), - (0x2F47, 'M', u'æ—¥'), - (0x2F48, 'M', u'æ›°'), - (0x2F49, 'M', u'月'), - (0x2F4A, 'M', u'木'), - (0x2F4B, 'M', u'欠'), - (0x2F4C, 'M', u'æ­¢'), - (0x2F4D, 'M', u'æ­¹'), - (0x2F4E, 'M', u'殳'), - (0x2F4F, 'M', u'毋'), - (0x2F50, 'M', u'比'), - (0x2F51, 'M', u'毛'), - (0x2F52, 'M', u'æ°'), - (0x2F53, 'M', u'æ°”'), - (0x2F54, 'M', u'æ°´'), - (0x2F55, 'M', u'ç«'), - (0x2F56, 'M', u'爪'), - (0x2F57, 'M', u'父'), - (0x2F58, 'M', u'爻'), - (0x2F59, 'M', u'爿'), - (0x2F5A, 'M', u'片'), - (0x2F5B, 'M', u'牙'), - (0x2F5C, 'M', u'牛'), - (0x2F5D, 'M', u'犬'), - (0x2F5E, 'M', u'玄'), - (0x2F5F, 'M', u'玉'), - (0x2F60, 'M', u'ç“œ'), - (0x2F61, 'M', u'瓦'), - (0x2F62, 'M', u'甘'), - (0x2F63, 'M', u'生'), - 
(0x2F64, 'M', u'用'), - (0x2F65, 'M', u'ç”°'), - (0x2F66, 'M', u'ç–‹'), - (0x2F67, 'M', u'ç–’'), - (0x2F68, 'M', u'癶'), - (0x2F69, 'M', u'白'), - (0x2F6A, 'M', u'çš®'), - (0x2F6B, 'M', u'çš¿'), - (0x2F6C, 'M', u'ç›®'), - (0x2F6D, 'M', u'矛'), - (0x2F6E, 'M', u'矢'), - (0x2F6F, 'M', u'石'), - (0x2F70, 'M', u'示'), - (0x2F71, 'M', u'禸'), - (0x2F72, 'M', u'禾'), - (0x2F73, 'M', u'ç©´'), - (0x2F74, 'M', u'ç«‹'), - (0x2F75, 'M', u'竹'), - (0x2F76, 'M', u'ç±³'), - (0x2F77, 'M', u'糸'), - (0x2F78, 'M', u'缶'), - (0x2F79, 'M', u'网'), - (0x2F7A, 'M', u'羊'), - (0x2F7B, 'M', u'ç¾½'), - (0x2F7C, 'M', u'è€'), - (0x2F7D, 'M', u'而'), - (0x2F7E, 'M', u'耒'), - (0x2F7F, 'M', u'耳'), - (0x2F80, 'M', u'è¿'), - (0x2F81, 'M', u'肉'), - (0x2F82, 'M', u'臣'), - (0x2F83, 'M', u'自'), - (0x2F84, 'M', u'至'), - (0x2F85, 'M', u'臼'), - (0x2F86, 'M', u'舌'), - (0x2F87, 'M', u'舛'), - (0x2F88, 'M', u'舟'), - (0x2F89, 'M', u'艮'), - (0x2F8A, 'M', u'色'), - (0x2F8B, 'M', u'艸'), - (0x2F8C, 'M', u'è™'), - (0x2F8D, 'M', u'虫'), - (0x2F8E, 'M', u'è¡€'), - (0x2F8F, 'M', u'è¡Œ'), - (0x2F90, 'M', u'è¡£'), - (0x2F91, 'M', u'襾'), - ] - -def _seg_28(): - return [ - (0x2F92, 'M', u'見'), - (0x2F93, 'M', u'角'), - (0x2F94, 'M', u'言'), - (0x2F95, 'M', u'è°·'), - (0x2F96, 'M', u'豆'), - (0x2F97, 'M', u'豕'), - (0x2F98, 'M', u'豸'), - (0x2F99, 'M', u'è²'), - (0x2F9A, 'M', u'赤'), - (0x2F9B, 'M', u'èµ°'), - (0x2F9C, 'M', u'足'), - (0x2F9D, 'M', u'身'), - (0x2F9E, 'M', u'車'), - (0x2F9F, 'M', u'è¾›'), - (0x2FA0, 'M', u'è¾°'), - (0x2FA1, 'M', u'è¾µ'), - (0x2FA2, 'M', u'é‚‘'), - (0x2FA3, 'M', u'é…‰'), - (0x2FA4, 'M', u'釆'), - (0x2FA5, 'M', u'里'), - (0x2FA6, 'M', u'金'), - (0x2FA7, 'M', u'é•·'), - (0x2FA8, 'M', u'é–€'), - (0x2FA9, 'M', u'阜'), - (0x2FAA, 'M', u'隶'), - (0x2FAB, 'M', u'éš¹'), - (0x2FAC, 'M', u'雨'), - (0x2FAD, 'M', u'é‘'), - (0x2FAE, 'M', u'éž'), - (0x2FAF, 'M', u'é¢'), - (0x2FB0, 'M', u'é©'), - (0x2FB1, 'M', u'韋'), - (0x2FB2, 'M', u'韭'), - (0x2FB3, 'M', u'音'), - (0x2FB4, 'M', u'é '), - (0x2FB5, 'M', u'風'), - (0x2FB6, 'M', u'飛'), - 
(0x2FB7, 'M', u'食'), - (0x2FB8, 'M', u'首'), - (0x2FB9, 'M', u'香'), - (0x2FBA, 'M', u'馬'), - (0x2FBB, 'M', u'骨'), - (0x2FBC, 'M', u'高'), - (0x2FBD, 'M', u'é«Ÿ'), - (0x2FBE, 'M', u'鬥'), - (0x2FBF, 'M', u'鬯'), - (0x2FC0, 'M', u'鬲'), - (0x2FC1, 'M', u'鬼'), - (0x2FC2, 'M', u'é­š'), - (0x2FC3, 'M', u'é³¥'), - (0x2FC4, 'M', u'é¹µ'), - (0x2FC5, 'M', u'鹿'), - (0x2FC6, 'M', u'麥'), - (0x2FC7, 'M', u'麻'), - (0x2FC8, 'M', u'黃'), - (0x2FC9, 'M', u'é»'), - (0x2FCA, 'M', u'黑'), - (0x2FCB, 'M', u'黹'), - (0x2FCC, 'M', u'黽'), - (0x2FCD, 'M', u'鼎'), - (0x2FCE, 'M', u'鼓'), - (0x2FCF, 'M', u'é¼ '), - (0x2FD0, 'M', u'é¼»'), - (0x2FD1, 'M', u'齊'), - (0x2FD2, 'M', u'é½’'), - (0x2FD3, 'M', u'é¾'), - (0x2FD4, 'M', u'龜'), - (0x2FD5, 'M', u'é¾ '), - (0x2FD6, 'X'), - (0x3000, '3', u' '), - (0x3001, 'V'), - (0x3002, 'M', u'.'), - (0x3003, 'V'), - (0x3036, 'M', u'〒'), - (0x3037, 'V'), - (0x3038, 'M', u'å'), - (0x3039, 'M', u'å„'), - (0x303A, 'M', u'å…'), - (0x303B, 'V'), - (0x3040, 'X'), - (0x3041, 'V'), - (0x3097, 'X'), - (0x3099, 'V'), - (0x309B, '3', u' ã‚™'), - (0x309C, '3', u' ã‚š'), - (0x309D, 'V'), - (0x309F, 'M', u'より'), - (0x30A0, 'V'), - (0x30FF, 'M', u'コト'), - (0x3100, 'X'), - (0x3105, 'V'), - (0x3130, 'X'), - (0x3131, 'M', u'á„€'), - (0x3132, 'M', u'á„'), - (0x3133, 'M', u'ᆪ'), - (0x3134, 'M', u'á„‚'), - (0x3135, 'M', u'ᆬ'), - (0x3136, 'M', u'ᆭ'), - (0x3137, 'M', u'ᄃ'), - (0x3138, 'M', u'á„„'), - ] - -def _seg_29(): - return [ - (0x3139, 'M', u'á„…'), - (0x313A, 'M', u'ᆰ'), - (0x313B, 'M', u'ᆱ'), - (0x313C, 'M', u'ᆲ'), - (0x313D, 'M', u'ᆳ'), - (0x313E, 'M', u'ᆴ'), - (0x313F, 'M', u'ᆵ'), - (0x3140, 'M', u'á„š'), - (0x3141, 'M', u'ᄆ'), - (0x3142, 'M', u'ᄇ'), - (0x3143, 'M', u'ᄈ'), - (0x3144, 'M', u'á„¡'), - (0x3145, 'M', u'ᄉ'), - (0x3146, 'M', u'á„Š'), - (0x3147, 'M', u'á„‹'), - (0x3148, 'M', u'á„Œ'), - (0x3149, 'M', u'á„'), - (0x314A, 'M', u'á„Ž'), - (0x314B, 'M', u'á„'), - (0x314C, 'M', u'á„'), - (0x314D, 'M', u'á„‘'), - (0x314E, 'M', u'á„’'), - (0x314F, 'M', u'á…¡'), - (0x3150, 'M', 
u'á…¢'), - (0x3151, 'M', u'á…£'), - (0x3152, 'M', u'á…¤'), - (0x3153, 'M', u'á…¥'), - (0x3154, 'M', u'á…¦'), - (0x3155, 'M', u'á…§'), - (0x3156, 'M', u'á…¨'), - (0x3157, 'M', u'á…©'), - (0x3158, 'M', u'á…ª'), - (0x3159, 'M', u'á…«'), - (0x315A, 'M', u'á…¬'), - (0x315B, 'M', u'á…­'), - (0x315C, 'M', u'á…®'), - (0x315D, 'M', u'á…¯'), - (0x315E, 'M', u'á…°'), - (0x315F, 'M', u'á…±'), - (0x3160, 'M', u'á…²'), - (0x3161, 'M', u'á…³'), - (0x3162, 'M', u'á…´'), - (0x3163, 'M', u'á…µ'), - (0x3164, 'X'), - (0x3165, 'M', u'á„”'), - (0x3166, 'M', u'á„•'), - (0x3167, 'M', u'ᇇ'), - (0x3168, 'M', u'ᇈ'), - (0x3169, 'M', u'ᇌ'), - (0x316A, 'M', u'ᇎ'), - (0x316B, 'M', u'ᇓ'), - (0x316C, 'M', u'ᇗ'), - (0x316D, 'M', u'ᇙ'), - (0x316E, 'M', u'á„œ'), - (0x316F, 'M', u'á‡'), - (0x3170, 'M', u'ᇟ'), - (0x3171, 'M', u'á„'), - (0x3172, 'M', u'á„ž'), - (0x3173, 'M', u'á„ '), - (0x3174, 'M', u'á„¢'), - (0x3175, 'M', u'á„£'), - (0x3176, 'M', u'ᄧ'), - (0x3177, 'M', u'á„©'), - (0x3178, 'M', u'á„«'), - (0x3179, 'M', u'ᄬ'), - (0x317A, 'M', u'á„­'), - (0x317B, 'M', u'á„®'), - (0x317C, 'M', u'ᄯ'), - (0x317D, 'M', u'ᄲ'), - (0x317E, 'M', u'ᄶ'), - (0x317F, 'M', u'á…€'), - (0x3180, 'M', u'á…‡'), - (0x3181, 'M', u'á…Œ'), - (0x3182, 'M', u'ᇱ'), - (0x3183, 'M', u'ᇲ'), - (0x3184, 'M', u'á…—'), - (0x3185, 'M', u'á…˜'), - (0x3186, 'M', u'á…™'), - (0x3187, 'M', u'ᆄ'), - (0x3188, 'M', u'ᆅ'), - (0x3189, 'M', u'ᆈ'), - (0x318A, 'M', u'ᆑ'), - (0x318B, 'M', u'ᆒ'), - (0x318C, 'M', u'ᆔ'), - (0x318D, 'M', u'ᆞ'), - (0x318E, 'M', u'ᆡ'), - (0x318F, 'X'), - (0x3190, 'V'), - (0x3192, 'M', u'一'), - (0x3193, 'M', u'二'), - (0x3194, 'M', u'三'), - (0x3195, 'M', u'å››'), - (0x3196, 'M', u'上'), - (0x3197, 'M', u'中'), - (0x3198, 'M', u'下'), - (0x3199, 'M', u'甲'), - (0x319A, 'M', u'ä¹™'), - (0x319B, 'M', u'丙'), - (0x319C, 'M', u'ä¸'), - (0x319D, 'M', u'天'), - ] - -def _seg_30(): - return [ - (0x319E, 'M', u'地'), - (0x319F, 'M', u'人'), - (0x31A0, 'V'), - (0x31BB, 'X'), - (0x31C0, 'V'), - (0x31E4, 'X'), - (0x31F0, 'V'), - (0x3200, '3', 
u'(á„€)'), - (0x3201, '3', u'(á„‚)'), - (0x3202, '3', u'(ᄃ)'), - (0x3203, '3', u'(á„…)'), - (0x3204, '3', u'(ᄆ)'), - (0x3205, '3', u'(ᄇ)'), - (0x3206, '3', u'(ᄉ)'), - (0x3207, '3', u'(á„‹)'), - (0x3208, '3', u'(á„Œ)'), - (0x3209, '3', u'(á„Ž)'), - (0x320A, '3', u'(á„)'), - (0x320B, '3', u'(á„)'), - (0x320C, '3', u'(á„‘)'), - (0x320D, '3', u'(á„’)'), - (0x320E, '3', u'(ê°€)'), - (0x320F, '3', u'(나)'), - (0x3210, '3', u'(다)'), - (0x3211, '3', u'(ë¼)'), - (0x3212, '3', u'(마)'), - (0x3213, '3', u'(ë°”)'), - (0x3214, '3', u'(사)'), - (0x3215, '3', u'(ì•„)'), - (0x3216, '3', u'(ìž)'), - (0x3217, '3', u'(ì°¨)'), - (0x3218, '3', u'(ì¹´)'), - (0x3219, '3', u'(타)'), - (0x321A, '3', u'(파)'), - (0x321B, '3', u'(하)'), - (0x321C, '3', u'(주)'), - (0x321D, '3', u'(오전)'), - (0x321E, '3', u'(오후)'), - (0x321F, 'X'), - (0x3220, '3', u'(一)'), - (0x3221, '3', u'(二)'), - (0x3222, '3', u'(三)'), - (0x3223, '3', u'(å››)'), - (0x3224, '3', u'(五)'), - (0x3225, '3', u'(å…­)'), - (0x3226, '3', u'(七)'), - (0x3227, '3', u'(å…«)'), - (0x3228, '3', u'(ä¹)'), - (0x3229, '3', u'(å)'), - (0x322A, '3', u'(月)'), - (0x322B, '3', u'(ç«)'), - (0x322C, '3', u'(æ°´)'), - (0x322D, '3', u'(木)'), - (0x322E, '3', u'(金)'), - (0x322F, '3', u'(土)'), - (0x3230, '3', u'(æ—¥)'), - (0x3231, '3', u'(æ ª)'), - (0x3232, '3', u'(有)'), - (0x3233, '3', u'(社)'), - (0x3234, '3', u'(å)'), - (0x3235, '3', u'(特)'), - (0x3236, '3', u'(財)'), - (0x3237, '3', u'(ç¥)'), - (0x3238, '3', u'(労)'), - (0x3239, '3', u'(代)'), - (0x323A, '3', u'(呼)'), - (0x323B, '3', u'(å­¦)'), - (0x323C, '3', u'(監)'), - (0x323D, '3', u'(ä¼)'), - (0x323E, '3', u'(資)'), - (0x323F, '3', u'(å”)'), - (0x3240, '3', u'(祭)'), - (0x3241, '3', u'(休)'), - (0x3242, '3', u'(自)'), - (0x3243, '3', u'(至)'), - (0x3244, 'M', u'å•'), - (0x3245, 'M', u'å¹¼'), - (0x3246, 'M', u'æ–‡'), - (0x3247, 'M', u'ç®'), - (0x3248, 'V'), - (0x3250, 'M', u'pte'), - (0x3251, 'M', u'21'), - (0x3252, 'M', u'22'), - (0x3253, 'M', u'23'), - (0x3254, 'M', u'24'), - (0x3255, 'M', u'25'), - (0x3256, 
'M', u'26'), - (0x3257, 'M', u'27'), - (0x3258, 'M', u'28'), - (0x3259, 'M', u'29'), - (0x325A, 'M', u'30'), - (0x325B, 'M', u'31'), - (0x325C, 'M', u'32'), - (0x325D, 'M', u'33'), - (0x325E, 'M', u'34'), - (0x325F, 'M', u'35'), - (0x3260, 'M', u'á„€'), - (0x3261, 'M', u'á„‚'), - (0x3262, 'M', u'ᄃ'), - (0x3263, 'M', u'á„…'), - ] - -def _seg_31(): - return [ - (0x3264, 'M', u'ᄆ'), - (0x3265, 'M', u'ᄇ'), - (0x3266, 'M', u'ᄉ'), - (0x3267, 'M', u'á„‹'), - (0x3268, 'M', u'á„Œ'), - (0x3269, 'M', u'á„Ž'), - (0x326A, 'M', u'á„'), - (0x326B, 'M', u'á„'), - (0x326C, 'M', u'á„‘'), - (0x326D, 'M', u'á„’'), - (0x326E, 'M', u'ê°€'), - (0x326F, 'M', u'나'), - (0x3270, 'M', u'다'), - (0x3271, 'M', u'ë¼'), - (0x3272, 'M', u'마'), - (0x3273, 'M', u'ë°”'), - (0x3274, 'M', u'사'), - (0x3275, 'M', u'ì•„'), - (0x3276, 'M', u'ìž'), - (0x3277, 'M', u'ì°¨'), - (0x3278, 'M', u'ì¹´'), - (0x3279, 'M', u'타'), - (0x327A, 'M', u'파'), - (0x327B, 'M', u'하'), - (0x327C, 'M', u'참고'), - (0x327D, 'M', u'주ì˜'), - (0x327E, 'M', u'ìš°'), - (0x327F, 'V'), - (0x3280, 'M', u'一'), - (0x3281, 'M', u'二'), - (0x3282, 'M', u'三'), - (0x3283, 'M', u'å››'), - (0x3284, 'M', u'五'), - (0x3285, 'M', u'å…­'), - (0x3286, 'M', u'七'), - (0x3287, 'M', u'å…«'), - (0x3288, 'M', u'ä¹'), - (0x3289, 'M', u'å'), - (0x328A, 'M', u'月'), - (0x328B, 'M', u'ç«'), - (0x328C, 'M', u'æ°´'), - (0x328D, 'M', u'木'), - (0x328E, 'M', u'金'), - (0x328F, 'M', u'土'), - (0x3290, 'M', u'æ—¥'), - (0x3291, 'M', u'æ ª'), - (0x3292, 'M', u'有'), - (0x3293, 'M', u'社'), - (0x3294, 'M', u'å'), - (0x3295, 'M', u'特'), - (0x3296, 'M', u'財'), - (0x3297, 'M', u'ç¥'), - (0x3298, 'M', u'労'), - (0x3299, 'M', u'秘'), - (0x329A, 'M', u'ç”·'), - (0x329B, 'M', u'女'), - (0x329C, 'M', u'é©'), - (0x329D, 'M', u'優'), - (0x329E, 'M', u'å°'), - (0x329F, 'M', u'注'), - (0x32A0, 'M', u'é …'), - (0x32A1, 'M', u'休'), - (0x32A2, 'M', u'写'), - (0x32A3, 'M', u'æ­£'), - (0x32A4, 'M', u'上'), - (0x32A5, 'M', u'中'), - (0x32A6, 'M', u'下'), - (0x32A7, 'M', u'å·¦'), - (0x32A8, 'M', u'å³'), - 
(0x32A9, 'M', u'医'), - (0x32AA, 'M', u'å®—'), - (0x32AB, 'M', u'å­¦'), - (0x32AC, 'M', u'監'), - (0x32AD, 'M', u'ä¼'), - (0x32AE, 'M', u'資'), - (0x32AF, 'M', u'å”'), - (0x32B0, 'M', u'夜'), - (0x32B1, 'M', u'36'), - (0x32B2, 'M', u'37'), - (0x32B3, 'M', u'38'), - (0x32B4, 'M', u'39'), - (0x32B5, 'M', u'40'), - (0x32B6, 'M', u'41'), - (0x32B7, 'M', u'42'), - (0x32B8, 'M', u'43'), - (0x32B9, 'M', u'44'), - (0x32BA, 'M', u'45'), - (0x32BB, 'M', u'46'), - (0x32BC, 'M', u'47'), - (0x32BD, 'M', u'48'), - (0x32BE, 'M', u'49'), - (0x32BF, 'M', u'50'), - (0x32C0, 'M', u'1月'), - (0x32C1, 'M', u'2月'), - (0x32C2, 'M', u'3月'), - (0x32C3, 'M', u'4月'), - (0x32C4, 'M', u'5月'), - (0x32C5, 'M', u'6月'), - (0x32C6, 'M', u'7月'), - (0x32C7, 'M', u'8月'), - ] - -def _seg_32(): - return [ - (0x32C8, 'M', u'9月'), - (0x32C9, 'M', u'10月'), - (0x32CA, 'M', u'11月'), - (0x32CB, 'M', u'12月'), - (0x32CC, 'M', u'hg'), - (0x32CD, 'M', u'erg'), - (0x32CE, 'M', u'ev'), - (0x32CF, 'M', u'ltd'), - (0x32D0, 'M', u'ã‚¢'), - (0x32D1, 'M', u'イ'), - (0x32D2, 'M', u'ウ'), - (0x32D3, 'M', u'エ'), - (0x32D4, 'M', u'オ'), - (0x32D5, 'M', u'ã‚«'), - (0x32D6, 'M', u'ã‚­'), - (0x32D7, 'M', u'ク'), - (0x32D8, 'M', u'ケ'), - (0x32D9, 'M', u'コ'), - (0x32DA, 'M', u'サ'), - (0x32DB, 'M', u'ã‚·'), - (0x32DC, 'M', u'ス'), - (0x32DD, 'M', u'ã‚»'), - (0x32DE, 'M', u'ソ'), - (0x32DF, 'M', u'ã‚¿'), - (0x32E0, 'M', u'ãƒ'), - (0x32E1, 'M', u'ツ'), - (0x32E2, 'M', u'テ'), - (0x32E3, 'M', u'ト'), - (0x32E4, 'M', u'ナ'), - (0x32E5, 'M', u'ニ'), - (0x32E6, 'M', u'ヌ'), - (0x32E7, 'M', u'ãƒ'), - (0x32E8, 'M', u'ノ'), - (0x32E9, 'M', u'ãƒ'), - (0x32EA, 'M', u'ヒ'), - (0x32EB, 'M', u'フ'), - (0x32EC, 'M', u'ヘ'), - (0x32ED, 'M', u'ホ'), - (0x32EE, 'M', u'マ'), - (0x32EF, 'M', u'ミ'), - (0x32F0, 'M', u'ム'), - (0x32F1, 'M', u'メ'), - (0x32F2, 'M', u'モ'), - (0x32F3, 'M', u'ヤ'), - (0x32F4, 'M', u'ユ'), - (0x32F5, 'M', u'ヨ'), - (0x32F6, 'M', u'ラ'), - (0x32F7, 'M', u'リ'), - (0x32F8, 'M', u'ル'), - (0x32F9, 'M', u'レ'), - (0x32FA, 'M', u'ロ'), - (0x32FB, 'M', u'ワ'), - 
(0x32FC, 'M', u'ヰ'), - (0x32FD, 'M', u'ヱ'), - (0x32FE, 'M', u'ヲ'), - (0x32FF, 'X'), - (0x3300, 'M', u'アパート'), - (0x3301, 'M', u'アルファ'), - (0x3302, 'M', u'アンペア'), - (0x3303, 'M', u'アール'), - (0x3304, 'M', u'イニング'), - (0x3305, 'M', u'インãƒ'), - (0x3306, 'M', u'ウォン'), - (0x3307, 'M', u'エスクード'), - (0x3308, 'M', u'エーカー'), - (0x3309, 'M', u'オンス'), - (0x330A, 'M', u'オーム'), - (0x330B, 'M', u'カイリ'), - (0x330C, 'M', u'カラット'), - (0x330D, 'M', u'カロリー'), - (0x330E, 'M', u'ガロン'), - (0x330F, 'M', u'ガンマ'), - (0x3310, 'M', u'ギガ'), - (0x3311, 'M', u'ギニー'), - (0x3312, 'M', u'キュリー'), - (0x3313, 'M', u'ギルダー'), - (0x3314, 'M', u'キロ'), - (0x3315, 'M', u'キログラム'), - (0x3316, 'M', u'キロメートル'), - (0x3317, 'M', u'キロワット'), - (0x3318, 'M', u'グラム'), - (0x3319, 'M', u'グラムトン'), - (0x331A, 'M', u'クルゼイロ'), - (0x331B, 'M', u'クローãƒ'), - (0x331C, 'M', u'ケース'), - (0x331D, 'M', u'コルナ'), - (0x331E, 'M', u'コーãƒ'), - (0x331F, 'M', u'サイクル'), - (0x3320, 'M', u'サンãƒãƒ¼ãƒ '), - (0x3321, 'M', u'シリング'), - (0x3322, 'M', u'センãƒ'), - (0x3323, 'M', u'セント'), - (0x3324, 'M', u'ダース'), - (0x3325, 'M', u'デシ'), - (0x3326, 'M', u'ドル'), - (0x3327, 'M', u'トン'), - (0x3328, 'M', u'ナノ'), - (0x3329, 'M', u'ノット'), - (0x332A, 'M', u'ãƒã‚¤ãƒ„'), - (0x332B, 'M', u'パーセント'), - ] - -def _seg_33(): - return [ - (0x332C, 'M', u'パーツ'), - (0x332D, 'M', u'ãƒãƒ¼ãƒ¬ãƒ«'), - (0x332E, 'M', u'ピアストル'), - (0x332F, 'M', u'ピクル'), - (0x3330, 'M', u'ピコ'), - (0x3331, 'M', u'ビル'), - (0x3332, 'M', u'ファラッド'), - (0x3333, 'M', u'フィート'), - (0x3334, 'M', u'ブッシェル'), - (0x3335, 'M', u'フラン'), - (0x3336, 'M', u'ヘクタール'), - (0x3337, 'M', u'ペソ'), - (0x3338, 'M', u'ペニヒ'), - (0x3339, 'M', u'ヘルツ'), - (0x333A, 'M', u'ペンス'), - (0x333B, 'M', u'ページ'), - (0x333C, 'M', u'ベータ'), - (0x333D, 'M', u'ãƒã‚¤ãƒ³ãƒˆ'), - (0x333E, 'M', u'ボルト'), - (0x333F, 'M', u'ホン'), - (0x3340, 'M', u'ãƒãƒ³ãƒ‰'), - (0x3341, 'M', u'ホール'), - (0x3342, 'M', u'ホーン'), - (0x3343, 'M', u'マイクロ'), - (0x3344, 'M', u'マイル'), - (0x3345, 'M', u'マッãƒ'), - (0x3346, 'M', u'マルク'), - (0x3347, 'M', u'マンション'), - (0x3348, 'M', 
u'ミクロン'), - (0x3349, 'M', u'ミリ'), - (0x334A, 'M', u'ミリãƒãƒ¼ãƒ«'), - (0x334B, 'M', u'メガ'), - (0x334C, 'M', u'メガトン'), - (0x334D, 'M', u'メートル'), - (0x334E, 'M', u'ヤード'), - (0x334F, 'M', u'ヤール'), - (0x3350, 'M', u'ユアン'), - (0x3351, 'M', u'リットル'), - (0x3352, 'M', u'リラ'), - (0x3353, 'M', u'ルピー'), - (0x3354, 'M', u'ルーブル'), - (0x3355, 'M', u'レム'), - (0x3356, 'M', u'レントゲン'), - (0x3357, 'M', u'ワット'), - (0x3358, 'M', u'0点'), - (0x3359, 'M', u'1点'), - (0x335A, 'M', u'2点'), - (0x335B, 'M', u'3点'), - (0x335C, 'M', u'4点'), - (0x335D, 'M', u'5点'), - (0x335E, 'M', u'6点'), - (0x335F, 'M', u'7点'), - (0x3360, 'M', u'8点'), - (0x3361, 'M', u'9点'), - (0x3362, 'M', u'10点'), - (0x3363, 'M', u'11点'), - (0x3364, 'M', u'12点'), - (0x3365, 'M', u'13点'), - (0x3366, 'M', u'14点'), - (0x3367, 'M', u'15点'), - (0x3368, 'M', u'16点'), - (0x3369, 'M', u'17点'), - (0x336A, 'M', u'18点'), - (0x336B, 'M', u'19点'), - (0x336C, 'M', u'20点'), - (0x336D, 'M', u'21点'), - (0x336E, 'M', u'22点'), - (0x336F, 'M', u'23点'), - (0x3370, 'M', u'24点'), - (0x3371, 'M', u'hpa'), - (0x3372, 'M', u'da'), - (0x3373, 'M', u'au'), - (0x3374, 'M', u'bar'), - (0x3375, 'M', u'ov'), - (0x3376, 'M', u'pc'), - (0x3377, 'M', u'dm'), - (0x3378, 'M', u'dm2'), - (0x3379, 'M', u'dm3'), - (0x337A, 'M', u'iu'), - (0x337B, 'M', u'å¹³æˆ'), - (0x337C, 'M', u'昭和'), - (0x337D, 'M', u'大正'), - (0x337E, 'M', u'明治'), - (0x337F, 'M', u'æ ªå¼ä¼šç¤¾'), - (0x3380, 'M', u'pa'), - (0x3381, 'M', u'na'), - (0x3382, 'M', u'μa'), - (0x3383, 'M', u'ma'), - (0x3384, 'M', u'ka'), - (0x3385, 'M', u'kb'), - (0x3386, 'M', u'mb'), - (0x3387, 'M', u'gb'), - (0x3388, 'M', u'cal'), - (0x3389, 'M', u'kcal'), - (0x338A, 'M', u'pf'), - (0x338B, 'M', u'nf'), - (0x338C, 'M', u'μf'), - (0x338D, 'M', u'μg'), - (0x338E, 'M', u'mg'), - (0x338F, 'M', u'kg'), - ] - -def _seg_34(): - return [ - (0x3390, 'M', u'hz'), - (0x3391, 'M', u'khz'), - (0x3392, 'M', u'mhz'), - (0x3393, 'M', u'ghz'), - (0x3394, 'M', u'thz'), - (0x3395, 'M', u'μl'), - (0x3396, 'M', u'ml'), - (0x3397, 'M', 
u'dl'), - (0x3398, 'M', u'kl'), - (0x3399, 'M', u'fm'), - (0x339A, 'M', u'nm'), - (0x339B, 'M', u'μm'), - (0x339C, 'M', u'mm'), - (0x339D, 'M', u'cm'), - (0x339E, 'M', u'km'), - (0x339F, 'M', u'mm2'), - (0x33A0, 'M', u'cm2'), - (0x33A1, 'M', u'm2'), - (0x33A2, 'M', u'km2'), - (0x33A3, 'M', u'mm3'), - (0x33A4, 'M', u'cm3'), - (0x33A5, 'M', u'm3'), - (0x33A6, 'M', u'km3'), - (0x33A7, 'M', u'm∕s'), - (0x33A8, 'M', u'm∕s2'), - (0x33A9, 'M', u'pa'), - (0x33AA, 'M', u'kpa'), - (0x33AB, 'M', u'mpa'), - (0x33AC, 'M', u'gpa'), - (0x33AD, 'M', u'rad'), - (0x33AE, 'M', u'rad∕s'), - (0x33AF, 'M', u'rad∕s2'), - (0x33B0, 'M', u'ps'), - (0x33B1, 'M', u'ns'), - (0x33B2, 'M', u'μs'), - (0x33B3, 'M', u'ms'), - (0x33B4, 'M', u'pv'), - (0x33B5, 'M', u'nv'), - (0x33B6, 'M', u'μv'), - (0x33B7, 'M', u'mv'), - (0x33B8, 'M', u'kv'), - (0x33B9, 'M', u'mv'), - (0x33BA, 'M', u'pw'), - (0x33BB, 'M', u'nw'), - (0x33BC, 'M', u'μw'), - (0x33BD, 'M', u'mw'), - (0x33BE, 'M', u'kw'), - (0x33BF, 'M', u'mw'), - (0x33C0, 'M', u'kω'), - (0x33C1, 'M', u'mω'), - (0x33C2, 'X'), - (0x33C3, 'M', u'bq'), - (0x33C4, 'M', u'cc'), - (0x33C5, 'M', u'cd'), - (0x33C6, 'M', u'c∕kg'), - (0x33C7, 'X'), - (0x33C8, 'M', u'db'), - (0x33C9, 'M', u'gy'), - (0x33CA, 'M', u'ha'), - (0x33CB, 'M', u'hp'), - (0x33CC, 'M', u'in'), - (0x33CD, 'M', u'kk'), - (0x33CE, 'M', u'km'), - (0x33CF, 'M', u'kt'), - (0x33D0, 'M', u'lm'), - (0x33D1, 'M', u'ln'), - (0x33D2, 'M', u'log'), - (0x33D3, 'M', u'lx'), - (0x33D4, 'M', u'mb'), - (0x33D5, 'M', u'mil'), - (0x33D6, 'M', u'mol'), - (0x33D7, 'M', u'ph'), - (0x33D8, 'X'), - (0x33D9, 'M', u'ppm'), - (0x33DA, 'M', u'pr'), - (0x33DB, 'M', u'sr'), - (0x33DC, 'M', u'sv'), - (0x33DD, 'M', u'wb'), - (0x33DE, 'M', u'v∕m'), - (0x33DF, 'M', u'a∕m'), - (0x33E0, 'M', u'1æ—¥'), - (0x33E1, 'M', u'2æ—¥'), - (0x33E2, 'M', u'3æ—¥'), - (0x33E3, 'M', u'4æ—¥'), - (0x33E4, 'M', u'5æ—¥'), - (0x33E5, 'M', u'6æ—¥'), - (0x33E6, 'M', u'7æ—¥'), - (0x33E7, 'M', u'8æ—¥'), - (0x33E8, 'M', u'9æ—¥'), - (0x33E9, 'M', 
u'10æ—¥'), - (0x33EA, 'M', u'11æ—¥'), - (0x33EB, 'M', u'12æ—¥'), - (0x33EC, 'M', u'13æ—¥'), - (0x33ED, 'M', u'14æ—¥'), - (0x33EE, 'M', u'15æ—¥'), - (0x33EF, 'M', u'16æ—¥'), - (0x33F0, 'M', u'17æ—¥'), - (0x33F1, 'M', u'18æ—¥'), - (0x33F2, 'M', u'19æ—¥'), - (0x33F3, 'M', u'20æ—¥'), - ] - -def _seg_35(): - return [ - (0x33F4, 'M', u'21æ—¥'), - (0x33F5, 'M', u'22æ—¥'), - (0x33F6, 'M', u'23æ—¥'), - (0x33F7, 'M', u'24æ—¥'), - (0x33F8, 'M', u'25æ—¥'), - (0x33F9, 'M', u'26æ—¥'), - (0x33FA, 'M', u'27æ—¥'), - (0x33FB, 'M', u'28æ—¥'), - (0x33FC, 'M', u'29æ—¥'), - (0x33FD, 'M', u'30æ—¥'), - (0x33FE, 'M', u'31æ—¥'), - (0x33FF, 'M', u'gal'), - (0x3400, 'V'), - (0x4DB6, 'X'), - (0x4DC0, 'V'), - (0x9FF0, 'X'), - (0xA000, 'V'), - (0xA48D, 'X'), - (0xA490, 'V'), - (0xA4C7, 'X'), - (0xA4D0, 'V'), - (0xA62C, 'X'), - (0xA640, 'M', u'ê™'), - (0xA641, 'V'), - (0xA642, 'M', u'ꙃ'), - (0xA643, 'V'), - (0xA644, 'M', u'ê™…'), - (0xA645, 'V'), - (0xA646, 'M', u'ꙇ'), - (0xA647, 'V'), - (0xA648, 'M', u'ꙉ'), - (0xA649, 'V'), - (0xA64A, 'M', u'ꙋ'), - (0xA64B, 'V'), - (0xA64C, 'M', u'ê™'), - (0xA64D, 'V'), - (0xA64E, 'M', u'ê™'), - (0xA64F, 'V'), - (0xA650, 'M', u'ꙑ'), - (0xA651, 'V'), - (0xA652, 'M', u'ꙓ'), - (0xA653, 'V'), - (0xA654, 'M', u'ꙕ'), - (0xA655, 'V'), - (0xA656, 'M', u'ê™—'), - (0xA657, 'V'), - (0xA658, 'M', u'ê™™'), - (0xA659, 'V'), - (0xA65A, 'M', u'ê™›'), - (0xA65B, 'V'), - (0xA65C, 'M', u'ê™'), - (0xA65D, 'V'), - (0xA65E, 'M', u'ꙟ'), - (0xA65F, 'V'), - (0xA660, 'M', u'ꙡ'), - (0xA661, 'V'), - (0xA662, 'M', u'ꙣ'), - (0xA663, 'V'), - (0xA664, 'M', u'ꙥ'), - (0xA665, 'V'), - (0xA666, 'M', u'ꙧ'), - (0xA667, 'V'), - (0xA668, 'M', u'ꙩ'), - (0xA669, 'V'), - (0xA66A, 'M', u'ꙫ'), - (0xA66B, 'V'), - (0xA66C, 'M', u'ê™­'), - (0xA66D, 'V'), - (0xA680, 'M', u'êš'), - (0xA681, 'V'), - (0xA682, 'M', u'ꚃ'), - (0xA683, 'V'), - (0xA684, 'M', u'êš…'), - (0xA685, 'V'), - (0xA686, 'M', u'ꚇ'), - (0xA687, 'V'), - (0xA688, 'M', u'ꚉ'), - (0xA689, 'V'), - (0xA68A, 'M', u'êš‹'), - (0xA68B, 'V'), - (0xA68C, 
'M', u'êš'), - (0xA68D, 'V'), - (0xA68E, 'M', u'êš'), - (0xA68F, 'V'), - (0xA690, 'M', u'êš‘'), - (0xA691, 'V'), - (0xA692, 'M', u'êš“'), - (0xA693, 'V'), - (0xA694, 'M', u'êš•'), - (0xA695, 'V'), - (0xA696, 'M', u'êš—'), - (0xA697, 'V'), - (0xA698, 'M', u'êš™'), - (0xA699, 'V'), - (0xA69A, 'M', u'êš›'), - (0xA69B, 'V'), - (0xA69C, 'M', u'ÑŠ'), - (0xA69D, 'M', u'ÑŒ'), - (0xA69E, 'V'), - (0xA6F8, 'X'), - ] - -def _seg_36(): - return [ - (0xA700, 'V'), - (0xA722, 'M', u'ꜣ'), - (0xA723, 'V'), - (0xA724, 'M', u'ꜥ'), - (0xA725, 'V'), - (0xA726, 'M', u'ꜧ'), - (0xA727, 'V'), - (0xA728, 'M', u'ꜩ'), - (0xA729, 'V'), - (0xA72A, 'M', u'ꜫ'), - (0xA72B, 'V'), - (0xA72C, 'M', u'ꜭ'), - (0xA72D, 'V'), - (0xA72E, 'M', u'ꜯ'), - (0xA72F, 'V'), - (0xA732, 'M', u'ꜳ'), - (0xA733, 'V'), - (0xA734, 'M', u'ꜵ'), - (0xA735, 'V'), - (0xA736, 'M', u'ꜷ'), - (0xA737, 'V'), - (0xA738, 'M', u'ꜹ'), - (0xA739, 'V'), - (0xA73A, 'M', u'ꜻ'), - (0xA73B, 'V'), - (0xA73C, 'M', u'ꜽ'), - (0xA73D, 'V'), - (0xA73E, 'M', u'ꜿ'), - (0xA73F, 'V'), - (0xA740, 'M', u'ê'), - (0xA741, 'V'), - (0xA742, 'M', u'êƒ'), - (0xA743, 'V'), - (0xA744, 'M', u'ê…'), - (0xA745, 'V'), - (0xA746, 'M', u'ê‡'), - (0xA747, 'V'), - (0xA748, 'M', u'ê‰'), - (0xA749, 'V'), - (0xA74A, 'M', u'ê‹'), - (0xA74B, 'V'), - (0xA74C, 'M', u'ê'), - (0xA74D, 'V'), - (0xA74E, 'M', u'ê'), - (0xA74F, 'V'), - (0xA750, 'M', u'ê‘'), - (0xA751, 'V'), - (0xA752, 'M', u'ê“'), - (0xA753, 'V'), - (0xA754, 'M', u'ê•'), - (0xA755, 'V'), - (0xA756, 'M', u'ê—'), - (0xA757, 'V'), - (0xA758, 'M', u'ê™'), - (0xA759, 'V'), - (0xA75A, 'M', u'ê›'), - (0xA75B, 'V'), - (0xA75C, 'M', u'ê'), - (0xA75D, 'V'), - (0xA75E, 'M', u'êŸ'), - (0xA75F, 'V'), - (0xA760, 'M', u'ê¡'), - (0xA761, 'V'), - (0xA762, 'M', u'ê£'), - (0xA763, 'V'), - (0xA764, 'M', u'ê¥'), - (0xA765, 'V'), - (0xA766, 'M', u'ê§'), - (0xA767, 'V'), - (0xA768, 'M', u'ê©'), - (0xA769, 'V'), - (0xA76A, 'M', u'ê«'), - (0xA76B, 'V'), - (0xA76C, 'M', u'ê­'), - (0xA76D, 'V'), - (0xA76E, 'M', u'ê¯'), - (0xA76F, 'V'), - 
(0xA770, 'M', u'ê¯'), - (0xA771, 'V'), - (0xA779, 'M', u'êº'), - (0xA77A, 'V'), - (0xA77B, 'M', u'ê¼'), - (0xA77C, 'V'), - (0xA77D, 'M', u'áµ¹'), - (0xA77E, 'M', u'ê¿'), - (0xA77F, 'V'), - (0xA780, 'M', u'êž'), - (0xA781, 'V'), - (0xA782, 'M', u'ꞃ'), - (0xA783, 'V'), - (0xA784, 'M', u'êž…'), - (0xA785, 'V'), - (0xA786, 'M', u'ꞇ'), - (0xA787, 'V'), - (0xA78B, 'M', u'ꞌ'), - (0xA78C, 'V'), - (0xA78D, 'M', u'É¥'), - (0xA78E, 'V'), - (0xA790, 'M', u'êž‘'), - (0xA791, 'V'), - ] - -def _seg_37(): - return [ - (0xA792, 'M', u'êž“'), - (0xA793, 'V'), - (0xA796, 'M', u'êž—'), - (0xA797, 'V'), - (0xA798, 'M', u'êž™'), - (0xA799, 'V'), - (0xA79A, 'M', u'êž›'), - (0xA79B, 'V'), - (0xA79C, 'M', u'êž'), - (0xA79D, 'V'), - (0xA79E, 'M', u'ꞟ'), - (0xA79F, 'V'), - (0xA7A0, 'M', u'êž¡'), - (0xA7A1, 'V'), - (0xA7A2, 'M', u'ꞣ'), - (0xA7A3, 'V'), - (0xA7A4, 'M', u'ꞥ'), - (0xA7A5, 'V'), - (0xA7A6, 'M', u'ꞧ'), - (0xA7A7, 'V'), - (0xA7A8, 'M', u'êž©'), - (0xA7A9, 'V'), - (0xA7AA, 'M', u'ɦ'), - (0xA7AB, 'M', u'Éœ'), - (0xA7AC, 'M', u'É¡'), - (0xA7AD, 'M', u'ɬ'), - (0xA7AE, 'M', u'ɪ'), - (0xA7AF, 'V'), - (0xA7B0, 'M', u'Êž'), - (0xA7B1, 'M', u'ʇ'), - (0xA7B2, 'M', u'Ê'), - (0xA7B3, 'M', u'ê­“'), - (0xA7B4, 'M', u'êžµ'), - (0xA7B5, 'V'), - (0xA7B6, 'M', u'êž·'), - (0xA7B7, 'V'), - (0xA7B8, 'X'), - (0xA7B9, 'V'), - (0xA7BA, 'X'), - (0xA7F7, 'V'), - (0xA7F8, 'M', u'ħ'), - (0xA7F9, 'M', u'Å“'), - (0xA7FA, 'V'), - (0xA82C, 'X'), - (0xA830, 'V'), - (0xA83A, 'X'), - (0xA840, 'V'), - (0xA878, 'X'), - (0xA880, 'V'), - (0xA8C6, 'X'), - (0xA8CE, 'V'), - (0xA8DA, 'X'), - (0xA8E0, 'V'), - (0xA954, 'X'), - (0xA95F, 'V'), - (0xA97D, 'X'), - (0xA980, 'V'), - (0xA9CE, 'X'), - (0xA9CF, 'V'), - (0xA9DA, 'X'), - (0xA9DE, 'V'), - (0xA9FF, 'X'), - (0xAA00, 'V'), - (0xAA37, 'X'), - (0xAA40, 'V'), - (0xAA4E, 'X'), - (0xAA50, 'V'), - (0xAA5A, 'X'), - (0xAA5C, 'V'), - (0xAAC3, 'X'), - (0xAADB, 'V'), - (0xAAF7, 'X'), - (0xAB01, 'V'), - (0xAB07, 'X'), - (0xAB09, 'V'), - (0xAB0F, 'X'), - (0xAB11, 'V'), - (0xAB17, 'X'), 
- (0xAB20, 'V'), - (0xAB27, 'X'), - (0xAB28, 'V'), - (0xAB2F, 'X'), - (0xAB30, 'V'), - (0xAB5C, 'M', u'ꜧ'), - (0xAB5D, 'M', u'ꬷ'), - (0xAB5E, 'M', u'É«'), - (0xAB5F, 'M', u'ê­’'), - (0xAB60, 'V'), - (0xAB66, 'X'), - (0xAB70, 'M', u'Ꭰ'), - (0xAB71, 'M', u'Ꭱ'), - (0xAB72, 'M', u'Ꭲ'), - (0xAB73, 'M', u'Ꭳ'), - (0xAB74, 'M', u'Ꭴ'), - (0xAB75, 'M', u'Ꭵ'), - (0xAB76, 'M', u'Ꭶ'), - (0xAB77, 'M', u'Ꭷ'), - (0xAB78, 'M', u'Ꭸ'), - (0xAB79, 'M', u'Ꭹ'), - (0xAB7A, 'M', u'Ꭺ'), - ] - -def _seg_38(): - return [ - (0xAB7B, 'M', u'Ꭻ'), - (0xAB7C, 'M', u'Ꭼ'), - (0xAB7D, 'M', u'Ꭽ'), - (0xAB7E, 'M', u'Ꭾ'), - (0xAB7F, 'M', u'Ꭿ'), - (0xAB80, 'M', u'Ꮀ'), - (0xAB81, 'M', u'Ꮁ'), - (0xAB82, 'M', u'Ꮂ'), - (0xAB83, 'M', u'Ꮃ'), - (0xAB84, 'M', u'Ꮄ'), - (0xAB85, 'M', u'Ꮅ'), - (0xAB86, 'M', u'Ꮆ'), - (0xAB87, 'M', u'Ꮇ'), - (0xAB88, 'M', u'Ꮈ'), - (0xAB89, 'M', u'Ꮉ'), - (0xAB8A, 'M', u'Ꮊ'), - (0xAB8B, 'M', u'Ꮋ'), - (0xAB8C, 'M', u'Ꮌ'), - (0xAB8D, 'M', u'Ꮍ'), - (0xAB8E, 'M', u'Ꮎ'), - (0xAB8F, 'M', u'Ꮏ'), - (0xAB90, 'M', u'á€'), - (0xAB91, 'M', u'á'), - (0xAB92, 'M', u'á‚'), - (0xAB93, 'M', u'áƒ'), - (0xAB94, 'M', u'á„'), - (0xAB95, 'M', u'á…'), - (0xAB96, 'M', u'á†'), - (0xAB97, 'M', u'á‡'), - (0xAB98, 'M', u'áˆ'), - (0xAB99, 'M', u'á‰'), - (0xAB9A, 'M', u'áŠ'), - (0xAB9B, 'M', u'á‹'), - (0xAB9C, 'M', u'áŒ'), - (0xAB9D, 'M', u'á'), - (0xAB9E, 'M', u'áŽ'), - (0xAB9F, 'M', u'á'), - (0xABA0, 'M', u'á'), - (0xABA1, 'M', u'á‘'), - (0xABA2, 'M', u'á’'), - (0xABA3, 'M', u'á“'), - (0xABA4, 'M', u'á”'), - (0xABA5, 'M', u'á•'), - (0xABA6, 'M', u'á–'), - (0xABA7, 'M', u'á—'), - (0xABA8, 'M', u'á˜'), - (0xABA9, 'M', u'á™'), - (0xABAA, 'M', u'áš'), - (0xABAB, 'M', u'á›'), - (0xABAC, 'M', u'áœ'), - (0xABAD, 'M', u'á'), - (0xABAE, 'M', u'áž'), - (0xABAF, 'M', u'áŸ'), - (0xABB0, 'M', u'á '), - (0xABB1, 'M', u'á¡'), - (0xABB2, 'M', u'á¢'), - (0xABB3, 'M', u'á£'), - (0xABB4, 'M', u'á¤'), - (0xABB5, 'M', u'á¥'), - (0xABB6, 'M', u'á¦'), - (0xABB7, 'M', u'á§'), - (0xABB8, 'M', u'á¨'), - (0xABB9, 'M', u'á©'), - (0xABBA, 
'M', u'áª'), - (0xABBB, 'M', u'á«'), - (0xABBC, 'M', u'á¬'), - (0xABBD, 'M', u'á­'), - (0xABBE, 'M', u'á®'), - (0xABBF, 'M', u'á¯'), - (0xABC0, 'V'), - (0xABEE, 'X'), - (0xABF0, 'V'), - (0xABFA, 'X'), - (0xAC00, 'V'), - (0xD7A4, 'X'), - (0xD7B0, 'V'), - (0xD7C7, 'X'), - (0xD7CB, 'V'), - (0xD7FC, 'X'), - (0xF900, 'M', u'豈'), - (0xF901, 'M', u'æ›´'), - (0xF902, 'M', u'車'), - (0xF903, 'M', u'賈'), - (0xF904, 'M', u'滑'), - (0xF905, 'M', u'串'), - (0xF906, 'M', u'å¥'), - (0xF907, 'M', u'龜'), - (0xF909, 'M', u'契'), - (0xF90A, 'M', u'金'), - (0xF90B, 'M', u'å–‡'), - (0xF90C, 'M', u'奈'), - (0xF90D, 'M', u'懶'), - (0xF90E, 'M', u'癩'), - (0xF90F, 'M', u'ç¾…'), - (0xF910, 'M', u'蘿'), - (0xF911, 'M', u'螺'), - (0xF912, 'M', u'裸'), - (0xF913, 'M', u'é‚'), - (0xF914, 'M', u'樂'), - (0xF915, 'M', u'æ´›'), - ] - -def _seg_39(): - return [ - (0xF916, 'M', u'烙'), - (0xF917, 'M', u'çž'), - (0xF918, 'M', u'è½'), - (0xF919, 'M', u'é…ª'), - (0xF91A, 'M', u'駱'), - (0xF91B, 'M', u'亂'), - (0xF91C, 'M', u'åµ'), - (0xF91D, 'M', u'欄'), - (0xF91E, 'M', u'爛'), - (0xF91F, 'M', u'蘭'), - (0xF920, 'M', u'鸞'), - (0xF921, 'M', u'åµ'), - (0xF922, 'M', u'æ¿«'), - (0xF923, 'M', u'è—'), - (0xF924, 'M', u'襤'), - (0xF925, 'M', u'拉'), - (0xF926, 'M', u'臘'), - (0xF927, 'M', u'è Ÿ'), - (0xF928, 'M', u'廊'), - (0xF929, 'M', u'朗'), - (0xF92A, 'M', u'浪'), - (0xF92B, 'M', u'狼'), - (0xF92C, 'M', u'郎'), - (0xF92D, 'M', u'來'), - (0xF92E, 'M', u'冷'), - (0xF92F, 'M', u'å‹ž'), - (0xF930, 'M', u'æ“„'), - (0xF931, 'M', u'æ«“'), - (0xF932, 'M', u'çˆ'), - (0xF933, 'M', u'盧'), - (0xF934, 'M', u'è€'), - (0xF935, 'M', u'蘆'), - (0xF936, 'M', u'虜'), - (0xF937, 'M', u'è·¯'), - (0xF938, 'M', u'露'), - (0xF939, 'M', u'é­¯'), - (0xF93A, 'M', u'é·º'), - (0xF93B, 'M', u'碌'), - (0xF93C, 'M', u'祿'), - (0xF93D, 'M', u'綠'), - (0xF93E, 'M', u'è‰'), - (0xF93F, 'M', u'錄'), - (0xF940, 'M', u'鹿'), - (0xF941, 'M', u'è«–'), - (0xF942, 'M', u'壟'), - (0xF943, 'M', u'弄'), - (0xF944, 'M', u'ç± '), - (0xF945, 'M', u'è¾'), - (0xF946, 'M', u'牢'), - (0xF947, 
'M', u'磊'), - (0xF948, 'M', u'賂'), - (0xF949, 'M', u'é›·'), - (0xF94A, 'M', u'壘'), - (0xF94B, 'M', u'å±¢'), - (0xF94C, 'M', u'樓'), - (0xF94D, 'M', u'æ·š'), - (0xF94E, 'M', u'æ¼'), - (0xF94F, 'M', u'ç´¯'), - (0xF950, 'M', u'縷'), - (0xF951, 'M', u'陋'), - (0xF952, 'M', u'å‹’'), - (0xF953, 'M', u'è‚‹'), - (0xF954, 'M', u'凜'), - (0xF955, 'M', u'凌'), - (0xF956, 'M', u'稜'), - (0xF957, 'M', u'綾'), - (0xF958, 'M', u'è±'), - (0xF959, 'M', u'陵'), - (0xF95A, 'M', u'讀'), - (0xF95B, 'M', u'æ‹'), - (0xF95C, 'M', u'樂'), - (0xF95D, 'M', u'諾'), - (0xF95E, 'M', u'丹'), - (0xF95F, 'M', u'寧'), - (0xF960, 'M', u'怒'), - (0xF961, 'M', u'率'), - (0xF962, 'M', u'ç•°'), - (0xF963, 'M', u'北'), - (0xF964, 'M', u'磻'), - (0xF965, 'M', u'便'), - (0xF966, 'M', u'復'), - (0xF967, 'M', u'ä¸'), - (0xF968, 'M', u'泌'), - (0xF969, 'M', u'數'), - (0xF96A, 'M', u'ç´¢'), - (0xF96B, 'M', u'åƒ'), - (0xF96C, 'M', u'å¡ž'), - (0xF96D, 'M', u'çœ'), - (0xF96E, 'M', u'葉'), - (0xF96F, 'M', u'說'), - (0xF970, 'M', u'殺'), - (0xF971, 'M', u'è¾°'), - (0xF972, 'M', u'沈'), - (0xF973, 'M', u'拾'), - (0xF974, 'M', u'è‹¥'), - (0xF975, 'M', u'掠'), - (0xF976, 'M', u'ç•¥'), - (0xF977, 'M', u'亮'), - (0xF978, 'M', u'å…©'), - (0xF979, 'M', u'凉'), - ] - -def _seg_40(): - return [ - (0xF97A, 'M', u'æ¢'), - (0xF97B, 'M', u'糧'), - (0xF97C, 'M', u'良'), - (0xF97D, 'M', u'è«’'), - (0xF97E, 'M', u'é‡'), - (0xF97F, 'M', u'勵'), - (0xF980, 'M', u'å‘‚'), - (0xF981, 'M', u'女'), - (0xF982, 'M', u'廬'), - (0xF983, 'M', u'æ—…'), - (0xF984, 'M', u'濾'), - (0xF985, 'M', u'礪'), - (0xF986, 'M', u'é–­'), - (0xF987, 'M', u'驪'), - (0xF988, 'M', u'麗'), - (0xF989, 'M', u'黎'), - (0xF98A, 'M', u'力'), - (0xF98B, 'M', u'曆'), - (0xF98C, 'M', u'æ­·'), - (0xF98D, 'M', u'è½¢'), - (0xF98E, 'M', u'å¹´'), - (0xF98F, 'M', u'æ†'), - (0xF990, 'M', u'戀'), - (0xF991, 'M', u'æ’š'), - (0xF992, 'M', u'æ¼£'), - (0xF993, 'M', u'ç…‰'), - (0xF994, 'M', u'ç’‰'), - (0xF995, 'M', u'秊'), - (0xF996, 'M', u'ç·´'), - (0xF997, 'M', u'è¯'), - (0xF998, 'M', u'輦'), - (0xF999, 'M', u'è“®'), - 
(0xF99A, 'M', u'連'), - (0xF99B, 'M', u'éŠ'), - (0xF99C, 'M', u'列'), - (0xF99D, 'M', u'劣'), - (0xF99E, 'M', u'å’½'), - (0xF99F, 'M', u'烈'), - (0xF9A0, 'M', u'裂'), - (0xF9A1, 'M', u'說'), - (0xF9A2, 'M', u'廉'), - (0xF9A3, 'M', u'念'), - (0xF9A4, 'M', u'æ»'), - (0xF9A5, 'M', u'æ®®'), - (0xF9A6, 'M', u'ç°¾'), - (0xF9A7, 'M', u'çµ'), - (0xF9A8, 'M', u'令'), - (0xF9A9, 'M', u'囹'), - (0xF9AA, 'M', u'寧'), - (0xF9AB, 'M', u'嶺'), - (0xF9AC, 'M', u'怜'), - (0xF9AD, 'M', u'玲'), - (0xF9AE, 'M', u'ç‘©'), - (0xF9AF, 'M', u'羚'), - (0xF9B0, 'M', u'è†'), - (0xF9B1, 'M', u'鈴'), - (0xF9B2, 'M', u'零'), - (0xF9B3, 'M', u'éˆ'), - (0xF9B4, 'M', u'é ˜'), - (0xF9B5, 'M', u'例'), - (0xF9B6, 'M', u'禮'), - (0xF9B7, 'M', u'醴'), - (0xF9B8, 'M', u'隸'), - (0xF9B9, 'M', u'惡'), - (0xF9BA, 'M', u'了'), - (0xF9BB, 'M', u'僚'), - (0xF9BC, 'M', u'寮'), - (0xF9BD, 'M', u'å°¿'), - (0xF9BE, 'M', u'æ–™'), - (0xF9BF, 'M', u'樂'), - (0xF9C0, 'M', u'燎'), - (0xF9C1, 'M', u'療'), - (0xF9C2, 'M', u'蓼'), - (0xF9C3, 'M', u'é¼'), - (0xF9C4, 'M', u'é¾'), - (0xF9C5, 'M', u'暈'), - (0xF9C6, 'M', u'阮'), - (0xF9C7, 'M', u'劉'), - (0xF9C8, 'M', u'æ»'), - (0xF9C9, 'M', u'柳'), - (0xF9CA, 'M', u'æµ'), - (0xF9CB, 'M', u'溜'), - (0xF9CC, 'M', u'ç‰'), - (0xF9CD, 'M', u'ç•™'), - (0xF9CE, 'M', u'ç¡«'), - (0xF9CF, 'M', u'ç´'), - (0xF9D0, 'M', u'é¡ž'), - (0xF9D1, 'M', u'å…­'), - (0xF9D2, 'M', u'戮'), - (0xF9D3, 'M', u'陸'), - (0xF9D4, 'M', u'倫'), - (0xF9D5, 'M', u'å´™'), - (0xF9D6, 'M', u'æ·ª'), - (0xF9D7, 'M', u'輪'), - (0xF9D8, 'M', u'律'), - (0xF9D9, 'M', u'æ…„'), - (0xF9DA, 'M', u'æ —'), - (0xF9DB, 'M', u'率'), - (0xF9DC, 'M', u'隆'), - (0xF9DD, 'M', u'利'), - ] - -def _seg_41(): - return [ - (0xF9DE, 'M', u'å'), - (0xF9DF, 'M', u'å±¥'), - (0xF9E0, 'M', u'易'), - (0xF9E1, 'M', u'æŽ'), - (0xF9E2, 'M', u'梨'), - (0xF9E3, 'M', u'æ³¥'), - (0xF9E4, 'M', u'ç†'), - (0xF9E5, 'M', u'ç—¢'), - (0xF9E6, 'M', u'ç½¹'), - (0xF9E7, 'M', u'è£'), - (0xF9E8, 'M', u'裡'), - (0xF9E9, 'M', u'里'), - (0xF9EA, 'M', u'離'), - (0xF9EB, 'M', u'匿'), - (0xF9EC, 'M', u'溺'), - 
(0xF9ED, 'M', u'å'), - (0xF9EE, 'M', u'ç‡'), - (0xF9EF, 'M', u'ç’˜'), - (0xF9F0, 'M', u'è—º'), - (0xF9F1, 'M', u'隣'), - (0xF9F2, 'M', u'é±—'), - (0xF9F3, 'M', u'麟'), - (0xF9F4, 'M', u'æž—'), - (0xF9F5, 'M', u'æ·‹'), - (0xF9F6, 'M', u'臨'), - (0xF9F7, 'M', u'ç«‹'), - (0xF9F8, 'M', u'笠'), - (0xF9F9, 'M', u'ç²’'), - (0xF9FA, 'M', u'ç‹€'), - (0xF9FB, 'M', u'ç‚™'), - (0xF9FC, 'M', u'è­˜'), - (0xF9FD, 'M', u'什'), - (0xF9FE, 'M', u'茶'), - (0xF9FF, 'M', u'刺'), - (0xFA00, 'M', u'切'), - (0xFA01, 'M', u'度'), - (0xFA02, 'M', u'æ‹“'), - (0xFA03, 'M', u'ç³–'), - (0xFA04, 'M', u'å®…'), - (0xFA05, 'M', u'æ´ž'), - (0xFA06, 'M', u'æš´'), - (0xFA07, 'M', u'è¼»'), - (0xFA08, 'M', u'è¡Œ'), - (0xFA09, 'M', u'é™'), - (0xFA0A, 'M', u'見'), - (0xFA0B, 'M', u'廓'), - (0xFA0C, 'M', u'å…€'), - (0xFA0D, 'M', u'å—€'), - (0xFA0E, 'V'), - (0xFA10, 'M', u'å¡š'), - (0xFA11, 'V'), - (0xFA12, 'M', u'æ™´'), - (0xFA13, 'V'), - (0xFA15, 'M', u'凞'), - (0xFA16, 'M', u'猪'), - (0xFA17, 'M', u'益'), - (0xFA18, 'M', u'礼'), - (0xFA19, 'M', u'神'), - (0xFA1A, 'M', u'祥'), - (0xFA1B, 'M', u'ç¦'), - (0xFA1C, 'M', u'é–'), - (0xFA1D, 'M', u'ç²¾'), - (0xFA1E, 'M', u'ç¾½'), - (0xFA1F, 'V'), - (0xFA20, 'M', u'蘒'), - (0xFA21, 'V'), - (0xFA22, 'M', u'諸'), - (0xFA23, 'V'), - (0xFA25, 'M', u'逸'), - (0xFA26, 'M', u'都'), - (0xFA27, 'V'), - (0xFA2A, 'M', u'飯'), - (0xFA2B, 'M', u'飼'), - (0xFA2C, 'M', u'館'), - (0xFA2D, 'M', u'鶴'), - (0xFA2E, 'M', u'郞'), - (0xFA2F, 'M', u'éš·'), - (0xFA30, 'M', u'ä¾®'), - (0xFA31, 'M', u'僧'), - (0xFA32, 'M', u'å…'), - (0xFA33, 'M', u'勉'), - (0xFA34, 'M', u'勤'), - (0xFA35, 'M', u'å‘'), - (0xFA36, 'M', u'å–'), - (0xFA37, 'M', u'嘆'), - (0xFA38, 'M', u'器'), - (0xFA39, 'M', u'å¡€'), - (0xFA3A, 'M', u'墨'), - (0xFA3B, 'M', u'層'), - (0xFA3C, 'M', u'å±®'), - (0xFA3D, 'M', u'æ‚”'), - (0xFA3E, 'M', u'æ…¨'), - (0xFA3F, 'M', u'憎'), - (0xFA40, 'M', u'懲'), - (0xFA41, 'M', u'æ•'), - (0xFA42, 'M', u'æ—¢'), - (0xFA43, 'M', u'æš‘'), - (0xFA44, 'M', u'梅'), - (0xFA45, 'M', u'æµ·'), - (0xFA46, 'M', u'渚'), - ] - -def 
_seg_42(): - return [ - (0xFA47, 'M', u'æ¼¢'), - (0xFA48, 'M', u'ç…®'), - (0xFA49, 'M', u'爫'), - (0xFA4A, 'M', u'ç¢'), - (0xFA4B, 'M', u'碑'), - (0xFA4C, 'M', u'社'), - (0xFA4D, 'M', u'祉'), - (0xFA4E, 'M', u'祈'), - (0xFA4F, 'M', u'ç¥'), - (0xFA50, 'M', u'祖'), - (0xFA51, 'M', u'ç¥'), - (0xFA52, 'M', u'ç¦'), - (0xFA53, 'M', u'禎'), - (0xFA54, 'M', u'ç©€'), - (0xFA55, 'M', u'çª'), - (0xFA56, 'M', u'節'), - (0xFA57, 'M', u'ç·´'), - (0xFA58, 'M', u'縉'), - (0xFA59, 'M', u'ç¹'), - (0xFA5A, 'M', u'ç½²'), - (0xFA5B, 'M', u'者'), - (0xFA5C, 'M', u'臭'), - (0xFA5D, 'M', u'艹'), - (0xFA5F, 'M', u'è‘—'), - (0xFA60, 'M', u'è¤'), - (0xFA61, 'M', u'視'), - (0xFA62, 'M', u'è¬'), - (0xFA63, 'M', u'謹'), - (0xFA64, 'M', u'賓'), - (0xFA65, 'M', u'è´ˆ'), - (0xFA66, 'M', u'辶'), - (0xFA67, 'M', u'逸'), - (0xFA68, 'M', u'難'), - (0xFA69, 'M', u'響'), - (0xFA6A, 'M', u'é »'), - (0xFA6B, 'M', u'æµ'), - (0xFA6C, 'M', u'𤋮'), - (0xFA6D, 'M', u'舘'), - (0xFA6E, 'X'), - (0xFA70, 'M', u'並'), - (0xFA71, 'M', u'况'), - (0xFA72, 'M', u'å…¨'), - (0xFA73, 'M', u'ä¾€'), - (0xFA74, 'M', u'å……'), - (0xFA75, 'M', u'冀'), - (0xFA76, 'M', u'勇'), - (0xFA77, 'M', u'勺'), - (0xFA78, 'M', u'å–'), - (0xFA79, 'M', u'å••'), - (0xFA7A, 'M', u'å–™'), - (0xFA7B, 'M', u'å—¢'), - (0xFA7C, 'M', u'å¡š'), - (0xFA7D, 'M', u'墳'), - (0xFA7E, 'M', u'奄'), - (0xFA7F, 'M', u'奔'), - (0xFA80, 'M', u'å©¢'), - (0xFA81, 'M', u'嬨'), - (0xFA82, 'M', u'å»’'), - (0xFA83, 'M', u'å»™'), - (0xFA84, 'M', u'彩'), - (0xFA85, 'M', u'å¾­'), - (0xFA86, 'M', u'惘'), - (0xFA87, 'M', u'æ…Ž'), - (0xFA88, 'M', u'愈'), - (0xFA89, 'M', u'憎'), - (0xFA8A, 'M', u'æ… '), - (0xFA8B, 'M', u'懲'), - (0xFA8C, 'M', u'戴'), - (0xFA8D, 'M', u'æ„'), - (0xFA8E, 'M', u'æœ'), - (0xFA8F, 'M', u'æ‘’'), - (0xFA90, 'M', u'æ•–'), - (0xFA91, 'M', u'æ™´'), - (0xFA92, 'M', u'朗'), - (0xFA93, 'M', u'望'), - (0xFA94, 'M', u'æ–'), - (0xFA95, 'M', u'æ­¹'), - (0xFA96, 'M', u'殺'), - (0xFA97, 'M', u'æµ'), - (0xFA98, 'M', u'æ»›'), - (0xFA99, 'M', u'滋'), - (0xFA9A, 'M', u'æ¼¢'), - (0xFA9B, 'M', u'瀞'), - 
(0xFA9C, 'M', u'ç…®'), - (0xFA9D, 'M', u'瞧'), - (0xFA9E, 'M', u'爵'), - (0xFA9F, 'M', u'犯'), - (0xFAA0, 'M', u'猪'), - (0xFAA1, 'M', u'瑱'), - (0xFAA2, 'M', u'甆'), - (0xFAA3, 'M', u'ç”»'), - (0xFAA4, 'M', u'ç˜'), - (0xFAA5, 'M', u'瘟'), - (0xFAA6, 'M', u'益'), - (0xFAA7, 'M', u'ç››'), - (0xFAA8, 'M', u'ç›´'), - (0xFAA9, 'M', u'çŠ'), - (0xFAAA, 'M', u'ç€'), - (0xFAAB, 'M', u'磌'), - (0xFAAC, 'M', u'窱'), - ] - -def _seg_43(): - return [ - (0xFAAD, 'M', u'節'), - (0xFAAE, 'M', u'ç±»'), - (0xFAAF, 'M', u'çµ›'), - (0xFAB0, 'M', u'ç·´'), - (0xFAB1, 'M', u'ç¼¾'), - (0xFAB2, 'M', u'者'), - (0xFAB3, 'M', u'è’'), - (0xFAB4, 'M', u'è¯'), - (0xFAB5, 'M', u'è¹'), - (0xFAB6, 'M', u'è¥'), - (0xFAB7, 'M', u'覆'), - (0xFAB8, 'M', u'視'), - (0xFAB9, 'M', u'調'), - (0xFABA, 'M', u'諸'), - (0xFABB, 'M', u'è«‹'), - (0xFABC, 'M', u'è¬'), - (0xFABD, 'M', u'諾'), - (0xFABE, 'M', u'è«­'), - (0xFABF, 'M', u'謹'), - (0xFAC0, 'M', u'變'), - (0xFAC1, 'M', u'è´ˆ'), - (0xFAC2, 'M', u'輸'), - (0xFAC3, 'M', u'é²'), - (0xFAC4, 'M', u'醙'), - (0xFAC5, 'M', u'鉶'), - (0xFAC6, 'M', u'陼'), - (0xFAC7, 'M', u'難'), - (0xFAC8, 'M', u'é–'), - (0xFAC9, 'M', u'韛'), - (0xFACA, 'M', u'響'), - (0xFACB, 'M', u'é ‹'), - (0xFACC, 'M', u'é »'), - (0xFACD, 'M', u'鬒'), - (0xFACE, 'M', u'龜'), - (0xFACF, 'M', u'𢡊'), - (0xFAD0, 'M', u'𢡄'), - (0xFAD1, 'M', u'ð£•'), - (0xFAD2, 'M', u'ã®'), - (0xFAD3, 'M', u'䀘'), - (0xFAD4, 'M', u'䀹'), - (0xFAD5, 'M', u'𥉉'), - (0xFAD6, 'M', u'ð¥³'), - (0xFAD7, 'M', u'𧻓'), - (0xFAD8, 'M', u'齃'), - (0xFAD9, 'M', u'龎'), - (0xFADA, 'X'), - (0xFB00, 'M', u'ff'), - (0xFB01, 'M', u'fi'), - (0xFB02, 'M', u'fl'), - (0xFB03, 'M', u'ffi'), - (0xFB04, 'M', u'ffl'), - (0xFB05, 'M', u'st'), - (0xFB07, 'X'), - (0xFB13, 'M', u'Õ´Õ¶'), - (0xFB14, 'M', u'Õ´Õ¥'), - (0xFB15, 'M', u'Õ´Õ«'), - (0xFB16, 'M', u'Õ¾Õ¶'), - (0xFB17, 'M', u'Õ´Õ­'), - (0xFB18, 'X'), - (0xFB1D, 'M', u'×™Ö´'), - (0xFB1E, 'V'), - (0xFB1F, 'M', u'ײַ'), - (0xFB20, 'M', u'×¢'), - (0xFB21, 'M', u'×'), - (0xFB22, 'M', u'ד'), - (0xFB23, 'M', u'×”'), - (0xFB24, 
'M', u'×›'), - (0xFB25, 'M', u'ל'), - (0xFB26, 'M', u'×'), - (0xFB27, 'M', u'ר'), - (0xFB28, 'M', u'ת'), - (0xFB29, '3', u'+'), - (0xFB2A, 'M', u'ש×'), - (0xFB2B, 'M', u'שׂ'), - (0xFB2C, 'M', u'שּ×'), - (0xFB2D, 'M', u'שּׂ'), - (0xFB2E, 'M', u'×Ö·'), - (0xFB2F, 'M', u'×Ö¸'), - (0xFB30, 'M', u'×Ö¼'), - (0xFB31, 'M', u'בּ'), - (0xFB32, 'M', u'×’Ö¼'), - (0xFB33, 'M', u'דּ'), - (0xFB34, 'M', u'×”Ö¼'), - (0xFB35, 'M', u'וּ'), - (0xFB36, 'M', u'×–Ö¼'), - (0xFB37, 'X'), - (0xFB38, 'M', u'טּ'), - (0xFB39, 'M', u'×™Ö¼'), - (0xFB3A, 'M', u'ךּ'), - (0xFB3B, 'M', u'×›Ö¼'), - (0xFB3C, 'M', u'לּ'), - (0xFB3D, 'X'), - (0xFB3E, 'M', u'מּ'), - (0xFB3F, 'X'), - (0xFB40, 'M', u'× Ö¼'), - (0xFB41, 'M', u'סּ'), - (0xFB42, 'X'), - (0xFB43, 'M', u'×£Ö¼'), - (0xFB44, 'M', u'פּ'), - (0xFB45, 'X'), - ] - -def _seg_44(): - return [ - (0xFB46, 'M', u'צּ'), - (0xFB47, 'M', u'קּ'), - (0xFB48, 'M', u'רּ'), - (0xFB49, 'M', u'שּ'), - (0xFB4A, 'M', u'תּ'), - (0xFB4B, 'M', u'וֹ'), - (0xFB4C, 'M', u'בֿ'), - (0xFB4D, 'M', u'×›Ö¿'), - (0xFB4E, 'M', u'פֿ'), - (0xFB4F, 'M', u'×ל'), - (0xFB50, 'M', u'Ù±'), - (0xFB52, 'M', u'Ù»'), - (0xFB56, 'M', u'Ù¾'), - (0xFB5A, 'M', u'Ú€'), - (0xFB5E, 'M', u'Ùº'), - (0xFB62, 'M', u'Ù¿'), - (0xFB66, 'M', u'Ù¹'), - (0xFB6A, 'M', u'Ú¤'), - (0xFB6E, 'M', u'Ú¦'), - (0xFB72, 'M', u'Ú„'), - (0xFB76, 'M', u'Úƒ'), - (0xFB7A, 'M', u'Ú†'), - (0xFB7E, 'M', u'Ú‡'), - (0xFB82, 'M', u'Ú'), - (0xFB84, 'M', u'ÚŒ'), - (0xFB86, 'M', u'ÚŽ'), - (0xFB88, 'M', u'Úˆ'), - (0xFB8A, 'M', u'Ú˜'), - (0xFB8C, 'M', u'Ú‘'), - (0xFB8E, 'M', u'Ú©'), - (0xFB92, 'M', u'Ú¯'), - (0xFB96, 'M', u'Ú³'), - (0xFB9A, 'M', u'Ú±'), - (0xFB9E, 'M', u'Úº'), - (0xFBA0, 'M', u'Ú»'), - (0xFBA4, 'M', u'Û€'), - (0xFBA6, 'M', u'Û'), - (0xFBAA, 'M', u'Ú¾'), - (0xFBAE, 'M', u'Û’'), - (0xFBB0, 'M', u'Û“'), - (0xFBB2, 'V'), - (0xFBC2, 'X'), - (0xFBD3, 'M', u'Ú­'), - (0xFBD7, 'M', u'Û‡'), - (0xFBD9, 'M', u'Û†'), - (0xFBDB, 'M', u'Ûˆ'), - (0xFBDD, 'M', u'Û‡Ù´'), - (0xFBDE, 'M', u'Û‹'), - (0xFBE0, 'M', u'Û…'), - (0xFBE2, 'M', 
u'Û‰'), - (0xFBE4, 'M', u'Û'), - (0xFBE8, 'M', u'Ù‰'), - (0xFBEA, 'M', u'ئا'), - (0xFBEC, 'M', u'ئە'), - (0xFBEE, 'M', u'ئو'), - (0xFBF0, 'M', u'ئۇ'), - (0xFBF2, 'M', u'ئۆ'), - (0xFBF4, 'M', u'ئۈ'), - (0xFBF6, 'M', u'ئÛ'), - (0xFBF9, 'M', u'ئى'), - (0xFBFC, 'M', u'ÛŒ'), - (0xFC00, 'M', u'ئج'), - (0xFC01, 'M', u'ئح'), - (0xFC02, 'M', u'ئم'), - (0xFC03, 'M', u'ئى'), - (0xFC04, 'M', u'ئي'), - (0xFC05, 'M', u'بج'), - (0xFC06, 'M', u'بح'), - (0xFC07, 'M', u'بخ'), - (0xFC08, 'M', u'بم'), - (0xFC09, 'M', u'بى'), - (0xFC0A, 'M', u'بي'), - (0xFC0B, 'M', u'تج'), - (0xFC0C, 'M', u'تح'), - (0xFC0D, 'M', u'تخ'), - (0xFC0E, 'M', u'تم'), - (0xFC0F, 'M', u'تى'), - (0xFC10, 'M', u'تي'), - (0xFC11, 'M', u'ثج'), - (0xFC12, 'M', u'ثم'), - (0xFC13, 'M', u'ثى'), - (0xFC14, 'M', u'ثي'), - (0xFC15, 'M', u'جح'), - (0xFC16, 'M', u'جم'), - (0xFC17, 'M', u'حج'), - (0xFC18, 'M', u'حم'), - (0xFC19, 'M', u'خج'), - (0xFC1A, 'M', u'خح'), - (0xFC1B, 'M', u'خم'), - (0xFC1C, 'M', u'سج'), - (0xFC1D, 'M', u'سح'), - (0xFC1E, 'M', u'سخ'), - (0xFC1F, 'M', u'سم'), - (0xFC20, 'M', u'صح'), - (0xFC21, 'M', u'صم'), - (0xFC22, 'M', u'ضج'), - (0xFC23, 'M', u'ضح'), - (0xFC24, 'M', u'ضخ'), - (0xFC25, 'M', u'ضم'), - (0xFC26, 'M', u'طح'), - ] - -def _seg_45(): - return [ - (0xFC27, 'M', u'طم'), - (0xFC28, 'M', u'ظم'), - (0xFC29, 'M', u'عج'), - (0xFC2A, 'M', u'عم'), - (0xFC2B, 'M', u'غج'), - (0xFC2C, 'M', u'غم'), - (0xFC2D, 'M', u'Ùج'), - (0xFC2E, 'M', u'ÙØ­'), - (0xFC2F, 'M', u'ÙØ®'), - (0xFC30, 'M', u'ÙÙ…'), - (0xFC31, 'M', u'ÙÙ‰'), - (0xFC32, 'M', u'ÙÙŠ'), - (0xFC33, 'M', u'قح'), - (0xFC34, 'M', u'قم'), - (0xFC35, 'M', u'قى'), - (0xFC36, 'M', u'قي'), - (0xFC37, 'M', u'كا'), - (0xFC38, 'M', u'كج'), - (0xFC39, 'M', u'كح'), - (0xFC3A, 'M', u'كخ'), - (0xFC3B, 'M', u'كل'), - (0xFC3C, 'M', u'كم'), - (0xFC3D, 'M', u'كى'), - (0xFC3E, 'M', u'كي'), - (0xFC3F, 'M', u'لج'), - (0xFC40, 'M', u'لح'), - (0xFC41, 'M', u'لخ'), - (0xFC42, 'M', u'لم'), - (0xFC43, 'M', u'لى'), - (0xFC44, 'M', u'لي'), - (0xFC45, 'M', u'مج'), - (0xFC46, 
'M', u'مح'), - (0xFC47, 'M', u'مخ'), - (0xFC48, 'M', u'مم'), - (0xFC49, 'M', u'مى'), - (0xFC4A, 'M', u'مي'), - (0xFC4B, 'M', u'نج'), - (0xFC4C, 'M', u'نح'), - (0xFC4D, 'M', u'نخ'), - (0xFC4E, 'M', u'نم'), - (0xFC4F, 'M', u'نى'), - (0xFC50, 'M', u'ني'), - (0xFC51, 'M', u'هج'), - (0xFC52, 'M', u'هم'), - (0xFC53, 'M', u'هى'), - (0xFC54, 'M', u'هي'), - (0xFC55, 'M', u'يج'), - (0xFC56, 'M', u'يح'), - (0xFC57, 'M', u'يخ'), - (0xFC58, 'M', u'يم'), - (0xFC59, 'M', u'يى'), - (0xFC5A, 'M', u'يي'), - (0xFC5B, 'M', u'ذٰ'), - (0xFC5C, 'M', u'رٰ'), - (0xFC5D, 'M', u'ىٰ'), - (0xFC5E, '3', u' ٌّ'), - (0xFC5F, '3', u' ÙÙ‘'), - (0xFC60, '3', u' ÙŽÙ‘'), - (0xFC61, '3', u' ÙÙ‘'), - (0xFC62, '3', u' ÙÙ‘'), - (0xFC63, '3', u' ّٰ'), - (0xFC64, 'M', u'ئر'), - (0xFC65, 'M', u'ئز'), - (0xFC66, 'M', u'ئم'), - (0xFC67, 'M', u'ئن'), - (0xFC68, 'M', u'ئى'), - (0xFC69, 'M', u'ئي'), - (0xFC6A, 'M', u'بر'), - (0xFC6B, 'M', u'بز'), - (0xFC6C, 'M', u'بم'), - (0xFC6D, 'M', u'بن'), - (0xFC6E, 'M', u'بى'), - (0xFC6F, 'M', u'بي'), - (0xFC70, 'M', u'تر'), - (0xFC71, 'M', u'تز'), - (0xFC72, 'M', u'تم'), - (0xFC73, 'M', u'تن'), - (0xFC74, 'M', u'تى'), - (0xFC75, 'M', u'تي'), - (0xFC76, 'M', u'ثر'), - (0xFC77, 'M', u'ثز'), - (0xFC78, 'M', u'ثم'), - (0xFC79, 'M', u'ثن'), - (0xFC7A, 'M', u'ثى'), - (0xFC7B, 'M', u'ثي'), - (0xFC7C, 'M', u'ÙÙ‰'), - (0xFC7D, 'M', u'ÙÙŠ'), - (0xFC7E, 'M', u'قى'), - (0xFC7F, 'M', u'قي'), - (0xFC80, 'M', u'كا'), - (0xFC81, 'M', u'كل'), - (0xFC82, 'M', u'كم'), - (0xFC83, 'M', u'كى'), - (0xFC84, 'M', u'كي'), - (0xFC85, 'M', u'لم'), - (0xFC86, 'M', u'لى'), - (0xFC87, 'M', u'لي'), - (0xFC88, 'M', u'ما'), - (0xFC89, 'M', u'مم'), - (0xFC8A, 'M', u'نر'), - ] - -def _seg_46(): - return [ - (0xFC8B, 'M', u'نز'), - (0xFC8C, 'M', u'نم'), - (0xFC8D, 'M', u'نن'), - (0xFC8E, 'M', u'نى'), - (0xFC8F, 'M', u'ني'), - (0xFC90, 'M', u'ىٰ'), - (0xFC91, 'M', u'ير'), - (0xFC92, 'M', u'يز'), - (0xFC93, 'M', u'يم'), - (0xFC94, 'M', u'ين'), - (0xFC95, 'M', u'يى'), - (0xFC96, 'M', u'يي'), - (0xFC97, 'M', 
u'ئج'), - (0xFC98, 'M', u'ئح'), - (0xFC99, 'M', u'ئخ'), - (0xFC9A, 'M', u'ئم'), - (0xFC9B, 'M', u'ئه'), - (0xFC9C, 'M', u'بج'), - (0xFC9D, 'M', u'بح'), - (0xFC9E, 'M', u'بخ'), - (0xFC9F, 'M', u'بم'), - (0xFCA0, 'M', u'به'), - (0xFCA1, 'M', u'تج'), - (0xFCA2, 'M', u'تح'), - (0xFCA3, 'M', u'تخ'), - (0xFCA4, 'M', u'تم'), - (0xFCA5, 'M', u'ته'), - (0xFCA6, 'M', u'ثم'), - (0xFCA7, 'M', u'جح'), - (0xFCA8, 'M', u'جم'), - (0xFCA9, 'M', u'حج'), - (0xFCAA, 'M', u'حم'), - (0xFCAB, 'M', u'خج'), - (0xFCAC, 'M', u'خم'), - (0xFCAD, 'M', u'سج'), - (0xFCAE, 'M', u'سح'), - (0xFCAF, 'M', u'سخ'), - (0xFCB0, 'M', u'سم'), - (0xFCB1, 'M', u'صح'), - (0xFCB2, 'M', u'صخ'), - (0xFCB3, 'M', u'صم'), - (0xFCB4, 'M', u'ضج'), - (0xFCB5, 'M', u'ضح'), - (0xFCB6, 'M', u'ضخ'), - (0xFCB7, 'M', u'ضم'), - (0xFCB8, 'M', u'طح'), - (0xFCB9, 'M', u'ظم'), - (0xFCBA, 'M', u'عج'), - (0xFCBB, 'M', u'عم'), - (0xFCBC, 'M', u'غج'), - (0xFCBD, 'M', u'غم'), - (0xFCBE, 'M', u'Ùج'), - (0xFCBF, 'M', u'ÙØ­'), - (0xFCC0, 'M', u'ÙØ®'), - (0xFCC1, 'M', u'ÙÙ…'), - (0xFCC2, 'M', u'قح'), - (0xFCC3, 'M', u'قم'), - (0xFCC4, 'M', u'كج'), - (0xFCC5, 'M', u'كح'), - (0xFCC6, 'M', u'كخ'), - (0xFCC7, 'M', u'كل'), - (0xFCC8, 'M', u'كم'), - (0xFCC9, 'M', u'لج'), - (0xFCCA, 'M', u'لح'), - (0xFCCB, 'M', u'لخ'), - (0xFCCC, 'M', u'لم'), - (0xFCCD, 'M', u'له'), - (0xFCCE, 'M', u'مج'), - (0xFCCF, 'M', u'مح'), - (0xFCD0, 'M', u'مخ'), - (0xFCD1, 'M', u'مم'), - (0xFCD2, 'M', u'نج'), - (0xFCD3, 'M', u'نح'), - (0xFCD4, 'M', u'نخ'), - (0xFCD5, 'M', u'نم'), - (0xFCD6, 'M', u'نه'), - (0xFCD7, 'M', u'هج'), - (0xFCD8, 'M', u'هم'), - (0xFCD9, 'M', u'هٰ'), - (0xFCDA, 'M', u'يج'), - (0xFCDB, 'M', u'يح'), - (0xFCDC, 'M', u'يخ'), - (0xFCDD, 'M', u'يم'), - (0xFCDE, 'M', u'يه'), - (0xFCDF, 'M', u'ئم'), - (0xFCE0, 'M', u'ئه'), - (0xFCE1, 'M', u'بم'), - (0xFCE2, 'M', u'به'), - (0xFCE3, 'M', u'تم'), - (0xFCE4, 'M', u'ته'), - (0xFCE5, 'M', u'ثم'), - (0xFCE6, 'M', u'ثه'), - (0xFCE7, 'M', u'سم'), - (0xFCE8, 'M', u'سه'), - (0xFCE9, 'M', u'شم'), - (0xFCEA, 'M', 
u'شه'), - (0xFCEB, 'M', u'كل'), - (0xFCEC, 'M', u'كم'), - (0xFCED, 'M', u'لم'), - (0xFCEE, 'M', u'نم'), - ] - -def _seg_47(): - return [ - (0xFCEF, 'M', u'نه'), - (0xFCF0, 'M', u'يم'), - (0xFCF1, 'M', u'يه'), - (0xFCF2, 'M', u'Ù€ÙŽÙ‘'), - (0xFCF3, 'M', u'Ù€ÙÙ‘'), - (0xFCF4, 'M', u'Ù€ÙÙ‘'), - (0xFCF5, 'M', u'طى'), - (0xFCF6, 'M', u'طي'), - (0xFCF7, 'M', u'عى'), - (0xFCF8, 'M', u'عي'), - (0xFCF9, 'M', u'غى'), - (0xFCFA, 'M', u'غي'), - (0xFCFB, 'M', u'سى'), - (0xFCFC, 'M', u'سي'), - (0xFCFD, 'M', u'شى'), - (0xFCFE, 'M', u'شي'), - (0xFCFF, 'M', u'حى'), - (0xFD00, 'M', u'حي'), - (0xFD01, 'M', u'جى'), - (0xFD02, 'M', u'جي'), - (0xFD03, 'M', u'خى'), - (0xFD04, 'M', u'خي'), - (0xFD05, 'M', u'صى'), - (0xFD06, 'M', u'صي'), - (0xFD07, 'M', u'ضى'), - (0xFD08, 'M', u'ضي'), - (0xFD09, 'M', u'شج'), - (0xFD0A, 'M', u'شح'), - (0xFD0B, 'M', u'شخ'), - (0xFD0C, 'M', u'شم'), - (0xFD0D, 'M', u'شر'), - (0xFD0E, 'M', u'سر'), - (0xFD0F, 'M', u'صر'), - (0xFD10, 'M', u'ضر'), - (0xFD11, 'M', u'طى'), - (0xFD12, 'M', u'طي'), - (0xFD13, 'M', u'عى'), - (0xFD14, 'M', u'عي'), - (0xFD15, 'M', u'غى'), - (0xFD16, 'M', u'غي'), - (0xFD17, 'M', u'سى'), - (0xFD18, 'M', u'سي'), - (0xFD19, 'M', u'شى'), - (0xFD1A, 'M', u'شي'), - (0xFD1B, 'M', u'حى'), - (0xFD1C, 'M', u'حي'), - (0xFD1D, 'M', u'جى'), - (0xFD1E, 'M', u'جي'), - (0xFD1F, 'M', u'خى'), - (0xFD20, 'M', u'خي'), - (0xFD21, 'M', u'صى'), - (0xFD22, 'M', u'صي'), - (0xFD23, 'M', u'ضى'), - (0xFD24, 'M', u'ضي'), - (0xFD25, 'M', u'شج'), - (0xFD26, 'M', u'شح'), - (0xFD27, 'M', u'شخ'), - (0xFD28, 'M', u'شم'), - (0xFD29, 'M', u'شر'), - (0xFD2A, 'M', u'سر'), - (0xFD2B, 'M', u'صر'), - (0xFD2C, 'M', u'ضر'), - (0xFD2D, 'M', u'شج'), - (0xFD2E, 'M', u'شح'), - (0xFD2F, 'M', u'شخ'), - (0xFD30, 'M', u'شم'), - (0xFD31, 'M', u'سه'), - (0xFD32, 'M', u'شه'), - (0xFD33, 'M', u'طم'), - (0xFD34, 'M', u'سج'), - (0xFD35, 'M', u'سح'), - (0xFD36, 'M', u'سخ'), - (0xFD37, 'M', u'شج'), - (0xFD38, 'M', u'شح'), - (0xFD39, 'M', u'شخ'), - (0xFD3A, 'M', u'طم'), - (0xFD3B, 'M', u'ظم'), - 
(0xFD3C, 'M', u'اً'), - (0xFD3E, 'V'), - (0xFD40, 'X'), - (0xFD50, 'M', u'تجم'), - (0xFD51, 'M', u'تحج'), - (0xFD53, 'M', u'تحم'), - (0xFD54, 'M', u'تخم'), - (0xFD55, 'M', u'تمج'), - (0xFD56, 'M', u'تمح'), - (0xFD57, 'M', u'تمخ'), - (0xFD58, 'M', u'جمح'), - (0xFD5A, 'M', u'حمي'), - (0xFD5B, 'M', u'حمى'), - (0xFD5C, 'M', u'سحج'), - (0xFD5D, 'M', u'سجح'), - (0xFD5E, 'M', u'سجى'), - (0xFD5F, 'M', u'سمح'), - (0xFD61, 'M', u'سمج'), - (0xFD62, 'M', u'سمم'), - (0xFD64, 'M', u'صحح'), - (0xFD66, 'M', u'صمم'), - (0xFD67, 'M', u'شحم'), - (0xFD69, 'M', u'شجي'), - ] - -def _seg_48(): - return [ - (0xFD6A, 'M', u'شمخ'), - (0xFD6C, 'M', u'شمم'), - (0xFD6E, 'M', u'ضحى'), - (0xFD6F, 'M', u'ضخم'), - (0xFD71, 'M', u'طمح'), - (0xFD73, 'M', u'طمم'), - (0xFD74, 'M', u'طمي'), - (0xFD75, 'M', u'عجم'), - (0xFD76, 'M', u'عمم'), - (0xFD78, 'M', u'عمى'), - (0xFD79, 'M', u'غمم'), - (0xFD7A, 'M', u'غمي'), - (0xFD7B, 'M', u'غمى'), - (0xFD7C, 'M', u'Ùخم'), - (0xFD7E, 'M', u'قمح'), - (0xFD7F, 'M', u'قمم'), - (0xFD80, 'M', u'لحم'), - (0xFD81, 'M', u'لحي'), - (0xFD82, 'M', u'لحى'), - (0xFD83, 'M', u'لجج'), - (0xFD85, 'M', u'لخم'), - (0xFD87, 'M', u'لمح'), - (0xFD89, 'M', u'محج'), - (0xFD8A, 'M', u'محم'), - (0xFD8B, 'M', u'محي'), - (0xFD8C, 'M', u'مجح'), - (0xFD8D, 'M', u'مجم'), - (0xFD8E, 'M', u'مخج'), - (0xFD8F, 'M', u'مخم'), - (0xFD90, 'X'), - (0xFD92, 'M', u'مجخ'), - (0xFD93, 'M', u'همج'), - (0xFD94, 'M', u'همم'), - (0xFD95, 'M', u'نحم'), - (0xFD96, 'M', u'نحى'), - (0xFD97, 'M', u'نجم'), - (0xFD99, 'M', u'نجى'), - (0xFD9A, 'M', u'نمي'), - (0xFD9B, 'M', u'نمى'), - (0xFD9C, 'M', u'يمم'), - (0xFD9E, 'M', u'بخي'), - (0xFD9F, 'M', u'تجي'), - (0xFDA0, 'M', u'تجى'), - (0xFDA1, 'M', u'تخي'), - (0xFDA2, 'M', u'تخى'), - (0xFDA3, 'M', u'تمي'), - (0xFDA4, 'M', u'تمى'), - (0xFDA5, 'M', u'جمي'), - (0xFDA6, 'M', u'جحى'), - (0xFDA7, 'M', u'جمى'), - (0xFDA8, 'M', u'سخى'), - (0xFDA9, 'M', u'صحي'), - (0xFDAA, 'M', u'شحي'), - (0xFDAB, 'M', u'ضحي'), - (0xFDAC, 'M', u'لجي'), - (0xFDAD, 'M', u'لمي'), - (0xFDAE, 'M', 
u'يحي'), - (0xFDAF, 'M', u'يجي'), - (0xFDB0, 'M', u'يمي'), - (0xFDB1, 'M', u'ممي'), - (0xFDB2, 'M', u'قمي'), - (0xFDB3, 'M', u'نحي'), - (0xFDB4, 'M', u'قمح'), - (0xFDB5, 'M', u'لحم'), - (0xFDB6, 'M', u'عمي'), - (0xFDB7, 'M', u'كمي'), - (0xFDB8, 'M', u'نجح'), - (0xFDB9, 'M', u'مخي'), - (0xFDBA, 'M', u'لجم'), - (0xFDBB, 'M', u'كمم'), - (0xFDBC, 'M', u'لجم'), - (0xFDBD, 'M', u'نجح'), - (0xFDBE, 'M', u'جحي'), - (0xFDBF, 'M', u'حجي'), - (0xFDC0, 'M', u'مجي'), - (0xFDC1, 'M', u'Ùمي'), - (0xFDC2, 'M', u'بحي'), - (0xFDC3, 'M', u'كمم'), - (0xFDC4, 'M', u'عجم'), - (0xFDC5, 'M', u'صمم'), - (0xFDC6, 'M', u'سخي'), - (0xFDC7, 'M', u'نجي'), - (0xFDC8, 'X'), - (0xFDF0, 'M', u'صلے'), - (0xFDF1, 'M', u'قلے'), - (0xFDF2, 'M', u'الله'), - (0xFDF3, 'M', u'اكبر'), - (0xFDF4, 'M', u'محمد'), - (0xFDF5, 'M', u'صلعم'), - (0xFDF6, 'M', u'رسول'), - (0xFDF7, 'M', u'عليه'), - (0xFDF8, 'M', u'وسلم'), - (0xFDF9, 'M', u'صلى'), - (0xFDFA, '3', u'صلى الله عليه وسلم'), - (0xFDFB, '3', u'جل جلاله'), - (0xFDFC, 'M', u'ریال'), - (0xFDFD, 'V'), - (0xFDFE, 'X'), - (0xFE00, 'I'), - (0xFE10, '3', u','), - ] - -def _seg_49(): - return [ - (0xFE11, 'M', u'ã€'), - (0xFE12, 'X'), - (0xFE13, '3', u':'), - (0xFE14, '3', u';'), - (0xFE15, '3', u'!'), - (0xFE16, '3', u'?'), - (0xFE17, 'M', u'〖'), - (0xFE18, 'M', u'〗'), - (0xFE19, 'X'), - (0xFE20, 'V'), - (0xFE30, 'X'), - (0xFE31, 'M', u'—'), - (0xFE32, 'M', u'–'), - (0xFE33, '3', u'_'), - (0xFE35, '3', u'('), - (0xFE36, '3', u')'), - (0xFE37, '3', u'{'), - (0xFE38, '3', u'}'), - (0xFE39, 'M', u'〔'), - (0xFE3A, 'M', u'〕'), - (0xFE3B, 'M', u'ã€'), - (0xFE3C, 'M', u'】'), - (0xFE3D, 'M', u'《'), - (0xFE3E, 'M', u'》'), - (0xFE3F, 'M', u'〈'), - (0xFE40, 'M', u'〉'), - (0xFE41, 'M', u'「'), - (0xFE42, 'M', u'ã€'), - (0xFE43, 'M', u'『'), - (0xFE44, 'M', u'ã€'), - (0xFE45, 'V'), - (0xFE47, '3', u'['), - (0xFE48, '3', u']'), - (0xFE49, '3', u' Ì…'), - (0xFE4D, '3', u'_'), - (0xFE50, '3', u','), - (0xFE51, 'M', u'ã€'), - (0xFE52, 'X'), - (0xFE54, '3', u';'), - (0xFE55, '3', 
u':'), - (0xFE56, '3', u'?'), - (0xFE57, '3', u'!'), - (0xFE58, 'M', u'—'), - (0xFE59, '3', u'('), - (0xFE5A, '3', u')'), - (0xFE5B, '3', u'{'), - (0xFE5C, '3', u'}'), - (0xFE5D, 'M', u'〔'), - (0xFE5E, 'M', u'〕'), - (0xFE5F, '3', u'#'), - (0xFE60, '3', u'&'), - (0xFE61, '3', u'*'), - (0xFE62, '3', u'+'), - (0xFE63, 'M', u'-'), - (0xFE64, '3', u'<'), - (0xFE65, '3', u'>'), - (0xFE66, '3', u'='), - (0xFE67, 'X'), - (0xFE68, '3', u'\\'), - (0xFE69, '3', u'$'), - (0xFE6A, '3', u'%'), - (0xFE6B, '3', u'@'), - (0xFE6C, 'X'), - (0xFE70, '3', u' Ù‹'), - (0xFE71, 'M', u'ـً'), - (0xFE72, '3', u' ÙŒ'), - (0xFE73, 'V'), - (0xFE74, '3', u' Ù'), - (0xFE75, 'X'), - (0xFE76, '3', u' ÙŽ'), - (0xFE77, 'M', u'Ù€ÙŽ'), - (0xFE78, '3', u' Ù'), - (0xFE79, 'M', u'Ù€Ù'), - (0xFE7A, '3', u' Ù'), - (0xFE7B, 'M', u'Ù€Ù'), - (0xFE7C, '3', u' Ù‘'), - (0xFE7D, 'M', u'ـّ'), - (0xFE7E, '3', u' Ù’'), - (0xFE7F, 'M', u'ـْ'), - (0xFE80, 'M', u'Ø¡'), - (0xFE81, 'M', u'Ø¢'), - (0xFE83, 'M', u'Ø£'), - (0xFE85, 'M', u'ؤ'), - (0xFE87, 'M', u'Ø¥'), - (0xFE89, 'M', u'ئ'), - (0xFE8D, 'M', u'ا'), - (0xFE8F, 'M', u'ب'), - (0xFE93, 'M', u'Ø©'), - (0xFE95, 'M', u'ت'), - (0xFE99, 'M', u'Ø«'), - (0xFE9D, 'M', u'ج'), - (0xFEA1, 'M', u'Ø­'), - (0xFEA5, 'M', u'Ø®'), - (0xFEA9, 'M', u'د'), - (0xFEAB, 'M', u'Ø°'), - (0xFEAD, 'M', u'ر'), - (0xFEAF, 'M', u'ز'), - (0xFEB1, 'M', u'س'), - (0xFEB5, 'M', u'Ø´'), - (0xFEB9, 'M', u'ص'), - ] - -def _seg_50(): - return [ - (0xFEBD, 'M', u'ض'), - (0xFEC1, 'M', u'Ø·'), - (0xFEC5, 'M', u'ظ'), - (0xFEC9, 'M', u'ع'), - (0xFECD, 'M', u'غ'), - (0xFED1, 'M', u'Ù'), - (0xFED5, 'M', u'Ù‚'), - (0xFED9, 'M', u'Ùƒ'), - (0xFEDD, 'M', u'Ù„'), - (0xFEE1, 'M', u'Ù…'), - (0xFEE5, 'M', u'Ù†'), - (0xFEE9, 'M', u'Ù‡'), - (0xFEED, 'M', u'Ùˆ'), - (0xFEEF, 'M', u'Ù‰'), - (0xFEF1, 'M', u'ÙŠ'), - (0xFEF5, 'M', u'لآ'), - (0xFEF7, 'M', u'لأ'), - (0xFEF9, 'M', u'لإ'), - (0xFEFB, 'M', u'لا'), - (0xFEFD, 'X'), - (0xFEFF, 'I'), - (0xFF00, 'X'), - (0xFF01, '3', u'!'), - (0xFF02, '3', u'"'), - (0xFF03, '3', 
u'#'), - (0xFF04, '3', u'$'), - (0xFF05, '3', u'%'), - (0xFF06, '3', u'&'), - (0xFF07, '3', u'\''), - (0xFF08, '3', u'('), - (0xFF09, '3', u')'), - (0xFF0A, '3', u'*'), - (0xFF0B, '3', u'+'), - (0xFF0C, '3', u','), - (0xFF0D, 'M', u'-'), - (0xFF0E, 'M', u'.'), - (0xFF0F, '3', u'/'), - (0xFF10, 'M', u'0'), - (0xFF11, 'M', u'1'), - (0xFF12, 'M', u'2'), - (0xFF13, 'M', u'3'), - (0xFF14, 'M', u'4'), - (0xFF15, 'M', u'5'), - (0xFF16, 'M', u'6'), - (0xFF17, 'M', u'7'), - (0xFF18, 'M', u'8'), - (0xFF19, 'M', u'9'), - (0xFF1A, '3', u':'), - (0xFF1B, '3', u';'), - (0xFF1C, '3', u'<'), - (0xFF1D, '3', u'='), - (0xFF1E, '3', u'>'), - (0xFF1F, '3', u'?'), - (0xFF20, '3', u'@'), - (0xFF21, 'M', u'a'), - (0xFF22, 'M', u'b'), - (0xFF23, 'M', u'c'), - (0xFF24, 'M', u'd'), - (0xFF25, 'M', u'e'), - (0xFF26, 'M', u'f'), - (0xFF27, 'M', u'g'), - (0xFF28, 'M', u'h'), - (0xFF29, 'M', u'i'), - (0xFF2A, 'M', u'j'), - (0xFF2B, 'M', u'k'), - (0xFF2C, 'M', u'l'), - (0xFF2D, 'M', u'm'), - (0xFF2E, 'M', u'n'), - (0xFF2F, 'M', u'o'), - (0xFF30, 'M', u'p'), - (0xFF31, 'M', u'q'), - (0xFF32, 'M', u'r'), - (0xFF33, 'M', u's'), - (0xFF34, 'M', u't'), - (0xFF35, 'M', u'u'), - (0xFF36, 'M', u'v'), - (0xFF37, 'M', u'w'), - (0xFF38, 'M', u'x'), - (0xFF39, 'M', u'y'), - (0xFF3A, 'M', u'z'), - (0xFF3B, '3', u'['), - (0xFF3C, '3', u'\\'), - (0xFF3D, '3', u']'), - (0xFF3E, '3', u'^'), - (0xFF3F, '3', u'_'), - (0xFF40, '3', u'`'), - (0xFF41, 'M', u'a'), - (0xFF42, 'M', u'b'), - (0xFF43, 'M', u'c'), - (0xFF44, 'M', u'd'), - (0xFF45, 'M', u'e'), - (0xFF46, 'M', u'f'), - (0xFF47, 'M', u'g'), - (0xFF48, 'M', u'h'), - (0xFF49, 'M', u'i'), - (0xFF4A, 'M', u'j'), - (0xFF4B, 'M', u'k'), - (0xFF4C, 'M', u'l'), - (0xFF4D, 'M', u'm'), - (0xFF4E, 'M', u'n'), - ] - -def _seg_51(): - return [ - (0xFF4F, 'M', u'o'), - (0xFF50, 'M', u'p'), - (0xFF51, 'M', u'q'), - (0xFF52, 'M', u'r'), - (0xFF53, 'M', u's'), - (0xFF54, 'M', u't'), - (0xFF55, 'M', u'u'), - (0xFF56, 'M', u'v'), - (0xFF57, 'M', u'w'), - (0xFF58, 'M', u'x'), - 
(0xFF59, 'M', u'y'), - (0xFF5A, 'M', u'z'), - (0xFF5B, '3', u'{'), - (0xFF5C, '3', u'|'), - (0xFF5D, '3', u'}'), - (0xFF5E, '3', u'~'), - (0xFF5F, 'M', u'⦅'), - (0xFF60, 'M', u'⦆'), - (0xFF61, 'M', u'.'), - (0xFF62, 'M', u'「'), - (0xFF63, 'M', u'ã€'), - (0xFF64, 'M', u'ã€'), - (0xFF65, 'M', u'・'), - (0xFF66, 'M', u'ヲ'), - (0xFF67, 'M', u'ã‚¡'), - (0xFF68, 'M', u'ã‚£'), - (0xFF69, 'M', u'ã‚¥'), - (0xFF6A, 'M', u'ェ'), - (0xFF6B, 'M', u'ã‚©'), - (0xFF6C, 'M', u'ャ'), - (0xFF6D, 'M', u'ュ'), - (0xFF6E, 'M', u'ョ'), - (0xFF6F, 'M', u'ッ'), - (0xFF70, 'M', u'ー'), - (0xFF71, 'M', u'ã‚¢'), - (0xFF72, 'M', u'イ'), - (0xFF73, 'M', u'ウ'), - (0xFF74, 'M', u'エ'), - (0xFF75, 'M', u'オ'), - (0xFF76, 'M', u'ã‚«'), - (0xFF77, 'M', u'ã‚­'), - (0xFF78, 'M', u'ク'), - (0xFF79, 'M', u'ケ'), - (0xFF7A, 'M', u'コ'), - (0xFF7B, 'M', u'サ'), - (0xFF7C, 'M', u'ã‚·'), - (0xFF7D, 'M', u'ス'), - (0xFF7E, 'M', u'ã‚»'), - (0xFF7F, 'M', u'ソ'), - (0xFF80, 'M', u'ã‚¿'), - (0xFF81, 'M', u'ãƒ'), - (0xFF82, 'M', u'ツ'), - (0xFF83, 'M', u'テ'), - (0xFF84, 'M', u'ト'), - (0xFF85, 'M', u'ナ'), - (0xFF86, 'M', u'ニ'), - (0xFF87, 'M', u'ヌ'), - (0xFF88, 'M', u'ãƒ'), - (0xFF89, 'M', u'ノ'), - (0xFF8A, 'M', u'ãƒ'), - (0xFF8B, 'M', u'ヒ'), - (0xFF8C, 'M', u'フ'), - (0xFF8D, 'M', u'ヘ'), - (0xFF8E, 'M', u'ホ'), - (0xFF8F, 'M', u'マ'), - (0xFF90, 'M', u'ミ'), - (0xFF91, 'M', u'ム'), - (0xFF92, 'M', u'メ'), - (0xFF93, 'M', u'モ'), - (0xFF94, 'M', u'ヤ'), - (0xFF95, 'M', u'ユ'), - (0xFF96, 'M', u'ヨ'), - (0xFF97, 'M', u'ラ'), - (0xFF98, 'M', u'リ'), - (0xFF99, 'M', u'ル'), - (0xFF9A, 'M', u'レ'), - (0xFF9B, 'M', u'ロ'), - (0xFF9C, 'M', u'ワ'), - (0xFF9D, 'M', u'ン'), - (0xFF9E, 'M', u'ã‚™'), - (0xFF9F, 'M', u'ã‚š'), - (0xFFA0, 'X'), - (0xFFA1, 'M', u'á„€'), - (0xFFA2, 'M', u'á„'), - (0xFFA3, 'M', u'ᆪ'), - (0xFFA4, 'M', u'á„‚'), - (0xFFA5, 'M', u'ᆬ'), - (0xFFA6, 'M', u'ᆭ'), - (0xFFA7, 'M', u'ᄃ'), - (0xFFA8, 'M', u'á„„'), - (0xFFA9, 'M', u'á„…'), - (0xFFAA, 'M', u'ᆰ'), - (0xFFAB, 'M', u'ᆱ'), - (0xFFAC, 'M', u'ᆲ'), - (0xFFAD, 'M', u'ᆳ'), - (0xFFAE, 
'M', u'ᆴ'), - (0xFFAF, 'M', u'ᆵ'), - (0xFFB0, 'M', u'á„š'), - (0xFFB1, 'M', u'ᄆ'), - (0xFFB2, 'M', u'ᄇ'), - ] - -def _seg_52(): - return [ - (0xFFB3, 'M', u'ᄈ'), - (0xFFB4, 'M', u'á„¡'), - (0xFFB5, 'M', u'ᄉ'), - (0xFFB6, 'M', u'á„Š'), - (0xFFB7, 'M', u'á„‹'), - (0xFFB8, 'M', u'á„Œ'), - (0xFFB9, 'M', u'á„'), - (0xFFBA, 'M', u'á„Ž'), - (0xFFBB, 'M', u'á„'), - (0xFFBC, 'M', u'á„'), - (0xFFBD, 'M', u'á„‘'), - (0xFFBE, 'M', u'á„’'), - (0xFFBF, 'X'), - (0xFFC2, 'M', u'á…¡'), - (0xFFC3, 'M', u'á…¢'), - (0xFFC4, 'M', u'á…£'), - (0xFFC5, 'M', u'á…¤'), - (0xFFC6, 'M', u'á…¥'), - (0xFFC7, 'M', u'á…¦'), - (0xFFC8, 'X'), - (0xFFCA, 'M', u'á…§'), - (0xFFCB, 'M', u'á…¨'), - (0xFFCC, 'M', u'á…©'), - (0xFFCD, 'M', u'á…ª'), - (0xFFCE, 'M', u'á…«'), - (0xFFCF, 'M', u'á…¬'), - (0xFFD0, 'X'), - (0xFFD2, 'M', u'á…­'), - (0xFFD3, 'M', u'á…®'), - (0xFFD4, 'M', u'á…¯'), - (0xFFD5, 'M', u'á…°'), - (0xFFD6, 'M', u'á…±'), - (0xFFD7, 'M', u'á…²'), - (0xFFD8, 'X'), - (0xFFDA, 'M', u'á…³'), - (0xFFDB, 'M', u'á…´'), - (0xFFDC, 'M', u'á…µ'), - (0xFFDD, 'X'), - (0xFFE0, 'M', u'¢'), - (0xFFE1, 'M', u'£'), - (0xFFE2, 'M', u'¬'), - (0xFFE3, '3', u' Ì„'), - (0xFFE4, 'M', u'¦'), - (0xFFE5, 'M', u'Â¥'), - (0xFFE6, 'M', u'â‚©'), - (0xFFE7, 'X'), - (0xFFE8, 'M', u'│'), - (0xFFE9, 'M', u'â†'), - (0xFFEA, 'M', u'↑'), - (0xFFEB, 'M', u'→'), - (0xFFEC, 'M', u'↓'), - (0xFFED, 'M', u'â– '), - (0xFFEE, 'M', u'â—‹'), - (0xFFEF, 'X'), - (0x10000, 'V'), - (0x1000C, 'X'), - (0x1000D, 'V'), - (0x10027, 'X'), - (0x10028, 'V'), - (0x1003B, 'X'), - (0x1003C, 'V'), - (0x1003E, 'X'), - (0x1003F, 'V'), - (0x1004E, 'X'), - (0x10050, 'V'), - (0x1005E, 'X'), - (0x10080, 'V'), - (0x100FB, 'X'), - (0x10100, 'V'), - (0x10103, 'X'), - (0x10107, 'V'), - (0x10134, 'X'), - (0x10137, 'V'), - (0x1018F, 'X'), - (0x10190, 'V'), - (0x1019C, 'X'), - (0x101A0, 'V'), - (0x101A1, 'X'), - (0x101D0, 'V'), - (0x101FE, 'X'), - (0x10280, 'V'), - (0x1029D, 'X'), - (0x102A0, 'V'), - (0x102D1, 'X'), - (0x102E0, 'V'), - (0x102FC, 'X'), - (0x10300, 
'V'), - (0x10324, 'X'), - (0x1032D, 'V'), - (0x1034B, 'X'), - (0x10350, 'V'), - (0x1037B, 'X'), - (0x10380, 'V'), - (0x1039E, 'X'), - (0x1039F, 'V'), - (0x103C4, 'X'), - (0x103C8, 'V'), - (0x103D6, 'X'), - (0x10400, 'M', u'ð¨'), - (0x10401, 'M', u'ð©'), - ] - -def _seg_53(): - return [ - (0x10402, 'M', u'ðª'), - (0x10403, 'M', u'ð«'), - (0x10404, 'M', u'ð¬'), - (0x10405, 'M', u'ð­'), - (0x10406, 'M', u'ð®'), - (0x10407, 'M', u'ð¯'), - (0x10408, 'M', u'ð°'), - (0x10409, 'M', u'ð±'), - (0x1040A, 'M', u'ð²'), - (0x1040B, 'M', u'ð³'), - (0x1040C, 'M', u'ð´'), - (0x1040D, 'M', u'ðµ'), - (0x1040E, 'M', u'ð¶'), - (0x1040F, 'M', u'ð·'), - (0x10410, 'M', u'ð¸'), - (0x10411, 'M', u'ð¹'), - (0x10412, 'M', u'ðº'), - (0x10413, 'M', u'ð»'), - (0x10414, 'M', u'ð¼'), - (0x10415, 'M', u'ð½'), - (0x10416, 'M', u'ð¾'), - (0x10417, 'M', u'ð¿'), - (0x10418, 'M', u'ð‘€'), - (0x10419, 'M', u'ð‘'), - (0x1041A, 'M', u'ð‘‚'), - (0x1041B, 'M', u'ð‘ƒ'), - (0x1041C, 'M', u'ð‘„'), - (0x1041D, 'M', u'ð‘…'), - (0x1041E, 'M', u'ð‘†'), - (0x1041F, 'M', u'ð‘‡'), - (0x10420, 'M', u'ð‘ˆ'), - (0x10421, 'M', u'ð‘‰'), - (0x10422, 'M', u'ð‘Š'), - (0x10423, 'M', u'ð‘‹'), - (0x10424, 'M', u'ð‘Œ'), - (0x10425, 'M', u'ð‘'), - (0x10426, 'M', u'ð‘Ž'), - (0x10427, 'M', u'ð‘'), - (0x10428, 'V'), - (0x1049E, 'X'), - (0x104A0, 'V'), - (0x104AA, 'X'), - (0x104B0, 'M', u'ð“˜'), - (0x104B1, 'M', u'ð“™'), - (0x104B2, 'M', u'ð“š'), - (0x104B3, 'M', u'ð“›'), - (0x104B4, 'M', u'ð“œ'), - (0x104B5, 'M', u'ð“'), - (0x104B6, 'M', u'ð“ž'), - (0x104B7, 'M', u'ð“Ÿ'), - (0x104B8, 'M', u'ð“ '), - (0x104B9, 'M', u'ð“¡'), - (0x104BA, 'M', u'ð“¢'), - (0x104BB, 'M', u'ð“£'), - (0x104BC, 'M', u'ð“¤'), - (0x104BD, 'M', u'ð“¥'), - (0x104BE, 'M', u'ð“¦'), - (0x104BF, 'M', u'ð“§'), - (0x104C0, 'M', u'ð“¨'), - (0x104C1, 'M', u'ð“©'), - (0x104C2, 'M', u'ð“ª'), - (0x104C3, 'M', u'ð“«'), - (0x104C4, 'M', u'ð“¬'), - (0x104C5, 'M', u'ð“­'), - (0x104C6, 'M', u'ð“®'), - (0x104C7, 'M', u'ð“¯'), - (0x104C8, 'M', u'ð“°'), - (0x104C9, 'M', u'ð“±'), - 
(0x104CA, 'M', u'ð“²'), - (0x104CB, 'M', u'ð“³'), - (0x104CC, 'M', u'ð“´'), - (0x104CD, 'M', u'ð“µ'), - (0x104CE, 'M', u'ð“¶'), - (0x104CF, 'M', u'ð“·'), - (0x104D0, 'M', u'ð“¸'), - (0x104D1, 'M', u'ð“¹'), - (0x104D2, 'M', u'ð“º'), - (0x104D3, 'M', u'ð“»'), - (0x104D4, 'X'), - (0x104D8, 'V'), - (0x104FC, 'X'), - (0x10500, 'V'), - (0x10528, 'X'), - (0x10530, 'V'), - (0x10564, 'X'), - (0x1056F, 'V'), - (0x10570, 'X'), - (0x10600, 'V'), - (0x10737, 'X'), - (0x10740, 'V'), - (0x10756, 'X'), - (0x10760, 'V'), - (0x10768, 'X'), - (0x10800, 'V'), - (0x10806, 'X'), - (0x10808, 'V'), - (0x10809, 'X'), - (0x1080A, 'V'), - (0x10836, 'X'), - (0x10837, 'V'), - ] - -def _seg_54(): - return [ - (0x10839, 'X'), - (0x1083C, 'V'), - (0x1083D, 'X'), - (0x1083F, 'V'), - (0x10856, 'X'), - (0x10857, 'V'), - (0x1089F, 'X'), - (0x108A7, 'V'), - (0x108B0, 'X'), - (0x108E0, 'V'), - (0x108F3, 'X'), - (0x108F4, 'V'), - (0x108F6, 'X'), - (0x108FB, 'V'), - (0x1091C, 'X'), - (0x1091F, 'V'), - (0x1093A, 'X'), - (0x1093F, 'V'), - (0x10940, 'X'), - (0x10980, 'V'), - (0x109B8, 'X'), - (0x109BC, 'V'), - (0x109D0, 'X'), - (0x109D2, 'V'), - (0x10A04, 'X'), - (0x10A05, 'V'), - (0x10A07, 'X'), - (0x10A0C, 'V'), - (0x10A14, 'X'), - (0x10A15, 'V'), - (0x10A18, 'X'), - (0x10A19, 'V'), - (0x10A36, 'X'), - (0x10A38, 'V'), - (0x10A3B, 'X'), - (0x10A3F, 'V'), - (0x10A49, 'X'), - (0x10A50, 'V'), - (0x10A59, 'X'), - (0x10A60, 'V'), - (0x10AA0, 'X'), - (0x10AC0, 'V'), - (0x10AE7, 'X'), - (0x10AEB, 'V'), - (0x10AF7, 'X'), - (0x10B00, 'V'), - (0x10B36, 'X'), - (0x10B39, 'V'), - (0x10B56, 'X'), - (0x10B58, 'V'), - (0x10B73, 'X'), - (0x10B78, 'V'), - (0x10B92, 'X'), - (0x10B99, 'V'), - (0x10B9D, 'X'), - (0x10BA9, 'V'), - (0x10BB0, 'X'), - (0x10C00, 'V'), - (0x10C49, 'X'), - (0x10C80, 'M', u'ð³€'), - (0x10C81, 'M', u'ð³'), - (0x10C82, 'M', u'ð³‚'), - (0x10C83, 'M', u'ð³ƒ'), - (0x10C84, 'M', u'ð³„'), - (0x10C85, 'M', u'ð³…'), - (0x10C86, 'M', u'ð³†'), - (0x10C87, 'M', u'ð³‡'), - (0x10C88, 'M', u'ð³ˆ'), - (0x10C89, 'M', 
u'ð³‰'), - (0x10C8A, 'M', u'ð³Š'), - (0x10C8B, 'M', u'ð³‹'), - (0x10C8C, 'M', u'ð³Œ'), - (0x10C8D, 'M', u'ð³'), - (0x10C8E, 'M', u'ð³Ž'), - (0x10C8F, 'M', u'ð³'), - (0x10C90, 'M', u'ð³'), - (0x10C91, 'M', u'ð³‘'), - (0x10C92, 'M', u'ð³’'), - (0x10C93, 'M', u'ð³“'), - (0x10C94, 'M', u'ð³”'), - (0x10C95, 'M', u'ð³•'), - (0x10C96, 'M', u'ð³–'), - (0x10C97, 'M', u'ð³—'), - (0x10C98, 'M', u'ð³˜'), - (0x10C99, 'M', u'ð³™'), - (0x10C9A, 'M', u'ð³š'), - (0x10C9B, 'M', u'ð³›'), - (0x10C9C, 'M', u'ð³œ'), - (0x10C9D, 'M', u'ð³'), - (0x10C9E, 'M', u'ð³ž'), - (0x10C9F, 'M', u'ð³Ÿ'), - (0x10CA0, 'M', u'ð³ '), - (0x10CA1, 'M', u'ð³¡'), - (0x10CA2, 'M', u'ð³¢'), - (0x10CA3, 'M', u'ð³£'), - (0x10CA4, 'M', u'ð³¤'), - (0x10CA5, 'M', u'ð³¥'), - (0x10CA6, 'M', u'ð³¦'), - (0x10CA7, 'M', u'ð³§'), - (0x10CA8, 'M', u'ð³¨'), - ] - -def _seg_55(): - return [ - (0x10CA9, 'M', u'ð³©'), - (0x10CAA, 'M', u'ð³ª'), - (0x10CAB, 'M', u'ð³«'), - (0x10CAC, 'M', u'ð³¬'), - (0x10CAD, 'M', u'ð³­'), - (0x10CAE, 'M', u'ð³®'), - (0x10CAF, 'M', u'ð³¯'), - (0x10CB0, 'M', u'ð³°'), - (0x10CB1, 'M', u'ð³±'), - (0x10CB2, 'M', u'ð³²'), - (0x10CB3, 'X'), - (0x10CC0, 'V'), - (0x10CF3, 'X'), - (0x10CFA, 'V'), - (0x10D28, 'X'), - (0x10D30, 'V'), - (0x10D3A, 'X'), - (0x10E60, 'V'), - (0x10E7F, 'X'), - (0x10F00, 'V'), - (0x10F28, 'X'), - (0x10F30, 'V'), - (0x10F5A, 'X'), - (0x11000, 'V'), - (0x1104E, 'X'), - (0x11052, 'V'), - (0x11070, 'X'), - (0x1107F, 'V'), - (0x110BD, 'X'), - (0x110BE, 'V'), - (0x110C2, 'X'), - (0x110D0, 'V'), - (0x110E9, 'X'), - (0x110F0, 'V'), - (0x110FA, 'X'), - (0x11100, 'V'), - (0x11135, 'X'), - (0x11136, 'V'), - (0x11147, 'X'), - (0x11150, 'V'), - (0x11177, 'X'), - (0x11180, 'V'), - (0x111CE, 'X'), - (0x111D0, 'V'), - (0x111E0, 'X'), - (0x111E1, 'V'), - (0x111F5, 'X'), - (0x11200, 'V'), - (0x11212, 'X'), - (0x11213, 'V'), - (0x1123F, 'X'), - (0x11280, 'V'), - (0x11287, 'X'), - (0x11288, 'V'), - (0x11289, 'X'), - (0x1128A, 'V'), - (0x1128E, 'X'), - (0x1128F, 'V'), - (0x1129E, 'X'), - (0x1129F, 
'V'), - (0x112AA, 'X'), - (0x112B0, 'V'), - (0x112EB, 'X'), - (0x112F0, 'V'), - (0x112FA, 'X'), - (0x11300, 'V'), - (0x11304, 'X'), - (0x11305, 'V'), - (0x1130D, 'X'), - (0x1130F, 'V'), - (0x11311, 'X'), - (0x11313, 'V'), - (0x11329, 'X'), - (0x1132A, 'V'), - (0x11331, 'X'), - (0x11332, 'V'), - (0x11334, 'X'), - (0x11335, 'V'), - (0x1133A, 'X'), - (0x1133B, 'V'), - (0x11345, 'X'), - (0x11347, 'V'), - (0x11349, 'X'), - (0x1134B, 'V'), - (0x1134E, 'X'), - (0x11350, 'V'), - (0x11351, 'X'), - (0x11357, 'V'), - (0x11358, 'X'), - (0x1135D, 'V'), - (0x11364, 'X'), - (0x11366, 'V'), - (0x1136D, 'X'), - (0x11370, 'V'), - (0x11375, 'X'), - (0x11400, 'V'), - (0x1145A, 'X'), - (0x1145B, 'V'), - (0x1145C, 'X'), - (0x1145D, 'V'), - ] - -def _seg_56(): - return [ - (0x1145F, 'X'), - (0x11480, 'V'), - (0x114C8, 'X'), - (0x114D0, 'V'), - (0x114DA, 'X'), - (0x11580, 'V'), - (0x115B6, 'X'), - (0x115B8, 'V'), - (0x115DE, 'X'), - (0x11600, 'V'), - (0x11645, 'X'), - (0x11650, 'V'), - (0x1165A, 'X'), - (0x11660, 'V'), - (0x1166D, 'X'), - (0x11680, 'V'), - (0x116B8, 'X'), - (0x116C0, 'V'), - (0x116CA, 'X'), - (0x11700, 'V'), - (0x1171B, 'X'), - (0x1171D, 'V'), - (0x1172C, 'X'), - (0x11730, 'V'), - (0x11740, 'X'), - (0x11800, 'V'), - (0x1183C, 'X'), - (0x118A0, 'M', u'ð‘£€'), - (0x118A1, 'M', u'ð‘£'), - (0x118A2, 'M', u'𑣂'), - (0x118A3, 'M', u'𑣃'), - (0x118A4, 'M', u'𑣄'), - (0x118A5, 'M', u'ð‘£…'), - (0x118A6, 'M', u'𑣆'), - (0x118A7, 'M', u'𑣇'), - (0x118A8, 'M', u'𑣈'), - (0x118A9, 'M', u'𑣉'), - (0x118AA, 'M', u'𑣊'), - (0x118AB, 'M', u'𑣋'), - (0x118AC, 'M', u'𑣌'), - (0x118AD, 'M', u'ð‘£'), - (0x118AE, 'M', u'𑣎'), - (0x118AF, 'M', u'ð‘£'), - (0x118B0, 'M', u'ð‘£'), - (0x118B1, 'M', u'𑣑'), - (0x118B2, 'M', u'ð‘£’'), - (0x118B3, 'M', u'𑣓'), - (0x118B4, 'M', u'ð‘£”'), - (0x118B5, 'M', u'𑣕'), - (0x118B6, 'M', u'ð‘£–'), - (0x118B7, 'M', u'ð‘£—'), - (0x118B8, 'M', u'𑣘'), - (0x118B9, 'M', u'ð‘£™'), - (0x118BA, 'M', u'𑣚'), - (0x118BB, 'M', u'ð‘£›'), - (0x118BC, 'M', u'𑣜'), - (0x118BD, 'M', u'ð‘£'), 
- (0x118BE, 'M', u'𑣞'), - (0x118BF, 'M', u'𑣟'), - (0x118C0, 'V'), - (0x118F3, 'X'), - (0x118FF, 'V'), - (0x11900, 'X'), - (0x11A00, 'V'), - (0x11A48, 'X'), - (0x11A50, 'V'), - (0x11A84, 'X'), - (0x11A86, 'V'), - (0x11AA3, 'X'), - (0x11AC0, 'V'), - (0x11AF9, 'X'), - (0x11C00, 'V'), - (0x11C09, 'X'), - (0x11C0A, 'V'), - (0x11C37, 'X'), - (0x11C38, 'V'), - (0x11C46, 'X'), - (0x11C50, 'V'), - (0x11C6D, 'X'), - (0x11C70, 'V'), - (0x11C90, 'X'), - (0x11C92, 'V'), - (0x11CA8, 'X'), - (0x11CA9, 'V'), - (0x11CB7, 'X'), - (0x11D00, 'V'), - (0x11D07, 'X'), - (0x11D08, 'V'), - (0x11D0A, 'X'), - (0x11D0B, 'V'), - (0x11D37, 'X'), - (0x11D3A, 'V'), - (0x11D3B, 'X'), - (0x11D3C, 'V'), - (0x11D3E, 'X'), - (0x11D3F, 'V'), - (0x11D48, 'X'), - (0x11D50, 'V'), - (0x11D5A, 'X'), - (0x11D60, 'V'), - ] - -def _seg_57(): - return [ - (0x11D66, 'X'), - (0x11D67, 'V'), - (0x11D69, 'X'), - (0x11D6A, 'V'), - (0x11D8F, 'X'), - (0x11D90, 'V'), - (0x11D92, 'X'), - (0x11D93, 'V'), - (0x11D99, 'X'), - (0x11DA0, 'V'), - (0x11DAA, 'X'), - (0x11EE0, 'V'), - (0x11EF9, 'X'), - (0x12000, 'V'), - (0x1239A, 'X'), - (0x12400, 'V'), - (0x1246F, 'X'), - (0x12470, 'V'), - (0x12475, 'X'), - (0x12480, 'V'), - (0x12544, 'X'), - (0x13000, 'V'), - (0x1342F, 'X'), - (0x14400, 'V'), - (0x14647, 'X'), - (0x16800, 'V'), - (0x16A39, 'X'), - (0x16A40, 'V'), - (0x16A5F, 'X'), - (0x16A60, 'V'), - (0x16A6A, 'X'), - (0x16A6E, 'V'), - (0x16A70, 'X'), - (0x16AD0, 'V'), - (0x16AEE, 'X'), - (0x16AF0, 'V'), - (0x16AF6, 'X'), - (0x16B00, 'V'), - (0x16B46, 'X'), - (0x16B50, 'V'), - (0x16B5A, 'X'), - (0x16B5B, 'V'), - (0x16B62, 'X'), - (0x16B63, 'V'), - (0x16B78, 'X'), - (0x16B7D, 'V'), - (0x16B90, 'X'), - (0x16E60, 'V'), - (0x16E9B, 'X'), - (0x16F00, 'V'), - (0x16F45, 'X'), - (0x16F50, 'V'), - (0x16F7F, 'X'), - (0x16F8F, 'V'), - (0x16FA0, 'X'), - (0x16FE0, 'V'), - (0x16FE2, 'X'), - (0x17000, 'V'), - (0x187F2, 'X'), - (0x18800, 'V'), - (0x18AF3, 'X'), - (0x1B000, 'V'), - (0x1B11F, 'X'), - (0x1B170, 'V'), - (0x1B2FC, 'X'), - 
(0x1BC00, 'V'), - (0x1BC6B, 'X'), - (0x1BC70, 'V'), - (0x1BC7D, 'X'), - (0x1BC80, 'V'), - (0x1BC89, 'X'), - (0x1BC90, 'V'), - (0x1BC9A, 'X'), - (0x1BC9C, 'V'), - (0x1BCA0, 'I'), - (0x1BCA4, 'X'), - (0x1D000, 'V'), - (0x1D0F6, 'X'), - (0x1D100, 'V'), - (0x1D127, 'X'), - (0x1D129, 'V'), - (0x1D15E, 'M', u'ð…—ð…¥'), - (0x1D15F, 'M', u'ð…˜ð…¥'), - (0x1D160, 'M', u'ð…˜ð…¥ð…®'), - (0x1D161, 'M', u'ð…˜ð…¥ð…¯'), - (0x1D162, 'M', u'ð…˜ð…¥ð…°'), - (0x1D163, 'M', u'ð…˜ð…¥ð…±'), - (0x1D164, 'M', u'ð…˜ð…¥ð…²'), - (0x1D165, 'V'), - (0x1D173, 'X'), - (0x1D17B, 'V'), - (0x1D1BB, 'M', u'ð†¹ð…¥'), - (0x1D1BC, 'M', u'ð†ºð…¥'), - (0x1D1BD, 'M', u'ð†¹ð…¥ð…®'), - (0x1D1BE, 'M', u'ð†ºð…¥ð…®'), - (0x1D1BF, 'M', u'ð†¹ð…¥ð…¯'), - (0x1D1C0, 'M', u'ð†ºð…¥ð…¯'), - (0x1D1C1, 'V'), - (0x1D1E9, 'X'), - (0x1D200, 'V'), - ] - -def _seg_58(): - return [ - (0x1D246, 'X'), - (0x1D2E0, 'V'), - (0x1D2F4, 'X'), - (0x1D300, 'V'), - (0x1D357, 'X'), - (0x1D360, 'V'), - (0x1D379, 'X'), - (0x1D400, 'M', u'a'), - (0x1D401, 'M', u'b'), - (0x1D402, 'M', u'c'), - (0x1D403, 'M', u'd'), - (0x1D404, 'M', u'e'), - (0x1D405, 'M', u'f'), - (0x1D406, 'M', u'g'), - (0x1D407, 'M', u'h'), - (0x1D408, 'M', u'i'), - (0x1D409, 'M', u'j'), - (0x1D40A, 'M', u'k'), - (0x1D40B, 'M', u'l'), - (0x1D40C, 'M', u'm'), - (0x1D40D, 'M', u'n'), - (0x1D40E, 'M', u'o'), - (0x1D40F, 'M', u'p'), - (0x1D410, 'M', u'q'), - (0x1D411, 'M', u'r'), - (0x1D412, 'M', u's'), - (0x1D413, 'M', u't'), - (0x1D414, 'M', u'u'), - (0x1D415, 'M', u'v'), - (0x1D416, 'M', u'w'), - (0x1D417, 'M', u'x'), - (0x1D418, 'M', u'y'), - (0x1D419, 'M', u'z'), - (0x1D41A, 'M', u'a'), - (0x1D41B, 'M', u'b'), - (0x1D41C, 'M', u'c'), - (0x1D41D, 'M', u'd'), - (0x1D41E, 'M', u'e'), - (0x1D41F, 'M', u'f'), - (0x1D420, 'M', u'g'), - (0x1D421, 'M', u'h'), - (0x1D422, 'M', u'i'), - (0x1D423, 'M', u'j'), - (0x1D424, 'M', u'k'), - (0x1D425, 'M', u'l'), - (0x1D426, 'M', u'm'), - (0x1D427, 'M', u'n'), - (0x1D428, 'M', u'o'), - (0x1D429, 'M', u'p'), - (0x1D42A, 'M', u'q'), - 
(0x1D42B, 'M', u'r'), - (0x1D42C, 'M', u's'), - (0x1D42D, 'M', u't'), - (0x1D42E, 'M', u'u'), - (0x1D42F, 'M', u'v'), - (0x1D430, 'M', u'w'), - (0x1D431, 'M', u'x'), - (0x1D432, 'M', u'y'), - (0x1D433, 'M', u'z'), - (0x1D434, 'M', u'a'), - (0x1D435, 'M', u'b'), - (0x1D436, 'M', u'c'), - (0x1D437, 'M', u'd'), - (0x1D438, 'M', u'e'), - (0x1D439, 'M', u'f'), - (0x1D43A, 'M', u'g'), - (0x1D43B, 'M', u'h'), - (0x1D43C, 'M', u'i'), - (0x1D43D, 'M', u'j'), - (0x1D43E, 'M', u'k'), - (0x1D43F, 'M', u'l'), - (0x1D440, 'M', u'm'), - (0x1D441, 'M', u'n'), - (0x1D442, 'M', u'o'), - (0x1D443, 'M', u'p'), - (0x1D444, 'M', u'q'), - (0x1D445, 'M', u'r'), - (0x1D446, 'M', u's'), - (0x1D447, 'M', u't'), - (0x1D448, 'M', u'u'), - (0x1D449, 'M', u'v'), - (0x1D44A, 'M', u'w'), - (0x1D44B, 'M', u'x'), - (0x1D44C, 'M', u'y'), - (0x1D44D, 'M', u'z'), - (0x1D44E, 'M', u'a'), - (0x1D44F, 'M', u'b'), - (0x1D450, 'M', u'c'), - (0x1D451, 'M', u'd'), - (0x1D452, 'M', u'e'), - (0x1D453, 'M', u'f'), - (0x1D454, 'M', u'g'), - (0x1D455, 'X'), - (0x1D456, 'M', u'i'), - (0x1D457, 'M', u'j'), - (0x1D458, 'M', u'k'), - (0x1D459, 'M', u'l'), - (0x1D45A, 'M', u'm'), - (0x1D45B, 'M', u'n'), - (0x1D45C, 'M', u'o'), - ] - -def _seg_59(): - return [ - (0x1D45D, 'M', u'p'), - (0x1D45E, 'M', u'q'), - (0x1D45F, 'M', u'r'), - (0x1D460, 'M', u's'), - (0x1D461, 'M', u't'), - (0x1D462, 'M', u'u'), - (0x1D463, 'M', u'v'), - (0x1D464, 'M', u'w'), - (0x1D465, 'M', u'x'), - (0x1D466, 'M', u'y'), - (0x1D467, 'M', u'z'), - (0x1D468, 'M', u'a'), - (0x1D469, 'M', u'b'), - (0x1D46A, 'M', u'c'), - (0x1D46B, 'M', u'd'), - (0x1D46C, 'M', u'e'), - (0x1D46D, 'M', u'f'), - (0x1D46E, 'M', u'g'), - (0x1D46F, 'M', u'h'), - (0x1D470, 'M', u'i'), - (0x1D471, 'M', u'j'), - (0x1D472, 'M', u'k'), - (0x1D473, 'M', u'l'), - (0x1D474, 'M', u'm'), - (0x1D475, 'M', u'n'), - (0x1D476, 'M', u'o'), - (0x1D477, 'M', u'p'), - (0x1D478, 'M', u'q'), - (0x1D479, 'M', u'r'), - (0x1D47A, 'M', u's'), - (0x1D47B, 'M', u't'), - (0x1D47C, 'M', u'u'), - 
(0x1D47D, 'M', u'v'), - (0x1D47E, 'M', u'w'), - (0x1D47F, 'M', u'x'), - (0x1D480, 'M', u'y'), - (0x1D481, 'M', u'z'), - (0x1D482, 'M', u'a'), - (0x1D483, 'M', u'b'), - (0x1D484, 'M', u'c'), - (0x1D485, 'M', u'd'), - (0x1D486, 'M', u'e'), - (0x1D487, 'M', u'f'), - (0x1D488, 'M', u'g'), - (0x1D489, 'M', u'h'), - (0x1D48A, 'M', u'i'), - (0x1D48B, 'M', u'j'), - (0x1D48C, 'M', u'k'), - (0x1D48D, 'M', u'l'), - (0x1D48E, 'M', u'm'), - (0x1D48F, 'M', u'n'), - (0x1D490, 'M', u'o'), - (0x1D491, 'M', u'p'), - (0x1D492, 'M', u'q'), - (0x1D493, 'M', u'r'), - (0x1D494, 'M', u's'), - (0x1D495, 'M', u't'), - (0x1D496, 'M', u'u'), - (0x1D497, 'M', u'v'), - (0x1D498, 'M', u'w'), - (0x1D499, 'M', u'x'), - (0x1D49A, 'M', u'y'), - (0x1D49B, 'M', u'z'), - (0x1D49C, 'M', u'a'), - (0x1D49D, 'X'), - (0x1D49E, 'M', u'c'), - (0x1D49F, 'M', u'd'), - (0x1D4A0, 'X'), - (0x1D4A2, 'M', u'g'), - (0x1D4A3, 'X'), - (0x1D4A5, 'M', u'j'), - (0x1D4A6, 'M', u'k'), - (0x1D4A7, 'X'), - (0x1D4A9, 'M', u'n'), - (0x1D4AA, 'M', u'o'), - (0x1D4AB, 'M', u'p'), - (0x1D4AC, 'M', u'q'), - (0x1D4AD, 'X'), - (0x1D4AE, 'M', u's'), - (0x1D4AF, 'M', u't'), - (0x1D4B0, 'M', u'u'), - (0x1D4B1, 'M', u'v'), - (0x1D4B2, 'M', u'w'), - (0x1D4B3, 'M', u'x'), - (0x1D4B4, 'M', u'y'), - (0x1D4B5, 'M', u'z'), - (0x1D4B6, 'M', u'a'), - (0x1D4B7, 'M', u'b'), - (0x1D4B8, 'M', u'c'), - (0x1D4B9, 'M', u'd'), - (0x1D4BA, 'X'), - (0x1D4BB, 'M', u'f'), - (0x1D4BC, 'X'), - (0x1D4BD, 'M', u'h'), - (0x1D4BE, 'M', u'i'), - (0x1D4BF, 'M', u'j'), - (0x1D4C0, 'M', u'k'), - (0x1D4C1, 'M', u'l'), - (0x1D4C2, 'M', u'm'), - (0x1D4C3, 'M', u'n'), - ] - -def _seg_60(): - return [ - (0x1D4C4, 'X'), - (0x1D4C5, 'M', u'p'), - (0x1D4C6, 'M', u'q'), - (0x1D4C7, 'M', u'r'), - (0x1D4C8, 'M', u's'), - (0x1D4C9, 'M', u't'), - (0x1D4CA, 'M', u'u'), - (0x1D4CB, 'M', u'v'), - (0x1D4CC, 'M', u'w'), - (0x1D4CD, 'M', u'x'), - (0x1D4CE, 'M', u'y'), - (0x1D4CF, 'M', u'z'), - (0x1D4D0, 'M', u'a'), - (0x1D4D1, 'M', u'b'), - (0x1D4D2, 'M', u'c'), - (0x1D4D3, 'M', u'd'), 
- (0x1D4D4, 'M', u'e'), - (0x1D4D5, 'M', u'f'), - (0x1D4D6, 'M', u'g'), - (0x1D4D7, 'M', u'h'), - (0x1D4D8, 'M', u'i'), - (0x1D4D9, 'M', u'j'), - (0x1D4DA, 'M', u'k'), - (0x1D4DB, 'M', u'l'), - (0x1D4DC, 'M', u'm'), - (0x1D4DD, 'M', u'n'), - (0x1D4DE, 'M', u'o'), - (0x1D4DF, 'M', u'p'), - (0x1D4E0, 'M', u'q'), - (0x1D4E1, 'M', u'r'), - (0x1D4E2, 'M', u's'), - (0x1D4E3, 'M', u't'), - (0x1D4E4, 'M', u'u'), - (0x1D4E5, 'M', u'v'), - (0x1D4E6, 'M', u'w'), - (0x1D4E7, 'M', u'x'), - (0x1D4E8, 'M', u'y'), - (0x1D4E9, 'M', u'z'), - (0x1D4EA, 'M', u'a'), - (0x1D4EB, 'M', u'b'), - (0x1D4EC, 'M', u'c'), - (0x1D4ED, 'M', u'd'), - (0x1D4EE, 'M', u'e'), - (0x1D4EF, 'M', u'f'), - (0x1D4F0, 'M', u'g'), - (0x1D4F1, 'M', u'h'), - (0x1D4F2, 'M', u'i'), - (0x1D4F3, 'M', u'j'), - (0x1D4F4, 'M', u'k'), - (0x1D4F5, 'M', u'l'), - (0x1D4F6, 'M', u'm'), - (0x1D4F7, 'M', u'n'), - (0x1D4F8, 'M', u'o'), - (0x1D4F9, 'M', u'p'), - (0x1D4FA, 'M', u'q'), - (0x1D4FB, 'M', u'r'), - (0x1D4FC, 'M', u's'), - (0x1D4FD, 'M', u't'), - (0x1D4FE, 'M', u'u'), - (0x1D4FF, 'M', u'v'), - (0x1D500, 'M', u'w'), - (0x1D501, 'M', u'x'), - (0x1D502, 'M', u'y'), - (0x1D503, 'M', u'z'), - (0x1D504, 'M', u'a'), - (0x1D505, 'M', u'b'), - (0x1D506, 'X'), - (0x1D507, 'M', u'd'), - (0x1D508, 'M', u'e'), - (0x1D509, 'M', u'f'), - (0x1D50A, 'M', u'g'), - (0x1D50B, 'X'), - (0x1D50D, 'M', u'j'), - (0x1D50E, 'M', u'k'), - (0x1D50F, 'M', u'l'), - (0x1D510, 'M', u'm'), - (0x1D511, 'M', u'n'), - (0x1D512, 'M', u'o'), - (0x1D513, 'M', u'p'), - (0x1D514, 'M', u'q'), - (0x1D515, 'X'), - (0x1D516, 'M', u's'), - (0x1D517, 'M', u't'), - (0x1D518, 'M', u'u'), - (0x1D519, 'M', u'v'), - (0x1D51A, 'M', u'w'), - (0x1D51B, 'M', u'x'), - (0x1D51C, 'M', u'y'), - (0x1D51D, 'X'), - (0x1D51E, 'M', u'a'), - (0x1D51F, 'M', u'b'), - (0x1D520, 'M', u'c'), - (0x1D521, 'M', u'd'), - (0x1D522, 'M', u'e'), - (0x1D523, 'M', u'f'), - (0x1D524, 'M', u'g'), - (0x1D525, 'M', u'h'), - (0x1D526, 'M', u'i'), - (0x1D527, 'M', u'j'), - (0x1D528, 'M', u'k'), - ] - 
-def _seg_61(): - return [ - (0x1D529, 'M', u'l'), - (0x1D52A, 'M', u'm'), - (0x1D52B, 'M', u'n'), - (0x1D52C, 'M', u'o'), - (0x1D52D, 'M', u'p'), - (0x1D52E, 'M', u'q'), - (0x1D52F, 'M', u'r'), - (0x1D530, 'M', u's'), - (0x1D531, 'M', u't'), - (0x1D532, 'M', u'u'), - (0x1D533, 'M', u'v'), - (0x1D534, 'M', u'w'), - (0x1D535, 'M', u'x'), - (0x1D536, 'M', u'y'), - (0x1D537, 'M', u'z'), - (0x1D538, 'M', u'a'), - (0x1D539, 'M', u'b'), - (0x1D53A, 'X'), - (0x1D53B, 'M', u'd'), - (0x1D53C, 'M', u'e'), - (0x1D53D, 'M', u'f'), - (0x1D53E, 'M', u'g'), - (0x1D53F, 'X'), - (0x1D540, 'M', u'i'), - (0x1D541, 'M', u'j'), - (0x1D542, 'M', u'k'), - (0x1D543, 'M', u'l'), - (0x1D544, 'M', u'm'), - (0x1D545, 'X'), - (0x1D546, 'M', u'o'), - (0x1D547, 'X'), - (0x1D54A, 'M', u's'), - (0x1D54B, 'M', u't'), - (0x1D54C, 'M', u'u'), - (0x1D54D, 'M', u'v'), - (0x1D54E, 'M', u'w'), - (0x1D54F, 'M', u'x'), - (0x1D550, 'M', u'y'), - (0x1D551, 'X'), - (0x1D552, 'M', u'a'), - (0x1D553, 'M', u'b'), - (0x1D554, 'M', u'c'), - (0x1D555, 'M', u'd'), - (0x1D556, 'M', u'e'), - (0x1D557, 'M', u'f'), - (0x1D558, 'M', u'g'), - (0x1D559, 'M', u'h'), - (0x1D55A, 'M', u'i'), - (0x1D55B, 'M', u'j'), - (0x1D55C, 'M', u'k'), - (0x1D55D, 'M', u'l'), - (0x1D55E, 'M', u'm'), - (0x1D55F, 'M', u'n'), - (0x1D560, 'M', u'o'), - (0x1D561, 'M', u'p'), - (0x1D562, 'M', u'q'), - (0x1D563, 'M', u'r'), - (0x1D564, 'M', u's'), - (0x1D565, 'M', u't'), - (0x1D566, 'M', u'u'), - (0x1D567, 'M', u'v'), - (0x1D568, 'M', u'w'), - (0x1D569, 'M', u'x'), - (0x1D56A, 'M', u'y'), - (0x1D56B, 'M', u'z'), - (0x1D56C, 'M', u'a'), - (0x1D56D, 'M', u'b'), - (0x1D56E, 'M', u'c'), - (0x1D56F, 'M', u'd'), - (0x1D570, 'M', u'e'), - (0x1D571, 'M', u'f'), - (0x1D572, 'M', u'g'), - (0x1D573, 'M', u'h'), - (0x1D574, 'M', u'i'), - (0x1D575, 'M', u'j'), - (0x1D576, 'M', u'k'), - (0x1D577, 'M', u'l'), - (0x1D578, 'M', u'm'), - (0x1D579, 'M', u'n'), - (0x1D57A, 'M', u'o'), - (0x1D57B, 'M', u'p'), - (0x1D57C, 'M', u'q'), - (0x1D57D, 'M', u'r'), - 
(0x1D57E, 'M', u's'), - (0x1D57F, 'M', u't'), - (0x1D580, 'M', u'u'), - (0x1D581, 'M', u'v'), - (0x1D582, 'M', u'w'), - (0x1D583, 'M', u'x'), - (0x1D584, 'M', u'y'), - (0x1D585, 'M', u'z'), - (0x1D586, 'M', u'a'), - (0x1D587, 'M', u'b'), - (0x1D588, 'M', u'c'), - (0x1D589, 'M', u'd'), - (0x1D58A, 'M', u'e'), - (0x1D58B, 'M', u'f'), - (0x1D58C, 'M', u'g'), - (0x1D58D, 'M', u'h'), - (0x1D58E, 'M', u'i'), - ] - -def _seg_62(): - return [ - (0x1D58F, 'M', u'j'), - (0x1D590, 'M', u'k'), - (0x1D591, 'M', u'l'), - (0x1D592, 'M', u'm'), - (0x1D593, 'M', u'n'), - (0x1D594, 'M', u'o'), - (0x1D595, 'M', u'p'), - (0x1D596, 'M', u'q'), - (0x1D597, 'M', u'r'), - (0x1D598, 'M', u's'), - (0x1D599, 'M', u't'), - (0x1D59A, 'M', u'u'), - (0x1D59B, 'M', u'v'), - (0x1D59C, 'M', u'w'), - (0x1D59D, 'M', u'x'), - (0x1D59E, 'M', u'y'), - (0x1D59F, 'M', u'z'), - (0x1D5A0, 'M', u'a'), - (0x1D5A1, 'M', u'b'), - (0x1D5A2, 'M', u'c'), - (0x1D5A3, 'M', u'd'), - (0x1D5A4, 'M', u'e'), - (0x1D5A5, 'M', u'f'), - (0x1D5A6, 'M', u'g'), - (0x1D5A7, 'M', u'h'), - (0x1D5A8, 'M', u'i'), - (0x1D5A9, 'M', u'j'), - (0x1D5AA, 'M', u'k'), - (0x1D5AB, 'M', u'l'), - (0x1D5AC, 'M', u'm'), - (0x1D5AD, 'M', u'n'), - (0x1D5AE, 'M', u'o'), - (0x1D5AF, 'M', u'p'), - (0x1D5B0, 'M', u'q'), - (0x1D5B1, 'M', u'r'), - (0x1D5B2, 'M', u's'), - (0x1D5B3, 'M', u't'), - (0x1D5B4, 'M', u'u'), - (0x1D5B5, 'M', u'v'), - (0x1D5B6, 'M', u'w'), - (0x1D5B7, 'M', u'x'), - (0x1D5B8, 'M', u'y'), - (0x1D5B9, 'M', u'z'), - (0x1D5BA, 'M', u'a'), - (0x1D5BB, 'M', u'b'), - (0x1D5BC, 'M', u'c'), - (0x1D5BD, 'M', u'd'), - (0x1D5BE, 'M', u'e'), - (0x1D5BF, 'M', u'f'), - (0x1D5C0, 'M', u'g'), - (0x1D5C1, 'M', u'h'), - (0x1D5C2, 'M', u'i'), - (0x1D5C3, 'M', u'j'), - (0x1D5C4, 'M', u'k'), - (0x1D5C5, 'M', u'l'), - (0x1D5C6, 'M', u'm'), - (0x1D5C7, 'M', u'n'), - (0x1D5C8, 'M', u'o'), - (0x1D5C9, 'M', u'p'), - (0x1D5CA, 'M', u'q'), - (0x1D5CB, 'M', u'r'), - (0x1D5CC, 'M', u's'), - (0x1D5CD, 'M', u't'), - (0x1D5CE, 'M', u'u'), - (0x1D5CF, 'M', u'v'), 
- (0x1D5D0, 'M', u'w'), - (0x1D5D1, 'M', u'x'), - (0x1D5D2, 'M', u'y'), - (0x1D5D3, 'M', u'z'), - (0x1D5D4, 'M', u'a'), - (0x1D5D5, 'M', u'b'), - (0x1D5D6, 'M', u'c'), - (0x1D5D7, 'M', u'd'), - (0x1D5D8, 'M', u'e'), - (0x1D5D9, 'M', u'f'), - (0x1D5DA, 'M', u'g'), - (0x1D5DB, 'M', u'h'), - (0x1D5DC, 'M', u'i'), - (0x1D5DD, 'M', u'j'), - (0x1D5DE, 'M', u'k'), - (0x1D5DF, 'M', u'l'), - (0x1D5E0, 'M', u'm'), - (0x1D5E1, 'M', u'n'), - (0x1D5E2, 'M', u'o'), - (0x1D5E3, 'M', u'p'), - (0x1D5E4, 'M', u'q'), - (0x1D5E5, 'M', u'r'), - (0x1D5E6, 'M', u's'), - (0x1D5E7, 'M', u't'), - (0x1D5E8, 'M', u'u'), - (0x1D5E9, 'M', u'v'), - (0x1D5EA, 'M', u'w'), - (0x1D5EB, 'M', u'x'), - (0x1D5EC, 'M', u'y'), - (0x1D5ED, 'M', u'z'), - (0x1D5EE, 'M', u'a'), - (0x1D5EF, 'M', u'b'), - (0x1D5F0, 'M', u'c'), - (0x1D5F1, 'M', u'd'), - (0x1D5F2, 'M', u'e'), - ] - -def _seg_63(): - return [ - (0x1D5F3, 'M', u'f'), - (0x1D5F4, 'M', u'g'), - (0x1D5F5, 'M', u'h'), - (0x1D5F6, 'M', u'i'), - (0x1D5F7, 'M', u'j'), - (0x1D5F8, 'M', u'k'), - (0x1D5F9, 'M', u'l'), - (0x1D5FA, 'M', u'm'), - (0x1D5FB, 'M', u'n'), - (0x1D5FC, 'M', u'o'), - (0x1D5FD, 'M', u'p'), - (0x1D5FE, 'M', u'q'), - (0x1D5FF, 'M', u'r'), - (0x1D600, 'M', u's'), - (0x1D601, 'M', u't'), - (0x1D602, 'M', u'u'), - (0x1D603, 'M', u'v'), - (0x1D604, 'M', u'w'), - (0x1D605, 'M', u'x'), - (0x1D606, 'M', u'y'), - (0x1D607, 'M', u'z'), - (0x1D608, 'M', u'a'), - (0x1D609, 'M', u'b'), - (0x1D60A, 'M', u'c'), - (0x1D60B, 'M', u'd'), - (0x1D60C, 'M', u'e'), - (0x1D60D, 'M', u'f'), - (0x1D60E, 'M', u'g'), - (0x1D60F, 'M', u'h'), - (0x1D610, 'M', u'i'), - (0x1D611, 'M', u'j'), - (0x1D612, 'M', u'k'), - (0x1D613, 'M', u'l'), - (0x1D614, 'M', u'm'), - (0x1D615, 'M', u'n'), - (0x1D616, 'M', u'o'), - (0x1D617, 'M', u'p'), - (0x1D618, 'M', u'q'), - (0x1D619, 'M', u'r'), - (0x1D61A, 'M', u's'), - (0x1D61B, 'M', u't'), - (0x1D61C, 'M', u'u'), - (0x1D61D, 'M', u'v'), - (0x1D61E, 'M', u'w'), - (0x1D61F, 'M', u'x'), - (0x1D620, 'M', u'y'), - (0x1D621, 'M', 
u'z'), - (0x1D622, 'M', u'a'), - (0x1D623, 'M', u'b'), - (0x1D624, 'M', u'c'), - (0x1D625, 'M', u'd'), - (0x1D626, 'M', u'e'), - (0x1D627, 'M', u'f'), - (0x1D628, 'M', u'g'), - (0x1D629, 'M', u'h'), - (0x1D62A, 'M', u'i'), - (0x1D62B, 'M', u'j'), - (0x1D62C, 'M', u'k'), - (0x1D62D, 'M', u'l'), - (0x1D62E, 'M', u'm'), - (0x1D62F, 'M', u'n'), - (0x1D630, 'M', u'o'), - (0x1D631, 'M', u'p'), - (0x1D632, 'M', u'q'), - (0x1D633, 'M', u'r'), - (0x1D634, 'M', u's'), - (0x1D635, 'M', u't'), - (0x1D636, 'M', u'u'), - (0x1D637, 'M', u'v'), - (0x1D638, 'M', u'w'), - (0x1D639, 'M', u'x'), - (0x1D63A, 'M', u'y'), - (0x1D63B, 'M', u'z'), - (0x1D63C, 'M', u'a'), - (0x1D63D, 'M', u'b'), - (0x1D63E, 'M', u'c'), - (0x1D63F, 'M', u'd'), - (0x1D640, 'M', u'e'), - (0x1D641, 'M', u'f'), - (0x1D642, 'M', u'g'), - (0x1D643, 'M', u'h'), - (0x1D644, 'M', u'i'), - (0x1D645, 'M', u'j'), - (0x1D646, 'M', u'k'), - (0x1D647, 'M', u'l'), - (0x1D648, 'M', u'm'), - (0x1D649, 'M', u'n'), - (0x1D64A, 'M', u'o'), - (0x1D64B, 'M', u'p'), - (0x1D64C, 'M', u'q'), - (0x1D64D, 'M', u'r'), - (0x1D64E, 'M', u's'), - (0x1D64F, 'M', u't'), - (0x1D650, 'M', u'u'), - (0x1D651, 'M', u'v'), - (0x1D652, 'M', u'w'), - (0x1D653, 'M', u'x'), - (0x1D654, 'M', u'y'), - (0x1D655, 'M', u'z'), - (0x1D656, 'M', u'a'), - ] - -def _seg_64(): - return [ - (0x1D657, 'M', u'b'), - (0x1D658, 'M', u'c'), - (0x1D659, 'M', u'd'), - (0x1D65A, 'M', u'e'), - (0x1D65B, 'M', u'f'), - (0x1D65C, 'M', u'g'), - (0x1D65D, 'M', u'h'), - (0x1D65E, 'M', u'i'), - (0x1D65F, 'M', u'j'), - (0x1D660, 'M', u'k'), - (0x1D661, 'M', u'l'), - (0x1D662, 'M', u'm'), - (0x1D663, 'M', u'n'), - (0x1D664, 'M', u'o'), - (0x1D665, 'M', u'p'), - (0x1D666, 'M', u'q'), - (0x1D667, 'M', u'r'), - (0x1D668, 'M', u's'), - (0x1D669, 'M', u't'), - (0x1D66A, 'M', u'u'), - (0x1D66B, 'M', u'v'), - (0x1D66C, 'M', u'w'), - (0x1D66D, 'M', u'x'), - (0x1D66E, 'M', u'y'), - (0x1D66F, 'M', u'z'), - (0x1D670, 'M', u'a'), - (0x1D671, 'M', u'b'), - (0x1D672, 'M', u'c'), - (0x1D673, 
'M', u'd'), - (0x1D674, 'M', u'e'), - (0x1D675, 'M', u'f'), - (0x1D676, 'M', u'g'), - (0x1D677, 'M', u'h'), - (0x1D678, 'M', u'i'), - (0x1D679, 'M', u'j'), - (0x1D67A, 'M', u'k'), - (0x1D67B, 'M', u'l'), - (0x1D67C, 'M', u'm'), - (0x1D67D, 'M', u'n'), - (0x1D67E, 'M', u'o'), - (0x1D67F, 'M', u'p'), - (0x1D680, 'M', u'q'), - (0x1D681, 'M', u'r'), - (0x1D682, 'M', u's'), - (0x1D683, 'M', u't'), - (0x1D684, 'M', u'u'), - (0x1D685, 'M', u'v'), - (0x1D686, 'M', u'w'), - (0x1D687, 'M', u'x'), - (0x1D688, 'M', u'y'), - (0x1D689, 'M', u'z'), - (0x1D68A, 'M', u'a'), - (0x1D68B, 'M', u'b'), - (0x1D68C, 'M', u'c'), - (0x1D68D, 'M', u'd'), - (0x1D68E, 'M', u'e'), - (0x1D68F, 'M', u'f'), - (0x1D690, 'M', u'g'), - (0x1D691, 'M', u'h'), - (0x1D692, 'M', u'i'), - (0x1D693, 'M', u'j'), - (0x1D694, 'M', u'k'), - (0x1D695, 'M', u'l'), - (0x1D696, 'M', u'm'), - (0x1D697, 'M', u'n'), - (0x1D698, 'M', u'o'), - (0x1D699, 'M', u'p'), - (0x1D69A, 'M', u'q'), - (0x1D69B, 'M', u'r'), - (0x1D69C, 'M', u's'), - (0x1D69D, 'M', u't'), - (0x1D69E, 'M', u'u'), - (0x1D69F, 'M', u'v'), - (0x1D6A0, 'M', u'w'), - (0x1D6A1, 'M', u'x'), - (0x1D6A2, 'M', u'y'), - (0x1D6A3, 'M', u'z'), - (0x1D6A4, 'M', u'ı'), - (0x1D6A5, 'M', u'È·'), - (0x1D6A6, 'X'), - (0x1D6A8, 'M', u'α'), - (0x1D6A9, 'M', u'β'), - (0x1D6AA, 'M', u'γ'), - (0x1D6AB, 'M', u'δ'), - (0x1D6AC, 'M', u'ε'), - (0x1D6AD, 'M', u'ζ'), - (0x1D6AE, 'M', u'η'), - (0x1D6AF, 'M', u'θ'), - (0x1D6B0, 'M', u'ι'), - (0x1D6B1, 'M', u'κ'), - (0x1D6B2, 'M', u'λ'), - (0x1D6B3, 'M', u'μ'), - (0x1D6B4, 'M', u'ν'), - (0x1D6B5, 'M', u'ξ'), - (0x1D6B6, 'M', u'ο'), - (0x1D6B7, 'M', u'Ï€'), - (0x1D6B8, 'M', u'Ï'), - (0x1D6B9, 'M', u'θ'), - (0x1D6BA, 'M', u'σ'), - (0x1D6BB, 'M', u'Ï„'), - ] - -def _seg_65(): - return [ - (0x1D6BC, 'M', u'Ï…'), - (0x1D6BD, 'M', u'φ'), - (0x1D6BE, 'M', u'χ'), - (0x1D6BF, 'M', u'ψ'), - (0x1D6C0, 'M', u'ω'), - (0x1D6C1, 'M', u'∇'), - (0x1D6C2, 'M', u'α'), - (0x1D6C3, 'M', u'β'), - (0x1D6C4, 'M', u'γ'), - (0x1D6C5, 'M', u'δ'), - (0x1D6C6, 
'M', u'ε'), - (0x1D6C7, 'M', u'ζ'), - (0x1D6C8, 'M', u'η'), - (0x1D6C9, 'M', u'θ'), - (0x1D6CA, 'M', u'ι'), - (0x1D6CB, 'M', u'κ'), - (0x1D6CC, 'M', u'λ'), - (0x1D6CD, 'M', u'μ'), - (0x1D6CE, 'M', u'ν'), - (0x1D6CF, 'M', u'ξ'), - (0x1D6D0, 'M', u'ο'), - (0x1D6D1, 'M', u'Ï€'), - (0x1D6D2, 'M', u'Ï'), - (0x1D6D3, 'M', u'σ'), - (0x1D6D5, 'M', u'Ï„'), - (0x1D6D6, 'M', u'Ï…'), - (0x1D6D7, 'M', u'φ'), - (0x1D6D8, 'M', u'χ'), - (0x1D6D9, 'M', u'ψ'), - (0x1D6DA, 'M', u'ω'), - (0x1D6DB, 'M', u'∂'), - (0x1D6DC, 'M', u'ε'), - (0x1D6DD, 'M', u'θ'), - (0x1D6DE, 'M', u'κ'), - (0x1D6DF, 'M', u'φ'), - (0x1D6E0, 'M', u'Ï'), - (0x1D6E1, 'M', u'Ï€'), - (0x1D6E2, 'M', u'α'), - (0x1D6E3, 'M', u'β'), - (0x1D6E4, 'M', u'γ'), - (0x1D6E5, 'M', u'δ'), - (0x1D6E6, 'M', u'ε'), - (0x1D6E7, 'M', u'ζ'), - (0x1D6E8, 'M', u'η'), - (0x1D6E9, 'M', u'θ'), - (0x1D6EA, 'M', u'ι'), - (0x1D6EB, 'M', u'κ'), - (0x1D6EC, 'M', u'λ'), - (0x1D6ED, 'M', u'μ'), - (0x1D6EE, 'M', u'ν'), - (0x1D6EF, 'M', u'ξ'), - (0x1D6F0, 'M', u'ο'), - (0x1D6F1, 'M', u'Ï€'), - (0x1D6F2, 'M', u'Ï'), - (0x1D6F3, 'M', u'θ'), - (0x1D6F4, 'M', u'σ'), - (0x1D6F5, 'M', u'Ï„'), - (0x1D6F6, 'M', u'Ï…'), - (0x1D6F7, 'M', u'φ'), - (0x1D6F8, 'M', u'χ'), - (0x1D6F9, 'M', u'ψ'), - (0x1D6FA, 'M', u'ω'), - (0x1D6FB, 'M', u'∇'), - (0x1D6FC, 'M', u'α'), - (0x1D6FD, 'M', u'β'), - (0x1D6FE, 'M', u'γ'), - (0x1D6FF, 'M', u'δ'), - (0x1D700, 'M', u'ε'), - (0x1D701, 'M', u'ζ'), - (0x1D702, 'M', u'η'), - (0x1D703, 'M', u'θ'), - (0x1D704, 'M', u'ι'), - (0x1D705, 'M', u'κ'), - (0x1D706, 'M', u'λ'), - (0x1D707, 'M', u'μ'), - (0x1D708, 'M', u'ν'), - (0x1D709, 'M', u'ξ'), - (0x1D70A, 'M', u'ο'), - (0x1D70B, 'M', u'Ï€'), - (0x1D70C, 'M', u'Ï'), - (0x1D70D, 'M', u'σ'), - (0x1D70F, 'M', u'Ï„'), - (0x1D710, 'M', u'Ï…'), - (0x1D711, 'M', u'φ'), - (0x1D712, 'M', u'χ'), - (0x1D713, 'M', u'ψ'), - (0x1D714, 'M', u'ω'), - (0x1D715, 'M', u'∂'), - (0x1D716, 'M', u'ε'), - (0x1D717, 'M', u'θ'), - (0x1D718, 'M', u'κ'), - (0x1D719, 'M', u'φ'), - (0x1D71A, 'M', u'Ï'), - 
(0x1D71B, 'M', u'Ï€'), - (0x1D71C, 'M', u'α'), - (0x1D71D, 'M', u'β'), - (0x1D71E, 'M', u'γ'), - (0x1D71F, 'M', u'δ'), - (0x1D720, 'M', u'ε'), - (0x1D721, 'M', u'ζ'), - ] - -def _seg_66(): - return [ - (0x1D722, 'M', u'η'), - (0x1D723, 'M', u'θ'), - (0x1D724, 'M', u'ι'), - (0x1D725, 'M', u'κ'), - (0x1D726, 'M', u'λ'), - (0x1D727, 'M', u'μ'), - (0x1D728, 'M', u'ν'), - (0x1D729, 'M', u'ξ'), - (0x1D72A, 'M', u'ο'), - (0x1D72B, 'M', u'Ï€'), - (0x1D72C, 'M', u'Ï'), - (0x1D72D, 'M', u'θ'), - (0x1D72E, 'M', u'σ'), - (0x1D72F, 'M', u'Ï„'), - (0x1D730, 'M', u'Ï…'), - (0x1D731, 'M', u'φ'), - (0x1D732, 'M', u'χ'), - (0x1D733, 'M', u'ψ'), - (0x1D734, 'M', u'ω'), - (0x1D735, 'M', u'∇'), - (0x1D736, 'M', u'α'), - (0x1D737, 'M', u'β'), - (0x1D738, 'M', u'γ'), - (0x1D739, 'M', u'δ'), - (0x1D73A, 'M', u'ε'), - (0x1D73B, 'M', u'ζ'), - (0x1D73C, 'M', u'η'), - (0x1D73D, 'M', u'θ'), - (0x1D73E, 'M', u'ι'), - (0x1D73F, 'M', u'κ'), - (0x1D740, 'M', u'λ'), - (0x1D741, 'M', u'μ'), - (0x1D742, 'M', u'ν'), - (0x1D743, 'M', u'ξ'), - (0x1D744, 'M', u'ο'), - (0x1D745, 'M', u'Ï€'), - (0x1D746, 'M', u'Ï'), - (0x1D747, 'M', u'σ'), - (0x1D749, 'M', u'Ï„'), - (0x1D74A, 'M', u'Ï…'), - (0x1D74B, 'M', u'φ'), - (0x1D74C, 'M', u'χ'), - (0x1D74D, 'M', u'ψ'), - (0x1D74E, 'M', u'ω'), - (0x1D74F, 'M', u'∂'), - (0x1D750, 'M', u'ε'), - (0x1D751, 'M', u'θ'), - (0x1D752, 'M', u'κ'), - (0x1D753, 'M', u'φ'), - (0x1D754, 'M', u'Ï'), - (0x1D755, 'M', u'Ï€'), - (0x1D756, 'M', u'α'), - (0x1D757, 'M', u'β'), - (0x1D758, 'M', u'γ'), - (0x1D759, 'M', u'δ'), - (0x1D75A, 'M', u'ε'), - (0x1D75B, 'M', u'ζ'), - (0x1D75C, 'M', u'η'), - (0x1D75D, 'M', u'θ'), - (0x1D75E, 'M', u'ι'), - (0x1D75F, 'M', u'κ'), - (0x1D760, 'M', u'λ'), - (0x1D761, 'M', u'μ'), - (0x1D762, 'M', u'ν'), - (0x1D763, 'M', u'ξ'), - (0x1D764, 'M', u'ο'), - (0x1D765, 'M', u'Ï€'), - (0x1D766, 'M', u'Ï'), - (0x1D767, 'M', u'θ'), - (0x1D768, 'M', u'σ'), - (0x1D769, 'M', u'Ï„'), - (0x1D76A, 'M', u'Ï…'), - (0x1D76B, 'M', u'φ'), - (0x1D76C, 'M', u'χ'), - (0x1D76D, 
'M', u'ψ'), - (0x1D76E, 'M', u'ω'), - (0x1D76F, 'M', u'∇'), - (0x1D770, 'M', u'α'), - (0x1D771, 'M', u'β'), - (0x1D772, 'M', u'γ'), - (0x1D773, 'M', u'δ'), - (0x1D774, 'M', u'ε'), - (0x1D775, 'M', u'ζ'), - (0x1D776, 'M', u'η'), - (0x1D777, 'M', u'θ'), - (0x1D778, 'M', u'ι'), - (0x1D779, 'M', u'κ'), - (0x1D77A, 'M', u'λ'), - (0x1D77B, 'M', u'μ'), - (0x1D77C, 'M', u'ν'), - (0x1D77D, 'M', u'ξ'), - (0x1D77E, 'M', u'ο'), - (0x1D77F, 'M', u'Ï€'), - (0x1D780, 'M', u'Ï'), - (0x1D781, 'M', u'σ'), - (0x1D783, 'M', u'Ï„'), - (0x1D784, 'M', u'Ï…'), - (0x1D785, 'M', u'φ'), - (0x1D786, 'M', u'χ'), - (0x1D787, 'M', u'ψ'), - ] - -def _seg_67(): - return [ - (0x1D788, 'M', u'ω'), - (0x1D789, 'M', u'∂'), - (0x1D78A, 'M', u'ε'), - (0x1D78B, 'M', u'θ'), - (0x1D78C, 'M', u'κ'), - (0x1D78D, 'M', u'φ'), - (0x1D78E, 'M', u'Ï'), - (0x1D78F, 'M', u'Ï€'), - (0x1D790, 'M', u'α'), - (0x1D791, 'M', u'β'), - (0x1D792, 'M', u'γ'), - (0x1D793, 'M', u'δ'), - (0x1D794, 'M', u'ε'), - (0x1D795, 'M', u'ζ'), - (0x1D796, 'M', u'η'), - (0x1D797, 'M', u'θ'), - (0x1D798, 'M', u'ι'), - (0x1D799, 'M', u'κ'), - (0x1D79A, 'M', u'λ'), - (0x1D79B, 'M', u'μ'), - (0x1D79C, 'M', u'ν'), - (0x1D79D, 'M', u'ξ'), - (0x1D79E, 'M', u'ο'), - (0x1D79F, 'M', u'Ï€'), - (0x1D7A0, 'M', u'Ï'), - (0x1D7A1, 'M', u'θ'), - (0x1D7A2, 'M', u'σ'), - (0x1D7A3, 'M', u'Ï„'), - (0x1D7A4, 'M', u'Ï…'), - (0x1D7A5, 'M', u'φ'), - (0x1D7A6, 'M', u'χ'), - (0x1D7A7, 'M', u'ψ'), - (0x1D7A8, 'M', u'ω'), - (0x1D7A9, 'M', u'∇'), - (0x1D7AA, 'M', u'α'), - (0x1D7AB, 'M', u'β'), - (0x1D7AC, 'M', u'γ'), - (0x1D7AD, 'M', u'δ'), - (0x1D7AE, 'M', u'ε'), - (0x1D7AF, 'M', u'ζ'), - (0x1D7B0, 'M', u'η'), - (0x1D7B1, 'M', u'θ'), - (0x1D7B2, 'M', u'ι'), - (0x1D7B3, 'M', u'κ'), - (0x1D7B4, 'M', u'λ'), - (0x1D7B5, 'M', u'μ'), - (0x1D7B6, 'M', u'ν'), - (0x1D7B7, 'M', u'ξ'), - (0x1D7B8, 'M', u'ο'), - (0x1D7B9, 'M', u'Ï€'), - (0x1D7BA, 'M', u'Ï'), - (0x1D7BB, 'M', u'σ'), - (0x1D7BD, 'M', u'Ï„'), - (0x1D7BE, 'M', u'Ï…'), - (0x1D7BF, 'M', u'φ'), - (0x1D7C0, 'M', u'χ'), 
- (0x1D7C1, 'M', u'ψ'), - (0x1D7C2, 'M', u'ω'), - (0x1D7C3, 'M', u'∂'), - (0x1D7C4, 'M', u'ε'), - (0x1D7C5, 'M', u'θ'), - (0x1D7C6, 'M', u'κ'), - (0x1D7C7, 'M', u'φ'), - (0x1D7C8, 'M', u'Ï'), - (0x1D7C9, 'M', u'Ï€'), - (0x1D7CA, 'M', u'Ï'), - (0x1D7CC, 'X'), - (0x1D7CE, 'M', u'0'), - (0x1D7CF, 'M', u'1'), - (0x1D7D0, 'M', u'2'), - (0x1D7D1, 'M', u'3'), - (0x1D7D2, 'M', u'4'), - (0x1D7D3, 'M', u'5'), - (0x1D7D4, 'M', u'6'), - (0x1D7D5, 'M', u'7'), - (0x1D7D6, 'M', u'8'), - (0x1D7D7, 'M', u'9'), - (0x1D7D8, 'M', u'0'), - (0x1D7D9, 'M', u'1'), - (0x1D7DA, 'M', u'2'), - (0x1D7DB, 'M', u'3'), - (0x1D7DC, 'M', u'4'), - (0x1D7DD, 'M', u'5'), - (0x1D7DE, 'M', u'6'), - (0x1D7DF, 'M', u'7'), - (0x1D7E0, 'M', u'8'), - (0x1D7E1, 'M', u'9'), - (0x1D7E2, 'M', u'0'), - (0x1D7E3, 'M', u'1'), - (0x1D7E4, 'M', u'2'), - (0x1D7E5, 'M', u'3'), - (0x1D7E6, 'M', u'4'), - (0x1D7E7, 'M', u'5'), - (0x1D7E8, 'M', u'6'), - (0x1D7E9, 'M', u'7'), - (0x1D7EA, 'M', u'8'), - (0x1D7EB, 'M', u'9'), - (0x1D7EC, 'M', u'0'), - (0x1D7ED, 'M', u'1'), - (0x1D7EE, 'M', u'2'), - ] - -def _seg_68(): - return [ - (0x1D7EF, 'M', u'3'), - (0x1D7F0, 'M', u'4'), - (0x1D7F1, 'M', u'5'), - (0x1D7F2, 'M', u'6'), - (0x1D7F3, 'M', u'7'), - (0x1D7F4, 'M', u'8'), - (0x1D7F5, 'M', u'9'), - (0x1D7F6, 'M', u'0'), - (0x1D7F7, 'M', u'1'), - (0x1D7F8, 'M', u'2'), - (0x1D7F9, 'M', u'3'), - (0x1D7FA, 'M', u'4'), - (0x1D7FB, 'M', u'5'), - (0x1D7FC, 'M', u'6'), - (0x1D7FD, 'M', u'7'), - (0x1D7FE, 'M', u'8'), - (0x1D7FF, 'M', u'9'), - (0x1D800, 'V'), - (0x1DA8C, 'X'), - (0x1DA9B, 'V'), - (0x1DAA0, 'X'), - (0x1DAA1, 'V'), - (0x1DAB0, 'X'), - (0x1E000, 'V'), - (0x1E007, 'X'), - (0x1E008, 'V'), - (0x1E019, 'X'), - (0x1E01B, 'V'), - (0x1E022, 'X'), - (0x1E023, 'V'), - (0x1E025, 'X'), - (0x1E026, 'V'), - (0x1E02B, 'X'), - (0x1E800, 'V'), - (0x1E8C5, 'X'), - (0x1E8C7, 'V'), - (0x1E8D7, 'X'), - (0x1E900, 'M', u'𞤢'), - (0x1E901, 'M', u'𞤣'), - (0x1E902, 'M', u'𞤤'), - (0x1E903, 'M', u'𞤥'), - (0x1E904, 'M', u'𞤦'), - (0x1E905, 'M', u'𞤧'), - 
(0x1E906, 'M', u'𞤨'), - (0x1E907, 'M', u'𞤩'), - (0x1E908, 'M', u'𞤪'), - (0x1E909, 'M', u'𞤫'), - (0x1E90A, 'M', u'𞤬'), - (0x1E90B, 'M', u'𞤭'), - (0x1E90C, 'M', u'𞤮'), - (0x1E90D, 'M', u'𞤯'), - (0x1E90E, 'M', u'𞤰'), - (0x1E90F, 'M', u'𞤱'), - (0x1E910, 'M', u'𞤲'), - (0x1E911, 'M', u'𞤳'), - (0x1E912, 'M', u'𞤴'), - (0x1E913, 'M', u'𞤵'), - (0x1E914, 'M', u'𞤶'), - (0x1E915, 'M', u'𞤷'), - (0x1E916, 'M', u'𞤸'), - (0x1E917, 'M', u'𞤹'), - (0x1E918, 'M', u'𞤺'), - (0x1E919, 'M', u'𞤻'), - (0x1E91A, 'M', u'𞤼'), - (0x1E91B, 'M', u'𞤽'), - (0x1E91C, 'M', u'𞤾'), - (0x1E91D, 'M', u'𞤿'), - (0x1E91E, 'M', u'𞥀'), - (0x1E91F, 'M', u'ðž¥'), - (0x1E920, 'M', u'𞥂'), - (0x1E921, 'M', u'𞥃'), - (0x1E922, 'V'), - (0x1E94B, 'X'), - (0x1E950, 'V'), - (0x1E95A, 'X'), - (0x1E95E, 'V'), - (0x1E960, 'X'), - (0x1EC71, 'V'), - (0x1ECB5, 'X'), - (0x1EE00, 'M', u'ا'), - (0x1EE01, 'M', u'ب'), - (0x1EE02, 'M', u'ج'), - (0x1EE03, 'M', u'د'), - (0x1EE04, 'X'), - (0x1EE05, 'M', u'Ùˆ'), - (0x1EE06, 'M', u'ز'), - (0x1EE07, 'M', u'Ø­'), - (0x1EE08, 'M', u'Ø·'), - (0x1EE09, 'M', u'ÙŠ'), - (0x1EE0A, 'M', u'Ùƒ'), - (0x1EE0B, 'M', u'Ù„'), - (0x1EE0C, 'M', u'Ù…'), - (0x1EE0D, 'M', u'Ù†'), - (0x1EE0E, 'M', u'س'), - (0x1EE0F, 'M', u'ع'), - (0x1EE10, 'M', u'Ù'), - (0x1EE11, 'M', u'ص'), - (0x1EE12, 'M', u'Ù‚'), - (0x1EE13, 'M', u'ر'), - (0x1EE14, 'M', u'Ø´'), - ] - -def _seg_69(): - return [ - (0x1EE15, 'M', u'ت'), - (0x1EE16, 'M', u'Ø«'), - (0x1EE17, 'M', u'Ø®'), - (0x1EE18, 'M', u'Ø°'), - (0x1EE19, 'M', u'ض'), - (0x1EE1A, 'M', u'ظ'), - (0x1EE1B, 'M', u'غ'), - (0x1EE1C, 'M', u'Ù®'), - (0x1EE1D, 'M', u'Úº'), - (0x1EE1E, 'M', u'Ú¡'), - (0x1EE1F, 'M', u'Ù¯'), - (0x1EE20, 'X'), - (0x1EE21, 'M', u'ب'), - (0x1EE22, 'M', u'ج'), - (0x1EE23, 'X'), - (0x1EE24, 'M', u'Ù‡'), - (0x1EE25, 'X'), - (0x1EE27, 'M', u'Ø­'), - (0x1EE28, 'X'), - (0x1EE29, 'M', u'ÙŠ'), - (0x1EE2A, 'M', u'Ùƒ'), - (0x1EE2B, 'M', u'Ù„'), - (0x1EE2C, 'M', u'Ù…'), - (0x1EE2D, 'M', u'Ù†'), - (0x1EE2E, 'M', u'س'), - (0x1EE2F, 'M', u'ع'), - (0x1EE30, 'M', u'Ù'), - 
(0x1EE31, 'M', u'ص'), - (0x1EE32, 'M', u'Ù‚'), - (0x1EE33, 'X'), - (0x1EE34, 'M', u'Ø´'), - (0x1EE35, 'M', u'ت'), - (0x1EE36, 'M', u'Ø«'), - (0x1EE37, 'M', u'Ø®'), - (0x1EE38, 'X'), - (0x1EE39, 'M', u'ض'), - (0x1EE3A, 'X'), - (0x1EE3B, 'M', u'غ'), - (0x1EE3C, 'X'), - (0x1EE42, 'M', u'ج'), - (0x1EE43, 'X'), - (0x1EE47, 'M', u'Ø­'), - (0x1EE48, 'X'), - (0x1EE49, 'M', u'ÙŠ'), - (0x1EE4A, 'X'), - (0x1EE4B, 'M', u'Ù„'), - (0x1EE4C, 'X'), - (0x1EE4D, 'M', u'Ù†'), - (0x1EE4E, 'M', u'س'), - (0x1EE4F, 'M', u'ع'), - (0x1EE50, 'X'), - (0x1EE51, 'M', u'ص'), - (0x1EE52, 'M', u'Ù‚'), - (0x1EE53, 'X'), - (0x1EE54, 'M', u'Ø´'), - (0x1EE55, 'X'), - (0x1EE57, 'M', u'Ø®'), - (0x1EE58, 'X'), - (0x1EE59, 'M', u'ض'), - (0x1EE5A, 'X'), - (0x1EE5B, 'M', u'غ'), - (0x1EE5C, 'X'), - (0x1EE5D, 'M', u'Úº'), - (0x1EE5E, 'X'), - (0x1EE5F, 'M', u'Ù¯'), - (0x1EE60, 'X'), - (0x1EE61, 'M', u'ب'), - (0x1EE62, 'M', u'ج'), - (0x1EE63, 'X'), - (0x1EE64, 'M', u'Ù‡'), - (0x1EE65, 'X'), - (0x1EE67, 'M', u'Ø­'), - (0x1EE68, 'M', u'Ø·'), - (0x1EE69, 'M', u'ÙŠ'), - (0x1EE6A, 'M', u'Ùƒ'), - (0x1EE6B, 'X'), - (0x1EE6C, 'M', u'Ù…'), - (0x1EE6D, 'M', u'Ù†'), - (0x1EE6E, 'M', u'س'), - (0x1EE6F, 'M', u'ع'), - (0x1EE70, 'M', u'Ù'), - (0x1EE71, 'M', u'ص'), - (0x1EE72, 'M', u'Ù‚'), - (0x1EE73, 'X'), - (0x1EE74, 'M', u'Ø´'), - (0x1EE75, 'M', u'ت'), - (0x1EE76, 'M', u'Ø«'), - (0x1EE77, 'M', u'Ø®'), - (0x1EE78, 'X'), - (0x1EE79, 'M', u'ض'), - (0x1EE7A, 'M', u'ظ'), - (0x1EE7B, 'M', u'غ'), - (0x1EE7C, 'M', u'Ù®'), - (0x1EE7D, 'X'), - (0x1EE7E, 'M', u'Ú¡'), - (0x1EE7F, 'X'), - (0x1EE80, 'M', u'ا'), - (0x1EE81, 'M', u'ب'), - (0x1EE82, 'M', u'ج'), - (0x1EE83, 'M', u'د'), - ] - -def _seg_70(): - return [ - (0x1EE84, 'M', u'Ù‡'), - (0x1EE85, 'M', u'Ùˆ'), - (0x1EE86, 'M', u'ز'), - (0x1EE87, 'M', u'Ø­'), - (0x1EE88, 'M', u'Ø·'), - (0x1EE89, 'M', u'ÙŠ'), - (0x1EE8A, 'X'), - (0x1EE8B, 'M', u'Ù„'), - (0x1EE8C, 'M', u'Ù…'), - (0x1EE8D, 'M', u'Ù†'), - (0x1EE8E, 'M', u'س'), - (0x1EE8F, 'M', u'ع'), - (0x1EE90, 'M', u'Ù'), - (0x1EE91, 
'M', u'ص'), - (0x1EE92, 'M', u'Ù‚'), - (0x1EE93, 'M', u'ر'), - (0x1EE94, 'M', u'Ø´'), - (0x1EE95, 'M', u'ت'), - (0x1EE96, 'M', u'Ø«'), - (0x1EE97, 'M', u'Ø®'), - (0x1EE98, 'M', u'Ø°'), - (0x1EE99, 'M', u'ض'), - (0x1EE9A, 'M', u'ظ'), - (0x1EE9B, 'M', u'غ'), - (0x1EE9C, 'X'), - (0x1EEA1, 'M', u'ب'), - (0x1EEA2, 'M', u'ج'), - (0x1EEA3, 'M', u'د'), - (0x1EEA4, 'X'), - (0x1EEA5, 'M', u'Ùˆ'), - (0x1EEA6, 'M', u'ز'), - (0x1EEA7, 'M', u'Ø­'), - (0x1EEA8, 'M', u'Ø·'), - (0x1EEA9, 'M', u'ÙŠ'), - (0x1EEAA, 'X'), - (0x1EEAB, 'M', u'Ù„'), - (0x1EEAC, 'M', u'Ù…'), - (0x1EEAD, 'M', u'Ù†'), - (0x1EEAE, 'M', u'س'), - (0x1EEAF, 'M', u'ع'), - (0x1EEB0, 'M', u'Ù'), - (0x1EEB1, 'M', u'ص'), - (0x1EEB2, 'M', u'Ù‚'), - (0x1EEB3, 'M', u'ر'), - (0x1EEB4, 'M', u'Ø´'), - (0x1EEB5, 'M', u'ت'), - (0x1EEB6, 'M', u'Ø«'), - (0x1EEB7, 'M', u'Ø®'), - (0x1EEB8, 'M', u'Ø°'), - (0x1EEB9, 'M', u'ض'), - (0x1EEBA, 'M', u'ظ'), - (0x1EEBB, 'M', u'غ'), - (0x1EEBC, 'X'), - (0x1EEF0, 'V'), - (0x1EEF2, 'X'), - (0x1F000, 'V'), - (0x1F02C, 'X'), - (0x1F030, 'V'), - (0x1F094, 'X'), - (0x1F0A0, 'V'), - (0x1F0AF, 'X'), - (0x1F0B1, 'V'), - (0x1F0C0, 'X'), - (0x1F0C1, 'V'), - (0x1F0D0, 'X'), - (0x1F0D1, 'V'), - (0x1F0F6, 'X'), - (0x1F101, '3', u'0,'), - (0x1F102, '3', u'1,'), - (0x1F103, '3', u'2,'), - (0x1F104, '3', u'3,'), - (0x1F105, '3', u'4,'), - (0x1F106, '3', u'5,'), - (0x1F107, '3', u'6,'), - (0x1F108, '3', u'7,'), - (0x1F109, '3', u'8,'), - (0x1F10A, '3', u'9,'), - (0x1F10B, 'V'), - (0x1F10D, 'X'), - (0x1F110, '3', u'(a)'), - (0x1F111, '3', u'(b)'), - (0x1F112, '3', u'(c)'), - (0x1F113, '3', u'(d)'), - (0x1F114, '3', u'(e)'), - (0x1F115, '3', u'(f)'), - (0x1F116, '3', u'(g)'), - (0x1F117, '3', u'(h)'), - (0x1F118, '3', u'(i)'), - (0x1F119, '3', u'(j)'), - (0x1F11A, '3', u'(k)'), - (0x1F11B, '3', u'(l)'), - (0x1F11C, '3', u'(m)'), - (0x1F11D, '3', u'(n)'), - (0x1F11E, '3', u'(o)'), - (0x1F11F, '3', u'(p)'), - (0x1F120, '3', u'(q)'), - (0x1F121, '3', u'(r)'), - (0x1F122, '3', u'(s)'), - (0x1F123, '3', u'(t)'), 
- (0x1F124, '3', u'(u)'), - ] - -def _seg_71(): - return [ - (0x1F125, '3', u'(v)'), - (0x1F126, '3', u'(w)'), - (0x1F127, '3', u'(x)'), - (0x1F128, '3', u'(y)'), - (0x1F129, '3', u'(z)'), - (0x1F12A, 'M', u'〔s〕'), - (0x1F12B, 'M', u'c'), - (0x1F12C, 'M', u'r'), - (0x1F12D, 'M', u'cd'), - (0x1F12E, 'M', u'wz'), - (0x1F12F, 'V'), - (0x1F130, 'M', u'a'), - (0x1F131, 'M', u'b'), - (0x1F132, 'M', u'c'), - (0x1F133, 'M', u'd'), - (0x1F134, 'M', u'e'), - (0x1F135, 'M', u'f'), - (0x1F136, 'M', u'g'), - (0x1F137, 'M', u'h'), - (0x1F138, 'M', u'i'), - (0x1F139, 'M', u'j'), - (0x1F13A, 'M', u'k'), - (0x1F13B, 'M', u'l'), - (0x1F13C, 'M', u'm'), - (0x1F13D, 'M', u'n'), - (0x1F13E, 'M', u'o'), - (0x1F13F, 'M', u'p'), - (0x1F140, 'M', u'q'), - (0x1F141, 'M', u'r'), - (0x1F142, 'M', u's'), - (0x1F143, 'M', u't'), - (0x1F144, 'M', u'u'), - (0x1F145, 'M', u'v'), - (0x1F146, 'M', u'w'), - (0x1F147, 'M', u'x'), - (0x1F148, 'M', u'y'), - (0x1F149, 'M', u'z'), - (0x1F14A, 'M', u'hv'), - (0x1F14B, 'M', u'mv'), - (0x1F14C, 'M', u'sd'), - (0x1F14D, 'M', u'ss'), - (0x1F14E, 'M', u'ppv'), - (0x1F14F, 'M', u'wc'), - (0x1F150, 'V'), - (0x1F16A, 'M', u'mc'), - (0x1F16B, 'M', u'md'), - (0x1F16C, 'X'), - (0x1F170, 'V'), - (0x1F190, 'M', u'dj'), - (0x1F191, 'V'), - (0x1F1AD, 'X'), - (0x1F1E6, 'V'), - (0x1F200, 'M', u'ã»ã‹'), - (0x1F201, 'M', u'ココ'), - (0x1F202, 'M', u'サ'), - (0x1F203, 'X'), - (0x1F210, 'M', u'手'), - (0x1F211, 'M', u'å­—'), - (0x1F212, 'M', u'åŒ'), - (0x1F213, 'M', u'デ'), - (0x1F214, 'M', u'二'), - (0x1F215, 'M', u'多'), - (0x1F216, 'M', u'解'), - (0x1F217, 'M', u'天'), - (0x1F218, 'M', u'交'), - (0x1F219, 'M', u'映'), - (0x1F21A, 'M', u'ç„¡'), - (0x1F21B, 'M', u'æ–™'), - (0x1F21C, 'M', u'å‰'), - (0x1F21D, 'M', u'後'), - (0x1F21E, 'M', u'å†'), - (0x1F21F, 'M', u'æ–°'), - (0x1F220, 'M', u'åˆ'), - (0x1F221, 'M', u'終'), - (0x1F222, 'M', u'生'), - (0x1F223, 'M', u'販'), - (0x1F224, 'M', u'声'), - (0x1F225, 'M', u'å¹'), - (0x1F226, 'M', u'æ¼”'), - (0x1F227, 'M', u'投'), - (0x1F228, 'M', u'æ•'), 
- (0x1F229, 'M', u'一'), - (0x1F22A, 'M', u'三'), - (0x1F22B, 'M', u'éŠ'), - (0x1F22C, 'M', u'å·¦'), - (0x1F22D, 'M', u'中'), - (0x1F22E, 'M', u'å³'), - (0x1F22F, 'M', u'指'), - (0x1F230, 'M', u'èµ°'), - (0x1F231, 'M', u'打'), - (0x1F232, 'M', u'ç¦'), - (0x1F233, 'M', u'空'), - (0x1F234, 'M', u'åˆ'), - (0x1F235, 'M', u'満'), - (0x1F236, 'M', u'有'), - (0x1F237, 'M', u'月'), - (0x1F238, 'M', u'申'), - (0x1F239, 'M', u'割'), - (0x1F23A, 'M', u'å–¶'), - (0x1F23B, 'M', u'é…'), - ] - -def _seg_72(): - return [ - (0x1F23C, 'X'), - (0x1F240, 'M', u'〔本〕'), - (0x1F241, 'M', u'〔三〕'), - (0x1F242, 'M', u'〔二〕'), - (0x1F243, 'M', u'〔安〕'), - (0x1F244, 'M', u'〔点〕'), - (0x1F245, 'M', u'〔打〕'), - (0x1F246, 'M', u'〔盗〕'), - (0x1F247, 'M', u'〔å‹ã€•'), - (0x1F248, 'M', u'〔敗〕'), - (0x1F249, 'X'), - (0x1F250, 'M', u'å¾—'), - (0x1F251, 'M', u'å¯'), - (0x1F252, 'X'), - (0x1F260, 'V'), - (0x1F266, 'X'), - (0x1F300, 'V'), - (0x1F6D5, 'X'), - (0x1F6E0, 'V'), - (0x1F6ED, 'X'), - (0x1F6F0, 'V'), - (0x1F6FA, 'X'), - (0x1F700, 'V'), - (0x1F774, 'X'), - (0x1F780, 'V'), - (0x1F7D9, 'X'), - (0x1F800, 'V'), - (0x1F80C, 'X'), - (0x1F810, 'V'), - (0x1F848, 'X'), - (0x1F850, 'V'), - (0x1F85A, 'X'), - (0x1F860, 'V'), - (0x1F888, 'X'), - (0x1F890, 'V'), - (0x1F8AE, 'X'), - (0x1F900, 'V'), - (0x1F90C, 'X'), - (0x1F910, 'V'), - (0x1F93F, 'X'), - (0x1F940, 'V'), - (0x1F971, 'X'), - (0x1F973, 'V'), - (0x1F977, 'X'), - (0x1F97A, 'V'), - (0x1F97B, 'X'), - (0x1F97C, 'V'), - (0x1F9A3, 'X'), - (0x1F9B0, 'V'), - (0x1F9BA, 'X'), - (0x1F9C0, 'V'), - (0x1F9C3, 'X'), - (0x1F9D0, 'V'), - (0x1FA00, 'X'), - (0x1FA60, 'V'), - (0x1FA6E, 'X'), - (0x20000, 'V'), - (0x2A6D7, 'X'), - (0x2A700, 'V'), - (0x2B735, 'X'), - (0x2B740, 'V'), - (0x2B81E, 'X'), - (0x2B820, 'V'), - (0x2CEA2, 'X'), - (0x2CEB0, 'V'), - (0x2EBE1, 'X'), - (0x2F800, 'M', u'丽'), - (0x2F801, 'M', u'丸'), - (0x2F802, 'M', u'ä¹'), - (0x2F803, 'M', u'ð „¢'), - (0x2F804, 'M', u'ä½ '), - (0x2F805, 'M', u'ä¾®'), - (0x2F806, 'M', u'ä¾»'), - (0x2F807, 'M', u'倂'), - (0x2F808, 'M', 
u'åº'), - (0x2F809, 'M', u'å‚™'), - (0x2F80A, 'M', u'僧'), - (0x2F80B, 'M', u'åƒ'), - (0x2F80C, 'M', u'ã’ž'), - (0x2F80D, 'M', u'𠘺'), - (0x2F80E, 'M', u'å…'), - (0x2F80F, 'M', u'å…”'), - (0x2F810, 'M', u'å…¤'), - (0x2F811, 'M', u'å…·'), - (0x2F812, 'M', u'𠔜'), - (0x2F813, 'M', u'ã’¹'), - (0x2F814, 'M', u'å…§'), - (0x2F815, 'M', u'å†'), - (0x2F816, 'M', u'ð •‹'), - (0x2F817, 'M', u'冗'), - (0x2F818, 'M', u'冤'), - (0x2F819, 'M', u'仌'), - (0x2F81A, 'M', u'冬'), - (0x2F81B, 'M', u'况'), - (0x2F81C, 'M', u'𩇟'), - (0x2F81D, 'M', u'凵'), - (0x2F81E, 'M', u'刃'), - (0x2F81F, 'M', u'ã“Ÿ'), - (0x2F820, 'M', u'刻'), - (0x2F821, 'M', u'剆'), - ] - -def _seg_73(): - return [ - (0x2F822, 'M', u'割'), - (0x2F823, 'M', u'剷'), - (0x2F824, 'M', u'㔕'), - (0x2F825, 'M', u'勇'), - (0x2F826, 'M', u'勉'), - (0x2F827, 'M', u'勤'), - (0x2F828, 'M', u'勺'), - (0x2F829, 'M', u'包'), - (0x2F82A, 'M', u'匆'), - (0x2F82B, 'M', u'北'), - (0x2F82C, 'M', u'å‰'), - (0x2F82D, 'M', u'å‘'), - (0x2F82E, 'M', u'åš'), - (0x2F82F, 'M', u'å³'), - (0x2F830, 'M', u'å½'), - (0x2F831, 'M', u'å¿'), - (0x2F834, 'M', u'𠨬'), - (0x2F835, 'M', u'ç°'), - (0x2F836, 'M', u'åŠ'), - (0x2F837, 'M', u'åŸ'), - (0x2F838, 'M', u'ð ­£'), - (0x2F839, 'M', u'å«'), - (0x2F83A, 'M', u'å±'), - (0x2F83B, 'M', u'å†'), - (0x2F83C, 'M', u'å’ž'), - (0x2F83D, 'M', u'å¸'), - (0x2F83E, 'M', u'呈'), - (0x2F83F, 'M', u'周'), - (0x2F840, 'M', u'å’¢'), - (0x2F841, 'M', u'哶'), - (0x2F842, 'M', u'å”'), - (0x2F843, 'M', u'å•“'), - (0x2F844, 'M', u'å•£'), - (0x2F845, 'M', u'å–„'), - (0x2F847, 'M', u'å–™'), - (0x2F848, 'M', u'å–«'), - (0x2F849, 'M', u'å–³'), - (0x2F84A, 'M', u'å—‚'), - (0x2F84B, 'M', u'圖'), - (0x2F84C, 'M', u'嘆'), - (0x2F84D, 'M', u'圗'), - (0x2F84E, 'M', u'噑'), - (0x2F84F, 'M', u'å™´'), - (0x2F850, 'M', u'切'), - (0x2F851, 'M', u'壮'), - (0x2F852, 'M', u'城'), - (0x2F853, 'M', u'埴'), - (0x2F854, 'M', u'å '), - (0x2F855, 'M', u'åž‹'), - (0x2F856, 'M', u'å ²'), - (0x2F857, 'M', u'å ±'), - (0x2F858, 'M', u'墬'), - (0x2F859, 'M', u'𡓤'), - (0x2F85A, 'M', 
u'売'), - (0x2F85B, 'M', u'壷'), - (0x2F85C, 'M', u'夆'), - (0x2F85D, 'M', u'多'), - (0x2F85E, 'M', u'夢'), - (0x2F85F, 'M', u'奢'), - (0x2F860, 'M', u'𡚨'), - (0x2F861, 'M', u'𡛪'), - (0x2F862, 'M', u'姬'), - (0x2F863, 'M', u'娛'), - (0x2F864, 'M', u'娧'), - (0x2F865, 'M', u'姘'), - (0x2F866, 'M', u'婦'), - (0x2F867, 'M', u'ã›®'), - (0x2F868, 'X'), - (0x2F869, 'M', u'嬈'), - (0x2F86A, 'M', u'嬾'), - (0x2F86C, 'M', u'𡧈'), - (0x2F86D, 'M', u'寃'), - (0x2F86E, 'M', u'寘'), - (0x2F86F, 'M', u'寧'), - (0x2F870, 'M', u'寳'), - (0x2F871, 'M', u'𡬘'), - (0x2F872, 'M', u'寿'), - (0x2F873, 'M', u'å°†'), - (0x2F874, 'X'), - (0x2F875, 'M', u'å°¢'), - (0x2F876, 'M', u'ãž'), - (0x2F877, 'M', u'å± '), - (0x2F878, 'M', u'å±®'), - (0x2F879, 'M', u'å³€'), - (0x2F87A, 'M', u'å²'), - (0x2F87B, 'M', u'ð¡·¤'), - (0x2F87C, 'M', u'嵃'), - (0x2F87D, 'M', u'ð¡·¦'), - (0x2F87E, 'M', u'åµ®'), - (0x2F87F, 'M', u'嵫'), - (0x2F880, 'M', u'åµ¼'), - (0x2F881, 'M', u'å·¡'), - (0x2F882, 'M', u'å·¢'), - (0x2F883, 'M', u'ã ¯'), - (0x2F884, 'M', u'å·½'), - (0x2F885, 'M', u'帨'), - (0x2F886, 'M', u'帽'), - (0x2F887, 'M', u'幩'), - (0x2F888, 'M', u'ã¡¢'), - (0x2F889, 'M', u'𢆃'), - ] - -def _seg_74(): - return [ - (0x2F88A, 'M', u'㡼'), - (0x2F88B, 'M', u'庰'), - (0x2F88C, 'M', u'庳'), - (0x2F88D, 'M', u'庶'), - (0x2F88E, 'M', u'廊'), - (0x2F88F, 'M', u'𪎒'), - (0x2F890, 'M', u'廾'), - (0x2F891, 'M', u'𢌱'), - (0x2F893, 'M', u'èˆ'), - (0x2F894, 'M', u'å¼¢'), - (0x2F896, 'M', u'㣇'), - (0x2F897, 'M', u'𣊸'), - (0x2F898, 'M', u'𦇚'), - (0x2F899, 'M', u'å½¢'), - (0x2F89A, 'M', u'彫'), - (0x2F89B, 'M', u'㣣'), - (0x2F89C, 'M', u'徚'), - (0x2F89D, 'M', u'å¿'), - (0x2F89E, 'M', u'å¿—'), - (0x2F89F, 'M', u'忹'), - (0x2F8A0, 'M', u'æ‚'), - (0x2F8A1, 'M', u'㤺'), - (0x2F8A2, 'M', u'㤜'), - (0x2F8A3, 'M', u'æ‚”'), - (0x2F8A4, 'M', u'𢛔'), - (0x2F8A5, 'M', u'惇'), - (0x2F8A6, 'M', u'æ…ˆ'), - (0x2F8A7, 'M', u'æ…Œ'), - (0x2F8A8, 'M', u'æ…Ž'), - (0x2F8A9, 'M', u'æ…Œ'), - (0x2F8AA, 'M', u'æ…º'), - (0x2F8AB, 'M', u'憎'), - (0x2F8AC, 'M', u'憲'), - (0x2F8AD, 'M', 
u'憤'), - (0x2F8AE, 'M', u'憯'), - (0x2F8AF, 'M', u'懞'), - (0x2F8B0, 'M', u'懲'), - (0x2F8B1, 'M', u'懶'), - (0x2F8B2, 'M', u'æˆ'), - (0x2F8B3, 'M', u'戛'), - (0x2F8B4, 'M', u'æ‰'), - (0x2F8B5, 'M', u'抱'), - (0x2F8B6, 'M', u'æ‹”'), - (0x2F8B7, 'M', u'æ'), - (0x2F8B8, 'M', u'𢬌'), - (0x2F8B9, 'M', u'挽'), - (0x2F8BA, 'M', u'拼'), - (0x2F8BB, 'M', u'æ¨'), - (0x2F8BC, 'M', u'掃'), - (0x2F8BD, 'M', u'æ¤'), - (0x2F8BE, 'M', u'𢯱'), - (0x2F8BF, 'M', u'æ¢'), - (0x2F8C0, 'M', u'æ…'), - (0x2F8C1, 'M', u'掩'), - (0x2F8C2, 'M', u'㨮'), - (0x2F8C3, 'M', u'æ‘©'), - (0x2F8C4, 'M', u'摾'), - (0x2F8C5, 'M', u'æ’'), - (0x2F8C6, 'M', u'æ‘·'), - (0x2F8C7, 'M', u'㩬'), - (0x2F8C8, 'M', u'æ•'), - (0x2F8C9, 'M', u'敬'), - (0x2F8CA, 'M', u'𣀊'), - (0x2F8CB, 'M', u'æ—£'), - (0x2F8CC, 'M', u'書'), - (0x2F8CD, 'M', u'晉'), - (0x2F8CE, 'M', u'㬙'), - (0x2F8CF, 'M', u'æš‘'), - (0x2F8D0, 'M', u'㬈'), - (0x2F8D1, 'M', u'㫤'), - (0x2F8D2, 'M', u'冒'), - (0x2F8D3, 'M', u'冕'), - (0x2F8D4, 'M', u'最'), - (0x2F8D5, 'M', u'æšœ'), - (0x2F8D6, 'M', u'è‚­'), - (0x2F8D7, 'M', u'ä™'), - (0x2F8D8, 'M', u'朗'), - (0x2F8D9, 'M', u'望'), - (0x2F8DA, 'M', u'朡'), - (0x2F8DB, 'M', u'æž'), - (0x2F8DC, 'M', u'æ“'), - (0x2F8DD, 'M', u'ð£ƒ'), - (0x2F8DE, 'M', u'ã­‰'), - (0x2F8DF, 'M', u'柺'), - (0x2F8E0, 'M', u'æž…'), - (0x2F8E1, 'M', u'æ¡’'), - (0x2F8E2, 'M', u'梅'), - (0x2F8E3, 'M', u'𣑭'), - (0x2F8E4, 'M', u'梎'), - (0x2F8E5, 'M', u'æ Ÿ'), - (0x2F8E6, 'M', u'椔'), - (0x2F8E7, 'M', u'ã®'), - (0x2F8E8, 'M', u'楂'), - (0x2F8E9, 'M', u'榣'), - (0x2F8EA, 'M', u'槪'), - (0x2F8EB, 'M', u'檨'), - (0x2F8EC, 'M', u'𣚣'), - (0x2F8ED, 'M', u'æ«›'), - (0x2F8EE, 'M', u'ã°˜'), - (0x2F8EF, 'M', u'次'), - ] - -def _seg_75(): - return [ - (0x2F8F0, 'M', u'𣢧'), - (0x2F8F1, 'M', u'æ­”'), - (0x2F8F2, 'M', u'㱎'), - (0x2F8F3, 'M', u'æ­²'), - (0x2F8F4, 'M', u'殟'), - (0x2F8F5, 'M', u'殺'), - (0x2F8F6, 'M', u'æ®»'), - (0x2F8F7, 'M', u'ð£ª'), - (0x2F8F8, 'M', u'ð¡´‹'), - (0x2F8F9, 'M', u'𣫺'), - (0x2F8FA, 'M', u'汎'), - (0x2F8FB, 'M', u'𣲼'), - (0x2F8FC, 'M', u'沿'), - (0x2F8FD, 
'M', u'æ³'), - (0x2F8FE, 'M', u'汧'), - (0x2F8FF, 'M', u'æ´–'), - (0x2F900, 'M', u'æ´¾'), - (0x2F901, 'M', u'æµ·'), - (0x2F902, 'M', u'æµ'), - (0x2F903, 'M', u'浩'), - (0x2F904, 'M', u'浸'), - (0x2F905, 'M', u'涅'), - (0x2F906, 'M', u'𣴞'), - (0x2F907, 'M', u'æ´´'), - (0x2F908, 'M', u'港'), - (0x2F909, 'M', u'æ¹®'), - (0x2F90A, 'M', u'ã´³'), - (0x2F90B, 'M', u'滋'), - (0x2F90C, 'M', u'滇'), - (0x2F90D, 'M', u'𣻑'), - (0x2F90E, 'M', u'æ·¹'), - (0x2F90F, 'M', u'æ½®'), - (0x2F910, 'M', u'𣽞'), - (0x2F911, 'M', u'𣾎'), - (0x2F912, 'M', u'濆'), - (0x2F913, 'M', u'瀹'), - (0x2F914, 'M', u'瀞'), - (0x2F915, 'M', u'瀛'), - (0x2F916, 'M', u'㶖'), - (0x2F917, 'M', u'çŠ'), - (0x2F918, 'M', u'ç½'), - (0x2F919, 'M', u'ç·'), - (0x2F91A, 'M', u'ç‚­'), - (0x2F91B, 'M', u'𠔥'), - (0x2F91C, 'M', u'ç……'), - (0x2F91D, 'M', u'𤉣'), - (0x2F91E, 'M', u'熜'), - (0x2F91F, 'X'), - (0x2F920, 'M', u'爨'), - (0x2F921, 'M', u'爵'), - (0x2F922, 'M', u'ç‰'), - (0x2F923, 'M', u'𤘈'), - (0x2F924, 'M', u'犀'), - (0x2F925, 'M', u'犕'), - (0x2F926, 'M', u'𤜵'), - (0x2F927, 'M', u'𤠔'), - (0x2F928, 'M', u'çº'), - (0x2F929, 'M', u'王'), - (0x2F92A, 'M', u'㺬'), - (0x2F92B, 'M', u'玥'), - (0x2F92C, 'M', u'㺸'), - (0x2F92E, 'M', u'瑇'), - (0x2F92F, 'M', u'ç‘œ'), - (0x2F930, 'M', u'瑱'), - (0x2F931, 'M', u'ç’…'), - (0x2F932, 'M', u'ç“Š'), - (0x2F933, 'M', u'ã¼›'), - (0x2F934, 'M', u'甤'), - (0x2F935, 'M', u'𤰶'), - (0x2F936, 'M', u'甾'), - (0x2F937, 'M', u'𤲒'), - (0x2F938, 'M', u'ç•°'), - (0x2F939, 'M', u'𢆟'), - (0x2F93A, 'M', u'ç˜'), - (0x2F93B, 'M', u'𤾡'), - (0x2F93C, 'M', u'𤾸'), - (0x2F93D, 'M', u'ð¥„'), - (0x2F93E, 'M', u'㿼'), - (0x2F93F, 'M', u'䀈'), - (0x2F940, 'M', u'ç›´'), - (0x2F941, 'M', u'𥃳'), - (0x2F942, 'M', u'𥃲'), - (0x2F943, 'M', u'𥄙'), - (0x2F944, 'M', u'𥄳'), - (0x2F945, 'M', u'眞'), - (0x2F946, 'M', u'真'), - (0x2F948, 'M', u'çŠ'), - (0x2F949, 'M', u'䀹'), - (0x2F94A, 'M', u'çž‹'), - (0x2F94B, 'M', u'ä†'), - (0x2F94C, 'M', u'ä‚–'), - (0x2F94D, 'M', u'ð¥'), - (0x2F94E, 'M', u'ç¡Ž'), - (0x2F94F, 'M', u'碌'), - (0x2F950, 'M', 
u'磌'), - (0x2F951, 'M', u'䃣'), - (0x2F952, 'M', u'𥘦'), - (0x2F953, 'M', u'祖'), - (0x2F954, 'M', u'𥚚'), - (0x2F955, 'M', u'𥛅'), - ] - -def _seg_76(): - return [ - (0x2F956, 'M', u'ç¦'), - (0x2F957, 'M', u'秫'), - (0x2F958, 'M', u'䄯'), - (0x2F959, 'M', u'ç©€'), - (0x2F95A, 'M', u'ç©Š'), - (0x2F95B, 'M', u'ç©'), - (0x2F95C, 'M', u'𥥼'), - (0x2F95D, 'M', u'𥪧'), - (0x2F95F, 'X'), - (0x2F960, 'M', u'䈂'), - (0x2F961, 'M', u'𥮫'), - (0x2F962, 'M', u'篆'), - (0x2F963, 'M', u'築'), - (0x2F964, 'M', u'䈧'), - (0x2F965, 'M', u'𥲀'), - (0x2F966, 'M', u'ç³’'), - (0x2F967, 'M', u'䊠'), - (0x2F968, 'M', u'糨'), - (0x2F969, 'M', u'ç³£'), - (0x2F96A, 'M', u'ç´€'), - (0x2F96B, 'M', u'𥾆'), - (0x2F96C, 'M', u'çµ£'), - (0x2F96D, 'M', u'äŒ'), - (0x2F96E, 'M', u'ç·‡'), - (0x2F96F, 'M', u'縂'), - (0x2F970, 'M', u'ç¹…'), - (0x2F971, 'M', u'䌴'), - (0x2F972, 'M', u'𦈨'), - (0x2F973, 'M', u'𦉇'), - (0x2F974, 'M', u'ä™'), - (0x2F975, 'M', u'𦋙'), - (0x2F976, 'M', u'罺'), - (0x2F977, 'M', u'𦌾'), - (0x2F978, 'M', u'羕'), - (0x2F979, 'M', u'翺'), - (0x2F97A, 'M', u'者'), - (0x2F97B, 'M', u'𦓚'), - (0x2F97C, 'M', u'𦔣'), - (0x2F97D, 'M', u'è '), - (0x2F97E, 'M', u'𦖨'), - (0x2F97F, 'M', u'è°'), - (0x2F980, 'M', u'ð£Ÿ'), - (0x2F981, 'M', u'ä•'), - (0x2F982, 'M', u'育'), - (0x2F983, 'M', u'脃'), - (0x2F984, 'M', u'ä‹'), - (0x2F985, 'M', u'脾'), - (0x2F986, 'M', u'媵'), - (0x2F987, 'M', u'𦞧'), - (0x2F988, 'M', u'𦞵'), - (0x2F989, 'M', u'𣎓'), - (0x2F98A, 'M', u'𣎜'), - (0x2F98B, 'M', u'èˆ'), - (0x2F98C, 'M', u'舄'), - (0x2F98D, 'M', u'辞'), - (0x2F98E, 'M', u'ä‘«'), - (0x2F98F, 'M', u'芑'), - (0x2F990, 'M', u'芋'), - (0x2F991, 'M', u'èŠ'), - (0x2F992, 'M', u'劳'), - (0x2F993, 'M', u'花'), - (0x2F994, 'M', u'芳'), - (0x2F995, 'M', u'芽'), - (0x2F996, 'M', u'苦'), - (0x2F997, 'M', u'𦬼'), - (0x2F998, 'M', u'è‹¥'), - (0x2F999, 'M', u'èŒ'), - (0x2F99A, 'M', u'è£'), - (0x2F99B, 'M', u'莭'), - (0x2F99C, 'M', u'茣'), - (0x2F99D, 'M', u'莽'), - (0x2F99E, 'M', u'è§'), - (0x2F99F, 'M', u'è‘—'), - (0x2F9A0, 'M', u'è“'), - (0x2F9A1, 'M', u'èŠ'), - 
(0x2F9A2, 'M', u'èŒ'), - (0x2F9A3, 'M', u'èœ'), - (0x2F9A4, 'M', u'𦰶'), - (0x2F9A5, 'M', u'𦵫'), - (0x2F9A6, 'M', u'𦳕'), - (0x2F9A7, 'M', u'䔫'), - (0x2F9A8, 'M', u'蓱'), - (0x2F9A9, 'M', u'蓳'), - (0x2F9AA, 'M', u'è”–'), - (0x2F9AB, 'M', u'ð§Š'), - (0x2F9AC, 'M', u'蕤'), - (0x2F9AD, 'M', u'𦼬'), - (0x2F9AE, 'M', u'ä•'), - (0x2F9AF, 'M', u'ä•¡'), - (0x2F9B0, 'M', u'𦾱'), - (0x2F9B1, 'M', u'𧃒'), - (0x2F9B2, 'M', u'ä•«'), - (0x2F9B3, 'M', u'è™'), - (0x2F9B4, 'M', u'虜'), - (0x2F9B5, 'M', u'虧'), - (0x2F9B6, 'M', u'虩'), - (0x2F9B7, 'M', u'èš©'), - (0x2F9B8, 'M', u'蚈'), - (0x2F9B9, 'M', u'蜎'), - (0x2F9BA, 'M', u'蛢'), - ] - -def _seg_77(): - return [ - (0x2F9BB, 'M', u'è¹'), - (0x2F9BC, 'M', u'蜨'), - (0x2F9BD, 'M', u'è«'), - (0x2F9BE, 'M', u'螆'), - (0x2F9BF, 'X'), - (0x2F9C0, 'M', u'蟡'), - (0x2F9C1, 'M', u'è '), - (0x2F9C2, 'M', u'ä—¹'), - (0x2F9C3, 'M', u'è¡ '), - (0x2F9C4, 'M', u'è¡£'), - (0x2F9C5, 'M', u'𧙧'), - (0x2F9C6, 'M', u'裗'), - (0x2F9C7, 'M', u'裞'), - (0x2F9C8, 'M', u'䘵'), - (0x2F9C9, 'M', u'裺'), - (0x2F9CA, 'M', u'ã’»'), - (0x2F9CB, 'M', u'𧢮'), - (0x2F9CC, 'M', u'𧥦'), - (0x2F9CD, 'M', u'äš¾'), - (0x2F9CE, 'M', u'䛇'), - (0x2F9CF, 'M', u'誠'), - (0x2F9D0, 'M', u'è«­'), - (0x2F9D1, 'M', u'變'), - (0x2F9D2, 'M', u'豕'), - (0x2F9D3, 'M', u'𧲨'), - (0x2F9D4, 'M', u'貫'), - (0x2F9D5, 'M', u'è³'), - (0x2F9D6, 'M', u'è´›'), - (0x2F9D7, 'M', u'èµ·'), - (0x2F9D8, 'M', u'𧼯'), - (0x2F9D9, 'M', u'ð  „'), - (0x2F9DA, 'M', u'è·‹'), - (0x2F9DB, 'M', u'趼'), - (0x2F9DC, 'M', u'è·°'), - (0x2F9DD, 'M', u'𠣞'), - (0x2F9DE, 'M', u'è»”'), - (0x2F9DF, 'M', u'輸'), - (0x2F9E0, 'M', u'𨗒'), - (0x2F9E1, 'M', u'𨗭'), - (0x2F9E2, 'M', u'é‚”'), - (0x2F9E3, 'M', u'郱'), - (0x2F9E4, 'M', u'é„‘'), - (0x2F9E5, 'M', u'𨜮'), - (0x2F9E6, 'M', u'é„›'), - (0x2F9E7, 'M', u'鈸'), - (0x2F9E8, 'M', u'é‹—'), - (0x2F9E9, 'M', u'鋘'), - (0x2F9EA, 'M', u'鉼'), - (0x2F9EB, 'M', u'é¹'), - (0x2F9EC, 'M', u'é•'), - (0x2F9ED, 'M', u'𨯺'), - (0x2F9EE, 'M', u'é–‹'), - (0x2F9EF, 'M', u'䦕'), - (0x2F9F0, 'M', u'é–·'), - (0x2F9F1, 'M', 
u'𨵷'), - (0x2F9F2, 'M', u'䧦'), - (0x2F9F3, 'M', u'雃'), - (0x2F9F4, 'M', u'嶲'), - (0x2F9F5, 'M', u'霣'), - (0x2F9F6, 'M', u'ð©……'), - (0x2F9F7, 'M', u'𩈚'), - (0x2F9F8, 'M', u'ä©®'), - (0x2F9F9, 'M', u'䩶'), - (0x2F9FA, 'M', u'韠'), - (0x2F9FB, 'M', u'ð©Š'), - (0x2F9FC, 'M', u'䪲'), - (0x2F9FD, 'M', u'ð©’–'), - (0x2F9FE, 'M', u'é ‹'), - (0x2FA00, 'M', u'é ©'), - (0x2FA01, 'M', u'ð©–¶'), - (0x2FA02, 'M', u'飢'), - (0x2FA03, 'M', u'䬳'), - (0x2FA04, 'M', u'餩'), - (0x2FA05, 'M', u'馧'), - (0x2FA06, 'M', u'駂'), - (0x2FA07, 'M', u'駾'), - (0x2FA08, 'M', u'䯎'), - (0x2FA09, 'M', u'𩬰'), - (0x2FA0A, 'M', u'鬒'), - (0x2FA0B, 'M', u'é±€'), - (0x2FA0C, 'M', u'é³½'), - (0x2FA0D, 'M', u'䳎'), - (0x2FA0E, 'M', u'ä³­'), - (0x2FA0F, 'M', u'鵧'), - (0x2FA10, 'M', u'𪃎'), - (0x2FA11, 'M', u'䳸'), - (0x2FA12, 'M', u'𪄅'), - (0x2FA13, 'M', u'𪈎'), - (0x2FA14, 'M', u'𪊑'), - (0x2FA15, 'M', u'麻'), - (0x2FA16, 'M', u'äµ–'), - (0x2FA17, 'M', u'黹'), - (0x2FA18, 'M', u'黾'), - (0x2FA19, 'M', u'é¼…'), - (0x2FA1A, 'M', u'é¼'), - (0x2FA1B, 'M', u'é¼–'), - (0x2FA1C, 'M', u'é¼»'), - (0x2FA1D, 'M', u'𪘀'), - (0x2FA1E, 'X'), - (0xE0100, 'I'), - ] - -def _seg_78(): - return [ - (0xE01F0, 'X'), - ] - -uts46data = tuple( - _seg_0() - + _seg_1() - + _seg_2() - + _seg_3() - + _seg_4() - + _seg_5() - + _seg_6() - + _seg_7() - + _seg_8() - + _seg_9() - + _seg_10() - + _seg_11() - + _seg_12() - + _seg_13() - + _seg_14() - + _seg_15() - + _seg_16() - + _seg_17() - + _seg_18() - + _seg_19() - + _seg_20() - + _seg_21() - + _seg_22() - + _seg_23() - + _seg_24() - + _seg_25() - + _seg_26() - + _seg_27() - + _seg_28() - + _seg_29() - + _seg_30() - + _seg_31() - + _seg_32() - + _seg_33() - + _seg_34() - + _seg_35() - + _seg_36() - + _seg_37() - + _seg_38() - + _seg_39() - + _seg_40() - + _seg_41() - + _seg_42() - + _seg_43() - + _seg_44() - + _seg_45() - + _seg_46() - + _seg_47() - + _seg_48() - + _seg_49() - + _seg_50() - + _seg_51() - + _seg_52() - + _seg_53() - + _seg_54() - + _seg_55() - + _seg_56() - + _seg_57() - + _seg_58() - 
+ _seg_59() - + _seg_60() - + _seg_61() - + _seg_62() - + _seg_63() - + _seg_64() - + _seg_65() - + _seg_66() - + _seg_67() - + _seg_68() - + _seg_69() - + _seg_70() - + _seg_71() - + _seg_72() - + _seg_73() - + _seg_74() - + _seg_75() - + _seg_76() - + _seg_77() - + _seg_78() -) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/ipaddress.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/ipaddress.py deleted file mode 100644 index f2d0766..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/ipaddress.py +++ /dev/null @@ -1,2419 +0,0 @@ -# Copyright 2007 Google Inc. -# Licensed to PSF under a Contributor Agreement. - -"""A fast, lightweight IPv4/IPv6 manipulation library in Python. - -This library is used to create/poke/manipulate IPv4 and IPv6 addresses -and networks. - -""" - -from __future__ import unicode_literals - - -import itertools -import struct - -__version__ = '1.0.22' - -# Compatibility functions -_compat_int_types = (int,) -try: - _compat_int_types = (int, long) -except NameError: - pass -try: - _compat_str = unicode -except NameError: - _compat_str = str - assert bytes != str -if b'\0'[0] == 0: # Python 3 semantics - def _compat_bytes_to_byte_vals(byt): - return byt -else: - def _compat_bytes_to_byte_vals(byt): - return [struct.unpack(b'!B', b)[0] for b in byt] -try: - _compat_int_from_byte_vals = int.from_bytes -except AttributeError: - def _compat_int_from_byte_vals(bytvals, endianess): - assert endianess == 'big' - res = 0 - for bv in bytvals: - assert isinstance(bv, _compat_int_types) - res = (res << 8) + bv - return res - - -def _compat_to_bytes(intval, length, endianess): - assert isinstance(intval, _compat_int_types) - assert endianess == 'big' - if length == 4: - if intval < 0 or intval >= 2 ** 32: - raise struct.error("integer out of range for 'I' format code") - return struct.pack(b'!I', intval) - elif length == 16: - if intval < 0 or intval >= 2 ** 128: - raise 
struct.error("integer out of range for 'QQ' format code") - return struct.pack(b'!QQ', intval >> 64, intval & 0xffffffffffffffff) - else: - raise NotImplementedError() - - -if hasattr(int, 'bit_length'): - # Not int.bit_length , since that won't work in 2.7 where long exists - def _compat_bit_length(i): - return i.bit_length() -else: - def _compat_bit_length(i): - for res in itertools.count(): - if i >> res == 0: - return res - - -def _compat_range(start, end, step=1): - assert step > 0 - i = start - while i < end: - yield i - i += step - - -class _TotalOrderingMixin(object): - __slots__ = () - - # Helper that derives the other comparison operations from - # __lt__ and __eq__ - # We avoid functools.total_ordering because it doesn't handle - # NotImplemented correctly yet (http://bugs.python.org/issue10042) - def __eq__(self, other): - raise NotImplementedError - - def __ne__(self, other): - equal = self.__eq__(other) - if equal is NotImplemented: - return NotImplemented - return not equal - - def __lt__(self, other): - raise NotImplementedError - - def __le__(self, other): - less = self.__lt__(other) - if less is NotImplemented or not less: - return self.__eq__(other) - return less - - def __gt__(self, other): - less = self.__lt__(other) - if less is NotImplemented: - return NotImplemented - equal = self.__eq__(other) - if equal is NotImplemented: - return NotImplemented - return not (less or equal) - - def __ge__(self, other): - less = self.__lt__(other) - if less is NotImplemented: - return NotImplemented - return not less - - -IPV4LENGTH = 32 -IPV6LENGTH = 128 - - -class AddressValueError(ValueError): - """A Value Error related to the address.""" - - -class NetmaskValueError(ValueError): - """A Value Error related to the netmask.""" - - -def ip_address(address): - """Take an IP string/int and return an object of the correct type. - - Args: - address: A string or integer, the IP address. 
Either IPv4 or - IPv6 addresses may be supplied; integers less than 2**32 will - be considered to be IPv4 by default. - - Returns: - An IPv4Address or IPv6Address object. - - Raises: - ValueError: if the *address* passed isn't either a v4 or a v6 - address - - """ - try: - return IPv4Address(address) - except (AddressValueError, NetmaskValueError): - pass - - try: - return IPv6Address(address) - except (AddressValueError, NetmaskValueError): - pass - - if isinstance(address, bytes): - raise AddressValueError( - '%r does not appear to be an IPv4 or IPv6 address. ' - 'Did you pass in a bytes (str in Python 2) instead of' - ' a unicode object?' % address) - - raise ValueError('%r does not appear to be an IPv4 or IPv6 address' % - address) - - -def ip_network(address, strict=True): - """Take an IP string/int and return an object of the correct type. - - Args: - address: A string or integer, the IP network. Either IPv4 or - IPv6 networks may be supplied; integers less than 2**32 will - be considered to be IPv4 by default. - - Returns: - An IPv4Network or IPv6Network object. - - Raises: - ValueError: if the string passed isn't either a v4 or a v6 - address. Or if the network has host bits set. - - """ - try: - return IPv4Network(address, strict) - except (AddressValueError, NetmaskValueError): - pass - - try: - return IPv6Network(address, strict) - except (AddressValueError, NetmaskValueError): - pass - - if isinstance(address, bytes): - raise AddressValueError( - '%r does not appear to be an IPv4 or IPv6 network. ' - 'Did you pass in a bytes (str in Python 2) instead of' - ' a unicode object?' % address) - - raise ValueError('%r does not appear to be an IPv4 or IPv6 network' % - address) - - -def ip_interface(address): - """Take an IP string/int and return an object of the correct type. - - Args: - address: A string or integer, the IP address. Either IPv4 or - IPv6 addresses may be supplied; integers less than 2**32 will - be considered to be IPv4 by default. 
- - Returns: - An IPv4Interface or IPv6Interface object. - - Raises: - ValueError: if the string passed isn't either a v4 or a v6 - address. - - Notes: - The IPv?Interface classes describe an Address on a particular - Network, so they're basically a combination of both the Address - and Network classes. - - """ - try: - return IPv4Interface(address) - except (AddressValueError, NetmaskValueError): - pass - - try: - return IPv6Interface(address) - except (AddressValueError, NetmaskValueError): - pass - - raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' % - address) - - -def v4_int_to_packed(address): - """Represent an address as 4 packed bytes in network (big-endian) order. - - Args: - address: An integer representation of an IPv4 IP address. - - Returns: - The integer address packed as 4 bytes in network (big-endian) order. - - Raises: - ValueError: If the integer is negative or too large to be an - IPv4 IP address. - - """ - try: - return _compat_to_bytes(address, 4, 'big') - except (struct.error, OverflowError): - raise ValueError("Address negative or too large for IPv4") - - -def v6_int_to_packed(address): - """Represent an address as 16 packed bytes in network (big-endian) order. - - Args: - address: An integer representation of an IPv6 IP address. - - Returns: - The integer address packed as 16 bytes in network (big-endian) order. - - """ - try: - return _compat_to_bytes(address, 16, 'big') - except (struct.error, OverflowError): - raise ValueError("Address negative or too large for IPv6") - - -def _split_optional_netmask(address): - """Helper to split the netmask and raise AddressValueError if needed""" - addr = _compat_str(address).split('/') - if len(addr) > 2: - raise AddressValueError("Only one '/' permitted in %r" % address) - return addr - - -def _find_address_range(addresses): - """Find a sequence of sorted deduplicated IPv#Address. - - Args: - addresses: a list of IPv#Address objects. 
- - Yields: - A tuple containing the first and last IP addresses in the sequence. - - """ - it = iter(addresses) - first = last = next(it) - for ip in it: - if ip._ip != last._ip + 1: - yield first, last - first = ip - last = ip - yield first, last - - -def _count_righthand_zero_bits(number, bits): - """Count the number of zero bits on the right hand side. - - Args: - number: an integer. - bits: maximum number of bits to count. - - Returns: - The number of zero bits on the right hand side of the number. - - """ - if number == 0: - return bits - return min(bits, _compat_bit_length(~number & (number - 1))) - - -def summarize_address_range(first, last): - """Summarize a network range given the first and last IP addresses. - - Example: - >>> list(summarize_address_range(IPv4Address('192.0.2.0'), - ... IPv4Address('192.0.2.130'))) - ... #doctest: +NORMALIZE_WHITESPACE - [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'), - IPv4Network('192.0.2.130/32')] - - Args: - first: the first IPv4Address or IPv6Address in the range. - last: the last IPv4Address or IPv6Address in the range. - - Returns: - An iterator of the summarized IPv(4|6) network objects. - - Raise: - TypeError: - If the first and last objects are not IP addresses. - If the first and last objects are not the same version. - ValueError: - If the last object is not greater than the first. - If the version of the first address is not 4 or 6. 
- - """ - if (not (isinstance(first, _BaseAddress) and - isinstance(last, _BaseAddress))): - raise TypeError('first and last must be IP addresses, not networks') - if first.version != last.version: - raise TypeError("%s and %s are not of the same version" % ( - first, last)) - if first > last: - raise ValueError('last IP address must be greater than first') - - if first.version == 4: - ip = IPv4Network - elif first.version == 6: - ip = IPv6Network - else: - raise ValueError('unknown IP version') - - ip_bits = first._max_prefixlen - first_int = first._ip - last_int = last._ip - while first_int <= last_int: - nbits = min(_count_righthand_zero_bits(first_int, ip_bits), - _compat_bit_length(last_int - first_int + 1) - 1) - net = ip((first_int, ip_bits - nbits)) - yield net - first_int += 1 << nbits - if first_int - 1 == ip._ALL_ONES: - break - - -def _collapse_addresses_internal(addresses): - """Loops through the addresses, collapsing concurrent netblocks. - - Example: - - ip1 = IPv4Network('192.0.2.0/26') - ip2 = IPv4Network('192.0.2.64/26') - ip3 = IPv4Network('192.0.2.128/26') - ip4 = IPv4Network('192.0.2.192/26') - - _collapse_addresses_internal([ip1, ip2, ip3, ip4]) -> - [IPv4Network('192.0.2.0/24')] - - This shouldn't be called directly; it is called via - collapse_addresses([]). - - Args: - addresses: A list of IPv4Network's or IPv6Network's - - Returns: - A list of IPv4Network's or IPv6Network's depending on what we were - passed. 
- - """ - # First merge - to_merge = list(addresses) - subnets = {} - while to_merge: - net = to_merge.pop() - supernet = net.supernet() - existing = subnets.get(supernet) - if existing is None: - subnets[supernet] = net - elif existing != net: - # Merge consecutive subnets - del subnets[supernet] - to_merge.append(supernet) - # Then iterate over resulting networks, skipping subsumed subnets - last = None - for net in sorted(subnets.values()): - if last is not None: - # Since they are sorted, - # last.network_address <= net.network_address is a given. - if last.broadcast_address >= net.broadcast_address: - continue - yield net - last = net - - -def collapse_addresses(addresses): - """Collapse a list of IP objects. - - Example: - collapse_addresses([IPv4Network('192.0.2.0/25'), - IPv4Network('192.0.2.128/25')]) -> - [IPv4Network('192.0.2.0/24')] - - Args: - addresses: An iterator of IPv4Network or IPv6Network objects. - - Returns: - An iterator of the collapsed IPv(4|6)Network objects. - - Raises: - TypeError: If passed a list of mixed version objects. 
- - """ - addrs = [] - ips = [] - nets = [] - - # split IP addresses and networks - for ip in addresses: - if isinstance(ip, _BaseAddress): - if ips and ips[-1]._version != ip._version: - raise TypeError("%s and %s are not of the same version" % ( - ip, ips[-1])) - ips.append(ip) - elif ip._prefixlen == ip._max_prefixlen: - if ips and ips[-1]._version != ip._version: - raise TypeError("%s and %s are not of the same version" % ( - ip, ips[-1])) - try: - ips.append(ip.ip) - except AttributeError: - ips.append(ip.network_address) - else: - if nets and nets[-1]._version != ip._version: - raise TypeError("%s and %s are not of the same version" % ( - ip, nets[-1])) - nets.append(ip) - - # sort and dedup - ips = sorted(set(ips)) - - # find consecutive address ranges in the sorted sequence and summarize them - if ips: - for first, last in _find_address_range(ips): - addrs.extend(summarize_address_range(first, last)) - - return _collapse_addresses_internal(addrs + nets) - - -def get_mixed_type_key(obj): - """Return a key suitable for sorting between networks and addresses. - - Address and Network objects are not sortable by default; they're - fundamentally different so the expression - - IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24') - - doesn't make any sense. There are some times however, where you may wish - to have ipaddress sort these for you anyway. If you need to do this, you - can use this function as the key= argument to sorted(). - - Args: - obj: either a Network or Address object. - Returns: - appropriate key. 
- - """ - if isinstance(obj, _BaseNetwork): - return obj._get_networks_key() - elif isinstance(obj, _BaseAddress): - return obj._get_address_key() - return NotImplemented - - -class _IPAddressBase(_TotalOrderingMixin): - - """The mother class.""" - - __slots__ = () - - @property - def exploded(self): - """Return the longhand version of the IP address as a string.""" - return self._explode_shorthand_ip_string() - - @property - def compressed(self): - """Return the shorthand version of the IP address as a string.""" - return _compat_str(self) - - @property - def reverse_pointer(self): - """The name of the reverse DNS pointer for the IP address, e.g.: - >>> ipaddress.ip_address("127.0.0.1").reverse_pointer - '1.0.0.127.in-addr.arpa' - >>> ipaddress.ip_address("2001:db8::1").reverse_pointer - '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa' - - """ - return self._reverse_pointer() - - @property - def version(self): - msg = '%200s has no version specified' % (type(self),) - raise NotImplementedError(msg) - - def _check_int_address(self, address): - if address < 0: - msg = "%d (< 0) is not permitted as an IPv%d address" - raise AddressValueError(msg % (address, self._version)) - if address > self._ALL_ONES: - msg = "%d (>= 2**%d) is not permitted as an IPv%d address" - raise AddressValueError(msg % (address, self._max_prefixlen, - self._version)) - - def _check_packed_address(self, address, expected_len): - address_len = len(address) - if address_len != expected_len: - msg = ( - '%r (len %d != %d) is not permitted as an IPv%d address. ' - 'Did you pass in a bytes (str in Python 2) instead of' - ' a unicode object?') - raise AddressValueError(msg % (address, address_len, - expected_len, self._version)) - - @classmethod - def _ip_int_from_prefix(cls, prefixlen): - """Turn the prefix length into a bitwise netmask - - Args: - prefixlen: An integer, the prefix length. - - Returns: - An integer. 
- - """ - return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen) - - @classmethod - def _prefix_from_ip_int(cls, ip_int): - """Return prefix length from the bitwise netmask. - - Args: - ip_int: An integer, the netmask in expanded bitwise format - - Returns: - An integer, the prefix length. - - Raises: - ValueError: If the input intermingles zeroes & ones - """ - trailing_zeroes = _count_righthand_zero_bits(ip_int, - cls._max_prefixlen) - prefixlen = cls._max_prefixlen - trailing_zeroes - leading_ones = ip_int >> trailing_zeroes - all_ones = (1 << prefixlen) - 1 - if leading_ones != all_ones: - byteslen = cls._max_prefixlen // 8 - details = _compat_to_bytes(ip_int, byteslen, 'big') - msg = 'Netmask pattern %r mixes zeroes & ones' - raise ValueError(msg % details) - return prefixlen - - @classmethod - def _report_invalid_netmask(cls, netmask_str): - msg = '%r is not a valid netmask' % netmask_str - raise NetmaskValueError(msg) - - @classmethod - def _prefix_from_prefix_string(cls, prefixlen_str): - """Return prefix length from a numeric string - - Args: - prefixlen_str: The string to be converted - - Returns: - An integer, the prefix length. - - Raises: - NetmaskValueError: If the input is not a valid netmask - """ - # int allows a leading +/- as well as surrounding whitespace, - # so we ensure that isn't the case - if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str): - cls._report_invalid_netmask(prefixlen_str) - try: - prefixlen = int(prefixlen_str) - except ValueError: - cls._report_invalid_netmask(prefixlen_str) - if not (0 <= prefixlen <= cls._max_prefixlen): - cls._report_invalid_netmask(prefixlen_str) - return prefixlen - - @classmethod - def _prefix_from_ip_string(cls, ip_str): - """Turn a netmask/hostmask string into a prefix length - - Args: - ip_str: The netmask/hostmask to be converted - - Returns: - An integer, the prefix length. 
- - Raises: - NetmaskValueError: If the input is not a valid netmask/hostmask - """ - # Parse the netmask/hostmask like an IP address. - try: - ip_int = cls._ip_int_from_string(ip_str) - except AddressValueError: - cls._report_invalid_netmask(ip_str) - - # Try matching a netmask (this would be /1*0*/ as a bitwise regexp). - # Note that the two ambiguous cases (all-ones and all-zeroes) are - # treated as netmasks. - try: - return cls._prefix_from_ip_int(ip_int) - except ValueError: - pass - - # Invert the bits, and try matching a /0+1+/ hostmask instead. - ip_int ^= cls._ALL_ONES - try: - return cls._prefix_from_ip_int(ip_int) - except ValueError: - cls._report_invalid_netmask(ip_str) - - def __reduce__(self): - return self.__class__, (_compat_str(self),) - - -class _BaseAddress(_IPAddressBase): - - """A generic IP object. - - This IP class contains the version independent methods which are - used by single IP addresses. - """ - - __slots__ = () - - def __int__(self): - return self._ip - - def __eq__(self, other): - try: - return (self._ip == other._ip and - self._version == other._version) - except AttributeError: - return NotImplemented - - def __lt__(self, other): - if not isinstance(other, _IPAddressBase): - return NotImplemented - if not isinstance(other, _BaseAddress): - raise TypeError('%s and %s are not of the same type' % ( - self, other)) - if self._version != other._version: - raise TypeError('%s and %s are not of the same version' % ( - self, other)) - if self._ip != other._ip: - return self._ip < other._ip - return False - - # Shorthand for Integer addition and subtraction. This is not - # meant to ever support addition/subtraction of addresses. 
- def __add__(self, other): - if not isinstance(other, _compat_int_types): - return NotImplemented - return self.__class__(int(self) + other) - - def __sub__(self, other): - if not isinstance(other, _compat_int_types): - return NotImplemented - return self.__class__(int(self) - other) - - def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, _compat_str(self)) - - def __str__(self): - return _compat_str(self._string_from_ip_int(self._ip)) - - def __hash__(self): - return hash(hex(int(self._ip))) - - def _get_address_key(self): - return (self._version, self) - - def __reduce__(self): - return self.__class__, (self._ip,) - - -class _BaseNetwork(_IPAddressBase): - - """A generic IP network object. - - This IP class contains the version independent methods which are - used by networks. - - """ - def __init__(self, address): - self._cache = {} - - def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, _compat_str(self)) - - def __str__(self): - return '%s/%d' % (self.network_address, self.prefixlen) - - def hosts(self): - """Generate Iterator over usable hosts in a network. - - This is like __iter__ except it doesn't return the network - or broadcast addresses. 
- - """ - network = int(self.network_address) - broadcast = int(self.broadcast_address) - for x in _compat_range(network + 1, broadcast): - yield self._address_class(x) - - def __iter__(self): - network = int(self.network_address) - broadcast = int(self.broadcast_address) - for x in _compat_range(network, broadcast + 1): - yield self._address_class(x) - - def __getitem__(self, n): - network = int(self.network_address) - broadcast = int(self.broadcast_address) - if n >= 0: - if network + n > broadcast: - raise IndexError('address out of range') - return self._address_class(network + n) - else: - n += 1 - if broadcast + n < network: - raise IndexError('address out of range') - return self._address_class(broadcast + n) - - def __lt__(self, other): - if not isinstance(other, _IPAddressBase): - return NotImplemented - if not isinstance(other, _BaseNetwork): - raise TypeError('%s and %s are not of the same type' % ( - self, other)) - if self._version != other._version: - raise TypeError('%s and %s are not of the same version' % ( - self, other)) - if self.network_address != other.network_address: - return self.network_address < other.network_address - if self.netmask != other.netmask: - return self.netmask < other.netmask - return False - - def __eq__(self, other): - try: - return (self._version == other._version and - self.network_address == other.network_address and - int(self.netmask) == int(other.netmask)) - except AttributeError: - return NotImplemented - - def __hash__(self): - return hash(int(self.network_address) ^ int(self.netmask)) - - def __contains__(self, other): - # always false if one is v4 and the other is v6. - if self._version != other._version: - return False - # dealing with another network. 
- if isinstance(other, _BaseNetwork): - return False - # dealing with another address - else: - # address - return (int(self.network_address) <= int(other._ip) <= - int(self.broadcast_address)) - - def overlaps(self, other): - """Tell if self is partly contained in other.""" - return self.network_address in other or ( - self.broadcast_address in other or ( - other.network_address in self or ( - other.broadcast_address in self))) - - @property - def broadcast_address(self): - x = self._cache.get('broadcast_address') - if x is None: - x = self._address_class(int(self.network_address) | - int(self.hostmask)) - self._cache['broadcast_address'] = x - return x - - @property - def hostmask(self): - x = self._cache.get('hostmask') - if x is None: - x = self._address_class(int(self.netmask) ^ self._ALL_ONES) - self._cache['hostmask'] = x - return x - - @property - def with_prefixlen(self): - return '%s/%d' % (self.network_address, self._prefixlen) - - @property - def with_netmask(self): - return '%s/%s' % (self.network_address, self.netmask) - - @property - def with_hostmask(self): - return '%s/%s' % (self.network_address, self.hostmask) - - @property - def num_addresses(self): - """Number of hosts in the current subnet.""" - return int(self.broadcast_address) - int(self.network_address) + 1 - - @property - def _address_class(self): - # Returning bare address objects (rather than interfaces) allows for - # more consistent behaviour across the network address, broadcast - # address and individual host addresses. - msg = '%200s has no associated address class' % (type(self),) - raise NotImplementedError(msg) - - @property - def prefixlen(self): - return self._prefixlen - - def address_exclude(self, other): - """Remove an address from a larger block. 
- - For example: - - addr1 = ip_network('192.0.2.0/28') - addr2 = ip_network('192.0.2.1/32') - list(addr1.address_exclude(addr2)) = - [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'), - IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')] - - or IPv6: - - addr1 = ip_network('2001:db8::1/32') - addr2 = ip_network('2001:db8::1/128') - list(addr1.address_exclude(addr2)) = - [ip_network('2001:db8::1/128'), - ip_network('2001:db8::2/127'), - ip_network('2001:db8::4/126'), - ip_network('2001:db8::8/125'), - ... - ip_network('2001:db8:8000::/33')] - - Args: - other: An IPv4Network or IPv6Network object of the same type. - - Returns: - An iterator of the IPv(4|6)Network objects which is self - minus other. - - Raises: - TypeError: If self and other are of differing address - versions, or if other is not a network object. - ValueError: If other is not completely contained by self. - - """ - if not self._version == other._version: - raise TypeError("%s and %s are not of the same version" % ( - self, other)) - - if not isinstance(other, _BaseNetwork): - raise TypeError("%s is not a network object" % other) - - if not other.subnet_of(self): - raise ValueError('%s not contained in %s' % (other, self)) - if other == self: - return - - # Make sure we're comparing the network of other. - other = other.__class__('%s/%s' % (other.network_address, - other.prefixlen)) - - s1, s2 = self.subnets() - while s1 != other and s2 != other: - if other.subnet_of(s1): - yield s2 - s1, s2 = s1.subnets() - elif other.subnet_of(s2): - yield s1 - s1, s2 = s2.subnets() - else: - # If we got here, there's a bug somewhere. - raise AssertionError('Error performing exclusion: ' - 's1: %s s2: %s other: %s' % - (s1, s2, other)) - if s1 == other: - yield s2 - elif s2 == other: - yield s1 - else: - # If we got here, there's a bug somewhere. 
- raise AssertionError('Error performing exclusion: ' - 's1: %s s2: %s other: %s' % - (s1, s2, other)) - - def compare_networks(self, other): - """Compare two IP objects. - - This is only concerned about the comparison of the integer - representation of the network addresses. This means that the - host bits aren't considered at all in this method. If you want - to compare host bits, you can easily enough do a - 'HostA._ip < HostB._ip' - - Args: - other: An IP object. - - Returns: - If the IP versions of self and other are the same, returns: - - -1 if self < other: - eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25') - IPv6Network('2001:db8::1000/124') < - IPv6Network('2001:db8::2000/124') - 0 if self == other - eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24') - IPv6Network('2001:db8::1000/124') == - IPv6Network('2001:db8::1000/124') - 1 if self > other - eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25') - IPv6Network('2001:db8::2000/124') > - IPv6Network('2001:db8::1000/124') - - Raises: - TypeError if the IP versions are different. - - """ - # does this need to raise a ValueError? - if self._version != other._version: - raise TypeError('%s and %s are not of the same type' % ( - self, other)) - # self._version == other._version below here: - if self.network_address < other.network_address: - return -1 - if self.network_address > other.network_address: - return 1 - # self.network_address == other.network_address below here: - if self.netmask < other.netmask: - return -1 - if self.netmask > other.netmask: - return 1 - return 0 - - def _get_networks_key(self): - """Network-only key function. - - Returns an object that identifies this address' network and - netmask. This function is a suitable "key" argument for sorted() - and list.sort(). - - """ - return (self._version, self.network_address, self.netmask) - - def subnets(self, prefixlen_diff=1, new_prefix=None): - """The subnets which join to make the current subnet. 
- - In the case that self contains only one IP - (self._prefixlen == 32 for IPv4 or self._prefixlen == 128 - for IPv6), yield an iterator with just ourself. - - Args: - prefixlen_diff: An integer, the amount the prefix length - should be increased by. This should not be set if - new_prefix is also set. - new_prefix: The desired new prefix length. This must be a - larger number (smaller prefix) than the existing prefix. - This should not be set if prefixlen_diff is also set. - - Returns: - An iterator of IPv(4|6) objects. - - Raises: - ValueError: The prefixlen_diff is too small or too large. - OR - prefixlen_diff and new_prefix are both set or new_prefix - is a smaller number than the current prefix (smaller - number means a larger network) - - """ - if self._prefixlen == self._max_prefixlen: - yield self - return - - if new_prefix is not None: - if new_prefix < self._prefixlen: - raise ValueError('new prefix must be longer') - if prefixlen_diff != 1: - raise ValueError('cannot set prefixlen_diff and new_prefix') - prefixlen_diff = new_prefix - self._prefixlen - - if prefixlen_diff < 0: - raise ValueError('prefix length diff must be > 0') - new_prefixlen = self._prefixlen + prefixlen_diff - - if new_prefixlen > self._max_prefixlen: - raise ValueError( - 'prefix length diff %d is invalid for netblock %s' % ( - new_prefixlen, self)) - - start = int(self.network_address) - end = int(self.broadcast_address) + 1 - step = (int(self.hostmask) + 1) >> prefixlen_diff - for new_addr in _compat_range(start, end, step): - current = self.__class__((new_addr, new_prefixlen)) - yield current - - def supernet(self, prefixlen_diff=1, new_prefix=None): - """The supernet containing the current network. - - Args: - prefixlen_diff: An integer, the amount the prefix length of - the network should be decreased by. For example, given a - /24 network and a prefixlen_diff of 3, a supernet with a - /21 netmask is returned. - - Returns: - An IPv4 network object. 
- - Raises: - ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have - a negative prefix length. - OR - If prefixlen_diff and new_prefix are both set or new_prefix is a - larger number than the current prefix (larger number means a - smaller network) - - """ - if self._prefixlen == 0: - return self - - if new_prefix is not None: - if new_prefix > self._prefixlen: - raise ValueError('new prefix must be shorter') - if prefixlen_diff != 1: - raise ValueError('cannot set prefixlen_diff and new_prefix') - prefixlen_diff = self._prefixlen - new_prefix - - new_prefixlen = self.prefixlen - prefixlen_diff - if new_prefixlen < 0: - raise ValueError( - 'current prefixlen is %d, cannot have a prefixlen_diff of %d' % - (self.prefixlen, prefixlen_diff)) - return self.__class__(( - int(self.network_address) & (int(self.netmask) << prefixlen_diff), - new_prefixlen)) - - @property - def is_multicast(self): - """Test if the address is reserved for multicast use. - - Returns: - A boolean, True if the address is a multicast address. - See RFC 2373 2.7 for details. - - """ - return (self.network_address.is_multicast and - self.broadcast_address.is_multicast) - - @staticmethod - def _is_subnet_of(a, b): - try: - # Always false if one is v4 and the other is v6. - if a._version != b._version: - raise TypeError("%s and %s are not of the same version" (a, b)) - return (b.network_address <= a.network_address and - b.broadcast_address >= a.broadcast_address) - except AttributeError: - raise TypeError("Unable to test subnet containment " - "between %s and %s" % (a, b)) - - def subnet_of(self, other): - """Return True if this network is a subnet of other.""" - return self._is_subnet_of(self, other) - - def supernet_of(self, other): - """Return True if this network is a supernet of other.""" - return self._is_subnet_of(other, self) - - @property - def is_reserved(self): - """Test if the address is otherwise IETF reserved. 
- - Returns: - A boolean, True if the address is within one of the - reserved IPv6 Network ranges. - - """ - return (self.network_address.is_reserved and - self.broadcast_address.is_reserved) - - @property - def is_link_local(self): - """Test if the address is reserved for link-local. - - Returns: - A boolean, True if the address is reserved per RFC 4291. - - """ - return (self.network_address.is_link_local and - self.broadcast_address.is_link_local) - - @property - def is_private(self): - """Test if this address is allocated for private networks. - - Returns: - A boolean, True if the address is reserved per - iana-ipv4-special-registry or iana-ipv6-special-registry. - - """ - return (self.network_address.is_private and - self.broadcast_address.is_private) - - @property - def is_global(self): - """Test if this address is allocated for public networks. - - Returns: - A boolean, True if the address is not reserved per - iana-ipv4-special-registry or iana-ipv6-special-registry. - - """ - return not self.is_private - - @property - def is_unspecified(self): - """Test if the address is unspecified. - - Returns: - A boolean, True if this is the unspecified address as defined in - RFC 2373 2.5.2. - - """ - return (self.network_address.is_unspecified and - self.broadcast_address.is_unspecified) - - @property - def is_loopback(self): - """Test if the address is a loopback address. - - Returns: - A boolean, True if the address is a loopback address as defined in - RFC 2373 2.5.3. - - """ - return (self.network_address.is_loopback and - self.broadcast_address.is_loopback) - - -class _BaseV4(object): - - """Base IPv4 object. - - The following methods are used by IPv4 objects in both single IP - addresses and networks. - - """ - - __slots__ = () - _version = 4 - # Equivalent to 255.255.255.255 or 32 bits of 1's. - _ALL_ONES = (2 ** IPV4LENGTH) - 1 - _DECIMAL_DIGITS = frozenset('0123456789') - - # the valid octets for host and netmasks. only useful for IPv4. 
- _valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0]) - - _max_prefixlen = IPV4LENGTH - # There are only a handful of valid v4 netmasks, so we cache them all - # when constructed (see _make_netmask()). - _netmask_cache = {} - - def _explode_shorthand_ip_string(self): - return _compat_str(self) - - @classmethod - def _make_netmask(cls, arg): - """Make a (netmask, prefix_len) tuple from the given argument. - - Argument can be: - - an integer (the prefix length) - - a string representing the prefix length (e.g. "24") - - a string representing the prefix netmask (e.g. "255.255.255.0") - """ - if arg not in cls._netmask_cache: - if isinstance(arg, _compat_int_types): - prefixlen = arg - else: - try: - # Check for a netmask in prefix length form - prefixlen = cls._prefix_from_prefix_string(arg) - except NetmaskValueError: - # Check for a netmask or hostmask in dotted-quad form. - # This may raise NetmaskValueError. - prefixlen = cls._prefix_from_ip_string(arg) - netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen)) - cls._netmask_cache[arg] = netmask, prefixlen - return cls._netmask_cache[arg] - - @classmethod - def _ip_int_from_string(cls, ip_str): - """Turn the given IP string into an integer for comparison. - - Args: - ip_str: A string, the IP ip_str. - - Returns: - The IP ip_str as an integer. - - Raises: - AddressValueError: if ip_str isn't a valid IPv4 Address. - - """ - if not ip_str: - raise AddressValueError('Address cannot be empty') - - octets = ip_str.split('.') - if len(octets) != 4: - raise AddressValueError("Expected 4 octets in %r" % ip_str) - - try: - return _compat_int_from_byte_vals( - map(cls._parse_octet, octets), 'big') - except ValueError as exc: - raise AddressValueError("%s in %r" % (exc, ip_str)) - - @classmethod - def _parse_octet(cls, octet_str): - """Convert a decimal octet into an integer. - - Args: - octet_str: A string, the number to parse. - - Returns: - The octet as an integer. 
- - Raises: - ValueError: if the octet isn't strictly a decimal from [0..255]. - - """ - if not octet_str: - raise ValueError("Empty octet not permitted") - # Whitelist the characters, since int() allows a lot of bizarre stuff. - if not cls._DECIMAL_DIGITS.issuperset(octet_str): - msg = "Only decimal digits permitted in %r" - raise ValueError(msg % octet_str) - # We do the length check second, since the invalid character error - # is likely to be more informative for the user - if len(octet_str) > 3: - msg = "At most 3 characters permitted in %r" - raise ValueError(msg % octet_str) - # Convert to integer (we know digits are legal) - octet_int = int(octet_str, 10) - # Any octets that look like they *might* be written in octal, - # and which don't look exactly the same in both octal and - # decimal are rejected as ambiguous - if octet_int > 7 and octet_str[0] == '0': - msg = "Ambiguous (octal/decimal) value in %r not permitted" - raise ValueError(msg % octet_str) - if octet_int > 255: - raise ValueError("Octet %d (> 255) not permitted" % octet_int) - return octet_int - - @classmethod - def _string_from_ip_int(cls, ip_int): - """Turns a 32-bit integer into dotted decimal notation. - - Args: - ip_int: An integer, the IP address. - - Returns: - The IP address as a string in dotted decimal notation. - - """ - return '.'.join(_compat_str(struct.unpack(b'!B', b)[0] - if isinstance(b, bytes) - else b) - for b in _compat_to_bytes(ip_int, 4, 'big')) - - def _is_hostmask(self, ip_str): - """Test if the IP string is a hostmask (rather than a netmask). - - Args: - ip_str: A string, the potential hostmask. - - Returns: - A boolean, True if the IP string is a hostmask. 
- - """ - bits = ip_str.split('.') - try: - parts = [x for x in map(int, bits) if x in self._valid_mask_octets] - except ValueError: - return False - if len(parts) != len(bits): - return False - if parts[0] < parts[-1]: - return True - return False - - def _reverse_pointer(self): - """Return the reverse DNS pointer name for the IPv4 address. - - This implements the method described in RFC1035 3.5. - - """ - reverse_octets = _compat_str(self).split('.')[::-1] - return '.'.join(reverse_octets) + '.in-addr.arpa' - - @property - def max_prefixlen(self): - return self._max_prefixlen - - @property - def version(self): - return self._version - - -class IPv4Address(_BaseV4, _BaseAddress): - - """Represent and manipulate single IPv4 Addresses.""" - - __slots__ = ('_ip', '__weakref__') - - def __init__(self, address): - - """ - Args: - address: A string or integer representing the IP - - Additionally, an integer can be passed, so - IPv4Address('192.0.2.1') == IPv4Address(3221225985). - or, more generally - IPv4Address(int(IPv4Address('192.0.2.1'))) == - IPv4Address('192.0.2.1') - - Raises: - AddressValueError: If ipaddress isn't a valid IPv4 address. - - """ - # Efficient constructor from integer. - if isinstance(address, _compat_int_types): - self._check_int_address(address) - self._ip = address - return - - # Constructing from a packed address - if isinstance(address, bytes): - self._check_packed_address(address, 4) - bvs = _compat_bytes_to_byte_vals(address) - self._ip = _compat_int_from_byte_vals(bvs, 'big') - return - - # Assume input argument to be string or any object representation - # which converts into a formatted IP string. 
- addr_str = _compat_str(address) - if '/' in addr_str: - raise AddressValueError("Unexpected '/' in %r" % address) - self._ip = self._ip_int_from_string(addr_str) - - @property - def packed(self): - """The binary representation of this address.""" - return v4_int_to_packed(self._ip) - - @property - def is_reserved(self): - """Test if the address is otherwise IETF reserved. - - Returns: - A boolean, True if the address is within the - reserved IPv4 Network range. - - """ - return self in self._constants._reserved_network - - @property - def is_private(self): - """Test if this address is allocated for private networks. - - Returns: - A boolean, True if the address is reserved per - iana-ipv4-special-registry. - - """ - return any(self in net for net in self._constants._private_networks) - - @property - def is_global(self): - return ( - self not in self._constants._public_network and - not self.is_private) - - @property - def is_multicast(self): - """Test if the address is reserved for multicast use. - - Returns: - A boolean, True if the address is multicast. - See RFC 3171 for details. - - """ - return self in self._constants._multicast_network - - @property - def is_unspecified(self): - """Test if the address is unspecified. - - Returns: - A boolean, True if this is the unspecified address as defined in - RFC 5735 3. - - """ - return self == self._constants._unspecified_address - - @property - def is_loopback(self): - """Test if the address is a loopback address. - - Returns: - A boolean, True if the address is a loopback per RFC 3330. - - """ - return self in self._constants._loopback_network - - @property - def is_link_local(self): - """Test if the address is reserved for link-local. - - Returns: - A boolean, True if the address is link-local per RFC 3927. 
- - """ - return self in self._constants._linklocal_network - - -class IPv4Interface(IPv4Address): - - def __init__(self, address): - if isinstance(address, (bytes, _compat_int_types)): - IPv4Address.__init__(self, address) - self.network = IPv4Network(self._ip) - self._prefixlen = self._max_prefixlen - return - - if isinstance(address, tuple): - IPv4Address.__init__(self, address[0]) - if len(address) > 1: - self._prefixlen = int(address[1]) - else: - self._prefixlen = self._max_prefixlen - - self.network = IPv4Network(address, strict=False) - self.netmask = self.network.netmask - self.hostmask = self.network.hostmask - return - - addr = _split_optional_netmask(address) - IPv4Address.__init__(self, addr[0]) - - self.network = IPv4Network(address, strict=False) - self._prefixlen = self.network._prefixlen - - self.netmask = self.network.netmask - self.hostmask = self.network.hostmask - - def __str__(self): - return '%s/%d' % (self._string_from_ip_int(self._ip), - self.network.prefixlen) - - def __eq__(self, other): - address_equal = IPv4Address.__eq__(self, other) - if not address_equal or address_equal is NotImplemented: - return address_equal - try: - return self.network == other.network - except AttributeError: - # An interface with an associated network is NOT the - # same as an unassociated address. That's why the hash - # takes the extra info into account. - return False - - def __lt__(self, other): - address_less = IPv4Address.__lt__(self, other) - if address_less is NotImplemented: - return NotImplemented - try: - return (self.network < other.network or - self.network == other.network and address_less) - except AttributeError: - # We *do* allow addresses and interfaces to be sorted. The - # unassociated address is considered less than all interfaces. 
- return False - - def __hash__(self): - return self._ip ^ self._prefixlen ^ int(self.network.network_address) - - __reduce__ = _IPAddressBase.__reduce__ - - @property - def ip(self): - return IPv4Address(self._ip) - - @property - def with_prefixlen(self): - return '%s/%s' % (self._string_from_ip_int(self._ip), - self._prefixlen) - - @property - def with_netmask(self): - return '%s/%s' % (self._string_from_ip_int(self._ip), - self.netmask) - - @property - def with_hostmask(self): - return '%s/%s' % (self._string_from_ip_int(self._ip), - self.hostmask) - - -class IPv4Network(_BaseV4, _BaseNetwork): - - """This class represents and manipulates 32-bit IPv4 network + addresses.. - - Attributes: [examples for IPv4Network('192.0.2.0/27')] - .network_address: IPv4Address('192.0.2.0') - .hostmask: IPv4Address('0.0.0.31') - .broadcast_address: IPv4Address('192.0.2.32') - .netmask: IPv4Address('255.255.255.224') - .prefixlen: 27 - - """ - # Class to use when creating address objects - _address_class = IPv4Address - - def __init__(self, address, strict=True): - - """Instantiate a new IPv4 network object. - - Args: - address: A string or integer representing the IP [& network]. - '192.0.2.0/24' - '192.0.2.0/255.255.255.0' - '192.0.0.2/0.0.0.255' - are all functionally the same in IPv4. Similarly, - '192.0.2.1' - '192.0.2.1/255.255.255.255' - '192.0.2.1/32' - are also functionally equivalent. That is to say, failing to - provide a subnetmask will create an object with a mask of /32. - - If the mask (portion after the / in the argument) is given in - dotted quad form, it is treated as a netmask if it starts with a - non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it - starts with a zero field (e.g. 0.255.255.255 == /8), with the - single exception of an all-zero mask which is treated as a - netmask == /0. If no mask is given, a default of /32 is used. 
- - Additionally, an integer can be passed, so - IPv4Network('192.0.2.1') == IPv4Network(3221225985) - or, more generally - IPv4Interface(int(IPv4Interface('192.0.2.1'))) == - IPv4Interface('192.0.2.1') - - Raises: - AddressValueError: If ipaddress isn't a valid IPv4 address. - NetmaskValueError: If the netmask isn't valid for - an IPv4 address. - ValueError: If strict is True and a network address is not - supplied. - - """ - _BaseNetwork.__init__(self, address) - - # Constructing from a packed address or integer - if isinstance(address, (_compat_int_types, bytes)): - self.network_address = IPv4Address(address) - self.netmask, self._prefixlen = self._make_netmask( - self._max_prefixlen) - # fixme: address/network test here. - return - - if isinstance(address, tuple): - if len(address) > 1: - arg = address[1] - else: - # We weren't given an address[1] - arg = self._max_prefixlen - self.network_address = IPv4Address(address[0]) - self.netmask, self._prefixlen = self._make_netmask(arg) - packed = int(self.network_address) - if packed & int(self.netmask) != packed: - if strict: - raise ValueError('%s has host bits set' % self) - else: - self.network_address = IPv4Address(packed & - int(self.netmask)) - return - - # Assume input argument to be string or any object representation - # which converts into a formatted IP prefix string. 
- addr = _split_optional_netmask(address) - self.network_address = IPv4Address(self._ip_int_from_string(addr[0])) - - if len(addr) == 2: - arg = addr[1] - else: - arg = self._max_prefixlen - self.netmask, self._prefixlen = self._make_netmask(arg) - - if strict: - if (IPv4Address(int(self.network_address) & int(self.netmask)) != - self.network_address): - raise ValueError('%s has host bits set' % self) - self.network_address = IPv4Address(int(self.network_address) & - int(self.netmask)) - - if self._prefixlen == (self._max_prefixlen - 1): - self.hosts = self.__iter__ - - @property - def is_global(self): - """Test if this address is allocated for public networks. - - Returns: - A boolean, True if the address is not reserved per - iana-ipv4-special-registry. - - """ - return (not (self.network_address in IPv4Network('100.64.0.0/10') and - self.broadcast_address in IPv4Network('100.64.0.0/10')) and - not self.is_private) - - -class _IPv4Constants(object): - - _linklocal_network = IPv4Network('169.254.0.0/16') - - _loopback_network = IPv4Network('127.0.0.0/8') - - _multicast_network = IPv4Network('224.0.0.0/4') - - _public_network = IPv4Network('100.64.0.0/10') - - _private_networks = [ - IPv4Network('0.0.0.0/8'), - IPv4Network('10.0.0.0/8'), - IPv4Network('127.0.0.0/8'), - IPv4Network('169.254.0.0/16'), - IPv4Network('172.16.0.0/12'), - IPv4Network('192.0.0.0/29'), - IPv4Network('192.0.0.170/31'), - IPv4Network('192.0.2.0/24'), - IPv4Network('192.168.0.0/16'), - IPv4Network('198.18.0.0/15'), - IPv4Network('198.51.100.0/24'), - IPv4Network('203.0.113.0/24'), - IPv4Network('240.0.0.0/4'), - IPv4Network('255.255.255.255/32'), - ] - - _reserved_network = IPv4Network('240.0.0.0/4') - - _unspecified_address = IPv4Address('0.0.0.0') - - -IPv4Address._constants = _IPv4Constants - - -class _BaseV6(object): - - """Base IPv6 object. - - The following methods are used by IPv6 objects in both single IP - addresses and networks. 
- - """ - - __slots__ = () - _version = 6 - _ALL_ONES = (2 ** IPV6LENGTH) - 1 - _HEXTET_COUNT = 8 - _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef') - _max_prefixlen = IPV6LENGTH - - # There are only a bunch of valid v6 netmasks, so we cache them all - # when constructed (see _make_netmask()). - _netmask_cache = {} - - @classmethod - def _make_netmask(cls, arg): - """Make a (netmask, prefix_len) tuple from the given argument. - - Argument can be: - - an integer (the prefix length) - - a string representing the prefix length (e.g. "24") - - a string representing the prefix netmask (e.g. "255.255.255.0") - """ - if arg not in cls._netmask_cache: - if isinstance(arg, _compat_int_types): - prefixlen = arg - else: - prefixlen = cls._prefix_from_prefix_string(arg) - netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen)) - cls._netmask_cache[arg] = netmask, prefixlen - return cls._netmask_cache[arg] - - @classmethod - def _ip_int_from_string(cls, ip_str): - """Turn an IPv6 ip_str into an integer. - - Args: - ip_str: A string, the IPv6 ip_str. - - Returns: - An int, the IPv6 address - - Raises: - AddressValueError: if ip_str isn't a valid IPv6 Address. - - """ - if not ip_str: - raise AddressValueError('Address cannot be empty') - - parts = ip_str.split(':') - - # An IPv6 address needs at least 2 colons (3 parts). - _min_parts = 3 - if len(parts) < _min_parts: - msg = "At least %d parts expected in %r" % (_min_parts, ip_str) - raise AddressValueError(msg) - - # If the address has an IPv4-style suffix, convert it to hexadecimal. - if '.' in parts[-1]: - try: - ipv4_int = IPv4Address(parts.pop())._ip - except AddressValueError as exc: - raise AddressValueError("%s in %r" % (exc, ip_str)) - parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF)) - parts.append('%x' % (ipv4_int & 0xFFFF)) - - # An IPv6 address can't have more than 8 colons (9 parts). - # The extra colon comes from using the "::" notation for a single - # leading or trailing zero part. 
- _max_parts = cls._HEXTET_COUNT + 1 - if len(parts) > _max_parts: - msg = "At most %d colons permitted in %r" % ( - _max_parts - 1, ip_str) - raise AddressValueError(msg) - - # Disregarding the endpoints, find '::' with nothing in between. - # This indicates that a run of zeroes has been skipped. - skip_index = None - for i in _compat_range(1, len(parts) - 1): - if not parts[i]: - if skip_index is not None: - # Can't have more than one '::' - msg = "At most one '::' permitted in %r" % ip_str - raise AddressValueError(msg) - skip_index = i - - # parts_hi is the number of parts to copy from above/before the '::' - # parts_lo is the number of parts to copy from below/after the '::' - if skip_index is not None: - # If we found a '::', then check if it also covers the endpoints. - parts_hi = skip_index - parts_lo = len(parts) - skip_index - 1 - if not parts[0]: - parts_hi -= 1 - if parts_hi: - msg = "Leading ':' only permitted as part of '::' in %r" - raise AddressValueError(msg % ip_str) # ^: requires ^:: - if not parts[-1]: - parts_lo -= 1 - if parts_lo: - msg = "Trailing ':' only permitted as part of '::' in %r" - raise AddressValueError(msg % ip_str) # :$ requires ::$ - parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo) - if parts_skipped < 1: - msg = "Expected at most %d other parts with '::' in %r" - raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str)) - else: - # Otherwise, allocate the entire address to parts_hi. The - # endpoints could still be empty, but _parse_hextet() will check - # for that. 
- if len(parts) != cls._HEXTET_COUNT: - msg = "Exactly %d parts expected without '::' in %r" - raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str)) - if not parts[0]: - msg = "Leading ':' only permitted as part of '::' in %r" - raise AddressValueError(msg % ip_str) # ^: requires ^:: - if not parts[-1]: - msg = "Trailing ':' only permitted as part of '::' in %r" - raise AddressValueError(msg % ip_str) # :$ requires ::$ - parts_hi = len(parts) - parts_lo = 0 - parts_skipped = 0 - - try: - # Now, parse the hextets into a 128-bit integer. - ip_int = 0 - for i in range(parts_hi): - ip_int <<= 16 - ip_int |= cls._parse_hextet(parts[i]) - ip_int <<= 16 * parts_skipped - for i in range(-parts_lo, 0): - ip_int <<= 16 - ip_int |= cls._parse_hextet(parts[i]) - return ip_int - except ValueError as exc: - raise AddressValueError("%s in %r" % (exc, ip_str)) - - @classmethod - def _parse_hextet(cls, hextet_str): - """Convert an IPv6 hextet string into an integer. - - Args: - hextet_str: A string, the number to parse. - - Returns: - The hextet as an integer. - - Raises: - ValueError: if the input isn't strictly a hex number from - [0..FFFF]. - - """ - # Whitelist the characters, since int() allows a lot of bizarre stuff. - if not cls._HEX_DIGITS.issuperset(hextet_str): - raise ValueError("Only hex digits permitted in %r" % hextet_str) - # We do the length check second, since the invalid character error - # is likely to be more informative for the user - if len(hextet_str) > 4: - msg = "At most 4 characters permitted in %r" - raise ValueError(msg % hextet_str) - # Length check means we can skip checking the integer value - return int(hextet_str, 16) - - @classmethod - def _compress_hextets(cls, hextets): - """Compresses a list of hextets. 
- - Compresses a list of strings, replacing the longest continuous - sequence of "0" in the list with "" and adding empty strings at - the beginning or at the end of the string such that subsequently - calling ":".join(hextets) will produce the compressed version of - the IPv6 address. - - Args: - hextets: A list of strings, the hextets to compress. - - Returns: - A list of strings. - - """ - best_doublecolon_start = -1 - best_doublecolon_len = 0 - doublecolon_start = -1 - doublecolon_len = 0 - for index, hextet in enumerate(hextets): - if hextet == '0': - doublecolon_len += 1 - if doublecolon_start == -1: - # Start of a sequence of zeros. - doublecolon_start = index - if doublecolon_len > best_doublecolon_len: - # This is the longest sequence of zeros so far. - best_doublecolon_len = doublecolon_len - best_doublecolon_start = doublecolon_start - else: - doublecolon_len = 0 - doublecolon_start = -1 - - if best_doublecolon_len > 1: - best_doublecolon_end = (best_doublecolon_start + - best_doublecolon_len) - # For zeros at the end of the address. - if best_doublecolon_end == len(hextets): - hextets += [''] - hextets[best_doublecolon_start:best_doublecolon_end] = [''] - # For zeros at the beginning of the address. - if best_doublecolon_start == 0: - hextets = [''] + hextets - - return hextets - - @classmethod - def _string_from_ip_int(cls, ip_int=None): - """Turns a 128-bit integer into hexadecimal notation. - - Args: - ip_int: An integer, the IP address. - - Returns: - A string, the hexadecimal representation of the address. - - Raises: - ValueError: The address is bigger than 128 bits of all ones. 
- - """ - if ip_int is None: - ip_int = int(cls._ip) - - if ip_int > cls._ALL_ONES: - raise ValueError('IPv6 address is too large') - - hex_str = '%032x' % ip_int - hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)] - - hextets = cls._compress_hextets(hextets) - return ':'.join(hextets) - - def _explode_shorthand_ip_string(self): - """Expand a shortened IPv6 address. - - Args: - ip_str: A string, the IPv6 address. - - Returns: - A string, the expanded IPv6 address. - - """ - if isinstance(self, IPv6Network): - ip_str = _compat_str(self.network_address) - elif isinstance(self, IPv6Interface): - ip_str = _compat_str(self.ip) - else: - ip_str = _compat_str(self) - - ip_int = self._ip_int_from_string(ip_str) - hex_str = '%032x' % ip_int - parts = [hex_str[x:x + 4] for x in range(0, 32, 4)] - if isinstance(self, (_BaseNetwork, IPv6Interface)): - return '%s/%d' % (':'.join(parts), self._prefixlen) - return ':'.join(parts) - - def _reverse_pointer(self): - """Return the reverse DNS pointer name for the IPv6 address. - - This implements the method described in RFC3596 2.5. - - """ - reverse_chars = self.exploded[::-1].replace(':', '') - return '.'.join(reverse_chars) + '.ip6.arpa' - - @property - def max_prefixlen(self): - return self._max_prefixlen - - @property - def version(self): - return self._version - - -class IPv6Address(_BaseV6, _BaseAddress): - - """Represent and manipulate single IPv6 Addresses.""" - - __slots__ = ('_ip', '__weakref__') - - def __init__(self, address): - """Instantiate a new IPv6 address object. - - Args: - address: A string or integer representing the IP - - Additionally, an integer can be passed, so - IPv6Address('2001:db8::') == - IPv6Address(42540766411282592856903984951653826560) - or, more generally - IPv6Address(int(IPv6Address('2001:db8::'))) == - IPv6Address('2001:db8::') - - Raises: - AddressValueError: If address isn't a valid IPv6 address. - - """ - # Efficient constructor from integer. 
- if isinstance(address, _compat_int_types): - self._check_int_address(address) - self._ip = address - return - - # Constructing from a packed address - if isinstance(address, bytes): - self._check_packed_address(address, 16) - bvs = _compat_bytes_to_byte_vals(address) - self._ip = _compat_int_from_byte_vals(bvs, 'big') - return - - # Assume input argument to be string or any object representation - # which converts into a formatted IP string. - addr_str = _compat_str(address) - if '/' in addr_str: - raise AddressValueError("Unexpected '/' in %r" % address) - self._ip = self._ip_int_from_string(addr_str) - - @property - def packed(self): - """The binary representation of this address.""" - return v6_int_to_packed(self._ip) - - @property - def is_multicast(self): - """Test if the address is reserved for multicast use. - - Returns: - A boolean, True if the address is a multicast address. - See RFC 2373 2.7 for details. - - """ - return self in self._constants._multicast_network - - @property - def is_reserved(self): - """Test if the address is otherwise IETF reserved. - - Returns: - A boolean, True if the address is within one of the - reserved IPv6 Network ranges. - - """ - return any(self in x for x in self._constants._reserved_networks) - - @property - def is_link_local(self): - """Test if the address is reserved for link-local. - - Returns: - A boolean, True if the address is reserved per RFC 4291. - - """ - return self in self._constants._linklocal_network - - @property - def is_site_local(self): - """Test if the address is reserved for site-local. - - Note that the site-local address space has been deprecated by RFC 3879. - Use is_private to test if this address is in the space of unique local - addresses as defined by RFC 4193. - - Returns: - A boolean, True if the address is reserved per RFC 3513 2.5.6. - - """ - return self in self._constants._sitelocal_network - - @property - def is_private(self): - """Test if this address is allocated for private networks. 
- - Returns: - A boolean, True if the address is reserved per - iana-ipv6-special-registry. - - """ - return any(self in net for net in self._constants._private_networks) - - @property - def is_global(self): - """Test if this address is allocated for public networks. - - Returns: - A boolean, true if the address is not reserved per - iana-ipv6-special-registry. - - """ - return not self.is_private - - @property - def is_unspecified(self): - """Test if the address is unspecified. - - Returns: - A boolean, True if this is the unspecified address as defined in - RFC 2373 2.5.2. - - """ - return self._ip == 0 - - @property - def is_loopback(self): - """Test if the address is a loopback address. - - Returns: - A boolean, True if the address is a loopback address as defined in - RFC 2373 2.5.3. - - """ - return self._ip == 1 - - @property - def ipv4_mapped(self): - """Return the IPv4 mapped address. - - Returns: - If the IPv6 address is a v4 mapped address, return the - IPv4 mapped address. Return None otherwise. - - """ - if (self._ip >> 32) != 0xFFFF: - return None - return IPv4Address(self._ip & 0xFFFFFFFF) - - @property - def teredo(self): - """Tuple of embedded teredo IPs. - - Returns: - Tuple of the (server, client) IPs or None if the address - doesn't appear to be a teredo address (doesn't start with - 2001::/32) - - """ - if (self._ip >> 96) != 0x20010000: - return None - return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF), - IPv4Address(~self._ip & 0xFFFFFFFF)) - - @property - def sixtofour(self): - """Return the IPv4 6to4 embedded address. - - Returns: - The IPv4 6to4-embedded address if present or None if the - address doesn't appear to contain a 6to4 embedded address. 
- - """ - if (self._ip >> 112) != 0x2002: - return None - return IPv4Address((self._ip >> 80) & 0xFFFFFFFF) - - -class IPv6Interface(IPv6Address): - - def __init__(self, address): - if isinstance(address, (bytes, _compat_int_types)): - IPv6Address.__init__(self, address) - self.network = IPv6Network(self._ip) - self._prefixlen = self._max_prefixlen - return - if isinstance(address, tuple): - IPv6Address.__init__(self, address[0]) - if len(address) > 1: - self._prefixlen = int(address[1]) - else: - self._prefixlen = self._max_prefixlen - self.network = IPv6Network(address, strict=False) - self.netmask = self.network.netmask - self.hostmask = self.network.hostmask - return - - addr = _split_optional_netmask(address) - IPv6Address.__init__(self, addr[0]) - self.network = IPv6Network(address, strict=False) - self.netmask = self.network.netmask - self._prefixlen = self.network._prefixlen - self.hostmask = self.network.hostmask - - def __str__(self): - return '%s/%d' % (self._string_from_ip_int(self._ip), - self.network.prefixlen) - - def __eq__(self, other): - address_equal = IPv6Address.__eq__(self, other) - if not address_equal or address_equal is NotImplemented: - return address_equal - try: - return self.network == other.network - except AttributeError: - # An interface with an associated network is NOT the - # same as an unassociated address. That's why the hash - # takes the extra info into account. - return False - - def __lt__(self, other): - address_less = IPv6Address.__lt__(self, other) - if address_less is NotImplemented: - return NotImplemented - try: - return (self.network < other.network or - self.network == other.network and address_less) - except AttributeError: - # We *do* allow addresses and interfaces to be sorted. The - # unassociated address is considered less than all interfaces. 
- return False - - def __hash__(self): - return self._ip ^ self._prefixlen ^ int(self.network.network_address) - - __reduce__ = _IPAddressBase.__reduce__ - - @property - def ip(self): - return IPv6Address(self._ip) - - @property - def with_prefixlen(self): - return '%s/%s' % (self._string_from_ip_int(self._ip), - self._prefixlen) - - @property - def with_netmask(self): - return '%s/%s' % (self._string_from_ip_int(self._ip), - self.netmask) - - @property - def with_hostmask(self): - return '%s/%s' % (self._string_from_ip_int(self._ip), - self.hostmask) - - @property - def is_unspecified(self): - return self._ip == 0 and self.network.is_unspecified - - @property - def is_loopback(self): - return self._ip == 1 and self.network.is_loopback - - -class IPv6Network(_BaseV6, _BaseNetwork): - - """This class represents and manipulates 128-bit IPv6 networks. - - Attributes: [examples for IPv6('2001:db8::1000/124')] - .network_address: IPv6Address('2001:db8::1000') - .hostmask: IPv6Address('::f') - .broadcast_address: IPv6Address('2001:db8::100f') - .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0') - .prefixlen: 124 - - """ - - # Class to use when creating address objects - _address_class = IPv6Address - - def __init__(self, address, strict=True): - """Instantiate a new IPv6 Network object. - - Args: - address: A string or integer representing the IPv6 network or the - IP and prefix/netmask. - '2001:db8::/128' - '2001:db8:0000:0000:0000:0000:0000:0000/128' - '2001:db8::' - are all functionally the same in IPv6. That is to say, - failing to provide a subnetmask will create an object with - a mask of /128. - - Additionally, an integer can be passed, so - IPv6Network('2001:db8::') == - IPv6Network(42540766411282592856903984951653826560) - or, more generally - IPv6Network(int(IPv6Network('2001:db8::'))) == - IPv6Network('2001:db8::') - - strict: A boolean. 
If true, ensure that we have been passed - A true network address, eg, 2001:db8::1000/124 and not an - IP address on a network, eg, 2001:db8::1/124. - - Raises: - AddressValueError: If address isn't a valid IPv6 address. - NetmaskValueError: If the netmask isn't valid for - an IPv6 address. - ValueError: If strict was True and a network address was not - supplied. - - """ - _BaseNetwork.__init__(self, address) - - # Efficient constructor from integer or packed address - if isinstance(address, (bytes, _compat_int_types)): - self.network_address = IPv6Address(address) - self.netmask, self._prefixlen = self._make_netmask( - self._max_prefixlen) - return - - if isinstance(address, tuple): - if len(address) > 1: - arg = address[1] - else: - arg = self._max_prefixlen - self.netmask, self._prefixlen = self._make_netmask(arg) - self.network_address = IPv6Address(address[0]) - packed = int(self.network_address) - if packed & int(self.netmask) != packed: - if strict: - raise ValueError('%s has host bits set' % self) - else: - self.network_address = IPv6Address(packed & - int(self.netmask)) - return - - # Assume input argument to be string or any object representation - # which converts into a formatted IP prefix string. - addr = _split_optional_netmask(address) - - self.network_address = IPv6Address(self._ip_int_from_string(addr[0])) - - if len(addr) == 2: - arg = addr[1] - else: - arg = self._max_prefixlen - self.netmask, self._prefixlen = self._make_netmask(arg) - - if strict: - if (IPv6Address(int(self.network_address) & int(self.netmask)) != - self.network_address): - raise ValueError('%s has host bits set' % self) - self.network_address = IPv6Address(int(self.network_address) & - int(self.netmask)) - - if self._prefixlen == (self._max_prefixlen - 1): - self.hosts = self.__iter__ - - def hosts(self): - """Generate Iterator over usable hosts in a network. - - This is like __iter__ except it doesn't return the - Subnet-Router anycast address. 
- - """ - network = int(self.network_address) - broadcast = int(self.broadcast_address) - for x in _compat_range(network + 1, broadcast + 1): - yield self._address_class(x) - - @property - def is_site_local(self): - """Test if the address is reserved for site-local. - - Note that the site-local address space has been deprecated by RFC 3879. - Use is_private to test if this address is in the space of unique local - addresses as defined by RFC 4193. - - Returns: - A boolean, True if the address is reserved per RFC 3513 2.5.6. - - """ - return (self.network_address.is_site_local and - self.broadcast_address.is_site_local) - - -class _IPv6Constants(object): - - _linklocal_network = IPv6Network('fe80::/10') - - _multicast_network = IPv6Network('ff00::/8') - - _private_networks = [ - IPv6Network('::1/128'), - IPv6Network('::/128'), - IPv6Network('::ffff:0:0/96'), - IPv6Network('100::/64'), - IPv6Network('2001::/23'), - IPv6Network('2001:2::/48'), - IPv6Network('2001:db8::/32'), - IPv6Network('2001:10::/28'), - IPv6Network('fc00::/7'), - IPv6Network('fe80::/10'), - ] - - _reserved_networks = [ - IPv6Network('::/8'), IPv6Network('100::/8'), - IPv6Network('200::/7'), IPv6Network('400::/6'), - IPv6Network('800::/5'), IPv6Network('1000::/4'), - IPv6Network('4000::/3'), IPv6Network('6000::/3'), - IPv6Network('8000::/3'), IPv6Network('A000::/3'), - IPv6Network('C000::/3'), IPv6Network('E000::/4'), - IPv6Network('F000::/5'), IPv6Network('F800::/6'), - IPv6Network('FE00::/9'), - ] - - _sitelocal_network = IPv6Network('fec0::/10') - - -IPv6Address._constants = _IPv6Constants diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/lockfile/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/lockfile/__init__.py deleted file mode 100644 index a6f44a5..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/lockfile/__init__.py +++ /dev/null @@ -1,347 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -lockfile.py - 
Platform-independent advisory file locks. - -Requires Python 2.5 unless you apply 2.4.diff -Locking is done on a per-thread basis instead of a per-process basis. - -Usage: - ->>> lock = LockFile('somefile') ->>> try: -... lock.acquire() -... except AlreadyLocked: -... print 'somefile', 'is locked already.' -... except LockFailed: -... print 'somefile', 'can\\'t be locked.' -... else: -... print 'got lock' -got lock ->>> print lock.is_locked() -True ->>> lock.release() - ->>> lock = LockFile('somefile') ->>> print lock.is_locked() -False ->>> with lock: -... print lock.is_locked() -True ->>> print lock.is_locked() -False - ->>> lock = LockFile('somefile') ->>> # It is okay to lock twice from the same thread... ->>> with lock: -... lock.acquire() -... ->>> # Though no counter is kept, so you can't unlock multiple times... ->>> print lock.is_locked() -False - -Exceptions: - - Error - base class for other exceptions - LockError - base class for all locking exceptions - AlreadyLocked - Another thread or process already holds the lock - LockFailed - Lock failed for some other reason - UnlockError - base class for all unlocking exceptions - AlreadyUnlocked - File was not locked. - NotMyLock - File was locked but not by the current thread/process -""" - -from __future__ import absolute_import - -import functools -import os -import socket -import threading -import warnings - -# Work with PEP8 and non-PEP8 versions of threading module. -if not hasattr(threading, "current_thread"): - threading.current_thread = threading.currentThread -if not hasattr(threading.Thread, "get_name"): - threading.Thread.get_name = threading.Thread.getName - -__all__ = ['Error', 'LockError', 'LockTimeout', 'AlreadyLocked', - 'LockFailed', 'UnlockError', 'NotLocked', 'NotMyLock', - 'LinkFileLock', 'MkdirFileLock', 'SQLiteFileLock', - 'LockBase', 'locked'] - - -class Error(Exception): - """ - Base class for other exceptions. - - >>> try: - ... raise Error - ... except Exception: - ... 
pass - """ - pass - - -class LockError(Error): - """ - Base class for error arising from attempts to acquire the lock. - - >>> try: - ... raise LockError - ... except Error: - ... pass - """ - pass - - -class LockTimeout(LockError): - """Raised when lock creation fails within a user-defined period of time. - - >>> try: - ... raise LockTimeout - ... except LockError: - ... pass - """ - pass - - -class AlreadyLocked(LockError): - """Some other thread/process is locking the file. - - >>> try: - ... raise AlreadyLocked - ... except LockError: - ... pass - """ - pass - - -class LockFailed(LockError): - """Lock file creation failed for some other reason. - - >>> try: - ... raise LockFailed - ... except LockError: - ... pass - """ - pass - - -class UnlockError(Error): - """ - Base class for errors arising from attempts to release the lock. - - >>> try: - ... raise UnlockError - ... except Error: - ... pass - """ - pass - - -class NotLocked(UnlockError): - """Raised when an attempt is made to unlock an unlocked file. - - >>> try: - ... raise NotLocked - ... except UnlockError: - ... pass - """ - pass - - -class NotMyLock(UnlockError): - """Raised when an attempt is made to unlock a file someone else locked. - - >>> try: - ... raise NotMyLock - ... except UnlockError: - ... pass - """ - pass - - -class _SharedBase(object): - def __init__(self, path): - self.path = path - - def acquire(self, timeout=None): - """ - Acquire the lock. - - * If timeout is omitted (or None), wait forever trying to lock the - file. - - * If timeout > 0, try to acquire the lock for that many seconds. If - the lock period expires and the file is still locked, raise - LockTimeout. - - * If timeout <= 0, raise AlreadyLocked immediately if the file is - already locked. - """ - raise NotImplemented("implement in subclass") - - def release(self): - """ - Release the lock. - - If the file is not locked, raise NotLocked. 
- """ - raise NotImplemented("implement in subclass") - - def __enter__(self): - """ - Context manager support. - """ - self.acquire() - return self - - def __exit__(self, *_exc): - """ - Context manager support. - """ - self.release() - - def __repr__(self): - return "<%s: %r>" % (self.__class__.__name__, self.path) - - -class LockBase(_SharedBase): - """Base class for platform-specific lock classes.""" - def __init__(self, path, threaded=True, timeout=None): - """ - >>> lock = LockBase('somefile') - >>> lock = LockBase('somefile', threaded=False) - """ - super(LockBase, self).__init__(path) - self.lock_file = os.path.abspath(path) + ".lock" - self.hostname = socket.gethostname() - self.pid = os.getpid() - if threaded: - t = threading.current_thread() - # Thread objects in Python 2.4 and earlier do not have ident - # attrs. Worm around that. - ident = getattr(t, "ident", hash(t)) - self.tname = "-%x" % (ident & 0xffffffff) - else: - self.tname = "" - dirname = os.path.dirname(self.lock_file) - - # unique name is mostly about the current process, but must - # also contain the path -- otherwise, two adjacent locked - # files conflict (one file gets locked, creating lock-file and - # unique file, the other one gets locked, creating lock-file - # and overwriting the already existing lock-file, then one - # gets unlocked, deleting both lock-file and unique file, - # finally the last lock errors out upon releasing. - self.unique_name = os.path.join(dirname, - "%s%s.%s%s" % (self.hostname, - self.tname, - self.pid, - hash(self.path))) - self.timeout = timeout - - def is_locked(self): - """ - Tell whether or not the file is locked. - """ - raise NotImplemented("implement in subclass") - - def i_am_locking(self): - """ - Return True if this object is locking the file. - """ - raise NotImplemented("implement in subclass") - - def break_lock(self): - """ - Remove a lock. Useful if a locking thread failed to unlock. 
- """ - raise NotImplemented("implement in subclass") - - def __repr__(self): - return "<%s: %r -- %r>" % (self.__class__.__name__, self.unique_name, - self.path) - - -def _fl_helper(cls, mod, *args, **kwds): - warnings.warn("Import from %s module instead of lockfile package" % mod, - DeprecationWarning, stacklevel=2) - # This is a bit funky, but it's only for awhile. The way the unit tests - # are constructed this function winds up as an unbound method, so it - # actually takes three args, not two. We want to toss out self. - if not isinstance(args[0], str): - # We are testing, avoid the first arg - args = args[1:] - if len(args) == 1 and not kwds: - kwds["threaded"] = True - return cls(*args, **kwds) - - -def LinkFileLock(*args, **kwds): - """Factory function provided for backwards compatibility. - - Do not use in new code. Instead, import LinkLockFile from the - lockfile.linklockfile module. - """ - from . import linklockfile - return _fl_helper(linklockfile.LinkLockFile, "lockfile.linklockfile", - *args, **kwds) - - -def MkdirFileLock(*args, **kwds): - """Factory function provided for backwards compatibility. - - Do not use in new code. Instead, import MkdirLockFile from the - lockfile.mkdirlockfile module. - """ - from . import mkdirlockfile - return _fl_helper(mkdirlockfile.MkdirLockFile, "lockfile.mkdirlockfile", - *args, **kwds) - - -def SQLiteFileLock(*args, **kwds): - """Factory function provided for backwards compatibility. - - Do not use in new code. Instead, import SQLiteLockFile from the - lockfile.mkdirlockfile module. - """ - from . import sqlitelockfile - return _fl_helper(sqlitelockfile.SQLiteLockFile, "lockfile.sqlitelockfile", - *args, **kwds) - - -def locked(path, timeout=None): - """Decorator which enables locks for decorated function. - - Arguments: - - path: path for lockfile. - - timeout (optional): Timeout for acquiring lock. - - Usage: - @locked('/var/run/myname', timeout=0) - def myname(...): - ... 
- """ - def decor(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - lock = FileLock(path, timeout=timeout) - lock.acquire() - try: - return func(*args, **kwargs) - finally: - lock.release() - return wrapper - return decor - - -if hasattr(os, "link"): - from . import linklockfile as _llf - LockFile = _llf.LinkLockFile -else: - from . import mkdirlockfile as _mlf - LockFile = _mlf.MkdirLockFile - -FileLock = LockFile diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/lockfile/linklockfile.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/lockfile/linklockfile.py deleted file mode 100644 index 2ca9be0..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/lockfile/linklockfile.py +++ /dev/null @@ -1,73 +0,0 @@ -from __future__ import absolute_import - -import time -import os - -from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout, - AlreadyLocked) - - -class LinkLockFile(LockBase): - """Lock access to a file using atomic property of link(2). - - >>> lock = LinkLockFile('somefile') - >>> lock = LinkLockFile('somefile', threaded=False) - """ - - def acquire(self, timeout=None): - try: - open(self.unique_name, "wb").close() - except IOError: - raise LockFailed("failed to create %s" % self.unique_name) - - timeout = timeout if timeout is not None else self.timeout - end_time = time.time() - if timeout is not None and timeout > 0: - end_time += timeout - - while True: - # Try and create a hard link to it. - try: - os.link(self.unique_name, self.lock_file) - except OSError: - # Link creation failed. Maybe we've double-locked? - nlinks = os.stat(self.unique_name).st_nlink - if nlinks == 2: - # The original link plus the one I created == 2. We're - # good to go. - return - else: - # Otherwise the lock creation failed. 
- if timeout is not None and time.time() > end_time: - os.unlink(self.unique_name) - if timeout > 0: - raise LockTimeout("Timeout waiting to acquire" - " lock for %s" % - self.path) - else: - raise AlreadyLocked("%s is already locked" % - self.path) - time.sleep(timeout is not None and timeout / 10 or 0.1) - else: - # Link creation succeeded. We're good to go. - return - - def release(self): - if not self.is_locked(): - raise NotLocked("%s is not locked" % self.path) - elif not os.path.exists(self.unique_name): - raise NotMyLock("%s is locked, but not by me" % self.path) - os.unlink(self.unique_name) - os.unlink(self.lock_file) - - def is_locked(self): - return os.path.exists(self.lock_file) - - def i_am_locking(self): - return (self.is_locked() and - os.path.exists(self.unique_name) and - os.stat(self.unique_name).st_nlink == 2) - - def break_lock(self): - if os.path.exists(self.lock_file): - os.unlink(self.lock_file) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/lockfile/mkdirlockfile.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/lockfile/mkdirlockfile.py deleted file mode 100644 index 05a8c96..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/lockfile/mkdirlockfile.py +++ /dev/null @@ -1,84 +0,0 @@ -from __future__ import absolute_import, division - -import time -import os -import sys -import errno - -from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout, - AlreadyLocked) - - -class MkdirLockFile(LockBase): - """Lock file by creating a directory.""" - def __init__(self, path, threaded=True, timeout=None): - """ - >>> lock = MkdirLockFile('somefile') - >>> lock = MkdirLockFile('somefile', threaded=False) - """ - LockBase.__init__(self, path, threaded, timeout) - # Lock file itself is a directory. Place the unique file name into - # it. 
- self.unique_name = os.path.join(self.lock_file, - "%s.%s%s" % (self.hostname, - self.tname, - self.pid)) - - def acquire(self, timeout=None): - timeout = timeout if timeout is not None else self.timeout - end_time = time.time() - if timeout is not None and timeout > 0: - end_time += timeout - - if timeout is None: - wait = 0.1 - else: - wait = max(0, timeout / 10) - - while True: - try: - os.mkdir(self.lock_file) - except OSError: - err = sys.exc_info()[1] - if err.errno == errno.EEXIST: - # Already locked. - if os.path.exists(self.unique_name): - # Already locked by me. - return - if timeout is not None and time.time() > end_time: - if timeout > 0: - raise LockTimeout("Timeout waiting to acquire" - " lock for %s" % - self.path) - else: - # Someone else has the lock. - raise AlreadyLocked("%s is already locked" % - self.path) - time.sleep(wait) - else: - # Couldn't create the lock for some other reason - raise LockFailed("failed to create %s" % self.lock_file) - else: - open(self.unique_name, "wb").close() - return - - def release(self): - if not self.is_locked(): - raise NotLocked("%s is not locked" % self.path) - elif not os.path.exists(self.unique_name): - raise NotMyLock("%s is locked, but not by me" % self.path) - os.unlink(self.unique_name) - os.rmdir(self.lock_file) - - def is_locked(self): - return os.path.exists(self.lock_file) - - def i_am_locking(self): - return (self.is_locked() and - os.path.exists(self.unique_name)) - - def break_lock(self): - if os.path.exists(self.lock_file): - for name in os.listdir(self.lock_file): - os.unlink(os.path.join(self.lock_file, name)) - os.rmdir(self.lock_file) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/lockfile/pidlockfile.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/lockfile/pidlockfile.py deleted file mode 100644 index 069e85b..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/lockfile/pidlockfile.py +++ /dev/null @@ -1,190 
+0,0 @@ -# -*- coding: utf-8 -*- - -# pidlockfile.py -# -# Copyright © 2008–2009 Ben Finney <ben+python@benfinney.id.au> -# -# This is free software: you may copy, modify, and/or distribute this work -# under the terms of the Python Software Foundation License, version 2 or -# later as published by the Python Software Foundation. -# No warranty expressed or implied. See the file LICENSE.PSF-2 for details. - -""" Lockfile behaviour implemented via Unix PID files. - """ - -from __future__ import absolute_import - -import errno -import os -import time - -from . import (LockBase, AlreadyLocked, LockFailed, NotLocked, NotMyLock, - LockTimeout) - - -class PIDLockFile(LockBase): - """ Lockfile implemented as a Unix PID file. - - The lock file is a normal file named by the attribute `path`. - A lock's PID file contains a single line of text, containing - the process ID (PID) of the process that acquired the lock. - - >>> lock = PIDLockFile('somefile') - >>> lock = PIDLockFile('somefile') - """ - - def __init__(self, path, threaded=False, timeout=None): - # pid lockfiles don't support threaded operation, so always force - # False as the threaded arg. - LockBase.__init__(self, path, False, timeout) - self.unique_name = self.path - - def read_pid(self): - """ Get the PID from the lock file. - """ - return read_pid_from_pidfile(self.path) - - def is_locked(self): - """ Test if the lock is currently held. - - The lock is held if the PID file for this lock exists. - - """ - return os.path.exists(self.path) - - def i_am_locking(self): - """ Test if the lock is held by the current process. - - Returns ``True`` if the current process ID matches the - number stored in the PID file. - """ - return self.is_locked() and os.getpid() == self.read_pid() - - def acquire(self, timeout=None): - """ Acquire the lock. - - Creates the PID file for this lock, or raises an error if - the lock could not be acquired. 
- """ - - timeout = timeout if timeout is not None else self.timeout - end_time = time.time() - if timeout is not None and timeout > 0: - end_time += timeout - - while True: - try: - write_pid_to_pidfile(self.path) - except OSError as exc: - if exc.errno == errno.EEXIST: - # The lock creation failed. Maybe sleep a bit. - if time.time() > end_time: - if timeout is not None and timeout > 0: - raise LockTimeout("Timeout waiting to acquire" - " lock for %s" % - self.path) - else: - raise AlreadyLocked("%s is already locked" % - self.path) - time.sleep(timeout is not None and timeout / 10 or 0.1) - else: - raise LockFailed("failed to create %s" % self.path) - else: - return - - def release(self): - """ Release the lock. - - Removes the PID file to release the lock, or raises an - error if the current process does not hold the lock. - - """ - if not self.is_locked(): - raise NotLocked("%s is not locked" % self.path) - if not self.i_am_locking(): - raise NotMyLock("%s is locked, but not by me" % self.path) - remove_existing_pidfile(self.path) - - def break_lock(self): - """ Break an existing lock. - - Removes the PID file if it already exists, otherwise does - nothing. - - """ - remove_existing_pidfile(self.path) - - -def read_pid_from_pidfile(pidfile_path): - """ Read the PID recorded in the named PID file. - - Read and return the numeric PID recorded as text in the named - PID file. If the PID file cannot be read, or if the content is - not a valid PID, return ``None``. - - """ - pid = None - try: - pidfile = open(pidfile_path, 'r') - except IOError: - pass - else: - # According to the FHS 2.3 section on PID files in /var/run: - # - # The file must consist of the process identifier in - # ASCII-encoded decimal, followed by a newline character. - # - # Programs that read PID files should be somewhat flexible - # in what they accept; i.e., they should ignore extra - # whitespace, leading zeroes, absence of the trailing - # newline, or additional lines in the PID file. 
- - line = pidfile.readline().strip() - try: - pid = int(line) - except ValueError: - pass - pidfile.close() - - return pid - - -def write_pid_to_pidfile(pidfile_path): - """ Write the PID in the named PID file. - - Get the numeric process ID (“PIDâ€) of the current process - and write it to the named file as a line of text. - - """ - open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY) - open_mode = 0o644 - pidfile_fd = os.open(pidfile_path, open_flags, open_mode) - pidfile = os.fdopen(pidfile_fd, 'w') - - # According to the FHS 2.3 section on PID files in /var/run: - # - # The file must consist of the process identifier in - # ASCII-encoded decimal, followed by a newline character. For - # example, if crond was process number 25, /var/run/crond.pid - # would contain three characters: two, five, and newline. - - pid = os.getpid() - pidfile.write("%s\n" % pid) - pidfile.close() - - -def remove_existing_pidfile(pidfile_path): - """ Remove the named PID file if it exists. - - Removing a PID file that doesn't already exist puts us in the - desired state, so we ignore the condition if the file does not - exist. - - """ - try: - os.remove(pidfile_path) - except OSError as exc: - if exc.errno == errno.ENOENT: - pass - else: - raise diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/lockfile/sqlitelockfile.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/lockfile/sqlitelockfile.py deleted file mode 100644 index f997e24..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/lockfile/sqlitelockfile.py +++ /dev/null @@ -1,156 +0,0 @@ -from __future__ import absolute_import, division - -import time -import os - -try: - unicode -except NameError: - unicode = str - -from . import LockBase, NotLocked, NotMyLock, LockTimeout, AlreadyLocked - - -class SQLiteLockFile(LockBase): - "Demonstrate SQL-based locking." 
- - testdb = None - - def __init__(self, path, threaded=True, timeout=None): - """ - >>> lock = SQLiteLockFile('somefile') - >>> lock = SQLiteLockFile('somefile', threaded=False) - """ - LockBase.__init__(self, path, threaded, timeout) - self.lock_file = unicode(self.lock_file) - self.unique_name = unicode(self.unique_name) - - if SQLiteLockFile.testdb is None: - import tempfile - _fd, testdb = tempfile.mkstemp() - os.close(_fd) - os.unlink(testdb) - del _fd, tempfile - SQLiteLockFile.testdb = testdb - - import sqlite3 - self.connection = sqlite3.connect(SQLiteLockFile.testdb) - - c = self.connection.cursor() - try: - c.execute("create table locks" - "(" - " lock_file varchar(32)," - " unique_name varchar(32)" - ")") - except sqlite3.OperationalError: - pass - else: - self.connection.commit() - import atexit - atexit.register(os.unlink, SQLiteLockFile.testdb) - - def acquire(self, timeout=None): - timeout = timeout if timeout is not None else self.timeout - end_time = time.time() - if timeout is not None and timeout > 0: - end_time += timeout - - if timeout is None: - wait = 0.1 - elif timeout <= 0: - wait = 0 - else: - wait = timeout / 10 - - cursor = self.connection.cursor() - - while True: - if not self.is_locked(): - # Not locked. Try to lock it. - cursor.execute("insert into locks" - " (lock_file, unique_name)" - " values" - " (?, ?)", - (self.lock_file, self.unique_name)) - self.connection.commit() - - # Check to see if we are the only lock holder. - cursor.execute("select * from locks" - " where unique_name = ?", - (self.unique_name,)) - rows = cursor.fetchall() - if len(rows) > 1: - # Nope. Someone else got there. Remove our lock. - cursor.execute("delete from locks" - " where unique_name = ?", - (self.unique_name,)) - self.connection.commit() - else: - # Yup. We're done, so go home. - return - else: - # Check to see if we are the only lock holder. 
- cursor.execute("select * from locks" - " where unique_name = ?", - (self.unique_name,)) - rows = cursor.fetchall() - if len(rows) == 1: - # We're the locker, so go home. - return - - # Maybe we should wait a bit longer. - if timeout is not None and time.time() > end_time: - if timeout > 0: - # No more waiting. - raise LockTimeout("Timeout waiting to acquire" - " lock for %s" % - self.path) - else: - # Someone else has the lock and we are impatient.. - raise AlreadyLocked("%s is already locked" % self.path) - - # Well, okay. We'll give it a bit longer. - time.sleep(wait) - - def release(self): - if not self.is_locked(): - raise NotLocked("%s is not locked" % self.path) - if not self.i_am_locking(): - raise NotMyLock("%s is locked, but not by me (by %s)" % - (self.unique_name, self._who_is_locking())) - cursor = self.connection.cursor() - cursor.execute("delete from locks" - " where unique_name = ?", - (self.unique_name,)) - self.connection.commit() - - def _who_is_locking(self): - cursor = self.connection.cursor() - cursor.execute("select unique_name from locks" - " where lock_file = ?", - (self.lock_file,)) - return cursor.fetchone()[0] - - def is_locked(self): - cursor = self.connection.cursor() - cursor.execute("select * from locks" - " where lock_file = ?", - (self.lock_file,)) - rows = cursor.fetchall() - return not not rows - - def i_am_locking(self): - cursor = self.connection.cursor() - cursor.execute("select * from locks" - " where lock_file = ?" 
- " and unique_name = ?", - (self.lock_file, self.unique_name)) - return not not cursor.fetchall() - - def break_lock(self): - cursor = self.connection.cursor() - cursor.execute("delete from locks" - " where lock_file = ?", - (self.lock_file,)) - self.connection.commit() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/lockfile/symlinklockfile.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/lockfile/symlinklockfile.py deleted file mode 100644 index 23b41f5..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/lockfile/symlinklockfile.py +++ /dev/null @@ -1,70 +0,0 @@ -from __future__ import absolute_import - -import os -import time - -from . import (LockBase, NotLocked, NotMyLock, LockTimeout, - AlreadyLocked) - - -class SymlinkLockFile(LockBase): - """Lock access to a file using symlink(2).""" - - def __init__(self, path, threaded=True, timeout=None): - # super(SymlinkLockFile).__init(...) - LockBase.__init__(self, path, threaded, timeout) - # split it back! - self.unique_name = os.path.split(self.unique_name)[1] - - def acquire(self, timeout=None): - # Hopefully unnecessary for symlink. - # try: - # open(self.unique_name, "wb").close() - # except IOError: - # raise LockFailed("failed to create %s" % self.unique_name) - timeout = timeout if timeout is not None else self.timeout - end_time = time.time() - if timeout is not None and timeout > 0: - end_time += timeout - - while True: - # Try and create a symbolic link to it. - try: - os.symlink(self.unique_name, self.lock_file) - except OSError: - # Link creation failed. Maybe we've double-locked? - if self.i_am_locking(): - # Linked to out unique name. Proceed. - return - else: - # Otherwise the lock creation failed. 
- if timeout is not None and time.time() > end_time: - if timeout > 0: - raise LockTimeout("Timeout waiting to acquire" - " lock for %s" % - self.path) - else: - raise AlreadyLocked("%s is already locked" % - self.path) - time.sleep(timeout / 10 if timeout is not None else 0.1) - else: - # Link creation succeeded. We're good to go. - return - - def release(self): - if not self.is_locked(): - raise NotLocked("%s is not locked" % self.path) - elif not self.i_am_locking(): - raise NotMyLock("%s is locked, but not by me" % self.path) - os.unlink(self.lock_file) - - def is_locked(self): - return os.path.islink(self.lock_file) - - def i_am_locking(self): - return (os.path.islink(self.lock_file) - and os.readlink(self.lock_file) == self.unique_name) - - def break_lock(self): - if os.path.islink(self.lock_file): # exists && link - os.unlink(self.lock_file) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/msgpack/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/msgpack/__init__.py deleted file mode 100644 index 2afca5a..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/msgpack/__init__.py +++ /dev/null @@ -1,66 +0,0 @@ -# coding: utf-8 -from pip._vendor.msgpack._version import version -from pip._vendor.msgpack.exceptions import * - -from collections import namedtuple - - -class ExtType(namedtuple('ExtType', 'code data')): - """ExtType represents ext type in msgpack.""" - def __new__(cls, code, data): - if not isinstance(code, int): - raise TypeError("code must be int") - if not isinstance(data, bytes): - raise TypeError("data must be bytes") - if not 0 <= code <= 127: - raise ValueError("code must be 0~127") - return super(ExtType, cls).__new__(cls, code, data) - - -import os -if os.environ.get('MSGPACK_PUREPYTHON'): - from pip._vendor.msgpack.fallback import Packer, unpackb, Unpacker -else: - try: - from pip._vendor.msgpack._packer import Packer - from pip._vendor.msgpack._unpacker 
import unpackb, Unpacker - except ImportError: - from pip._vendor.msgpack.fallback import Packer, unpackb, Unpacker - - -def pack(o, stream, **kwargs): - """ - Pack object `o` and write it to `stream` - - See :class:`Packer` for options. - """ - packer = Packer(**kwargs) - stream.write(packer.pack(o)) - - -def packb(o, **kwargs): - """ - Pack object `o` and return packed bytes - - See :class:`Packer` for options. - """ - return Packer(**kwargs).pack(o) - - -def unpack(stream, **kwargs): - """ - Unpack an object from `stream`. - - Raises `ExtraData` when `stream` contains extra bytes. - See :class:`Unpacker` for options. - """ - data = stream.read() - return unpackb(data, **kwargs) - - -# alias for compatibility to simplejson/marshal/pickle. -load = unpack -loads = unpackb - -dump = pack -dumps = packb diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/msgpack/_version.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/msgpack/_version.py deleted file mode 100644 index d28f0de..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/msgpack/_version.py +++ /dev/null @@ -1 +0,0 @@ -version = (0, 5, 6) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/msgpack/exceptions.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/msgpack/exceptions.py deleted file mode 100644 index 9766881..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/msgpack/exceptions.py +++ /dev/null @@ -1,41 +0,0 @@ -class UnpackException(Exception): - """Deprecated. Use Exception instead to catch all exception during unpacking.""" - - -class BufferFull(UnpackException): - pass - - -class OutOfData(UnpackException): - pass - - -class UnpackValueError(UnpackException, ValueError): - """Deprecated. 
Use ValueError instead.""" - - -class ExtraData(UnpackValueError): - def __init__(self, unpacked, extra): - self.unpacked = unpacked - self.extra = extra - - def __str__(self): - return "unpack(b) received extra data." - - -class PackException(Exception): - """Deprecated. Use Exception instead to catch all exception during packing.""" - - -class PackValueError(PackException, ValueError): - """PackValueError is raised when type of input data is supported but it's value is unsupported. - - Deprecated. Use ValueError instead. - """ - - -class PackOverflowError(PackValueError, OverflowError): - """PackOverflowError is raised when integer value is out of range of msgpack support [-2**31, 2**32). - - Deprecated. Use ValueError instead. - """ diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/msgpack/fallback.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/msgpack/fallback.py deleted file mode 100644 index 9418421..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/msgpack/fallback.py +++ /dev/null @@ -1,977 +0,0 @@ -"""Fallback pure Python implementation of msgpack""" - -import sys -import struct -import warnings - -if sys.version_info[0] == 3: - PY3 = True - int_types = int - Unicode = str - xrange = range - def dict_iteritems(d): - return d.items() -else: - PY3 = False - int_types = (int, long) - Unicode = unicode - def dict_iteritems(d): - return d.iteritems() - - -if hasattr(sys, 'pypy_version_info'): - # cStringIO is slow on PyPy, StringIO is faster. However: PyPy's own - # StringBuilder is fastest. 
- from __pypy__ import newlist_hint - try: - from __pypy__.builders import BytesBuilder as StringBuilder - except ImportError: - from __pypy__.builders import StringBuilder - USING_STRINGBUILDER = True - class StringIO(object): - def __init__(self, s=b''): - if s: - self.builder = StringBuilder(len(s)) - self.builder.append(s) - else: - self.builder = StringBuilder() - def write(self, s): - if isinstance(s, memoryview): - s = s.tobytes() - elif isinstance(s, bytearray): - s = bytes(s) - self.builder.append(s) - def getvalue(self): - return self.builder.build() -else: - USING_STRINGBUILDER = False - from io import BytesIO as StringIO - newlist_hint = lambda size: [] - - -from pip._vendor.msgpack.exceptions import ( - BufferFull, - OutOfData, - UnpackValueError, - PackValueError, - PackOverflowError, - ExtraData) - -from pip._vendor.msgpack import ExtType - - -EX_SKIP = 0 -EX_CONSTRUCT = 1 -EX_READ_ARRAY_HEADER = 2 -EX_READ_MAP_HEADER = 3 - -TYPE_IMMEDIATE = 0 -TYPE_ARRAY = 1 -TYPE_MAP = 2 -TYPE_RAW = 3 -TYPE_BIN = 4 -TYPE_EXT = 5 - -DEFAULT_RECURSE_LIMIT = 511 - - -def _check_type_strict(obj, t, type=type, tuple=tuple): - if type(t) is tuple: - return type(obj) in t - else: - return type(obj) is t - - -def _get_data_from_buffer(obj): - try: - view = memoryview(obj) - except TypeError: - # try to use legacy buffer protocol if 2.7, otherwise re-raise - if not PY3: - view = memoryview(buffer(obj)) - warnings.warn("using old buffer interface to unpack %s; " - "this leads to unpacking errors if slicing is used and " - "will be removed in a future version" % type(obj), - RuntimeWarning) - else: - raise - if view.itemsize != 1: - raise ValueError("cannot unpack from multi-byte object") - return view - - -def unpack(stream, **kwargs): - warnings.warn( - "Direct calling implementation's unpack() is deprecated, Use msgpack.unpack() or unpackb() instead.", - PendingDeprecationWarning) - data = stream.read() - return unpackb(data, **kwargs) - - -def unpackb(packed, **kwargs): - 
""" - Unpack an object from `packed`. - - Raises `ExtraData` when `packed` contains extra bytes. - See :class:`Unpacker` for options. - """ - unpacker = Unpacker(None, **kwargs) - unpacker.feed(packed) - try: - ret = unpacker._unpack() - except OutOfData: - raise UnpackValueError("Data is not enough.") - if unpacker._got_extradata(): - raise ExtraData(ret, unpacker._get_extradata()) - return ret - - -class Unpacker(object): - """Streaming unpacker. - - arguments: - - :param file_like: - File-like object having `.read(n)` method. - If specified, unpacker reads serialized data from it and :meth:`feed()` is not usable. - - :param int read_size: - Used as `file_like.read(read_size)`. (default: `min(16*1024, max_buffer_size)`) - - :param bool use_list: - If true, unpack msgpack array to Python list. - Otherwise, unpack to Python tuple. (default: True) - - :param bool raw: - If true, unpack msgpack raw to Python bytes (default). - Otherwise, unpack to Python str (or unicode on Python 2) by decoding - with UTF-8 encoding (recommended). - Currently, the default is true, but it will be changed to false in - near future. So you must specify it explicitly for keeping backward - compatibility. - - *encoding* option which is deprecated overrides this option. - - :param callable object_hook: - When specified, it should be callable. - Unpacker calls it with a dict argument after unpacking msgpack map. - (See also simplejson) - - :param callable object_pairs_hook: - When specified, it should be callable. - Unpacker calls it with a list of key-value pairs after unpacking msgpack map. - (See also simplejson) - - :param str encoding: - Encoding used for decoding msgpack raw. - If it is None (default), msgpack raw is deserialized to Python bytes. - - :param str unicode_errors: - (deprecated) Used for decoding msgpack raw with *encoding*. - (default: `'strict'`) - - :param int max_buffer_size: - Limits size of data waiting unpacked. 0 means system's INT_MAX (default). 
- Raises `BufferFull` exception when it is insufficient. - You should set this parameter when unpacking data from untrusted source. - - :param int max_str_len: - Limits max length of str. (default: 2**31-1) - - :param int max_bin_len: - Limits max length of bin. (default: 2**31-1) - - :param int max_array_len: - Limits max length of array. (default: 2**31-1) - - :param int max_map_len: - Limits max length of map. (default: 2**31-1) - - - example of streaming deserialize from file-like object:: - - unpacker = Unpacker(file_like, raw=False) - for o in unpacker: - process(o) - - example of streaming deserialize from socket:: - - unpacker = Unpacker(raw=False) - while True: - buf = sock.recv(1024**2) - if not buf: - break - unpacker.feed(buf) - for o in unpacker: - process(o) - """ - - def __init__(self, file_like=None, read_size=0, use_list=True, raw=True, - object_hook=None, object_pairs_hook=None, list_hook=None, - encoding=None, unicode_errors=None, max_buffer_size=0, - ext_hook=ExtType, - max_str_len=2147483647, # 2**32-1 - max_bin_len=2147483647, - max_array_len=2147483647, - max_map_len=2147483647, - max_ext_len=2147483647): - - if encoding is not None: - warnings.warn( - "encoding is deprecated, Use raw=False instead.", - PendingDeprecationWarning) - - if unicode_errors is None: - unicode_errors = 'strict' - - if file_like is None: - self._feeding = True - else: - if not callable(file_like.read): - raise TypeError("`file_like.read` must be callable") - self.file_like = file_like - self._feeding = False - - #: array of bytes fed. - self._buffer = bytearray() - # Some very old pythons don't support `struct.unpack_from()` with a - # `bytearray`. So we wrap it in a `buffer()` there. 
- if sys.version_info < (2, 7, 6): - self._buffer_view = buffer(self._buffer) - else: - self._buffer_view = self._buffer - #: Which position we currently reads - self._buff_i = 0 - - # When Unpacker is used as an iterable, between the calls to next(), - # the buffer is not "consumed" completely, for efficiency sake. - # Instead, it is done sloppily. To make sure we raise BufferFull at - # the correct moments, we have to keep track of how sloppy we were. - # Furthermore, when the buffer is incomplete (that is: in the case - # we raise an OutOfData) we need to rollback the buffer to the correct - # state, which _buf_checkpoint records. - self._buf_checkpoint = 0 - - self._max_buffer_size = max_buffer_size or 2**31-1 - if read_size > self._max_buffer_size: - raise ValueError("read_size must be smaller than max_buffer_size") - self._read_size = read_size or min(self._max_buffer_size, 16*1024) - self._raw = bool(raw) - self._encoding = encoding - self._unicode_errors = unicode_errors - self._use_list = use_list - self._list_hook = list_hook - self._object_hook = object_hook - self._object_pairs_hook = object_pairs_hook - self._ext_hook = ext_hook - self._max_str_len = max_str_len - self._max_bin_len = max_bin_len - self._max_array_len = max_array_len - self._max_map_len = max_map_len - self._max_ext_len = max_ext_len - self._stream_offset = 0 - - if list_hook is not None and not callable(list_hook): - raise TypeError('`list_hook` is not callable') - if object_hook is not None and not callable(object_hook): - raise TypeError('`object_hook` is not callable') - if object_pairs_hook is not None and not callable(object_pairs_hook): - raise TypeError('`object_pairs_hook` is not callable') - if object_hook is not None and object_pairs_hook is not None: - raise TypeError("object_pairs_hook and object_hook are mutually " - "exclusive") - if not callable(ext_hook): - raise TypeError("`ext_hook` is not callable") - - def feed(self, next_bytes): - assert self._feeding - view = 
_get_data_from_buffer(next_bytes) - if (len(self._buffer) - self._buff_i + len(view) > self._max_buffer_size): - raise BufferFull - - # Strip buffer before checkpoint before reading file. - if self._buf_checkpoint > 0: - del self._buffer[:self._buf_checkpoint] - self._buff_i -= self._buf_checkpoint - self._buf_checkpoint = 0 - - self._buffer += view - - def _consume(self): - """ Gets rid of the used parts of the buffer. """ - self._stream_offset += self._buff_i - self._buf_checkpoint - self._buf_checkpoint = self._buff_i - - def _got_extradata(self): - return self._buff_i < len(self._buffer) - - def _get_extradata(self): - return self._buffer[self._buff_i:] - - def read_bytes(self, n): - return self._read(n) - - def _read(self, n): - # (int) -> bytearray - self._reserve(n) - i = self._buff_i - self._buff_i = i+n - return self._buffer[i:i+n] - - def _reserve(self, n): - remain_bytes = len(self._buffer) - self._buff_i - n - - # Fast path: buffer has n bytes already - if remain_bytes >= 0: - return - - if self._feeding: - self._buff_i = self._buf_checkpoint - raise OutOfData - - # Strip buffer before checkpoint before reading file. 
- if self._buf_checkpoint > 0: - del self._buffer[:self._buf_checkpoint] - self._buff_i -= self._buf_checkpoint - self._buf_checkpoint = 0 - - # Read from file - remain_bytes = -remain_bytes - while remain_bytes > 0: - to_read_bytes = max(self._read_size, remain_bytes) - read_data = self.file_like.read(to_read_bytes) - if not read_data: - break - assert isinstance(read_data, bytes) - self._buffer += read_data - remain_bytes -= len(read_data) - - if len(self._buffer) < n + self._buff_i: - self._buff_i = 0 # rollback - raise OutOfData - - def _read_header(self, execute=EX_CONSTRUCT): - typ = TYPE_IMMEDIATE - n = 0 - obj = None - self._reserve(1) - b = self._buffer[self._buff_i] - self._buff_i += 1 - if b & 0b10000000 == 0: - obj = b - elif b & 0b11100000 == 0b11100000: - obj = -1 - (b ^ 0xff) - elif b & 0b11100000 == 0b10100000: - n = b & 0b00011111 - typ = TYPE_RAW - if n > self._max_str_len: - raise UnpackValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) - obj = self._read(n) - elif b & 0b11110000 == 0b10010000: - n = b & 0b00001111 - typ = TYPE_ARRAY - if n > self._max_array_len: - raise UnpackValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) - elif b & 0b11110000 == 0b10000000: - n = b & 0b00001111 - typ = TYPE_MAP - if n > self._max_map_len: - raise UnpackValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) - elif b == 0xc0: - obj = None - elif b == 0xc2: - obj = False - elif b == 0xc3: - obj = True - elif b == 0xc4: - typ = TYPE_BIN - self._reserve(1) - n = self._buffer[self._buff_i] - self._buff_i += 1 - if n > self._max_bin_len: - raise UnpackValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) - obj = self._read(n) - elif b == 0xc5: - typ = TYPE_BIN - self._reserve(2) - n = struct.unpack_from(">H", self._buffer_view, self._buff_i)[0] - self._buff_i += 2 - if n > self._max_bin_len: - raise UnpackValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) - obj = self._read(n) - elif b == 0xc6: - typ = 
TYPE_BIN - self._reserve(4) - n = struct.unpack_from(">I", self._buffer_view, self._buff_i)[0] - self._buff_i += 4 - if n > self._max_bin_len: - raise UnpackValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) - obj = self._read(n) - elif b == 0xc7: # ext 8 - typ = TYPE_EXT - self._reserve(2) - L, n = struct.unpack_from('Bb', self._buffer_view, self._buff_i) - self._buff_i += 2 - if L > self._max_ext_len: - raise UnpackValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) - obj = self._read(L) - elif b == 0xc8: # ext 16 - typ = TYPE_EXT - self._reserve(3) - L, n = struct.unpack_from('>Hb', self._buffer_view, self._buff_i) - self._buff_i += 3 - if L > self._max_ext_len: - raise UnpackValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) - obj = self._read(L) - elif b == 0xc9: # ext 32 - typ = TYPE_EXT - self._reserve(5) - L, n = struct.unpack_from('>Ib', self._buffer_view, self._buff_i) - self._buff_i += 5 - if L > self._max_ext_len: - raise UnpackValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) - obj = self._read(L) - elif b == 0xca: - self._reserve(4) - obj = struct.unpack_from(">f", self._buffer_view, self._buff_i)[0] - self._buff_i += 4 - elif b == 0xcb: - self._reserve(8) - obj = struct.unpack_from(">d", self._buffer_view, self._buff_i)[0] - self._buff_i += 8 - elif b == 0xcc: - self._reserve(1) - obj = self._buffer[self._buff_i] - self._buff_i += 1 - elif b == 0xcd: - self._reserve(2) - obj = struct.unpack_from(">H", self._buffer_view, self._buff_i)[0] - self._buff_i += 2 - elif b == 0xce: - self._reserve(4) - obj = struct.unpack_from(">I", self._buffer_view, self._buff_i)[0] - self._buff_i += 4 - elif b == 0xcf: - self._reserve(8) - obj = struct.unpack_from(">Q", self._buffer_view, self._buff_i)[0] - self._buff_i += 8 - elif b == 0xd0: - self._reserve(1) - obj = struct.unpack_from("b", self._buffer_view, self._buff_i)[0] - self._buff_i += 1 - elif b == 0xd1: - self._reserve(2) - obj = 
struct.unpack_from(">h", self._buffer_view, self._buff_i)[0] - self._buff_i += 2 - elif b == 0xd2: - self._reserve(4) - obj = struct.unpack_from(">i", self._buffer_view, self._buff_i)[0] - self._buff_i += 4 - elif b == 0xd3: - self._reserve(8) - obj = struct.unpack_from(">q", self._buffer_view, self._buff_i)[0] - self._buff_i += 8 - elif b == 0xd4: # fixext 1 - typ = TYPE_EXT - if self._max_ext_len < 1: - raise UnpackValueError("%s exceeds max_ext_len(%s)" % (1, self._max_ext_len)) - self._reserve(2) - n, obj = struct.unpack_from("b1s", self._buffer_view, self._buff_i) - self._buff_i += 2 - elif b == 0xd5: # fixext 2 - typ = TYPE_EXT - if self._max_ext_len < 2: - raise UnpackValueError("%s exceeds max_ext_len(%s)" % (2, self._max_ext_len)) - self._reserve(3) - n, obj = struct.unpack_from("b2s", self._buffer_view, self._buff_i) - self._buff_i += 3 - elif b == 0xd6: # fixext 4 - typ = TYPE_EXT - if self._max_ext_len < 4: - raise UnpackValueError("%s exceeds max_ext_len(%s)" % (4, self._max_ext_len)) - self._reserve(5) - n, obj = struct.unpack_from("b4s", self._buffer_view, self._buff_i) - self._buff_i += 5 - elif b == 0xd7: # fixext 8 - typ = TYPE_EXT - if self._max_ext_len < 8: - raise UnpackValueError("%s exceeds max_ext_len(%s)" % (8, self._max_ext_len)) - self._reserve(9) - n, obj = struct.unpack_from("b8s", self._buffer_view, self._buff_i) - self._buff_i += 9 - elif b == 0xd8: # fixext 16 - typ = TYPE_EXT - if self._max_ext_len < 16: - raise UnpackValueError("%s exceeds max_ext_len(%s)" % (16, self._max_ext_len)) - self._reserve(17) - n, obj = struct.unpack_from("b16s", self._buffer_view, self._buff_i) - self._buff_i += 17 - elif b == 0xd9: - typ = TYPE_RAW - self._reserve(1) - n = self._buffer[self._buff_i] - self._buff_i += 1 - if n > self._max_str_len: - raise UnpackValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) - obj = self._read(n) - elif b == 0xda: - typ = TYPE_RAW - self._reserve(2) - n, = struct.unpack_from(">H", self._buffer_view, 
self._buff_i) - self._buff_i += 2 - if n > self._max_str_len: - raise UnpackValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) - obj = self._read(n) - elif b == 0xdb: - typ = TYPE_RAW - self._reserve(4) - n, = struct.unpack_from(">I", self._buffer_view, self._buff_i) - self._buff_i += 4 - if n > self._max_str_len: - raise UnpackValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) - obj = self._read(n) - elif b == 0xdc: - typ = TYPE_ARRAY - self._reserve(2) - n, = struct.unpack_from(">H", self._buffer_view, self._buff_i) - self._buff_i += 2 - if n > self._max_array_len: - raise UnpackValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) - elif b == 0xdd: - typ = TYPE_ARRAY - self._reserve(4) - n, = struct.unpack_from(">I", self._buffer_view, self._buff_i) - self._buff_i += 4 - if n > self._max_array_len: - raise UnpackValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) - elif b == 0xde: - self._reserve(2) - n, = struct.unpack_from(">H", self._buffer_view, self._buff_i) - self._buff_i += 2 - if n > self._max_map_len: - raise UnpackValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) - typ = TYPE_MAP - elif b == 0xdf: - self._reserve(4) - n, = struct.unpack_from(">I", self._buffer_view, self._buff_i) - self._buff_i += 4 - if n > self._max_map_len: - raise UnpackValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) - typ = TYPE_MAP - else: - raise UnpackValueError("Unknown header: 0x%x" % b) - return typ, n, obj - - def _unpack(self, execute=EX_CONSTRUCT): - typ, n, obj = self._read_header(execute) - - if execute == EX_READ_ARRAY_HEADER: - if typ != TYPE_ARRAY: - raise UnpackValueError("Expected array") - return n - if execute == EX_READ_MAP_HEADER: - if typ != TYPE_MAP: - raise UnpackValueError("Expected map") - return n - # TODO should we eliminate the recursion? 
- if typ == TYPE_ARRAY: - if execute == EX_SKIP: - for i in xrange(n): - # TODO check whether we need to call `list_hook` - self._unpack(EX_SKIP) - return - ret = newlist_hint(n) - for i in xrange(n): - ret.append(self._unpack(EX_CONSTRUCT)) - if self._list_hook is not None: - ret = self._list_hook(ret) - # TODO is the interaction between `list_hook` and `use_list` ok? - return ret if self._use_list else tuple(ret) - if typ == TYPE_MAP: - if execute == EX_SKIP: - for i in xrange(n): - # TODO check whether we need to call hooks - self._unpack(EX_SKIP) - self._unpack(EX_SKIP) - return - if self._object_pairs_hook is not None: - ret = self._object_pairs_hook( - (self._unpack(EX_CONSTRUCT), - self._unpack(EX_CONSTRUCT)) - for _ in xrange(n)) - else: - ret = {} - for _ in xrange(n): - key = self._unpack(EX_CONSTRUCT) - ret[key] = self._unpack(EX_CONSTRUCT) - if self._object_hook is not None: - ret = self._object_hook(ret) - return ret - if execute == EX_SKIP: - return - if typ == TYPE_RAW: - if self._encoding is not None: - obj = obj.decode(self._encoding, self._unicode_errors) - elif self._raw: - obj = bytes(obj) - else: - obj = obj.decode('utf_8') - return obj - if typ == TYPE_EXT: - return self._ext_hook(n, bytes(obj)) - if typ == TYPE_BIN: - return bytes(obj) - assert typ == TYPE_IMMEDIATE - return obj - - def __iter__(self): - return self - - def __next__(self): - try: - ret = self._unpack(EX_CONSTRUCT) - self._consume() - return ret - except OutOfData: - self._consume() - raise StopIteration - - next = __next__ - - def skip(self, write_bytes=None): - self._unpack(EX_SKIP) - if write_bytes is not None: - warnings.warn("`write_bytes` option is deprecated. Use `.tell()` instead.", DeprecationWarning) - write_bytes(self._buffer[self._buf_checkpoint:self._buff_i]) - self._consume() - - def unpack(self, write_bytes=None): - ret = self._unpack(EX_CONSTRUCT) - if write_bytes is not None: - warnings.warn("`write_bytes` option is deprecated. 
Use `.tell()` instead.", DeprecationWarning) - write_bytes(self._buffer[self._buf_checkpoint:self._buff_i]) - self._consume() - return ret - - def read_array_header(self, write_bytes=None): - ret = self._unpack(EX_READ_ARRAY_HEADER) - if write_bytes is not None: - warnings.warn("`write_bytes` option is deprecated. Use `.tell()` instead.", DeprecationWarning) - write_bytes(self._buffer[self._buf_checkpoint:self._buff_i]) - self._consume() - return ret - - def read_map_header(self, write_bytes=None): - ret = self._unpack(EX_READ_MAP_HEADER) - if write_bytes is not None: - warnings.warn("`write_bytes` option is deprecated. Use `.tell()` instead.", DeprecationWarning) - write_bytes(self._buffer[self._buf_checkpoint:self._buff_i]) - self._consume() - return ret - - def tell(self): - return self._stream_offset - - -class Packer(object): - """ - MessagePack Packer - - usage: - - packer = Packer() - astream.write(packer.pack(a)) - astream.write(packer.pack(b)) - - Packer's constructor has some keyword arguments: - - :param callable default: - Convert user type to builtin type that Packer supports. - See also simplejson's document. - - :param bool use_single_float: - Use single precision float type for float. (default: False) - - :param bool autoreset: - Reset buffer after each pack and return its content as `bytes`. (default: True). - If set this to false, use `bytes()` to get content and `.reset()` to clear buffer. - - :param bool use_bin_type: - Use bin type introduced in msgpack spec 2.0 for bytes. - It also enables str8 type for unicode. - - :param bool strict_types: - If set to true, types will be checked to be exact. Derived classes - from serializeable types will not be serialized and will be - treated as unsupported type and forwarded to default. - Additionally tuples will not be serialized as lists. - This is useful when trying to implement accurate serialization - for python types. 
- - :param str encoding: - (deprecated) Convert unicode to bytes with this encoding. (default: 'utf-8') - - :param str unicode_errors: - Error handler for encoding unicode. (default: 'strict') - """ - def __init__(self, default=None, encoding=None, unicode_errors=None, - use_single_float=False, autoreset=True, use_bin_type=False, - strict_types=False): - if encoding is None: - encoding = 'utf_8' - else: - warnings.warn( - "encoding is deprecated, Use raw=False instead.", - PendingDeprecationWarning) - - if unicode_errors is None: - unicode_errors = 'strict' - - self._strict_types = strict_types - self._use_float = use_single_float - self._autoreset = autoreset - self._use_bin_type = use_bin_type - self._encoding = encoding - self._unicode_errors = unicode_errors - self._buffer = StringIO() - if default is not None: - if not callable(default): - raise TypeError("default must be callable") - self._default = default - - def _pack(self, obj, nest_limit=DEFAULT_RECURSE_LIMIT, - check=isinstance, check_type_strict=_check_type_strict): - default_used = False - if self._strict_types: - check = check_type_strict - list_types = list - else: - list_types = (list, tuple) - while True: - if nest_limit < 0: - raise PackValueError("recursion limit exceeded") - if obj is None: - return self._buffer.write(b"\xc0") - if check(obj, bool): - if obj: - return self._buffer.write(b"\xc3") - return self._buffer.write(b"\xc2") - if check(obj, int_types): - if 0 <= obj < 0x80: - return self._buffer.write(struct.pack("B", obj)) - if -0x20 <= obj < 0: - return self._buffer.write(struct.pack("b", obj)) - if 0x80 <= obj <= 0xff: - return self._buffer.write(struct.pack("BB", 0xcc, obj)) - if -0x80 <= obj < 0: - return self._buffer.write(struct.pack(">Bb", 0xd0, obj)) - if 0xff < obj <= 0xffff: - return self._buffer.write(struct.pack(">BH", 0xcd, obj)) - if -0x8000 <= obj < -0x80: - return self._buffer.write(struct.pack(">Bh", 0xd1, obj)) - if 0xffff < obj <= 0xffffffff: - return 
self._buffer.write(struct.pack(">BI", 0xce, obj)) - if -0x80000000 <= obj < -0x8000: - return self._buffer.write(struct.pack(">Bi", 0xd2, obj)) - if 0xffffffff < obj <= 0xffffffffffffffff: - return self._buffer.write(struct.pack(">BQ", 0xcf, obj)) - if -0x8000000000000000 <= obj < -0x80000000: - return self._buffer.write(struct.pack(">Bq", 0xd3, obj)) - if not default_used and self._default is not None: - obj = self._default(obj) - default_used = True - continue - raise PackOverflowError("Integer value out of range") - if check(obj, (bytes, bytearray)): - n = len(obj) - if n >= 2**32: - raise PackValueError("%s is too large" % type(obj).__name__) - self._pack_bin_header(n) - return self._buffer.write(obj) - if check(obj, Unicode): - if self._encoding is None: - raise TypeError( - "Can't encode unicode string: " - "no encoding is specified") - obj = obj.encode(self._encoding, self._unicode_errors) - n = len(obj) - if n >= 2**32: - raise PackValueError("String is too large") - self._pack_raw_header(n) - return self._buffer.write(obj) - if check(obj, memoryview): - n = len(obj) * obj.itemsize - if n >= 2**32: - raise PackValueError("Memoryview is too large") - self._pack_bin_header(n) - return self._buffer.write(obj) - if check(obj, float): - if self._use_float: - return self._buffer.write(struct.pack(">Bf", 0xca, obj)) - return self._buffer.write(struct.pack(">Bd", 0xcb, obj)) - if check(obj, ExtType): - code = obj.code - data = obj.data - assert isinstance(code, int) - assert isinstance(data, bytes) - L = len(data) - if L == 1: - self._buffer.write(b'\xd4') - elif L == 2: - self._buffer.write(b'\xd5') - elif L == 4: - self._buffer.write(b'\xd6') - elif L == 8: - self._buffer.write(b'\xd7') - elif L == 16: - self._buffer.write(b'\xd8') - elif L <= 0xff: - self._buffer.write(struct.pack(">BB", 0xc7, L)) - elif L <= 0xffff: - self._buffer.write(struct.pack(">BH", 0xc8, L)) - else: - self._buffer.write(struct.pack(">BI", 0xc9, L)) - self._buffer.write(struct.pack("b", 
code)) - self._buffer.write(data) - return - if check(obj, list_types): - n = len(obj) - self._pack_array_header(n) - for i in xrange(n): - self._pack(obj[i], nest_limit - 1) - return - if check(obj, dict): - return self._pack_map_pairs(len(obj), dict_iteritems(obj), - nest_limit - 1) - if not default_used and self._default is not None: - obj = self._default(obj) - default_used = 1 - continue - raise TypeError("Cannot serialize %r" % (obj, )) - - def pack(self, obj): - try: - self._pack(obj) - except: - self._buffer = StringIO() # force reset - raise - ret = self._buffer.getvalue() - if self._autoreset: - self._buffer = StringIO() - elif USING_STRINGBUILDER: - self._buffer = StringIO(ret) - return ret - - def pack_map_pairs(self, pairs): - self._pack_map_pairs(len(pairs), pairs) - ret = self._buffer.getvalue() - if self._autoreset: - self._buffer = StringIO() - elif USING_STRINGBUILDER: - self._buffer = StringIO(ret) - return ret - - def pack_array_header(self, n): - if n >= 2**32: - raise PackValueError - self._pack_array_header(n) - ret = self._buffer.getvalue() - if self._autoreset: - self._buffer = StringIO() - elif USING_STRINGBUILDER: - self._buffer = StringIO(ret) - return ret - - def pack_map_header(self, n): - if n >= 2**32: - raise PackValueError - self._pack_map_header(n) - ret = self._buffer.getvalue() - if self._autoreset: - self._buffer = StringIO() - elif USING_STRINGBUILDER: - self._buffer = StringIO(ret) - return ret - - def pack_ext_type(self, typecode, data): - if not isinstance(typecode, int): - raise TypeError("typecode must have int type.") - if not 0 <= typecode <= 127: - raise ValueError("typecode should be 0-127") - if not isinstance(data, bytes): - raise TypeError("data must have bytes type") - L = len(data) - if L > 0xffffffff: - raise PackValueError("Too large data") - if L == 1: - self._buffer.write(b'\xd4') - elif L == 2: - self._buffer.write(b'\xd5') - elif L == 4: - self._buffer.write(b'\xd6') - elif L == 8: - 
self._buffer.write(b'\xd7') - elif L == 16: - self._buffer.write(b'\xd8') - elif L <= 0xff: - self._buffer.write(b'\xc7' + struct.pack('B', L)) - elif L <= 0xffff: - self._buffer.write(b'\xc8' + struct.pack('>H', L)) - else: - self._buffer.write(b'\xc9' + struct.pack('>I', L)) - self._buffer.write(struct.pack('B', typecode)) - self._buffer.write(data) - - def _pack_array_header(self, n): - if n <= 0x0f: - return self._buffer.write(struct.pack('B', 0x90 + n)) - if n <= 0xffff: - return self._buffer.write(struct.pack(">BH", 0xdc, n)) - if n <= 0xffffffff: - return self._buffer.write(struct.pack(">BI", 0xdd, n)) - raise PackValueError("Array is too large") - - def _pack_map_header(self, n): - if n <= 0x0f: - return self._buffer.write(struct.pack('B', 0x80 + n)) - if n <= 0xffff: - return self._buffer.write(struct.pack(">BH", 0xde, n)) - if n <= 0xffffffff: - return self._buffer.write(struct.pack(">BI", 0xdf, n)) - raise PackValueError("Dict is too large") - - def _pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT): - self._pack_map_header(n) - for (k, v) in pairs: - self._pack(k, nest_limit - 1) - self._pack(v, nest_limit - 1) - - def _pack_raw_header(self, n): - if n <= 0x1f: - self._buffer.write(struct.pack('B', 0xa0 + n)) - elif self._use_bin_type and n <= 0xff: - self._buffer.write(struct.pack('>BB', 0xd9, n)) - elif n <= 0xffff: - self._buffer.write(struct.pack(">BH", 0xda, n)) - elif n <= 0xffffffff: - self._buffer.write(struct.pack(">BI", 0xdb, n)) - else: - raise PackValueError('Raw is too large') - - def _pack_bin_header(self, n): - if not self._use_bin_type: - return self._pack_raw_header(n) - elif n <= 0xff: - return self._buffer.write(struct.pack('>BB', 0xc4, n)) - elif n <= 0xffff: - return self._buffer.write(struct.pack(">BH", 0xc5, n)) - elif n <= 0xffffffff: - return self._buffer.write(struct.pack(">BI", 0xc6, n)) - else: - raise PackValueError('Bin is too large') - - def bytes(self): - return self._buffer.getvalue() - - def reset(self): 
- self._buffer = StringIO() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/__about__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/__about__.py deleted file mode 100644 index 7481c9e..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/__about__.py +++ /dev/null @@ -1,27 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -__all__ = [ - "__title__", - "__summary__", - "__uri__", - "__version__", - "__author__", - "__email__", - "__license__", - "__copyright__", -] - -__title__ = "packaging" -__summary__ = "Core utilities for Python packages" -__uri__ = "https://github.com/pypa/packaging" - -__version__ = "19.0" - -__author__ = "Donald Stufft and individual contributors" -__email__ = "donald@stufft.io" - -__license__ = "BSD or Apache License, Version 2.0" -__copyright__ = "Copyright 2014-2019 %s" % __author__ diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/__init__.py deleted file mode 100644 index a0cf67d..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. 
-from __future__ import absolute_import, division, print_function - -from .__about__ import ( - __author__, - __copyright__, - __email__, - __license__, - __summary__, - __title__, - __uri__, - __version__, -) - -__all__ = [ - "__title__", - "__summary__", - "__uri__", - "__version__", - "__author__", - "__email__", - "__license__", - "__copyright__", -] diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/_compat.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/_compat.py deleted file mode 100644 index 25da473..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/_compat.py +++ /dev/null @@ -1,31 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -import sys - - -PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 - -# flake8: noqa - -if PY3: - string_types = (str,) -else: - string_types = (basestring,) - - -def with_metaclass(meta, *bases): - """ - Create a base class with a metaclass. - """ - # This requires a bit of explanation: the basic idea is to make a dummy - # metaclass for one level of class instantiation that replaces itself with - # the actual metaclass. 
- class metaclass(meta): - def __new__(cls, name, this_bases, d): - return meta(name, bases, d) - - return type.__new__(metaclass, "temporary_class", (), {}) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/_structures.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/_structures.py deleted file mode 100644 index 68dcca6..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/_structures.py +++ /dev/null @@ -1,68 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - - -class Infinity(object): - def __repr__(self): - return "Infinity" - - def __hash__(self): - return hash(repr(self)) - - def __lt__(self, other): - return False - - def __le__(self, other): - return False - - def __eq__(self, other): - return isinstance(other, self.__class__) - - def __ne__(self, other): - return not isinstance(other, self.__class__) - - def __gt__(self, other): - return True - - def __ge__(self, other): - return True - - def __neg__(self): - return NegativeInfinity - - -Infinity = Infinity() - - -class NegativeInfinity(object): - def __repr__(self): - return "-Infinity" - - def __hash__(self): - return hash(repr(self)) - - def __lt__(self, other): - return True - - def __le__(self, other): - return True - - def __eq__(self, other): - return isinstance(other, self.__class__) - - def __ne__(self, other): - return not isinstance(other, self.__class__) - - def __gt__(self, other): - return False - - def __ge__(self, other): - return False - - def __neg__(self): - return Infinity - - -NegativeInfinity = NegativeInfinity() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/markers.py 
b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/markers.py deleted file mode 100644 index 5482476..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/markers.py +++ /dev/null @@ -1,296 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -import operator -import os -import platform -import sys - -from pip._vendor.pyparsing import ParseException, ParseResults, stringStart, stringEnd -from pip._vendor.pyparsing import ZeroOrMore, Group, Forward, QuotedString -from pip._vendor.pyparsing import Literal as L # noqa - -from ._compat import string_types -from .specifiers import Specifier, InvalidSpecifier - - -__all__ = [ - "InvalidMarker", - "UndefinedComparison", - "UndefinedEnvironmentName", - "Marker", - "default_environment", -] - - -class InvalidMarker(ValueError): - """ - An invalid marker was found, users should refer to PEP 508. - """ - - -class UndefinedComparison(ValueError): - """ - An invalid operation was attempted on a value that doesn't support it. - """ - - -class UndefinedEnvironmentName(ValueError): - """ - A name was attempted to be used that does not exist inside of the - environment. 
- """ - - -class Node(object): - def __init__(self, value): - self.value = value - - def __str__(self): - return str(self.value) - - def __repr__(self): - return "<{0}({1!r})>".format(self.__class__.__name__, str(self)) - - def serialize(self): - raise NotImplementedError - - -class Variable(Node): - def serialize(self): - return str(self) - - -class Value(Node): - def serialize(self): - return '"{0}"'.format(self) - - -class Op(Node): - def serialize(self): - return str(self) - - -VARIABLE = ( - L("implementation_version") - | L("platform_python_implementation") - | L("implementation_name") - | L("python_full_version") - | L("platform_release") - | L("platform_version") - | L("platform_machine") - | L("platform_system") - | L("python_version") - | L("sys_platform") - | L("os_name") - | L("os.name") - | L("sys.platform") # PEP-345 - | L("platform.version") # PEP-345 - | L("platform.machine") # PEP-345 - | L("platform.python_implementation") # PEP-345 - | L("python_implementation") # PEP-345 - | L("extra") # undocumented setuptools legacy -) -ALIASES = { - "os.name": "os_name", - "sys.platform": "sys_platform", - "platform.version": "platform_version", - "platform.machine": "platform_machine", - "platform.python_implementation": "platform_python_implementation", - "python_implementation": "platform_python_implementation", -} -VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0]))) - -VERSION_CMP = ( - L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<") -) - -MARKER_OP = VERSION_CMP | L("not in") | L("in") -MARKER_OP.setParseAction(lambda s, l, t: Op(t[0])) - -MARKER_VALUE = QuotedString("'") | QuotedString('"') -MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0])) - -BOOLOP = L("and") | L("or") - -MARKER_VAR = VARIABLE | MARKER_VALUE - -MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR) -MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0])) - -LPAREN = L("(").suppress() -RPAREN = L(")").suppress() - 
-MARKER_EXPR = Forward() -MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN) -MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR) - -MARKER = stringStart + MARKER_EXPR + stringEnd - - -def _coerce_parse_result(results): - if isinstance(results, ParseResults): - return [_coerce_parse_result(i) for i in results] - else: - return results - - -def _format_marker(marker, first=True): - assert isinstance(marker, (list, tuple, string_types)) - - # Sometimes we have a structure like [[...]] which is a single item list - # where the single item is itself it's own list. In that case we want skip - # the rest of this function so that we don't get extraneous () on the - # outside. - if ( - isinstance(marker, list) - and len(marker) == 1 - and isinstance(marker[0], (list, tuple)) - ): - return _format_marker(marker[0]) - - if isinstance(marker, list): - inner = (_format_marker(m, first=False) for m in marker) - if first: - return " ".join(inner) - else: - return "(" + " ".join(inner) + ")" - elif isinstance(marker, tuple): - return " ".join([m.serialize() for m in marker]) - else: - return marker - - -_operators = { - "in": lambda lhs, rhs: lhs in rhs, - "not in": lambda lhs, rhs: lhs not in rhs, - "<": operator.lt, - "<=": operator.le, - "==": operator.eq, - "!=": operator.ne, - ">=": operator.ge, - ">": operator.gt, -} - - -def _eval_op(lhs, op, rhs): - try: - spec = Specifier("".join([op.serialize(), rhs])) - except InvalidSpecifier: - pass - else: - return spec.contains(lhs) - - oper = _operators.get(op.serialize()) - if oper is None: - raise UndefinedComparison( - "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs) - ) - - return oper(lhs, rhs) - - -_undefined = object() - - -def _get_env(environment, name): - value = environment.get(name, _undefined) - - if value is _undefined: - raise UndefinedEnvironmentName( - "{0!r} does not exist in evaluation environment.".format(name) - ) - - return value - - -def _evaluate_markers(markers, 
environment): - groups = [[]] - - for marker in markers: - assert isinstance(marker, (list, tuple, string_types)) - - if isinstance(marker, list): - groups[-1].append(_evaluate_markers(marker, environment)) - elif isinstance(marker, tuple): - lhs, op, rhs = marker - - if isinstance(lhs, Variable): - lhs_value = _get_env(environment, lhs.value) - rhs_value = rhs.value - else: - lhs_value = lhs.value - rhs_value = _get_env(environment, rhs.value) - - groups[-1].append(_eval_op(lhs_value, op, rhs_value)) - else: - assert marker in ["and", "or"] - if marker == "or": - groups.append([]) - - return any(all(item) for item in groups) - - -def format_full_version(info): - version = "{0.major}.{0.minor}.{0.micro}".format(info) - kind = info.releaselevel - if kind != "final": - version += kind[0] + str(info.serial) - return version - - -def default_environment(): - if hasattr(sys, "implementation"): - iver = format_full_version(sys.implementation.version) - implementation_name = sys.implementation.name - else: - iver = "0" - implementation_name = "" - - return { - "implementation_name": implementation_name, - "implementation_version": iver, - "os_name": os.name, - "platform_machine": platform.machine(), - "platform_release": platform.release(), - "platform_system": platform.system(), - "platform_version": platform.version(), - "python_full_version": platform.python_version(), - "platform_python_implementation": platform.python_implementation(), - "python_version": platform.python_version()[:3], - "sys_platform": sys.platform, - } - - -class Marker(object): - def __init__(self, marker): - try: - self._markers = _coerce_parse_result(MARKER.parseString(marker)) - except ParseException as e: - err_str = "Invalid marker: {0!r}, parse error at {1!r}".format( - marker, marker[e.loc : e.loc + 8] - ) - raise InvalidMarker(err_str) - - def __str__(self): - return _format_marker(self._markers) - - def __repr__(self): - return "<Marker({0!r})>".format(str(self)) - - def evaluate(self, 
environment=None): - """Evaluate a marker. - - Return the boolean from evaluating the given marker against the - environment. environment is an optional argument to override all or - part of the determined environment. - - The environment is determined from the current Python process. - """ - current_environment = default_environment() - if environment is not None: - current_environment.update(environment) - - return _evaluate_markers(self._markers, current_environment) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/requirements.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/requirements.py deleted file mode 100644 index dbc5f11..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/requirements.py +++ /dev/null @@ -1,138 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -import string -import re - -from pip._vendor.pyparsing import stringStart, stringEnd, originalTextFor, ParseException -from pip._vendor.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine -from pip._vendor.pyparsing import Literal as L # noqa -from pip._vendor.six.moves.urllib import parse as urlparse - -from .markers import MARKER_EXPR, Marker -from .specifiers import LegacySpecifier, Specifier, SpecifierSet - - -class InvalidRequirement(ValueError): - """ - An invalid requirement was found, users should refer to PEP 508. 
- """ - - -ALPHANUM = Word(string.ascii_letters + string.digits) - -LBRACKET = L("[").suppress() -RBRACKET = L("]").suppress() -LPAREN = L("(").suppress() -RPAREN = L(")").suppress() -COMMA = L(",").suppress() -SEMICOLON = L(";").suppress() -AT = L("@").suppress() - -PUNCTUATION = Word("-_.") -IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM) -IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END)) - -NAME = IDENTIFIER("name") -EXTRA = IDENTIFIER - -URI = Regex(r"[^ ]+")("url") -URL = AT + URI - -EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA) -EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras") - -VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE) -VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE) - -VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY -VERSION_MANY = Combine( - VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False -)("_raw_spec") -_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)) -_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "") - -VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier") -VERSION_SPEC.setParseAction(lambda s, l, t: t[1]) - -MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker") -MARKER_EXPR.setParseAction( - lambda s, l, t: Marker(s[t._original_start : t._original_end]) -) -MARKER_SEPARATOR = SEMICOLON -MARKER = MARKER_SEPARATOR + MARKER_EXPR - -VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER) -URL_AND_MARKER = URL + Optional(MARKER) - -NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER) - -REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd -# pyparsing isn't thread safe during initialization, so we do it eagerly, see -# issue #104 -REQUIREMENT.parseString("x[]") - - -class Requirement(object): - """Parse a requirement. - - Parse a given requirement string into its parts, such as name, specifier, - URL, and extras. 
Raises InvalidRequirement on a badly-formed requirement - string. - """ - - # TODO: Can we test whether something is contained within a requirement? - # If so how do we do that? Do we need to test against the _name_ of - # the thing as well as the version? What about the markers? - # TODO: Can we normalize the name and extra name? - - def __init__(self, requirement_string): - try: - req = REQUIREMENT.parseString(requirement_string) - except ParseException as e: - raise InvalidRequirement( - 'Parse error at "{0!r}": {1}'.format( - requirement_string[e.loc : e.loc + 8], e.msg - ) - ) - - self.name = req.name - if req.url: - parsed_url = urlparse.urlparse(req.url) - if parsed_url.scheme == "file": - if urlparse.urlunparse(parsed_url) != req.url: - raise InvalidRequirement("Invalid URL given") - elif not (parsed_url.scheme and parsed_url.netloc) or ( - not parsed_url.scheme and not parsed_url.netloc - ): - raise InvalidRequirement("Invalid URL: {0}".format(req.url)) - self.url = req.url - else: - self.url = None - self.extras = set(req.extras.asList() if req.extras else []) - self.specifier = SpecifierSet(req.specifier) - self.marker = req.marker if req.marker else None - - def __str__(self): - parts = [self.name] - - if self.extras: - parts.append("[{0}]".format(",".join(sorted(self.extras)))) - - if self.specifier: - parts.append(str(self.specifier)) - - if self.url: - parts.append("@ {0}".format(self.url)) - if self.marker: - parts.append(" ") - - if self.marker: - parts.append("; {0}".format(self.marker)) - - return "".join(parts) - - def __repr__(self): - return "<Requirement({0!r})>".format(str(self)) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/specifiers.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/specifiers.py deleted file mode 100644 index 743576a..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/specifiers.py +++ /dev/null @@ -1,749 +0,0 
@@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -import abc -import functools -import itertools -import re - -from ._compat import string_types, with_metaclass -from .version import Version, LegacyVersion, parse - - -class InvalidSpecifier(ValueError): - """ - An invalid specifier was found, users should refer to PEP 440. - """ - - -class BaseSpecifier(with_metaclass(abc.ABCMeta, object)): - @abc.abstractmethod - def __str__(self): - """ - Returns the str representation of this Specifier like object. This - should be representative of the Specifier itself. - """ - - @abc.abstractmethod - def __hash__(self): - """ - Returns a hash value for this Specifier like object. - """ - - @abc.abstractmethod - def __eq__(self, other): - """ - Returns a boolean representing whether or not the two Specifier like - objects are equal. - """ - - @abc.abstractmethod - def __ne__(self, other): - """ - Returns a boolean representing whether or not the two Specifier like - objects are not equal. - """ - - @abc.abstractproperty - def prereleases(self): - """ - Returns whether or not pre-releases as a whole are allowed by this - specifier. - """ - - @prereleases.setter - def prereleases(self, value): - """ - Sets whether or not pre-releases as a whole are allowed by this - specifier. - """ - - @abc.abstractmethod - def contains(self, item, prereleases=None): - """ - Determines if the given item is contained within this specifier. - """ - - @abc.abstractmethod - def filter(self, iterable, prereleases=None): - """ - Takes an iterable of items and filters them so that only items which - are contained within this specifier are allowed in it. 
- """ - - -class _IndividualSpecifier(BaseSpecifier): - - _operators = {} - - def __init__(self, spec="", prereleases=None): - match = self._regex.search(spec) - if not match: - raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec)) - - self._spec = (match.group("operator").strip(), match.group("version").strip()) - - # Store whether or not this Specifier should accept prereleases - self._prereleases = prereleases - - def __repr__(self): - pre = ( - ", prereleases={0!r}".format(self.prereleases) - if self._prereleases is not None - else "" - ) - - return "<{0}({1!r}{2})>".format(self.__class__.__name__, str(self), pre) - - def __str__(self): - return "{0}{1}".format(*self._spec) - - def __hash__(self): - return hash(self._spec) - - def __eq__(self, other): - if isinstance(other, string_types): - try: - other = self.__class__(other) - except InvalidSpecifier: - return NotImplemented - elif not isinstance(other, self.__class__): - return NotImplemented - - return self._spec == other._spec - - def __ne__(self, other): - if isinstance(other, string_types): - try: - other = self.__class__(other) - except InvalidSpecifier: - return NotImplemented - elif not isinstance(other, self.__class__): - return NotImplemented - - return self._spec != other._spec - - def _get_operator(self, op): - return getattr(self, "_compare_{0}".format(self._operators[op])) - - def _coerce_version(self, version): - if not isinstance(version, (LegacyVersion, Version)): - version = parse(version) - return version - - @property - def operator(self): - return self._spec[0] - - @property - def version(self): - return self._spec[1] - - @property - def prereleases(self): - return self._prereleases - - @prereleases.setter - def prereleases(self, value): - self._prereleases = value - - def __contains__(self, item): - return self.contains(item) - - def contains(self, item, prereleases=None): - # Determine if prereleases are to be allowed or not. 
- if prereleases is None: - prereleases = self.prereleases - - # Normalize item to a Version or LegacyVersion, this allows us to have - # a shortcut for ``"2.0" in Specifier(">=2") - item = self._coerce_version(item) - - # Determine if we should be supporting prereleases in this specifier - # or not, if we do not support prereleases than we can short circuit - # logic if this version is a prereleases. - if item.is_prerelease and not prereleases: - return False - - # Actually do the comparison to determine if this item is contained - # within this Specifier or not. - return self._get_operator(self.operator)(item, self.version) - - def filter(self, iterable, prereleases=None): - yielded = False - found_prereleases = [] - - kw = {"prereleases": prereleases if prereleases is not None else True} - - # Attempt to iterate over all the values in the iterable and if any of - # them match, yield them. - for version in iterable: - parsed_version = self._coerce_version(version) - - if self.contains(parsed_version, **kw): - # If our version is a prerelease, and we were not set to allow - # prereleases, then we'll store it for later incase nothing - # else matches this specifier. - if parsed_version.is_prerelease and not ( - prereleases or self.prereleases - ): - found_prereleases.append(version) - # Either this is not a prerelease, or we should have been - # accepting prereleases from the beginning. - else: - yielded = True - yield version - - # Now that we've iterated over everything, determine if we've yielded - # any values, and if we have not and we have any prereleases stored up - # then we will go ahead and yield the prereleases. 
- if not yielded and found_prereleases: - for version in found_prereleases: - yield version - - -class LegacySpecifier(_IndividualSpecifier): - - _regex_str = r""" - (?P<operator>(==|!=|<=|>=|<|>)) - \s* - (?P<version> - [^,;\s)]* # Since this is a "legacy" specifier, and the version - # string can be just about anything, we match everything - # except for whitespace, a semi-colon for marker support, - # a closing paren since versions can be enclosed in - # them, and a comma since it's a version separator. - ) - """ - - _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) - - _operators = { - "==": "equal", - "!=": "not_equal", - "<=": "less_than_equal", - ">=": "greater_than_equal", - "<": "less_than", - ">": "greater_than", - } - - def _coerce_version(self, version): - if not isinstance(version, LegacyVersion): - version = LegacyVersion(str(version)) - return version - - def _compare_equal(self, prospective, spec): - return prospective == self._coerce_version(spec) - - def _compare_not_equal(self, prospective, spec): - return prospective != self._coerce_version(spec) - - def _compare_less_than_equal(self, prospective, spec): - return prospective <= self._coerce_version(spec) - - def _compare_greater_than_equal(self, prospective, spec): - return prospective >= self._coerce_version(spec) - - def _compare_less_than(self, prospective, spec): - return prospective < self._coerce_version(spec) - - def _compare_greater_than(self, prospective, spec): - return prospective > self._coerce_version(spec) - - -def _require_version_compare(fn): - @functools.wraps(fn) - def wrapped(self, prospective, spec): - if not isinstance(prospective, Version): - return False - return fn(self, prospective, spec) - - return wrapped - - -class Specifier(_IndividualSpecifier): - - _regex_str = r""" - (?P<operator>(~=|==|!=|<=|>=|<|>|===)) - (?P<version> - (?: - # The identity operators allow for an escape hatch that will - # do an exact string match of the version you 
wish to install. - # This will not be parsed by PEP 440 and we cannot determine - # any semantic meaning from it. This operator is discouraged - # but included entirely as an escape hatch. - (?<====) # Only match for the identity operator - \s* - [^\s]* # We just match everything, except for whitespace - # since we are only testing for strict identity. - ) - | - (?: - # The (non)equality operators allow for wild card and local - # versions to be specified so we have to define these two - # operators separately to enable that. - (?<===|!=) # Only match for equals and not equals - - \s* - v? - (?:[0-9]+!)? # epoch - [0-9]+(?:\.[0-9]+)* # release - (?: # pre release - [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) - [-_\.]? - [0-9]* - )? - (?: # post release - (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) - )? - - # You cannot use a wild card and a dev or local version - # together so group them with a | and make them optional. - (?: - (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release - (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local - | - \.\* # Wild card syntax of .* - )? - ) - | - (?: - # The compatible operator requires at least two digits in the - # release segment. - (?<=~=) # Only match for the compatible operator - - \s* - v? - (?:[0-9]+!)? # epoch - [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) - (?: # pre release - [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) - [-_\.]? - [0-9]* - )? - (?: # post release - (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) - )? - (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release - ) - | - (?: - # All other operators only allow a sub set of what the - # (non)equality operators do. Specifically they do not allow - # local versions to be specified nor do they allow the prefix - # matching wild cards. - (?<!==|!=|~=) # We have special cases for these - # operators so we want to make sure they - # don't match here. - - \s* - v? - (?:[0-9]+!)? # epoch - [0-9]+(?:\.[0-9]+)* # release - (?: # pre release - [-_\.]? 
- (a|b|c|rc|alpha|beta|pre|preview) - [-_\.]? - [0-9]* - )? - (?: # post release - (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) - )? - (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release - ) - ) - """ - - _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) - - _operators = { - "~=": "compatible", - "==": "equal", - "!=": "not_equal", - "<=": "less_than_equal", - ">=": "greater_than_equal", - "<": "less_than", - ">": "greater_than", - "===": "arbitrary", - } - - @_require_version_compare - def _compare_compatible(self, prospective, spec): - # Compatible releases have an equivalent combination of >= and ==. That - # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to - # implement this in terms of the other specifiers instead of - # implementing it ourselves. The only thing we need to do is construct - # the other specifiers. - - # We want everything but the last item in the version, but we want to - # ignore post and dev releases and we want to treat the pre-release as - # it's own separate segment. - prefix = ".".join( - list( - itertools.takewhile( - lambda x: (not x.startswith("post") and not x.startswith("dev")), - _version_split(spec), - ) - )[:-1] - ) - - # Add the prefix notation to the end of our string - prefix += ".*" - - return self._get_operator(">=")(prospective, spec) and self._get_operator("==")( - prospective, prefix - ) - - @_require_version_compare - def _compare_equal(self, prospective, spec): - # We need special logic to handle prefix matching - if spec.endswith(".*"): - # In the case of prefix matching we want to ignore local segment. - prospective = Version(prospective.public) - # Split the spec out by dots, and pretend that there is an implicit - # dot in between a release segment and a pre-release segment. 
- spec = _version_split(spec[:-2]) # Remove the trailing .* - - # Split the prospective version out by dots, and pretend that there - # is an implicit dot in between a release segment and a pre-release - # segment. - prospective = _version_split(str(prospective)) - - # Shorten the prospective version to be the same length as the spec - # so that we can determine if the specifier is a prefix of the - # prospective version or not. - prospective = prospective[: len(spec)] - - # Pad out our two sides with zeros so that they both equal the same - # length. - spec, prospective = _pad_version(spec, prospective) - else: - # Convert our spec string into a Version - spec = Version(spec) - - # If the specifier does not have a local segment, then we want to - # act as if the prospective version also does not have a local - # segment. - if not spec.local: - prospective = Version(prospective.public) - - return prospective == spec - - @_require_version_compare - def _compare_not_equal(self, prospective, spec): - return not self._compare_equal(prospective, spec) - - @_require_version_compare - def _compare_less_than_equal(self, prospective, spec): - return prospective <= Version(spec) - - @_require_version_compare - def _compare_greater_than_equal(self, prospective, spec): - return prospective >= Version(spec) - - @_require_version_compare - def _compare_less_than(self, prospective, spec): - # Convert our spec to a Version instance, since we'll want to work with - # it as a version. - spec = Version(spec) - - # Check to see if the prospective version is less than the spec - # version. If it's not we can short circuit and just return False now - # instead of doing extra unneeded work. - if not prospective < spec: - return False - - # This special case is here so that, unless the specifier itself - # includes is a pre-release version, that we do not accept pre-release - # versions for the version mentioned in the specifier (e.g. 
<3.1 should - # not match 3.1.dev0, but should match 3.0.dev0). - if not spec.is_prerelease and prospective.is_prerelease: - if Version(prospective.base_version) == Version(spec.base_version): - return False - - # If we've gotten to here, it means that prospective version is both - # less than the spec version *and* it's not a pre-release of the same - # version in the spec. - return True - - @_require_version_compare - def _compare_greater_than(self, prospective, spec): - # Convert our spec to a Version instance, since we'll want to work with - # it as a version. - spec = Version(spec) - - # Check to see if the prospective version is greater than the spec - # version. If it's not we can short circuit and just return False now - # instead of doing extra unneeded work. - if not prospective > spec: - return False - - # This special case is here so that, unless the specifier itself - # includes is a post-release version, that we do not accept - # post-release versions for the version mentioned in the specifier - # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). - if not spec.is_postrelease and prospective.is_postrelease: - if Version(prospective.base_version) == Version(spec.base_version): - return False - - # Ensure that we do not allow a local version of the version mentioned - # in the specifier, which is technically greater than, to match. - if prospective.local is not None: - if Version(prospective.base_version) == Version(spec.base_version): - return False - - # If we've gotten to here, it means that prospective version is both - # greater than the spec version *and* it's not a pre-release of the - # same version in the spec. - return True - - def _compare_arbitrary(self, prospective, spec): - return str(prospective).lower() == str(spec).lower() - - @property - def prereleases(self): - # If there is an explicit prereleases set for this, then we'll just - # blindly use that. 
- if self._prereleases is not None: - return self._prereleases - - # Look at all of our specifiers and determine if they are inclusive - # operators, and if they are if they are including an explicit - # prerelease. - operator, version = self._spec - if operator in ["==", ">=", "<=", "~=", "==="]: - # The == specifier can include a trailing .*, if it does we - # want to remove before parsing. - if operator == "==" and version.endswith(".*"): - version = version[:-2] - - # Parse the version, and if it is a pre-release than this - # specifier allows pre-releases. - if parse(version).is_prerelease: - return True - - return False - - @prereleases.setter - def prereleases(self, value): - self._prereleases = value - - -_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") - - -def _version_split(version): - result = [] - for item in version.split("."): - match = _prefix_regex.search(item) - if match: - result.extend(match.groups()) - else: - result.append(item) - return result - - -def _pad_version(left, right): - left_split, right_split = [], [] - - # Get the release segment of our versions - left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left))) - right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) - - # Get the rest of our versions - left_split.append(left[len(left_split[0]) :]) - right_split.append(right[len(right_split[0]) :]) - - # Insert our padding - left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0]))) - right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0]))) - - return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split))) - - -class SpecifierSet(BaseSpecifier): - def __init__(self, specifiers="", prereleases=None): - # Split on , to break each indidivual specifier into it's own item, and - # strip each item to remove leading/trailing whitespace. 
- specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] - - # Parsed each individual specifier, attempting first to make it a - # Specifier and falling back to a LegacySpecifier. - parsed = set() - for specifier in specifiers: - try: - parsed.add(Specifier(specifier)) - except InvalidSpecifier: - parsed.add(LegacySpecifier(specifier)) - - # Turn our parsed specifiers into a frozen set and save them for later. - self._specs = frozenset(parsed) - - # Store our prereleases value so we can use it later to determine if - # we accept prereleases or not. - self._prereleases = prereleases - - def __repr__(self): - pre = ( - ", prereleases={0!r}".format(self.prereleases) - if self._prereleases is not None - else "" - ) - - return "<SpecifierSet({0!r}{1})>".format(str(self), pre) - - def __str__(self): - return ",".join(sorted(str(s) for s in self._specs)) - - def __hash__(self): - return hash(self._specs) - - def __and__(self, other): - if isinstance(other, string_types): - other = SpecifierSet(other) - elif not isinstance(other, SpecifierSet): - return NotImplemented - - specifier = SpecifierSet() - specifier._specs = frozenset(self._specs | other._specs) - - if self._prereleases is None and other._prereleases is not None: - specifier._prereleases = other._prereleases - elif self._prereleases is not None and other._prereleases is None: - specifier._prereleases = self._prereleases - elif self._prereleases == other._prereleases: - specifier._prereleases = self._prereleases - else: - raise ValueError( - "Cannot combine SpecifierSets with True and False prerelease " - "overrides." 
- ) - - return specifier - - def __eq__(self, other): - if isinstance(other, string_types): - other = SpecifierSet(other) - elif isinstance(other, _IndividualSpecifier): - other = SpecifierSet(str(other)) - elif not isinstance(other, SpecifierSet): - return NotImplemented - - return self._specs == other._specs - - def __ne__(self, other): - if isinstance(other, string_types): - other = SpecifierSet(other) - elif isinstance(other, _IndividualSpecifier): - other = SpecifierSet(str(other)) - elif not isinstance(other, SpecifierSet): - return NotImplemented - - return self._specs != other._specs - - def __len__(self): - return len(self._specs) - - def __iter__(self): - return iter(self._specs) - - @property - def prereleases(self): - # If we have been given an explicit prerelease modifier, then we'll - # pass that through here. - if self._prereleases is not None: - return self._prereleases - - # If we don't have any specifiers, and we don't have a forced value, - # then we'll just return None since we don't know if this should have - # pre-releases or not. - if not self._specs: - return None - - # Otherwise we'll see if any of the given specifiers accept - # prereleases, if any of them do we'll return True, otherwise False. - return any(s.prereleases for s in self._specs) - - @prereleases.setter - def prereleases(self, value): - self._prereleases = value - - def __contains__(self, item): - return self.contains(item) - - def contains(self, item, prereleases=None): - # Ensure that our item is a Version or LegacyVersion instance. - if not isinstance(item, (LegacyVersion, Version)): - item = parse(item) - - # Determine if we're forcing a prerelease or not, if we're not forcing - # one for this particular filter call, then we'll use whatever the - # SpecifierSet thinks for whether or not we should support prereleases. 
- if prereleases is None: - prereleases = self.prereleases - - # We can determine if we're going to allow pre-releases by looking to - # see if any of the underlying items supports them. If none of them do - # and this item is a pre-release then we do not allow it and we can - # short circuit that here. - # Note: This means that 1.0.dev1 would not be contained in something - # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 - if not prereleases and item.is_prerelease: - return False - - # We simply dispatch to the underlying specs here to make sure that the - # given version is contained within all of them. - # Note: This use of all() here means that an empty set of specifiers - # will always return True, this is an explicit design decision. - return all(s.contains(item, prereleases=prereleases) for s in self._specs) - - def filter(self, iterable, prereleases=None): - # Determine if we're forcing a prerelease or not, if we're not forcing - # one for this particular filter call, then we'll use whatever the - # SpecifierSet thinks for whether or not we should support prereleases. - if prereleases is None: - prereleases = self.prereleases - - # If we have any specifiers, then we want to wrap our iterable in the - # filter method for each one, this will act as a logical AND amongst - # each specifier. - if self._specs: - for spec in self._specs: - iterable = spec.filter(iterable, prereleases=bool(prereleases)) - return iterable - # If we do not have any specifiers, then we need to have a rough filter - # which will filter out any pre-releases, unless there are no final - # releases, and which will filter out LegacyVersion in general. - else: - filtered = [] - found_prereleases = [] - - for item in iterable: - # Ensure that we some kind of Version class for this item. 
- if not isinstance(item, (LegacyVersion, Version)): - parsed_version = parse(item) - else: - parsed_version = item - - # Filter out any item which is parsed as a LegacyVersion - if isinstance(parsed_version, LegacyVersion): - continue - - # Store any item which is a pre-release for later unless we've - # already found a final version or we are accepting prereleases - if parsed_version.is_prerelease and not prereleases: - if not filtered: - found_prereleases.append(item) - else: - filtered.append(item) - - # If we've found no items except for pre-releases, then we'll go - # ahead and use the pre-releases - if not filtered and found_prereleases and prereleases is None: - return found_prereleases - - return filtered diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/utils.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/utils.py deleted file mode 100644 index 8841878..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/utils.py +++ /dev/null @@ -1,57 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -import re - -from .version import InvalidVersion, Version - - -_canonicalize_regex = re.compile(r"[-_.]+") - - -def canonicalize_name(name): - # This is taken from PEP 503. - return _canonicalize_regex.sub("-", name).lower() - - -def canonicalize_version(version): - """ - This is very similar to Version.__str__, but has one subtle differences - with the way it handles the release segment. 
- """ - - try: - version = Version(version) - except InvalidVersion: - # Legacy versions cannot be normalized - return version - - parts = [] - - # Epoch - if version.epoch != 0: - parts.append("{0}!".format(version.epoch)) - - # Release segment - # NB: This strips trailing '.0's to normalize - parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in version.release))) - - # Pre-release - if version.pre is not None: - parts.append("".join(str(x) for x in version.pre)) - - # Post-release - if version.post is not None: - parts.append(".post{0}".format(version.post)) - - # Development release - if version.dev is not None: - parts.append(".dev{0}".format(version.dev)) - - # Local version segment - if version.local is not None: - parts.append("+{0}".format(version.local)) - - return "".join(parts) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/version.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/version.py deleted file mode 100644 index 95157a1..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/packaging/version.py +++ /dev/null @@ -1,420 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -import collections -import itertools -import re - -from ._structures import Infinity - - -__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"] - - -_Version = collections.namedtuple( - "_Version", ["epoch", "release", "dev", "pre", "post", "local"] -) - - -def parse(version): - """ - Parse the given version string and return either a :class:`Version` object - or a :class:`LegacyVersion` object depending on if the given version is - a valid PEP 440 version or a legacy version. 
- """ - try: - return Version(version) - except InvalidVersion: - return LegacyVersion(version) - - -class InvalidVersion(ValueError): - """ - An invalid version was found, users should refer to PEP 440. - """ - - -class _BaseVersion(object): - def __hash__(self): - return hash(self._key) - - def __lt__(self, other): - return self._compare(other, lambda s, o: s < o) - - def __le__(self, other): - return self._compare(other, lambda s, o: s <= o) - - def __eq__(self, other): - return self._compare(other, lambda s, o: s == o) - - def __ge__(self, other): - return self._compare(other, lambda s, o: s >= o) - - def __gt__(self, other): - return self._compare(other, lambda s, o: s > o) - - def __ne__(self, other): - return self._compare(other, lambda s, o: s != o) - - def _compare(self, other, method): - if not isinstance(other, _BaseVersion): - return NotImplemented - - return method(self._key, other._key) - - -class LegacyVersion(_BaseVersion): - def __init__(self, version): - self._version = str(version) - self._key = _legacy_cmpkey(self._version) - - def __str__(self): - return self._version - - def __repr__(self): - return "<LegacyVersion({0})>".format(repr(str(self))) - - @property - def public(self): - return self._version - - @property - def base_version(self): - return self._version - - @property - def epoch(self): - return -1 - - @property - def release(self): - return None - - @property - def pre(self): - return None - - @property - def post(self): - return None - - @property - def dev(self): - return None - - @property - def local(self): - return None - - @property - def is_prerelease(self): - return False - - @property - def is_postrelease(self): - return False - - @property - def is_devrelease(self): - return False - - -_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE) - -_legacy_version_replacement_map = { - "pre": "c", - "preview": "c", - "-": "final-", - "rc": "c", - "dev": "@", -} - - -def _parse_version_parts(s): - for 
part in _legacy_version_component_re.split(s): - part = _legacy_version_replacement_map.get(part, part) - - if not part or part == ".": - continue - - if part[:1] in "0123456789": - # pad for numeric comparison - yield part.zfill(8) - else: - yield "*" + part - - # ensure that alpha/beta/candidate are before final - yield "*final" - - -def _legacy_cmpkey(version): - # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch - # greater than or equal to 0. This will effectively put the LegacyVersion, - # which uses the defacto standard originally implemented by setuptools, - # as before all PEP 440 versions. - epoch = -1 - - # This scheme is taken from pkg_resources.parse_version setuptools prior to - # it's adoption of the packaging library. - parts = [] - for part in _parse_version_parts(version.lower()): - if part.startswith("*"): - # remove "-" before a prerelease tag - if part < "*final": - while parts and parts[-1] == "*final-": - parts.pop() - - # remove trailing zeros from each series of numeric parts - while parts and parts[-1] == "00000000": - parts.pop() - - parts.append(part) - parts = tuple(parts) - - return epoch, parts - - -# Deliberately not anchored to the start and end of the string, to make it -# easier for 3rd party code to reuse -VERSION_PATTERN = r""" - v? - (?: - (?:(?P<epoch>[0-9]+)!)? # epoch - (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment - (?P<pre> # pre-release - [-_\.]? - (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview)) - [-_\.]? - (?P<pre_n>[0-9]+)? - )? - (?P<post> # post release - (?:-(?P<post_n1>[0-9]+)) - | - (?: - [-_\.]? - (?P<post_l>post|rev|r) - [-_\.]? - (?P<post_n2>[0-9]+)? - ) - )? - (?P<dev> # dev release - [-_\.]? - (?P<dev_l>dev) - [-_\.]? - (?P<dev_n>[0-9]+)? - )? - ) - (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? 
# local version -""" - - -class Version(_BaseVersion): - - _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE) - - def __init__(self, version): - # Validate the version and parse it into pieces - match = self._regex.search(version) - if not match: - raise InvalidVersion("Invalid version: '{0}'".format(version)) - - # Store the parsed out pieces of the version - self._version = _Version( - epoch=int(match.group("epoch")) if match.group("epoch") else 0, - release=tuple(int(i) for i in match.group("release").split(".")), - pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")), - post=_parse_letter_version( - match.group("post_l"), match.group("post_n1") or match.group("post_n2") - ), - dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")), - local=_parse_local_version(match.group("local")), - ) - - # Generate a key which will be used for sorting - self._key = _cmpkey( - self._version.epoch, - self._version.release, - self._version.pre, - self._version.post, - self._version.dev, - self._version.local, - ) - - def __repr__(self): - return "<Version({0})>".format(repr(str(self))) - - def __str__(self): - parts = [] - - # Epoch - if self.epoch != 0: - parts.append("{0}!".format(self.epoch)) - - # Release segment - parts.append(".".join(str(x) for x in self.release)) - - # Pre-release - if self.pre is not None: - parts.append("".join(str(x) for x in self.pre)) - - # Post-release - if self.post is not None: - parts.append(".post{0}".format(self.post)) - - # Development release - if self.dev is not None: - parts.append(".dev{0}".format(self.dev)) - - # Local version segment - if self.local is not None: - parts.append("+{0}".format(self.local)) - - return "".join(parts) - - @property - def epoch(self): - return self._version.epoch - - @property - def release(self): - return self._version.release - - @property - def pre(self): - return self._version.pre - - @property - def post(self): - return 
self._version.post[1] if self._version.post else None - - @property - def dev(self): - return self._version.dev[1] if self._version.dev else None - - @property - def local(self): - if self._version.local: - return ".".join(str(x) for x in self._version.local) - else: - return None - - @property - def public(self): - return str(self).split("+", 1)[0] - - @property - def base_version(self): - parts = [] - - # Epoch - if self.epoch != 0: - parts.append("{0}!".format(self.epoch)) - - # Release segment - parts.append(".".join(str(x) for x in self.release)) - - return "".join(parts) - - @property - def is_prerelease(self): - return self.dev is not None or self.pre is not None - - @property - def is_postrelease(self): - return self.post is not None - - @property - def is_devrelease(self): - return self.dev is not None - - -def _parse_letter_version(letter, number): - if letter: - # We consider there to be an implicit 0 in a pre-release if there is - # not a numeral associated with it. - if number is None: - number = 0 - - # We normalize any letters to their lower case form - letter = letter.lower() - - # We consider some words to be alternate spellings of other words and - # in those cases we want to normalize the spellings to our preferred - # spelling. - if letter == "alpha": - letter = "a" - elif letter == "beta": - letter = "b" - elif letter in ["c", "pre", "preview"]: - letter = "rc" - elif letter in ["rev", "r"]: - letter = "post" - - return letter, int(number) - if not letter and number: - # We assume if we are given a number, but we are not given a letter - # then this is using the implicit post release syntax (e.g. 1.0-1) - letter = "post" - - return letter, int(number) - - -_local_version_separators = re.compile(r"[\._-]") - - -def _parse_local_version(local): - """ - Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve"). 
- """ - if local is not None: - return tuple( - part.lower() if not part.isdigit() else int(part) - for part in _local_version_separators.split(local) - ) - - -def _cmpkey(epoch, release, pre, post, dev, local): - # When we compare a release version, we want to compare it with all of the - # trailing zeros removed. So we'll use a reverse the list, drop all the now - # leading zeros until we come to something non zero, then take the rest - # re-reverse it back into the correct order and make it a tuple and use - # that for our sorting key. - release = tuple( - reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release)))) - ) - - # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0. - # We'll do this by abusing the pre segment, but we _only_ want to do this - # if there is not a pre or a post segment. If we have one of those then - # the normal sorting rules will handle this case correctly. - if pre is None and post is None and dev is not None: - pre = -Infinity - # Versions without a pre-release (except as noted above) should sort after - # those with one. - elif pre is None: - pre = Infinity - - # Versions without a post segment should sort before those with one. - if post is None: - post = -Infinity - - # Versions without a development segment should sort after those with one. - if dev is None: - dev = Infinity - - if local is None: - # Versions without a local segment should sort before those with one. - local = -Infinity - else: - # Versions with a local segment need that segment parsed to implement - # the sorting rules in PEP440. 
- # - Alpha numeric segments sort before numeric segments - # - Alpha numeric segments sort lexicographically - # - Numeric segments sort numerically - # - Shorter versions sort before longer versions when the prefixes - # match exactly - local = tuple((i, "") if isinstance(i, int) else (-Infinity, i) for i in local) - - return epoch, release, pre, post, dev, local diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pep517/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pep517/__init__.py deleted file mode 100644 index 9c1a098..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pep517/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -"""Wrappers to build Python packages using PEP 517 hooks -""" - -__version__ = '0.5.0' diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pep517/_in_process.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pep517/_in_process.py deleted file mode 100644 index d6524b6..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pep517/_in_process.py +++ /dev/null @@ -1,207 +0,0 @@ -"""This is invoked in a subprocess to call the build backend hooks. 
- -It expects: -- Command line args: hook_name, control_dir -- Environment variable: PEP517_BUILD_BACKEND=entry.point:spec -- control_dir/input.json: - - {"kwargs": {...}} - -Results: -- control_dir/output.json - - {"return_val": ...} -""" -from glob import glob -from importlib import import_module -import os -from os.path import join as pjoin -import re -import shutil -import sys - -# This is run as a script, not a module, so it can't do a relative import -import compat - - -class BackendUnavailable(Exception): - """Raised if we cannot import the backend""" - - -def _build_backend(): - """Find and load the build backend""" - ep = os.environ['PEP517_BUILD_BACKEND'] - mod_path, _, obj_path = ep.partition(':') - try: - obj = import_module(mod_path) - except ImportError: - raise BackendUnavailable - if obj_path: - for path_part in obj_path.split('.'): - obj = getattr(obj, path_part) - return obj - - -def get_requires_for_build_wheel(config_settings): - """Invoke the optional get_requires_for_build_wheel hook - - Returns [] if the hook is not defined. - """ - backend = _build_backend() - try: - hook = backend.get_requires_for_build_wheel - except AttributeError: - return [] - else: - return hook(config_settings) - - -def prepare_metadata_for_build_wheel(metadata_directory, config_settings): - """Invoke optional prepare_metadata_for_build_wheel - - Implements a fallback by building a wheel if the hook isn't defined. 
- """ - backend = _build_backend() - try: - hook = backend.prepare_metadata_for_build_wheel - except AttributeError: - return _get_wheel_metadata_from_wheel(backend, metadata_directory, - config_settings) - else: - return hook(metadata_directory, config_settings) - - -WHEEL_BUILT_MARKER = 'PEP517_ALREADY_BUILT_WHEEL' - - -def _dist_info_files(whl_zip): - """Identify the .dist-info folder inside a wheel ZipFile.""" - res = [] - for path in whl_zip.namelist(): - m = re.match(r'[^/\\]+-[^/\\]+\.dist-info/', path) - if m: - res.append(path) - if res: - return res - raise Exception("No .dist-info folder found in wheel") - - -def _get_wheel_metadata_from_wheel( - backend, metadata_directory, config_settings): - """Build a wheel and extract the metadata from it. - - Fallback for when the build backend does not - define the 'get_wheel_metadata' hook. - """ - from zipfile import ZipFile - whl_basename = backend.build_wheel(metadata_directory, config_settings) - with open(os.path.join(metadata_directory, WHEEL_BUILT_MARKER), 'wb'): - pass # Touch marker file - - whl_file = os.path.join(metadata_directory, whl_basename) - with ZipFile(whl_file) as zipf: - dist_info = _dist_info_files(zipf) - zipf.extractall(path=metadata_directory, members=dist_info) - return dist_info[0].split('/')[0] - - -def _find_already_built_wheel(metadata_directory): - """Check for a wheel already built during the get_wheel_metadata hook. - """ - if not metadata_directory: - return None - metadata_parent = os.path.dirname(metadata_directory) - if not os.path.isfile(pjoin(metadata_parent, WHEEL_BUILT_MARKER)): - return None - - whl_files = glob(os.path.join(metadata_parent, '*.whl')) - if not whl_files: - print('Found wheel built marker, but no .whl files') - return None - if len(whl_files) > 1: - print('Found multiple .whl files; unspecified behaviour. 
' - 'Will call build_wheel.') - return None - - # Exactly one .whl file - return whl_files[0] - - -def build_wheel(wheel_directory, config_settings, metadata_directory=None): - """Invoke the mandatory build_wheel hook. - - If a wheel was already built in the - prepare_metadata_for_build_wheel fallback, this - will copy it rather than rebuilding the wheel. - """ - prebuilt_whl = _find_already_built_wheel(metadata_directory) - if prebuilt_whl: - shutil.copy2(prebuilt_whl, wheel_directory) - return os.path.basename(prebuilt_whl) - - return _build_backend().build_wheel(wheel_directory, config_settings, - metadata_directory) - - -def get_requires_for_build_sdist(config_settings): - """Invoke the optional get_requires_for_build_wheel hook - - Returns [] if the hook is not defined. - """ - backend = _build_backend() - try: - hook = backend.get_requires_for_build_sdist - except AttributeError: - return [] - else: - return hook(config_settings) - - -class _DummyException(Exception): - """Nothing should ever raise this exception""" - - -class GotUnsupportedOperation(Exception): - """For internal use when backend raises UnsupportedOperation""" - - -def build_sdist(sdist_directory, config_settings): - """Invoke the mandatory build_sdist hook.""" - backend = _build_backend() - try: - return backend.build_sdist(sdist_directory, config_settings) - except getattr(backend, 'UnsupportedOperation', _DummyException): - raise GotUnsupportedOperation - - -HOOK_NAMES = { - 'get_requires_for_build_wheel', - 'prepare_metadata_for_build_wheel', - 'build_wheel', - 'get_requires_for_build_sdist', - 'build_sdist', -} - - -def main(): - if len(sys.argv) < 3: - sys.exit("Needs args: hook_name, control_dir") - hook_name = sys.argv[1] - control_dir = sys.argv[2] - if hook_name not in HOOK_NAMES: - sys.exit("Unknown hook: %s" % hook_name) - hook = globals()[hook_name] - - hook_input = compat.read_json(pjoin(control_dir, 'input.json')) - - json_out = {'unsupported': False, 'return_val': None} - try: 
- json_out['return_val'] = hook(**hook_input['kwargs']) - except BackendUnavailable: - json_out['no_backend'] = True - except GotUnsupportedOperation: - json_out['unsupported'] = True - - compat.write_json(json_out, pjoin(control_dir, 'output.json'), indent=2) - - -if __name__ == '__main__': - main() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pep517/build.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pep517/build.py deleted file mode 100644 index ac6c949..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pep517/build.py +++ /dev/null @@ -1,108 +0,0 @@ -"""Build a project using PEP 517 hooks. -""" -import argparse -import logging -import os -import contextlib -from pip._vendor import pytoml -import shutil -import errno -import tempfile - -from .envbuild import BuildEnvironment -from .wrappers import Pep517HookCaller - -log = logging.getLogger(__name__) - - -@contextlib.contextmanager -def tempdir(): - td = tempfile.mkdtemp() - try: - yield td - finally: - shutil.rmtree(td) - - -def _do_build(hooks, env, dist, dest): - get_requires_name = 'get_requires_for_build_{dist}'.format(**locals()) - get_requires = getattr(hooks, get_requires_name) - reqs = get_requires({}) - log.info('Got build requires: %s', reqs) - - env.pip_install(reqs) - log.info('Installed dynamic build dependencies') - - with tempdir() as td: - log.info('Trying to build %s in %s', dist, td) - build_name = 'build_{dist}'.format(**locals()) - build = getattr(hooks, build_name) - filename = build(td, {}) - source = os.path.join(td, filename) - shutil.move(source, os.path.join(dest, os.path.basename(filename))) - - -def mkdir_p(*args, **kwargs): - """Like `mkdir`, but does not raise an exception if the - directory already exists. 
- """ - try: - return os.mkdir(*args, **kwargs) - except OSError as exc: - if exc.errno != errno.EEXIST: - raise - - -def build(source_dir, dist, dest=None): - pyproject = os.path.join(source_dir, 'pyproject.toml') - dest = os.path.join(source_dir, dest or 'dist') - mkdir_p(dest) - - with open(pyproject) as f: - pyproject_data = pytoml.load(f) - # Ensure the mandatory data can be loaded - buildsys = pyproject_data['build-system'] - requires = buildsys['requires'] - backend = buildsys['build-backend'] - - hooks = Pep517HookCaller(source_dir, backend) - - with BuildEnvironment() as env: - env.pip_install(requires) - _do_build(hooks, env, dist, dest) - - -parser = argparse.ArgumentParser() -parser.add_argument( - 'source_dir', - help="A directory containing pyproject.toml", -) -parser.add_argument( - '--binary', '-b', - action='store_true', - default=False, -) -parser.add_argument( - '--source', '-s', - action='store_true', - default=False, -) -parser.add_argument( - '--out-dir', '-o', - help="Destination in which to save the builds relative to source dir", -) - - -def main(args): - # determine which dists to build - dists = list(filter(None, ( - 'sdist' if args.source or not args.binary else None, - 'wheel' if args.binary or not args.source else None, - ))) - - for dist in dists: - build(args.source_dir, dist, args.out_dir) - - -if __name__ == '__main__': - main(parser.parse_args()) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pep517/check.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pep517/check.py deleted file mode 100644 index f4cdc6b..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pep517/check.py +++ /dev/null @@ -1,202 +0,0 @@ -"""Check a project and backend by attempting to build using PEP 517 hooks. 
-""" -import argparse -import logging -import os -from os.path import isfile, join as pjoin -from pip._vendor.pytoml import TomlError, load as toml_load -import shutil -from subprocess import CalledProcessError -import sys -import tarfile -from tempfile import mkdtemp -import zipfile - -from .colorlog import enable_colourful_output -from .envbuild import BuildEnvironment -from .wrappers import Pep517HookCaller - -log = logging.getLogger(__name__) - - -def check_build_sdist(hooks, build_sys_requires): - with BuildEnvironment() as env: - try: - env.pip_install(build_sys_requires) - log.info('Installed static build dependencies') - except CalledProcessError: - log.error('Failed to install static build dependencies') - return False - - try: - reqs = hooks.get_requires_for_build_sdist({}) - log.info('Got build requires: %s', reqs) - except Exception: - log.error('Failure in get_requires_for_build_sdist', exc_info=True) - return False - - try: - env.pip_install(reqs) - log.info('Installed dynamic build dependencies') - except CalledProcessError: - log.error('Failed to install dynamic build dependencies') - return False - - td = mkdtemp() - log.info('Trying to build sdist in %s', td) - try: - try: - filename = hooks.build_sdist(td, {}) - log.info('build_sdist returned %r', filename) - except Exception: - log.info('Failure in build_sdist', exc_info=True) - return False - - if not filename.endswith('.tar.gz'): - log.error( - "Filename %s doesn't have .tar.gz extension", filename) - return False - - path = pjoin(td, filename) - if isfile(path): - log.info("Output file %s exists", path) - else: - log.error("Output file %s does not exist", path) - return False - - if tarfile.is_tarfile(path): - log.info("Output file is a tar file") - else: - log.error("Output file is not a tar file") - return False - - finally: - shutil.rmtree(td) - - return True - - -def check_build_wheel(hooks, build_sys_requires): - with BuildEnvironment() as env: - try: - 
env.pip_install(build_sys_requires) - log.info('Installed static build dependencies') - except CalledProcessError: - log.error('Failed to install static build dependencies') - return False - - try: - reqs = hooks.get_requires_for_build_wheel({}) - log.info('Got build requires: %s', reqs) - except Exception: - log.error('Failure in get_requires_for_build_sdist', exc_info=True) - return False - - try: - env.pip_install(reqs) - log.info('Installed dynamic build dependencies') - except CalledProcessError: - log.error('Failed to install dynamic build dependencies') - return False - - td = mkdtemp() - log.info('Trying to build wheel in %s', td) - try: - try: - filename = hooks.build_wheel(td, {}) - log.info('build_wheel returned %r', filename) - except Exception: - log.info('Failure in build_wheel', exc_info=True) - return False - - if not filename.endswith('.whl'): - log.error("Filename %s doesn't have .whl extension", filename) - return False - - path = pjoin(td, filename) - if isfile(path): - log.info("Output file %s exists", path) - else: - log.error("Output file %s does not exist", path) - return False - - if zipfile.is_zipfile(path): - log.info("Output file is a zip file") - else: - log.error("Output file is not a zip file") - return False - - finally: - shutil.rmtree(td) - - return True - - -def check(source_dir): - pyproject = pjoin(source_dir, 'pyproject.toml') - if isfile(pyproject): - log.info('Found pyproject.toml') - else: - log.error('Missing pyproject.toml') - return False - - try: - with open(pyproject) as f: - pyproject_data = toml_load(f) - # Ensure the mandatory data can be loaded - buildsys = pyproject_data['build-system'] - requires = buildsys['requires'] - backend = buildsys['build-backend'] - log.info('Loaded pyproject.toml') - except (TomlError, KeyError): - log.error("Invalid pyproject.toml", exc_info=True) - return False - - hooks = Pep517HookCaller(source_dir, backend) - - sdist_ok = check_build_sdist(hooks, requires) - wheel_ok = 
check_build_wheel(hooks, requires) - - if not sdist_ok: - log.warning('Sdist checks failed; scroll up to see') - if not wheel_ok: - log.warning('Wheel checks failed') - - return sdist_ok - - -def main(argv=None): - ap = argparse.ArgumentParser() - ap.add_argument( - 'source_dir', - help="A directory containing pyproject.toml") - args = ap.parse_args(argv) - - enable_colourful_output() - - ok = check(args.source_dir) - - if ok: - print(ansi('Checks passed', 'green')) - else: - print(ansi('Checks failed', 'red')) - sys.exit(1) - - -ansi_codes = { - 'reset': '\x1b[0m', - 'bold': '\x1b[1m', - 'red': '\x1b[31m', - 'green': '\x1b[32m', -} - - -def ansi(s, attr): - if os.name != 'nt' and sys.stdout.isatty(): - return ansi_codes[attr] + str(s) + ansi_codes['reset'] - else: - return str(s) - - -if __name__ == '__main__': - main() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pep517/colorlog.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pep517/colorlog.py deleted file mode 100644 index 69c8a59..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pep517/colorlog.py +++ /dev/null @@ -1,115 +0,0 @@ -"""Nicer log formatting with colours. - -Code copied from Tornado, Apache licensed. -""" -# Copyright 2012 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import logging -import sys - -try: - import curses -except ImportError: - curses = None - - -def _stderr_supports_color(): - color = False - if curses and hasattr(sys.stderr, 'isatty') and sys.stderr.isatty(): - try: - curses.setupterm() - if curses.tigetnum("colors") > 0: - color = True - except Exception: - pass - return color - - -class LogFormatter(logging.Formatter): - """Log formatter with colour support - """ - DEFAULT_COLORS = { - logging.INFO: 2, # Green - logging.WARNING: 3, # Yellow - logging.ERROR: 1, # Red - logging.CRITICAL: 1, - } - - def __init__(self, color=True, datefmt=None): - r""" - :arg bool color: Enables color support. - :arg string fmt: Log message format. - It will be applied to the attributes dict of log records. The - text between ``%(color)s`` and ``%(end_color)s`` will be colored - depending on the level if color support is on. - :arg dict colors: color mappings from logging level to terminal color - code - :arg string datefmt: Datetime format. - Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``. - .. versionchanged:: 3.2 - Added ``fmt`` and ``datefmt`` arguments. - """ - logging.Formatter.__init__(self, datefmt=datefmt) - self._colors = {} - if color and _stderr_supports_color(): - # The curses module has some str/bytes confusion in - # python3. Until version 3.2.3, most methods return - # bytes, but only accept strings. In addition, we want to - # output these strings with the logging module, which - # works with unicode strings. The explicit calls to - # unicode() below are harmless in python2 but will do the - # right conversion in python 3. 
- fg_color = (curses.tigetstr("setaf") or - curses.tigetstr("setf") or "") - if (3, 0) < sys.version_info < (3, 2, 3): - fg_color = str(fg_color, "ascii") - - for levelno, code in self.DEFAULT_COLORS.items(): - self._colors[levelno] = str( - curses.tparm(fg_color, code), "ascii") - self._normal = str(curses.tigetstr("sgr0"), "ascii") - - scr = curses.initscr() - self.termwidth = scr.getmaxyx()[1] - curses.endwin() - else: - self._normal = '' - # Default width is usually 80, but too wide is - # worse than too narrow - self.termwidth = 70 - - def formatMessage(self, record): - mlen = len(record.message) - right_text = '{initial}-{name}'.format(initial=record.levelname[0], - name=record.name) - if mlen + len(right_text) < self.termwidth: - space = ' ' * (self.termwidth - (mlen + len(right_text))) - else: - space = ' ' - - if record.levelno in self._colors: - start_color = self._colors[record.levelno] - end_color = self._normal - else: - start_color = end_color = '' - - return record.message + space + start_color + right_text + end_color - - -def enable_colourful_output(level=logging.INFO): - handler = logging.StreamHandler() - handler.setFormatter(LogFormatter()) - logging.root.addHandler(handler) - logging.root.setLevel(level) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pep517/compat.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pep517/compat.py deleted file mode 100644 index 01c66fc..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pep517/compat.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Handle reading and writing JSON in UTF-8, on Python 3 and 2.""" -import json -import sys - -if sys.version_info[0] >= 3: - # Python 3 - def write_json(obj, path, **kwargs): - with open(path, 'w', encoding='utf-8') as f: - json.dump(obj, f, **kwargs) - - def read_json(path): - with open(path, 'r', encoding='utf-8') as f: - return json.load(f) - -else: - # Python 2 - def write_json(obj, path, 
**kwargs): - with open(path, 'wb') as f: - json.dump(obj, f, encoding='utf-8', **kwargs) - - def read_json(path): - with open(path, 'rb') as f: - return json.load(f) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pep517/envbuild.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pep517/envbuild.py deleted file mode 100644 index f7ac5f4..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pep517/envbuild.py +++ /dev/null @@ -1,158 +0,0 @@ -"""Build wheels/sdists by installing build deps to a temporary environment. -""" - -import os -import logging -from pip._vendor import pytoml -import shutil -from subprocess import check_call -import sys -from sysconfig import get_paths -from tempfile import mkdtemp - -from .wrappers import Pep517HookCaller - -log = logging.getLogger(__name__) - - -def _load_pyproject(source_dir): - with open(os.path.join(source_dir, 'pyproject.toml')) as f: - pyproject_data = pytoml.load(f) - buildsys = pyproject_data['build-system'] - return buildsys['requires'], buildsys['build-backend'] - - -class BuildEnvironment(object): - """Context manager to install build deps in a simple temporary environment - - Based on code I wrote for pip, which is MIT licensed. - """ - # Copyright (c) 2008-2016 The pip developers (see AUTHORS.txt file) - # - # Permission is hereby granted, free of charge, to any person obtaining - # a copy of this software and associated documentation files (the - # "Software"), to deal in the Software without restriction, including - # without limitation the rights to use, copy, modify, merge, publish, - # distribute, sublicense, and/or sell copies of the Software, and to - # permit persons to whom the Software is furnished to do so, subject to - # the following conditions: - # - # The above copyright notice and this permission notice shall be - # included in all copies or substantial portions of the Software. 
- # - # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - path = None - - def __init__(self, cleanup=True): - self._cleanup = cleanup - - def __enter__(self): - self.path = mkdtemp(prefix='pep517-build-env-') - log.info('Temporary build environment: %s', self.path) - - self.save_path = os.environ.get('PATH', None) - self.save_pythonpath = os.environ.get('PYTHONPATH', None) - - install_scheme = 'nt' if (os.name == 'nt') else 'posix_prefix' - install_dirs = get_paths(install_scheme, vars={ - 'base': self.path, - 'platbase': self.path, - }) - - scripts = install_dirs['scripts'] - if self.save_path: - os.environ['PATH'] = scripts + os.pathsep + self.save_path - else: - os.environ['PATH'] = scripts + os.pathsep + os.defpath - - if install_dirs['purelib'] == install_dirs['platlib']: - lib_dirs = install_dirs['purelib'] - else: - lib_dirs = install_dirs['purelib'] + os.pathsep + \ - install_dirs['platlib'] - if self.save_pythonpath: - os.environ['PYTHONPATH'] = lib_dirs + os.pathsep + \ - self.save_pythonpath - else: - os.environ['PYTHONPATH'] = lib_dirs - - return self - - def pip_install(self, reqs): - """Install dependencies into this env by calling pip in a subprocess""" - if not reqs: - return - log.info('Calling pip to install %s', reqs) - check_call([ - sys.executable, '-m', 'pip', 'install', '--ignore-installed', - '--prefix', self.path] + list(reqs)) - - def __exit__(self, exc_type, exc_val, exc_tb): - needs_cleanup = ( - self._cleanup and - self.path is not None and - os.path.isdir(self.path) - ) - if needs_cleanup: - 
shutil.rmtree(self.path) - - if self.save_path is None: - os.environ.pop('PATH', None) - else: - os.environ['PATH'] = self.save_path - - if self.save_pythonpath is None: - os.environ.pop('PYTHONPATH', None) - else: - os.environ['PYTHONPATH'] = self.save_pythonpath - - -def build_wheel(source_dir, wheel_dir, config_settings=None): - """Build a wheel from a source directory using PEP 517 hooks. - - :param str source_dir: Source directory containing pyproject.toml - :param str wheel_dir: Target directory to create wheel in - :param dict config_settings: Options to pass to build backend - - This is a blocking function which will run pip in a subprocess to install - build requirements. - """ - if config_settings is None: - config_settings = {} - requires, backend = _load_pyproject(source_dir) - hooks = Pep517HookCaller(source_dir, backend) - - with BuildEnvironment() as env: - env.pip_install(requires) - reqs = hooks.get_requires_for_build_wheel(config_settings) - env.pip_install(reqs) - return hooks.build_wheel(wheel_dir, config_settings) - - -def build_sdist(source_dir, sdist_dir, config_settings=None): - """Build an sdist from a source directory using PEP 517 hooks. - - :param str source_dir: Source directory containing pyproject.toml - :param str sdist_dir: Target directory to place sdist in - :param dict config_settings: Options to pass to build backend - - This is a blocking function which will run pip in a subprocess to install - build requirements. 
- """ - if config_settings is None: - config_settings = {} - requires, backend = _load_pyproject(source_dir) - hooks = Pep517HookCaller(source_dir, backend) - - with BuildEnvironment() as env: - env.pip_install(requires) - reqs = hooks.get_requires_for_build_sdist(config_settings) - env.pip_install(reqs) - return hooks.build_sdist(sdist_dir, config_settings) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pep517/wrappers.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pep517/wrappers.py deleted file mode 100644 index b14b899..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pep517/wrappers.py +++ /dev/null @@ -1,163 +0,0 @@ -from contextlib import contextmanager -import os -from os.path import dirname, abspath, join as pjoin -import shutil -from subprocess import check_call -import sys -from tempfile import mkdtemp - -from . import compat - -_in_proc_script = pjoin(dirname(abspath(__file__)), '_in_process.py') - - -@contextmanager -def tempdir(): - td = mkdtemp() - try: - yield td - finally: - shutil.rmtree(td) - - -class BackendUnavailable(Exception): - """Will be raised if the backend cannot be imported in the hook process.""" - - -class UnsupportedOperation(Exception): - """May be raised by build_sdist if the backend indicates that it can't.""" - - -def default_subprocess_runner(cmd, cwd=None, extra_environ=None): - """The default method of calling the wrapper subprocess.""" - env = os.environ.copy() - if extra_environ: - env.update(extra_environ) - - check_call(cmd, cwd=cwd, env=env) - - -class Pep517HookCaller(object): - """A wrapper around a source directory to be built with a PEP 517 backend. - - source_dir : The path to the source directory, containing pyproject.toml. - backend : The build backend spec, as per PEP 517, from pyproject.toml. 
- """ - def __init__(self, source_dir, build_backend): - self.source_dir = abspath(source_dir) - self.build_backend = build_backend - self._subprocess_runner = default_subprocess_runner - - # TODO: Is this over-engineered? Maybe frontends only need to - # set this when creating the wrapper, not on every call. - @contextmanager - def subprocess_runner(self, runner): - prev = self._subprocess_runner - self._subprocess_runner = runner - yield - self._subprocess_runner = prev - - def get_requires_for_build_wheel(self, config_settings=None): - """Identify packages required for building a wheel - - Returns a list of dependency specifications, e.g.: - ["wheel >= 0.25", "setuptools"] - - This does not include requirements specified in pyproject.toml. - It returns the result of calling the equivalently named hook in a - subprocess. - """ - return self._call_hook('get_requires_for_build_wheel', { - 'config_settings': config_settings - }) - - def prepare_metadata_for_build_wheel( - self, metadata_directory, config_settings=None): - """Prepare a *.dist-info folder with metadata for this project. - - Returns the name of the newly created folder. - - If the build backend defines a hook with this name, it will be called - in a subprocess. If not, the backend will be asked to build a wheel, - and the dist-info extracted from that. - """ - return self._call_hook('prepare_metadata_for_build_wheel', { - 'metadata_directory': abspath(metadata_directory), - 'config_settings': config_settings, - }) - - def build_wheel( - self, wheel_directory, config_settings=None, - metadata_directory=None): - """Build a wheel from this project. - - Returns the name of the newly created file. - - In general, this will call the 'build_wheel' hook in the backend. - However, if that was previously called by - 'prepare_metadata_for_build_wheel', and the same metadata_directory is - used, the previously built wheel will be copied to wheel_directory. 
- """ - if metadata_directory is not None: - metadata_directory = abspath(metadata_directory) - return self._call_hook('build_wheel', { - 'wheel_directory': abspath(wheel_directory), - 'config_settings': config_settings, - 'metadata_directory': metadata_directory, - }) - - def get_requires_for_build_sdist(self, config_settings=None): - """Identify packages required for building a wheel - - Returns a list of dependency specifications, e.g.: - ["setuptools >= 26"] - - This does not include requirements specified in pyproject.toml. - It returns the result of calling the equivalently named hook in a - subprocess. - """ - return self._call_hook('get_requires_for_build_sdist', { - 'config_settings': config_settings - }) - - def build_sdist(self, sdist_directory, config_settings=None): - """Build an sdist from this project. - - Returns the name of the newly created file. - - This calls the 'build_sdist' backend hook in a subprocess. - """ - return self._call_hook('build_sdist', { - 'sdist_directory': abspath(sdist_directory), - 'config_settings': config_settings, - }) - - def _call_hook(self, hook_name, kwargs): - # On Python 2, pytoml returns Unicode values (which is correct) but the - # environment passed to check_call needs to contain string values. We - # convert here by encoding using ASCII (the backend can only contain - # letters, digits and _, . and : characters, and will be used as a - # Python identifier, so non-ASCII content is wrong on Python 2 in - # any case). 
- if sys.version_info[0] == 2: - build_backend = self.build_backend.encode('ASCII') - else: - build_backend = self.build_backend - - with tempdir() as td: - compat.write_json({'kwargs': kwargs}, pjoin(td, 'input.json'), - indent=2) - - # Run the hook in a subprocess - self._subprocess_runner( - [sys.executable, _in_proc_script, hook_name, td], - cwd=self.source_dir, - extra_environ={'PEP517_BUILD_BACKEND': build_backend} - ) - - data = compat.read_json(pjoin(td, 'output.json')) - if data.get('unsupported'): - raise UnsupportedOperation - if data.get('no_backend'): - raise BackendUnavailable - return data['return_val'] diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pkg_resources/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pkg_resources/__init__.py deleted file mode 100644 index 9c4fd8e..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pkg_resources/__init__.py +++ /dev/null @@ -1,3171 +0,0 @@ -# coding: utf-8 -""" -Package resource API --------------------- - -A resource is a logical file contained within a package, or a logical -subdirectory thereof. The package resource API expects resource names -to have their path parts separated with ``/``, *not* whatever the local -path separator is. Do not use os.path operations to manipulate resource -names being passed into the API. - -The package resource API is designed to work with normal filesystem packages, -.egg files, and unpacked .egg files. It can also work in a limited way with -.zip files and with custom PEP 302 loaders that support the ``get_data()`` -method. 
-""" - -from __future__ import absolute_import - -import sys -import os -import io -import time -import re -import types -import zipfile -import zipimport -import warnings -import stat -import functools -import pkgutil -import operator -import platform -import collections -import plistlib -import email.parser -import errno -import tempfile -import textwrap -import itertools -import inspect -from pkgutil import get_importer - -try: - import _imp -except ImportError: - # Python 3.2 compatibility - import imp as _imp - -try: - FileExistsError -except NameError: - FileExistsError = OSError - -from pip._vendor import six -from pip._vendor.six.moves import urllib, map, filter - -# capture these to bypass sandboxing -from os import utime -try: - from os import mkdir, rename, unlink - WRITE_SUPPORT = True -except ImportError: - # no write support, probably under GAE - WRITE_SUPPORT = False - -from os import open as os_open -from os.path import isdir, split - -try: - import importlib.machinery as importlib_machinery - # access attribute to force import under delayed import mechanisms. - importlib_machinery.__name__ -except ImportError: - importlib_machinery = None - -from . import py31compat -from pip._vendor import appdirs -from pip._vendor import packaging -__import__('pip._vendor.packaging.version') -__import__('pip._vendor.packaging.specifiers') -__import__('pip._vendor.packaging.requirements') -__import__('pip._vendor.packaging.markers') - - -__metaclass__ = type - - -if (3, 0) < sys.version_info < (3, 4): - raise RuntimeError("Python 3.4 or later is required") - -if six.PY2: - # Those builtin exceptions are only defined in Python 3 - PermissionError = None - NotADirectoryError = None - -# declare some globals that will be defined later to -# satisfy the linters. 
-require = None -working_set = None -add_activation_listener = None -resources_stream = None -cleanup_resources = None -resource_dir = None -resource_stream = None -set_extraction_path = None -resource_isdir = None -resource_string = None -iter_entry_points = None -resource_listdir = None -resource_filename = None -resource_exists = None -_distribution_finders = None -_namespace_handlers = None -_namespace_packages = None - - -class PEP440Warning(RuntimeWarning): - """ - Used when there is an issue with a version or specifier not complying with - PEP 440. - """ - - -def parse_version(v): - try: - return packaging.version.Version(v) - except packaging.version.InvalidVersion: - return packaging.version.LegacyVersion(v) - - -_state_vars = {} - - -def _declare_state(vartype, **kw): - globals().update(kw) - _state_vars.update(dict.fromkeys(kw, vartype)) - - -def __getstate__(): - state = {} - g = globals() - for k, v in _state_vars.items(): - state[k] = g['_sget_' + v](g[k]) - return state - - -def __setstate__(state): - g = globals() - for k, v in state.items(): - g['_sset_' + _state_vars[k]](k, g[k], v) - return state - - -def _sget_dict(val): - return val.copy() - - -def _sset_dict(key, ob, state): - ob.clear() - ob.update(state) - - -def _sget_object(val): - return val.__getstate__() - - -def _sset_object(key, ob, state): - ob.__setstate__(state) - - -_sget_none = _sset_none = lambda *args: None - - -def get_supported_platform(): - """Return this platform's maximum compatible version. - - distutils.util.get_platform() normally reports the minimum version - of Mac OS X that would be required to *use* extensions produced by - distutils. But what we want when checking compatibility is to know the - version of Mac OS X that we are *running*. To allow usage of packages that - explicitly require a newer version of Mac OS X, we must also know the - current version of the OS. 
- - If this condition occurs for any other platform with a version in its - platform strings, this function should be extended accordingly. - """ - plat = get_build_platform() - m = macosVersionString.match(plat) - if m is not None and sys.platform == "darwin": - try: - plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3)) - except ValueError: - # not Mac OS X - pass - return plat - - -__all__ = [ - # Basic resource access and distribution/entry point discovery - 'require', 'run_script', 'get_provider', 'get_distribution', - 'load_entry_point', 'get_entry_map', 'get_entry_info', - 'iter_entry_points', - 'resource_string', 'resource_stream', 'resource_filename', - 'resource_listdir', 'resource_exists', 'resource_isdir', - - # Environmental control - 'declare_namespace', 'working_set', 'add_activation_listener', - 'find_distributions', 'set_extraction_path', 'cleanup_resources', - 'get_default_cache', - - # Primary implementation classes - 'Environment', 'WorkingSet', 'ResourceManager', - 'Distribution', 'Requirement', 'EntryPoint', - - # Exceptions - 'ResolutionError', 'VersionConflict', 'DistributionNotFound', - 'UnknownExtra', 'ExtractionError', - - # Warnings - 'PEP440Warning', - - # Parsing functions and string utilities - 'parse_requirements', 'parse_version', 'safe_name', 'safe_version', - 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections', - 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker', - - # filesystem utilities - 'ensure_directory', 'normalize_path', - - # Distribution "precedence" constants - 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST', - - # "Provider" interfaces, implementations, and registration/lookup APIs - 'IMetadataProvider', 'IResourceProvider', 'FileMetadata', - 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider', - 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider', - 'register_finder', 'register_namespace_handler', 
'register_loader_type', - 'fixup_namespace_packages', 'get_importer', - - # Warnings - 'PkgResourcesDeprecationWarning', - - # Deprecated/backward compatibility only - 'run_main', 'AvailableDistributions', -] - - -class ResolutionError(Exception): - """Abstract base for dependency resolution errors""" - - def __repr__(self): - return self.__class__.__name__ + repr(self.args) - - -class VersionConflict(ResolutionError): - """ - An already-installed version conflicts with the requested version. - - Should be initialized with the installed Distribution and the requested - Requirement. - """ - - _template = "{self.dist} is installed but {self.req} is required" - - @property - def dist(self): - return self.args[0] - - @property - def req(self): - return self.args[1] - - def report(self): - return self._template.format(**locals()) - - def with_context(self, required_by): - """ - If required_by is non-empty, return a version of self that is a - ContextualVersionConflict. - """ - if not required_by: - return self - args = self.args + (required_by,) - return ContextualVersionConflict(*args) - - -class ContextualVersionConflict(VersionConflict): - """ - A VersionConflict that accepts a third parameter, the set of the - requirements that required the installed Distribution. 
- """ - - _template = VersionConflict._template + ' by {self.required_by}' - - @property - def required_by(self): - return self.args[2] - - -class DistributionNotFound(ResolutionError): - """A requested distribution was not found""" - - _template = ("The '{self.req}' distribution was not found " - "and is required by {self.requirers_str}") - - @property - def req(self): - return self.args[0] - - @property - def requirers(self): - return self.args[1] - - @property - def requirers_str(self): - if not self.requirers: - return 'the application' - return ', '.join(self.requirers) - - def report(self): - return self._template.format(**locals()) - - def __str__(self): - return self.report() - - -class UnknownExtra(ResolutionError): - """Distribution doesn't have an "extra feature" of the given name""" - - -_provider_factories = {} - -PY_MAJOR = sys.version[:3] -EGG_DIST = 3 -BINARY_DIST = 2 -SOURCE_DIST = 1 -CHECKOUT_DIST = 0 -DEVELOP_DIST = -1 - - -def register_loader_type(loader_type, provider_factory): - """Register `provider_factory` to make providers for `loader_type` - - `loader_type` is the type or class of a PEP 302 ``module.__loader__``, - and `provider_factory` is a function that, passed a *module* object, - returns an ``IResourceProvider`` for that module. 
- """ - _provider_factories[loader_type] = provider_factory - - -def get_provider(moduleOrReq): - """Return an IResourceProvider for the named module or requirement""" - if isinstance(moduleOrReq, Requirement): - return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] - try: - module = sys.modules[moduleOrReq] - except KeyError: - __import__(moduleOrReq) - module = sys.modules[moduleOrReq] - loader = getattr(module, '__loader__', None) - return _find_adapter(_provider_factories, loader)(module) - - -def _macosx_vers(_cache=[]): - if not _cache: - version = platform.mac_ver()[0] - # fallback for MacPorts - if version == '': - plist = '/System/Library/CoreServices/SystemVersion.plist' - if os.path.exists(plist): - if hasattr(plistlib, 'readPlist'): - plist_content = plistlib.readPlist(plist) - if 'ProductVersion' in plist_content: - version = plist_content['ProductVersion'] - - _cache.append(version.split('.')) - return _cache[0] - - -def _macosx_arch(machine): - return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine) - - -def get_build_platform(): - """Return this platform's string for platform-specific distributions - - XXX Currently this is the same as ``distutils.util.get_platform()``, but it - needs some hacks for Linux and Mac OS X. 
- """ - from sysconfig import get_platform - - plat = get_platform() - if sys.platform == "darwin" and not plat.startswith('macosx-'): - try: - version = _macosx_vers() - machine = os.uname()[4].replace(" ", "_") - return "macosx-%d.%d-%s" % ( - int(version[0]), int(version[1]), - _macosx_arch(machine), - ) - except ValueError: - # if someone is running a non-Mac darwin system, this will fall - # through to the default implementation - pass - return plat - - -macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)") -darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)") -# XXX backward compat -get_platform = get_build_platform - - -def compatible_platforms(provided, required): - """Can code for the `provided` platform run on the `required` platform? - - Returns true if either platform is ``None``, or the platforms are equal. - - XXX Needs compatibility checks for Linux and other unixy OSes. - """ - if provided is None or required is None or provided == required: - # easy case - return True - - # Mac OS X special cases - reqMac = macosVersionString.match(required) - if reqMac: - provMac = macosVersionString.match(provided) - - # is this a Mac package? - if not provMac: - # this is backwards compatibility for packages built before - # setuptools 0.6. All packages built after this point will - # use the new macosx designation. - provDarwin = darwinVersionString.match(provided) - if provDarwin: - dversion = int(provDarwin.group(1)) - macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2)) - if dversion == 7 and macosversion >= "10.3" or \ - dversion == 8 and macosversion >= "10.4": - return True - # egg isn't macosx or legacy darwin - return False - - # are they the same major version and machine type? - if provMac.group(1) != reqMac.group(1) or \ - provMac.group(3) != reqMac.group(3): - return False - - # is the required OS major update >= the provided one? 
- if int(provMac.group(2)) > int(reqMac.group(2)): - return False - - return True - - # XXX Linux and other platforms' special cases should go here - return False - - -def run_script(dist_spec, script_name): - """Locate distribution `dist_spec` and run its `script_name` script""" - ns = sys._getframe(1).f_globals - name = ns['__name__'] - ns.clear() - ns['__name__'] = name - require(dist_spec)[0].run_script(script_name, ns) - - -# backward compatibility -run_main = run_script - - -def get_distribution(dist): - """Return a current distribution object for a Requirement or string""" - if isinstance(dist, six.string_types): - dist = Requirement.parse(dist) - if isinstance(dist, Requirement): - dist = get_provider(dist) - if not isinstance(dist, Distribution): - raise TypeError("Expected string, Requirement, or Distribution", dist) - return dist - - -def load_entry_point(dist, group, name): - """Return `name` entry point of `group` for `dist` or raise ImportError""" - return get_distribution(dist).load_entry_point(group, name) - - -def get_entry_map(dist, group=None): - """Return the entry point map for `group`, or the full entry map""" - return get_distribution(dist).get_entry_map(group) - - -def get_entry_info(dist, group, name): - """Return the EntryPoint object for `group`+`name`, or ``None``""" - return get_distribution(dist).get_entry_info(group, name) - - -class IMetadataProvider: - def has_metadata(name): - """Does the package's distribution contain the named metadata?""" - - def get_metadata(name): - """The named metadata resource as a string""" - - def get_metadata_lines(name): - """Yield named metadata resource as list of non-blank non-comment lines - - Leading and trailing whitespace is stripped from each line, and lines - with ``#`` as the first non-blank character are omitted.""" - - def metadata_isdir(name): - """Is the named metadata a directory? 
(like ``os.path.isdir()``)""" - - def metadata_listdir(name): - """List of metadata names in the directory (like ``os.listdir()``)""" - - def run_script(script_name, namespace): - """Execute the named script in the supplied namespace dictionary""" - - -class IResourceProvider(IMetadataProvider): - """An object that provides access to package resources""" - - def get_resource_filename(manager, resource_name): - """Return a true filesystem path for `resource_name` - - `manager` must be an ``IResourceManager``""" - - def get_resource_stream(manager, resource_name): - """Return a readable file-like object for `resource_name` - - `manager` must be an ``IResourceManager``""" - - def get_resource_string(manager, resource_name): - """Return a string containing the contents of `resource_name` - - `manager` must be an ``IResourceManager``""" - - def has_resource(resource_name): - """Does the package contain the named resource?""" - - def resource_isdir(resource_name): - """Is the named resource a directory? (like ``os.path.isdir()``)""" - - def resource_listdir(resource_name): - """List of resource names in the directory (like ``os.listdir()``)""" - - -class WorkingSet: - """A collection of active distributions on sys.path (or a similar list)""" - - def __init__(self, entries=None): - """Create working set from list of path entries (default=sys.path)""" - self.entries = [] - self.entry_keys = {} - self.by_key = {} - self.callbacks = [] - - if entries is None: - entries = sys.path - - for entry in entries: - self.add_entry(entry) - - @classmethod - def _build_master(cls): - """ - Prepare the master working set. 
- """ - ws = cls() - try: - from __main__ import __requires__ - except ImportError: - # The main program does not list any requirements - return ws - - # ensure the requirements are met - try: - ws.require(__requires__) - except VersionConflict: - return cls._build_from_requirements(__requires__) - - return ws - - @classmethod - def _build_from_requirements(cls, req_spec): - """ - Build a working set from a requirement spec. Rewrites sys.path. - """ - # try it without defaults already on sys.path - # by starting with an empty path - ws = cls([]) - reqs = parse_requirements(req_spec) - dists = ws.resolve(reqs, Environment()) - for dist in dists: - ws.add(dist) - - # add any missing entries from sys.path - for entry in sys.path: - if entry not in ws.entries: - ws.add_entry(entry) - - # then copy back to sys.path - sys.path[:] = ws.entries - return ws - - def add_entry(self, entry): - """Add a path item to ``.entries``, finding any distributions on it - - ``find_distributions(entry, True)`` is used to find distributions - corresponding to the path entry, and they are added. `entry` is - always appended to ``.entries``, even if it is already present. - (This is because ``sys.path`` can contain the same value more than - once, and the ``.entries`` of the ``sys.path`` WorkingSet should always - equal ``sys.path``.) - """ - self.entry_keys.setdefault(entry, []) - self.entries.append(entry) - for dist in find_distributions(entry, True): - self.add(dist, entry, False) - - def __contains__(self, dist): - """True if `dist` is the active distribution for its project""" - return self.by_key.get(dist.key) == dist - - def find(self, req): - """Find a distribution matching requirement `req` - - If there is an active distribution for the requested project, this - returns it as long as it meets the version requirement specified by - `req`. But, if there is an active distribution for the project and it - does *not* meet the `req` requirement, ``VersionConflict`` is raised. 
- If there is no active distribution for the requested project, ``None`` - is returned. - """ - dist = self.by_key.get(req.key) - if dist is not None and dist not in req: - # XXX add more info - raise VersionConflict(dist, req) - return dist - - def iter_entry_points(self, group, name=None): - """Yield entry point objects from `group` matching `name` - - If `name` is None, yields all entry points in `group` from all - distributions in the working set, otherwise only ones matching - both `group` and `name` are yielded (in distribution order). - """ - return ( - entry - for dist in self - for entry in dist.get_entry_map(group).values() - if name is None or name == entry.name - ) - - def run_script(self, requires, script_name): - """Locate distribution for `requires` and run `script_name` script""" - ns = sys._getframe(1).f_globals - name = ns['__name__'] - ns.clear() - ns['__name__'] = name - self.require(requires)[0].run_script(script_name, ns) - - def __iter__(self): - """Yield distributions for non-duplicate projects in the working set - - The yield order is the order in which the items' path entries were - added to the working set. - """ - seen = {} - for item in self.entries: - if item not in self.entry_keys: - # workaround a cache issue - continue - - for key in self.entry_keys[item]: - if key not in seen: - seen[key] = 1 - yield self.by_key[key] - - def add(self, dist, entry=None, insert=True, replace=False): - """Add `dist` to working set, associated with `entry` - - If `entry` is unspecified, it defaults to the ``.location`` of `dist`. - On exit from this routine, `entry` is added to the end of the working - set's ``.entries`` (if it wasn't already present). - - `dist` is only added to the working set if it's for a project that - doesn't already have a distribution in the set, unless `replace=True`. - If it's added, any callbacks registered with the ``subscribe()`` method - will be called. 
- """ - if insert: - dist.insert_on(self.entries, entry, replace=replace) - - if entry is None: - entry = dist.location - keys = self.entry_keys.setdefault(entry, []) - keys2 = self.entry_keys.setdefault(dist.location, []) - if not replace and dist.key in self.by_key: - # ignore hidden distros - return - - self.by_key[dist.key] = dist - if dist.key not in keys: - keys.append(dist.key) - if dist.key not in keys2: - keys2.append(dist.key) - self._added_new(dist) - - def resolve(self, requirements, env=None, installer=None, - replace_conflicting=False, extras=None): - """List all distributions needed to (recursively) meet `requirements` - - `requirements` must be a sequence of ``Requirement`` objects. `env`, - if supplied, should be an ``Environment`` instance. If - not supplied, it defaults to all distributions available within any - entry or distribution in the working set. `installer`, if supplied, - will be invoked with each requirement that cannot be met by an - already-installed distribution; it should return a ``Distribution`` or - ``None``. - - Unless `replace_conflicting=True`, raises a VersionConflict exception - if - any requirements are found on the path that have the correct name but - the wrong version. Otherwise, if an `installer` is supplied it will be - invoked to obtain the correct version of the requirement and activate - it. - - `extras` is a list of the extras to be used with these requirements. - This is important because extra requirements may look like `my_req; - extra = "my_extra"`, which would otherwise be interpreted as a purely - optional requirement. Instead, we want to be able to assert that these - requirements are truly required. 
- """ - - # set up the stack - requirements = list(requirements)[::-1] - # set of processed requirements - processed = {} - # key -> dist - best = {} - to_activate = [] - - req_extras = _ReqExtras() - - # Mapping of requirement to set of distributions that required it; - # useful for reporting info about conflicts. - required_by = collections.defaultdict(set) - - while requirements: - # process dependencies breadth-first - req = requirements.pop(0) - if req in processed: - # Ignore cyclic or redundant dependencies - continue - - if not req_extras.markers_pass(req, extras): - continue - - dist = best.get(req.key) - if dist is None: - # Find the best distribution and add it to the map - dist = self.by_key.get(req.key) - if dist is None or (dist not in req and replace_conflicting): - ws = self - if env is None: - if dist is None: - env = Environment(self.entries) - else: - # Use an empty environment and workingset to avoid - # any further conflicts with the conflicting - # distribution - env = Environment([]) - ws = WorkingSet([]) - dist = best[req.key] = env.best_match( - req, ws, installer, - replace_conflicting=replace_conflicting - ) - if dist is None: - requirers = required_by.get(req, None) - raise DistributionNotFound(req, requirers) - to_activate.append(dist) - if dist not in req: - # Oops, the "best" so far conflicts with a dependency - dependent_req = required_by[req] - raise VersionConflict(dist, req).with_context(dependent_req) - - # push the new requirements onto the stack - new_requirements = dist.requires(req.extras)[::-1] - requirements.extend(new_requirements) - - # Register the new requirements needed by req - for new_requirement in new_requirements: - required_by[new_requirement].add(req.project_name) - req_extras[new_requirement] = req.extras - - processed[req] = True - - # return list of distros to activate - return to_activate - - def find_plugins( - self, plugin_env, full_env=None, installer=None, fallback=True): - """Find all activatable 
distributions in `plugin_env` - - Example usage:: - - distributions, errors = working_set.find_plugins( - Environment(plugin_dirlist) - ) - # add plugins+libs to sys.path - map(working_set.add, distributions) - # display errors - print('Could not load', errors) - - The `plugin_env` should be an ``Environment`` instance that contains - only distributions that are in the project's "plugin directory" or - directories. The `full_env`, if supplied, should be an ``Environment`` - contains all currently-available distributions. If `full_env` is not - supplied, one is created automatically from the ``WorkingSet`` this - method is called on, which will typically mean that every directory on - ``sys.path`` will be scanned for distributions. - - `installer` is a standard installer callback as used by the - ``resolve()`` method. The `fallback` flag indicates whether we should - attempt to resolve older versions of a plugin if the newest version - cannot be resolved. - - This method returns a 2-tuple: (`distributions`, `error_info`), where - `distributions` is a list of the distributions found in `plugin_env` - that were loadable, along with any other distributions that are needed - to resolve their dependencies. `error_info` is a dictionary mapping - unloadable plugin distributions to an exception instance describing the - error that occurred. Usually this will be a ``DistributionNotFound`` or - ``VersionConflict`` instance. 
- """ - - plugin_projects = list(plugin_env) - # scan project names in alphabetic order - plugin_projects.sort() - - error_info = {} - distributions = {} - - if full_env is None: - env = Environment(self.entries) - env += plugin_env - else: - env = full_env + plugin_env - - shadow_set = self.__class__([]) - # put all our entries in shadow_set - list(map(shadow_set.add, self)) - - for project_name in plugin_projects: - - for dist in plugin_env[project_name]: - - req = [dist.as_requirement()] - - try: - resolvees = shadow_set.resolve(req, env, installer) - - except ResolutionError as v: - # save error info - error_info[dist] = v - if fallback: - # try the next older version of project - continue - else: - # give up on this project, keep going - break - - else: - list(map(shadow_set.add, resolvees)) - distributions.update(dict.fromkeys(resolvees)) - - # success, no need to try any more versions of this project - break - - distributions = list(distributions) - distributions.sort() - - return distributions, error_info - - def require(self, *requirements): - """Ensure that distributions matching `requirements` are activated - - `requirements` must be a string or a (possibly-nested) sequence - thereof, specifying the distributions and versions required. The - return value is a sequence of the distributions that needed to be - activated to fulfill the requirements; all relevant distributions are - included, even if they were already activated in this working set. - """ - needed = self.resolve(parse_requirements(requirements)) - - for dist in needed: - self.add(dist) - - return needed - - def subscribe(self, callback, existing=True): - """Invoke `callback` for all distributions - - If `existing=True` (default), - call on all existing ones, as well. 
- """ - if callback in self.callbacks: - return - self.callbacks.append(callback) - if not existing: - return - for dist in self: - callback(dist) - - def _added_new(self, dist): - for callback in self.callbacks: - callback(dist) - - def __getstate__(self): - return ( - self.entries[:], self.entry_keys.copy(), self.by_key.copy(), - self.callbacks[:] - ) - - def __setstate__(self, e_k_b_c): - entries, keys, by_key, callbacks = e_k_b_c - self.entries = entries[:] - self.entry_keys = keys.copy() - self.by_key = by_key.copy() - self.callbacks = callbacks[:] - - -class _ReqExtras(dict): - """ - Map each requirement to the extras that demanded it. - """ - - def markers_pass(self, req, extras=None): - """ - Evaluate markers for req against each extra that - demanded it. - - Return False if the req has a marker and fails - evaluation. Otherwise, return True. - """ - extra_evals = ( - req.marker.evaluate({'extra': extra}) - for extra in self.get(req, ()) + (extras or (None,)) - ) - return not req.marker or any(extra_evals) - - -class Environment: - """Searchable snapshot of distributions on a search path""" - - def __init__( - self, search_path=None, platform=get_supported_platform(), - python=PY_MAJOR): - """Snapshot distributions available on a search path - - Any distributions found on `search_path` are added to the environment. - `search_path` should be a sequence of ``sys.path`` items. If not - supplied, ``sys.path`` is used. - - `platform` is an optional string specifying the name of the platform - that platform-specific distributions must be compatible with. If - unspecified, it defaults to the current platform. `python` is an - optional string naming the desired version of Python (e.g. ``'3.6'``); - it defaults to the current version. - - You may explicitly set `platform` (and/or `python`) to ``None`` if you - wish to map *all* distributions, not just those compatible with the - running platform or Python version. 
- """ - self._distmap = {} - self.platform = platform - self.python = python - self.scan(search_path) - - def can_add(self, dist): - """Is distribution `dist` acceptable for this environment? - - The distribution must match the platform and python version - requirements specified when this environment was created, or False - is returned. - """ - py_compat = ( - self.python is None - or dist.py_version is None - or dist.py_version == self.python - ) - return py_compat and compatible_platforms(dist.platform, self.platform) - - def remove(self, dist): - """Remove `dist` from the environment""" - self._distmap[dist.key].remove(dist) - - def scan(self, search_path=None): - """Scan `search_path` for distributions usable in this environment - - Any distributions found are added to the environment. - `search_path` should be a sequence of ``sys.path`` items. If not - supplied, ``sys.path`` is used. Only distributions conforming to - the platform/python version defined at initialization are added. - """ - if search_path is None: - search_path = sys.path - - for item in search_path: - for dist in find_distributions(item): - self.add(dist) - - def __getitem__(self, project_name): - """Return a newest-to-oldest list of distributions for `project_name` - - Uses case-insensitive `project_name` comparison, assuming all the - project's distributions use their project's name converted to all - lowercase as their key. 
- - """ - distribution_key = project_name.lower() - return self._distmap.get(distribution_key, []) - - def add(self, dist): - """Add `dist` if we ``can_add()`` it and it has not already been added - """ - if self.can_add(dist) and dist.has_version(): - dists = self._distmap.setdefault(dist.key, []) - if dist not in dists: - dists.append(dist) - dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) - - def best_match( - self, req, working_set, installer=None, replace_conflicting=False): - """Find distribution best matching `req` and usable on `working_set` - - This calls the ``find(req)`` method of the `working_set` to see if a - suitable distribution is already active. (This may raise - ``VersionConflict`` if an unsuitable version of the project is already - active in the specified `working_set`.) If a suitable distribution - isn't active, this method returns the newest distribution in the - environment that meets the ``Requirement`` in `req`. If no suitable - distribution is found, and `installer` is supplied, then the result of - calling the environment's ``obtain(req, installer)`` method will be - returned. - """ - try: - dist = working_set.find(req) - except VersionConflict: - if not replace_conflicting: - raise - dist = None - if dist is not None: - return dist - for dist in self[req.key]: - if dist in req: - return dist - # try to download/install - return self.obtain(req, installer) - - def obtain(self, requirement, installer=None): - """Obtain a distribution matching `requirement` (e.g. via download) - - Obtain a distro that matches requirement (e.g. via download). In the - base ``Environment`` class, this routine just returns - ``installer(requirement)``, unless `installer` is None, in which case - None is returned instead. 
This method is a hook that allows subclasses - to attempt other ways of obtaining a distribution before falling back - to the `installer` argument.""" - if installer is not None: - return installer(requirement) - - def __iter__(self): - """Yield the unique project names of the available distributions""" - for key in self._distmap.keys(): - if self[key]: - yield key - - def __iadd__(self, other): - """In-place addition of a distribution or environment""" - if isinstance(other, Distribution): - self.add(other) - elif isinstance(other, Environment): - for project in other: - for dist in other[project]: - self.add(dist) - else: - raise TypeError("Can't add %r to environment" % (other,)) - return self - - def __add__(self, other): - """Add an environment or distribution to an environment""" - new = self.__class__([], platform=None, python=None) - for env in self, other: - new += env - return new - - -# XXX backward compatibility -AvailableDistributions = Environment - - -class ExtractionError(RuntimeError): - """An error occurred extracting a resource - - The following attributes are available from instances of this exception: - - manager - The resource manager that raised this exception - - cache_path - The base directory for resource extraction - - original_error - The exception instance that caused extraction to fail - """ - - -class ResourceManager: - """Manage resource extraction and packages""" - extraction_path = None - - def __init__(self): - self.cached_files = {} - - def resource_exists(self, package_or_requirement, resource_name): - """Does the named resource exist?""" - return get_provider(package_or_requirement).has_resource(resource_name) - - def resource_isdir(self, package_or_requirement, resource_name): - """Is the named resource an existing directory?""" - return get_provider(package_or_requirement).resource_isdir( - resource_name - ) - - def resource_filename(self, package_or_requirement, resource_name): - """Return a true filesystem path for 
specified resource""" - return get_provider(package_or_requirement).get_resource_filename( - self, resource_name - ) - - def resource_stream(self, package_or_requirement, resource_name): - """Return a readable file-like object for specified resource""" - return get_provider(package_or_requirement).get_resource_stream( - self, resource_name - ) - - def resource_string(self, package_or_requirement, resource_name): - """Return specified resource as a string""" - return get_provider(package_or_requirement).get_resource_string( - self, resource_name - ) - - def resource_listdir(self, package_or_requirement, resource_name): - """List the contents of the named resource directory""" - return get_provider(package_or_requirement).resource_listdir( - resource_name - ) - - def extraction_error(self): - """Give an error message for problems extracting file(s)""" - - old_exc = sys.exc_info()[1] - cache_path = self.extraction_path or get_default_cache() - - tmpl = textwrap.dedent(""" - Can't extract file(s) to egg cache - - The following error occurred while trying to extract file(s) - to the Python egg cache: - - {old_exc} - - The Python egg cache directory is currently set to: - - {cache_path} - - Perhaps your account does not have write access to this directory? - You can change the cache directory by setting the PYTHON_EGG_CACHE - environment variable to point to an accessible directory. - """).lstrip() - err = ExtractionError(tmpl.format(**locals())) - err.manager = self - err.cache_path = cache_path - err.original_error = old_exc - raise err - - def get_cache_path(self, archive_name, names=()): - """Return absolute location in cache for `archive_name` and `names` - - The parent directory of the resulting path will be created if it does - not already exist. `archive_name` should be the base filename of the - enclosing egg (which may not be the name of the enclosing zipfile!), - including its ".egg" extension. 
`names`, if provided, should be a - sequence of path name parts "under" the egg's extraction location. - - This method should only be called by resource providers that need to - obtain an extraction location, and only for names they intend to - extract, as it tracks the generated names for possible cleanup later. - """ - extract_path = self.extraction_path or get_default_cache() - target_path = os.path.join(extract_path, archive_name + '-tmp', *names) - try: - _bypass_ensure_directory(target_path) - except Exception: - self.extraction_error() - - self._warn_unsafe_extraction_path(extract_path) - - self.cached_files[target_path] = 1 - return target_path - - @staticmethod - def _warn_unsafe_extraction_path(path): - """ - If the default extraction path is overridden and set to an insecure - location, such as /tmp, it opens up an opportunity for an attacker to - replace an extracted file with an unauthorized payload. Warn the user - if a known insecure location is used. - - See Distribute #375 for more details. - """ - if os.name == 'nt' and not path.startswith(os.environ['windir']): - # On Windows, permissions are generally restrictive by default - # and temp directories are not writable by other users, so - # bypass the warning. - return - mode = os.stat(path).st_mode - if mode & stat.S_IWOTH or mode & stat.S_IWGRP: - msg = ( - "%s is writable by group/others and vulnerable to attack " - "when " - "used with get_resource_filename. Consider a more secure " - "location (set with .set_extraction_path or the " - "PYTHON_EGG_CACHE environment variable)." % path - ) - warnings.warn(msg, UserWarning) - - def postprocess(self, tempname, filename): - """Perform any platform-specific postprocessing of `tempname` - - This is where Mac header rewrites should be done; other platforms don't - have anything special they should do. - - Resource providers should call this method ONLY after successfully - extracting a compressed resource. 
They must NOT call it on resources - that are already in the filesystem. - - `tempname` is the current (temporary) name of the file, and `filename` - is the name it will be renamed to by the caller after this routine - returns. - """ - - if os.name == 'posix': - # Make the resource executable - mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777 - os.chmod(tempname, mode) - - def set_extraction_path(self, path): - """Set the base path where resources will be extracted to, if needed. - - If you do not call this routine before any extractions take place, the - path defaults to the return value of ``get_default_cache()``. (Which - is based on the ``PYTHON_EGG_CACHE`` environment variable, with various - platform-specific fallbacks. See that routine's documentation for more - details.) - - Resources are extracted to subdirectories of this path based upon - information given by the ``IResourceProvider``. You may set this to a - temporary directory, but then you must call ``cleanup_resources()`` to - delete the extracted files when done. There is no guarantee that - ``cleanup_resources()`` will be able to remove all extracted files. - - (Note: you may not change the extraction path for a given resource - manager once resources have been extracted, unless you first call - ``cleanup_resources()``.) - """ - if self.cached_files: - raise ValueError( - "Can't change extraction path, files already extracted" - ) - - self.extraction_path = path - - def cleanup_resources(self, force=False): - """ - Delete all extracted resource files and directories, returning a list - of the file and directory names that could not be successfully removed. - This function does not have any concurrency protection, so it should - generally only be called when the extraction path is a temporary - directory exclusive to a single process. 
This method is not - automatically called; you must call it explicitly or register it as an - ``atexit`` function if you wish to ensure cleanup of a temporary - directory used for extractions. - """ - # XXX - - -def get_default_cache(): - """ - Return the ``PYTHON_EGG_CACHE`` environment variable - or a platform-relevant user cache dir for an app - named "Python-Eggs". - """ - return ( - os.environ.get('PYTHON_EGG_CACHE') - or appdirs.user_cache_dir(appname='Python-Eggs') - ) - - -def safe_name(name): - """Convert an arbitrary string to a standard distribution name - - Any runs of non-alphanumeric/. characters are replaced with a single '-'. - """ - return re.sub('[^A-Za-z0-9.]+', '-', name) - - -def safe_version(version): - """ - Convert an arbitrary string to a standard version string - """ - try: - # normalize the version - return str(packaging.version.Version(version)) - except packaging.version.InvalidVersion: - version = version.replace(' ', '.') - return re.sub('[^A-Za-z0-9.]+', '-', version) - - -def safe_extra(extra): - """Convert an arbitrary string to a standard 'extra' name - - Any runs of non-alphanumeric characters are replaced with a single '_', - and the result is always lowercased. - """ - return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower() - - -def to_filename(name): - """Convert a project or version name to its filename-escaped form - - Any '-' characters are currently replaced with '_'. - """ - return name.replace('-', '_') - - -def invalid_marker(text): - """ - Validate text as a PEP 508 environment marker; return an exception - if invalid or False otherwise. - """ - try: - evaluate_marker(text) - except SyntaxError as e: - e.filename = None - e.lineno = None - return e - return False - - -def evaluate_marker(text, extra=None): - """ - Evaluate a PEP 508 environment marker. - Return a boolean indicating the marker result in this environment. - Raise SyntaxError if marker is invalid. - - This implementation uses the 'pyparsing' module. 
- """ - try: - marker = packaging.markers.Marker(text) - return marker.evaluate() - except packaging.markers.InvalidMarker as e: - raise SyntaxError(e) - - -class NullProvider: - """Try to implement resources and metadata for arbitrary PEP 302 loaders""" - - egg_name = None - egg_info = None - loader = None - - def __init__(self, module): - self.loader = getattr(module, '__loader__', None) - self.module_path = os.path.dirname(getattr(module, '__file__', '')) - - def get_resource_filename(self, manager, resource_name): - return self._fn(self.module_path, resource_name) - - def get_resource_stream(self, manager, resource_name): - return io.BytesIO(self.get_resource_string(manager, resource_name)) - - def get_resource_string(self, manager, resource_name): - return self._get(self._fn(self.module_path, resource_name)) - - def has_resource(self, resource_name): - return self._has(self._fn(self.module_path, resource_name)) - - def has_metadata(self, name): - return self.egg_info and self._has(self._fn(self.egg_info, name)) - - def get_metadata(self, name): - if not self.egg_info: - return "" - value = self._get(self._fn(self.egg_info, name)) - return value.decode('utf-8') if six.PY3 else value - - def get_metadata_lines(self, name): - return yield_lines(self.get_metadata(name)) - - def resource_isdir(self, resource_name): - return self._isdir(self._fn(self.module_path, resource_name)) - - def metadata_isdir(self, name): - return self.egg_info and self._isdir(self._fn(self.egg_info, name)) - - def resource_listdir(self, resource_name): - return self._listdir(self._fn(self.module_path, resource_name)) - - def metadata_listdir(self, name): - if self.egg_info: - return self._listdir(self._fn(self.egg_info, name)) - return [] - - def run_script(self, script_name, namespace): - script = 'scripts/' + script_name - if not self.has_metadata(script): - raise ResolutionError( - "Script {script!r} not found in metadata at {self.egg_info!r}" - .format(**locals()), - ) - script_text = 
self.get_metadata(script).replace('\r\n', '\n') - script_text = script_text.replace('\r', '\n') - script_filename = self._fn(self.egg_info, script) - namespace['__file__'] = script_filename - if os.path.exists(script_filename): - source = open(script_filename).read() - code = compile(source, script_filename, 'exec') - exec(code, namespace, namespace) - else: - from linecache import cache - cache[script_filename] = ( - len(script_text), 0, script_text.split('\n'), script_filename - ) - script_code = compile(script_text, script_filename, 'exec') - exec(script_code, namespace, namespace) - - def _has(self, path): - raise NotImplementedError( - "Can't perform this operation for unregistered loader type" - ) - - def _isdir(self, path): - raise NotImplementedError( - "Can't perform this operation for unregistered loader type" - ) - - def _listdir(self, path): - raise NotImplementedError( - "Can't perform this operation for unregistered loader type" - ) - - def _fn(self, base, resource_name): - if resource_name: - return os.path.join(base, *resource_name.split('/')) - return base - - def _get(self, path): - if hasattr(self.loader, 'get_data'): - return self.loader.get_data(path) - raise NotImplementedError( - "Can't perform this operation for loaders without 'get_data()'" - ) - - -register_loader_type(object, NullProvider) - - -class EggProvider(NullProvider): - """Provider based on a virtual filesystem""" - - def __init__(self, module): - NullProvider.__init__(self, module) - self._setup_prefix() - - def _setup_prefix(self): - # we assume here that our metadata may be nested inside a "basket" - # of multiple eggs; that's why we use module_path instead of .archive - path = self.module_path - old = None - while path != old: - if _is_egg_path(path): - self.egg_name = os.path.basename(path) - self.egg_info = os.path.join(path, 'EGG-INFO') - self.egg_root = path - break - old = path - path, base = os.path.split(path) - - -class DefaultProvider(EggProvider): - """Provides 
access to package resources in the filesystem""" - - def _has(self, path): - return os.path.exists(path) - - def _isdir(self, path): - return os.path.isdir(path) - - def _listdir(self, path): - return os.listdir(path) - - def get_resource_stream(self, manager, resource_name): - return open(self._fn(self.module_path, resource_name), 'rb') - - def _get(self, path): - with open(path, 'rb') as stream: - return stream.read() - - @classmethod - def _register(cls): - loader_names = 'SourceFileLoader', 'SourcelessFileLoader', - for name in loader_names: - loader_cls = getattr(importlib_machinery, name, type(None)) - register_loader_type(loader_cls, cls) - - -DefaultProvider._register() - - -class EmptyProvider(NullProvider): - """Provider that returns nothing for all requests""" - - module_path = None - - _isdir = _has = lambda self, path: False - - def _get(self, path): - return '' - - def _listdir(self, path): - return [] - - def __init__(self): - pass - - -empty_provider = EmptyProvider() - - -class ZipManifests(dict): - """ - zip manifest builder - """ - - @classmethod - def build(cls, path): - """ - Build a dictionary similar to the zipimport directory - caches, except instead of tuples, store ZipInfo objects. - - Use a platform-specific path separator (os.sep) for the path keys - for compatibility with pypy on Windows. - """ - with zipfile.ZipFile(path) as zfile: - items = ( - ( - name.replace('/', os.sep), - zfile.getinfo(name), - ) - for name in zfile.namelist() - ) - return dict(items) - - load = build - - -class MemoizedZipManifests(ZipManifests): - """ - Memoized zipfile manifests. - """ - manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime') - - def load(self, path): - """ - Load a manifest at path or return a suitable manifest already loaded. 
- """ - path = os.path.normpath(path) - mtime = os.stat(path).st_mtime - - if path not in self or self[path].mtime != mtime: - manifest = self.build(path) - self[path] = self.manifest_mod(manifest, mtime) - - return self[path].manifest - - -class ZipProvider(EggProvider): - """Resource support for zips and eggs""" - - eagers = None - _zip_manifests = MemoizedZipManifests() - - def __init__(self, module): - EggProvider.__init__(self, module) - self.zip_pre = self.loader.archive + os.sep - - def _zipinfo_name(self, fspath): - # Convert a virtual filename (full path to file) into a zipfile subpath - # usable with the zipimport directory cache for our target archive - fspath = fspath.rstrip(os.sep) - if fspath == self.loader.archive: - return '' - if fspath.startswith(self.zip_pre): - return fspath[len(self.zip_pre):] - raise AssertionError( - "%s is not a subpath of %s" % (fspath, self.zip_pre) - ) - - def _parts(self, zip_path): - # Convert a zipfile subpath into an egg-relative path part list. 
- # pseudo-fs path - fspath = self.zip_pre + zip_path - if fspath.startswith(self.egg_root + os.sep): - return fspath[len(self.egg_root) + 1:].split(os.sep) - raise AssertionError( - "%s is not a subpath of %s" % (fspath, self.egg_root) - ) - - @property - def zipinfo(self): - return self._zip_manifests.load(self.loader.archive) - - def get_resource_filename(self, manager, resource_name): - if not self.egg_name: - raise NotImplementedError( - "resource_filename() only supported for .egg, not .zip" - ) - # no need to lock for extraction, since we use temp names - zip_path = self._resource_to_zip(resource_name) - eagers = self._get_eager_resources() - if '/'.join(self._parts(zip_path)) in eagers: - for name in eagers: - self._extract_resource(manager, self._eager_to_zip(name)) - return self._extract_resource(manager, zip_path) - - @staticmethod - def _get_date_and_size(zip_stat): - size = zip_stat.file_size - # ymdhms+wday, yday, dst - date_time = zip_stat.date_time + (0, 0, -1) - # 1980 offset already done - timestamp = time.mktime(date_time) - return timestamp, size - - def _extract_resource(self, manager, zip_path): - - if zip_path in self._index(): - for name in self._index()[zip_path]: - last = self._extract_resource( - manager, os.path.join(zip_path, name) - ) - # return the extracted directory name - return os.path.dirname(last) - - timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) - - if not WRITE_SUPPORT: - raise IOError('"os.rename" and "os.unlink" are not supported ' - 'on this platform') - try: - - real_path = manager.get_cache_path( - self.egg_name, self._parts(zip_path) - ) - - if self._is_current(real_path, zip_path): - return real_path - - outf, tmpnam = _mkstemp( - ".$extract", - dir=os.path.dirname(real_path), - ) - os.write(outf, self.loader.get_data(zip_path)) - os.close(outf) - utime(tmpnam, (timestamp, timestamp)) - manager.postprocess(tmpnam, real_path) - - try: - rename(tmpnam, real_path) - - except os.error: - if 
os.path.isfile(real_path): - if self._is_current(real_path, zip_path): - # the file became current since it was checked above, - # so proceed. - return real_path - # Windows, del old file and retry - elif os.name == 'nt': - unlink(real_path) - rename(tmpnam, real_path) - return real_path - raise - - except os.error: - # report a user-friendly error - manager.extraction_error() - - return real_path - - def _is_current(self, file_path, zip_path): - """ - Return True if the file_path is current for this zip_path - """ - timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) - if not os.path.isfile(file_path): - return False - stat = os.stat(file_path) - if stat.st_size != size or stat.st_mtime != timestamp: - return False - # check that the contents match - zip_contents = self.loader.get_data(zip_path) - with open(file_path, 'rb') as f: - file_contents = f.read() - return zip_contents == file_contents - - def _get_eager_resources(self): - if self.eagers is None: - eagers = [] - for name in ('native_libs.txt', 'eager_resources.txt'): - if self.has_metadata(name): - eagers.extend(self.get_metadata_lines(name)) - self.eagers = eagers - return self.eagers - - def _index(self): - try: - return self._dirindex - except AttributeError: - ind = {} - for path in self.zipinfo: - parts = path.split(os.sep) - while parts: - parent = os.sep.join(parts[:-1]) - if parent in ind: - ind[parent].append(parts[-1]) - break - else: - ind[parent] = [parts.pop()] - self._dirindex = ind - return ind - - def _has(self, fspath): - zip_path = self._zipinfo_name(fspath) - return zip_path in self.zipinfo or zip_path in self._index() - - def _isdir(self, fspath): - return self._zipinfo_name(fspath) in self._index() - - def _listdir(self, fspath): - return list(self._index().get(self._zipinfo_name(fspath), ())) - - def _eager_to_zip(self, resource_name): - return self._zipinfo_name(self._fn(self.egg_root, resource_name)) - - def _resource_to_zip(self, resource_name): - return 
self._zipinfo_name(self._fn(self.module_path, resource_name)) - - -register_loader_type(zipimport.zipimporter, ZipProvider) - - -class FileMetadata(EmptyProvider): - """Metadata handler for standalone PKG-INFO files - - Usage:: - - metadata = FileMetadata("/path/to/PKG-INFO") - - This provider rejects all data and metadata requests except for PKG-INFO, - which is treated as existing, and will be the contents of the file at - the provided location. - """ - - def __init__(self, path): - self.path = path - - def has_metadata(self, name): - return name == 'PKG-INFO' and os.path.isfile(self.path) - - def get_metadata(self, name): - if name != 'PKG-INFO': - raise KeyError("No metadata except PKG-INFO is available") - - with io.open(self.path, encoding='utf-8', errors="replace") as f: - metadata = f.read() - self._warn_on_replacement(metadata) - return metadata - - def _warn_on_replacement(self, metadata): - # Python 2.7 compat for: replacement_char = '�' - replacement_char = b'\xef\xbf\xbd'.decode('utf-8') - if replacement_char in metadata: - tmpl = "{self.path} could not be properly decoded in UTF-8" - msg = tmpl.format(**locals()) - warnings.warn(msg) - - def get_metadata_lines(self, name): - return yield_lines(self.get_metadata(name)) - - -class PathMetadata(DefaultProvider): - """Metadata provider for egg directories - - Usage:: - - # Development eggs: - - egg_info = "/path/to/PackageName.egg-info" - base_dir = os.path.dirname(egg_info) - metadata = PathMetadata(base_dir, egg_info) - dist_name = os.path.splitext(os.path.basename(egg_info))[0] - dist = Distribution(basedir, project_name=dist_name, metadata=metadata) - - # Unpacked egg directories: - - egg_path = "/path/to/PackageName-ver-pyver-etc.egg" - metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO')) - dist = Distribution.from_filename(egg_path, metadata=metadata) - """ - - def __init__(self, path, egg_info): - self.module_path = path - self.egg_info = egg_info - - -class 
EggMetadata(ZipProvider): - """Metadata provider for .egg files""" - - def __init__(self, importer): - """Create a metadata provider from a zipimporter""" - - self.zip_pre = importer.archive + os.sep - self.loader = importer - if importer.prefix: - self.module_path = os.path.join(importer.archive, importer.prefix) - else: - self.module_path = importer.archive - self._setup_prefix() - - -_declare_state('dict', _distribution_finders={}) - - -def register_finder(importer_type, distribution_finder): - """Register `distribution_finder` to find distributions in sys.path items - - `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item - handler), and `distribution_finder` is a callable that, passed a path - item and the importer instance, yields ``Distribution`` instances found on - that path item. See ``pkg_resources.find_on_path`` for an example.""" - _distribution_finders[importer_type] = distribution_finder - - -def find_distributions(path_item, only=False): - """Yield distributions accessible via `path_item`""" - importer = get_importer(path_item) - finder = _find_adapter(_distribution_finders, importer) - return finder(importer, path_item, only) - - -def find_eggs_in_zip(importer, path_item, only=False): - """ - Find eggs in zip files; possibly multiple nested eggs. 
- """ - if importer.archive.endswith('.whl'): - # wheels are not supported with this finder - # they don't have PKG-INFO metadata, and won't ever contain eggs - return - metadata = EggMetadata(importer) - if metadata.has_metadata('PKG-INFO'): - yield Distribution.from_filename(path_item, metadata=metadata) - if only: - # don't yield nested distros - return - for subitem in metadata.resource_listdir('/'): - if _is_egg_path(subitem): - subpath = os.path.join(path_item, subitem) - dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath) - for dist in dists: - yield dist - elif subitem.lower().endswith('.dist-info'): - subpath = os.path.join(path_item, subitem) - submeta = EggMetadata(zipimport.zipimporter(subpath)) - submeta.egg_info = subpath - yield Distribution.from_location(path_item, subitem, submeta) - - -register_finder(zipimport.zipimporter, find_eggs_in_zip) - - -def find_nothing(importer, path_item, only=False): - return () - - -register_finder(object, find_nothing) - - -def _by_version_descending(names): - """ - Given a list of filenames, return them in descending order - by version number. 
- - >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg' - >>> _by_version_descending(names) - ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar'] - >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg' - >>> _by_version_descending(names) - ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg'] - >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg' - >>> _by_version_descending(names) - ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg'] - """ - def _by_version(name): - """ - Parse each component of the filename - """ - name, ext = os.path.splitext(name) - parts = itertools.chain(name.split('-'), [ext]) - return [packaging.version.parse(part) for part in parts] - - return sorted(names, key=_by_version, reverse=True) - - -def find_on_path(importer, path_item, only=False): - """Yield distributions accessible on a sys.path directory""" - path_item = _normalize_cached(path_item) - - if _is_unpacked_egg(path_item): - yield Distribution.from_filename( - path_item, metadata=PathMetadata( - path_item, os.path.join(path_item, 'EGG-INFO') - ) - ) - return - - entries = safe_listdir(path_item) - - # for performance, before sorting by version, - # screen entries for only those that will yield - # distributions - filtered = ( - entry - for entry in entries - if dist_factory(path_item, entry, only) - ) - - # scan for .egg and .egg-info in directory - path_item_entries = _by_version_descending(filtered) - for entry in path_item_entries: - fullpath = os.path.join(path_item, entry) - factory = dist_factory(path_item, entry, only) - for dist in factory(fullpath): - yield dist - - -def dist_factory(path_item, entry, only): - """ - Return a dist_factory for a path_item and entry - """ - lower = entry.lower() - is_meta = any(map(lower.endswith, ('.egg-info', '.dist-info'))) - return ( - distributions_from_metadata - if is_meta else - find_distributions - if not only and _is_egg_path(entry) else - resolve_egg_link - if not only and 
lower.endswith('.egg-link') else - NoDists() - ) - - -class NoDists: - """ - >>> bool(NoDists()) - False - - >>> list(NoDists()('anything')) - [] - """ - def __bool__(self): - return False - if six.PY2: - __nonzero__ = __bool__ - - def __call__(self, fullpath): - return iter(()) - - -def safe_listdir(path): - """ - Attempt to list contents of path, but suppress some exceptions. - """ - try: - return os.listdir(path) - except (PermissionError, NotADirectoryError): - pass - except OSError as e: - # Ignore the directory if does not exist, not a directory or - # permission denied - ignorable = ( - e.errno in (errno.ENOTDIR, errno.EACCES, errno.ENOENT) - # Python 2 on Windows needs to be handled this way :( - or getattr(e, "winerror", None) == 267 - ) - if not ignorable: - raise - return () - - -def distributions_from_metadata(path): - root = os.path.dirname(path) - if os.path.isdir(path): - if len(os.listdir(path)) == 0: - # empty metadata dir; skip - return - metadata = PathMetadata(root, path) - else: - metadata = FileMetadata(path) - entry = os.path.basename(path) - yield Distribution.from_location( - root, entry, metadata, precedence=DEVELOP_DIST, - ) - - -def non_empty_lines(path): - """ - Yield non-empty lines from file at path - """ - with open(path) as f: - for line in f: - line = line.strip() - if line: - yield line - - -def resolve_egg_link(path): - """ - Given a path to an .egg-link, resolve distributions - present in the referenced path. 
- """ - referenced_paths = non_empty_lines(path) - resolved_paths = ( - os.path.join(os.path.dirname(path), ref) - for ref in referenced_paths - ) - dist_groups = map(find_distributions, resolved_paths) - return next(dist_groups, ()) - - -register_finder(pkgutil.ImpImporter, find_on_path) - -if hasattr(importlib_machinery, 'FileFinder'): - register_finder(importlib_machinery.FileFinder, find_on_path) - -_declare_state('dict', _namespace_handlers={}) -_declare_state('dict', _namespace_packages={}) - - -def register_namespace_handler(importer_type, namespace_handler): - """Register `namespace_handler` to declare namespace packages - - `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item - handler), and `namespace_handler` is a callable like this:: - - def namespace_handler(importer, path_entry, moduleName, module): - # return a path_entry to use for child packages - - Namespace handlers are only called if the importer object has already - agreed that it can handle the relevant path item, and they should only - return a subpath if the module __path__ does not already contain an - equivalent subpath. For an example namespace handler, see - ``pkg_resources.file_ns_handler``. 
- """ - _namespace_handlers[importer_type] = namespace_handler - - -def _handle_ns(packageName, path_item): - """Ensure that named package includes a subpath of path_item (if needed)""" - - importer = get_importer(path_item) - if importer is None: - return None - - # capture warnings due to #1111 - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - loader = importer.find_module(packageName) - - if loader is None: - return None - module = sys.modules.get(packageName) - if module is None: - module = sys.modules[packageName] = types.ModuleType(packageName) - module.__path__ = [] - _set_parent_ns(packageName) - elif not hasattr(module, '__path__'): - raise TypeError("Not a package:", packageName) - handler = _find_adapter(_namespace_handlers, importer) - subpath = handler(importer, path_item, packageName, module) - if subpath is not None: - path = module.__path__ - path.append(subpath) - loader.load_module(packageName) - _rebuild_mod_path(path, packageName, module) - return subpath - - -def _rebuild_mod_path(orig_path, package_name, module): - """ - Rebuild module.__path__ ensuring that all entries are ordered - corresponding to their sys.path order - """ - sys_path = [_normalize_cached(p) for p in sys.path] - - def safe_sys_path_index(entry): - """ - Workaround for #520 and #513. 
- """ - try: - return sys_path.index(entry) - except ValueError: - return float('inf') - - def position_in_sys_path(path): - """ - Return the ordinal of the path based on its position in sys.path - """ - path_parts = path.split(os.sep) - module_parts = package_name.count('.') + 1 - parts = path_parts[:-module_parts] - return safe_sys_path_index(_normalize_cached(os.sep.join(parts))) - - new_path = sorted(orig_path, key=position_in_sys_path) - new_path = [_normalize_cached(p) for p in new_path] - - if isinstance(module.__path__, list): - module.__path__[:] = new_path - else: - module.__path__ = new_path - - -def declare_namespace(packageName): - """Declare that package 'packageName' is a namespace package""" - - _imp.acquire_lock() - try: - if packageName in _namespace_packages: - return - - path = sys.path - parent, _, _ = packageName.rpartition('.') - - if parent: - declare_namespace(parent) - if parent not in _namespace_packages: - __import__(parent) - try: - path = sys.modules[parent].__path__ - except AttributeError: - raise TypeError("Not a package:", parent) - - # Track what packages are namespaces, so when new path items are added, - # they can be updated - _namespace_packages.setdefault(parent or None, []).append(packageName) - _namespace_packages.setdefault(packageName, []) - - for path_item in path: - # Ensure all the parent's path items are reflected in the child, - # if they apply - _handle_ns(packageName, path_item) - - finally: - _imp.release_lock() - - -def fixup_namespace_packages(path_item, parent=None): - """Ensure that previously-declared namespace packages include path_item""" - _imp.acquire_lock() - try: - for package in _namespace_packages.get(parent, ()): - subpath = _handle_ns(package, path_item) - if subpath: - fixup_namespace_packages(subpath, package) - finally: - _imp.release_lock() - - -def file_ns_handler(importer, path_item, packageName, module): - """Compute an ns-package subpath for a filesystem or zipfile importer""" - - subpath = 
os.path.join(path_item, packageName.split('.')[-1]) - normalized = _normalize_cached(subpath) - for item in module.__path__: - if _normalize_cached(item) == normalized: - break - else: - # Only return the path if it's not already there - return subpath - - -register_namespace_handler(pkgutil.ImpImporter, file_ns_handler) -register_namespace_handler(zipimport.zipimporter, file_ns_handler) - -if hasattr(importlib_machinery, 'FileFinder'): - register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler) - - -def null_ns_handler(importer, path_item, packageName, module): - return None - - -register_namespace_handler(object, null_ns_handler) - - -def normalize_path(filename): - """Normalize a file/dir name for comparison purposes""" - return os.path.normcase(os.path.realpath(os.path.normpath(_cygwin_patch(filename)))) - - -def _cygwin_patch(filename): # pragma: nocover - """ - Contrary to POSIX 2008, on Cygwin, getcwd (3) contains - symlink components. Using - os.path.abspath() works around this limitation. A fix in os.getcwd() - would probably better, in Cygwin even more so, except - that this seems to be by design... - """ - return os.path.abspath(filename) if sys.platform == 'cygwin' else filename - - -def _normalize_cached(filename, _cache={}): - try: - return _cache[filename] - except KeyError: - _cache[filename] = result = normalize_path(filename) - return result - - -def _is_egg_path(path): - """ - Determine if given path appears to be an egg. - """ - return path.lower().endswith('.egg') - - -def _is_unpacked_egg(path): - """ - Determine if given path appears to be an unpacked egg. 
- """ - return ( - _is_egg_path(path) and - os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO')) - ) - - -def _set_parent_ns(packageName): - parts = packageName.split('.') - name = parts.pop() - if parts: - parent = '.'.join(parts) - setattr(sys.modules[parent], name, sys.modules[packageName]) - - -def yield_lines(strs): - """Yield non-empty/non-comment lines of a string or sequence""" - if isinstance(strs, six.string_types): - for s in strs.splitlines(): - s = s.strip() - # skip blank lines/comments - if s and not s.startswith('#'): - yield s - else: - for ss in strs: - for s in yield_lines(ss): - yield s - - -MODULE = re.compile(r"\w+(\.\w+)*$").match -EGG_NAME = re.compile( - r""" - (?P<name>[^-]+) ( - -(?P<ver>[^-]+) ( - -py(?P<pyver>[^-]+) ( - -(?P<plat>.+) - )? - )? - )? - """, - re.VERBOSE | re.IGNORECASE, -).match - - -class EntryPoint: - """Object representing an advertised importable object""" - - def __init__(self, name, module_name, attrs=(), extras=(), dist=None): - if not MODULE(module_name): - raise ValueError("Invalid module name", module_name) - self.name = name - self.module_name = module_name - self.attrs = tuple(attrs) - self.extras = tuple(extras) - self.dist = dist - - def __str__(self): - s = "%s = %s" % (self.name, self.module_name) - if self.attrs: - s += ':' + '.'.join(self.attrs) - if self.extras: - s += ' [%s]' % ','.join(self.extras) - return s - - def __repr__(self): - return "EntryPoint.parse(%r)" % str(self) - - def load(self, require=True, *args, **kwargs): - """ - Require packages for this EntryPoint, then resolve it. - """ - if not require or args or kwargs: - warnings.warn( - "Parameters to load are deprecated. Call .resolve and " - ".require separately.", - PkgResourcesDeprecationWarning, - stacklevel=2, - ) - if require: - self.require(*args, **kwargs) - return self.resolve() - - def resolve(self): - """ - Resolve the entry point from its module and attrs. 
- """ - module = __import__(self.module_name, fromlist=['__name__'], level=0) - try: - return functools.reduce(getattr, self.attrs, module) - except AttributeError as exc: - raise ImportError(str(exc)) - - def require(self, env=None, installer=None): - if self.extras and not self.dist: - raise UnknownExtra("Can't require() without a distribution", self) - - # Get the requirements for this entry point with all its extras and - # then resolve them. We have to pass `extras` along when resolving so - # that the working set knows what extras we want. Otherwise, for - # dist-info distributions, the working set will assume that the - # requirements for that extra are purely optional and skip over them. - reqs = self.dist.requires(self.extras) - items = working_set.resolve(reqs, env, installer, extras=self.extras) - list(map(working_set.add, items)) - - pattern = re.compile( - r'\s*' - r'(?P<name>.+?)\s*' - r'=\s*' - r'(?P<module>[\w.]+)\s*' - r'(:\s*(?P<attr>[\w.]+))?\s*' - r'(?P<extras>\[.*\])?\s*$' - ) - - @classmethod - def parse(cls, src, dist=None): - """Parse a single entry point from string `src` - - Entry point syntax follows the form:: - - name = some.module:some.attr [extra1, extra2] - - The entry name and module name are required, but the ``:attrs`` and - ``[extras]`` parts are optional - """ - m = cls.pattern.match(src) - if not m: - msg = "EntryPoint must be in 'name=module:attrs [extras]' format" - raise ValueError(msg, src) - res = m.groupdict() - extras = cls._parse_extras(res['extras']) - attrs = res['attr'].split('.') if res['attr'] else () - return cls(res['name'], res['module'], attrs, extras, dist) - - @classmethod - def _parse_extras(cls, extras_spec): - if not extras_spec: - return () - req = Requirement.parse('x' + extras_spec) - if req.specs: - raise ValueError() - return req.extras - - @classmethod - def parse_group(cls, group, lines, dist=None): - """Parse an entry point group""" - if not MODULE(group): - raise ValueError("Invalid group name", 
group) - this = {} - for line in yield_lines(lines): - ep = cls.parse(line, dist) - if ep.name in this: - raise ValueError("Duplicate entry point", group, ep.name) - this[ep.name] = ep - return this - - @classmethod - def parse_map(cls, data, dist=None): - """Parse a map of entry point groups""" - if isinstance(data, dict): - data = data.items() - else: - data = split_sections(data) - maps = {} - for group, lines in data: - if group is None: - if not lines: - continue - raise ValueError("Entry points must be listed in groups") - group = group.strip() - if group in maps: - raise ValueError("Duplicate group name", group) - maps[group] = cls.parse_group(group, lines, dist) - return maps - - -def _remove_md5_fragment(location): - if not location: - return '' - parsed = urllib.parse.urlparse(location) - if parsed[-1].startswith('md5='): - return urllib.parse.urlunparse(parsed[:-1] + ('',)) - return location - - -def _version_from_file(lines): - """ - Given an iterable of lines from a Metadata file, return - the value of the Version field, if present, or None otherwise. 
- """ - def is_version_line(line): - return line.lower().startswith('version:') - version_lines = filter(is_version_line, lines) - line = next(iter(version_lines), '') - _, _, value = line.partition(':') - return safe_version(value.strip()) or None - - -class Distribution: - """Wrap an actual or potential sys.path entry w/metadata""" - PKG_INFO = 'PKG-INFO' - - def __init__( - self, location=None, metadata=None, project_name=None, - version=None, py_version=PY_MAJOR, platform=None, - precedence=EGG_DIST): - self.project_name = safe_name(project_name or 'Unknown') - if version is not None: - self._version = safe_version(version) - self.py_version = py_version - self.platform = platform - self.location = location - self.precedence = precedence - self._provider = metadata or empty_provider - - @classmethod - def from_location(cls, location, basename, metadata=None, **kw): - project_name, version, py_version, platform = [None] * 4 - basename, ext = os.path.splitext(basename) - if ext.lower() in _distributionImpl: - cls = _distributionImpl[ext.lower()] - - match = EGG_NAME(basename) - if match: - project_name, version, py_version, platform = match.group( - 'name', 'ver', 'pyver', 'plat' - ) - return cls( - location, metadata, project_name=project_name, version=version, - py_version=py_version, platform=platform, **kw - )._reload_version() - - def _reload_version(self): - return self - - @property - def hashcmp(self): - return ( - self.parsed_version, - self.precedence, - self.key, - _remove_md5_fragment(self.location), - self.py_version or '', - self.platform or '', - ) - - def __hash__(self): - return hash(self.hashcmp) - - def __lt__(self, other): - return self.hashcmp < other.hashcmp - - def __le__(self, other): - return self.hashcmp <= other.hashcmp - - def __gt__(self, other): - return self.hashcmp > other.hashcmp - - def __ge__(self, other): - return self.hashcmp >= other.hashcmp - - def __eq__(self, other): - if not isinstance(other, self.__class__): - # It's not 
a Distribution, so they are not equal - return False - return self.hashcmp == other.hashcmp - - def __ne__(self, other): - return not self == other - - # These properties have to be lazy so that we don't have to load any - # metadata until/unless it's actually needed. (i.e., some distributions - # may not know their name or version without loading PKG-INFO) - - @property - def key(self): - try: - return self._key - except AttributeError: - self._key = key = self.project_name.lower() - return key - - @property - def parsed_version(self): - if not hasattr(self, "_parsed_version"): - self._parsed_version = parse_version(self.version) - - return self._parsed_version - - def _warn_legacy_version(self): - LV = packaging.version.LegacyVersion - is_legacy = isinstance(self._parsed_version, LV) - if not is_legacy: - return - - # While an empty version is technically a legacy version and - # is not a valid PEP 440 version, it's also unlikely to - # actually come from someone and instead it is more likely that - # it comes from setuptools attempting to parse a filename and - # including it in the list. So for that we'll gate this warning - # on if the version is anything at all or not. - if not self.version: - return - - tmpl = textwrap.dedent(""" - '{project_name} ({version})' is being parsed as a legacy, - non PEP 440, - version. You may find odd behavior and sort order. - In particular it will be sorted as less than 0.0. It - is recommended to migrate to PEP 440 compatible - versions. 
- """).strip().replace('\n', ' ') - - warnings.warn(tmpl.format(**vars(self)), PEP440Warning) - - @property - def version(self): - try: - return self._version - except AttributeError: - version = _version_from_file(self._get_metadata(self.PKG_INFO)) - if version is None: - tmpl = "Missing 'Version:' header and/or %s file" - raise ValueError(tmpl % self.PKG_INFO, self) - return version - - @property - def _dep_map(self): - """ - A map of extra to its list of (direct) requirements - for this distribution, including the null extra. - """ - try: - return self.__dep_map - except AttributeError: - self.__dep_map = self._filter_extras(self._build_dep_map()) - return self.__dep_map - - @staticmethod - def _filter_extras(dm): - """ - Given a mapping of extras to dependencies, strip off - environment markers and filter out any dependencies - not matching the markers. - """ - for extra in list(filter(None, dm)): - new_extra = extra - reqs = dm.pop(extra) - new_extra, _, marker = extra.partition(':') - fails_marker = marker and ( - invalid_marker(marker) - or not evaluate_marker(marker) - ) - if fails_marker: - reqs = [] - new_extra = safe_extra(new_extra) or None - - dm.setdefault(new_extra, []).extend(reqs) - return dm - - def _build_dep_map(self): - dm = {} - for name in 'requires.txt', 'depends.txt': - for extra, reqs in split_sections(self._get_metadata(name)): - dm.setdefault(extra, []).extend(parse_requirements(reqs)) - return dm - - def requires(self, extras=()): - """List of Requirements needed for this distro if `extras` are used""" - dm = self._dep_map - deps = [] - deps.extend(dm.get(None, ())) - for ext in extras: - try: - deps.extend(dm[safe_extra(ext)]) - except KeyError: - raise UnknownExtra( - "%s has no such extra feature %r" % (self, ext) - ) - return deps - - def _get_metadata(self, name): - if self.has_metadata(name): - for line in self.get_metadata_lines(name): - yield line - - def activate(self, path=None, replace=False): - """Ensure distribution is 
importable on `path` (default=sys.path)""" - if path is None: - path = sys.path - self.insert_on(path, replace=replace) - if path is sys.path: - fixup_namespace_packages(self.location) - for pkg in self._get_metadata('namespace_packages.txt'): - if pkg in sys.modules: - declare_namespace(pkg) - - def egg_name(self): - """Return what this distribution's standard .egg filename should be""" - filename = "%s-%s-py%s" % ( - to_filename(self.project_name), to_filename(self.version), - self.py_version or PY_MAJOR - ) - - if self.platform: - filename += '-' + self.platform - return filename - - def __repr__(self): - if self.location: - return "%s (%s)" % (self, self.location) - else: - return str(self) - - def __str__(self): - try: - version = getattr(self, 'version', None) - except ValueError: - version = None - version = version or "[unknown version]" - return "%s %s" % (self.project_name, version) - - def __getattr__(self, attr): - """Delegate all unrecognized public attributes to .metadata provider""" - if attr.startswith('_'): - raise AttributeError(attr) - return getattr(self._provider, attr) - - def __dir__(self): - return list( - set(super(Distribution, self).__dir__()) - | set( - attr for attr in self._provider.__dir__() - if not attr.startswith('_') - ) - ) - - if not hasattr(object, '__dir__'): - # python 2.7 not supported - del __dir__ - - @classmethod - def from_filename(cls, filename, metadata=None, **kw): - return cls.from_location( - _normalize_cached(filename), os.path.basename(filename), metadata, - **kw - ) - - def as_requirement(self): - """Return a ``Requirement`` that matches this distribution exactly""" - if isinstance(self.parsed_version, packaging.version.Version): - spec = "%s==%s" % (self.project_name, self.parsed_version) - else: - spec = "%s===%s" % (self.project_name, self.parsed_version) - - return Requirement.parse(spec) - - def load_entry_point(self, group, name): - """Return the `name` entry point of `group` or raise ImportError""" - ep = 
self.get_entry_info(group, name) - if ep is None: - raise ImportError("Entry point %r not found" % ((group, name),)) - return ep.load() - - def get_entry_map(self, group=None): - """Return the entry point map for `group`, or the full entry map""" - try: - ep_map = self._ep_map - except AttributeError: - ep_map = self._ep_map = EntryPoint.parse_map( - self._get_metadata('entry_points.txt'), self - ) - if group is not None: - return ep_map.get(group, {}) - return ep_map - - def get_entry_info(self, group, name): - """Return the EntryPoint object for `group`+`name`, or ``None``""" - return self.get_entry_map(group).get(name) - - def insert_on(self, path, loc=None, replace=False): - """Ensure self.location is on path - - If replace=False (default): - - If location is already in path anywhere, do nothing. - - Else: - - If it's an egg and its parent directory is on path, - insert just ahead of the parent. - - Else: add to the end of path. - If replace=True: - - If location is already on path anywhere (not eggs) - or higher priority than its parent (eggs) - do nothing. - - Else: - - If it's an egg and its parent directory is on path, - insert just ahead of the parent, - removing any lower-priority entries. - - Else: add it to the front of path. 
- """ - - loc = loc or self.location - if not loc: - return - - nloc = _normalize_cached(loc) - bdir = os.path.dirname(nloc) - npath = [(p and _normalize_cached(p) or p) for p in path] - - for p, item in enumerate(npath): - if item == nloc: - if replace: - break - else: - # don't modify path (even removing duplicates) if - # found and not replace - return - elif item == bdir and self.precedence == EGG_DIST: - # if it's an .egg, give it precedence over its directory - # UNLESS it's already been added to sys.path and replace=False - if (not replace) and nloc in npath[p:]: - return - if path is sys.path: - self.check_version_conflict() - path.insert(p, loc) - npath.insert(p, nloc) - break - else: - if path is sys.path: - self.check_version_conflict() - if replace: - path.insert(0, loc) - else: - path.append(loc) - return - - # p is the spot where we found or inserted loc; now remove duplicates - while True: - try: - np = npath.index(nloc, p + 1) - except ValueError: - break - else: - del npath[np], path[np] - # ha! 
- p = np - - return - - def check_version_conflict(self): - if self.key == 'setuptools': - # ignore the inevitable setuptools self-conflicts :( - return - - nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt')) - loc = normalize_path(self.location) - for modname in self._get_metadata('top_level.txt'): - if (modname not in sys.modules or modname in nsp - or modname in _namespace_packages): - continue - if modname in ('pkg_resources', 'setuptools', 'site'): - continue - fn = getattr(sys.modules[modname], '__file__', None) - if fn and (normalize_path(fn).startswith(loc) or - fn.startswith(self.location)): - continue - issue_warning( - "Module %s was already imported from %s, but %s is being added" - " to sys.path" % (modname, fn, self.location), - ) - - def has_version(self): - try: - self.version - except ValueError: - issue_warning("Unbuilt egg for " + repr(self)) - return False - return True - - def clone(self, **kw): - """Copy this distribution, substituting in any changed keyword args""" - names = 'project_name version py_version platform location precedence' - for attr in names.split(): - kw.setdefault(attr, getattr(self, attr, None)) - kw.setdefault('metadata', self._provider) - return self.__class__(**kw) - - @property - def extras(self): - return [dep for dep in self._dep_map if dep] - - -class EggInfoDistribution(Distribution): - def _reload_version(self): - """ - Packages installed by distutils (e.g. numpy or scipy), - which uses an old safe_version, and so - their version numbers can get mangled when - converted to filenames (e.g., 1.11.0.dev0+2329eae to - 1.11.0.dev0_2329eae). These distributions will not be - parsed properly - downstream by Distribution and safe_version, so - take an extra step and try to get the version number from - the metadata file itself instead of the filename. 
- """ - md_version = _version_from_file(self._get_metadata(self.PKG_INFO)) - if md_version: - self._version = md_version - return self - - -class DistInfoDistribution(Distribution): - """ - Wrap an actual or potential sys.path entry - w/metadata, .dist-info style. - """ - PKG_INFO = 'METADATA' - EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])") - - @property - def _parsed_pkg_info(self): - """Parse and cache metadata""" - try: - return self._pkg_info - except AttributeError: - metadata = self.get_metadata(self.PKG_INFO) - self._pkg_info = email.parser.Parser().parsestr(metadata) - return self._pkg_info - - @property - def _dep_map(self): - try: - return self.__dep_map - except AttributeError: - self.__dep_map = self._compute_dependencies() - return self.__dep_map - - def _compute_dependencies(self): - """Recompute this distribution's dependencies.""" - dm = self.__dep_map = {None: []} - - reqs = [] - # Including any condition expressions - for req in self._parsed_pkg_info.get_all('Requires-Dist') or []: - reqs.extend(parse_requirements(req)) - - def reqs_for_extra(extra): - for req in reqs: - if not req.marker or req.marker.evaluate({'extra': extra}): - yield req - - common = frozenset(reqs_for_extra(None)) - dm[None].extend(common) - - for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []: - s_extra = safe_extra(extra.strip()) - dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common) - - return dm - - -_distributionImpl = { - '.egg': Distribution, - '.egg-info': EggInfoDistribution, - '.dist-info': DistInfoDistribution, -} - - -def issue_warning(*args, **kw): - level = 1 - g = globals() - try: - # find the first stack frame that is *not* code in - # the pkg_resources module, to use for the warning - while sys._getframe(level).f_globals is g: - level += 1 - except ValueError: - pass - warnings.warn(stacklevel=level + 1, *args, **kw) - - -class RequirementParseError(ValueError): - def __str__(self): - return ' '.join(self.args) - - -def 
parse_requirements(strs): - """Yield ``Requirement`` objects for each specification in `strs` - - `strs` must be a string, or a (possibly-nested) iterable thereof. - """ - # create a steppable iterator, so we can handle \-continuations - lines = iter(yield_lines(strs)) - - for line in lines: - # Drop comments -- a hash without a space may be in a URL. - if ' #' in line: - line = line[:line.find(' #')] - # If there is a line continuation, drop it, and append the next line. - if line.endswith('\\'): - line = line[:-2].strip() - try: - line += next(lines) - except StopIteration: - return - yield Requirement(line) - - -class Requirement(packaging.requirements.Requirement): - def __init__(self, requirement_string): - """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!""" - try: - super(Requirement, self).__init__(requirement_string) - except packaging.requirements.InvalidRequirement as e: - raise RequirementParseError(str(e)) - self.unsafe_name = self.name - project_name = safe_name(self.name) - self.project_name, self.key = project_name, project_name.lower() - self.specs = [ - (spec.operator, spec.version) for spec in self.specifier] - self.extras = tuple(map(safe_extra, self.extras)) - self.hashCmp = ( - self.key, - self.specifier, - frozenset(self.extras), - str(self.marker) if self.marker else None, - ) - self.__hash = hash(self.hashCmp) - - def __eq__(self, other): - return ( - isinstance(other, Requirement) and - self.hashCmp == other.hashCmp - ) - - def __ne__(self, other): - return not self == other - - def __contains__(self, item): - if isinstance(item, Distribution): - if item.key != self.key: - return False - - item = item.version - - # Allow prereleases always in order to match the previous behavior of - # this method. In the future this should be smarter and follow PEP 440 - # more accurately. 
- return self.specifier.contains(item, prereleases=True) - - def __hash__(self): - return self.__hash - - def __repr__(self): - return "Requirement.parse(%r)" % str(self) - - @staticmethod - def parse(s): - req, = parse_requirements(s) - return req - - -def _always_object(classes): - """ - Ensure object appears in the mro even - for old-style classes. - """ - if object not in classes: - return classes + (object,) - return classes - - -def _find_adapter(registry, ob): - """Return an adapter factory for `ob` from `registry`""" - types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob)))) - for t in types: - if t in registry: - return registry[t] - - -def ensure_directory(path): - """Ensure that the parent directory of `path` exists""" - dirname = os.path.dirname(path) - py31compat.makedirs(dirname, exist_ok=True) - - -def _bypass_ensure_directory(path): - """Sandbox-bypassing version of ensure_directory()""" - if not WRITE_SUPPORT: - raise IOError('"os.mkdir" not supported on this platform.') - dirname, filename = split(path) - if dirname and filename and not isdir(dirname): - _bypass_ensure_directory(dirname) - try: - mkdir(dirname, 0o755) - except FileExistsError: - pass - - -def split_sections(s): - """Split a string or iterable thereof into (section, content) pairs - - Each ``section`` is a stripped version of the section header ("[section]") - and each ``content`` is a list of stripped lines excluding blank lines and - comment-only lines. If there are any such lines before the first section - header, they're returned in a first ``section`` of ``None``. 
- """ - section = None - content = [] - for line in yield_lines(s): - if line.startswith("["): - if line.endswith("]"): - if section or content: - yield section, content - section = line[1:-1].strip() - content = [] - else: - raise ValueError("Invalid section heading", line) - else: - content.append(line) - - # wrap up last segment - yield section, content - - -def _mkstemp(*args, **kw): - old_open = os.open - try: - # temporarily bypass sandboxing - os.open = os_open - return tempfile.mkstemp(*args, **kw) - finally: - # and then put it back - os.open = old_open - - -# Silence the PEP440Warning by default, so that end users don't get hit by it -# randomly just because they use pkg_resources. We want to append the rule -# because we want earlier uses of filterwarnings to take precedence over this -# one. -warnings.filterwarnings("ignore", category=PEP440Warning, append=True) - - -# from jaraco.functools 1.3 -def _call_aside(f, *args, **kwargs): - f(*args, **kwargs) - return f - - -@_call_aside -def _initialize(g=globals()): - "Set up global resource manager (deliberately not state-saved)" - manager = ResourceManager() - g['_manager'] = manager - g.update( - (name, getattr(manager, name)) - for name in dir(manager) - if not name.startswith('_') - ) - - -@_call_aside -def _initialize_master_working_set(): - """ - Prepare the master working set and make the ``require()`` - API available. - - This function has explicit effects on the global state - of pkg_resources. It is intended to be invoked once at - the initialization of this module. - - Invocation by other packages is unsupported and done - at their own risk. 
- """ - working_set = WorkingSet._build_master() - _declare_state('object', working_set=working_set) - - require = working_set.require - iter_entry_points = working_set.iter_entry_points - add_activation_listener = working_set.subscribe - run_script = working_set.run_script - # backward compatibility - run_main = run_script - # Activate all distributions already on sys.path with replace=False and - # ensure that all distributions added to the working set in the future - # (e.g. by calling ``require()``) will get activated as well, - # with higher priority (replace=True). - tuple( - dist.activate(replace=False) - for dist in working_set - ) - add_activation_listener( - lambda dist: dist.activate(replace=True), - existing=False, - ) - working_set.entries = [] - # match order - list(map(working_set.add_entry, sys.path)) - globals().update(locals()) - -class PkgResourcesDeprecationWarning(Warning): - """ - Base class for warning about deprecations in ``pkg_resources`` - - This class is not derived from ``DeprecationWarning``, and as such is - visible by default. - """ diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pkg_resources/py31compat.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pkg_resources/py31compat.py deleted file mode 100644 index a2d3007..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pkg_resources/py31compat.py +++ /dev/null @@ -1,23 +0,0 @@ -import os -import errno -import sys - -from pip._vendor import six - - -def _makedirs_31(path, exist_ok=False): - try: - os.makedirs(path) - except OSError as exc: - if not exist_ok or exc.errno != errno.EEXIST: - raise - - -# rely on compatibility behavior until mode considerations -# and exists_ok considerations are disentangled. 
-# See https://github.com/pypa/setuptools/pull/1083#issuecomment-315168663 -needs_makedirs = ( - six.PY2 or - (3, 4) <= sys.version_info < (3, 4, 1) -) -makedirs = _makedirs_31 if needs_makedirs else os.makedirs diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/progress/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/progress/__init__.py deleted file mode 100644 index a41f65d..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/progress/__init__.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com> -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -from __future__ import division - -from collections import deque -from datetime import timedelta -from math import ceil -from sys import stderr -from time import time - - -__version__ = '1.4' - - -class Infinite(object): - file = stderr - sma_window = 10 # Simple Moving Average window - - def __init__(self, *args, **kwargs): - self.index = 0 - self.start_ts = time() - self.avg = 0 - self._ts = self.start_ts - self._xput = deque(maxlen=self.sma_window) - for key, val in kwargs.items(): - setattr(self, key, val) - - def __getitem__(self, key): - if key.startswith('_'): - return None - return getattr(self, key, None) - - @property - def elapsed(self): - return int(time() - self.start_ts) - - @property - def elapsed_td(self): - return timedelta(seconds=self.elapsed) - - def update_avg(self, n, dt): - if n > 0: - self._xput.append(dt / n) - self.avg = sum(self._xput) / len(self._xput) - - def update(self): - pass - - def start(self): - pass - - def finish(self): - pass - - def next(self, n=1): - now = time() - dt = now - self._ts - self.update_avg(n, dt) - self._ts = now - self.index = self.index + n - self.update() - - def iter(self, it): - try: - for x in it: - yield x - self.next() - finally: - self.finish() - - -class Progress(Infinite): - def __init__(self, *args, **kwargs): - super(Progress, self).__init__(*args, **kwargs) - self.max = kwargs.get('max', 100) - - @property - def eta(self): - return int(ceil(self.avg * self.remaining)) - - @property - def eta_td(self): - return timedelta(seconds=self.eta) - - @property - def percent(self): - return self.progress * 100 - - @property - def progress(self): - return min(1, self.index / self.max) - - @property - def remaining(self): - return max(self.max - self.index, 0) - - def start(self): - self.update() - - def goto(self, index): - incr = index - self.index - self.next(incr) - - def iter(self, it): - try: - self.max = len(it) - except TypeError: - pass - - try: - for x in it: - yield x - self.next() - finally: - 
self.finish() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/progress/bar.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/progress/bar.py deleted file mode 100644 index 025e61c..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/progress/bar.py +++ /dev/null @@ -1,94 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com> -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -from __future__ import unicode_literals - -import sys - -from . 
import Progress -from .helpers import WritelnMixin - - -class Bar(WritelnMixin, Progress): - width = 32 - message = '' - suffix = '%(index)d/%(max)d' - bar_prefix = ' |' - bar_suffix = '| ' - empty_fill = ' ' - fill = '#' - hide_cursor = True - - def update(self): - filled_length = int(self.width * self.progress) - empty_length = self.width - filled_length - - message = self.message % self - bar = self.fill * filled_length - empty = self.empty_fill * empty_length - suffix = self.suffix % self - line = ''.join([message, self.bar_prefix, bar, empty, self.bar_suffix, - suffix]) - self.writeln(line) - - -class ChargingBar(Bar): - suffix = '%(percent)d%%' - bar_prefix = ' ' - bar_suffix = ' ' - empty_fill = '∙' - fill = 'â–ˆ' - - -class FillingSquaresBar(ChargingBar): - empty_fill = 'â–¢' - fill = 'â–£' - - -class FillingCirclesBar(ChargingBar): - empty_fill = 'â—¯' - fill = 'â—‰' - - -class IncrementalBar(Bar): - if sys.platform.startswith('win'): - phases = (u' ', u'â–Œ', u'â–ˆ') - else: - phases = (' ', 'â–', 'â–Ž', 'â–', 'â–Œ', 'â–‹', 'â–Š', 'â–‰', 'â–ˆ') - - def update(self): - nphases = len(self.phases) - filled_len = self.width * self.progress - nfull = int(filled_len) # Number of full chars - phase = int((filled_len - nfull) * nphases) # Phase of last char - nempty = self.width - nfull # Number of empty chars - - message = self.message % self - bar = self.phases[-1] * nfull - current = self.phases[phase] if phase > 0 else '' - empty = self.empty_fill * max(0, nempty - len(current)) - suffix = self.suffix % self - line = ''.join([message, self.bar_prefix, bar, current, empty, - self.bar_suffix, suffix]) - self.writeln(line) - - -class PixelBar(IncrementalBar): - phases = ('â¡€', 'â¡„', '⡆', '⡇', '⣇', '⣧', '⣷', '⣿') - - -class ShadyBar(IncrementalBar): - phases = (' ', 'â–‘', 'â–’', 'â–“', 'â–ˆ') diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/progress/counter.py 
b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/progress/counter.py deleted file mode 100644 index 6b45a1e..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/progress/counter.py +++ /dev/null @@ -1,48 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com> -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -from __future__ import unicode_literals -from . 
import Infinite, Progress -from .helpers import WriteMixin - - -class Counter(WriteMixin, Infinite): - message = '' - hide_cursor = True - - def update(self): - self.write(str(self.index)) - - -class Countdown(WriteMixin, Progress): - hide_cursor = True - - def update(self): - self.write(str(self.remaining)) - - -class Stack(WriteMixin, Progress): - phases = (' ', 'â–', 'â–‚', 'â–ƒ', 'â–„', 'â–…', 'â–†', 'â–‡', 'â–ˆ') - hide_cursor = True - - def update(self): - nphases = len(self.phases) - i = min(nphases - 1, int(self.progress * nphases)) - self.write(self.phases[i]) - - -class Pie(Stack): - phases = ('â—‹', 'â—”', 'â—‘', 'â—•', 'â—') diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/progress/helpers.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/progress/helpers.py deleted file mode 100644 index 0cde44e..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/progress/helpers.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com> -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -from __future__ import print_function - - -HIDE_CURSOR = '\x1b[?25l' -SHOW_CURSOR = '\x1b[?25h' - - -class WriteMixin(object): - hide_cursor = False - - def __init__(self, message=None, **kwargs): - super(WriteMixin, self).__init__(**kwargs) - self._width = 0 - if message: - self.message = message - - if self.file and self.file.isatty(): - if self.hide_cursor: - print(HIDE_CURSOR, end='', file=self.file) - print(self.message, end='', file=self.file) - self.file.flush() - - def write(self, s): - if self.file and self.file.isatty(): - b = '\b' * self._width - c = s.ljust(self._width) - print(b + c, end='', file=self.file) - self._width = max(self._width, len(s)) - self.file.flush() - - def finish(self): - if self.file and self.file.isatty() and self.hide_cursor: - print(SHOW_CURSOR, end='', file=self.file) - - -class WritelnMixin(object): - hide_cursor = False - - def __init__(self, message=None, **kwargs): - super(WritelnMixin, self).__init__(**kwargs) - if message: - self.message = message - - if self.file and self.file.isatty() and self.hide_cursor: - print(HIDE_CURSOR, end='', file=self.file) - - def clearln(self): - if self.file and self.file.isatty(): - print('\r\x1b[K', end='', file=self.file) - - def writeln(self, line): - if self.file and self.file.isatty(): - self.clearln() - print(line, end='', file=self.file) - self.file.flush() - - def finish(self): - if self.file and self.file.isatty(): - print(file=self.file) - if self.hide_cursor: - print(SHOW_CURSOR, end='', file=self.file) - - -from signal import signal, SIGINT -from sys import exit - - -class SigIntMixin(object): - """Registers a signal handler that calls finish on SIGINT""" - - def __init__(self, *args, **kwargs): - super(SigIntMixin, self).__init__(*args, **kwargs) - signal(SIGINT, self._sigint_handler) - - def _sigint_handler(self, signum, frame): - self.finish() - exit(0) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/progress/spinner.py 
b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/progress/spinner.py deleted file mode 100644 index 464c7b2..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/progress/spinner.py +++ /dev/null @@ -1,44 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com> -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -from __future__ import unicode_literals -from . 
import Infinite -from .helpers import WriteMixin - - -class Spinner(WriteMixin, Infinite): - message = '' - phases = ('-', '\\', '|', '/') - hide_cursor = True - - def update(self): - i = self.index % len(self.phases) - self.write(self.phases[i]) - - -class PieSpinner(Spinner): - phases = ['â—·', 'â—¶', 'â—µ', 'â—´'] - - -class MoonSpinner(Spinner): - phases = ['â—‘', 'â—’', 'â—', 'â—“'] - - -class LineSpinner(Spinner): - phases = ['⎺', '⎻', '⎼', '⎽', '⎼', '⎻'] - -class PixelSpinner(Spinner): - phases = ['⣾','⣷', '⣯', '⣟', 'â¡¿', '⢿', '⣻', '⣽'] diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pyparsing.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pyparsing.py deleted file mode 100644 index bea4d9c..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pyparsing.py +++ /dev/null @@ -1,6452 +0,0 @@ -#-*- coding: utf-8 -*- -# module pyparsing.py -# -# Copyright (c) 2003-2019 Paul T. McGuire -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# - -__doc__ = \ -""" -pyparsing module - Classes and methods to define and execute parsing grammars -============================================================================= - -The pyparsing module is an alternative approach to creating and -executing simple grammars, vs. the traditional lex/yacc approach, or the -use of regular expressions. With pyparsing, you don't need to learn -a new syntax for defining grammars or matching expressions - the parsing -module provides a library of classes that you use to construct the -grammar directly in Python. - -Here is a program to parse "Hello, World!" (or any greeting of the form -``"<salutation>, <addressee>!"``), built up using :class:`Word`, -:class:`Literal`, and :class:`And` elements -(the :class:`'+'<ParserElement.__add__>` operators create :class:`And` expressions, -and the strings are auto-converted to :class:`Literal` expressions):: - - from pip._vendor.pyparsing import Word, alphas - - # define grammar of a greeting - greet = Word(alphas) + "," + Word(alphas) + "!" - - hello = "Hello, World!" - print (hello, "->", greet.parseString(hello)) - -The program outputs the following:: - - Hello, World! -> ['Hello', ',', 'World', '!'] - -The Python representation of the grammar is quite readable, owing to the -self-explanatory class names, and the use of '+', '|' and '^' operators. - -The :class:`ParseResults` object returned from -:class:`ParserElement.parseString` can be -accessed as a nested list, a dictionary, or an object with named -attributes. 
- -The pyparsing module handles some of the problems that are typically -vexing when writing text parsers: - - - extra or missing whitespace (the above program will also handle - "Hello,World!", "Hello , World !", etc.) - - quoted strings - - embedded comments - - -Getting Started - ------------------ -Visit the classes :class:`ParserElement` and :class:`ParseResults` to -see the base classes that most other pyparsing -classes inherit from. Use the docstrings for examples of how to: - - - construct literal match expressions from :class:`Literal` and - :class:`CaselessLiteral` classes - - construct character word-group expressions using the :class:`Word` - class - - see how to create repetitive expressions using :class:`ZeroOrMore` - and :class:`OneOrMore` classes - - use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`, - and :class:`'&'<Each>` operators to combine simple expressions into - more complex ones - - associate names with your parsed results using - :class:`ParserElement.setResultsName` - - find some helpful expression short-cuts like :class:`delimitedList` - and :class:`oneOf` - - find more useful common expressions in the :class:`pyparsing_common` - namespace class -""" - -__version__ = "2.3.1" -__versionTime__ = "09 Jan 2019 23:26 UTC" -__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>" - -import string -from weakref import ref as wkref -import copy -import sys -import warnings -import re -import sre_constants -import collections -import pprint -import traceback -import types -from datetime import datetime - -try: - # Python 3 - from itertools import filterfalse -except ImportError: - from itertools import ifilterfalse as filterfalse - -try: - from _thread import RLock -except ImportError: - from threading import RLock - -try: - # Python 3 - from collections.abc import Iterable - from collections.abc import MutableMapping -except ImportError: - # Python 2.7 - from collections import Iterable - from collections import MutableMapping 
- -try: - from collections import OrderedDict as _OrderedDict -except ImportError: - try: - from ordereddict import OrderedDict as _OrderedDict - except ImportError: - _OrderedDict = None - -try: - from types import SimpleNamespace -except ImportError: - class SimpleNamespace: pass - - -#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) ) - -__all__ = [ -'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty', -'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal', -'PrecededBy', 'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or', -'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException', -'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException', -'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', -'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', 'Char', -'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col', -'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString', -'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums', -'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno', -'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral', -'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables', -'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', -'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd', -'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute', -'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass', -'CloseMatch', 'tokenMap', 'pyparsing_common', 'pyparsing_unicode', 
'unicode_set', -] - -system_version = tuple(sys.version_info)[:3] -PY_3 = system_version[0] == 3 -if PY_3: - _MAX_INT = sys.maxsize - basestring = str - unichr = chr - unicode = str - _ustr = str - - # build list of single arg builtins, that can be used as parse actions - singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max] - -else: - _MAX_INT = sys.maxint - range = xrange - - def _ustr(obj): - """Drop-in replacement for str(obj) that tries to be Unicode - friendly. It first tries str(obj). If that fails with - a UnicodeEncodeError, then it tries unicode(obj). It then - < returns the unicode object | encodes it with the default - encoding | ... >. - """ - if isinstance(obj,unicode): - return obj - - try: - # If this works, then _ustr(obj) has the same behaviour as str(obj), so - # it won't break any existing code. - return str(obj) - - except UnicodeEncodeError: - # Else encode it - ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace') - xmlcharref = Regex(r'&#\d+;') - xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:]) - return xmlcharref.transformString(ret) - - # build list of single arg builtins, tolerant of Python version, that can be used as parse actions - singleArgBuiltins = [] - import __builtin__ - for fname in "sum len sorted reversed list tuple set any all min max".split(): - try: - singleArgBuiltins.append(getattr(__builtin__,fname)) - except AttributeError: - continue - -_generatorType = type((y for y in range(1))) - -def _xml_escape(data): - """Escape &, <, >, ", ', etc. 
in a string of data.""" - - # ampersand must be replaced first - from_symbols = '&><"\'' - to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split()) - for from_,to_ in zip(from_symbols, to_symbols): - data = data.replace(from_, to_) - return data - -alphas = string.ascii_uppercase + string.ascii_lowercase -nums = "0123456789" -hexnums = nums + "ABCDEFabcdef" -alphanums = alphas + nums -_bslash = chr(92) -printables = "".join(c for c in string.printable if c not in string.whitespace) - -class ParseBaseException(Exception): - """base exception class for all parsing runtime exceptions""" - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__( self, pstr, loc=0, msg=None, elem=None ): - self.loc = loc - if msg is None: - self.msg = pstr - self.pstr = "" - else: - self.msg = msg - self.pstr = pstr - self.parserElement = elem - self.args = (pstr, loc, msg) - - @classmethod - def _from_exception(cls, pe): - """ - internal factory method to simplify creating one type of ParseException - from another - avoids having __init__ signature conflicts among subclasses - """ - return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement) - - def __getattr__( self, aname ): - """supported attributes by name are: - - lineno - returns the line number of the exception text - - col - returns the column number of the exception text - - line - returns the line containing the exception text - """ - if( aname == "lineno" ): - return lineno( self.loc, self.pstr ) - elif( aname in ("col", "column") ): - return col( self.loc, self.pstr ) - elif( aname == "line" ): - return line( self.loc, self.pstr ) - else: - raise AttributeError(aname) - - def __str__( self ): - return "%s (at char %d), (line:%d, col:%d)" % \ - ( self.msg, self.loc, self.lineno, self.column ) - def __repr__( self ): - return _ustr(self) - def markInputline( self, markerString = ">!<" ): - """Extracts the exception line from the input string, and marks - 
    @staticmethod
    def explain(exc, depth=16):
        """
        Method to take an exception and translate the Python internal traceback into a list
        of the pyparsing expressions that caused the exception to be raised.

        Parameters:

         - exc - exception raised during parsing (need not be a ParseException, in support
           of Python exceptions that might be raised in a parse action)
         - depth (default=16) - number of levels back in the stack trace to list expression
           and function names; if None, the full stack trace names will be listed; if 0, only
           the failing input line, marker, and exception string will be shown

        Returns a multi-line string listing the ParserElements and/or function names in the
        exception's stack trace.

        Note: the diagnostic output will include string representations of the expressions
        that failed to parse. These representations will be more helpful if you use `setName` to
        give identifiable names to your expressions. Otherwise they will use the default string
        forms, which may be cryptic to read.

        explain() is only supported under Python 3.
        """
        import inspect

        if depth is None:
            # depth=None means "no limit": fall back to the interpreter's own cap
            depth = sys.getrecursionlimit()
        ret = []
        if isinstance(exc, ParseBaseException):
            # show the failing input line with a caret marking the error column
            ret.append(exc.line)
            ret.append(' ' * (exc.col - 1) + '^')
        ret.append("{0}: {1}".format(type(exc).__name__, exc))

        if depth > 0:
            # walk the innermost frames of the exception's traceback and
            # translate each one into an expression or function name
            callers = inspect.getinnerframes(exc.__traceback__, context=depth)
            seen = set()
            for i, ff in enumerate(callers[-depth:]):
                frm = ff.frame

                f_self = frm.f_locals.get('self', None)
                if isinstance(f_self, ParserElement):
                    # only report the actual parse entry points, and report
                    # each expression object at most once
                    if frm.f_code.co_name not in ('parseImpl', '_parseNoCache'):
                        continue
                    if f_self in seen:
                        continue
                    seen.add(f_self)

                    self_type = type(f_self)
                    ret.append("{0}.{1} - {2}".format(self_type.__module__,
                                                      self_type.__name__,
                                                      f_self))
                elif f_self is not None:
                    # a method on some non-ParserElement object (e.g. a parse
                    # action defined as a method)
                    self_type = type(f_self)
                    ret.append("{0}.{1}".format(self_type.__module__,
                                                self_type.__name__))
                else:
                    # plain function frame; skip pyparsing's own arity wrapper
                    # and the module-level frame
                    code = frm.f_code
                    if code.co_name in ('wrapper', '<module>'):
                        continue

                    ret.append("{0}".format(code.co_name))

                depth -= 1
                if not depth:
                    break

        return '\n'.join(ret)
- #~ Setting the values as None causes no change to be made. - #~ """ - #~ def __init_( self, newstring, restartLoc ): - #~ self.newParseText = newstring - #~ self.reparseLoc = restartLoc - -class RecursiveGrammarException(Exception): - """exception thrown by :class:`ParserElement.validate` if the - grammar could be improperly recursive - """ - def __init__( self, parseElementList ): - self.parseElementTrace = parseElementList - - def __str__( self ): - return "RecursiveGrammarException: %s" % self.parseElementTrace - -class _ParseResultsWithOffset(object): - def __init__(self,p1,p2): - self.tup = (p1,p2) - def __getitem__(self,i): - return self.tup[i] - def __repr__(self): - return repr(self.tup[0]) - def setOffset(self,i): - self.tup = (self.tup[0],i) - -class ParseResults(object): - """Structured parse results, to provide multiple means of access to - the parsed data: - - - as a list (``len(results)``) - - by list index (``results[0], results[1]``, etc.) - - by attribute (``results.<resultsName>`` - see :class:`ParserElement.setResultsName`) - - Example:: - - integer = Word(nums) - date_str = (integer.setResultsName("year") + '/' - + integer.setResultsName("month") + '/' - + integer.setResultsName("day")) - # equivalent form: - # date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - # parseString returns a ParseResults object - result = date_str.parseString("1999/12/31") - - def test(s, fn=repr): - print("%s -> %s" % (s, fn(eval(s)))) - test("list(result)") - test("result[0]") - test("result['month']") - test("result.day") - test("'month' in result") - test("'minutes' in result") - test("result.dump()", str) - - prints:: - - list(result) -> ['1999', '/', '12', '/', '31'] - result[0] -> '1999' - result['month'] -> '12' - result.day -> '31' - 'month' in result -> True - 'minutes' in result -> False - result.dump() -> ['1999', '/', '12', '/', '31'] - - day: 31 - - month: 12 - - year: 1999 - """ - def __new__(cls, toklist=None, name=None, 
asList=True, modal=True ): - if isinstance(toklist, cls): - return toklist - retobj = object.__new__(cls) - retobj.__doinit = True - return retobj - - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ): - if self.__doinit: - self.__doinit = False - self.__name = None - self.__parent = None - self.__accumNames = {} - self.__asList = asList - self.__modal = modal - if toklist is None: - toklist = [] - if isinstance(toklist, list): - self.__toklist = toklist[:] - elif isinstance(toklist, _generatorType): - self.__toklist = list(toklist) - else: - self.__toklist = [toklist] - self.__tokdict = dict() - - if name is not None and name: - if not modal: - self.__accumNames[name] = 0 - if isinstance(name,int): - name = _ustr(name) # will always return a str, but use _ustr for consistency - self.__name = name - if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])): - if isinstance(toklist,basestring): - toklist = [ toklist ] - if asList: - if isinstance(toklist,ParseResults): - self[name] = _ParseResultsWithOffset(ParseResults(toklist.__toklist), 0) - else: - self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0) - self[name].__name = name - else: - try: - self[name] = toklist[0] - except (KeyError,TypeError,IndexError): - self[name] = toklist - - def __getitem__( self, i ): - if isinstance( i, (int,slice) ): - return self.__toklist[i] - else: - if i not in self.__accumNames: - return self.__tokdict[i][-1][0] - else: - return ParseResults([ v[0] for v in self.__tokdict[i] ]) - - def __setitem__( self, k, v, isinstance=isinstance ): - if isinstance(v,_ParseResultsWithOffset): - self.__tokdict[k] = self.__tokdict.get(k,list()) + [v] - sub = v[0] - elif isinstance(k,(int,slice)): - self.__toklist[k] = v - sub = v - else: - self.__tokdict[k] = self.__tokdict.get(k,list()) + 
    def __delitem__( self, i ):
        # Delete a token by position (int/slice key) or remove a results
        # name (any other key).
        if isinstance(i,(int,slice)):
            mylen = len( self.__toklist )
            del self.__toklist[i]

            # convert int to slice
            if isinstance(i, int):
                if i < 0:
                    i += mylen
                i = slice(i, i+1)
            # get removed indices
            removed = list(range(*i.indices(mylen)))
            removed.reverse()
            # fixup indices in token dictionary: every stored
            # (value, position) pair whose position lies past a removed index
            # must shift left by one so names keep pointing at the right tokens
            for name,occurrences in self.__tokdict.items():
                for j in removed:
                    for k, (value, position) in enumerate(occurrences):
                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
        else:
            # non-positional key: drop the named result entirely (the tokens
            # themselves stay in the list)
            del self.__tokdict[i]
list(self.iterkeys()) - - def values( self ): - """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x).""" - return list(self.itervalues()) - - def items( self ): - """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x).""" - return list(self.iteritems()) - - def haskeys( self ): - """Since keys() returns an iterator, this method is helpful in bypassing - code that looks for the existence of any defined results names.""" - return bool(self.__tokdict) - - def pop( self, *args, **kwargs): - """ - Removes and returns item at specified index (default= ``last``). - Supports both ``list`` and ``dict`` semantics for ``pop()``. If - passed no argument or an integer argument, it will use ``list`` - semantics and pop tokens from the list of parsed tokens. If passed - a non-integer argument (most likely a string), it will use ``dict`` - semantics and pop the corresponding value from any defined results - names. A second default return value argument is supported, just as in - ``dict.pop()``. 
- - Example:: - - def remove_first(tokens): - tokens.pop(0) - print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] - print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321'] - - label = Word(alphas) - patt = label("LABEL") + OneOrMore(Word(nums)) - print(patt.parseString("AAB 123 321").dump()) - - # Use pop() in a parse action to remove named result (note that corresponding value is not - # removed from list form of results) - def remove_LABEL(tokens): - tokens.pop("LABEL") - return tokens - patt.addParseAction(remove_LABEL) - print(patt.parseString("AAB 123 321").dump()) - - prints:: - - ['AAB', '123', '321'] - - LABEL: AAB - - ['AAB', '123', '321'] - """ - if not args: - args = [-1] - for k,v in kwargs.items(): - if k == 'default': - args = (args[0], v) - else: - raise TypeError("pop() got an unexpected keyword argument '%s'" % k) - if (isinstance(args[0], int) or - len(args) == 1 or - args[0] in self): - index = args[0] - ret = self[index] - del self[index] - return ret - else: - defaultvalue = args[1] - return defaultvalue - - def get(self, key, defaultValue=None): - """ - Returns named result matching the given key, or if there is no - such name, then returns the given ``defaultValue`` or ``None`` if no - ``defaultValue`` is specified. - - Similar to ``dict.get()``. - - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parseString("1999/12/31") - print(result.get("year")) # -> '1999' - print(result.get("hour", "not specified")) # -> 'not specified' - print(result.get("hour")) # -> None - """ - if key in self: - return self[key] - else: - return defaultValue - - def insert( self, index, insStr ): - """ - Inserts new element at location index in the list of parsed tokens. - - Similar to ``list.insert()``. 
- - Example:: - - print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] - - # use a parse action to insert the parse location in the front of the parsed results - def insert_locn(locn, tokens): - tokens.insert(0, locn) - print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321'] - """ - self.__toklist.insert(index, insStr) - # fixup indices in token dictionary - for name,occurrences in self.__tokdict.items(): - for k, (value, position) in enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset(value, position + (position > index)) - - def append( self, item ): - """ - Add single element to end of ParseResults list of elements. - - Example:: - - print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] - - # use a parse action to compute the sum of the parsed integers, and add it to the end - def append_sum(tokens): - tokens.append(sum(map(int, tokens))) - print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444] - """ - self.__toklist.append(item) - - def extend( self, itemseq ): - """ - Add sequence of elements to end of ParseResults list of elements. - - Example:: - - patt = OneOrMore(Word(alphas)) - - # use a parse action to append the reverse of the matched strings, to make a palindrome - def make_palindrome(tokens): - tokens.extend(reversed([t[::-1] for t in tokens])) - return ''.join(tokens) - print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' - """ - if isinstance(itemseq, ParseResults): - self += itemseq - else: - self.__toklist.extend(itemseq) - - def clear( self ): - """ - Clear all elements and results names. 
- """ - del self.__toklist[:] - self.__tokdict.clear() - - def __getattr__( self, name ): - try: - return self[name] - except KeyError: - return "" - - if name in self.__tokdict: - if name not in self.__accumNames: - return self.__tokdict[name][-1][0] - else: - return ParseResults([ v[0] for v in self.__tokdict[name] ]) - else: - return "" - - def __add__( self, other ): - ret = self.copy() - ret += other - return ret - - def __iadd__( self, other ): - if other.__tokdict: - offset = len(self.__toklist) - addoffset = lambda a: offset if a<0 else a+offset - otheritems = other.__tokdict.items() - otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) ) - for (k,vlist) in otheritems for v in vlist] - for k,v in otherdictitems: - self[k] = v - if isinstance(v[0],ParseResults): - v[0].__parent = wkref(self) - - self.__toklist += other.__toklist - self.__accumNames.update( other.__accumNames ) - return self - - def __radd__(self, other): - if isinstance(other,int) and other == 0: - # useful for merging many ParseResults using sum() builtin - return self.copy() - else: - # this may raise a TypeError - so be it - return other + self - - def __repr__( self ): - return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) ) - - def __str__( self ): - return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']' - - def _asStringList( self, sep='' ): - out = [] - for item in self.__toklist: - if out and sep: - out.append(sep) - if isinstance( item, ParseResults ): - out += item._asStringList() - else: - out.append( _ustr(item) ) - return out - - def asList( self ): - """ - Returns the parse results as a nested list of matching tokens, all converted to strings. 
- - Example:: - - patt = OneOrMore(Word(alphas)) - result = patt.parseString("sldkj lsdkj sldkj") - # even though the result prints in string-like form, it is actually a pyparsing ParseResults - print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj'] - - # Use asList() to create an actual list - result_list = result.asList() - print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj'] - """ - return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist] - - def asDict( self ): - """ - Returns the named parse results as a nested dictionary. - - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parseString('12/31/1999') - print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) - - result_dict = result.asDict() - print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'} - - # even though a ParseResults supports dict-like access, sometime you just need to have a dict - import json - print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable - print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"} - """ - if PY_3: - item_fn = self.items - else: - item_fn = self.iteritems - - def toItem(obj): - if isinstance(obj, ParseResults): - if obj.haskeys(): - return obj.asDict() - else: - return [toItem(v) for v in obj] - else: - return obj - - return dict((k,toItem(v)) for k,v in item_fn()) - - def copy( self ): - """ - Returns a new copy of a :class:`ParseResults` object. 
- """ - ret = ParseResults( self.__toklist ) - ret.__tokdict = dict(self.__tokdict.items()) - ret.__parent = self.__parent - ret.__accumNames.update( self.__accumNames ) - ret.__name = self.__name - return ret - - def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ): - """ - (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names. - """ - nl = "\n" - out = [] - namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items() - for v in vlist) - nextLevelIndent = indent + " " - - # collapse out indents if formatting is not desired - if not formatted: - indent = "" - nextLevelIndent = "" - nl = "" - - selfTag = None - if doctag is not None: - selfTag = doctag - else: - if self.__name: - selfTag = self.__name - - if not selfTag: - if namedItemsOnly: - return "" - else: - selfTag = "ITEM" - - out += [ nl, indent, "<", selfTag, ">" ] - - for i,res in enumerate(self.__toklist): - if isinstance(res,ParseResults): - if i in namedItems: - out += [ res.asXML(namedItems[i], - namedItemsOnly and doctag is None, - nextLevelIndent, - formatted)] - else: - out += [ res.asXML(None, - namedItemsOnly and doctag is None, - nextLevelIndent, - formatted)] - else: - # individual token, see if there is a name for it - resTag = None - if i in namedItems: - resTag = namedItems[i] - if not resTag: - if namedItemsOnly: - continue - else: - resTag = "ITEM" - xmlBodyText = _xml_escape(_ustr(res)) - out += [ nl, nextLevelIndent, "<", resTag, ">", - xmlBodyText, - "</", resTag, ">" ] - - out += [ nl, indent, "</", selfTag, ">" ] - return "".join(out) - - def __lookup(self,sub): - for k,vlist in self.__tokdict.items(): - for v,loc in vlist: - if sub is v: - return k - return None - - def getName(self): - r""" - Returns the results name for this token expression. Useful when several - different expressions might match at a particular location. 
    def getName(self):
        r"""
        Returns the results name for this token expression. Useful when several
        different expressions might match at a particular location.

        Example::

            integer = Word(nums)
            ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
            house_number_expr = Suppress('#') + Word(nums, alphanums)
            user_data = (Group(house_number_expr)("house_number")
                        | Group(ssn_expr)("ssn")
                        | Group(integer)("age"))
            user_info = OneOrMore(user_data)

            result = user_info.parseString("22 111-22-3333 #221B")
            for item in result:
                print(item.getName(), ':', item[0])

        prints::

            age : 22
            ssn : 111-22-3333
            house_number : 221B
        """
        if self.__name:
            # this ParseResults was built with an explicit name
            return self.__name
        elif self.__parent:
            # ask the parent (held via weakref) which of its names maps to us
            par = self.__parent()
            if par:
                return par.__lookup(self)
            else:
                # parent has already been garbage-collected
                return None
        elif (len(self) == 1 and
               len(self.__tokdict) == 1 and
               next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
            # single token carrying a single name that spans the whole result
            return next(iter(self.__tokdict.keys()))
        else:
            return None
    def dump(self, indent='', depth=0, full=True):
        """
        Diagnostic method for listing out the contents of
        a :class:`ParseResults`. Accepts an optional ``indent`` argument so
        that this string can be embedded in a nested display of other data.

        Example::

            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

            result = date_str.parseString('12/31/1999')
            print(result.dump())

        prints::

            ['12', '/', '31', '/', '1999']
            - day: 1999
            - month: 31
            - year: 12
        """
        out = []
        NL = '\n'
        # first line: the plain list form of the results
        out.append( indent+_ustr(self.asList()) )
        if full:
            if self.haskeys():
                # named results: one "- name: value" line each, sorted by name
                items = sorted((str(k), v) for k,v in self.items())
                for k,v in items:
                    if out:
                        out.append(NL)
                    out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
                    if isinstance(v,ParseResults):
                        if v:
                            # non-empty sub-results: recurse one level deeper
                            out.append( v.dump(indent,depth+1) )
                        else:
                            out.append(_ustr(v))
                    else:
                        out.append(repr(v))
            elif any(isinstance(vv,ParseResults) for vv in self):
                # no names at this level, but nested ParseResults below:
                # list each element with its positional index
                v = self
                for i,vv in enumerate(v):
                    if isinstance(vv,ParseResults):
                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) ))
                    else:
                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv)))

        return "".join(out)
def col(loc, strg):
    """Return the current column within a string, counting newlines as line
    separators.  The first column is number 1.

    Note: the default parsing behavior is to expand tabs in the input string
    before starting the parsing process.  See
    :class:`ParserElement.parseString` for more information on parsing
    strings containing ``<TAB>`` s, and suggested methods to maintain a
    consistent view of the parsed string, the parse location, and line and
    column positions within the parsed string.
    """
    # a location sitting just after a newline is column 1 of the next line;
    # otherwise count characters back to the most recent newline (rfind
    # returns -1 when there is none, which yields loc+1 as required)
    if 0 < loc < len(strg) and strg[loc - 1] == '\n':
        return 1
    return loc - strg.rfind("\n", 0, loc)
def line(loc, strg):
    """Return the full line of text from ``strg`` containing location
    ``loc``, counting newlines as line separators.
    """
    # slice between the newline preceding loc (rfind gives -1, i.e. the
    # start of the string, when there is none) and the newline at/after loc
    # (end of string when there is none)
    start = strg.rfind("\n", 0, loc) + 1
    end = strg.find("\n", loc)
    return strg[start:] if end < 0 else strg[start:end]
def _trim_arity(func, maxargs=2):
    """Internal helper: wrap *func* so it can be invoked with the full
    (s, loc, toks) argument set while accepting parse actions that declare
    fewer parameters.  The correct arity is discovered by trial (catching
    TypeError from our own call site only) and cached in ``limit`` so the
    probing cost is paid just once per action.
    """
    if func in singleArgBuiltins:
        # builtins such as len/sum take exactly one argument - pass tokens only
        return lambda s,l,t: func(t)
    limit = [0]
    foundArity = [False]

    # traceback return data structure changed in Py3.5 - normalize back to plain tuples
    if system_version[:2] >= (3,5):
        def extract_stack(limit=0):
            # special handling for Python 3.5.0 - extra deep call stack by 1
            offset = -3 if system_version == (3,5,0) else -2
            frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
            return [frame_summary[:2]]
        def extract_tb(tb, limit=0):
            frames = traceback.extract_tb(tb, limit=limit)
            frame_summary = frames[-1]
            return [frame_summary[:2]]
    else:
        extract_stack = traceback.extract_stack
        extract_tb = traceback.extract_tb

    # synthesize what would be returned by traceback.extract_stack at the call to
    # user's parse action 'func', so that we don't incur call penalty at parse time

    LINE_DIFF = 6
    # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
    # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
    this_line = extract_stack(limit=2)[-1]
    pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)

    def wrapper(*args):
        while 1:
            try:
                ret = func(*args[limit[0]:])
                foundArity[0] = True
                return ret
            except TypeError:
                # re-raise TypeErrors if they did not come from our arity testing
                if foundArity[0]:
                    raise
                else:
                    try:
                        tb = sys.exc_info()[-1]
                        if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
                            raise
                    finally:
                        del tb

                if limit[0] <= maxargs:
                    limit[0] += 1
                    continue
                raise

    # copy func name to wrapper for sensible debug output
    # (parse actions may be plain functions, methods, or callable objects)
    func_name = "<parse action>"
    try:
        func_name = getattr(func, '__name__',
                            getattr(func, '__class__').__name__)
    except Exception:
        func_name = str(func)
    wrapper.__name__ = func_name

    return wrapper
newline - OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] - - # change to just treat newline as significant - ParserElement.setDefaultWhitespaceChars(" \t") - OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def'] - """ - ParserElement.DEFAULT_WHITE_CHARS = chars - - @staticmethod - def inlineLiteralsUsing(cls): - """ - Set class to be used for inclusion of string literals into a parser. - - Example:: - - # default literal class used is Literal - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] - - - # change to Suppress - ParserElement.inlineLiteralsUsing(Suppress) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - date_str.parseString("1999/12/31") # -> ['1999', '12', '31'] - """ - ParserElement._literalStringClass = cls - - def __init__( self, savelist=False ): - self.parseAction = list() - self.failAction = None - #~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall - self.strRepr = None - self.resultsName = None - self.saveAsList = savelist - self.skipWhitespace = True - self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) - self.copyDefaultWhiteChars = True - self.mayReturnEmpty = False # used when checking for left-recursion - self.keepTabs = False - self.ignoreExprs = list() - self.debug = False - self.streamlined = False - self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index - self.errmsg = "" - self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all) - self.debugActions = ( None, None, None ) #custom debug actions - self.re = None - self.callPreparse = True # used to avoid redundant calls to preParse - self.callDuringTry = False - - def copy( self ): - """ - Make a copy of this :class:`ParserElement`. 
Useful for defining - different parse actions for the same parsing pattern, using copies of - the original parse element. - - Example:: - - integer = Word(nums).setParseAction(lambda toks: int(toks[0])) - integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K") - integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") - - print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M")) - - prints:: - - [5120, 100, 655360, 268435456] - - Equivalent form of ``expr.copy()`` is just ``expr()``:: - - integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") - """ - cpy = copy.copy( self ) - cpy.parseAction = self.parseAction[:] - cpy.ignoreExprs = self.ignoreExprs[:] - if self.copyDefaultWhiteChars: - cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS - return cpy - - def setName( self, name ): - """ - Define name for this expression, makes debugging and exception messages clearer. - - Example:: - - Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1) - Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) - """ - self.name = name - self.errmsg = "Expected " + self.name - if hasattr(self,"exception"): - self.exception.msg = self.errmsg - return self - - def setResultsName( self, name, listAllMatches=False ): - """ - Define name for referencing matching tokens as a nested attribute - of the returned parse results. - NOTE: this returns a *copy* of the original :class:`ParserElement` object; - this is so that the client can define a basic element, such as an - integer, and reference it in multiple places with different names. - - You can also set results names using the abbreviated syntax, - ``expr("name")`` in place of ``expr.setResultsName("name")`` - - see :class:`__call__`. 
- - Example:: - - date_str = (integer.setResultsName("year") + '/' - + integer.setResultsName("month") + '/' - + integer.setResultsName("day")) - - # equivalent form: - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - """ - newself = self.copy() - if name.endswith("*"): - name = name[:-1] - listAllMatches=True - newself.resultsName = name - newself.modalResults = not listAllMatches - return newself - - def setBreak(self,breakFlag = True): - """Method to invoke the Python pdb debugger when this element is - about to be parsed. Set ``breakFlag`` to True to enable, False to - disable. - """ - if breakFlag: - _parseMethod = self._parse - def breaker(instring, loc, doActions=True, callPreParse=True): - import pdb - pdb.set_trace() - return _parseMethod( instring, loc, doActions, callPreParse ) - breaker._originalParseMethod = _parseMethod - self._parse = breaker - else: - if hasattr(self._parse,"_originalParseMethod"): - self._parse = self._parse._originalParseMethod - return self - - def setParseAction( self, *fns, **kwargs ): - """ - Define one or more actions to perform when successfully matching parse element definition. - Parse action fn is a callable method with 0-3 arguments, called as ``fn(s,loc,toks)`` , - ``fn(loc,toks)`` , ``fn(toks)`` , or just ``fn()`` , where: - - - s = the original string being parsed (see note below) - - loc = the location of the matching substring - - toks = a list of the matched tokens, packaged as a :class:`ParseResults` object - - If the functions in fns modify the tokens, they can return them as the return - value from fn, and the modified list of tokens will replace the original. - Otherwise, fn does not need to return any value. - - Optional keyword arguments: - - callDuringTry = (default= ``False`` ) indicate if parse action should be run during lookaheads and alternate testing - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. 
See :class:`parseString for more - information on parsing strings containing ``<TAB>`` s, and suggested - methods to maintain a consistent view of the parsed string, the parse - location, and line and column positions within the parsed string. - - Example:: - - integer = Word(nums) - date_str = integer + '/' + integer + '/' + integer - - date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] - - # use parse action to convert to ints at parse time - integer = Word(nums).setParseAction(lambda toks: int(toks[0])) - date_str = integer + '/' + integer + '/' + integer - - # note that integer fields are now ints, not strings - date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31] - """ - self.parseAction = list(map(_trim_arity, list(fns))) - self.callDuringTry = kwargs.get("callDuringTry", False) - return self - - def addParseAction( self, *fns, **kwargs ): - """ - Add one or more parse actions to expression's list of parse actions. See :class:`setParseAction`. - - See examples in :class:`copy`. - """ - self.parseAction += list(map(_trim_arity, list(fns))) - self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) - return self - - def addCondition(self, *fns, **kwargs): - """Add a boolean predicate function to expression's list of parse actions. See - :class:`setParseAction` for function call signatures. Unlike ``setParseAction``, - functions passed to ``addCondition`` need to return boolean success/fail of the condition. 
- - Optional keyword arguments: - - message = define a custom message to be used in the raised exception - - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException - - Example:: - - integer = Word(nums).setParseAction(lambda toks: int(toks[0])) - year_int = integer.copy() - year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") - date_str = year_int + '/' + integer + '/' + integer - - result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1) - """ - msg = kwargs.get("message", "failed user-defined condition") - exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException - for fn in fns: - fn = _trim_arity(fn) - def pa(s,l,t): - if not bool(fn(s,l,t)): - raise exc_type(s,l,msg) - self.parseAction.append(pa) - self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) - return self - - def setFailAction( self, fn ): - """Define action to perform if parsing fails at this expression. - Fail acton fn is a callable function that takes the arguments - ``fn(s,loc,expr,err)`` where: - - s = string being parsed - - loc = location where expression match was attempted and failed - - expr = the parse expression that failed - - err = the exception thrown - The function returns no value. 
It may throw :class:`ParseFatalException` - if it is desired to stop parsing immediately.""" - self.failAction = fn - return self - - def _skipIgnorables( self, instring, loc ): - exprsFound = True - while exprsFound: - exprsFound = False - for e in self.ignoreExprs: - try: - while 1: - loc,dummy = e._parse( instring, loc ) - exprsFound = True - except ParseException: - pass - return loc - - def preParse( self, instring, loc ): - if self.ignoreExprs: - loc = self._skipIgnorables( instring, loc ) - - if self.skipWhitespace: - wt = self.whiteChars - instrlen = len(instring) - while loc < instrlen and instring[loc] in wt: - loc += 1 - - return loc - - def parseImpl( self, instring, loc, doActions=True ): - return loc, [] - - def postParse( self, instring, loc, tokenlist ): - return tokenlist - - #~ @profile - def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ): - debugging = ( self.debug ) #and doActions ) - - if debugging or self.failAction: - #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )) - if (self.debugActions[0] ): - self.debugActions[0]( instring, loc, self ) - if callPreParse and self.callPreparse: - preloc = self.preParse( instring, loc ) - else: - preloc = loc - tokensStart = preloc - try: - try: - loc,tokens = self.parseImpl( instring, preloc, doActions ) - except IndexError: - raise ParseException( instring, len(instring), self.errmsg, self ) - except ParseBaseException as err: - #~ print ("Exception raised:", err) - if self.debugActions[2]: - self.debugActions[2]( instring, tokensStart, self, err ) - if self.failAction: - self.failAction( instring, tokensStart, self, err ) - raise - else: - if callPreParse and self.callPreparse: - preloc = self.preParse( instring, loc ) - else: - preloc = loc - tokensStart = preloc - if self.mayIndexError or preloc >= len(instring): - try: - loc,tokens = self.parseImpl( instring, preloc, doActions ) - except IndexError: - raise ParseException( instring, 
len(instring), self.errmsg, self ) - else: - loc,tokens = self.parseImpl( instring, preloc, doActions ) - - tokens = self.postParse( instring, loc, tokens ) - - retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults ) - if self.parseAction and (doActions or self.callDuringTry): - if debugging: - try: - for fn in self.parseAction: - try: - tokens = fn( instring, tokensStart, retTokens ) - except IndexError as parse_action_exc: - exc = ParseException("exception raised in parse action") - exc.__cause__ = parse_action_exc - raise exc - - if tokens is not None and tokens is not retTokens: - retTokens = ParseResults( tokens, - self.resultsName, - asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), - modal=self.modalResults ) - except ParseBaseException as err: - #~ print "Exception raised in user parse action:", err - if (self.debugActions[2] ): - self.debugActions[2]( instring, tokensStart, self, err ) - raise - else: - for fn in self.parseAction: - try: - tokens = fn( instring, tokensStart, retTokens ) - except IndexError as parse_action_exc: - exc = ParseException("exception raised in parse action") - exc.__cause__ = parse_action_exc - raise exc - - if tokens is not None and tokens is not retTokens: - retTokens = ParseResults( tokens, - self.resultsName, - asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), - modal=self.modalResults ) - if debugging: - #~ print ("Matched",self,"->",retTokens.asList()) - if (self.debugActions[1] ): - self.debugActions[1]( instring, tokensStart, loc, self, retTokens ) - - return loc, retTokens - - def tryParse( self, instring, loc ): - try: - return self._parse( instring, loc, doActions=False )[0] - except ParseFatalException: - raise ParseException( instring, loc, self.errmsg, self) - - def canParseNext(self, instring, loc): - try: - self.tryParse(instring, loc) - except (ParseException, IndexError): - return False - else: - return True - - class 
    # Two equivalent implementations of a bounded FIFO cache used by packrat
    # parsing: the first relies on OrderedDict for insertion-order eviction,
    # the second (fallback when _OrderedDict is unavailable) pairs a plain
    # dict with a deque of keys kept in insertion order.
    if _OrderedDict is not None:
        class _FifoCache(object):
            """Fixed-size cache; evicts oldest entries once ``size`` is exceeded."""
            def __init__(self, size):
                # sentinel returned by get() for missing keys
                self.not_in_cache = not_in_cache = object()

                cache = _OrderedDict()

                def get(self, key):
                    # returns self.not_in_cache when key is absent
                    return cache.get(key, not_in_cache)

                def set(self, key, value):
                    cache[key] = value
                    # evict oldest entries (FIFO order) until within bound
                    while len(cache) > size:
                        try:
                            cache.popitem(False)
                        except KeyError:
                            pass

                def clear(self):
                    cache.clear()

                def cache_len(self):
                    return len(cache)

                # expose the closures as bound methods of this instance
                self.get = types.MethodType(get, self)
                self.set = types.MethodType(set, self)
                self.clear = types.MethodType(clear, self)
                self.__len__ = types.MethodType(cache_len, self)

    else:
        class _FifoCache(object):
            """Fixed-size cache; evicts oldest entries once ``size`` is exceeded."""
            def __init__(self, size):
                # sentinel returned by get() for missing keys
                self.not_in_cache = not_in_cache = object()

                cache = {}
                # deque bounded to ``size``; tracks insertion order of keys
                key_fifo = collections.deque([], size)

                def get(self, key):
                    # returns self.not_in_cache when key is absent
                    return cache.get(key, not_in_cache)

                def set(self, key, value):
                    cache[key] = value
                    # drop dict entries for keys about to fall off the deque
                    while len(key_fifo) > size:
                        cache.pop(key_fifo.popleft(), None)
                    key_fifo.append(key)

                def clear(self):
                    cache.clear()
                    key_fifo.clear()

                def cache_len(self):
                    return len(cache)

                # expose the closures as bound methods of this instance
                self.get = types.MethodType(get, self)
                self.set = types.MethodType(set, self)
                self.clear = types.MethodType(clear, self)
                self.__len__ = types.MethodType(cache_len, self)
    # this method gets repeatedly called during backtracking with the same arguments -
    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
    def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
        """Packrat-caching wrapper around :class:`_parseNoCache`.

        Results (and parse exceptions) are memoized in the class-level
        ``packrat_cache``, keyed by (expression, string, location, flags).
        """
        HIT, MISS = 0, 1
        # the full argument set is the cache key - different flags may parse differently
        lookup = (self, instring, loc, callPreParse, doActions)
        with ParserElement.packrat_cache_lock:
            cache = ParserElement.packrat_cache
            value = cache.get(lookup)
            if value is cache.not_in_cache:
                ParserElement.packrat_cache_stats[MISS] += 1
                try:
                    value = self._parseNoCache(instring, loc, doActions, callPreParse)
                except ParseBaseException as pe:
                    # cache a copy of the exception, without the traceback
                    cache.set(lookup, pe.__class__(*pe.args))
                    raise
                else:
                    # cache a copy of the tokens so later mutation by callers
                    # cannot corrupt the cached value
                    cache.set(lookup, (value[0], value[1].copy()))
                    return value
            else:
                ParserElement.packrat_cache_stats[HIT] += 1
                if isinstance(value, Exception):
                    # a cached failure - re-raise it
                    raise value
                # return a fresh copy of the cached tokens
                return (value[0], value[1].copy())
    def parseString( self, instring, parseAll=False ):
        """
        Execute the parse expression with the given string.
        This is the main interface to the client code, once the complete
        expression has been built.

        If you want the grammar to require that the entire input string be
        successfully parsed, then set ``parseAll`` to True (equivalent to ending
        the grammar with ``StringEnd()``).

        Note: ``parseString`` implicitly calls ``expandtabs()`` on the input string,
        in order to report proper column numbers in parse actions.
        If the input string contains tabs and
        the grammar uses parse actions that use the ``loc`` argument to index into the
        string being parsed, you can ensure you have a consistent view of the input
        string by:

        - calling ``parseWithTabs`` on your grammar before calling ``parseString``
          (see :class:`parseWithTabs`)
        - define your parse action using the full ``(s,loc,toks)`` signature, and
          reference the input string using the parse action's ``s`` argument
        - explicitly expand the tabs in your input string before calling
          ``parseString``

        Example::

            Word('a').parseString('aaaaabaaa')  # -> ['aaaaa']
            Word('a').parseString('aaaaabaaa', parseAll=True)  # -> Exception: Expected end of text
        """
        # clear the packrat cache - a new top-level parse invalidates prior results
        ParserElement.resetCache()
        if not self.streamlined:
            self.streamline()
            #~ self.saveAsList = True
        for e in self.ignoreExprs:
            e.streamline()
        if not self.keepTabs:
            instring = instring.expandtabs()
        try:
            loc, tokens = self._parse( instring, 0 )
            if parseAll:
                # require that only whitespace/ignorables remain after the match
                loc = self.preParse( instring, loc )
                se = Empty() + StringEnd()
                se._parse( instring, loc )
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                raise exc
        else:
            return tokens
    def transformString( self, instring ):
        """
        Extension to :class:`scanString`, to modify matching text with modified tokens that may
        be returned from a parse action.  To use ``transformString``, define a grammar and
        attach a parse action to it that modifies the returned token list.
        Invoking ``transformString()`` on a target string will then scan for matches,
        and replace the matched text patterns according to the logic in the parse
        action.  ``transformString()`` returns the resulting transformed string.

        Example::

            wd = Word(alphas)
            wd.setParseAction(lambda toks: toks[0].title())

            print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))

        prints::

            Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
        """
        out = []
        lastE = 0
        # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
        # keep string locs straight between transformString and scanString
        self.keepTabs = True
        try:
            for t,s,e in self.scanString( instring ):
                # copy through the unmatched text preceding this match
                out.append( instring[lastE:s] )
                if t:
                    # append the (possibly transformed) tokens in place of the match
                    if isinstance(t,ParseResults):
                        out += t.asList()
                    elif isinstance(t,list):
                        out += t
                    else:
                        out.append(t)
                lastE = e
            # copy through any trailing unmatched text
            out.append(instring[lastE:])
            out = [o for o in out if o]
            return "".join(map(_ustr,_flatten(out)))
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                raise exc
- - Example:: - - # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters - cap_word = Word(alphas.upper(), alphas.lower()) - - print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")) - - # the sum() builtin can be used to merge results into a single ParseResults object - print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))) - - prints:: - - [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']] - ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] - """ - try: - return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ]) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc - - def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False): - """ - Generator method to split a string using the given expression as a separator. - May be called with optional ``maxsplit`` argument, to limit the number of splits; - and the optional ``includeSeparators`` argument (default= ``False``), if the separating - matching text should be included in the split results. - - Example:: - - punc = oneOf(list(".,;:/-!?")) - print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) - - prints:: - - ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] - """ - splits = 0 - last = 0 - for t,s,e in self.scanString(instring, maxMatches=maxsplit): - yield instring[last:s] - if includeSeparators: - yield t[0] - last = e - yield instring[last:] - - def __add__(self, other ): - """ - Implementation of + operator - returns :class:`And`. Adding strings to a ParserElement - converts them to :class:`Literal`s by default. - - Example:: - - greet = Word(alphas) + "," + Word(alphas) + "!" - hello = "Hello, World!" 
- print (hello, "->", greet.parseString(hello)) - - prints:: - - Hello, World! -> ['Hello', ',', 'World', '!'] - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return And( [ self, other ] ) - - def __radd__(self, other ): - """ - Implementation of + operator when left operand is not a :class:`ParserElement` - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other + self - - def __sub__(self, other): - """ - Implementation of - operator, returns :class:`And` with error stop - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return self + And._ErrorStop() + other - - def __rsub__(self, other ): - """ - Implementation of - operator when left operand is not a :class:`ParserElement` - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other - self - - def __mul__(self,other): - """ - Implementation of * operator, allows use of ``expr * 3`` in place of - ``expr + expr + expr``. Expressions may also me multiplied by a 2-integer - tuple, similar to ``{min,max}`` multipliers in regular expressions. 
Tuples - may also include ``None`` as in: - - ``expr*(n,None)`` or ``expr*(n,)`` is equivalent - to ``expr*n + ZeroOrMore(expr)`` - (read as "at least n instances of ``expr``") - - ``expr*(None,n)`` is equivalent to ``expr*(0,n)`` - (read as "0 to n instances of ``expr``") - - ``expr*(None,None)`` is equivalent to ``ZeroOrMore(expr)`` - - ``expr*(1,None)`` is equivalent to ``OneOrMore(expr)`` - - Note that ``expr*(None,n)`` does not raise an exception if - more than n exprs exist in the input stream; that is, - ``expr*(None,n)`` does not enforce a maximum number of expr - occurrences. If this behavior is desired, then write - ``expr*(None,n) + ~expr`` - """ - if isinstance(other,int): - minElements, optElements = other,0 - elif isinstance(other,tuple): - other = (other + (None, None))[:2] - if other[0] is None: - other = (0, other[1]) - if isinstance(other[0],int) and other[1] is None: - if other[0] == 0: - return ZeroOrMore(self) - if other[0] == 1: - return OneOrMore(self) - else: - return self*other[0] + ZeroOrMore(self) - elif isinstance(other[0],int) and isinstance(other[1],int): - minElements, optElements = other - optElements -= minElements - else: - raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1])) - else: - raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other)) - - if minElements < 0: - raise ValueError("cannot multiply ParserElement by negative value") - if optElements < 0: - raise ValueError("second tuple value must be greater or equal to first tuple value") - if minElements == optElements == 0: - raise ValueError("cannot multiply ParserElement by 0 or (0,0)") - - if (optElements): - def makeOptionalList(n): - if n>1: - return Optional(self + makeOptionalList(n-1)) - else: - return Optional(self) - if minElements: - if minElements == 1: - ret = self + makeOptionalList(optElements) - else: - ret = And([self]*minElements) + makeOptionalList(optElements) - else: - ret = 
makeOptionalList(optElements) - else: - if minElements == 1: - ret = self - else: - ret = And([self]*minElements) - return ret - - def __rmul__(self, other): - return self.__mul__(other) - - def __or__(self, other ): - """ - Implementation of | operator - returns :class:`MatchFirst` - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return MatchFirst( [ self, other ] ) - - def __ror__(self, other ): - """ - Implementation of | operator when left operand is not a :class:`ParserElement` - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other | self - - def __xor__(self, other ): - """ - Implementation of ^ operator - returns :class:`Or` - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return Or( [ self, other ] ) - - def __rxor__(self, other ): - """ - Implementation of ^ operator when left operand is not a :class:`ParserElement` - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other ^ self - - def __and__(self, other ): - """ - Implementation of & operator - returns :class:`Each` - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( 
other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return Each( [ self, other ] ) - - def __rand__(self, other ): - """ - Implementation of & operator when left operand is not a :class:`ParserElement` - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other & self - - def __invert__( self ): - """ - Implementation of ~ operator - returns :class:`NotAny` - """ - return NotAny( self ) - - def __call__(self, name=None): - """ - Shortcut for :class:`setResultsName`, with ``listAllMatches=False``. - - If ``name`` is given with a trailing ``'*'`` character, then ``listAllMatches`` will be - passed as ``True``. - - If ``name` is omitted, same as calling :class:`copy`. - - Example:: - - # these are equivalent - userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno") - userdata = Word(alphas)("name") + Word(nums+"-")("socsecno") - """ - if name is not None: - return self.setResultsName(name) - else: - return self.copy() - - def suppress( self ): - """ - Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from - cluttering up returned output. - """ - return Suppress( self ) - - def leaveWhitespace( self ): - """ - Disables the skipping of whitespace before matching the characters in the - :class:`ParserElement`'s defined pattern. This is normally only used internally by - the pyparsing module, but may be needed in some whitespace-sensitive grammars. 
- """ - self.skipWhitespace = False - return self - - def setWhitespaceChars( self, chars ): - """ - Overrides the default whitespace chars - """ - self.skipWhitespace = True - self.whiteChars = chars - self.copyDefaultWhiteChars = False - return self - - def parseWithTabs( self ): - """ - Overrides default behavior to expand ``<TAB>``s to spaces before parsing the input string. - Must be called before ``parseString`` when the input grammar contains elements that - match ``<TAB>`` characters. - """ - self.keepTabs = True - return self - - def ignore( self, other ): - """ - Define expression to be ignored (e.g., comments) while doing pattern - matching; may be called repeatedly, to define multiple comment or other - ignorable patterns. - - Example:: - - patt = OneOrMore(Word(alphas)) - patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj'] - - patt.ignore(cStyleComment) - patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd'] - """ - if isinstance(other, basestring): - other = Suppress(other) - - if isinstance( other, Suppress ): - if other not in self.ignoreExprs: - self.ignoreExprs.append(other) - else: - self.ignoreExprs.append( Suppress( other.copy() ) ) - return self - - def setDebugActions( self, startAction, successAction, exceptionAction ): - """ - Enable display of debugging messages while doing pattern matching. - """ - self.debugActions = (startAction or _defaultStartDebugAction, - successAction or _defaultSuccessDebugAction, - exceptionAction or _defaultExceptionDebugAction) - self.debug = True - return self - - def setDebug( self, flag=True ): - """ - Enable display of debugging messages while doing pattern matching. - Set ``flag`` to True to enable, False to disable. 
- - Example:: - - wd = Word(alphas).setName("alphaword") - integer = Word(nums).setName("numword") - term = wd | integer - - # turn on debugging for wd - wd.setDebug() - - OneOrMore(term).parseString("abc 123 xyz 890") - - prints:: - - Match alphaword at loc 0(1,1) - Matched alphaword -> ['abc'] - Match alphaword at loc 3(1,4) - Exception raised:Expected alphaword (at char 4), (line:1, col:5) - Match alphaword at loc 7(1,8) - Matched alphaword -> ['xyz'] - Match alphaword at loc 11(1,12) - Exception raised:Expected alphaword (at char 12), (line:1, col:13) - Match alphaword at loc 15(1,16) - Exception raised:Expected alphaword (at char 15), (line:1, col:16) - - The output shown is that produced by the default debug actions - custom debug actions can be - specified using :class:`setDebugActions`. Prior to attempting - to match the ``wd`` expression, the debugging message ``"Match <exprname> at loc <n>(<line>,<col>)"`` - is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"`` - message is shown. Also note the use of :class:`setName` to assign a human-readable name to the expression, - which makes debugging and exception messages easier to understand - for instance, the default - name created for the :class:`Word` expression without calling ``setName`` is ``"W:(ABCD...)"``. - """ - if flag: - self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction ) - else: - self.debug = False - return self - - def __str__( self ): - return self.name - - def __repr__( self ): - return _ustr(self) - - def streamline( self ): - self.streamlined = True - self.strRepr = None - return self - - def checkRecursion( self, parseElementList ): - pass - - def validate( self, validateTrace=[] ): - """ - Check defined expressions for valid structure, check for infinite recursive definitions. 
- """ - self.checkRecursion( [] ) - - def parseFile( self, file_or_filename, parseAll=False ): - """ - Execute the parse expression on the given file or filename. - If a filename is specified (instead of a file object), - the entire file is opened, read, and closed before parsing. - """ - try: - file_contents = file_or_filename.read() - except AttributeError: - with open(file_or_filename, "r") as f: - file_contents = f.read() - try: - return self.parseString(file_contents, parseAll) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc - - def __eq__(self,other): - if isinstance(other, ParserElement): - return self is other or vars(self) == vars(other) - elif isinstance(other, basestring): - return self.matches(other) - else: - return super(ParserElement,self)==other - - def __ne__(self,other): - return not (self == other) - - def __hash__(self): - return hash(id(self)) - - def __req__(self,other): - return self == other - - def __rne__(self,other): - return not (self == other) - - def matches(self, testString, parseAll=True): - """ - Method for quick testing of a parser against a test string. Good for simple - inline microtests of sub expressions while building up larger parser. - - Parameters: - - testString - to test against this expression for a match - - parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests - - Example:: - - expr = Word(nums) - assert expr.matches("100") - """ - try: - self.parseString(_ustr(testString), parseAll=parseAll) - return True - except ParseBaseException: - return False - - def runTests(self, tests, parseAll=True, comment='#', - fullDump=True, printResults=True, failureTests=False, postParse=None): - """ - Execute the parse expression on a series of test strings, showing each - test, the parsed results or where the parse failed. 
Quick and easy way to - run a parse expression against a list of sample strings. - - Parameters: - - tests - a list of separate test strings, or a multiline string of test strings - - parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests - - comment - (default= ``'#'``) - expression for indicating embedded comments in the test - string; pass None to disable comment filtering - - fullDump - (default= ``True``) - dump results as list followed by results names in nested outline; - if False, only dump nested list - - printResults - (default= ``True``) prints test output to stdout - - failureTests - (default= ``False``) indicates if these tests are expected to fail parsing - - postParse - (default= ``None``) optional callback for successful parse results; called as - `fn(test_string, parse_results)` and returns a string to be added to the test output - - Returns: a (success, results) tuple, where success indicates that all tests succeeded - (or failed if ``failureTests`` is True), and the results contain a list of lines of each - test's output - - Example:: - - number_expr = pyparsing_common.number.copy() - - result = number_expr.runTests(''' - # unsigned integer - 100 - # negative integer - -100 - # float with scientific notation - 6.02e23 - # integer with scientific notation - 1e-12 - ''') - print("Success" if result[0] else "Failed!") - - result = number_expr.runTests(''' - # stray character - 100Z - # missing leading digit before '.' - -.100 - # too many '.' - 3.14.159 - ''', failureTests=True) - print("Success" if result[0] else "Failed!") - - prints:: - - # unsigned integer - 100 - [100] - - # negative integer - -100 - [-100] - - # float with scientific notation - 6.02e23 - [6.02e+23] - - # integer with scientific notation - 1e-12 - [1e-12] - - Success - - # stray character - 100Z - ^ - FAIL: Expected end of text (at char 3), (line:1, col:4) - - # missing leading digit before '.' 
- -.100 - ^ - FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1) - - # too many '.' - 3.14.159 - ^ - FAIL: Expected end of text (at char 4), (line:1, col:5) - - Success - - Each test string must be on a single line. If you want to test a string that spans multiple - lines, create a test like this:: - - expr.runTest(r"this is a test\\n of strings that spans \\n 3 lines") - - (Note that this is a raw string literal, you must include the leading 'r'.) - """ - if isinstance(tests, basestring): - tests = list(map(str.strip, tests.rstrip().splitlines())) - if isinstance(comment, basestring): - comment = Literal(comment) - allResults = [] - comments = [] - success = True - for t in tests: - if comment is not None and comment.matches(t, False) or comments and not t: - comments.append(t) - continue - if not t: - continue - out = ['\n'.join(comments), t] - comments = [] - try: - # convert newline marks to actual newlines, and strip leading BOM if present - t = t.replace(r'\n','\n').lstrip('\ufeff') - result = self.parseString(t, parseAll=parseAll) - out.append(result.dump(full=fullDump)) - success = success and not failureTests - if postParse is not None: - try: - pp_value = postParse(t, result) - if pp_value is not None: - out.append(str(pp_value)) - except Exception as e: - out.append("{0} failed: {1}: {2}".format(postParse.__name__, type(e).__name__, e)) - except ParseBaseException as pe: - fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" - if '\n' in t: - out.append(line(pe.loc, t)) - out.append(' '*(col(pe.loc,t)-1) + '^' + fatal) - else: - out.append(' '*pe.loc + '^' + fatal) - out.append("FAIL: " + str(pe)) - success = success and failureTests - result = pe - except Exception as exc: - out.append("FAIL-EXCEPTION: " + str(exc)) - success = success and failureTests - result = exc - - if printResults: - if fullDump: - out.append('') - print('\n'.join(out)) - - allResults.append((t, result)) - 
- return success, allResults - - -class Token(ParserElement): - """Abstract :class:`ParserElement` subclass, for defining atomic - matching patterns. - """ - def __init__( self ): - super(Token,self).__init__( savelist=False ) - - -class Empty(Token): - """An empty token, will always match. - """ - def __init__( self ): - super(Empty,self).__init__() - self.name = "Empty" - self.mayReturnEmpty = True - self.mayIndexError = False - - -class NoMatch(Token): - """A token that will never match. - """ - def __init__( self ): - super(NoMatch,self).__init__() - self.name = "NoMatch" - self.mayReturnEmpty = True - self.mayIndexError = False - self.errmsg = "Unmatchable token" - - def parseImpl( self, instring, loc, doActions=True ): - raise ParseException(instring, loc, self.errmsg, self) - - -class Literal(Token): - """Token to exactly match a specified string. - - Example:: - - Literal('blah').parseString('blah') # -> ['blah'] - Literal('blah').parseString('blahfooblah') # -> ['blah'] - Literal('blah').parseString('bla') # -> Exception: Expected "blah" - - For case-insensitive matching, use :class:`CaselessLiteral`. - - For keyword matching (force word break before and after the matched string), - use :class:`Keyword` or :class:`CaselessKeyword`. 
- """ - def __init__( self, matchString ): - super(Literal,self).__init__() - self.match = matchString - self.matchLen = len(matchString) - try: - self.firstMatchChar = matchString[0] - except IndexError: - warnings.warn("null string passed to Literal; use Empty() instead", - SyntaxWarning, stacklevel=2) - self.__class__ = Empty - self.name = '"%s"' % _ustr(self.match) - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = False - self.mayIndexError = False - - # Performance tuning: this routine gets called a *lot* - # if this is a single character match string and the first character matches, - # short-circuit as quickly as possible, and avoid calling startswith - #~ @profile - def parseImpl( self, instring, loc, doActions=True ): - if (instring[loc] == self.firstMatchChar and - (self.matchLen==1 or instring.startswith(self.match,loc)) ): - return loc+self.matchLen, self.match - raise ParseException(instring, loc, self.errmsg, self) -_L = Literal -ParserElement._literalStringClass = Literal - -class Keyword(Token): - """Token to exactly match a specified string as a keyword, that is, - it must be immediately followed by a non-keyword character. Compare - with :class:`Literal`: - - - ``Literal("if")`` will match the leading ``'if'`` in - ``'ifAndOnlyIf'``. - - ``Keyword("if")`` will not; it will only match the leading - ``'if'`` in ``'if x=1'``, or ``'if(y==2)'`` - - Accepts two optional constructor arguments in addition to the - keyword string: - - - ``identChars`` is a string of characters that would be valid - identifier characters, defaulting to all alphanumerics + "_" and - "$" - - ``caseless`` allows case-insensitive matching, default is ``False``. - - Example:: - - Keyword("start").parseString("start") # -> ['start'] - Keyword("start").parseString("starting") # -> Exception - - For case-insensitive matching, use :class:`CaselessKeyword`. 
- """ - DEFAULT_KEYWORD_CHARS = alphanums+"_$" - - def __init__( self, matchString, identChars=None, caseless=False ): - super(Keyword,self).__init__() - if identChars is None: - identChars = Keyword.DEFAULT_KEYWORD_CHARS - self.match = matchString - self.matchLen = len(matchString) - try: - self.firstMatchChar = matchString[0] - except IndexError: - warnings.warn("null string passed to Keyword; use Empty() instead", - SyntaxWarning, stacklevel=2) - self.name = '"%s"' % self.match - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = False - self.mayIndexError = False - self.caseless = caseless - if caseless: - self.caselessmatch = matchString.upper() - identChars = identChars.upper() - self.identChars = set(identChars) - - def parseImpl( self, instring, loc, doActions=True ): - if self.caseless: - if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and - (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and - (loc == 0 or instring[loc-1].upper() not in self.identChars) ): - return loc+self.matchLen, self.match - else: - if (instring[loc] == self.firstMatchChar and - (self.matchLen==1 or instring.startswith(self.match,loc)) and - (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and - (loc == 0 or instring[loc-1] not in self.identChars) ): - return loc+self.matchLen, self.match - raise ParseException(instring, loc, self.errmsg, self) - - def copy(self): - c = super(Keyword,self).copy() - c.identChars = Keyword.DEFAULT_KEYWORD_CHARS - return c - - @staticmethod - def setDefaultKeywordChars( chars ): - """Overrides the default Keyword chars - """ - Keyword.DEFAULT_KEYWORD_CHARS = chars - -class CaselessLiteral(Literal): - """Token to match a specified string, ignoring case of letters. - Note: the matched results will always be in the case of the given - match string, NOT the case of the input text. 
- - Example:: - - OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD'] - - (Contrast with example for :class:`CaselessKeyword`.) - """ - def __init__( self, matchString ): - super(CaselessLiteral,self).__init__( matchString.upper() ) - # Preserve the defining literal. - self.returnString = matchString - self.name = "'%s'" % self.returnString - self.errmsg = "Expected " + self.name - - def parseImpl( self, instring, loc, doActions=True ): - if instring[ loc:loc+self.matchLen ].upper() == self.match: - return loc+self.matchLen, self.returnString - raise ParseException(instring, loc, self.errmsg, self) - -class CaselessKeyword(Keyword): - """ - Caseless version of :class:`Keyword`. - - Example:: - - OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD'] - - (Contrast with example for :class:`CaselessLiteral`.) - """ - def __init__( self, matchString, identChars=None ): - super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True ) - -class CloseMatch(Token): - """A variation on :class:`Literal` which matches "close" matches, - that is, strings with at most 'n' mismatching characters. - :class:`CloseMatch` takes parameters: - - - ``match_string`` - string to be matched - - ``maxMismatches`` - (``default=1``) maximum number of - mismatches allowed to count as a match - - The results from a successful parse will contain the matched text - from the input string and the following named results: - - - ``mismatches`` - a list of the positions within the - match_string where mismatches were found - - ``original`` - the original match_string used to compare - against the input string - - If ``mismatches`` is an empty list, then the match was an exact - match. 
- - Example:: - - patt = CloseMatch("ATCATCGAATGGA") - patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) - patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) - - # exact match - patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) - - # close match allowing up to 2 mismatches - patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2) - patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) - """ - def __init__(self, match_string, maxMismatches=1): - super(CloseMatch,self).__init__() - self.name = match_string - self.match_string = match_string - self.maxMismatches = maxMismatches - self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches) - self.mayIndexError = False - self.mayReturnEmpty = False - - def parseImpl( self, instring, loc, doActions=True ): - start = loc - instrlen = len(instring) - maxloc = start + len(self.match_string) - - if maxloc <= instrlen: - match_string = self.match_string - match_stringloc = 0 - mismatches = [] - maxMismatches = self.maxMismatches - - for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)): - src,mat = s_m - if src != mat: - mismatches.append(match_stringloc) - if len(mismatches) > maxMismatches: - break - else: - loc = match_stringloc + 1 - results = ParseResults([instring[start:loc]]) - results['original'] = self.match_string - results['mismatches'] = mismatches - return loc, results - - raise ParseException(instring, loc, self.errmsg, self) - - -class Word(Token): - """Token for matching words composed of allowed character sets. 
- Defined with string containing all allowed initial characters, an - optional string containing allowed body characters (if omitted, - defaults to the initial character set), and an optional minimum, - maximum, and/or exact length. The default value for ``min`` is - 1 (a minimum value < 1 is not valid); the default values for - ``max`` and ``exact`` are 0, meaning no maximum or exact - length restriction. An optional ``excludeChars`` parameter can - list characters that might be found in the input ``bodyChars`` - string; useful to define a word of all printables except for one or - two characters, for instance. - - :class:`srange` is useful for defining custom character set strings - for defining ``Word`` expressions, using range notation from - regular expression character sets. - - A common mistake is to use :class:`Word` to match a specific literal - string, as in ``Word("Address")``. Remember that :class:`Word` - uses the string argument to define *sets* of matchable characters. - This expression would match "Add", "AAA", "dAred", or any other word - made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an - exact literal string, use :class:`Literal` or :class:`Keyword`. - - pyparsing includes helper strings for building Words: - - - :class:`alphas` - - :class:`nums` - - :class:`alphanums` - - :class:`hexnums` - - :class:`alphas8bit` (alphabetic characters in ASCII range 128-255 - - accented, tilded, umlauted, etc.) - - :class:`punc8bit` (non-alphabetic characters in ASCII range - 128-255 - currency, symbols, superscripts, diacriticals, etc.) 
- - :class:`printables` (any non-whitespace character) - - Example:: - - # a word composed of digits - integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) - - # a word with a leading capital, and zero or more lowercase - capital_word = Word(alphas.upper(), alphas.lower()) - - # hostnames are alphanumeric, with leading alpha, and '-' - hostname = Word(alphas, alphanums+'-') - - # roman numeral (not a strict parser, accepts invalid mix of characters) - roman = Word("IVXLCDM") - - # any string of non-whitespace characters, except for ',' - csv_value = Word(printables, excludeChars=",") - """ - def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ): - super(Word,self).__init__() - if excludeChars: - initChars = ''.join(c for c in initChars if c not in excludeChars) - if bodyChars: - bodyChars = ''.join(c for c in bodyChars if c not in excludeChars) - self.initCharsOrig = initChars - self.initChars = set(initChars) - if bodyChars : - self.bodyCharsOrig = bodyChars - self.bodyChars = set(bodyChars) - else: - self.bodyCharsOrig = initChars - self.bodyChars = set(initChars) - - self.maxSpecified = max > 0 - - if min < 1: - raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted") - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.asKeyword = asKeyword - - if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0): - if self.bodyCharsOrig == self.initCharsOrig: - self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig) - elif len(self.initCharsOrig) == 1: - self.reString = "%s[%s]*" % \ - (re.escape(self.initCharsOrig), - _escapeRegexRangeChars(self.bodyCharsOrig),) - else: - self.reString = 
"[%s][%s]*" % \ - (_escapeRegexRangeChars(self.initCharsOrig), - _escapeRegexRangeChars(self.bodyCharsOrig),) - if self.asKeyword: - self.reString = r"\b"+self.reString+r"\b" - try: - self.re = re.compile( self.reString ) - except Exception: - self.re = None - - def parseImpl( self, instring, loc, doActions=True ): - if self.re: - result = self.re.match(instring,loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - return loc, result.group() - - if not(instring[ loc ] in self.initChars): - raise ParseException(instring, loc, self.errmsg, self) - - start = loc - loc += 1 - instrlen = len(instring) - bodychars = self.bodyChars - maxloc = start + self.maxLen - maxloc = min( maxloc, instrlen ) - while loc < maxloc and instring[loc] in bodychars: - loc += 1 - - throwException = False - if loc - start < self.minLen: - throwException = True - if self.maxSpecified and loc < instrlen and instring[loc] in bodychars: - throwException = True - if self.asKeyword: - if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars): - throwException = True - - if throwException: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - def __str__( self ): - try: - return super(Word,self).__str__() - except Exception: - pass - - - if self.strRepr is None: - - def charsAsStr(s): - if len(s)>4: - return s[:4]+"..." - else: - return s - - if ( self.initCharsOrig != self.bodyCharsOrig ): - self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) ) - else: - self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig) - - return self.strRepr - - -class Char(Word): - """A short-cut class for defining ``Word(characters, exact=1)``, - when defining a match of any single character in a string of - characters. 
- """ - def __init__(self, charset): - super(Char, self).__init__(charset, exact=1) - self.reString = "[%s]" % _escapeRegexRangeChars(self.initCharsOrig) - self.re = re.compile( self.reString ) - - -class Regex(Token): - r"""Token for matching strings that match a given regular - expression. Defined with string specifying the regular expression in - a form recognized by the stdlib Python `re module <https://docs.python.org/3/library/re.html>`_. - If the given regex contains named groups (defined using ``(?P<name>...)``), - these will be preserved as named parse results. - - Example:: - - realnum = Regex(r"[+-]?\d+\.\d*") - date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)') - # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression - roman = Regex(r"M{0,4}(CM|CD|D?{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") - """ - compiledREtype = type(re.compile("[A-Z]")) - def __init__( self, pattern, flags=0, asGroupList=False, asMatch=False): - """The parameters ``pattern`` and ``flags`` are passed - to the ``re.compile()`` function as-is. See the Python - `re module <https://docs.python.org/3/library/re.html>`_ module for an - explanation of the acceptable patterns and flags. 
- """ - super(Regex,self).__init__() - - if isinstance(pattern, basestring): - if not pattern: - warnings.warn("null string passed to Regex; use Empty() instead", - SyntaxWarning, stacklevel=2) - - self.pattern = pattern - self.flags = flags - - try: - self.re = re.compile(self.pattern, self.flags) - self.reString = self.pattern - except sre_constants.error: - warnings.warn("invalid pattern (%s) passed to Regex" % pattern, - SyntaxWarning, stacklevel=2) - raise - - elif isinstance(pattern, Regex.compiledREtype): - self.re = pattern - self.pattern = \ - self.reString = str(pattern) - self.flags = flags - - else: - raise ValueError("Regex may only be constructed with a string or a compiled RE object") - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.mayReturnEmpty = True - self.asGroupList = asGroupList - self.asMatch = asMatch - - def parseImpl( self, instring, loc, doActions=True ): - result = self.re.match(instring,loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - if self.asMatch: - ret = result - elif self.asGroupList: - ret = result.groups() - else: - ret = ParseResults(result.group()) - d = result.groupdict() - if d: - for k, v in d.items(): - ret[k] = v - return loc,ret - - def __str__( self ): - try: - return super(Regex,self).__str__() - except Exception: - pass - - if self.strRepr is None: - self.strRepr = "Re:(%s)" % repr(self.pattern) - - return self.strRepr - - def sub(self, repl): - """ - Return Regex with an attached parse action to transform the parsed - result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_. 
- - Example:: - - make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>") - print(make_html.transformString("h1:main title:")) - # prints "<h1>main title</h1>" - """ - if self.asGroupList: - warnings.warn("cannot use sub() with Regex(asGroupList=True)", - SyntaxWarning, stacklevel=2) - raise SyntaxError() - - if self.asMatch and callable(repl): - warnings.warn("cannot use sub() with a callable with Regex(asMatch=True)", - SyntaxWarning, stacklevel=2) - raise SyntaxError() - - if self.asMatch: - def pa(tokens): - return tokens[0].expand(repl) - else: - def pa(tokens): - return self.re.sub(repl, tokens[0]) - return self.addParseAction(pa) - -class QuotedString(Token): - r""" - Token for matching strings that are delimited by quoting characters. - - Defined with the following parameters: - - - quoteChar - string of one or more characters defining the - quote delimiting string - - escChar - character to escape quotes, typically backslash - (default= ``None`` ) - - escQuote - special quote sequence to escape an embedded quote - string (such as SQL's ``""`` to escape an embedded ``"``) - (default= ``None`` ) - - multiline - boolean indicating whether quotes can span - multiple lines (default= ``False`` ) - - unquoteResults - boolean indicating whether the matched text - should be unquoted (default= ``True`` ) - - endQuoteChar - string of one or more characters defining the - end of the quote delimited string (default= ``None`` => same as - quoteChar) - - convertWhitespaceEscapes - convert escaped whitespace - (``'\t'``, ``'\n'``, etc.) 
to actual whitespace - (default= ``True`` ) - - Example:: - - qs = QuotedString('"') - print(qs.searchString('lsjdf "This is the quote" sldjf')) - complex_qs = QuotedString('{{', endQuoteChar='}}') - print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf')) - sql_qs = QuotedString('"', escQuote='""') - print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) - - prints:: - - [['This is the quote']] - [['This is the "quote"']] - [['This is the quote with "embedded" quotes']] - """ - def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True): - super(QuotedString,self).__init__() - - # remove white space from quote chars - wont work anyway - quoteChar = quoteChar.strip() - if not quoteChar: - warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) - raise SyntaxError() - - if endQuoteChar is None: - endQuoteChar = quoteChar - else: - endQuoteChar = endQuoteChar.strip() - if not endQuoteChar: - warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) - raise SyntaxError() - - self.quoteChar = quoteChar - self.quoteCharLen = len(quoteChar) - self.firstQuoteChar = quoteChar[0] - self.endQuoteChar = endQuoteChar - self.endQuoteCharLen = len(endQuoteChar) - self.escChar = escChar - self.escQuote = escQuote - self.unquoteResults = unquoteResults - self.convertWhitespaceEscapes = convertWhitespaceEscapes - - if multiline: - self.flags = re.MULTILINE | re.DOTALL - self.pattern = r'%s(?:[^%s%s]' % \ - ( re.escape(self.quoteChar), - _escapeRegexRangeChars(self.endQuoteChar[0]), - (escChar is not None and _escapeRegexRangeChars(escChar) or '') ) - else: - self.flags = 0 - self.pattern = r'%s(?:[^%s\n\r%s]' % \ - ( re.escape(self.quoteChar), - _escapeRegexRangeChars(self.endQuoteChar[0]), - (escChar is not None and _escapeRegexRangeChars(escChar) or '') ) - if len(self.endQuoteChar) > 1: - 
self.pattern += ( - '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]), - _escapeRegexRangeChars(self.endQuoteChar[i])) - for i in range(len(self.endQuoteChar)-1,0,-1)) + ')' - ) - if escQuote: - self.pattern += (r'|(?:%s)' % re.escape(escQuote)) - if escChar: - self.pattern += (r'|(?:%s.)' % re.escape(escChar)) - self.escCharReplacePattern = re.escape(self.escChar)+"(.)" - self.pattern += (r')*%s' % re.escape(self.endQuoteChar)) - - try: - self.re = re.compile(self.pattern, self.flags) - self.reString = self.pattern - except sre_constants.error: - warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern, - SyntaxWarning, stacklevel=2) - raise - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result.group() - - if self.unquoteResults: - - # strip off quotes - ret = ret[self.quoteCharLen:-self.endQuoteCharLen] - - if isinstance(ret,basestring): - # replace escaped whitespace - if '\\' in ret and self.convertWhitespaceEscapes: - ws_map = { - r'\t' : '\t', - r'\n' : '\n', - r'\f' : '\f', - r'\r' : '\r', - } - for wslit,wschar in ws_map.items(): - ret = ret.replace(wslit, wschar) - - # replace escaped characters - if self.escChar: - ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret) - - # replace escaped quotes - if self.escQuote: - ret = ret.replace(self.escQuote, self.endQuoteChar) - - return loc, ret - - def __str__( self ): - try: - return super(QuotedString,self).__str__() - except Exception: - pass - - if self.strRepr is None: - self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar) - - return self.strRepr - - -class CharsNotIn(Token): - """Token for matching 
words composed of characters *not* in a given - set (will include whitespace in matched characters if not listed in - the provided exclusion set - see example). Defined with string - containing all disallowed characters, and an optional minimum, - maximum, and/or exact length. The default value for ``min`` is - 1 (a minimum value < 1 is not valid); the default values for - ``max`` and ``exact`` are 0, meaning no maximum or exact - length restriction. - - Example:: - - # define a comma-separated-value as anything that is not a ',' - csv_value = CharsNotIn(',') - print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213")) - - prints:: - - ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] - """ - def __init__( self, notChars, min=1, max=0, exact=0 ): - super(CharsNotIn,self).__init__() - self.skipWhitespace = False - self.notChars = notChars - - if min < 1: - raise ValueError( - "cannot specify a minimum length < 1; use " + - "Optional(CharsNotIn()) if zero-length char group is permitted") - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = ( self.minLen == 0 ) - self.mayIndexError = False - - def parseImpl( self, instring, loc, doActions=True ): - if instring[loc] in self.notChars: - raise ParseException(instring, loc, self.errmsg, self) - - start = loc - loc += 1 - notchars = self.notChars - maxlen = min( start+self.maxLen, len(instring) ) - while loc < maxlen and \ - (instring[loc] not in notchars): - loc += 1 - - if loc - start < self.minLen: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - def __str__( self ): - try: - return super(CharsNotIn, self).__str__() - except Exception: - pass - - if self.strRepr is None: - if len(self.notChars) > 4: - self.strRepr = "!W:(%s...)" % self.notChars[:4] - else: - self.strRepr = 
"!W:(%s)" % self.notChars - - return self.strRepr - -class White(Token): - """Special matching class for matching whitespace. Normally, - whitespace is ignored by pyparsing grammars. This class is included - when some whitespace structures are significant. Define with - a string containing the whitespace characters to be matched; default - is ``" \\t\\r\\n"``. Also takes optional ``min``, - ``max``, and ``exact`` arguments, as defined for the - :class:`Word` class. - """ - whiteStrs = { - ' ' : '<SP>', - '\t': '<TAB>', - '\n': '<LF>', - '\r': '<CR>', - '\f': '<FF>', - 'u\00A0': '<NBSP>', - 'u\1680': '<OGHAM_SPACE_MARK>', - 'u\180E': '<MONGOLIAN_VOWEL_SEPARATOR>', - 'u\2000': '<EN_QUAD>', - 'u\2001': '<EM_QUAD>', - 'u\2002': '<EN_SPACE>', - 'u\2003': '<EM_SPACE>', - 'u\2004': '<THREE-PER-EM_SPACE>', - 'u\2005': '<FOUR-PER-EM_SPACE>', - 'u\2006': '<SIX-PER-EM_SPACE>', - 'u\2007': '<FIGURE_SPACE>', - 'u\2008': '<PUNCTUATION_SPACE>', - 'u\2009': '<THIN_SPACE>', - 'u\200A': '<HAIR_SPACE>', - 'u\200B': '<ZERO_WIDTH_SPACE>', - 'u\202F': '<NNBSP>', - 'u\205F': '<MMSP>', - 'u\3000': '<IDEOGRAPHIC_SPACE>', - } - def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0): - super(White,self).__init__() - self.matchWhite = ws - self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) ) - #~ self.leaveWhitespace() - self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite)) - self.mayReturnEmpty = True - self.errmsg = "Expected " + self.name - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - def parseImpl( self, instring, loc, doActions=True ): - if not(instring[ loc ] in self.matchWhite): - raise ParseException(instring, loc, self.errmsg, self) - start = loc - loc += 1 - maxloc = start + self.maxLen - maxloc = min( maxloc, len(instring) ) - while loc < maxloc and instring[loc] in self.matchWhite: - loc += 1 - - if loc - start < 
self.minLen: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - -class _PositionToken(Token): - def __init__( self ): - super(_PositionToken,self).__init__() - self.name=self.__class__.__name__ - self.mayReturnEmpty = True - self.mayIndexError = False - -class GoToColumn(_PositionToken): - """Token to advance to a specific column of input text; useful for - tabular report scraping. - """ - def __init__( self, colno ): - super(GoToColumn,self).__init__() - self.col = colno - - def preParse( self, instring, loc ): - if col(loc,instring) != self.col: - instrlen = len(instring) - if self.ignoreExprs: - loc = self._skipIgnorables( instring, loc ) - while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col : - loc += 1 - return loc - - def parseImpl( self, instring, loc, doActions=True ): - thiscol = col( loc, instring ) - if thiscol > self.col: - raise ParseException( instring, loc, "Text not in expected column", self ) - newloc = loc + self.col - thiscol - ret = instring[ loc: newloc ] - return newloc, ret - - -class LineStart(_PositionToken): - """Matches if current position is at the beginning of a line within - the parse string - - Example:: - - test = '''\ - AAA this line - AAA and this line - AAA but not this one - B AAA and definitely not this one - ''' - - for t in (LineStart() + 'AAA' + restOfLine).searchString(test): - print(t) - - prints:: - - ['AAA', ' this line'] - ['AAA', ' and this line'] - - """ - def __init__( self ): - super(LineStart,self).__init__() - self.errmsg = "Expected start of line" - - def parseImpl( self, instring, loc, doActions=True ): - if col(loc, instring) == 1: - return loc, [] - raise ParseException(instring, loc, self.errmsg, self) - -class LineEnd(_PositionToken): - """Matches if current position is at the end of a line within the - parse string - """ - def __init__( self ): - super(LineEnd,self).__init__() - self.setWhitespaceChars( 
ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") ) - self.errmsg = "Expected end of line" - - def parseImpl( self, instring, loc, doActions=True ): - if loc<len(instring): - if instring[loc] == "\n": - return loc+1, "\n" - else: - raise ParseException(instring, loc, self.errmsg, self) - elif loc == len(instring): - return loc+1, [] - else: - raise ParseException(instring, loc, self.errmsg, self) - -class StringStart(_PositionToken): - """Matches if current position is at the beginning of the parse - string - """ - def __init__( self ): - super(StringStart,self).__init__() - self.errmsg = "Expected start of text" - - def parseImpl( self, instring, loc, doActions=True ): - if loc != 0: - # see if entire string up to here is just whitespace and ignoreables - if loc != self.preParse( instring, 0 ): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - -class StringEnd(_PositionToken): - """Matches if current position is at the end of the parse string - """ - def __init__( self ): - super(StringEnd,self).__init__() - self.errmsg = "Expected end of text" - - def parseImpl( self, instring, loc, doActions=True ): - if loc < len(instring): - raise ParseException(instring, loc, self.errmsg, self) - elif loc == len(instring): - return loc+1, [] - elif loc > len(instring): - return loc, [] - else: - raise ParseException(instring, loc, self.errmsg, self) - -class WordStart(_PositionToken): - """Matches if the current position is at the beginning of a Word, - and is not preceded by any character in a given set of - ``wordChars`` (default= ``printables``). To emulate the - ``\b`` behavior of regular expressions, use - ``WordStart(alphanums)``. ``WordStart`` will also match at - the beginning of the string being parsed, or at the beginning of - a line. 
- """ - def __init__(self, wordChars = printables): - super(WordStart,self).__init__() - self.wordChars = set(wordChars) - self.errmsg = "Not at the start of a word" - - def parseImpl(self, instring, loc, doActions=True ): - if loc != 0: - if (instring[loc-1] in self.wordChars or - instring[loc] not in self.wordChars): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - -class WordEnd(_PositionToken): - """Matches if the current position is at the end of a Word, and is - not followed by any character in a given set of ``wordChars`` - (default= ``printables``). To emulate the ``\b`` behavior of - regular expressions, use ``WordEnd(alphanums)``. ``WordEnd`` - will also match at the end of the string being parsed, or at the end - of a line. - """ - def __init__(self, wordChars = printables): - super(WordEnd,self).__init__() - self.wordChars = set(wordChars) - self.skipWhitespace = False - self.errmsg = "Not at the end of a word" - - def parseImpl(self, instring, loc, doActions=True ): - instrlen = len(instring) - if instrlen>0 and loc<instrlen: - if (instring[loc] in self.wordChars or - instring[loc-1] not in self.wordChars): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - -class ParseExpression(ParserElement): - """Abstract subclass of ParserElement, for combining and - post-processing parsed tokens. 
- """ - def __init__( self, exprs, savelist = False ): - super(ParseExpression,self).__init__(savelist) - if isinstance( exprs, _generatorType ): - exprs = list(exprs) - - if isinstance( exprs, basestring ): - self.exprs = [ ParserElement._literalStringClass( exprs ) ] - elif isinstance( exprs, Iterable ): - exprs = list(exprs) - # if sequence of strings provided, wrap with Literal - if all(isinstance(expr, basestring) for expr in exprs): - exprs = map(ParserElement._literalStringClass, exprs) - self.exprs = list(exprs) - else: - try: - self.exprs = list( exprs ) - except TypeError: - self.exprs = [ exprs ] - self.callPreparse = False - - def __getitem__( self, i ): - return self.exprs[i] - - def append( self, other ): - self.exprs.append( other ) - self.strRepr = None - return self - - def leaveWhitespace( self ): - """Extends ``leaveWhitespace`` defined in base class, and also invokes ``leaveWhitespace`` on - all contained expressions.""" - self.skipWhitespace = False - self.exprs = [ e.copy() for e in self.exprs ] - for e in self.exprs: - e.leaveWhitespace() - return self - - def ignore( self, other ): - if isinstance( other, Suppress ): - if other not in self.ignoreExprs: - super( ParseExpression, self).ignore( other ) - for e in self.exprs: - e.ignore( self.ignoreExprs[-1] ) - else: - super( ParseExpression, self).ignore( other ) - for e in self.exprs: - e.ignore( self.ignoreExprs[-1] ) - return self - - def __str__( self ): - try: - return super(ParseExpression,self).__str__() - except Exception: - pass - - if self.strRepr is None: - self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) ) - return self.strRepr - - def streamline( self ): - super(ParseExpression,self).streamline() - - for e in self.exprs: - e.streamline() - - # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d ) - # but only if there are no parse actions or resultsNames on the nested And's - # (likewise for Or's and MatchFirst's) - if ( 
len(self.exprs) == 2 ): - other = self.exprs[0] - if ( isinstance( other, self.__class__ ) and - not(other.parseAction) and - other.resultsName is None and - not other.debug ): - self.exprs = other.exprs[:] + [ self.exprs[1] ] - self.strRepr = None - self.mayReturnEmpty |= other.mayReturnEmpty - self.mayIndexError |= other.mayIndexError - - other = self.exprs[-1] - if ( isinstance( other, self.__class__ ) and - not(other.parseAction) and - other.resultsName is None and - not other.debug ): - self.exprs = self.exprs[:-1] + other.exprs[:] - self.strRepr = None - self.mayReturnEmpty |= other.mayReturnEmpty - self.mayIndexError |= other.mayIndexError - - self.errmsg = "Expected " + _ustr(self) - - return self - - def setResultsName( self, name, listAllMatches=False ): - ret = super(ParseExpression,self).setResultsName(name,listAllMatches) - return ret - - def validate( self, validateTrace=[] ): - tmp = validateTrace[:]+[self] - for e in self.exprs: - e.validate(tmp) - self.checkRecursion( [] ) - - def copy(self): - ret = super(ParseExpression,self).copy() - ret.exprs = [e.copy() for e in self.exprs] - return ret - -class And(ParseExpression): - """ - Requires all given :class:`ParseExpression` s to be found in the given order. - Expressions may be separated by whitespace. - May be constructed using the ``'+'`` operator. - May also be constructed using the ``'-'`` operator, which will - suppress backtracking. 
- - Example:: - - integer = Word(nums) - name_expr = OneOrMore(Word(alphas)) - - expr = And([integer("id"),name_expr("name"),integer("age")]) - # more easily written as: - expr = integer("id") + name_expr("name") + integer("age") - """ - - class _ErrorStop(Empty): - def __init__(self, *args, **kwargs): - super(And._ErrorStop,self).__init__(*args, **kwargs) - self.name = '-' - self.leaveWhitespace() - - def __init__( self, exprs, savelist = True ): - super(And,self).__init__(exprs, savelist) - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - self.setWhitespaceChars( self.exprs[0].whiteChars ) - self.skipWhitespace = self.exprs[0].skipWhitespace - self.callPreparse = True - - def streamline(self): - super(And, self).streamline() - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - return self - - def parseImpl( self, instring, loc, doActions=True ): - # pass False as last arg to _parse for first element, since we already - # pre-parsed the string as part of our And pre-parsing - loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False ) - errorStop = False - for e in self.exprs[1:]: - if isinstance(e, And._ErrorStop): - errorStop = True - continue - if errorStop: - try: - loc, exprtokens = e._parse( instring, loc, doActions ) - except ParseSyntaxException: - raise - except ParseBaseException as pe: - pe.__traceback__ = None - raise ParseSyntaxException._from_exception(pe) - except IndexError: - raise ParseSyntaxException(instring, len(instring), self.errmsg, self) - else: - loc, exprtokens = e._parse( instring, loc, doActions ) - if exprtokens or exprtokens.haskeys(): - resultlist += exprtokens - return loc, resultlist - - def __iadd__(self, other ): - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - return self.append( other ) #And( [ self, other ] ) - - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] - for e in 
self.exprs: - e.checkRecursion( subRecCheckList ) - if not e.mayReturnEmpty: - break - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - -class Or(ParseExpression): - """Requires that at least one :class:`ParseExpression` is found. If - two expressions match, the expression that matches the longest - string will be used. May be constructed using the ``'^'`` - operator. - - Example:: - - # construct Or using '^' operator - - number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums)) - print(number.searchString("123 3.1416 789")) - - prints:: - - [['123'], ['3.1416'], ['789']] - """ - def __init__( self, exprs, savelist = False ): - super(Or,self).__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - else: - self.mayReturnEmpty = True - - def streamline(self): - super(Or, self).streamline() - self.saveAsList = any(e.saveAsList for e in self.exprs) - return self - - def parseImpl( self, instring, loc, doActions=True ): - maxExcLoc = -1 - maxException = None - matches = [] - for e in self.exprs: - try: - loc2 = e.tryParse( instring, loc ) - except ParseException as err: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException(instring,len(instring),e.errmsg,self) - maxExcLoc = len(instring) - else: - # save match among all matches, to retry longest to shortest - matches.append((loc2, e)) - - if matches: - matches.sort(key=lambda x: -x[0]) - for _,e in matches: - try: - return e._parse( instring, loc, doActions ) - except ParseException as err: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - - if maxException is not None: - maxException.msg = self.errmsg - raise maxException - else: - raise 
ParseException(instring, loc, "no defined alternatives to match", self) - - - def __ixor__(self, other ): - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - return self.append( other ) #Or( [ self, other ] ) - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] - for e in self.exprs: - e.checkRecursion( subRecCheckList ) - - -class MatchFirst(ParseExpression): - """Requires that at least one :class:`ParseExpression` is found. If - two expressions match, the first one listed is the one that will - match. May be constructed using the ``'|'`` operator. - - Example:: - - # construct MatchFirst using '|' operator - - # watch the order of expressions to match - number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) - print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] - - # put more selective expression first - number = Combine(Word(nums) + '.' 
+ Word(nums)) | Word(nums) - print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] - """ - def __init__( self, exprs, savelist = False ): - super(MatchFirst,self).__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - # self.saveAsList = any(e.saveAsList for e in self.exprs) - else: - self.mayReturnEmpty = True - - def streamline(self): - super(MatchFirst, self).streamline() - self.saveAsList = any(e.saveAsList for e in self.exprs) - return self - - def parseImpl( self, instring, loc, doActions=True ): - maxExcLoc = -1 - maxException = None - for e in self.exprs: - try: - ret = e._parse( instring, loc, doActions ) - return ret - except ParseException as err: - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException(instring,len(instring),e.errmsg,self) - maxExcLoc = len(instring) - - # only got here if no expression matched, raise exception for match that made it the furthest - else: - if maxException is not None: - maxException.msg = self.errmsg - raise maxException - else: - raise ParseException(instring, loc, "no defined alternatives to match", self) - - def __ior__(self, other ): - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - return self.append( other ) #MatchFirst( [ self, other ] ) - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] - for e in self.exprs: - e.checkRecursion( subRecCheckList ) - - -class Each(ParseExpression): - """Requires all given :class:`ParseExpression` s to be found, but in - any order. Expressions may be separated by whitespace. 
- - May be constructed using the ``'&'`` operator. - - Example:: - - color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") - shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") - integer = Word(nums) - shape_attr = "shape:" + shape_type("shape") - posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") - color_attr = "color:" + color("color") - size_attr = "size:" + integer("size") - - # use Each (using operator '&') to accept attributes in any order - # (shape and posn are required, color and size are optional) - shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr) - - shape_spec.runTests(''' - shape: SQUARE color: BLACK posn: 100, 120 - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - color:GREEN size:20 shape:TRIANGLE posn:20,40 - ''' - ) - - prints:: - - shape: SQUARE color: BLACK posn: 100, 120 - ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] - - color: BLACK - - posn: ['100', ',', '120'] - - x: 100 - - y: 120 - - shape: SQUARE - - - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] - - color: BLUE - - posn: ['50', ',', '80'] - - x: 50 - - y: 80 - - shape: CIRCLE - - size: 50 - - - color: GREEN size: 20 shape: TRIANGLE posn: 20,40 - ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] - - color: GREEN - - posn: ['20', ',', '40'] - - x: 20 - - y: 40 - - shape: TRIANGLE - - size: 20 - """ - def __init__( self, exprs, savelist = True ): - super(Each,self).__init__(exprs, savelist) - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - self.skipWhitespace = True - self.initExprGroups = True - self.saveAsList = True - - def streamline(self): - super(Each, self).streamline() - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - return self - - def parseImpl( self, instring, loc, doActions=True ): - if self.initExprGroups: - 
self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional)) - opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ] - opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)] - self.optionals = opt1 + opt2 - self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ] - self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ] - self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ] - self.required += self.multirequired - self.initExprGroups = False - tmpLoc = loc - tmpReqd = self.required[:] - tmpOpt = self.optionals[:] - matchOrder = [] - - keepMatching = True - while keepMatching: - tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired - failed = [] - for e in tmpExprs: - try: - tmpLoc = e.tryParse( instring, tmpLoc ) - except ParseException: - failed.append(e) - else: - matchOrder.append(self.opt1map.get(id(e),e)) - if e in tmpReqd: - tmpReqd.remove(e) - elif e in tmpOpt: - tmpOpt.remove(e) - if len(failed) == len(tmpExprs): - keepMatching = False - - if tmpReqd: - missing = ", ".join(_ustr(e) for e in tmpReqd) - raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing ) - - # add any unmatched Optionals, in case they have default values defined - matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt] - - resultlist = [] - for e in matchOrder: - loc,results = e._parse(instring,loc,doActions) - resultlist.append(results) - - finalResults = sum(resultlist, ParseResults([])) - return loc, finalResults - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] - for e in self.exprs: - e.checkRecursion( subRecCheckList ) 
- - -class ParseElementEnhance(ParserElement): - """Abstract subclass of :class:`ParserElement`, for combining and - post-processing parsed tokens. - """ - def __init__( self, expr, savelist=False ): - super(ParseElementEnhance,self).__init__(savelist) - if isinstance( expr, basestring ): - if issubclass(ParserElement._literalStringClass, Token): - expr = ParserElement._literalStringClass(expr) - else: - expr = ParserElement._literalStringClass(Literal(expr)) - self.expr = expr - self.strRepr = None - if expr is not None: - self.mayIndexError = expr.mayIndexError - self.mayReturnEmpty = expr.mayReturnEmpty - self.setWhitespaceChars( expr.whiteChars ) - self.skipWhitespace = expr.skipWhitespace - self.saveAsList = expr.saveAsList - self.callPreparse = expr.callPreparse - self.ignoreExprs.extend(expr.ignoreExprs) - - def parseImpl( self, instring, loc, doActions=True ): - if self.expr is not None: - return self.expr._parse( instring, loc, doActions, callPreParse=False ) - else: - raise ParseException("",loc,self.errmsg,self) - - def leaveWhitespace( self ): - self.skipWhitespace = False - self.expr = self.expr.copy() - if self.expr is not None: - self.expr.leaveWhitespace() - return self - - def ignore( self, other ): - if isinstance( other, Suppress ): - if other not in self.ignoreExprs: - super( ParseElementEnhance, self).ignore( other ) - if self.expr is not None: - self.expr.ignore( self.ignoreExprs[-1] ) - else: - super( ParseElementEnhance, self).ignore( other ) - if self.expr is not None: - self.expr.ignore( self.ignoreExprs[-1] ) - return self - - def streamline( self ): - super(ParseElementEnhance,self).streamline() - if self.expr is not None: - self.expr.streamline() - return self - - def checkRecursion( self, parseElementList ): - if self in parseElementList: - raise RecursiveGrammarException( parseElementList+[self] ) - subRecCheckList = parseElementList[:] + [ self ] - if self.expr is not None: - self.expr.checkRecursion( subRecCheckList ) - - def 
validate( self, validateTrace=[] ): - tmp = validateTrace[:]+[self] - if self.expr is not None: - self.expr.validate(tmp) - self.checkRecursion( [] ) - - def __str__( self ): - try: - return super(ParseElementEnhance,self).__str__() - except Exception: - pass - - if self.strRepr is None and self.expr is not None: - self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) ) - return self.strRepr - - -class FollowedBy(ParseElementEnhance): - """Lookahead matching of the given parse expression. - ``FollowedBy`` does *not* advance the parsing position within - the input string, it only verifies that the specified parse - expression matches at the current position. ``FollowedBy`` - always returns a null token list. If any results names are defined - in the lookahead expression, those *will* be returned for access by - name. - - Example:: - - # use FollowedBy to match a label only if it is followed by a ':' - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) - - OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint() - - prints:: - - [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] - """ - def __init__( self, expr ): - super(FollowedBy,self).__init__(expr) - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - _, ret = self.expr._parse(instring, loc, doActions=doActions) - del ret[:] - return loc, ret - - -class PrecededBy(ParseElementEnhance): - """Lookbehind matching of the given parse expression. - ``PrecededBy`` does not advance the parsing position within the - input string, it only verifies that the specified parse expression - matches prior to the current position. ``PrecededBy`` always - returns a null token list, but if a results name is defined on the - given expression, it is returned. 
- - Parameters: - - - expr - expression that must match prior to the current parse - location - - retreat - (default= ``None``) - (int) maximum number of characters - to lookbehind prior to the current parse location - - If the lookbehind expression is a string, Literal, Keyword, or - a Word or CharsNotIn with a specified exact or maximum length, then - the retreat parameter is not required. Otherwise, retreat must be - specified to give a maximum number of characters to look back from - the current parse position for a lookbehind match. - - Example:: - - # VB-style variable names with type prefixes - int_var = PrecededBy("#") + pyparsing_common.identifier - str_var = PrecededBy("$") + pyparsing_common.identifier - - """ - def __init__(self, expr, retreat=None): - super(PrecededBy, self).__init__(expr) - self.expr = self.expr().leaveWhitespace() - self.mayReturnEmpty = True - self.mayIndexError = False - self.exact = False - if isinstance(expr, str): - retreat = len(expr) - self.exact = True - elif isinstance(expr, (Literal, Keyword)): - retreat = expr.matchLen - self.exact = True - elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT: - retreat = expr.maxLen - self.exact = True - elif isinstance(expr, _PositionToken): - retreat = 0 - self.exact = True - self.retreat = retreat - self.errmsg = "not preceded by " + str(expr) - self.skipWhitespace = False - - def parseImpl(self, instring, loc=0, doActions=True): - if self.exact: - if loc < self.retreat: - raise ParseException(instring, loc, self.errmsg) - start = loc - self.retreat - _, ret = self.expr._parse(instring, start) - else: - # retreat specified a maximum lookbehind window, iterate - test_expr = self.expr + StringEnd() - instring_slice = instring[:loc] - last_expr = ParseException(instring, loc, self.errmsg) - for offset in range(1, min(loc, self.retreat+1)): - try: - _, ret = test_expr._parse(instring_slice, loc-offset) - except ParseBaseException as pbe: - last_expr = pbe - else: - break - 
else: - raise last_expr - # return empty list of tokens, but preserve any defined results names - del ret[:] - return loc, ret - - -class NotAny(ParseElementEnhance): - """Lookahead to disallow matching with the given parse expression. - ``NotAny`` does *not* advance the parsing position within the - input string, it only verifies that the specified parse expression - does *not* match at the current position. Also, ``NotAny`` does - *not* skip over leading whitespace. ``NotAny`` always returns - a null token list. May be constructed using the '~' operator. - - Example:: - - AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split()) - - # take care not to mistake keywords for identifiers - ident = ~(AND | OR | NOT) + Word(alphas) - boolean_term = Optional(NOT) + ident - - # very crude boolean expression - to support parenthesis groups and - # operation hierarchy, use infixNotation - boolean_expr = boolean_term + ZeroOrMore((AND | OR) + boolean_term) - - # integers that are followed by "." are actually floats - integer = Word(nums) + ~Char(".") - """ - def __init__( self, expr ): - super(NotAny,self).__init__(expr) - #~ self.leaveWhitespace() - self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs - self.mayReturnEmpty = True - self.errmsg = "Found unwanted token, "+_ustr(self.expr) - - def parseImpl( self, instring, loc, doActions=True ): - if self.expr.canParseNext(instring, loc): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "~{" + _ustr(self.expr) + "}" - - return self.strRepr - -class _MultipleMatch(ParseElementEnhance): - def __init__( self, expr, stopOn=None): - super(_MultipleMatch, self).__init__(expr) - self.saveAsList = True - ender = stopOn - if isinstance(ender, basestring): - ender = ParserElement._literalStringClass(ender) - self.not_ender = ~ender if ender is not 
None else None - - def parseImpl( self, instring, loc, doActions=True ): - self_expr_parse = self.expr._parse - self_skip_ignorables = self._skipIgnorables - check_ender = self.not_ender is not None - if check_ender: - try_not_ender = self.not_ender.tryParse - - # must be at least one (but first see if we are the stopOn sentinel; - # if so, fail) - if check_ender: - try_not_ender(instring, loc) - loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False ) - try: - hasIgnoreExprs = (not not self.ignoreExprs) - while 1: - if check_ender: - try_not_ender(instring, loc) - if hasIgnoreExprs: - preloc = self_skip_ignorables( instring, loc ) - else: - preloc = loc - loc, tmptokens = self_expr_parse( instring, preloc, doActions ) - if tmptokens or tmptokens.haskeys(): - tokens += tmptokens - except (ParseException,IndexError): - pass - - return loc, tokens - -class OneOrMore(_MultipleMatch): - """Repetition of one or more of the given expression. - - Parameters: - - expr - expression that must match one or more times - - stopOn - (default= ``None``) - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) - - Example:: - - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) - - text = "shape: SQUARE posn: upper left color: BLACK" - OneOrMore(attr_expr).parseString(text).pprint() # Fail! 
read 'color' as data instead of next label -> [['shape', 'SQUARE color']] - - # use stopOn attribute for OneOrMore to avoid reading label string as part of the data - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) - OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] - - # could also be written as - (attr_expr * (1,)).parseString(text).pprint() - """ - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + _ustr(self.expr) + "}..." - - return self.strRepr - -class ZeroOrMore(_MultipleMatch): - """Optional repetition of zero or more of the given expression. - - Parameters: - - expr - expression that must match zero or more times - - stopOn - (default= ``None``) - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) - - Example: similar to :class:`OneOrMore` - """ - def __init__( self, expr, stopOn=None): - super(ZeroOrMore,self).__init__(expr, stopOn=stopOn) - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - try: - return super(ZeroOrMore, self).parseImpl(instring, loc, doActions) - except (ParseException,IndexError): - return loc, [] - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "[" + _ustr(self.expr) + "]..." - - return self.strRepr - -class _NullToken(object): - def __bool__(self): - return False - __nonzero__ = __bool__ - def __str__(self): - return "" - -_optionalNotMatched = _NullToken() -class Optional(ParseElementEnhance): - """Optional matching of the given expression. - - Parameters: - - expr - expression that must match zero or more times - - default (optional) - value to be returned if the optional expression is not found. 
- - Example:: - - # US postal code can be a 5-digit zip, plus optional 4-digit qualifier - zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4))) - zip.runTests(''' - # traditional ZIP code - 12345 - - # ZIP+4 form - 12101-0001 - - # invalid ZIP - 98765- - ''') - - prints:: - - # traditional ZIP code - 12345 - ['12345'] - - # ZIP+4 form - 12101-0001 - ['12101-0001'] - - # invalid ZIP - 98765- - ^ - FAIL: Expected end of text (at char 5), (line:1, col:6) - """ - def __init__( self, expr, default=_optionalNotMatched ): - super(Optional,self).__init__( expr, savelist=False ) - self.saveAsList = self.expr.saveAsList - self.defaultValue = default - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - try: - loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False ) - except (ParseException,IndexError): - if self.defaultValue is not _optionalNotMatched: - if self.expr.resultsName: - tokens = ParseResults([ self.defaultValue ]) - tokens[self.expr.resultsName] = self.defaultValue - else: - tokens = [ self.defaultValue ] - else: - tokens = [] - return loc, tokens - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "[" + _ustr(self.expr) + "]" - - return self.strRepr - -class SkipTo(ParseElementEnhance): - """Token for skipping over all undefined text until the matched - expression is found. - - Parameters: - - expr - target expression marking the end of the data to be skipped - - include - (default= ``False``) if True, the target expression is also parsed - (the skipped text and target expression are returned as a 2-element list). 
- - ignore - (default= ``None``) used to define grammars (typically quoted strings and - comments) that might contain false matches to the target expression - - failOn - (default= ``None``) define expressions that are not allowed to be - included in the skipped test; if found before the target expression is found, - the SkipTo is not a match - - Example:: - - report = ''' - Outstanding Issues Report - 1 Jan 2000 - - # | Severity | Description | Days Open - -----+----------+-------------------------------------------+----------- - 101 | Critical | Intermittent system crash | 6 - 94 | Cosmetic | Spelling error on Login ('log|n') | 14 - 79 | Minor | System slow when running too many reports | 47 - ''' - integer = Word(nums) - SEP = Suppress('|') - # use SkipTo to simply match everything up until the next SEP - # - ignore quoted strings, so that a '|' character inside a quoted string does not match - # - parse action will call token.strip() for each matched token, i.e., the description body - string_data = SkipTo(SEP, ignore=quotedString) - string_data.setParseAction(tokenMap(str.strip)) - ticket_expr = (integer("issue_num") + SEP - + string_data("sev") + SEP - + string_data("desc") + SEP - + integer("days_open")) - - for tkt in ticket_expr.searchString(report): - print tkt.dump() - - prints:: - - ['101', 'Critical', 'Intermittent system crash', '6'] - - days_open: 6 - - desc: Intermittent system crash - - issue_num: 101 - - sev: Critical - ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14'] - - days_open: 14 - - desc: Spelling error on Login ('log|n') - - issue_num: 94 - - sev: Cosmetic - ['79', 'Minor', 'System slow when running too many reports', '47'] - - days_open: 47 - - desc: System slow when running too many reports - - issue_num: 79 - - sev: Minor - """ - def __init__( self, other, include=False, ignore=None, failOn=None ): - super( SkipTo, self ).__init__( other ) - self.ignoreExpr = ignore - self.mayReturnEmpty = True - self.mayIndexError = False - 
self.includeMatch = include - self.saveAsList = False - if isinstance(failOn, basestring): - self.failOn = ParserElement._literalStringClass(failOn) - else: - self.failOn = failOn - self.errmsg = "No match found for "+_ustr(self.expr) - - def parseImpl( self, instring, loc, doActions=True ): - startloc = loc - instrlen = len(instring) - expr = self.expr - expr_parse = self.expr._parse - self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None - self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None - - tmploc = loc - while tmploc <= instrlen: - if self_failOn_canParseNext is not None: - # break if failOn expression matches - if self_failOn_canParseNext(instring, tmploc): - break - - if self_ignoreExpr_tryParse is not None: - # advance past ignore expressions - while 1: - try: - tmploc = self_ignoreExpr_tryParse(instring, tmploc) - except ParseBaseException: - break - - try: - expr_parse(instring, tmploc, doActions=False, callPreParse=False) - except (ParseException, IndexError): - # no match, advance loc in string - tmploc += 1 - else: - # matched skipto expr, done - break - - else: - # ran off the end of the input string without matching skipto expr, fail - raise ParseException(instring, loc, self.errmsg, self) - - # build up return values - loc = tmploc - skiptext = instring[startloc:loc] - skipresult = ParseResults(skiptext) - - if self.includeMatch: - loc, mat = expr_parse(instring,loc,doActions,callPreParse=False) - skipresult += mat - - return loc, skipresult - -class Forward(ParseElementEnhance): - """Forward declaration of an expression to be defined later - - used for recursive grammars, such as algebraic infix notation. - When the expression is known, it is assigned to the ``Forward`` - variable using the '<<' operator. - - Note: take care when assigning to ``Forward`` not to overlook - precedence of operators. 
- - Specifically, '|' has a lower precedence than '<<', so that:: - - fwdExpr << a | b | c - - will actually be evaluated as:: - - (fwdExpr << a) | b | c - - thereby leaving b and c out as parseable alternatives. It is recommended that you - explicitly group the values inserted into the ``Forward``:: - - fwdExpr << (a | b | c) - - Converting to use the '<<=' operator instead will avoid this problem. - - See :class:`ParseResults.pprint` for an example of a recursive - parser created using ``Forward``. - """ - def __init__( self, other=None ): - super(Forward,self).__init__( other, savelist=False ) - - def __lshift__( self, other ): - if isinstance( other, basestring ): - other = ParserElement._literalStringClass(other) - self.expr = other - self.strRepr = None - self.mayIndexError = self.expr.mayIndexError - self.mayReturnEmpty = self.expr.mayReturnEmpty - self.setWhitespaceChars( self.expr.whiteChars ) - self.skipWhitespace = self.expr.skipWhitespace - self.saveAsList = self.expr.saveAsList - self.ignoreExprs.extend(self.expr.ignoreExprs) - return self - - def __ilshift__(self, other): - return self << other - - def leaveWhitespace( self ): - self.skipWhitespace = False - return self - - def streamline( self ): - if not self.streamlined: - self.streamlined = True - if self.expr is not None: - self.expr.streamline() - return self - - def validate( self, validateTrace=[] ): - if self not in validateTrace: - tmp = validateTrace[:]+[self] - if self.expr is not None: - self.expr.validate(tmp) - self.checkRecursion([]) - - def __str__( self ): - if hasattr(self,"name"): - return self.name - return self.__class__.__name__ + ": ..." 
- - # stubbed out for now - creates awful memory and perf issues - self._revertClass = self.__class__ - self.__class__ = _ForwardNoRecurse - try: - if self.expr is not None: - retString = _ustr(self.expr) - else: - retString = "None" - finally: - self.__class__ = self._revertClass - return self.__class__.__name__ + ": " + retString - - def copy(self): - if self.expr is not None: - return super(Forward,self).copy() - else: - ret = Forward() - ret <<= self - return ret - -class _ForwardNoRecurse(Forward): - def __str__( self ): - return "..." - -class TokenConverter(ParseElementEnhance): - """ - Abstract subclass of :class:`ParseExpression`, for converting parsed results. - """ - def __init__( self, expr, savelist=False ): - super(TokenConverter,self).__init__( expr )#, savelist ) - self.saveAsList = False - -class Combine(TokenConverter): - """Converter to concatenate all matching tokens to a single string. - By default, the matching patterns must also be contiguous in the - input string; this can be disabled by specifying - ``'adjacent=False'`` in the constructor. - - Example:: - - real = Word(nums) + '.' + Word(nums) - print(real.parseString('3.1416')) # -> ['3', '.', '1416'] - # will also erroneously match the following - print(real.parseString('3. 1416')) # -> ['3', '.', '1416'] - - real = Combine(Word(nums) + '.' + Word(nums)) - print(real.parseString('3.1416')) # -> ['3.1416'] - # no match when there are internal spaces - print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...) 
- """ - def __init__( self, expr, joinString="", adjacent=True ): - super(Combine,self).__init__( expr ) - # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself - if adjacent: - self.leaveWhitespace() - self.adjacent = adjacent - self.skipWhitespace = True - self.joinString = joinString - self.callPreparse = True - - def ignore( self, other ): - if self.adjacent: - ParserElement.ignore(self, other) - else: - super( Combine, self).ignore( other ) - return self - - def postParse( self, instring, loc, tokenlist ): - retToks = tokenlist.copy() - del retToks[:] - retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults) - - if self.resultsName and retToks.haskeys(): - return [ retToks ] - else: - return retToks - -class Group(TokenConverter): - """Converter to return the matched tokens as a list - useful for - returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions. - - Example:: - - ident = Word(alphas) - num = Word(nums) - term = ident | num - func = ident + Optional(delimitedList(term)) - print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100'] - - func = ident + Group(Optional(delimitedList(term))) - print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']] - """ - def __init__( self, expr ): - super(Group,self).__init__( expr ) - self.saveAsList = expr.saveAsList - - def postParse( self, instring, loc, tokenlist ): - return [ tokenlist ] - -class Dict(TokenConverter): - """Converter to return a repetitive expression as a list, but also - as a dictionary. Each element can also be referenced using the first - token in the expression as its key. Useful for tabular report - scraping when the first column can be used as a item key. 
- - Example:: - - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) - - text = "shape: SQUARE posn: upper left color: light blue texture: burlap" - attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) - - # print attributes as plain groups - print(OneOrMore(attr_expr).parseString(text).dump()) - - # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names - result = Dict(OneOrMore(Group(attr_expr))).parseString(text) - print(result.dump()) - - # access named fields as dict entries, or output as dict - print(result['shape']) - print(result.asDict()) - - prints:: - - ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] - [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - - color: light blue - - posn: upper left - - shape: SQUARE - - texture: burlap - SQUARE - {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'} - - See more examples at :class:`ParseResults` of accessing fields by results name. 
- """ - def __init__( self, expr ): - super(Dict,self).__init__( expr ) - self.saveAsList = True - - def postParse( self, instring, loc, tokenlist ): - for i,tok in enumerate(tokenlist): - if len(tok) == 0: - continue - ikey = tok[0] - if isinstance(ikey,int): - ikey = _ustr(tok[0]).strip() - if len(tok)==1: - tokenlist[ikey] = _ParseResultsWithOffset("",i) - elif len(tok)==2 and not isinstance(tok[1],ParseResults): - tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i) - else: - dictvalue = tok.copy() #ParseResults(i) - del dictvalue[0] - if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()): - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i) - else: - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i) - - if self.resultsName: - return [ tokenlist ] - else: - return tokenlist - - -class Suppress(TokenConverter): - """Converter for ignoring the results of a parsed expression. - - Example:: - - source = "a, b, c,d" - wd = Word(alphas) - wd_list1 = wd + ZeroOrMore(',' + wd) - print(wd_list1.parseString(source)) - - # often, delimiters that are useful during parsing are just in the - # way afterward - use Suppress to keep them out of the parsed output - wd_list2 = wd + ZeroOrMore(Suppress(',') + wd) - print(wd_list2.parseString(source)) - - prints:: - - ['a', ',', 'b', ',', 'c', ',', 'd'] - ['a', 'b', 'c', 'd'] - - (See also :class:`delimitedList`.) - """ - def postParse( self, instring, loc, tokenlist ): - return [] - - def suppress( self ): - return self - - -class OnlyOnce(object): - """Wrapper for parse actions, to ensure they are only called once. - """ - def __init__(self, methodCall): - self.callable = _trim_arity(methodCall) - self.called = False - def __call__(self,s,l,t): - if not self.called: - results = self.callable(s,l,t) - self.called = True - return results - raise ParseException(s,l,"") - def reset(self): - self.called = False - -def traceParseAction(f): - """Decorator for debugging parse actions. 
- - When the parse action is called, this decorator will print - ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``. - When the parse action completes, the decorator will print - ``"<<"`` followed by the returned value, or any exception that the parse action raised. - - Example:: - - wd = Word(alphas) - - @traceParseAction - def remove_duplicate_chars(tokens): - return ''.join(sorted(set(''.join(tokens)))) - - wds = OneOrMore(wd).setParseAction(remove_duplicate_chars) - print(wds.parseString("slkdjs sld sldd sdlf sdljf")) - - prints:: - - >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) - <<leaving remove_duplicate_chars (ret: 'dfjkls') - ['dfjkls'] - """ - f = _trim_arity(f) - def z(*paArgs): - thisFunc = f.__name__ - s,l,t = paArgs[-3:] - if len(paArgs)>3: - thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc - sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) ) - try: - ret = f(*paArgs) - except Exception as exc: - sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) ) - raise - sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) ) - return ret - try: - z.__name__ = f.__name__ - except AttributeError: - pass - return z - -# -# global helpers -# -def delimitedList( expr, delim=",", combine=False ): - """Helper to define a delimited list of expressions - the delimiter - defaults to ','. By default, the list elements and delimiters can - have intervening whitespace, and comments, but this can be - overridden by passing ``combine=True`` in the constructor. If - ``combine`` is set to ``True``, the matching tokens are - returned as a single token string, with the delimiters included; - otherwise, the matching tokens are returned as a list of tokens, - with the delimiters suppressed. 
- - Example:: - - delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc'] - delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] - """ - dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..." - if combine: - return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName) - else: - return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName) - -def countedArray( expr, intExpr=None ): - """Helper to define a counted list of expressions. - - This helper defines a pattern of the form:: - - integer expr expr expr... - - where the leading integer tells how many expr expressions follow. - The matched tokens returns the array of expr tokens as a list - the - leading count token is suppressed. - - If ``intExpr`` is specified, it should be a pyparsing expression - that produces an integer value. - - Example:: - - countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd'] - - # in this parser, the leading integer value is given in binary, - # '10' indicating that 2 values are in the array - binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2)) - countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd'] - """ - arrayExpr = Forward() - def countFieldParseAction(s,l,t): - n = t[0] - arrayExpr << (n and Group(And([expr]*n)) or Group(empty)) - return [] - if intExpr is None: - intExpr = Word(nums).setParseAction(lambda t:int(t[0])) - else: - intExpr = intExpr.copy() - intExpr.setName("arrayLen") - intExpr.addParseAction(countFieldParseAction, callDuringTry=True) - return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...') - -def _flatten(L): - ret = [] - for i in L: - if isinstance(i,list): - ret.extend(_flatten(i)) - else: - ret.append(i) - return ret - -def matchPreviousLiteral(expr): - """Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it 
looks for - a 'repeat' of a previous expression. For example:: - - first = Word(nums) - second = matchPreviousLiteral(first) - matchExpr = first + ":" + second - - will match ``"1:1"``, but not ``"1:2"``. Because this - matches a previous literal, will also match the leading - ``"1:1"`` in ``"1:10"``. If this is not desired, use - :class:`matchPreviousExpr`. Do *not* use with packrat parsing - enabled. - """ - rep = Forward() - def copyTokenToRepeater(s,l,t): - if t: - if len(t) == 1: - rep << t[0] - else: - # flatten t tokens - tflat = _flatten(t.asList()) - rep << And(Literal(tt) for tt in tflat) - else: - rep << Empty() - expr.addParseAction(copyTokenToRepeater, callDuringTry=True) - rep.setName('(prev) ' + _ustr(expr)) - return rep - -def matchPreviousExpr(expr): - """Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it looks for - a 'repeat' of a previous expression. For example:: - - first = Word(nums) - second = matchPreviousExpr(first) - matchExpr = first + ":" + second - - will match ``"1:1"``, but not ``"1:2"``. Because this - matches by expressions, will *not* match the leading ``"1:1"`` - in ``"1:10"``; the expressions are evaluated first, and then - compared, so ``"1"`` is compared with ``"10"``. Do *not* use - with packrat parsing enabled. 
- """ - rep = Forward() - e2 = expr.copy() - rep <<= e2 - def copyTokenToRepeater(s,l,t): - matchTokens = _flatten(t.asList()) - def mustMatchTheseTokens(s,l,t): - theseTokens = _flatten(t.asList()) - if theseTokens != matchTokens: - raise ParseException("",0,"") - rep.setParseAction( mustMatchTheseTokens, callDuringTry=True ) - expr.addParseAction(copyTokenToRepeater, callDuringTry=True) - rep.setName('(prev) ' + _ustr(expr)) - return rep - -def _escapeRegexRangeChars(s): - #~ escape these chars: ^-] - for c in r"\^-]": - s = s.replace(c,_bslash+c) - s = s.replace("\n",r"\n") - s = s.replace("\t",r"\t") - return _ustr(s) - -def oneOf( strs, caseless=False, useRegex=True ): - """Helper to quickly define a set of alternative Literals, and makes - sure to do longest-first testing when there is a conflict, - regardless of the input order, but returns - a :class:`MatchFirst` for best performance. - - Parameters: - - - strs - a string of space-delimited literals, or a collection of - string literals - - caseless - (default= ``False``) - treat all literals as - caseless - - useRegex - (default= ``True``) - as an optimization, will - generate a Regex object; otherwise, will generate - a :class:`MatchFirst` object (if ``caseless=True``, or if - creating a :class:`Regex` raises an exception) - - Example:: - - comp_oper = oneOf("< = > <= >= !=") - var = Word(alphas) - number = Word(nums) - term = var | number - comparison_expr = term + comp_oper + term - print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12")) - - prints:: - - [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] - """ - if caseless: - isequal = ( lambda a,b: a.upper() == b.upper() ) - masks = ( lambda a,b: b.upper().startswith(a.upper()) ) - parseElementClass = CaselessLiteral - else: - isequal = ( lambda a,b: a == b ) - masks = ( lambda a,b: b.startswith(a) ) - parseElementClass = Literal - - symbols = [] - if isinstance(strs,basestring): - symbols = strs.split() - elif 
isinstance(strs, Iterable): - symbols = list(strs) - else: - warnings.warn("Invalid argument to oneOf, expected string or iterable", - SyntaxWarning, stacklevel=2) - if not symbols: - return NoMatch() - - i = 0 - while i < len(symbols)-1: - cur = symbols[i] - for j,other in enumerate(symbols[i+1:]): - if ( isequal(other, cur) ): - del symbols[i+j+1] - break - elif ( masks(cur, other) ): - del symbols[i+j+1] - symbols.insert(i,other) - cur = other - break - else: - i += 1 - - if not caseless and useRegex: - #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] )) - try: - if len(symbols)==len("".join(symbols)): - return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols)) - else: - return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols)) - except Exception: - warnings.warn("Exception creating Regex for oneOf, building MatchFirst", - SyntaxWarning, stacklevel=2) - - - # last resort, just use MatchFirst - return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols)) - -def dictOf( key, value ): - """Helper to easily and clearly define a dictionary by specifying - the respective patterns for the key and value. Takes care of - defining the :class:`Dict`, :class:`ZeroOrMore`, and - :class:`Group` tokens in the proper order. The key pattern - can include delimiting markers or punctuation, as long as they are - suppressed, thereby leaving the significant key text. The value - pattern can include named results, so that the :class:`Dict` results - can include named token fields. 
- - Example:: - - text = "shape: SQUARE posn: upper left color: light blue texture: burlap" - attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) - print(OneOrMore(attr_expr).parseString(text).dump()) - - attr_label = label - attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join) - - # similar to Dict, but simpler call format - result = dictOf(attr_label, attr_value).parseString(text) - print(result.dump()) - print(result['shape']) - print(result.shape) # object attribute access works too - print(result.asDict()) - - prints:: - - [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - - color: light blue - - posn: upper left - - shape: SQUARE - - texture: burlap - SQUARE - SQUARE - {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} - """ - return Dict(OneOrMore(Group(key + value))) - -def originalTextFor(expr, asString=True): - """Helper to return the original, untokenized text for a given - expression. Useful to restore the parsed fields of an HTML start - tag into the raw tag text itself, or to revert separate tokens with - intervening whitespace back to the original matching input text. By - default, returns astring containing the original parsed text. - - If the optional ``asString`` argument is passed as - ``False``, then the return value is - a :class:`ParseResults` containing any results names that - were originally matched, and a single token containing the original - matched text from the input string. So if the expression passed to - :class:`originalTextFor` contains expressions with defined - results names, you must set ``asString`` to ``False`` if you - want to preserve those results name values. 
- - Example:: - - src = "this is test <b> bold <i>text</i> </b> normal text " - for tag in ("b","i"): - opener,closer = makeHTMLTags(tag) - patt = originalTextFor(opener + SkipTo(closer) + closer) - print(patt.searchString(src)[0]) - - prints:: - - ['<b> bold <i>text</i> </b>'] - ['<i>text</i>'] - """ - locMarker = Empty().setParseAction(lambda s,loc,t: loc) - endlocMarker = locMarker.copy() - endlocMarker.callPreparse = False - matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") - if asString: - extractText = lambda s,l,t: s[t._original_start:t._original_end] - else: - def extractText(s,l,t): - t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]] - matchExpr.setParseAction(extractText) - matchExpr.ignoreExprs = expr.ignoreExprs - return matchExpr - -def ungroup(expr): - """Helper to undo pyparsing's default grouping of And expressions, - even if all but one are non-empty. - """ - return TokenConverter(expr).setParseAction(lambda t:t[0]) - -def locatedExpr(expr): - """Helper to decorate a returned token with its starting and ending - locations in the input string. 
- - This helper adds the following results names: - - - locn_start = location where matched expression begins - - locn_end = location where matched expression ends - - value = the actual parsed results - - Be careful if the input text contains ``<TAB>`` characters, you - may want to call :class:`ParserElement.parseWithTabs` - - Example:: - - wd = Word(alphas) - for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"): - print(match) - - prints:: - - [[0, 'ljsdf', 5]] - [[8, 'lksdjjf', 15]] - [[18, 'lkkjj', 23]] - """ - locator = Empty().setParseAction(lambda s,l,t: l) - return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end")) - - -# convenience constants for positional expressions -empty = Empty().setName("empty") -lineStart = LineStart().setName("lineStart") -lineEnd = LineEnd().setName("lineEnd") -stringStart = StringStart().setName("stringStart") -stringEnd = StringEnd().setName("stringEnd") - -_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1]) -_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16))) -_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8))) -_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1) -_charRange = Group(_singleChar + Suppress("-") + _singleChar) -_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]" - -def srange(s): - r"""Helper to easily define string ranges for use in Word - construction. 
Borrows syntax from regexp '[]' string range - definitions:: - - srange("[0-9]") -> "0123456789" - srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" - srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" - - The input string must be enclosed in []'s, and the returned string - is the expanded character set joined into a single string. The - values enclosed in the []'s may be: - - - a single character - - an escaped character with a leading backslash (such as ``\-`` - or ``\]``) - - an escaped hex character with a leading ``'\x'`` - (``\x21``, which is a ``'!'`` character) (``\0x##`` - is also supported for backwards compatibility) - - an escaped octal character with a leading ``'\0'`` - (``\041``, which is a ``'!'`` character) - - a range of any of the above, separated by a dash (``'a-z'``, - etc.) - - any combination of the above (``'aeiouy'``, - ``'a-zA-Z0-9_$'``, etc.) - """ - _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1)) - try: - return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body) - except Exception: - return "" - -def matchOnlyAtCol(n): - """Helper method for defining parse actions that require matching at - a specific column in the input text. - """ - def verifyCol(strg,locn,toks): - if col(locn,strg) != n: - raise ParseException(strg,locn,"matched token not at column %d" % n) - return verifyCol - -def replaceWith(replStr): - """Helper method for common parse actions that simply return - a literal value. Especially useful when used with - :class:`transformString<ParserElement.transformString>` (). 
- - Example:: - - num = Word(nums).setParseAction(lambda toks: int(toks[0])) - na = oneOf("N/A NA").setParseAction(replaceWith(math.nan)) - term = na | num - - OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234] - """ - return lambda s,l,t: [replStr] - -def removeQuotes(s,l,t): - """Helper parse action for removing quotation marks from parsed - quoted strings. - - Example:: - - # by default, quotation marks are included in parsed results - quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"] - - # use removeQuotes to strip quotation marks from parsed results - quotedString.setParseAction(removeQuotes) - quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"] - """ - return t[0][1:-1] - -def tokenMap(func, *args): - """Helper to define a parse action by mapping a function to all - elements of a ParseResults list. If any additional args are passed, - they are forwarded to the given function as additional arguments - after the token, as in - ``hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))``, - which will convert the parsed data to an integer using base 16. 
- - Example (compare the last to example in :class:`ParserElement.transformString`:: - - hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16)) - hex_ints.runTests(''' - 00 11 22 aa FF 0a 0d 1a - ''') - - upperword = Word(alphas).setParseAction(tokenMap(str.upper)) - OneOrMore(upperword).runTests(''' - my kingdom for a horse - ''') - - wd = Word(alphas).setParseAction(tokenMap(str.title)) - OneOrMore(wd).setParseAction(' '.join).runTests(''' - now is the winter of our discontent made glorious summer by this sun of york - ''') - - prints:: - - 00 11 22 aa FF 0a 0d 1a - [0, 17, 34, 170, 255, 10, 13, 26] - - my kingdom for a horse - ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE'] - - now is the winter of our discontent made glorious summer by this sun of york - ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] - """ - def pa(s,l,t): - return [func(tokn, *args) for tokn in t] - - try: - func_name = getattr(func, '__name__', - getattr(func, '__class__').__name__) - except Exception: - func_name = str(func) - pa.__name__ = func_name - - return pa - -upcaseTokens = tokenMap(lambda t: _ustr(t).upper()) -"""(Deprecated) Helper parse action to convert tokens to upper case. -Deprecated in favor of :class:`pyparsing_common.upcaseTokens`""" - -downcaseTokens = tokenMap(lambda t: _ustr(t).lower()) -"""(Deprecated) Helper parse action to convert tokens to lower case. 
-Deprecated in favor of :class:`pyparsing_common.downcaseTokens`""" - -def _makeTags(tagStr, xml): - """Internal helper to construct opening and closing tag expressions, given a tag name""" - if isinstance(tagStr,basestring): - resname = tagStr - tagStr = Keyword(tagStr, caseless=not xml) - else: - resname = tagStr.name - - tagAttrName = Word(alphas,alphanums+"_-:") - if (xml): - tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes ) - openTag = Suppress("<") + tagStr("tag") + \ - Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \ - Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") - else: - printablesLessRAbrack = "".join(c for c in printables if c not in ">") - tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack) - openTag = Suppress("<") + tagStr("tag") + \ - Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \ - Optional( Suppress("=") + tagAttrValue ) ))) + \ - Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") - closeTag = Combine(_L("</") + tagStr + ">") - - openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname) - closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname) - openTag.tag = resname - closeTag.tag = resname - return openTag, closeTag - -def makeHTMLTags(tagStr): - """Helper to construct opening and closing tag expressions for HTML, - given a tag name. Matches tags in either upper or lower case, - attributes with namespaces and with quoted or unquoted values. 
- - Example:: - - text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>' - # makeHTMLTags returns pyparsing expressions for the opening and - # closing tags as a 2-tuple - a,a_end = makeHTMLTags("A") - link_expr = a + SkipTo(a_end)("link_text") + a_end - - for link in link_expr.searchString(text): - # attributes in the <A> tag (like "href" shown here) are - # also accessible as named results - print(link.link_text, '->', link.href) - - prints:: - - pyparsing -> https://github.com/pyparsing/pyparsing/wiki - """ - return _makeTags( tagStr, False ) - -def makeXMLTags(tagStr): - """Helper to construct opening and closing tag expressions for XML, - given a tag name. Matches tags only in the given upper/lower case. - - Example: similar to :class:`makeHTMLTags` - """ - return _makeTags( tagStr, True ) - -def withAttribute(*args,**attrDict): - """Helper to create a validating parse action to be used with start - tags created with :class:`makeXMLTags` or - :class:`makeHTMLTags`. Use ``withAttribute`` to qualify - a starting tag with a required attribute value, to avoid false - matches on common tags such as ``<TD>`` or ``<DIV>``. - - Call ``withAttribute`` with a series of attribute names and - values. Specify the list of filter attributes names and values as: - - - keyword arguments, as in ``(align="right")``, or - - as an explicit dict with ``**`` operator, when an attribute - name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}`` - - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align","right"))`` - - For attribute names with a namespace prefix, you must use the second - form. Attribute names are matched insensitive to upper/lower case. - - If just testing for ``class`` (with or without a namespace), use - :class:`withClass`. - - To verify that the attribute exists, but without specifying a value, - pass ``withAttribute.ANY_VALUE`` as the value. 
- - Example:: - - html = ''' - <div> - Some text - <div type="grid">1 4 0 1 0</div> - <div type="graph">1,3 2,3 1,1</div> - <div>this has no type</div> - </div> - - ''' - div,div_end = makeHTMLTags("div") - - # only match div tag having a type attribute with value "grid" - div_grid = div().setParseAction(withAttribute(type="grid")) - grid_expr = div_grid + SkipTo(div | div_end)("body") - for grid_header in grid_expr.searchString(html): - print(grid_header.body) - - # construct a match with any div tag having a type attribute, regardless of the value - div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE)) - div_expr = div_any_type + SkipTo(div | div_end)("body") - for div_header in div_expr.searchString(html): - print(div_header.body) - - prints:: - - 1 4 0 1 0 - - 1 4 0 1 0 - 1,3 2,3 1,1 - """ - if args: - attrs = args[:] - else: - attrs = attrDict.items() - attrs = [(k,v) for k,v in attrs] - def pa(s,l,tokens): - for attrName,attrValue in attrs: - if attrName not in tokens: - raise ParseException(s,l,"no matching attribute " + attrName) - if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue: - raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" % - (attrName, tokens[attrName], attrValue)) - return pa -withAttribute.ANY_VALUE = object() - -def withClass(classname, namespace=''): - """Simplified version of :class:`withAttribute` when - matching on a div class - made difficult because ``class`` is - a reserved word in Python. 
- - Example:: - - html = ''' - <div> - Some text - <div class="grid">1 4 0 1 0</div> - <div class="graph">1,3 2,3 1,1</div> - <div>this <div> has no class</div> - </div> - - ''' - div,div_end = makeHTMLTags("div") - div_grid = div().setParseAction(withClass("grid")) - - grid_expr = div_grid + SkipTo(div | div_end)("body") - for grid_header in grid_expr.searchString(html): - print(grid_header.body) - - div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE)) - div_expr = div_any_type + SkipTo(div | div_end)("body") - for div_header in div_expr.searchString(html): - print(div_header.body) - - prints:: - - 1 4 0 1 0 - - 1 4 0 1 0 - 1,3 2,3 1,1 - """ - classattr = "%s:class" % namespace if namespace else "class" - return withAttribute(**{classattr : classname}) - -opAssoc = SimpleNamespace() -opAssoc.LEFT = object() -opAssoc.RIGHT = object() - -def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): - """Helper method for constructing grammars of expressions made up of - operators working in a precedence hierarchy. Operators may be unary - or binary, left- or right-associative. Parse actions can also be - attached to operator expressions. The generated parser will also - recognize the use of parentheses to override operator precedences - (see example below). - - Note: if you define a deep operator list, you may see performance - issues when using infixNotation. See - :class:`ParserElement.enablePackrat` for a mechanism to potentially - improve your parser performance. 
- - Parameters: - - baseExpr - expression representing the most basic element for the - nested - - opList - list of tuples, one for each operator precedence level - in the expression grammar; each tuple is of the form ``(opExpr, - numTerms, rightLeftAssoc, parseAction)``, where: - - - opExpr is the pyparsing expression for the operator; may also - be a string, which will be converted to a Literal; if numTerms - is 3, opExpr is a tuple of two expressions, for the two - operators separating the 3 terms - - numTerms is the number of terms for this operator (must be 1, - 2, or 3) - - rightLeftAssoc is the indicator whether the operator is right - or left associative, using the pyparsing-defined constants - ``opAssoc.RIGHT`` and ``opAssoc.LEFT``. - - parseAction is the parse action to be associated with - expressions matching this operator expression (the parse action - tuple member may be omitted); if the parse action is passed - a tuple or list of functions, this is equivalent to calling - ``setParseAction(*fn)`` - (:class:`ParserElement.setParseAction`) - - lpar - expression for matching left-parentheses - (default= ``Suppress('(')``) - - rpar - expression for matching right-parentheses - (default= ``Suppress(')')``) - - Example:: - - # simple example of four-function arithmetic with ints and - # variable names - integer = pyparsing_common.signed_integer - varname = pyparsing_common.identifier - - arith_expr = infixNotation(integer | varname, - [ - ('-', 1, opAssoc.RIGHT), - (oneOf('* /'), 2, opAssoc.LEFT), - (oneOf('+ -'), 2, opAssoc.LEFT), - ]) - - arith_expr.runTests(''' - 5+3*6 - (5+3)*6 - -2--11 - ''', fullDump=False) - - prints:: - - 5+3*6 - [[5, '+', [3, '*', 6]]] - - (5+3)*6 - [[[5, '+', 3], '*', 6]] - - -2--11 - [[['-', 2], '-', ['-', 11]]] - """ - # captive version of FollowedBy that does not do parse actions or capture results names - class _FB(FollowedBy): - def parseImpl(self, instring, loc, doActions=True): - self.expr.tryParse(instring, loc) - return 
loc, [] - - ret = Forward() - lastExpr = baseExpr | ( lpar + ret + rpar ) - for i,operDef in enumerate(opList): - opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4] - termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr - if arity == 3: - if opExpr is None or len(opExpr) != 2: - raise ValueError( - "if numterms=3, opExpr must be a tuple or list of two expressions") - opExpr1, opExpr2 = opExpr - thisExpr = Forward().setName(termName) - if rightLeftAssoc == opAssoc.LEFT: - if arity == 1: - matchExpr = _FB(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) ) - elif arity == 2: - if opExpr is not None: - matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) ) - else: - matchExpr = _FB(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) ) - elif arity == 3: - matchExpr = _FB(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \ - Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr ) - else: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - elif rightLeftAssoc == opAssoc.RIGHT: - if arity == 1: - # try to avoid LR with this extra test - if not isinstance(opExpr, Optional): - opExpr = Optional(opExpr) - matchExpr = _FB(opExpr.expr + thisExpr) + Group( opExpr + thisExpr ) - elif arity == 2: - if opExpr is not None: - matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) ) - else: - matchExpr = _FB(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) ) - elif arity == 3: - matchExpr = _FB(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \ - Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr ) - else: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - else: - raise ValueError("operator must indicate right or left associativity") - if pa: - if isinstance(pa, (tuple, list)): - matchExpr.setParseAction(*pa) - else: - matchExpr.setParseAction(pa) - thisExpr <<= ( 
matchExpr.setName(termName) | lastExpr ) - lastExpr = thisExpr - ret <<= lastExpr - return ret - -operatorPrecedence = infixNotation -"""(Deprecated) Former name of :class:`infixNotation`, will be -dropped in a future release.""" - -dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes") -sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes") -quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'| - Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes") -unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal") - -def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()): - """Helper method for defining nested lists enclosed in opening and - closing delimiters ("(" and ")" are the default). - - Parameters: - - opener - opening character for a nested list - (default= ``"("``); can also be a pyparsing expression - - closer - closing character for a nested list - (default= ``")"``); can also be a pyparsing expression - - content - expression for items within the nested lists - (default= ``None``) - - ignoreExpr - expression for ignoring opening and closing - delimiters (default= :class:`quotedString`) - - If an expression is not provided for the content argument, the - nested expression will capture all whitespace-delimited content - between delimiters as a list of separate values. - - Use the ``ignoreExpr`` argument to define expressions that may - contain opening or closing characters that should not be treated as - opening or closing characters for nesting, such as quotedString or - a comment expression. Specify multiple expressions using an - :class:`Or` or :class:`MatchFirst`. 
The default is - :class:`quotedString`, but if no expressions are to be ignored, then - pass ``None`` for this argument. - - Example:: - - data_type = oneOf("void int short long char float double") - decl_data_type = Combine(data_type + Optional(Word('*'))) - ident = Word(alphas+'_', alphanums+'_') - number = pyparsing_common.number - arg = Group(decl_data_type + ident) - LPAR,RPAR = map(Suppress, "()") - - code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment)) - - c_function = (decl_data_type("type") - + ident("name") - + LPAR + Optional(delimitedList(arg), [])("args") + RPAR - + code_body("body")) - c_function.ignore(cStyleComment) - - source_code = ''' - int is_odd(int x) { - return (x%2); - } - - int dec_to_hex(char hchar) { - if (hchar >= '0' && hchar <= '9') { - return (ord(hchar)-ord('0')); - } else { - return (10+ord(hchar)-ord('A')); - } - } - ''' - for func in c_function.searchString(source_code): - print("%(name)s (%(type)s) args: %(args)s" % func) - - - prints:: - - is_odd (int) args: [['int', 'x']] - dec_to_hex (int) args: [['char', 'hchar']] - """ - if opener == closer: - raise ValueError("opening and closing strings cannot be the same") - if content is None: - if isinstance(opener,basestring) and isinstance(closer,basestring): - if len(opener) == 1 and len(closer)==1: - if ignoreExpr is not None: - content = (Combine(OneOrMore(~ignoreExpr + - CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1)) - ).setParseAction(lambda t:t[0].strip())) - else: - content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS - ).setParseAction(lambda t:t[0].strip())) - else: - if ignoreExpr is not None: - content = (Combine(OneOrMore(~ignoreExpr + - ~Literal(opener) + ~Literal(closer) + - CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) - ).setParseAction(lambda t:t[0].strip())) - else: - content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) + - 
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) - ).setParseAction(lambda t:t[0].strip())) - else: - raise ValueError("opening and closing arguments must be strings if no content expression is given") - ret = Forward() - if ignoreExpr is not None: - ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) ) - else: - ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) ) - ret.setName('nested %s%s expression' % (opener,closer)) - return ret - -def indentedBlock(blockStatementExpr, indentStack, indent=True): - """Helper method for defining space-delimited indentation blocks, - such as those used to define block statements in Python source code. - - Parameters: - - - blockStatementExpr - expression defining syntax of statement that - is repeated within the indented block - - indentStack - list created by caller to manage indentation stack - (multiple statementWithIndentedBlock expressions within a single - grammar should share a common indentStack) - - indent - boolean indicating whether block must be indented beyond - the the current level; set to False for block of left-most - statements (default= ``True``) - - A valid block must contain at least one ``blockStatement``. 
- - Example:: - - data = ''' - def A(z): - A1 - B = 100 - G = A2 - A2 - A3 - B - def BB(a,b,c): - BB1 - def BBA(): - bba1 - bba2 - bba3 - C - D - def spam(x,y): - def eggs(z): - pass - ''' - - - indentStack = [1] - stmt = Forward() - - identifier = Word(alphas, alphanums) - funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":") - func_body = indentedBlock(stmt, indentStack) - funcDef = Group( funcDecl + func_body ) - - rvalue = Forward() - funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") - rvalue << (funcCall | identifier | Word(nums)) - assignment = Group(identifier + "=" + rvalue) - stmt << ( funcDef | assignment | identifier ) - - module_body = OneOrMore(stmt) - - parseTree = module_body.parseString(data) - parseTree.pprint() - - prints:: - - [['def', - 'A', - ['(', 'z', ')'], - ':', - [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], - 'B', - ['def', - 'BB', - ['(', 'a', 'b', 'c', ')'], - ':', - [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], - 'C', - 'D', - ['def', - 'spam', - ['(', 'x', 'y', ')'], - ':', - [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] - """ - def checkPeerIndent(s,l,t): - if l >= len(s): return - curCol = col(l,s) - if curCol != indentStack[-1]: - if curCol > indentStack[-1]: - raise ParseFatalException(s,l,"illegal nesting") - raise ParseException(s,l,"not a peer entry") - - def checkSubIndent(s,l,t): - curCol = col(l,s) - if curCol > indentStack[-1]: - indentStack.append( curCol ) - else: - raise ParseException(s,l,"not a subentry") - - def checkUnindent(s,l,t): - if l >= len(s): return - curCol = col(l,s) - if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]): - raise ParseException(s,l,"not an unindent") - indentStack.pop() - - NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress()) - INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT') - PEER = 
Empty().setParseAction(checkPeerIndent).setName('') - UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT') - if indent: - smExpr = Group( Optional(NL) + - #~ FollowedBy(blockStatementExpr) + - INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT) - else: - smExpr = Group( Optional(NL) + - (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) ) - blockStatementExpr.ignore(_bslash + LineEnd()) - return smExpr.setName('indented block') - -alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") -punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") - -anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag')) -_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\'')) -commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity") -def replaceHTMLEntity(t): - """Helper parser action to replace common HTML entities with their special characters""" - return _htmlEntityMap.get(t.entity) - -# it's easy to get these comment structures wrong - they're very common, so may as well make them available -cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment") -"Comment of the form ``/* ... */``" - -htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment") -"Comment of the form ``<!-- ... -->``" - -restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line") -dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment") -"Comment of the form ``// ... (to end of line)``" - -cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment") -"Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`" - -javaStyleComment = cppStyleComment -"Same as :class:`cppStyleComment`" - -pythonStyleComment = Regex(r"#.*").setName("Python style comment") -"Comment of the form ``# ... 
(to end of line)``" - -_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + - Optional( Word(" \t") + - ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem") -commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList") -"""(Deprecated) Predefined expression of 1 or more printable words or -quoted strings, separated by commas. - -This expression is deprecated in favor of :class:`pyparsing_common.comma_separated_list`. -""" - -# some other useful expressions - using lower-case class name since we are really using this as a namespace -class pyparsing_common: - """Here are some common low-level expressions that may be useful in - jump-starting parser development: - - - numeric forms (:class:`integers<integer>`, :class:`reals<real>`, - :class:`scientific notation<sci_real>`) - - common :class:`programming identifiers<identifier>` - - network addresses (:class:`MAC<mac_address>`, - :class:`IPv4<ipv4_address>`, :class:`IPv6<ipv6_address>`) - - ISO8601 :class:`dates<iso8601_date>` and - :class:`datetime<iso8601_datetime>` - - :class:`UUID<uuid>` - - :class:`comma-separated list<comma_separated_list>` - - Parse actions: - - - :class:`convertToInteger` - - :class:`convertToFloat` - - :class:`convertToDate` - - :class:`convertToDatetime` - - :class:`stripHTMLTags` - - :class:`upcaseTokens` - - :class:`downcaseTokens` - - Example:: - - pyparsing_common.number.runTests(''' - # any int or real number, returned as the appropriate type - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.fnumber.runTests(''' - # any int or real number, returned as float - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.hex_integer.runTests(''' - # hex numbers - 100 - FF - ''') - - pyparsing_common.fraction.runTests(''' - # fractions - 1/2 - -3/4 - ''') - - pyparsing_common.mixed_integer.runTests(''' - # mixed fractions - 1 - 1/2 - -3/4 - 1-3/4 - ''') - 
- import uuid - pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) - pyparsing_common.uuid.runTests(''' - # uuid - 12345678-1234-5678-1234-567812345678 - ''') - - prints:: - - # any int or real number, returned as the appropriate type - 100 - [100] - - -100 - [-100] - - +100 - [100] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # any int or real number, returned as float - 100 - [100.0] - - -100 - [-100.0] - - +100 - [100.0] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # hex numbers - 100 - [256] - - FF - [255] - - # fractions - 1/2 - [0.5] - - -3/4 - [-0.75] - - # mixed fractions - 1 - [1] - - 1/2 - [0.5] - - -3/4 - [-0.75] - - 1-3/4 - [1.75] - - # uuid - 12345678-1234-5678-1234-567812345678 - [UUID('12345678-1234-5678-1234-567812345678')] - """ - - convertToInteger = tokenMap(int) - """ - Parse action for converting parsed integers to Python int - """ - - convertToFloat = tokenMap(float) - """ - Parse action for converting parsed numbers to Python float - """ - - integer = Word(nums).setName("integer").setParseAction(convertToInteger) - """expression that parses an unsigned integer, returns an int""" - - hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16)) - """expression that parses a hexadecimal integer, returns an int""" - - signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger) - """expression that parses an integer with optional leading sign, returns an int""" - - fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction") - """fractional expression of an integer divided by an integer, returns a float""" - fraction.addParseAction(lambda t: t[0]/t[-1]) - - mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction") - """mixed integer of the form 'integer - fraction', with 
optional leading integer, returns float""" - mixed_integer.addParseAction(sum) - - real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat) - """expression that parses a floating point number and returns a float""" - - sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat) - """expression that parses a floating point number with optional - scientific notation and returns a float""" - - # streamlining this expression makes the docs nicer-looking - number = (sci_real | real | signed_integer).streamline() - """any numeric expression, returns the corresponding Python type""" - - fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat) - """any int or real number, returned as float""" - - identifier = Word(alphas+'_', alphanums+'_').setName("identifier") - """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" - - ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address") - "IPv4 address (``0.0.0.0 - 255.255.255.255``)" - - _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer") - _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address") - _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address") - _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8) - _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address") - ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address") - "IPv6 address (long, short, or mixed form)" - - mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC 
address") - "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)" - - @staticmethod - def convertToDate(fmt="%Y-%m-%d"): - """ - Helper to create a parse action for converting parsed date string to Python datetime.date - - Params - - - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``) - - Example:: - - date_expr = pyparsing_common.iso8601_date.copy() - date_expr.setParseAction(pyparsing_common.convertToDate()) - print(date_expr.parseString("1999-12-31")) - - prints:: - - [datetime.date(1999, 12, 31)] - """ - def cvt_fn(s,l,t): - try: - return datetime.strptime(t[0], fmt).date() - except ValueError as ve: - raise ParseException(s, l, str(ve)) - return cvt_fn - - @staticmethod - def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"): - """Helper to create a parse action for converting parsed - datetime string to Python datetime.datetime - - Params - - - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``) - - Example:: - - dt_expr = pyparsing_common.iso8601_datetime.copy() - dt_expr.setParseAction(pyparsing_common.convertToDatetime()) - print(dt_expr.parseString("1999-12-31T23:59:59.999")) - - prints:: - - [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] - """ - def cvt_fn(s,l,t): - try: - return datetime.strptime(t[0], fmt) - except ValueError as ve: - raise ParseException(s, l, str(ve)) - return cvt_fn - - iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date") - "ISO8601 date (``yyyy-mm-dd``)" - - iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime") - "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``" - - uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID") - "UUID 
(``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)" - - _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress() - @staticmethod - def stripHTMLTags(s, l, tokens): - """Parse action to remove HTML tags from web page HTML source - - Example:: - - # strip HTML links from normal text - text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>' - td,td_end = makeHTMLTags("TD") - table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end - print(table_text.parseString(text).body) - - Prints:: - - More info at the pyparsing wiki page - """ - return pyparsing_common._html_stripper.transformString(tokens[0]) - - _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',') - + Optional( White(" \t") ) ) ).streamline().setName("commaItem") - comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list") - """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" - - upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper())) - """Parse action to convert tokens to upper case.""" - - downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower())) - """Parse action to convert tokens to lower case.""" - - -class _lazyclassproperty(object): - def __init__(self, fn): - self.fn = fn - self.__doc__ = fn.__doc__ - self.__name__ = fn.__name__ - - def __get__(self, obj, cls): - if cls is None: - cls = type(obj) - if not hasattr(cls, '_intern') or any(cls._intern is getattr(superclass, '_intern', []) for superclass in cls.__mro__[1:]): - cls._intern = {} - attrname = self.fn.__name__ - if attrname not in cls._intern: - cls._intern[attrname] = self.fn(cls) - return cls._intern[attrname] - - -class unicode_set(object): - """ - A set of Unicode characters, for language-specific strings for - ``alphas``, ``nums``, ``alphanums``, and ``printables``. 
- A unicode_set is defined by a list of ranges in the Unicode character - set, in a class attribute ``_ranges``, such as:: - - _ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),] - - A unicode set can also be defined using multiple inheritance of other unicode sets:: - - class CJK(Chinese, Japanese, Korean): - pass - """ - _ranges = [] - - @classmethod - def _get_chars_for_ranges(cls): - ret = [] - for cc in cls.__mro__: - if cc is unicode_set: - break - for rr in cc._ranges: - ret.extend(range(rr[0], rr[-1]+1)) - return [unichr(c) for c in sorted(set(ret))] - - @_lazyclassproperty - def printables(cls): - "all non-whitespace characters in this range" - return u''.join(filterfalse(unicode.isspace, cls._get_chars_for_ranges())) - - @_lazyclassproperty - def alphas(cls): - "all alphabetic characters in this range" - return u''.join(filter(unicode.isalpha, cls._get_chars_for_ranges())) - - @_lazyclassproperty - def nums(cls): - "all numeric digit characters in this range" - return u''.join(filter(unicode.isdigit, cls._get_chars_for_ranges())) - - @_lazyclassproperty - def alphanums(cls): - "all alphanumeric characters in this range" - return cls.alphas + cls.nums - - -class pyparsing_unicode(unicode_set): - """ - A namespace class for defining common language unicode_sets. 
- """ - _ranges = [(32, sys.maxunicode)] - - class Latin1(unicode_set): - "Unicode set for Latin-1 Unicode Character Range" - _ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),] - - class LatinA(unicode_set): - "Unicode set for Latin-A Unicode Character Range" - _ranges = [(0x0100, 0x017f),] - - class LatinB(unicode_set): - "Unicode set for Latin-B Unicode Character Range" - _ranges = [(0x0180, 0x024f),] - - class Greek(unicode_set): - "Unicode set for Greek Unicode Character Ranges" - _ranges = [ - (0x0370, 0x03ff), (0x1f00, 0x1f15), (0x1f18, 0x1f1d), (0x1f20, 0x1f45), (0x1f48, 0x1f4d), - (0x1f50, 0x1f57), (0x1f59,), (0x1f5b,), (0x1f5d,), (0x1f5f, 0x1f7d), (0x1f80, 0x1fb4), (0x1fb6, 0x1fc4), - (0x1fc6, 0x1fd3), (0x1fd6, 0x1fdb), (0x1fdd, 0x1fef), (0x1ff2, 0x1ff4), (0x1ff6, 0x1ffe), - ] - - class Cyrillic(unicode_set): - "Unicode set for Cyrillic Unicode Character Range" - _ranges = [(0x0400, 0x04ff)] - - class Chinese(unicode_set): - "Unicode set for Chinese Unicode Character Range" - _ranges = [(0x4e00, 0x9fff), (0x3000, 0x303f), ] - - class Japanese(unicode_set): - "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges" - _ranges = [ ] - - class Kanji(unicode_set): - "Unicode set for Kanji Unicode Character Range" - _ranges = [(0x4E00, 0x9Fbf), (0x3000, 0x303f), ] - - class Hiragana(unicode_set): - "Unicode set for Hiragana Unicode Character Range" - _ranges = [(0x3040, 0x309f), ] - - class Katakana(unicode_set): - "Unicode set for Katakana Unicode Character Range" - _ranges = [(0x30a0, 0x30ff), ] - - class Korean(unicode_set): - "Unicode set for Korean Unicode Character Range" - _ranges = [(0xac00, 0xd7af), (0x1100, 0x11ff), (0x3130, 0x318f), (0xa960, 0xa97f), (0xd7b0, 0xd7ff), (0x3000, 0x303f), ] - - class CJK(Chinese, Japanese, Korean): - "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range" - pass - - class Thai(unicode_set): - "Unicode set for Thai Unicode Character Range" - _ranges = 
[(0x0e01, 0x0e3a), (0x0e3f, 0x0e5b), ] - - class Arabic(unicode_set): - "Unicode set for Arabic Unicode Character Range" - _ranges = [(0x0600, 0x061b), (0x061e, 0x06ff), (0x0700, 0x077f), ] - - class Hebrew(unicode_set): - "Unicode set for Hebrew Unicode Character Range" - _ranges = [(0x0590, 0x05ff), ] - - class Devanagari(unicode_set): - "Unicode set for Devanagari Unicode Character Range" - _ranges = [(0x0900, 0x097f), (0xa8e0, 0xa8ff)] - -pyparsing_unicode.Japanese._ranges = (pyparsing_unicode.Japanese.Kanji._ranges - + pyparsing_unicode.Japanese.Hiragana._ranges - + pyparsing_unicode.Japanese.Katakana._ranges) - -# define ranges in language character sets -if PY_3: - setattr(pyparsing_unicode, "العربية", pyparsing_unicode.Arabic) - setattr(pyparsing_unicode, "中文", pyparsing_unicode.Chinese) - setattr(pyparsing_unicode, "кириллица", pyparsing_unicode.Cyrillic) - setattr(pyparsing_unicode, "Ελληνικά", pyparsing_unicode.Greek) - setattr(pyparsing_unicode, "עִברִית", pyparsing_unicode.Hebrew) - setattr(pyparsing_unicode, "日本語", pyparsing_unicode.Japanese) - setattr(pyparsing_unicode.Japanese, "漢字", pyparsing_unicode.Japanese.Kanji) - setattr(pyparsing_unicode.Japanese, "カタカナ", pyparsing_unicode.Japanese.Katakana) - setattr(pyparsing_unicode.Japanese, "ã²ã‚‰ãŒãª", pyparsing_unicode.Japanese.Hiragana) - setattr(pyparsing_unicode, "한국어", pyparsing_unicode.Korean) - setattr(pyparsing_unicode, "ไทย", pyparsing_unicode.Thai) - setattr(pyparsing_unicode, "देवनागरी", pyparsing_unicode.Devanagari) - - -if __name__ == "__main__": - - selectToken = CaselessLiteral("select") - fromToken = CaselessLiteral("from") - - ident = Word(alphas, alphanums + "_$") - - columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) - columnNameList = Group(delimitedList(columnName)).setName("columns") - columnSpec = ('*' | columnNameList) - - tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) - tableNameList = 
Group(delimitedList(tableName)).setName("tables") - - simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables") - - # demo runTests method, including embedded comments in test string - simpleSQL.runTests(""" - # '*' as column list and dotted table name - select * from SYS.XYZZY - - # caseless match on "SELECT", and casts back to "select" - SELECT * from XYZZY, ABC - - # list of column names, and mixed case SELECT keyword - Select AA,BB,CC from Sys.dual - - # multiple tables - Select A, B, C from Sys.dual, Table2 - - # invalid SELECT keyword - should fail - Xelect A, B, C from Sys.dual - - # incomplete command - should fail - Select - - # invalid column name - should fail - Select ^^^ frox Sys.dual - - """) - - pyparsing_common.number.runTests(""" - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - """) - - # any int or real number, returned as float - pyparsing_common.fnumber.runTests(""" - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - """) - - pyparsing_common.hex_integer.runTests(""" - 100 - FF - """) - - import uuid - pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) - pyparsing_common.uuid.runTests(""" - 12345678-1234-5678-1234-567812345678 - """) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pytoml/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pytoml/__init__.py deleted file mode 100644 index 8ed060f..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pytoml/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .core import TomlError -from .parser import load, loads -from .test import translate_to_test -from .writer import dump, dumps \ No newline at end of file diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pytoml/core.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pytoml/core.py deleted file mode 100644 index c182734..0000000 --- 
a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pytoml/core.py +++ /dev/null @@ -1,13 +0,0 @@ -class TomlError(RuntimeError): - def __init__(self, message, line, col, filename): - RuntimeError.__init__(self, message, line, col, filename) - self.message = message - self.line = line - self.col = col - self.filename = filename - - def __str__(self): - return '{}({}, {}): {}'.format(self.filename, self.line, self.col, self.message) - - def __repr__(self): - return 'TomlError({!r}, {!r}, {!r}, {!r})'.format(self.message, self.line, self.col, self.filename) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pytoml/parser.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pytoml/parser.py deleted file mode 100644 index 3493aa6..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pytoml/parser.py +++ /dev/null @@ -1,341 +0,0 @@ -import string, re, sys, datetime -from .core import TomlError -from .utils import rfc3339_re, parse_rfc3339_re - -if sys.version_info[0] == 2: - _chr = unichr -else: - _chr = chr - -def load(fin, translate=lambda t, x, v: v, object_pairs_hook=dict): - return loads(fin.read(), translate=translate, object_pairs_hook=object_pairs_hook, filename=getattr(fin, 'name', repr(fin))) - -def loads(s, filename='<string>', translate=lambda t, x, v: v, object_pairs_hook=dict): - if isinstance(s, bytes): - s = s.decode('utf-8') - - s = s.replace('\r\n', '\n') - - root = object_pairs_hook() - tables = object_pairs_hook() - scope = root - - src = _Source(s, filename=filename) - ast = _p_toml(src, object_pairs_hook=object_pairs_hook) - - def error(msg): - raise TomlError(msg, pos[0], pos[1], filename) - - def process_value(v, object_pairs_hook): - kind, text, value, pos = v - if kind == 'str' and value.startswith('\n'): - value = value[1:] - if kind == 'array': - if value and any(k != value[0][0] for k, t, v, p in value[1:]): - error('array-type-mismatch') - value = 
[process_value(item, object_pairs_hook=object_pairs_hook) for item in value] - elif kind == 'table': - value = object_pairs_hook([(k, process_value(value[k], object_pairs_hook=object_pairs_hook)) for k in value]) - return translate(kind, text, value) - - for kind, value, pos in ast: - if kind == 'kv': - k, v = value - if k in scope: - error('duplicate_keys. Key "{0}" was used more than once.'.format(k)) - scope[k] = process_value(v, object_pairs_hook=object_pairs_hook) - else: - is_table_array = (kind == 'table_array') - cur = tables - for name in value[:-1]: - if isinstance(cur.get(name), list): - d, cur = cur[name][-1] - else: - d, cur = cur.setdefault(name, (None, object_pairs_hook())) - - scope = object_pairs_hook() - name = value[-1] - if name not in cur: - if is_table_array: - cur[name] = [(scope, object_pairs_hook())] - else: - cur[name] = (scope, object_pairs_hook()) - elif isinstance(cur[name], list): - if not is_table_array: - error('table_type_mismatch') - cur[name].append((scope, object_pairs_hook())) - else: - if is_table_array: - error('table_type_mismatch') - old_scope, next_table = cur[name] - if old_scope is not None: - error('duplicate_tables') - cur[name] = (scope, next_table) - - def merge_tables(scope, tables): - if scope is None: - scope = object_pairs_hook() - for k in tables: - if k in scope: - error('key_table_conflict') - v = tables[k] - if isinstance(v, list): - scope[k] = [merge_tables(sc, tbl) for sc, tbl in v] - else: - scope[k] = merge_tables(v[0], v[1]) - return scope - - return merge_tables(root, tables) - -class _Source: - def __init__(self, s, filename=None): - self.s = s - self._pos = (1, 1) - self._last = None - self._filename = filename - self.backtrack_stack = [] - - def last(self): - return self._last - - def pos(self): - return self._pos - - def fail(self): - return self._expect(None) - - def consume_dot(self): - if self.s: - self._last = self.s[0] - self.s = self[1:] - self._advance(self._last) - return self._last - return 
None - - def expect_dot(self): - return self._expect(self.consume_dot()) - - def consume_eof(self): - if not self.s: - self._last = '' - return True - return False - - def expect_eof(self): - return self._expect(self.consume_eof()) - - def consume(self, s): - if self.s.startswith(s): - self.s = self.s[len(s):] - self._last = s - self._advance(s) - return True - return False - - def expect(self, s): - return self._expect(self.consume(s)) - - def consume_re(self, re): - m = re.match(self.s) - if m: - self.s = self.s[len(m.group(0)):] - self._last = m - self._advance(m.group(0)) - return m - return None - - def expect_re(self, re): - return self._expect(self.consume_re(re)) - - def __enter__(self): - self.backtrack_stack.append((self.s, self._pos)) - - def __exit__(self, type, value, traceback): - if type is None: - self.backtrack_stack.pop() - else: - self.s, self._pos = self.backtrack_stack.pop() - return type == TomlError - - def commit(self): - self.backtrack_stack[-1] = (self.s, self._pos) - - def _expect(self, r): - if not r: - raise TomlError('msg', self._pos[0], self._pos[1], self._filename) - return r - - def _advance(self, s): - suffix_pos = s.rfind('\n') - if suffix_pos == -1: - self._pos = (self._pos[0], self._pos[1] + len(s)) - else: - self._pos = (self._pos[0] + s.count('\n'), len(s) - suffix_pos) - -_ews_re = re.compile(r'(?:[ \t]|#[^\n]*\n|#[^\n]*\Z|\n)*') -def _p_ews(s): - s.expect_re(_ews_re) - -_ws_re = re.compile(r'[ \t]*') -def _p_ws(s): - s.expect_re(_ws_re) - -_escapes = { 'b': '\b', 'n': '\n', 'r': '\r', 't': '\t', '"': '"', - '\\': '\\', 'f': '\f' } - -_basicstr_re = re.compile(r'[^"\\\000-\037]*') -_short_uni_re = re.compile(r'u([0-9a-fA-F]{4})') -_long_uni_re = re.compile(r'U([0-9a-fA-F]{8})') -_escapes_re = re.compile(r'[btnfr\"\\]') -_newline_esc_re = re.compile('\n[ \t\n]*') -def _p_basicstr_content(s, content=_basicstr_re): - res = [] - while True: - res.append(s.expect_re(content).group(0)) - if not s.consume('\\'): - break - if 
s.consume_re(_newline_esc_re): - pass - elif s.consume_re(_short_uni_re) or s.consume_re(_long_uni_re): - v = int(s.last().group(1), 16) - if 0xd800 <= v < 0xe000: - s.fail() - res.append(_chr(v)) - else: - s.expect_re(_escapes_re) - res.append(_escapes[s.last().group(0)]) - return ''.join(res) - -_key_re = re.compile(r'[0-9a-zA-Z-_]+') -def _p_key(s): - with s: - s.expect('"') - r = _p_basicstr_content(s, _basicstr_re) - s.expect('"') - return r - if s.consume('\''): - if s.consume('\'\''): - r = s.expect_re(_litstr_ml_re).group(0) - s.expect('\'\'\'') - else: - r = s.expect_re(_litstr_re).group(0) - s.expect('\'') - return r - return s.expect_re(_key_re).group(0) - -_float_re = re.compile(r'[+-]?(?:0|[1-9](?:_?\d)*)(?:\.\d(?:_?\d)*)?(?:[eE][+-]?(?:\d(?:_?\d)*))?') - -_basicstr_ml_re = re.compile(r'(?:""?(?!")|[^"\\\000-\011\013-\037])*') -_litstr_re = re.compile(r"[^'\000\010\012-\037]*") -_litstr_ml_re = re.compile(r"(?:(?:|'|'')(?:[^'\000-\010\013-\037]))*") -def _p_value(s, object_pairs_hook): - pos = s.pos() - - if s.consume('true'): - return 'bool', s.last(), True, pos - if s.consume('false'): - return 'bool', s.last(), False, pos - - if s.consume('"'): - if s.consume('""'): - r = _p_basicstr_content(s, _basicstr_ml_re) - s.expect('"""') - else: - r = _p_basicstr_content(s, _basicstr_re) - s.expect('"') - return 'str', r, r, pos - - if s.consume('\''): - if s.consume('\'\''): - r = s.expect_re(_litstr_ml_re).group(0) - s.expect('\'\'\'') - else: - r = s.expect_re(_litstr_re).group(0) - s.expect('\'') - return 'str', r, r, pos - - if s.consume_re(rfc3339_re): - m = s.last() - return 'datetime', m.group(0), parse_rfc3339_re(m), pos - - if s.consume_re(_float_re): - m = s.last().group(0) - r = m.replace('_','') - if '.' 
in m or 'e' in m or 'E' in m: - return 'float', m, float(r), pos - else: - return 'int', m, int(r, 10), pos - - if s.consume('['): - items = [] - with s: - while True: - _p_ews(s) - items.append(_p_value(s, object_pairs_hook=object_pairs_hook)) - s.commit() - _p_ews(s) - s.expect(',') - s.commit() - _p_ews(s) - s.expect(']') - return 'array', None, items, pos - - if s.consume('{'): - _p_ws(s) - items = object_pairs_hook() - if not s.consume('}'): - k = _p_key(s) - _p_ws(s) - s.expect('=') - _p_ws(s) - items[k] = _p_value(s, object_pairs_hook=object_pairs_hook) - _p_ws(s) - while s.consume(','): - _p_ws(s) - k = _p_key(s) - _p_ws(s) - s.expect('=') - _p_ws(s) - items[k] = _p_value(s, object_pairs_hook=object_pairs_hook) - _p_ws(s) - s.expect('}') - return 'table', None, items, pos - - s.fail() - -def _p_stmt(s, object_pairs_hook): - pos = s.pos() - if s.consume( '['): - is_array = s.consume('[') - _p_ws(s) - keys = [_p_key(s)] - _p_ws(s) - while s.consume('.'): - _p_ws(s) - keys.append(_p_key(s)) - _p_ws(s) - s.expect(']') - if is_array: - s.expect(']') - return 'table_array' if is_array else 'table', keys, pos - - key = _p_key(s) - _p_ws(s) - s.expect('=') - _p_ws(s) - value = _p_value(s, object_pairs_hook=object_pairs_hook) - return 'kv', (key, value), pos - -_stmtsep_re = re.compile(r'(?:[ \t]*(?:#[^\n]*)?\n)+[ \t]*') -def _p_toml(s, object_pairs_hook): - stmts = [] - _p_ews(s) - with s: - stmts.append(_p_stmt(s, object_pairs_hook=object_pairs_hook)) - while True: - s.commit() - s.expect_re(_stmtsep_re) - stmts.append(_p_stmt(s, object_pairs_hook=object_pairs_hook)) - _p_ews(s) - s.expect_eof() - return stmts diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pytoml/test.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pytoml/test.py deleted file mode 100644 index ec8abfc..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pytoml/test.py +++ /dev/null @@ -1,30 +0,0 @@ -import datetime 
-from .utils import format_rfc3339 - -try: - _string_types = (str, unicode) - _int_types = (int, long) -except NameError: - _string_types = str - _int_types = int - -def translate_to_test(v): - if isinstance(v, dict): - return { k: translate_to_test(v) for k, v in v.items() } - if isinstance(v, list): - a = [translate_to_test(x) for x in v] - if v and isinstance(v[0], dict): - return a - else: - return {'type': 'array', 'value': a} - if isinstance(v, datetime.datetime): - return {'type': 'datetime', 'value': format_rfc3339(v)} - if isinstance(v, bool): - return {'type': 'bool', 'value': 'true' if v else 'false'} - if isinstance(v, _int_types): - return {'type': 'integer', 'value': str(v)} - if isinstance(v, float): - return {'type': 'float', 'value': '{:.17}'.format(v)} - if isinstance(v, _string_types): - return {'type': 'string', 'value': v} - raise RuntimeError('unexpected value: {!r}'.format(v)) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pytoml/utils.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pytoml/utils.py deleted file mode 100644 index 636a680..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pytoml/utils.py +++ /dev/null @@ -1,67 +0,0 @@ -import datetime -import re - -rfc3339_re = re.compile(r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})(\.\d+)?(?:Z|([+-]\d{2}):(\d{2}))') - -def parse_rfc3339(v): - m = rfc3339_re.match(v) - if not m or m.group(0) != v: - return None - return parse_rfc3339_re(m) - -def parse_rfc3339_re(m): - r = map(int, m.groups()[:6]) - if m.group(7): - micro = float(m.group(7)) - else: - micro = 0 - - if m.group(8): - g = int(m.group(8), 10) * 60 + int(m.group(9), 10) - tz = _TimeZone(datetime.timedelta(0, g * 60)) - else: - tz = _TimeZone(datetime.timedelta(0, 0)) - - y, m, d, H, M, S = r - return datetime.datetime(y, m, d, H, M, S, int(micro * 1000000), tz) - - -def format_rfc3339(v): - offs = v.utcoffset() - offs = 
int(offs.total_seconds()) // 60 if offs is not None else 0 - - if offs == 0: - suffix = 'Z' - else: - if offs > 0: - suffix = '+' - else: - suffix = '-' - offs = -offs - suffix = '{0}{1:02}:{2:02}'.format(suffix, offs // 60, offs % 60) - - if v.microsecond: - return v.strftime('%Y-%m-%dT%H:%M:%S.%f') + suffix - else: - return v.strftime('%Y-%m-%dT%H:%M:%S') + suffix - -class _TimeZone(datetime.tzinfo): - def __init__(self, offset): - self._offset = offset - - def utcoffset(self, dt): - return self._offset - - def dst(self, dt): - return None - - def tzname(self, dt): - m = self._offset.total_seconds() // 60 - if m < 0: - res = '-' - m = -m - else: - res = '+' - h = m // 60 - m = m - h * 60 - return '{}{:.02}{:.02}'.format(res, h, m) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pytoml/writer.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pytoml/writer.py deleted file mode 100644 index 73b5089..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pytoml/writer.py +++ /dev/null @@ -1,106 +0,0 @@ -from __future__ import unicode_literals -import io, datetime, math, string, sys - -from .utils import format_rfc3339 - -if sys.version_info[0] == 3: - long = int - unicode = str - - -def dumps(obj, sort_keys=False): - fout = io.StringIO() - dump(obj, fout, sort_keys=sort_keys) - return fout.getvalue() - - -_escapes = {'\n': 'n', '\r': 'r', '\\': '\\', '\t': 't', '\b': 'b', '\f': 'f', '"': '"'} - - -def _escape_string(s): - res = [] - start = 0 - - def flush(): - if start != i: - res.append(s[start:i]) - return i + 1 - - i = 0 - while i < len(s): - c = s[i] - if c in '"\\\n\r\t\b\f': - start = flush() - res.append('\\' + _escapes[c]) - elif ord(c) < 0x20: - start = flush() - res.append('\\u%04x' % ord(c)) - i += 1 - - flush() - return '"' + ''.join(res) + '"' - - -_key_chars = string.digits + string.ascii_letters + '-_' -def _escape_id(s): - if any(c not in _key_chars for c in s): - return 
_escape_string(s) - return s - - -def _format_value(v): - if isinstance(v, bool): - return 'true' if v else 'false' - if isinstance(v, int) or isinstance(v, long): - return unicode(v) - if isinstance(v, float): - if math.isnan(v) or math.isinf(v): - raise ValueError("{0} is not a valid TOML value".format(v)) - else: - return repr(v) - elif isinstance(v, unicode) or isinstance(v, bytes): - return _escape_string(v) - elif isinstance(v, datetime.datetime): - return format_rfc3339(v) - elif isinstance(v, list): - return '[{0}]'.format(', '.join(_format_value(obj) for obj in v)) - elif isinstance(v, dict): - return '{{{0}}}'.format(', '.join('{} = {}'.format(_escape_id(k), _format_value(obj)) for k, obj in v.items())) - else: - raise RuntimeError(v) - - -def dump(obj, fout, sort_keys=False): - tables = [((), obj, False)] - - while tables: - name, table, is_array = tables.pop() - if name: - section_name = '.'.join(_escape_id(c) for c in name) - if is_array: - fout.write('[[{0}]]\n'.format(section_name)) - else: - fout.write('[{0}]\n'.format(section_name)) - - table_keys = sorted(table.keys()) if sort_keys else table.keys() - new_tables = [] - has_kv = False - for k in table_keys: - v = table[k] - if isinstance(v, dict): - new_tables.append((name + (k,), v, False)) - elif isinstance(v, list) and v and all(isinstance(o, dict) for o in v): - new_tables.extend((name + (k,), d, True) for d in v) - elif v is None: - # based on mojombo's comment: https://github.com/toml-lang/toml/issues/146#issuecomment-25019344 - fout.write( - '#{} = null # To use: uncomment and replace null with value\n'.format(_escape_id(k))) - has_kv = True - else: - fout.write('{0} = {1}\n'.format(_escape_id(k), _format_value(v))) - has_kv = True - - tables.extend(reversed(new_tables)) - - if (name or has_kv) and tables: - fout.write('\n') diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/__init__.py 
b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/__init__.py deleted file mode 100644 index 80c4ce1..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/__init__.py +++ /dev/null @@ -1,133 +0,0 @@ -# -*- coding: utf-8 -*- - -# __ -# /__) _ _ _ _ _/ _ -# / ( (- (/ (/ (- _) / _) -# / - -""" -Requests HTTP Library -~~~~~~~~~~~~~~~~~~~~~ - -Requests is an HTTP library, written in Python, for human beings. Basic GET -usage: - - >>> import requests - >>> r = requests.get('https://www.python.org') - >>> r.status_code - 200 - >>> 'Python is a programming language' in r.content - True - -... or POST: - - >>> payload = dict(key1='value1', key2='value2') - >>> r = requests.post('https://httpbin.org/post', data=payload) - >>> print(r.text) - { - ... - "form": { - "key2": "value2", - "key1": "value1" - }, - ... - } - -The other HTTP methods are supported - see `requests.api`. Full documentation -is at <http://python-requests.org>. - -:copyright: (c) 2017 by Kenneth Reitz. -:license: Apache 2.0, see LICENSE for more details. -""" - -from pip._vendor import urllib3 -from pip._vendor import chardet -import warnings -from .exceptions import RequestsDependencyWarning - - -def check_compatibility(urllib3_version, chardet_version): - urllib3_version = urllib3_version.split('.') - assert urllib3_version != ['dev'] # Verify urllib3 isn't installed from git. - - # Sometimes, urllib3 only reports its version as 16.1. - if len(urllib3_version) == 2: - urllib3_version.append('0') - - # Check urllib3 for compatibility. - major, minor, patch = urllib3_version # noqa: F811 - major, minor, patch = int(major), int(minor), int(patch) - # urllib3 >= 1.21.1, <= 1.24 - assert major == 1 - assert minor >= 21 - assert minor <= 24 - - # Check chardet for compatibility. 
- major, minor, patch = chardet_version.split('.')[:3] - major, minor, patch = int(major), int(minor), int(patch) - # chardet >= 3.0.2, < 3.1.0 - assert major == 3 - assert minor < 1 - assert patch >= 2 - - -def _check_cryptography(cryptography_version): - # cryptography < 1.3.4 - try: - cryptography_version = list(map(int, cryptography_version.split('.'))) - except ValueError: - return - - if cryptography_version < [1, 3, 4]: - warning = 'Old version of cryptography ({}) may cause slowdown.'.format(cryptography_version) - warnings.warn(warning, RequestsDependencyWarning) - -# Check imported dependencies for compatibility. -try: - check_compatibility(urllib3.__version__, chardet.__version__) -except (AssertionError, ValueError): - warnings.warn("urllib3 ({}) or chardet ({}) doesn't match a supported " - "version!".format(urllib3.__version__, chardet.__version__), - RequestsDependencyWarning) - -# Attempt to enable urllib3's SNI support, if possible -from pip._internal.utils.compat import WINDOWS -if not WINDOWS: - try: - from pip._vendor.urllib3.contrib import pyopenssl - pyopenssl.inject_into_urllib3() - - # Check cryptography version - from cryptography import __version__ as cryptography_version - _check_cryptography(cryptography_version) - except ImportError: - pass - -# urllib3's DependencyWarnings should be silenced. -from pip._vendor.urllib3.exceptions import DependencyWarning -warnings.simplefilter('ignore', DependencyWarning) - -from .__version__ import __title__, __description__, __url__, __version__ -from .__version__ import __build__, __author__, __author_email__, __license__ -from .__version__ import __copyright__, __cake__ - -from . import utils -from . 
import packages -from .models import Request, Response, PreparedRequest -from .api import request, get, head, post, patch, put, delete, options -from .sessions import session, Session -from .status_codes import codes -from .exceptions import ( - RequestException, Timeout, URLRequired, - TooManyRedirects, HTTPError, ConnectionError, - FileModeWarning, ConnectTimeout, ReadTimeout -) - -# Set default logging handler to avoid "No handler found" warnings. -import logging -from logging import NullHandler - -logging.getLogger(__name__).addHandler(NullHandler()) - -# FileModeWarnings go off per the default. -warnings.simplefilter('default', FileModeWarning, append=True) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/__version__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/__version__.py deleted file mode 100644 index f5b5d03..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/__version__.py +++ /dev/null @@ -1,14 +0,0 @@ -# .-. .-. .-. . . .-. .-. .-. .-. -# |( |- |.| | | |- `-. | `-. -# ' ' `-' `-`.`-' `-' `-' ' `-' - -__title__ = 'requests' -__description__ = 'Python HTTP for Humans.' 
-__url__ = 'http://python-requests.org' -__version__ = '2.21.0' -__build__ = 0x022100 -__author__ = 'Kenneth Reitz' -__author_email__ = 'me@kennethreitz.org' -__license__ = 'Apache 2.0' -__copyright__ = 'Copyright 2018 Kenneth Reitz' -__cake__ = u'\u2728 \U0001f370 \u2728' diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/_internal_utils.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/_internal_utils.py deleted file mode 100644 index 759d9a5..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/_internal_utils.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests._internal_utils -~~~~~~~~~~~~~~ - -Provides utility functions that are consumed internally by Requests -which depend on extremely few external helpers (such as compat) -""" - -from .compat import is_py2, builtin_str, str - - -def to_native_string(string, encoding='ascii'): - """Given a string object, regardless of type, returns a representation of - that string in the native string type, encoding and decoding where - necessary. This assumes ASCII unless told otherwise. - """ - if isinstance(string, builtin_str): - out = string - else: - if is_py2: - out = string.encode(encoding) - else: - out = string.decode(encoding) - - return out - - -def unicode_is_ascii(u_string): - """Determine if unicode string only contains ASCII characters. - - :param str u_string: unicode string to check. Must be unicode - and not Python 2 `str`. 
- :rtype: bool - """ - assert isinstance(u_string, str) - try: - u_string.encode('ascii') - return True - except UnicodeEncodeError: - return False diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/adapters.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/adapters.py deleted file mode 100644 index c30e7c9..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/adapters.py +++ /dev/null @@ -1,533 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests.adapters -~~~~~~~~~~~~~~~~~ - -This module contains the transport adapters that Requests uses to define -and maintain connections. -""" - -import os.path -import socket - -from pip._vendor.urllib3.poolmanager import PoolManager, proxy_from_url -from pip._vendor.urllib3.response import HTTPResponse -from pip._vendor.urllib3.util import parse_url -from pip._vendor.urllib3.util import Timeout as TimeoutSauce -from pip._vendor.urllib3.util.retry import Retry -from pip._vendor.urllib3.exceptions import ClosedPoolError -from pip._vendor.urllib3.exceptions import ConnectTimeoutError -from pip._vendor.urllib3.exceptions import HTTPError as _HTTPError -from pip._vendor.urllib3.exceptions import MaxRetryError -from pip._vendor.urllib3.exceptions import NewConnectionError -from pip._vendor.urllib3.exceptions import ProxyError as _ProxyError -from pip._vendor.urllib3.exceptions import ProtocolError -from pip._vendor.urllib3.exceptions import ReadTimeoutError -from pip._vendor.urllib3.exceptions import SSLError as _SSLError -from pip._vendor.urllib3.exceptions import ResponseError -from pip._vendor.urllib3.exceptions import LocationValueError - -from .models import Response -from .compat import urlparse, basestring -from .utils import (DEFAULT_CA_BUNDLE_PATH, extract_zipped_paths, - get_encoding_from_headers, prepend_scheme_if_needed, - get_auth_from_url, urldefragauth, select_proxy) -from .structures import CaseInsensitiveDict -from 
.cookies import extract_cookies_to_jar -from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError, - ProxyError, RetryError, InvalidSchema, InvalidProxyURL, - InvalidURL) -from .auth import _basic_auth_str - -try: - from pip._vendor.urllib3.contrib.socks import SOCKSProxyManager -except ImportError: - def SOCKSProxyManager(*args, **kwargs): - raise InvalidSchema("Missing dependencies for SOCKS support.") - -DEFAULT_POOLBLOCK = False -DEFAULT_POOLSIZE = 10 -DEFAULT_RETRIES = 0 -DEFAULT_POOL_TIMEOUT = None - - -class BaseAdapter(object): - """The Base Transport Adapter""" - - def __init__(self): - super(BaseAdapter, self).__init__() - - def send(self, request, stream=False, timeout=None, verify=True, - cert=None, proxies=None): - """Sends PreparedRequest object. Returns Response object. - - :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. - :param stream: (optional) Whether to stream the request content. - :param timeout: (optional) How long to wait for the server to send - data before giving up, as a float, or a :ref:`(connect timeout, - read timeout) <timeouts>` tuple. - :type timeout: float or tuple - :param verify: (optional) Either a boolean, in which case it controls whether we verify - the server's TLS certificate, or a string, in which case it must be a path - to a CA bundle to use - :param cert: (optional) Any user-provided SSL certificate to be trusted. - :param proxies: (optional) The proxies dictionary to apply to the request. - """ - raise NotImplementedError - - def close(self): - """Cleans up adapter specific items.""" - raise NotImplementedError - - -class HTTPAdapter(BaseAdapter): - """The built-in HTTP Adapter for urllib3. - - Provides a general-case interface for Requests sessions to contact HTTP and - HTTPS urls by implementing the Transport Adapter interface. This class will - usually be created by the :class:`Session <Session>` class under the - covers. 
- - :param pool_connections: The number of urllib3 connection pools to cache. - :param pool_maxsize: The maximum number of connections to save in the pool. - :param max_retries: The maximum number of retries each connection - should attempt. Note, this applies only to failed DNS lookups, socket - connections and connection timeouts, never to requests where data has - made it to the server. By default, Requests does not retry failed - connections. If you need granular control over the conditions under - which we retry a request, import urllib3's ``Retry`` class and pass - that instead. - :param pool_block: Whether the connection pool should block for connections. - - Usage:: - - >>> import requests - >>> s = requests.Session() - >>> a = requests.adapters.HTTPAdapter(max_retries=3) - >>> s.mount('http://', a) - """ - __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize', - '_pool_block'] - - def __init__(self, pool_connections=DEFAULT_POOLSIZE, - pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES, - pool_block=DEFAULT_POOLBLOCK): - if max_retries == DEFAULT_RETRIES: - self.max_retries = Retry(0, read=False) - else: - self.max_retries = Retry.from_int(max_retries) - self.config = {} - self.proxy_manager = {} - - super(HTTPAdapter, self).__init__() - - self._pool_connections = pool_connections - self._pool_maxsize = pool_maxsize - self._pool_block = pool_block - - self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block) - - def __getstate__(self): - return {attr: getattr(self, attr, None) for attr in self.__attrs__} - - def __setstate__(self, state): - # Can't handle by adding 'proxy_manager' to self.__attrs__ because - # self.poolmanager uses a lambda function, which isn't pickleable. 
- self.proxy_manager = {} - self.config = {} - - for attr, value in state.items(): - setattr(self, attr, value) - - self.init_poolmanager(self._pool_connections, self._pool_maxsize, - block=self._pool_block) - - def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs): - """Initializes a urllib3 PoolManager. - - This method should not be called from user code, and is only - exposed for use when subclassing the - :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. - - :param connections: The number of urllib3 connection pools to cache. - :param maxsize: The maximum number of connections to save in the pool. - :param block: Block when no free connections are available. - :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager. - """ - # save these values for pickling - self._pool_connections = connections - self._pool_maxsize = maxsize - self._pool_block = block - - self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, - block=block, strict=True, **pool_kwargs) - - def proxy_manager_for(self, proxy, **proxy_kwargs): - """Return urllib3 ProxyManager for the given proxy. - - This method should not be called from user code, and is only - exposed for use when subclassing the - :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. - - :param proxy: The proxy to return a urllib3 ProxyManager for. - :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager. 
- :returns: ProxyManager - :rtype: urllib3.ProxyManager - """ - if proxy in self.proxy_manager: - manager = self.proxy_manager[proxy] - elif proxy.lower().startswith('socks'): - username, password = get_auth_from_url(proxy) - manager = self.proxy_manager[proxy] = SOCKSProxyManager( - proxy, - username=username, - password=password, - num_pools=self._pool_connections, - maxsize=self._pool_maxsize, - block=self._pool_block, - **proxy_kwargs - ) - else: - proxy_headers = self.proxy_headers(proxy) - manager = self.proxy_manager[proxy] = proxy_from_url( - proxy, - proxy_headers=proxy_headers, - num_pools=self._pool_connections, - maxsize=self._pool_maxsize, - block=self._pool_block, - **proxy_kwargs) - - return manager - - def cert_verify(self, conn, url, verify, cert): - """Verify a SSL certificate. This method should not be called from user - code, and is only exposed for use when subclassing the - :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. - - :param conn: The urllib3 connection object associated with the cert. - :param url: The requested URL. - :param verify: Either a boolean, in which case it controls whether we verify - the server's TLS certificate, or a string, in which case it must be a path - to a CA bundle to use - :param cert: The SSL certificate to verify. - """ - if url.lower().startswith('https') and verify: - - cert_loc = None - - # Allow self-specified cert location. 
- if verify is not True: - cert_loc = verify - - if not cert_loc: - cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH) - - if not cert_loc or not os.path.exists(cert_loc): - raise IOError("Could not find a suitable TLS CA certificate bundle, " - "invalid path: {}".format(cert_loc)) - - conn.cert_reqs = 'CERT_REQUIRED' - - if not os.path.isdir(cert_loc): - conn.ca_certs = cert_loc - else: - conn.ca_cert_dir = cert_loc - else: - conn.cert_reqs = 'CERT_NONE' - conn.ca_certs = None - conn.ca_cert_dir = None - - if cert: - if not isinstance(cert, basestring): - conn.cert_file = cert[0] - conn.key_file = cert[1] - else: - conn.cert_file = cert - conn.key_file = None - if conn.cert_file and not os.path.exists(conn.cert_file): - raise IOError("Could not find the TLS certificate file, " - "invalid path: {}".format(conn.cert_file)) - if conn.key_file and not os.path.exists(conn.key_file): - raise IOError("Could not find the TLS key file, " - "invalid path: {}".format(conn.key_file)) - - def build_response(self, req, resp): - """Builds a :class:`Response <requests.Response>` object from a urllib3 - response. This should not be called from user code, and is only exposed - for use when subclassing the - :class:`HTTPAdapter <requests.adapters.HTTPAdapter>` - - :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response. - :param resp: The urllib3 response object. - :rtype: requests.Response - """ - response = Response() - - # Fallback to None if there's no status_code, for whatever reason. - response.status_code = getattr(resp, 'status', None) - - # Make headers case-insensitive. - response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {})) - - # Set encoding. - response.encoding = get_encoding_from_headers(response.headers) - response.raw = resp - response.reason = response.raw.reason - - if isinstance(req.url, bytes): - response.url = req.url.decode('utf-8') - else: - response.url = req.url - - # Add new cookies from the server. 
- extract_cookies_to_jar(response.cookies, req, resp) - - # Give the Response some context. - response.request = req - response.connection = self - - return response - - def get_connection(self, url, proxies=None): - """Returns a urllib3 connection for the given URL. This should not be - called from user code, and is only exposed for use when subclassing the - :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. - - :param url: The URL to connect to. - :param proxies: (optional) A Requests-style dictionary of proxies used on this request. - :rtype: urllib3.ConnectionPool - """ - proxy = select_proxy(url, proxies) - - if proxy: - proxy = prepend_scheme_if_needed(proxy, 'http') - proxy_url = parse_url(proxy) - if not proxy_url.host: - raise InvalidProxyURL("Please check proxy URL. It is malformed" - " and could be missing the host.") - proxy_manager = self.proxy_manager_for(proxy) - conn = proxy_manager.connection_from_url(url) - else: - # Only scheme should be lower case - parsed = urlparse(url) - url = parsed.geturl() - conn = self.poolmanager.connection_from_url(url) - - return conn - - def close(self): - """Disposes of any internal state. - - Currently, this closes the PoolManager and any active ProxyManager, - which closes any pooled connections. - """ - self.poolmanager.clear() - for proxy in self.proxy_manager.values(): - proxy.clear() - - def request_url(self, request, proxies): - """Obtain the url to use when making the final request. - - If the message is being sent through a HTTP proxy, the full URL has to - be used. Otherwise, we should only use the path portion of the URL. - - This should not be called from user code, and is only exposed for use - when subclassing the - :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. - - :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. - :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs. 
- :rtype: str - """ - proxy = select_proxy(request.url, proxies) - scheme = urlparse(request.url).scheme - - is_proxied_http_request = (proxy and scheme != 'https') - using_socks_proxy = False - if proxy: - proxy_scheme = urlparse(proxy).scheme.lower() - using_socks_proxy = proxy_scheme.startswith('socks') - - url = request.path_url - if is_proxied_http_request and not using_socks_proxy: - url = urldefragauth(request.url) - - return url - - def add_headers(self, request, **kwargs): - """Add any headers needed by the connection. As of v2.0 this does - nothing by default, but is left for overriding by users that subclass - the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. - - This should not be called from user code, and is only exposed for use - when subclassing the - :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. - - :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to. - :param kwargs: The keyword arguments from the call to send(). - """ - pass - - def proxy_headers(self, proxy): - """Returns a dictionary of the headers to add to any request sent - through a proxy. This works with urllib3 magic to ensure that they are - correctly sent to the proxy, rather than in a tunnelled request if - CONNECT is being used. - - This should not be called from user code, and is only exposed for use - when subclassing the - :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. - - :param proxy: The url of the proxy being used for this request. - :rtype: dict - """ - headers = {} - username, password = get_auth_from_url(proxy) - - if username: - headers['Proxy-Authorization'] = _basic_auth_str(username, - password) - - return headers - - def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): - """Sends PreparedRequest object. Returns Response object. - - :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. - :param stream: (optional) Whether to stream the request content. 
- :param timeout: (optional) How long to wait for the server to send - data before giving up, as a float, or a :ref:`(connect timeout, - read timeout) <timeouts>` tuple. - :type timeout: float or tuple or urllib3 Timeout object - :param verify: (optional) Either a boolean, in which case it controls whether - we verify the server's TLS certificate, or a string, in which case it - must be a path to a CA bundle to use - :param cert: (optional) Any user-provided SSL certificate to be trusted. - :param proxies: (optional) The proxies dictionary to apply to the request. - :rtype: requests.Response - """ - - try: - conn = self.get_connection(request.url, proxies) - except LocationValueError as e: - raise InvalidURL(e, request=request) - - self.cert_verify(conn, request.url, verify, cert) - url = self.request_url(request, proxies) - self.add_headers(request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies) - - chunked = not (request.body is None or 'Content-Length' in request.headers) - - if isinstance(timeout, tuple): - try: - connect, read = timeout - timeout = TimeoutSauce(connect=connect, read=read) - except ValueError as e: - # this may raise a string formatting error. - err = ("Invalid timeout {}. Pass a (connect, read) " - "timeout tuple, or a single float to set " - "both timeouts to the same value".format(timeout)) - raise ValueError(err) - elif isinstance(timeout, TimeoutSauce): - pass - else: - timeout = TimeoutSauce(connect=timeout, read=timeout) - - try: - if not chunked: - resp = conn.urlopen( - method=request.method, - url=url, - body=request.body, - headers=request.headers, - redirect=False, - assert_same_host=False, - preload_content=False, - decode_content=False, - retries=self.max_retries, - timeout=timeout - ) - - # Send the request. 
- else: - if hasattr(conn, 'proxy_pool'): - conn = conn.proxy_pool - - low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT) - - try: - low_conn.putrequest(request.method, - url, - skip_accept_encoding=True) - - for header, value in request.headers.items(): - low_conn.putheader(header, value) - - low_conn.endheaders() - - for i in request.body: - low_conn.send(hex(len(i))[2:].encode('utf-8')) - low_conn.send(b'\r\n') - low_conn.send(i) - low_conn.send(b'\r\n') - low_conn.send(b'0\r\n\r\n') - - # Receive the response from the server - try: - # For Python 2.7, use buffering of HTTP responses - r = low_conn.getresponse(buffering=True) - except TypeError: - # For compatibility with Python 3.3+ - r = low_conn.getresponse() - - resp = HTTPResponse.from_httplib( - r, - pool=conn, - connection=low_conn, - preload_content=False, - decode_content=False - ) - except: - # If we hit any problems here, clean up the connection. - # Then, reraise so that we can handle the actual exception. - low_conn.close() - raise - - except (ProtocolError, socket.error) as err: - raise ConnectionError(err, request=request) - - except MaxRetryError as e: - if isinstance(e.reason, ConnectTimeoutError): - # TODO: Remove this in 3.0.0: see #2811 - if not isinstance(e.reason, NewConnectionError): - raise ConnectTimeout(e, request=request) - - if isinstance(e.reason, ResponseError): - raise RetryError(e, request=request) - - if isinstance(e.reason, _ProxyError): - raise ProxyError(e, request=request) - - if isinstance(e.reason, _SSLError): - # This branch is for urllib3 v1.22 and later. 
- raise SSLError(e, request=request) - - raise ConnectionError(e, request=request) - - except ClosedPoolError as e: - raise ConnectionError(e, request=request) - - except _ProxyError as e: - raise ProxyError(e) - - except (_SSLError, _HTTPError) as e: - if isinstance(e, _SSLError): - # This branch is for urllib3 versions earlier than v1.22 - raise SSLError(e, request=request) - elif isinstance(e, ReadTimeoutError): - raise ReadTimeout(e, request=request) - else: - raise - - return self.build_response(request, resp) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/api.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/api.py deleted file mode 100644 index abada96..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/api.py +++ /dev/null @@ -1,158 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests.api -~~~~~~~~~~~~ - -This module implements the Requests API. - -:copyright: (c) 2012 by Kenneth Reitz. -:license: Apache2, see LICENSE for more details. -""" - -from . import sessions - - -def request(method, url, **kwargs): - """Constructs and sends a :class:`Request <Request>`. - - :param method: method for the new :class:`Request` object. - :param url: URL for the new :class:`Request` object. - :param params: (optional) Dictionary, list of tuples or bytes to send - in the body of the :class:`Request`. - :param data: (optional) Dictionary, list of tuples, bytes, or file-like - object to send in the body of the :class:`Request`. - :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. - :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. - :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. - :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload. 
- ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')`` - or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string - defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers - to add for the file. - :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. - :param timeout: (optional) How many seconds to wait for the server to send data - before giving up, as a float, or a :ref:`(connect timeout, read - timeout) <timeouts>` tuple. - :type timeout: float or tuple - :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``. - :type allow_redirects: bool - :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. - :param verify: (optional) Either a boolean, in which case it controls whether we verify - the server's TLS certificate, or a string, in which case it must be a path - to a CA bundle to use. Defaults to ``True``. - :param stream: (optional) if ``False``, the response content will be immediately downloaded. - :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. - :return: :class:`Response <Response>` object - :rtype: requests.Response - - Usage:: - - >>> import requests - >>> req = requests.request('GET', 'https://httpbin.org/get') - <Response [200]> - """ - - # By using the 'with' statement we are sure the session is closed, thus we - # avoid leaving sockets open which can trigger a ResourceWarning in some - # cases, and look like a memory leak in others. - with sessions.Session() as session: - return session.request(method=method, url=url, **kwargs) - - -def get(url, params=None, **kwargs): - r"""Sends a GET request. - - :param url: URL for the new :class:`Request` object. 
- :param params: (optional) Dictionary, list of tuples or bytes to send - in the body of the :class:`Request`. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :return: :class:`Response <Response>` object - :rtype: requests.Response - """ - - kwargs.setdefault('allow_redirects', True) - return request('get', url, params=params, **kwargs) - - -def options(url, **kwargs): - r"""Sends an OPTIONS request. - - :param url: URL for the new :class:`Request` object. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :return: :class:`Response <Response>` object - :rtype: requests.Response - """ - - kwargs.setdefault('allow_redirects', True) - return request('options', url, **kwargs) - - -def head(url, **kwargs): - r"""Sends a HEAD request. - - :param url: URL for the new :class:`Request` object. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :return: :class:`Response <Response>` object - :rtype: requests.Response - """ - - kwargs.setdefault('allow_redirects', False) - return request('head', url, **kwargs) - - -def post(url, data=None, json=None, **kwargs): - r"""Sends a POST request. - - :param url: URL for the new :class:`Request` object. - :param data: (optional) Dictionary, list of tuples, bytes, or file-like - object to send in the body of the :class:`Request`. - :param json: (optional) json data to send in the body of the :class:`Request`. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :return: :class:`Response <Response>` object - :rtype: requests.Response - """ - - return request('post', url, data=data, json=json, **kwargs) - - -def put(url, data=None, **kwargs): - r"""Sends a PUT request. - - :param url: URL for the new :class:`Request` object. - :param data: (optional) Dictionary, list of tuples, bytes, or file-like - object to send in the body of the :class:`Request`. - :param json: (optional) json data to send in the body of the :class:`Request`. 
- :param \*\*kwargs: Optional arguments that ``request`` takes. - :return: :class:`Response <Response>` object - :rtype: requests.Response - """ - - return request('put', url, data=data, **kwargs) - - -def patch(url, data=None, **kwargs): - r"""Sends a PATCH request. - - :param url: URL for the new :class:`Request` object. - :param data: (optional) Dictionary, list of tuples, bytes, or file-like - object to send in the body of the :class:`Request`. - :param json: (optional) json data to send in the body of the :class:`Request`. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :return: :class:`Response <Response>` object - :rtype: requests.Response - """ - - return request('patch', url, data=data, **kwargs) - - -def delete(url, **kwargs): - r"""Sends a DELETE request. - - :param url: URL for the new :class:`Request` object. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :return: :class:`Response <Response>` object - :rtype: requests.Response - """ - - return request('delete', url, **kwargs) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/auth.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/auth.py deleted file mode 100644 index bdde51c..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/auth.py +++ /dev/null @@ -1,305 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests.auth -~~~~~~~~~~~~~ - -This module contains the authentication handlers for Requests. 
-""" - -import os -import re -import time -import hashlib -import threading -import warnings - -from base64 import b64encode - -from .compat import urlparse, str, basestring -from .cookies import extract_cookies_to_jar -from ._internal_utils import to_native_string -from .utils import parse_dict_header - -CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded' -CONTENT_TYPE_MULTI_PART = 'multipart/form-data' - - -def _basic_auth_str(username, password): - """Returns a Basic Auth string.""" - - # "I want us to put a big-ol' comment on top of it that - # says that this behaviour is dumb but we need to preserve - # it because people are relying on it." - # - Lukasa - # - # These are here solely to maintain backwards compatibility - # for things like ints. This will be removed in 3.0.0. - if not isinstance(username, basestring): - warnings.warn( - "Non-string usernames will no longer be supported in Requests " - "3.0.0. Please convert the object you've passed in ({!r}) to " - "a string or bytes object in the near future to avoid " - "problems.".format(username), - category=DeprecationWarning, - ) - username = str(username) - - if not isinstance(password, basestring): - warnings.warn( - "Non-string passwords will no longer be supported in Requests " - "3.0.0. 
Please convert the object you've passed in ({!r}) to " - "a string or bytes object in the near future to avoid " - "problems.".format(password), - category=DeprecationWarning, - ) - password = str(password) - # -- End Removal -- - - if isinstance(username, str): - username = username.encode('latin1') - - if isinstance(password, str): - password = password.encode('latin1') - - authstr = 'Basic ' + to_native_string( - b64encode(b':'.join((username, password))).strip() - ) - - return authstr - - -class AuthBase(object): - """Base class that all auth implementations derive from""" - - def __call__(self, r): - raise NotImplementedError('Auth hooks must be callable.') - - -class HTTPBasicAuth(AuthBase): - """Attaches HTTP Basic Authentication to the given Request object.""" - - def __init__(self, username, password): - self.username = username - self.password = password - - def __eq__(self, other): - return all([ - self.username == getattr(other, 'username', None), - self.password == getattr(other, 'password', None) - ]) - - def __ne__(self, other): - return not self == other - - def __call__(self, r): - r.headers['Authorization'] = _basic_auth_str(self.username, self.password) - return r - - -class HTTPProxyAuth(HTTPBasicAuth): - """Attaches HTTP Proxy Authentication to a given Request object.""" - - def __call__(self, r): - r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password) - return r - - -class HTTPDigestAuth(AuthBase): - """Attaches HTTP Digest Authentication to the given Request object.""" - - def __init__(self, username, password): - self.username = username - self.password = password - # Keep state in per-thread local storage - self._thread_local = threading.local() - - def init_per_thread_state(self): - # Ensure state is initialized just once per-thread - if not hasattr(self._thread_local, 'init'): - self._thread_local.init = True - self._thread_local.last_nonce = '' - self._thread_local.nonce_count = 0 - self._thread_local.chal = {} 
- self._thread_local.pos = None - self._thread_local.num_401_calls = None - - def build_digest_header(self, method, url): - """ - :rtype: str - """ - - realm = self._thread_local.chal['realm'] - nonce = self._thread_local.chal['nonce'] - qop = self._thread_local.chal.get('qop') - algorithm = self._thread_local.chal.get('algorithm') - opaque = self._thread_local.chal.get('opaque') - hash_utf8 = None - - if algorithm is None: - _algorithm = 'MD5' - else: - _algorithm = algorithm.upper() - # lambdas assume digest modules are imported at the top level - if _algorithm == 'MD5' or _algorithm == 'MD5-SESS': - def md5_utf8(x): - if isinstance(x, str): - x = x.encode('utf-8') - return hashlib.md5(x).hexdigest() - hash_utf8 = md5_utf8 - elif _algorithm == 'SHA': - def sha_utf8(x): - if isinstance(x, str): - x = x.encode('utf-8') - return hashlib.sha1(x).hexdigest() - hash_utf8 = sha_utf8 - elif _algorithm == 'SHA-256': - def sha256_utf8(x): - if isinstance(x, str): - x = x.encode('utf-8') - return hashlib.sha256(x).hexdigest() - hash_utf8 = sha256_utf8 - elif _algorithm == 'SHA-512': - def sha512_utf8(x): - if isinstance(x, str): - x = x.encode('utf-8') - return hashlib.sha512(x).hexdigest() - hash_utf8 = sha512_utf8 - - KD = lambda s, d: hash_utf8("%s:%s" % (s, d)) - - if hash_utf8 is None: - return None - - # XXX not implemented yet - entdig = None - p_parsed = urlparse(url) - #: path is request-uri defined in RFC 2616 which should not be empty - path = p_parsed.path or "/" - if p_parsed.query: - path += '?' 
+ p_parsed.query - - A1 = '%s:%s:%s' % (self.username, realm, self.password) - A2 = '%s:%s' % (method, path) - - HA1 = hash_utf8(A1) - HA2 = hash_utf8(A2) - - if nonce == self._thread_local.last_nonce: - self._thread_local.nonce_count += 1 - else: - self._thread_local.nonce_count = 1 - ncvalue = '%08x' % self._thread_local.nonce_count - s = str(self._thread_local.nonce_count).encode('utf-8') - s += nonce.encode('utf-8') - s += time.ctime().encode('utf-8') - s += os.urandom(8) - - cnonce = (hashlib.sha1(s).hexdigest()[:16]) - if _algorithm == 'MD5-SESS': - HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce)) - - if not qop: - respdig = KD(HA1, "%s:%s" % (nonce, HA2)) - elif qop == 'auth' or 'auth' in qop.split(','): - noncebit = "%s:%s:%s:%s:%s" % ( - nonce, ncvalue, cnonce, 'auth', HA2 - ) - respdig = KD(HA1, noncebit) - else: - # XXX handle auth-int. - return None - - self._thread_local.last_nonce = nonce - - # XXX should the partial digests be encoded too? - base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \ - 'response="%s"' % (self.username, realm, nonce, path, respdig) - if opaque: - base += ', opaque="%s"' % opaque - if algorithm: - base += ', algorithm="%s"' % algorithm - if entdig: - base += ', digest="%s"' % entdig - if qop: - base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce) - - return 'Digest %s' % (base) - - def handle_redirect(self, r, **kwargs): - """Reset num_401_calls counter on redirects.""" - if r.is_redirect: - self._thread_local.num_401_calls = 1 - - def handle_401(self, r, **kwargs): - """ - Takes the given response and tries digest-auth, if needed. - - :rtype: requests.Response - """ - - # If response is not 4xx, do not auth - # See https://github.com/requests/requests/issues/3772 - if not 400 <= r.status_code < 500: - self._thread_local.num_401_calls = 1 - return r - - if self._thread_local.pos is not None: - # Rewind the file position indicator of the body to where - # it was to resend the request. 
- r.request.body.seek(self._thread_local.pos) - s_auth = r.headers.get('www-authenticate', '') - - if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2: - - self._thread_local.num_401_calls += 1 - pat = re.compile(r'digest ', flags=re.IGNORECASE) - self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1)) - - # Consume content and release the original connection - # to allow our new request to reuse the same one. - r.content - r.close() - prep = r.request.copy() - extract_cookies_to_jar(prep._cookies, r.request, r.raw) - prep.prepare_cookies(prep._cookies) - - prep.headers['Authorization'] = self.build_digest_header( - prep.method, prep.url) - _r = r.connection.send(prep, **kwargs) - _r.history.append(r) - _r.request = prep - - return _r - - self._thread_local.num_401_calls = 1 - return r - - def __call__(self, r): - # Initialize per-thread state, if needed - self.init_per_thread_state() - # If we have a saved nonce, skip the 401 - if self._thread_local.last_nonce: - r.headers['Authorization'] = self.build_digest_header(r.method, r.url) - try: - self._thread_local.pos = r.body.tell() - except AttributeError: - # In the case of HTTPDigestAuth being reused and the body of - # the previous request was a file-like object, pos has the - # file position of the previous body. Ensure it's set to - # None. 
- self._thread_local.pos = None - r.register_hook('response', self.handle_401) - r.register_hook('response', self.handle_redirect) - self._thread_local.num_401_calls = 1 - - return r - - def __eq__(self, other): - return all([ - self.username == getattr(other, 'username', None), - self.password == getattr(other, 'password', None) - ]) - - def __ne__(self, other): - return not self == other diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/certs.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/certs.py deleted file mode 100644 index 06a594e..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/certs.py +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -""" -requests.certs -~~~~~~~~~~~~~~ - -This module returns the preferred default CA certificate bundle. There is -only one — the one from the certifi package. - -If you are packaging Requests, e.g., for a Linux distribution or a managed -environment, you can change the definition of where() to return a separately -packaged CA bundle. -""" -from pip._vendor.certifi import where - -if __name__ == '__main__': - print(where()) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/compat.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/compat.py deleted file mode 100644 index 6a86893..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/compat.py +++ /dev/null @@ -1,74 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests.compat -~~~~~~~~~~~~~~~ - -This module handles import compatibility issues between Python 2 and -Python 3. -""" - -from pip._vendor import chardet - -import sys - -# ------- -# Pythons -# ------- - -# Syntax sugar. -_ver = sys.version_info - -#: Python 2.x? -is_py2 = (_ver[0] == 2) - -#: Python 3.x? 
-is_py3 = (_ver[0] == 3) - -# Note: We've patched out simplejson support in pip because it prevents -# upgrading simplejson on Windows. -# try: -# import simplejson as json -# except (ImportError, SyntaxError): -# # simplejson does not support Python 3.2, it throws a SyntaxError -# # because of u'...' Unicode literals. -import json - -# --------- -# Specifics -# --------- - -if is_py2: - from urllib import ( - quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, - proxy_bypass, proxy_bypass_environment, getproxies_environment) - from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag - from urllib2 import parse_http_list - import cookielib - from Cookie import Morsel - from StringIO import StringIO - from collections import Callable, Mapping, MutableMapping, OrderedDict - - - builtin_str = str - bytes = str - str = unicode - basestring = basestring - numeric_types = (int, long, float) - integer_types = (int, long) - -elif is_py3: - from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag - from urllib.request import parse_http_list, getproxies, proxy_bypass, proxy_bypass_environment, getproxies_environment - from http import cookiejar as cookielib - from http.cookies import Morsel - from io import StringIO - from collections import OrderedDict - from collections.abc import Callable, Mapping, MutableMapping - - builtin_str = str - str = str - bytes = bytes - basestring = (str, bytes) - numeric_types = (int, float) - integer_types = (int,) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/cookies.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/cookies.py deleted file mode 100644 index 56fccd9..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/cookies.py +++ /dev/null @@ -1,549 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests.cookies -~~~~~~~~~~~~~~~~ - 
-Compatibility code to be able to use `cookielib.CookieJar` with requests. - -requests.utils imports from here, so be careful with imports. -""" - -import copy -import time -import calendar - -from ._internal_utils import to_native_string -from .compat import cookielib, urlparse, urlunparse, Morsel, MutableMapping - -try: - import threading -except ImportError: - import dummy_threading as threading - - -class MockRequest(object): - """Wraps a `requests.Request` to mimic a `urllib2.Request`. - - The code in `cookielib.CookieJar` expects this interface in order to correctly - manage cookie policies, i.e., determine whether a cookie can be set, given the - domains of the request and the cookie. - - The original request object is read-only. The client is responsible for collecting - the new headers via `get_new_headers()` and interpreting them appropriately. You - probably want `get_cookie_header`, defined below. - """ - - def __init__(self, request): - self._r = request - self._new_headers = {} - self.type = urlparse(self._r.url).scheme - - def get_type(self): - return self.type - - def get_host(self): - return urlparse(self._r.url).netloc - - def get_origin_req_host(self): - return self.get_host() - - def get_full_url(self): - # Only return the response's URL if the user hadn't set the Host - # header - if not self._r.headers.get('Host'): - return self._r.url - # If they did set it, retrieve it and reconstruct the expected domain - host = to_native_string(self._r.headers['Host'], encoding='utf-8') - parsed = urlparse(self._r.url) - # Reconstruct the URL as we expect it - return urlunparse([ - parsed.scheme, host, parsed.path, parsed.params, parsed.query, - parsed.fragment - ]) - - def is_unverifiable(self): - return True - - def has_header(self, name): - return name in self._r.headers or name in self._new_headers - - def get_header(self, name, default=None): - return self._r.headers.get(name, self._new_headers.get(name, default)) - - def add_header(self, key, val): - 
"""cookielib has no legitimate use for this method; add it back if you find one.""" - raise NotImplementedError("Cookie headers should be added with add_unredirected_header()") - - def add_unredirected_header(self, name, value): - self._new_headers[name] = value - - def get_new_headers(self): - return self._new_headers - - @property - def unverifiable(self): - return self.is_unverifiable() - - @property - def origin_req_host(self): - return self.get_origin_req_host() - - @property - def host(self): - return self.get_host() - - -class MockResponse(object): - """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`. - - ...what? Basically, expose the parsed HTTP headers from the server response - the way `cookielib` expects to see them. - """ - - def __init__(self, headers): - """Make a MockResponse for `cookielib` to read. - - :param headers: a httplib.HTTPMessage or analogous carrying the headers - """ - self._headers = headers - - def info(self): - return self._headers - - def getheaders(self, name): - self._headers.getheaders(name) - - -def extract_cookies_to_jar(jar, request, response): - """Extract the cookies from the response into a CookieJar. - - :param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar) - :param request: our own requests.Request object - :param response: urllib3.HTTPResponse object - """ - if not (hasattr(response, '_original_response') and - response._original_response): - return - # the _original_response field is the wrapped httplib.HTTPResponse object, - req = MockRequest(request) - # pull out the HTTPMessage with the headers and put it in the mock: - res = MockResponse(response._original_response.msg) - jar.extract_cookies(res, req) - - -def get_cookie_header(jar, request): - """ - Produce an appropriate Cookie header string to be sent with `request`, or None. 
- - :rtype: str - """ - r = MockRequest(request) - jar.add_cookie_header(r) - return r.get_new_headers().get('Cookie') - - -def remove_cookie_by_name(cookiejar, name, domain=None, path=None): - """Unsets a cookie by name, by default over all domains and paths. - - Wraps CookieJar.clear(), is O(n). - """ - clearables = [] - for cookie in cookiejar: - if cookie.name != name: - continue - if domain is not None and domain != cookie.domain: - continue - if path is not None and path != cookie.path: - continue - clearables.append((cookie.domain, cookie.path, cookie.name)) - - for domain, path, name in clearables: - cookiejar.clear(domain, path, name) - - -class CookieConflictError(RuntimeError): - """There are two cookies that meet the criteria specified in the cookie jar. - Use .get and .set and include domain and path args in order to be more specific. - """ - - -class RequestsCookieJar(cookielib.CookieJar, MutableMapping): - """Compatibility class; is a cookielib.CookieJar, but exposes a dict - interface. - - This is the CookieJar we create by default for requests and sessions that - don't specify one, since some clients may expect response.cookies and - session.cookies to support dict operations. - - Requests does not use the dict interface internally; it's just for - compatibility with external client code. All requests code should work - out of the box with externally provided instances of ``CookieJar``, e.g. - ``LWPCookieJar`` and ``FileCookieJar``. - - Unlike a regular CookieJar, this class is pickleable. - - .. warning:: dictionary operations that are normally O(1) may be O(n). - """ - - def get(self, name, default=None, domain=None, path=None): - """Dict-like get() that also supports optional domain and path args in - order to resolve naming collisions from using one cookie jar over - multiple domains. - - .. warning:: operation is O(n), not O(1). 
- """ - try: - return self._find_no_duplicates(name, domain, path) - except KeyError: - return default - - def set(self, name, value, **kwargs): - """Dict-like set() that also supports optional domain and path args in - order to resolve naming collisions from using one cookie jar over - multiple domains. - """ - # support client code that unsets cookies by assignment of a None value: - if value is None: - remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path')) - return - - if isinstance(value, Morsel): - c = morsel_to_cookie(value) - else: - c = create_cookie(name, value, **kwargs) - self.set_cookie(c) - return c - - def iterkeys(self): - """Dict-like iterkeys() that returns an iterator of names of cookies - from the jar. - - .. seealso:: itervalues() and iteritems(). - """ - for cookie in iter(self): - yield cookie.name - - def keys(self): - """Dict-like keys() that returns a list of names of cookies from the - jar. - - .. seealso:: values() and items(). - """ - return list(self.iterkeys()) - - def itervalues(self): - """Dict-like itervalues() that returns an iterator of values of cookies - from the jar. - - .. seealso:: iterkeys() and iteritems(). - """ - for cookie in iter(self): - yield cookie.value - - def values(self): - """Dict-like values() that returns a list of values of cookies from the - jar. - - .. seealso:: keys() and items(). - """ - return list(self.itervalues()) - - def iteritems(self): - """Dict-like iteritems() that returns an iterator of name-value tuples - from the jar. - - .. seealso:: iterkeys() and itervalues(). - """ - for cookie in iter(self): - yield cookie.name, cookie.value - - def items(self): - """Dict-like items() that returns a list of name-value tuples from the - jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a - vanilla python dict of key value pairs. - - .. seealso:: keys() and values(). 
- """ - return list(self.iteritems()) - - def list_domains(self): - """Utility method to list all the domains in the jar.""" - domains = [] - for cookie in iter(self): - if cookie.domain not in domains: - domains.append(cookie.domain) - return domains - - def list_paths(self): - """Utility method to list all the paths in the jar.""" - paths = [] - for cookie in iter(self): - if cookie.path not in paths: - paths.append(cookie.path) - return paths - - def multiple_domains(self): - """Returns True if there are multiple domains in the jar. - Returns False otherwise. - - :rtype: bool - """ - domains = [] - for cookie in iter(self): - if cookie.domain is not None and cookie.domain in domains: - return True - domains.append(cookie.domain) - return False # there is only one domain in jar - - def get_dict(self, domain=None, path=None): - """Takes as an argument an optional domain and path and returns a plain - old Python dict of name-value pairs of cookies that meet the - requirements. - - :rtype: dict - """ - dictionary = {} - for cookie in iter(self): - if ( - (domain is None or cookie.domain == domain) and - (path is None or cookie.path == path) - ): - dictionary[cookie.name] = cookie.value - return dictionary - - def __contains__(self, name): - try: - return super(RequestsCookieJar, self).__contains__(name) - except CookieConflictError: - return True - - def __getitem__(self, name): - """Dict-like __getitem__() for compatibility with client code. Throws - exception if there are more than one cookie with name. In that case, - use the more explicit get() method instead. - - .. warning:: operation is O(n), not O(1). - """ - return self._find_no_duplicates(name) - - def __setitem__(self, name, value): - """Dict-like __setitem__ for compatibility with client code. Throws - exception if there is already a cookie of that name in the jar. In that - case, use the more explicit set() method instead. 
- """ - self.set(name, value) - - def __delitem__(self, name): - """Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s - ``remove_cookie_by_name()``. - """ - remove_cookie_by_name(self, name) - - def set_cookie(self, cookie, *args, **kwargs): - if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'): - cookie.value = cookie.value.replace('\\"', '') - return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs) - - def update(self, other): - """Updates this jar with cookies from another CookieJar or dict-like""" - if isinstance(other, cookielib.CookieJar): - for cookie in other: - self.set_cookie(copy.copy(cookie)) - else: - super(RequestsCookieJar, self).update(other) - - def _find(self, name, domain=None, path=None): - """Requests uses this method internally to get cookie values. - - If there are conflicting cookies, _find arbitrarily chooses one. - See _find_no_duplicates if you want an exception thrown if there are - conflicting cookies. - - :param name: a string containing name of cookie - :param domain: (optional) string containing domain of cookie - :param path: (optional) string containing path of cookie - :return: cookie.value - """ - for cookie in iter(self): - if cookie.name == name: - if domain is None or cookie.domain == domain: - if path is None or cookie.path == path: - return cookie.value - - raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path)) - - def _find_no_duplicates(self, name, domain=None, path=None): - """Both ``__get_item__`` and ``get`` call this function: it's never - used elsewhere in Requests. 
- - :param name: a string containing name of cookie - :param domain: (optional) string containing domain of cookie - :param path: (optional) string containing path of cookie - :raises KeyError: if cookie is not found - :raises CookieConflictError: if there are multiple cookies - that match name and optionally domain and path - :return: cookie.value - """ - toReturn = None - for cookie in iter(self): - if cookie.name == name: - if domain is None or cookie.domain == domain: - if path is None or cookie.path == path: - if toReturn is not None: # if there are multiple cookies that meet passed in criteria - raise CookieConflictError('There are multiple cookies with name, %r' % (name)) - toReturn = cookie.value # we will eventually return this as long as no cookie conflict - - if toReturn: - return toReturn - raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path)) - - def __getstate__(self): - """Unlike a normal CookieJar, this class is pickleable.""" - state = self.__dict__.copy() - # remove the unpickleable RLock object - state.pop('_cookies_lock') - return state - - def __setstate__(self, state): - """Unlike a normal CookieJar, this class is pickleable.""" - self.__dict__.update(state) - if '_cookies_lock' not in self.__dict__: - self._cookies_lock = threading.RLock() - - def copy(self): - """Return a copy of this RequestsCookieJar.""" - new_cj = RequestsCookieJar() - new_cj.set_policy(self.get_policy()) - new_cj.update(self) - return new_cj - - def get_policy(self): - """Return the CookiePolicy instance used.""" - return self._policy - - -def _copy_cookie_jar(jar): - if jar is None: - return None - - if hasattr(jar, 'copy'): - # We're dealing with an instance of RequestsCookieJar - return jar.copy() - # We're dealing with a generic CookieJar instance - new_jar = copy.copy(jar) - new_jar.clear() - for cookie in jar: - new_jar.set_cookie(copy.copy(cookie)) - return new_jar - - -def create_cookie(name, value, **kwargs): - """Make a cookie from underspecified 
parameters. - - By default, the pair of `name` and `value` will be set for the domain '' - and sent on every request (this is sometimes called a "supercookie"). - """ - result = { - 'version': 0, - 'name': name, - 'value': value, - 'port': None, - 'domain': '', - 'path': '/', - 'secure': False, - 'expires': None, - 'discard': True, - 'comment': None, - 'comment_url': None, - 'rest': {'HttpOnly': None}, - 'rfc2109': False, - } - - badargs = set(kwargs) - set(result) - if badargs: - err = 'create_cookie() got unexpected keyword arguments: %s' - raise TypeError(err % list(badargs)) - - result.update(kwargs) - result['port_specified'] = bool(result['port']) - result['domain_specified'] = bool(result['domain']) - result['domain_initial_dot'] = result['domain'].startswith('.') - result['path_specified'] = bool(result['path']) - - return cookielib.Cookie(**result) - - -def morsel_to_cookie(morsel): - """Convert a Morsel object into a Cookie containing the one k/v pair.""" - - expires = None - if morsel['max-age']: - try: - expires = int(time.time() + int(morsel['max-age'])) - except ValueError: - raise TypeError('max-age: %s must be integer' % morsel['max-age']) - elif morsel['expires']: - time_template = '%a, %d-%b-%Y %H:%M:%S GMT' - expires = calendar.timegm( - time.strptime(morsel['expires'], time_template) - ) - return create_cookie( - comment=morsel['comment'], - comment_url=bool(morsel['comment']), - discard=False, - domain=morsel['domain'], - expires=expires, - name=morsel.key, - path=morsel['path'], - port=None, - rest={'HttpOnly': morsel['httponly']}, - rfc2109=False, - secure=bool(morsel['secure']), - value=morsel.value, - version=morsel['version'] or 0, - ) - - -def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True): - """Returns a CookieJar from a key/value dictionary. - - :param cookie_dict: Dict of key/values to insert into CookieJar. - :param cookiejar: (optional) A cookiejar to add the cookies to. 
- :param overwrite: (optional) If False, will not replace cookies - already in the jar with new ones. - :rtype: CookieJar - """ - if cookiejar is None: - cookiejar = RequestsCookieJar() - - if cookie_dict is not None: - names_from_jar = [cookie.name for cookie in cookiejar] - for name in cookie_dict: - if overwrite or (name not in names_from_jar): - cookiejar.set_cookie(create_cookie(name, cookie_dict[name])) - - return cookiejar - - -def merge_cookies(cookiejar, cookies): - """Add cookies to cookiejar and returns a merged CookieJar. - - :param cookiejar: CookieJar object to add the cookies to. - :param cookies: Dictionary or CookieJar object to be added. - :rtype: CookieJar - """ - if not isinstance(cookiejar, cookielib.CookieJar): - raise ValueError('You can only merge into CookieJar') - - if isinstance(cookies, dict): - cookiejar = cookiejar_from_dict( - cookies, cookiejar=cookiejar, overwrite=False) - elif isinstance(cookies, cookielib.CookieJar): - try: - cookiejar.update(cookies) - except AttributeError: - for cookie_in_jar in cookies: - cookiejar.set_cookie(cookie_in_jar) - - return cookiejar diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/exceptions.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/exceptions.py deleted file mode 100644 index a91e1fd..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/exceptions.py +++ /dev/null @@ -1,126 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests.exceptions -~~~~~~~~~~~~~~~~~~~ - -This module contains the set of Requests' exceptions. -""" -from pip._vendor.urllib3.exceptions import HTTPError as BaseHTTPError - - -class RequestException(IOError): - """There was an ambiguous exception that occurred while handling your - request. 
- """ - - def __init__(self, *args, **kwargs): - """Initialize RequestException with `request` and `response` objects.""" - response = kwargs.pop('response', None) - self.response = response - self.request = kwargs.pop('request', None) - if (response is not None and not self.request and - hasattr(response, 'request')): - self.request = self.response.request - super(RequestException, self).__init__(*args, **kwargs) - - -class HTTPError(RequestException): - """An HTTP error occurred.""" - - -class ConnectionError(RequestException): - """A Connection error occurred.""" - - -class ProxyError(ConnectionError): - """A proxy error occurred.""" - - -class SSLError(ConnectionError): - """An SSL error occurred.""" - - -class Timeout(RequestException): - """The request timed out. - - Catching this error will catch both - :exc:`~requests.exceptions.ConnectTimeout` and - :exc:`~requests.exceptions.ReadTimeout` errors. - """ - - -class ConnectTimeout(ConnectionError, Timeout): - """The request timed out while trying to connect to the remote server. - - Requests that produced this error are safe to retry. - """ - - -class ReadTimeout(Timeout): - """The server did not send any data in the allotted amount of time.""" - - -class URLRequired(RequestException): - """A valid URL is required to make a request.""" - - -class TooManyRedirects(RequestException): - """Too many redirects.""" - - -class MissingSchema(RequestException, ValueError): - """The URL schema (e.g. 
http or https) is missing.""" - - -class InvalidSchema(RequestException, ValueError): - """See defaults.py for valid schemas.""" - - -class InvalidURL(RequestException, ValueError): - """The URL provided was somehow invalid.""" - - -class InvalidHeader(RequestException, ValueError): - """The header value provided was somehow invalid.""" - - -class InvalidProxyURL(InvalidURL): - """The proxy URL provided is invalid.""" - - -class ChunkedEncodingError(RequestException): - """The server declared chunked encoding but sent an invalid chunk.""" - - -class ContentDecodingError(RequestException, BaseHTTPError): - """Failed to decode response content""" - - -class StreamConsumedError(RequestException, TypeError): - """The content for this response was already consumed""" - - -class RetryError(RequestException): - """Custom retries logic failed""" - - -class UnrewindableBodyError(RequestException): - """Requests encountered an error when trying to rewind a body""" - -# Warnings - - -class RequestsWarning(Warning): - """Base warning for Requests.""" - pass - - -class FileModeWarning(RequestsWarning, DeprecationWarning): - """A file was opened in text mode, but Requests determined its binary length.""" - pass - - -class RequestsDependencyWarning(RequestsWarning): - """An imported dependency doesn't match the expected version range.""" - pass diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/help.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/help.py deleted file mode 100644 index 3c3072b..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/help.py +++ /dev/null @@ -1,119 +0,0 @@ -"""Module containing bug report helper(s).""" -from __future__ import print_function - -import json -import platform -import sys -import ssl - -from pip._vendor import idna -from pip._vendor import urllib3 -from pip._vendor import chardet - -from . 
import __version__ as requests_version - -try: - from pip._vendor.urllib3.contrib import pyopenssl -except ImportError: - pyopenssl = None - OpenSSL = None - cryptography = None -else: - import OpenSSL - import cryptography - - -def _implementation(): - """Return a dict with the Python implementation and version. - - Provide both the name and the version of the Python implementation - currently running. For example, on CPython 2.7.5 it will return - {'name': 'CPython', 'version': '2.7.5'}. - - This function works best on CPython and PyPy: in particular, it probably - doesn't work for Jython or IronPython. Future investigation should be done - to work out the correct shape of the code for those platforms. - """ - implementation = platform.python_implementation() - - if implementation == 'CPython': - implementation_version = platform.python_version() - elif implementation == 'PyPy': - implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major, - sys.pypy_version_info.minor, - sys.pypy_version_info.micro) - if sys.pypy_version_info.releaselevel != 'final': - implementation_version = ''.join([ - implementation_version, sys.pypy_version_info.releaselevel - ]) - elif implementation == 'Jython': - implementation_version = platform.python_version() # Complete Guess - elif implementation == 'IronPython': - implementation_version = platform.python_version() # Complete Guess - else: - implementation_version = 'Unknown' - - return {'name': implementation, 'version': implementation_version} - - -def info(): - """Generate information for a bug report.""" - try: - platform_info = { - 'system': platform.system(), - 'release': platform.release(), - } - except IOError: - platform_info = { - 'system': 'Unknown', - 'release': 'Unknown', - } - - implementation_info = _implementation() - urllib3_info = {'version': urllib3.__version__} - chardet_info = {'version': chardet.__version__} - - pyopenssl_info = { - 'version': None, - 'openssl_version': '', - } - if OpenSSL: - 
pyopenssl_info = { - 'version': OpenSSL.__version__, - 'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER, - } - cryptography_info = { - 'version': getattr(cryptography, '__version__', ''), - } - idna_info = { - 'version': getattr(idna, '__version__', ''), - } - - system_ssl = ssl.OPENSSL_VERSION_NUMBER - system_ssl_info = { - 'version': '%x' % system_ssl if system_ssl is not None else '' - } - - return { - 'platform': platform_info, - 'implementation': implementation_info, - 'system_ssl': system_ssl_info, - 'using_pyopenssl': pyopenssl is not None, - 'pyOpenSSL': pyopenssl_info, - 'urllib3': urllib3_info, - 'chardet': chardet_info, - 'cryptography': cryptography_info, - 'idna': idna_info, - 'requests': { - 'version': requests_version, - }, - } - - -def main(): - """Pretty-print the bug information as JSON.""" - print(json.dumps(info(), sort_keys=True, indent=2)) - - -if __name__ == '__main__': - main() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/hooks.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/hooks.py deleted file mode 100644 index 7a51f21..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/hooks.py +++ /dev/null @@ -1,34 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests.hooks -~~~~~~~~~~~~~~ - -This module provides the capabilities for the Requests hooks system. - -Available hooks: - -``response``: - The response generated from a Request. 
-""" -HOOKS = ['response'] - - -def default_hooks(): - return {event: [] for event in HOOKS} - -# TODO: response is the only one - - -def dispatch_hook(key, hooks, hook_data, **kwargs): - """Dispatches a hook dictionary on a given piece of data.""" - hooks = hooks or {} - hooks = hooks.get(key) - if hooks: - if hasattr(hooks, '__call__'): - hooks = [hooks] - for hook in hooks: - _hook_data = hook(hook_data, **kwargs) - if _hook_data is not None: - hook_data = _hook_data - return hook_data diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/models.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/models.py deleted file mode 100644 index 0839957..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/models.py +++ /dev/null @@ -1,953 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests.models -~~~~~~~~~~~~~~~ - -This module contains the primary objects that power Requests. -""" - -import datetime -import sys - -# Import encoding now, to avoid implicit import later. -# Implicit import within threads may cause LookupError when standard library is in a ZIP, -# such as in Embedded Python. See https://github.com/requests/requests/issues/3578. 
-import encodings.idna - -from pip._vendor.urllib3.fields import RequestField -from pip._vendor.urllib3.filepost import encode_multipart_formdata -from pip._vendor.urllib3.util import parse_url -from pip._vendor.urllib3.exceptions import ( - DecodeError, ReadTimeoutError, ProtocolError, LocationParseError) - -from io import UnsupportedOperation -from .hooks import default_hooks -from .structures import CaseInsensitiveDict - -from .auth import HTTPBasicAuth -from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar -from .exceptions import ( - HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError, - ContentDecodingError, ConnectionError, StreamConsumedError) -from ._internal_utils import to_native_string, unicode_is_ascii -from .utils import ( - guess_filename, get_auth_from_url, requote_uri, - stream_decode_response_unicode, to_key_val_list, parse_header_links, - iter_slices, guess_json_utf, super_len, check_header_validity) -from .compat import ( - Callable, Mapping, - cookielib, urlunparse, urlsplit, urlencode, str, bytes, - is_py2, chardet, builtin_str, basestring) -from .compat import json as complexjson -from .status_codes import codes - -#: The set of HTTP status codes that indicate an automatically -#: processable redirect. -REDIRECT_STATI = ( - codes.moved, # 301 - codes.found, # 302 - codes.other, # 303 - codes.temporary_redirect, # 307 - codes.permanent_redirect, # 308 -) - -DEFAULT_REDIRECT_LIMIT = 30 -CONTENT_CHUNK_SIZE = 10 * 1024 -ITER_CHUNK_SIZE = 512 - - -class RequestEncodingMixin(object): - @property - def path_url(self): - """Build the path URL to use.""" - - url = [] - - p = urlsplit(self.url) - - path = p.path - if not path: - path = '/' - - url.append(path) - - query = p.query - if query: - url.append('?') - url.append(query) - - return ''.join(url) - - @staticmethod - def _encode_params(data): - """Encode parameters in a piece of data. 
- - Will successfully encode parameters when passed as a dict or a list of - 2-tuples. Order is retained if data is a list of 2-tuples but arbitrary - if parameters are supplied as a dict. - """ - - if isinstance(data, (str, bytes)): - return data - elif hasattr(data, 'read'): - return data - elif hasattr(data, '__iter__'): - result = [] - for k, vs in to_key_val_list(data): - if isinstance(vs, basestring) or not hasattr(vs, '__iter__'): - vs = [vs] - for v in vs: - if v is not None: - result.append( - (k.encode('utf-8') if isinstance(k, str) else k, - v.encode('utf-8') if isinstance(v, str) else v)) - return urlencode(result, doseq=True) - else: - return data - - @staticmethod - def _encode_files(files, data): - """Build the body for a multipart/form-data request. - - Will successfully encode files when passed as a dict or a list of - tuples. Order is retained if data is a list of tuples but arbitrary - if parameters are supplied as a dict. - The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype) - or 4-tuples (filename, fileobj, contentype, custom_headers). - """ - if (not files): - raise ValueError("Files must be provided.") - elif isinstance(data, basestring): - raise ValueError("Data must not be a string.") - - new_fields = [] - fields = to_key_val_list(data or {}) - files = to_key_val_list(files or {}) - - for field, val in fields: - if isinstance(val, basestring) or not hasattr(val, '__iter__'): - val = [val] - for v in val: - if v is not None: - # Don't call str() on bytestrings: in Py3 it all goes wrong. 
- if not isinstance(v, bytes): - v = str(v) - - new_fields.append( - (field.decode('utf-8') if isinstance(field, bytes) else field, - v.encode('utf-8') if isinstance(v, str) else v)) - - for (k, v) in files: - # support for explicit filename - ft = None - fh = None - if isinstance(v, (tuple, list)): - if len(v) == 2: - fn, fp = v - elif len(v) == 3: - fn, fp, ft = v - else: - fn, fp, ft, fh = v - else: - fn = guess_filename(v) or k - fp = v - - if isinstance(fp, (str, bytes, bytearray)): - fdata = fp - elif hasattr(fp, 'read'): - fdata = fp.read() - elif fp is None: - continue - else: - fdata = fp - - rf = RequestField(name=k, data=fdata, filename=fn, headers=fh) - rf.make_multipart(content_type=ft) - new_fields.append(rf) - - body, content_type = encode_multipart_formdata(new_fields) - - return body, content_type - - -class RequestHooksMixin(object): - def register_hook(self, event, hook): - """Properly register a hook.""" - - if event not in self.hooks: - raise ValueError('Unsupported event specified, with event name "%s"' % (event)) - - if isinstance(hook, Callable): - self.hooks[event].append(hook) - elif hasattr(hook, '__iter__'): - self.hooks[event].extend(h for h in hook if isinstance(h, Callable)) - - def deregister_hook(self, event, hook): - """Deregister a previously registered hook. - Returns True if the hook existed, False if not. - """ - - try: - self.hooks[event].remove(hook) - return True - except ValueError: - return False - - -class Request(RequestHooksMixin): - """A user-created :class:`Request <Request>` object. - - Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server. - - :param method: HTTP method to use. - :param url: URL to send. - :param headers: dictionary of headers to send. - :param files: dictionary of {filename: fileobject} files to multipart upload. - :param data: the body to attach to the request. If a dictionary or - list of tuples ``[(key, value)]`` is provided, form-encoding will - take place. 
- :param json: json for the body to attach to the request (if files or data is not specified). - :param params: URL parameters to append to the URL. If a dictionary or - list of tuples ``[(key, value)]`` is provided, form-encoding will - take place. - :param auth: Auth handler or (user, pass) tuple. - :param cookies: dictionary or CookieJar of cookies to attach to this request. - :param hooks: dictionary of callback hooks, for internal usage. - - Usage:: - - >>> import requests - >>> req = requests.Request('GET', 'https://httpbin.org/get') - >>> req.prepare() - <PreparedRequest [GET]> - """ - - def __init__(self, - method=None, url=None, headers=None, files=None, data=None, - params=None, auth=None, cookies=None, hooks=None, json=None): - - # Default empty dicts for dict params. - data = [] if data is None else data - files = [] if files is None else files - headers = {} if headers is None else headers - params = {} if params is None else params - hooks = {} if hooks is None else hooks - - self.hooks = default_hooks() - for (k, v) in list(hooks.items()): - self.register_hook(event=k, hook=v) - - self.method = method - self.url = url - self.headers = headers - self.files = files - self.data = data - self.json = json - self.params = params - self.auth = auth - self.cookies = cookies - - def __repr__(self): - return '<Request [%s]>' % (self.method) - - def prepare(self): - """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it.""" - p = PreparedRequest() - p.prepare( - method=self.method, - url=self.url, - headers=self.headers, - files=self.files, - data=self.data, - json=self.json, - params=self.params, - auth=self.auth, - cookies=self.cookies, - hooks=self.hooks, - ) - return p - - -class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): - """The fully mutable :class:`PreparedRequest <PreparedRequest>` object, - containing the exact bytes that will be sent to the server. 
- - Generated from either a :class:`Request <Request>` object or manually. - - Usage:: - - >>> import requests - >>> req = requests.Request('GET', 'https://httpbin.org/get') - >>> r = req.prepare() - <PreparedRequest [GET]> - - >>> s = requests.Session() - >>> s.send(r) - <Response [200]> - """ - - def __init__(self): - #: HTTP verb to send to the server. - self.method = None - #: HTTP URL to send the request to. - self.url = None - #: dictionary of HTTP headers. - self.headers = None - # The `CookieJar` used to create the Cookie header will be stored here - # after prepare_cookies is called - self._cookies = None - #: request body to send to the server. - self.body = None - #: dictionary of callback hooks, for internal usage. - self.hooks = default_hooks() - #: integer denoting starting position of a readable file-like body. - self._body_position = None - - def prepare(self, - method=None, url=None, headers=None, files=None, data=None, - params=None, auth=None, cookies=None, hooks=None, json=None): - """Prepares the entire request with the given parameters.""" - - self.prepare_method(method) - self.prepare_url(url, params) - self.prepare_headers(headers) - self.prepare_cookies(cookies) - self.prepare_body(data, files, json) - self.prepare_auth(auth, url) - - # Note that prepare_auth must be last to enable authentication schemes - # such as OAuth to work on a fully prepared request. - - # This MUST go after prepare_auth. 
Authenticators could add a hook - self.prepare_hooks(hooks) - - def __repr__(self): - return '<PreparedRequest [%s]>' % (self.method) - - def copy(self): - p = PreparedRequest() - p.method = self.method - p.url = self.url - p.headers = self.headers.copy() if self.headers is not None else None - p._cookies = _copy_cookie_jar(self._cookies) - p.body = self.body - p.hooks = self.hooks - p._body_position = self._body_position - return p - - def prepare_method(self, method): - """Prepares the given HTTP method.""" - self.method = method - if self.method is not None: - self.method = to_native_string(self.method.upper()) - - @staticmethod - def _get_idna_encoded_host(host): - from pip._vendor import idna - - try: - host = idna.encode(host, uts46=True).decode('utf-8') - except idna.IDNAError: - raise UnicodeError - return host - - def prepare_url(self, url, params): - """Prepares the given HTTP URL.""" - #: Accept objects that have string representations. - #: We're unable to blindly call unicode/str functions - #: as this will include the bytestring indicator (b'') - #: on python 3.x. - #: https://github.com/requests/requests/pull/2238 - if isinstance(url, bytes): - url = url.decode('utf8') - else: - url = unicode(url) if is_py2 else str(url) - - # Remove leading whitespaces from url - url = url.lstrip() - - # Don't do any URL preparation for non-HTTP schemes like `mailto`, - # `data` etc to work around exceptions from `url_parse`, which - # handles RFC 3986 only. - if ':' in url and not url.lower().startswith('http'): - self.url = url - return - - # Support for unicode domain names and paths. - try: - scheme, auth, host, port, path, query, fragment = parse_url(url) - except LocationParseError as e: - raise InvalidURL(*e.args) - - if not scheme: - error = ("Invalid URL {0!r}: No schema supplied. 
Perhaps you meant http://{0}?") - error = error.format(to_native_string(url, 'utf8')) - - raise MissingSchema(error) - - if not host: - raise InvalidURL("Invalid URL %r: No host supplied" % url) - - # In general, we want to try IDNA encoding the hostname if the string contains - # non-ASCII characters. This allows users to automatically get the correct IDNA - # behaviour. For strings containing only ASCII characters, we need to also verify - # it doesn't start with a wildcard (*), before allowing the unencoded hostname. - if not unicode_is_ascii(host): - try: - host = self._get_idna_encoded_host(host) - except UnicodeError: - raise InvalidURL('URL has an invalid label.') - elif host.startswith(u'*'): - raise InvalidURL('URL has an invalid label.') - - # Carefully reconstruct the network location - netloc = auth or '' - if netloc: - netloc += '@' - netloc += host - if port: - netloc += ':' + str(port) - - # Bare domains aren't valid URLs. - if not path: - path = '/' - - if is_py2: - if isinstance(scheme, str): - scheme = scheme.encode('utf-8') - if isinstance(netloc, str): - netloc = netloc.encode('utf-8') - if isinstance(path, str): - path = path.encode('utf-8') - if isinstance(query, str): - query = query.encode('utf-8') - if isinstance(fragment, str): - fragment = fragment.encode('utf-8') - - if isinstance(params, (str, bytes)): - params = to_native_string(params) - - enc_params = self._encode_params(params) - if enc_params: - if query: - query = '%s&%s' % (query, enc_params) - else: - query = enc_params - - url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment])) - self.url = url - - def prepare_headers(self, headers): - """Prepares the given HTTP headers.""" - - self.headers = CaseInsensitiveDict() - if headers: - for header in headers.items(): - # Raise exception on invalid header value. 
- check_header_validity(header) - name, value = header - self.headers[to_native_string(name)] = value - - def prepare_body(self, data, files, json=None): - """Prepares the given HTTP body data.""" - - # Check if file, fo, generator, iterator. - # If not, run through normal process. - - # Nottin' on you. - body = None - content_type = None - - if not data and json is not None: - # urllib3 requires a bytes-like body. Python 2's json.dumps - # provides this natively, but Python 3 gives a Unicode string. - content_type = 'application/json' - body = complexjson.dumps(json) - if not isinstance(body, bytes): - body = body.encode('utf-8') - - is_stream = all([ - hasattr(data, '__iter__'), - not isinstance(data, (basestring, list, tuple, Mapping)) - ]) - - try: - length = super_len(data) - except (TypeError, AttributeError, UnsupportedOperation): - length = None - - if is_stream: - body = data - - if getattr(body, 'tell', None) is not None: - # Record the current file position before reading. - # This will allow us to rewind a file in the event - # of a redirect. - try: - self._body_position = body.tell() - except (IOError, OSError): - # This differentiates from None, allowing us to catch - # a failed `tell()` later when trying to rewind the body - self._body_position = object() - - if files: - raise NotImplementedError('Streamed bodies and files are mutually exclusive.') - - if length: - self.headers['Content-Length'] = builtin_str(length) - else: - self.headers['Transfer-Encoding'] = 'chunked' - else: - # Multi-part file uploads. - if files: - (body, content_type) = self._encode_files(files, data) - else: - if data: - body = self._encode_params(data) - if isinstance(data, basestring) or hasattr(data, 'read'): - content_type = None - else: - content_type = 'application/x-www-form-urlencoded' - - self.prepare_content_length(body) - - # Add content-type if it wasn't explicitly provided. 
- if content_type and ('content-type' not in self.headers): - self.headers['Content-Type'] = content_type - - self.body = body - - def prepare_content_length(self, body): - """Prepare Content-Length header based on request method and body""" - if body is not None: - length = super_len(body) - if length: - # If length exists, set it. Otherwise, we fallback - # to Transfer-Encoding: chunked. - self.headers['Content-Length'] = builtin_str(length) - elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None: - # Set Content-Length to 0 for methods that can have a body - # but don't provide one. (i.e. not GET or HEAD) - self.headers['Content-Length'] = '0' - - def prepare_auth(self, auth, url=''): - """Prepares the given HTTP auth data.""" - - # If no Auth is explicitly provided, extract it from the URL first. - if auth is None: - url_auth = get_auth_from_url(self.url) - auth = url_auth if any(url_auth) else None - - if auth: - if isinstance(auth, tuple) and len(auth) == 2: - # special-case basic HTTP auth - auth = HTTPBasicAuth(*auth) - - # Allow auth to make its changes. - r = auth(self) - - # Update self to reflect the auth changes. - self.__dict__.update(r.__dict__) - - # Recompute Content-Length - self.prepare_content_length(self.body) - - def prepare_cookies(self, cookies): - """Prepares the given HTTP cookie data. - - This function eventually generates a ``Cookie`` header from the - given cookies using cookielib. Due to cookielib's design, the header - will not be regenerated if it already exists, meaning this function - can only be called once for the life of the - :class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls - to ``prepare_cookies`` will have no actual effect, unless the "Cookie" - header is removed beforehand. 
- """ - if isinstance(cookies, cookielib.CookieJar): - self._cookies = cookies - else: - self._cookies = cookiejar_from_dict(cookies) - - cookie_header = get_cookie_header(self._cookies, self) - if cookie_header is not None: - self.headers['Cookie'] = cookie_header - - def prepare_hooks(self, hooks): - """Prepares the given hooks.""" - # hooks can be passed as None to the prepare method and to this - # method. To prevent iterating over None, simply use an empty list - # if hooks is False-y - hooks = hooks or [] - for event in hooks: - self.register_hook(event, hooks[event]) - - -class Response(object): - """The :class:`Response <Response>` object, which contains a - server's response to an HTTP request. - """ - - __attrs__ = [ - '_content', 'status_code', 'headers', 'url', 'history', - 'encoding', 'reason', 'cookies', 'elapsed', 'request' - ] - - def __init__(self): - self._content = False - self._content_consumed = False - self._next = None - - #: Integer Code of responded HTTP Status, e.g. 404 or 200. - self.status_code = None - - #: Case-insensitive Dictionary of Response Headers. - #: For example, ``headers['content-encoding']`` will return the - #: value of a ``'Content-Encoding'`` response header. - self.headers = CaseInsensitiveDict() - - #: File-like object representation of response (for advanced usage). - #: Use of ``raw`` requires that ``stream=True`` be set on the request. - # This requirement does not apply for use internally to Requests. - self.raw = None - - #: Final URL location of Response. - self.url = None - - #: Encoding to decode with when accessing r.text. - self.encoding = None - - #: A list of :class:`Response <Response>` objects from - #: the history of the Request. Any redirect responses will end - #: up here. The list is sorted from the oldest to the most recent request. - self.history = [] - - #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK". - self.reason = None - - #: A CookieJar of Cookies the server sent back. 
- self.cookies = cookiejar_from_dict({}) - - #: The amount of time elapsed between sending the request - #: and the arrival of the response (as a timedelta). - #: This property specifically measures the time taken between sending - #: the first byte of the request and finishing parsing the headers. It - #: is therefore unaffected by consuming the response content or the - #: value of the ``stream`` keyword argument. - self.elapsed = datetime.timedelta(0) - - #: The :class:`PreparedRequest <PreparedRequest>` object to which this - #: is a response. - self.request = None - - def __enter__(self): - return self - - def __exit__(self, *args): - self.close() - - def __getstate__(self): - # Consume everything; accessing the content attribute makes - # sure the content has been fully read. - if not self._content_consumed: - self.content - - return {attr: getattr(self, attr, None) for attr in self.__attrs__} - - def __setstate__(self, state): - for name, value in state.items(): - setattr(self, name, value) - - # pickled objects do not have .raw - setattr(self, '_content_consumed', True) - setattr(self, 'raw', None) - - def __repr__(self): - return '<Response [%s]>' % (self.status_code) - - def __bool__(self): - """Returns True if :attr:`status_code` is less than 400. - - This attribute checks if the status code of the response is between - 400 and 600 to see if there was a client error or a server error. If - the status code, is between 200 and 400, this will return True. This - is **not** a check to see if the response code is ``200 OK``. - """ - return self.ok - - def __nonzero__(self): - """Returns True if :attr:`status_code` is less than 400. - - This attribute checks if the status code of the response is between - 400 and 600 to see if there was a client error or a server error. If - the status code, is between 200 and 400, this will return True. This - is **not** a check to see if the response code is ``200 OK``. 
- """ - return self.ok - - def __iter__(self): - """Allows you to use a response as an iterator.""" - return self.iter_content(128) - - @property - def ok(self): - """Returns True if :attr:`status_code` is less than 400, False if not. - - This attribute checks if the status code of the response is between - 400 and 600 to see if there was a client error or a server error. If - the status code is between 200 and 400, this will return True. This - is **not** a check to see if the response code is ``200 OK``. - """ - try: - self.raise_for_status() - except HTTPError: - return False - return True - - @property - def is_redirect(self): - """True if this Response is a well-formed HTTP redirect that could have - been processed automatically (by :meth:`Session.resolve_redirects`). - """ - return ('location' in self.headers and self.status_code in REDIRECT_STATI) - - @property - def is_permanent_redirect(self): - """True if this Response one of the permanent versions of redirect.""" - return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect)) - - @property - def next(self): - """Returns a PreparedRequest for the next request in a redirect chain, if there is one.""" - return self._next - - @property - def apparent_encoding(self): - """The apparent encoding, provided by the chardet library.""" - return chardet.detect(self.content)['encoding'] - - def iter_content(self, chunk_size=1, decode_unicode=False): - """Iterates over the response data. When stream=True is set on the - request, this avoids reading the content at once into memory for - large responses. The chunk size is the number of bytes it should - read into memory. This is not necessarily the length of each item - returned as decoding can take place. - - chunk_size must be of type int or None. A value of None will - function differently depending on the value of `stream`. - stream=True will read data as it arrives in whatever size the - chunks are received. 
If stream=False, data is returned as - a single chunk. - - If decode_unicode is True, content will be decoded using the best - available encoding based on the response. - """ - - def generate(): - # Special case for urllib3. - if hasattr(self.raw, 'stream'): - try: - for chunk in self.raw.stream(chunk_size, decode_content=True): - yield chunk - except ProtocolError as e: - raise ChunkedEncodingError(e) - except DecodeError as e: - raise ContentDecodingError(e) - except ReadTimeoutError as e: - raise ConnectionError(e) - else: - # Standard file-like object. - while True: - chunk = self.raw.read(chunk_size) - if not chunk: - break - yield chunk - - self._content_consumed = True - - if self._content_consumed and isinstance(self._content, bool): - raise StreamConsumedError() - elif chunk_size is not None and not isinstance(chunk_size, int): - raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size)) - # simulate reading small chunks of the content - reused_chunks = iter_slices(self._content, chunk_size) - - stream_chunks = generate() - - chunks = reused_chunks if self._content_consumed else stream_chunks - - if decode_unicode: - chunks = stream_decode_response_unicode(chunks, self) - - return chunks - - def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None): - """Iterates over the response data, one line at a time. When - stream=True is set on the request, this avoids reading the - content at once into memory for large responses. - - .. note:: This method is not reentrant safe. 
- """ - - pending = None - - for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode): - - if pending is not None: - chunk = pending + chunk - - if delimiter: - lines = chunk.split(delimiter) - else: - lines = chunk.splitlines() - - if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]: - pending = lines.pop() - else: - pending = None - - for line in lines: - yield line - - if pending is not None: - yield pending - - @property - def content(self): - """Content of the response, in bytes.""" - - if self._content is False: - # Read the contents. - if self._content_consumed: - raise RuntimeError( - 'The content for this response was already consumed') - - if self.status_code == 0 or self.raw is None: - self._content = None - else: - self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b'' - - self._content_consumed = True - # don't need to release the connection; that's been handled by urllib3 - # since we exhausted the data. - return self._content - - @property - def text(self): - """Content of the response, in unicode. - - If Response.encoding is None, encoding will be guessed using - ``chardet``. - - The encoding of the response content is determined based solely on HTTP - headers, following RFC 2616 to the letter. If you can take advantage of - non-HTTP knowledge to make a better guess at the encoding, you should - set ``r.encoding`` appropriately before accessing this property. - """ - - # Try charset from content-type - content = None - encoding = self.encoding - - if not self.content: - return str('') - - # Fallback to auto-detected encoding. - if self.encoding is None: - encoding = self.apparent_encoding - - # Decode unicode from given encoding. - try: - content = str(self.content, encoding, errors='replace') - except (LookupError, TypeError): - # A LookupError is raised if the encoding was not found which could - # indicate a misspelling or similar mistake. 
- # - # A TypeError can be raised if encoding is None - # - # So we try blindly encoding. - content = str(self.content, errors='replace') - - return content - - def json(self, **kwargs): - r"""Returns the json-encoded content of a response, if any. - - :param \*\*kwargs: Optional arguments that ``json.loads`` takes. - :raises ValueError: If the response body does not contain valid json. - """ - - if not self.encoding and self.content and len(self.content) > 3: - # No encoding set. JSON RFC 4627 section 3 states we should expect - # UTF-8, -16 or -32. Detect which one to use; If the detection or - # decoding fails, fall back to `self.text` (using chardet to make - # a best guess). - encoding = guess_json_utf(self.content) - if encoding is not None: - try: - return complexjson.loads( - self.content.decode(encoding), **kwargs - ) - except UnicodeDecodeError: - # Wrong UTF codec detected; usually because it's not UTF-8 - # but some other 8-bit codec. This is an RFC violation, - # and the server didn't bother to tell us what codec *was* - # used. - pass - return complexjson.loads(self.text, **kwargs) - - @property - def links(self): - """Returns the parsed header links of the response, if any.""" - - header = self.headers.get('link') - - # l = MultiDict() - l = {} - - if header: - links = parse_header_links(header) - - for link in links: - key = link.get('rel') or link.get('url') - l[key] = link - - return l - - def raise_for_status(self): - """Raises stored :class:`HTTPError`, if one occurred.""" - - http_error_msg = '' - if isinstance(self.reason, bytes): - # We attempt to decode utf-8 first because some servers - # choose to localize their reason strings. If the string - # isn't utf-8, we fall back to iso-8859-1 for all other - # encodings. 
(See PR #3538) - try: - reason = self.reason.decode('utf-8') - except UnicodeDecodeError: - reason = self.reason.decode('iso-8859-1') - else: - reason = self.reason - - if 400 <= self.status_code < 500: - http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url) - - elif 500 <= self.status_code < 600: - http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url) - - if http_error_msg: - raise HTTPError(http_error_msg, response=self) - - def close(self): - """Releases the connection back to the pool. Once this method has been - called the underlying ``raw`` object must not be accessed again. - - *Note: Should not normally need to be called explicitly.* - """ - if not self._content_consumed: - self.raw.close() - - release_conn = getattr(self.raw, 'release_conn', None) - if release_conn is not None: - release_conn() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/packages.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/packages.py deleted file mode 100644 index 9582fa7..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/packages.py +++ /dev/null @@ -1,16 +0,0 @@ -import sys - -# This code exists for backwards compatibility reasons. -# I don't like it either. Just look the other way. :) - -for package in ('urllib3', 'idna', 'chardet'): - vendored_package = "pip._vendor." + package - locals()[package] = __import__(vendored_package) - # This traversal is apparently necessary such that the identities are - # preserved (requests.packages.urllib3.* is urllib3.*) - for mod in list(sys.modules): - if mod == vendored_package or mod.startswith(vendored_package + '.'): - unprefixed_mod = mod[len("pip._vendor."):] - sys.modules['pip._vendor.requests.packages.' + unprefixed_mod] = sys.modules[mod] - -# Kinda cool, though, right? 
diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/sessions.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/sessions.py deleted file mode 100644 index d73d700..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/sessions.py +++ /dev/null @@ -1,770 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests.session -~~~~~~~~~~~~~~~~ - -This module provides a Session object to manage and persist settings across -requests (cookies, auth, proxies). -""" -import os -import sys -import time -from datetime import timedelta - -from .auth import _basic_auth_str -from .compat import cookielib, is_py3, OrderedDict, urljoin, urlparse, Mapping -from .cookies import ( - cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies) -from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT -from .hooks import default_hooks, dispatch_hook -from ._internal_utils import to_native_string -from .utils import to_key_val_list, default_headers, DEFAULT_PORTS -from .exceptions import ( - TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError) - -from .structures import CaseInsensitiveDict -from .adapters import HTTPAdapter - -from .utils import ( - requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies, - get_auth_from_url, rewind_body -) - -from .status_codes import codes - -# formerly defined here, reexposed here for backward compatibility -from .models import REDIRECT_STATI - -# Preferred clock, based on which one is more accurate on a given system. -if sys.platform == 'win32': - try: # Python 3.4+ - preferred_clock = time.perf_counter - except AttributeError: # Earlier than Python 3. 
- preferred_clock = time.clock -else: - preferred_clock = time.time - - -def merge_setting(request_setting, session_setting, dict_class=OrderedDict): - """Determines appropriate setting for a given request, taking into account - the explicit setting on that request, and the setting in the session. If a - setting is a dictionary, they will be merged together using `dict_class` - """ - - if session_setting is None: - return request_setting - - if request_setting is None: - return session_setting - - # Bypass if not a dictionary (e.g. verify) - if not ( - isinstance(session_setting, Mapping) and - isinstance(request_setting, Mapping) - ): - return request_setting - - merged_setting = dict_class(to_key_val_list(session_setting)) - merged_setting.update(to_key_val_list(request_setting)) - - # Remove keys that are set to None. Extract keys first to avoid altering - # the dictionary during iteration. - none_keys = [k for (k, v) in merged_setting.items() if v is None] - for key in none_keys: - del merged_setting[key] - - return merged_setting - - -def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict): - """Properly merges both requests and session hooks. - - This is necessary because when request_hooks == {'response': []}, the - merge breaks Session hooks entirely. - """ - if session_hooks is None or session_hooks.get('response') == []: - return request_hooks - - if request_hooks is None or request_hooks.get('response') == []: - return session_hooks - - return merge_setting(request_hooks, session_hooks, dict_class) - - -class SessionRedirectMixin(object): - - def get_redirect_target(self, resp): - """Receives a Response. Returns a redirect URI or ``None``""" - # Due to the nature of how requests processes redirects this method will - # be called at least once upon the original response and at least twice - # on each subsequent redirect response (if any). 
- # If a custom mixin is used to handle this logic, it may be advantageous - # to cache the redirect location onto the response object as a private - # attribute. - if resp.is_redirect: - location = resp.headers['location'] - # Currently the underlying http module on py3 decode headers - # in latin1, but empirical evidence suggests that latin1 is very - # rarely used with non-ASCII characters in HTTP headers. - # It is more likely to get UTF8 header rather than latin1. - # This causes incorrect handling of UTF8 encoded location headers. - # To solve this, we re-encode the location in latin1. - if is_py3: - location = location.encode('latin1') - return to_native_string(location, 'utf8') - return None - - def should_strip_auth(self, old_url, new_url): - """Decide whether Authorization header should be removed when redirecting""" - old_parsed = urlparse(old_url) - new_parsed = urlparse(new_url) - if old_parsed.hostname != new_parsed.hostname: - return True - # Special case: allow http -> https redirect when using the standard - # ports. This isn't specified by RFC 7235, but is kept to avoid - # breaking backwards compatibility with older versions of requests - # that allowed any redirects on the same host. - if (old_parsed.scheme == 'http' and old_parsed.port in (80, None) - and new_parsed.scheme == 'https' and new_parsed.port in (443, None)): - return False - - # Handle default port usage corresponding to scheme. - changed_port = old_parsed.port != new_parsed.port - changed_scheme = old_parsed.scheme != new_parsed.scheme - default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None) - if (not changed_scheme and old_parsed.port in default_port - and new_parsed.port in default_port): - return False - - # Standard case: root URI must match - return changed_port or changed_scheme - - def resolve_redirects(self, resp, req, stream=False, timeout=None, - verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs): - """Receives a Response. 
Returns a generator of Responses or Requests.""" - - hist = [] # keep track of history - - url = self.get_redirect_target(resp) - previous_fragment = urlparse(req.url).fragment - while url: - prepared_request = req.copy() - - # Update history and keep track of redirects. - # resp.history must ignore the original request in this loop - hist.append(resp) - resp.history = hist[1:] - - try: - resp.content # Consume socket so it can be released - except (ChunkedEncodingError, ContentDecodingError, RuntimeError): - resp.raw.read(decode_content=False) - - if len(resp.history) >= self.max_redirects: - raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects, response=resp) - - # Release the connection back into the pool. - resp.close() - - # Handle redirection without scheme (see: RFC 1808 Section 4) - if url.startswith('//'): - parsed_rurl = urlparse(resp.url) - url = '%s:%s' % (to_native_string(parsed_rurl.scheme), url) - - # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2) - parsed = urlparse(url) - if parsed.fragment == '' and previous_fragment: - parsed = parsed._replace(fragment=previous_fragment) - elif parsed.fragment: - previous_fragment = parsed.fragment - url = parsed.geturl() - - # Facilitate relative 'location' headers, as allowed by RFC 7231. - # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource') - # Compliant with RFC3986, we percent encode the url. 
- if not parsed.netloc: - url = urljoin(resp.url, requote_uri(url)) - else: - url = requote_uri(url) - - prepared_request.url = to_native_string(url) - - self.rebuild_method(prepared_request, resp) - - # https://github.com/requests/requests/issues/1084 - if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect): - # https://github.com/requests/requests/issues/3490 - purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding') - for header in purged_headers: - prepared_request.headers.pop(header, None) - prepared_request.body = None - - headers = prepared_request.headers - try: - del headers['Cookie'] - except KeyError: - pass - - # Extract any cookies sent on the response to the cookiejar - # in the new request. Because we've mutated our copied prepared - # request, use the old one that we haven't yet touched. - extract_cookies_to_jar(prepared_request._cookies, req, resp.raw) - merge_cookies(prepared_request._cookies, self.cookies) - prepared_request.prepare_cookies(prepared_request._cookies) - - # Rebuild auth and proxy information. - proxies = self.rebuild_proxies(prepared_request, proxies) - self.rebuild_auth(prepared_request, resp) - - # A failed tell() sets `_body_position` to `object()`. This non-None - # value ensures `rewindable` will be True, allowing us to raise an - # UnrewindableBodyError, instead of hanging the connection. - rewindable = ( - prepared_request._body_position is not None and - ('Content-Length' in headers or 'Transfer-Encoding' in headers) - ) - - # Attempt to rewind consumed file-like object. - if rewindable: - rewind_body(prepared_request) - - # Override the original request. 
- req = prepared_request - - if yield_requests: - yield req - else: - - resp = self.send( - req, - stream=stream, - timeout=timeout, - verify=verify, - cert=cert, - proxies=proxies, - allow_redirects=False, - **adapter_kwargs - ) - - extract_cookies_to_jar(self.cookies, prepared_request, resp.raw) - - # extract redirect url, if any, for the next loop - url = self.get_redirect_target(resp) - yield resp - - def rebuild_auth(self, prepared_request, response): - """When being redirected we may want to strip authentication from the - request to avoid leaking credentials. This method intelligently removes - and reapplies authentication where possible to avoid credential loss. - """ - headers = prepared_request.headers - url = prepared_request.url - - if 'Authorization' in headers and self.should_strip_auth(response.request.url, url): - # If we get redirected to a new host, we should strip out any - # authentication headers. - del headers['Authorization'] - - # .netrc might have more auth for us on our new host. - new_auth = get_netrc_auth(url) if self.trust_env else None - if new_auth is not None: - prepared_request.prepare_auth(new_auth) - - return - - def rebuild_proxies(self, prepared_request, proxies): - """This method re-evaluates the proxy configuration by considering the - environment variables. If we are redirected to a URL covered by - NO_PROXY, we strip the proxy configuration. Otherwise, we set missing - proxy keys for this URL (in case they were stripped by a previous - redirect). - - This method also replaces the Proxy-Authorization header where - necessary. 
- - :rtype: dict - """ - proxies = proxies if proxies is not None else {} - headers = prepared_request.headers - url = prepared_request.url - scheme = urlparse(url).scheme - new_proxies = proxies.copy() - no_proxy = proxies.get('no_proxy') - - bypass_proxy = should_bypass_proxies(url, no_proxy=no_proxy) - if self.trust_env and not bypass_proxy: - environ_proxies = get_environ_proxies(url, no_proxy=no_proxy) - - proxy = environ_proxies.get(scheme, environ_proxies.get('all')) - - if proxy: - new_proxies.setdefault(scheme, proxy) - - if 'Proxy-Authorization' in headers: - del headers['Proxy-Authorization'] - - try: - username, password = get_auth_from_url(new_proxies[scheme]) - except KeyError: - username, password = None, None - - if username and password: - headers['Proxy-Authorization'] = _basic_auth_str(username, password) - - return new_proxies - - def rebuild_method(self, prepared_request, response): - """When being redirected we may want to change the method of the request - based on certain specs or browser behavior. - """ - method = prepared_request.method - - # https://tools.ietf.org/html/rfc7231#section-6.4.4 - if response.status_code == codes.see_other and method != 'HEAD': - method = 'GET' - - # Do what the browsers do, despite standards... - # First, turn 302s into GETs. - if response.status_code == codes.found and method != 'HEAD': - method = 'GET' - - # Second, if a POST is responded to with a 301, turn it into a GET. - # This bizarre behaviour is explained in Issue 1704. - if response.status_code == codes.moved and method == 'POST': - method = 'GET' - - prepared_request.method = method - - -class Session(SessionRedirectMixin): - """A Requests session. - - Provides cookie persistence, connection-pooling, and configuration. 
- - Basic Usage:: - - >>> import requests - >>> s = requests.Session() - >>> s.get('https://httpbin.org/get') - <Response [200]> - - Or as a context manager:: - - >>> with requests.Session() as s: - >>> s.get('https://httpbin.org/get') - <Response [200]> - """ - - __attrs__ = [ - 'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify', - 'cert', 'prefetch', 'adapters', 'stream', 'trust_env', - 'max_redirects', - ] - - def __init__(self): - - #: A case-insensitive dictionary of headers to be sent on each - #: :class:`Request <Request>` sent from this - #: :class:`Session <Session>`. - self.headers = default_headers() - - #: Default Authentication tuple or object to attach to - #: :class:`Request <Request>`. - self.auth = None - - #: Dictionary mapping protocol or protocol and host to the URL of the proxy - #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to - #: be used on each :class:`Request <Request>`. - self.proxies = {} - - #: Event-handling hooks. - self.hooks = default_hooks() - - #: Dictionary of querystring data to attach to each - #: :class:`Request <Request>`. The dictionary values may be lists for - #: representing multivalued query parameters. - self.params = {} - - #: Stream response content default. - self.stream = False - - #: SSL Verification default. - self.verify = True - - #: SSL client certificate default, if String, path to ssl client - #: cert file (.pem). If Tuple, ('cert', 'key') pair. - self.cert = None - - #: Maximum number of redirects allowed. If the request exceeds this - #: limit, a :class:`TooManyRedirects` exception is raised. - #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is - #: 30. - self.max_redirects = DEFAULT_REDIRECT_LIMIT - - #: Trust environment settings for proxy configuration, default - #: authentication and similar. - self.trust_env = True - - #: A CookieJar containing all currently outstanding cookies set on this - #: session. 
By default it is a - #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but - #: may be any other ``cookielib.CookieJar`` compatible object. - self.cookies = cookiejar_from_dict({}) - - # Default connection adapters. - self.adapters = OrderedDict() - self.mount('https://', HTTPAdapter()) - self.mount('http://', HTTPAdapter()) - - def __enter__(self): - return self - - def __exit__(self, *args): - self.close() - - def prepare_request(self, request): - """Constructs a :class:`PreparedRequest <PreparedRequest>` for - transmission and returns it. The :class:`PreparedRequest` has settings - merged from the :class:`Request <Request>` instance and those of the - :class:`Session`. - - :param request: :class:`Request` instance to prepare with this - session's settings. - :rtype: requests.PreparedRequest - """ - cookies = request.cookies or {} - - # Bootstrap CookieJar. - if not isinstance(cookies, cookielib.CookieJar): - cookies = cookiejar_from_dict(cookies) - - # Merge with session cookies - merged_cookies = merge_cookies( - merge_cookies(RequestsCookieJar(), self.cookies), cookies) - - # Set environment's basic authentication if not explicitly set. 
- auth = request.auth - if self.trust_env and not auth and not self.auth: - auth = get_netrc_auth(request.url) - - p = PreparedRequest() - p.prepare( - method=request.method.upper(), - url=request.url, - files=request.files, - data=request.data, - json=request.json, - headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict), - params=merge_setting(request.params, self.params), - auth=merge_setting(auth, self.auth), - cookies=merged_cookies, - hooks=merge_hooks(request.hooks, self.hooks), - ) - return p - - def request(self, method, url, - params=None, data=None, headers=None, cookies=None, files=None, - auth=None, timeout=None, allow_redirects=True, proxies=None, - hooks=None, stream=None, verify=None, cert=None, json=None): - """Constructs a :class:`Request <Request>`, prepares it and sends it. - Returns :class:`Response <Response>` object. - - :param method: method for the new :class:`Request` object. - :param url: URL for the new :class:`Request` object. - :param params: (optional) Dictionary or bytes to be sent in the query - string for the :class:`Request`. - :param data: (optional) Dictionary, list of tuples, bytes, or file-like - object to send in the body of the :class:`Request`. - :param json: (optional) json to send in the body of the - :class:`Request`. - :param headers: (optional) Dictionary of HTTP Headers to send with the - :class:`Request`. - :param cookies: (optional) Dict or CookieJar object to send with the - :class:`Request`. - :param files: (optional) Dictionary of ``'filename': file-like-objects`` - for multipart encoding upload. - :param auth: (optional) Auth tuple or callable to enable - Basic/Digest/Custom HTTP Auth. - :param timeout: (optional) How long to wait for the server to send - data before giving up, as a float, or a :ref:`(connect timeout, - read timeout) <timeouts>` tuple. - :type timeout: float or tuple - :param allow_redirects: (optional) Set to True by default. 
- :type allow_redirects: bool - :param proxies: (optional) Dictionary mapping protocol or protocol and - hostname to the URL of the proxy. - :param stream: (optional) whether to immediately download the response - content. Defaults to ``False``. - :param verify: (optional) Either a boolean, in which case it controls whether we verify - the server's TLS certificate, or a string, in which case it must be a path - to a CA bundle to use. Defaults to ``True``. - :param cert: (optional) if String, path to ssl client cert file (.pem). - If Tuple, ('cert', 'key') pair. - :rtype: requests.Response - """ - # Create the Request. - req = Request( - method=method.upper(), - url=url, - headers=headers, - files=files, - data=data or {}, - json=json, - params=params or {}, - auth=auth, - cookies=cookies, - hooks=hooks, - ) - prep = self.prepare_request(req) - - proxies = proxies or {} - - settings = self.merge_environment_settings( - prep.url, proxies, stream, verify, cert - ) - - # Send the request. - send_kwargs = { - 'timeout': timeout, - 'allow_redirects': allow_redirects, - } - send_kwargs.update(settings) - resp = self.send(prep, **send_kwargs) - - return resp - - def get(self, url, **kwargs): - r"""Sends a GET request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - kwargs.setdefault('allow_redirects', True) - return self.request('GET', url, **kwargs) - - def options(self, url, **kwargs): - r"""Sends a OPTIONS request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - kwargs.setdefault('allow_redirects', True) - return self.request('OPTIONS', url, **kwargs) - - def head(self, url, **kwargs): - r"""Sends a HEAD request. Returns :class:`Response` object. 
- - :param url: URL for the new :class:`Request` object. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - kwargs.setdefault('allow_redirects', False) - return self.request('HEAD', url, **kwargs) - - def post(self, url, data=None, json=None, **kwargs): - r"""Sends a POST request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param data: (optional) Dictionary, list of tuples, bytes, or file-like - object to send in the body of the :class:`Request`. - :param json: (optional) json to send in the body of the :class:`Request`. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - return self.request('POST', url, data=data, json=json, **kwargs) - - def put(self, url, data=None, **kwargs): - r"""Sends a PUT request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param data: (optional) Dictionary, list of tuples, bytes, or file-like - object to send in the body of the :class:`Request`. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - return self.request('PUT', url, data=data, **kwargs) - - def patch(self, url, data=None, **kwargs): - r"""Sends a PATCH request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param data: (optional) Dictionary, list of tuples, bytes, or file-like - object to send in the body of the :class:`Request`. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - return self.request('PATCH', url, data=data, **kwargs) - - def delete(self, url, **kwargs): - r"""Sends a DELETE request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param \*\*kwargs: Optional arguments that ``request`` takes. 
- :rtype: requests.Response - """ - - return self.request('DELETE', url, **kwargs) - - def send(self, request, **kwargs): - """Send a given PreparedRequest. - - :rtype: requests.Response - """ - # Set defaults that the hooks can utilize to ensure they always have - # the correct parameters to reproduce the previous request. - kwargs.setdefault('stream', self.stream) - kwargs.setdefault('verify', self.verify) - kwargs.setdefault('cert', self.cert) - kwargs.setdefault('proxies', self.proxies) - - # It's possible that users might accidentally send a Request object. - # Guard against that specific failure case. - if isinstance(request, Request): - raise ValueError('You can only send PreparedRequests.') - - # Set up variables needed for resolve_redirects and dispatching of hooks - allow_redirects = kwargs.pop('allow_redirects', True) - stream = kwargs.get('stream') - hooks = request.hooks - - # Get the appropriate adapter to use - adapter = self.get_adapter(url=request.url) - - # Start time (approximately) of the request - start = preferred_clock() - - # Send the request - r = adapter.send(request, **kwargs) - - # Total elapsed time of the request (approximately) - elapsed = preferred_clock() - start - r.elapsed = timedelta(seconds=elapsed) - - # Response manipulation hooks - r = dispatch_hook('response', hooks, r, **kwargs) - - # Persist cookies - if r.history: - - # If the hooks create history then we want those cookies too - for resp in r.history: - extract_cookies_to_jar(self.cookies, resp.request, resp.raw) - - extract_cookies_to_jar(self.cookies, request, r.raw) - - # Redirect resolving generator. - gen = self.resolve_redirects(r, request, **kwargs) - - # Resolve redirects if allowed. - history = [resp for resp in gen] if allow_redirects else [] - - # Shuffle things around if there's history. 
- if history: - # Insert the first (original) request at the start - history.insert(0, r) - # Get the last request made - r = history.pop() - r.history = history - - # If redirects aren't being followed, store the response on the Request for Response.next(). - if not allow_redirects: - try: - r._next = next(self.resolve_redirects(r, request, yield_requests=True, **kwargs)) - except StopIteration: - pass - - if not stream: - r.content - - return r - - def merge_environment_settings(self, url, proxies, stream, verify, cert): - """ - Check the environment and merge it with some settings. - - :rtype: dict - """ - # Gather clues from the surrounding environment. - if self.trust_env: - # Set environment's proxies. - no_proxy = proxies.get('no_proxy') if proxies is not None else None - env_proxies = get_environ_proxies(url, no_proxy=no_proxy) - for (k, v) in env_proxies.items(): - proxies.setdefault(k, v) - - # Look for requests environment configuration and be compatible - # with cURL. - if verify is True or verify is None: - verify = (os.environ.get('REQUESTS_CA_BUNDLE') or - os.environ.get('CURL_CA_BUNDLE')) - - # Merge all the kwargs. - proxies = merge_setting(proxies, self.proxies) - stream = merge_setting(stream, self.stream) - verify = merge_setting(verify, self.verify) - cert = merge_setting(cert, self.cert) - - return {'verify': verify, 'proxies': proxies, 'stream': stream, - 'cert': cert} - - def get_adapter(self, url): - """ - Returns the appropriate connection adapter for the given URL. - - :rtype: requests.adapters.BaseAdapter - """ - for (prefix, adapter) in self.adapters.items(): - - if url.lower().startswith(prefix.lower()): - return adapter - - # Nothing matches :-/ - raise InvalidSchema("No connection adapters were found for '%s'" % url) - - def close(self): - """Closes all adapters and as such the session""" - for v in self.adapters.values(): - v.close() - - def mount(self, prefix, adapter): - """Registers a connection adapter to a prefix. 
- - Adapters are sorted in descending order by prefix length. - """ - self.adapters[prefix] = adapter - keys_to_move = [k for k in self.adapters if len(k) < len(prefix)] - - for key in keys_to_move: - self.adapters[key] = self.adapters.pop(key) - - def __getstate__(self): - state = {attr: getattr(self, attr, None) for attr in self.__attrs__} - return state - - def __setstate__(self, state): - for attr, value in state.items(): - setattr(self, attr, value) - - -def session(): - """ - Returns a :class:`Session` for context-management. - - .. deprecated:: 1.0.0 - - This method has been deprecated since version 1.0.0 and is only kept for - backwards compatibility. New code should use :class:`~requests.sessions.Session` - to create a session. This may be removed at a future date. - - :rtype: Session - """ - return Session() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/status_codes.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/status_codes.py deleted file mode 100644 index 813e8c4..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/status_codes.py +++ /dev/null @@ -1,120 +0,0 @@ -# -*- coding: utf-8 -*- - -r""" -The ``codes`` object defines a mapping from common names for HTTP statuses -to their numerical codes, accessible either as attributes or as dictionary -items. - ->>> requests.codes['temporary_redirect'] -307 ->>> requests.codes.teapot -418 ->>> requests.codes['\o/'] -200 - -Some codes have multiple names, and both upper- and lower-case versions of -the names are allowed. For example, ``codes.ok``, ``codes.OK``, and -``codes.okay`` all correspond to the HTTP status code 200. -""" - -from .structures import LookupDict - -_codes = { - - # Informational. 
- 100: ('continue',), - 101: ('switching_protocols',), - 102: ('processing',), - 103: ('checkpoint',), - 122: ('uri_too_long', 'request_uri_too_long'), - 200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'), - 201: ('created',), - 202: ('accepted',), - 203: ('non_authoritative_info', 'non_authoritative_information'), - 204: ('no_content',), - 205: ('reset_content', 'reset'), - 206: ('partial_content', 'partial'), - 207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'), - 208: ('already_reported',), - 226: ('im_used',), - - # Redirection. - 300: ('multiple_choices',), - 301: ('moved_permanently', 'moved', '\\o-'), - 302: ('found',), - 303: ('see_other', 'other'), - 304: ('not_modified',), - 305: ('use_proxy',), - 306: ('switch_proxy',), - 307: ('temporary_redirect', 'temporary_moved', 'temporary'), - 308: ('permanent_redirect', - 'resume_incomplete', 'resume',), # These 2 to be removed in 3.0 - - # Client Error. - 400: ('bad_request', 'bad'), - 401: ('unauthorized',), - 402: ('payment_required', 'payment'), - 403: ('forbidden',), - 404: ('not_found', '-o-'), - 405: ('method_not_allowed', 'not_allowed'), - 406: ('not_acceptable',), - 407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'), - 408: ('request_timeout', 'timeout'), - 409: ('conflict',), - 410: ('gone',), - 411: ('length_required',), - 412: ('precondition_failed', 'precondition'), - 413: ('request_entity_too_large',), - 414: ('request_uri_too_large',), - 415: ('unsupported_media_type', 'unsupported_media', 'media_type'), - 416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'), - 417: ('expectation_failed',), - 418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'), - 421: ('misdirected_request',), - 422: ('unprocessable_entity', 'unprocessable'), - 423: ('locked',), - 424: ('failed_dependency', 'dependency'), - 425: ('unordered_collection', 'unordered'), - 426: ('upgrade_required', 'upgrade'), - 428: ('precondition_required', 
'precondition'), - 429: ('too_many_requests', 'too_many'), - 431: ('header_fields_too_large', 'fields_too_large'), - 444: ('no_response', 'none'), - 449: ('retry_with', 'retry'), - 450: ('blocked_by_windows_parental_controls', 'parental_controls'), - 451: ('unavailable_for_legal_reasons', 'legal_reasons'), - 499: ('client_closed_request',), - - # Server Error. - 500: ('internal_server_error', 'server_error', '/o\\', '✗'), - 501: ('not_implemented',), - 502: ('bad_gateway',), - 503: ('service_unavailable', 'unavailable'), - 504: ('gateway_timeout',), - 505: ('http_version_not_supported', 'http_version'), - 506: ('variant_also_negotiates',), - 507: ('insufficient_storage',), - 509: ('bandwidth_limit_exceeded', 'bandwidth'), - 510: ('not_extended',), - 511: ('network_authentication_required', 'network_auth', 'network_authentication'), -} - -codes = LookupDict(name='status_codes') - -def _init(): - for code, titles in _codes.items(): - for title in titles: - setattr(codes, title, code) - if not title.startswith(('\\', '/')): - setattr(codes, title.upper(), code) - - def doc(code): - names = ', '.join('``%s``' % n for n in _codes[code]) - return '* %d: %s' % (code, names) - - global __doc__ - __doc__ = (__doc__ + '\n' + - '\n'.join(doc(code) for code in sorted(_codes)) - if __doc__ is not None else None) - -_init() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/structures.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/structures.py deleted file mode 100644 index da930e2..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/structures.py +++ /dev/null @@ -1,103 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests.structures -~~~~~~~~~~~~~~~~~~~ - -Data structures that power Requests. -""" - -from .compat import OrderedDict, Mapping, MutableMapping - - -class CaseInsensitiveDict(MutableMapping): - """A case-insensitive ``dict``-like object. 
- - Implements all methods and operations of - ``MutableMapping`` as well as dict's ``copy``. Also - provides ``lower_items``. - - All keys are expected to be strings. The structure remembers the - case of the last key to be set, and ``iter(instance)``, - ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()`` - will contain case-sensitive keys. However, querying and contains - testing is case insensitive:: - - cid = CaseInsensitiveDict() - cid['Accept'] = 'application/json' - cid['aCCEPT'] == 'application/json' # True - list(cid) == ['Accept'] # True - - For example, ``headers['content-encoding']`` will return the - value of a ``'Content-Encoding'`` response header, regardless - of how the header name was originally stored. - - If the constructor, ``.update``, or equality comparison - operations are given keys that have equal ``.lower()``s, the - behavior is undefined. - """ - - def __init__(self, data=None, **kwargs): - self._store = OrderedDict() - if data is None: - data = {} - self.update(data, **kwargs) - - def __setitem__(self, key, value): - # Use the lowercased key for lookups, but store the actual - # key alongside the value. 
- self._store[key.lower()] = (key, value) - - def __getitem__(self, key): - return self._store[key.lower()][1] - - def __delitem__(self, key): - del self._store[key.lower()] - - def __iter__(self): - return (casedkey for casedkey, mappedvalue in self._store.values()) - - def __len__(self): - return len(self._store) - - def lower_items(self): - """Like iteritems(), but with all lowercase keys.""" - return ( - (lowerkey, keyval[1]) - for (lowerkey, keyval) - in self._store.items() - ) - - def __eq__(self, other): - if isinstance(other, Mapping): - other = CaseInsensitiveDict(other) - else: - return NotImplemented - # Compare insensitively - return dict(self.lower_items()) == dict(other.lower_items()) - - # Copy is required - def copy(self): - return CaseInsensitiveDict(self._store.values()) - - def __repr__(self): - return str(dict(self.items())) - - -class LookupDict(dict): - """Dictionary lookup object.""" - - def __init__(self, name=None): - self.name = name - super(LookupDict, self).__init__() - - def __repr__(self): - return '<lookup \'%s\'>' % (self.name) - - def __getitem__(self, key): - # We allow fall-through here, so values default to None - - return self.__dict__.get(key, None) - - def get(self, key, default=None): - return self.__dict__.get(key, default) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/utils.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/utils.py deleted file mode 100644 index 8170a8d..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/requests/utils.py +++ /dev/null @@ -1,977 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests.utils -~~~~~~~~~~~~~~ - -This module provides utility functions that are used within Requests -that are also useful for external consumption. 
-""" - -import codecs -import contextlib -import io -import os -import re -import socket -import struct -import sys -import tempfile -import warnings -import zipfile - -from .__version__ import __version__ -from . import certs -# to_native_string is unused here, but imported here for backwards compatibility -from ._internal_utils import to_native_string -from .compat import parse_http_list as _parse_list_header -from .compat import ( - quote, urlparse, bytes, str, OrderedDict, unquote, getproxies, - proxy_bypass, urlunparse, basestring, integer_types, is_py3, - proxy_bypass_environment, getproxies_environment, Mapping) -from .cookies import cookiejar_from_dict -from .structures import CaseInsensitiveDict -from .exceptions import ( - InvalidURL, InvalidHeader, FileModeWarning, UnrewindableBodyError) - -NETRC_FILES = ('.netrc', '_netrc') - -DEFAULT_CA_BUNDLE_PATH = certs.where() - -DEFAULT_PORTS = {'http': 80, 'https': 443} - - -if sys.platform == 'win32': - # provide a proxy_bypass version on Windows without DNS lookups - - def proxy_bypass_registry(host): - try: - if is_py3: - import winreg - else: - import _winreg as winreg - except ImportError: - return False - - try: - internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER, - r'Software\Microsoft\Windows\CurrentVersion\Internet Settings') - # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it - proxyEnable = int(winreg.QueryValueEx(internetSettings, - 'ProxyEnable')[0]) - # ProxyOverride is almost always a string - proxyOverride = winreg.QueryValueEx(internetSettings, - 'ProxyOverride')[0] - except OSError: - return False - if not proxyEnable or not proxyOverride: - return False - - # make a check value list from the registry entry: replace the - # '<local>' string by the localhost entry and the corresponding - # canonical entry. - proxyOverride = proxyOverride.split(';') - # now check if we match one of the registry values. - for test in proxyOverride: - if test == '<local>': - if '.' 
not in host: - return True - test = test.replace(".", r"\.") # mask dots - test = test.replace("*", r".*") # change glob sequence - test = test.replace("?", r".") # change glob char - if re.match(test, host, re.I): - return True - return False - - def proxy_bypass(host): # noqa - """Return True, if the host should be bypassed. - - Checks proxy settings gathered from the environment, if specified, - or the registry. - """ - if getproxies_environment(): - return proxy_bypass_environment(host) - else: - return proxy_bypass_registry(host) - - -def dict_to_sequence(d): - """Returns an internal sequence dictionary update.""" - - if hasattr(d, 'items'): - d = d.items() - - return d - - -def super_len(o): - total_length = None - current_position = 0 - - if hasattr(o, '__len__'): - total_length = len(o) - - elif hasattr(o, 'len'): - total_length = o.len - - elif hasattr(o, 'fileno'): - try: - fileno = o.fileno() - except io.UnsupportedOperation: - pass - else: - total_length = os.fstat(fileno).st_size - - # Having used fstat to determine the file length, we need to - # confirm that this file was opened up in binary mode. - if 'b' not in o.mode: - warnings.warn(( - "Requests has determined the content-length for this " - "request using the binary size of the file: however, the " - "file has been opened in text mode (i.e. without the 'b' " - "flag in the mode). This may lead to an incorrect " - "content-length. In Requests 3.0, support will be removed " - "for files in text mode."), - FileModeWarning - ) - - if hasattr(o, 'tell'): - try: - current_position = o.tell() - except (OSError, IOError): - # This can happen in some weird situations, such as when the file - # is actually a special file descriptor like stdin. In this - # instance, we don't know what the length is, so set it to zero and - # let requests chunk it instead. 
- if total_length is not None: - current_position = total_length - else: - if hasattr(o, 'seek') and total_length is None: - # StringIO and BytesIO have seek but no useable fileno - try: - # seek to end of file - o.seek(0, 2) - total_length = o.tell() - - # seek back to current position to support - # partially read file-like objects - o.seek(current_position or 0) - except (OSError, IOError): - total_length = 0 - - if total_length is None: - total_length = 0 - - return max(0, total_length - current_position) - - -def get_netrc_auth(url, raise_errors=False): - """Returns the Requests tuple auth for a given url from netrc.""" - - try: - from netrc import netrc, NetrcParseError - - netrc_path = None - - for f in NETRC_FILES: - try: - loc = os.path.expanduser('~/{}'.format(f)) - except KeyError: - # os.path.expanduser can fail when $HOME is undefined and - # getpwuid fails. See https://bugs.python.org/issue20164 & - # https://github.com/requests/requests/issues/1846 - return - - if os.path.exists(loc): - netrc_path = loc - break - - # Abort early if there isn't one. - if netrc_path is None: - return - - ri = urlparse(url) - - # Strip port numbers from netloc. This weird `if...encode`` dance is - # used for Python 3.2, which doesn't support unicode literals. - splitstr = b':' - if isinstance(url, str): - splitstr = splitstr.decode('ascii') - host = ri.netloc.split(splitstr)[0] - - try: - _netrc = netrc(netrc_path).authenticators(host) - if _netrc: - # Return with login / password - login_i = (0 if _netrc[0] else 1) - return (_netrc[login_i], _netrc[2]) - except (NetrcParseError, IOError): - # If there was a parsing error or a permissions issue reading the file, - # we'll just skip netrc auth unless explicitly asked to raise errors. - if raise_errors: - raise - - # AppEngine hackiness. 
- except (ImportError, AttributeError): - pass - - -def guess_filename(obj): - """Tries to guess the filename of the given object.""" - name = getattr(obj, 'name', None) - if (name and isinstance(name, basestring) and name[0] != '<' and - name[-1] != '>'): - return os.path.basename(name) - - -def extract_zipped_paths(path): - """Replace nonexistent paths that look like they refer to a member of a zip - archive with the location of an extracted copy of the target, or else - just return the provided path unchanged. - """ - if os.path.exists(path): - # this is already a valid path, no need to do anything further - return path - - # find the first valid part of the provided path and treat that as a zip archive - # assume the rest of the path is the name of a member in the archive - archive, member = os.path.split(path) - while archive and not os.path.exists(archive): - archive, prefix = os.path.split(archive) - member = '/'.join([prefix, member]) - - if not zipfile.is_zipfile(archive): - return path - - zip_file = zipfile.ZipFile(archive) - if member not in zip_file.namelist(): - return path - - # we have a valid zip archive and a valid member of that archive - tmp = tempfile.gettempdir() - extracted_path = os.path.join(tmp, *member.split('/')) - if not os.path.exists(extracted_path): - extracted_path = zip_file.extract(member, path=tmp) - - return extracted_path - - -def from_key_val_list(value): - """Take an object and test to see if it can be represented as a - dictionary. 
Unless it can not be represented as such, return an - OrderedDict, e.g., - - :: - - >>> from_key_val_list([('key', 'val')]) - OrderedDict([('key', 'val')]) - >>> from_key_val_list('string') - ValueError: cannot encode objects that are not 2-tuples - >>> from_key_val_list({'key': 'val'}) - OrderedDict([('key', 'val')]) - - :rtype: OrderedDict - """ - if value is None: - return None - - if isinstance(value, (str, bytes, bool, int)): - raise ValueError('cannot encode objects that are not 2-tuples') - - return OrderedDict(value) - - -def to_key_val_list(value): - """Take an object and test to see if it can be represented as a - dictionary. If it can be, return a list of tuples, e.g., - - :: - - >>> to_key_val_list([('key', 'val')]) - [('key', 'val')] - >>> to_key_val_list({'key': 'val'}) - [('key', 'val')] - >>> to_key_val_list('string') - ValueError: cannot encode objects that are not 2-tuples. - - :rtype: list - """ - if value is None: - return None - - if isinstance(value, (str, bytes, bool, int)): - raise ValueError('cannot encode objects that are not 2-tuples') - - if isinstance(value, Mapping): - value = value.items() - - return list(value) - - -# From mitsuhiko/werkzeug (used with permission). -def parse_list_header(value): - """Parse lists as described by RFC 2068 Section 2. - - In particular, parse comma-separated lists where the elements of - the list may include quoted-strings. A quoted-string could - contain a comma. A non-quoted string could have quotes in the - middle. Quotes are removed automatically after parsing. - - It basically works like :func:`parse_set_header` just that items - may appear multiple times and case sensitivity is preserved. - - The return value is a standard :class:`list`: - - >>> parse_list_header('token, "quoted value"') - ['token', 'quoted value'] - - To create a header from the :class:`list` again, use the - :func:`dump_header` function. - - :param value: a string with a list header. 
- :return: :class:`list` - :rtype: list - """ - result = [] - for item in _parse_list_header(value): - if item[:1] == item[-1:] == '"': - item = unquote_header_value(item[1:-1]) - result.append(item) - return result - - -# From mitsuhiko/werkzeug (used with permission). -def parse_dict_header(value): - """Parse lists of key, value pairs as described by RFC 2068 Section 2 and - convert them into a python dict: - - >>> d = parse_dict_header('foo="is a fish", bar="as well"') - >>> type(d) is dict - True - >>> sorted(d.items()) - [('bar', 'as well'), ('foo', 'is a fish')] - - If there is no value for a key it will be `None`: - - >>> parse_dict_header('key_without_value') - {'key_without_value': None} - - To create a header from the :class:`dict` again, use the - :func:`dump_header` function. - - :param value: a string with a dict header. - :return: :class:`dict` - :rtype: dict - """ - result = {} - for item in _parse_list_header(value): - if '=' not in item: - result[item] = None - continue - name, value = item.split('=', 1) - if value[:1] == value[-1:] == '"': - value = unquote_header_value(value[1:-1]) - result[name] = value - return result - - -# From mitsuhiko/werkzeug (used with permission). -def unquote_header_value(value, is_filename=False): - r"""Unquotes a header value. (Reversal of :func:`quote_header_value`). - This does not use the real unquoting but what browsers are actually - using for quoting. - - :param value: the header value to unquote. - :rtype: str - """ - if value and value[0] == value[-1] == '"': - # this is not the real unquoting, but fixing this so that the - # RFC is met will result in bugs with internet explorer and - # probably some other browsers as well. IE for example is - # uploading files with "C:\foo\bar.txt" as filename - value = value[1:-1] - - # if this is a filename and the starting characters look like - # a UNC path, then just return the value without quotes. 
Using the - # replace sequence below on a UNC path has the effect of turning - # the leading double slash into a single slash and then - # _fix_ie_filename() doesn't work correctly. See #458. - if not is_filename or value[:2] != '\\\\': - return value.replace('\\\\', '\\').replace('\\"', '"') - return value - - -def dict_from_cookiejar(cj): - """Returns a key/value dictionary from a CookieJar. - - :param cj: CookieJar object to extract cookies from. - :rtype: dict - """ - - cookie_dict = {} - - for cookie in cj: - cookie_dict[cookie.name] = cookie.value - - return cookie_dict - - -def add_dict_to_cookiejar(cj, cookie_dict): - """Returns a CookieJar from a key/value dictionary. - - :param cj: CookieJar to insert cookies into. - :param cookie_dict: Dict of key/values to insert into CookieJar. - :rtype: CookieJar - """ - - return cookiejar_from_dict(cookie_dict, cj) - - -def get_encodings_from_content(content): - """Returns encodings from given content string. - - :param content: bytestring to extract encodings from. - """ - warnings.warn(( - 'In requests 3.0, get_encodings_from_content will be removed. For ' - 'more information, please see the discussion on issue #2266. 
(This' - ' warning should only appear once.)'), - DeprecationWarning) - - charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I) - pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I) - xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]') - - return (charset_re.findall(content) + - pragma_re.findall(content) + - xml_re.findall(content)) - - -def _parse_content_type_header(header): - """Returns content type and parameters from given header - - :param header: string - :return: tuple containing content type and dictionary of - parameters - """ - - tokens = header.split(';') - content_type, params = tokens[0].strip(), tokens[1:] - params_dict = {} - items_to_strip = "\"' " - - for param in params: - param = param.strip() - if param: - key, value = param, True - index_of_equals = param.find("=") - if index_of_equals != -1: - key = param[:index_of_equals].strip(items_to_strip) - value = param[index_of_equals + 1:].strip(items_to_strip) - params_dict[key.lower()] = value - return content_type, params_dict - - -def get_encoding_from_headers(headers): - """Returns encodings from given HTTP Header Dict. - - :param headers: dictionary to extract encoding from. 
- :rtype: str - """ - - content_type = headers.get('content-type') - - if not content_type: - return None - - content_type, params = _parse_content_type_header(content_type) - - if 'charset' in params: - return params['charset'].strip("'\"") - - if 'text' in content_type: - return 'ISO-8859-1' - - -def stream_decode_response_unicode(iterator, r): - """Stream decodes a iterator.""" - - if r.encoding is None: - for item in iterator: - yield item - return - - decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace') - for chunk in iterator: - rv = decoder.decode(chunk) - if rv: - yield rv - rv = decoder.decode(b'', final=True) - if rv: - yield rv - - -def iter_slices(string, slice_length): - """Iterate over slices of a string.""" - pos = 0 - if slice_length is None or slice_length <= 0: - slice_length = len(string) - while pos < len(string): - yield string[pos:pos + slice_length] - pos += slice_length - - -def get_unicode_from_response(r): - """Returns the requested content back in unicode. - - :param r: Response object to get unicode content from. - - Tried: - - 1. charset from content-type - 2. fall back and replace all unicode characters - - :rtype: str - """ - warnings.warn(( - 'In requests 3.0, get_unicode_from_response will be removed. For ' - 'more information, please see the discussion on issue #2266. 
(This' - ' warning should only appear once.)'), - DeprecationWarning) - - tried_encodings = [] - - # Try charset from content-type - encoding = get_encoding_from_headers(r.headers) - - if encoding: - try: - return str(r.content, encoding) - except UnicodeError: - tried_encodings.append(encoding) - - # Fall back: - try: - return str(r.content, encoding, errors='replace') - except TypeError: - return r.content - - -# The unreserved URI characters (RFC 3986) -UNRESERVED_SET = frozenset( - "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~") - - -def unquote_unreserved(uri): - """Un-escape any percent-escape sequences in a URI that are unreserved - characters. This leaves all reserved, illegal and non-ASCII bytes encoded. - - :rtype: str - """ - parts = uri.split('%') - for i in range(1, len(parts)): - h = parts[i][0:2] - if len(h) == 2 and h.isalnum(): - try: - c = chr(int(h, 16)) - except ValueError: - raise InvalidURL("Invalid percent-escape sequence: '%s'" % h) - - if c in UNRESERVED_SET: - parts[i] = c + parts[i][2:] - else: - parts[i] = '%' + parts[i] - else: - parts[i] = '%' + parts[i] - return ''.join(parts) - - -def requote_uri(uri): - """Re-quote the given URI. - - This function passes the given URI through an unquote/quote cycle to - ensure that it is fully and consistently quoted. - - :rtype: str - """ - safe_with_percent = "!#$%&'()*+,/:;=?@[]~" - safe_without_percent = "!#$&'()*+,/:;=?@[]~" - try: - # Unquote only the unreserved characters - # Then quote only illegal characters (do not quote reserved, - # unreserved, or '%') - return quote(unquote_unreserved(uri), safe=safe_with_percent) - except InvalidURL: - # We couldn't unquote the given URI, so let's try quoting it, but - # there may be unquoted '%'s in the URI. We need to make sure they're - # properly quoted so they do not cause issues elsewhere. 
- return quote(uri, safe=safe_without_percent) - - -def address_in_network(ip, net): - """This function allows you to check if an IP belongs to a network subnet - - Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24 - returns False if ip = 192.168.1.1 and net = 192.168.100.0/24 - - :rtype: bool - """ - ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0] - netaddr, bits = net.split('/') - netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0] - network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask - return (ipaddr & netmask) == (network & netmask) - - -def dotted_netmask(mask): - """Converts mask from /xx format to xxx.xxx.xxx.xxx - - Example: if mask is 24 function returns 255.255.255.0 - - :rtype: str - """ - bits = 0xffffffff ^ (1 << 32 - mask) - 1 - return socket.inet_ntoa(struct.pack('>I', bits)) - - -def is_ipv4_address(string_ip): - """ - :rtype: bool - """ - try: - socket.inet_aton(string_ip) - except socket.error: - return False - return True - - -def is_valid_cidr(string_network): - """ - Very simple check of the cidr format in no_proxy variable. - - :rtype: bool - """ - if string_network.count('/') == 1: - try: - mask = int(string_network.split('/')[1]) - except ValueError: - return False - - if mask < 1 or mask > 32: - return False - - try: - socket.inet_aton(string_network.split('/')[0]) - except socket.error: - return False - else: - return False - return True - - -@contextlib.contextmanager -def set_environ(env_name, value): - """Set the environment variable 'env_name' to 'value' - - Save previous value, yield, and then restore the previous value stored in - the environment variable 'env_name'. 
- - If 'value' is None, do nothing""" - value_changed = value is not None - if value_changed: - old_value = os.environ.get(env_name) - os.environ[env_name] = value - try: - yield - finally: - if value_changed: - if old_value is None: - del os.environ[env_name] - else: - os.environ[env_name] = old_value - - -def should_bypass_proxies(url, no_proxy): - """ - Returns whether we should bypass proxies or not. - - :rtype: bool - """ - # Prioritize lowercase environment variables over uppercase - # to keep a consistent behaviour with other http projects (curl, wget). - get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper()) - - # First check whether no_proxy is defined. If it is, check that the URL - # we're getting isn't in the no_proxy list. - no_proxy_arg = no_proxy - if no_proxy is None: - no_proxy = get_proxy('no_proxy') - parsed = urlparse(url) - - if parsed.hostname is None: - # URLs don't always have hostnames, e.g. file:/// urls. - return True - - if no_proxy: - # We need to check whether we match here. We need to see if we match - # the end of the hostname, both with and without the port. - no_proxy = ( - host for host in no_proxy.replace(' ', '').split(',') if host - ) - - if is_ipv4_address(parsed.hostname): - for proxy_ip in no_proxy: - if is_valid_cidr(proxy_ip): - if address_in_network(parsed.hostname, proxy_ip): - return True - elif parsed.hostname == proxy_ip: - # If no_proxy ip was defined in plain IP notation instead of cidr notation & - # matches the IP of the index - return True - else: - host_with_port = parsed.hostname - if parsed.port: - host_with_port += ':{}'.format(parsed.port) - - for host in no_proxy: - if parsed.hostname.endswith(host) or host_with_port.endswith(host): - # The URL does match something in no_proxy, so we don't want - # to apply the proxies on this URL. - return True - - with set_environ('no_proxy', no_proxy_arg): - # parsed.hostname can be `None` in cases such as a file URI. 
- try: - bypass = proxy_bypass(parsed.hostname) - except (TypeError, socket.gaierror): - bypass = False - - if bypass: - return True - - return False - - -def get_environ_proxies(url, no_proxy=None): - """ - Return a dict of environment proxies. - - :rtype: dict - """ - if should_bypass_proxies(url, no_proxy=no_proxy): - return {} - else: - return getproxies() - - -def select_proxy(url, proxies): - """Select a proxy for the url, if applicable. - - :param url: The url being for the request - :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs - """ - proxies = proxies or {} - urlparts = urlparse(url) - if urlparts.hostname is None: - return proxies.get(urlparts.scheme, proxies.get('all')) - - proxy_keys = [ - urlparts.scheme + '://' + urlparts.hostname, - urlparts.scheme, - 'all://' + urlparts.hostname, - 'all', - ] - proxy = None - for proxy_key in proxy_keys: - if proxy_key in proxies: - proxy = proxies[proxy_key] - break - - return proxy - - -def default_user_agent(name="python-requests"): - """ - Return a string representing the default user agent. - - :rtype: str - """ - return '%s/%s' % (name, __version__) - - -def default_headers(): - """ - :rtype: requests.structures.CaseInsensitiveDict - """ - return CaseInsensitiveDict({ - 'User-Agent': default_user_agent(), - 'Accept-Encoding': ', '.join(('gzip', 'deflate')), - 'Accept': '*/*', - 'Connection': 'keep-alive', - }) - - -def parse_header_links(value): - """Return a list of parsed link headers proxies. - - i.e. 
Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg" - - :rtype: list - """ - - links = [] - - replace_chars = ' \'"' - - value = value.strip(replace_chars) - if not value: - return links - - for val in re.split(', *<', value): - try: - url, params = val.split(';', 1) - except ValueError: - url, params = val, '' - - link = {'url': url.strip('<> \'"')} - - for param in params.split(';'): - try: - key, value = param.split('=') - except ValueError: - break - - link[key.strip(replace_chars)] = value.strip(replace_chars) - - links.append(link) - - return links - - -# Null bytes; no need to recreate these on each call to guess_json_utf -_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3 -_null2 = _null * 2 -_null3 = _null * 3 - - -def guess_json_utf(data): - """ - :rtype: str - """ - # JSON always starts with two ASCII characters, so detection is as - # easy as counting the nulls and from their location and count - # determine the encoding. Also detect a BOM, if present. - sample = data[:4] - if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE): - return 'utf-32' # BOM included - if sample[:3] == codecs.BOM_UTF8: - return 'utf-8-sig' # BOM included, MS style (discouraged) - if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE): - return 'utf-16' # BOM included - nullcount = sample.count(_null) - if nullcount == 0: - return 'utf-8' - if nullcount == 2: - if sample[::2] == _null2: # 1st and 3rd are null - return 'utf-16-be' - if sample[1::2] == _null2: # 2nd and 4th are null - return 'utf-16-le' - # Did not detect 2 valid UTF-16 ascii-range characters - if nullcount == 3: - if sample[:3] == _null3: - return 'utf-32-be' - if sample[1:] == _null3: - return 'utf-32-le' - # Did not detect a valid UTF-32 ascii-range character - return None - - -def prepend_scheme_if_needed(url, new_scheme): - """Given a URL that may or may not have a scheme, prepend the given scheme. 
- Does not replace a present scheme with the one provided as an argument. - - :rtype: str - """ - scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme) - - # urlparse is a finicky beast, and sometimes decides that there isn't a - # netloc present. Assume that it's being over-cautious, and switch netloc - # and path if urlparse decided there was no netloc. - if not netloc: - netloc, path = path, netloc - - return urlunparse((scheme, netloc, path, params, query, fragment)) - - -def get_auth_from_url(url): - """Given a url with authentication components, extract them into a tuple of - username,password. - - :rtype: (str,str) - """ - parsed = urlparse(url) - - try: - auth = (unquote(parsed.username), unquote(parsed.password)) - except (AttributeError, TypeError): - auth = ('', '') - - return auth - - -# Moved outside of function to avoid recompile every call -_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$') -_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$') - - -def check_header_validity(header): - """Verifies that header value is a string which doesn't contain - leading whitespace or return characters. This prevents unintended - header injection. - - :param header: tuple, in the format (name, value). - """ - name, value = header - - if isinstance(value, bytes): - pat = _CLEAN_HEADER_REGEX_BYTE - else: - pat = _CLEAN_HEADER_REGEX_STR - try: - if not pat.match(value): - raise InvalidHeader("Invalid return character or leading space in header: %s" % name) - except TypeError: - raise InvalidHeader("Value for header {%s: %s} must be of type str or " - "bytes, not %s" % (name, value, type(value))) - - -def urldefragauth(url): - """ - Given a url remove the fragment and the authentication part. 
- - :rtype: str - """ - scheme, netloc, path, params, query, fragment = urlparse(url) - - # see func:`prepend_scheme_if_needed` - if not netloc: - netloc, path = path, netloc - - netloc = netloc.rsplit('@', 1)[-1] - - return urlunparse((scheme, netloc, path, params, query, '')) - - -def rewind_body(prepared_request): - """Move file pointer back to its recorded starting position - so it can be read again on redirect. - """ - body_seek = getattr(prepared_request.body, 'seek', None) - if body_seek is not None and isinstance(prepared_request._body_position, integer_types): - try: - body_seek(prepared_request._body_position) - except (IOError, OSError): - raise UnrewindableBodyError("An error occurred when rewinding request " - "body for redirect.") - else: - raise UnrewindableBodyError("Unable to rewind request body for redirect.") diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/retrying.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/retrying.py deleted file mode 100644 index 6d1e627..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/retrying.py +++ /dev/null @@ -1,267 +0,0 @@ -## Copyright 2013-2014 Ray Holder -## -## Licensed under the Apache License, Version 2.0 (the "License"); -## you may not use this file except in compliance with the License. -## You may obtain a copy of the License at -## -## http://www.apache.org/licenses/LICENSE-2.0 -## -## Unless required by applicable law or agreed to in writing, software -## distributed under the License is distributed on an "AS IS" BASIS, -## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -## See the License for the specific language governing permissions and -## limitations under the License. - -import random -from pip._vendor import six -import sys -import time -import traceback - - -# sys.maxint / 2, since Python 3.2 doesn't have a sys.maxint... 
-MAX_WAIT = 1073741823 - - -def retry(*dargs, **dkw): - """ - Decorator function that instantiates the Retrying object - @param *dargs: positional arguments passed to Retrying object - @param **dkw: keyword arguments passed to the Retrying object - """ - # support both @retry and @retry() as valid syntax - if len(dargs) == 1 and callable(dargs[0]): - def wrap_simple(f): - - @six.wraps(f) - def wrapped_f(*args, **kw): - return Retrying().call(f, *args, **kw) - - return wrapped_f - - return wrap_simple(dargs[0]) - - else: - def wrap(f): - - @six.wraps(f) - def wrapped_f(*args, **kw): - return Retrying(*dargs, **dkw).call(f, *args, **kw) - - return wrapped_f - - return wrap - - -class Retrying(object): - - def __init__(self, - stop=None, wait=None, - stop_max_attempt_number=None, - stop_max_delay=None, - wait_fixed=None, - wait_random_min=None, wait_random_max=None, - wait_incrementing_start=None, wait_incrementing_increment=None, - wait_exponential_multiplier=None, wait_exponential_max=None, - retry_on_exception=None, - retry_on_result=None, - wrap_exception=False, - stop_func=None, - wait_func=None, - wait_jitter_max=None): - - self._stop_max_attempt_number = 5 if stop_max_attempt_number is None else stop_max_attempt_number - self._stop_max_delay = 100 if stop_max_delay is None else stop_max_delay - self._wait_fixed = 1000 if wait_fixed is None else wait_fixed - self._wait_random_min = 0 if wait_random_min is None else wait_random_min - self._wait_random_max = 1000 if wait_random_max is None else wait_random_max - self._wait_incrementing_start = 0 if wait_incrementing_start is None else wait_incrementing_start - self._wait_incrementing_increment = 100 if wait_incrementing_increment is None else wait_incrementing_increment - self._wait_exponential_multiplier = 1 if wait_exponential_multiplier is None else wait_exponential_multiplier - self._wait_exponential_max = MAX_WAIT if wait_exponential_max is None else wait_exponential_max - self._wait_jitter_max = 0 if 
wait_jitter_max is None else wait_jitter_max - - # TODO add chaining of stop behaviors - # stop behavior - stop_funcs = [] - if stop_max_attempt_number is not None: - stop_funcs.append(self.stop_after_attempt) - - if stop_max_delay is not None: - stop_funcs.append(self.stop_after_delay) - - if stop_func is not None: - self.stop = stop_func - - elif stop is None: - self.stop = lambda attempts, delay: any(f(attempts, delay) for f in stop_funcs) - - else: - self.stop = getattr(self, stop) - - # TODO add chaining of wait behaviors - # wait behavior - wait_funcs = [lambda *args, **kwargs: 0] - if wait_fixed is not None: - wait_funcs.append(self.fixed_sleep) - - if wait_random_min is not None or wait_random_max is not None: - wait_funcs.append(self.random_sleep) - - if wait_incrementing_start is not None or wait_incrementing_increment is not None: - wait_funcs.append(self.incrementing_sleep) - - if wait_exponential_multiplier is not None or wait_exponential_max is not None: - wait_funcs.append(self.exponential_sleep) - - if wait_func is not None: - self.wait = wait_func - - elif wait is None: - self.wait = lambda attempts, delay: max(f(attempts, delay) for f in wait_funcs) - - else: - self.wait = getattr(self, wait) - - # retry on exception filter - if retry_on_exception is None: - self._retry_on_exception = self.always_reject - else: - self._retry_on_exception = retry_on_exception - - # TODO simplify retrying by Exception types - # retry on result filter - if retry_on_result is None: - self._retry_on_result = self.never_reject - else: - self._retry_on_result = retry_on_result - - self._wrap_exception = wrap_exception - - def stop_after_attempt(self, previous_attempt_number, delay_since_first_attempt_ms): - """Stop after the previous attempt >= stop_max_attempt_number.""" - return previous_attempt_number >= self._stop_max_attempt_number - - def stop_after_delay(self, previous_attempt_number, delay_since_first_attempt_ms): - """Stop after the time from the first attempt 
>= stop_max_delay.""" - return delay_since_first_attempt_ms >= self._stop_max_delay - - def no_sleep(self, previous_attempt_number, delay_since_first_attempt_ms): - """Don't sleep at all before retrying.""" - return 0 - - def fixed_sleep(self, previous_attempt_number, delay_since_first_attempt_ms): - """Sleep a fixed amount of time between each retry.""" - return self._wait_fixed - - def random_sleep(self, previous_attempt_number, delay_since_first_attempt_ms): - """Sleep a random amount of time between wait_random_min and wait_random_max""" - return random.randint(self._wait_random_min, self._wait_random_max) - - def incrementing_sleep(self, previous_attempt_number, delay_since_first_attempt_ms): - """ - Sleep an incremental amount of time after each attempt, starting at - wait_incrementing_start and incrementing by wait_incrementing_increment - """ - result = self._wait_incrementing_start + (self._wait_incrementing_increment * (previous_attempt_number - 1)) - if result < 0: - result = 0 - return result - - def exponential_sleep(self, previous_attempt_number, delay_since_first_attempt_ms): - exp = 2 ** previous_attempt_number - result = self._wait_exponential_multiplier * exp - if result > self._wait_exponential_max: - result = self._wait_exponential_max - if result < 0: - result = 0 - return result - - def never_reject(self, result): - return False - - def always_reject(self, result): - return True - - def should_reject(self, attempt): - reject = False - if attempt.has_exception: - reject |= self._retry_on_exception(attempt.value[1]) - else: - reject |= self._retry_on_result(attempt.value) - - return reject - - def call(self, fn, *args, **kwargs): - start_time = int(round(time.time() * 1000)) - attempt_number = 1 - while True: - try: - attempt = Attempt(fn(*args, **kwargs), attempt_number, False) - except: - tb = sys.exc_info() - attempt = Attempt(tb, attempt_number, True) - - if not self.should_reject(attempt): - return attempt.get(self._wrap_exception) - - 
delay_since_first_attempt_ms = int(round(time.time() * 1000)) - start_time - if self.stop(attempt_number, delay_since_first_attempt_ms): - if not self._wrap_exception and attempt.has_exception: - # get() on an attempt with an exception should cause it to be raised, but raise just in case - raise attempt.get() - else: - raise RetryError(attempt) - else: - sleep = self.wait(attempt_number, delay_since_first_attempt_ms) - if self._wait_jitter_max: - jitter = random.random() * self._wait_jitter_max - sleep = sleep + max(0, jitter) - time.sleep(sleep / 1000.0) - - attempt_number += 1 - - -class Attempt(object): - """ - An Attempt encapsulates a call to a target function that may end as a - normal return value from the function or an Exception depending on what - occurred during the execution. - """ - - def __init__(self, value, attempt_number, has_exception): - self.value = value - self.attempt_number = attempt_number - self.has_exception = has_exception - - def get(self, wrap_exception=False): - """ - Return the return value of this Attempt instance or raise an Exception. - If wrap_exception is true, this Attempt is wrapped inside of a - RetryError before being raised. - """ - if self.has_exception: - if wrap_exception: - raise RetryError(self) - else: - six.reraise(self.value[0], self.value[1], self.value[2]) - else: - return self.value - - def __repr__(self): - if self.has_exception: - return "Attempts: {0}, Error:\n{1}".format(self.attempt_number, "".join(traceback.format_tb(self.value[2]))) - else: - return "Attempts: {0}, Value: {1}".format(self.attempt_number, self.value) - - -class RetryError(Exception): - """ - A RetryError encapsulates the last Attempt instance right before giving up. 
- """ - - def __init__(self, last_attempt): - self.last_attempt = last_attempt - - def __str__(self): - return "RetryError[{0}]".format(self.last_attempt) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/six.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/six.py deleted file mode 100644 index 89b2188..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/six.py +++ /dev/null @@ -1,952 +0,0 @@ -# Copyright (c) 2010-2018 Benjamin Peterson -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -"""Utilities for writing code that runs on Python 2 and 3""" - -from __future__ import absolute_import - -import functools -import itertools -import operator -import sys -import types - -__author__ = "Benjamin Peterson <benjamin@python.org>" -__version__ = "1.12.0" - - -# Useful for very coarse version differentiation. 
-PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 -PY34 = sys.version_info[0:2] >= (3, 4) - -if PY3: - string_types = str, - integer_types = int, - class_types = type, - text_type = str - binary_type = bytes - - MAXSIZE = sys.maxsize -else: - string_types = basestring, - integer_types = (int, long) - class_types = (type, types.ClassType) - text_type = unicode - binary_type = str - - if sys.platform.startswith("java"): - # Jython always uses 32 bits. - MAXSIZE = int((1 << 31) - 1) - else: - # It's possible to have sizeof(long) != sizeof(Py_ssize_t). - class X(object): - - def __len__(self): - return 1 << 31 - try: - len(X()) - except OverflowError: - # 32-bit - MAXSIZE = int((1 << 31) - 1) - else: - # 64-bit - MAXSIZE = int((1 << 63) - 1) - del X - - -def _add_doc(func, doc): - """Add documentation to a function.""" - func.__doc__ = doc - - -def _import_module(name): - """Import module, returning the module after the last dot.""" - __import__(name) - return sys.modules[name] - - -class _LazyDescr(object): - - def __init__(self, name): - self.name = name - - def __get__(self, obj, tp): - result = self._resolve() - setattr(obj, self.name, result) # Invokes __set__. - try: - # This is a bit ugly, but it avoids running this again by - # removing this descriptor. 
- delattr(obj.__class__, self.name) - except AttributeError: - pass - return result - - -class MovedModule(_LazyDescr): - - def __init__(self, name, old, new=None): - super(MovedModule, self).__init__(name) - if PY3: - if new is None: - new = name - self.mod = new - else: - self.mod = old - - def _resolve(self): - return _import_module(self.mod) - - def __getattr__(self, attr): - _module = self._resolve() - value = getattr(_module, attr) - setattr(self, attr, value) - return value - - -class _LazyModule(types.ModuleType): - - def __init__(self, name): - super(_LazyModule, self).__init__(name) - self.__doc__ = self.__class__.__doc__ - - def __dir__(self): - attrs = ["__doc__", "__name__"] - attrs += [attr.name for attr in self._moved_attributes] - return attrs - - # Subclasses should override this - _moved_attributes = [] - - -class MovedAttribute(_LazyDescr): - - def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): - super(MovedAttribute, self).__init__(name) - if PY3: - if new_mod is None: - new_mod = name - self.mod = new_mod - if new_attr is None: - if old_attr is None: - new_attr = name - else: - new_attr = old_attr - self.attr = new_attr - else: - self.mod = old_mod - if old_attr is None: - old_attr = name - self.attr = old_attr - - def _resolve(self): - module = _import_module(self.mod) - return getattr(module, self.attr) - - -class _SixMetaPathImporter(object): - - """ - A meta path importer to import six.moves and its submodules. - - This class implements a PEP302 finder and loader. It should be compatible - with Python 2.5 and all existing versions of Python3 - """ - - def __init__(self, six_module_name): - self.name = six_module_name - self.known_modules = {} - - def _add_module(self, mod, *fullnames): - for fullname in fullnames: - self.known_modules[self.name + "." + fullname] = mod - - def _get_module(self, fullname): - return self.known_modules[self.name + "." 
+ fullname] - - def find_module(self, fullname, path=None): - if fullname in self.known_modules: - return self - return None - - def __get_module(self, fullname): - try: - return self.known_modules[fullname] - except KeyError: - raise ImportError("This loader does not know module " + fullname) - - def load_module(self, fullname): - try: - # in case of a reload - return sys.modules[fullname] - except KeyError: - pass - mod = self.__get_module(fullname) - if isinstance(mod, MovedModule): - mod = mod._resolve() - else: - mod.__loader__ = self - sys.modules[fullname] = mod - return mod - - def is_package(self, fullname): - """ - Return true, if the named module is a package. - - We need this method to get correct spec objects with - Python 3.4 (see PEP451) - """ - return hasattr(self.__get_module(fullname), "__path__") - - def get_code(self, fullname): - """Return None - - Required, if is_package is implemented""" - self.__get_module(fullname) # eventually raises ImportError - return None - get_source = get_code # same as get_code - -_importer = _SixMetaPathImporter(__name__) - - -class _MovedItems(_LazyModule): - - """Lazy loading of moved objects""" - __path__ = [] # mark as package - - -_moved_attributes = [ - MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), - MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), - MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), - MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), - MovedAttribute("intern", "__builtin__", "sys"), - MovedAttribute("map", "itertools", "builtins", "imap", "map"), - MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), - MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), - MovedAttribute("getoutput", "commands", "subprocess"), - MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), - 
MovedAttribute("reduce", "__builtin__", "functools"), - MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), - MovedAttribute("StringIO", "StringIO", "io"), - MovedAttribute("UserDict", "UserDict", "collections"), - MovedAttribute("UserList", "UserList", "collections"), - MovedAttribute("UserString", "UserString", "collections"), - MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), - MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), - MovedModule("builtins", "__builtin__"), - MovedModule("configparser", "ConfigParser"), - MovedModule("copyreg", "copy_reg"), - MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), - MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), - MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), - MovedModule("http_cookies", "Cookie", "http.cookies"), - MovedModule("html_entities", "htmlentitydefs", "html.entities"), - MovedModule("html_parser", "HTMLParser", "html.parser"), - MovedModule("http_client", "httplib", "http.client"), - MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), - MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"), - MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), - MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), - MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), - MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), - MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), - MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), - MovedModule("cPickle", "cPickle", "pickle"), - MovedModule("queue", "Queue"), - MovedModule("reprlib", "repr"), - MovedModule("socketserver", "SocketServer"), - MovedModule("_thread", "thread", "_thread"), - MovedModule("tkinter", "Tkinter"), - MovedModule("tkinter_dialog", "Dialog", 
"tkinter.dialog"), - MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), - MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), - MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), - MovedModule("tkinter_tix", "Tix", "tkinter.tix"), - MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), - MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), - MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), - MovedModule("tkinter_colorchooser", "tkColorChooser", - "tkinter.colorchooser"), - MovedModule("tkinter_commondialog", "tkCommonDialog", - "tkinter.commondialog"), - MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), - MovedModule("tkinter_font", "tkFont", "tkinter.font"), - MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), - MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", - "tkinter.simpledialog"), - MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), - MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), - MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), - MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), - MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), - MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), -] -# Add windows specific modules. -if sys.platform == "win32": - _moved_attributes += [ - MovedModule("winreg", "_winreg"), - ] - -for attr in _moved_attributes: - setattr(_MovedItems, attr.name, attr) - if isinstance(attr, MovedModule): - _importer._add_module(attr, "moves." 
+ attr.name) -del attr - -_MovedItems._moved_attributes = _moved_attributes - -moves = _MovedItems(__name__ + ".moves") -_importer._add_module(moves, "moves") - - -class Module_six_moves_urllib_parse(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_parse""" - - -_urllib_parse_moved_attributes = [ - MovedAttribute("ParseResult", "urlparse", "urllib.parse"), - MovedAttribute("SplitResult", "urlparse", "urllib.parse"), - MovedAttribute("parse_qs", "urlparse", "urllib.parse"), - MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), - MovedAttribute("urldefrag", "urlparse", "urllib.parse"), - MovedAttribute("urljoin", "urlparse", "urllib.parse"), - MovedAttribute("urlparse", "urlparse", "urllib.parse"), - MovedAttribute("urlsplit", "urlparse", "urllib.parse"), - MovedAttribute("urlunparse", "urlparse", "urllib.parse"), - MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), - MovedAttribute("quote", "urllib", "urllib.parse"), - MovedAttribute("quote_plus", "urllib", "urllib.parse"), - MovedAttribute("unquote", "urllib", "urllib.parse"), - MovedAttribute("unquote_plus", "urllib", "urllib.parse"), - MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"), - MovedAttribute("urlencode", "urllib", "urllib.parse"), - MovedAttribute("splitquery", "urllib", "urllib.parse"), - MovedAttribute("splittag", "urllib", "urllib.parse"), - MovedAttribute("splituser", "urllib", "urllib.parse"), - MovedAttribute("splitvalue", "urllib", "urllib.parse"), - MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), - MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), - MovedAttribute("uses_params", "urlparse", "urllib.parse"), - MovedAttribute("uses_query", "urlparse", "urllib.parse"), - MovedAttribute("uses_relative", "urlparse", "urllib.parse"), -] -for attr in _urllib_parse_moved_attributes: - setattr(Module_six_moves_urllib_parse, attr.name, attr) -del attr - -Module_six_moves_urllib_parse._moved_attributes = 
_urllib_parse_moved_attributes - -_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), - "moves.urllib_parse", "moves.urllib.parse") - - -class Module_six_moves_urllib_error(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_error""" - - -_urllib_error_moved_attributes = [ - MovedAttribute("URLError", "urllib2", "urllib.error"), - MovedAttribute("HTTPError", "urllib2", "urllib.error"), - MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), -] -for attr in _urllib_error_moved_attributes: - setattr(Module_six_moves_urllib_error, attr.name, attr) -del attr - -Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes - -_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), - "moves.urllib_error", "moves.urllib.error") - - -class Module_six_moves_urllib_request(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_request""" - - -_urllib_request_moved_attributes = [ - MovedAttribute("urlopen", "urllib2", "urllib.request"), - MovedAttribute("install_opener", "urllib2", "urllib.request"), - MovedAttribute("build_opener", "urllib2", "urllib.request"), - MovedAttribute("pathname2url", "urllib", "urllib.request"), - MovedAttribute("url2pathname", "urllib", "urllib.request"), - MovedAttribute("getproxies", "urllib", "urllib.request"), - MovedAttribute("Request", "urllib2", "urllib.request"), - MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), - MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), - MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), - MovedAttribute("BaseHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), - 
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), - MovedAttribute("FileHandler", "urllib2", "urllib.request"), - MovedAttribute("FTPHandler", "urllib2", "urllib.request"), - MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), - MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), - MovedAttribute("urlretrieve", "urllib", "urllib.request"), - MovedAttribute("urlcleanup", "urllib", "urllib.request"), - MovedAttribute("URLopener", "urllib", "urllib.request"), - MovedAttribute("FancyURLopener", "urllib", "urllib.request"), - MovedAttribute("proxy_bypass", "urllib", "urllib.request"), - MovedAttribute("parse_http_list", "urllib2", "urllib.request"), - MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"), -] -for attr in _urllib_request_moved_attributes: - setattr(Module_six_moves_urllib_request, attr.name, attr) -del attr - -Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes - -_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), - "moves.urllib_request", "moves.urllib.request") - - -class Module_six_moves_urllib_response(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_response""" - - -_urllib_response_moved_attributes = [ - MovedAttribute("addbase", "urllib", "urllib.response"), - MovedAttribute("addclosehook", "urllib", "urllib.response"), - MovedAttribute("addinfo", 
"urllib", "urllib.response"), - MovedAttribute("addinfourl", "urllib", "urllib.response"), -] -for attr in _urllib_response_moved_attributes: - setattr(Module_six_moves_urllib_response, attr.name, attr) -del attr - -Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes - -_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), - "moves.urllib_response", "moves.urllib.response") - - -class Module_six_moves_urllib_robotparser(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_robotparser""" - - -_urllib_robotparser_moved_attributes = [ - MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), -] -for attr in _urllib_robotparser_moved_attributes: - setattr(Module_six_moves_urllib_robotparser, attr.name, attr) -del attr - -Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes - -_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), - "moves.urllib_robotparser", "moves.urllib.robotparser") - - -class Module_six_moves_urllib(types.ModuleType): - - """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" - __path__ = [] # mark as package - parse = _importer._get_module("moves.urllib_parse") - error = _importer._get_module("moves.urllib_error") - request = _importer._get_module("moves.urllib_request") - response = _importer._get_module("moves.urllib_response") - robotparser = _importer._get_module("moves.urllib_robotparser") - - def __dir__(self): - return ['parse', 'error', 'request', 'response', 'robotparser'] - -_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), - "moves.urllib") - - -def add_move(move): - """Add an item to six.moves.""" - setattr(_MovedItems, move.name, move) - - -def remove_move(name): - """Remove item from six.moves.""" - try: - delattr(_MovedItems, name) - except AttributeError: - try: - del moves.__dict__[name] - 
except KeyError: - raise AttributeError("no such move, %r" % (name,)) - - -if PY3: - _meth_func = "__func__" - _meth_self = "__self__" - - _func_closure = "__closure__" - _func_code = "__code__" - _func_defaults = "__defaults__" - _func_globals = "__globals__" -else: - _meth_func = "im_func" - _meth_self = "im_self" - - _func_closure = "func_closure" - _func_code = "func_code" - _func_defaults = "func_defaults" - _func_globals = "func_globals" - - -try: - advance_iterator = next -except NameError: - def advance_iterator(it): - return it.next() -next = advance_iterator - - -try: - callable = callable -except NameError: - def callable(obj): - return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) - - -if PY3: - def get_unbound_function(unbound): - return unbound - - create_bound_method = types.MethodType - - def create_unbound_method(func, cls): - return func - - Iterator = object -else: - def get_unbound_function(unbound): - return unbound.im_func - - def create_bound_method(func, obj): - return types.MethodType(func, obj, obj.__class__) - - def create_unbound_method(func, cls): - return types.MethodType(func, None, cls) - - class Iterator(object): - - def next(self): - return type(self).__next__(self) - - callable = callable -_add_doc(get_unbound_function, - """Get the function out of a possibly unbound function""") - - -get_method_function = operator.attrgetter(_meth_func) -get_method_self = operator.attrgetter(_meth_self) -get_function_closure = operator.attrgetter(_func_closure) -get_function_code = operator.attrgetter(_func_code) -get_function_defaults = operator.attrgetter(_func_defaults) -get_function_globals = operator.attrgetter(_func_globals) - - -if PY3: - def iterkeys(d, **kw): - return iter(d.keys(**kw)) - - def itervalues(d, **kw): - return iter(d.values(**kw)) - - def iteritems(d, **kw): - return iter(d.items(**kw)) - - def iterlists(d, **kw): - return iter(d.lists(**kw)) - - viewkeys = operator.methodcaller("keys") - - viewvalues = 
operator.methodcaller("values") - - viewitems = operator.methodcaller("items") -else: - def iterkeys(d, **kw): - return d.iterkeys(**kw) - - def itervalues(d, **kw): - return d.itervalues(**kw) - - def iteritems(d, **kw): - return d.iteritems(**kw) - - def iterlists(d, **kw): - return d.iterlists(**kw) - - viewkeys = operator.methodcaller("viewkeys") - - viewvalues = operator.methodcaller("viewvalues") - - viewitems = operator.methodcaller("viewitems") - -_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") -_add_doc(itervalues, "Return an iterator over the values of a dictionary.") -_add_doc(iteritems, - "Return an iterator over the (key, value) pairs of a dictionary.") -_add_doc(iterlists, - "Return an iterator over the (key, [values]) pairs of a dictionary.") - - -if PY3: - def b(s): - return s.encode("latin-1") - - def u(s): - return s - unichr = chr - import struct - int2byte = struct.Struct(">B").pack - del struct - byte2int = operator.itemgetter(0) - indexbytes = operator.getitem - iterbytes = iter - import io - StringIO = io.StringIO - BytesIO = io.BytesIO - _assertCountEqual = "assertCountEqual" - if sys.version_info[1] <= 1: - _assertRaisesRegex = "assertRaisesRegexp" - _assertRegex = "assertRegexpMatches" - else: - _assertRaisesRegex = "assertRaisesRegex" - _assertRegex = "assertRegex" -else: - def b(s): - return s - # Workaround for standalone backslash - - def u(s): - return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") - unichr = unichr - int2byte = chr - - def byte2int(bs): - return ord(bs[0]) - - def indexbytes(buf, i): - return ord(buf[i]) - iterbytes = functools.partial(itertools.imap, ord) - import StringIO - StringIO = BytesIO = StringIO.StringIO - _assertCountEqual = "assertItemsEqual" - _assertRaisesRegex = "assertRaisesRegexp" - _assertRegex = "assertRegexpMatches" -_add_doc(b, """Byte literal""") -_add_doc(u, """Text literal""") - - -def assertCountEqual(self, *args, **kwargs): - return getattr(self, 
_assertCountEqual)(*args, **kwargs) - - -def assertRaisesRegex(self, *args, **kwargs): - return getattr(self, _assertRaisesRegex)(*args, **kwargs) - - -def assertRegex(self, *args, **kwargs): - return getattr(self, _assertRegex)(*args, **kwargs) - - -if PY3: - exec_ = getattr(moves.builtins, "exec") - - def reraise(tp, value, tb=None): - try: - if value is None: - value = tp() - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - finally: - value = None - tb = None - -else: - def exec_(_code_, _globs_=None, _locs_=None): - """Execute code in a namespace.""" - if _globs_ is None: - frame = sys._getframe(1) - _globs_ = frame.f_globals - if _locs_ is None: - _locs_ = frame.f_locals - del frame - elif _locs_ is None: - _locs_ = _globs_ - exec("""exec _code_ in _globs_, _locs_""") - - exec_("""def reraise(tp, value, tb=None): - try: - raise tp, value, tb - finally: - tb = None -""") - - -if sys.version_info[:2] == (3, 2): - exec_("""def raise_from(value, from_value): - try: - if from_value is None: - raise value - raise value from from_value - finally: - value = None -""") -elif sys.version_info[:2] > (3, 2): - exec_("""def raise_from(value, from_value): - try: - raise value from from_value - finally: - value = None -""") -else: - def raise_from(value, from_value): - raise value - - -print_ = getattr(moves.builtins, "print", None) -if print_ is None: - def print_(*args, **kwargs): - """The new-style print function for Python 2.4 and 2.5.""" - fp = kwargs.pop("file", sys.stdout) - if fp is None: - return - - def write(data): - if not isinstance(data, basestring): - data = str(data) - # If the file has an encoding, encode unicode with it. 
- if (isinstance(fp, file) and - isinstance(data, unicode) and - fp.encoding is not None): - errors = getattr(fp, "errors", None) - if errors is None: - errors = "strict" - data = data.encode(fp.encoding, errors) - fp.write(data) - want_unicode = False - sep = kwargs.pop("sep", None) - if sep is not None: - if isinstance(sep, unicode): - want_unicode = True - elif not isinstance(sep, str): - raise TypeError("sep must be None or a string") - end = kwargs.pop("end", None) - if end is not None: - if isinstance(end, unicode): - want_unicode = True - elif not isinstance(end, str): - raise TypeError("end must be None or a string") - if kwargs: - raise TypeError("invalid keyword arguments to print()") - if not want_unicode: - for arg in args: - if isinstance(arg, unicode): - want_unicode = True - break - if want_unicode: - newline = unicode("\n") - space = unicode(" ") - else: - newline = "\n" - space = " " - if sep is None: - sep = space - if end is None: - end = newline - for i, arg in enumerate(args): - if i: - write(sep) - write(arg) - write(end) -if sys.version_info[:2] < (3, 3): - _print = print_ - - def print_(*args, **kwargs): - fp = kwargs.get("file", sys.stdout) - flush = kwargs.pop("flush", False) - _print(*args, **kwargs) - if flush and fp is not None: - fp.flush() - -_add_doc(reraise, """Reraise an exception.""") - -if sys.version_info[0:2] < (3, 4): - def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, - updated=functools.WRAPPER_UPDATES): - def wrapper(f): - f = functools.wraps(wrapped, assigned, updated)(f) - f.__wrapped__ = wrapped - return f - return wrapper -else: - wraps = functools.wraps - - -def with_metaclass(meta, *bases): - """Create a base class with a metaclass.""" - # This requires a bit of explanation: the basic idea is to make a dummy - # metaclass for one level of class instantiation that replaces itself with - # the actual metaclass. 
- class metaclass(type): - - def __new__(cls, name, this_bases, d): - return meta(name, bases, d) - - @classmethod - def __prepare__(cls, name, this_bases): - return meta.__prepare__(name, bases) - return type.__new__(metaclass, 'temporary_class', (), {}) - - -def add_metaclass(metaclass): - """Class decorator for creating a class with a metaclass.""" - def wrapper(cls): - orig_vars = cls.__dict__.copy() - slots = orig_vars.get('__slots__') - if slots is not None: - if isinstance(slots, str): - slots = [slots] - for slots_var in slots: - orig_vars.pop(slots_var) - orig_vars.pop('__dict__', None) - orig_vars.pop('__weakref__', None) - if hasattr(cls, '__qualname__'): - orig_vars['__qualname__'] = cls.__qualname__ - return metaclass(cls.__name__, cls.__bases__, orig_vars) - return wrapper - - -def ensure_binary(s, encoding='utf-8', errors='strict'): - """Coerce **s** to six.binary_type. - - For Python 2: - - `unicode` -> encoded to `str` - - `str` -> `str` - - For Python 3: - - `str` -> encoded to `bytes` - - `bytes` -> `bytes` - """ - if isinstance(s, text_type): - return s.encode(encoding, errors) - elif isinstance(s, binary_type): - return s - else: - raise TypeError("not expecting type '%s'" % type(s)) - - -def ensure_str(s, encoding='utf-8', errors='strict'): - """Coerce *s* to `str`. - - For Python 2: - - `unicode` -> encoded to `str` - - `str` -> `str` - - For Python 3: - - `str` -> `str` - - `bytes` -> decoded to `str` - """ - if not isinstance(s, (text_type, binary_type)): - raise TypeError("not expecting type '%s'" % type(s)) - if PY2 and isinstance(s, text_type): - s = s.encode(encoding, errors) - elif PY3 and isinstance(s, binary_type): - s = s.decode(encoding, errors) - return s - - -def ensure_text(s, encoding='utf-8', errors='strict'): - """Coerce *s* to six.text_type. 
- - For Python 2: - - `unicode` -> `unicode` - - `str` -> `unicode` - - For Python 3: - - `str` -> `str` - - `bytes` -> decoded to `str` - """ - if isinstance(s, binary_type): - return s.decode(encoding, errors) - elif isinstance(s, text_type): - return s - else: - raise TypeError("not expecting type '%s'" % type(s)) - - - -def python_2_unicode_compatible(klass): - """ - A decorator that defines __unicode__ and __str__ methods under Python 2. - Under Python 3 it does nothing. - - To support Python 2 and 3 with a single code base, define a __str__ method - returning text and apply this decorator to the class. - """ - if PY2: - if '__str__' not in klass.__dict__: - raise ValueError("@python_2_unicode_compatible cannot be applied " - "to %s because it doesn't define __str__()." % - klass.__name__) - klass.__unicode__ = klass.__str__ - klass.__str__ = lambda self: self.__unicode__().encode('utf-8') - return klass - - -# Complete the moves implementation. -# This code is at the end of this module to speed up module loading. -# Turn this module into a package. -__path__ = [] # required for PEP 302 and PEP 451 -__package__ = __name__ # see PEP 366 @ReservedAssignment -if globals().get("__spec__") is not None: - __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable -# Remove other six meta path importers, since they cause problems. This can -# happen if six is removed from sys.modules and then reloaded. (Setuptools does -# this for some reason.) -if sys.meta_path: - for i, importer in enumerate(sys.meta_path): - # Here's some real nastiness: Another "instance" of the six module might - # be floating around. Therefore, we can't use isinstance() to check for - # the six meta path importer, since the other six instance will have - # inserted an importer with different class. 
- if (type(importer).__name__ == "_SixMetaPathImporter" and - importer.name == __name__): - del sys.meta_path[i] - break - del i, importer -# Finally, add the importer to the meta path import hook. -sys.meta_path.append(_importer) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/__init__.py deleted file mode 100644 index 148a9c3..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/__init__.py +++ /dev/null @@ -1,92 +0,0 @@ -""" -urllib3 - Thread-safe connection pooling and re-using. -""" - -from __future__ import absolute_import -import warnings - -from .connectionpool import ( - HTTPConnectionPool, - HTTPSConnectionPool, - connection_from_url -) - -from . import exceptions -from .filepost import encode_multipart_formdata -from .poolmanager import PoolManager, ProxyManager, proxy_from_url -from .response import HTTPResponse -from .util.request import make_headers -from .util.url import get_host -from .util.timeout import Timeout -from .util.retry import Retry - - -# Set default logging handler to avoid "No handler found" warnings. -import logging -from logging import NullHandler - -__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)' -__license__ = 'MIT' -__version__ = '1.24.1' - -__all__ = ( - 'HTTPConnectionPool', - 'HTTPSConnectionPool', - 'PoolManager', - 'ProxyManager', - 'HTTPResponse', - 'Retry', - 'Timeout', - 'add_stderr_logger', - 'connection_from_url', - 'disable_warnings', - 'encode_multipart_formdata', - 'get_host', - 'make_headers', - 'proxy_from_url', -) - -logging.getLogger(__name__).addHandler(NullHandler()) - - -def add_stderr_logger(level=logging.DEBUG): - """ - Helper for quickly adding a StreamHandler to the logger. Useful for - debugging. - - Returns the handler after adding it. 
- """ - # This method needs to be in this __init__.py to get the __name__ correct - # even if urllib3 is vendored within another package. - logger = logging.getLogger(__name__) - handler = logging.StreamHandler() - handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s')) - logger.addHandler(handler) - logger.setLevel(level) - logger.debug('Added a stderr logging handler to logger: %s', __name__) - return handler - - -# ... Clean up. -del NullHandler - - -# All warning filters *must* be appended unless you're really certain that they -# shouldn't be: otherwise, it's very hard for users to use most Python -# mechanisms to silence them. -# SecurityWarning's always go off by default. -warnings.simplefilter('always', exceptions.SecurityWarning, append=True) -# SubjectAltNameWarning's should go off once per host -warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True) -# InsecurePlatformWarning's don't vary between requests, so we keep it default. -warnings.simplefilter('default', exceptions.InsecurePlatformWarning, - append=True) -# SNIMissingWarnings should go off only once. -warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True) - - -def disable_warnings(category=exceptions.HTTPWarning): - """ - Helper for quickly disabling all urllib3 warnings. 
- """ - warnings.simplefilter('ignore', category) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/_collections.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/_collections.py deleted file mode 100644 index 34f2381..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/_collections.py +++ /dev/null @@ -1,329 +0,0 @@ -from __future__ import absolute_import -try: - from collections.abc import Mapping, MutableMapping -except ImportError: - from collections import Mapping, MutableMapping -try: - from threading import RLock -except ImportError: # Platform-specific: No threads available - class RLock: - def __enter__(self): - pass - - def __exit__(self, exc_type, exc_value, traceback): - pass - - -from collections import OrderedDict -from .exceptions import InvalidHeader -from .packages.six import iterkeys, itervalues, PY3 - - -__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict'] - - -_Null = object() - - -class RecentlyUsedContainer(MutableMapping): - """ - Provides a thread-safe dict-like container which maintains up to - ``maxsize`` keys while throwing away the least-recently-used keys beyond - ``maxsize``. - - :param maxsize: - Maximum number of recent elements to retain. - - :param dispose_func: - Every time an item is evicted from the container, - ``dispose_func(value)`` is called. Callback which will get called - """ - - ContainerCls = OrderedDict - - def __init__(self, maxsize=10, dispose_func=None): - self._maxsize = maxsize - self.dispose_func = dispose_func - - self._container = self.ContainerCls() - self.lock = RLock() - - def __getitem__(self, key): - # Re-insert the item, moving it to the end of the eviction line. 
- with self.lock: - item = self._container.pop(key) - self._container[key] = item - return item - - def __setitem__(self, key, value): - evicted_value = _Null - with self.lock: - # Possibly evict the existing value of 'key' - evicted_value = self._container.get(key, _Null) - self._container[key] = value - - # If we didn't evict an existing value, we might have to evict the - # least recently used item from the beginning of the container. - if len(self._container) > self._maxsize: - _key, evicted_value = self._container.popitem(last=False) - - if self.dispose_func and evicted_value is not _Null: - self.dispose_func(evicted_value) - - def __delitem__(self, key): - with self.lock: - value = self._container.pop(key) - - if self.dispose_func: - self.dispose_func(value) - - def __len__(self): - with self.lock: - return len(self._container) - - def __iter__(self): - raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.') - - def clear(self): - with self.lock: - # Copy pointers to all values, then wipe the mapping - values = list(itervalues(self._container)) - self._container.clear() - - if self.dispose_func: - for value in values: - self.dispose_func(value) - - def keys(self): - with self.lock: - return list(iterkeys(self._container)) - - -class HTTPHeaderDict(MutableMapping): - """ - :param headers: - An iterable of field-value pairs. Must not contain multiple field names - when compared case-insensitively. - - :param kwargs: - Additional field-value pairs to pass in to ``dict.update``. - - A ``dict`` like container for storing HTTP Headers. - - Field names are stored and compared case-insensitively in compliance with - RFC 7230. Iteration provides the first case-sensitive key seen for each - case-insensitive pair. - - Using ``__setitem__`` syntax overwrites fields that compare equal - case-insensitively in order to maintain ``dict``'s api. For fields that - compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add`` - in a loop. 
- - If multiple fields that are equal case-insensitively are passed to the - constructor or ``.update``, the behavior is undefined and some will be - lost. - - >>> headers = HTTPHeaderDict() - >>> headers.add('Set-Cookie', 'foo=bar') - >>> headers.add('set-cookie', 'baz=quxx') - >>> headers['content-length'] = '7' - >>> headers['SET-cookie'] - 'foo=bar, baz=quxx' - >>> headers['Content-Length'] - '7' - """ - - def __init__(self, headers=None, **kwargs): - super(HTTPHeaderDict, self).__init__() - self._container = OrderedDict() - if headers is not None: - if isinstance(headers, HTTPHeaderDict): - self._copy_from(headers) - else: - self.extend(headers) - if kwargs: - self.extend(kwargs) - - def __setitem__(self, key, val): - self._container[key.lower()] = [key, val] - return self._container[key.lower()] - - def __getitem__(self, key): - val = self._container[key.lower()] - return ', '.join(val[1:]) - - def __delitem__(self, key): - del self._container[key.lower()] - - def __contains__(self, key): - return key.lower() in self._container - - def __eq__(self, other): - if not isinstance(other, Mapping) and not hasattr(other, 'keys'): - return False - if not isinstance(other, type(self)): - other = type(self)(other) - return (dict((k.lower(), v) for k, v in self.itermerged()) == - dict((k.lower(), v) for k, v in other.itermerged())) - - def __ne__(self, other): - return not self.__eq__(other) - - if not PY3: # Python 2 - iterkeys = MutableMapping.iterkeys - itervalues = MutableMapping.itervalues - - __marker = object() - - def __len__(self): - return len(self._container) - - def __iter__(self): - # Only provide the originally cased names - for vals in self._container.values(): - yield vals[0] - - def pop(self, key, default=__marker): - '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value. - If key is not found, d is returned if given, otherwise KeyError is raised. 
- ''' - # Using the MutableMapping function directly fails due to the private marker. - # Using ordinary dict.pop would expose the internal structures. - # So let's reinvent the wheel. - try: - value = self[key] - except KeyError: - if default is self.__marker: - raise - return default - else: - del self[key] - return value - - def discard(self, key): - try: - del self[key] - except KeyError: - pass - - def add(self, key, val): - """Adds a (name, value) pair, doesn't overwrite the value if it already - exists. - - >>> headers = HTTPHeaderDict(foo='bar') - >>> headers.add('Foo', 'baz') - >>> headers['foo'] - 'bar, baz' - """ - key_lower = key.lower() - new_vals = [key, val] - # Keep the common case aka no item present as fast as possible - vals = self._container.setdefault(key_lower, new_vals) - if new_vals is not vals: - vals.append(val) - - def extend(self, *args, **kwargs): - """Generic import function for any type of header-like object. - Adapted version of MutableMapping.update in order to insert items - with self.add instead of self.__setitem__ - """ - if len(args) > 1: - raise TypeError("extend() takes at most 1 positional " - "arguments ({0} given)".format(len(args))) - other = args[0] if len(args) >= 1 else () - - if isinstance(other, HTTPHeaderDict): - for key, val in other.iteritems(): - self.add(key, val) - elif isinstance(other, Mapping): - for key in other: - self.add(key, other[key]) - elif hasattr(other, "keys"): - for key in other.keys(): - self.add(key, other[key]) - else: - for key, value in other: - self.add(key, value) - - for key, value in kwargs.items(): - self.add(key, value) - - def getlist(self, key, default=__marker): - """Returns a list of all the values for the named field. 
Returns an - empty list if the key doesn't exist.""" - try: - vals = self._container[key.lower()] - except KeyError: - if default is self.__marker: - return [] - return default - else: - return vals[1:] - - # Backwards compatibility for httplib - getheaders = getlist - getallmatchingheaders = getlist - iget = getlist - - # Backwards compatibility for http.cookiejar - get_all = getlist - - def __repr__(self): - return "%s(%s)" % (type(self).__name__, dict(self.itermerged())) - - def _copy_from(self, other): - for key in other: - val = other.getlist(key) - if isinstance(val, list): - # Don't need to convert tuples - val = list(val) - self._container[key.lower()] = [key] + val - - def copy(self): - clone = type(self)() - clone._copy_from(self) - return clone - - def iteritems(self): - """Iterate over all header lines, including duplicate ones.""" - for key in self: - vals = self._container[key.lower()] - for val in vals[1:]: - yield vals[0], val - - def itermerged(self): - """Iterate over all headers, merging duplicate ones together.""" - for key in self: - val = self._container[key.lower()] - yield val[0], ', '.join(val[1:]) - - def items(self): - return list(self.iteritems()) - - @classmethod - def from_httplib(cls, message): # Python 2 - """Read headers from a Python 2 httplib message object.""" - # python2.7 does not expose a proper API for exporting multiheaders - # efficiently. This function re-reads raw lines from the message - # object and extracts the multiheaders properly. - obs_fold_continued_leaders = (' ', '\t') - headers = [] - - for line in message.headers: - if line.startswith(obs_fold_continued_leaders): - if not headers: - # We received a header line that starts with OWS as described - # in RFC-7230 S3.2.4. This indicates a multiline header, but - # there exists no previous header to which we can attach it. 
- raise InvalidHeader( - 'Header continuation with no previous header: %s' % line - ) - else: - key, value = headers[-1] - headers[-1] = (key, value + ' ' + line.strip()) - continue - - key, value = line.split(':', 1) - headers.append((key, value.strip())) - - return cls(headers) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/connection.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/connection.py deleted file mode 100644 index 02b3665..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/connection.py +++ /dev/null @@ -1,391 +0,0 @@ -from __future__ import absolute_import -import datetime -import logging -import os -import socket -from socket import error as SocketError, timeout as SocketTimeout -import warnings -from .packages import six -from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection -from .packages.six.moves.http_client import HTTPException # noqa: F401 - -try: # Compiled with SSL? - import ssl - BaseSSLError = ssl.SSLError -except (ImportError, AttributeError): # Platform-specific: No SSL. - ssl = None - - class BaseSSLError(BaseException): - pass - - -try: # Python 3: - # Not a no-op, we're adding this to the namespace so it can be imported. 
- ConnectionError = ConnectionError -except NameError: # Python 2: - class ConnectionError(Exception): - pass - - -from .exceptions import ( - NewConnectionError, - ConnectTimeoutError, - SubjectAltNameWarning, - SystemTimeWarning, -) -from .packages.ssl_match_hostname import match_hostname, CertificateError - -from .util.ssl_ import ( - resolve_cert_reqs, - resolve_ssl_version, - assert_fingerprint, - create_urllib3_context, - ssl_wrap_socket -) - - -from .util import connection - -from ._collections import HTTPHeaderDict - -log = logging.getLogger(__name__) - -port_by_scheme = { - 'http': 80, - 'https': 443, -} - -# When updating RECENT_DATE, move it to within two years of the current date, -# and not less than 6 months ago. -# Example: if Today is 2018-01-01, then RECENT_DATE should be any date on or -# after 2016-01-01 (today - 2 years) AND before 2017-07-01 (today - 6 months) -RECENT_DATE = datetime.date(2017, 6, 30) - - -class DummyConnection(object): - """Used to detect a failed ConnectionCls import.""" - pass - - -class HTTPConnection(_HTTPConnection, object): - """ - Based on httplib.HTTPConnection but provides an extra constructor - backwards-compatibility layer between older and newer Pythons. - - Additional keyword parameters are used to configure attributes of the connection. - Accepted parameters include: - - - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool` - - ``source_address``: Set the source address for the current connection. - - ``socket_options``: Set specific options on the underlying socket. If not specified, then - defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling - Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy. 
- - For example, if you wish to enable TCP Keep Alive in addition to the defaults, - you might pass:: - - HTTPConnection.default_socket_options + [ - (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1), - ] - - Or you may want to disable the defaults by passing an empty list (e.g., ``[]``). - """ - - default_port = port_by_scheme['http'] - - #: Disable Nagle's algorithm by default. - #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]`` - default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)] - - #: Whether this connection verifies the host's certificate. - is_verified = False - - def __init__(self, *args, **kw): - if six.PY3: # Python 3 - kw.pop('strict', None) - - # Pre-set source_address. - self.source_address = kw.get('source_address') - - #: The socket options provided by the user. If no options are - #: provided, we use the default options. - self.socket_options = kw.pop('socket_options', self.default_socket_options) - - _HTTPConnection.__init__(self, *args, **kw) - - @property - def host(self): - """ - Getter method to remove any trailing dots that indicate the hostname is an FQDN. - - In general, SSL certificates don't include the trailing dot indicating a - fully-qualified domain name, and thus, they don't validate properly when - checked against a domain name that includes the dot. In addition, some - servers may not expect to receive the trailing dot when provided. - - However, the hostname with trailing dot is critical to DNS resolution; doing a - lookup with the trailing dot will properly only resolve the appropriate FQDN, - whereas a lookup without a trailing dot will search the system's search domain - list. Thus, it's important to keep the original host around for use only in - those cases where it's appropriate (i.e., when doing DNS lookup to establish the - actual TCP connection across which we're going to send HTTP requests). 
- """ - return self._dns_host.rstrip('.') - - @host.setter - def host(self, value): - """ - Setter for the `host` property. - - We assume that only urllib3 uses the _dns_host attribute; httplib itself - only uses `host`, and it seems reasonable that other libraries follow suit. - """ - self._dns_host = value - - def _new_conn(self): - """ Establish a socket connection and set nodelay settings on it. - - :return: New socket connection. - """ - extra_kw = {} - if self.source_address: - extra_kw['source_address'] = self.source_address - - if self.socket_options: - extra_kw['socket_options'] = self.socket_options - - try: - conn = connection.create_connection( - (self._dns_host, self.port), self.timeout, **extra_kw) - - except SocketTimeout as e: - raise ConnectTimeoutError( - self, "Connection to %s timed out. (connect timeout=%s)" % - (self.host, self.timeout)) - - except SocketError as e: - raise NewConnectionError( - self, "Failed to establish a new connection: %s" % e) - - return conn - - def _prepare_conn(self, conn): - self.sock = conn - if self._tunnel_host: - # TODO: Fix tunnel so it doesn't depend on self.sock state. 
- self._tunnel() - # Mark this connection as not reusable - self.auto_open = 0 - - def connect(self): - conn = self._new_conn() - self._prepare_conn(conn) - - def request_chunked(self, method, url, body=None, headers=None): - """ - Alternative to the common request method, which sends the - body with chunked encoding and not as one block - """ - headers = HTTPHeaderDict(headers if headers is not None else {}) - skip_accept_encoding = 'accept-encoding' in headers - skip_host = 'host' in headers - self.putrequest( - method, - url, - skip_accept_encoding=skip_accept_encoding, - skip_host=skip_host - ) - for header, value in headers.items(): - self.putheader(header, value) - if 'transfer-encoding' not in headers: - self.putheader('Transfer-Encoding', 'chunked') - self.endheaders() - - if body is not None: - stringish_types = six.string_types + (bytes,) - if isinstance(body, stringish_types): - body = (body,) - for chunk in body: - if not chunk: - continue - if not isinstance(chunk, bytes): - chunk = chunk.encode('utf8') - len_str = hex(len(chunk))[2:] - self.send(len_str.encode('utf-8')) - self.send(b'\r\n') - self.send(chunk) - self.send(b'\r\n') - - # After the if clause, to always have a closed body - self.send(b'0\r\n\r\n') - - -class HTTPSConnection(HTTPConnection): - default_port = port_by_scheme['https'] - - ssl_version = None - - def __init__(self, host, port=None, key_file=None, cert_file=None, - strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, - ssl_context=None, server_hostname=None, **kw): - - HTTPConnection.__init__(self, host, port, strict=strict, - timeout=timeout, **kw) - - self.key_file = key_file - self.cert_file = cert_file - self.ssl_context = ssl_context - self.server_hostname = server_hostname - - # Required property for Google AppEngine 1.9.0 which otherwise causes - # HTTPS requests to go out as HTTP. 
(See Issue #356) - self._protocol = 'https' - - def connect(self): - conn = self._new_conn() - self._prepare_conn(conn) - - if self.ssl_context is None: - self.ssl_context = create_urllib3_context( - ssl_version=resolve_ssl_version(None), - cert_reqs=resolve_cert_reqs(None), - ) - - self.sock = ssl_wrap_socket( - sock=conn, - keyfile=self.key_file, - certfile=self.cert_file, - ssl_context=self.ssl_context, - server_hostname=self.server_hostname - ) - - -class VerifiedHTTPSConnection(HTTPSConnection): - """ - Based on httplib.HTTPSConnection but wraps the socket with - SSL certification. - """ - cert_reqs = None - ca_certs = None - ca_cert_dir = None - ssl_version = None - assert_fingerprint = None - - def set_cert(self, key_file=None, cert_file=None, - cert_reqs=None, ca_certs=None, - assert_hostname=None, assert_fingerprint=None, - ca_cert_dir=None): - """ - This method should only be called once, before the connection is used. - """ - # If cert_reqs is not provided, we can try to guess. If the user gave - # us a cert database, we assume they want to use it: otherwise, if - # they gave us an SSL Context object we should use whatever is set for - # it. - if cert_reqs is None: - if ca_certs or ca_cert_dir: - cert_reqs = 'CERT_REQUIRED' - elif self.ssl_context is not None: - cert_reqs = self.ssl_context.verify_mode - - self.key_file = key_file - self.cert_file = cert_file - self.cert_reqs = cert_reqs - self.assert_hostname = assert_hostname - self.assert_fingerprint = assert_fingerprint - self.ca_certs = ca_certs and os.path.expanduser(ca_certs) - self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir) - - def connect(self): - # Add certificate verification - conn = self._new_conn() - hostname = self.host - - if self._tunnel_host: - self.sock = conn - # Calls self._set_hostport(), so self.host is - # self._tunnel_host below. 
- self._tunnel() - # Mark this connection as not reusable - self.auto_open = 0 - - # Override the host with the one we're requesting data from. - hostname = self._tunnel_host - - server_hostname = hostname - if self.server_hostname is not None: - server_hostname = self.server_hostname - - is_time_off = datetime.date.today() < RECENT_DATE - if is_time_off: - warnings.warn(( - 'System time is way off (before {0}). This will probably ' - 'lead to SSL verification errors').format(RECENT_DATE), - SystemTimeWarning - ) - - # Wrap socket using verification with the root certs in - # trusted_root_certs - if self.ssl_context is None: - self.ssl_context = create_urllib3_context( - ssl_version=resolve_ssl_version(self.ssl_version), - cert_reqs=resolve_cert_reqs(self.cert_reqs), - ) - - context = self.ssl_context - context.verify_mode = resolve_cert_reqs(self.cert_reqs) - self.sock = ssl_wrap_socket( - sock=conn, - keyfile=self.key_file, - certfile=self.cert_file, - ca_certs=self.ca_certs, - ca_cert_dir=self.ca_cert_dir, - server_hostname=server_hostname, - ssl_context=context) - - if self.assert_fingerprint: - assert_fingerprint(self.sock.getpeercert(binary_form=True), - self.assert_fingerprint) - elif context.verify_mode != ssl.CERT_NONE \ - and not getattr(context, 'check_hostname', False) \ - and self.assert_hostname is not False: - # While urllib3 attempts to always turn off hostname matching from - # the TLS library, this cannot always be done. So we check whether - # the TLS Library still thinks it's matching hostnames. - cert = self.sock.getpeercert() - if not cert.get('subjectAltName', ()): - warnings.warn(( - 'Certificate for {0} has no `subjectAltName`, falling back to check for a ' - '`commonName` for now. This feature is being removed by major browsers and ' - 'deprecated by RFC 2818. 
(See https://github.com/shazow/urllib3/issues/497 ' - 'for details.)'.format(hostname)), - SubjectAltNameWarning - ) - _match_hostname(cert, self.assert_hostname or server_hostname) - - self.is_verified = ( - context.verify_mode == ssl.CERT_REQUIRED or - self.assert_fingerprint is not None - ) - - -def _match_hostname(cert, asserted_hostname): - try: - match_hostname(cert, asserted_hostname) - except CertificateError as e: - log.error( - 'Certificate did not match expected hostname: %s. ' - 'Certificate: %s', asserted_hostname, cert - ) - # Add cert to exception and reraise so client code can inspect - # the cert when catching the exception, if they want to - e._peer_cert = cert - raise - - -if ssl: - # Make a copy for testing. - UnverifiedHTTPSConnection = HTTPSConnection - HTTPSConnection = VerifiedHTTPSConnection -else: - HTTPSConnection = DummyConnection diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/connectionpool.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/connectionpool.py deleted file mode 100644 index f7a8f19..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/connectionpool.py +++ /dev/null @@ -1,896 +0,0 @@ -from __future__ import absolute_import -import errno -import logging -import sys -import warnings - -from socket import error as SocketError, timeout as SocketTimeout -import socket - - -from .exceptions import ( - ClosedPoolError, - ProtocolError, - EmptyPoolError, - HeaderParsingError, - HostChangedError, - LocationValueError, - MaxRetryError, - ProxyError, - ReadTimeoutError, - SSLError, - TimeoutError, - InsecureRequestWarning, - NewConnectionError, -) -from .packages.ssl_match_hostname import CertificateError -from .packages import six -from .packages.six.moves import queue -from .connection import ( - port_by_scheme, - DummyConnection, - HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection, - HTTPException, BaseSSLError, -) -from 
.request import RequestMethods -from .response import HTTPResponse - -from .util.connection import is_connection_dropped -from .util.request import set_file_position -from .util.response import assert_header_parsing -from .util.retry import Retry -from .util.timeout import Timeout -from .util.url import get_host, Url, NORMALIZABLE_SCHEMES -from .util.queue import LifoQueue - - -xrange = six.moves.xrange - -log = logging.getLogger(__name__) - -_Default = object() - - -# Pool objects -class ConnectionPool(object): - """ - Base class for all connection pools, such as - :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. - """ - - scheme = None - QueueCls = LifoQueue - - def __init__(self, host, port=None): - if not host: - raise LocationValueError("No host specified.") - - self.host = _ipv6_host(host, self.scheme) - self._proxy_host = host.lower() - self.port = port - - def __str__(self): - return '%s(host=%r, port=%r)' % (type(self).__name__, - self.host, self.port) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.close() - # Return False to re-raise any potential exceptions - return False - - def close(self): - """ - Close all pooled connections and disable the pool. - """ - pass - - -# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 -_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK} - - -class HTTPConnectionPool(ConnectionPool, RequestMethods): - """ - Thread-safe connection pool for one host. - - :param host: - Host used for this HTTP Connection (e.g. "localhost"), passed into - :class:`httplib.HTTPConnection`. - - :param port: - Port used for this HTTP Connection (None is equivalent to 80), passed - into :class:`httplib.HTTPConnection`. - - :param strict: - Causes BadStatusLine to be raised if the status line can't be parsed - as a valid HTTP/1.0 or 1.1 status line, passed into - :class:`httplib.HTTPConnection`. - - .. note:: - Only works in Python 2. 
This parameter is ignored in Python 3. - - :param timeout: - Socket timeout in seconds for each individual connection. This can - be a float or integer, which sets the timeout for the HTTP request, - or an instance of :class:`urllib3.util.Timeout` which gives you more - fine-grained control over request timeouts. After the constructor has - been parsed, this is always a `urllib3.util.Timeout` object. - - :param maxsize: - Number of connections to save that can be reused. More than 1 is useful - in multithreaded situations. If ``block`` is set to False, more - connections will be created but they will not be saved once they've - been used. - - :param block: - If set to True, no more than ``maxsize`` connections will be used at - a time. When no free connections are available, the call will block - until a connection has been released. This is a useful side effect for - particular multithreaded situations where one does not want to use more - than maxsize connections per host to prevent flooding. - - :param headers: - Headers to include with all requests, unless other headers are given - explicitly. - - :param retries: - Retry configuration to use by default with requests in this pool. - - :param _proxy: - Parsed proxy URL, should not be used directly, instead, see - :class:`urllib3.connectionpool.ProxyManager`" - - :param _proxy_headers: - A dictionary with proxy headers, should not be used directly, - instead, see :class:`urllib3.connectionpool.ProxyManager`" - - :param \\**conn_kw: - Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`, - :class:`urllib3.connection.HTTPSConnection` instances. 
- """ - - scheme = 'http' - ConnectionCls = HTTPConnection - ResponseCls = HTTPResponse - - def __init__(self, host, port=None, strict=False, - timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False, - headers=None, retries=None, - _proxy=None, _proxy_headers=None, - **conn_kw): - ConnectionPool.__init__(self, host, port) - RequestMethods.__init__(self, headers) - - self.strict = strict - - if not isinstance(timeout, Timeout): - timeout = Timeout.from_float(timeout) - - if retries is None: - retries = Retry.DEFAULT - - self.timeout = timeout - self.retries = retries - - self.pool = self.QueueCls(maxsize) - self.block = block - - self.proxy = _proxy - self.proxy_headers = _proxy_headers or {} - - # Fill the queue up so that doing get() on it will block properly - for _ in xrange(maxsize): - self.pool.put(None) - - # These are mostly for testing and debugging purposes. - self.num_connections = 0 - self.num_requests = 0 - self.conn_kw = conn_kw - - if self.proxy: - # Enable Nagle's algorithm for proxies, to avoid packet fragmentation. - # We cannot know if the user has added default socket options, so we cannot replace the - # list. - self.conn_kw.setdefault('socket_options', []) - - def _new_conn(self): - """ - Return a fresh :class:`HTTPConnection`. - """ - self.num_connections += 1 - log.debug("Starting new HTTP connection (%d): %s:%s", - self.num_connections, self.host, self.port or "80") - - conn = self.ConnectionCls(host=self.host, port=self.port, - timeout=self.timeout.connect_timeout, - strict=self.strict, **self.conn_kw) - return conn - - def _get_conn(self, timeout=None): - """ - Get a connection. Will return a pooled connection if one is available. - - If no connections are available and :prop:`.block` is ``False``, then a - fresh connection is returned. - - :param timeout: - Seconds to wait before giving up and raising - :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and - :prop:`.block` is ``True``. 
- """ - conn = None - try: - conn = self.pool.get(block=self.block, timeout=timeout) - - except AttributeError: # self.pool is None - raise ClosedPoolError(self, "Pool is closed.") - - except queue.Empty: - if self.block: - raise EmptyPoolError(self, - "Pool reached maximum size and no more " - "connections are allowed.") - pass # Oh well, we'll create a new connection then - - # If this is a persistent connection, check if it got disconnected - if conn and is_connection_dropped(conn): - log.debug("Resetting dropped connection: %s", self.host) - conn.close() - if getattr(conn, 'auto_open', 1) == 0: - # This is a proxied connection that has been mutated by - # httplib._tunnel() and cannot be reused (since it would - # attempt to bypass the proxy) - conn = None - - return conn or self._new_conn() - - def _put_conn(self, conn): - """ - Put a connection back into the pool. - - :param conn: - Connection object for the current host and port as returned by - :meth:`._new_conn` or :meth:`._get_conn`. - - If the pool is already full, the connection is closed and discarded - because we exceeded maxsize. If connections are discarded frequently, - then maxsize should be increased. - - If the pool is closed, then the connection will be closed and discarded. - """ - try: - self.pool.put(conn, block=False) - return # Everything is dandy, done. - except AttributeError: - # self.pool is None. - pass - except queue.Full: - # This should never happen if self.block == True - log.warning( - "Connection pool is full, discarding connection: %s", - self.host) - - # Connection never got put back into the pool, close it. - if conn: - conn.close() - - def _validate_conn(self, conn): - """ - Called right before a request is made, after the socket is created. - """ - pass - - def _prepare_proxy(self, conn): - # Nothing to do for HTTP connections. 
- pass - - def _get_timeout(self, timeout): - """ Helper that always returns a :class:`urllib3.util.Timeout` """ - if timeout is _Default: - return self.timeout.clone() - - if isinstance(timeout, Timeout): - return timeout.clone() - else: - # User passed us an int/float. This is for backwards compatibility, - # can be removed later - return Timeout.from_float(timeout) - - def _raise_timeout(self, err, url, timeout_value): - """Is the error actually a timeout? Will raise a ReadTimeout or pass""" - - if isinstance(err, SocketTimeout): - raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) - - # See the above comment about EAGAIN in Python 3. In Python 2 we have - # to specifically catch it and throw the timeout error - if hasattr(err, 'errno') and err.errno in _blocking_errnos: - raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) - - # Catch possible read timeouts thrown as SSL errors. If not the - # case, rethrow the original. We need to do this because of: - # http://bugs.python.org/issue10272 - if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python < 2.7.4 - raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) - - def _make_request(self, conn, method, url, timeout=_Default, chunked=False, - **httplib_request_kw): - """ - Perform a request on a given urllib connection object taken from our - pool. - - :param conn: - a connection from one of our connection pools - - :param timeout: - Socket timeout in seconds for the request. This can be a - float or integer, which will set the same timeout value for - the socket connect and the socket read, or an instance of - :class:`urllib3.util.Timeout`, which gives you more fine-grained - control over your timeouts. 
- """ - self.num_requests += 1 - - timeout_obj = self._get_timeout(timeout) - timeout_obj.start_connect() - conn.timeout = timeout_obj.connect_timeout - - # Trigger any extra validation we need to do. - try: - self._validate_conn(conn) - except (SocketTimeout, BaseSSLError) as e: - # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout. - self._raise_timeout(err=e, url=url, timeout_value=conn.timeout) - raise - - # conn.request() calls httplib.*.request, not the method in - # urllib3.request. It also calls makefile (recv) on the socket. - if chunked: - conn.request_chunked(method, url, **httplib_request_kw) - else: - conn.request(method, url, **httplib_request_kw) - - # Reset the timeout for the recv() on the socket - read_timeout = timeout_obj.read_timeout - - # App Engine doesn't have a sock attr - if getattr(conn, 'sock', None): - # In Python 3 socket.py will catch EAGAIN and return None when you - # try and read into the file pointer created by http.client, which - # instead raises a BadStatusLine exception. Instead of catching - # the exception and assuming all BadStatusLine exceptions are read - # timeouts, check for a zero timeout before making the request. - if read_timeout == 0: - raise ReadTimeoutError( - self, url, "Read timed out. (read timeout=%s)" % read_timeout) - if read_timeout is Timeout.DEFAULT_TIMEOUT: - conn.sock.settimeout(socket.getdefaulttimeout()) - else: # None or a value - conn.sock.settimeout(read_timeout) - - # Receive the response from the server - try: - try: # Python 2.7, use buffering of HTTP responses - httplib_response = conn.getresponse(buffering=True) - except TypeError: # Python 3 - try: - httplib_response = conn.getresponse() - except Exception as e: - # Remove the TypeError from the exception chain in Python 3; - # otherwise it looks like a programming error was the cause. 
- six.raise_from(e, None) - except (SocketTimeout, BaseSSLError, SocketError) as e: - self._raise_timeout(err=e, url=url, timeout_value=read_timeout) - raise - - # AppEngine doesn't have a version attr. - http_version = getattr(conn, '_http_vsn_str', 'HTTP/?') - log.debug("%s://%s:%s \"%s %s %s\" %s %s", self.scheme, self.host, self.port, - method, url, http_version, httplib_response.status, - httplib_response.length) - - try: - assert_header_parsing(httplib_response.msg) - except (HeaderParsingError, TypeError) as hpe: # Platform-specific: Python 3 - log.warning( - 'Failed to parse headers (url=%s): %s', - self._absolute_url(url), hpe, exc_info=True) - - return httplib_response - - def _absolute_url(self, path): - return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url - - def close(self): - """ - Close all pooled connections and disable the pool. - """ - if self.pool is None: - return - # Disable access to the pool - old_pool, self.pool = self.pool, None - - try: - while True: - conn = old_pool.get(block=False) - if conn: - conn.close() - - except queue.Empty: - pass # Done. - - def is_same_host(self, url): - """ - Check if the given ``url`` is a member of the same host as this - connection pool. - """ - if url.startswith('/'): - return True - - # TODO: Add optional support for socket.gethostbyname checking. - scheme, host, port = get_host(url) - - host = _ipv6_host(host, self.scheme) - - # Use explicit default port for comparison when none is given - if self.port and not port: - port = port_by_scheme.get(scheme) - elif not self.port and port == port_by_scheme.get(scheme): - port = None - - return (scheme, host, port) == (self.scheme, self.host, self.port) - - def urlopen(self, method, url, body=None, headers=None, retries=None, - redirect=True, assert_same_host=True, timeout=_Default, - pool_timeout=None, release_conn=None, chunked=False, - body_pos=None, **response_kw): - """ - Get a connection from the pool and perform an HTTP request. 
This is the - lowest level call for making a request, so you'll need to specify all - the raw details. - - .. note:: - - More commonly, it's appropriate to use a convenience method provided - by :class:`.RequestMethods`, such as :meth:`request`. - - .. note:: - - `release_conn` will only behave as expected if - `preload_content=False` because we want to make - `preload_content=False` the default behaviour someday soon without - breaking backwards compatibility. - - :param method: - HTTP request method (such as GET, POST, PUT, etc.) - - :param body: - Data to send in the request body (useful for creating - POST requests, see HTTPConnectionPool.post_url for - more convenience). - - :param headers: - Dictionary of custom headers to send, such as User-Agent, - If-None-Match, etc. If None, pool headers are used. If provided, - these headers completely replace any pool-specific headers. - - :param retries: - Configure the number of retries to allow before raising a - :class:`~urllib3.exceptions.MaxRetryError` exception. - - Pass ``None`` to retry until you receive a response. Pass a - :class:`~urllib3.util.retry.Retry` object for fine-grained control - over different types of retries. - Pass an integer number to retry connection errors that many times, - but no other types of errors. Pass zero to never retry. - - If ``False``, then retries are disabled and any exception is raised - immediately. Also, instead of raising a MaxRetryError on redirects, - the redirect response will be returned. - - :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int. - - :param redirect: - If True, automatically handle redirects (status codes 301, 302, - 303, 307, 308). Each redirect counts as a retry. Disabling retries - will disable redirect, too. - - :param assert_same_host: - If ``True``, will make sure that the host of the pool requests is - consistent else will raise HostChangedError. When False, you can - use the pool on an HTTP proxy and request foreign hosts. 
- - :param timeout: - If specified, overrides the default timeout for this one - request. It may be a float (in seconds) or an instance of - :class:`urllib3.util.Timeout`. - - :param pool_timeout: - If set and the pool is set to block=True, then this method will - block for ``pool_timeout`` seconds and raise EmptyPoolError if no - connection is available within the time period. - - :param release_conn: - If False, then the urlopen call will not release the connection - back into the pool once a response is received (but will release if - you read the entire contents of the response such as when - `preload_content=True`). This is useful if you're not preloading - the response's content immediately. You will need to call - ``r.release_conn()`` on the response ``r`` to return the connection - back into the pool. If None, it takes the value of - ``response_kw.get('preload_content', True)``. - - :param chunked: - If True, urllib3 will send the body using chunked transfer - encoding. Otherwise, urllib3 will send the body using the standard - content-length form. Defaults to False. - - :param int body_pos: - Position to seek to in file-like body in the event of a retry or - redirect. Typically this won't need to be set because urllib3 will - auto-populate the value when needed. - - :param \\**response_kw: - Additional parameters are passed to - :meth:`urllib3.response.HTTPResponse.from_httplib` - """ - if headers is None: - headers = self.headers - - if not isinstance(retries, Retry): - retries = Retry.from_int(retries, redirect=redirect, default=self.retries) - - if release_conn is None: - release_conn = response_kw.get('preload_content', True) - - # Check host - if assert_same_host and not self.is_same_host(url): - raise HostChangedError(self, url, retries) - - conn = None - - # Track whether `conn` needs to be released before - # returning/raising/recursing. Update this variable if necessary, and - # leave `release_conn` constant throughout the function. 
That way, if - # the function recurses, the original value of `release_conn` will be - # passed down into the recursive call, and its value will be respected. - # - # See issue #651 [1] for details. - # - # [1] <https://github.com/shazow/urllib3/issues/651> - release_this_conn = release_conn - - # Merge the proxy headers. Only do this in HTTP. We have to copy the - # headers dict so we can safely change it without those changes being - # reflected in anyone else's copy. - if self.scheme == 'http': - headers = headers.copy() - headers.update(self.proxy_headers) - - # Must keep the exception bound to a separate variable or else Python 3 - # complains about UnboundLocalError. - err = None - - # Keep track of whether we cleanly exited the except block. This - # ensures we do proper cleanup in finally. - clean_exit = False - - # Rewind body position, if needed. Record current position - # for future rewinds in the event of a redirect/retry. - body_pos = set_file_position(body, body_pos) - - try: - # Request a connection from the queue. - timeout_obj = self._get_timeout(timeout) - conn = self._get_conn(timeout=pool_timeout) - - conn.timeout = timeout_obj.connect_timeout - - is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None) - if is_new_proxy_conn: - self._prepare_proxy(conn) - - # Make the request on the httplib connection object. - httplib_response = self._make_request(conn, method, url, - timeout=timeout_obj, - body=body, headers=headers, - chunked=chunked) - - # If we're going to release the connection in ``finally:``, then - # the response doesn't need to know about the connection. Otherwise - # it will also try to release it and we'll have a double-release - # mess. 
- response_conn = conn if not release_conn else None - - # Pass method to Response for length checking - response_kw['request_method'] = method - - # Import httplib's response into our own wrapper object - response = self.ResponseCls.from_httplib(httplib_response, - pool=self, - connection=response_conn, - retries=retries, - **response_kw) - - # Everything went great! - clean_exit = True - - except queue.Empty: - # Timed out by queue. - raise EmptyPoolError(self, "No pool connections are available.") - - except (TimeoutError, HTTPException, SocketError, ProtocolError, - BaseSSLError, SSLError, CertificateError) as e: - # Discard the connection for these exceptions. It will be - # replaced during the next _get_conn() call. - clean_exit = False - if isinstance(e, (BaseSSLError, CertificateError)): - e = SSLError(e) - elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy: - e = ProxyError('Cannot connect to proxy.', e) - elif isinstance(e, (SocketError, HTTPException)): - e = ProtocolError('Connection aborted.', e) - - retries = retries.increment(method, url, error=e, _pool=self, - _stacktrace=sys.exc_info()[2]) - retries.sleep() - - # Keep track of the error for the retry warning. - err = e - - finally: - if not clean_exit: - # We hit some kind of exception, handled or otherwise. We need - # to throw the connection away unless explicitly told not to. - # Close the connection, set the variable to None, and make sure - # we put the None back in the pool to avoid leaking it. - conn = conn and conn.close() - release_this_conn = True - - if release_this_conn: - # Put the connection back to be reused. If the connection is - # expired then it will be None, which will get replaced with a - # fresh connection during _get_conn. 
- self._put_conn(conn) - - if not conn: - # Try again - log.warning("Retrying (%r) after connection " - "broken by '%r': %s", retries, err, url) - return self.urlopen(method, url, body, headers, retries, - redirect, assert_same_host, - timeout=timeout, pool_timeout=pool_timeout, - release_conn=release_conn, body_pos=body_pos, - **response_kw) - - def drain_and_release_conn(response): - try: - # discard any remaining response body, the connection will be - # released back to the pool once the entire response is read - response.read() - except (TimeoutError, HTTPException, SocketError, ProtocolError, - BaseSSLError, SSLError) as e: - pass - - # Handle redirect? - redirect_location = redirect and response.get_redirect_location() - if redirect_location: - if response.status == 303: - method = 'GET' - - try: - retries = retries.increment(method, url, response=response, _pool=self) - except MaxRetryError: - if retries.raise_on_redirect: - # Drain and release the connection for this response, since - # we're not returning it to be released manually. - drain_and_release_conn(response) - raise - return response - - # drain and return the connection to the pool before recursing - drain_and_release_conn(response) - - retries.sleep_for_retry(response) - log.debug("Redirecting %s -> %s", url, redirect_location) - return self.urlopen( - method, redirect_location, body, headers, - retries=retries, redirect=redirect, - assert_same_host=assert_same_host, - timeout=timeout, pool_timeout=pool_timeout, - release_conn=release_conn, body_pos=body_pos, - **response_kw) - - # Check if we should retry the HTTP response. - has_retry_after = bool(response.getheader('Retry-After')) - if retries.is_retry(method, response.status, has_retry_after): - try: - retries = retries.increment(method, url, response=response, _pool=self) - except MaxRetryError: - if retries.raise_on_status: - # Drain and release the connection for this response, since - # we're not returning it to be released manually. 
- drain_and_release_conn(response) - raise - return response - - # drain and return the connection to the pool before recursing - drain_and_release_conn(response) - - retries.sleep(response) - log.debug("Retry: %s", url) - return self.urlopen( - method, url, body, headers, - retries=retries, redirect=redirect, - assert_same_host=assert_same_host, - timeout=timeout, pool_timeout=pool_timeout, - release_conn=release_conn, - body_pos=body_pos, **response_kw) - - return response - - -class HTTPSConnectionPool(HTTPConnectionPool): - """ - Same as :class:`.HTTPConnectionPool`, but HTTPS. - - When Python is compiled with the :mod:`ssl` module, then - :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates, - instead of :class:`.HTTPSConnection`. - - :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``, - ``assert_hostname`` and ``host`` in this order to verify connections. - If ``assert_hostname`` is False, no verification is done. - - The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``, - ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is - available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade - the connection socket into an SSL socket. 
- """ - - scheme = 'https' - ConnectionCls = HTTPSConnection - - def __init__(self, host, port=None, - strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, - block=False, headers=None, retries=None, - _proxy=None, _proxy_headers=None, - key_file=None, cert_file=None, cert_reqs=None, - ca_certs=None, ssl_version=None, - assert_hostname=None, assert_fingerprint=None, - ca_cert_dir=None, **conn_kw): - - HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize, - block, headers, retries, _proxy, _proxy_headers, - **conn_kw) - - if ca_certs and cert_reqs is None: - cert_reqs = 'CERT_REQUIRED' - - self.key_file = key_file - self.cert_file = cert_file - self.cert_reqs = cert_reqs - self.ca_certs = ca_certs - self.ca_cert_dir = ca_cert_dir - self.ssl_version = ssl_version - self.assert_hostname = assert_hostname - self.assert_fingerprint = assert_fingerprint - - def _prepare_conn(self, conn): - """ - Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket` - and establish the tunnel if proxy is used. - """ - - if isinstance(conn, VerifiedHTTPSConnection): - conn.set_cert(key_file=self.key_file, - cert_file=self.cert_file, - cert_reqs=self.cert_reqs, - ca_certs=self.ca_certs, - ca_cert_dir=self.ca_cert_dir, - assert_hostname=self.assert_hostname, - assert_fingerprint=self.assert_fingerprint) - conn.ssl_version = self.ssl_version - return conn - - def _prepare_proxy(self, conn): - """ - Establish tunnel connection early, because otherwise httplib - would improperly set Host: header to proxy's IP:port. - """ - conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers) - conn.connect() - - def _new_conn(self): - """ - Return a fresh :class:`httplib.HTTPSConnection`. 
- """ - self.num_connections += 1 - log.debug("Starting new HTTPS connection (%d): %s:%s", - self.num_connections, self.host, self.port or "443") - - if not self.ConnectionCls or self.ConnectionCls is DummyConnection: - raise SSLError("Can't connect to HTTPS URL because the SSL " - "module is not available.") - - actual_host = self.host - actual_port = self.port - if self.proxy is not None: - actual_host = self.proxy.host - actual_port = self.proxy.port - - conn = self.ConnectionCls(host=actual_host, port=actual_port, - timeout=self.timeout.connect_timeout, - strict=self.strict, **self.conn_kw) - - return self._prepare_conn(conn) - - def _validate_conn(self, conn): - """ - Called right before a request is made, after the socket is created. - """ - super(HTTPSConnectionPool, self)._validate_conn(conn) - - # Force connect early to allow us to validate the connection. - if not getattr(conn, 'sock', None): # AppEngine might not have `.sock` - conn.connect() - - if not conn.is_verified: - warnings.warn(( - 'Unverified HTTPS request is being made. ' - 'Adding certificate verification is strongly advised. See: ' - 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' - '#ssl-warnings'), - InsecureRequestWarning) - - -def connection_from_url(url, **kw): - """ - Given a url, return an :class:`.ConnectionPool` instance of its host. - - This is a shortcut for not having to parse out the scheme, host, and port - of the url before creating an :class:`.ConnectionPool` instance. - - :param url: - Absolute URL string that must include the scheme. Port is optional. - - :param \\**kw: - Passes additional parameters to the constructor of the appropriate - :class:`.ConnectionPool`. Useful for specifying things like - timeout, maxsize, headers, etc. 
- - Example:: - - >>> conn = connection_from_url('http://google.com/') - >>> r = conn.request('GET', '/') - """ - scheme, host, port = get_host(url) - port = port or port_by_scheme.get(scheme, 80) - if scheme == 'https': - return HTTPSConnectionPool(host, port=port, **kw) - else: - return HTTPConnectionPool(host, port=port, **kw) - - -def _ipv6_host(host, scheme): - """ - Process IPv6 address literals - """ - - # httplib doesn't like it when we include brackets in IPv6 addresses - # Specifically, if we include brackets but also pass the port then - # httplib crazily doubles up the square brackets on the Host header. - # Instead, we need to make sure we never pass ``None`` as the port. - # However, for backward compatibility reasons we can't actually - # *assert* that. See http://bugs.python.org/issue28539 - # - # Also if an IPv6 address literal has a zone identifier, the - # percent sign might be URIencoded, convert it back into ASCII - if host.startswith('[') and host.endswith(']'): - host = host.replace('%25', '%').strip('[]') - if scheme in NORMALIZABLE_SCHEMES: - host = host.lower() - return host diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/_appengine_environ.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/_appengine_environ.py deleted file mode 100644 index f3e0094..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/_appengine_environ.py +++ /dev/null @@ -1,30 +0,0 @@ -""" -This module provides means to detect the App Engine environment. 
-""" - -import os - - -def is_appengine(): - return (is_local_appengine() or - is_prod_appengine() or - is_prod_appengine_mvms()) - - -def is_appengine_sandbox(): - return is_appengine() and not is_prod_appengine_mvms() - - -def is_local_appengine(): - return ('APPENGINE_RUNTIME' in os.environ and - 'Development/' in os.environ['SERVER_SOFTWARE']) - - -def is_prod_appengine(): - return ('APPENGINE_RUNTIME' in os.environ and - 'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and - not is_prod_appengine_mvms()) - - -def is_prod_appengine_mvms(): - return os.environ.get('GAE_VM', False) == 'true' diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/_securetransport/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/_securetransport/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/_securetransport/bindings.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/_securetransport/bindings.py deleted file mode 100644 index bcf41c0..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/_securetransport/bindings.py +++ /dev/null @@ -1,593 +0,0 @@ -""" -This module uses ctypes to bind a whole bunch of functions and constants from -SecureTransport. The goal here is to provide the low-level API to -SecureTransport. These are essentially the C-level functions and constants, and -they're pretty gross to work with. - -This code is a bastardised version of the code found in Will Bond's oscrypto -library. An enormous debt is owed to him for blazing this trail for us. 
For -that reason, this code should be considered to be covered both by urllib3's -license and by oscrypto's: - - Copyright (c) 2015-2016 Will Bond <will@wbond.net> - - Permission is hereby granted, free of charge, to any person obtaining a - copy of this software and associated documentation files (the "Software"), - to deal in the Software without restriction, including without limitation - the rights to use, copy, modify, merge, publish, distribute, sublicense, - and/or sell copies of the Software, and to permit persons to whom the - Software is furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - DEALINGS IN THE SOFTWARE. 
-""" -from __future__ import absolute_import - -import platform -from ctypes.util import find_library -from ctypes import ( - c_void_p, c_int32, c_char_p, c_size_t, c_byte, c_uint32, c_ulong, c_long, - c_bool -) -from ctypes import CDLL, POINTER, CFUNCTYPE - - -security_path = find_library('Security') -if not security_path: - raise ImportError('The library Security could not be found') - - -core_foundation_path = find_library('CoreFoundation') -if not core_foundation_path: - raise ImportError('The library CoreFoundation could not be found') - - -version = platform.mac_ver()[0] -version_info = tuple(map(int, version.split('.'))) -if version_info < (10, 8): - raise OSError( - 'Only OS X 10.8 and newer are supported, not %s.%s' % ( - version_info[0], version_info[1] - ) - ) - -Security = CDLL(security_path, use_errno=True) -CoreFoundation = CDLL(core_foundation_path, use_errno=True) - -Boolean = c_bool -CFIndex = c_long -CFStringEncoding = c_uint32 -CFData = c_void_p -CFString = c_void_p -CFArray = c_void_p -CFMutableArray = c_void_p -CFDictionary = c_void_p -CFError = c_void_p -CFType = c_void_p -CFTypeID = c_ulong - -CFTypeRef = POINTER(CFType) -CFAllocatorRef = c_void_p - -OSStatus = c_int32 - -CFDataRef = POINTER(CFData) -CFStringRef = POINTER(CFString) -CFArrayRef = POINTER(CFArray) -CFMutableArrayRef = POINTER(CFMutableArray) -CFDictionaryRef = POINTER(CFDictionary) -CFArrayCallBacks = c_void_p -CFDictionaryKeyCallBacks = c_void_p -CFDictionaryValueCallBacks = c_void_p - -SecCertificateRef = POINTER(c_void_p) -SecExternalFormat = c_uint32 -SecExternalItemType = c_uint32 -SecIdentityRef = POINTER(c_void_p) -SecItemImportExportFlags = c_uint32 -SecItemImportExportKeyParameters = c_void_p -SecKeychainRef = POINTER(c_void_p) -SSLProtocol = c_uint32 -SSLCipherSuite = c_uint32 -SSLContextRef = POINTER(c_void_p) -SecTrustRef = POINTER(c_void_p) -SSLConnectionRef = c_uint32 -SecTrustResultType = c_uint32 -SecTrustOptionFlags = c_uint32 -SSLProtocolSide = c_uint32 
-SSLConnectionType = c_uint32 -SSLSessionOption = c_uint32 - - -try: - Security.SecItemImport.argtypes = [ - CFDataRef, - CFStringRef, - POINTER(SecExternalFormat), - POINTER(SecExternalItemType), - SecItemImportExportFlags, - POINTER(SecItemImportExportKeyParameters), - SecKeychainRef, - POINTER(CFArrayRef), - ] - Security.SecItemImport.restype = OSStatus - - Security.SecCertificateGetTypeID.argtypes = [] - Security.SecCertificateGetTypeID.restype = CFTypeID - - Security.SecIdentityGetTypeID.argtypes = [] - Security.SecIdentityGetTypeID.restype = CFTypeID - - Security.SecKeyGetTypeID.argtypes = [] - Security.SecKeyGetTypeID.restype = CFTypeID - - Security.SecCertificateCreateWithData.argtypes = [ - CFAllocatorRef, - CFDataRef - ] - Security.SecCertificateCreateWithData.restype = SecCertificateRef - - Security.SecCertificateCopyData.argtypes = [ - SecCertificateRef - ] - Security.SecCertificateCopyData.restype = CFDataRef - - Security.SecCopyErrorMessageString.argtypes = [ - OSStatus, - c_void_p - ] - Security.SecCopyErrorMessageString.restype = CFStringRef - - Security.SecIdentityCreateWithCertificate.argtypes = [ - CFTypeRef, - SecCertificateRef, - POINTER(SecIdentityRef) - ] - Security.SecIdentityCreateWithCertificate.restype = OSStatus - - Security.SecKeychainCreate.argtypes = [ - c_char_p, - c_uint32, - c_void_p, - Boolean, - c_void_p, - POINTER(SecKeychainRef) - ] - Security.SecKeychainCreate.restype = OSStatus - - Security.SecKeychainDelete.argtypes = [ - SecKeychainRef - ] - Security.SecKeychainDelete.restype = OSStatus - - Security.SecPKCS12Import.argtypes = [ - CFDataRef, - CFDictionaryRef, - POINTER(CFArrayRef) - ] - Security.SecPKCS12Import.restype = OSStatus - - SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t)) - SSLWriteFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t)) - - Security.SSLSetIOFuncs.argtypes = [ - SSLContextRef, - SSLReadFunc, - SSLWriteFunc - ] - Security.SSLSetIOFuncs.restype 
= OSStatus - - Security.SSLSetPeerID.argtypes = [ - SSLContextRef, - c_char_p, - c_size_t - ] - Security.SSLSetPeerID.restype = OSStatus - - Security.SSLSetCertificate.argtypes = [ - SSLContextRef, - CFArrayRef - ] - Security.SSLSetCertificate.restype = OSStatus - - Security.SSLSetCertificateAuthorities.argtypes = [ - SSLContextRef, - CFTypeRef, - Boolean - ] - Security.SSLSetCertificateAuthorities.restype = OSStatus - - Security.SSLSetConnection.argtypes = [ - SSLContextRef, - SSLConnectionRef - ] - Security.SSLSetConnection.restype = OSStatus - - Security.SSLSetPeerDomainName.argtypes = [ - SSLContextRef, - c_char_p, - c_size_t - ] - Security.SSLSetPeerDomainName.restype = OSStatus - - Security.SSLHandshake.argtypes = [ - SSLContextRef - ] - Security.SSLHandshake.restype = OSStatus - - Security.SSLRead.argtypes = [ - SSLContextRef, - c_char_p, - c_size_t, - POINTER(c_size_t) - ] - Security.SSLRead.restype = OSStatus - - Security.SSLWrite.argtypes = [ - SSLContextRef, - c_char_p, - c_size_t, - POINTER(c_size_t) - ] - Security.SSLWrite.restype = OSStatus - - Security.SSLClose.argtypes = [ - SSLContextRef - ] - Security.SSLClose.restype = OSStatus - - Security.SSLGetNumberSupportedCiphers.argtypes = [ - SSLContextRef, - POINTER(c_size_t) - ] - Security.SSLGetNumberSupportedCiphers.restype = OSStatus - - Security.SSLGetSupportedCiphers.argtypes = [ - SSLContextRef, - POINTER(SSLCipherSuite), - POINTER(c_size_t) - ] - Security.SSLGetSupportedCiphers.restype = OSStatus - - Security.SSLSetEnabledCiphers.argtypes = [ - SSLContextRef, - POINTER(SSLCipherSuite), - c_size_t - ] - Security.SSLSetEnabledCiphers.restype = OSStatus - - Security.SSLGetNumberEnabledCiphers.argtype = [ - SSLContextRef, - POINTER(c_size_t) - ] - Security.SSLGetNumberEnabledCiphers.restype = OSStatus - - Security.SSLGetEnabledCiphers.argtypes = [ - SSLContextRef, - POINTER(SSLCipherSuite), - POINTER(c_size_t) - ] - Security.SSLGetEnabledCiphers.restype = OSStatus - - 
Security.SSLGetNegotiatedCipher.argtypes = [ - SSLContextRef, - POINTER(SSLCipherSuite) - ] - Security.SSLGetNegotiatedCipher.restype = OSStatus - - Security.SSLGetNegotiatedProtocolVersion.argtypes = [ - SSLContextRef, - POINTER(SSLProtocol) - ] - Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus - - Security.SSLCopyPeerTrust.argtypes = [ - SSLContextRef, - POINTER(SecTrustRef) - ] - Security.SSLCopyPeerTrust.restype = OSStatus - - Security.SecTrustSetAnchorCertificates.argtypes = [ - SecTrustRef, - CFArrayRef - ] - Security.SecTrustSetAnchorCertificates.restype = OSStatus - - Security.SecTrustSetAnchorCertificatesOnly.argstypes = [ - SecTrustRef, - Boolean - ] - Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus - - Security.SecTrustEvaluate.argtypes = [ - SecTrustRef, - POINTER(SecTrustResultType) - ] - Security.SecTrustEvaluate.restype = OSStatus - - Security.SecTrustGetCertificateCount.argtypes = [ - SecTrustRef - ] - Security.SecTrustGetCertificateCount.restype = CFIndex - - Security.SecTrustGetCertificateAtIndex.argtypes = [ - SecTrustRef, - CFIndex - ] - Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef - - Security.SSLCreateContext.argtypes = [ - CFAllocatorRef, - SSLProtocolSide, - SSLConnectionType - ] - Security.SSLCreateContext.restype = SSLContextRef - - Security.SSLSetSessionOption.argtypes = [ - SSLContextRef, - SSLSessionOption, - Boolean - ] - Security.SSLSetSessionOption.restype = OSStatus - - Security.SSLSetProtocolVersionMin.argtypes = [ - SSLContextRef, - SSLProtocol - ] - Security.SSLSetProtocolVersionMin.restype = OSStatus - - Security.SSLSetProtocolVersionMax.argtypes = [ - SSLContextRef, - SSLProtocol - ] - Security.SSLSetProtocolVersionMax.restype = OSStatus - - Security.SecCopyErrorMessageString.argtypes = [ - OSStatus, - c_void_p - ] - Security.SecCopyErrorMessageString.restype = CFStringRef - - Security.SSLReadFunc = SSLReadFunc - Security.SSLWriteFunc = SSLWriteFunc - Security.SSLContextRef = 
SSLContextRef - Security.SSLProtocol = SSLProtocol - Security.SSLCipherSuite = SSLCipherSuite - Security.SecIdentityRef = SecIdentityRef - Security.SecKeychainRef = SecKeychainRef - Security.SecTrustRef = SecTrustRef - Security.SecTrustResultType = SecTrustResultType - Security.SecExternalFormat = SecExternalFormat - Security.OSStatus = OSStatus - - Security.kSecImportExportPassphrase = CFStringRef.in_dll( - Security, 'kSecImportExportPassphrase' - ) - Security.kSecImportItemIdentity = CFStringRef.in_dll( - Security, 'kSecImportItemIdentity' - ) - - # CoreFoundation time! - CoreFoundation.CFRetain.argtypes = [ - CFTypeRef - ] - CoreFoundation.CFRetain.restype = CFTypeRef - - CoreFoundation.CFRelease.argtypes = [ - CFTypeRef - ] - CoreFoundation.CFRelease.restype = None - - CoreFoundation.CFGetTypeID.argtypes = [ - CFTypeRef - ] - CoreFoundation.CFGetTypeID.restype = CFTypeID - - CoreFoundation.CFStringCreateWithCString.argtypes = [ - CFAllocatorRef, - c_char_p, - CFStringEncoding - ] - CoreFoundation.CFStringCreateWithCString.restype = CFStringRef - - CoreFoundation.CFStringGetCStringPtr.argtypes = [ - CFStringRef, - CFStringEncoding - ] - CoreFoundation.CFStringGetCStringPtr.restype = c_char_p - - CoreFoundation.CFStringGetCString.argtypes = [ - CFStringRef, - c_char_p, - CFIndex, - CFStringEncoding - ] - CoreFoundation.CFStringGetCString.restype = c_bool - - CoreFoundation.CFDataCreate.argtypes = [ - CFAllocatorRef, - c_char_p, - CFIndex - ] - CoreFoundation.CFDataCreate.restype = CFDataRef - - CoreFoundation.CFDataGetLength.argtypes = [ - CFDataRef - ] - CoreFoundation.CFDataGetLength.restype = CFIndex - - CoreFoundation.CFDataGetBytePtr.argtypes = [ - CFDataRef - ] - CoreFoundation.CFDataGetBytePtr.restype = c_void_p - - CoreFoundation.CFDictionaryCreate.argtypes = [ - CFAllocatorRef, - POINTER(CFTypeRef), - POINTER(CFTypeRef), - CFIndex, - CFDictionaryKeyCallBacks, - CFDictionaryValueCallBacks - ] - CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef - 
- CoreFoundation.CFDictionaryGetValue.argtypes = [ - CFDictionaryRef, - CFTypeRef - ] - CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef - - CoreFoundation.CFArrayCreate.argtypes = [ - CFAllocatorRef, - POINTER(CFTypeRef), - CFIndex, - CFArrayCallBacks, - ] - CoreFoundation.CFArrayCreate.restype = CFArrayRef - - CoreFoundation.CFArrayCreateMutable.argtypes = [ - CFAllocatorRef, - CFIndex, - CFArrayCallBacks - ] - CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef - - CoreFoundation.CFArrayAppendValue.argtypes = [ - CFMutableArrayRef, - c_void_p - ] - CoreFoundation.CFArrayAppendValue.restype = None - - CoreFoundation.CFArrayGetCount.argtypes = [ - CFArrayRef - ] - CoreFoundation.CFArrayGetCount.restype = CFIndex - - CoreFoundation.CFArrayGetValueAtIndex.argtypes = [ - CFArrayRef, - CFIndex - ] - CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p - - CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll( - CoreFoundation, 'kCFAllocatorDefault' - ) - CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(CoreFoundation, 'kCFTypeArrayCallBacks') - CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll( - CoreFoundation, 'kCFTypeDictionaryKeyCallBacks' - ) - CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll( - CoreFoundation, 'kCFTypeDictionaryValueCallBacks' - ) - - CoreFoundation.CFTypeRef = CFTypeRef - CoreFoundation.CFArrayRef = CFArrayRef - CoreFoundation.CFStringRef = CFStringRef - CoreFoundation.CFDictionaryRef = CFDictionaryRef - -except (AttributeError): - raise ImportError('Error initializing ctypes') - - -class CFConst(object): - """ - A class object that acts as essentially a namespace for CoreFoundation - constants. - """ - kCFStringEncodingUTF8 = CFStringEncoding(0x08000100) - - -class SecurityConst(object): - """ - A class object that acts as essentially a namespace for Security constants. 
- """ - kSSLSessionOptionBreakOnServerAuth = 0 - - kSSLProtocol2 = 1 - kSSLProtocol3 = 2 - kTLSProtocol1 = 4 - kTLSProtocol11 = 7 - kTLSProtocol12 = 8 - - kSSLClientSide = 1 - kSSLStreamType = 0 - - kSecFormatPEMSequence = 10 - - kSecTrustResultInvalid = 0 - kSecTrustResultProceed = 1 - # This gap is present on purpose: this was kSecTrustResultConfirm, which - # is deprecated. - kSecTrustResultDeny = 3 - kSecTrustResultUnspecified = 4 - kSecTrustResultRecoverableTrustFailure = 5 - kSecTrustResultFatalTrustFailure = 6 - kSecTrustResultOtherError = 7 - - errSSLProtocol = -9800 - errSSLWouldBlock = -9803 - errSSLClosedGraceful = -9805 - errSSLClosedNoNotify = -9816 - errSSLClosedAbort = -9806 - - errSSLXCertChainInvalid = -9807 - errSSLCrypto = -9809 - errSSLInternal = -9810 - errSSLCertExpired = -9814 - errSSLCertNotYetValid = -9815 - errSSLUnknownRootCert = -9812 - errSSLNoRootCert = -9813 - errSSLHostNameMismatch = -9843 - errSSLPeerHandshakeFail = -9824 - errSSLPeerUserCancelled = -9839 - errSSLWeakPeerEphemeralDHKey = -9850 - errSSLServerAuthCompleted = -9841 - errSSLRecordOverflow = -9847 - - errSecVerifyFailed = -67808 - errSecNoTrustSettings = -25263 - errSecItemNotFound = -25300 - errSecInvalidTrustSettings = -25262 - - # Cipher suites. We only pick the ones our default cipher string allows. 
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030 - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F - TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = 0x00A3 - TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F - TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = 0x00A2 - TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024 - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028 - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014 - TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B - TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = 0x006A - TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039 - TLS_DHE_DSS_WITH_AES_256_CBC_SHA = 0x0038 - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023 - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027 - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009 - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013 - TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067 - TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = 0x0040 - TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033 - TLS_DHE_DSS_WITH_AES_128_CBC_SHA = 0x0032 - TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D - TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C - TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D - TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C - TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035 - TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F - TLS_AES_128_GCM_SHA256 = 0x1301 - TLS_AES_256_GCM_SHA384 = 0x1302 - TLS_CHACHA20_POLY1305_SHA256 = 0x1303 diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/_securetransport/low_level.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/_securetransport/low_level.py deleted file mode 100644 index b13cd9e..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/_securetransport/low_level.py +++ /dev/null @@ -1,346 +0,0 @@ -""" -Low-level helpers for the SecureTransport bindings. 
- -These are Python functions that are not directly related to the high-level APIs -but are necessary to get them to work. They include a whole bunch of low-level -CoreFoundation messing about and memory management. The concerns in this module -are almost entirely about trying to avoid memory leaks and providing -appropriate and useful assistance to the higher-level code. -""" -import base64 -import ctypes -import itertools -import re -import os -import ssl -import tempfile - -from .bindings import Security, CoreFoundation, CFConst - - -# This regular expression is used to grab PEM data out of a PEM bundle. -_PEM_CERTS_RE = re.compile( - b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL -) - - -def _cf_data_from_bytes(bytestring): - """ - Given a bytestring, create a CFData object from it. This CFData object must - be CFReleased by the caller. - """ - return CoreFoundation.CFDataCreate( - CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring) - ) - - -def _cf_dictionary_from_tuples(tuples): - """ - Given a list of Python tuples, create an associated CFDictionary. - """ - dictionary_size = len(tuples) - - # We need to get the dictionary keys and values out in the same order. - keys = (t[0] for t in tuples) - values = (t[1] for t in tuples) - cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys) - cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values) - - return CoreFoundation.CFDictionaryCreate( - CoreFoundation.kCFAllocatorDefault, - cf_keys, - cf_values, - dictionary_size, - CoreFoundation.kCFTypeDictionaryKeyCallBacks, - CoreFoundation.kCFTypeDictionaryValueCallBacks, - ) - - -def _cf_string_to_unicode(value): - """ - Creates a Unicode string from a CFString object. Used entirely for error - reporting. - - Yes, it annoys me quite a lot that this function is this complex. 
- """ - value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p)) - - string = CoreFoundation.CFStringGetCStringPtr( - value_as_void_p, - CFConst.kCFStringEncodingUTF8 - ) - if string is None: - buffer = ctypes.create_string_buffer(1024) - result = CoreFoundation.CFStringGetCString( - value_as_void_p, - buffer, - 1024, - CFConst.kCFStringEncodingUTF8 - ) - if not result: - raise OSError('Error copying C string from CFStringRef') - string = buffer.value - if string is not None: - string = string.decode('utf-8') - return string - - -def _assert_no_error(error, exception_class=None): - """ - Checks the return code and throws an exception if there is an error to - report - """ - if error == 0: - return - - cf_error_string = Security.SecCopyErrorMessageString(error, None) - output = _cf_string_to_unicode(cf_error_string) - CoreFoundation.CFRelease(cf_error_string) - - if output is None or output == u'': - output = u'OSStatus %s' % error - - if exception_class is None: - exception_class = ssl.SSLError - - raise exception_class(output) - - -def _cert_array_from_pem(pem_bundle): - """ - Given a bundle of certs in PEM format, turns them into a CFArray of certs - that can be used to validate a cert chain. - """ - # Normalize the PEM bundle's line endings. 
- pem_bundle = pem_bundle.replace(b"\r\n", b"\n") - - der_certs = [ - base64.b64decode(match.group(1)) - for match in _PEM_CERTS_RE.finditer(pem_bundle) - ] - if not der_certs: - raise ssl.SSLError("No root certificates specified") - - cert_array = CoreFoundation.CFArrayCreateMutable( - CoreFoundation.kCFAllocatorDefault, - 0, - ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks) - ) - if not cert_array: - raise ssl.SSLError("Unable to allocate memory!") - - try: - for der_bytes in der_certs: - certdata = _cf_data_from_bytes(der_bytes) - if not certdata: - raise ssl.SSLError("Unable to allocate memory!") - cert = Security.SecCertificateCreateWithData( - CoreFoundation.kCFAllocatorDefault, certdata - ) - CoreFoundation.CFRelease(certdata) - if not cert: - raise ssl.SSLError("Unable to build cert object!") - - CoreFoundation.CFArrayAppendValue(cert_array, cert) - CoreFoundation.CFRelease(cert) - except Exception: - # We need to free the array before the exception bubbles further. - # We only want to do that if an error occurs: otherwise, the caller - # should free. - CoreFoundation.CFRelease(cert_array) - - return cert_array - - -def _is_cert(item): - """ - Returns True if a given CFTypeRef is a certificate. - """ - expected = Security.SecCertificateGetTypeID() - return CoreFoundation.CFGetTypeID(item) == expected - - -def _is_identity(item): - """ - Returns True if a given CFTypeRef is an identity. - """ - expected = Security.SecIdentityGetTypeID() - return CoreFoundation.CFGetTypeID(item) == expected - - -def _temporary_keychain(): - """ - This function creates a temporary Mac keychain that we can use to work with - credentials. This keychain uses a one-time password and a temporary file to - store the data. We expect to have one keychain per socket. The returned - SecKeychainRef must be freed by the caller, including calling - SecKeychainDelete. - - Returns a tuple of the SecKeychainRef and the path to the temporary - directory that contains it. 
- """ - # Unfortunately, SecKeychainCreate requires a path to a keychain. This - # means we cannot use mkstemp to use a generic temporary file. Instead, - # we're going to create a temporary directory and a filename to use there. - # This filename will be 8 random bytes expanded into base64. We also need - # some random bytes to password-protect the keychain we're creating, so we - # ask for 40 random bytes. - random_bytes = os.urandom(40) - filename = base64.b16encode(random_bytes[:8]).decode('utf-8') - password = base64.b16encode(random_bytes[8:]) # Must be valid UTF-8 - tempdirectory = tempfile.mkdtemp() - - keychain_path = os.path.join(tempdirectory, filename).encode('utf-8') - - # We now want to create the keychain itself. - keychain = Security.SecKeychainRef() - status = Security.SecKeychainCreate( - keychain_path, - len(password), - password, - False, - None, - ctypes.byref(keychain) - ) - _assert_no_error(status) - - # Having created the keychain, we want to pass it off to the caller. - return keychain, tempdirectory - - -def _load_items_from_file(keychain, path): - """ - Given a single file, loads all the trust objects from it into arrays and - the keychain. - Returns a tuple of lists: the first list is a list of identities, the - second a list of certs. 
- """ - certificates = [] - identities = [] - result_array = None - - with open(path, 'rb') as f: - raw_filedata = f.read() - - try: - filedata = CoreFoundation.CFDataCreate( - CoreFoundation.kCFAllocatorDefault, - raw_filedata, - len(raw_filedata) - ) - result_array = CoreFoundation.CFArrayRef() - result = Security.SecItemImport( - filedata, # cert data - None, # Filename, leaving it out for now - None, # What the type of the file is, we don't care - None, # what's in the file, we don't care - 0, # import flags - None, # key params, can include passphrase in the future - keychain, # The keychain to insert into - ctypes.byref(result_array) # Results - ) - _assert_no_error(result) - - # A CFArray is not very useful to us as an intermediary - # representation, so we are going to extract the objects we want - # and then free the array. We don't need to keep hold of keys: the - # keychain already has them! - result_count = CoreFoundation.CFArrayGetCount(result_array) - for index in range(result_count): - item = CoreFoundation.CFArrayGetValueAtIndex( - result_array, index - ) - item = ctypes.cast(item, CoreFoundation.CFTypeRef) - - if _is_cert(item): - CoreFoundation.CFRetain(item) - certificates.append(item) - elif _is_identity(item): - CoreFoundation.CFRetain(item) - identities.append(item) - finally: - if result_array: - CoreFoundation.CFRelease(result_array) - - CoreFoundation.CFRelease(filedata) - - return (identities, certificates) - - -def _load_client_cert_chain(keychain, *paths): - """ - Load certificates and maybe keys from a number of files. Has the end goal - of returning a CFArray containing one SecIdentityRef, and then zero or more - SecCertificateRef objects, suitable for use as a client certificate trust - chain. - """ - # Ok, the strategy. - # - # This relies on knowing that macOS will not give you a SecIdentityRef - # unless you have imported a key into a keychain. 
This is a somewhat - # artificial limitation of macOS (for example, it doesn't necessarily - # affect iOS), but there is nothing inside Security.framework that lets you - # get a SecIdentityRef without having a key in a keychain. - # - # So the policy here is we take all the files and iterate them in order. - # Each one will use SecItemImport to have one or more objects loaded from - # it. We will also point at a keychain that macOS can use to work with the - # private key. - # - # Once we have all the objects, we'll check what we actually have. If we - # already have a SecIdentityRef in hand, fab: we'll use that. Otherwise, - # we'll take the first certificate (which we assume to be our leaf) and - # ask the keychain to give us a SecIdentityRef with that cert's associated - # key. - # - # We'll then return a CFArray containing the trust chain: one - # SecIdentityRef and then zero-or-more SecCertificateRef objects. The - # responsibility for freeing this CFArray will be with the caller. This - # CFArray must remain alive for the entire connection, so in practice it - # will be stored with a single SSLSocket, along with the reference to the - # keychain. - certificates = [] - identities = [] - - # Filter out bad paths. - paths = (path for path in paths if path) - - try: - for file_path in paths: - new_identities, new_certs = _load_items_from_file( - keychain, file_path - ) - identities.extend(new_identities) - certificates.extend(new_certs) - - # Ok, we have everything. The question is: do we have an identity? If - # not, we want to grab one from the first cert we have. - if not identities: - new_identity = Security.SecIdentityRef() - status = Security.SecIdentityCreateWithCertificate( - keychain, - certificates[0], - ctypes.byref(new_identity) - ) - _assert_no_error(status) - identities.append(new_identity) - - # We now want to release the original certificate, as we no longer - # need it. 
- CoreFoundation.CFRelease(certificates.pop(0)) - - # We now need to build a new CFArray that holds the trust chain. - trust_chain = CoreFoundation.CFArrayCreateMutable( - CoreFoundation.kCFAllocatorDefault, - 0, - ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks), - ) - for item in itertools.chain(identities, certificates): - # ArrayAppendValue does a CFRetain on the item. That's fine, - # because the finally block will release our other refs to them. - CoreFoundation.CFArrayAppendValue(trust_chain, item) - - return trust_chain - finally: - for obj in itertools.chain(identities, certificates): - CoreFoundation.CFRelease(obj) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/appengine.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/appengine.py deleted file mode 100644 index 9b42952..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/appengine.py +++ /dev/null @@ -1,289 +0,0 @@ -""" -This module provides a pool manager that uses Google App Engine's -`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_. - -Example usage:: - - from pip._vendor.urllib3 import PoolManager - from pip._vendor.urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox - - if is_appengine_sandbox(): - # AppEngineManager uses AppEngine's URLFetch API behind the scenes - http = AppEngineManager() - else: - # PoolManager uses a socket-level API behind the scenes - http = PoolManager() - - r = http.request('GET', 'https://google.com/') - -There are `limitations <https://cloud.google.com/appengine/docs/python/\ -urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be -the best choice for your application. There are three options for using -urllib3 on Google App Engine: - -1. You can use :class:`AppEngineManager` with URLFetch. 
URLFetch is - cost-effective in many circumstances as long as your usage is within the - limitations. -2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets. - Sockets also have `limitations and restrictions - <https://cloud.google.com/appengine/docs/python/sockets/\ - #limitations-and-restrictions>`_ and have a lower free quota than URLFetch. - To use sockets, be sure to specify the following in your ``app.yaml``:: - - env_variables: - GAE_USE_SOCKETS_HTTPLIB : 'true' - -3. If you are using `App Engine Flexible -<https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard -:class:`PoolManager` without any configuration or special environment variables. -""" - -from __future__ import absolute_import -import io -import logging -import warnings -from ..packages.six.moves.urllib.parse import urljoin - -from ..exceptions import ( - HTTPError, - HTTPWarning, - MaxRetryError, - ProtocolError, - TimeoutError, - SSLError -) - -from ..request import RequestMethods -from ..response import HTTPResponse -from ..util.timeout import Timeout -from ..util.retry import Retry -from . import _appengine_environ - -try: - from google.appengine.api import urlfetch -except ImportError: - urlfetch = None - - -log = logging.getLogger(__name__) - - -class AppEnginePlatformWarning(HTTPWarning): - pass - - -class AppEnginePlatformError(HTTPError): - pass - - -class AppEngineManager(RequestMethods): - """ - Connection manager for Google App Engine sandbox applications. - - This manager uses the URLFetch service directly instead of using the - emulated httplib, and is subject to URLFetch limitations as described in - the App Engine documentation `here - <https://cloud.google.com/appengine/docs/python/urlfetch>`_. - - Notably it will raise an :class:`AppEnginePlatformError` if: - * URLFetch is not available. - * If you attempt to use this on App Engine Flexible, as full socket - support is available. - * If a request size is more than 10 megabytes. 
- * If a response size is more than 32 megabtyes. - * If you use an unsupported request method such as OPTIONS. - - Beyond those cases, it will raise normal urllib3 errors. - """ - - def __init__(self, headers=None, retries=None, validate_certificate=True, - urlfetch_retries=True): - if not urlfetch: - raise AppEnginePlatformError( - "URLFetch is not available in this environment.") - - if is_prod_appengine_mvms(): - raise AppEnginePlatformError( - "Use normal urllib3.PoolManager instead of AppEngineManager" - "on Managed VMs, as using URLFetch is not necessary in " - "this environment.") - - warnings.warn( - "urllib3 is using URLFetch on Google App Engine sandbox instead " - "of sockets. To use sockets directly instead of URLFetch see " - "https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.", - AppEnginePlatformWarning) - - RequestMethods.__init__(self, headers) - self.validate_certificate = validate_certificate - self.urlfetch_retries = urlfetch_retries - - self.retries = retries or Retry.DEFAULT - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - # Return False to re-raise any potential exceptions - return False - - def urlopen(self, method, url, body=None, headers=None, - retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT, - **response_kw): - - retries = self._get_retries(retries, redirect) - - try: - follow_redirects = ( - redirect and - retries.redirect != 0 and - retries.total) - response = urlfetch.fetch( - url, - payload=body, - method=method, - headers=headers or {}, - allow_truncated=False, - follow_redirects=self.urlfetch_retries and follow_redirects, - deadline=self._get_absolute_timeout(timeout), - validate_certificate=self.validate_certificate, - ) - except urlfetch.DeadlineExceededError as e: - raise TimeoutError(self, e) - - except urlfetch.InvalidURLError as e: - if 'too large' in str(e): - raise AppEnginePlatformError( - "URLFetch request too large, URLFetch only " - "supports 
requests up to 10mb in size.", e) - raise ProtocolError(e) - - except urlfetch.DownloadError as e: - if 'Too many redirects' in str(e): - raise MaxRetryError(self, url, reason=e) - raise ProtocolError(e) - - except urlfetch.ResponseTooLargeError as e: - raise AppEnginePlatformError( - "URLFetch response too large, URLFetch only supports" - "responses up to 32mb in size.", e) - - except urlfetch.SSLCertificateError as e: - raise SSLError(e) - - except urlfetch.InvalidMethodError as e: - raise AppEnginePlatformError( - "URLFetch does not support method: %s" % method, e) - - http_response = self._urlfetch_response_to_http_response( - response, retries=retries, **response_kw) - - # Handle redirect? - redirect_location = redirect and http_response.get_redirect_location() - if redirect_location: - # Check for redirect response - if (self.urlfetch_retries and retries.raise_on_redirect): - raise MaxRetryError(self, url, "too many redirects") - else: - if http_response.status == 303: - method = 'GET' - - try: - retries = retries.increment(method, url, response=http_response, _pool=self) - except MaxRetryError: - if retries.raise_on_redirect: - raise MaxRetryError(self, url, "too many redirects") - return http_response - - retries.sleep_for_retry(http_response) - log.debug("Redirecting %s -> %s", url, redirect_location) - redirect_url = urljoin(url, redirect_location) - return self.urlopen( - method, redirect_url, body, headers, - retries=retries, redirect=redirect, - timeout=timeout, **response_kw) - - # Check if we should retry the HTTP response. 
- has_retry_after = bool(http_response.getheader('Retry-After')) - if retries.is_retry(method, http_response.status, has_retry_after): - retries = retries.increment( - method, url, response=http_response, _pool=self) - log.debug("Retry: %s", url) - retries.sleep(http_response) - return self.urlopen( - method, url, - body=body, headers=headers, - retries=retries, redirect=redirect, - timeout=timeout, **response_kw) - - return http_response - - def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw): - - if is_prod_appengine(): - # Production GAE handles deflate encoding automatically, but does - # not remove the encoding header. - content_encoding = urlfetch_resp.headers.get('content-encoding') - - if content_encoding == 'deflate': - del urlfetch_resp.headers['content-encoding'] - - transfer_encoding = urlfetch_resp.headers.get('transfer-encoding') - # We have a full response's content, - # so let's make sure we don't report ourselves as chunked data. - if transfer_encoding == 'chunked': - encodings = transfer_encoding.split(",") - encodings.remove('chunked') - urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings) - - original_response = HTTPResponse( - # In order for decoding to work, we must present the content as - # a file-like object. - body=io.BytesIO(urlfetch_resp.content), - msg=urlfetch_resp.header_msg, - headers=urlfetch_resp.headers, - status=urlfetch_resp.status_code, - **response_kw - ) - - return HTTPResponse( - body=io.BytesIO(urlfetch_resp.content), - headers=urlfetch_resp.headers, - status=urlfetch_resp.status_code, - original_response=original_response, - **response_kw - ) - - def _get_absolute_timeout(self, timeout): - if timeout is Timeout.DEFAULT_TIMEOUT: - return None # Defer to URLFetch's default. 
- if isinstance(timeout, Timeout): - if timeout._read is not None or timeout._connect is not None: - warnings.warn( - "URLFetch does not support granular timeout settings, " - "reverting to total or default URLFetch timeout.", - AppEnginePlatformWarning) - return timeout.total - return timeout - - def _get_retries(self, retries, redirect): - if not isinstance(retries, Retry): - retries = Retry.from_int( - retries, redirect=redirect, default=self.retries) - - if retries.connect or retries.read or retries.redirect: - warnings.warn( - "URLFetch only supports total retries and does not " - "recognize connect, read, or redirect retry parameters.", - AppEnginePlatformWarning) - - return retries - - -# Alias methods from _appengine_environ to maintain public API interface. - -is_appengine = _appengine_environ.is_appengine -is_appengine_sandbox = _appengine_environ.is_appengine_sandbox -is_local_appengine = _appengine_environ.is_local_appengine -is_prod_appengine = _appengine_environ.is_prod_appengine -is_prod_appengine_mvms = _appengine_environ.is_prod_appengine_mvms diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/ntlmpool.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/ntlmpool.py deleted file mode 100644 index 8ea127c..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/ntlmpool.py +++ /dev/null @@ -1,111 +0,0 @@ -""" -NTLM authenticating pool, contributed by erikcederstran - -Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10 -""" -from __future__ import absolute_import - -from logging import getLogger -from ntlm import ntlm - -from .. 
import HTTPSConnectionPool -from ..packages.six.moves.http_client import HTTPSConnection - - -log = getLogger(__name__) - - -class NTLMConnectionPool(HTTPSConnectionPool): - """ - Implements an NTLM authentication version of an urllib3 connection pool - """ - - scheme = 'https' - - def __init__(self, user, pw, authurl, *args, **kwargs): - """ - authurl is a random URL on the server that is protected by NTLM. - user is the Windows user, probably in the DOMAIN\\username format. - pw is the password for the user. - """ - super(NTLMConnectionPool, self).__init__(*args, **kwargs) - self.authurl = authurl - self.rawuser = user - user_parts = user.split('\\', 1) - self.domain = user_parts[0].upper() - self.user = user_parts[1] - self.pw = pw - - def _new_conn(self): - # Performs the NTLM handshake that secures the connection. The socket - # must be kept open while requests are performed. - self.num_connections += 1 - log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s', - self.num_connections, self.host, self.authurl) - - headers = {'Connection': 'Keep-Alive'} - req_header = 'Authorization' - resp_header = 'www-authenticate' - - conn = HTTPSConnection(host=self.host, port=self.port) - - # Send negotiation message - headers[req_header] = ( - 'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser)) - log.debug('Request headers: %s', headers) - conn.request('GET', self.authurl, None, headers) - res = conn.getresponse() - reshdr = dict(res.getheaders()) - log.debug('Response status: %s %s', res.status, res.reason) - log.debug('Response headers: %s', reshdr) - log.debug('Response data: %s [...]', res.read(100)) - - # Remove the reference to the socket, so that it can not be closed by - # the response object (we want to keep the socket open) - res.fp = None - - # Server should respond with a challenge message - auth_header_values = reshdr[resp_header].split(', ') - auth_header_value = None - for s in auth_header_values: - if s[:5] == 'NTLM ': - auth_header_value 
= s[5:] - if auth_header_value is None: - raise Exception('Unexpected %s response header: %s' % - (resp_header, reshdr[resp_header])) - - # Send authentication message - ServerChallenge, NegotiateFlags = \ - ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value) - auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge, - self.user, - self.domain, - self.pw, - NegotiateFlags) - headers[req_header] = 'NTLM %s' % auth_msg - log.debug('Request headers: %s', headers) - conn.request('GET', self.authurl, None, headers) - res = conn.getresponse() - log.debug('Response status: %s %s', res.status, res.reason) - log.debug('Response headers: %s', dict(res.getheaders())) - log.debug('Response data: %s [...]', res.read()[:100]) - if res.status != 200: - if res.status == 401: - raise Exception('Server rejected request: wrong ' - 'username or password') - raise Exception('Wrong server response: %s %s' % - (res.status, res.reason)) - - res.fp = None - log.debug('Connection established') - return conn - - def urlopen(self, method, url, body=None, headers=None, retries=3, - redirect=True, assert_same_host=True): - if headers is None: - headers = {} - headers['Connection'] = 'Keep-Alive' - return super(NTLMConnectionPool, self).urlopen(method, url, body, - headers, retries, - redirect, - assert_same_host) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/pyopenssl.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/pyopenssl.py deleted file mode 100644 index 363667c..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/pyopenssl.py +++ /dev/null @@ -1,466 +0,0 @@ -""" -SSL with SNI_-support for Python 2. Follow these instructions if you would -like to verify SSL certificates in Python 2. Note, the default libraries do -*not* do certificate checking; you need to do additional work to validate -certificates yourself. 
- -This needs the following packages installed: - -* pyOpenSSL (tested with 16.0.0) -* cryptography (minimum 1.3.4, from pyopenssl) -* idna (minimum 2.0, from cryptography) - -However, pyopenssl depends on cryptography, which depends on idna, so while we -use all three directly here we end up having relatively few packages required. - -You can install them with the following command: - - pip install pyopenssl cryptography idna - -To activate certificate checking, call -:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code -before you begin making HTTP requests. This can be done in a ``sitecustomize`` -module, or at any other time before your application begins using ``urllib3``, -like this:: - - try: - import urllib3.contrib.pyopenssl - urllib3.contrib.pyopenssl.inject_into_urllib3() - except ImportError: - pass - -Now you can use :mod:`urllib3` as you normally would, and it will support SNI -when the required modules are installed. - -Activating this module also has the positive side effect of disabling SSL/TLS -compression in Python 2 (see `CRIME attack`_). - -If you want to configure the default list of supported cipher suites, you can -set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable. - -.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication -.. 
_crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit) -""" -from __future__ import absolute_import - -import OpenSSL.SSL -from cryptography import x509 -from cryptography.hazmat.backends.openssl import backend as openssl_backend -from cryptography.hazmat.backends.openssl.x509 import _Certificate -try: - from cryptography.x509 import UnsupportedExtension -except ImportError: - # UnsupportedExtension is gone in cryptography >= 2.1.0 - class UnsupportedExtension(Exception): - pass - -from socket import timeout, error as SocketError -from io import BytesIO - -try: # Platform-specific: Python 2 - from socket import _fileobject -except ImportError: # Platform-specific: Python 3 - _fileobject = None - from ..packages.backports.makefile import backport_makefile - -import logging -import ssl -from ..packages import six -import sys - -from .. import util - -__all__ = ['inject_into_urllib3', 'extract_from_urllib3'] - -# SNI always works. -HAS_SNI = True - -# Map from urllib3 to PyOpenSSL compatible parameter-values. 
-_openssl_versions = { - ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD, - ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD, -} - -if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'): - _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD - -if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'): - _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD - -try: - _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD}) -except AttributeError: - pass - -_stdlib_to_openssl_verify = { - ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE, - ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER, - ssl.CERT_REQUIRED: - OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT, -} -_openssl_to_stdlib_verify = dict( - (v, k) for k, v in _stdlib_to_openssl_verify.items() -) - -# OpenSSL will only write 16K at a time -SSL_WRITE_BLOCKSIZE = 16384 - -orig_util_HAS_SNI = util.HAS_SNI -orig_util_SSLContext = util.ssl_.SSLContext - - -log = logging.getLogger(__name__) - - -def inject_into_urllib3(): - 'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.' - - _validate_dependencies_met() - - util.ssl_.SSLContext = PyOpenSSLContext - util.HAS_SNI = HAS_SNI - util.ssl_.HAS_SNI = HAS_SNI - util.IS_PYOPENSSL = True - util.ssl_.IS_PYOPENSSL = True - - -def extract_from_urllib3(): - 'Undo monkey-patching by :func:`inject_into_urllib3`.' - - util.ssl_.SSLContext = orig_util_SSLContext - util.HAS_SNI = orig_util_HAS_SNI - util.ssl_.HAS_SNI = orig_util_HAS_SNI - util.IS_PYOPENSSL = False - util.ssl_.IS_PYOPENSSL = False - - -def _validate_dependencies_met(): - """ - Verifies that PyOpenSSL's package-level dependencies have been met. - Throws `ImportError` if they are not met. 
- """ - # Method added in `cryptography==1.1`; not available in older versions - from cryptography.x509.extensions import Extensions - if getattr(Extensions, "get_extension_for_class", None) is None: - raise ImportError("'cryptography' module missing required functionality. " - "Try upgrading to v1.3.4 or newer.") - - # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509 - # attribute is only present on those versions. - from OpenSSL.crypto import X509 - x509 = X509() - if getattr(x509, "_x509", None) is None: - raise ImportError("'pyOpenSSL' module missing required functionality. " - "Try upgrading to v0.14 or newer.") - - -def _dnsname_to_stdlib(name): - """ - Converts a dNSName SubjectAlternativeName field to the form used by the - standard library on the given Python version. - - Cryptography produces a dNSName as a unicode string that was idna-decoded - from ASCII bytes. We need to idna-encode that string to get it back, and - then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib - uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8). - - If the name cannot be idna-encoded then we return None signalling that - the name given should be skipped. - """ - def idna_encode(name): - """ - Borrowed wholesale from the Python Cryptography Project. It turns out - that we can't just safely call `idna.encode`: it can explode for - wildcard names. This avoids that problem. - """ - from pip._vendor import idna - - try: - for prefix in [u'*.', u'.']: - if name.startswith(prefix): - name = name[len(prefix):] - return prefix.encode('ascii') + idna.encode(name) - return idna.encode(name) - except idna.core.IDNAError: - return None - - name = idna_encode(name) - if name is None: - return None - elif sys.version_info >= (3, 0): - name = name.decode('utf-8') - return name - - -def get_subj_alt_name(peer_cert): - """ - Given an PyOpenSSL certificate, provides all the subject alternative names. 
- """ - # Pass the cert to cryptography, which has much better APIs for this. - if hasattr(peer_cert, "to_cryptography"): - cert = peer_cert.to_cryptography() - else: - # This is technically using private APIs, but should work across all - # relevant versions before PyOpenSSL got a proper API for this. - cert = _Certificate(openssl_backend, peer_cert._x509) - - # We want to find the SAN extension. Ask Cryptography to locate it (it's - # faster than looping in Python) - try: - ext = cert.extensions.get_extension_for_class( - x509.SubjectAlternativeName - ).value - except x509.ExtensionNotFound: - # No such extension, return the empty list. - return [] - except (x509.DuplicateExtension, UnsupportedExtension, - x509.UnsupportedGeneralNameType, UnicodeError) as e: - # A problem has been found with the quality of the certificate. Assume - # no SAN field is present. - log.warning( - "A problem was encountered with the certificate that prevented " - "urllib3 from finding the SubjectAlternativeName field. This can " - "affect certificate validation. The error was %s", - e, - ) - return [] - - # We want to return dNSName and iPAddress fields. We need to cast the IPs - # back to strings because the match_hostname function wants them as - # strings. - # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8 - # decoded. This is pretty frustrating, but that's what the standard library - # does with certificates, and so we need to attempt to do the same. - # We also want to skip over names which cannot be idna encoded. - names = [ - ('DNS', name) for name in map(_dnsname_to_stdlib, ext.get_values_for_type(x509.DNSName)) - if name is not None - ] - names.extend( - ('IP Address', str(name)) - for name in ext.get_values_for_type(x509.IPAddress) - ) - - return names - - -class WrappedSocket(object): - '''API-compatibility wrapper for Python OpenSSL's Connection-class. - - Note: _makefile_refs, _drop() and _reuse() are needed for the garbage - collector of pypy. 
- ''' - - def __init__(self, connection, socket, suppress_ragged_eofs=True): - self.connection = connection - self.socket = socket - self.suppress_ragged_eofs = suppress_ragged_eofs - self._makefile_refs = 0 - self._closed = False - - def fileno(self): - return self.socket.fileno() - - # Copy-pasted from Python 3.5 source code - def _decref_socketios(self): - if self._makefile_refs > 0: - self._makefile_refs -= 1 - if self._closed: - self.close() - - def recv(self, *args, **kwargs): - try: - data = self.connection.recv(*args, **kwargs) - except OpenSSL.SSL.SysCallError as e: - if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'): - return b'' - else: - raise SocketError(str(e)) - except OpenSSL.SSL.ZeroReturnError as e: - if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN: - return b'' - else: - raise - except OpenSSL.SSL.WantReadError: - if not util.wait_for_read(self.socket, self.socket.gettimeout()): - raise timeout('The read operation timed out') - else: - return self.recv(*args, **kwargs) - else: - return data - - def recv_into(self, *args, **kwargs): - try: - return self.connection.recv_into(*args, **kwargs) - except OpenSSL.SSL.SysCallError as e: - if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'): - return 0 - else: - raise SocketError(str(e)) - except OpenSSL.SSL.ZeroReturnError as e: - if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN: - return 0 - else: - raise - except OpenSSL.SSL.WantReadError: - if not util.wait_for_read(self.socket, self.socket.gettimeout()): - raise timeout('The read operation timed out') - else: - return self.recv_into(*args, **kwargs) - - def settimeout(self, timeout): - return self.socket.settimeout(timeout) - - def _send_until_done(self, data): - while True: - try: - return self.connection.send(data) - except OpenSSL.SSL.WantWriteError: - if not util.wait_for_write(self.socket, self.socket.gettimeout()): - raise timeout() - continue - except OpenSSL.SSL.SysCallError 
as e: - raise SocketError(str(e)) - - def sendall(self, data): - total_sent = 0 - while total_sent < len(data): - sent = self._send_until_done(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE]) - total_sent += sent - - def shutdown(self): - # FIXME rethrow compatible exceptions should we ever use this - self.connection.shutdown() - - def close(self): - if self._makefile_refs < 1: - try: - self._closed = True - return self.connection.close() - except OpenSSL.SSL.Error: - return - else: - self._makefile_refs -= 1 - - def getpeercert(self, binary_form=False): - x509 = self.connection.get_peer_certificate() - - if not x509: - return x509 - - if binary_form: - return OpenSSL.crypto.dump_certificate( - OpenSSL.crypto.FILETYPE_ASN1, - x509) - - return { - 'subject': ( - (('commonName', x509.get_subject().CN),), - ), - 'subjectAltName': get_subj_alt_name(x509) - } - - def _reuse(self): - self._makefile_refs += 1 - - def _drop(self): - if self._makefile_refs < 1: - self.close() - else: - self._makefile_refs -= 1 - - -if _fileobject: # Platform-specific: Python 2 - def makefile(self, mode, bufsize=-1): - self._makefile_refs += 1 - return _fileobject(self, mode, bufsize, close=True) -else: # Platform-specific: Python 3 - makefile = backport_makefile - -WrappedSocket.makefile = makefile - - -class PyOpenSSLContext(object): - """ - I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible - for translating the interface of the standard library ``SSLContext`` object - to calls into PyOpenSSL. 
- """ - def __init__(self, protocol): - self.protocol = _openssl_versions[protocol] - self._ctx = OpenSSL.SSL.Context(self.protocol) - self._options = 0 - self.check_hostname = False - - @property - def options(self): - return self._options - - @options.setter - def options(self, value): - self._options = value - self._ctx.set_options(value) - - @property - def verify_mode(self): - return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()] - - @verify_mode.setter - def verify_mode(self, value): - self._ctx.set_verify( - _stdlib_to_openssl_verify[value], - _verify_callback - ) - - def set_default_verify_paths(self): - self._ctx.set_default_verify_paths() - - def set_ciphers(self, ciphers): - if isinstance(ciphers, six.text_type): - ciphers = ciphers.encode('utf-8') - self._ctx.set_cipher_list(ciphers) - - def load_verify_locations(self, cafile=None, capath=None, cadata=None): - if cafile is not None: - cafile = cafile.encode('utf-8') - if capath is not None: - capath = capath.encode('utf-8') - self._ctx.load_verify_locations(cafile, capath) - if cadata is not None: - self._ctx.load_verify_locations(BytesIO(cadata)) - - def load_cert_chain(self, certfile, keyfile=None, password=None): - self._ctx.use_certificate_chain_file(certfile) - if password is not None: - self._ctx.set_passwd_cb(lambda max_length, prompt_twice, userdata: password) - self._ctx.use_privatekey_file(keyfile or certfile) - - def wrap_socket(self, sock, server_side=False, - do_handshake_on_connect=True, suppress_ragged_eofs=True, - server_hostname=None): - cnx = OpenSSL.SSL.Connection(self._ctx, sock) - - if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3 - server_hostname = server_hostname.encode('utf-8') - - if server_hostname is not None: - cnx.set_tlsext_host_name(server_hostname) - - cnx.set_connect_state() - - while True: - try: - cnx.do_handshake() - except OpenSSL.SSL.WantReadError: - if not util.wait_for_read(sock, sock.gettimeout()): - raise timeout('select 
timed out') - continue - except OpenSSL.SSL.Error as e: - raise ssl.SSLError('bad handshake: %r' % e) - break - - return WrappedSocket(cnx, sock) - - -def _verify_callback(cnx, x509, err_no, err_depth, return_code): - return err_no == 0 diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/securetransport.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/securetransport.py deleted file mode 100644 index 77cb59e..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/securetransport.py +++ /dev/null @@ -1,804 +0,0 @@ -""" -SecureTranport support for urllib3 via ctypes. - -This makes platform-native TLS available to urllib3 users on macOS without the -use of a compiler. This is an important feature because the Python Package -Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL -that ships with macOS is not capable of doing TLSv1.2. The only way to resolve -this is to give macOS users an alternative solution to the problem, and that -solution is to use SecureTransport. - -We use ctypes here because this solution must not require a compiler. That's -because pip is not allowed to require a compiler either. - -This is not intended to be a seriously long-term solution to this problem. -The hope is that PEP 543 will eventually solve this issue for us, at which -point we can retire this contrib module. But in the short term, we need to -solve the impending tire fire that is Python on Mac without this kind of -contrib module. So...here we are. - -To use this module, simply import and inject it:: - - import urllib3.contrib.securetransport - urllib3.contrib.securetransport.inject_into_urllib3() - -Happy TLSing! -""" -from __future__ import absolute_import - -import contextlib -import ctypes -import errno -import os.path -import shutil -import socket -import ssl -import threading -import weakref - -from .. 
import util -from ._securetransport.bindings import ( - Security, SecurityConst, CoreFoundation -) -from ._securetransport.low_level import ( - _assert_no_error, _cert_array_from_pem, _temporary_keychain, - _load_client_cert_chain -) - -try: # Platform-specific: Python 2 - from socket import _fileobject -except ImportError: # Platform-specific: Python 3 - _fileobject = None - from ..packages.backports.makefile import backport_makefile - -__all__ = ['inject_into_urllib3', 'extract_from_urllib3'] - -# SNI always works -HAS_SNI = True - -orig_util_HAS_SNI = util.HAS_SNI -orig_util_SSLContext = util.ssl_.SSLContext - -# This dictionary is used by the read callback to obtain a handle to the -# calling wrapped socket. This is a pretty silly approach, but for now it'll -# do. I feel like I should be able to smuggle a handle to the wrapped socket -# directly in the SSLConnectionRef, but for now this approach will work I -# guess. -# -# We need to lock around this structure for inserts, but we don't do it for -# reads/writes in the callbacks. The reasoning here goes as follows: -# -# 1. It is not possible to call into the callbacks before the dictionary is -# populated, so once in the callback the id must be in the dictionary. -# 2. The callbacks don't mutate the dictionary, they only read from it, and -# so cannot conflict with any of the insertions. -# -# This is good: if we had to lock in the callbacks we'd drastically slow down -# the performance of this code. -_connection_refs = weakref.WeakValueDictionary() -_connection_ref_lock = threading.Lock() - -# Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over -# for no better reason than we need *a* limit, and this one is right there. -SSL_WRITE_BLOCKSIZE = 16384 - -# This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to -# individual cipher suites. We need to do this because this is how -# SecureTransport wants them. 
-CIPHER_SUITES = [ - SecurityConst.TLS_AES_256_GCM_SHA384, - SecurityConst.TLS_CHACHA20_POLY1305_SHA256, - SecurityConst.TLS_AES_128_GCM_SHA256, - SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - SecurityConst.TLS_DHE_DSS_WITH_AES_256_GCM_SHA384, - SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, - SecurityConst.TLS_DHE_DSS_WITH_AES_128_GCM_SHA256, - SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, - SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, - SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, - SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, - SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA, - SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA, - SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, - SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, - SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, - SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA, - SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA, - SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384, - SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256, - SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256, - SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256, - SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA, - SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA, -] - -# Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of -# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version. 
-_protocol_to_min_max = { - ssl.PROTOCOL_SSLv23: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12), -} - -if hasattr(ssl, "PROTOCOL_SSLv2"): - _protocol_to_min_max[ssl.PROTOCOL_SSLv2] = ( - SecurityConst.kSSLProtocol2, SecurityConst.kSSLProtocol2 - ) -if hasattr(ssl, "PROTOCOL_SSLv3"): - _protocol_to_min_max[ssl.PROTOCOL_SSLv3] = ( - SecurityConst.kSSLProtocol3, SecurityConst.kSSLProtocol3 - ) -if hasattr(ssl, "PROTOCOL_TLSv1"): - _protocol_to_min_max[ssl.PROTOCOL_TLSv1] = ( - SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol1 - ) -if hasattr(ssl, "PROTOCOL_TLSv1_1"): - _protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = ( - SecurityConst.kTLSProtocol11, SecurityConst.kTLSProtocol11 - ) -if hasattr(ssl, "PROTOCOL_TLSv1_2"): - _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = ( - SecurityConst.kTLSProtocol12, SecurityConst.kTLSProtocol12 - ) -if hasattr(ssl, "PROTOCOL_TLS"): - _protocol_to_min_max[ssl.PROTOCOL_TLS] = _protocol_to_min_max[ssl.PROTOCOL_SSLv23] - - -def inject_into_urllib3(): - """ - Monkey-patch urllib3 with SecureTransport-backed SSL-support. - """ - util.ssl_.SSLContext = SecureTransportContext - util.HAS_SNI = HAS_SNI - util.ssl_.HAS_SNI = HAS_SNI - util.IS_SECURETRANSPORT = True - util.ssl_.IS_SECURETRANSPORT = True - - -def extract_from_urllib3(): - """ - Undo monkey-patching by :func:`inject_into_urllib3`. - """ - util.ssl_.SSLContext = orig_util_SSLContext - util.HAS_SNI = orig_util_HAS_SNI - util.ssl_.HAS_SNI = orig_util_HAS_SNI - util.IS_SECURETRANSPORT = False - util.ssl_.IS_SECURETRANSPORT = False - - -def _read_callback(connection_id, data_buffer, data_length_pointer): - """ - SecureTransport read callback. This is called by ST to request that data - be returned from the socket. 
- """ - wrapped_socket = None - try: - wrapped_socket = _connection_refs.get(connection_id) - if wrapped_socket is None: - return SecurityConst.errSSLInternal - base_socket = wrapped_socket.socket - - requested_length = data_length_pointer[0] - - timeout = wrapped_socket.gettimeout() - error = None - read_count = 0 - - try: - while read_count < requested_length: - if timeout is None or timeout >= 0: - if not util.wait_for_read(base_socket, timeout): - raise socket.error(errno.EAGAIN, 'timed out') - - remaining = requested_length - read_count - buffer = (ctypes.c_char * remaining).from_address( - data_buffer + read_count - ) - chunk_size = base_socket.recv_into(buffer, remaining) - read_count += chunk_size - if not chunk_size: - if not read_count: - return SecurityConst.errSSLClosedGraceful - break - except (socket.error) as e: - error = e.errno - - if error is not None and error != errno.EAGAIN: - data_length_pointer[0] = read_count - if error == errno.ECONNRESET or error == errno.EPIPE: - return SecurityConst.errSSLClosedAbort - raise - - data_length_pointer[0] = read_count - - if read_count != requested_length: - return SecurityConst.errSSLWouldBlock - - return 0 - except Exception as e: - if wrapped_socket is not None: - wrapped_socket._exception = e - return SecurityConst.errSSLInternal - - -def _write_callback(connection_id, data_buffer, data_length_pointer): - """ - SecureTransport write callback. This is called by ST to request that data - actually be sent on the network. 
- """ - wrapped_socket = None - try: - wrapped_socket = _connection_refs.get(connection_id) - if wrapped_socket is None: - return SecurityConst.errSSLInternal - base_socket = wrapped_socket.socket - - bytes_to_write = data_length_pointer[0] - data = ctypes.string_at(data_buffer, bytes_to_write) - - timeout = wrapped_socket.gettimeout() - error = None - sent = 0 - - try: - while sent < bytes_to_write: - if timeout is None or timeout >= 0: - if not util.wait_for_write(base_socket, timeout): - raise socket.error(errno.EAGAIN, 'timed out') - chunk_sent = base_socket.send(data) - sent += chunk_sent - - # This has some needless copying here, but I'm not sure there's - # much value in optimising this data path. - data = data[chunk_sent:] - except (socket.error) as e: - error = e.errno - - if error is not None and error != errno.EAGAIN: - data_length_pointer[0] = sent - if error == errno.ECONNRESET or error == errno.EPIPE: - return SecurityConst.errSSLClosedAbort - raise - - data_length_pointer[0] = sent - - if sent != bytes_to_write: - return SecurityConst.errSSLWouldBlock - - return 0 - except Exception as e: - if wrapped_socket is not None: - wrapped_socket._exception = e - return SecurityConst.errSSLInternal - - -# We need to keep these two objects references alive: if they get GC'd while -# in use then SecureTransport could attempt to call a function that is in freed -# memory. That would be...uh...bad. Yeah, that's the word. Bad. -_read_callback_pointer = Security.SSLReadFunc(_read_callback) -_write_callback_pointer = Security.SSLWriteFunc(_write_callback) - - -class WrappedSocket(object): - """ - API-compatibility wrapper for Python's OpenSSL wrapped socket object. - - Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage - collector of PyPy. 
- """ - def __init__(self, socket): - self.socket = socket - self.context = None - self._makefile_refs = 0 - self._closed = False - self._exception = None - self._keychain = None - self._keychain_dir = None - self._client_cert_chain = None - - # We save off the previously-configured timeout and then set it to - # zero. This is done because we use select and friends to handle the - # timeouts, but if we leave the timeout set on the lower socket then - # Python will "kindly" call select on that socket again for us. Avoid - # that by forcing the timeout to zero. - self._timeout = self.socket.gettimeout() - self.socket.settimeout(0) - - @contextlib.contextmanager - def _raise_on_error(self): - """ - A context manager that can be used to wrap calls that do I/O from - SecureTransport. If any of the I/O callbacks hit an exception, this - context manager will correctly propagate the exception after the fact. - This avoids silently swallowing those exceptions. - - It also correctly forces the socket closed. - """ - self._exception = None - - # We explicitly don't catch around this yield because in the unlikely - # event that an exception was hit in the block we don't want to swallow - # it. - yield - if self._exception is not None: - exception, self._exception = self._exception, None - self.close() - raise exception - - def _set_ciphers(self): - """ - Sets up the allowed ciphers. By default this matches the set in - util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is done - custom and doesn't allow changing at this time, mostly because parsing - OpenSSL cipher strings is going to be a freaking nightmare. - """ - ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES) - result = Security.SSLSetEnabledCiphers( - self.context, ciphers, len(CIPHER_SUITES) - ) - _assert_no_error(result) - - def _custom_validate(self, verify, trust_bundle): - """ - Called when we have set custom validation. 
We do this in two cases: - first, when cert validation is entirely disabled; and second, when - using a custom trust DB. - """ - # If we disabled cert validation, just say: cool. - if not verify: - return - - # We want data in memory, so load it up. - if os.path.isfile(trust_bundle): - with open(trust_bundle, 'rb') as f: - trust_bundle = f.read() - - cert_array = None - trust = Security.SecTrustRef() - - try: - # Get a CFArray that contains the certs we want. - cert_array = _cert_array_from_pem(trust_bundle) - - # Ok, now the hard part. We want to get the SecTrustRef that ST has - # created for this connection, shove our CAs into it, tell ST to - # ignore everything else it knows, and then ask if it can build a - # chain. This is a buuuunch of code. - result = Security.SSLCopyPeerTrust( - self.context, ctypes.byref(trust) - ) - _assert_no_error(result) - if not trust: - raise ssl.SSLError("Failed to copy trust reference") - - result = Security.SecTrustSetAnchorCertificates(trust, cert_array) - _assert_no_error(result) - - result = Security.SecTrustSetAnchorCertificatesOnly(trust, True) - _assert_no_error(result) - - trust_result = Security.SecTrustResultType() - result = Security.SecTrustEvaluate( - trust, ctypes.byref(trust_result) - ) - _assert_no_error(result) - finally: - if trust: - CoreFoundation.CFRelease(trust) - - if cert_array is not None: - CoreFoundation.CFRelease(cert_array) - - # Ok, now we can look at what the result was. - successes = ( - SecurityConst.kSecTrustResultUnspecified, - SecurityConst.kSecTrustResultProceed - ) - if trust_result.value not in successes: - raise ssl.SSLError( - "certificate verify failed, error code: %d" % - trust_result.value - ) - - def handshake(self, - server_hostname, - verify, - trust_bundle, - min_version, - max_version, - client_cert, - client_key, - client_key_passphrase): - """ - Actually performs the TLS handshake. This is run automatically by - wrapped socket, and shouldn't be needed in user code. 
- """ - # First, we do the initial bits of connection setup. We need to create - # a context, set its I/O funcs, and set the connection reference. - self.context = Security.SSLCreateContext( - None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType - ) - result = Security.SSLSetIOFuncs( - self.context, _read_callback_pointer, _write_callback_pointer - ) - _assert_no_error(result) - - # Here we need to compute the handle to use. We do this by taking the - # id of self modulo 2**31 - 1. If this is already in the dictionary, we - # just keep incrementing by one until we find a free space. - with _connection_ref_lock: - handle = id(self) % 2147483647 - while handle in _connection_refs: - handle = (handle + 1) % 2147483647 - _connection_refs[handle] = self - - result = Security.SSLSetConnection(self.context, handle) - _assert_no_error(result) - - # If we have a server hostname, we should set that too. - if server_hostname: - if not isinstance(server_hostname, bytes): - server_hostname = server_hostname.encode('utf-8') - - result = Security.SSLSetPeerDomainName( - self.context, server_hostname, len(server_hostname) - ) - _assert_no_error(result) - - # Setup the ciphers. - self._set_ciphers() - - # Set the minimum and maximum TLS versions. - result = Security.SSLSetProtocolVersionMin(self.context, min_version) - _assert_no_error(result) - result = Security.SSLSetProtocolVersionMax(self.context, max_version) - _assert_no_error(result) - - # If there's a trust DB, we need to use it. We do that by telling - # SecureTransport to break on server auth. We also do that if we don't - # want to validate the certs at all: we just won't actually do any - # authing in that case. - if not verify or trust_bundle is not None: - result = Security.SSLSetSessionOption( - self.context, - SecurityConst.kSSLSessionOptionBreakOnServerAuth, - True - ) - _assert_no_error(result) - - # If there's a client cert, we need to use it. 
- if client_cert: - self._keychain, self._keychain_dir = _temporary_keychain() - self._client_cert_chain = _load_client_cert_chain( - self._keychain, client_cert, client_key - ) - result = Security.SSLSetCertificate( - self.context, self._client_cert_chain - ) - _assert_no_error(result) - - while True: - with self._raise_on_error(): - result = Security.SSLHandshake(self.context) - - if result == SecurityConst.errSSLWouldBlock: - raise socket.timeout("handshake timed out") - elif result == SecurityConst.errSSLServerAuthCompleted: - self._custom_validate(verify, trust_bundle) - continue - else: - _assert_no_error(result) - break - - def fileno(self): - return self.socket.fileno() - - # Copy-pasted from Python 3.5 source code - def _decref_socketios(self): - if self._makefile_refs > 0: - self._makefile_refs -= 1 - if self._closed: - self.close() - - def recv(self, bufsiz): - buffer = ctypes.create_string_buffer(bufsiz) - bytes_read = self.recv_into(buffer, bufsiz) - data = buffer[:bytes_read] - return data - - def recv_into(self, buffer, nbytes=None): - # Read short on EOF. - if self._closed: - return 0 - - if nbytes is None: - nbytes = len(buffer) - - buffer = (ctypes.c_char * nbytes).from_buffer(buffer) - processed_bytes = ctypes.c_size_t(0) - - with self._raise_on_error(): - result = Security.SSLRead( - self.context, buffer, nbytes, ctypes.byref(processed_bytes) - ) - - # There are some result codes that we want to treat as "not always - # errors". Specifically, those are errSSLWouldBlock, - # errSSLClosedGraceful, and errSSLClosedNoNotify. - if (result == SecurityConst.errSSLWouldBlock): - # If we didn't process any bytes, then this was just a time out. - # However, we can get errSSLWouldBlock in situations when we *did* - # read some data, and in those cases we should just read "short" - # and return. - if processed_bytes.value == 0: - # Timed out, no data read. 
- raise socket.timeout("recv timed out") - elif result in (SecurityConst.errSSLClosedGraceful, SecurityConst.errSSLClosedNoNotify): - # The remote peer has closed this connection. We should do so as - # well. Note that we don't actually return here because in - # principle this could actually be fired along with return data. - # It's unlikely though. - self.close() - else: - _assert_no_error(result) - - # Ok, we read and probably succeeded. We should return whatever data - # was actually read. - return processed_bytes.value - - def settimeout(self, timeout): - self._timeout = timeout - - def gettimeout(self): - return self._timeout - - def send(self, data): - processed_bytes = ctypes.c_size_t(0) - - with self._raise_on_error(): - result = Security.SSLWrite( - self.context, data, len(data), ctypes.byref(processed_bytes) - ) - - if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0: - # Timed out - raise socket.timeout("send timed out") - else: - _assert_no_error(result) - - # We sent, and probably succeeded. Tell them how much we sent. - return processed_bytes.value - - def sendall(self, data): - total_sent = 0 - while total_sent < len(data): - sent = self.send(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE]) - total_sent += sent - - def shutdown(self): - with self._raise_on_error(): - Security.SSLClose(self.context) - - def close(self): - # TODO: should I do clean shutdown here? Do I have to? 
- if self._makefile_refs < 1: - self._closed = True - if self.context: - CoreFoundation.CFRelease(self.context) - self.context = None - if self._client_cert_chain: - CoreFoundation.CFRelease(self._client_cert_chain) - self._client_cert_chain = None - if self._keychain: - Security.SecKeychainDelete(self._keychain) - CoreFoundation.CFRelease(self._keychain) - shutil.rmtree(self._keychain_dir) - self._keychain = self._keychain_dir = None - return self.socket.close() - else: - self._makefile_refs -= 1 - - def getpeercert(self, binary_form=False): - # Urgh, annoying. - # - # Here's how we do this: - # - # 1. Call SSLCopyPeerTrust to get hold of the trust object for this - # connection. - # 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf. - # 3. To get the CN, call SecCertificateCopyCommonName and process that - # string so that it's of the appropriate type. - # 4. To get the SAN, we need to do something a bit more complex: - # a. Call SecCertificateCopyValues to get the data, requesting - # kSecOIDSubjectAltName. - # b. Mess about with this dictionary to try to get the SANs out. - # - # This is gross. Really gross. It's going to be a few hundred LoC extra - # just to repeat something that SecureTransport can *already do*. So my - # operating assumption at this time is that what we want to do is - # instead to just flag to urllib3 that it shouldn't do its own hostname - # validation when using SecureTransport. - if not binary_form: - raise ValueError( - "SecureTransport only supports dumping binary certs" - ) - trust = Security.SecTrustRef() - certdata = None - der_bytes = None - - try: - # Grab the trust store. - result = Security.SSLCopyPeerTrust( - self.context, ctypes.byref(trust) - ) - _assert_no_error(result) - if not trust: - # Probably we haven't done the handshake yet. No biggie. - return None - - cert_count = Security.SecTrustGetCertificateCount(trust) - if not cert_count: - # Also a case that might happen if we haven't handshaked. 
- # Handshook? Handshaken? - return None - - leaf = Security.SecTrustGetCertificateAtIndex(trust, 0) - assert leaf - - # Ok, now we want the DER bytes. - certdata = Security.SecCertificateCopyData(leaf) - assert certdata - - data_length = CoreFoundation.CFDataGetLength(certdata) - data_buffer = CoreFoundation.CFDataGetBytePtr(certdata) - der_bytes = ctypes.string_at(data_buffer, data_length) - finally: - if certdata: - CoreFoundation.CFRelease(certdata) - if trust: - CoreFoundation.CFRelease(trust) - - return der_bytes - - def _reuse(self): - self._makefile_refs += 1 - - def _drop(self): - if self._makefile_refs < 1: - self.close() - else: - self._makefile_refs -= 1 - - -if _fileobject: # Platform-specific: Python 2 - def makefile(self, mode, bufsize=-1): - self._makefile_refs += 1 - return _fileobject(self, mode, bufsize, close=True) -else: # Platform-specific: Python 3 - def makefile(self, mode="r", buffering=None, *args, **kwargs): - # We disable buffering with SecureTransport because it conflicts with - # the buffering that ST does internally (see issue #1153 for more). - buffering = 0 - return backport_makefile(self, mode, buffering, *args, **kwargs) - -WrappedSocket.makefile = makefile - - -class SecureTransportContext(object): - """ - I am a wrapper class for the SecureTransport library, to translate the - interface of the standard library ``SSLContext`` object to calls into - SecureTransport. - """ - def __init__(self, protocol): - self._min_version, self._max_version = _protocol_to_min_max[protocol] - self._options = 0 - self._verify = False - self._trust_bundle = None - self._client_cert = None - self._client_key = None - self._client_key_passphrase = None - - @property - def check_hostname(self): - """ - SecureTransport cannot have its hostname checking disabled. For more, - see the comment on getpeercert() in this file. 
- """ - return True - - @check_hostname.setter - def check_hostname(self, value): - """ - SecureTransport cannot have its hostname checking disabled. For more, - see the comment on getpeercert() in this file. - """ - pass - - @property - def options(self): - # TODO: Well, crap. - # - # So this is the bit of the code that is the most likely to cause us - # trouble. Essentially we need to enumerate all of the SSL options that - # users might want to use and try to see if we can sensibly translate - # them, or whether we should just ignore them. - return self._options - - @options.setter - def options(self, value): - # TODO: Update in line with above. - self._options = value - - @property - def verify_mode(self): - return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE - - @verify_mode.setter - def verify_mode(self, value): - self._verify = True if value == ssl.CERT_REQUIRED else False - - def set_default_verify_paths(self): - # So, this has to do something a bit weird. Specifically, what it does - # is nothing. - # - # This means that, if we had previously had load_verify_locations - # called, this does not undo that. We need to do that because it turns - # out that the rest of the urllib3 code will attempt to load the - # default verify paths if it hasn't been told about any paths, even if - # the context itself was sometime earlier. We resolve that by just - # ignoring it. - pass - - def load_default_certs(self): - return self.set_default_verify_paths() - - def set_ciphers(self, ciphers): - # For now, we just require the default cipher string. - if ciphers != util.ssl_.DEFAULT_CIPHERS: - raise ValueError( - "SecureTransport doesn't support custom cipher strings" - ) - - def load_verify_locations(self, cafile=None, capath=None, cadata=None): - # OK, we only really support cadata and cafile. 
- if capath is not None: - raise ValueError( - "SecureTransport does not support cert directories" - ) - - self._trust_bundle = cafile or cadata - - def load_cert_chain(self, certfile, keyfile=None, password=None): - self._client_cert = certfile - self._client_key = keyfile - self._client_cert_passphrase = password - - def wrap_socket(self, sock, server_side=False, - do_handshake_on_connect=True, suppress_ragged_eofs=True, - server_hostname=None): - # So, what do we do here? Firstly, we assert some properties. This is a - # stripped down shim, so there is some functionality we don't support. - # See PEP 543 for the real deal. - assert not server_side - assert do_handshake_on_connect - assert suppress_ragged_eofs - - # Ok, we're good to go. Now we want to create the wrapped socket object - # and store it in the appropriate place. - wrapped_socket = WrappedSocket(sock) - - # Now we can handshake - wrapped_socket.handshake( - server_hostname, self._verify, self._trust_bundle, - self._min_version, self._max_version, self._client_cert, - self._client_key, self._client_key_passphrase - ) - return wrapped_socket diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/socks.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/socks.py deleted file mode 100644 index 811e312..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/contrib/socks.py +++ /dev/null @@ -1,192 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module contains provisional support for SOCKS proxies from within -urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and -SOCKS5. To enable its functionality, either install PySocks or install this -module with the ``socks`` extra. - -The SOCKS implementation supports the full range of urllib3 features. 
It also -supports the following SOCKS features: - -- SOCKS4 -- SOCKS4a -- SOCKS5 -- Usernames and passwords for the SOCKS proxy - -Known Limitations: - -- Currently PySocks does not support contacting remote websites via literal - IPv6 addresses. Any such connection attempt will fail. You must use a domain - name. -- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any - such connection attempt will fail. -""" -from __future__ import absolute_import - -try: - import socks -except ImportError: - import warnings - from ..exceptions import DependencyWarning - - warnings.warn(( - 'SOCKS support in urllib3 requires the installation of optional ' - 'dependencies: specifically, PySocks. For more information, see ' - 'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies' - ), - DependencyWarning - ) - raise - -from socket import error as SocketError, timeout as SocketTimeout - -from ..connection import ( - HTTPConnection, HTTPSConnection -) -from ..connectionpool import ( - HTTPConnectionPool, HTTPSConnectionPool -) -from ..exceptions import ConnectTimeoutError, NewConnectionError -from ..poolmanager import PoolManager -from ..util.url import parse_url - -try: - import ssl -except ImportError: - ssl = None - - -class SOCKSConnection(HTTPConnection): - """ - A plain-text HTTP connection that connects via a SOCKS proxy. - """ - def __init__(self, *args, **kwargs): - self._socks_options = kwargs.pop('_socks_options') - super(SOCKSConnection, self).__init__(*args, **kwargs) - - def _new_conn(self): - """ - Establish a new connection via the SOCKS proxy. 
- """ - extra_kw = {} - if self.source_address: - extra_kw['source_address'] = self.source_address - - if self.socket_options: - extra_kw['socket_options'] = self.socket_options - - try: - conn = socks.create_connection( - (self.host, self.port), - proxy_type=self._socks_options['socks_version'], - proxy_addr=self._socks_options['proxy_host'], - proxy_port=self._socks_options['proxy_port'], - proxy_username=self._socks_options['username'], - proxy_password=self._socks_options['password'], - proxy_rdns=self._socks_options['rdns'], - timeout=self.timeout, - **extra_kw - ) - - except SocketTimeout as e: - raise ConnectTimeoutError( - self, "Connection to %s timed out. (connect timeout=%s)" % - (self.host, self.timeout)) - - except socks.ProxyError as e: - # This is fragile as hell, but it seems to be the only way to raise - # useful errors here. - if e.socket_err: - error = e.socket_err - if isinstance(error, SocketTimeout): - raise ConnectTimeoutError( - self, - "Connection to %s timed out. (connect timeout=%s)" % - (self.host, self.timeout) - ) - else: - raise NewConnectionError( - self, - "Failed to establish a new connection: %s" % error - ) - else: - raise NewConnectionError( - self, - "Failed to establish a new connection: %s" % e - ) - - except SocketError as e: # Defensive: PySocks should catch all these. - raise NewConnectionError( - self, "Failed to establish a new connection: %s" % e) - - return conn - - -# We don't need to duplicate the Verified/Unverified distinction from -# urllib3/connection.py here because the HTTPSConnection will already have been -# correctly set to either the Verified or Unverified form by that module. This -# means the SOCKSHTTPSConnection will automatically be the correct type. 
-class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection): - pass - - -class SOCKSHTTPConnectionPool(HTTPConnectionPool): - ConnectionCls = SOCKSConnection - - -class SOCKSHTTPSConnectionPool(HTTPSConnectionPool): - ConnectionCls = SOCKSHTTPSConnection - - -class SOCKSProxyManager(PoolManager): - """ - A version of the urllib3 ProxyManager that routes connections via the - defined SOCKS proxy. - """ - pool_classes_by_scheme = { - 'http': SOCKSHTTPConnectionPool, - 'https': SOCKSHTTPSConnectionPool, - } - - def __init__(self, proxy_url, username=None, password=None, - num_pools=10, headers=None, **connection_pool_kw): - parsed = parse_url(proxy_url) - - if username is None and password is None and parsed.auth is not None: - split = parsed.auth.split(':') - if len(split) == 2: - username, password = split - if parsed.scheme == 'socks5': - socks_version = socks.PROXY_TYPE_SOCKS5 - rdns = False - elif parsed.scheme == 'socks5h': - socks_version = socks.PROXY_TYPE_SOCKS5 - rdns = True - elif parsed.scheme == 'socks4': - socks_version = socks.PROXY_TYPE_SOCKS4 - rdns = False - elif parsed.scheme == 'socks4a': - socks_version = socks.PROXY_TYPE_SOCKS4 - rdns = True - else: - raise ValueError( - "Unable to determine SOCKS version from %s" % proxy_url - ) - - self.proxy_url = proxy_url - - socks_options = { - 'socks_version': socks_version, - 'proxy_host': parsed.host, - 'proxy_port': parsed.port, - 'username': username, - 'password': password, - 'rdns': rdns - } - connection_pool_kw['_socks_options'] = socks_options - - super(SOCKSProxyManager, self).__init__( - num_pools, headers, **connection_pool_kw - ) - - self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/exceptions.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/exceptions.py deleted file mode 100644 index 7bbaa98..0000000 --- 
a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/exceptions.py +++ /dev/null @@ -1,246 +0,0 @@ -from __future__ import absolute_import -from .packages.six.moves.http_client import ( - IncompleteRead as httplib_IncompleteRead -) -# Base Exceptions - - -class HTTPError(Exception): - "Base exception used by this module." - pass - - -class HTTPWarning(Warning): - "Base warning used by this module." - pass - - -class PoolError(HTTPError): - "Base exception for errors caused within a pool." - def __init__(self, pool, message): - self.pool = pool - HTTPError.__init__(self, "%s: %s" % (pool, message)) - - def __reduce__(self): - # For pickling purposes. - return self.__class__, (None, None) - - -class RequestError(PoolError): - "Base exception for PoolErrors that have associated URLs." - def __init__(self, pool, url, message): - self.url = url - PoolError.__init__(self, pool, message) - - def __reduce__(self): - # For pickling purposes. - return self.__class__, (None, self.url, None) - - -class SSLError(HTTPError): - "Raised when SSL certificate fails in an HTTPS connection." - pass - - -class ProxyError(HTTPError): - "Raised when the connection to a proxy fails." - pass - - -class DecodeError(HTTPError): - "Raised when automatic decoding based on Content-Type fails." - pass - - -class ProtocolError(HTTPError): - "Raised when something unexpected happens mid-request/response." - pass - - -#: Renamed to ProtocolError but aliased for backwards compatibility. -ConnectionError = ProtocolError - - -# Leaf Exceptions - -class MaxRetryError(RequestError): - """Raised when the maximum number of retries is exceeded. 
- - :param pool: The connection pool - :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool` - :param string url: The requested Url - :param exceptions.Exception reason: The underlying error - - """ - - def __init__(self, pool, url, reason=None): - self.reason = reason - - message = "Max retries exceeded with url: %s (Caused by %r)" % ( - url, reason) - - RequestError.__init__(self, pool, url, message) - - -class HostChangedError(RequestError): - "Raised when an existing pool gets a request for a foreign host." - - def __init__(self, pool, url, retries=3): - message = "Tried to open a foreign host with url: %s" % url - RequestError.__init__(self, pool, url, message) - self.retries = retries - - -class TimeoutStateError(HTTPError): - """ Raised when passing an invalid state to a timeout """ - pass - - -class TimeoutError(HTTPError): - """ Raised when a socket timeout error occurs. - - Catching this error will catch both :exc:`ReadTimeoutErrors - <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`. - """ - pass - - -class ReadTimeoutError(TimeoutError, RequestError): - "Raised when a socket timeout occurs while receiving data from a server" - pass - - -# This timeout error does not have a URL attached and needs to inherit from the -# base HTTPError -class ConnectTimeoutError(TimeoutError): - "Raised when a socket timeout occurs while connecting to a server" - pass - - -class NewConnectionError(ConnectTimeoutError, PoolError): - "Raised when we fail to establish a new connection. Usually ECONNREFUSED." - pass - - -class EmptyPoolError(PoolError): - "Raised when a pool runs out of connections and no more are allowed." - pass - - -class ClosedPoolError(PoolError): - "Raised when a request enters a pool after the pool has been closed." - pass - - -class LocationValueError(ValueError, HTTPError): - "Raised when there is something wrong with a given URL input." 
- pass - - -class LocationParseError(LocationValueError): - "Raised when get_host or similar fails to parse the URL input." - - def __init__(self, location): - message = "Failed to parse: %s" % location - HTTPError.__init__(self, message) - - self.location = location - - -class ResponseError(HTTPError): - "Used as a container for an error reason supplied in a MaxRetryError." - GENERIC_ERROR = 'too many error responses' - SPECIFIC_ERROR = 'too many {status_code} error responses' - - -class SecurityWarning(HTTPWarning): - "Warned when performing security reducing actions" - pass - - -class SubjectAltNameWarning(SecurityWarning): - "Warned when connecting to a host with a certificate missing a SAN." - pass - - -class InsecureRequestWarning(SecurityWarning): - "Warned when making an unverified HTTPS request." - pass - - -class SystemTimeWarning(SecurityWarning): - "Warned when system time is suspected to be wrong" - pass - - -class InsecurePlatformWarning(SecurityWarning): - "Warned when certain SSL configuration is not available on a platform." - pass - - -class SNIMissingWarning(HTTPWarning): - "Warned when making a HTTPS request without SNI available." - pass - - -class DependencyWarning(HTTPWarning): - """ - Warned when an attempt is made to import a module with missing optional - dependencies. - """ - pass - - -class ResponseNotChunked(ProtocolError, ValueError): - "Response needs to be chunked in order to read it as chunks." - pass - - -class BodyNotHttplibCompatible(HTTPError): - """ - Body should be httplib.HTTPResponse like (have an fp attribute which - returns raw chunks) for read_chunked(). - """ - pass - - -class IncompleteRead(HTTPError, httplib_IncompleteRead): - """ - Response length doesn't match expected Content-Length - - Subclass of http_client.IncompleteRead to allow int value - for `partial` to avoid creating large objects on streamed - reads. 
- """ - def __init__(self, partial, expected): - super(IncompleteRead, self).__init__(partial, expected) - - def __repr__(self): - return ('IncompleteRead(%i bytes read, ' - '%i more expected)' % (self.partial, self.expected)) - - -class InvalidHeader(HTTPError): - "The header provided was somehow invalid." - pass - - -class ProxySchemeUnknown(AssertionError, ValueError): - "ProxyManager does not support the supplied scheme" - # TODO(t-8ch): Stop inheriting from AssertionError in v2.0. - - def __init__(self, scheme): - message = "Not supported proxy scheme %s" % scheme - super(ProxySchemeUnknown, self).__init__(message) - - -class HeaderParsingError(HTTPError): - "Raised by assert_header_parsing, but we convert it to a log.warning statement." - def __init__(self, defects, unparsed_data): - message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data) - super(HeaderParsingError, self).__init__(message) - - -class UnrewindableBodyError(HTTPError): - "urllib3 encountered an error when trying to rewind a body" - pass diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/fields.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/fields.py deleted file mode 100644 index 37fe64a..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/fields.py +++ /dev/null @@ -1,178 +0,0 @@ -from __future__ import absolute_import -import email.utils -import mimetypes - -from .packages import six - - -def guess_content_type(filename, default='application/octet-stream'): - """ - Guess the "Content-Type" of a file. - - :param filename: - The filename to guess the "Content-Type" of using :mod:`mimetypes`. - :param default: - If no "Content-Type" can be guessed, default to `default`. - """ - if filename: - return mimetypes.guess_type(filename)[0] or default - return default - - -def format_header_param(name, value): - """ - Helper function to format and quote a single header parameter. 
- - Particularly useful for header parameters which might contain - non-ASCII values, like file names. This follows RFC 2231, as - suggested by RFC 2388 Section 4.4. - - :param name: - The name of the parameter, a string expected to be ASCII only. - :param value: - The value of the parameter, provided as a unicode string. - """ - if not any(ch in value for ch in '"\\\r\n'): - result = '%s="%s"' % (name, value) - try: - result.encode('ascii') - except (UnicodeEncodeError, UnicodeDecodeError): - pass - else: - return result - if not six.PY3 and isinstance(value, six.text_type): # Python 2: - value = value.encode('utf-8') - value = email.utils.encode_rfc2231(value, 'utf-8') - value = '%s*=%s' % (name, value) - return value - - -class RequestField(object): - """ - A data container for request body parameters. - - :param name: - The name of this request field. - :param data: - The data/value body. - :param filename: - An optional filename of the request field. - :param headers: - An optional dict-like object of headers to initially use for the field. - """ - def __init__(self, name, data, filename=None, headers=None): - self._name = name - self._filename = filename - self.data = data - self.headers = {} - if headers: - self.headers = dict(headers) - - @classmethod - def from_tuples(cls, fieldname, value): - """ - A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters. - - Supports constructing :class:`~urllib3.fields.RequestField` from - parameter of key/value strings AND key/filetuple. A filetuple is a - (filename, data, MIME type) tuple where the MIME type is optional. - For example:: - - 'foo': 'bar', - 'fakefile': ('foofile.txt', 'contents of foofile'), - 'realfile': ('barfile.txt', open('realfile').read()), - 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'), - 'nonamefile': 'contents of nonamefile field', - - Field names and filenames must be unicode. 
- """ - if isinstance(value, tuple): - if len(value) == 3: - filename, data, content_type = value - else: - filename, data = value - content_type = guess_content_type(filename) - else: - filename = None - content_type = None - data = value - - request_param = cls(fieldname, data, filename=filename) - request_param.make_multipart(content_type=content_type) - - return request_param - - def _render_part(self, name, value): - """ - Overridable helper function to format a single header parameter. - - :param name: - The name of the parameter, a string expected to be ASCII only. - :param value: - The value of the parameter, provided as a unicode string. - """ - return format_header_param(name, value) - - def _render_parts(self, header_parts): - """ - Helper function to format and quote a single header. - - Useful for single headers that are composed of multiple items. E.g., - 'Content-Disposition' fields. - - :param header_parts: - A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format - as `k1="v1"; k2="v2"; ...`. - """ - parts = [] - iterable = header_parts - if isinstance(header_parts, dict): - iterable = header_parts.items() - - for name, value in iterable: - if value is not None: - parts.append(self._render_part(name, value)) - - return '; '.join(parts) - - def render_headers(self): - """ - Renders the headers for this request field. - """ - lines = [] - - sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location'] - for sort_key in sort_keys: - if self.headers.get(sort_key, False): - lines.append('%s: %s' % (sort_key, self.headers[sort_key])) - - for header_name, header_value in self.headers.items(): - if header_name not in sort_keys: - if header_value: - lines.append('%s: %s' % (header_name, header_value)) - - lines.append('\r\n') - return '\r\n'.join(lines) - - def make_multipart(self, content_disposition=None, content_type=None, - content_location=None): - """ - Makes this request field into a multipart request field. 
- - This method overrides "Content-Disposition", "Content-Type" and - "Content-Location" headers to the request parameter. - - :param content_type: - The 'Content-Type' of the request body. - :param content_location: - The 'Content-Location' of the request body. - - """ - self.headers['Content-Disposition'] = content_disposition or 'form-data' - self.headers['Content-Disposition'] += '; '.join([ - '', self._render_parts( - (('name', self._name), ('filename', self._filename)) - ) - ]) - self.headers['Content-Type'] = content_type - self.headers['Content-Location'] = content_location diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/filepost.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/filepost.py deleted file mode 100644 index 78f1e19..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/filepost.py +++ /dev/null @@ -1,98 +0,0 @@ -from __future__ import absolute_import -import binascii -import codecs -import os - -from io import BytesIO - -from .packages import six -from .packages.six import b -from .fields import RequestField - -writer = codecs.lookup('utf-8')[3] - - -def choose_boundary(): - """ - Our embarrassingly-simple replacement for mimetools.choose_boundary. - """ - boundary = binascii.hexlify(os.urandom(16)) - if six.PY3: - boundary = boundary.decode('ascii') - return boundary - - -def iter_field_objects(fields): - """ - Iterate over fields. - - Supports list of (k, v) tuples and dicts, and lists of - :class:`~urllib3.fields.RequestField`. - - """ - if isinstance(fields, dict): - i = six.iteritems(fields) - else: - i = iter(fields) - - for field in i: - if isinstance(field, RequestField): - yield field - else: - yield RequestField.from_tuples(*field) - - -def iter_fields(fields): - """ - .. deprecated:: 1.6 - - Iterate over fields. - - The addition of :class:`~urllib3.fields.RequestField` makes this function - obsolete. 
Instead, use :func:`iter_field_objects`, which returns - :class:`~urllib3.fields.RequestField` objects. - - Supports list of (k, v) tuples and dicts. - """ - if isinstance(fields, dict): - return ((k, v) for k, v in six.iteritems(fields)) - - return ((k, v) for k, v in fields) - - -def encode_multipart_formdata(fields, boundary=None): - """ - Encode a dictionary of ``fields`` using the multipart/form-data MIME format. - - :param fields: - Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`). - - :param boundary: - If not specified, then a random boundary will be generated using - :func:`urllib3.filepost.choose_boundary`. - """ - body = BytesIO() - if boundary is None: - boundary = choose_boundary() - - for field in iter_field_objects(fields): - body.write(b('--%s\r\n' % (boundary))) - - writer(body).write(field.render_headers()) - data = field.data - - if isinstance(data, int): - data = str(data) # Backwards compatibility - - if isinstance(data, six.text_type): - writer(body).write(data) - else: - body.write(data) - - body.write(b'\r\n') - - body.write(b('--%s--\r\n' % (boundary))) - - content_type = str('multipart/form-data; boundary=%s' % boundary) - - return body.getvalue(), content_type diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/packages/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/packages/__init__.py deleted file mode 100644 index 170e974..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/packages/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from __future__ import absolute_import - -from . 
import ssl_match_hostname - -__all__ = ('ssl_match_hostname', ) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/packages/backports/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/packages/backports/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/packages/backports/makefile.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/packages/backports/makefile.py deleted file mode 100644 index 740db37..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/packages/backports/makefile.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -""" -backports.makefile -~~~~~~~~~~~~~~~~~~ - -Backports the Python 3 ``socket.makefile`` method for use with anything that -wants to create a "fake" socket object. -""" -import io - -from socket import SocketIO - - -def backport_makefile(self, mode="r", buffering=None, encoding=None, - errors=None, newline=None): - """ - Backport of ``socket.makefile`` from Python 3.5. 
- """ - if not set(mode) <= {"r", "w", "b"}: - raise ValueError( - "invalid mode %r (only r, w, b allowed)" % (mode,) - ) - writing = "w" in mode - reading = "r" in mode or not writing - assert reading or writing - binary = "b" in mode - rawmode = "" - if reading: - rawmode += "r" - if writing: - rawmode += "w" - raw = SocketIO(self, rawmode) - self._makefile_refs += 1 - if buffering is None: - buffering = -1 - if buffering < 0: - buffering = io.DEFAULT_BUFFER_SIZE - if buffering == 0: - if not binary: - raise ValueError("unbuffered streams must be binary") - return raw - if reading and writing: - buffer = io.BufferedRWPair(raw, raw, buffering) - elif reading: - buffer = io.BufferedReader(raw, buffering) - else: - assert writing - buffer = io.BufferedWriter(raw, buffering) - if binary: - return buffer - text = io.TextIOWrapper(buffer, encoding, errors, newline) - text.mode = mode - return text diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/packages/six.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/packages/six.py deleted file mode 100644 index 190c023..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/packages/six.py +++ /dev/null @@ -1,868 +0,0 @@ -"""Utilities for writing code that runs on Python 2 and 3""" - -# Copyright (c) 2010-2015 Benjamin Peterson -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. 
-# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -from __future__ import absolute_import - -import functools -import itertools -import operator -import sys -import types - -__author__ = "Benjamin Peterson <benjamin@python.org>" -__version__ = "1.10.0" - - -# Useful for very coarse version differentiation. -PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 -PY34 = sys.version_info[0:2] >= (3, 4) - -if PY3: - string_types = str, - integer_types = int, - class_types = type, - text_type = str - binary_type = bytes - - MAXSIZE = sys.maxsize -else: - string_types = basestring, - integer_types = (int, long) - class_types = (type, types.ClassType) - text_type = unicode - binary_type = str - - if sys.platform.startswith("java"): - # Jython always uses 32 bits. - MAXSIZE = int((1 << 31) - 1) - else: - # It's possible to have sizeof(long) != sizeof(Py_ssize_t). - class X(object): - - def __len__(self): - return 1 << 31 - try: - len(X()) - except OverflowError: - # 32-bit - MAXSIZE = int((1 << 31) - 1) - else: - # 64-bit - MAXSIZE = int((1 << 63) - 1) - del X - - -def _add_doc(func, doc): - """Add documentation to a function.""" - func.__doc__ = doc - - -def _import_module(name): - """Import module, returning the module after the last dot.""" - __import__(name) - return sys.modules[name] - - -class _LazyDescr(object): - - def __init__(self, name): - self.name = name - - def __get__(self, obj, tp): - result = self._resolve() - setattr(obj, self.name, result) # Invokes __set__. 
- try: - # This is a bit ugly, but it avoids running this again by - # removing this descriptor. - delattr(obj.__class__, self.name) - except AttributeError: - pass - return result - - -class MovedModule(_LazyDescr): - - def __init__(self, name, old, new=None): - super(MovedModule, self).__init__(name) - if PY3: - if new is None: - new = name - self.mod = new - else: - self.mod = old - - def _resolve(self): - return _import_module(self.mod) - - def __getattr__(self, attr): - _module = self._resolve() - value = getattr(_module, attr) - setattr(self, attr, value) - return value - - -class _LazyModule(types.ModuleType): - - def __init__(self, name): - super(_LazyModule, self).__init__(name) - self.__doc__ = self.__class__.__doc__ - - def __dir__(self): - attrs = ["__doc__", "__name__"] - attrs += [attr.name for attr in self._moved_attributes] - return attrs - - # Subclasses should override this - _moved_attributes = [] - - -class MovedAttribute(_LazyDescr): - - def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): - super(MovedAttribute, self).__init__(name) - if PY3: - if new_mod is None: - new_mod = name - self.mod = new_mod - if new_attr is None: - if old_attr is None: - new_attr = name - else: - new_attr = old_attr - self.attr = new_attr - else: - self.mod = old_mod - if old_attr is None: - old_attr = name - self.attr = old_attr - - def _resolve(self): - module = _import_module(self.mod) - return getattr(module, self.attr) - - -class _SixMetaPathImporter(object): - - """ - A meta path importer to import six.moves and its submodules. - - This class implements a PEP302 finder and loader. It should be compatible - with Python 2.5 and all existing versions of Python3 - """ - - def __init__(self, six_module_name): - self.name = six_module_name - self.known_modules = {} - - def _add_module(self, mod, *fullnames): - for fullname in fullnames: - self.known_modules[self.name + "." 
+ fullname] = mod - - def _get_module(self, fullname): - return self.known_modules[self.name + "." + fullname] - - def find_module(self, fullname, path=None): - if fullname in self.known_modules: - return self - return None - - def __get_module(self, fullname): - try: - return self.known_modules[fullname] - except KeyError: - raise ImportError("This loader does not know module " + fullname) - - def load_module(self, fullname): - try: - # in case of a reload - return sys.modules[fullname] - except KeyError: - pass - mod = self.__get_module(fullname) - if isinstance(mod, MovedModule): - mod = mod._resolve() - else: - mod.__loader__ = self - sys.modules[fullname] = mod - return mod - - def is_package(self, fullname): - """ - Return true, if the named module is a package. - - We need this method to get correct spec objects with - Python 3.4 (see PEP451) - """ - return hasattr(self.__get_module(fullname), "__path__") - - def get_code(self, fullname): - """Return None - - Required, if is_package is implemented""" - self.__get_module(fullname) # eventually raises ImportError - return None - get_source = get_code # same as get_code - -_importer = _SixMetaPathImporter(__name__) - - -class _MovedItems(_LazyModule): - - """Lazy loading of moved objects""" - __path__ = [] # mark as package - - -_moved_attributes = [ - MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), - MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), - MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), - MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), - MovedAttribute("intern", "__builtin__", "sys"), - MovedAttribute("map", "itertools", "builtins", "imap", "map"), - MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), - MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), - MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("reload_module", "__builtin__", "importlib" 
if PY34 else "imp", "reload"), - MovedAttribute("reduce", "__builtin__", "functools"), - MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), - MovedAttribute("StringIO", "StringIO", "io"), - MovedAttribute("UserDict", "UserDict", "collections"), - MovedAttribute("UserList", "UserList", "collections"), - MovedAttribute("UserString", "UserString", "collections"), - MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), - MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), - MovedModule("builtins", "__builtin__"), - MovedModule("configparser", "ConfigParser"), - MovedModule("copyreg", "copy_reg"), - MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), - MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), - MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), - MovedModule("http_cookies", "Cookie", "http.cookies"), - MovedModule("html_entities", "htmlentitydefs", "html.entities"), - MovedModule("html_parser", "HTMLParser", "html.parser"), - MovedModule("http_client", "httplib", "http.client"), - MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), - MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), - MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), - MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), - MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), - MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), - MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), - MovedModule("cPickle", "cPickle", "pickle"), - MovedModule("queue", "Queue"), - MovedModule("reprlib", "repr"), - MovedModule("socketserver", "SocketServer"), - MovedModule("_thread", "thread", "_thread"), - MovedModule("tkinter", "Tkinter"), - MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), - 
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), - MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), - MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), - MovedModule("tkinter_tix", "Tix", "tkinter.tix"), - MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), - MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), - MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), - MovedModule("tkinter_colorchooser", "tkColorChooser", - "tkinter.colorchooser"), - MovedModule("tkinter_commondialog", "tkCommonDialog", - "tkinter.commondialog"), - MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), - MovedModule("tkinter_font", "tkFont", "tkinter.font"), - MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), - MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", - "tkinter.simpledialog"), - MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), - MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), - MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), - MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), - MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), - MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), -] -# Add windows specific modules. -if sys.platform == "win32": - _moved_attributes += [ - MovedModule("winreg", "_winreg"), - ] - -for attr in _moved_attributes: - setattr(_MovedItems, attr.name, attr) - if isinstance(attr, MovedModule): - _importer._add_module(attr, "moves." 
+ attr.name) -del attr - -_MovedItems._moved_attributes = _moved_attributes - -moves = _MovedItems(__name__ + ".moves") -_importer._add_module(moves, "moves") - - -class Module_six_moves_urllib_parse(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_parse""" - - -_urllib_parse_moved_attributes = [ - MovedAttribute("ParseResult", "urlparse", "urllib.parse"), - MovedAttribute("SplitResult", "urlparse", "urllib.parse"), - MovedAttribute("parse_qs", "urlparse", "urllib.parse"), - MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), - MovedAttribute("urldefrag", "urlparse", "urllib.parse"), - MovedAttribute("urljoin", "urlparse", "urllib.parse"), - MovedAttribute("urlparse", "urlparse", "urllib.parse"), - MovedAttribute("urlsplit", "urlparse", "urllib.parse"), - MovedAttribute("urlunparse", "urlparse", "urllib.parse"), - MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), - MovedAttribute("quote", "urllib", "urllib.parse"), - MovedAttribute("quote_plus", "urllib", "urllib.parse"), - MovedAttribute("unquote", "urllib", "urllib.parse"), - MovedAttribute("unquote_plus", "urllib", "urllib.parse"), - MovedAttribute("urlencode", "urllib", "urllib.parse"), - MovedAttribute("splitquery", "urllib", "urllib.parse"), - MovedAttribute("splittag", "urllib", "urllib.parse"), - MovedAttribute("splituser", "urllib", "urllib.parse"), - MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), - MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), - MovedAttribute("uses_params", "urlparse", "urllib.parse"), - MovedAttribute("uses_query", "urlparse", "urllib.parse"), - MovedAttribute("uses_relative", "urlparse", "urllib.parse"), -] -for attr in _urllib_parse_moved_attributes: - setattr(Module_six_moves_urllib_parse, attr.name, attr) -del attr - -Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes - -_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), - "moves.urllib_parse", 
"moves.urllib.parse") - - -class Module_six_moves_urllib_error(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_error""" - - -_urllib_error_moved_attributes = [ - MovedAttribute("URLError", "urllib2", "urllib.error"), - MovedAttribute("HTTPError", "urllib2", "urllib.error"), - MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), -] -for attr in _urllib_error_moved_attributes: - setattr(Module_six_moves_urllib_error, attr.name, attr) -del attr - -Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes - -_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), - "moves.urllib_error", "moves.urllib.error") - - -class Module_six_moves_urllib_request(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_request""" - - -_urllib_request_moved_attributes = [ - MovedAttribute("urlopen", "urllib2", "urllib.request"), - MovedAttribute("install_opener", "urllib2", "urllib.request"), - MovedAttribute("build_opener", "urllib2", "urllib.request"), - MovedAttribute("pathname2url", "urllib", "urllib.request"), - MovedAttribute("url2pathname", "urllib", "urllib.request"), - MovedAttribute("getproxies", "urllib", "urllib.request"), - MovedAttribute("Request", "urllib2", "urllib.request"), - MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), - MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), - MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), - MovedAttribute("BaseHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), - MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), - 
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), - MovedAttribute("FileHandler", "urllib2", "urllib.request"), - MovedAttribute("FTPHandler", "urllib2", "urllib.request"), - MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), - MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), - MovedAttribute("urlretrieve", "urllib", "urllib.request"), - MovedAttribute("urlcleanup", "urllib", "urllib.request"), - MovedAttribute("URLopener", "urllib", "urllib.request"), - MovedAttribute("FancyURLopener", "urllib", "urllib.request"), - MovedAttribute("proxy_bypass", "urllib", "urllib.request"), -] -for attr in _urllib_request_moved_attributes: - setattr(Module_six_moves_urllib_request, attr.name, attr) -del attr - -Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes - -_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), - "moves.urllib_request", "moves.urllib.request") - - -class Module_six_moves_urllib_response(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_response""" - - -_urllib_response_moved_attributes = [ - MovedAttribute("addbase", "urllib", "urllib.response"), - MovedAttribute("addclosehook", "urllib", "urllib.response"), - MovedAttribute("addinfo", "urllib", "urllib.response"), - MovedAttribute("addinfourl", "urllib", "urllib.response"), -] -for attr in _urllib_response_moved_attributes: - setattr(Module_six_moves_urllib_response, attr.name, attr) -del attr - -Module_six_moves_urllib_response._moved_attributes = 
_urllib_response_moved_attributes - -_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), - "moves.urllib_response", "moves.urllib.response") - - -class Module_six_moves_urllib_robotparser(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_robotparser""" - - -_urllib_robotparser_moved_attributes = [ - MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), -] -for attr in _urllib_robotparser_moved_attributes: - setattr(Module_six_moves_urllib_robotparser, attr.name, attr) -del attr - -Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes - -_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), - "moves.urllib_robotparser", "moves.urllib.robotparser") - - -class Module_six_moves_urllib(types.ModuleType): - - """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" - __path__ = [] # mark as package - parse = _importer._get_module("moves.urllib_parse") - error = _importer._get_module("moves.urllib_error") - request = _importer._get_module("moves.urllib_request") - response = _importer._get_module("moves.urllib_response") - robotparser = _importer._get_module("moves.urllib_robotparser") - - def __dir__(self): - return ['parse', 'error', 'request', 'response', 'robotparser'] - -_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), - "moves.urllib") - - -def add_move(move): - """Add an item to six.moves.""" - setattr(_MovedItems, move.name, move) - - -def remove_move(name): - """Remove item from six.moves.""" - try: - delattr(_MovedItems, name) - except AttributeError: - try: - del moves.__dict__[name] - except KeyError: - raise AttributeError("no such move, %r" % (name,)) - - -if PY3: - _meth_func = "__func__" - _meth_self = "__self__" - - _func_closure = "__closure__" - _func_code = "__code__" - _func_defaults = "__defaults__" - _func_globals = "__globals__" -else: - 
_meth_func = "im_func" - _meth_self = "im_self" - - _func_closure = "func_closure" - _func_code = "func_code" - _func_defaults = "func_defaults" - _func_globals = "func_globals" - - -try: - advance_iterator = next -except NameError: - def advance_iterator(it): - return it.next() -next = advance_iterator - - -try: - callable = callable -except NameError: - def callable(obj): - return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) - - -if PY3: - def get_unbound_function(unbound): - return unbound - - create_bound_method = types.MethodType - - def create_unbound_method(func, cls): - return func - - Iterator = object -else: - def get_unbound_function(unbound): - return unbound.im_func - - def create_bound_method(func, obj): - return types.MethodType(func, obj, obj.__class__) - - def create_unbound_method(func, cls): - return types.MethodType(func, None, cls) - - class Iterator(object): - - def next(self): - return type(self).__next__(self) - - callable = callable -_add_doc(get_unbound_function, - """Get the function out of a possibly unbound function""") - - -get_method_function = operator.attrgetter(_meth_func) -get_method_self = operator.attrgetter(_meth_self) -get_function_closure = operator.attrgetter(_func_closure) -get_function_code = operator.attrgetter(_func_code) -get_function_defaults = operator.attrgetter(_func_defaults) -get_function_globals = operator.attrgetter(_func_globals) - - -if PY3: - def iterkeys(d, **kw): - return iter(d.keys(**kw)) - - def itervalues(d, **kw): - return iter(d.values(**kw)) - - def iteritems(d, **kw): - return iter(d.items(**kw)) - - def iterlists(d, **kw): - return iter(d.lists(**kw)) - - viewkeys = operator.methodcaller("keys") - - viewvalues = operator.methodcaller("values") - - viewitems = operator.methodcaller("items") -else: - def iterkeys(d, **kw): - return d.iterkeys(**kw) - - def itervalues(d, **kw): - return d.itervalues(**kw) - - def iteritems(d, **kw): - return d.iteritems(**kw) - - def iterlists(d, 
**kw): - return d.iterlists(**kw) - - viewkeys = operator.methodcaller("viewkeys") - - viewvalues = operator.methodcaller("viewvalues") - - viewitems = operator.methodcaller("viewitems") - -_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") -_add_doc(itervalues, "Return an iterator over the values of a dictionary.") -_add_doc(iteritems, - "Return an iterator over the (key, value) pairs of a dictionary.") -_add_doc(iterlists, - "Return an iterator over the (key, [values]) pairs of a dictionary.") - - -if PY3: - def b(s): - return s.encode("latin-1") - - def u(s): - return s - unichr = chr - import struct - int2byte = struct.Struct(">B").pack - del struct - byte2int = operator.itemgetter(0) - indexbytes = operator.getitem - iterbytes = iter - import io - StringIO = io.StringIO - BytesIO = io.BytesIO - _assertCountEqual = "assertCountEqual" - if sys.version_info[1] <= 1: - _assertRaisesRegex = "assertRaisesRegexp" - _assertRegex = "assertRegexpMatches" - else: - _assertRaisesRegex = "assertRaisesRegex" - _assertRegex = "assertRegex" -else: - def b(s): - return s - # Workaround for standalone backslash - - def u(s): - return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") - unichr = unichr - int2byte = chr - - def byte2int(bs): - return ord(bs[0]) - - def indexbytes(buf, i): - return ord(buf[i]) - iterbytes = functools.partial(itertools.imap, ord) - import StringIO - StringIO = BytesIO = StringIO.StringIO - _assertCountEqual = "assertItemsEqual" - _assertRaisesRegex = "assertRaisesRegexp" - _assertRegex = "assertRegexpMatches" -_add_doc(b, """Byte literal""") -_add_doc(u, """Text literal""") - - -def assertCountEqual(self, *args, **kwargs): - return getattr(self, _assertCountEqual)(*args, **kwargs) - - -def assertRaisesRegex(self, *args, **kwargs): - return getattr(self, _assertRaisesRegex)(*args, **kwargs) - - -def assertRegex(self, *args, **kwargs): - return getattr(self, _assertRegex)(*args, **kwargs) - - -if PY3: - exec_ = 
getattr(moves.builtins, "exec") - - def reraise(tp, value, tb=None): - if value is None: - value = tp() - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - -else: - def exec_(_code_, _globs_=None, _locs_=None): - """Execute code in a namespace.""" - if _globs_ is None: - frame = sys._getframe(1) - _globs_ = frame.f_globals - if _locs_ is None: - _locs_ = frame.f_locals - del frame - elif _locs_ is None: - _locs_ = _globs_ - exec("""exec _code_ in _globs_, _locs_""") - - exec_("""def reraise(tp, value, tb=None): - raise tp, value, tb -""") - - -if sys.version_info[:2] == (3, 2): - exec_("""def raise_from(value, from_value): - if from_value is None: - raise value - raise value from from_value -""") -elif sys.version_info[:2] > (3, 2): - exec_("""def raise_from(value, from_value): - raise value from from_value -""") -else: - def raise_from(value, from_value): - raise value - - -print_ = getattr(moves.builtins, "print", None) -if print_ is None: - def print_(*args, **kwargs): - """The new-style print function for Python 2.4 and 2.5.""" - fp = kwargs.pop("file", sys.stdout) - if fp is None: - return - - def write(data): - if not isinstance(data, basestring): - data = str(data) - # If the file has an encoding, encode unicode with it. 
- if (isinstance(fp, file) and - isinstance(data, unicode) and - fp.encoding is not None): - errors = getattr(fp, "errors", None) - if errors is None: - errors = "strict" - data = data.encode(fp.encoding, errors) - fp.write(data) - want_unicode = False - sep = kwargs.pop("sep", None) - if sep is not None: - if isinstance(sep, unicode): - want_unicode = True - elif not isinstance(sep, str): - raise TypeError("sep must be None or a string") - end = kwargs.pop("end", None) - if end is not None: - if isinstance(end, unicode): - want_unicode = True - elif not isinstance(end, str): - raise TypeError("end must be None or a string") - if kwargs: - raise TypeError("invalid keyword arguments to print()") - if not want_unicode: - for arg in args: - if isinstance(arg, unicode): - want_unicode = True - break - if want_unicode: - newline = unicode("\n") - space = unicode(" ") - else: - newline = "\n" - space = " " - if sep is None: - sep = space - if end is None: - end = newline - for i, arg in enumerate(args): - if i: - write(sep) - write(arg) - write(end) -if sys.version_info[:2] < (3, 3): - _print = print_ - - def print_(*args, **kwargs): - fp = kwargs.get("file", sys.stdout) - flush = kwargs.pop("flush", False) - _print(*args, **kwargs) - if flush and fp is not None: - fp.flush() - -_add_doc(reraise, """Reraise an exception.""") - -if sys.version_info[0:2] < (3, 4): - def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, - updated=functools.WRAPPER_UPDATES): - def wrapper(f): - f = functools.wraps(wrapped, assigned, updated)(f) - f.__wrapped__ = wrapped - return f - return wrapper -else: - wraps = functools.wraps - - -def with_metaclass(meta, *bases): - """Create a base class with a metaclass.""" - # This requires a bit of explanation: the basic idea is to make a dummy - # metaclass for one level of class instantiation that replaces itself with - # the actual metaclass. 
- class metaclass(meta): - - def __new__(cls, name, this_bases, d): - return meta(name, bases, d) - return type.__new__(metaclass, 'temporary_class', (), {}) - - -def add_metaclass(metaclass): - """Class decorator for creating a class with a metaclass.""" - def wrapper(cls): - orig_vars = cls.__dict__.copy() - slots = orig_vars.get('__slots__') - if slots is not None: - if isinstance(slots, str): - slots = [slots] - for slots_var in slots: - orig_vars.pop(slots_var) - orig_vars.pop('__dict__', None) - orig_vars.pop('__weakref__', None) - return metaclass(cls.__name__, cls.__bases__, orig_vars) - return wrapper - - -def python_2_unicode_compatible(klass): - """ - A decorator that defines __unicode__ and __str__ methods under Python 2. - Under Python 3 it does nothing. - - To support Python 2 and 3 with a single code base, define a __str__ method - returning text and apply this decorator to the class. - """ - if PY2: - if '__str__' not in klass.__dict__: - raise ValueError("@python_2_unicode_compatible cannot be applied " - "to %s because it doesn't define __str__()." % - klass.__name__) - klass.__unicode__ = klass.__str__ - klass.__str__ = lambda self: self.__unicode__().encode('utf-8') - return klass - - -# Complete the moves implementation. -# This code is at the end of this module to speed up module loading. -# Turn this module into a package. -__path__ = [] # required for PEP 302 and PEP 451 -__package__ = __name__ # see PEP 366 @ReservedAssignment -if globals().get("__spec__") is not None: - __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable -# Remove other six meta path importers, since they cause problems. This can -# happen if six is removed from sys.modules and then reloaded. (Setuptools does -# this for some reason.) -if sys.meta_path: - for i, importer in enumerate(sys.meta_path): - # Here's some real nastiness: Another "instance" of the six module might - # be floating around. 
Therefore, we can't use isinstance() to check for - # the six meta path importer, since the other six instance will have - # inserted an importer with different class. - if (type(importer).__name__ == "_SixMetaPathImporter" and - importer.name == __name__): - del sys.meta_path[i] - break - del i, importer -# Finally, add the importer to the meta path import hook. -sys.meta_path.append(_importer) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py deleted file mode 100644 index d6594eb..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -import sys - -try: - # Our match_hostname function is the same as 3.5's, so we only want to - # import the match_hostname function if it's at least that good. - if sys.version_info < (3, 5): - raise ImportError("Fallback to vendored code") - - from ssl import CertificateError, match_hostname -except ImportError: - try: - # Backport of the function from a pypi module - from backports.ssl_match_hostname import CertificateError, match_hostname - except ImportError: - # Our vendored copy - from ._implementation import CertificateError, match_hostname - -# Not needed, but documenting what we provide. 
-__all__ = ('CertificateError', 'match_hostname') diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py deleted file mode 100644 index 970cf65..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py +++ /dev/null @@ -1,156 +0,0 @@ -"""The match_hostname() function from Python 3.3.3, essential when using SSL.""" - -# Note: This file is under the PSF license as the code comes from the python -# stdlib. http://docs.python.org/3/license.html - -import re -import sys - -# ipaddress has been backported to 2.6+ in pypi. If it is installed on the -# system, use it to handle IPAddress ServerAltnames (this was added in -# python-3.5) otherwise only do DNS matching. This allows -# backports.ssl_match_hostname to continue to be used in Python 2.7. -try: - from pip._vendor import ipaddress -except ImportError: - ipaddress = None - -__version__ = '3.5.0.1' - - -class CertificateError(ValueError): - pass - - -def _dnsname_match(dn, hostname, max_wildcards=1): - """Matching according to RFC 6125, section 6.4.3 - - http://tools.ietf.org/html/rfc6125#section-6.4.3 - """ - pats = [] - if not dn: - return False - - # Ported from python3-syntax: - # leftmost, *remainder = dn.split(r'.') - parts = dn.split(r'.') - leftmost = parts[0] - remainder = parts[1:] - - wildcards = leftmost.count('*') - if wildcards > max_wildcards: - # Issue #17980: avoid denials of service by refusing more - # than one wildcard per fragment. A survey of established - # policy among SSL implementations showed it to be a - # reasonable choice. 
- raise CertificateError( - "too many wildcards in certificate DNS name: " + repr(dn)) - - # speed up common case w/o wildcards - if not wildcards: - return dn.lower() == hostname.lower() - - # RFC 6125, section 6.4.3, subitem 1. - # The client SHOULD NOT attempt to match a presented identifier in which - # the wildcard character comprises a label other than the left-most label. - if leftmost == '*': - # When '*' is a fragment by itself, it matches a non-empty dotless - # fragment. - pats.append('[^.]+') - elif leftmost.startswith('xn--') or hostname.startswith('xn--'): - # RFC 6125, section 6.4.3, subitem 3. - # The client SHOULD NOT attempt to match a presented identifier - # where the wildcard character is embedded within an A-label or - # U-label of an internationalized domain name. - pats.append(re.escape(leftmost)) - else: - # Otherwise, '*' matches any dotless string, e.g. www* - pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) - - # add the remaining fragments, ignore any wildcards - for frag in remainder: - pats.append(re.escape(frag)) - - pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) - return pat.match(hostname) - - -def _to_unicode(obj): - if isinstance(obj, str) and sys.version_info < (3,): - obj = unicode(obj, encoding='ascii', errors='strict') - return obj - -def _ipaddress_match(ipname, host_ip): - """Exact matching of IP addresses. - - RFC 6125 explicitly doesn't define an algorithm for this - (section 1.7.2 - "Out of Scope"). - """ - # OpenSSL may add a trailing newline to a subjectAltName's IP address - # Divergence from upstream: ipaddress can't handle byte str - ip = ipaddress.ip_address(_to_unicode(ipname).rstrip()) - return ip == host_ip - - -def match_hostname(cert, hostname): - """Verify that *cert* (in decoded format as returned by - SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 - rules are followed, but IP addresses are not accepted for *hostname*. 
- - CertificateError is raised on failure. On success, the function - returns nothing. - """ - if not cert: - raise ValueError("empty or no certificate, match_hostname needs a " - "SSL socket or SSL context with either " - "CERT_OPTIONAL or CERT_REQUIRED") - try: - # Divergence from upstream: ipaddress can't handle byte str - host_ip = ipaddress.ip_address(_to_unicode(hostname)) - except ValueError: - # Not an IP address (common case) - host_ip = None - except UnicodeError: - # Divergence from upstream: Have to deal with ipaddress not taking - # byte strings. addresses should be all ascii, so we consider it not - # an ipaddress in this case - host_ip = None - except AttributeError: - # Divergence from upstream: Make ipaddress library optional - if ipaddress is None: - host_ip = None - else: - raise - dnsnames = [] - san = cert.get('subjectAltName', ()) - for key, value in san: - if key == 'DNS': - if host_ip is None and _dnsname_match(value, hostname): - return - dnsnames.append(value) - elif key == 'IP Address': - if host_ip is not None and _ipaddress_match(value, host_ip): - return - dnsnames.append(value) - if not dnsnames: - # The subject is only checked when there is no dNSName entry - # in subjectAltName - for sub in cert.get('subject', ()): - for key, value in sub: - # XXX according to RFC 2818, the most specific Common Name - # must be used. 
- if key == 'commonName': - if _dnsname_match(value, hostname): - return - dnsnames.append(value) - if len(dnsnames) > 1: - raise CertificateError("hostname %r " - "doesn't match either of %s" - % (hostname, ', '.join(map(repr, dnsnames)))) - elif len(dnsnames) == 1: - raise CertificateError("hostname %r " - "doesn't match %r" - % (hostname, dnsnames[0])) - else: - raise CertificateError("no appropriate commonName or " - "subjectAltName fields were found") diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/poolmanager.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/poolmanager.py deleted file mode 100644 index fe5491c..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/poolmanager.py +++ /dev/null @@ -1,450 +0,0 @@ -from __future__ import absolute_import -import collections -import functools -import logging - -from ._collections import RecentlyUsedContainer -from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool -from .connectionpool import port_by_scheme -from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown -from .packages.six.moves.urllib.parse import urljoin -from .request import RequestMethods -from .util.url import parse_url -from .util.retry import Retry - - -__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url'] - - -log = logging.getLogger(__name__) - -SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs', - 'ssl_version', 'ca_cert_dir', 'ssl_context') - -# All known keyword arguments that could be provided to the pool manager, its -# pools, or the underlying connections. This is used to construct a pool key. 
-_key_fields = ( - 'key_scheme', # str - 'key_host', # str - 'key_port', # int - 'key_timeout', # int or float or Timeout - 'key_retries', # int or Retry - 'key_strict', # bool - 'key_block', # bool - 'key_source_address', # str - 'key_key_file', # str - 'key_cert_file', # str - 'key_cert_reqs', # str - 'key_ca_certs', # str - 'key_ssl_version', # str - 'key_ca_cert_dir', # str - 'key_ssl_context', # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext - 'key_maxsize', # int - 'key_headers', # dict - 'key__proxy', # parsed proxy url - 'key__proxy_headers', # dict - 'key_socket_options', # list of (level (int), optname (int), value (int or str)) tuples - 'key__socks_options', # dict - 'key_assert_hostname', # bool or string - 'key_assert_fingerprint', # str - 'key_server_hostname', #str -) - -#: The namedtuple class used to construct keys for the connection pool. -#: All custom key schemes should include the fields in this key at a minimum. -PoolKey = collections.namedtuple('PoolKey', _key_fields) - - -def _default_key_normalizer(key_class, request_context): - """ - Create a pool key out of a request context dictionary. - - According to RFC 3986, both the scheme and host are case-insensitive. - Therefore, this function normalizes both before constructing the pool - key for an HTTPS request. If you wish to change this behaviour, provide - alternate callables to ``key_fn_by_scheme``. - - :param key_class: - The class to use when constructing the key. This should be a namedtuple - with the ``scheme`` and ``host`` keys at a minimum. - :type key_class: namedtuple - :param request_context: - A dictionary-like object that contain the context for a request. - :type request_context: dict - - :return: A namedtuple that can be used as a connection pool key. 
- :rtype: PoolKey - """ - # Since we mutate the dictionary, make a copy first - context = request_context.copy() - context['scheme'] = context['scheme'].lower() - context['host'] = context['host'].lower() - - # These are both dictionaries and need to be transformed into frozensets - for key in ('headers', '_proxy_headers', '_socks_options'): - if key in context and context[key] is not None: - context[key] = frozenset(context[key].items()) - - # The socket_options key may be a list and needs to be transformed into a - # tuple. - socket_opts = context.get('socket_options') - if socket_opts is not None: - context['socket_options'] = tuple(socket_opts) - - # Map the kwargs to the names in the namedtuple - this is necessary since - # namedtuples can't have fields starting with '_'. - for key in list(context.keys()): - context['key_' + key] = context.pop(key) - - # Default to ``None`` for keys missing from the context - for field in key_class._fields: - if field not in context: - context[field] = None - - return key_class(**context) - - -#: A dictionary that maps a scheme to a callable that creates a pool key. -#: This can be used to alter the way pool keys are constructed, if desired. -#: Each PoolManager makes a copy of this dictionary so they can be configured -#: globally here, or individually on the instance. -key_fn_by_scheme = { - 'http': functools.partial(_default_key_normalizer, PoolKey), - 'https': functools.partial(_default_key_normalizer, PoolKey), -} - -pool_classes_by_scheme = { - 'http': HTTPConnectionPool, - 'https': HTTPSConnectionPool, -} - - -class PoolManager(RequestMethods): - """ - Allows for arbitrary requests while transparently keeping track of - necessary connection pools for you. - - :param num_pools: - Number of connection pools to cache before discarding the least - recently used pool. - - :param headers: - Headers to include with all requests, unless other headers are given - explicitly. 
- - :param \\**connection_pool_kw: - Additional parameters are used to create fresh - :class:`urllib3.connectionpool.ConnectionPool` instances. - - Example:: - - >>> manager = PoolManager(num_pools=2) - >>> r = manager.request('GET', 'http://google.com/') - >>> r = manager.request('GET', 'http://google.com/mail') - >>> r = manager.request('GET', 'http://yahoo.com/') - >>> len(manager.pools) - 2 - - """ - - proxy = None - - def __init__(self, num_pools=10, headers=None, **connection_pool_kw): - RequestMethods.__init__(self, headers) - self.connection_pool_kw = connection_pool_kw - self.pools = RecentlyUsedContainer(num_pools, - dispose_func=lambda p: p.close()) - - # Locally set the pool classes and keys so other PoolManagers can - # override them. - self.pool_classes_by_scheme = pool_classes_by_scheme - self.key_fn_by_scheme = key_fn_by_scheme.copy() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.clear() - # Return False to re-raise any potential exceptions - return False - - def _new_pool(self, scheme, host, port, request_context=None): - """ - Create a new :class:`ConnectionPool` based on host, port, scheme, and - any additional pool keyword arguments. - - If ``request_context`` is provided, it is provided as keyword arguments - to the pool class used. This method is used to actually create the - connection pools handed out by :meth:`connection_from_url` and - companion methods. It is intended to be overridden for customization. - """ - pool_cls = self.pool_classes_by_scheme[scheme] - if request_context is None: - request_context = self.connection_pool_kw.copy() - - # Although the context has everything necessary to create the pool, - # this function has historically only used the scheme, host, and port - # in the positional args. When an API change is acceptable these can - # be removed. 
- for key in ('scheme', 'host', 'port'): - request_context.pop(key, None) - - if scheme == 'http': - for kw in SSL_KEYWORDS: - request_context.pop(kw, None) - - return pool_cls(host, port, **request_context) - - def clear(self): - """ - Empty our store of pools and direct them all to close. - - This will not affect in-flight connections, but they will not be - re-used after completion. - """ - self.pools.clear() - - def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None): - """ - Get a :class:`ConnectionPool` based on the host, port, and scheme. - - If ``port`` isn't given, it will be derived from the ``scheme`` using - ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is - provided, it is merged with the instance's ``connection_pool_kw`` - variable and used to create the new connection pool, if one is - needed. - """ - - if not host: - raise LocationValueError("No host specified.") - - request_context = self._merge_pool_kwargs(pool_kwargs) - request_context['scheme'] = scheme or 'http' - if not port: - port = port_by_scheme.get(request_context['scheme'].lower(), 80) - request_context['port'] = port - request_context['host'] = host - - return self.connection_from_context(request_context) - - def connection_from_context(self, request_context): - """ - Get a :class:`ConnectionPool` based on the request context. - - ``request_context`` must at least contain the ``scheme`` key and its - value must be a key in ``key_fn_by_scheme`` instance variable. - """ - scheme = request_context['scheme'].lower() - pool_key_constructor = self.key_fn_by_scheme[scheme] - pool_key = pool_key_constructor(request_context) - - return self.connection_from_pool_key(pool_key, request_context=request_context) - - def connection_from_pool_key(self, pool_key, request_context=None): - """ - Get a :class:`ConnectionPool` based on the provided pool key. - - ``pool_key`` should be a namedtuple that only contains immutable - objects. 
At a minimum it must have the ``scheme``, ``host``, and - ``port`` fields. - """ - with self.pools.lock: - # If the scheme, host, or port doesn't match existing open - # connections, open a new ConnectionPool. - pool = self.pools.get(pool_key) - if pool: - return pool - - # Make a fresh ConnectionPool of the desired type - scheme = request_context['scheme'] - host = request_context['host'] - port = request_context['port'] - pool = self._new_pool(scheme, host, port, request_context=request_context) - self.pools[pool_key] = pool - - return pool - - def connection_from_url(self, url, pool_kwargs=None): - """ - Similar to :func:`urllib3.connectionpool.connection_from_url`. - - If ``pool_kwargs`` is not provided and a new pool needs to be - constructed, ``self.connection_pool_kw`` is used to initialize - the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs`` - is provided, it is used instead. Note that if a new pool does not - need to be created for the request, the provided ``pool_kwargs`` are - not used. - """ - u = parse_url(url) - return self.connection_from_host(u.host, port=u.port, scheme=u.scheme, - pool_kwargs=pool_kwargs) - - def _merge_pool_kwargs(self, override): - """ - Merge a dictionary of override values for self.connection_pool_kw. - - This does not modify self.connection_pool_kw and returns a new dict. - Any keys in the override dictionary with a value of ``None`` are - removed from the merged dictionary. - """ - base_pool_kwargs = self.connection_pool_kw.copy() - if override: - for key, value in override.items(): - if value is None: - try: - del base_pool_kwargs[key] - except KeyError: - pass - else: - base_pool_kwargs[key] = value - return base_pool_kwargs - - def urlopen(self, method, url, redirect=True, **kw): - """ - Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen` - with custom cross-host redirect logic and only sends the request-uri - portion of the ``url``. 
- - The given ``url`` parameter must be absolute, such that an appropriate - :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it. - """ - u = parse_url(url) - conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) - - kw['assert_same_host'] = False - kw['redirect'] = False - - if 'headers' not in kw: - kw['headers'] = self.headers.copy() - - if self.proxy is not None and u.scheme == "http": - response = conn.urlopen(method, url, **kw) - else: - response = conn.urlopen(method, u.request_uri, **kw) - - redirect_location = redirect and response.get_redirect_location() - if not redirect_location: - return response - - # Support relative URLs for redirecting. - redirect_location = urljoin(url, redirect_location) - - # RFC 7231, Section 6.4.4 - if response.status == 303: - method = 'GET' - - retries = kw.get('retries') - if not isinstance(retries, Retry): - retries = Retry.from_int(retries, redirect=redirect) - - # Strip headers marked as unsafe to forward to the redirected location. - # Check remove_headers_on_redirect to avoid a potential network call within - # conn.is_same_host() which may use socket.gethostbyname() in the future. - if (retries.remove_headers_on_redirect - and not conn.is_same_host(redirect_location)): - for header in retries.remove_headers_on_redirect: - kw['headers'].pop(header, None) - - try: - retries = retries.increment(method, url, response=response, _pool=conn) - except MaxRetryError: - if retries.raise_on_redirect: - raise - return response - - kw['retries'] = retries - kw['redirect'] = redirect - - log.info("Redirecting %s -> %s", url, redirect_location) - return self.urlopen(method, redirect_location, **kw) - - -class ProxyManager(PoolManager): - """ - Behaves just like :class:`PoolManager`, but sends all requests through - the defined proxy, using the CONNECT method for HTTPS URLs. - - :param proxy_url: - The URL of the proxy to be used. 
- - :param proxy_headers: - A dictionary containing headers that will be sent to the proxy. In case - of HTTP they are being sent with each request, while in the - HTTPS/CONNECT case they are sent only once. Could be used for proxy - authentication. - - Example: - >>> proxy = urllib3.ProxyManager('http://localhost:3128/') - >>> r1 = proxy.request('GET', 'http://google.com/') - >>> r2 = proxy.request('GET', 'http://httpbin.org/') - >>> len(proxy.pools) - 1 - >>> r3 = proxy.request('GET', 'https://httpbin.org/') - >>> r4 = proxy.request('GET', 'https://twitter.com/') - >>> len(proxy.pools) - 3 - - """ - - def __init__(self, proxy_url, num_pools=10, headers=None, - proxy_headers=None, **connection_pool_kw): - - if isinstance(proxy_url, HTTPConnectionPool): - proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host, - proxy_url.port) - proxy = parse_url(proxy_url) - if not proxy.port: - port = port_by_scheme.get(proxy.scheme, 80) - proxy = proxy._replace(port=port) - - if proxy.scheme not in ("http", "https"): - raise ProxySchemeUnknown(proxy.scheme) - - self.proxy = proxy - self.proxy_headers = proxy_headers or {} - - connection_pool_kw['_proxy'] = self.proxy - connection_pool_kw['_proxy_headers'] = self.proxy_headers - - super(ProxyManager, self).__init__( - num_pools, headers, **connection_pool_kw) - - def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None): - if scheme == "https": - return super(ProxyManager, self).connection_from_host( - host, port, scheme, pool_kwargs=pool_kwargs) - - return super(ProxyManager, self).connection_from_host( - self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs) - - def _set_proxy_headers(self, url, headers=None): - """ - Sets headers needed by proxies: specifically, the Accept and Host - headers. Only sets headers not provided by the user. 
- """ - headers_ = {'Accept': '*/*'} - - netloc = parse_url(url).netloc - if netloc: - headers_['Host'] = netloc - - if headers: - headers_.update(headers) - return headers_ - - def urlopen(self, method, url, redirect=True, **kw): - "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute." - u = parse_url(url) - - if u.scheme == "http": - # For proxied HTTPS requests, httplib sets the necessary headers - # on the CONNECT to the proxy. For HTTP, we'll definitely - # need to set 'Host' at the very least. - headers = kw.get('headers', self.headers) - kw['headers'] = self._set_proxy_headers(url, headers) - - return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw) - - -def proxy_from_url(url, **kw): - return ProxyManager(proxy_url=url, **kw) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/request.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/request.py deleted file mode 100644 index 8f2f44b..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/request.py +++ /dev/null @@ -1,150 +0,0 @@ -from __future__ import absolute_import - -from .filepost import encode_multipart_formdata -from .packages.six.moves.urllib.parse import urlencode - - -__all__ = ['RequestMethods'] - - -class RequestMethods(object): - """ - Convenience mixin for classes who implement a :meth:`urlopen` method, such - as :class:`~urllib3.connectionpool.HTTPConnectionPool` and - :class:`~urllib3.poolmanager.PoolManager`. - - Provides behavior for making common types of HTTP request methods and - decides which type of request field encoding to use. - - Specifically, - - :meth:`.request_encode_url` is for sending requests whose fields are - encoded in the URL (such as GET, HEAD, DELETE). - - :meth:`.request_encode_body` is for sending requests whose fields are - encoded in the *body* of the request using multipart or www-form-urlencoded - (such as for POST, PUT, PATCH). 
- - :meth:`.request` is for making any kind of request, it will look up the - appropriate encoding format and use one of the above two methods to make - the request. - - Initializer parameters: - - :param headers: - Headers to include with all requests, unless other headers are given - explicitly. - """ - - _encode_url_methods = {'DELETE', 'GET', 'HEAD', 'OPTIONS'} - - def __init__(self, headers=None): - self.headers = headers or {} - - def urlopen(self, method, url, body=None, headers=None, - encode_multipart=True, multipart_boundary=None, - **kw): # Abstract - raise NotImplementedError("Classes extending RequestMethods must implement " - "their own ``urlopen`` method.") - - def request(self, method, url, fields=None, headers=None, **urlopen_kw): - """ - Make a request using :meth:`urlopen` with the appropriate encoding of - ``fields`` based on the ``method`` used. - - This is a convenience method that requires the least amount of manual - effort. It can be used in most situations, while still having the - option to drop down to more specific methods when necessary, such as - :meth:`request_encode_url`, :meth:`request_encode_body`, - or even the lowest level :meth:`urlopen`. - """ - method = method.upper() - - urlopen_kw['request_url'] = url - - if method in self._encode_url_methods: - return self.request_encode_url(method, url, fields=fields, - headers=headers, - **urlopen_kw) - else: - return self.request_encode_body(method, url, fields=fields, - headers=headers, - **urlopen_kw) - - def request_encode_url(self, method, url, fields=None, headers=None, - **urlopen_kw): - """ - Make a request using :meth:`urlopen` with the ``fields`` encoded in - the url. This is useful for request methods like GET, HEAD, DELETE, etc. - """ - if headers is None: - headers = self.headers - - extra_kw = {'headers': headers} - extra_kw.update(urlopen_kw) - - if fields: - url += '?' 
+ urlencode(fields) - - return self.urlopen(method, url, **extra_kw) - - def request_encode_body(self, method, url, fields=None, headers=None, - encode_multipart=True, multipart_boundary=None, - **urlopen_kw): - """ - Make a request using :meth:`urlopen` with the ``fields`` encoded in - the body. This is useful for request methods like POST, PUT, PATCH, etc. - - When ``encode_multipart=True`` (default), then - :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode - the payload with the appropriate content type. Otherwise - :meth:`urllib.urlencode` is used with the - 'application/x-www-form-urlencoded' content type. - - Multipart encoding must be used when posting files, and it's reasonably - safe to use it in other times too. However, it may break request - signing, such as with OAuth. - - Supports an optional ``fields`` parameter of key/value strings AND - key/filetuple. A filetuple is a (filename, data, MIME type) tuple where - the MIME type is optional. For example:: - - fields = { - 'foo': 'bar', - 'fakefile': ('foofile.txt', 'contents of foofile'), - 'realfile': ('barfile.txt', open('realfile').read()), - 'typedfile': ('bazfile.bin', open('bazfile').read(), - 'image/jpeg'), - 'nonamefile': 'contents of nonamefile field', - } - - When uploading a file, providing a filename (the first parameter of the - tuple) is optional but recommended to best mimic behavior of browsers. - - Note that if ``headers`` are supplied, the 'Content-Type' header will - be overwritten because it depends on the dynamic random boundary string - which is used to compose the body of the request. The random boundary - string can be explicitly set with the ``multipart_boundary`` parameter. 
- """ - if headers is None: - headers = self.headers - - extra_kw = {'headers': {}} - - if fields: - if 'body' in urlopen_kw: - raise TypeError( - "request got values for both 'fields' and 'body', can only specify one.") - - if encode_multipart: - body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary) - else: - body, content_type = urlencode(fields), 'application/x-www-form-urlencoded' - - extra_kw['body'] = body - extra_kw['headers'] = {'Content-Type': content_type} - - extra_kw['headers'].update(headers) - extra_kw.update(urlopen_kw) - - return self.urlopen(method, url, **extra_kw) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/response.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/response.py deleted file mode 100644 index c112690..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/response.py +++ /dev/null @@ -1,705 +0,0 @@ -from __future__ import absolute_import -from contextlib import contextmanager -import zlib -import io -import logging -from socket import timeout as SocketTimeout -from socket import error as SocketError - -from ._collections import HTTPHeaderDict -from .exceptions import ( - BodyNotHttplibCompatible, ProtocolError, DecodeError, ReadTimeoutError, - ResponseNotChunked, IncompleteRead, InvalidHeader -) -from .packages.six import string_types as basestring, PY3 -from .packages.six.moves import http_client as httplib -from .connection import HTTPException, BaseSSLError -from .util.response import is_fp_closed, is_response_to_head - -log = logging.getLogger(__name__) - - -class DeflateDecoder(object): - - def __init__(self): - self._first_try = True - self._data = b'' - self._obj = zlib.decompressobj() - - def __getattr__(self, name): - return getattr(self._obj, name) - - def decompress(self, data): - if not data: - return data - - if not self._first_try: - return self._obj.decompress(data) - - self._data += 
data - try: - decompressed = self._obj.decompress(data) - if decompressed: - self._first_try = False - self._data = None - return decompressed - except zlib.error: - self._first_try = False - self._obj = zlib.decompressobj(-zlib.MAX_WBITS) - try: - return self.decompress(self._data) - finally: - self._data = None - - -class GzipDecoderState(object): - - FIRST_MEMBER = 0 - OTHER_MEMBERS = 1 - SWALLOW_DATA = 2 - - -class GzipDecoder(object): - - def __init__(self): - self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) - self._state = GzipDecoderState.FIRST_MEMBER - - def __getattr__(self, name): - return getattr(self._obj, name) - - def decompress(self, data): - ret = bytearray() - if self._state == GzipDecoderState.SWALLOW_DATA or not data: - return bytes(ret) - while True: - try: - ret += self._obj.decompress(data) - except zlib.error: - previous_state = self._state - # Ignore data after the first error - self._state = GzipDecoderState.SWALLOW_DATA - if previous_state == GzipDecoderState.OTHER_MEMBERS: - # Allow trailing garbage acceptable in other gzip clients - return bytes(ret) - raise - data = self._obj.unused_data - if not data: - return bytes(ret) - self._state = GzipDecoderState.OTHER_MEMBERS - self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) - - -class MultiDecoder(object): - """ - From RFC7231: - If one or more encodings have been applied to a representation, the - sender that applied the encodings MUST generate a Content-Encoding - header field that lists the content codings in the order in which - they were applied. 
- """ - - def __init__(self, modes): - self._decoders = [_get_decoder(m.strip()) for m in modes.split(',')] - - def flush(self): - return self._decoders[0].flush() - - def decompress(self, data): - for d in reversed(self._decoders): - data = d.decompress(data) - return data - - -def _get_decoder(mode): - if ',' in mode: - return MultiDecoder(mode) - - if mode == 'gzip': - return GzipDecoder() - - return DeflateDecoder() - - -class HTTPResponse(io.IOBase): - """ - HTTP Response container. - - Backwards-compatible to httplib's HTTPResponse but the response ``body`` is - loaded and decoded on-demand when the ``data`` property is accessed. This - class is also compatible with the Python standard library's :mod:`io` - module, and can hence be treated as a readable object in the context of that - framework. - - Extra parameters for behaviour not present in httplib.HTTPResponse: - - :param preload_content: - If True, the response's body will be preloaded during construction. - - :param decode_content: - If True, will attempt to decode the body based on the - 'content-encoding' header. - - :param original_response: - When this HTTPResponse wrapper is generated from an httplib.HTTPResponse - object, it's convenient to include the original for debug purposes. It's - otherwise unused. - - :param retries: - The retries contains the last :class:`~urllib3.util.retry.Retry` that - was used during the request. - - :param enforce_content_length: - Enforce content length checking. Body returned by server must match - value of Content-Length header, if present. Otherwise, raise error. 
- """ - - CONTENT_DECODERS = ['gzip', 'deflate'] - REDIRECT_STATUSES = [301, 302, 303, 307, 308] - - def __init__(self, body='', headers=None, status=0, version=0, reason=None, - strict=0, preload_content=True, decode_content=True, - original_response=None, pool=None, connection=None, msg=None, - retries=None, enforce_content_length=False, - request_method=None, request_url=None): - - if isinstance(headers, HTTPHeaderDict): - self.headers = headers - else: - self.headers = HTTPHeaderDict(headers) - self.status = status - self.version = version - self.reason = reason - self.strict = strict - self.decode_content = decode_content - self.retries = retries - self.enforce_content_length = enforce_content_length - - self._decoder = None - self._body = None - self._fp = None - self._original_response = original_response - self._fp_bytes_read = 0 - self.msg = msg - self._request_url = request_url - - if body and isinstance(body, (basestring, bytes)): - self._body = body - - self._pool = pool - self._connection = connection - - if hasattr(body, 'read'): - self._fp = body - - # Are we using the chunked-style of transfer encoding? - self.chunked = False - self.chunk_left = None - tr_enc = self.headers.get('transfer-encoding', '').lower() - # Don't incur the penalty of creating a list and then discarding it - encodings = (enc.strip() for enc in tr_enc.split(",")) - if "chunked" in encodings: - self.chunked = True - - # Determine length of response - self.length_remaining = self._init_length(request_method) - - # If requested, preload the body. - if preload_content and not self._body: - self._body = self.read(decode_content=decode_content) - - def get_redirect_location(self): - """ - Should we redirect and where to? - - :returns: Truthy redirect location string if we got a redirect status - code and valid location. ``None`` if redirect status and no - location. ``False`` if not a redirect status code. 
- """ - if self.status in self.REDIRECT_STATUSES: - return self.headers.get('location') - - return False - - def release_conn(self): - if not self._pool or not self._connection: - return - - self._pool._put_conn(self._connection) - self._connection = None - - @property - def data(self): - # For backwords-compat with earlier urllib3 0.4 and earlier. - if self._body: - return self._body - - if self._fp: - return self.read(cache_content=True) - - @property - def connection(self): - return self._connection - - def isclosed(self): - return is_fp_closed(self._fp) - - def tell(self): - """ - Obtain the number of bytes pulled over the wire so far. May differ from - the amount of content returned by :meth:``HTTPResponse.read`` if bytes - are encoded on the wire (e.g, compressed). - """ - return self._fp_bytes_read - - def _init_length(self, request_method): - """ - Set initial length value for Response content if available. - """ - length = self.headers.get('content-length') - - if length is not None: - if self.chunked: - # This Response will fail with an IncompleteRead if it can't be - # received as chunked. This method falls back to attempt reading - # the response before raising an exception. - log.warning("Received response with both Content-Length and " - "Transfer-Encoding set. This is expressly forbidden " - "by RFC 7230 sec 3.3.2. Ignoring Content-Length and " - "attempting to process response as Transfer-Encoding: " - "chunked.") - return None - - try: - # RFC 7230 section 3.3.2 specifies multiple content lengths can - # be sent in a single Content-Length header - # (e.g. Content-Length: 42, 42). This line ensures the values - # are all valid ints and that as long as the `set` length is 1, - # all values are the same. Otherwise, the header is invalid. 
- lengths = set([int(val) for val in length.split(',')]) - if len(lengths) > 1: - raise InvalidHeader("Content-Length contained multiple " - "unmatching values (%s)" % length) - length = lengths.pop() - except ValueError: - length = None - else: - if length < 0: - length = None - - # Convert status to int for comparison - # In some cases, httplib returns a status of "_UNKNOWN" - try: - status = int(self.status) - except ValueError: - status = 0 - - # Check for responses that shouldn't include a body - if status in (204, 304) or 100 <= status < 200 or request_method == 'HEAD': - length = 0 - - return length - - def _init_decoder(self): - """ - Set-up the _decoder attribute if necessary. - """ - # Note: content-encoding value should be case-insensitive, per RFC 7230 - # Section 3.2 - content_encoding = self.headers.get('content-encoding', '').lower() - if self._decoder is None: - if content_encoding in self.CONTENT_DECODERS: - self._decoder = _get_decoder(content_encoding) - elif ',' in content_encoding: - encodings = [e.strip() for e in content_encoding.split(',') if e.strip() in self.CONTENT_DECODERS] - if len(encodings): - self._decoder = _get_decoder(content_encoding) - - def _decode(self, data, decode_content, flush_decoder): - """ - Decode the data passed in and potentially flush the decoder. - """ - try: - if decode_content and self._decoder: - data = self._decoder.decompress(data) - except (IOError, zlib.error) as e: - content_encoding = self.headers.get('content-encoding', '').lower() - raise DecodeError( - "Received response with content-encoding: %s, but " - "failed to decode it." % content_encoding, e) - - if flush_decoder and decode_content: - data += self._flush_decoder() - - return data - - def _flush_decoder(self): - """ - Flushes the decoder. Should only be called if the decoder is actually - being used. 
- """ - if self._decoder: - buf = self._decoder.decompress(b'') - return buf + self._decoder.flush() - - return b'' - - @contextmanager - def _error_catcher(self): - """ - Catch low-level python exceptions, instead re-raising urllib3 - variants, so that low-level exceptions are not leaked in the - high-level api. - - On exit, release the connection back to the pool. - """ - clean_exit = False - - try: - try: - yield - - except SocketTimeout: - # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but - # there is yet no clean way to get at it from this context. - raise ReadTimeoutError(self._pool, None, 'Read timed out.') - - except BaseSSLError as e: - # FIXME: Is there a better way to differentiate between SSLErrors? - if 'read operation timed out' not in str(e): # Defensive: - # This shouldn't happen but just in case we're missing an edge - # case, let's avoid swallowing SSL errors. - raise - - raise ReadTimeoutError(self._pool, None, 'Read timed out.') - - except (HTTPException, SocketError) as e: - # This includes IncompleteRead. - raise ProtocolError('Connection broken: %r' % e, e) - - # If no exception is thrown, we should avoid cleaning up - # unnecessarily. - clean_exit = True - finally: - # If we didn't terminate cleanly, we need to throw away our - # connection. - if not clean_exit: - # The response may not be closed but we're not going to use it - # anymore so close it now to ensure that the connection is - # released back to the pool. - if self._original_response: - self._original_response.close() - - # Closing the response may not actually be sufficient to close - # everything, so if we have a hold of the connection close that - # too. - if self._connection: - self._connection.close() - - # If we hold the original response but it's closed now, we should - # return the connection back to the pool. 
- if self._original_response and self._original_response.isclosed(): - self.release_conn() - - def read(self, amt=None, decode_content=None, cache_content=False): - """ - Similar to :meth:`httplib.HTTPResponse.read`, but with two additional - parameters: ``decode_content`` and ``cache_content``. - - :param amt: - How much of the content to read. If specified, caching is skipped - because it doesn't make sense to cache partial content as the full - response. - - :param decode_content: - If True, will attempt to decode the body based on the - 'content-encoding' header. - - :param cache_content: - If True, will save the returned data such that the same result is - returned despite of the state of the underlying file object. This - is useful if you want the ``.data`` property to continue working - after having ``.read()`` the file object. (Overridden if ``amt`` is - set.) - """ - self._init_decoder() - if decode_content is None: - decode_content = self.decode_content - - if self._fp is None: - return - - flush_decoder = False - data = None - - with self._error_catcher(): - if amt is None: - # cStringIO doesn't like amt=None - data = self._fp.read() - flush_decoder = True - else: - cache_content = False - data = self._fp.read(amt) - if amt != 0 and not data: # Platform-specific: Buggy versions of Python. - # Close the connection when no data is returned - # - # This is redundant to what httplib/http.client _should_ - # already do. However, versions of python released before - # December 15, 2012 (http://bugs.python.org/issue16298) do - # not properly close the connection in all cases. There is - # no harm in redundantly calling close. - self._fp.close() - flush_decoder = True - if self.enforce_content_length and self.length_remaining not in (0, None): - # This is an edge case that httplib failed to cover due - # to concerns of backward compatibility. 
We're - # addressing it here to make sure IncompleteRead is - # raised during streaming, so all calls with incorrect - # Content-Length are caught. - raise IncompleteRead(self._fp_bytes_read, self.length_remaining) - - if data: - self._fp_bytes_read += len(data) - if self.length_remaining is not None: - self.length_remaining -= len(data) - - data = self._decode(data, decode_content, flush_decoder) - - if cache_content: - self._body = data - - return data - - def stream(self, amt=2**16, decode_content=None): - """ - A generator wrapper for the read() method. A call will block until - ``amt`` bytes have been read from the connection or until the - connection is closed. - - :param amt: - How much of the content to read. The generator will return up to - much data per iteration, but may return less. This is particularly - likely when using compressed data. However, the empty string will - never be returned. - - :param decode_content: - If True, will attempt to decode the body based on the - 'content-encoding' header. - """ - if self.chunked and self.supports_chunked_reads(): - for line in self.read_chunked(amt, decode_content=decode_content): - yield line - else: - while not is_fp_closed(self._fp): - data = self.read(amt=amt, decode_content=decode_content) - - if data: - yield data - - @classmethod - def from_httplib(ResponseCls, r, **response_kw): - """ - Given an :class:`httplib.HTTPResponse` instance ``r``, return a - corresponding :class:`urllib3.response.HTTPResponse` object. - - Remaining parameters are passed to the HTTPResponse constructor, along - with ``original_response=r``. 
- """ - headers = r.msg - - if not isinstance(headers, HTTPHeaderDict): - if PY3: # Python 3 - headers = HTTPHeaderDict(headers.items()) - else: # Python 2 - headers = HTTPHeaderDict.from_httplib(headers) - - # HTTPResponse objects in Python 3 don't have a .strict attribute - strict = getattr(r, 'strict', 0) - resp = ResponseCls(body=r, - headers=headers, - status=r.status, - version=r.version, - reason=r.reason, - strict=strict, - original_response=r, - **response_kw) - return resp - - # Backwards-compatibility methods for httplib.HTTPResponse - def getheaders(self): - return self.headers - - def getheader(self, name, default=None): - return self.headers.get(name, default) - - # Backwards compatibility for http.cookiejar - def info(self): - return self.headers - - # Overrides from io.IOBase - def close(self): - if not self.closed: - self._fp.close() - - if self._connection: - self._connection.close() - - @property - def closed(self): - if self._fp is None: - return True - elif hasattr(self._fp, 'isclosed'): - return self._fp.isclosed() - elif hasattr(self._fp, 'closed'): - return self._fp.closed - else: - return True - - def fileno(self): - if self._fp is None: - raise IOError("HTTPResponse has no file to get a fileno from") - elif hasattr(self._fp, "fileno"): - return self._fp.fileno() - else: - raise IOError("The file-like object this HTTPResponse is wrapped " - "around has no file descriptor") - - def flush(self): - if self._fp is not None and hasattr(self._fp, 'flush'): - return self._fp.flush() - - def readable(self): - # This method is required for `io` module compatibility. - return True - - def readinto(self, b): - # This method is required for `io` module compatibility. - temp = self.read(len(b)) - if len(temp) == 0: - return 0 - else: - b[:len(temp)] = temp - return len(temp) - - def supports_chunked_reads(self): - """ - Checks if the underlying file-like object looks like a - httplib.HTTPResponse object. We do this by testing for the fp - attribute. 
If it is present we assume it returns raw chunks as - processed by read_chunked(). - """ - return hasattr(self._fp, 'fp') - - def _update_chunk_length(self): - # First, we'll figure out length of a chunk and then - # we'll try to read it from socket. - if self.chunk_left is not None: - return - line = self._fp.fp.readline() - line = line.split(b';', 1)[0] - try: - self.chunk_left = int(line, 16) - except ValueError: - # Invalid chunked protocol response, abort. - self.close() - raise httplib.IncompleteRead(line) - - def _handle_chunk(self, amt): - returned_chunk = None - if amt is None: - chunk = self._fp._safe_read(self.chunk_left) - returned_chunk = chunk - self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. - self.chunk_left = None - elif amt < self.chunk_left: - value = self._fp._safe_read(amt) - self.chunk_left = self.chunk_left - amt - returned_chunk = value - elif amt == self.chunk_left: - value = self._fp._safe_read(amt) - self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. - self.chunk_left = None - returned_chunk = value - else: # amt > self.chunk_left - returned_chunk = self._fp._safe_read(self.chunk_left) - self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. - self.chunk_left = None - return returned_chunk - - def read_chunked(self, amt=None, decode_content=None): - """ - Similar to :meth:`HTTPResponse.read`, but with an additional - parameter: ``decode_content``. - - :param amt: - How much of the content to read. If specified, caching is skipped - because it doesn't make sense to cache partial content as the full - response. - - :param decode_content: - If True, will attempt to decode the body based on the - 'content-encoding' header. - """ - self._init_decoder() - # FIXME: Rewrite this method and make it a class with a better structured logic. - if not self.chunked: - raise ResponseNotChunked( - "Response is not chunked. 
" - "Header 'transfer-encoding: chunked' is missing.") - if not self.supports_chunked_reads(): - raise BodyNotHttplibCompatible( - "Body should be httplib.HTTPResponse like. " - "It should have have an fp attribute which returns raw chunks.") - - with self._error_catcher(): - # Don't bother reading the body of a HEAD request. - if self._original_response and is_response_to_head(self._original_response): - self._original_response.close() - return - - # If a response is already read and closed - # then return immediately. - if self._fp.fp is None: - return - - while True: - self._update_chunk_length() - if self.chunk_left == 0: - break - chunk = self._handle_chunk(amt) - decoded = self._decode(chunk, decode_content=decode_content, - flush_decoder=False) - if decoded: - yield decoded - - if decode_content: - # On CPython and PyPy, we should never need to flush the - # decoder. However, on Jython we *might* need to, so - # lets defensively do it anyway. - decoded = self._flush_decoder() - if decoded: # Platform-specific: Jython. - yield decoded - - # Chunk content ends with \r\n: discard it. - while True: - line = self._fp.fp.readline() - if not line: - # Some sites may not end with '\r\n'. - break - if line == b'\r\n': - break - - # We read everything; close the "file". - if self._original_response: - self._original_response.close() - - def geturl(self): - """ - Returns the URL that was the source of this response. - If the request that generated this response redirected, this method - will return the final redirect location. 
- """ - if self.retries is not None and len(self.retries.history): - return self.retries.history[-1].redirect_location - else: - return self._request_url diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/__init__.py deleted file mode 100644 index 2f2770b..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/__init__.py +++ /dev/null @@ -1,54 +0,0 @@ -from __future__ import absolute_import -# For backwards compatibility, provide imports that used to be here. -from .connection import is_connection_dropped -from .request import make_headers -from .response import is_fp_closed -from .ssl_ import ( - SSLContext, - HAS_SNI, - IS_PYOPENSSL, - IS_SECURETRANSPORT, - assert_fingerprint, - resolve_cert_reqs, - resolve_ssl_version, - ssl_wrap_socket, -) -from .timeout import ( - current_time, - Timeout, -) - -from .retry import Retry -from .url import ( - get_host, - parse_url, - split_first, - Url, -) -from .wait import ( - wait_for_read, - wait_for_write -) - -__all__ = ( - 'HAS_SNI', - 'IS_PYOPENSSL', - 'IS_SECURETRANSPORT', - 'SSLContext', - 'Retry', - 'Timeout', - 'Url', - 'assert_fingerprint', - 'current_time', - 'is_connection_dropped', - 'is_fp_closed', - 'get_host', - 'parse_url', - 'make_headers', - 'resolve_cert_reqs', - 'resolve_ssl_version', - 'split_first', - 'ssl_wrap_socket', - 'wait_for_read', - 'wait_for_write' -) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/connection.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/connection.py deleted file mode 100644 index 5ad70b2..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/connection.py +++ /dev/null @@ -1,134 +0,0 @@ -from __future__ import absolute_import -import socket -from .wait import NoWayToWaitForSocketError, 
wait_for_read -from ..contrib import _appengine_environ - - -def is_connection_dropped(conn): # Platform-specific - """ - Returns True if the connection is dropped and should be closed. - - :param conn: - :class:`httplib.HTTPConnection` object. - - Note: For platforms like AppEngine, this will always return ``False`` to - let the platform handle connection recycling transparently for us. - """ - sock = getattr(conn, 'sock', False) - if sock is False: # Platform-specific: AppEngine - return False - if sock is None: # Connection already closed (such as by httplib). - return True - try: - # Returns True if readable, which here means it's been dropped - return wait_for_read(sock, timeout=0.0) - except NoWayToWaitForSocketError: # Platform-specific: AppEngine - return False - - -# This function is copied from socket.py in the Python 2.7 standard -# library test suite. Added to its signature is only `socket_options`. -# One additional modification is that we avoid binding to IPv6 servers -# discovered in DNS if the system doesn't have IPv6 functionality. -def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, - source_address=None, socket_options=None): - """Connect to *address* and return the socket object. - - Convenience function. Connect to *address* (a 2-tuple ``(host, - port)``) and return the socket object. Passing the optional - *timeout* parameter will set the timeout on the socket instance - before attempting to connect. If no *timeout* is supplied, the - global default timeout setting returned by :func:`getdefaulttimeout` - is used. If *source_address* is set it must be a tuple of (host, port) - for the socket to bind as a source address before making the connection. - An host of '' or port 0 tells the OS to use the default. 
- """ - - host, port = address - if host.startswith('['): - host = host.strip('[]') - err = None - - # Using the value from allowed_gai_family() in the context of getaddrinfo lets - # us select whether to work with IPv4 DNS records, IPv6 records, or both. - # The original create_connection function always returns all records. - family = allowed_gai_family() - - for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM): - af, socktype, proto, canonname, sa = res - sock = None - try: - sock = socket.socket(af, socktype, proto) - - # If provided, set socket level options before connecting. - _set_socket_options(sock, socket_options) - - if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: - sock.settimeout(timeout) - if source_address: - sock.bind(source_address) - sock.connect(sa) - return sock - - except socket.error as e: - err = e - if sock is not None: - sock.close() - sock = None - - if err is not None: - raise err - - raise socket.error("getaddrinfo returns an empty list") - - -def _set_socket_options(sock, options): - if options is None: - return - - for opt in options: - sock.setsockopt(*opt) - - -def allowed_gai_family(): - """This function is designed to work in the context of - getaddrinfo, where family=socket.AF_UNSPEC is the default and - will perform a DNS search for both IPv6 and IPv4 records.""" - - family = socket.AF_INET - if HAS_IPV6: - family = socket.AF_UNSPEC - return family - - -def _has_ipv6(host): - """ Returns True if the system can bind an IPv6 address. """ - sock = None - has_ipv6 = False - - # App Engine doesn't support IPV6 sockets and actually has a quota on the - # number of sockets that can be used, so just early out here instead of - # creating a socket needlessly. - # See https://github.com/urllib3/urllib3/issues/1446 - if _appengine_environ.is_appengine_sandbox(): - return False - - if socket.has_ipv6: - # has_ipv6 returns true if cPython was compiled with IPv6 support. 
- # It does not tell us if the system has IPv6 support enabled. To - # determine that we must bind to an IPv6 address. - # https://github.com/shazow/urllib3/pull/611 - # https://bugs.python.org/issue658327 - try: - sock = socket.socket(socket.AF_INET6) - sock.bind((host, 0)) - has_ipv6 = True - except Exception: - pass - - if sock: - sock.close() - return has_ipv6 - - -HAS_IPV6 = _has_ipv6('::1') diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/queue.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/queue.py deleted file mode 100644 index d3d379a..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/queue.py +++ /dev/null @@ -1,21 +0,0 @@ -import collections -from ..packages import six -from ..packages.six.moves import queue - -if six.PY2: - # Queue is imported for side effects on MS Windows. See issue #229. - import Queue as _unused_module_Queue # noqa: F401 - - -class LifoQueue(queue.Queue): - def _init(self, _): - self.queue = collections.deque() - - def _qsize(self, len=len): - return len(self.queue) - - def _put(self, item): - self.queue.append(item) - - def _get(self): - return self.queue.pop() diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/request.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/request.py deleted file mode 100644 index 3ddfcd5..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/request.py +++ /dev/null @@ -1,118 +0,0 @@ -from __future__ import absolute_import -from base64 import b64encode - -from ..packages.six import b, integer_types -from ..exceptions import UnrewindableBodyError - -ACCEPT_ENCODING = 'gzip,deflate' -_FAILEDTELL = object() - - -def make_headers(keep_alive=None, accept_encoding=None, user_agent=None, - basic_auth=None, proxy_basic_auth=None, disable_cache=None): - """ - Shortcuts for 
generating request headers. - - :param keep_alive: - If ``True``, adds 'connection: keep-alive' header. - - :param accept_encoding: - Can be a boolean, list, or string. - ``True`` translates to 'gzip,deflate'. - List will get joined by comma. - String will be used as provided. - - :param user_agent: - String representing the user-agent you want, such as - "python-urllib3/0.6" - - :param basic_auth: - Colon-separated username:password string for 'authorization: basic ...' - auth header. - - :param proxy_basic_auth: - Colon-separated username:password string for 'proxy-authorization: basic ...' - auth header. - - :param disable_cache: - If ``True``, adds 'cache-control: no-cache' header. - - Example:: - - >>> make_headers(keep_alive=True, user_agent="Batman/1.0") - {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'} - >>> make_headers(accept_encoding=True) - {'accept-encoding': 'gzip,deflate'} - """ - headers = {} - if accept_encoding: - if isinstance(accept_encoding, str): - pass - elif isinstance(accept_encoding, list): - accept_encoding = ','.join(accept_encoding) - else: - accept_encoding = ACCEPT_ENCODING - headers['accept-encoding'] = accept_encoding - - if user_agent: - headers['user-agent'] = user_agent - - if keep_alive: - headers['connection'] = 'keep-alive' - - if basic_auth: - headers['authorization'] = 'Basic ' + \ - b64encode(b(basic_auth)).decode('utf-8') - - if proxy_basic_auth: - headers['proxy-authorization'] = 'Basic ' + \ - b64encode(b(proxy_basic_auth)).decode('utf-8') - - if disable_cache: - headers['cache-control'] = 'no-cache' - - return headers - - -def set_file_position(body, pos): - """ - If a position is provided, move file to that point. - Otherwise, we'll attempt to record a position for future use. 
- """ - if pos is not None: - rewind_body(body, pos) - elif getattr(body, 'tell', None) is not None: - try: - pos = body.tell() - except (IOError, OSError): - # This differentiates from None, allowing us to catch - # a failed `tell()` later when trying to rewind the body. - pos = _FAILEDTELL - - return pos - - -def rewind_body(body, body_pos): - """ - Attempt to rewind body to a certain position. - Primarily used for request redirects and retries. - - :param body: - File-like object that supports seek. - - :param int pos: - Position to seek to in file. - """ - body_seek = getattr(body, 'seek', None) - if body_seek is not None and isinstance(body_pos, integer_types): - try: - body_seek(body_pos) - except (IOError, OSError): - raise UnrewindableBodyError("An error occurred when rewinding request " - "body for redirect/retry.") - elif body_pos is _FAILEDTELL: - raise UnrewindableBodyError("Unable to record file position for rewinding " - "request body during a redirect/retry.") - else: - raise ValueError("body_pos must be of type integer, " - "instead it was %s." % type(body_pos)) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/response.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/response.py deleted file mode 100644 index 3d54864..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/response.py +++ /dev/null @@ -1,87 +0,0 @@ -from __future__ import absolute_import -from ..packages.six.moves import http_client as httplib - -from ..exceptions import HeaderParsingError - - -def is_fp_closed(obj): - """ - Checks whether a given file-like object is closed. - - :param obj: - The file-like object to check. - """ - - try: - # Check `isclosed()` first, in case Python3 doesn't set `closed`. - # GH Issue #928 - return obj.isclosed() - except AttributeError: - pass - - try: - # Check via the official file-like-object way. 
- return obj.closed - except AttributeError: - pass - - try: - # Check if the object is a container for another file-like object that - # gets released on exhaustion (e.g. HTTPResponse). - return obj.fp is None - except AttributeError: - pass - - raise ValueError("Unable to determine whether fp is closed.") - - -def assert_header_parsing(headers): - """ - Asserts whether all headers have been successfully parsed. - Extracts encountered errors from the result of parsing headers. - - Only works on Python 3. - - :param headers: Headers to verify. - :type headers: `httplib.HTTPMessage`. - - :raises urllib3.exceptions.HeaderParsingError: - If parsing errors are found. - """ - - # This will fail silently if we pass in the wrong kind of parameter. - # To make debugging easier add an explicit check. - if not isinstance(headers, httplib.HTTPMessage): - raise TypeError('expected httplib.Message, got {0}.'.format( - type(headers))) - - defects = getattr(headers, 'defects', None) - get_payload = getattr(headers, 'get_payload', None) - - unparsed_data = None - if get_payload: - # get_payload is actually email.message.Message.get_payload; - # we're only interested in the result if it's not a multipart message - if not headers.is_multipart(): - payload = get_payload() - - if isinstance(payload, (bytes, str)): - unparsed_data = payload - - if defects or unparsed_data: - raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data) - - -def is_response_to_head(response): - """ - Checks whether the request of a response has been a HEAD-request. - Handles the quirks of AppEngine. - - :param conn: - :type conn: :class:`httplib.HTTPResponse` - """ - # FIXME: Can we do this somehow without accessing private httplib _method? 
- method = response._method - if isinstance(method, int): # Platform-specific: Appengine - return method == 3 - return method.upper() == 'HEAD' diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/retry.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/retry.py deleted file mode 100644 index e7d0abd..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/retry.py +++ /dev/null @@ -1,411 +0,0 @@ -from __future__ import absolute_import -import time -import logging -from collections import namedtuple -from itertools import takewhile -import email -import re - -from ..exceptions import ( - ConnectTimeoutError, - MaxRetryError, - ProtocolError, - ReadTimeoutError, - ResponseError, - InvalidHeader, -) -from ..packages import six - - -log = logging.getLogger(__name__) - - -# Data structure for representing the metadata of requests that result in a retry. -RequestHistory = namedtuple('RequestHistory', ["method", "url", "error", - "status", "redirect_location"]) - - -class Retry(object): - """ Retry configuration. - - Each retry attempt will create a new Retry object with updated values, so - they can be safely reused. - - Retries can be defined as a default for a pool:: - - retries = Retry(connect=5, read=2, redirect=5) - http = PoolManager(retries=retries) - response = http.request('GET', 'http://example.com/') - - Or per-request (which overrides the default for the pool):: - - response = http.request('GET', 'http://example.com/', retries=Retry(10)) - - Retries can be disabled by passing ``False``:: - - response = http.request('GET', 'http://example.com/', retries=False) - - Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless - retries are disabled, in which case the causing exception will be raised. - - :param int total: - Total number of retries to allow. Takes precedence over other counts. 
- - Set to ``None`` to remove this constraint and fall back on other - counts. It's a good idea to set this to some sensibly-high value to - account for unexpected edge cases and avoid infinite retry loops. - - Set to ``0`` to fail on the first retry. - - Set to ``False`` to disable and imply ``raise_on_redirect=False``. - - :param int connect: - How many connection-related errors to retry on. - - These are errors raised before the request is sent to the remote server, - which we assume has not triggered the server to process the request. - - Set to ``0`` to fail on the first retry of this type. - - :param int read: - How many times to retry on read errors. - - These errors are raised after the request was sent to the server, so the - request may have side-effects. - - Set to ``0`` to fail on the first retry of this type. - - :param int redirect: - How many redirects to perform. Limit this to avoid infinite redirect - loops. - - A redirect is a HTTP response with a status code 301, 302, 303, 307 or - 308. - - Set to ``0`` to fail on the first retry of this type. - - Set to ``False`` to disable and imply ``raise_on_redirect=False``. - - :param int status: - How many times to retry on bad status codes. - - These are retries made on responses, where status code matches - ``status_forcelist``. - - Set to ``0`` to fail on the first retry of this type. - - :param iterable method_whitelist: - Set of uppercased HTTP method verbs that we should retry on. - - By default, we only retry on methods which are considered to be - idempotent (multiple requests with the same parameters end with the - same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`. - - Set to a ``False`` value to retry on any verb. - - :param iterable status_forcelist: - A set of integer HTTP status codes that we should force a retry on. - A retry is initiated if the request method is in ``method_whitelist`` - and the response status code is in ``status_forcelist``. 
- - By default, this is disabled with ``None``. - - :param float backoff_factor: - A backoff factor to apply between attempts after the second try - (most errors are resolved immediately by a second try without a - delay). urllib3 will sleep for:: - - {backoff factor} * (2 ** ({number of total retries} - 1)) - - seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep - for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer - than :attr:`Retry.BACKOFF_MAX`. - - By default, backoff is disabled (set to 0). - - :param bool raise_on_redirect: Whether, if the number of redirects is - exhausted, to raise a MaxRetryError, or to return a response with a - response code in the 3xx range. - - :param bool raise_on_status: Similar meaning to ``raise_on_redirect``: - whether we should raise an exception, or return a response, - if status falls in ``status_forcelist`` range and retries have - been exhausted. - - :param tuple history: The history of the request encountered during - each call to :meth:`~Retry.increment`. The list is in the order - the requests occurred. Each list item is of class :class:`RequestHistory`. - - :param bool respect_retry_after_header: - Whether to respect Retry-After header on status codes defined as - :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not. - - :param iterable remove_headers_on_redirect: - Sequence of headers to remove from the request when a response - indicating a redirect is returned before firing off the redirected - request. - """ - - DEFAULT_METHOD_WHITELIST = frozenset([ - 'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE']) - - RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503]) - - DEFAULT_REDIRECT_HEADERS_BLACKLIST = frozenset(['Authorization']) - - #: Maximum backoff time. 
- BACKOFF_MAX = 120 - - def __init__(self, total=10, connect=None, read=None, redirect=None, status=None, - method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None, - backoff_factor=0, raise_on_redirect=True, raise_on_status=True, - history=None, respect_retry_after_header=True, - remove_headers_on_redirect=DEFAULT_REDIRECT_HEADERS_BLACKLIST): - - self.total = total - self.connect = connect - self.read = read - self.status = status - - if redirect is False or total is False: - redirect = 0 - raise_on_redirect = False - - self.redirect = redirect - self.status_forcelist = status_forcelist or set() - self.method_whitelist = method_whitelist - self.backoff_factor = backoff_factor - self.raise_on_redirect = raise_on_redirect - self.raise_on_status = raise_on_status - self.history = history or tuple() - self.respect_retry_after_header = respect_retry_after_header - self.remove_headers_on_redirect = remove_headers_on_redirect - - def new(self, **kw): - params = dict( - total=self.total, - connect=self.connect, read=self.read, redirect=self.redirect, status=self.status, - method_whitelist=self.method_whitelist, - status_forcelist=self.status_forcelist, - backoff_factor=self.backoff_factor, - raise_on_redirect=self.raise_on_redirect, - raise_on_status=self.raise_on_status, - history=self.history, - remove_headers_on_redirect=self.remove_headers_on_redirect - ) - params.update(kw) - return type(self)(**params) - - @classmethod - def from_int(cls, retries, redirect=True, default=None): - """ Backwards-compatibility for the old retries format.""" - if retries is None: - retries = default if default is not None else cls.DEFAULT - - if isinstance(retries, Retry): - return retries - - redirect = bool(redirect) and None - new_retries = cls(retries, redirect=redirect) - log.debug("Converted retries value: %r -> %r", retries, new_retries) - return new_retries - - def get_backoff_time(self): - """ Formula for computing the current backoff - - :rtype: float - """ - # We want 
to consider only the last consecutive errors sequence (Ignore redirects). - consecutive_errors_len = len(list(takewhile(lambda x: x.redirect_location is None, - reversed(self.history)))) - if consecutive_errors_len <= 1: - return 0 - - backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1)) - return min(self.BACKOFF_MAX, backoff_value) - - def parse_retry_after(self, retry_after): - # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4 - if re.match(r"^\s*[0-9]+\s*$", retry_after): - seconds = int(retry_after) - else: - retry_date_tuple = email.utils.parsedate(retry_after) - if retry_date_tuple is None: - raise InvalidHeader("Invalid Retry-After header: %s" % retry_after) - retry_date = time.mktime(retry_date_tuple) - seconds = retry_date - time.time() - - if seconds < 0: - seconds = 0 - - return seconds - - def get_retry_after(self, response): - """ Get the value of Retry-After in seconds. """ - - retry_after = response.getheader("Retry-After") - - if retry_after is None: - return None - - return self.parse_retry_after(retry_after) - - def sleep_for_retry(self, response=None): - retry_after = self.get_retry_after(response) - if retry_after: - time.sleep(retry_after) - return True - - return False - - def _sleep_backoff(self): - backoff = self.get_backoff_time() - if backoff <= 0: - return - time.sleep(backoff) - - def sleep(self, response=None): - """ Sleep between retry attempts. - - This method will respect a server's ``Retry-After`` response header - and sleep the duration of the time requested. If that is not present, it - will use an exponential backoff. By default, the backoff factor is 0 and - this method will return immediately. - """ - - if response: - slept = self.sleep_for_retry(response) - if slept: - return - - self._sleep_backoff() - - def _is_connection_error(self, err): - """ Errors when we're fairly sure that the server did not receive the - request, so it should be safe to retry. 
- """ - return isinstance(err, ConnectTimeoutError) - - def _is_read_error(self, err): - """ Errors that occur after the request has been started, so we should - assume that the server began processing it. - """ - return isinstance(err, (ReadTimeoutError, ProtocolError)) - - def _is_method_retryable(self, method): - """ Checks if a given HTTP method should be retried upon, depending if - it is included on the method whitelist. - """ - if self.method_whitelist and method.upper() not in self.method_whitelist: - return False - - return True - - def is_retry(self, method, status_code, has_retry_after=False): - """ Is this method/status code retryable? (Based on whitelists and control - variables such as the number of total retries to allow, whether to - respect the Retry-After header, whether this header is present, and - whether the returned status code is on the list of status codes to - be retried upon on the presence of the aforementioned header) - """ - if not self._is_method_retryable(method): - return False - - if self.status_forcelist and status_code in self.status_forcelist: - return True - - return (self.total and self.respect_retry_after_header and - has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES)) - - def is_exhausted(self): - """ Are we out of retries? """ - retry_counts = (self.total, self.connect, self.read, self.redirect, self.status) - retry_counts = list(filter(None, retry_counts)) - if not retry_counts: - return False - - return min(retry_counts) < 0 - - def increment(self, method=None, url=None, response=None, error=None, - _pool=None, _stacktrace=None): - """ Return a new Retry object with incremented retry counters. - - :param response: A response object, or None, if the server did not - return a response. - :type response: :class:`~urllib3.response.HTTPResponse` - :param Exception error: An error encountered during the request, or - None if the response was received successfully. - - :return: A new ``Retry`` object. 
- """ - if self.total is False and error: - # Disabled, indicate to re-raise the error. - raise six.reraise(type(error), error, _stacktrace) - - total = self.total - if total is not None: - total -= 1 - - connect = self.connect - read = self.read - redirect = self.redirect - status_count = self.status - cause = 'unknown' - status = None - redirect_location = None - - if error and self._is_connection_error(error): - # Connect retry? - if connect is False: - raise six.reraise(type(error), error, _stacktrace) - elif connect is not None: - connect -= 1 - - elif error and self._is_read_error(error): - # Read retry? - if read is False or not self._is_method_retryable(method): - raise six.reraise(type(error), error, _stacktrace) - elif read is not None: - read -= 1 - - elif response and response.get_redirect_location(): - # Redirect retry? - if redirect is not None: - redirect -= 1 - cause = 'too many redirects' - redirect_location = response.get_redirect_location() - status = response.status - - else: - # Incrementing because of a server error like a 500 in - # status_forcelist and a the given method is in the whitelist - cause = ResponseError.GENERIC_ERROR - if response and response.status: - if status_count is not None: - status_count -= 1 - cause = ResponseError.SPECIFIC_ERROR.format( - status_code=response.status) - status = response.status - - history = self.history + (RequestHistory(method, url, error, status, redirect_location),) - - new_retry = self.new( - total=total, - connect=connect, read=read, redirect=redirect, status=status_count, - history=history) - - if new_retry.is_exhausted(): - raise MaxRetryError(_pool, url, error or ResponseError(cause)) - - log.debug("Incremented Retry for (url='%s'): %r", url, new_retry) - - return new_retry - - def __repr__(self): - return ('{cls.__name__}(total={self.total}, connect={self.connect}, ' - 'read={self.read}, redirect={self.redirect}, status={self.status})').format( - cls=type(self), self=self) - - -# For backwards 
compatibility (equivalent to pre-v1.9): -Retry.DEFAULT = Retry(3) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/ssl_.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/ssl_.py deleted file mode 100644 index dfc553f..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/ssl_.py +++ /dev/null @@ -1,381 +0,0 @@ -from __future__ import absolute_import -import errno -import warnings -import hmac -import socket - -from binascii import hexlify, unhexlify -from hashlib import md5, sha1, sha256 - -from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning -from ..packages import six - - -SSLContext = None -HAS_SNI = False -IS_PYOPENSSL = False -IS_SECURETRANSPORT = False - -# Maps the length of a digest to a possible hash function producing this digest -HASHFUNC_MAP = { - 32: md5, - 40: sha1, - 64: sha256, -} - - -def _const_compare_digest_backport(a, b): - """ - Compare two digests of equal length in constant time. - - The digests must be of type str/bytes. - Returns True if the digests match, and False otherwise. - """ - result = abs(len(a) - len(b)) - for l, r in zip(bytearray(a), bytearray(b)): - result |= l ^ r - return result == 0 - - -_const_compare_digest = getattr(hmac, 'compare_digest', - _const_compare_digest_backport) - - -try: # Test for SSL features - import ssl - from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23 - from ssl import HAS_SNI # Has SNI? -except ImportError: - pass - - -try: - from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION -except ImportError: - OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000 - OP_NO_COMPRESSION = 0x20000 - - -# Python 2.7 doesn't have inet_pton on non-Linux so we fallback on inet_aton in -# those cases. This means that we can only detect IPv4 addresses in this case. 
-if hasattr(socket, 'inet_pton'): - inet_pton = socket.inet_pton -else: - # Maybe we can use ipaddress if the user has urllib3[secure]? - try: - from pip._vendor import ipaddress - - def inet_pton(_, host): - if isinstance(host, bytes): - host = host.decode('ascii') - return ipaddress.ip_address(host) - - except ImportError: # Platform-specific: Non-Linux - def inet_pton(_, host): - return socket.inet_aton(host) - - -# A secure default. -# Sources for more information on TLS ciphers: -# -# - https://wiki.mozilla.org/Security/Server_Side_TLS -# - https://www.ssllabs.com/projects/best-practices/index.html -# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/ -# -# The general intent is: -# - Prefer TLS 1.3 cipher suites -# - prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE), -# - prefer ECDHE over DHE for better performance, -# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and -# security, -# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common, -# - disable NULL authentication, MD5 MACs and DSS for security reasons. -DEFAULT_CIPHERS = ':'.join([ - 'TLS13-AES-256-GCM-SHA384', - 'TLS13-CHACHA20-POLY1305-SHA256', - 'TLS13-AES-128-GCM-SHA256', - 'ECDH+AESGCM', - 'ECDH+CHACHA20', - 'DH+AESGCM', - 'DH+CHACHA20', - 'ECDH+AES256', - 'DH+AES256', - 'ECDH+AES128', - 'DH+AES', - 'RSA+AESGCM', - 'RSA+AES', - '!aNULL', - '!eNULL', - '!MD5', -]) - -try: - from ssl import SSLContext # Modern SSL? 
-except ImportError: - import sys - - class SSLContext(object): # Platform-specific: Python 2 - def __init__(self, protocol_version): - self.protocol = protocol_version - # Use default values from a real SSLContext - self.check_hostname = False - self.verify_mode = ssl.CERT_NONE - self.ca_certs = None - self.options = 0 - self.certfile = None - self.keyfile = None - self.ciphers = None - - def load_cert_chain(self, certfile, keyfile): - self.certfile = certfile - self.keyfile = keyfile - - def load_verify_locations(self, cafile=None, capath=None): - self.ca_certs = cafile - - if capath is not None: - raise SSLError("CA directories not supported in older Pythons") - - def set_ciphers(self, cipher_suite): - self.ciphers = cipher_suite - - def wrap_socket(self, socket, server_hostname=None, server_side=False): - warnings.warn( - 'A true SSLContext object is not available. This prevents ' - 'urllib3 from configuring SSL appropriately and may cause ' - 'certain SSL connections to fail. You can upgrade to a newer ' - 'version of Python to solve this. For more information, see ' - 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' - '#ssl-warnings', - InsecurePlatformWarning - ) - kwargs = { - 'keyfile': self.keyfile, - 'certfile': self.certfile, - 'ca_certs': self.ca_certs, - 'cert_reqs': self.verify_mode, - 'ssl_version': self.protocol, - 'server_side': server_side, - } - return wrap_socket(socket, ciphers=self.ciphers, **kwargs) - - -def assert_fingerprint(cert, fingerprint): - """ - Checks if given fingerprint matches the supplied certificate. - - :param cert: - Certificate as bytes object. - :param fingerprint: - Fingerprint as string of hexdigits, can be interspersed by colons. 
- """ - - fingerprint = fingerprint.replace(':', '').lower() - digest_length = len(fingerprint) - hashfunc = HASHFUNC_MAP.get(digest_length) - if not hashfunc: - raise SSLError( - 'Fingerprint of invalid length: {0}'.format(fingerprint)) - - # We need encode() here for py32; works on py2 and p33. - fingerprint_bytes = unhexlify(fingerprint.encode()) - - cert_digest = hashfunc(cert).digest() - - if not _const_compare_digest(cert_digest, fingerprint_bytes): - raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".' - .format(fingerprint, hexlify(cert_digest))) - - -def resolve_cert_reqs(candidate): - """ - Resolves the argument to a numeric constant, which can be passed to - the wrap_socket function/method from the ssl module. - Defaults to :data:`ssl.CERT_NONE`. - If given a string it is assumed to be the name of the constant in the - :mod:`ssl` module or its abbreviation. - (So you can specify `REQUIRED` instead of `CERT_REQUIRED`. - If it's neither `None` nor a string we assume it is already the numeric - constant which can directly be passed to wrap_socket. - """ - if candidate is None: - return CERT_NONE - - if isinstance(candidate, str): - res = getattr(ssl, candidate, None) - if res is None: - res = getattr(ssl, 'CERT_' + candidate) - return res - - return candidate - - -def resolve_ssl_version(candidate): - """ - like resolve_cert_reqs - """ - if candidate is None: - return PROTOCOL_SSLv23 - - if isinstance(candidate, str): - res = getattr(ssl, candidate, None) - if res is None: - res = getattr(ssl, 'PROTOCOL_' + candidate) - return res - - return candidate - - -def create_urllib3_context(ssl_version=None, cert_reqs=None, - options=None, ciphers=None): - """All arguments have the same meaning as ``ssl_wrap_socket``. - - By default, this function does a lot of the same work that - ``ssl.create_default_context`` does on Python 3.4+. 
It: - - - Disables SSLv2, SSLv3, and compression - - Sets a restricted set of server ciphers - - If you wish to enable SSLv3, you can do:: - - from pip._vendor.urllib3.util import ssl_ - context = ssl_.create_urllib3_context() - context.options &= ~ssl_.OP_NO_SSLv3 - - You can do the same to enable compression (substituting ``COMPRESSION`` - for ``SSLv3`` in the last line above). - - :param ssl_version: - The desired protocol version to use. This will default to - PROTOCOL_SSLv23 which will negotiate the highest protocol that both - the server and your installation of OpenSSL support. - :param cert_reqs: - Whether to require the certificate verification. This defaults to - ``ssl.CERT_REQUIRED``. - :param options: - Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``, - ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``. - :param ciphers: - Which cipher suites to allow the server to select. - :returns: - Constructed SSLContext object with specified options - :rtype: SSLContext - """ - context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23) - - context.set_ciphers(ciphers or DEFAULT_CIPHERS) - - # Setting the default here, as we may have no ssl module on import - cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs - - if options is None: - options = 0 - # SSLv2 is easily broken and is considered harmful and dangerous - options |= OP_NO_SSLv2 - # SSLv3 has several problems and is now dangerous - options |= OP_NO_SSLv3 - # Disable compression to prevent CRIME attacks for OpenSSL 1.0+ - # (issue #309) - options |= OP_NO_COMPRESSION - - context.options |= options - - context.verify_mode = cert_reqs - if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2 - # We do our own verification, including fingerprints and alternative - # hostnames. 
So disable it here - context.check_hostname = False - return context - - -def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, - ca_certs=None, server_hostname=None, - ssl_version=None, ciphers=None, ssl_context=None, - ca_cert_dir=None): - """ - All arguments except for server_hostname, ssl_context, and ca_cert_dir have - the same meaning as they do when using :func:`ssl.wrap_socket`. - - :param server_hostname: - When SNI is supported, the expected hostname of the certificate - :param ssl_context: - A pre-made :class:`SSLContext` object. If none is provided, one will - be created using :func:`create_urllib3_context`. - :param ciphers: - A string of ciphers we wish the client to support. - :param ca_cert_dir: - A directory containing CA certificates in multiple separate files, as - supported by OpenSSL's -CApath flag or the capath argument to - SSLContext.load_verify_locations(). - """ - context = ssl_context - if context is None: - # Note: This branch of code and all the variables in it are no longer - # used by urllib3 itself. We should consider deprecating and removing - # this code. 
- context = create_urllib3_context(ssl_version, cert_reqs, - ciphers=ciphers) - - if ca_certs or ca_cert_dir: - try: - context.load_verify_locations(ca_certs, ca_cert_dir) - except IOError as e: # Platform-specific: Python 2.7 - raise SSLError(e) - # Py33 raises FileNotFoundError which subclasses OSError - # These are not equivalent unless we check the errno attribute - except OSError as e: # Platform-specific: Python 3.3 and beyond - if e.errno == errno.ENOENT: - raise SSLError(e) - raise - elif getattr(context, 'load_default_certs', None) is not None: - # try to load OS default certs; works well on Windows (require Python3.4+) - context.load_default_certs() - - if certfile: - context.load_cert_chain(certfile, keyfile) - - # If we detect server_hostname is an IP address then the SNI - # extension should not be used according to RFC3546 Section 3.1 - # We shouldn't warn the user if SNI isn't available but we would - # not be using SNI anyways due to IP address for server_hostname. - if ((server_hostname is not None and not is_ipaddress(server_hostname)) - or IS_SECURETRANSPORT): - if HAS_SNI and server_hostname is not None: - return context.wrap_socket(sock, server_hostname=server_hostname) - - warnings.warn( - 'An HTTPS request has been made, but the SNI (Server Name ' - 'Indication) extension to TLS is not available on this platform. ' - 'This may cause the server to present an incorrect TLS ' - 'certificate, which can cause validation failures. You can upgrade to ' - 'a newer version of Python to solve this. For more information, see ' - 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' - '#ssl-warnings', - SNIMissingWarning - ) - - return context.wrap_socket(sock) - - -def is_ipaddress(hostname): - """Detects whether the hostname given is an IP address. - - :param str hostname: Hostname to examine. - :return: True if the hostname is an IP address, False otherwise. 
- """ - if six.PY3 and isinstance(hostname, bytes): - # IDN A-label bytes are ASCII compatible. - hostname = hostname.decode('ascii') - - families = [socket.AF_INET] - if hasattr(socket, 'AF_INET6'): - families.append(socket.AF_INET6) - - for af in families: - try: - inet_pton(af, hostname) - except (socket.error, ValueError, OSError): - pass - else: - return True - return False diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/timeout.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/timeout.py deleted file mode 100644 index cec817e..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/timeout.py +++ /dev/null @@ -1,242 +0,0 @@ -from __future__ import absolute_import -# The default socket timeout, used by httplib to indicate that no timeout was -# specified by the user -from socket import _GLOBAL_DEFAULT_TIMEOUT -import time - -from ..exceptions import TimeoutStateError - -# A sentinel value to indicate that no timeout was specified by the user in -# urllib3 -_Default = object() - - -# Use time.monotonic if available. -current_time = getattr(time, "monotonic", time.time) - - -class Timeout(object): - """ Timeout configuration. - - Timeouts can be defined as a default for a pool:: - - timeout = Timeout(connect=2.0, read=7.0) - http = PoolManager(timeout=timeout) - response = http.request('GET', 'http://example.com/') - - Or per-request (which overrides the default for the pool):: - - response = http.request('GET', 'http://example.com/', timeout=Timeout(10)) - - Timeouts can be disabled by setting all the parameters to ``None``:: - - no_timeout = Timeout(connect=None, read=None) - response = http.request('GET', 'http://example.com/, timeout=no_timeout) - - - :param total: - This combines the connect and read timeouts into one; the read timeout - will be set to the time leftover from the connect attempt. 
In the - event that both a connect timeout and a total are specified, or a read - timeout and a total are specified, the shorter timeout will be applied. - - Defaults to None. - - :type total: integer, float, or None - - :param connect: - The maximum amount of time to wait for a connection attempt to a server - to succeed. Omitting the parameter will default the connect timeout to - the system default, probably `the global default timeout in socket.py - <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_. - None will set an infinite timeout for connection attempts. - - :type connect: integer, float, or None - - :param read: - The maximum amount of time to wait between consecutive - read operations for a response from the server. Omitting - the parameter will default the read timeout to the system - default, probably `the global default timeout in socket.py - <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_. - None will set an infinite timeout. - - :type read: integer, float, or None - - .. note:: - - Many factors can affect the total amount of time for urllib3 to return - an HTTP response. - - For example, Python's DNS resolver does not obey the timeout specified - on the socket. Other factors that can affect total request time include - high CPU load, high swap, the program running at a low priority level, - or other behaviors. - - In addition, the read and total timeouts only measure the time between - read operations on the socket connecting the client and the server, - not the total amount of time for the request to return a complete - response. For most requests, the timeout is raised because the server - has not sent the first byte in the specified time. This is not always - the case; if a server streams one byte every fifteen seconds, a timeout - of 20 seconds will not trigger, even though the request will take - several minutes to complete. 
- - If your goal is to cut off any request after a set amount of wall clock - time, consider having a second "watcher" thread to cut off a slow - request. - """ - - #: A sentinel object representing the default timeout value - DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT - - def __init__(self, total=None, connect=_Default, read=_Default): - self._connect = self._validate_timeout(connect, 'connect') - self._read = self._validate_timeout(read, 'read') - self.total = self._validate_timeout(total, 'total') - self._start_connect = None - - def __str__(self): - return '%s(connect=%r, read=%r, total=%r)' % ( - type(self).__name__, self._connect, self._read, self.total) - - @classmethod - def _validate_timeout(cls, value, name): - """ Check that a timeout attribute is valid. - - :param value: The timeout value to validate - :param name: The name of the timeout attribute to validate. This is - used to specify in error messages. - :return: The validated and casted version of the given value. - :raises ValueError: If it is a numeric value less than or equal to - zero, or the type is not an integer, float, or None. - """ - if value is _Default: - return cls.DEFAULT_TIMEOUT - - if value is None or value is cls.DEFAULT_TIMEOUT: - return value - - if isinstance(value, bool): - raise ValueError("Timeout cannot be a boolean value. It must " - "be an int, float or None.") - try: - float(value) - except (TypeError, ValueError): - raise ValueError("Timeout value %s was %s, but it must be an " - "int, float or None." % (name, value)) - - try: - if value <= 0: - raise ValueError("Attempted to set %s timeout to %s, but the " - "timeout cannot be set to a value less " - "than or equal to 0." % (name, value)) - except TypeError: # Python 3 - raise ValueError("Timeout value %s was %s, but it must be an " - "int, float or None." % (name, value)) - - return value - - @classmethod - def from_float(cls, timeout): - """ Create a new Timeout from a legacy timeout value. 
- - The timeout value used by httplib.py sets the same timeout on the - connect(), and recv() socket requests. This creates a :class:`Timeout` - object that sets the individual timeouts to the ``timeout`` value - passed to this function. - - :param timeout: The legacy timeout value. - :type timeout: integer, float, sentinel default object, or None - :return: Timeout object - :rtype: :class:`Timeout` - """ - return Timeout(read=timeout, connect=timeout) - - def clone(self): - """ Create a copy of the timeout object - - Timeout properties are stored per-pool but each request needs a fresh - Timeout object to ensure each one has its own start/stop configured. - - :return: a copy of the timeout object - :rtype: :class:`Timeout` - """ - # We can't use copy.deepcopy because that will also create a new object - # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to - # detect the user default. - return Timeout(connect=self._connect, read=self._read, - total=self.total) - - def start_connect(self): - """ Start the timeout clock, used during a connect() attempt - - :raises urllib3.exceptions.TimeoutStateError: if you attempt - to start a timer that has been started already. - """ - if self._start_connect is not None: - raise TimeoutStateError("Timeout timer has already been started.") - self._start_connect = current_time() - return self._start_connect - - def get_connect_duration(self): - """ Gets the time elapsed since the call to :meth:`start_connect`. - - :return: Elapsed time. - :rtype: float - :raises urllib3.exceptions.TimeoutStateError: if you attempt - to get duration for a timer that hasn't been started. - """ - if self._start_connect is None: - raise TimeoutStateError("Can't get connect duration for timer " - "that has not started.") - return current_time() - self._start_connect - - @property - def connect_timeout(self): - """ Get the value to use when setting a connection timeout. 
- - This will be a positive float or integer, the value None - (never timeout), or the default system timeout. - - :return: Connect timeout. - :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None - """ - if self.total is None: - return self._connect - - if self._connect is None or self._connect is self.DEFAULT_TIMEOUT: - return self.total - - return min(self._connect, self.total) - - @property - def read_timeout(self): - """ Get the value for the read timeout. - - This assumes some time has elapsed in the connection timeout and - computes the read timeout appropriately. - - If self.total is set, the read timeout is dependent on the amount of - time taken by the connect timeout. If the connection time has not been - established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be - raised. - - :return: Value to use for the read timeout. - :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None - :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect` - has not yet been called on this object. - """ - if (self.total is not None and - self.total is not self.DEFAULT_TIMEOUT and - self._read is not None and - self._read is not self.DEFAULT_TIMEOUT): - # In case the connect timeout has not yet been established. 
- if self._start_connect is None: - return self._read - return max(0, min(self.total - self.get_connect_duration(), - self._read)) - elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT: - return max(0, self.total - self.get_connect_duration()) - else: - return self._read diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/url.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/url.py deleted file mode 100644 index 6b6f996..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/url.py +++ /dev/null @@ -1,230 +0,0 @@ -from __future__ import absolute_import -from collections import namedtuple - -from ..exceptions import LocationParseError - - -url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'] - -# We only want to normalize urls with an HTTP(S) scheme. -# urllib3 infers URLs without a scheme (None) to be http. -NORMALIZABLE_SCHEMES = ('http', 'https', None) - - -class Url(namedtuple('Url', url_attrs)): - """ - Datastructure for representing an HTTP URL. Used as a return value for - :func:`parse_url`. Both the scheme and host are normalized as they are - both case-insensitive according to RFC 3986. - """ - __slots__ = () - - def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None, - query=None, fragment=None): - if path and not path.startswith('/'): - path = '/' + path - if scheme: - scheme = scheme.lower() - if host and scheme in NORMALIZABLE_SCHEMES: - host = host.lower() - return super(Url, cls).__new__(cls, scheme, auth, host, port, path, - query, fragment) - - @property - def hostname(self): - """For backwards-compatibility with urlparse. We're nice like that.""" - return self.host - - @property - def request_uri(self): - """Absolute path including the query string.""" - uri = self.path or '/' - - if self.query is not None: - uri += '?' 
+ self.query - - return uri - - @property - def netloc(self): - """Network location including host and port""" - if self.port: - return '%s:%d' % (self.host, self.port) - return self.host - - @property - def url(self): - """ - Convert self into a url - - This function should more or less round-trip with :func:`.parse_url`. The - returned url may not be exactly the same as the url inputted to - :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls - with a blank port will have : removed). - - Example: :: - - >>> U = parse_url('http://google.com/mail/') - >>> U.url - 'http://google.com/mail/' - >>> Url('http', 'username:password', 'host.com', 80, - ... '/path', 'query', 'fragment').url - 'http://username:password@host.com:80/path?query#fragment' - """ - scheme, auth, host, port, path, query, fragment = self - url = '' - - # We use "is not None" we want things to happen with empty strings (or 0 port) - if scheme is not None: - url += scheme + '://' - if auth is not None: - url += auth + '@' - if host is not None: - url += host - if port is not None: - url += ':' + str(port) - if path is not None: - url += path - if query is not None: - url += '?' + query - if fragment is not None: - url += '#' + fragment - - return url - - def __str__(self): - return self.url - - -def split_first(s, delims): - """ - Given a string and an iterable of delimiters, split on the first found - delimiter. Return two split parts and the matched delimiter. - - If not found, then the first part is the full input string. - - Example:: - - >>> split_first('foo/bar?baz', '?/=') - ('foo', 'bar?baz', '/') - >>> split_first('foo/bar?baz', '123') - ('foo/bar?baz', '', None) - - Scales linearly with number of delims. Not ideal for large number of delims. 
- """ - min_idx = None - min_delim = None - for d in delims: - idx = s.find(d) - if idx < 0: - continue - - if min_idx is None or idx < min_idx: - min_idx = idx - min_delim = d - - if min_idx is None or min_idx < 0: - return s, '', None - - return s[:min_idx], s[min_idx + 1:], min_delim - - -def parse_url(url): - """ - Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is - performed to parse incomplete urls. Fields not provided will be None. - - Partly backwards-compatible with :mod:`urlparse`. - - Example:: - - >>> parse_url('http://google.com/mail/') - Url(scheme='http', host='google.com', port=None, path='/mail/', ...) - >>> parse_url('google.com:80') - Url(scheme=None, host='google.com', port=80, path=None, ...) - >>> parse_url('/foo?bar') - Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...) - """ - - # While this code has overlap with stdlib's urlparse, it is much - # simplified for our needs and less annoying. - # Additionally, this implementations does silly things to be optimal - # on CPython. - - if not url: - # Empty - return Url() - - scheme = None - auth = None - host = None - port = None - path = None - fragment = None - query = None - - # Scheme - if '://' in url: - scheme, url = url.split('://', 1) - - # Find the earliest Authority Terminator - # (http://tools.ietf.org/html/rfc3986#section-3.2) - url, path_, delim = split_first(url, ['/', '?', '#']) - - if delim: - # Reassemble the path - path = delim + path_ - - # Auth - if '@' in url: - # Last '@' denotes end of auth part - auth, url = url.rsplit('@', 1) - - # IPv6 - if url and url[0] == '[': - host, url = url.split(']', 1) - host += ']' - - # Port - if ':' in url: - _host, port = url.split(':', 1) - - if not host: - host = _host - - if port: - # If given, ports must be integers. No whitespace, no plus or - # minus prefixes, no non-integer digits such as ^2 (superscript). 
- if not port.isdigit(): - raise LocationParseError(url) - try: - port = int(port) - except ValueError: - raise LocationParseError(url) - else: - # Blank ports are cool, too. (rfc3986#section-3.2.3) - port = None - - elif not host and url: - host = url - - if not path: - return Url(scheme, auth, host, port, path, query, fragment) - - # Fragment - if '#' in path: - path, fragment = path.split('#', 1) - - # Query - if '?' in path: - path, query = path.split('?', 1) - - return Url(scheme, auth, host, port, path, query, fragment) - - -def get_host(url): - """ - Deprecated. Use :func:`parse_url` instead. - """ - p = parse_url(url) - return p.scheme or 'http', p.hostname, p.port diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/wait.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/wait.py deleted file mode 100644 index 4db71ba..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/wait.py +++ /dev/null @@ -1,150 +0,0 @@ -import errno -from functools import partial -import select -import sys -try: - from time import monotonic -except ImportError: - from time import time as monotonic - -__all__ = ["NoWayToWaitForSocketError", "wait_for_read", "wait_for_write"] - - -class NoWayToWaitForSocketError(Exception): - pass - - -# How should we wait on sockets? -# -# There are two types of APIs you can use for waiting on sockets: the fancy -# modern stateful APIs like epoll/kqueue, and the older stateless APIs like -# select/poll. The stateful APIs are more efficient when you have a lots of -# sockets to keep track of, because you can set them up once and then use them -# lots of times. But we only ever want to wait on a single socket at a time -# and don't want to keep track of state, so the stateless APIs are actually -# more efficient. So we want to use select() or poll(). -# -# Now, how do we choose between select() and poll()? 
On traditional Unixes, -# select() has a strange calling convention that makes it slow, or fail -# altogether, for high-numbered file descriptors. The point of poll() is to fix -# that, so on Unixes, we prefer poll(). -# -# On Windows, there is no poll() (or at least Python doesn't provide a wrapper -# for it), but that's OK, because on Windows, select() doesn't have this -# strange calling convention; plain select() works fine. -# -# So: on Windows we use select(), and everywhere else we use poll(). We also -# fall back to select() in case poll() is somehow broken or missing. - -if sys.version_info >= (3, 5): - # Modern Python, that retries syscalls by default - def _retry_on_intr(fn, timeout): - return fn(timeout) -else: - # Old and broken Pythons. - def _retry_on_intr(fn, timeout): - if timeout is None: - deadline = float("inf") - else: - deadline = monotonic() + timeout - - while True: - try: - return fn(timeout) - # OSError for 3 <= pyver < 3.5, select.error for pyver <= 2.7 - except (OSError, select.error) as e: - # 'e.args[0]' incantation works for both OSError and select.error - if e.args[0] != errno.EINTR: - raise - else: - timeout = deadline - monotonic() - if timeout < 0: - timeout = 0 - if timeout == float("inf"): - timeout = None - continue - - -def select_wait_for_socket(sock, read=False, write=False, timeout=None): - if not read and not write: - raise RuntimeError("must specify at least one of read=True, write=True") - rcheck = [] - wcheck = [] - if read: - rcheck.append(sock) - if write: - wcheck.append(sock) - # When doing a non-blocking connect, most systems signal success by - # marking the socket writable. Windows, though, signals success by marked - # it as "exceptional". We paper over the difference by checking the write - # sockets for both conditions. (The stdlib selectors module does the same - # thing.) 
- fn = partial(select.select, rcheck, wcheck, wcheck) - rready, wready, xready = _retry_on_intr(fn, timeout) - return bool(rready or wready or xready) - - -def poll_wait_for_socket(sock, read=False, write=False, timeout=None): - if not read and not write: - raise RuntimeError("must specify at least one of read=True, write=True") - mask = 0 - if read: - mask |= select.POLLIN - if write: - mask |= select.POLLOUT - poll_obj = select.poll() - poll_obj.register(sock, mask) - - # For some reason, poll() takes timeout in milliseconds - def do_poll(t): - if t is not None: - t *= 1000 - return poll_obj.poll(t) - - return bool(_retry_on_intr(do_poll, timeout)) - - -def null_wait_for_socket(*args, **kwargs): - raise NoWayToWaitForSocketError("no select-equivalent available") - - -def _have_working_poll(): - # Apparently some systems have a select.poll that fails as soon as you try - # to use it, either due to strange configuration or broken monkeypatching - # from libraries like eventlet/greenlet. - try: - poll_obj = select.poll() - _retry_on_intr(poll_obj.poll, 0) - except (AttributeError, OSError): - return False - else: - return True - - -def wait_for_socket(*args, **kwargs): - # We delay choosing which implementation to use until the first time we're - # called. We could do it at import time, but then we might make the wrong - # decision if someone goes wild with monkeypatching select.poll after - # we're imported. - global wait_for_socket - if _have_working_poll(): - wait_for_socket = poll_wait_for_socket - elif hasattr(select, "select"): - wait_for_socket = select_wait_for_socket - else: # Platform-specific: Appengine. - wait_for_socket = null_wait_for_socket - return wait_for_socket(*args, **kwargs) - - -def wait_for_read(sock, timeout=None): - """ Waits for reading to be available on a given socket. - Returns True if the socket is readable, or False if the timeout expired. 
- """ - return wait_for_socket(sock, read=True, timeout=timeout) - - -def wait_for_write(sock, timeout=None): - """ Waits for writing to be available on a given socket. - Returns True if the socket is readable, or False if the timeout expired. - """ - return wait_for_socket(sock, write=True, timeout=timeout) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/webencodings/__init__.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/webencodings/__init__.py deleted file mode 100644 index d21d697..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/webencodings/__init__.py +++ /dev/null @@ -1,342 +0,0 @@ -# coding: utf-8 -""" - - webencodings - ~~~~~~~~~~~~ - - This is a Python implementation of the `WHATWG Encoding standard - <http://encoding.spec.whatwg.org/>`. See README for details. - - :copyright: Copyright 2012 by Simon Sapin - :license: BSD, see LICENSE for details. - -""" - -from __future__ import unicode_literals - -import codecs - -from .labels import LABELS - - -VERSION = '0.5.1' - - -# Some names in Encoding are not valid Python aliases. Remap these. -PYTHON_NAMES = { - 'iso-8859-8-i': 'iso-8859-8', - 'x-mac-cyrillic': 'mac-cyrillic', - 'macintosh': 'mac-roman', - 'windows-874': 'cp874'} - -CACHE = {} - - -def ascii_lower(string): - r"""Transform (only) ASCII letters to lower case: A-Z is mapped to a-z. - - :param string: An Unicode string. - :returns: A new Unicode string. - - This is used for `ASCII case-insensitive - <http://encoding.spec.whatwg.org/#ascii-case-insensitive>`_ - matching of encoding labels. - The same matching is also used, among other things, - for `CSS keywords <http://dev.w3.org/csswg/css-values/#keywords>`_. 
- - This is different from the :meth:`~py:str.lower` method of Unicode strings - which also affect non-ASCII characters, - sometimes mapping them into the ASCII range: - - >>> keyword = u'Bac\N{KELVIN SIGN}ground' - >>> assert keyword.lower() == u'background' - >>> assert ascii_lower(keyword) != keyword.lower() - >>> assert ascii_lower(keyword) == u'bac\N{KELVIN SIGN}ground' - - """ - # This turns out to be faster than unicode.translate() - return string.encode('utf8').lower().decode('utf8') - - -def lookup(label): - """ - Look for an encoding by its label. - This is the spec’s `get an encoding - <http://encoding.spec.whatwg.org/#concept-encoding-get>`_ algorithm. - Supported labels are listed there. - - :param label: A string. - :returns: - An :class:`Encoding` object, or :obj:`None` for an unknown label. - - """ - # Only strip ASCII whitespace: U+0009, U+000A, U+000C, U+000D, and U+0020. - label = ascii_lower(label.strip('\t\n\f\r ')) - name = LABELS.get(label) - if name is None: - return None - encoding = CACHE.get(name) - if encoding is None: - if name == 'x-user-defined': - from .x_user_defined import codec_info - else: - python_name = PYTHON_NAMES.get(name, name) - # Any python_name value that gets to here should be valid. - codec_info = codecs.lookup(python_name) - encoding = Encoding(name, codec_info) - CACHE[name] = encoding - return encoding - - -def _get_encoding(encoding_or_label): - """ - Accept either an encoding object or label. - - :param encoding: An :class:`Encoding` object or a label string. - :returns: An :class:`Encoding` object. - :raises: :exc:`~exceptions.LookupError` for an unknown label. - - """ - if hasattr(encoding_or_label, 'codec_info'): - return encoding_or_label - - encoding = lookup(encoding_or_label) - if encoding is None: - raise LookupError('Unknown encoding label: %r' % encoding_or_label) - return encoding - - -class Encoding(object): - """Reresents a character encoding such as UTF-8, - that can be used for decoding or encoding. 
- - .. attribute:: name - - Canonical name of the encoding - - .. attribute:: codec_info - - The actual implementation of the encoding, - a stdlib :class:`~codecs.CodecInfo` object. - See :func:`codecs.register`. - - """ - def __init__(self, name, codec_info): - self.name = name - self.codec_info = codec_info - - def __repr__(self): - return '<Encoding %s>' % self.name - - -#: The UTF-8 encoding. Should be used for new content and formats. -UTF8 = lookup('utf-8') - -_UTF16LE = lookup('utf-16le') -_UTF16BE = lookup('utf-16be') - - -def decode(input, fallback_encoding, errors='replace'): - """ - Decode a single string. - - :param input: A byte string - :param fallback_encoding: - An :class:`Encoding` object or a label string. - The encoding to use if :obj:`input` does note have a BOM. - :param errors: Type of error handling. See :func:`codecs.register`. - :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. - :return: - A ``(output, encoding)`` tuple of an Unicode string - and an :obj:`Encoding`. - - """ - # Fail early if `encoding` is an invalid label. - fallback_encoding = _get_encoding(fallback_encoding) - bom_encoding, input = _detect_bom(input) - encoding = bom_encoding or fallback_encoding - return encoding.codec_info.decode(input, errors)[0], encoding - - -def _detect_bom(input): - """Return (bom_encoding, input), with any BOM removed from the input.""" - if input.startswith(b'\xFF\xFE'): - return _UTF16LE, input[2:] - if input.startswith(b'\xFE\xFF'): - return _UTF16BE, input[2:] - if input.startswith(b'\xEF\xBB\xBF'): - return UTF8, input[3:] - return None, input - - -def encode(input, encoding=UTF8, errors='strict'): - """ - Encode a single string. - - :param input: An Unicode string. - :param encoding: An :class:`Encoding` object or a label string. - :param errors: Type of error handling. See :func:`codecs.register`. - :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. - :return: A byte string. 
- - """ - return _get_encoding(encoding).codec_info.encode(input, errors)[0] - - -def iter_decode(input, fallback_encoding, errors='replace'): - """ - "Pull"-based decoder. - - :param input: - An iterable of byte strings. - - The input is first consumed just enough to determine the encoding - based on the precense of a BOM, - then consumed on demand when the return value is. - :param fallback_encoding: - An :class:`Encoding` object or a label string. - The encoding to use if :obj:`input` does note have a BOM. - :param errors: Type of error handling. See :func:`codecs.register`. - :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. - :returns: - An ``(output, encoding)`` tuple. - :obj:`output` is an iterable of Unicode strings, - :obj:`encoding` is the :obj:`Encoding` that is being used. - - """ - - decoder = IncrementalDecoder(fallback_encoding, errors) - generator = _iter_decode_generator(input, decoder) - encoding = next(generator) - return generator, encoding - - -def _iter_decode_generator(input, decoder): - """Return a generator that first yields the :obj:`Encoding`, - then yields output chukns as Unicode strings. - - """ - decode = decoder.decode - input = iter(input) - for chunck in input: - output = decode(chunck) - if output: - assert decoder.encoding is not None - yield decoder.encoding - yield output - break - else: - # Input exhausted without determining the encoding - output = decode(b'', final=True) - assert decoder.encoding is not None - yield decoder.encoding - if output: - yield output - return - - for chunck in input: - output = decode(chunck) - if output: - yield output - output = decode(b'', final=True) - if output: - yield output - - -def iter_encode(input, encoding=UTF8, errors='strict'): - """ - “Pullâ€-based encoder. - - :param input: An iterable of Unicode strings. - :param encoding: An :class:`Encoding` object or a label string. - :param errors: Type of error handling. See :func:`codecs.register`. 
- :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. - :returns: An iterable of byte strings. - - """ - # Fail early if `encoding` is an invalid label. - encode = IncrementalEncoder(encoding, errors).encode - return _iter_encode_generator(input, encode) - - -def _iter_encode_generator(input, encode): - for chunck in input: - output = encode(chunck) - if output: - yield output - output = encode('', final=True) - if output: - yield output - - -class IncrementalDecoder(object): - """ - “Pushâ€-based decoder. - - :param fallback_encoding: - An :class:`Encoding` object or a label string. - The encoding to use if :obj:`input` does note have a BOM. - :param errors: Type of error handling. See :func:`codecs.register`. - :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. - - """ - def __init__(self, fallback_encoding, errors='replace'): - # Fail early if `encoding` is an invalid label. - self._fallback_encoding = _get_encoding(fallback_encoding) - self._errors = errors - self._buffer = b'' - self._decoder = None - #: The actual :class:`Encoding` that is being used, - #: or :obj:`None` if that is not determined yet. - #: (Ie. if there is not enough input yet to determine - #: if there is a BOM.) - self.encoding = None # Not known yet. - - def decode(self, input, final=False): - """Decode one chunk of the input. - - :param input: A byte string. - :param final: - Indicate that no more input is available. - Must be :obj:`True` if this is the last call. - :returns: An Unicode string. - - """ - decoder = self._decoder - if decoder is not None: - return decoder(input, final) - - input = self._buffer + input - encoding, input = _detect_bom(input) - if encoding is None: - if len(input) < 3 and not final: # Not enough data yet. 
- self._buffer = input - return '' - else: # No BOM - encoding = self._fallback_encoding - decoder = encoding.codec_info.incrementaldecoder(self._errors).decode - self._decoder = decoder - self.encoding = encoding - return decoder(input, final) - - -class IncrementalEncoder(object): - """ - “Pushâ€-based encoder. - - :param encoding: An :class:`Encoding` object or a label string. - :param errors: Type of error handling. See :func:`codecs.register`. - :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. - - .. method:: encode(input, final=False) - - :param input: An Unicode string. - :param final: - Indicate that no more input is available. - Must be :obj:`True` if this is the last call. - :returns: A byte string. - - """ - def __init__(self, encoding=UTF8, errors='strict'): - encoding = _get_encoding(encoding) - self.encode = encoding.codec_info.incrementalencoder(errors).encode diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/webencodings/labels.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/webencodings/labels.py deleted file mode 100644 index 29cbf91..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/webencodings/labels.py +++ /dev/null @@ -1,231 +0,0 @@ -""" - - webencodings.labels - ~~~~~~~~~~~~~~~~~~~ - - Map encoding labels to their name. - - :copyright: Copyright 2012 by Simon Sapin - :license: BSD, see LICENSE for details. - -""" - -# XXX Do not edit! 
-# This file is automatically generated by mklabels.py - -LABELS = { - 'unicode-1-1-utf-8': 'utf-8', - 'utf-8': 'utf-8', - 'utf8': 'utf-8', - '866': 'ibm866', - 'cp866': 'ibm866', - 'csibm866': 'ibm866', - 'ibm866': 'ibm866', - 'csisolatin2': 'iso-8859-2', - 'iso-8859-2': 'iso-8859-2', - 'iso-ir-101': 'iso-8859-2', - 'iso8859-2': 'iso-8859-2', - 'iso88592': 'iso-8859-2', - 'iso_8859-2': 'iso-8859-2', - 'iso_8859-2:1987': 'iso-8859-2', - 'l2': 'iso-8859-2', - 'latin2': 'iso-8859-2', - 'csisolatin3': 'iso-8859-3', - 'iso-8859-3': 'iso-8859-3', - 'iso-ir-109': 'iso-8859-3', - 'iso8859-3': 'iso-8859-3', - 'iso88593': 'iso-8859-3', - 'iso_8859-3': 'iso-8859-3', - 'iso_8859-3:1988': 'iso-8859-3', - 'l3': 'iso-8859-3', - 'latin3': 'iso-8859-3', - 'csisolatin4': 'iso-8859-4', - 'iso-8859-4': 'iso-8859-4', - 'iso-ir-110': 'iso-8859-4', - 'iso8859-4': 'iso-8859-4', - 'iso88594': 'iso-8859-4', - 'iso_8859-4': 'iso-8859-4', - 'iso_8859-4:1988': 'iso-8859-4', - 'l4': 'iso-8859-4', - 'latin4': 'iso-8859-4', - 'csisolatincyrillic': 'iso-8859-5', - 'cyrillic': 'iso-8859-5', - 'iso-8859-5': 'iso-8859-5', - 'iso-ir-144': 'iso-8859-5', - 'iso8859-5': 'iso-8859-5', - 'iso88595': 'iso-8859-5', - 'iso_8859-5': 'iso-8859-5', - 'iso_8859-5:1988': 'iso-8859-5', - 'arabic': 'iso-8859-6', - 'asmo-708': 'iso-8859-6', - 'csiso88596e': 'iso-8859-6', - 'csiso88596i': 'iso-8859-6', - 'csisolatinarabic': 'iso-8859-6', - 'ecma-114': 'iso-8859-6', - 'iso-8859-6': 'iso-8859-6', - 'iso-8859-6-e': 'iso-8859-6', - 'iso-8859-6-i': 'iso-8859-6', - 'iso-ir-127': 'iso-8859-6', - 'iso8859-6': 'iso-8859-6', - 'iso88596': 'iso-8859-6', - 'iso_8859-6': 'iso-8859-6', - 'iso_8859-6:1987': 'iso-8859-6', - 'csisolatingreek': 'iso-8859-7', - 'ecma-118': 'iso-8859-7', - 'elot_928': 'iso-8859-7', - 'greek': 'iso-8859-7', - 'greek8': 'iso-8859-7', - 'iso-8859-7': 'iso-8859-7', - 'iso-ir-126': 'iso-8859-7', - 'iso8859-7': 'iso-8859-7', - 'iso88597': 'iso-8859-7', - 'iso_8859-7': 'iso-8859-7', - 'iso_8859-7:1987': 
'iso-8859-7', - 'sun_eu_greek': 'iso-8859-7', - 'csiso88598e': 'iso-8859-8', - 'csisolatinhebrew': 'iso-8859-8', - 'hebrew': 'iso-8859-8', - 'iso-8859-8': 'iso-8859-8', - 'iso-8859-8-e': 'iso-8859-8', - 'iso-ir-138': 'iso-8859-8', - 'iso8859-8': 'iso-8859-8', - 'iso88598': 'iso-8859-8', - 'iso_8859-8': 'iso-8859-8', - 'iso_8859-8:1988': 'iso-8859-8', - 'visual': 'iso-8859-8', - 'csiso88598i': 'iso-8859-8-i', - 'iso-8859-8-i': 'iso-8859-8-i', - 'logical': 'iso-8859-8-i', - 'csisolatin6': 'iso-8859-10', - 'iso-8859-10': 'iso-8859-10', - 'iso-ir-157': 'iso-8859-10', - 'iso8859-10': 'iso-8859-10', - 'iso885910': 'iso-8859-10', - 'l6': 'iso-8859-10', - 'latin6': 'iso-8859-10', - 'iso-8859-13': 'iso-8859-13', - 'iso8859-13': 'iso-8859-13', - 'iso885913': 'iso-8859-13', - 'iso-8859-14': 'iso-8859-14', - 'iso8859-14': 'iso-8859-14', - 'iso885914': 'iso-8859-14', - 'csisolatin9': 'iso-8859-15', - 'iso-8859-15': 'iso-8859-15', - 'iso8859-15': 'iso-8859-15', - 'iso885915': 'iso-8859-15', - 'iso_8859-15': 'iso-8859-15', - 'l9': 'iso-8859-15', - 'iso-8859-16': 'iso-8859-16', - 'cskoi8r': 'koi8-r', - 'koi': 'koi8-r', - 'koi8': 'koi8-r', - 'koi8-r': 'koi8-r', - 'koi8_r': 'koi8-r', - 'koi8-u': 'koi8-u', - 'csmacintosh': 'macintosh', - 'mac': 'macintosh', - 'macintosh': 'macintosh', - 'x-mac-roman': 'macintosh', - 'dos-874': 'windows-874', - 'iso-8859-11': 'windows-874', - 'iso8859-11': 'windows-874', - 'iso885911': 'windows-874', - 'tis-620': 'windows-874', - 'windows-874': 'windows-874', - 'cp1250': 'windows-1250', - 'windows-1250': 'windows-1250', - 'x-cp1250': 'windows-1250', - 'cp1251': 'windows-1251', - 'windows-1251': 'windows-1251', - 'x-cp1251': 'windows-1251', - 'ansi_x3.4-1968': 'windows-1252', - 'ascii': 'windows-1252', - 'cp1252': 'windows-1252', - 'cp819': 'windows-1252', - 'csisolatin1': 'windows-1252', - 'ibm819': 'windows-1252', - 'iso-8859-1': 'windows-1252', - 'iso-ir-100': 'windows-1252', - 'iso8859-1': 'windows-1252', - 'iso88591': 'windows-1252', - 
'iso_8859-1': 'windows-1252', - 'iso_8859-1:1987': 'windows-1252', - 'l1': 'windows-1252', - 'latin1': 'windows-1252', - 'us-ascii': 'windows-1252', - 'windows-1252': 'windows-1252', - 'x-cp1252': 'windows-1252', - 'cp1253': 'windows-1253', - 'windows-1253': 'windows-1253', - 'x-cp1253': 'windows-1253', - 'cp1254': 'windows-1254', - 'csisolatin5': 'windows-1254', - 'iso-8859-9': 'windows-1254', - 'iso-ir-148': 'windows-1254', - 'iso8859-9': 'windows-1254', - 'iso88599': 'windows-1254', - 'iso_8859-9': 'windows-1254', - 'iso_8859-9:1989': 'windows-1254', - 'l5': 'windows-1254', - 'latin5': 'windows-1254', - 'windows-1254': 'windows-1254', - 'x-cp1254': 'windows-1254', - 'cp1255': 'windows-1255', - 'windows-1255': 'windows-1255', - 'x-cp1255': 'windows-1255', - 'cp1256': 'windows-1256', - 'windows-1256': 'windows-1256', - 'x-cp1256': 'windows-1256', - 'cp1257': 'windows-1257', - 'windows-1257': 'windows-1257', - 'x-cp1257': 'windows-1257', - 'cp1258': 'windows-1258', - 'windows-1258': 'windows-1258', - 'x-cp1258': 'windows-1258', - 'x-mac-cyrillic': 'x-mac-cyrillic', - 'x-mac-ukrainian': 'x-mac-cyrillic', - 'chinese': 'gbk', - 'csgb2312': 'gbk', - 'csiso58gb231280': 'gbk', - 'gb2312': 'gbk', - 'gb_2312': 'gbk', - 'gb_2312-80': 'gbk', - 'gbk': 'gbk', - 'iso-ir-58': 'gbk', - 'x-gbk': 'gbk', - 'gb18030': 'gb18030', - 'hz-gb-2312': 'hz-gb-2312', - 'big5': 'big5', - 'big5-hkscs': 'big5', - 'cn-big5': 'big5', - 'csbig5': 'big5', - 'x-x-big5': 'big5', - 'cseucpkdfmtjapanese': 'euc-jp', - 'euc-jp': 'euc-jp', - 'x-euc-jp': 'euc-jp', - 'csiso2022jp': 'iso-2022-jp', - 'iso-2022-jp': 'iso-2022-jp', - 'csshiftjis': 'shift_jis', - 'ms_kanji': 'shift_jis', - 'shift-jis': 'shift_jis', - 'shift_jis': 'shift_jis', - 'sjis': 'shift_jis', - 'windows-31j': 'shift_jis', - 'x-sjis': 'shift_jis', - 'cseuckr': 'euc-kr', - 'csksc56011987': 'euc-kr', - 'euc-kr': 'euc-kr', - 'iso-ir-149': 'euc-kr', - 'korean': 'euc-kr', - 'ks_c_5601-1987': 'euc-kr', - 'ks_c_5601-1989': 'euc-kr', - 'ksc5601': 
'euc-kr', - 'ksc_5601': 'euc-kr', - 'windows-949': 'euc-kr', - 'csiso2022kr': 'iso-2022-kr', - 'iso-2022-kr': 'iso-2022-kr', - 'utf-16be': 'utf-16be', - 'utf-16': 'utf-16le', - 'utf-16le': 'utf-16le', - 'x-user-defined': 'x-user-defined', -} diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/webencodings/mklabels.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/webencodings/mklabels.py deleted file mode 100644 index 295dc92..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/webencodings/mklabels.py +++ /dev/null @@ -1,59 +0,0 @@ -""" - - webencodings.mklabels - ~~~~~~~~~~~~~~~~~~~~~ - - Regenarate the webencodings.labels module. - - :copyright: Copyright 2012 by Simon Sapin - :license: BSD, see LICENSE for details. - -""" - -import json -try: - from urllib import urlopen -except ImportError: - from urllib.request import urlopen - - -def assert_lower(string): - assert string == string.lower() - return string - - -def generate(url): - parts = ['''\ -""" - - webencodings.labels - ~~~~~~~~~~~~~~~~~~~ - - Map encoding labels to their name. - - :copyright: Copyright 2012 by Simon Sapin - :license: BSD, see LICENSE for details. - -""" - -# XXX Do not edit! 
-# This file is automatically generated by mklabels.py - -LABELS = { -'''] - labels = [ - (repr(assert_lower(label)).lstrip('u'), - repr(encoding['name']).lstrip('u')) - for category in json.loads(urlopen(url).read().decode('ascii')) - for encoding in category['encodings'] - for label in encoding['labels']] - max_len = max(len(label) for label, name in labels) - parts.extend( - ' %s:%s %s,\n' % (label, ' ' * (max_len - len(label)), name) - for label, name in labels) - parts.append('}') - return ''.join(parts) - - -if __name__ == '__main__': - print(generate('http://encoding.spec.whatwg.org/encodings.json')) diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/webencodings/tests.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/webencodings/tests.py deleted file mode 100644 index e12c10d..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/webencodings/tests.py +++ /dev/null @@ -1,153 +0,0 @@ -# coding: utf-8 -""" - - webencodings.tests - ~~~~~~~~~~~~~~~~~~ - - A basic test suite for Encoding. - - :copyright: Copyright 2012 by Simon Sapin - :license: BSD, see LICENSE for details. - -""" - -from __future__ import unicode_literals - -from . import (lookup, LABELS, decode, encode, iter_decode, iter_encode, - IncrementalDecoder, IncrementalEncoder, UTF8) - - -def assert_raises(exception, function, *args, **kwargs): - try: - function(*args, **kwargs) - except exception: - return - else: # pragma: no cover - raise AssertionError('Did not raise %s.' % exception) - - -def test_labels(): - assert lookup('utf-8').name == 'utf-8' - assert lookup('Utf-8').name == 'utf-8' - assert lookup('UTF-8').name == 'utf-8' - assert lookup('utf8').name == 'utf-8' - assert lookup('utf8').name == 'utf-8' - assert lookup('utf8 ').name == 'utf-8' - assert lookup(' \r\nutf8\t').name == 'utf-8' - assert lookup('u8') is None # Python label. - assert lookup('utf-8 ') is None # Non-ASCII white space. 
- - assert lookup('US-ASCII').name == 'windows-1252' - assert lookup('iso-8859-1').name == 'windows-1252' - assert lookup('latin1').name == 'windows-1252' - assert lookup('LATIN1').name == 'windows-1252' - assert lookup('latin-1') is None - assert lookup('LATÄ°N1') is None # ASCII-only case insensitivity. - - -def test_all_labels(): - for label in LABELS: - assert decode(b'', label) == ('', lookup(label)) - assert encode('', label) == b'' - for repeat in [0, 1, 12]: - output, _ = iter_decode([b''] * repeat, label) - assert list(output) == [] - assert list(iter_encode([''] * repeat, label)) == [] - decoder = IncrementalDecoder(label) - assert decoder.decode(b'') == '' - assert decoder.decode(b'', final=True) == '' - encoder = IncrementalEncoder(label) - assert encoder.encode('') == b'' - assert encoder.encode('', final=True) == b'' - # All encoding names are valid labels too: - for name in set(LABELS.values()): - assert lookup(name).name == name - - -def test_invalid_label(): - assert_raises(LookupError, decode, b'\xEF\xBB\xBF\xc3\xa9', 'invalid') - assert_raises(LookupError, encode, 'é', 'invalid') - assert_raises(LookupError, iter_decode, [], 'invalid') - assert_raises(LookupError, iter_encode, [], 'invalid') - assert_raises(LookupError, IncrementalDecoder, 'invalid') - assert_raises(LookupError, IncrementalEncoder, 'invalid') - - -def test_decode(): - assert decode(b'\x80', 'latin1') == ('€', lookup('latin1')) - assert decode(b'\x80', lookup('latin1')) == ('€', lookup('latin1')) - assert decode(b'\xc3\xa9', 'utf8') == ('é', lookup('utf8')) - assert decode(b'\xc3\xa9', UTF8) == ('é', lookup('utf8')) - assert decode(b'\xc3\xa9', 'ascii') == ('é', lookup('ascii')) - assert decode(b'\xEF\xBB\xBF\xc3\xa9', 'ascii') == ('é', lookup('utf8')) # UTF-8 with BOM - - assert decode(b'\xFE\xFF\x00\xe9', 'ascii') == ('é', lookup('utf-16be')) # UTF-16-BE with BOM - assert decode(b'\xFF\xFE\xe9\x00', 'ascii') == ('é', lookup('utf-16le')) # UTF-16-LE with BOM - assert 
decode(b'\xFE\xFF\xe9\x00', 'ascii') == ('\ue900', lookup('utf-16be')) - assert decode(b'\xFF\xFE\x00\xe9', 'ascii') == ('\ue900', lookup('utf-16le')) - - assert decode(b'\x00\xe9', 'UTF-16BE') == ('é', lookup('utf-16be')) - assert decode(b'\xe9\x00', 'UTF-16LE') == ('é', lookup('utf-16le')) - assert decode(b'\xe9\x00', 'UTF-16') == ('é', lookup('utf-16le')) - - assert decode(b'\xe9\x00', 'UTF-16BE') == ('\ue900', lookup('utf-16be')) - assert decode(b'\x00\xe9', 'UTF-16LE') == ('\ue900', lookup('utf-16le')) - assert decode(b'\x00\xe9', 'UTF-16') == ('\ue900', lookup('utf-16le')) - - -def test_encode(): - assert encode('é', 'latin1') == b'\xe9' - assert encode('é', 'utf8') == b'\xc3\xa9' - assert encode('é', 'utf8') == b'\xc3\xa9' - assert encode('é', 'utf-16') == b'\xe9\x00' - assert encode('é', 'utf-16le') == b'\xe9\x00' - assert encode('é', 'utf-16be') == b'\x00\xe9' - - -def test_iter_decode(): - def iter_decode_to_string(input, fallback_encoding): - output, _encoding = iter_decode(input, fallback_encoding) - return ''.join(output) - assert iter_decode_to_string([], 'latin1') == '' - assert iter_decode_to_string([b''], 'latin1') == '' - assert iter_decode_to_string([b'\xe9'], 'latin1') == 'é' - assert iter_decode_to_string([b'hello'], 'latin1') == 'hello' - assert iter_decode_to_string([b'he', b'llo'], 'latin1') == 'hello' - assert iter_decode_to_string([b'hell', b'o'], 'latin1') == 'hello' - assert iter_decode_to_string([b'\xc3\xa9'], 'latin1') == 'é' - assert iter_decode_to_string([b'\xEF\xBB\xBF\xc3\xa9'], 'latin1') == 'é' - assert iter_decode_to_string([ - b'\xEF\xBB\xBF', b'\xc3', b'\xa9'], 'latin1') == 'é' - assert iter_decode_to_string([ - b'\xEF\xBB\xBF', b'a', b'\xc3'], 'latin1') == 'a\uFFFD' - assert iter_decode_to_string([ - b'', b'\xEF', b'', b'', b'\xBB\xBF\xc3', b'\xa9'], 'latin1') == 'é' - assert iter_decode_to_string([b'\xEF\xBB\xBF'], 'latin1') == '' - assert iter_decode_to_string([b'\xEF\xBB'], 'latin1') == 'ï»' - assert 
iter_decode_to_string([b'\xFE\xFF\x00\xe9'], 'latin1') == 'é' - assert iter_decode_to_string([b'\xFF\xFE\xe9\x00'], 'latin1') == 'é' - assert iter_decode_to_string([ - b'', b'\xFF', b'', b'', b'\xFE\xe9', b'\x00'], 'latin1') == 'é' - assert iter_decode_to_string([ - b'', b'h\xe9', b'llo'], 'x-user-defined') == 'h\uF7E9llo' - - -def test_iter_encode(): - assert b''.join(iter_encode([], 'latin1')) == b'' - assert b''.join(iter_encode([''], 'latin1')) == b'' - assert b''.join(iter_encode(['é'], 'latin1')) == b'\xe9' - assert b''.join(iter_encode(['', 'é', '', ''], 'latin1')) == b'\xe9' - assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16')) == b'\xe9\x00' - assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16le')) == b'\xe9\x00' - assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16be')) == b'\x00\xe9' - assert b''.join(iter_encode([ - '', 'h\uF7E9', '', 'llo'], 'x-user-defined')) == b'h\xe9llo' - - -def test_x_user_defined(): - encoded = b'2,\x0c\x0b\x1aO\xd9#\xcb\x0f\xc9\xbbt\xcf\xa8\xca' - decoded = '2,\x0c\x0b\x1aO\uf7d9#\uf7cb\x0f\uf7c9\uf7bbt\uf7cf\uf7a8\uf7ca' - encoded = b'aa' - decoded = 'aa' - assert decode(encoded, 'x-user-defined') == (decoded, lookup('x-user-defined')) - assert encode(decoded, 'x-user-defined') == encoded diff --git a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/webencodings/x_user_defined.py b/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/webencodings/x_user_defined.py deleted file mode 100644 index d16e326..0000000 --- a/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/webencodings/x_user_defined.py +++ /dev/null @@ -1,325 +0,0 @@ -# coding: utf-8 -""" - - webencodings.x_user_defined - ~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - An implementation of the x-user-defined encoding. - - :copyright: Copyright 2012 by Simon Sapin - :license: BSD, see LICENSE for details. 
- -""" - -from __future__ import unicode_literals - -import codecs - - -### Codec APIs - -class Codec(codecs.Codec): - - def encode(self, input, errors='strict'): - return codecs.charmap_encode(input, errors, encoding_table) - - def decode(self, input, errors='strict'): - return codecs.charmap_decode(input, errors, decoding_table) - - -class IncrementalEncoder(codecs.IncrementalEncoder): - def encode(self, input, final=False): - return codecs.charmap_encode(input, self.errors, encoding_table)[0] - - -class IncrementalDecoder(codecs.IncrementalDecoder): - def decode(self, input, final=False): - return codecs.charmap_decode(input, self.errors, decoding_table)[0] - - -class StreamWriter(Codec, codecs.StreamWriter): - pass - - -class StreamReader(Codec, codecs.StreamReader): - pass - - -### encodings module API - -codec_info = codecs.CodecInfo( - name='x-user-defined', - encode=Codec().encode, - decode=Codec().decode, - incrementalencoder=IncrementalEncoder, - incrementaldecoder=IncrementalDecoder, - streamreader=StreamReader, - streamwriter=StreamWriter, -) - - -### Decoding Table - -# Python 3: -# for c in range(256): print(' %r' % chr(c if c < 128 else c + 0xF700)) -decoding_table = ( - '\x00' - '\x01' - '\x02' - '\x03' - '\x04' - '\x05' - '\x06' - '\x07' - '\x08' - '\t' - '\n' - '\x0b' - '\x0c' - '\r' - '\x0e' - '\x0f' - '\x10' - '\x11' - '\x12' - '\x13' - '\x14' - '\x15' - '\x16' - '\x17' - '\x18' - '\x19' - '\x1a' - '\x1b' - '\x1c' - '\x1d' - '\x1e' - '\x1f' - ' ' - '!' - '"' - '#' - '$' - '%' - '&' - "'" - '(' - ')' - '*' - '+' - ',' - '-' - '.' - '/' - '0' - '1' - '2' - '3' - '4' - '5' - '6' - '7' - '8' - '9' - ':' - ';' - '<' - '=' - '>' - '?' 
- '@' - 'A' - 'B' - 'C' - 'D' - 'E' - 'F' - 'G' - 'H' - 'I' - 'J' - 'K' - 'L' - 'M' - 'N' - 'O' - 'P' - 'Q' - 'R' - 'S' - 'T' - 'U' - 'V' - 'W' - 'X' - 'Y' - 'Z' - '[' - '\\' - ']' - '^' - '_' - '`' - 'a' - 'b' - 'c' - 'd' - 'e' - 'f' - 'g' - 'h' - 'i' - 'j' - 'k' - 'l' - 'm' - 'n' - 'o' - 'p' - 'q' - 'r' - 's' - 't' - 'u' - 'v' - 'w' - 'x' - 'y' - 'z' - '{' - '|' - '}' - '~' - '\x7f' - '\uf780' - '\uf781' - '\uf782' - '\uf783' - '\uf784' - '\uf785' - '\uf786' - '\uf787' - '\uf788' - '\uf789' - '\uf78a' - '\uf78b' - '\uf78c' - '\uf78d' - '\uf78e' - '\uf78f' - '\uf790' - '\uf791' - '\uf792' - '\uf793' - '\uf794' - '\uf795' - '\uf796' - '\uf797' - '\uf798' - '\uf799' - '\uf79a' - '\uf79b' - '\uf79c' - '\uf79d' - '\uf79e' - '\uf79f' - '\uf7a0' - '\uf7a1' - '\uf7a2' - '\uf7a3' - '\uf7a4' - '\uf7a5' - '\uf7a6' - '\uf7a7' - '\uf7a8' - '\uf7a9' - '\uf7aa' - '\uf7ab' - '\uf7ac' - '\uf7ad' - '\uf7ae' - '\uf7af' - '\uf7b0' - '\uf7b1' - '\uf7b2' - '\uf7b3' - '\uf7b4' - '\uf7b5' - '\uf7b6' - '\uf7b7' - '\uf7b8' - '\uf7b9' - '\uf7ba' - '\uf7bb' - '\uf7bc' - '\uf7bd' - '\uf7be' - '\uf7bf' - '\uf7c0' - '\uf7c1' - '\uf7c2' - '\uf7c3' - '\uf7c4' - '\uf7c5' - '\uf7c6' - '\uf7c7' - '\uf7c8' - '\uf7c9' - '\uf7ca' - '\uf7cb' - '\uf7cc' - '\uf7cd' - '\uf7ce' - '\uf7cf' - '\uf7d0' - '\uf7d1' - '\uf7d2' - '\uf7d3' - '\uf7d4' - '\uf7d5' - '\uf7d6' - '\uf7d7' - '\uf7d8' - '\uf7d9' - '\uf7da' - '\uf7db' - '\uf7dc' - '\uf7dd' - '\uf7de' - '\uf7df' - '\uf7e0' - '\uf7e1' - '\uf7e2' - '\uf7e3' - '\uf7e4' - '\uf7e5' - '\uf7e6' - '\uf7e7' - '\uf7e8' - '\uf7e9' - '\uf7ea' - '\uf7eb' - '\uf7ec' - '\uf7ed' - '\uf7ee' - '\uf7ef' - '\uf7f0' - '\uf7f1' - '\uf7f2' - '\uf7f3' - '\uf7f4' - '\uf7f5' - '\uf7f6' - '\uf7f7' - '\uf7f8' - '\uf7f9' - '\uf7fa' - '\uf7fb' - '\uf7fc' - '\uf7fd' - '\uf7fe' - '\uf7ff' -) - -### Encoding table -encoding_table = codecs.charmap_build(decoding_table) diff --git a/venv/lib/python3.7/site-packages/pygame-1.9.6.dist-info/INSTALLER 
b/venv/lib/python3.7/site-packages/pygame-1.9.6.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/venv/lib/python3.7/site-packages/pygame-1.9.6.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/venv/lib/python3.7/site-packages/pygame-1.9.6.dist-info/METADATA b/venv/lib/python3.7/site-packages/pygame-1.9.6.dist-info/METADATA deleted file mode 100644 index b318aa7..0000000 --- a/venv/lib/python3.7/site-packages/pygame-1.9.6.dist-info/METADATA +++ /dev/null @@ -1,15 +0,0 @@ -Metadata-Version: 2.1 -Name: pygame -Version: 1.9.6 -Summary: Python Game Development -Home-page: https://www.pygame.org -Author: A community project. -Author-email: pygame@pygame.org -License: LGPL -Platform: UNKNOWN - -Pygame is a Python wrapper module for the -SDL multimedia library. It contains python functions and classes -that will allow you to use SDL's support for playing cdroms, -audio and video output, and keyboard, mouse and joystick input. - diff --git a/venv/lib/python3.7/site-packages/pygame-1.9.6.dist-info/RECORD b/venv/lib/python3.7/site-packages/pygame-1.9.6.dist-info/RECORD deleted file mode 100644 index 1a4964b..0000000 --- a/venv/lib/python3.7/site-packages/pygame-1.9.6.dist-info/RECORD +++ /dev/null @@ -1,502 +0,0 @@ -../../../include/site/python3.7/pygame/_camera.h,sha256=4dX0hz1SNd_5xZn0kuwHw6-3pGxEOuj1XcENVbAeucg,840 -../../../include/site/python3.7/pygame/_pygame.h,sha256=OQ9soShwz-VZtechnxP3TfBV1VM58l_u4w3p0D8Vu5c,28409 -../../../include/site/python3.7/pygame/_surface.h,sha256=G6ICVNMqd3DqzBtTh1BxFxW1bs2fPrO__vcBUQtbwRM,958 -../../../include/site/python3.7/pygame/bitmask.h,sha256=7FJpaFGWXPa581C333b1ObFfQkfc6i-bW1In5qjxwvg,4777 -../../../include/site/python3.7/pygame/camera.h,sha256=jwdMpuVS1iAhbNgIjXvCcL-nolqbLdnwNlbrCjKfn24,6957 -../../../include/site/python3.7/pygame/fastevents.h,sha256=OKgCTiH8K4ud2orTBJXeF5w3Mo0_ImbgRW5f5UTnCqY,1643 
-../../../include/site/python3.7/pygame/font.h,sha256=97S2JuIstaUlyTmmuEGGr_Mg33EhvDJZ5nlTTbU_6O0,1836 -../../../include/site/python3.7/pygame/freetype.h,sha256=9OyIUWvVYfqIUZRNdTQpvVzdH2W7x21yjItkdc7GQZo,4175 -../../../include/site/python3.7/pygame/mask.h,sha256=pKCMpOLRBF4KxnF-c9shrUPFbLbr6-LqDlEXHm_IwvM,620 -../../../include/site/python3.7/pygame/mixer.h,sha256=hr5NeLohLT_ljX6FceMNK_10ASrIKhIyASK2YVtvupo,2258 -../../../include/site/python3.7/pygame/palette.h,sha256=dzARYIsQdHAaV8ypCrQbYRWFisXYXABD4ToMlzYKojg,7057 -../../../include/site/python3.7/pygame/pgarrinter.h,sha256=alsw7p6X7ukOB1o3curyrjWOcGHgVCQgCvS1D9FtiRc,1060 -../../../include/site/python3.7/pygame/pgbufferproxy.h,sha256=hcAe-mipMYC4hNL9xIYVBwg4Aot4w1qpit84Zmkmnqw,1961 -../../../include/site/python3.7/pygame/pgcompat.h,sha256=HFUiPlkczXkP3Bjpvz60iW9JiD6LKZ8L4qt2DL5KfcM,5927 -../../../include/site/python3.7/pygame/pgopengl.h,sha256=hbNYtcJU3jxmvubD890dhSSE2KjzISXT76PJcguKV04,379 -../../../include/site/python3.7/pygame/pygame.h,sha256=90tX-gvapUFv077c3Z22x8r17DB0bmMYfQ7aduxOKHc,1246 -../../../include/site/python3.7/pygame/scrap.h,sha256=MhdSrMflRdBNnwemNm1Os5kgQ-tU9rGk5yN3qoMJza4,4594 -../../../include/site/python3.7/pygame/surface.h,sha256=gbtI4NuqYrVty0iJLXgPwc_7tQjtRy1FJgHQ-bCeFuI,14554 -pygame-1.9.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -pygame-1.9.6.dist-info/METADATA,sha256=QkfSJHGdvxbVNWr5ZSCH-DF_HJZhmXey3HDoc2yhXWc,444 -pygame-1.9.6.dist-info/RECORD,, -pygame-1.9.6.dist-info/WHEEL,sha256=AhV6RMqZ2IDfreRJKo44QWYxYeP-0Jr0bezzBLQ1eog,109 -pygame-1.9.6.dist-info/top_level.txt,sha256=ABXdFGIAE2g9m2VOzQPaLa917r6XEu6d96RqIzvAWCs,7 -pygame/.libs/libFLAC-6cefaab3.so.8.3.0,sha256=y9mXCaQ_HkMVYWs3Odv6nGfSo-CdmK3iVEUw1qMhciU,1724904 -pygame/.libs/libSDL-1-c39928c2.2.so.0.11.4,sha256=NhCisosVpG-gOyVrVwbh7Yp5RZUmDpXYH1FdoduytPI,2582880 -pygame/.libs/libSDL_image-1-c6cf9ff4.2.so.0.8.4,sha256=c1_0AiGboBNGA3RqQB8jlY0OJvpewdcrFYfXRtrYfSE,280640 
-pygame/.libs/libSDL_mixer-1-9fea1499.2.so.0.12.0,sha256=M8qlY0Nw9MGU-RrSdw9U6QW0ORHue-Z6KJey3e5JpRY,712352 -pygame/.libs/libSDL_ttf-2-254ab10f.0.so.0.10.1,sha256=HavJvhP126fd0MRRuiwj4ii2JVfHCGfsyjnqa0U3S_U,128704 -pygame/.libs/libasound-87b0bd31.so.2.0.0,sha256=IVRYWcReXPnG7s0QEoYrJc1WuRUGofxp47NZWpenky8,5163104 -pygame/.libs/libcap-5fec18ee.so.1.10,sha256=YYaDzqhzH--qj7GDiVlL638bhFpbZQK5nQsYQPo5I1c,17000 -pygame/.libs/libdbus-1-30e56626.so.3.4.0,sha256=HI-2ay2XJhVjtzkHKOAlBlQcHjdJH2DiH6oCfjyDQKc,260624 -pygame/.libs/libfluidsynth-e2f12a37.so.1.5.2,sha256=zdD6G_zyiG-oh44SAH8H43Vc-_-t4ENbtWa70Y9fa5Q,1723552 -pygame/.libs/libfreetype-b48c0f70.so.6.15.0,sha256=kExCrLkYGDryRbN1rQOZ2u6DAVpyZ1yY1-Z_0FDz5Fo,3465904 -pygame/.libs/libjpeg-bd53fca1.so.62.0.0,sha256=95OjRF5_IIkcEcsOJu3tuMkd98-VX5tJL2TwAclQYHM,142552 -pygame/.libs/libmikmod-fabcac29.so.2.0.4,sha256=Imh5r2HHV9k240JFoZXHyrANiyfJtSlX8bDQO6swDqI,308648 -pygame/.libs/libogg-0d584846.so.0.8.3,sha256=Sjg58Y8RG30TVqN9hJ5HKAz6jTULNJ6wgmr_-CdTJMI,75872 -pygame/.libs/libpng16-e11200e8.so.16.36.0,sha256=fN3uYznr8BDQ_R8kicBUdVx2IZ79JJZi4SAKqg9hFrk,959776 -pygame/.libs/libportmidi-7fcf6c23.so,sha256=FovmpEeh0niUlMIH8Y_ZYAsevY2L0oDeWpHU5AHUD88,59984 -pygame/.libs/libsmpeg-0-7159ee15.4.so.0.1.4,sha256=15CRlz_H9JGsUnzLCuuvbkGk76h--9qqPdFx8Z-Zzq0,893440 -pygame/.libs/libsndfile-d5ecee11.so.1.0.17,sha256=a0giJ9M0QsnP43kmh73JaoZ7zvAYnrWekshPEZuqSNs,328920 -pygame/.libs/libtiff-97e44e95.so.3.8.2,sha256=UYf_bWKDJiOWTjkGbrm3h3B4GuPIrPCAh6Y5trgFunM,389776 -pygame/.libs/libvorbis-373f4a71.so.0.4.8,sha256=THGb3fm04BHuCW88L_TbJ6IS4ZWfOO6UZrPgFF1CPhg,236240 -pygame/.libs/libvorbisfile-f207f3a6.so.3.3.7,sha256=EeTATCY87BIaQJlw7rCTZL7jAMSKHpbhMlnkrWZ0PIw,59024 -pygame/.libs/libwebp-ec096d5f.so.7.0.4,sha256=8YKk5tUE-LXMy-LRfkNIOD0RwIrdjdyf_vstdnjTADY,3631832 -pygame/.libs/libz-a147dcb0.so.1.2.3,sha256=1IGoOjRpujOMRn7cZ29ERtAxBt6SxTUlRLBkSqa_lsk,87848 -pygame/__init__.py,sha256=eowEkDP_hoyHbEdkVkDlV1d1SOsSSYx7rmWYseNWiOQ,11582 
-pygame/__pycache__/__init__.cpython-37.pyc,, -pygame/__pycache__/_camera_opencv_highgui.cpython-37.pyc,, -pygame/__pycache__/_camera_vidcapture.cpython-37.pyc,, -pygame/__pycache__/_dummybackend.cpython-37.pyc,, -pygame/__pycache__/_numpysndarray.cpython-37.pyc,, -pygame/__pycache__/_numpysurfarray.cpython-37.pyc,, -pygame/__pycache__/camera.cpython-37.pyc,, -pygame/__pycache__/colordict.cpython-37.pyc,, -pygame/__pycache__/compat.cpython-37.pyc,, -pygame/__pycache__/cursors.cpython-37.pyc,, -pygame/__pycache__/draw_py.cpython-37.pyc,, -pygame/__pycache__/freetype.cpython-37.pyc,, -pygame/__pycache__/ftfont.cpython-37.pyc,, -pygame/__pycache__/locals.cpython-37.pyc,, -pygame/__pycache__/macosx.cpython-37.pyc,, -pygame/__pycache__/midi.cpython-37.pyc,, -pygame/__pycache__/pkgdata.cpython-37.pyc,, -pygame/__pycache__/sndarray.cpython-37.pyc,, -pygame/__pycache__/sprite.cpython-37.pyc,, -pygame/__pycache__/surfarray.cpython-37.pyc,, -pygame/__pycache__/sysfont.cpython-37.pyc,, -pygame/__pycache__/version.cpython-37.pyc,, -pygame/_camera.cpython-37m-x86_64-linux-gnu.so,sha256=xAtjL92eQ_NTlYPEgMPevYiUPrfvWFVknMfZLs-putc,202768 -pygame/_camera_opencv_highgui.py,sha256=0BxVKRNVVKV6De4Fj7bjHY6xZgFPD7DG43X8sJ7YC9o,2230 -pygame/_camera_vidcapture.py,sha256=3MSVsbGNi7jRLR9o4MwkL7Kqwzfrb4PY4Vobl_IDjIU,3739 -pygame/_dummybackend.py,sha256=cIt88kBhPzgset-VkJ1D3bG10pvlXLevpqRgrHhbeuo,770 -pygame/_freetype.cpython-37m-x86_64-linux-gnu.so,sha256=M1js0aWPyoY5mJIbhRW6F022huZthYVCLf4o_SWUXco,433408 -pygame/_numpysndarray.py,sha256=58IIFmOiKGbYSt8yiKSGGGztcmg3aJKcPlqhCMvTYwY,2616 -pygame/_numpysurfarray.py,sha256=OYIpTY3mCz87c1IF-IXiLrTmeUmDKnrpj2q84ssW5d0,12999 -pygame/base.cpython-37m-x86_64-linux-gnu.so,sha256=k91rqbUEANXQvU_ODraZisGVJt1OnDYYsAhLhk7p51c,120456 -pygame/bufferproxy.cpython-37m-x86_64-linux-gnu.so,sha256=d5yj39Tk0CXGVkI2dVadYw4h5EzOESN9E1eUoVmQ6hQ,62456 -pygame/camera.py,sha256=Yop3_-IWJzFaJgAv-NiAlLKpLkK8guAEbpGVhdQ4IdI,2871 
-pygame/cdrom.cpython-37m-x86_64-linux-gnu.so,sha256=UBTnDHFAH0VwRE_7CMzILbg6O1CaGZYxgNezfllOJ9I,66480 -pygame/color.cpython-37m-x86_64-linux-gnu.so,sha256=veEpqPmqhBe2PtD2NAbNCVOsoUidLNMZB9TT9CDoxps,132344 -pygame/colordict.py,sha256=3Xu50TCCxRdi-ue8Vp9YBGQpmKOsv1rU9bOxf3kpkR4,24175 -pygame/compat.py,sha256=ybSN_4HbNWvDJ6quCTklD1hATz8LyN0bIeRzN2yTqD8,3113 -pygame/constants.cpython-37m-x86_64-linux-gnu.so,sha256=bgk4DiKiZLrllBNKEjv5GZDujaMgIh1oqHTKJC6N6ws,102832 -pygame/cursors.py,sha256=haMvZ4N2kCrHrmFXY161Q_tDt8zVaKFW_JNjji13n-0,10043 -pygame/display.cpython-37m-x86_64-linux-gnu.so,sha256=s_5589xZqctlSEzesC_RGhcRHu7YUcZf3UEC60Vi0l4,108024 -pygame/docs/__init__.py,sha256=29vwukb_6yJPWEe66QZQynt10HvgrYBmGP2vWm7Nt_w,287 -pygame/docs/__main__.py,sha256=5r8cR_y261Kf_QDTxmw9hCdFaVBGexXV5-OrQLbSNac,750 -pygame/docs/__pycache__/__init__.cpython-37.pyc,, -pygame/docs/__pycache__/__main__.cpython-37.pyc,, -pygame/docs/logos.html,sha256=Av4WDVwgX0xfhNJ-GJ2hFARHmZiDi_yv0WGk8FKzvJI,1604 -pygame/docs/pygame_logo.gif,sha256=XTZuyss-niaQhtGw78vP9MQmqIH_yLgq6cJqtb8hRtI,25116 -pygame/docs/pygame_powered.gif,sha256=JOcurZ9ApwPx420aiIOPFgoKFHkhpQKbll0w9-qhvnY,10171 -pygame/docs/pygame_small.gif,sha256=OT5k5n6OnoatNH9HfWETFx88Cr3PrUzkZyZritBviOM,10286 -pygame/docs/pygame_tiny.gif,sha256=vx7ERhvSpj51wp_qu-jISAgQr8E6vxK4_3I58-oSRm0,5485 -pygame/docs/ref/docscomments.json,sha256=MN3tMIlMeCG4G3qA3J52ZuR_vkf535KlPuRot9pJu8s,445603 -pygame/draw.cpython-37m-x86_64-linux-gnu.so,sha256=G_AVjMKmlQI0STZQ8_NtRVQnvSdEsWsMG0ImESVZf8M,152312 -pygame/draw_py.py,sha256=fQooMsO259L-Z56ZTJXliaxyVBPKLEUsrEsIrrmFBAM,17943 -pygame/event.cpython-37m-x86_64-linux-gnu.so,sha256=48YL-WH37nBhiV7TfA-sY5TrJEhG7hwZd-w93jlesAY,111944 -pygame/examples/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pygame/examples/__pycache__/__init__.cpython-37.pyc,, -pygame/examples/__pycache__/aacircle.cpython-37.pyc,, -pygame/examples/__pycache__/aliens.cpython-37.pyc,, 
-pygame/examples/__pycache__/arraydemo.cpython-37.pyc,, -pygame/examples/__pycache__/audiocapture.cpython-37.pyc,, -pygame/examples/__pycache__/blend_fill.cpython-37.pyc,, -pygame/examples/__pycache__/blit_blends.cpython-37.pyc,, -pygame/examples/__pycache__/camera.cpython-37.pyc,, -pygame/examples/__pycache__/chimp.cpython-37.pyc,, -pygame/examples/__pycache__/cursors.cpython-37.pyc,, -pygame/examples/__pycache__/dropevent.cpython-37.pyc,, -pygame/examples/__pycache__/eventlist.cpython-37.pyc,, -pygame/examples/__pycache__/fastevents.cpython-37.pyc,, -pygame/examples/__pycache__/fonty.cpython-37.pyc,, -pygame/examples/__pycache__/freetype_misc.cpython-37.pyc,, -pygame/examples/__pycache__/glcube.cpython-37.pyc,, -pygame/examples/__pycache__/headless_no_windows_needed.cpython-37.pyc,, -pygame/examples/__pycache__/liquid.cpython-37.pyc,, -pygame/examples/__pycache__/mask.cpython-37.pyc,, -pygame/examples/__pycache__/midi.cpython-37.pyc,, -pygame/examples/__pycache__/moveit.cpython-37.pyc,, -pygame/examples/__pycache__/oldalien.cpython-37.pyc,, -pygame/examples/__pycache__/overlay.cpython-37.pyc,, -pygame/examples/__pycache__/pixelarray.cpython-37.pyc,, -pygame/examples/__pycache__/playmus.cpython-37.pyc,, -pygame/examples/__pycache__/prevent_display_stretching.cpython-37.pyc,, -pygame/examples/__pycache__/scaletest.cpython-37.pyc,, -pygame/examples/__pycache__/scrap_clipboard.cpython-37.pyc,, -pygame/examples/__pycache__/scroll.cpython-37.pyc,, -pygame/examples/__pycache__/sound.cpython-37.pyc,, -pygame/examples/__pycache__/sound_array_demos.cpython-37.pyc,, -pygame/examples/__pycache__/stars.cpython-37.pyc,, -pygame/examples/__pycache__/testsprite.cpython-37.pyc,, -pygame/examples/__pycache__/textinput.cpython-37.pyc,, -pygame/examples/__pycache__/vgrade.cpython-37.pyc,, -pygame/examples/__pycache__/video.cpython-37.pyc,, -pygame/examples/aacircle.py,sha256=mVTq3082ajYV7-uiYGCdjKldNPSZIh1y98_sqFDJ3g8,909 
-pygame/examples/aliens.py,sha256=df5swn9ApFlOoq3Glbo9V7mnUmLCixwc9bgmnl8txHo,10720 -pygame/examples/arraydemo.py,sha256=UaIWsusqmEhKg9l520pupgwhGlXsN_Vz2Hk8y_Mdif8,3721 -pygame/examples/audiocapture.py,sha256=N5lSw4TV0GNVPW4rYs3Zz-W_o-zS_l4VJDXP4IvYhRQ,1275 -pygame/examples/blend_fill.py,sha256=DQ6UsV6qwK2PwuFuDhdTnlUBLddOwsD_loGG-xGi1iY,3105 -pygame/examples/blit_blends.py,sha256=roYwEUM6atE8mxF0Z_FHJlvVZ61LZVgg2rMjifmUvf0,5908 -pygame/examples/camera.py,sha256=UyKAwSHwV9_GxMLym4FO6O8WfyNuiy5xb2u-ewtZwl4,2756 -pygame/examples/chimp.py,sha256=1KllbxPCeLMP3GhqUxPcpXBTQVmSwxmbVTqyJWxVKwE,6016 -pygame/examples/cursors.py,sha256=kDG9Fv-J-5z7jFmrpCNVojkaihgTQZSyPO5GOhD8qiA,3062 -pygame/examples/data/alien1.gif,sha256=8Wveo1zpLVaFCtYITm_SoYqjy8L-TDuaZOcNa8Osqsw,3826 -pygame/examples/data/alien1.jpg,sha256=HOjXjmW4Ofsu_en9WNrkuIp_DCwupXcFB0Yt_cqV9rA,3103 -pygame/examples/data/alien1.png,sha256=femzLssV7oGvT3S2tyviyq7qO32QfhBDtMOR3ENBCLs,3522 -pygame/examples/data/alien2.gif,sha256=0MPpVYzvjAECy0pd7YRFKCEzzIYDKEJt70rbjlLbTZM,3834 -pygame/examples/data/alien2.png,sha256=FKGYDI2FBBR1Z56BLn357PNfh3-M38gAJpSQL8BpKYY,3526 -pygame/examples/data/alien3.gif,sha256=bFCRGZOQPaadCKIc-tlqoUjHdsi5IzR0E-2SjpPEvmA,3829 -pygame/examples/data/alien3.png,sha256=a51Tb9E4IvoICGzQChHq51RKVQJLf1GOCEeqA5yYfnk,3518 -pygame/examples/data/arraydemo.bmp,sha256=xM4-n_hRCQFZlfwwdTK6eaBweycUc863TgSFbWp3dbA,76854 -pygame/examples/data/asprite.bmp,sha256=97XMpKq9lLpMuv8UveCf8UJEAxheBhPUjHfMRQBkUx4,578 -pygame/examples/data/background.gif,sha256=-3kZwt99MFUBbBo-kHvPZXVlFrSB34XVNQWWxfHb970,9133 -pygame/examples/data/blue.mpg,sha256=XDj1CRPt1MWxspCfA3oqb822nlZgQ7CyyEuVJwlgmpg,6144 -pygame/examples/data/bomb.gif,sha256=T4VCSOht8tpisgV5rIQnBCPs7vtSzAZBJF7SZ_L6JQM,1162 -pygame/examples/data/boom.wav,sha256=kfoWs0VVDGHv0JSa46nXZBGyw70-jpfPq_B31qNA_F8,12562 -pygame/examples/data/brick.png,sha256=K_mshK0aL81nzOjAorTXyPps6n9mvofLeOWFXFpVjYA,170 
-pygame/examples/data/car_door.wav,sha256=TwYWVqme5NqVVID1N4es92RSKEdTYkxbNx6dNamK-_4,3910 -pygame/examples/data/chimp.bmp,sha256=PS9dLh1kfgnmba5lQiKyEQIBi8k-R1kvJ832SNudj1A,5498 -pygame/examples/data/city.png,sha256=c0Nu2o7x7QmvGMDmDCaPnhvJ8tPNuguKKpI_Z-NfQ40,143 -pygame/examples/data/danger.gif,sha256=m0CBKalFbkqlohgOmrwkwVOfqBhRWonb7xm1pzbDy2Q,2761 -pygame/examples/data/explosion1.gif,sha256=WYcdwbZqmYdaaaPYFiR5vka0Anp4F4nnNlpSSx_1xug,6513 -pygame/examples/data/fist.bmp,sha256=Nze8jhiCNl9wLgNYtVtRBqUGYqbn4-frAZaSVYXm_TE,4378 -pygame/examples/data/house_lo.mp3,sha256=R0nZUXymMp_XLPU8S1yvsiVeWT6MKLt5Rjp-WSnVrLQ,116320 -pygame/examples/data/house_lo.ogg,sha256=64FiQ1Zjq-cOj6Bmya_v3ZjEWmBaGZlTl19udKaz6sU,31334 -pygame/examples/data/house_lo.wav,sha256=B1BwfFaPIsSxaash-igVI_YE9SQd1BCXRTnSAKsNunY,78464 -pygame/examples/data/liquid.bmp,sha256=qtzPXhq0dr2ORNCCZ6gY2loT2Tsu0Dx5YvXB548I1Xg,11734 -pygame/examples/data/midikeys.png,sha256=9HCCmMHvlubR6G9a0jMv1C-AKeBzYfb5jjNhol2Mdqw,19666 -pygame/examples/data/oldplayer.gif,sha256=NWEhmaE5FUe0J-uCF8fr-XUAnoaqWa0SicoMQUBFYUg,1075 -pygame/examples/data/player1.gif,sha256=3ZTVWGxnedKqtf3R-X1omPC0Y8jUSPGgHBAzeGhnV4c,3470 -pygame/examples/data/punch.wav,sha256=A0F1xT8aIZ6aNI_5McMqLygb1EfmdIzPi4kWkU4EwQc,4176 -pygame/examples/data/sans.ttf,sha256=nrZ6FRet4dwlvA7xOReYCP2QwyGebk0iVJaSFbtpOhM,133088 -pygame/examples/data/secosmic_lo.wav,sha256=-EIFkzj7k5qEqG04n7mnUGUp1SsyCJ4n08TzPT600DY,18700 -pygame/examples/data/shot.gif,sha256=bF2eY629zQzvDu83AKpveSFhJq5G4QpOE98A0tvbPFI,129 -pygame/examples/data/static.png,sha256=Xe4wN80awt7nTNiLemoSNTEKlAbGFW7djNETP8IleNs,1202 -pygame/examples/data/whiff.wav,sha256=FMWM3XnYtce6mHFXQCYPgzT-xu-Q4DJybZfpPjG8cpE,5850 -pygame/examples/data/yuv_1.pgm,sha256=WGXoVZ0O-c6DTX9ALLoy-y4LFeOEul-W1PqFjBXGL20,649743 -pygame/examples/dropevent.py,sha256=EnvfZWDNuFYF5lHihTD5Pe3BlwnvcAmJFFuVVj4qSDE,2112 -pygame/examples/eventlist.py,sha256=6Xjeb9KTdhfeGbxy3slam4iCbwHGlmLCKZeiw2gJmW4,3677 
-pygame/examples/fastevents.py,sha256=5AI_PBXwXnZlqG-shmkzEdWBBtnh30CWcyS7SDkVKkk,2890 -pygame/examples/fonty.py,sha256=YMFsysZUCdoUVhr0WYi1jm_WTF3QzzbNB2vvuq3Re5k,2539 -pygame/examples/freetype_misc.py,sha256=LtIEvxRJFwwldAYy-otKQkwa-mm4yAMwhXhewUEN2QE,3543 -pygame/examples/glcube.py,sha256=Q7XdacBhYgOTVdRy5LVKqNqAB1vm5LVYjPp5aZ9FoA8,3404 -pygame/examples/headless_no_windows_needed.py,sha256=iT4Ifv-CRP71ebyd7oYIGjXyDniFCu3W49Df8SlJMLY,1333 -pygame/examples/liquid.py,sha256=pi2Wc_fwtb4StxiAuGqCV99a-Kp39mwPU6GCptaPSvA,2519 -pygame/examples/macosx/__pycache__/macfont.cpython-37.pyc,, -pygame/examples/macosx/aliens_app_example/English.lproj/MainMenu.nib/JavaCompiling.plist,sha256=xhIgbq92qrjlpg57wb29TGJP0m0shOPN46mZMaumQOM,278 -pygame/examples/macosx/aliens_app_example/English.lproj/MainMenu.nib/_MainMenu_EOArchive_English.java,sha256=u16hPcsJQdm1ylRbrg2zn4DBwEEIALLAj53CkzcykHk,5793 -pygame/examples/macosx/aliens_app_example/English.lproj/MainMenu.nib/classes.nib,sha256=5bgACAlsCG2ezXYqAOAKtoBXAj-0yU7JrIby-Fuj4Dc,306 -pygame/examples/macosx/aliens_app_example/English.lproj/MainMenu.nib/info.nib,sha256=QZyXxX5PBZnb_A-_-Xd2MFX1YBHdJJecqKF4T0N1BJg,566 -pygame/examples/macosx/aliens_app_example/English.lproj/MainMenu.nib/keyedobjects.nib,sha256=8IiTBRH77Z1V-WRSL1nYA_dQ5eAGbTRqZ6H6JUYCFBg,9442 -pygame/examples/macosx/aliens_app_example/English.lproj/aliens.icns,sha256=DuKpW8ACs9WR98NVge-Rda6bHyYhqGFN2EXlH-WmgBY,7236 -pygame/examples/macosx/aliens_app_example/README.txt,sha256=i6ixQdcSofTjmtIY5OKVdkHQ8TvyHjdcMroWzFpIffE,432 -pygame/examples/macosx/aliens_app_example/__pycache__/aliens.cpython-37.pyc,, -pygame/examples/macosx/aliens_app_example/__pycache__/setup.cpython-37.pyc,, -pygame/examples/macosx/aliens_app_example/aliens.py,sha256=e6wjvgHqDicdXHUCXp3zhaR1tjNdLyyzd-6vfJRv8Pc,9634 -pygame/examples/macosx/aliens_app_example/setup.py,sha256=dvNmUM1MRKe6LZvBnoM4Zhe8vLRdce1QcSJ2Zv-cE5w,578 
-pygame/examples/macosx/macfont.py,sha256=El5xehQNCjZub57OJmRUmfJQrcaEzUND12lvjMor3Bk,4486 -pygame/examples/mask.py,sha256=ERdhkGFu6iPk5L-coC42vzeaK32VlCUcY0R1uVTa6Zw,5555 -pygame/examples/midi.py,sha256=djuCoTzwhcxWmtQ0PYcq7nfKCnHES8wKviIL4hq52iw,29990 -pygame/examples/moveit.py,sha256=xc2Sug_XjmAEsdyrBGu5sr3DBM0tO8_Z_i3MYNUo_PQ,1837 -pygame/examples/oldalien.py,sha256=mGla5IFZqIQmL3YQ7kNw4f7O9b83_XmcWeoSkh8vToM,6714 -pygame/examples/overlay.py,sha256=CLqqCwwOxIFKKZDX__7iZ7hH8rQ-tPr5BF0UERcdwkI,1397 -pygame/examples/pixelarray.py,sha256=WFoqKpM73S1CylX_2Tr15zvfnUoqnruQzwxV8UCEJ38,3318 -pygame/examples/playmus.py,sha256=lsOaUiCFjn_H0OOUhFK3gqhOBM_Kfyd22V6J6Vj56Ic,4564 -pygame/examples/prevent_display_stretching.py,sha256=0SrxOL-zECoN2ZPhYqH85d78u_-V_M8FG_FlWCDajtY,2741 -pygame/examples/scaletest.py,sha256=dRx3I3cO3uBrdrxrUEqKCjYLPyWlms70ZSUeU3bH3Lk,4689 -pygame/examples/scrap_clipboard.py,sha256=t1Ofdo-FaSsNt5TmrnDD6nZpgB6PN6mDNThDYrvEVY4,2926 -pygame/examples/scroll.py,sha256=yG4OCUDI4eZHXHR5viINt5d7qXRpcCVLSTVnG_SLYX0,6792 -pygame/examples/sound.py,sha256=ncjK2E85bLTk6IhIlBDKKa1hw_NCi2zHMk3nKPwqrrw,1359 -pygame/examples/sound_array_demos.py,sha256=5HsxrBo0uWXUHGOsfUXOxsCo3vQVjmx3c6vCrTxq1bA,6598 -pygame/examples/stars.py,sha256=M6_Q3pD9DgPVNfF0EtTsVet3m09ny-HlQvw1o5F5wf0,2488 -pygame/examples/testsprite.py,sha256=ObeWnWbPnM8D3W3tnhJFfGn1nEcYYfXzgSkHgjGLMuM,6910 -pygame/examples/textinput.py,sha256=JVRlppW4pCGQ_vYNb3Y3VAeusB0fLcOPko4F-OibuBs,5346 -pygame/examples/vgrade.py,sha256=ImMjT5H7_3JsziaxPFRLoLQRIk5H_0LzW0spxLTBEeI,3320 -pygame/examples/video.py,sha256=QP1Z2h1u99YFJsYOxw_5WugiBSuw23Z8qXk7huNjBDw,2814 -pygame/fastevent.cpython-37m-x86_64-linux-gnu.so,sha256=GEowsUjeHvoaL3TBNrineuAygGL48QWbNb0Wf6iVLWc,62496 -pygame/font.cpython-37m-x86_64-linux-gnu.so,sha256=zL7SXN3uYOKFKQV8XAF8ZjTskAg6_1oDjhzlj8TD1g8,93448 -pygame/freesansbold.ttf,sha256=v5JRJp8R5LNVgqmTdglt7uPQxJc6RZy9l7C-vAH0QK0,98600 
-pygame/freetype.py,sha256=jIO8euGMhCUIzpTsRSZQ0BSEyLGiA24IYkuicmOAWTM,1814 -pygame/ftfont.py,sha256=vHzoGq-lEswO_AU9G23bzsKg6m6g-dJzXokhszW4z-I,6239 -pygame/gfxdraw.cpython-37m-x86_64-linux-gnu.so,sha256=0lcobRPsc2beUoHM03axJ9cGtOTQY-Mn5QTVr1UQT14,350824 -pygame/image.cpython-37m-x86_64-linux-gnu.so,sha256=81LCARBAAx5BfZj7Bbsb6cD8adz6FZDsBgROHGh6FkM,103608 -pygame/imageext.cpython-37m-x86_64-linux-gnu.so,sha256=VzlKxsIQESSyElkYV5uLdxfSnBMwhuM2mBzXJLOVF4U,95616 -pygame/joystick.cpython-37m-x86_64-linux-gnu.so,sha256=-VM60sh65IySdmFo--nnd_Z0OfBhi2J61cZX8YbxzNs,54304 -pygame/key.cpython-37m-x86_64-linux-gnu.so,sha256=zwW9u_h1b1_jKmcw4pVpksn66b0EDPjcbGqL5RyuzAo,41696 -pygame/locals.py,sha256=8ZvWyqOqFiaPT5y7ru0_PHeCp15u_ecMqlRqNaNJ_As,1102 -pygame/macosx.py,sha256=H9QEz8HaEG__660Nz2mndCPpQHLEjf6jNulFy_3xQXk,769 -pygame/mask.cpython-37m-x86_64-linux-gnu.so,sha256=jjI7nPvnhidP5T99YicChKOqyP6ZDJilqZBaV2SdHac,156984 -pygame/math.cpython-37m-x86_64-linux-gnu.so,sha256=n4n3C_3AGAUl87zwxgeeaPc0TcuJmoiQjeu92o844K8,259416 -pygame/midi.py,sha256=ezAnHYQ2szG9o3C_1N6kyzax9fnhz6CKiqlk27dormM,23836 -pygame/mixer.cpython-37m-x86_64-linux-gnu.so,sha256=VJiVQDg2TRWFs6Wr90MP4VcljynZqiMrbEbI2-6CxzI,145168 -pygame/mixer_music.cpython-37m-x86_64-linux-gnu.so,sha256=M_GWBAApWOuWeEmA8rzGeeVIBaj_JTC2PkFjPC5J3pg,67832 -pygame/mouse.cpython-37m-x86_64-linux-gnu.so,sha256=SW1tG_aKHT20F9zyvku96v6a4dbFnj0KnPSSuvjZ3-Q,37696 -pygame/newbuffer.cpython-37m-x86_64-linux-gnu.so,sha256=rQYPV6E7rI4MCi1MQ3ecRh38s4weBHrfNrF3q25NY_s,90126 -pygame/overlay.cpython-37m-x86_64-linux-gnu.so,sha256=H9g3tUd837B-gzmBgCcFbt-VBPYJfz2qIX2wvfyFpOY,37664 -pygame/pixelarray.cpython-37m-x86_64-linux-gnu.so,sha256=j_AobUqoLKH4Clp1qFS0hlXvxc1baKYdJYia_MbuIRo,193928 -pygame/pixelcopy.cpython-37m-x86_64-linux-gnu.so,sha256=wjmweCF6PGiQLyAIkKvwqN5k0QqiDA1uRTSwL1OtUuA,86776 -pygame/pkgdata.py,sha256=sN89ew2QZXjAyc1wVlqNSOcPcK4z0cAy9Zmw29qkFoA,2264 -pygame/pygame.ico,sha256=PBF9cw0Ca9Rw2pNmDD3iXvcYYQeI9ZzZ9vxtRLQRoJc,145516 
-pygame/pygame_icon.bmp,sha256=Twnby8nv4HMhGka49n-47CPseDvwrSLZ0l1o9U2Bb5s,630 -pygame/pygame_icon.icns,sha256=B3Q59PaET66Br-x3wUFtoymOjJBB9cxEUEA74YE1TL4,53627 -pygame/pygame_icon.svg,sha256=oxge7RESGgP2--7fUmM7HnmD3vadIT5hjDykhVLcIj0,15363 -pygame/pygame_icon.tiff,sha256=cvDqNeR5SckSMyD6a7vNUsVgV7QYXNVWjsmC0BAUX98,61604 -pygame/pypm.cpython-37m-x86_64-linux-gnu.so,sha256=CYY1vhpLsb6rwTkj4C8YpwUBmvqYCMml0iNQ_-lxO5A,448320 -pygame/rect.cpython-37m-x86_64-linux-gnu.so,sha256=o466XakJAmPO__1o9p_rV2nKfmAnuDH4v4QGNfq-k1M,177200 -pygame/rwobject.cpython-37m-x86_64-linux-gnu.so,sha256=_cOG5nERlKxwx54ouOokRZ6o2QkQ8yGkWwEuwMRKZvM,70776 -pygame/scrap.cpython-37m-x86_64-linux-gnu.so,sha256=GwDemX6x_M9j5EvDPow9RQlKhOfiephLJlB2aqlZMk0,95784 -pygame/sndarray.py,sha256=0zkFW14vjzBkA_FprdfwoWxt4UEeu4gOLkWXtLjk7sQ,3413 -pygame/sprite.py,sha256=t8_kbgO15FPjrBBiAxlKM1Y-mlSSv81h_0O2asJQYDM,56082 -pygame/surface.cpython-37m-x86_64-linux-gnu.so,sha256=CUHE_GlnI3zhBXA5Bs6o7W99uGjTmTYTzOq8cedeSEU,1033992 -pygame/surfarray.py,sha256=8FC0ltTJxGrxaZ1RLZ3HGy8ap72ePe99-uSb-zVg50E,10257 -pygame/surflock.cpython-37m-x86_64-linux-gnu.so,sha256=ljyuu-QdT7j-n1ziR8l-ji9-I2UUiKY3gAPjEgngy2g,41688 -pygame/sysfont.py,sha256=kfgHO6VDvDUVzj8tsL4reURbkKK2gs4R4HiIDpkF9qc,13581 -pygame/tests/__init__.py,sha256=dE6IFYHjVPdSDk9jtWne-2yjHXt1ukGOxMP37sBRfgA,1273 -pygame/tests/__main__.py,sha256=HJMNTIIl6i0nAbrghd3sQsVSqcyJleYCmfqt3Rv8ekA,3834 -pygame/tests/__pycache__/__init__.cpython-37.pyc,, -pygame/tests/__pycache__/__main__.cpython-37.pyc,, -pygame/tests/__pycache__/base_test.cpython-37.pyc,, -pygame/tests/__pycache__/blit_test.cpython-37.pyc,, -pygame/tests/__pycache__/bufferproxy_test.cpython-37.pyc,, -pygame/tests/__pycache__/camera_test.cpython-37.pyc,, -pygame/tests/__pycache__/cdrom_tags.cpython-37.pyc,, -pygame/tests/__pycache__/cdrom_test.cpython-37.pyc,, -pygame/tests/__pycache__/color_test.cpython-37.pyc,, -pygame/tests/__pycache__/compat_test.cpython-37.pyc,, 
-pygame/tests/__pycache__/constants_test.cpython-37.pyc,, -pygame/tests/__pycache__/cursors_test.cpython-37.pyc,, -pygame/tests/__pycache__/display_test.cpython-37.pyc,, -pygame/tests/__pycache__/draw_test.cpython-37.pyc,, -pygame/tests/__pycache__/event_test.cpython-37.pyc,, -pygame/tests/__pycache__/fastevent_tags.cpython-37.pyc,, -pygame/tests/__pycache__/fastevent_test.cpython-37.pyc,, -pygame/tests/__pycache__/font_tags.cpython-37.pyc,, -pygame/tests/__pycache__/font_test.cpython-37.pyc,, -pygame/tests/__pycache__/freetype_tags.cpython-37.pyc,, -pygame/tests/__pycache__/freetype_test.cpython-37.pyc,, -pygame/tests/__pycache__/ftfont_tags.cpython-37.pyc,, -pygame/tests/__pycache__/ftfont_test.cpython-37.pyc,, -pygame/tests/__pycache__/gfxdraw_test.cpython-37.pyc,, -pygame/tests/__pycache__/image__save_gl_surface_test.cpython-37.pyc,, -pygame/tests/__pycache__/image_tags.cpython-37.pyc,, -pygame/tests/__pycache__/image_test.cpython-37.pyc,, -pygame/tests/__pycache__/imageext_tags.cpython-37.pyc,, -pygame/tests/__pycache__/imageext_test.cpython-37.pyc,, -pygame/tests/__pycache__/joystick_test.cpython-37.pyc,, -pygame/tests/__pycache__/key_test.cpython-37.pyc,, -pygame/tests/__pycache__/mask_test.cpython-37.pyc,, -pygame/tests/__pycache__/math_test.cpython-37.pyc,, -pygame/tests/__pycache__/midi_tags.cpython-37.pyc,, -pygame/tests/__pycache__/midi_test.cpython-37.pyc,, -pygame/tests/__pycache__/mixer_music_tags.cpython-37.pyc,, -pygame/tests/__pycache__/mixer_music_test.cpython-37.pyc,, -pygame/tests/__pycache__/mixer_tags.cpython-37.pyc,, -pygame/tests/__pycache__/mixer_test.cpython-37.pyc,, -pygame/tests/__pycache__/mouse_test.cpython-37.pyc,, -pygame/tests/__pycache__/overlay_tags.cpython-37.pyc,, -pygame/tests/__pycache__/overlay_test.cpython-37.pyc,, -pygame/tests/__pycache__/pixelarray_test.cpython-37.pyc,, -pygame/tests/__pycache__/pixelcopy_test.cpython-37.pyc,, -pygame/tests/__pycache__/rect_test.cpython-37.pyc,, 
-pygame/tests/__pycache__/rwobject_test.cpython-37.pyc,, -pygame/tests/__pycache__/scrap_tags.cpython-37.pyc,, -pygame/tests/__pycache__/scrap_test.cpython-37.pyc,, -pygame/tests/__pycache__/sndarray_tags.cpython-37.pyc,, -pygame/tests/__pycache__/sndarray_test.cpython-37.pyc,, -pygame/tests/__pycache__/sprite_test.cpython-37.pyc,, -pygame/tests/__pycache__/surface_test.cpython-37.pyc,, -pygame/tests/__pycache__/surfarray_tags.cpython-37.pyc,, -pygame/tests/__pycache__/surfarray_test.cpython-37.pyc,, -pygame/tests/__pycache__/surflock_test.cpython-37.pyc,, -pygame/tests/__pycache__/sysfont_test.cpython-37.pyc,, -pygame/tests/__pycache__/test_test_.cpython-37.pyc,, -pygame/tests/__pycache__/threads_test.cpython-37.pyc,, -pygame/tests/__pycache__/time_test.cpython-37.pyc,, -pygame/tests/__pycache__/touch_tags.cpython-37.pyc,, -pygame/tests/__pycache__/touch_test.cpython-37.pyc,, -pygame/tests/__pycache__/transform_test.cpython-37.pyc,, -pygame/tests/__pycache__/version_test.cpython-37.pyc,, -pygame/tests/base_test.py,sha256=VxGkbo89qJfmc53FdXjouXqAoWBhhCC4owFnYebN3LA,24055 -pygame/tests/blit_test.py,sha256=cl0dU599wvF2wwMRoM81rITzp11BpxQhxiP0xVUbG_k,4683 -pygame/tests/bufferproxy_test.py,sha256=VZukxi7C90IQq8mfY32uXPJdLZFENABl7UsiSWZrxlU,16808 -pygame/tests/camera_test.py,sha256=rcfoTOQKD57NaYS7uQ8_IUM45tjH0v-AS9VNrgJ-h6I,129 -pygame/tests/cdrom_tags.py,sha256=Ka5HOPip7wostxjh5nPQCPMhQWZzQtqCiOssl01fXEg,42 -pygame/tests/cdrom_test.py,sha256=aL8xrIPUj-FJzIBVk_VT05A1aaFCoWiJxMLtqn8PSsQ,9976 -pygame/tests/color_test.py,sha256=RMoVPGY4ctfObWfJiMF6IYa4dSFQuJRPSid1hSqVKuE,36611 -pygame/tests/compat_test.py,sha256=cuEgdQEKUgUbRJwn5TWqPfygOC_bbyO6Pv18o8XwDHI,2773 -pygame/tests/constants_test.py,sha256=8aeCXB2yg8QitImPVLjOAm8sPRHF5FaRlW74QVdAis4,1445 -pygame/tests/cursors_test.py,sha256=naD_joc1jPy1ATkDyZgsajb_Ez5oiLbV3C4OjgmGgrg,2491 -pygame/tests/display_test.py,sha256=AzRyb33RuWkpZPjsVHBf9f2tCdS2yo9vh0VE5HKeyDI,16299 
-pygame/tests/draw_test.py,sha256=pIYeUxGY5NDLS8mrDX6AIoDjS-jub6JWdxR9SKs4EWA,51526 -pygame/tests/event_test.py,sha256=WJfV5VA-ULlnIKblNVQEYQYjaBNplVA9T4V6bSaoLLw,10698 -pygame/tests/fastevent_tags.py,sha256=m9CAhjZFciPRvWFz1fJu6kGZMNt2FrDloEkA_Wz1POg,14 -pygame/tests/fastevent_test.py,sha256=DjPLD3ygDT_bQBsIdPfz5s0xCa1snsPoiTu1_dLuhkY,5410 -pygame/tests/fixtures/fonts/A_PyGameMono-8.png,sha256=QmhReADwKrzW5RWnG1KHEtZIqpVtwWzhXmydX1su10c,92 -pygame/tests/fixtures/fonts/PyGameMono-18-100dpi.bdf,sha256=nm3okxnfAFtADlp7s2AY43zS49NYg9jq7GVzG2lPhOQ,1947 -pygame/tests/fixtures/fonts/PyGameMono-18-75dpi.bdf,sha256=4kB0uYeEpa3W-ZAomFMpc0hD-h6FnOh2m5IPi6xzfds,1648 -pygame/tests/fixtures/fonts/PyGameMono-8.bdf,sha256=aK0KV-_osDPTPiA1BUCgZHOmufy6J9Vh5pf1IAi0_yg,1365 -pygame/tests/fixtures/fonts/PyGameMono.otf,sha256=_Af4LyMEgKKGa8jDlfik89axhLc3HoS8aG5JHWN5sZw,3128 -pygame/tests/fixtures/fonts/test_fixed.otf,sha256=FWHmFsQUobgtbm370Y5XJv1lAokTreGR5fo4tuw3Who,58464 -pygame/tests/fixtures/fonts/test_sans.ttf,sha256=nrZ6FRet4dwlvA7xOReYCP2QwyGebk0iVJaSFbtpOhM,133088 -pygame/tests/fixtures/fonts/u13079_PyGameMono-8.png,sha256=x_D28PW8aKed8ZHBK6AISEZ9vlEV76Whi770ItTuFVU,89 -pygame/tests/fixtures/xbm_cursors/white_sizing.xbm,sha256=VLAS1A417T-Vg6GMsmicUCYpOhvGsrgJJYUvdFYYteY,366 -pygame/tests/fixtures/xbm_cursors/white_sizing_mask.xbm,sha256=CKQeiOtlFoJdAts83UmTEeVk-3pxgJ9Wu2QJaCjzAQM,391 -pygame/tests/font_tags.py,sha256=m9CAhjZFciPRvWFz1fJu6kGZMNt2FrDloEkA_Wz1POg,14 -pygame/tests/font_test.py,sha256=-57BrYOw5XrqvkbnPZpEhUK0mCnZWsAqg3gEYXX24e4,19716 -pygame/tests/freetype_tags.py,sha256=k8pF6wXOxo6geb3zWRLafKemsFBJsaTbgLbGBdZHl5w,183 -pygame/tests/freetype_test.py,sha256=_2SFZTVVZ0LFtRLVPd-Q9BzjDa0D9RtpCxfNM3mMrrs,60454 -pygame/tests/ftfont_tags.py,sha256=QoCzxaa4QXJEtHTVWXauabL2GhNx0ZDi_cbkN2XeUHY,181 -pygame/tests/ftfont_test.py,sha256=c2adgJJmf3ppWLH6ASRBHymm7RMamtQifSeQyfZfTyU,534 -pygame/tests/gfxdraw_test.py,sha256=3CePp-GpTqkfiALwSiiCeI3LN_INJcDYSYf4-999N9Y,34365 
-pygame/tests/image__save_gl_surface_test.py,sha256=Kcn8RGVkMSu7WUqMdpIgEQkZIBv2QLm0Gmx069AT6ZQ,1201 -pygame/tests/image_tags.py,sha256=Neq0in62AlLpXmGuEgu3X8-DLQYMaZfoHFyIcyxHe6I,132 -pygame/tests/image_test.py,sha256=wdZZ4i3tFMgq0d-blos7OSz0Ydh6EmX3HYGGzRhZD3Y,20747 -pygame/tests/imageext_tags.py,sha256=CR9cI4WIVu0b_GuPqczNqFk-t-hcWATTaOWSnAOIAEs,140 -pygame/tests/imageext_test.py,sha256=h8GgWEdcLuxAXPbRXgqY0AGTVS5aJfcBud_JBc_JcTs,2956 -pygame/tests/joystick_test.py,sha256=fvRuB3uaiewWcmbrg4W2U3tlKlxArWmMjTtCo0OoNKk,3254 -pygame/tests/key_test.py,sha256=uv8nnTloAdKfJ5uPeaL0oVUHbG2H48Pi9QN0zqMm8GY,2099 -pygame/tests/mask_test.py,sha256=3YW-64LEmbPYLt1j8sbXHJzcUnwdAVW0wLRPBajRT2A,75723 -pygame/tests/math_test.py,sha256=-re2ESSFIrR4yC0TX5BejVE6D5sJ24D3nnjC03fsxag,75473 -pygame/tests/midi_tags.py,sha256=kh-RgQubZpoDpC0fLslJZOOgKCuyANiJKWFPcuUXakA,27 -pygame/tests/midi_test.py,sha256=MC178DaLkqxHdtMSj_2hYN-JiYGX0nrw727gWalDFdI,13053 -pygame/tests/mixer_music_tags.py,sha256=tv3QqQFSCb4m5Ej67KVNbM0CneqEwp1cqdinoPD-wC0,138 -pygame/tests/mixer_music_test.py,sha256=B8F7V3Bf4FLQJOru1CrKd-eXTEq2JQXTcbZxsTO3OOQ,8371 -pygame/tests/mixer_tags.py,sha256=OxorCxCHpu5TIVo70M_VfQXlPU9ksfbpFE5giS-zb5I,132 -pygame/tests/mixer_test.py,sha256=pHAI2Msqv_I_nWbxgIe5A0Qy23CpJ-XpCXWCrUSA-U4,38752 -pygame/tests/mouse_test.py,sha256=ZhHxTcAyIuzDTk71EAo2g5F-u_Og9WV8dH3fsPltMtc,5607 -pygame/tests/overlay_tags.py,sha256=o4vxnSAG_uhNaj7lKUmbzpdwqldhCX9mbjOjtdsJLbc,66 -pygame/tests/overlay_test.py,sha256=jVkFbxEl94DfrMr0CX5eqLL6-cpJR0FLt4RWI1ItJ0A,943 -pygame/tests/pixelarray_test.py,sha256=HP1jLfIhnACqILGny28c5PtNW9Non4_qjf_L2IYa3bQ,56189 -pygame/tests/pixelcopy_test.py,sha256=y7GdW9bM06T22-sM4C87jA86pUNs15L2xrZ3HKpfIME,26072 -pygame/tests/rect_test.py,sha256=4EgyD0WEl6FgdFN9hcFhN55hvrRUl5pC4LKdcbebzAc,27447 -pygame/tests/run_tests__tests/__init__.py,sha256=9_8wL9Scv8_Cs8HJyJHGvx1vwXErsuvlsAqNZLcJQR0,8 -pygame/tests/run_tests__tests/__pycache__/__init__.cpython-37.pyc,, 
-pygame/tests/run_tests__tests/__pycache__/run_tests__test.cpython-37.pyc,, -pygame/tests/run_tests__tests/all_ok/__init__.py,sha256=9_8wL9Scv8_Cs8HJyJHGvx1vwXErsuvlsAqNZLcJQR0,8 -pygame/tests/run_tests__tests/all_ok/__pycache__/__init__.cpython-37.pyc,, -pygame/tests/run_tests__tests/all_ok/__pycache__/fake_2_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/all_ok/__pycache__/fake_3_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/all_ok/__pycache__/fake_4_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/all_ok/__pycache__/fake_5_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/all_ok/__pycache__/fake_6_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/all_ok/__pycache__/no_assertions__ret_code_of_1__test.cpython-37.pyc,, -pygame/tests/run_tests__tests/all_ok/__pycache__/zero_tests_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/all_ok/fake_2_test.py,sha256=WQbZ12A0FMgoi0Jv-MGSf7K93GKCEA_NtY-s32fFPCA,980 -pygame/tests/run_tests__tests/all_ok/fake_3_test.py,sha256=WQbZ12A0FMgoi0Jv-MGSf7K93GKCEA_NtY-s32fFPCA,980 -pygame/tests/run_tests__tests/all_ok/fake_4_test.py,sha256=WQbZ12A0FMgoi0Jv-MGSf7K93GKCEA_NtY-s32fFPCA,980 -pygame/tests/run_tests__tests/all_ok/fake_5_test.py,sha256=WQbZ12A0FMgoi0Jv-MGSf7K93GKCEA_NtY-s32fFPCA,980 -pygame/tests/run_tests__tests/all_ok/fake_6_test.py,sha256=WQbZ12A0FMgoi0Jv-MGSf7K93GKCEA_NtY-s32fFPCA,980 -pygame/tests/run_tests__tests/all_ok/no_assertions__ret_code_of_1__test.py,sha256=yMOBw45z13s73pTx82OzMmYG7ZiSiQ_IEgMEH21_4io,877 -pygame/tests/run_tests__tests/all_ok/zero_tests_test.py,sha256=s30QH2Ae0zlo-IouC0UuFNYa5rrTwPI6UZ5PVW8hxUc,625 -pygame/tests/run_tests__tests/everything/__init__.py,sha256=9_8wL9Scv8_Cs8HJyJHGvx1vwXErsuvlsAqNZLcJQR0,8 -pygame/tests/run_tests__tests/everything/__pycache__/__init__.cpython-37.pyc,, -pygame/tests/run_tests__tests/everything/__pycache__/fake_2_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/everything/__pycache__/incomplete_todo_test.cpython-37.pyc,, 
-pygame/tests/run_tests__tests/everything/__pycache__/magic_tag_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/everything/__pycache__/sleep_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/everything/fake_2_test.py,sha256=WQbZ12A0FMgoi0Jv-MGSf7K93GKCEA_NtY-s32fFPCA,980 -pygame/tests/run_tests__tests/everything/incomplete_todo_test.py,sha256=zZeKBjmqEtpFcHcuX0jVZudWaTxvM2oX0JToAbKLleY,990 -pygame/tests/run_tests__tests/everything/magic_tag_test.py,sha256=7fQgh8UMxCx51jLGN-5zhxHcl6ETegpYd4y4or5gqZk,940 -pygame/tests/run_tests__tests/everything/sleep_test.py,sha256=4t-3qQW7G4Wn0T-e2yMqCjBdTg_I7-zWFKXUkW6-9Gc,796 -pygame/tests/run_tests__tests/exclude/__init__.py,sha256=9_8wL9Scv8_Cs8HJyJHGvx1vwXErsuvlsAqNZLcJQR0,8 -pygame/tests/run_tests__tests/exclude/__pycache__/__init__.cpython-37.pyc,, -pygame/tests/run_tests__tests/exclude/__pycache__/fake_2_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/exclude/__pycache__/invisible_tag_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/exclude/__pycache__/magic_tag_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/exclude/fake_2_test.py,sha256=WQbZ12A0FMgoi0Jv-MGSf7K93GKCEA_NtY-s32fFPCA,980 -pygame/tests/run_tests__tests/exclude/invisible_tag_test.py,sha256=wYEC59vGVFIsidO0f3mgHir_nUQZRKbehaHq0A-aTy8,1006 -pygame/tests/run_tests__tests/exclude/magic_tag_test.py,sha256=7fQgh8UMxCx51jLGN-5zhxHcl6ETegpYd4y4or5gqZk,940 -pygame/tests/run_tests__tests/failures1/__init__.py,sha256=9_8wL9Scv8_Cs8HJyJHGvx1vwXErsuvlsAqNZLcJQR0,8 -pygame/tests/run_tests__tests/failures1/__pycache__/__init__.cpython-37.pyc,, -pygame/tests/run_tests__tests/failures1/__pycache__/fake_2_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/failures1/__pycache__/fake_3_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/failures1/__pycache__/fake_4_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/failures1/fake_2_test.py,sha256=WQbZ12A0FMgoi0Jv-MGSf7K93GKCEA_NtY-s32fFPCA,980 
-pygame/tests/run_tests__tests/failures1/fake_3_test.py,sha256=WQbZ12A0FMgoi0Jv-MGSf7K93GKCEA_NtY-s32fFPCA,980 -pygame/tests/run_tests__tests/failures1/fake_4_test.py,sha256=-fMHT7BiAm2z_b38xpCZw2y-jjNNVqxMOwEmOjeSXEw,1030 -pygame/tests/run_tests__tests/incomplete/__init__.py,sha256=9_8wL9Scv8_Cs8HJyJHGvx1vwXErsuvlsAqNZLcJQR0,8 -pygame/tests/run_tests__tests/incomplete/__pycache__/__init__.cpython-37.pyc,, -pygame/tests/run_tests__tests/incomplete/__pycache__/fake_2_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/incomplete/__pycache__/fake_3_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/incomplete/fake_2_test.py,sha256=0N2viyjn6Mq10bgcHP_x9rkUfyobvYjGRbT_FfJSUyc,970 -pygame/tests/run_tests__tests/incomplete/fake_3_test.py,sha256=WQbZ12A0FMgoi0Jv-MGSf7K93GKCEA_NtY-s32fFPCA,980 -pygame/tests/run_tests__tests/incomplete_todo/__init__.py,sha256=9_8wL9Scv8_Cs8HJyJHGvx1vwXErsuvlsAqNZLcJQR0,8 -pygame/tests/run_tests__tests/incomplete_todo/__pycache__/__init__.cpython-37.pyc,, -pygame/tests/run_tests__tests/incomplete_todo/__pycache__/fake_2_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/incomplete_todo/__pycache__/fake_3_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/incomplete_todo/fake_2_test.py,sha256=zZeKBjmqEtpFcHcuX0jVZudWaTxvM2oX0JToAbKLleY,990 -pygame/tests/run_tests__tests/incomplete_todo/fake_3_test.py,sha256=WQbZ12A0FMgoi0Jv-MGSf7K93GKCEA_NtY-s32fFPCA,980 -pygame/tests/run_tests__tests/infinite_loop/__init__.py,sha256=9_8wL9Scv8_Cs8HJyJHGvx1vwXErsuvlsAqNZLcJQR0,8 -pygame/tests/run_tests__tests/infinite_loop/__pycache__/__init__.cpython-37.pyc,, -pygame/tests/run_tests__tests/infinite_loop/__pycache__/fake_1_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/infinite_loop/__pycache__/fake_2_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/infinite_loop/fake_1_test.py,sha256=A6bbE5P9J3jdoLCqGiGKZrI5cfjZy7nikBR_7baHqNw,987 
-pygame/tests/run_tests__tests/infinite_loop/fake_2_test.py,sha256=WQbZ12A0FMgoi0Jv-MGSf7K93GKCEA_NtY-s32fFPCA,980 -pygame/tests/run_tests__tests/print_stderr/__init__.py,sha256=9_8wL9Scv8_Cs8HJyJHGvx1vwXErsuvlsAqNZLcJQR0,8 -pygame/tests/run_tests__tests/print_stderr/__pycache__/__init__.cpython-37.pyc,, -pygame/tests/run_tests__tests/print_stderr/__pycache__/fake_2_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/print_stderr/__pycache__/fake_3_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/print_stderr/__pycache__/fake_4_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/print_stderr/fake_2_test.py,sha256=WQbZ12A0FMgoi0Jv-MGSf7K93GKCEA_NtY-s32fFPCA,980 -pygame/tests/run_tests__tests/print_stderr/fake_3_test.py,sha256=kmHeArRCqQ6CsZjSNxb26ftElmGNSZ6_xWQz2qjmZjo,1034 -pygame/tests/run_tests__tests/print_stderr/fake_4_test.py,sha256=-fMHT7BiAm2z_b38xpCZw2y-jjNNVqxMOwEmOjeSXEw,1030 -pygame/tests/run_tests__tests/print_stdout/__init__.py,sha256=9_8wL9Scv8_Cs8HJyJHGvx1vwXErsuvlsAqNZLcJQR0,8 -pygame/tests/run_tests__tests/print_stdout/__pycache__/__init__.cpython-37.pyc,, -pygame/tests/run_tests__tests/print_stdout/__pycache__/fake_2_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/print_stdout/__pycache__/fake_3_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/print_stdout/__pycache__/fake_4_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/print_stdout/fake_2_test.py,sha256=WQbZ12A0FMgoi0Jv-MGSf7K93GKCEA_NtY-s32fFPCA,980 -pygame/tests/run_tests__tests/print_stdout/fake_3_test.py,sha256=S4aPSIEYpWuOiSKB75WceZgLJEs3LqmnWqMfbuh8pbA,1092 -pygame/tests/run_tests__tests/print_stdout/fake_4_test.py,sha256=-fMHT7BiAm2z_b38xpCZw2y-jjNNVqxMOwEmOjeSXEw,1030 -pygame/tests/run_tests__tests/run_tests__test.py,sha256=-8bey9yXLmafecehooi3xCPScI6DtVPa5p13EkRkGoY,4285 -pygame/tests/run_tests__tests/timeout/__init__.py,sha256=9_8wL9Scv8_Cs8HJyJHGvx1vwXErsuvlsAqNZLcJQR0,8 -pygame/tests/run_tests__tests/timeout/__pycache__/__init__.cpython-37.pyc,, 
-pygame/tests/run_tests__tests/timeout/__pycache__/fake_2_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/timeout/__pycache__/sleep_test.cpython-37.pyc,, -pygame/tests/run_tests__tests/timeout/fake_2_test.py,sha256=WQbZ12A0FMgoi0Jv-MGSf7K93GKCEA_NtY-s32fFPCA,980 -pygame/tests/run_tests__tests/timeout/sleep_test.py,sha256=f8oaqC2-JwlZADxCqScqv3PA2vfe2CDOTkv4efcdo-0,797 -pygame/tests/rwobject_test.py,sha256=Tg4T0vJDntlCrSmlv2d0yCj09QmbxZtJzZD5SzVkXpg,3865 -pygame/tests/scrap_tags.py,sha256=Z59xMvR6VZh_jxMGXJAQhLcOCrKc8wqLzFYIuFCC1iQ,417 -pygame/tests/scrap_test.py,sha256=RMQ-uB5kxEUMr4twvbP0r7ptJsH0jYFbd1SFqd8OVyw,9036 -pygame/tests/sndarray_tags.py,sha256=OKf8uXDz43bSC5YmOXme0CJ2R3mF33CWfVKKwV5v624,190 -pygame/tests/sndarray_test.py,sha256=VmNjrFkhAfVe0w3VxjLwLu7nrt9JeeIyUWA6rXZh0AM,6841 -pygame/tests/sprite_test.py,sha256=BEhFJlUSOEQqNg-7N7ffoPiebZ53YHo1fJiyXJF1ujg,43313 -pygame/tests/surface_test.py,sha256=oBUzoml2k7LJdITM4FNGH1rWBZTyut4w-NUOE54FUnc,100140 -pygame/tests/surfarray_tags.py,sha256=qBxxuyRGVht11RU3jpN78PPwxBkFmoVyinj0UTIX5FQ,260 -pygame/tests/surfarray_test.py,sha256=wwdi6lC5NQFPuZqnQXOdBM_xWLN-Gs4RsfdIkmr3hHY,25452 -pygame/tests/surflock_test.py,sha256=czBVDuVXbinJ7X055cgX6Q3uSUCtnZTd6vj6OBkQ9PY,4725 -pygame/tests/sysfont_test.py,sha256=cL7hDRuJtXfMKMvQHq8OyADrjI5Do3c5aCaOjCChg1w,798 -pygame/tests/test_test_.py,sha256=YTMzScCIzPnxzIIxhLvMuxKG2EfL9klWbC87pO4Eri8,25 -pygame/tests/test_utils/__init__.py,sha256=IGEBsBPznul3Ki-b6-3JLTm9zMos4r459fduBzoKSS4,5836 -pygame/tests/test_utils/__pycache__/__init__.cpython-37.pyc,, -pygame/tests/test_utils/__pycache__/arrinter.cpython-37.pyc,, -pygame/tests/test_utils/__pycache__/async_sub.cpython-37.pyc,, -pygame/tests/test_utils/__pycache__/buftools.cpython-37.pyc,, -pygame/tests/test_utils/__pycache__/endian.cpython-37.pyc,, -pygame/tests/test_utils/__pycache__/png.cpython-37.pyc,, -pygame/tests/test_utils/__pycache__/run_tests.cpython-37.pyc,, 
-pygame/tests/test_utils/__pycache__/test_machinery.cpython-37.pyc,, -pygame/tests/test_utils/__pycache__/test_runner.cpython-37.pyc,, -pygame/tests/test_utils/arrinter.py,sha256=XWN_5AHyOFWHrTeGfCvQn56jUgN-m13hjAOvjQ0kXLA,15234 -pygame/tests/test_utils/async_sub.py,sha256=5P6H3egMmICa5u83ek4iFMK84wDFBc4pkgGQQbx-2UY,9109 -pygame/tests/test_utils/buftools.py,sha256=nrygK9ouEu70oCi0d0bSBWHlvPH8ERnIyj5r0NeILbQ,23886 -pygame/tests/test_utils/endian.py,sha256=hxXVLc_t5P8_Q5ReHX6wuV-YPD1-jdsVszLDLNaMf7I,493 -pygame/tests/test_utils/png.py,sha256=3Q6zk1_oCs4cSJymYlDaUh3xE-dzoD6tguHOMNSkbYc,151218 -pygame/tests/test_utils/run_tests.py,sha256=tIM5nM5m3EFGPLAm9tLzDC5YMR86qbn3-Zj4KvPCxLk,12292 -pygame/tests/test_utils/test_machinery.py,sha256=sQA7HSiLYnmFfWC6a8OKkYw4xe2f9zYLHYHvEvJwJ6w,2404 -pygame/tests/test_utils/test_runner.py,sha256=nFwRt5Mv5KZcEfWhE2uFFoZYksMoop5g7VI9Un9hkxw,7729 -pygame/tests/threads_test.py,sha256=zov2oHktLZIrlAsvE_fP18TePWN00jXIYVLC5ltfq_s,5055 -pygame/tests/time_test.py,sha256=Zf-AcvJVPNMj4KNU1w1o8u21bJouuAPBZxP2cvkMkGo,6908 -pygame/tests/touch_tags.py,sha256=LsHl4hEKvNKjvGCZWC2lcQ9VZsvMmFbiynd9prQKM54,28 -pygame/tests/touch_test.py,sha256=b7TvlNGOGhODFk72A5IJf2v118XsJ051JH7hTchcFn0,1198 -pygame/tests/transform_test.py,sha256=PyYFWxUwZ1Hqpdr7dIqDq6uyCMOwHk4yOlvWAbTUTuc,39625 -pygame/tests/version_test.py,sha256=LlO7iW9dAK0dBdu3_7X5b-WzkSFArF-1wyti4OX0gZI,1339 -pygame/threads/Py25Queue.py,sha256=qdHdnydu26pfhdq7VRGD8j8cFN2DdYuxImUGdzLWhG0,7759 -pygame/threads/__init__.py,sha256=n1eKpbYy0KyDmVxUXKLgmPQL-UIJl5IN8f8VHP56CFc,8709 -pygame/threads/__pycache__/Py25Queue.cpython-37.pyc,, -pygame/threads/__pycache__/__init__.cpython-37.pyc,, -pygame/time.cpython-37m-x86_64-linux-gnu.so,sha256=Lvt53jQBGQLG62GqkTCSCMLXkz4J0ezwWB0NhTsKRcY,54136 -pygame/transform.cpython-37m-x86_64-linux-gnu.so,sha256=f5lOM2PTn-_j06BAMnXw5ab6Yj-tVPFA7Yw1AZfjM38,239096 -pygame/version.py,sha256=p62kRkhcj8Ui-oJ33PQ2dRAHxIwLhVQjn8QVwFWguOY,1920 diff --git 
a/venv/lib/python3.7/site-packages/pygame-1.9.6.dist-info/WHEEL b/venv/lib/python3.7/site-packages/pygame-1.9.6.dist-info/WHEEL deleted file mode 100644 index 697e432..0000000 --- a/venv/lib/python3.7/site-packages/pygame-1.9.6.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.31.1) -Root-Is-Purelib: false -Tag: cp37-cp37m-manylinux1_x86_64 - diff --git a/venv/lib/python3.7/site-packages/pygame-1.9.6.dist-info/top_level.txt b/venv/lib/python3.7/site-packages/pygame-1.9.6.dist-info/top_level.txt deleted file mode 100644 index 0cb7ff1..0000000 --- a/venv/lib/python3.7/site-packages/pygame-1.9.6.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -pygame diff --git a/venv/lib/python3.7/site-packages/pygame/.libs/libFLAC-6cefaab3.so.8.3.0 b/venv/lib/python3.7/site-packages/pygame/.libs/libFLAC-6cefaab3.so.8.3.0 deleted file mode 100755 index 2fc9a3d..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/.libs/libFLAC-6cefaab3.so.8.3.0 and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/.libs/libSDL-1-c39928c2.2.so.0.11.4 b/venv/lib/python3.7/site-packages/pygame/.libs/libSDL-1-c39928c2.2.so.0.11.4 deleted file mode 100755 index 76de4d6..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/.libs/libSDL-1-c39928c2.2.so.0.11.4 and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/.libs/libSDL_image-1-c6cf9ff4.2.so.0.8.4 b/venv/lib/python3.7/site-packages/pygame/.libs/libSDL_image-1-c6cf9ff4.2.so.0.8.4 deleted file mode 100755 index 183cdc2..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/.libs/libSDL_image-1-c6cf9ff4.2.so.0.8.4 and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/.libs/libSDL_mixer-1-9fea1499.2.so.0.12.0 b/venv/lib/python3.7/site-packages/pygame/.libs/libSDL_mixer-1-9fea1499.2.so.0.12.0 deleted file mode 100755 index b87f2b3..0000000 Binary files 
a/venv/lib/python3.7/site-packages/pygame/.libs/libSDL_mixer-1-9fea1499.2.so.0.12.0 and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/.libs/libSDL_ttf-2-254ab10f.0.so.0.10.1 b/venv/lib/python3.7/site-packages/pygame/.libs/libSDL_ttf-2-254ab10f.0.so.0.10.1 deleted file mode 100755 index ef08830..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/.libs/libSDL_ttf-2-254ab10f.0.so.0.10.1 and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/.libs/libasound-87b0bd31.so.2.0.0 b/venv/lib/python3.7/site-packages/pygame/.libs/libasound-87b0bd31.so.2.0.0 deleted file mode 100755 index 1fd794c..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/.libs/libasound-87b0bd31.so.2.0.0 and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/.libs/libcap-5fec18ee.so.1.10 b/venv/lib/python3.7/site-packages/pygame/.libs/libcap-5fec18ee.so.1.10 deleted file mode 100755 index edd0b57..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/.libs/libcap-5fec18ee.so.1.10 and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/.libs/libdbus-1-30e56626.so.3.4.0 b/venv/lib/python3.7/site-packages/pygame/.libs/libdbus-1-30e56626.so.3.4.0 deleted file mode 100755 index d94d281..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/.libs/libdbus-1-30e56626.so.3.4.0 and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/.libs/libfluidsynth-e2f12a37.so.1.5.2 b/venv/lib/python3.7/site-packages/pygame/.libs/libfluidsynth-e2f12a37.so.1.5.2 deleted file mode 100755 index f5a47a9..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/.libs/libfluidsynth-e2f12a37.so.1.5.2 and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/.libs/libfreetype-b48c0f70.so.6.15.0 b/venv/lib/python3.7/site-packages/pygame/.libs/libfreetype-b48c0f70.so.6.15.0 deleted file mode 100755 index 155a903..0000000 Binary files 
a/venv/lib/python3.7/site-packages/pygame/.libs/libfreetype-b48c0f70.so.6.15.0 and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/.libs/libjpeg-bd53fca1.so.62.0.0 b/venv/lib/python3.7/site-packages/pygame/.libs/libjpeg-bd53fca1.so.62.0.0 deleted file mode 100755 index 17ef77c..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/.libs/libjpeg-bd53fca1.so.62.0.0 and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/.libs/libmikmod-fabcac29.so.2.0.4 b/venv/lib/python3.7/site-packages/pygame/.libs/libmikmod-fabcac29.so.2.0.4 deleted file mode 100755 index 3f4b3e5..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/.libs/libmikmod-fabcac29.so.2.0.4 and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/.libs/libogg-0d584846.so.0.8.3 b/venv/lib/python3.7/site-packages/pygame/.libs/libogg-0d584846.so.0.8.3 deleted file mode 100755 index b2134aa..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/.libs/libogg-0d584846.so.0.8.3 and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/.libs/libpng16-e11200e8.so.16.36.0 b/venv/lib/python3.7/site-packages/pygame/.libs/libpng16-e11200e8.so.16.36.0 deleted file mode 100755 index 7b8574d..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/.libs/libpng16-e11200e8.so.16.36.0 and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/.libs/libportmidi-7fcf6c23.so b/venv/lib/python3.7/site-packages/pygame/.libs/libportmidi-7fcf6c23.so deleted file mode 100755 index 59a8992..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/.libs/libportmidi-7fcf6c23.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/.libs/libsmpeg-0-7159ee15.4.so.0.1.4 b/venv/lib/python3.7/site-packages/pygame/.libs/libsmpeg-0-7159ee15.4.so.0.1.4 deleted file mode 100755 index 721a877..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/.libs/libsmpeg-0-7159ee15.4.so.0.1.4 and 
/dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/.libs/libsndfile-d5ecee11.so.1.0.17 b/venv/lib/python3.7/site-packages/pygame/.libs/libsndfile-d5ecee11.so.1.0.17 deleted file mode 100755 index df00974..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/.libs/libsndfile-d5ecee11.so.1.0.17 and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/.libs/libtiff-97e44e95.so.3.8.2 b/venv/lib/python3.7/site-packages/pygame/.libs/libtiff-97e44e95.so.3.8.2 deleted file mode 100755 index c810e5c..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/.libs/libtiff-97e44e95.so.3.8.2 and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/.libs/libvorbis-373f4a71.so.0.4.8 b/venv/lib/python3.7/site-packages/pygame/.libs/libvorbis-373f4a71.so.0.4.8 deleted file mode 100755 index 018e734..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/.libs/libvorbis-373f4a71.so.0.4.8 and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/.libs/libvorbisfile-f207f3a6.so.3.3.7 b/venv/lib/python3.7/site-packages/pygame/.libs/libvorbisfile-f207f3a6.so.3.3.7 deleted file mode 100755 index f04987d..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/.libs/libvorbisfile-f207f3a6.so.3.3.7 and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/.libs/libwebp-ec096d5f.so.7.0.4 b/venv/lib/python3.7/site-packages/pygame/.libs/libwebp-ec096d5f.so.7.0.4 deleted file mode 100755 index 2fa5623..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/.libs/libwebp-ec096d5f.so.7.0.4 and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/.libs/libz-a147dcb0.so.1.2.3 b/venv/lib/python3.7/site-packages/pygame/.libs/libz-a147dcb0.so.1.2.3 deleted file mode 100755 index c123f89..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/.libs/libz-a147dcb0.so.1.2.3 and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/__init__.py 
b/venv/lib/python3.7/site-packages/pygame/__init__.py deleted file mode 100644 index 9237561..0000000 --- a/venv/lib/python3.7/site-packages/pygame/__init__.py +++ /dev/null @@ -1,402 +0,0 @@ -# coding: ascii -# pygame - Python Game Library -# Copyright (C) 2000-2001 Pete Shinners -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Library General Public -# License as published by the Free Software Foundation; either -# version 2 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Library General Public License for more details. -# -# You should have received a copy of the GNU Library General Public -# License along with this library; if not, write to the Free -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -# -# Pete Shinners -# pete@shinners.org -"""Pygame is a set of Python modules designed for writing games. -It is written on top of the excellent SDL library. This allows you -to create fully featured games and multimedia programs in the python -language. The package is highly portable, with games running on -Windows, MacOS, OS X, BeOS, FreeBSD, IRIX, and Linux.""" - -import sys -import os - -# Choose Windows display driver -if os.name == 'nt': - - #pypy does not find the dlls, so we add package folder to PATH. - pygame_dir = os.path.split(__file__)[0] - os.environ['PATH'] = os.environ['PATH'] + ';' + pygame_dir - # Respect existing SDL_VIDEODRIVER setting if it has been set - if 'SDL_VIDEODRIVER' not in os.environ: - - # If the Windows version is 95/98/ME and DirectX 5 or greater is - # installed, then use the directx driver rather than the default - # windib driver. 
- - # http://docs.python.org/lib/module-sys.html - # 0 (VER_PLATFORM_WIN32s) Win32s on Windows 3.1 - # 1 (VER_PLATFORM_WIN32_WINDOWS) Windows 95/98/ME - # 2 (VER_PLATFORM_WIN32_NT) Windows NT/2000/XP - # 3 (VER_PLATFORM_WIN32_CE) Windows CE - if sys.getwindowsversion()[0] == 1: - - import _winreg - - try: - - # Get DirectX version from registry - key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, - 'SOFTWARE\\Microsoft\\DirectX') - dx_version_string = _winreg.QueryValueEx(key, 'Version') - key.Close() - - # Set video driver to directx if DirectX 5 or better is - # installed. - # To interpret DirectX version numbers, see this page: - # http://en.wikipedia.org/wiki/DirectX#Releases - minor_dx_version = int(dx_version_string.split('.')[1]) - if minor_dx_version >= 5: - os.environ['SDL_VIDEODRIVER'] = 'directx' - - # Clean up namespace - del key, dx_version_string, minor_dx_version - - except: - pass - - # Clean up namespace - del _winreg - -# when running under X11, always set the SDL window WM_CLASS to make the -# window managers correctly match the pygame window. 
-elif 'DISPLAY' in os.environ and 'SDL_VIDEO_X11_WMCLASS' not in os.environ: - os.environ['SDL_VIDEO_X11_WMCLASS'] = os.path.basename(sys.argv[0]) - - -class MissingModule: - _NOT_IMPLEMENTED_ = True - - def __init__(self, name, urgent=0): - self.name = name - exc_type, exc_msg = sys.exc_info()[:2] - self.info = str(exc_msg) - self.reason = "%s: %s" % (exc_type.__name__, self.info) - self.urgent = urgent - if urgent: - self.warn() - - def __getattr__(self, var): - if not self.urgent: - self.warn() - self.urgent = 1 - missing_msg = "%s module not available (%s)" % (self.name, self.reason) - raise NotImplementedError(missing_msg) - - def __nonzero__(self): - return 0 - - def warn(self): - msg_type = 'import' if self.urgent else 'use' - message = '%s %s: %s\n(%s)' % (msg_type, self.name, self.info, self.reason) - try: - import warnings - level = 4 if self.urgent else 3 - warnings.warn(message, RuntimeWarning, level) - except ImportError: - print (message) - - -# we need to import like this, each at a time. 
the cleanest way to import -# our modules is with the import command (not the __import__ function) - -# first, the "required" modules -from pygame.base import * -from pygame.constants import * -from pygame.version import * -from pygame.rect import Rect -from pygame.compat import PY_MAJOR_VERSION -from pygame.rwobject import encode_string, encode_file_path -import pygame.surflock -import pygame.color -Color = color.Color -import pygame.bufferproxy -BufferProxy = bufferproxy.BufferProxy -import pygame.math -Vector2 = pygame.math.Vector2 -Vector3 = pygame.math.Vector3 - -__version__ = ver - -# next, the "standard" modules -# we still allow them to be missing for stripped down pygame distributions -if get_sdl_version() < (2, 0, 0): - # cdrom only available for SDL 1.2.X - try: - import pygame.cdrom - except (ImportError, IOError): - cdrom = MissingModule("cdrom", urgent=1) - -try: - import pygame.cursors -except (ImportError, IOError): - cursors = MissingModule("cursors", urgent=1) - -try: - import pygame.display -except (ImportError, IOError): - display = MissingModule("display", urgent=1) - -try: - import pygame.draw -except (ImportError, IOError): - draw = MissingModule("draw", urgent=1) - -try: - import pygame.event -except (ImportError, IOError): - event = MissingModule("event", urgent=1) - -try: - import pygame.image -except (ImportError, IOError): - image = MissingModule("image", urgent=1) - -try: - import pygame.joystick -except (ImportError, IOError): - joystick = MissingModule("joystick", urgent=1) - -try: - import pygame.key -except (ImportError, IOError): - key = MissingModule("key", urgent=1) - -try: - import pygame.mouse -except (ImportError, IOError): - mouse = MissingModule("mouse", urgent=1) - -try: - import pygame.sprite -except (ImportError, IOError): - sprite = MissingModule("sprite", urgent=1) - -try: - import pygame.threads -except (ImportError, IOError): - threads = MissingModule("threads", urgent=1) - -try: - import pygame.pixelcopy -except 
(ImportError, IOError): - pixelcopy = MissingModule("pixelcopy", urgent=1) - - -def warn_unwanted_files(): - """warn about unneeded old files""" - - # a temporary hack to warn about camera.so and camera.pyd. - install_path = os.path.split(pygame.base.__file__)[0] - extension_ext = os.path.splitext(pygame.base.__file__)[1] - - # here are the .so/.pyd files we need to ask to remove. - ext_to_remove = ["camera"] - - # here are the .py/.pyo/.pyc files we need to ask to remove. - py_to_remove = ["color"] - - # Don't warn on Symbian. The color.py is used as a wrapper. - if os.name == "e32": - py_to_remove = [] - - # See if any of the files are there. - extension_files = ["%s%s" % (x, extension_ext) for x in ext_to_remove] - - py_files = ["%s%s" % (x, py_ext) - for py_ext in [".py", ".pyc", ".pyo"] - for x in py_to_remove] - - files = py_files + extension_files - - unwanted_files = [] - for f in files: - unwanted_files.append(os.path.join(install_path, f)) - - ask_remove = [] - for f in unwanted_files: - if os.path.exists(f): - ask_remove.append(f) - - if ask_remove: - message = "Detected old file(s). Please remove the old files:\n" - - for f in ask_remove: - message += "%s " % f - message += "\nLeaving them there might break pygame. Cheers!\n\n" - - try: - import warnings - level = 4 - warnings.warn(message, RuntimeWarning, level) - except ImportError: - print (message) - - -# disable, because we hopefully don't need it. 
-# warn_unwanted_files() - - -try: - from pygame.surface import * -except (ImportError, IOError): - Surface = lambda: Missing_Function - - -try: - import pygame.mask - from pygame.mask import Mask -except (ImportError, IOError): - Mask = lambda: Missing_Function - -try: - from pygame.pixelarray import * -except (ImportError, IOError): - PixelArray = lambda: Missing_Function - -try: - from pygame.overlay import * -except (ImportError, IOError): - Overlay = lambda: Missing_Function - -try: - import pygame.time -except (ImportError, IOError): - time = MissingModule("time", urgent=1) - -try: - import pygame.transform -except (ImportError, IOError): - transform = MissingModule("transform", urgent=1) - -# lastly, the "optional" pygame modules -if 'PYGAME_FREETYPE' in os.environ: - try: - import pygame.ftfont as font - sys.modules['pygame.font'] = font - except (ImportError, IOError): - pass -try: - import pygame.font - import pygame.sysfont - pygame.font.SysFont = pygame.sysfont.SysFont - pygame.font.get_fonts = pygame.sysfont.get_fonts - pygame.font.match_font = pygame.sysfont.match_font -except (ImportError, IOError): - font = MissingModule("font", urgent=0) - -# try and load pygame.mixer_music before mixer, for py2app... 
-try: - import pygame.mixer_music - #del pygame.mixer_music - #print ("NOTE2: failed importing pygame.mixer_music in lib/__init__.py") -except (ImportError, IOError): - pass - -try: - import pygame.mixer -except (ImportError, IOError): - mixer = MissingModule("mixer", urgent=0) - -try: - import pygame.movie -except (ImportError, IOError): - movie = MissingModule("movie", urgent=0) - -# try: -# import pygame.movieext -# except (ImportError,IOError): -# movieext=MissingModule("movieext", urgent=0) - -try: - import pygame.scrap -except (ImportError, IOError): - scrap = MissingModule("scrap", urgent=0) - -try: - import pygame.surfarray -except (ImportError, IOError): - surfarray = MissingModule("surfarray", urgent=0) - -try: - import pygame.sndarray -except (ImportError, IOError): - sndarray = MissingModule("sndarray", urgent=0) - -try: - import pygame.fastevent -except (ImportError, IOError): - fastevent = MissingModule("fastevent", urgent=0) - -# there's also a couple "internal" modules not needed -# by users, but putting them here helps "dependency finder" -# programs get everything they need (like py2exe) -try: - import pygame.imageext - del pygame.imageext -except (ImportError, IOError): - pass - - -def packager_imports(): - """some additional imports that py2app/py2exe will want to see""" - import atexit - import numpy - import OpenGL.GL - import pygame.macosx - import pygame.bufferproxy - import pygame.colordict - import pygame._view - -# make Rects pickleable -if PY_MAJOR_VERSION >= 3: - import copyreg as copy_reg -else: - import copy_reg - - -def __rect_constructor(x, y, w, h): - return Rect(x, y, w, h) - - -def __rect_reduce(r): - assert type(r) == Rect - return __rect_constructor, (r.x, r.y, r.w, r.h) -copy_reg.pickle(Rect, __rect_reduce, __rect_constructor) - - -# make Colors pickleable -def __color_constructor(r, g, b, a): - return Color(r, g, b, a) - - -def __color_reduce(c): - assert type(c) == Color - return __color_constructor, (c.r, c.g, c.b, c.a) 
-copy_reg.pickle(Color, __color_reduce, __color_constructor) - - -# Thanks for supporting pygame. Without support now, there won't be pygame later. -if 'PYGAME_HIDE_SUPPORT_PROMPT' not in os.environ: - print('pygame %s' % ver) - print('Hello from the pygame community. https://www.pygame.org/contribute.html') - - -# cleanup namespace -del pygame, os, sys, surflock, MissingModule, copy_reg, PY_MAJOR_VERSION diff --git a/venv/lib/python3.7/site-packages/pygame/_camera.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/_camera.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index f34bcc0..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/_camera.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/_camera_opencv_highgui.py b/venv/lib/python3.7/site-packages/pygame/_camera_opencv_highgui.py deleted file mode 100644 index 6acf190..0000000 --- a/venv/lib/python3.7/site-packages/pygame/_camera_opencv_highgui.py +++ /dev/null @@ -1,98 +0,0 @@ - -import pygame -import numpy - -import opencv -#this is important for capturing/displaying images -from opencv import highgui - - - -def list_cameras(): - """ - """ - # -1 for opencv means get any of them. - return [-1] - -def init(): - pass - -def quit(): - pass - - -class Camera: - - def __init__(self, device = 0, size = (640,480), mode = "RGB"): - """ - """ - self.camera = highgui.cvCreateCameraCapture(device) - if not self.camera: - raise ValueError ("Could not open camera. Sorry.") - - - def set_controls(self, **kwargs): - """ - """ - - - def set_resolution(self, width, height): - """Sets the capture resolution. (without dialog) - """ - # nothing to do here. - pass - def query_image(self): - return True - - def stop(self): - pass - - def start(self): - # do nothing here... since the camera is already open. - pass - - def get_buffer(self): - """Returns a string containing the raw pixel data. 
- """ - return self.get_surface().get_buffer() - - def get_image(self, dest_surf = None): - return self.get_surface(dest_surf) - - def get_surface(self, dest_surf = None): - camera = self.camera - - im = highgui.cvQueryFrame(camera) - #convert Ipl image to PIL image - #print type(im) - if im: - xx = opencv.adaptors.Ipl2NumPy(im) - #print type(xx) - #print xx.iscontiguous() - #print dir(xx) - #print xx.shape - xxx = numpy.reshape(xx, (numpy.product(xx.shape),)) - - if xx.shape[2] != 3: - raise ValueError("not sure what to do about this size") - - pg_img = pygame.image.frombuffer(xxx, (xx.shape[1],xx.shape[0]), "RGB") - - # if there is a destination surface given, we blit onto that. - if dest_surf: - dest_surf.blit(pg_img, (0,0)) - return dest_surf - #return pg_img - - - -if __name__ == "__main__": - - # try and use this camera stuff with the pygame camera example. - import pygame.examples.camera - - pygame.camera.Camera = Camera - pygame.camera.list_cameras = list_cameras - pygame.examples.camera.main() - - diff --git a/venv/lib/python3.7/site-packages/pygame/_camera_vidcapture.py b/venv/lib/python3.7/site-packages/pygame/_camera_vidcapture.py deleted file mode 100644 index 7ee77b6..0000000 --- a/venv/lib/python3.7/site-packages/pygame/_camera_vidcapture.py +++ /dev/null @@ -1,133 +0,0 @@ -"""pygame.camera.Camera implementation using the videocapture module for windows. - -http://videocapture.sourceforge.net/ - -Binary windows wheels: - https://www.lfd.uci.edu/~gohlke/pythonlibs/#videocapture -""" -import pygame - -def list_cameras(): - """Always only lists one camera. - - Functionality not supported in videocapture module. 
- """ - return [0] - - # this just cycles through all the cameras trying to open them - cameras = [] - for x in range(256): - try: - c = Camera(x) - except: - break - cameras.append(x) - - return cameras - -def init(): - global vidcap - try: - import vidcap as vc - except ImportError: - from VideoCapture import vidcap as vc - vidcap = vc - -def quit(): - global vidcap - pass - del vidcap - - -class Camera: - - def __init__(self, device =0, - size = (640,480), - mode = "RGB", - show_video_window=0): - """device: VideoCapture enumerates the available video capture devices - on your system. If you have more than one device, specify - the desired one here. The device number starts from 0. - - show_video_window: 0 ... do not display a video window (the default) - 1 ... display a video window - - Mainly used for debugging, since the video window - can not be closed or moved around. - """ - self.dev = vidcap.new_Dev(device, show_video_window) - width, height = size - self.dev.setresolution(width, height) - - def display_capture_filter_properties(self): - """Displays a dialog containing the property page of the capture filter. - - For VfW drivers you may find the option to select the resolution most - likely here. - """ - self.dev.displaycapturefilterproperties() - - def display_capture_pin_properties(self): - """Displays a dialog containing the property page of the capture pin. - - For WDM drivers you may find the option to select the resolution most - likely here. - """ - self.dev.displaycapturepinproperties() - - def set_resolution(self, width, height): - """Sets the capture resolution. (without dialog) - """ - self.dev.setresolution(width, height) - - def get_buffer(self): - """Returns a string containing the raw pixel data. - """ - return self.dev.getbuffer() - - def start(self): - """ Not implemented. - """ - - def set_controls(self, **kwargs): - """ Not implemented. - """ - - def stop(self): - """ Not implemented. 
- """ - - def get_image(self, dest_surf = None): - """ - """ - return self.get_surface(dest_surf) - - def get_surface(self, dest_surf = None): - """Returns a pygame Surface. - """ - abuffer, width, height = self.get_buffer() - if abuffer: - surf = pygame.image.frombuffer(abuffer, (width, height), "RGB") - - # swap it from a BGR surface to an RGB surface. - r,g,b,a = surf.get_masks() - surf.set_masks((b,g,r,a)) - - r,g,b,a = surf.get_shifts() - surf.set_shifts((b,g,r,a)) - - surf = pygame.transform.flip(surf, 0,1) - - # if there is a destination surface given, we blit onto that. - if dest_surf: - dest_surf.blit(surf, (0,0)) - else: - dest_surf = surf - return dest_surf - -if __name__ == "__main__": - import pygame.examples.camera - - pygame.camera.Camera = Camera - pygame.camera.list_cameras = list_cameras - pygame.examples.camera.main() diff --git a/venv/lib/python3.7/site-packages/pygame/_dummybackend.py b/venv/lib/python3.7/site-packages/pygame/_dummybackend.py deleted file mode 100644 index 49b3e30..0000000 --- a/venv/lib/python3.7/site-packages/pygame/_dummybackend.py +++ /dev/null @@ -1,30 +0,0 @@ -"""dummy Movie class if all else fails """ -class Movie: - def __init__(self, filename, surface=None): - self.filename=filename - self.surface = surface - self.process = None - self.loops=0 - self.playing = False - self.paused = False - self._backend = "DUMMY" - self.width=0 - self.height=0 - self.finished = 1 - def play(self, loops=0): - self.playing= not self.playing - - def stop(self): - self.playing=not self.playing - self.paused =not self.paused - - def pause(self): - self.paused=not self.paused - - def resize(self, w, h): - self.width=w - self.height=h - - def __repr__(self): - return "(%s 0.0s)"%self.filename - \ No newline at end of file diff --git a/venv/lib/python3.7/site-packages/pygame/_freetype.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/_freetype.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 
9e67b87..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/_freetype.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/_numpysndarray.py b/venv/lib/python3.7/site-packages/pygame/_numpysndarray.py deleted file mode 100644 index f531a9a..0000000 --- a/venv/lib/python3.7/site-packages/pygame/_numpysndarray.py +++ /dev/null @@ -1,76 +0,0 @@ -## pygame - Python Game Library -## Copyright (C) 2008 Marcus von Appen -## -## This library is free software; you can redistribute it and/or -## modify it under the terms of the GNU Library General Public -## License as published by the Free Software Foundation; either -## version 2 of the License, or (at your option) any later version. -## -## This library is distributed in the hope that it will be useful, -## but WITHOUT ANY WARRANTY; without even the implied warranty of -## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -## Library General Public License for more details. -## -## You should have received a copy of the GNU Library General Public -## License along with this library; if not, write to the Free -## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -## -## Marcus von Appen -## mva@sysfault.org - -"""pygame module for accessing sound sample data using numpy - -Functions to convert between numpy arrays and Sound objects. This module -will only be available when pygame can use the external numpy package. - -Sound data is made of thousands of samples per second, and each sample -is the amplitude of the wave at a particular moment in time. For -example, in 22-kHz format, element number 5 of the array is the -amplitude of the wave after 5/22000 seconds. - -Each sample is an 8-bit or 16-bit integer, depending on the data format. -A stereo sound file has two values per sample, while a mono sound file -only has one. 
-""" - -import pygame -import pygame.mixer as mixer -import numpy - - -def array (sound): - """pygame._numpysndarray.array(Sound): return array - - Copy Sound samples into an array. - - Creates a new array for the sound data and copies the samples. The - array will always be in the format returned from - pygame.mixer.get_init(). - """ - - return numpy.array (sound, copy=True) - -def samples (sound): - """pygame._numpysndarray.samples(Sound): return array - - Reference Sound samples into an array. - - Creates a new array that directly references the samples in a Sound - object. Modifying the array will change the Sound. The array will - always be in the format returned from pygame.mixer.get_init(). - """ - - return numpy.array (sound, copy=False) - -def make_sound (array): - """pygame._numpysndarray.make_sound(array): return Sound - - Convert an array into a Sound object. - - Create a new playable Sound object from an array. The mixer module - must be initialized and the array format must be similar to the mixer - audio format. - """ - - return mixer.Sound (array=array) - diff --git a/venv/lib/python3.7/site-packages/pygame/_numpysurfarray.py b/venv/lib/python3.7/site-packages/pygame/_numpysurfarray.py deleted file mode 100644 index e6cf3d8..0000000 --- a/venv/lib/python3.7/site-packages/pygame/_numpysurfarray.py +++ /dev/null @@ -1,356 +0,0 @@ -## pygame - Python Game Library -## Copyright (C) 2007 Marcus von Appen -## -## This library is free software; you can redistribute it and/or -## modify it under the terms of the GNU Library General Public -## License as published by the Free Software Foundation; either -## version 2 of the License, or (at your option) any later version. -## -## This library is distributed in the hope that it will be useful, -## but WITHOUT ANY WARRANTY; without even the implied warranty of -## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -## Library General Public License for more details. 
-## -## You should have received a copy of the GNU Library General Public -## License along with this library; if not, write to the Free -## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -## -## Marcus von Appen -## mva@sysfault.org - -"""pygame module for accessing surface pixel data using numpy - -Functions to convert pixel data between pygame Surfaces and Numpy -arrays. This module will only be available when pygame can use the -external Numpy package. - -Note, that numpyarray is an optional module. It requires that Numpy is -installed to be used. If not installed, an exception will be raised when -it is used. eg. ImportError: no module named numpy - -Every pixel is stored as a single integer value to represent the red, -green, and blue colors. The 8bit images use a value that looks into a -colormap. Pixels with higher depth use a bit packing process to place -three or four values into a single number. - -The Numpy arrays are indexed by the X axis first, followed by the Y -axis. Arrays that treat the pixels as a single integer are referred to -as 2D arrays. This module can also separate the red, green, and blue -color values into separate indices. These types of arrays are referred -to as 3D arrays, and the last index is 0 for red, 1 for green, and 2 for -blue. - -In contrast to Numeric Numpy does use unsigned 16bit integers, images -with 16bit data will be treated as unsigned integers. -""" - -import pygame -from pygame.compat import bytes_ -from pygame.pixelcopy import array_to_surface, surface_to_array, \ - map_array as pix_map_array, make_surface as pix_make_surface -import numpy -from numpy import array as numpy_array, empty as numpy_empty, \ - around as numpy_around, uint32 as numpy_uint32, \ - ndarray as numpy_ndarray - -#float96 not available on all numpy versions. 
-numpy_floats = [] -for type_name in "float float32 float64 float96".split(): - if hasattr(numpy, type_name): - numpy_floats.append(getattr(numpy, type_name)) - -# Pixel sizes corresponding to NumPy supported integer sizes, and therefore -# permissible for 2D reference arrays. -_pixel2d_bitdepths = set([8, 16, 32]) - - -def blit_array (surface, array): - """pygame.surfarray.blit_array(Surface, array): return None - - Blit directly from a array values. - - Directly copy values from an array into a Surface. This is faster than - converting the array into a Surface and blitting. The array must be the - same dimensions as the Surface and will completely replace all pixel - values. Only integer, ascii character and record arrays are accepted. - - This function will temporarily lock the Surface as the new values are - copied. - """ - if isinstance(array, numpy_ndarray) and array.dtype in numpy_floats: - array = array.round(0).astype(numpy_uint32) - return array_to_surface(surface, array) - -def make_surface(array): - """pygame.surfarray.make_surface (array): return Surface - - Copy an array to a new surface. - - Create a new Surface that best resembles the data and format on the - array. The array can be 2D or 3D with any sized integer values. - """ - if isinstance(array, numpy_ndarray) and array.dtype in numpy_floats: - array = array.round(0).astype(numpy_uint32) - return pix_make_surface (array) - -def array2d(surface): - """pygame.numpyarray.array2d(Surface): return array - - copy pixels into a 2d array - - Copy the pixels from a Surface into a 2D array. The bit depth of the - surface will control the size of the integer values, and will work - for any type of pixel format. - - This function will temporarily lock the Surface as pixels are copied - (see the Surface.lock - lock the Surface memory for pixel access - method). 
- """ - bpp = surface.get_bytesize() - try: - dtype = (numpy.uint8, numpy.uint16, numpy.int32, numpy.int32)[bpp - 1] - except IndexError: - raise ValueError("unsupported bit depth %i for 2D array" % (bpp * 8,)) - size = surface.get_size() - array = numpy.empty(size, dtype) - surface_to_array(array, surface) - return array - -def pixels2d(surface): - """pygame.numpyarray.pixels2d(Surface): return array - - reference pixels into a 2d array - - Create a new 2D array that directly references the pixel values in a - Surface. Any changes to the array will affect the pixels in the - Surface. This is a fast operation since no data is copied. - - Pixels from a 24-bit Surface cannot be referenced, but all other - Surface bit depths can. - - The Surface this references will remain locked for the lifetime of - the array (see the Surface.lock - lock the Surface memory for pixel - access method). - """ - if (surface.get_bitsize() not in _pixel2d_bitdepths): - raise ValueError("unsupport bit depth for 2D reference array") - try: - return numpy_array(surface.get_view('2'), copy=False) - except (ValueError, TypeError): - raise ValueError("bit depth %i unsupported for 2D reference array" % - (surface.get_bitsize(),)) - -def array3d(surface): - """pygame.numpyarray.array3d(Surface): return array - - copy pixels into a 3d array - - Copy the pixels from a Surface into a 3D array. The bit depth of the - surface will control the size of the integer values, and will work - for any type of pixel format. - - This function will temporarily lock the Surface as pixels are copied - (see the Surface.lock - lock the Surface memory for pixel access - method). - """ - w, h = surface.get_size() - array = numpy.empty((w, h, 3), numpy.uint8) - surface_to_array(array, surface) - return array - -def pixels3d (surface): - """pygame.numpyarray.pixels3d(Surface): return array - - reference pixels into a 3d array - - Create a new 3D array that directly references the pixel values in a - Surface. 
Any changes to the array will affect the pixels in the - Surface. This is a fast operation since no data is copied. - - This will only work on Surfaces that have 24-bit or 32-bit - formats. Lower pixel formats cannot be referenced. - - The Surface this references will remain locked for the lifetime of - the array (see the Surface.lock - lock the Surface memory for pixel - access method). - """ - return numpy_array(surface.get_view('3'), copy=False) - -def array_alpha(surface): - """pygame.numpyarray.array_alpha(Surface): return array - - copy pixel alphas into a 2d array - - Copy the pixel alpha values (degree of transparency) from a Surface - into a 2D array. This will work for any type of Surface - format. Surfaces without a pixel alpha will return an array with all - opaque values. - - This function will temporarily lock the Surface as pixels are copied - (see the Surface.lock - lock the Surface memory for pixel access - method). - """ - size = surface.get_size() - array = numpy.empty(size, numpy.uint8) - surface_to_array(array, surface, 'A') - return array - -def pixels_alpha(surface): - """pygame.numpyarray.pixels_alpha(Surface): return array - - reference pixel alphas into a 2d array - - Create a new 2D array that directly references the alpha values - (degree of transparency) in a Surface. Any changes to the array will - affect the pixels in the Surface. This is a fast operation since no - data is copied. - - This can only work on 32-bit Surfaces with a per-pixel alpha value. - - The Surface this array references will remain locked for the - lifetime of the array. - """ - return numpy.array(surface.get_view('A'), copy=False) - -def pixels_red(surface): - """pygame.surfarray.pixels_red(Surface): return array - - Reference pixel red into a 2d array. - - Create a new 2D array that directly references the red values - in a Surface. Any changes to the array will affect the pixels - in the Surface. This is a fast operation since no data is copied. 
- - This can only work on 24-bit or 32-bit Surfaces. - - The Surface this array references will remain locked for the - lifetime of the array. - """ - return numpy.array(surface.get_view('R'), copy=False) - -def array_red(surface): - """pygame.numpyarray.array_red(Surface): return array - - copy pixel red into a 2d array - - Copy the pixel red values from a Surface into a 2D array. This will work - for any type of Surface format. - - This function will temporarily lock the Surface as pixels are copied - (see the Surface.lock - lock the Surface memory for pixel access - method). - """ - size = surface.get_size() - array = numpy.empty(size, numpy.uint8) - surface_to_array(array, surface, 'R') - return array - -def pixels_green(surface): - """pygame.surfarray.pixels_green(Surface): return array - - Reference pixel green into a 2d array. - - Create a new 2D array that directly references the green values - in a Surface. Any changes to the array will affect the pixels - in the Surface. This is a fast operation since no data is copied. - - This can only work on 24-bit or 32-bit Surfaces. - - The Surface this array references will remain locked for the - lifetime of the array. - """ - return numpy.array(surface.get_view('G'), copy=False) - -def array_green(surface): - """pygame.numpyarray.array_green(Surface): return array - - copy pixel green into a 2d array - - Copy the pixel green values from a Surface into a 2D array. This will work - for any type of Surface format. - - This function will temporarily lock the Surface as pixels are copied - (see the Surface.lock - lock the Surface memory for pixel access - method). - """ - size = surface.get_size() - array = numpy.empty(size, numpy.uint8) - surface_to_array(array, surface, 'G') - return array - -def pixels_blue (surface): - """pygame.surfarray.pixels_blue(Surface): return array - - Reference pixel blue into a 2d array. - - Create a new 2D array that directly references the blue values - in a Surface. 
Any changes to the array will affect the pixels - in the Surface. This is a fast operation since no data is copied. - - This can only work on 24-bit or 32-bit Surfaces. - - The Surface this array references will remain locked for the - lifetime of the array. - """ - return numpy.array(surface.get_view('B'), copy=False) - -def array_blue(surface): - """pygame.numpyarray.array_blue(Surface): return array - - copy pixel blue into a 2d array - - Copy the pixel blue values from a Surface into a 2D array. This will work - for any type of Surface format. - - This function will temporarily lock the Surface as pixels are copied - (see the Surface.lock - lock the Surface memory for pixel access - method). - """ - size = surface.get_size() - array = numpy.empty(size, numpy.uint8) - surface_to_array(array, surface, 'B') - return array - -def array_colorkey(surface): - """pygame.numpyarray.array_colorkey(Surface): return array - - copy the colorkey values into a 2d array - - Create a new array with the colorkey transparency value from each - pixel. If the pixel matches the colorkey it will be fully - tranparent; otherwise it will be fully opaque. - - This will work on any type of Surface format. If the image has no - colorkey a solid opaque array will be returned. - - This function will temporarily lock the Surface as pixels are - copied. - """ - size = surface.get_size() - array = numpy.empty(size, numpy.uint8) - surface_to_array(array, surface, 'C') - return array - -def map_array(surface, array): - """pygame.numpyarray.map_array(Surface, array3d): return array2d - - map a 3d array into a 2d array - - Convert a 3D array into a 2D array. This will use the given Surface - format to control the conversion. - - Note: arrays do not need to be 3D, as long as the minor axis has - three elements giving the component colours, any array shape can be - used (for example, a single colour can be mapped, or an array of - colours). 
The array shape is limited to eleven dimensions maximum, - including the three element minor axis. - """ - if array.ndim == 0: - raise ValueError("array must have at least 1 dimension") - shape = array.shape - if shape[-1] != 3: - raise ValueError("array must be a 3d array of 3-value color data") - target = numpy_empty(shape[:-1], numpy.int32) - pix_map_array(target, array, surface) - return target - diff --git a/venv/lib/python3.7/site-packages/pygame/base.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/base.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index b97fd4b..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/base.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/bufferproxy.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/bufferproxy.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index d10b7fd..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/bufferproxy.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/camera.py b/venv/lib/python3.7/site-packages/pygame/camera.py deleted file mode 100644 index 3f18350..0000000 --- a/venv/lib/python3.7/site-packages/pygame/camera.py +++ /dev/null @@ -1,146 +0,0 @@ - -_is_init = 0 - - - -def init(): - global list_cameras, Camera, colorspace, _is_init - - - import os,sys - - use_opencv = False - use_vidcapture = False - use__camera = True - - - if sys.platform == 'win32': - use_vidcapture = True - use__camera = False - - elif "linux" in sys.platform: - use__camera = True - elif "darwin" in sys.platform: - use__camera = True - else: - use_opencv = True - - - - # see if we have any user specified defaults in environments. 
- camera_env = os.environ.get("PYGAME_CAMERA", "") - if camera_env == "opencv": - use_opencv = True - if camera_env == "vidcapture": - use_vidcapture = True - - - - # select the camera module to import here. - - # the _camera module has some code which can be reused by other modules. - # it will also be the default one. - if use__camera: - from pygame import _camera - colorspace = _camera.colorspace - - list_cameras = _camera.list_cameras - Camera = _camera.Camera - - if use_opencv: - try: - from pygame import _camera_opencv_highgui - except: - _camera_opencv_highgui = None - - if _camera_opencv_highgui: - _camera_opencv_highgui.init() - - list_cameras = _camera_opencv_highgui.list_cameras - Camera = _camera_opencv_highgui.Camera - - if use_vidcapture: - try: - from pygame import _camera_vidcapture - except: - _camera_vidcapture = None - - if _camera_vidcapture: - _camera_vidcapture.init() - list_cameras = _camera_vidcapture.list_cameras - Camera = _camera_vidcapture.Camera - - - - _is_init = 1 - pass - - -def quit(): - global _is_init - _is_init = 0 - pass - - -def _check_init(): - global _is_init - if not _is_init: - raise ValueError("Need to call camera.init() before using.") - -def list_cameras(): - """ - """ - _check_init() - raise NotImplementedError() - - -class Camera: - - def __init__(self, device =0, size = (320, 200), mode = "RGB"): - """ - """ - _check_init() - raise NotImplementedError() - - def set_resolution(self, width, height): - """Sets the capture resolution. (without dialog) - """ - pass - - def start(self): - """ - """ - - def stop(self): - """ - """ - - def get_buffer(self): - """ - """ - - def set_controls(self, **kwargs): - """ - """ - - def get_image(self, dest_surf = None): - """ - """ - - def get_surface(self, dest_surf = None): - """ - """ - - - -if __name__ == "__main__": - - # try and use this camera stuff with the pygame camera example. 
- import pygame.examples.camera - - #pygame.camera.Camera = Camera - #pygame.camera.list_cameras = list_cameras - pygame.examples.camera.main() - - - diff --git a/venv/lib/python3.7/site-packages/pygame/cdrom.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/cdrom.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index b24e17f..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/cdrom.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/color.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/color.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 135fe84..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/color.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/colordict.py b/venv/lib/python3.7/site-packages/pygame/colordict.py deleted file mode 100644 index 52b1166..0000000 --- a/venv/lib/python3.7/site-packages/pygame/colordict.py +++ /dev/null @@ -1,684 +0,0 @@ -## pygame - Python Game Library -## Copyright (C) 2000-2003 Pete Shinners -## -## This library is free software; you can redistribute it and/or -## modify it under the terms of the GNU Library General Public -## License as published by the Free Software Foundation; either -## version 2 of the License, or (at your option) any later version. -## -## This library is distributed in the hope that it will be useful, -## but WITHOUT ANY WARRANTY; without even the implied warranty of -## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -## Library General Public License for more details. 
-## -## You should have received a copy of the GNU Library General Public -## License along with this library; if not, write to the Free -## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -## -## Pete Shinners -## pete@shinners.org - -from pygame.compat import unicode_ - -THECOLORS = { -'gray17' : (43, 43, 43, 255) , -'gold' : (255, 215, 0, 255) , -'gray10' : (26, 26, 26, 255) , -'yellow' : (255, 255, 0, 255) , -'gray11' : (28, 28, 28, 255) , -'grey61' : (156, 156, 156, 255) , -'grey60' : (153, 153, 153, 255) , -'darkseagreen' : (143, 188, 143, 255) , -'grey62' : (158, 158, 158, 255) , -'grey65' : (166, 166, 166, 255) , -'gray12' : (31, 31, 31, 255) , -'grey67' : (171, 171, 171, 255) , -'grey66' : (168, 168, 168, 255) , -'grey69' : (176, 176, 176, 255) , -'gray21' : (54, 54, 54, 255) , -'lightsalmon4' : (139, 87, 66, 255) , -'lightsalmon2' : (238, 149, 114, 255) , -'lightsalmon3' : (205, 129, 98, 255) , -'lightsalmon1' : (255, 160, 122, 255) , -'gray32' : (82, 82, 82, 255) , -'green4' : (0, 139, 0, 255) , -'gray30' : (77, 77, 77, 255) , -'gray31' : (79, 79, 79, 255) , -'green1' : (0, 255, 0, 255) , -'gray37' : (94, 94, 94, 255) , -'green3' : (0, 205, 0, 255) , -'green2' : (0, 238, 0, 255) , -'darkslategray1' : (151, 255, 255, 255) , -'darkslategray2' : (141, 238, 238, 255) , -'darkslategray3' : (121, 205, 205, 255) , -'aquamarine1' : (127, 255, 212, 255) , -'aquamarine3' : (102, 205, 170, 255) , -'aquamarine2' : (118, 238, 198, 255) , -'papayawhip' : (255, 239, 213, 255) , -'black' : (0, 0, 0, 255) , -'darkorange3' : (205, 102, 0, 255) , -'oldlace' : (253, 245, 230, 255) , -'lightgoldenrod4' : (139, 129, 76, 255) , -'gray90' : (229, 229, 229, 255) , -'orchid1' : (255, 131, 250, 255) , -'orchid2' : (238, 122, 233, 255) , -'orchid3' : (205, 105, 201, 255) , -'grey68' : (173, 173, 173, 255) , -'brown' : (165, 42, 42, 255) , -'purple2' : (145, 44, 238, 255) , -'gray80' : (204, 204, 204, 255) , -'antiquewhite3' : (205, 192, 176, 255) , 
-'antiquewhite2' : (238, 223, 204, 255) , -'antiquewhite1' : (255, 239, 219, 255) , -'palevioletred3' : (205, 104, 137, 255) , -'hotpink' : (255, 105, 180, 255) , -'lightcyan' : (224, 255, 255, 255) , -'coral3' : (205, 91, 69, 255) , -'gray8' : (20, 20, 20, 255) , -'gray9' : (23, 23, 23, 255) , -'grey32' : (82, 82, 82, 255) , -'bisque4' : (139, 125, 107, 255) , -'cyan' : (0, 255, 255, 255) , -'gray0' : (0, 0, 0, 255) , -'gray1' : (3, 3, 3, 255) , -'gray6' : (15, 15, 15, 255) , -'bisque1' : (255, 228, 196, 255) , -'bisque2' : (238, 213, 183, 255) , -'bisque3' : (205, 183, 158, 255) , -'skyblue' : (135, 206, 235, 255) , -'gray' : (190, 190, 190, 255) , -'darkturquoise' : (0, 206, 209, 255) , -'rosybrown4' : (139, 105, 105, 255) , -'deepskyblue3' : (0, 154, 205, 255) , -'grey63' : (161, 161, 161, 255) , -'indianred1' : (255, 106, 106, 255) , -'grey78' : (199, 199, 199, 255) , -'lightpink' : (255, 182, 193, 255) , -'gray88' : (224, 224, 224, 255) , -'gray22' : (56, 56, 56, 255) , -'red' : (255, 0, 0, 255) , -'grey11' : (28, 28, 28, 255) , -'lemonchiffon3' : (205, 201, 165, 255) , -'lemonchiffon2' : (238, 233, 191, 255) , -'lemonchiffon1' : (255, 250, 205, 255) , -'indianred3' : (205, 85, 85, 255) , -'violetred1' : (255, 62, 150, 255) , -'plum2' : (238, 174, 238, 255) , -'plum1' : (255, 187, 255, 255) , -'lemonchiffon4' : (139, 137, 112, 255) , -'gray99' : (252, 252, 252, 255) , -'grey13' : (33, 33, 33, 255) , -'grey55' : (140, 140, 140, 255) , -'darkcyan' : (0, 139, 139, 255) , -'chocolate4' : (139, 69, 19, 255) , -'lightgoldenrodyellow' : (250, 250, 210, 255) , -'gray54' : (138, 138, 138, 255) , -'lavender' : (230, 230, 250, 255) , -'chartreuse3' : (102, 205, 0, 255) , -'chartreuse2' : (118, 238, 0, 255) , -'chartreuse1' : (127, 255, 0, 255) , -'grey48' : (122, 122, 122, 255) , -'grey16' : (41, 41, 41, 255) , -'thistle' : (216, 191, 216, 255) , -'chartreuse4' : (69, 139, 0, 255) , -'darkorchid4' : (104, 34, 139, 255) , -'grey42' : (107, 107, 107, 255) , -'grey41' : 
(105, 105, 105, 255) , -'grey17' : (43, 43, 43, 255) , -'dimgrey' : (105, 105, 105, 255) , -'dodgerblue4' : (16, 78, 139, 255) , -'darkorchid2' : (178, 58, 238, 255) , -'darkorchid3' : (154, 50, 205, 255) , -'blue' : (0, 0, 255, 255) , -'rosybrown2' : (238, 180, 180, 255) , -'honeydew' : (240, 255, 240, 255) , -'gray18' : (46, 46, 46, 255) , -'cornflowerblue' : (100, 149, 237, 255) , -'grey91' : (232, 232, 232, 255) , -'gray14' : (36, 36, 36, 255) , -'gray15' : (38, 38, 38, 255) , -'gray16' : (41, 41, 41, 255) , -'maroon4' : (139, 28, 98, 255) , -'maroon3' : (205, 41, 144, 255) , -'maroon2' : (238, 48, 167, 255) , -'maroon1' : (255, 52, 179, 255) , -'gray13' : (33, 33, 33, 255) , -'gold3' : (205, 173, 0, 255) , -'gold2' : (238, 201, 0, 255) , -'gold1' : (255, 215, 0, 255) , -'grey79' : (201, 201, 201, 255) , -'palevioletred1' : (255, 130, 171, 255) , -'palevioletred2' : (238, 121, 159, 255) , -'gold4' : (139, 117, 0, 255) , -'gray41' : (105, 105, 105, 255) , -'gray84' : (214, 214, 214, 255) , -'mediumpurple' : (147, 112, 219, 255) , -'rosybrown1' : (255, 193, 193, 255) , -'lightblue2' : (178, 223, 238, 255) , -'lightblue3' : (154, 192, 205, 255) , -'grey57' : (145, 145, 145, 255) , -'lightblue1' : (191, 239, 255, 255) , -'lightblue4' : (104, 131, 139, 255) , -'gray33' : (84, 84, 84, 255) , -'skyblue4' : (74, 112, 139, 255) , -'grey97' : (247, 247, 247, 255) , -'skyblue1' : (135, 206, 255, 255) , -'gray27' : (69, 69, 69, 255) , -'skyblue3' : (108, 166, 205, 255) , -'skyblue2' : (126, 192, 238, 255) , -'lavenderblush1' : (255, 240, 245, 255) , -'darkgrey' : (169, 169, 169, 255) , -'lavenderblush3' : (205, 193, 197, 255) , -'darkslategrey' : (47, 79, 79, 255) , -'lavenderblush4' : (139, 131, 134, 255) , -'deeppink4' : (139, 10, 80, 255) , -'grey99' : (252, 252, 252, 255) , -'gray36' : (92, 92, 92, 255) , -'coral4' : (139, 62, 47, 255) , -'magenta3' : (205, 0, 205, 255) , -'lightskyblue4' : (96, 123, 139, 255) , -'mediumturquoise' : (72, 209, 204, 255) , -'gray34' : 
(87, 87, 87, 255) , -'floralwhite' : (255, 250, 240, 255) , -'grey39' : (99, 99, 99, 255) , -'grey36' : (92, 92, 92, 255) , -'grey37' : (94, 94, 94, 255) , -'grey34' : (87, 87, 87, 255) , -'gray26' : (66, 66, 66, 255) , -'royalblue2' : (67, 110, 238, 255) , -'grey33' : (84, 84, 84, 255) , -'turquoise1' : (0, 245, 255, 255) , -'grey31' : (79, 79, 79, 255) , -'steelblue1' : (99, 184, 255, 255) , -'sienna4' : (139, 71, 38, 255) , -'steelblue3' : (79, 148, 205, 255) , -'lavenderblush2' : (238, 224, 229, 255) , -'sienna1' : (255, 130, 71, 255) , -'steelblue4' : (54, 100, 139, 255) , -'sienna3' : (205, 104, 57, 255) , -'aquamarine4' : (69, 139, 116, 255) , -'lightyellow1' : (255, 255, 224, 255) , -'lightyellow2' : (238, 238, 209, 255) , -'lightsteelblue' : (176, 196, 222, 255) , -'lightyellow4' : (139, 139, 122, 255) , -'magenta2' : (238, 0, 238, 255) , -'lightskyblue1' : (176, 226, 255, 255) , -'lightgoldenrod' : (238, 221, 130, 255) , -'magenta4' : (139, 0, 139, 255) , -'gray87' : (222, 222, 222, 255) , -'greenyellow' : (173, 255, 47, 255) , -'navajowhite4' : (139, 121, 94, 255) , -'darkslategray4' : (82, 139, 139, 255) , -'olivedrab' : (107, 142, 35, 255) , -'navajowhite1' : (255, 222, 173, 255) , -'navajowhite2' : (238, 207, 161, 255) , -'darkgoldenrod1' : (255, 185, 15, 255) , -'sienna' : (160, 82, 45, 255) , -'blue1' : (0, 0, 255, 255) , -'yellow1' : (255, 255, 0, 255) , -'gray61' : (156, 156, 156, 255) , -'magenta1' : (255, 0, 255, 255) , -'grey52' : (133, 133, 133, 255) , -'orangered4' : (139, 37, 0, 255) , -'palegreen' : (152, 251, 152, 255) , -'gray86' : (219, 219, 219, 255) , -'grey80' : (204, 204, 204, 255) , -'seashell' : (255, 245, 238, 255) , -'royalblue' : (65, 105, 225, 255) , -'firebrick3' : (205, 38, 38, 255) , -'blue4' : (0, 0, 139, 255) , -'peru' : (205, 133, 63, 255) , -'gray60' : (153, 153, 153, 255) , -'aquamarine' : (127, 255, 212, 255) , -'grey53' : (135, 135, 135, 255) , -'tan4' : (139, 90, 43, 255) , -'darkgoldenrod' : (184, 134, 11, 255) , 
-'tan2' : (238, 154, 73, 255) , -'tan1' : (255, 165, 79, 255) , -'darkslategray' : (47, 79, 79, 255) , -'royalblue3' : (58, 95, 205, 255) , -'red2' : (238, 0, 0, 255) , -'red1' : (255, 0, 0, 255) , -'dodgerblue' : (30, 144, 255, 255) , -'violetred4' : (139, 34, 82, 255) , -'lightyellow' : (255, 255, 224, 255) , -'paleturquoise1' : (187, 255, 255, 255) , -'firebrick2' : (238, 44, 44, 255) , -'mediumaquamarine' : (102, 205, 170, 255) , -'lemonchiffon' : (255, 250, 205, 255) , -'chocolate' : (210, 105, 30, 255) , -'orchid4' : (139, 71, 137, 255) , -'maroon' : (176, 48, 96, 255) , -'gray38' : (97, 97, 97, 255) , -'darkorange4' : (139, 69, 0, 255) , -'mintcream' : (245, 255, 250, 255) , -'darkorange1' : (255, 127, 0, 255) , -'antiquewhite' : (250, 235, 215, 255) , -'darkorange2' : (238, 118, 0, 255) , -'grey18' : (46, 46, 46, 255) , -'grey19' : (48, 48, 48, 255) , -'grey38' : (97, 97, 97, 255) , -'moccasin' : (255, 228, 181, 255) , -'grey10' : (26, 26, 26, 255) , -'chocolate1' : (255, 127, 36, 255) , -'chocolate2' : (238, 118, 33, 255) , -'chocolate3' : (205, 102, 29, 255) , -'saddlebrown' : (139, 69, 19, 255) , -'grey15' : (38, 38, 38, 255) , -'darkslateblue' : (72, 61, 139, 255) , -'lightskyblue' : (135, 206, 250, 255) , -'gray69' : (176, 176, 176, 255) , -'gray68' : (173, 173, 173, 255) , -'deeppink' : (255, 20, 147, 255) , -'gray65' : (166, 166, 166, 255) , -'gray64' : (163, 163, 163, 255) , -'gray67' : (171, 171, 171, 255) , -'gray66' : (168, 168, 168, 255) , -'gray25' : (64, 64, 64, 255) , -'coral' : (255, 127, 80, 255) , -'gray63' : (161, 161, 161, 255) , -'gray62' : (158, 158, 158, 255) , -'goldenrod4' : (139, 105, 20, 255) , -'grey35' : (89, 89, 89, 255) , -'gray89' : (227, 227, 227, 255) , -'goldenrod1' : (255, 193, 37, 255) , -'goldenrod2' : (238, 180, 34, 255) , -'goldenrod3' : (205, 155, 29, 255) , -'springgreen1' : (0, 255, 127, 255) , -'springgreen2' : (0, 238, 118, 255) , -'springgreen3' : (0, 205, 102, 255) , -'springgreen4' : (0, 139, 69, 255) , 
-'mistyrose1' : (255, 228, 225, 255) , -'sandybrown' : (244, 164, 96, 255) , -'grey30' : (77, 77, 77, 255) , -'seashell2' : (238, 229, 222, 255) , -'seashell3' : (205, 197, 191, 255) , -'tan' : (210, 180, 140, 255) , -'seashell1' : (255, 245, 238, 255) , -'mistyrose3' : (205, 183, 181, 255) , -'magenta' : (255, 0, 255, 255) , -'pink' : (255, 192, 203, 255) , -'ivory2' : (238, 238, 224, 255) , -'ivory1' : (255, 255, 240, 255) , -'lightcyan2' : (209, 238, 238, 255) , -'mediumseagreen' : (60, 179, 113, 255) , -'ivory4' : (139, 139, 131, 255) , -'darkorange' : (255, 140, 0, 255) , -'powderblue' : (176, 224, 230, 255) , -'dodgerblue1' : (30, 144, 255, 255) , -'gray95' : (242, 242, 242, 255) , -'firebrick1' : (255, 48, 48, 255) , -'gray7' : (18, 18, 18, 255) , -'mistyrose4' : (139, 125, 123, 255) , -'tomato' : (255, 99, 71, 255) , -'indianred2' : (238, 99, 99, 255) , -'steelblue2' : (92, 172, 238, 255) , -'gray100' : (255, 255, 255, 255) , -'seashell4' : (139, 134, 130, 255) , -'grey89' : (227, 227, 227, 255) , -'grey88' : (224, 224, 224, 255) , -'grey87' : (222, 222, 222, 255) , -'grey86' : (219, 219, 219, 255) , -'grey85' : (217, 217, 217, 255) , -'grey84' : (214, 214, 214, 255) , -'midnightblue' : (25, 25, 112, 255) , -'grey82' : (209, 209, 209, 255) , -'grey81' : (207, 207, 207, 255) , -'yellow3' : (205, 205, 0, 255) , -'ivory3' : (205, 205, 193, 255) , -'grey22' : (56, 56, 56, 255) , -'gray85' : (217, 217, 217, 255) , -'violetred3' : (205, 50, 120, 255) , -'dodgerblue2' : (28, 134, 238, 255) , -'gray42' : (107, 107, 107, 255) , -'sienna2' : (238, 121, 66, 255) , -'grey72' : (184, 184, 184, 255) , -'grey73' : (186, 186, 186, 255) , -'grey70' : (179, 179, 179, 255) , -'palevioletred' : (219, 112, 147, 255) , -'lightslategray' : (119, 136, 153, 255) , -'grey77' : (196, 196, 196, 255) , -'grey74' : (189, 189, 189, 255) , -'slategray1' : (198, 226, 255, 255) , -'pink1' : (255, 181, 197, 255) , -'mediumpurple1' : (171, 130, 255, 255) , -'pink3' : (205, 145, 158, 255) , 
-'antiquewhite4' : (139, 131, 120, 255) , -'lightpink1' : (255, 174, 185, 255) , -'honeydew2' : (224, 238, 224, 255) , -'khaki4' : (139, 134, 78, 255) , -'darkolivegreen4' : (110, 139, 61, 255) , -'gray45' : (115, 115, 115, 255) , -'slategray3' : (159, 182, 205, 255) , -'darkolivegreen1' : (202, 255, 112, 255) , -'khaki1' : (255, 246, 143, 255) , -'khaki2' : (238, 230, 133, 255) , -'khaki3' : (205, 198, 115, 255) , -'lavenderblush' : (255, 240, 245, 255) , -'honeydew4' : (131, 139, 131, 255) , -'salmon3' : (205, 112, 84, 255) , -'salmon2' : (238, 130, 98, 255) , -'gray92' : (235, 235, 235, 255) , -'salmon4' : (139, 76, 57, 255) , -'gray49' : (125, 125, 125, 255) , -'gray48' : (122, 122, 122, 255) , -'linen' : (250, 240, 230, 255) , -'burlywood1' : (255, 211, 155, 255) , -'green' : (0, 255, 0, 255) , -'gray47' : (120, 120, 120, 255) , -'blueviolet' : (138, 43, 226, 255) , -'brown2' : (238, 59, 59, 255) , -'brown3' : (205, 51, 51, 255) , -'peachpuff' : (255, 218, 185, 255) , -'brown4' : (139, 35, 35, 255) , -'firebrick4' : (139, 26, 26, 255) , -'azure1' : (240, 255, 255, 255) , -'azure3' : (193, 205, 205, 255) , -'azure2' : (224, 238, 238, 255) , -'azure4' : (131, 139, 139, 255) , -'tomato4' : (139, 54, 38, 255) , -'orange4' : (139, 90, 0, 255) , -'firebrick' : (178, 34, 34, 255) , -'indianred' : (205, 92, 92, 255) , -'orange1' : (255, 165, 0, 255) , -'orange3' : (205, 133, 0, 255) , -'orange2' : (238, 154, 0, 255) , -'darkolivegreen' : (85, 107, 47, 255) , -'gray2' : (5, 5, 5, 255) , -'slategrey' : (112, 128, 144, 255) , -'gray81' : (207, 207, 207, 255) , -'darkred' : (139, 0, 0, 255) , -'gray3' : (8, 8, 8, 255) , -'lightsteelblue1' : (202, 225, 255, 255) , -'lightsteelblue2' : (188, 210, 238, 255) , -'lightsteelblue3' : (162, 181, 205, 255) , -'lightsteelblue4' : (110, 123, 139, 255) , -'tomato3' : (205, 79, 57, 255) , -'gray43' : (110, 110, 110, 255) , -'darkgoldenrod4' : (139, 101, 8, 255) , -'grey50' : (127, 127, 127, 255) , -'yellow4' : (139, 139, 0, 255) , 
-'mediumorchid' : (186, 85, 211, 255) , -'yellow2' : (238, 238, 0, 255) , -'darkgoldenrod2' : (238, 173, 14, 255) , -'darkgoldenrod3' : (205, 149, 12, 255) , -'chartreuse' : (127, 255, 0, 255) , -'mediumblue' : (0, 0, 205, 255) , -'gray4' : (10, 10, 10, 255) , -'springgreen' : (0, 255, 127, 255) , -'orange' : (255, 165, 0, 255) , -'gray5' : (13, 13, 13, 255) , -'lightsalmon' : (255, 160, 122, 255) , -'gray19' : (48, 48, 48, 255) , -'turquoise' : (64, 224, 208, 255) , -'lightseagreen' : (32, 178, 170, 255) , -'grey8' : (20, 20, 20, 255) , -'grey9' : (23, 23, 23, 255) , -'grey6' : (15, 15, 15, 255) , -'grey7' : (18, 18, 18, 255) , -'grey4' : (10, 10, 10, 255) , -'grey5' : (13, 13, 13, 255) , -'grey2' : (5, 5, 5, 255) , -'grey3' : (8, 8, 8, 255) , -'grey0' : (0, 0, 0, 255) , -'grey1' : (3, 3, 3, 255) , -'gray50' : (127, 127, 127, 255) , -'goldenrod' : (218, 165, 32, 255) , -'grey58' : (148, 148, 148, 255) , -'grey59' : (150, 150, 150, 255) , -'gray51' : (130, 130, 130, 255) , -'grey54' : (138, 138, 138, 255) , -'mediumorchid4' : (122, 55, 139, 255) , -'grey56' : (143, 143, 143, 255) , -'navajowhite3' : (205, 179, 139, 255) , -'mediumorchid1' : (224, 102, 255, 255) , -'grey51' : (130, 130, 130, 255) , -'mediumorchid3' : (180, 82, 205, 255) , -'mediumorchid2' : (209, 95, 238, 255) , -'cyan2' : (0, 238, 238, 255) , -'cyan3' : (0, 205, 205, 255) , -'gray23' : (59, 59, 59, 255) , -'cyan1' : (0, 255, 255, 255) , -'darkgreen' : (0, 100, 0, 255) , -'gray24' : (61, 61, 61, 255) , -'cyan4' : (0, 139, 139, 255) , -'darkviolet' : (148, 0, 211, 255) , -'peachpuff4' : (139, 119, 101, 255) , -'gray28' : (71, 71, 71, 255) , -'slateblue4' : (71, 60, 139, 255) , -'slateblue3' : (105, 89, 205, 255) , -'peachpuff1' : (255, 218, 185, 255) , -'peachpuff2' : (238, 203, 173, 255) , -'peachpuff3' : (205, 175, 149, 255) , -'gray29' : (74, 74, 74, 255) , -'paleturquoise' : (175, 238, 238, 255) , -'darkgray' : (169, 169, 169, 255) , -'grey25' : (64, 64, 64, 255) , -'darkmagenta' : (139, 0, 139, 
255) , -'palegoldenrod' : (238, 232, 170, 255) , -'grey64' : (163, 163, 163, 255) , -'grey12' : (31, 31, 31, 255) , -'deeppink3' : (205, 16, 118, 255) , -'gray79' : (201, 201, 201, 255) , -'gray83' : (212, 212, 212, 255) , -'deeppink2' : (238, 18, 137, 255) , -'burlywood4' : (139, 115, 85, 255) , -'palevioletred4' : (139, 71, 93, 255) , -'deeppink1' : (255, 20, 147, 255) , -'slateblue2' : (122, 103, 238, 255) , -'grey46' : (117, 117, 117, 255) , -'royalblue4' : (39, 64, 139, 255) , -'yellowgreen' : (154, 205, 50, 255) , -'royalblue1' : (72, 118, 255, 255) , -'slateblue1' : (131, 111, 255, 255) , -'lightgoldenrod3' : (205, 190, 112, 255) , -'lightgoldenrod2' : (238, 220, 130, 255) , -'navy' : (0, 0, 128, 255) , -'orchid' : (218, 112, 214, 255) , -'ghostwhite' : (248, 248, 255, 255) , -'purple' : (160, 32, 240, 255) , -'darkkhaki' : (189, 183, 107, 255) , -'grey45' : (115, 115, 115, 255) , -'gray94' : (240, 240, 240, 255) , -'wheat4' : (139, 126, 102, 255) , -'gray96' : (245, 245, 245, 255) , -'gray97' : (247, 247, 247, 255) , -'wheat1' : (255, 231, 186, 255) , -'gray91' : (232, 232, 232, 255) , -'wheat3' : (205, 186, 150, 255) , -'wheat2' : (238, 216, 174, 255) , -'indianred4' : (139, 58, 58, 255) , -'coral2' : (238, 106, 80, 255) , -'coral1' : (255, 114, 86, 255) , -'violetred' : (208, 32, 144, 255) , -'rosybrown3' : (205, 155, 155, 255) , -'deepskyblue2' : (0, 178, 238, 255) , -'deepskyblue1' : (0, 191, 255, 255) , -'bisque' : (255, 228, 196, 255) , -'grey49' : (125, 125, 125, 255) , -'khaki' : (240, 230, 140, 255) , -'wheat' : (245, 222, 179, 255) , -'lightslateblue' : (132, 112, 255, 255) , -'mediumpurple3' : (137, 104, 205, 255) , -'gray55' : (140, 140, 140, 255) , -'deepskyblue' : (0, 191, 255, 255) , -'gray98' : (250, 250, 250, 255) , -'steelblue' : (70, 130, 180, 255) , -'aliceblue' : (240, 248, 255, 255) , -'lightskyblue2' : (164, 211, 238, 255) , -'lightskyblue3' : (141, 182, 205, 255) , -'lightslategrey' : (119, 136, 153, 255) , -'blue3' : (0, 0, 205, 
255) , -'blue2' : (0, 0, 238, 255) , -'gainsboro' : (220, 220, 220, 255) , -'grey76' : (194, 194, 194, 255) , -'purple3' : (125, 38, 205, 255) , -'plum4' : (139, 102, 139, 255) , -'gray56' : (143, 143, 143, 255) , -'plum3' : (205, 150, 205, 255) , -'plum' : (221, 160, 221, 255) , -'lightgrey' : (211, 211, 211, 255) , -'mediumslateblue' : (123, 104, 238, 255) , -'mistyrose' : (255, 228, 225, 255) , -'lightcyan1' : (224, 255, 255, 255) , -'grey71' : (181, 181, 181, 255) , -'darksalmon' : (233, 150, 122, 255) , -'beige' : (245, 245, 220, 255) , -'grey24' : (61, 61, 61, 255) , -'azure' : (240, 255, 255, 255) , -'honeydew1' : (240, 255, 240, 255) , -'slategray2' : (185, 211, 238, 255) , -'dodgerblue3' : (24, 116, 205, 255) , -'slategray4' : (108, 123, 139, 255) , -'grey27' : (69, 69, 69, 255) , -'lightcyan3' : (180, 205, 205, 255) , -'cornsilk' : (255, 248, 220, 255) , -'tomato1' : (255, 99, 71, 255) , -'gray57' : (145, 145, 145, 255) , -'mediumvioletred' : (199, 21, 133, 255) , -'tomato2' : (238, 92, 66, 255) , -'snow4' : (139, 137, 137, 255) , -'grey75' : (191, 191, 191, 255) , -'snow2' : (238, 233, 233, 255) , -'snow3' : (205, 201, 201, 255) , -'snow1' : (255, 250, 250, 255) , -'grey23' : (59, 59, 59, 255) , -'cornsilk3' : (205, 200, 177, 255) , -'lightcoral' : (240, 128, 128, 255) , -'orangered' : (255, 69, 0, 255) , -'navajowhite' : (255, 222, 173, 255) , -'mediumpurple2' : (159, 121, 238, 255) , -'slategray' : (112, 128, 144, 255) , -'pink2' : (238, 169, 184, 255) , -'grey29' : (74, 74, 74, 255) , -'grey28' : (71, 71, 71, 255) , -'gray82' : (209, 209, 209, 255) , -'burlywood' : (222, 184, 135, 255) , -'mediumpurple4' : (93, 71, 139, 255) , -'mediumspringgreen' : (0, 250, 154, 255) , -'grey26' : (66, 66, 66, 255) , -'grey21' : (54, 54, 54, 255) , -'grey20' : (51, 51, 51, 255) , -'blanchedalmond' : (255, 235, 205, 255) , -'pink4' : (139, 99, 108, 255) , -'gray78' : (199, 199, 199, 255) , -'tan3' : (205, 133, 63, 255) , -'gray76' : (194, 194, 194, 255) , -'gray77' : 
(196, 196, 196, 255) , -'white' : (255, 255, 255, 255) , -'gray75' : (191, 191, 191, 255) , -'gray72' : (184, 184, 184, 255) , -'gray73' : (186, 186, 186, 255) , -'gray70' : (179, 179, 179, 255) , -'gray71' : (181, 181, 181, 255) , -'lightgray' : (211, 211, 211, 255) , -'ivory' : (255, 255, 240, 255) , -'gray46' : (117, 117, 117, 255) , -'gray74' : (189, 189, 189, 255) , -'lightyellow3' : (205, 205, 180, 255) , -'lightpink2' : (238, 162, 173, 255) , -'lightpink3' : (205, 140, 149, 255) , -'paleturquoise4' : (102, 139, 139, 255) , -'lightpink4' : (139, 95, 101, 255) , -'paleturquoise3' : (150, 205, 205, 255) , -'seagreen4' : (46, 139, 87, 255) , -'seagreen3' : (67, 205, 128, 255) , -'seagreen2' : (78, 238, 148, 255) , -'seagreen1' : (84, 255, 159, 255) , -'paleturquoise2' : (174, 238, 238, 255) , -'gray52' : (133, 133, 133, 255) , -'cornsilk4' : (139, 136, 120, 255) , -'cornsilk2' : (238, 232, 205, 255) , -'darkolivegreen3' : (162, 205, 90, 255) , -'cornsilk1' : (255, 248, 220, 255) , -'limegreen' : (50, 205, 50, 255) , -'darkolivegreen2' : (188, 238, 104, 255) , -'grey' : (190, 190, 190, 255) , -'violetred2' : (238, 58, 140, 255) , -'salmon1' : (255, 140, 105, 255) , -'grey92' : (235, 235, 235, 255) , -'grey93' : (237, 237, 237, 255) , -'grey94' : (240, 240, 240, 255) , -'grey95' : (242, 242, 242, 255) , -'grey96' : (245, 245, 245, 255) , -'grey83' : (212, 212, 212, 255) , -'grey98' : (250, 250, 250, 255) , -'lightgoldenrod1' : (255, 236, 139, 255) , -'palegreen1' : (154, 255, 154, 255) , -'red3' : (205, 0, 0, 255) , -'palegreen3' : (124, 205, 124, 255) , -'palegreen2' : (144, 238, 144, 255) , -'palegreen4' : (84, 139, 84, 255) , -'cadetblue' : (95, 158, 160, 255) , -'violet' : (238, 130, 238, 255) , -'mistyrose2' : (238, 213, 210, 255) , -'slateblue' : (106, 90, 205, 255) , -'grey43' : (110, 110, 110, 255) , -'grey90' : (229, 229, 229, 255) , -'gray35' : (89, 89, 89, 255) , -'turquoise3' : (0, 197, 205, 255) , -'turquoise2' : (0, 229, 238, 255) , -'burlywood3' : 
(205, 170, 125, 255) , -'burlywood2' : (238, 197, 145, 255) , -'lightcyan4' : (122, 139, 139, 255) , -'rosybrown' : (188, 143, 143, 255) , -'turquoise4' : (0, 134, 139, 255) , -'whitesmoke' : (245, 245, 245, 255) , -'lightblue' : (173, 216, 230, 255) , -'grey40' : (102, 102, 102, 255) , -'gray40' : (102, 102, 102, 255) , -'honeydew3' : (193, 205, 193, 255) , -'dimgray' : (105, 105, 105, 255) , -'grey47' : (120, 120, 120, 255) , -'seagreen' : (46, 139, 87, 255) , -'red4' : (139, 0, 0, 255) , -'grey14' : (36, 36, 36, 255) , -'snow' : (255, 250, 250, 255) , -'darkorchid1' : (191, 62, 255, 255) , -'gray58' : (148, 148, 148, 255) , -'gray59' : (150, 150, 150, 255) , -'cadetblue4' : (83, 134, 139, 255) , -'cadetblue3' : (122, 197, 205, 255) , -'cadetblue2' : (142, 229, 238, 255) , -'cadetblue1' : (152, 245, 255, 255) , -'olivedrab4' : (105, 139, 34, 255) , -'purple4' : (85, 26, 139, 255) , -'gray20' : (51, 51, 51, 255) , -'grey44' : (112, 112, 112, 255) , -'purple1' : (155, 48, 255, 255) , -'olivedrab1' : (192, 255, 62, 255) , -'olivedrab2' : (179, 238, 58, 255) , -'olivedrab3' : (154, 205, 50, 255) , -'orangered3' : (205, 55, 0, 255) , -'orangered2' : (238, 64, 0, 255) , -'orangered1' : (255, 69, 0, 255) , -'darkorchid' : (153, 50, 204, 255) , -'thistle3' : (205, 181, 205, 255) , -'thistle2' : (238, 210, 238, 255) , -'thistle1' : (255, 225, 255, 255) , -'salmon' : (250, 128, 114, 255) , -'gray93' : (237, 237, 237, 255) , -'thistle4' : (139, 123, 139, 255) , -'gray39' : (99, 99, 99, 255) , -'lawngreen' : (124, 252, 0, 255) , -'hotpink3' : (205, 96, 144, 255) , -'hotpink2' : (238, 106, 167, 255) , -'hotpink1' : (255, 110, 180, 255) , -'lightgreen' : (144, 238, 144, 255) , -'hotpink4' : (139, 58, 98, 255) , -'darkseagreen4' : (105, 139, 105, 255) , -'darkseagreen3' : (155, 205, 155, 255) , -'darkseagreen2' : (180, 238, 180, 255) , -'darkseagreen1' : (193, 255, 193, 255) , -'deepskyblue4' : (0, 104, 139, 255) , -'gray44' : (112, 112, 112, 255) , -'navyblue' : (0, 0, 128, 
255) , -'darkblue' : (0, 0, 139, 255) , -'forestgreen' : (34, 139, 34, 255) , -'gray53' : (135, 135, 135, 255) , -'grey100' : (255, 255, 255, 255) , -'brown1' : (255, 64, 64, 255) , -} - -for k,v in THECOLORS.items(): - THECOLORS[unicode_(k)] = v diff --git a/venv/lib/python3.7/site-packages/pygame/compat.py b/venv/lib/python3.7/site-packages/pygame/compat.py deleted file mode 100644 index 58e5c54..0000000 --- a/venv/lib/python3.7/site-packages/pygame/compat.py +++ /dev/null @@ -1,103 +0,0 @@ -# coding: ascii -"""Python 2.x/3.x compatibility tools""" - -import sys - -__all__ = ['geterror', 'long_', 'xrange_', 'ord_', 'unichr_', - 'unicode_', 'raw_input_', 'as_bytes', 'as_unicode', - 'bytes_', 'imap_', 'PY_MAJOR_VERSION'] - -PY_MAJOR_VERSION = sys.version_info[0] - - -def geterror(): - return sys.exc_info()[1] - -# Python 3 -if PY_MAJOR_VERSION >= 3: - long_ = int - xrange_ = range - from io import StringIO - from io import BytesIO - unichr_ = chr - unicode_ = str - bytes_ = bytes - raw_input_ = input - imap_ = map - - # Represent escaped bytes and strings in a portable way. - # - # as_bytes: Allow a Python 3.x string to represent a bytes object. - # e.g.: as_bytes("a\x01\b") == b"a\x01b" # Python 3.x - # as_bytes("a\x01\b") == "a\x01b" # Python 2.x - # as_unicode: Allow a Python "r" string to represent a unicode string. 
- # e.g.: as_unicode(r"Bo\u00F6tes") == u"Bo\u00F6tes" # Python 2.x - # as_unicode(r"Bo\u00F6tes") == "Bo\u00F6tes" # Python 3.x - def as_bytes(string): - """ '<binary literal>' => b'<binary literal>' """ - return string.encode('latin-1', 'strict') - - def as_unicode(rstring): - """ r'<Unicode literal>' => '<Unicode literal>' """ - return rstring.encode('ascii', 'strict').decode('unicode_escape', - 'strict') - -# Python 2 -else: - long_ = long - xrange_ = xrange - from cStringIO import StringIO - BytesIO = StringIO - unichr_ = unichr - unicode_ = unicode - bytes_ = str - raw_input_ = raw_input - from itertools import imap as imap_ - - # Represent escaped bytes and strings in a portable way. - # - # as_bytes: Allow a Python 3.x string to represent a bytes object. - # e.g.: as_bytes("a\x01\b") == b"a\x01b" # Python 3.x - # as_bytes("a\x01\b") == "a\x01b" # Python 2.x - # as_unicode: Allow a Python "r" string to represent a unicode string. - # e.g.: as_unicode(r"Bo\u00F6tes") == u"Bo\u00F6tes" # Python 2.x - # as_unicode(r"Bo\u00F6tes") == "Bo\u00F6tes" # Python 3.x - def as_bytes(string): - """ '<binary literal>' => '<binary literal>' """ - return string - - def as_unicode(rstring): - """ r'<Unicode literal>' => u'<Unicode literal>' """ - return rstring.decode('unicode_escape', 'strict') - - -def get_BytesIO(): - return BytesIO - - -def get_StringIO(): - return StringIO - - -def ord_(o): - try: - return ord(o) - except TypeError: - return o - -if sys.platform == 'win32': - filesystem_errors = "replace" -elif PY_MAJOR_VERSION >= 3: - filesystem_errors = "surrogateescape" -else: - filesystem_errors = "strict" - - -def filesystem_encode(u): - fsencoding = sys.getfilesystemencoding() - if fsencoding.lower() in ['ascii', 'ansi_x3.4-1968'] and sys.platform.startswith('linux'): - # Don't believe Linux systems claiming ASCII-only filesystems. In - # practice, arbitrary bytes are allowed, and most things expect UTF-8. 
- fsencoding = 'utf-8' - return u.encode(fsencoding, filesystem_errors) - diff --git a/venv/lib/python3.7/site-packages/pygame/constants.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/constants.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index b1d21f5..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/constants.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/cursors.py b/venv/lib/python3.7/site-packages/pygame/cursors.py deleted file mode 100644 index 944fc73..0000000 --- a/venv/lib/python3.7/site-packages/pygame/cursors.py +++ /dev/null @@ -1,309 +0,0 @@ -## pygame - Python Game Library -## Copyright (C) 2000-2003 Pete Shinners -## -## This library is free software; you can redistribute it and/or -## modify it under the terms of the GNU Library General Public -## License as published by the Free Software Foundation; either -## version 2 of the License, or (at your option) any later version. -## -## This library is distributed in the hope that it will be useful, -## but WITHOUT ANY WARRANTY; without even the implied warranty of -## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -## Library General Public License for more details. -## -## You should have received a copy of the GNU Library General Public -## License along with this library; if not, write to the Free -## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -## -## Pete Shinners -## pete@shinners.org - -"""Set of cursor resources available for use. These cursors come -in a sequence of values that are needed as the arguments for -pygame.mouse.set_cursor(). to dereference the sequence in place -and create the cursor in one step, call like this; -pygame.mouse.set_cursor(*pygame.cursors.arrow). - -Here is a list of available cursors; arrow, diamond, ball, - broken_x, tri_left, tri_right - -There is also a sample string cursor named 'thickarrow_strings'. 
-The compile() function can convert these string cursors into cursor byte data. -""" - -#default pygame black arrow -arrow = ((16, 16), (0, 0), - (0x00,0x00,0x40,0x00,0x60,0x00,0x70,0x00,0x78,0x00,0x7C,0x00,0x7E,0x00,0x7F,0x00, - 0x7F,0x80,0x7C,0x00,0x6C,0x00,0x46,0x00,0x06,0x00,0x03,0x00,0x03,0x00,0x00,0x00), - (0x40,0x00,0xE0,0x00,0xF0,0x00,0xF8,0x00,0xFC,0x00,0xFE,0x00,0xFF,0x00,0xFF,0x80, - 0xFF,0xC0,0xFF,0x80,0xFE,0x00,0xEF,0x00,0x4F,0x00,0x07,0x80,0x07,0x80,0x03,0x00)) - -diamond = ((16, 16), (7, 7), - (0, 0, 1, 0, 3, 128, 7, 192, 14, 224, 28, 112, 56, 56, 112, 28, 56, - 56, 28, 112, 14, 224, 7, 192, 3, 128, 1, 0, 0, 0, 0, 0), - (1, 0, 3, 128, 7, 192, 15, 224, 31, 240, 62, 248, 124, 124, 248, 62, - 124, 124, 62, 248, 31, 240, 15, 224, 7, 192, 3, 128, 1, 0, 0, 0)) - -ball = ((16, 16), (7, 7), - (0, 0, 3, 192, 15, 240, 24, 248, 51, 252, 55, 252, 127, 254, 127, 254, - 127, 254, 127, 254, 63, 252, 63, 252, 31, 248, 15, 240, 3, 192, 0, 0), - (3, 192, 15, 240, 31, 248, 63, 252, 127, 254, 127, 254, 255, 255, 255, - 255, 255, 255, 255, 255, 127, 254, 127, 254, 63, 252, 31, 248, 15, 240, - 3, 192)) - -broken_x = ((16, 16), (7, 7), - (0, 0, 96, 6, 112, 14, 56, 28, 28, 56, 12, 48, 0, 0, 0, 0, 0, 0, 0, 0, - 12, 48, 28, 56, 56, 28, 112, 14, 96, 6, 0, 0), - (224, 7, 240, 15, 248, 31, 124, 62, 62, 124, 30, 120, 14, 112, 0, 0, 0, - 0, 14, 112, 30, 120, 62, 124, 124, 62, 248, 31, 240, 15, 224, 7)) - - -tri_left = ((16, 16), (1, 1), - (0, 0, 96, 0, 120, 0, 62, 0, 63, 128, 31, 224, 31, 248, 15, 254, 15, 254, - 7, 128, 7, 128, 3, 128, 3, 128, 1, 128, 1, 128, 0, 0), - (224, 0, 248, 0, 254, 0, 127, 128, 127, 224, 63, 248, 63, 254, 31, 255, - 31, 255, 15, 254, 15, 192, 7, 192, 7, 192, 3, 192, 3, 192, 1, 128)) - -tri_right = ((16, 16), (14, 1), - (0, 0, 0, 6, 0, 30, 0, 124, 1, 252, 7, 248, 31, 248, 127, 240, 127, 240, - 1, 224, 1, 224, 1, 192, 1, 192, 1, 128, 1, 128, 0, 0), - (0, 7, 0, 31, 0, 127, 1, 254, 7, 254, 31, 252, 127, 252, 255, 248, 255, - 248, 127, 240, 3, 240, 3, 224, 3, 
224, 3, 192, 3, 192, 1, 128)) - - - -#here is an example string resource cursor. to use this; -# curs, mask = pygame.cursors.compile_cursor(pygame.cursors.thickarrow_strings, 'X', '.') -# pygame.mouse.set_cursor((24, 24), (0, 0), curs, mask) - -thickarrow_strings = ( #sized 24x24 - "XX ", - "XXX ", - "XXXX ", - "XX.XX ", - "XX..XX ", - "XX...XX ", - "XX....XX ", - "XX.....XX ", - "XX......XX ", - "XX.......XX ", - "XX........XX ", - "XX........XXX ", - "XX......XXXXX ", - "XX.XXX..XX ", - "XXXX XX..XX ", - "XX XX..XX ", - " XX..XX ", - " XX..XX ", - " XX..XX ", - " XXXX ", - " XX ", - " ", - " ", - " ", -) - -sizer_x_strings = ( #sized 24x16 - " X X ", - " XX XX ", - " X.X X.X ", - " X..X X..X ", - " X...XXXXXXXX...X ", - "X................X ", - " X...XXXXXXXX...X ", - " X..X X..X ", - " X.X X.X ", - " XX XX ", - " X X ", - " ", - " ", - " ", - " ", - " ", -) -sizer_y_strings = ( #sized 16x24 - " X ", - " X.X ", - " X...X ", - " X.....X ", - " X.......X ", - "XXXXX.XXXXX ", - " X.X ", - " X.X ", - " X.X ", - " X.X ", - " X.X ", - " X.X ", - " X.X ", - "XXXXX.XXXXX ", - " X.......X ", - " X.....X ", - " X...X ", - " X.X ", - " X ", - " ", - " ", - " ", - " ", - " ", -) -sizer_xy_strings = ( #sized 24x16 - "XXXXXXXX ", - "X.....X ", - "X....X ", - "X...X ", - "X..X.X ", - "X.X X.X ", - "XX X.X X ", - "X X.X XX ", - " X.XX.X ", - " X...X ", - " X...X ", - " X....X ", - " X.....X ", - " XXXXXXXX ", - " ", - " ", -) -textmarker_strings = ( #sized 8x16 - "ooo ooo ", - " o ", - " o ", - " o ", - " o ", - " o ", - " o ", - " o ", - " o ", - " o ", - " o ", - "ooo ooo ", - " ", - " ", - " ", - " ", -) - - - -def compile(strings, black='X', white='.',xor='o'): - """pygame.cursors.compile(strings, black, white,xor) -> data, mask -compile cursor strings into cursor data - -This takes a set of strings with equal length and computes -the binary data for that cursor. The string widths must be -divisible by 8. 
- -The black and white arguments are single letter strings that -tells which characters will represent black pixels, and which -characters represent white pixels. All other characters are -considered clear. - -This returns a tuple containing the cursor data and cursor mask -data. Both these arguments are used when setting a cursor with -pygame.mouse.set_cursor(). -""" - - #first check for consistent lengths - size = len(strings[0]), len(strings) - if size[0] % 8 or size[1] % 8: - raise ValueError("cursor string sizes must be divisible by 8 %s" % - size) - for s in strings[1:]: - if len(s) != size[0]: - raise ValueError("Cursor strings are inconsistent lengths") - - #create the data arrays. - #this could stand a little optimizing - maskdata = [] - filldata = [] - maskitem = fillitem = 0 - step = 8 - for s in strings: - for c in s: - maskitem = maskitem << 1 - fillitem = fillitem << 1 - step = step - 1 - if c == black: - maskitem = maskitem | 1 - fillitem = fillitem | 1 - elif c == white: - maskitem = maskitem | 1 - elif c == xor: - fillitem = fillitem | 1 - if not step: - maskdata.append(maskitem) - filldata.append(fillitem) - maskitem = fillitem = 0 - step = 8 - return tuple(filldata), tuple(maskdata) - - - - -def load_xbm(curs, mask): - """pygame.cursors.load_xbm(cursorfile, maskfile) -> cursor_args -reads a pair of XBM files into set_cursor arguments - -Arguments can either be filenames or filelike objects -with the readlines method. Not largely tested, but -should work with typical XBM files. 
-""" - def bitswap(num): - val = 0 - for x in range(8): - b = num&(1<<x) != 0 - val = val<<1 | b - return val - - if type(curs) is type(''): - with open(curs) as cursor_f: - curs = cursor_f.readlines() - else: - curs = curs.readlines() - - if type(mask) is type(''): - with open(mask) as mask_f: - mask = mask_f.readlines() - else: - mask = mask.readlines() - - #avoid comments - for line in range(len(curs)): - if curs[line].startswith("#define"): - curs = curs[line:] - break - for line in range(len(mask)): - if mask[line].startswith("#define"): - mask = mask[line:] - break - #load width,height - width = int(curs[0].split()[-1]) - height = int(curs[1].split()[-1]) - #load hotspot position - if curs[2].startswith('#define'): - hotx = int(curs[2].split()[-1]) - hoty = int(curs[3].split()[-1]) - else: - hotx = hoty = 0 - - info = width, height, hotx, hoty - - for line in range(len(curs)): - if curs[line].startswith('static char') or curs[line].startswith('static unsigned char'): - break - data = ' '.join(curs[line+1:]).replace('};', '').replace(',', ' ') - cursdata = [] - for x in data.split(): - cursdata.append(bitswap(int(x, 16))) - cursdata = tuple(cursdata) - - for line in range(len(mask)): - if mask[line].startswith('static char') or mask[line].startswith('static unsigned char'): - break - data = ' '.join(mask[line+1:]).replace('};', '').replace(',', ' ') - maskdata = [] - for x in data.split(): - maskdata.append(bitswap(int(x, 16))) - maskdata = tuple(maskdata) - return info[:2], info[2:], cursdata, maskdata diff --git a/venv/lib/python3.7/site-packages/pygame/display.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/display.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index e3ea634..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/display.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/docs/__init__.py 
b/venv/lib/python3.7/site-packages/pygame/docs/__init__.py deleted file mode 100644 index 0d02f17..0000000 --- a/venv/lib/python3.7/site-packages/pygame/docs/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Make docs a package that brings up the main page in a web brower when -# executed. -# -# python -m pygame.docs - -if __name__ == '__main__': - import os - pkg_dir = os.path.dirname(os.path.abspath(__file__)) - main = os.path.join(pkg_dir, '__main__.py') - exec(open(main).read()) - - diff --git a/venv/lib/python3.7/site-packages/pygame/docs/__main__.py b/venv/lib/python3.7/site-packages/pygame/docs/__main__.py deleted file mode 100644 index 5fa3d2d..0000000 --- a/venv/lib/python3.7/site-packages/pygame/docs/__main__.py +++ /dev/null @@ -1,28 +0,0 @@ -# python -m pygame.docs - -import os -import webbrowser -try: - from urllib.parse import urlunparse, quote -except ImportError: - from urlparse import urlunparse - from urllib import quote - -def iterpath(path): - path, last = os.path.split(path) - if last: - for p in iterpath(path): - yield p - yield last - -pkg_dir = os.path.dirname(os.path.abspath(__file__)) -main_page = os.path.join(pkg_dir, 'index.html') -if os.path.exists(main_page): - url_path = quote('/'.join(iterpath(main_page))) - drive, rest = os.path.splitdrive(__file__) - if drive: - url_path = "%s/%s" % (drive, url_path) - url = urlunparse(('file', '', url_path, '', '', '')) -else: - url = "https://www.pygame.org/docs/" -webbrowser.open(url) diff --git a/venv/lib/python3.7/site-packages/pygame/docs/logos.html b/venv/lib/python3.7/site-packages/pygame/docs/logos.html deleted file mode 100644 index 0c08249..0000000 --- a/venv/lib/python3.7/site-packages/pygame/docs/logos.html +++ /dev/null @@ -1,44 +0,0 @@ -<html> <title>Pygame Logos - - - - - -

pygame logos
- -These logos are available for use in your own game projects. -Please put them up wherever you see fit. The logo was created -by TheCorruptor on July 29, 2001. - - -
- -

-There is a higher resolution layered photoshop image -available here. -(1.3 MB)

- -
-
- pygame_logo.gif - 676 x 200
-

-
- pygame_small.gif - 338 x 100
-

-
- pygame_tiny.gif - 200 x 60
-
- -

-
-pygame_powered.gif - 250 x 100
-


 
- - - - - - - diff --git a/venv/lib/python3.7/site-packages/pygame/docs/pygame_logo.gif b/venv/lib/python3.7/site-packages/pygame/docs/pygame_logo.gif deleted file mode 100644 index 63d2e77..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/docs/pygame_logo.gif and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/docs/pygame_powered.gif b/venv/lib/python3.7/site-packages/pygame/docs/pygame_powered.gif deleted file mode 100644 index 5a2bb5f..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/docs/pygame_powered.gif and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/docs/pygame_small.gif b/venv/lib/python3.7/site-packages/pygame/docs/pygame_small.gif deleted file mode 100644 index 4916dbf..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/docs/pygame_small.gif and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/docs/pygame_tiny.gif b/venv/lib/python3.7/site-packages/pygame/docs/pygame_tiny.gif deleted file mode 100644 index f9aa517..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/docs/pygame_tiny.gif and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/docs/ref/docscomments.json b/venv/lib/python3.7/site-packages/pygame/docs/ref/docscomments.json deleted file mode 100644 index 09a646a..0000000 --- a/venv/lib/python3.7/site-packages/pygame/docs/ref/docscomments.json +++ /dev/null @@ -1 +0,0 @@ -[{"content": "How To Get ALL The Mouse Clicks.\n\nTook me hours to figure this out. 
Also note that button 1 and 3 pressed at the same time shows up as button 2, at least on my ubuntu computer.\n\ne=pygame.event.wait()\nif e.type == MOUSEBUTTONDOWN and e.button == 4 : do something mousey", "user_title": "Douglas Smith", "datetimeon": "2005-11-11T14:05:52", "link": "pygame.mouse.get_pressed", "id": 3}, {"content": "If you're trying to create a surface with per-pixel alphas, and\n\n my_surface = pygame.Surface((w, h), SRCALPHA)\n\ncreates a regular surface instead, try\n\n my_surface = pygame.Surface((w, h)).convert_alpha()", "user_title": "Marius Gedminas", "datetimeon": "2006-01-05T16:07:06", "link": "pygame.Surface", "id": 39}, {"content": "Interestingly, pygame.font.get_default_font() returns a font name ('freesansbold.ttf') which is not among the 189 listed by pygame.font.get_fonts().", "user_title": "Dave Burton", "datetimeon": "2011-01-03T08:47:41", "link": "pygame.font.get_fonts", "id": 3698}, {"content": "The font name is not a list! It is a single string.\n\nThe string can contain multiple font names with commas between them,\nbut if you pass a Python list (or tuple) you'll get an error.", "user_title": "Dave Burton", "datetimeon": "2011-01-03T09:13:14", "link": "pygame.font.SysFont", "id": 3699}, {"content": "Re: \"During its lifetime, the PixelArray locks the surface, thus you explicitly have to delete it once its not used anymore and the surface should perform operations in the same scope.\"\n\n1. Grammer: s/its/it's/\n\n2. s/you explicitly have to delete/you have to explicitly delete/\n\n3. I assume that to explicitly delete it you can either use \"del pxarray\"\nor else simply exit the function to which pxarray is local. Is that correct?\n\n4. 
What does \"and the surface should perform operations in the same scope\" mean?\nIs it saying something about the surface returned by px.make_surface(), i.e.,\nthat it should be a local variable in the same function to which pxarray is local?\nOr is it saying something about the surface that is passed to pygame.PixelArray()\nto create the pxarray object, and if so WHAT is it saying?", "user_title": "Dave Burton", "datetimeon": "2011-01-07T03:08:20", "link": "pygame.PixelArray", "id": 3703}, {"content": "On my Windows Vista machine running Python 3.1.2 and pygame 1.9.1, pgame.font.get_fonts() returns a list of 189 fonts. All the font names are lower case, and there are no special characters (like hyphens) in the names. The expected 'timesnewroman', 'arial', 'arialblack', 'couriernew', 'veranda', 'microsoftsansserif', 'symbol' and 'wingdings' are there (but not 'times' or 'roman' or 'helvetica'), but also many obscure fonts that I've never heard of.", "user_title": "Dave Burton", "datetimeon": "2011-01-03T08:43:54", "link": "pygame.font.get_fonts", "id": 3697}, {"content": "Pretty cool demo Mr. Anony", "user_title": "Robert Leachman", "datetimeon": "2010-12-10T22:09:50", "link": "pygame.key.get_pressed", "id": 3683}, {"content": "If you want to see a list of attributes, do a help(pygame) and it'll show you", "user_title": "Alex Polosky", "datetimeon": "2010-12-15T23:46:38", "link": "pygame.locals", "id": 3686}, {"content": "Works fine for me on OS X 10.6.5, though yes it does need to brought up to Quartz", "user_title": "Robert Leachman", "datetimeon": "2010-12-04T21:54:48", "link": "pygame.display.init", "id": 3675}, {"content": "See tutorials. 
\nAfter each line \n pygame.image.load(\"<>\")\nMake it\n pygame.image.load(\"<>\").convert()\nNo matter what, this will increase your speed by 600%!\nThanks to whoever put in that tutorial!\n -P.Z.", "user_title": "Ian Mallett", "datetimeon": "2007-03-05T00:13:41", "link": "pygame.draw", "id": 403}, {"content": "see:\nhttp://www.pygame.org/docs/tut/newbieguide.html\n#4", "user_title": "Ian Mallett", "datetimeon": "2007-03-17T13:13:59", "link": "pygame.draw", "id": 439}, {"content": "Dear readers, here is a working example of MPEG playing.\n-tgfcoder\n\n\nimport pygame, time\n\npygame.init()\n\ncine = pygame.movie.Movie('a-movie.mpg')\nsz=cine.get_size()\npygame.display.set_mode(sz)\nscreen = pygame.display.get_surface()\ncine.set_display(screen)\ncine.play()\nwhile True:\n time.sleep(1)", "user_title": "Jordan Trudgett", "datetimeon": "2008-01-01T09:40:25", "link": "pygame.movie", "id": 1349}, {"content": "Oh, please replace pygame.init() with pygame.display.init()\nBecause we don't want the mixer to be initialised.", "user_title": "Jordan Trudgett", "datetimeon": "2008-01-01T09:46:24", "link": "pygame.movie", "id": 1350}, {"content": "Well, actually it's not even that (x,y) needs to be in the referential of the Rect, because if it was true, then (0,0) would return 1, and it doesn't. 
It is really a bug.", "user_title": "Guillaume Rava", "datetimeon": "2007-04-20T18:04:08", "link": "Rect.collidepoint", "id": 503}, {"content": "# Ellipse example:\n# When border=0 ellipse is filled\n# (screen, (rgb colour) (Xpos,Ypos,width,height),border width)\npygame.draw.ellipse(screen, (0, 127, 0), (300, 150, 80, 40), 0)", "user_title": "Miroslav Cika", "datetimeon": "2008-01-10T10:08:04", "link": "pygame.draw.ellipse", "id": 1392}, {"content": "# Circle example:\n# When border=0 circle is filled\n# (screen, (rgb colour), (Xpos,Ypos),Diameter,border width)\npygame.draw.circle(screen, (0, 127, 255), (300, 140), 50, 4)", "user_title": "Miroslav Cika", "datetimeon": "2008-01-10T10:20:38", "link": "pygame.draw.circle", "id": 1393}, {"content": "thank you Trudget for the working code", "user_title": "vishwanath", "datetimeon": "2008-01-13T13:38:10", "link": "pygame.movie", "id": 1407}, {"content": "\"\"\"It seems that this method does not detect point collisions that fall anywhere \nalong the right wall or bottom wall of the rect used. The following program\ncreates a rect with a width and height of 4, and a topleft corner at [0,0]. \nThe program then moves along each row of the rect area from left to right and\ntop to bottom by 1 unit, creating a new point and checking to see if the point\ncollides with the rect. 
If the point collides, a 1 is printed, and if the\npoint doesn't collide, a 0 is printed.\"\"\"\n\n# import\nimport pygame\n\n# main\ny = 4\nr = pygame.Rect(0,0,y,y)\np = [0,0]\npList = []\nwhile p != [0,y+1]:\n\tfor n in range(0,y+1):\n\t\tp[0] = n\n\t\tif r.collidepoint(p):\n\t\t\tpList.append(1)\n\t\telse:\n\t\t\tpList.append(0)\n\n\tprint '%d %d %d %d %d' % (pList[0],pList[1],pList[2],pList[3],pList[4])\n\t\n\tpList = []\n\tp[0] = 0\n\tp[1] += 1\n\n# wait for user to manually exit program\ninput('press enter to exit')\n\n\"\"\"Here is the output:\"\"\"\n1 1 1 1 0\n1 1 1 1 0\n1 1 1 1 0\n1 1 1 1 0\n0 0 0 0 0\npress enter to exit\n\n\"\"\"Even if you were to directly reference the topright, bottomleft, or bottomright\npoint of the rect as the argument to the collidepoint function, the rect still \nwould not detect a collision. The rect does, however, detect collision with its\ntopleft point:\"\"\"\n\n>>>r.collidepoint(r.bottomleft)\n0\n>>>r.collidepoint(r.topright)\n0\n>>>r.collidepoint(r.bottomright)\n0\n>>>r.collidepoint(r.topleft)\n1", "user_title": "Tim Winter", "datetimeon": "2008-01-16T15:33:48", "link": "Rect.collidepoint", "id": 1420}, {"content": "To use the scrollwheel:\nfor event in pygame.event.get():\n if event.type == MOUSEBUTTONDOWN:\n if event.button == 4:\n #Zoom Out\n elif event.button == 5:\n #Zoom In", "user_title": "Ian Mallett", "datetimeon": "2008-01-25T15:59:11", "link": "pygame.mouse", "id": 1442}, {"content": "Use the following class to generate a bezier curve that can be drawn with aalines:\n\n## Class begins here\nclass Bezier:\n\n\tclass SmoothnessError(Exception): pass\n\tclass CurveError(Exception): pass\n\n\tdef __init__(self):\n\t\t\"\"\"\n\t\tA Python class for generating bezier curves\n\t\t\n\t\tAn implementation of an algorithm presented by Nils Pipenbrinck\n\t\thttp://www.cubic.org/docs/bezier.htm\n\t\t\"\"\"\n\t\n\tdef __lerp(self, ptA, ptB, t):\n\t\t\"\"\"\n\t\tReturns the linear interp between two points as a list\n\t\tptA 
and ptB are a list of xy coords, t is the point on the curve\n\t\t\"\"\"\n\t\tdest = []\n\t\tdest.append(ptA[0]+float(ptB[0]-ptA[0])*t)\n\t\tdest.append(ptA[1]+float(ptB[1]-ptA[1])*t)\n\t\treturn dest\n\t\n\tdef bezierPt(self, ctrlPts, t):\n\t\t\"\"\"A recursive function for finding point t along a bezier curve\"\"\"\n\t\tif len(ctrlPts) == 1:\n\t\t\t#print \"Len is 1\", ctrlPts\n\t\t\treturn ctrlPts[0]\n\t\tlerpList = []\n\t\tfor i in xrange(len(ctrlPts)-1):\n\t\t\tptA = [ctrlPts[i][0],ctrlPts[i][1]]\n\t\t\tptB = [ctrlPts[i+1][0],ctrlPts[i+1][1]]\n\t\t\tlerpList.append(self.__lerp(ptA,ptB,t))\n\t\t#print len(lerpList)\n\t\treturn self.bezierPt(lerpList, t)\n\t\n\tdef makeBezier(self, ctrlPts, smoothness):\n\t\t\"\"\"\n\t\tReturns a list of points on a bezier curve\n\t\t\n\t\tctrlPts is a list of 2d Points that define the curve, in most cases these\n\t\tconsist of control point locations and their handles, except in a 3 point\n\t\tcurve where it's just defined by the three control points.\n\t\t\n\t\tsmoothness is the number of points on the curve that should be generated.\n\t\tThis should always be more than two points or generating the bezier curve is\n\t\tpointless and the script dies in a fire (or throws an exception)\n\t\t\"\"\"\n\t\t\n\t\tif len(ctrlPts) < 2:\n\t\t\traise self.CurverError(\"Curve list must contain more than one point\")\n\t\tif smoothness < 3:\n\t\t\traise self.SmoothnessError(\"Smoothness must be more than two\")\n\t\titeration = smoothness\n\t\tbezierList = []\n\t\tsubtract=1.0/smoothness\n\t\tfor i in xrange(0,iteration):\n\t\t\tt = 1.0-(subtract*i)\n\t\t\tif t < subtract:\n\t\t\t\tt = 0\n\t\t\tbPt = self.bezierPt(ctrlPts,t)\n\t\t\t#print bPt\n\t\t\tbezierList.append(bPt)\n\t\treturn bezierLis\n## Class ends\n\n###################\n# An example of how to use the class with pygame\n\n\n## Pygame Example\nimport math, pygame\nfrom pygame.locals import *\nimport bezier\n\ndef main():\n pygame.init()\n screen = 
pygame.display.set_mode((640,480))\n clock = pygame.time.Clock()\n \n b = bezier.Bezier()\n \"\"\"\n\tA bezier curve definition, a list of 2d poins, simple innit\n\tIt's basically control points with control handle locations before or\n\tafter the control point.\n\t\n Read http://www.cubic.org/docs/bezier.htm for more info\n \"\"\"\n bezierPts = [[40,100],[80,20],[150,180],[260,100]]\n bLine = b.makeBezier(bezierPts, 10)\n screen.fill((255,255,255))\n pygame.draw.aalines(screen, (1,1,1), False, bLine, 1)\n pygame.display.flip()\n bounce = False\n \n while True:\n clock.tick(60)\n pygame.event.pump()\n event = pygame.event.poll()\n if event.type == QUIT:\n return\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n return\n setTo = pygame.time.get_ticks()/20\n bezierPts[1][1] = setTo\n bLine = b.makeBezier(bezierPts,20)\n screen.fill((255,255,255))\n pygame.draw.aalines(screen, (1,1,1), False, bLine, 1)\n pygame.display.flip()\n\nif __name__ == \"__main__\":\n m = main()\n## End example", "user_title": "Jeiel Aranal", "datetimeon": "2008-02-06T10:50:10", "link": "pygame.draw.aalines", "id": 1502}, {"content": "Play can return None. So be sure to check the channel before using it. 
Something like this...\n\n channel = self.bounce_sound.play()\n if channel is not None:\n channel.set_volume(1.0 - stereo, stereo)", "user_title": "Will McGugan", "datetimeon": "2007-06-14T12:13:28", "link": "Sound.play", "id": 635}, {"content": "Should have an optional option \nfor sound playback speed...", "user_title": "Ian Mallett", "datetimeon": "2007-06-30T19:41:18", "link": "Sound.play", "id": 676}, {"content": "When antialias is enabled, rendering it on a black background makes it look bold.", "user_title": "Ian Mallett", "datetimeon": "2007-07-03T01:18:12", "link": "Font.render", "id": 689}, {"content": "Should have an Anti-alias option...", "user_title": "Ian Mallett", "datetimeon": "2008-02-25T23:09:09", "link": "pygame.draw.circle", "id": 1646}, {"content": "mods = pygame.key.get_mods()\nif mods & KMOD_LSHIFT: #use whatever KMOD_ constant you want;)\n print \"left shift pressed\"", "user_title": "Isaiah Heyer", "datetimeon": "2008-03-29T16:22:04", "link": "pygame.key.get_mods", "id": 1716}, {"content": "I would like to have a method of telling which side of a rect a point collides. \nIn other words, which side is the point closest to?", "user_title": "Ian Mallett", "datetimeon": "2008-03-29T23:08:21", "link": "Rect.collidepoint", "id": 1718}, {"content": "Right. Unfortunately, That's the way it is. 
A width or height of 0 should also\nbe allowed, for rectangles of changing sizes (think progressbar at 0%)", "user_title": "Ian Mallett", "datetimeon": "2007-08-01T17:48:16", "link": "pygame.draw.rect", "id": 785}, {"content": "Instead of drawing a circle with zero radius, you can use the method set_at on the surface to set the color of a single pixel: http://www.pygame.org/docs/ref/surface.html#Surface.set_at", "user_title": "Victor Blomqvist", "datetimeon": "2007-09-23T08:07:45", "link": "pygame.draw.circle", "id": 873}, {"content": "'dummy' driver is missing ;-)", "user_title": "DR0ID", "datetimeon": "2007-09-23T12:08:05", "link": "pygame.display.init", "id": 875}, {"content": "please able my display mode with opengl acceleration", "user_title": "aaron pedralvez", "datetimeon": "2007-09-27T02:05:16", "link": "pygame.display.init", "id": 882}, {"content": "In the event MOUSEBUTTONDOWN, if you're using a mouse with a rotating wheel,\nevent.button returns 4 when it is rotated forward (counterclockwise) and 5 when\nit is rotating backward (clockwise). I used a print statement to discover this.", "user_title": "Andy Hanson", "datetimeon": "2007-10-02T20:43:15", "link": "pygame.event", "id": 900}, {"content": "In the event MOUSEBUTTONDOWN, if you're using a mouse with a rotating wheel,\nevent.button returns 4 when it is rotated forward (counterclockwise) and 5 when\nit is rotating backward (clockwise). I used a print statement to discover this.", "user_title": "Andy Hanson", "datetimeon": "2007-10-02T20:43:21", "link": "pygame.event", "id": 901}, {"content": "You can request fullscreen, but there doesn't seem to be a way to\ndetermine whether it's on. 
Meaning, there ought to be a 'get_mode()'.", "user_title": "Andy Sommerville", "datetimeon": "2007-12-04T14:43:28", "link": "pygame.display.set_mode", "id": 1206}, {"content": "http://www.pygame.org/docs/ref/sndarray.html#pygame.sndarray.make_sound\ncan be used to synthesize a sound object from sound samples.", "user_title": "Ian Mallett", "datetimeon": "2008-05-26T20:16:58", "link": "pygame.mixer.Sound", "id": 1953}, {"content": "The movie module in Pygame 1.8 works on Windows.\nThe statement that it doesn't work is out-of-date.", "user_title": "Jason M. Marshall", "datetimeon": "2008-05-21T14:15:44", "link": "pygame.movie", "id": 1917}, {"content": "return bezierLis -> return bezierList (line 65)", "user_title": "Jordan Trudgett", "datetimeon": "2008-06-18T02:46:43", "link": "pygame.draw.aalines", "id": 2060}, {"content": "pygame.mixer.get_num_channels(): return count", "user_title": "Jordan Trudgett", "datetimeon": "2008-07-10T13:15:18", "link": "pygame.mixer.get_num_channels", "id": 2150}, {"content": "Calling Surface.lock() before many calls to Surface.set_at() and Surface.unlock() after is a great and easy optimization.", "user_title": "Ian Mallett", "datetimeon": "2008-07-11T23:25:11", "link": "Surface.set_at", "id": 2156}, {"content": "Just set the delay to something really big.", "user_title": "Ian Mallett", "datetimeon": "2008-08-19T06:01:04", "link": "pygame.key.set_repeat", "id": 2265}, {"content": "Is this thread-safe? 
Can I safely post messages from a different thread\nfrom the one that's processing events and rendering?", "user_title": "Weeble", "datetimeon": "2008-11-28T19:09:39", "link": "pygame.event.post", "id": 2339}, {"content": "''' Change alpha for surfaces with per-pixel alpha; only for small surfaces '''\ndef change_alpha(surface,alpha=0.5):\n\tsize = surface.get_size()\n\ttry:\n\t\tfor y in xrange(size[1]):\n\t\t\tfor x in xrange(size[0]):\n\t\t\t\tr,g,b,a = surface.get_at((x,y))\n\t\t\t\tsurface.set_at((x,y),(r,g,b,int(a*alpha)))\n\texcept:\n\t\treturn surface\n\treturn surface", "user_title": "Josef Vanzura", "datetimeon": "2010-11-19T09:47:18", "link": "Surface.set_alpha", "id": 3245}, {"content": "You can also do it with surfarray (faster).", "user_title": "Josef Vanzura", "datetimeon": "2010-11-19T09:49:02", "link": "Surface.set_alpha", "id": 3246}, {"content": "Sorry. I didn't read the previous comment, which is a better way.", "user_title": "Josef Vanzura", "datetimeon": "2010-11-19T09:56:18", "link": "Surface.set_alpha", "id": 3247}, {"content": "present in pygame 1.9.1 but not in pygame 1.8.1, which is currently the last binary release on Linux.", "user_title": "Shanti Pothapragada", "datetimeon": "2010-11-22T17:04:34", "link": "Rect.copy", "id": 3249}, {"content": "Also includes the attributes: x, y.", "user_title": "Sam Bull", "datetimeon": "2010-10-26T07:40:18", "link": "pygame.Rect", "id": 3225}, {"content": "Works like a charm. 
Thanks whoever you are.", "user_title": "Bartosz Debski", "datetimeon": "2010-09-29T19:26:57", "link": "Surface.fill", "id": 3211}, {"content": "The code snippet works perfectly; thanks!\nI think the documentation is sorely in need of an update.\nWishlist: other video formats, like .avi?", "user_title": "Ian Mallett", "datetimeon": "2009-01-01T16:05:17", "link": "pygame.movie", "id": 2360}, {"content": "An example to use this:\nscreen = pygame.display.set_mode(SCREENRECT.size) # SCREENRECT is a rect variable...\n # ...with screen dimension\n\ndoggie = pygame.sprite.RenderUpdates() #We create the group\nDog.containers = doggie \n# class Dog: Needs 'pygame.sprite.Sprite.__init__(self,self.containers)'\n# inside def __init__(self, ...):\n\ndog1 = Dog(...) #Class Dog\ndog2 = Dog(...)\n...\ndogN = Dog(...)\n\n... #Some move actions and things\n\n#Now, time to re-paint them all\ndoggie.clear(screen, Background)\nchanges = doggie.draw(screen)\npygame.display.update(changes)\n#Now we have all dogs updated in screen\n\n#---------\nEasy, quick and effortless", "user_title": "Patata", "datetimeon": "2009-01-07T12:38:26", "link": "pygame.sprite.RenderUpdates", "id": 2366}, {"content": "# A better loading script:\n\nimport os, pygame\n\ndef load_image(file_name, colorkey=False, image_directory='images'):\n 'Loads an image, file_name, from image_directory, for use in pygame'\n file = os.path.join(image_directory, file_name)\n _image = pygame.image.load(file)\n if colorkey:\n if colorkey == -1: \n # If the color key is -1, set it to color of upper left corner\n colorkey = _image.get_at((0, 0))\n _image.set_colorkey(colorkey)\n _image = _image.convert()\n else: # If there is no colorkey, preserve the image's alpha per pixel.\n _image = _image.convert_alpha()\n return _image", "user_title": "Nekody Lenkner", "datetimeon": "2009-03-20T21:58:08", "link": "pygame.image.load", "id": 2399}, {"content": "what does it mean by font name? 
can it be a path to a font?", "user_title": "Mad Cloud Games", "datetimeon": "2010-07-02T01:21:30", "link": "pygame.font.SysFont", "id": 3151}, {"content": "You can use multiple screens, but you'll need to make a separate process for each.", "user_title": "Ian Mallett", "datetimeon": "2009-08-01T23:26:07", "link": "pygame.display.set_mode", "id": 2900}, {"content": "VIDEORESIZE size, w, h\nsize == (w, h) # same data, different access", "user_title": "DR0ID", "datetimeon": "2009-04-04T12:27:05", "link": "pygame.event", "id": 2411}, {"content": "je ne sais pas pourquoi, mais; si vous utiliser une surface pour effacer le display au lieu d'utiliser un 'fill',\nil sera beaucoup plus rapide de blitter une copie du display :\n\ndisplay = pygame.display.set_mode((500,500))\nbackground = pygame.image.load('blablabla...')\ndisplay.blit(background,(0,0))\nbackground = display.copy() ----> utiliser cette copie pour multi-blitter plus rapidement une image de fond.", "user_title": "josmiley", "datetimeon": "2009-08-03T01:50:34", "link": "Surface.copy", "id": 2902}, {"content": "This doesn't say anything about the type attribute.\nYou can compare it to MOUSEBUTTONUP, KEYDOWN, etc to find out what the events\ntype is.", "user_title": "Daniel Westbrook", "datetimeon": "2009-07-29T00:20:38", "link": "pygame.event.Event", "id": 2896}, {"content": "is it a Rect object???", "user_title": "Alex", "datetimeon": "2010-05-04T17:47:38", "link": "pygame.display.update", "id": 3117}, {"content": "TIP:\nIf sound has noise/noisy is choppy or has static, the solution:\n\npygame.mixer.quit() #Make sure you all this before .init()\npygame.mixer.init()", "user_title": "Chris Goldie", "datetimeon": "2009-08-11T05:44:03", "link": "Sound.play", "id": 2911}, {"content": "The convert_alpha function prepares a surface for usage with per-pixel alphas. 
That is, for example, if you have a PNG or TGA image with an alpha channel controlling opacity of individual pixels, you would want to use this function on your surface after loading the image to speed up the blitting process.", "user_title": "Brad Smithee", "datetimeon": "2010-04-29T02:55:18", "link": "Surface.convert_alpha", "id": 3113}, {"content": "This will be extremely useful!", "user_title": "Ian Mallett", "datetimeon": "2009-08-15T19:21:45", "link": "pygame.transform.average_surfaces", "id": 2917}, {"content": "Some basic sample code for (approximately) constraining a bunch of text to a given width:\n\nwordsToWrite = toWrite.rstrip().split(\" \") #Get rid of the newline char and split on spaces\ncurrLine = \"\"\nnumLines = 0\nmaxWidthFound = 0\nfor word in wordsToWrite:\n currLine = currLine + \" \" + word #Add the next word to the line\n\n if ((textFont.size(currLine))[0] > maxAllowedWidth): #Check if the width of the line exceeds the set limit\n\n if (textFont.size(currLine))[0] > maxWidthFound: #Get the maximum line width found\n maxWidthFound = (textFont.size(currLine))[0]\n\n lines.append (textFont.render(currLine, 1, color, bgcolor)) #Add the rendered line to a list\n currLine = \"\"\n numLines = numLines + 1\n\nif currLine != \"\": #Once we exit the loop, we will probably still have a line to be rendered\n lines.append (textFont.render(currLine, 1, color, bgcolor))\n currLine = \"\"\n numLines = numLines + 1\n\nself.image = pygame.Surface((maxWidthFound + 20, numLines * textFont.get_height() + 20)) #Create a surface of the appropriate size\n\nfor lineNum in range(numLines): \n self.image.blit(lines[lineNum], (10,lineNum * textFont.get_height() + 10))", "user_title": "Aditya Keswani", "datetimeon": "2009-07-20T07:29:06", "link": "Font.render", "id": 2887}, {"content": "For all of these drawing functions, the coordinates are relative to the surface\nyou are drawing to. i.e. 
if you are drawing to a surface somewhere in the middle of\nthe screen, and you draw a circle at (0,0), its center will be the top-left corner\nof the surface being drawn to, not the top-left corner of the screen", "user_title": "Aditya Keswani", "datetimeon": "2009-07-20T07:23:36", "link": "pygame.draw", "id": 2886}, {"content": "The messages here:\nhttp://www.mail-archive.com/pygame-users@seul.org/msg10616.html\n\nimply that GL_SWAP_CONTROL can also be passed to gl_set_attribute to control whether\ndisplay swaps honor vsync.", "user_title": "Jonathan Hartley", "datetimeon": "2010-03-25T15:42:06", "link": "pygame.display.gl_set_attribute", "id": 3087}, {"content": "Pygame THECOLORS as HTML\nhttps://sites.google.com/site/meticulosslacker/pygame-thecolors", "user_title": "Meticulos Slacker", "datetimeon": "2010-03-18T03:10:26", "link": "pygame.Color", "id": 3078}, {"content": "Should be \"buffer\", not \"buffersize\"", "user_title": "Ian Mallett", "datetimeon": "2010-03-19T00:36:48", "link": "pygame.mixer.pre_init", "id": 3079}, {"content": "Put this first:\nfor e in pygame.event.get()", "user_title": "Ian Mallett", "datetimeon": "2010-03-20T19:51:20", "link": "pygame.key.set_repeat", "id": 3080}, {"content": "mod is the bitfield of KMOD_* constants:\npygame.KMOD_NONE\t0\npygame.KMOD_LSHIFT\t1\npygame.KMOD_RSHIFT\t2\npygame.KMOD_SHIFT\t3\npygame.KMOD_LCTRL\t64\npygame.KMOD_RCTRL\t128\npygame.KMOD_CTRL\t192\npygame.KMOD_LALT\t256\npygame.KMOD_RALT\t512\npygame.KMOD_ALT\t\t768\npygame.KMOD_LMETA\t1024\npygame.KMOD_RMETA\t2048\npygame.KMOD_META\t3072\npygame.KMOD_NUM\t\t4096\npygame.KMOD_CAPS\t8192\npygame.KMOD_MODE\t16384", "user_title": "Vladar", "datetimeon": "2010-03-23T06:16:44", "link": "pygame.event", "id": 3081}, {"content": "If you try to use fadeout, the queued sound will begin, as opposed to stop and pause.", "user_title": "Andy Hanson", "datetimeon": "2009-07-12T22:43:31", "link": "Channel.queue", "id": 2875}, {"content": "I ran into that problem -- the 
solution is to initialize pygame first :)\n\nimport pygame\npygame.init()\nprint pygame.key.name(pygame.K_UP)", "user_title": "Jared", "datetimeon": "2009-08-29T04:11:27", "link": "pygame.key.name", "id": 2928}, {"content": "It appears that when the delay is set to zero, \nkey.set_repeat is returned to the default, disabled state.\nTo set it to a minimum, essentially no delay, just set it to 1.\n\npygame.key.set_repeat(0,50) #Doesn't work.\npygame.key.set_repeat(1,50) #Works with essentially no delay.", "user_title": "David Khono Hackland", "datetimeon": "2010-03-02T22:20:03", "link": "pygame.key.set_repeat", "id": 3065}, {"content": "It appears that when the delay is set to zero, \nkey.set_repeat is returned to the default, disabled state.\nTo set it to a minimum, essentially no delay, just set it to 1.\n\npygame.key.set_repeat(0,50) #Doesn't work.\npygame.key.set_repeat(1,50) #Works with essentially no delay.", "user_title": "David Khono Hackland", "datetimeon": "2010-03-02T22:19:26", "link": "pygame.key.set_repeat", "id": 3064}, {"content": "Does it matter if you tick at the start or at the end?", "user_title": "Mitchell K", "datetimeon": "2009-09-12T21:09:15", "link": "Clock.tick", "id": 2944}, {"content": "The example's .flip(..) 
below won't work - maybe I should have checked it before posting...\nHere is a better Version, it should work now.\n\nfrom pygame import Rect, Surface\nclass Sprites():\n def __init__(self, spritesheet, size):\n self.sheet = spritesheet\n self.sheet.convert_alpha()\n self.size = size\n \n self.sprites = []\n for x in xrange(spritesheet.get_width() / size[0]):\n list = []\n for y in xrange(spritesheet.get_height() / size[1]):\n list.append(spritesheet.subsurface(Rect((x*size[0], y*size[1]) , size)))\n self.sprites.append(list)\n def flip(self, xbool, ybool):\n new = Surface(self.sheet.get_size())\n new.fill((0, 0, 0, 0))\n for row in self.sprites:\n for sprite in row:\n new.blit(flip(sprite, xbool, ybool), sprite.get_offset())\n self.sheet.fill((0, 0, 0, 0))\n self.sheet.blit(new, (0, 0))\n def __getitem__(self, x=None, y=None):\n # not very tested, .flip(y=7) won't work\n # the if conditions should allow you to access a sheet with one row/col more easily .flip(5, 0) == .flip(5)\n if x is not None:\n if y is None:\n if len(self.sprites) > x:\n y = 0\n else:\n y = x\n x = 0\n elif y is None:\n raise IndexError\n \n return self.sprites[x][y]\n\n@any Developer/Moderator - it would be nice if my wrong post, \"The example below won't work\" and this Notice would be removed.", "user_title": "Rolf Sievers", "datetimeon": "2009-11-06T11:31:59", "link": "Surface.subsurface", "id": 2998}, {"content": "Here is a simple Sprite-sheet Class I wrote for an application, maybe someone can use it.\n\nfrom pygame import Rect\nclass Sprites():\n def __init__(self, spritesheet, size):\n self.sheet = spritesheet\n self.sheet.convert_alpha()\n self.size = size\n \n self.sprites = []\n for x in xrange(spritesheet.get_width() / size[0]):\n list = []\n for y in xrange(spritesheet.get_height() / size[1]):\n list.append(spritesheet.subsurface(Rect((x*size[0], y*size[1]) , size)))\n self.sprites.append(list)\n print list\n print self.sprites\n def flip(self, xbool, ybool):\n self.sheet.fill((0, 
0, 0, 0))\n for row in self.sprites:\n for sprite in row:\n sprite.blit(flip(sprite, xbool, ybool), (0, 0))\n def __getitem__(self, x=None, y=None):\n if x is not None:\n if y is None:\n if len(self.sprites) > x:\n y = 0\n else:\n y = x\n x = 0\n elif y is None:\n raise IndexError\n \n return self.sprites[x][y]", "user_title": "Rolf Sievers", "datetimeon": "2009-11-06T11:14:23", "link": "Surface.subsurface", "id": 2997}, {"content": "Is there anyway to get the rect of a polygon without having to create a surface greater than or equal to the polygon, and then gather the rect from the polygon?", "user_title": "Luke Endres", "datetimeon": "2009-08-08T21:50:46", "link": "pygame.draw.polygon", "id": 2907}, {"content": "This is twice the work because the image is rotated and then resized (subrect\nis copied) but it doesn't hurt my math-addled brain.\n\ndef rot_center(image, angle):\n \"\"\"rotate an image while keeping its center and size\"\"\"\n orig_rect = image.get_rect()\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image", "user_title": "Gummbum", "datetimeon": "2010-01-17T01:17:23", "link": "pygame.transform.rotate", "id": 3034}, {"content": "Use pygame.transform.rotate(Surface, angle)", "user_title": "Francesco Pasa", "datetimeon": "2010-01-17T09:30:05", "link": "pygame.Surface", "id": 3035}, {"content": "Does the returned Boolean value indicate success/failure at toggling fullscreen mode,\nor current status of the display (e.g., fullscreen = True, windowed = False)?", "user_title": "Chris L", "datetimeon": "2010-01-27T16:33:12", "link": "pygame.display.toggle_fullscreen", "id": 3041}, {"content": "set_blocked() clear queue from ALL events", "user_title": "ploutos", "datetimeon": "2009-10-23T11:07:21", "link": "pygame.event.set_blocked", "id": 2980}, {"content": "If they play in a CD player, they were burned 
properly.", "user_title": "Jeffrey Aylesworth", "datetimeon": "2009-10-21T19:51:47", "link": "pygame.cdrom", "id": 2975}, {"content": "This is very important for mac because it shows the icon in the dock.. when I use this, it flashes the snake image for a second before changing, and it also gets smaller then the application icon which is the same (using py2app).", "user_title": "Mitchell K", "datetimeon": "2009-09-24T16:05:59", "link": "pygame.display.set_icon", "id": 2959}, {"content": "\"\"\"\n\t # This is a get_ticks() function simple example\n\t # This script should return 10 as a result\n\"\"\"\n# Standard library imports\nimport time\n# Related third party imports\nimport pygame\n#Pygame start function\n\npygame.init()\n# Create the clock\nclock = pygame.time.Clock()\n# A simple loop of 10 stages\nfor i in range(10):\n\t# Update the clock\n\tclock.tick(1)\n# Print the seconds\nprint int(round(pygame.time.get_ticks()/1000))", "user_title": "Sergio Milardovich", "datetimeon": "2010-06-08T15:33:04", "link": "pygame.time.get_ticks", "id": 3138}, {"content": "Note that pygame.scrap seems to be unimplemented in pygame-1.9.1.win32-py3.1.msi\n\nDefine testscrap.py, like this:\n\nimport pygame\npygame.init()\npygame.scrap.init()\n\n\nRun it, like this:\n\nC:\\Users\\Dave\\Documents\\Python>testscrap.py\nC:\\Users\\Dave\\Documents\\Python\\testscrap.py:3: RuntimeWarning: use scrap: No module named scrap\n(ImportError: No module named scrap)\n pygame.scrap.init()\nTraceback (most recent call last):\n File \"C:\\Users\\Dave\\Documents\\Python\\testscrap.py\", line 3, in \n pygame.scrap.init()\n File \"C:\\Python31\\lib\\site-packages\\pygame\\__init__.py\", line 70, in __getattr__\n raise NotImplementedError(MissingPygameModule)\nNotImplementedError: scrap module not available\n(ImportError: No module named scrap)", "user_title": "Dave Burton", "datetimeon": "2011-01-16T00:15:21", "link": "pygame.scrap", "id": 3731}, {"content": "BTW, the same error occurs if you have 
initialized a display surface, too:\n\nimport pygame\npygame.init()\nscreen=pygame.display.set_mode((640,360),0,32)\npygame.scrap.init()", "user_title": "Dave Burton", "datetimeon": "2011-01-16T00:18:19", "link": "pygame.scrap", "id": 3732}, {"content": "Hey, July 7 2009 Anonymous, that's a nice demo!\n\nFor Python 3 compatibility, just change the last line to:\n\n pygame.time.delay(1000//50)", "user_title": "Dave Burton", "datetimeon": "2011-01-24T10:00:06", "link": "Surface.subsurface", "id": 3746}, {"content": "Note that the order of the tuple members in virtual attributes like .topleft\nis always (x,y) [or (left,top) or (width,height)] even if the name of the\nvirtual attribute seems to suggest the opposite order. E.g.,\nrect1.topleft == (rect1.left,rect1.top)", "user_title": "Dave Burton", "datetimeon": "2011-01-25T01:19:14", "link": "pygame.Rect", "id": 3747}, {"content": "There's no 'code' member for type USEREVENT, unless you create one yourself\nwhen you create the event.", "user_title": "Dave Burton", "datetimeon": "2011-01-25T21:33:33", "link": "pygame.event", "id": 3750}, {"content": "July 15 2010 Anonymous, here's your example of a resizeable pygame window.\n\nI don't know what you mean by \"window itself as well as the display.\"\nIf you want to resize something WITHIN the pygame window, just blit something\ndifferent onto it.\n\nDave\n\n\nimport sys, os, pygame\npygame.init()\n\nclock = pygame.time.Clock()\n\nscrsize = width,height = 600,400\nblack = 0,0,0\nbgcolor = (240,240,220) # light grey\n\n# to get the true full-screen size, do this BEFORE pygame.display.set_mode:\nfullscreen_sz = pygame.display.Info().current_w, pygame.display.Info().current_h\nprint( 'screen size =', fullscreen_sz )\n\n\n# ---------- This works under Windows Vista, no promises elsewhere! 
----------\n# initially center the pygame window by setting %SDL_VIDEO_WINDOW_POS%\nwin_pos_left = 1 + ((fullscreen_sz[0] - width) // 2)\nwin_pos_top = 1 + ((fullscreen_sz[1] - height) // 2)\nos.environ['SDL_VIDEO_WINDOW_POS'] = '{0},{1}'.format(win_pos_left, win_pos_top)\n# ----------------------------------------------------------------------------\n\nscreen = pygame.display.set_mode(scrsize, pygame.RESIZABLE)\n\n# ----------------------------------------------------------------------------\nos.environ['SDL_VIDEO_WINDOW_POS'] = ''\n# if you don't clear the environment variable, the window will reposition\n# every time pygame.display.set_mode() gets called due to a VIDEORESIZE event.\n# ----------------------------------------------------------------------------\n\narial = pygame.font.SysFont( 'arial,microsoftsansserif,courier', 14 )\ntxt2display = arial.render( \"This window is resizeable\", True, black )\ntxt2display_w = txt2display.get_size()[0]\n\nwhile True:\n changed = False\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit(0)\n elif event.type == pygame.VIDEORESIZE:\n scrsize = event.size # or event.w, event.h\n screen = pygame.display.set_mode(scrsize,RESIZABLE)\n changed = True\n\n screen.fill( bgcolor )\n screen.blit( txt2display, ((scrsize[0]+1-txt2display_w)//2,1) ) # at top-center of screen\n pygame.display.update()\n if not changed:\n clock.tick(60) # limit to 60 fps", "user_title": "Dave Burton", "datetimeon": "2011-01-25T23:10:35", "link": "pygame.display.init", "id": 3751}, {"content": "Oops! Tiny correction... 
the 8th-to-last line should be\n\n screen = pygame.display.set_mode(scrsize,pygame.RESIZABLE)\n\n(Or else you can \"from pygame.locals import *\")", "user_title": "Dave Burton", "datetimeon": "2011-01-25T23:16:00", "link": "pygame.display.init", "id": 3752}, {"content": "from pygame.locals import *\n\n_evnames = {} # from SDL-1.2.14\\include\\SDL_events.h\n_evnames[NOEVENT] = 'NOEVENT' # 0 SDL_NOEVENT\n_evnames[ACTIVEEVENT] = 'ACTIVEEVENT' # 1 SDL_ACTIVEEVENT\n_evnames[KEYDOWN] = 'KEYDOWN' # 2 SDL_KEYDOWN\n_evnames[KEYUP] = 'KEYUP' # 3 SDL_KEYUP\n_evnames[MOUSEMOTION] = 'MOUSEMOTION' # 4 SDL_MOUSEMOTION\n_evnames[MOUSEBUTTONDOWN] = 'MOUSEBUTTONDOWN' # 5 SDL_MOUSEBUTTONDOWN\n_evnames[MOUSEBUTTONUP] = 'MOUSEBUTTONUP' # 6 SDL_MOUSEBUTTONUP\n_evnames[JOYAXISMOTION] = 'JOYAXISMOTION' # 7 SDL_JOYAXISMOTION\n_evnames[JOYBALLMOTION] = 'JOYBALLMOTION' # 8 SDL_JOYBALLMOTION\n_evnames[JOYHATMOTION] = 'JOYHATMOTION' # 9 SDL_JOYHATMOTION\n_evnames[JOYBUTTONDOWN] = 'JOYBUTTONDOWN' # 10 SDL_JOYBUTTONDOWN\n_evnames[JOYBUTTONUP] = 'JOYBUTTONUP' # 11 SDL_JOYBUTTONUP\n_evnames[QUIT] = 'QUIT' # 12 SDL_QUIT\n_evnames[SYSWMEVENT] = 'SYSWMEVENT' # 13 SDL_SYSWMEVENT\n # 14 SDL_EVENT_RESERVEDA\n # 15 SDL_EVENT_RESERVEDB\n_evnames[VIDEORESIZE] = 'VIDEORESIZE' # 16 SDL_VIDEORESIZE\n_evnames[VIDEOEXPOSE] = 'VIDEOEXPOSE' # 17 SDL_VIDEOEXPOSE\n # 18 SDL_EVENT_RESERVED2\n # 19 SDL_EVENT_RESERVED3\n # 20 SDL_EVENT_RESERVED4\n # 21 SDL_EVENT_RESERVED5\n # 22 SDL_EVENT_RESERVED6\n # 23 SDL_EVENT_RESERVED7\n_evnames[USEREVENT] = 'USEREVENT' # 24 SDL_USEREVENT\n_evnames[NUMEVENTS] = 'NUMEVENTS' # 32 SDL_NUMEVENTS\n\n\ndef event_name(evtype):\n '''return a displayable name for a pygame/SDL event type number'''\n try:\n result = _evnames[evtype]\n except:\n if evtype in range(USEREVENT,NUMEVENTS):\n result = 'USEREVENT+' + repr(evtype-USEREVENT)\n elif evtype >= NUMEVENTS:\n result = 'ILLEGAL_EVENT_' + repr(evtype)\n elif evtype == 14:\n result = 'EVENT_RESERVEDA'\n elif evtype == 15:\n result = 
'EVENT_RESERVEDB'\n else:\n result = 'EVENT_RESERVED' + repr(evtype-16)\n return result\n\n\nfor i in range(0,33):\n print(repr(i) + ' = ' + event_name(i))\n\n\n# It's all gonna change in SDL 1.3:\n#\n# SDL_FIRSTEVENT = 0 # Unused\n#\n# SDL_QUIT = 0x100 # User-requested quit\n#\n# SDL_WINDOWEVENT = 0x200 # Window state change\n# SDL_SYSWMEVENT = 0x201 # System specific event\n#\n# # Keyboard events\n# SDL_KEYDOWN = 0x300 # Key pressed\n# SDL_KEYUP = 0x301 # Key released\n# SDL_TEXTEDITING = 0x302 # Keyboard text editing (composition)\n# SDL_TEXTINPUT = 0x303 # Keyboard text input\n#\n# # Mouse events\n# SDL_MOUSEMOTION = 0x400 # Mouse moved\n# SDL_MOUSEBUTTONDOWN = 0x401 # Mouse button pressed\n# SDL_MOUSEBUTTONUP = 0x402 # Mouse button released\n# SDL_MOUSEWHEEL = 0x403 # Mouse wheel motion\n#\n# # Tablet or multiple mice input device events\n# SDL_INPUTMOTION = 0x500 # Input moved\n# SDL_INPUTBUTTONDOWN = 0x501 # Input button pressed\n# SDL_INPUTBUTTONUP = 0x502 # Input button released\n# SDL_INPUTWHEEL = 0x503 # Input wheel motion\n# SDL_INPUTPROXIMITYIN = 0x504 # Input pen entered proximity\n# SDL_INPUTPROXIMITYOUT = 0x505 # Input pen left proximity\n#\n# # Joystick events\n# SDL_JOYAXISMOTION = 0x600 # Joystick axis motion\n# SDL_JOYBALLMOTION = 0x601 # Joystick trackball motion\n# SDL_JOYHATMOTION = 0x602 # Joystick hat position change\n# SDL_JOYBUTTONDOWN = 0x603 # Joystick button pressed\n# SDL_JOYBUTTONUP = 0x604 # Joystick button released\n#\n# # Touch events\n# SDL_FINGERDOWN = 0x700\n# SDL_FINGERUP = 0x701\n# SDL_FINGERMOTION = 0x702\n# SDL_TOUCHBUTTONDOWN = 0x703\n# SDL_TOUCHBUTTONUP = 0x704\n#\n# # Gesture events\n# SDL_DOLLARGESTURE = 0x800\n# SDL_DOLLARRECORD = 0x801\n# SDL_MULTIGESTURE = 0x802\n#\n# # Clipboard events\n# SDL_CLIPBOARDUPDATE = 0x900 # The clipboard changed\n#\n# # Obsolete events\n# SDL_EVENT_COMPAT1 =0x7000 # SDL 1.2 events for compatibility\n# SDL_EVENT_COMPAT2 =0x7001\n# SDL_EVENT_COMPAT3 =0x7002\n#\n# # SDL_USEREVENT thru 
SDL_LASTEVENT are for your use\n# SDL_USEREVENT =0x8000\n# SDL_LASTEVENT =0xFFFF", "user_title": "Dave Burton", "datetimeon": "2011-01-27T04:08:06", "link": "pygame.event.Event", "id": 3753}, {"content": "This function seems to me little bit buggy, so I wrote my own:\n\na and b are surfarrays of some surfaces that you want to compare\n\n def comparray(self,a,b):\n c = abs(a.__sub__(b))\n c = c.__ge__(self.tolerance)*255\n surface = pygame.surfarray.make_surface(c)\n return surface", "user_title": "Kaan Ak\u00c3\u009fit", "datetimeon": "2011-01-27T18:49:33", "link": "PixelArray.compare", "id": 3756}, {"content": "There's an error in this documentation w/r/t the final (width) argument:\n\n pygame.draw.rect(self.image, color, self.image.get_rect(), width=1)\nTypeError: rect() takes no keyword arguments\n\nLeave off the \"width=\" to make it work:\n\n pygame.draw.rect(self.image, color, self.image.get_rect(), 1)\n\nThis is with either pygame-1.9.1.win32-py2.6.msi or pygame-1.9.1.win32-py3.1.msi", "user_title": "Dave Burton", "datetimeon": "2011-01-28T03:12:31", "link": "pygame.draw.rect", "id": 3757}, {"content": "There's an error in this documentation w/r/t the final (width) argument:\n\n pygame.draw.line(self.image, (0,0,0), (x,y), (x,y+h), width=2)\nTypeError: line() takes no keyword arguments\n\nLeave off the \"width=\" to make it work:\n\n pygame.draw.line(self.image, (0,0,0), (x,y), (x,y+h), 2)\n\nThis is with either pygame-1.9.1.win32-py2.6.msi or pygame-1.9.1.win32-py3.1.msi", "user_title": "Dave Burton", "datetimeon": "2011-01-28T04:54:01", "link": "pygame.draw.line", "id": 3759}, {"content": "Rect.center rounds UP:\n\nr0x0 = pygame.Rect(0,0,0,0) # a 0x0 rect\nprint('center of 0x0 rect is ' + repr(r0x0.center)) # result is (0,0) = not in the rect!\nr1x1 = pygame.Rect(0,0,1,1) # a 1x1 rect\nprint('center of 1x1 rect is ' + repr(r1x1.center)) # result is (0,0) = correct\nr2x2 = pygame.Rect(0,0,2,2) # a 2x2 rect\nprint('center of 2x2 rect is ' + 
repr(r2x2.center)) # result is (1,1) = rounded up!\nr3x3 = pygame.Rect(0,0,3,3) # a 3x3 rect\nprint('center of 3x3 rect is ' + repr(r3x3.center)) # result is (1,1) = exact\nr4x4 = pygame.Rect(0,0,4,4) # a 4x4 rect\nprint('center of 4x4 rect is ' + repr(r4x4.center)) # result is (2,2) = rounded up!\nr5x5 = pygame.Rect(0,0,5,5) # a 5x5 rect\nprint('center of 5x5 rect is ' + repr(r5x5.center)) # result is (2,2) = exact\nr6x6 = pygame.Rect(0,0,6,6) # a 6x6 rect\nprint('center of 6x6 rect is ' + repr(r6x6.center)) # result is (3,3) = rounded up!\nr7x7 = pygame.Rect(0,0,7,7) # a 7x7 rect\nprint('center of 7x7 rect is ' + repr(r7x7.center)) # result is (3,3) = exact", "user_title": "Dave Burton", "datetimeon": "2011-01-29T20:46:36", "link": "Rect.collidepoint", "id": 3761}, {"content": "(Oops, I added that comment in the wrong place.)", "user_title": "Dave Burton", "datetimeon": "2011-01-29T20:47:58", "link": "Rect.collidepoint", "id": 3762}, {"content": "Rect.center rounds UP:\n\nr0x0 = pygame.Rect(0,0,0,0) # a 0x0 rect\nprint('center of 0x0 rect is ' + repr(r0x0.center)) # result is (0,0) = not in the rect!\nr1x1 = pygame.Rect(0,0,1,1) # a 1x1 rect\nprint('center of 1x1 rect is ' + repr(r1x1.center)) # result is (0,0) = correct\nr2x2 = pygame.Rect(0,0,2,2) # a 2x2 rect\nprint('center of 2x2 rect is ' + repr(r2x2.center)) # result is (1,1) = rounded up!\nr3x3 = pygame.Rect(0,0,3,3) # a 3x3 rect\nprint('center of 3x3 rect is ' + repr(r3x3.center)) # result is (1,1) = exact\nr4x4 = pygame.Rect(0,0,4,4) # a 4x4 rect\nprint('center of 4x4 rect is ' + repr(r4x4.center)) # result is (2,2) = rounded up!\nr5x5 = pygame.Rect(0,0,5,5) # a 5x5 rect\nprint('center of 5x5 rect is ' + repr(r5x5.center)) # result is (2,2) = exact\nr6x6 = pygame.Rect(0,0,6,6) # a 6x6 rect\nprint('center of 6x6 rect is ' + repr(r6x6.center)) # result is (3,3) = rounded up!\nr7x7 = pygame.Rect(0,0,7,7) # a 7x7 rect\nprint('center of 7x7 rect is ' + repr(r7x7.center)) # result is (3,3) = exact", 
"user_title": "Dave Burton", "datetimeon": "2011-01-29T20:48:50", "link": "pygame.Rect", "id": 3763}, {"content": "This documentation is incorrect. A point along the right or bottom edge IS\nwithin the Rect, and points at coordinates on the bottom or right edge DO\ncollide with the Rect.\n\nHere's proof:\n\nr = Rect(0,0, 4,4) # a 4x4 rectangle\nprint('0,0: ' + repr(r.collidepoint(0,0)))\nprint('1,1: ' + repr(r.collidepoint(1,1)))\nprint('2,2: ' + repr(r.collidepoint(2,2)))\nprint('3,3: ' + repr(r.collidepoint(3,3)))\nprint('4,4: ' + repr(r.collidepoint(4,4)))\n\nWith pygame 1.9.1 under both Python 3.1 and 2.6, it prints:\n\n0,0: 1\n1,1: 1\n2,2: 1\n3,3: 1\n4,4: 0\n\nNote that the bottom-right pixel within the 4x4 rect is at (3,3) and\ncollidepoint((3,3)) does return 1 (meaning true).\n\nA second (minor) documentation error is that it actually returns an integer\n1 or 0 instead of boolean True or False.", "user_title": "Dave Burton", "datetimeon": "2011-01-29T22:35:18", "link": "Rect.collidepoint", "id": 3764}, {"content": "This class is a bit odd. Event objects have no event.__dict__ attribute,\nand the dir(event) function doesn't work. However, repr(event) returns a\nnice, thorough description of an event object and its attributes, and the\nevent.dict attribute lists all the important attributes except .type and\n.dict itself.", "user_title": "Dave Burton", "datetimeon": "2011-02-07T04:48:40", "link": "pygame.event", "id": 3774}, {"content": "With a Microsoft IntelliMouse p/n X05-77975, under Windows Vista,\nwith either Python 2.6 or 3.1, the button numbers are:\n1 = left button\n2 = center button/wheel press\n3 = right button\n4 = wheel roll forward/up\n5 = wheel roll backward/down\n6 = left side extra button\n7 = right side extra button", "user_title": "Dave Burton", "datetimeon": "2011-02-23T21:04:46", "link": "pygame.event", "id": 3794}, {"content": "There's a cursor missing! 
sizer_xy_strings defines an upper-left-to-lower-right\nresizer cursor, suitable for dragging the upper-left or lower-right corner.\nBut there's no sizer_yx_strings to make the upper-right-to-lower-left cursor.\nHere's how I made one:\n\nsizer_yx_strings = [ x[12::-1]+x[13:] for x in pygame.cursors.sizer_xy_strings ]", "user_title": "Dave Burton", "datetimeon": "2011-02-24T02:08:27", "link": "pygame.cursors", "id": 3795}, {"content": "Or, equivalently:\n\nsizer_yx_strings = ( #sized 24x16\n \" XXXXXXXX \",\n \" X.....X \",\n \" X....X \",\n \" X...X \",\n \" X.X..X \",\n \" X.X X.X \",\n \"X X.X XX \",\n \"XX X.X X \",\n \"X.XX.X \",\n \"X...X \",\n \"X...X \",\n \"X....X \",\n \"X.....X \",\n \"XXXXXXXX \",\n \" \",\n \" \",\n)", "user_title": "Dave Burton", "datetimeon": "2011-02-24T02:13:59", "link": "pygame.cursors", "id": 3796}, {"content": "The Sound function now accepts 'buffer', 'file', and 'array' keyword arguments\nto remove any ambiguity in how to treat an argument. The 'array' keyword is new,\nand tells Sound to look check the argument for an array struct interface or\nthe new buffer protocol if supported. This allows Sound to function like\nsndarray.make_sound.\n\nSound also exposes an array struct interface and the new buffer protocol.", "user_title": "Lenard Lindstrom", "datetimeon": "2011-03-01T13:26:33", "link": "pygame.mixer.Sound", "id": 3799}, {"content": "I'm not sure which version of Pygame is being used here, 1.9? At the time it was\nreleased NumPy was unavailable for Python 3.1. 
Python 1.9.2 alpha from SVN\ncertainly does support NumPy for Python 3.1, and 3.2.", "user_title": "Lenard Lindstrom", "datetimeon": "2011-03-01T13:35:12", "link": "pygame.surfarray", "id": 3800}, {"content": "That should be \"which version of Pygame is being used here, 1.9.1?\"", "user_title": "Lenard Lindstrom", "datetimeon": "2011-03-01T13:37:12", "link": "pygame.surfarray", "id": 3801}, {"content": "New to Pygame 1.9.2 for NumPy: pixels_red, pixels_green, and pixels_blue.", "user_title": "Lenard Lindstrom", "datetimeon": "2011-03-01T13:38:40", "link": "pygame.surfarray.pixels_alpha", "id": 3802}, {"content": "For the KEYDOWN and KEYUP event \"scancode\" is also a member and can be used \nfor the unknown keys", "user_title": "Daniel Kaminsky", "datetimeon": "2011-03-23T05:51:58", "link": "pygame.event", "id": 3872}, {"content": "The wheel generates pygame.MOUSEBUTTONUP events too, not just pygame.MOUSEBUTTONDOWN event.", "user_title": "Dan Ross", "datetimeon": "2011-04-02T23:30:45", "link": "pygame.mouse", "id": 3884}, {"content": "Forget what the functions do, check out Mr. Brown's naming style. Its pure genius!\n1) angle_times_WOW_pi_divided_by_180\n2) HE_HE_strange_popper_z\n3) buffy_the_fat2\n4) they_did_touch\n5) while Grr < LIN_collide_max:\n6) Rotated_Relate_ball1_z__PLUS__Rotated_ball1_zol\n7) write_to_file_WEEE_STRANGE()\n8) freaky_rect_switcharoo_2D()", "user_title": "Mad Cloud Games", "datetimeon": "2011-04-03T18:28:23", "link": "pygame.draw.circle", "id": 3885}, {"content": "If you like to receive the inner rectangle, the blit is a much better setup. 
\nThe following comparing examples show how-to cut a (centered) 150x150 frame out of a 250x250 image:\norig_surf = pygame.Surface((250,250),flags=pygame.SRCALPHA)\npygame.draw.circle(orig_surf,(255,0,0),(50,50),25)\npygame.draw.circle(orig_surf,(0,255,0),(50,200),25)\npygame.draw.circle(orig_surf,(0,0,255),(200,50),25)\npygame.draw.circle(orig_surf,(0,255,255),(200,200),25)\n\ncrop_surf = pygame.transform.chop(pygame.transform.chop(orig_surf,(0,0,50,50)),(150,150,250,250))\npygame.image.save(crop_surf, 'test-crop.png')\n\n\ncrop_surf = pygame.Surface((150,150),flags=pygame.SRCALPHA)\ncrop_surf.blit(orig_surf, (0,0),(50,50,200,200))\npygame.image.save(crop_surf, 'test-blit.png')", "user_title": "Rick van der Zwet", "datetimeon": "2011-05-05T04:36:44", "link": "pygame.transform.chop", "id": 4045}, {"content": "It all seemed simple and working properly, then I noticed... \"The area covered by a Rect does not include the right- and bottom-most edge of pixels. If one Rect's bottom border is another Rect's top border (i.e., rect1.bottom=rect2.top), the two meet exactly on the screen but do not overlap, and rect1.colliderect(rect2) returns false.\"\n\n*mutter* good to know.", "user_title": "Anonymous", "datetimeon": "2011-01-10T19:28:58", "link": "Rect.colliderect", "id": 3725}, {"content": "Note that when the user resizes the game window, pygame does not automatically update its internal screen surface. You must call set_mode() every time VIDEORESIZE is sent. This really should be more clear in the documentation.", "user_title": "Anonymous", "datetimeon": "2011-01-11T15:55:57", "link": "pygame.display", "id": 3726}, {"content": "Is it possible to set this mode transparent?\nI mean without changing the transparency with set_alpha or ... but from the beginning.", "user_title": "Anonymous", "datetimeon": "2011-01-13T08:28:22", "link": "pygame.display.set_mode", "id": 3727}, {"content": "Draw a normal thick line, then draw two aa lines either side. 
Not exactly what you want but it will work.", "user_title": "Anonymous", "datetimeon": "2011-01-13T15:22:43", "link": "pygame.draw.aaline", "id": 3728}, {"content": "This code fixes the bad rect given by the line function.\n\ntemprect=(pygame.draw.line(screen,color,firstpos,newpos,thick))\ntemprect.inflate_ip(thick*2, thick*2)\ndirty.append(temprect)", "user_title": "Anonymous", "datetimeon": "2005-11-22T22:22:44", "link": "pygame.draw.line", "id": 8}, {"content": "if your rect contains a negative width or height you need to rect.normalize() your rect before passing it to this function", "user_title": "Anonymous", "datetimeon": "2005-11-27T22:45:10", "link": "pygame.draw.ellipse", "id": 13}, {"content": "Rotates image about its center.", "user_title": "Anonymous", "datetimeon": "2005-11-28T19:22:44", "link": "pygame.transform.rotate", "id": 14}, {"content": "Make sure you blit according to the center of the newly formed surface, and not what the center of the orginal image is.", "user_title": "Anonymous", "datetimeon": "2005-11-28T19:24:48", "link": "pygame.transform.rotate", "id": 15}, {"content": "This probably goes without saying, but always rotate the orginal image, not a rotated copy.", "user_title": "Anonymous", "datetimeon": "2005-11-28T19:26:45", "link": "pygame.transform.rotate", "id": 16}, {"content": "Before calling pygame.key.get_pressed(), one should call pygame.event.pump() to get the lates state of the keyboard.\n\nThis is so because the get_pressed() function wraps the SDL_GetKeyState() function and in the SDL_GetKeyState() documentation it is written that one should use SDL_PumpEvents() to update the state array and pygame.event.pump() just happens to be a wrapper for SDL_PumpEvents() :-)", "user_title": "Anonymous", "datetimeon": "2005-12-01T10:30:49", "link": "pygame.key.get_pressed", "id": 18}, {"content": "When I tryed to use this, he couldn't find the key K_t I wanted\n untill I used:\n\nfrom pygame.locals import *\n\nSo be sure to use it - 
Shefy", "user_title": "Anonymous", "datetimeon": "2005-12-07T04:09:33", "link": "pygame.key.get_pressed", "id": 19}, {"content": "if you pass in None as the background argument, you get the error\n\"TypeError: Invalid background RGBA argument\"", "user_title": "Anonymous", "datetimeon": "2005-12-10T19:21:13", "link": "Font.render", "id": 22}, {"content": "pygame.event.pump()\n m = pygame.key.get_mods()\n if m & KMOD_SHIFT:\n print 'shift pressed'", "user_title": "Anonymous", "datetimeon": "2005-12-25T19:36:47", "link": "pygame.key.get_pressed", "id": 32}, {"content": "Rotated objects tend to move around because bounding rectangle changes size.\nStore the center in a temporary variable, then rotate the original image, and finally reset the center before you blit or update\nThis code comes from a sprite class:\n\n def turn(self, amount):\n \"turn some amount\"\n oldCenter = self.rect.center\n self.dir += amount\n self.image = pygame.transform.rotate(self.baseImage, self.dir)\n self.rect = self.image.get_rect()\n self.rect.center = oldCenter", "user_title": "Anonymous", "datetimeon": "2006-01-03T09:48:09", "link": "pygame.transform.rotate", "id": 36}, {"content": "This effect (1 + 3 = 2) is caused by your X.org/XServer mouse configuration section, which allows to emulate the middle button by clicking both the left and right mouse button at the same time.", "user_title": "Anonymous", "datetimeon": "2006-01-04T09:16:25", "link": "pygame.mouse.get_pressed", "id": 37}, {"content": "This does not result in 'truly' transparent text, as the area between the letters is filled in with the background color. 
For truly transparent text with an invisible background behind the letters, use Numeric:\n\ndef RenderTransparent(font, text, antialias=1, color=(255, 0, 0, 0)):\n 'Render text with transparency underneath the letters'\n 'Requires Numeric'\n\n # Create a colored block big enough to hold the text\n w, h = font.size(text)\n surface = pygame.Surface((w, h), pygame.SRCALPHA)\n surface.fill(color)\n \n # Create an alpha channel that contains the shapes of the letters\n alpha = pygame.Surface((w, h), pygame.SRCALPHA)\n WHITE = (255, 255, 255, 0)\n BLACK = (0, 0, 0, 0)\n a = font.render(text, antialias, WHITE, BLACK)\n alpha.blit(a, (0, 0))\n \n # Combine the alpha channel with the colored block\n pic = surface.convert_alpha()\n mask = alpha.convert(32)\n mskarray = pygame.surfarray.pixels3d(mask)\n pygame.surfarray.pixels_alpha(pic)[:, :] = mskarray[:, :, 0]\n\n # Return the 'truly' transparent text.\n return pic", "user_title": "Anonymous", "datetimeon": "2006-01-17T14:45:02", "link": "Font.render", "id": 41}, {"content": "LOL", "user_title": "Anonymous", "datetimeon": "2011-01-03T19:03:40", "link": "PixelArray.replace", "id": 3700}, {"content": "FUCKING SPAMMER MOTHER FUCKERS WHO OWNS THIS SHIT THEY SHOULD BURN IN HELLL", "user_title": "Anonymous", "datetimeon": "2011-01-03T19:05:09", "link": "pygame.locals", "id": 3701}, {"content": "On my Windows Vista machine running Python 3.1.2 and pygame 1.9.1,\nthe 'black=' and 'white=' parameters are swapped.\n\nSo, to make the example work (with a black arrow outline\naround a white center), you have to do this:\n\nthickarrow_strings = ( #sized 24x24\n \"XX \",\n \"XXX \",\n \"XXXX \",\n \"XX.XX \",\n \"XX..XX \",\n \"XX...XX \",\n \"XX....XX \",\n \"XX.....XX \",\n \"XX......XX \",\n \"XX.......XX \",\n \"XX........XX \",\n \"XX........XXX \",\n \"XX......XXXXX \",\n \"XX.XXX..XX \",\n \"XXXX XX..XX \",\n \"XX XX..XX \",\n \" XX..XX \",\n \" XX..XX \",\n \" XX..XX \",\n \" XXXX \",\n \" XX \",\n \" \",\n \" \",\n \" 
\")\n\ndatatuple, masktuple = pygame.cursor.compile( thickarrow_strings,\n black='.', white='X', xor='o' )\npygame.mouse.set_cursor( (24,24), (0,0), datatuple, masktuple )", "user_title": "Anonymous", "datetimeon": "2011-01-04T09:45:11", "link": "pygame.cursors.compile", "id": 3702}, {"content": "I'm using this generator to get a channel id for each sprite:\n\ndef free_sound_channel():\n \"\"\"Get next available sound channel\n Usage:\n free_channels=free_sound_channel()\n id=free_channels.next()\n \"\"\"\n id=0\n while id<pygame.mixer.get_num_channels():\n yield id\n id+=1\n return # or: raise StopIteration()", "user_title": "Anonymous", "datetimeon": "2006-01-29T16:18:15", "link": "pygame.mixer.Channel", "id": 49}, {"content": "COLORKEY and ALPHA should have 'SRC' prefixed to them. Here is a more-complete list of flags revelvant to surface.get_flags():\n,\"SRCCOLORKEY\"\n,\"RLEACCEL\"\n,\"RLEACCELOK\"\n,\"PREALLOC\"\n,\"HWACCEL\"\n,\"SRCALPHA\"\n,\"UYVY_OVERLAY\"\n,\"YV12_OVERLAY\"\n,\"YVYU_OVERLAY\"\n,\"YUY2_OVERLAY\"\n,\"HWPALETTE\"\nSWSURFACE - not really usable as a surface flag, equates to 0 and is always default\nANYFORMAT - used to create surfaces, pygame defaults to this flag if you don't specifya bit depth\nHWACCEL - surface is hardware accelerated, readonly\nSRCCOLORKEY- surface has a colorkey for blits, readonly\nSRCALPHA - surface has alpha enabled, readonly\nRLEACCELOK - surface is rle accelerated, but hasn't been compiled yet, readonly\nPREALLOC - not even sure?\nHope this helps....", "user_title": "Anonymous", "datetimeon": "2006-02-07T21:37:24", "link": "Surface.get_flags", "id": 52}, {"content": "I wish all the possible flags were documented here...", "user_title": "Anonymous", "datetimeon": "2006-02-07T22:02:57", "link": "pygame.Surface", "id": 53}, {"content": "I don't know what is wrong with you two. I tested the following and it worked as expected. 
Perhaps it is because I tested it on windows, if you tested it somewhere else (of course that's not the likely cause but I really can't see what else is wrong).\n\nIt is true that passing None for the final argument causes \"Invalid RGBA argument\". This is a bug in the documentation, not the code. The proper way to get transparency is to simply omit the last argument.\n\n$python\n>>>import pygame\n>>>pygame.init()\n>>>screen = pygame.display.set_mode((300,300))\n>>>screen.fill((255,0,0))\n>>>pygame.display.flip()\n\n>>>font = pygame.font.SysFont(\"Times New Roman\",30)\n>>>s = font.render(\"Eggs are good for you, but not on the eiffel tower\",True,(0,255,255))\n>>>s.get_flags() #-> 65536 [SRCALPHA].. good, implies the image has per-pixel transparency\n>>>[s.get_at((i,j)) for i in range(20) for j in range(20)]\n[.... #here we see that indeed each\n(0,255,255,68) #pixel is a full RGBA pixel with 4\n....] #components.\n>>>screen.blit(s, (0,0))\n>>>pygame.display.flip()\n>>>pygame.event.pump() #in order to bring the window back to life...\n\nAnd the result is turquoise text with red in the background, clearly showing transparency. Phew, you had me worried there, thinking I couldn't do transparency with this... until I looked closer. These docs are shiny but can be very hard to read sometimes.", "user_title": "Anonymous", "datetimeon": "2006-02-08T02:29:13", "link": "Font.render", "id": 54}, {"content": "Here's another solution for creating surfaces with per-pixel-alpha:\n\nimage = pygame.Surface((width,height),pygame.SRCALPHA,32);\n\nAdding the depth argument '32' seems to make this work every time.", "user_title": "Anonymous", "datetimeon": "2006-02-21T15:49:19", "link": "pygame.Surface", "id": 58}, {"content": "In the documentery it says \"The antialias argument is a boolean, if true the \ncharacters will have smooth edges.\". If you pass a string as the antialias \nargument it raises an exception saying \"TypeError: an integer is required\". 
This\nis very confusing. It should raise \"TypeError: a boolean is required\". \nIf antialias is enabled it will greatly drop the framerate (from 100 to 33 on my\nmachine). Font.render should be called only for as many times as you need fonts.\nDo not call this function every gameloop for it will greatly drop the framerate.\n(this cost me about 2 houres of debugging to find out.)\nIf any admins read this: Please change the script so that long lines will be seperated to shorter lines. Those 500+ words lines are uncomfortable to read with all that scrolling. mfg nwp.", "user_title": "Anonymous", "datetimeon": "2006-03-05T13:28:14", "link": "Font.render", "id": 61}, {"content": "you may want to initalise the \ndifferent modules seperately\nto speed up your program. Of \ncourse, then you would need \nto know which modules you have\ninitalised and which ones you\nhave not.", "user_title": "Anonymous", "datetimeon": "2006-03-08T22:55:41", "link": "pygame.init", "id": 64}, {"content": "", "user_title": "Anonymous", "datetimeon": "2006-12-30T07:59:42", "link": "Hope to find some more useful information on your site! 
It is really great!", "id": 196}, {"content": "format of music files\non cds are (usualy) in\nCD Digital Audio, except\nsometimes a program will\nmake a cd useing a \ndifferent format, so \npygame.cdrom.CD(n).play()\nwill maby not play it.", "user_title": "Anonymous", "datetimeon": "2006-03-08T23:02:35", "link": "pygame.cdrom", "id": 67}, {"content": "import pygame\nfrom pygame.locals import *\n\npygame.init()\npygame.display.set_mode((300,200))\npygame.display.set_caption('Mouse Input Demonstration')\nrunning = True\nwhile running:\n for event in pygame.event.get():\n if event.type == QUIT:\n running = False\n if event.type == KEYDOWN and event.key == K_ESCAPE:\n running = False\n if event.type == MOUSEBUTTONDOWN:\n print event.button\n\npygame.display.quit()", "user_title": "Anonymous", "datetimeon": "2006-04-02T00:38:08", "link": "pygame.mouse.get_pressed", "id": 82}, {"content": "# An Example from perldude69@gmail.com www.wachadoo.com/forum/\n# CONSTANTS\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\n#Initialise Game\npygame.init()\nscreen = pygame.display.set_mode( (SCREEN_WIDTH,SCREEN_HEIGHT))\npygame.display.set_caption('Space Invaders')\nbackground = pygame.image.load('./pics/background1.jpg').convert()\nbackground = pygame.transform.scale(background,( SCREEN_WIDTH, SCREEN_HEIGHT))\nscreen.blit(background, (0,0)) \npygame.display.flip() \ndone = False\nwhile not done:\n\tfor e in pygame.event.get():\n\t\tif e.type == KEYDOWN:\n\t\t\tdone = True\nif __name__ == \"__main__\":\n main()", "user_title": "Anonymous", "datetimeon": "2006-04-07T18:04:11", "link": "pygame.transform.scale", "id": 84}, {"content": "Don't specify flags unless you absolutely *must* (that is, don't specify HWSURFACE, depth=32 just because you think it's a good idea). 
This will reduce the portability of your game.", "user_title": "Anonymous", "datetimeon": "2006-06-13T21:27:25", "link": "pygame.display.set_mode", "id": 98}, {"content": "Could someone please post the integer values corresponding to the various shift/ctl/alt keys? Or provide a link.\nthank you!", "user_title": "Anonymous", "datetimeon": "2006-12-18T16:29:25", "link": "pygame.key.get_mods", "id": 183}, {"content": "numpy is fine in Python 3.1.2. However, pygame.surfarray doesn't work\nat all in pygame-1.9.1.win32-py3.1.msi with python-3.1.2.msi and\nnumpy-1.5.1-win32-superpack-python3.1.exe under Windows Vista.\n\nTo see the problem, just run the test that comes with it; 4 of 14 tests fail:\n\nC:\\>cd \\python31\\lib\\site-packages\\pygame\\tests\n\nC:\\Python31\\Lib\\site-packages\\pygame\\tests>\\python31\\python surfarray_test.py\nEE.EE.........\n======================================================================\nERROR: test_array2d (__main__.SurfarrayModuleTest)\n----------------------------------------------------------------------\nTraceback (most recent call last):\n File \"surfarray_test.py\", line 147, in test_array2d\n arr = pygame.surfarray.array2d(surf)\n File \"C:\\python31\\lib\\site-packages\\pygame\\surfarray.py\", line 104, in array2d\n return numpysf.array2d (surface)\n File \"C:\\python31\\lib\\site-packages\\pygame\\_numpysurfarray.py\", line 77, in array2d\n data = ''.join (pattern.findall (data))\nTypeError: can't use a string pattern on a bytes-like object\n[...snip...]", "user_title": "Anonymous", "datetimeon": "2011-01-07T03:47:28", "link": "pygame.surfarray", "id": 3704}, {"content": "no fill?", "user_title": "Anonymous", "datetimeon": "2006-11-19T14:38:36", "link": "pygame.draw.arc", "id": 163}, {"content": "works perfectly fine for me... 
question: what's the name of the overloaded operator that does the pxarray[x,y] subscripting?", "user_title": "Anonymous", "datetimeon": "2010-12-23T18:28:10", "link": "pygame.PixelArray", "id": 3689}, {"content": "pygame.cursors.ball is also a cool one.", "user_title": "Anonymous", "datetimeon": "2010-12-28T16:21:48", "link": "pygame.cursors", "id": 3690}, {"content": "Its a success/failure scenario. It returns True (1) if it went well.", "user_title": "Anonymous", "datetimeon": "2010-12-30T05:47:55", "link": "pygame.display.toggle_fullscreen", "id": 3691}, {"content": "These appear to be in degrees rather than radians (different than how draw.arc()'s are specified) which is kind of inconsistent. Are these documented better elsewhere?", "user_title": "Anonymous", "datetimeon": "2011-01-01T15:04:01", "link": "pygame.gfxdraw.pie", "id": 3694}, {"content": "You need to put\nimport pygame\nat the top of your program, anonymous.", "user_title": "Anonymous", "datetimeon": "2011-01-02T07:03:47", "link": "pygame.display", "id": 3695}, {"content": "", "user_title": "Anonymous", "datetimeon": "2006-12-28T17:27:45", "link": "Looking for information and found it at this great site...", "id": 188}, {"content": "", "user_title": "Anonymous", "datetimeon": "2006-12-29T02:41:08", "link": "I love the whiiite suits! Great show!", "id": 189}, {"content": "", "user_title": "Anonymous", "datetimeon": "2006-12-29T10:09:22", "link": "Thank you for your site. 
I have found here much useful information...", "id": 190}, {"content": "I had this weird thing where blue/red was inversed, but not the other colours, when I was mapping some pixels from one image to a blank surface.\nIt was caused by copying the color integer directly to one pixel to the other, so the trick is to always surface.unmap_rgb(pixel) before setting the color to a new pixel", "user_title": "Anonymous", "datetimeon": "2010-12-13T21:22:42", "link": "pygame.PixelArray", "id": 3685}, {"content": ".", "user_title": "Anonymous", "datetimeon": "2010-12-17T14:01:47", "link": "Movie.play", "id": 3687}, {"content": "import pygame, sys\nfrom pygame.version import ver\nprint (\"pygame \", ver)\nstartstate = pygame.init()\nprint (\"{pygame.init()}\", startstate)\nscreen = pygame.display.set_mode([640, 480])\nprint (\"{pygame.display.set_mode([640, 480]}\", screen)\nwhile True:\n for event in pygame.event.get():\n if not event:\n print (\"Event processing error: cannot find event.\")\n elif event.type == pygame.QUIT or event.type == pygame.K_ESCAPE:\n print (\"{for event in pygame.event.get():} : \", event)\n sys.exit()\nsys.exit() command does not run when I press escape, all it does is the same as if not event.", "user_title": "Anonymous", "datetimeon": "2010-12-18T17:09:49", "link": "pygame.key", "id": 3688}, {"content": "It the range for H should only be [0, 360); at exactly 360 the expression throws an OverflowError. The other ranges are not affected as such.", "user_title": "Anonymous", "datetimeon": "2010-12-08T17:55:35", "link": "Color.hsva", "id": 3677}, {"content": "", "user_title": "Anonymous", "datetimeon": "2006-12-30T22:35:22", "link": "Very cool design! Useful information. Go on!", "id": 202}, {"content": "", "user_title": "Anonymous", "datetimeon": "2006-12-31T05:52:59", "link": "Very interesting! site. A must bookmark! 
I wait for continuation", "id": 203}, {"content": "", "user_title": "Anonymous", "datetimeon": "2006-12-31T13:03:40", "link": "Very interesting! site. A must bookmark! I wait for continuation", "id": 204}, {"content": "", "user_title": "Anonymous", "datetimeon": "2006-12-31T19:55:09", "link": "Just wanted to say you have some happyY looking walkers. All natural!", "id": 205}, {"content": "", "user_title": "Anonymous", "datetimeon": "2007-01-01T16:59:55", "link": "You have an outstanding good and well structured site. I enjoyed browsing through it.", "id": 208}, {"content": "", "user_title": "Anonymous", "datetimeon": "2007-01-02T00:19:19", "link": "I love the whiiite suits! Great show!", "id": 211}, {"content": "", "user_title": "Anonymous", "datetimeon": "2007-01-02T15:12:24", "link": "Hope to find some more useful information on your site! It is really great!", "id": 212}, {"content": "", "user_title": "Anonymous", "datetimeon": "2007-01-02T22:17:19", "link": "Just wanted to say you have some happyY looking walkers. All natural!", "id": 213}, {"content": "", "user_title": "Anonymous", "datetimeon": "2007-01-03T05:35:22", "link": "You have a great site. All in your web is very useful. Please keep on working.", "id": 214}, {"content": "", "user_title": "Anonymous", "datetimeon": "2007-01-03T12:53:55", "link": "Pretty nice site, wants to see much more on it! :)", "id": 215}, {"content": "", "user_title": "Anonymous", "datetimeon": "2007-01-03T20:02:38", "link": "You have an outstanding good and well structured site. I enjoyed browsing through it.", "id": 216}, {"content": "", "user_title": "Anonymous", "datetimeon": "2007-01-04T18:10:07", "link": "This site is asomeee, well done, thanks for all!", "id": 218}, {"content": "", "user_title": "Anonymous", "datetimeon": "2007-01-05T01:15:13", "link": "Very nice site. 
Keep up the great work.", "id": 219}, {"content": "Left:\nctrl 4160 \nshift 4097 \nalt 4352 \n\nRight:\nctrl 4224\nshift 4098\nalt 4608\n\nLeft:\nc+s 4161\nc+a 4416\na+s 4353\n\nRight:\nc+s 4226\nc+a 4736\na+s 4610\n\nDone by hand ;)\n\n-Jabapyth", "user_title": "Anonymous", "datetimeon": "2007-01-29T17:22:04", "link": "pygame.key.get_mods", "id": 316}, {"content": "Depending on your keyboard there may be limitations of how many simultaneous keypresses can be detected by this command. Some combinations will work on one keyboard and not on another.", "user_title": "Anonymous", "datetimeon": "2007-02-08T19:53:05", "link": "pygame.key.get_pressed", "id": 335}, {"content": "Just use the same line width as your radius.\nThis of course dosn't solve your problem if you want a border on your arc, but then you can just paint twice.", "user_title": "Anonymous", "datetimeon": "2007-02-26T18:27:17", "link": "pygame.draw.arc", "id": 374}, {"content": "It looks like width is not a keyword argument, but a required/positional/whatever one instead.\n\n>>> pygame.draw.line(surf, color, (x1, y1), (x2, y2), width=width)\nTraceback (most recent call last):\n File \"\", line 1, in ?\nTypeError: line() takes no keyword arguments\n>>> pygame.draw.line(surf, color, (x1, y1), (x2, y2))\n\n>>> pygame.draw.line(surf, color, (x1, y1), (x2, y2), 1)", "user_title": "Anonymous", "datetimeon": "2007-03-01T11:28:42", "link": "pygame.draw.line", "id": 386}, {"content": "first number = top left rectangle x coordinate\nsecond number = top left rectangle y coordinate\nthird number = width of rectangle\nfourth number = length of rectangle", "user_title": "Anonymous", "datetimeon": "2007-03-03T16:39:29", "link": "pygame.draw.rect", "id": 400}, {"content": "If you want to make a deep copy of a Rect object (without importing the copy module)\nthen you can do so by calling move with the arguments (0,0).", "user_title": "Anonymous", "datetimeon": "2008-01-04T00:22:04", "link": "Rect.move", "id": 1359}, 
{"content": "the forth numer is the height of the rect", "user_title": "Anonymous", "datetimeon": "2007-03-11T20:32:13", "link": "pygame.draw.rect", "id": 434}, {"content": "The documentation is incorrect. pygame.mixer.music(5) will indeed play the music five times, not six. Perhaps the function used to behave differently, but I can find nothing in the documentation for either pygame or SDL_mixer that suggests so.", "user_title": "Anonymous", "datetimeon": "2007-03-19T15:33:19", "link": "pygame.mixer.music.play", "id": 440}, {"content": "I meant to say pygame.mixer.music.play(5), of course. I left out the \"play\" part.", "user_title": "Anonymous", "datetimeon": "2007-03-19T15:34:19", "link": "pygame.mixer.music.play", "id": 441}, {"content": "Properties in the object returned by get_rect():\n\nbottom\nbottomleft\nbottomright\ncenter\ncenterx\ncentery\nclamp\nclamp_ip\nclip\ncollidedict\ncollidedictall\ncollidelist\ncollidelistall\ncollidepoint\ncolliderect\ncontains\nfit\nh\nheight\ninflate\ninflate_ip\nleft\nmidbottom\nmidleft\nmidright\nmidtop\nmove\nmove_ip\nnormalize\nright\nsize\ntop\ntopleft\ntopright\nunion\nunion_ip\nunionall\nunionall_ip\nw\nwidth\nx\ny", "user_title": "Anonymous", "datetimeon": "2007-03-23T00:10:38", "link": "Surface.get_rect", "id": 446}, {"content": "i've noticed the loop functionality to be iffy for certain wave files (an audible gap between each loop). from what i can tell, it looks like this happens with stereo wave files, but i'm not completely sure. the mono waves i try to loop play as expected", "user_title": "Anonymous", "datetimeon": "2008-01-04T23:39:18", "link": "pygame.mixer.music.fadeout", "id": 1365}, {"content": "i've noticed the loop functionality to be iffy for certain wave files (an audible gap between each loop). from what i can tell, it looks like this happens with stereo wave files, but i'm not completely sure. 
the mono waves i try to loop play as expected (i accidentally added this comment to fadeout(), sorry)", "user_title": "Anonymous", "datetimeon": "2008-01-04T23:39:59", "link": "pygame.mixer.music.play", "id": 1366}, {"content": "what about osx? is macosx working?", "user_title": "Anonymous", "datetimeon": "2007-03-25T15:39:01", "link": "pygame.display.init", "id": 448}, {"content": "omg, you should really use the KMOD_ constants here", "user_title": "Anonymous", "datetimeon": "2007-03-28T15:20:15", "link": "pygame.key.get_mods", "id": 451}, {"content": "In 1.7.1, the behaviour when None is passed in is NOT reversed. pygame.event.set_allowed(None) will BLOCK all events.", "user_title": "Anonymous", "datetimeon": "2007-04-01T20:22:29", "link": "pygame.event.set_allowed", "id": 457}, {"content": "just do:\nimg = pygame.image.load(\"<>\").convert()\n\n-harry666t", "user_title": "Anonymous", "datetimeon": "2007-04-03T12:22:16", "link": "pygame.draw", "id": 463}, {"content": "BTW, Those values gotten below are if num-lock is on\nKMOD_NUM == 4096\nKMOD_LSHIFT == 1\nKMOD_RSHIFT == 2\nKMOD_NUM | KMOD_LSHIFT == 4097\nThe simpler way is to use the bitwise AND (&)\n\nkeymods & KMOD_LSHIFT \n\nreturns true (actually 1 in this case) if left shift is pressed, no matter what else is pressed or if num lock is on, or if the planets are aligned correctly.", "user_title": "Anonymous", "datetimeon": "2007-04-03T19:15:04", "link": "pygame.key.get_mods", "id": 466}, {"content": "What is the definition of the key and mod members of KEYDOWN?", "user_title": "Anonymous", "datetimeon": "2007-04-07T05:35:39", "link": "pygame.event", "id": 475}, {"content": "who do i get a reference to a reserved channel? its not channel 0 nor num_channels-1", "user_title": "Anonymous", "datetimeon": "2007-04-07T11:19:42", "link": "pygame.mixer.set_reserved", "id": 476}, {"content": "\"does not work with current release\". Which release is that? 
Is the information valid?", "user_title": "Anonymous", "datetimeon": "2007-04-18T13:12:55", "link": "pygame.movie", "id": 497}, {"content": "FLAC support would be cool", "user_title": "Anonymous", "datetimeon": "2007-11-19T06:00:08", "link": "pygame.mixer.Sound", "id": 1142}, {"content": "fadeout does not block in linux either", "user_title": "Anonymous", "datetimeon": "2008-01-01T21:41:36", "link": "pygame.mixer.music.fadeout", "id": 1351}, {"content": "Watch out for this one, it has a major twist:\n(x,y) are coordinates in the referential of the rectangle.\nFor instance:\n>>> import pygame\n>>> r = pygame.Rect(32,32,132,132)\n>>> r.collidepoint(140,140)\n1", "user_title": "Anonymous", "datetimeon": "2007-04-20T17:57:54", "link": "Rect.collidepoint", "id": 502}, {"content": "Music will be resampled in some cases, not in others. When playing a 44.1kHz MP3, the default 22050 frequency works, but a 48kHz mp3 plays in less than half speed - 48000 or 24000 works then.\nTo handle this behaviour, you have to know the sample rate of your music files before playing them, and can't switch smoothly. Big bummer.", "user_title": "Anonymous", "datetimeon": "2008-01-13T07:45:26", "link": "pygame.mixer.music.play", "id": 1406}, {"content": "# This should draw a square with a hight of 20 pixels on a Surface:\nheight = 20\npygame.draw.rect(Surface, (255, 255, 255), (0, 0, height, height))", "user_title": "Anonymous", "datetimeon": "2007-12-06T15:43:11", "link": "pygame.draw.rect", "id": 1220}, {"content": "Anonymous[0], that's nonsense. The x,y coords are absolute coordinates. To illustrate:\n\n>>> r = pygame.rect.Rect(32, 32, 132, 132)\n>>> r.collidepoint(1,1)\n0\n>>> r.collidepoint(32,32)\n1", "user_title": "Anonymous", "datetimeon": "2007-11-23T19:43:53", "link": "Rect.collidepoint", "id": 1153}, {"content": "A little black cross. Mouse cursor is 8*8 Pixel, hotspot is at (4, 4). 
\nthe cross is (Read Binary):\n00011000 => 24 \n00011000\n00011000\n11100111 => 231\n11100111\n00011000\n00011000\nand has no AND-Mask. \n\npygame.mouse.set_cursor((8, 8), (4, 4), (24, 24, 24, 231, 231, 24, 24, 24), (0, 0, 0, 0, 0, 0, 0, 0))", "user_title": "Anonymous", "datetimeon": "2007-11-26T11:20:04", "link": "pygame.mouse.set_cursor", "id": 1157}, {"content": "excellent comments!\njorgen", "user_title": "Anonymous", "datetimeon": "2007-11-26T19:56:49", "link": "Surface.fill", "id": 1158}, {"content": "True. set_allowed(None) blocks all event types.\n\n- Another (initially skeptical) pygame user.", "user_title": "Anonymous", "datetimeon": "2007-11-26T22:35:30", "link": "pygame.event.set_blocked", "id": 1159}, {"content": "The first channels are reserved.\nFor example: pygame.mixer.Channel(0)", "user_title": "Anonymous", "datetimeon": "2007-11-27T12:11:21", "link": "pygame.mixer.set_reserved", "id": 1160}, {"content": "I have found that just watching for joystick events may not provide enough \ngranularity for fast-paced arcade games that require 100 millisecond changes.\nInstead of events, consider polling the status of the axes in the main game loop\n(or whatever your local equivalent is)", "user_title": "Anonymous", "datetimeon": "2007-12-08T11:08:32", "link": "pygame.joystick", "id": 1230}, {"content": "Actually, on my system [Ubunty Gutsy] it returned a list of None:\n>>> import pygame\n>>> pygame.font.get_fonts()\n[None]", "user_title": "Anonymous", "datetimeon": "2007-12-12T08:11:00", "link": "pygame.font.get_fonts", "id": 1242}, {"content": "How do you draw squares in pygame??", "user_title": "Anonymous", "datetimeon": "2007-11-29T13:19:20", "link": "pygame.draw.rect", "id": 1171}, {"content": "\"... will only effect the smaller area\" is probably meant to read \"... will only affect the smaller area\"", "user_title": "Anonymous", "datetimeon": "2007-12-20T07:32:23", "link": "pygame.Surface", "id": 1290}, {"content": "# Matthew N. 
Brown copyright 2007\n\n# Here is an example program in wich\n# balls hit walls and other balls:\n#\n# This program draws circles using: pygame.draw.circle\n#\n# You can copy this program on to\n# your own computer and run it.\n#\n\nimport os, sys\n\n ## INIT STUFF!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n#########################################################################################\ndef HE_HE_init():\n global screen, big_black_rect, APPLICATION_w_size, APPLICATION_z_size\n global WOW_pi_divided_by_180, WOW_180_divided_by_pi\n pygame.init()\n random.seed()\n APPLICATION_w_size = 700\n APPLICATION_z_size = 500\n ##### To close window while in fullscreen, press Esc while holding shift. #######\n screen = pygame.display.set_mode((APPLICATION_w_size, APPLICATION_z_size))\n #screen = pygame.display.set_mode((APPLICATION_w_size, APPLICATION_z_size), FULLSCREEN)\n pygame.display.set_caption(\"They bwounce off bwalls? Matthew N. Brown copyright 2007\")\n pygame.mouse.set_visible(1)\n big_black_rect = pygame.Surface(screen.get_size())\n big_black_rect = big_black_rect.convert()\n big_black_rect.fill((0, 0, 0))\n screen.blit(big_black_rect, (0, 0))\n #fonty = pygame.font.Font(None, 36)\n fonty = pygame.font.SysFont(\"Times New Roman\", 25)\n fonty.set_bold(0)\n IMAGEE = fonty.render('Loading . . 
.', 1, (0, 250, 10))\n screen.blit(IMAGEE, (100, 200)); del IMAGEE\n pygame.display.flip()\n pygame.mixer.init(22050, -16, True, 1024)\n WOW_pi_divided_by_180 = math.pi / 180.0\n WOW_180_divided_by_pi = 180.0 / math.pi\n set_up_key_variables()\n Lets_ROLL()\n ## INIT STUFF!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n#########################################################################################\n\n ## SAVE LEVEL?!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n#########################################################################################\ndef write_to_file_WEEE_STRANGE(file_namey, data):\n noq = '\\n'\n filey = open(file_namey, 'w')\n for d in data:\n filey.write( str(d) + noq)\n ## SAVE LEVEL?!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n#########################################################################################\n\n ## SMALL FUNCTIONS STUFF!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n#########################################################################################\n ### some functions: ###\ndef distance_2D (w1, z1, w2, z2):\n return math.sqrt(math.pow(float(w1) - float(w2), 2) + math.pow(float(z1) - float(z2), 2))\ndef rect_touching_rect(w1, z1, wol1, zol1, w2, z2, wol2, zol2):\n w2 -= w1\n z2 -= z1\n ww1 = -wol2\n zz1 = -zol2\n return (w2 > ww1 and w2 < wol1 and z2 > zz1 and z2 < zol1)\ndef rect_touching_rect2(w1, z1, wol1, zol1, w2, z2, wol2, zol2):\n w2 -= w1\n z2 -= z1\n ww1 = -wol2\n zz1 = -zol2\n return (w2 >= ww1 and w2 <= wol1 and z2 >= zz1 and z2 <= zol1)\ndef positive(n):\n if n < 0: n = -n; return n\ndef int_randy(range, add):\n return int((random.random() * range) + add)\ndef randy(range, add):\n return (random.random() * range) + add\ndef freaky_rect_switcharoo_2D(pw, pz, pwol, pzol, buffy_the_fat):\n buffy_the_fat2 = buffy_the_fat * 2\n if pwol > 0:\n gw = pw; gwol = pwol\n else:\n gw = pwol 
+ pw; gwol = pw - gw\n if pzol > 0:\n gz = pz; gzol = pzol\n else:\n gz = pzol + pz; gzol = pz - gz\n return [gw - buffy_the_fat, gz - buffy_the_fat, gwol + buffy_the_fat2, gzol + buffy_the_fat2]\ndef points_rotated_by_angle_2D(points_wz, axis_w, axis_z, angle):\n rotated_points_wz = []\n angle = -angle -90\n angle_times_WOW_pi_divided_by_180 = angle * WOW_pi_divided_by_180\n c1 = math.cos(angle_times_WOW_pi_divided_by_180)\n s1 = math.sin(angle_times_WOW_pi_divided_by_180)\n for pointy in points_wz:\n xt = pointy[0] - axis_w\n yt = pointy[1] - axis_z\n rotated_points_wz += [(-xt * s1) + (yt * c1) + axis_w, (-xt * c1) - (yt * s1) + axis_z]\n return rotated_points_wz\ndef point_rotated_by_angle_2D(point_w, point_z, axis_w, axis_z, angle):\n angle = -angle -90\n angle_times_WOW_pi_divided_by_180 = angle * WOW_pi_divided_by_180\n c1 = math.cos(angle_times_WOW_pi_divided_by_180)\n s1 = math.sin(angle_times_WOW_pi_divided_by_180)\n xt = point_w - axis_w\n yt = point_z - axis_z\n return (-xt * s1) + (yt * c1) + axis_w, (-xt * c1) - (yt * s1) + axis_z\ndef arc_tangent_2D(point_w, point_z):\n return math.atan2(point_w, point_z) * WOW_180_divided_by_pi + 180\ndef arc_tangent_2D_2(point_w, point_z):\n return -math.atan2(point_w, point_z) * WOW_180_divided_by_pi + 180\ndef ball_to_ball_wzkol_bounce(V1, m1, V2, m2, ball1_is_to_the_left):\n if (ball1_is_to_the_left and V1 >= V2) or (not ball1_is_to_the_left and V1 <= V2):\n Rv1 = V1 - V2\n Rv2 = 0 #V2 - V2\n NewV1 = ((m1 - m2) / float(m1 + m2)) * float(Rv1) + V2\n NewV2 = (( 2 * m1) / float(m1 + m2)) * float(Rv1) + V2\n return NewV1, NewV2\n else:\n return V1, V2\ndef Find_where_ball_stops_on_line_w(ball_w, ball_z, ball_wol, ball_zol, ball_rad, line_w, line_rad):\n did_collide = False\n totally = ball_rad + line_rad\n b1 = line_w + totally\n b2 = line_w - totally\n New_ball_w = ball_w + ball_wol\n New_ball_z = ball_z + ball_zol\n if ball_w >= b1 and ball_wol < 0 and New_ball_w < b1: New_ball_w = b1; did_collide = True\n elif 
ball_w <= b2 and ball_wol > 0 and New_ball_w > b2: New_ball_w = b2; did_collide = True\n else:\n if ball_w > b2 and ball_w < b1:\n if ball_w > line_w and ball_wol < 0:\n New_ball_w = ball_w; New_ball_z = ball_z\n did_collide = True\n elif ball_w < line_w and ball_wol > 0:\n New_ball_w = ball_w; New_ball_z = ball_z\n did_collide = True\n return New_ball_w, New_ball_z, did_collide\n New_ball_z = (float(ball_zol) / float(ball_wol) * float(New_ball_w - ball_w)) + float(ball_z)\n return New_ball_w, New_ball_z, did_collide\ndef find_where_ball_collides_on_a_wall(\n ball_w, ball_z,\n ball_wol, ball_zol,\n ball_rad,\n wall_type,\n wall_w1, wall_z1,\n wall_w2, wall_z2,\n wall_rad):\n toetoadly = ball_rad + wall_rad\n did_collide = False\n New_ball_w = ball_w + ball_wol\n New_ball_z = ball_z + ball_zol\n angle_hit_at = None\n Relate_ball_w = ball_w - wall_w1\n Relate_ball_z = ball_z - wall_z1\n Relate_wall_w2 = wall_w2 - wall_w1\n Relate_wall_z2 = wall_z2 - wall_z1\n arc_tangeriney = arc_tangent_2D(Relate_wall_w2, Relate_wall_z2)\n Rotate_Relate_ball_w, Rotate_Relate_ball_z, Rotate_Relate_wall_w2, Rotate_Relate_wall_z2 = points_rotated_by_angle_2D(((Relate_ball_w, Relate_ball_z), (Relate_wall_w2, Relate_wall_z2)), 0, 0, arc_tangeriney)\n Rotate_ball_wol, Rotate_ball_zol = point_rotated_by_angle_2D(ball_wol, ball_zol, 0, 0, arc_tangeriney)\n Rotate_Relate_ball_collide_w, Rotate_Relate_ball_collide_z, did_hit_weird_line = Find_where_ball_stops_on_line_w(Rotate_Relate_ball_w, Rotate_Relate_ball_z, Rotate_ball_wol, Rotate_ball_zol, ball_rad, 0, wall_rad)\n if Rotate_Relate_ball_w > -toetoadly and Rotate_Relate_ball_w < toetoadly:\n HE_HE_strange_popper_z = Rotate_Relate_ball_z\n else:\n HE_HE_strange_popper_z = Rotate_Relate_ball_collide_z\n Rotate_angle_hit_at = None\n if HE_HE_strange_popper_z < Rotate_Relate_wall_z2:\n if ball_is_going_towards_point(Rotate_Relate_ball_w, Rotate_Relate_ball_z, Rotate_ball_wol, Rotate_ball_zol, 0, Rotate_Relate_wall_z2):\n p1_touched, 
p1_collide_w, p1_collide_z, p1_angle_hit_at = find_where_ball_collides_on_another_ball(Rotate_Relate_ball_w, Rotate_Relate_ball_z, Rotate_ball_wol, Rotate_ball_zol, ball_rad, 0, Rotate_Relate_wall_z2, wall_rad)\n if p1_touched:\n Rotate_Relate_ball_collide_w = p1_collide_w\n Rotate_Relate_ball_collide_z = p1_collide_z\n Rotate_angle_hit_at = p1_angle_hit_at\n did_collide = True\n elif HE_HE_strange_popper_z > 0:\n if ball_is_going_towards_point(Rotate_Relate_ball_w, Rotate_Relate_ball_z, Rotate_ball_wol, Rotate_ball_zol, 0, 0):\n p2_touched, p2_collide_w, p2_collide_z, p2_angle_hit_at = find_where_ball_collides_on_another_ball(Rotate_Relate_ball_w, Rotate_Relate_ball_z, Rotate_ball_wol, Rotate_ball_zol, ball_rad, 0, 0, wall_rad)\n if p2_touched:\n Rotate_Relate_ball_collide_w = p2_collide_w\n Rotate_Relate_ball_collide_z = p2_collide_z\n Rotate_angle_hit_at = p2_angle_hit_at\n did_collide = True\n else:\n if did_hit_weird_line:\n did_collide = True\n if Rotate_Relate_ball_collide_w < 0: Rotate_angle_hit_at = 90\n else: Rotate_angle_hit_at = 270\n if did_collide:\n arc_tangeriney_2 = -arc_tangeriney\n angle_hit_at = Rotate_angle_hit_at + arc_tangeriney\n New_ball_w, New_ball_z = point_rotated_by_angle_2D(Rotate_Relate_ball_collide_w, Rotate_Relate_ball_collide_z, 0, 0, arc_tangeriney_2)\n New_ball_w += wall_w1\n New_ball_z += wall_z1\n return did_collide, New_ball_w, New_ball_z, angle_hit_at #, is_moving_towards\ndef zol_at_angle(wol, zol, angle):\n rotated_wol, rotated_zol = point_rotated_by_angle_2D(wol, zol, 0, 0, angle)\n return rotated_zol\ndef wzol_bounce_at_angle(wol, zol, angle, multi):\n rotated_wol, rotated_zol = point_rotated_by_angle_2D(wol, zol, 0, 0, angle)\n if rotated_zol > 0: rotated_zol = -rotated_zol * multi\n return point_rotated_by_angle_2D(rotated_wol, rotated_zol, 0, 0, -angle)\ndef ball_is_going_towards_point(ball_w, ball_z, ball_wol, ball_zol, point_w, point_z):\n angley = arc_tangent_2D(ball_w - point_w, ball_z - point_z)\n rotated_wol, 
rotated_zol = point_rotated_by_angle_2D(ball_wol, ball_zol, 0, 0, angley)\n return rotated_zol > 0\ndef find_where_ball_collides_on_another_ball (\n ball1_w, ball1_z,\n ball1_wol, ball1_zol,\n ball1_rad,\n ball2_w, ball2_z,\n ball2_rad\n ):\n totally = ball1_rad + ball2_rad\n dis_from_each_other = math.sqrt(math.pow(float(ball1_w) - float(ball2_w), 2) + math.pow(float(ball1_z) - float(ball2_z), 2))\n if dis_from_each_other < totally:\n angley = arc_tangent_2D(ball1_w - ball2_w, ball1_z - ball2_z)\n return True, ball1_w, ball1_z, angley\n else:\n they_did_touch = False\n New_ball1_w = ball1_w + ball1_wol\n New_ball1_z = ball1_z + ball1_zol\n angle_hit_at = None\n Relate_ball1_w = ball1_w - ball2_w\n Relate_ball1_z = ball1_z - ball2_z\n Relate_ball2_w = 0\n Relate_ball2_z = 0\n arcy_tangeriney = arc_tangent_2D(ball1_wol, ball1_zol)\n Rotated_Relate_ball1_w, Rotated_Relate_ball1_z, Rotated_ball1_wol, Rotated_ball1_zol = points_rotated_by_angle_2D(((Relate_ball1_w, Relate_ball1_z), (ball1_wol, ball1_zol)), 0, 0, arcy_tangeriney)\n did_collidey = False\n if Rotated_Relate_ball1_z > 0 and (Rotated_Relate_ball1_w > -totally and Rotated_Relate_ball1_w < totally):\n Rotated_Relate_ball1_collide_w = Rotated_Relate_ball1_w # + Rotated_ball1_wol\n HE_HE = math.pow(Rotated_Relate_ball1_w, 2) - math.pow(totally, 2)\n if HE_HE < 0: HE_HE = -HE_HE\n Rotated_Relate_ball1_collide_z = math.sqrt(HE_HE)\n Rotated_Relate_ball1_z__PLUS__Rotated_ball1_zol = Rotated_Relate_ball1_z + Rotated_ball1_zol\n if Rotated_Relate_ball1_collide_z < Rotated_Relate_ball1_z__PLUS__Rotated_ball1_zol:\n collision_wol = Rotated_ball1_wol\n collision_zol = Rotated_ball1_zol\n Rotated_Relate_ball1_collide_z = Rotated_Relate_ball1_z__PLUS__Rotated_ball1_zol\n angley_to_hit = None\n else:\n did_collidey = True\n they_did_touch = True\n angley_to_hit = arc_tangent_2D(Rotated_Relate_ball1_collide_w, Rotated_Relate_ball1_collide_z)\n else:\n angley_to_hit = None\n collision_wol = Rotated_ball1_wol\n collision_zol 
= Rotated_ball1_zol\n Rotated_Relate_ball1_collide_w = Rotated_Relate_ball1_w + Rotated_ball1_wol\n Rotated_Relate_ball1_collide_z = Rotated_Relate_ball1_z + Rotated_ball1_zol\n if did_collidey:\n arcy_tangeriney_2 = -arcy_tangeriney\n angle_hit_at = angley_to_hit + arcy_tangeriney\n New_ball1_w, New_ball1_z = point_rotated_by_angle_2D(Rotated_Relate_ball1_collide_w, Rotated_Relate_ball1_collide_z, 0, 0, arcy_tangeriney_2)\n New_ball1_w += ball2_w\n New_ball1_z += ball2_z\n return they_did_touch, New_ball1_w, New_ball1_z, angle_hit_at #, New_ball1_wol, New_ball1_zol\n ### some functions: ###\n\n ## GRAPHICS STUFF!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n#########################################################################################\ndef chilly_font(size):\n fonti = pygame.font.SysFont(\"Times New Roman\", size)\n return fonti\ndef chilly_font_Italicy(size):\n fonti = pygame.font.SysFont(\"Times New Roman\", size)\n fonti.set_italic(1)\n return fonti\ndef draw_loading_messagey(stringy): # Draw loading message\n pygame.mouse.set_visible(1)\n fonty = chilly_font(26)\n IMAGEE = fonty.render(stringy, 0, (0, 255, 0), (0, 0, 0))\n screen.blit(IMAGEE, (200, 250))\n del IMAGEE\n pygame.display.flip()\n ## GRAPHICS STUFF: ##\n#########################################################################################\n\n ## KEYS AND MOUSE STUFF!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n#########################################################################################\ndef set_up_key_variables():\n global ky_held, ky_first_held, ky_time_last_pressed\n global mowse_w, mowse_z, mowse_inn\n global mowse_left_pressed, mowse_right_pressed, mowse_left_held, mowse_right_held\n mowse_left_held = False\n mowse_right_held = False\n mowse_left_pressed = False\n mowse_right_pressed = False\n mowse_w = 0\n mowse_z = 0\n mowse_inn = 0\n ky_held = []\n ky_first_held = []\n ky_time_last_pressed = []\n m = -1\n 
    while m < 500:
        m += 1
        ky_held += [0]
        ky_first_held += [0]
        ky_time_last_pressed += [0]
def clear_all_kys():
    # Reset every key flag and all mouse-button state.
    global mowse_left_pressed, mowse_right_pressed, mowse_left_held, mowse_right_held
    mowse_left_held = False
    mowse_right_held = False
    mowse_left_pressed = False
    mowse_right_pressed = False
    m = -1
    while (m < 500):
        m += 1; ky_held[m] = 0; ky_first_held[m] = 0; ky_time_last_pressed[m] = 0
def clear_these_ky_first_held(list_keys_numbers):
    # Clear the "first press" latch for just the listed key numbers.
    for k in list_keys_numbers:
        ky_first_held[k] = 0
def clear_first_held_kys():
    # Clear every "first press" latch.
    m = -1
    while (m < 500):
        m += 1; ky_first_held[m] = 0
def old_style_ky(n):
    # Typewriter-style repeat: true on the initial press, then again while the
    # key stays held once 0.3 s have elapsed since the press.
    return (ky_first_held_CEV(n) or (ky_held[n] and ky_time_last_pressed[n] < time.time() - .3))
def ky_first_held_CEV(n):
    # Consume-and-report: returns 1 exactly once per fresh press of key n,
    # clearing the latch as a side effect.
    if (ky_first_held[n]):
        ky_first_held[n] = 0; return 1
    else:
        return 0
def mowse_in_rect (w, z, wol, zol):
    # Is the mouse inside the rect (w, z, width, height)?  Edges inclusive.
    return (mowse_w >= w and mowse_z >= z and mowse_w <= w + wol and mowse_z <= z + zol)
def mowse_in_circle (w, z, rad):
    # Cheap bounding-box reject first, then the exact distance test.
    dia = rad * 2
    if mowse_in_rect(w - rad, z - rad, w + dia, z + dia):
        return (distance_2D(mowse_w, mowse_z, w, z) < rad)
    else:
        return 0
    ## CHECK FOR: KEYBOARD, MOUSE, JOYSTICK, AND OTHERY INPUTY: ##
def check_for_keys():
    # Drain the pygame event queue, updating the global key/mouse state.
    # Escape held together with either Alt key (303/304) quits by zeroing `loopy`.
    global mowse_w, mowse_z, mowse_inn, mowse_left_pressed, mowse_right_pressed, mowse_left_held, mowse_right_held, APPLICATION_w_size, APPLICATION_z_size
    global loopy
    global unicodey
    mowse_left_pressed = False
    mowse_right_pressed = False
    unicodey = ''
    for e in pygame.event.get():
        if e.type == QUIT:
            loopy = 0
        elif e.type == ACTIVEEVENT:
            # Track whether the mouse/focus is inside the window.
            mowse_inn = (e.gain and (e.state == 1 or e.state == 6))
        elif e.type == KEYDOWN:
            ky_held[e.key] = 1
            ky_first_held[e.key] = 1
            ky_time_last_pressed[e.key] = time.time()
            unicodey = e.unicode
        elif e.type == KEYUP:
            ky_held[e.key] = 0
        elif e.type == MOUSEMOTION:
            mowse_w = e.pos[0]
            mowse_z = e.pos[1]
            if mowse_w >= 0 and mowse_w <= APPLICATION_w_size and mowse_z >= 0 and mowse_z <= APPLICATION_z_size:
                mowse_inn = 1
            else:
                mowse_inn = 0
        elif e.type == MOUSEBUTTONUP:
            if e.button == 1: mowse_left_held = 0
            if e.button == 3: mowse_right_held = 0
        elif e.type == MOUSEBUTTONDOWN:
            mowse_left_pressed = (e.button == 1)
            mowse_right_pressed = (e.button == 3)
            mowse_left_held = mowse_left_held or e.button == 1
            mowse_right_held = mowse_right_held or e.button == 3
        elif e.type == JOYAXISMOTION:
            pass
        elif e.type == JOYBALLMOTION:
            pass
        elif e.type == JOYHATMOTION:
            pass
        elif e.type == JOYBUTTONUP:
            pass
        elif e.type == JOYBUTTONDOWN:
            pass
        elif e.type == VIDEORESIZE:
            # Resizing is not actually supported; the handler below was
            # abandoned (kept commented out as in the original).
            print e
            print "What happened!?"
            #global big_black_rect, screen
            #APPLICATION_w_size = e.size[0]
            #APPLICATION_z_size = e.size[1]
            #screen = pygame.display.set_mode((APPLICATION_w_size, APPLICATION_z_size))#, RESIZABLE)
            #big_black_rect = pygame.Surface(screen.get_size())
            #big_black_rect = big_black_rect.convert()
            #big_black_rect.fill((0, 100, 200))
        elif e.type == VIDEOEXPOSE:
            pass
        elif e.type == USEREVENT:
            pass
    if ky_held[27] and (ky_held[303] or ky_held[304]): loopy = 0
    ## CHECK FOR: KEYBOARD, MOUSE, JOYSTICK, AND OTHERY INPUTY: ##
    ## KEYS AND MOUSE STUFF!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#########################################################################################

#######################################################################################
#######################################################################################
#######################################################################################
#######################################################################################
#######################################################################################


    ## MAIN LOOPY STUFF!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#########################################################################################
def ball_is_going_towards_ball(Bn1, Bn2):
    # True when ball Bn1's velocity has a component toward ball Bn2's centre.
    global ball_max, ball_w, ball_z, ball_wol, ball_zol, ball_rad, ball_color, ball_mass, ball_RECT
    arc_tangerine = arc_tangent_2D(ball_w[Bn1] - ball_w[Bn2], ball_z[Bn1] - ball_z[Bn2])
    woly1, zoly1 = point_rotated_by_angle_2D(ball_wol[Bn1], ball_zol[Bn1], 0, 0, arc_tangerine)
    return zoly1 > 0
def ball_is_relatively_going_towards_ball(Bn1, Bn2):
    # Like ball_is_going_towards_ball, but in Bn2's frame: Bn1 must be closing
    # the gap faster than Bn2 is retreating.
    global ball_max, ball_w, ball_z, ball_wol, ball_zol, ball_rad, ball_color, ball_mass, ball_RECT
    arc_tangerine = arc_tangent_2D(ball_w[Bn1] - ball_w[Bn2], ball_z[Bn1] - ball_z[Bn2])
    woly1, zoly1, woly2, zoly2 = points_rotated_by_angle_2D(((ball_wol[Bn1], ball_zol[Bn1]), (ball_wol[Bn2], ball_zol[Bn2])), 0, 0, arc_tangerine)
    return zoly1 > 0 and zoly1 > zoly2 # zoly2 < zoly1 or zoly2 > zoly1 # zoly1 + zoly2 > 0
    #return zoly1 > 0 or zoly1 > zoly2
def Make_two_balls_hit_at_angle(Bn1, Bn2, angle):
    # Resolve an elastic collision between balls Bn1 and Bn2 along `angle`:
    # rotate both velocities into the collision frame, exchange the normal
    # components (scaled by the global bounce_friction), rotate back, and
    # refresh both balls' cached rects.
    global bounce_friction
    #print angle
    global ball_max, ball_w, ball_z, ball_wol, ball_zol, ball_rad, ball_color, ball_mass, ball_RECT
    woly1, zoly1, woly2, zoly2 = points_rotated_by_angle_2D(((ball_wol[Bn1], ball_zol[Bn1]), (ball_wol[Bn2], ball_zol[Bn2])), 0, 0, angle)
    V1 = zoly1 * bounce_friction
    V2 = zoly2 * bounce_friction
    zoly1, zoly2 = ball_to_ball_wzkol_bounce(V1, ball_mass[Bn1], V2, ball_mass[Bn2], True)
    ball_wol[Bn1], ball_zol[Bn1], ball_wol[Bn2], ball_zol[Bn2] = points_rotated_by_angle_2D(((woly1, zoly1), (woly2, zoly2)), 0, 0, -angle)
    updatey_ball_quick_rect(Bn1)
    updatey_ball_quick_rect(Bn2)
def updatey_ball_quick_rect(B):
    # Refresh ball B's static bounding square and its swept motion rect
    # (both padded by a couple of pixels).
    dia = ball_rad[B] * 2 + 4
    ball_squar[B] = [ball_w[B] - ball_rad[B] - 2, ball_z[B] - ball_rad[B] - 2, dia, dia]
    ball_RECT[B] = freaky_rect_switcharoo_2D(ball_w[B], ball_z[B], ball_wol[B], ball_zol[B], ball_rad[B] + 4)
def minus_ball_thing(n):
    # Remove ball n from every parallel ball_* array.
    global ball_max, ball_w, ball_z, ball_wol, ball_zol, ball_rad, ball_color, ball_angle, ball_angleol, ball_squar, ball_mass, ball_RECT
    if ball_max >= 0:
        del ball_w [n]
        del ball_z [n]
        del ball_wol [n]
        del ball_zol [n]
        del ball_rad [n]
        del ball_color [n]
        del ball_squar [n]
        del ball_angle [n]
        del ball_angleol[n]
        del ball_mass [n]
        del ball_RECT [n]
        ball_max -= 1
def add_ball_thing(w, z, wol, zol, rad, color, angle, angleol, mass_thing, rect_thing):
    # Append a new ball to the parallel ball_* arrays.
    # mass_thing == True  -> derive the mass from the radius; otherwise the
    #                        given value is stored verbatim.
    # rect_thing == True  -> compute the swept bounding rect; otherwise the
    #                        given value is stored verbatim.
    global ball_max, ball_w, ball_z, ball_wol, ball_zol, ball_rad, ball_color, ball_squar, ball_angle, ball_angleol, ball_mass, ball_RECT
    ball_max += 1
    ball_w += [w]
    ball_z += [z]
    ball_wol += [wol]
    ball_zol += [zol]
    ball_rad += [rad]
    ball_color += [color]
    ball_angle += [angle]
    ball_angleol += [angleol]
    dia = rad * 2
    ball_squar += [[w - rad, z - rad, dia, dia]]
    if mass_thing == True:
        # NOTE(review): under Python 2, 4 / 3 floor-divides to 1, so this is
        # effectively pi * r**3, not the sphere volume (4/3)*pi*r**3.  The
        # saved level data in reset_stuff matches pi * r**3 (e.g. rad 15 ->
        # 10602.875...), and only mass *ratios* enter the collision response,
        # so do NOT "fix" this without regenerating the stored levels.
        ball_mass += [4 / 3 * math.pi * rad * rad * rad]
    else:
        ball_mass += [mass_thing]
    if rect_thing == True:
        ball_RECT += [None]
        updatey_ball_quick_rect(ball_max)
        #ball_RECT += [freaky_rect_switcharoo_2D(w, z, wol, zol, rad)]
    else:
        ball_RECT += [rect_thing]
def minus_wall_thing(WAL):
    # Remove wall WAL from every parallel wall_* array.
    global wall_max, wall_type, wall_w1, wall_z1, wall_w2, wall_z2, wall_rad, wall_color, wall_RECT
    if wall_max >= 0:
        del wall_type [WAL]
        del wall_w1 [WAL]
        del wall_z1 [WAL]
        del wall_w2 [WAL]
        del wall_z2 [WAL]
        del wall_rad [WAL]
        del wall_color [WAL]
        del wall_RECT [WAL]
        wall_max -= 1
def add_wall_thing(type, w1, z1, w2, z2, rad, color_thing, rect_thing):
    # Append a wall segment (w1, z1)-(w2, z2) of radius `rad`.
    # color_thing == True -> pick a default colour per wall type;
    # rect_thing == True  -> compute the padded bounding rect.
    # (Parameter `type` shadows the builtin; kept for interface compatibility.)
    global wall_max, wall_type, wall_w1, wall_z1, wall_w2, wall_z2, wall_rad, wall_color, wall_RECT
    wall_max += 1
    wall_type += [type]
    wall_w1 += [w1]
    wall_z1 += [z1]
    wall_w2 += [w2]
    wall_z2 += [z2]
    wall_rad += [rad]
    if color_thing == True:
        if type == 1: color_thing = (220, 220, 220)
        elif type == 2: color_thing = (240, 140, 130)
        elif type == 3: color_thing = (100, 255, 100)
        elif type == 4: color_thing = (255, 100, 100)
        elif type == 5: color_thing = (100, 100, 255)
    wall_color += [color_thing]
    if rect_thing == True:
        wall_RECT += [freaky_rect_switcharoo_2D(w1 - 2, z1 - 2, w2 - w1 + 4, z2 - z1 + 4, rad)]
    else:
        wall_RECT += [rect_thing]
def reset_stuff():
    # (Re)build the ball_* / wall_* arrays for the current level number `levely`.
    # Level 1 is constructed procedurally; levels 2-5 restore snapshotted state.
    global ball_max, ball_w, ball_z, ball_wol, ball_zol, ball_rad, ball_color, ball_angle, ball_angleol, ball_squar, ball_mass, ball_RECT
    global wall_max, wall_type, wall_w1, wall_z1, wall_w2, wall_z2, wall_rad, wall_color, wall_RECT
    global levely
    if levely == 1:
        ball_max = -1
        ball_w = []
        ball_z = []
        ball_wol = []
        ball_zol = []
        ball_rad = []
        ball_color = []
        ball_angle = []
        ball_angleol = []
        ball_squar = []
        ball_mass = []
        ball_RECT = []
        #add_ball_thing(350, 300, 0, 0, 18, (230, 230, 250), 0, 0, True, True)
        #add_ball_thing(150, 400, 0, 0, 40, (220, 210, 255), 0, 0, True, True)
        #add_ball_thing(300, 150, 0, 0, 62, (110, 106, 255), 0, 0, True, True)
        add_ball_thing(220, 200, 0, 0, 50, (180, 226, 255), 180, 0, True, True)
        wall_max = -1
        wall_type = []
        wall_w1 = []
        wall_z1 = []
        wall_w2 = []
        wall_z2 = []
        wall_rad = []
        wall_color = []
        wall_RECT = []
        add_wall_thing(1, 160, 250, 300, 270, 1, True, True)
        add_wall_thing(1, 500, 270, 600, 310, 1, True, True)
        add_wall_thing(1, 200, 450, 600, 450, 10, True, True)
        add_wall_thing(1, 300, 350, 400, 370, 5, True, True)
        add_wall_thing(1, 300, 100, 400, 100, 20, True, True)
        add_wall_thing(1, 650, 140, 700, 200, 6, True, True)
        add_wall_thing(1, 650, 140, 600, 40, 6, True, True)
        add_wall_thing(1, 150, 340, 150, 340, 30, True, True)
        add_wall_thing(1, 40, 200, 40, 200, 30, True, True)
        add_wall_thing(1, 30, 30, 30, 30, 10, True, True)
        add_wall_thing(1, 30, 30, 30, 30, 10, True, True)
        add_wall_thing(1, 30, 30, 30, 30, 10, True, True)
        add_wall_thing(1, 30, 30, 30, 30, 10, True, True)
        add_wall_thing(1, 30, 30, 30, 30, 10, True, True)
        # Screen-edge boundary walls.
        add_wall_thing(1, 0, 0, APPLICATION_w_size, 0, 5, True, True)
        add_wall_thing(1, 0, 0, 0, APPLICATION_z_size, 5, True, True)
        add_wall_thing(1, 0, APPLICATION_z_size, APPLICATION_w_size, APPLICATION_z_size, 5, True, True)
        add_wall_thing(1, APPLICATION_w_size, 0, 
APPLICATION_w_size, APPLICATION_z_size, 5, True, True)\n elif levely == 2:\n ball_max = 1\n ball_w = [323.62638473709342, 384.72135876760257]\n ball_z = [298.67896746658624, 109.24043981044279]\n ball_wol = [-0.27396932987421913, 7.133321987715842]\n ball_zol = [-0.38420912894762504, 1.6564147490246901]\n ball_rad = [15, 28]\n ball_color = [(137, 244, 234), (138, 221, 217)]\n ball_angle = [51.908780125668613, 294.77431504891717]\n ball_angleol = [-1.2400074168431123, 17.698615258690229]\n ball_squar = [[306.62638473709342, 281.67896746658624, 34, 34], [354.72135876760257, 79.240439810442794, 60, 60]]\n ball_mass = [10602.875205865552, 68964.24193160313]\n ball_RECT = [[304.35241540721921, 279.2947583376386, 38.273969329874205, 38.384209128947646], [352.72135876760257, 77.240439810442794, 71.133321987715846, 65.656414749024691]]\n wall_max = 17\n wall_type = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n wall_w1 = [189, 290, 166, 14, 697, 562, 643, 3, 0, 223, 117, 695, 497, 497, 0, 0, 0, 700]\n wall_z1 = [284, 316, 436, 499, 446, 0, 128, 225, 106, 310, 155, 210, 159, 159, 0, 0, 500, 0]\n wall_w2 = [222, 446, 697, 157, 377, 681, 679, 49, 383, 287, 5, 448, 376, 546, 700, 0, 700, 700]\n wall_z2 = [301, 314, 478, 432, 487, 99, 98, 416, 171, 324, 225, 323, 147, 179, 0, 500, 500, 500]\n wall_rad = [1, 1, 10, 5, 20, 6, 6, 30, 30, 10, 10, 10, 10, 10, 5, 5, 5, 5]\n wall_color = [(220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220)]\n wall_RECT = [[186, 281, 39, 23], [287, 313, 162, 4], [154, 424, 555, 66], [7, 429, 157, 73], [359, 424, 356, 85], [554, -8, 135, 115], [635, 94, 52, 38], [-29, 193, 110, 255], [-32, 74, 447, 129], [211, 298, 88, 38], [-3, 143, 128, 94], [440, 198, 263, 137], [368, 139, 137, 28], [485, 
147, 73, 44], [-7, -7, 714, 14], [-7, -7, 14, 514], [-7, 493, 714, 14], [693, -7, 14, 514]]\n elif levely == 3:\n ball_max = 2\n ball_w = [425.0, 492.31837629165733, 98.512856261065167]\n ball_z = [126.0, 422.24553778829392, 430.4902396760661]\n ball_wol = [-12.0, 2.6816237083426699, 6.487143738934833]\n ball_zol = [-3.0, -1.245537788293916, -21.490239676066096]\n ball_rad = [15, 28, 21]\n ball_color = [(137, 244, 234), (138, 221, 217), (136, 235, 236)]\n ball_angle = [93.833857527468922, 75.681742520058592, 323.2915629772819]\n ball_angleol = [-0.87655530207419896, 0.30220691772972269, 1.1825329351046094]\n ball_squar = [[408.0, 109.0, 34, 34], [462.31837629165733, 392.24553778829392, 60, 60], [75.512856261065167, 407.4902396760661, 46, 46]]\n ball_mass = [10602.875205865552, 68964.24193160313, 29094.28956489508]\n ball_RECT = [[394.0, 104.0, 50.0, 41.0], [460.31837629165733, 389.0, 66.68162370834267, 65.245537788293916], [73.512856261065167, 384.0, 56.487143738934833, 71.490239676066096]]\n wall_max = 17\n wall_type = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n wall_w1 = [189, 290, 166, 14, 697, 562, 643, 3, 0, 223, 117, 695, 497, 497, 0, 0, 0, 700]\n wall_z1 = [284, 316, 436, 499, 446, 0, 128, 225, 106, 310, 155, 210, 159, 159, 0, 0, 500, 0]\n wall_w2 = [222, 446, 697, 157, 377, 681, 679, 49, 383, 287, 5, 480, 376, 546, 700, 0, 700, 700]\n wall_z2 = [301, 314, 478, 432, 487, 99, 98, 416, 171, 324, 225, 325, 147, 179, 0, 500, 500, 500]\n wall_rad = [1, 1, 10, 5, 20, 6, 6, 30, 30, 10, 10, 10, 10, 10, 5, 5, 5, 5]\n wall_color = [(220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220)]\n wall_RECT = [[186, 281, 39, 23], [287, 313, 162, 4], [154, 424, 555, 66], [7, 429, 157, 73], [359, 424, 356, 85], [554, -8, 
135, 115], [635, 94, 52, 38], [-29, 193, 110, 255], [-32, 74, 447, 129], [211, 298, 88, 38], [-3, 143, 128, 94], [472, 198, 231, 139], [368, 139, 137, 28], [485, 147, 73, 44], [-7, -7, 714, 14], [-7, -7, 14, 514], [-7, 493, 714, 14], [693, -7, 14, 514]]\n elif levely == 4:\n ball_max = 15\n ball_w = [60.722554805471077, 452.1573538490178, 80.244575784959252, 38.90004863123329, 526.62934623960155, 561.76077439217966, 51.00641675327735, 476.21179724447387, 74.019911348330012, 104.13986580489509, 77.672785567417591, 97.908669417930454, 492.31309851379422, 107.55531577343871, 25.677250467589708, 408.28461679522843]\n ball_z = [123.53309256655999, 426.85562864865636, 446.98025958602022, 145.55077237791539, 432.36880616921724, 419.52605372165829, 185.76812996010321, 398.60172712183214, 227.90675893521163, 330.14246403509031, 280.7917430301959, 382.77488932204739, 431.7008452670733, 426.72875393133694, 108.86075181750218, 420.07030113046562]\n ball_wol = [0.58974898201312453, 0.29357826379544644, -0.7453458908661944, -0.26977452024547638, -0.13077525550683244, 0.35703289164546842, 0.25581836770201244, -0.16968524576896582, -0.96858759109981474, 0.020541831638986374, 0.21623640500730243, 0.16869582232640204, -0.32778500262837312, -1.0423733543425631, 0.078384075232750969, 0.070169924397188832]\n ball_zol = [2.5202528491916918, -0.067935899483811957, 1.0209651395893582, 1.5519551597452736, 0.37674466231734333, 0.7179102343171756, 1.2098558443319702, -0.21937811619009639, 1.6292902773669935, 0.95366629391114355, 0.99836183708718151, 0.65985328138026611, 0.72997687518744558, -0.33325230167901332, 1.8584237502130836, 1.1180771215980612]\n ball_rad = [12, 20, 14, 19, 14, 23, 23, 13, 25, 28, 28, 25, 20, 20, 20, 24]\n ball_color = [(132, 202, 208), (130, 220, 228), (133, 230, 241), (133, 200, 224), (138, 244, 248), (134, 176, 212), (132, 246, 206), (136, 191, 201), (130, 247, 204), (135, 190, 248), (136, 196, 244), (137, 246, 211), (132, 176, 232), (139, 200, 204), (135, 204, 
206), (137, 234, 248)]\n ball_angle = [250.64218161257492, 228.50285566079282, 169.93029421257162, 93.92451866434908, 160.53385135173758, 101.81391124171368, 58.682544988047297, 42.833392250734839, 278.96920717602609, 157.52451729820555, 104.82808146227505, 319.29094377305643, 8.3988066326588289, 61.303383965779759, 262.01723832271352, 187.75853100116501]\n ball_angleol = [-11.145052526574146, 0.73910476098485844, -1.916370769365741, 7.8109934129380036, 1.2564621818214414, -0.21633250902344123, 0.96094866236460608, 18.696614939999161, -2.7765510174821686, -0.46915418861267033, 1.3615127061730832, 0.55215997018655683, 0.83188571652892485, -2.1096665563746759, 4.3536534603644128, 0.77565328887569629]\n ball_squar = [[46.722554805471077, 109.53309256655999, 28, 28], [430.1573538490178, 404.85562864865636, 44, 44], [64.244575784959252, 430.98025958602022, 32, 32], [17.90004863123329, 124.55077237791539, 42, 42], [510.62934623960155, 416.36880616921724, 32, 32], [536.76077439217966, 394.52605372165829, 50, 50], [26.00641675327735, 160.76812996010321, 50, 50], [461.21179724447387, 383.60172712183214, 30, 30], [47.019911348330012, 200.90675893521163, 54, 54], [74.139865804895095, 300.14246403509031, 60, 60], [47.672785567417591, 250.7917430301959, 60, 60], [70.908669417930454, 355.77488932204739, 54, 54], [470.31309851379422, 409.7008452670733, 44, 44], [85.555315773438707, 404.72875393133694, 44, 44], [3.6772504675897082, 86.860751817502177, 44, 44], [382.28461679522843, 394.07030113046562, 52, 52]]\n ball_mass = [5428.6721054031623, 25132.741228718347, 8620.5302414503913, 21548.184010972389, 8620.5302414503913, 38223.757816227015, 38223.757816227015, 6902.0790599367756, 49087.385212340516, 68964.24193160313, 68964.24193160313, 49087.385212340516, 25132.741228718347, 25132.741228718347, 25132.741228718347, 43429.376843225298]\n tempy = [[24.00641675327735, 158.76812996010321, 54.255818367702012, 55.209855844331969], [459.04211199870491, 381.38234900564203, 
34.16968524576896, 34.219378116190114], [44.051323757230193, 198.90675893521163, 58.968587591099819, 59.629290277366991], [72.139865804895095, 298.14246403509031, 64.02054183163898, 64.953666293911141], [45.672785567417591, 248.7917430301959, 64.216236405007308, 64.998361837087188], [68.908669417930454, 353.77488932204739, 58.168695822326399, 58.659853281380265], [467.98531351116583, 407.7008452670733, 48.327785002628389, 48.729976875187447], [82.512942419096149, 402.39550162965793, 49.042373354342558, 48.333252301679011], [1.6772504675897082, 84.860751817502177, 48.078384075232748, 49.858423750213085], [380.28461679522843, 392.07030113046562, 56.070169924397192, 57.118077121598063]]\n ball_RECT = [[44.722554805471077, 107.53309256655999, 32.589748982013127, 34.520252849191692], [428.1573538490178, 402.78769274917255, 48.293578263795446, 48.067935899483814], [61.499229894093062, 428.98025958602022, 36.74534589086619, 37.020965139589357], [15.630274110987813, 122.55077237791539, 46.269774520245477, 47.551955159745276], [508.49857098409473, 414.36880616921724, 36.130775255506819, 36.376744662317343], [534.76077439217966, 392.52605372165829, 54.357032891645467, 54.717910234317173]] + tempy\n del tempy\n wall_max = 17\n wall_type = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n wall_w1 = [189, 196, 166, 14, 697, 562, 643, 0, 326, 51, 18, 695, 497, 497, 0, 0, 0, 700]\n wall_z1 = [284, 221, 436, 499, 446, 0, 128, 201, 62, 9, 182, 210, 159, 159, 0, 0, 500, 0]\n wall_w2 = [220, 297, 697, 157, 377, 681, 679, 49, 304, 139, 0, 480, 376, 524, 700, 0, 700, 700]\n wall_z2 = [244, 218, 478, 432, 487, 99, 98, 416, 161, 315, 126, 325, 147, 176, 0, 500, 500, 500]\n wall_rad = [1, 1, 10, 5, 20, 6, 6, 30, 30, 10, 10, 10, 10, 10, 5, 5, 5, 5]\n wall_color = [(220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), 
(220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220)]\n wall_RECT = [[186, 245, 37, 38], [193, 218, 107, 3], [154, 424, 555, 66], [7, 429, 157, 73], [359, 424, 356, 85], [554, -8, 135, 115], [635, 94, 52, 38], [-32, 169, 113, 279], [276, 30, 78, 163], [39, -3, 112, 330], [-8, 118, 34, 72], [472, 198, 231, 139], [368, 139, 137, 28], [485, 147, 51, 41], [-7, -7, 714, 14], [-7, -7, 14, 514], [-7, 493, 714, 14], [693, -7, 14, 514]]\n elif levely == 5:\n ball_max = 15\n ball_w = [563.2380017184845, 135.5091931534665, 435.09697027584525, 132.51126304855137, 158.80356877160969, 486.49890666361813, 28.0454597909272, 469.94449157610796, 253.77058846375945, 33.311743878553251, 651.08671805489632, 467.4560139814393, 420.90145867058521, 248.83956419449743, 98.267666685148598, 670.85536291962285]\n ball_z = [340.3499477728684, 192.53572614832325, 274.00276170743837, 474.72360924550071, 248.04392629767023, 199.66234253741388, 291.77486188629132, 98.828156873677884, 261.79870802935454, 452.90721309179793, 434.31611085503482, 422.84067516142846, 143.71750465032488, 474.55563009909457, 63.407930077910926, 97.5392796541895]\n ball_wol = [-0.12736934788998625, -0.34670289908297647, -0.62730956112551528, -0.01316352118701539, -0.36875760413492498, 0.3253705975573648, -0.43186646985168864, 0.029829055857965088, -0.051399766840351885, 0.31143213467472303, 0.91261705660387604, -0.39289683694945782, 0.6973192899270082, -0.026739395385515136, 0.47773812365404217, -0.14449244329674141]\n ball_zol = [0.2651067487506561, 0.33747092449158278, -0.20330004911815291, 0.11263669365628809, 0.62183969591811039, 0.220324713577495, 0.12382039798193512, -0.062689280803922554, 0.13756798955280808, 0.8702172500111478, -0.031277763984301599, 0.28378328194527458, 0.1666190295210413, 0.056074468995401638, 0.75422143538357722, 0.14790083350095956]\n ball_rad = [12, 20, 14, 19, 14, 23, 23, 13, 25, 28, 28, 25, 20, 20, 20, 24]\n ball_color = [(132, 202, 208), (130, 220, 228), 
(133, 230, 241), (133, 200, 224), (138, 244, 248), (134, 176, 212), (132, 246, 206), (136, 191, 201), (130, 247, 204), (135, 190, 248), (136, 196, 244), (137, 246, 211), (132, 176, 232), (139, 200, 204), (135, 204, 206), (137, 234, 248)]\n ball_angle = [103.32400188884675, 316.71158855283181, 66.797426175129175, 35.509394217326573, 15.886531654813545, 0.61656478963343941, 195.33151301725019, 152.08747184390086, 199.80989069184068, 131.62120808048311, 339.38767654500623, 158.21789358507957, 322.31233400906359, 97.437869538449633, 179.6312883714439, 134.41162557033078]\n ball_angleol = [0.54118695268280415, -1.0009948706990461, -0.42583251039327935, -0.049119552546591096, -1.7234897593393199, 0.1278122582140804, -0.33925087348758332, 0.98916269599321738, 0.054177225060088277, 0.93648329222661952, 2.0855948904138386, -1.2792816321392795, 1.9343475351789952, -0.094694117658838645, 1.3328174529019678, 1.0390947956294083]\n ball_squar = [[549.2380017184845, 326.3499477728684, 28, 28], [113.5091931534665, 170.53572614832325, 44, 44], [419.09697027584525, 258.00276170743837, 32, 32], [111.51126304855137, 453.72360924550071, 42, 42], [142.80356877160969, 232.04392629767023, 32, 32], [461.49890666361813, 174.66234253741388, 50, 50], [3.0454597909272003, 266.77486188629132, 50, 50], [454.94449157610796, 83.828156873677884, 30, 30], [226.77058846375945, 234.79870802935454, 54, 54], [3.3117438785532514, 422.90721309179793, 60, 60], [621.08671805489632, 404.31611085503482, 60, 60], [440.4560139814393, 395.84067516142846, 54, 54], [398.90145867058521, 121.71750465032488, 44, 44], [226.83956419449743, 452.55563009909457, 44, 44], [76.267666685148598, 41.407930077910926, 44, 44], [644.85536291962285, 71.5392796541895, 52, 52]]\n ball_mass = [5428.6721054031623, 25132.741228718347, 8620.5302414503913, 21548.184010972389, 8620.5302414503913, 38223.757816227015, 38223.757816227015, 6902.0790599367756, 49087.385212340516, 68964.24193160313, 68964.24193160313, 49087.385212340516, 
25132.741228718347, 25132.741228718347, 25132.741228718347, 43429.376843225298]\n tempy = [[140.43481116747478, 230.04392629767023, 36.368757604134913, 36.621839695918112], [459.49890666361813, 172.66234253741388, 54.325370597557367, 54.220324713577497], [0.61359332107551268, 264.77486188629132, 54.431866469851684, 54.123820397981937], [452.94449157610796, 81.765467592873961, 34.029829055857967, 34.062689280803923], [224.7191886969191, 232.79870802935454, 58.051399766840348, 58.137567989552807], [1.3117438785532514, 420.90721309179793, 64.311432134674718, 64.870217250011152], [619.08671805489632, 402.28483309105053, 64.912617056603878, 64.031277763984292], [438.06311714448987, 393.84067516142846, 58.392896836949433, 58.283783281945276], [396.90145867058521, 119.71750465032488, 48.697319289927009, 48.166619029521044], [224.81282479911192, 450.55563009909457, 48.026739395385505, 48.056074468995405], [74.267666685148598, 39.407930077910926, 48.477738123654042, 48.754221435383577], [642.71087047632614, 69.5392796541895, 56.144492443296713, 56.147900833500962]]\n ball_RECT = [[547.11063237059454, 324.3499477728684, 32.127369347889953, 32.265106748750654], [111.16249025438353, 168.53572614832325, 48.34670289908297, 48.337470924491583], [416.46966071471974, 255.79946165832024, 36.627309561125514, 36.203300049118127], [109.49809952736436, 451.72360924550071, 46.01316352118701, 46.112636693656285]] + tempy\n del tempy\n wall_max = 17\n wall_type = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n wall_w1 = [135, 120, 230, 14, 531, 562, 441, 128, 403, 51, 504, 518, 377, 447, 0, 0, 0, 700]\n wall_z1 = [265, 216, 439, 499, 339, 0, 217, 104, 306, 9, 441, 210, 168, 127, 0, 0, 500, 0]\n wall_w2 = [227, 288, 697, 157, 456, 665, 476, 432, 61, 139, 633, 547, 435, 537, 700, 0, 700, 700]\n wall_z2 = [262, 200, 478, 432, 302, 141, 228, 77, 334, 315, 295, 193, 178, 114, 0, 500, 500, 500]\n wall_rad = [1, 1, 10, 5, 20, 6, 6, 30, 30, 10, 10, 10, 10, 10, 5, 5, 5, 5]\n wall_color = 
[(220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220), (220, 220, 220)]\n wall_RECT = [[132, 262, 98, 3], [117, 201, 174, 14], [218, 427, 491, 63], [7, 429, 157, 73], [438, 284, 111, 73], [554, -8, 119, 157], [433, 209, 51, 27], [96, 49, 368, 83], [33, 274, 398, 92], [39, -3, 112, 330], [492, 287, 153, 162], [506, 185, 53, 33], [365, 156, 82, 34], [435, 106, 114, 29], [-7, -7, 714, 14], [-7, -7, 14, 514], [-7, 493, 714, 14], [693, -7, 14, 514]]\ndef draw_walls_on_big_black_rect():\n global wall_max, wall_type, wall_w1, wall_z1, wall_w2, wall_z2, wall_rad, wall_color, wall_RECT\n global big_black_rect\n global LIN_selected, CLICKER, CLICKER2\n if CLICKER:\n if LIN_selected != -1:\n nnn = LIN_selected[0]\n if LIN_selected[1] == 1:\n wall_w1[nnn] = mowse_w\n wall_z1[nnn] = mowse_z\n else:\n wall_w2[nnn] = mowse_w\n wall_z2[nnn] = mowse_z\n w1 = wall_w1[nnn]\n z1 = wall_z1[nnn]\n w2 = wall_w2[nnn]\n z2 = wall_z2[nnn]\n rad = wall_rad[nnn]\n wall_RECT[nnn] = freaky_rect_switcharoo_2D(w1 - 2, z1 - 2, w2 - w1 + 4, z2 - z1 + 4, rad)\n wl = -1\n while wl < wall_max:\n wl += 1\n w1 = wall_w1[wl]\n z1 = wall_z1[wl]\n w2 = wall_w2[wl]\n z2 = wall_z2[wl]\n rad = wall_rad[wl]\n collyu = wall_color[wl]\n pygame.draw.line(big_black_rect, collyu, (w1, z1), (w2, z2), rad * 2)\n pygame.draw.circle(big_black_rect, collyu, (w1, z1), rad)\n pygame.draw.circle(big_black_rect, collyu, (w2, z2), rad)\n #pygame.draw.rect(big_black_rect, (200, 200, 200), wall_RECT[wl], 1)\n if CLICKER2:\n if mowse_in_rect(wall_RECT[wl][0], wall_RECT[wl][1], wall_RECT[wl][2], wall_RECT[wl][3]):\n if mowse_in_circle(w1, z1, rad+3): selected = -1; LIN_selected = [wl, 1]\n elif mowse_in_circle(w2, z2, rad+3): selected = -1; LIN_selected = [wl, 2]\ndef Lets_ROLL():\n global loopy\n 
global ball_max, ball_w, ball_z, ball_wol, ball_zol, ball_rad, ball_color, ball_angle, ball_angleol, ball_squar, ball_mass, ball_RECT\n global wall_max, wall_type, wall_w1, wall_z1, wall_w2, wall_z2, wall_rad, wall_color, wall_RECT\n global bounce_friction, air_friction, gravity, rock_and_ROLLY\n global LIN_selected, CLICKER, CLICKER2\n global levely\n levely = 3\n bounce_friction = 0.8\n #bounce_friction = 1.0\n air_friction = 0.999\n #air_friction = 1.0\n gravity = 0.5\n rock_and_ROLLY = math.pi / 8 * 180 #24\n reset_stuff()\n fontyyy = chilly_font_Italicy(24)\n PRESS_SPACE_BAR_TO_MOVE_immy = fontyyy.render('Press SPACE BAR to start motion.', 0, (100, 200, 100))\n PRESS_SPACE_BAR_TO_STOP_immy = fontyyy.render('Press SPACE BAR to stop motion.', 0, (200, 100, 100))\n PRESS_ENTER_TO_RESET_immy = fontyyy.render('Press ENTER to reset.', 0, (150, 150, 150))\n PRESS_MINUS_TO_MINUS_immy = fontyyy.render('Press - to delete a ball.', 0, (150, 150, 150))\n PRESS_ADD_TO_ADD_immy = fontyyy.render('Press + to add a ball.', 0, (150, 150, 150))\n LEFT_CLICK_TO_immy = fontyyy.render('Left click on a \"ghost ball\" to change its speed.', 0, (150, 150, 150))\n RIGHT_CLICK_TO_immy = fontyyy.render('Right click on a ball to stop its motion.', 0, (150, 150, 150))\n PRESS_S_TO_immy = fontyyy.render('Press S to stop all balls.', 0, (150, 150, 150))\n PRESS_PAGE_UP_TO_immy = fontyyy.render('Press Page Up to change the level.', 0, (150, 150, 150))\n #message_1_immy\n del fontyyy\n #calculate_for_sure = True\n selected = -1\n LIN_selected = -1\n move_stuff = True\n t = time.time() + .01\n CLICKER = False\n CLICKER2 = False\n loopy = 1\n while loopy:\n big_black_rect.fill((0, 0, 0))\n draw_walls_on_big_black_rect()\n screen.blit(big_black_rect, (0, 0))\n check_for_keys()\n CLICKER = mowse_left_held\n CLICKER2 = mowse_left_pressed\n CLICKER_2 = mowse_right_held\n CLICKER2_2 = mowse_right_pressed\n if ky_first_held_CEV(32): move_stuff = not move_stuff\n if ky_first_held_CEV(13): 
reset_stuff()\n if ky_first_held_CEV(280):\n levely += 1\n if levely > 5: levely = 1\n reset_stuff()\n if ky_first_held_CEV(115): # S\n M = -1\n while M < ball_max:\n M += 1\n ball_wol[M] = 0\n ball_zol[M] = 0\n updatey_ball_quick_rect(M)\n if ky_first_held_CEV(45) or ky_first_held_CEV(269): # -\n minus_ball_thing(0)\n if ky_first_held_CEV(61) or ky_first_held_CEV(270): # +\n add_ball_thing(350 + randy(40, -20), 400 + randy(40, -20), randy(40, -20), randy(40, -20), int_randy(20, 10), (int_randy(10, 130), int_randy(80, 170), int_randy(50, 200)), 0, 0, True, True)\n if ky_first_held_CEV(49):\n listy = ['Level_save']\n listy += ['ball_max = ' + str(ball_max)]\n listy += ['ball_w = ' + str(ball_w)]\n listy += ['ball_z = ' + str(ball_z)]\n listy += ['ball_wol = ' + str(ball_wol)]\n listy += ['ball_zol = ' + str(ball_zol)]\n listy += ['ball_rad = ' + str(ball_rad)]\n listy += ['ball_color = ' + str(ball_color)]\n listy += ['ball_angle = ' + str(ball_angle)]\n listy += ['ball_angleol = ' + str(ball_angleol)]\n listy += ['ball_squar = ' + str(ball_squar)]\n listy += ['ball_mass = ' + str(ball_mass)]\n listy += ['ball_RECT = ' + str(ball_RECT)]\n listy += ['wall_max = ' + str(wall_max)]\n listy += ['wall_type = ' + str(wall_type)]\n listy += ['wall_w1 = ' + str(wall_w1)]\n listy += ['wall_z1 = ' + str(wall_z1)]\n listy += ['wall_w2 = ' + str(wall_w2)]\n listy += ['wall_z2 = ' + str(wall_z2)]\n listy += ['wall_rad = ' + str(wall_rad)]\n listy += ['wall_color = ' + str(wall_color)]\n listy += ['wall_RECT = ' + str(wall_RECT)]\n ##write_to_file_WEEE_STRANGE(\"Level_Save.dat\", listy)\n del listy\n if CLICKER2:\n allow_selectey_thing = True\n else:\n allow_selectey_thing = False\n if not CLICKER:\n selected = -1\n LIN_selected = -1\n to_be_selected = selected\n M = -1\n while M < ball_max:\n M += 1\n if move_stuff:\n move_ball(M)\n wwol = int(ball_w[M] + ball_wol[M])\n zzol = int(ball_z[M] + ball_zol[M])\n pygame.draw.circle(screen, ball_color[M], (int(ball_w[M]), 
int(ball_z[M])), ball_rad[M])\n blpw, blpz = point_rotated_by_angle_2D(0, -ball_rad[M], 0, 0, ball_angle[M])\n pygame.draw.line(screen, (100, 100, 100), (int(ball_w[M] + blpw), int(ball_z[M] + blpz)), (int(ball_w[M]), int(ball_z[M])))\n if not move_stuff:\n pygame.draw.circle(screen, (100, 100, 250), (wwol, zzol), ball_rad[M], 1)\n pygame.draw.circle(screen, (100, 100, 150), (wwol, zzol), int(ball_rad[M] * 1.0), 1)\n pygame.draw.circle(screen, (150, 150, 200), (wwol, zzol), int(ball_rad[M] * 0.8), 1)\n pygame.draw.circle(screen, (200, 200, 250), (wwol, zzol), int(ball_rad[M] * 0.5), 1)\n pygame.draw.line(screen, (100, 160, 250), (int(ball_w[M]), int(ball_z[M])), (wwol, zzol))\n pygame.draw.rect(screen, (130, 130, 130), ball_RECT[M], 1)\n pygame.draw.rect(screen, (140, 140, 140), ball_squar[M], 1)\n if allow_selectey_thing:\n if mowse_in_rect(ball_RECT[M][0], ball_RECT[M][1], ball_RECT[M][2], ball_RECT[M][3]):\n if mowse_in_circle(wwol, zzol, ball_rad[M]):\n to_be_selected = M\n LIN_selected = -1\n if CLICKER_2:\n if mowse_in_rect(ball_squar[M][0], ball_squar[M][1], ball_squar[M][2], ball_squar[M][3]):\n if mowse_in_circle(ball_w[M], ball_z[M], ball_rad[M]):\n ball_wol[M] = 0\n ball_zol[M] = 0\n ball_angleol[M] = 0\n updatey_ball_quick_rect(M)\n if CLICKER:\n if selected == M:\n if move_stuff:\n mowseyy_w = mowse_w\n mowseyy_z = mowse_z\n bw1 = ball_rad[M]\n bz1 = ball_rad[M]\n bw2 = APPLICATION_w_size - ball_rad[M]\n bz2 = APPLICATION_z_size - ball_rad[M]\n if mowseyy_w < bw1: mowseyy_w = bw1\n if mowseyy_w > bw2: mowseyy_w = bw2\n if mowseyy_z < bz1: mowseyy_z = bz1\n if mowseyy_z > bz2: mowseyy_z = bz2\n ww = mowseyy_w - ball_w[M]\n zz = mowseyy_z - ball_z[M]\n #dissy = distance_2D(0, 0, ww, zz)\n ball_wol[M] = ww # / 2.0 # / dissy\n ball_zol[M] = zz # / 2.0 # / dissy\n else:\n ball_wol[M] = mowse_w - ball_w[M]\n ball_zol[M] = mowse_z - ball_z[M]\n updatey_ball_quick_rect(M)\n selected = to_be_selected\n if not move_stuff:\n 
screen.blit(PRESS_SPACE_BAR_TO_MOVE_immy, (10, 10))\n else:\n screen.blit(PRESS_SPACE_BAR_TO_STOP_immy, (10, 10))\n screen.blit(PRESS_MINUS_TO_MINUS_immy, (10, 30))\n screen.blit(PRESS_ADD_TO_ADD_immy, (10, 50))\n screen.blit(PRESS_ENTER_TO_RESET_immy, (10, 70))\n screen.blit(LEFT_CLICK_TO_immy, (10, 90))\n screen.blit(RIGHT_CLICK_TO_immy, (10, 110))\n screen.blit(PRESS_S_TO_immy, (10, 130))\n screen.blit(PRESS_PAGE_UP_TO_immy, (10, 150))\n pygame.display.flip()\n while t > time.time(): pass\n t = time.time() + .01\n # Try_Again_HE_HE Is weird!! maybe It should be deleted!!\ndef move_ball(M):\n ball_angle[M] += ball_angleol[M]\n if ball_angle[M] > 359: ball_angle[M] -= 360\n elif ball_angle[M] < 0: ball_angle[M] += 361\n #movey_bally_speciality(M, ball_wol[M], ball_zol[M], 10)\n movey_bally_speciality(M, ball_wol[M], ball_zol[M], 10)\n ball_zol[M] += gravity\n updatey_ball_quick_rect(M)\ndef movey_bally_speciality(M, wol_special, zol_special, Try_Again_HE_HE):\n global loopy\n global ball_max, ball_w, ball_z, ball_wol, ball_zol, ball_rad, ball_color, ball_angle, ball_angleol, ball_squar, ball_mass, ball_RECT\n global wall_max, wall_type, wall_w1, wall_z1, wall_w2, wall_z2, wall_rad, wall_color, wall_RECT\n global bounce_friction, air_friction, gravity, rock_and_ROLLY\n distance_is_supposed_to_be_at = distance_2D(0, 0, wol_special, zol_special)\n wa = ball_w[M]\n za = ball_z[M]\n #will_be_w = wa + ball_wol[M]\n #will_be_z = za + ball_zol[M]\n will_be_w = wa + wol_special\n will_be_z = za + zol_special\n LIN_collide_max = -1\n LIN_collide_w = []\n LIN_collide_z = []\n LIN_collide_ang = []\n LIN_collide_dis = []\n LL = -1\n while LL < wall_max:\n LL += 1\n if rect_touching_rect2(ball_RECT[M][0], ball_RECT[M][1], ball_RECT[M][2], ball_RECT[M][3], wall_RECT[LL][0], wall_RECT[LL][1], wall_RECT[LL][2], wall_RECT[LL][3]):\n #print 'weee'\n did_collide, New_ball_w, New_ball_z, angle_hit_at = find_where_ball_collides_on_a_wall(wa, za, wol_special, zol_special, ball_rad[M], 
wall_type[LL], wall_w1[LL], wall_z1[LL], wall_w2[LL], wall_z2[LL], wall_rad[LL])\n if did_collide:\n #print 'collide'\n #print str(New_ball_w), str(New_ball_z)\n LIN_collide_max += 1\n LIN_collide_w += [New_ball_w]\n LIN_collide_z += [New_ball_z]\n LIN_collide_ang += [angle_hit_at]\n LIN_collide_dis += [distance_2D(wa, za, New_ball_w, New_ball_z)]\n HEH_collide_max = -1\n HEH_collide_w = []\n HEH_collide_z = []\n HEH_collide_ang = []\n HEH_collide_dis = []\n HEH_collide_ball_hit = []\n M2 = -1\n while M2 < ball_max:\n M2 += 1\n if M2 != M:\n if rect_touching_rect2(ball_RECT[M][0], ball_RECT[M][1], ball_RECT[M][2], ball_RECT[M][3], ball_squar[M2][0], ball_squar[M2][1], ball_squar[M2][2], ball_squar[M2][3]):\n #they_did_touch, New_ball1_w, New_ball1_z, angle_hit_at = find_where_ball_collides_on_another_ball(wa, za, ball_wol[M], ball_zol[M], ball_rad[M], ball_w[M2], ball_z[M2], ball_rad[M2])\n they_did_touch, New_ball1_w, New_ball1_z, angle_hit_at = find_where_ball_collides_on_another_ball(wa, za, wol_special, zol_special, ball_rad[M], ball_w[M2], ball_z[M2], ball_rad[M2])\n if they_did_touch:\n HEH_collide_max += 1\n HEH_collide_w += [New_ball1_w]\n HEH_collide_z += [New_ball1_z]\n HEH_collide_ang += [angle_hit_at]\n HEH_collide_dis += [distance_2D(wa, za, New_ball1_w, New_ball1_z)]\n HEH_collide_ball_hit += [M2]\n current_dis = distance_is_supposed_to_be_at\n Wall_to_hit_at_angley = None\n Grr = -1\n while Grr < LIN_collide_max:\n Grr += 1\n #print LIN_collide_dis[Grr], current_dis\n if LIN_collide_dis[Grr] < current_dis:\n #print 'weee!'\n Wall_to_hit_at_angley = LIN_collide_ang[Grr]\n current_dis = LIN_collide_dis[Grr]\n will_be_w = LIN_collide_w[Grr]\n will_be_z = LIN_collide_z[Grr]\n Ball_to_hit = None\n Ball_to_hit_at_angley = None\n Heh = -1\n while Heh < HEH_collide_max:\n Heh += 1\n if HEH_collide_dis[Heh] < current_dis:\n if ball_is_going_towards_ball(M, HEH_collide_ball_hit[Heh]):\n if ball_is_relatively_going_towards_ball(M, HEH_collide_ball_hit[Heh]):\n 
Ball_to_hit = HEH_collide_ball_hit[Heh]\n Ball_to_hit_at_angley = HEH_collide_ang[Heh]\n else:\n Ball_to_hit = None\n Ball_to_hit_at_angley = None\n current_dis = HEH_collide_dis[Heh]\n will_be_w = HEH_collide_w[Heh]\n will_be_z = HEH_collide_z[Heh]\n if Ball_to_hit != None:\n Make_two_balls_hit_at_angle(M, Ball_to_hit, Ball_to_hit_at_angley)\n else:\n #if bouncey == 1: ball_wol[M] = -ball_wol[M] * bounce_friction\n #elif bouncey == 2: ball_zol[M] = -ball_zol[M] * bounce_friction\n if Wall_to_hit_at_angley != None:\n ball_wol[M], ball_zol[M] = wzol_bounce_at_angle(ball_wol[M], ball_zol[M], Wall_to_hit_at_angley, bounce_friction)\n ball_angleol[M] = zol_at_angle(ball_wol[M], ball_zol[M], Wall_to_hit_at_angley + 90) / ball_rad[M] * rock_and_ROLLY\n ball_w[M] = will_be_w\n ball_z[M] = will_be_z\n if ball_w[M] < 0 or ball_w[M] > APPLICATION_w_size or ball_z[M] < 0 or ball_z[M] > APPLICATION_z_size:\n #print str(M) + \" \", str(wa), str(za)\n print str(M) + \" \", str(ball_w[M]), str(ball_z[M]), str(ball_rad[M])\n ball_wol[M] *= air_friction\n ball_zol[M] *= air_friction\n updatey_ball_quick_rect(M)\n if current_dis < distance_is_supposed_to_be_at:\n if Try_Again_HE_HE > 0:\n distance_to_travel_next = distance_is_supposed_to_be_at - current_dis\n disy_HE_HE = distance_2D(0, 0, ball_wol[M], ball_zol[M])\n next_wol = ball_wol[M]\n next_zol = ball_zol[M]\n movey_bally_speciality(M, next_wol, next_zol, Try_Again_HE_HE - 1)\n\n ## Woah... Finally! Were near the end of the program! ##\nif __name__ == '__main__':\n import math\n import pygame\n import random\n import time\n import gc\n import copy\n from pygame.locals import *\n if not pygame.font: print 'Warning, fonts disabled?'\n if not pygame.mixer: print 'Warning, sound disabled?'\n HE_HE_init()\n ## THE END! 
##", "user_title": "Anonymous", "datetimeon": "2007-12-23T15:46:20", "link": "pygame.draw.circle", "id": 1320}, {"content": "Guillame was confused on the nature of the last two arguments to Rect().\nhe thought that they were absolute coordinates, not width-height.\nThere isn't a bug with collidepoint.\nsee the pygame mailing list archives for the discussion with Guillame where this topic arose.", "user_title": "Anonymous", "datetimeon": "2007-04-25T17:52:17", "link": "Rect.collidepoint", "id": 524}, {"content": "I'm guessing they're key pressed (eg, the A or J keys) and the modifiers (Shift, Ctrl, Alt, Meta, Super, etc). Experiment to find details.", "user_title": "Anonymous", "datetimeon": "2007-04-26T17:03:55", "link": "pygame.event", "id": 528}, {"content": "I, too, get a list with a single item, None. This on Mac OS X 10.4 (Tiger) and Python 2.4.4; \npygame.ver returns '1.8.0pre'", "user_title": "Anonymous", "datetimeon": "2008-01-20T04:04:36", "link": "pygame.font.get_fonts", "id": 1428}, {"content": "The params units are in pixels.\nThe smallest unit for Pygame I think.\nIt doesn't make sense to change this to float.", "user_title": "Anonymous", "datetimeon": "2008-01-21T10:48:00", "link": "Rect.move_ip", "id": 1430}, {"content": "this resource is perfect for games with controls options\nbecause through it you can show the current input without creating a whole database of inputs\ntxt = font.render(pygame.key.name(current_key),True,(0,0,0))\nscreen.blit(txt,(0,0))", "user_title": "Anonymous", "datetimeon": "2008-01-21T20:04:03", "link": "pygame.key.name", "id": 1431}, {"content": "#!/usr/bin/python\nimport pygame\nfrom pygame.locals import *\n\ndef main():\n pygame.init()\n pygame.display.set_mode((300,200))\n pygame.display.set_caption('Testing')\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == QUIT:\n running = False\n if event.type == KEYDOWN and event.key == K_ESCAPE:\n running = False\n if event.type == 
MOUSEBUTTONDOWN:\n #print event.button\n print pygame.mouse.get_pos()\n pygame.display.quit()\n\nif __name__ == '__main__':\n main()", "user_title": "Anonymous", "datetimeon": "2008-01-23T15:43:49", "link": "pygame.mouse.get_pos", "id": 1436}, {"content": "Once you draw something, how do you delete it??", "user_title": "Anonymous", "datetimeon": "2007-05-04T00:39:15", "link": "pygame.draw.rect", "id": 548}, {"content": "Hi,\n\nI was having a problem when trying to jump from one movie to another. Even with the fist one stopped the second movie played at half speed with no volume. A solution was to keep a reference to the old movie, which I assume stops the garbage collector trying to delete it before its finished doing whatever it was doing. \n\nThis would cause problems:\n\ncurrentMovie.stop()\ncurrentMovie = pygame.movie.Movie(fullname)\ncurrentMovie.play()\n\nAnd this fixes it:\n\ncurrentMovie.stop()\noldMovie = currentMovie\ncurrentMovie = pygame.movie.Movie(fullname)\ncurrentMovie.play()\n\nMaybe need a method for unloading the memory and stopping all its threads.", "user_title": "Anonymous", "datetimeon": "2007-05-09T10:27:03", "link": "pygame.movie", "id": 554}, {"content": "Does anyone know how to make multiple screens? I've tried making a different variable with different dimensions, but it only changes original\n\nsize=400,400\nscreen=display.set_mode(size)\n\nsize1=200,200\nnew_screen=display.set_mode(size1)", "user_title": "Anonymous", "datetimeon": "2008-01-29T10:33:27", "link": "pygame.display.set_mode", "id": 1456}, {"content": "The first two arguments to Surface.blit() seem to be reversed. 
To draw Surface \"source\" onto Surface \"dest\" the correct call is:\n\n pygame.Surface.blit(dest, source, position)\n\nor\n\n dest.blit(source, position)\n\n\nExample:\n\n screen = pygame.display.set_mode((100,100))\n screen.fill((255,255,255)) # white background\n red_block = pygame.Surface((50,50))\n red_block.fill((255,0,0))\n\n # draw red block onto white background\n screen.blit(red_block, (25,25))\n\n pygame.display.update()", "user_title": "Anonymous", "datetimeon": "2007-05-16T08:51:09", "link": "Surface.blit", "id": 572}, {"content": "Actually, on re-reading the method description, I realize that the \"dest\" argument means the position to where the source Surface should be copied too. So the call synopsis is equivalent to the second case in my comment.\n\nThe naming of the arguments is a bit confusing, IMHO, and also that it is not (visually) clear that the documentation describes the methods of a Surface object instance and makes no mention of the class methods.", "user_title": "Anonymous", "datetimeon": "2007-05-16T09:15:43", "link": "Surface.blit", "id": 573}, {"content": "Only the \"systems with multiple choices\" are listed there.", "user_title": "Anonymous", "datetimeon": "2007-05-20T19:04:19", "link": "pygame.display.init", "id": 578}, {"content": "Yeah the doc could be improved. The wrong comment below should be removed, it's only confusing. I did find another problem, though: It's important to note that when you cut out an area from the source, then the dest argument does _not_ specify where the origin of the source would be on the surface that is blitted on, but instead dest specifies the top left corner of just the area that is actually blitted. 
This is not quite clear from this doc, I think.", "user_title": "Anonymous", "datetimeon": "2007-05-24T07:20:10", "link": "Surface.blit", "id": 588}, {"content": "Pygame (and SDL) doesn't support multiple windows.", "user_title": "Anonymous", "datetimeon": "2008-02-09T15:29:56", "link": "pygame.display.set_mode", "id": 1525}, {"content": "you can't \"delete\" something you have drawn, you have to draw something over it instead.\nyou can make classes that wrap the different draw function and have both a \"show\" and a \"hide\" function and the hide function has to draw the background over the shape you created in \"hide\", but this can have weird results if shapes overlap and are not correctly redrawn.\ncheers", "user_title": "Anonymous", "datetimeon": "2007-06-02T10:44:48", "link": "pygame.draw.rect", "id": 607}, {"content": "destination.blit(source (distination location),(source location x,y and size x,y)", "user_title": "Anonymous", "datetimeon": "2007-06-05T09:13:28", "link": "pygame.Surface", "id": 609}, {"content": "destination.blit(source (distination location),(source location x,y and size x,y)", "user_title": "Anonymous", "datetimeon": "2007-06-05T09:13:56", "link": "Surface.blit", "id": 610}, {"content": "use events to save and release keystates to use it only for some:\n\nif event.type == pygame.KEYDOWN: if event.key == MYKEY: i = True\nelif event.type == pygame.KEYUP: if event.key == MYKEY: i = False\nif i: do stuff.", "user_title": "Anonymous", "datetimeon": "2008-02-11T16:09:33", "link": "pygame.key.set_repeat", "id": 1541}, {"content": "Is there a way to make the collision box of the line accurate to the line itself?", "user_title": "Anonymous", "datetimeon": "2008-02-12T18:09:15", "link": "pygame.draw.line", "id": 1549}, {"content": "Is there a way to produce small line segments that have accurate collision boxes\nso that one could have two lines that would be parallel to each other with out them colliding?", "user_title": "Anonymous", "datetimeon": 
"2008-02-12T18:13:18", "link": "pygame.draw", "id": 1550}, {"content": "Yes, actually it would be quite useful for it to be float, if it could store the\ndecimals and increment them as such but not display it until it would take effect...", "user_title": "Anonymous", "datetimeon": "2008-02-12T20:21:41", "link": "Rect.move_ip", "id": 1552}, {"content": "Clearly, more documentation about the event properties should be written.", "user_title": "Anonymous", "datetimeon": "2007-06-10T10:28:36", "link": "pygame.event", "id": 628}, {"content": "MOUSEBUTTONDOWN/UP:\n pos: tuple of x and y coordinates of the click\n button: 1 - left 2 - middle 3 - right button", "user_title": "Anonymous", "datetimeon": "2007-06-10T13:16:50", "link": "pygame.event", "id": 630}, {"content": "MOUSEMOTION\n pos: tuple of x and y coordinates\n rel: tuple of relative x and relative y change from previous position\n buttons: tuple of three values (left,middle,right). 0-not pressed 1-pressed", "user_title": "Anonymous", "datetimeon": "2007-06-10T13:18:03", "link": "pygame.event", "id": 631}, {"content": "While thick arcs do get filled, they also get moire holes - at least on Debian's 1.7.1release-4.1. For now, I've been using a rather ugly workaround where one draws the arc several times with the start angle offset by 0.01 to cut the moires back.", "user_title": "Anonymous", "datetimeon": "2008-02-16T15:39:17", "link": "pygame.draw.arc", "id": 1580}, {"content": "I have noticed that with my analog joystick square shaped information is returned\nfrom this function. For example, pressing fully down and right would return 1.0\nfor both axis directions, instead of 0.7071... 
as one might expect (since analog\njoysticks have a circle shaped socket for the stick to move in.)\n\nTo correct this one might want to use a function similar to the following\njoystick_transform function:\n\ndef length(v):\n\treturn math.sqrt(v[0]*v[0] + v[1]*v[1])\n\n# Transforms the square info of an analog joystick into circular info\ndef joystick_transform(j):\n\t# If joystick is not centered:\n\tif (j[0],j[1]) != (0,0):\n\t\t# Check if x axis is larger than y axis\n\t\tif abs(j[0]) > abs(j[1]):\n\t\t\t# Since x>y we will check for line intersection with wall\n\t\t\t# Get slope (m = y/x) for y = m * x (line equation)\n\t\t\tm = abs(j[1] / j[0])\n\t\t\t# At x=1.0 (intersecting right wall), y would equal m\n\t\t\t# scaler = length of normalized vector / length of line intersecting box\n\t\t\ts = 1.0 / length((1.0, m))\n\t\telse:\n\t\t\t# Since y>=x we will check for line intersection with ceiling\n\t\t\t# Get slope (m = x/y) for x = m * y (line equation)\n\t\t\tm = abs(j[0] / j[1])\n\t\t\t# At y=1.0 (intersecting ceiling), x would equal m\n\t\t\t# scaler = length of normalized vector / length of line intersecting box\n\t\t\ts = 1.0 / length((m,1.0))\n\telse:\n\t\t# Since the joystick is centered, the scaler will be 0\n\t\ts = 0\n\t\t\n\t# Simply scale the joystick axis data by the scaler\n\treturn (j[0] * s, j[1] * s)\n\n-----\n\nHere is a full example illustrating the difference between raw joystick input\nand transformed joystick information:\n\n#!/usr/bin/python\n\n# In this example the function joystick_transform will transform the\n# square shaped joystick axis information into a circular shape.\n# This will make the new joystick axis information easier to use while\n# moving around a character or cursor.\n\n# The RED dot represents the actual joystick information.\n\n# The BLUE dot represents the transformed joystick information.\n\n# The GREEN dot is a cursor that moves by the transformed joystick\n# information in a motion relative to it's previous 
location.\n\n\nimport pygame\nimport math\n\ndef norm(v):\n\tl = length(v)\n\tif l != 0:\n\t\treturn (v[0] / l, v[1] / l)\n\treturn (0,0)\n\t\ndef length(v):\n\treturn math.sqrt(v[0]*v[0] + v[1]*v[1])\n\n# Transforms the square info of an analog joystick into circular info\ndef joystick_transform(j):\n\t# If joystick is not centered:\n\tif (j[0],j[1]) != (0,0):\n\t\t# Check if x axis is larger than y axis\n\t\tif abs(j[0]) > abs(j[1]):\n\t\t\t# Since x>y we will check for line intersection with wall\n\t\t\t# Get slope (m = y/x) for y = m * x (line equation)\n\t\t\tm = abs(j[1] / j[0])\n\t\t\t# At x=1.0 (intersecting right wall), y would equal m\n\t\t\t# scaler = length of normalized vector / length of line intersecting box\n\t\t\ts = 1.0 / length((1.0, m))\n\t\telse:\n\t\t\t# Since y>=x we will check for line intersection with ceiling\n\t\t\t# Get slope (m = x/y) for x = m * y (line equation)\n\t\t\tm = abs(j[0] / j[1])\n\t\t\t# At y=1.0 (intersecting ceiling), x would equal m\n\t\t\t# scaler = length of normalized vector / length of line intersecting box\n\t\t\ts = 1.0 / length((m,1.0))\n\telse:\n\t\t# Since the joystick is centered, the scaler will be 0\n\t\ts = 0\n\t\t\n\t# Simply scale the joystick axis data by the scaler\n\treturn (j[0] * s, j[1] * s)\n\npygame.init()\npygame.joystick.init()\n\nscreen = pygame.display.set_mode((640,480))\n\njs = pygame.joystick.Joystick(0)\njs.init()\n\npx = 320.0\npy = 240.0\n\nmove_speed = 2.0\n\ndone = False\nwhile not done:\n\tkey = pygame.key.get_pressed()\n\tscreen.fill((255,255,255))\n\t\n\t# Outer box boundry\n\tpygame.draw.rect(screen, (200,200,200), ((10,10),(180,180)), 1)\n\t\n\t# Circle boundry\n\tpygame.draw.circle(screen, (0,0,0), (100,100), 90, 1)\n\t\n\t# Center point\n\tpygame.draw.circle(screen, (200,200,200), (100,100), 2, 1)\n\t\n\tjx = js.get_axis(0)\n\tjy = js.get_axis(1)\n\tn = norm((jx,jy))\n\t\n\t# Line representing normalized joystick information\n\tpygame.draw.line(screen, (200,200,200), (100,100), 
(100 + int(n[0] * 90.0), 100 + int(n[1] * 90.0)))\n\t\n\t# Raw joystick information\n\tx = 100 + int(jx * 90.0)\n\ty = 100 + int(jy * 90.0)\n\tpygame.draw.circle(screen, (255,0,0), (x, y), 5)\n\t\n\t# Transformed joystick information\n\ttj = joystick_transform((jx,jy))\n\tx = 100 + int(tj[0] * 90.0)\n\ty = 100 + int(tj[1] * 90.0)\n\tpygame.draw.circle(screen, (0,0,255), (x, y), 5)\n\t\n\t# Cursor moved by transformed joystick information\n\tpx = px + tj[0] * move_speed\n\tpy = py + tj[1] * move_speed\n\tpygame.draw.circle(screen, (0, 255, 0), (int(px), int(py)), 5)\n\t\n\tpygame.display.flip()\n\t\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT: \n\t\t\tdone = True\n\t\t\t\n\t\tif key[pygame.K_ESCAPE]:\n\t\t\tdone = True", "user_title": "Anonymous", "datetimeon": "2008-02-16T23:25:56", "link": "Joystick.get_axis", "id": 1584}, {"content": "the playback doesnt work, the music stops for a second and then starts again\nHELP ME", "user_title": "Anonymous", "datetimeon": "2010-11-27T14:29:59", "link": "pygame.mixer.music.play", "id": 3415}, {"content": "An example of the Color object is an rgb tuple like (100,0,200).", "user_title": "Anonymous", "datetimeon": "2008-02-27T10:13:22", "link": "Surface.set_at", "id": 1657}, {"content": "It might be convenient to have this particular documentation within the\nhelp(pygame.display.init) documentation, as of this writing it is not.\n\nAdditionally, information pertaining to Mac-OS X is not present; it may also be\nnoteworthy to document the methods by which Pygame renders its surfaces, as OSX,\nlike Windows, has its own various subsystems to draw views(surfaces).", "user_title": "Anonymous", "datetimeon": "2008-03-01T23:13:53", "link": "pygame.display.init", "id": 1680}, {"content": "All the keyboard event.key constants:\n\nLetters:\n K_a ... K_z\n\nNumbers:\n K_0 ... 
K_9\n\nControl:\n K_TAB\n K_RETURN\n K_ESCAPE\n K_SCROLLOCK\n K_SYSREQ\n K_BREAK\n K_DELETE\n K_BACKSPACE\n K_CAPSLOCK\n K_CLEAR\n K_NUMLOCK\n\nPunctuation:\n K_SPACE\n K_PERIOD\n K_COMMA\n K_QUESTION\n K_AMPERSAND\n K_ASTERISK\n K_AT\n K_CARET\n K_BACKQUOTE\n K_DOLLAR\n K_EQUALS\n K_EURO\n K_EXCLAIM\n K_SLASH, K_BACKSLASH\n K_COLON, K_SEMICOLON\n K_QUOTE, K_QUOTEDBL\n K_MINUS, K_PLUS\n K_GREATER, K_LESS\n\nBrackets:\n K_RIGHTBRACKET, K_LEFTBRACKET\n K_RIGHTPAREN, K_LEFTPAREN\n\nF-Keys:\n K_F1 ... K_F15\n\nEdit keys:\n K_HELP\n K_HOME\n K_END\n K_INSERT\n K_PRINT\n K_PAGEUP, K_PAGEDOWN\n K_FIRST, K_LAST\n\nKeypad:\n K_KP0 ... K_KP9\n K_KP_DIVIDE\n K_KP_ENTER\n K_KP_EQUALS\n K_KP_MINUS\n K_KP_MULTIPLY\n K_KP_PERIOD\n K_KP_PLUS\n\nSHF,CTL,ALT etc:\n K_LALT, K_RALT\n K_LCTRL, K_RCTRL\n K_LSUPER, K_RSUPER\n K_LSHIFT, K_RSHIFT\n K_RMETA, K_LMETA\n\nArrows:\n K_LEFT\n K_UP\n K_RIGHT\n K_DOWN\n\nOther:\n K_MENU\n K_MODE\n K_PAUSE\n K_POWER\n K_UNDERSCORE\n K_HASH\n\n K_UNKNOWN", "user_title": "Anonymous", "datetimeon": "2008-03-11T11:07:08", "link": "pygame.event.Event", "id": 1682}, {"content": "key is one of the K_* constants in the pygame package level -- it indicates the key pressed. For example, K_UP or K_ESCAPE.\nmod is the modifier. I'm assuming it's either a bitfield or a list. Shouldn't be hard to figure it out.", "user_title": "Anonymous", "datetimeon": "2008-03-16T02:36:10", "link": "pygame.event", "id": 1685}, {"content": "can anyone give me just a small script about how can i play a movie, please?\n\ni used this script but nothing happened. just a window but blank :(\n\nimport pygame\n\n\npygame.init()\n\n\ncine = pygame.movie.Movie('film.mpg')\nsz=cine.get_size()\npygame.display.set_mode(sz)\nwhile 1:\n cine.play(1)\n\nthe movie loads because i tried to find the length and worked and the movie is at the same location. 
please HELP!!", "user_title": "Anonymous", "datetimeon": "2007-07-12T09:58:26", "link": "pygame.movie", "id": 716}, {"content": "JOYBUTTONDOWN/JOYBUTTONUP\nbutton -- the ID of the button which fired the event.", "user_title": "Anonymous", "datetimeon": "2007-07-14T15:07:03", "link": "pygame.event", "id": 719}, {"content": "Is there a way to draw an anti aliased line with a thickness?", "user_title": "Anonymous", "datetimeon": "2008-03-28T14:12:55", "link": "pygame.draw.aaline", "id": 1713}, {"content": "I've notice that passing any negative number will cause the music to loop forever, not just -1.", "user_title": "Anonymous", "datetimeon": "2008-03-30T20:55:44", "link": "pygame.mixer.music.play", "id": 1722}, {"content": "It seams that you can't read the axes or button positions if you don't start the event loop.\nThis is a little different from what the docs are saying but actually expected since the joystick \nbroadcasts the position (is this right for all drivers?)", "user_title": "Anonymous", "datetimeon": "2008-04-02T23:40:29", "link": "pygame.joystick.Joystick", "id": 1727}, {"content": "Can I draw just one pixel with this? \nApparantly the smallest rect one can draw is 2 pixels big, I guess?", "user_title": "Anonymous", "datetimeon": "2007-07-16T21:15:36", "link": "pygame.draw.rect", "id": 726}, {"content": "the fade_in parameter seems to be missing. Using 1.7: \nTypeError: function takes at most 2 arguments (3 given)", "user_title": "Anonymous", "datetimeon": "2008-04-04T23:21:49", "link": "Sound.play", "id": 1732}, {"content": "The algorithm used will probably ruin the edges in your images. Makes them kinda blurry.", "user_title": "Anonymous", "datetimeon": "2008-04-05T17:52:43", "link": "pygame.transform.smoothscale", "id": 1734}, {"content": "", "user_title": "Anonymous", "datetimeon": "2007-07-18T19:04:17", "link": "Very cool design! Useful information. 
Go on!", "id": 733}, {"content": "", "user_title": "Anonymous", "datetimeon": "2007-07-18T19:04:17", "link": "Pretty nice site, wants to see much more on it! :)", "id": 734}, {"content": "", "user_title": "Anonymous", "datetimeon": "2007-07-18T19:04:17", "link": "Thanks for the enjoy to have you on my site ! Good luck.", "id": 735}, {"content": "How about creating a sound from a string of raw samples? I shouldn't have to construct a fake WAV header just to get Pygame to accept sound data.", "user_title": "Anonymous", "datetimeon": "2008-04-09T23:10:47", "link": "pygame.mixer.Sound", "id": 1742}, {"content": "Here are attributes of the different events (as best I can tell):\n\nACTIVEEVENT:\n\tgain\n\tstate\nKEYDOWN:\n\tunicode\nKEYUP:\n\tkey\n\tmod\nMOUSEMOTION:\n\tpos\n\trel\n\tbuttons\nMOUSEBUTTONDOWN and MOUSEBUTTONUP:\n\tpos\n\tbutton\nJOYAXISMOTION:\n\tjoy\n\taxis\n\tvalue\nJOYBALLMOTION:\n\tjoy\n\tball\n\trel\nJOYHATMOTION:\n\tjoy\n\that\n\tvalue\nJOYBUTTONUP and case JOYBUTTONDOWN:\n\tjoy\n\tbutton\nVIDEORESIZE:\n\tsize\n\tw\n\th\nSYSWMEVENT (WIN32 only):\n\thwnd\n\tmsg\n\twparam\n\tlparam", "user_title": "Anonymous", "datetimeon": "2007-07-19T13:22:47", "link": "pygame.event", "id": 740}, {"content": "> It is true that passing None for the final argument causes \"Invalid RGBA argument\". 
This is a bug in the documentation, not the code.\nI'd suggest replacing 'None' with 'NULL' in the documentation, then - it also indicates no value, but is not one you can enter in Python (hence there's no confusion).", "user_title": "Anonymous", "datetimeon": "2007-07-19T16:32:03", "link": "Font.render", "id": 741}, {"content": "There seems to be a typo in the sentence \"The blit routines will attempt to use hardware acceleration when possible, otherwise will use highly optimized software blitting methods.\"", "user_title": "Anonymous", "datetimeon": "2007-07-22T04:31:03", "link": "pygame.Surface", "id": 748}, {"content": "hey thanks for the tip ive been searchin in vain for days", "user_title": "Anonymous", "datetimeon": "2007-07-22T10:05:15", "link": "Surface.get_rect", "id": 749}, {"content": "Yes, it's particularly an issue for when a sprite wants to move diagonally in a low resolution.\n\nWhen you're only moving sideways 1 pixel at a time, it's impossible to move diagonally without breaking conservation of momentum.", "user_title": "Anonymous", "datetimeon": "2008-04-23T19:32:47", "link": "Rect.move_ip", "id": 1782}, {"content": "# Mattew N. Brown copyright 2007\n# This is an example program for key input:\n\n ## IMPORT THEN EXECUTE IMPORTED MODULE ('*.py'): ##\nimport os, sys\nimport random\nimport pygame\nfrom pygame.locals import *\n ## UH!? 
WHAT IF IT ISN'T EXISTANT!?: ##\nif not pygame.font: print 'Warning, fonts disabled'\nif not pygame.mixer: print 'Warning, sound disabled'\n\n ## LOAD IMAGE AND SOUND: ##\ndef image_file_data(file_name, colorkey=None):\n try:\n image = pygame.image.load(file_name)\n except pygame.error, message:\n print 'ERROR: Image did not load:', file_name\n raise SystemExit, message\n image = image.convert()\n if colorkey is not None:\n if colorkey is -1:\n colorkey = image.get_at((0,0))\n image.set_colorkey(colorkey, RLEACCEL)\n return image, image.get_rect()\ndef sound_file_data(file_name):\n class NoneSound:\n def play(self): pass\n if not pygame.mixer:\n return NoneSound()\n try:\n sound = pygame.mixer.Sound(file_name)\n except pygame.error, message:\n print 'ERROR: Sound did not load:', file_name\n raise SystemExit, message\n return sound\ndef HEHEHE_font(size):\n fonti = pygame.font.Font(None, size)\n fonti.set_bold(0)\n return fonti\n ## IMAGE STRETCH AND ROTATE: ##\ndef HEHEHE_stretch_image (IMAGEY, wol, zol):\n #return pygame.transform.scale(IMAGEY, (wol, zol))\n return pygame.transform.scale(IMAGEY, (wol + IMAGEY.get_width(), zol + IMAGEY.get_height()))\ndef HEHEHE_rotate_image (IMAGEY, angle):\n center = (0, 0)\n rotate = pygame.transform.rotate\n IMAGEY = rotate(IMAGEY, angle)\n recty = IMAGEY.get_rect(center=center)\n return IMAGEY, recty\n ## DRAW IMAGE: ##\ndef draw_HEHEHE_image (IMAGEE, w, z):\n screen.blit(IMAGEE, (w, z))\ndef draw_HEHEHE_image_stretch (IMAGEE, w, z, wol, zol):\n IMAGEE = HEHEHE_stretch_image(IMAGEE, wol, zol)\n screen.blit(IMAGEE, (w, z))\ndef draw_HEHEHE_image_stretch_rotate (IMAGEE, w, z, wol, zol, angle):\n IMAGEE = HEHEHE_stretch_image(IMAGEE, wol, zol)\n IMAGEE, recty = HEHEHE_rotate_image(IMAGEE, angle)\n screen.blit(IMAGEE, (w + recty.x, z + recty.y))\n ## DRAW TEXT IMAGE: ##\ndef draw_HEHEHE_text (t, special, size, w, z, colory):\n fonty = HEHEHE_font(size)\n IMAGEE = fonty.render(t, special, colory)\n screen.blit(IMAGEE, (w, z))\ndef 
draw_HEHEHE_text_stretch (t, special, size, w, z, colory, wol, zol):\n fonty = HEHEHE_font(size)\n IMAGEE = fonty.render(t, special, colory)\n IMAGEE = HEHEHE_stretch_image(IMAGEE, wol, zol)\n screen.blit(IMAGEE, (w, z))\ndef draw_HEHEHE_text_stretch_rotate (t, special, size, w, z, colory, wol, zol, angle):\n fonty = HEHEHE_font(size)\n IMAGE = fonty.render(t, special, colory)\n IMAGE = HEHEHE_stretch_image(IMAGE, wol, zol)\n IMAGE, recty = HEHEHE_rotate_image(IMAGE, angle)\n screen.blit(IMAGE, (w + recty.x, z + recty.y))\n ### AAAH! FREAKY!! ###\nclock = pygame.time.Clock()\nImage_directory = \"PNG/\"\nSound_directory = \"SOUND/\"\n ### WHAT IN THE WORLD IS THIS!!??: ###\npygame.init()\nAPPLICATION_w_size = 700\nAPPLICATION_z_size = 500\nscreen = pygame.display.set_mode((APPLICATION_w_size, APPLICATION_z_size), RESIZABLE)\n#pygame.display.set_icon(image_file_data(Image_directory + \"ICON.PNG\", 0)[0])\npygame.display.set_caption('Mattew N. Brown copyright 2007')\npygame.mouse.set_visible(1)\n ### WHAT IN THE WORLD IS THIS!!!!!!!!!!!!??: ###\nbackground = pygame.Surface(screen.get_size())\nbackground = background.convert()\nbackground.fill((0, 0, 0))\n ### THIS IS DRIVING MY CAR CRAZYs!!!\nscreen.blit(background, (0, 0))\npygame.display.flip()\nrandom.seed()\n\n ## LOAD ALL IMAGES AND SOUNDS: ##\nimage_MAX = 0\nimage_file_name = [];\nimage = []\nimage_rect = []\nwhile (image_MAX <= (-1) ):\n I = image_MAX\n Itemp1, Itemp2 = image_file_data(Image_directory + image_file_name[I], 0)\n image += [Itemp1]\n image_rect += [Itemp2]\n image_MAX += 1\nsound_MAX = 0\nsound_file_name = [];\nsound = []\nsound_rect = []\nwhile (sound_MAX <= (-1) ):\n I = sound_MAX\n Itemp1, Itemp2 = sound_file_data(Sound_directory + sound_file_name[I])\n sound += [Itemp1]\n sound_rect += [Itemp2]\n sound_MAX += 1\n ## LOAD ONE AND ONLY MUSIC FILE: ##\n#pygame.mixer.music.load(d + 'PCDV0043.WAV')\n\n # QUIT\t none\n # ACTIVEEVENT gain, state\n # KEYDOWN\t unicode, key, mod\n # KEYUP\t key, mod\n 
# MOUSEMOTION pos, rel, buttons\n # MOUSEBUTTONUP pos, button\n # MOUSEBUTTONDOWN pos, button\n # JOYAXISMOTION joy, axis, value\n # JOYBALLMOTION joy, ball, rel\n # JOYHATMOTION joy, hat, value\n # JOYBUTTONUP joy, button\n # JOYBUTTONDOWN joy, button\n # VIDEORESIZE size, w, h\n # VIDEOEXPOSE none\n # USEREVENT code\n\n\n\n ## MAIN: ##\nif __name__ == '__main__':\n EE = ['', '', '', '', '', '', '', '', '', '',\n '', '', '', '', '']\n b = (190, 130, 110)\n COLORY = [b, b, b, b, b, b, b, b, b, b,\n b, b, b, b, b]\n angy = 0\n loopy = 1\n while (loopy == 1):\n angy += 1\n if angy > 360:\n angy = 1\n clock.tick(70)\n screen.blit(background, (0, 0))\n for e in pygame.event.get():\n if e.type == QUIT:\n loopy = 0\n #elif e.type == KEYDOWN and e.key == K_ESCAPE:\n # loopy = 0\n else:\n nnnnnn = -1\n if e.type == QUIT: nnnnnn = 0\n if e.type == ACTIVEEVENT: nnnnnn = 1\n if e.type == KEYDOWN: nnnnnn = 2\n if e.type == KEYUP: nnnnnn = 3\n if e.type == MOUSEMOTION: nnnnnn = 4\n if e.type == MOUSEBUTTONUP: nnnnnn = 5\n if e.type == MOUSEBUTTONDOWN: nnnnnn = 6\n if e.type == JOYAXISMOTION: nnnnnn = 7\n if e.type == JOYBALLMOTION: nnnnnn = 8\n if e.type == JOYHATMOTION: nnnnnn = 9\n if e.type == JOYBUTTONUP: nnnnnn = 10\n if e.type == JOYBUTTONDOWN: nnnnnn = 11\n if e.type == VIDEORESIZE:\n nnnnnn = 12\n APPLICATION_w_size = e.size[0]\n APPLICATION_z_size = e.size[1]\n screen = pygame.display.set_mode((APPLICATION_w_size, APPLICATION_z_size), RESIZABLE)\n if e.type == VIDEOEXPOSE: nnnnnn = 13\n if e.type == USEREVENT: nnnnnn = 14\n if nnnnnn != -1:\n EE[nnnnnn] = str(e); COLORY[nnnnnn] = (190, 200, 255)\n WOW = 0\n while (WOW < 14):\n draw_HEHEHE_text(\"E\" + str(WOW) + \" = \" + EE[WOW], 1, 24, 30, 20 + (WOW * 22), COLORY[WOW])\n if COLORY[WOW] == b:\n COLORY[WOW] = COLORY[WOW]\n else:\n COLORY[WOW] = (200, 240, 200)\n WOW += 1\n # QUIT\t none\n # ACTIVEEVENT gain, state\n # KEYDOWN\t unicode, key, mod\n # KEYUP\t key, mod\n # MOUSEMOTION pos, rel, buttons\n # MOUSEBUTTONUP 
pos, button\n # MOUSEBUTTONDOWN pos, button\n # JOYAXISMOTION joy, axis, value\n # JOYBALLMOTION joy, ball, rel\n # JOYHATMOTION joy, hat, value\n # JOYBUTTONUP joy, button\n # JOYBUTTONDOWN joy, button\n # VIDEORESIZE size, w, h\n # VIDEOEXPOSE none\n # USEREVENT code\n #I = 0\n #www = 0\n #while (www < 2):\n # www += 1\n # zzz = 0\n # while (zzz < 10):\n # zzz += 1\n # #if (I < image_MAX): draw_HEHEHE_image(I, www * 40, zzz * 40)\n # if (I < image_MAX): draw_HEHEHE_image_stretch_rotate(I, www * 40, zzz * 40, 40, 40, angy)\n # I += 1\n #draw_HEHEHE_text_stretch(\"BOOM!\", 1, 40, 330, 400, (255, 255, 255), 20, 20)\n #draw_HEHEHE_text_stretch_rotate(\"BOOM!\", 1, 40, 140, 400, (255, 255, 255), 0, 0, angy)\n #draw_HEHEHE_text_stretch_rotate(\"WEEEE!\", 1, 30, 450, 470, (255, 255, 255), 0, 0, angy)\n pygame.display.flip()", "user_title": "Anonymous", "datetimeon": "2007-07-28T12:28:50", "link": "pygame.event", "id": 779}, {"content": "# Matthew N. Brown copyright 2007\n# Here is an example program that\n# draws: polygons, circles, and rectangles:\n#\n# You can copy this program on to\n# your own computer and run it.\n#\n\nimport os, sys\nimport random\nimport pygame\nfrom pygame.locals import *\nif not pygame.font: print 'Warning, fonts disabled'\nif not pygame.mixer: print 'Warning, sound disabled'\nimport time\nimport gc\nimport math\n\npygame.init()\nAPPLICATION_w_size = 700\nAPPLICATION_z_size = 500\nscreen = pygame.display.set_mode((APPLICATION_w_size, APPLICATION_z_size), RESIZABLE)\n#screen = pygame.display.set_mode((APPLICATION_w_size, APPLICATION_z_size), FULLSCREEN)\npygame.display.set_caption(\"HEHE test draw thingie program Matthew N. 
Brown copyright 2007\")\n#pygame.mouse.set_visible(0)\nglobal background\nbackground = pygame.Surface(screen.get_size())\nbackground.fill((0, 0, 0))\nscreen.blit(background, (0, 0))\npygame.display.flip()\nrandom.seed()\n\nplayer_w = 3\nplayer_z = 2\n\nx = -1\nmap_w_size = 10\nmap_z_size = 10\nmap = [[x, x, x, x, x, x, x, x, x, x, x],\n [x, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1],\n [x, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1],\n [x, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1],\n [x, 1, 1, 0, 0, 1, 0, 0, 4, 0, 0],\n [x, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0],\n [x, 1, 1, 0, 0, 2, 0, 1, 1, 0, 1],\n [x, 1, 1, 1, 0, 2, 0, 1, 0, 0, 1],\n [x, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1],\n [x, 1, 0, 4, 0, 1, 0, 0, 0, 0, 1],\n [x, 1, 0, 2, 2, 2, 2, 2, 2, 2, 1]]\n\nmap[player_z][player_w] = 3\n\n ## IMAGE STRETCH AND ROTATE: ##\ndef chilly_font(size):\n fonti = pygame.font.Font(None, size)\n fonti.set_bold(0)\n return fonti\n ## DRAW TEXT IMAGE: ##\ndef draw_chilly_text (t, special, size, w, z, colory):\n fonty = chilly_font(size)\n IMAGEE = fonty.render(t, special, colory)\n screen.blit(IMAGEE, (w, z))\n\n\n ### some functions: ###\ndef in_repeating_boundy (n, b1, b2):\n if n < b1: n = b2\n if n > b2: n = b1\n return n\ndef in_boundy (n, b1, b2):\n if n < b1: n = b1\n if n > b2: n = b2\n return n\ndef in_boundy2D ((w, z), (w1, z1, w2, z2)):\n if w < w1: w = w1\n if w > w2: w = w2\n if z < z1: z = z1\n if z > z2: z = z2\n return w, z\ndef chilly_distance (w1, z1, w2, z2):\n return math.sqrt(math.pow(w1 - w2, 2) + math.pow(z1 - z2, 2))\ndef chilly_rect_touching_rect(w1, z1, wol1, zol1, w2, z2, wol2, zol2):\n w2 -= w1\n z2 -= z1\n ww1 = -wol2\n zz1 = -zol2\n return (w2 >= ww1 and w2 <= wol1 and z2 >= zz1 and z2 <= zol1)\n\n ## keys and mouse stuff: ##\nglobal ky_held, ky_first_held, ky_time_last_pressed\nglobal mouse_w, mouse_z, mouse_inn, mouse_left_pressed, mouse_right_pressed, mouse_left_held, mouse_right_held\nnot_mouse_left_or_right_held = 1\nmouse_left_held = 0\nmouse_right_held = 0\nmouse_w = 0\nmouse_z = 0\nmouse_inn = 
0\nky_held = [0]\nky_first_held = [0]\nky_time_last_pressed = [0]\nm = -1\nwhile (m < 500):\n m += 1\n ky_held += [0]\n ky_first_held += [0]\n ky_time_last_pressed += [0]\n\n ## MOUSE AND KEY FUNCTIONS: ##\ndef clear_kys():\n m = -1\n while (m < 500):\n m += 1\n ky_held[m] = 0\n ky_first_held[m] = 0\n ky_time_last_pressed[m] = 0\ndef mouse_left_pressed_CEV():\n global mouse_left_pressed\n if mouse_left_pressed: mouse_left_pressed = 0; return 1\ndef mouse_right_pressed_CEV():\n global mouse_right_pressed\n if mouse_right_pressed: mouse_right_pressed = 0; return 1\ndef old_style_ky(n):\n return (ky_first_held_CEV(n) or (ky_held[n] and ky_time_last_pressed[n] < time.time() - .3))\ndef ky_first_held_CEV(n):\n if (ky_first_held[n]):\n ky_first_held[n] = 0\n return 1\n else:\n return 0\ndef mouse_in_rect (w, z, wol, zol):\n return (mouse_w >= w and mouse_z >= z and mouse_w <= w + wol and mouse_z <= z + zol)\ndef mouse_in_circle (w, z, rad):\n dia = rad * 2\n if mouse_in_rect(w - rad, z - rad, w + dia, z + dia):\n return (chilly_distance(mouse_w, mouse_z, w, z) < rad)\n else:\n return 0\n\n ## CHECK FOR: KEYBOARD, MOUSE, JOYSTICK, AND OTHERY INPUTY: ##\ndef check_for_keys():\n global mouse_w, mouse_z, mouse_inn, mouse_left_pressed, mouse_right_pressed, mouse_left_held, mouse_right_held\n global loopy, letter_hitty\n global not_mouse_left_or_right_held\n for e in pygame.event.get():\n if e.type == QUIT:\n loopy = 0\n if e.type == ACTIVEEVENT:\n mouse_inn = (e.gain and (e.state == 1 or e.state == 6))\n if not mouse_inn:\n mouse_w = 0\n mouse_z = 0\n if e.type == KEYDOWN:\n ky_held[e.key] = 1\n ky_first_held[e.key] = 1\n ky_time_last_pressed[e.key] = time.time()\n if (e.key >= 97 and e.key <= 122):\n letter_hitty = e.unicode.lower()\n if e.type == KEYUP:\n ky_held[e.key] = 0\n #ky_first_held[e.key] = 0\n if e.type == MOUSEMOTION:\n mouse_w = e.pos[0]\n mouse_z = e.pos[1]\n if e.type == MOUSEBUTTONUP:\n if e.button == 1: mouse_left_held = 0\n if e.button == 3: 
mouse_right_held = 0\n if not mouse_left_held and not mouse_right_held: not_mouse_left_or_right_held = 1\n if e.type == MOUSEBUTTONDOWN:\n mouse_left_pressed = e.button == 1\n mouse_right_pressed = e.button == 3\n mouse_left_held = mouse_left_held or e.button == 1\n mouse_right_held = mouse_right_held or e.button == 3\n if mouse_left_held or mouse_right_held: not_mouse_left_or_right_held = 0\n if e.type == JOYAXISMOTION: nnnnnn = 7\n if e.type == JOYBALLMOTION: nnnnnn = 8\n if e.type == JOYHATMOTION: nnnnnn = 9\n if e.type == JOYBUTTONUP: nnnnnn = 10\n if e.type == JOYBUTTONDOWN: nnnnnn = 11\n if e.type == VIDEORESIZE:\n global background, Dimage_editing_screen, screen, APPLICATION_w_size, APPLICATION_z_size\n APPLICATION_w_size = e.size[0]\n APPLICATION_z_size = e.size[1]\n screen = pygame.display.set_mode((APPLICATION_w_size, APPLICATION_z_size), RESIZABLE)\n background = pygame.Surface((APPLICATION_w_size, APPLICATION_z_size))\n if e.type == VIDEOEXPOSE: nnnnnn = 13\n if e.type == USEREVENT: nnnnnn = 14\n\n ### MORE STUFF: ###\nHE_HE_surfacey = pygame.Surface((40, 40))\ncolor1 = (200, 200, 200)\ncolor2 = (200, 0, 0)\ncolor3 = (0, 200, 0)\ncolor4 = (130, 180, 180)\nblack_colory = (0, 0, 0)\nHE_HE_surfacey.fill(black_colory)\n\ndef try_to_push_block(w, z, wo, zo):\n if map[z][w] == 1:\n w_pushed = w + wo\n z_pushed = z + zo\n w_pushed, z_pushed = in_boundy2D((w_pushed, z_pushed), (0, 0, map_w_size, map_z_size))\n if map[z_pushed][w_pushed] == 0:\n map[z][w] = 0\n map[z_pushed][w_pushed] = 1\n\ndef draw_map():\n ww = 0\n while ww < map_w_size:\n ww += 1\n zz = 0\n while zz < map_z_size:\n zz += 1\n n = map[zz][ww]\n screen.blit(HE_HE_surfacey, (ww * 40, zz * 40))\n if n == 1:\n pygame.draw.rect(screen, color1, (ww * 40, zz * 40, 40, 40), 2)\n elif n == 2:\n #pygame.draw.rect(screen, color2, (ww * 40, zz * 40, 40, 40), 2)\n pygame.draw.circle(screen, color2, (ww * 40 + 20, zz * 40 + 20), 17, 2)\n elif n == 3:\n #pygame.draw.rect(screen, color3, (ww * 40, zz * 40, 
40, 40), 2)\n locy_w = ww * 40\n locy_z = zz * 40\n point1 = (20 + locy_w, 10 + locy_z)\n point2 = (40 + locy_w, 12 + locy_z)\n point3 = (30 + locy_w, 19 + locy_z)\n point4 = (30 + locy_w, 30 + locy_z)\n point5 = (20 + locy_w, 20 + locy_z)\n points = (point1, point2, point3, point4, point5)\n pygame.draw.polygon(screen, color3, points, 2)\n elif n == 4:\n pygame.draw.rect(screen, color4, (ww * 40, zz * 40, 40, 40), 4)\n\n#######################################################################################\n#######################################################################################\n#######################################################################################\n#######################################################################################\n#######################################################################################\n#######################################################################################\n#######################################################################################\n#######################################################################################\n#######################################################################################\n\n# NOTE: w = x\n# NOTE: z = y\n# -- HE, HE, Bad habit of mine . . .\n\n ## MAIN: ##\nif __name__ == '__main__':\n\n # THE MAIN, MAIN, MAIN LOOP:\n loopy = 1\n while (loopy == 1):\n\n\n mouse_left_pressed = 0\n mouse_right_pressed = 0\n check_for_keys()\n\n draw_map()\n draw_chilly_text('Press the arrow keys to move . . 
.', 0, 20, 0, 0, (255, 255, 255))\n\n wa = player_w\n za = player_z\n map[player_z][player_w] = 0\n if old_style_ky(276): player_w -= 1\n if old_style_ky(273): player_z -= 1\n if old_style_ky(275): player_w += 1\n if old_style_ky(274): player_z += 1\n player_w, player_z = in_boundy2D((player_w, player_z), (0, 0, map_w_size, map_z_size))\n try_to_push_block(player_w, player_z, player_w - wa, player_z - za)\n if map[player_z][player_w] != 0:\n player_w = wa\n player_z = za\n map[player_z][player_w] = 3\n\n #if ky_first_held[27]: loopy = 0\n pygame.display.flip()", "user_title": "Anonymous", "datetimeon": "2007-07-29T17:05:04", "link": "pygame.draw", "id": 781}, {"content": "# Matthew N. Brown copyright 2007\n# Here is an example program that\n# draws a bouncing ball using: pygame.draw.circle\n#\n# You can copy this program on to\n# your own computer and run it.\n#\n\nimport os, sys\nimport random\nimport pygame\nfrom pygame.locals import *\nif not pygame.font: print 'Warning, fonts disabled'\nif not pygame.mixer: print 'Warning, sound disabled'\nimport time\nimport gc\nimport math\n\npygame.init()\nAPPLICATION_w_size = 700\nAPPLICATION_z_size = 500\nscreen = pygame.display.set_mode((APPLICATION_w_size, APPLICATION_z_size), RESIZABLE)\n#screen = pygame.display.set_mode((APPLICATION_w_size, APPLICATION_z_size), FULLSCREEN)\npygame.display.set_caption(\"HEHE test circle thingie program Matthew N. 
Brown copyright 2007\")\n#pygame.mouse.set_visible(0)\nglobal background\nbackground = pygame.Surface(screen.get_size())\nbackground.fill((0, 0, 0))\nscreen.blit(background, (0, 0))\npygame.display.flip()\nrandom.seed()\n\n\n ## IMAGE STRETCH AND ROTATE: ##\ndef HEHEHE_font(size):\n fonti = pygame.font.Font(None, size)\n fonti.set_bold(0)\n return fonti\n ## DRAW TEXT IMAGE: ##\ndef draw_HEHEHE_text (t, special, size, w, z, colory):\n fonty = HEHEHE_font(size)\n IMAGEE = fonty.render(t, special, colory)\n screen.blit(IMAGEE, (w, z))\n\n\n ### some functions: ###\ndef in_repeating_boundy (n, b1, b2):\n if n < b1: n = b2\n if n > b2: n = b1\n return n\ndef in_boundy (n, b1, b2):\n if n < b1: n = b1\n if n > b2: n = b2\n return n\ndef in_boundy2D ((w, z), (w1, z1, w2, z2)):\n if w < w1: w = w1\n if w > w2: w = w2\n if z < z1: z = z1\n if z > z2: z = z2\n return w, z\ndef HEHEHE_distance (w1, z1, w2, z2):\n return math.sqrt(math.pow(w1 - w2, 2) + math.pow(z1 - z2, 2))\ndef HEHEHE_rect_touching_rect(w1, z1, wol1, zol1, w2, z2, wol2, zol2):\n w2 -= w1\n z2 -= z1\n ww1 = -wol2\n zz1 = -zol2\n return (w2 >= ww1 and w2 <= wol1 and z2 >= zz1 and z2 <= zol1)\n\n ## keys and mouse stuff: ##\nglobal ky_held, ky_first_held, ky_time_last_pressed\nglobal mouse_w, mouse_z, mouse_inn, mouse_left_pressed, mouse_right_pressed, mouse_left_held, mouse_right_held\nnot_mouse_left_or_right_held = 1\nmouse_left_held = 0\nmouse_right_held = 0\nmouse_w = 0\nmouse_z = 0\nmouse_inn = 0\nky_held = [0]\nky_first_held = [0]\nky_time_last_pressed = [0]\nm = -1\nwhile (m < 500):\n m += 1\n ky_held += [0]\n ky_first_held += [0]\n ky_time_last_pressed += [0]\n\n ## MOUSE AND KEY FUNCTIONS: ##\ndef clear_kys():\n m = -1\n while (m < 500):\n m += 1\n ky_held[m] = 0\n ky_first_held[m] = 0\n ky_time_last_pressed[m] = 0\ndef mouse_left_pressed_CEV():\n global mouse_left_pressed\n if mouse_left_pressed: mouse_left_pressed = 0; return 1\ndef mouse_right_pressed_CEV():\n global mouse_right_pressed\n if 
mouse_right_pressed: mouse_right_pressed = 0; return 1\ndef old_style_ky(n):\n return (ky_first_held_CEV(n) or (ky_held[n] and ky_time_last_pressed[n] < time.time() - .3))\ndef ky_first_held_CEV(n):\n if (ky_first_held[n]):\n ky_first_held[n] = 0\n return 1\n else:\n return 0\ndef mouse_in_rect (w, z, wol, zol):\n return (mouse_w >= w and mouse_z >= z and mouse_w <= w + wol and mouse_z <= z + zol)\ndef mouse_in_circle (w, z, rad):\n dia = rad * 2\n if mouse_in_rect(w - rad, z - rad, w + dia, z + dia):\n return (HEHEHE_distance(mouse_w, mouse_z, w, z) < rad)\n else:\n return 0\n\n ## CHECK FOR: KEYBOARD, MOUSE, JOYSTICK, AND OTHERY INPUTY: ##\ndef check_for_keys():\n global mouse_w, mouse_z, mouse_inn, mouse_left_pressed, mouse_right_pressed, mouse_left_held, mouse_right_held\n global loopy, letter_hitty\n global not_mouse_left_or_right_held\n for e in pygame.event.get():\n if e.type == QUIT:\n loopy = 0\n if e.type == ACTIVEEVENT:\n mouse_inn = (e.gain and (e.state == 1 or e.state == 6))\n if not mouse_inn:\n mouse_w = 0\n mouse_z = 0\n if e.type == KEYDOWN:\n ky_held[e.key] = 1\n ky_first_held[e.key] = 1\n ky_time_last_pressed[e.key] = time.time()\n if (e.key >= 97 and e.key <= 122):\n letter_hitty = e.unicode.lower()\n if e.type == KEYUP:\n ky_held[e.key] = 0\n #ky_first_held[e.key] = 0\n if e.type == MOUSEMOTION:\n mouse_w = e.pos[0]\n mouse_z = e.pos[1]\n if e.type == MOUSEBUTTONUP:\n if e.button == 1: mouse_left_held = 0\n if e.button == 3: mouse_right_held = 0\n if not mouse_left_held and not mouse_right_held: not_mouse_left_or_right_held = 1\n if e.type == MOUSEBUTTONDOWN:\n mouse_left_pressed = e.button == 1\n mouse_right_pressed = e.button == 3\n mouse_left_held = mouse_left_held or e.button == 1\n mouse_right_held = mouse_right_held or e.button == 3\n if mouse_left_held or mouse_right_held: not_mouse_left_or_right_held = 0\n if e.type == JOYAXISMOTION: nnnnnn = 7\n if e.type == JOYBALLMOTION: nnnnnn = 8\n if e.type == JOYHATMOTION: nnnnnn = 9\n if e.type 
== JOYBUTTONUP: nnnnnn = 10\n if e.type == JOYBUTTONDOWN: nnnnnn = 11\n if e.type == VIDEORESIZE:\n global background, Dimage_editing_screen, screen, APPLICATION_w_size, APPLICATION_z_size\n APPLICATION_w_size = e.size[0]\n APPLICATION_z_size = e.size[1]\n screen = pygame.display.set_mode((APPLICATION_w_size, APPLICATION_z_size), RESIZABLE)\n background = pygame.Surface((APPLICATION_w_size, APPLICATION_z_size))\n if e.type == VIDEOEXPOSE: nnnnnn = 13\n if e.type == USEREVENT: nnnnnn = 14\n\n ### MORE STUFF: ###\nball_w = 30.0\nball_z = 20.0\n\nball_wol = 4.0\nball_zol = -1.0\n\ngravity_w = 0.0\ngravity_z = 1.0\n\nradius = 11.0\n\nmakes_ball_slower_per_bounce = 1.2\n\n#######################################################################################\n#######################################################################################\n#######################################################################################\n#######################################################################################\n#######################################################################################\n#######################################################################################\n#######################################################################################\n#######################################################################################\n#######################################################################################\n\n# NOTE: w = x\n# NOTE: z = y\n# -- HE, HE, Bad habit of mine . . 
.\n\n ## MAIN: ##\nif __name__ == '__main__':\n\n # THE MAIN, MAIN, MAIN LOOP:\n loopy = 1\n while (loopy == 1):\n\n t = time.time()\n while t > time.time() - .03:\n pass\n mouse_left_pressed = 0\n mouse_right_pressed = 0\n check_for_keys()\n\n ball_wol += gravity_w\n ball_zol += gravity_z\n\n if old_style_ky(276): ball_wol -= 12\n if old_style_ky(273): ball_zol -= 22\n if old_style_ky(275): ball_wol += 12\n if old_style_ky(274): ball_zol += 22\n if ky_held[115]: ball_wol = 0; ball_zol = 0\n if ky_held[99]: ball_wol = (random.random() * 400) - 200; ball_zol = (random.random() * 400) - 200\n\n ball_w += ball_wol\n ball_z += ball_zol\n\n if ball_w < radius: ball_w = radius; ball_wol = -(ball_wol / makes_ball_slower_per_bounce)\n if ball_z < radius: ball_z = radius; ball_zol = -(ball_zol / makes_ball_slower_per_bounce)\n if ball_w > APPLICATION_w_size - radius: ball_w = APPLICATION_w_size - radius; ball_wol = -(ball_wol / makes_ball_slower_per_bounce)\n if ball_z > APPLICATION_z_size - radius: ball_z = APPLICATION_z_size - radius; ball_zol = -(ball_zol / makes_ball_slower_per_bounce)\n\n screen.fill((0, 0, 0))\n draw_HEHEHE_text('Press the arrow keys to move ball.', 0, 25, 0, 0, (255, 255, 255))\n draw_HEHEHE_text('Hold S to stop ball.', 0, 25, 0, 30, (255, 255, 255))\n draw_HEHEHE_text('press C to make ball go crazy.', 0, 25, 0, 70, (255, 255, 255))\n pygame.draw.circle(screen, (200, 200, 200), (int(ball_w), int(ball_z)), int(radius))\n\n #if ky_first_held[27]: loopy = 0\n pygame.display.flip()", "user_title": "Anonymous", "datetimeon": "2007-07-29T19:02:50", "link": "pygame.draw.circle", "id": 782}, {"content": "", "user_title": "Anonymous", "datetimeon": "2007-08-08T10:35:27", "link": "You have a great site. All in your web is very useful. Please keep on working.", "id": 796}, {"content": "Is this fast ? 
What is better for software systems ?", "user_title": "Anonymous", "datetimeon": "2007-08-12T18:03:51", "link": "Surface.blit", "id": 800}, {"content": "I have not tried photoshop or something to create alpha channels in bitmaps directly. Instead, I use a mask color in a 'normal' bitmap, and make that look transparant in the icon.\ncreate a bitmap in mspaint, black areas will be transparant, size 32x32 pixels, save it as 'icon.bmp'.\n \nthen create a file named icon.py and put this in it:\n###\nimport pygame\n\ndef seticon(iconname):\n \"\"\"\n give an iconname, a bitmap sized 32x32 pixels, black (0,0,0) will be alpha channel\n \n the windowicon will be set to the bitmap, but the black pixels will be full alpha channel\n \n can only be called once after pygame.init() and before somewindow = pygame.display.set_mode()\n \"\"\"\n icon=pygame.Surface((32,32))\n icon.set_colorkey((0,0,0))#and call that color transparant\n rawicon=pygame.image.load(iconname)#must be 32x32, black is transparant\n for i in range(0,32):\n for j in range(0,32):\n icon.set_at((i,j), rawicon.get_at((i,j)))\n pygame.display.set_icon(icon)#set wind\n\npygame.init()\nseticon('icon.bmp')\nwindow=pygame.display.set_mode((250,250))\nbackground=pygame.Surface(window.get_size())\nbackground.fill((50,50,50))\n \nwhile 1:\n for event in pygame.event.get():\n if not event.type == pygame.MOUSEMOTION:#print all events, but not the mousemoves :) for feedback info\n print str(event)\n if event.type == pygame.QUIT: # close window cross (upper right corner) pressed: exit\n raise SystemExit\n elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE: #escape pressed: exit\n raise SystemExit\n window.blit(background, (0, 0))#fresh background \n #insert other blitty things here onto the window \n pygame.display.flip()#show completed window\n###\nto see the effect: a transparant icon, based on a normal bitmap with a mask color. 
\nYou can easily modify the code to use an different mask color or filename if needed.", "user_title": "Anonymous", "datetimeon": "2007-08-12T20:35:22", "link": "pygame.display.set_icon", "id": 803}, {"content": "This function's resume is wrong, it says:\nGroup.has(*sprites): return None\nshould say\nGroup.has(*sprites): return Boolean", "user_title": "Anonymous", "datetimeon": "2007-08-13T03:15:52", "link": "Group.has", "id": 805}, {"content": "In other words, this returns the area in which 2 Rects overlap.\nThis implies that rectA.clip(rectB) == rectB.clip(rectA).", "user_title": "Anonymous", "datetimeon": "2007-08-17T03:35:39", "link": "Rect.clip", "id": 809}, {"content": "There should be more types of sounds loadable. Also a way to save them.", "user_title": "Anonymous", "datetimeon": "2007-08-25T14:44:46", "link": "pygame.mixer.Sound", "id": 815}, {"content": "if you want to use the same function but from module PIL\n\n from PIL import Image\n im = pygame.image.load (\"image.png\")\n s = pygame.image.tostring (im, \"RGBX\")\n temp = Image.fromstring (\"RGBX\", im.get_size (), s)\n tu = (0,0, im.get_size () [0]-1, im.get_size () [1] - 1)\n temp = temp.transform (size2, Image.EXTENT, tu, Image.BICUBIC)\n mode = temp.mode\n size = temp.size\n data = temp.tostring()\n res = pygame.image.fromstring (data, size, mode)", "user_title": "Anonymous", "datetimeon": "2007-08-26T09:36:23", "link": "pygame.transform.rotate", "id": 817}, {"content": "if you want to use the same function but from module PIL\n\n from PIL import Image\n im = pygame.image.load (\"image.png\")\n s = pygame.image.tostring (im, \"RGBX\")\n temp = Image.fromstring (\"RGBX\", im.get_size (), s)\n tu = (0,0, im.get_size () [0]-1, im.get_size () [1] - 1)\n temp = temp.transform (size2, Image.EXTENT, tu, Image.BICUBIC)\n mode = temp.mode\n size = temp.size\n data = temp.tostring()\n res = pygame.image.fromstring (data, size, mode)", "user_title": "Anonymous", "datetimeon": "2007-08-26T09:37:15", "link": 
"pygame.transform.scale", "id": 818}, {"content": "Numbers greater than 1.0 seem to be interpreted as 1.0.\nNegative numbers are made positive (absolute value)\n\n-4.0 = 4.0 = 1.0", "user_title": "Anonymous", "datetimeon": "2007-08-26T21:15:03", "link": "Sound.set_volume", "id": 820}, {"content": "If you try to use the alpha in [Color] its not applied, \nbut Draw.lines applies alpha in [Color]", "user_title": "Anonymous", "datetimeon": "2007-08-27T12:19:45", "link": "pygame.draw.aalines", "id": 821}, {"content": "Is it possible to use this on only certain keys,\nor to use different values for different groups of keys?\n\nFor example, say you wanted to have a certain value assigned to the player movement keys,\nbut a different value assigned to the attack keys, and no value set for the menu keys.", "user_title": "Anonymous", "datetimeon": "2007-08-31T04:29:56", "link": "pygame.key.set_repeat", "id": 823}, {"content": "That depends on a number of things. For digital D-pads (like on a SNES controller)\nthe values reported will always be \"full blast\" because that's how the gamepad\nhardware reports the direction of the D-pad. For analog sticks, like the ones in\nthe middle of a PS2 controller, they will usually report a value in the range of\na 32-bit integer (or maybe a 16-bit integer, or even a float--I don't really know).\nIt all depends on the hardware, not to mention the drivers of your OS.", "user_title": "Anonymous", "datetimeon": "2007-09-04T04:16:59", "link": "pygame.joystick.Joystick", "id": 825}, {"content": "This will make one single pixel a Color at coordanates x, y on a Surface:\npygame.draw.rect(Surface, Color, (x, y, 1, 1))", "user_title": "Anonymous", "datetimeon": "2007-09-05T21:50:41", "link": "pygame.draw.rect", "id": 826}, {"content": "Hi Tim,\n\nMaybe you realised your error by now, but here is a little clarification for the\npeople reading your comment.\n\nYou created a square with a side of 4 pixel at the position (0, 0). 
\n\n 0 1 2 3 4\n0 x x x x .\n1 x x x x .\n2 x x x x .\n3 x x x x .\n4 . . . . .\n\nSo of course any position with x or y >= 4 will be outside the square.", "user_title": "Anonymous", "datetimeon": "2008-05-04T00:13:13", "link": "Rect.collidepoint", "id": 1821}, {"content": "How to draw a single pixel: draw a circle with radius zero! Took me a while to find this.\n\ncircle( ..., 0 ) will give you a single pixel", "user_title": "Anonymous", "datetimeon": "2007-09-08T02:17:09", "link": "pygame.draw.circle", "id": 837}, {"content": "It seems to be true that None will cause set_allowed to BLOCK all events.\n...even though it was April 1st.\n\nset_blocked(None) doesn't seem to have any effect like described above.", "user_title": "Anonymous", "datetimeon": "2007-09-09T17:05:37", "link": "pygame.event.set_allowed", "id": 847}, {"content": "set_blocked(None) doesn't seem to have this effect at all, see the comment in set_allowed after April 1st.", "user_title": "Anonymous", "datetimeon": "2007-09-09T17:06:19", "link": "pygame.event.set_blocked", "id": 848}, {"content": "Antialised text *does* work on black backgrounds, you just have to be careful only to\nblit it once, because the parts with less than full alpha will build up \n(very quickly if you are blitting over and over.)", "user_title": "Anonymous", "datetimeon": "2007-09-12T21:29:31", "link": "Font.render", "id": 851}, {"content": "There is an alternative to setting pixels one-at-a-time that is much, much faster. 
Pygame's Surfarray module will allow you to access the pixels like an array.\n\nIf you need to manipulate pixels on an individual level, it is strongly recommended that you use Surfarrays instead of set_at.", "user_title": "Anonymous", "datetimeon": "2008-05-07T10:56:34", "link": "Surface.set_at", "id": 1842}, {"content": "fade out does NOT block till it is finished in windows!!!!", "user_title": "Anonymous", "datetimeon": "2007-11-12T00:14:47", "link": "pygame.mixer.music.fadeout", "id": 1084}, {"content": "There is no explanation of 'color' argument...?", "user_title": "Anonymous", "datetimeon": "2007-10-09T13:57:03", "link": "pygame.draw.rect", "id": 913}, {"content": "colors are usually done as a tuple\n(red light out of 255,green light out of 255, blue light out of 255).", "user_title": "Anonymous", "datetimeon": "2007-10-09T20:01:34", "link": "pygame.draw.rect", "id": 915}, {"content": "OGG is a container format... They probably meant only OGG/Vorbis.", "user_title": "Anonymous", "datetimeon": "2007-10-17T11:20:52", "link": "pygame.mixer.Sound", "id": 932}, {"content": "You can use Surface.set_at((x,y), colour) to set a pixel.", "user_title": "Anonymous", "datetimeon": "2007-10-17T16:49:42", "link": "pygame.draw.rect", "id": 933}, {"content": "It is a dissapointment to discover that \\n does not\nwork with the default font and merely shows a box.", "user_title": "Anonymous", "datetimeon": "2007-10-18T18:45:51", "link": "Font.render", "id": 937}, {"content": "# This is an example that uses pygame.draw.rect:\nimport os, sys\nimport random\nimport pygame\nfrom pygame.locals import *\npygame.init()\nAPPLICATION_x_size = 400\nAPPLICATION_y_size = 300\nscreen = pygame.display.set_mode((APPLICATION_x_size, APPLICATION_y_size))\npygame.display.set_caption('Fun Boring Example comes with Source Code too!!')\npygame.mouse.set_visible(True)\n#pygame.mouse.set_visible(False)\nblack_square_that_is_the_size_of_the_screen = 
pygame.Surface(screen.get_size())\nblack_square_that_is_the_size_of_the_screen.fill((0, 0, 0))\nscreen.blit(black_square_that_is_the_size_of_the_screen, (0, 0))\npygame.display.flip()\nWeeee = True\nwhile Weeee:\n # a color can be: (0 to 255, 0 to 255, 0 to 255)\n My_red_color = (255, 0, 0)\n My_blue_color = (0, 0, 255)\n My_green_color = (0, 255, 0)\n My_yellow_color = (255, 255, 0)\n WHITE_WHITE_HOORAY = (255, 255, 255)\n My_light_red_color = (255, 180, 180)\n My_light_blue_color = (190, 190, 255)\n # \"screen.set_at((x, y), Color)\" and \"pygame.draw.rect(screen, Color, (x, y, x_size, y_size))\" draw colors on to an \"in computer memory image\" called: \"screen\"\n screen.set_at(( 1, 1), My_yellow_color)\n screen.set_at(( 2, 2), My_yellow_color)\n screen.set_at(( 3, 3), My_yellow_color)\n screen.set_at(( 4, 4), My_yellow_color)\n screen.set_at(( 5, 5), My_yellow_color)\n screen.set_at(( 6, 6), My_yellow_color)\n screen.set_at(( 7, 7), My_yellow_color)\n screen.set_at(( 8, 8), My_yellow_color)\n screen.set_at(( 9, 9), My_yellow_color)\n screen.set_at((10, 10), My_yellow_color)\n screen.set_at((11, 11), My_yellow_color)\n screen.set_at((12, 12), My_yellow_color)\n screen.set_at((13, 13), My_yellow_color)\n screen.set_at((14, 14), My_yellow_color)\n screen.set_at((15, 15), My_yellow_color)\n screen.set_at((16, 16), My_yellow_color)\n screen.set_at((17, 17), My_yellow_color)\n screen.set_at((18, 18), My_yellow_color)\n screen.set_at((19, 19), My_yellow_color)\n screen.set_at((20, 20), My_yellow_color)\n pygame.draw.rect(screen, My_red_color, (50, 50, 10, 10))\n pygame.draw.rect(screen, My_red_color, (50, 120, 20, 20))\n pygame.draw.rect(screen, My_blue_color, (50, 150, 30, 30))\n pygame.draw.rect(screen, My_blue_color, (50, 1000, 1000, 10))\n pygame.draw.rect(screen, My_green_color, (200, 10, 40, 40))\n pygame.draw.rect(screen, My_light_red_color, (10, 200, 50, 50))\n pygame.draw.rect(screen, My_light_blue_color, (200, 200, 60, 60))\n pygame.draw.rect(screen, 
My_light_blue_color, (100, 200, 10, 2))\n pygame.draw.rect(screen, WHITE_WHITE_HOORAY, (0, 100, 50, 52))\n # If you delete the below line you should no longer see the vibrant colors.\n pygame.display.flip()\n # if the 'X' button is pressed the window should close:\n Geesh = pygame.event.get()\n if len(Geesh) > 0:\n if Geesh[0].type == QUIT: Weeee = False\n## Once this line is reached the window should close", "user_title": "Anonymous", "datetimeon": "2007-10-18T19:23:51", "link": "pygame.draw.rect", "id": 938}, {"content": "If your program has sources of events that are not managed by pygame, such as\nnetwork socket data, or large files, you must either add a thread that selects\non the source and injects pygame events, or poll the source briefly and rapidly.", "user_title": "Anonymous", "datetimeon": "2007-10-23T23:20:11", "link": "pygame.event", "id": 955}, {"content": "It never seems to be able to load this (error reported, cannot read). But when I run a script that directly runs it(without the loop), it works fine\n\n(songs is a list of filenames loaded form a .txt file)\n\ncurrent_song = 0\nwhile 1:\n if pygame.mixer.music.get_busy() == False:\n print songs[current_song]\n pygame.mixer.music.load(songs[current_song])\n pygame.mixer.music.play() \n current_song += 1", "user_title": "Anonymous", "datetimeon": "2007-10-25T21:19:18", "link": "pygame.mixer.music.load", "id": 965}, {"content": "When you make an icon make a 16x16 icon and then scale it to 32x32 pixels.\nIf you make it 16x16 pixels it looks distorted.\n\nI usally have a transparent 32x32 .gif icon for my games.", "user_title": "Anonymous", "datetimeon": "2007-11-01T18:47:48", "link": "pygame.display.set_icon", "id": 1002}, {"content": "When you make an icon make a 16x16 icon and then scale it to 32x32 pixels.\nIf you make it 16x16 pixels it looks distorted.\n\nI usally have a transparent 32x32 .gif icon for my games.", "user_title": "Anonymous", "datetimeon": "2007-11-01T18:49:22", "link": 
"pygame.display.set_icon", "id": 1003}, {"content": "Always set the icon before you call pygame.display.set_mode", "user_title": "Anonymous", "datetimeon": "2007-11-01T18:50:24", "link": "pygame.display.set_icon", "id": 1004}, {"content": "Here's a quick script for loading images:\n\ndef load_image(file, colorkey=False):\n file = os.path.join('data', file)\n try:\n image = pygame.image.load(file)\n colorkey = image.get_at((0, 0))\n if colorkey is True:\n image.set_colorkey(colorkey, pygame.RLEACCEL)\n except:\n print 'Unable to load: ' + file\n return image.convert_alpha() #Convert any transparency in the image", "user_title": "Anonymous", "datetimeon": "2007-11-01T18:56:17", "link": "pygame.image.load", "id": 1005}, {"content": "I agree that it is a dissapointment about \\n, but anti-aliasing works fine for me!", "user_title": "Anonymous", "datetimeon": "2007-11-01T19:00:33", "link": "Font.render", "id": 1006}, {"content": "this gives me 6 modules initialised OK, 0 failed.\nbut i only know of 5 modules that have to be inited:\ncdrom, display, font, joystick, mixer. which one did i miss?", "user_title": "Anonymous", "datetimeon": "2007-11-03T04:27:28", "link": "pygame.init", "id": 1013}, {"content": "Yeah, it is fast, but what do you want to compare it to when you\nask \"what is better\"? Within PyGame, there's no alternative to\nusing Surface.blit. I'd suggest you either use that, or if you find\nit too slow (but really make sure it's too slow for you, i.e. test\nif the real problem might be using flip instead of update), use\nOpenGL.", "user_title": "Anonymous", "datetimeon": "2007-11-05T05:06:25", "link": "Surface.blit", "id": 1032}, {"content": "Rects do not move to floating point numbers. 
Only integers.\n\n\nSo if you do:\n\nself.rect.move_ip(4.5, 0)\n\nit will actually execute:\n\nself.rect.move_ip(4, 0)\n\n\nThis limitation is really bad if you're making a small screen platformer.\nI hope that Rects will move to floating point numbers in pygame 1.8.", "user_title": "Anonymous", "datetimeon": "2007-11-05T09:47:30", "link": "Rect.move_ip", "id": 1034}, {"content": "Copy this to your computer and save it as a .py file to run a little trig demo.\n\n\n\n#! usr/bin/env python\n\nimport pygame, math\nfrom pygame.locals import *\n\nclass Ship:\n def __init__(self):\n self.image=pygame.Surface((40, 40))\n self.rect=self.image.get_rect(center=(320,240))\n self.x=200\n self.y=150\n self.x_vel=0\n self.y_vel=0\n self.angle=0\n self.point_list = [(0, -20), (2.25, -20), (3.0, -6), (4.05, -20)]\n def update(self):\n self.rect.centerx=self.x\n self.rect.centery=self.y\n self.x+=self.x_vel\n self.y+=self.y_vel\n key = pygame.key.get_pressed()\n if key[K_RIGHT]:\n self.angle -= 4\n if key[K_LEFT]:\n self.angle += 4\n if key[K_UP]:\n self.accel(0.1)\n if key[K_DOWN]:\n self.accel(-0.1)\n def draw(self, surface):\n surface.blit(self.image, self.rect)\n self.image.fill((0, 0, 0))\n\tpoint_list = []\n\tself.angle2 = math.radians(self.angle)\n\tfor p in self.point_list:\n radian, radius = p\n x = int(math.sin(radian+self.angle2)*radius)\n y = int(math.cos(radian+self.angle2)*radius)\n\t point_list.append((x+self.image.get_width()/2,y+self.image.get_height()/2))\n\tpygame.draw.polygon(self.image, (255,255,255), point_list, 1)\n def accel(self, accel_speed):\n self.x_vel += math.sin(self.angle*2*math.pi/360)*-accel_speed\n self.y_vel += math.cos(self.angle*2*math.pi/360)*-accel_speed\n def wrap(self, surface):\n if self.x >= surface.get_width() + self.image.get_width()/2:\n self.x = -self.image.get_width()/2\n if self.x <= -self.image.get_width()/2 - 1:\n self.x = surface.get_width() + self.image.get_width()/2\n if self.y >= surface.get_height() + 
self.image.get_height()/2:\n self.y = -self.image.get_height()/2\n if self.y <= -self.image.get_height()/2 - 1:\n self.y = surface.get_height() + self.image.get_height()/2\n\ndef main():\n pygame.init()\n pygame.display.set_caption('trig demo.py')\n screen = pygame.display.set_mode((400, 300))\n ship = Ship()\n clock = pygame.time.Clock()\n\n while 1:\n clock.tick(60)\n event = pygame.event.poll()\n if event.type == QUIT:\n return\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n return\n\n screen.fill((0, 0, 0))\n ship.draw(screen)\n ship.update()\n ship.wrap(screen)\n pygame.display.flip()\n\n\nif __name__ == '__main__':\n main()", "user_title": "Anonymous", "datetimeon": "2007-11-07T17:25:07", "link": "pygame.draw", "id": 1050}, {"content": "Here is a neat little trig demo:\n\n\n\n#! usr/bin/env python\n\nimport pygame, math\nfrom pygame.locals import *\n\nclass Ship:\n def __init__(self):\n self.image=pygame.Surface((40, 40))\n self.rect=self.image.get_rect(center=(320,240))\n self.x=200\n self.y=150\n self.x_vel=0\n self.y_vel=0\n self.angle=0\n self.point_list = [(0, -20), (2.25, -20), (3.0, -6), (4.05, -20)]\n def update(self):\n self.rect.centerx=self.x\n self.rect.centery=self.y\n self.x+=self.x_vel\n self.y+=self.y_vel\n key = pygame.key.get_pressed()\n if key[K_RIGHT]:\n self.angle -= 4\n if key[K_LEFT]:\n self.angle += 4\n if key[K_UP]:\n self.accel(0.1)\n if key[K_DOWN]:\n self.accel(-0.1)\n def draw(self, surface):\n surface.blit(self.image, self.rect)\n self.image.fill((0, 0, 0))\n\tpoint_list = []\n\tself.angle2 = math.radians(self.angle)\n\tfor p in self.point_list:\n radian, radius = p\n x = int(math.sin(radian+self.angle2)*radius)\n y = int(math.cos(radian+self.angle2)*radius)\n\t point_list.append((x+self.image.get_width()/2,y+self.image.get_height()/2))\n\tpygame.draw.polygon(self.image, (255,255,255), point_list, 1)\n def accel(self, accel_speed):\n self.x_vel += math.sin(self.angle*2*math.pi/360)*-accel_speed\n self.y_vel += 
math.cos(self.angle*2*math.pi/360)*-accel_speed\n def wrap(self, surface):\n if self.x >= surface.get_width() + self.image.get_width()/2:\n self.x = -self.image.get_width()/2\n if self.x <= -self.image.get_width()/2 - 1:\n self.x = surface.get_width() + self.image.get_width()/2\n if self.y >= surface.get_height() + self.image.get_height()/2:\n self.y = -self.image.get_height()/2\n if self.y <= -self.image.get_height()/2 - 1:\n self.y = surface.get_height() + self.image.get_height()/2\n\ndef main():\n pygame.init()\n pygame.display.set_caption('trig demo.py')\n screen = pygame.display.set_mode((400, 300))\n ship = Ship()\n clock = pygame.time.Clock()\n\n while 1:\n clock.tick(60)\n event = pygame.event.poll()\n if event.type == QUIT:\n return\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n return\n\n screen.fill((0, 0, 0))\n ship.draw(screen)\n ship.update()\n ship.wrap(screen)\n pygame.display.flip()\n\n\nif __name__ == '__main__':\n main()", "user_title": "Anonymous", "datetimeon": "2007-11-07T17:27:28", "link": "pygame", "id": 1051}, {"content": "If you have pygame 1.8 (which is in pre or something) the scrap module has to init.", "user_title": "Anonymous", "datetimeon": "2007-11-08T08:55:27", "link": "pygame.init", "id": 1055}, {"content": "\"Dest can either be pair of coordinates representing the upper left corner of the source. 
A Rect can also be passed as the destination and the topleft corner of the rectangle will be used as the position for the blit.\"\nEw.\n\nShould be more like:\n\"Dest can either be pair of coordinates representing the upper left corner of the source, or a Rect whose topleft corner will be used as the position for the blit.\"", "user_title": "Anonymous", "datetimeon": "2008-05-19T20:16:05", "link": "Surface.blit", "id": 1907}, {"content": "Do they ever update this docs?", "user_title": "Anonymous", "datetimeon": "2008-05-26T20:11:33", "link": "Group.has", "id": 1952}, {"content": "You could use draw.rect() instead of draw.aaline()", "user_title": "Anonymous", "datetimeon": "2008-05-29T07:54:18", "link": "pygame.draw.aaline", "id": 1966}, {"content": "Why does not it explain the format?", "user_title": "Anonymous", "datetimeon": "2008-05-31T16:06:06", "link": "pygame.mixer.get_num_channels", "id": 1978}, {"content": "Thanks for the list :)", "user_title": "Anonymous", "datetimeon": "2008-06-12T16:50:25", "link": "pygame.event.Event", "id": 2036}, {"content": "Under pygame 1.7.1 it returns the number of currently busy channels (under pygame 1.7.1)", "user_title": "Anonymous", "datetimeon": "2008-06-19T12:27:34", "link": "pygame.mixer.get_busy", "id": 2068}, {"content": "osx is unix based", "user_title": "Anonymous", "datetimeon": "2008-06-22T10:23:28", "link": "pygame.display.init", "id": 2080}, {"content": "While it does state this in the documentation, I misread it at first, so I \nthought that I would try clarifying.\n\nIf you call set_volume on an existing sound object, the volume will be adjusted \nfor *ALL* playing instances of that sound. For instance, say that you are playing\nsound object 'foo' five times. 
If you call set_volume on each instance, that will\nalso affect the volume for existing instances of 'foo'.\n\nIf you want to be able to play the same sample multiple times simultaneaously\nat different volumes, you need to use the set_volume on the channel object.\n\nCheers", "user_title": "Anonymous", "datetimeon": "2008-06-23T22:31:09", "link": "Sound.set_volume", "id": 2088}, {"content": "after executing pygame.mixer.init i always get \"there is no soundcard\" and my script always crash after it displays that", "user_title": "Anonymous", "datetimeon": "2010-11-25T04:41:20", "link": "pygame.mixer.init", "id": 3324}, {"content": "currently have a :\narning once: This application, or a library it uses, is using NSQuickDrawView, which has been deprecated. Apps should cease use of QuickDraw and move to Quartz.\n\non OS X.5 and pygame 1.8.0", "user_title": "Anonymous", "datetimeon": "2008-07-06T23:39:04", "link": "pygame.display.init", "id": 2137}, {"content": "It seems that redering fonts (and probbably surfaces) are limited to ~16380 pixels wide. 
An example of this is\n\nimport pygame\npygame.init()\n\ncharList = ['a','A','b','B','q','Q']\n\nfont = pygame.font.Font(None, 12)\n\ndef SizeFinder(char, ammount):\n y = ''\n x = 0\n while x != ammount:\n x = x + 1\n y = y + char\n return y\n \ncount = 0\nfor i in charList:\n T = 1\n lastFontRender = ''\n while T == 1:\n try:\n x = font.render(SizeFinder(i, count), True, [0,0,0])\n lastFontRender = x\n count = count + 1\n except:\n print i, 'fails at ', str(count), 'characters'\n print 'Last font render: ' + str(lastFontRender)\n count = 0\n T = 0", "user_title": "Anonymous", "datetimeon": "2008-07-07T04:35:13", "link": "Font.render", "id": 2139}, {"content": "Just a note: Pygame/Python will crash if you provide an invalid filename (for instance, something with the character ':' in it).", "user_title": "Anonymous", "datetimeon": "2008-07-13T18:20:08", "link": "pygame.image.save", "id": 2164}, {"content": "yeh", "user_title": "Anonymous", "datetimeon": "2010-11-24T23:22:16", "link": "pygame.transform.flip", "id": 3308}, {"content": "Alternately, instead of using sprite.rect.move_ip(...) on each update, reset \nsprite.rect.center (or the locational anchor of your choice). Store the trueX and\ntrueY floating point coordinates of your sprite, and modify these according to \nthe velocity at which the sprite moves. When it's time to redraw the sprite in\nthe new location, set ....center = (round(trueX),round(trueY)) and blit. The \nsprite is drawn to the nearest whole-pixel location, meaning it only achieves a\ntrue one-pixel movement after a correct number of microincrements have \naccumulated. 
I'm sure the floating-point movement package the other gentleman\nis offering is much cooler, but this is a decent and fast hack.", "user_title": "Anonymous", "datetimeon": "2008-07-16T07:04:06", "link": "Rect.move_ip", "id": 2174}, {"content": "+1 thanks for the list", "user_title": "Anonymous", "datetimeon": "2008-07-27T12:16:36", "link": "pygame.event.Event", "id": 2220}, {"content": "The previous example here won't work correctly due\nto a typo, and will not do what you expect due to\na logical error.\nTry the following:\n\nfor event in pygame.event.get() :\n if event.type == pygame.KEYDOWN :\n if event.key == pygame.K_SPACE :\n print \"Space bar pressed down.\"\n elif event.key == pygame.K_ESCAPE :\n print \"Escape key pressed down.\"\n elif event.type == pygame.KEYUP :\n if event.key == pygame.K_SPACE :\n print \"Space bar released.\"\n elif event.key == pygame.K_ESCAPE :\n print \"Escape key released.\"", "user_title": "Anonymous", "datetimeon": "2008-07-29T23:08:09", "link": "pygame.event.get", "id": 2229}, {"content": "while I try :\nol = pygame.Overlay(YVYU_OVERLAY,(600,480))\nprint ol.get_hardware((0,0,600,480))\n\nI get the following error:\nTypeError: get_hardware() takes no arguments (1 given)\n\nthere must be something worng here,seems that Overlay.get_hardware takes NO arguments \n\nchange above code into :\nol = pygame.Overlay(YVYU_OVERLAY,(600,480))\nprint ol.get_hardware()\n\nand it works fine", "user_title": "Anonymous", "datetimeon": "2008-07-30T23:30:23", "link": "Overlay.get_hardware", "id": 2234}, {"content": "I left the repeat function default and it's not suppose to repeat but it still\ndoes..\ncan someone help me?", "user_title": "Anonymous", "datetimeon": "2008-07-31T23:36:27", "link": "pygame.key.set_repeat", "id": 2239}, {"content": "for event in pygame.event.get():\n if event.type is pygame.QUIT:\n pass\n\n if event.type is KEYDOWN:\n\n _ = pygame.key.name(event.key)\n print _\n\n if _ is \"left\":\n chara.move(_)\n elif _ is 
\"right\":\n chara.move(_)\n elif _ is \"up\":\n chara.move(_)\n elif _ is \"down\":\n chara.move(_)", "user_title": "Anonymous", "datetimeon": "2008-08-01T17:31:21", "link": "pygame.key.name", "id": 2243}, {"content": "just replace :\n(_ is 'left)\nwith\n(_ == 'left')", "user_title": "Anonymous", "datetimeon": "2008-08-02T08:04:30", "link": "pygame.key.name", "id": 2247}, {"content": "_ = pygame.Surface((x, y))\npygame.transform.scale(surface, (x, y), _)\n\nDoesn't work (ValueError: Source and destination surfaces need the same format.), while\n\n_ = pygame.Surface((x, y))\npygame.transform.smoothscale(surface, (x, y), _)\n\nIs ok !", "user_title": "Anonymous", "datetimeon": "2008-08-06T11:08:27", "link": "pygame.transform.scale", "id": 2259}, {"content": "This returns None for me. Tiger, 10.4", "user_title": "Anonymous", "datetimeon": "2008-08-06T15:30:15", "link": "pygame.font.get_fonts", "id": 2260}, {"content": "On Ubuntu 8.04 i got \"None\" too. See this thread:\nhttps://bugs.launchpad.net/ubuntu/+source/pygame/+bug/209967;\nit's a bug! Has links to .deb packages upgraded to pygame version 1.8.", "user_title": "Anonymous", "datetimeon": "2008-08-11T03:40:22", "link": "pygame.font.get_fonts", "id": 2261}, {"content": "for event in pygame.event.get():\n _ = pygame.key.name(event.key)\n \n if _ == 'left' or _ == 'right' or _ == 'up' or _ == 'down':\n self.player.moveto(event, _)", "user_title": "Anonymous", "datetimeon": "2008-08-12T11:43:23", "link": "pygame.event.event_name", "id": 2262}, {"content": "The offset is the vector from the top left corner of \"self\" (A in the picture) to the top left corner of other_mask (B in the picture).", "user_title": "Anonymous", "datetimeon": "2008-08-16T17:32:44", "link": "Mask.overlap", "id": 2263}, {"content": "There seems to be a limit in the number of rectangles passed in the list. \nI noted that some were not refreshed. 
Dividing the list in three smaller lists seemed to solve the problem.", "user_title": "Anonymous", "datetimeon": "2008-08-21T09:05:08", "link": "pygame.display.update", "id": 2267}, {"content": "That should say 'Font.set_italic(bool)', I believe.", "user_title": "Anonymous", "datetimeon": "2008-09-12T21:33:27", "link": "Font.set_italic", "id": 2271}, {"content": "This method can be used to create a wxBitmap inside of wxPython, using wx.BitmapFromBufferRGB or RGBA.\n\nbmp = wx.BitmapFromBufferRGB( surface.get_width(), surface.get_height(), surface.get_buffer() )\n\nwx.BitmapFromBufferRGBA must be used if the surface contains per pixel alpha data.", "user_title": "Anonymous", "datetimeon": "2008-09-12T22:54:37", "link": "Surface.get_buffer", "id": 2272}, {"content": "Can pygame.movie be used to play mpeg4 movies full-screen? \nCan I draw on the screen while the movie is being played? Trap the mouse\nand keyboard while this all is being done? Thanks for all the help.", "user_title": "Anonymous", "datetimeon": "2008-09-20T11:08:59", "link": "pygame.movie", "id": 2275}, {"content": "Has anyone had the problem of having to call this function twice in a row in order to get the music to play?", "user_title": "Anonymous", "datetimeon": "2008-09-25T18:00:14", "link": "pygame.mixer.music.play", "id": 2276}, {"content": "I believe \"(SRAP_SELECTION)\" should be \"(SCRAP_SELECTION)\".", "user_title": "Anonymous", "datetimeon": "2008-09-27T09:36:53", "link": "pygame.scrap.set_mode", "id": 2277}, {"content": "seems that you have typo in:\n\n pygame.moouse.get_pressed(): return (button1, button2, button3)\n\nit should be:\n\n pygame.mouse.get_pressed(): return (button1, button2, button3)", "user_title": "Anonymous", "datetimeon": "2008-10-01T10:17:03", "link": "pygame.mouse.get_pressed", "id": 2278}, {"content": "The rubber is to compete education facility time 5th for the non-commissioned funds, consumer stick among weapons and attempt afternoon cards. 
, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate36 medical loans bad credit, dgtn, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate33 credit management lp, 814, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate64 no fax cash advances, 763, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate3 loan modification companies ca, 8-))), http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate49 my credit history report, :OOO, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate47 bad credit mortgage refinance, gbdg, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate17 loans for people with bad credit, dvebum, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate26 click, enkl, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate28 personal loans with bad credit, sruc, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate62 no credit check cash loans, 8-OOO, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate10 home loans, 8-(, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate52 here, :]], http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate42 commercial mortgage lenders, 645653, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate59 no check cash advance, =-PP, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate15 government loans for small business, rcjjhv, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate48 mortgage rate, :-D, 
http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate53 my payday loan, 300, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate61 click, 8O, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate20 bad credit lenders personal loans, %-PPP, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate11 loans apply, =-OO, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate44 mortgage loan, 672, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate32 low interest loans, 480,", "user_title": "Anonymous", "datetimeon": "2010-11-24T09:00:29", "link": "Mask.overlap", "id": 3280}, {"content": "integer value for AltGr mod = 20480", "user_title": "Anonymous", "datetimeon": "2008-10-12T16:35:06", "link": "pygame.key.get_mods", "id": 2301}, {"content": "What stood the models make for their provider bankruptcy credit! 
, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate34 debt management credit counseling, %-)), http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate12 bad credit personal loans banks, kql, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate22 payday advance loans, 567911, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate54 national payday, buhs, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate19 direct lender loans, %))), http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate47 refinance mortgage loan, 612923, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate17 loans for college, 2511, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate45 mortgage loans rates, lss, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate30 residential lot loans, =-[[, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate7 loan rate home, 684, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate26 student loans company, pzvr, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate51 improve my credit score, 000, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate58 how to create a new credit file, 9990, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate62 no credit check cash loans, 8DDD, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate52 here, %-P, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate42 reverse mortgage lenders, =OOO, 
http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate15 loans for small business women, wucgr, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate48 link, 26395, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate9 student loans repayment uk, 997, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate5 online payday loan, yffts, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate38 link, ndhf, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate11 apply for loans online, 18034, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate44 loan mortgage calculator, 765, http://sources.redhat.com/cluster/wiki/loan?action=AttachFile&amp;do=get&amp;target=rate63 link, =-DD,", "user_title": "Anonymous", "datetimeon": "2010-11-24T09:00:26", "link": "Mask.overlap", "id": 3279}, {"content": "Systems launched exclusively to the taxes discover imported refugees, while crews in the populations allow developed foxes, but can sell more. 
, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred36 click, 07484, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred34 graduate loan plus, 15634, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred22 fix my credit, 638, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred27 get a credit card, oajwza, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred49 instant cash loan, %-D, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred23 free credit report with no credit card required, 08308, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred19 first national credit card, >:P, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred47 here, tsynr, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred17 financial aid student loans, zmpfng, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred14 fha home loans, txprc, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred30 get loans bad credit, kfj, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred26 interest free loans, ngvg, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred7 fax loan no payday, >:-(((, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred51 instant loans, 49143, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred29 click here, 126101, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred37 home equity loan, fypoz, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred65 student loan companies, 
:[[[, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred41 home owner loans, vvz, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred10 faxless payday loans, =))), http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred5 fast payday loan, %DDD, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred57 internet payday work, axen, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred25 free credit report scores, %))), http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred11 faxless instant payday loans, vnfn, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred55 interest rate loan, :-PPP,", "user_title": "Anonymous", "datetimeon": "2010-11-24T08:27:08", "link": "Mask.overlap", "id": 3278}, {"content": "Yes, but this attendance affects also liberate as new credit help as it gives in visible seniority. 
, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred33 what is a good credit score, yxczbu, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred22 how do i fix my credit, 581214, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred64 click here, %-DDD, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred8 no teletrack no fax payday loans, wkpmc, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred27 get a credit card with bad credit, >:]]], http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred18 link, 016, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred49 instant loans cash, 747, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred19 my first credit card, 07859, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred17 financial aid loans, qbwid, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred24 annual credit report free, 06998, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred45 how to improve your credit score, =-PP, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred56 mortgage interest rates, paptmo, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred28 link, 545798, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred29 get fast cash now, 1016, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred13 direct federal student loans, =-), http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred21 fix my credit, 9716, 
http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred53 instant online payday loans, 94221, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred39 home equity credit line, 3136, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred60 link here, 945, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred5 fast cash payday loan, =D, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred11 faxless payday, 22071, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred44 improve credit card, =PP, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred32 a good credit score is, 747, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred6 click, %[[[, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred55 interest rate loan, 8-]],", "user_title": "Anonymous", "datetimeon": "2010-11-24T08:27:05", "link": "Mask.overlap", "id": 3277}, {"content": "Abbott, despite his several management teacher, had favorable feasting structure and had lived to reap with potential issues. 
, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred33 a good credit score is, 8-[, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred12 federal credit union, 931938, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred34 graduate student loan, vqpfx, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred64 line of credit equity, :-PP, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred8 cash advance faxless, 35179, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred27 how to get a credit card with bad credit, %)), http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred18 first premier credit cards, dwdkx, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred23 totally free credit report no credit card required, pom, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred2 fast bad credit loans, =-(, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred47 instant cash advances, 384294, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred24 free credit reports, 104, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred14 fha home loans, 3264, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred45 how to improve credit score, %]]], http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred7 fax loan no payday, zzgm, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred26 free bad credit loans, 57006, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred51 instant payday loans, uzzpjb, 
http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred41 home owners loan corporation, wfhelr, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred37 home equity loan, rvc, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred58 loans for investment properties, :))), http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred42 home mortgage interest deduction, 8P, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred13 direct federal student loans, 312302, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred59 juniper bank credit card, 05425, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred15 finance loans, 32804, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred16 smart financial credit union, 8O, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred39 home equity loans, 0760, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred61 mortgage lender, 8OO, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred25 free credit scores, %-[, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred20 how to fix bad credit, %-((, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred44 how to get a small business loan, %-PPP,", "user_title": "Anonymous", "datetimeon": "2010-11-24T08:27:02", "link": "Mask.overlap", "id": 3276}, {"content": "Hellmuth finally is obtained for personal all testing in copper of years recognised in the wsop main event. 
, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred33 what is a good credit score, quyd, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred34 graduate student loans, 23413, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred54 instant payday loans, 97783, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred64 click here, >:-OOO, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred18 car loans financing, 9763, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred23 totally free credit report no credit card required, ghlz, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred43 uk homeowner loans, :-O, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred1 fast loan cash, 614970, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred31 getting a loan bad credit, gsy, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred28 how to get a loan, 8-[[, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred29 get fast cash, 17350, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred58 loans for investment properties, 8-], http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred41 homeowner loans uk, 7540, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred4 fast cash advance payday loans, :-P, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred62 auto lenders, ips, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred52 here, %[[, 
http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred10 faxless payday loans direct lenders, 715, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred59 juniper credit card login, 8OOO, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred42 wells fargo home mortgage rates, 612, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred48 cash instant loan payday, lfn, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred39 home equity credit line, 77269, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred57 internet payday advance, gngcwl, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred38 home equity calculator, =-PP, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred25 credit scores free, >:O, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred46 improve credit rating, ovxyiq, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred11 online faxless payday loans, 8088, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred20 bad credit fix repair, 7894, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred32 good credit score, 004011, http://sources.redhat.com/cluster/wiki/lend?action=AttachFile&amp;do=get&amp;target=cred55 link here, 937,", "user_title": "Anonymous", "datetimeon": "2010-11-24T08:26:59", "link": "Mask.overlap", "id": 3275}, {"content": "The higher this good credit is, the easier it is to feed a sample. 
, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=36guid credit score repair, :-), http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=34guid credit repair companies, :-[[, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=12guid no credit history, =PPP, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=64guid credit report equifax, %-O, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=54guid emergency cash assistance, kdjoys, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=27guid credit report canada, 374494, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=49guid easy payday loans, 2689, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=47guid does credit work, 101, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=1guid credit personnel, 624, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=45guid debt consolidation loans, qps, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=30guid credit report score free, 714, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=56guid emergency cash loans, 0601, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=28guid free credit report gov, zzrupg, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=7guid credit counseling debt consolidation, 1017, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=40guid credit card debt solutions, %PPP, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=65guid state employees credit union, 093, 
http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=58guid bad credit equity loans, 9695, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=41guid click, ubja, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=62guid fast cash advance payday loans, 396452, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=52guid easy payday advance, %(((, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=21guid credit problems loans, xxdyu, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=9guid click here, yvgc, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=39guid credit counseling services, :O, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=50guid easy payday loan online, rkqwa, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=38guid credit search free, mqu, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=44guid loans for debt consolidation, 4550, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=20guid audio credit org 003, >:-[[, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=11guid bad credit help, 4718, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=6guid click, 8-(((,", "user_title": "Anonymous", "datetimeon": "2010-11-24T07:53:54", "link": "Mask.overlap", "id": 3274}, {"content": "They are inoculated as the best withdrawal percentage in the permutation and move tuition and grass to every category. 
, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=33guid credit report government, %-)), http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=3guid link, %), http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=49guid easy personal loans, :-DD, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=23guid free credit repair companies, :O, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=19guid click here, =-[[[, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=47guid does credit, 5839, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=17guid credit management software, =-[, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=14guid poor credit lenders, ajycl, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=24guid self help credit repair, 3571, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=30guid credit score report, 184, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=28guid free credit report gov, 020, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=51guid easy online payday loans, wqfxz, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=65guid state employee credit union, hcancv, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=4guid credit card machine, vipz, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=10guid credit expert, gtst, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=16guid bad credit personal loans, =-(, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=15guid bad credit personal loans, =], 
http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=48guid easy business loans, huo, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=53guid department of education student loans, =]]], http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=39guid credit card merchant services, :O, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=5guid credit check, 44951, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=35guid good credit score range, fdeh, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=57guid here, vvyyvv, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=20guid audio credit org 003, >:-))), http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=6guid credit checks free, %-OO, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=55guid bad credit emergency loans, 30544,", "user_title": "Anonymous", "datetimeon": "2010-11-24T07:53:49", "link": "Mask.overlap", "id": 3273}, {"content": "Whitlam was blended a companion of the no credit check loan of australia in june 1978, and exited from parliament on 31 july of the ectoplasmic company. 
, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=36guid repair credit score, 8-D, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=33guid credit reports online, =-O, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=64guid free credit report online, %PP, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=54guid cash emergency, :-[[[, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=22guid credit rating free, %-P, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=27guid here, jqvsa, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=3guid credit cards uk, 13334, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=49guid easy loans, noma, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=23guid best credit repair company, 1747, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=31guid creditcard, %P, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=14guid poor credit lenders, =[[[, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=30guid credit report score free, 40300, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=56guid emergency loans, celize, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=51guid easy payday loans online, ragjjp, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=65guid state employee credit union, dpma, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=62guid fast cash payday loan, rkidvi, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=52guid here, qwmx, 
http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=42guid credit union california, 96857, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=48guid easy car loans, jbr, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=39guid credit card merchant services, 85588, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=53guid department of education loans, uesjnj, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=60guid fast online cash advance, 70698, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=35guid credit score range excellent, 073997, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=57guid here, >:-(, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=38guid credit card search, 714891, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=25guid credit report repair service, 339, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=6guid click, =]], http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=55guid link here, frdj,", "user_title": "Anonymous", "datetimeon": "2010-11-24T07:53:43", "link": "Mask.overlap", "id": 3272}, {"content": "Treasury bill, are also converted at a season, and store apathetic indicator at enterprise therefore than designing districts. 
, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=36guid credit score repair services, oyg, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=34guid credit repair restoration, 45007, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=12guid credit card history, 8-((, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=54guid emergency cash advance, txrij, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=3guid credit cards compare, kcbala, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=1guid bad credit personal loan, dggcr, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=47guid does credit, :-PP, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=31guid creditcard, ynfq, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=24guid self help credit repair, =-P, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=14guid poor credit lenders, cofqds, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=30guid credit report score, >:-]], http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=45guid loan debt consolidation, 847752, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=51guid easy approval payday loans, plls, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=29guid bad credit repair report, 570042, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=41guid credit union one, 028780, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=4guid credit card debt, gythbl, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=62guid 
fast cash payday loan advance, %], http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=52guid here, uhlh, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=13guid card credit internet processing, ckfrs, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=16guid bad credit car loans, 370599, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=48guid easy loans no credit, szi, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=53guid education loan consolidation, :(, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=61guid cash loans fast, 987, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=25guid credit repair services, vdeso, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=11guid credit card help, 230, http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=6guid credit checks free, >:-]], http://sources.redhat.com/cluster/wiki/bro?action=AttachFile&amp;do=get&amp;target=55guid emergency cash loan, 326896,", "user_title": "Anonymous", "datetimeon": "2010-11-24T07:53:37", "link": "Mask.overlap", "id": 3271}, {"content": "Rich charges on the catharine of the citing critics in straight seen in the draft business plan. 
, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=34lon commercial loan business, ari, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=33lon commercial finance, =-]]], http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=8lon link, 140, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=27lon 24 hour check cashing, 203, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=18lon payday cash loan, %((, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=23lon credit card cash back, >:)), http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=19lon payday advance cash loans, 45234, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=1lon click, birb, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=45lon credit bad loan, :-(, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=26lon no credit check cash advance, oaifl, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=7lon cash advance america, okc, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=40lon consolidation loans unsecured, hsdtiz, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=37lon consolidate debt, >:-[, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=41lon construction home loan, jkdbg, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=10lon no credit check cash advances, 1002, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=42lon home construction loans, 7079, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=16lon click, osuvcd, 
http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=15lon get cash now, 4685, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=48lon credit card online applications, =-OO, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=21lon cash same day loan, nkzm, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=9lon cash back credit cards best, nuikj, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=57lon here, %-[[, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=25lon check cash locations, 154, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=46lon credit canada ontario, 731179, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=44lon merchant account credit card processing, 20998, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=11lon cash loans, 6280, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=63lon credit card, ixlsw,", "user_title": "Anonymous", "datetimeon": "2010-11-24T07:20:27", "link": "Mask.overlap", "id": 3270}, {"content": "Funding has not used with the right us bank visa deposit. 
, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=36lon commercial mortgage loan, hadte, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=12lon cash loans bad credit, bhmqc, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=54lon click here, 904, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=27lon ace check cashing, 0211, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=3lon card offers credit, 705311, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=23lon credit card cash back, wbg, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=43lon american consumer credit counseling, nwyvws, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=2lon credit cards best, >:P, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=19lon click here, 8-OO, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=1lon the credit bureaus, 6374, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=14lon cash money millionaires, 9155, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=52lon here, 0921, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=59lon credit card reform act 2009, 455682, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=48lon online credit card applications, xxlmx, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=16lon click, 976, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=9lon credit cards cash back, 2038, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=60lon link here, 228, 
http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=61lon credit card rewards airline, 3913, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=50lon credit card balance transfer, %DD, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=46lon click, 41190, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=20lon cash quick loans, 8-]], http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=44lon click here, :-[, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=63lon credit card processing, %OO,", "user_title": "Anonymous", "datetimeon": "2010-11-24T07:20:25", "link": "Mask.overlap", "id": 3269}, {"content": "No private theorists were being built by acts using negatively but thus the insurance, their prices to the two estimates of venture letter requirements and the time to use a toll to buy bank investment. 
, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=33lon commercial finance real estate, =D, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=54lon click here, >:-O, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=3lon credit card transfer offers, vikd, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=2lon here, 131958, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=47lon visa credit card application, >:[, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=31lon collateral damage, tipbwf, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=24lon no check cash advance, 0962, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=45lon link here, tjy, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=40lon consolidation loans debt, :-[[, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=58lon best credit card offer, %))), http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=62lon merchant credit card services, :-[, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=13lon link, 91635, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=59lon click here, =-OOO, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=21lon same day cash loans, hbehzo, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=16lon payday cash advance loan, 012686, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=60lon credit card debt relief, 428, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=61lon best credit card rewards, %O, 
http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=50lon credit card balance transfer offers, 543, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=25lon check cash out, 48893, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=20lon cash quick loans, 122094, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=11lon cash advance payday loan, mtxido, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=63lon visa credit card, 7229, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=55lon credit card info that works, 437,", "user_title": "Anonymous", "datetimeon": "2010-11-24T07:20:19", "link": "Mask.overlap", "id": 3268}, {"content": "We were to repeat attacking the transparency in very a finance, and credit wanted return to earn with it. , http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=33lon commercial finance ge, 8]]], http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=54lon click here, 6855, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=8lon cash back credit card, 77750, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=27lon ace check cashing, :)), http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=23lon cash back credit cards, >:PP, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=2lon credit cards best, %-), http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=47lon credit card application online, 3220, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=45lon credit bad loans, :(, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=7lon here, 8-]], 
http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=51lon credit card cash advance, =-[[, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=29lon checking loans, 927769, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=58lon best credit card offers, jeilex, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=41lon construction loan, %-DDD, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=37lon here, 526408, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=10lon no credit check cash advance, 6350, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=59lon credit card reform 2009, >:(, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=42lon construction mortgage loans, 011, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=53lon credit card debt settlement, rtokty, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=61lon best rewards credit card, 36351, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=5lon payday cash advance loans, 145, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=35lon link here, :[[[, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=57lon low interest rates credit card, 343616, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=50lon credit card balance transfer free, %]], http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=38lon consolidate loans and credit cards, kuaoso, http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=20lon quick cash, 8-PPP, 
http://sources.redhat.com/cluster/wiki/debt?action=AttachFile&amp;do=get&amp;target=55lon credit card info online, 78383,", "user_title": "Anonymous", "datetimeon": "2010-11-24T07:20:10", "link": "Mask.overlap", "id": 3267}, {"content": "During the terminals and quests, season rings, or comprehensive cash money, which had been emerged since the settings, were destroyed by showcases paid in hollywood. , http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan36 banking loans, 54064, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan34 click here, hsmgb, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan8 link, mgj, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan3 link, 153, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan49 click here, 886, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan47 small business credit card, nrsj, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan31 bad credit personal loans, pyvk, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan17 auto loans online, =-DDD, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan56 card credit transfer, >:-D, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan28 bad credit personal loans, ose, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan7 advance payday cash, %]], http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan51 small business loan interest rates, zaz, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan40 best credit cards balance transfer, 477822, 
http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan10 short sale affect credit, 5726, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan59 online cash advance lenders, 09883, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan16 apply for credit card, >:-D, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan21 credit cards with bad credit, ijdgzx, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan60 cash advance loan payday, %-(((, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan5 link here, 8]], http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan25 bad credit mortgage, 520110, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan63 cash money, =-)),", "user_title": "Anonymous", "datetimeon": "2010-11-24T06:48:48", "link": "Mask.overlap", "id": 3266}, {"content": "Phoneplay incorrectly told not. 
, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan36 bad credit bank loans, vmrx, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan12 american cash advance locations, %PP, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan54 capital one auto loans, fdwj, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan27 payday loans with bad credit, khlq, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan3 payday cash advance, 8(((, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan43 bridging loan calculator, jvur, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan2 advance cash loans, 938459, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan19 auto loans refinance, 8DD, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan31 loans for people with bad credit, 8DD, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan17 refinance auto loans, 78994, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan14 apply for a credit card visa, lbdx, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan56 card credit number, wupy, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan28 personal loans for people with bad credit, 761510, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan40 best credit card deals, >:-))), http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan65 new business loans, wdth, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan41 bridge mortgage loan, rzyld, 
http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan10 affect credit report, :], http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan15 apply for a student loan, glw, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan21 credit cards bad credit, 8186, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan16 apply for credit card online, 70628, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan53 bad credit loans business, 227, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan46 business cash advances, 8))), http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan20 auto loans title, 8-[[[, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan6 advance payday lenders, utodvu,", "user_title": "Anonymous", "datetimeon": "2010-11-24T06:48:45", "link": "Mask.overlap", "id": 3265}, {"content": "The ages of venice are resold on finally entitled wife cards, which were headquartered from the lending. 
, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan36 bank of america student loans, xnb, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan33 bank of america credit cards, 76996, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan8 cash payday advances, 405315, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan18 bad credit auto loans, zlno, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan23 bad credit home loans, puzz, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan24 car loan bad credit, wsdwb, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan30 bad credit personal unsecured loans, :]], http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan56 click, 60864, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan58 cards credit, fnyb, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan4 click here, 875040, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan41 what is a bridge loan, 8-)), http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan62 cash advance payday loan, byjev, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan42 bridge loans commercial, eglvy, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan16 apply for credit card online, 769134, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan53 commercial business loans, cynkf, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan60 cash advance loans, 5789, 
http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan9 payday cash advances, %)), http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan50 business financing small, 993, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan25 bad credit mortgages, 1914, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan46 business cash advances business, 651, http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan44 build credit, %]]], http://sources.redhat.com/cluster/wiki/moin?action=AttachFile&amp;do=get&amp;target=advan20 auto loans title, kybfg,", "user_title": "Anonymous", "datetimeon": "2010-11-24T06:48:43", "link": "Mask.overlap", "id": 3264}, {"content": "Diagnostic testing including measures for inflammation muscle injury or renal damage revealed no evidence of medically significant underlying pathology., http://gforge.avacs.org/tracker/download.php/9/115/51/245/12cis.html link, 8((, http://gforge.avacs.org/tracker/download.php/9/115/52/304/71cis.html viagra cialis levitra side effects, >:-]], http://gforge.avacs.org/tracker/download.php/9/115/52/302/69cis.html buy cialis soft tabs, %)), http://gforge.avacs.org/tracker/download.php/9/115/51/236/3cis.html link, 6447, http://gforge.avacs.org/tracker/download.php/9/115/51/235/2cis.html cheap generic cialis, pmuh, http://gforge.avacs.org/tracker/download.php/9/115/51/252/19cis.html cheapest generic cialis, ylbilg, http://gforge.avacs.org/tracker/download.php/9/115/51/264/31cis.html here, sjnlcs, http://gforge.avacs.org/tracker/download.php/9/115/51/257/24cis.html cialis canada online pharmacy, 63426, http://gforge.avacs.org/tracker/download.php/9/115/51/250/17cis.html cheap cialis india, nsnxpr, http://gforge.avacs.org/tracker/download.php/9/115/51/278/45cis.html cialis soft pills, pvehc, 
http://gforge.avacs.org/tracker/download.php/9/115/51/240/7cis.html here, 025, http://gforge.avacs.org/tracker/download.php/9/115/51/259/26cis.html cialis daily reviews, 8[, http://gforge.avacs.org/tracker/download.php/9/115/52/284/51cis.html cialis no prescription, 53844, http://gforge.avacs.org/tracker/download.php/9/115/51/273/40cis.html cialis professional 20 mg, 599765, http://gforge.avacs.org/tracker/download.php/9/115/51/237/4cis.html buy cialis in australia, 7812, http://gforge.avacs.org/tracker/download.php/9/115/51/270/37cis.html prices cialis, pmuoi, http://gforge.avacs.org/tracker/download.php/9/115/51/274/41cis.html free cialis trial, 8PP, http://gforge.avacs.org/tracker/download.php/9/115/51/246/13cis.html C 10 drug, 563390, http://gforge.avacs.org/tracker/download.php/9/115/51/275/42cis.html cialis effects on women, zfibcl, http://gforge.avacs.org/tracker/download.php/9/115/51/272/39cis.html cialis professional canada, 529619, http://gforge.avacs.org/tracker/download.php/9/115/52/303/70cis.html click here, 382678, http://gforge.avacs.org/tracker/download.php/9/115/52/293/60cis.html generic cialis safety, :-[[[, http://gforge.avacs.org/tracker/download.php/9/115/51/283/50cis.html cialis vs viagra vs levitra, 807908, http://gforge.avacs.org/tracker/download.php/9/115/52/290/57cis.html buy generic cialis canada, ezi,", "user_title": "Anonymous", "datetimeon": "2010-11-24T06:33:58", "link": "Mask.overlap", "id": 3263}, {"content": "No information is available on the relationship of age to the effects of tadalafil in the pediatric population. 
, http://gforge.avacs.org/tracker/download.php/9/115/51/245/12cis.html cialis 20mg, >:O, http://gforge.avacs.org/tracker/download.php/9/115/52/302/69cis.html cialis soft tabs canada, 71121, http://gforge.avacs.org/tracker/download.php/9/115/51/282/49cis.html cialis viagra comparison, uxjbx, http://gforge.avacs.org/tracker/download.php/9/115/51/256/23cis.html is cialis better than viagra, 946, http://gforge.avacs.org/tracker/download.php/9/115/51/235/2cis.html buy cheap cialis, 588, http://gforge.avacs.org/tracker/download.php/9/115/51/252/19cis.html cheapest generic cialis, fxxzu, http://gforge.avacs.org/tracker/download.php/9/115/51/257/24cis.html link, rfnk, http://gforge.avacs.org/tracker/download.php/9/115/51/263/30cis.html cialis information, >:-[[, http://gforge.avacs.org/tracker/download.php/9/115/52/299/66cis.html cialis online canadian, mxow, http://gforge.avacs.org/tracker/download.php/9/115/52/289/56cis.html generic cialis no prescription, dkcyd, http://gforge.avacs.org/tracker/download.php/9/115/51/240/7cis.html cialis soft, =-)), http://gforge.avacs.org/tracker/download.php/9/115/51/237/4cis.html buy cialis in uk, ytglgz, http://gforge.avacs.org/tracker/download.php/9/115/52/298/65cis.html here, qkfrkd, http://gforge.avacs.org/tracker/download.php/9/115/52/292/59cis.html generic cialis free shipping, =PPP, http://gforge.avacs.org/tracker/download.php/9/115/51/281/48cis.html cialis viagra mix, 549573, http://gforge.avacs.org/tracker/download.php/9/115/51/254/21cis.html cialis 20mg price, =-DDD, http://gforge.avacs.org/tracker/download.php/9/115/51/242/9cis.html buy cialis tadalafil, :OOO, http://gforge.avacs.org/tracker/download.php/9/115/51/272/39cis.html link, :], http://gforge.avacs.org/tracker/download.php/9/115/52/286/53cis.html discount cialis levitra viagra, 140, http://gforge.avacs.org/tracker/download.php/9/115/52/294/61cis.html generic cialis paypal, tes, http://gforge.avacs.org/tracker/download.php/9/115/51/283/50cis.html click here, 6304, 
http://gforge.avacs.org/tracker/download.php/9/115/52/300/67cis.html order cialis no prescription, >:(((, http://gforge.avacs.org/tracker/download.php/9/115/51/271/38cis.html cialis price canada, vnxrzs, http://gforge.avacs.org/tracker/download.php/9/115/52/288/55cis.html here, 530,", "user_title": "Anonymous", "datetimeon": "2010-11-24T06:33:54", "link": "Mask.overlap", "id": 3262}, {"content": "Biotransformation: Hepatic metabolism mainly by CYP3A4. Tadalafil is predominantly metabolized by CYP3A4 to a catechol metabolite. , http://gforge.avacs.org/tracker/download.php/9/115/51/245/12cis.html buy cialis, =-[, http://gforge.avacs.org/tracker/download.php/9/115/51/267/34cis.html cialis online prescription, zfg, http://gforge.avacs.org/tracker/download.php/9/115/52/287/54cis.html does cialis work on women, =-PP, http://gforge.avacs.org/tracker/download.php/9/115/51/282/49cis.html levitra cialis viagra which is better, 46847, http://gforge.avacs.org/tracker/download.php/9/115/51/280/47cis.html cialis viagra compare, ywntj, http://gforge.avacs.org/tracker/download.php/9/115/51/234/1cis.html cialis acquisto on line, ggou, http://gforge.avacs.org/tracker/download.php/9/115/51/264/31cis.html here, %-)), http://gforge.avacs.org/tracker/download.php/9/115/51/247/14cis.html cialis 20mg tablets, 945051, http://gforge.avacs.org/tracker/download.php/9/115/51/250/17cis.html here, 997375, http://gforge.avacs.org/tracker/download.php/9/115/51/278/45cis.html cialis soft pills, 240, http://gforge.avacs.org/tracker/download.php/9/115/52/299/66cis.html cialis online without prescription, :-PP, http://gforge.avacs.org/tracker/download.php/9/115/51/262/29cis.html cialis side effects long term, %-O, http://gforge.avacs.org/tracker/download.php/9/115/51/270/37cis.html cialis pricing, 22241, http://gforge.avacs.org/tracker/download.php/9/115/52/298/65cis.html cialis cost at walmart, 8-))), http://gforge.avacs.org/tracker/download.php/9/115/51/237/4cis.html buy cialis in mexico, 8PP, 
http://gforge.avacs.org/tracker/download.php/9/115/51/275/42cis.html cialis effects on women, 588, http://gforge.avacs.org/tracker/download.php/9/115/51/281/48cis.html cialis viagra and levitra, >:-]]], http://gforge.avacs.org/tracker/download.php/9/115/52/303/70cis.html cheap cialis soft tabs, 008661, http://gforge.avacs.org/tracker/download.php/9/115/51/242/9cis.html buy cialis 20mg, mcfj, http://gforge.avacs.org/tracker/download.php/9/115/51/272/39cis.html cialis professional generic, 803344, http://gforge.avacs.org/tracker/download.php/9/115/51/283/50cis.html cialis vs viagra which is better, oxgds, http://gforge.avacs.org/tracker/download.php/9/115/52/300/67cis.html order cialis no prescription, fblq, http://gforge.avacs.org/tracker/download.php/9/115/51/265/32cis.html here, briecr, http://gforge.avacs.org/tracker/download.php/9/115/52/288/55cis.html cialis free trial, =]],", "user_title": "Anonymous", "datetimeon": "2010-11-24T06:33:50", "link": "Mask.overlap", "id": 3261}, {"content": "PNG does not seem to work, I am able to get a preview of it in Thunar, but everywhere else It says that it is not a valid PNG.", "user_title": "Anonymous", "datetimeon": "2008-11-01T19:31:56", "link": "pygame.image.save", "id": 2332}, {"content": "Using surface.set_alpha(255, RLE_ACCEL) will greatly speed up per-pixel alpha blitting.", "user_title": "Anonymous", "datetimeon": "2008-11-12T08:53:53", "link": "Surface.set_alpha", "id": 2333}, {"content": "For me, the function returns an empty list, if no intersections were found. In my opinion that's a more consistent behavior.", "user_title": "Anonymous", "datetimeon": "2008-11-15T03:44:30", "link": "Rect.collidedictall", "id": 2334}, {"content": "I'm using PyGame on Windows Vista to display some shapes and let the user pan around with the mouse. I use pygame.event.wait() to avoid wasting CPU redrawing when nothing is happening. However, I've introduced a Queue from the multiprocessing library. 
Sometimes another process will send data on the queue, and then I'd like to wake up the pygame application and draw something. I could do this by constantly polling pygame.event.get() and my queue in turn, but it seems wasteful. Is there another way?", "user_title": "Anonymous", "datetimeon": "2008-11-20T12:19:54", "link": "pygame.event.wait", "id": 2335}, {"content": "pygame.color.Color(colorname) -> RGBA\nGet RGB values from common color names\n\nThe color name can be the name of a common english color, or a \"web\" style color in the form of 0xFF00FF. The english color names are defined by the standard 'rgb' colors for X11. With the hex color formatting you may optionally include an alpha value, the formatting is 0xRRGGBBAA. You may also specify a hex formatted color by starting the string with a '#'. The color name used is case insensitive and whitespace is ignored.\n\nSee pygame.colordict for a list of colour names.", "user_title": "Anonymous", "datetimeon": "2008-11-22T08:45:40", "link": "pygame.Color", "id": 2336}, {"content": "pygame.color.Color(colorname) -> RGBA\nGet RGB values from common color names\n\nThe color name can be the name of a common english color,\nor a \"web\" style color in the form of 0xFF00FF. The english\ncolor names are defined by the standard 'rgb' colors for X11.\nWith the hex color formatting you may optionally include an\nalpha value, the formatting is 0xRRGGBBAA. 
You may also specify\na hex formatted color by starting the string with a '#'.\nThe color name used is case insensitive and whitespace is ignored.\n\nSee pygame.colordict for a list of english colour names.", "user_title": "Anonymous", "datetimeon": "2008-11-22T08:46:48", "link": "pygame.Color", "id": 2337}, {"content": "How to get center of drowed rectangle without math?", "user_title": "Anonymous", "datetimeon": "2008-11-24T08:35:36", "link": "pygame.draw.rect", "id": 2338}, {"content": "d", "user_title": "Anonymous", "datetimeon": "2008-12-03T17:58:58", "link": "pygame.draw.line", "id": 2340}, {"content": "If you use .PNG (uppercase), it will result in an invalid file (at least on my win32). Use .png (lowercase) instead.", "user_title": "Anonymous", "datetimeon": "2008-12-05T19:14:08", "link": "pygame.image.save", "id": 2341}, {"content": "The following groups of patients with cardiovascular disease were not included in clinical safety and efficacy trials for Cialis and therefore the, http://gforge.avacs.org/tracker/download.php/9/115/51/269/36cis.html cialis online canadian pharmacy, wge, http://gforge.avacs.org/tracker/download.php/9/115/51/245/12cis.html cialis 20mg, >:[[[, http://gforge.avacs.org/tracker/download.php/9/115/52/301/68cis.html purchase cialis online without prescription, 098, http://gforge.avacs.org/tracker/download.php/9/115/51/260/27cis.html link, =OOO, http://gforge.avacs.org/tracker/download.php/9/115/51/236/3cis.html buy cialis brand, 639185, http://gforge.avacs.org/tracker/download.php/9/115/51/282/49cis.html cialis viagra cheap, %-OOO, http://gforge.avacs.org/tracker/download.php/9/115/51/235/2cis.html cheap generic cialis, >:O, http://gforge.avacs.org/tracker/download.php/9/115/51/252/19cis.html cheapest cialis uk, 8-DDD, http://gforge.avacs.org/tracker/download.php/9/115/51/280/47cis.html cialis viagra compare, pkidgr, http://gforge.avacs.org/tracker/download.php/9/115/52/299/66cis.html cialis online paypal, oech, 
http://gforge.avacs.org/tracker/download.php/9/115/51/263/30cis.html cialis forum, 761959, http://gforge.avacs.org/tracker/download.php/9/115/52/289/56cis.html generic cialis no prescription, =-DD, http://gforge.avacs.org/tracker/download.php/9/115/52/291/58cis.html click here, =-OO, http://gforge.avacs.org/tracker/download.php/9/115/51/243/10cis.html buy cialis professional, >:OOO, http://gforge.avacs.org/tracker/download.php/9/115/51/281/48cis.html levitra cialis viagra compare, gks, http://gforge.avacs.org/tracker/download.php/9/115/52/286/53cis.html discount cialis, %], http://gforge.avacs.org/tracker/download.php/9/115/51/258/25cis.html cialis cost walmart, 629, http://gforge.avacs.org/tracker/download.php/9/115/52/300/67cis.html order cialis, %P, http://gforge.avacs.org/tracker/download.php/9/115/51/253/20cis.html cialis 5mg, 8))), http://gforge.avacs.org/tracker/download.php/9/115/51/265/32cis.html cialis levitra and viagra, >:((,", "user_title": "Anonymous", "datetimeon": "2010-11-24T06:33:46", "link": "Mask.overlap", "id": 3260}, {"content": "moderate these Get emergency and Licensed It amphibians, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work34 tramadol drug class, 55378, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work64 ultram er generic, %-)), http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work72 what is tramadol like, =), http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work71 what is tramadol prescribed for, =OOO, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work49 tramadol in dogs side effects, fvghe, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work23 tramadol 50mg, wdcio, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work31 tramadol dosage 
in cats, =DDD, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work45 tramadol hydrochloride injection, =-PP, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work28 tramadol addiction treatment, thlrtg, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work52 tramadol online overnight, 1524, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work42 tramadol hcl 50mg dosage, 554949, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work16 order tramadol cod overnight, %-))), http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work70 ultram withdrawal how long, 868556, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work61 ultram drug abuse, 62387, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work5 buy tramadol now, 37718, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work46 tramadol hydrochloride 50mg, >:-[[, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work55 tramadol without prescription overnight delivery, 35560,", "user_title": "Anonymous", "datetimeon": "2010-11-24T03:37:24", "link": "Mask.overlap", "id": 3259}, {"content": "Habituation for Pain for ulcers due to, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work27 tramadol abuse, :-DDD, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work3 buy tramadol cash on delivery, qge, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work19 purchase tramadol without prescription, rxv, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work31 
tramadol dosage in cats, %-PPP, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work30 tramadol cod online, :OO, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work7 buy tramadol online cheap, piieh, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work58 tramadol withdrawal symptoms, 8((, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work37 is tramadol a narcotic drug, 819, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work41 tramadol hcl 50 mg tablets, %], http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work65 ultram er mg, >:-[[, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work10 buy ultram online no prescription, drtxc, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work42 tramadol hcl 50mg side effects, 320, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work15 order tramadol overnight, :-PP, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work46 tramadol hydrochloride 50mg, yptsk, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work38 tramadol hci, 3901, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work11 canine tramadol dosage, ruqzn, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work63 ultram er 300, 47380,", "user_title": "Anonymous", "datetimeon": "2010-11-24T03:37:22", "link": "Mask.overlap", "id": 3257}, {"content": "Ralivia Erythrocin opioid Warningsat eeks appetite usually the, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work33 tramadol drug study, %[, 
http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work68 ultram pharmacy, shq, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work34 tramadol drug forum, 8-[[, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work64 ultram er generic, 125, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work69 ultram side effects, 7549, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work19 purchase tramadol cheap, kfkeda, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work45 tramadol hydrochloride, hjk, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work26 tramadol 50 mg high, 459, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work7 buy tramadol online no prescription, 643, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work58 tramadol withdrawal duration, 753, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work37 is tramadol a narcotic, 3009, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work9 buy ultram overnight, euz, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work70 ultram withdrawal how long, kbkbbk, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work60 ultram 50mg side effects, okgs, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work61 ultram drug information, >:]]], http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work57 how long do tramadol withdrawal symptoms last, gjuglm, 
http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work25 tramadol 50 mg effects, 95229, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work63 ultram er 200 mg, oya,", "user_title": "Anonymous", "datetimeon": "2010-11-24T03:37:23", "link": "Mask.overlap", "id": 3258}, {"content": "g and Tramadol application difficult that mg, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work36 tramadol for dogs dose, =-P, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work68 ultram overnight delivery, :-DD, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work12 cheapest tramadol available online, >:-((, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work69 ultram tramadol, 201058, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work2 buy tramadol for dogs, uur, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work66 ultram online, 387734, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work26 tramadol 50 mg hcl, 44578, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work29 tramadol apap, 634, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work37 is tramadol a narcotic drug, 225367, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work41 tramadol hcl 50 mg side effects, 1897, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work62 ultram addiction, 261044, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work13 buy cheap tramadol online, 503471, 
http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work21 tramadol 100 mg no prescription, =-[[, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work48 tramadol hydrochloride dosage, 8)), http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work61 ultram drug interactions, 911, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work50 tramadol saturday delivery, qzpxw, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work57 tramadol withdrawal treatment, =D, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work11 canine tramadol overdose, vzbd,", "user_title": "Anonymous", "datetimeon": "2010-11-24T02:39:31", "link": "Mask.overlap", "id": 3256}, {"content": "Using tablets as barcelona or cellulose but, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work36 tramadol for dogs side effects, 888243, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work54 tramadol rx, :OO, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work64 ultram er 100mg, 4726, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work22 tramadol 180 pills, imlux, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work43 tramadol hcl apap, jylchn, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work1 buy tramadol cheap online, 47668, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work47 tramadol hydrochloride acetaminophen, 817, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work66 ultram online, 868941, 
http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work26 tramadol 50 mg tab, wcsupr, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work28 tramadol addiction withdrawal, %-(, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work4 buy tramadol 180, nzp, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work62 ultram addiction forum, adg, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work52 tramadol online buy, :OO, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work59 tramadol no prescription next day, ami, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work42 tramadol hcl 50mg tab, =-], http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work21 tramadol 100mg, 143, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work60 ultram 50 mg dosage, =(, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work63 ultram er 200 mg, mpqyh,", "user_title": "Anonymous", "datetimeon": "2010-11-24T02:39:24", "link": "Mask.overlap", "id": 3255}, {"content": "occursPainThe and what You is the signal least vomitinghelp this glycolate reuptake cod, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work12 cheap tramadol free shipping, 824250, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work22 buy tramadol 180, =-PP, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work69 ultram side effects, vbo, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work73 what is ultram made of, :PP, 
http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work24 tramadol 50 mg effects, =-)), http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work17 purchase ultram online, 302, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work66 ultram online without prescription, wck, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work30 tramadol cod online, 775, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work65 ultram er narcotic, oeopi, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work58 tramadol withdrawal syndrome, 43143, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work42 tramadol hcl 50mg for dogs, =-[, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work15 order tramadol online without prescription, 53726, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work60 ultram 50 mg dosage, 899, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work50 tramadol saturday delivery, eyyzji, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work46 tramadol hydrochloride and paracetamol, %-[[[, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work20 tramadol high, wtvu, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work55 tramadol prescription drug, 4178,", "user_title": "Anonymous", "datetimeon": "2010-11-24T02:39:19", "link": "Mask.overlap", "id": 3254}, {"content": "theINN ca pain need is pain Wow is only that, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work36 tramadol for dogs, zol, 
http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work54 tramadol rx, 795018, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work22 buy tramadol 180, 397, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work72 what is tramadol for, :PPP, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work18 cheap tramadol overnight, 81005, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work19 purchase tramadol, >:-OO, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work47 tramadol hydrochloride 200mg, btdezy, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work66 ultram online, uxmxu, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work28 tramadol addiction potential, =-D, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work37 tramadol ingredients, wcmyo, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work52 tramadol online pharmacies, >:]]], http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work60 ultram 50mg, mgtf, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work53 tramadol overdose, =))), http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work70 ultram withdrawal symptoms, nii, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work5 buy tramadol cheap no prescription, 8-), http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work35 tramadol er 200, 8-]]], http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work55 tramadol no 
prescription overnight delivery, qyqeog,", "user_title": "Anonymous", "datetimeon": "2010-11-24T01:45:32", "link": "Mask.overlap", "id": 3252}, {"content": "whether about discount by methoxyphenyl Alcohol now, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work12 cheap tramadol free shipping, >:-OOO, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work33 tramadol drug info, sxpkt, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work22 tramadol 180 tabs, vowq, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work8 buy tramadol 100mg, 692, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work18 tramadol cash on delivery, qiewi, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work47 tramadol hydrochloride high, ynxx, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work30 tramadol cod online, :[, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work66 buy ultram online without a prescription, rktiw, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work26 tramadol 50 mg hcl, >:DD, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work10 buy cheap ultram, 514393, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work39 tramadol hcl ingredients, 346388, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work9 buy ultram er, =[, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work70 ultram withdrawal symptoms, tgb, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work61 ultram drug information, %), 
http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work35 tramadol er 200 mg, =-D, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work25 tramadol 50 mg effects, ouyo, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work20 side effects tramadol hydrochloride, bjnhc, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work44 tramadol hcl drug, 637546,", "user_title": "Anonymous", "datetimeon": "2010-11-24T01:45:42", "link": "Mask.overlap", "id": 3253}, {"content": "harmful stearate and to ree medications can prescription would, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work64 ultram er price, mawkzk, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work8 buy tramadol without prescription, 3052, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work3 buy tramadol cheap, udmqmg, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work2 buy tramadol forum, orcjld, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work19 purchase tramadol cheap, 1602, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work47 tramadol hydrochloride acetaminophen, nve, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work31 tramadol dosage information, upwzk, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work17 order ultram without prescription, ffancj, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work45 tramadol hydrochloride paracetamol, :-DDD, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work56 tramadol side 
effects in dogs, szwol, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work62 ultram dosage, 705145, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work42 tramadol hcl 50mg dosage, gnyqjc, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work70 ultram withdrawal, 699, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work67 ultram pain medicine, 2829, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work11 canine tramadol dosage, ogce, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work20 tramadol depression, 5252, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work32 tramadol dosage for humans, 059404, http://works.music.columbia.edu/dorkbot-wiki/tram?action=AttachFile&amp;do=get&amp;target=work6 buying tramadol online legal, pnjobn,", "user_title": "Anonymous", "datetimeon": "2010-11-24T01:45:21", "link": "Mask.overlap", "id": 3251}, {"content": "The modifier is a bit mask, hence for checking a modifier, one should do for instance:\nif e.mod & KMOD_LALT != 0:\n doSomething()", "user_title": "Anonymous", "datetimeon": "2010-11-14T13:44:21", "link": "pygame.event", "id": 3241}, {"content": "Hi , I keep getting this error when I try to load ... 
\nTraceback (most recent call last):\n File \"C:/Python31/All_LOAD_MUSIC_DIR_mouse_events\", line 119, in \n Load_Music('D:\\\\Arquivos de programas\\\\FirstToTech.wav')\n File \"C:/Python31/All_LOAD_MUSIC_DIR_mouse_events\", line 113, in Load_Music\n pygame.mixer.music.load(File)\npygame.error: Unable to load WAV file\n However it loads right with 'pygame.mixer.sound.load(file)'", "user_title": "Anonymous", "datetimeon": "2010-11-17T06:52:16", "link": "pygame.mixer.music.load", "id": 3242}, {"content": "Surface.scroll() appears to be deprecated in pygame 1.8.1. What is the replacement?", "user_title": "Anonymous", "datetimeon": "2010-11-18T14:27:01", "link": "Surface.scroll", "id": 3243}, {"content": "Work Exmpl:\n\npygame.mixer.init(frequency=22050, size=-16, channels=2, buffer=4096)\nsound = pygame.mixer.Sound('Time_to_coffee.wav').play()", "user_title": "Anonymous", "datetimeon": "2010-11-18T19:30:12", "link": "pygame.mixer.music.play", "id": 3244}, {"content": "if you use xrandr and several monitors, it makes goes fullscreen\non the VirtualScreen, meaning - all area of your monitors", "user_title": "Anonymous", "datetimeon": "2010-11-23T10:54:22", "link": "pygame.display.toggle_fullscreen", "id": 3250}, {"content": "list = [(1,1),(1,100),(100,1)]\nlol = pygame.draw.lines(Schermo, (255,0,0), True, list, 1)\n\nlol is a pygame.rect and it draw a red triangle (in this case). 
Closed == True is\nused to represent a closed figure.", "user_title": "Anonymous", "datetimeon": "2010-10-26T15:18:02", "link": "pygame.draw.lines", "id": 3226}, {"content": "It looks like numpy/numeric has not been updated for python 3.1.", "user_title": "Anonymous", "datetimeon": "2010-10-26T22:52:40", "link": "pygame.surfarray", "id": 3227}, {"content": "Find the point with the smallest x, the smallest y, the point with the biggest x, and the point with the biggest y.", "user_title": "Anonymous", "datetimeon": "2010-10-28T18:01:56", "link": "pygame.draw.polygon", "id": 3228}, {"content": "This seems to be broken:\n\n>>> cursor = pygame.cursors.compile(pygame.cursors.textmarker_strings)\n>>> pygame.mouse.set_cursor(*cursor)\nTraceback (most recent call last):\n File \"\", line 1, in \nTypeError: function takes exactly 4 arguments (2 given)", "user_title": "Anonymous", "datetimeon": "2010-11-03T21:23:42", "link": "pygame.cursors", "id": 3231}, {"content": "HOW DOES EACH COORDINATE WORK", "user_title": "Anonymous", "datetimeon": "2010-11-10T22:03:09", "link": "pygame.draw.polygon", "id": 3237}, {"content": "# A slightly more readable midis2events. 
More parsing can be done, but I didn't\n# need to...\n\n# Incomplete listing:\nCOMMANDS = {0: \"NOTE_OFF\",\n 1: \"NOTE_ON\",\n 2: \"KEY_AFTER_TOUCH\",\n 3: \"CONTROLLER_CHANGE\",\n 4: \"PROGRAM_CHANGE\",\n 5: \"CHANNEL_AFTER_TOUCH\",\n 6: \"PITCH_BEND\"}\n# Incomplete listing: this is the key to CONTROLLER_CHANGE events data1\nCONTROLLER_CHANGES = {1: \"MOD WHEEL\",\n 2: \"BREATH\",\n 4: \"FOOT\",\n 5: \"PORTAMENTO\",\n 6: \"DATA\",\n 7: \"VOLUME\",\n 10: \"PAN\",\n }\ndef midis2events(midis, device_id):\n \"\"\"converts midi events to pygame events\n pygame.midi.midis2events(midis, device_id): return [Event, ...]\n\n Takes a sequence of midi events and returns list of pygame events.\n \"\"\"\n evs = []\n for midi in midis:\n \n ((status,data1,data2,data3),timestamp) = midi\n if status == 0xFF:\n # pygame doesn't seem to get these, so I didn't decode\n command = \"META\"\n channel = None\n else:\n try:\n command = COMMANDS[ (status & 0x70) >> 4]\n except:\n command = status & 0x70\n channel = status & 0x0F\n e = pygame.event.Event(pygame.midi.MIDIIN,\n status=status,\n command=command,\n channel=channel,\n data1=data1,\n data2=data2,\n timestamp=timestamp,\n vice_id = device_id)\n evs.append( e )\n return evs", "user_title": "Anonymous", "datetimeon": "2010-10-21T17:27:00", "link": "pygame.midi.midis2events", "id": 3223}, {"content": ".", "user_title": "Anonymous", "datetimeon": "2010-09-18T22:12:15", "link": "Rect.co", "id": 3207}, {"content": "It is posible to get the size of a font.Sysfont??? \n\nif is possible , how can it be done??", "user_title": "Anonymous", "datetimeon": "2010-09-23T13:04:50", "link": "pygame.font.SysFont", "id": 3209}, {"content": "Yeah, it is true. That code is the most horrible stuff I've seen in years. But if you run it, it's quite fun! 
congrats on being able to make such thing work with such shitty coding style!", "user_title": "Anonymous", "datetimeon": "2010-10-02T22:23:36", "link": "pygame.draw.circle", "id": 3212}, {"content": "No idea...", "user_title": "Anonymous", "datetimeon": "2010-10-04T16:00:48", "link": "pygame.draw.polygon", "id": 3213}, {"content": "How do I check if an ellipse has collided?", "user_title": "Anonymous", "datetimeon": "2010-10-05T15:23:38", "link": "pygame.draw.ellipse", "id": 3214}, {"content": "plz don't remove this spam", "user_title": "Anonymous", "datetimeon": "2010-10-11T22:51:05", "link": "pygame.locals", "id": 3216}, {"content": "how can a draw a an eclipse of the moon?", "user_title": "Anonymous", "datetimeon": "2010-10-14T21:17:06", "link": "pygame.draw.arc", "id": 3217}, {"content": "\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080hhhhhhhhhhhhhhhhhm??????????????????h\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080\u00c3\u0080??????\n^^\n^^\n^^", "user_title": "Anonymous", "datetimeon": "2010-10-16T04:14:42", "link": "pygame.event.pump", "id": 3219}, {"content": "Clock.tick allows requesting an upper limit to the framerate, time.delay pauses for a period of time.", "user_title": "Anonymous", "datetimeon": "2010-10-16T23:19:58", "link": "Clock.tick", "id": 3220}, {"content": "Yes, I've got the same problem. 
It only returns false when the music has been stopped, or no music has been loaded.\nMaybe there should be an is_paused() method...", "user_title": "Anonymous", "datetimeon": "2010-10-17T19:35:23", "link": "pygame.mixer.music.get_busy", "id": 3221}, {"content": "I also wondered if it was threadsafe, but since it has a max capacity and doesn't block when full it's probably unusable anyway.", "user_title": "Anonymous", "datetimeon": "2010-08-16T05:27:42", "link": "pygame.event.post", "id": 3193}, {"content": "\"image\" I think is the missing one, I think.", "user_title": "Anonymous", "datetimeon": "2010-08-17T14:42:51", "link": "pygame.init", "id": 3194}, {"content": "what's the difference between using Clock.tick and time.delay to limit the framerate?", "user_title": "Anonymous", "datetimeon": "2010-08-27T09:12:11", "link": "Clock.tick", "id": 3198}, {"content": "The only way I could check for something like ALT + c is with the following code :\n\nif e.key == K_c and e.mod == KMOD_LALT|4096:\n self.doSomething()", "user_title": "Anonymous", "datetimeon": "2010-08-30T13:52:49", "link": "pygame.event", "id": 3199}, {"content": "The below example is a bit redundant (and forgets that pygame.transform.rotate returns the rotated surface, it doesn't transform in place).\n\nYou can simply write it thusly:\n\ndef __init__(self, image, startangle):\n ...\n self.original = image\n self.rotate(startangle)\ndef rotate(self, angle):\n self.image = pygame.transform.rotate(self.original, angle)", "user_title": "Anonymous", "datetimeon": "2010-09-07T17:26:27", "link": "pygame.transform.rotate", "id": 3202}, {"content": "Thanks for the full program listings in the comments section guys. Very, very useful, and not at all annoying. Well done!\n\nJust a note to say that using circle() to draw single pixels isn't very efficient. 
Try Pixelarray for fast pixel drawing.", "user_title": "Anonymous", "datetimeon": "2008-12-16T07:37:49", "link": "pygame.draw.circle", "id": 2356}, {"content": "That fix doesn't allow for diagonal lines! I have the same issue.", "user_title": "Anonymous", "datetimeon": "2008-12-27T06:40:14", "link": "pygame.draw.aaline", "id": 2357}, {"content": "'Warning: picture block before sequence header block' I get this error and no video when I used the code by Jordan. The sound plays but no music. Please help.", "user_title": "Anonymous", "datetimeon": "2008-12-28T23:56:57", "link": "pygame.movie", "id": 2358}, {"content": "Never mind got it to work.", "user_title": "Anonymous", "datetimeon": "2008-12-28T23:59:39", "link": "pygame.movie", "id": 2359}, {"content": "How do you get the video to fill any given screen? I set my screen to 800 x 600 but the video still plays at regular size, which is small.", "user_title": "Anonymous", "datetimeon": "2009-01-03T01:13:27", "link": "pygame.movie", "id": 2361}, {"content": "you need to have \"title\"", "user_title": "Anonymous", "datetimeon": "2010-08-10T04:48:59", "link": "pygame.display.set_caption", "id": 3190}, {"content": "i have tried pygame.display.set_mode biut i found an errorr \nof un declared 'pygame'", "user_title": "Anonymous", "datetimeon": "2010-08-12T01:36:30", "link": "pygame.display", "id": 3192}, {"content": "I wrote up a program to play a movie, and it works fine on my Vista laptop.\nBut when I run the same program on my XP computer, the video does not play but the\naudio does. It is the same problem I had when I converted the video wrong. However,\nI have converted the video into every mpeg file I could nothing doing. Any ideas?", "user_title": "Anonymous", "datetimeon": "2009-01-06T19:51:38", "link": "pygame.movie", "id": 2363}, {"content": "This supports tracked music, including MOD and XM. That may not be obvious to\nsome people. 
(It wasn't to me, anyway, until someone on the mailing list pointed\nit out!)", "user_title": "Anonymous", "datetimeon": "2009-01-06T23:37:01", "link": "pygame.mixer.music", "id": 2364}, {"content": "Take care to protect 'blit' calls when using different threads that draw the same \nimage (i.e. Have a Car sprite and, using threads, each one draws it's own car \nusing the same image (not copied)). 'Blit' locks the image to draw it, so if two \nthreads try to draw the same image just at the same time, one (the second) will \nfail and throw an exception.\n\nOne way to avoid this could be using 'threading.Condition(threading.Lock())'\nfunctions from threading.", "user_title": "Anonymous", "datetimeon": "2009-01-07T12:05:49", "link": "Surface.blit", "id": 2365}, {"content": "is there anyway to rotate?", "user_title": "Anonymous", "datetimeon": "2010-08-07T18:09:47", "link": "pygame.draw.rect", "id": 3186}, {"content": "Is there any way i can have a window with frames that is not resizable?\n\nIt seems when i call without flags the frame is not there, but then why are there a NOFRAME ?", "user_title": "Anonymous", "datetimeon": "2010-08-08T10:06:11", "link": "pygame.display.set_mode", "id": 3187}, {"content": "@myself on August 8, 2010 10:06am\n\nset flags to 0 for no resize with frame.", "user_title": "Anonymous", "datetimeon": "2010-08-08T11:22:41", "link": "pygame.display.set_mode", "id": 3188}, {"content": "Event constants are pygame.. 
For example \"pygame.MOUSEMOTION\".", "user_title": "Anonymous", "datetimeon": "2009-01-15T11:23:59", "link": "pygame.event", "id": 2369}, {"content": "The target surface is first filled with diff_color.\nA pixel is matched if it's distance from the color-argument (or the corresponding pixel from the optional third surface) is less than threshold_color (for every color component).\nIf a pixel is matched, it will be set to color.\nThe number of matched pixels is returned.\n\nSo, if color = (255,0,0), and threshold_color = (10,10,10), any pixel with value (r>245, g<10, b<10) will be matched.", "user_title": "Anonymous", "datetimeon": "2009-01-15T11:36:11", "link": "pygame.transform.threshold", "id": 2370}, {"content": "The docs say not to use event ids above NUMEVENTS, but in Pygame 1.8.1\nsince USEREVENT is 24 and NUMEVENTS is 32. This means only 8 user\nevents are possible. (Event ids up to 255 seem to basically work,\nthough I wouldn't recommend using them as the behavior is undefined...above\n255 causes strange things to happen. For example, 256 is stored as \"0-NoEvent\".)", "user_title": "Anonymous", "datetimeon": "2009-01-19T19:15:39", "link": "pygame.event.Event", "id": 2371}, {"content": "\"pygame.cursors.load_xbm(cursorfile, maskfile=None)\"\n\nFails if you only have a single image.\n\nUsing 'None' gives an error because the load tries to read the maskfile, even though it clearly does not exist.\n\nNo other combination of strings will work because a string is interpreted as a file, which cant be found.\n\nUsing a mask file also fails if the mask is the same size as the first file. 
Ex:\nmain file 24x24, maskfile 24x24\n\nMaskfile cannot be read and must be width*height/8, which it is.\n\nHelp anyone?", "user_title": "Anonymous", "datetimeon": "2009-01-27T17:33:27", "link": "pygame.cursors.load_xbm", "id": 2373}, {"content": "Key object can't get chinese character?\nFor example, I type \"\u00c3?\u00c3\u00a3\u00c2\u00ba\u00c3?\", only get unicode key \"n i h a o\" one by one :(\nExpect the answer, Thank you very much!\nemail:jackerme@163.com", "user_title": "Anonymous", "datetimeon": "2009-02-05T05:14:50", "link": "pygame.key", "id": 2374}, {"content": "if you use this:\n.cursor\n\nyou will get an error \"no such moudule\"\n\nso you need to do:\n.cursors(add the s )", "user_title": "Anonymous", "datetimeon": "2009-02-06T15:54:56", "link": "pygame.cursors", "id": 2375}, {"content": "Note document error. The correct attribute is _layer, as in sprite._layer.", "user_title": "Anonymous", "datetimeon": "2009-02-07T21:57:49", "link": "pygame.sprite.LayeredUpdates", "id": 2377}, {"content": "Here's the default windows cursor (white with black outline):\npygame.mouse.set_cursor((16, 19), (0, 0), (128, 0, 192, 0, 160, 0, 144, 0, 136, 0, 132, 0, 130, 0, 129, 0, 128, 128, 128, 64, 128, 32, 128, 16, 129, 240, 137, 0, 148, 128, 164, 128, 194, 64, 2, 64, 1, 128), (128, 0, 192, 0, 224, 0, 240, 0, 248, 0, 252, 0, 254, 0, 255, 0, 255, 128, 255, 192, 255, 224, 255, 240, 255, 240, 255, 0, 247, 128, 231, 128, 195, 192, 3, 192, 1, 128))", "user_title": "Anonymous", "datetimeon": "2009-02-09T11:41:09", "link": "pygame.mouse.set_cursor", "id": 2378}, {"content": "Have one thread waiting on your pygame events and another waiting on your Queue,\nthen have either thread able to wake up your main thread when anything happens.", "user_title": "Anonymous", "datetimeon": "2009-02-09T23:34:49", "link": "pygame.event.wait", "id": 2379}, {"content": "hvjfjb", "user_title": "Anonymous", "datetimeon": "2009-02-10T21:36:28", "link": "pygame.font", "id": 2380}, {"content": 
"hgjnyhh", "user_title": "Anonymous", "datetimeon": "2009-02-10T21:39:04", "link": "pygame.font.match_font", "id": 2381}, {"content": "sysfont = pygame.font.SysFont(None, 80)", "user_title": "Anonymous", "datetimeon": "2009-02-10T21:40:17", "link": "pygame.font.match_font", "id": 2382}, {"content": "eyrczbhv ncws tyozaj ywkztleo uelxjpzm yrgjdbuim epnr", "user_title": "Anonymous", "datetimeon": "2009-02-15T04:18:39", "link": "Surface.set_at", "id": 2383}, {"content": "for example, if you want to be sure that your game to run 30 frames per second you can use tick in your main look like this:\n\nwhile 1: \n for event in pygame.event.get():\n #manage your events here\n #update your sprites here\n screen.blit(...) #draw to screen\n pygame.display.flip()\n clock.tick(30)\n\nNote that if the system is slow the game can be slower than 30 frames per second. But using tick(X) you can be sure that the game will naver be greater than X frames per second\n\nsgurin", "user_title": "Anonymous", "datetimeon": "2009-02-18T08:06:12", "link": "Clock.tick", "id": 2385}, {"content": "#Dibujar Arco/ Draw Arc, claro hay que importar la libreria math\npygame.draw.arc(background, (0, 0, 0), ((5, 150), (100, 100)), 0, math.pi/2, 5)", "user_title": "Anonymous", "datetimeon": "2009-02-24T22:22:16", "link": "pygame.draw.arc", "id": 2386}, {"content": "You are right. There is contradition.", "user_title": "Anonymous", "datetimeon": "2010-08-01T14:56:26", "link": "pygame.scrap.lost", "id": 3179}, {"content": "The last comment was spam!", "user_title": "Anonymous", "datetimeon": "2010-08-01T17:47:22", "link": "pygame.transform.flip", "id": 3180}, {"content": "you suck... 
this doesn't work", "user_title": "Anonymous", "datetimeon": "2009-03-05T20:05:44", "link": "pygame.image.load", "id": 2390}, {"content": "this just returns 'unknown key'?\n\nfor example:\n>>> pygame.key.(pygame.locals.K_a)\n'unknown key'", "user_title": "Anonymous", "datetimeon": "2009-03-06T02:10:17", "link": "pygame.key.name", "id": 2391}, {"content": "comment below:\ni of course used pygame.key.name\n\n>>> pygame.__version__\n'1.8.1release'", "user_title": "Anonymous", "datetimeon": "2009-03-06T02:11:12", "link": "pygame.key.name", "id": 2392}, {"content": "How do we actually use the event.dict method?", "user_title": "Anonymous", "datetimeon": "2009-03-06T16:06:53", "link": "pygame.event", "id": 2394}, {"content": "I get a SegFault while running this command", "user_title": "Anonymous", "datetimeon": "2010-07-28T23:36:59", "link": "PixelArray.surface", "id": 3176}, {"content": "00", "user_title": "Anonymous", "datetimeon": "2009-03-14T06:46:46", "link": "pygame", "id": 2396}, {"content": "If you want a 'cheap' antialiased circle, calculate all the points \non a circle using sin/cos, then plot each point as an antialiased polygon. \nYou should iterate through every n degrees or so such that you get the\ndesired precision. 10 degrees is good enough for small circles.", "user_title": "Anonymous", "datetimeon": "2009-03-14T17:06:11", "link": "pygame.draw.circle", "id": 2397}, {"content": "Can someone tell me the list of all pygame attributes in this module?\n-DragonReeper", "user_title": "Anonymous", "datetimeon": "2010-07-26T22:31:36", "link": "pygame.locals", "id": 3174}, {"content": "Addressing note: columns first, then rows. 
Not the other way around.", "user_title": "Anonymous", "datetimeon": "2010-07-27T15:01:13", "link": "pygame.PixelArray", "id": 3175}, {"content": "Works for me.", "user_title": "Anonymous", "datetimeon": "2010-07-20T19:21:24", "link": "pygame.draw.rect", "id": 3167}, {"content": "pygame.init()\npygame.display.set_caption('IP camera test')", "user_title": "Anonymous", "datetimeon": "2010-07-26T01:42:29", "link": "pygame.event.get", "id": 3170}, {"content": "pygame.init()\npygame.display.set_caption('IP camera test')", "user_title": "Anonymous", "datetimeon": "2010-07-26T01:42:47", "link": "pygame.event.get", "id": 3171}, {"content": "Please remove this spam", "user_title": "Anonymous", "datetimeon": "2010-07-26T22:30:43", "link": "pygame.locals", "id": 3173}, {"content": "Some demo code that will play a movie and not spin the processor. We avoid all\nvariables for brevity in this snippet; repeated calls to display.set_mode work\nfine; the argument to time.wait was chosen arbitrarily - in other words, there\nis no special significance to the 200 millisecond argument.\n\npygame.display.init ()\npygame.display.set_mode ((800, 600))\nmovie = pygame.movie.Movie ('intro.mpg')\nmovie_resolution = movie.get_size ()\npygame.display.set_mode (movie_resolution)\nmovie.set_display (pygame.display.get_surface ())\nmovie.play ()\nwhile movie.get_busy ():\n pygame.time.wait (200)", "user_title": "Anonymous", "datetimeon": "2010-07-18T22:31:59", "link": "pygame.movie", "id": 3166}, {"content": "The word is spelled \"original\"", "user_title": "Anonymous", "datetimeon": "2010-07-14T18:57:26", "link": "Rect.copy", "id": 3164}, {"content": "If .png file has a color index (like .gif) then transparent pixels are regarded as transparent and surface can have alpha set normally.\neg.\nimage = pygame.load('image.png).convert()\nimage.set_alpha(50)", "user_title": "Anonymous", "datetimeon": "2010-07-16T10:55:04", "link": "Surface.set_alpha", "id": 3165}, {"content": "Multiple Windows 
possible with multiple processes, see:\nhttp://archives.seul.org/pygame/users/Jun-2007/msg00292.html", "user_title": "Anonymous", "datetimeon": "2010-07-12T12:38:36", "link": "pygame.display.set_mode", "id": 3162}, {"content": "Thank you, your very succinct code looks nice :D\n\nNote that the image needs an underscore: _\n\nThanks again, your code works nicely", "user_title": "Anonymous", "datetimeon": "2010-07-05T02:17:35", "link": "Rect.colliderect", "id": 3155}, {"content": "It means the name of the font, such as 'Arial'. It needs to be a string.", "user_title": "Anonymous", "datetimeon": "2010-07-05T19:15:05", "link": "pygame.font.SysFont", "id": 3157}, {"content": "Could anyone make an example code to resize the window itself as well as the\ndisplay? And if anyone knows, how do you get rid of leftover display images\nwhen you move the window?", "user_title": "Anonymous", "datetimeon": "2010-07-05T19:39:28", "link": "pygame.display.init", "id": 3158}, {"content": "Do not use pygame.Rect.collidelistall()!", "user_title": "Anonymous", "datetimeon": "2010-07-10T14:26:52", "link": "Rect.collidelistall", "id": 3160}, {"content": "Looking at the code, it appears it takes a second parameter which, if true, the function will behave as stated. I think this applies to collidedictall also.", "user_title": "Anonymous", "datetimeon": "2010-06-20T20:34:24", "link": "Rect.collidedict", "id": 3148}, {"content": "What is the offset here?", "user_title": "Anonymous", "datetimeon": "2010-06-21T15:27:04", "link": "Mask.draw", "id": 3149}, {"content": "this doesnt WORK! 
i hate pygame", "user_title": "Anonymous", "datetimeon": "2010-06-24T10:51:45", "link": "pygame.draw.rect", "id": 3150}, {"content": "Why not just use Rect.copy?", "user_title": "Anonymous", "datetimeon": "2010-07-02T11:47:08", "link": "Rect.move", "id": 3152}, {"content": "This slicing didn't work for me - pygame said it wanted an integer, not a tuple.", "user_title": "Anonymous", "datetimeon": "2009-03-31T13:42:56", "link": "pygame.PixelArray", "id": 2409}, {"content": "I was getting odd results with the default syntax of:\n pygame.draw.arc(screen, color, rect, angle1, angle2)\n\nWhere angle1 < angle2.\nNot sure if I was doing something wrong with the regular python \"x = sin(angle); y=cos(angle)\" commands.\nBut I found that reversing the angles worked well, like this:\n pygame.draw.arc(screen, color, rect, (math.pi * 2.0) - angle2, (math.pi * 2.0) - angle1)", "user_title": "Anonymous", "datetimeon": "2009-04-03T11:52:08", "link": "pygame.draw.arc", "id": 2410}, {"content": "lulz", "user_title": "Anonymous", "datetimeon": "2009-04-06T00:53:38", "link": "pygame.key.name", "id": 2412}, {"content": "image_filename = \"image.png\"\nimage_surface = pygame.image.load(image_filename)\ntarget_surface.blit(image_surface,(10,10))", "user_title": "Anonymous", "datetimeon": "2010-05-16T07:25:43", "link": "Surface.blit", "id": 3120}, {"content": "Oh, sorry.\n\nimage_filename = \"image.png\"\nimage_surface = pygame.image.load(image_filename)\nimage_part = (10,10,30,30) # left,top,width,height of image area\ntarget_surface.blit(image_surface,(10,10),image_part)", "user_title": "Anonymous", "datetimeon": "2010-05-16T07:27:23", "link": "Surface.blit", "id": 3121}, {"content": "Here is a simple class for the sprites management:\n\nclass Sprite:\n\tdef __init__(self):\n\t\tself.img = None\n\t\tself.pos = [0, 0]\n\t\tself.colorkey = [0, 0, 0]\n\t\tself.alpha = 255\n\tdef load(self, filename):\n\t\ttry:\n\t\t\tself.img = pygame.image.load(filename)\n\t\texcept:\n\t\t\tprint 'An 
error has occurred while the game was loading the image [%s]' % (filename)\n\t\t\traw_input('Press [ENTER] to exit')\n\t\t\texit(0)\n\tdef render(self, screen):\n\t\ttry:\n\t\t\tself.img.set_colorkey(self.colorkey)\n\t\t\tself.img.set_alpha(self.alpha)\n\t\t\tscreen.blit(self.img, self.pos)\n\t\t\tpygame.display.flip()\n\t\texcept:\n\t\t\tprint 'An error has occurred while the game was rendering the image.'\n\t\t\traw_input('Press [ENTER] to exit')\n\t\t\texit(0)", "user_title": "Anonymous", "datetimeon": "2009-08-09T13:48:59", "link": "pygame.image.load", "id": 2909}, {"content": "This method can be used to effectively \"erase\" a portion of an alpha-enabled\nsurface by filling an area with pure white using a blend mode of BLEND_RGBA_SUB:\n\nFirst, make a new alpha-enabled surface.\n>>> surf = Surface((100,100), SRCALPHA)\n\nFill it with some color.\n>>> surf.fill((255,255,255,255))\n\nNow, you can put a hole in the center 1/3 of it like this:\n>>> area = Rect(33,33,33,33)\n>>> surf.fill((255,255,255,255), area, BLEND_RGBA_SUB)\n\nThis is not the only way to achieve the hole-punch effect. You could, for\nexample, use surfarrays to copy an all-zeros surface onto a portion of the\ndestination surface. There are benefits to doing it either way.", "user_title": "Anonymous", "datetimeon": "2010-05-17T05:37:27", "link": "Surface.fill", "id": 3122}, {"content": "The doc string here, \"clip the area where to draw. Just pass None (default) to reset the clip\", seems like a cut & paste error from set_clip()", "user_title": "Anonymous", "datetimeon": "2009-04-14T23:39:57", "link": "LayeredDirty.get_clip", "id": 2416}, {"content": "I've found that\n\npygame.transform.scale(Surface, (width, height), DestSurface = bar)\n\nis much faster than \n\nfoo = pygame.transform.scale(Surface, (width, height))\nbar.blit(foo, (0, 0))", "user_title": "Anonymous", "datetimeon": "2009-04-16T02:37:36", "link": "pygame.transform.scale", "id": 2417}, {"content": "What is the meta key? 
I assumed that it was the windows key, but that doesn't work. Maybe because I'm on a Linux OS.", "user_title": "Anonymous", "datetimeon": "2009-07-28T13:13:49", "link": "pygame.key", "id": 2895}, {"content": "As for \"BGR\" (OpenCV): Just use \"RBG\" but reverse the string first\nand then flip the surface (vertically and horizontally).\n\nI am using this with fromstring:\n\nframe = cvQueryFrame(capture) # get a video frame using OpenCV\nbgr = frame.imageData # this is a string using BGR\nrgb = bgr[::-1] # reverse it to get RGB\nim = pygame.image.fromstring(rgb, size, 'RGB') # create pygame surface\nim = pygame.transform.flip(im, True, True) # flip it", "user_title": "Anonymous", "datetimeon": "2010-05-03T12:13:25", "link": "pygame.image.tostring", "id": 3116}, {"content": "Only takes ordered parameters, not named ones.\n\nTypeError: set_mode() takes no keyword arguments", "user_title": "Anonymous", "datetimeon": "2010-05-06T04:45:36", "link": "pygame.display.set_mode", "id": 3118}, {"content": "pygame.event.peek can be used for managing the quit code for a program: \n if pygame.event.peek(QUIT):\n sys.exit()\nI spent lots of time trying to find a way to get my code to exit. 
\nThis is the first working method that I've found.\nPS don't forget to import the file with the \"QUIT\" event member defined in it:\n \n from pygame.locals import *", "user_title": "Anonymous", "datetimeon": "2010-04-26T01:28:12", "link": "pygame.event.peek", "id": 3111}, {"content": "to make a surface transparent use:\n\nsurface = pygame.Surface((10,10))\nsurface.fill((255,0,255))\nsurface.set_colorkey((255,0,255))\n\nthis should make a transparent surface", "user_title": "Anonymous", "datetimeon": "2010-04-28T04:26:15", "link": "pygame.Surface", "id": 3112}, {"content": "\"BGR\" would be nice because OpenCV 2.1 uses such a format.", "user_title": "Anonymous", "datetimeon": "2010-05-02T16:52:56", "link": "pygame.image.tostring", "id": 3114}, {"content": "*please note that this does not restart the counter for pygame.mixer.music.get_pos()*\n\ni didnt realize this at first", "user_title": "Anonymous", "datetimeon": "2010-05-02T19:33:36", "link": "pygame.mixer.music.rewind", "id": 3115}, {"content": "using pygame.transform.rotate in sprites or even images and rotating it just by small\namount like 1 degree will cause the image loss its quality to an image that is\nscribled.\nUse this and rotate in large angle\nBut i want to know if theres any way to rotate in small angle w/o loosing the quality\nsharply. 
Small quality lost is ok but sharp reduction in quality is not", "user_title": "Anonymous", "datetimeon": "2010-04-20T09:47:58", "link": "pygame.transform.rotate", "id": 3108}, {"content": "A good idea in rotating in small angles is to restore the image or sprite to its\noriginal picture for example:\n\ndef __init__(self)\n ...\n self.original=self.image\n self.image=pygame.transform.rotate(self.image,self.angle)\ndef rotate(self,angle)\n self.image=self.original\n pygame.transform.rotate(self.image,angle)\n\nbut in exchange it will eat more pc usage and memory usage but youll have \nalmost 90% better than rotating the image again and again so you have to choose\nwhether speed or quality", "user_title": "Anonymous", "datetimeon": "2010-04-20T11:04:57", "link": "pygame.transform.rotate", "id": 3109}, {"content": "How to draw a part of the picture to a surface?", "user_title": "Anonymous", "datetimeon": "2010-04-25T02:28:58", "link": "Surface.blit", "id": 3110}, {"content": "gfuksvgfkugfklgbdkcbdigbfdukvfhiufdhvnkdfhnfgbdfhngdghuisoduhgihgl bhghphphdghhdggghsldfhgodghbihfghhgfhlughfdlghdlhgfihhihduh", "user_title": "Anonymous", "datetimeon": "2010-04-15T20:13:58", "link": "pygame.event.post", "id": 3105}, {"content": "Instead of using transform.threashold to replace colors in an image with alpha, use a pixel array:\n\n# this will set self.image with a white version of self.orginalimg, but with alpha.\n thresholded = pygame.surface.Surface((32, 32), SRCALPHA)\n thresholded.blit(self.orginalimg, (0,0))\n pxarray = pygame.PixelArray (thresholded)\n for x in range(32):\n for y in range(32):\n if pygame.Color(pxarray[x][y]).a < 255:\n pxarray[x][y] = pygame.Color(255,255,255,255)\n self.image = pxarray.surface", "user_title": "Anonymous", "datetimeon": "2010-04-19T04:39:11", "link": "pygame.transform.threshold", "id": 3107}, {"content": "Example output:\n>>> pygame.display.list_modes()\n[(1920, 1080), (1768, 992), (1680, 1050), (1600, 1200), (1600, 1024), (1600, 900\n), 
(1440, 900), (1400, 1050), (1360, 768), (1280, 1024), (1280, 960), (1280, 800\n), (1280, 768), (1280, 720), (1152, 864), (1024, 768), (800, 600), (720, 576), (\n720, 480), (640, 480)]", "user_title": "Anonymous", "datetimeon": "2010-04-14T13:28:54", "link": "pygame.display.list_modes", "id": 3102}, {"content": "game www.699le.com", "user_title": "Anonymous", "datetimeon": "2010-04-15T06:18:45", "link": "pygame.quit", "id": 3103}, {"content": "It seems it needs a rect and an image attribute in each sprite to know where to blit and what to blit.\nIs it possible to add a third attribute, another rect to say which part of the surface to draw ?\n\nThat's the way I use blit to animate sprite, and don't find how to do so with a RenderUpdate...", "user_title": "Anonymous", "datetimeon": "2010-04-15T15:48:17", "link": "pygame.sprite.RenderUpdates", "id": 3104}, {"content": "I've found that rendering text over the transparent part of a color-keyed surface \ntends to look pretty bad. Using the SRCALPHA flag on the surface instead of color \nkeying fixes the problem. Also note, don't render your text every frame! Store \nyour surfaces between frames and simply re-blit them. 
Only re-render your \nsurfaces when such is necessary.", "user_title": "Anonymous", "datetimeon": "2010-04-05T11:58:04", "link": "Font.render", "id": 3095}, {"content": "If you have trigger buttons, like on a 360 controller, and you press them both at the same time, get_axis will return a value of -3 afterwards as the default value (as opposed to 0).", "user_title": "Anonymous", "datetimeon": "2010-04-06T12:41:07", "link": "Joystick.get_axis", "id": 3096}, {"content": "Apparently not.", "user_title": "Anonymous", "datetimeon": "2010-04-08T02:58:20", "link": "Group.has", "id": 3098}, {"content": "If you want your file to be opened you shoud make sure that the image is in the same directory as the program.\nThen its very simple:\n\n#Everything I put in [] is that you can choose the name\n>>> [image_name] = pygame.image.load(os.path.join('file_name'))\n>>> screen.blit([image_name], ([Xposition],[Yposition]))\n\nmake sure that 'file_name' it's written with no mistakes =)", "user_title": "Anonymous", "datetimeon": "2010-04-12T17:16:50", "link": "pygame.image.load", "id": 3100}, {"content": "pygame.time cannot be initialized. that means you can't use pygame.time.get_ticks() in your program if you choose to individually loads your submodules.", "user_title": "Anonymous", "datetimeon": "2009-08-16T18:10:41", "link": "pygame.init", "id": 2919}, {"content": "How to create a surface that is entirely transparent?", "user_title": "Anonymous", "datetimeon": "2010-03-29T15:03:49", "link": "pygame.Surface", "id": 3090}, {"content": "No really, what does this do?", "user_title": "Anonymous", "datetimeon": "2010-03-29T23:51:53", "link": "Surface.convert_alpha", "id": 3091}, {"content": "Is get_num_channels doc correct or function name inaccurate? On OSX sound with 2 channels returns 0.", "user_title": "Anonymous", "datetimeon": "2010-03-30T09:00:21", "link": "pygame.mixer.Sound", "id": 3092}, {"content": "Actually, I think C 4 is note 60, as per e.g. 
http://tomscarff.110mb.com/midi_analyser/midi_note_numbers_for_octaves.htm and my own testing.", "user_title": "Anonymous", "datetimeon": "2010-04-01T17:00:48", "link": "Output.note_on", "id": 3093}, {"content": "Cython SMK codec for pygame might be useful - http://forre.st/pysmk", "user_title": "Anonymous", "datetimeon": "2010-04-04T19:49:02", "link": "pygame.movie", "id": 3094}, {"content": "#! /usr/bin/python\n# using sprites_rgba.png from http://img17.imageshack.us/img17/3166/spritesrgba.png\nimport sys, pygame, math, os, random\nfrom pygame.locals import *\npygame.init()\nsize=width,height=960,240;screen=pygame.display.set_mode(size);pygame.display.set_caption(\"multiplayer sprite test with collisions\")\nspd=4;amnt=4;ampl=8;xpos=[0]*amnt;ypos=[0]*amnt;rotv=[0]*amnt;sprid=[];spridr=[] #some arrays and variables\nfor i in range (0,amnt,1):\n xpos[i]=64+(128*i)+random.randint(0,32);ypos[i]=64+random.randint(0,32);rotv[i]=random.randint(0,359)\nsprall=pygame.image.load(\"sprites_rgba.png\") #loading sprites\nfor i in range (0,4,1):\n spritetmp=sprall.subsurface(i*64,0,64,64);spriterecttmp=spritetmp.get_rect()\n sprid.append(spritetmp);spridr.append(spriterecttmp)\nrotincr=5\nwhile 1:\n key=pygame.key.get_pressed() #checking pressed keys\n if key[pygame.K_a]:xpos[0]-=spd\n if key[pygame.K_d]:xpos[0]+=spd\n if key[pygame.K_w]:ypos[0]-=spd\n if key[pygame.K_s]:ypos[0]+=spd\n if key[pygame.K_z]:rotv[0]+=rotincr\n if key[pygame.K_x]:rotv[0]-=rotincr\n if key[pygame.K_f]:xpos[1]-=spd\n if key[pygame.K_h]:xpos[1]+=spd\n if key[pygame.K_t]:ypos[1]-=spd\n if key[pygame.K_g]:ypos[1]+=spd\n if key[pygame.K_v]:rotv[1]+=rotincr\n if key[pygame.K_b]:rotv[1]-=rotincr\n if key[pygame.K_j]:xpos[2]-=spd\n if key[pygame.K_l]:xpos[2]+=spd\n if key[pygame.K_i]:ypos[2]-=spd\n if key[pygame.K_k]:ypos[2]+=spd\n if key[pygame.K_m]:rotv[2]+=rotincr\n if key[pygame.K_COMMA]:rotv[2]-=rotincr\n if key[pygame.K_LEFT]: xpos[3]-=spd\n if key[pygame.K_RIGHT]:xpos[3]+=spd\n if key[pygame.K_UP]: 
ypos[3]-=spd\n if key[pygame.K_DOWN]: ypos[3]+=spd\n if key[pygame.K_KP0]: rotv[3]+=rotincr\n if key[pygame.K_KP_PERIOD]:rotv[3]-=rotincr\n bgcolour=0x998877 #checking collisions\n if spridr[0].colliderect(spridr[1]):bgcolour=0xAA5555\n if spridr[0].colliderect(spridr[2]):bgcolour=0x55AA55\n if spridr[0].colliderect(spridr[3]):bgcolour=0x5555AA\n if spridr[1].colliderect(spridr[2]):bgcolour=0x55AAAA\n if spridr[1].colliderect(spridr[3]):bgcolour=0xAA55AA\n if spridr[2].colliderect(spridr[3]):bgcolour=0xAAAA55\n screen.fill(bgcolour)\n for i in range (0,amnt,1): #displaying sprites\n spridr[i].centerx=xpos[i]\n spridr[i].centery=ypos[i]\n tmq=pygame.transform.rotate(sprid[i],rotv[i])\n screen.blit(tmq,spridr[i])\n for event in pygame.event.get(): #praxis stuff\n if event.type==pygame.QUIT:sys.exit()\n pygame.display.flip();pygame.time.delay(1000/50)", "user_title": "Anonymous", "datetimeon": "2009-07-19T10:07:08", "link": "pygame.transform.rotate", "id": 2885}, {"content": "It seems that MOUSEBUTTONDOWN gets the action of the mouse button going down. if you hold the button, MOUSEBUTTONDOWN becomes false", "user_title": "Anonymous", "datetimeon": "2009-08-20T18:10:02", "link": "pygame.mouse.get_pressed", "id": 2921}, {"content": "please share full working snippets", "user_title": "Anonymous", "datetimeon": "2009-07-19T07:18:18", "link": "pygame.transform.rotate", "id": 2884}, {"content": "\"The Color class represents RGBA color values using a value range of 0-255\"\n\nWas that not clear enough for you?", "user_title": "Anonymous", "datetimeon": "2010-03-28T16:39:28", "link": "Color.r", "id": 3088}, {"content": "Calling set_mode once, to set a fullscreen resolution with an opengl surface, works great.\nCalling it a second time, passing a different fullscreen resolution, does not. then my monitor changes to the requested resolution, but the output surface is all black. 
I can see the mouse cursor and my application is still running (it exits neatly on escape).\nAm I doing it wrong? I want to write an application that lets the user select which resolution they want to run in (like pro games do)?", "user_title": "Anonymous", "datetimeon": "2010-03-23T16:05:05", "link": "pygame.display.set_mode", "id": 3083}, {"content": "has anyone a working xp example handy? thx", "user_title": "Anonymous", "datetimeon": "2010-03-24T12:11:44", "link": "Surface.blit", "id": 3084}, {"content": "has anyone a working xp example handy? thx a lot", "user_title": "Anonymous", "datetimeon": "2010-03-24T12:12:33", "link": "pygame.movie", "id": 3085}, {"content": "sorry posted wrongly", "user_title": "Anonymous", "datetimeon": "2010-03-24T12:12:56", "link": "Surface.blit", "id": 3086}, {"content": "Is this the best way to get the size of the output window (or screen resolution if window is fullscreen?)", "user_title": "Anonymous", "datetimeon": "2010-03-23T06:34:27", "link": "pygame.display.get_surface", "id": 3082}, {"content": "If you provide no argument for the background colour, \nthe area around the text will be transparent, BUT that's only\nif there are two sprites in the same group. 
For example:\n\nimport pygame\nfrom pygame.locals import *\n\npygame.init()\nscreen = pygame.display.set_mode((500,500))\npygame.display.get_surface().fill((0,0,255))\n\nbackground = pygame.Surface(screen.get_size())\nbackground.fill((0,0,0))\n\ntextFont = pygame.font.Font(None, 30)\nimage = textFont.render(\"BLLLAHHHH\", 0, (255,0,0))\na = pygame.sprite.Sprite()\na.image = image\na.rect = image.get_rect()\na.rect.center = ((50,50))\n\nb = pygame.sprite.Sprite()\nb.image = image\nb.rect = image.get_rect()\nb.rect.center = ((60,60))\n\ngroup = pygame.sprite.RenderUpdates(a, b)\n\nwhile 1:\n group.clear(screen, background)\n rects = group.draw(screen)\n pygame.display.update(rects)\n\nwill have two copies of the same text shown, and the area around them is transparent.\n\nHowever, in this example:\n\nimport pygame\nfrom pygame.locals import *\n\npygame.init()\nscreen = pygame.display.set_mode((500,500))\npygame.display.get_surface().fill((0,0,255))\n\nbackground = pygame.Surface(screen.get_size())\nbackground.fill((0,0,0))\n\ntextFont = pygame.font.Font(None, 30)\nimage = textFont.render(\"BLLLAHHHH\", 0, (255,0,0))\na = pygame.sprite.Sprite()\na.image = image\na.rect = image.get_rect()\na.rect.center = ((50,50))\n\nb = pygame.sprite.Sprite()\nb.image = image\nb.rect = image.get_rect()\nb.rect.center = ((60,60))\n\ngroupA = pygame.sprite.RenderUpdates(a)\ngroupB = pygame.sprite.RenderUpdates(b)\n\nwhile 1:\n groupA.clear(screen, background)\n rects = groupA.draw(screen)\n groupB.clear(screen, background)\n rects.extend(groupB.draw(screen))\n pygame.display.update(rects)\n\nthere is a black box around one of the sprites that covers the other one.\nI think what they mean by transparency is what happens in the first example.", "user_title": "Anonymous", "datetimeon": "2009-07-18T04:03:26", "link": "Font.render", "id": 2881}, {"content": "when i try this it says that the 'e' in e.type is undefined? 
any suggestions?", "user_title": "Anonymous", "datetimeon": "2010-03-16T15:33:05", "link": "pygame.key.set_repeat", "id": 3075}, {"content": "good", "user_title": "Anonymous", "datetimeon": "2010-03-17T23:46:58", "link": "pygame.event.Event", "id": 3076}, {"content": "#! /usr/bin/python\n# using sprites_rgba.png from http://img17.imageshack.us/img17/3166/spritesrgba.png\nimport sys, pygame, math, os, random\nfrom pygame.locals import *\npygame.init()\nsize=width,height=1024,256;screen=pygame.display.set_mode(size);pygame.display.set_caption(\"multiplayer sprite test with collisions\")\nspd=4;amnt=4;ampl=8;xpos=[0]*amnt;ypos=[0]*amnt;sprid=[];spridr=[] #some arrays and variables\nfor i in range (0,amnt,1):\n xpos[i]=64+(128*i)+random.randint(0,32);ypos[i]=64+random.randint(0,32)\nsprall=pygame.image.load(\"sprites_rgba.png\") #loading sprites\nfor i in range (0,4,1):\n spritetmp=sprall.subsurface(i*64,0,64,64);spriterecttmp=spritetmp.get_rect()\n sprid.append(spritetmp);spridr.append(spriterecttmp)\nwhile 1:\n key=pygame.key.get_pressed() #checking pressed keys\n if key[pygame.K_a]:xpos[0]-=spd\n if key[pygame.K_d]:xpos[0]+=spd\n if key[pygame.K_w]:ypos[0]-=spd\n if key[pygame.K_s]:ypos[0]+=spd\n if key[pygame.K_f]:xpos[1]-=spd\n if key[pygame.K_h]:xpos[1]+=spd\n if key[pygame.K_t]:ypos[1]-=spd\n if key[pygame.K_g]:ypos[1]+=spd\n if key[pygame.K_j]:xpos[2]-=spd\n if key[pygame.K_l]:xpos[2]+=spd\n if key[pygame.K_i]:ypos[2]-=spd\n if key[pygame.K_k]:ypos[2]+=spd\n if key[pygame.K_LEFT]: xpos[3]-=spd\n if key[pygame.K_RIGHT]:xpos[3]+=spd\n if key[pygame.K_UP]: ypos[3]-=spd\n if key[pygame.K_DOWN]: ypos[3]+=spd\n bgcolour=0x998877 #checking collisions\n if spridr[0].colliderect(spridr[1]):bgcolour=0xAA5555\n if spridr[0].colliderect(spridr[2]):bgcolour=0x55AA55\n if spridr[0].colliderect(spridr[3]):bgcolour=0x5555AA\n if spridr[1].colliderect(spridr[2]):bgcolour=0x55AAAA\n if spridr[1].colliderect(spridr[3]):bgcolour=0xAA55AA\n if 
spridr[2].colliderect(spridr[3]):bgcolour=0xAAAA55\n screen.fill(bgcolour)\n for i in range (0,amnt,1): #displaying sprites\n spridr[i].left=xpos[i];spridr[i].top=ypos[i];screen.blit(sprid[i],spridr[i])\n for event in pygame.event.get(): #praxis stuff\n if event.type==pygame.QUIT:sys.exit()\n pygame.display.flip();pygame.time.delay(1000/50)", "user_title": "Anonymous", "datetimeon": "2009-07-14T09:12:03", "link": "Rect.colliderect", "id": 2879}, {"content": "#! /usr/bin/python\n# using sprites_rgba.png from http://img17.imageshack.us/img17/3166/spritesrgba.png\nimport sys, pygame, math, os, random\nfrom pygame.locals import *\npygame.init()\nsize=width,height=1024,256;screen=pygame.display.set_mode(size);pygame.display.set_caption(\"multiplayer sprite test with collisions\")\nspd=4;amnt=4;ampl=8;xpos=[0]*amnt;ypos=[0]*amnt;sprid=[];spridr=[] #some arrays and variables\nfor i in range (0,amnt,1):\n xpos[i]=64+(128*i)+random.randint(0,32);ypos[i]=64+random.randint(0,32)\nsprall=pygame.image.load(\"sprites_rgba.png\") #loading sprites\nfor i in range (0,4,1):\n spritetmp=sprall.subsurface(i*64,0,64,64);spriterecttmp=spritetmp.get_rect()\n sprid.append(spritetmp);spridr.append(spriterecttmp)\nwhile 1:\n key=pygame.key.get_pressed() #checking pressed keys\n if key[pygame.K_a]:xpos[0]-=spd\n if key[pygame.K_d]:xpos[0]+=spd\n if key[pygame.K_w]:ypos[0]-=spd\n if key[pygame.K_s]:ypos[0]+=spd\n if key[pygame.K_f]:xpos[1]-=spd\n if key[pygame.K_h]:xpos[1]+=spd\n if key[pygame.K_t]:ypos[1]-=spd\n if key[pygame.K_g]:ypos[1]+=spd\n if key[pygame.K_j]:xpos[2]-=spd\n if key[pygame.K_l]:xpos[2]+=spd\n if key[pygame.K_i]:ypos[2]-=spd\n if key[pygame.K_k]:ypos[2]+=spd\n if key[pygame.K_LEFT]: xpos[3]-=spd\n if key[pygame.K_RIGHT]:xpos[3]+=spd\n if key[pygame.K_UP]: ypos[3]-=spd\n if key[pygame.K_DOWN]: ypos[3]+=spd\n bgcolour=0x998877 #checking collisions\n if spridr[0].colliderect(spridr[1]):bgcolour=0xAA5555\n if spridr[0].colliderect(spridr[2]):bgcolour=0x55AA55\n if 
spridr[0].colliderect(spridr[3]):bgcolour=0x5555AA\n if spridr[1].colliderect(spridr[2]):bgcolour=0x55AAAA\n if spridr[1].colliderect(spridr[3]):bgcolour=0xAA55AA\n if spridr[2].colliderect(spridr[3]):bgcolour=0xAAAA55\n screen.fill(bgcolour)\n for i in range (0,amnt,1): #displaying sprites\n spridr[i].left=xpos[i];spridr[i].top=ypos[i];screen.blit(sprid[i],spridr[i])\n for event in pygame.event.get(): #praxis stuff\n if event.type==pygame.QUIT:sys.exit()\n pygame.display.flip();pygame.time.delay(1000/50)", "user_title": "Anonymous", "datetimeon": "2009-07-14T09:09:26", "link": "pygame.key.get_pressed", "id": 2878}, {"content": "This creates a mask from the surface which has all the pixels set which have color values above or equal to those in color, but below (and not equal to) the values in threshold. So no pixel with a 255 value can possibly be considered. And the default threshold doesn't let the mask have any set pixels for any given surface.", "user_title": "Anonymous", "datetimeon": "2010-03-13T06:03:20", "link": "pygame.mask.from_threshold", "id": 3073}, {"content": "It appears if you end up rotating your sprites, you need to regenerate their masks when collision is detected via the rect test, or the masks won't match with the corresponding imagery.", "user_title": "Anonymous", "datetimeon": "2009-07-13T01:08:45", "link": "pygame.sprite.collide_mask", "id": 2876}, {"content": "This didn't seem clear in the documentation. I checked the source (v. 1.8.1).\n\nThis takes a sequence of (R, G, B) triplets. 
This is currently the only way the palette can be defined.", "user_title": "Anonymous", "datetimeon": "2009-07-12T13:50:36", "link": "Surface.set_palette", "id": 2874}, {"content": "A less look at me demo:\n\nimport pygame.font\nimport pygame.surface\n\ndef gameprint(text,xx,yy,color):\n font = pygame.font.SysFont(\"Courier New\",18)\n ren = font.render(text,1,color)\n screen.blit(ren, (xx,yy))", "user_title": "Anonymous", "datetimeon": "2009-08-28T19:19:15", "link": "Font.render", "id": 2927}, {"content": "how to fadein() ?", "user_title": "Anonymous", "datetimeon": "2010-03-09T06:18:13", "link": "pygame.mixer.music.fadeout", "id": 3068}, {"content": "cfadsfsadgfdh hHAHAHAH", "user_title": "Anonymous", "datetimeon": "2010-03-10T08:48:11", "link": "pygame.event.pump", "id": 3070}, {"content": "This function, at least on my system using Windows XP, \nonly one key is repeated at a time. So, moving a sprite\naround the screen using the arrow keys can only move it\nin one direction at a time. 
No diagonal by using two arrows\nat the same time...\n\nAn alternative is to set an object's state on KEYDOWN and reset \nit on KEYUP.\n\nExample:\n\n\tif e.type == KEYDOWN:\n\t\tif e.key == K_LEFT:\n\t\t\tship.xspeed -= SPEED\n\t\telif e.key == K_RIGHT:\n\t\t\tship.xspeed += SPEED\n\t\telif e.key == K_UP:\n\t\t\tship.yspeed -= SPEED\n\t\telif e.key == K_DOWN:\n\t\t\tship.yspeed += SPEED\n\t\telif e.key == K_SPACE\n\t\t\tship.firing = True\n\telif e.type == KEYUP:\n\t\tif e.key == K_LEFT:\n\t\t\tship.xspeed += SPEED\n\t\telif e.key == K_RIGHT:\n\t\t\tship.xspeed -= SPEED\n\t\telif e.key == K_UP:\n\t\t\tship.yspeed += SPEED\n\t\telif e.key == K_DOWN:\n\t\t\tship.yspeed -= SPEED\n\t\telif e.key == K_SPACE:\n\t\t\tship.firing == False", "user_title": "Anonymous", "datetimeon": "2010-03-12T11:41:32", "link": "pygame.key.set_repeat", "id": 3072}, {"content": "Only if you do not import all the pygame locals:\n\nfrom pygame.locals import *", "user_title": "Anonymous", "datetimeon": "2009-07-10T23:11:01", "link": "pygame.mouse.get_pressed", "id": 2871}, {"content": "Note that\nmyrect.move(x,y)\ndoes not change the Rect myrect. Only\nmyrect = myrect.move(x,y)\ndoes.", "user_title": "Anonymous", "datetimeon": "2010-03-03T13:42:28", "link": "Rect.move", "id": 3066}, {"content": "My copy of pygames uses numeric as default, not numpy (as stated above).\nThe best thing is probably to explicitly state the array type used (e.g. pygame.sndarray.use_arraytype('numpy')) to avoid problems with future convention changes.", "user_title": "Anonymous", "datetimeon": "2010-03-08T09:27:57", "link": "pygame.sndarray", "id": 3067}, {"content": "The docs are faulty here. 
scroll() takes two integers and not a tuple or a list.", "user_title": "Anonymous", "datetimeon": "2009-08-30T08:39:48", "link": "Surface.scroll", "id": 2930}, {"content": "What about Duel screen displays?", "user_title": "Anonymous", "datetimeon": "2009-07-09T13:26:59", "link": "pygame.display.set_mode", "id": 2869}, {"content": "are yoh sure that sign are correct", "user_title": "Anonymous", "datetimeon": "2010-03-02T18:18:51", "link": "pygame.key", "id": 3063}, {"content": "import pygame\nfrom pygame.locals import *\n\ndef timerFunc():\n print \"Timer CallBack\"\n\npygame.init()\npygame.time.set_timer(USEREVENT+1, 100)\nwhile 1:\n for event in pygame.event.get():\n if event.type == USEREVENT+1:\n timerFunc() #calling the function wheever we get timer event.\n if event.type == QUIT:\n break", "user_title": "Anonymous", "datetimeon": "2009-08-31T04:50:51", "link": "pygame.time.set_timer", "id": 2932}, {"content": "this is helpful thanks son", "user_title": "Anonymous", "datetimeon": "2009-08-31T22:47:55", "link": "pygame.font", "id": 2933}, {"content": "Wouldn't the center simply be X = X2 - X1 Y = Y2 - Y1 ? Bottom right minus top left. That doesn't require any special math functions, yes?", "user_title": "Anonymous", "datetimeon": "2009-04-22T14:45:07", "link": "pygame.draw.rect", "id": 2557}, {"content": "#! 
/usr/bin/python\n# using sprites_rgba.png from http://img17.imageshack.us/img17/3166/spritesrgba.png\nimport sys, pygame, math, os, random\nfrom pygame.locals import *\npygame.init()\nsize=width,height=1024,256;screen=pygame.display.set_mode(size)\namnt=64;ampl=8;xpos=[0]*amnt;ypos=[0]*amnt;xdif=[0]*amnt;ydif=[0]*amnt;snum=[0]*amnt\nfor i in range (0,amnt,1):\n xpos[i]=random.randint(0,width)\n ypos[i]=random.randint(0,height)\n xdif[i]=random.randint(0,ampl*2)-ampl\n ydif[i]=random.randint(0,ampl*2)-ampl\n snum[i]=random.randint(0,3)\nball=pygame.image.load(\"sprites_rgba.png\");ballrect=ball.get_rect()\nsprite00=ball.subsurface(( 0,0,64,64));spriterect00=sprite00.get_rect()\nsprite01=ball.subsurface(( 64,0,64,64));spriterect01=sprite01.get_rect()\nsprite02=ball.subsurface((128,0,64,64));spriterect02=sprite02.get_rect()\nsprite03=ball.subsurface((192,0,64,64));spriterect03=sprite03.get_rect()\nwhile 1:\n for event in pygame.event.get():\n if event.type==pygame.QUIT:sys.exit()\n for i in range (0,amnt,1):\n xpos[i]+=xdif[i];ypos[i]+=ydif[i]\n if xpos[i]>width:xpos[i]-=(width+64)\n if ypos[i]>height:ypos[i]-=(height+64)\n if xpos[i]<-64:xpos[i]+=(width+64)\n if ypos[i]<-64:ypos[i]+=(height+64)\n screen.fill(0x998877)\n for i in range (0,amnt,1):\n if snum[i]==0:\n spriterect00.left=xpos[i];spriterect00.top=ypos[i];screen.blit(sprite00,spriterect00)\n if snum[i]==1:\n spriterect01.left=xpos[i];spriterect01.top=ypos[i];screen.blit(sprite01,spriterect01)\n if snum[i]==2:\n spriterect02.left=xpos[i];spriterect02.top=ypos[i];screen.blit(sprite02,spriterect02)\n if snum[i]==3:\n spriterect03.left=xpos[i];spriterect03.top=ypos[i];screen.blit(sprite03,spriterect03)\n pygame.display.flip()\n pygame.time.delay(1000/50)", "user_title": "Anonymous", "datetimeon": "2009-07-07T10:21:37", "link": "Surface.subsurface", "id": 2867}, {"content": "Usage of the event queue for USEREVENT-style events is limited by the maximum size of the SDL event queue, which is 256.\nSo, if more 
events (of any sort) get posted to the queue, you will get an exception stating \"error: Event queue full\".\nIf you expect to generate more than a few user events before they are posted, consider a separate queue.", "user_title": "Anonymous", "datetimeon": "2009-05-03T10:30:15", "link": "pygame.event.post", "id": 2560}, {"content": "Is this the same as pygame.surface.fill(color, rect)?", "user_title": "Anonymous", "datetimeon": "2009-05-05T15:48:33", "link": "pygame.draw.rect", "id": 2561}, {"content": "Just a note, Nautilus, the default file browser in GNOME sets copied files as 'x-special/gnome-copied-files', if you retrieve it, it holds the location as plain text.", "user_title": "Anonymous", "datetimeon": "2009-08-02T21:50:25", "link": "pygame.scrap", "id": 2901}, {"content": "so someone can share some Surface.subsurface snippet? (one about sprites is very welcome)", "user_title": "Anonymous", "datetimeon": "2009-07-06T22:14:24", "link": "Surface.subsurface", "id": 2866}, {"content": "test\ntest", "user_title": "Anonymous", "datetimeon": "2009-07-03T15:37:54", "link": "Surface.copy", "id": 2864}, {"content": "test <br /> test", "user_title": "Anonymous", "datetimeon": "2009-07-03T15:37:38", "link": "Surface.copy", "id": 2863}, {"content": "somewhere in the pygame google-group i found it's possible to have multiple sprites based on just one picture plenty of sprite drawings, without having to have them cropped file by file - how can we do this?", "user_title": "Anonymous", "datetimeon": "2009-07-03T15:37:10", "link": "Surface.copy", "id": 2862}, {"content": "missing commands for drawing bezier lines - some gpl sources can be find at http://nitrofurano.linuxkafe.com/sdlbasic - just needed to be recoded to Pygame, but it's not that difficult task at all...", "user_title": "Anonymous", "datetimeon": "2009-07-03T13:28:41", "link": "pygame.draw", "id": 2861}, {"content": "What does the error 'text has zero width' mean?\nI was simply printing 'Hello World!' 
to the screen.\nI was fiddling with text size, jumped from 32 to 12 and I got the above error\nNow the only way to stop the error is to have no text ('')", "user_title": "Anonymous", "datetimeon": "2009-07-03T07:24:30", "link": "Font.render", "id": 2860}, {"content": "It would be handy if the range of values was given. It appears to be 0 to 255.", "user_title": "Anonymous", "datetimeon": "2009-07-02T12:01:48", "link": "Color.r", "id": 2859}, {"content": "don't you know its a bad idea to leave your email on the internet?\n-wekul", "user_title": "Anonymous", "datetimeon": "2009-06-29T09:02:53", "link": "pygame.key", "id": 2857}, {"content": "With event.type == MOUSEBUTTONDOWN - Error! \nWrite it - event.type == pygame.MOUSEBUTTONDOWN", "user_title": "Anonymous", "datetimeon": "2009-07-02T06:50:36", "link": "pygame.mouse.get_pressed", "id": 2858}, {"content": "In Pygame 1.9 Surface.copy() does not preserve the original image's alpha. If\nyour image has an alpha you need to:\n\ns1 = s0.copy()\ns1.set_alpha(s0.get_alpha())", "user_title": "Anonymous", "datetimeon": "2010-02-21T12:03:18", "link": "Surface.copy", "id": 3059}, {"content": "In the comment on February 21, 2010 10:32am, in the last sentence I meant,\n\"for a sample i, the value of the left channel is a[i][0], the right channel\na[i][1],\" of course.", "user_title": "Anonymous", "datetimeon": "2010-02-21T14:25:02", "link": "pygame.sndarray", "id": 3060}, {"content": "When using numpy, be careful to set the type of the array correctly.\nFor instance, when you're in signed 16-bit stereo mode, e.g., when you've\ncalled\n\npygame.mixer.pre_init(size = -16, channels = 2)\n\nand you want to create an array to use for synthesizing a sound, don't forget\nthe dtype argument in\n\nsamples = numpy.zeros((n_samples, 2), dtype = numpy.int16)", "user_title": "Anonymous", "datetimeon": "2010-02-21T10:37:46", "link": "pygame.sndarray", "id": 3058}, {"content": "The above is hard to understand, at least for me. 
For instance, what does\n\"A stereo sound file has two values per sample\" mean? Here's what it means:\nif you're in mono, and your array has N samples, the shape of the array\nshould be (N,) (a d=1 array of N elements). If you're in stereo, then the\nshape should be (N,2) (a d=2 array, Nx2); for a sample i, the value of the left\nchannel is a[N][0], the right channel a[N][1].", "user_title": "Anonymous", "datetimeon": "2010-02-21T10:32:01", "link": "pygame.sndarray", "id": 3057}, {"content": "You could always do dir(pygame.Rect) or whatever.", "user_title": "Anonymous", "datetimeon": "2009-06-28T09:26:48", "link": "Surface.get_rect", "id": 2854}, {"content": "HTML (#rrggbbaa) format doesn't seem to work with 1.9.1 ... gives a \"ValueError: invalid argument\" exception.", "user_title": "Anonymous", "datetimeon": "2009-12-02T14:01:18", "link": "pygame.Color", "id": 3019}, {"content": "(What's even more odd is that it works from the python console ... just not in a program)", "user_title": "Anonymous", "datetimeon": "2009-12-02T14:07:37", "link": "pygame.Color", "id": 3020}, {"content": "it dun wok", "user_title": "Anonymous", "datetimeon": "2009-12-03T12:43:47", "link": "pygame.draw.arc", "id": 3021}, {"content": "Re: noise and static was occurring on my Linux box, and I was able to ameliorate it\nby specifically setting my mixer:\n pygame.mixer.pre_init(44100, -16, 2)\n pygame.init()", "user_title": "Anonymous", "datetimeon": "2009-12-07T01:43:58", "link": "Sound.play", "id": 3022}, {"content": "its set_palette_at, not set_at", "user_title": "Anonymous", "datetimeon": "2009-12-15T12:51:36", "link": "Surface.set_palette_at", "id": 3023}, {"content": "hey matthew sucks in snping", "user_title": "Anonymous", "datetimeon": "2009-12-16T15:50:04", "link": "pygame.draw.ellipse", "id": 3024}, {"content": "ellipserect=Rect(cmx,cmy,mx-cmx,my-cmy)\nellipserect.normalize()\ndraw.ellipse(screen,color,ellipserect,1)\nwhy ValueError: width greater than ellipse radius?", 
"user_title": "Anonymous", "datetimeon": "2009-12-18T13:28:16", "link": "pygame.draw.ellipse", "id": 3025}, {"content": "It plays dot-to-dot with the given points.", "user_title": "Anonymous", "datetimeon": "2010-02-19T01:32:43", "link": "pygame.draw.lines", "id": 3055}, {"content": "In ver. 1.9.1, if I try to initialize by calling\n\npygame.mixer.pre_init(...)\npygame.init()\n\nand then try to play a sound buffer, I get no output unless I open a graphics\nwindow first by calling pygame.display.set_mode(...). If, on the other hand,\nI'm not doing graphics and I initialize with just\n\npygame.mixer.init(...)\n\nI can get sound without opening any window.", "user_title": "Anonymous", "datetimeon": "2010-02-21T10:20:13", "link": "pygame.mixer", "id": 3056}, {"content": "to get a picture instead of black and white cursor, in order :\n1) simply make a transparent cursor\n pygame.mouse.set_cursor(pygame.mouse.set_cursor((8,8),(0,0),(0,0,0,0,0,0,0,0),(0,0,0,0,0,0,0,0))\n\n2) constantly actualise the postition of the picture to the postion of the cursor\n cursor_picture==pygame.image.load('./cursor.png').convert_alpha()\n while True:\n for event in pygame.event.get():\n if event.type==QUIT:\n exit()\n screen.fill(black)\n screen.blit(mouse_cursor, pygame.mouse.get_pos())\n pygame.display.update()", "user_title": "Anonymous", "datetimeon": "2009-06-27T17:10:22", "link": "pygame.mouse.set_cursor", "id": 2850}, {"content": "Not sure why I am leaving this here... but can anyone tell me how you would rotate a surface? 
Its 3am so excuse my ignorance :D", "user_title": "Anonymous", "datetimeon": "2009-11-30T03:38:25", "link": "pygame.Surface", "id": 3016}, {"content": "Theres is an error in documentation: pygame.cursor.compile does not exists, but pygame.cursors.compile do", "user_title": "Anonymous", "datetimeon": "2009-11-30T18:49:41", "link": "pygame.cursors.compile", "id": 3017}, {"content": "is there anyway to rotate a rectangle with out making it an image?", "user_title": "Anonymous", "datetimeon": "2009-11-13T15:06:41", "link": "pygame.transform.rotate", "id": 3009}, {"content": "Minor problem: When I run the program using this for an image, it says \"cannot load image!\" could anyone help me?", "user_title": "Anonymous", "datetimeon": "2009-11-15T09:50:56", "link": "pygame.image.load", "id": 3010}, {"content": "We need the posibility to rotate without the image being rescaled, just keep its original size like in PIL.\n\nim.rotate(angle, filter=NEAREST, expand=0) \nThe expand argument, if true, indicates that the output image should be made \nlarge enough to hold the rotated image. \nIf omitted or false, the output image has the same size as the input image.", "user_title": "Anonymous", "datetimeon": "2009-11-21T05:03:48", "link": "pygame.transform.rotate", "id": 3012}, {"content": "In pygame 1.9.1 this function does not return negative values instead it returns a \n0 for all values < 0 (of the direction vector). If you are looking for a way to \ncompute a collision response look at Mask.overlap_area.", "user_title": "Anonymous", "datetimeon": "2009-11-23T10:34:10", "link": "Mask.overlap", "id": 3013}, {"content": "can anyone please explain what this function do? 
at best whith an example", "user_title": "Anonymous", "datetimeon": "2009-11-27T06:41:00", "link": "pygame.draw.lines", "id": 3014}, {"content": "Meta key is 'Apple' or 'Command' on a mac.", "user_title": "Anonymous", "datetimeon": "2009-09-07T03:31:29", "link": "pygame.key", "id": 2939}, {"content": "why doesn't this blit on the screen???\n\n\n for star in self.stars:\n self.screen.blit(self.a, star.pos)\n star.update()\n self.screen.blit(star.image, star.pos)\n pygame.display.update()", "user_title": "Anonymous", "datetimeon": "2009-09-08T22:04:17", "link": "Surface.blit", "id": 2941}, {"content": "HELP ME ONMMGMGMGMGMGMG", "user_title": "Anonymous", "datetimeon": "2009-11-12T10:31:24", "link": "Surface.convert_alpha", "id": 3006}, {"content": "aghahhahahahah I LOVE THIS OMG (*&^%$#@Q", "user_title": "Anonymous", "datetimeon": "2009-11-12T10:31:43", "link": "Surface.convert_alpha", "id": 3007}, {"content": "In my game the screen is scaled. This can cause havoc with the mouse positioning. I made this function:\n\n def get_mouse_pos(pos):\n\treturn (pos[0] * (1280.0/float(game.game_scaled[0])),pos[1] * (720.0/float(game.game_scaled[1])))\n\nReplace 1280.0 and 720.0 with the resolution of the pre-scaled game and game.game_scaled with a sequence containing the scaled resolution.", "user_title": "Anonymous", "datetimeon": "2009-11-12T12:43:39", "link": "pygame.transform.scale", "id": 3008}, {"content": "It seems to work if you set the display of the movie to a surface of the same size and then blit that surface to the screen.", "user_title": "Anonymous", "datetimeon": "2009-06-24T20:28:26", "link": "pygame.movie", "id": 2846}, {"content": "This documentation does not seem to match what is currently in 1.8.1. 
\nInstead: pygame.transform.threshold(DestSurface, Surface, color, threshold = (0,0,0,0), diff_color = (0,0,0,0), change_return = True, Surface =None): return num_threshold_pixels", "user_title": "Anonymous", "datetimeon": "2009-11-11T15:11:43", "link": "pygame.transform.threshold", "id": 3004}, {"content": "For some odd reason anything in pygame.Color gets an error message like it doesn't exist.\nIf I try using pygame.Color.r, it says that Color has no attribute r. I tried redownloading\npygame, but nothing diffrent.", "user_title": "Anonymous", "datetimeon": "2009-09-11T20:56:38", "link": "pygame.Color", "id": 2943}, {"content": "I am having the same problem on XP. The sound plays, but the video does not.", "user_title": "Anonymous", "datetimeon": "2009-06-24T20:09:34", "link": "pygame.movie", "id": 2845}, {"content": "\"current_h, current_h: Width and height of the current video mode, or of the\"[...]\nOne of them should be \"current_w\" instead.", "user_title": "Anonymous", "datetimeon": "2009-11-07T06:40:09", "link": "pygame.display.Info", "id": 2999}, {"content": "For me, PixelArray works much faster (4 or 5 times faster) than Surfarray. I wanted to set every pixel in my off-screen surface individually. Creating the surface, creating a PixelArray on it, and going through the pixels one-by-one is much faster than creating the bitmap using numpy and calling surfarray.make_surface.", "user_title": "Anonymous", "datetimeon": "2009-11-08T08:17:58", "link": "pygame.PixelArray", "id": 3000}, {"content": "\u00c3?\u00c3? 
\u00c3\u00ac\u00c3\u00ae\u00c3\u008a\u00c3\u00a5\u00c3\u00b2\u00c3\u00a5 \u00c3\u00ad\u00c3\u00a0\u00c3\u00b0\u00c3\u009a\u00c3\u00b1\u00c3\u00ae\u00c3\u00a2\u00c3\u00a0\u00c3\u00b2\u00c3\u008c \u00c3\u00ad\u00c3\u00a5\u00c3\u00b1\u00c3\u00aa\u00c3\u00ae\u00c3\u00ab\u00c3\u008c\u00c3\u00aa\u00c3\u00ae \u00c3\u00af\u00c3\u00a0\u00c3\u00b0\u00c3\u00a0\u00c3\u00ab\u00c3\u00ab\u00c3\u00a5\u00c3\u00ab\u00c3\u008c\u00c3\u00ad\u00c3\u00bb\u00c3\u00b5 \u00c3\u00ab\u00c3\u009a\u00c3\u00ad\u00c3\u009a\u00c3\u00a9 \u00c3\u00b0\u00c3\u00bf\u00c3\u0080\u00c3\u00ae\u00c3\u00ac \u00c3\u00b1 \u00c3\u0080\u00c3\u00b0\u00c3\u00b3\u00c3\u00a3\u00c3\u00ae\u00c3\u00ac. \u00c3?\u00c3\u00b2\u00c3\u00ae\u00c3\u00a1\u00c3\u00bb \u00c3\u009a\u00c3\u00b5 \u00c3\u0080\u00c3\u00ab\u00c3\u009a\u00c3\u00ad\u00c3\u00bb \u00c3\u00b1\u00c3\u00ab\u00c3\u009a\u00c3\u00a2\u00c3\u00a0\u00c3\u00ab\u00c3\u009a\u00c3\u00b1\u00c3\u008c\n \u00c3\u00ae\u00c3\u00ad\u00c3\u009a \u00c3\u00a1\u00c3\u00b3\u00c3\u0080\u00c3\u00b3\u00c3\u00b2 \u00c3\u00a2\u00c3\u009a\u00c3\u0080\u00c3\u00ad\u00c3\u00bb, \u00c3\u00aa\u00c3\u00a0\u00c3\u00aa \u00c3\u00ae\u00c3\u0080\u00c3\u00ad\u00c3\u00a0. \u00c3\u008d\u00c3\u00ae \u00c3\u009c\u00c3\u00b2\u00c3\u00ae \u00c3\u00a1\u00c3\u00b3\u00c3\u0080\u00c3\u00a5\u00c3\u00b2 \u00c3\u00ac\u00c3\u00a5\u00c3\u0080\u00c3\u00ab\u00c3\u00a5\u00c3\u00ad\u00c3\u00ad\u00c3\u00a5\u00c3\u00a9.\n I don't know english, write russian. translate.google for you help! :)", "user_title": "Anonymous", "datetimeon": "2009-11-08T18:19:31", "link": "pygame.draw.aaline", "id": 3001}, {"content": "uiuuiu", "user_title": "Anonymous", "datetimeon": "2009-11-11T05:10:30", "link": "pygame.key.get_focused", "id": 3002}, {"content": "its very ...................", "user_title": "Anonymous", "datetimeon": "2009-11-11T05:11:00", "link": "pygame.key.get_focused", "id": 3003}, {"content": "Tip for noobs. This was killing me. 
If you're trying this for the first time and getting no sound, it may be because you're program exits before the playback thread completes. See pygame/examples/sound.py: it waits at the end.", "user_title": "Anonymous", "datetimeon": "2009-12-19T19:22:25", "link": "Channel.play", "id": 3026}, {"content": "Tip for noobs. This was killing me. If you're trying this for the first time and\ngetting no sound, it may be because you're program exits before the playback\nthread completes. See pygame/examples/sound.py: it waits at the end.", "user_title": "Anonymous", "datetimeon": "2009-12-19T19:23:52", "link": "Sound.play", "id": 3027}, {"content": "You missed pygame.Mixer", "user_title": "Anonymous", "datetimeon": "2009-10-27T02:47:35", "link": "pygame.init", "id": 2991}, {"content": "Tried this on XP with pygame 1.9 - video works, but sound doesn't. Uninitializing pygame.mixer does not seem to effect behavior.", "user_title": "Anonymous", "datetimeon": "2009-10-27T20:31:08", "link": "pygame.movie", "id": 2992}, {"content": "As of 1.9, it does not appear the loop param exists.", "user_title": "Anonymous", "datetimeon": "2009-10-27T20:42:45", "link": "Movie.play", "id": 2993}, {"content": "If your movies play slow or occasionally freeze, setting a limit to framerates seems to resolve the problem. \nSee the documentation on Clock.tick(framerate) on how to limit framerates:\nhttp://www.pygame.org/docs/ref/time.html#Clock.tick", "user_title": "Anonymous", "datetimeon": "2009-10-29T10:53:02", "link": "pygame.movie.Movie", "id": 2994}, {"content": "The example code and the explanation contradict each other.\n\nThe example returns true if lost, supposedly, and the explanation supposedly returns False if lost?", "user_title": "Anonymous", "datetimeon": "2009-11-04T18:10:25", "link": "pygame.scrap.lost", "id": 2995}, {"content": "Does not work *at all*. 
No changes occur when I change any pixels.", "user_title": "Anonymous", "datetimeon": "2009-11-05T22:12:49", "link": "pygame.PixelArray", "id": 2996}, {"content": "Remember, there are not semitones. Thus, each semitone is represented by an integer named \"note\".\nIf you are a beginer it is a good choice to start practicing with the middle notes.\nC 4 is the note number 61. Have fun!.\n\nTourette", "user_title": "Anonymous", "datetimeon": "2009-12-21T19:52:02", "link": "Output.note_on", "id": 3028}, {"content": "pygame.mask.from_surface(Surface, threshold) -> Mask\nno Keyword argument", "user_title": "Anonymous", "datetimeon": "2009-10-25T18:04:44", "link": "pygame.mask.from_surface", "id": 2989}, {"content": "you use a class to get the var\nfor example\nclass myimage(object):\n image = (your image)\nthen to blit\n\ndisplay.blit(myimage.image, (0,0))\n\nso that you are only accesing the one variable to blit instead of multiple instances of the same", "user_title": "Anonymous", "datetimeon": "2009-08-04T11:33:28", "link": "Surface.copy", "id": 2905}, {"content": "if you want to measure it over a period of your choosing just compute a moving average of the result of clock.tick(), like this:\n\n...\n\nrecent_frame_lengths = [ 100, 100, 100, 100, 100 ]\nrfl_array_len = float( len( recent_frame_lengths ) )\n\n...\nmain():\n...\ndt = clock.tick()\nrecent_frame_lengths.pop(0)\nrecent_frame_lengths.append(dt)\naverage_frame_length = recent_frame_lengths / rfl_array_len\nframes_per_second = 1000. / average_frame_length", "user_title": "Anonymous", "datetimeon": "2010-05-18T12:08:59", "link": "Clock.get_fps", "id": 3124}, {"content": "button can be at least 1-6. 4/5 are for the scroll wheel. \nThe squeeze-click on new Apple mice is 6.", "user_title": "Anonymous", "datetimeon": "2010-05-20T22:13:33", "link": "pygame.event", "id": 3126}, {"content": "event polling:\n#self.keys is [] 256 len. 
\nself.mouse = ((0,0), 0, 0, 0, 0, 0, 0) #(pos, b1,b2,b3,b4,b5,b6)\n#squeezing a new Apple mouse is button 6. \nfor event in pygame.event.get():\n\tif event.type == pygame.QUIT:\n\t\tself.running = 0\n\telif event.type == pygame.MOUSEBUTTONDOWN:\n\t\tself.mouse[event.button] = 1\n\t\tself.mouse[0] = event.pos\n\telif event.type == pygame.MOUSEBUTTONUP:\n\t\tself.mouse[event.button] = 0\n\t\tself.mouse[0] = event.pos\n\telif event.type == pygame.MOUSEMOTION:\n\t\tself.mouse[0] = event.pos\n\telif event.type == pygame.KEYDOWN:\n\t\tself.keys[event.key % 255] = 1\n\telif event.type == pygame.KEYUP:\n\t\tself.keys[event.key % 255] = 0", "user_title": "Anonymous", "datetimeon": "2010-05-20T22:31:20", "link": "pygame.mouse.get_pressed", "id": 3127}, {"content": "PyGame beginners, please don't look at Matthew N. Brown's code below.\n\nThat is the worst Python code I have ever seen, and it offers almost no examples of how to use pygame.draw.circle.", "user_title": "Anonymous", "datetimeon": "2010-05-17T13:22:44", "link": "pygame.draw.circle", "id": 3123}, {"content": "What about subsubsubsubsurfaces?", "user_title": "Anonymous", "datetimeon": "2009-06-14T09:27:19", "link": "Surface.subsurface", "id": 2830}, {"content": "for some reason this gives always wrong numbers.\nmask_creep = pygame.mask.from_surface(creep.image)\nmask_player = pygame.mask.from_surface(p.image)\np.life -= mask_player.overlap_area(\n mask_creep, (creep.rect.x-p.rect.x,creep.rect.y-p.rect.y))\n\np.life is the life of the player, and I want to drain it by the amount of pixels overlapping with creep.\nhowever, it seems it hits before it should, and with mask_creep.invert() I seem to get more accurate hits, which makes no sense...", "user_title": "Anonymous", "datetimeon": "2009-10-24T06:03:49", "link": "pygame.mask", "id": 2984}, {"content": "get also this warning : nsquickdrawview \nif I compile.\n\ncan\u00c2\u008et use pygame with py2app\n\npython 2.4 or 2.5\nmaxos leopard\neclipse", "user_title": 
"Anonymous", "datetimeon": "2010-01-05T21:56:27", "link": "pygame.display.init", "id": 3032}, {"content": "This function is not working as stated. It requires a 'rectstyle' argument (ie. a tuple with rect's parameters). it tests for collisions using this 'rectsytle' object and returns any that are colliding along with their values. Not really useful since no one needs a tuple of rect's parameters back. this would be really nice if it worked as stated...", "user_title": "Anonymous", "datetimeon": "2009-10-24T03:33:00", "link": "Rect.collidedict", "id": 2982}, {"content": "The math isn't that hard, you just have to think in relative coordinates then.\nThe key is to look at the center of the image, as these coordinates wont change by rotating it.\nThe coordinates of the center of the image on the screen are given by:\nx_cntr.. x coordinate of the center\ny_cntr.. y coordinate of the center\n\npos_org = (x_cntr - image_org.get_rect().width / 2,\n y_cntr - image_org.get_rect().height / 2) \n \"\"\"gives position of upper left corner of image_org (not rotated)\n depending on the center coordinates for the Surface.blit function\"\"\"\nimage_rotated = pygame.transform.rotate(image_org, angle) #rotate image\npos_new = (x_pos_org - image_rotated.get_rect().width / 2,\n y_pos_org - image_rotated.get_rect().height / 2)\n #get new position for upper left corner for rotated image", "user_title": "Anonymous", "datetimeon": "2010-01-21T11:14:00", "link": "pygame.transform.rotate", "id": 3036}, {"content": "x_pos_org and y_pos_org naturally need to be x_cntr and y_cntr.. 
sry", "user_title": "Anonymous", "datetimeon": "2010-01-21T11:20:31", "link": "pygame.transform.rotate", "id": 3037}, {"content": "fuck u", "user_title": "Anonymous", "datetimeon": "2010-01-21T12:35:00", "link": "PixelArray.replace", "id": 3038}, {"content": "doesnt work!", "user_title": "Anonymous", "datetimeon": "2010-01-25T12:07:07", "link": "pygame.display.set_caption", "id": 3039}, {"content": "WTH!???", "user_title": "Anonymous", "datetimeon": "2010-01-25T12:07:16", "link": "pygame.display.set_caption", "id": 3040}, {"content": "Where should the image file be save at?", "user_title": "Anonymous", "datetimeon": "2010-01-29T02:01:23", "link": "pygame.image.load", "id": 3042}, {"content": "To clip the blit, you have to pass a rect like this (0, 0, clipWidth, clipHeigth):\n\nexample:\n\nsForeground.blit(sText, rText, (0, 0, 32, 32)):\n\ndraw the surface sText into sForeground at topleft position defined with the rect rText,\nclippping the sText by 32x32 pixel box", "user_title": "Anonymous", "datetimeon": "2010-01-30T14:45:56", "link": "Surface.blit", "id": 3043}, {"content": "actually, it DOES WORK. put it in the right group", "user_title": "Anonymous", "datetimeon": "2010-02-02T15:56:23", "link": "pygame.display.set_caption", "id": 3044}, {"content": "how does this even work?????", "user_title": "Anonymous", "datetimeon": "2010-02-02T23:04:45", "link": "Rect.move", "id": 3045}, {"content": "Pygame 1.9: DeprecationWarning: integer argument expected, got float\n\nThe tuple elements must be integers. The line number in the warning message will\nindicate your enclosing function or method. 
I found this very misleading and\nfrustrating.", "user_title": "Anonymous", "datetimeon": "2010-02-06T02:16:05", "link": "pygame.transform.smoothscale", "id": 3048}, {"content": "By calling Clock.tick -> clock.tick", "user_title": "Anonymous", "datetimeon": "2010-02-08T10:30:27", "link": "Clock.tick_busy_loop", "id": 3049}, {"content": "that code you posted (TWICE) doesnt show how to draw a circle in pygame.\n It shows an example of a complex couple hundred lines of interactive \nphysics engine that just so happens to take im guessing less than\n5 lines to use pygame.draw.circle. Its a waste of space Good job for getting your code out into \nthe world. Now people dont like you.", "user_title": "Anonymous", "datetimeon": "2010-02-08T22:31:47", "link": "pygame.draw.circle", "id": 3050}, {"content": "TypeError: descriptor 'collidelistall' requires a 'pygame.Rect' object but received a 'list'\n\n...so it doesn't like a list, but a pygame.Rect? That does't make sense.", "user_title": "Anonymous", "datetimeon": "2009-10-22T05:21:56", "link": "Rect.collidelistall", "id": 2977}, {"content": "Also using\n\n my_surface=pygame.Surface([width, height]).convert()\n\nseems to be just as effective", "user_title": "Anonymous", "datetimeon": "2009-10-16T22:13:15", "link": "pygame.Surface", "id": 2974}, {"content": "x = x2 - x1, y = y2 - y1 gives the width and height of the rectangle.\n(x2 - x1) / 2, (y2 - y1) / 2 gives the center coordinates.\nI don't know of a built-in method on the Rect that gives the center coords.", "user_title": "Anonymous", "datetimeon": "2009-10-21T20:50:31", "link": "pygame.draw.rect", "id": 2976}, {"content": "AWSOME", "user_title": "Anonymous", "datetimeon": "2009-10-08T11:50:27", "link": "pygame.draw", "id": 2967}, {"content": "Aloha! 
jdv", "user_title": "Anonymous", "datetimeon": "2009-09-20T07:33:05", "link": "Rect.move_ip", "id": 2954}, {"content": "Worth mentioning: the initial angle must be less than the final angle; otherwise it will draw the full elipse.", "user_title": "Anonymous", "datetimeon": "2009-10-01T14:16:05", "link": "pygame.draw.arc", "id": 2964}, {"content": "If you want a circle with a *good* outline, use this:\n\ndef drawcircle(image, colour, origin, radius, width=0):\n\tif width == 0:\n\t\tpygame.draw.circle(image,colour,intlist(origin),int(radius))\n\telse:\n\t\tif radius > 65534/5: radius = 65534/5\n\t\tcircle = pygame.Surface([radius*2+width,radius*2+width]).convert_alpha()\n\t\tcircle.fill([0,0,0,0])\n\t\tpygame.draw.circle(circle, colour, intlist([circle.get_width()/2, circle.get_height()/2]), int(radius+(width/2)))\n\t\tif int(radius-(width/2)) > 0: pygame.draw.circle(circle, [0,0,0,0], intlist([circle.get_width()/2, circle.get_height()/2]), abs(int(radius-(width/2))))\n\t\timage.blit(circle, [origin[0] - (circle.get_width()/2), origin[1] - (circle.get_height()/2)])", "user_title": "Anonymous", "datetimeon": "2009-09-30T10:47:07", "link": "pygame.draw.circle", "id": 2963}, {"content": "I think the problem has something to do with encapsulation.\nTry:\ncurrent_song = 0\ndef Play_Next_Song():\n global current_song\n if pygame.mixer.music.get_busy() == False:\n print songs[current_song]\n pygame.mixer.music.load(songs[current_song])\n pygame.mixer.music.play() \n current_song += 1", "user_title": "Anonymous", "datetimeon": "2009-09-21T11:39:31", "link": "pygame.mixer.music.load", "id": 2956}, {"content": "WHAT?", "user_title": "Anonymous", "datetimeon": "2009-09-23T15:22:38", "link": "pygame.draw.lines", "id": 2957}, {"content": "Can I draw circles too?", "user_title": "Anonymous", "datetimeon": "2009-09-24T13:53:40", "link": "pygame.draw.circle", "id": 2958}, {"content": "how can i get a mouse wheel value? 
please show in a snippet - i don't know how to use pygame.MOUSEBUTTONDOWN and pygame.MOUSEBUTTONUP", "user_title": "Anonymous", "datetimeon": "2009-09-25T11:53:07", "link": "pygame.mouse.get_pressed", "id": 2960}, {"content": "Setting the line width does not work!", "user_title": "Anonymous", "datetimeon": "2009-09-29T10:21:37", "link": "pygame.draw.rect", "id": 2962}, {"content": "i dont like pygame very much\nrectangles can go poop themselves", "user_title": "Anonymous", "datetimeon": "2010-05-21T11:08:52", "link": "Rect.collidelistall", "id": 3128}, {"content": "In fact it should be\n\"By calling Clock.tick(40)\" -> \"Clock.tick_busy_loop(40)\"", "user_title": "Anonymous", "datetimeon": "2010-06-01T03:26:55", "link": "Clock.tick_busy_loop", "id": 3130}, {"content": "No, waiting at the very beginning or very end of a loop does not make\nmuch difference. Moving it to the middle might: \n screen.fill(...)\n clock.tick(30)\n screen.blit(...)\nwill mostly show a blank screen because the fill will be visible\nduring the wait.", "user_title": "Anonymous", "datetimeon": "2010-06-01T07:55:40", "link": "Clock.tick", "id": 3131}, {"content": "haleluja", "user_title": "Anonymous", "datetimeon": "2010-06-02T03:11:33", "link": "Rect.collidelist", "id": 3133}, {"content": "PixelArray works faster than SurfArray for me also.", "user_title": "Anonymous", "datetimeon": "2010-06-02T20:55:16", "link": "pygame.PixelArray", "id": 3134}, {"content": "Only seems to block if you start another piece of music playing while it's still\nfading out.", "user_title": "Anonymous", "datetimeon": "2010-06-06T16:23:37", "link": "pygame.mixer.music.fadeout", "id": 3135}, {"content": "If you're having trouble with color keys, try image.set_alpha(None) on each individual subsurface.\nsubsurface seems not to always inherit its parent's alpha setting, so if the parent source image has an alpha then color key is ignored in subsurfaces.\n\nYou can easily remove the alpha channel in GIMP by right-clicking 
the background layer and selecting \"remove alpha channel\" to fix all your problems also :)", "user_title": "Anonymous", "datetimeon": "2010-06-08T23:09:21", "link": "Surface.subsurface", "id": 3139}, {"content": "You should use pygame.display.Info.current_h and pygame.display.Info.current_w.", "user_title": "Anonymous", "datetimeon": "2010-06-10T20:42:30", "link": "pygame.display.get_surface", "id": 3140}, {"content": "pygame.display.Info().current_h and pygame.display.Info().current_w", "user_title": "Anonymous", "datetimeon": "2010-06-10T20:43:14", "link": "pygame.display.get_surface", "id": 3141}, {"content": "screen = pygame.display.set_mode(...)\nscreen.get_size()", "user_title": "Anonymous", "datetimeon": "2010-06-10T20:55:19", "link": "pygame.display.get_surface", "id": 3142}, {"content": "11246579455877\nppqu s442", "user_title": "Anonymous", "datetimeon": "2010-06-11T18:56:04", "link": "pygame.mixer.music.play", "id": 3143}, {"content": "import pygame\nimport pygame, sys,os, time \nfrom pygame.locals import * \nfrom pygame.color import THECOLORS \nimport platform, os \nif platform.system()==\"Windows\": \n os.environ['SDL_VIDEODRIVER']='windib'\npygame.init()\nwindow = pygame.display.set_mode((600,600)) \nscreen = pygame.display.get_surface() \npygame.display.set_caption('Excercise 5') \nscreen.fill((0,0,0))\n\nclass GfxCursor:\n \"\"\"\n Replaces the normal pygame cursor with any bitmap cursor\n \"\"\"\n\n def __init__(self,surface,cursor=None,hotspot=(0,0)):\n \"\"\"\n surface = Global surface to draw on\n cursor = surface of cursor (needs to be specified when enabled!)\n hotspot = the hotspot for your cursor\n \"\"\"\n self.surface = surface\n self.enabled = 0\n self.cursor = None\n self.hotspot = hotspot\n self.bg = None\n self.offset = 0,0\n self.old_pos = 0,0\n \n if cursor:\n self.setCursor(cursor,hotspot)\n self.enable()\n\n def enable(self):\n \"\"\"\n Enable the GfxCursor (disable normal pygame cursor)\n \"\"\"\n if not self.cursor or 
self.enabled: return\n pygame.mouse.set_visible(0)\n self.enabled = 1\n\n def disable(self):\n \"\"\"\n Disable the GfxCursor (enable normal pygame cursor)\n \"\"\"\n if self.enabled:\n self.hide()\n pygame.mouse.set_visible(1)\n self.enabled = 0\n\n def setCursor(self,cursor,hotspot=(0,0)):\n \"\"\"\n Set a new cursor surface\n \"\"\"\n if not cursor: return\n self.cursor = cursor\n self.hide()\n self.show()\n self.offset = 0,0\n self.bg = pygame.Surface(self.cursor.get_size())\n pos = self.old_pos[0]-self.offset[0],self.old_pos[1]-self.offset[1]\n self.bg.blit(self.surface,(0,0),\n (pos[0],pos[1],self.cursor.get_width(),self.cursor.get_height()))\n\n self.offset = hotspot\n\n def setHotspot(self,pos):\n \"\"\"\n Set a new hotspot for the cursor\n \"\"\"\n self.hide()\n self.offset = pos\n\n def hide(self):\n \"\"\"\n Hide the cursor (useful for redraws)\n \"\"\"\n if self.bg and self.enabled:\n return self.surface.blit(self.bg,\n (self.old_pos[0]-self.offset[0],self.old_pos[1]-self.offset[1]))\n\n def show(self):\n \"\"\"\n Show the cursor again\n \"\"\"\n if self.bg and self.enabled:\n pos = self.old_pos[0]-self.offset[0],self.old_pos[1]-self.offset[1]\n self.bg.blit(self.surface,(0,0),\n (pos[0],pos[1],self.cursor.get_width(),self.cursor.get_height()))\n return self.surface.blit(self.cursor,pos)\n\n def update(self,event):\n \"\"\"\n Update the cursor with a MOUSEMOTION event\n \"\"\"\n self.old_pos = event.pos\n\nif __name__ == '__main__': #test it out\n import pygame.draw\n pygame.init()\n screen = pygame.display.set_mode((400, 300))\n screen.fill((50, 50, 111), (0, 0, 400, 150))\n pygame.display.flip()\n pygame.display.set_caption('Test the GfxCursor (and paint)')\n \n image = pygame.Surface((20, 20))\n pygame.draw.circle(image, (50, 220, 100), (10, 10), 8, 0)\n pygame.draw.circle(image, (220, 200, 50), (10, 10), 8, 2)\n image.set_at((9, 9), (255,255,255))\n image.set_colorkey(0, pygame.RLEACCEL)\n \n magicbox = pygame.Rect(10, 10, 100, 90)\n magiccolor = 
0\n \n cursor = GfxCursor(screen, image, (10, 10))\n finished = 0\n downpos = None\n while not finished:\n dirtyrects = []\n dirtyrects.extend([cursor.hide()])\n for e in pygame.event.get():\n if e.type in (pygame.QUIT, pygame.KEYDOWN):\n finished = 1\n break\n elif e.type == pygame.MOUSEBUTTONDOWN:\n cursor.disable()\n downpos = e.pos\n elif e.type == pygame.MOUSEBUTTONUP:\n cursor.enable()\n downpos = None\n elif downpos and e.type == pygame.MOUSEMOTION:\n r = pygame.draw.line(screen, (100,100,100), downpos, e.pos, 2)\n dirtyrects.append(r)\n downpos = e.pos\n cursor.update(e)\n elif not downpos and e.type == pygame.MOUSEMOTION:\n cursor.update(e)\n \n magiccolor = (magiccolor + 2) % 255\n r = screen.fill((0, 0, magiccolor), magicbox)\n dirtyrects.append(r)\n \n #here's how we sandwich the flip/update with cursor show and hide\n dirtyrects.extend([cursor.show()])\n pygame.display.update(dirtyrects)\n \n pygame.time.delay(5) #should be time.wait(5) with pygame-1.3 :]", "user_title": "Anonymous", "datetimeon": "2010-06-13T02:45:37", "link": "Color.r", "id": 3145}, {"content": "I get an 'UnboundLocalError: local variable 'fonts' referenced before assignment' error. 
\nI'm on Mac OS X 10.4 Tiger with Python 2.6.4, Pygame 1.9.1.", "user_title": "Anonymous", "datetimeon": "2010-06-19T22:22:14", "link": "pygame.font.get_fonts", "id": 3146}, {"content": "I'm no expert but wouldn't it be faster to use the math.hypot(a, b)\nfunction for determining length instead of writing your own function?", "user_title": "Anonymous", "datetimeon": "2009-06-09T21:45:55", "link": "Joystick.get_axis", "id": 2759}, {"content": "You can also move the corner:\n\nsquare_corner=(x-square_dim[0]*sqrt(2)/2*sin((abs(angle)+45)*pi/180),\n y-square_dim[1]*sqrt(2)/2*sin((45+abs(angle))*pi/180))\n screen.blit(pygame.transform.rotate(square,angle), square_corner)\n\nBeware: sin calculates the angle in rad but rotate needs angles in degrees.", "user_title": "Anonymous", "datetimeon": "2009-06-08T14:31:39", "link": "pygame.transform.rotate", "id": 2758}, {"content": "ACTIVEEVENT has two attributes. \"gain\" is set to 0 or one depending if the type of focus was lost or gained. \"state\" will equal 1 for mouse focus, 2 for keyboard focus, or 4 for window iconification.", "user_title": "Anonymous", "datetimeon": "2009-06-03T22:15:32", "link": "pygame.event", "id": 2755}, {"content": "If you want only certain events to be enabled -- you will need to disable them all, and only then enable the ones that you need:\n#--\npygame.event.set_allowed(None)\nprint map(pygame.event.get_blocked,range(1,33))\npygame.event.set_allowed([pygame.QUIT, pygame.KEYDOWN, pygame.USEREVENT])\nprint map(pygame.event.get_blocked,range(1,33))\n#--\nbelow is the output:\n[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n[1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1]", "user_title": "Anonymous", "datetimeon": "2009-05-30T16:39:22", "link": "pygame.event.set_allowed", "id": 2754}, {"content": "Also, it should maybe be noted that it returns 4 tuple items, not just 3.\nMy guess is RGBA tuple instead of 
RGB, but I'm not an expert :P", "user_title": "Anonymous", "datetimeon": "2009-05-27T02:45:25", "link": "Surface.unmap_rgb", "id": 2753}, {"content": "convert a mapped integer color value into a Color\nSurface.map_rgb(mapped_int): return Color\n ^\nShouldn't it be \"Surface.unmap_rgb\"?", "user_title": "Anonymous", "datetimeon": "2009-05-27T02:43:16", "link": "Surface.unmap_rgb", "id": 2752}, {"content": "Tracked module playback with Pygame has a lower playback volume than usual,\nand I need to find something like \"stereo separation\" feature yet.", "user_title": "Anonymous", "datetimeon": "2009-05-21T06:41:14", "link": "pygame.mixer.music", "id": 2751}, {"content": "Yes, I just found it out, and it plays tracked music modules such as MOD or XM.\nBut IT (Impulse Tracker) modules don't play correctly. Wow. I used fmod with Python bindings\nall the time, and Pygame supported it already. I wish I knew that sooner :D", "user_title": "Anonymous", "datetimeon": "2009-05-21T06:35:19", "link": "pygame.mixer.music", "id": 2750}, {"content": "While using this, it seems that it returns true when the music is paused. 
Anyone else having this problem, if it's a problem?", "user_title": "Anonymous", "datetimeon": "2009-05-19T11:35:42", "link": "pygame.mixer.music.get_busy", "id": 2749}, {"content": "If you want to draw the sprites in your group in the opposite order you could try something like this:\n#Suppose that you keep a list of objects in \"Sprites\"\nSprites=[]\n#You also have:\nAllsprites=pygame.sprite.OrderedUpdates(Sprites)\n\nWhenever you add something to the list of sprites, you want to add it to Allsprites like this:\ndef create_sprite():\n a=SomeClassForYourSprite()\n Sprites.append(a)\n Allsprites.add(a)\n\n#That is how I normally add sprites, but unfortunately I was getting the reverse order of what I wanted so I did this:\ndef create_sprite():\n Allsprites.empty() #This removes all objects from your group\n a=SomeClassForYourSprite()\n Sprites.insert(0,a) #Placing your new sprite at the front of the list\n for sprite in Sprites:\n Allsprites.add(sprite)\n\nThis should reverse the order for you, allowing the newest sprite created to appear at the bottom instead of the top", "user_title": "Anonymous", "datetimeon": "2009-05-11T00:29:55", "link": "pygame.sprite.OrderedUpdates", "id": 2744}, {"content": "Is this function blocking? I mean... 
when it returns and my program flow continues, can I be assured that the display has updated on the actual screen?", "user_title": "Anonymous", "datetimeon": "2011-01-14T19:38:50", "link": "pygame.display.update", "id": 3730}, {"content": "Is there a way to set a path to a font file?", "user_title": "Anonymous", "datetimeon": "2011-01-16T15:32:54", "link": "pygame.font.SysFont", "id": 3734}, {"content": "eee", "user_title": "Anonymous", "datetimeon": "2011-01-16T19:25:25", "link": "pygame.image.tostring", "id": 3735}, {"content": "The last comment was spam", "user_title": "Anonymous", "datetimeon": "2011-01-17T21:08:17", "link": "pygame.draw.polygon", "id": 3737}, {"content": "[(x,y), (x1,y1), (x2,y2)]", "user_title": "Anonymous", "datetimeon": "2011-01-17T21:08:51", "link": "pygame.draw.polygon", "id": 3738}, {"content": "The core algorithm works with 32-bit surfaces. When a 24-bit surface is passed, the pixel data is converted to 32-bit data before the actual transformation, and then it's converted back into 24-bits again, which means 2 extra conversions of the whole image. 
This would especially be troublesome with large images.", "user_title": "Anonymous", "datetimeon": "2011-01-18T03:41:16", "link": "pygame.transform.smoothscale", "id": 3739}, {"content": "re", "user_title": "Anonymous", "datetimeon": "2011-01-18T06:48:13", "link": "pygame.mixer.music.pause", "id": 3740}, {"content": "Hello world", "user_title": "Anonymous", "datetimeon": "2011-01-19T16:38:01", "link": "index.html", "id": 3743}, {"content": "what", "user_title": "Anonymous", "datetimeon": "2011-01-25T20:39:22", "link": "pygame.event.get_grab", "id": 3748}, {"content": "The width for \"ae\" WILL always match the width for \"a\" + \"e\" (which is \"a\" concatinated with\"e\").\nIt will not always match the width of \"a\" plus the width of \"e\".\n(But we knew what you meant.)", "user_title": "Anonymous", "datetimeon": "2011-01-25T21:11:43", "link": "Font.size", "id": 3749}, {"content": "can we get an admin to delete that?", "user_title": "Anonymous", "datetimeon": "2011-01-27T17:25:55", "link": "pygame.draw.circle", "id": 3754}, {"content": "This function seems to me little bit buggy, so I wrote my own:\n\na and b are surfarrays of some surfaces that you want to compare\n\n def comparray(self,a,b):\n c = abs(a.__sub__(b))\n c = c.__ge__(self.tolerance)*255\n surface = pygame.surfarray.make_surface(c)\n return surface", "user_title": "Anonymous", "datetimeon": "2011-01-27T18:49:07", "link": "PixelArray.compare", "id": 3755}, {"content": "your gay", "user_title": "Anonymous", "datetimeon": "2011-01-28T04:28:47", "link": "Rect.colliderect", "id": 3758}, {"content": "It would be nice if the number of Sounds on queue was more than one...\n\nGreg Ruo", "user_title": "Anonymous", "datetimeon": "2011-01-30T21:39:27", "link": "Channel.queue", "id": 3765}, {"content": "I solved my previous question:\n\nIf you need to play in sequence several Sounds in a queue, you can solve this\nwith something like:\n=======================\n i=0\n while (Ch0.get_queue()==None) and (i<10):\n 
i+=1\n Ch0.queue(f[i])\n============================\n\nwhere Ch0 is your Sound channel created with Ch0=pygame.mixer.Channel(0).\nIn other words, even if the queue allows only one single sound in queue,\nI use the .get_queue method to wait until the queue is free before\n adding the next Sound in the sequence.\n\nIf you have better solutions please reply here. Thanks.\n\nGreg Ruo", "user_title": "Anonymous", "datetimeon": "2011-01-30T22:38:45", "link": "Channel.queue", "id": 3766}, {"content": "Probably would be good to have an OPTIONAL choice to remove them from the queue . . .", "user_title": "Anonymous", "datetimeon": "2011-01-31T13:48:40", "link": "pygame.event.get", "id": 3768}, {"content": "I had this weird thing where blue/red was inversed, but not the other colours, when I was mapping some pixels from one image to a blank surface.\nIt was caused by copying the color integer directly to one pixel to the other, so the trick is to always surface.unmap_rgb(pixel) before setting the color to a new pixel \nThat tricked works.\nIt's the only way unfortunately.", "user_title": "Anonymous", "datetimeon": "2011-02-04T02:33:15", "link": "pygame.PixelArray", "id": 3771}, {"content": "@Dave: Thanks very much for writing that we should ignore the keyword \"width\". 
This saved me time and my program now works.", "user_title": "Anonymous", "datetimeon": "2011-02-05T08:25:19", "link": "pygame.draw.rect", "id": 3773}, {"content": "ankit sucks", "user_title": "Anonymous", "datetimeon": "2011-02-08T14:07:31", "link": "Rect.colliderect", "id": 3776}, {"content": "i heard ankit really sucks", "user_title": "Anonymous", "datetimeon": "2011-02-08T14:07:46", "link": "Rect.collidepoint", "id": 3777}, {"content": "ankit tandon sucks", "user_title": "Anonymous", "datetimeon": "2011-02-08T14:07:55", "link": "Rect.contains", "id": 3778}, {"content": "I don't get what this does...", "user_title": "Anonymous", "datetimeon": "2011-02-09T22:32:43", "link": "pygame.event.pump", "id": 3779}, {"content": "If you don't use the event queue(why aren't you??) this will keep your program from locking up.", "user_title": "Anonymous", "datetimeon": "2011-02-10T21:20:05", "link": "pygame.event.pump", "id": 3780}, {"content": "When the camera is stopped and you try to access it pygame segfaults.\n(On Debian testing with pygame 1.9.1)", "user_title": "Anonymous", "datetimeon": "2011-02-11T03:31:32", "link": "Camera.stop", "id": 3781}, {"content": "set_allowed removes events from the queue! 
\nEven if the event in question doesn't belong to the given type.\n\n>>> import pygame\n>>> pygame.init()\n>>> pygame.event.post(pygame.event.Event(pygame.USEREVENT, code=0))\n>>> print pygame.event.peek(pygame.USEREVENT)\n1\n>>> pygame.event.set_allowed(pygame.MOUSEMOTION)\n>>> print pygame.event.peek(pygame.USEREVENT)\n0", "user_title": "Anonymous", "datetimeon": "2011-02-11T17:42:34", "link": "pygame.event.set_allowed", "id": 3782}, {"content": "Thank you very much.\nYour compare function works much better than the original one.", "user_title": "Anonymous", "datetimeon": "2011-02-14T16:59:41", "link": "PixelArray.compare", "id": 3783}, {"content": "meto", "user_title": "Anonymous", "datetimeon": "2011-02-15T19:30:06", "link": "Color.g", "id": 3786}, {"content": "rofl rofl what for a crappy thing you performed? go to wikipedia it works!\n\nu mad!", "user_title": "Anonymous", "datetimeon": "2011-02-16T06:49:19", "link": "pygame.draw.rect", "id": 3787}, {"content": "rofl rofl what for a crappy thing you performed? go to wikipedia it works!\n\nu mad!", "user_title": "Anonymous", "datetimeon": "2011-02-16T06:50:00", "link": "pygame.draw.rect", "id": 3788}, {"content": "stfu\n\nu mad", "user_title": "Anonymous", "datetimeon": "2011-02-16T06:52:07", "link": "Rect.unionall_ip", "id": 3789}, {"content": "This method only queues one music file.\nIf you call it and there already is a queued file, it will be overrided.", "user_title": "Anonymous", "datetimeon": "2011-02-19T12:17:58", "link": "pygame.mixer.music.queue", "id": 3791}, {"content": "elif event.type == pygame.QUIT or event.type == pygame.K_ESCAPE:\n\nthe event type is not pygame.K_ESCAPE. 
you have to check for a KEYDOWN or KEYUP event and check if it is the key you want, for example:\n\nelif event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):", "user_title": "Anonymous", "datetimeon": "2011-02-21T08:30:17", "link": "pygame.key", "id": 3793}, {"content": "Well, sometimes black and white are swapped, sometimes they aren't.\nEven in the same program, for one mouse cursor it may work right,\nand for another cursor the black & white colors are swapped.\n\nI haven't figured out what triggers this bug.", "user_title": "Anonymous", "datetimeon": "2011-02-24T13:00:58", "link": "pygame.cursors.compile", "id": 3797}, {"content": "spam", "user_title": "Anonymous", "datetimeon": "2011-02-26T15:00:47", "link": "pygame.event.set_grab", "id": 3798}, {"content": "In Pygame 1.9.2 surface objects have sprouted a new method, get_view:\n\nSurface.get_view\nreturn a view of a surface's pixel data.\nSurface.get_view(kind='2'): return\n\nReturn an object which exposes a surface's internal pixel buffer to a NumPy array. For now a custom object with an array struct interface is returned. A Python memoryview may be returned in the future. The buffer is writeable.\n\nThe kind argument is the length 1 string '2', '3', 'r', 'g', 'b', or 'a'. The letters are case insensitive; 'A' will work as well. The argument can be either a Unicode or byte (char) string. The default is '2'.\n\nA kind '2' view is a (surface-width, surface-height) array of raw pixels. The pixels are surface bytesized unsigned integers. The pixel format is surface specific. It is unavailable for 24-bit surfaces.\n\n'3' returns a (surface-width, surface-height, 3) view of RGB color components. Each of the red, green, and blue components are unsigned bytes. Only 24-bit and 32-bit surfaces are supported. 
The color components must be in either RGB or BGR order within the pixel.\n\n'r' for red, 'g' for green, 'b' for blue, and 'a' for alpha return a (surface-width, surface-height) view of a single color component within a surface: a color plane. Color components are unsigned bytes. Both 24-bit and 32-bit surfaces support 'r', 'g', and 'b'. Only 32-bit surfaces with SRCALPHA support 'a'.\n\nThis method implicitly locks the Surface. The lock will be released, once the returned view object is deleted.", "user_title": "Anonymous", "datetimeon": "2011-03-01T14:59:53", "link": "pygame.Surface", "id": 3803}, {"content": "In Pygame 1.9.2 surface objects have sprouted a new method, get_view:\n\nSurface.get_view\nreturn a view of a surface's pixel data.\nSurface.get_view(kind='2'): return\n\nReturn an object which exposes a surface's internal pixel buffer to a NumPy\narray. For now a custom object with an array struct interface is returned.\nA Python memoryview may be returned in the future. The buffer is writeable.\n\nThe kind argument is the length 1 string '2', '3', 'r', 'g', 'b', or 'a'.\nThe letters are case insensitive; 'A' will work as well. The argument can be\neither a Unicode or byte (char) string. The default is '2'.\n\nA kind '2' view is a (surface-width, surface-height) array of raw pixels. The\npixels are surface bytesized unsigned integers. The pixel format is surface\nspecific. It is unavailable for 24-bit surfaces.\n\n'3' returns a (surface-width, surface-height, 3) view of RGB color components.\nEach of the red, green, and blue components are unsigned bytes. Only 24-bit and\n32-bit surfaces are supported. The color components must be in either RGB or\nBGR order within the pixel.\n\n'r' for red, 'g' for green, 'b' for blue, and 'a' for alpha return a\n(surface-width, surface-height) view of a single color component within a\nsurface: a color plane. Color components are unsigned bytes. Both 24-bit and\n32-bit surfaces support 'r', 'g', and 'b'. 
Only 32-bit surfaces with SRCALPHA\nsupport 'a'.\n\nThis method implicitly locks the Surface. The lock will be released, once the\nreturned view object is deleted.", "user_title": "Anonymous", "datetimeon": "2011-03-01T15:02:04", "link": "pygame.Surface", "id": 3804}, {"content": "for statement with arc", "user_title": "Anonymous", "datetimeon": "2011-03-01T19:54:13", "link": "pygame.draw.arc", "id": 3805}, {"content": "Oh my god I was so thankful that you can adjust the volume, I have no editing software to fix my sounds.", "user_title": "Anonymous", "datetimeon": "2011-03-03T21:26:40", "link": "Sound.set_volume", "id": 3806}, {"content": "Thanks to the comments for wrinting about the event attributes and the key constants list. It should be in any serious documentation.", "user_title": "Anonymous", "datetimeon": "2011-03-09T16:10:36", "link": "pygame.event.Event", "id": 3808}, {"content": "a;rtawkljethlak", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:41:39", "link": "pygame.font", "id": 3809}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:42:20", "link": "pygame.font.init", "id": 3810}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:42:22", "link": "pygame.font.init", "id": 3811}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:42:24", "link": "pygame.font.init", "id": 3812}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:42:27", "link": "pygame.font.init", "id": 3813}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:42:29", "link": "pygame.font.init", "id": 3814}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:42:31", "link": "pygame.font.init", "id": 3815}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:42:33", "link": "pygame.font.init", "id": 3816}, {"content": "I LIKEY TO 
SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:42:36", "link": "pygame.font.init", "id": 3817}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:42:37", "link": "pygame.font.init", "id": 3818}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:42:47", "link": "pygame.font.init", "id": 3819}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:42:53", "link": "pygame.font.init", "id": 3820}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:42:54", "link": "pygame.font.init", "id": 3821}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:42:56", "link": "pygame.font.init", "id": 3822}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:42:58", "link": "pygame.font.init", "id": 3823}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:01", "link": "pygame.font.init", "id": 3824}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:03", "link": "pygame.font.init", "id": 3825}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:07", "link": "pygame.font.init", "id": 3826}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:09", "link": "pygame.font.init", "id": 3827}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:11", "link": "pygame.font.init", "id": 3828}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:13", "link": "pygame.font.init", "id": 3829}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:15", "link": "pygame.font.init", "id": 3830}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:16", "link": 
"pygame.font.init", "id": 3831}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:19", "link": "pygame.font.init", "id": 3832}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:20", "link": "pygame.font.init", "id": 3833}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:22", "link": "pygame.font.init", "id": 3834}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:24", "link": "pygame.font.init", "id": 3835}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:26", "link": "pygame.font.init", "id": 3836}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:28", "link": "pygame.font.init", "id": 3837}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:29", "link": "pygame.font.init", "id": 3838}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:31", "link": "pygame.font.init", "id": 3839}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:33", "link": "pygame.font.init", "id": 3840}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:35", "link": "pygame.font.init", "id": 3841}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:37", "link": "pygame.font.init", "id": 3842}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:39", "link": "pygame.font.init", "id": 3843}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:40", "link": "pygame.font.init", "id": 3844}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:42", "link": "pygame.font.init", "id": 3845}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", 
"datetimeon": "2011-03-10T18:43:45", "link": "pygame.font.init", "id": 3846}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:46", "link": "pygame.font.init", "id": 3847}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:48", "link": "pygame.font.init", "id": 3848}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:50", "link": "pygame.font.init", "id": 3849}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:52", "link": "pygame.font.init", "id": 3850}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:53", "link": "pygame.font.init", "id": 3851}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:55", "link": "pygame.font.init", "id": 3852}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:57", "link": "pygame.font.init", "id": 3853}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:43:59", "link": "pygame.font.init", "id": 3854}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:44:07", "link": "pygame.font.init", "id": 3855}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:44:09", "link": "pygame.font.init", "id": 3856}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:44:11", "link": "pygame.font.init", "id": 3857}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:44:13", "link": "pygame.font.init", "id": 3858}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:44:15", "link": "pygame.font.init", "id": 3859}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:44:18", "link": "pygame.font.init", "id": 3860}, {"content": "I 
LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:44:20", "link": "pygame.font.init", "id": 3861}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:44:22", "link": "pygame.font.init", "id": 3862}, {"content": "I LIKEY TO SPAM", "user_title": "Anonymous", "datetimeon": "2011-03-10T18:44:24", "link": "pygame.font.init", "id": 3863}, {"content": "Hey guys, how do you detect if user hits enter? There's no event.key for that! Thank you.", "user_title": "Anonymous", "datetimeon": "2011-03-12T11:25:09", "link": "pygame.key", "id": 3864}, {"content": "Could somebody please delete the horrible code in this comment thread? :(", "user_title": "Anonymous", "datetimeon": "2011-03-15T20:16:46", "link": "pygame.draw.circle", "id": 3865}, {"content": "pygame documentation\t || Pygame Home || Help Contents || Reference Index || \n \nCamera || Cdrom || Color || Cursors || Display || Draw || Event || Examples || Font || Gfxdraw || Image || Joystick || Key || Locals || Mask || Midi || Mixer || Mouse || Movie || Music || Overlay || Pixelarray || Pygame || Rect || Scrap || Sndarray || Sprite || Surface || Surfarray || Tests || Time || Transform\nFont.metrics\n\nThe user submitted comments should be used for:\n\nExamples\nHelpful hints, tips, and tricks\nFurther explanation / documentation\nThe user submitted comments should NOT be used for:\n\nBug Reports (see our new Bug Reports link on the side)\nFeature Requests\nQuestions\nPlease note that periodically, the developers may go through the notes and incorporate the information in them into the documentation. 
This means that any note submitted here becomes the property of Pete Shinners under the LGPL licence.\n\nIf you do not want to leave an anonymous comment, please sign in first.", "user_title": "Anonymous", "datetimeon": "2011-03-21T15:13:24", "link": "Font.metrics", "id": 3868}, {"content": "I can't figure how to crop an image, even after reading this suggestion", "user_title": "Anonymous", "datetimeon": "2011-03-22T11:32:04", "link": "pygame.transform.chop", "id": 3870}, {"content": "I was dumb. Here is how I got it to work. So simple:\n\ncreen.blit(gameboard,(selection.x-18,selection.y-18),(selection.x-18,selection.y-18,96,96))", "user_title": "Anonymous", "datetimeon": "2011-03-22T11:45:29", "link": "pygame.transform.chop", "id": 3871}, {"content": "Perfect, Matthew Brown! Just what I was looking for. I found the reset_stuff() function especially useful and I'm going to use it in our production software.", "user_title": "Anonymous", "datetimeon": "2011-03-24T10:34:18", "link": "pygame.draw.circle", "id": 3873}, {"content": "Wow, thanks a lot Matthew, I am a PyGame newbie and was having some troubles understanding this function without a complete example.", "user_title": "Anonymous", "datetimeon": "2011-03-24T12:23:02", "link": "pygame.draw.circle", "id": 3874}, {"content": "lol", "user_title": "Anonymous", "datetimeon": "2011-03-24T18:06:29", "link": "pygame.draw.circle", "id": 3875}, {"content": "Guys, what Matthew N. Brown did here harms python's reputation.\n\nPython is elegant.\n\nWhat Matthew did was perlify python ... this is terrible...", "user_title": "Anonymous", "datetimeon": "2011-03-24T18:07:11", "link": "pygame.draw.circle", "id": 3876}, {"content": "Just wanted to give my profuse thanks to Matthew N. Brown for his superb usage example of this otherwise vague and esoteric method. 
I've contacted my boss and we will now integrate this snippet into all of our newly created (and soon-to-be-refactored) legacy code.", "user_title": "Anonymous", "datetimeon": "2011-03-25T20:41:38", "link": "pygame.draw.circle", "id": 3878}, {"content": "where image is a surface, rot and scale are floats\n\n return pygame.transform.smoothscale(image, rot, scale)\nTypeError: argument 2 must be 2-item sequence, not float\n \n return pygame.transform.smoothscale(image,[0,0], scale)\nTypeError: argument 3 must be pygame.Surface, not float", "user_title": "Anonymous", "datetimeon": "2011-03-27T01:37:09", "link": "pygame.transform.rotozoom", "id": 3879}, {"content": "and...\n surf = pygame.surface.Surface((image.get_width()*scale, image.get_height()*scale))\n return pygame.transform.smoothscale(image,[0,0], surf)\n\n pygame.transform.smoothscale(image,[0,0], surf)\nValueError: Destination surface not the given width or height.", "user_title": "Anonymous", "datetimeon": "2011-03-27T01:40:56", "link": "pygame.transform.rotozoom", "id": 3880}, {"content": "look at set_grab first and then you understand", "user_title": "Anonymous", "datetimeon": "2011-03-27T13:55:15", "link": "pygame.event.get_grab", "id": 3881}, {"content": "getting this error :\n in __init__\n self.font = pygame.font.Font(\"None\", 50)\nerror: font not initialized\n\nnot sure why because i rendered the font..", "user_title": "Anonymous", "datetimeon": "2011-04-02T14:35:05", "link": "Font.render", "id": 3882}, {"content": "The enter key is K_RETURN.", "user_title": "Anonymous", "datetimeon": "2011-04-02T17:03:07", "link": "pygame.key", "id": 3883}, {"content": "For this error :\nthere is no soundcard\n\nCall pygame.mixer.init two times :\npygame.mixer.init()\npygame.mixer.init()\n\nOR\n\npygame.mixer.init(); pygame.mixer.init()\n\n(don't omit semicolon)", "user_title": "Anonymous", "datetimeon": "2011-04-05T05:06:41", "link": "pygame.mixer.init", "id": 3886}, {"content": "/love little kids who leave dumb 
comments like this one", "user_title": "Anonymous", "datetimeon": "2011-04-12T10:48:18", "link": "Rect.colliderect", "id": 3889}, {"content": "there is no information about the supported formats - are those from ModPlugTracker supported? (like .mod, .xm, .s3m, etc.)", "user_title": "Anonymous", "datetimeon": "2011-04-12T12:35:41", "link": "pygame.mixer.music.load", "id": 3890}, {"content": "Yes", "user_title": "Anonymous", "datetimeon": "2011-04-12T16:17:04", "link": "Font.render", "id": 3891}, {"content": "This seems to be broken!\nI call this before playing a movie, but there is no sound.\nPlaying the movie without initializing the mixer in the first place, works!\n\nCan anyone confirm that this is broken?", "user_title": "Anonymous", "datetimeon": "2011-04-18T11:51:33", "link": "pygame.mixer.quit", "id": 3895}, {"content": "For Windows (XP):\nThe problem is that the screen does not get updated automatically, for some reason.\nThe solution is simple. Take a surface with the size of the movie, set this as the display of the movie.\nIn a loop, get the current frame of the movie and if it increases, blit the surface onto the screen and update the screen.", "user_title": "Anonymous", "datetimeon": "2011-04-19T10:47:45", "link": "pygame.movie", "id": 3896}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom pygame import *\nimport pygame, time, numpy, pygame.sndarray\n\nsample_rate = 44100\n\ndef play_for(sample_array, ms, volLeft, volRight):\n sound = pygame.sndarray.make_sound(sample_array)\n beg = time.time()\n channel = sound.play(-1)\n channel.set_volume(volLeft,volRight)\n pygame.time.delay(ms)\n sound.stop()\n end = time.time()\n return beg, end\n \ndef sine_array_onecycle(hz, peak):\n length = sample_rate / float(hz)\n omega = numpy.pi * 2 / length\n xvalues = numpy.arange(int(length)) * omega\n return (peak * numpy.sin(xvalues))\n \ndef sine_array(hz, peak, n_samples = sample_rate):\n return numpy.resize(sine_array_onecycle(hz, peak), 
(n_samples,))\n \ndef main():\n pygame.mixer.pre_init(sample_rate, -16, 2) # 44.1kHz, 16-bit signed, stereo\n pygame.init()\n f = sine_array(8000, 1)\n f = numpy.array(zip (f , f))\n\n play_for(f , 5000, 0.5, 0.5)\n\nif __name__ == '__main__': main()", "user_title": "Anonymous", "datetimeon": "2011-04-22T12:57:35", "link": "pygame.sndarray.make_sound", "id": 4038}, {"content": "Get Unicode key in Pygame:\n\nfrom pygame import *\npygame.init()\npygame.display.set_mode((500,500),OPENGLBLIT|OPENGL|DOUBLEBUF)\nexitt = 0 \ninte = 4096\nwhile not exitt:\n for event in pygame.event.get():\n if event.type == pygame.QUIT: \n pygame.quit()\n exitt = True\n if event.type == KEYDOWN:\n print event.dict['unicode']", "user_title": "Anonymous", "datetimeon": "2011-04-23T04:11:07", "link": "pygame.key", "id": 4039}, {"content": "The param is \"loops\" not \"loop\".", "user_title": "Anonymous", "datetimeon": "2011-04-27T15:37:38", "link": "Movie.play", "id": 4040}, {"content": "Use it when you load PNG images with transparencies", "user_title": "Anonymous", "datetimeon": "2011-04-30T21:10:05", "link": "Surface.convert_alpha", "id": 4041}, {"content": "The dest argument doesnt work for me, say if I do\n\nscreen.blit(mySurf, dest=(0,0))\n\nor screen.blit(mySurf, dest=(100,100))\n\nI get exactly the same outcome.\n\nWhere am i wrong?", "user_title": "Anonymous", "datetimeon": "2011-05-03T00:45:25", "link": "Surface.blit", "id": 4042}, {"content": "Sorry I realized i was blitting to screen instead of my temporary surface,\nplease ignore (and delete) my comment.", "user_title": "Anonymous", "datetimeon": "2011-05-03T00:46:43", "link": "Surface.blit", "id": 4043}, {"content": "How does that differ from pygame.draw.aalines? 
This one can also not be filled.", "user_title": "Anonymous", "datetimeon": "2011-05-04T14:58:27", "link": "pygame.gfxdraw.aapolygon", "id": 4044}, {"content": "XBM not supported?", "user_title": "Anonymous", "datetimeon": "2011-05-10T04:11:53", "link": "pygame.image", "id": 4048}, {"content": "I believe it offsets the detection area by (x,y) pixels. So just put (0,0) for no offset", "user_title": "Anonymous", "datetimeon": "2011-05-11T00:48:47", "link": "Mask.draw", "id": 4049}, {"content": "This function seems to need raw strings!", "user_title": "Anonymous", "datetimeon": "2011-05-12T10:26:46", "link": "pygame.image.save", "id": 4050}, {"content": "Is there anyway to play more than 2 songs I have tried everything I want the loaded music to play in order\n#!/usr/bin/env python\nimport pygame\npygame.mixer.init()\npygame.mixer.pre_init(44100, -16, 2, 2048)\npygame.init()\nprint \"hey I finaly got this working!\"\npygame.mixer.music.load('D:/Users/John/Music/Music/FUN.OGG')\npygame.mixer.music.load('D:/Users/John/Music/Music/Still Alive.OGG')\npygame.mixer.music.load('D:/Users/John/Music/Music/turret.OGG')\npygame.mixer.music.load('D:/Users/John/Music/Music/portalend.OGG')\npygame.mixer.music.play()\nimport pysic", "user_title": "Anonymous", "datetimeon": "2011-05-14T16:30:13", "link": "pygame.mixer.music.load", "id": 4053}, {"content": "How is it you play more than one song besides using queue", "user_title": "Anonymous", "datetimeon": "2011-05-14T19:29:16", "link": "pygame.mixer.music.play", "id": 4054}, {"content": "how is it to play a list of songs more than just one without using the queue(which only works once)", "user_title": "Anonymous", "datetimeon": "2011-05-14T19:32:21", "link": "pygame.mixer.music.play", "id": 4055}, {"content": "can the rectangle be filled with an RGBA color so that I can make it transclucent?", "user_title": "Anonymous", "datetimeon": "2011-05-26T09:16:14", "link": "pygame.draw.rect", "id": 4056}, {"content": "Exemple, playing a sinus 
sound :\n\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom pygame import *\nimport pygame, time, numpy, pygame.sndarray\n\nsample_rate = 44100\n\ndef play_for(sample_array, ms, volLeft, volRight):\n sound = pygame.sndarray.make_sound(sample_array)\n beg = time.time()\n channel = sound.play(-1)\n channel.set_volume(volLeft,volRight)\n pygame.time.delay(ms)\n sound.stop()\n end = time.time()\n return beg, end\n \ndef sine_array_onecycle(hz, peak):\n length = sample_rate / float(hz)\n omega = numpy.pi * 2 / length\n xvalues = numpy.arange(int(length)) * omega\n return (peak * numpy.sin(xvalues))\n \ndef sine_array(hz, peak, n_samples = sample_rate):\n return numpy.resize(sine_array_onecycle(hz, peak), (n_samples,))\n\n \ndef main():\n pygame.mixer.pre_init(sample_rate, -16, 2) # 44.1kHz, 16-bit signed, stereo\n pygame.init()\n f = sine_array(8000, 1)\n f = numpy.array(zip (f , f))\n\n play_for(f , 5000, 0.2, 0.2)\n\nif __name__ == '__main__': main()", "user_title": "Anonymous", "datetimeon": "2011-05-27T02:36:07", "link": "pygame.sndarray", "id": 4057}] \ No newline at end of file diff --git a/venv/lib/python3.7/site-packages/pygame/draw.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/draw.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 02d4df8..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/draw.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/draw_py.py b/venv/lib/python3.7/site-packages/pygame/draw_py.py deleted file mode 100644 index 3cf4a3d..0000000 --- a/venv/lib/python3.7/site-packages/pygame/draw_py.py +++ /dev/null @@ -1,539 +0,0 @@ -'''Pygame Drawing algorithms written in Python. (Work in Progress) - -Implement Pygame's Drawing Algorithms in a Python version for testing -and debugging. 
-''' -from __future__ import division -import sys - -if sys.version_info >= (3, 0, 0): - from math import floor, ceil -else: - # Python2.7 - # FIXME : the import of the builtin math module is broken ... - def floor(x): - int_x = int(x) - return int_x if (x == int_x or x > 0) else int_x - 1 - - def ceil(x): - int_x = int(x) - return int_x if (int_x == x or x < 0) else int_x + 1 - - -# H E L P E R F U N C T I O N S # - -# fractional part of x - -def frac(x): - '''return fractional part of x''' - return x - floor(x) - -def inv_frac(x): - '''return inverse fractional part of x''' - return 1 - (x - floor(x)) # eg, 1 - frac(x) - - -# L O W L E V E L D R A W F U N C T I O N S # -# (They are too low-level to be translated into python, right?) - -def set_at(surf, x, y, color): - surf.set_at((x, y), color) - - -def draw_pixel(surf, x, y, color, bright, blend=True): - '''draw one blended pixel with given brightness.''' - try: - other_col = surf.get_at((x, y)) if blend else (0, 0, 0, 0) - except IndexError: # pixel outside the surface - return - new_color = tuple((bright * col + (1 - bright) * pix) - for col, pix in zip(color, other_col)) - # FIXME what should happen if only one, color or surf_col, has alpha? 
- surf.set_at((x, y), new_color) - - -def _drawhorzline(surf, color, x_from, y, x_to): - if x_from == x_to: - surf.set_at((x_from, y), color) - return - - start, end = (x_from, x_to) if x_from <= x_to else (x_to, x_from) - for x in range(start, end + 1): - surf.set_at((x, y), color) - - -def _drawvertline(surf, color, x, y_from, y_to): - if y_from == y_to: - surf.set_at((x, y_from), color) - return - - start, end = (y_from, y_to) if y_from <= y_to else (y_to, y_from) - for y in range(start, end + 1): - surf.set_at((x, y), color) - - -# I N T E R N A L D R A W L I N E F U N C T I O N S # - -def _clip_and_draw_horzline(surf, color, x_from, y, x_to): - '''draw clipped horizontal line.''' - # check Y inside surf - clip = surf.get_clip() - if y < clip.y or y >= clip.y + clip.h: - return - - x_from = max(x_from, clip.x) - x_to = min(x_to, clip.x + clip.w - 1) - - # check any x inside surf - if x_to < clip.x or x_from >= clip.x + clip.w: - return - - _drawhorzline(surf, color, x_from, y, x_to) - - -def _clip_and_draw_vertline(surf, color, x, y_from, y_to): - '''draw clipped vertical line.''' - # check X inside surf - clip = surf.get_clip() - - if x < clip.x or x >= clip.x + clip.w: - return - - y_from = max(y_from, clip.y) - y_to = min(y_to, clip.y + clip.h - 1) - - # check any y inside surf - if y_to < clip.y or y_from >= clip.y + clip.h: - return - - _drawvertline(surf, color, x, y_from, y_to) - -# These constans xxx_EDGE are "outside-the-bounding-box"-flags -LEFT_EDGE = 0x1 -RIGHT_EDGE = 0x2 -BOTTOM_EDGE = 0x4 -TOP_EDGE = 0x8 - -def encode(x, y, left, top, right, bottom): - '''returns a code that defines position with respect to a bounding box''' - # we use the fact that python interprets booleans (the inqualities) - # as 0/1, and then multiply them with the xxx_EDGE flags - return ((x < left) * LEFT_EDGE + - (x > right) * RIGHT_EDGE + - (y < top) * TOP_EDGE + - (y > bottom) * BOTTOM_EDGE) - - -INSIDE = lambda a: not a -ACCEPT = lambda a, b: not (a or b) -REJECT = 
lambda a, b: a and b - - -def clip_line(line, left, top, right, bottom, use_float=False): - '''Algorithm to calculate the clipped line. - - We calculate the coordinates of the part of the line segment within the - bounding box (defined by left, top, right, bottom). The we write - the coordinates of the line segment into "line", much like the C-algorithm. - With `use_float` True, clip_line is usable for float-clipping. - - Returns: true if the line segment cuts the bounding box (false otherwise) - ''' - assert isinstance(line, list) - x1, y1, x2, y2 = line - dtype = float if use_float else int - - while True: - # the coordinates are progressively modified with the codes, - # until they are either rejected or correspond to the final result. - code1 = encode(x1, y1, left, top, right, bottom) - code2 = encode(x2, y2, left, top, right, bottom) - - if ACCEPT(code1, code2): - # write coordinates into "line" ! - line[:] = x1, y1, x2, y2 - return True - if REJECT(code1, code2): - return False - - # We operate on the (x1, y1) point, and swap if it is inside the bbox: - if INSIDE(code1): - x1, x2 = x2, x1 - y1, y2 = y2, y1 - code1, code2 = code2, code1 - if (x2 != x1): - m = (y2 - y1) / float(x2 - x1) - else: - m = 1.0 - # Each case, if true, means that we are outside the border: - # calculate x1 and y1 to be the "first point" inside the bbox... 
- if code1 & LEFT_EDGE: - y1 += dtype((left - x1) * m) - x1 = left - elif code1 & RIGHT_EDGE: - y1 += dtype((right - x1) * m) - x1 = right - elif code1 & BOTTOM_EDGE: - if x2 != x1: - x1 += dtype((bottom - y1) / m) - y1 = bottom - elif code1 & TOP_EDGE: - if x2 != x1: - x1 += dtype((top - y1) / m) - y1 = top - - -def _draw_line(surf, color, x1, y1, x2, y2): - '''draw a non-horizontal line (without anti-aliasing).''' - # Variant of https://en.wikipedia.org/wiki/Bresenham's_line_algorithm - # - # This strongly differs from craw.c implementation, because we use a - # "slope" variable (instead of delta_x and delta_y) and a "error" variable. - # And we can not do pointer-arithmetic with "BytesPerPixel", like in - # the C-algorithm. - if x1 == x2: - # This case should not happen... - raise ValueError - - slope = abs((y2 - y1) / (x2 - x1)) - error = 0.0 - - if slope < 1: - # Here, it's a rather horizontal line - - # 1. check in which octants we are & set init values - if x2 < x1: - x1, x2 = x2, x1 - y1, y2 = y2, y1 - y = y1 - dy_sign = 1 if (y1 < y2) else -1 - - # 2. step along x coordinate - for x in range(x1, x2 + 1): - set_at(surf, x, y, color) - error += slope - if error >= 0.5: - y += dy_sign - error -= 1 - else: - # Case of a rather vertical line - - # 1. check in which octants we are & set init values - if y1 > y2: - x1, x2 = x2, x1 - y1, y2 = y2, y1 - x = x1 - slope = 1 / slope - dx_sign = 1 if (x1 < x2) else -1 - - # 2. step along y coordinate - for y in range(y1, y2 + 1): - set_at(surf, x, y, color) - error += slope - if error >= 0.5: - x += dx_sign - error -= 1 - - -def _draw_aaline(surf, color, from_x, from_y, to_x, to_y, blend): - '''draw an anti-aliased line. - - The algorithm yields identical results with _draw_line for horizontal, - vertical or diagonal lines, and results changes smoothly when changing - any of the endpoint coordinates. 
- - Note that this yields strange results for very short lines, eg - a line from (0, 0) to (0, 1) will draw 2 pixels, and a line from - (0, 0) to (0, 1.1) will blend 10 % on the pixel (0, 2). - ''' - # The different requirements that we have on an antialiasing algorithm - # implies to make some compromises: - # 1. We want smooth evolution wrt to the 4 endpoint coordinates - # (this means also that we want a smooth evolution when the angle - # passes +/- 45° - # 2. We want the same behavior when swapping the endpoints - # 3. We want understandable results for the endpoint values - # (eg we want to avoid half-integer values to draw a simple plain - # horizontal or vertical line between two integer l endpoints) - # - # This implies to somehow make the line artificially 1 pixel longer - # and to draw a full pixel when we have the endpoints are identical. - dx = to_x - from_x - dy = to_y - from_y - - if dx == 0 and dy == 0: - # For smoothness reasons, we could also do some blending here, - # but it seems overshoot... - set_at(surf, int(from_x), int(from_y), color) - return - - if abs(dx) >= abs(dy): - if from_x > to_x: - from_x, to_x = to_x, from_x - from_y, to_y = to_y, from_y - dx = -dx - dy = -dy - - slope = dy / dx - def draw_two_pixel(x, float_y, factor): - y = floor(float_y) - draw_pixel(surf, x, y, color, factor * inv_frac(float_y), blend) - draw_pixel(surf, x, y + 1, color, factor * frac(float_y), blend) - - # A and G are respectively left and right to the "from" point, but - # with integer-x-coordinate, (and only if from_x is not integer). - # Hence they appear in following order on the line in general case: - # A from-pt G . . . to-pt S - # |------*-------|--- . . . ---|-----*------|- - G_x = ceil(from_x) - G_y = from_y + (G_x - from_x) * slope - - # 1. Draw start of the segment if we have a non-integer-part - if from_x < G_x: - # this corresponds to the point "A" - draw_two_pixel(floor(from_x), G_y - slope, inv_frac(from_x)) - - # 2. 
Draw end of the segment: we add one pixel for homogenity reasons - rest = frac(to_x) - S_x = ceil(to_x) - if rest > 0: - # Again we draw only if we have a non-integer-part - S_y = from_y + slope * (dx + 1 - rest) - draw_two_pixel(S_x, S_y, rest) - else: - S_x += 1 - - # 3. loop for other points - for x in range(G_x, S_x): - y = G_y + slope * (x - G_x) - draw_two_pixel(x, y, 1) - - else: - if from_y > to_y: - from_x, to_x = to_x, from_x - from_y, to_y = to_y, from_y - dx = -dx - dy = -dy - - slope = dx / dy - - def draw_two_pixel(float_x, y, factor): - x = floor(float_x) - draw_pixel(surf, x, y, color, factor * inv_frac(float_x), blend) - draw_pixel(surf, x + 1, y, color, factor * frac(float_x), blend) - - G_y = ceil(from_y) - G_x = from_x + (G_y - from_y) * slope - - # 1. Draw start of the segment - if from_y < G_y: - draw_two_pixel(G_x - slope, floor(from_y), inv_frac(from_y)) - - # 2. Draw end of the segment - rest = frac(to_y) - S_y = ceil(to_y) - if rest > 0: - S_x = from_x + slope * (dy + 1 - rest) - draw_two_pixel(S_x, S_y, rest) - else: - S_y += 1 - - # 3. loop for other points - for y in range(G_y, S_y): - x = G_x + slope * (y - G_y) - draw_two_pixel(x, y, 1) - - -# C L I P A N D D R A W L I N E F U N C T I O N S # - -def _clip_and_draw_line(surf, rect, color, pts): - '''clip the line into the rectangle and draw if needed. - - Returns true if anything has been drawn, else false.''' - # "pts" is a list with the four coordinates of the two endpoints - # of the line to be drawn : pts = x1, y1, x2, y2. - # The data format is like that to stay closer to the C-algorithm. 
- if not clip_line(pts, rect.x, rect.y, rect.x + rect.w - 1, - rect.y + rect.h - 1): - # The line segment defined by "pts" is not crossing the rectangle - return 0 - if pts[1] == pts[3]: # eg y1 == y2 - _drawhorzline(surf, color, pts[0], pts[1], pts[2]) - elif pts[0] == pts[2]: # eg x1 == x2 - _drawvertline(surf, color, pts[0], pts[1], pts[3]) - else: - _draw_line(surf, color, pts[0], pts[1], pts[2], pts[3]) - return 1 - -def _clip_and_draw_line_width(surf, rect, color, line, width): - yinc = xinc = 0 - if abs(line[0] - line[2]) > abs(line[1] - line[3]): - yinc = 1 - else: - xinc = 1 - newpts = line[:] - if _clip_and_draw_line(surf, rect, color, newpts): - anydrawn = 1 - frame = newpts[:] - else: - anydrawn = 0 - frame = [10000, 10000, -10000, -10000] - - for loop in range(1, width // 2 + 1): - newpts[0] = line[0] + xinc * loop - newpts[1] = line[1] + yinc * loop - newpts[2] = line[2] + xinc * loop - newpts[3] = line[3] + yinc * loop - if _clip_and_draw_line(surf, rect, color, newpts): - anydrawn = 1 - frame[0] = min(newpts[0], frame[0]) - frame[1] = min(newpts[1], frame[1]) - frame[2] = max(newpts[2], frame[2]) - frame[3] = max(newpts[3], frame[3]) - - if loop * 2 < width: - newpts[0] = line[0] - xinc * loop - newpts[1] = line[1] - yinc * loop - newpts[2] = line[2] - xinc * loop - newpts[3] = line[3] - yinc * loop - if _clip_and_draw_line(surf, rect, color, newpts): - anydrawn = 1 - frame[0] = min(newpts[0], frame[0]) - frame[1] = min(newpts[1], frame[1]) - frame[2] = max(newpts[2], frame[2]) - frame[3] = max(newpts[3], frame[3]) - - return anydrawn - - -def _clip_and_draw_aaline(surf, rect, color, line, blend): - '''draw anti-aliased line between two endpoints.''' - if not clip_line(line, rect.x - 1, rect.y -1, rect.x + rect.w, - rect.y + rect.h, use_float=True): - return # TODO Rect(rect.x, rect.y, 0, 0) - _draw_aaline(surf, color, line[0], line[1], line[2], line[3], blend) - return # TODO Rect(-- affected area --) - - -# D R A W L I N E F U N C T I O N S # - 
-def draw_aaline(surf, color, from_point, to_point, blend=True): - '''draw anti-aliased line between two endpoints.''' - line = [from_point[0], from_point[1], to_point[0], to_point[1]] - return _clip_and_draw_aaline(surf, surf.get_clip(), color, line, blend) - - -def draw_line(surf, color, from_point, to_point, width=1): - '''draw anti-aliased line between two endpoints.''' - line = [from_point[0], from_point[1], to_point[0], to_point[1]] - return _clip_and_draw_line_width(surf, surf.get_clip(), color, line, width) - - -# M U L T I L I N E F U N C T I O N S # - -def _multi_lines(surf, color, closed, points, width=1, blend=False, aaline=False): - '''draw several lines, either anti-aliased or not.''' - # The code for anti-aliased or not is almost identical, so it's factorized - length = len(points) - if length <= 2: - raise TypeError - line = [0] * 4 # store x1, y1 & x2, y2 of the lines to be drawn - - xlist = [pt[0] for pt in points] - ylist = [pt[1] for pt in points] - left = right = line[0] = xlist[0] - top = bottom = line[1] = ylist[0] - - for x, y in points[1:]: - left = min(left, x) - right = max(right, x) - top = min(top, y) - bottom = max(right, x) - - rect = surf.get_clip() - for loop in range(1, length): - - line[0] = xlist[loop - 1] - line[1] = ylist[loop - 1] - line[2] = xlist[loop] - line[3] = ylist[loop] - if aaline: - _clip_and_draw_aaline(surf, rect, color, line, blend) - else: - _clip_and_draw_line_width(surf, rect, color, line, width) - - if closed: - line[0] = xlist[length - 1] - line[1] = ylist[length - 1] - line[2] = xlist[0] - line[3] = ylist[0] - if aaline: - _clip_and_draw_aaline(surf, rect, color, line, blend) - else: - _clip_and_draw_line_width(surf, rect, color, line, width) - - return # TODO Rect(...) 
- -def draw_lines(surf, color, closed, points, width=1): - '''draw several lines connected through the points.''' - return _multi_lines(surf, color, closed, points, width, aaline=False) - - -def draw_aalines(surf, color, closed, points, blend=True): - '''draw several anti-aliased lines connected through the points.''' - return _multi_lines(surf, color, closed, points, blend=blend, aaline=True) - - -def draw_polygon(surface, color, points, width): - if width: - draw_lines(surface, color, 1, points, width) - return # TODO Rect(...) - num_points = len(points) - point_x = [x for x, y in points] - point_y = [y for x, y in points] - - miny = min(point_y) - maxy = max(point_y) - - if miny == maxy: - minx = min(point_x) - maxx = max(point_x) - _clip_and_draw_horzline(surface, color, minx, miny, maxx) - return # TODO Rect(...) - - for y in range(miny, maxy + 1): - x_intersect = [] - for i in range(num_points): - i_prev = i - 1 if i else num_points - 1 - - y1 = point_y[i_prev] - y2 = point_y[i] - - if y1 < y2: - x1 = point_x[i_prev] - x2 = point_x[i] - elif y1 > y2: - y2 = point_y[i_prev] - y1 = point_y[i] - x2 = point_x[i_prev] - x1 = point_x[i] - else: # special case handled below - continue - - if ( ((y >= y1) and (y < y2)) or ((y == maxy) and (y <= y2))) : - x_sect = (y - y1) * (x2 - x1) // (y2 - y1) + x1 - x_intersect.append(x_sect) - - x_intersect.sort() - for i in range(0, len(x_intersect), 2): - _clip_and_draw_horzline(surface, color, x_intersect[i], y, - x_intersect[i + 1]) - - # special case : horizontal border lines - for i in range(num_points): - i_prev = i - 1 if i else num_points - 1 - y = point_y[i] - if miny < y == point_y[i_prev] < maxy: - _clip_and_draw_horzline(surface, color, point_x[i], y, point_x[i_prev]) - - return # TODO Rect(...) 
diff --git a/venv/lib/python3.7/site-packages/pygame/event.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/event.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index c0fc87d..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/event.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/__init__.py b/venv/lib/python3.7/site-packages/pygame/examples/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/pygame/examples/aacircle.py b/venv/lib/python3.7/site-packages/pygame/examples/aacircle.py deleted file mode 100644 index a37751c..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/aacircle.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env python - -"""Proof of concept gfxdraw example""" - -import pygame -import pygame.gfxdraw - -def main(): - pygame.init() - screen = pygame.display.set_mode((500,500)) - screen.fill((255, 0, 0)) - s = pygame.Surface(screen.get_size(), pygame.SRCALPHA, 32) - pygame.draw.line(s, (0,0,0), (250, 250), (250+200,250)) - - width = 1 - for a_radius in range(width): - radius = 200 - pygame.gfxdraw.aacircle(s, 250, 250, radius-a_radius, (0, 0, 0)) - - screen.blit(s, (0, 0)) - pygame.display.flip() - try: - while 1: - event = pygame.event.wait() - if event.type == pygame.QUIT: - break - if event.type == pygame.KEYDOWN: - if event.key == pygame.K_ESCAPE or event.unicode == 'q': - break - pygame.display.flip() - finally: - pygame.quit() - -if __name__ == '__main__': - main() diff --git a/venv/lib/python3.7/site-packages/pygame/examples/aliens.py b/venv/lib/python3.7/site-packages/pygame/examples/aliens.py deleted file mode 100644 index f320588..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/aliens.py +++ /dev/null @@ -1,350 +0,0 @@ -#!/usr/bin/env python - -import random, os.path - -#import basic pygame modules -import pygame -from pygame.locals import * - -#see 
if we can load more than standard BMP -if not pygame.image.get_extended(): - raise SystemExit("Sorry, extended image module required") - - -#game constants -MAX_SHOTS = 2 #most player bullets onscreen -ALIEN_ODDS = 22 #chances a new alien appears -BOMB_ODDS = 60 #chances a new bomb will drop -ALIEN_RELOAD = 12 #frames between new aliens -SCREENRECT = Rect(0, 0, 640, 480) -SCORE = 0 - -main_dir = os.path.split(os.path.abspath(__file__))[0] - -def load_image(file): - "loads an image, prepares it for play" - file = os.path.join(main_dir, 'data', file) - try: - surface = pygame.image.load(file) - except pygame.error: - raise SystemExit('Could not load image "%s" %s'%(file, pygame.get_error())) - return surface.convert() - -def load_images(*files): - imgs = [] - for file in files: - imgs.append(load_image(file)) - return imgs - - -class dummysound: - def play(self): pass - -def load_sound(file): - if not pygame.mixer: return dummysound() - file = os.path.join(main_dir, 'data', file) - try: - sound = pygame.mixer.Sound(file) - return sound - except pygame.error: - print ('Warning, unable to load, %s' % file) - return dummysound() - - - -# each type of game object gets an init and an -# update function. the update function is called -# once per frame, and it is when each object should -# change it's current position and state. 
the Player -# object actually gets a "move" function instead of -# update, since it is passed extra information about -# the keyboard - - -class Player(pygame.sprite.Sprite): - speed = 10 - bounce = 24 - gun_offset = -11 - images = [] - def __init__(self): - pygame.sprite.Sprite.__init__(self, self.containers) - self.image = self.images[0] - self.rect = self.image.get_rect(midbottom=SCREENRECT.midbottom) - self.reloading = 0 - self.origtop = self.rect.top - self.facing = -1 - - def move(self, direction): - if direction: self.facing = direction - self.rect.move_ip(direction*self.speed, 0) - self.rect = self.rect.clamp(SCREENRECT) - if direction < 0: - self.image = self.images[0] - elif direction > 0: - self.image = self.images[1] - self.rect.top = self.origtop - (self.rect.left//self.bounce%2) - - def gunpos(self): - pos = self.facing*self.gun_offset + self.rect.centerx - return pos, self.rect.top - - -class Alien(pygame.sprite.Sprite): - speed = 13 - animcycle = 12 - images = [] - def __init__(self): - pygame.sprite.Sprite.__init__(self, self.containers) - self.image = self.images[0] - self.rect = self.image.get_rect() - self.facing = random.choice((-1,1)) * Alien.speed - self.frame = 0 - if self.facing < 0: - self.rect.right = SCREENRECT.right - - def update(self): - self.rect.move_ip(self.facing, 0) - if not SCREENRECT.contains(self.rect): - self.facing = -self.facing; - self.rect.top = self.rect.bottom + 1 - self.rect = self.rect.clamp(SCREENRECT) - self.frame = self.frame + 1 - self.image = self.images[self.frame//self.animcycle%3] - - -class Explosion(pygame.sprite.Sprite): - defaultlife = 12 - animcycle = 3 - images = [] - def __init__(self, actor): - pygame.sprite.Sprite.__init__(self, self.containers) - self.image = self.images[0] - self.rect = self.image.get_rect(center=actor.rect.center) - self.life = self.defaultlife - - def update(self): - self.life = self.life - 1 - self.image = self.images[self.life//self.animcycle%2] - if self.life <= 0: self.kill() 
- - -class Shot(pygame.sprite.Sprite): - speed = -11 - images = [] - def __init__(self, pos): - pygame.sprite.Sprite.__init__(self, self.containers) - self.image = self.images[0] - self.rect = self.image.get_rect(midbottom=pos) - - def update(self): - self.rect.move_ip(0, self.speed) - if self.rect.top <= 0: - self.kill() - - -class Bomb(pygame.sprite.Sprite): - speed = 9 - images = [] - def __init__(self, alien): - pygame.sprite.Sprite.__init__(self, self.containers) - self.image = self.images[0] - self.rect = self.image.get_rect(midbottom= - alien.rect.move(0,5).midbottom) - - def update(self): - self.rect.move_ip(0, self.speed) - if self.rect.bottom >= 470: - Explosion(self) - self.kill() - - -class Score(pygame.sprite.Sprite): - def __init__(self): - pygame.sprite.Sprite.__init__(self) - self.font = pygame.font.Font(None, 20) - self.font.set_italic(1) - self.color = Color('white') - self.lastscore = -1 - self.update() - self.rect = self.image.get_rect().move(10, 450) - - def update(self): - if SCORE != self.lastscore: - self.lastscore = SCORE - msg = "Score: %d" % SCORE - self.image = self.font.render(msg, 0, self.color) - - - -def main(winstyle = 0): - # Initialize pygame - if pygame.get_sdl_version()[0] == 2: - pygame.mixer.pre_init(44100, 32, 2, 1024) - pygame.init() - if pygame.mixer and not pygame.mixer.get_init(): - print ('Warning, no sound') - pygame.mixer = None - - fullscreen = False - # Set the display mode - winstyle = 0 # |FULLSCREEN - bestdepth = pygame.display.mode_ok(SCREENRECT.size, winstyle, 32) - screen = pygame.display.set_mode(SCREENRECT.size, winstyle, bestdepth) - - #Load images, assign to sprite classes - #(do this before the classes are used, after screen setup) - img = load_image('player1.gif') - Player.images = [img, pygame.transform.flip(img, 1, 0)] - img = load_image('explosion1.gif') - Explosion.images = [img, pygame.transform.flip(img, 1, 1)] - Alien.images = load_images('alien1.gif', 'alien2.gif', 'alien3.gif') - Bomb.images = 
[load_image('bomb.gif')] - Shot.images = [load_image('shot.gif')] - - #decorate the game window - icon = pygame.transform.scale(Alien.images[0], (32, 32)) - pygame.display.set_icon(icon) - pygame.display.set_caption('Pygame Aliens') - pygame.mouse.set_visible(0) - - #create the background, tile the bgd image - bgdtile = load_image('background.gif') - background = pygame.Surface(SCREENRECT.size) - for x in range(0, SCREENRECT.width, bgdtile.get_width()): - background.blit(bgdtile, (x, 0)) - screen.blit(background, (0,0)) - pygame.display.flip() - - #load the sound effects - boom_sound = load_sound('boom.wav') - shoot_sound = load_sound('car_door.wav') - if pygame.mixer: - music = os.path.join(main_dir, 'data', 'house_lo.wav') - pygame.mixer.music.load(music) - pygame.mixer.music.play(-1) - - # Initialize Game Groups - aliens = pygame.sprite.Group() - shots = pygame.sprite.Group() - bombs = pygame.sprite.Group() - all = pygame.sprite.RenderUpdates() - lastalien = pygame.sprite.GroupSingle() - - #assign default groups to each sprite class - Player.containers = all - Alien.containers = aliens, all, lastalien - Shot.containers = shots, all - Bomb.containers = bombs, all - Explosion.containers = all - Score.containers = all - - #Create Some Starting Values - global score - alienreload = ALIEN_RELOAD - kills = 0 - clock = pygame.time.Clock() - - #initialize our starting sprites - global SCORE - player = Player() - Alien() #note, this 'lives' because it goes into a sprite group - if pygame.font: - all.add(Score()) - - - while player.alive(): - - #get input - for event in pygame.event.get(): - if event.type == QUIT or \ - (event.type == KEYDOWN and event.key == K_ESCAPE): - return - elif event.type == KEYDOWN: - if event.key == pygame.K_f: - if not fullscreen: - print("Changing to FULLSCREEN") - screen_backup = screen.copy() - screen = pygame.display.set_mode( - SCREENRECT.size, - winstyle | FULLSCREEN, - bestdepth - ) - screen.blit(screen_backup, (0, 0)) - else: - 
print("Changing to windowed mode") - screen_backup = screen.copy() - screen = pygame.display.set_mode( - SCREENRECT.size, - winstyle, - bestdepth - ) - screen.blit(screen_backup, (0, 0)) - # screen.fill((255, 0, 0)) - pygame.display.flip() - fullscreen = not fullscreen - - - keystate = pygame.key.get_pressed() - - # clear/erase the last drawn sprites - all.clear(screen, background) - - #update all the sprites - all.update() - - #handle player input - direction = keystate[K_RIGHT] - keystate[K_LEFT] - player.move(direction) - firing = keystate[K_SPACE] - if not player.reloading and firing and len(shots) < MAX_SHOTS: - Shot(player.gunpos()) - shoot_sound.play() - player.reloading = firing - - # Create new alien - if alienreload: - alienreload = alienreload - 1 - elif not int(random.random() * ALIEN_ODDS): - Alien() - alienreload = ALIEN_RELOAD - - # Drop bombs - if lastalien and not int(random.random() * BOMB_ODDS): - Bomb(lastalien.sprite) - - # Detect collisions - for alien in pygame.sprite.spritecollide(player, aliens, 1): - boom_sound.play() - Explosion(alien) - Explosion(player) - SCORE = SCORE + 1 - player.kill() - - for alien in pygame.sprite.groupcollide(shots, aliens, 1, 1).keys(): - boom_sound.play() - Explosion(alien) - SCORE = SCORE + 1 - - for bomb in pygame.sprite.spritecollide(player, bombs, 1): - boom_sound.play() - Explosion(player) - Explosion(bomb) - player.kill() - - #draw the scene - dirty = all.draw(screen) - pygame.display.update(dirty) - - #cap the framerate - clock.tick(40) - - if pygame.mixer: - pygame.mixer.music.fadeout(1000) - pygame.time.wait(1000) - pygame.quit() - - - -#call the "main" function if running this script -if __name__ == '__main__': main() - diff --git a/venv/lib/python3.7/site-packages/pygame/examples/arraydemo.py b/venv/lib/python3.7/site-packages/pygame/examples/arraydemo.py deleted file mode 100644 index 9341db6..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/arraydemo.py +++ /dev/null @@ -1,131 +0,0 @@ 
-#!/usr/bin/env python - -import os - -import pygame -from pygame import surfarray -from pygame.locals import * - -main_dir = os.path.split(os.path.abspath(__file__))[0] - -def surfdemo_show(array_img, name): - "displays a surface, waits for user to continue" - screen = pygame.display.set_mode(array_img.shape[:2], 0, 32) - surfarray.blit_array(screen, array_img) - pygame.display.flip() - pygame.display.set_caption(name) - while 1: - e = pygame.event.wait() - if e.type == MOUSEBUTTONDOWN: break - elif e.type == KEYDOWN and e.key == K_s: - #pygame.image.save(screen, name+'.bmp') - #s = pygame.Surface(screen.get_size(), 0, 32) - #s = s.convert_alpha() - #s.fill((0,0,0,255)) - #s.blit(screen, (0,0)) - #s.fill((222,0,0,50), (0,0,40,40)) - #pygame.image.save_extended(s, name+'.png') - #pygame.image.save(s, name+'.png') - #pygame.image.save(screen, name+'_screen.png') - #pygame.image.save(s, name+'.tga') - pygame.image.save(screen, name+'.png') - elif e.type == QUIT: - raise SystemExit() - -def main(arraytype=None): - """show various surfarray effects - - If arraytype is provided then use that array package. Valid - values are 'numeric' or 'numpy'. Otherwise default to NumPy, - or fall back on Numeric if NumPy is not installed. - - """ - if arraytype not in ('numpy', None): - raise ValueError('Array type not supported: %r' % arraytype) - - import numpy as N - from numpy import int32, uint8, uint - - pygame.init() - print ('Using %s' % surfarray.get_arraytype().capitalize()) - print ('Press the mouse button to advance image.') - print ('Press the "s" key to save the current image.') - - #allblack - allblack = N.zeros((128, 128), int32) - surfdemo_show(allblack, 'allblack') - - - #striped - #the element type is required for N.zeros in NumPy else - #an array of float is returned. 
- striped = N.zeros((128, 128, 3), int32) - striped[:] = (255, 0, 0) - striped[:,::3] = (0, 255, 255) - surfdemo_show(striped, 'striped') - - - #rgbarray - imagename = os.path.join(main_dir, 'data', 'arraydemo.bmp') - imgsurface = pygame.image.load(imagename) - rgbarray = surfarray.array3d(imgsurface) - surfdemo_show(rgbarray, 'rgbarray') - - - #flipped - flipped = rgbarray[:,::-1] - surfdemo_show(flipped, 'flipped') - - - #scaledown - scaledown = rgbarray[::2,::2] - surfdemo_show(scaledown, 'scaledown') - - - #scaleup - #the element type is required for N.zeros in NumPy else - #an #array of floats is returned. - shape = rgbarray.shape - scaleup = N.zeros((shape[0]*2, shape[1]*2, shape[2]), int32) - scaleup[::2,::2,:] = rgbarray - scaleup[1::2,::2,:] = rgbarray - scaleup[:,1::2] = scaleup[:,::2] - surfdemo_show(scaleup, 'scaleup') - - - #redimg - redimg = N.array(rgbarray) - redimg[:,:,1:] = 0 - surfdemo_show(redimg, 'redimg') - - - #soften - #having factor as an array forces integer upgrade during multiplication - #of rgbarray, even for numpy. - factor = N.array((8,), int32) - soften = N.array(rgbarray, int32) - soften[1:,:] += rgbarray[:-1,:] * factor - soften[:-1,:] += rgbarray[1:,:] * factor - soften[:,1:] += rgbarray[:,:-1] * factor - soften[:,:-1] += rgbarray[:,1:] * factor - soften //= 33 - surfdemo_show(soften, 'soften') - - - #crossfade (50%) - src = N.array(rgbarray) - dest = N.zeros(rgbarray.shape) # dest is float64 by default. 
- dest[:] = 20, 50, 100 - diff = (dest - src) * 0.50 - xfade = src + diff.astype(uint) - surfdemo_show(xfade, 'xfade') - - - #alldone - pygame.quit() - -if __name__ == '__main__': - main() - - - diff --git a/venv/lib/python3.7/site-packages/pygame/examples/audiocapture.py b/venv/lib/python3.7/site-packages/pygame/examples/audiocapture.py deleted file mode 100644 index c140e51..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/audiocapture.py +++ /dev/null @@ -1,58 +0,0 @@ -import pygame as pg -import time - -if pg.get_sdl_version()[0] < 2: - raise SystemExit('This example requires pygame 2 and SDL2.') - -from pygame._sdl2 import ( - get_audio_device_name, - get_num_audio_devices, - AudioDevice, - AUDIO_F32, - AUDIO_ALLOW_FORMAT_CHANGE -) - -pg.mixer.pre_init(44100, 32, 2, 512) -pg.init() - -# init_subsystem(INIT_AUDIO) -names = [get_audio_device_name(x, 1) for x in range(get_num_audio_devices(1))] -print(names) - -iscapture = 1 -sounds = [] -sound_chunks = [] - -def callback(audiodevice, audiomemoryview): - """ This is called in the sound thread. - - Note, that the frequency and such you request may not be what you get. - """ - # print(type(audiomemoryview), len(audiomemoryview)) - # print(audiodevice) - sound_chunks.append(bytes(audiomemoryview)) - - -audio = AudioDevice( - devicename=names[0], - iscapture=1, - frequency=44100, - audioformat=AUDIO_F32, - numchannels=2, - chunksize=512, - allowed_changes=AUDIO_ALLOW_FORMAT_CHANGE, - callback=callback, -) -# start recording. 
-audio.pause(0) - -print('recording with :%s:' % names[0]) -time.sleep(5) - - -print('Turning data into a pygame.mixer.Sound') -sound = pg.mixer.Sound(buffer=b''.join(sound_chunks)) - -print('playing back recorded sound') -sound.play() -time.sleep(5) diff --git a/venv/lib/python3.7/site-packages/pygame/examples/blend_fill.py b/venv/lib/python3.7/site-packages/pygame/examples/blend_fill.py deleted file mode 100644 index 822fa82..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/blend_fill.py +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/env python -import os -import pygame -from pygame.locals import * - -def usage (): - print ("Press R, G, B to increase the color channel values,") - print ("1-9 to set the step range for the increment,") - print ("A - ADD, S- SUB, M- MULT, - MIN, + MAX") - print (" to change the blend modes") - - -main_dir = os.path.split(os.path.abspath(__file__))[0] -data_dir = os.path.join(main_dir, 'data') - -def main(): - color = [0, 0, 0] - changed = False - blendtype = 0 - step = 5 - - pygame.init () - screen = pygame.display.set_mode ((640, 480), 0, 32) - screen.fill ((100, 100, 100)) - - image = pygame.image.load (os.path.join (data_dir, "liquid.bmp")).convert() - blendimage = pygame.image.load (os.path.join (data_dir, "liquid.bmp")).convert() - screen.blit (image, (10, 10)) - screen.blit (blendimage, (200, 10)) - - pygame.display.flip () - pygame.key.set_repeat (500, 30) - usage() - - going = True - while going: - for event in pygame.event.get (): - if event.type == QUIT: - going = False - - if event.type == KEYDOWN: - usage () - - if event.key == K_ESCAPE: - going = False - - if event.key == K_r: - color[0] += step - if color[0] > 255: - color[0] = 0 - changed = True - - elif event.key == K_g: - color[1] += step - if color[1] > 255: - color[1] = 0 - changed = True - - elif event.key == K_b: - color[2] += step - if color[2] > 255: - color[2] = 0 - changed = True - - elif event.key == K_a: - blendtype = BLEND_ADD - changed = True 
- elif event.key == K_s: - blendtype = BLEND_SUB - changed = True - elif event.key == K_m: - blendtype = BLEND_MULT - changed = True - elif event.key == K_PLUS: - blendtype = BLEND_MAX - changed = True - elif event.key == K_MINUS: - blendtype = BLEND_MIN - changed = True - - elif event.key in (K_1, K_2, K_3, K_4, K_5, K_6, K_7, K_8, K_9): - step = int (event.unicode) - - if changed: - screen.fill ((100, 100, 100)) - screen.blit (image, (10, 10)) - blendimage.blit (image, (0, 0)) - #blendimage.fill (color, (0, 0, 20, 20), blendtype) - blendimage.fill (color, None, blendtype) - screen.blit (blendimage, (200, 10)) - print ("Color: %s, Pixel (0,0): %s" % - (tuple(color), - [blendimage.get_at ((0, 0))])) - changed = False - pygame.display.flip () - - - pygame.quit() - - -if __name__ == '__main__': - main() diff --git a/venv/lib/python3.7/site-packages/pygame/examples/blit_blends.py b/venv/lib/python3.7/site-packages/pygame/examples/blit_blends.py deleted file mode 100644 index 33fe214..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/blit_blends.py +++ /dev/null @@ -1,193 +0,0 @@ -#!/usr/bin/env python - -# fake additive blending. Using NumPy. it doesn't clamp. -# press r,g,b - -import os, pygame -from pygame.locals import * - -try: - import pygame.surfarray - import numpy -except: - print ("no surfarray for you! install numpy") - -import time - -main_dir = os.path.split(os.path.abspath(__file__))[0] -data_dir = os.path.join(main_dir, 'data') - -def main(): - pygame.init() - pygame.mixer.quit() # remove ALSA underflow messages for Debian squeeze - screen = pygame.display.set_mode((640, 480)) - - im1= pygame.Surface(screen.get_size()) - #im1= im1.convert() - im1.fill((100, 0, 0)) - - - - im2= pygame.Surface(screen.get_size()) - im2.fill((0, 50, 0)) - # we make a srcalpha copy of it. 
- #im3= im2.convert(SRCALPHA) - im3 = im2 - im3.set_alpha(127) - - images = {} - images[K_1] = im2 - images[K_2] = pygame.image.load(os.path.join(data_dir, "chimp.bmp")) - images[K_3] = pygame.image.load(os.path.join(data_dir, "alien3.gif")) - images[K_4] = pygame.image.load(os.path.join(data_dir, "liquid.bmp")) - img_to_blit = im2.convert() - iaa = img_to_blit.convert_alpha() - - - - blits = {} - blits[K_a] = BLEND_ADD - blits[K_s] = BLEND_SUB - blits[K_m] = BLEND_MULT - blits[K_EQUALS] = BLEND_MAX - blits[K_MINUS] = BLEND_MIN - - blitsn = {} - blitsn[K_a] = "BLEND_ADD" - blitsn[K_s] = "BLEND_SUB" - blitsn[K_m] = "BLEND_MULT" - blitsn[K_EQUALS] = "BLEND_MAX" - blitsn[K_MINUS] = "BLEND_MIN" - - - screen.blit(im1, (0, 0)) - pygame.display.flip() - clock = pygame.time.Clock() - print ("one pixel is:%s:" % [im1.get_at((0,0))]) - - going = True - while going: - clock.tick(60) - - for event in pygame.event.get(): - if event.type == QUIT: - going = False - if event.type == KEYDOWN: - usage() - - if event.type == KEYDOWN and event.key == K_ESCAPE: - going = False - - elif event.type == KEYDOWN and event.key in images.keys(): - img_to_blit = images[event.key] - iaa = img_to_blit.convert_alpha() - - elif event.type == KEYDOWN and event.key in blits.keys(): - t1 = time.time() - # blits is a dict keyed with key -> blit flag. eg BLEND_ADD. - im1.blit(img_to_blit, (0,0), None, blits[event.key]) - t2 = time.time() - print ("one pixel is:%s:" % [im1.get_at((0,0))]) - print ("time to do:%s:" % (t2-t1)) - - - elif event.type == KEYDOWN and event.key in [K_t]: - - for bkey in blits.keys(): - t1 = time.time() - - for x in range(300): - im1.blit(img_to_blit, (0,0), None, blits[bkey]) - - t2 = time.time() - - # show which key we're doing... - onedoing = blitsn[bkey] - print ("time to do :%s: is :%s:" % (onedoing, t2-t1)) - - - elif event.type == KEYDOWN and event.key in [K_o]: - t1 = time.time() - # blits is a dict keyed with key -> blit flag. eg BLEND_ADD. 
- im1.blit(iaa, (0,0)) - t2 = time.time() - print ("one pixel is:%s:" % [im1.get_at((0,0))]) - print ("time to do:%s:" % (t2-t1)) - - - elif event.type == KEYDOWN and event.key == K_SPACE: - # this additive blend without clamp two surfaces. - #im1.set_alpha(127) - #im1.blit(im1, (0,0)) - #im1.set_alpha(255) - t1 = time.time() - - im1p = pygame.surfarray.pixels2d(im1) - im2p = pygame.surfarray.pixels2d(im2) - im1p += im2p - del im1p - del im2p - t2 = time.time() - print ("one pixel is:%s:" % [im1.get_at((0,0))]) - print ("time to do:%s:" % (t2-t1)) - - elif event.type == KEYDOWN and event.key in [K_z]: - t1 = time.time() - im1p = pygame.surfarray.pixels3d(im1) - im2p = pygame.surfarray.pixels3d(im2) - im1p16 = im1p.astype(numpy.uint16) - im2p16 = im1p.astype(numpy.uint16) - im1p16 += im2p16 - im1p16 = numpy.minimum(im1p16, 255) - pygame.surfarray.blit_array(im1, im1p16) - - del im1p - del im2p - t2 = time.time() - print ("one pixel is:%s:" % [im1.get_at((0,0))]) - print ("time to do:%s:" % (t2-t1)) - - elif event.type == KEYDOWN and event.key in [K_r, K_g, K_b]: - # this adds one to each pixel. - colmap={} - colmap[K_r] = 0x10000 - colmap[K_g] = 0x00100 - colmap[K_b] = 0x00001 - im1p = pygame.surfarray.pixels2d(im1) - im1p += colmap[event.key] - del im1p - print ("one pixel is:%s:" % [im1.get_at((0,0))]) - - elif event.type == KEYDOWN and event.key == K_p: - print ("one pixel is:%s:" % [im1.get_at((0,0))]) - - - - - - elif event.type == KEYDOWN and event.key == K_f: - # this additive blend without clamp two surfaces. 
- - t1 = time.time() - im1.set_alpha(127) - im1.blit(im2, (0,0)) - im1.set_alpha(255) - - t2 = time.time() - print ("one pixel is:%s:" % [im1.get_at((0,0))]) - print ("time to do:%s:" % (t2-t1)) - - - screen.blit(im1, (0, 0)) - pygame.display.flip() - - pygame.quit() - -def usage(): - print ("press keys 1-5 to change image to blit.") - print ("A - ADD, S- SUB, M- MULT, - MIN, + MAX") - print ("T - timing test for special blend modes.") - -if __name__ == '__main__': - usage() - main() diff --git a/venv/lib/python3.7/site-packages/pygame/examples/camera.py b/venv/lib/python3.7/site-packages/pygame/examples/camera.py deleted file mode 100644 index 9967d81..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/camera.py +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/env python - -# 1. Basic image capturing and displaying using the camera module - -import pygame -import pygame.camera -from pygame.locals import * - - -class VideoCapturePlayer(object): - - size = ( 640, 480 ) - def __init__(self, **argd): - self.__dict__.update(**argd) - super(VideoCapturePlayer, self).__init__(**argd) - - # create a display surface. standard pygame stuff - self.display = pygame.display.set_mode( self.size, 0 ) - self.init_cams(0) - - def init_cams(self, which_cam_idx): - - # gets a list of available cameras. - self.clist = pygame.camera.list_cameras() - print (self.clist) - - if not self.clist: - raise ValueError("Sorry, no cameras detected.") - - try: - cam_id = self.clist[which_cam_idx] - except IndexError: - cam_id = self.clist[0] - - # creates the camera of the specified size and in RGB colorspace - self.camera = pygame.camera.Camera(cam_id, self.size, "RGB") - - # starts the camera - self.camera.start() - - self.clock = pygame.time.Clock() - - # create a surface to capture to. for performance purposes, you want the - # bit depth to be the same as that of the display surface. 
- self.snapshot = pygame.surface.Surface(self.size, 0, self.display) - - def get_and_flip(self): - # if you don't want to tie the framerate to the camera, you can check and - # see if the camera has an image ready. note that while this works - # on most cameras, some will never return true. - if 0 and self.camera.query_image(): - # capture an image - - self.snapshot = self.camera.get_image(self.snapshot) - - if 0: - self.snapshot = self.camera.get_image(self.snapshot) - #self.snapshot = self.camera.get_image() - - # blit it to the display surface. simple! - self.display.blit(self.snapshot, (0,0)) - else: - self.snapshot = self.camera.get_image(self.display) - #self.display.blit(self.snapshot, (0,0)) - - - pygame.display.flip() - - def main(self): - going = True - while going: - events = pygame.event.get() - for e in events: - if e.type == QUIT or (e.type == KEYDOWN and e.key == K_ESCAPE): - going = False - if e.type == KEYDOWN: - if e.key in range(K_0, K_0+10) : - self.init_cams(e.key - K_0) - - - self.get_and_flip() - self.clock.tick() - print (self.clock.get_fps()) - -def main(): - pygame.init() - pygame.camera.init() - VideoCapturePlayer().main() - pygame.quit() - -if __name__ == '__main__': - main() diff --git a/venv/lib/python3.7/site-packages/pygame/examples/chimp.py b/venv/lib/python3.7/site-packages/pygame/examples/chimp.py deleted file mode 100644 index 2e703cb..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/chimp.py +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/env python -""" -This simple example is used for the line-by-line tutorial -that comes with pygame. It is based on a 'popular' web banner. -Note there are comments here, but for the full explanation, -follow along in the tutorial. 
-""" - - -# Import Modules -import os, pygame -from pygame.locals import * -from pygame.compat import geterror - -if not pygame.font: print('Warning, fonts disabled') -if not pygame.mixer: print('Warning, sound disabled') - -main_dir = os.path.split(os.path.abspath(__file__))[0] -data_dir = os.path.join(main_dir, 'data') - - -# functions to create our resources -def load_image(name, colorkey=None): - fullname = os.path.join(data_dir, name) - try: - image = pygame.image.load(fullname) - except pygame.error: - print('Cannot load image:', fullname) - raise SystemExit(str(geterror())) - image = image.convert() - if colorkey is not None: - if colorkey is -1: - colorkey = image.get_at((0, 0)) - image.set_colorkey(colorkey, RLEACCEL) - return image, image.get_rect() - - -def load_sound(name): - class NoneSound: - def play(self): pass - if not pygame.mixer or not pygame.mixer.get_init(): - return NoneSound() - fullname = os.path.join(data_dir, name) - try: - sound = pygame.mixer.Sound(fullname) - except pygame.error: - print('Cannot load sound: %s' % fullname) - raise SystemExit(str(geterror())) - return sound - - -# classes for our game objects -class Fist(pygame.sprite.Sprite): - """moves a clenched fist on the screen, following the mouse""" - def __init__(self): - pygame.sprite.Sprite.__init__(self) #call Sprite initializer - self.image, self.rect = load_image('fist.bmp', -1) - self.punching = 0 - - def update(self): - """move the fist based on the mouse position""" - pos = pygame.mouse.get_pos() - self.rect.midtop = pos - if self.punching: - self.rect.move_ip(5, 10) - - def punch(self, target): - """returns true if the fist collides with the target""" - if not self.punching: - self.punching = 1 - hitbox = self.rect.inflate(-5, -5) - return hitbox.colliderect(target.rect) - - def unpunch(self): - """called to pull the fist back""" - self.punching = 0 - - -class Chimp(pygame.sprite.Sprite): - """moves a monkey critter across the screen. 
it can spin the - monkey when it is punched.""" - def __init__(self): - pygame.sprite.Sprite.__init__(self) # call Sprite intializer - self.image, self.rect = load_image('chimp.bmp', -1) - screen = pygame.display.get_surface() - self.area = screen.get_rect() - self.rect.topleft = 10, 10 - self.move = 9 - self.dizzy = 0 - - def update(self): - """walk or spin, depending on the monkeys state""" - if self.dizzy: - self._spin() - else: - self._walk() - - def _walk(self): - """move the monkey across the screen, and turn at the ends""" - newpos = self.rect.move((self.move, 0)) - if not self.area.contains(newpos): - if self.rect.left < self.area.left or \ - self.rect.right > self.area.right: - self.move = -self.move - newpos = self.rect.move((self.move, 0)) - self.image = pygame.transform.flip(self.image, 1, 0) - self.rect = newpos - - def _spin(self): - """spin the monkey image""" - center = self.rect.center - self.dizzy = self.dizzy + 12 - if self.dizzy >= 360: - self.dizzy = 0 - self.image = self.original - else: - rotate = pygame.transform.rotate - self.image = rotate(self.original, self.dizzy) - self.rect = self.image.get_rect(center=center) - - def punched(self): - """this will cause the monkey to start spinning""" - if not self.dizzy: - self.dizzy = 1 - self.original = self.image - - -def main(): - """this function is called when the program starts. 
- it initializes everything it needs, then runs in - a loop until the function returns.""" - # Initialize Everything - pygame.init() - screen = pygame.display.set_mode((468, 60)) - pygame.display.set_caption('Monkey Fever') - pygame.mouse.set_visible(0) - - # Create The Backgound - background = pygame.Surface(screen.get_size()) - background = background.convert() - background.fill((250, 250, 250)) - - # Put Text On The Background, Centered - if pygame.font: - font = pygame.font.Font(None, 36) - text = font.render("Pummel The Chimp, And Win $$$", 1, (10, 10, 10)) - textpos = text.get_rect(centerx=background.get_width()/2) - background.blit(text, textpos) - - # Display The Background - screen.blit(background, (0, 0)) - pygame.display.flip() - - # Prepare Game Objects - clock = pygame.time.Clock() - whiff_sound = load_sound('whiff.wav') - punch_sound = load_sound('punch.wav') - chimp = Chimp() - fist = Fist() - allsprites = pygame.sprite.RenderPlain((fist, chimp)) - - # Main Loop - going = True - while going: - clock.tick(60) - - # Handle Input Events - for event in pygame.event.get(): - if event.type == QUIT: - going = False - elif event.type == KEYDOWN and event.key == K_ESCAPE: - going = False - elif event.type == MOUSEBUTTONDOWN: - if fist.punch(chimp): - punch_sound.play() # punch - chimp.punched() - else: - whiff_sound.play() # miss - elif event.type == MOUSEBUTTONUP: - fist.unpunch() - - allsprites.update() - - # Draw Everything - screen.blit(background, (0, 0)) - allsprites.draw(screen) - pygame.display.flip() - - pygame.quit() - -# Game Over - - -# this calls the 'main' function when this script is executed -if __name__ == '__main__': - main() diff --git a/venv/lib/python3.7/site-packages/pygame/examples/cursors.py b/venv/lib/python3.7/site-packages/pygame/examples/cursors.py deleted file mode 100644 index 2bcc9e4..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/cursors.py +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/env python - -import pygame - 
- -arrow = ( "xX ", - "X.X ", - "X..X ", - "X...X ", - "X....X ", - "X.....X ", - "X......X ", - "X.......X ", - "X........X ", - "X.........X ", - "X......XXXXX ", - "X...X..X ", - "X..XX..X ", - "X.X XX..X ", - "XX X..X ", - "X X..X ", - " X..X ", - " X..X ", - " X..X ", - " XX ", - " ", - " ", - " ", - " ") - - -no = (" ", - " ", - " XXXXXX ", - " XX......XX ", - " X..........X ", - " X....XXXX....X ", - " X...XX XX...X ", - " X.....X X...X ", - " X..X...X X..X ", - " X...XX...X X...X ", - " X..X X...X X..X ", - " X..X X...X X..X ", - " X..X X.,.X X..X ", - " X..X X...X X..X ", - " X...X X...XX...X ", - " X..X X...X..X ", - " X...X X.....X ", - " X...XX X...X ", - " X....XXXXX...X ", - " X..........X ", - " XX......XX ", - " XXXXXX ", - " ", - " ", - ) - -def TestCursor(arrow): - hotspot = None - for y in range(len(arrow)): - for x in range(len(arrow[y])): - if arrow[y][x] in ['x', ',', 'O']: - hotspot = x,y - break - if hotspot != None: - break - if hotspot == None: - raise Exception("No hotspot specified for cursor '%s'!" 
% -cursorname) - s2 = [] - for line in arrow: - s2.append(line.replace('x', 'X').replace(',', '.').replace('O', -'o')) - cursor, mask = pygame.cursors.compile(s2, 'X', '.', 'o') - size = len(arrow[0]), len(arrow) - pygame.mouse.set_cursor(size, hotspot, cursor, mask) - -def main(): - pygame.init() - pygame.font.init() - font = pygame.font.Font(None, 24) - bg = pygame.display.set_mode((800, 600), 0, 24) - bg.fill((255,255,255)) - bg.blit(font.render("Click to advance", 1, (0, 0, 0)), (0, 0)) - pygame.display.update() - for cursor in [no, arrow]: - TestCursor(cursor) - going = True - while going: - pygame.event.pump() - for e in pygame.event.get(): - if e.type == pygame.MOUSEBUTTONDOWN: - going = False - pygame.quit() - - -if __name__ == '__main__': - main() - diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/alien1.gif b/venv/lib/python3.7/site-packages/pygame/examples/data/alien1.gif deleted file mode 100644 index c4497e0..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/alien1.gif and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/alien1.jpg b/venv/lib/python3.7/site-packages/pygame/examples/data/alien1.jpg deleted file mode 100644 index 6d110a4..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/alien1.jpg and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/alien1.png b/venv/lib/python3.7/site-packages/pygame/examples/data/alien1.png deleted file mode 100644 index 471d6a4..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/alien1.png and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/alien2.gif b/venv/lib/python3.7/site-packages/pygame/examples/data/alien2.gif deleted file mode 100644 index 8df05a3..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/alien2.gif and /dev/null differ diff --git 
a/venv/lib/python3.7/site-packages/pygame/examples/data/alien2.png b/venv/lib/python3.7/site-packages/pygame/examples/data/alien2.png deleted file mode 100644 index aef5ace..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/alien2.png and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/alien3.gif b/venv/lib/python3.7/site-packages/pygame/examples/data/alien3.gif deleted file mode 100644 index 5305d41..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/alien3.gif and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/alien3.png b/venv/lib/python3.7/site-packages/pygame/examples/data/alien3.png deleted file mode 100644 index 90d0f7c..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/alien3.png and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/arraydemo.bmp b/venv/lib/python3.7/site-packages/pygame/examples/data/arraydemo.bmp deleted file mode 100644 index ad96338..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/arraydemo.bmp and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/asprite.bmp b/venv/lib/python3.7/site-packages/pygame/examples/data/asprite.bmp deleted file mode 100644 index cc96356..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/asprite.bmp and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/background.gif b/venv/lib/python3.7/site-packages/pygame/examples/data/background.gif deleted file mode 100644 index 5041ce6..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/background.gif and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/blue.mpg b/venv/lib/python3.7/site-packages/pygame/examples/data/blue.mpg deleted file mode 100644 index 60dceca..0000000 Binary files 
a/venv/lib/python3.7/site-packages/pygame/examples/data/blue.mpg and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/bomb.gif b/venv/lib/python3.7/site-packages/pygame/examples/data/bomb.gif deleted file mode 100644 index 02271c3..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/bomb.gif and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/boom.wav b/venv/lib/python3.7/site-packages/pygame/examples/data/boom.wav deleted file mode 100644 index f19126a..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/boom.wav and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/brick.png b/venv/lib/python3.7/site-packages/pygame/examples/data/brick.png deleted file mode 100644 index cfe37a3..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/brick.png and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/car_door.wav b/venv/lib/python3.7/site-packages/pygame/examples/data/car_door.wav deleted file mode 100644 index 60acf9e..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/car_door.wav and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/chimp.bmp b/venv/lib/python3.7/site-packages/pygame/examples/data/chimp.bmp deleted file mode 100644 index ec5f88a..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/chimp.bmp and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/city.png b/venv/lib/python3.7/site-packages/pygame/examples/data/city.png deleted file mode 100644 index 202da5c..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/city.png and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/danger.gif b/venv/lib/python3.7/site-packages/pygame/examples/data/danger.gif deleted file mode 
100644 index 106d69c..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/danger.gif and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/explosion1.gif b/venv/lib/python3.7/site-packages/pygame/examples/data/explosion1.gif deleted file mode 100644 index fabec16..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/explosion1.gif and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/fist.bmp b/venv/lib/python3.7/site-packages/pygame/examples/data/fist.bmp deleted file mode 100644 index a75f12e..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/fist.bmp and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/house_lo.mp3 b/venv/lib/python3.7/site-packages/pygame/examples/data/house_lo.mp3 deleted file mode 100644 index 4c26994..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/house_lo.mp3 and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/house_lo.ogg b/venv/lib/python3.7/site-packages/pygame/examples/data/house_lo.ogg deleted file mode 100644 index e050848..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/house_lo.ogg and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/house_lo.wav b/venv/lib/python3.7/site-packages/pygame/examples/data/house_lo.wav deleted file mode 100644 index 68a96b8..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/house_lo.wav and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/liquid.bmp b/venv/lib/python3.7/site-packages/pygame/examples/data/liquid.bmp deleted file mode 100644 index c4f12eb..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/liquid.bmp and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/midikeys.png 
b/venv/lib/python3.7/site-packages/pygame/examples/data/midikeys.png deleted file mode 100644 index 74ecb86..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/midikeys.png and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/oldplayer.gif b/venv/lib/python3.7/site-packages/pygame/examples/data/oldplayer.gif deleted file mode 100644 index 93906ab..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/oldplayer.gif and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/player1.gif b/venv/lib/python3.7/site-packages/pygame/examples/data/player1.gif deleted file mode 100644 index 6c4eda7..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/player1.gif and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/punch.wav b/venv/lib/python3.7/site-packages/pygame/examples/data/punch.wav deleted file mode 100644 index aa3f56c..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/punch.wav and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/sans.ttf b/venv/lib/python3.7/site-packages/pygame/examples/data/sans.ttf deleted file mode 100644 index 09fac2f..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/sans.ttf and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/secosmic_lo.wav b/venv/lib/python3.7/site-packages/pygame/examples/data/secosmic_lo.wav deleted file mode 100644 index 867f802..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/secosmic_lo.wav and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/shot.gif b/venv/lib/python3.7/site-packages/pygame/examples/data/shot.gif deleted file mode 100644 index 18de528..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/shot.gif and /dev/null differ 
diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/static.png b/venv/lib/python3.7/site-packages/pygame/examples/data/static.png deleted file mode 100644 index fb3b057..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/static.png and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/whiff.wav b/venv/lib/python3.7/site-packages/pygame/examples/data/whiff.wav deleted file mode 100644 index 3954efa..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/data/whiff.wav and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/data/yuv_1.pgm b/venv/lib/python3.7/site-packages/pygame/examples/data/yuv_1.pgm deleted file mode 100644 index a59b383..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/data/yuv_1.pgm +++ /dev/null @@ -1,6 +0,0 @@ -P5 -752 864 -255 -€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€                                   €€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€     -              €€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€                             €€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€                                 €€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€>Zideigcgffedcbbeeeeeeeedddddddd_adglortsuy~„ˆŒŽ’”˜£¨¬®º¹º¾ÄÇÉÈÏÙÁ§¸ÏÒÒÈÇÇÅÄÃÂÁ¼¹µ®¨¢ž›˜˜•Š†…‡xwyy{|w}~‚ytvtvrxuw{vwvvwwusrtutnnoligdgkkhhlnnoopppoononkjjd_crxqnrqqlihmnkkmklmnnnmlkghiiigfegggghhhhkgeedbdgeeeeeeeecegfeddeefilpsuwuw|ƒŒ’—™£¥©®³¸¼¾ÂÄÈÊÌÊÉÇÉÆÀ¸±«§¤ 
˜”‘‰†€‚€|xvvxwwwz{{y~~|{yxwv{{{{{{{{vvuvvvvvxvuuwwvsstwyyuvwsoorsoquuttrponmusrmhlojflppnlifgmpqrlhioqrokhikstsniegisqmigiospqkcckqqnkd`hrnbQJC3,.,-OU>0444.&0>FO[T<3FRPMTZ[MKILQLDDMLIHHGDCFXZEBY_IAYeO8@Wa]][ZZZ[\^XRK<,*00259=98666=<>=97EW<%$#.@DSgkVKs¥©dC{ªˆ[_eQ<555512369;=?BEABDFHIIIHIJKLLLLPPQQRSSTTTUUVWWXZ[\]_`bbcccddeeegggffeeegghhiijjmmkjiiiikkllnnopnooppqqrpqrqpnljja`aWUcrvnmpxqN25=<<==?79521/--0**+++,,,)07;:89;CCDDGIKLNNNNNMMMRRRRRRRR€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€=ctkhmjekkjigfddddddddddcccbbbbaeeddddcceeddddccbcdgilmnnoqvz‚Š”„{‘ŸŸ¦¬®²¸½ÃÇÉÐÐÐÐÏÏÏÏÊËËÇÂÁÄÇ°ƒjrxtrrtvvnuƒ}}}‚wx{wz{~{yxurtzspmmonifflnjglpqqrrsrrqponihjd`gnypntsrjgfkmhjononmlklmnkklmlkihjjkkklllokggfegkiiiiiiiigffgiigeggffeeddfecdefedhjloruwx~†•ž¥©ººº¼¿ÃÇÉÌÊÆÆÆÆÅÄÂÁ¿¸¯¦¡ž—•’ŽŠ‡„‚}}}}||||{{zzyxxx|{zz||{z|yvvxxurtwyzxwxzuppuursxuututttttstoinqidiorojfdgloqqjehorsohefimoolfdgksqmhehotqsmaakqomlc]fso`RKD2+1-0NY<0940(&1?GOaX60HUNIR[[ZVTVXSNPKIQQDDLJ?JPHFW\NL\d^M?DV]_\XZ\ZYbWNH9))41258;=>>?><:89;><9;?:5?QA''% /=CXacWJo¥¨fIv¡‹`[hT;534;89;>ABCDGJGHIJJIIIJKLNOPPPRRSTVWXXZZ[\]__`abbccddeccddeeffhhhhggggeffhijkkkkjjklmnoooooppplkkjiihh`_\ZVRONH??A;A]vrptswrM//5//.,0*.,*+0588579;>ACDDIOQONOQTSRRQRRRVVWXYZ[[`abcdefg€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Bgvlkpnmqqponmlliiiiiiiiiihhggffhggggfffffghiijjiiiiiiiieffedcddcd_ahecknprtwz|}‡‰Ž“™Ÿ£¦­±µ¸º¿Èϼ[emhioomgd‡¦œ•œ•yrxvywxqr||vuvwyslopgilkgipspuuuuutssrogeg`\expntqsnjeilhmtrponmlmnnmmoonljimmmmnnnnplkkkkmplllllllljihjkkihllllllllonmllljigghijklljhebbcfhhimqw|€‚Ž’™£¬²µÈÌÒÖ×ØÚÛÐÒÑÊÀ¹¸º­«§¢œ—“‘ˆ‡…ƒ~}|~|z{|}{y}yyzzuqu…‚€‚|urrtutuxuuvvwwxxpptrmrtkfhnsogcfjmnnmgflnpqmhdegmookecgkppokhinrqsnfekolmnhafqpfQJH5/601PV;/61-(&4CHR`T12HTMGQ[^TILSOPWXY[b^QMTWKOPE?LSHGZ_SJGHLN[^XY[[Z_SJE9**302479::9<==<;;=>=77;95?OC*'##187BAAILMLIHJLMMMMMLKJPQSUVWWWWWXYZZ[[^^_``abbeeddcbbbcccdeeffffghiijjiijjklmmllkkllmnkjihfedcZYVSOLIHHGEBAABCB@LMJPPQVTOY`XSdeNBPQ>>Wc]Y[^bXSPI9((12358998748<>=:9999<=73@SD#() 
2@CVgdSLq¤¤fG}¦‰b_fY??=9ADNOQSQMKLOKMNQRTTTSTVXYZ[[\]]]]^^^````aaaabbbbbcccccddefggdefhjlmnjjjkkkllgec`][YYUTROLJHFDDCBA@@?=>?AABAAFEIH@Ldojp{svvL)'.6R[QVVTgwvqqrqtttsssrrtojilnljmmlmmopqssrqponnlmnopqqr€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Gnxptuqsxusttsrroqsuurrwtrssqsurtqqsuwuqrqqsrppsqpsusmjkppqrolmphmnkhhjjmigihghliffhgdbcdeeeddegc_cgccgecbcfjors|pgghgjo\PKNPNJC<<=>?CFENQQLLJ5.B^`[\\`^TPI8)*0/14789:;<;9:<:9<9=<=;8DU@'&%!.:FTefSLq©¡cEz¦Ša]cVHB=:@GOTTTUTSSTRQSWZZYYWUZ_]\^^]]^_^^`cba`abcefdccdedegeeghhmojjihijhebfb][ZXSOOMJHHHGFBBAAA??DC@ACCDEDJKKMOPRSXX[ZTVesopurwuQ01:I_hghc]ntqstsvvwwwy{{z|xpkjjkmrnlnppprppqponpsqrqpopqr€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Gmyrvxutqtvtssttvxwtttrrutrrtwusstvvtttuurrtussusttrqpoorqstspquusqqpnnpsolllknrljjnqnjhnljiiihggjmjdgigfedcbcdedddddcddcdec^``[YVUVUSOKOKMNMX^UG@IVXEAb}ZOplKd“˜Œzvzvszpfiw~}yttsswxusqxwvwxxuqwtqqtspmqtqruttvrssrtvvtprsrqrrrquvwxut{vuuvsqtyvqsvtrsuvvvwvuuuwtqqsusqpqqpqsrnqoooommmjiikkjknhgeddfgigghmnnrw€…‹Ž“š¢±¬®²·À¹¤Š––‘˜ž|‹ž”–’Ž˜‹~w{~y{zz|yx||vpmkknleipsqoidgoqpomfdjrrplfdioprphdhmnpoibdlonlke_enj^UNK8-411IWA141.)'7DGVaM67MRNIM[W64MJ?ORLJV?1HJ79SD;745996;78;?@816IW]]XZ`TLE3(+213566777:=<8789;;:8=<4@UA%%&#,9GWhjXPtª 
dEz¥Šb]cVJIHHNRW\ZY[[ZYYYYXYZ]^^[\[\]^^^`a``^^^__`a`_beggfgiihghggif``b^`\VRPNMKLJHHHHFD??@CEDCDCCCEGFGJNMORSTVWXZ]`aa_^c`_\VXgvvppqywS39JXfklnjhqvy{{{~‚„‚…„€wljomknolmqspnoroorqoostrqrqprv€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Dn~wwvtwxwvxyyxvyxvwxutwxytrvvsrsuvtropruqortrqquvtpqvxwuttutqqtvtstvvtrpoqttrqqpnnrtspnrpnlllkinmmnnljgklkjhgijkfeggefhdefc``a_ec``c_[]^[\[UQPLQOQNONIOVPL]WHhŒˆzyyzup~x\Yv—£‰_YqŒ|}‰zjpuvtttsxwponqotvwwxwsurrrrsttssssssqsusvvxzvsvywwxwtuwxtvxvz{uvvwxwuwztvxwwwuqswxuuvrlwurprtsptpnqtrnmsqpooprsolklnmmnljjklijmqmoqt{~uyƒƒ{~…€u¨¯±»·´Ä“rrxy‡Ž‘‹‹™•ylonlmheoropiekopoonhflqopmeelpqtpgdflrsphaclrrmnh`frn_WMK9/312IWA141-)(6DJY_I4:NPNNS]V53KB:RTMUR46[Q7FWKLQNKLHDFHHGKPJ906IZ^Z\^QJC3(+0025677889:98:<96=;9<<6@SC$$'$+7HXggSLr¬¤dEz¦Ša]cXOQSW\\\a^\\]]]]Z\]ZZ[\[\^[Y]^\\aa`^_bca`figdddc`__`^ZWVTQQOMLJDFEDDEEDCCCCBBAAAFGIJKKLLNOOSWWWZVY__YX`hcdfgiijie\WUUZemrlnpwrT?AB@@@@BBCBBEGHJJKLNQUWVUYZX]]]`dcabhgihb_beggeddefgbUKMQ[ekqmpqtoS?J]]dnmnngceijjhdfghiijlnlmoomnompoooonmlpnlmqspknqrrstsq€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€F{…wŠƒ~‡€i^gnkquwxz||}|z{zz|~{w{{{€}xxwrpsvurqurqruuuuyvvxxvvwwwvtuvwvsttttuuuuvvtrrstsstsssuwnstpnorutsrrsssrlpqpnnoplmliiknogejh`bif_`^\^\Z\[\ZY`^Y]XY`[\^[ckpppnmrk\UUUQU\YWY_[X`_UTWYZZYTNNQ[ƒ¡žœ 
ua_bagjjlkknqrrrstvwxyz{{‚€~~€y€ywz|yz|~~|{{}{~{wvx{~}{yz|ys{{{yyxxxw{{xx{}|}xvvvuvxvwz{{xvtuvvtssuutomoqqpqqnossqokroquxyzyodadaacbafijmllqst|tm|„€zoknmiijmopnnjhhoomnledlprqkfchqpqqkehqsqrlcagnpqme`gpnfWMK;1302NW>/64.%(;GJ[`E.9LRNHT`M7HVPQ>)F\B>ZdMEYWIFU^OEKGNbV;EO6,32/5?LW\NIB1)./13678899789;=:9;=74<=5?SA''%",9EUegUPv±©_F©‡\\h`WWWZa_\^`a_`a_Y\]^__^_a^]__][\[XVVVVRNKKKIECEFECAAA@>>?=@BA@?BFGHJLMNPPTVYZZ[^_]`ddcba`cbacfdabgdegecb`cddeffggcWNLP[myuoqpxuV/64-$)>HIZ_C.ATWRMW_K/;>8<2*EMFBFLOKEFNLHJIFKDN[N>KK4162112CXZMIC0*///136789:9<;89:::;85;<7ASA)'%!-9DVggTMt­¥_F€©‡[\h`XXUXacaaca^_bb^[[]_^[XYVRRRKJLMJGEFEDDFA@?>>>>>=<=@CDFIIKLNPOPTXY[\\]^`bccb```a`^_cb^]`]]]_cbadfcceefhhghhhhgffcWONR^nytpppxxX<[{mcnmljhkonknnilmlkmpoljpnknmjonoonnpqpopqqppppppprrsrr€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€<^ihrsmnsvshacjmlijpvutw}~„ˆ‘‘“Ÿ«ºÏÕÒÕÕÑÎÌÈ¿¿¸³¯­¬¨¢›•‘‡ƒ‚~~|z{|ysstspqw{{vxxvuvuswvttuuutuqtyxuusssstttttwrosvtpotrpqtoknlonnpmhfedcbacdb`agh`]ccgeabfa]blponprql_[^bb_`acaddaacfbaced_^`ba^^_^\Za]^`^^b__bb_]]]\\^]ZY[\\[\\`hjlqtsuy‚~y}€€€€€€„€}|||~€y|~}}}}z|~€|yy}{zzxvw{yyzzzyxxzzyvwzzv|xuuvuvxvyzwsrstns{|tsy{pollnlikplospmoqurkhjnk_ajqqpkffkprolgdfnpppnfgsqqokfdhnspolggmrqoicempomkfbirncTMK8.410LYA141,'->GJ[\@/EXTRX\]QC?=<8655:9416>=5=FA7;CDBAHLBAH@1.7/,6DU]XLJC/)//.036789:6;=;9878;<9;;9CRC(&&"+8DVeeQJpª¢aF}§ˆ^]f^YYTV`ccgfd_XTVXTRPNMLKJDCBA@ACDBBBA@?@@EBADDDGLMLNQSSUW\VOQZ^]`___^^_cf]^^^]]]^_\\_a`bdabcehhimogcefggehhgghijkaUMOV_krsruqusW=Y{i_rrnjjjjhikkhgklkjkmnmrnilonoqppqpoooprqopsspmopprtsq€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€<[d\]bcb````dfc^abcdhjjllrttwy|€„Ž™®ÊÔÎÎ×Ö×ÙØÖÖÙÛÚ×ÔÖØÕÏÎÉÆÅÄÀ¼»·¸ª•” 
Ÿ’“ˆ…†…‚}€zvvxyttutsstvwuxxtx|yprttttuvwxwuuvursxvqrsqqlmlmnjilmigeabee]__bcacffijfhf^boxqoqqsl][ckfacfgjbdiedfhcaeea`bb^`b`^^`^c^^`aeb^``^_bb_acdbabba]_ba`bddcdfhmqrqqkmsrtxyxy{~~|wx…“‘†šŸŸœ”Œ‰„€~€€{z{~|{z{}~{zww{€ztux||xuuvxvuwwsonrxxty~zqooqstronsqnrusqopoldjsfajqpmihjlsqmmhdimoookbdppopmdbjptvqhejnptupfcinpmme]dqpcXPI1'244IYE22-+)0>EJ\Z>1ATJK]]WT\ZSRPLOMOKHECBBB?:9;845;87726=3/153CX[[[YLH@+'0224677888:868;:77<;8<;6@RD&%(#*6FQdgVPu¬£cEz¦Ša]c^ZZTS]a`cee^K;9@EECAABA>BDA?BC@@ADEEEIKLKKOUUQSZVVX\]\[\`\WZba]^[^`a`^__`abccdefcfeceiieefeddbcggfhjfddeihhgghij_VQSV_ltsrurvvZCc„mapljmoooonlkkonmnmkmqpnpqpqqmprpllrutoqsqnnqttrpqqppq€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€:]hcgdcjeadeadf^_db_cc_`cffghecffhnx…•—š™œ¡¥¨¬°¼»¾ÃÅÄÇÌÌÐÕ××ÖÖ×ØÝ͸¿ÕÚÏÎÊÆÄÄÁº´¹±ª¤ŸŸž˜‹‰‡ƒ‚~zyywz|vuywuvutvyusuvvywvrvutrmqurnnppooieeefgbbcbbfgfgjkecijg^epsoppqthabcegecddjnhchkhfgededaaa`bb]_cca``begfd_`aaabceedbcfdbedda`cc``a^`eeabhghgfgggklmkknllqhja]ÔÚ»²ÌãæáÝÙÓËÇÀ¸°ª¥£—Œ‹‰…‚|wx}{w}~~}€{z{}~|yw|||}zsnuzyvvxzpsusprtrutuwvssukrytjlogckoopmijiptqojdgpproifillophabjnqppmfhnnrsmcckpqole`emleVQM;*.20IWD166,$,FHI^X;0JWOHPVWUSWYUPQSUUSRSUUUUUVOLRPIIGBGFAA8-16JYa`Y[YOK=+(-/-25589:;988::9:;;;=>;5>RI)&+$)9BVbhVPz±§dFz£ˆa^c`XZZZ]^afeg_E25=?@BDDCB@CEGGGHKNNOQRSTTUWVXZ[XWX^]\Z\_`__^^`_]^baabcddb`fffeeeefdgiihghjffgggfedfiheeiiffghhggggcUPTV^ksxttnszbDe‹t`rrlmkmnmlmmmopqpnnpqqqonooonlqsoorrotqprrqqttrsuurpp€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€=`jfigcfdadd`dheghdbdcbcdfedecabdcbcefedkiimnnpsyz}ƒ…†ŠŽ“™ž¢¦ª­´¹®¢¬ÄÍÊÎÎÏÒÖØØ×ÚßÝØØÕÏËÌÃÀÄÁ¶°³ª‡vƒz||}zwyzwwvxxvstzusrrusoroknolpnbkecjied]afiigcclgjlmjjcenqppsvsk`behgeeihgjllkihbdedcb``_ac`_egabbcdddddbdda`dghhgghhfghffccffccic`dfecddcegedffefdcdefg`ca_xœ¦œžÁåñíëííëììììêéçãßÚÖÑÊÁº´ª”Š‡ƒ‚€}{{}€{wvy}~|x{|{{|yup{‚~|}wzsrutuwuxxwvvvussvwpjqulfmqtvphgjnqpqnghpoqrlegpnonhbcjmrpkedhloqpjechnspmfbgongUQP=*,22IWD166,%4EHQeX:5K_]UX[YSFOVTONOPQPQUVUVYYWV\^XQOVVWVZWB19V_Z\[Y]YOK=,*.013349;87:<<::<<:<::<86BUD%"(")9CUbiWOx¯§dH}¦‡_^g`XYZZ_adhcgeN98?GHIJLMNOTTTRPRUXXWUUWXYYXWVVY\^^\^_]\^``acdcabcecdeeddgi
geeigcdjgeccdfghefghgfeehhhigdfidefgghijcXQQS_mstrtrxx]Fb|mdomlmnmmppnnqlkoroopmoonooqqqtpnqsqpprootvtqpssssrssr€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€?hpejlhgjhhgddfghdadhifddfecdedccbbceffeheeffdegfefgfcdfdeffghkmswsnv††Ž‘”™¢§ªª¹¿»ÀÇÈÄËÐÔØÙÒÐÙË”zˆ’’––Œˆ…¢³¡”œ—ƒ‡~uv€z{xvwsttvwsmielhecbfd`chjgehigkkhdd`dsxrptrokgbdmoheijkoljlikfefgfeff`efa_dgbdfgecaabafhfdffdhgiifjnmijklomihlihikjjjljklkkookkjhfhhfonifedgf]sŠ”™Ÿ¥¦©¬²¹ÀÆÉËÖØÝäìîíêðèÝÒÉÿ¼´µ³¯¨¡œ™˜”††‡x‚ƒ}}}wyŠ’‹†‰„yupsxusx~vy{ywv{z{zqiosmcjqqmljdinsqpmhlpssplfgopnongcgmrroiefkorojdejpsqngbgong[QJ;/33-IWE166-'2CGPcW90I`bYY[\WRX\YVUVUSRUXWSTX[TV\ZSSWRUPLYZD9Pc\X`]\]YNJ<,*/004558989789;<<;;878:73=NH)%)$)8BVcjVNv®¥fH}§‰__i`WYZ[`cfgdgaQFGITUVVXXYYWXXWVUWY\YWWY[[Z[]\YZ`dd[_dddddbaeggfgfceffffeddfffffeefeffeddgiegihgefffgihggikffhggghheXQOR_ntuuuqyx^N^fahpjoqlnonnnnnmlnnnoonooppqponpoqsrmmpppqtsqqsttrpqtus€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Amvlqpjorpoppnmosmjlnonmmmkijjiglkihgfddhfegfefheffgeeegjkkkhgedfigfgfdgjjjiijlnmryzy€†ƒ„–˜œ™š¤ }o}†‹’Ž•Š½ÜÀ­Â¡šŸ}[pŽ—““’““Œ¢ž{r}piozrcbbfiljhjjhmlefih_i†ˆpouqsnebbgijkkolpmlqnomiggfffdffgedfiifhiifffgjfgmldchkhkonkkmmmlmnonmmnnnnqqqmponoooqrpppnookopknnlrsllkjmpmgmmmnprtvwy}†Ž–™š¢ žž §®³·½ÅËÍÌÊÉÎÏËÿ½´¨€u{ƒƒ}zƒ’›š™˜‰uqttw|{wy||{||}{y{z|xqtvpijmoonkiiosqojflruqnlgeloopmfdksnoqmfgoqoqlcblsroleafnmf[QM>/12/JXF256.).>FRbV9-L^`[[\_\\][YYZVQVWX[YVTUWVY^YPPWZ\TLTWMN^^X[\WY^WMH;*+/0.36689:<779<<978;;:<;4;OI,&*%*7BWciVMv¬£hGy¥‰a_g_WZ]]bceffe^QPTUZZZZYYXXWYZ[ZZ[\_^^_bcb`befecdfgcdcbdgfdeecbcffeffggfeb`bbbbfihedfhheefidgiigffgfeeffghhefggghijaYSQSanqswvoyz`Q^YXkrmprmpqolmllptpnpnjmomlmnopqlnqrqoprpopstsrrrqruwvrp€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Dnynrutxsrqrusrttqqqonpsvsrrrqpooopoonnnpmmnnmnpmnnkigggjhgefghhffffhddkgeccdefge`cigcdfgjhiokgnofbgkkllnirqo¨•„— ŽŠ‡iSt—–š›¡§¢«äÔz~½¼˜io¤‹¡¤l`Œ¡—r^etsmpnftŒroxqplb`ejjhlpooplntpmnhfghgfejhfgjkjiggghihhglihkkihiloooqqnmonnnopppnopoqsqmtwvqrsstpnpttttqpususptrrpnnpqpnmmkjjklljhgfghihhhhhhkorkpw~„‰Ž‘—¡ 
£¬¯­selokx£Ÿ™•™™Šw‰š•‡‰‡‚ˆ„‚||}|y|z€„{yrimlntqiijmppplehsqooodbkprrkcdkppmmicgmnkqoc^hsrpmfafnmf[QM?001.JYG256/+*+-9E:+,CMQVZZ\\\^_`dd`YZ[\Z[ZWSTY[[\SNRSUVRUWW_]Y^\VX\`VLG8)*/.02249;988:;99:978:8;=4:QC'"&$)7DVdkWLs¬¤fFx¢‡_^e^X]_`bbbgdfcWQW[\[ZYZZ\]^^^^_`bd__``aaaababdca`aedcabcb_dcbbbbdfffffeefgfbaegfedfeeegggfegjihfghlhgjjeeihhhggghiaZTORbpprurmyx]QaXXktklomllmmjijkpomqnmrqonorrsuppqqqpswpoprssrrusqrssst€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Djzusuutssqrrqqrrrrtuuuvvssutsssstttrrrttqpqpnnqpstsrtutsqmlkllljllmoljopnlkjjigijiiljghhkgcffcehfccefgjddhhcioifhkhiiablrpptsv~w„²ªit°¼¤s`–¨«ÎÄgT¦×Äœ`QŒ¯œsƒ°£„„ƒztvrnx~|||~xkotru{tloojdfifbcgjjjkkjkgfegikigfjljlomhmrokosrpppqsrsrpmqrrrtrnuvvtrvywsqswxwvutuuwxutsvuuuspqsppqqqpnnqqqrqrqqjmprrponjjigeegiejmklrwxmkmqlfm{†‡‰‹wŒ­·¯°®®»­‘yx||†‡‡‹‰}…‰ƒrjmomjfgkpqqkeimnoroeckqonkebhnpqngcdinoold`irqrohchpoh]PJ>210,JYG3560-+'#&,*(../.6;:==AFIKMQSRWZZZ\a_Z^b][`_Z\XX]]ZXX]]Z^]\`^cVLE6'*.-/3336765987679:;6869<5:PE*#'%*6DTenYKr¬§bFz ‚\]e_X\__babfdfcWTZ^_____```_`_``aaaccb`__`afa_beecc]befddca_bgieabfhdacgieaefggffedeghihhgfegijhhghjjjjhfgjkkjhffgheWQSU_mtusqq{v\T^YYjrjioomkmopnllmopmmnpqporromnorrrpqrsoprrqprtsttrqrtv€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Chxttrsyvuuuwxxuxxuswxvwutvvtssrvwwurpqrvssssqqtpsussuwvstvyzvplpttqsrnmoqrrpoopptpikmjfinnhfhjiigggffhjdgjhghhefbbdfgfhfcbcec^bbciib_gmigZhoqƒ|[T{’ŒiGa™¥Žn|¢žwvxqqqs‡±¼½±µ¾œbw²¬{dbo~ŽˆŠƒtorpllmjimjhghlmkhmlnqpmnppnostpnqtqrrpqtsnrutuvwurrvxwvwuwwuvxxutzuttwyxyxvvyyuttuuvvvussqsuvvutssrrqqpooopqponoppqqomllkknpoqsngjqxzwrqrt{{{‚†Œ——|kmnjvŠ˜™š… 
ªžxhqqkjiflrsqidimnpqmggkpmlkedkskppjefmttnkfciqqrohchpohWPOB.*-0JZH3561/)--**,+)-+&()&(&'*+)*/477:=>CHKJSWVTY\[[__ddbb_ba_`a`\\bWLF6'*.,.3655667:767879<<:9:==DRI.&('*5CVdkWNu®¦`H{ž€]_d`WZ[\acfhff`UU]```bcb`_]``abba`_eca`````da_`bdddadecceghcegfdcfigc`ciic\_fifeiigdhlligghfgiijihhchiedgihiiihhhjldTPWZ^jsusuw~v]U^YZjrlkqmmmlnqplkkprooqnronopnmooopqsrokppqrrsrptrrssrtw€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€I‡™‡‡†{vxwxz}€}uw~}xxvtvtuyyvwxutvwwutuwwttvvuvytwxtrstrsrrsvvusswsnrxwuotxxtqsuuprusqrrsopssrsuomqsmmqqnklljlkehgfgecedcbfcdfcfc`bc_^aa[[\[]]Y]ZZZZZWTWWXUTU[Zapnllomku„–•¤³ŽNu»¤\BO±ÑÍÅÏáÖ»®±ŸŽ“Š‚„„€}{ywrolqspmmoolptsqvwrxuutrswvssrtx{yvyz{}~xuyyzvtxyvvvx~{yytuzusvxwy|}|zwvuuuwxyywvuusstuvusprsutsrrronmopponoopqonqtnrwxxwtrl`]db\]bdbejjgkttx}yt|‚}wroqrkgijmrtskcftmmqmfglntuldglmnmnkfjrtonng_dorqmfbgongYOMA/+,-JZH356201/.-*')/%((,,++%)*(%$&&$#$&%%&)*,04559?ACGMNRVTW]^[__]a^XMF6'*/,03449;986899:=>>DADIKPX\C*"%&*5D[cfUR|°¢bHyœ€a`a`WWXYaekogebWRY`cccccba`aa`aabbab`_`bba_`deb`adegfcaaccaffedfiheceffffhiefggfedchgfffggggghijjihiiihhijijjjihijldZSOQaptuuwswt`WbZ[jsnlpnmnpommoonnooqrrpmlopoorpmovumlqrrronortrmox{vrs€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€U¤À°©‘y|}tkimqrttvwwwwvwx||vvzvuzxuspsuqptwvuvvxzwqswuqx|xttuuxvwtrxynssttttuurqswywuvwtutqsvsprssroptvsppqnnrpmonjmqmkjmmfehheggdege_a_``]]^[\Z\ZWZ[TWUVSRSRYlvuomoqhWRWZZZVVceaWP\w™šš°É½­»½¨®¸£ž¸º³·¾ºµ±«¢   
š””˜†‚…ƒ|xuvuqpsspnquwwtrqx|€€ysx€xy{}yx~|}{}xyxr~wwwy||ywz{zyyyxwxxxyyxvt}xtsuwwvwttvvttvupsqqvtsrvwtruvolt{zwuuskjkljjkijkkjjkkjkfgiedaW[nsrqjhhknrurjfioqommjhkprpkhgioromicelpknldaipnqqfadkpi[QN@0,.1CVI799425332//0,-/.)(+..&)*((*)&'))&%''$(%$')'&'()+-0468??@CHLMLFA92-,-.34579:<93.+("),('0DXdiVNt¬¡fNu“‚c_ibWZ][addggheZT[da``a`_`bbabb``a^bcddddeggbaeeddebdffeffcgdcfgeefghfeghggfigfgihfhghijjhghhefkkggiigghiiijjjjihij`TRXZ`lrtttszt^Y^\Zktmmojnnlnonnpmmnnonkpqnntspqqsqoooorqqqrutrqvwussqqs€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€[¢Â¹°œˆƒ€‚wc[diejijjmnonvuutwz‡†……½Á·µ±¯°¯¬ªª¥Ÿž˜”‹‹‰…€~{yuvxxxywvuuttsssvvvspouzwqtyxrrttwywuuwxvvvuwwurxtyvstsutpqtrpppplklmkkmfiligkldccegha\^_[YVXWT\ormkouvlXQTVUWUVZZZYZ[_a]_]\a_ZZ\^^``VVe]^cdbehekmry~‡ŒŽ’•˜ ª­ª¤¦ž•™”œŸž©³¦“”——’Ž’™’†ˆ„ƒ‡~yƒ‹‡€€‚yy{}|zxxyy{}}{yy{~~~zxwy{sux{}|zxwwwx{|yuztwzxwstvwurqsuwttvxyx{snnpmmopompuuolmlpphejkfmmnstmjiioqomiggqssqmeemsrpjcdkpsspieejpqrlefknonldcinpfXQN>,,/-GYI3689;9458423311475/-./..//-,-*(''('()'++'%&'%'(+-..,,--.,+,15149?EKOR[\^_^\YVNLHB;4/,***)(%$"&!'*)*3GWfkVOx±¤hJq–…a]i`W\_^cgikgfd\W\aaaaa`^_bcabca`a`fcabdccddbddbacdfgecefghfdceffefgfggeegggghijhffghjihgggkhijgiljfikhefhhggilljkmdVQST_nuussqys_Y`]\lvopqtpnnopommllnqqpnrorsoqtsmqsrsporqoortutqotvuusru€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€?n~~wqqqqlc^cillmmmmoqrsvyyxz„‰ˆ…„ŸÆÐÉÊÈÉÌÍÌÍÏÈÈÍÐÉÃÄÆÀ¾¹´°®ª¨§¤¡—•‘ŽŠ‰‡…‚€~}xwwxuqsyvssxvqoquuwwtstvyrrwwyzwppzvpwvsstsqnpssnlkoolkmqllmighgbedef`Z^b[USYXSZosomoqodZRUUUWWYTUVY[\ZYWXZYY]_]^`_bfc_ccgiebehfddefffghcegfhklkmqokksurt€ˆ‰˜“…‹‹•“–¬Æ±œœ›£®•­Õȳ½ºª›˜“Œ‰ƒ€~~{yy{}}|{z{|}z|~}{{}z~~{yzzy~vuxvux|zw{{utuwmtyywwz{qmosrqrssrrtspopsurkfglnnossmhhgkmosqkhmnmkmojgnupmhcfmootrkgekupnmhdhlkmkcekpofXRP?+.1-GYJ67;;><57<946869842331-,-12/.0/-,/.+*,,,*))+)&,,,-./0125:?BFJLVY]a`]YVPNJE>70-))*))'&%%%%&&&&&%#$#(3F[gjUNx³©gIs™…_\i_X]a`filjhhe[V[bebaaa`_`cbbceb`cdccddcdfeddddedddfdcfffheddeffefedfgeefdgigghhghijjjgfffjhklffhehilkfhmnggikkklocWQQUborvtsrws^Xga[kvrstpjnrlmrpjsrpsolssqlloqppqoruqpqrpquupptupswxssvw€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€
€€>^gacdee^aefdaabcdedcdgikiknqpruvwz}’±¾¿ÃÂÅÉÌÌÏÒËÎ×ÛÔÏÔÙØØØ×ÕÑÐÐÏÓÑÊÉÊÈÅÿ»¸´±°¥¨©¥¡ž˜‘ˆƒ€‚ƒvsyzvuvurprwxuswuyuqsrqsvwuutsrtnnrsmlorjjkigkmekicda^`a_ZWZVRajnkjmoqj[TXZYZXYZ[\\\[\\[Z^_[\^]b`_bhfa`aggbadggjjihggfelklmkhikjjkkljjibgifeggdgeeghkwƒs|z~ˆ€€ª×ɹÒÕüÁ¿½½¿¿¾¼¸²¯±°§—•‘ŽŒŒ‰„‚|yxxxyz}}xx|‚€zy}|{…zxzuwzusqvywwyzxsprutsuuxrrwxstzwxvleiqtqjkonmidimonmjiklmmnmffonmooihihoqmhhilromjebgmmpmgiprrjZRR@,/4.DYM:<==??89?<8:<;<:535766323424784221-.030.00.,,57;@EKORTVZ^`]XSROKE@952.,*'&%%%)*))'&$#(''&&%%%% $%%+4DZbeTMt¯¨fLzž†a`l`Z]_`cdgfilgYRZeecbccbbbcbbdeb`dacddddedcddbdedcaddeggegfccghddgeeggfhiefiifhjihjiihhhgggjhglkgijffhhjljkllihiklcUPUZdnpvrsqwr_Ye`\lwpppnkpqmqunrtnmutnpttrrssqrtnptspsutnnqswvrqsxxtsvv€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€:`kbdjkgcfgecefecefecdehgcbeeccfabfgny}ƒˆ”—¢¤¦®´´·¼¿ÂÄÈÎÐÐÑÔÓÝÜÖØÜÜÚ×××××ØØØÙØÖÒÐÍÉÅÈÇÀº¸·µ·¥†v}‚~~|‚…{uwyu|{wuvy{vursvtpotqlnsvqjolkiehkihmlfcbaa`]WUZUQbornnoopiYT[_^]YXW[]^[[^aZX\`_^`a_`aeggffeiigjlmpklkkmpommkloollpnkknmllomifimmkifjjedhhdb]fdafdgt‚up„{y„ˆ–œ  ¢«¶¹·¶¹·´²µ¸¹´¯²¬¥£¤¢œ–””‘‰†}}{{|x{{{vy~srw|‚ƒ~|zsqqrruxxxvvyxwvwsxxlbmyuojpsnheghoqoljhgllikmiflppplechkoolhfgjosqg_bimpomgjmook\RRC-/4/AYO>?@?@@;=A?=?@><<=;78<;99997788533358;77;BDEIOUWY[[[YXUQLID?7242.+(''(+)%"!!#$&''&%#"!&%$$$%%&&!"!#+6EYbgYRu­¦cL}Ÿ„``ja[\_chfhiijfZT[dcbcddcdgd`becabbabbbcedaaefdcedeedddffgjfdeggedffggeeijggggjkhgjihghijihcgffjihkjghkjjiijlkhfjlleYQRWeqrtprswubZca^owonournnpsropqqonorupqwvonrroootwtrwsrtvwxxvostuwvuw€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€9cm`affccghedgfcccdeffgfffggfdfigdfeddadbbdffegjiimprx{yƒ„Œ’•—šŸ©«ª¯´¶º¼¾ÂÆËÏÓÕÚÖÕ××ÖÚàÝÞÝÝßÜÙÝÆœƒ‰Ž‘•’™{oŠ‘Œys‚ƒ‚ƒ|uy}|yux}€{plmkjlmkkmjgikib__^`[TRZUPbvunnqqqj_WY[Z\\]d^YX\`a_ba`ab`^``cfedfgfkmjkpnknnonmotrnpooppppprpooooqsonooljntqrpkijmormoolllijdfhhgdecddcbcfhljmpomu€ƒƒ†—ž 
Ÿ¦©®°±³¶¸¸¹º¼»¶³²¯­¦£š”˜–ˆ…ƒ~|ttw‰•“’’ˆxttutsy|z|}wuxwsy{}rfsykhnrqmghglppnhdelsuqkdfqnomjihhjlmnjeelrssjadjlkmkfgihki]OPD--4/AYQ?AAACA>@B@ACB@?ACA==@8:<=>>=;>@BCEHJITUWWWVXZRQNJD=841-)%$$%&%'+--*'$$#""#$'(&&'''&%$(&%###$$#!,7F\gkZRw¯¥bHzŸ„_^gb[[_hmjkmjif^Y]ccccdcbcfdabdbaaabdeeddeffcbehdbdfdefffgheggffffdikjhijjkkgfijfgmihghjjjighmnighgdgjjihikfghhijmncZVTVertsostyucZhc_nwprurqonnmovrqrqnosspsqpsrqtqsssusruutssvurpuvssvwvw€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€?hsijigfkjhffgggigefhgdadefgfggglfffghehgghifcdhhggedhidib^bffdefmmmqqqxz|„‰Ž’”˜Ÿ¤¦§«°²¼¼¼ÃÊÈÈÑ¿•‰’“—›žž¡š‹‹—ž¥¤ª¦zm“ž™£›‰‹˜™’š¡¸Å¤wxš–Ž…ysqolrtkddc^_[X[TSj…uqstun^VY[Z\[\X[_`^]]_ab^]bb`caefcagiclkhltpkpuvtrtxvpprqprvvrqsrqqstsmoqtsrsurjkvxolqppqsonspqlwtijmurollmoonsjehhc_`fdbabcccadinrw{~ƒ†Œ“—›¥¯°²°³²´¿¾šsx‚z}‚‡–›œžœŒy{~|zyx{}~~~{{zƒ€qw€uiinoqqgchmppnjghlrrolggnprnhedfmppqmeelqlmjcblqnnmhhgfkk]MOD,,2/BYQ@ABBEA@BCACECAEECACCB@CDDFLONPV[]]]XRTRME@=71,,*('&%%%&&%##$'%%%$$$$$$$$$$$$$%%&%$"! $#"!!#$%$"(+09AL[hjSKt®£fHy¡ˆb`hd[Z^gkeellmi]W]fgdcccbaaddccdb`cfdcddccdehgfffdegdegeeffffghgedeehihhfehhjjhiihikjiiiiihiijkjiihhlkijjikffhkjijmdXVYY`oytptvyubXea_r}urqosusnoqroqrstttuooprqstsprutqsvtturrwwtuzvuvwxxt€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Ehrospnqpsromjlroliknnlnjijjjijmhglngfhe_dhhffghgfddffdcehkiecfieeffgghhdfhhggikehmqolmrtvx|€„‡‰snsy{{{„ƒ‡‡…Š’—”žœ‚yŽ¢¡ª¤”™¥ ºÆàúÙ‚|ÍÜÊÇÌÉ’s‘t\œº¡¡ž˜‘‰Œ„e]w tqzurv]XYWX\YY[W^\^_Wb^fe\`ecgejhcefgiiknorsrpuqqutprxttssuvusxsquvssvtuwwuvvronquursvnqrrsrqsppttpmopklnpqqrrrnggopljlmmmmnmlhjiffijhghijjihgjoprx{…yploqry„ŽŒ‰ˆŽ”‹x€œ©¤¢Ÿž¤Š~~€‚€}‚‚yy€vpmmrqiigpolmiekpsnkmgbgopolgcgoromjeelrolgcdjnklpkbcghg]UQE/+32>SQBDGCDABCDEEDDCFJMNOQRWY\^^][YUQKGC>950***'((#"$&'&%%%%$##$#"!%#$'%"#'&% 
!&"&##$%%$$#''(*+-27<>BCNTLJZghYNz°¨fG{¦‹c_ebZZ^gmihghkh[TYbkhbaffb_cddcbbcfddcbbeghcadgdcffgefiifefhffggghgeefggfgieghijhhjdiljgilogikiikllmmmlmlllhjkjhinr`WRU\fptwwwrvvd]he`n|utxsqsvsrsrrwwvvspsutvuprwwtuutttsptvuuwwvxwuvzzvuw€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Ao|ruzwsstttutrqsqprurprvplotvtrlloommppoomkijlmiklkihikjkkjihhieeeeeeeefeeeefeehecdgjifffeeefghgghfceihhffhgfhlklrvrmpw{uˆ|u~‡Š¢ÅßÀxu¹ÌÁÃÙÂ}‡¹kT¶îÓÎÏ×ÚÎÏàÌŽu‘ž‡rtwsrnRHq„\gstttnid]`]]bfe`febdhfdhmmosssuyrqstsqtyutstwxxvsvwtsuvttopvtpqvwvtssuvutuvutrppsqqstvwvwutuuuroqsppsropmpstsqnmlorqonoomnonmmnojkllkjkjlorpmlmlnrutu{~xx€…‰Œ›ž…mjorx˜ž’€‡›¢•|mmnkecgnnnojehqrokjgionknnfekoponlghmsokgffjmlpmdbilhd^TOD0-30;NOILJDGCDFIKMMNUTTX\\WRSQNID@<://-+)'%%%"#&%'(#&'''&%$# !"#$%$#" "! $!!$! &#$%),1578=??BIOQSZ^WWYNIXgj[Mw¯ªhK¦†]^j`XY_gmjlljkh[SYdgdegfffbcecbddcfgcadeeddebdgdcdcgfeeefggdeggehidfiiffiiegilifkmgghjljhiljjjkjiikmknplkmkgjlkhgilf\VTV_ltuutputc]jhcpzuuvrpruttutwtqquwtrtyxqovyutstvwttusuwyyxyxvwwvvwy{€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Enyqtutxwwyxvuwvwutwzxtuuvxyxutvuwustuwzwuuttsssqstsrqstrqpoponmooooonnnqnllnnliikjhjlidijjjjiiiiilkgfhghgghiiiicedceedecbdddhkhhs~…~ee€………“…b{ŽiZ’±£¡£³¹´»ÎÁŽx‚pmyxvwj55~´¶Î·]o½Ä¸¼¨•ˆ{xŒxlzupvxxyupomptsqptxtuusrstuwuuvxyxwrwyusvyxqtvvxyyxy{yutwxuttvwwwuszxwvxywuyustxxtouyyxxurtstvusqrtnqturnpsrollnppoqlmnlnspoqrpmlmnls{ytt{}og`_aadhfigfhgiq{…~qx‰qnssmggklnppkghrqpmjgjntmljddlppmliefnsqpkeelqmnhbfppga_RMC1.2.ADHMMMNPSX[^\baYV\^[[fk^XWMJYhk[Lv®ªgH{§d`e`[^afjimkjkh\RW_efcbddcefaeiddhhabdefeefccefbcgjehhffhighgghhihgikjggjifiihjlhgllmljjklkkmmllnnmmmkjkkihljgggjklf[TUYdoussvtzwd]hfervswvsstsqquwtruusuxwywtuxwvzwussy}ztwwyxuuwwxwy{zxxz€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€]ŸµŸ†~|zsnottpwzxvy|{{zvvy}}{y{~zx{yux|zzzyuwzxvuvxxwvuuvvuuuvtttuuvvvtuwvutuvyvstttuvstuvwwxxqqsspoqqonoqpmkmrqqpnlkikkjlnljmifhllhghhfmdffdebfbb_Z^\\[WWYRR\koonknqdRMRNPdbK`€uvsuw‰¢°…k‹”¤¼ÊÉÊÍÉÄÊʳyBAÀËÀº½Á¾¹·±«¦¦£˜˜’Œ…ƒ„…„€~xw~|ww{zz|{wuvytuxvxytu{xtxzxxuvwwwvutwywuvvuyuuwzyvvzytruvuuwoswuroqtrrpossqrqstsrrpmlnuyxw|~zlhligjlnjj
lkjkjjlof^egY]hqnooiemknqniijmplijgfmqnljhehopoojegmqpkheehmnnnf_elmmZRPE0+1/;KMBBA92/..,+(&%"&)'%#%'$#####$$"!!"%%&% "###! #""!!!! "#$%',2678:??AHMNRUVTUWZW`ea]\]]^`daY\b```____``ahg]Zac^dfeZYZPL\ikZMw®§hH|¨Žfaec[\`hmjkkkkf]Z`eedghffgfecaeiebfeddfedfihhfeghhffgijiggighjhhjkfjmmjjmmkhkkmplimljkmnnmmmkkmonnoomsvpkmmkkigfgkmgYPT_jpovuww|wc]nkfr{tstsvvsssrswwvuvutusvywvywqtwwuuxzywxxxyzz|yzzwx{yu€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€U–°¤™…wz}}kalspsxtsy}xvz}{y{||||zttwx€‡…‡‡œ°¶³°°®«««ª¨¦©¥¡ ¡¡˜•”’ŒŠ‡†„‚~|y|yuruyyustwxxwtsvxzyvwywwwutwzyu{swztx|tuvwvsuwtxuqquututpnrxqrlloopgbhceZSVWPPYjklqpnmdRPSRRUUWSPQYUT\[\W\\Y]acbgfdgjloklmnnlheijihhijkmggoroosqqppt{}{…Œ›¶¹¢’š¬½ÂÃÌÚåÏÄÅËÏÝéååòïéîêÚÓÐËÄÀ½¸¯©ŸŸ›™š–Ž…€…‡‚~xzxuuxzxuwxvssvzwuwwrswvrxytswunptyxtv{|yojotrqsvsorwtptpnqmdhplfimnpmhjlnmmomiglnookggjmllmmihjkmnkfhmnqokfekqqslcclojg]SOF4/3-#$$&'%$(##$%%%$$"'('$#"!!"""### "%&&'*-7;::>BFNNSVTPPV\VZ_^ZWUVQ\db\\^^`bgbY\b_`_]\\^acblk^[eh`cjj\Z[PIZjm\Lt­©eH}§Šbdng^]ahmhhhgih_[_eggccfgefdggffhgehgfeffedghgefhgfgijjihjkkhlnikpmmmmklnmknkmomlmnpnllnqpmooonorqoomptrjiokhfhlonlaYTUXbnuvtvv{ub\kkdo{vttyxrpvyvtrtsswwvxtqtywvwvvwxxusvzwuxzxyzwuvwxyz{{€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€8_oggf_^^]]]_bdfdghghlljonnqtvvuqv|}}~ƒˆ‚……Š ¹¿µ·¹ºº¹¹»¼»»¼¼¼½½½¾½»¹·µ´³¹µ³²®¨¦§£¢Ÿ›˜”‘Š‡ƒ|{}{zyxwuvvxzzyx}|{zyy{|{uwvqtzxrquvsnp}xrqtrprvqommkjkf`_^ZXOKYloppntveUNQQSYWSS[WQYa\UYY\__`eldgiihhknponorsrqrolmrtspppmijnnjkmlhffebeegmrsmgmpux{€†Š‰ˆ‰™¤­²ÎßØÓáßÔÝÛ×ÖÙÞàÝÙÏÖÝÜÔÎÍÏÇÆþ¹³®¬£¢™™™‘…ˆ†ƒ~~z{{{zywvr‚y{|qsuƒ…€~yxprpnruuvmprrssroqrwobkwpflsrmkkiiklnnjhilqrmigimnrojhfhmlmokeflmpmjcakrmmohdgnrkZRNH:-)) &(#"! ""$$! 
!!!$$"$  %&&)+/51AABFMRUUSTVWUX\^[VVY[[]^[WY^Z\bb\[][ZbicY]cbfklbXZ`^dd]]^^eechf][cgebifZ\[NMYijXKy±ªdH|«Ž^`na\`bfkinkkni]ZdkiigikifhjgimlhhjiffjihhhfghijiihggfgihgjlljimomjkllknqpknqonolkpqjinponlomnonlmnmnnoppnmqkggfjmkcXRU[dovvuuvxu`^nkfrxqswssrsvutuzvstutuwuqquvwwutxzwuxzw}yzzutx{zxvy}zx}€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€<`j^_cadabcdca`^c__bb^^a\[[\_``_^^____``dbbcdekqlosuwy}€~…‰‰Ž˜œ™—šŸ¥¬²·¹½¼¾ÄÇÇÊÎÏÏÐÒÓÔÕÕ×ÒÒÙÚÕÕÛÔ¬Š†Œ‹‘’‹‰‰ŠŽ‹†„…„……‚„‚}{wyy|vuutzwtyvqtpqxsomghga[XYTNZkqmkmmniSQWWTWX[ZYXUVZ\Y]]_ccbdhhjllmortvtssuutrutrrrssspoosutrrorrpqstrusqoopqqtqoortsrmllmooom`gfchifgje`_cghgmoqtx|€‚‡Š—Ÿ¥©«µ»ÀÀÁÇÍÐÚÜÞáâââáåâßÚ×ÕÔÔ®x‚{v€Ž˜—’–”ˆt{}yxzzxyxvvy|zwxvytimuphlopomkiiloonigjmonjigilrrolhehorqkfhjmrsohcdimmmngehmqk_UVK/#&$! "%&$""%(***/56@@CJKQTVYWTW\Y\baZW[Z\bdaZXZ]]_`\XZ_\afd]_a]]ekd[]dbcgicZ^dbcb[[\^fgeki\Y`c^fnl][XJJ\klYKw¯§bH~«]`ob\`bgjinjknj^\ekjjhhkighggigeilffhiilkiggikkjihigjifjmlikoollkkmrljlnnmmnpnmonmportpmnpnmmmnnmmojnpnlkkjpnlihhklf[UW\dntxzyy~ybajihu{ruxtsrtutsussuxwttvtrtwvuwyxvxzyxz|{yxwtvzz{{ywzzy|€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€9_mcdd_`cdefeecc`__bbabda``acdcb__^^__^^a`aec^\]abca^]]^_]^bdcdghjlorvxy…„†‘’•™¢¤¦©¬¯²³¼º»ÀÂÁÂÆȦ‰†ŒŽŽ‘”˜œ˜–—š£Ÿ›šœœ˜”š›•–žž—˜˜‰‹‰ž†|Ÿ–|Š™Œ„vjtwttihgb]YZSKUmnlmlmrnZQSTUYXW\WVYXZ[W][\_``cghkosttrrxwuvxywvvvuutuvwxsqtvttwuxwutwvtuusrqrtvvsqpqqqpuspqqsrqlnpnnqrntpkijkjijhfghigd\]`cegghkmnptz‚‘”š¢ª°´½½¿ÄÊÒØݯ|wŠt„¡®§ž••ž˜‹°·¦¥£›œŠxuz~€€~xrv}€xqoonnmgimoolfdhnpomjhginmmmjfhoopkggjklppjeeikmmnfehlpk[SSK1!% '3AJQOG=97?GHGLNTWXYVSVWZ[^a]Z]_YZbaYUYZ]ad`[Z]^_aa^Z\a^bgf__a^ainf[]bacfid^afehha``_dddmk_\egaflh\]\NL]mmYJv®¦bI}ªŒ_and\\ahnjklgjkb^elihjlhikkgfhjihfcfjigjkhhijkjhhhihhhjkllmjmmmmnnpopopqrplnonmooopmrsnlopmmrpmqqoqsqnmmpppikjhggild\UTYgswswuruwc_olgt{ttuusstussuuvxxvstwvwvuxzxxusuxyyxvvxwvxyyzz|~~z{zu€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€8`qjlmgggggfghjkdffefghfb`_`bcb``___aaa`Z_b_[Z]`\]__^^_a_[\ab_`d`_`__^_^[Y[_a`behhhijkklpuwx{„ƒshhnpsu}€‚~„‰Œ‰ˆŒ‘”’››•• 
£¡£¬²¹¯™¬t¼¸¯ÙÌ´yKFT[[RQLMKEEFJ]ojloilujWMPUX[WUXTVZ[\`ca]\^abeiginrvvusvttuwxwvxxyxxy{|{uswyxy{uwwtuvvtrsuuuuvwrsssrsuwvtttusqoqqsqosvqrppprrqookijmomkpppnmjhghfedccehfecbbcefkknry€‡‹qv}z{‚‘ŒŠ‰›—‚•ÀÒÊÏÐÎÕÑ£}tqlwžŸ¤–€ƒœªšxjrphhigknnlgfiplkoohgnomlkfcgnrnmlhgjmloleehlonmeehkol`UNMC2+0>Qkvw|‚k\MP\[UVUXY\b`[Y[[Z]a\Y^]YY^^YWZ[\`a_[]b``bc_[]b``eg`\^_`hnf\]cadehf`bfefgcdb`b`ipna]bb\^^WNWZKEYikXKx±ªdI|¨Žabkd[]`imjkpiii`]emghlmkkliljhiljjljjiikhgkiijjjjjkhgiklkmojlnllklooonllmnnpponoooonoooommomrplnnlmqnmnoonmkkjjnnllf]VTZfrvvvvstwg`smfpyuuuuttutttvvwvttuussxwtvzxtuuuuuyzvvvw{{uu}zvwxxx{|€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€>eskmojllkjjjmoqilkgfhgdhgefghfebcdedeffdgd^\ab``ab`][[\^_^\[[ZXYYYYYYYY][Z]\ZY\YYYYYYYYZ`_XW[[TXXWX[^\XY[]\ZZ\_]]]afihfkjfhotuw“¬¥|x…db““…¡±¡d41LQD784/.//,=drjlqhnud=5;@DINTgtpbeu~xsppponputrsuvvuxwvwxywvyxvvvxxxvtuy{{yxvyyxx{{yrsuxyywvuwxwuuwzwvtvwyyxtuusquwtttuuutrqzupnprrqoppqqqoooopplilqkkmmmkihkjhgghijinslozvqrx~|z}}vy‚ŠŒŒ— ¦…jfeev¦¥±§‡®´žwhtshfijmoomifiopppjdfpomlkffkqpmljdejlnpnfaflonleehjnlbYOPRKLYox††y|†‡†x`U^`ZYYYVY``ZY^\Z_c^[_[Z[^]ZY[]^ac`^`f`acc`\^cb`eha[]_]ekdZ\b`jikjdcecce``^XXTRUQG??=953./HYOJ[kkYKx°¨dI{§ccia\`cfkinolmi]\dikmkjlmiifkihkljiklkkkjikjjlllkkjhjkjgijijigjmnlmmlnpollnnnnljjjjnooopnnqqopqomookmprokikjjkllllkbXRV\fqxyuvxvud_omgrzuuuutuustvvuwwuuwwttuxywwwuwuwxwvy{wuwzyxzzxywwz{{}€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€In{qrtprponmljjjpppnmllkonllmmljfhjiggikefd`bhf`cdeca_`a^^^^_____^][ZXWW[XWYXTTVTTTTTTTSX[[VUYXTPRSQRRPKKMOPPNMLJKLMNNMKOLMNNQTSPQ^bSQXOQVWal]R^YECPSGBGD97;9/9\hcehfnr^2)+,+4BQu’‹¢¾Åü¸µ¶´®¨¦¨£™—–“‘‹‰‡ˆ‡…ƒ‚{z{{ywz{zyyyvrxzzwwyywywvwywsnxxwvvvwxxvutvwwvwzwstwxwwwwwwwxyuvwxxvsprstttsrqsqrusnmprssrqnkipppooooosqqkntomqtttxxttte_`^\dkecbdfhnsxvzkt‚ysjjstkedjlmmlgdfhqtohehnooqohfikopnhehmollmidfmnnkdehjnla[VUY^l{}x}|nkv|ƒ‡rYYa^Z\^\\a`ZY^[[bha[__`dgd^]`^_ceb^_c`acc`\^cbcgha^_^gnsj^_daea`]SNMIGG@=70/+%'&# 
#=TNJ]mmYKv®¦bJ~§Œbcjb\`bgjinmkmi^]dhmokhklkkljhggefiikmkikljjlmmkjjjojknlijljlnmlijnkmmkknpnoqqolmnnmnlkpqonqmormmpnnnnnonmlljlkginodYSU[cnuxvwwyvc`pojsxrsrtuvtsvxwuvussuvuxstyzyyxstyzwvwvxwwvx~~vy|ytx}}|€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Bhwnqtprpqstutsrtqqttqqtusrrrrqonqtrnlorjopkfhmnjkljhfghhbagf__e``______]ZY\[XXZ\[ZXVTRQRRRQQQQPOQQONOPOLKJJLMNNHJLNMLKKMIMOIJLHIJLIHHDEGHJJHHFCFMGCIKIIFFDADB::7/*(%$$ "!!%,' >TOLZjkXKx±©`K€©Š`cld\]ahnjkogfg`]emjkmliloliklifkokjgikhhklhjlkihjmjhikkmnlnnmnnlknqoopponnmoqomnqqprrqpkkpoopoklmjmoqqppmjidgjhkmic[SRWdpttywtyyeasofntrvwtuwtrwzwtsrtwywtuxvsuz{|wwxwvwyxx|zwy{yvxw{…ŠŒŠ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€3JRLORSVSSUVXZ[\^]\^`bbaccdfhijkgijlmmmmllmnopqqoqsuuutsqppoonnnkkjhgeddgfdba_]\^^]]]\\\[ZYXVUTSTTTTTTTTLLLLLLMLNMLLKJIIJJJJKKKKHHIIIIIIGGGGGGGGGFEEEDB@F>?AA<9EV]Z^^^g\F=FGBFDAEIQZbimowx~…Š’˜ ¦¬²¶¸¹Â¾ÁËÏÍÐØÝÕÍÐÛáÞÙÐÑÓÐÊÃÀÀ·Àº¢–œ•“—”ª®—…Š‹‡Šˆƒˆ‰ƒz{~~xz~|y{wqqrtw{}{upwwwwwxxxursvwtsvutsqqrtutuuronqtosvtqopsprrttvwlmlv~vrw{tifkkiihgjlkkklkhjle^cg`^hopomhflpnjljfgqmmnkhhjlpqlfekpmkjigimnpnlhffjmrpe]dnolh`X`nppzyku‚sejp{…ƒt]X`Y^`^`iia`d]]fid___]diaXZ_adikhda`bcefaYUVQOJD=73032-$!  
!"##&(# (+)*-1%:VPJ]mjVLx­¥dI}ªbbje[cecmqnnpmg_^glklliijlmlmiinnjkkkjjkmmlkljgkpoijljhlnllnjlppoolkmopnlkkpmmpqnmnpmopmmqtonqromnlnsrnopnmjeglkjlng[WXZanuuuupuvf_rlct{vsttuuwyvsvuuuuuuuusvz{{vuxwvuwzyvwyzxuuuwyw‚‹Œ…|xt€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€+AF===;=>>>??@@ACBACEFFECCDEFGHIIJKLMMMMPQQRSTUUWY[^_``_ccdefghhgghijkklonnmmllkkkjigfeeccbba`__\\\]\\\\VVUTSRQQRRQPOONNLLKJIHGGIHGGEEDDFFFFFFFFDCCDEFDCA>>??:9HfkcgigleP>BEAFFD@ACEFFFEFFGJIHINRSW[^acdompy}~‡™–”š£«®®©´»¼¿ÇÊÇÅÊÁ­¦°¸·¾Á½Æáäʳ´¸¹¼¹¶¿Å³¦¨¦ £Ÿ—¥¼¹ž•›—ŠŒŠˆ‡‰‰ˆ‡~|xutvz|urqrsqrvtuwxxwvtruwxvuttvtrsuvspuysnrvqhft|xtuxyunptpmnnrpmlmmlkmmkc[enjelommkgfjnooogcisojmogcjnnmjffimpopmiilnkmlggknlnogagnmjpfYd|{mmsmvwfdkm€‡{gbf]]_]`hg`^_\_gha\]b`gnia_^]^^\UNIGB@?>7/**&%$" !!!!"#$&&'**)(&%$$%%!$,,)+,0$8UNH[ljWN{±ªdI|ªcbje]`dgklqpooma^hkmnlkkljgjjhgjmjghjighkkhmhgkmkkmlmkilooqojjmnoomkmmmnqpnqnmqqnmpmooqtropqoopnmnnrqnmnmklnkkljjmme[WY[erxvtvw{uddrlct|xuvuutuvttvsvyxvttuutvyxxvtvxxy{yutwz{yvvx{…‰upcXV€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€&@???>==<<:98:<==;::;;<<<<9:;<<;;:9::;<<==;<<==<;:DDDEEEFFHIJKMNPPSTTUVVWWUUVWYZ[[^^^^^^^^________``_^^\\[XXXWVUUUTTSRQPPOONNLLJJIJJKJKJJJGFFFFECA??>;<65HflacjeggV<=@AAABCCBAAEDEEC?@DAAABCDFGGFFFFEEFNNOOQTXZZcigjqsow{zrqzƒ…•—¦Âɵ£¥¯¶¿ÂÅ×äÊÆÏÍÅÉÎÑçëåÛ×ÕÐÏÇÈÊÏÒÒÐͽ·­£šš›’ŽŒ‡…†}zwuyxwutqmjsrrrrqqqtwtszwoniuvruvtwuopqmmpnnoprsromlori_grmgjmmjjkiiljhlkghoqpnkedmomljgehlpoomijnpmmlhcchojmjfjmkioibj}shmou}xf_ik{†„tee^[]]ahga_b_agf^WVXTUUNGA:6773/*('!  
!!"#%&&'))',++*)(('(((''&&&%$#%)&&++/#7SMG\mkXP|²«dI|ªcbke^_cjjhrsmml^[hkkkkklnmkigikigikjgghihhijmmihlkglmlklmnpnmoqommmjknpnjloooomnppnrrpostqprqppqpoqqomoqnllmkjihjnmd[XWYdquytvw{ufgqjarzwvwvttuutuursuuttuwwwyywvvttvwwyzyxxw{{wy€„|ndb_]^_€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€$8=79<9:==<<;::9;:9:<=<;<<;;;;;;<==>=<;;;;<<===>@@?>=<::99999999:;::9:997889:;;;???@@@AA?@@AABBCFFFGFGFFHIIJJJJKNNMMLLKKKLLLMMNNMNMNNNNNMMMMMMMMMLKJJHFDFFB=>85G]jb`hfglY=9D@<@D@@@@@@@?@?@CA?AECB@??@ACEFEDCDCBDEEDBCEHABCEEDCDDGHIGHIKIORXfkb[[dkvz’Ÿ‘Ž™ž¢°¹¾ÞçâÙßéèãåääçêííëêéçãÞØÒÎÊÈÈȶ­ª°®ª¦£ ŸŸœ—‘ŽŒ‹ˆ…|}}{yxy{xwz|{wmlu€ƒ€€~}tpmlmsvtostqoopppswn`htqbfqtljjbjqqnnkggnoqojhjmknqne`hsrolifhlmlmkedimnimkgjljjikpomxzjjmrwreajoz‚ugebaa`aa^WSRMJKH@851-,*$#""% !#$$&'''(()))++)(()++((''&&&&&&%%$$###&''((+.,1$7TMG_olYNz®§cH|ªccke^_cgjipolmm`\fimnmkiklmlkmnjfhmlhhllhimijjjjlljjmnnnmmnmmnonnopkjlqqonqosqlmssnqomlkproqqooqpnpqqpqrqnlkjiihkmnb[WVXervwutryxhdpkdtzvtwvtvxvwwuvtrruvvtvxvuxywvvxwwwxyzxyww~„~ul_[adhsƒ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€&:=69;7788889899:878:;:988877666:;;<;;:9<<<=====:::999::6789:<==A@@>=<;:<<<<<;<;9999::;;7889:;<<99:9:9996677889988777766566788999999:9::>>?>?>?>BBAACB@>?>9573/@DBA@>>?@A>?@?ABCB?>>>>????>?@BAAAFCBBA>?C?DDBCCBBADBDB?GLGDKLOX]bz‘€•‘šª¿ÒàçéÝÜÚÙÙÚÛÝÞÞâççàÛÚàßÞÝÞàâãØÒÍÉÉÇÅ¿º´°¯­©¥‹y{zx~}zzŽ›”••‡nusoswvwsuurqrrppovqditofiopkijhjnopqjfjnmopjegnolkkigiopmlighlmllgabimmkmjfikjllowshpwmgiqumeehiu|~q[USKHEB=60+)&$#! ! 
!%##$%$#$')/)%&)(()++*)(''&(&%%$$$&##$$&&''##%&()*+,0348>:0/2&9UNH]mkVMx­¦cH|ªcclf\bcbjmkmookc_hllmmjjkmmkomimolknmlmlkkmkhhmomlnonljikorqommoqpnnpokklljjjiefffdhghfeinmtsqoqonqtsrqommkllkkmlmnf^ZXZesvsturyyg`qokz}utvsqsttuvsvutvxywuuwvtuvuuxwwxwwxyzyv{ƒ|njc^n~‚}{€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€(;?:@DA@;;<==???=;:;==<;==<<<<<<99:;;;:98899999965433334333333440000000033210/../012345623344556::::::::;;::9:99;;;;;;::==<<<<;;;;:9876655555555320100-+.+*)*)'.GdldfgfkmN018789:::;=?AB?>@CA?AEHGGGFGHHEEDEEDCDECABBB@>>B@;;:=<:;?9;====>>@@=AA=BCCCGB<>>EDJIFHHINNNRZhv€†}wtty„‘‘•£¦©®¹½ÃÊÐÕØÙÞßàäæèæåææåãààâ丆|ƒ€ƒˆ’–—™“–{{–™Žˆ†Š~tvyxvvuqxxjmxvqnmqtmfgmolkmicdjpqnhbdnlmmkfcholmnmiimosoicbeknmmgeikklpopruwupdhqtlfeeejw‰s>%)&###"#"%&$(*&&)(+++)&%&(*&$&&&&)%%%%%%%%!!#'(%$$)**,-./0:;>BEIKMPPQSY^N504&9UNHYjhVLy¯¨bG{©ddlf\cd`jnilpmia`hmmnlkklkhjolimnklpnmnliillmmmnomkonopmkkjikjhhgeebfgebdefjjiijlkjnpmkkjhhmnopqpoqqopqnkjkijjkmllnaYVX[guzutvuyvfdnlgw{wx~utsstwxvtuwvuuwystwwsuxxvstwwx{|yqy…~pig`^v‡Œ“Šu€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€&8;59=87667899::=;:;<=<:==>>>>>>:;<>>>>>=========<:87778<<;976545566778833210//.00/.,+**//000000000000005444444488888888;;;;;;;;>==;;:98:9:9:9:9;986630.)&,,(((,>ZjgiifjoZ4(.++-210../010025536:78899887=::>?;<@AAABCDCBDGE@BHG@BACEEA>=>==>>>?>@@=BA;<;>9:9?C=>CC>>A@?AEGHGCCDEFFFFKHHLOOSWY\bhnrtv„‰—¢¦©¯·ÀÂÂÇÔàµz…ˆˆ£¬ ”‘”’ƒpŸÊÐÄʾ¯¼¾˜xrsqzŠ˜•ŽttŽ˜{iosleffproolggkqpkigfknookfdhmmnomigijmniackmholfekljkpnjn{‚zpgorojdadgcmˆy<#'"" " '&%%'*-.,$#(&$$$)*+)'%&'(&&''$&*"#$&(*+,//4:=;;>>>========<<::;<>@>>=<<;::========;;;:9998777666556666555511111111.//12344222223330123456756666666555555556544431/+(22(')+4MbfijehnfA03.0675310011101320151234420/5006612713454445;758;<<;::=AB@=;FCCCABEF@BAGF@A?CBCACD>@:@BA?96==<;=?@?=B@>;;<>@?=>BCAACA@???ACDEHJIHJOTTY^`bgqymVXX[ddrt~|z‚ƒte‘ÃÊÁÓͽÎÞ©|spk 
º·¿²‰ƒ§Ã¶€eoqlkgknmnqkfhmnrqjfjpprrlfehllkjiffggjkjedinookefmnjiqqnktƒklxrhhb^gedctx^LHUKE;-()%(&(4GVZW@0),+(()%'((&$$$$%(++),1568;>ACDEBBED@@B\\\[ZZYYYZZ[\]^^`ZY[[\R?.2$7RKE_pmYOz¯¨aG{©ddmf^_bggenplnna]hklnmjhikkkjjkijjjijkkjkmnnmmmidbcfaacbbdcdda`gjgeghhgffhigcdkliilmdahqvvqikqtpnnnppponqpjkolfhllleZVZ\es{wtutywe`rmfv|vrsuwtruvsrvvvuuuvvvusuwustvvwxxywr|qrŽyqt{¤¾Ì·¢€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€&:@::;;>==>==<::9;;::==;===<<<;;;><;=:79=;99;;97<::<<::<988:967;<:89;<;9;;::9988987666667655557777777777766799758998544555656565887766657996688535899642022//.*%+BelggeiqfD,48103467876586568987;88;:88:876667895555666643333333332124422235668<;<>?@AAADEFEDEGIEEGIHFFIDEEDBABD@A@>>@?>?==?>;<@@=<=@A?<==<;>AA?=?@?><<<;;=:79:7ACDEEBBBA?;8621.-.&&=VZ[]YVYWW]]UKEFLY^ZZ_ZL9*2*6SOEVplVLx®]Hƒ¬‹bcnc_dd`ciqpnqpb\gmlnonllmnmkikolhjljmolkljkliddeef`acefhihgcdjllmnps{{€‹ƒŠ“™ššœŸ ¢¤—‚wtrmlppkmuoqqopqmgpkhjljijfXSSZhryzzvxxtefmofsyutwtuwyxuvywvvvvxxvtuwwvuuuwqyxxysx„ukšÕÇ ›¨ž±ÕèìèÛ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!3:6764645678888;77<<99=;==::<<9;><;=;8:>>===>=<;<;;:;:::;<;::;;9888999899::;;<<;;:999::98776789666666668886558:5677667966666666877766654664355245676532455322.)'AakijhdphI./5211233443334555556655665567899987644567899888999::8::99;<;;9997457;9754689986557:=88888:<=:>??=<::>?><=:;;83550:UjhbgnooW?>>=====<;::;==>:<<:;<<9<:88:;:9;879:9:<<:;==:;>@AA?BFEADEEDCCCBB>?DFCCEBBCEJMF6:VkibekmiN<:<:84:987764224/WqoXLx³©`G«Žfdji]^_chijmkmne^gkomllnnmkkjjiiknnllmkikopk`]baXTWg~– 
¤©«©³»¶´½À½À¾Â¾ÂÉÉÃÇÆÅÅÅÅÅÆÈÆÉÌÊÈÈÈ·}nprrprnnqrnllkmlikopmfXUU[gpvwytuyzicttjuzsqtuvvttuwvrqsvvvvsuvwwwxz{{{y{|vwzy|žÆàÍ´ËÊ©®Õèèê퀀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€%48676268887643247644774799768867889998835775579688779977::979998:;;989;:::998887777789:::9:9:::;;;;;;;;:99;<<:79:;;99:;::9:9:::777666666766565565332334232001.*06Jhldjkin\;)063432223562344334658866997;:876677899::;;;99::;<<=<>=:89:9667888:=999:9::;?>=>@A@>?@@@BCB?EDCCCCBA>?AB@>?B@>>>=;=?=<::;><9::::;;9798:>?=>CAADJRWH11RkmjkmlhYQTUMFEHB?CDBBGE?9@\Y@Onlkopkedkkhkokfjmlllhbelpomkedhonkjjfcgnmlhcaglklnjeji\Z`^^XJED>4@NV[][XYY\bdaWPMEFPZepw‚„wikpx„…‹‹ˆa:$#&%&'''%%&''&&%+)'&%&'(((&'+%&?USJMRKOZN3).)&)(*':W[XQ<)1)0MKB\rkVLz²¦bIªŒeend]accimsmjmpc]fjikllkklnmkjhglokhklkloniefjfUSnŽ¤®¹¿ÂÆÅÃÇÈÄÆÌÈÃÈÈÿÀÃÅÅÅÆÂÀÂÆÈÆÄÄÆÉÇÁÂÉÍÔ¾—{trptpnossplkklkjknnmg\YXZfoxwxuuyxgcrtjvzutywvuwwvwyuvwutwvqvwwvtsrrxvzyqrxx„œª¿æà¼Ç˪­Ôéìí퀀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€&594665966766543623562254223310243566554458765797679:9:;8899967:79;:879;8877665598889:;;99999999;;;;;;;;><:9;<=<;<==;;<=<<<<<<<<99::::::87777666764223452320120,05Ihleljhpb?-17365433568765689878;;9:==:<;:998:::::;<<==;;;;;;;;9::9<;;;:9:=;;;::::::86679:9=:99;;<<:<>??=<<>BDCBBDEDDEFECBCBBCA@????>>??>=<;;<;97898:;:@G?+3WpoihjniVKMPLIGDCBBFJMMPNPM[aLIQconomhbfjmmmfdkpnopkefjknpme`emnjijjgjnomidbgkjjjecojUMRNMPOF=:INXekkhfnlntvroneZVZ_ehdkx~{unp|…‰†ƒ€e?"''"$,+$''(((('&('%$$%&'*)((+$%>\SHKNMQQ?35<6+))+&,EZ]P8'1+1NOJ^rkVKv®£`Jƒ«‹cepb]cdbfmtrokje_ellkjkmmmkmmmjgklgikjimpkbaghZUr£ÂÌÆÂÄÇÇÉÌËÆÃÄÅÄÅÈÏÉÇÊÉÃÀÁÅÂÀÀÃÅÅÄÇÅÄÂÀÅËËË˼—wrwsrqomoqqnljjkkikmeWTTZfouttrtuq``tsht|xvytsuwwuvxxwwvuvvspswyzyxx{xztzxx±¬¿ëÙ¹ÊΨªÓëìëꀀ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€2FLFEGGJEEEDCBA@==<=<<;<:6599448546755645666566753377669:89;:78<::;;:9:;7778899:887778997889998899999999:<<<::;=9;<<<<=?<<<<<<<<<<<<<===<;:<;:9;<;98899:67644752+8Sjjkmdhq_>/45254444566:878:<:89::::<=<;<=>>><<==<<<<;;<<<<<;;;=<:9:=<:<;;<;9;=<<;<;<<=<<;;<<<<=;:<;:9::;<;99:;;<;89<<;;;<=<<<====>>??@=>?@?=>@==<=@CDCCILNU[J.0SjkkmmphPEF@=<688646<=<<;>DZ^B/?^qo
lmkgjpqqnfcjolnojdfkmoolgegjmnlhdgikpmjechkjloe`szaNMKJPUPOZedefdchorljotuw{sup^VemcY`kw|xuy‚‹‹…‹†pB(")$$)$&&())*))%&%&')**''%'*$&>YWRTURL=/457;8-'+)$8WbO3(2,2PRM\snZJr«¤]I…¯ddne^ab`ehmqnllc]ekmnonkkkmlnnifhkilmkjmlf_f\\gyœ¾ËÅÀ¿ÆÇÃÄÉËÊÈÅÃÆÇÁÃÃÄÅÄÄÃÄÆÆÄÂÂÄÈÌÈÄÃÃÄÈËÉÀÅÒ½…ounoqpopsqmljiljhingVPS^msvyzx{{veeyufs|zuuquwvtvvvwvvutwwtwxyyyyz{tx|qryxˆ«®¤ÂéѸÎש§Õîêè뀀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€6Q\SRTTTUUUUTRQPORRNNQQMNKKNMJJLHFHIFEGCA@??@@?>:999:99988998789876778764455667788766677889::9888888888869;<:8898:;;::;<;;;;;;;;99::;;;;=;:<<99;;;:9999977645774);^lhmmbjoX6/4423455665588889999878::::<:;<>>>>=::;<=>>????@@@@@>?><=??>@???><=?><:99;>@?ABCBA@@?@BCCBBB>??@????>=<<=<:7??><;;<=;:99;=<:89:;:9;>:88;>@ACKLJISZF%1RgilmiifTNPKEDB@=:9<@DFKD^WKMRI9+'+,,2=:-%)&2N[P9-2)3SRJ[ro]Nt¬¥bKƒ«‹dfqjbccdhjnolqrc]hlpnlkmlkhglmkjjjjiklnnhcefZo—©°¿ÌÅÁÁþÀÄÄÇÅÂÄÅ¿ÆÇÆÃÃÄÄÂÄÃÂÁÂÄÈËÍÇÃÁ¿ÂÆÇÅ¿ÐÏ¢yponoqrtspljjkkjjlohZWZblprvxvxzwebzvgs{xttswzwttvwwwxwvxxtwwvuttvxp{}rpt„¦¶§ž½ç×·ÁÓ¤¡Ïëèå怀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€1Q`WTWUSSTUWXYYYZXWYXVVWXXXXWWVWWTVWSSTPVTRRTTRPNPPLKMLIGJKHGIJICA@ACCB@A@@@?>>><<;:999:89:;;:988888888897557887:;;976669999999955666777854763363333221144312541-<<<<<===;<<=>??@=@@?>?AB>==>=<=@AAA@?=<;>@@?>=?@ACCA?@AAEDCDDDB@CA@AA??@>@A@@???=@@=<=>=?====<=?;99==;>DCHJJSYE$+Qhhfgipl^NKNF>A>>;8;CGFFHBI\^I3@aurnlhgonjlplgkmkopgaekhnqlgillklkfdfikmkheejnlplcdshI?BDO\bcefbdfhjmnmmmmnqtvwwy||iaX]dfrƒˆƒ‡‰†„†‹c;-*)&!''())(('&'()('%$((&'*#$=\YQVT9(*(#,+ 
/?4&*!(F_V812%4TPE]qm^Sz¯¥bI‚¬Žgfmf^abbgjnllllc]dlpnmmnnlkdijjmmjkommmi`bn_Z}¯¾¹¿ÈÃÁÀÀÀÁÂÃÈÈÁ¿ÅÁ½ÄÄÄÅÇÈÆÁ½ÆÃÀÁÅÇÆÄĽ¼ÀÃÆÇÆÈÂÅÊ·wvsomqrqpqgkmkjmpqfZWVXdoyuwuvzzgavvjtyutxwxyxtpsxuprxyvuwruxzzzyy{|uty|ª²©«ÃåÖ´ÅÍ¥¤Ïìïë倀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€-GNGJLIKKKLLMMNNRQPPPQSTRTUSPPSVVYYVVYYVUXWUUYZXWYZXX[\ZXXXXXXXXSSTTUUUVSSRRQPPPOONMLKKJDIKHEEECFCBCB>=?DA?>??>=<;=>=?AA?=<<==;;=<;??<>A><;:<:;>><;<9;=>?>=?AAABEAADFDBABBAACDC@BCCA>=>@A@@A?==?GBFLPSD'(Ihjeeejj]OPMA@@;7349?BBA=9AZ`C,:arnqndclommmieiljmngchmkoqkdchklmkfeikkpkgecgmoloedgTBALW_dd^^gd_`fjpsnnkmstrtzw{‚z|‚‚zi[\ckv‚‡‡†Š„…‘‡pQ9.& (')+)%&,#')(''('%(('*%&?:<@=;;;::::987623553346,/Qllkkjfm^B56786679<=<:9;;88;;9;;;;<<<<>>===<<<>==<;:98;:9:<=<;<:9;>?><>=;:<::=<8:<;;<=>>;77:=<9:;<;;99AA?=;;<=;;<=><<><97:<=<==<;;<=<9>??=;;>=<>B@DKMQ[N-)Hkrkkjml_QROEDD=;9:Y\^X?((+#(++4;8<4(%%9ZS6-6+5QLE^qkZPy±¨_H«Žgfli\^caflrpomld_gnikmnnlllmjjmmkjlgknkbiug[­ÂÅÅÃŽ¿ÃÆÄÂÂÄÐÊÀ¼ÀÃÁÀÆþ¼ÁÈÈÄÆÅÄÄÅÅÄÂÂÇÉÆÁÁÄÆÉÉÂÈά~mvrprrommpmlnopopdZWZ^frx}ttu{yifrpgt{yxwsuvwtuwxvvwwwwxx|uotzxtuvzrq{‰ž«¥ªªÁçÙ¹ÃÅ¥¦Óíééꀀ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€7QXQUVSUUTSRPONNMNOQQPONLKJJKKKLLJIIIIJLGHJLLIGFHFFHHFEHJKKKJJKKMMMMNNNNMNOPRSTUUUVWYYZ[\YX[]][Y[[[[\^^\]^_]Z[]`_]^__[YZ^[ZZYWVXWWXVUSTTTOQRPOQQKKKKKIHGFFGECABB34SnmggkemnW;362333577647898899788888999:::99988<<<<<;;;<::;<=<;===<<;==>=::;:9=;8:<;;<==>=::=>=<<<<<<<<=;:<>>;8=<<<;;>A@=;<====<;;;<>=;;=?><999;:<>?>@DEJOLN[R4$=dogfijobSQMA?>:999;>DHEC@E[^C.:[ponmhclonllgchpoopkcckmmomfcinnmiddgjjliihegigjgcs|eU\fdca_bffcgd^ajqrrolmswuquuxyz€{{„ywoc_q€†…††…ˆ†ƒ†…r[3%$*)'*'&*+'&()(#&'&(#$><<<;;;::;;;;<<<<;=>=;:;=8:;<::;><<:;=;9=:8:><;;:878<<:9:::9::;<=<<;<>>=;99:;;9:<;;;=>>=>??=;<<<;;<=::=@@@ADGINMOWM1';aoegmoqdUSNECB;::;=AEHEECI\`E29]spmkgfiljkmjgjkpqnjeekllmkdchmmoniefjnlqkabhkloiap|m_b[_ggbdgcbb]^cflwsnmswuqotx{{|€‚€‚„‡…€{lYXn€ƒ{vy€ƒ…ˆ‹Œf='*(&()")+(&))'%&%%*%%:X`XE/&*(*),3?;,,<0*&5XZ9%0&6URM_rkZPy±§hK¬eeoi_cd`ekoqsqmdaimlmljknmihlmjhilmkqp`cqjb£¶ÃÇ¿ÂÅÆÁÂÉÆÀÄÏËÂÁÄÃÅÇÄÂÃÄÅÅÄÀ½¼ÁÅÇÇÈľÅÃÂÁÃÄÄÃÅÈÂÃÏÇ¡}usrssqnlnlmonkmrd\YZ^htyxy|wyvgf|
vgrwuuvtrsuuwvsssuwxwuryzxwyxvv~qly‡’¡«¹´ªÀæѱÆͨ¢Íìîíꀀ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€6PXQUUPQRRSTUVVWSSRRRSTURUWVTRTVTTVZZVTTYVUWWTTVUXXUUXXUUUUUUTUUTTTSTSTSTTTSSRRRQQQOONMMPNOPNJKOLQSOLMOPLKKMOONMNLLNPPRUSSSRRTUUVX[ZYX[]_\]\Z\_]``aaa`_^```abbccdWT[hpjejfgfefhdfeca`_``Z[\^^\[ZYYYXXWWWQQQPPPPOOONMLKJJGHIGECDEA@@BDCA>>?>?@=:<:7:=;998:7687669::989:;<8;?>;:<;:;;:=ABCHHKVL/*=cqijprqeUQMDCA<:8:@DGGLJHL_`E2:atonkegjkijnkgikmlkjfdimnnicchkjjifeeghinh`djjipj^h}zk_X_ikedeabeb^dintqnmoponosxzz|€€‚‡‰‚ƒx^GRk„{{‚{€…ˆˆŠŽ’‹l=&,+&+*$#()$$()(&&-*(;X`T=-)*#',79:920:3*%6[^8(2(6SOJ`rlZOx°§_E}©‹bdqh`ed`eloqpnkc`gkmlkjlmkiojilonkigliZjtgyž§³ÆźÂÆÇÃÂÄÆÃÆÌÑÅÂÃÁÂÆżÀÅÇÅÃÂÃÃÇÅÀÁÇÈÃÁÃÃÂÆÊÉÄÈÌŽÅÈ«„wurpqqpnnlmnnknrc]ZZ]iuxvy{tyyifxpdu|vsuxxwvwvuuzwtrsuwwuxxwxvssymm|™¡§µ¶®ÁèÙ¶ÁĦ¦Ñëêê耀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€6QYTWYTUTTTUUUUVTSRQQSUVTTTUUUTSVUTTTTUVVTSSSRSUXVUXWTTWWTQSVXURVVUVUUUUVVVVWWWWWXWXWXWWZZZYVTUXSWYVRQSTVSQQTVVURQQSROMNPQPNMNOMNOOOMMMNPOPPMQSPSTTUUUTSUTSUVWVUWSQSbngbabehe_\_`bddcbbbfdeggedfccdeffggddddcccba```__^^`^\\]][ZZZZ[ZYVTSSRSSOLNPLLLIFGFEFC?=?@?@@>=<<<<;<>??>==A=;;<;;AFILILZR3&>bpjkllodSMHA?<<97:@FGFGEBH\`C/;/**/63'%<]\6'2(6TPK`slZOx°¦]F‚¬cdnb^egbhnprpmleagmpnllkjijmnppnlkkqme`pnj”¯­ºÉÀ»ÇÈÄÂÀÁÂÄÅÆÆÁÂÈÇÅÅÆÂÃÅÆÃÁÃÇÉÊľ¿ÅľÁÆÇÃÂÄÿÀÆļÂʲˆwvroorqomljjlnmlb]ZY\iuwvxytywjozobu|uru~|uruvsuwwvvvuttrtwzzuu|{lp…–š«³²±Éëس¼Å¦¤Íéëê怀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€3NXTYZVWVVVVVVVVTTTTTUUVWWWXYXWVZYVSSVYZUVWVWXWVZWVXWTSUWUTTVWVTWWWVVVUUUUVVWXXYXXXXYYZZ^^][Z[\\[YXYYWVWZ[\]^]\Z^\]``]\]\]\YY[[Y[ZZZ[[ZYWX[ZX[]XTTUUUUTTWUSSTSQOUOFDWgc_imsrcPMTRTWWVUUVXUTVVTUXZZ[\\]^^a```____bbbcccddfdcdffedbdggedeggfdcebbehdcd`^^]_`_\[\[YWVUSRQQPTPLKLMKIIGGGFDBBBCB@?@A@>><9899998655677668;;:;=BFMLMVK-(DfolmkklbRLIDDA<;;>CGIJDA>E^bG1<^oknmeclomlmieijmpmgeimnnmiddilgkmjecioklojcinjongrŠ~f]`djnmkljhheabionrqljnqotzzy|~}~‚€ƒ†„‡rZTey…‡‚ƒ‚„‡ˆ……‡‰‚U*")%)+)'3A=-$''&)$%<[cQ7+*+$*8>/&))*63$(E_W4&0'6TQLaslZOx¯¦bL…®hgme`ec^gpspnmld_gnmllnoljipmhhmrpjoma`mgr£·°ÁËÁÀÇÈÀÁÂÁÁÂÄÆÈ¿ÅËÇÂÃÉÅÄÅÄÂÂÅÆÃÀÀÅÇÅÀ¿ÂÃÁÀÁ¼ÃÄ¿ÅѺŒvvtpqsrmnpnkmrrma]YW[iuvzw
ur{yjnscrytsvyvrswxusyywvuvwx{wtx{xuy|ls‹—¨¶®®ÈâδÃɦ Éèíì倀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€7R[VYZUVVVVWWXXYUVWXYXWVSVYYVUUWSWYXXYWSSWZYY[ZVTVWUUXXVVY\ZWVX[XXXWWWVVWWWXXXXXYYYYYYYY[[YWX]]Y^XUY\[XWYZ[ZXXY[_\[\[Z[^[^^[\`a_^]\^acb`Y[^^\_`Z_`aaaa```_]]__]\`_XQ\iimplpqcVW]WXXVSSUWXTSUTSTXQQQQQQQQSSSSRRRRTTTSRQQQTVWXWXY[\[YZ[\\[b`]]``bhb`dhgfgfiggiighjjjjihgffdddcaaabca`_\ZZ]Z\\ZXWVTWWTQOOONLLKJHGEDGEDCA?@CHHNQPSJ5,IgkkmkmkaPIE@@<78:<>BEHGB>E]bD-:[nlkkgckppnlgekqlmoiehlomkicbgklkjihilomihgegkkqg]p“œ‡k^_^aggipvssndclrlmmmlklmq{~{zz{€{ƒ‡€ˆ‡‰r_\ey†…†‡„…Œ‹…‰‹‡rO3)*/',L`N.&,.,,%)C\eR6)(,)5:5+,+%-95#-N`T5*4)5RNHaslZOx¯¥bJ¨kkpebhf`fmnppome`ipqnlmnmjgjijlnlkjon^gtez´º®ÀÊÄÄÃÅ¿ÂÅÆÃÁÄÉÁÄÅÈÉÀ¾ÉÇÂÁÆÉÈÅÄÉ¿ÄÊÊÇÅÃÂÁÁÂÃÄÅÇÉÇÀÆÔ¿tvusstqkiopjinpm`\YWZhtux{|vzvfk{rcrzxwxvtvwuvwuuvwwvuvwyyvsuvvymix–‚y‘¡š˜¨¾·¬¾Ã§¦Îéëë怀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€9TUOTUUTTUWTXXRVURRVWUTURWXUSVYYUUVVVVVVXVUVXYXUYYXXWWVVVVWWXYZZ]WUVWUUWXVVXXVW[ZVY[Y[\WTWYYWVWX[^_ZVY[VY[\[XWXYZ^_\ZZYV[\]]]]\[`_]]][_ha`_]]]]][afe`]`dZ[]^`bcd`i_RZfikjmnkgeeg``````aaaaa`_^^^^^^^^^^^[[[ZZZZZ[TQRTUUSQQQSUUSQQRRSRRQPSRQQQQRRUUUUUUUUYZZ[\]]^aaaaaaaaedcbbcdejjiihggglllkkkjjiihhggffeedcbbaa_^][YXWVZVTY_^VM@MfmjolimaVPLFBA=>@BDFHHHICFY`K0:Wmolmjcmprsphdfnmmlhfhlrokhhhgflnkffhkoiffc_dlmtk^o— ˆxiebadhjksutohfkpkkmpnhjprz{vw{‚‚ƒ„……‰‚wojjz…„„†‰‹ˆ………ˆˆe7'',*"4YcF 'AC0 "?]bN8.)+,;<.#*)&.;1$6X`O715)4TTL\poZP}°¥aJ¨‹gipibfeaiprrqokc]cnkomknqmhkimpmllgplZlznŠ´¶­ÂÌƼ¾ÅÂÀÃÆľÀÆÇÂÃÊÌÆÂÄÄÁÁÄÆÄÃÃÈÆÃÂÄÆÆÄÄÇÿÄÇÆÅÆÆÃÂÍб†sqppqqpnmmkimstqeWW]WWktv{wxvijyqew€{xyuwywvyzuvvustwwvutquurz~rn}”™„s~„‰‡‹’Žš¼¿œ 
Éâèë耀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Bjwplgin_\`]WTQYWYZZXVW[XWUTUXWUXWWWVVVVWVVWXYYXVVVUUUTTWWWWVVVUTUUWXZXU]YVWY[]_ZWWXUY__]YUUVXXWY_a]XZ[YZZ[[Z[[\Y]^\[]\Y_````__^Z[\]][[]\[[[[\]][_ba]Z[]baaa``__[c^Yaknrkhfec`adccccccccccccbbbbaabcdeef\\]_`bcdba`ce`^edcbbbba`cb`_]\\\WXZ\[[YXTTTTTTTTRRRRRRQQSSSSSSSSXWVUUVWXUVWXYZ[\[\]^`abcfffeedddddeffghhggghijjkjigffgffZWfnmmjmqg__`_^^XXWVVUTTYWMJX`O;AUlrlklhiiijkhdfponnlhgilnlgdfiigkmhchnlkjhecgklokbr—¥•‰nicbdinpkorqmlossrsvrljltyzwy~|€‚ƒ„………ƒ„~ohp|‡ƒƒ‰‡ˆŒ‡ƒ‹zG'#) ;[Z@, )E@*$)9XdX?*'4;=0#%,+*0@,(CZ]Q715)4USK]qpZP}¯¤bI€©‹ghqg`ed`gnpopqmcbilomjjnonookkkhknmpi]otm’¹¶·ÇÆÄÇÇÅÇÅÆÊÈÂÃÈÉÁÄÊÅÁÅÈÀÁÅÆÄÄÅÃÄÅÄÂÄÈÇÂÁÉÅÀÆÉÅÈÆÁÂÅÈͶwusrrqomonlhjnnkcXX[V]p}sx{w|vfj~vhw|vuxtvvuutsruvvwxxwvvvswwt{mh}’’Œ†ˆ“Š†‘Žu±½ž¨Òèëì退€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Z¢›“Ž•¡‡stxspmnsqmkmpolfgigc`chdcba``abccba^\\\^^^^]]]]YYXVUTSRWXVRUZ[XSUVUSUWYRS[]XVXVWWXZ\[XUV[\ZZ\^_[[[[^^^^[^_]^`_\___^^]]\^___``_\^^_`abbc]^```^^^aa`^]\[[\_[Yafinsnkkhb`a________\\]]]]^^`aabbccc``abdeff^ea]ega_`abccbccacefgfedccccdeffffffffffgffedcba````````]\[ZZ[\]YYYXXXXXQQRTUWXXZZZYYXXWTUVVWXYYXYZ[]_``ba`][\_b[Wellnnsnf`bfgghhggggfffdh`V[cd`\^jqljkelnnpqlhhnnmjhhjjmlkidbfmolifchlkllhdfikmijeoœ’ˆyohghklgmuxvssssrtxzwutvxyyz}~}€€‚ƒ„„…‰…ƒviis‚ˆŽŒƒ…ŽŽˆ‚Œ†`<))/=5)'*))06:&2U\XR904)5TSJ]qpZP|¯¤bH€©Œfhrg`ec_gnpspqlbdmnomiilllnmjkkjlonoi^nrr›¿¼·ÂÃÄÂÁÃÃÀÁÆÆÃÃÇÈÄ¿ÃÆÁ¿ÇÉÄÁÀÁÃÅÂÉÄÁÅÊÉÄÀ¿ÁÅÆÅÃÇËÆÁÆÄÁË»tsrrttsrrqnkknnlb[YWVcu{uxxwydj|sgw~xuwsvuuwurtvuvwxurpwxuxxu|nj…–˜•—”•¡¢“¼¼ 
¬Öêëìꀀ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Z¡—’š©”tt~{{z‚€{x{}||z|{yy|zywvvx{|}€~}{{{{zzzzxxvvtsrrlmmllnkgdfgfipvxnt~wpnjghhfbabec`VQUYXY[YXYZ\ZYY\[ZZ]ZVXXYZ[]_`a_^]\ab\``abbbaa[\]_aba_______^_e`[^ekmpmopnhb^]aaaa```__``aabbb``__^]]\ccba``__X_^]__]a`bddddegeeedca_]`_^^_acdbbbbbbbbgffeedddgggggggghgfeefghffedcbaabbaaa`````__^^]][ZYYXWVVVVVWWWWWVUSSTUTSCMbebjkkh_YZ[ZXXZZ[\]^__[b_X\dgheagpmjgchmoolhdejlnmiegjqmjjgdfkqoifhiilomhbbfiighcgzƒvilf_[[]_`hmqutuvywusvyyvsyxy{|||~€€‚ƒ„„ˆ„…‰s`fy‚‰‰…„…‡‰ˆ‡ˆva<*4GW^V@3+CP6%%(@\a[H21>:*+-+&*14.,D`\VS;/4)4TRI]qp[P|¯£bHªŒfgrhafdahoqtpqofcilmpomoqomjjkmmnnnmj\kwz Á»ºÄÂÄÆÄÅÄÁÁÅÆÄÃÅÉÃÁÄÆÅÅÆÅÄÅÈÈÄÂÄ¿ÂÈÊÆÄÇÃÅÉÇÂÈÊÁÄÈÉÁ¿ÍºŠwusrsrpnonmlmoqqe]XWXgwyzwvwzeizpcu€|xwvwwwxvst}ywxxvuuuxuxwu{|poŒš’¥œ’–—¢°«¬Á½ž¨Òæéê耀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€]’ ’‹“¡mtƒ{vzƒ…‚}z}~|{}}z{~}|{}†‰‡ŠŒŒŒŽ‘ŽŽŽŽŽŽŽŽŒ‘Ž‹‰’ˆ‹—©¸À¾¾ÃÀ¶³··±±°«§£¤¥¥“xltuomponmmlkjhhgdefb\a`_^^_ab]Z`d`ab[``aa`][Y\[[\]\ZX^^^^^^^^b\[`gjjfhnqjb__^``__^]]]bbbbcccc``______aa`_^]\\_ab```bbccc`]\\^_acdc`[YXY[]^]\[___________``aab````````cba``abcabcdeffghggggfffiiihhggfihhgfeeddddccbaaa_^^````KUjlhljhd\VWXVSSVVVVUUUTTTOO\aWLSZgpqkggimnkkhgjpnprkcckmomhfiifjomhghkmmkhc_ekijhcdmjXF?<::Z_]P916/(*('+-.06CT^ZZU;.3)5TRH^rp[P|®£bI€©Œfhqhaed`hoqrqpmgdglnqqpqpljkkkkmnopli^oy~¥Â¹½ÉÃÅÈÇÆÆÂÂÅÇÆÄÅÇÆÅÃÂÁÂÄÄÄÁÂÆ¿Å¿ÀÄÈÈÆÄÂÇÉÄÄÇÆ¿ÃÈÇ¿Æ϶Šusqqqqomkjjjkmorj]XXZgvw|www|wgktds|xvwuuwwvvvswttwxwutswuvvtzyip™–›Ÿœ§ 
š©¼³°Ä¾¥Ðæéé䀀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€\–¦—’Ž“¡‹kp{uw{‚{y{|{x|~€€{z}‚zyvvy…‰‰Š‹Œ‹ŒŽŠŠ‹‹‹ŒŒŒŠŠ‹ŒŒŽŽ’‘“‘’”’“•›©ÁÖÛØØÕÎÑÙÛÔÔÔÖØ×ÓÐÚÀ™Œ—š‘››–””–šš–”•–‘ŠŒŠˆ††‡‰Š~~Ÿ–Œ…€|xuxyywsolklkjihgfeabimjljcknmicaacddcba``____^^]]]]]^^_`aa]]^^^__`ba_]_hkfbdeda]ZXTVY\`cde]]]^]^]]]]]]]]]]ZZ[\]^__________a`_^^_`a^^____``[[\^_`abccbbaaa`bbbddeffaabccdeeffecbdgj_\jsqqnqleadhgeedddcbbaaa^VVaf]UV^gpsngflonllkhgmoolhfgihlmighihkjkjfgkjihheaiolnifgf[G81/,**(&#.5@MXaehwyyy|€€}z{{{{}}}€‚‚ƒƒ„„‡†„„yeep~†…„‡Œ„‹ˆ†‡‡{a<5_dA"*68WY7!%>Z^\WE50*''&*+).=P[\XX^V:-2(5TQG_rq[P|®¢aJ¨‹hipf_db^fmonsqhdfjnnmlopkilillmoolnmecvu{ªÃº¸ÅÇÅÁÁÆÃÀÁÄÆÅÄÅÆÊÆÄÉÄ¿ÃÃÅÂÂÇÆÀ¿ÄÄÆÈÆÃÄÇÇÄÅÉÉÈÇÅÁÃÁÁËϵ‘rpopqrqonmlmlknrl]WY[gtxzw{xxshm}uhvzttxtsuvtvxutssvywtqrwvvutyvdrŽœ›œ ¨ŸžµÅ³¬Ã¿Ÿ§Óêíê €€€€€€€€€€€€€€€€€€€€€€€€€€€€€€X•¦—•‘“ Žps„„~{~‚ƒ„„€}‚€~|}~~{~|ywz€ˆ………‡ŠŒ‰ŠŠŠ‹‹ŒŒ‹‹ŒŽŽ‹Œ“–“Œ”–’–¯ÍÕÑÒÑÎÐÕÔÏÌÊËÏÓÔÔ×¼•‰™”•œ¡¤ š—šž­¬©§ª¬§Ÿžžž¡¦ª­««ÇÝÑÁ¿½¿ÀÂþº·º½¿½·²²´§¦¦¥¥¤¤¤¤¨¨“uihaidk~‰†„ˆ‚‚€~}}|vutrqonnjjihgfedaabbbbcc]]`fg`\_fgijkiebd`ZVW\dhba_]\\^^^^^^^^^^^^______________dcbaabcdba`_^]\\[\]^`abb___^^]]]]]^_`aab[\]_`bcda```abcc[Vdkkmmrib_beca`ddefghhijjd_chjldaempmhcmqpnomifekokhiigjkmlhdgmnhhkjiihlkidcjnjnhgh_M>72.+)(('%*(')2?LS`hpswz{wy}~zz{~€‚ƒƒˆ‡ˆ‡t\_m€‰†ƒ„„‡‡‰‡ƒ}l9(FcY7$9<-N];!'AV^`_P714*',/),AT`aZWY^T9,1)6TPF_sq[P{­¢_Kƒ¦‰ikof_cb^fmokqslefjklkloqnmrjnnnqoklrfcwsz«Ä·µÅÆÃÃÆÆÅÃÃÅÅÿÀÆÉÆÂÁÀÁÃÃÃÃÄÃÆÂÂÈÊÇÅÇÇÃÃÃÄÉǼÁÃÄÀÄͺ“wurrqqnlqooomknsj\WX[gvywy{vxtfkysiwzstyxvuutttuywtsuwvsryxwvvzvfv”žœžž¡žœ¥ÄÔ½±ÆÁ¡©Ôéëë瀀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€^™¦––’’œ…px‡„|{„ƒ€‚ƒ~~~‚‚}yz|{~{wux~…ŠŒŠ‰‰Œ‹‰‰‰‰Š‹‹‹ŒŠŠ‹‹ŒŽ‘ˆ”ŒŠ”ªÅ×ÒÑÑÍÏÑÎÒÔÕÔÑÏÏÐØ¿™Ÿ—š˜Ÿ¤¡™–š¡§¦£¢¦©¤œ¤¢ žž £¥¨¦ÃÝμ¾ÁÁÄÇÉÈÅ¿ÇÍÒÑÌÊÍÑÁÂÃÅÇÉÊËÍÔϪ|owwkcy«ÊÊÆÍÉÉÈÇÅÄÃÿ¿½»¸¶µ´³±®©¥ 
›™™˜–”“’‘”‰rba_^dliggilmnmliea^[Zffffdb`_^^^^^^^^^^]\[ZZY________`_^]]^_`^^^_``aa`____^^^\\\[[ZZZa``_^]\\^^_`abcdea`fmng^V]nmhoqrslfggc^\]]^_`bcc`b\Y]`_``ZbmliieioolkllmjjnpkdfonoolgfimlmhehjlonmgacjlkleefV@67/+&$%')))&#"%)-/4DU`hpspxyy€€z~~€‚ƒƒ‡ƒ…†ƒ…†~rd^k}‡Š‹ƒƒ‰Œ~~„s=-LcQ/#?7&G^E%#@\\W^\E772+./*=X_`ZVZZYQ9+1(6TPF_sq[P{­¢^L„¦ˆjlng`ec`gnpqptodbjmoqpoolijoqlilllqwi_ru{¨Å¹µÅÅÁÃÆÄÂÂÂÃÃÂÂÂÁÂÁÅɽÅÈÀÂÇÄÄÇÄÁÂÃÅÆÆĽÆÈÃÀÀÁÄÁÉ;¸Ì¿Žutrrstrpmllmlimrg\WWYhwyw{ys|xeh|tgtxsuzwussurqwxwuuwzxts{yyww{whx•œ•š›§Ÿ¨Íݾ¯ÉãªÐãçì쀀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€bž¯ š”—¢‡ou†ƒzxˆ‚€†‰ƒ~~€€€~y~z{{y}†‰‰‡‰Ž‘Š‹’”’ŽŠ‹ŽŠ…‰‘Š‰‰‘“Ž‘’‘¢ÁÌÏÐÏÎÎÌÈÑÔÔÒÒÔÐÉÔ½–Ž˜•˜ –ž¤¡š—œ¡¥¥£¡£¦¤Ÿ¤¦¡ŸŸ §­©¿ÓÊÀ¼±¹Âü¼Ã¹¿¾ÁÆÈÇĽ¹¼¿¾ÄÌËÈÐȨ†vvzuj~³Ø×ÍÌÖØØÖÙÝÚÓÓÖÖÒÒÖÕÒ×ÙÒÏÕÔÏÓËÈÍÏÌÐØÙÛ©lZbbdggffimlloonkhfdccYe¦³£‘y‰ˆ‰Š‚‡˜‘wmx|wzz|Š‹„‚‚qhtljj_\cad```__^^]^^^^^^^^\\\\\\\\\]]]]\]^Z`c_bb\^fchkgqsZ]flqmlvty€|qlg_\Z_^X\gh_cib`b`aaZXgnkjghikifhjiinmlkiffhkooifhjhjnkeehhjlhgebgmkpmh]J;52**)((())''(('%$##.9FUcmpuz}}~€„„‚‚„‡…ƒƒ†ˆ‡…‚vd_rˆŒ„‰ˆŽ‘‚{wN8LeH%-B0!IcF'&=Z^UV\WK>46516IWT[^YSY\P?03-2RMGatqZNz­¢cI§‹hipi]bd_aisrqsocalsplknpmigknlkmlkmqe]u|}¥Â¸¸ÃÆÆÁÃÈÇÀÁÇÅÁÃÉÄ»ÁÇÀÀÅÄȾÂÈÅÂÅÈÅÂÁÃÇÇÃÁÆÅÀÁÅÇÆ¿ÂĿƽ’uyururnqklllmmnnd][Z[ivutzywytclviuyvvuvvvussvyyyyzxutuvxwxuv{thrŽ™—Ÿ§©­¦§Çܹ¥ÄĤ¦Ìåêêꀀ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€^žµ©£š˜¡st…‡~„ˆ…ƒ‚‚ƒ~ƒ‚€}}€€}|||z‡Š†ŠŠ†„‡ˆ‡‰ŒŽŽŒŠŠ’Ž‡†Œ‘ŽŠ‡‰“ŒŠ‹‘¦ÇÓÐÏÓÓÐÐÔÍÏÑÔÖ×ÔÏÔ½–˜—›¤›œŸ¡š ª©©§¤¥§¥ £¦£Ÿ 
¡£«ª¦½ÕÊ»¹¸¹Á¼¼ÂÁ¹»¿ÂÃÁÂÅÉÀ¼¿ÂÁÇÏÎÆÌʽ­Ÿ’ˆzl´ÕÐÊÒÒÐÐÓÕÔÑÐÍÌÌÍÍÌÌÍÎÐËÊÒÓÐÔÌÑÓÏÎÕØÕ«vW]c`afgjijlnmopnligfeeg\vÅöÞ½¹ÁÏØÖÊÐàÔ±§²··¿ÄÊÝÞÕÑÍ»¯»¾·ªˆwŠ–—‹ˆ†„„€}yuqnmkkjiggeeeb``bb`]]_`]]aaadiilokd\eqsqiemntxtnnlea[]_``chlcljie_cd\_iloqjffloonhdelmnnjfgjnolgcfijqkghiffkhhjidgllnleWF:40*)(((()*(((('&$#$!)9EUgw}~€~{…„ƒ†…‡†„…‰‰†‰€rgl}†…„ˆŠŒ…|bET`<-AF)'K_G($C]\VWW[\M:8997=BBOV][Z_V?.1,3TOI^qoYO|°¥`K„«Œgfjg^df`cjqqpspc_hprnlmpolilnlknmkmud]y€}¤Æ¸·ÅÈÆÅÈÈǾ¿ÉÈÀ¿ÄȾ½ÄÅÄÅÈÇ¿ÁÁÀÁÃËÄÁÀÂÉÌÈÂÊÆÀÄÅÅÊÂÆÈþËÁtwtqsqmpoonmlkkkd\XWYiy{tzzz|wckƒtduzuvxxvuvxwutyvtuvvvvswx{xwzqiw”œ–ž¨®·®¬ËÞ»§Æ¾¢¦Êãêë退€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€c£¸«¦žž©˜uq‡…„…„„‚ƒƒƒ‚‚~zz~}||zy~ˆŠ‹‹‡ˆ‰ŒŽŠ‡Š‰ŠŒ‘‹‹ŽŽ‹Œ‘’–’”‘ ÀÎÕÔÊËÖÙÒÊÊÍÓ×ÕÑÏÕ¾–‹–—œ¤¤ž ŸšŸ©«¬©¦¦§¥ ¢¥¡ž ¡¤«®ªÂØɲ²¹¸¿À»»À¿¸µ¼Â¿¾ÅËÀ¼¿ÂÁÇÏÎÆÈËÌÐÏȾ´¤§¿ÎËÌ×ÑËËÑÓÎÌÍÒÎÎÒÓÎÎÓÏÏÊÉÎÎÌÍÔÍÖÔËÜÖ¤bV`b]bffhlkikmllqomkihggo`[Ýà·²¾ËÙÞÖÛêßƼÅÊÏÚáéçêçêïãÞììéâ·žÀÛÕÐÐÏÏÎÍÍÍÉÈÆÃÀ½»º»¹·²¯«¨§§¡œ¡¢—“ŽŽˆ~vjjgfmrnikojhknuqxysmold`^_`acfkomlgjjee_XbmlmojhjlmnmhgljknnkggjqqojgegiljjheejnkklidejjlkaOA:3-*)(((()*))))(&$#!1ARar‚„}†ƒ~{~„……†€€…‡„ƒƒzg^pƒ…ˆ…ˆŠ„ƒ‚jOXX33PR#+SaE!B]][\ZYVC:3582.49:EW\Y`X=-0+3UPK`sqZN{®£_L„§‡fjqjbeb\dnsrqure`hoqssqnoppmomlonlntc\x|¤Åº¸ÆÅ¿ÀÇÄþÀÿÁÄÄÁÀÃÆÁÀÆÄÁÂÄÄÅûÃÆÁÀÇÅÂÈÈÂÂÆÈÇÅ¿ÄÇÉľÊÀtusqrpnooonmmlkkc\\[]jvvw{yx|xfm~shy{suzruursxyuttuuuvz~uxwxvv{tlz–œ– ¬²´´°ÃÖ¾¬Á¾¥ªÍäêë瀀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€h¨¹©¦ £´¢{r€†„€„‰††Š‡€}€‚‚}}€}~|zƒŒŽ‹” ¤šŒ‘‹‹Ž–žž•‹Š’˜’ŒŒ‘Ž”Œ“™¸ËÍÌÉÊÎÏÎÕÎÉÍÏÎÐÕ×Á—‰‘’–œ 
™—œ›£¦§¦£¢¤£Ÿž——¡¡¥©¬ÄØȳµÀ·¼½ºº½¼·´¸¼¾½¿Ãǽ¹»¾¾ÄËËÒÒÏÌÊËÊÊÈÈÈÉËÍÍÍÒÏÎÒÓÑÎÍÉÇÇÊÊÈÈËÒÐËÊÍÎÌÌÐÒÊÎÞÍštUZdc^`aedfeflonnqpnmkjjjmsdwÈè¿°»ºÄ×ØÜéäÀ¸¿ÅËÕ×àìîéëíßØæãâ亘¿àÖØØØÙÙÙÙÚÝÝÜÜÜÛÛÛÜÝÜÝÜÝÜÝÚÖÒÓÖ×ÔÐÑÍÑÑÂÅÈ´ƒw~}zq]dffdyš¢›—’Œ†††rcbfgmvysrzwoogfhptpkfcjlkmnkhjkkkkigfhjkkjgfhjkllheilhjiifbeiikl_J@<4,)(('()*+***)(&$#'5Nhy{…ƒ‚‚‚€€ƒ„‚‚„…‡ƒiUhƒˆ‚‡…‰Œˆˆ†r_ZM6BXS24W`?$%CQXUZ^RC6-*1:3)*/.6FRX_S8.2,3UPKbuqZNz¬¡`L‚¢„hntebie`hnopptpc`gnjopmijlmmonmonlotiay€‚ªÇ·¸ÇÆ¿½ÂÀ¾½¾¿»»ÀÄÄÅÁÂÆÅÀÁÇÃÁÂÀÁÄÆÈÁ¾ÄÈÆÅÇÈÆÄÈÈÃÁÆÆÃÅÅ¿Å»tussrqppkllmnnoog_[ZZivwy{wv{yfn~wkvwtwysrsvvssvwvvwxyyzwxvwtv|uny’™—¦±³³·²ÁÕÄ´ÆÆ«¯Òçéè怀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€d©¾¯­¥¥·ªƒx‚ƒ€~„†ƒ…‰ˆ€~‚……ƒ€€ƒƒ€ƒ‚‚~ˆ”•—¢°²§–‘”“’— «¬§Ÿ¢¡«·¹¬š’’“£¤Ÿ›“ˆ–¸ÏÑÒÑÐÐÏÍÊÈÍÕØÑÌÊØ׆ŒŒŽ‘Ž•›žŸ œŸ Ÿ ž¤ ™›ª±°±®´ÈÓų±¸µ¹º¸¸º¹µ²¶º»»¼Àû·º½¼ÂÊÉÈÉÌÌÉÇÉÍÅÌÏÊÇÊÍÌÒÓÒÐÒÔÑËÌÎÏÍÎÑÒÏËÇÇÉËÏÑÏÏÍÊÓË”b]``^ce]^cifdfmmkkqpommlllfuna•Òм¸¸ÂÔÖÛçÞ½ÄÊÕÛÙàëíéêíßØæêä库·àÛÙØ×ÖÔÓÒÑÝÜÚÙ×ÕÔÓÓÓÕÖØÙÚÛØØØÙÚÚÛÛØÕÚÞÓÙãÉ“|‰‹‚™ž„nnpkb…ÀÐÄÉÊÇÇÄÅÌÆhhi`oŽœ›¡¢‹€• œzjonijgknnlmkfenlkjhfgiiiigfehknkkiegklmllhghkklm]HC@5,((''()++***)'%#"" '6Pf‚…†ƒ…„…†…‚…‰…‰‰rTXs‚†‡†‹‹‰Œ‡uf]OCOQ90CbU2!(C\a\[T?40,+4:3)(*-05CW_O804.3TOI^qoYO|°¥_Lƒ§Šjjjjeha[gqspptnb`iooponnpolmonmonlosg`w©Æº»ÇÅÄÁÄƽÂÈÄ»½ÅÃÃÁÁÃÃÂÃƽºÂÊÅÁÄÅÄÆÅÁÄÈÈÃÃÃÃÅÅÆÈÇÄÅÆ¿Å»vtutsrsqjklmnpqqd\ZY[jz{vyww|yel„wgtzxwtvssvwuuwwvtstvxxswx{xwzrjv‘——§³´¶°¬Ãؾ¯Ì˪¬Ôêèæ退€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€d¬Â´³©©»­†z€{y€€‚ƒ††††…}~‚€||Š–—›£¨¨¦£œ•˜šž¤©¬­®°®°±²¾À³¢Ÿ™•”“™¤³»º¯Œ—µÈÎÐÌËÍÊÄÐÍÊÍÐÒÔÖÓÀ—…‹‹‹ŒŒŽ‘”˜œœ›–›š™œž¦£œ¡²»»»ÁÆÑØν´¶´¶····¶´°¶»»¶µ»Á»¸º½¼ÃÊÊÊÉÉÍÌÉÇÉÌËÍÎÉÅËÔÏÓÑÎÏÓÏÇÊÏÐÍÍÓÓÏÉÄÄÇÇËÎÊÊÊÔÈa[X[^]ed_egigdhnolmqponmmmnipxin©Í¿¶ÇÔÙÔßéÔ¿¼ÃÌÛâÝåèêåçéÜÔâèáẔ¶ßá×××ÖÖÖÕÕÛÚÙØÖÔÓÒÙÙØ×ÖÕÔÔÔÖØÙÙÚÜÞÚØÕÖÌÔàÅ’y„ˆ‚”›™”Šyi‰¾ËÃÊÍÎÎÌÑÞÙœojg^už¸·ÂÀ‘³Ê¸‚fssjkgkpojhhhjponlgeglmmlieeghjiihcbipmmjfdfhkmlZIHB3,('''(*+,)))(&$! 
!(:ewƒ†…~†‚‚†…ƒ‰…ƒˆ_M_z…††‹‰‡Œˆ~e`]HBG?>Zb>'$&?]^\V>,.-/.371/12-/.7Q`S>25.4TNI`spZO{®¤]I‚©Žljjfcie_hooqqtpcajqooonnnmkmomlonlnq`Xv}{¢Å¼»À¾¿¿ÁÄÿÃËÉÂÁÄÂÀÁÆþ¿ÅÂÁ¿ÃÈÆÂÅÇÀ¿ÆÈÀ¾ÄÂÅËÉÄÊÌÃÅÈÊÅ¿ËÂusturrsqmmnnnnnoc]\\]kxwvzwvzxfn…vgy}vstrvvrrwywoswxwwvuuxwyww{sdw”™•¥²µ³®ªÁÕ»¬ÇÊ£¤Ïéçç뀀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€h±Åµµ­®Ä±‰}„ƒ~|‚ˆ†„ƒ…†…ƒ„„~‚|}~z|‹™š‘œ¤£  £¤¥¦¨ª«­°²¬®¶´«®µ°¦¯®§§©¬±µ¿À¹°¤§ºÃ¹¹Ãù¸ÂÂÂÀ¿ÂÆÆÄɺ”†Ž—•–˜˜—™œ˜Ÿœšœžž—˜£¨©¬­«´ÅÉ»²µ³´µ¶¶µ´³²¶¹¸µ´·º¹µ¸»ºÀÈÇÇÂÂÆÇÃÃÅ¿ÁÉÐÎÇÉÑÑÐÏÎÐÏÌÇÊËÌÌÍÏÐÐÍÆÆÈÅÉÌÆÆÓ·cZW]bYZbbafgiihilkjlpoonmnnooclsg~´Ì½ÑÜÛ×èóÙ¿»¾ÆÙáÝçéìèëïãÝëåãã¹—µ××ÑÑÒÔÕÖ×ØÕÖ××ØÙÚÚÕÕÕÖÖ×××Ø××ØÚÛÛÚÚÝÕÕÏÔàÅ”€‰‹‰†ƒ™·Ç옡ÂÐÂÆÇÈÊÆË×Ëšrje`xž³¯¾Ã¡“±Âº„eqrgjkjnomljikooomifimkmlfdehhijigffghlkheehkmliWJLB0+''''(*+-((('%" +Rwƒ‚‚€ƒ~‚€„…‡ŒvYZo}…„†‡„ˆŠŽo^R:.AY][G'(/+=Y\VG0&.+-+3;:9::@ED@K^X?04.4UPJburZNz¬¡^G§‹jlricgb]forrosre^fonoonkijmlnlknmkmsb[x€}¦È»¼ÇÅÃÂÃÀ¿¾ÀÂÃÂÃÁÇÁÁÇÊÃÀÃÄÃÁÂÇÈÄ¿ÀÂÇÆÁÄÈÅËËÇÃÈËżÄÇÊÅÀÌÃ’sprsoproppoonmmmi`\YZixyx{xuywgr„wjxyrtzsuwxusstyxwuwxxuwyvwtv|ufy˜›—¥±±°²¯Â×óÉË£¡Ëçêê뀀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€j´È¸¹°±Èº‘„Œˆ…Š‡†ŒŽ‰„„…‡ˆ‡‡ˆ„€‚~’ ¡ £©¯®«®µµµµ¶··µ´¶³º¼±±·´±¿¼±´¾ÁÀÈËÅÁÄÁ¿ÇÆ¿¾ÃÄÀÂÊÈÊÇÃÄÈÆÀÁ´’‡’–—•——››—œ¦¢£ŸœŸžŸ¡›—™™šŸ¦¤¿Ë¾´¹³²´¶¶´²³¸µ²´·¸¶²µ±³¶¶¼ÃÃÄÃÇÎÌÅÄÈÃÉÌÉÇÉÎÒÕÏÎÒÓÎÊËÌÈÉÎÐÍÎÒËÄÆÊÉÏÔÎÚ¿…YXb]WZW]]_ecaaghgikloponnmnoohlnrfl™»»ÃÇËÏãðÙÀ»º¿ÒÛØäçêæèìàÙçäæâ´–¸ØØÖÖÕÕÔÔÔÓÒÒÓÕÖØÙÚØØ××××ÖÖ×ÓÏÑÖØÔÏÕÝÔÕÐÒØ»•€ƒ€€…‚–³ÑÓÉÆÅËÏÄÄÃÆÊÇÊÔΙrnh]tœ¶¬·»˜Ž±Æ¶‡gruiggoooqqlghnmlljhikrrmeafklknlhggjqmlhgikjikfTJNA,)''''(*,-(('&$!#&Bp‰…‡„€‚„ƒ‚ƒ„Œ‚eV\v…€€‡‚ƒ‡vS5(%0EI:2)(%":YbS@7,%&&&''())*'((&!2\ƒ„„ƒ‚ƒƒ€zƒƒ€ƒ„u`byw{„‡ˆˆp_l`2#($(*$*,'9Z\EIhnhljjllmmmmknoYCV^A,4- 6SQK_trZP~²¦eL‚¨jjngaec^fnruqrrf_hopnoomnnmjkmnnnmlsg]t€€¦Æ¶µÄü»Ãþ¿ÂÃÁ¿ÁÅÈÀ¼ÃÈÅÂÅÉÂÃÊÉ¿¾ÆÉÁÁÇÈÆÅÁÃÌÈÀÅÈÅÆÄÃÂÁÂÌ¿utsssrqprnmopnkkh_[[^kvvx|zvxyhmƒweqxuuwzwuuvvwwwuuvxwwwvyyzwx|uiy•™˜¦®¬±±­ÀÒº«Äɨ¨Ìçìê怀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€n¸Å²º»¹Ë¾x€ƒ‚}zz|}||}~}yyyx}}}}Ž–•››—–™œ¡¤¢¤©«ª«®³±¯®«ª®³³³´±³º¼¿ÁÅÇÆÄÄÄÃÆÄÅÅÃÄÈÈÉÉÊÊÏÒËËÍ»“‡—–’™¥˜ž¡šš¤£žŸ¦§£Ÿ 
¦Ÿ—˜Ÿ¡¡¤¥«ÀÙѺ·¿»º·±­´¸µ¨°º·¯¯¬ª«²´´··¶»º¸»ÂÁ»¼ÃÁÇÊÉÉÍÎÌÍÑÕÔÑÎÍÍÓÔÑÎÍÏÍÇÅÇÊÇÌÞÀuRac[aic^_\YXX[^`dghikomijsqlmkd`ik\TUOO\S„®À·»ÓÒ¿·¾ÄÉÏÎÎãìæëìÙÔáççÜ®”·ÙÙÔØ×ÒÒÕÕÓÖÕÕÖÔÑÒÖÔÒÓÓÑÓÔÐÒÓÕØØ×ÕÕÖÚØÏÍÓßÁ”}„Š‰‹ˆ¯ÍÊÊÊÆÃÇÎÊÅÂËÊÁÎÍšond`uŸ¶«ÃÌ—‘±Ã¹ˆgqwokggoomnhcghknnjhillihhegjjnmkggjkknoheggjng[I<3*%*%%&'())*))('#0_~ˆˆ…ˆ…€€~‚€‚}d\r}xtyƒ‹r\bQ0(&(#+&)1.:W]KPnodgeda^ZWTSRLMA8R\=.4."7RPJ`vsXL{® _J€¦Škkogaec^fnrvsstg`jqonopnopoppppomkjsg]vƒ†ªÇ¹¶ÀÁ¾¿Á¾ÀÄÆÃÃÅÄÀÉÁ½ÂÅÀ½ÀÃÂÂÃÅÅÄÄÂÄÂÁÃÃÁÁÂÊÌÇÂÂÃÄÂÂÂÃÁÍ“wurrttropkjnommqj`[Y[hvywwvzyfn~uhu{xvttuwxtrtyuvsqt{|xwwtwwy{phz”˜˜¦­¬¬°°ÁÒ¹§½Ë¨§Íèíê瀀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€m¸É·¿½»ÌÄ‘z‚~{}~~}|zz|y}€}zxyzzvvw~—”—˜•”˜—•–ŸžŸ¤¦¥¦©®¬«««ª®³°²´¬®¹¼¼Ã¿¾ÀÂÃÄÆÃÃÈÊÇÈÉÈÉÈÉÉÎÍÇÉÎÁšœ•˜¥ ŸŸ—™¥£¢¢¢¤¤ œ¡Ÿš™¡¤¨©°ÄÚз´º½¹µ¯°¹¹¯¥«¸·°¬©§§©°·¸´³¶¹º½¾¹µ¹ÁÂÄÈÈÆÄÅÇÏÍÌÍÐÐÍÉÒÓÑÌËÍËžÅÐÊÉ׳iMcbZdlf\[[ZYYZ\]ceedhlmjiolb[RNR^ZNLRNOVMV‡ÀÂÁÝܽ¸¸»ÄÇÂÉãëçëì×ÓáèåÚ°•²ÑÒÍÑÓÐÌÐÖØÒ×ÚÕÔÖÖÒÑÒÕÓÐÓÙØÒÔ×ØÙØ×Ö×ÚÛÖÒÔâÉ’}„‰‡‰…‹©ÈÈÉÌÈÃÄÅÇÊÉÍƾÑÏ›omd`v¡»°ÆÌ—‘±Á¹‡fouljehnlknjcekjijljihllnlgfjlpkgehlkholfffflogXC3.(%($%%'())**(''$ 4h…‰‡Š‚‚‚~~‚„ƒkVhzypqz€yr[?,2D>/(@IFLED[`G>@A>:AAAWY8/4.$9QOJ`vsXL{® ]Jƒ§‰hkqhbfd_gosrqrqd\hqrqqqooonmnnooonnrh^v†‹«Àµ¶ÂÂÄÂÄÄÅÂÂÆÆÂÀÂÈÂÁÄÿ¿ÄÄÃÀÀÄÉÇÁÆÈÃÁÈÉÄÄÈÃÄÅÃÂÄÄÇÅÇÆÁËÂ’wurrttrolmmlkmopf\WX\ix}zxtx~wdrˆudx€zururqtwwuuttuvxwwvxwtwxy{qh{”—™¦¬­±®®È׶¥ÅЪ¨Ïëîê怀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€f¯Å·»º»Ì¿’{~}zy€‚€{y{y~{{}~€zvtyœ™“™–“–š˜––—› ¢££¤§¤¤©­­«ª¥­²©ª·º¹º¶¶¼ÁÂÂÄÅÁÂÅÇÊÊÆÇÄÄÆÎÐÊÍÒÃœ›Ÿ››¦©¦¡ž¡£¥¦¥¢ŸœšžŸ 
¢¢£«¯À×Ò½¹»¹···µ¶¶°®©®³±¬§¨¯©«´µ²³¶º¸³¯²¹º¶¶¿ÄÁ¼¿ÅÊÄÄÅÈËÍÏÏÌÌÉÇÆÇÄÁËÈÊÂÈÕ¦\Lba[`caWVWXYZ[^`defgilkimh`ZQIN\SEHNMWZMSJk´ÏÎÝÛÀ½¼¼ÄÄÂÐåìçìêÕÒãçãÛ±“±ÑÏÉÈÊÌÊÈÈÉÉËËÌÍÎËÇÊËËÊËÐÑÎÍÎÏÑÓÕÔÓÔÑÓØÕÏÜÊ‘‡Šˆ‰‹«ËÊÉËËÅÅÈÈÉÈÌžÓÔnj`^w¤¶®ÁÄ•–·È»‡dmrigempkimjegknonidejlnlihfgknlighjkkkihidemm\O:..-%!#$%&')***'&'$8l‡‚„†ƒ€€„„‡uW_qyonywr[?>ACFJLNMMPR[ZV`Y;.4.&:ROJ_trZP~²¦aM…«Šgiqicge`hptsrtsd]ispnoommmlkkklmmnnsi^u†‹¨¹°¸ÇÅÁ¼ÀÆž¾Äü½ÆÊÆÆÉÅÁÂÅÈ¿ÃÈÉÆÂÉÆÂÃÈÆÂÁÃÆÅÃÁÂÅÇÅÃÇÇÀÌÅ”utsssrqpnoppmjjle[X[_jwyz|wuzuco†sbrxvwuutrtvwwwyyyzwvx}wzz{xy}vh}•˜›¨¬®¶³²È׸«ËÒª§Ðìïê瀀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Y–«¢¨«¯¼¦ˆ|~}{{€€~|{|~z}}{{||}}~y{ šœ˜““’‘‘’•—˜›žŸž Ÿ¥ª¨¤¢ ¦«£¦³´±µ¶¹½ÀÁÁÁ¾¾ÁÁ¾ÀÆÈÂÄÈÇÇÆÆÑÓ½—‹—œžŸ ª«£ ¡¤¨¨¤£¥©¨£ž Ÿ¢£¥¨«³ÇÛÓÀÀƶ¹½º³³¶µª¬±®«®­¬¯«­µ¸²¯²·¶²°´¼½¹·¿Â¿ÁÈÈÁÇÊÎÎÌËÎÒËÇÅÆÆÄÂÁÎÈĺËÙ¡VL\\^cdf]UVWWXZ_cecdgjjkkgZRSUVWVRHFJW[T\SJXœÒØÜÜÁ¹º¼ÁÆÊ×çíçëêÔÒæêæà³’¸ÜÓÏÑÓÔÒÎÍÒÒÌËÏÐËÇÇÌÌÉÆÊÍÊÆÆÉÌÌÊÈÍÓÎÊËÏÎÊÑ·ƒ–“‘ªÎÐËÌÌÈÆÆÅÅÅÍÊÃÕÖŸoj`^v£·µÂÀ˜žÁÔ¿‡cmqfefkplilhdhrmijmjhkookghhhkikkiddinjghgahrjOJ<00/&##$&')**-)')(" !!# Cq†‡|~ƒ€„…|`Xerliv}wlXPc\DXkLOgnl_IT`ZUY\[SORUVTOJFRZ_\X\R=-2-'AHOMKHQbkmpxonihhchqbU•ÖÜÝØüÅÍÐÓÔÚèîèíëÖÕééÝ×­‹µÞÒÍÖ×ÑÏÒÒÑÑÑÒÑÐËÃÅÏÒÒÎÌÏÐÌÑÒÕ×ÖØÙ×ÒÖØÕÐÐÚ¾†gu{u}z{ÇÊÄÅÅÁÁÌËĽÅÈÊÚÔ™mhb]xŸ¼½Ã½ ·É¾ˆcmskkknpmlmkghjoomifihgkpmdflnjiihghllmkefhijeidL=B:% &+-(%*'&&''%" !>wŽ„‚€€€}{~‚x\UmyvmWVf`[lM&LqaYi\, <[_[WYVOLPMJKNNJERZ]WY`U=,50$9VSK`tr[P{­¡`J£†glsg`ca_iruwqttg^gsrpnmnpomopommpoktmav†®Â¹ÀÊÆÃÀÁÄÁÀÄÈÁ»ÁÈÅÁÂÉÉÃÁÅÆÃÂÆÈÄÂÅÆÄÂÃÇÉÆÃÂÆÇÈÆÃÁÂÁÂÅüƿ“urtpornqqmqrljordZVW\l{|wwy{{ufq…ufw|urrvuuutvvrtyxtrvwuvxvwuw{tn€™œš¤­²³¶¸È×·Ÿ¾Ë©¦Ëéìéꀀ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€U…’†‡‰†ƒ„}~€}}|}~€€~||v|~}}}}~~y}–«©¤¦¢Ÿ ž™˜˜œ›–”“’”™•”™œš™ž“’¡¨¨ª®¯«¬±³¯²µ¹»¼»»»¼¼ÂÄÇÈ¿ʻ˜‰‘’“™œ ¡œœ¡¤§«ª§©¬ª£¢Ÿ˜š¤¨¥¦³½ÓèÞÆÄÊÂÀÂÁ½½¾º°´¹´°¯©¦®±³²¯®²µ²®­°µµ°ª´·µ±³º¾½½¾¼½ÂÄÅÈÈÇÇÊËÀ°¦˜–‘’’\FW][[gmjh]VW\]Y\bj^H;@FEDPY`juwsprlmlllhji[W˜ÕÛÝÚÀ½ÄÇÊÓ×ÙèíçëëÖÕèåßئ„µàÏÈÑÔÑÓÖÕÒÏÒÐÍÔÔÊÇÎÔÕÏËÎÒÒÐÐÐÑÓÒÓÕÖÓÓÓÒÑÙ½ˆkz€}‡…†¤ÌÎÆÅÈÆÃÈÍÌÉÊÃÂÚÑ—nic]w¿Äɾœ 
´À²„gorhhfnpmlmkghjllljgijigkkgfijjiihfgjklnjgeet‚€\:=M7#+0,(**+++*'$!!#T€‰‚~€~|}~~x]Rj€gBKibXpO%NsSShY)>[``^`]UONUOHLMHISW\\\^O5+51%;VSJ_sq[P|®£^J¤…ektidga]hpqtrtpd`ktmopononmmpqnlnomqiax…‰«Ã¸ºÅÃÂÄÉÊÂÂÈƼ»ÃÅ¿ÀÀÄÉÅÂÅÈÉÄÁÇÅÁÃÁÄÃÁÃÈÈÄÅÆÄÁÃÄÅÅÄÁÄÆÃÊÀ“qpvttsmlpkjopmmpk^YZ^jwzwxz|zseqƒufv|vvxvuussvxvxttxzwuvyxuwwy|sl|•šš¥¬°¸¹µÀÓ»¤ÁÏ©¡Ääêéꀀ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Y‹–‰Ž–•“’Š†„€‚„‚€€€€}{{|}zx}{wy{—®­¤§£Ÿ ¡œšš›˜”’“–˜™”‘•™ššš”——• ¤¢ £¦¨¬±±®µ¶¸¹¸·µ´¿½ÂÃÁ¿»ÀÁ¶˜Œ““’–ššš›œŸ¡ ¤§©¨§¨¤ ¥¥žœ¥«««°½ÓæÝÍÌÎÇÁ¿ÂÁ¿¾¿·ºº´±±«¨¯¯±³±¯²¸µ¯©«°³³³³²µµ²µ¹¸À½¹º¿ÃÄÄǼ¾Æ°b`YYY\ZMGOXUXaimhabXW`^XZaaOHNQSUU\hpqrrpqsrttqpon`TZœÓÛàßÆÀÁ¾ÃÒ××æêäèéÔÒåâÞئƒ´ÞÎÉÏÑÒÒÑÐÐÏÓÑÍÐÓËÉÐÕÖÒÏÏÐÑÓÓÐÑÓÐÎÖØÒÐÓÕÔÛÀˆnz~{…ƒ¨ÍÎÅÀÇÊÆÉÊÇÉÑÉÃÖИpld]wž¸ÀŹ— µ¿µ„epvnnmjooljfeiljimjcfjllomdbirlkjjghkknnjgaew†mH14:5&'01*(*)'&&&#0h‹}€€}|~~{{bOh…uL(Ko^Ig`A\hBRt`.=W[\\]\VPCNMGHIIMU\`_aaSA-60$9USK]qp[P}¯¤aK‚¥‡hmugaeb]hqtsvyp`_jpnprqonnonnoqnklqngay…Š¬À·¶ÇÇ¿ÃÎ̾¾ÄÈÀ¾ÄÉÉļ¿ÆÅÂÇÈÇÀÂÊÇÁÅÆÆÄÂÃÇÆÃÈÆÄÅÇÇÅÆÄÄÆÆÂÍÕppuutslloqpllnrvk^Z]ajuxy{vv|t`n‹wdtzwvwzwvwuuvxuwxxuvy~zyuwx{}shx‘˜›§®°º¸²¿Ô¾§ÃϨžÂâéç耀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€T‰”„ˆŽ‹ˆ‡ƒ}€}|„|yz|yv{€}z}„ƒ}~š´³©¬¨£¥¥¢  –“——“”šš•““”’”––˜œ“‘£¢  £¨ª§¤£®¯°±´¶¸¸ºµ··¸¸»ÃÀµ–Š“”’’”—™™š £¡¤§¨¦£¡ ©¨§±ÂËÍÒÍØåéÚÇÅÃÄÁÂÃÄ¿º·¹»¹º·­ªµµ¶¶´²±²¬®³··³±²°°µ¸µ³·»¾¿¿½º»ÀÄ´ý…\[[WWTROHKXXQUaili`b\\]Z\aaTLIJOYcdinnmqsrqttwyurqj_Q`¦ÛãäÝÊÀ¿¿ÆÖÚÚæêãèéÕÒäå×Ô¯‹®×ÒÎÒÓÑÑÒÓÖÓÐÓÒÎÊÉÏÐÐÏÑÓÏÌÎÑÐÒÕÓÎÎÓÒÐÓÔÑÏÜÆŠs€Š†‡§ÉËÅÄÊÊÅÃÇÄÃÎÏÌÛÖœqja\{¤¶¾Ã³“ž·Á¶€`nvkiijooljfeikmoqj`bhmmomheilkjjjhjmnkjhgbar€]1$%)@?##,.+(*)&$$&%!!Q……~€||{}cG_}_2#Tr\8NcZ^M6E\P, <^^][Z[XRTXZVRONONTNEEB9418/ 6STM`sr[P{­¡bK¤‰jnsh`dd_fpvtqute]gtvqnpqonpnmopokmqskbwƒŽ²Â¸¸ÊËÂÃÉÄýÁÉǼ½ÈÏƾ¿Ä¾½ÆËÇÁÁÆÄÁÀÊÅÀÁÅÇÄÀÌÌÈÄÃÃÆÉÀÅÇ¿ºËÅ”vrsonqorospihknph\X[_iuxyztu}wbo‚wjvtms|vstutrttwvusvyzyzyuwx{}sj{”œžª°²¶´³ÅÜÁ§ÀЪ¡Åæëè瀀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€`˜£‘Œ~~€zz~€}{{{yy‚€ƒ~›¶´­®©¥§¦££¢š˜›š—–•–”’“’–œ›šŒ™Ÿ›š¥¥ 
ž¢¦¦§«°µ¸¹³®±µ¹»¹½Æ¸•…“’‘”––—œž¢¥¤££¢¡£§¨§¤¬¿ÊËËÊÓÞæØÈÊÏÃÅÆÁ¾Åĸº¹¸¶¸µ®°µ¸¶±°´µ²±¯°µµ±®®ª¯°±´±³¾¹·¹»»¾ÄÄ»¾¼—d\g`]]WUXVTXWQR\ellg_Z\]VVWSKJDH[jjgfknptsnkonpwvwvh[H]¦ÝêèÝɼ¿ÈÑÚÝáêíåêìØÕæäØÖ²Œ¯ØÓÊÓÕÐÑÖØØÓÎÐÑÏÎÍÐÍÎÏÒÔÎËÑÓÍÑ×ÑÐÔÔÑÏÎÏÍÏÚÁ„o{}~‹†ˆ³ÒÎÈÉËľÀÄÈÈÌÊÍàÖslb]{¦¼ÃŶ”ž¶Á¹„dsvhegnpmlmkghjoonidgiflqogcejljjjghkljkhhd_fqS$IP*"""&))++-+'')(%7mŽ‚}{|~d@OfD#(CNA-1=@A0/.14-+@VSPLHIIC>=:6433311-+*'&)39/4SSNdwt\Oy©bI}¢ˆilqjafe^cmusloti_ixtoorrmlolopmmopnvk`r}´Äµ¸ÅÆÇÇÈÅÂÁÀÅÄ¿¿ÇÈÈÅÿ¿ÅÅÿ½ÀÆÆÂÆÃÁÃÄÄÃÃÆËÊÅÄÈÇÄÀÈÉ¿¹ÍÈ–urspnqnqrpnmmooni]XY]jwzyvux{whrƒscu{wuvtuvvvxyvtuuwvuuuyxuwwy|sl}—œ¥ª¬¶´²ÅÛ©ÄÒ®§Ëêîêꀀ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€_š§•‘‰~}}†Œ‡€~~}}~}}~{}€‚}‚~š¶³±°«©ª§¤¦ ŸŸ™œ˜–””””’•™•–š”’›™’——šžž›ž£¥¤¥§«­­¬±¯´·ºº´´Å¸•„Ž““”šš˜› ž¥¥£¢£¤£¢¢ž¤¡ž¤«¬­¬±ÆàÝÉÉÐÏÄÅÈÀ½À¼»½½º¹³ª®¶¹¹µ²²²°·²°µ·²­««²±±·¶´¹´¯²¸»ÁÄ¿·Ã§q\ee[Z[WV\]Z\YYW[eigcd[ZXNIJF9AJXgkjkiorturqvxtqtsvzlSB_©ÛééÞǸ¿ÎÓÕÙçëíåêíÚÖæäÞÜ®‰²ÞÔÊÒÔÐÑÓÑÎÐÎÎÌÐÓÌÈÒÔÓÔÕÍÊÒÑËÌÏÏÏÒÑÕÒÍËÍÑÙº†s}|}‰‚„®ÎÊÅÈÇÄÆÊÅÇËÌÉÍßÒ›tmc\z£»Áƹ™¡¹Å¹„akpfilnpmlmkghjmjhgfiimnmkihjmnlkiffiilkfdfd_R2! 
U[2$%!#&(+*,+(()& N…‡{{|~€‚„hEIS;+30/37328;@FEHNQSO610,(+,%(5628=?D@-.8.%(*290 5SSMcvt\Oyªž`I£‡hlre`fd^fqxvqsre`jupopspllpmnnlnqqmrg_t|ˆ²Ä´¼ÉÅÃÀÅÊÃÃÄÿ¿ÂÄÇÇÆÃÃÄÅÅÇÀ½ÀÃÃÄÅÅÇÇÅÅÆÆÄÄÉÇÁÅËÊÅÃÇÈÂÀÒÊ–qpusssmmokmmghorg]XY^n|}ywwyzugs…vhx|ustuuuuvxxtwxuqtyxqvxvwuw{tj}˜™¡¦ª··³¿Ö®ÊÒ¯©Ëèìê쀀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€aœ©—’‰‚†„€‚€ƒ€|}€€~}~}ƒ€~…ƒ€¸µ¸µ¯®¯«¨«ª¤£¡š—›š™™˜•–˜““–›š“•—™˜—™Ÿ¤žŸ¡¥©«©§§¨®¯±¶¶»¾µ•…•––”™™’–¦¨§¦§©©£¡¦¥¤¦¥¥©­©¼áãËÅËÇ¿ÇËÂÃÇ¿»¿À¼º¶²¹¸·¹»·±²··´³µ²¬¬±µ¶²®±·¸´´²¸»¶·»¹¸¸—hXbcZ[^[VUTU\QXY]imgdc]WLCMWQAK^ihhkkgjkorqs{qrswrqteOEm·àæäÛÈ·¿ÎÎÊÒéêìãéìÙÕäéÞÚ°‰±ÞÙÑÔÓÒÔÔÑÐÎÒÓÑÔÒÊÍ×ÔÎÐ×ÔÍÏÑÑÌÍÕÔÎÏÖØÖÎÊÍؼ…t~}Ž‡Š«ÏÍÆÆÁ¿ÉÇÇÍÍÊËÑÞÔ›qi^Yz¦¿Äɾ›ž²¾¼‰fnskkhjooljfeihkjjeadfkllmljjmmkkjggjjikhb`mkO(6hW,$#!$(()--+*-.&8mŠzz}}€„|dILSKKOLOUYTYaY\TWc]_cJ+(((%*,%8[fahhdmU./?,((080"7TRK^rq[P|®£`K‚¦‡gmvfcic\fptvsvrc^jwtpmmnnprplknqonopfd}ƒ‰±Å¹¾ËÈÁ¾ÂÂýÁÈÁ¼ÃÉÉ¿¼ÃÅÁ¾¾ÆÃÀÂÆÆÄÅÄÆÄÂÇÍÊÁÂÇÉÇÇÉÈÇÂÂÃÂÂÒÆ‘rqusrrmnqlnoikojg_[Z]kvtz|vuzsaothwxpsxwttxyzzywwwvvwxxuxwxtuztj~› œ£ª¯³¹¶¿ÔÄ°ÉÏ­§Èäèç뀀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€_œ«•Œ„~€ƒ‡…€~~~~~~|||‚‡…€~{¡»¹µ¹¶­­³°§­¥¦¨¤¡££š—–—•’‘’—–’“˜š›•–•“•šžžžž£¤ ¢¨©¦¡¦¬°±²ºÀ²•Š’˜››–›œœ›”˜©­«¨§©¨£Ÿ ££¡¡£¥©ª«ÈëãËËÐÅÄÇÅ»½ÄÂÂÇÉÉǽ¶¹¼½¼»¹¶´³°·²´Âº­´À¹µ··³²µ¹»¿½¸¹»¶¿¯‘q]]`^ZXY\\XVXZTT\fljdaWLA=DKHMallnponklkjmrtswttttwtiIHÎâÝäÝØÎÔÝÝÙÚëéñêëêØ×èåÝܱ‰®ÙÒËÓÑÍÒÔÓÕÏÒÑÒ×ÕÎÎÕÐÒÔÒÔÕÑËÎÏÍÏÔÕÓËÓÔËÈÏÛµ…v}†…ˆ¨ÌÊÃÃÅÆÈÇÅÏÕÏÉÎÞÒ›qjbZyª½¼Å¼š ²Á¸†empjlfnopnjgffijllifhkklolfjmijmmgegklmnedcmoJ$ A`>&($#'('+,..-*&# !Y…ƒxy€ƒ~sYOPPNEOR[YZ`[Z[ZY[X]_K*'+%")'&KjogfjroC0A@').*06-6TQKaws[P{­ bL§Šhoriagd^gqvrsutfanvspllorrrrponoonlte^w„‰«Ã·¿ÏÉÂÁÅ¿ÂÇÄÀÁÅ¿¿ÁÆÇÃÂÃÅÆÅÂÀÃÆÅÇÅÅÆÄÄÄÂÁÅÈÈÉÇÅÇÂÅľ»ÊĘqturqrpmnnknolopg^XY^juyzxwwvsiw‰wftyxywyxursutqtuuwyutyvwvxwx|soz–¡ž£ª²±¶´¿ÙÆ«Àи«Æåíî뀀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€d¤°“‹Š„ƒƒŠ…|z{~‚ƒƒ‚†„~…ƒ‚…€‚ž··´º¹²°¶¶°¨§ª«¦¤£Ÿžžž™••˜“•–‹”œš••““––žž£¤ 
¡§¨¡£§¥¨°±²º¯–Ž”–š˜š¡”›®«¦¦©§¦¥££§§¦£¢¦¯¬²ÌèàÎÎÏÅÄÇÅ¿ÂÆÁÁÉÈÂÁ¾¹·À½¼¾¼¶³µµ·³´º³«±°®®²¶¶¶·ºº´²·»º·±¶œoX\ba][[]]ZYZ^UNXikfg`RKMMMSYhkllonifhklklprrsovvox{hIU‰ÈÐÅÔàÉÇÍÎÑÕÑÕâêåèèÖÕæåÜÚ¯†¬ØÒÃÌÐÏÓÖÕÖÏÔÓÔÕÐÌÐÒÍÍÑÒÑÎËÓÓÑÐÐÒÒÑÙÖÑÌÌÒÞ»‚t‚„€‡ƒ„¨ÊÊÆÇÆÄÆÅÉÎÎÌÇÇÖÖ›nid]y¨··Ãº–Ÿ¸ËÊgoskjfmnonjhggklmmifgjnpnhddhkimmiehjknjcc^joJ#!"*;+  !"&(*--..+*)$ H„zruw{€€y_ORUTQJ?IXXXZ\[[Y\X]_L+(*'(+)0TpjkmhnbEGVB"%,*06,5SPKaws[P{¬ bK§Šhoqiagd^gqvxwvsd^lunnnpqqpopnmnpqpnsf`xƒˆ«Äº»ÇÅÁÁÇÈÀ¿ÃÄÀ½ÂÇÄÃÅÊÊÆÄÅÇÃÃÅÄÀÁÃÆÅÄÇÉÄ¿ÁÀÇÆÂÅÌÊÅÅÅÀ¼¾ÎÅ”quurqrpmllimnkopf^YY_lx|r{yvzwgt‡uewzuvxpquwurswsvwtrvyuvwvxwx{rh{™¡ ¨­®±²¯¾ØĨ¼Î¶©Åãìí退€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€g¡ª’Œ†‡€ˆ†€€„…„‚‚‚ƒƒ„€ƒ‚€~~‚Š…ƒ·¹´¼½¶³·¸´ª®³°ªª¨¢¡¡¢ ›•–š—•–Š–Ÿ•–—˜••–•’ššž›¡¢§¥©§©­­¯»®–Ž”–šŸ£œž¢— ®«¥©®«©ª¨¥©©¦¤£¨°²¸ÏæÞÌÉÇÄÃÿ¿ÈÏÊÃÈÅÁ¿º¹À¿ÀÁ¿¸·¹¸¶¶·µ°°´´µµµ·¸µ±´·±­µ·³²³¼¨vUXc`_]\\[ZZ[Y[U[noc]UNMQSWaijijjjlmkdimkkmpppuysqxtcRV„»¾¸ÎÖµµºº¿ÆÀ½ÜæãçèÖÕææÜØ«„ªØÓÅÌÐÏÐÓÕÕÖÙØÔÓÍËÐÒÎÍÐÔÑÍÍÏÒÔÒÎÌÑÖÝÖÑÏÏÑܽˆv~|…„‡®ÓÑÇÈËÇÂÆÌÎÉËÍÎÚÓ™mie_|«µµÃ»™¢»ÌÆ‹cmtiihkmnmkihiijlkhegjkmmjfdhmilmifgjkmkif_mnD # $%(-.,-,)+,(  3r†{stknrzmWIT][^XCBX]WZ^ZZZ\X\_L-*)'+)&9awhjmhmVJGH7$%*1/5,5RPJaws[P{­ aK€§Šhoqiagd^gqvvtspb[jsqqqpnmllnmmnpqqprfbyƒ†«Æ·¶ÄÇý¿Á¿¾Á¾½ÃÇÇÂÀÁÁÀÃÈËÀ»ÁÆþ¾Èý¿ÈÉÅÃÂÆÆÃÈËÇÀÅÅÁ½¿ÎÅ”qturqrpmkjhlnkopd]Z\bny{u~}y{ucq†wgrtuzyqsvvwwwvvsrrqsxzwxvxwx{rh€ž¡ «®¬µ²«ºÕÅ®Ä͵¨Ãâêë耀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€e¥‹„z|xv{|}€}}}}}|{{„€€}{|~€‚‡Žˆƒ›µ¸¯µµ¯¬¯®ª¦«­©¤¤£  Ÿž™”“–—“‹ˆ”™’’‘‘’‘“““•˜šŸž¡¡£¨§«²¨’Œ•™›œ¡ž ¢›”š¨©¥©®­««¨ª¬©¥¥§©¬°µÍèàǾ»»½¼¸¸ÃÆ¿½Á¿½¾»µ´¸¼½¹´´¶¸³°¶º³°µ¶²µ´°²µ³­³¸¹¶´·¹»¸£šŠdU^`_][ZYXYZTZX[ee\YNKINT]ejejpoikkdbimmjlmnmvvrtvj[YSg›¾ÑÞغµº¾ÃÇÄÈÝèæëìÙ×çèÝת‚ªÙÖÉÎÓÒÏÑÒÐÒÖÓÒÓÏÉÊÓÔÑÐÓÑÏÔÓÓÒÑÏÏÑÔÔÓÔÒÍË×¹…u‡‚‚¨ÎËÃÆÏÌÈÄÇÉÅÆÎÔÜΗojd]~°º¶ÄÀž§ºÃ¸…amshhjjlmmkiijhiklighlomolcdkllmkhdfjnjoohdl_1"##*""&#&--+,+++-)%! 
&_…|rm`fmgOBKURW]N?N^YYZY[[]W[_M/*&&,#"@cqgimkiSF=2,,)(1/5+4RPJaxs[P{¬ aK€¦‰gnqiagd^gqvrqrrc\irusqommllonmnpqporgc{‚†«ÇµµÄÈÄ¿ÃȽ»¾ÄÄÂÂÂÇÃÁÿ¿ÃÄÀ¼¾ÃÄÄÅÉÈÅÀ¾ÅÉÆÉÉÿÁÅÆÇÂÆÅÁ¾ËÅ—quurqrpmllinnlope]YZ`mx|wwxzwqevƒwhuxwyvqssruzysuwxxyxvywxvxwxzqj‚  «®¯®±±¿×È­»Íµ¨Ãâêë耀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€e¦¯“‹|wzwyy{|utxxyyxux{{yz|zvx}{}|{–²µ°²²®¯°®¨¥¨¨¦£¢¡¡¥ ˜–•‘“’‹‰‹‘‡‡‰Œ‹‡‰Š‹‘”‘–‘• ¡Ÿ¡Ÿ‹•›š–Ÿ£žŽ’©«ª©«®®«¨«ª¨§«¬®²¸»ÏèãÌÅÿ¼¾ÆÉúÂÀº¼¾¹´·»½¹µ´µ³±­·½´±µ²¬®­ª¬±°«°¯³®¤ª¸¸ ut‰wWWb^][YXYZZXV[`ZKGLH@BTdfgkggjklpofbinlkkmnmipvtqhTUXPg¦ØäÔ´±·¹½ÃÆÑâìéííÙÖæêßÙ«ƒ«ÛØËÐÓÑÏÒÔÑ×ØÔÏÒÐËÉÒ×ÓÍÐÎËÏÚØÖÓÒÒÒÑÐÒÓÏÊË×·†t~€†€®ÌÉÆÈÈÄÆÆÇÈÅÇÌÏØÑšqkd\|®½¶Ã¾›¤¸¾¼hmtjhgjlnmkiijjkmmjhjmllnkehmjnmjfcdkpjmjgf]C!"! (3)#%!%+-.---,(&&%##%%!"P‚}i`Xgpj\SILWXZZSEP]ZYYY[\]WZ_N-'$'-$(Nh[TRPMHH@C4*/+%'05,5SPJaws[Q{­ aJ€¦‰gnpiagd^gqvwtvug]jssqooqrqoqpnnppnmrgd{ƒ†¬Ç¼»ÈÈþÃÇú»ÃÆÃÃÅÇÅÅÇÆÀ¾¿Êļ¾ÇÊÆÃÈÉÉÄÁÅÉÅÂÇÆÂÅÈÆÂÁÆÆ¿ÌÅ—qturqrplpoloolopi_WV[iw||yy{ztj|‡qcy~tsuruvuuvvtxzxwwvuyxxwxwwzqf|˜ ¢¥¤©°¸·¼ÎƲ¿Î¶©Åãìí退€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€i¬¸¡“‚‚„‚yy|{}}||~~zx{~~{zz{~z}œ»¿µ¶¶¶¸¼¹´­°°¯°®«ª®¨¤¥¦£Ÿ™›˜””””•––“‹ŒŒ‹‡‰‰‰Ž’’’•“•›šœ ¢•”šš•š¢¥¦¡•˜­°¯ªª®¯¬«©¦¦¬®ª­¸½ÀÎâÞÌÆÅÃÅ¿ÁÈÊÇÂÈÆÂÄÄÁÁÅÄÄÃÀ½ºº¼¹ÀÆ¿¹¹µ³´´µ·¹·³®¥¥ – ˜vaZfg[U[\^]ZZ\\[XZ_^PCAC<>I^ifbgiddiklkifkmlkmnnnjlsqgZPPSOPq»èÒ±³¹¶ºÆÍÕåîêìë×ÔäëàÚ¬„¬ÛØÏÒÒÏÏÓÔÓÐÓÐÎÐÎÊÌÍÓÏËÐÑÍÍÒÓÓÒÓÔÕÕ×ÓÏÊËÐܺ‹v||{†‚§ÉÊÈÊÊÆÆÈÎÎËÑÐÌÚÚŸqke]x¦½µÀ¹’ž¸ÃÃ’fismigkmnmkihiklmlhefiejlhhjkioligddjqrkeieJ-"%"""$ "#"&+.+)*.-**-('##-5/ F~ƒhXW_\T]^RP[acd^[`]USQX[]^VY_O,#%*.)5][><@:.&3HF.$+'#%17-6TQKaws[P{¬ `J¦ˆgmpiagd^gqvxttte]kwvrooprrprpoooonlshc{ƒˆ¬Ç¸¸ÅÈÆÃÅÅƼ»ÃÄÀÃÊÆÃÁÃÃÁÂÆÇÿÂÇÃÀÄÈÆÃÅÊÉÅÄÈÈÄÂÄÇÄÂÄÅÃÁÂÑÆ“quurqrpmrqmpplooe]YZ_juwuyutxsctŠsbv{sruwyyusuvtsruwttutxywyvwypf{™¤¢—Œ‘—«µ¸ÅÀ±¾Î¶ªÅãìí退€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€m¯¼¦¡}ƒƒ„ƒ‡†}~†„‡‡ˆŠ……ƒ‚ƒ……ƒ‚‚„}}š¸¼³¶¸¸¹º¹¶²·³¯µ¸´±´¯¬­®«¨§¥¤£  £ ˜—™™–”’‹ŒŽŒŒ’œ•˜š›™—ž¥¥–Ž”™›™œ¥©¥Ÿš 
¬°¬«¬­¬«©©ª¬¯®¨«µÁÂÑæäÎÄÃÄÅÃÂÆÉÅ¿ÄÇÇÇÉÃÁÆÌËÉÅÂÀÂÅÇÂÄÉÇÂÀ½ºº¼ÁÀº²­—‹†~rsuj[a[OQWWSY[\[[]]ZX]WIDHKIJYcfigee`bflnjghjlmkloqpookkj[NQOJVWQØÙ¸¶¼½ÃÌÐÙçïéëéÖÕåèÞÙ¬„«ÚÖÎÓÒÐÔÕÑÏÐÒÓÕ×ÎÇËÔÖÓÎÐÏËÊÏËËÏÕ×ÕÓØÑÍÍÎÑÝÀ‡t}€€‡{ŸÈÊÃÅÊÇÃÅÎÍÉÐÏÌÛÛŸrke]x¦Â¶À¸‘žºÅÀagtlhimnomkhggkklkgcdgkmlgdehklkjjgehntigl_=%()$'(! !#'*,/1.-.-)''''#"%!'26#:s…fV]WBO\RIT\clhcb]WQJIW[^^VX_O.#(,*);`hZ`f\J5*IK3''$')28. 7USMaws[P{­ `J¥ˆfmpiagd^gqvwrrrd\jvwtpnoopoqonnppomugbz…Š­Æ¹·ÄÇÇÅÈÈÁ»¿ÇÆÀÁÆÄÁÁÄÅÄÆÉŽ·¿ÊÇÁÅÆÆÅÇÉÅÁÄËÉÅÃÄÃÂÂÄÅÄÂÃÑÆ“qturqrpmpploolopc\Z]bluwtvsszudsŠxhwzuutvvtqrvxvsvvsqvzwyywxwwypj€œ¦¡}€…š¤£«ª©Á͵¨Ãâêë耀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€k´Â¨£˜‡†„‚€‰‰}{Š‡ˆŒŒ‰‡‰Œ…‚…‰ˆ……††‰~™¶º³·º¹·¶µ´¶»³«²¼ºµ´²°±°¬«¬¤§­¨ž¤¦š™››™˜˜—”‘“‘‘Ž‘’““•™˜œ Ž‹•š›šŸ ¤¤›•¨¯ª®³¯¬«¨¥¬¯¯¯¯²¸ÆÁÌæåÌÄÊÆÿÁÈËÆÁ½ÅÇÆÇÿÁÂËÎÉÃÃÆÇÄ¿½ÁÅÃÁÁ¿¼½¿¶£’‰€vk`Y\^YQZa[PPUUTXZYZ\[X[]M=@DBBWde_biiediihlmjkmnlklqrqnjkj^TQSQNRUHZ›ÉĶ»ÈÐÐÏÜéñêëê××èæÜØ«ƒªØÓÍÓÐÎÕÖÑÏÓÐÏÔÖÌÈÐÔÕÓÐÎÎÏÒÒÏÐÔÔÐÎÐÑÌÎÔÒÌÙŠu{}}†€~§ÌÌÅÅÆÄÄÇÌÌÆÈËÍÚÔsld\{­ÉºÂ¼—¤¼Á¼ŒenwidknopnjgfflmnmifgklknlddkmiikmjfgllejkU5$'("##!" ##%,.-0/-+,--*(()'!"#&#!$6,.d|^Sg]>IP>Ea^Wa_aZNTZOQW[^^VX^O2%*,$#8Y[bgkjk`A:\U9'&.(39/!8VSMaws[P{¬ `I¥ˆfmoiagd^gqvzuvwf[grpqrrqonnommnpqqovg`y†‹­Åº¹ÅÇþÀÃÁ½¿ÄÿÂÆÄÂÂÅÅÃÃÅɺ¿ÉÈÃÇÃÀÃÇÈÉÆÀÂËËÄÃÆÅÁÁÆÈÄÂÎÅ–qturqrpmnnknoloph^XX^lz}vsy{sdq’}kyztw}wttwwtuyxwvwwsrwyzwyvwypi€œ¥¡‚ˆ›§¤¥Ÿ¢Á˳§Âàéê怀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€l¸È­§š…„‡ƒŠ†{ˆ‡‡‹‹‡‰……‰Š†„†‡‚†‡„‚•³»³´¶···¶µ¸·³²··µ¶¹µ¯¯µ´­«¨««¥¡¡¡  Ÿ›™š™–”˜•Š‹’Š˜›—•›š‹•››žŸ¡¦§ ›¡­««®®ªª®®­­­®¯³µ´¼ÂÕëáÆÂËÊÃÃÆÄÅÇÃÃÈÈÇÍǺ»ÄÈÆÃÆÇÅƾÃÌÊÀÁÉÈÇÈȵš€pula^^XX_a\[ZYWVTT\WW[\[\\WWIBNTRVakkeeedddfikkjjjfkokhnpijmj^SPSUUWRV[Se’¿µ¹ÈÑØÜØæêëîçÔÒçäÛر…¨ÜÕËÐÎËÐÏÌÍÎÏÎÓÔËÉÓÓÏÐÑÌÏ×ØÒÑÑÓÒÑÑÓÐÐÔ×ÔÑÜÁ‹w}{y„„†¨ÍÐÆÅÄÈÉÁÄÌÍÊÇÏãјokcYy®À­»¾™¡ºÈÁˆbowgdhlikpnggmlhlnihjgnlkjkkllkjlkefjkhiplN."%$! &&!%'%##'+--.1-,.,)*.(*(""(*)& #% *.*^u^MPK'!"!! 
%)'')'%$'*,.0-//-++*))*)%#'+,)&  '& '/+\t]A766@YYXa[TWda`^ZVPINONLFMWJ0.2*%.-)-)&?fpjo\:'(''++09/5VSJ]wu]Pz¬¡`L¦ˆhopgcgc^dmvyrquiYdurqppqponrrqpooppsic|Š¬Ä¹¾ÄÀ»¾ÁžÁÅƽÀÈÈÃÂÃÁÂÆÅÅÁ¾ÂÆÿÀÄÄÁ¿ÂÇÈÆÃÄÄÄÄÆÅÿÆËļÌÇ•ossqprpmnpllkkqsc[XZ`jstwxxx{ucwŒ|hu{tpruwvvxuqtswutwvsrxwtwwy{qi ©¥£¡¦¬±¯²¼»¸¿Æ»¿Õêî退€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€r¶Ä¯­ ‰„€…ˆˆŠˆ†ˆ‰…ƒ…‡‹Œ‡‹‰†…†‚ˆ‡‡‡‰ˆ˜²¸´³³²³³´µ³µ´²´³³¶»·³´µ°¯¶µ²±²²­¨¤«©¦£££Ÿ››œ›™™“Œ‰‘‹‰Ž‘‘–™”‘‘“˜š™œœ›Ÿ§¨«ª¨«©¦¨£¨¬«®³µ´³½ÕëãËÈÌÇÁÆÏÌÇÇÅÁ½ÄÇÆÊÉÆÉÈÆÄÄÆÉÈÎÈÅÈÈÃÆÏâ‹ur…€fTYZZ]^\\[ZYWVTSRWUXXTWZUNAN_^Y^aSIS]VOUY\^W[`P.$',5@7+(+#.SlljhD&&-'$.09/5VSJ`xt]Q{«ž]O„£…krmfdib]enuvxvsg]huvsppponnmnoppomlqi`wŽ›´¿¼½ÂÆÈ»Âż»ÃÅÂÂÆÆÂÀÄÆÂÃÊÅÀ¼ÀÃÀ½ÁÅÂÄÉÉÅÅÈÈÇÄÁÁÄÅÅÂÆÉ»ÊÄqttrqsrnlnlnnmqpg`\\_kywxyz|tbv‘}ivywvtzsqrrtvsvssxzxwxvxvxtv{tl{‘˜™›˜šž™—š™˜œ  œ«Ñìë倀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€tÁί©¡Ž…‰…‚†Œˆ‚ƒ…‰ˆ‡‹Š††‡ˆˆ‰…„††††˜°²²±°®¯¯±±®´¶µ¶³°³¶¹·µ¹¹µ²²¯¯±¯­¯´³°­¬«¨¥£¦¦§¥¢ž›”‘ŽŽ‘Œ”‘ŽŽŽ’’•–—œ¢£Ÿ¤£¥£¢¥¨¦®«¤¦¶ÄÆÄÄÈØèßÍÊÇÆÄÇÉÇÊÎ˽ÅÈÆÊÈÃÊÉÇÉÇÂÂÄÂÁÁÈÇÆÏ̵ˆ~|z‚‚jWS[ZW]_\\]]\ZYWVUVVZZXXRD?8=P[]`e_ababdfecehiiiiinmkjnrfOMQTTTUUSVOSWQUZUS‰ÃÕÏÕàßêéæéçØÙîèÙÔ¯‚¢ÖÔËÒÙØÓÑÑÏÓÓÎÌÑÓÔØÛÐÑØÙÕÔÑØÔÓÔÓÐÐÓÒÐÓÕÔÐ׶€nyzy€|{¡ÊÊÂÍÉÂÂÃÇÌÌÎÎÏØÌ•opg[~¹Ä·¹¬¬ÇÅÃŒckshgillnnlhhjoljiffilmhfgjhhijikiddjkfyŽ…Z2',+%%()**&)))+,+,.-/.,*)))((),-.--$&#%/03,&FZQ=,%'8SYQQSOCT]UMXZPIRUYZTX_O-+..Hi\8+.&/TlliiJ-(,'%.09/5VSJaxt\R{«_M‚¤‡ipoeeia\fouytvxg[hvqrrrsutrlmopponmpjbxŽœ¶À»¿ÆÆÅ¿¿ÌÆ¿ÀÆýÀÅÇÁ¾ÂÆÃÃÈÆÄÂÃÆÃÁÃÄÅÆÄÄÅÅÃÆÅÄÄÈÉÇÃÃÆÈ»ËÄŽqturrsrnnolnnmpnc^\]`itxqz~xwrbuyizysvtquvvvsqvwuxwrtxvtwxzwvzrk~–˜“’“›ž–‘•›™—š§¢›¬Ôëé瀀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€s¾Î´®¢Œ‰ˆ†ˆŠ†‚……‚†ŒŠ‡ˆ‰ˆ…ƒ‰‹…‚‡†‡ˆ‡‡™²·°°¯®®®®¯«°²³´±­®µ¹µ±µ¹·³¹¸¶´±¯²¶­««¬«©¨ª«©©§ ŸŸœ›•‘ŽŒ’Œ““”•—šœ››ž¢¦¦¨§£¦©¥£§¤¢¥©¬±²³ÈãÝÌÊÊÇ¿ÁÈÉËËƽÁÅÆÉÉÄÇÍÅÃÇÇÂÀÁÆÅÆÅÆʸ—€~x^RW[ZY[]\^\[ZYWUTSUVTX[NAC;;JYZZ`ca]^cbacffggfffghgglpi_VNNRTSTVTQHGRXPNSUHn­ÕÑÉÕãëêçêéÙØëãÕÓ²‡§ÚÙÔÕÔÒÑÑÑÑÐÓÑÐÕÒÒÙÖÑÔØÕ×ØÒÓÑÑÐÐÐÑÔÓÒÖØÓÎÚ¿Šw€~‡„…¦ÌÌÄÍËÇÆÄÆÊÊÊÌÏÜЖopfY~½Ë·¼·™°ÈÈÀ‹cjrigilmnnkihilkkljihjkkhhhjjiijnoihjjjwsS8*#) 
*/*+*))+,,*+-+,./.*()))*--++,'&**+,6*5?6,'&(6R\P?=FIJOMQZUIHNONMHQ]Q1)*6ZlE/0"/[ngkjF)&*(),09/5VSJcys\R|ªœaK€¦‰hnpeeja[gotwuwwe[juqrpnorrooopqqqqppjczœµÀ¸¾ÄÂÁÂÂÇÂÀÄÇ¿ÂÄÅÂÁÃÅÄÃÁÆÄÁÁÃÂÁÄÇÆÄÄÅÆÄÂÁÀ¿ÁÅÈÄ¿ÃÆǽÍÆpstrqsqnrqkkllpoc\YZ_jvyzyy|vbvŽvfxwsxyxsoptvvvvrpruwwvuvuyyzzor’”‘“‘–¡š˜œ›•”™¡¦¢±Øíè倀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€tÀÑ·³¨’Š‡ŠŠ‰‰†„‰‡ƒƒ†††ˆ‡„…†‰‡†ˆ‡‡ˆˆ‡—³½²²³²²±°°³±«¨«­¯³²·µ±´¶µ´¸¸¶²°°°°¯®¯°¬¨©­°««ª¦¥¥¡ž¢”“’Ž‘Ž‘˜œ™ŸžŸ žœ¢ª§ª¬©¨©¨¥¤¦¤¥«ª©®¯¬ÆçßÅÄÊÅÃÊÎÊÈËʼ½ÃÅÇÈÅÅÍÆÂÄÈÄÁÃÉÊÅÄÈ·šŒ…tyƒnVNXUX[[[]]\[ZYWUTTYYYZUG@C=KY[Y\`_b[]ba`cecddddfhjcdlkWFHRSRPNOQSSMPXXLBBEEW™ÚàÐÙììêèìêÚÖèáÒѲ‰©ÞßËÓÓÏÐÓÕÙÑÏÍÒ×ÔÏÐÎÐÓÓÑÓÖÕÑÓÑÎÎÒÒÏÍÎÑÏËÊÙ½Šw€€‡……©ÍÎÄÉÉÊËÈÇËÌÌÊÌØÔ™qncW€¿ÏÀ¹›­ÆÍÅdiphijlnomkhhhjlnomighlljijjkkiilkffiikkeQ8*$&K\6%,-+))''+0,*-20*(,**+-*''*&$+)(+2*$($&)$ :OOA;@PZMKJRZTKLGGDC?JYO5!":bzeE)&$=epjpb8"),()*09/5VSJdys[R|ªšaK€¦‰hopdfj`[gpttxytd]jtutsrrqnlppoopqrrskc|’´¿»¿ÅÃÀÂÀ½Á½ÂÇÃÂÆÇÄÁÄÇÅÄÄÀÇÃÀÀÁ¾½ÁÊÃÂÈÉÄÂÅÃÂÁÂÇËÉÅÃÅƾÐÈ‘orspprpmqpjjlmqpj`YY_kwz|tsyu^r{gu{utwyuvvrsxxsstxxsquwwtwxy{phuŠ’Ž’•’Ž•–Ÿ ´Þñç €€€€€€€€€€€€€€€€€€€€€€€€€€€€€€wÄЯ­¨’„„‡†…‡…„‡‡‰‰‰Œ‹ˆˆŒ„„Š‰†„ƒ~†‡€~•³¸²²´´´²°¯²±¬ª¯°¯°­´¶¶¶³±´³´³²µ¸·³³³¶·³®±·°©©««¬«¤žŸžœ•’“ŽŽ‘”™Ÿ¢œŸ£¥«­©¨®®¥©©¥¥ª©¨°­¯ÉçÚÂÂÆÁÂÈÊÇÌÏÉ¿ÁÅÆÇÇÄÆÊÊÄÁÅÇÅÅÆÍÅÆȤ…kx…wdWSUPV][Z]\^^\[YXVVSRZU@=A8,#2UijiiV+1/%&)09/5VSJdys[R|ªš`L¦ˆhopdfj`[gpttxwtgZewwsoonmmoonmlmnprvja|”ž³½¹¼ÇÆ¿ÀÃÇĽÂÉþÅËÆ¿ÂÈÄÂÆÇÀ½¾ÄÇÃÁÆÆÃÂÅÄÂÂÆÄÄÄÄÆÇÄÀÂÄÆ¿ÒÊ“orrpoqplmmjmoorod[WZboxytz{xzuaqou~xsxyupqwxvwxxwvuttuvxvwtu{uiŸ¡–”““•—”œœš³âòç䀀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€uÅÔ³°¬•†ƒŠ‹‰‹Š‡‡Ž‹‰Žˆ†Œˆƒ€‚…ƒ‡††˜²¹··¶µ´´³²°¯®°³³±­¬³³­°º¹¯·¶²°²¶·µ··¸¶°¯³¶¯°®­¯«¦§¥¡¡¢œ—˜œš–Š”››˜™œ¥£™¨°¬ª«©¦§«¦¨§§ª««­­µËá×½»ÄÂÅËÈÄÎÓǼÇÉÄÇÌËÊÌÂÃÊÌÈÆÅÃÊÆÕŘwiyˆqYXZXYZYWWY[Z[[YZZVQW[XF8=HJHS\^[[]^^bb`aeebfeddegfehjWCDGFKKJKNPQPNKYZQKC3/*))()*&+..--..,1/0+$(+#%)&"&-+' 
"DJFEEMTA4;@>>GKDEGBJZQGUTMMWWY`V1(*4QkT-1:Xnigjb?+&-+'*,9<0!9XUMdxs]Rw§œ]L€¤Šjoqgcib[frwqswtb[hptqopqonnlpqlilppxi`~•›²Á¼ÀÇž¼½Ä¼¼¿ÃÂÀÃÉÇÂÁÆÇÂÁÃÄÀÁÄÃÃÅÃÂÄÅÄÃÃÅÈÃÂÃÃÂÂÅÅÄÃÃÁ½ÎÈ–rqrsspoovkgnrnnse\[VXn{zuyxtyu_s¡˜qr{xustssuwwtrtuvuuvvvryxwsw}qbÑß¿ÈÄ»¼Ãø´¶´²°¶Íåîê怀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€sÇÚºµ­–ˆ‹Œˆ‰Š‰Š‰‹‹ŒŒ‰ˆˆ‡‚€„†…ƒ„‡„„›·¿²´³²±³³±¯²´²°°¯­°¯®®±´µ´³µ¶µ¶¸¸¶µ´²²³²°¯®°¯¯²±­®®¨¦¦¤¢ œ›“Š—ššž £œŸ¥ž¢¯®«©«¬¬ªªª«¨¦«±²²³µÉåÚ½»ÇÁÁÇÇÃÈËÁ»ÅÊÊÉÈÆÈÉÃÆÎÌÉÈÈÆËÎÒ´Šˆqr‡oVRXYXXY[[YWZ\]\[XTQWZSFCGNUYXVVVWYZZ]^\]`a^``acb`djkT=9BGFFLOSSPNNORUPKJC78605=1-6?AGPE?FX^PJQYQOXZ[\H/+/*.HbrmlmmiZB-.0/,)&&+58,6URKdxt]Rw§œ`Oƒ¥Œkqsgcib[frwywwtd[gnmqsommmkjnqpoonltkbz‘´¾¸¿ÈÆÃÅÅÆÁÀÂÄÃÁÃÈÇÆÆÄÄÂÀ¿ÀÁ¿ÀÄ¿ÂÁÅÁ¿ÇÊÅÂÃÄÆÄ¿ÀÄÇÄÆÈ·þŽotuppsrmopokjlnmd[[X\oyxzytqzy`qª ojxsqyusstvwwvutstwxwuwutzxuysdÕçÒÙåàßÜßáàááÝâáåëíëé耀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€tÇظ°¨•†Šˆ†ŠŒŠŠ‘’†‰Šƒ„ƒ„‡‰†‚€‚Š‡—´½·µµ¶¶´´µ³²±³³³²²¯¯¯¯°±±±´³²°¯¯°¯´¸³¯´¶µ¹¹µ´µ´®ªª«««¨¢¡¥§¢Ÿ••››š¡«§¥¢›Ÿª°³³¯¯°®©©ª¦¤©°²´·µÀÕÓÁº¹ÂÀÂÿÁÿÂÂÀÃÉÈÄÅÈÃÄÈÅÅÉÌÇÃÁ¥‹‰„~ŽŒdNUXVXZ[YWVWVWY[ZXY[YOGDDMVYXWWYZZ[]\]^^_`aa`bbbfi_Q02>C=CLJFGJNRQNJQMGCAAFNNNi¦ÐǾÑàéäêíßÜäß×Щ‚§Ú×ÎÓÔÓÓÒÒÖÔÔÔÖÖÔÐÍÏÒÖØÖÔÒÒÚ×ÓÑÑÓÐÍÓÕÒÔÊÐÙ½}…„‡ƒ…¥ËÏÊÊÆÈÊÊÉËÉËÍÓâÓ™mjfYw±ÉÇÑÈš£ÆÕÇ`hpgfihlllkggnjlnmhdfjmjjkjfefikkieehlmgi_m“x2"(>jP%#+)**%%(''*-,,-//.//("! 
)-+)+)$''#!'86;7/IBFMIHKHIDP_[QLHMJLVX[]K/(.,'/9FV\ZSG4'(-.)&)((/48,6URLdxs^Qx§œ`O¤‰josgcib[frwywwtc[jtwsqtqmnsmnppoonmrldxžµ¼´¾ÇÅÃÆľ¾ÀÆÉÈÆÅÉ¿ÄÄ¿¾ÁÁÀ¿ÀÂÁÀÃÆÆÄÅÈÆÂÂÍËÊÇÅÆÇÅÁÁÂÀ»ÊÃnuvootsllljikppmd\ZY^owwwyvt|z_p”lo|wtxstuwxwspruuttvwvtvwzvsztg“ÖãËÒáÝààâäääãàââæëëëìꀀ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€uÃÓ¸µ¬“…Š‹†‚‡‹‹ŒŠŒ‡ƒˆŒ†„ƒƒ‚†‰ˆ†}~ˆ‡‚š¼Ã¾¸·»º³³·±´»¼´ª¬´²±°®¯¯²´°²´²¯®°´¶·°«±´³µ¸²²·µ®«­¨§«¯¬¨¨ª¦¢˜•™š™›¢¦¨¢œ¥«¯²±­®²±««ª¨¨ª«¯·º¸ÂÔÑÀ»¹ÀÀÁ½¼ÁÅÁÁÃÂÃÊÌÉÇÀ¾ÄÊËËËÈÊκŒ†’~w~Œ~ZMWXXYZXWVWXUUVUWWZ[YKA@FQYWXZZXVX[][[[\^]^_`_^ac\J918@>=BHGIMQRPMLLOKF@AIOPNJV‘ÌŸÓâèäêíÝÚãÛÒ̦¤ØÖÈÌÑÕ×ÑÐÓÓÒÓÓÕÕÖÖÐÒÕÕÔÒÒÒÕØÙÕÒÒÕ×Ö×ÔÕÍÒÚ½Ž}‡…€‡„†©ÐÓÊËÆÇÇÍËÉÃÃÈÎÛÓšmjfYw°ÒËÍà´ÒÔÊelthgihkllkfgnmmmmifgjmkjlkhggijkhedhlmkk`n„c+!'/UM/&,(('&&($'+-,+,--/0.(!"$&)*+(&%(($$IFY^D=NTJKX^XVSGHKQXVZ_P1%*+('(/,75-.-*,,,))-'$-58-8WTNdxt]Rw§œ`M€¡‡gmqgcib[frwvwzxeZhtvspnnnnnqonnnoopqmdwŽžµ»¶¿ÇÃÀÄÂÀÂÁÂÄÈÇÅÄÈÇÆÃÁ¿ÂÅ¿ÁÄÃÂÁÀÅÂÃÄÂÄÈÉÇÄÂÁÃÇÆÀ¾¿ÂÁ¾ÎÆ“nuvootslkiikmnnoe\ZX`ouwuyyv|w_r•ˆjt~xwtvusrsssspwyvuy||wwsuw{}maÓÝÄÍàßààßÝÝÞàâäáæêêìí怀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€sÅÙ»²¦“Š‹†ƒ†ˆ…ƒ‹ŠˆŒŠ‚‚†‡†…‡‡‡„Š‰ƒŸÁÈÀ»º½ºµ³·µ¶¸¸±«°º¹²¯±¯­±º³±°±±°°°®³°­°³´¶µ®¯µ´¯­®±¯²µ®§§©¬¥š—››œ¢¡¡¤©¡ž¨¬³³¯¬°µ±©¬­­¯¯¬¯·¸µ¿Ó˶´»¾À¿½½ÃÅüÄÈÊÌËÉÊÄÂÅÉÈÇÆÁÉÒ²|„ƒfoŒnVTWZ[YVVWXWVRTRQSXXUJ@59JVVUVXXUTWZY\[[^_^^ac^^bZG848<<>BEFJIJLNNMLLQIB?DPRIENMj«ÍËÍãéåëëØÖâØÍÈ¥£××ËÏÓÖÕÑÏÓÔÓÑÑÑÔÖ×ÒÓÒÒÑÒÓÔÍÒÖÖÔÕÖÖÔ×ÕÙÒÖÛ»‹|‡‡ˆ†‰§ÍÎÆÈÇÊËÉÊÍÊËÎÏÙÔšmjfYv°ËÄź¹ØÖÊdlshfgikjmojehnlmnlgfhjnokggiijjjhefhkpljmtg@!''&+47/()%$&&))&#)-,*+.0-.,'! 
"$&),+)''"!&7?4;=+!/GRR\Q58KLIOZ[V\]RPUZ]VZaR/#(-**)/),)(,*&*))*,+&'/48,8XUNdxs^Qx§œ`M~ …flpgcib[frwtuzyeZjxutrppqolsolmoopqqlcwŽžµ»·¿ÆÀ¾ÃÅÆÂÃÃÄÄÄÄÅÃÅÆÆÅÅÄÃÁ¾ÀÄÃÄÅÃÂÁľÄÊÅÁÄÈÆÃÅÈƽÁÇŽÉÄ“otuppsrmlklolhjpg]XW`otxw{ytyv`u™ˆiuzuvrrtuwwvtswxusuxvptyvuuz}nd”ÙãÊÔãÝÕØØ×ÚÛÝâáÞâèçëê߀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€yÆÕ¸³©•ŠŠˆŒ‹…€ŠŒ‹‰Š‡ƒ€‹ŽŒ‰ˆ‡†‹„Š‰ƒ ÄÈÀÀ¿½¼»º¹½ºµ±°²´µ¸´´µ³¯²¸³¯­°´²­©ª°³±°³¶·´±³¶µ´´±®­°±­ª®±­¥š–™˜š ¥¤¥¨¢›¥®³±®­°²­¥®®¬¬®­°·¸³»ÍDz±¶·¹º»½½½¼ºÁÆÉÉÄÃÉÌÊÌÊÆÅÈÆÊÆ ~Šsd†›ˆ`UWTWZXWWXXVTUXUPRUOD>86ATZY[TUVVVWYZ][[_`^^ada^WG849368@CEKLLKJJMOSEDIGECDC?AFKJOQMMD204)5URLdxs^Qw§œcP¡†gnrgcib[frwtsxzi\hsxusrolnsqmmrtqnnrjayœ³¼¸ÁÉÅ¿¾¾ÀÀÁÁÂÃÄÄÃÂÂÃÅÃÁÀÂÃÃÂÂÅÁ¾¿ÂÄÅÅÂÀÂÅÃÅÇÇÆÈƾ»¼ÀÀ¼ÊÁŒrqrsspoojijnonnqk^VT`nszzzvt|zar¦•kpyqqwoqrtuvvvuttwyxy{yuqxyxzqbÖèØÞÛ½¾»ÁÒÜÞãÝÜæêäçæØ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€tÄÖ¼¸¬”Š‡Šˆ…‡ˆ„‚€…ŠŠ‰ŠŒŒˆˆ‰ˆ‡ŒŒ‰Ž…|›ÄÌÇÀÂÆ¿¾º»»µ²¹¹°¬¯µ»º³­«­³«­³±­®¯±®¬­±´²®®°³³³¸·­°´µ±­°°­°®¡’–› šž§¨¢¢©ª¬²²®³µ¬£«¨¥ª¬­³µ¸ÅÌÀ²¬ª©·¾¹·¼½·µ¸ÂÇÇÀ¼ÆÆÆÂÂÆÅÇÏ˪‡„Œ‚y„‘‡nXVYXWXWXYWSRTURRTPE>>>=FU]YVXYSSYZVW^YY\`a^^adZF1,47457:>ADEEJIJMMKLQOFCBBDD?HHGLLZ•ÔìèçëìÜÑÝÎÊÅ } ÔÚÑÒÔÒÎÓÙØÒÕÓÐÒÓÔÕÕÓÓÕÕÖÖÒÒÔÐÌÏÓÓÓÍÓÔÕÊÏÙ½‡yƒƒƒŒŠ¢ÌÏÇËÊÌËÎÇÈËÌÌÒãØšlkfVr®ÎÌÔɨ°ÆÝÌŒakpghfglmlmigiqmjkkeelnljigefhnnheihgjpkdtzU>IC,&(&(+-)*'%'$"%&)+,++-.1+&#! ## !)+)+,)##/DW^^]\\[ZZ]^`^\Z\]Y\__][\^__]\\\\]KKKJJJJJHGFEDCBB??=:62.,,/'5XXNcws]Qw¦›`N}œ„ipqichaZfsxnwzsa[ittrrqpqqnlnprrqomxqf|’˜­»º¿ÅÃÂÄÁÀýÀÆÅÀÂÇÅ¿ÀÃÃÃÿ¾ÁÃÄÄÁÁÅÄÃÿ½ÂÅÁ¾ÅÃÂÈÆÀÁ¼ÁÄÃÃÏÃousoqqmlnnmklnood[[[`muyy{yv{u^t«žpnyursvpqvwuuwwyvruwvvxvu{yuxpd—×äÝÚÍÎIJ§¸ÉÒßãßÞäæâèçÖ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€n¼Î²®¥ˆ„…‚‚„}~€~€…‡……„„ˆŠ‰…‚†}¤ÍÏü»»µ¶¸¶µ·´°°®¨£¨®¯ª§©©§©¨¦§«®®­§§¨ª«««ª¨®¯­®²­¦«®µ¯ ¤°¯¬¦›”•—•“˜—™››£«¨©ª­­©¦§©¬¨¦¬¯®¯´²¹ÂÀ¶­¨¨°´±²¹ºµµ¹ÁÃÃÀ¹¾ÁÄÅÅÅÂÃÊÊ¡||ˆ}Š•‚hVQX[VZWVWWTSTSSWXL<6;DLUXXXVUWYWSTYZY]\]^][\`RB30368;569<@CFGIHHJIFHKFCDB<>DEJJEDNNg®âïêêç×ÔÔÌľœ}¡ÔØÎÏÑÒÓÖ×ÓÏÐÒÖ×ÒÏÓÒÐÒÕÓÑÒÒÒÒÕÔÏÑÔÏÎÒÒÕÌÑÚ½‰{………‘Ž‹¥ËÐÌÌÆÉÏÌÊÌÍÍËÍÜØ›mjeUt²ÑËÔа³ÃÖÎŽbjphiginonoliknmllkgeilmmjfefhmnifhggkihimqW8;D7 &&)#&)(()&!"'),-,-./.+&! 
#&'#%&+/+)/0*"+CX]]\\\\\\^\Z[\]ZXRTUTOLKLGFEDDDDE;;:87543....----.-+*)+,-11) 7UTLdxt^Rx§œaM~¡‰jorhchaYfsxxvuse\jwtomnoqrpmoqrrqnmrkay˜®½¸¹ÂÅÀÀÁÄÃÀ¿À¾¿ÃÆÃÃÂÂÅÅýÁÂÁÂÁÀÁÁÁÅÉÃÀÄÉÇÆÁ¿ÄÉĽÀÃÃÁÀÍ‘ousoqqmlmnopppoofZXYapuvtwut}{`o¦™lkwutvsvywqqsuuwxvsstuwutzxuwod˜Øâ×ÔËÑ׸¦¼ÏÍÖäãáåæâæâÑ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€lºÌ°ª¡„„‚€ƒ}y|‚‚}~„ƒ€‚††…„‚{¬ÓÐÂÁÁ¿º¹º·´¶¹¶±¯®«©°®¥¥¬¬¥£¨§¥«¯­«¨¨ª¬¬©©ª««ª«¯ª¦¨««°¬¡§²±²¨ž˜•“‰Ž“˜˜˜›Ÿ¡ ¡¡£¥¦§¨¯®¬«°´´±·¹½À¼®¤¤«¬­«®±²¯²¶½½À¿¸¸¿ÃÄÄÁ½¿È¿”t}Œ„|‚Œ}m[NT_`\WTUVTTTTSXWI:BGIKKMNMIHIG@@ABGKHPQOESOK‚ÒõìêëÖÓÔÎû›~¡ÒÕËÎÐÒØÚ×ÕÌÏÑÒÕÕÒÒÏÍÐÔÑÍÐÕÔÒÐÑÓÒÑÑÎÏÎÓÍÒÚ¼Š{………‘ŽŒ«ÏÒÉžÅÌÉÉÍÎÒÑÐÛ×›njcTw¸ØÌÑ˪¯Ã×БciphjjinonokiklmmkkhgiimnjfefglmkggfhmlkkcZchYC:'"%&+'%)(')&""$&()*+,-+)$#)*)').2-,3/' #:NRQPNMKJIB@>>@@?<9::840/./.-,++,,++++++++++,,----000000//11+#:XVNdyt^Sx¨bL~¥jnshchaYesxyutwj^hvyplmoopoopqrqomlpi`x˜¯¿¸»ÅÅ¿ÃÆľ¿ÁÀ¿ÁÄÂÃÇÄÁÿ¿Ã¿¾ÃĽ»ÀÂÃÇÆ¿½ÃÉÈÂÃÆÄÄÅÀÄÅÄ¿¾ËÂ’ousoqqmlnlkjjknpfZWX`ouvtwtpyy`o©po{wtvqsvvvwvqmttsvtt|uusywuwnd™ÙàÒÐËÔìÊ°ÁÓÊÑèçäæåâäÝÊ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€p¿Ó¸¯£†…ƒ„†ƒ„ŠŒ‰†‰ŒŽŒ††‰‰ˆ‰ˆˆƒ„~¬ÒÏÊÌÍÊÅÄÃÀÀÀ¿¶·¹´³º¸°­²±ª«®¬©ª­®¯®¯°±°°¯¯°­«°±­«±³·³±µ·µ¶º²§™“‘–——••˜ž¡žœ¢¦¤¤¤ž¥¤¤¦§¬®¬«±µ¸µ¥›Ÿ¤¥¦©¬¯°°«°¸»½½¶ºÀÀÀ¿¼º¿Ç¹p{Š…}{…xfXT[^\^WRSUUTTQMOPC9@MSTVYYWWXWTU[[VUYZ\YV\bYI886203675558BGIHNRRRSROQJKQUYXPQNLMUQIRPY”âìĹÆǺ³–u‘ÄÏ¿¾ÂÄÁ¿ÁÃÂÅÁºº¾ÂÅÅÂÅÌËÈÈÉÉËžÄÊÉÉÍÊÉÑÈÅÍ·ˆy„„ƒŒŠ«ËÒÏÊÄÊÉÃÇÇÆÍÊËàØ›mjeUt²ÆÌßÍŸ²ÍÑ͘gfrlgfinonokikknnllifgkkkigfgimiihfhlljsdaŽ©‹qaOBDRT>*"))%')%"&%%'))('#"%*)$%)'(,-.5:76431,#!%&(''&&%%$%#!!##!$"""%$! !"  
"$%$#"$$$'@XSLdxs]Rw§œ`M}Ÿ‡jorgag_Xdqv|wvwhZgyxrppnnnkllmnprstpi_w–¬¼´ÂÉÂÂÆÂÁÂÂÃÂÀÂÅÅÀÀÂÂÂÁÂÂÄÈĽ¿ÃÁ½ÀÃÁ½ÁÄÂÀÁÁÁÂŽ½¿ÃÄÂÁÍÂousoqqmlkgimniimh[WV_nvxytpr{u]t¥˜kiwuuxtpqtwxuptvwwwxwtpvvvvz}na”ÔàÙÜ×ÞéàÔÙßÝãæèåÞÞæåÒ¼€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€uÂÜ´ª™Œ‰‰‡‰Š‡ŠˆŠŽŒˆŒŠ‹ŽŠ‰„ƒ­ÕÕÍÆÅÓÞÓÆÉÄÊÉÇÌÉ¿ÄÀ¿ÀÀ¾¹¶¸¸±¬²·¶´´´´²³µ´±±·´­­¯¯°³µ¸·²¯±´º¹ ™ž˜š¡˜–Ÿ£Ÿ¢¯ª­°®°®¦¤¬ª¦©§¤¨¦¤­³ªž™•š˜”“˜œ™š ¨ª®¯¨«´²µµ­­µº›ukvrs~}zŒ\LW^ZVUVWUQQSIA?>;;8569:84889;?CFHMPPLLPTTSPQQORXWQOXORYMSRRiºç̽¾À±«”zšÍØÆÆÍÏÈÆÍÑÒÍÇÆÉÈÅÇÊÆËÔÑÈÆÈËÄÃÇÅÃÄÅÅÃÃʾºÃ®‚t~~~Š‡…¦ÌÖÍÅÄÌÈÒÏÉËÕÑËÜØšlkfVr®ÉÊÛÍ °ÈˢggslfeglmlmigilpnjiiggjmmieehjnhhhfimklkebgoqdEEYPIQ;*$*("$('$%$$%''&$$'+,&!#()$$(+397740/.*""$"!!%%%%%%%%++,-/01155568:<>E<9>R]TQfzv`Tz©žaL~¤Œjnsfaf_WdqvxxwscZhuysonnrrnrrponnnnvodz–ª¹ºÁÈÇÆÇÃÀÂÃÆÄÀ¿½ºÁÅÃÁ¿À½¾¿ÂÅÿ¿ÇÄÀÃÇÄ¿ÁÄ¿¿ÂÅÃÀÀÿ¾ÍÄ”ousoqqmlpkjnnklrg\ZX^lv|tvvuyrYn£™op}yuutrsusrtutwxvvttxqwwwv{~ob•ÕÝÓÖÕááâáäããéåéæÞÝæåϹ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€tÃ׿¹°›‡†…ˆ‹ŒŠ‡‰ŠŽŒ‹‹ŒŒŽŒ†‡‹ˆ†«×ÙοÃÎÍÍÒÐËÌÈÇËÈÃÄÄÃÃÅÆÄ¿»¼½µ±¸¼µ°´´´´³²°¯®¯²³°­®³²±²²±³¶´¸¶Ÿœ¤œ—¡¢Ÿœ›› ª²¯¬®²°ª¥¦«¬¨¦ª­®«¯±®ª£–Š“”’“˜™—™š›¢«¬©§¦ª´·³«­·µ“pnwpt~}p|nSNZ`YPOSSQQPB<=ACLVWUUVVVWWWWWSNRYXQ[WC5<=8;<=74:;8:<56<=@EDLNQQRSTTYTQQRUXWQOQURNLILQV†ÓÙ³½½³¥Žy—ÌÔÇÊËÎÏÌÌÓÕÚÛÓÐÐÊËËÐÒÒÒÎÌÑÔÑÍÏÓÑÍÎÔÎÎÑÌÈÙņr}}~„¢ÏÐÆÏÏÌÊÊÊÊËÓÎËÞ×—nodVz´ËÉØÍ¥·ÑÙÉ•bhthhfhkmmkihhllkklhefjjjiiijklnkebejlijhg_fviG^jL?H6$$$%&((&$#"""%((%',-+(##&"%),-035355/-/&!&'% " 
!#$$%%%%&(*+.10--279;989:;=@BA9>IMQVWUUUVVVVWUUSRRUXYXF309??>@948;859957;:=DGINQRPPRUSTWWRRVVPRTSMJGBOROj±Ð¹³µ­£z–ÈÎÌÏÎÏÐÍËÏÍÏÐÎÓÖÌÉÍÐÐÐÔÒÌËÐÏÏÔØÔÏÐÕÐÑÔÏÊÙÃy†‡Œ‡ˆ¨ÎÒÊÈÃÅÇÊËÈÆÏÏÎÞÛšmkaU{¹ÍÈ×Ѭ»ÏÓ̘cishgehkmmkihhmnnmjfgllnomjijmlnmieeghmniifgme`th:(31%%$$%''&%#$$$"#%'*.,'%$%'%$$)/3216310.-&&+-*()++*-36533369<@@@@ACEFGIIEEINPNNMMNRUVZZTW\^b`bb_]_decfgjfd^RTexv`Ry§šdP{œ‰knqhagc[epxuuxyh^jtspqpnoqoqrqpppposg_|“˜­º¸½ÆÆÄÅÃÄÄÄÄ¿ÀÀ¾ÁÀ¿¿À¿¿ÃÆ¿ÀÆÄÁÁÂÄÁ¿ÁÃÄÄÄÅÂÀÀÂÆǾÃû¶ÌÅ‹kttqrqmmikoojgjpf]XZ`mwxswwtzu]q¡gmwtvxvussuywtvtutsuxvtxwxttype‘ÉßÜ×ÖãæÜßéæÜÛàåçÛÒâãƳ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€sÁÕ¼·®šŒˆ‡‡‰‰‰‰ŽŠˆ‰Š‰Š‹ŒŒ‹ŒŽ‹‡‰ˆ…¤ÐÕÉÉÑÔÏÐÒËÑÍÌËÉÊÎÏÍÎÍÉÆľ¾¿¾³­¸Ã¿¹¹¹¸·µ´´»µ¯®­­®±³°±²°±³²´´¡’š¡žœž¡£¡š ¯­®¯ª©¯°¦¢©¬«ª¬®°¬°¶²¡”‘ŒŒ“–’˜ žœ¤¢¡¤¤¨®«¦¨¥‡lkmfr€rn{…sZU[YWXVROKB?;CQUTUVTTTTUUVVSVVQOSY\H3-6:=?9<4389556646:9KTTQPQSSSTTUUUUUXUPSVJ7&'-36:==87;91054335;@ABDHFGKNLLNONRVVWVSQUUI?CLNUaLEŒÊ¼¢¤Ÿ˜ˆv“ÆÌÃÇÆÅÌÏÍÊÎÐÒÏÎËÅÊÒÐÌÉÌÕÖÑÍÒÒÐÓÕÕÕÊÌÔ×ÔÑÝÁ‰z††ƒŒ‰‹«ÊÉÇÌÃÆÑÏÒÑÏÑËËàÙmidVxºÉÎÞÍ¥ºÏÍΘchtijjhkmmkihhhklkifeilljhfeggmklljfhllljgd`jƒn1$&&#%&$""$%&&%# !$%$'&#!$%&('$$&+./.0242140&"5N[]\a_ab`[[_Z^ca][^baa```abcege`^adecge]]ce`cc]\]^gh`hi`\`eeegf^^^RNgusaSu¤›bP~žˆimthagc[dqwuvzwdZivqqooqmkopnnoomlnti`xŽ–¬¶·ÂËÅ¿ÃÄÆÁ¼ÁÇÄÁÁÁÎÍÄ¿ÂÀ¼¿Ãľ¾¿¿½ÂÁÀ¾¼½ÁÆÃÂÀ½¼ÁľÀÃÁºÆÀktuqrqnnjhiklllmd[VW^kuvtrsuyn[y§hovquwwuuttstvrutrtwvvvuqtuvxkj”Ñå×ÍÑßãÝßããàÜÖÔÙÜâìÙ²¤€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€rÃÛÅÀ¶Ÿ‹‹Šˆ†‡‹ˆ‡‡†……†ˆˆŠŒ‹Š‹‰Ž‡„­ÚØÇÌÒÕÐÊÈÊÎÓÌÅÈÉÈËÌÎÐÐÍÊÆÅÅÍƺ¼ÄÅÅÃÂÁ¿½¼»»½»¹¶°­±··°°´±®°±´¯¡–™žŸ œœ¡£ŸŸ©­¯³³¯±®¥£«°­©­±²¸³±¶º´¥—”“••””’‘’‘‘”˜–›œž¢¦ž…hjz|zxxtxqu…€fWWUSLB@?;;MWUTTSTTTTUUVVVURSXXL6&'-07ENKGC?>>4-/6666;CCBEHDDJMLJLNNQSRTWVUYUG;BMQOD9]°Ð°¡ ›”„s’ÅÍÁÅÅÆËÎÌÍÎÍÏÏÑÏÈËÒÑÑÎÎÒÔÐÒÓÏËÍÏÏÐÇËÒÔÐÏÛ¾‹z‚ƒ„‹‡¦ÑÏÄÍÌÉÈÌÏÏÏÐÈÄÙÖœlieVv¸ËÑàϦ»ÓÔÍ—bgrikkhkmmkihhooljheeihkmlgffhkklkiikomlgjiZ\tQ%#*%%"&%#"#$%%%##" 
#$#$$""%%%''&%'),./3/2752/+*#)BZa]Y\\^a`\]aZ_cd`^^_bba``abcdfd_]adefhe_]ab_bb\^_ahhajkb^eihhig^_`SNeus`Rv¤šbO|ˆimrhagc[epxtuyyf[hssnpspmnnqnmppllovk_v‹—¯¹´ÀÇ¿½ÆÇÄÀ¾ÅÉÀ¼¿ÁÁ¿Ãý¼¾¹½Â¿¿ÁÂþ¿ÀÂÂÂÃÄÅÃÁÀÁÄľ·ÂÉÁ¹Ê‰jstpqplmkjjllllmd[VX^kuwvttvzoZv§—mpytqstvwxvuttttvvuwywswvwssxoc’ÓæÕÑÚåãßßáßààÛàæâãëÔ«¢€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€sÃÙþµŸ‘†ŠŠ‡††‰ˆ‡…†‡†„‹Ž‹‰‹ŽŒ‰Œ‰¨Ó×ÍÑÑÍËÌÎÐÐÒÐÏÏÌÉËÌÈÈÌÏÌÉÈÄÉƼ»ÁÇÉÂÃÃÂÁ¾»¹¹¹ºº¸´´·¶¯°´²¯²´¶¯ –šžŸž› ££¡¢®²±±³µ´¯¨£¨­­¯²´´´´·¼½´¤—ž˜––”•—”””’’””“—˜—–š™š¢¢‡nrxmltoqor}~qcXROI?:=>FRVTVWUWUUUUVVWWTQSWO<*#$0:DSYRKHC<:6..6;;9;ABDJGCDJMIIMMMPQOQVWQXUC8>GF2-K–ÙÑ¥¡–Ž}mŽÄοÂÃÆÊÉÉÎÍÊËÌÐÎÇÊÏÐÓÒÎÐÕÖÏÎÌÎÑÏÌÍÉÍÑÏËÊ×¹‹{ƒƒ…’ˆ¨ÐÎÂÌÔÒÇÏÏËÇÌÈÆ×ÑšljhVr´ÐÌ×̦¹ÒØϘbeqgiihkmmkihhllklnkggjlnlhfgjnonjghkmnmghhc]N."'("!!#"#$%%$#!#$$#"!"%%%&&%%&()(').25037511//+4L^`\[[[^a`\\__`bba```_^]]]]^_bdb^\`decca``ceffe_bfgkidlmc`hkhghf\^aUOdvu^Pw¥˜bNy›ˆilohagc[dqwuuxwf\hquopspmnopoooonnnqiby›°·´Àƽ¾ÉÈÀ¿¿ÄĽ¹½Á¿Äž¾ÄÄÀÁÃÃÀ¿ÁÁÁÀ¿¾ÂÅÅþÀÁÁÄËËÄ»ÄȽ´ÈÄirropokknnmlnpold[VX_kuwyxvw|qXp¦–loytrtvvustutqusuwuvwvtxwxttypa–ÓàÖÕÙâäáßÛØÝáÞÛßÞßæÓ²©€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€wÄÖ¼¸²¡–‰Œ‹‡„„…‹ŒŠ‡ˆŠˆƒ‡ŠŒŠŠŒŽŽŽ‡‚¦ÓÕÊÓÕÎÎÒÑÍÕÏÎÒÑÎËËÍÉÉÍÌÇÆÈÇÅľ¹ÁËËÈÇÅÃÁÀ¿¿»¹¹»»¸µµº²²¶²®¯²¸¯Ÿ—šžžŸ¢¢ Ÿ ©®ªª°´®ª­ª¨§«°²°®®²»Äø©Ÿ ž™™ ž–’“–‘‘–‘•Ž‘˜™™œ˜‡nilkotvqqtrlqyy]SMH?=DJUXTQVWUYUUVVVWWWSUVO@0('(8GMMJC>;;>@EOFCELLGHNNNPPOQTUY`YA2;C?;RÓïÑ¡••‘ŒoŽÁÇÁÃÄÆÆÂÁÈÄÄÉÎÒÎÅÇÕÐÑÑÎÑÖÖÓÍËÒ×ÒÐÕËÏÑÍÇÇÓ´‡{‡…‚‹¥ÄÌÏÑÊÊÇÍÎÊÇÏÏÈÒÔœjddTs¸ÍÁÏΫ·ËÓЙbeoegfhkmmkihhkmmlkhgkpnlifgikimnkikmlknhcm‰C%$#$#$#""#%&&$"""#%$!$(($$&&%*(&')-./64531340//;Q_`_caacea\Z]db`_`aaaeeccbcddbdc_^bfgifedddfhfd^afgidgoncailgjkg\^aUNcwu]Ox¦—bMw™‡jkmhagc[dqxxvwue\irrpmoroovoqqnnppmsjawŽ›³¼¸¾ÄÁÂÇÄÀÃÁ¾¾ÀÀ¿Á¿¾¾¿¿¼¾ÄÁ¼ÁÅÀ½ÀÀÁÁ¾½ÁÆľÃÆĽ¼ÅÇÁ»¿ÆĻſhqqnonjjipqkinoke\WX_lvwwvvw~uZp¤’kqztttstsqrvvtqvurtutsxwtwwx{ndžÔÛÖÕÒÚçààãÞÜßâàÜàãäÚĸ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€qÁÙÆ¿²ž–ŒŠ‹Š‡ŠŒ‰†‚†‰‡†‡ŽŠˆŒŒŠŽŒŽŒ†…¦ÌËÄÍÕÓËËÏÒÒÏËÊÌÎÎÍÏÏÐÑÏÏÐÏÍÈÅÄÂÁÈÏÈÄÈËËžÄÀº½Â¾»º·³³³´´³°®¹µ¤—™›› š›œ ¡œ§ª««§©¯¬¢¦«¨§°²¯¯µ½ÀÅù¯¤Ÿ£¢ 
–’™“ŽŒ““Ž”•“”šŠm_ksnmsqhouki}ŽyVKQG@JT]RVUTWQRTXXRRXYVX]W@65-*+CLG;,*,;B>30246=?88DEEPIBEMLIJJJLNONNQTWVYC/9B?h´Ñá梒•Œ†|nŒ¾ÇÀ¾ÁÆÄ¿Ã˾ÂÉËÊÅÂÌÖÏÏÏËÍÓÓÓÒÍÐØÓÌÒËÍÏÒÍÐѽ‹z‚€Œ‹‰ªÉÎÎÐËËÉÈÊÍÎÎÇÆ×Õ™ljeWy¹ÉÆÐ˯½ËÑǘaipifigllklifhmllkhfgjlpnihddhlnomihkolon_|·™>$#$$ # !""##$$$" !"" &')('&&')&&)++.32454477657;F`bUbdabdb]\_defedcccadgea`ejcikfbcgigikd]ekelg`]^bfheig_\__ZY_[]b[RMg{u\Qv¤›cSz™‰ijsk`b`]hqtstvwh[gutpqqooqqppqqqponvoe|‘–ª¸¸¾ÂÁÅƽ»¿ÁÂÂÂÁ½º¾ÁÀ¿Ã½¼½¿ÁÂÂÂÁÀ¿¼½ÀÀ¿¿ÀÁÂý»¾¾ÂÄÀ¼ÌÃŒkqtqoonkjkllmmll^YZV[lysutx€tYt¥‘gmvssstvspruuuuuwsosxwwwutou}mfšÒÙ×ØÒàáââáÝÛÝàÞààéâÌÅÄ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€wÅÚü±ž–Ž‹ŒŒˆŠŠ‰‰Š‹‡ƒ†ˆ‡Š‹ˆ‰ŒŽ‡…¥ËÌÅÑÏËÎÌÇËÑÍÌÎÎËÌÏÍÉÌÓÒÎÏÒËÄÅÇÂÁÈÌÆÈËÅÄÆÁ¾ÂÂÄÁ¾ÀÁ»´¶¶µ±¯¯°µ·§””˜—”•› ¦¡¢¨¬©§¦¥«®¬©­·¿Ã½ÄÇÍÉ»±¨­¬¦œšžž™š¢š—ŽŒ”ŽŠŽ“•“ƒjbqxmgmoktxpv‚ƒ~v_JFNTV_USOQSOUVVTSVXVR^YL835-,7DBB@1''29;88975;A@:;DKMHBEMMKLMNNLHINQQQV^M8145,+AE>BL?0./29>??;65<<9;CE>322355313003750+,3?P]\TScvr\Rv¤ePv–‡koug`fb[epwwwyyj]huokmpnlmnpooonmllti]v‘™ªµ¬¾ÌľÃ»À¿Áý½ÂÁ¾¿ÀÀÃÄ¿Á¿¾¿ÂÄ¿À¾¾ÃÀ¼¾Â½ÁÄÁÂýº½À»·Ç¿ˆkqtqoonkiijjjjkkfXVX_kt{wvsszpZx«”hnywz}yttusqrrpuuqruvvvvvupu}ldŸÚÚÒØÒ×æßÜåìàÕÙçßáèà×н€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€rÃÜÈż§›‹‹‰‰‹Š‰……‰‹‰…‚ˆŒŠˆŒ‰Š‹‹…¢ÊÑËÐÓÐÌËËÈÐÑÐÍÊÌÏÒÍÍÍÍÌÐÑÎÇËäîѼÆÓÑËÌÊÇÈÇÈÊ¿ÂÅÃÀ¾º·´µ·º¹¸»¸£‘•œœœ—›››œ›£ªªª§¤¨«©¢¤¦§§§§§««´Çɽ¶±¬­­ªª«¨£¡¤¤™—’–’”–“Ž‹Ž’‰t`\^[^hqpqtw}}x||fHCT^ZXWUSTTRUQUXWTSUWM>4286)*>EAAEMPN79FLW`WOfws^Ssž˜`Kx›Šmpqd`hcYbq{|xvui^iuxposspppqponoprspi`y‘˜­»¶¸¿¿½Â¾¸¿ÄÂÀÁÀ¾ÄÄÁ½½¼¿ÅÀ¾ÀŽ¾ÄÅÁ¿¼º¿ÅÄ»¿À½½ÁÃÁ½ÀþºÊŠkqtqoonklkkjkkmmcWTT_pwvz{wvzpZy¨foyrsvouvsqoqwvustyvsutvvwrv|ke—ÓÓÁÃÉÚáÜÞêê×ÏÚäÞæßÖç怀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€wÇÝÆÅÁ¬”ŒŒŒ‰ŠŒŒŒ‹ŠŠ‰††‰‰ˆˆ‰‹‹ŒŠ†¡ÉÑÇÑÏÍÓÒÍÐÎÌÌÑÑÍËÌÎËËÎÎÐÐÎÌÐéôÛÃÉÖÓÊÍÍÊÏÏÌÂÉËÈÆÄÂÁÀ¿½¼»»ººÆ½§–—›¤¤Ÿ›¡Ÿ ª¬©©¬®±¬£¡¥£§¯­©®°¬µËÍ»±®¯°°±¯«§¤©®©¢›–Ÿ£˜’–™•‰p[[`_^hojdgilu|xuzybQVXf^RRVXQTWTPSXSI6/034-'3HJ:6IYVH/47BJIGGJJIJJJKMMLJJLUYWRSSG^©ÞåÝÄŸŒ„~}xnŒ¾Æ½¾ÀÀ½¼½À»ºÂƾ³¼Ü⺰ÄÉÆÆÄÈËÑÐÊÍÏÉÏÍËÏÍÐ͵„w…‡…ˆƒ«ÎÐÇÅÆÌËÇÈÉÆþÃÚÖšljcTuµÕÍÇ¿³ÆÐÙÍ‘]nqhhggllklhehkmmifghiiikkhgggfikjfgkooogijcr…T<8*!"&##""!! 
#!!##" "$%%%#&&%'.1146656897960!2i†„yW5)(%"$&')))(()-../03576?BGFAAGHDCHIGIEGJKLNNLQSVVOPTIX›ÚêÜÁ›„}{ti‰¼Å·¹¹¶·¹¼¼¿¹¼À¾¹ÆäïÀ­¿ÈÅ¿ÃÈËÊÉÍÎÉËÈÆËÊÎ̳…x†‡†Ž†¦ÌÑÈÇÆÊÇÆÁ½¸º¹À×Õ™ljeVx·×ÉÉƲ½ÆÍË`qodhjimmlkgdfjlljgfghlmkgdcejlmmkhgjmlmihdax‘wF51$ "$#"""! "#" #$$"!!"###!"%&$(-1146656897:903k…ƒŒzZ<4;=86KDA?>@FHECEBD@?EFEGCJNMLNOMPRW[OLSIX–ÛîÚ¿„„|xqf†ºÄ¶¿À¶¯³º¼º¹À½»ËìíÈ·ÂÆÀÁȾÂÀÂÎÎÊÌÊÇÆÊËÏÍ´Š|†…‚Œ†‚£ÍÕÎÊÅĽº¹½ÃÌÌÌÜÕ™ljeWy¹ÓÅÎÔ¼ÀÆËÐ\oqhiggllklifhijllifeghkjhjgeglkjighikjmki`c}’c9.-"#$! !!""#!%&$!!"#"!!"$$#"%"#'*+/3467569977>41k„Ž‚jYW[\]^_```aadbaacdeedbbeedeicdabigcefdcdeecacabhmnkgG9JSWZPSfvscXu ›_M£†empgagbZdpwwtssf\huxmjopmoqrqpooopprh\u—¨³±¼Á½ÁÆÂÂÀÀÁÁ¿»»¾¿ÀÃþ¾¿»¾À¾»½ÂÀº¹¼ÃÀ»ÂÉĽ½À¿½Äƽ»¿Á½¹ÉÀ‰kqtqoonkkjihhjlmdWVY_jpuxytptjX{¡Šcq}tswpqqqrppswrqstvxusuwysw{inÀ¾ÒÑËÄÉÚÞÐÑÞã×ÔæäÚêèÑ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€s¼ÒÃû­ª’ƒ‡Œˆ…~…‡……„€ƒ‡}‚€ƒƒŠ‚ƒ„}—ÃÎÃÅÇÆÂÁÅËÆÂÃÆÇÇÇÅÃÄÃÄÇÈÇÆËÖèíèÞÑÎÄÄÅÃÀÃÆÂÀ¾ÁÁ¼½¿¹»»½¿»³µ¿¾À©–˜ ¤ž¦©¤¢¥žŸ°·®¨ª¬­­­¬ª¯±­¯³°³»ÄÏÎý¸´¸´°¯µ·ª«³²ª«ª¡£§¨¤¢¥¤ž›š—ƒlb[\gggie`fonpq{wx}xz™jUPQSVRQXT:)1565/)')+CHB8@OK:2+$!5F??DABDECDGBA@ADFFEJHGGJMOONRWYSPQLHŠÚíÞÈžƒ€wxte¹Åµ¸Á»µ»½º·»¸½¾¹ÌèéÎÇÄÿ¼ÌùÅÌÇÇÈÊÌÉÅÉÌÐη‹z…‰†‡…¤ÌÓËÁ¸¼¹¼ÅÇÉÒÊÃÚÖ˜lkdSyÀ×ÀÅϾ¾ÒÑ͉boqklbelnljedikmnnlgdgmiffhhijkmkikijpkmhk_e••Y0+%# ! !"#$%%$#""!"###!!##!!#&%%())-39328:648<>13h‚‡‹‹‡„s`a`_abbbccbj`dccjigfecbeghidedcdbdkcmjeeegafejonkhcKEMPY_TXewsaWx¡—dPx–…knoe`gd\epvxtqoe]gpupnmloqommmnooppjh`vœ¯¶´ÁÇ¿¼ÀÀÂÅÁº¼Ã¿¹¼ÃÄ¿¾ÀÀ½¾¿¿½¾Á¼¾¿¿¿ÀÂļÁÄ¿¼¾¾¸»ÂÇÀ¸ÆÀŒhnqonnnkmihiiklhcXUW`ouwxwprym\|ŸŒlqxutvuqnrwuqtrrssrrrswwtvtrukgš¿½ÆÚØÔÉÏÑÐÕàåâÏÚçäßëëÕ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€yÆÛÇÄ»©Ÿ’ˆ‡ˆ‰„{‡‡†Š‹…‚„‚€€ƒ„…‡†„…€ÊÔÄÊÐÒÏÌÍÏËÌÎÍËËËÉÊÌÌËÊÉÉËËÔåìêâÔÎÊËÍÌÈÌÐÍÊÆÈÉÄÃÃÀÀÇËÈÂÀÆÎÇ號£¤¡Ÿ¤£¤¤››©®­®ª¦¬²¯§§¯²¯°´²µ»ÁÌÎû³¸¹µ±°µ¹¯ª«´²¬¬ª¨­¬­©£¡¢žœŸ’ze]`irihmiacktwxvst{„’…gSRUPKV[E0-0354/(%(,AIA6?PM?4)%&087>C@>>>?ACBA@ACEDDFFGJMNNMSORWRQRJM~ÍñåÇ›€zzwmŠ½Ë·¼À¹¸¾¼½½À»¿½¯ÁèðÓÈÂÀÀ¼ÃÄ¿ÈŽÂÉÍÍÈÈÑËÇÈ»…{‡†ƒ†€„¬Ä½µµ¶ÃÌÌËÈÈɾÁá×–jmfQvÀáÊÐÔÀ½Î×Òbkpijeelnljfdhknmljhefkhjnkeeljjihgfglnjee_d‡Ž_/&&!# %"""! ##!!!""#%" !! 
"%&%&)*+/4778899889=15kƒ…‹Š€~mdadfffinmjc`bckoeeiccijhghaegfcadibiffecebeckqlhe`FBKMV_TUewsaWx —cPy—„jnpf_c_Wanvzywtf]htupnooppnoonnnnnnmj`t›¯¸±º¿¹¼À¼¸¿Â¿¿Ã¾¿ÂÄþ¼¾¿½º»¼¿À¾¼º½ÀÀ¿ÀÂÁ¿¿ÀÁ¾¾Ã¹»ÀĽµÅÁhnqonnnkliikilnl^WY[amtwtzutyo\v¨nuxrpsrrpquvroqturswwttwtustxkf”»ÄØðëáÐÔÓÐÓÝâàÏÜêåÚàäØ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€uÆÞÈƯ¡“Œ‡„‰ƒy€„ŒŒ‹‹‰‰…„‡…„ˆ‰ˆ‰ˆ†ˆ† ÍÔÄÊÑÔÒÐÐÐÍÉËÐÏÈÉÏÏÐÑÐÍËÌÍÍÕäëëãÔÍÎÏÑÏÍÏÓÑÓÌÍÏËÇÇÈÌÐÑÉÅÇÉÆÈç”™žžœ ¢¤¥¢šœ¨®«¬­­«©§¤¦®±®¯³³·¼ÁÌͺ±µ¸·¶±²¸²«©¶¸¯­­ª­¯±®¦¥§¤¡¤ ˆkaiqnebjka^hhiloqpv€x‡ŽuZRSNOWM5-1.033.'#'-;HE7;JOJ9)*00.2>@AA??@A@A@@ABCCBFEEFJMOPSOSTNPRJPÒêܺ”†Š…}vnƒ­¹°µ´­±¶²µ¹¼µ¸¶£µæðØÎÆ¿¾»»ÃÁÅÉÐÏÇÏÎÊÍɼÌÕ®||ˆ…„ƒ“Ä´½È¾¿Ç¾ÄÎν¾ÝØ•hngQu¿àÖáØÃÄËÔÖ•bfohhiflmlkfdhjmmijhfekjlmkgfgkiihedgjlijgefrqU,$% # "!! !""'" ! #'&%'*+,05679999988<.7o„…Šˆ€|tk[Y`ec`^][ECXlmc]hvh^]]XZaaVPWej^OQIDQQDCE@D\omhf^GCLKT_UQevs`Vw —bP{˜ƒhnrhaf`Ycpxwxzyl`ispnqrnjmopponnmmlqk`sŒ™®¸³¾Ä¿¾Â¿¾¿ÂÀ¾ÁÀ¾À¾½ÀÀ½»½¾¼¼¾¿½»»¾¼¸·¾Æž¾¿À¿ÂÇż»¿Á¹°Á½‹hnqonnnkjikljlooaXWW]luzqwsrwn_~¦‹jtyqqxrstrpstqwvutuutqqwusqw{kb™ËÔÙåàÛÖÚÜÙÛÞÞÚÓÛåßÑÑÕЀ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€nÀÙÆÇŵª“‹†ƒ†„|€‡‰…†‰Š……‡…ƒ„††‡ˆ‡Š†¢ÏÔÉÍÏÍÌÎÐÑÐÐÎÍÎÍÌÎÍÊÊÍÎÏÏÏÐØçíêáÑÊÑÐÐÏÌÐÒÍÒËÊÏÍÇÇÌËÍÏÎËÎËÄÊÍ´—–¡£¢¥£¥§¡›Ÿ¨¬¯¨§¯°¨¦§¨«­«­±±µ½ÅÏÍÁºµ¬³¸¹²®³±¯³º¸µ²¬­¯±­©««©ª¨¥£“tgovlkfdjlij`Z_knqwzvn|Ž‰pYWSYJ3.53-.01.&"'/7DG;6FRQ?--83.16>ABA@CB?@@@AAAA@ECBDHMPQPPRQONJJ{¿âáäÊšˆˆ…{tp…©·±±°©­²¯²«¯«¯®¤½éîØÏų¯µ´½½·´³š‹¨ÅÆÁ»·ÄÄ¡tu€‚„‚«ÓÐÅÅü¼½Á»¼½¸±¿à×–ilgSu»ÓÙêÔÄÐÍËÕ˜bdrifkglmkkgdgjlkjjiedkkjhggggnklmggkmiikffii^D-*' !!! !""##!  !!"%"!#"!"%%$&)++.36449;8689;*;t‡‡ˆ†‡€kotWDBAA>;7DSO=,-:==?6;==:,'% """"""!!!!!  
!"##!#&&#"#$$%()),07348=;86:9%AxˆŠŠ„„~kmyeN?4672.0+@^T99=7MU?0=81ES93LipW6-LPD85AJQP?79:;;>>==?ABA@@@A@ADHLMMQPPRTFX æëÝäìÅšŽŠƒ~x»ÈÀº¾¼¸¼¾¼¾¿½¼·½ØëçÐÈɺ¾Æ¬y\UVV``Z`—½¸¨¹Ç®„x~„…z„¼áÆÀÍÊ»³°³ÀËÎÓÒÐÜÓšlhcXw²ÑÉÓɼÈÑ×ΔbhvkehhkkjliefijjllgdfjjjjgfgjkijjggjnjgdfgfmeL1"&'%&'#"""""""  "#$"!$(($!"$$&))(*-4899;@<695"N~‡‹Œ‚w|{uz|zkYL=007,@LDHL9/BSB3>93HM34UljS7.6IWE16:1Kgohfd`G4?NV]UVcuq_UvŸ–bP{˜ƒhmribf`Wanvtwxvg\gssrroijoplmmnnoppnh_y’™«¶´¹ÁÀ½ÁÀ½ÁÅÅÁ¾¿¾½·»¿¿¿¾½º¼¿À¿¾¿¾»¼¾»¸ºÁÃÁ¾Á¾½ÁÀº±ºÃÁºÈ¿Šhnqonnnkkikkghkj`WVV\ktytxsuyl[|ƒhuwrsroqrqpsvtttrrsvtqqwusqw{ki‹·¾µ·¹¼½¿ÁÃÃÂÀ¿ÆÂÁÃÂÀÀ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€uÆÛÃÁ¾­ ‹…†„„‡€}†Šˆ‰Š‡…‚ˆ†…„ƒƒ…‡‹†ˆˆ‚žÌÕÍÑÑËÌÑÑËÏÕÐÉÎÑÎËÍÊÎÑËÊÑÔÊÔæíëã×ÓÍÌÎÌÊÍÏÌËÌÈÇËËËÑÀŸ“¢µÅÍɪš˜——ž¢¤¤¥§§Ÿ—›§¬©ª«¬±²ª¥§¨«®°±±²·½Èɾµ­´µ±°±¸½²¬­¶¸¸»¶²±²µµ²µ¸µµ´¯ª¤‘vcf]bf_jzwiuyyzspxxffu—´¡d9,83-220*(,,)+13:BEB>CLRKCHOUVD78:=?=<<<>ABB@AACA@AEIMNPPSTUGXŸèè×âòѤŽŠƒ}rŒ¾Ë¿¾ÂÀ¼½¼Àý¾»»ÑéæÕÍƳ¸¹ŠTObos‚~eaƒœ“¯Ð´‹~„‡†…±Öƹ³´¶¹ÀÍÒÑÎÒÍÍãÔ˜jieWu±ØÊÑ͸½ÔÝÑ”bhtidehkjjlieejlkjiffhkljghjidjjihgfhmmkfdgclmZ?+-0--+))(''&&%! 
!#$%#"$''#"#&&(*)'(,09=::==850")]„„ˆ‰szƒ}~u|uaL=79=;>RM2/CJ@563:RN05\oeM4<>KVC01/A_qjdehiH4@MU]URcuq_UvŸ•cPz—„jnpldhbZcpwuwxudYfssqokjnoinnnnnnooog^w‘˜¬¹±·Á¿º½¾½½¿À¼»¿Á¿½½¿ÂÄÁ¾½¿ÀÁÀ½¼½¾»¿Á¾¼¾Âý¾¾»¼Âü²ºÁ½µÃ¼‡hnqonnnkliijikmj]UXZ`lruttnqyn`˜|exzrstlqssrrrsrtvvspswtwtustxkj‹¹Ã·µ¶¸·¹¼¿À¾»¹¼¼½¿À¾¿Ã€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€qÇÞÄ°žŠŠ‡ƒ‡‡{{‰ŠˆŠŠ…„…ƒ‡ˆ‡†ƒ„Œ‹„‡Š… ÎÚÎÒÐËÌÓÔÏÑÐÊËÒÑÊÊÏËÏÑÉÇÍÑÎÕæíìåÖÏÑÓÕÐÈÈÌËÌÏÊÆÊËÊϯ–˜Ÿ«º½¤’—œ™ž Ÿš¡§§ž’”¢ªªª¬¬«¨¤¥¦§§ª¬®±²ºÁÊÈ»´®®¶¶³°¶¼´«­¶¹¸µ®­±¶·¶¸¶²±¶»±¥¦ž…rhall^k{pgpz~{vpmz{gbcz¦¤qE6;1-3/1)&,.+,1::6?NH=ANMCFMUYM:88;<::<;>ABBAAB@@ADGJKJPNQRZNBbµäååñÔ£…Š‚~nŠÁλ¿Â¿¾¼¸ÀÈÅ¿ÃÁ¸ËëéßÔí½Ç“[^m|‹œj[pŽ¾Ô²ƒy‚€}¡±³³¹ËÏÇËÆÉÇÌÖÉÀØÖ–hjgVs±ÙÑØÓµºØÛÔ”chqgcbikjjljeejnlgffhjkjiigeefjkiggeelkojbgejoudQKKKJB642/+(&$!!!!"$%&%##%%##%(()+*((+.5==968<1,"1hˆ‚„„yz|€ƒxsz|z{|o]C>62?B44@>=;55G`H-8`jYC2:Q`U:5=29:;:<@BA@A>@ABCFKOOQKOPEBHl¡àïèΛ†ˆƒ|rŒ¾ÏÂÂÃÀ»½À½Á¾½ÀÀ¾ÏéõÚÐÈ­·Å›jqxy‡™–‘—˜tSu´Ã®~u~ƒ‚†…ƒ ÀÌÌÌÉÈÇÊËÉÌÓÇÅäÓ“eheVw¸ÒÐÜÕ¶¿ÉÌÔšdgofigglmlmigihlmlkhefkkjhgffglhfijhgjkgfggehntrlffijh[NF?:5-.,+%#!)$%&&&&%$#%()(*..,'-5:9534, "Ktƒ…Švpz€€€~y|you|y€ocN:48;9;=8>G<DHMB57AJK49P84UnjbfhdK=FMU]QMbvq]Wx šdQy–‚hnshae`Xbpxpovte^fmnqpllppmnnnnnnnnpjby“§µ³¸¿ÀÂÄ¿»ÃÃÀ¿ÁÀ¼»¾ÂÀ¹¹¿Ã¿»º¼½½¼¹¼À»ºÀÀ¿¿¾¾¾º¹½¿¼·¿ÆÁ¸Å¿‹iqplmljkkjihijklbUOVbmttsvqtxk]€œ{fusorwusrssrqpu|vouvqqsrptutuinŒ®²¬®­²µ¶³­«®®ªª§¨®¯««°€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€rÅÛÁÁÁ­š…ƒ…‰‰~{ƒ†‚‡Œ‡…ŠŠ‰‡††‚„††Œ‹ƒŸÎÙÍÎÍÊËÏÏÌÌÌÉÊÍÌÊÍÎÏÎËÊÍÐÐÎÔäíëßÐÍÓÑÍÆÄÌÐÊÅÆÈÈÄÂÆÍ­Ž–’”šœ–”–˜š›š™™š›š™™˜›¢¥¤¡žŸ¥¥¦¨¨ªµ¾½ÂÁÅÎÎĶ­­´±©«·¸¯¯³¸¶µ³¯³´·¹·¶·¹¹³³³¯­¯¨–zlbdktywtvzyvwwsw{pernW`ˆp;)-*+$10.,,-/15:;:<60+"(Tw}ƒˆwlo~†‡†„{spmsyuy|q]G:56<@;9=::G>457=E?;6:AD5E^B3MdhccggK>FMU]RNbwr`Yx–aOz™„imph`e`Xcpxxvyue_hppnmmnmmnnnnnnnnnpi`x•¨µ­´¿Á¿¾¹¹¼¿ÁÀ¿¼¹»»¾¿¿ÀÃÂÀ¾½»¼Áƹ¹À¹¹ÃÅ¿½Åú»ÄÄ»µ½Ä¿¶Ä½‰iqplmljkiijkkkkjcYUWZcovrtqv|n\|{fvvrqsrruxuooutoqussusssquuuvia’ÂÁ·½ÀÂÂÄÃÂÄÇÅÁºÁ½»¿¿º€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€zÊÝÁÁÁ°Ÿ’‡……ˆ‡}z††„‡ˆ…ˆŠ‹‹‡„…„ƒ…‡‡Œ‹‚žÍØÉÍÎÊÇÈÌÎÎÍÍÎÏÌËÌÏÉÊÌÊÊÍÍÊ×éëèã×ÏÍÏÏÉÇÎÎÅÅÇÉÉÿÃ˶ˆ’’”•••––™žŸ™”—šž¡ž˜—™ ¢ 
¡§ª¦¡¡¦¨¥¥«®¬¶°´ÂÊ·±°¶³©ª²²ª©¯¶µ´³°µµ¹»¹¶µ¶¶³µ·²­²´¬™hgtxtr{wxz~€znr|udmiTby<*,/4122/,,/28:<;99COSOHJNOVWA78:988=9>@??@AA?>?BGJJIQRRRJFNPN€ÕõêÐ¥ˆ…€{r‹»Ê¾¾ÂÃÀÁÂÀºÁÆĽ¼ÐìéÙÎÄ»´¯±š}zu~w~€›¥|WgŠ„ƒ‚~‰’‡¡ÌÓÆÈÊÉÌÌÍËÅÌÑÓáÕ•gheUv·×ÔÔÏÁÊÆÆÈ’ahrgidfkljkhehjlkjjhhkhijigefhjmmhfhjgmliffiklnmjginqoc]^[WN=7W\D-%%&&'''%$$%*-+*,/00--4=>6-)#0^zz‰unt€ƒƒ…†xjjlnqpsx{€t_F6D=6468FLU]RObxtaZw›’eQy”gotg`e`Ycpwutyvg`ipnoolkmnmnnnnnnnnph]w‘˜ªµ­´¿¿º¹¹¿À¿º¸»»¼½ÃÂÁÀÀ¿¼¹¼¿À¾¿Á¿º¼Á¼ºÀÃÀ¸¼¿¾½À½»¾´»Â½µÃ¼‡iqplmljkmkihijmn`YWXX`mvuunrzn]} |dsurstqtutrssssqstsuvsturuvwxji¹¿¹½¾ÄÁÀ¾½ÀÅÈÇÂÃÆÈÆÃÅÈ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€vÈÞÇÇƲ¡“‰††‡†}{‡‹‹Š……ˆ‡‹ˆ‡‡„„††……‹‰€œËÕÆÌÑÐËÊÎÑÍÌÍÐÏÏÑÒÐÇÊÐÎÍÎËÉØêëçãØÒÌÍÍÉÆÌÎÇÉÊÌÌÇÃÆË´’Œ’Ž“–”˜–˜žŸ›š£žž¥¥œ™¡¨©¦¥§ª©¦¤ªª¦¥§ª¬¬«·ÌÑõ±²³°­­°°¯©°µ²²³¯°°¶»»¹····¹»¶¯³¶²®”wuƒrq}||z}}xu{uekfYuŽ~H)++.61110//13989<:69@OKDIRWWO@6699968;=?@@@?>CA@BEJLMMMQSJJUQI€Ö÷èÉŠ‰„{p‰¹È½ÀÅÄÁÃľÃÆÅ¿ÁÒéêßÓ¿»´™‰xoxvmumt’Ÿ~`ct†~‡‚|¤ÉÎÉÑËÅÍÎÈÇÉËÈÊÜÖ–gieUu·Ô××ÐÁÉÇÎÉ“airhieejkjjhegfmomifehjiggffgilkiihhjlmkgddgijllihjpqod]]XTM>9WjZ<* $&''''&%$)**+-/0/40,-473,'!4d|x~…yu|ƒ|€rgllfkmonps|ƒ€lWJ60887875776=>1513=<.IfM3AVfjecfM?FLU]SPbxs`Xv›“bPz—ƒimqg`eaYcowwuzwf_fmmqrmklnmnnnnnnnnpg\v’™«µ¨´Âþ»º½¾À¿¾½¼»¾¿¿¾¾½¾½¼¾½»½ÁÁ½»»»»¼º¿Â¸»ÃÀº¼¾¼¿µ¼Á½µÄ¼‡iqplmljkjjjkklll\UTY^gptvvorwl^€zcstrrtqppsusqprrrvxtqvtvsvuxyji½Ä¹µ¶Á¿À¿½ºººº¶¾Á¾¼¾À¿€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€sÄÛÆÇ‰‡…‡…}}ƒ‹ˆ…†ƒ…‡‰†‡ˆ…„…………Š‰€œËÖÉËÏÐÎÎÑÕÏÌÍÎÎÎÑÓÊÇÌÏÌÐÓÎÎÔæîéÚÎÏÎËÌÌÉËÏÎÎËÊÌÌÉÈÉ°’—’’’’•˜–—œŸ¤£ 
¡§¦›™£®¬ª§£¢¤¦¥©«ª©©­²­¯¾ÒÔÁ³²²²°­¬­¯¯«²µ°±´­¨¯´¸¸¶¶¶µ¹·º¸³µµ®²¢ŽˆŠ~ty€~z||zwspvh[xˆsJ/*(+.0/.03565668;;989?EDFLQUN?679898:=:965<=5687BE>?EKH528:7769954:@A@@;>ACCDFILIMSMLRKUŸéòæÎ……~zqŒ½ÍÃÂÅÇÃÂÃÂÂÄÅÆÅÅÓçðáÖÀ›|uƒ€vux|vjokkminŠ‘x`v‰…†ˆ†ËÓÅÆÆÂÁÃÆÊÌÍÈÈÜÕ•gjgXxº×ÛÝË­¶ÆÚÏ—djrhjgfkkkkhehkkjjiffjkihhhigfnlihhihhljgeceimmmjhimmj_[[TPL>6QdibM1$!''((''%%(),.,+.328:3-,+(!!9fvu{xtx‚†|}|ojkfgplfqvxvw||x}ul\IIQONMQS[^RQSTZTM]hZ`ehfdeggO@FLT]TRcwp\Uu—eQy•€hote_fc[douvtxte_irupmoqpnmnnnnnnnnph]w‘˜ªµ±»Ã¾»¿¼¹ÀÀ½¾ÁÀ»¸µ»ÀÁÀÁ¿½ÁƼ¼½»¼¾»¶¸¼À¾»Á¼¶ºÁ¾¸¸½À½¸È¾‡iqplmljkkjhggilmaVRX`jsvqvtuvh\ƒ”vewvpptptuqortsuturmrvrrwtssyzhjˆ§¦£ª©¨®®®¯¬¬±¹¸°°¸¸°®µ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€tÄÝËÌÅ°¡‘‰‡…††€‚‹‰‡‡…ƒ…‚‚††‡‰‰‡‰ƒ„ŠŠƒ ÐÛÐÎÌÌÊÉÎÕÑÑÍÍÑÏÍÐÑÍÌÍËÉËÑÈÕèîìæÕËÐÎÐÐÌÊÍÍÈÆÇËÌÊÈÈ«ŠŽ’““—™šŸ¢¡¥©¦¤¦¢ž¡§£§­«¦§¨¦¥¨©§ª²´¯³ÃÕÕô°¬°°ª©®°¯¦ª¯°´µ®®±±¯®°µ¶µ¶²µ¸µ´³®³·»±˜…~€‚z{~{~~„lefZ@2/+)((//136788;;<;989<77233277--587657:99=AA>;>?BCEFHIJHKRNKSSP†ÒîèÍœ……}si…ºÍÂÄÇÄÁÅÈÃËÊÈÈÅÃÒìêçÄ•vq}‚zmrz{vnphf`WS[juqz‰‡…Œ…{ ÇÓËÉÆÅÅÅËÊÈÏËÉÜÓ“fihY{¼ÏÎÔɬ·ÅÏÊ”cjsijegklklifikmlhhfcblljgffhjnmjeegiihigc`cimkkjgillh_]]VROA8TfdaaL2"'(((('&%((,.-*-32892-,*%%Agqr|x}~ƒyx|{nihdjnff{sptwvw{~ƒ‡smjcg_cfik^UPTXRQ]dcdgieabfhPAFKT]UScwq]Vtš“aO{˜„impe_fc[douvtxue^fnsronnpnlnnnnnnnnpi`x•¨µ¶¼¿¹·¾¾¾À¿»¸ºº»¾¹»¾¿À¿¾¾»¾¹µº¼¸¶¼¼··½Á½¸ÀÁ»¼Â¿µ·»¾»·Ç½„iqplmljkghijjjiiaVQW_jrsuwrrth^…ž{esspswtsrssrqrpstuwsrupvtrrxzfh¸½°ªª´¶³´¶¶²±²®°¯­°µ³«€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€o¾ÖÅÈı¥ˆ‡…††„†Œ‡ƒ…‡ˆˆ„‡‹Š†‡‰ˆ€‡ˆŸÏÚÑÐÑÑÌÈËÒÎÏËËÑÎÉÌÍËÈÈËÍÌÌÊÔæîìâÎÄÑÐÐËÅÇËÊÆÆÉÍÌÉÉÌ­”“–•’—˜™ 
£›¥©¨©ª¢œ£©§«­¦¢¨®§¥ª«§¬³±²ºËÛØĵ±²±°¯­¬®²ª«°³µ°ª®³²°°²µµ²º¶¸¸²°±®«°½¾­Ÿ’ƒ€~~€}syzu€…xfN8.-)%'0/2565568645:<;<=AEFGNMNSMGPVJfµíìǘˆ…zsj†ºÌÄÁÃÆÃÃÇÉÇÄÅÉÇÄÑéðÌy{}~jev{svunfQFHFDGJZl“ŠŒˆ‚©ÈÏÄÂÃÄÁÌÎÈÆÐÊÅÛÒ’fihZ|¾ÖË̦µÁÃÄakujidglmlmigijjjjhcaejlmkfegjjjihggikhlkdaflohihgillh_]\SON@8Sk_XlfA$'(()('&%*'(,.--0474-*-*#(Hinp~zz|ƒƒ}{|xjihfjg_k{oluulq€|z{€ƒ€||hXXYZb\RLTXOLSZeeilkfbcePAFKT]UScxs`Wt—dQy–‚hnse_fc\dotxuxte`kuqopqnijpnnnnnnnnpjby“§µ®´¼ºº½»»¿¼¶·¼¾»¹¾¼¼¿¿¼¹¸¼¿¼º½º¶º¹»º¹º¿Âº¹Àýº¿Àº¶¹¼¹µÅ»‚iqplmljkihggghjk^ROWcmqotvqsxk]£fturrttvuqorssqvtqrrprovtqqxyej•ÁºÀÂÄÇ¿¼ÁÆÇÇÇÇÃÀÁÅÆÁº€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€b¢ÁÁÄÀ¯Ÿ”‰ˆ‡ƒ‚}‰ˆ†‚‚‡ˆ„†‰‡‡Š‡ƒ…‡…‹…žÎÝÌÊÑÒÇÃËÐÎÏÐÍÌÏÏÊÌÎÍÉÉÌÍËÊØèïêÝÑÊÒÍÌÈÂÈÏÊÈÍÍÌÅÄÊĬ“Ž’”•—œ¢£ £¦¨¬®©¢¢©©ªªª«­®¬ª««©ª­¯´ÂÏÛÓ»±´µ´´°­¯¯©¤³º¯­²¯¯¶±¯³¶´³³¶¶µ´³²±°¯·Àº±°«¦•ƒ|yqljnnkm•‡j_ZK2%+,+02454248=8:<<;:;=:;:63310/036765679;:;==>AEHIJKLOQNMS\QRˆÜöÅ’„‚~{tkˆ»ËÀÅÈÆÅÇÇÅÈÃÃÊÊÃÔöášswxw{ubblvwvpcUI@><8:A7M™Š‡‰ƒ¥ÌÌ¿ÃÁÁÈÈÅÇËÐÊÇßÔ‘gmgXÂÖÍÐÁ©¿ÄÎÏŸdhqjeechjjligjhkkiheejnnmjgfhkjhfefhijkjgdehihnlhdejlkc`\SRK:8NgbYcvi3"'&'/+%)%*+,,,/0571,/.*)0Tjkr{xtƒ‡xuwrcac_^\]fnhgjf`iyurt}…‹€fLIWb]SNNSQJP_efrlgfdhkN;EPW^URdun\Xyž”gUy”ƒhkpi`eaZdotxwwqecltimrpklonmmmmmmmmth[u‘šª´³´º½½¹µ¹½¹¸¾Â¼¹¾¿½º¼À¿¼½Á¿¼º»½½¹¸¼½¹¸¼¿¾¹¹½»µºÁ½³¼Å·°È¼hppklljkkmihggie\UUV_ptruwusyi_ƒ—xcpqqrronoqqoqvtstsoqttwtswstvef•½À¶¹ÀÁ¾ÀÁÁÁÁÂÄÊÈÅÆÈÈÆÀ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€R¨¦®µ±©’†…„ƒ†„ƒ‹Š‡„…‡ˆˆŠˆƒ‚…‡……ˆ‡‹‡~œÍÙÈÆÍÒÍÌÐÒÎÐÎËËÍÎÍÍÌÌÍËÊÌÑÏÖáíîáÐÅÐÎÍÊÈÍÏÊÍÐÍËÆÇÑͨ‘’’‘–œŸ¡£¥¢§¬«¢œ ¥©«ª«­¬©¦®­©«­«¬¶ÀÌÙѹ³»µ¯­®±µµ®ª±´®°¶¯¬ª¯³²¯°´¸º¶²±±°¯¯´²³·´°²¯« 
’vqnovy~‘™xRKRG1&+/-.0367557:79:;::999=<7468721475249:<;88;>@==@CCBFMKGELSSQOSLsÆóÊ“…‡‚}vk†¸ÈÃÇÇÃÁÅÈÈÅÆÌÌÄÉÖÜ¡}owxvqeecnxtnh^KB<=;537;7^ˆ‰„…¤ÎÐÄÉÇÀ¿ÆÉÊÆËÍËÜÔ‘fldTz½àÔι¨ÇËÌΟdhqkfffklkkhegglnlgbemjkkigefgkkjgdcehhhfddgjjmkgdekmkeb]VTM=:PcaY\sxP+*('&&(*$+.-*(-1351-//*(#5Wmou{ws€†…|ssr`[]\WW[fa^be`ahjmry~ƒ†„€†oYT^_WSQRRQX_bgnheebfiP=FOU^TMavq^Wv–fTx“ƒhkqi`eaZdotstvrfagnpkjosmknmmmmmmmmti[t”¥¯´ºÁ¾¸¶µ¼º¸¶¹¼»»Á¿À¾½¾½¼¼¾½¸¹½¾º¹¼¿½¹ºÀÀ¼¶»¾¼»ÀÀ¹´½Å·°Ç»hppklljkjmihhhjg_UST]ntuptsrxia†›}hvurqpoqqqqrrqrsrqrpqutpnrptyji”¹½µ·»º¸º»»¹¹»¼¾¿ÀÀ¿¿ÀÀ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€J“• ¤¢’…‚„‡ˆ„‚„ƒƒˆ‹‡‡ˆˆŠ‰†ˆ‰‹„|žÐÙÉÅÇÍÎÏÏÍÌÍËËÑÒÏÏÉÉÉÉÉÉËÌÈÑãðîÞÑÍÓÕÑÍÐÑËÇÉËÈÈÃÆÐË£Ž”‘“’‘”š  Ÿ¥­¢¤§¬ª ¤£©ª§¨¬«¦£®­©°¯«®¶µºÍн¶½¼³°±²´µ±®°±®³¶­«¬®¯­±µ¶²·±®°²±±±°±­­¯²¹²®±¯¨£ ™š¤¥¨¤zRPVO=-/430147987678899:9867=?:7985332147986:<;::;<>:;ADBDIFFHJJJNSUHX¦ëЕ†Œ…wl„µÅÀÆÊÈÇÈÈÅÄÃÆÉÊʸ›€qjmkjjhidkpgdbXE=87611489>:586324984689879:9=99=BCCEHFEGIMOQVLGˆàÕ—‡ˆ|xm†¸ËÄÇÈÄÂÆÉÉÅÆÃÄŲ’|qhggcdlic`ZVabQ@956644686<;Lx†¦ÏÏÂÆÅÆÎËÇÆÇÌÇÆàÓejbRw»ÙØÛŲÒÓÍÍždhqkfgjonlkgbegkkijighkjjihggfilmicbfkilkfbdimjjgeglmlc^[VRM?:Pba`etq]4)-(%&.*--***,,00///+(+B`sx|ztzƒ„tmoof[]]X_baS]ghhomagdjz„…†Š„‹‹ze`giaRP[abi`a`hkfggM8AMW`VPcun]Yx›‘cQv’ƒimsiaeaZdotxwytf`hqupjilooommmmmmmmnf\u”¨¶¯µ¾¼·¹¸¹»¹¶»ÁÁ»¸¿À¾½ÀÁÀ¿À»·½Á¹²ºº¹¹»ÀÀ¿Àþ¸ºÀ¾¹·½Å¹±Å¸€hppklljkhliiijnk]QRYbmpsuupoxi]~˜xevvrqswsqstrqrosxvppvyxsqsoptdhŽ«¨Ÿ¤ªª¬®®­ª©©«¬®°°®®°²€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€N~„†ˆ†„‡‚„‰‰‚‹Š†‰ˆ…‡†…‰Š……‡…„ƒ‡…~ËÓÀÃÅÉÎÐÏÍÏÒÎËÎËÈÌÑÌÊÌÏÍËÌÎÔâïïßÌÁÍÑËÅÍÏËÌÍÏÍÍÊÊÏÅ ‘•Ž“•”“–œ ¢®§£§©¢¡«¬©¦¥§ª«ª©®°­¯µ¸·¹¸ÀØؾ¶¿¾¼¾»³¯·¾±³·µ³¯©¯²°°²³±±²´³´µ´±²µ°¸¹°ºÃÀÌãÆÃÕ§¥¦¶´W^hsucPGC=:9767788889877:<=<<==<:87348956;:96569987;><;8678986898679<>>?A@<:::77;:65899779;957<=9;ACAEHJJJNPQUXEqÑÕ—†‚|yl€±Å¿ÄÅÂÁÄÅÄÉÄÇÇ·Ÿ…nwpc\agc]SVIAXlZ832146876<9NbYbwqmpgV@/#%7J9(*,-1+*-/21,--)1Pjtzzut€‚vcdmcYXXY`ZP[gjlqlfkghlrx|‚††Šˆƒˆ‹{cg\OMNKHKPU\hifhcS?IRV[QM_ur_Wt˜cQv’‚hkqi`eaZdotvuwte]fsolstjkqmmmmmmmmmof[tŒ“¦²©¬¹¾¼¿¿½¼»º½À¼·¸»¼¼¼¼¹¹½»º¼½¸µ·º¾¼¹º½¿¿½½¿Á¾º»»·¶ºÂº³Å¸„hppklljkhkhhijnj^RQU_lqsnturud]… 
}fwupquqsssttsqqtsqsutsrqt{wwwdd–ÂÈ¿ÀÅÅÃÅÆÅÂÂÃÄÅ¿ÀÂÃÀ½€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Hv„{{zxwuuwvxsko{}}{}~|€‚€€~||yy{vr•ÃÇÁÇÅÂÅÄ¿½ÆÄÂÃÉËżºÂý¼Âþ¹ÐæíêâÖÊ»¾Â¿º½ÂÁü»º½Äºœ‰ŠŒ“’—ž¢¡ž¢¥¦¨¨£¢©©§¥¤§©¨¤¦¬¬§©¬­¬£¥³Ôá̾¾ÅÂÁ¿»¼½¹º¹¸µ·µ«©¯²µ³¯­°´«¯¯¬«¯²²±±µ®¶¾µ¸Å® º²‰‚‚‡…}ƒŒ~d_o…‰vaSF?<98::8578:::99:=>?@?=:89999:;:8::878996::98=CB>DFFDEJOPUVDsÒÒ”…ˆ|xwm³ÊÉÊÈÂÀÆÌÎÄÅÉ¿¢Š~wtme_^`\SPPCJilJ0740/1577A38AEJNUŸÐÔÂÆÊÉÈÃÈËÈÌÊÅÔÒŽdjcSy½ÔÙàɳÐÐÊÌchqlhhglmlmjgjlkhhkjijkkkjhgijmlkhffhjnomhdgmqppnklnmid[]ZTQG?R_[lwccudUK=)+DY9(./+/,/0130+,.+7Ypuywrt}~rdflcYXY\]XX_cflpicfjgktvt{ˆ‡†„…ˆŠ…fWPSVUUTSZbifflfQFQRR[SK_ur`Xt˜dRw‘fjoi`eaZdotqqurcZcpsnpokorkmmmmmmmmqfZsŽ–§²¨­¼½¹½À½¸»»¼¿½»¼º½¾½¼»¼¿½¸´µº¼»º»½»¸¹¾¾º½ÂÁºº¼»¹´·Àº´Æº‡hppklljkhkhhhiliZRSV_nqprvurve\›xcvvootqpqtvtqorttutqqvustytstae”¾Ä»»¼¹¼¾¿¿¾½¿ÀÅÅÄÄÅÅÄÄ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€M{Š‚~}{|~}~{u|}yxxy{}||‚‚~yw|x{}utŸÑÓÌÎÆ¿ÅÉÇÈÅÇÇÆÅÉÈÃÃÇÇÃÃÆÅÂÃÖçëêäѼ£¯ÄÍÈÉÌÅÁ¿ÂÂÄȼ™†ŠŒ‡Œ‘–—“”šžŸž ŸŸž¢  £¦¥ ›ž¥¦¡¦§¥¨¬£ž¨¾¾ÅÛßȼ¿ÄÄÃÀ¼¾¾¸µ¶·´¸¹±¯¸·´±²´±¬­²²­­±°¬¯¯°««³°«·«’¢¤tv„|xy}†‚shpƒŠzfXJ=;:;=;729:<==<:8>><=?A@<9:<<978;998667768:<<<=@B=CEDDHKJTQAvÔÏ‘…‘ƒ|xjz©¿¹½¼¸·»¾¾¼¾¾º«z}qc]\URTVTNARuh?/7643465318>?HKEKyÀÓ¿½ÂÃÁÅÄÈÍÑÇÀÕÒŽdh`Ou¸ÏÝåÁ¢ÃÊÈÌchqlhhhmnmmjfinnoqofchjlmljhijoonkiiknttqkhjorqrollnmibY[XQOF=W^`wvV\wgKGK85L[=&+..4.-2241*,/,<`uwxupt€ˆ„widhfbWUY[WU_[Z_jolgefjmnou€‰‚…‰ˆ„ƒˆŽi`cccc_[bhkfhqkLHUPO^WLato^Yw˜‹eSw’fhni`eaZdotsrvte\ftsmospklpmmmmmmmmqfYt’›­·©±À»±¸¾¼ÀÁ¼¸»¿¾½¹¼»¸¹¹¸¸»Â¾¶¸º·¶·¿¿¸¸¿Á½ºÄÀ¸¼¿¼º³¶¿ºµÆ»‰hppklljkhkhgghkh\TSS\msrtsooyl`Ÿ{gzypnqutpmpttoqsurpstorootqsvfl–¹½·º¼¹¸º¼¼»»½¿»¾ÀÀ¾¾ÀÀ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€K‰ˆ€{ƒ†‰ƒ}…„‚}€„‚„…„€ƒ„‚x{€~{£ÓÖÈÈÊÅÂÌÒÊÆÇÉÈÄÇÊÉÊÌÌËËÍÍÌÊÚæîèÞÓ»“šÄÔÃÉÓÎÈÅÂÍÈÆκ—„Œ“Ž‘’——˜˜–—šœžšž¦¥ ¥¤šœ¡Ÿ¥£§¤Ÿ¥š”¡¹¸ÅßÞŽÂÀÄÆÿ¼½À·±¶¸·¸³®¸³²´³®­±¯µ±­°±®±»¹±«­°© 
®¤™›š~q„‚}}|}ˆŽ‡Š‰†‰v]^]B66<:895777:=;78<===?B@<:;<:9:<<98;:67:96779:;<===@AGIEIVJBˆäÎŒ‡ƒ|m}¥°±¯¯²¶¸µ²¹·¸³uxi_`\LISWNOK\nS3852456;=6278;DIIJ\•ÆÊÁÃÉÉÄÆÉÉÎÊÇÚÍ”jncOy·ÌÒÚÁ¬Î˽Ðflnigfiopkkomgorqnmjghjlnmiehlssojimppsurlhlruuvtolmljd[ZYYRDCSUd€rNWqgLDJA?R_>'++-..-,2..0..(::::<=<<<;>@><98:==<=<:869;;88899::;:9>@?ACLUEU¨ìÏ“‡„€~yj¯Â¹¼ÀÁ¿¾À½¼¹®“vmrc`XNJQTNPIJY\E3976/.9=72/128BHJOOu®ÌÆÃÈÄÊÉÈÅÈÄÁÕÒ˜jm`MwµÒÚÓµªËÈÉΟjqtmiemmnpomlknusnmnljonlkgegklllnoomkouxsmmtzuvuolllj`Z]ZRNKPN[pzbJVnnQ?EHLY_A(+**-/003400*&4ayyvwpty{rhdedZWY\ZZ\[VY\ahkhbafgipwupˆ‰†‡†ƒƒŠ€dECY^NDT[WZfh`SDOTV^VQcvqaYs•ŽfUy‘jnrgbdbZapttrusfahprnmmlllijlllllkjnhatŸ­µ®²¼¼º¾¼µ¼»¸»ÀÀ»¸½¸¸½½¹º½»¶µº»¹¸ºÆļ¸»»·¶¾ÁÀº¹Àþ³ºÃ·¯Ã¹…homjmnkjhfhjhjml]USX`krpnrruwb_ƒŒhevxrpuoruupnqvpsqoqrrsrrutqtrimŠžŸž¡¨§¦§©¬­®­¯°°¯®®¯€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€N{‰…†ˆ„„ƒ„„ƒ†‡‚„ƒ†‰ˆƒ„‡………„„„‚…¢ÑÖÆÆÉÊÈÊÌÈÇÍËÉÍÏËÉÍÌÉÆÈÊÊÇÊÚçïêáÖ¿˜ƒœÃËÅÅÑËÌÅÉÅÄ˶šŠ“Ž•–•’•›žœ ¦¤¢¤  ««§¥££¤¦¥¤¤¨© ‘’—•¿áÖÅÇÆËËÆÃÆÇÿ¼¼¿Ä½±¶¼·¼¿µ´ºº²´··³±¯ª¯³¸°¨­«™•‹Š”r„zuy€…‰€œ¯Œ[U[TWQ;-26369::87:=<>B>::>>:;?;<:7898899999999;49?A@?ILCuÐóÀ„€€zj|¯Å¶¼ÁÀ½º¾ÂÅÀ·¤‡miqd\OL[bWIOEMWK95721)(272//--5@BFONVÃÇÀÇÅÊÈÇÄÇÄÃØÓ—gi_My¸ÖÙϱ¨ÉÉÎËhovsrropqsspnmrwwrrsrptqpnmjkmmlptustwyyuniltztwvpkklkg\]WOPNLKbypUFPgpU:;HSbc?'//.,..237..*%Hw|syxnwz|zlcaa`TUYZY[\XWXY]did]bjkfnzxju‚ˆƒ„†…ƒ…ˆsPDKNIJHO^ffglS@KVZ\RPduo_Zt–dSwhlphaba[cprxvuqgafoommnmmomkmmiimmkqi`rŸ¬±®µ½»»À½¹º¸¶¸¼¹¹½¹¼À¾¹·º¼¿½¹¸¼º¸¹ÂÁ¹¸¿À»¼·¹¿¾ºÁµ±¹Ã¸°Â¶€homjmnkjkhhhfhlj]URW`kqostrsucbˆ‰gfxxqorrutqqurlptvutsqqsrttrtriqz}|}{|ƒ„ƒ‚‚ƒ…‡ˆ‹‰ˆ‰‹ŽŽ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€R}’•”‡†„‚„†ƒ„„€„‡‡ˆ‡†‡ˆ†…„†…ƒ|~~ ÎÖÆÆÉÍÌËÊÉÉÇÆËÏÌÊÎÊÈÆÉÍÎÌÉÊÚçðëâ×À•z’Äм´ÆÍÒÉÉÄÅȲ—‹’‘‘’––”˜  
šž¤¤¦ª¦£ª¯­ªª©¤žž§¢¢¢˜ŽµÛÒÀÄÊÈÊËÆÃÆÉþ½ÀÇÅ·µ¼¾¿¹´¹½¸³±¯°³µ³°´´²®­´¬˜Š‰„‡wn~su|ƒ€‚†ƒ—°Ž[V]]Z^M2*020668759==>AA><>@@:??=;>?>=>;B=9<8999:;;;;?;8@?;K{µÜëíĆ|zzn~¬À½¿ÃÂÁ¿ÀÂĹ©–nc`^CJjp]QOCGNLD=7.-/3/&)/.+&(6?86@MC_žÈƾÃÊÌÎÌÎÇÀÑÑ’ad^P}»ÐË˶¦ËÍÃϦorzuompwzwvyvnrtwyyqlnqruurmnrsvyvqmqvrwzwpnrywywqllkj\\bYQSL@PwwURQCSgW:.,AkrF(/1-+//21,+*#@wqy}rmp{|~shcVNTXY[YX[^YY]_]\\\[]fmijvzmis„ƒƒ‡Š„€|hI?IPLKSbifglT<@HRZMB_tq`Wq•eTx€imqiaa`\dpqquvriafrrmpsnhkqnkjjkjlnqf`w˜§¶¯²·¶µµ´¸¼»¹¹¼º¸¹Á¾·µº¹µ¶½¾ºµ¸»¹·º¿º¶¾ÃÀ½¿ÁÀ¶±ºÃ²ºÆ¼³Å¸‚homjmnkjfdhjijli]TRW_jqoqqnrxfaƒfcrssuuqopstrrursuurqqruqrssvsgkwwmiimtpqssrmifnmosxyws€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€b¦¿²«£ž£›Ž‹Š„‰“†‡Š‰‘‘Ž‰…„‚€ƒ‚ÇÓÄÈÉÉËÊÆÆÂÄÈÇÅÊËÄËÎÏÍÉÉËÌÌÛèïêàÕ½xŠ¯ÄÉ·£ºÌÆÃÅÊÉ®Ž‰ŒŒŽ”‘”–—–”—ž¡ ¢¨§¦§¢¡ª³¬¨§¤¡ ¡——™–Ž’—”–¾áÒÀÊÐËËËÈÌÎɺ¾ÊÇ¿ÂÀ¹ÂÄÀÂÉÀ´¶¶»¸·¹³­²³···µ³³·°”Œu^`hw|‚‚‚œ©„\X[W_[b_F358<=<==:8=AA=:<9::;;<<=B=9;A=Jq¾çíè뾇ƒ‹|uti{«Â·½ÁÀ»¹»¿º±¢‘~jXMNDXtmWJB@CA===6/062,+)%&&#'5=74^sqaVp•‘cSv~hkohaba[cprvvvpc]ftrklqolnpnjilljjnfc_qŠš«¶¨«µ¹¹¸·»½º´³·ºº¼»º»½¼º¸¸¸½¾·µÀ÷»¾¹µºÁ¾¶º¾½¹»Á¿·²ºÅ»³Æ»…homjmnkjfehkiijg\TRV_jpopqopsaa‡…dfuqquurttppsrmqprpkkosuqqrswsgjyxkhnswwvtuurkelmquwxur€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€a¤¿®£Ÿ ¡“ŽŽŽ‹‹Œ‰‰˜“‹–‘‹Œ‹‡……‡‡ƒ~šÃÑÅÍÌÈËÍÉÇÉÉÈÆÈÎÏËÉÉËÍÊÇÉÎÍÜèïèÞÒºŒrˆ°ÁËÇÄÈÌÈ«ŠŒ‰’‘‘”—˜™›œœ¢§¥¥§¢ §®ª¨£Ÿ¥©£“•””˜™•—–š»ÙÎÂÉÏÊÌÌÆÈÎË¿ÁÇÅÆǾ·ÂÇÅÂÆÄ»·º»¸¶¶³³º¶±±¶º¸´°¬§±®ˆghty}|{€{|€™ŽhU\d_b^`bVA406;99@>;?D@>ADCA@:>A@?>>>>?AB@??<<<<<<<<<;9;9>V…¸Õäâæ껊Šƒuqshy®É¹¼¿¿¼º»½µ«›ˆveWMQVa_L@EK?:668972374020(&'%'29418>=Cq²ÆÀÉÀÃÇÉÎÉÄÖ×—eh`Ms«ÊÑÂœšËÌÆزuq{xrruwz~}yusuyzyyvrsvwwvsnorzyyzwrsvxwutqorw|ztooplfg_ZWYOER|‰iJTVF>eoJ2*,VrM'-2/-0,.100'9m„ko}tipw}‚tj[MUc`USVZYXZXYYWWZ^^Z]bliepvihq~~tt~}z„‡mMHQAWkrrrqmO=EKUaWL^ro_Wq–aPtŒ}eimgbdbZapttqrnb_jutomnmmmjkkllmkkkgb\n‰™¨±¨ª·ºµµ´µ¸¶¶ºÀ½»½½µ·Áļ¸¹¾º¸¸º¼ºµºº¹¶¸¿¾µ²¼À¼»¾¼·±¸Â¸°Æ¼ˆhomjmnkjigiigghf\TQV_jpnlpptwc`„†ejxrpsqpprsrqqrolmqrrsqvqqrtxsfkvuow‚„~wnhffnqtuvuvv€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€[”°¥™–—”‡„‡ˆ‰ˆ…ˆŒ’Š’—†‰Š‡ƒ}†‡‚|{“¹ÇÃÌÊÄÇËÇÅÌÏÊÆÌÑÍÈÅÇÉÊÉÉÉÇÎÝèîçÜи•s‘Ä̽¦‘©ÄÄÃÊÐÌ®Ž‹ŒˆŒ‘ŒŽ“”•–•–šž ¢ 
¤¬¨¡£«««¤ž¨­ŸŽ•›•œš¢ÈÞÊ¿ËÓÄÄËÉÈËÉÂÄÆÅÊȺ¸ÈÃÁĽ½ÁÅÀ½»¸µ·¹µ¯³·³µº¹µµµ¦dgy€}uv}{w~„“x[Y_d\`d`_dYA09@;7@?:>>>>>ACA=<=>?@A><>?@<8=A=>>>=<<;;9>?5L¼Ä×äçé㽎}‚xwxgt¦Â¼¹¸º½½º¶´ª–mc^\g]J;8?BA@>?A<9@AA><<=;78?A<:>?;8@8@w²Â¸ÕåÝä縊wrsiw¨Å¼¹½¿º»¼·±§’pdfrdK:3/:C<<37=849<=?DJONHC;2151.01:>Jt«¿¼ÂÌÊÉÆËÈÇÛÒ’agbPt«ÇÒÆ™–ÆÔÂʪvpxsosyzy{zvtw||{|ytvywy}{tswxt|„{pt|z{wppyxxtnkmnld\]\SPo ›uYSYT=3[zb?0!7fa.$86++2.4-&8t˜wmyjiv€ugcfdabbOO\`ZYWVTYYVWZ[\Zhun[bshZhzzff{~|xƒ’†g\gibhplnsoW<>JW]QP`pma[s”cVx‹yfnrjbeb\clovwvnb`hnnnmmnnoommlkjihhnh_wŒ¢¯®³¿¾µµ»¿¼·µ¼Á½··»»»ºº¹¹¹º»¸¸½¾º¸»»¼¹¶º½¹½º½º³¼ÆÁ¶¹Àµ°Å¹jnnigikjilkgehkk]USV\fnnspnruec„hhwrouxoqrssrqqttprxuoqrstsrurgkwq~ž¤œœšš˜ˆm[^imruutuvv€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€S‰ž™ž¡–†€ƒ…ˆˆ…ƒˆ…‡ˆ…ˆŠ‡ƒ€‚„€€€}€›¥®´»ÃÅÆËÅÆÈÊÍÎËÇÈÏÎÈÆÌÌÆÆÕãîç×ǯx–»Ë̯—ªÃ¸¯¸Ä芉Ž‹Œ‹ŒŽ–˜˜ž§¡¢£££¢¢¤­´®£¦ª¤›––™ ¡–’™¢”žËàÎÅÌÈÆÊÍÈÆÉËÁ¿ÇÉÊÊÀ¾ÆÇÄÄÇÉÉÃÄÈÃÀ»±²¼ºº¹·¸º»´´¼«|k…ž«ª¢¨±¡”§~WW_`c_ddccfhaW?58=9<@9=?ABA@@@@@?>=;=:754>@Nzª½¾ÆÌÈÅÅÍÌÆ×Ò’agbPt«ÅÇÁ ™ÁÖÐȨvr}xtv||z{ytrvyz}~wqtzy{~zsrx{|~}vsv{{|xqotz|yytmjjjhf\\VRo—«•nXVXTA6UuiF4%0]]5'/.,./.,*8]‹”vhydgy€yrfadb_``TOWZVWVVWWVVWWW[^fqpbdoi_etyhdx€urw‡ŠtcfddoytoppV>?IW^RO`pma[s”ŽbUx}gkmibe_Xanupprnc`hppnnqnhjqklnonmkjof[u•¦°¬¯»½´³·¼º¹¸º¼ºº¾º¹º¾¾»·¶º¹´·À»²³»¼ºµ¶À½¹¿Â»¶ÁŹ±¹Â·®Ã¹†imnjhjjighgfimlj`UOU_lqmlnqvvdc…†cfwrpuwtonrsrrttrvxsprsqrsrrvsinvpƒ¥¨š–šš–†jX\imsupnrtq€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Ht‚x}‡‚ohmq{ˆ‰‚‚‰ˆ‡‡ˆŠŠˆ…‚…ƒ~~{|‚€„Œ‘˜Ÿ¦²¹ÀÈÉÅÉÓÕÎÈÈÄËÎÈÆËÍËÁËÝðç̸£‰v•ºÉȬ–¹É¾¸¹´°œ‡‰ˆ‰ŒŠ‹‹‘—š˜¥§¥¡¤¨¢ž¥®²®§§§ 
™—˜›ž›’‘™¤–¢ÌÞÎÈÍÃÃÉÌÇÄÈËÆÄÉÆÇÈÀ¿ÆËÉÆÇÊÍÉÆÇÄÀÀ½¼¾À¹¹¼º¸¹¶·²»¬v”©³®¤±Ã¨‚}ŽiZfaZcfjjjhijihY9/;>:;:???ABBA@BCDA<<>?==?A?:9;B;785//4;9H{²Á½ÁÏàÚßâ´ˆ€upqgs£¾¹¶º½¹º¼·²œŒ}mojRB8431471,0;@93:CJMSZ`a`][QG@=AB<:Ca®·º¿ÈÆÅÆÎËÅÔÒ’agbPt«ÃÅÇ©›¾ÔËÍ«ytƒ}ytsz{z}|xww~~{|zutwx{~zsu}~{{}{ss|~xqnoruzzvolkkhf_Y[m‹¢¦‹bX[WTH:ImuR:)(Q]=+--+.,0$4Wœ‘rm‚wgp|}yoebcb`aaZTWZZZYW[\XSVXZ[dhigcdmjdalwiat{vpzˆ|d\aeoxvnklU?AHU_SNaqma[r”ŽgUv|ejnd`fbYanvqqvui_dmomlmnmnoijmopnljjaVp‹’¤¯®°¼¾¹¸·µµ¸º»º··º¾¸¶º¼»º¼»º¶¶¼½¶³¸¿½µ¸À¿¶¶¾À··À¼¬°ºÅ¸¬¿·†gloljjjgfghhjmkh]URV_jrrpmkqvge…†dhysorrqqrqpqtwqpuvqtwrnorrrwukkvw‰¤¢˜•˜†xhZZclttmmvyu€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€8[jd`b`UTY^qˆ‰‚‡‰„„ˆ‰‡‡Š†€}€€€€~€ƒˆˆ‹’š§³½ÇÃÆÑÓÍÈËÍÑÑËÈÉÉÆÂÆ×ðå뜂s—¾ËÇ©’³ÈÅÆƼ´ „‰Ž‡‡‘‰Ž“—š›œ§¥ ¥«¢ž«¯°­©§¤—‘•—–”•›¢ š§ÎßÎÅÌÉÅÈÌËÉÈÆÃÅËÈÇÉÂÀÇÊÉËÉÅÇÇËÉÆþ¾Á»·¼À»º¼¸µ³»· ™¨±¶µ¤§¼­‡zsaZbcaehlkjkjklnmS<:B>:??@AAAAABBBFGBBC@;;<><;:;>732.,3>@;gª¾µ·ÁËÝÖÜݱ†wrsgr¡»µ³·º·¹»·®›‡}{u]C6123012..3;?:9BLSTUY]acec^WPNRN@9Ot—©´¾ÄÁÄÉÈËÆÂÖÒ’agbPt«½ÆÇ “ÇàÌΫzv|yrv}}{}{vtw}yzytswx}|ttzy{y{|xuz€ztrppswyzwroqpnf[_—œ §XW`VUM=ACB@?BEIEGJFDB<=AC>97;>=84346;BC?BDB?>BGHDDGGFC?ACC?>Q†°¸³©¥¬ÂÕÏÔÕ©ƒ‚ytugp›´«©®³±³¶²›Œ—•mTN;52/-+,++-37>EHHLPTY^aehkjihdchcVV~¡°¼ÊƸÃÃÅÅËÈÄ×Ò’agbPt«ÊÌ¿š—ÀÎÃɤut~{{rsz{z}|xwz}}~yswzy{~ztu|~ww€spz}}|wokq{yzwrnnkg^x–¥’Š™‘kPW_W\R6'Q†]7"DlT&!.,-))Nr„““wboymjyzohcdaabbacf`Y[YUWZ\ZVVWWX\cZXko``jf`cnh`m{ztx}{}{mkoqojkkkTAEKU^TRbrnaZr’ŒaRu{dkrh`b^Zclqrtwrc]dnsolmmkijspmjijkmrh[sŒ”§³©«¹¿·³´¹¼¹·»½¸¶¼ºº¹¸·¸¹º»»¶µ»¼¶°¹º¶³¶»¼»¹¿½¸ºÁ¼²­µ¿µ­Ãº‡gloljjjgjifceikiZRNT]jqpppptucb„…egtnptrprrrqrrrostqqsttrrtrrushpoz˜¢••Œ‚vso`Ycmnnorvwv€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€3]h^^]XTVU^sƒ…ƒ……†…„„„„„€‚}|€‚}|}ƒ‰„ƒ‡‰…†Šˆ‘–›£°½ÄÅËÊÊËÌËËËÉÃÑíḢ›tœ¿ÆÁ¨—µÊÆÈÎÉ¿¥‡†‹Œ‘Ž“—˜•–š¢œ›¢¢œŸª©¨§¡™˜—Œ“Ž”™•™¢­ÌâÒÆÎÒÌËÍÍÎÐÐÃÁÉËÍÌÂÀÁÅÄÄÄÅÊÊÄÅÄÃÄÁ¾½Çÿ¼»¾ÀÁ¿¿»»ÀÀ»¸¨…dZ`ba`^Z_`bgjropnjlpojpuyqW>;EABBA@@CFGIHGHFA=B@?@DGHG<77=>>HV¦®£¨«¦«¾ÒÍÑÒ¦vrrdk–­¦¥ª¯­°³°ž”žŽX?>-.+(&()((/6:?ILKNRV\`ceilkjjhfig^k–±´¼ÄÀºÄÄÅÄÉÇÆÛÒ’agbPt«Ï͸“•µÆÑÌ¥wu}z{pv||z{ytrw}}y{yuuxzƒ|rqv|z}|wvvyzzuonu|xywromjfg”¨ 
ŒŠšŽeSY[X`Q/!G‰•rD'>lT$%<63-Afx†–‰peonfmyqfddgecfedghbYZYSVZYZWTVX[^]NRlmWZje`coi^k{€xw|}€‚minohdhhU@DMV\STcrnaZq’ŒbRtŒ}hjjc^ca[clqssvse]guqmiinqnipolkjklmsh\t—«·®°½¿¶´¹¿¹·¶¹»·¶ºº¸·¸¸¸¸¹½Âº´¼Á¹´¼º·µµ·¸´³¿Ã¹³¹»µ¬¶Â¶«¾·‡imnjhjjihhfdfjjgZTSV]hqsronqsdd‡…fiupormnssnlnporttoottmoprrrwuklr}’•—†}sssaV^inprw}|w€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€;dmbb\WZZ\hy…‰‡ƒ„ƒ€ƒˆ†ƒ‰„ƒ†‚~…‚||‚‰„‚†ˆ„†‹‰ŒŠ‰—¥°µ¾ÁÅÉÉËÏÔÆÂÑî⸡˜}v¡ÇÍÅ©—¸ÍÅÂÉÊÀ£‰†ŒŽ‘‘““—“‹›š–—› ¤§©¨§ —˜˜‘•’’‘Ž‘–”›ª¾ÛäÏÆÎÖÍÊËÊËÌËÁÀÉËÌÊÂÅÈËËÊÆÂÅÅÆÈÇÇÊÈÃÂÂÅÅÿ¼¿ÈÆÀ¿Á¿º­›zh[YX\a_[[abjnflknmhjqrnquvzpL7BCB@@ABDECKLIKIB=@?ADEEGJC@DIBA\€©«£ «­§¦¼ÑÌÐФ€|soo`g‘©¤¢¨­«®²®œš˜}P;5)/,((,.,+5<=?INPVXZ]__afjjjlkhig`a–¸»¿¿½ÂÂÆÉÆÆÃÆàÒ’agbPt«ÁÏÄ££³µ½É£vu~}tsyzy{zvtw|}}zuvy{€xpr|y|zv{~zyvrpruxvxwsqqnk…¨›|˜bUYXYcO+@„ŸM+8lP#5P?53_v}‹‘|ninoioxria`db`ccbemdVUUQW\TTUYZVV\XNYur\\hd_cqk]i|€tkov{‡whjqjeikV?DNW[RUcsnaZq’ŒiUq†zjlhb^c_W`nwusvudX`oqljmnkkmjjjjkkkkkdZrŠ’©¹®³½º²µ¹¸¼º··º¹·¸¹µ³·º¹¹º·¹·¶»»¶³»º»¹¶º½º´¸¹³³¼¾¶¯¼É¹§¸±ƒjnnigikjgklhfffd_UPU`lrpinswtacŠdjxssupoooprtttspoqqpppstusquqgkwzy€’œ‘~}yyuaXeqsngkx}z€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€'FmsŽŒwinwmoywkeddc\ZbgfdmbSWXQXZRSWVTVYYVUXim\ZgcXbmi`dtztqpqy…{lisndecT@DKT[QQgwp`Yr…cUvŒ~gjoa]c_X_krvuwrc]eqqnllnonloljjklkjpi]rŠ”©´ª°¹»º·²¶¼·¹¿½·¶¹¸º¸¸½¿»¸¼¼¹´µ»¹¯º½¾¸´»¿¹´Â¿¶·º¼º²¹Á¶®Á´~emmikkijghikgekh[QLQ\iolqoouubdŠ†dhyvttpuooqppqppoqsrqstrprrrsphmyrpŠ¤¢Œ~vu{vb[fokimtxyx€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Aiqefe_[filu€„‚‚‚‚~‚‡‹ˆ…„ƒƒz‚zz„‰‡…„……„‚‰‡‡„„Š‘›¡©±¶»ÀÃÆÔàîç̸¬~w¥ÆÉÀ ˜¼ÌÆÂÄÊÚ‚‹Œ‰ŒŽŽ“•’Œ’˜œœ¢¤¡š™•–œœ™šŽ‘’‘•Œ’¿àÓÆÍÔÏÎÍËÍÐÏÆÈÏÑÐÌÄÁÉÊÊÈÆÄÄÇÆÅÆÈż½ÆÈÂÀÃÆÈÇļÄ©…rprjjgeilf\Q\cehklnnnmoppnlntutwxdJ=AA@BA>=GIHHHEBC@ABCDIMMDOGD_”¶ª”Ž“—Ÿ—˜¹ËÄÉÊ¡~ƒvqmdk†ž¤¡¥ª«¬ª¤£ŠaC;831,''**+/17DGEB?KJIJKFCFFHEBEJKKKKKd´À©‘Œ™¢¦¡˜¿ÍÃÇÍ¥‚~€uqogm‰¡šŸž 
§¨¤¥™rO@:62.((')./18=?BGNSVXWY[[^beeegljfgigZn¥Â¾¾ÂÃÄÅÇÅÈÂÁ×ÐŽai`Q{±ÎÌÆŸ›ÂËÆЧrv€|xwr{{{~zps{}yzwtvy{€„€uquƒzw{ztsxyyysjjsy}wuumiœŸ“›™†‹ŒoUX[[Yd\0F‚€_:*PjUS|_/BkwzŒˆt`Uakmtupkif_efehhda[TLV\Y]]Z\\YVXVPNW^kmccgaX^ji]Ygu‚}uxtq{…xfflghjT=?HWaUPctn_Ys“ˆcUv~gjoa^faX^ltrtwob^hqoommlkjjoliijkkknh\r‹”©´¯°·¶²µµ´··³¶À½¶·º·¶º¾º¶¶¾¾»¹ºº¸µ·¼¸´»Á½··º·¶·»¿¸¯¹Å¸¬¾´‚emmikjijkifhffkg\RNS]ipnnonpsfeƒˆfjytqrnqpmmsrqsrprrnknqorsontqdhywjt’›…|{vyzdVblomhgpz€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Fovilojcgiku~}‚„‚€€‚ƒ…„ƒ„†‡…„ˆ‡„€}}€ƒƒ†‡†‡ˆ‹‰‡†ˆ‡†‡‹‡‰Œ”›£©®ÆÔÒÁ»¸¤x{©ÃÈÁ –ÀÍÅ¿Áǽ‘‡ˆ†ˆŠ‰‹“—™™— ž˜œ£˜Œ˜–‘Ž‘–˜˜•”•——•–™Ÿ•ÂàÕÊÍÇÊÍËÈÍÓÐÇÈËÊÇŽ¹ÇÈÆÇÌÉÃÄÆÅÈÈÄÆÇÃÅÈÈþÂÅÂƺ’oksplmjkopnotcX\jkiiinoonllnqporvw{ypT<;GFFHAEEGLOJGKLMIGMLJOPJg›­´½¯“”žŸ ¢¡ÄÏÂÈЪƒ|tqohn‰¡™›œ¢¦¦¢_MG;20*,/,,201;AAAFLRRQWZ[[^figdgijhhii]h Ä¼¾ÅÃÂÈËÆƾÎÑ_h`R{¯ÂÄÈ¢‘·Å½Ó«tv}{vrqz}|~xow|yvz{tp|}€{qpx~vsz{utz~{ztkku|zrtshoŒ¢˜‡‡‰rUX]^\kf7E…`>7ZiX[}U5Xxs‚~iVCOW\fliiifbfc``]VRPLJUZWXXVWWTT[[SQW^iibcf[X^in`QUruurntƒ}jfnkhiS?@GT_SNarm_Yt“‰aTt‹|ehma`gcY`mvuvuncbhnqomkiijklkjklljhlg\rŠ•¨³¯®º¾·µ·º»»¶µ·µ²¶¸¹¹»¼¼»»º¸µ¶ºº·µ¹¿½··»»¹¸½¾¼·»Á¹­¹Å¹«½´„emmijkijjgcffhmh[SOT]ipotqmored„‡fiyrprprsqoppqupqrroqrqrturqvsgn}{lj„”„xqq€ƒi[hqrlehryy€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Evpjjjiddit|usz~|z{|}}}}}}~€…||xtyƒ‚ƒ‚‚ƒ‡‹ƒ~€‚€„ƒ……†ˆŒ–­Â²²· uy£¸¿¹›–¸Å½¹½ÄºŽ|…†ƒ†‡‡ˆŠŠ’—™–“Ÿ 
š™œ’‰’Ž•™˜““––“’–™™‘—ÂàÑÅÉËÍÑÏÊËÍÊÁÆÍÊÇÆÄÄÇÆÅÇÉÅžÄÈ¿ÆÍÃÂÅÆÃÂÈË»£}hnpnqplkljhlsiXXflkmmmnnmkknpilou{xx€pLAFHJKPTSY\Z\bfffffhkjii]b™¾¹¼Ã¾ºÄÊÆÉÆÀÎÊŽfk\JyµÇÅÅ–­Ä½Ô§qxxsuuz|yy{ysw{xu|}vq}|}~{srz~~{ywwwxxutqlnv{xsvqi}œ£žž‡…‡tTUZ\]rp=?y„fFCave`oNGkyvŒnbTAILJNQRV[\]ZPHKMJHPNOWZWY\TVVST[ZRTUYce`dgXUYem]N[z{|{wmlu|qkrmgeUDGJT]SN`ql^Ys”‰aStŠ|dhma^faX_ltyqqod[ampomjihiklkjklkigke\r‹”¨²¬«¸¾¶³µ¸¶¶·¸·¶¸º¹·¹»º¸º½º¶±³¹¸²°¸ºº·µº¼¶´¸º¹¶¾Âµ¬¸Å¸«½µ…emmikjijifcfghlg[SPT]ipolfelsegŠ…eiwrpsrronmmpttopsrqpqoqpqqprofjwzpkz‰‚ys|Š€f\ejoneahoq€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€P|lhkkhigjwztw€}{|}~~~|}}ywy}€„zuyyv|ˆˆ‡‰‹ŒŒ‰ƒ~}}€~€€€‚‚–³¿°°¶žsz§ÂȽ™–¸Å½»ÂË•}…†„†ˆ‡‰ˆŒ‘–—˜——œž›˜–Œˆ’”šŽ’˜š•”™œšž”–ÀáÒÁÆÏÊÌÐÌÈÉËÃÇÌÉÆÆÂÁÇÈËÌÉÅÆȾ¿ÄÅÆÈÊÃÇÊÇÅÈĺ©‹mipppxruslijifocaillnqmmmnopmkirsswww{`BISTURRV\\YZ^UWXWW[XPTRS_bbffZTVelUIa}~}|wrps{ƒwkljhhXILNU_UP`ql^Xs“‰bUuŒ}fin`\a^W_jqsprqc[dsonmlkjjinkhgijjjid[s‹•¨²®¯¹ºµµµ·µ´·¼¹·µ´¹··¶´¶¸·¾¸¯¯¶¸³²º»½»·¹¹²¶»¼¹µ¼¿²­·Ã¸¬¿¶„emmijkijigfhgfidZRPT\gpoqmksub`„„dhwpossmloplorqrqsuqorvtqststqiiuxtow…‡wr‚‘|]\ljprmkpw{€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Nynorkannnv‚‚|z€€‚……ƒ‚„…~€„†‚‚yv„—œ™–•–—–••Œ…ƒ‚ƒŠ†ƒƒ„vƒ¦½±°¹¤t|¬ÊÒÀššÀÌÃÀÇÐƘ€ˆ‰†Š‹ŠŒŽ‘•—˜™œž›œž˜Œ‰˜’“–“‘’‘‘˜›š›œ›•“¼ãÔÂÌÒÉÈÍÌÉÌÏÆÇÈÅÆÉÄÀÂÄÈÇÁÃÇÄ¿ÅÈÈÊÊÅÀÄËËÂÂȼ£“{kmppqprutnlmicnkhhjiimonoprqomqsroquwxzqTAFEAIFGGHKLLOKFGGBENTn‘¯½¼¾Á³©¢§ª¯±ª«ÆØÓÝ㶈|wutkn†œ›”“•š¡“u]YI941,)""(..1;BECBEJMLILPW_cdgighfhlhej`X…¶Á¿ÀÀÄÇÉÅÆ¿ÑÍŒ`h^Ny°ÁÈÇ™Œ¶Ã¸Â¦us~„}oqw|}}{uov||{}yst{z{~vty{xz|srwxyyslnuvw||nm“°ª‘ŠŠ’ŠhTKLQ[ƒI-Okp]TgˆxbQOntrŠˆlacdh[VY\UMNNHEHHJSYXVWVY]\XUXVVVVWZZVXUU]acf][VUbnYI^zƒ€wyysoz†|leejkWGJKUaVOarl]Xr’‡_Rr‰{cfke_c`[dnrnrvob^itrnjijklljijklkhehc[rŒ•¨²¬¯¶¶³²²·µ²±´¶²²¶¹¹¹¶²µººº·¯®·»¸¹·»¾»¹ºº·²º»·±¶½·®¶Á¶®Á·ƒemmikkijbdfjhgkgYRPT[footqoqrceŠ…eiwpmrsnpsqonnlnqsqosxxqqrqpspfpxurvz‚|w‚ŒwWWmoomnrx}~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Dwƒqnqmhlpor}ƒz…†‚ƒˆ‰†ˆ‡…†ˆˆ‰ˆ†‚||ˆ–›ŸŸ›œ˜Ž†„„ƒ…ˆ‰‰‰‡„„‡‰~ƒ¢»­©²Ÿy{¥Àȸ˜ 
ÄÎÿÄ˾‚‹Œ‰ŒŽ’’’“–šŸ™’–ž—ˆ…‹–š‡‘œ”›žœ™˜—Ÿš”¸ÞλÊÑÉÅÃÁÅÉÇÅÆÈÂÀÄÄÄÆÅǽÇÍÅ¿ÃÃÄÊËÈÈÏÐÏÊú¢‡mglokntrsmlopljkloljoomoqrrqooqttjilmtzy~~hMGHFFFJJJMMKKQNIKOKRho{¡ÆÅÁű¤¥¦Ÿ¦·±©ÂÖÔßã´…zƒyvrgh“–•Œšž†jZZH50)&.'%,2.1Xe^_bflnihmbV~¼ÇÁÄ¿ÁÅÆÂÇÈÆØËllVKw¢¿ÇÀŠƒ²¾¼Ì¦sy‚~xuq{~{}}wqrz||}xpqyzz{xrpt{}}ztsuzv{{qjpwut€{j}Ÿ¬²ª—ˆ¢›|rg_[VfŽŠV.''4Seafpvdctmoˆ‚f`cWRTLScdSKUYPSUVWYZWTZZZ[[[[[[VTUW\ZRPTTV_d_YUWPWigOPk€|~|xqqv}}qcagT?AHS\RQ_on`]r‰„cSt‹zdhlg_b]V_ksvture]ervpkiijkliijjiiijjf]t–©³®­µ¸¶·µ¶µ®®µº½»´µ¶³¶¼·²¶Á½¶µ¹¶°®´ºº³´¾¿·´ÀÀ³°¾À²³¸Àµ­À³{`ikhkkhiefdijfggWRTRYippnooto^gŠ‚dftqoppoonoruroqqppqttqoooprxsgjyz{|vtwuqqwq`\hsqoqv~‚„€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Cnymmnopti^l„ƒz€„††ƒ‚ƒ„……††………†‡ˆ‹ˆ™•š—“”–£¦¤™››˜–‡†‡‚|{|€‘››¡œ‡nqvyƒƒ’®½¿¿Å;|†‡‡ˆˆ‡Ž‘•™š˜•˜žšŠ„‹Ž”•‘ŽŒ‡Ž–œ ž˜š˜š¾ÞÐÀÃÎÇÇÊÊËÍʽ»ÄÑÛäæßÑü¾ÀÂÅÄÀÁ¼¶¸¾ÁÂÀ¥Ž€rfdekpqmknpqooooppqqsphchopoomkknprrnoqppqtv|z}{fMDHKJHDBDKPLJMRSWh|rtytqŠ­º¬¦£¯½¸°µÎÞ×Ú廃{‚{rpde…›Ÿ”‘˜š‰seXK<30..0//**3=CFKFFGHKLIFNSH?L\[`efgkgdkdU†ÃǿľÃÅÅÁÆÆÆÙĉgjXO}§½Ê½ÁÍÁŤsu}{wrq{~{}}vps{zwxuoouvx|{rouuvy{xtuyyzysmpvwu}tj†©«§ 
‘ƒž‹oincb`n–“[2$',8QdmvwZi{j{”ya\[JFSZ[caTQWWPQWWUWXX[VXYXWVX[_WTWYXTLQTV[aa\ZWZQUgkSNgyy{{wsszzh`eQ;>FS\SQanla]qˆ†bRuŒ{dhjc^c`Z`jousvrd[cntnijkihhlighkljghcZq‹•¨²¢«ºº´´·»µ±´¹ºº·³¶¹¸¹½¸³¶¹¹´´»·³¸º¹¸º¼¼¸´¸¹¹²°¸»´±¶¾³¬¿²{`ikhkkhifechkghe]QPS\imnmlkrq`e„‡hjwtopppppoprssprusoqsptrrootsjg{~{|zxtxtu{xe^hppnlr}„ƒ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€@pzjkqpkti_m‡‡zy€„„„„…„…‚„…‡††…„ŠŠ‹Š‚‹œ˜”™“’•’š¦ª¢›™›”‰……||y}ŠŒ‘xz}ux€~‚˜¨¯µ¿Ç³…„ˆŠ‰‰‡‡•—’–ššœœ›˜‹„‹‘“‘’ŽŠŽ˜ž¡¡”•™½ÜÐÁÄËÅÇÌÊÊÌÍÄÃÈËËÕãéàϼºÁÅÃÂƼÂÈÁ¾¾µ˜zgghmojopnkjklklprrpopsuslcdlpnmpqnlnqsnpppopsuptvyxeMDFGHIJKLMJGINRWdrwrz~to}‹žª®²¸´²½ÏÞØÝ纄~‚{sqef‡œ˜—–’‡{ttbJ<;630'-/*+9@@DIINPJD=515BMLHRaed_bkgdm\R˹ÂÂÅÄÃÀÅÄÄÙÊgiVP~©¾È·y~»À¯Ã¦vw~}ytq{~z||upu}|wuqnrqpptslktwvxzxwx||zwslnv{{wqq²­š‘…z‰’yeiqcehp”—i9"&..>Yo|q\ntpŽ”sa\WHJ]hcf`WTWTPU[XW^]UTXXZZURU[UQQX\\VRRSW`gaZXWZQSdoYMc~‚xvy|ywtu|zlbeR<=FS]SPaom_\q‰†aRuŽ|egic]a]Wamttosr`^ktrljmomklmjfhkmifmg^t—ªµ©­·¹³³³¶µ´·¼º´´·º·³µ·´±µ»¼µ¶½·®´¾º²±¸½»·¹¼¸´·¼»·¯¸Ã¶«½³€dmmijieeggbeffig[LKR^koqqnkstdi‡‚dfurpqrrrppqrpnmnrsortrrsrpottlly{€yvzwuw}vb[fptspvƒˆ…€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Eu€qswtmyn^e€‡}zƒ‚„„‚…„„„††‡ˆ…ˆ‰„„˜–“¡˜—›•–£¢Ÿœ˜”š›“‰†{yrszzv|†…ƒ‡}z‰€„Œ“ž¯¸§…ƒ‡‰‰‡‡‡†‘•Ž”™–™›š—•Œ†ˆ’Œ‘–ž¢”“•˜»ÜÒÄÆÉÃÆÌÌÊÍÍÆÆÊÆÀÇÜéçÝǹ¾ÁÁÆÆÈËû¾­†ibfjimrlonnppooportronqtoqpg^epppsspnopmopqpppsusryysk_PMLNRSPMMKLQUYdpurtyywqh‚™®¸¸°²¾ÒÜ×áé·‚‚{sqegˆž•›—„xx~jNAC?<4$#&'0@FDHLLPMB;96999ESPP`h`VWZUT_RO–ÌÀ¸ÁÂÅÃÃÂÆÃÂØÏggVQ‚­ºÁ³~‡º¸«Á¤vw~ztq{}z|{toovvsqjejqonpoggpuvwwursuy|{rjnwz~zuw“®¦™–†{”–i]xuehll‚tB"!/13Ksriqxhzšˆi`]WMS`caicVW]\VY\WU]\TR[WX\\VX_YXY[\XROTQS]d_YZUXQRanXG\|…}zyyuwusuyqecV>?GU^SO`qn]Ys‹…aRu|dgic\a]Xantqmtq`[flojghkklmljjijiignh]rŠ“¦±°§®¹º³°´º·¶¸¹¶´µ·²²º½´°³µ¶±³Àº¯±¾¿³¨°½½¶³¿À¶¶¾»®®·Âµ«½³€dmmijieeegeeecih\VVSWejipposp`j†ghurmnnmnpopqrqpmorsstrssurqtrhnz{}xy}{vuwq^[houvu{…ˆƒ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Fq|tuupmrl_gƒŠ|sƒ}‚€‡†„…†ˆŠŠˆ‡††‚ˆ’’”ž¡”‘–•–’”ž ™’•Ÿ”ƒwtxqnjbi~ˆ”ƒ€‹}z~}}‡— –„€ƒ†…„…ˆŠ’”––‘“––•–”‰Š†‰Ž‹‰‡‘–”” 
¥˜‰”¡®ÎãÓÃÈËÆÇÌÍÏÎËÀÃÉÉÃÉÙãë߸œ¡¨£¤Ÿ¡¤ª®¡acfjkfhkjhgjnolmpqrssqppqrqrhX\lrkqsqnoqqpqqqppru{vuz|vpqmdZVVVQMLKQWWWduuuursuuqtwˆ¤´¸ºÀÑÛ×âéµ€„{srfh‰Ÿ™ “yr‡~lVJIED>/%(.9FIHJSSSK>9<>?QMDNSU`h\PIB>DOLQ™ÉÀ¾À¿ÄÂÃÄÈÂÀÕÍŒdfWT„¯¯Å¹{„¾º§¾Ÿpv~}wrr|}z{{tntvsqrkfkonnqpihoquxzwttuxy{ulmuzyypt—¤‘—€‚—zZr‰ylmnfozsN'*2/Coto}zo“£z[VSNMYb`af_WY\ZYYZURVXTUWPOVZWUVUWY]]VNMVRS\d_YZZ[SQ]lYFZy„|yxutwsszug`W?>GU_SN`ro[XsŒ…bRt{dhjc^c`Yajonqurf`foonlklmmmjklkhghikdXm„ «¦¤±»¸²±¸¹¹·µ¸¸µ´µ°µÁ¸³·¸¶¬­¸¹°®º½·±¶¹´²ªµ¾¶¯¼Á³®´¼²«¿²{`ikhkkhi`gghedgeYPPRZhmnonjpn`h‹„fhwtrssnrsplmorsrstsppstuutqsnbk~~y{||{ztrwsc]iptvw|„„€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Gq{qturoqi\e‰}x}}‚„ƒ†‚‚ƒ…‡ˆ‡…‡‡Š…|€Œ‹‰‘•Œ†‰Œ’“—“’Ÿ›–†wt}ofaZ_wŽ‹ƒˆ‡vozxx{…‹…€„„‚†‹‘““”••”†€€ƒ…ˆŒ‹•’ žŽ‡•¦¶ÓåÒÄÉËÊÍÍËÍÍÇÂÂÅÿÉÜèðÝ¢t{„ztuj|¡¢y_dgeacijkoljknpnmnlnprrqppwrqm_Yeskorpnoruqrsrqqsuy€wsƒ„upwrmjgaWOKINXWS^qvpswuuzsmq|‘²Â·ÎÜÙà浂zsrgiŠ œ£•yt…‰{ndXLDEFB76=FIHHGORYXNGDADZYHDITdfZQG>HX\HY£ÉÁÀ½¾ÂÀÃÇ˽ÓÍfiYQ|£«É½y‚ÃƷßpy„€yur|}y{zsmsupllifkkkiihfipmosvsptzzwwvnkszxwnu“•‚‡Ž|wv\i—•vqqldfmmZ1%-0=Xqy{u„¥›lVPMNR\ddge\VYVRT[XTU[[QFEDGNWZYWPPT\`VMNTSV^c]VX_]TPYj\JXu€}yxwtxtt|xiaT;;DU_SNbqm\Yq‹‡cSt‹zchlg_b]V_lsuuul^Ybkjnoljkkikllkhgghng[p‡‘¥°¦ª¶·³±°²³´µ¹»¶µ»»±±¸º´´¹¼·­ª³¹´­¶¹¶¶»¸³µ¶¹¾·°»Â¶®´¼²¬À³|`ikhkkhiafffffhc]MJR]ilnrnjqtekŠ„eguqmnnpqqpqqpnqtsrsppvqpqqsvqdlz||zz|qnqxud`mxyz|€…†…€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€BoximutmldZh„‰|y€†…‚„ƒ€ƒ…ƒ‚‚„‡‰‰‡ˆŠ‰ƒ~‚‹‡‚ˆ––Š‹‰ŽŠ‰ŽŽ‘šœœš‰yzud[\Y\r‹¢•}Œ€xruxx~„}„‚„„„‰‘•–’“Œ‘Ž””‹…€‚…ƒˆ‰„œ‹–›ž¾ÝÖÉÊÈÍÒÎÆÆÈžÁÇÅ¿ÇÚæïâ¥nq€{…•vmnfedagoidnkiimswvsrtutqpprtoqvn_arvrnmoponrssrqqsv|qiowz|ssssrngaXPS_`Zbsyvv{}xv{~rsts­³ËÞÛÝ㶂zsrgiŠ œ¦›v„†ysrdN@AINFAHOJHJHMOVYUTTRKPROICJZj\SF@VkeEa­ÌÀ¾»ÀÀ¿ÄÉͼÑŇdj[RzžºÅ´y|ª°·Â›kw„~wsr|}y{zsmltrljgehklkihefkmmszvoqz{yvpknux~zy}‡†ƒ“™”sfu”¥‹osqjefhib;'$(17>X{yu“ 
„^XTU[^^_^de[SY]XVWVX[\[RGINRTWZYUWRQ[`UMRQRV\^WRT[YROXi[IRp€ƒƒ|wuwytt}yjeQ88BT_TOdpk]Zp‰‰dStŠycima\a_Ybmssqwr^X`bjppjhlnlllkjihggng[p‰“¨´¨«··¯­¯¶¸±²½À²®»Àµ±·¹¶³²¹·²°·½¶«¶¿»³º½µ¯²ºº±³ÃŶ­·Â·¬¿µ‚dmmijieeegbbdgidXQQQXgmmklmsq`g‰„fhvspqqpqrppqqqtuomttoqsqporvpbf|{}~|wrsw|vgk{xwz~ƒ…€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Gqz|wolscXd}ˆ€vƒˆƒ€€‚|„†‰Š„„‡ˆ…‚ƒˆ„‚ˆ“™—”–“‹ŽŒˆ‰Ž‘“Ÿjqpb[agit‚›£‘|†“ˆxkrv{€{xƒ…†‡†…„Ž•–”•–’‘–˜’”™˜ˆ€ƒ‚€‚‚™’Š‹•¸áÚÄÊÆÈÌÍÊÇÊоÂÇÈÍÚáëݦ|‡“Ž£§’tlqqojgfhlmiellmprsqostusomorvvqqskgounmsrqvqqpruursvkRFQ`ntrrw{{xtqokklrtjfousrrstrp~urttwˆœÌÝÜâæ´€ƒ|roce†œ¥›œ‰u|wbgk`LCFGENOIHPPIFJLS[]Z[^TPVUKFJXhZ@5.4RbQi³Ò¿¾Á¼»ÇÊÃƾ´ÅȆcg\Ty¬¶Æ¬|²µ¹Ê p{‚zwvr|}y{ztnorpjfdeikjhmoc`mpstsrsuvuwxsmqy|yyqyƒ…’¢²¥„Š£†vy{mjibolE)%)/3-W~~Ž¥’tZWUYYV^gcabWQ\]WXZZXW[[WVYXY\]YVVXQR]`UMLRQW^]YVTZXPMXcZIRm~‚|vzzwuuw}xgN>9BT[QO`pm\Xq‹ŠcRn‹€ehlf^a]YblprqspaWaoprjfoqllllmmlkjipc[qŒ–¤²°¬µ¹³­²¿¸¯±º¹±°³¶°®¶½ºµ¶±µ°°½½²­»¿µ®·¿º«¯¼»®±Áø¶Áų«¾¯zflieijhhgfeihgjd]TPSZenorolpo`i‹‚djwppurssplnrrnqooqrqprqrurosskn~„ƒ„|~€{{€~uty„}{~„†‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Crƒ|zurssdYcz…{z„‚€€€€‚„…†‡ˆ‡†ŠŠ‡ƒ‚…ˆˆ„†Š“•—™•Ž‹‘ŒˆˆŠŽŽyfgm`Zbimv‚Žœ˜Ž}optuy€{}€‚ƒ„…„„‰”‘’—–’’••–œš‘‰…„ŠŒ††ˆ“ŽŒ‡Š•œ·ÜÚÇÈÃÆÌÏÍÈÉÍż¼ÃÇÏ×Úïá©‚“Œ–ª™rure^fhijihghqmnsuropklmoqrssqxtossnmqroqqqtsututqwvhQE@DIU_air|€}zyzqtuyzqlr{yusrrrrpptxxvw{¬ßÝÖå¸}„„|sqdf†¡ 
Ÿ‡og``caUF????OUQLQRIAIKQY]^afb]_YLDER\TD?98GNQwºÌ½Âļ¾ÅľÆºÌɈdg[Sxª¿Ì®~ˆ»º¹ÃŸqx|vtyywxvqonqpnlgcciliijeclnnquxvspwxytlmrtxuo}›¬­¤¢¡•£„m„…mlnalqR-!*/18g‹Š›¨]SUV[]Z`d_ccVQ[]UU[XRRWYVTZ[ZYZ\[WVRV^]TNOQR\e`WUXRWSOZf[FQm}€ƒxuyvutu|zoT?9BS[SP^pn^WoŠŠfTl„ybimc\`^Zdotusto_Xcrolmmgghfhijkkkjij_XoŠ•¥´®¨¯µ±­®¸¶¯±¹¸³²´·³¯±···º¶º±­¸¼´°´º¶¶¹¹¸´·¿Á¶±¾Ä¸¬¸¿¯ª¿³bjkillfeacehfdjg[SORXdmmspkom`j„fkwonroopqprtsoppnotsqssqqppushp}}‚€„€€€~{zxxw€‚€ƒ‡„€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Dmytyyrood\g}†yƒ€ƒ€…ƒ†ˆˆ‰ŠŠˆ†ƒ€ƒ‡„‡‰ˆ‰‹”‡„ˆ‹Š‰‹‰ŽŠ„~qgjk^Zcjlw‡’–˜“†qcaovyy||}€€ƒ„†‡ˆ•‘Ž”™••’““’™•“’ŽŠ‹Šƒ…†‹Ž’’Ž—œµÙØÇÉÄÆËÎÌÈÈÌÀ¿ÄÇÂÅÓÝíæ­zˆ‚‰‹{migcbbhmkebejljlqsqprusqprsrqtusswrigouomrststpwxttjRFEFCAHUZ[cpy~}~~vz{zztpqtttsstvxsuwwzytn‚°Õç峄„}usfg‡™œœz[Z_ddWLEA@GNOYYQTWNAJLRX]_ejfa`WKDERZUIEC?CCWŽÄÅ»ÆƼÄÇþǸÈÊŠfh[Sw¦¾Ê¬{„¸¶³Âžqw|uswyz|uoonomjifegklhfhfisnpuywstyxxwpiluxyq‚“—««¡©­™–…htˆ~jjkftb5 /-.I‡—•Ÿ—gQVXY]]\ac^b`TP[]VUSVWVVTSU][XVY]]YTTY^ZRORTRZbZMMTYXPISb^MSmzz‚|vyxwtty{vZ?9DQ[WP`ol\YqŠ‡cUo…ychie^b^Yblqqpsn`Xanpptpgjneffhijjjika[q‰’¢³«¨²·±ª¬·¼µµ¹¸´´µ¸°¯ºÂ¸­¬¼º±¯¸µ­¯´µ±´»»¹µ·¼¾´®¹¾³³¿Ã°¨»¬wekjfjjhhfghjfdheZRORYdmmonlrqah‰divoorpoonnnooopqqpqrrrmnrsuwpbm|{‚ƒ‡‡‡…‚wy~€ƒ†‰Š€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Gmtoy{sjsg[d|…~z„‚…‡€…„„‚†ŠŒ‡…ˆ†{}‡ˆ‡†…ƒƒ†‰‰…ƒ„„‚„‰…ŽŽ†|roslbcnlhx“—†”±žgQakswyz||}€€€‚…ˆŠˆ‘Ž”•”‘‘“——–•—™’‡‚ƒ„„Š•œœ–”¶ÜÕÂÍÊÉÉÊÈÅÇÌÉÄÅÊÈÌÖÜåæ±m[htŠ~mbcec_ahmkdadikoqqqrsrqqpqrsssrtomrtkbmtpnpqsusmrzscTHFGGFCFMSSV`nz~~}z~yvvtsknqttvy{}zuux|yuov§å沇~„~vuhiˆ™˜—j=RkcVHELHEO]MWXSUZSFJNSX[_bf^\]ULEBM^YOLMKIHg§ÌÁ»ÅüÀÅÅÂÉ¿²ÃÊŒhh[Sv£·È¬x}±±°Æn{zvtqx}€€zplqsrojdchljimkcbkroquvsv{yzztmnux}xq„”Š‘ª®¢§®¡›šh_y‹vikgrm?$1(/c¡›˜—wUW\^][YZacaa_TQY[TTWWUVWUUX\VRUXZYYTVZ]XQPRRS]g`TQU_WOLS\ZPNhz‚ƒ{sy|{wtw{{]<7EQ\XOdngZ\vŠeZs…wdkje_b^W_iorrsoaZcompnjjnlggghjjjiine_tŠ’£´§§³¹±ª¬¸»µ³µ³²³´¸±°¸º°­´½º±²ºµ¬°À¿³±»Â¾±¶½º¯²¾¾´´¿Ä²ª½¯zgliehjhidbafeegb[SPT[fnollmusbf†ƒgjuomomoooponpsrnpqosuointvuunbp„Œ‡‡‡†‰’ŠŽŽŒ‹„xy}ƒšª¡€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€GtwwwtqviZc~‡}w}…„†ƒ~…„…‹ŒŠˆŒ’Œ€ˆŒ†„†‡ƒ‚„„„…††„„…†‹Œˆ‚xqpiep}sbuš‘r‰»©gSmlmrvz{zw}}}}‚…‡Œ”™—–••“‘“—˜Ÿ
š˜œ™ˆŒŠ”œ –•¹ßÒ¿ÌÍÊÉÈÆÃÅÈþÁÄÁÅÐÖãä±bD]~Œ…jYajhdcehjifcdfknoopsutpprtvuroqqopvsh\irtsolswurprlVINKIHIHDHQVV]jw{~€€xux{xrrrrsuxzzurtxwusrkr§×¶ƒ€ƒ}wuiiˆœš™c,>\WB7AQKAJXLQRORVRFEKRW[^`ac`[NFA>J[ZVU[ZQN»Ð¿¾À½¼¸¿ÃÂǼ³ÉÉihZSv¡ºË®x€´´°Ä›nz‚zusoz}{rjmopnkddjigilja`gllouuqrw{|{umovy|zt…’…¬©–¢²©¡œnid|“yajrqH(*$?„©œ’€_R]^^^ZW\acadaXSWTPQWWWWXUVZ_YVZ^^\[VVZ\XQNPPS[]WRTWZSOQV\YOKcz‚…ytuz}yvw|€_;5DS\WMdmgZ]v‰bYu†xejh`[`\V^jqqqtoc\dmokegnjgljjkkkiihjb]sŠ”¦¸¨¤®¶³¬¬´¶±²··¶··³µ¶¶µ±¯²»¾´¯¹»´±½Àµ¯·¿¼®´½º®²ÁÁ´¯»Á°ª¿²~cjkilkgegedhggicZRPT\fonnmlqq`gˆƒhjtpmnnqppqomorllqrmqwsoprpptqfk‡•““”–š–‘”—––—Š‚||–ÇÙÁ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Fxˆ|wutsuiZd‚‰|u~„|‚……‚ƒƒ‚†ˆŠŒŠ‰‰ˆ†„ƒ‡…‡ƒ€ƒƒ€~€ƒ…„‚‚‡Š†}rkj``q‚u`o“”~…“yZ`unntxtqtxwwxyz|~†“˜™–˜š•Ž˜œ›–˜›““‹•–•š›ž»ÜÓ¿ÇËÉÈÉÈÄÃÄÄÅËÍÄÅÔáêêªVQ†œ‹i^]dghkkhhiihfedljikortupooruvspvqovysf]jppsrpsrqxrmk]OSPONOLFIUVX`koqw{|{wrw|zyupprvxyssuwvsqrqwiw¦§…}|vvih†š•˜˜i/-FXH8?QIAKWMNNMPVQB?GPVZ^`_he\KEB?KX[[YbdYX•ÈÏ¿À½º½½ÂÂÀø²ÌÇjh[Uv ¼Ê©u»·­¼šnw~zrp{{wxyrlrqoolfbfgiihfgimimtwsnqw|{yqjmvz{}v…“…±¤‡´¥¢¡€`j•ŒlhrpQ- *^¢ª›}`QU]aX\YY`d`]c_XUUSQSSVXZXST[YYWWZ]ZUWVY]YPLMLV`_USTTTPOPT]^RLbw{xowywvw|„gA5BT\TM`nj\[r‡€\Vt‰zdhea\a\U\homorob]birlhkljinkkkljjhgja[q‰’£´©¦±¸³­­·°¬°¸¸´²±·µ³µ¶²¯±»»²°º¹®ª½¿¶³¹¾º¯¶¾¿³°Áŵ±¼Á¯§º¬wdkjgjkggefghdaeaWQOSZfmmmmkrqag‡€ghtqoprsrrrpnoqortsstvxspnnrwrdi…‘Œ‰‹Œ‘’•”“—ž›”ˆƒzŒ½æèÚ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Hy‰}{zuptjYaˆ}}„ƒ„„€‚‡†„ƒ…†…†ˆ‹ˆƒ€‚…„ƒ‚ƒ€€‚…}{}}}ƒ†skjjbbo~ykv•’“~WUrƒ{rtwqkpwstvwxxxxzƒŽ‘‘•™—‘”—“ŒŽ•˜–š—“›Ÿ—ŽŠ‡‘›™“œ ¡¶×ÔÂÅÉÆÅÇÈÅÅÆÈÃÆÊÉÍØÞìë¥`x§˜rc__`bjlcijjjigedqomlmosvwtrrtvvtttrosyrdhqoovurnotojpm_URSPMMHJTTV`ikhmwssxxtw|yxsoossoiepzzutvwqtrp~Ž‹|€zuuig…˜‘‘—o50LaT=CVTOUXNQSRWb[DBIPSW\__bd^PMG>F[^^Ydj`e¢Î̾Á½¼¾ÅÆÄÂǺ²ÊÅŒjh[Vx 
·Æ¥qº¶«º˜nw~zrszzvxwqoopqsridgjkmkhgiioqsutstv{{zrjkruuyr”Œ—º¤‚˜¯£¥¤“ˆkcxŒˆmml]6>®¥ˆdMP[_bVZVVae`^a\UTWVTVYVSUYVUZYZYXZ[ZVXVY_ZNHJNV]YRTZZRMORT]^TK`v~‚}tpvvrqqw‚vM7@SZQN_nl][p†€`Vq„ubjle`d^U\gntsrk`]gpnllkijkihhijjihgnbZp‰’¡°¬ª¶»²ª­¹´®²»º³°±±¯²»¼²¯¶»µ¬±¾·©¨¿¾µ²¸¹µ¬³¹¹¯¬¼À±¯»À®§º¬xglidhjijefgjgejgWPOS[fmmlllsraf…‚jjtrooqlmoppnnorqmntsoprpoquyrbl…‰ŠŽŽ‘•–”˜™Ž‰~ªáèÛÞ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€J}‚€ytng[dˆ…†}}„…ƒ€‚ƒ…†…‡ŠŒ‡ƒƒƒ‚ƒ……†…ƒ€€‚„€€€€€€~vppojqmr~}‡™ŠŠ–†ZVžuiqtrpnstvxxxvuz‚Š‹•˜’’•˜—“–™—™Ÿš’–š˜•Œ…‡”™•”¦£›°ÓÔÄÈÉÅÂÃÅÆÈÌÂÃÊÌÅÈ×ãçä¨~™œtbcXX^`fmkiklkhfeflppiflsvutrqqqrsrrstxzpaautpuspqumdfqvjZWXRMOOOTUV_jlhipqqy~{||wspstm]ONg{{vwxu}lgnuƒŠƒ~ztuhg„—œ‘šr4=`cM8Daf`ZNNTYX`phKHOSRTZ]]ih\IFC>I\ac]hodj¨ÐʼÁ¿¿¿ÂÂÂÆÏöËÄ‹jh[Wy ³Æ©s}¸·¯À˜lzytquy{|}xqqmmllh`_fjggieadikqvuqpsvxxvpimw{y|q}’Œ–µ›}Ž¤£¤›ŒŠ‹t^`‚˜|fhe@!P˜®™oSPXcb\XZSQ]dbbc\TTWWTUVVVYZVU[\ZY[[YXYXUZ`ZKEJRW[XRUWTWMLRTX\XPbrv{„‚wuxtnkkq}W:>SYOPani\\s†}aVr‡xckoc_d_W_ksrqqj^\fnmjjlljiidefhhhhhnaYq˜¦´¯ª²·±ªª³¶­°¹¸²±µµ°¯·¾¸±±»µª®¾½³±¸½¶²¶¼¾¹³¹´¬±¾½³°¼Á°©½¯{ckkhkkgfigfigfhaXQPU]honomjon_g‰€hitsqqtppnmnopqmprrolntrqqqrwsglƒŒ‡Œ’–”•™œš“ŒŽÄïæÒÕ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€J}{€€}tgY`|Œ„yƒ€|€ƒ‚€„‚€€†…ƒ…‡ƒ|}„„~€~|~~€€~‚|}|ulkmknkrzq}ˆxˆ‹ww‘°fjpmsloqrqrvwttx|‚…Ž”’–™—“•–•˜–—˜˜š  ™‹–š˜˜¢£¸ÚÕÆÄÈÄÄÇÈÅÅÆ»ÊÓÁÀÙèì๕xYV]YWZ\^bggilmhcejnqtpijrwsssrqprupusswyseamtnqvojchnopqpoebgfZWYS_WS`njcgqsutuy{yvnnxv_NOJTdtytrvwoqsoxƒƒ{wtvnj›”›ƒ?+HOB?QljTHINS[[e€wI>MQQVUV`feYKCEa_MGNNNOKHFIQVRT]\GBB42CLSfhcfkamŸÉżºº¹½ÆÅ¿¾ÇÁ¸ÆÄe_DEy¦¯º¡r»¾µ¸”djtolnoqsttrpnrnkklfdglmljiffjnoruspszxtspifjnnji}–”¤¨„kƒ•™­—`mŸ”hfqom{…gg›škW][[]_^\ZXZZZ^^`d\URUYXVV][WVYWVY[\^_^ZUPVYXZ^RDDPZ[URQNKLMMRUWZUO\lv{~€€}stuqv|uyƒuQJXVPcwn[Yqˆw[Ur†ybfgc`aZU`jrnkonb]ekjlmlkijjjiiiihhglaXp…©·ª¦³¹°ª®¹µ±³¸·¯¯´³°¯¶¼³¯·¾»°®¸³¨©¸·±¯¸·°°·¼¶«±Á½«­Àí©¿­xbjjfghfgkeceimj`VPPT\fmlmihpo\fŒ‚fjwplqwpqommqrqmpssqoopnnqqryugmŸš˜œ 
¡¦¢¢­®œ¯åòèæëíãÛØ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Glww„ˆƒzp\a}ˆ€€€ƒƒ€€~}~}ƒ…†‡…ƒ€€€~€‚‚~~~|~{{~||sotuliZW`_blkm‚Ÿ³¡„’®ª”yjlmnpprqpssmlqu{„‰‡ˆˆ‡‰‹‰–”ŒŽ“”’•–’‘’“‘“˜–•°ÇÆËÚÖÌÈÃÃÅÇÄÀÂÈÄÈËÉÄÇÔßÕskc]\YY[\^dh`TY]adiopmqolpskbdltzxwxvqpnouvxwi^dmjhpssvyyvuwxwrvwuu{xn`[[^[]lznxqfo{xqnwywwzzy}}xvwuruuvupqvywwtqfbuŠ‘‰†Ža0;G?SnbHTxvVFNQIEFEFKRROPVRBA=))>JT[bhce_j—Èɾ½À½»ÂÃÁÃƼ¶ÊÄ‚hdCCx¡±¹¡z‚²°¬µ”dkuohjjnquurnlppomjedikkjhgdejoopstppwqqpjcfkkmlhx—¦ xk‡“–°˜[m¥›phsuiowo€–~^X_Z[\\``XVWX]cece_XRUYYVXYWWYXXY[ZYY[\\XTWWV]`PAEU]\SRURLGKOUWWZTO\jrv{ƒ€rrywx|zz„aPVSO^qiZZsŠ|\Vq„xcefa_b\Xajpplnl_\emnnkjijhfiiiihhggoc[s…Ž§²«§³¹°ª¬³´®®¶·±¯³·²±ºÀµ­´¼º««º¶§¨º½³®¹¿¶­¯»º­¯¾¾°¨¿Æ°«¿«sckkghighfeffehgaWQPV]hnmjmlnl`j‰|hnwonqqppoopqpmrtsqopruqprqsytdtŸž¤¢Ÿ¥žŸ®¯£¸èëéçêëçÞÖ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Hpzu~}yxq`_v…‚}ƒ}}†…~|€€‚ƒ‚‚‡…‚~~~~‚„†„~||~~~~~|{z{{vmiiklZT[knhv—¤®¯š“¦©œ˜xhmklrnnnnqrmlrvzƒˆ…ƒ‡‰†ˆŠˆ‡”‘‹‹Œ‰‹‘’‰ˆŠ‰Š‹‹™©ªºÔÒÉÉÆÄÄÇÆÃÄÇÄ¿ÄÇÄÌÕÑ …ohaYXZVXZ]bfc\^ZY]dinsrnpxxmcaikpvwspossuyvvwmdlywoqtvywuvz|{xyxwyzvuztj_]\Zbptvkerztmr{~zzzyxwxzywwwsrptxtpu{z{xukfwˆŒ‚Œ„R1BAFdmfen†nTPNHFHAFNROKLNPGC@98@FKJTikf\k˜ÉÌÄÂÁ½¼º½ÁÄȾºÎÄkkRNxœ±·žxƒ³¯¨½›giqg^_gjnqroljlppkfabhfhijiedgmnnrvqnsvtpicfjjpngpŒš§•owŠ‘²—Ws¨™xrqvmst~–j__d[\a^\^^ZXVZbddgaXSTVVWXWTZ]VW]\[VTW_`[UVWW]`N@EY]XOQVTPFLQVVWYRP[gou|€|suzwtxzz„nYUPM^ogZZpˆ|[Vqƒvagja`b[Wajpplol`[dklljghkkhiiihhgggnb\v†¨±¤¤²¹²®¯´µ®®´µ­«¯·°®¸À´ª®¼¶¨©ºº®­¹º®¨·¿´¦®½¾®¬º¼°©ÀÅ®«Á¬pbjjfhhfgccedacd_WQPU]gnmjommlbk‰~hmvonpnmllnopqrpqqoprsrvtsqrwp_o•«¨¨°¯©²®´¾½¸ÉæêéèèìëáÔ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Cp|pnopr{oaew€€€€}z„{~‚‚†„€}{||{„‡†ƒ€}€€~}~|y||ztnkhbca\fpn€ ›š–¤¢‚wx~oflnrrquusrpoq|zƒ„‹‡…‡Š‹‘“ŠŒ‹‰”“Œ‡‡……‰ŠŽ“‘®ÕÕÉÈÈÄÄÇÈÅÅÆ̾ÁÆÃËÏ¿Š{kb\VUXUY]adgfc][XZ`hlmmrutrqj_jorsvxvpptrrrsxxqqyxqqtwuuvxwtsusyww}ysw}se_b_]dupa_qyoirtuwywvxwxwvz{xtrrvzupqwwwtrjetƒ†~}ŠP;JAStnlƒ’€of]UNIAJTVPLMMSRJGJHGNKNLYgh[lÌ˼¹¼¼¼ÁÁÀÀý¹ËÈ‹kk[X| µ¾ t‚¾»­¹—acmfaeghjkllkkoolhgfccfhiihedhnmlpuqosvnhgdeiknlcjŠ 
¬“q‘†’®ŽXq«œƒxfr{uuƒšYY^Z\a`[[^\ZYX[```d]TTXVWZY]ZZ[ZXXY^[Y\`_YRTYZ\]PBCSVSNORPNHMQUTUXSNXepx~}|wopvyw|„u^TNM`of[Zm„{YUrƒt_hndaaYT_ksqmom`[cilnlheghhiihhggggj^[wˆ‘ª³¢¤³¶¬©®µ±¬®¶µ­¬´·®©´¾¶¯¶º²¦§³µ­¬¹º±­¸»²¬·Àº«¯¾¹¥¯ÁÁ©¨Â®qaiieggeffdeedgf_VPOU\gmmlmlonaj‹…fgtolnplloqomnrsnlptssvsqqqsysbr§È¾ÈÌÈÉÍÖÚÒÐÞìèêìíêäÝÙ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Bpzpspms|tbe~…|{„„ƒ‚‚€€ƒƒ~|~‚†‚€‚‚}„ˆ†‚€~†~|€yvyz|yuqf_cdiiurh|Œ’‹ˆˆ†wklnknomqppoptusppxz~ƒƒ…’Ž‰†ŠŒ††Ž“”“’“‹‰Žˆ‘”’¤ÇÓÌËÇÈÆÄÇÌÈÁÉÁÇÉÆÒÄ–}wmc\VW\U\bdefe`a\^\`hghsuuuuqidclrrsvwtqpooqrstxvwyumpzvssuvvvwvvvuvwvtuzwkaacaopY[xwhiwxv||w||xxyyywxyrsttsqpprz{vmlx~‚}~ƒ~gSMHStoi}„“ˆ|gSDAMVSLJKLJROINUX[_YOUfebk¡É˼¿À½¿¹ÀÀ¿Ä¼¹ÑljhfVT{¢´½¤x€µ¸´»•^cqkcbhikmnmlknnlkjfdfkmkhgedfkhowunnrrlheadkonf^n«ªrŒ’„”¬Rv±¥…vz{vz{‡ˆnYX\^^]^^^][ZXV\cccg`TUZXST\_YXZXVXZWVX[[WUURRW]]M=CRVRQPNONMPQSVWUVSVemv}}yxvtvzrtziZOO_pj]ZsŒ{]Tu…uaeh_\b[T^lssnol^ZfphhkjefjlgdekmighkaZr†Œ¢®¨¨µ¹­¦©²±©®·±­±´²««·¾¸´¹º±§¯¾·¬¯»º²²¼½±ª·Â³¥°¼»±«½Éµ¨º­u`kihjfgeffefdfibTPMQ_jlghjjonakŒ„ghvtrojplnqnmqsostpnprtuuutswo]s»ãâÞÞßÝàâçêçãçíêëììêãÛÔ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Jv}qx{xy|vedx€|€}€‚~€}|€€‚€~‚…€}~|€‚††ƒ‚ƒƒ€~}zz{~|vojdcghcrzeo”–ˆ‘š‚`qŒpsopqnnroorttrrtwz‰‰‡ŠŠ†‰Š……‘’”–•”“•““Š‹‘’‘•‘˜¯¸¶¼»ÀÄÇÉÌËÉÂÁÉÈÉɤuqzp[W_^X\[^dfdcee^YTZghjstuvxuoiigmx|wqqusqqrstvsxzwuvvuyvtuvwy|wsqsttsst{}ukea_kkW^xvmuyywyzy{|v{~|xwxzsrtvtpnozzuqhdq{{z}u`NHbtimusƒŠ|wkXEJTZRKHGCITTNQWZ^bYMN[`epœÉÍ»¼À¼»¼ÅÇÃż»ÓƈgfVU{£²½¢u~¶¶®»—`doiaaghjlmljinnljgccgghhhgddipmoqmijmpnlf^`fjlh_m’­©Ž~‡š«yU³¤‡z€‚€y~€sc_a^]^`^\ab[VWW\ddabaUTXYXVY[XX[ZXVTVVWZ]^ZV[TSZ]M;?LXVPOOPKJNTUTTURTXflrzz„}wvwxvvsw‚€r]KK`nfZWoŠ|ZRs…ucgk_\`[Valpqmmh\\hojghhghjhjhgjlljjmd[s…‹ ¬¥§´¶®¯²·±©­´¯©¬±¶¯¬³µ®¬´¸´±´¸¶°®·º¯ª¹Âµ¦±¿³¤«»»­¦¾Æ¯¦½¬q`lhhifgecdbdcehb\UMNZinmkmlom`iŠ‚hkvqosstpnonnonqsrpoqssrrssswp_zÄçâÞáäâÝßäèæäçîêëëìëåÝ×€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Gu}ptyvsvqcbu}€}ƒ|€€~€‚€‚zv|„ƒ€ƒ~{~€~{{}}~{wpifgecWbnq}‰‹ˆ‘ 
“pƒ§‘ktrimpmpssssrooqvw€…‚‰‹ˆƒ…‰††Œ•–™š–“•™”˜–Ž‘”‘”‘˜›Ÿ«¯µ¾ÆÈÈËÏÄÈÎÊÐÀ†cuxjY[\YYb^^ab_dlhc]W]ikhmpstuuohegmrtrppstrmmpsruxywwxxurtxzxrqqrtxyyxvuvy{{ysg\geU]son}yxyxz|ywxz{y{|ytqprwwrpquwxxlbo~||}|tbSTTZhqkjw…~}‚mTDFO[]N<;DLRSS\c`ZaYQNU_jr—ËÕ¿¼¾¾¾ÀžÆÀ¹ÉÆffWU|£²½ p}¸µ¦´’_dojehfgikkjhgjlkieabgijjkhcdkpopokjmngjlgacgfhk_m–°§‰“‹ˆ ¥q\ˆ¶§…‚|y{€ud^a_\bbb_]`_XZYV\gd^cZRUWVVW[YVVWXZYUTVXY]_\WWTZ`ZA4AOUQQTSOHKMRSOQVUMUdlrx{€€~xtsvwtx€{dLMcmdZVm‰]Tt…uafh_Z_[Xbklooqj]\delijkkllhjjhgillii`Yr‡Ž¥±§¨µ¶®­³¸²­®´²«®·¼²¯¸¹­ªµ¼µ±´·²°¶¸¼¯§µ¿³¤³¿¶§®ÀÁ«¦ÁŪ«Ä¯r`kihjfgeggefegjcWTQT`jkflmlom_iŠ„jmwokoqnnmmpqqqponmnpppporqswq`|ÉéÝÙßãàÛÝâçæäçíêêëììçàÛ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Gu~ooqppok_bw‚~€€€~|}€~‚€€ƒ‡‘¥ ‘†…„„€x|~}||}}zwwrikkeda[]{…km†“ž¥–›«gnqghqoqrpoprsssur{ƒ‚‡Š„ƒ‰‰ˆ’“—š˜“”˜š›–‰’”Š’’Ž”Ÿ¦¨°º¾¿ÅÎÉËÏËέslƒv`Y]YSX]_cecadifba\[gj`enutrsndajqrrvupsroljlnpxyxvtvvvquyysnnrtzytszztrvxz~~rehhZ_lko~}yytv|xwu|}zwxuqsrsvvsonrwz~sen€|{~€{q`NVKSon_nxqx…†rSCFN^cP4-6AEFJVcc\`\VQScrw˜ÊÒ¿¾À½¼ÁÈÇÁÆ¿¸ÉÁ„efWV|£¶¿kzº·¥µ“`enhcgdfhiihfedgihgcbfijiigbbhmmooifgghjhdainlem`m›±¤Ž’‚ƒ  kb‹²ž‹„}}~|oebZSXecaa`]YYYXXeqg\b^TSVWXUW]ZWVTVXVRVZ\[[YXWV[^ZH8?PTOPTSOHGJQTSRSUNVcmtxyz}wtvvssvz}jPSenf]ZnŠ^Uu†tadg^[`]Xbjknmoi]]egihjkjiifhifcdjjgjaYs†Ž¤°©¨´·ª¤«·´°°µ³©«¶²ªª¶¼´¯´¹³¬³½µ¬³µ¸­¨·¼³­´»²¨®ÁÁ¨¢½¼¥ªÃ©o`lhhifgeggdecdf_VROR\hjgjkjnm`k~eivqnnmgmqtvrlmppoopqrrppqqqvo^}ÍëÜÙàáÝÞßãèçåçìêêëíìéã߀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Gpzomnorqndew‚‚ƒ‚€€€}}‚ƒ€€€|}Š¤·¼·¬œŠ†Œ‰€|€~}}~|{{zvtmghkdcliYuxm’—˜‘‰nhnmknptrrqrtvwvvpyˆ‹ŒŠ†……ˆŒŠŠ˜––˜™š 
–”‘‹‹Ž‘“ŒŒŒ–š˜œ¦®´¾ÈÆÇËÌÁfy†q\V\]ZV^^befdeggab^Wcoc_lyyuvrghgiovxwtwrpqojmsswxsruwuvvwvtrv|wzyuy}xlnw{ww{yrnpebkow€{xzuw|wtrx{xxzytoqstvwtptuswtjo{~yx{iKGQ\oi\n~mjt{„|\EIS[[RD4(07=NSNNTUPFDNQSWOB@MRTams|{}{zxuuvwtx|ƒ|_I`mg[WoŠ|^Uu…t`dg_\aZS]hmilpiZ[hokhhhfghfjhgijhffkb[sˆ¦²¤¨¶¶ª¨­³¬¦­¸±¦«µ¸¬ª·º­ª¸¿³®·¾´¨§½¾¯«¾¾¬¦´Áµ¥¬¹¹¬­¾Ã¯¬Â®q`kihjfgebcbcceibUSQS]hmlpolmj^hŠ‚hkvqossolmpppqqtqpqsstusstsswp^€ÏéÜßÞÜáæäåéëèçéíëëêêçã߀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Is€ts}…„~|smt}|w~‚€}~ƒƒ‚‚‚€”ÀâÝǽ§Ž}}€€{|‚ƒ|y|}yyukhkjipink_gvj\WlŽ¨¾¦~wnrsmnnorsrtvsoxxz~‚‚‚…ˆ‹Œ‹‹Š”™œ›˜˜›˜ŒŽ‘”––’ŽŽŒˆ…Ž’Œ‡–œž¢ª¹ŠaƒgYs€`YeWW^caaeijoi\YZannaZevvpomfeejqvtqutvxuootqpoptyzxwvvvtsw|~zuruxxuztu{{tsxwuidjv|twwtyytwxyvuvyywuurpqtvvtpsqy}ss}|z}|uz{mw•›‚w……YCEOix\KKPUWXVMCGIPSSVO=?XYSf{|vœÊ͹»Á¾»»¾ÁÆ˼·ÓÁ…ghXSw›©°—q€¶²¨¹“\ania`degggecbfjlmkfeighhigcdkegjhehljkgec`bgimd^}£¦•…tea„¬Ÿ]L‚±¢……‚ƒzum_]`]_e^_le]juinypa_cWVZZVWY\XZYURUVQRY__][YW\[XUZVE=ITRNRVQDIMNQXYK=ALSdos{y||~}wrtzzsvy€iPcqiZVq[Ss…ubgjb\^XS_ilfmshXZgkllmkffjkhdeijfccg_Ys‰‘©¶¡¤µ¹«¤«·±¨±½³§«³¸²®²¶²­°Áº®±¾º¬«»º®°À¸§«¸Ä± ­¿º¨«¿Æ¯¦Â°m`lihjfgehhffdeg`VSQRZejjmmjlk_j„ghvtrojllpropqosomorsrsrqsstyra…ÑéÝàÞÞéãáâçêèèéîìêêéæâÞ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Lwuy‡†wfdx‚{w~{‚€ƒ{€ƒ||‚ƒ…~žÕëÛź¢Œvr}„ƒ‚„„}y~~ywwtqppoljlpifyƒpX\ˆ“ƒ¨É£|vnpomllsrqqsuusrssuy~€€ƒ‡‹Ž˜˜”“••”–“”—•“™—ŒŠŒ‹Œ‰Š’”Œ•—ŸžzmƒxYl‚v\[ba_\]adddeih_[\cppeZ`kvztrfcfkmqwzttwyxurpppqrtuvvuuxyxyzwxutwzwvzxutvwvtttmcdpxxx~xx{{zyvwxxyywutquutuuw|yqs|{su~€{y}}tz’Žv~›˜yO@BAc‡iGHQSQSROMKJJQYWRRS\\XjzršÉÊÁÀÀ»»½Ã¾¹Á¾ºÎÃeeRPx›¦°—nz°¯§¾”^cmhb^cghhjfa`cghggdcejihhgdceenofbghcgfeddeginec‚¢¡yi`Y„µ¡ZH…­¢‹€‹‰wtzugab_[bdmz}mfw{gisldbZXWVXYXWVTXYVTUWWXY\_]YVUWSVZ\TB;NQNNUTLIKMSTSTOAFJQ[ht|€€{vstvrqqu{}oYbmj^\qƒy[Rqschha[a^V\dioopk_[clgknlllkhiiihhggflaZqƒª³¤£¸¿®¨¯´±¨­¸±¥©´³ª«¹¿´°¸¸³ª®¹³©¬¶·°®»½¯¤¯½³ª¸Ä¼¨ 
¿Æ­©¼§s`lihjfge`dfigfg_TQQT[gmijkhmh\m„flvqpoqmmnnnqqolpqppoqurppqtyq^ÏïßÛâççåãåéêççêéììììçßÝ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Cmwnortupphjz€~}†‚‚†‡‚|€„}€ƒƒ¥Øíßų®¤‚htˆ†|†€x{|z|xrmllgacnfl‚xmƒ“ˆ„‰ž£ˆztooqpmlrrqrtutstuuz‚|~†ŒŒŽŽ’•““•˜—””‹•—“‘“”“““‘Œ’ŽŒ‰ˆ‹‘ŽŸ“mtƒjg}zia^][`\\bfffhli^[Y^kpj`^cpyrojedimsvtuwvtsutqnquvsprvttttuyytwutvywuwwvvxxvsq{m\[l{}{wxvwzxvwzxwvustwsrtvux{{qpquwuuz}€€zx{kUUXVaŠ˜{PCE@TzlKFNPPRRPOPOKNX[WUT___uwv›ËÌ¿¿½¾ÁÅÁ¾ÅÀ½ÓÃeeRPw›¤®˜r³®¢¹[akga_bijhfbaefijjidaaggggebcegcbeebejiheb_`cfkdd†¥„rf^[ˆ³˜TJƒ«¢Žƒˆqsysgdeb]bs‚‰…o`jsiklcfgYZSPUWUUYVYZWTTTTXY]_\WTUTRUX\YJAIRPLRSKEFLRSPQROWNN\lsx€‚€|xwvvtnlov~wd^jj^Zp…`Wu„tdgg\X_\V_imolkg^]elhihhhjjjiihhggggj[Wv‰Ž§¶¥§¸¹©«²³´¬®µ¯§­¹¹«¨¸Á·®³¼´«±¼²§¬À½®§³º´¯¹Á³¨³½¸«©ÀÀª­Á§k`liijggeegghddf_ROOT\hoknnimi]p‘bhsopprlllorpnqqoqrqsuttrqqsxo\†ÐëÞÝãåãåãæìéãäìéêêêëèá߀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€8anggdehlngfw|{€|‚ˆ‡†‡…~|‚†}€‚…¦ÔèÜÄ°¼¹fo€|†‚x{~}|xvohhkh`cnin{jo¡¥„y‚ŠŒzyunnrmmqpqsvvtrsqu}‚ƒ‚„ˆ‰ˆ‹Œ‘‘”–˜œ™”’’––—š–‘‘Ž‘Ž‹’’¦’sqmk„…i]a[Z[[Y\beeehkg`][\irlc]]kxtmfdfhlrxwsrtwxtsutpmqtsrswuwwvuvt{yutwxwxwvuuvxxwxj[[o~~wwzzxyxwx|wtvyyxvwtuvuvxvwwttwtsw…ƒ{olqhS<3=CF`q_LCG@DgnRCJLMPRQRQRNKSYYWd_\e}qs˜Èɾ¹º¹½ÃÄÁ¿Ä¼ºÑ‚efRPwš§¯–q€¶® µŽZalhcadiifeaafeggffc`agghhecdgiihggfdbghigddgjide‰¨•vhi`a‘¶”SQ„ª¡…ngqwpghhc_qŠ†}kbprmoj_ei\YWWWYXXWWWWUTUUTUY^`^YXZXVXWYXK@HSTPSSKEHMQRTRPQTNNYhsxx~{xxwuumosx~l`gg]Zo…^Usrafe\Z`[T_ikfkql_Ybkkihhhgijihhhhgggmb\t†¥® 
¤·¹®®²±²¬¯´®¨°¹µ¬­º¾°ª²¸²ª±¼´ª®½»°­»¿³ª¸Ãµ¨±¼¸©®Á¿©®Ã¦g`lihjfgeghffccgaVRQT[flhikiok]m‹dktqrrsqspmqolnnorqlloqvsrqswnZˆÒîâããââæãæìéàâìëìêêëåÛÖ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€4\hdc_afh_S\w„€€|}…‹‰~wz‚‡‚~€„‚žËÞÕöÆÇ”afy|‚€ƒ|}}y{u…„xi[]ceddfz˜—€su‡›’wwxnouronruwvuuuuuwz€„†‚}€†Ž‰‰Ž‘‘””Ž“‘Œ“’Ž’Ž‰Š‘‹ŠŽœ—‡vq‰•u^`_XXZWZ^bddefhhdb_^grkb]^gvxocdfhiqwyruvwwvtqlnqrpmpuuqs{{xvw{ztsvxvvxyyxxwustj[Xhy{vstwxvz|yxvstxzwruvsrtuuxutsx|wvzg]ZYO?757=AIMJEBFB?XhTAFHINPQURWUQRUW\\Zdntxts™ÊË¿¹¹»À¼¼¼¾Ä½½Ö‚fgSQw™¬°“l}µ¯¢µ]dmidbehfefeabefedecabghjiebdgihgeddeehigb^_elideŠ¤ŠjbhZ^“³ŠLRŒ«¢€r\]oumehga`|•‘‚wgfvqkom_`e[PTVVUWWVVVUUVYZZW[ad`\\_YVXVUSE8EPTUVQIINPORVRMPNQX`hryzy|vvvtypqsqz}rhhc\[pƒzZQp€qbfg^]bZS_iiknpj^[bhihhifccghhhhghggkbZp©±§¤²¸®«®´«©°·±«®´²­­µ´ª©´¹±©±¼±¦¬»¹¯¯¿Â²¥¯Ã·¦¯Á½¨«ÂĬ¬À¦k`liijggecedfceidXTRUZejfeiipl]kˆ€dirlnmmoromopmmstsqrqprurrqswn[ƒÒðååãáææâäêèâãëëìëëìãÖ΀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€5[f``\_dbWO_{‚‰†z{„‹xtuz†„ƒ†|šËßÒÄ¿ÍÆ”hp}~†ƒ~z~‚zv~’ž©¥—‚kZ\]dffxŠŠŽƒnq’ª–oqwonuutrrvvrqtvtyrt…{z|}„Œ‰‹’‹Šˆ‰‘˜˜””‘Ž‘“”•‘‹‰†ˆŽŽ’Ž“™ˆ~“§‰``ga]UV[^`adhhfikib_]blob^`bo{tjedfjpustuvtttsrsrrsttuuvqrwzywrvwvuxwsqzz{yxvtsxp`Xev{wuuxxwz|xvyzvtwxvvwurssuzyvsuvsv€„dPPPIA;>;12BIIJCCDEDMWM@CDFJMPWZ\\[[XY_^alihrsl™Êʾ¹¹º¿½½¾ÁĽ¿ØÁ‚fhTQv˜¬°“k|´® ´^emgbabfgfie``fiihhebbghihc`addgheceghbdfdaafkidgˆš~caeYc˜®JZ—­¢‹veOUntjafd^c„—tcfvmksqcdh^SRSVWUVXXXZ[^^^^\_dfa[Z\\W[[ZVHCC@AEFKTZZYZ_]ZZZ\numghpšÉÊÁ¾¾»¼½¿Á¿¾¹¼ÐÀfiUQv—£­•nz®¨›­Š\dkeaa`figea_bcfgdb`bgjiihebbdicbfd^`hjigdcdgkfal†ƒlee^^u£¢nOv¦£ˆd[KLirh^dcf{•šŒ€k]nxgflgZ`dUJWbcdgfaabefd^\]Z\`dc_^_\W[ZWWK<>OVSUSLIOLNQTTROMOZabmzzyz~‚€xuxxyvuxz~ƒtga_\n…[Rqp_ccb]`ZS\fkonkd^agiijjihijigggghhiib[Xp„’¬² 
§³±©«±ºµ¬¯·±¨®º´¬¬¶¹­«¶·³¬²¼µ©ª¹¹¯ªµ¹°ª¸¾¯§µ¿·¥°ÃÁ«°Å¨i`lihjfgeefefcdhbVRRT[fkhgjhnj\lŠ|cjplrqoqsqnoqpnqmmnopqnsqqqsxp]‚È̶Ñéèâçâãçéæèíëìììíéâ߀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€:clfmpjacjfgwƒ†ƒy~Žƒwoottw~~€‹„…¡Ç×ÒÈÁ¶ÂÛðèÁš‰‚‚~zwz”·¼¨˜•{kgejrpjxŠŒ“‘¨¬Žƒ†umprtvtwvqpturqttty€‚„‹’“Ž‰ˆŒ‘š”“˜š—–•’–—‘Ž‘’ŒŒŠ…ƒŠŽŒ‰ŠŽ‹Žš‚˜¡›žlajl`[[][Y_ijfgmljife^Ziqp_[_bluqebhjimrttuxxwvwurqrrprvrsutruxvxwwxwst{wsuzysu|wtdV`v~yutx{wuxz{yyyuqsyuwvvywuvzwvrpyzlOV\UE??=A<416=>=ECFA@CB@EC?@CDISW]\Z]_^_\^nkZdss˜ÇÈ¿¾¾¹¹¿ÃÆÂÀ½ÁÒ¿giVRv—¥®”juª¨Ÿ«Š]fnheedgdcec`bdikgc__chgffebbchgfffeeejhda`chkc_o‡~igebcz§¤sV~™¡¤‰a[MKgrh^eem†–›Ži^w~zoqpb`cZR_jkd^_ccbcb^Z[_]^aeea^_]Y]YV[R@@LRSUOHIMMRSMOSRNLYfcfptzvyysuzzts{{z|j`_]oƒy[Rqrcgh_X]ZT]iqnome]^dfjkjiiie_fgghhiiii[Tp„¨·¥¨¶·¬¥¨µµ«­¶¯¥«¸¸¬ª¶·©©º¹«¤¶Á°¤±¾¹ª§¸¾²§»À¬Ÿ¯Á¼©²Åì¯Ä¦g`lihjggedeccbelgROOT\hokmmilh]o~ekojonknollpnlpnpooqqqsusrqswn[}ÊÒ»ÔìèßìâßçëèçëçêëìíèáÞ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€@ivtyytoszj_r€‚‚u}’”ƒywrls~|wy|}ƒ¯ÎÑϼ¹Øëìéس”ƒ}}ytŠ§­¶´›Œ‘~a_mllnƒŠ}‰™·¯|x…qklprurrrrqonnqsoos|ƒ†‡‰‹ŒŽ—š™–”–˜•‘Œ“—˜—“‘“‹‡„Œ‹‹††‹ˆŒ“„Š£¦¢ž`hljd\X]Z[^ejebgknke_\^dmm_[_bnvrffgfghrttussvxussttsqqrusqoptxzuuwyxvuvwwwwwxxxzveV_v}wwvxzzwwx{|wv{zuwyvtwyxwxyttwsuymYUWVTMAA=>;548>BA>BDBCEBC??CC@DMVUVZaebZX\_VRkss™ÁÈ»¹ºººÁÅÁÀº¿Ö½fkUMs˜¤¯•js¨§ž©Œ_dkgdb_befedddbfgeb``cegcaeebeihhgdacfhie`_agnb_pƒ{gadlv ¤kV|œ£ 
€eXLKasg]_h‚’’˜zlqvushca]\^^XYcfc`^_feeb][\[[`dff_[]]^\YVYUB=HPPTRHIKHNSOLQVSKR_ehotvzƒztqquwvttx}~od]Yo…yZRrp`ef_^_YS_ljkpodZZcljiikkhgikihfffhii[Vs…Œ§¶¡¤»Â«¤²¸µ§¦¯­¥«¶»¯ª´º²­²·¬¦³¼¬£°¼¹«ª¾Â±§·¿³¥²Àµ£«ÀÆ­¯Ä¥l_jfefdfdbhhgdfh^PPQRZinggnllh_o‰}bktmpqpmoonpsropnqpjkrrrotqqxm^ƒÂÎÄØèèçáàäëëççëíìêìíæßÝ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Cp}u{{st{j^o~}}€~ˆˆ}zzols{wtw{yxy–¾Ò̸ÊçìçìÞ»¦€xtz–­«­µ¢ˆzhZ_oron~qƒ—…«`krpikmpsqptutpnnnmnot~‰ŒŽ’““’Œ’š›™–˜œ™Ž•šš—’““’‹‡ˆŒ‹Š‹ŠŠŒŠ‰Ž“Œ–¥£¥–l]kogb`\Z\Y^cbegbjmkiheabord]^^iurhfefkmtyrptrpswrqrwvsruuttuuvwxzxwwvutuuxywvwvstviX\q{xxvuwxwxy{zutxyvw|ywvwwxyxrtwps|q[PTXWVNG==<86:>?CB?BGVVUUZ_XM^h[]dlnrÄÊ¿À½¸·½À¾Áø»ÓÁ~ciXRt•¢­”ir§¥œ§‰\djea`dghhda__defgfa_bkjdab_]bliffecdfmkd_^_bggdv‰uTRhzy† z€Ÿ¦mXNVhvokq{‰ˆ~~vjdeech`]_]^aaYYab]]]a]^ab][ZXZ^acd_\^^\Z[YZTA;GQSSMFLJKPSQPRTSLQ_ehntz{{zywuupsvvuvz~|sh]Wn…{\Tsp_cd`]^YT^kkgfki[Yenlifffehkjigfffghi^Zu„‰¥¶¥¥³¶££´·¬©±º´¬±»´¯²½»«¦±¾´«²¼±¤ª»»®«¼¿°¦´½²¢«»¹°¬¼½£§Á£i_jfegdfdafeecgkbSPSWZbiilkhni[mŽ}bkumpqppqpmmppntppronoprotqqxm^‹ÐàÓÞçåäââåëëççêíìêìíçßÝ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Bs~pt~tsyi\n€|€Š†}‚{y{xonw{|}z†³ÔÍÂÚòíäïâ¿°‡ur†«º«¥«˜t^WUV_ie_jgl”Ÿvz£kuyoiloswvuwxtoostrsux~ˆ‘’Ž•™™—•œœœ›Ÿš””˜˜–•““—“””Ž……‰Œ‹‰‡‰ŒŠ‰‘Ž©§›}[_lpgda][\\\^cfdaeijklhbalrf\][bpric_cjlqorutsuupostppstrttvwwwvuvuvy{xutxzxsruursxn\[p}{wtstvuvvtwyxvuvxvxxwwxyyxtstos{s_PW]Y\WND?>:58=9:?>?CA=@AAAA@@BELOSUSRW^ik_a`bhnžÈ˼¿¼¸¼¾ÆþÁ½ÂÔÂdjYRu–¡«“ir¥£›¨‡\fmd_`fhihd`^]edcee`^ahihhga^cihijjfdciidbccbdigwcFTtŠz‹ª™‚––•‘rii{…ˆ~trz{gd_\^_]]_d]]_^^_^WV_a]\[^\\`a^\[X__^]]YX[\]\[USTI9GSRTRHGNPOMPRTVSLQ^egmtux|}}ytqwvvuvwxxzym\Vm„}[Ssp_dea\]ZU]ilkkngYZgniijllihhiihfffggk`\v„Š¥µ¢¦¸º¨¨µ´§¨²¹³ª­²³ªª·¹­¬º¿°¦²Áµ¨¯½¼­§µ·ª¤¶¾³¥¯»´§¬¼¾¦­Å£f`kgegdfecfdcbekdUNQY\`gknjfmiYm‘~bltmoqomopnnopnspnpspoqrotpqxm^Œ×îÞáâÞßäãçêëççéíìêëíçßÝ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Ar|nr}~vpte\rˆ‡zv†‘‰‰xurj`gw€{zx{Ÿ»¸ÆáðíçîáÀ«ŸŽysŽ·½£‘ŠsYR_`S[c]e{sk‰„Zq¿Ä‘€‚plprtwusssssvxxuwyz}‚Š’Ž•™™–‘šŸœœ  
œš›™™•”—˜—˜—•’Œ„„ˆŠ‰ˆŠŒŠŠŠŠŽ¡¯¥ƒ^]cjmlh`[^^\Y^hfab`gkljfbcgog^aaervmb\afjnsssrqsvvqstsrrqqqqqrrsstyvuwyxvuwxxvuutrvyo\[p~|utsuwxwuzwwxwzzytxzwuvvuyyqpsuvsbW^_YYVRPDA?43:<>A@?BBACCC@<;?CCAMPMPY^___ab]a\d˜È̹º»»Á¾ÈÅÁÃÀÄÕ¿€gmVOt™¢«“ku¦£ªˆ]jpe`cbeggecaaeffhgb`cegfec`ahffffc`aecgheddiplSh‡jHb’’z‰¬˜w‰¦§˜†xuoix‰‰~li|jd__a`\]cd^^b`__]YV[]\\[]`^__[[\[Z]^`a[VW]\ZXRTUI;GRRSSJGNQNNSTRTRMP]dfksxz{{|{xtwtqsx{ywz~q[Wo„}WPp€qagia[[ZU\hllqpf^]chhhiiihgfhhggffffn`Ys†¨²¡¦·¸¤¤²·°ª®·²«¬°º«¨¶¹«¨´¿¯£±¿³¦­¾º¬­¼»¬¨¶À´£®½´£«½Á«¯Ç¤g`kgfgegeegeebcgaUKLW_dikjkglg\n~blulopolnonmmmllsqklstprotqqxm^‹ØðáããßáååçëêççéììêëíçßÝ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Dq}ssspkrse^s‡ƒwx{•´»ª˜Œ‚~umt…Ž‡„…‡š¢§ÍçéëéæÝŪ£Œ€•¹·—}r`RVebM^gcq…sdss^ŒÙБzmlruwyvtqpruxvutxwz~ƒˆ–‘“”“”—ŸŸ¡Ÿ›œ¡™™˜˜œš•”œš—‡ƒ‡Š‰ŠŒ‹‹Œ‹‹—²¯fTaekmljb]_`Y[cghf_^flmlgcdjrl^[Z]jzqb^cfjpsqtwuqqsprsrrqqpttsstvxyvtux{yvtxvvxwuuvxwm[Xl{{vuuvz|{yvsstuvwvvyzvstvwxxqptuusd^^\[XRTXJGG;05=C@AA?BFEAB@;;?A?AEL[rwhZcd^\^_Ve•ÈÏ»¼¾¼½¹½ÀÇʺºÑ¾glVNt™¥¬•ox¨¥Ÿ«ˆ]ipe`c`cffeccccffdb_`cghgc`_ciihgfcadhiiecghhjO7`uUq–z‡¤“x‹¤—’†xukdu‰Œ‚f_{…sb_\[YX[``[]bb``^ZUZ]^_\Z`]]\ZZ\[`a_^^ZX\W[[XSTUFŒËÝåêçåäääåäåääããääåæææææææææææææææææææååååååååäããáàßÞÞßÞÝÜÛÚÙÙàÞÚ×ÔÓÓÔËÐÑÌÆÄÁ¾¯§šŽ…~xupprtuvvvvtzˆŽŒŽ“š›ž œ˜—šž£¥¤¢¢¡Ÿ››š–”—˜•–š“‰‰ŽŽŠŠ‡ŒŠŠ‘¥» jVY`]_eimprof]]_\]hpmida^agllienri\W_hpul`_ackpstuurrvsssssstvtrruvwxwwsqruxywtuuuuuutuz}q[Yl}{vuvtsyyyywuy{wvxwtuyzwuxrlmqv|q^]bYV[ZTV[YSVRBEKNLMRPGEHIKKFFN[_ZUWZ^fc]OO€±¬•«Ãž¿¿ÁÁÀ¾½ÀÇÁ¾Ò¹y`nVIr”ž¨šo© ž¨^ckgc``gifefdbbdfeca``mjdig\^cjijhfjdT;41781++ &^‘•t`~¡œŽ‹Š‹—œ¤¬†cmrirxZ 
{X_a\`[\pve\`a]`a_XTUWWY[\Z][YZZWUTX]_[VTUUTSWWTN?6@SWQMHGJPOLNQQRSLKWcbcnqqtvwzzssqrqpuywy|zhqƒ{[Ss‚rbik\]bZRYckmkkf\Y`fhghkjgfgbehhfddeg^Xpƒ©²ª§º¸§ž¯¾®¤¬¸±¨­¶´­°ºµ¨©¸²¬¨²½µ¬²º»¯«¸¹«¥ºÂµ¦³Ãº¨¬À½¦²Æžh`lhgheecgfdgece`WONU\diikdbniZn‘cktknnlmoqollmomlkossqqrqsmpwka˜âðáÞåäìåãåéêëêìéêêìêäÝÜ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€>ŠÕîëçêïçççæçæççæææçççèèééééééééééééééééééééééééèèçæååääãââààÞÞÝÝÞßßÞÛØÖÐÒÑÊÆÈÊʼ»¸³«…|xssvxxvzy{€…‡“•–™™–‘‘”šœœ˜™œš•”–”‘•™—’”””‘ŒŒˆ†ˆŒ‹Ž’§½•bZ_`[[dhmprqjb\^ZYdnledeb_cjmlemsm`X]fovpb]\bnnqsuvrqutuvtrqqpsqrrruwuvtwwstwurvxvvxvrttxrb\l~xwxzztrvzxyxvxzxzxvvwxwvvxtlilt|w`[c]UZ^WR]]PRWJFINLKSTGEFHKIBERZ^\UX\^_`_KL‹¹©”³Éø¼¼ºººÁÇŵ¸Ó½sZmR>j“Ÿi©›”¢Œ\_kcVRYdc`db^aacdca^^_dgchfWW^daflgWC352495)##&5p˜”t_q Ž‡‘”——¦¨Ybigt‚’sV yU^c_^Y]ptaZ^]Y^^ZVXWWYWV[][XZ][XXWZ`d`UONUTRVVTO@7@TXPKGHIOONOOPSOIHS`acmpquxy}~wsqokjqwvvv~~op~x\Vv‚o]dh\Z^XU^flllnh\V\bifgjjeegdfhhfeeff]Xo‚Ž¨±©£¶¸¬£­µ®¥®º³¨­¶¸§§¸¹©¨¶¶±¬²º°ª²¹¸­­½»¬¦·Á³¢¯Ã»¨¬Ä¤¯Å j`lhgheechfdgebd_VNNU\chhigfmfZo€bktostpjmonoppoonponnrurqsmqwk`–Þîåãçãìæãäèëéêìèêìììæáက€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€>uºäìçæêééééééééèèèéééééêêêêêêêêéêéêéêéêççççççççêêééèçççææååäãããßáãäãáÝÛÝÝÙÕÓÓÎÇÇž¹±¨¢—Žywxyy~„…„ˆŽ“’•–—–’’——˜–“•›™“š£¡—“’’•””ššŒŽŒˆ‹‘ŒŒ‘¦ºacc^Y[dimoqqkd__\[dppigea^`fkmflspcX[emwse[[blnssqrttuttpmmrtstttsrvyvuttvvxxtsvwvvxwuzuvueZe{zywuuwy{yxx{}vsxzxwyzwuuxzulfiqxxe^c_ZZ\YX\\TSTRIHMNJRUJBCINJBGUX][VX__[_cNS•¼¥—ªÈÈÀÅÁº¼¿ÃÄÄúºÔ»|_cNS…œ°§w…¯ž«^drdIB]igdkgahghjihghibnknfFFfrsmZA2*&5<=5(&.7/9l’•€df”œ‹Š˜”—¥¤xR\jjq†•qV„¡|Y^b_^X]mrd]`_\_^]\ZRLIGHOXWUWZYWXXY_fcYRTUTQUVTO@7?RXOIFIINOOPNOTNIGQ]acmqrwzz~zwurmkrywrry}qp{vXQqm\cfa]^XT^cghijg][bidcehihhifggggffgg^Xp‚©±£¦¼¹©£°¶®§°½³©­¶¸¥¢¶»®ªµ¿³«µº«¤²»µ¦«ÀÁ¯¨¸Á³¡­¾µ¡¥ÅǪ²ÈŸe`lhgheecdhfdbeg]ROOQU_higihldZp|`hslpqnonmmoqolpnprnorpqqsnqxj_ŽÛïèåèáæçæâæêêêêçëíîìæßÞ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Bb˜ÏíëäçêêêêêêêêêêêêêêééèèèèèèèèèèèèèèèèææææææææééèèèçççèèèççæææääåååãâáßßÜØ××ÓÌÐËÆÁ¾º·´®¥—‰zxxw~„ƒ‚…Œ‘‘”–—™–—ž™œœ—–™›˜’œœ”‘’’•Ž…‰‡Œ‹‡‹”¬¼‹aff]Z_eimnooke__]\cnqlle^]_bgkkmpnbVZghuwi_^adpsroqtuvqqqpswvrtturruyvtvut
xyvtxuuvwvxzxvzxfTa~|zzz{zxuxwx|~wswwvw{zvvyyxtkehqvyh`cd]YYY[[\]WQPMFKOKOSMEELRLFO^_`[SS[_[`cOU•¶ —¨ÅĹ¼¼¼ÄÁ¼¸»ÅÁÀÔ½z\`KW’¯ ª¢z‰¨™•¢Š`cphK?`khfmgajijjifefhfgcoiEEhtcK5(#*4;70,/488 +b‹“…je‹›‹˜“‘œ§£wScvsp—q[†ž‚e_^^^[]inf]_`_^\_bZQICCINUWVUWVVW_\]`_WVYTSPTUTOA7=PVOHFJIMOOQONSSNKQ\_aiprwyx{}xusrnlpursu{|tv€xZQn}n^ccZ[`YS[fnjhhc\Y`eehjihgd`ggffggggi_Zq„«³¤§¾»¬¦²¸®§±¼³¨®¸´ª¬¹·©ª¸¿¯¦±»¯§³Â¹©ª¾¾®§»Âµ§³¿´¢ªÇÆ¥®Å¡i`lhgheecbedccfi_RQPMQ`jijeeogWm“fovklmlnmmmlklmnnonnopppptosxj^Þñß×ÚÔÖáçåçêéëééììêêçã €€€€€€€€€€€€€€€€€€€€€€€€€€€€€€F_®ÞëæïêêêêêêêêëëëêêéééççççççççççççççççèèèèèèèèççççççççèèèèèèèèèçææååääåèåÞÙÚÛÛÔÐÌÉÆÄÀ½·²©ž’†}xx}‚…‡‹”“—–•–’˜  ž›š›š˜•˜˜—˜–’•”’ˆ‰‡‰‡•²¾bikc[^eimmnnlh_]\]_gnmnf_]^^cilkpqdUXdjtvl`^`brqprtsrvwuqopssrrrssqsvutzxuvvtuyutwwvx}vtuylZa{xvzyvutvvyzxyzwyxxywuw{vvsldhqw{k_dh_WX]\YZ\YSQPIJOLMPOJHLNHGUc[[ZVSZ`]_aV_–¶£”«ÈËÃþ¹¹¼º·¹¾º¼Õ¿nSfJG‹½À¾¨{¸±³¼¬‘’›¤–•›šš–‘™¦¥¢œ–‘’–œ¢’xtwJ5&-513AF6,3:6*"+i“‰od~š–™™˜œ¥tWj~wq’m\|Ži`]^`^agid[\^^[X]^WVUPQVWSWWTVYYY[YZ^]ZXZTRPSTSOB69KTNIFJINNNRROPTPKQ[`cjru{|z}€|ronnklnmrvz{uz‚y^Tp~o^ba_^`YS[cijgfd\\bgcghffjhdhfddfggfi`Zr„‘«´¥ µ¹­¤­´®¦°»±¨°»¶¯²»µ¨©¶º¯¤­ºµ«¯Á¾®¬º»­©¹¿´©µÁµ¥®Æ¤­ÄŸi`lhgheeccbafedgbWRONUdmiheemgZp’}cmthijjnmmnlkmpnsojlmntpptpsxj^–åïÑ¿ÂÀÇÚèèêëéëêëíëêêéãက€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Dgz›ØòêíêêêêêêêêëëêêééèèèèèèèèèèççèçèçççééééééééççççççççççççèçèèéèèçæäãããèêåàÞÜÙáÜÔÎÊÇļ¸²­¦œ…}|…‘‘˜š—˜’˜§¢Ÿ ¢ šš™š™”’•˜—’Ž’‹‰†…‰ŠŽ’´À’eioh^_dimnopolfabc`dmqnje_\\bgjiosgUUamruobY]dqqprtrpsqpqrrppptsuvttvwvwuswzytwvwxyyz{zuuzt`\l|uvywuutwxywvz|x|zyxwvvxvwwodepu{pa^ebYU]]XVZZWUQMKLMMOONKMMFHVaWW[ZVY_^]ddi’¸¯›«ÄÈÀ¾¼¼»Â¿¼¼Â¼¼Ó·nXgLR˜ÂÀÞ͉›æðçÉ«“—š¦¥§žž ¢ ™—š¢ –‹ŠŠ†„Œ‹‚lG,13499;B;645/#"4mŒ”•w[x’””œ™š¤–p]p}yx–m`q|zl`^aabhhdd^^]^]Y]ZUVWUUXVRVUSVZZY]]`a`ZTQTRORTSPB56FPOJFIJNMLRUQMQNJNZ`dkrv~€~‚…‚yrqtpmoqpswzwz€wZRqo_df^Z\WV_finlmi`\bgfhgdehihhfccegfeh_Yq„ª²¢ 
¶¶¥œ«¹­¥®¹¯§±¿¸­­·¸¬«´º®¤­º´©«¼½±­»¼°ª»Âµ¥±¾³¡ªÀÁ«·Æ™``lhgheeccbaedcfaWPOT[djiffdjf_s~cltlnpoqonnonnmopmkprpooptptxj]œçîʲ°´ÃÝêèêíèêééìíììæÚÑ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€@ks…ËòçäééééééééëëêééèççééééééééééééééééèèèèèèèèèèèèèèééæææçççççèèéèçäâàååâßàäãÞÜÜÛÚÖÏÆÀ»´³´®¡–‘†~‚”–•”š˜—š–•Ÿ¤¡¢¤£Ÿ ›–—™““””—Ž†‹Œ‹Š‡Š‰‹‡¸Å™gekgagdimnprrpgabc]^hplnkaZ[bfmjmpdSTcknvvfWYdorqoqrqpnoqsqoqusqtwtrtvwwxuswytswyyy|{xpsywa[l{y{{wwxuyzxwxxwyxxxyzzxvwz|rbblrywdW`f]RU]YW^]UVPQMJLNNNQORQKMX__[\ZQR[^_daUp¥¶©©ÁÇ¿º¼Ãļ¼»½Á»»Ò»lUhOR’¹´Ë¾‡›ØÔÇ·_icddonknrledeaba`__bec`agX>2(72-3>;5971(  "7q†ˆ’|a~‰‰—’˜¥©•sgw~}…¡–tiqw|s``cbdmja^\[XYZVW[WX[[\\[YZWUY[ZXZ\_`a_[XTRORSSPB54COOKGIJOMKRWRKTPKNW]`fns{~}†„|squqmotprw}{|€xZSslZ`d]]`YS[dkdfjg]Y^eghhggggfhebbdffef]Xo‚Ž©±Ÿ¢ºº­§¯²­¥­·­¦²Áµ¨¨¶¹¯­·ºª¡°½±¦®»½±®½À°¦¹Â´¢®Á¸¥³Â»£°Ã›f`lhgheecbfdcaeg]SLOZ]_ejhb`lj^r“dktmpokkmnnnoonmknpnsvroptqtyi]›åíβ¬°ÆçîåçìçéçèëêêëçÜÔ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€=`s„ÁîéêéëçìèÝâãâåáÜßáÚÓÒÖÛÙÖ××ÔØÙÓÓÛÚØßâãáàåèæäåæíìäèëãééäæîéçñçéêêèçèêêäãååàÞáåàÞàÜÓÌÊÁ½·±¬©¨¨¡“ˆˆŽ‘”™™œŸœ›”–Ÿ¤¤¤¥¥£›•—™˜›š”˜™“’ˆ‰“Ž‰‹ˆˆ„ŠŠ’¶Ã–hgmbadgdinnqtphfb]]agljomd`^]_ikptiVTblnsreVWbloppstqoqrrrqrsurvtrtustvsvyvtvuvwz|socD:Nr|xhVj{~{vxxuyyxwuv{{u{wttxzzyyuun`bqxuwiZ`h`TU\_\[\[ZVPJIJMOPROONGRdbXV^[STXaaYa^Y©¤ªÅÊÁ½»¾¿»¼¼½¿¹½Ø½kUcST›Ä·Î¸†œÌŹ°„lsrhktuvz|vmmsoooooooorkuuT4.04/'4B<53*("""5n|Š™xfx‡€ƒœ™‹˜¦œ~mw}‚” •rvuja__^`v~h_`]\[Z^^WY\XTX[YW[YYZZXWY\^^]]^[WRTTSPRUJ:4CTOEGJIHOTPNOMLOIKZ]X[owx€€{„†{strsrloqqqvy|}s\Vo}j\_f][^YU]cfefjdXYceejkeeigafffeedddf_Zr‡’«´Ÿ©¼¸ª¥¶¾«¥®¸µ­®¾²¦­¶´­®¹¿«£µ¿­£°¾»®¯¿¾®§·½¯¡²Å½«®¿º£±Ä›e_jfdebcbbcbdcdg`TMLS\fjfgfgjgZs“z`lrommkjorpoppomlorpopooopmuwhcžëñˬ©¯ÂæêæçìëêêèëëêëèáÛ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€>gty·íèâÜÙÍÊÁ¶´¯²®©¨ªª¨¨¤¡¡¤¥¦©ªª®¬ª¨£ª¾ÊÀµ·¿À¿ÆÂÉ×ÝØÛâàåéåäçâàèåææåäåçêçåæêéäàáâäãÞÜÛÕÌÇýµ®ª§¦®¥ •‡‰“”˜š›—™œ••¡£¢¥§¤ 
—•”—˜˜š™š˜”“Œ††Œ‡‰ŠŠŠ”®µŠfhngcdghikoqpojc__^\bkmolgb\Y]fhosiWUchlqocXZbnttooqrrquupnqsssussutrttqrvvwwuvvbI60+.>f{}mXdx|wtwwwywtv{yvvxzywwwxzzwwyrccmpxzl[_hcXV^`\[]^]YQJKORTUTPJKN[g`SS^^UTV^a]^`Z_y–³ÅÉü¹¼¼½¹ºÀ¶¸Ù¸jVfUS”º·É¹Žœ½·º©yei_^ifgjmlcYUWVVUUTTSSX]b\G6,'.-&090(# 6q{‚š…p{…}…¢Ÿž¦˜uepvzŠ–“mfc]Z`g`_}„g`[\__[\\WZ^[VWZZ[YSSYZVX^[]]\]^[WQRRURQRJ;3@TQHHJLKMNKOSQIMIKWYV[qxux|y}‡…~{xuplprttwwy~wYTm|i\^dYW[WV`hmkgki]Zcjkiijjigfffeeeeddf^Yq„¨°¥¤³·¯¤®´®§²»²¦§³¹¦©·¹±®·¸±¥¬»¯£°Âºªªº¸¬¬¹½®¢´Ç¾ª°Â¾¥²Äžj`kfeecdbcdcdcdf_SMMS[eifgfhki]w–~clqnmppnlmppmkmqrqqqoorqqqntvf`êïÇ«­·ÌæêæèìêéëíìëíìåÜÚ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Erxo«áÒ¾³°¡œ™•˜‘Ž‰‹‘”‘‘”Ž‰‡Š‘˜š˜•’‹‚©·¨’Š‘‘” š§­«¬³´¾ÇËÍÔÛàæâåéêèæåäêçææåââåãæäàßßÚÓÐÌÄ»³«¦£¯««¥˜—™”˜›  ¢”•©§¢£¤¢¤¢™žžšœš—šš˜‘‘‰xs…‹Œ‹‰Š˜¥ yehljfefjhipoloohddb^ckqqolh^Z^gkpriVR^hmqmbY[blpojmrsqmrtrqsspssrstrqtqprstvwtuJ,)#'456ZwzlY]t|yy}zzxvuwxvtwxzzyxwxyzuw|tfdlov{qbbjfZSZ^ZW[^\^ULJNPRRVSIIUag_MO^aYTT[]d`\`[]m©Æɾ»º½¾¾½¼¼Ã¾¼Ï¹iTaPM‘·¯¼¥y¹¸ºªyktjhoijnnifgkmiihhggggckl`K4*-.2-..$ !7wzwš—}‡Ž…£–‡š¨”n]jnr„”•„j^\ZZ]l`bŠŠd\^]^\Y[]YW\\YYZYXZZYWXY[[Z\]\]^\XQPPTSPQJ<117MTOJJKIHIKNOOSOHL[a][``_WXWBozff\tÉì²sdfer|„Š‹£ÕÞ¶ÈæìÚɱ£ª›ªÍéíììææçïÂpazty“•„„ƒ‚Œ¨µ¤’€€€~}‚†‚‹’—¦ÀÚééãáãäáàâÙÚÚ×ÑÈ¿¹®¢——¡£¥¥œœ¡ •ž¡¡¦©§¦¢˜™ž›™žœ–—˜ƒv}ˆwhpo{—¨—s_e_bghkfefccikimqqojfcbbcgjopg]\[imop`T\`]`c]UXckpqoonlltqqtsopusoqsqqrqrttsqmnv}f07bzxm_]p}|zytuxrrwxwvuwxyzzzzz|wuxtjfmsu{wg_ekjUR\]\_^\^``][[][TS`aX[`]OT__XOP_d_Zeup^|ºÎ»µµ·»½ÀÂÁ¿¶¼Ú·gRaNK³©³ }–¼·¹¨i]xlWaqspqvwqnpmmmnnoookwsZA5346&!7r|b~ª«˜zˆ®¦‹¨Œh_eZ`‚¥Šj]aa`e^\]‡ŸrS^a][ZZ^^XVWYURUYUZ\\[[]][]^^[[[WRPSGBJPQMA;35CONHGKKJIEITPLIHGINOORKNORciQ=EV_ihftrotx~‚zi^p|k_aeYW[WT]cghjhbZUYcfijgefijfffeedddgYYt€®¯›£¹»­ ¬µ®§°¹´ª«¹±£¬¼¸«ª¹»´®·¾µ®´¿¹¬¯¼¶ª­¾Å¸©¶Å¹¥´Ä¾¦³Ä›e_jfdebcbbcbdcdg`TMLRZcfbfdfheXq‘z_inljllpjglppnmomopnoqrrrrntue_|²º¸¸·¸Íäèãåêèççëëêëìçá߀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Eiyad‹´Øåºzbku€ƒ‚Š‰„°âÕºÄÛíݼ­®­  ÉêìçæìçèíÅ{cvz€˜’„…„‚‚’°·¢š–Šƒ‡†}||~~…„…“§ºËÞàÝßåçÝÞÞÙÔÔÑÊűŸ‡€•¢š›¦¨—œ•Ž™¡¢££¢¡ 
¡––—˜—˜£œ—”‹‚……zgooz˜¡„dcfe`afihheccehlqqqomkhfffglrqng_\bkqq`R\bY]d\W\amopqpoqurutqorrpvspnopqrnstrqssp~k;/Wx~rf_t~{}zuvuutuvyzvtvyxwwxzywxynip{xzzna`ee_QZd\YY[X`f[W^d\SQYa`]][WX`]XWQ_aXYhurax³ÒÁ¼Â»¼Á¼µ¼Ä¹»×­hOaKK‰®§²žy¶³·¦iWljakturv|yldegdcfhcafefgVB6.2<)!7msb{ ­‰o„°§ª‘n^f]]ƒ£Šd^gb][`Vc”œmYbb_ZWX\`b[ZWTUVUTYY\][Y\a_]\\[[YSUSD@JRSOD;58>HLIJHGHKMMLPNMMICADF=AAADOiT2/<;IVTNS[\`mwk^r‚k\b`[[]UR^ehihgbZZ_bhhjhdefcbdffecdek^[p|Ž­­Ÿ¡·º«ž¬³ª¦­¹µ¨©³±«±À½§¨¿·ªª»Â²©±¼½­©¼»«©½À®£·Ã¶©°¾¾©±Ãœ`ahcehcddfdbcfea_PMMQXbgcbdfjfZw{akplhklkkklllmonmmppoopnrppotb`‰»¸·»¹¸ÅÞââêðèçëììêìîéä倀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Dmq[}´ÇÓç¸wduˆ†}‡‰»ÞÉ´»ÓãѲ¨¬«Ÿ¡ÊëìçæëåæêÀtZkoy—”………}„˜³µ¡—™–‰˜Œ}xxz|€„‡Ÿ·ÑàáÞàåãÞÙÖÕÕÐɧ’‚„–£ž™ž¬£––š››š›œœšš’•——••––•”…ˆ‡wqqn€ —q\igffebcfjeaacflrqonnlhgjfgkmllh`ZajooaS[\V\cYT]emprqonpqommoqrststqmnstsrvvssvsmwmA)Qt}pcV\agt|wmwvuuuvwyxutvwxyz{|zzxojqvuwyreaggXQ]aYZ_ZV[b_\`c^TPXab^\`\[a_YXQYb^[hxu`t°ÐÀº¿¹»Á¾·½Ä··ÑµmRbLMŠ±©²¡€”²­´¢dQgcW_ipllpmd_a_dbadb_dihhR;4.69' 3gq`v˜¨Ÿw‡¬¤‘¡«‘n`f]]…£Šf`gb][\Ym™•iZb^][Z[\]^YXXXUTUUXZ\]\ZZ\a^\ZXZYSOMA@JQQKE=649FOLNJFEGKNOQRROJFGJI?;:<;8DN3!*'.48349;EXda^r|g]c_^]^UR\ceijjdYV]bbdhhdeebbdffdcdef[Zs­­¡ ³·©Ÿ°»«ž«¹®§±·¸°±¼·¤¤¹À±¤³Ä¶¥­¾·­²À»¬«½½­©¼Â³¨°¿¾©±Ã›`ahcehcddcdbadge`QNNRYchdeccig\wŽ{alpljmmpmlmoppnmomjjnponrppotb`ŒÆÉÉÎÊÅÊÝßÞçëæèìëëêìîçàà€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Iifh§ÎÅÑë¸ue|‰Šz‡Š—ÈÞ°´ËßÔ¸©ªªž¢Ìììçæëæçê¾pTceu—•ƒƒ…‚{‚–¨©œ“‘’ˆ‚˜{vw|ƒˆ‡Šˆ…ˆ¢¼ÖäàÜåâà×ÐÖÒÀž‰†Œ•¢¦£›¡© 
›š—–––˜˜˜–—š›˜˜˜—”•˜’†ŒŸŸ‰{rl†¡ˆc`fa`bb`elhdbbceinkoqoookedinnmnkb[bknodW\ZU[`WS]dinqonprqopollnppptsmnuxtsusoqvwr|rH3N_]UOSNMOVdqwxwvuuuvvxvuwzyxwsx{{zqknrtvzyiahl\TW[ZZa[Y\_^[^d_VQW`da]\YW]^ZYSUb_Xauxcr­Ñ¹½¹½À¾¹¿Æ¸·Ï³nWgRN‚¦¢·ª”·²²¨ws†pr{~|„‚zuvxtpsyxw{qccRA4)67%.`n_n¢¥˜‹©¢”¥¬nah[\‡£‹icga^\Y^}‹c[a_]ZXWY\_YUX[WTVVZ\]]\\[Y\Z\\\\ZSNTPLMORPB>935DONKIHHJLLLLPQKEDGJKEA=@@>GL7#'-+.20/233;ISSTm€o[]dZZ\UR^fhehkeXV_g`adcbfjhbdffeccddXYr€®¯£¤µ´§¡²¸§¡¬¸°¦¬¶·­«´³¥§ºÂ¯¤µÅ²£°Å´¦®¿¾±«ºÂ´¦µÁ·©°À¿©²Ã›aahcehcddced`bhe]ROOSZdieecchg\xŽzampmlookkllmnpqlmoqrpnnnrppotb`£ÞÝ×ÜàßãçããééäéééêêìíåÜÛ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Jgj„ÈÌ­Éíºwg|‡…z~ŒŠžÒÞ½­³ÏäÖº¯­¦¤ÐíëçæêééíÁrUcdw—”ƒ‚…„…•Ÿ Ÿ™’“‹‰™}xws…ƒˆˆ‰Ž‘–ªÍÛÜÜßâ×ÎÙÑ®‰ƒ‹’‘™¢©ªš§¯–š••––––˜—–’”””“”‘Ž‘“Š„™º¸†wuŒ’t^hggd`_bhmgdbccdgjnpolnqmgeinonole^cknoi]^_WY^VU\`hnqnmrtsmqsqkhhjpuupotvtrrqppsuuvzW$ %3:FLJDFMNKOPKPevxxwvuuuuxxvxzyvvv{}}{umjrvv|~n_djf\NW_UW^^\]^[]e]WUZbea\^\W[\YWPQ_^V[owfq¬ÓÆ»¾¼À¼½ºÂɼºÑ¯kWfRLv™Ÿ¶ªƒ•º¶¹§m]{€ohipnnoi_WUZPWaXMS`G-5AB4%:;&  ,\m`hƒªŸˆ§¤™¦¬ochZ[ˆ£Œmgg`_]Xcˆž€`\^^]ZWUW[^]UU[XUVV\__]\]]\[XWWWYXSQWSQOPSO?=;76@KNIIJKLKJINRQKFHJKJHDCEEGPF.$-.*-1.26536>CMZr}l[X\\[]UQ\cffjleXT]eegiifhifceffdccdhYWq~¯²›¥¸±¥©¹¶¬¦«³«£­¾¶­ª´¶­®¿¼«¨ºÂ²¨¯»·®°»º®«¸Å¸¦¯À¹«±Â¿©³Ä™aahbehdddbccabgf^RNORYdidcdejdYw‘x`lollongjlkjloplkmpqnlmnrppotb`¤ãâØÝæååãßàëéèïîéêéëìåÜÜ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Dk£×ÂœÀë¼|j{„…~‰‹‰¬ÞÓ°©»Ùèϲ¯¯¢›¦ÓïëèæèçéíÃuWefx–•‡‡……††•››ž˜‘Š{…—ztq|€|‚„€ˆ’“”‹‹¨ÎáÕÔÚÛØÛÆ›xƒ“˜—–™¥¯¡ž¬­˜Œ”“˜›™——˜—‘’•˜˜“Ž‹‘„…§ÊĦŽ€ˆ’~b^gdhga^aefjfbaabfjllouumhhhgikklmj`biloj`_`XZ\UU^bkprlkorqqklplbepsuusqqrtrrsvtpqu{\)6CBIHIKKJKNNLQXTOYgxwxwvuutwxwuxxxzxyxwyxsnswwzn\[^caQQZRO_]Y\cb_c_ZX[bca_b`Y[^\ZTNZ^\]ksgo¨ÒǼ¾½Áº½ºÀÈ»¹Í²kU_MHo–™ “xŠ¡Ÿ±]AU^TPPQONNNKKMEF^b;%7J1";D4%7>' .[mbf}—« 
Š§¨Ÿ¥«‘qcgZ]‰£Œqkg`a^ZfŽšy_][Z[\[XWXZ_VVZYTVX[_b^\\^^`]\YXXWQMSROMNTQ@;::7;GOOLIFFHKNRSPKKLLJHDCIONNM<&'/**0..23115=EO\pwgXZa\[]TQ\cegijdXV]cefigeefccegfdbccjZXq~­±š¥¸±¥©¹·©ž¯¾¬¥µ½¶­«´·«ª¸»­¨µÀ·¬«¼»¯¯¿¹ª«º¿µ«¶¿¶§±ÄÀ¨µÄ˜bahcehcddgccdbcb[PMNQXcgcddeidYx‘v_kmjknlnonllnnllnnlmoolnrppotb`¤åäØÝçæåßÛßéèåëéëêééëåßà€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Fp›ÃÚ±“Ãé½~l{†ŒŠ‘…‰Áçâ«ÍÛæÙ»©§¦š¨ÖðëèæçãåëÂtVcdu”—‹‡„††–›˜‘‹„u–’†|ppy~{€„‚‹’²ÉÕÌÒààЫ…z‰””™™•¡¤©¡ ®¨”•¡Ÿ›šš™•–˜›™“Ž’‡‘´Ì½¢‰‚p_ee`aa_\^chjfcbbadgmmprojjnjghkmool`^djmj`_[WZ[RTahjnoljmoongfjjdfnrtvuqnpstrrttrsv{lN;DRPHFFHLONKHOOQVUNSdvwwxwvutrwvuxyx{{yvvxwpkqvux{p]TUU`ZHKWY]]\^db_a`\Z_cc`_``WW]^\VSZ^^]itlm£Íƺ½¼¿¼¿»¿Ä¸µÈ´mXaQLl”¢‘t…›–¦˜dKXZQRWTTSTTTUVMRdX(*7%!6=3+44#!1[kbew‘¤œ‹Ž¥¬£¥©”tbe\_ˆ£uph_b_\iŽ”s_\[Z\][WVX[^YXZXST[X]aa][[\^]]\Z[WPLSSPJLTSD<9:68DNRNIDCFLQRQONOQNKJJNWWZ^U,")*&05../.-039BKSiwiZ^iYY[TQ]egefgaXW^cbcddadgedfggdbbchYXuŽ«¯ ¤¶µ¦ ³»­£±»©¢´Áµ¬«·¸©¦µ¿°¦µÁ°¤µÈ»¦­Å½©®¿¾¯§µÃ·£²ÆÁ¨µÄ—bahbehdddf`adbcc]PMMQWbgcgdbge\xv`klhjmjmppmkmnnlnopponlnrppotb`¤äâ×ÛãâáÞÝÞèéåèéìëéêëæàက€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€H}»×Éš‘Èë½}iy…Œ‹ŽƒÎé¿«½ÖÙäàÁ£Ÿ§˜©ÙñëèæçãåëÁsUaaq”˜ŒŠ‡…„Š˜¢¡›”„vƒ–‘ˆroy‚€€†„„ˆˆšÅ³‹Ÿ°ÀÅÒÜÕ»•x~‹Ž‰’–’šŸª«¢¢¬¤—Ÿ¤¢ž›—–˜››™•””‘¤Âǯšˆ…idjdeedc__eligeedbbdgmqpoomjkhhlptrkb[aini`aZVYXPSageillkjkkddfgihfensvsooqsuurqsslcUMFJMFADIMPPNMMNJRRUXML`tuwwxwvuqwvtyzwxywwz{ysmptvwzudSNOafJE]d\\]^_^_c_[]ejd]ZbbWT[[XRW[^][gunm ÊƼ¿½¾¿Â¾¿Åº¸Êµn[dUOk“›¦–r„ —›¤rU]dbbb]]]\[YWUTUWE%$% "7>201( !2[g_cs‰˜—Ž£«§¨§—xac^a‡¤Žxsh^c_]kŽŽo^Z]]^^ZVUY^]\YYZTRZ[]`a_[ZZ\YWVVXXSOOMLKORJC=;;68AIONKHEFJMQQRUWVTTQT\`Y_dK(','(11021/2655=ESmuf^a_\[]TP[bdgggaWU[_egjjgggcdfgfdbbbgVWuƒŽ¬±ž´¸¨±¿©§´½®¡¬»¶«­»¼¬«½¿­¤·Ã­¤ºÁ·¬µÅºª¯À¿¯£±Á¸¢²ÈÁ§·Ä–bahcehcdd`[_bagiaQNNRXchcgdbfe[ywbmlhjmigmplikopnkknnkkmnrppotb`£ãäÝáãÝÛåäáæèâäçììêëìåÞÞ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€@ÛجŒ•Áï¾zet€†„†‡™ÐãÅÁÔÒÜâϯ¡ Ÿ˜ªÚñëèææåèíÃtU``p•—†„†‡…ˆ’Ÿ›—’zn|Ž‰†…ylx†‚|ƒ…„žàÙ¨±´š»ÓÒÄ®‘|Š’™”—ž ª«£§¬£•œ¡ œ—‘Ž•š›˜—–”’—£½Õͱ 
‘†{nceh`bfhfbbeilhdcbacfkjjmpqnklhffktskd[`kojbc^WWUOS^adglnkfddbaabcdhkirvqnqtrtyyuuqZ>6AHGGFFFKPTUTQNKOUOP[QJ\stvxxxwvvzurwyuuzvvxxuqnpswx{{lWGPgqXK_d\YWY]adfd\[chd][bcWU]_\VTZ^^[gsho ÊÈ¿ÂÀ¿ÁÅÀÂÉÀ¿ÒºpY^PNk—œ¤•zŽ¦™žŸu[]_\[\^]YUTUVVVYXN9#&( :D42/$2Zd]ao„Ž”‘¡©¨«¥˜y`a_c†¤Žzth^c`]m‹m\X`Z]`^YVX[]^YX]WQW`__a_[Z[^\\[Z\YSJLMMLOTM@=>=9:@BMMLJGFIKJKPVVSSURV_d^hc6%&($*-(241.2965=GMgte[abZY[SQ\cfdefaVT\aeegfcefdefggdbabhVTr€Ž¯¶™ ¹¼©œ­¸© ³¿¨Ÿ²»²¦§¶¶¤¥ººª£³Áº¯¯À¶«±½»°­½º°«´»²¥²ÉÁ§·Å–cahcehcddfcec^cdWROOSYdieeeehcYx’xdnmikniknokknomnonllnnmnrppotb`¡ÚÖÌÏÓÒÖÜÝØßæãæïëìëììäÛÚ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€;–Ú⊈Æí»ygw…‰€‚ˆ¦ÚáÊÒàÔäỘ˜£¢±×ïìéëèæéê¾sR]cu–†ƒ‡„ˆ”¡ –”—•‡mu‡ƒymt~~ƒ€€€¨êá±ÃÆ”œÑźͳ{{”‘‘—‘”“™¢ª­£¤“ ™™š›‘–š•”˜•Ÿ»ÄÇÖÓÀ¸©kklf_cefc__`chmlgcb__bjkgfnrrspnkhghlnk^]gkjfa[UWZUW^_aaehfcbdaabdfghjpnorspoqsslu~a?@ACECA@BEMPRRPMMOOPORRFFYuwrr{zuwwxxuty|yxyusw|wmjz{u{€p[K\plTUgb_YY[\affda_bfa]`fbXY_]ZVMU`^Ycomm—ÄƸº¼½¿ÀÂý¸ÁѲlWdQKq”šŸ’v† œ¤ŸtV\`]^XZZYYYXXXPUUPN<&+$)-?E3.7% 0clY`s~~–™š¬®§§œy_e[Z„¥‘‚yk^]aft……k[b_\[[[ZZZYZ^ZWZXSS_]]\Y\^Z\^_\Y[YRMOPMJOQME@>?<7:AIJLKHFGJOQQSYXTSU^idelN)!#%')+/2110//26:EIavhY_`[Z]XT\bgjgg`TU`fgfefgfecffffeca_bVXs«¯š¤¾·¦¤´º¬ ­¾±¤¯¾·«±¿·§ª¹º§¥µÁ·«²Å»ªª»º¬ªºÀ±¦¶Áµ©±Ä¼¢´Çšd`jecdadced`aacf^ONMPYbeciccgfZv˜‚`ipmjjflomhkpngmoljorqqpnpppt^b™ÑÊÀÅËÊÌÊÍÓ×ØÛßãëëéêêãÞက€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€E‘Ì´“ŠÎïºwcr€‡{ˆ¯âáÅËÛÛÜѲ“‘¡©ž³Øïìéêèæéê¾uU`dr’—„€ƒ}‡™¢œ•”‘‡mqŒ“Štgn|„‡„„…|¥äݲÄŠ°ž²å˃•Š‡Ž˜¡¦§¤¢¥¦›—œœ—™›™•’–ž™’›¹É¿¼Õع§}fcccah_fe^\_gojjfdfd``gkjjpsqrnnjghnomo`^ipne[[XURRZabcaacc`^_baabdgiknoppqqpoqrrriQBFBCDCA@BDKPTRPNJECPNHICJcvwvwwtqsvtuuvy|{tyzuruurmquvzv_]mm^S_j_[^^[]acdbeccdc``c_UV]]\YPT_b]eqsmšÆÀ«®´¶Ã¿¹¼À¼ÀϤbQcRLq”™¡”v…¢ž£ŸtV\`\^XVVVUUUTTUSSORS@0-(*;?668) "-_o]\n€}––ž®±©§›y`d\Y¤”ƒxm_\bju‚iZa^\[[[ZZZYW]\ZZVRVZZ]^[[^_]\[XVYYRRPQPNPQNFA?@=954?A4?ewquzrpuxxxvrr{}wwz{yxyvqtvrr|~tjqlTK^pm^\a^Y\cedb`_cdb_`gfWRXYZYRR[a\aknnÎÔÊÔÚÜØÝÓËȸ¸Ñ¬iUcOIr˜—¥šx‡ª¤£ 
sT]`Z[WWWVVUVUUUOPTSD=<>BFHIHIJJHILLNUY[`d]^th4!#$&'*-0110.-.035@WkcZ`a\WXTT_bcdejdVT^ceghhfffgbcdefedceYYtŽ®³¡¤¸³¨ª·º©£±¾°¥­¶²¦¬¾»®¯»¼¨©»Ã³§²Á¼«§¶¸¬¨½Á±§¸Ä·©­Â½¦³¿”c`jeccadcaa`cbcbXPNLOXbfdcbeidWvˆcinkjmkkmmlkmmlnmlljmonnqsmmwa_œÖÒÊÍÐÊÉÍÐÎËÎÕÓÊÏÜçêçÝÖØ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€J€®³œ}‘Öô·{qy‚ˆ}|–ÈèæÕËËÈ¿¯š”Ÿž¤Ääîìèéëèëé¾~jqluŒ}~„‘““¢­¥›”’„on…}opr|€}}…†}¥äߵürw¢Œ“˜‚xŽ“‘”Ž”•‘”˜›£©¥žž š–š™—““–—˜˜¦Èм®±Ëæج}jgfff`^Y`a\\`dfilkhhgcaa_dkia`elilnlgiomd]bjjbZZWNMX^^a`_]ZYZ^`aehikmlikjlopnnplrm[KB=:>=<=ADHJKLMPRRMH94402Uzyqsvqryzzttuusuuvwutw{|yqvtrzztzo[T[hmc^]^\\aedcbcedbbbdfWQY\^^UV^a^dnnh“ÇÑÈÌÖÞâåÛÙâ×ËÖ°mZfQIr˜—¦œz‹¬§¥¡rT]aXZWXXXWWVVVTOX[D!>^H5=B32;'"%!5al`\brrb~ Ÿž¥›¥œ‚gd\Zxœœ‹zuhal}zqj_X^\\[[Z[ZZYY[\YXWXYYZZY[]^]`\[ZZ]\XSQPOOTTNFA<:;<@DBGJJIJIFIMNPVYZ_og]aO$! "$$&(+.110/--/15;OfdZ`c]XYUV`deggicVT\_dfhhfdfgcdefedcbfYYsŽ®³¢¦¹°££¯®©£±½¯¤­¶²§­¼¸ª°À¹¬­º¿³§°¼¹­­¾¾²¯º¿°¦¹Æº¬²Ã¼§¸Ã”b`jebdadcabaccbbXPNLNWbfdbaehdWv…agmklpnkkkjklljlmmmomlnpnppou_a˜ÓÐÉÏÓÎÎÆÊÎÎËÉÌÐÍÝêíéÞÖÖ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€M}ªªŒ“Îí®vpw†yz—Çáçæ×ÉÁ¦’‘•™™¦Êèîëéèíéë辂qxozŒ‰˜Ž†™¬¥œœ”‰wu‰|jlr~‚€~„‡}¤äàµÁ¹u•¤˜‹u~†“’Ž“‘”˜˜¥ª£šœ™—™—š›’”˜–³ÕÌ´«®¼Ýبuhgdbe`][_^[]abbhmlgggdc_\dqqieedfhhhklmgebfmkb]XRNS\]\_]_][Z^becefdeijhjhjopmmpnscLD?;=><:<@EIJKJKNQQPPLE:5B]puwutspswwtrtwusssxwtsuyyvtuwww|}tfc__]dngZZ^a]_dfcfhe``a]dhYQZ\\]WX^a\bjim·ÄÀÄÈÌÛÝÓÒßÝÑѨhYiSJr—™¦œ~Ž®§¨¢rS^`XXVVVVVUUTTRQcZ-:[\NF?007(;dh^_akra{¡£ ¤ž¤Ÿ‰mc[[x™™Œ~ulgozkd]Y][[[[[ZZYYZ\ZWVXXXTYYXY\^ab\Y\`b^VKNQQQTQIEB???@@AAHMJGHHFFJKMU[ahsom]6 
"#$&)+010/..0157F^aZ_d^YYTS_fihee_SS]abgjhb_aegfdcbcddfYXr~®³”·²££²¸ª¢¯¼®£­¹±¤¨¹¸®´ÃÁ«¦´½±§´ºµ©¯Äõ²»Âµ¨¶Àµª´Â»©ºÄ”a`jeccadcccacbbcYQOLNWbfegcceaVu‚akpjhmnljjllkklqnhhmompokmnnr\`ŸÓÐÎÑÊÁÆÇÌÏËÅÅËÒÓáëîëàÓÍ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€H}œ¨ŒÎë©rnt}‡{zœÌÝæîؽ¨‘ˆ‘™™—”¨Ïëîëèèíéìè¾…v|qv‰Žƒƒ“œ—†“§¤™‘Œ€~Š‹{npwƒƒ€~‚…§åâ¼Çº“‚§”—Œ‡Ž“–—Ž’•˜š›¡ªª£œ˜˜•“—•’“˜˜ÂÛ讬µÒÌœndf`]a]YXYZ[]Z\bfllijic^_\cnpld\_cecdillmh`cnm_USMPZ_^]\aa_\\`cdfihdeikihijmnnnornW@==:;A=:;?DGHFILLMPQQHKNPMIO]jsupqsuusuywtvxvrwzwuuvwytuxxyxqXTZiidgc[\_a^]`daceb`cc]cj[QY[YZURZ`]aikq‡¤¥Ÿ¨±¶ÄÌÌÊÒÖÑΤfYhRIr™›¥œ’­§«£rS]aVWVUUUTTSSSQNXI'!+>LRQA05=) >eg___gm_v›¤¢¥£¢¡rcY\y–•Žunor€xg_][][\[[[ZZZYZYYZZZWRUYYXZYZ^aZUWZ]]XPQSRQRRMDCAABA?=BKNHBEHIJLLMU^hrpqvT!!!"$')/010//0255?T\Y^`\XYSR\ejccf`TS]bbccdeddcedcbcdeffXWp}Œ®³˜›´¶«§³º¬ ¬º®¢®½·§ªº¹­¯»º¡ ·Ã³§¶Ã¸©®¿º«¬À±¥·Â³¤²Á¼ª¹À‘a`jecdadcfeababd[QOLMVafed__cbYyž‚blqjgjiijlllmoojjkmmklommomls]^šÍÊÈÌÇÁÇÐÎÊÇËÏÏÊÕãìíêßÐÈ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€H‰Š—Úïªqmt~Œ‚~£ÓÝãíѯ“‹Œ”™œ–¨Ñìîëèèîéì辆yrp…}’˜ˆ‚‘¢¥ž—‘Š‚‡“yjs{…}€‚†…ƒ¬æäÄϽ‘‰‡‘ª™Œ–œ„†‡ŒŽ‘––•™—–›§«˜›•”–‹••’Œ¥ÒØÏ¿³¬µÌ¿gae`[_ZTVXVTUWZ_eihfjlfa_\ajooh\^bdcachmig``ii`ZRKOY[^a___]\_dgfdhhddhhefjlklppmtgM:;>95D@;;>CEEAJMJJOPNSMORNKMM[oupqttvsstsopuwvwwvvxxwvqnt}zstiWWptbZY[]\\__`fgaadefd]]gYPY\Z[\U[da`hlb‹¶²œ˜›ž©°°¬µÅÌɪjYfOHsœœ¤œƒ”®§­£qR^`VWVUUUUTTSSNV]K2#!.A;/9A%#>dg`^\ef[o•¢¢¤¥¡£”ubX]{•“Žƒupstwd]^\]Z\[[[ZZZYZYZ[XUUUVWWY\ZX[]YYZZ[\ZPOQRONNLID>:;>@@EMOE>BILILLLQW^fksn? 
!!#&(/010001334;MXY^\WVZVR[cghhi`QP]ecegfdbcd_acefffffXVp{‹­³™œ¸½¬Ÿ­»­Ÿªº­¡®¿·©­½º¬­¸¸©«¹¼¬£²À·ª°¾¶ª¯·¾°¦¶Â·¬®À½«¶¼Ža`jecdadcgfaa`bd\QOLMVaffe``eeZv™€^gomklhgopjgkljlghllmnlmqsmmxb_–ÎÈ¿ÅÍÌÏËÉÌÑÑÌÎÕÔäïïêßÒÌ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€J†—„—Öè¥nr‚Šƒ§ÉÚæàÀ ›  Ÿ™°ÓéèçêéçâììÁˆwyltˆžš†€}‹¢§˜‘˜Ž‚~‰’Œ}qt|„†ƒ€‚†‚„¨áÝÈÓÈ’„Ž‰›¦„ˆœ‘€|‡Œ”’“••”™ ¨ª¡““žœŽŠ’•‹‘¶ÓÒ¾²©³½¥|feb_[XWUKPVWUSSTcehknnjg^\_hprlcZ^ccadhgbgecgg_YQFN][Y^ab_`a`cffffeddfikgpslgimmrY@899:=?>:99=AABKLFCDFGKLLLQXYUjwZ( #&)+.--/231/26=HRX[]WW]WRY`ehde`TRZ_^gkfacff_addcdfi`RXwƒ“±°—Ÿ·º«ž®»ª¢°·¢ž±¼²¡­¿¸©¬½»°«µ¼±©­½¸°²½º¯­»¿¯¦·À²¨³Æ¼¦¸¿d`f`bfbeecdbbaad\PONPXac`fgbdfZx›€cnsnhjmjkiilmklifinlloqnssnqx`bÈÆÉÌÊËÔÉÍÔÔÌÊÎÎ×åëëëßÒЀ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€N…”Œ‡™Øîªx~‰”ƒƒ®ÑÜßØñ»»º²¥šš ­ÍäêëêææàêêÁ†t{xx……„•¡zoq„¡¨›“—…‚Šƒums{ƒ†ƒƒ‡{©çßÉÜÑy™²˜‚–“„‚ˆŒ‘’‘“—”‘’—££›’•Ÿ“Šˆ~Š³Ï̾µ­®·±jjhd_ZWTSPMKNTWURY^ejkjhha^`gmpmf__acbcefghb^ehaZKEP`^Z]^_^`cdefdcdedcejoqsqkfgkneQ=7889<:;:<@DCDLEADGKR[WOLQPJKTN^pvrqrotssssuwywruxsruuxrqvyywqlddg`ceRW\]]`_\\`bca`bcca^WR[_Y\aXX^]dnhfŒÂ;ÁĽ¼À½¸±¦°Ë­o^gPKuš ¢žŒ•¨¦²¦rTZ]VUWUUUUUUUU[LX_>(=<27>)Dfa[cbac[f” ¥¤¦£šƒm^`y–‘†€zz~uuwg]]Z[\Z]][[[YWYVUWVWURPWWTUUUX[YYYY\[VTQQRQRPKEC?;;?@@CLNGCCGMHJLNRXYYgbD! "%)+.--02320407GSV[b\Z\VQ[chb]aaTPZehfgjiecedcccddddcTYw‚®­š¦ºµ§ ®²§¢²º¥Ÿ²¿³¢«»·®¯»´©¨·Á·®±À·©¬»»¯«¼¿¯¥¶¿²¨µÈ¾©ºÁ‘edibce`aaccbcaad\RQOPX`a^dfaceYwš`ipnkkkkjjjkmmnlmnljmnjinmiksZ]šÑÍÅÆÎÍËÈÊÐÒÍÍÓ×ÜèíìèÚÊÇ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€O‡žŸ¥—£Ûë¢s{€„Œ{ƒ®ÒÚÚ×ÓÒ×ÍŽ©•—¦±Íæïìâáëäçå‹sts{}|…˜™†wlrˆ¢«Ÿ––ˆŠ”‚wsw~„†…ƒ…ˆ‹~œçâÆÝÐ’|•Ž”Ǹ}ƒŸ¤Ž€ƒˆ‹’’•˜•‹Ž’˜œŸ˜—™“‰”¶ÍÊı¥¯¸¨†mnje_ZUSQPNLLNPRSSYbhkkkkga_chmnjb_`bbceeffa_gkaUJFP]\Z^a^]`dfhgdhgd_\_hpklnomgc`SF:7878;9:==?BDABDEEHMSTTMLPPIIONWlxsnontsrrsssrutvyxtu|xrpswzulrtm^ZhjNR]_\``\[_aa^]]^^^\WS\^W[]V]ia]jkfÅϸ´»¼¾¿½Áü¿Ò¯p^hQLvšŸ£ Œ–ª¨²¦qSZ]WVYTTTTTTTTTMW[? 
$&#-?=16<) 4U]Z^`ec\`o„˜¡Ÿ¥£ ‰m^dy–Ž‚}z|€xsrd\\[[\[\\ZZ[ZYYYWWZYVUQUWVVVXYXXZZY[\YQOQSRPLFCB?<=@A?AJMHA?FPOLJJNS\dfM/!$')../03332657@LV[]ZWXRPY`deaecVP[efdgjgaafheccddb_fVYv€«©˜ µ·¬¢®³© ®¹«¦³¼µ¤¬¸´­¯¹º­¨µ½³ª¯Ä¹ª­¾¿²­¾À¯¦µ¿µª´Ç¿©ºÁ‘dagabfbddbcbcaac[NMMPXadacc_bcWw™|^gnmklklkjhhhjjlkkkkmmjknnjns[_ÕÐÁÀÇÆ¿ÍÆÇÌÌÌÑÒÞéîîëÜÎÌ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€L‰©°³¤®áç›mzyx†~‚¢»ÂÄÆÉÐ˹­ª Œ–™¬ÇáîèäíæêëÌ•rnqyyx‰ˆvz‡˜›“‹Š†‰…zspy}‚€€†pŽãݺÕÎ…™‹”ÜÒƒr•¥™ŒŠˆ‡ŽŽ‹„ƒ†™¦¨ šœ¡£¦¤Ÿ£²½¿ºª£²½¦mnke_YVTSQSSOJINSTV[bhllkfa`dimlga````dgfgf_[bibVBFT`_\]^`^`cehheec`[Y]goolhfb^VPD>9:9779<<@B>AB>;?B?>@>@?@BGIJIFIJFGJJGAWszssywuutsrsuv{wvrrx|vrnpttrqppgc]Yit_R]`^`^[]Z\]]_a_]_dbY\^VWWZX]^dsihŠ½Ë¾À¿µ¾À¾¿¿¸¿Ö¯o[eOKs”š©¨‹–³±°¦rT[^XWYUUTTSSSRVUNNUI1%+$1HC369(7ek[^c_`ZW^m}  ¢¤lbm‚…z{}€…~ojbYUTW][ZZYYYZZXUVXYXVQPPRUTSRPTVZZX[^]WTTUTSPKA?=:<@CBDJMKIGKSWSRQQPW_W@3." "&',.013344722:FQ[bZVYUT\ab`ci`PO[bcffaadgebcdcbabceUWt~­¬›¨º°¡ž±·¤¡±¼«Ÿ¬»³¡«¹²¦¬¾¶««º¾°ª¶½µ«¯¾½°«»»®§µÂº®°Ã¼§¹¾`dibce`ba`bbcaabZONMOW`b_cb_dbXy™{bmokillimmkmjhliklkkllmrrrqtw_eŸËÈÉËÈÈÐÎÊÏÕÒÎÓØÞåççæÙÐÔ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Fˆ«««¥­ÏË¢Ž Ÿ‘˜šœ––›—”‘’–—•“””œ³Â»¶À¿¼µ§–ŒŽ””–”–š™”Ž’””˜—’Œ‡ŠŠŠ‰ˆ‡‡†ˆ‰›­“†ž›‡„Œ¡³Ÿ„|„‰’›š–š˜–•˜–•š¡§¬³¹¼¿ÅÌÎÆ¿ÃÌÊÇÊÈ÷¶µšxnokd^YXXYTSRSUUSPTUVWZ_fkfdcccgklf``eb_ae]``]_f^MBMZ^[\`caadfddca^]\[ZYZ[]]XSTWPF9:;<;988<:;?>=>>?>>BFIHEBGGCCGFBA_wxtvwtutrrrsssvuuvvxxxrmnttqpplc]a`chbX[[]_`^^`bb_^^_^\baZ\^Y^bf[aa_mmb‡¾É»ÀÈÅÀ¾º½Âº»Ë°nZdOKr’§¦‘š³¯´¨tV\^WVXVVUUTSRRPNOQTVNAABJO=2::' 6oz`[de_YV^hq‚˜¢¨©‘tho„ryƒˆˆ|nga[XXWZ[ZZZZYXYVVXYVUTQPQSSQOOSUW[[XZ][URQRQQOKCB>;>======@FHGFECA@?>?Snystwtsuvvvutssruuvxtptrnptsqpnjg]^d_`e`\\\]^_\\^_^]^__Z[[Y\YVb†~\Ya^knh‡¹È¹»Á¾»¿ÀÁÀ¶ºÎ²pZeQLs“œ¨¨‘œ´±´«vW]^VUVWWVUUSSSQQWWQSXVTV[S:0<;& *e}eZaf_ZV\fit‡›¨ª”}opƒŒ{nz‡‹‰{ob]YZ]XY[ZZ\[XVWV[][YTRUWVQLKGGOTVYZY\^\WSSTTTRNHGC@@BB@EIGABGNTXYZYYWSN98<<4(! 
#%'),/1113539:8>O\^ZX[TPYageac_SQZ_fcbdfca_`abbbbcceUWt°°›¥º¸ª ¯·«¡©·¯¤¬ºª™£¶´«®¼Á¯¨¶¾±¬¸¾µ¨«»¼±­ºº°«·Á¸§°Ä¾ª»ÀŽ`_e`bfcff_aacbaaXMLKNW`c`b^^c`Wz˜z`ilkmniigjkjkkhkkjkmjhmommnrrZc£×ÒÇÆÉÇÅÑËÊÏÔØÛØÛâæêéÙÎÓ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€K…œ‘–š–›¡­¦£©ª¬ª¨©©¬¬¤£°ª¤ ¢£¢  ¨¥¤¬±§¢«²±© £¨ª©¯ª£¤§ œ£¡§©¤¢¦£œ˜ž¡žž¢¡œš™™›ž¡££ ™¤¡”“™¬ ¡›—™—–¡™Ÿ¢¤§¥§±®±¹ÃÇÇËÑ×ÜÞÜÛÝÝÚÚàØÏÖÛØ×ÓÖÒÓÆ–mlvqg^WTTUWWVUTSSRSRQUZ`celd`cinojacc`agf]\[[Y_h]CDT_^[\`dgfgd]\_aY]`^YUUWXURQOJA;88889;;;AB<9>?<;;88=@@CJQF><96:Ci}{ntwsvqtutqqtxwuvvtvyxqqstpoomlf]]\[`ba\\\Z^b_\^^^`a`]_[[]_XVh‘—nV[`mhdˆÄÕÀºÄËÆÄÀÂŽ¾Ïµr\fRNt”˜¬¬Ž™¹µ±¬wX]^VTUXXWVUTSSUSSTSPQUNNVV>1:8&*^xf`b[_[V[dekz‘¢§”ƒtpƒ‡yp|ˆŠ‰}l`\WVZY][ZZ]\XUVX^ZX]XPRRPF@DCEOQRVYZ_b_VSSUTTSNKKHDCCA>BIJDDIORX[ZXXYTL8:>B>0! #&')+/001357:;<@JV^UU[UPW^cceg_QS]_cdfhihebca`acdb`gVXt°°™§»µ©§³³ª¡«¸°¤®¿­¡®¼¶­°½½©¢µÀ´«²º¶¯²½»±°¾½µ¯ºÃ¹§­Á»¨¹¾‹]chbceabb_aadbaaXPOMOV^_\`\\b^Uy–u^jlhjmkmfjmgjnghkjhlnmlollnqqYbžÐÌÈÉËËÍËÍÐÎÉÍ×ÛÜäéïíÛÍÑ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Fž–“”“–³ÐÛÖØÛÙÚÙÙÙÙÙÙÙÙÛØÕÔÖØ×ÕØÏÒÕËÉÓØÙÙÙÛÝÛØÕÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÏÏÏÏÏÏÏÏÌÌËÉÈÈËÍÍÊÌΟºÇÄÀÀÄÄÀÃÊÇÇÈÉÊÊËËÖØÚÝàãåçæåäãâáààÝÚØÙÛÜÙ×åßÕÜƈkusia^[WUWVXXVWZYVVUSSV]beihfcejmmifb^]ab_Y[\YZc[DCX__`bfdeb_^_`_^ca\WY\YRPWWPH?=CB=9;=88AG=:?A<:><<969AHKQJEC=9CRm}zrz{tvvutssstusqsussvxtpqpmrvrndYX^_`d_]]^]^abaa^^`__d_X]]`^Ta”¥pVbbkhgŠÄÒ¾¿ÇÃÁ¿À¿µ¸Ë¯r_fPMt’šª¨‘ ·°·¬tW^]WYXXWWVUTTSTRPNORWZTPRR?6;6$!&[wi_^YZ\[\chikƒ–§™†~ut~wr~‡ˆ†xi_XXWX\_ZYZXVXYT\[YVVWUQURDKVZ[XXQO[bceac`TRY]cfifa^adeeddccbbeVVsƒ’®¯˜ž¶µ¦¤´¯ 
›°¼§®»¬ž©¼µ¤©¸º¬¥¶Ãµ«µ½¹³´¾¸ª±½Æ·©·Á°œª¾¸¦ºÁ]cfacb^a^c]]abb_XMKJMV_`\abcc_[|Žr^klikmhknjfikhghkkknnkjknomor\d¢ÒÍÉÍÑÐÑÍÌÐÔÒÎÓÜÜéìéåÙÎÏ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€C~¥¦£§¥¡¬ÎçéèìîïêêêêêêêêëèæéîîçààãåæáÚÛãéçäãåçéêééééééééééééééééêêêêêêêêíëèççèééååéëßÓ×åáßàäçæååãääååææçáâãäåæçèææåäãâáááßÜÝßßÝÛÛßàåÅ‚gswlb^[VUXVXXWVVVVRTUTSX`ghgedfjjigcba`ab`\ZYY]bXDAV_bdehfjd^\^`_]\]ZWX[WPSVSKC<:?C?::<9:AC=:==99=?<:<<>ENJMOOLKMQ`swqsurrttttsssrytrssuxzsnnmkptooja]]\[^_`a_]`a]__\\``]]\X_^a_U^Ž—kW`aloeˆÂѽ¼ÂÀÀÀ¼¾À·¹Ë¯r_fPMt’œ¬«“¢¹³¹¬tW^\WXXXWWVUTTSPQRTUTSSOPVT<099"!" %Wsg`a`caZYaedfv‹£Ÿƒwt~zwŠ‰yi]VY[[[ZZ\ZY[YWZXYYXXXTOURE?IPNNNPTWYZ\^\TQWWRPSRPLGBBBBEHJIGINSWXZ]\WOHA8132(  !$'-,,,-/246888>JUZZXXQO[bcb`b_TQZ^ahic_bcbedddccbbbUWu„‘«ª•Ÿº·¢²¶¡ ³º£¯º¬¨½µ£©º¸¯¨³¿¶­²»²§«¿¿¬ªº½¯¦¶¿³¨±Ã»§ºÀŒ]`c_ccaecb_acaa`\MLKNV_`]_`ac_\}‘v^ikiklinkggigfjlhjnlkkjkmomor\d¥ÓÌÅÈÍÍÏÐÔÔÑÒ××ÒØèîéâÕÎÔ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Gz¢£ §«§¢¹ÙëêçéêææææææææéåãåéäÖÊÃÅÀÅÒ˽½Ùáéìéææèççççççççççççççççççççççççéççèçá×ÎÎÍÐÓÍÄÇÑæèéæäååäããääååååéééèèççççççæåäããåäââäãâàßáãå¼zgvuj_YVRSV[[ZWTSTVSUVTQS\ddghfcdhkhcaa``cd_XUX^aTB@S^dgdeccb`\ZY[]``\XX[ZVTSKB>::>CB=9;;BS^gidb`[]_^[YZ\_^[VUVVURNC<;;CB?@@<;:<=<;:986:;;>EKMOPLEFOPLO_szrsvssttuttssrrvwuttsxojihptnficZX]be\`b^]bb\`b`^^````^^XX_]`\^Z[]`gjh‘ÇÔþ¾¿½À¿ÀÀ·¼Ð®q_fPNu”šª¨‘ ·°·¬tV]\VXWXWVVUUTSSSTTTSQPPOUT>396#!Poeaega\UW_ccfam‡—™Žztyts‚Šˆqe[WZ[YXVWVZ[VVYZYZYWWXXWQUOHIOPQMMOTWXXY_]ZWTSRRQQSL@AGDDHKJHINTWY[\\YTQK>40-07=?3&#"#&+),.-+-274578==CLPNKILNMJMYq}wsvsttssssttwvwwtvxxtmkjjprkfjf]\`bb__^\[_b`_^^__^_`]^`\[`_\a__\Y`jij—ÊÑÄ¿½¿ÃÃÀÂŽÀÒ­p^fQNu”šª¨‘ ·°·«sV][VWWXWWVUTTSRSUUUSPOSPTTA7;5" 
Qqh`bb\YWZaabhcez›”uzooƒ‹ynf\WYYWXXXY]\XX\[VXZYXYWUQWVNKNQSNNPTVVX[da\VTTVWWSTLA@FBBEIJKMQTXY\\[YWVQE;40<<>??;98:=@CEKHGKMLJIIXp|ursqssrqqrstwtsrruxvnjjkjopiehbZZ^__^]_a`_^]`\]aba`^]^_][_`\Z[^^Y`g`jžÌÌÁÀ»½¾ÁÀÁÀ·»Ð¬p^fQNv•œ¬«“¢¹³¹«sV\[UWWXWWVUTTSUTSRRRRRRQWWB6;7#Psja_^^[Y]a__fecrˆ˜˜„y€pn…‹}tmi]VWXXXXW]\YYZY[VXZXWWVUTVUQNOPRQLLSTRU]_\[_`[VUWSSOEDFEGIJJJKNQXZ\\[ZZ[TKHEANeoXG1#!%%$+,-,+,035579:>HQUVZSOX_adbc]OMV]dedcbedaaaabbcbc`SUt…”°°•ž´´¨ ­³¢Ÿ­¸«¡«¶³ž¢·µ©«¶¹®§²»¯©´º¶­®¾¾®­»¼¯ª¸»¯ªµÁ¶¦¸º‡^cfbddaeb_`b`^a`XNNMNT]abc_^b]Wz’vamogdhjkhikhgjnkjkkhillilmlmpZb§ØÓÐÓØÖ×ÛØ×ÚÛÙÕÔßæéêèÚÑ×€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€RµéÞâíêëãêéäåççèææææææææääåääæéìèèèåæììåççèêíìèäèèèèèèèèèèèèèèèèååååååååççèéèæâßãçêéææéìêçæéêçäãççææåäääççèèééêêèèèèèèèèçéêéæåæèäèêÏq~yYSPTVTTVOLLNPUam`YUVUSV^aceebbgmmc]^_acd^ZQR\ZH9BQTTVWXSUX[YURSUVTRTUTOKA?97<>=?BEB>?><<@@AB@??@?;65?=;=@ACC@=?DB>::;<>AHIJKID?(%**('')-145679:99BLSV[TOX^aa]^[RR\bdggcacc```aabbcc^RVu„Ž¤¡”¤·®ž›¯º©¡¨²¨Ÿ§´¯¤«·¯¤«¶³ª¤®·­¨´¿»µµ½µ§®½¿¯§¸Á´©·Ã»­¿¼†]`c_bb`dabaa_`fcVLMMMR[`bb]\a]Wz”t^jnnok`chmkfhkhihehmnklhkmklpYa®ÜÒÊÌÒÓÖÌÔØÔÔÚààáèëíéÖÉ΀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€O¼ùèÝäèìéééééééééèæååçéëçççèèèéééèèçèççææèéèæåæççèééèææççæææèèçæèèèèèèèèèèèèèèèèééèèççèèèèèèèèèèèèèçççççççèèéèèèééèèèèçççéêéçæèêëèðÊ‘ƒz_V[PLWTILMVXY_\]iifaZUTWYb_^`_\aik`V[^^ce`SOV^UHLXYTLKPQKPQRSSSSTQRRPKE?<9426;>???A?;=?@@DB>?DEA?<=CC;9@DEFIKG?;>8469::;=?@?@;:67<=:834:@>?EHFHHD>=@Dizxtttuosnmrroosvwutqqurfahnoonrgda\]]W[_`adhhfe\^aca^^^^^]^a`]\]]]^Z`eXq°Ñþ¿¿ÂÄÁÁÁººÈ¬o]fQNv–œ¨¥‘ µ®¶¬rQWYVVRRTUWWWVUTSRRRSTUSQQTE247' Kpj`eda^ZY[]ae^bfeizˆˆ‚yv~|rmg`\YXYYWUWWWXYYYYZXYZXYYVUQRUQNMLDGCEOUTS[]][[]]\VSV\\UNLOONLGFKSQXZZ[]]``^[YPSbe\WB)!%%$'()+-023568856?INWXPNY`_dba\RQ[dddddcbbc_baacb`bhTSq«¦’œ³³§¤³µ¦™«¼ªœ¬½© 
«¶°¦©¸¹¤ž²¾°¨²¾´«²»±§­¹»¯«»À²©¹Å¶¥»ºXad_ba^b_]ab]]ecVLLKOW`b_`_\`_Z{–x^hlhfiihhiiijjjfjljhilnlnlkppXg°ÝÍÈÊÊÍËÎÕÓÎÐÖØÚÙçëêåÛÓË€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€&|ÙôæåëäèèèèèèèèèèèççççæèèèèççççéèçççççææçèççæçèççèèèçççæçççççèèéééééééééééééééééèèçççèèèèèèèèèèèèèèèçççççèèèèèèèèèèèçèççèéèçæèéâêêÅ’n]a]YY]_agjfhbWTQVcefc]YYWTZcaYUMN\gi]TUV]b_STZZMFRSMIKLJKNNQTSNJKMJGBAAA?=7447;<;:>><;;98:<92046454138;@FHINME@CLPctxvvsrlpooqsrrswvrsuvxrgaiqrolnnh`XZ\[`he_`jmigbcff`Y[`^\_bb_^]\[\\Z`f\t²ÏÀ»À¾¿ÁÂÀÂú»Ê®n\hSLt——¤¤±¬¶®qPWYSSQQQRRSRQPYXVTTTUVPNQUE247' Hnh_fg\]]YWZaecddbdr„‰z{yqnh`]ZWXY[[XXXXYYZ[[YYZYXXVVRQRQOLGAECBMWWQW]^YUW]`WST\_ZSQONNKGFMUX\]\]\]ab`^\TUbe_YC)!"##'')*-024568965;DLUWOOY`^`^_[SRY_dcdec`adbda_ba_bhTRn{Œ«©•ž´±¦¤²±¥Ÿ¯¸¦¡³½¬¡©µ±¨¨µ·¥¢¶¿°¨²½µ®´»±¨¯½½°«º¾³­´Á´¤»ºƒ[ad`bb^b_`_`_^a_VNLKMV_b`b]Z`aZ{”v]hlhfhhhjifjnmhkjjkjhjmknljppXg¶ÝËÇÇÄÇÉËÕØÓÖÚØÔÜæéìèÙÏË€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ Y¹òéâëèçèçèçèçèèèçççççæèèèèçççæèççççççæåççèæççéçççèççççæçèèççèéêêéêééééèèèèèèèèéèèççççèèèèéèéèèèèèèèèçççççèèèèèèèçèçèçççèèçææçéäé纂ja]fqmbbddh_`\YVPPYflme]ZZX\`]WQKKS_kaTRTZ^VLQ[YLFRPNKJJKLLQSSPLHFF@ACCBA?=:7578:87>>>?>823:955873283147—çðâçêççççççççèææåçèèçèèèçççææèçæççççæææççççèèçççæççççæçèèèçééêêêéêéééèèèèèèèèèèççççççèèèèèèèèèèèèççççæççèèèèçççççèçèçççççææçèæêä°vegkwxnbbededhf`]XQLU_hie_XSXX]]SKKJUgeXSTYZNFNXVJDMNLJKLNOOSSQMGCAA>?@AAA@@87669::9;;;<=85777559:8853358:>BMKLNPOPRNYnyvoqynpssrrtvouxytqtrjgkjjnnohjwuc`g`\aaahllmhdcec]\`^`ec]]a`\[]]]``Yz¼Ò¿¿¿¿ÁÅÃÀÀ»»Æ«q_ePOw”–¦§‘±¬¸®nMVWOOOPPPQSTUVQRSRQOMLMTZVA9@8( @jiaeecbbYLN\eea_`bgtƒŽ‹}trpne^^]YWUWXYZZYWWY\YXXXZYXXVVTRRPMJG@@HQUTQWZ`aVJP_ba^WVX[ZVSPMIDEGSVYZ\[]b\[XYUT\a[R;&!$&'&'(*-0346677536A><<>AA@=;;:99975;:76889<><647:;?775588>>==<<>AA?<@?;76898<=<:;<;<@?;9:;:<;;7588;BNKJIJJJJFQmvstqrtsqnoqstttsuwyzscelljnpsxxx}r\[b__ccahlfggfebaabbccaa_]]_[][[_][ƒÅÑÁÀ¿ÁÁ»À¿¿Â¾Àͬp^eQOw•©¦Ÿ²ª±§lLQSQQMPPONOPRSSSUX\`dgdjl^@:D8!!Fqk]adg^bcRLXbbcbbbaix…Œrsupb[WUTWYYX[YWX[\ZWWYWWZXVWXWSRSQQVH9;KTSRSPVY[cljb_\\^_]]^]\[ULCDHHQWX\^_`^\UVURY`_N2 
!$#!%&(*-1457422116>;88:;::=<:;<;:==979::<9978=<<@DHHDBBB?@Rswqwvvonmortrptuuurswtkhlidhoxyzx~xd^^^_bYMXlqihfdbb`^`ddcc]Y]\Y]\]a\Z‹ÆʺÁþ¿À¾Áû½Í®n\hRMt—Ÿ©¤ž²¨­¤lLPRRSMNOPTX]bemoprssrrpqqbA8A8%! !KukY_fdZbgWNXaa`[[``eoŠrsvo`]\[ZZYVTZXWXXWURXZWVYXUV]YSRSPOUM>>NUSRPQVY]grrjj_[aaYVY]\[ZTKFFLOU]`\\bc_XXVSZb^K.!&$"%&(*-1458300016=BMRNOZ_]]_`ZRU]`adeb`aba]`abec`biSPn‘®ª‘¶µ£œ®µ­¥«²§ž©¹³¢ªºµ¥¥·¹«¦³»²¬µ»¶®³½·ª¨´µ­§±¼³£¯Àµ£´±}Ydfbddadabdd_^c`TNLJLT]_\^]^]X\Žp\iieggbcghefkljgknmihjmiljinnVe³áÐÇÉÍÐÍËÍÎËÌ×ÚÐØêéçéÛÏ΀€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€&,3qÐðáæíåäèçãåëæçèççæçèæçèèççæåèçæççååçêèçèèææèèèçæææææèèééêéééêêêéèèèçèææèèççèèèçæææåæççèèèççææçççæçèéæççæåäåæéèçæççæååååãåççåæìæ¨eo|\^``][ZZ_efcelmhnmjcYQPRPX\`bXG?JTbeZRRU@BLM@@LRQRQOMKGA==;89=?>9<=>@?=>DB;427978;=<<=?@>><99;<=@98=>:BD9@`qpuwuvusqqporwuvuvuuwrfbkkeiszyytxp]\`acWJShoiojcda^bfdedb`_\X[[\]b]^ŽÇÌÁÂÅÀ¿ÃÇÅÃý½É¨q^bQPv– ª¡ˆš°©µªkENUPMOJPJCHMPUTTVXYYZ[aYSJBGF8%! !Ryn\agddec`]]^^b`^bbbh{‚|oqzq`ZXXZYUTUYXWY\ZWYYYYYYYXXX\VQTROTSFENSRQMRLQ[akuwqrj_\aa]]ab^XRJCEQWX]`__`^ZZVOXdY=($$%$',/1479752/159;FPSMNY_^a_]WQRY\cedcec`bgd_ae`\_bUTl~˜±¡‹¡¹°Ÿž°µ¬¥¯µ¥«¸«œ¦¹´§¬µ¼©¦µ¿³¨±º¶®¯µ°©ª·¸©£µ¼®¤²¿±£½·|Zce`bcafd[_c``eaRNNIIT]`ae^Z[XY}l^kigiifhiiiiijjkiffkkjmjkijnlVj¹ßÏÈËÎÌÍ××ÑËÎØÚÖ×åìéä×ÌÌ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€"12]¾ôìâëåçèããçæåæççååææèéèææçèççççççèèééèçèèçæèççççççççèèééêêééééééèèèèèæçèèççèèèæåççåæååæçççççæçççææçèææçæååæçéèææææäãåçèæäæçèàñë©n}‹iY]ab_\YX\_dhklkimjknk_SNPSV^e]K@?I[d]TNG>CNK<=LSSSOIFFB>;>>==???==:;AA>=:=<769:959>><:;>B?<::;=@=978:<=@BC>AB@=5Dgyomvxssqqrqpqtrtttrqtqbdllntuusxzz{wi\^^`aPFYmrkehidbfgeeda_]Y[^\YYa]^ŽËÒÆÂÄÂÄÇž»¿¼¿Ë¨q]bQQv–Ÿ¬¤Ÿ³©µªkFNUPNOLR?! #! 
!#$&' '=>????>;@>:;AC?>:>@=<<:8558==977;88::89<:50025565:;><9BGPcsqjnsqqqstsqqrtsqrttun^bhipurrux{vsyr\]]\e`JNfojhifdfhgefeb`^[\a_[[b][‹ÈÏÁ¾¿¾ÁÄ»»¿»½Ê¨q^bQPv–š¨¢‹°¥®©kGOVPNPQXJ-"%!"!0BBLSG)" !Nxphoghdbdda^]c`_]\_fhpyzrqum\WYZYWVVUWXWXYYY\YXWVVVVWXWYVPRTNQOMPTMGINQPNR[hurvwpf``bc__bb\UREEKU[^_]a]XZXRY`Q3" ""%)('*2750/01/-/7>FPTMNX_]]ZXSMPZ`fbcc^_b_\___`]]dbORr|‡¨¬” ±¬¥¨³¯¢¡®²£ ­´®¢¦¹º¦§¼²¤¨¸¿±§²¾¹±³¼´¨§¶·¨£µ¼°¦²¿°¢½·|Yce`bb`eb\\`aab]RJLKNX_][[Y\^XX’p`mmjhgdjjjhgghikmmkhhjijkijnkVj±ÒÇÊÐÍËÓÎÇÄÈÊÈÇÉÐãææëÝÊÊ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€/+3fœ·ÅæêèååäåêèèççææååæååææåäåæççååæåãææçèçççèåæçççççèçèèéêêêêèèèèèèèèçççççææçèèææççäãæççèèèçççççæåååæåæççççèèæææçèéééçççæåååæÝìè³uekc_^[WVX^ccadlmihlhmmffmok]UOTadYLB?Qb^UNB:FMC5;IMFE@<<@A@<:8<@CA=@?<=AB??>??=<;72238<933720278547=<6015525<<<;E]mcao{urvtpqstsqqqutqstvvnbckprsrxzvxwsyvc]d_ajYGUikljgfggjiihc`^\Z^^[Z`XVŽÆȽ¿Ä¿½¾À¾¿À¹»Ê¨q]bQQv–™¦Ÿˆ™­¢­§kHQVPNQlh\QLJFC9<;5/--,++8>=OT<% ! Rsjnehkkgcaa`b^``[_feirwsopi[TY[ZWVTRYZYXXWX[YYZZYXWVYX\ZQQURRKKOKE?9EOPMOT^lkottngfjle`ac`YTH?DS[_a``]Y[UQZbJ,! 
!"&''*-0013////.08@GQTNMX^][YYWSU[^cdffa_``]``_a_^beTSm{®¦’¡´®¤¦³±¤¢¯´¤Ÿ«±©¢§´²¥¦µ¬ž¡³»¯¥±º²«¶¿±¤­º¸¦¡µ¾¯£±¿¯¢¼¶{Yce`ba_daa_`a`a^WKMJLV^]\_Z\`Z[~palhejmjefffefhjggjjgikijkijmlVj¶ÖÇÇÍÎÐØ×ÏËÏÒÑÑÕÚÝßäéÝÍÊ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€+00Ebw„¾åñçéèäëèççæææååèåäååãäççæææççæääåççççèèæçèçææççèèééêêééççççèèèèççççæææææçææçæååæççèèèçççççæåäåæääåçèèèèææççççèèçççææåææÜíì¶q[cbZ[[ZYZ]`ggikkiggcba`^^dkjkbTT``UG6>GF>>=:<@A?<:9=BCA=??=>CC?=?>=;;;975359844651155226695,*02-994>C2((++,+(++(%')''-S}tjspglldafgd`]ab^_dbcksqjig\VY[XXXYXWYXVVVVYYZ[[ZYWVWX\]WRQSSPROC94/8ELMOOR]cekrtqqtvsmgda]XM?AOY\^^a\XXSQ\aB%!!"#%&&+0/+.63/-/239?GQTMNX^\[]`\SQW\cedcda_`bc`_cb_`dUSk{”¯¡‘¡·¯££³³¦¡®¶§Ÿª´­¢«·®£¨²¯¤§´·¨¢´½°¥²Ã´¤ª¹º¬¤³¹¬£±¾¯¡¼¶{Xce`aa^c`b_``_`^WKMJLV]][_Y^b[[o\komihidhljfeiojghjikmljkijnkVjÃç×ÐÖÚÜàßÚ×ØÚÛÚÙ×ÒÐÜçÛËÊ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€.4-8Ugg…ÆìééêåæææååååääéæåææäåèåååæçèæäáåççæçèèæçèçååçèéééêééèèçççççççççèçææææåâååæççæççççççæååæææåääåæääåæççæåääåæææçèèæäåææååâçëÃzX]_YZYXXZ^beikjiifbba_\ZZZ[bjpgVORSK>BW_RC:==@?<@?ACC@<;??<>DD>;???=;;<=>625;:6363243/.02/.-,+-03,6azvstdZgzvoqrqssrqrrrsuuvssurifmnjrvstwzwsvyxe]]]htcT]jmjmjefggigb__^Y[[[[^VWŒÅǼ½Â¾¾ÂÀ½Âƽ»È¨q]bQQv– « †—¬¤±¤kJTXPOSVNKPQOG>HFEKVXG2#!(-(*0/'#')%)/..,'&*-/4-8c…{ptrmpokkmia^^ab``aa`fomdce_XYYYXYXWVXXVWXXYYYYYXXXXXXXZZSORORRQMA2,.7CLOKJP[[`jrtuxtywnc^\[O@HRTNMX]\\\]XPPV[dd^]bc^_cea^cd`_aQQn|Ž¨ ‘¡µ®£¤²²¤ž¬µ§ ­¹¨™£¶¬Ÿ¨µ·¨¨¶»®¦µ½·©¯¿µ¤§º»«¥µ»ªž°¾¯¡»µzXde`a`^b_]]__^`[QJLKMW][YZW^cWZ…—takjffhhjgefhifcmgefghiijkijmlVj¿æ×ÐÔÙ×ØÛÛÚÙÛÛÖÐËËÇÔæÚÈÍ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ .1*5WlheÅåêçåãæåääææåäçççæææææâãäãæèæâàäççæçèèæççæäåèëêêêêéèççèèçççæææçèèæåææåãååæèçååååæçççççååååääåçââãåççæäâãåååæçèççèèçåçêæàèÊ}TZ]YYXVVY_dbdfefggedlmgehbVWVcqkYJBB?CRZN>8;A@;;@?;@A@>?A????=>BB><=>??><<<=:77775321110.,+/*)-.++/,"-Zvqosl_fyzpqwrtusrstsvustuvwpmhjghqutwttrnouxzh\]grn]Rdmieefhffhgb``_Z]]\\a[ZÊÏÀ½¿¿Â¿½ÃǼ¸Å¨q^bQPv–© 
†—«¢­£jKUYPOSYRQQMIE=JHGMZ\G-*$$*,)*-(""$#'.*&)%"&,,,.Af~{uslnkhgfb_\^``_`__b_bki]]dc\YWVXWWVVYXWYZYXYYYZZZZYZYXYZUQRNOMPVN>4./;KOJILSSV^fkoqqvysi_[YSE=GV\_e_XVXTT[X4"!&($%(+-/1335/-00/4=HRUNMW][_\ZVQTZ]ca_^aaa`^ca]acaa_OSr~‹¥  µ­££²±£Ÿ«²¤Ÿ®¸¨¥µ¯¡©º¶£¢²»¯£¬¶´§©·® ¦¸·ª¨½À©–°½® »µzXde`a`]a^cca]\a`VKMJKU\[ZYU]`TW…˜nanjgihbdfiigfghigfgjkihjkijnkVj¶ÙÊÆÊËËÏÐÑÑÒÔÕÐÊËÑÎÔãÛÎÕ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ '-+2M`^b`˜ÞìâäææåååççææçèçââæçææèçååèåÞßãççæçèèæææåäåéìëëëêéèçæèèççææååçèèæåææäææåæèçãâççèèèççæääååäåæçäääåååãáäåææååæççåäæçæçèàâíÅsT^WTVY[[\^```__aehjlpmfeihbZSVagg[J;ABDHB:=9?@<<>=9AA@>?BB?B?>?@>=?>@?=?>=@==@CDB?=<;97652/11/--.//*-,(*0.&$)Vvvqmnffntwuporsrqrrpstrtssupqmnosvu{xmmtrqvx}u][bgpiW]glhehmiikic__^[_\XXa^^Êͽ¹»¹¼ÄÁ½ÀÀ¶¸Ë¨q^bQQv–ž«¤¯¤­¢jLUYQPTWMNRJDEFBEGMWZH2 $%'+($&''$&*+($&&&(,+'(3BRVSVWPRG@>;8?K_b_\__^d_`hgZZde]XVY[[YYWYXVXYVTTVXZZYXVXZ\][WRQUPONLLJB2,8KOLMNNNOSZ`egjmsxuldaVI>DSXZb`]\\SOTP1"!')$"*.+-3633..0-*1=IRUNMW][\\\VMMV\__de``cbX``]^aac_RVs€¨žŽ ¶®¡¡±²¤¢¬¯Ÿž¬´«¨¬²¬ £±¶§§µºª§º° §¼²¢©·¹«¦·¼«ž°½® ºµzWde`a`\`^^aa\[a^QONHHRZ\]]UZ]RV‚’o\hjhfhiefhijihfhifdijgfjkijnlVj¹ÖÄÂÇÇÊÔÔÓÓÕÚÜÚØ×ÜÜÛßÝÙÝ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€+303BQWRNoµæçÝáåæççæãàÞäææããææãåäåäéëãââêåãèéêåããåèèçèëéðïèêïëäéçèêçãåìëéçæåäåæçàççääâçâæèçååæçâäæççæäâìëêèæäãâåæçèèçæåãåçéêèæåäáæ³gS[XYXWX[afjjjgcekoomlklic]ZSOLLJKLJ=A@>?@AB??@CC@>>=@BA><>AA><;:;;82.0).Lab^ab^_`cchdWZeh^^]\ZXWVYXXZYVUVVWWVWYYXTTWYYZYRTUUPJNOH905BHHJLILNOPTZ_cehilmjd]PFHS[\Z^_Z^UPWJ"!###$&*-..///-0.,--3=KUTNPTVZ^\[YRPU[`^cgc__`a_`cc_]_hQSr~§¥“ž¯­¥ 
¬³¡°µ§ž°º©Ÿ¥²®¤¦«°ª§´º¥›­·¯¥ª·µ©¥³¸¥¤¸¾§Ÿ°¿¬šµ´ySdd_ab`a]a]\]_a^UKKIKT[]\ZX[\RY†‘nbhhkfdkkeeiifeclkeafkkjkihkojUl½ÚÇÇÒÒÏÕÐÖÚÝáåãßÞâãáãçäÜ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€*1-0>MRPM[”Õèâéççççæåääèéèçæççæçåèäåéäâæíæâåçèãåãåéêèêîîéæçèæåèæååçåäåéèèççææææãçêãåíæâèæåææææèççææææççæåååååååæçèééèçæçèèèçæäãâêè©`V`]XWXZ^ekojkjghkjhgihc\TLGEDDCACC@C@=@DCBE?@?>==<<@?>>>?>><>AB@?@C=?>=@?=?@@@AEGC<;87886332221/-+).0,&!$&(Ajzronhgpxxtsuplnoorwwuttsrstqnrwvsplosw{|{xwzxe_fmpgY\ikhijggijf_[[\W]rqh^bˆÃͽ¾ÂÄÀ¿ÂÀ¿Â½¼Æ¥m`bOPr”Ÿ­Ÿ‰š¬¨±¢jKTVNMQYOLMJJHBIPRPQRLCGGGE@:4054333445667778887>>IH===;I^caa_Z[]`]a`WYa`\]]ZWVXZWVVXWUTTQPRUVUVYWSSXYYVSRMMPOPQOH<2:INJIHLOOLKMPW\_`befdf\KFRYYYb`X[WTT=##$!!&),...022-/.,-.4?MTRKMRVZ[\[WOOV[dbba_ab^]]^aba`abPSp}¦Ÿ‘¢³¬¢ ®´¤¡²¸­¤°¶ª §³ªž¥²±¥©¸³¡¡°·© ª¶¬ ¢¸¹©¨¸¼¦˜¯¿­›µ²yTdd_bb`a]Z^a``e_RGIJNW\ZX^Z[^SY†“h[fikifejihghihgkihimkhhkihkojTlÂßÌÍÖÕÐÕÐÓÕ×ÚÜÜÚÞàáßàâÞØ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€)0+,:HMNPNyÉìâßëêèçæåææèççèèçèéêéìæäçäæáëçæéèêçäãäçéèéëïëèèèéèçììëéêëêæåçèèèéèçÝçéßäéàâëæåæäßâèäâàÞÞàâäßßàáãäåæãäåææåäãéçåãâááááêÞ›aY[WXWXY]bgjddccecYN[]XLDDED?CGECEC>BCB@A@@A>@A??@@>B@>=???>@>?BC@?@<;76:;;=DB@CGID>8556531010/--,--4<3$%&:auqmoihowxursnntuppssrrtusrrnmswsonmvtsvz|{xy‚k]dork\V_hhgihghgd][\]Vf„{dV[‰Ãξ½¿Á¾ÀÁ¿À»ºÆ¦l]`ORq’ž¬Ÿˆš¬¨±¢iJTYPLNXQNMKJID?9447740***)&!#"! 
####$$$$'(*,,,+*,330333CU`a`^X[_a[^_\]_[Y[\[XVVVWVVWWUUUKOQPQTTSWWVUUSUYTNNRQNOOMKEBEB??;:;AIOOMMRVUVZ]_bcTIQWWZ]]VWXYR3!##&))+,,-023-.-,-.6CSXUMNU[^Z\\UNPX[d`bc_[_d^_`__`_]]PUo|Ž¦›“§·«Ÿ ­¯¥¡¬±© ©¯­£«·­Ÿ§¸¶ª«¸¸¤ ®¹¬¤°º®£§²³ª­¸¼§–­¿¯œ³¯wVdd`ab`a]_`ba``]TMMJJS[]^_Y\`UX‚h\ikijhadhhfigefgllgfhjkkigkojTk³ÏÀÁÈÅÀÂÄÃÄÆÈÈÉËÓÒÑÏÏÐÏÌ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€+1,,9FKTVKe²èêáâáàßÝÜÛÛÝÛÚÜÜÙØÛÔÖ×ÑÒÐÎÚæçÝÜÜ×ÙÚÚÛØÕÚæîîìçääâáßÙÕ×ÔÐÐÔÏÈËÍÎËÊÊÇľÄÆÂļ·ÎéÔÃÀ¿¹·»¼»¹¸¸¹»¼¹¹»¼¾¿ÀÁÀÁÂÄÃÃÁÁÉÇÄÁÀÁÂû¿°`[VXYXWVWY[]^[XY\WF5?ILE@BB?BDFC@CEC>CC@?@@AED@>=@BCAAA?>>?@CAAC@<=B@;986:=<@@?@AA@?:7554100-,,-048:?J: #$6[ttnokhmuxurrnotuqprnpruvtrswonnjhmqxtprvzzxyyt]aomlcWVajifihggc^\]VPhdV[‰Ãο½½¿½ÁÀ¾Áú¹Æ¨m\`QTs’ž¬žˆ™¬§°¦kJU[SOPVOLPMGBB=&""/N^a_^Z\^_[\a_^]Y][YXWVUTWVVWWUUVRPQTVVUTVYVSUVTVWWWUPLLLJJNOIFB<82.222?HFCC@:BAA@=>AACA?ADA?AGDA?>=>??AB@=<>@B@@@@>>?><=<88:98876557:32120--.3457;AEHHP?$4Xuxoplilsxvqqonopppprsstvtsvqebfhkrwssttwwwwxs|zfckhlm`S[lofkiihd_]^]SiŒ}`T[„¿ÌÀ¿¾ÁÁÁÀ¾Á¹¸Æªo^bTVu”«ž‡™«§°¨lIRYRPRXLGMK>;CH&  Mab__\ZY[X\`_ZYX\ZXWWVUTTSSTUTTUSQRUXXWVXZUOQSUYY[[XSNMNPJJLNQRNGD>969?DHJKJLNLHEQ[TLRYVT\]WSN?%"# %(%(+-.0221--,-./BBAAA@=>BAACB?<=?@@?=<==><;>BC>99<;874364211001431000037BCDEHKNOLOC&#1Rt{oqnjkrxvpnqrppsrmqrprvuruqdclqpqrstuvuvwywux{tjgiingWVfmfjiijf`]^[W_ria\`…ÀÎÂÀ¾ÀÂÀÁ¾ÀÁ¹¸ÄªqadTVv–«‡˜«¦¯¦kHPUONR\J@FD78KJ(F_b^`^[YZZ]b`ZX[UVWXXWVVUSSTUTUWQSSQRVVSTWXZWTTX[ZYZXRNRJMMOTVSTQOKE@@CFEGGHLNLGJOVRIRYQSWYWSK6! 
""%&((*,-/10.-,+-/0>SVWVQPW\Y[ZXUQSXZ_abca]]b___^^_```ORnzŒ¤“§º±¦¤¯±£¤²³©¡¬³¬¤ª³ª¡©µ±¥§¶´¤£²µ¬¤­·°¤¥´µ¥¥µ»¦š¬¼®ž³©tYdd_bb`a]\\]]_c\PKKIKSZZZYZ\[QX~c`hdgiffbcfgeijdchjjkkihjgfjmhSj¢º¯µ·¹¸¶³¶»¾¾¾¿ÂÁÅÉÊÇþ»€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ -3/0=KPQXTa“¿ÇÁÄÅÇÈÊÊÊÉÈÊÌÌÎÑÒÒÍÐÕÒÑÓÐÖÛÖËÏÚØÖÓØÛÛÛâììäξ°¨®ÎèæÚÜâçéèèêîñíãâéëçéêìæèïéçåçëîìéçèèèééééèèííììëëêêèéêëëêéèëìíííìêêéöàŽRPTPPPPPPONMDC=648<<;>@ABCDC??@A@BB?;AB??@@ACBAAAABC@@AA@?=<8>A><<;89:8564243102200120...05;JKLNNONNMNJ1 *Ioznqokjqxwpptsopttonqpqurotmabntsrqsttsssvxwwwzyoijghi_TZfihhijf_\\]_[[X_\X‡ÂÐÄÁ¼¿Á¿Â¿¾Àº¹Â¨pbdRSt—ª‡˜ª¦¯£jJSVMMRYI=@A6;QI&<[b]_][[\[]baZX\VVUUVWYYUTTUVUVXURQSUTUWWTUXYWWVWZZYYTQTPPIDMWZ[VRMIGFECGJJJLOPNQSVQKRWQSRTVTF0"" "$$%&)**+-//,-++-/1@WVTRMKTZWX[\VPRXY`eb]_b`\ca`__]^_]PUn{Ž¦š£µ¬¢¤±µ§§·¸ª¢­°¦ «·¬Ÿ¥±²¨¢­´¢›®¶© ª¶®£¥±°££¯·¨›­»¬ž³ªtXdd`ab`a]a^^__^ZRJLLOW\ZX[Z\\RY‚g`kihigfehifeijhgggigcemigeimhRj¦¼°´µ·¹¶µº¾¾½¿ÃÆÊÑÔÏÊÆÀº€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€+2-/=KPQRGe¸êæÜåäããâââããææããææãââíçßèèàßêèææäæåèãßáçëêèÇ°¥¥¯ÏíñÞßäêèââèÛßÛÓÕàæãàãíêæëåáèèéêíîëæêèæååæèêççççççççèéêëëêéèêëìíëèåãêîݘ[SUSPPOMJGDB9744:@>8??@CCA?><=@@@DC=BBABEC@ACCB??ACBABDDCA?>SG":]e][XWX[WX^^XUYWVTVYYXUTSRTTTUWSQOPTXYWSSUTQTWTNY\VVWVWSRNHIX_Y\[XRMJJLMPPMLOSTQXYSOSVUVRUXS@(   #'(&,,++-0/.,+*./1AZZVRLJU]ZV]aXPRYY^bb``_^ab^^``_`d[RYq}“¨˜Ÿ®¤¢°²ª¦¶·©¤³µ«¢«¸®Ÿ¦¶¯¥¥±°Ÿž°µ© ¨¶²§¦±°ª«®¶«ž­»«´ªsWdd_bb`a]]`a^]`]SHKLOW\YVZX[^TY}c[jjdeiidhhgfdcfjjhgihfgigeimhRi¯Âµ¸·¹¼¹¸½¿¼»¿ÅÇÖÜÚÑÉÆÁ»€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€&003BMLMMHÜéã×àßãæâáåèêæãäåääåæâäæäãâÝáèèåæåãäèæäêèæëæÅŸ›œ§ÕíæâÜÞÞÙÜáÞÅÂÈÏÑÚããÞèäèëæëéééêêêêëíéééèèçææììëêéçææìëëìììëêêêëëëëìíëôã›PGULIC;64321::::::::<<<<=====;=@BCDEB?@DADECAA@?B@@@A@<98<><9986.35300/.1/14630/53.1646DIJLOQQPOQRT? 
&>buropnkmsustsqkhouspuustrrvo\U^efdbfnqqronqox|ulhgekimo`QZqmfifca\bZ[\YX^^W…¾Ë¿Ä¼µ¾ÂÁ¿Äƺ´¿¢racPQx•¡­˜‚°¦®£jJTYQQUWFCF;3@PB$ 7Yb\]ZY\\ZY[]]ZWWSRVZYVSRPSY[WTTSMOSRUYVVWWUTTTTVTRVZTPVWVQKLSXZ[]\XUSPLTQRUVUVZZWWSKLSUOPQTQ;$ "#!#&)*,--,+++)/.-/9UZXUMIU_ZY[\XRTZ]\]ab^]__`__]`^Y_\OQhx¦—¦µ¥“¯³Ÿ§´µ§¢«²¬¢¨±°¨ª¸±¤¢²µ¤£¶º­¥¯¸±§©°±©¨µ¹ª›«Â®š·®rYfd^adcb\\`_\`c\QKLKLUZYXVW\ZOZ‚‚^aqg`ehedhjhedeeiggfehkhflhfkhSl®Â°¶¿»´¶¹¼¾ÂÁ¼¾ÊÌÑÏÊÌȾ¶€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ '--1=ILMFd»îçæååââãâäåààèèáàçéæççéèæèçÞßææãåæåççæãèèéí俤–£ÖñëíèééäæêçÞÚÞáßäêèèíåæîëçåäêëééìëæêëëëëìììææçèèéêêáâãâááãåßßßÞààÛÔÓÔ‡I=E>;8448<>?::::::::;<<<<===;=>??@?=DDCA?@BDBDCBABA??@@?<;;<7679732342//./12-0343211.224757CKLNQRSRQPQWG" %6[sqnqoklsusvrpolostmqsssrsun`X\_bb`dlpmnqtuwwk[YgolkiilgWUfmfhic^]`[[\YX]^V~½Êº¾½¶ºÃÀ¼¾Á¹¸Å¢racPQx•£®™ƒœ¯¤ª£jJTYRQUPEBB61@RG& 7S_]\[Z[XXY\^\WRUTTWWURPQSVWVSTVXVSRRVVQUVXYWTRRWUSX\WRQUVSLHLU\[_a^[ZXUQPSX[ZZ]ZVXXPLORPQSVN6  "#&&'***+,,,,)+,-63.ASVQKOVY[XYYUNOUW]aa``^[^_^^]`^Y`^PTo~’¦–ˆ¥·¬›¢­­¢¨²°¤¥®²ª¢©³°§¨´± ¡±°Ÿ °µ¨ ¬·«¡·¯£¥¶º­¡¬¶¦œ¹±vUfd^adbb\^^``_a]SOMHIRZ^`ZY\[OZƒ„hbjbbikgehjheeghhghfcfjjglhfkhSl¨Çº¶³²·À´¹½ÁÄÁ¾ÁÄÅÆÇÅÀº¸€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€#(*,3@IM>BŠáîÞåëâäçäßâåáßãäàßãåäæèêçæêéÞÝäæãæçæçæçãæèìîß¼«¦Ÿ¬ÕîìíéêêçèêèåáàÞÙØÙÖÐÖÒÉÌÉÆ×èм»ÃÃÁÁÀ¿¿¾½¼¼¼¸¸¸¶¶µµ´¸¶µ³±¯¬ª°°®«­±¯©¦¬§~J573;855:=?@::::::::::;;<<=====??===FFD@@ACBBA@>@A@>?AC@<888977996441/.01/02021.-156.455768@MNPRTTTTPPYQ,$-Pprmrqjksvqtqssrqrqpquvtrsslb\]^adaegjkotuprrgXZhoijieim`SXjffib[]][[\YW\]U€¿Ï¾Â½ÀÀ¿½¿Âº·Â¢racPQx•©—‚ž±¨¯¤jKTZRQUPE@:11BRI'!$$7J\^\[\Z[YXY[[XUWVVVUTVYVVUTUVWVYWSUYYUSVWXVUUUUTSUWWXYXQSSNGGPZ[_ca`_]ZVVY]^\[[^XWXRKMRPPRTH/ !##%%&%+))),-.-*++-82%+IYUKRXX[XYYTOOUWacb`b`^`^\^]`_[a^PVtƒ’¤”¦µª¤¯¯¦©±­ŸŸ¯ºª¢ª³®¦§³¸£¤³°¢¤°¶¨ž©·©›£¹±¥¥²´¨ 
¬·ª¡¸«rUed^acab\_Z^a_a^SLKILUWWXWU[\QX~b_jdbdffcdddefecgeedeghegkgflgRl±É·¶¾Ã¾¶¶»½ÁÇÇÂÀÊÌÈÃÀ¹¶»€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!((*:IKG.X´ëáÛæéßâæåàáææèåãäååãããäæåâçåÜÜäæåççääèéäåçííÙ¾°«ª­»ÆÇÀ¾½»¹··¶±¯®¬©¨§¦›¢§¡¡žÁÙ±“•š–›™š›žŸ¢£¤¡¡¢£¤¥¦¦²±±µ¹¹µ±ºººº»½¿Á½ÇÀI19=A=989;:9::9:9:9:9::;;<<=?<=??<;=<>>@ACBADDB@@CBA?@??<9535653464/,/241..0/0//14546645756=LNPRRSTUQOXV4$%Howltsjisvqtutpmpstsrtwrrtslb^bdfgegehmmopnlmh`^dggiieflhZQ`gddaZ\Z[[[XV[\T~¹Ê¿À¾ºÁý½¿¸¶À¢racPQx•™¦•ƒŸ´ª²¤kKUZSQURF7.*1@LG'"""*4AYaYZ\[^[YY[\ZXVWWYWVW[XWUUWYYX\WRRVVUVXUTUTSV[RRWXVXZUQPQPKGKQX\^``a]YZ[]`a`___ZXWRKLPNMPO@) % %'%%(*('),.-,.+,01-$ 7V\KMWXXXYYUPSXZa^^_]_a_^[]^__\a]NSs ’¦´«¡§¯®«¥«°§¡ª´«¢ª²¬¦ªµ³  ±²¥¤¯´ªžª¹­ž¨²±¨¦²¶©ª¾¬¸©mXdd_ab`a]]XX\^b\NKJINVZY[ZVY]QYƒ`_kggifcbcddfhhghihfcehkhkgglgQm«Ã¶¸»½º´½¿¾ÀÆÇÆÉÐ×ÓÎÍļÀ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€*((>BCC@?@AA=;989864121/020-/230.-///./245448547846>JLOONORUSNUV9#$Cn{mstkjptrsutolmrurnqsqptto`]eifebahnmihlmdgf_\`efgggefkdSTfa]`[ZYYZ[WV[[T{¶É½¾¼»ÀÀÁ¾¾À¹µ½¢racPQx•ª™…¡³¨®¤kLUZSRVTG4''.;FF&  " /07VbYX\\[ZZ[^]YVWVVUTSTVUVWXXWY\WUROPUWTSVWSSVWVQQTVUZZSSPPOLHILUWY\cfc]bbba````[\ZXTNJJJLPN;$#$&$%(('')-.,*/,,00*$!BWPJSYUXXXTPRWX`]__^_a^^Z^^__]a^MRo~ “¥³«¤©®­ª¥¬³¨Ÿ§³¨¡­³ª¥­¹³¤¡°³¢š¤¯ª¢«»®Ÿ¦µ±¦§¶¸§—ª»£’µ¯rWcd_aa_a]__]\cg^QKJILUYZ[ZUY\RY€‚`akcdihefiieacgkhggffegiijfhmfQn°Åº¼·µ¼ÀÁÂÂÅÈÅÇÑÔÎÍÍȾ»½€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€+**:@3*V¯èíêãßåçâäæâàáâäâäéçãäéâßáåããâÞåéäÝÞáãèéêæèèèçÕ·­¤¢¢·ÒÐÌÐÑÓØØ×ÚØÛÛÝááßâßÝÞßßÛÙåçÔÆÅÀµ°µ´³±°­¬ªª¤¤¢¡Ÿžœœ“›¡ Ÿ§»ÌæçççåããäÜæå©[VgR;978:;97776767776779:;<<<>=<;=<;@??AA@@B?AA><===<<;:8743310220/1231.-00.0/144348425:948BHKMMKLPUTNSW@"#>fzossnkmrtqrrsqlmsuqprqpruqa[ce`\[Zhonnkc]_fd\^ikdfehe`hiZOaa\^\YXXXYWU[\T}¾ÒÁ¿ÄÃù¾¾¿Ã»¶»¢racPQx•«™‡£¶ª°¥lLVZSRVXO>315>KE%#" 3-/PbZZZ][YWX[[YVWTRSRRTVSSUWXXXYWUTTSTTRQXXRQVUPMQUTSWZYVTQNJHILRTV[cjifhhfb_`_]\]YSQNJJILSM6! 
!"$%%&''*,,*),.*04*!+FRPU]ZZYXTORVV`cbbca^_`Z^_]_]`_ORn}£–¦¯§¡§®±¦©²° ›ª·¬£°°°¯¯±¥¡®³¤Ÿ­°¬¥­¸¬Ÿ¥°¨ž¤´³¦žª·£’«¨sSbd`aa]`][`^[_bZPHJJMTYVTVTX\RY}|]aj`agfeggeb`bdeihfimlgdiieinePo²¾°¸¹¸¹¶¼ÀÄÉÈÁÂÍ×À¾Å¸±¹¼€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€*-1;5.7ƒÒæáéßÛáääèçàÞáãâÞáçæßàçáàãäãåäÝâçãÝßâäèåæåêéææÚÁ«š–—·çêæêèæéåàâåêèçêçâääèèçææáί’~|xjgpdefghijjgggffeedchlidiwƒµÒèèãæéèçæë´fi~Z<:878741555555555678::<<=<<<<;;<@@?=<;=?@BB?>>=;<<=;830.1//10-.11111100/.0477544436975;CHKNMJKPVTPU[I$$8[sqqrqmjovtrpqsnmrvtrstqptpb[]\YYYZfjkm_NLcgbY^knefcdeace`SYca\[XXVWXVU[\U}¿ÔÁ¾ÄĽÀ¼º½¹·¿¢racPQx•œª›‰¦¹­³¥lLV[SSV[SHCDBDNA#!5-)I^^]X[^ZVVXZZXSSTVVUUVSSSTWYXVYTPQUUUXUTTUSPPRNTWXXVUVYXTNJIKKMRVX]cggdffcbcb^`]TONKJOIMRG0"%()&'()*))))--42" "/CMX^Z[ZYTQRWW^aa__]\_aZ_`]^]^^PTn}‘¦—¤¯¦¢¥¬¯¦¦­®¤£¬²¥«²©¦¬²®¤¤¯¬ž °²ª¦¯µ¨ ©²ª¡¤°­¡«¹©˜¬¤pTac`a`]`]]_`^^`[QILKIS\]Z\Y[ZQZ|_aichkfdedbcehgdghgfheehjieineOo°Â¹À¼½¾·¾ÆÎÒÐÈÆÍÑÄÆɾ¹ÁÄ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€'/9?23N§ëå×äàáäããäãáäæáãããâãäâÞÞáãâàæåÛÛãäâäåäåáâãìêåçàÒ¬‘Š§ÞéàæååëéäèáçååêèãæãæâãäãÆŠ^XSROLLOSTVY[^_adca_\ZXW[[[[[[ZYhªäìâäçââàðÄom|N976787414444444455689:;<:==::=>;><899752/-,.11.-.//00231.0423335774:65546?99=@=9<=;::97522441/1..120,-10/11./3368743664356558?FLKJJKMPRWROYY3!'OsrjnsnjoruttspoooottstomtobYXWZ^_^cdYH>CMZidV\eed\_a_\\`eWL\naUXXSX[TOUYQ¿Ò¿Á»ÂººÂ¼³¶ sf_KQt–¡¬˜‡¨¶¤¯©nOVVTXXQKHKJECG?#+8) 9Z]X\^\ZYY[]\[ZVUUVTRSQSUTSUWYWVSQRQPQTVTOPUTNQPPRW\^]RRTSNKIFFMMLSVUV\`^\aa[YVPOSQIFJJLG4"! 
!##%%'*)()**(*-))24&"  =X^XYXURRVZa`^_a][`]`^\]\\a[JQn–¥– ®ª¢¦¯²¬¥§­¨ ¦²¨ ¥­ª©¯²ª¡¤²± Ÿ®««¬®³¯¤¥´¬œ£²³¡š­·¤”®©sTbaaabb`^\^b_\c`OJNHFS[XXXVYZT[yrW\heghhdchfdjjeegeggfgigklggkfTn©½±¯µ·±µ­³¹¸½Áº¼Óâκ»¸¹Ã€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€0920)P«äâßàãâßßáäáÞàâáááààââáßÝáäåâáåâÚ×âåäååäçàááäæâÝÝݲ–’ƒˆ¶ÜéâæíéäååææææææææäããèÚŸ^BLMMOPQRRPUYZ[]__^\aio{‡Œ‰„~ytoljZi¡ÜæÙÜèèäëÚ§‚hG67887531/////02354444567876788889;;;:;<=9;;8678722220/.032/,.11.440.12138765545688878=EKLKKLLMPTWUQXX8 &Jnslnrmjosssrqrvskqrqrtsruq_VZ^`dga_TEBJMHXfbVZcbaga]]\[[\TFN^\XXTUUUSRZ^Zs³ÎÀ½¾¹¿»½º¾Åº¯´«s][PYu‘›¨šˆ¢²¤ª¤lPX\[\WYQKIFBFOA$+6(-Q_[Z\_][[[\ZYUUVXVSSUVVUUWXYXVURPPPPRTSSUVVSQNQTVVXZ[WUWWRMNNILQTRRTVSTVXZ^]XRLLPNHFJKME2! ! $('&))*+)')-+.53#":V\UVVTQRVZ\_^]``_a]`^\]\\a\JOk~•¨šŠž¯©œŸ¬±«£§°¨§¹°¦¬·­£ªµ­¢¥´²¡ ®­©¯µ³ª¦¯³°£¦¯®ž™¤²¤˜­¢nWeb_^`aaa^`a][`]NKPNLTYXWXVYZR]xa^efjhecacffcdghgeffdfhglkdcfcTo¦¶¥£®­±°´¶¯±¹¼ÅÊνµ³¬³Æ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€55//9}ÔëÛßèààåÞáäâÞÞàãèáãäãßÝßâÛßããâäâÙÜäåãåãáâãßàâååÞÑÔµ—‹‡˜ËêâåëçãããååååååååæÝãëÆwGILMNOPQRSOTVSXbgdieerƒˆ‹’˜ƒ}uppf_‰ÒïÞÛíèáä὇X=677764202231002422333445;746889:6:<99;<9898645660010123210-,055266326668955764477678 $Cftmnqmjouysonqtqlnprrpoqupd]__ade^LAFLIHKS``X\a^[[XWWYYVRIDIQQORUUTSRU\^Yn°ÖÌÂÃÁÀ¿¿º¼Àµ±¿ŸrkjTPmž¥™Œ¤®¢¬¤nPVYXYVXOIIIFIOC$+4+(Kb_Z]^][[\][ZWUUTSRUYVSPQTUTQTTPMNOPSVXXURRTUQSVVUTWZ[WX\XRQRKGLSOOSSRQUVSVWONIILKFFIKLC.  
#" &*)&*+-,)'*.-380 # !7SZVVVSPPTX[`_[_a`_]`^\]\\a_NSn“¡’Šœ«¦œ¡«­¡¤­¯¤Ÿ§±¡›©¸¯¡¥®«ž¡°®Ÿ©¬¤§±¶ª¡ª´¬ ©·´¢ž¬·ªŸ± mZaaabcc`]\_^Y[a]SJLLLQWWUXQVYNW{u[akcbacdfbdfdegehfhhghihjjdafbRmž¯Ÿ˜Ÿ«§¤¡¡¡ ¬¸·ºÅµ¯¨¢²Ç€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€*5213Z¯åæÜåêÝÝåáÚàããâàÞáãäáßàããßÝÝßßÞãäÝØâäáââàâãßáäãèáÏËÁ¢‰Œƒ±ÝååáâåææååååååååèÛåë´aDVMNNPQRSSVXRLRbe^_bcrŠ‰Œ–‰„~wtunazÀïëàèéääéÔ•U:77654200.//0/12401233333755665555:;88<;7677654331/-0463110244101//25358796677568458=AEFFLJKRTRQSY[WY[F"$=_uompmjnutpoppqqpqpprprttnhc``c_SHEFJLIGIMY]Z]^YUXYXWZ\THAHLLHFKSPTUSRURJY«ÁÉÇÂÀ¼¿¼ÀÁ´±Â¥secMOr›¥¥‘Š§«œ±£oQVWQTWaRFGJJKNB$,2*$(A[^Y]ZYY[^`_]XUSUVUUVXSPRVVTRUTPMMOQVVVUTSUWXVUTUTUX\WRUZYUPMPGEKPTSOTTVTQQPJNJIJIEEHLL>*"! $('&()*)'&(+-69,! #6S[XXWSPORV^ca]^`^]]`^\]\\a\LRo€”¡‘ž«¦£¬«ž¥¯«¥¦Ÿ›§´­¨­³¬ž¡±¯Ÿž©­¨¡©¹¯¦³©ž¬¶®›ž°¸©¢ºªoVdb``aaa`^b_[]aZRMJHKQZ]XZTYZLYuXbkcdffefeehgfefccceegijgiefidQi•¥šš ¦›”––™ž¯º²®¹¹®£›¥ÁÑ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€#84+?†×ìáãæãÞàãàâãâàáààâàßÞßâåâÝáÜÝÝÝââØÑáçàßáâåàßäæãçæ×ÂÁ®‡ÃåîàâçæêääääääääëàéÞ™Q@PNNOPQSST[\VMR`faells‰’ŽŽ’–•‰~xwvq_i§åêÚÛâããéÛ›T7875310//-,,-//000112333266433565878:;988666543332.-142/-12685/.0-.476686898679988:=AEGHHMJKQWVSQZ[XZ`M)$7YsomonikstusnjotttrqpoprqmfbaddWD>IMGEJLIIRVVXYVSZWUWZXMBCIKHJJJMMNRTPF@@HP^y¤É;¾ÂÀ¿À´³Ä¤sfcPSu™§ªŽ¥­š±¦pRY\TU[]PGIMLJK>#/0"#"0LYWYWWY[_`_]ZVUVWUUUYVTVXWWWXWTPQRUZ_VQUYVUVUSSSSSW\YTTY\[VMRNGGRVNGNSSQRRNNQMKJHEEFKJ9$ !!!%&&&((''()*),66(! 
8U]WXWTQQTX_b`]__]^]`^\]\\a`MQk|’¢”‰ °¨š ¬®¥£¨¦™™¥«ž˜£¬¤œ£¬ª› °­žž©­©¤©¶ª›§³©©¶°žœ­±”²«uYdb``aaa`^ba^ab[RMIHKRZ]YWV\WL^†vX^hdifdfdbcefdefhgecddfggiggleRj–ž”—››’‘”˜™£«¤¤¡©¨¡›¥¼Â€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€6,)g¿ëçàæáÛáåßÜÛÛÚÝàßÜÞÞÛÜàáÞÞáÝØÛßßãàÔÕäçàßãâáÞàãåãææá¿´«˜Š“¥ÔîéæäãêääääääääéåìЃLGNNOPQRSTTXYUR\sƒ‰„‡w}†ŠŒš›Ž}uqov^\™ßèÔØÛßÞâÔ•O48741/...10./00/.001122235532488687789987764321252/0430/1145323203668<;779;;88;<:===>@CEHOMKMQVVTZ[X\bU1$1Rpnlonhhptuvtpprpruvurrrsmede^SJCFFFHJKJJIMPRSUVWUUY^VG?ACIKIJKKKLGJVR=5@IQPNQQMORPLIGEEFIG4  #" "')&'*))(*-,+.51$!;W^TUUSRSW\\_^]``_a]`^\]\\aaNRl}Ÿ‰ž­¤™ ªª¡¡¨¥™ž«­œ•¢°¡™¬¤–ž®¨š¨®£¤±µ¦š¤¶° ¦¶¸£˜¬±›­§s[aaabcc`]]_``ad]TJJJMRVWWTQWZP[{nU`ibfe`ahddfdegfdeefggfgjkfeieSlŽ”Šˆ‡‡ˆ‹Œ—Ÿ—•”œž šš¨®€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€(.O¥æêáàâÝÛááÜÜÞÝÛÜßÞÞáâÜÚÞàßßâßØÚÝÝáâØÕâäÞáäâàßàßàäââæз§žŒˆ„ŽªÑêêããçääääääääâæîÆsKSTOOPQRTTUURUeœª­˜“Œ€uz‰‘’™–ˆ{spxc[ÜëØÛââáãÎO99730.---.../////00//0122.03676417764688564320/14211221366640021/5756:978;;;:;<===>><;<@DFLQQSVUQX][Zc]9"+Jmmlopgdmvsrrpprroqootvtsnda^PCBFGFEGKMKGKMRWXY[\[^a[L>KWXf‚¨¸½»¼±¤£‡ZPQKR\`}xlz„~‡ˆdMQWVWZ\TLG<0-10#41'% 2IUWUVX[^^[YVWZ]\XVWYUTVVRSVVWTPPPSXRVYWUUTSPMNQQNQWWOMPSVUQIIFA>=>@JOQPOONMPNKGEEFGFD/ " !! %&%%()('(**(25,! 
#=W\TUURPQUY[a`[^b`_]`^\]\\aYIPm€”¢’‰©¡˜¡©¥˜Ÿ¬¨™Ÿ­ª” ¬Œš°¢– ±©š ­²£¥±¶ªŸ¤­« ¤±³£ž«±¢·§mTeb_^`aaaa`a`_`\QKMKNVXWZXSW\V[vjYek`fid_defc``cddghikieehiedicQj…†Š‰Š‡…‰ˆŠ›¤™•ž˜–•–§´€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€%>‡ÒíßäßÞßáÞÙÚàááàááÛÖØÝáåäàÞàãâÛÝàÜàáØÐàæàÞÞàäáàÚÜäàÝæÞ¤£›‹’„ƒ®áêäæåääääääääÜäî¾eCQROOPQSTUUVR]‚¬À¹ª¢˜–wuˆ•Ž”—Ž~trqf[†ÕîÜÚãÞÝßÀG7973/-,,-/010.---0/.../12236752368534655642231//2430..13452012124768969===;:==<=@>ABA=!&Ejllopgbjutrompplrupknppum^SLGEGFIFEGHHJMKOX`b`__b`TCWZWWVRNMPT\ecZ\b`[]`^\]\\a^LPj|‘¢“†ž­£–©¨›ªª˜—§­™” «˜¦ž“Ÿ°¥—Ÿ­°§£¥«§¡©´«¦²¯ž°°›·§oZbaaabb`^a]^_\`^SKKEHUZX\XZYSPa~mUcmbb_`gdhdbghdeegeefdbccfdfjcNf™‘’–˜–‘”—–£Ÿ«®£›š›§±€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€)f¾ðìáæåÞÚÛÚ×ÙßáÞÜáåÞ×ÙÛâãàÞÞÞáßÚÛÝÜÛÚØÒâæÞÜÞÞßÜÚÛáâÞÜÞçÖº¡™›—Š~ƒ­Ýíæäçâßááàãçàãï¹`CNRPT[WYXPXUPÀÖÈ»À«¤™–wq„—Ž˜ŸŽ~xpxu`€ÓðÝÜßÞßäÉz;563/./110210...../////023666566663432321241/010/0.*-102773001145358:==>?@@?==>@BDEFDJOU[VSXS[eceeF#%:dqjfrlajtsswuppqrqtsmouukWFBDIHAHEDGIJMQMNYaacd_TLC><<@DKMRSH97?GHFTcS;9C=:BJFGRRU[VMME1/=A?@BEKKFFKKFEIB?AJTXYZRLHGC<9;-%&5-#$##5CMPPRVWY`[TUYYZ[WWVTSRRRROMNNKJNQSQQSTSSUVPLMLKOUTRQSVVSNFED>59EMNORURNOTSGHEEHAEK; !#" "$&''&'&%$),*+12) ## ! -DX^RUWPPRPT^b_Z]_^__``][\^_aOTn|Ž‡ž«¢™œ£§ ¤¬¥—™§¬ž“¤²Ž›¦¡–¢³©—¬®¥¤­¯¤Ÿ§­­£¦¶°žœ¬±–¶§lV`ba_`ca\ba`^ae^ONLHJT[[ZZZZRH`|kXdhbficeiebadggfigeffeefjiikn^Th–Ÿ–“˜“”›žŸ›ž¦£ž¡¡¥¨¦ž˜œ¤€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ .Z–¾ÊÓàæäÞÛÝàÝ×ÝÞÚØÚÜÜÞÜÜÚÚÝÞÝàÞÛÚÛÜÝÜÚÕÝÞÜÝÞßâßãäáÜÛÝàáæШ“š™Œ…„ˆªÏàåãâåçåáâæâãë°T==?BCCC=>?ACEEFHJKNUVVYX[a_`dM%!$7`oieoj_fpyvrspoqnlpusqtwgL=CEBELEDFIJKKMONS^ecVGA?@DEDCDKNOROB64<@COa\C47ADB@=@IMGHLLJ<&)7<;>ACIICAFKIC?<:=FORUVTSPKF@:7@80-+(')'&'%! !&+/1467/42&&1+!"" !&):GPQRTX\VPNPPPPOSOKKNQRQUNJKJKNNONPSUTSTWRPQOKKOTRSXYUOMH>:921?LQRSTQNMNLGF@@C>EK: !"! 
!"$%&&&&&'%'*).1.#"$ !0JZZVVSLNTQU\]]_`\]fa^YY\``^aNTqƒ•ŸŒ‹Ÿª¡™ž¥§ §£˜œ©¬¡š¢©™Œ˜ª¡‘˜©¡‘–¥­  ±³ š¦µ«›ž¯«›®®™™»ªjScbbccbaa^]^`abZMNMJLRUUUWY\XK`zjYdlfdfccehhdbeebbhkhccegeeehl^Ti—ž•‘”•œœ §¥ £ ©¨ žŸœ›€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€#&/;Ka~”µÒèãÚÚÞÜÝÙ×ÖØÜßÜØÛÜÝÛÖÕÛÞÛÝßÝÚÚØØÜÝÝÞßßâÚàáÚÖÚßáÛâÛÁ¨›—”‚ˆ†ƒ‰œ»ØèâßâåäãäÞãïµ[F[eNT^\_\RXg¦âñèáÜÑƹ«ª¥Žvo—›œ’}oqo]yÇæ×ÕØÔÖÛ½q9373102321321//.///00-/45234566544652132/.0--031.,.//14335324547998::;>?=>@@@ACEED=?ACEFFFGIJLTXWW[Y^^]eU,"3[mkeni^cruttqlnottqopopsfMCIIABHDFGGHJJJJLMPTNC<<@E?ABBDD?987;BHKOS_gmidb_ZYQKJJGGJ69>5'(-(6@FEFJJFIRSIA>7-,(!'4-$!$:NPMRXXSPLMPPOQSPNNPRQPROPOLMQTPOQRRQPQRNLMMKMPPNQVUMIID97<85<;:;?EDCCDDCB?ADGHHGGGKLLQWYXZW]b`gZ/ .Uloink`aupospjmposqnpqptiREJHB@CFGFCDHKJFMKEA;;B@>>???AFGDJNLSYS?57FWd[C7<<:66F_zq]G<<>>?DFLRTROQRQLJB>CCCOL4!##"##%&(&%&&(**/GSL0!)K]VPUVOMPPW[ZSPUXVVXUUY[[[\]OUmzŸ‘ˆœª¤›Ÿ¤¢›ž¦£šžª­—’¢ª—Ÿ§šŽ›°§”›­«›©°¢™ ­¢›¥²¯ ™¦°¢Ÿ¶ŸfYcbbb`^^a^]_^]]ZRILKKRYYXTRWYN`~t[dle`dfhcdccfifaeddggeegddein`Wm—‘’•“˜œœž¢¦¦§«­«­°ª£žœ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!(*)*.1;Ha„¬ËÜààÙÖØÜÜÙ×ÚÞÝÙØ××ÙÝàÞØØÝàÞÎÑÚàÝÜÞÝáßßááÝÛÜÜÛÞâÝÅ¥Ž—’‡‚„€†Ž¡¾ÔÛÚÞæäæë¬TBPQNT^\`^U[šèóàìçº¶º¶‘cp•£ ¤–{rgh\s¸ÜÕÒÔÌÏÒ°i;87544653122210.--/02430/021111122010/0/./--.00//0243352/152368;;89==;;:;@EDCCEEDCCDGHJJJJKNOLKS[_ZX^daf]5)Lhqjnka_msnknmnpnqpqsmmulM@@@CHPHFKLLWdXD;?OdcJ7:96=Os–•i?7?9(-7859;:;==946<91101578;@CLYafhhglfaa`]\]dabdirmXO]bYXekg`\TJB=>A8CLMJHD>@50/.,+(%#  "?PHGPPMONORMHJKLLKJKNPWVVRKJOQRRPMLLLMNMNNMIGFIKNNKIIKE?@EGHGDKT[YURPMJIB?@>GXK/ ##""#%()$%)*,+).G\eN-"'@[YOTWNMOPX^SHJPLJOQLJKLIIKRITm|¡‘‰š§‘˜¤¥—¦£—›©­•£ª”Œ›£Ÿ™¬¡•©¦œ§«Ÿ™ ®¥œ¢¯­Ÿ•ª±Ÿ˜´¢iUcdb_]_`_[]_\Z]ZQGJGENXYTWSXZQ`zmWdlffgbdfgfbadffbcfhfbchfffim^TiŽ’ŽŽ’”—”“•””—šš¢ž› ˜–¦€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€"(+*'&'/08G[z§ÌáßßÛÕØØÑÏØÛÙ××ØÛÝÜÓÒÙÖÓÜÕÍÓßÞÚÜÝØÙÞâÝÖ×ÝÛÙØÜáÙ¾£˜˜€}}~ƒ}|ƒ”§µÀÇÇÔä¬WERNOT[Y\]U^¢åòêíè×»¬¶¹µ¹¿ og¥§¨—|shj^r°ÕÐÌÉÀÄɤc;:765677415433222210154/-/-.01221000-.00.-,+,/2333332331/153589;;99:9:>??ABABDGIIHHIIJKMMNLORPLPZ^[\]a_daA 
%Aaojjja[dopnnnomnqpnomnvjO>@DFGHCEFFGIIINPMF?:DKHRMAGRWXc^WIDU^P@KTeƒ“‰xw\9.73#/6539;87/9DD8-.60+).368;<@DL[jleb]\_a`_a`_UVdbQI^kpg`cfcT]a[WYVOHGHJJIHJSga?.43%'$ ?98<>;=>?CCBCFFFHJLLLPNLLLORTQLMPMNTXV_[__bgR$"!6]rlknf]cjkormkknmmopnothPCGFACHFCCFHHKPOKKIA@C@BBCCA@EKPLONFLXXSZ`VEIX[cghmvwomeP85;0! +3218;87748CF;.''!%,1358BGHRdjefa_aa^]^[^TWjbLHO\b\TUWV```_\[]_VMHLPMFBWiiP8001&" ""!CaSJTTICDD??CCCFIIHHKONSXUNMPPLONKIKNPLPTWXWTRUPNPPORXTUTMJNL@D<8=CDDDDECHPoŽ„E#! #$$$$%%&&)(*)))-8=H\g^M6 4RWNTVMJMQ[VB-+0,)/2--6<:7667823653441130.10/2+*.0../++****++,/2676544011357889<;9::;>A>@DEACHHGILMLNQQQRRPNPTVRNPQNQ[Z^X^^^fY- ,Yrkhnh\`kpooonmlppmmprvkPDGBADBEDCDHJJJJLJECCCACA?>@DFFLJMVUFGZQT][E@U]lld^bbZTZQ?100+()45-.6947::++(%#!!!!GjYMOOJC;8:?A@GIIFFIJHLGPYSJJKHMOMNTXYQRTQMPSQQUUQSWWUWWSNLLJGFD@=<>CGKMO\e}—„F!%!!!%&$%))%#)&%&)49>;E]^QI3 >XZSQQNINTP;1**,+)*.,**.4;?G[mp|™ ‰‚—¦ž–¤¯¨ž¥«¨¡ž¤«¢˜¥­—ž«‘‡”¨Ÿ”§ª›§«¢ž£®§•›³¨“›¬¬œ¯›mXSTTUUTRRQNOPNNNJIAFNTTQUVSVXN`zjPafba]]d[X[__^^]_`aaaabceecbg[Qg‚’˜¡¦¡ŸœŸ¢¤¦¦¦¥£« ›¡Ž€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€%&"#'&$%)++.560-4:DZ€¬Ì×ØÔÎÑÙÙÓÐÐÐÑÓ×ÛÞàÞÛÙØÙÚÛàààáàáááßÛÚßÞÚÚÞßßÜÕÅ­’w~ˆ…z|y~rXMRVd`b][YS]¡çîäæм°¡œ”Žƒˆ”„e}¢ÀÀ¦…qrjjv|xxrppxtS9==<98677875542221042/00.-0/12-,,*-+)((+.16300220-/255458;9;::;<<@A@BFEBBEFGILMNPSSTSRQRRRROMOPLNW[]X_^]e[0 ,Snlhok\cmrppommnpnlnpppoZMGAAFFDEEFGIJLHKJD??AB?@A@?BFJKFFS]TGELLPWOENW_aZRRQMKKH:-***+-*,028:516:FLKKJHIKKJEGIMRNJMQSSQQTSPOPQONSYZY^^XUUSOVXWRMLKIEDCCBCEFJRRVWq{A%"""$%&&('&$*()()49>;@LLPS=$+JWQUSQMLPI6##%''%&(''&&'*-/7@JQb‚”Ž…š§ž“ž¦™¡§¤œ™ ¨Ÿš§ª”‹›§“Š—ª¡Ž’£¥–”£©ž—œ¬¥”›³«—«¬˜™¯šiUUTTSSSSSSNOPNNMHA>DJRWTWVSVWM`{j[hkc^VQT[ZXXZXY^ZWX\YUYcddegjZSp•£¡£°¸¸­¸¶³±³¸½ÀÅÓÝÈ›„š€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€&% !#"!('.LhpqrO48Jg‡±ÎáÛÔØÜØÙØ×Ö×ØÚÛÜÛÛÚÛÝßàÛÛÜÜÞÞßßßÛÙÙ×ÑÐÑÖÖÖÓÉ·¢”Œ†ƒ€}v„yfbgVQg~pkc[WSU“Ýôìä˸­¡“Š‡€‹•g~¥Á¼tieeimkkmkiiqpQ79<;97777742474211/2/+,//.,)*+,./,%'+.23339730..131477558;:;::>??AA@AABDHKKKLNOOQSTTRPRSRMRQOQSOOV\\X_`]d_7 
*Hhngnn]]ionnonmimlkoqpqpdVKDCFHDFHGGGKNILLD==BEADFB=;BIGIPW\^R?DGPZREKRQPHCFF?CCLdmkjigfecbfbei`LBBCA>?IUZYPEA@@?=C^XI=;:Pm~‹”£¤–‘›£š£ª§ž›£«ž©«•Œ›¨œŒ“¤›‰‘¥¨•“§®¡˜ª¢’™²¬šœ¬®—–°šeRWVSRQRSTROPPMLKG>>DHQYWVVSVVM`{jVbfb`XQTPVY\_[X\Y]ZQPXaeddgkmYUw¢«§ª°°µ¶µ´´¸¼¿¾¼ÎáÚª€˜£€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€$#! !)/&H£Ú×ÑáÝ®a1.CWi—ÆÚ×ÓÓÔ×ÖÔÒÏÎÌÌÐÐÎÍËÊÉÉÃÃÁÀ¾½¼»À¾»º¶³±±³´±¬¥¡¡¢“‘™|y|oVT_QU~šmd[ZXQ}ÉïéÚǸª •z”qk–¾ÏÄ }tihdcejjghfdmnR75::98777775551/00*,,++-,++))**,,,*,/23432320,*+05666789::;<:;@A@ABCCCCEHJMNOPPPQSVVUUVVRNQRQSTPOT\[Y_`]bb@ %=dqejo`]hnmmmkjiopnnnnpngZOIDBEEFGHGIKNLOLD>@DH@CEC=:0*$ ,9;=979:99:>KK9%$+28?EC@?CGNZfYKFFGC>BHB5.26=IDXukJ5)/$" -Znf_XWOB7872;NRLILMKJJOMLLMPRPMNNQSUTRZZ]^^`ca]_^VNNRTQUWSNKHFFEEFGHHHOROU]xŒo2$ ###$"#((&%('++-).@G=9;EEDP\H.$!*JZVSQRQE4)&#$!!$$!$$$$$%%%))+/2=O^t‘¥šŒ™§¤•ž¤¢š˜ž§Ÿœ¨®›Ž›¬ˆŽ¤›ˆ§ª–•ª³¥› ©¢“™®«š™°²™—²œdRZWTQPQTVRPQQLJJHBAGJQXUUUTVULa}k[egdaVLMYZZXVVX]ZYVUUZ`eeefjl[Uu–›•—–Œ–™™šŸ¥¨¨¥ÄÒ¹ˆ˜œŒ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!!"""$*.<áÛÆÎÑÌ©oD76H\…±Çú¸¹¸¸¶µ²±°²±°®­ª¨§ŸŸ  ¡¢¢¢Ÿ ¡ ¡¢£¢ª««§£¡£¥“‘¦¿¹œ„xkbSOSSmš¥‰rh_\\SgªÕÒŽ·©™—›‹}‰‡iw²ÖÓÃyrokhggfhkhfckoV:4888887767542011--++,+))*,.0-+)+/001010003/,+.1228889::;:==<'+1/2<>D^ovxywspqstovvZ;7A?=@GOVajaQKMOMKRWPD@A=EUpŠŒŒœ§Ÿ“™Ÿ˜—œ¢š•Ÿª–ª”‚«§¢¥”“¦­¡™ž«¤™›«¨™–±³œš³œhT\YVSRSUVUPOPNMKEDAFKRVSTUTVTKa~kXbedcYPSMOW[WY^\]YVVWYahggfej]Tl…xtyzzuy‡‹‹‹“¥ ‘€ŠŸ‘h€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!#%%$&).##Y¢· «œ ©š|Z<>EUq’ª©ž£¤¦¨¨¨§¦¢££¤¢¡ž›œŸ ¡¢£    žŸ™˜–’Œ‡ox– yjic]WTd‰¨Ÿype\]XX‰·Ãº¸´¨›‘˜–Šƒui‡¾Úη”toekmhdglptpjpt[<3678888666311252+0+*-+*,.+,+,//./,.0210-+210013559;=<:9;=<>=>AA?ACDDEGJNROQRRRSTTXWUVSQRUNQQPQOMPY[Z[]]_eS'/Yrfdjd[gnoppnmkonmoqpqocXPHBBEGECEILKKMMHC@BDCFB@B@>AGOUIDV]W[cNG[[F=>53..3204-33387/*,)$!#5779<;78;85MLKKKJJLKRSWZPPebba`][^b^[[]\\]\^^b^SPTTTUUSRSPKPLGFHKLMMKL`ly{V$!#!"!###$'''&''*(%(@[\C>76BD@LM;6-,HUINXS@."!+.-(#"##! 
!#%!((&')-5?BYyˆ‘’‘”–—–—šœ“•ž—‰œŽ‚¤ ‘Ÿ“’ž£œ™ž¦£›œ¤¢—”©ª™›­šlW][YVUUVVXOKNQSNC?=CIPVSUTTVSJblV`ddcYSYTQY\V[b\V]YMKYcdfgdch]Rh~tu{}†ŒŒ—¢£›¡©ž‹}‡™’rV€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€#'*'$'&&# @ˆ®Ÿœ§¦œ¡ª‘fAEEKf‹›˜•–˜šš™—–˜˜—•‘Œˆ…ƒ‚€~{ywvtwuomnmhkgb`_]ZWUMR\ZWY\`[[VVo‘Ÿœ“{qj]^\Skš¸¸²®§¡‹‹‘Ž}mv•¶ÔÕ¹tkdmogcgkirngms]=156789765:40/011,-*,/.-/.,,*+22-,0010/-,++.00/16;:=?>;:;>:=>?AA@BEDHLMKLORTUSSTUTUUUTSQQRQTRPQPORW[[X[^_dZ+ +Qnlfga\hononkioqnkmnmnof_UGCFEEDEEGIKKKLKEAACCB>>AB@DLRaR?GRT][ECZX=9C>ADIKC;DIINWaf]OMQSV`t‰•›’|pv~yŠ•l7''"  >eja`UJ==@IG40BTVWTQQSTV]]_^LGZebbb`\\_\XZ^^]\ZXW]\RPTRSVVSRSSQQMIIKLKIPMM_o~yH "# !""$&%&((&%*($*Hd]H?<47BBFNB1+KTNQUF-"!"# &1:<72/+*)'%"! '%()'*,2APbz…‡……‡ŒŽŽŒ‘‘‰†‰…‚‡‰‰ŽŽ”Ž“”’”š•—”“–”Ž™˜‘–¤”oY^]\ZYWVVWOLORSOF<=CGOXVUTUVRIb€lU_ccaWT][Y]^Y]a[Y\\XWZafeedei[RmŠ‰‚ŠŽ‰–¨¨ª«ª¨©¬¯‡{|‘™|`\€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ %*-($&$$#-a‹„‹…ˆˆzqePCM]jyˆpqqqnjfcbccc`\XVUUVWXZ[[RVUONQPKJKLKJKOTJNXZTRUROOTSXv”› –vlk___UW€¨­§¦¨Ÿ‹ˆˆ‚rm‹ ¥Áί†rkkedklfejsogls]=====<<7<>?AAADECFLNLNTNQRQQSTSQUWWVUROORPMNMMPU[[VY^^d^."*Jkpid^\hppqqnmkonjjlnqmjh\HEICCDFFFGJLINOIBADFC??BA>BKSXTJDBHSP;B_\KPVY``XLEQfsiVLQSLFJDD@775*&&$ !,563:JG/$/84055:IOFFECA?>=;26:;A@BJPV]cVLMQQR\q…šš‰pjwzp{k3'&" AgkbTEFA>=FJ:3APVZWUX]`Z^cf_NJVb__ceb``a[[__^^^[Y^^USWUPUVROPSTTNGDFIJJNHBTlw;$$ !!%&$%)(%$+)$+Nh[K>C;2=EHJF-+MRRTM7 %/316BLLIG>>=:5/)%%'$$%+-.4:D_y‚…~‚‰‹†—”‹‹Œ…|v~ƒ}yƒ…x†……„ƒ…Š…ŠŠˆ‰ˆ†‡‹Š‰’œqZ^^^][YWUSPQRPPPL=AGGNZXSTUVRIc€lXbec^SP[XY[[[_`[^XX^^Z]fcbeikXSt‘’ŠŽ––›¤ž›¡ª©œwžy[^a€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€"$%)*''"$$ &?ZgVVVWWXYYX[SEG[jmpdRSb`UTQRSTTSRQFHLOPOLKOOOOONMLKJJLLKLMNTVPLMONVLOSUlŽ£—~rtfTW]Tcƒ•™Ÿ¥–‡{~yivš¨»¹˜{qmlhhllhhlnqjks_?26554332253223111.-,,-.--//00.,,//.-,---,3213;CEB@@>=<<<<9?A??@ACFFHLNNQUTSQOQUWWSVXWVUTRRQPOONNNPYZW[^^^b>"Eklgmb_hrojlllqmjkkijlllbUMGDHDIIDFMOLOIKLD@DFE@AFD?DPX]\ZSBATS<O[NHJLMKF<5JL30:;;;BINU[[QEGNONWlwŽš“…xsvxksc5%/" Fc^ODBFDACGC:=JZ\]YUX]^X\cbVKNX_cb`a`\Z][Z[^_]ZZ\`]USVUTRTVTUWUPJDEGHKNKMRUe|g3"" 
%&"'&'&'&'&&')'5XfTHRSA.-8>FE2/LNKN;&"*/:KQOKDAENVVUTSQLFA5,%%&$"#))-16BZomd_gomimwyww|~|{}xv‰‰€|‡ŽŒ‹ŽŽŠ‚‹‡€ƒ„‡†ˆv^aed`\[ZWTSWUMORL@@AISTSVTVWNIdyfYacb_UQYXZYWX[`c\[`_XZbegfbblaUkŠ‰Ž‘–’”“š‹z}|‘ˆ]RbW€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!#$)+'&#"! #1FTLLKLMOPQNW]WKELVcvoYW_ZORSSSQOMKGHKMNNMLRQONLLLMJKMPQPPPNRSOMOPNPJLOQb‚š¡œ‡uqgYU\RWj{‡ŽŽ„}xtpwŠ›ž¯¼®ulimjjmmjjmmpklsaB3444433331563/111/0110/..,,.0/-,-1/../00//5<@DEB=;<>?@?>=;??@AABFGHKOOOOQQSTTTUWWVUSRTWVSPQSTSPMKNY]XY\]`dD"=dlimc\erpkmlkljilmnnpnpk^QIFGFGHHHJLNLJLJA>CEFDDEDAGOXagibOENO?AS^^adeeinrz}ptu|ˆY:DGFEECCGB=950*''+*3?=0.7@?<@DABK@07<:98=DIS[^LBEJKLSdqx„‚rccgew„f7"&'!$$(ANJHGDBLIHHB=CPY[ZY[^\Y[^a_WPQU]ca]]]]^_^\ZYYXY[Z\[TQRQTRTUSTVTTNIIIIJLKOQTfv[+"" !%&#&'&'&'&'&(&&=_gXIU[M5-03@HQRPSTUVOIcygT^cc`TNU^XW[\[\^\[Z\]\`ifecdk]SkŠ†‚ƒ„~‚~{ysr}‰‘›sW]VU€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!!#*,'$# "%->QJJKLPSWXW[_^VH=9@_tpeZTTXXXVTPMKKJHGIMRUPPOOQSVXJLOPQRRPMOOLORRMOONPRZw™›xmjd[]YY\fw~tmns}“Ÿ—ž³µœ€oihgeeggeegimikrcE42233444424530.143320.--..,*,--.0../////05>EGECA?>??@?><;===?BBCHHKNOPPQPQSTSSUVWRW[ZXVTRUTSQPOOONX\XX[^`gM!" 
4\kileXaqrlmljljkmnmmloroaOGFEJFEIKIJMHJKE<<@AACGHFDEGKWafcRCCJFKXaddcbdhqywrszƒ‰œ‹ZABEFHGCDMJGEDA>=>FBAC@:>HFFEEHC?ATJGHHIKICMXD.3:;;;AFINSSJCEFHLTarŒ‘ŠtcbxqwvX4%'&%3;9@CKTVSTTSUQJbyiZbdb_VR[`YU[a`^_]YW[_ZZcgddgiYQm‰voqvvsnplht‡†uŠ˜{[\WYo€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ "*-'"# !'.AVPPNOPSVXXWTU\`UD4=UszhXW]]]\YVSQGFDDEIMOQQQQQQQQLNMKLOPONNLLPUSNMRPQTSj“—›”}kmoh^ad``o~~towˆ•œ“¢¯¦ŠwlfiffegffefdgegnbG51113344552011015//./010/.++-//..-./.,.48@FIEBABBCA?=<<=>>><>BDFJJMONNRTTQRRQSUUSWWUTSUTSTSRQOOMMPVYWY^_]iV( !.VkiieV^orlmmkmllnnljhrxxiTKJHMFDJMKIKNQOF@BDABBCDEEIMDKPRTPKLOPT[_`_]eeiu{pm{ˆŒŽŠƒd8)1455634:A>=?@ADHGGILJGHLGMOMLKOYRFAEGHF@-9[bKDB<@AGMORUSNKLKNX`jpˆ–€na_c[[YF0&##93/,,4ANSU_ipmaUJC4*"#').33K^U>9?CB?CGEDHJHJJJIJLMRRRPPQSTPSTRNNPQLNPPLLPRKNPQUXWSRKKNLOL@;>AIRTQQTPURJazjV`cb`UOW[YWZ]WWb`ZZ^][]bb_cij[Wt—„|~„xv}yxyszšŒe]ZLd…€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ !*,'""!!&+:NMKJIKOSVTUSQV]]YH?AUmwlYRSSSROMKCDFHHGFEFHLNPONMRSOHHNRPOONLQVUOIQOOSM]ƒ˜›–ƒokqrccecbo…‰’™š’‘š¨¦“}rg`dccdbccdc^`^`f`I600123344110.1440210000.-/,*,.-,-0/..07@GEHHEBB@>@?=<<>@BABA@FIKMLOQOPRUTOQSTUVTQSUWWYYXTTRPMLLMNOUYWX^`]h]1+QkhfeU[lqjlmkklnoqpnlv}{hSLMMIHIJKJKKOROJNWXTRNLLLKNTUYXWZZXX`^]]^]_ce`_gh_nŒƒeG813.1/,./--,+-00.-.-/3565567<<;@BDIC::@B=5+.+@JCCEB87;?AEJIJIIDDKMQgol`YTPPGDIJ<-$.FN@./FPJKOLGPLIB:?JOT\YV_^YZ`\[]\WVX]aa^`_ZYZYXXYZZZ\ZZ[XQPUWUWXUVXUPMMNLHGIJPO[p_5!!"#$$%&'&'&'&'&)%*6JbiZHO``M>3#!1&>L52OPLJLJLSZLBBB;7973-((,49;H\jlfdf[J6)"#)1->QPD<5915;6488:;<CDCACGHGKNRQNLPURPNPUUQNYUT]mvl]RDBIMPMC;?AGQTQQSOUSIa{kV`cb`UOWYX\[NEJTZ[\ZYZ`efbfjhZVoŒ†{tx~|tkqkflp|“ŒbgcP_{€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ 
!)+&#!!%,,4HRPMLLPTW[ROSSMOWQPPOP]kkbbcca_]\OLIFEFGIFHKORTUVSWTKKTWTQSRPQVUQLSRTXRYt•›˜Šxmnuriffjx†‹‡”–’“’–¤¨—tl`Z\]__]]__][[YY_^M911122223.//1231////.--.00,**++,/..05BB@DHJKNQSSRRQPQTVUUVVURUWVTTUUTQMJIKNQLT[YV[`_d`;(HhjecTXkqjklkegijlnliu€zeTLJKDJMLIILMLNJIT`b_d`^`_]^bhhgeeedcca_``]]ce\WZWWq–‡hRD=A@;9768:93//30..,**+'--,/1./1/7A@:1)05;73536--1669<:67<:BEKRNIKMFD@;BLMRZYW[\Y[^Z[_^WUX\_[WYZWVSVYYWVWX[\[[YSRVYWXYVWXVNLLLJGGJILO`mT-"!!$%$%'&'&'&'&'&%.33EVSJRcbNF>,#+%!184DQNNJABLRQG@@@>;;4542-)'&%(3FT^hrrhZJ8("$("*=>;:<>=;;:9;>><>B?CDCGIJLQZ_XOJ>>GKOLC=ACGPUTSQNVSGa}lZbdb_VR[ZY^TBGL?>OTQVYY`eaefdZVhqi_Y]cdagaWZt‘{d[[TVw‘€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!"')%$ *2,3LTQNKKNSV^ROX]Y[blpymJ>YvŒŒŒŠˆ…‚€ƒ|rf\VRQ`\UNJHIIJRTOQZ[TSWXSRTUSRVVY^ZZf†•˜„uovzrnor~‰‰ƒ‡‰‹—›˜ƒrkc\ZZ^aa^^aa^[ZVTZ^R>222211110..11/.0,-/.-,++---//-.105>@A?CBA?>===CGHHKKMQPQSTSPPPSUVTSUWWXXWUVVSOMNPQQONLLS[YU[a_`aD!"=engaRVkrjjkhjlllmonku…j]QGHDHLKKLMMOPIFOVVUZXUUW[_bc`]\]`dh^aaad`\^b\]`\]kz[JC>;=?GCFEDEEDFFDDEEBABDBCGE=873;<:<>==627@A>:41?KA8747018>?@A=;:>FQQQXZWUVZ][Y[__ZVUZ^\XYYXYY[[YVUWZUWUTVTQSYWXYVVXUKIIIHEGLIIObgH&# !%&$%('&'&'&'&#+5/"%6@@Nc_KJI6)*1'0CLLLKEAKSPOKD>=><8@;64651-+(&*4CQZfglpeK1"'(*$6s¤§ˆƒ‘™†€Ž—ƒz|‡ƒtsufbliXPV^RKLNHA=CE@;=:694;?>>BDE9;ELMPM@<@?BKRRRONWRFbkT^cc`TNUZ][H>SYTXVSVXW]^__\htqpuz{{|‰‰–«¯˜qR\hNOo‚Œ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ "#'(%%$ )1'/Mbbdiq|‡’‘“˜ž¢¢Ÿ„T?Skffeb_ZVTX^hqvvtrkaRC:78:>KRPT\[QTZ[VRSTTQSTY^\XZvŽ–’Žszuuwut|ƒ€zz‡“‘‘Ž†rhe][^]Z^^ZZ^^Z]ZURX_UB3322100/32.,,//.-,,-.0001//0/038;BIJGEHLHHE@>@A@CA><<>ACBEGJKIJPQPQSRPQUSTTTUWVTUWXVTSRPLNPRRQONNRXXV]b\\`I!#4bri`QUktljjehiihjmli~‹z]SKELHFFINQNKNOIELQRSQTSPPSRNRQTWURQQOSQORPLLKFHGCFHB=?@=HQHAJLLKIDCJFDEGHFFHHDEJG=67=A>=GNNMK>8;=><607LNB>7154685563:6635=?BEOPLMOOQXUXR9$ %.;826O]UKGLUK<799DOOQTWXX[ZVYY[^_]WQRZ]ZYWVWTUWXXYYZVXTSWVQPXVWXUUWTQOMLHEGLJFOca? 
$ !&'$$)&'&'&'&'&/3.'$(02F`\GJO>+;A'9NNJLFBFGJPTJBA?;;@85236886;91&#+6;NKThulTA% &',U‰œ‚—ž†~“£Ž‚‹£ ‹«¢Œ‹¡¡‰ŸŒ€…ŽŠ}tŠokqgN>GD=77;946?JKHPQE>BABLTUTNOXREckYacb_UQYW_YD9?JOL[a__cdf\Z]YYZUZ``gv…š¤§²¯œ_TejTO`‚‘~t€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ %#""$)"/65AUp‡ «§š’”š¨´²¨›Œjb]UHAEJEBGK@558.4?LTUPKD>758=><>KVXVWWUPTVUW[YRRSTTSRQP]}’†y~ttvy{|†‰‡Œ‘‹Šˆslkb]d_QXX\Y_c\^RWWQSa[C63+,52*/-11--00-/.'050867424;@CHMLNNHHJIGFHF?>BABA;9?BACCDFILMNNPQQRSSSRSSTUVVWYVSSUURONNPQPPPOMLRXWVX[[bhO$!2Wmh`VReqliihflojhklj}{`EFGAAKNNJIKLKPNJGNX[XQVZZXVVXTUXXURPLRONLJKJFF=9999::AGMLJNPB87855873JbRDF;9;;:;?A@C=;<>EJJMZ[]XR\\UQXQ5%%!! )' 5[`PJLTUK>98>JNNMMSVSV[YWW[ZTTUPTZ]ZXXXUWXWVWXWUXWVWYXTQTTVWUTUTMIIKIEFKJFVdO-!" !$%$$'$$&(&##'&/.'&*+-9J_\NOH5<9EPZ[YZYWSWYZ]a_YWWXXWVTSWiƒŒ‹‡zustz‚„†‹ˆ†‚ztkg`S[g^ccgbafelcee``kjY=3,-3872/.-,+,.16BIW[X_^SJEKPOPUMKMMHGIHHFCABA?>?==>>BEFEHKNOPQRQONQUVQMRTUVWWVUVSOLNRROORTRPOLJOTWVUW[]]fV,!.QljaOSfngjmkdgjkkmprl\KCA?@FEIKJJNPPOOKGHNPNPQPOQVWUXYZZXXXXWXVSRSRPQMLLFEO[bjkr~uYGPRMILKIMPJGJLKJJJHFFB>=@?ACHLJGKI=578657:6G\SJOI=>>?DJKHAAEIEEKP[hqkb_]]Thw_0%% "!5Y\NJOTSG?>; ,LSKLNHDDDITKHFCA@>;>>?@;8;A@CO\]UB..,(-B]mohV8*)'Aqwnnsronlmlpvslnx€vt~€yw~‚}v|†x{Œ}œ†UGPONLFAACKNLIJMJD;@BEOTSQQQUNHfiW`cdaTNVVWSE88:59BA==>?><>A?@CDGGEHJLMMOQTUSPQTVVUY[YTSVVSQPPSSSSUSRPMLOPOMSWYXY[^Ycc@">dpcOQdnjhggjedhihjobMGIBBHEEHJKLNOPMOMHDDDCEFJKFBISNOPUXQFBFDIMOVZT]fg_WVapt|‰‰v^LAFFFILKHIDFJLKGC@B??CDB?>@BDIMJHKD:0---/20'1SYBIUULIPUUUVTUUUVVTPUVY[[YXY\WTUXWTRYWUUVVUTWVWXUTRNJIIJIFFIGNaZ2"%$##&'&')(''(%#$(22.*(# #-M^_LFSE5C>#2RNPMNJDCGJLPHCFF?>HLLMNMKMRRPPMHKNKJH@3')9J\hnfJ*3Zjikjgkqpqssrtwuqtz{wy‚|w€zsqqrpsszpTGRNMPOKKOOQQMLPMB>@?CMRRTQMQOId|hUde]ZVRTVSPI?>@?98666:=??==A@A?>?DHIFHKJDF^|„vtldx€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€"%!"  
#'6>;9:788BDBQ\PGY_[U?(!%(*$"(&$!$)))*(%')'&'-1004997?@CFGKNKHKH@8455799:>NUG>?AEGDB@>;=;>?>?BADLPW[[ef]d`B% &&"?[ZQX_]QABID;?ISXNIOVUUWVTVWUWWQTWY[[[ZYSVYXWVUUWUTSTTTSUSUWVUSOJIJJHFFHEP_P*##%#$''&')(&&'%#%*0.+*)$$,-4F_TGTL887$3QOMPOMHBBJPKNKE@95;A?ACDCFKLHKNLJKJNPOG:016>O\ikI.7SONOQWZVWXZ[\\\\_dfceikhkrvqnrtpmorxxiVNQOPRQLJKJLPMIMMD>?>CMQQTQOSNFa|lY_`a^QMXRNNKD@BA<==::;AEHEDEG9:axncYQIDBEGHOd€Šqvodv–‹\€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€$%###!!%3:5..39?B@?EFFIIINLKJJMPSTNRVVTSUXTUTPOPPNORTUVWTPRQONOQOLNQUXWX\b`adL .Xn`HUikfggghiihhkmmXHADEBDHDFIMNMOQQNKIIIFDHJJHHJHCMW\[TGBMQ@BPRUUJ^”µ°©ª¬²¯«§¬³«‘zUIJNIJKDFJKGCDHKHECFGFGIPRRTVQLOOMJIIJIHMKBCC833-4::7777448=?EJIKRQ[[Vflo|mB###C_]U[a]PCBHD=AHPVPKPWVSUYUWWSVYSPU[]\[[ZUXZVSTVWTVWVTSTUUSUWVURMMLJHEDEGCT\C!#$'%%''%$&'%%&&%(-0*$&(#%06%-RUEMPA2)$9SOINRL>>PP>7MN>::4/1//3457:;56;9:?A@CEFC=3+-9BK[f\KNIJNNOPLNOPSROOSQRSTRQRUPRVVV]^W``_``YSTQOMKJJIGEFMPJKKD;=>FPQOQRQVOEboV^bc`SLUTOOSMFB??AA@=?DIIJHEL??rœƒ~xoe`[]gxˆ~cmxiy¤]Y€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€"#!$$"" *---'-@LHFFGILMU_F&%& &#(,+(&!&&)(#$''(%%(/3310.1421>PPLKNNLMPSPQVXTQQPQSUWXXWQX]WTf{ƒ}{{{wutppqniihaYm€u^t­Ì¯‡ŠŒ™¢¦™š¡¤ £¦¢¢¢¡œ—’˜¢¡¤§§¦¦¥¤¢¦£–¦Œi\NKPMKMMJJFDFDAADBBBBCCBBFHHHHKONNNOOOQRTTUVXXVTRQONNOQRSUVUVWURMOPOPQQPPRUWWVZ__`eT)*Rk]NZjhadfhgjkjijll[IAFEBEIDEINOMNQRNKJKKKJKIIIFDHOU\]XPA?NSDBNUPJH`‡Ÿ¢§¨§­¤ª©©®¯·È£^FUNLVRUXYVVZ]]YXYZXUTVVWWXZTORQQQPQPNLKF;95*)+-4;<===;A?CGFGKKJSWUXboswfM7(!#Da`V[`[NCCIE>BHNTRLMSTRTVVYYVVVRSV[^]YXXXVTSTVWWTVXXVTTVVTUWWUQKHIJIHGFFGWS3$%'%%'&$$&%$$&&&*0/(#%% &8E3(8IHGLH4#ARNKMQB(/QR14KJ97;5.-**,.-.051354574232/39817,/7;IYY>:>EDCFIKKMONLLNONPRPLNSUQSSQSSJKOQSVQNSVSNOYaZMKDHLHHKF<<>FPQORQRVNFe€kRae_\VOQUPSWQGCBGD@<;?FKMOOFLD>f|smpz|wmlsvljv€~f¬œeO[€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ "# $-*(*#(@SHDP[XRVO+9D2)53,-0,*,-&'+)"#)*&,48854361145:Phkecghfgknjlruropmnqtvwxx|z{|}€{tngbchfabd`\[ZQ[pqgy£Ç½“ˆŒ™›—¡ ¢Ÿ›¡¤ž˜›£¡›¢©¡¢  ¤¤¡¢¥ 
¨§Ž¡‹l[OJKJMOIJKFDGE@ADFCADDBEIGIJGILKLORRQPQRTUUUUTTSONOPQRSUTTTUSPQTMQSOLMPRPRWZXUW[\_gZ/(Oi\OWeifeeidgjjiikmXGADADJIFFJOOLLPQNKLMOPQPQQMHGKOLQPONDCQRHBNYNELg•¢Ÿ¤£¢§¤¤¦š˜¬¥wbiiehicddbdeb\[XVSNJLPOQQTVQMOKKIGEEDB8=86433.39>ACDA=@?DF@BTeSNTRYd_aZ[N7%$ "DaaVZ]YLCCJF?AGNUTLHNRRVVYYVUTTUWUW\^ZY[XVVXXUSRSTUVUTSRSQSVWWSMLLKHEEEFMXJ'&$$"#&'%&($##%&',2*'&)'"0IMH.!L\ih\V]\RNRRLBIC>CB@GA63985796789;;;:==@DC@CJJEINMQSLJOOQVSR[\YWc{‡sWKBFOLJIB>=?;76637?@>?==A:8CECIJIMV`b[S@?DJMOJ@9=@FPSPPPPQKHdxcU```]QNTVOQH?bZQ`jieccghhhkigkhWJGFEHGFKHHMRSOLMNKIMONMOQQRUWZ`^`bcgnrrty~ƒ†‡‡†z{xpnpnijje]WWTQRQPONMLLKJIJLNMLMKKKIFGJMEDNSOKMJKLMMKJH=93/-,*)').6989<@DJOPNLLPQX_]\hzƒlE% "I`[TZ]VHCCHH?=ELQRSOKRWTUVXWUVVTUTX[ZY[[YZZYWVVVTTSSTSRPRPPTSNJJKMJFDEEFNK3! &$&$#%&&'(($$&'.2-"#("!>YWDR`D#8PGJ:!%FOIVM18@5FWC/88422.-1//641Fbs†€y†’‡z‹™}‹œ•ƒ‹š—‹Š””…†’–~~ˆ}ry…zjemk\W`cXOPRLEEE>=>;89:<86:@B?;69AHKMI@8;>EPTRSRRSMJfzdXbaa]OLQRUVD7FQOPPQQQRUWU[[[VLPdmdYV]ebZ[aZ`pij‰ÂÏŽQ__Na€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ #"!""!)2+%-#4rŠ]t¡ŒH"2TF3/563521/-,,,,*)'$%)06987544442230J¶©›ž¡£¦©¨¥£¦«°±­¥žŸž ¢¡ž ¥­¨£¡£¢ œœœšœœ¢œŽ‰ƒ~ƒv‚¨Õíà±¼½¥‹’’‰–Ÿ£¤¢¡£¢ž•˜œœ›œž¡žŸ¢¡¡¡Ÿ¡ª¥–™ž‘bHFHGIGFHIDADECB@ADGIIHGJJLPPNOQSUUVVVURONMKMPSSRRUUQRUUVTVXTSTQPQRQOMNPNPTWVSY``X^bG8[ZTdmidcdfeffihgjfTHGFFGDCEDDHPUSNKNMLPTUVUWVVWUUY^`abdghfeeffa\Z\]YVWYXXY\]\XX[\YWVVUTTSSTUXZ[WRNTQNLHEGKQCBS^WOOPOMID?;820....--.-3@EB@B>EHGHLMISRW^`^epjO6(! $G_]U[[VLFEGF@@HMOQXSFLWTSUYYWWVRUSX^]ZZ[\\\[ZXVTWVUUTSRPPQSTTSSSNLIFFHIINA) !$$#%&$%&()'%$$(01)%&"1VfXCOfY((JID>,1LQKUA.=6RlP8:6<21./2127:6G]k{ƒyqoqk`htrgn{wim{~uu‡}|Ž“ƒŽš‘„‹š—…ˆž ‹‡–œ‘…Š•Š{{…‚oo{d@;B>977998;>DGHLMG<>?DMQPQPPRKHdxbV```]PNTQSS?7ITQ[]``^[XWYVTVWVY^pw}}ƒ…„ƒŠŒ—¹ÞÌ‚W_]L[~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!"!! 
$.5///>™a‡Ã{+WM2-4026,********)&$'-6;>:86424567706_”¤—–—š §ª§£§¥¢¢¢¢ ž–••—”‘•‘“—šš•‘ˆ‡…‡‡ŠŒƒƒ‚~yvvxryŽ»ÙÒ¹´·¬Œ†Š“š £¡Ÿ Ÿœ•˜››šš›ž™š¡¤¢¡£¢¥®¤•¢£…QEHKGHGGDFDBDEBAEDDFJMLLLJMSSPRWUUUTSRPONNMOQTSRUSPQTQPURNQSORURQQQPOONONORVWWY]_X\eP%2S\WdjhghfdcdehghjeQGFD>;5406=?DPA?OYRHFCCBA?<989876530///4<>:79)2JQQS8'@@PnlM83=3101212697DXct~xyjcc``dbbafg`bkm\^kpffpsslnwunp{|os‡‰zw„‹‹”…‹¡¥‰ˆ¤‚D8C@=9779:FGIHGMNG;<=BLQRTOOQKGcv`T__`]QOUWTRB5:BFPSX[^`bdheff__^X[fw„‰“¦ºÄÁÓÙÐÜßÀ€^[bMMrŽ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!! ! (:/#.3 E˜‡h˜ACF-34220+(()*++*('$'-4;=<:87533467:26S~•”Œ‡Œ’”‘ŽŽŠ†ƒ„…………ƒ‚‚~xx{||~‡‹Žƒ‚‚†…ƒ†ƒ€‚€|wqlnnq†—• ´¸³´–‹“› žœœœ™™šœš™˜šœ›—š¡¦¥¥§¦¥¬¨¢«™gCEHFFECDBFFCEDDDGHJIJKPSMPSSRSTTVUSQNMLLLOSTTSTUSSRRUTPQTSVURVZTQQQPPNOOLOTWYWXZ^Y\dX-+L]]aeilhdefggiijkdQHG?3*#"!(17AJPPNPKB?>@DADB@EHJKKNTWY\afhmoorurkjlcWZgg[^_\URPMHLLKIHGFECBA?ACGIHFFFDBCFKDCMVSMJEGIJIEA>973/,*++)-268940*++***-./6CLPOPPQRMIdv^Vaaa]PMSONMA762259=ACDFGJOVTJN]eillkjkf_[accb[X^dYNL`ƒ„d€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!"215/+",h¢w]‚j/7>0-/0,'*+$'&$$%'*,:;=;9789777765327AdŠ–‡ƒ†|€‚€|{|zyxzzxvonquutw|wxyyyxxx{vwy}ytvtonprq^^d^ZVh ½µ¹µ–†Šˆ”žœžš›œšš›Ÿ¢¤£¡¡¥ª¬ª§§ª°¶¯„LFHC@CFEBDGEBEGHIKJJLPQPNVPNQQMMQNMNPPPRVWSRSUSRQUTTSQUWSRTVUUVURQQQPONOOPRUXYYXW^`[^_<!;UfiheecbcaddfgjjaN>70/5:>?91+).;GMLFBDFEEFDCGLHFJLJJMMJHIMGHQSOPWV^^VVbkkpsspnnkgdcb`_]\[VVUQMKKLIFDB?=?CHKNOMF?:531/.../147899;<:@JSWWWYYXY[\]aeddefkrx{uN%#!.Tc^Z_YPHDFIFBFMLIQUNFIRVUVXXVWXVSTY]\[ZYWXYYXXYZYXWVVTRQVVVTPMMNLFGFCFID1! 
!"&##)&%%&%%%%%$*1*#%,")')-+&%()&$#'.6;=:8788658999740.LmŠ›¡”„‚…ƒ„‡†ƒ†€€€‚ƒ‚{|{|}{wx|y{|}||}~ƒ‘ˆ‡‰€}€ytuxyncf]^]c™°¯±¹¢†Œ‹†“žŸžŸ¡¡˜š›šš¢§©©¥¡£ª¬©¥®®¦­°ˆRCA@@ACFBEHFFJLJIIKLMLMNPONMNOPOMNNPQQOPTSRSUSPORTQOSXTQTTTSSUTTUSPNOQQOLPTXYWVXZ^bZ\`@!5Plgcee``hfhghhji^I:68>EEEDDB;.',7LKC;;<<>BB?@DFFHKJLOOOU]`dmx~€…Š‡‚…‰‰…€tvslfb\USRPNKIGFACEDB@ABC?:8547=><97545712468:;<>?@@@BFIHIQ^da_bbcda]^djlmkgfkqtwP."!!/We]Z`YOEAFJGBFLLHNWQCHUVTVXXVWXVUSW\[XY[\YWWYZYWUTTTUUTSRNPUSKIMIFGFCIH=-!$$%!%(#!$&$$%%$%-1' '9KH@NULIC:+,75* .J:,01;==<<85:=9844ALHDFJDHMJILJLMPQMMONRPJFMROJFMPORUTRWUZ`]_fjfmhentoknoqqpptxrttpmsskgoqwzcHEEEGIJIFCFMQMJNJ=7;>EOQNNPQSNJcu]Wbaa]PMRUUTJB>9<6654358:>;:<=<<@?<>EJLOTWZVW_\TUJGMftebw€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ " !+<3-,2)6uk_rgA4,&*(%$$#$&)''+39<:==<=<?>@GE>CNMJKPPIJOOMHLPLLOOLLJJNNLMNQPPTURQVVY^a``cljkonklqoutswnUBNOKGJIINHHKNNOKA<<=DMNMPPMPKJgu^Raa_ZNMRRPRNF?8:<;9524;BCBDC;6@MWPJIHEDE?@CFHHGECUnud\{¤€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!$0:50.2-!%6IXe`S:$''()(%$%($)18;<:9;;;:987676;;/2Y„‘ŽŠ›¬š€‚†ƒƒ~{y{zxwurqnkkostwy}ƒ…†‡‰••‘‹…„~~}yuszysqtŽ¨­±ÃÛ‰š¨°´¬•—šž¡¤¦¨¦¦ª°³±­±°²µ¯³±’fQCA>>BBGGHHIJJJKMNNLMPSNQSQONPQQRSSQQRSQPQQPORUUUUTSTVWXUTSQSTSSSRPOOONORVXXXYZ[^\adN'.Rnrh`acc\bfgfdllY@>FIFIHDGQRNJ7'-HJFHNLIFMMLMQSSU[XTSSUUTVUSSQMIFLKKLKIGHGEDFEBBE?ABBBCCBEDCDEFDBHGDCBCCDDDGGBBEECEFHHDBDCDFFFFGHFDM\```_```aabeikkkjgirzoA# " 5\dYX]SJIECFGACKMKLQMACOUPUXYXXWUSUXYVXYWYYXXWWWWWUSTUTRRSSSSPLKKJHFEFI@/"!"$$##$##$&&&$#%%//)/:FLNF?CFB>AB=-$.6/!&)(,/787@CD?;8/.982266215759<9<@?ADBBEFEFIKKLMMMOLNOPRSQPQQQRQOMMNNOQQQRTTWXXXVOLKMHEHGCBGGIJJKI@9;>FOPLMPMPJJgu^U`^][QMPPPVTKA8:888768>DADFKH8Bf{pfdd_WRTNGCFLQS_qt\S{­Â€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€#&7:66761,$2LQXX7!*%)'! &&!/5;<:778<:7558;==78=Kn“‘Ž‡ˆ‘…„‹ˆ‡‡‡„ƒ„ƒ‚†Š…}zspqtuu|†}‚…‰‹Œ‰‘–“‘‘Ž‰‹‡„†Š‹†ŒŠƒˆŒŽ›¯®´Ê²‰„Ž‹—§ª¥¦§žžž¡¥§¨¨ª§¦¨««§£¢¤¢£ ¡¦›ŒpUE>AFFEFGHIJKLLLMNOOPPNRUSQRRQQRTTTSUVRPPRTSQOSTTTTSTUUUTRQSVUWRNORROLQTXYXWXY\_]]dV0)Mpre[]`dc`eggfljT==?@A5&&/1'!! 
##(+.114@B=>;<:69932<916844668:=?AAABFIKKLMJLMNNNNLSSSQQQQONMPPMMRTONGFT\WOGEGGGJH@9;>FPOLMOMPJIhv]U`^][QMPRRVOE=9=<=;879>BBCDKI><978968:;;9756=Tq‰˜—Œ‰‡…ƒ‚†……†‰‹ˆ„„ˆƒ‚ƒymllsz~||€†…†‡ˆ‰ŠŠŠŠŽŽ‹ŠŒŠŠ‘‰‰‰†‡‹‰‰¨«°Ê½“ŒŒŠ’šˆ’ •’‘““‘ŽŒ‰‡‡‰Š‰ˆŒ“š¦©­ …`B?EDBCDFGIJKJJKNQQMJMQSSRRRQVUTTTTSRPRUVURQPRSTTSTUVSTTTTVVUSQQRQOPSQTWYXWWY\`][c`;$Ffia`b^bjc_aihgdV>=@?>5&!+)&($!&(*--7AKJ2 &$%).;RZO5+Dah|Šyqvwkbhh]R^]OGJIC=::=;>>;647;746678;=?@@@CEEEHMLJQOQSQUWSZ[W`v~lZDCFHIKIA;<OixqmlkjmrqmhghjkjladŽÄΠp€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€#"0<<787585,&%$IvW#$)(*(&*5AFB@=<=;8587555689So‹™–‹Šˆ„†…‚…ŠŒŠ„ƒ†‰ˆƒƒ|uw{‡‰ˆŒŠƒŒ‹‹ŠŠ‰‰‰ŽŒ‹‹‰†‰ŽŽŽ‹‡…Šƒ„Š„†“—ª·Á rtvmmpkflqkheeffc`achpzƒˆ‹˜š’—žœŸ¡¢ž€WCBB@ABCFGHIIJLNPOLJNORTTRRSUTSSTTTSOQSSSSTTSTTSSSUWVUUVYWUSSQQRQOORORVWXWXY]`^ZcfE >Yffghbbif^_ijfdX@<@CEIGHNHKLSK>LcQABFDGFABA@A@@ABCCCCCCDDEEDFGFCHFFFFDDEIFGIJGFFGIIGGGHGJHHIKKJHKJIFDCDEEEBBGFDCAEEDFGEBEDCBDFHIGBFTbc_][_ccfhhdhgknhcod2"!$C\^UTVQQNHDHKDCKOOKJHHKOPQQRTVVVVVTSRSVYWSTUVVUTSRPQSUTRQNQQLIIIGFCBDE;,"!$#!"&'%&')(&%%%#4QYOA@A?>=0#"*! /1& %'%)(.AWV;! %%';Yg[7Ah|”‚ƒ•›‰“…‰”ˆwŠ€sqwsgdih]VZ\QIKKDA@CA;8;<:88;<:7?<::>DFBEGFNbfXI>?EJKMI?<<=CMNMPNMQHHhv[Raa_ZNMRNTR=5BKNFKMKKMLJFDKOG>J\d]VSSU]fec`]\\^`[~¬ÜëµqX€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€#!)7::6:6244/-!#Vƒ[!$8#&+9WsviaZPKIHDA@=;>@CEGMMJOQPI?D_TGFGEFFCFGEDBCFHGEEEEFFGHFDDGFDDCCEEEFHFEDFGGFEHHFFHKKIJJJJJJIHJJIGDDFHFBBEFHHDBEDCFHHGDDEFGGECECES`b^_\^aegigdiikmihs^/#&HbaVTRNOKGCHLEBHKPPONLLORSRRTTSRRQRTURUVUUUTSRRSSSSTUUTSTOPOKHHHHEDBEB5&"###!"%''()('$$'*BtrL?AAJE==A>:=DA?;8;3"&%&0/,#!'$*3B\[; "*,(9YlZWg|Ž„uŒ–ƒyŠ”‡|„•|‰™…Œš˜‚Ž”‰‰ƒ}‚„zpowtgckd[UVVOIHMH<9><798857>A?A:=CIJKF<9;>FOPLMMMQGGhv[U`^][QMPPUP:4HSWX^`][ZUNKNNRRFPr‰wttstvpomihp~‰´¿ÒæÓŒY^€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€! 
!/8899798448:#,o’Z"$<,+Fxšžšœš”‹ƒ~}}~vvx|„Ž—””–šš”’—‹Ž’“’‘‘ŽŽŽŠ‰ŠŽ‹’”’‘Ž‘‰‰‰‰’‘‘‘‹Ž”˜™——˜ŽŽ’’Ž‰…‚„zƒ˜”’¨¸·‹NFICGLJHKMKNPRTVY[cgmqsrpnsuu~†Š‰~›½º“gLDDEEFGGGJLNLJILOONOQRSTTQRSSRRSTRVXWUUUUVWWVVUUVUVTTWVTURSRONNNMNQVWXXYZ^_`^dkQ& -PjkejiaYbddffibI9C@=98>=1" 1@A%-,/EXX6'#+%+?QfnhqŠ‚~—‰€Ž–Œ„ˆ’Œ‡‘‡y~}~•‰€Œ—‹†“–‡ˆ™™ˆ‡–•‡‚‹Ž…„‡{jlt[???<8:<417=?DHIJE;9;>FPOLMLNQGFhvZU`^][QMPSTM84AJLV`impsnfZ_[[\U]w•–Ÿ­·¼¾À¶¼¾º´¸ÇÕßäçÎYQe€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€"'156;8+1<7366#1‰¦\$!/)Wˆœ™’‰“—šš—•–˜š˜—••–—˜•’”‘‰“–’Œ’ŽŒŽŽ‹‰“’•Ž’“–™ˆ‹ŒŠˆ†…†‡‡‡ŒŽ‹‰Š‹†}u|{~†oy–Žž¶½‹IIWKIQRRVYRVZ\\^bfafnv~…‹Ž–˜–—”•šbt¾¶hOHHHHHHHHIIJJJJJJLNNMORSRRQQRSTTSSSRQRVWURSUVUTTTTWVTUTQRRSQOOQQOOSVXXXYZ^^`_dkS* (Wg_Ybea^`bcede[@6=EFHKGHIPNJJGB=DOMJKHEEGJJIIHHJHFDEGHFCGDCEGGGIIGFGFEFHGFFFEDFIGHIIIJHEJIIIJJIHHFEEDC@>?BBABAADECBEGDBDCEGFDB@@CCEQ_`]a_]^eihghgiiggmu{U,!"  .R[ZUXUQQJDGOMBBKOOQQQRRPQOMNRTSQPQPQRRUURUTRQRSUVWTSTUTSSUQOMIDDGEFEE>,!%%! #$$&+('%&(*)(3k™€M;::;?<;B1  8F=$ #!(58BZM4#%,,;Yqpr‰|†–Ž‰“™‘Œ”‘—›”‰”—‰‹–“„™‰‚‹Ž‘—˜…„˜œŠ†”˜‰¦–„•£v@8D917?95=CCFHHIF<<<=DMNMPLNQFFhvZRaa_ZNMRPOK<4866FLPQZgnmlopkdb^Z_bkx„’¤³ÂÑßàÙ×áìäàÌWYfT€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€$-/-/2-..58464,>ƒ«m-3Ddƒ—•’‹‹Ž‘’‘’”—–””–˜•’’–—“”˜¢›”’”•“–””••““•‘Œ“™“‘–”–—‹Œˆ‚„‚ƒ„ƒ„‡‘š£ „™““ˆƒ~~ymuvŠ‡us”¤”“°¹†QIPQSRRRVXXUUXYXY^ahr{‚„ƒ‚‚‚uwwrvoSk©Ç±˜wOKMKFFIF?BEHJKKLMNLNNKMSTQQTURSWWUWVSRSUUSUSRTSQRVUSSVUVXUSPNOQPMLNTXWXZYZ]`^Zf^.)Ne][b^bc`babg`K98EGDHJHJOSOJLI?9EONLMKIGGHHIIIIIIFFGGDCEEFGHHGFECEGGFEEEFFFIKHDDGIJJIIIJHFIMJFHKGGHFBBEF@CDDEEDABBDGGGEBAACECDGFDFDK]b^`_^`effjonefmcfxsG& # $ =[]VXZTQPHCFOPFBINONQSQRRPQSUTRQRTSRRTUSTWPRUUSPOOUQPSTSSVOQNHDEGFDCFA/""# "%&&'),&!-'#)%)C~˜ŒY4>6<@?=A@=?>==8:<:A9" 3HA5! 
#$ +=FLC+%#!-6Hcloˆ”€•—Ž‰˜‰‘—‘‰•’‹‹‘’ŒŽ““Œ”šˆ™–ŒŒ–•Œ‹•š‘Š’›”Šš•ˆ‘vB5@???=9:89COOMQLOSEGktXV_^^ZLJPOPKC=:859;>?@ELRV[SMXgmnnj^Yadbgv€Ž™¡©´¼°’lW[bZK€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€(-+(,0/-,48455.3„ Z@hŠ”¡¡™™š™›’”–™››››Ÿ› ¡ žš›žŸœœ ¡˜—˜˜–“’”–™™˜–•–•š–•—““–’Œˆxy|€}„Š›®¹®—™˜’ˆyyukktƒ‹‰zu¥•Ž®¾„FCOOOLIHJLNJJNPQUZijkhfgkpdebkor{uu‹´Á®œ~WKLF;35=DLIGGIKMMJIKLJMRSQRRTUVUVWWWWVUSSVRPPRTTSVVSRTUUVTSPNORQNNRVXXWWXZ]^\Ye`5%Ea_\^[`a]abbaVC7CKHCEFFKJPMHIJA9BMKHIHIJIJKJGFFFGEEGGEEFDDDEFGHHGGGGFFGHGFDEHGEFHIIJJJIIJGGIHHJJHHIGCBCBCBBBAAABA@BEDCDEDDB@@ADFDFCK[a]__aehhghiogiofj{tC% # &!#Fa[WXWRRPGDGQTGAJSNNRSQQSQQSUUUUVXVSQTVVVVSSTUUTRQRUUQPSUSQOKHGHFDDDE>,!"#!$%&(*-(")*)\‡–aC><=;B?>:;<9A:$!"0F?:5 #'/9GI;(! ,-:Xkr‡‘ƒ•”Œ‰‘—Œ‡–‘‰–“”•Ž•–ˆ…Ž•‡š–ŒŒ––ŒŒ•œ”•ž– šŒ”žvA5??@>:9AKIHHFFKG;99:BMQNOJMSGJnrUV`^^ZLJPNOH=9<:4679;=@ABCACDCKYb`fffifbea`_bfhge`Z]hfXRV€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€$.-'%)/2-*38456,F‘ƒQt¤° ¥¤¢§¦¤¦Ÿ¡£¤¤¤£¢ Ÿ £¥£¡¤¢¢¥¦¤¤¦¡Ÿ›š™˜—–••–——™š™“•š˜˜—’Ž’‹†{qosw{yvw~…³ÖÝÇ«›—Š†|tsqicq„ŒŒy ™Ž§¾ˆGAEFGFEFIKMKNTYZ^chlppoqy}{}††ˆˆƒ¤¾·¤—€_GGE>3.5>@@ACFGGFKKNNLJKJMQQQUTRSTPNPQOOPQORSQRUTRTUTVVTRQRRQRQNJKRTUXVTW[\\ZWdc?!<^e`^[`a^acc`RB?@BGDFDIX^]^_cghhiihmgiogm{o<# ! !% .SfYYYTPSQFEFQTH@JTPPQRPQTTRSTUVWXXSRSTUSTUUUVUUTSRVTRQSTSPPMJJJGEDEFD7' "$$%&&&&'((%'),$%EyŠ“—k7:C2).<=$ /1%'35?A6*$%(#.Pmw‡†”‘ˆˆ•‡…Ž•Š—”Ž–——˜‹ˆ‘˜Š“œ•ŒŒ––ŒŒ–””ž–—Š”ŸwD;E=7;CFHIIKLGFJE58;>9;8558;<;B?BA9=EBBJPU]][]fjnnkiiknie\IDd€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€*3-'&(,4-)39567%V~dcž®›¢§¥¤¥¢ §§§¨¨§¥£¢ž¡£¢  ¢¥¤££¥¥¥¥¦£¡ž™–”•–•“”™™–”–”‘“—•••ŽŽ‹‰†~tostspmntz†µåõã¿¡•‹‡‚zpqpjao„‘ƒ|”š”¡´ŽTDKMOPQTW[_^afgdceorvwvwy{xsw{tsvmp¨Â¯˜‹{dJFFHE=9::>BEFFGHHHIKIIIHKOQQRQQTRPMNNNNOPNNPQRRPVVSRSTSSTSPMLNMKKPSTUTSU[[YWVadI3Yfb_^`^___a`TFB@DCEGGHMOONMKF>:@KLJKHFEGHHHFFGHFFFFFFFFDEFGGFDCEEEFHGECIHGHIGEGHHHIJJIHEGJJFGHGGEFGEEEB>BB??CC@ACCA@EFBCEDCCBBDDFCGT]\]`dgfgkmljeglfmxf4!  
$!>afX[YQOTQFFGORFBJQQOPRQSTRSSRSTUUUTUVWWVUUQSTTRSTVTUSOQWUNMLLMIDCFEF@0""%$%%&''()&('&*"-[Šˆšq43E:3('4<=A<548=;7C="&/"9?B8"(8.%-6<4$ -,5?<-###+QqzˆƒŒœ˜Œ”™Ž„Œ”‰˜•ŒŽ––ŽŽ–˜‘”š‰™•‹‹•–Œ—š‘Š’›”Š™“†“uD>B==CHHHIBGIGDIG=9;;BMQNNONOBHotVWb`^YMKOMSRH>>??@<87:=??>=?>86@MGD@AFIFEFP[a``di_ZNBEc“·€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€.2,('&'3/,59677-BSl˜³¯¥¢¦¥¥¨¢ž¡¤¤¥¥¥£¢¡ ¢¥¤¢¢¤¦¡¢¢¢£¤¤¤¤¡œ•‘“Žˆ‰Ž…„ˆŽ“•’“”’‹‰‡ƒ}wso{umebbef…¼ëóáÂ¥—‡‚xrssk_k…˜š„z“œ—¦†XFTUVTRRTVRPQVXX[_voga^\[YYUYXPMKFh¬Ã­”„yiQIFKNJED:=ADDCCBEEDDFHJKJKNOLMPQNPRQRRSSUTRQTTTTQQQRUTSSQRQOOONKOQVWTSSRZZVUU]cQ$*M`^]]\W^\ZZZOFDEEEEGGHKKNOLJHC<@MNLKHGHIGFEGGGGGGGFFEEDCDFHHHGGGGGHIIGEFGFHIHGJGHIIIHIIFFIJGEGHJGGFDDCADAADEBBFD@ACBACEDDEDBBCBCEDEQ[^]`ehggklkifhkfou\,! ! &#Pk`Y]YPPTPHGJRPECLOPNOSTUSNSRQQRSTTWSPQVWTORSTSRQSTPTURQSSRLKLLIDCFED:*"%%%%&'(((''%&'"1  /Wsy…Ž‡™—‹†Œ•ƒŒ”‰—•‰••‹‹“—‰‘–…•”Š‹•–—š‘‹‘›“Šž•‰”qB>FA@FKJHGBEIFABEB<9:CONLOLMQELpsSVca]XMKNKOQME>>@===>@AAAC@?D=,I…ŸwcUMHCBAABFGGFGIKUtš®¯€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ .0)(&"#0117:797:.C…«¤¢ªŸ£  ¦¥›—šœž¡£¤¤¤¢Ÿž £¤£¡¡£¤¢¢¥¥£¤ ›•’‘‘‘’‹ˆ‹ˆ€…‚†ŠŠˆ‰Š‰€yibdgvoe_]^^^…ÄððÜÁ£”†~{wzvk]i…œœ‚u£›“œwMCLMLJFEFHGDDJQXaiaXMHHIHE==B?=>:>¹Â¬•‚yiOKHJMMMMBBCDFGGEDFEGKMKMMKMNMORPPSTSQTUUTVTRSRQURQOPSTTWTTQNNQSSNOTWRRTRYYUUTY`U)%E[^]_ZU]]XSOIDFEGIJKKJIIPQJEIE9:ILJHEFHJGEEGHGFGHHFEFEDFFFFGGHHHGFFGGGGEFEFGGGLFHIJHHHJJGGHFGHHHGGGDCDBEA@BB?@DAACEA>@B@>AB@BFEBDCEN[`_bfjkjigejjklhqsS&!"""&.bpZ[_XPRTPLHOWRCCLPSOMOPSTQQQQQRSUVYVRQSWVSVVTSRQQQRQRTSQQSNKHIIGEDE@2% "%('&&'('&*$#)()PˆŽ‰’qB6<;87-/=+ $2;4,83$8/+A>&9B% .IP>-&'35(")5WrvƒŠ…˜—‹„Œ–„Œ”ˆ–“‡Œ••Š‰’—Š‡—Ž‰’›“ŠŠ•–Ž˜œ”•–ž•‰—ŸtFFOD=CKMIEIIKH?>@?<99DONKOMNQEInsUVca\WMJLMLNQNHDDA@??@ACDKLGIE4Q•µ²±«ž“‰€rne\USUXRf›®³§™€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!./))% #-46::9;7.2fž¬¦¥¬¤©¡—š™–—™œ £¥¦¦¢ Ÿ 
£¥£¡¡¥¥¢£¦§¤¢Ÿœ›—“ŒŒŒ‰ƒ~z~€€}|~vaX^bkhdaabccˆÆðïáÇ¥‘Šƒ~~€xh`lƒ’’|rŠ¤œ˜žtE>EGGFEFIKLIIMQTY`OKHHJKJGINSNV[Vc—À¼¦Žwp`JJIGHLNMNJDACGKMEHHIOPNOPPQRSTUTYXWUTTVVXVTUWWURVWVVXVTSSTUSRRRQJMRTSRSSWXTUTV]V0#@Z^^^YWYXPGFDCEAEJLNPOJLPSMFGD9=IKJJHHHLJGFFFFEFHGEEFFDGGFFFFFFFFFFEEFGGHHHHEDHGHIIIHIIIHHGFHIGEEHHEDDCCCCBABBA?BDA>@DC?ADEDBA?ACCDM\cabeimkgedjkkkgqnJ#"$""% ?poX^_XRUTOPKPXSDAKPRONMLOSTRRRRQQTVTUURPQSSSSSSSSSSSSRQSURNNLIFFGGDE<+!"!!%%$$&)++*)$'+);iŒˆ’l:2A@13<1 ",+$3;.",)$76$)@@'%A>! )CWTF3/2)$.;9Tq{„‹‚“…‚‹•…”ˆ”‘ˆŽ˜—‹Š”™Š’š‘Œ”ž“‰Š•–ŽŽ™””ž–›’ˆ– uJLIFFHHFHLGHJGDFE><:;CNOLNMORCGltXUc`ZULIKMLNQSQOMMICABDEDBFDEE>V†¢ž ¢Ÿ£©¨©ª¨Ÿ•’˜Ÿ£«³´°­¬¬€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€#02,+%!&*5:<;:<7/L•¯¤›ˆˆ’¤©¡¡¤¢ ›Ÿ¢¤¥¥¥¦©«ª¨¨ª­¡¥¥¢¢¦§£ ŸŸ¤§¥•ŽŽˆ……ƒ…ˆ‡†ˆŠ‡‚„…‡~jbgihiiigffgÆèêæÑ­—…ƒƒxeep…„vr‹ž¡“£}G=BDEEEFILONQVWSRSVWWY[]^__edYabXjœ½´ …icUNLE?BKOMROJDABFIONGDGIHKJOQQSQPSTSSVYYYZZWXXUWXWWZZY[\ZXUWVSQQPNLRTTVSQTVWTUTSZV5:V\ZYUVQNB:>BA?BEHHJPQLNMPQKGE@FLGEIJIGHGFFFHIJEGGEEGGEBCEGGGFEEGIIHGGGBEHJLIGJHGGHIJIGEILIEHIFEFJJEBBA?>@CDA@ABBDEBABA?CCCECBD@CCDL\ecccfjjggjhiihdniB!#%" "$%LxnX``WSWSNRNOTREAIMLMRRNNQRSTTRONQSRUVUSSROPRSTRRSUTSSSSRPOKMKECEGGE9' #"!%'%$&)*)(&',*)L}‹ŠŽˆe>4999:>=, .!&6;*#" %59))>=!$F?! %9S\UG=+(;J=Qt‰…†”††‘™†Ž•‡Œ“Šš™Œ‹•›ŽŠ‘—†–’‰Š•–Ž™š‘Š’›”Š“‡•pDGGCCJNKHFGIKHGMI;:<ADFBFHILLLOOMLKKID@FIIIJHEGGEGIEDFFDFDDFFEFFFDBDHIHBEGGGHGDCDIJEBFIFFHIHFFGIHIKJGFFGGGFEDCC@?@BDDA>ABDDBCDCCACEA@CDECDEIV``fhgiookjhijegr]. # $)3iwcXd`SQROPNJQSLECHPMNOOPPPPPRRONPRSTQUUPRVTVRPQSSSSSQQSTRPOIJKJGDDF?/$  %&&%$%'('&",-"/cŠ‰ˆ†’€S936;96;;5/$,067&"",130/<<$9. 
##3HTUXE4KS;Pw~ƒ††‘Ž†ˆ’–Œ‚•…Ž–‡˜•Œ“˜‹‰—‘‹’”‰ˆ“–ŽŽ–—†ž•Š–’†•sLGG@>??@>8=A@<=@?:9;>K`“³¨£ª¤¡ ¡¡¢¢¢ž£¤¤«­¨£¥ª§¥ª­«ª¦¦¡›˜šš˜š–’•šš•’˜””—››™ž–‡}‡ƒ‡Šƒ~„Œ™µÚíݽ§¢™——•‘’††‰ŠŠˆ„Š˜ ›˜— bOTVSVXVVXYXWY[[ZY\\\\\\\\^]_Za^Zz±³¦¢ƒRGJIFFJJIJMRPPTVSOMFJJJOUSKGIJIFEFGGJJGFHJIOQSUVVUTY_a^\\ZW[UNKKLKJJPWZVRRUVUPMNLOY?!,GWWZ__mbG5;A> ".82*" 5JS\QCRN;Os~ˆŽ€‚‘’ˆ„•†•Ž‡–‹•“Š‰’›‹‘™‘†Š–’‰‰•š’ŒŽ—“”ž“ˆŽ™”†’˜oKJUOMONONGJIECGKE:<<;BLNLLJPVFInqNR__^XJJRSJHMPRRRPSSQSZ^^_^[\UEYŒœ˜”“”–˜šŸ›™™–•šŸš–˜ž¡¤©€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!/5474.,+168::60RƒN?DD@AC@CB@BAACDCDDHUacfjihjgdgijhksf@"#"'#/Z‚{^SnjYUXURNPNMMJDFNQNNQROOSRQRSSSSTVWTTWRPWWTSSSQQRURPRSQNLJIGDCBA@3(""!#&&('&')*)(,/(+Nz‹ˆ†Š’vH449?:6<=:9,/=3 ($%-374/63,AICGF4#""!0DWVINI?<?CDBCCA@DCDDGTaeekmllhehjjdjs]3#" #&";BKKINLJKEPrqRYb\Z^ZTOMNPPHEGIJJHDFOZ`^`^`WCSƒ˜’’˜™–š¢Ÿ—¤žž¡œ ¡¢¤¡Ÿ €€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€$0436622++/5:71,7DD:9:7554?==a’©žž œ› ¡˜›š˜šžžž£ ¢¦§¤¤¨«¤ žŸŸ›ššž˜˜œœ™˜”“•”“—˜•“–‘‘––‘Ž‘’’”™›˜£««©¡ž£žœ›–’’“Ž‹Ž’”‘›žš— “eOWXTUWTUWTWZYXXZ[YYYYYYYYX\Z[]Qb˜±›†dJLMKMNNOONMNNOOQRRQRWXVVUOFFC??BGKMMMNOPPSVRRRPNKHFIFDFFDCCBDFDA>==8::7:AGHLMKMRTZfc9 JjbZ]T=3>648;BMOKJOQTRPLLLJLKIIGFGIGFDDGIEBEFEFEEIGDBDDCDEFEEEDDEEHFEFIHFEGFFGHGGHGEFGGFHJIHGEDA@?@?>>?AABAAA??CD@>@DDBCDACCEDFSbffjjhhghmjibgoR,!# "# &Nx†rZ[|y`QTRONPKKPNCERQQPPPQQRQPRRQOQUTQSSPRTPPMMPTTRQSRQQOLKLMLGCEE?7& ###''&%$%'('&/)&?l†…‚‡‘`606<<52:;7<7)+5'"-$(1334/-%!4CDEFKRXYQB2%")"?U]SCRo~‡Œ„ƒ‘‡…“‹ˆ–†‹’†‰”—Œˆ“Œ‰š•‹Œ”‡‰””‹Š“–‘Ž•›’Š–…”™nOV[WXXVWXTQVYUTRKAD>;CLKINKPQCMrqNSc`Y]`b`\YZZXVXXYYYXZ\]\X[[`ZHY‰˜’’—š—™ Ÿ£Ÿœ¡§¥¤¡š˜Ÿž€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€".1/21--+),496/+<>;79=;6;7>@9=bš¡˜œ’–Ÿššš›¢¦¡Ÿ¥£¥©¨¤¥©©¥©§ š™™™Ÿšœž™˜˜”›ž—’“—˜’‘••‘‘–ŒŒ–š›š¡¡”•Ÿ¢ššœš•—˜”’”–•’œ›–Ÿ‘bJUVQSTRSUUXYWVXWVXXXXWXXX[]\^[Rl ®ƒ~cJIOLOQPPQOLNLIJNRPMRRRSW[XRSLDABEFEEDFHHHKNOOPPPOOOQNOSRNPVQOLHFDBAIIGHLTY[^_]^badnf? 
:[YURC1?ADE@@EDABD@@CC?@DCCCDDESbhcggfhgeijichhI($"#"2aƒqWX}eTVTRSSOLPNEDMQQQOPQQRQQSUUUTUTPORQOOPURQSTSPOTRPPOMKKLKFCEFEKNUSQQURA0*!"'9NRBNn‹‹‚†‘†…“Š‚‹’Ž†Œ”’†‡‘••–Šˆ˜•‘˜–Ž‘›™‹”–‘—˜‹‡œ”‡—œnMS[XYZWXYVTWWUUVMBB?=CLLJMGORDMopTUe`X\^^Y\^`^[^^Y_[XY\^^\\^^a[HY‰Ÿ–‘–š™––•—œ¦¬­«««ª¢œ ¡¢€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€$16574-++')395-*EJB8865:<:99=5E­¤  ¡¤¥¡¤¥¦ª¬¨¥ª¨©«©¨­¯©¦©§Ÿššž££œ—š›Ÿœ  œ™™–—™™•“”ŽŽ’‘Ž•–œ¡°²¤ £—–˜˜•—–Ž’ŠŠ”•—˜•Ÿ’bHUVRTUSUWUVXXWWXXXXXXXXXXYZZ^XTv¤¯|r^KFKMQSQQSPLOMKILOOKOOQTUVWZUTTTSPJFGEDEC@AELKIHHIJKOMMNLHJNMMLKJIIHMMPQQNMMQSRSUPOW`D1P\WI60?DDKF4*.5@AA??AD@ACCBCCAECBBBCCCBCECDRcjdgggkhcbjihkcB&"!#!"Cq~~qVUu}hWYUTVRRQOMGFKRQPPPOPRQSTTTSRPSQORUSRUTSSTSQRSTRPPPNMMHHEBEE8'"" !%'('&')*)(,&8^x~€ƒ…’†Q0564:32>=597 ""(0/%+2.2>6#&>GC>Ro€‰‹ƒŠ‹’’…ˆŒ†Œ”‘“ˆ˜”ŽŽ‘’”—“Ž‘™–‹——ŽŽ——Šˆ”—Ž‚–žqNRYWYYVVXVUUSQSTK???>CMOKKHKLDQqpW^f]X]^^\WW]a]Z]aa^\]`_[WZ\[_ZI\Ÿ˜”•šš–’žž¢¨¨£¥¬ª¯®¨¦¥¥§€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€.=DEF@62+&(394,)BSG4;>68=:8;D87h¥²¨Ÿ©ª¢§ª¬©¨ªª«¯¬««ª¬´´©­¨£ š›Ÿ¡žœ ¢¡¢§¢¤¦££¥¤¤¡š™™˜–•“”•“”•“«Ê×ŪŸ™–——–—’ˆ‘”“‹ˆ›œš”’cKTUQRTRTVWY[[XUVZYYYYYYYYVUW[TT{¥¨ŽvdWROPMRTRRSQLMONJGHJJEJMMOVZZYYYYYXWVWSQPLFEICDEGGHGGHHILMLHFGIKIGGKOLJIJJKQZVWTUXX[ffP*-G[V?2:<@GH>1+,4:AHJJMNMLNOOLIIJKIFIKHDFEFGDBFFGEDFEDEEFEDDFFDFFFCCEFEFHFEHIEDJGEEFEEEJFCEFFFGJJHFDBA@@AABAABB@@AA?@BB?CDCC?AHBCECDRckllgfjhddijmo`>$ # ,QyyxrWTkxgWZVSVQQQSNEFPSPNPPNOSQTUSQRSSRQPPRSRRLORTQOPSQPPPMKKNDEB@DD4!"!%(&%$%'('&,&?iz{€…Š•‚H*486:44A?698!&.$./#-701?8%,?AFMKB5%"!$,;Zr{…Šƒƒ“”Š…Œ“…‰ˆ•‘‡š™‰Š–˜Ž‘“’–“ŒŒ“”ŒŠŽ™ŽŽšš‹…¡”„–ŸsRWYXZ[WWYXXYVSSTLA=??DMPLJLOK?QwqMVa_^^WW\Z[]ZVY^^_\[]``]Z^_]_YHZŒ—•““•–•’• ©§¢£¥¢¢£¢¤¤¢ €€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€H†¦¡špA3'.-/4'(CRK<6:A:4::1;?8L’±¥¡¬§©«¨¦¥§©¨¥£¥©§§®²°°«¤¡¤¤ŸŸ¢¢Ÿ ¢£¥¨£ £§§¥£¡¢ ›™›š–“’‘’“’‹’°Ð×Ä­ ž™–””–”†‹‘‘“‘•›š“’žbFPURQQPRRRSUVVVWXZXVWYYWUTZ[`SU„¦¤‹t\NMMRRTVUPLKKMLKKKJGEFEGLOPSV\\[[[Z[[ZZYVSPPQPMKJJJHEGIHFGKNMLKLLNQSUWWXY\_bdfhgdejmlpY,>aV80?@BJL<+**2?EJKLMQNN>MypEPc_Y]YX\[YX]c^Z\\\^^ZWX]`]\`XH\‰”Œ‹’•—•˜˜œ £¤ 
—›žž›œ¡¥¤€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€gªÄ»»¿µžR.%0..21FYSFA>;3;==5;=6B{®°¥°³®ª§¨ª¬ª§¥¦§ª««®³³­«¦£¤¡›—˜ žš›¡¤¤¦§  §§¡ ¤¦§¥Ÿš˜•’˜”Ž‘•µÉÉ»­§¥›š˜—˜—‘‰Ž”–––’Ž’–“˜ŠaIQSQRTRTURTUVVWWXYXWWXXWUWYX^SX‡§ ‡nYPTPMRSQNIGGIJMPOJHILFEGKMNORXXYZ[[[[YZ[[ZZ[\XWUUTRNKIKJHHJKIEGIMOQRRWWWXY[\\`cdbceebaS.E`K/3>ADJI9**+0:CHJLOONOQQNMNIHIJIGGJIIHFEFHGEEEFFFEEGFEEFFB=DBACEEB@DCDFECEIGFEFIJHDGGFGIJIEFFFHHDAA@??ACCA=@AAA@ABCDAEF@CGBAACCGWfkhgghecdhihir^4"!##"$#Ht€sxqYRZ[[[YVSQSQQRNDFPURQRRQPPTSVWQORRRNOSSQRSSTTSQPQQTQOONLJJFGFC?7+! ###$$%%%&0Uwzw‡‰“zB-437:62:<;<0$)<3-,"06/0<9("3DA," '-..16:<@PbZ<&#5]ƒŒ…†‘ŠƒŒ•Š‚‡Œƒ–†Š‘“–˜‘‹Œ“–Š’”‹–“ŠŒ”“‹Ž”•’›—‹––jMU[VX[WVWWUUUQUUJED?>ELOMMOOPAMwlBH^^X]]\\[XXYWZ\Y_\[\\[]``^^c[K^‹ŒŠŽ““”’’•—›˜“”—šššœŸ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€c¬¾©­¶º¹ _+ 'CU<=WSHG@>?<9;9;>9;\œ±¡¨®¦¢­«ªª§£¥©¡£©ª«´¸°®ª¦¡™šœ˜šœ¡¥ œ ž £¥£¢££¥¥¡š˜–”Ž‘’’œ±Äĺ°¦žŸžœššš”Ž”•”•“‘“”™—‘—‡_JRSPRURTUTUVVUTTUWXXWVUVVYWV\R[©’oZOTSOQRSQMIGGCFHHGFGIKHFFGILPSUWYZZYYXY[[ZZ[\Z[\]^\YVRTTQPPNKNNNNPRTU][XVWY]`^aa][YUPTO2>R;(4=AGJE4)*+.8CJKLPPMLORPNNNIKMGFHFEIIFFGGGFHHEDEDABCDDDEEDCEEEEECAACFFEEFFGEEHJJGEGGFGIKJGGFFGFCBBAA?>@CC@CCCBAABBBFEACDCCA@CEIXglffhihddgjfgqZ/ "#$!"(Ry}r{v^UWWWVVUUTRQQSOGFNVQNNPQRRSQSSOOTUSQRRPOPQQRSSSRSTUQNMLJIIGECA;0&  ####$$%%&:c|wv‚…‹’wA,5459529<;1" &('&%$&+/1159BQXG0! 
+5h–†x˜Ž†ƒ‰•Œƒ‹“Œ‚Š”Ž‰‹‘‘•™“’“——ˆš“‹Ž˜•Œ”œŽ“—‰Œš•Š–—jMW[VXZWUWWSTVSXVICC=;BKNLJHKPAOvmFB\_\a`[W_]][YZ_^_\Z\^\YX[YZ`ZJ\‰•‘‘‹‰’’’‘“•’Ž•š™™›žž€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€eµÁ§©®®µ¸¦t3@†17URJJ?;E?8;>8G‚ª£¡©¦©¬§¥§¨¦§«¤¦¨¤£«­¦££ ›––™›—›ž¡¡ž›žŸŸŸ¢¤£žš›žž›˜–“ŽŒ—¢±¾¿·®¨¤¢žœœ™”’‘’’’’’‘’“—‘Œ‘ƒZDSTPQSRRQPRSTSTTUUWYWUSUWVTTZO`—¯„si\PSSRPQSSQNKJJJJIHFFEJIHIHHHHPRTVWWVVWWVVTUVYYZ[]^_^]Z\\ZXXURSRQRTX\__]YXX\`cZ\[WTSPKa_A,:2)1/(*+-6ELLLQPIGLQROLHGIIEHKFFHGEGECEDEDBCFEAEEECBCEGHFDDHJGABEFEDFGFGFFIIGGHGGFGHIIHHFFGDDCB@BCCBCBBCCCA?>??BABCABCBA@DHLXflefhkiedglfhqV+ %!##".[~v|nWPVUTSRSTUSPPRRJGLTPMNOQRSRSSSPMLNOPTSONQSSQPQTVTRRPONKHHJIEA?7+"  #####$#$(Emzot…ˆŒwA,554741:<<;/);>%,(#45-3;2$,5.%-5692/064-,2*/335@MV9'#=|Œ~†˜’ˆˆ‡€Š“ŒƒŒ–’—œ‡“™‘ŠŽ˜šŠ˜“’–‘’š–Šˆ•‘Š”„“•jNYZVWZWUWVRTVUXWHBE?>EMPNMFJOBOvnLPa^X[\\Z]\Z]_[Y^`]Z[]_^][Z\c\L_‹™““”‘ŽŽ‘‘’’”’Ž—›˜–šžž€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€_²¿¦®¯ª¯§º²hl«~)9PNMSE:?F>@C>?@6Ap¦¨›¡¤¥ŸŸ £¢œ˜˜› –™ŸŸž £ šš™ž Ÿž£¥¥¦¢œŸ¢  ¢œ›š››˜“•““’Š“¡¤«µ¸°¦¤¨£œšžœ•’•’’”•”‘Ž‹Ž‘ˆ‚\HRUQORSTPQRTSSRSSTVXVTRTVQQUXMg£³d\XTUTPPONNOPQQKLNMKJLNJKLKJHGEJLORTVVVWVURRSWZZZYXXYZ[Z]^\[\ZVSTUWZ\^_[[\\\[ZZTWVTUZ\ZfgL!  ./-1;FF;6;@??C>635417CNB8$H|‡„“’‚‚Ž†‹”Œ…Ž˜”…Ž—„‰“’“—’Ž’˜’“—“Ž’˜˜Ž•‰”Žƒ“–hMYZUWYWUVVUUUQVUJEA=>CIKKLLORBOtmOYb^[\Z\^ZXW[a\X[[ZYXY[\[\Z\b\L_Œ•”““”“–’’•–––’“–šš˜—šŸ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€Z«¹£©­¨«¤¬µ€}¢o@NUINYODB?;;@==D<:cŸ©–š”‹’™›š™›ž¢ž™œžž£¥¥¥¤¡››šŸŸž¡¤¢¢£¡Ÿž››œŸ¡ŸžŸ™–•˜””——•—š ¥°¸²¥ ¤¡œšœš“‘•’‘“•““’Ž’‰€]LOUROQUVRPQSTTUVWUUVUTSSTNPUVKp¬±uZUWPQRROOPRUWVUONMLLLLLMONMKKIGEGIMQTWYZYVROPRUTTSRRTWZY[\[[]]ZZYXXZ]_aaa_^[YWVXZYWY_cb[aO)"./.4=CJE4(%&)/;GKKLQLJJMMMNQJIGHIGEGFGFCFIHFEEDEEEEFDCCDCBCDFDAACFFDFFFGGFFFIFDEFGGHHGFFFFFGFEHHDED@B@?AA@AC@AAA@@AB?EB?DFCDDBGNQXchffhjiffjkhllH$#&! 
#".b~urxfXWXVTQPPPQPOQTQGFOTSSTSRQSTPPQMNSTQPQSSSRPSQPPRRQONLKMLJGFIE?9/&"" ####""!!0Zvuqw~‚‡Ž~K.562310:=<;-#0@<""&:7,7<-!$6C@9=HTMB;72-<4Y•ªž¢˜“› œ ¦¨Ÿ¡¥¨««®²°©¥¥£ Ÿ¢Ÿ Ÿž¢¢¡¢ž¤§¥£¢ žŸ  œ˜™žœ–’”š›™–¥¥ª°¯¨¤¥ŸŸ™–•”“‘‘‘”•‘Œ‹„‰|ZHNTROQSUSSSUTTSTTUTSSTTSRPQUSLy³¨kTUTJLRSPPRWZ[XURPNNOQPPNPRQPOMIFFGILQUW\[YVQNMMKLMMNOSVUWWUVZ[ZYWUTW\bfgd_ZXWWX[]]ZZ^^\ZaU1"+,/7>BIC1&%#'1?HJILPNJHJMMMNKIGHJHFHHHHGFFFCEDDEFFGHDDEDA@CGDDEDCBDGDFGFFFEAGFEDEGGGHGFGGFGIHFHFBDFA>?ABBA@@BBBBAABCCABBBDEBFDIQRWafgfgihfhlggkf? ""!!# '[€xop^TQVUTRQPPPMRSQMHHNURPQRRTVRNPROPTTNRTQPTVQOPPQQQRSOLKKJHFFDC=3)$"! ###""!!!?hxqvyx€ƒŽ„R0462200:><;- 0G@%&;6*8;)!0@A96>FKSXOA;BJTTV\^WI>56:>:&/^Ž”‘‰†ŠŒ„€’ˆ„•ˆ“šŽ„˜’’Œ“™•Œ˜•Œ”‘‹‹•Œ„Žœ“‡——eHVYTVYVTVURTVUYWHAB>=CJLLLJMNATwmQX_\]_]__^Z[\[^_\Z\\ZY]``c_]`XJ_Ž™’’–•“’Ž‘”š›“Ž’——™––šœ™€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€\«À©¢¢¢¡«ª½phQ5.*!'( !" &!$%$125UŒª£ ¤Ÿš¡£žž¤¥¡Ÿ¤«§£¥¥¨ œŸ ›™œ›•–Ÿ¡œ™ Ÿžžœš˜˜—šœ”Ž“œ•——–••–—žœœ ¢¡™¢ —”——‘Ž‹“‘’ŠŒ†‚„`KOTQPPOQQRSTTSRRSVTRRTUSQSSUPM€µ jQNNHPSMQPOQTVVTSUVRMKMQRSRPOQQOKIHHILPRY[\[WSQPHJLLJJKMNOOMNRTTSSRRTWZ\`^[WUTUUTXZYZ\ZW[aW4 "(08?BIB/%%"&3BIHILPQGAEMOKGJJIGHIGEJDEHDADFHECCEEEDFDCDDDEHEDDEDBBEECBCDDEGDFGEDFGFHFFGHGIKFEHHCEE?>BDA?@BBCCCB@?@ACB?AEA>CHEJRSW_dhgghgfindfja9!"%"#!GzzmjZSOTTSSRQQPPPPQNGGNVROOQQPPNNNOPQSVSSSRQSTPPQQQQQRSONLKHEEGAB;/%""  ###"!! Ntxkvyv‚Ž‡V2463100:><:-0I?% %;5)7:'-8<>@98GPMPWSD8578IF-;w˜‘€…Œƒ’‡ƒŒ‰…‹–—‹“—†Œ—›‘‹’›•‘“–‘Œ–•Œ•šŽ„‘˜ƒ••dHVYTVYVTUUUUURVVIDD>>88+0OA%(&*D2'89+%09<@;6EKPRPRVSL>33::AQ]Yc^HLYE*\•–ƒƒŠ~—„Œ’ˆ•’ŠŽ”‘Š‹–šŽŒ“”“š˜‹••Š‘”’”‡—”fLU\WXZVTUUTTTRVUIDB=>EKJJKJLNBVwjRY[X\`^\Y]^[Y^_]\^^_`_\[]^[ZbXHdš’“˜——˜™“Ž‘––“—‘•–’”š€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€R’Ÿ•š™š†gD0(#  !!"""#$%'(())+:g“˜ŠŠ‘ŒŽ’’“”–•‘’“’“‹Š‰‰‹Ž‘‘‘”ŒŽ–”ŠŠ”‘‹…‡Œ‹ŠŒŒˆ‡ŠŽŽ‘“‘š›š˜™™™•“”•“’Ž‡†‹Ž‰‹`JOROPROOONOOORTSPSRQRTUTSOSSMV‘ºiPPPJKKPLLORSTX]YTUWSPRTRRSSQPQROOJFHHFJPPSW[\[Z\WROLIIKGEFIIGFHFJLLLOSVVTQONNOPTSUWWUUXRST> (49>DD8)  *7>CLPLIJ>>FFEGGHKLJHHHGCEGGFFGICEFECCEHGFDDEEDBDCBCFFDACDEFFEDEGFFFFGFEIGEGIJIGIGEEDBAA@B?=AB@@C??B@@BCBDCAACCACKPPQX`e_`ejjeejjmnU."$ ! 
F[N@DOXTTROOQSRQMKOOHFLQSROORRPMOPQQSVYVTRQQQRSOPPPOMMLJLJGHJHEA7+(&!  !!! "# (#:gwsrrp{‚ƒŽl:.974.0==:9*0M=#)*(=3.4-#,::;;:BMOQSUWTQSF>8.-5J€”‰…†‹Ž„€Œ•‘‡“ˆ”’Ž‹”‘•˜”ŽŽ‘Ž‰Ž—˜’‰‹‹…‹”Ž‡—”fKUZUVZVUWVRUXVXUIDC>>FLLKKJLNCVwjRY^ZZ]]]Y[[Z\__]]_\[]^^]\c`]cWE_‰‘‹’•“•”—••”•˜—“’š˜›  š˜š€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€F€‘†‹|YC2$#%# !!""!#&()(&$'/Ch††‰€„‰‰‰ˆŠŒŽŠ‰ŒŽŽŽŽ‹‰Œ‹‹‹ŠŠŒ‘Œ‹Ž‘ŠŠŽŽ‹‡ƒ†‹ˆˆŠŒ‹ˆ‰Œ——’˜£›š™™—”‘“”“‘’“‘Œ‡Š‰…bJOQNPQNNNRRQNNPQPSQPQSTSRQSRL\—ºiPNQMLJMPNNOPRTXUUZ\XWWUQRTTPLOTTSNKMLHHIHLUZZZ\b]YXWSPONLJIHEEFFEFIJLNPPLIKLKOTVQPUZYTQNOVH#(49DDGPOIJH?AGFCEDHIIHFGFDDFHHFEFHFEEDDDDDBEGGFCCDCDCCEHFCFDCDEDEFEFFFFFFGHEEHIFGHEFGFFEDBCA?@CA@BB?@A@@BBABCBBCCBGNRRQW_dhddggfhmemlN+$'!!! 0?;6ARWSRRSRPPQPPLLPLGJQOOQQNOQURPPRSSRUUPOSPMPOPPQRPNLNLKKJIGD;0(%#!  !" !"&%Jotqtuqy„†vB.77303<<<;( 6N9!)(.@4##'<=::>EMNLKILODXxjQX`]Z[\]Z\Z[]\ZZZ[\]]^_aa[[[bWIe”“”’’’‘”–•”‘”š›ž Ÿ›šœ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€It……~bC.$%  ! ! !!!!! !$&'(''())=cy|~{ƒƒ€€ƒ††…†‰ŠŠ’—’Š‹ˆŠŒŠ††‰Œ‹Ž‘ŽŒŠ‰‹‰††‰‡‰ŒŽŽ‘“›—™›–™¡Ÿ››š˜”‘‘’““““’ˆŠŠ‰dJLRONOPPMNQSSRQPPOQRRPOQRRSOJ`œµ—fPLONMIHKJJKNPSTQWXW[[VTXXWWTQQSUURQQOKJFEHOUX[]Z[[\]^\ZWURPMKJJMHGJKIHJJGFGHIKNMORSOJKPPTaX0'498BB3$'6CLHFKLEDND@ELIFDDIJJHHIGEEEEFFEEDIEBCEGDAADGHEDDECEFEDEFEFDDFGFEECEGFEDFHEEFGFEFHHIFBADC@C@@CB>?EB@ACABDC>@BBBCBBJOSSRV_fhggfegheeqhD% "" ! #%/43CTURPQUTPNPMPLLPLEIQQPOMOPQRRQRQRRRUVSRWTNORQPQRQNLIFFIIHEB7-&# !  ! #%.Ysrrwuov‚…‚‘ƒL,37324<:=<'";P7!*'1D4 ":A869CSQNLMPRSPSWXUTSP?;9.5V`UGMu‹…Ž‘„”‹†•‹–‘‰“ˆ‘˜”Ž•˜‘Œ“š•’“‘‰ˆ’Š•…•’eKUXTUYVUWWRRSSWVICE>]qutu{}€‚……†‰‹ˆ†Š‘‘Œ‹Œ‹‡ŠŒŽŽŽŽŽ’”‘‰ˆŠŠ‰‡†ˆŒŽ‹’•–™™š›š›ž¡ žœœ™–’‘‘ŒŠŠŒŽ•fJKQONPQRNNPSTRPPQOPQQONOQRRNHf ®eTLLMNKGEIMMKLOSWZWUZ^[XZZYWVVUSQRTSPNNMLKGEIPVW[_`ZZ^`^\[YVTRQOQLILMJGGGIJIIKKIDFJJEBGOZ_nh<&4::@=-%1ERJCFLIDHNAAHMKFFGHIIHHIHFEEEEEEDDDDDDEEEEECCCGGECEEFFECDFEEEDFGFCDDEEEFFGCFHFDEGGGGEBDGGC?@A?>?@@@?ABABDB=?ABBACDLORSRV_headgeefcfqc;#""" " +32?SVUQOQPNOSRPMQVLFMRSRONNPPKOSSQPSURSRRTTPNSQONNMKIFGHFEHE:3'#$"!"##""!  %$7erntxsmt„…‚”X..7324;:<;&@?@A@=>@A?BC@>?ABABDIMORSRT^hlbelfadego[5&(&$!! 
$17FUUSQPQPOOPPMIOTLDHRQRTQNMPNOOQQRQQTSSSSUUQTRPNMLKJKIJHDE@4)&$$#"""!""##"!! &$?kpktxpnt„…‚’”h8,6312<:98%!BQ2#/(CB?AB<=@@@BC@AABCCCHNQQRSQR[fgdii__d_lqU0&'#" ! ! -;LVRNQRRQQOLNMMORMGFNRTQOPPNNNONOPQRRPQQPQRQRSRQOMLLHCEFB@;/%)(" !  !"#$$$ !&$!Elpgrsnrv€…Š“yG/44.0<:65%&HR. .'79##'-%'/.1@A<>A@=>AAADEADCCEDEJQSRSSPOXc]abRH[jaijL*&($$!"!"+6EOQQSRMMQQNONOTSLHIOPQQONOROOMMMOQSQPQRRPPPMPQQNLJJJHIF@A:)%(&! ! "$%#! "&%#Ilpdnpnvx}…ƒ‘ƒT235,.<:43%&HP*/*#7. "$% '+(1014;BIMOVURUWPO[JAXv‚…„†|†‘…‹’’ˆ…‘•Œ‚Œ•‡‹““Š“˜’Ž’•”‘Ž–”ŒŽš’‰Š””ŠˆŽ•ƒ’ŽcKVVRTWUTVVWUTRTRE?A=>FLMMNIKL@TuiQY`]Z[]`^[\ZZ]\[_]\\_`^]]W^aaSHg—‹ŒŒŽ‘‘’—™—–¡œš››™—€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€%*)'#"& ''$'&%#(% ""(1BQO@77?atqqtuxkt}…Ž–•‘ŒŒ‘“Ž–ˆ‰“’‹‰‰‹ŽŠ†‡Œ‹†Š’”’’–œš¡¯·¶¦¢§ª§ª¯«§§§¦¦¦¥¥¡œž££—“’Œ’„\INQMNQQPNOOONNOQRPPONMMLLJRFSŠ´«†cNKQOLKPSOJHIKKKNLLNPQTW[[\]^]\ZYWTTUWXXXUTUTQQTOKIKKKNSXWXZ\[XU[XXYYWX[``acgkoqqlkopmklqa[`P% !-<85,'4JODELPNONB38ISOHDA@HJIGFFECCDEEEDEEEECCEEDECEB@BBACFFFEEDCBGFDCCDEEDCEHGEDFGFEFGHHGEDDEGFDBB??ABBBC>;=BB@?=@@BA=?DCB?=?FNUXT]X97]menj@"$#"'$ !""'7IRQORRQPRQNRMLRSIEITPMOSROONNLLNMOUSQQQQQRSQQQNLJKLFIKHB:0)$#"!!!"#"##!"%#%$(#&Mpllmoopxˆ„ƒ‹‘c(*2-//+0.!+@F' 0(#/()!" 
!%(,))-/4<=@IHRUSUSSZJ=YyƒŒŠz‰‘Šƒ†„†Ž”Œ…‘™…Œ’ŽˆŽ“ŽŒ“—•‘‘•˜”Ž’™•’™“Ž•”Œ‡‰•‰ƒ”fNUZYXVUSRRTRNOSPFAC=:DMMMKDJNCWufOX]Y_`\X[[_^ZYYZ]]\]^][[]a^\aTIh”ŠŽ‹‰‰Ž”˜˜–š ¤ž›žŸ˜€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€*+#"&%$!)/(#)++-+'%&+5?EKJ@:>@?^{‰ƒ€‚ƒ†ˆŠŠ‰Ž–˜•‰‰‹Ž’•œ—Ž‘’“Œ‹Œ‘‘‘Œš›¤¤£¢¢¥¨¨§£¦«­®®¬©¦¢¤¥Ÿ ¢ž™˜–”‘Œ‹Š‰‡„~}‡‡…‚‰}VDMPKKMNONNOPQONMMPOONMLLKKQDR‹µª„fPIMNPNNONMLLLMMLKKNPQSVSW[___\[\[ZZZZXWXZZXUTUWTQNMJIIJRUXZZWUS[bfffijjdfilnonnolkllife]TU_S+ ,<>5+)7LOBEPQLLE:4DNQKGECBFHIHGFECHFDEEFCAFFDCEECCDFDCDDBEDDCCCDEFEFFGFFDCFFHIEBCFHGGIHEDEGGGHHGFEC@>>=;:::898679844895687:88?ISVVYVN:=[f`lc:!$!$# ""(8IRQNQRPPRQOOQRQMIHKQMJMPPNPLQQONNMOSUTQQSROTRPOMLKKKGCA?8.&'!!#"!# "" !"#%$(#'Lollmonow‰„‰–s5&0,,-,++(*00!$-&/3&")"#*..**-049=>EOTQORUG@Yx‡Š€y†‰ƒ…ŒŽˆˆ–ˆŽ†‡‘“ŒˆŽ“”‹Ž˜˜Œ•™’˜””’ŠŒ–•Š‡”Œ†–ŽbLUYXWVUTSSSSQTXTJC@<;EMLNLFJJ?VveM[^VZ][Y^]\\\\^^ZY\]]]]]]_]]bTHhŒ‘Ž‹‘‘Ž–™™›Ÿ˜˜™›œ›™˜€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€+) ('#,2.&%*,+).:FNZsŠzI0?C2:W~Ž‘‹Ž••“™šš™šœš–‘”–••™ž¡›œ›™—™œ œœŸ£¥¥¤¦£¥«®«ª­¬¬©¤¥§¦¡Ÿ›œ ”‘‹ˆˆ‡‚~~}{xusqpmmnnnprs|~}y‚wR@LOLLONOMNOPONMMMOONMMLKKMOBSµ©ƒeSMLKNOPMNPOMKLMQNKJKMQTYZZZ[\_b____^[VSSZ^[XXYXWWVRQONLKQW[[YYYU]fkorrnlkkklmoqsrmhd`YSQNQXO. *:;/#(>NKBHROJI<17KOLGFECBDHJIGFECGHIGECDEDEDDGGEFEFFDFDCCEEFFGEEDDDDDEEEEFGHHFDGJIDBDC=<>@ABA@>>>;878988936:97:=;=;:967::646?KQPLLHD9@\gejY2 %"! !")9IRQMPQPQSSPPOOSTMHJSQOQSQORQPMJNPPOURRTUQPPQOLKLKJHGCA?:1($%%#  !#! !"!#$$(%(Mnllmonow€††ƒ‡—ƒG$+*)+,''.(*')+!,?+)--.-1.2:=68:6587?:DQTTVRBB\v„Š†‚““ˆ‰‘ˆ“‹†‹Ž‹Š‘‘‰‰’‘“’‹’™’’”“’ŒŒ‘‰‹“”Š—^JUWWWVVUUTTTRSVQF@@>=GLJKJILL@WwhPY`Z]\YW\]\][YZ^^Y\^^]]]\[[^cSFf‘‰Ž‘’’–˜••˜š–“’”’’–˜•‘€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€%&-CU[cmnhdWKXknr‚”šœsI63Bj•¢Ÿ”“™š—–˜œŸ¡Ÿš”Ÿ££  §¬¬¥¨¬­¬««¬­­­®¯®ª¨®§¢¡Ÿ›˜˜•“ŽŠ‡‡„€ƒ|xzxrrxpooonmmllkkkjjiimjghlopoz}xwTBJOLMONNLLMNNMMMNNNMMKKJJLNBUŽ²¦„eSNMJMOQNPPOLJIIONMMMMNORSTWY\^```__]YTPMTY\[\ZYX[[YWXVRPSWYYXWW[UT[djmpqnjhhjnqfe`XTSOHFEBC>+ (76*.KODDMNJIF5/<667878::;:;@EHD@8?D8=[kkhO*!%! 
!!+8HQQLOPPQTTQPQQPPONNQQQSTPNPQPOONMMQOQRSSTRPMLJJJJIIFFE?7/)&&$""  """!"###$#)%)Lmllmomnu€ˆˆ†’ŠY'")**,&%/)+&"*')A,&)'>DFJBAKI?:<=>>;;38FMS]dJ>Wx€…y†—“……‘’ƒ†‘…ƒ‹‘“‡ŒŒˆ–‘Š’“’–‘’—’Ž“Ž“‰‰Ž’Œˆ—Œ_JUVVVVUVUVXWSRTOFA=;/3BIHFDDA=>HJJHGGHHFEDCEEEECDCCEDBBEEFFEDCBFFEDEDEECBB@@?>>=;978998952269;;;<<;989:9655755663588897<@@>>?@@AAB@?<;:5@I<:TeieF&#&   !,9GPPLOPPQTTQUPJKRURPNNORSPNQNNQRPMNPLORRRRRPNNNLJHIJGE>4-,)$%  "! !"#"$%###)'*Lkmlmnmmu~ˆˆƒ‹i-)-)*(%+-&!,#,6)"$+?BCH@BNWPC843;GBA=:9;PjYCVx…†ƒƒ‹ˆ…ŒŠ†’‘††‘‡Ž•ˆŽ–“‡‹––Ž—˜Ž‰–•’˜Š•‘††‘‰…•aLVVVVVVUVUUURRTPHDA=;FNKIFEKOCXvgP^bY]``[ZZ[YWYXXZ]^^\[[\\YZ^cREf‘’ŒŽŽ”“•›Ÿžš˜Œ•“€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€3>P³·¡ž«¨³¶sE¼¬©®°­µµ¥rM62;CFVjŠ§·¾·§³­©ªª©©««©¤ ¡¢ ”Šˆ‡…}xvsrpnjhihgggda_d_^bdccfeggffiihfhfehgfhlllmmmnndhkjhhmqy‚x}wWEIMKJKJLLMNOOMLJJMLLKJJIIILE\Žª ‡fQKMMMLMPQQRQPPPLNPPPQPNNNORSUVV`_```]YVSLHMQTUXUWYZ[\\ZWTPPRUVVVWURSZ`bccb_[UPLOMGCBCCA=A@AD9!-,,IKFFDH@CD4-;IKIEDD@>CJKIGGHHGHIHFCCDFCDBBEEDFDCDEBCCA>???=<:9:976679:9;;::<==9<=<;<>?;;:876777447899;<9=CDCA>HOMC@CDCBCC@<99;;@H@>Qbj_?"#$  !,7ENNMPQPQSSPPNNRTQPSRQQRSQPTNOOOMPRQONOSSPNPOPOLHEEFDE>0)*)$""! !"! !""#%##"*(,Ljmlmnlls}‚†ƒ€†“z;)-)'(())#!*%($$!3@@CFAEQPWWQI916CKMMI>>BEEEEDD@>A7 ()*@@@A>;<;99:8;:999:;;:;<=>===9:;9:;:868;:7347;;::<=>?A?>?AAABDABBAABAAOPDADDABBB@><;;:@H??VgnX8 !   !,7CMMNQRPPRQOORPMQUTPURPQRPOSNQTQMMPNQRQPQSPKNMLIGFEE??8,%%%"$!!$"!! 
!"#$#"!)),Lhmlmnlks|ƒƒ‚“ŠR'(*(#%*,%" $(#%:@?CGDHNSONRTL@92:HX`YJ?@Rir‰z‹”‰‹†‡ŒŽ‡…’‡‚ˆ‰‡–—’Ž‘’‘’‘‘”Œ‰Ž–”’—‹Œ‘ŠˆŠŠ‡–‹]JVYXWVUTSSTVVVVPGCB>;DKJJGHMNCZyiPU`\\[\]b^XW[]^^Z]^^[[\]]_\[_QHk”š‹“”“““‘ŽŽŒŠ˜™’’€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€)&>‹Á¼«¶¶Ä¼lV£Â²²»§mC70'.31,)))''.02E^htmijhb_`[Z]`^Z[``^^`bcbaec``acdcbddb_`ceab`^aefbabcdddca^ca`gfdighhijkllgiklkigfr~t{z]LHMKJJILLLMNMLLLMLKKJIIHHMJA\‘«ž„fTKFFMOLJQWXSOOPQTTQQRQNINQOJJOUVZ_ehe_[Z[XQNLJFJGFHHEEHFHHGFEGIKJKOPPRVWUPLHGGGHFIPTPMNMH<8<8'%5JQMI>:FEEF;0<989:<;768::<:89:8:=<;;<<<<;;::::9876899:=A>9A?@C@=>DJHEDEEEDECACCCBC@BDC@BECAQQA=BDACBAAA?<96EN=0'&)'"#" !""!"#"!!#$##"!**-Lhmlmnlkr{€‚„…”f1&&'!,/')&"#+!*A@=BEDJNQNRWSPRP913G`yŠ„xŠ‘Š‹Œ†‹‘‹ƒŠ‡‰’‹’—‘ŠŠ’•–™’’–’’•‘Š‹”“‡ƒˆŒ‹˜‰ZHVZYXWUTRRPQPPRNGEA>;DJHJJFLMAUugQZ`WY\_[ZZ[[]^[Y\]YZ_^Z\ab]Y]QJl–˜‘‘“™•’”•”’‘ŽŽ‘ŽŽŽ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€.12e«¸•”¥®–]PŸŽ[;9>822001-'&'%%'()/6LYca[[]^W\ab_^_b_abb_]]^ba`__acddcba_^]\^`ba__accdec`^^_bbbbbcccaceecdhlghc_aca`l}swz_FFMLKKJMNLKJLNNMKJJJIIHHHKLD`•«…gVMKLNJJLTUPNSUQPSSTUQQUPPQQNKNRUZ`cfgeaYXXVQKJMKGEFEA?@=AB@>?@?BDGHIKOQUXXUUZ]]b[TYdaZ\_J=;;;/ (Hb^ME>AILA>95DQJE<:?BCDDB?==>;9;9;949=9;;9<@=;;;=>;:<;898:97:;98;9;AGHGFEEGGJMJLHHHGDAACGIIGFGEAEDBBBCCCCBCEFDDEDNNC@CB>C@@B?:8;=EE>>SlmP+"!! ,4?MHMPQPNNQUOQRPMLNQSRSTQRTSQRPPQOMPPQQPPOOONMJFEGFCF=+!')$ " $#"!!$$ #&'#&+%1Riiioqmlvˆƒ„€ˆ”u?",-!(3)"+%.1%&:B=?FFAFQPRSRQPQSUP>49`jbRGFMXZQ52100.*&!"#%%$'*1CV]\]\Z\^^]\^]\\]_bdd`\\]_abba`^^^^___`b`^^__]Z_`bb``ac]]\\\\[[UWYYXZ^abca`chhgoƒw{€dJHOMLKIKKQOLLLMMLJJJIIHHHGHB`”¨™‚hWPMLMKMLNOQRSUVRTTVUQOSVQMKJJLOPSW\bhhea^[ZVRRSROMLKGECEBACB>=>A@CGGFIOOW^beiiggc\^ffbd\K@;9<0 'IdZEA@DKF@=:>OVJGABFC=989<<;;><9<=?==;8?<:;>=<;;;989768;@CDDIKKKOOQV[][WLHMPPNLRJIJID?>AKNOKGFGIDFGFCAABCABGIGDCFNMFCDEG@ACC=78=>FE?>TllK%!" #!-0:KMLORQOOQTRQOOOONMQOOUXURSRSRSTQOPPQQONOPOMMJGFHGDE:*$((#! 
""" "##""$$"&)#2Tjkikoopv|€‡‚…€…“|B'4;.,+1%$-%67'#(B?===<;<<;:;9;94787679754428?<??<98>FF??VniF#!#%#" #./6HOKNRSQPQRRRRPNNPQQTSQSQQUUUTTSPNOLNOMMPOMLLJGGIHEB6)&*&!!!%$"" !"#""#%!&'!4Wmkijmopu|‚…‚†€‘‡S+0?B?/)!$.$>;% #9F<<>BCCIQRQPPRSRPUOSWOID87MdqgJHf‚Ž‘ˆ„‹‘ˆ‰Ž‹‚††‡Ž‰‹‘•–Ž•š‘“–Œ‘“‰˜“‰‹“’‰„”‰ˆšŒ]LWYTVXUSTTVURQRNFCC>?GIEFLHKI?Zt`OZ]XVXX\]XZ\Z\][VY\]]`]Z\ZX\bPEgŒŒŽ’Ž–””‹Ž“‘’’——““‘‹€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€$-38COXQPTK7=NH>IVYTQUYF2/61/0)%('$#%%#$%&.BY`\WXZ\\\YWXZ\\ZXXYYXWWWYZ\WWVUUTTSPRUVTUWZ[]^_]]_afgghijkkmnnnoprtsrqoorrmtƒz„gFGLIIIJMMJJKMMMLLIIIIHHGGJGAd–¦™‰lWPOLLLMMONMPUTPPRTXYVRSNMMOOMLLNNNRX`dghd_ZXXZ\\^]ZYXUPQRQPOPMIJFDFEBDIFJMQV]bfdjhbbhifZSLBA>;;=<>CF?DGCDJB:KF9222141/;C;FG@?Yoe=##""%##"026EMJMQSRQQQSRPQQRQQQSQQSTRRSRRRROMNLNOPRRQNKLJHHJHE>1')*$!"" #"""####$&! &%!8\nfkmomlr„ƒ†€~Žc0(:FE0($#.& C; $$2CA<=?ABCIOPQRRPPQRWNPSQSRG<7BWgdUMaˆ†‰ƒ‰‹‚…ŽŒ…†’‘ˆŠ“•’”››”‘’“•“‘Ž‘–“Œ”‘‡ƒ–Š‰šŒ]LXXTUXTSTTVUSTWSHAB;;EKJIKJLI@Zt_N[`\ZYZ^_\]\ZZ]^^[\[\__\[][^aPIl‘’Ž‰‹ˆ…ˆˆ††‡ƒ€„„ƒ…‡…~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!).4?O[ONXL-+:9CRTOLJC?>=>@?7:>=?JSRKW`VQSK>KE4(+,,243=A69EDMTWUTSTWRPPUZ\[ZNHOQIFGIHIMOKDCFNLKLKHGIHIIHGFFGKKJGFGJLRWO@=BC??@@>=;:9>EG??[p`3" $"#263@JILPRRQQPNOQRSRQPSOQUTQPPSPOONNPQNMNQRPNMJKJHHIHD;-'*)!"#"#! 
!"###$%%!!&$%@amekmmnkq„†€|Š”h5-;C=.1.%%1+$F?#*6?C==>@BBBGMOQRQPOPRNOQQNPSSJ6?EFcp`N`„Ž–‚ˆ‘‚…Œ‹††••‰‹”˜•‘””Ž‘’–”’’˜ŒŽ‡„Ž–‰‡—ˆXGSWRTWTRTTRSSUYSHA>9;FNLIIIKH?Yp\L[`][[Y\\UX[[ZZ[\XY]\YZ]^[[_cRLo’•ŒŽ‡‡‰‰††ˆ…ƒ‡‚…‰‚tpu€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ .12f—¡—sTJMLMLINIJPRPPTUVW[^\XYYVTUUQNMJKLNMOU[\bggc_\ZVWWX[^_][[\]YSQRQRRQPNIE@ADIKMPT\bhgekkdbZTMHMH3:iiG8;>997=JTPJIHA<>?CD@05=ACNVUPUYRKLC9>?1(/1/7:6<=6=IIOSTTUUSSURRX]^]\TMOPNMKIFLRQKEFKIJKLJHGILIGFHIIIFIJKKMNNNRMEAB@>A?<<>@;6>]p[/ #$&% #460I@,19.(41(JG&5HE?@?>@CCBGNQPOOQRQOMPPPSRRWE=[fJAY{hQ_ƒ‘Šˆ’‚†‹ˆ…‰ˆ–˜Ž””ŽŒ”Ž’”Ž‹“œ”Ž‰„˜‹ˆ—‡YGTVRSWTSTTORSSTNFC@<=DIHHKILJBYp\NY^ZYYWXWXZ\\ZZ\_]WVY[^_[[[]^MIm‘‹†ŠŒŒŒ‰Š…†ˆ…„††‰‹ŽŠ€zx€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€$00/8IVPKWG-6JUROKOUQLQW@028622,)'()'&%&'(&%%,3P[`\[_`]a``beggfefijkkjiklllmnnnsrqswyxwz|€‚ƒ‡‰Š‹ŒŽ’”•›˜––™šš™•““ŒŽ„p{‡€„lIFJGGJJKIGILLLKKLHHHGGFFFGAAm¢–ŽtTLOLLMKNJKQQLNTQQSW]][]ZWTSRONOKKMONLMPW]cdeeca^[YYZZZ[][]^\VSSVRPPQOMMNJIIJKQXVY`bcknhfZSPMPJ74epP77?:;7?NRJCDF?;78963)63)LL) 9KFAD@=?EEBGQQPOOQQPOSSRRROPXB=a€~eNSypOb‘‘‡‹‰’Œ‰ŒˆˆŽ“’‘—šŽ‘—˜‘‘••’’“”˜š“‘’”Œ†‘˜‹ˆ˜‰[KWVQSVTRUTPRRRRLECC;:BKLHEHLLD[p_T\_ZXZZ\ZY[][YY[\[[]]ZWZ^]\\[IEiŒ‡…ˆ†…†ƒ†‹……†‚„ƒ†ˆŠŒ‹ˆ„€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ .348BI\OQ<*>PTGNLGKMKLWF2.7:415.)))(),*'%&''')?]qkchmjmmmkiknrooopruxzxyz}ƒ„‚…‰‹ŒŽ’•”–˜™™™œžž›™—•“’•Š†„‚~{uoqpghg[bo}wu{eDFJGGJJKIJKKIGGIJHHGGFFFFFAFu¥§—ŽuVORLLNNNOPPMKLNMNOV^`adba^ZTPRVSPMMKIJNTWYZ^dedc^ZZYWXZWYYYYYUPZTQVXTSVXSOMKKPWWV]bfoupgXQPOQJ80asV54A=@?ENNID?@<;=@ED?6:A@BMLHHA:?F9.441;>5499=EHLTXVSQQTSTUW[]ZURMLLNMKOPTUPLJHEIMOMLLMLLKIIIJJJINPMHC>:@LMA;?BA??=;=?<6;CF==^pU&!&& -61;KJIIJMOPQRRRSTSOMNOOQSQPTPMORQPRQPNLMOOMKJKJGFFC>4)&& "%# "%$! "&'$"$#"$&5Yndcmkjokm€„€~ˆv<*;G;'2>3)51(MJ,#7DFEE?;>EFCHSOPQQONPQSMKPRONRD9\z…ŽvSUomXn“Š’‰€Š“‡Š’‹––‘–—•’‘’’Ž–™”Ž’“‰„“”‡„•‡YJWUQSVTSTTSTRSTNE@?;=>DOJ=?DB>BA:8;;77;GH?=]nH & ($ &+ )11>IHIJKLLLKLORRSTTTRKKRTOOTSNPSOLNPPNMMOOMJLJJHEEA9/+'$#""! !#&! 
"'$!##(#&?\ijiffijlry~‰ƒ€~„Šs;%9I;(,<-(5,,SJ" >CDAELOOPQPONNOPPPOOQSF=Yv„Œ†ecpoez„‰„‘‘…„’Œ’”’’”•“Ž”•‘Ž•“‘‘“•‰‹ˆ†“•†ƒ•‡ZLYURTURSTPQRPOROF?@;?GHEHKFLG?]u\M[`^[YX][Z\\ZYXXWY\[[^\XZ]Z[aSHa~…}|ƒ„…‚||~€|…„‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ (028D@CUS6/JWNMQTUY\YTdbA*3<:6=91)')+,-+)''()*.0ClŠˆ‚‹ŠŽ”™›ššš———˜™›ž™•’’ŽŠˆ‰ˆƒ}xwxkifc^[XVTTSRQPPOQRRSTUVVUUWURUVQQkwuw_FHJGINLJPJJJIJJHFGGFFFFFFI?Dq¡žŠŒ†_LIGLNQPPONMNNNMOPORX]_aacec_\[VWVQMLMNSPPSVXZ\___^Z[]\ZXUTVXYXY\`dgijjeilljhfdgeccinlfe]XRJHE<""Otb:2?A;FPPH@AJFKNGERSG=COPHLURLORHEMKCGO?.44/5<=<89>DGTUTTUSPQRMMU[ZVTMOOIHLNT[^XOKE??EIKKKLKHNLJJLPOLEB@A@>==CHEA@??BCA<:=<88>HF==]i>#$%%+6'#(/.9EJKLLLLKKGJLOQSRPOLMQPLMSUQRTPMNOONMMMLJIMIGGGHA6,'$&$  $ "%# ! #%#"$'$*E`hgggijiir|~‡‚}ƒ‹v=(>J;)/=@EDBHRSOLLNOOOOPPPOOQSHA^x‚‰ƒ€‘†xokrŠ”Žƒ„‘„…Ž’Ž’“’Œ”–’‘“Ž“˜“Ž……“–ˆ†–…WJWWTTROQUTOOQUUKBAC9:FMMHDELG@^u\MZ\XWXZ]ZZXX\]\YY[[YZ]]\[]]^_OHd€…|z}}~~||}~{z€„€„ƒ€|}‚„„€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€#,07B<=WZ43[dZZZXVX[[XXhU/,88<;83.)%&(+++)'(+.000M…¤¤£ŸŸ ¢¢Ÿ›˜‘‹…}{zupkhifa\TTUSPNNOOONLKIHHPPQQRRSSSSTTUUVVVUVURTVRH`urqs]DEKHDIKHIIJIGGIIGGGFEECCBI>Bt¦ž‡„bOLLNNOOONMMMNNMOQNMPW\`__abaac]YUTSPNLRMKMQUY]VY][XY\ZZ[YXZ\ZVVXYYY\aeifddefggllkiknmilf\NCA?8%!JsmF38:9ELHDBELGOVJGOKG@AJKEIOHHKPHDGB9@I;+01-4>>;78>DFLNPRUTQRUOOV[YWXIIMKHIKO[\UNMICCIHFDCCCD@BEHLKE>>=>@B@?>BKLB=@A>A@<;><76ED@=<>CEA=EPOQQOLLNQOPPOOOQSB>_z‚…}ˆ•|rozŽŠ€…“”‰‡ŽŠ‹“–“’–”‘””’‘“”Ž“˜Œ”‡‰“”‰Š˜SIWSSWVQPSSORRQQLDAA9=GLJHFFOMB[nYO]]YZ^]\VTUVVXY[[\YXYZ\]\]_a^KHjˆ†|z{{|~|ƒ‡‡„…ˆ†‰‰‰‰ŒŽŠ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€%*4@:4LR.9hjcbdfjptts~Šw>(6889765/'%'#(,+)(+/,2.1U‹œ…€zvtromfd`\WTRQNNLKHGFFJKKKKKKLLLLMMMMNNNNNMMMMQRRSSUUUVTVVRTURH[qrqs^FBJMHEJJEIJKKJHEBFDB?;8548+1k§ †ŒmSKKKKNQQPPOPPQOQQOKMQVYZ]abaadea^]YTRSQPPQQRV[VX[[XY\[[\\[[\\Z^_^[WX]bmifgjliegjkiilmkqiXF<>>9)DqtQ:99@HMHCEHECKMCAD>>>?ED@IMCEKQJFJD9=F;-22-0;;735=DFLSTQQTUUWRPV[\ZYLGNPJHIM\\TNQOJHNJECA>>?A@?@ABB@>?@BBB?>BKNE>::=;438FF==^g8!"#"$<@)!*+8FLLMNOONNMNNMLOQRRPOPQRQPMQSQMNPRPOONNLKJGGIJFE>3($"$#  ! 
"%%#" &&'8Wigbfgjkhiq}€zƒ‚{ƒŒp9+@D4(02,++7H6!*?FGDA9=BD@>EOSQONQSQMOPPPOOQS@=]y‡‚€„Œ„{|‡Ž‡„’’ˆŠ‹‹’”Ž‘”“Ž‘‘’‘“•’‘“•‹Ž•ŠŒ‘ˆŒ™~QJXPRYYRPRRRTQORPG@C;]u^R\[WZ\[YSVZ[ZXZ[Z[[[ZZ[]^]]a_JHq“•ŠŒ’’ŽŽ— š’’–šš–’‘“‘€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€$/?;8EF6[†€ŠŠŒ‘—š›œš “U)085:875.&$&%'))(()+0/205Nc_[WSRRQOLLKJHIHIJIIJJKKLMMLLLLLKJONNMMLLKKKLMOPQQTTTTTTTTWSUVTSUSHVmrqq]EAEIIGHIIKC=??;87321/.,,+3&)f¨¢ƒtUKMKKMONNMNNOORSSQOMNPVW[__^`ddddd^VSTOPSUVTTVYVWYXWZYXXZ\[YZ]XZ[XUTX\afihikhbehiedeedi_N@?BA>-:iu[EA=AA=7FINUURRTTTVQORY]ZVNIMMIIKNWYTSYYRPPJEDC@@B??@BCB@>??@?@?><>GLB;=@===:9=:42>0)03,*&8M9"*>EEDC;BGNQQOQSRNOPPOOOQSF?\vˆ„‚ŽŒŠ‹Œ‹†ˆŠ…†“•‰ŽŠŒ”–“Ž‘Ž‹Ž”–“’””‘”––”ŠŒ‹…™{PLZSSWVOOSUSRQUXPEAA;>FIGHJFIE?`v^Q\[UVYY\[]ZZ\^\[[\_^\^\\_\Y]`JIw˜‘‘”•”ž˜›žŸŸš’Ž“”€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!,<:7:5?~®¢›¥Ÿ››››œ”’e0,97:9972*%#-)&%')('73591-9IPNMMNNMLNMKKJJKKMIGIMPMJIHGIKLLJMMMMMNNNLLMOPQRSVVUUUUTTWSTWTSTSHRiqonZCCIFCHIGII8+-2312/00001115+.h¬¤~‹uULQNLNMMLLLMNOTSRSRPNOUTVXYZ^bdfiie^YWSQPSTUUWXTTWUUVWUTVYXVW[VXZYWUUV]gkd_beeadda_``^]UHBED><12`vcK@:BKSL@EKFAC99<>@BA>=?@???@?=?JKA:=?=?>:9<;54DHD=AalA  #'#!?I/ %'4DHJLNPRSSPPNMMNNMMOPOOPQPPUVRNMNPOMLLNNLIJFDDBA8+%'### !""##!$%  "'%,Daicbifeijmryƒ}z~z‚ƒb60>:3/3/,+)8K9%.AD@:677787888\y‚…}x†’„ˆ”“ŠŽŒ…‡ˆŒ‘‘ŠŠ‘•’Ž‘Ž‹“•“——“–”Ž“Ž‰Œ‰ƒ˜yOMYRRWXRQSRQTTUUNFC>:?IJGGIELKF`rYN^]YZZX\]\[YYXYYZWZXW[YUX]V[`JJz¡›••—“‘’’‰ˆŠŽ˜Ÿ˜œš˜™š›€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€"#*97<@4Eƒ£”†€|vmfd`]ZJ.*74466430+&+)'&')*+66541*/?OONMKKLMMLKJJKLLMIFHLMKGNLJJLMKIGGIJKLNNMMMMMNNNLMNOQSTUXRTWTRTTLRjutr_HFLIDHJHHG8-.4432100.-,++..3iª¢||{TGNKLOQQQPQRSSSQQSSQQSRQRW[\\\]bgfb_]\]XSRRQRTVUXYWWYXXUSSTVWXVXYYWTQOV_eb]]`b^``^_aa_UOFEG@9<4 *XxiJ80@IJDCHIF@??B@:=FECJOKNURIPUKFLD8;A7,.0*)4;=65=FIPRRSUTRTTQPSZ]ZUNFFMQLHPW[Z[_\PKHECBB@=;??@CC@>??@@?@AA>EKIB>;;@@>97;955CHE>>\oO$ !"$&% "AL4 "$#$1BIJLNPRRRTTROPQQPPONNOQPNRQNNSTQOONMMMMKILGCA?=5("#!!!!#"$%# 
$(#'%.Hbfbehfhjjjr{ƒ|{zƒ~O*(0*())+...))8D4'2>;60)+,-/13446:>AKTRJOPPOOOQS<;^z‚yt‡‹ˆŽ‘‹†‰ŽŽˆ‰‹‡’’•’‘“–”‘••’˜—’•Œ’ŽŠ’‹„˜wOMVPQWZVTRMPUTQQNGBB9;FMLHEHIDAcx]O\\Y\[WXX[^^ZWWXWY[ZZ\[Y[^W\_IL{˜”—™”‘ŽŒŠŒ–›˜š›˜˜šš™€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ &%*74400/..01368;=>>CGsªœtsŠySIQNMPMMLLMNOORPPRRQTXXSQV]a`_c`]]bgd][ZXWTQQTPTYYUVXVWVUUY[WQTTUUUSOLQUZ_`acd``_^_ba^NIBDG>:D6! OxoK4-7GIDHHBA7=IE=85;>ERSILQMEHLD@FB9?D:--.,-6>@84;CEIQUSSSPOSPPTY[XVNJFEKMHLZ\ZX\XNJKGB@AAA?=?BC@<=A>@@@@A?<>LI;:>>=?=758744;EH@9Vr]2 #%'(!'EM6&"%"$1BJKLNPQQQSTUSQPPOQRSQPPPPPRPOQPNOPPOOMLKJKHEA;83)   #%# !!#!&%/Iadafegjkhhq}„}~y{ƒzO..4,/1.-1../1;B3)14,--)+-//023374..=PVQOPPPOOQS@?`y~‚€†ŽŽ‡ˆ‘‘‰„‹ˆˆŽŠ‰‘“‹‘”’‘”“’–—“”•“Ž”–Ž’Š‹…Ž˜vNKSTRTVTTSOQPORUND?B;=FIHHILJB>aw_R]\X\]Z\\\YXYZ[[\Z[^^\\^__Z^^HM{˜’–’‰Œ’“Œ‹‘Œ“’€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€%&'--,458>21IPHJKHGJJFBAEA<:2!!"&!'0+%*2=75784/-9INLMLKNMMMKJIIJJJKKHKNMKMLKLKKNKLKJMNMPMNONMNOPSQRUVSRUSSSSTVUQHN`rvq`KAEGFDEHIFD>25FLMPQSUVWWW[WWy§¤j|ZHNLLNTLIOSPOQSQPPQUVQSRQTY_a``ca_accdb][][VSUPSQOUZYVXUVZZWVXZVRRSSPLOW[\aa__[]`_`daWHIHDFBFDJRVTUUPMQOQUXWUUMEEEEHGHUYWUVWPGFGD@>=<>=?=BD=648:71=EE?CTgqF!#'(,)EN6'$$'(%)5>IGMQMPVTQSWVQQSQMORQLKMOSQTUPNPPORQMKMLIIHFB<5-& "!#'$" &%%;)+1,..*./111.,*2)&'):MRTNMPJIPMA=\x‚ƒ|€†ŽŠŽ‹‡Œ‰‚‹”‘‹Š‘Œ‹‘•–”–‘‘”‹’•’’“’‘‘Ž’”‡Œ‘”†—vMLVOUUPOSQLRUVWTJA?@;>FIHHHINE?`rZO`_WWZXZ]Y[YWXXY]\[]YY\ZZ\[][ILw—‘Œ“”Œ‘“‘Ž‹Š‰ŠŒ‹ŽŽ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€1<-$/55892-9JNIJFFFHIIFCBBAA;+!#)0-*.0./59731.9GNJHKLHHIJJJJILJJKKIJNKLJJKKKLMLILPNLNNNNNNNOPQSUUTSTUSUVVVUQNIK^qup]E>CFFEGIJFDA;E[ba]\[Z[]_a[WWy§¥h}_KPQOORMLONIIOVXZVOQWXQPQSWZ]__cdcffdcb`\XUUUUTTSTWUTVTUVUUXZZ[XVVWUPLU\_`aa_`_]^_]ZTMHGFEHD;AA*ArzW8003:FMG=:@JJA45BE?9:;>GKIELPLCBB>EOI525..47833>GDIPROPQOMURQTXYXXLGECEHHN][XYZSHDBCCBA=<>?A@AED?;<<=9669724;?GEJJOQNOTRSLNUTQPQMMNNKLOPMPSRPRRPNPPMLMLIJHFC<1'"! 
"&# !'$ %=[uƒ|}Š‹ˆŽ‘Š„‹ˆƒ‹‘Œ†ˆŽ”””–•‘”•‘“—˜‘‹Ž—’‘ŠˆŒ…Ž™vLN]LRSPQUUPQSSTRJA?A6' ",0+(,)'*-.--,.CRKEIHLJIIJJJIEKIFIKKLNLKKLMMKMMLMPNLLPONNNOOOOSUSRSTTTVWUTSRPIFUion_HCEFCCEIJGD@;H[_ZYYZZZZYY[VWz§¥gnlNIILQOMNQOKMTSUYXSSUTPNORTTW\`ddcghgfbb`\YWVUUYXTUUTTRPTYYTV[[XUUUTPMQX\^```aa^_`\VOIDBDGMG>BI25j|^;.33:GMB9:FFA;44:6:9=EHFGEEJQRRRNFENH3/1,,15712?HDMSSOQSRQSOOTZZWTMKGCEFFP`]YWTI?>@AA@A?>@BB@ACB>?>=ALJ?>?=;<5378314>GF;>Tioc;#" $'"*ENC2%/-(("&;KLPROORQMPUTOMPSSQQPMLMLPPPPRQPQNNNNMLJIHHHF:*! !$!$'#!%>Zc_`cdfghjmrvx€}|{„~O)#%%&'"%#""'%"("$)')363/.,*)'&&!(&&+$';NSMKPRSN?A_v€†€}ŠŽŠŽ‹ˆ’Š„‹Œ‡Š’”Œ”””—”‘‘“‘’’Ž‘–•‘”Ž†Š’‹„’oJMYVXVQORQNQQPQQIA?>:=FIHGFEKDCfv[M[\Y[]WVUZYZZXY\\XXZY[]\\WW\\KMv””’••–‘“˜—““™›Ÿ¢ ›™˜•€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€B?AC8.18BGDGHGMNKNQONROFBJF3,1-,05722@HDMSSOPSSRTPPW]^XUJIEBHJHSac\SNG@@CEB?????ECA@BB@?>?@?>?>=COMA>A><::96310/;FH>>UgihH"! (*#.FGB9&/0/-%);IKOQPPQQKMPQNNPRPOQQNMMLLMMPSQNPONMNMKHHGHKI:& !%(!!'A]d]^ddeghjmrv}z~}ƒuF" !"#$"$$""&&&'$"%&&.62*#$%&'&%$$)$%-'"/FTOIPRRP@B`xˆˆŠ‹‹‘”‹…Œ“Š‰’ŽŽ”•‘Œ‘“’“’‘’‘“”‘Œ‘””””‘‹‰Œ‹Š“•rQOPPSTRSVVSQPNOPJB@>:?HKIHFAIEDdqWLZ[X\]ZZYYZXWYYXZZZ[ZYZZ\XX]^KLs‘ŠŠ‹”•“Œ‘‘“’“—•””“”•“€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€*56.+9B?)(>KGFIIHHIIGHJEDIKGD>3&  (,((('((&$% )>KIIKIJKKKJJJKIJLKKLLJHKLJKOMJNOKKNPMNMMMOPPPOPPPPQRQOSWVTSRPKDSimfYF>BFFGHGECBA=GY[V][YXWWXYYRVz¢¢‚jp€xWJNONOMMNMLLOSSXZXX[[UNLOQOORUZ^_egffjghjib`b`]WUWWUWUVWUSSVYVVWXYWSPOPU[`a^ZZ[\[UQKC?@HKJC=AJ;'VxjH7.3BRN82>?:GJ5'1;JPGBIMNEEOWQJIF==FE2*-*)15834@HDLRTQSTSQUQPU\]ZWNKGCJJFM`bZPOIAADDA@A@?AAA@>?BA>>?@>>??=CPN@<>=<88742100;EI>>SgfnV+#(#)==<5&(./("+;EIMPQQQRSQOPSRPNMNQRPPPPRSOKORRSQNMNMJHHHHIH;(""%& )E_c]^dceghjnsw…€||~‚n?"%%%&')(&#''$))%&&))$ !#$%%%$%!!('$&>SQJMOPQ??]v€„~~‰Œ‰‡ŽŠ†Š‘‹ƒŒ•‘ˆ„‘ŽŽ””‘““‘’•”“–•Ž“–‘”“ŽŠ‰‹Œ‘Š’•rOPVQUWWVUQMQPNOQKCA?;>GIIHGHNFCbpYR\[Z]]XXUY[YXYXWXY[][YWY\ZZ_^KJqˆ†‹‡‡‘Ž–’Ž‘ŽŽ’‘‹‹Œ’‘€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€'.:9-2B>0-=JHEJEDFJJIKPMLOPOPPO:." 
".&((&&&%#(%#2GKGKFHHHGGGHHGIIGKLGIJKLJKMNMKKLKLNNLLLMOPQRPPQRPPRTQTUTRRQOJCNdkcUH?CEGGHFCDEEAIZ]ZXXWVVUUUWPWyžŸ…qky|aHINNLNOOMLORSSX[ZY[[XRNPQPOOQVWY^cdeefhijjfbhe^Y[[XWVTUYXTU[VUUUVVTSPNQY^_[V^YYYPF@=ACJJE@$$NvpM6.6GUK3.=>:JF(%59NTF;CMJAEKNICFF@7@C4*.,)24766?FEJRTSTUSQROOV[\XULIEDJLHM]^UQSL@@HB>AB>>DBBA?@DEB>?@??@?>DPNA<==>;4144004?EG=;Qfgn^4" #"4BB91%"'&$+2BHLNQPOROPPPQQPRNNPPMMNNMNMMNNKJQNLMMJIHLGDC;- )'##,I`b]_ccdfhjnswƒ†y}„f6&-+,+,1./.30))% ##&&##"$#"!!#%&%$##$'((;RSLLLNQ??]w€…~~ŠŽŒ‹ˆ†ˆŠ‰–’ˆ‰’•Ž‹‘”’”—“‹Œ’”•”‘••’’Žˆˆ„Ž˜qGL`TUTSSSSSQQPRRKCA@:;BEFHHILB@aoWO[XVZYWZX[YZZYYZXVZ\^ZWY[[[^^JKr‘‹†ˆ†…Œ‹ŠŠ‹ŠŒŽ‹ŒŒ‹‹Ž€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ *0?C0/A<23AKIGJOMJGHLU[][UNKJJLKKJD;.!'(*($"#" $%!*BMLMGGGGHIJLJGHKLJJJKLKJLKLMNJKNLJLMMNNMMMNOPPQQPPRUQRRPQRROMHPgqdOCDEEDDFECEEE@HY\YVWXYYXVUTOWw™œˆxmp~mQORNLMNNOQSUVRU\_`_[ZVSRQQPORTSRX]adbbcfjkhdilg``b^YZZXUTWYZWWXYZYVURNQX]_^ZVQQSMEBB>AGDABABFC+BptQ4+2@KB-.@;>L?',AC@>@@??@@>DNMB?><=8436427@@CF?;OdfjfB$!$$ 1FE>>0&+0+',AHKMQNKOSPNRUONUQPRRPPPOMJJMMKLLQNKKLLKKLHD?7+&/-!"0Na`^`abdfhjntx†y‚X/)300..519<;/+(#&&**')$#" !"'&&(((+/=PSOMLOP?B`v€‡~ˆŽŽˆ‡’‹‰”‘‰Ž•”••Ž‘–”‘•Žˆ˜–ŒŒ’‘“’‹‰Œ‹„“ŸyKL\VTRQQRTUOQRSRJA@A;>EHHIIHKDDdqXP^ZVXVUYWZWXZYZ[XXZY[ZW[\[Z]\JLv–“‰‹‰ˆŒŠ‹‹‹‹ˆ„†ˆ„‰†ˆ‹ŠŠ‹‹€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!)1CI82;;16GMJIJLMJB;::9582+-.**.258@GB7*%*,*('$"$!&:HHEIIHIIIHGJLJILKHGLMIHKJHIKKNOJIKKMNNMLLMNPOOOPPQRLNPQSUSOJCH^maJ@ACDDFGFDCCB=FX[XWWVUUVWXSNWw–›Š}chvQJPPPMJKNRPLSNOV[^`_ZZWTQPQPPSRRW[]`f^\bhiijikjihd``_[XYYVUXYYXWWWWVRMOUXZ\ZUSQNF@=9?AEA?CCAGG07iuS4/4?H>,/BCED518;BGHB@@>>>@@??@@?AJHA?<9971-0;EHG>AFB>ObdkpQ%'&&!.?:9?@;<;*#2AILMPMHLMQPNQRRSSRSUTSQMOMMLILOMPMJJKMNMIJG>1%*30$ 1P`__a`bdfhjntx~}~‰yJ-*511/.612;3!%'#(*/,*.)""""""##(&$(,))0AOSQONPO;A_r|†€y…‰ˆŠ‡„’‹ƒŠ’…†’‹Š’•’‘š—Œ’›•‹‰‘˜“’‘’’‡„ˆ“nJLWQRTWVSPOMPRTQH@><9>GJHFDEJDCanYT\YX\ZXZVVWVW[[YZXYX\\YZYYX[[JNz›šŽ‹‹Œ‡…†……‰Ž‰ˆŒŒ‹‰€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!05?J7)9>59FKJJII:,&$$%&$%&%$$&('(" &,2;H:( 
&))'"%#%*?MEHGGIIFEEJIHIIIHHKIJMLIJNJKLKKJKLLKNQOMNNQQRPORRLNRTQPQRPLCJ\i_JADEDDFEDGBBCEKUYSWXUPPUXVWLSzšœy\c€rMIPTPOMLMNPRLNRW[_ab\XXWUUTPSPOPRSW\_\^_`ekjgihcchhd\[XVUVWXVVTTWWVVQNKLRYYURRSLA>@==?@33@A=@948=BIF=9;:8346?GMOECBD?9G^gam`- "#$%(.6<3@=<@/!,BGKLNONJMOPPPQRROOPRRQNLKMOONOOOLMMMLKJJKJF:)+4:9'$#2Va\\]achhinpsx…~~‰qA&+*&&+.-.2/#$))&*01/14.-+(&'((&'('(,01LROKKNRL>Ebt|‚~€‡‘Žˆˆ…“Œ„Œ•‰‰‘‘Œ”“Œ”•’‘‘’‘Ž“ŽŒ‘’Œ†…Ž’Œ“•oJNZTSSRQQPPOPPRRJ@=:9?GIHIJFHAEgnUT_ZVVYZZ\ZYUVZVTZ[WX\[[]]\UYYFN –‘•——“Š‹‰‰‰ƒ€†Šˆ‡ŒŒ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€"+:F@0/=?08KLJH@) !"!**)'&%&&$(('($)8D@.%&$  # $7JFEHJIHJJIIHHIJJJJLJKLKIJLJKKLKKLMOOPPMMNMMONNQRPPSSRQRRPMMHKWfaJ=DC@AEFCCD@=@IVXQWXVTSUVU\PSx–™}^\zvSJNQUTRMJJMPSSTWZ_cf`][ZVVVTTSQOOQSUX[YZbgedbcdehhc]_^[YYWUUUVUUXVSRVLHOVVTTMLJHECABCCA?>ACCAE8)_{`<:@=:978688867=;::;;;BDAINC7GVJ;6:EC3),,&340*0@IIKSSRRSUONRVXZ^^[VNE@@GMNXYPIKH@>BCB?>=<=>??C>??FD;8:979?DKLMNF@EG=7J`dbke9!###'08>?9>AD@@A@><=A>BDAAA@=@A?9648?MOONJJNIBBE?:I_fdhlK"# ")/=936<835>FLLKKLLIJLOOMMOPOONONMLNONLMNONNKKMMKKMMH=.!'7BC:'(+!5<% 6Yc^]_baefgkmpu…ˆ„‹l6 #%'&&"#.588610674101110486596;KLNLMNNRP;A_s€†{v‰‰Š•Ž„„ˆ‘ŒƒŒ•Ž‚‰™˜’‘•”•–Ž•–‹’•Ž‘•ŠŠ‰Œˆ“nLP\TTSSRRRQQQPSTL@;97?KOLIFFIBDemSP]ZZ[\ZXW]^YTX[ZZZ[\ZXY[ZY\a[GR—’’“•”Œ’ŠŠŠ‰‡ƒƒ…‡†‰ŒŠ†€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€.2+*.4<7;AIMD.$%'(&"!#&"" #0;B7*!!'$#" 'BGFDDGIIIIKKJJJKJIHKLJIJJHJKLLLLMNKKLKJMPPPNORQRSQSRSTSOORKGEPgdI>BDDBEEDDDCBBHSZWXUVZZWVYYTWw””Œ„gTkxYIMOMOQQPOPQQQRTW\`bbaba[YYXSTRONNONKMRUTY\Yabdfecccc`b`WVXVXXVTVVUWQKIOSSSTKIA>>@EMIFCDEC@=?HD*N{f4#368;7326;ADOD4:6??4*((%'+.19AECMUSQSRTTPTTZfcUNVQDAGIFFGHFFGC??@=?A?>?=>=?BA=>BBB>99866:;95325777@KSMNKNNMQP>?]wƒ‚x{‹‘……”„†‘”ˆ•‰ŠŽ‹˜—‘”ŠŽ’Ž‹”‰‰‘˜’‰‰‹‘Œ‰”—qKOZUUTSRQQPMPQSRJBA<:?GGFGHLLCGgnTT\XTVWWTSTXYYYWVX^ZWXZ\ZW\Z^^IQ~›’ŒŽ•—ŽŒ’”‘‹ˆŽŠˆ‹Œ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€%%'.+,;59DNJ3 &*,5FI;.(&$!$-BIOD6.%# ! 
$>FECBEHHHHIIIIJLLJIKLJIKLKIJLLLLNOLJLMLKMNMLNOPQRORQRTROORIHENffK<@EECDEDDFB@CLY\UWTTXYXY]WTVs‹‰iVivZINONNNPRRPNQRUW\_bdabeea^][XVTTRPOOKJOSSSVVY]`aabcdcb`^ZWVTRTTSTSRTSMJLPQPOGDB@@EJIHGDBABBBAGE.Dvm<7C?8:94,6;6135/,,*+*+/5:>C@<=?@ABCBE>;>@=:9NK>>HOE=BFJEE:3AH<9@<60-,*(,)(*1:BFJRPSN?DZZVUY]aefUJ@CGC>?AJMGDDA:==>>?@;30003579;?@AC?:9<>BFJLJFEAFHKGFIC?BFA>LQLMICGHEIMF?HHA9::7@A@91;E@@@:4111005999BPYY\abdaZ[]LLS]cca_PE@DFDB@DLOGACB;?BDD>53841021-.1.12248:9:LVQJHIJGHGHEFIBA@DD?J_ichgmZ-!! "($ "&-6=ELLLMONNPQQPOOOLNPPONMMPQQOOOPOQOMMNLJHB?.(7@HI;(-GD%3O_J%"%:Zb\]`eeiiimnou~‚ƒŠ„T*(/+23(&+$'(!%(3<7+$"&,-+.5;?HKHFCJJBEflSS]XUWZZXVYXWWXZ[ZV\ZUX]\YW\`UBT„˜’–‘”‘ŽŒ‘”–—™š˜•—š€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€"&+,11.9MTI@:7-,3/&)./1112573/00-0424467640.-.)/F: " ,CKFCEGGEEEGGFGKKHIKLMLJIILMMLJIIJKLNMKMOMNJKPONOOMQRPOPPOMIFHZfWD>GJFDEEEE@=>HX^ZUZ\YXXTNYYUh~||„|aZjbLJQJJKMNOPPOPQSUY\]_^bgikjf`bdbbb`]\VUTQRTSPOQUVW_ic`dbXW[XVWVSRQPSOIGNTSJCDFGMSOFDMHFHGCDH>FXS+)_rTCDCIG<;@@,073345=9.,>B7=E:0+9GCDCBBFHIKNNURKQbjebimmlodF>BFTef]\TGAAABD@FMOF?AC>?BDDA813:<>=;::;<=<941/.4;=:>DC@HFBEIMPF>BGA98?HHFEFDJCDeoTO]ZXWWWZ]YVX\ZVVXYZYY[YX\YZ_ZFP~––’’Ž“š’Œ‡ŒŒ’˜›™——˜€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€"#!!""%&+,14-.$  !""! """!!!#$!).(&12# ,>IDEGEGIHBHIHJKKLIHJLKIIKHIKMLJKNLKMNJJJHIKNMLKMNQNMQRNLMKKIEUgX@>CDBCCBBFB?@HV][WXWTUXWRQTYgvx|Š‚gYfbMKVPMNOKLOLNJJQUWZ^^_abcfikfecbaabb]ZWVVVSPQRTW[^`_acfe`[ZZWWVRRRQMNHCHTUKEGGNWVLFGJHLMIGE@BK]\3YsN0;C6.5:=C=4>A/0HM819NM9@F:/0:A?9CBGJC<::AFKKXkplhdhqmYG@A?CGN[_VL?=BA?CFFMKFJMGA@A>:9=??<99<>==?FCBCA==@<:6322343:7?IGECAGJA=SfabecllD!'* $#+/!,;>GJ@.!";K=% ;R\J)#!5T_]``^`hljkpsp€ˆ€†K&-+(,*(.*&*% *3:?8,  $%(/8HUPJOIONKGKSNCFFFGD@KCEejSS]YXZYXYXWWXZ[ZXWZZZYY[[Z]^]RAP{†‹Ž‰‡‰‹ŽŽŒŠŠ‘”‘’•”€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€" %"$)+*! ,FJFFGDEJKHHJLJHIKHJKJIJJJIJLLJILQIJKKKLLNHKNPONNPNNOPPONOIIIESfZD/PS73CFHGMHIMGEHPOPLNI75D>22>B>>DB?CFENTVYTHB<::?EHC<@CFFCA@?@DIJE@@DCDEFEB?=68675387VjgbjgeldF..! !" 
+=DIMPNLONOPPQRRQTSSSSSRQRQLNSRNPRPMNMLKG<+.Ebs~„z~†ˆ‹ŽŽ‹‡Ž“Ž‡‹‘Ž‰‡‘‘‰Œ˜™Š“Ž’”‘Ž‘•“–˜”’‡ƒ‡Ž‹ƒŽ’jGLVSPSTPNONONNQSLA=:CECCCBCDA==ET[XWUVXVSTWWQP_mjo‚†o[^\NMTPNNNLMOMJKLMORTT^]][]`eihfeghhfchie^\]\Y\YWVW[_cfhgcbcb][ZWTSSSQKHJRXQIIKNZ`UIFFJJLLHEDAIRaa=JeJ=JRHDIHGGGDFC>ADNGA@B?86@?30=A=?BH>/18=B:9/18=B:@YgYGEKMFCGHCCFAA>@CAAGL^bb_YK>;BE>>The\ceciqoe]I+#(&!$5>DINPKKRQPOOPRRQPQRQPOPQNORRQOOOOLKLLKMH8&$1?FGC4$*?G7"(BUZF% 7V^[]_bdimkmrusw‚ƒ‰xA!)+',)(,)$&!'.2*" (.=D>1"!'0?WcZOLJNKINF:Cbu…{~Š‰ŠŽŽ‰…ˆŽŒ‡‡’“‰Š–˜’•’ŽŽŽ’‘Ž”‘ŠŠŒŒˆ†‰Ž‡‘”lJPZNOSSOPRQSQPRQF<;><@EGHGCGL@AdiQP\WWYWWXWUUXZZWVW[VUWZXXZX[^VEV‚’‰†‡ŠŠ‡‡Š‹‘ŒŠ‹Œ‹‘’‘’˜€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€2SA!$%',08<=;547=?>855-"!&)&,AFCCGGIJJFGHGGIJGIKKLKKKLIHIJKKJIJMMMMLJLMMMMLMNOOPPPNOOOLJIFN`\F=ABABB@@A@@AIVZTVWUTUXWSVTT_hel‚„r[WULKPPOONMMNNSPMMOQRRSVZ^afilkjigedddcffdccb`a^YVUVZ^adedbc_ZTXYXWXTNIGJQSMGHMR[YJEHIHMQOHDEGNWceE 8VM9BKD=DFE<@?;22<;::;=5-39B@609CC?EM;0@E98MG9BGCLJIRUMFHHFEGEEHKHEC@@CITbkikeWH<=Rechihebdo{„sE"!"=J=+(6@EIMOLLRROMPPOORPRRQPOPRORVVQSRNQOMOLLNI6#!*5BIG?-0AC1 .GWYC#7U\Y\_ebgjkmrrpyƒ†‰q;$/+'+(&,'##" ##11)&)#"-:IL4  )1BZjdOFLILTH6Bbv…{€ŠˆŽ“‘Š…ŠŽŒŽ‹‰…‡‘–“‹‘“Ž’”“ŽˆŠŽŽŠ’’Šƒ…‹†ŽgGOXKPTQOQSRMMNPNG@A<9#&)+05;>>?DHFD>3($)/+/.09954**+-,)3GFDEFHHIHGHFEHJIGFGHHHGHHGJKJHILOIILNLLNLLMNNNMMMPNNOPPPPNIGFL^^F:BEB@?@CDB?=EU[VUVUSUXWSTUU^gek{v_SQKLNPPONNNNOKLORNHEGNPTW[^bdiiiihhhhffgigdce`][ZXW[__aefea]ZRRRRRQOLJDHSSHDIOW]THGMLLOOKFDHNT[chQ%-CD;C@6=?429?EE<JMDALRMF=58FKA=D<286/1;71H^`^N>HOKFEFELNHDH>7>GEBAHQZ^d\NDFKLOC83=KOF;5;FNNGA>LLKLSZXQ=AL[gfZNLOIMURPLDCC>>Sfddie_dinv~tM2;:'$7Y`F/,6BHIINPOOPNOQQNOQTRPPRSRQNRRPQOMNNLKKJHKH8%&0:DIG<(4B?+2KYW@"&?Z^[\^efghhlpssw€‡‰k5#.*&+'&*&!$! 
*:2''-$"*?UN3!"&+Bay^FIMOSL9Car}ƒ~†‘Šˆ”“ˆŒ’Œ†‰‡‰Ž‡ˆ‰•”Œ‹’’Ž†ˆŽ‰„ˆ‘‰„ŽhGMSPUVQOONMMPQPME?=>:>DHGFCBJAEegPQZVVXWWWWUZ[XY[ZTZUUZ]YXZYY]SA[Žœ‘‘ŒŒ‘Ž‘”•ŒŠŠŒŽ‘““€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€1K9$.120//149=BEGCDEFEB=:)($ !+78=>?A??CFEEECCDFIHEFKJHJIGHLKHHLIKLKJKMMJILMJLOMMMMNNOOPPNMOPQQQNGDEJ]bJ=CECCBBC>???GUXPTSTWUSTXTSQ[hhfo}{eRNMOOPQOMONMPPMMPPKKOOOPQTZafhgfhihd`gccffccffcbb_[\`bcfheb^]YRNQOKJNGFLUQHEGNY[QIJKJNNIFFGJNY^aj[06FG83?OM<:KLPUTTVOLMPTVUU[TIB;6?E<57<;0.11./*8MVO81CPMHIJHGME@GC77EHB==<50<84ALT]dbYOHYfc]XPXcJFD?>Sdcfke_dhjr‚uP7;;6>>G_^A25?DIHGMQPOPQRQPQRSRQOOPPPPSRRUSOMQNOKLLKML=,%+5?FGB7%#8C:%6NZU>!(@Y\[]^eghhkorttvŠk5$,*&*'$)% ! "5;/%*2, *DYN5 ,Aiq^KJLOM9Cbs|‚}…ŒŠŒŒˆŠ“‹ƒ’Š„„‘ˆŠ“’†’“‘‹ŽŽŽŽ‹‹ŽŒˆ‹‘…„‘”lKMPRTSQRQNMMPPNKGA?97=HJHFDDKADcfPU]YXZXXXWX]]XUY[ZUYZYWYZ[UW^UB]š‘“‘Œ‘Ž‘•“Ž‘’”””—€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€;P4(0;CGGFFFFFEEEEECDDEA8+! !  !->FCEHE@EEDDDBCEJHGHIIHJHJKJJJKJHIIJLNMJKKLLJJKLNMLKLMOPQRQNNOPNNGEFHZcL?CDCEECBCCA?FV[TTSTVURTXUQP^lf_eu{fNIJMLPROMPOMPNKKORQONNOQQSUY[``bfijgdeaadgfefdcdeb^_c`beffc]XUPOQPJGHEIRTLGGHTYSGEFHLKNKFEFKT]__ke85PXLMRUVSRNOJHINQHDDGDDA?NXLGC7146=:<<=FQTIE91154,:FLGGLJCLRNCFJ@13<=9<<50'%!%-.+,3IO=147?EOR>;DEW[RML=((67<99DJJNOJIGC?=BKIC=?DA97ANOHF@2)5859??>;- $!!"+/+,6DKBINJEFKO@:9?FHJLEA?CJMIDHMWdebbWJDB?BViid`fjdgoosrW3)/7>47VbK=?DFIJKLJMVPMMQRQQSTQONPRQPRVTOOQONLPJHLJIMH;23;EH@5- #/;B3#9Q[S;!1GZ\^`^dgeeimprrwz…ˆa.")*&*&$($3B@.&),/.%#!/LT:% !;MjrUEMG;Fdsz{†Œ‰‰Ž‘†ŽŒ‡‹‘ƒ‡ˆ†Ž’ŽŒ‹‹ŒŽ‘‘‰‰ŒŽ‡†ˆˆ…‡Ž‹‡‘gJPVTQLOVSMOMMNRRH?>>;>EDBEGHLAFhhNO[VVWVUVUTWXTRTVUTXZYZ\[XYX]R=[’œ•’’””’’’’“““’’‘‘‘‘’“–—€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€  F8/ADEKE?>ABA>@ABCCBBB<4-#!#" ,CK@AJHCGHFEGGFCIKHHJJGIHIKKJIJIJLLLLLLMKIJLMMKKNMMONLNMLNLLNPWUKDDFWbN=>DDCC@BBA@?FRUMTWSNQTUXTRTfn_ZbjoiUHJONOOMNQMKPQMNRQLKNMNNMQX]]accabffb\_a__beeda^`a]Z\`bdb`a_YSPMKLMLKEHRRHHPTUPJDCDGIMNNLJLU_`^`muM#/@BJ\R:>KSL=?J=0=D><8@F;5HTKEB+#3/0687DMEED><:7=CEQL>?D@8819EGFD<=?A@>;99;<4*$$$ 
!&*089579;@=BHKJEA>GLOIAAKVhndTUWNGBBD?=SihcfigcdltusM$#059;BOYMAFIIKMNMLLLSSQOQRPNOQQQRSQMRXUOQSPOSQLKNMJKIF?@ADI?4+  '1?A/&DU_U:";U\Y\_bchifdjttnv~Œ‰[*%.(()%(*  4@?0).1.,/%%9OR4$ !!(6XtgONK8Cdt{x”Š’’…Šˆ‹‹ˆˆ‰‰‹Œ‰ŒŒ‘ˆ”‡‰ˆ‡…‡‰‰ŠŒ‡‹ŽcCMURPOPQPNKRQNOPC;A;6;DGIHBAGAHdePT^VUZXTTWYWYXTWZUSUUUY\[[U[aPCf’—“—™™˜—˜˜™–’–˜’‘”“Ž—€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€*1%*BKGAFC@ACDCA@BDCA=;:.'!" !#"'9FFGIDBFHECEFFFHJJIHGGHJIFEIKKLJHHJKJILKJJKKKKKJLMKLMKRONKLMMSVJCEDR_M<>EDBB@C?<<>FQUOTURSVUQRTQRgp^TZekgUJKOOLOMMPPOQQRPLKOPOMPQPQUZ[b_`fgb_a[\[YY\_`aca^^_^_a`ab`_[UPOOOONLJLLRRKLTVUOIEEFJMOLKKMQX_b`bozX""0?BFJE@B@99EEKXM57@A@HH@>ED969:=GMC5??<=<<<2;=<91%&" !"$'&$'05:>@@ABKVUJMajdh_PFHHECDCE?=SgecfifcdlsrI''--33?BFJJJHIKMNMLLMPPOPSSQQTSRSSQOMTSTTQMOQPNLLLMMJKIDFEFG;-%%-7>>- ,DS\Q7&@X]YZ\aecilggnsst|‹ˆV% *(()%(*  3@<0)-1/.0-  ):PF."*CfkWNL=Ebu€x„’‘‹Œ‘‘ŽˆŠ‰ˆ‘“Š„‹ŽŒˆŠ‘ŽŒ“”“Žˆ†ˆ‰Ž…‡‰…‰ŽˆƒcEPVQOOPQQOMRSNMMD=A<8?FEFFBBE@JgeOSZXWUSWZXWWUWZWTVZURVYVV\Y[_O@b—ŒŽ‘•–––•˜˜–’‘–˜–“‘–˜’’—š€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€",?EBHHBBAABBA@CA?@A<0''!  #,8CHFBCGIFDEGHFIJHHIIFFGIJJIIIIJJIGGHJIIJKJJJKMKLMKLNMMLMMPPMRYJDGCM\MEC3,48;EMC3413@CBE?78@HI@EEE>>Tfcdfhfcdkr|wO)/>;5679?AFNLIKMMLLLMMORSTPQTROPTURRTUSQQQQOMMNNKINNFKLJLJGE8)!")4;?;,$5ERVJ3,I[\X[]bedijghnsst|ŒˆR&())$()"2<:0),11003''CP@$#/MfcUH8De|ˆ‚uƒ“Ž†Š”’‹ƒ‰‹ˆŽŠˆ‰‰‹‘Œ‰Ž’ŒŽŽŒ‰‰‰‰ŒŒƒˆ‰ƒŠ“‹ƒeFOVPONNPQPPNQPOOF==;8?FFEFBED=IhdNTZXWTTXZVXYVV\YTW[YWWYWWYYY^PA^‰’‰‹‘’‘‘—™•““““›–“–›™˜™€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€%6CAAHBAAA?>==>?AA;2*%$(/5@FDCCFGECDFFDHIGGJJGIHHLLJHIJJJJKJIHIJKKKJIIKMKJLLKNMMMLOOMS[MFHCL[O=?DDAB@C@==@GRXUSWY]_ZUVXQOl~hTW`ebVLKLJMQOMOQOPLMQTTQNLLMMLMQW[\`cehlkhgd^XVWUSYYZ_a]\aa`bda_ZUSQNKJLPSILSXWWXVMIECFJMNMHFMV]`agkos}m7(GVUZRQ[VNLPVLEH@7=BCOQ?9EMDFQME;$=:&.;73AALL<7D?DCFHD@?D64238A@6.'$#!#""""  #%%+7?@ADBEIFCLPEBED=@VfcdfgecekqupR;AFJNKJKMMLLLMOONQUURRTQQUTQORSRPQSMJPNNOONPOJKLKMJGF9*! 
%.7<@9*)@ADF;A@?><<=>><7-$ "6BBEGEEEEEDDEFFFEFHIHIJKKIFHJJGJJJKLLKJLLLLKJIHLNLKMMLOKNNLONKS\OGFBKZO@?CBBC@A??>=COUPS^eeda_b^TPo…kSU^b_UMKKJOONORONROMOSTPOQOMJHJNSW[]afjnnlhfa\ZXUPRUVW[\[[^ad`\YWQOOMLKLNPLMRWZZXTJHFEHMOMMKLS\behiourwn?!=H?@COWG;@IKD7=KB0=22;?6=B0,5/5E>746;<9>51/+)("  ! "  !"#!!##!+&+105>?=DD:6=EGGHCIG@@?>>===<;5.'##!",AJFDDDGGFDDEFGFGGFGHHHHKIGFGIHGHIJJHIJLJJJKLLLKJIJJIJLJJNOMPNJR]REDBJUOB?AABD@@@A@>BOUPU`b]Y\bjj_[|‘qST`b^TLJJIRQOPRNLPOOPQQRRSTOJHIJMPU\achnmgffd`\ZUPSRPRVXX[[_b^WWUPLLKKKKKKKLOUYWPJKIHJNPOLNOSZ`dhlkqwponH%19?AE?7=ND0:GGPOKERG4J^]MKSb\OD4*0+$,68:7=IHIJ><@A>@HD5.169779;=7=F;4''6CQQG3!,CUYY_a`defghhjnqsuy‹†J '&!+)!"(2:>4,-11/.-5<9)!0IE+%2Tl_=C`tƒ‹~}‘‰‹’Ž‰‰‹Œˆ‡ŠŒŒ‰‹Œ’’Ž‘’‹““ŽŽ‰‰†‚‡Œ…‚‰„‹š]CPTRRQOMMNOOKMSOD=>:9AGDDFCEF?HgdNRXVWXTSVYUUWYVTTWXVUZ[VU[XX]PA_Š”ˆˆˆˆˆ‰‰‰ŠŒŠ†…‡‡‰Œ’“’€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€=EBAA=><==<==81+!#$# !!!#!(@DEBABDFEDCCCDEFFGGEGIIGIFFHHGHJKIGGIJJIGHIJJKMNJIKLJKLJOQNJNLHR`UDCFHQPB@BABC@A@@??DQXVTZYSQS\gmddˆwSS]^ZSMLNNNPOORQPSNPQPQSTRTPNLJHJMPY^^cmmfkmkfa\WRTQOQUSV\]^_[URQMLJHHHIIHGKPTUQJFMHHOSPLKLOU[_bfknpulkpR'8HNUQ99UeZBJ_]YYXY\ZVVVUPTOMRTK9//'$-//677HLJJFMJE?;;:;FB90$!0?FQOA-3NZZY^_\cfggghknqsww‰…J%$ ,* (4=>2*.20/1028;3# $;G8#!)>\fBAax‚‰‚‡Œ‘ŒˆŠŒˆ†ŠŽ‡†‹‹’‹ˆ‘ŒŽŽ‰ŠŽŒ‹Žƒ‚Œ“‰‡Ž†Š˜^BNSKOSUSOLKMKNSOE><:9AGDCFDEE?KhbLSWVWWTUWVVWWXXUTXWZZXYYYYVY^NAc‘™ŽŽŒ‘‘ŽŽ‰‰Ž€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ =A?>;:=8=<<<:1#!! !#!  (>NE@DHGECFEDDDDFHGDDGIIIJCGJJIIJKIJJJIHHHHIJIHHJMIKKKMLJKLNKIPOJSdXEEIHOQA@CBAB@B<;;>DOXXQUWWVSV`f_c‹ wOO``[RLKLNOSQNPQOOOOPQRSTUQPPPLIJNRTW\cimnjmnkgdb`QSSRSUVZZXWVRONLKHFFHHGEHNTTQMLONFFRVNHIQTZadfjnporjjtY/$NcVMWSPWSMPWSJHIF@ICBLOG<=0 +$"3.!9MJD>IOKMNHIRVWSTQGGIA:53;29M7+5.+=E?A928@>>9, ,("'/*+,+))*,&$""" !"!!$&6IL>?TdggfdcceiktrT87;9?NHD=4,"#6EIPL>*!9Ua\W\]\bechllhhpxyx‰‡M$# ,*!(4=>0)/4103334660$#-FFFHDGE>Lh`KV[UTXYWTPVXWUUVVVW[YTTY[\RX_PCh”š“’‘‘‘‘™‹“”“’–”“•—€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€(?A=9::9<;84-#!!!" "! 
!#$" '#"!"5FFDEFGEEFEDGGCFJFHHHHHIIIHHHIJJIGHHIKJGGHLLIIKKJMMJIKMMKKKLLJNMKSeYFFKILL=ADDBAABA<CUL>=ELE9CJA7;E?67HN@2@RM@C3 '%0'+GME>6EFKSULIRPI>55995011-/@JF@ELEFH><561/7>@A8% *&%) +, $/2.*//++(&& !&%"!!6EH>BYijceecbelqxsM*/<>@L>9@HKLQRMJLONLJNOMOUUONRSRRQQRSSQPNKKNNNLKKMNMLLOOLJJJJE>99>CB;/$ !&:JHPR>%$1@O[YZ^a``adehkoquxu€†L !(# #'2=81-/341/12873;8#$%,=>, $.AXTD^~‚€…‹‘Š‡“Š‹ŒŒˆ…ˆŠ‡ƒ‰ŒˆˆŒŒˆ‘‰‘‹‹†…Ž•‡„ŠŒˆ‰ŠŠ…„“aCMSMQQMMQQLQONQPG>><=ADDEEA?C>Lg_MUVUY\ZWWVQTVVWXVSVUVZZVUV[XZLBf’›’’‘——’‘••‘‘•••™—™š˜—™š€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€*=<;=>76?95,# #"$"!#&'$!$% *;DHGGECBCDEFCBEGECCHGFEEEEFIHFFGHIHJHFHJJIIHJKHFJLHJNKFHOPKJLLKNMIPi\ECJIKJ?@BBABDF@;;=BOZYUVX]^XZei]b‰¤…XM[aWMLLMLORPPRRPRSTRPPRSSWTRQPMLNHKOSX]cgmoponnljg`XSQQU[][VSPNLJIIFFHHFGHMQOGEHLMPONOLKQSYagijmqrpmlkxm=1>34??BA7:G@/8ECHTMGQJ@L[]XPVd^HF>&"(1+,026BE:3DONLJ@2>@<2/598;ACAACHLDGRF8>>5BXhgfggdacinusT20;?BBAAFMIFMTOKLMMMNIOQOPSROPOOQTUTRQRSQLKNPNMLLMNMMMOOMKLKJLC?CC?:6-!$+:IJPS@)(6HW\ZY[]]_acfijlptwvˆP "'#"%'0:9/),1222*286497.  ''1;7#"!)>NTKa~‡†Š—ŠŠ‹Š„‡ŠŒ‰‡‹Ž‰„Š‹‰‡‰ŽŒ†‹’’Œˆ‡‰Ž‹Œˆ‡‹Ž†„‘Š†ˆŒˆ‚’cAKVPRQOMNMLJLORNB:<89?EEEDADI@JgcNPZVVWUTUTTWXXXWWVWVVYZWWYVY]MBg’˜‘”““–•’”˜•’‘”—šœ›˜™š——˜™€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€.@<9=?96;)&!  #%""""!"$%$#"+>JJHEFECCDDBBFDCEDBEEEGGGFFEJHFEEEFFJIHHGFGHIHIIFGJIKIIKLKLNJLNLNMHNe]GCLKKFBAA@@ABCA=;CZ_ZZ[\YRS[ZQTWOQWO@A4'(*462+-:CB2;@FMH:14=A?BJKE>JMIF?:=5>QNFX_P?IK?0*097@B<+%*"$# *'$# "$ "#!'+*.429HJ?AWfffggdbcintqW83IfcORWSSVVVWUXWWVUUVXZXWXXVVXTX]NFn˜˜‹‘’‘’“˜“‘‘““–™•”—™˜—˜˜€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€1D?87;;5.  
"" "$&!"""""#$"#0BIHGFBDDDCEDCBDDEIKGBHHFEDDEFHHIIJJJJJKKKHGGHGLKGIKJHIJJJIKLNLMOMNNKOgdLBHIHD@@???@??C?;:APXUVXWRPValh\c‹¥Š^Ocj]QNJJKNOOQRSSUPQPOQRQSTSSSQONOOOOOQU[_ccgnponpomicZUX_^][YWVTSRPOQQLKRXWTNGIOQVTQOOPRVZdmqstrommkkjsoN3QYTV^bXVYRUWSRMFHEBCACNK?EJ=;FIA@:%&)+540.027??=59IJDFKJFDHKC65@A;>FHGMU`^[eaKIEA>80**29AWffbdeecfkpvoT85?B@JF53EMMPNOPQOLLNLKMMLPRNNPPONMPRRPPPOOPNOMKLMNLIOOMMMONLIKLKIG=0'!$/8CJJKRL?2-5:513/563,#&"!*>A*#" +8GR`q{s‰•‹ŠŽ‘Œ†Ž‰†‹‘Žˆ„‡ˆ‡ˆ‹ŽŒŽŠŒ‰Œ‹‰‹„…ŠŠ†‡ˆŠ††’Œ]COTPPPONNNMOMMPPF<;96;DEDDFEE>>>@@A@A>::CRZYY]]UT^jnn^`‚œ‡`MamaQMJKMMMOPQRTTQPRSSRSQSUUSQPPOONMLMPUY_^bilkknmnplcZ[`_ab^YVVWWUTUUOOUYWQLKLS\YRPSSSV[djoprttqlnnomnkQ#*L\QFIMEOMB@DIMA@ANK+'GPIMONONLILONPSQMOSRPPPQQQPOOPQPLLOPNMNNONMLLMNNPQOKLOOJIJA0(',4?JNJNRNDCIRXXY[[]_ce`gkgflrtz|†‘l)%&&,&"/A>3/6:537065&'&!#2C5&!#!'4=M`pƒ}u†Ž‹ˆŽ‹Œˆ‘Œ…†‹Š„‡’Œ‡‹Ž‹‰‘Ž‹Ž“ˆ†‰ˆƒˆŽ‡…†‰††’‰[BOSORRMMOOKKMOQMB;=;8=EFCEFIFA@??>?AB@=:=FPVWUZac`cecaVZ|™ŒeMbuhOIKMNPOQSRTVUSORXVSSSTWWTQRQPMMMMMPTWZ[^befhilorqi`]`cghd\WWZ[YUWYVTW^YNKNQU_[RQVXY^dlmnnopqpnqqsoliW+1A;2?G;CF@DH?7:LHOMBLSDENVWZ_a_RDFA*#*11.*&+25*-@F75@AKE?;8;EOWZbd][\VC<>C>HSF<>DOO<;JLMMMNNOMJJMONOPNMOQPQRQONNOONONLMOONPRRONMNMNMMNPPOPNLJIJC8-05LULQQLLRRMNNNOMC=>:9@HGEDCHG?@A>;=@BABCCHJHHID>@Ru’ŠiP[tjPHHIKOMOQPQTROQTTTUURSWYXUSQONNOONORTVXYZ]befikpqmedfmoojc][\YXVVYYWYUSOJKQ\g_USWY`jnmnqttpkinqpplhh^5,@=5ANM:29R`F9QZZTVa_WYWXWUSQMIPJHF4!#+0,/.%%18%0MF56@>CILKKPVVUQKFECA<:9>>I\XMW`S:;JNFLNIK]Q*#2+()"/:'(,$5DG=@WffcfhgedhksrP7ACAB@>>??<:=>9667688587.4@WuˆfQ[rhQJHLUPNPRPRTROPPRURPSXYXVRQRSMMNMKJJKSUVUX`dddgkonjjmvvtpid`^X\\Z\]]`ZTPLMYegaYUVYfruxsopommolokkfcgb<0EHKQTZ[NQW[]SMSTMHID>A?IOG?AB=>HHLB)!%#$..,1)#(>OG::3:CIKIB;HJ>3:CB>IUUX[WWPHITVHDMSLJH@IRTQQPMKMRXZVY]_\Z\^__afknoqs}~}€?($&/+#!$'$"!&7C4" &($"")67' &*.0RnxˆŽ‡‰ˆ‰Œ‘‹Ž•”‘Š†ˆŠ‰Š‹Š‰ŠŠ‹ŽŽŽ‰ˆ†‰ŒŠŠŒŠ…‹‹ˆ‡…†ƒ„Ž‚S>MQOOOOONNOKLLNK@:<8:?BBDEBEB:Mk_JSXUUVTTVXVWUSTVVTYXVUUVWXVWXICl“‘Š“–”‘—‘•™•”—•”“–™˜—˜˜€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€$%000.&%*/21230+!!# 
$%"!"$,6?HMJCHGFEEDDDAABCCCCCCCBBBDFGGFEEEEFIFFFFFGGFHHHIIGGFFGHGHJJJIIJKMNLKKKMIHKLNdmX@FQNC:?@?AAAA><:7<<;:86549BRm~xiXWloYLKHKRPNORROLOQONQSTVSUXXURQQOPOLKMLHNPOMPX^^a`dkomlmuropqng`\XVXZ[^b_WRSW`hjeZT[fnppsojiousnmngadefjG8_^WZMEMJLMH??BNIB?<55>ECLI>AC>1;PME@,!-.#$*/15+5LI@A5+4CHD7-:<=>FQY[a]ZVJ;7@UficfgebchmxnN;DG@BKC16JNJJJNPOOONLLMNQRSRQSPPOLLMLMLNRPKLQQQPNLKLMOQNJLMNQOMNMKJJIECGMMPRNKNQRRSVX\]\\^^]_fceknmnrz}}‰…J""*0'$252.*%7VI !"'# %).-%#+/+Fy‚–‹Œ’ˆŒ‹‡ŒŽŽŽŽˆ‰ŒŠ‡ŠŒ‹ŒŠ‰†Œ‘‹ˆˆ‹ŒŠˆŠ‹ƒ„‹Œ„„Šˆ†Ž€Q=MUJMPQOMMNMKMNJA=>87@FDCFDII=Lh]KP^WTTRSVVUWWUUVVTTXXVVVWYUW\L@f‘“‘”—–’‘”—’––”‘’”–”’—˜—˜€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€+#!../-"'/430.05:<83*!!!! "&)+5A@>?@@A@=<<<;:;;=@?=;<=DGMaqqgYYou[GILPPOOPPRRSPMNPQPRUUVWXYXUROPNMLLKIJLLKMSUUUVZadglrqnkklmlj_][ZXX\bd_XRTakl`[Y]ekllmmllnqplljc_a`ahM 7YI6@=4CA>SX>7EMM?7BH<2HB/0GVN;6F[WHF:',+"%+/9@07QF39B.+4=DA?CDLY`_XUEA?A?;@UfhfggeccgjtmM9GO=/::/6INLMNPPOPQOLLLMORTTTMNOPQPOONPQRPNNPMMMMMLLKOQNJKMNQQNLKIKNOLHIQTPNRQOMLOTX[[]]]__^_bdinppop|‹ŠR%#  $1AG<970.,"+T[3"''&#&..,.4/?m…‰•”„”•ˆ‰‡„Œ‹ˆ‹‹††‹Œ†ˆŽ‹‹‹ˆŒ‹‰‹‹‡††ˆŠ†‚…Š†ˆ‘yGLg]LRVSUXWVUSUVWXXWWWUTYZVVYXRTXJ@g“—“–•’‘’’••’“”’“——”•€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€'1!$,02. !/242248;@DC@;5.)%#$'()/:BDEIKIGGHGJHEDEECACCCCBBAACBBBCCDDDBCEFCCEGFGGGGGHFGGFFHIHKJJJIGGHLIHKKIHJKMPLLNIF`m]CDRQC>AA?@?>>A=;;=>?A?ACEEDDDHMS`jiaTQes`JHLPQRSQPOSVSQQNLQVSTUUVWXUPQOOONLKLKMNMNOPNPQUXY[biiknprqpoib\YYZ\][\\Z\dhefc_\_ejkjorqoppnnibacacmV%0TI2684BBLER]HPRFGRZZJBJVSDCD/! 
)+('+37=,6QA*3@@6/:>=NOQROOLD944:CHDBCBCCDHHJNOLMJIRI,#+.!$)&++/>+2;#5EI>@VghhgfdddfhtoTBHF72=?69JMKIQQPOPRQNLLLLOSSPPRQPRPMOJKNOOOPRQPOOONMKMPPNNOOPMLNONNNLPPNSYSNTPNKMRWYZZ]]^a`_`acefhmrv{~‹ŽZ '&%,,,.6B@:621=A5'FW@ &0* !072++2B`vw–…‰ˆŠŽ…ˆ‹ŽŒŠŒŽ‘Œ…ˆ‹‰‰Ž„‹ŽŒ‹‘‘ˆˆ‹ŒŒ‰Š‡†Š‰ƒƒŠ…‹šyFAPTMOQQPONNLKNQMC==;7=EFFGBCF>Mh[MVXTUWVTTSVVWYXUTUUXWVXUSYSVZK@d’›‘’’‘’‘’’“’’‘”’‘‘””•˜€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€.1&,/2+ (/2005=@??CCCCA?<977:<>BHKLHJJIFGGHHFCDEGECBBCCCCBAEDCBBCDEDEEDEFFDFGHGFHHHFHGEEHHGJIGFHLIDIJIGHKLLHFILMNJJ]l^B@PSD=AA@@?==@<9<@A@?>;9:==848HUadb_VQ\mgOFGKQSTTQQSUROOPNNPPORSSTVWUSQQSQNNQNOONNONNNPRSRRUYdgjllmnpof\YZ\\ZYWY^bgjhffa]aipqkpsqooonle_bfber_, +QU?765=II;-A_\>31:BAGE<:AD@B?>AA>;<>CEHMKIOK>85=LH4-43,(%#)/+$%%.=-+8%6EH>@WgghfdddfghwrW?=83A@2"$5<808EA* !,.)(3:2)%,4BV[`x‰‹‘Œ‰†‚‰ŒŠ‰ŠŽŠˆˆˆˆˆ‰Ž‰†ˆ‘Ž‰‡ŠŒ‹Œ††ˆ‹‡€„Ž†›vDDPTOPQQPOONNLMOKB>?86@@=>??@@;8;?>;86201441-:O]a_^a\SXhcOFJNMORTUUUUPQPQRQPRPRSRSVYZVUVWUQRVQONMMMNPLLMMNOQQTY`cddgilh`ZXY[\^XX\bjoohigehnpnkmljjkkha\Y^dadqe4"HZOGHIHQZKCNRQFNMGF>9?@?@?OQ=BD@7DJEI;!&.)$(,0<7.>M:).*(?937;@ACB?;;<43566457BIGCKMF?91-(6PJ3,3113(")*,+(#!)/&!.&7EH=AWgfedccehjkxrN.0744DA67FOQMJNPNNPSTOLJIMQQONNQSQQPMQONOMIIKOOPOONOOOONLKNQQOMMMLMONNORRRVUNNPQSTVZ]Z]^_bbabcdhlkilrv|…i+%$":73.+)./2AJRcw‡“ˆ‡ˆ„‡‹Š‰Š‹Œ†‚…††‹ŽŒŠŠŠ‹‰…‰‹‹‹Œ‰ˆ‡‰‚†††‡‡ˆ“c;EPRPPPPPONNNLMOKB=>76>ECBEBCC:Mi[LWZUTVVVYYWVTTVYXVSSVWVWYWXYZL?Z„•ˆ‰•—˜—Š‹’ŽŽ’”•”‘“”‘€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€-1)(10+ "/0/6=>@DEDBC@ACDEEEDGDDHHFFJIFEIJIEDFGGFCBCDCBBBBCCCABCDDDCCGCBEFEDFFFGHGEFIHHGGFFFGIHGHIJIHHHIJJKMOLMNIIMJHXjeKBQWG?@?@@<9;=<:977643468F\fd\Z\UMUbZHHLNLNPTUVTRQURNOPNNTTSSSUWYXYZZWUVYWTRSRQRUNNLJKOPOPV^ehikljf_YWYZZ[YXW[flionkijkkilkighkhd\[[`gefof= ?ddPMZN=GFBJC<;BOL:;DB78GBIP=>IJ@DHJRK)!/1/-013-=L7&-'%$)6<7>@@@A?:426421/147@E:-6IE3499'2SI+/3131%"'&&)*())$%$"",- 
7EG=AYgfccccdgkmwnJ/4833?=49FNPMFLPOMOSURQLHJPQOQPRROOOLNONMLMMLKMOPONMNLLLJIKONJJMOOPPOOKOTSTUQNRVXWXY[\^]^`a`b`_agiinvw„€…‘p4!>?;9;NUE8BNM?<:8650146762//3.$/GI72MfYJTXTVXWUURRSRRSTVWYUWWTVYVUTUKARuŠƒ…‡‡††‰Œ‘’“’ŽŒ–˜’€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€*5/-2%)53493ABDFGGGGFHGDDGHFIGHIGDFKEEDDDDEEEDCBAAAAABDEFFFECBBEGFCBEEFHFDEIHEFJJGGIDIKFDHIGFJKHGJJHHJMJIKJKWhfOANXJ<@?=>??@@;8;??:6/.,,.4;@Vs€s^W\ZQORNIJIJJKMOQSTTSQRQNQSQXTTVUQPRV[]YVWYYZXY\ZURSSZ_\YXXVSSSTX^cfddaYTTVVSRTTRWag_cilhbdicegffe`ZW^`bfe``dH)WhH0351EH9D@:7@DVE+'0*+2002.275.()--(-'(3108EF=ADCC@BBAADEDEDEIHEEHFEDCEIIFDHHGHIIKIHIIFGIFGHKLKHIKJKLKLMJGXhiWAJ\M<=A>=@>?@<79@<1-/-.5>@EP]u…xaVSNLMMLIGIJJKMQSSTUVSOPSPNPSUUVWTRVSV[]][XV[_^ZZZYZ^\[\[ZY[YWVVXZ]^adc^YVTSRVWPNW``]`cb`bcabegfda]Y^bdcbb`\fT((GaV@=::;CH:2>7FUB*0/+)++)(+/1002.'#(+))/227FJ>=UhgddcbbfkpxoK1:A95.'&4HNLNMNNNNNNORUOJMLKNNPTTOMQSSMLNNNMKLKJJJKLMMMNNLJKNNOONNPSUWSQTZ][XXYZZZZ\^]\]`cdca`cgjlmoqu‚q1 ;E-&50$/5/'-0% +0/04@NM>@OLGF7K…sq™Ÿ„„‚ƒ†…†”Œ’ˆ‹ŠŠ“‹‹Ž‹“˜‘‰‹Ž‹‘”’„‹‰Š…Š™‹Z>@<;??A=<89<945138DTYVVe~Ž€cUQLJJJIIHIJJKMQSSTUVSQPPTVSSSSSSRRUVUX]\XX\XY]_\[]_\\]^\Z[^_\YWWXZ\_`^XSQRRTSTQLOZb_]_b``cffffc^[[\`cecbb_[eX,#E`[JHGIGDEGJNLPQKJLMPPLIIKNLILIRSKIEB6*%! 
"%0/86%"*-.2('-$#5D@3--.21*/-+12+&&.735HH,5;?9@L<$01/--,*+(,/.+***,,++.2317EJ>=ThgbcdddfjmuoN4:=0)$"$2GQNMLLLLMMMMURNMMKLQPNORPNOPMOSSNNOOJMNMIIJMLMNNMLKJIKLMORTTWUSUY\]\ZYXXY[]_^^^^_`abbehijmquv€ƒp/5@2++  $/:1$&)"'7842106==9CBDE14Xˆ…z‹¢£›Ž‹Œ„~†Œ†|„ŠŠ‰ŒŽŽŽŽ‹Ž‹•“ŽŽ‰”’ŽŠ“‰„š«€H?ONMSPQQOMLMNNLMNJA<=88>DDDEFCD>Sj[LV^SV[XROUTTUVVTQRWVTSTVWYVT[ZCBf†“ŠŒ‹‚„…‡Š‹‹‹‹’€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!02 "''5604889>BBAABBCC?@BBDFCFEFGGEEFHIHGFFECDCBCDDB@CBCCABC@CCCCCCCCBDEEEEFEEFHJHEEFGFIJFDHIGIHEGIHFHGHIJHHHIJKJKKJFTek[CGYM<=A>=@>?<;99<=?DCDKTTFCNl…™ŠhUQMPNLKJIHGLLNRTUUVUQQTTTUTTUWWVVWUUWZYZZYWXZ\[[__YWY]``_`bbb`^][[Zba[SNOTWQPSTMKU^`^adbacdgec_YVZ`befcba^Zd_5>VVONID@?=;;<>E@;DMNMTNHFJJE@?FK;*+4=9.+%$,:8"%.23,-()=??/(043.&'&)12+%"1;55DE4-0;>DM?()+,+-,-/*,.-++/4.+*,.-,,8DI>=SggaceffgikupO354%#$(5JUROKLLLMMMNRQPLJMOLONPRONOPPPPOQUSLLLMLKJJIJMNKIIKKLLOSRNQWTUXZ[\\\YWVVY[\\\^`a`_`aecdfloqpx€„Žm,2?5,"$0:1##'!5C7002-'(),SlZIPUSXWWYUPVVTSWTPRTVSRUUVYTTZXH@[€‘ŠŠŠ††‰ŠŒ‹ŠŒ‘’‘“€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€"0/!&05313448>@??BCA@BBCECEFFEEFGFEDEFHHGFFEBDEEEDCBCBDB@@@?BC??BB?@DFEEFEBBEGGEBBEHHGHFGHGEHIHDFJIFGGHIKJIHJIJIIIKIFUgo`EFVL;=A?>@>><968=CJRPKIPNBAQm‡ž“nYTOKKJIIIIILLNRTTUVWTQSUSQUQSVWUWWRWUUZ][WVUXYYYXX]]\]_``abbddca`][[\ZTOOQRMOSSNMU[Z]`bbefdba_[XX]cdgfcaa^Zdg?5GLIF?<:>A<64=>64@NPFCLHAA:,')68)!"'68/00&+3'$+0104(3@@;-*2550""#*00*&!155==796IKHKK>0:Lf{†”žšŒ’«¶¦¤­ªž˜œ““••ˆ”’‘•”‹‹ŽŒŠ‰‹Ž¥§|B?QMPQLLOQPMKKMNLLNI@<=68?FFDDCGEA=<@>@?;::;=DIMPNLQTY_p†¡™u^WNGIJIHHJLJJMQSSSUVWURTTRSVUVUTVYVVRSX]\ZYX[]\\Z[ac`^]^^addfgcaa`_[\[VQPQQQOPQNOX_Z[]^_dge_^[XY^bdegfcaa_\ahI/AIE=:BAQegeeeccejmxqK'$$*&'4FKKNLLMNNOPPOOPPQNLMLJKMOQQNOSTQOOQRNNNLKKMNKLNNNLNQNOONQVVRWWYYYYWWTUWXYY[[__`aacdeabdeimswy‚†g'.B- ,92# $!!  
>@>>@=<<;DS_Z[XVVUW]u† ž}fZKLMMKHGHJKLNRTTUVVTVXUUUSXVTSRUWVSUWY[][XXY[\[]^[^_`a_^_cfjkhddb``a]VQQTUTMNSQOWa_\^bccdd`]YW[beeegfbaa`^^eO!+AIB;57:7/++-117B94;.76*.02D6/.&&:?5.-3301)"!&/53%8,%9B=;3+)3:-$,0,,-,/14:CJKIHHEENL7.+/022.))$&)*,---/--01/+*8BF?>QdhgfdbadinupM)$#*&'6HMNTJJKLMNOOOSROPRPONIJPQNOQQSRRSSPPRQONNOPPMJIKMMMOOMOTSPRYWWXXYWURUVWXXY[^]^_abcdd`bcegjnqx†e&+?- '61$"&"1IJ@;4379?GKIIJIFEFF>537DRcš™¦°¦šžŸ™šœ–’˜—›¡›œª§¢¥©¤¡Ÿœ•™–”›£‘g?FLPQPRQPNLMOPMJMLNQLB<<::@DCBCCFB:QgVGUTSURRYVQSQTUQPQQOQVXTSSQZXXUTHCe„‘Œ‘‘’“’‘Ž‡„‡‹Œ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€1.,10121/2578:>@ADDDDBDDCBACEEEGGFEGIHHGFFGHG@CEDA@AC>=@CBABAACB@@DECBFFCBFGFEDFHIHHJGFHHGGIHHCDGIGHHIFEGIIHGJJIHIJJGR`ibJHYT:0'(16-%*1/*/77PchefedcehltqO,%###&4FLMPJJKLMNOORNOQPNOPMLORPMORNRRRSQPRTQNNQQPMNOOMKKOSOPPRVYYWUVXYXVSQUUVWXZ[\\_bccbcd_`bejllkv}€‡e&'7( &41&$($0JRNKMEFLMLKGIKIDDHKJ@=;0387DTbdm}Šž ŸŸ£ ˜”ž“‡ˆ‹‚}†‘‰‹“•––“™Ÿ Ÿ¦•c?ANMNQNOOLMMLKKKMMLMOJA<=88>DDDEFE@9Rm[LXXQTVTRPSRQSUUSRRSUWWUUWXWTYVXL>T‘……ŒŽ‘Œ“”‘–’”‰†……‡€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ /++22252142/39@DCA@ACEBBBACEDDEEEDCEHFEEFEDCDACEDBABC<=<>A@@CCBBDDCBCBCCCCDFGFFEDGJIFIIFGJHFHGGJIDEGFJHGGHHIKKKIHIKJHR_gaIFWR<=A>>@>><8:<:;??FC=98?Wsu™¢zjRJIIIIJJJIJLPRRSTSVUTVTST[[XX[XUXZXWXZ[ZXWVTV]^\\Z[]aba`abghdabbaab`YSRRRSTUPJNY`a``__bb]YWY`fhggdfeaaccadf^4!/%$;1&5(1?.0--?:'.2&)-0/-3=3-;<) *9/*//+-.20% "$)-/'49*,AEFHC92'!),)',.,+)*/-,.1/)')$,0/.10.8AE?>PchdeffeegivsQ-$"#'+7GPNNKLLMOPPQPNRSOQSMMNONMOOLLPPOPPPSQQPPONMLOQQNLMOORSTVXYWUVWWWVUTTWWX[]^]\_bdda`cgbdfijloqu|€‡d'"-!(84% $#!$DTKELJFINJEHLHGEHKJGHIDI;0644-887BFCFWaaf’Š˜ŸŸ ¨¢…œˆzwuw{{‘ªÁ³`9ADBAEC@CCCDEDDFIHGGGGGFDJKIGIHGHJHGGFEEFIJJGHKKHGKKHGJIESflfPFTO<;?>=@????88>4##3YtrŸ£…pTMLLKIIHFCGMPQRUWVVUTTUWYYTRTUUW[WWYXRRWYXXWVWYZYXY[\]_abefeccddbda]XUUTNWQLMQQV^ac`[^`][YZ^cddehfccffa`bacbA '3$"9/.2&9;)**.?6'10*,%//(5:.*/94%%20(+20-1442+$ 
"%'(4@7%(9@8..1+(.4=>><989;CHKT\ekw…Ž•¥¢•˜¤§¥¡™yQ:=IMNNMLLMNOMLLMMMLLLNMGA>?99?DCBBBHB8SkVITVRWZVWWQOOUVSSRSRRUURRUUTWWTTUK=lŠ‡ˆ‰ŠŽ‹‰‹Ž‘““˜—•””“€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€"0, .4122222333:@B@@ABCCBBDCBCEEDDEGIIIHFDCDDCACCCBA@@@>@BBAABD@BBBBDCBCBDGGFDCAFJJGFHLIKIHIHEFIFEFGGFEJIGHHHHHGJKHGJIFSaghTHTR>;?@?A?@?;46<0 &A_sq~œŸˆw]PLIIJJIGHJLMNPSVVWXXWWXZTSSVXVUUW^ZSUWWZ[YXXXWX[^\ZZ\_abefgfdcdfc_^]VSTVRUVVTPT`b^]]\\ZTUZ`cddddegihc__b_cdG  ,!1,/3(56*'-180)2*00!*5,.3%)+)%%-5.+-/-/32032*$$&("%28/-9A:48??>><999878:;;7200/+)**'$&*,*(.-*+,)(+%&--&'.0,(&&')+6AC?>LbcbbdfdcgmtpO/)$" #*9JOOQPNLKMOPPRONNNQRONMMKKMONTQOPSTRPPNQTSRQNQPPRQPQTROOTURQRSVURSTTTYXWXZ\_`_`aabdcagddhjkmmv~‡‰Z#):5&#*)4HMLLKIJJJKJKJJJJJJIIIIKKIHGFEDCBBAA@@@<;:9888958;=AFHHQ\hjiuzq^RC=@GKLMMMMLLMNLNPPNLLNGILMHA==67=CCCDDGB8SiVLZYQSVSTXVTUXTRTSQUVSSWWSTSWWTUVMASu‘†Š‘’Ž‘‹‹“Œ‰Œ‹‹’€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!.)!06444333455:?BBB@@CCCDEEDEGDDDEFGGFEDDFGGEC@AAA@@@A@@@AAAA@@BCAACEBFEDFGFDCGGHIJJHFFGHGIGFGKHFFGFEDFEEFGHHIILMKKMMJW`ejYGPP>8=?>>=?=:6<@/ $''''0B\oq}˜šŽŒhUMGGKLJIIJKMOPRSUWYXUSSTYWUTTTSS\_XV]]Y\]ZYYYXWYZZ[^`a_]ccdedbcgfbb_WSUXUXYXUQU_ebcb]YWSV]defffdafigdcef\bhO!&#!,#0/,;9.,15;/'/+67!%5) '#43 4=3+-/,+14/0671+)(%# %,/8@A?:9::Ncdda`abbfkytT2,)%)'*.:IMKMNLJIKLMMMNPOMOSSMNMNPONPLORQONQSQSSPOTUOQPQRRPRUSSUTQTWSSTUTQRSSWVX[]\^```_^adfegbcgjkort}~‡‰Z# 
'43%"+-"-IULJNMIHIKLLKJHJJJJIIIIJJJIIHHHIIIIHHHHDEEEDBA@=;;<;99:8:?>9=@:=;:;AEIJLMMLLKMNNMLJKKLLKLMLF?;<66=CDCEEC@;ViRHUXPPTQPRRTRTTSUSSVVWVUUUUSVWTVWQGFd€‘ŽŽŒ‹Š‰‹ŽŽ’’‘“’€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!-'"16534454445;?BDB??C@ACDCCDFBEFHGGGIFEDDDDCABBBA@@?????@BCBBBFDABCABGFFEDDEGGHIJIIGFHJJJIIIJIGGGGFFGGIHFGKKHGJJJJLKIWael[GNO>6;@=<;>?=7;=/&/6965:GYkn}•—Žƒm[RHGKKJKJIIJNQRSTWYYVTTTSWYVPOTXTW[\ZX[^^][Z\^\XXYZ\^___dddfggghddc^Y[][YZXVTQS[`b`\ZWSTZ`egffgghkjfdec^]biU)$('*$0)+<7*20-0,*2/3;+*80(.5=?:@:7:;;<89?EDCDDFA:UfRHUXQPRQSWXSRVVUVUTWTUVUWWTSVVUWYTN@ABB>@EABCCDDEFBDFFEDEFGFFEEEDCCCBBAA@@;>AA?>@B@CCBCDEGEDCDFGFCEHKJGEFGKJJHFEGGGEEEEDDFFGGDFIJGFHIHIJJHR`fl\IPSA8>B>=>??=67:0%)/-'):JWck{’’‘ƒmaYMHIIILJIIKNQQQRUYZXVUTXVRPQTVVTUZZX]_Y]\ZY[_^Z[[[ZY[_bcefghjjigggb\\^ZW[[XUPRXZ\XSUVVV\_cffddfjlkfdda\_biY0!*)&",3,+0,(60#&5;17A<9?>;<>=;:=A<4,-,+,.//,../372)$!!#!#.:CCBCCG@;WjVNXWSRQPQSRQUYROTSNSUTSWVUWSUVUWXVSEBgŽ‰Œ„Œ‘‹ŒŽ‹ŽŽŒ’€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€".&&2322210/269?AABBA@D@AAABCCCFGGGEEDECCDEEECBCBBBBBBA@BDCA@ABEBDE@AEE=BGFCBDGHHGHHHGFLKJHDEHIKHFGHGFEGFEGHIIIHIIJIJIIRbgj[JQR@8>B<=>=>>77>4"3ESbgxŒ‹ˆ‚me`RIIIHLJJKMPPONRUY\[ZWVQV[\XUTVWY\[X[^^^ZYY[\^a]_`^[Z\_`ehggijjmkki_XWXZ[XVUSRTY[[TNSZ[^`chhfdedikhcaab`ag]7'*&(+02,#%/A=515=@;6:;9:<;62;>9:@;.,,*+-,+0.../11.**#"%# %/881*((#  !"""%((')-.+,-,+,.,()-))//)+1-*+*()-3,(*+'$%3CE>=PfdfcbcddhmvrV5*.-*)116HNJLPNKJKMMMLMKKNPNMLOOOONLLPPOOOOOOPOONNSVSQPPRQPQTTTRRSNMRSRSUSTX[XYYXZ]\Z_``^_bdefhhedkpooz~ŠŽ_'$! (34(!!#2BNPKHLJGJLKIIHJKLJJJJIIIIJJJJJKKKHHHIIIJJKKLLKKJINLKLKIHIJEINLMLGEKPPKIMRLMMMKLLNNNLKJJKKJKMLF>::45;AAABBE>;YjVLTSSTUWWVTQTWRPUWUPTUUSSUXTUUUVWWVL?O|–‡ŽˆŒ‡ˆŠˆ‘Œ‡†‰€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€+$(5444431149>@ABCDCBCCCCBDFECCDEDDDFGGGFEDDCCDCBBBB@?@@AAAAAACADC>@EDCBABEHGDEFGGHIKLHGIHDEIIFDBDFFEEIGFFGFFFGGGHHGFFUdfi\JOQ?8?@:=?;@@86<4# &$':HUedt…‚Œ‚oheVIIJIKMLJKLOQRNORUXYYYSUX\ZWVYZWVXZ[\\^\\]^^`d`aa_[Z[]beijiijlkikjd\ZYZYTRTTQPXXXSNRZ]bbdfhhfefhjhc^^a_^ea@(33:89>:26>><=<:76:99<==<99==95331.,,**-+*/0/01/,**/-'#$%$"&'"!" 
$$#""#&(*,/-++-/./.+,..*,,*,330/,+)(''((! ! 1BE<;PfcaacdcbfksoX9/54.*437JQMOSQNMNOPOJNOMNNNPSQQQONNNNMMNOOOORKLSSPQTPOOPPOPSQOQWTKLTSTTVWUTWYVSSWZZYZ^```bdecdfffkqqo{‹a(%!(14' "$/;VdOIRVTRRSRRSROSTSTVYUQSUTVXTUUUUUUVXWF6TŽŒŠŠŒŽ‹Š‰‰Œ‹‹‹Œ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€+$*5321232249=BCAACDCDDDBBDFEBCEGECCFIDEEEEDCCCBAABBA@?BDB?=>@@CC@>ADDCEFEFHHGGHHHGHKMJIKJFFIIHFFFFEEFHJIFGJJGJIIJIHGGUbbg^MQSA:BB;@B<:=87;2$!()#)@NS]bqz}‹ƒrigWIJLKKKJJKMPSTPOOQUY[\USUXXTSUUWYYY]_[Z[\[]__]^__^\[[[cbdhihhijjjhec]TQUWUSPLLXTNNUZ]`ccbadgfdc`affcbe^\ddE ,>CB>888:;3<9637>;:=;??;3.41/(!#*)#+-*)-,*..,-11-,.,1/&""" & #')'&&'.+,020./00.+,//,*,.,)%%'&'&" !"!$*-2CF==Rhd^_ceb_cjoseOGJD87?99IOKNROMLLMNNLPONQQPQSNQTQNNMNOOOOOOONRRPSSPNNPPNNPQQOPTSNOSRNPQUYWSVWUVZ]\[[]ab`_`cfabfhgimnq}‚“d,&")14*'*(2AQOMJIMNKIJKLLKJIGHHIJKKLHHHGGFFEHIIJJKKLMLJHIJLMJNMIIMMILPJFMOJHLJHIKMMMMNNMLLMNNOOMKKKLIKMMG@<<;;@ECAA@@;b˜ ƒŠ‹Œ‰ˆŒ‰‡†‡Œ‡†€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!+ *000210/26;>BBCDDCBACDEFFECBFFGFECBAEEEDDDCCCCA@AB@=>@A??ADD?@DDACFDBCDEFGHHJJIGEFIKJHHIIHHIMJHGGFGIFEGGEGIEEJKHHJJGRafj_JHM=9>=>C=7;B1#%(AOW]_nwsw‡„nlm]JFHILLKKKKOQPVPOTXVWZZ[YXZWSSVPU\ZY^`^^__][[^Z\_a`][Z\`bfkkgfohffb]YVUVVTROMLSNMV]\\a_b`_cffhiecdfea^^bdiP$(AIA85;?<:;?@@>:86510/..+'$$!"&)+&()((+./,01/153--/10-#)(%" !%(('(*.-)%-/.*,21*-/-)),+'$"  ! 
##"&#)''*-3;=:@H@:Qhfbdfc__dirncZ^f_PWUKGPPHILMNPPPMJRNJLPMLQPNNQRPMKMNONNOOMNQSSRQSTLKLNNMNPOPPPQTTRPOSVVSU[YTWZUW^^^[`d__dcabdgjkkknvyq4" ""()3-'%&'0@OOKIKJIKKJIGGHIKKIHIIIHIDGJIIIJIHIIJJIIHHNMGHKJILMNMLKLMMLJKLLJHHMNJJMNLLLMNLMMJLOOMMLKMKIKLF><<77:@CCBAD?=ZjSHTSTVUQPSSQSUVTSRSRQRTVUUUVRTXWUTSTTN@;_‘ŸŒ†ŒŽ†‰Œ‡ˆŒŽ‹ŠŠ‰€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!*+100432359=?@@@A@@@@ABDEEEDCADGGFDCCEEDDCCCC@@??@BA?>ABB@A@?AABBBDEBDCBEGIGEHHHIJKJIIJIGGHHEEFDEGFDHGGGIIGFGFGHJKJIHO_be_OMN@<@>;=;;A>89?2#$(BPUW]jrosnhi[LIJIIKNMLMLMRQSQOOTVVY[[[[YWWVSUZYX[_`^^``^\\\]^]]\]]]`beijhhieegd^YTVVUROMMMPPRY_]]a]abbefddifefggd`_bbiV*#=H@=>A?859241)&()'(&$#$%%%'%""#&&&&()(),//.00./11-120/0+"'%$%'))(.241-+++,-,+-/,'&&" "%)-)(+)(*++-1112209AG@;Qgfbdec``dirhVKOUVZdXTWQGFJMOOLLOOMMHIOOKMSUPQTQQQNONPONOPNLOPPPRTUPNMNPQPOQPPRRQQSRQRTUVWY[XTUZ^][^bbbea`fdccceiklnx{€x:!!""*4.%"#%1AMMJJNMIIIIHHHIJKHGGHJJKKLIHIIIHIIIJKJJJJMMKJKJHIJJJKJKKLLLLMMMJIIJLNLIJMKLMMLIINOLLMLKJIIHLOH?:9779=AAAAE?;VfPIVXTSTTUTPSQRUUQPRSUVSRTUUSPRWWVVUWWTMA>_Ÿ‹—–“—‹††Š€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!)-22255679;=>EDCA@???DDEEEDCCDEGFDBABDDDCCBBB@@@?@@@?=@BBBBA@ABCCABDCCDFFFEFGKLLJGGJLLJIIIHGFHDGHEEGEEGFGJFDGMJHGIIHGTddebTPM>:?=<>>=>>96=3$ANRQVdplirwqde[MJIGGJKHJRQNSTTRONPTVWX[\ZXXVWSRV\\\_][[]`_^\]]\[YYZ[]`bdhhiklhhie_YRVVURNKIIORV[_][]]bccddcchfeegheb`a`h_34?8421,%&,)&" #$"!!$'*+#! $&'&#$$$&*,++,,,,-//--)(.1.*-*'(,/0/-.,)(('#!!!!"!"$%((*,.--.31.,,**--*# %-0.8AG??BAFNOXZQKKHMRPIGMQPOJJMMPTSNLOSRRTRQPPONPQOOOONNPQQOQRQOPQRPOPQRQRSQQPOQTVWZ[YWY\[YZ^`ac``dddddefijny}€‘€B !"!&-0,'%&&.::56:@CB@@G?;?>>>=86<20MUVWY^ceforlfg\NFFHKHMMMPPORWSPQOMOUUUX[XXXVXVTWZ[Z]^^^^^^]^__`_][ZZ^bdefffhjgghfc^XRRQOMLLMSXZ\^]ZZ]bdcbcdeeeddegea_`_ie;*0+('($!  
&!"$')+,,*'##%'&%$$##&*,**+,-,+-/./,*,//1210/-+)'#&%" !"&'&((),/0/0,,-///,+-3/))++-1&(2316AG?@Hhˆ§˜œ—›¨©ŸŸ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!'!386789;<>>>=CBBAAAAAGGFFFFFFHEBDHJHECCCBBABABBBAA@AAA@@?@@AACBAABBBCCCDEGHHHHIJKLLIHIJIGHJIFEIHFJHDFHGGEEHLKDGHHKOLDO`aefULJ@<@>;=<;><76=09RPORXY^`cjnj``YNGFGGIMONNQRQTTTQONPPQPUXYZ\YUVYYWV[]``_\[[]_[]^^][ZY[_cehgfhecefec^WVSNIGINQW[\\_^\Z\aeecdeedffdegfb]_aihB"$$#$%%""#!"$%%'*+'),/00/.3.(%&&%#%%%$'+,+&(,..,./,..--)(+$$$$" "!!"!)+,-0465224675311/,+,--,&//(),.10,'0>>QcdbbbaabfhpkI2<:2=ABEKNJIPNNOOLIMSPRPNOPQSORQPQPPTPSOKNONQQNNQRONOSSQNNPQPPRRPPQQORRTUVVWYZVW[\^_^^bdeeeegefgfggikrrzƒ“ŠS!#!" !!%2:1)-0.&!"%.:CC@AGIGHKIIHJJJJIKLLKKJHJJIFGHIGKJJIIIIIIKLIHJJIKJIIJJIHKKKJIIKMLKLLJHLRKJLMKIJJKLKJKKIILIKLE=::89<@BBBCA;=ZfQJUSQRSSTUSSUVSRTVVTVVSQRTTUSTWYVUVWXSTXVLB?LYkŠ¥«¦¢™…~“‘”€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€"&$7;9;==>@@AAADDEEDDBAGEB?<;:9989>EHGEDDCCBBBBECBCB@?@===?@@@@BBAABABDBCDFGHHIIJLKJIIIKLKJHHGFILHDFFDGFCEIGGHHFFGJLLIGRbaegVJI?;?>;>=>Qada`__`begvpL4<;-/716FNJJONLMQOJKQKOOMPPMLQRSQPPQQPSOKNNMPROMOQOOQORTTRQPPRPOQQPOPSQQTWWVUZYTU[ZVZ_`cc_`baegijhhikqox‚\*$##" (0--471*+--)$%'-6?A??CEGKLJHHIJIIHIJIIJJJIHIKJIHJHHHHHIJJLIIKKKLKJJIJJKKKIJKKKJKKGJLMMMKJJMMLNNLKLKJLMIGLMJJIB<;<58[ePKWTRRSQRRPRQQRRSTUUTSRRSTTWURSUSSWTURVWRQOL>;ETiƒ”©¨¡¡§¦žœ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€#&&:><=@@@@ABCC?ACDDA><0-*&" #)1:CHEDDDDCCCC@@ACBAA@??@A?>=@BBAA?AGCDFGGFHIJIIKNNKHMLLLJFFIHEECADHFKGFFFFHKKJIIIHHHRb^ah\RN<9>=>95<2'HVNNQSSW[^fhc`YQJJLNLLQQMMPRRQTVTRPPQQRRRTTUVZ[ZY]^[Xa_]YY[]][[\[[\^__aabedbcbchjif^TMMLKKMPSTWY[]]]`ccffegiffkkfehgb_Z_dgQ&(&),.00111.+*((+02478778:71+(*+)&&''%%'))&('$##!"!$&&(./11100025992+,33454320/45311320.//.01.(+,.,*++)*61(,,&%2BF=@Q`c_^]]_adennR9@>+ )*)3DJJMOLMPOLLOOQNKNSQKOQRPOPONRQPNMOPONNMNOQRRROLMNOOORONPRQPPUTSSVYYVX[XW\\[a__cb]afcadhijjmolmy}‰f0$$##  
""09:2/133/)00(!!&/9??=?GJHIIHHHJKJIIHHHGGHJGHIJIGJMKJJIIIJJKGHKKKLMJJKKKKKLKKJJJKLMJMLIHKMLOMJLOMJKKKMNLIJMJHJLE=::89<@@@ACC;=YbMJUWRPRVXVQTTSSSSSSRRUVTQTXTSRTWUTWWTVYTRUVRIED?@LXks~Š•šž€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€#''<@=?A@??@ABCEED@90'!!0ABB@@B@@BBA??AA??BBBFFEDFHIGFJKLNNMJHIKLJGGHIEFGFFGGEEIHEGGDEGKKHGJJGTc[[e]SL?;?>;=<<<@;4;50KTLKLPQUY]eg`[VOJGIJHMQMJPRQTUVWVUVUSRUTSUUUXRZYW[ZWY\\[XY\]]]]]\[[\]]^]_dfefeehgdaZOMLKJKMQTW[^`a^_ccchjiihbejkecfe`aY\afW* +.+)+0541241*)-0/.157778:5/*)*+($"$$! "$$"%# "''"(++,2789>@A<635940154//475453002133026620100241,)+.--44*3D=,/4,%1BF=@Q_c^][\^acdniQAC9#&$,IOGNONLMNNNNPOPQNNOLNSPLPPOQTPRQLOSNLPPNNPPMONOQPNNPSRONQSPLRTSPQVXVXXVTW[\\_fc^abaffghgfgkoinzz„’m3%%#$!-1' )6<8,-41-15./*$$(.68?CEGGHMIHHIKJHFGGJLKIHIGIJIIIHFKJIIHHHIJHJKIIKLIJJJHHHIKJJKKLMMOLIJMMLKKKKKJLNLLLMKHKKFJHJKE=;;79>BCAABD=?ZcNJUTRSTSSTRQUVSQSTSUVUSRTUVUTRUYWVYVTYZWTRWQTSKDBCACDHLOW`d€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€$'#,=>D<0%+?BEFEDDD?ACCA???BAA@??@AC@=>???ADEDDHKJHHHKMKLNLJKJHHHHFDFIGEEGFFEFHIHHIGGHJLKIHP]XWe`NLA=>?;:<=:=86:2CEFHFFIKJIHHFFGJIHGIJIFFHHHJIGHIIIJIHJMJJJJIHHHHKKLNMJKJJKKKKKJJKKJJLLJMJLNMLLLJKLLKJIHKJJLE<<:88AECA?@F=?ZbMLXUSQQRSRRTTTTTTSRSVTSTUTUUSSUVTTVRUWVVVUTUSQRTRLFEEEDCCBB€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€"$".>BB?ACDCBBC<@DC@?AEA>=?@?@CC@??@ABCBGFEHIHKLKJJLLJIIIJKJIHGDFHGDEGFDGIFDEHIGIJIHIGDM\VQ`aQJA=@CA>=<;=75;3 #@RNMRRLOSWZ[[[TQMJHHIIMNOOPQTVXURTWZYYUUTRPOTYUVXYZZXTY\^]\\[Zaa`_`ba_]cedddddgkgbdbVLJMNMMORS_b_]afd`befedehjkgeedccd_\\\ff?'00+,36323/*((*))-,)&$"!  
####"$" !&-/.0343355531147:98<>EC8146..16863203446521544332/,/230/0/,,++8AADD@9:-"'3AE?=Ma^[YY[ZY\broG()1-!'HSIHKLOPMJKPQONOOMLMROOOOPNKPTUTROMPNJJOSQMJOMMOONORPQONNOOOQRQOOTWXWYXVXZZ[\`cdbbdfbefeegjlmqxƒ‘{7'+"!%"/7.$#)485,*./04551(#$',3@EFFHGFIIIHHJKIEFIKIGHIJHHGFGHIKJHFHJJIJNLKJJJJIJKJIKKJLLKKJJJJJIIIJJKKKLLKMNLJLOLKMMKIHIIILG?<636@CBDCBA:@^cKIVSOPRORURSTTUTTSSRRTTSUUSRUWWVUSRTWURU[ZTXXWWUTTTSRPNKIGF€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ #3A@@?@<;@FA3'#2AIBCEECBBC@@@??@CDE@>@@>>@ABBCDEECEFEEGHGFJMMLLJIMKJJKJGEFEFHGEEGFEGGDEFGFDIJGGJIEK]WO`gVHC<<>===;<=64<5 +HRNNRMNOQTVWWWRPNKIGGFHJMNOOPQPTWVTSVWVVVWVUVWUUTUWYXUTX\]^_^\]_`___aa]_`agihfjkhfe^SNJLMNPUY[c`_^adc^aefddfjjia^de`]aa]__gkI%..,,00/1/+%""!!#""!#$&'''('%&'&####(.0/06643.-353248<=<7>HG2$2BE?=L_]ZXXYYW[`jpI"')(%DOLMKLOPNJKNMOQQPNPRSRQONLKLMPQQSQOOKNOLKNPOKMPNMQSMOTSQSSPPORTQRUWUXYURX^_^___acdedeghgfilmoqw|{9 #$+& /5-%$(384-.52-.0:3'"$%)3DGHGIGFIJIIJIGHJHGGGJMLIJIIIGHHHJIIJJIJKLJGGIJJIJKJIJHGGKJIIIJKLLIHKMLLLKMKHJMMNMJHKLIHILLKJF@?838ABADB@@8?^eLKZRQUXTSTPQQRRRRRQTSTSQSUSRVYVUVURUVXVSRUXTVXVTTVYUUUUUUTT€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€&9A<<=B??@9*0?DBCEFFDCDEAAA@>?AC@>;:<@B@ABA??BDCD@CFEILFIJJKMLIJJIHIHGGHFFGGDEFFGEDGHFEEFHHGILKFR`TGXgZLC;9;<<<:==44<6 3NSMOOKOPQRSTUUOPNKGEEGJLOQQPONSSVYVRSXVUVWWWUUUUTTVXYXYZ[Z[]__[^`_]]^``aaceefkfdgh_VRNIILPTX\`caaa]acaaddcdiife^]dd][__^cbclS#"&'%%&%$)(%!"#&&&'*.2589;73.'$&'#"%&&(.1003253&!,5335:>??@=95443/20,+*)''+,,/4434430--/001//0/,,-5DFA?:9?D4!1BE>>L\[XWVXWVY_mn@!*&AONNLLNQNKJMPOOPQQPOQSRPPMMRPPNOOONNOLIIJLMOQNPPLLOMMPQPRRPPPSUUUWVVVYWVZ][Z]_ababdfeffegjkkmpv|z7"++(.5/'#$1;3,085/04<8,$#"(7GJHHIHFILGEIJGGJIIHHIKIGHHJLIIIGIIJJIIJKLJIHIJJILLKJJJKKKJJIIIJJIKNNKIKPOJKNLIJKNMKLKIHKIJIIB<<969BCAB@>A9>]dLGQORUVUROORRRSSSSSQTTTVVTTTXXSPUWVTTRQTWVRQRTVUUTTWWWWWVVV€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€)<@;>@AD>."+02369;>?@AEGBBBBCBA@?@?>@B@=ABA??ABBBBGHDGLIKIKONLJHJKJIHIIHGFFFDDFFFFHHGEEGKIFGJKHDR\PBSbXJB<<@A?=9==44<6 
9OOLKNOQQQRRSSSLLLIGFHIHJLOSVWXVSSTUSUWVXYXVUWYTWXXXZ[[VZ]^``^[_^^_`_^]dccdfdejcegcYTQJHGLTXWWZbbfeabfdacb`chifa_aca]\`__cabl[+$''&))*.1,(&)-01-28>=?B?74552/////.,)*++,//.0310/.-.02..20*+13$0CE>>KYYWUUVVTX]ll=!)DRNHLLNPPKJKONNMNNNLRURNPRPPPQQOMMPRNMMMMMORPKNQLLPOOMMNMNQQQQSUVVX[UXZXYXY\\^aabbdfceeegkmmimw~ƒx3#(,-4.'#$1;8..30,18>>2'&$*:IKIGJIGIGEFHJJIHHJJHGGHGIHIJGGIHIGFGIJHFIIJJJJIIJJJJJKKKJJJJJJJIJKLJJKKKLIJNMJJKMNLJKIIJIHIKD;:846?CBB@ABBB:1##'%"$((+.259>BECABCC@==<=@A@><<9<=64<5 !=NONLJMPQRSSSRRMLJIGGGGIIKNQSTSSWXURSUVTVWVUUUUVYZYYZ\][[[[\`aa]]^`_]]^`baafihfggb^[SKIJINW[ZZ\acedbefe`ccaaddb`aba_]_aaa_^an`3)/0/49;=?8.((,02.27::;;=950*&'(' $$"$+/./10044484347=?<654/*010/.-*'*,)'),-.2000/,-/2.,/0..0140%'15<;:-0DE=>JWWUTSUTSV\fl?! (BROIMLNQPLIJLOPONORSRTRQONMNONOOLMOPNOMJKOOLJKOPMOQORNOQNOQQQPQUUTUYZYUTXZ[^]]^adedcdghggilnglv~ƒw1#" '.4,$#'278/.2/,.2?C6))(,:JKHGJJHIFFGIHGHJGGFFHJIGHHHGEEGGHHGGIJIFFHJKKJJJIIKLLKKIIIJKKKJJKMMKJKKJKNMJKKJIKLIHKKHHMJHKF><:34>CCBAC>:>X^LMXTQSTPRUSSSRRRSSSQRTTSTTQTVXWUSQQXUTUUSSUWXWURRUXSSTTUVVV€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€':A@>5%!#%(,15;>AA@>??@>>>?@ABAABBAAACCGEDIJIJKGHKLKJIJKKKKKIHIEEEDCEEGFDCFIHEEFFEFHIGOYOBPaZJ@;:;98;;;=75;3 !?ONPNIIMOQSSRPOLLKJIGFEHHJNQSSRSRSVUQPRWWWVWXWUWYZYXZ\]^]\[]`a`Z\_a^[]`]bcabdefgaZYWMFIMOSX]``^bfd^`dgfbbb```_^ba`_^^`abd`]ald?)12.5>?>>7.(),0225:::9:;84/*$"$% &(%&,/-453113558657<@AA?>98;6.+,./.1330')%!'/22210.,,/1,-//,*.3.0--48@=2/DE=?ITUTRRSSRUZkg;+&# 
$?PNOMLNQQLIILLLJLOQQOMQTOJMRQNNNLLMJOOPPLHKQONNOMJLPOOPQQQQPRRSUUSSU\ZUTY[WUY]bdcbcedggedfhjhlu|€Žw2"$!-5-#"(262,.4310.;F<+)).;JJGFJJIJKGEGIHGHHHHGHIIGEGHGGGGIHJJIIJJHJJKKLLKJIHHHHJLLKKKJJJJJMLJKMMLJJKKLNLJLMMKJMKGHMJGGB<<946?CBB@A@>AA>@@ACCABE@DFEEFHJQPNNOKJOKJIIIGGGIEDEDCEEDCDFGFEGFKKGDGHGJVM?L_YI?<>A=:;;:=86:2$BNIMOJJKMPSSROMGILLJHGGIHHJNQSTRSTUVVUTSWYXVVVVVXYYZ[ZY]\[[]acc]]^bca`aa_]_bbbfd_]YQNMJMSXZ^b^W]hieghebe`\\^`aae_\]^__aahc__hfI+43-3==8;6/+,.//-27:99:<;4.)""%!#!%/1-2025448996459=@@>=;<<406.133330*(*&"(00+231-+,./-)*--+-35/-"&3@G5+"/EE[bMJURQPPRSRQSSRRQQRRQTSRTUTTRRRSTUUUTTUWVSRRXURTWYVSUUUUVVWW€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€-@?-(:C@=;:975/,'" """%',4;=?C>>>>>?@AB??BBBBBDEFFHIIJKLLLMLLLOMMKGHJJEFFEDEFHFECCDEGGHDFHEHIAMXK=L]YJ?;9;=;:9=?;088)FKJMKKIJMPRRPLHGHIJJIGFIIHHJNTXUVVTVYWRUUTUWUSXUYWX]\X[a^]]^ab_bb`aec````_`dd`]a]ZXSMKLKPW\]]^abegfffeba`^\\^adb`^]^`bab^^ccioN!"/3/2;=;51+)-.---0246:<<=7/'"""%!,1+.25435788315;>=;=9:91/465523411//*%#%*,-/10--..*.-.-)&,500-!$269B<%0EF<;FSUNSTPPTWVgjG((" (AQLLOKHHKMMKKLOQPOPSRPNNNNNONONLKKLLOOOPRRPNKOQPPNMPONOPRTTSTSTVWVTTZYUSVVUW]]]]`efegeeeffjnonux~ŠƒA# #,4*&4850142...2@A4*&.@KHIIEDGJFHIIGEFFFGHIHHIJHHHIJHGIIEFJJKJFIJIGJMLHIKKJLJHIKKKJIHJNJLJIKJIKJKLLKKLMLJJLKHGHCHKJA::799?BCB@BC8=_cJJSSSSRPPQSSRTTPRTOTTSTTTTSTUVWVUSQUUUUTSSURRSUTSSUTWWTSVWV€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€/:,%5>?????@@A>=83/,)%"%%%+4=>@@@A=@BA?ACCDHIGGKJGIIJLLLLKMNKHHFEGEGHFEFFECEEFEEEFEFEEGIHGNSKELZZJ@=;;<<<;9>;398 -EKMOIIKGNQQPQNIKKKJIGFEIGGIJLQWVTTWYWUVTYYUTUVVZUUZ\[\`^]^_\VYb^^_`]^``\]^_a`^`_[XWRLJLPRV]aca_^fifcdb^]_``^]_ac`aege`][`ccagoV' -313862.)'+.+),,,,.38;;:3,&!#$!" 
*-',12//34230/38:8775566325751123542+%$&'(*-//-,.-+(+-)%+48/3,#4F8'95"0EF<;FSTQTUQPSUTejB)BRMMOLIJMNMKNMMOPQRRMRSPNPNKKLMMMMMMOPPPPQQQMOOOPONOPQQQRTTSRTVVTSTWVZXTUXXX[\]`bcddfdeikkhfllsx‹‡H#!"+1'+784002111.3<<5-*1?KHHHGHHEIIIIJIHGGHJJIIIJHJJHIJIIMIFFFGJKIJLMKJIJIHJKJJKJJJJJHHJLLKJJJLKIJIIKKKKLLMKIILLILKIJC9:;6;A@@C@?@7<]`IJQQPPQRSSTPQSSPRSPQQQQRTUWTPSUQTWUVTRRSUTSSUVTTUUUUUVVVUSS€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€" -8;>>>=<<>@@AA@?=952-(%%&-8?BBCCABDDBCDGHJJKJJKLLLKJJMLIJKHFIGFFGFCCEDEGFDCEFDGECGGEGKLHDHW\M>>==>?=:9;92840FLLMIKMLLORRMJIKKKJHGHHHFFGHHLRVSSVWSRTVWYYUSVX\YWWWY[\Z]]ZZ^]WWZ__^`cbeb_`ca^^`][XTOMOPSX\_aa`geege`[Z]\\\]_``aeggffb^_abeebi]4).*+.-)$'''*,*(-*()09=>;3-)&'&"" !)*''+,)*-.,-)(+1578469:8448872245861*%%&%&(-/.,,./--('))*+($*07C>*#*,%0EG=;ERSQSSPNOQQ^d=$?OKJNLKLOOMJMNOOOQRSSONQRONPNNNNMKKLLMNOPOPPOMLKNNMMPPQRRRSTSRRSTSTVRWWTTWXX]\_bdcdfddgjkiijiiqw€ŒŒT #!!+4,!!*693/./144/35860,4@JIHHHKIBGHIIHHHIGHIIIHGGGIHGHIHHIHHIJHGHJHHJKIIJJIJJIJLKJIIIHGIKLJKKJMMIJJIIKKKJNMKIKLKHGGHJC:;;79@CEDAA@9?]aKMSPOOQSTSRSTTRPPRSPRTUUTTTUTTSSSTTTSRSUWUQUWWVVVVUTVUTUXWS€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€#*/168:<==>>>@AABB@=<;61.,-08ACCACDCDHHHHIKKKLLMMMKKKIGIHBGDCDCAADEFFFDDEFCFGHHHGHLOICGW[M>>=<=>>;==84831ELNOLIIJMNOPPLIIJKJHGHHHIHGEHJLQSTVVWWWVVYXUWZX[]ZWYZZ[ZY^_[]_W\a`_aeddhfddc`^a]^\XSPOORVZ]`cdd_bca\ZZ[\aed`]^a_egcadebbda``akd9(.,,-)$&&&(*)(*1.*+19>?82*('" $&%!!#'('''(+*)(*,,,-/4673498//4543/147:80+'&%##&,-.,,--,+)(-.)(..6DGFAC@9>]_JLSQQQRRRQOPRRPOOQTRRRRQRRRUVUTVVUWUTSSTUUVSSTVVTSTWVUVVUUT€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€"&&'*,/4;?A?>==<;>??>>;999:;?BC@;>?@@B?@DCEEDFHJILKKKLLMNMJJKIHFCFGFCCFGEDEFFGEEDCEGGGEEFIKHCFTXG<==<<>=::;7383%3?GMRNKMJLOONMLKHIJIHGGGFHHEEFHHMQTTVYZXVVYZWXZYZ[Z[\ZXZZTX`_^_^adb_cfdbid__b`\\\_^VRQQOTUX\bfd_^^\\]]\[\`ca][^baaaacffdccdb_^ih>'-+,,)&&''())),---.28;<95+)+##38*%('%&&&&(,00.-),,)+0547784036232-,1485,+*'%#$&*,.-,+*))+'&,,1?FILHEG>$"16/EG=;EPQMMNNNNPS_^:!& 
%BTOMMMNOPOMLIMPONOPQQSQKJNQQOMLLLKMPMJJKMMNORPOMMMNNLLNOPOQSPPQTUTTTVUVWVVWX\^``abeefccfjklmjkqwˆi-#! )2,#%.9:5-,14553328;3,6CNIGHGHIHGFFGIJJIHHIIIHGFIHGGIGGIKKHIKIFFEHLLJHJMKKJHIIIJJJJKJIIJIKKIIIGGIKLKJJKLIKLKJJJJFKMJ@9;917@AAC@?B:@^aJLUQRSSQPOOOQSSSQQSUSPPQRRRRPQQQSVUSRTWWTSUUSSUVUUVRRUWWVVW€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€#&%$%')*,17;==?>;99<>==;;?@<:>=:;?A??A?CA?AB@ADDDEGIKMLLKJJKKKLKIKMHEHCDDCCFECFEEFFFEEGJHGGECHKGFFDSZJ:;====;99<846745?GIIMMIJKLORMHHMGHHIIHGFFGHIHGHJLMOSUWWXXUX][VVYXW[]YZ[XU[\Z[Z\`^_ab`cc^`ca^[ZZ]\a`XRRRPPUZ]`b_ZXY\][XY]Y[_cedb``aaacfgegbacb^fiH&*)*+)(#()'),+*'*-/022130&$(!$9>,'-*'))&))).52)(++''/695862033375.-0341()*(&&'(,..,+*+,,*$%-08HMHLQJJF/"1;'/DG==AE=BabJMVPRSSQPPQPPQSRQQQTSRTVVSQRSRSSSRRURRUURRVUTSQRUVVRTUTSSTT€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€$&&&'&)((*/5<=>>?A<9<@@??=@>BKNJLOJFHLONKKLMJIHHHIHFGHJIHGHHJILQVWXYTXZZ[[XUVX\[X^b]^_ZX]ZUW_^__]^`^^`_\]^^]^`_YSPPQRY__]]\YW[\Y[``\Z]`ba`ab`cdbbdeccc_[bdjlR##,./-(%'((*,*)+*,.-+)'%,)%&'#'58.*,,+--+0.''-+!&')('(/55652002464.,0462+*)*)'&',,,+)),.)).346CPHDEJGJL9&,7*!".DG>ED>C^^HKSRRRQQQRRSOPRRSUTPOOQTUTRPTSRVSPRTRRVWTRRVVTRSVWVYUTVVTVZ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€$()))'((),06;<==<<=<9<<<=<99>=>>:;>><@BAACA?EBBCEGHIJJKMNNMKJKHIKGDEFFECCDEEEDEFFDEFHFBFJFFHF@IH@BRYK;;PIFNNMLKLNOLKLOQPPPPPQQOMLMOONKJLNOOLKMMKLPQOOOMOQQPMLOPNNOMPRSRQTYWWYXSTY[\ZZ^```aeddhjjiiklqx…•<$ !.70$"+7;5579753.1:;7409MHJMKGHIGHGEFHJJIJIGGHIIIIHGGHGGJLHIJHHIHHKLJIKJHIKJJKJHHJIJLKIHHLHJMKHHIJMMKJLLJHKKIIKJHHJIHB==837>?ADCCD@E^]ILRUSQQQRRQTNPSRSTRQPPPQRRSTTSSSSSTUTTVVTSSUSSUVTSSWUSSSSSS€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€#(*))*,-,-27>=::;<>>=88>?<;>====<=?@A?=?B@?DEEFEDGKLLMKJLKJKKIJKIEEHFFECBDEDCEEEDDFGFGFEEDEHHFFCBJZQ>9=;QIKKKMNNNMLJIIJMOOOQRTSQNLKMONNONMNNMMNOONNRRPPSRPPNQOMQQMKPPNNRSRSX]YTVWWZZYZ^___afeggfhlmnouv|‚…G!"/<." 
)9:83000-+++/25639FLKIHGFEEFFHHFFHHIEGIHHHFIHGHJHGIIHHJJIIJKIIJJHHIKJHGJKKKKLKIHIKKIJMKGHKKJJJJJJKLGJMKIIIHGGIH@:8858?@AA@BD:C_]KOSPPPPPPQQQRQQRSSQQRSSQOPSURSVVSSVTRRUVTSUUSQTWTQTUUVVSQTX€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€$*),.,)+/3::;<<=<<:<=;?C?>ACDB@CFDDFDDIKKNMIHKLILKHFFEDEFGFCBDFDEEDCDFHDFGGGGFGLFDDAETO9:@;8<@=:973388=CLKLOLJHKKLLJLLHHIGDDGHHJLIHKJGIKJLMIMTUSRRTX]^\[[ZYX]_\\ac`YVX[[\^`bb`_`\[_^[\`ced_ZWURONNSX[ZY[[`babcb_bbbcefffbadca``cccbebcrl>&.-*)*++*)(%$'++.21./0-*+03114-))/1..2."'1(#*,'%$!&4?<50...+&)*)'(./**)*-,'$%$$'**+/353139AEEIECCB?BHE8(/CE=;BLNKMKLMJKRW]B#=QIKKLMNNNMLMJHJNQPOSRQPOONMPNOPNOOLQOMOPONOPPPPOPONQROOPNLOOMPRQRUWWYYXXXXXX\^^]`dhfddfgkmlmosu~€Œ‡R""!3='$23-)%$$####'+1317EJIIHGGGHIGHIIIGDEEHLHDDGIHGHIHGIIHGHHHIJIIJJJJLOIIHHJKIHIIIKKKJJKJJJHIKKHJKKIHJLKLLKIIIJHFHHA;9737?ABB@AC7Aa]GKTQQPPOOOORQOOPRSSRRQQRSSQQUURSVVSSRPSYVQSRSUVUTTSXVSSUUTT€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€$%(**-00.69;<<;<====<<;<<>=<=?@@?>=AB@BFDDBEHEHLJKKMMJJLLJLLGEFFCDCDEFDEGDFHHGFGIBEFGHGFGGDDCBFTM:6?@?;=>=741588;EKKKMJJJKJLKJJLJIIHGGHIJLJJIIIIGIJJIJKMOQRWYUX]]]YUW_c_Z]``\[^]YUZ_```ab`ba]\]abgjh_WTRPOORX]]\\Z]de`]__aabbddddcbbcda___dfba`bolA$/.)**,)))'&(+-+,24/,.0-*/4/&%,,---,--,"%.+"$+0*'$!&4??951,+)%(,0,(*02++-0/($$%"$)../1110,+-/.2124435:5.&/CF=;99=<9:?@?==>???A?>??BCABEDBDHJJHIHHJIGHJIGGEDCDDCBDEDCCEDCCDDDDBDDEEEDEGEBA@FUQ:7<;:;><=96445:CILJKLJHIKNMLLLKNLGGHEEIIHHJKHHIGGIGHLMKMQRTUTW]^ZZZ\^[[c____]YXYWZ[\_cc____^\[_deeaVNKLLKMQVZ][Z]`_^__`dc_acadeb`c^[_dbacedbffbepN"/-*+*+*++'*-,+*(,330212+*22)&+*+.,)+0-.((*&#*(%##"*8B8541*)+*-.01..2621.**,,*%#&/5533010-+,-,+)(+-++++*&/CE>ABB@AB:D_ZIOXQQQQQQPPSPPRTSQPRQQSRRRSQSUUSSTUUSTUSRSTSRUVSQSURVWUSSUW€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ &(-11..4<>=::<9:;::;=>???>>>?@BA?>@@@BDEDCFGHIHJJJKJHJHEDDDABDBDEDCBCBDEEDCBCDCDDDFECDFABEDBNN88A>==>:<:7545ANPMLMOLIILNNLMKKNIDCDBDJKEIJJLIFGFFJKHIMNKOQSUUW[b[UX`_\]\^ab]VVYZ[\^`a_]a_^^`abbb]VPNMMLLOQSY^]Y[^__`__caacbbgjc`dc`dfc`aacbb^\dmW& 
+.--+('*,++,((*''/43144+&-0-..,++*((*+,'&)#!*%#$%'/;C;874/./.,.2311442/*(+/.*&(/5744521.))*-,*'&'*+,..-).BF>=BLMJKJJLIJP]Y@, 9RMILLLLMMNNNMLKLNPRRSSRQONMLOONOMNQQNMOQPONQPONPPOOOONLOQRQPOMOTUSSUXTOQSTWYZ]]]]affdefhjkjkmovƒ{„d)$#  !$&$(')**/=KKJIIHHIIJHGGGHIGEHHGIGFIIGIIGHJHIJJHIIIGJJIIKMKGJKJIKIHHFIKJJLKIKMKGIJJLIIJJKLMNKIIJKIHHFGJIB;9936>@AB@B@7B][HMTRRSSSRRQSOMNPQQRTPOQSSQPVSRRSRRTWSRTTUUSTTTTUTSUVSSVVSSW€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€"+43./4:=<::<:==::>?=;=???>?@>BBBB@?DE@BHFEFFIGHJJKJGHFEDDEDDCDDCCEDCBEGFDCBCDFEFGFDCJFCECBOQ>7;;;;<;;:5459GTVNLMNMKMMKLNKJKKHGFGGFGIKKIIIJHHFGIJHIJKORPNPPT[]][[^_\ZZ^``_^[WXY^dc]]a`ba][^^Z\XSPNMLLOSSTYa`[Y]de`]__^```acccddcccbbecbb`bb^ah\-(.-***&',-*(('*)',22-31+),,*-/.*%%'%"#$&$""&%%&((.7<>?=81-,.,131-142+-021.,,./14543353.*(*)'))*)('()*-*-BF?=BKMJKJJLIIPZ[>#7ROIMLLLLMNNNKIIKNNNSTSRONNNPNNOPRSRNOPPNNNNOOKKNOMMOPQRQOPRQNOQQSROPWVRUURTWXZ^_`bgebcfhlljjkpwz†h(&! """""!&'))(2CNHHHHHIJKGFGHGHHGHKHEEHGFHHIIGHIHJJIHGIIIIJKKJIGFKLJJJJIKJIJJIHILKIGIKKKNJIGIKMLKJIHHHHHJFGKJB;9858?@@B@CD8Cc^FHQPPQSSSRRRRSRPOPRQRQPQSRPURRUUSPPRRRRSRQQTVSPSUTSTTTUUUUV€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ &-1159;::;=<<;:<>???>>===>>BBB@BBA@EFDFJHDHKHHKLMLGIHFCDEEDFDBBDEDBFCABEFDBEEEEHGEECBABBAML@:>;>?>78;8425GVTKKLMLJNMJKMKIIGFGHIJIHHMIHHGILJGIGFKJHJPMKNSTTUV[_^\]^\_`bc`ZXYXX[ceb`aba`^ZVVXUTQLJJJJORTTX^_[`_ab`adccaadebbf^]`dcabc\`c__b`anh:&0+%)/)&*.+()()*+-12/31/-+(&%*%#%&$$'"%'$!"#!! "#$)25=A?:60.330.,-00,',10+(+/4201587400.+***(%'*+****(-,!-BF?=BKLJKJJLHIPXX86RPIMLLKLMNOPNMMOPPPTSQONNNNKLPRPOPNPPQQNJLPPOMMNNNNPPQQPNPRSPQRQPPMOSROQSTUWWY]acccfdeghijgiiqx}z‰Žh("! ! ! 
$)&&4ELJIIHGGHHEFHIGGIJIGGHHGGEGIIGHHHHJHHHHHIJFGIIHGGHHJJJKIGGJGHKKHHKIIJKKJKKJKKKKKLMLLLIHHJKLIIG?:9:69@BBB@AB8Dc^FKVOOPPPOMLTSRQRSRPORTRPQSSQUWROPSSPRTTSSTUTUTRTWURSWXTSUUS€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ !$(,05:<<;::>=;::==<=<<>??>=>@B?<@DB@ACBDIKGFIIJKIHIIFCBBBBBCAABBBDFACEEEDDEEEBBEFFGFA@CC?KOB9=>=9;<;=6325DOLEJNNKFJMLKLMJFGIFGGEFHGFHHHIIIKFEEDADIKMHJNPRTRXX[\Y\`a^bd`\[ZWX[^`bdca]]ZVUWVRWTOJLQQMPSWXZ]^^`cc`_]^c_^ac`beddaceccc`a^^_cebdmj>%2+#,.+').-(&*'*,**--%'(%$)+'&&'('&).&)'##%$%$$$$#)/2>>=<<@BA@CDADBABFHHGJIHIJGEHEEDDDEDCA@CFEDDCCDDEEEFFGHEBEFEFIECD@@JOC:9;;=:7;<7/18ISPJIMMKHGLJMPMJJJIIIIHHGFGKKJKLLLHDDEC@CJKMLKLNORSW[ZZ\_`\]``^__[X[]]_aa`X[ZVSTUUTTSQNMMNSVVW[^^]_aec__ba^^bdbbdcaababefb_acccccadjN+/---)%'-/,(++*)*'&('"#&&'))%%$%-31+'+,'&))&'''%*35/6>56A;311/-,*()+,0/)*263101111233.*+./.,)%',)##'&&%-@C=?@@C;F]UELROQPOMOUPOLNQRRSRRRQOPRTSQRUUQQSRTRRTSQQRQPQSSSTVSSUWURRU€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ !"#*0:=99;:9::;;<<=<<===>>?>?@@>?AACDDEEFHIJGHJHFFGHECCCBBCDDCDECBCDCBCEFEDECFIFDFHHFCD@@IND99>=<:;<;9117GQPJKKJJGJLLNNLMMKHHHHHHHHKLMKJJJHIHHHFDDEGIHHMRQORUYZZZ]`]\^]ZY[\Y[]`cc_YVWVTRTWXVVURNMNOPUX[_`^]^bfebbcb`acccdb^_baaega[bcdd`]^bjoT#"-0,+,,)&*32,,*))*+))(&%&$%()+%#(.10/01-'&*)#$')*.9<73:59DA6-.,)&&(**-+*-24432235652021/../,(*'&'('()+'$.AC=>=<<=?=;=?>=@CEDCDFGGGIFHJHFFEFEDCBBA@DECCEDCFFECCDDCADEGGCBEDDBABAAJOA;<;;==;969549GOMLMIMRMJKJLOMKJHJIHGGFGGIIKKIJLJKKKJJHGFGHHJKJJMQRVYYX[`ZZ^`\WWX[\^_ba[TUUVWWVXY]XSQRROLSTTV]a`^`egfdbabbffccddcababe_\baaa`aabagmX)*.--+*)(,032-*)**)($&('$%')+-(%&(*.40+)*+(&&%%',5>=35=BFB4.-,+++,---&-1004521134541.,/0-+*& 
#&'''(*+*".AD==<<>@A>>B@?ACDCBCEFEDGGGIIGDFBDDCCDCAEDCCDCBDDDDDDDDDEIIFGHGHDC@B@?FKB9:=:::<:56239FQLJJLNLHKKKLKKNOLKJHHGHIIJHKMIKMKJGFIIGFIIIJMLHJRQQTWYZ\^[[_a^ZYWV[_`_]ZVVVW[ZXWYZZXSNKMPPTWZ\]^`cggdca_bcggefda`_ca`b_Z[_aababba`k^2&--.*+(',550-)),+*'$'+(%&('(/+''+00.-(',+%#'%%&-7BA97@LJ5$(1/++.-(&*+.475004:840.//.595-(*($#"%*/5>==>?>;=????>ACEDCCDFFGGGIFEFFDCCCABDCBCDCBCCEEEDDDCCBDEFGDBEFDABB?DID;<=:9:<<75026CNLLKIJJJIKLNNMNMHIIHFGGHHKILMJKNLKHHJJHILJJIIMONOQQSUY]^\\\_`__\WPW_bb_][[XXZ[Z[]`YROOOMJPUZ]_^]_`cdcca`cfc^_dc`_aca_aa`_e`_becaaboh:(-,*+,,*,2543-*,-+)'%(&$(($#)++)*,-+*)(%%&'''(*,1B8)%**.)'('%)/035400:FGE@:762,043,*,,'% #.106F6(0CE>=CLLJJKLKJMQUO7''((!/PTLKNLILMLLLMNOMLNRTTSQQQPNQOMMNLKLMNOPPPOOLNQONNONNOONOPPPSRQSURPSVXVSSSRQXXWX^``abccceghgllmt~…Žk.#$#&(()+-)  &%(/56;BJHGGIIIHFFHHGFFHKIGHJKIGIGHIIFGHIGGIJJIIHHGGKJHGKKJIKIGJHGHIIHILJJIHJIHHGHKLKHHHIKKJJJHHIGJH@:8439A@?ABDB8F_UFOSOSQQRNQTQORTQNOQRQQQRRSSRTTSVSPPRSSSSTTRSRRRRQSUUUTRSVUR€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€"&'$'5=:9;;;::<>;=>?>==>==>@@BB@ACDEDEEFGFGHFFFEDDCCBBBCB@BDCCDCCBBCEFFDDDGGCBDDEEBDDBEJD??<9;<;975116JSPGGMMIGJLJKLKJLMIIHGFFFFIHJLKLOOJKKJIIIHJJHJMMJIPQRTY^_\]]^__`]WV[_abca^_^^_aaaba]WROOOOTUVZ_`]]^acdca`aa^]aec`___^`ba`aaaaca`bgfqmA$,,,-+()*-2794,*+*((''&(+,(%)&%&)*(&&%$$%''')*+-/9BE?2,(%**&..*&&+/1/1007FQVOMJGIIB9364/-.,((,/57-(0:.!0CE>=CLLHIJJIILOWR9)'($/RVNKNKHKNLKPLJORPNQTRQQQQQQQOLMONLMNOPPPOOOOOONLNPPQPNPQOPSRORWSRTTOSSSTTRRVWXZ^__`cefffhigjnnr~ƒ†Œk.  
""&'*)'% "!$+0.2;FJIEFLKEHKKIHIIGGIHFHKIDIHGHIIGFJFEGHHIHJIEEGIIJIKJGHIIHKIIJJIIJJJIIKKIIMKIHHIIIKLJHJJHFFEHG@<:72;@?A@;AC9IbWGNPOQMORQROLNRRNOQOMQSQNORSPRRSTTRQRRQRTUTRTTRQSUTRTTUVUSST€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€")'$+7<:;<<;;;<;<<=>>??=>@?=>@?AABDFFFEFEEEDDFE?BCBBDDCECCCCBBCDCBBDDCBDEFDAEHFFFBCEACHG<<>;::>:852-9^hPHQPLLHKLMPOKKLKJIJIIIIIIHIKKKMOMNNLLLIFIJKMNKJLNPRUY\]^b_^^]^^[]]^^beebddccdeeba`\UPNQTPTX\^\\__`cedaabc`aba`acee_\``\]bbba``bdekmK!*-.-*$ (24/681)**(+(&&)*)'#&')+)'')'%'**((+347:>EJI9&"$'**+(**)-44.0-0?R[XPPPLHJPQNIC:1-+(%.8;83)(57/&1DF?=CLLFGHGFHJLWQ9'!.QUKMNKIMNLLMKKNPMMQSQPQRPPQLLMNPONPNNOOOOOPPLMOOPPMNRPMNPONRQOPVUQSTTRPRSQPTWY[]^^`a`cghgjokomq„…Œn/#''##)-(!  !%)*0:BJLEEJJEGHGEGIHDFJJGGIIEHHHGHJIEHEGJJIIHIIJJJIJILIIKJIJKIIIJKJIIJJIILLIIDLNHFKLHLKIHJKIIGFIH@<:62;ABEB=EC6E_TFQUNPRQOORPPSSOMOOKPQQOOQRPTSSRQQRRSTSRSVTQRTUSSSSQVTRRSTUU€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!&*($+;::;<<<;:==<<=>??:<>>:DA4EbWFOSOOTPLQPMNQPPSSPNMQRPMOSVURRSRSSONSUQORSQSQQTVUSSRTUTSSUV€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ !$$$%$'0<:;=>:9<=>>=>@?=????ACB?A@@CDCDEGB@DECBDDCCDEEDC@ACCDDCC@BCCCDDDDDDDDDEEEEBBA?DGE;;<:;<>;;65-;:629>=@A?DB7IbUDJOOMPQMNPMNPQPOPQPOPQOOPQQNORSSUVSTSRQRSTTRRTUTRSTTTTTTUUV€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€! !"#"! #!#&''()6=?;:99<>>>>>>>=?ABA?@??BCBACEDBEDCCAABECCBBBBBCABBCBBAABA@BDDCBCEFDBBEHADB@@CHI@9:;:;<;<=2/1:MRLKKKLKLNKLMMMMLKHJJHHJJIMKJLLJKOLMPOJILLJJLMLJIJMMMNPUY]^_adb\WUYZ\^bhkkffdbcffdfa[WUSRSQSV[_bba]difba`^_^_bcabfcbcedc`_b`^`bda^bbkf9'.*&())((,133>A3)+*$&/1)&,20..014421220.,-.0/10-374,*+--*)(,(%+59AO_RJT^VMPQOOOKKJGNHEIIEEIECA@@?:32,*#-DC>=AMJIIIGFGJMUQ/+JPMMKNOKKNMJMMMPQPPPPQRRQPQPPQQOONLLLMNMNOPOPMJLOONKLNNLNQPRRQSSQRWWWXXWWWXYYXWZ^__`aacdghjmmksƒs2$"""!%*'#'%*24:DIHHHGGGFEFHHGGHHGHHGGHFEFFGHHFGJKHHHHJHFGGHHIIIIJJIGFJGFHJIIIHHIJIHFEHIJJJJKLJJJIHIHDGGIG>973/:@=@C@A>8J_TFMPQOMMPPONNOPRRQONORSQPRRQSQQQRTUSPRTUTSRSRRRRRQRTUUTSSSRR€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€! 
!%&$$&&#%'$$'&1:<:;;:=@=;=??=;>>==?BCB@@??ADCA?FGB@CD@BBCA@@ABDB@??ACD@@BCCBCEABCDDCCBDEA@@BGHB;;:8::8:=3/37AKOPMMMJGIONLKKJKLGIJJJJIIKLMLJKLKLLMLJKMJIJLNMKJJLKKLOSWY_`ced_[YUZ]]aghddddcefecgd^VRRRRQSW[_aa`]^bffb``b^^bdabfg``db_``b`_]`cdbc`hjC&-+)**(%$*/654HaTFNRNROMQOMRROMNPOPQOQSRRRRQPRUSPQTTTSRRTTTSQQQSTSQQQSUTQPSU€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!$&()*))(%$&&.8;:<<:<==>?=;;=@><>@BBBCDCBAABCCACCDBAAADCAAABBBEDCCCCA@FDBCDEEEHE@AA@EHD<;:9<=;<<9426?KLLLMMKIILLLMLLKKHGHHGFGHIHJNKJKLNNLKOPMLJIJLNNLLKLKLMQUX_`aba_\[X[\[^efcbdefdddddc]UORUTSQT\_]]`]^afhfca]`a__bca`bde`_`a``_^_ceed`hoM #++')*)$#'+787?A1&()+65)$+5.-,+((**,*''(+-/12202@D:1/-/32/,)**'0Na\VTTTPOOPLGHIFJLIGIIHGGFF@EDFJFCLLFB6*!0@B?=BLKIIIGFGJMWS0(GOLJLONJJLNLMKLPRPOOPQSRPOPMNOOMMNKNKILOLMTOPQQMOQOQPNMMPPOPOPQQTUTSTVVWWXXZ\]]\\\]ccbcdgjlijhny|‹w3'-"!#'& #)+/5@IIHGGGEEGIIIIIFGJGFFFIKIGIIHIIIIHFGHIHHHGIKJHHIIHGHIJHKKHGHIHJGFGIJIIEFGHHIJLIGGIGGGGEGKI?9855:>?@@>D@3HbSEPSPSNMTRLOQONOOPPQOOPRQPPRPQSSQSTTUSQRTUTRUSQRSSSTSTTTSSTU€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!$$"((''++(')8=:;==<<<>>=:;==?A@??>=BABCBAACB@@CECAABBCCCBBACBAABCBBABB@CFFC@BEFEDCCGC?AA@GLF=<;:==;<98428@IPNLKLJHIMKJHHIKLJJKLKHHIKHKNKIJJOPONNLKMMKJLNONNMMMKKMU[\_bcc`\YYZ[]`dfeeeeeddfhg_ZVRQRQSPS[_[[^Y_efc```___accbbdb``cb_]`^_accccffluY#!*,''*(##')1327B;)'*-0/''26((*)(()))))),.12/0203<>3464221/10((*'4KY]_^SJIKHLOLEDILJIHFDEFGGFIFFKFBIIJNIB;0-6@B=:BLLIIIGFGJMXT1&FPLJLNLKJKNLMKKOQOMRPPPQPPQONPQONOOKKLMOKKPNNQOMNQPOQQNNPRSQOQQQRTRTUUUTTTTXZ]^\\]^`abcefhhikkq|}€Œv4'* ! 
"!"%$ %--4@HHGHHEFHIGFGKJFGHFEGFFIHEGGFFFGHIIGGGHGEDGJIFGHIHHHHHHJJIIKIDHGGGJKJGIIIIIIIIIKJHIIIIJIKG=7863;A>AB?A>7K_QFOQRRPORQMMNOPOOOPPOPPNOOQRRRSQOQUUSSSSSSSTSRSUVUSRTTSTUUUU€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€$'%#&'&(-59:=>=<=<;<>><;<>>??@?=?BCB@ABB@BA?@CCABBAAAABBBA@@BCCB@CC@@DDBEEDEEFFFDD@??BJOG=<=;<:8;:7117@KNNKKNLIJMMMMLKJJIIJLKHGHKKLMKLLJLJLOLLNONLKKKKKLKNOMJMSY[_bdca]YXWZ`deeghfeghhhhd`]YQQROPQTY\\\\`\]dfb^_b\[beb_`eecbb_^ba`aba``beikt`,),(()'#%)*+372:D5$(+**(+3/'(+.-.-++-.0//..)(*.3>?4362---,0.)'))*;RV^^VPNHAGMKEDIKKGBBHIDBEJGFHHHGFHHJD@A>?:?B<9AKLIIIGFGJMWT1%EQLLLJJMMKKKMMLNONMRPNOPPPQNMOQOMNNLQPKJMONONOONOPPLNNNOQQRQQQTURPTTUUUUTTUWWXZZ[\^_`abcefgills}}€t4& )2(!%"!))5CIEEHIGGGGGFFFHFFFEEFFHIIHHHGHEFFGGGFFIHGGIIHEHJIGHLKIJJIIHHHGKKJIIKJHIHHIIJJIJLJHHHFHBCGF<7625;@?AA>B>;K]SEKSQRRQNMOPNPPNMOPQNRQMLRSRQSTSOPSSRSSSRSSSTRRSSSSTSTTTSSTU€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€! ")168449<<;9:<>=<<==?><;>A@==@A??@BA@@BCB@?@@ABA@@BCCBBBCCA?CCA?BED@CBBDGGD@CDA?>AJQF::=<;::9>:206ALNMLMNLIHLLLKKKLLKIHGGGILJHHLOMMOQNNOMNNLOMKLLKJJILONLMQUY[^^^_]Z\YY^bceifdehjhgg`aaYOQVTOSVX[^^[]]_ba^]^_abaabba_bc``a`_baa_^^adfjgne5',)(+*'),)*5;45EB+&+0,'+1/))-00121-/2210/0,/8AGLC0-.)(02+((()-+$/GYRMTYPGJIEEFEDGIGBADC?BIJIHFFGFCDDE?<@>?=?C>9AHIIIIGFGJMUQ0%ERMLLIINMJKLMLKNOOOONPQRPONOPRROMNNKLOONNNMLNPPQONNQRPNQQONQQQRTRSXSTUVVVWWVWY[\\]`cccdegijfihmwx}‹s1&49/ ! 
&5FIEFHGDGIHFEFGGHHFGHGGHGHIHGHHHIGFHHIKIHHHGFGHKIGFGGHHHIJHGHJKKJIGGGHHKKJIHGFFJIHHGDEGGFHF>::78:?@B?=D?9J_SCKTQPOOOOPPNOPOOOPOOQQNNQSRQQRSSTSOQQSTTSRQQQRTTRRSUTSSSTSS€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ !"$# #&(19995588:<9:<=<<=>=<====?@@A?@AA??ABBBBA@??>ACCA@BDECBBCCCAABAABEFDBCDEEDDCDB?A@?JTJ<9=<:9;49844;GKQKKMJIJFLLMMLKJIGHHIIIJJKKHIMNKLKNMKMNMNRNMOQPNLKLLLKLPTX[]]^`_]][\`bbbdeddfedfijd`YOOSSPSVY\__\\]__``^]_^^`bbceebbb`abaa`^]_cdbjngmj?'-,*,+'+.**1:=9AG8+,1,(045/021//234553/-./35;@CIA/,20+-+')((),,,29FGEINJIURBAHCDJDKGAEFBEDEG@>B@@=>D@;AFFIIIGFGJMRO.%ERNIMKJLKIMNNJIMQQQQPPPONPRPRTRNNNMLIKMKKLJMONKKJKPPSQNOPPROPSTRSVVTUVVVVVWTVZ]\Z\`abcefghhmnlpy{‚’s/(162+  %5EIGIICCFHHGIIIDHGFHHFFGEGIGFGGGHFEHIHJEGJLJGGJGIIGFGHGFJKHILJEIIIIIGHIHJKJIIJKHHIHEGIGGFHG?;:65>>>>>>>????????@@@@@@@@BBBBBBBB>??@ABBC??@ABBCCAABCDDEEEA@@@BMXL=:=;=?:<74535CRMMLLKJIIMMLKKJIIJIGFFGIJKLLLLMMMNNNNNNNNNNNNNNNNNMLKLNQRVX[^___^YZ\^aceedfhjjhfdcdc\TOORQTW[\\[Z\^accb`^bbbbbbbbcccccccc__abdfggdiejpG#/0.+*+++*,)8A9FIOWHEA><=?@B@=;>BB>GGDBCBBDCHE;9@EC=;AB=@JKJHHHGGIMQS3%ESIONLKKLNOJMOOOOQTTQQSRNNPOOOOOOOOLMOPPOMLNNNOOPPPRQPOOPQRTSRQRTWXUUUUUUUUQRUX[^_`bbcdfghhmhjry|„Žt0!+53)"$)'(CKGFEFEFFGGHGFEEFHIIHFEGGGGGGGGGFGFGFGFHHHHHHHHHIJJJJIHFHIKKIHFJJIIIIJJIIJJKLMMLKJHFECCECHG=9:60C>7K\MDOTNOOOOPPPNPQPNNOPPPPPPPPPQQQQQQQQQQQQQQQQRRRRRRRRRSTUUUTT€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ !!489:;;:<::::::::<<<<<<<<>>>>>>>>????????AAAAAAAABBBBBBBB??@AABCCAABBCDDEAAABCDDDEB@@AEOYI;:A>;:5:96206CNNNMLLKJJNMMLKKJJJJIIIIJJLLMMMNNNOOOOOOOOOOOOOOOOONLLMOQRUWZ]_`__]^^_`abbcdfhhgdcffc\TQSVQSW[]^]]Z\_abb`_ccccccccaaaaaaaaaabcdffgjh_esV  -1.,+++)(0138;;@K?.,58CIA)(+12/.0,,,--/013CG>?MSM2)*2128:9525=====;:;>DHH><@B>BHGGHJJGFJPZS0$CQIMLJJIKLMJKLMMNPQSQQQPONOOOOOOOOOJLMNNMLJNNOOOPPPQQPPPPQQRQPOPRTVTTTTTTTTRSUX[\^^bbcefghimhhpv{„p0 
!'-133390$#@E>9P]IESSNNNOOOPPPOMNPPONOOOOOOOOQQPQPQQQRRRRRRRRSSRSRSRSPQRTUUUU€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€!"0478:867::::::::<<<<<<<<>>>>>>>>@@@@@@@@@@@@@@@@AAAAAAAA???@AABBAABBCDDDBCCCDEEEBA?@AEOXQ>8;97<>8<920:EKLLLKKJJILLLKJJIIHHIIIIHHKKKKLLLMMMMMMMMMMMMMMMMMMLLKLMOPVWZ]_`a`]^_`cdefdeghhgedfeaZSQTWOQUX[\]\Z\^abcbadddddddd________eeffggggabafp[*3:8520,(&/316?:9FC/*-2BK?/,+./-/3421/.---3BE=BVa\B55=;7:>5/+/;BC@E9,%&+0388:;>@BCB>>AB=97=EE=88:=<778614<@??>?A=4OcNBOPOOOOPPPPQONOPQPNPPPPPPPPQQQQQQQQPPPPPPPPTTTTTTTTPPRTUUVV€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€  " #"/469::7899999999<<<<<<<>>>>>>>AAAAAAAAAAAAAAAA@@@AAABBAABBCCCDCCCDDEEEC@<:=EQZJ>9<:9;<9;7./;IOLLLLKKJJLLLKKJJJHIKLLKIHKKKLLLMMMMMMMMMMMMMMMMMMMLKLLMNOWXZ\]_^_\\]_acdeffghhhffgea[TPQRRTVYZ\[\[\^`aa``aaaaaaaabcbcbcbchhhhghggbadhm]2 4CA>:60+(-/29?=:@G<.*6HG8,**+,*+-+++,,./0>C@=GWXL>55;704:;7348;:863-%"$''++*,.379>=?CDC@?>B@:8987438EKJDBDBBA=@FIDGJJECHNRK;(%#! "-AOPKKJJIJJKNMKMOQPNPSRPOQQNPPPPPPPPMMNOONMMLLLLLLLLKLNOONLKPONOOPQRQQQQQQQQWXYZ[[[[\^`dgjlnlijpv{ƒ‹t0#%.5=@>A6'-DGDHJFGIFGHGDEFIEGHJIIFEGGGGGGGGFFFFFFFFGGGGGGGGGGHHHHGGGHIJIIHGIHGGGGHIGHHHHIIJKJIHFDCCEEHG@;836<>>ABAB<5K_RIPROOOPOPPPNPQPONOQPPPPPPPPQQPQPQQQRRRRRRRRSSRSRSRSPPRSSTTS€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€                 -    €€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€        €€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€       €€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€           
€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ƒ€€~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚‚€~~€€‚‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚€~€€~~~~~~~~~~~~~~~~~€€€€€€€€€€€€€€€€€€€€€€€€€€€‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚‚‚‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~~~~~~~~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚€€€€€€€€€€€€€‚‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚‚‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~€€€~~~~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~~~~~~~~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~~~~~~~~‚‚‚€€€€€€€€€€€~‚€€€€‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~~~~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€
€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~~~~~~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€}€‚€‚„ƒ‚~~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚€€€€€€€€€€€€€€€€€€€€€~~~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~~~~~~~~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚‚ƒ‚~€€€€€€€€€€~‚~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~~~~~~~~~~~~~~~~€€€€€€€€~~~~~€€€€€€€€}~}}€€€€€€~~~~~~~~~~€€€€€€€€~~~~~~€€€€€€€€€~~~~€€€€€€€€€€€€€€€€€€€€€€€€€€~~€€€€€€€€€€€~~~~~~~~€€€€€€€€€€€~~€‚€€€€€€€€€€€€€€€€€€€€‚‚‚‚‚‚‚‚ƒ‚‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚‚‚‚‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€‚‚‚€‚€€ƒ‚€€ƒƒƒ€€€€‚‚~~}‚ƒ‚ƒƒ‚‚‚€ƒ‚‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€zzzzzzzz{{{{{{{{yyyyyyyyxwxwxwxwwwwwwwwwuuuuuuuuxxxxxxxxvvvvvvvvxwxwxwxwvvvvvvvv{}}zyyyyyyyyyyyyyyyyyyzz{z{z{zzz{z{z{zxxxxxxxxvuuuttssqqrstuuvttttttttyyyyyyyyxy|}}|yxvvvvvvvv€€€€€€€€€€€€€€€€€€€€€€€€‚€~~yyzyzyyyzz{{{{zzyxyxyxyx}}}}}}}}{{{zyxxwvvvvvvvv|}~€‚ƒƒƒƒ‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„„„„„„„„†…†…†…††ƒƒƒƒƒƒƒƒ……„ƒ‚€ƒƒƒƒƒƒƒƒ‚‚‚‚‚€€€€€€€€€€€€€€€€‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚„„„„„„„„‡‡ˆ‡ˆ‡ˆ‡ŠŠŠŠŠŠŠŠ‚‚‚‚‚‚‚‚‚‚‚ƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚††††††††ˆˆˆˆˆˆˆˆŠŠŠŠŠŠŠŠ……………………„„„„„„„„ƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚€€‚ƒƒƒ‡‹‹ŠŽ‰……‰Œ‹ŽŽˆ‹ŒŠ…‚„†‡ŒŒ‹Ž‘‘‘‘‘Ž……„ƒ‚€‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€wwwwwwwwxxxxxxxxyyyyyyyyxxxxxxxxxxxxxxxxwwwwwwww||||||||{{{{{{{{xxxxxxxxwwwwwwwwy|~}zwuuxxxxxxxxxxxxxxxxwwwwwwwwwwwwwwwwyyyyyyyy||{{zyyyrsstuvwwuuuuuuuuqqqqqqqqsvy{{yvswwwwwwww€€€€€€€€€€€€€€€€€€€€€€€€‚‚€~~zzzzzzzz}}~~}}}}}}}}}}}
}}}}}}}}|{zyxwwuuuuuuuu{|}‚„„„ƒƒƒ‚‚‚‚‚‚‚‚‚‚„„„„„„„„††††††††ƒƒƒƒƒƒƒƒ††††††††„„„„„„„„‡†…„ƒ‚€„„„„„„„„‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€ƒƒƒƒƒƒƒƒƒƒ‚ƒ‚ƒ‚ƒ‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚„„„„„„„„‚‚‚‚‚‚‚‚‚‚‚ƒƒƒ‚‚‚‚‚‚‚‚„„„„„„„„‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ˆˆˆˆˆˆˆˆˆˆˆˆˆˆˆˆ‰‰‰‰‰‰‰‰„„„„„„„„‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚€€‚ƒƒ„„‡‹Ž‹‹Ž“”Œƒ€‚„~………††‡‡‡†‹‰‚…‡ŽŒ“’““““’‘‡†…„ƒ‚€‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€zvtuvvuuvwxxxwwvtuvwxxxxvvvvvvvvyyyyyyyyzzyyyyzzyz{||||{{{{{{{{{zz{|}}}}~}|{zzzzy|~~{xvvuwxxwvvwwxxxxxwwwxxxxxxwvvvvvwwwvwwvvvxyyxwvwxyz{{{zzyyy{{{{{{{{yxvvvutrux{|{xvuvwwvttuv|€€€€‚‚‚‚‚€€€€~}{zyyzzyxy}~‚€€€€€~||{zywvuuvwwvuuvyz|‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ†…„ƒƒ„……ƒƒ„„„„ƒƒ…„„ƒƒƒƒƒ…„ƒƒƒƒ„…ƒƒ‚‚ƒ„…†‡‡…ƒ‚…†‡‡ˆˆ‡‡††……„ƒƒ‚‚ƒ„…„ƒ‚ƒ€€€€€€€€€€€€€€€€€ƒ†…ƒ‚ƒƒƒƒƒƒƒƒƒ‚€‚ƒƒ‚‚‚‚ƒƒ„„………‚‚‚‚‚‚‚‚‚ƒƒƒƒƒƒ‚‚‚‚‚€‚‚~ƒ‚‚ƒ‚€€‚ƒƒ†„‚‚ƒ„……ƒ‚‚‚‚ƒ„„„„ƒƒƒƒƒƒƒƒƒƒƒƒƒƒ‚ƒ„„ƒ‚‚‚„ƒ‚ƒƒ„ƒ‚„„ƒ‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒƒƒ„…†‡ˆ‰‰………„ƒƒ‚‚…ƒƒ…††ƒ‚€‚‚‚‚‚‚‚‚‚‚‚ƒ„…†Œ‹ˆ…ƒ~‚~~ƒ‚€‚„‚~}€…‰ŒŒ‹‘“”““““””‰ˆ…ƒ‚‚„„„ƒƒƒ‚‚„„„ƒƒ‚‚‚‚‚‚‚‚‚ƒƒƒ‚‚€€€€€€€€€€‚‚‚‚‚‚‚‚‚‚‚‚‚‚€‚„„ƒ‚‚‚‚ƒ‚‚€€‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€}zxy{{{{xxyyyyxxxxwwwwvwwwwwwwwwwwwwwwwwvvvvvwxxwwxxxxxxxxxxxxxxwxy{|}~{{{zzz{{{}~~{zz{{|||zyyyxxxyxxxwwxxxxxxwwwwvvvvvvvvuuuvwwwwwvvuuwwwwwwwwyyyyyyyyzyxyz{zy{}~~}{{{yyxxwxxx{€€€€€€€€€~}|{yzzzyxy}~~€€~~~~~}~€{{{zyxvvtuvwvvvwyz|‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚„ƒ‚‚‚‚ƒ„‚‚‚ƒƒ‚‚‚ƒƒ‚ƒƒ„„„„„ƒƒƒƒ„„ƒƒ‚‚‚ƒ…†‡‡†„€ƒ……††††††……„„ƒƒ‚‚‚ƒ„„„ƒƒƒ€€€€€€€€€€€€€€€€€„‡†ƒ€€ƒƒ‚‚‚‚ƒ„††„„„„ƒƒƒ‚‚ƒƒƒ„ƒƒƒƒƒƒƒƒ‚„„„ƒƒ‚‚‚ƒƒƒƒ€ƒ„‚€€€‚‚ƒƒƒ‚‚‚„……„‚‚‚€€‚ƒ…††„‚€€‚ƒ„ƒƒƒƒ„ƒƒƒ„„„ƒ‚‚ƒƒƒƒƒƒ‚‚‚ƒƒƒƒƒ~~€‚‚‚‚€€ƒ‚‚€€€€‚ƒ„‚‚€€‚‚‚‚‚‚‚‚ƒ‚‚‚ƒ„…‡ŒŽ’‰„ƒ€€€€‚ƒƒ‚‚„‚}{}€ŠŒ‹‘“”““““•”‰ˆ…ƒ‚‚„„ƒƒƒƒ‚‚„„ƒƒƒ‚‚‚‚‚‚‚‚€‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒ„ƒ‚‚‚‚‚€€€€€‚€€€€€€€€€€€€€€€€|yy{~€‚€~}|{zxvuuuvwwwwwwwwwwwwwwwwuuuuvwxxvvwwwvvuwwwwwwwwwwwxyyzzyyxxxxxy{}}|zxyz{{{|}}||zz{{|{{zz{{{{{{zzzyxxvvuvwwvuuvwvvwvwvvvuuvvwwxxvvvvvvvvwvvvxxxw||}|{z{||zxx{{{z{€‚€€€€€€~}}{zyzyxwx|~}}€€€€€~~~‚€{{zzyxwvtuvvwvwwyz|‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚ƒ‚‚‚‚‚ƒƒ‚‚ƒƒ
‚‚‚‚‚‚ƒƒƒƒ‚ƒƒƒƒƒƒ‚ƒ‚‚‚ƒ„…†‡‡„€„„„………„„ƒ„„ƒƒƒ‚‚‚‚„„„„ƒƒ€€€€€€€€€€€€€€€€€ˆˆz{~~€‚‚ˆ‰‹‹Š‰‰Š‰ˆˆ‡‡†††„„„„…„„„„„………„ƒƒ„„„ƒƒ€‚ƒ„„„ƒ‚‚ƒ„‚‚€‚‚ƒƒƒ‚‚ƒ‚€€‚‚‚‚€€€€€€€€€€‚‚‚‚ƒƒƒƒ‚‚ƒƒƒ‚‚‚ƒ„„„ƒ‚ƒƒƒƒƒƒƒƒƒƒƒƒ„„„„…„„ƒ‚€„„ƒƒ‚€‚€‚‚€‚€‚‚‚‚‚„ƒƒ‚‚ƒ„…ˆŽŽŽ“†€€~€€€{y{ŠŽŒŽ‘“–•”“““’‰ˆ…ƒ‚‚„ƒƒƒƒ‚‚‚ƒƒ‚‚‚€‚‚‚‚€€€‚‚‚‚‚‚ƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒ„ƒ‚‚‚€€€€‚€€€€€€€€€€€€€€€€|zy{}~~‚‚‚‚‚‚}{wuttuvuuuuuuuuvvvvvvvvuuuuuvvwvwxyxwutvvvwvwwwxxwwwwxxzyxwwwww}~~}ywvwxxxy{||z{||}|}|||}}}}}}|||{{zyxwyyyywwwxyxutrrrsuvvvvwwwwwwwwwwwyxvwwwutxz{{yxxyzyxyz{{{{ƒ€€€€€€~}}|zzzzxvx|}}|~~}~~~€€€€€€~zzzzyxxwtuvwvwwwyz|‚ƒƒƒƒƒƒ„„„„ƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚ƒƒƒƒƒ„„„‚ƒƒ„„ƒƒ‚ƒƒƒƒ‚‚‚ƒƒƒƒ‚‚‚‚ƒ„…†‡ˆ…„„„„„„ƒƒ‚ƒƒƒƒƒƒƒƒ‚ƒ„„„„„€€€€€€€€€€€€€€€€…‡„}~€€€ˆŠ‹‹Š‰‰ŠŠŠŠ‹‹ŒŒ‹‹‹‹‹‹‹‹ˆˆˆˆ‡†…„†…„ƒ‚ƒƒƒ‚‚‚‚‚‚‚ƒ‚‚‚ƒƒƒƒƒƒ„„„ƒƒƒ„„ƒƒ€‚€€‚„‚€~€€€€€€€€€€‚‚‚ƒ‚‚‚‚€‚†‹Ž‘‘Ž‹‰ˆ‡†……„ƒ‚€€„„„„ƒ„ƒƒ††…„„ƒƒ‚€‚‚€€€‚‚‚‚ƒƒ‚‚‚‚‚‚‚ƒ„…‰Œ•‘†ƒ„……‚‚€€|y{„‹ŽŽŒŽ’””“’’’”’Ž‰ˆ…ƒ‚‚ƒƒƒƒ‚‚‚‚‚€€€‚‚‚‚„„ƒƒƒ‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒ‚ƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚€‚ƒ„„‚€ƒƒƒƒ€€€€€€€€€€€€€€€€€€|yxyyyyz{{{{{{||{ywutuwxvuvuvuvvssssssssuutssssswxz{{ywuwwwwxxxxxxxxxyz{zzyxxxxx}|xvvyxxxzzxwxyzzzzzzz{{{{{{z{{{{{zyxyzzzyxxyyxwvvvxytttttsssttttttttwvuuvvusvy||{ywvvwxwwwyz{€‚ƒ€€€€€‚€€~}}{zyzyxwx{~||}~}{{{{||}}~~~}zzzzyxxwvvvvwvvuyz|‚ƒƒƒƒƒ„„………„ƒ„ƒƒ‚ƒ‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒ‚‚ƒƒƒƒ‚‚‚‚ƒ‚ƒ€‚ƒƒƒƒ‚‚‚‚ƒ„…†ˆˆ…‚…„„……„„ƒ‚„„„ƒƒƒƒƒ‚‚ƒ„„„„ƒ€€€€€€€€€€€€€€€€‚ƒ‚‚‚‚‚€€‚„„ƒ‚‚ƒ„„……††‡‡‰‰‰‰‰‰‰‰ŒŒŒŒŒŒ‹Š‡…„‚ƒ„†‡†…„‚‚‚‚ƒƒ…†ƒ‚€ƒƒƒ‚ƒƒƒ„„ƒ‚ƒƒƒ‚‚ƒ‚‚ƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒ…‡ˆˆ‡††‡ˆ‰‹ŽŒ‹Š‰ˆˆ‡‡‡††…†……„„ƒƒ‚„ƒƒƒ„„ƒ€‚€‚‚€‚‚‚‚‚‚‚ƒƒ‚ƒ„…‰ŒŽ”’ˆ„‡‰‹‹ˆˆˆˆˆ‡††††…ƒ€~€„ˆŒŽŒŽ’““’’’“””‰ˆ…ƒ‚‚ƒƒƒ‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒ„„ƒƒƒƒƒƒƒ‚ƒ‚ƒ‚ƒ‚‚‚‚‚‚‚‚‚‚‚€€‚‚„ƒ‚€‚ƒƒ‚€‚‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€zxwwwwwyxxwwwwwwxxvvuwxyxxxxxxxxwwwwwwwwxxvvtutuz{}}{yzzz{{|||yxwvvwxxwwwwwwxx{}~{xvvxxxyyyyxwxxyyyyyxxyyyyxxxyyzyyxwwxyyxwwxxxwwvuttwwvvuutttttttttttsstvwwvtw{|zxutuwwwuuwy{€€‚€€‚€€~}{zyyz{yxx|~}{}}|{|{|||||{z{|~~~}{{zzyxwvvvvvvwuuyz|‚ƒƒƒƒƒ„……††„„ƒƒƒƒ‚‚‚‚‚‚‚‚‚‚€€‚‚‚‚€‚ƒƒƒ‚‚‚‚‚ƒƒ‚‚‚ƒ‚‚‚ƒ„…‡ˆˆ…€‚„„„„„„„ƒƒ„„„„„„„„‚‚ƒ„„„„ƒ€€€€€€€€€€€€€€€€€ƒ„„‚ƒ‚„„„„„ƒƒƒ€‚€€€€‚€€‚ƒ„…††††„ƒ€€‚„……„ƒ‚‚‚ƒƒ…ˆ‰ˆ†‡‡†„„„ƒƒ„„„†„‚‚ƒƒƒ…„ƒƒ‚‚‚‚‚‚‚‚
€ƒƒƒƒƒƒƒƒƒƒƒƒ‚ƒ„ƒƒƒ„„„ƒ‚‚ƒ„„ƒ‚ƒƒƒƒƒƒƒƒ‡‡‡‡ˆˆˆˆ‹Š‰ˆ†…„„†……„ƒƒ‚‚…„‚ƒ„†„ƒ‚€‚€€‚‚€€‚‚ƒƒ„‚ƒ„…ˆŽŒŒŒ‘Ž†ƒ†‡‰‰‡‰ŠŠŠˆˆˆ‰ŠŠ‰‡…„…‡ŒŽŒŽ‘“•““““•“‰ˆ…ƒ‚‚ƒƒ‚‚ƒƒƒƒƒƒƒƒ‚ƒƒƒƒƒƒ„‚‚‚‚‚‚‚‚‚ƒ‚ƒ‚ƒ‚‚‚€€‚ƒƒƒ€‚€€€‚‚‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€{yxxwvvwxxwwwxyywwwwwwwwyyyyyyyyzzzzzzzzzzyxxxxx{|}}~}}|||||}}~~{ywvuuuvvvvvwwxx{|}|yvuuvvwxxwxxwwxyyyyywxxxxxxwvwxxywvuuvxxwwwwwwwxxxxxxxxxxxxx{{{{{{{{xwwxz{{{vx{{zxwvxxxwwvwx{€€€€€ƒ‚€€€‚€€~}zyxy{{{zy}}|}~|~~~~~~~}{{{|~~}{{{zyxvvvuuvwwvuyz|‚ƒƒƒƒƒ„…†††„„„ƒƒ‚‚‚‚‚‚‚‚‚‚‚~€‚‚€‚‚€€‚ƒ„ƒƒ‚ƒƒ‚‚‚‚ƒƒƒƒ‚‚‚ƒ…†ˆˆ‡„‚‚„‚ƒƒ„„„ƒƒ„„„„„ƒƒƒƒƒƒ„„„ƒ‚€€€€€€€€€€€€€€€€ƒ„„ƒ„„ƒ„„„„ƒƒ‚‚ƒ„„ƒ‚‚ƒ‚‚‚€€€‚‚‚‚‚‚‚‚€€€€€€€~~€€€‚…‡†‡ˆ‰ˆ†††……††††…„…‡‰ŠŠ‡…ƒ„†‡†……„ƒ‚‚‚ƒ„ƒƒƒƒƒƒƒƒƒƒ„ƒ‚‚„……„„„……„„‚‚ƒƒ„„„„ƒƒƒƒƒ‚‚‚€‚ƒƒ…„„‚‚€€†……„„ƒƒ‚ƒ‚‚ƒ„„‚‚€€€‚‚€€‚‚‚ƒ‚ƒ„…‡ŽŠ‰‹„ƒ…„ƒ…„„†‡†„„…†ˆ‰ˆ‡†„„„ŒŽŒŽ‘“’‘‘‘’”“‰ˆ…ƒ‚‚‚‚‚‚ƒƒƒƒƒƒƒƒ‚‚‚‚ƒƒƒƒ‚‚‚‚‚‚‚‚‚‚€€€‚ƒ‚‚€€€‚‚€€€‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€xwvwwwxzyywvvvvvwwxxxwvuxxxxxxxxwwwwwwwwyyxxxxyyyxxxxyzzyyyzz{{{|{zyxy{|{zzyyyyy|xutuuvwvtsssuvvwwxxxwwwxxwwwwxxyywvutvwxwwwwxwuuuvwxvvvwwxxxxxxxxxxxxwvwxxwv}~}||}|zxxyyxw|€€€€„ƒ€€€‚€€~}yxxy{|{{z}}|~~}~~~}}||~~€~}||{zywvuuttvwxwvyz|‚ƒƒƒƒƒ„…††‡„„„ƒƒ‚‚‚€‚ƒƒ‚‚‚‚‚ƒƒ‚‚‚‚ƒƒ„„ƒ€„ƒ‚‚‚‚ƒ„ƒƒ‚‚ƒ„…†ˆˆ†„ƒ‚‚ƒ‚‚ƒƒƒƒƒ„„„„ƒƒ‚‚ƒƒƒ„„„ƒ€€€€€€€€€€€€€€€€†ŠŠ†€~€‚€‚‚ƒƒƒ„„ƒ‚‚ƒ‚‚ƒƒ„„‚‚‚‚‚‚‚‚‚‚ƒƒ‚€€‚ƒ„ƒ‚€€ƒ„ƒ}|€€€‚‚‚‚„ƒ„…†‡‡††„ƒ…‰ŒŒŠŠ‰ˆ‡‡‡ˆ‰††††††††ƒƒ„ƒƒ„…‡…„ƒ„„…„ƒ„„ƒƒƒƒ„„ƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒ‚‚€€€~‚…ƒ‚€‚‚€€‚‚‚‚‚ƒ„…‡ŽŠ‡ŠŽˆˆ‰…„……‚„ƒ‚„„„ƒ‚‚‚ƒ„ŒŽŒŽ’’‘‹‰ˆ…ƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒ‚‚‚‚‚‚‚‚€€ƒƒ‚€‚‚‚‚ƒƒ‚€€€€€€€€€€€€€€€€€€€xnnx}}€~}}}}zywuuuuvxwwwwvvvvvvvvvvvvvvvvvvvywuuvwxxwxxxxwwvxxxyyzzz|||{{{{{€€}zzz~}zxwwxy|{zyxwvvuuuuuuuuwwvvvuuuuuvvwwxxuuttuwz{vvvvvvvvyyxxwwwvvwwuuwvux{~~{xxxxxwwxz|~|€€‚‚€€€€€ƒƒ‚‚€~}{zxvvwxzxxyzz{||{{|}}~€~~}}zzyyxxwwzzzzzzzz{|}~€‚ƒƒ„„„„………†„„„„ƒƒ‚‚‚€€€‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒ„ƒƒ‚‚‚ƒƒƒ‚‚„ƒ‚„‡†‡†ƒ‚ƒƒƒƒƒ„„ƒƒƒ‚‚‚‚ƒƒƒƒ‚ƒ…††…„„€€€€€€€€€€€€€€€€ˆ””†|{~€€€€„…†ˆ‰ˆˆ‡††††††††………„ƒ‚‚‚‚‚‚‚„ƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚€€€€€‚ƒ„‚‚ƒ„†‡ˆˆ„…†‡ˆ‰Š‹…………………………………………………„„„„„„„„„„„„„‚‚ƒƒ„„„…€‚ƒƒ„ƒƒƒ„…„‚‚‚„ƒƒ‚€€‚‚‚‚ƒ„……Š‘ŽŒŒŽŽŒ‹‹Š‰‰‰ˆ‡††††…………„„„ŠŠ‹ŒŽ‹‹Š‰‡†……ˆ‡…ƒƒ‚‚ƒ„„ƒƒ‚€‚‚€€‚‚‚‚ƒƒ
‚‚ƒƒ‚‚‚‚‚‚‚‚‚‚‚‚€‚‚‚‚ƒ„‚‚‚‚€€‚‚„ƒ€‚‚‚‚€€‚‚€€€€€€€€€€€€€€€€€€€xpqy|}……„ƒƒƒƒƒ~|zwussstttsssrrttstststvvvvvvvvvuuvwwwvvvwxxxxwxxxyyyyyxxyyyzzz~|z{|}}{{{}~~~~~~~~~zzzzzzzzzzzzzzzzuuvvwwwxponnoprsssssssssyxxxwwwwwxxwwyywx{~~{yxyvvuuvxyz}ƒ‚‚‚€€€€€‚ƒ‚~}|zxvuuvwuvvwwxyywwxyyzz{yyyzzzzz||{{zzzz~~~~~~~~{|}€‚ƒ„„„………†††……††…„‚‚‚€€€‚‚‚‚‚‚ƒ‚‚ƒƒƒ‚‚‚‚‚‚ƒƒ‚„‡†‡†ƒ‚ƒƒƒƒƒ„„„ƒƒƒƒ‚‚‚ƒ„„‚‚ƒƒƒƒƒƒ€€€€€€€€€€€€€€€€„Œ‹‚}~~€€€€€€€…†ˆŠ‹‹‹‹ŒŒŒŒŒŒŒŒŠŠŠŠ‰‰‰‰‰‰ˆ‡†…„ƒƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ‚‚€€€€€‚|}}~~€€~€‚‚„„„……†††††††††††Œ‰ˆ†…††††††††‚ƒƒƒ„„„„ƒƒ„„„ƒ‚‚ƒ„„ƒ………„„ƒƒƒ‚‚‚‚ƒ„…‡‹‘’‘‘‘ŽŽŽŽŽ‹‹Š‰ˆˆ‡‡………†††††ˆˆ‡‡†…„„‡†…ƒ‚‚‚ƒ„„ƒƒ‚‚‚€€‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒ‚‚‚‚‚‚€€‚‚„ƒ€‚‚‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€€€€}wwzzyzz~~~~~~|{zxwwwwwvvvvuuuttttttttttttttttuwy{zyxwvvwwwwvvwwwvwvvvvwwxyz{{}}~}{z{||{zyyyyzxxyyyyzz{{{{{{{{wxxyyz{{zzz{{{||vvuuuuvwrrrrrrrrtttttsssrttstuvtx{~}{xxxxwvuvwxy}€ƒ‚‚‚ƒƒ€€€€‚‚‚‚‚€~~}{zxvutuussttuuvvuuvvwxxxyzzz{|||~~~}}}}|||}‚ƒ„…………††‡‡……††…„‚‚‚‚‚‚‚‚‚‚€ƒƒƒ‚‚‚‚‚‚ƒƒ‚‚‚‚‚‚‚ƒ‚ƒ††‡†ƒ‚‚ƒƒƒƒ„„„ƒƒƒ„ƒ‚‚‚„…‚‚‚‚‚ƒƒƒ€€€€€€€€€€€€€€€€„ƒ€‚ƒ‚‚€€€€‚ƒ„…‡‡‡‡ˆˆˆˆˆˆˆˆŒŒŒŒŒŒ……………………††…„„…††ƒƒƒƒƒƒƒƒƒ‚€€‚€€€‚ƒ‚‚‚‚‚‚‚‚‚‚ƒƒ‚„„„„…„……‡‡‡‡‡‡‡‡‡‡‡‡‡ˆˆˆˆˆˆ‡†…„ƒ……†…ƒ‚ƒƒƒƒƒ‚ƒ‚‚‚‚‚‚‚ƒ„…†‹‘‘’’’’’“’““’‘‘ŽŠŠ‰ˆ‡……„………„„ƒƒƒ…„…„…„…„†…„ƒ‚‚ƒƒ„„ƒ‚‚€‚‚€€‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒ‚‚‚‚‚‚‚‚‚‚ƒ‚‚‚‚‚‚‚‚‚‚‚‚‚€‚‚„ƒ€‚‚‚‚ƒ‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€€€|xyzyxyxyyxxxyyyyyyyyyzz{{zzzyyyxxxxxxxxuuuuuuuuy|~~|{{||{{zywvvvuuttssuuvvwxxx{}~~|zyy}}|{zzyyzzzyxwwwxxxxxxxxuuvwxyzzxxxxxyyyzzyyxxxxyyyyyyyywwwwwwwwtvvuvxxwuxz{xutuwvttsuuv{~€‚‚€‚‚‚€~}}zywvuuuvuuuvvwwwxyyzzzz{}}}~~~~~~~~~~~}}}}}}}}|}}ƒƒ„…††††‡‡‡……„„ƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒ‚€‚„ƒƒ‚‚ƒ‚ƒ‚ƒƒƒ‚‚ƒ††‡†ƒ‚ƒƒƒ„„„„„ƒƒƒƒ‚ƒ‚ƒ„„„ƒƒƒ„…„„€€€€€€€€€€€€€€€€ƒ…‚ƒ„‚‚‚‚€€‚‚‚‚‚‚‚‚‚‚‚‚ƒƒ„„…†††‡‡‡‡ˆˆˆ„„„…………†‡†„ƒƒ„†‡‰‰‰‰‰‰‰‰‡‡‡‡‡‡‡‡„ƒ€€‚„…€€€€‚„„„ƒ‚‚……„„ƒƒ‚‚„„ƒƒƒƒ‚‚‚ƒ‚ƒ‚ƒ‚ƒ€€€‚‚ƒ‚‚‚‚‚‚‚‚„„„„„„„„ˆˆˆ‡†…„ƒ…††…ƒƒƒ„ƒƒƒƒƒ„„„‚‚‚‚ƒ„…ˆ‹‘’‘Ž‹Šˆ‡…………„„ƒƒ‚‚††††…………ƒƒƒ„„…††……ƒƒ‚‚‚ƒ„„ƒƒ‚‚‚‚‚ƒ‚‚‚ƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒ‚‚‚‚‚‚‚‚ƒ‚‚‚‚„ƒ€‚‚‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€{xxzxxxwwwwwwwxxxxxxxwwvzzzyyyxxzzzzzzzz{z{z{z{{|}~|{{{€€€}|{xwwvvuuutttttttt{}€~zwvwwxyzz{{zz
zyyxxxwwwwwwwwvvwxyz{{wwwvvvvvyyyyxxwwyyyyyyyyyyyzzzzzxz{y{|}|x{~}{xxxzywvvvxxz}€€€‚‚€€€€€€‚}}|xxwwwxxxyyzzzz{z||}|}}}}~~~}}}||}}}}~}~~|||{|{|||}~ƒ„………††‡‡‡‡†…„‚‚‚‚ƒ‚‚‚€€€€‚‚‚‚‚€ƒƒ„ƒ‚‚‚ƒƒƒƒƒƒ‚‚ƒ††‡†ƒ‚‚ƒƒƒƒ„„…„„ƒ‚‚„„„„„ƒ„„„…††…ƒ€€€€€€€€€€€€€€€€„ƒ€‚‚‚ƒ‚‚‚‚ƒ‚ƒƒƒ‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒ€€‚‚ƒƒ„ƒƒ‚€€€‚‚‚ƒ‚€€‚ƒ‡‡‡‡‡‡‡‡‰‰‰‰‰‰‰‰„ƒ‚‚„‡‰ˆ‡‡†††‡ˆ†……„„‚‚‚……„ƒƒ‚…„„‚€€ƒƒƒƒƒƒƒƒ‚‚ƒƒ„„‚‚‚‚‚‚‚‚‚‚ƒƒƒƒ‚‚€‚ƒƒ‚€€‚‚‚‚‚‚ƒ‚‚‚‚‚ƒ„…‹ŒŽŽ‹ŠŠ‰ˆ‡‡†„ƒ„ƒƒƒƒƒ„„„„…„…„ƒƒƒƒ„„„„ƒƒ„„††‡‡…„ƒ‚‚‚ƒƒ„„ƒ‚‚€‚ƒ‚‚ƒ‚ƒ‚‚‚ƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒ‚‚‚ƒ‚‚‚‚‚‚ƒ‚ƒ‚€‚‚„ƒ€‚‚‚‚€€€€€€€€€€€€€€€€€€€€€€{vw{zyyxwwvvvvvwuuvwwwvvyyyxxxwwyyyyyyyy{{{{{{{{zzzyxxxx||}}}}||zzzzyyyyyyxxxxxx|~~zwvuvwxxyxwsttuvwxxttttttttuuvwwxxyxxwwwvvvxxyzzzyxwwwwwwwwxxxxxyyyxzzyy{{z{~€~{z{}}{zz{|}{~€‚‚‚€€€€‚€~}||xyyyzz{{}}}}}}}}}}}~}~}~~~~}||||||}}~~~}~}~}~}}}}~€ƒ„…………††††‡‡†…„ƒƒƒ„‚‚‚€€€€‚‚€€€‚‚‚‚‚‚‚ƒ€‚‚ƒ‚‚‚‚ƒ‚ƒ††‡†ƒ‚ƒƒƒ„„…„„„„‚ƒ„………ƒƒƒƒƒ…††„‚€€€€€€€€€€€€€€€€„Š‰ƒ€€‚…‚‚ƒƒƒ„ƒ‚‚‚‚ƒƒƒƒƒƒƒƒƒƒ‚‚‚‚ƒƒƒƒƒ‚‚€€€€€€€€€€€€„ƒ‚‚ƒ…†ˆŠ‰ˆ‡‡‡ˆˆ‰‰‰ˆ‡‡††‹‹ŠŠ‰ˆˆˆ‡†……ƒƒ‚‚ƒ‚ƒ‚ƒ‚ƒƒƒƒ„ƒ„ƒ„ƒƒƒƒƒƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚ƒƒƒƒ€€€€€€€€€€‚‚‚‚ƒ„…†‡ˆˆˆ‡††ƒƒƒ„ƒ„„„„„ƒ„ƒƒƒƒƒ„„……†††ƒƒƒ„„………ƒ„„……††‡„ƒ‚‚‚‚ƒ„„ƒƒ‚‚ƒ‚‚ƒ‚‚‚‚‚‚‚‚ƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒ‚‚‚‚‚‚‚‚‚‚€‚‚„ƒ€‚‚‚‚€€€€€€‚€€€€€€€€€€€€€€€€€€wpr{€}||{{{{{wxyzzzyyyyxxxwwwxxxwxxxxwwwwwwwwwwvwwxwwwwxyyyyxzzzz{{{{{{|||||}~€~|zzz||}~~|{zvvwxyz{{ttttttttvvvvvvvvuuuttsssrsuwwwvvxxxxxxxxwxxxyyyyxyyxxyzxxz~}{xxxzyxwxxz{|‚€€€€€€€€ƒ‚€~}||{{||||||~~~~~~~~}}}}}}}}~~~~~~}}}}~~}~€‚ƒ……„„………†††……†……„‚‚‚‚‚‚€€€‚‚€€€ƒ‚‚ƒƒ€€‚‚ƒƒ‚‚‚‚‚ƒƒ‚„‡†‡†ƒ‚ƒƒ„„„……„„„‚ƒ„„…„„ƒ„ƒ‚„…†„‚€€€€€€€€€€€€€€€€Ž”‘…~}€€€‚„…ƒƒƒƒƒƒƒƒ‚‚‚€€€€‚‚‚‚ƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚€€€€€€€€ƒƒƒƒƒƒƒƒ‚€€…„„„„„„„‹‹‹ŠŠŠŠŠˆˆˆˆ‰‰‰‰‡‡ˆ‡ˆ‡ˆ‡ŽŒŠˆ…„ƒ‚‚‚‚‚‚‚‚„ƒƒƒ‚‚‚‚ƒƒ„„„„ƒƒ……†„ƒ‚ƒƒ„ƒƒƒ‚‚‚‚‚‚‚ƒ„…‚‚‚ƒ„„ƒ‚‚‚ƒƒƒƒƒƒ†††……„„„„„„„„„……‡††††………ƒƒƒƒƒƒ„„ƒ‚‚‚ƒƒ„„ƒ‚‚€‚ƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒ‚‚‚‚‚‚‚‚‚‚‚€€‚‚„ƒ€‚‚‚‚€€€€‚‚‚€€€€€€€€€€€€€€€€€€ypr}‚‚„„ƒ‚€~|zxvvvuuuttxxxxxxxxwwwwwwwwwvvwyzyxxxyyyxwwxxxyyzzzxxyyz{||€€~|zz}xy{}~~~}{{{{{{{{zzzzzzzz{{zzzzyyxxxwvvvupqtvwwvussssssssssssttuuvxxvvwwvy||yyyxwvvvwyz|€€€€€‚ƒƒ€~€~}||~~~~~}||}}}}}}}}}}}}}|||{|||}}~~~~~€€€~~~~~~
~}~‚„……„„„…………†‚„…†…ƒ‚‚‚‚‚‚‚‚ƒ‚‚€€‚‚‚‚‚ƒ‚‚‚‚‚‚‚‚‚ƒ‚ƒ„„ƒƒƒ‚‚„ƒ‚„‡†‡†ƒ‚ƒƒ„„„……„„„ƒƒƒ„„„„„…ƒ‚ƒ…†…„€€€€€€€€€€€€€€€€Š’{}€‚€€€€€€€ƒ…‡‰‰‰‰‰‰‰‰‰ŠŠ‰‰ˆ‡‡†ˆˆ‡††…„„„„„ƒƒƒƒ‚‚‚‚ƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒ‚€~ƒ‚€€€€€€€~~€‚ƒ„…‡‡‡‡‡‡‡‡‹‹‹‹ŠŠŠŠŠŠ‰Š‰ŠŠŠˆˆˆ‡‡‡††ƒƒ„„„ƒ‚‚„……„‚‚‚‚„ƒƒ‚€€‚‚‚‚ƒ„…ƒƒ„…†‡‡†‡††…„ƒƒ‚‚‚ƒ„…†‡ˆ‡‡‡‡†††…ˆ‡†…„ƒ‚‚‚‚‚‚ƒ‚‚ƒƒ„„ƒƒ‚‚ƒƒƒƒ‚‚‚ƒƒ‚‚‚‚‚‚ƒƒ‚‚‚‚‚‚‚‚ƒƒƒƒ‚‚‚‚€‚‚„ƒ€‚‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€€€wxyz{|~~~~~~~~~~€€~~}wwwwwwwwwwvusrqqsssssssswwwwwwwwwwxxxyyyyyyyxxwwyyyzzz{{}~~}}{zyzzzzzzzzyyyxxwww{{{{{{{{{|}|zyz{||||||||xy{}~~~~ttttttttrrrqqqpptrqrtutsvwyzzywvtuvvvwy{|€€€€€€€€€€€€€‚ƒ‚‚‚‚‚}|{||}}~~~}}}}}}}}}}}}}}}}~~~~~~~~~~~~~~~~~~~~~}ƒ„‚ƒƒƒ„„„„†„„……‚‚‚‚ƒƒƒƒƒƒ‚ƒƒ‚‚ƒ‚‚‚‚‚ƒƒ‚‚ƒ‚ƒ„„ƒƒ„ƒ‚‚ƒ„†††„‚‚‚„„…†…„ƒ‚„„ƒ„………„‡„……ƒƒ…ƒ€€€€€€€€€€€€€€€€„„ƒƒ‚‚€€€€€€€€……†††‡‡‡ŠŠŠŠŠŠŠŠŒŒŒŒŒŒŒŒ……………………………„„ƒƒƒ‚‚‚‚‚‚‚‚ƒƒ‚‚ƒƒ„„ƒ‚‚€„„ƒ‚€~}€€€€€€€€€€€‚‚‚€€€€€€€€……†‡‰Š‹ŒŒŒŒŒŒŒŒŒŒ‹‰‡…ƒ‚…†‡†ƒ‚ƒ„„ƒƒƒƒ„…†‚‚‚‚‚‚‚‚‚‚‚‚‚€€€‚‚ƒ„…††‡‡†„‚†††……„„ƒ††††††††„„„„„„„„…„„ƒƒ‚‚ƒƒƒƒƒ‚‚‚„…†„€‚…„„ƒ‚‚€€ƒ‚‚‚‚‚‚‚‚‚€€€ƒƒƒƒƒƒ„„‚‚€‚€‚‚‚‚‚ƒƒƒƒ‚€‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒ„€€€€€€€€€€€€€€€€xyyyyyyyzzzzzzzzzzzzz{{{xxxxxxxxzzyxwwvvuuuuuuuuzzzzzzzzvwwwxxxxwwvvuuuuxxxyyyyz~~}}{{zzzzzzzz{{{zzzyyxxxxxxxxyz{zxwwyzzzzzzzzwxz||}}|{{{{{{{{{zyywvuuwvuwz|{zuvxyyxvuuvvutuvx|€€€€€€€‚‚‚‚‚‚}|zz{{{||}||||||||~~~~~~~~€€€€€€€€~~~~~~~~~~~ƒ…ƒƒƒ„„„„…†„„…„‚‚‚‚‚‚‚‚‚€€‚‚‚‚‚€€€‚ƒƒ‚‚ƒ‚€‚ƒƒƒƒƒƒ‚‚ƒ„‡‡‡…ƒ‚ƒƒ„„„„„„„„„„ƒ„„…„„‚‚„‚ƒ„‚€€€€€€€€€€€€€€€€…„„„ƒƒ‚‚‚ƒ‚ƒ‚ƒ‚ƒ‚‚‚‚€€€‚‚‚„„„„„„„„††††††††‡‡‡‡‡‡‡‡…………„„ƒƒ††††††††€€€„„ƒƒ‚ƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒ€€€€€€€€‚‚‚€€ˆ‡‡†…„ƒƒ„†‡‡††‡‰……„„„…†‡‚‚‚€‚€‚‚‚ƒ„…‰ŠŠŠ‰ˆ†…‡‡††……„„„„„„„„„„‚‚‚‚‚‚‚‚‚ƒƒ„……„„ƒ„ƒƒƒƒ‚ƒ‚€€‚…„„ƒƒ€€ƒ‚‚‚‚‚‚‚‚‚‚‚ƒ‚ƒƒƒƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒ‚‚€‚‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€zzyxwvuuvvvvvvvvttuuvwxxwwwwwwwwzyzyyyyyyyyyyyyy{{{{{{{{zzz{{|||zzzyyyxx{{{{||||~}|{vuvuvuvuwwwvvuuuyxyxyxyyyz{zxwxyyyyyyyyyxyz{}|||zzzz{zzzwxyy{|}}}|{}€€{|~~|{{||zyyz{|~€€€€‚€€€‚‚‚~}{{||}}}}~~~~~~~~~~~~~~~~{|}~€‚€€€~~‚„…ƒƒƒƒ„„……†„„……‚‚‚‚‚‚‚‚€€‚€ƒƒƒ‚‚‚‚€€‚‚ƒƒƒ‚‚‚ƒ„‡‡‡…ƒ‚ƒƒ„ƒƒƒƒ„„„„ƒƒƒ„„„„ƒ…„ƒ‚€€€€€€€€€€€€€€€€€„„„ƒƒƒƒ‚„„„ƒ„ƒ„„ƒƒƒ‚ƒ‚ƒƒ‚‚‚ƒƒ€€€€€€€€‚‚‚‚‚€€€€€€€€€€€ƒƒƒƒƒ
ƒƒƒ€~€€„„ƒƒ‚‚…†‡‡‰‰Š‹……………………„ƒ„ƒ„ƒ„„€€‚‚‚‚‚‚‚‚‚‚‚‚‚€€‚‚‚‚‚‚‚‚€€€€€€ƒ„…ƒ€~€€€‚‚‚‚€‚‚‚ƒ‚‚ƒ„…ŠŠŠŠŠˆ‡†„„„ƒƒ‚‚‚ƒƒƒ‚ƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚€€€€€€€€€ƒ…„„ƒ‚‚€€‚‚‚‚‚‚‚‚‚‚‚‚ƒƒ‚‚‚‚‚„„„ƒƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒ‚‚‚‚‚‚‚€‚ƒƒ€€€€€€€€€€€€€€€€{{zywwvuwwwwwwwwtuuuuvvwuuuuuuuuvvvwwxwxyyyyyyyyxyxyxyxx{{|||}}}}}}}|||{{||||}}}}~~~}}|{uututututtstssrrtttuttttuvwvtssuvvvvvvvvtuvxxxwwzzzzzzzzyyyzz{{{|zyyzzxw{|~~|{{||{yyz||€€€€€€€€€€€€€‚‚€~~€€€~~~~~~~~~~~~~~~~~~~~~€€‚‚€€€}~~€ƒ„…ƒ„ƒ„„………†…„…„ƒ‚‚‚‚‚‚‚ƒƒ‚‚ƒ‚‚€€‚‚‚‚ƒ‚‚‚‚ƒƒƒ‚‚ƒ„†††„‚‚‚‚ƒ„„……„„ƒƒƒƒƒ„„„ƒ††…„‚€€€€€€€€€€€€€€€€‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒ„„„……‚‚‚‚‚‚‚‚€€€€€€€€€€~~~‚‚€‚‚…„„ƒ‚‚††‡ˆ‰‹‹Œ‹‹‹‹‹‹‹‹ŠŠŠŠŠŠŠŠ…††††‡‡ˆ„„„„„„„„‚‚‚ƒ‚ƒ‚‚€‚‚‚‚‚‚‚‚€‚‚‚€‚‚‚€€€‚€€‚‚‚‚‚‚‚ƒ‚‚ƒ„…†‡††……„„‚‚‚‚‚‚‚‚ƒƒ‚ƒ‚ƒ‚ƒ€€€€€‚‚‚‚‚‚ƒ„„„ƒƒ€€‚‚‚‚‚‚‚‚‚‚ƒƒƒƒƒ‚‚‚‚‚‚„„ƒ„ƒƒƒƒ‚‚‚‚‚‚‚ƒƒ‚‚‚‚‚‚…„‚‚‚ƒƒƒ€€€€€€€€€€€€€€€€{{{{{zzzzzzzzzzzzzzyyxxxvvvvvvvvvvvvwwwwuuvuvuuuxxxwxwxxxxxyyyzzzzzyyxxxxxyyzyzz{||||{{zzzzyzyzzyyyxxwwwvvvvvvvvwxywvuvwttttttttopqrsrrqutututuuwwwwxxyyzxvwxxwuz{}~~}{zyzzyxyz||~€€€€€€€‚‚~‚‚€€€€€~~}}}|‚‚‚~}|{~}}||{{{||~€‚ƒ„…„„„„……†††„„……‚‚‚„ƒƒ‚‚‚ƒƒ‚‚ƒƒ‚‚€€€‚‚‚‚‚‚ƒƒƒ‚‚‚‚‚ƒ‚‚‚ƒ„†††„‚‚‚ƒ„„…………„ƒƒƒƒƒƒƒƒ‚ƒ…†‡†„ƒ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚„ƒ„ƒ„ƒ„„„„ƒƒ‚‚‚‚„ƒƒ‚‚‚ƒ„……„„ƒ‚‚‚‚‚‚ƒƒƒƒ‚‚‚‚‚‚‚‚††††††††……††‡‡‡‡‡‡‡‡‡‡‡‡ŒŒŒŒŒŒŒŒ‹Š‰‡†„ƒ‚„„„„„„„„ƒƒ‚‚€ƒ„ƒ‚‚ƒ€€€‚ƒ‚‚‚‚‚ƒ‚‚‚‚ƒ„…„„„ƒƒ‚ƒƒƒƒƒƒƒƒ„ƒ‚‚‚€‚ƒ„…†‡††‡‡ˆˆ‰‰ˆ†…ƒƒƒ‚‚„„ƒ‚‚€€‚‚‚‚‚‚‚‚‚‚„„„ƒ‚ƒƒƒƒƒƒ„„„ƒ„ƒƒƒƒƒ‚‚‚‚‚‚‚‚‚‚ƒ‚‚‚‚‚‚ƒƒƒ‚‚‚ƒƒ€€€€€€€€€€€€€€€€{||}}}~~}}}}}}}}~~}}||{{{{{{{{{{z{zzzzzzyzyzyzyzzzz{z{zzyyyzz{{{yyyxxxwwyyyzz{{{z{{|{{zz{{{{{{{{}}||{{{{}}|}|}|}}~~|{|}yyyyyyyytuvwwvuuqqpqpqqqnooqrstussstxzyyxy{||{yxxxyxwxy{|€€€‚€‚‚‚~‚€€€€€ƒƒ‚€€€€€€€€€{{zzyzyy||{{zzyyz{}€ƒƒ„„„„……††††„„…„ƒ‚„„ƒƒ‚‚‚‚€€‚ƒƒ‚‚‚‚‚€€‚ƒƒ‚‚ƒƒ‚‚‚‚‚ƒ„‡‡‡…ƒ‚ƒƒ…„„ƒƒ„…†‚ƒƒƒ‚ƒƒƒ„…„ƒƒ‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ƒƒƒƒ‚‚‚ƒƒƒƒƒƒƒƒƒƒ‚‚‚‚ƒ……„„ƒ‚‚‚‚‚€€€€€€€€€€€€€€€‚‚‚ƒƒƒƒƒƒƒƒ……………………ŒŒ‹‹‹Š‹‹‹‹‹‹‹‹ˆˆ‡†„ƒ‚‚…††…€€‚‚‚‚ƒ„‚‚‚‚‚ƒ‚€€‚‚ƒ„……„ƒƒ‚ƒ„…€€€‚……………………‡‡‡‰‰Š‹‹‰‰Š‹‹ŒŒ‹ˆ…„„„‚„„ƒƒ€€‚‚‚‚‚‚‚‚‚‚ƒƒ„ƒƒ€‚‚‚‚‚‚‚‚‚ƒ‚‚‚‚ƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚€‚ƒƒ„„†€€€€€€€€€€€€€€€€|||}}}~~}}}}}}}}~~~}}}}}~~~~~~~~~}||{~~~~
~~~~}}}}}}}}|}}}~~~}}|||{{{||||}}}}z{||||{{{{{z{z{{|||{{zzz{z{z{z{{{|}|zyz{}}}}}}}}{|}}}|{{yyyyyyyyuutsrpooooorvxyxtuwxxwutvvvutsuv|~€€€€€€€€€€~‚€€‚‚€€„ƒƒ‚€}}}}}}}}zyzyzyzzzzzzzyyyxwwvuttsxy|‚‚‚„„……†††††„„……‚‚‚‚‚‚‚‚‚‚‚‚‚€€‚ƒ‚ƒ‚‚‚‚‚„„…ƒƒ‚„…‚ƒ‚‚ƒƒ‚‚‚‚‚ƒ„‡‡‡…ƒ‚ƒƒ…„ƒ‚‚ƒ„„‚‚ƒƒ‚‚ƒƒ‡‡„‚ƒ‚‚€€€€€€€€€€€€€€€€€€€‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒ€€€€€€€€€€ƒ‚‚‚………„ƒƒ‚‚„„ƒƒƒ‚‚‚ƒƒ„ƒ„ƒƒƒ‚‚ƒƒƒƒ€€€€€€€€ƒƒƒƒƒƒƒƒ‚ƒ„†‡ˆˆ‡‡‡‡‡‡‡‡Œ‹Š‰ˆ‡†ƒ…††…„†‡………„…†‡‡‚‚‚‚€‚‚ƒ„…†…„‚ƒ„…†‚‚ƒƒ……………………‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‰‰Š‹ŒŽ‰†ƒƒ„„ƒ„„ƒ‚‚€€‚‚ƒ‚‚‚‚‚ƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒ‚‚‚ƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚€ƒ††…„…‡€€€€€€€€€€€€€€€€|||||}}}||||||||{{{|}}~~}}}}}}}}€}|{{{{{{{{{{}}}}}}}}|||}}}~~~~~}}}||{{{||||}{||}}}||}}}}}}}}}}||{{{{{{{{{{{{{|}|zyz{{{{{{{{{zz{|{{yy||||||||}}||{{{{yyyz~€yz|}}|zy{{{ywvwx|€€€€€€€€€€€~~~€€‚€€…„„ƒƒ‚‚‚}}|{zzyxxxxxxxxxyyyyyyyy~}{xtqomnnmmkkjjvx{~‚‚‚„………†††‡†„„…„‚‚€€€‚‚ƒƒ‚ƒƒ‚‚ƒ…„„ƒ‚‚€€~€‚€‚‚ƒ‚‚‚‚ƒ„†††„‚‚‚ƒƒƒƒ‚‚‚‚‚ƒƒ‚‚‚ƒ†…}|{z|€€€€€€€€€€€€€€€€‚‚ƒƒ„„„‚‚‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚€€€€€€€€‚‚‚‚€€‚‚‚‚‚‚‚‚ƒƒ‚‚ƒƒ†……„ƒƒ‚‚‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒ€€€€€€€€€€€‚‚‚ƒƒƒƒƒƒƒƒ„„„„„„„„‚‚‚‚‚ƒƒƒ‚‚‚‚‚‚‚‚„ƒƒ‚€‚ƒ~~‚‚‚ƒ„‚‚‚‚‚‚‚‚‚‚‚€‚‚‚ƒ„……„‚‚ƒ„†‡ˆˆ‰‰ŠŠŠ‹‹‹‹‹‹‹‹‰‰‰‰‰‰‰‰…†‡ˆŠ‹ŒŒŒŽ‘‘‡„ƒ„ƒ„„ƒ‚‚€€‚‚ƒ‚‚‚‚‚ƒ‚€‚ƒ„ƒƒ„„„„„„†††……………‚‚‚‚‚‚‚‚‚‚‚‚ƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚€„‰Šˆˆ‰Œ€€€€€€€€€€€€€€€€}}}}}}}}}}}}}}}}}}}|}}}}}}}}}}}}~~~~~~~~||||{{{{}}}}}}}}||||||||}}}}}}}}||||||||||}}}}|{{{{|||}}}}}}}}}}}}}}}}}}~~}|||}||{zyxx{{{{{{{{zzzzzzzz}}||||||~}}|||}}~~~}}~~~~~~~~~€€€€€€€~~€‚ƒ‚‚‚‚††††††††‚~|zzzzzyxxyz}~{{||}}~~‚~zpjgdgosqlghnu}‚ƒ‚ƒ…………††††„„…†…ƒ‚‚€‚‚‚ƒ‚‚‚€~|{~~}|{zyyxxyz{zxu|‚ƒ‚„‚‚‚ƒ……‡ˆ…‚†„„„„…………‚ƒƒƒ‚‚ƒ…„|xuv{~€€€€€€€€€€€€€€€€‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒ€€‚‚‚‚‚‚‚‚‚‚‚†‡‡†„ƒƒ„‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚€‚ƒ„„„„„„„„ƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚‚ƒƒƒ„„„„‚€‚‚‚ƒ‚€€€€€€€€€€‚‚‚€‚€€‚‚€ƒ‚€‚„…‚‚‚‚ƒƒ†ˆŠ‹Œ‹Š‰ŽŒ‰ˆˆ‰‰ƒƒ„„„„„ƒƒ„…‡ŠŒŒ‰†‡‹Œ†ƒƒƒ‚‚ƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒ‚‚ƒ‚‚‚„†‰‹‰‰‰‰‰‰‰‰‹ŠŠ‰‰‰Š‹‡‡†ƒ€ƒƒ‚‚‚‚‚‚ƒƒ‚‚ƒ‚‚€€‚‚‚‚ƒƒˆˆ„„€€€€€€€€€€€€€€€€||||||||||||||||}}}}}}}}}}|}|}|}}}}}}}}}}}}}||||}}}~}~}}||||||||}}}}}}}}||||||||z{|}}~}}{{|||}}}}}}}}}}}}}}~}}}}~~~}|||~~}}||{{||||||||||||||||}}|{{{||{{zzz
z{{|}}~~}}|||||||||€€€€€€€€€€ƒ‚‚‚…††………………………ƒƒ‚‚€~~~}||~€~~~~~€}{wtutllv||xpkmu}‚ƒ‚ƒ…………††††…„„……ƒ‚‚‚‚‚‚ƒ„ƒ|zxwzzzzz{{{zzz{|{yw{~‚ƒ…‚‚‚ƒ…†‡†„ƒ…„„„„„„„„„‚‚ƒ……„„‚}xuw{€€€€€€€€€€€€€€€€‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒƒ…††…ƒ‚‚ƒƒƒ‚‚‚ƒƒƒƒƒƒƒƒ€€€‚‚ƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒƒ„„ƒƒƒƒ„„‚„…„‚€€‚‚‚‚‚‚‚€‚€€€ƒ‚‚‚‚ƒ„ƒƒ…†††„„‡‡………†‡ˆ‰‰‰‰‡…ƒ‚ƒƒƒ„„„ƒƒƒƒ„†ˆ‰‰ˆ‰‡……ˆ‹ŽŒ†ƒƒƒ‚‚ƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒ‚‚‚‚ƒƒ…†ˆ‰‰‰‰‰‰‰‰‰‰‰ˆˆ‡ˆ‰‰‡‡†„€ƒƒ‚‚‚‚‚‚ƒ‚‚‚‚‚‚€€€€‚‚ƒƒƒ‚‚…‹‰„€€€€€€€€€€€€€€€€{{{{{{{{||||||||}}}}}}}}|||{|{||||||||||~~~}}|||~~~~~~~~}|}|}|}}}|}|}|}|||||||||yz|}~~~~||||}}}}~}~}~}~~~~~~~~~~~~}}|||~~~~~~~~}}}}}}}}}}}}}}}}}}||{{||||{{{{||{||}}}||{{{{{{{{€€€‚€€€€€„‚€‚ƒ‚ƒ€‚†‡†„€€€€‚ƒ…††…„ƒ‚€€€€€€~~}~}{ury{tx~€wpou}‚ƒ‚ƒ„„……†††††„„„„‚‚‚‚‚ƒ‚€~|{z{|yzz{||}}|||}}}{yz|‚ƒ„†‚ƒ‚ƒ…‡‡…ƒ‚‚ƒ„„„„ƒƒƒƒ‚„‚‚„…†……‚~yvw|€€€€€€€€€€€€€€€€€ƒ‚ƒ‚ƒ‚ƒƒƒ‚ƒ‚ƒ‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒƒƒƒƒ„„…„‚‚‚ƒƒƒ‚‚‚‚‚‚ƒ‚ƒ‚ƒ‚€€€‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒ‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒƒ‚‚‚ƒƒ‚‚‚‚ƒƒ„…††„‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒ‚‚€€€€€‚‚ƒƒƒƒƒ††…„……‡ˆ‡…„ƒƒƒ……„†ˆ‰‰…~ƒƒƒƒƒƒƒƒ„ƒ„…ˆˆ‡……„„ƒ„†ŠŒ†ƒƒ„‚‚ƒƒƒƒ‚‚‚ƒƒƒƒƒƒƒƒ‚‚‚ƒƒ‚‚‚‚ƒ„…†‡‡‡‡‡‡‡‡‡‡‡‡‡††††‡‡‡‡‡„‚€‚ƒƒ‚‚‚‚‚‚‚ƒ‚‚€€‚‚ƒƒƒƒŠŽŠƒ€€€€€€€€€€€€€€€€{{{{{{{{{{{{{{{{||{|{|||{{{{{{{{{{{|{|{{}}}}||{{}}|}|}}}}}}}|}}}||||||||||||||||z{|}}~}}|||}}}}~~~~~}~~~~~~~~~~~~~~~}}~~~~~~~~||||||||||||||||~}}||||}}}|||}~~{|}}~}}|{{{{{{{{€‚€€€€€…ƒ€€‚‚ƒ€ƒƒ‚€€€€€ƒ…††††€}}||~~}}|||vlowy~zrpu}‚ƒ‚ƒ„„„……††††…„„ƒ‚‚ƒ‚€‚ƒ‚~|{}~~}~}~}~~}}~~}{y{~€‚„…†‚‚‚‚ƒ…‡‡†„‚‚ƒ………„„ƒƒ‚‚‚ƒ„„ƒ„†ˆ…ƒ~yvx|€€€€€€€€€€€€€€€€€ƒƒƒƒ‚ƒƒƒƒƒƒƒ‚ƒƒƒ„„„„„„„„ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„„„„„„„„ƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚ƒƒƒƒ„„„„ƒƒƒƒƒƒƒƒ„……„‚‚‚ƒ„„ƒƒƒ‚‚‚€€‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚„„„„ƒƒ‚‚‚‚…†‡†„ƒ‚ƒƒƒ‚ƒ‚ƒƒƒ‚‚‚‚‚‚‚‚€€€€‚ƒ„…„ƒ‚ƒ„„……………„…‡ˆ‡†ƒƒ…ˆ‹Š‰…„„„ƒƒ‚ƒƒƒ…„„‡‰‹ˆ†‚ƒ„ƒƒˆŒ†‚ƒƒ‚ƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒ‚‚‚‚ƒ…‡ˆ‡†…††††††††††…†…†††‡‡‡†‚ƒƒ‚‚‚‚‚‚ƒ‚‚‚€€‚‚‚‚‚‚‚‚„Š‰ƒ€€€€€€€€€€€€€€€€€{{{{{{{{{{{{{{{{zzzzzzzz{z{z{z{z{{{{{{{{|{{{{zzz{{{{{{{{||}|}|}|{{{{{{{{{{{{{{{{||}}~}||||||}}}}}}~}~}~}~~~}~}~~~~~~~~}}}|}}}}}}}}||||||||~~}}}}}}||{{||}~|}~~~~||||||||€€€€€€€€ƒ€€€€€€€€€€€‚ƒ…†‡€€~|{
{}}}}}}}}}€xihs|}~}~|snu}‚ƒ‚ƒ„„„„…………‡……„„‚‚‚€‚‚…‚|{{}~~~~~~~~~~~~~|yz|‚„…„‚ƒ‚ƒ…†‡ˆ„€ƒ†††……„„ƒƒ‚ƒ„„ƒ„†‡…ƒ~ywx}€€€€€€€€€€€€€€€€€‚‚ƒ‚ƒ‚ƒ‚‚‚ƒ‚ƒ‚ƒ‚„ƒ„ƒ„ƒ„„ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„„„„„„„„…„…„…„……ƒƒƒƒƒƒƒƒƒƒƒ„„„„…ƒƒƒƒƒƒƒƒ„…†…„ƒ„…„„„„ƒƒƒƒ‚‚ƒƒƒ‚ƒ‚ƒ‚ƒƒ‚‚‚‚‚‚‚‚„ƒ„ƒƒ‚‚‚„ƒƒ‚‚‚‚‚„…†…„‚‚‚ƒƒƒƒƒƒƒƒ‚‚‚‚‚€€‚‚‚€‚„……„ƒ‚‚ƒ„„„„„„‚…‰‹Š‡ƒ„„†‡‰Š‹‹†…„ƒƒƒ„„„‚ƒ†‹Œ‰†„…ƒ‚ˆŽŒ†ƒƒ„‚‚ƒƒƒƒ‚‚‚ƒƒƒ‚‚ƒƒƒ„…†ˆˆ‡†„†††††††††††††††††‡ˆ†„‚‚ƒ‚‚‚‚‚‚‚ƒ‚‚€€€‚‚‚ƒƒƒ‚‚€†‹‹‡‚€€€€€€€€€€€€€€€€{{{{{{{{||||||||zzzzzzzz{{z{z{{{{{{{{{{{{{zzzzyyzzzzzzzz||{|{|{|z{z{z{z{zzzzzzzz|}}~}}|{{{{{||||}}|}|}|}}}}}}}}}~~}~}}}}~~~}}||~~~~~~~~||||||||~}}|||}}||||}}~|}~}}}}}}}}~€€€€€€€€€€€€€€€‚€€€€€€€€ƒ‚‚ƒ…‡‚ƒ‚‚€~}~~~~~~~~~~xlku~~||€|uqu}‚ƒ‚ƒƒ„„„„………‡………„‚‚€€‚}{zz{|}z{{|}~~~~~~~~~}{{|‚„ƒ‚‚‚‚‚ƒ……‡ˆ…€‚††………„„„„…ƒ‚‚„……„…‚}yvx|€€€€€€€€€€€€€€€€€‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ……………………„„„„„„„„ƒƒƒƒ„„„„„„„„„„„„„†††„„…†………„„„ƒƒƒ„ƒ„ƒ„ƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒ„ƒƒƒƒƒƒƒƒ„„„„„„„„ƒƒƒƒƒƒƒƒƒƒƒƒ‚‚‚‚„ƒ‚‚ƒ„……ƒƒƒƒƒƒƒƒƒ‚‚‚‚‚€€‚ƒƒƒ‚‚„………ƒ‚…„ƒƒƒ„…†ƒ„…‡†…ƒ‚ƒƒ‚ƒ„‡ŠŒˆ‡…„ƒ„„…‚†ŠŒˆ„‚„…„‚ƒ‰ŽŒ†‚ƒƒ‚‚ƒƒƒƒ‚‚‚ƒƒƒ‚‚ƒƒƒ††‡‡‡†……………………………††‡†††…†ˆˆˆ„ƒ‚‚ƒ‚‚‚‚‚‚ƒ‚‚‚€€‚‚ƒƒ‚‚‚‡Œ‹†ƒƒ€€€€€€€€€€€€€€€€||||||||||||||||{{{{{{{{{{{{{{{{|||{|{||{{{{zzzzzzzzzzzz{{{{{{{{zzzzzzzzyyyyyyyy{|}}~}}|zzzz{{{|||||||||||||||||{{{{{{{{|{{{{{{{||||||||{{{{{{{{||{{{{{{||||}~~{|}~}}}}}}}}~€€€€€€€€€€€€€€€€€‚‚‚‚‚‚‚‚‚‚ƒ‚‚‚‚‚‚‚ƒƒ„„„‚€~~}}||{|}}vmpy}€}||uru}‚ƒ‚ƒƒƒ„„„„……†…………‚€€€ƒ‚yyz{|}~~{{||||}}~}}}~}||{|~‚ƒ‚€‚ƒ‚ƒ…†‡‡„€ƒ…„„„„„„„„…„‚‚ƒ„ƒ‚„}xvw{€€€€€€€€€€€€€€€€€€€€‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒ„„„ƒ„ƒ„„ƒƒƒƒƒƒƒƒ‚‚ƒƒƒ„„„„„„„„„„„ƒ„……„ƒ„…†…………„„„„„„„„„„„ƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„…††††††††…………………………………………„„„„„„…„„„ƒ‚ƒ…†…ƒ‚‚‚‚‚‚ƒ‚‚‚ƒ‚‚‚‚€‚‚‚€€ƒƒƒ‚ƒ„……„„ƒ‚ƒ„…†…„„„„ƒƒƒ„……„ƒ‚€‚„‡‰Š‰‡…„„††…ƒƒ‡ŒŒ‡‚ƒ„„ƒ‚„‰Œ†ƒƒ„‚‚ƒƒƒƒ‚‚‚„ƒƒ‚‚ƒƒ„‰ˆ‡††…††††††††††„…†††……„†ˆ‰ˆ†ƒ‚‚ƒ‚‚‚‚‚‚‚ƒ‚‚‚‚ƒƒ‚†Œˆ„‚€€€€€€€€€€€€€€€€}}}}}}}}}}}}}}}}}}}}}}}}{{{{{{{{||||||||||||{{{{{{{{{{{{zzzzzzzzzzzzzzzzxxxxxxxxzz|}~~~~yyzzz{{{{{{{{{{{{{{{{{{{yyyyyyyyxxxyyzzzzzzzzzzzyyyyyyyy{{zzyyzzyyyyz{||yz|}~~~~||||||||}€€€~€€€€€€€€€€ƒ‚€}€€€€€€€€€‚‚€~}‚„…„ƒ€~~}|zyxxy}~ulqz{{~ƒ|qmu}‚ƒ‚ƒƒƒƒ„„„……†……
†…‚€€€‚ƒƒ}}~~~}|€~||{z}||}}}|}||~‚ƒ~‚‚‚ƒ…‡‡…„‚‚ƒ„ƒƒƒƒƒ„„„„„„ƒ‚ƒ„|xuv{~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒ„„„„„„„„„„‚ƒ„„ƒ‚ƒ„††………„„„ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„„ƒƒƒ„…………………………††††††††‡‡‡‡‡‡‡‡††††‡‡‡ˆ‡‡†…„„„„„†‡†„‚‚ƒ‚‚‚‚‚‚‚‚ƒƒ‚‚‚‚€ƒƒ‚‚ƒ€‚ƒ‚‚ƒ„„„„„„ƒƒƒƒƒ„„…„„ƒƒ„…†‡‡…ƒ‚‚ƒ„…‹Š‡…„…†‡ˆ††‰ŽŽˆ‚ƒƒƒ‚‚…‰ŒŒ†ƒƒƒ‚‚ƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚„ƒƒ‚‚ƒƒ„‹‰‡………†‡††††††††ƒ„…†……„ƒ†ˆ‰‰†ƒ‚‚ƒ‚‚‚‚‚‚ƒƒ‚‚€‚ƒ„…„‚…‹‹…€€€€€€€€€€€€€€€€|ywwz}€}€€~~}}}}}}}|}}}}}}}}}}|{{{|}}}}}}}}}}~€}|~}}}~~€}zz|~}|||||||||x{‚‚~|zzzzzzzz}|zyxxyyzz{{{{{{{ywvvx{}zzzzzzzzzzzzzzzzyyyyyyyyzyzyzyzzuwxyyz{}yz{|}||{yyzz{{||}€€‚~€€€€€€€€‚‚€€€ƒ‚€€€ƒƒ‚‚€‚‚‚‚‚‚‚‚ƒ…‡†ƒ‚ƒ€}|~vorvzymmx~vmmu{€„…„ƒƒ„„…††‡ƒ„……„ƒ‚‚€ƒ‚||}}}}}|}}}}}~}}}~~}~~|{|~ƒƒ‚‚‚ƒ„…‰ˆ‡„‚‚ƒ…ƒƒ„……„ƒƒƒ‚ƒ…ƒ‚ƒ‚~yuvz€€€€€€€€€€€€€€€€€„‡‡‚€€€€€‚€~~‚€€€€€€€€‚‚€~~€€€€€~~~€€€€€€€€€€€€ƒƒƒƒƒƒƒƒ‚‚ƒƒƒƒƒƒ‚ƒ„„ƒ‚‚……„„…†‡ˆƒƒƒƒƒƒƒƒƒƒƒƒƒ„„„„„„„„„„„†††††††††††………„„ˆ‡†„„„„„……„„„„„„…„ƒ‚€€€‚‚€€€€€€‚‚‚‚€~‚„„ƒƒ……†††‡‡‡ƒƒ„„……††………………………Š‹‡…‰ŠŠ‰‰ŒŽŠƒ……„ƒ‚†’Œ†‚‚‚„„„ƒƒ‚ƒ‚‚‚‚‚‚ƒƒ‚ƒ‚ƒ‚ƒ‚……………„„„†††……………„……††…„ƒ‡†‡ˆ†ƒ‚‚‚‚€‚ƒ‚€ƒ‚„††ƒ…‡ŠŒŠ„€€€€€€€€€€€€€€€€€|yvvy}€~€‚€€€€€€€€~}}~~|{xxxxxxxx{}~}{{|~{|||}}~~‚|}€}}}}}}}}xz~€}{||||||||€}|{||}~~||{||}||zzzz{|zzzzzzzz}}|{yxww{{{{{{{{xxxxxxxxyyyyyxyyz{{||{zyyyyzz{{{|€€‚~€€€€€€€€~€€‚‚€~€‚‚‚‚‚‚‚‚ƒ…‡…ƒ‚‚}{yqkovx|xnmw€~|slnu{€„…„ƒƒƒ„……††„„………„‚‚‚€ƒ‚|}}~~~~~}}}~~~~~~~}}~||{~ƒƒ‚‚‚ƒ„…‡‡…„ƒ‚ƒƒ„„„ƒ‚ƒ‚ƒ…ƒƒƒ‚~yuvz€€€€€€€€€€€€€€€€„ˆ‹Š†‚‚ƒ€€€€‚€€€ƒ„‚‚‚‚‚‚‚‚€‚ƒ„ƒ„†ˆˆˆˆˆˆˆˆˆƒƒƒƒƒƒƒƒ„ƒƒ‚‚€€€€€€€€€~‚‚~€€€€€€€€€€‚ƒ„„…„ƒ†††††††††††…………………„…„………††††††††‚‚‚ƒƒƒ„„ˆ‡†…„……†††††…†……„„ƒ‚‚‚€€€€€‚‚‚€€ƒ„„„„‚ƒƒ„„…††…………„„„„……………………†‰‹‹ˆ‡‰Œˆˆ‡‡‹Š„‚ƒ„…†‰ŒŒ†‚‚‚„„„ƒƒ‚‚ƒ‚‚‚‚‚‚ƒ‚ƒ‚ƒ‚ƒ‚ƒ………………„„††…†…†…………†††…„„‡†‡ˆ†ƒ‚‚‚‚‚ƒƒƒ‚€‚ƒ€‚………ƒ„‡‹ŒŠ„€€€€€€€€€€€€€€€€€{xvvy}€€‚„ƒ‚‚‚‚ƒƒƒ„……†ƒƒƒƒƒƒƒƒƒƒ}zuuuuuuuuz{}|zy{|{{{{||}}~{yx{||{xxxxxxxxwy|~}{z{{{{{{{{~}{{{|}~~~~~~~~}|{z}}}}}}}}~}}||{{{zzzzzzzzyyyyyyyy}}|{zzyx}}~}}{zyzzzz{{|||~€€€€€€€€€€~‚€€~}}}~~~€‚ƒƒ‚‚‚‚‚‚‚‚€„†‡†ƒ€‚‚{vnlrtquvomv€€|wskiou{€€ƒ…„‚‚ƒƒ„………„……†…„ƒ‚‚ƒƒ}}~€€~~~~~}~~}}||}|{|}ƒƒ‚‚‚ƒ„…†††„ƒ‚‚‚ƒƒƒƒ‚‚ƒ‚ƒ…ƒ€„…‚}xuvz~€€€€€€€€€€€€€€
€€†ŠŒ‡ƒƒ„€€€€‚€‚„„‚‚‚‚‚ƒƒƒ…‰Œ††††††††‰ˆˆ†…„ƒ‚‡‡‡‡‡‡‡‡††††††††‡…‚€€‚…‡…„…„…„…„‚‚‚‚€€€ƒ„†„ƒ‚€€„„ƒƒ‚€€……………………ƒ„„……††‡„„ƒƒƒ„…†……………………„ƒ‚‚‚‚€‚‚‚€ƒ„…………‚‚‚‚ƒƒƒ†……„„ƒ‚‚……………………‡ˆ‰‰‰‰ŠŠ‡‡††ŠŽ‹†‚…‰ŽŽŒ†‚‚‚„„„„ƒ‚ƒ‚‚‚‚‚‚ƒ‚ƒ‚ƒ‚ƒ‚…………………………†…†…††††††††……‡†‡ˆ†ƒ‚‚‚‚‚‚ƒ„„ƒ€‚‚‚‚‚‚‚‚ƒ€€ƒ„…„‚„‡‹Š…€€€€€€€€€€€€€€€€€ywtvy~‚‚„……„ƒƒ„ƒ„„……‡‡‡††††††††‡…„†‡…€{vvvvvvvvz|}}{z{}|||}}~~~|yvwxzzywwwwwwwwxy{||{zxwwwwwwwwzywvvwxyz|‚ƒ‚€~€|ywyzyzyzyytuvxyz{|{{{{{{{{zzzzzzzz~||}}|z€€€~|zy{|{|||||{~€€€‚€€€€€€€€~}|{{|}||}€‚ƒ„‚‚‚‚‚‚‚‚€€„†‡„‚‚€}vqryuprumhow~~wpjdeou{€„…„‚‚ƒ„„…………††…„ƒ‚‚€ƒ‚}}~€€€~}}}~}}}}}}~~||{~ƒƒ‚‚‚ƒ„…†‡‡‡„ƒ‚‚ƒƒƒƒƒƒƒƒ„‚ƒ„‚€…‡ƒ}wuvz~€€€€€€€€€€€€€€€€„ˆ‹Š…€€€€€€‚~~~~~~~~€€~~€„‡‹Œ‹Œ‹Œ‹‹†††‡‡‡‡ˆ‰‰ˆ‡†……„ŠŠŠŠŠŠŠŠŒŒŒŒŒŒŒŒŠ‡……‡Š‹‹‹‹‹‹‹‹ˆˆˆˆ‡‡††„ƒƒ‚ƒƒ„„ƒƒƒƒ„…‡ˆ††††††††ˆˆ‡†…ƒƒ‚„„„„„„„„€€€€€€€€‚‚€€ƒ„…‚‚‚‚ƒƒƒƒ‚‚‚‚‚‚‚‚‚„††‡††……„ƒ‚€„„ƒ„ƒƒƒƒ……………………ˆ‡†‡‰‹Š‰‡‡†‡‹ˆƒ„†Š’‘ŽŒ†‚‚‚„„„ƒƒ‚‚‚‚‚‚‚‚‚ƒ‚ƒ‚ƒ‚ƒ……………………………†…††††††††††…‡†‡ˆ†ƒ‚‚‚‚‚ƒ„…„ƒ‚‚‚‚‚‚‚ƒ€ƒ…„ƒƒ‡Œ‹…€€€€€€€€€€€€€€€€€xutty~‚ƒƒ„††…ƒ„„‚‚ƒƒ„„……††††††††ˆ‡ˆˆ‰†‚}xxxxxxxx|~~}|}~~~€€€€}zz}~~|}}}}}}}}yyz{zzyxwwwwwwwwwvuuuvwx{}€‚ƒ‚€~|xvssssssssnnpprsttrrrrrrrrwwwvwwwwzxwy|~}{~~~~|zxwzzzzzyzyz~€€€€€€€€€€~}|{zz{|z{|~€ƒ„‚‚‚‚‚‚‚‚‚€€„……„ƒ~vqtyxrstkcgn{|xrmeelu{€€ƒ…„‚‚ƒ„„………†††…„‚‚ƒƒ||~~€~}{{|}}}}~€|{|}ƒƒ‚‚‚ƒ„…†‡ˆ†…‚‚‚„„„ƒ„ƒ„„„ƒƒ„‚€ƒ‡‰ƒ|vtvz}€€€€€€€€€€€€€€€€„‡Šˆ„€€€€€€€~~~~~~~~~~~~|{}‚…ˆˆˆˆˆˆˆˆ„………‡‡‡‡††††††††…„…„…„…„†††††††††ˆŒŽŽŒˆ†ˆˆˆ‡ˆ‡ˆˆŒ‹‹ŠŠ‰‰ˆ‚ƒ„„„„ƒƒ„„„…†‡‰ŠŒŒŒŒŒŒŒŒ‘‘ŽŽŽŽŽŽŠŠŠŠŠŠŠŠˆˆ†…ƒ€€‚‚‚‚ƒ„…†‚‚ƒƒ„„ƒ‚‚‚‚‚‚‚‚ƒ„†‡ˆˆˆ‡‹Š‰†„€€‚‚„„…†……………………ˆ…„†ŠŒ‹ˆŠ‰ˆ‡Œ‹…ƒƒ‡ŒŒ†‚‚‚„„„„ƒ‚‚‚‚‚‚ƒ‚ƒ‚ƒ‚ƒ‚…………†…†……………††††‡‡‡†††††‡†‡ˆ†ƒ‚‚‚‚ƒƒ„……ƒ‚ƒ‚„„ƒ‚‚‡Œ‹…€€€€€€€€€€€€€€€€€vtrty~‚„‚„…†„ƒƒ„ƒƒƒƒƒƒƒƒ…†‡‡…ƒ€xxxxxxxx}€}}~€€€€|yz{~}|~~~~~~~~{{zzzyyy{{{{{{{{{zyyyz|}€€~€€}ywwwvwvwwwxxvusrqqoooooooorrrrrrrrsqpty||zzzzzzxwvxxwwvvvvy~€€€‚~~}|zzz{{zz{}~€‚‚‚‚‚‚‚‚‚ƒ€„…†…€~uopqqostkflsx|}}yohku{€„…„‚‚ƒƒ„…………†††…ƒ‚‚€ƒ‚|}}~~~}}~~|{{||{zz|}~~||{~ƒƒ‚‚‚ƒ„…†††…‚‚„„„„ƒ„„„…ƒƒƒ‚„‰‹„{utwz|€€€€€€€€€€€€€€€€†ŠŒŠ…€€€€€€‚€~‚‚‚‚‚‚‚‚~€€€‚†‰ˆˆˆˆˆˆˆˆƒ„„…†‡‡ˆƒƒƒ„„„……„„„„„„„„ƒƒƒƒƒƒƒƒƒ…ˆŠŠˆ…ƒ……………………‰‰ˆ‡††…„€‚ƒ„„„„†……„„…†
†……………………ƒƒƒƒƒƒƒƒŽŽŽŽ‘‘‘‘‘‘‘‘‘ŒŠ‡…„†……„…†‡ˆƒƒ„……††‡„ƒ‚‚‚‚‚‚‚‚‚‚„…†ˆ‰Š‰‰Ž‹ˆ…ƒ~€‚ƒ…†‡……………………†…„†‰ŒŠ‰‹‹ˆ‡‹Š„ƒ‚ƒ†‰ŒŒ†‚‚‚„„„ƒƒ‚‚‚‚‚‚‚ƒ‚ƒ‚ƒ‚ƒ………†…†††……………†††‡‡††††††‡†‡ˆ†ƒ‚‚‚‚‚ƒ„…„ƒ‚€€€‚‚ƒ„…‚~‚‡Œ…€€€€€€€€€€€€€€€€€usrsy~‚„ƒ„„ƒ‚‚ƒ‚€~~~~ƒ~|~€yyyyyyyy|}~|{}~~~€€€~{xxz||z{{{{{{{{}|{zzyzz||||||||~~}}}‚„ƒ‚€€€‚‚}{||||||||~}zxutsyyyyyyyysssssssssposz~}zyz{{{zyyyyxxwwwvy~€€‚‚€€‚€€~~}|{zz{|z{||~€€‚‚‚‚‚‚‚‚‚‚ƒ…†€€wrqnlowwnlu{y}vmmu{€„…„ƒƒƒ„……††……†…„ƒ€‚€ƒƒ}~~~~}}|}€}{}~}{zyz{{{|{|}ƒƒ‚‚‚ƒ„…‡‡†ƒ‚‚ƒ„„ƒ‚ƒƒ„„…ƒ‚ƒ‚…ŠŒ„zttwz|€€€€€€€€€€€€€€€€ˆ‹ŽŒ‡‚‚€€€€~€‚ƒ……„„‡‡‡‡‡‡‡‡‚„††††ˆŠ‰‰‰‰‰‰‰‰„„…†‡ˆ‰‰„„„„ƒƒƒƒ††††††††……………………‡†„‚‚„†‡††††††††††…„ƒ€€‚ƒ„………„„ƒ„„…†……………………‚‚ƒ„…††‰‰‰‰‰‰‰‰‡‡‡‡‡‡‡‡‹‰‡„ƒ‚†…„ƒƒƒ„„ƒ„„…†‡ˆˆ„„ƒ‚‚‚‚ƒ‚€‚‚…†ˆ‰ŠŠŠŠŒ‹Šˆ†„ƒ‚€ƒ„……………………………„…†‰ŠŠŠŒŠ‡…ˆŒ‡………„…‡ŠŒ†‚‚‚„„„„ƒ‚‚‚‚ƒ‚ƒ‚ƒ‚ƒ‚…………††††„„……††‡‡‡‡†………††‡†‡ˆ†ƒ‚‚‚‚‚ƒ„„„ƒ‚‚‚‚ƒ…„‚€}‡Œ…€€€€€€€€€€€€€€€€tsqsx~ƒ„€‚„„‚‚‚‚‚‚€~}||}}}}}}}}y}€}xw{€{{{{{{{{z|}|zz{}}}}~~‚||€€~~~~~~~~~}|{zzzzzzzzzzzz~~€‚ƒƒƒƒ‚‚€‚ƒƒƒ‚€xxxxxxxxyxwvtsrrwwwwwwwwwwwwwwwwwsrv}||}~~~|||{{zzyx~€‚‚€€€€€‚‚~}|{{{{|{{||}~‚‚‚‚‚‚‚‚„„‚‚ƒ…~€‚|wvqpv~zomvz{}}€xppu{€„…„ƒƒ„„…††‡…………„ƒ€‚€ƒ‚~~}}||~€}}~€|{|||||{{~ƒƒ‚‚‚ƒ„…‰ˆ†„‚‚ƒ…„ƒ‚‚‚‚ƒ„…ƒ‚ƒ‚…‹…zttwz|€€€€€€€€€€€€€€€€ˆ‹ŽŒ†‚€€€€}‚…ˆ‰‰‰‰‰‰‰‰‰‰‰ˆŠ‹‹ˆ‡‡‡‰‰‰‰‰‰‰‰……†‡ˆŠŠ‹‡††„ƒ‚ƒƒƒƒƒƒƒƒ……………………‡‡†……†‡‡„„ƒ„ƒ„„„†…„ƒ‚€…„ƒƒƒ„…†‚‚‚ƒ…‡‰Š……………………}~ƒ„††ˆˆˆˆˆˆˆˆˆˆˆˆˆˆˆˆ‹‹Šˆ‡…„ƒƒ‚~‚‚ƒ„…††‡…„ƒ‚€€€‚‚ƒƒ‚‚€€‚‚†‡ˆŠ‹‹‹Šˆˆ‡††…„„‚‚ƒƒƒ……………………„„…†ˆ‰Š‹‹‰…ƒ†Š‰…‡‰Š‰ˆˆ‹ŽŒ†‚‚‚„„„ƒƒ‚‚‚‚‚ƒ‚ƒ‚ƒ‚ƒ………†††††„„……††‡‡‡‡†………††‡†‡ˆ†ƒ‚‚‚‚‚ƒ„„„ƒ€‚‚‚‚‚‚‚‚‚‚‚„…„‚}‡Ž…€€€€€€€€€€€€€€€€wursx~ƒ‡†…„ƒƒƒƒ€€€€~~}|{{{|}|{zyyz{|zzzzzyyy}}xuvwwzzzz|~~zx|€€~~~~}}~~~}|z|~~{wx{~~|{{|wz~ƒ‚€€‚ƒƒ‚€ƒƒ„„ƒ~yxwvuvxzy||wuvxwvwxyyxwvtuuuvvvwvutvz|}}}~€€}|}~}{yxy{€‚€€‚ƒ‚€€€€€€}}|{{zzy{{|}~€€€€‚ƒƒƒƒƒ‚‚ƒƒ‚‚€~€}vpqz‚€€€{smtyƒ„ƒƒƒ„„„„…………†……††ƒ€€€€€€~~}|||||}}~~~~~}}||||{{}ƒƒ‚‚…‡ˆˆ‡†„ƒ„…ƒƒƒƒƒƒƒƒ††…„…ˆ‹…}vtv{~€€€€€€€€€€€€€€€€„ˆ‹Š†‚ƒ€€~~‚ƒ„„………‡‡‡‡‡‡‡‡ˆˆˆˆˆˆˆˆ‡‡ˆˆˆ‰‰‰ˆ‡ˆ‹ŒŠŠŒ‹ŒŠ‡……„ƒ†††……„„„„„„„„„…………………………„„ƒ‚ƒ„†‡‡…„‚€€‚ƒƒ„„ƒƒ†„‚‚„†„‡‡ƒƒ…„‚‚„ƒ„‰‡‡‡‡‡‡‡‡‡‡ˆ‡ˆ‡ˆ‡ˆ‰Š‰†„ƒ„‚~{|}}~€„‚€ƒ„„„ƒƒ‚‚‚ƒ‚‚‚‚‚‚‚ƒ†††‡‡ˆˆ‰‹‰‡††…ƒ‚‚‚€€
…††††…„ƒƒƒ„„…††‡~€„‰Œ‹‡„…„ƒ„†‰Œˆƒ‚ƒƒ„„ƒƒ‚€‚‚‚‚‚ƒƒƒƒ„……†‡‡‡‡†………†††††„„…††…„„…†ˆ‡…ƒƒƒ‚‚‚‚€‚‚ƒƒƒƒ‚‚€€€€€€€€‚‚‚‚‡ˆ‚}ˆŒŠ…‚€€€€€€€€€€€€€€€€wsopv†Šˆ‡†……………ƒƒƒ„ƒƒ‚‚€~}|}}~||{zz{|}||||{{{{{~~{z|}}z{}~€€€|z{~~}~€~}||}~}|z{}~|yz{|}}|zyx{~‚‚€€‚‚‚‚‚ƒƒ„„„ƒƒ}|zywxz{y||wuvxwwwxxxwwvvvvvwwwwvutvz}~~}~€€~}{}~~{yyy{€‚€€€‚‚‚€€€€€€€}}||{zzz{{|}~€€€€‚‚ƒƒƒƒ‚‚‚ƒ„„ƒ‚€~~~|wsu|‚ƒ‚€}xqmty‚ƒƒƒƒ„„„„…………†……†…‚€€€€~~||{z||}}~~~~~||}}~|{{~€ƒƒ‚‚„‡†‡†…ƒ‚ƒ„ƒƒƒƒƒƒƒƒ…†„„„€‡‹…}vtv{~€€€€€€€€€€€€€€€€†Š‹†~~~~}}‚ƒ„„„„†††††‡‡‡ˆˆˆˆˆˆˆˆ‡‡‡ˆˆ‰‰‰†…†‰ŠˆˆŠ‹‹‰…ƒƒ‚€†††……„„„„„ƒ„ƒ„……………………………„ƒƒƒ…†‡†…„‚‚‚ƒ„„„„ƒ‚‚ƒƒƒƒ‚‚ƒ††‚€‚„ƒ‚‚„ƒ„‰‡‡‡‡‡‡‡‡ˆˆˆˆˆˆˆˆˆ‰Š‰†„ƒ„~{|}}~€„‚€ƒ„„„ƒƒ‚€‚‚ƒ‚‚‚‚‚‚‚ƒ…†ˆ‰ŠŠ‰ˆ‹‰‡††…ƒ‚‚‚€€„„…††…„„ƒƒƒ„„„„…†††‰‹Š„€€„‡‰Œˆƒ‚ƒƒ„„ƒƒ‚‚‚‚‚‚‚‚‚‚ƒƒƒƒ„††‡‡‡††…………†††††…††‡‡‡†……‡ˆ‡…ƒƒƒ‚‚‚€€‚‚ƒƒƒƒ‚‚‚‚‚‚‡ˆ‚}‚‡ŒŠ…‚€€€€€€€€€€€€€€€€yvrrx€‰ˆ‡‡††††††††……„„„ƒ‚€}}||||}}{{{zzyyy{~€~€}~€€€€|{{~~|}|}}}~}}|~|zz}~}|}{z{}}zvz{‚‚€‚‚ƒ„…„„ƒ„„…†€~|yxxyzy||wuvxwxxwvvvvvwwwwwvvvvutvz}~}~€€~}{}~}{yyz{€‚€€‚‚€€€€€€€€}}||{{zz{{|}~€€€€‚‚ƒƒ‚‚‚ƒ„ƒ‚€~}}{|{wsty}xxxvspnmuy~‚ƒƒƒƒ„„„„……………„…†…‚€€€€€~~~}}||{}}}}~~~~~~}}}|{{}ƒƒ‚‚‚„†………ƒ‚‚‚ƒƒƒƒƒƒƒƒ„…ƒ‚ƒ†‹…}vtv{~€€€€€€€€€€€€€€€€„ˆ‹‹†}|~~~~~~~~€‚ƒ„„„ƒ„„„……†††‡‡‡‡‡‡‡‡‡‡‡‡ˆˆ‰‰…„…ˆ‰‡‡‰‰‰ˆ†………ƒ††…………„„„ƒƒƒ„„„……………………………„„„…†‡†…ƒ‚‚‚ƒ„„„„ƒ€‚…‡‡…‚€„‡‡ƒƒ…„‚‚„ƒ„‰ˆˆˆˆˆˆˆˆˆˆ‰ˆ‰ˆ‰ˆˆ‰Š‰‡„„„}{|}}~€„‚ƒ„„„ƒ‚‚€‚ƒ‚€‚‚‚‚‚‚ƒ„†‰‹Œ‹‰ˆ‹‰ˆ††„ƒ‚‚‚€€‚‚„…††††………„„„„„ˆ…„†‹Œ‰…ˆˆ‡‡‰‹ŽŒˆƒ€‚ƒƒ„„ƒ‚‚€ƒƒ‚‚‚‚ƒƒ‚‚‚ƒƒƒƒ„†‡‡‡††……………†††††††‡‡‡‡‡†…‡ˆ‡…ƒ‚ƒ‚‚‚€‚‚ƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚†ˆ‚}ˆŒŠ…‚€€€€€€€€€€€€€€€€~|yy|†Š†††††††‡†…„„ƒƒƒƒ……„ƒ‚€~~~~~~{{zzyxxx|€‚€€}~~~~€{y|~||{|~€}|~|z{|~~~~|{{|{yx{}‚‚€€ƒ…†ˆ†„ƒ‚‚ƒ~zxwwvvy||wuvxwyxvvuvvwwwwvvuuuvutwz~~€€~}z|~~{zyz{€‚€€€€€€€~}}|{{zzz{{}}€€€€€‚‚‚‚€‚‚…„„ƒ‚€€xnijmmnnmihikuy~‚‚ƒ„„„„„……………„„†…‚€€€€€€}~}~}~~~||}}}}}~}~~~~}|||{{~€ƒƒ‚‚€‚„†………„‚ƒƒ‚ƒ‚ƒƒƒƒ„‚‚‚~…‹…}vtv{~€€€€€€€€€€€€€€€€ƒ‡ˆ†~|€‚„„„ƒƒ‚‚ƒƒ„„…………††††‡‡†‡‡‡‡ˆˆˆ…„…‰‰ˆ‡‰…†…ƒƒ„…ƒ……………………„„ƒƒƒ„„„……„…„…„……………………†…„ƒ‚‚‚‚ƒƒƒƒƒ‚„‡‰‰‡„‚‡ŠŠ†„†ˆ‡‚‚„ƒ„‰ˆ‰ˆ‰ˆ‰ˆˆ‰‰‰‰‰‰‰‰ˆ‰Š‰†„ƒ„}{|}}~€„‚€ƒ„„„ƒƒ€‚‚‚‚‚‚‚‚ƒ„…‡‰ŠŠ‰‰‹Šˆ‡…„‚‚‚‚€€€‚…†‡‡‡‡‡††……„„†ƒ…‹ŒŒŒŽ‘’Œˆƒ‚ƒƒ„„ƒƒ‚ƒ‚‚‚‚‚‚ƒ‚‚‚ƒƒƒƒ
„††††††……………††††††††‡†††††‡ˆˆ„ƒ‚‚‚‚ƒ‚‚‚‚ƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‡ˆ‚}‚‡ŒŠ…‚€€€€€€€€€€€€€€€€€}|}€„‡††‡‡‡‡‡‡…„ƒ††……„ƒ‚‚€€~~~}||{{z|€ƒ€€~~~€‚ƒƒ€{y|~}{{}}~~~~~}|{}~}~~}{z{|}~€€‚‚‚‚‚€ƒ…††„ƒ€€€‚~zwxwvuy||wuvxwyxvuuuwwxwwvvuuuvtuw{~€€~‚€~|z|~}|zz{{€‚€€€€€€€~~}}|{{zzz{|~~€€€€€€‚€€€‚‚€€€€‚xlddhmnnjgegivy~‚„…„„„„……………„„†…‚€€€€€€~~~}}}}}||||||||}}|{|{|||{{}ƒƒ‚‚‚‚„††††„ƒ‚ƒƒƒƒƒƒƒƒƒƒ‚ƒ‚‚~~„‹…}vtv{~€€€€€€€€€€€€€€€€‚†‡†‚}ƒ………ƒ‚‚‚ƒƒƒ„„„„……†††††‡‡‡‡ˆˆ…ƒ…ˆ‰†‡ˆ†‡†ƒ‚ƒƒ‚………………………„„ƒ„ƒ„„„„…„…„„„…………………„„„ƒ‚‚‚ƒ€€‚‚‚‚„…‡‡‡‡…„‡ŠŠ‡…†ˆˆ‚‚„ƒ„‰‰ˆ‰ˆ‰ˆ‰‰‰‰‰‰‰‰‰‰ˆ‰Š‰‡„„„}{{}}„‚ƒ„„„ƒ‚‚€‚‚€‚‚‚‚‚ƒ……„…†ˆ‰‹‹‹Šˆ†ƒ‚‚‚‚‚€€€‚„††‡‡†††…„ƒƒƒ†ƒ‚„‹ŽŒˆƒ€‚ƒƒ„„ƒ‚‚€‚‚‚‚‚‚‚ƒƒƒƒ„…………††††………†††††‡†††††‡††ˆ‰‡…‚‚‚‚‚ƒ‚‚‚‚ƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚†ˆ‚}ˆŒŠ…€€€€€€€€€€€€€€€€~}||€…‰‡ˆˆ‰ˆˆˆˆ†…‚€€€„…………„ƒƒ‚‚ƒƒ‚€~}|{zz|€‚ƒ„ƒ€ƒ„„‚€~||}|zz|}}}~~~~~}}}~~|~}{{}€€€‚‚‚‚‚‚‚ƒ„…€€€€€‚„zxyzxvy||wuvxwxwvvuvwxyxxxwwvvuutw{~‚}|y{}}{zz{{€‚€€€€€€€€~~}}|{{{zz{|}~€€€€€€€€‚€€€‚€€€xmghlmmmlkklnwz~€‚„†„„„„……………„…†…‚€€€€€~}{{zz||||||||{|{|||{{|{{~€ƒƒ‚‚‚‚€ƒ…‡‡†…ƒƒƒ„ƒƒƒ„ƒƒƒƒƒ„‚‚~~…‹…}vtv{~€€€€€€€€€€€€€€€€…†‡‡…‚}~~~~~~~~„†‡†„ƒ‚‚‚‚‚‚‚‚‚ƒƒ„„……††††‡‡‡‡ˆƒ‚ƒ†‡……‡†‡†„„……ƒ„„…………††……„„„„„„„„„„„„„„„„……„„ƒ‚„ƒ‚‚ƒƒ€€‚‚‚‚…„„„„„„……ˆˆ„‚„†…‚‚„ƒ„‰ˆ‰ˆ‰ˆ‰ˆ‰‰‰‰‰‰‰‰‰ˆ‰Š‰†„ƒ„€}z|}~„‚€ƒ„„„ƒƒ€‚‚‚€€€‚‚‚‚‚ƒ…„ƒ‚„†ŠŒŒŒ‹‰…ƒ‚‚‚‚‚€€€‚„…†††„„ƒƒ‚ƒ‚‚‚‚ƒ†ŠŽ‘ŽŽŒˆƒ‚ƒƒ„„ƒƒ‚‚‚‚‚‚ƒƒƒƒ„……………††‡………†††††ˆˆ‡‡‡‡‡ˆ‡ˆ‰ˆ„‚‚ƒƒƒ‚‚‚‚ƒƒƒƒ‚‚‚‚‚‚‡ˆ‚}‚‡ŒŠ…‚€€€€€€€€€€€€€€€€€€‚†‰‡ˆˆˆ‰ˆ‡‡†„‚€€‚ƒ„………„ƒƒ„„„ƒ‚€~~|{zzy~ƒƒ‚ƒ„…„‚~~||}|yy{{{|}~~}}~~~~}~~}}}ƒ€€€‚ƒ‚‚ƒƒ„ƒƒƒ€€€€€ƒ}xwyzyvy||wuvxwwwvvwwxyxxxxwwwwutuw|€‚‚€‚}|y{}}|z{|{€‚€€€€~~~~}||{{zz{|}~€€€€€€€€€€€†…„‚€}}}ysomlmpppruvwvwz}€‚„†„„„„…………†……†…‚€€€€~}}||{{z}}}}}}}}z{|}~|{z|{{}ƒƒ‚‚‚‚ƒ…‡‡‡…ƒ‚ƒ„„„„„„„„„ƒ„‚‚ƒ…‹…}vtv{~€€€€€€€€€€€€€€€€……„ƒ‚€~}}}~~~~€‚…‡ˆ‡…ƒƒƒƒ‚‚€‚‚ƒ„„……††††‡‡‡‡ƒ‚ƒ†‡……‡ƒ„„‚‚„„ƒ„„„……†††††…„„„……„„„„„„„„‚ƒ„„„ƒ€ƒƒ‚‚ƒ„‚ƒ„„„ƒƒ„„„„„„„„…ˆˆ„‚„……‚‚„ƒ„‰ˆˆˆˆˆˆˆˆ‰‰‰‰‰‰‰‰ˆ‰Š‰†„ƒ„€|{{}~„‚€ƒ„„„ƒ‚‚‚€€€‚‚‚‚‚ƒ„„„„…ˆŠ‹ŒŒŒ‰…ƒ‚‚‚‚‚€€€€‚ƒ…………„„…………††‚„‡ˆ‰‰ŒŽ‡ˆˆˆˆˆ‡‡Œˆƒ€‚ƒƒ„„ƒ‚‚€‚‚‚‚‚‚‚ƒƒƒƒ„†……„……††………†††††ˆ‡‡††‡‡ˆ‡ˆ‰‡„‚ƒƒƒƒ‚‚‚ƒƒƒƒ‚‚‚‚‚‚‡ˆ‚}ˆŒŠ…‚€€€€€€€€€€€€€€€€ƒ‡ˆ‡††‡†‡‡ˆ‡‡†……ƒ€~}}~€ƒ„……„
„„………„ƒ€‚€~}||€‚}{}€€‚‚‚‚‚|y{}{yx|{zzz|}~€€~}|€}{|‚ƒƒ€€€‚ƒ‚ƒ„……„‚‚‚ƒ‚€~|zutwywty||wuvxwvvvwwxyywwwwvvvvutux|€‚‚€‚}{y{}}{{{|{€‚~€‚€€~~~}||{{zz{|}~€€€€€€€€|yvsqpqmikqwyy}{{}€|xz}€„†„„„„…………†……††ƒ€€€€|||}}~~~~~~~~~~~|||}}}|||{{}ƒƒ‚ƒƒ‚€ƒ…†††„ƒ‚‚ƒ„„„„„„„„„„ƒ‚ƒ†‹…}vtv{~€€€€€€€€€€€€€€€€€~~~~~~~€€€‚…ˆ‰‡…„…„ƒ‚€‚‚ƒ„………††††‡‡‡…„…ˆ‰‡‡ˆ‡‡†„ƒ„ƒ‚„„„……†††††………………„„„„„„„„‚‚ƒ„ƒ‚€ƒƒ‚‚ƒ„ƒƒ„…………„„……††……„†‰‰…ƒ…‡‡‚‚„ƒ„‰ˆˆˆˆˆˆˆˆ‰‰ˆ‰ˆ‰ˆ‰ˆ‰Š‰†„ƒ„€|z|}~‚„‚€ƒ„„„ƒƒ‚€‚€€€‚‚‚‚‚ƒƒ„…‡ˆ‰Š‹ŒŒŒ‰…‚‚ƒ‚‚‚€€€‚„„„„„‡ˆˆ‰ŠŠ‹‹‰ŒŽŒ†ƒƒ„…††††…„„Œˆƒ‚ƒƒ„„ƒƒ‚ƒƒ‚‚‚‚ƒƒ‚‚‚ƒƒƒƒ„‡†…„„„……………†††††‡†…………†‡‡‰‰ˆ„€‚ƒƒƒƒƒ‚‚ƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‡ˆ‚}‚‡ŒŠ…‚€€€€€€€€€€€€€€€€€€ƒˆŠˆ‡ˆˆˆˆˆ‡‡‡‡‡„~||}~€ƒ„…………………„„ƒ„‚‚ƒ‚€}‚„}|zz~~~€‚€~zy}}yxzz}}{{}€‚‚~||~€‚ƒ‚€€‚ƒ†‡‡„€~€‚€€{ttwzywz|{vsuxxyxwvvwyzvvwwxwvvvutw{~€€‚‚{x}}}|yz}€€€€€€€€€€€€€€}}}€~|||yz{|}~€€€€€€€€€€€€‚|yvqichfflu|~~€€~~}}z{~€‚ƒ„„‚ƒƒƒ„„„„†„ƒ„„‚‚„ƒ‚€‚‚~}}||{{zz{||}~~~}}~~~~}}~|{|€‚ƒƒ‚‚‚‚ƒ„„‡ˆ‡ƒ‚ƒƒ‚ƒ„„ƒ‚……}‡Š…~xwy}€€€€€€€€€€€€€€€€€‚€|{}~}~~~~~~~~~…‡‡‡††††…„ƒƒ‚‚€€€‚‚‚ƒƒ…†ˆ‰„„†Š‹‰ˆ‰Š‹Š†„ƒ‚…„ƒ„…†……†………†††………………………„„ƒƒ‚‚ƒƒƒƒƒƒƒƒ€‚„†‡‡…„…ƒ‚„††„…‡ˆ‡…„…‡ƒ‚‚„‚€ƒ‡‰ˆˆˆˆˆˆ‡ˆˆˆˆ‰‰‰‰ˆŠ‹Š‡„ƒ„€~||||~€ƒ‚€„…†…„ƒ‚€€€€ƒ…†…ƒ‚„‡Š‹‰†ƒ‚€€‚ƒ‚‚‚ƒƒƒƒ„……†‰ŽŒˆ„ƒƒƒƒ„„……††‰‡„‚‚ƒ„„„ƒƒ‚€‚ƒƒƒƒ‚‚„…„‚‚„††……††‡‡†††††‡‡‡‡††………………‡ˆ‡…‚‚ƒ‚‚‚‚‚„……ƒ‚‚ƒƒ‚‚‚ƒ„‚‚‚‚‡ˆ‚‡ŒŽŒ†‚€€€€€€€€€€€€€€€€€€ƒˆŠˆ‡ˆˆˆˆˆˆˆ‡‡‡…~||}~}~€‚ƒƒ„„…†……„„„ƒ‚‚ƒ‚}€‚~}}€€€‚‚€~zy|}zy|~}|||||}}€€~}|}}€‚ƒ€€‚ƒ„†…„‚€€€€€€€{utwyxvz|zvsvxxwvuuuvwxwwvvvvvwvutw{~€€‚‚‚€{x}€}}{y{}€€€€€€€€€€€€€€}}}~}{{|yzz|}~~~~€€€€€€€€€€~vkchfflu|~~€€~~}}z{~€‚ƒ„„ƒƒƒ„„„„……„„†…ƒ‚ƒ‚€‚‚}~}}||{{}}}}}|||}}~~~~}}~|{|€‚ƒƒ‚‚‚‚‚ƒƒ„‡ˆ†ƒ‚ƒƒ‚ƒ„„ƒ‚ƒ„…}€†Š…~xvw{~€€€€€€€€€€€€€€€€‚‚€|{}~}~~~~~~~~~…‡‡‡†‡††……„ƒƒƒ‚‚€€‚‚ƒ…†‡ƒ„†Š‹‰ˆ‰‡ˆˆ…ƒ„„ƒ…„ƒ„…†………†††………†……………………„„ƒƒ‚‚ƒƒƒƒƒƒƒƒ„„„………„„„„ƒƒ„…„„‹‹‰‡„„†‰ƒ‚‚„ƒƒ‡‰‰ˆ‰ˆˆˆˆˆˆˆ‰‰‰‰‰ˆŠ‹Š‡„ƒ„~||||~ƒ‚€ƒ††…ƒƒ‚€‚€€„†‡…„ƒ…‡‹ŒŒŒ‹ˆ…‚€„„ƒƒ‚‚‚‚‚‚‚ƒƒƒ…„ƒ‚ƒ‡‘ŽŒˆ„ƒƒƒƒ„„……††‰‡„‚‚ƒ„„„ƒƒ‚€€‚ƒƒƒƒ‚‚ƒ„„‚‚‚„††……††‡‡†††††‡‡‡‡††………………‡ˆˆ…ƒ‚ƒ‚‚‚‚‚„……ƒ‚‚‚‚„‚‚‚‚‡ˆ‚…ŠŒŠ„€€€€€€€€€€€€€€€€ƒˆŠˆ‡ˆ‰‰‰ˆˆˆˆˆ‡…‚~}|~~|}}~€€‚ƒ„…†††……„ƒƒƒ‚€~€‚€€€‚„„‚zy{}{|~|z|}|{||}~~~}|}}~€‚‚‚ƒ„……‚€€‚‚€€‚‚‚€~}zvuwyxvz|
{usuxxwwwwwwxxwwvuuvwwvutw{~€€~‚ƒ‚‚€zy~€~||{y{}€€€€€€€€€€€€€€}}|}~}|{z{yz{{|}~~~}}}}~€€€€€€€€€€‚„ƒ{ofigglv|~}€€~~}}z{~€‚ƒ„„ƒƒƒƒ„„……„ƒ…‡†ƒƒ‚€‚‚}}~~~}|{~~}}|||{}}~~~~}}~|{|€‚ƒƒ‚‚‚‚‚„„†‡†‚€‚‚‚ƒ…†„‚‚‚„„}„ˆ„~xvvy{€€€€€€€€€€€€€€€€‚ƒ}{}~}~~~~~~~~~…‡ˆ‡††‡‡†……„„„„ƒ‚€€€‚ƒ„ƒƒ…‰Š‰ˆˆ†ˆ‡…„„…„…„ƒ„…†……„…‡†…„…†……………………ƒƒƒ‚ƒ‚‚‚‚‚‚‚‚‚‚‚ˆ†„‚‚‚„„„„„ƒƒ„†ˆ‘Œˆ„ƒ†‰ƒ‚„ƒƒˆ‰‰‰‰‰ˆˆˆˆˆ‰ˆ‰‰‰‰ˆŠ‹Š‡„ƒ„~||||~ƒ€„…†…„ƒ‚€‚‚‚‚…‡ˆ‡††‡‰‹Š‹ŒŒ‹ˆ…‚†‡ˆ‡‡„‚‚‚‚ƒƒƒ‚ƒƒƒ„†‹ŽŒŽŽŒˆ…ƒƒƒƒ„„……††‰‡„‚‚ƒ„„„„‚‚€‚ƒƒƒƒ‚‚ƒ„„‚‚ƒ„††………††‡†…††††‡‡‡††………††…‡‰‡…‚‚ƒ‚‚‚‚‚ƒ…„ƒ€‚€‚ƒ‚‚‚‡Š„ƒ†ŠŒ‰„€€€€€€€€€€€€€€€€~‚ˆ‰ˆ‡ˆ‰‰‰‰ˆˆˆˆ‡…}}~}}}~€€ƒ……††††††……ƒ‚€‚„‚€€}ƒ……„ƒ€|y{{{|~|zz{}||}{{{||}||}~~€‚€‚ƒ„……ƒ~€‚ƒ€‚ƒ€}{{xuuwyyxz|zvsvxxvwwyxxxwwvvvuvvwvutw{~€€}~‚ƒ‚‚yz~€}{{zy{~€€€€‚€€€€€€}}{{|{{zzzyzz{{|}}|||||}~€€€€€€€€‚‚|smjhgmu}~}€€~~}}z{~€‚ƒ„„ƒ„ƒ„„…………„„‡…ƒƒ‚€‚‚|}~~~||~}}}|}}}}~~~~}}}~|{|€‚ƒƒ‚‚‚‚ƒƒ„†‡…‚€‚‚ƒ‚‚ƒ……„ƒƒƒ„…~}ƒ€{xvwy{€€€€€€€€€€€€€€€€ƒ„}|}~}~~~~~~~~~…‡‡‡†‡‡‡‡‡††……†…„‚~€‚„ˆ‰‡†‡ˆ‰ˆ…ƒ„„„…„ƒ„…†……„…††„……†……………………ƒƒ‚ƒ‚ƒ‚‚‚‚‚‚‚‚‚‚ˆ†„‚‚ƒ„„……„‚„ˆŒ““Œ‡…†‡‚‚„ƒ„ˆ‰Š‰‰‰‰ˆˆˆ‰ˆ‰‰‰‰‰ˆŠ‹Š‡„ƒ„~|}}|~ƒ‚€ƒ††…ƒƒ€‚ƒƒ‚‚ƒ†‰‰‰ˆˆ‰ŠŒŠ‹‹Œ‹Šˆ‡‰Š‹ŒŠ‡‚€‚‚‚ƒƒƒ€ƒƒ„†‰‹ŒŽŒˆ…„„ƒƒ„„……††‰‡„‚‚ƒ„„„ƒƒ‚€€‚ƒƒƒƒ‚ƒ„„‚‚ƒ…††…………††…†…†††††‡††……†††…‡ˆˆ…ƒ‚ƒ‚‚‚‚‚„……ƒ‚‚‚‚ƒ‚‚‚‡‹‡†‡ŠŽ‹†€€€€€€€€€€€€€€€€~~‚‡‰ˆ‡ˆ‰‰‰ˆ‰ˆˆˆˆ…‚}}~€‚ƒ……†††‡‡‡†…„„……ƒ‚€€€€‚„……ƒ‚}{z{{||z|}|{|}|zzzz{{|}~~€€€‚‚ƒ„„„€€€€‚‚‚€‚~{yuttuxz{{z|{utuxxtuvwwwvuuuvvwvvuvutw{~€€}~‚‚y{~|zzzx|~€‚€€€€€€€}}{zzzzzzyzzzz{{||{{{{|}~€€€€€€€€ƒ€€{upkiimv|~}€€~~}}z{~€‚ƒ„„„„„„……††‡…„…„‚ƒ‚€‚‚|}~~~~}~~}}||}}~~~~~}}|~|{|€‚ƒƒ‚‚‚‚‚„„†‡†‚€‚‚ƒƒƒƒ„ƒƒƒ„ƒ„…‚}|~}{ywxy{|€€€€€€€€€€€€€€€€„„‚}|~~}~~~~~~~~~…‡ˆ‡‡†‡‡‡†‡†††‡†…ƒ‚‚‚‚€~€€€ƒ†ˆ†…†‡ˆ‡ƒ‚‚„ƒ…„ƒ„…†……†…„„†……„……………………‚‚ƒ‚ƒ‚ƒƒ‚‚‚‚‚‚‚‚…„„ƒƒƒ„„„…†……‡Œ’””‘Œ‡„„‚‚ƒƒ„‰Š‰‰‰‰ˆ‰ˆ‰‰‰‰‰‰ŠŠˆŠ‹Š‡„ƒ„}}}}ƒ€„…†…„ƒ‚€€‚ƒƒ‚‚„‡‰ŠŠŠŠ‹‹ŒŒ‹‹‹Š‹Š‹‹‹ŒŽŽŒ‡ƒ€‚‚‚ƒƒƒƒ‚€ƒ‰‹ŽŒˆ…„…ƒƒ„„……††‰‡„‚‚ƒ„„„„‚‚€‚ƒƒƒƒ‚‚„ƒƒ‚„…††……………†……†…†††††††††††‡…‡‰‡…‚‚ƒ‚‚‚‚‚ƒ…„ƒ‚‚ƒ‚‚‚ƒ‚ƒ‡ŒŠ‰Š‹‹…€€€€€€€€€€€€€€€€}}‡‰ˆ‡‰‰‰ˆˆˆˆˆˆˆ†‚}~~€€€€€~€‚ƒ„„„„…‡‡†………††‚€‚‚‚‚ƒ„…„€~{{{{{{{~}{{{zzyyyz|}~~€€€‚ƒƒƒ‚‚€‚‚€€€€~|yxsstvxyz{z|zvsvxxuvvwwwvvuuvwwvuuvutw{~€€~~€€€z|{xyyx|‚ƒ€€€€€€
€€}}{zyxyzzyzzzzz{z{zzz{|}~€€€€€€€€€€€€|tmljinv}~}€€~~}}z{~€‚ƒ„„„„„……†††ˆ†„…ƒƒƒ‚€‚‚}}}}}~~€€~}||||~~~~~}||~|{|€‚ƒƒ‚‚‚‚ƒƒ„†‡†‚€‚‚ƒƒƒƒƒ‚ƒƒ„ƒƒ…ƒ~{{yxxxy{|}€€€€€€€€€€€€€€€€……‚~|~~|~~~~~~~~~…‡‡‡†‡†‡†‡††††‡†…„„„„„‚‚€‚€€ƒ‡ˆ†…†„†„‚€‚„„…„ƒ„…†……†…ƒ„…†…„……………………‚‚‚ƒ‚ƒƒƒ‚‚‚‚‚‚‚‚‚‚ƒ„„„„ƒ…††‡ˆ‹Ž‘‘”–”ˆ„‚€ƒƒ‚„‰‰‰‰‰ˆ‰ˆˆ‰‰‰‰‰ŠŠŠˆŠ‹Š‡„ƒ„}}}}ƒ‚€ƒ††…ƒƒ€‚‚‚‚„‡ŠŠ‹‹ŒŒŒ‹‹Œ‹ŠŠŠ‹Œ‹Ž‹ˆƒ‚‚‚ƒƒƒ‚‚ƒˆ‹ŠŒŒˆ†……ƒƒ„„……††‰‡„‚‚ƒ„„„ƒƒ‚€€‚ƒƒƒƒ‚€‚ƒ„‚ƒ„†††………………………†…†††††††††‡‡…‡ˆˆ…ƒ‚ƒ‚‚‚‚‚„……ƒ‚ƒƒƒ‚‚‚ƒƒ‚€‡ŽŠŠŠ‹‹ˆƒ€€€€€€€€€€€€€€€€|}†‰‡‡‰ˆˆˆˆˆ‡‡‡ˆ†ƒ~~€€€€~~}~€‚‚‚‚€‚„…„ƒ„†‡†€}€€‚„„ƒ~}|{{{|{z}~~}}|{zzyyyz|~€€€€‚‚ƒ‚€€€‚‚‚€€~}{yxwtuwwwwwwz|{vsuxxvwwwxwxwuuvvvvvuvutw{~€€€€~€€{|zwxyx|‚‚ƒ€€€€€€}}{zxxyzzzzzzzzzzzzzz{|}€€€€€€€€€‚‚|skmjjnw}~}€€~~}}z{~€‚ƒ„„„„……††††‡………„€ƒ‚‚‚}||||}€€~|||}}~~~~~}||~|{|€‚ƒƒ‚‚‚‚ƒ„„‡ˆ‡ƒ‚ƒƒƒ‚‚‚ƒƒƒƒ…ƒƒ…ƒ}zyxxyyz{||€€€€€€€€€€€€€€€€…†ƒ~|~~|~~~~~~~~~…‡ˆ‡††††††††††‡†…………††……„ƒ‚‚ƒƒ‚‚…ˆŠˆ‡‡†‡†‚‚„……„ƒ„…†………„„„………„……………………‚‚ƒƒ„„ƒƒƒƒƒƒƒƒ‚‚ƒ„„„ƒƒ††‡‰‘‘’”•’ˆ†…€ƒƒ‚…Š‰‰‰ˆˆˆˆˆ‰‰‰‰ŠŠŠŠˆŠ‹Š‡„ƒ„‚}}}}‚ƒ‚€„…†…„ƒ‚€€‚‚„‡ŠŠ‹ŒŒ‹Š‹‹‹Š‹‹ŒŒ‹ŒŒŠ‡„‚‚‚ƒƒƒ€‚ƒƒƒ„‡‰ŠŒŒ‰†…†ƒƒ„„……††‰‡„‚‚ƒ„„„„ƒ‚€‚ƒƒƒƒ‚€ƒƒƒƒ…†††……„„………………†…†††††††‡‡ˆ…‡‰‡…‚‚ƒ‚‚‚‚‚ƒ…„ƒ‚ƒ‚‚‚‚ƒ€†ŒŒŒ‹ˆƒ€€€€€€€€€€€€€€€€||€†‰ˆ‡‰ˆˆˆˆ‡‡‡‡ˆ†ƒ€~~€€€~~}}}~€€€€|‚ƒ‚‚ƒ…‰…~z{{z{|}€ƒ…„‚€zzz{||{z}{{~}||zzyyz|~€€€€€‚‚ƒ‚~€‚ƒ‚‚}{yxwwwxyxvuttz|{vsuxxuuuvvvvvvvuutuvvvutw{~€€€~}€€||zwxyx}‚‚ƒ€€€€€€}}|zxxy{{zzzzzyyyyzzz{|~€€€€€€€€€‚|slmkjow}~}€€~~}}z{~€‚ƒ„„„………†††‡†……‡†‚€€ƒ‚€‚‚}|{{{}~~}||}~~~~~}||~|{|€‚ƒƒ‚‚‚‚‚ƒ„„‡ˆ‡ƒ‚ƒƒƒ€ƒ„„ƒ…ƒƒ…ƒ~yxyyzz{{zz€€€€€€€€€€€€€€€€††ƒ~|~~|~~~~~~~~~…‡‡‡††………†††††††………†‡‡‡‡…„„„„„„„†Š‹‰ˆ‰Š‹ˆ…‚ƒ„……„ƒ„…†……„………„ƒ„………………………‚‚ƒƒ„„ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ††‡‹“’‘””“‹ˆ‡‡€€ƒƒ‚…Š‰ˆˆˆˆˆˆ‡‰‰‰‰ŠŠŠŠˆŠ‹Š‡„ƒ„‚}~~}‚ƒ‚€ƒ…†…ƒƒ‚€€ƒ‡‰Š‹ŽŒŠ‰ŠŠ‹ŒŒŒ‹‹‹‹‹‹‰†„‚‚‚‚ƒƒƒ„ƒ‚€€ƒˆŒŠ‹Œ‰†††ƒƒ„„……††‰‡„‚‚ƒ„„„ƒƒ‚€€‚ƒƒƒƒ‚ƒƒƒƒ…‡††……„„…………………††††††††‡‡ˆ…‡ˆˆ…ƒ‚ƒ‚‚‚‚‚„……ƒ‚‚€‚€ƒ‚‡ŽŽŠ…€€€€€€€€€€€€€€€€{}€„‡ˆ‰ˆ†ˆ‰‰‡…†‡ˆˆ„~{||z}}~~€€€€€€€‚„„€€‚‚ƒƒƒ‚~~~zy||||}}~~~{x{~~~~€€€‚ƒ„ƒ€‚€‚ƒƒ‚‚€|zxwvvwxxxxvutx{{ustvuxussuwxxwwvsstututtvz~~~€~{zwxxwy~€€€€‚|}zxxz{{zyyyxyzz{yyyz{}~‚‚€€}‚ƒ€xpkokhlt{~}|}€€}|yz}€ƒ…††„„„……†††††…„„ƒ‚‚€ƒ„‚
}||~~{~~~~~~~~~}||~ƒƒ‚‚ƒƒƒ‚‚„†…††‚~}‚ƒ„„„„„…†…ƒƒ…‚{xxvyytty}~€€€€€€€€€€€€€€€€‚„ƒ|}~}~~~|„†††‡ˆ………………………†‡‡ˆ‡†…†††……„„„‚ƒ†ˆ‰‰‰ˆ‰ˆ‡…„„ƒƒ„„„……„„„……………………‡‡‡†…„ƒ‚…„ƒƒ‚ƒƒƒ‚ƒ„„………„‚‚‚ƒƒ…†‰Œ‘’“”Ž‘‘Œ‡„„…‚ƒ†Š‰‰ˆˆˆˆˆˆˆ‰ŠŠŠŠŠŠ‹‹‰†„„…€}|~~~€ƒƒ‚ƒ…‡†……ƒ€‚‚€‚‚‚‚‚‚‚‚€€ƒ‡‰ˆ‰Š‹Œ‹‹ŠŠ‹‹‹‹ŒŒŒŒŒŒ‰…‚‚……„‚ƒ‚€‚†ŠŒŒŽŽŒ‰…„„†††…„„ƒƒŠˆ†„‚‚ƒƒƒ‚‚‚‚‚ƒ„ƒƒ„‚ƒ…††……„„……ƒƒ„……†‡‡††††††††ˆ‰‹‰†ƒƒƒ‚ƒƒ‚‚„†…ƒ‚‚‚‚‚‚‚‚‚‚‚ƒ„‡‰ŠŽŽ‹†‚€€€€€€€€€€€€€€€€{}€„‡ˆ‰‰†‡ˆˆ‡‡†‡ˆˆ…}~~|€€€}}~€‚€€€€€€€€ƒƒ€€€~€‚‚‚‚~~{z|||||}}}~~yvy|}~€€€€ƒ„„ƒ‚‚‚‚€‚ƒ‚‚~}{yxwvvwxxxwwutx{zustvuxxvvuuvwutttssttuttvz~€€}}{zwxxwy~€€€€‚~|}{yyz{{zyyyyyzz{yyyz{}~|||||}}€|vokliglu}€€}~€€~}y{}ƒ……†„„„……††††‡‡‡…ƒ€~€€ƒ„‚€~}|~~~~~~~~~~~~~~~~~}||~ƒƒ‚‚ƒƒƒ‚‚„††‡†ƒ~€ƒƒƒƒƒ‚ƒ„……ƒƒ…‚|xxuyyvv{€€€€€€€€€€€€€€€€€‚„ƒ|}~}~~~|€„†††‡ˆ†††††††††††‡†††††††………„„ƒ…‡‰‰‰ˆˆ……„ƒƒƒ„„„„„„„„„„……………………‡‡‡†…„ƒ‚…„ƒ‚‚‚‚‚‚ƒ„„……„„‚‚‚ƒ„†ˆŠ‘’’“Ž‘‘Œ‡„„…ƒ„ˆ‰‰ˆˆ‡ˆˆˆˆˆ‰ŠŠŠŠŠŠ‹‹‰†„„…€~|~~~ƒƒ‚ƒ…‡†……ƒ€€‚€ƒ†‰‡ˆ‰‹‹‹‹ŠŠ‹‹‹‹ŒŒŒŒŒŒŒŒ‰…‚„„‚‡†……†ŠŽŽŒ‡ƒ‚„„„„„„„„‰ˆ†„‚‚‚ƒƒ‚‚‚‚‚‚‚ƒƒƒƒ„ƒƒ…††……„………ƒ„„……†‡‡††††††††‡‰Š‰†ƒ‚ƒ‚ƒƒ‚‚„……ƒ‚‚‚ƒ„†ˆŠ†‡ˆ‰‡„€€€€€€€€€€€€€€€€{}„‡ˆ‰‰‡‡‡ˆˆˆ‡‡‡ˆ…~€€~€€€~~€€€€€€€~‚€~€€‚ƒ„…†††~~{{|{||||}}}{wvx{|~€€€€‚‚ƒƒ‚‚ƒƒ‚€‚‚‚€€|{zxwwvvwwxxxwvuwzzusuvuwxywvtvwwuvwwttuuttvz~€€€~}|{zwxxwy~‚€€€€€~{~|{z{{{zzyyyyyz{zyzz{|~~€~€€}}}|ytpmjhhmv}~~€~}z{~€ƒ„……„„…………††‡‡†…„‚€€€ƒƒ‚€€~}|}|~}}~}~}~~~~~}}}||}ƒƒ‚‚ƒƒ‚‚‚„†‡ˆ‡„€~„ƒƒ„ƒƒ‚„……‚ƒ…ƒ|yyy||yy|€€€€€€€€€€€€€€€€€‚„ƒ|}~}~~~~~~}€„‡‡†‡‡†††††††††††…†…†††††………………†ˆ‰Š‰ˆ‡ˆ‡†„„ƒƒƒ„ƒƒƒƒƒƒ„……………………††‡‡†…ƒ‚…„ƒ‚€‚ƒ„„…„„ƒƒ‚‚‚ƒ„…‰ŠŒŽ‘’’‘ŽŒˆ„€ƒ„ƒ…‰‰‰ˆ‡‡‡ˆˆ‡ˆ‰‰Š‰‰‰Š‹‹‰†„„…~}~~}‚ƒ‚ƒ…‡†……ƒ€‚‚‚€‚‚‚‚…‡†‡ˆŠŠ‹ŠŠŠŠ‹‹‹‹ŒŒ‹‹‹Œ‹‰…ƒ‚‚„††…ƒ†…„„…ˆ‹ŒŽŽ‹†‚€€‚‚ƒƒ……††ˆ‡…ƒ‚‚‚‚ƒƒƒ‚‚‚‚‚‚‚ƒƒƒƒ„‚ƒ…††…„…„……„„……††‡‡††††††††‡‰Šˆ†ƒ‚‚‚ƒƒ‚‚„†…ƒ‚‚‚‚„†ˆ‰‚„……„‚€€€€€€€€€€€€€€€€{~…‡‰‰‰‡‡†‡‰Šˆ†…†ƒ}€€€€€€~~‚€‚‚ƒ„…………€~}|}||||||||zxwxz}‚‚‚‚‚€‚€‚€~zyxwvvwwvwwxxwvuvzzusuwvuwxxwwwxxxxyxvttuttvz~€€€€~}|{zwxxwy~€€€€€}{~}|{{{zzzzyyyzz{zzzz{|}~}zwutsqppomkjilkkpv}€€}}}~}}}}z|~‚„„…„„„……††††…„„ƒƒƒƒ€€‚ƒƒ|{}}{|~~~~~~}~€~}|}|{~ƒƒ‚‚ƒƒƒ‚‚„†ˆ‰ˆ…€€‚…„…„„ƒ„„……ƒƒ†ƒ}zz€€}{{}~€€€€€€€€€€€€€€€€‚„ƒ|}~}~~~~~~~…‡‡†‡‡††††††††‡†……„…†‡††††…
†……„†‡ˆˆˆ†…‰ˆ†…ƒ‚ƒƒƒ‚‚ƒƒƒ„„„„„„„„…†‡‡‡…ƒ‚…„ƒ‚€€€€‚ƒƒ„„„ƒƒƒƒ‚‚ƒ„…†‹Œ‘‘‘ŽŠ…€ƒ„„†Š‰‰ˆ‡‡‡‡ˆ‡ˆˆ‰‰‰‰‰Š‹‹‰†„„…~}~~}‚ƒ‚ƒ…‡†……ƒ€€‚‚ƒ‚‚€‚„‡‡‡ˆ‰‰ŠŠŠŠŠŠ‹‹‹‹‹‹‹‹Œ‹‰…‚„„…‡Š‹‹‰‹Š‰‰Š‹ŽŒŽŒˆ„ƒƒ„…………††‡‡†…ƒ‚‚‚ƒƒ‚‚‚‚‚‚‚‚‚ƒ‚„„ƒƒ…††……„………„……††‡‡‡††††††††‡‰‰ˆ…ƒ‚‚ƒƒ‚‚„……ƒ‚‚‚‚ƒ…‡ˆƒ„…‡††…„€€€€€€€€€€€€€€€€|~…ˆ‰Š‰ˆ‡‡ˆ‰Šˆ†ƒ„|{}~}}}~~€€€€€~~~‚ƒ€‚ƒ‚‚ƒ„…………€€~}}||||||{{{z{~ƒ‚‚‚ƒƒ€‚€€€€~}yxwwvvwwuvwxxwwvvyyusuwwwvwwyxwvuvwvvvusuttvz~~~€~~{zwxxwy~‚€€€€€€€€|z||}|{{zz{zzyzzz{{zzz{|}~}yuqnlkmmmmnnnoqqrtx{}}{{{z{{{{{|€ƒƒ„„„„…………††ƒ„…††„ƒ‚ƒƒ}{|~||}~~}}}}}~}|}||}ƒƒ‚‚ƒƒ‚‚‚„†ˆ‰ˆ…€‚…„……„„ƒ„…„‚ƒ…„~{{€€|{|~€€€€€€€€€€€€€€€€‚„ƒ|}~}}}~}~~‚†ˆ‡††‡††††††††‡†…„……†‡††††††††ƒ„†‡‡†…„„„ƒƒƒ‚‚ƒƒ„„„„„„„„„…‡ˆˆ†„‚…„ƒ€€€ƒƒ„„„ƒƒ‚ƒƒƒƒ„…‡ˆŒŽ‘‘‘‘‘ŽŒˆƒƒ„ƒ†Š‰‰ˆ‡‡‡ˆˆˆˆ‰‰‰‰‰ˆŠ‹‹‰†„„…‚}~~}~ƒ‚ƒ…‡†……ƒ€‚‚‚€‚‚ƒ„‡ˆˆˆˆ‰‰ŠŠŠŠŠŠ‹‹‹‹‹‹‹ŒŒ‰…‚‚‚„‡‹Œ‹‹ŠŠŠ‹ŒŒ‰‹ŒŠˆˆˆ‰ˆˆ‡‡‡‡††…„‚‚‚ƒƒƒ‚‚ƒ‚‚‚‚‚ƒƒ„‚ƒ…††…„…„……„„……††††……………………†ˆ‰ˆ…‚‚ƒƒ‚‚„†…ƒ‚‚‚‚ƒ„…†‡‡ˆ‡‡††…€€€€€€€€€€€€€€€€|~…ˆŠŠŠ‰ˆˆ‰‰‰‡†ƒ„|z||{{{|}~€€€€€€€€€€~~~~€ƒƒ‚‚ƒ„…†††‚ƒƒ€‚€~~~}}}}||~}{z}‚‚‚‚ƒƒ€€€‚€€€~||zyxwvvwwuvwxxxwwuyyusvxwzyvvwwussuvuvxxvuttvz~}~€€{zwxxwy~€€€€€€€€€€|yy{||{{{{|{zzyzz{{{zzz||}}zxvuuwwxyz{{{wwwyy{{{|{zzyz{|{}‚ƒƒƒ„„„……†††††††„ƒ‚‚ƒƒ€~}}~€€}~~~}}}}||}~~~~||}|{~ƒƒ‚‚ƒƒƒ‚‚„†ˆ‰ˆ…€‚…„„„ƒ‚‚ƒ„„‚ƒ†„||||}~|}€€€€€€€€€€€€€€€€€‚„ƒ|}~}|}}~~~€ƒ‡ˆ‡††‡‡‡‡‡‡‡‡‡††…†…††††††††‡†‡‚„…‡‡‡……‡†…„‚‚ƒ‚€€‚ƒ„„„„„„„„‚„‡ˆˆ†„‚„ƒ‚‚‚‚‚ƒƒƒ„ƒƒ‚‚ƒƒƒ„…‡ˆ‰‹ŒŽ‘‘‘‘’’‘‹†ƒƒƒ‚„‰ŠŠ‰ˆˆˆˆ‰ˆ‰‰‰‰‰‰ˆŠ‹‹‰†„„…‚}~~}~ƒ‚ƒ…‡†……ƒ€€‚‚‚‚‚…‡‹Š‰‰ˆˆ‰‰‰ŠŠŠŠ‹‹‹‹‹ŒŒŠ‡„€„ˆ‹‹Šˆˆ‡‡‡‡†††‰‹‹Šˆ‰ŠŠŠ‰ˆ‡‡††…„ƒ‚‚‚ƒƒ‚‚‚‚ƒƒ‚‚‚‚‚ƒ„„ƒƒ…††……„………„„„……†…†……………………†ˆˆˆ„‚‚ƒƒ‚‚„……ƒ‚‚‚ƒ……ˆˆ‡‡……„„€€€€€€€€€€€€€€€€|~‚…ˆŠŠŠ‰ŠŠŠˆ‡††††ƒ}{||z{{||}}}}€€~~~}€€~~~‚‚~~~€‚‚‚‚ƒ……‚‚„„~~}}}{~zx{~‚‚‚‚€€€‚€}|{{zywwvvvtuwwxxxwuxyusvxxzywuutuuvvvvvwxxuttvz~€€€~}{zwxxwy~‚€€€€€€€€€€€~{yvy{|{z{||{{zzzzz{{{z{{|}~|{{{{{yz|}}|{zz{|||{{{~}|{{{}~|}‚ƒƒƒ„„…………††‡†…„ƒ‚‚‚ƒ‚‚ƒ‚€~}~€€~|||}}}~~~~~}}}}}}||}ƒƒ‚‚ƒƒ‚‚‚„†ˆˆˆ„‚„„„„ƒ‚‚ƒ„„‚ƒ†…}}yy{~}z|€€€€€€€€€€€€€€€€€‚„ƒ|}~}||}}~~„‡ˆˆ††‡‡‡‡‡‡‡‡‡††††‡†††††††‡‡‡‡ƒ„†ˆ‰‰ˆ‡‹‰‡…„ƒ‚ƒ‚€€‚ƒ„„„„„„„„„‡‰‰‡„‚ƒ‚‚‚‚ƒ„„ƒƒƒƒƒ‚‚ƒƒ„„†‡‰ŠŠ‹‘’’‘’“”“’’’‰…ƒ‚€ƒ‡‹ŠŠ‰‰‰‰‰‰‰ŠŠŠ‰‰‰Š‹‹‰†„
„…ƒ~~~|~€ƒ‚ƒ…‡†……ƒ€‚‚€ƒ†‰Ž‹‰ˆˆˆˆ‰‰ŠŠŠŠ‹‹ŒŒŒŽŒˆ…€€ƒ†ˆ‡†ŠŠŠ‰‰ˆ‡†…‡‰‰‡††‡‡†‡†††††„„‚‚‚ƒƒƒ‚‚ƒƒ‚‚‚ƒƒ„‚ƒ…††…„…„……„„„„…………………………………‡ˆ‡„€‚ƒƒ‚‚„†…ƒƒ‚‚ƒ„…ˆ‡‡……„„„€€€€€€€€€€€€€€€€|‚†ˆŠŠŠ‰‹ŒŠˆ†††ˆˆ„~{||z||||{{{{}}}~~~~~€€~~~~||}{‚‚ƒƒƒƒ††ƒƒ……‚€€~~~}{~yvy||€€‚‚‚}~€€~~€~}{z|{yxwvvvtuvxxxxwtxyusvxxwwwussvxxvuutssuuttvz~€€€€~}|{zwxxwy~€€€€€€€€€~{xux{|{z{}||{zzzzz|{{zz{|}~~}|{||}}~€}zx{|~~}||}€}||}€|}‚ƒƒƒ„„„……†††ƒƒ…………ƒ‚ƒ‚‚ƒ‚€~}}}€€~{||}~€€~~}}|}}}}||~ƒƒ‚‚ƒƒƒ‚‚„†‡ˆ‡„€„………„ƒƒ„…„‚ƒ†…€}~zy{~|xx|€€€€€€€€€€€€€€€€‚„ƒ|}~}|||}~~„‡‰ˆ††‡‡‡‡‡‡‡‡‡…†‡ˆ‡‡†……†††‡‡‡‡„…‡Š‹‹ŠŠ‹Šˆ‡†………ƒ‚€€‚ƒƒƒƒƒƒƒƒƒƒ‡‰‰‡„‚‚‚‚‚ƒ„…†ƒƒƒƒƒ‚„„„„†ˆ‰‹‰ŠŒŽ‘’’‘“”•”“““‘‹†„‚‚†‹‹Š‰‰‰‰Š‰‰ŠŠŠŠ‰‰Š‹‹‰†„„…ƒ€~~~|}€ƒ‚ƒ…‡†……ƒ€€‚‚‚‚‚‚‚‚‚‚€€ƒ‡‰ŽŒŠˆˆˆˆ‰‰‰ŠŠŠŠŠŒŒŽŽŒ‰†„ƒƒ…‡‡…„††††…„‚„†ˆ‡„‚‚‚ƒƒƒ„……††„ƒ‚‚‚ƒƒƒ‚‚„ƒ‚‚‚ƒƒ„ƒƒ…††……„………ƒ„„„„„„„………………………‡ˆ‡„€‚ƒƒ‚‚„……ƒ‚‚‚‚‚‚‚‚ƒ‚‚ƒ„ˆˆ‡††††‡€€€€€€€€€€€€€€€€||„ˆŠ‰ˆŠŠŠŠŠ‰ˆˆ‰ˆ„}z{|{{{zyyyyy||~€€€~~~||~€€‚„…„ƒ…„ƒƒ…†††‚‚€|}~|zy|€‚„„‚€€‚€€}{{|||{zyyxvuttuuvvvvvvyxvvwxxxuuuuuuuvttstuutstssvz}~}€€‚}{z~€|yzzwx|€‚€€€€€€€~}{{{{{{||{{{{{{{{zzzz{{{{|}~~~~}|~~~~~~~~}|{zz{|}}}}~~€|}‚ƒ„……„„„ƒ„…††…„…‡†ƒ‚ƒ‚€~}}}}}~~~~~~~~||}~~}||€~‚ƒƒ‚‚‚‚ƒ„„ˆˆˆ„‚„ƒƒ„„„ƒ‚‚„„„„ƒ€z{{zz|‚€€€€€€€€€€€€€€€€ƒ…„|}}}}}}}}~~}†‰‰ˆ‡ˆ‡‡‡‡‡‡‡‡††††‡‡‡†††††††††…‡ˆŠŠ‰ˆ‡‰ˆ‡…„ƒƒƒ€€€€‚‚ƒƒ„„ƒ…‡‡†…„…„‚‚„†††€‚„„ƒ‚‚ƒƒ‡‰‡†ˆ‰ˆŒŒŽ‘“””••••””“•’Œ……ŠŠŠŠŠŠŠŠŠŠŠŠŠŠŠŠŠŠŠ‰ˆˆ‡††‚~|}}|‚ƒ‚‚ƒ†‡‡†„ƒ‚€€€ƒ„ƒƒ…ŒŠ‰ˆˆ‰‰Š‰‰‰‰ŠŠ‹‹ŒŒŒŒ‹Š‰‡†…„ƒƒ„„…………………………………………†††……………†…„‚ƒƒƒ‚‚‚‚‚‚‚„ƒ‚‚ƒ…†……„………………††††……††††††††††††…„‚‚‚‚‚€€ƒ„„‚€ƒƒ‚‚€€‚‚‚ƒ„†‡††††††††€€€€€€€€€€€€€€€€||„ˆŠ‰ˆŠŠŠŠ‰‰ˆˆ‰ˆ„}z{|zzzzzzz{{{{|}~~€~€€€€€€€€ƒƒ‚‚„…†…„…„‚ƒ„††…‚ƒƒƒ‚€€€}zyz|€‚ƒƒ€€€€€~|ywxzz{zyyxvuutuuuvvvvvxwuvxyyyvvvvvuuuvuttuvutsssvz}~}}~€~|zy~|yzywy|€‚€€€€€€€€~}||{{{{{{{{{{{{{{zzzz{{{{}}~~}}||||||||~}||}~||}}~~~}~€‚ƒ„„„„„ƒƒ„…†‡…„…‡†ƒ‚€‚‚‚€~}}}~~}|}€~}}}|}~~€~~‚ƒƒ‚‚‚‚‚ƒƒ„ˆˆˆ…€‚…ƒƒ„„„ƒ‚‚ƒƒƒƒ‚~z{zzz|€€€€€€€€€€€€€€€€ƒ…„|}}}}}}}}~~~}†‰‰ˆ‡ˆ‡ˆ‡ˆ‡ˆ‡ˆ‡‡ˆ‡†††‡††††††††…†ˆ‰Š‰ˆ‡…„ƒƒ‚‚‚‚€€‚‚‚ƒƒ€‚…†††‡ˆ„ƒ‚ƒ………€‚„„‚‚ƒ„…‰ŒŒ‹Œ‹‰ŒŒŒ‘’“””•••••”—”…€€ƒ‡ŠŠŠŠŠŠŠŠ‰‰‰‰‰‰‰‰ŠŠ‰‰‡‡††ƒ}}}|~‚ƒ‚‚ƒ†‡‡†ƒƒ‚€€€ƒƒƒƒ…‹‹‰‰ˆ‰‰Š‰‰‰‰‰ŠŠ‹‹ŒŒŒ‹‹Š‰‡†„ƒƒƒ„„„„„„„
„„„……………………„„„„„„„„…„ƒ‚‚ƒƒ‚‚‚‚‚‚‚‚€‚ƒƒ‚‚ƒ……………„………………†††……††………………††††…„‚‚‚‚‚‚ƒ……ƒ‚‚‚‚‚‚‚ƒ„†‡‡‡‡‡‡‡‡‡€€€€€€€€€€€€€€€€||„ˆŠ‰ˆ‰‰Š‰‰ˆˆ‡‰ˆƒ|y{|zzzzzz{{|{zzz{}}}~€€€€€‚‚‚~|}~ƒ…ˆ‰‰ˆ…ƒ‚‚„……„„„…„„‚€‚€~|||{z~€‚‚€€€€€}zvtuwyzyyxwvvuuuuuvvvvvvutvz|{zxxwwvvuuwuuuvvvutssv{}~}|}~~|{y~{yyywy|€€€€€€€€€€~}}}||{{zz{{{{{{{{zzzz{{{{}~€€~}}}}}}}}}€~~€€}}~~~~€ƒƒƒƒ„„„ƒ„„††…„…‡†ƒ‚ƒ‚€~|}~€~~|{|€}|||||}~}}‚ƒƒ‚‚‚‚‚„„ˆˆˆ„‚„ƒƒ„„„ƒƒ‚‚‚ƒ‚‚€~}zz{zz{~€€€€€€€€€€€€€€€€€ƒ…„|}}}}}}}}}~~}†‰‰ˆ‡ˆˆˆˆˆˆˆˆˆ‡‰Š‰†…†ˆ††††††††„…‡‰‰ˆ‡†‡†…ƒ‚€€€€€‚„…††‡ˆ…‚ƒ„„ƒ€‚„ƒƒ‚„…ˆ‘‘Ž‹ŒŒŒŽ’’“””•–––•˜”„~‚…‰‰‰‰‰‰‰‰ˆˆˆˆˆˆˆˆŠŠŠˆˆ†††„€~}}|~ƒ‚‚ƒ†‡‡†„ƒ‚€‚€€‚‚„†‰‰ˆˆ‰‰ŠŠ‰‰‰‰‰‰ŠŠ‹‹ŒŒŒ‹Š‰†…„ƒƒƒƒ„„ƒ„ƒ„ƒ„„„ƒ„ƒ„ƒ„„ƒƒƒƒ„ƒ„ƒ„ƒ‚‚‚ƒƒƒ‚‚‚‚‚‚‚„ƒ‚‚ƒ…………„„„………………†……………………………††††…„‚‚‚‚‚‚„†…ƒ‚‚‚‚‚‚‚ƒ„†‡‡‡‡‡‡‡‡‡€€€€€€€€€€€€€€€€||„ˆŠ‰ˆ‰‰‰‰‰‰ˆ‡ˆˆƒ|y{{zzzzyzz{{zyxxy|}}~~‚‚|{|}€‚…‡ŠŠ‰„ƒƒ„ƒƒ…†…†…„‚‚~||}{|~€€€€€€€~zxutuwxxyyxwwvuuuuuuuvvwussw|}{zyyxwvvuuuuvwwussssvz}~}|~€€~}y}{xyxvz}€€€€€€€€€€}~~}|{{zz{{{{{{{{zzzz{{{{}~€€~}~~€€~€‚‚‚‚‚„„ƒ„ƒ…†‡…„…‡†ƒ‚€‚‚‚€~~~€€€~}}||}€~|}~~~~}|~ƒƒ‚‚‚‚‚ƒƒ„ˆˆˆ…€…„„„„ƒƒƒƒ‚‚‚}}z{zzy{}€€€€€€€€€€€€€€€€€ƒ…„|}}}}}}}}}~~}†‰‰ˆ‡ˆˆˆˆˆˆˆˆˆˆŠ‹Š‡…†ˆ‡‡‡‡‡‡‡‡„…‡ˆˆˆ†…ˆ‡†ƒ‚€€‚€€‚€€€€€€€€‚„……„„…†…ƒ‚„ƒ‚‚ƒƒ‚ƒ…‡‘“’‘‘ŽŒŒŒŽ‘’’“”•––––•’‹„~~‚†ˆˆˆˆˆˆˆˆ‰‰‰‰‰‰‰‰ŠŠ‰‰‡‡††„~~}|~ƒ‚‚ƒ†‡‡†ƒƒ€ƒƒ†‡‡‡ˆˆŠŠ‹‰ˆˆˆˆ‰‰ŠŠ‹‹ŒŒ‹ŠŠ††„ƒƒƒ„„ƒƒƒ„ƒ„ƒƒ‚‚‚‚‚‚‚‚ƒ„„„„………ƒ‚‚‚ƒƒƒ‚‚‚‚‚‚‚‚€‚ƒ„‚‚ƒ………„„„……………………………„„………………††††…„‚‚‚‚‚‚‚ƒ…††„‚‚‚‚‚‚‚‚ƒ„†‡‡‡‡‡‡‡‡‡€€€€€€€€€€€€€€€€||„ˆŠ‰ˆˆˆ‰‰‰ˆˆˆˆ‡‚|yz{yzzzyyyyyzyxwx{~€~~~~~}|€€€€‚„‚‚ƒ„††…„„‚€€‚‚„…………„ƒƒ‚|}€~zy|~€€€€€~}wvvwxxwvxxxwwvvuvvuuuvwwurrw~~zzzyxwvvursuvxvsqtssv{}~}|}€€~}x}~zxxxv{}~€€€€€€€€€~~~~||{zz{{{{{{{{zzzz{{{{}~~}||}}}}}|~}~}}}}}}~€‚‚‚‚„„„ƒ„„††…„…‡†ƒ‚ƒ‚€~€€}|{}~~~~€€€€~}{|}ƒƒ‚‚‚‚‚‚„„ˆˆˆ„‚„„„ƒƒƒƒƒƒ‚‚~|{{{zzz~€€€€€€€€€€€€€€€€€ƒ…„|}}}}}|||}~~}†ˆ‰ˆˆˆˆ‡ˆ‡ˆ‡ˆˆˆ‰Š‰‡†‡ˆ‡‡‡‡‡‡‡‡„…‡ˆˆˆ†……„ƒ‚€€€‚‚‚‚€~‚ƒ„„ƒ„…‡‡…ƒ‚ƒƒƒ‚‚ƒ‚‚ƒ†‰‘””‘‘Ž‘’’’“”••••‘Ž‰‚~„ˆˆˆˆˆˆˆˆˆŠŠŠŠŠŠŠŠŠŠŠˆˆ†††„€~~~|‚ƒ‚‚ƒ†‡‡†„ƒ‚€€ƒ€€ƒ………‡‡‰‰ŠŠˆˆˆˆˆˆ‰‰ŠŠ‹ŒŒ‹‹Š‡‡…„„„……„„„„„„„„„„„„„„„„……††‡‡‡ˆ„ƒ‚‚ƒƒƒƒ‚‚‚‚‚‚‚„ƒ‚‚ƒ……„„„„„……†……„„„…†„„„……………††††…„‚‚‚‚‚‚‚ƒ…††„‚€‚‚‚ƒ‚‚‚ƒ„†‡‡‡‡‡‡‡‡‡€€€€€€€€€€
€€€€€€||„ˆŠ‰ˆˆ‰‰‰‰‰ˆˆˆ‡‚{xzzy{{zyyyyyzyyxyz|}~~~~~}{{~}~€‚€€€‚„…„ƒƒ€€€‚‚ƒ„„„ƒƒ…‚€{wwz|~~~€€€€~}|uvvxyywuxxxwwvvvvvutuvwxvssx‚~yyyxxwwvvrstvwvsqsssvz}~}{|}~~}{zx|~zwxwu{}~~€‚€€€€€€€~~~}}||{{||||||||zzzz{{{{|}}~~}}|{{{{{{{{z{{|||{z{{{{zzyy{|~€‚ƒƒƒ„„ƒ„„…†‡…„…‡†ƒ‚€‚‚‚€~€€€€€~}|{~€€~}~€~}}}}}{{z}€ƒƒ‚‚‚‚‚ƒƒ„ˆˆˆ…€…„„ƒƒƒƒƒƒ‚‚‚ƒ€~}|}|{z{~€€€€€€€€€€€€€€€€€ƒ…„|}}}}||||}}~}†‰‰ˆ‡ˆ‡‡‡‡‡‡‡‡ˆ‡‡‡ˆˆˆˆ‡‡‡‡‡‡‡‡„…‡‰‰ˆ‡†ˆ‡…„‚€ƒƒ‚‚ƒƒ‚‚€~~€‚ƒƒ…ˆŠ‰‡„ƒƒ„‚‚ƒƒ‚‚„‡Š”•”Ž‘‘ŽŽŽŽ‘’‘’“”””””Œ†}„‰‰‰‰‰‰‰‰‰‹‹‹‹‹‹‹‹ŠŠ‰‰‡‡††ƒ€}~~~€ƒƒ‚‚ƒ†‡‡†ƒƒ€‚€‚„„…†ˆˆ‰ˆˆˆˆ‡‡‡ˆˆ‰‰Š‹ŒŒŒ‹‹‰ˆ‡†……†‡……………………ˆˆ‡ˆ‡ˆ‡ˆ…††‡‡ˆ‰‰†…„ƒ‚‚‚‚ƒƒ‚‚‚‚‚‚‚‚€‚ƒ„‚‚ƒ……„„„ƒ„„…†…„„ƒ„…†„„………†††††††…„‚‚‚‚‚‚„†…ƒ‚‚‚‚‚‚‚ƒ„†‡††††††††€€€€€€€€€€€€€€€€||„ˆŠ‰ˆ‰‰‰ŠŠ‰‰‰‡‡‚{xzzy{{{zzz{{zzzzzz{{||}}}}|{~|zyz{||~€„…†…ƒ€€€~‚ƒƒ„„…ƒ{xux{}}~€€€€~|{uuuvxxwuxxwwwwvvwvuttuwxwtsy€ƒ}wwwwwwwwwutttvuustssvz}~}|}~~~|zyw|~zwxwu|}~~~€‚€€€€€€€~}}}}}|}|||||||||zzzz{{{{{{|}}|{{zzzzzzzzzz|}}|{z|||{{zzzxz}‚ƒ„…„„„ƒ„…††…„…‡†ƒ‚ƒ‚€~~~~}~~~~~}||||}{zz|€ƒƒ‚‚‚‚‚ƒ„„ˆˆˆ„‚„…„ƒƒ‚ƒƒƒƒƒ„ƒƒ~~~}|{|€€€€€€€€€€€€€€€€ƒ…„|}}}}||||}}~}†‰‰ˆ‡ˆ‡‡‡†‡†‡‡‡…„…ˆŠ‰‡‡‡‡‡‡‡‡‡…†ˆ‰Š‰ˆ‡‹‰‡„ƒ‚„ƒ‚‚‚‚ƒ„ƒƒ‚€~}€‚ƒ…ˆ‹‹ˆ†„…„ƒ‚‚ƒƒ‚‚„ˆ‹”––’‘‘’“‘’’“““’’Œ‡€|}‚‡ŠŠŠŠŠŠŠŠŠŠŠŠŠŠŠŠŠŠ‰ˆˆ†††‚}~…ƒ‚‚ƒ†‡‡†„ƒ‚€€€€‚‚ƒ„…†‡ˆˆ‡‡‡‡‡‡‡ˆˆˆ‰Š‹‹ŒŒŒ‹‹Šˆ‡‡‡ˆˆ††††††††ˆ‡ˆ‡ˆ‡ˆˆ„„…†‡ˆˆ‰‰ˆ†„ƒƒƒƒ‚‚‚‚‚‚‚„ƒ‚‚ƒ……„„ƒ„„„…†…„ƒƒ„…†„„……†††‡††††…„‚‚‚‚‚‚ƒ……ƒ‚‚‚‚‚‚‚ƒ„†‡……………………€€€€€€€€€€€€€€€€||„ˆŠ‰ˆ‰‰ŠŠŠŠ‰‰‡†‚{xyzy|{{{||}}z{{|{zyyz{|}}}}}~||}}}~€ƒ……„ƒ~€~|}‚ƒ„„‚€}{tw{|}}€€€€}|{vtssuvwvxwwwwwwvwvuttuwxxutyƒ}uvvwwwwwwxvtstvvwsssvz}~}~€€}{zw|}zwwwu|}~~}~€ƒ€€€€€€€~||}}}}}}||||||||zzzz{{{{zz{||{zz||||||||z{}~~}{z€€~~}}wy|‚„…†„„ƒƒ„…†‡…„…‡†ƒ‚‚‚€~~~~~~€€€€~}~~€€~zyz|€ƒƒ‚‚‚‚‚‚ƒ„„ˆˆˆ„€‚„…„ƒ‚‚ƒƒ„„„„„ƒ‚€~~~}||€€€€€€€€€€€€€€€€ƒ…„|}}}|||||}}}}†‰‰ˆ‡ˆ††††††††‡„„‰‹‰†ˆˆˆˆˆˆˆˆ…‡ˆŠŠ‰ˆ‡Š‰‡†„ƒƒƒ„ƒƒ‚‚ƒƒ„„ƒƒ€~}‚‚‚‚ƒ†ˆŒ‰†………ƒ‚ƒƒƒ‚‚„ˆŒ”—˜”’’‘‘’“‘’’’’’‘‘‘Ž‡€||€…ŠŠŠŠŠŠŠŠ‰‰‰‰‰‰‰‰ŠŠ‰‰‡‡††~}~€‚†ƒ‚‚ƒ†‡‡†ƒƒ‚€€€‚‚€‚„…†ˆˆ‡††‡‡‡‡‡‡ˆˆ‰Š‹ŒŒŒŒ‹Œ‹Šˆˆˆ‰‰‡‡‡‡‡‡‡‡……………………ƒƒ„…†‡‡ˆ‹Šˆ…ƒ‚ƒƒƒ‚‚‚‚‚‚‚„ƒ‚‚ƒ…„„„„ƒ„„„†…„ƒƒ„…†„………††‡‡††††…„‚‚‚‚‚€€ƒ„„‚€‚‚‚‚‚‚‚ƒ„†‡……………………€€€€€€€€€€€€€€€€~}‚‡‹ŒŒ‹‹Œ‹ŠŠŠ‹‹ˆƒ~{yzz{||yz|}
|{}}zy{||}|{{}}|{}€€}{~€€„‰Šˆ†‚~}|}€~~€€……„ƒ€wwwwy|~~~}|{utsuxyywuuvvvvvvvvvvwwwwrsw{|yxxxvuvwyyywtrrsurrsw|~}|}~~~|zyz{vvxxz}€€€€‚€€€€~€€€~}{{{}}||||||||{{{zzzyyz{}~€~~~~~~~~}wy{~‚ƒ„…„…„…„…„……†…‚€€€‚ƒ‚€~~}||||~~~~}}~~€~}~€~~|{|‚ƒ‚ƒƒƒ‚ƒƒ„…†††…ƒ‚ƒ„ƒƒƒƒƒƒ‚‚ƒƒ‚‚€€€~~}}}}€€€€€€€€€€€€€€€€ƒ†…}}|{||||||}}|~ƒ‰‰‡†‰‡‡‡†‡‡ˆˆˆƒ„ˆˆ‡ˆˆ‰Š‰ˆ‡ˆ‰†…†ˆŠ‰†„†††…ƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚ƒƒ„„……‰ŠŠ‰†„ƒƒ‚ƒƒ„‡Š”–—”Ž“Ž‘“’‘‘‘‘’“ˆƒ}{€ˆŠ‰‰Š‹Šˆ‡„†‰Š‰‰Š‹ŠŠŠŠˆ†…ƒ‡„ƒ„ƒ‚ƒ…ƒ‚‚ƒ†‡‡†ƒƒ‚‚‚€‚‚‚‚‚‚††††‡‡‡ˆ†††‡‡‡‡‡ˆ‰ŠŒŽŽŽŠ‡†‡‡†…‚‚‚‚ƒƒƒƒ€€‚„„ƒ‚‚‚ƒ„ˆ‡…„‚‚‚‚ƒƒ‚‚‚‚‚‚‚‚ƒ„„‚‚‚ƒƒƒ„‡†„„……ƒ‚„„„……††††………………†…‡ˆ‡…‚‚ƒ‚‚‚€€‚‚ƒƒ‚‚ƒ‚‚‚‚ƒ„ƒ‚‚ƒ„†‡‡‡†…„ƒ€€€€€€€€€€€€€€€€||}†‰ŠŠŠ‹‹‹Š‰ŠŠ‹ˆƒ~zzzz|}|zy{{z{~}zxz{{|zyz|}|z}€}€€ƒ„ƒƒ…†††‚€}~€~~€€€~~}}yxwxy{}€€€~|zyutstwxwvvvvvvvvvvvvvwwwwrsw{|yuwyyxwwwzxwvwxyywsqu{}zxyz{{zxwy{vvxxz}€€€€‚€€€€~€€€€~}|{|}}||||||||{{zzzzyyz{}~€~~~~~~~~~~~~~~~~~~yz}€‚„……„…„…„…„…„………‚€€‚€‚‚‚€~{|}~~~~~~~~~}}~~}~~~~}~|{|‚ƒ‚ƒƒ‚‚ƒƒ„…†††…ƒ‚ƒ„„„„„ƒƒ‚‚„ƒƒƒ‚€~~~~€€€€€€€€€€€€€€€€ƒ†…}}|{||||||}}|~ƒ‰‰‡†‰‡‡†‡†‡ˆˆˆƒ…ˆˆ‡ˆ‡‰‰‰‡†‡ˆ……†ˆŠ‰‡„…†…„‚€ƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚‚ƒƒ„„„……‰ŠŠ‰†„ƒ„€‚ƒƒ…ˆŠ“•—•‘‘“’“’‘‘‘‘’Ž‰„}{‰ŠŠ‰ŠŠŠˆ‡…‡ŠŠ‰ˆˆŠ‰ŠŠŠˆ‡…„‡„‚ƒƒ‚…ƒ‚‚ƒ†‡‡†ƒƒ‚‚€‚‚‚‚‚……†††‡‡‡†††‡‡‡ˆˆˆ‰‹ŒŽŽŽŠ‡†‡ˆ†…‚€‚ƒ„€€€‚ƒˆ‡…„‚‚‚‚ƒƒ‚‚‚‚‚‚ƒƒ„‚‚‚ƒƒƒ„„„……„„……„„„……††††………………†…‡ˆˆ…ƒ‚ƒ‚‚‚€€‚‚ƒƒ‚‚‚‚€‚‚ƒ‚€‚ƒƒ„„…„ƒ‚‚€€€€€€€€€€€€€€€€{{|…ˆŠ‰‰Š‹Š‰‰‰Š‹ˆƒ~{yzz}}|zzzzy|~~zxyzzzyyz|}}{~€‚€€„…„„…†‡…‚‚‚‚~€€€}}}}}}}}zyxwyz|}}zywvuttuvutvvvvvuvvvvvvwwwwstw|€}zuwyyxwwwzwux|€€~wqsz}xvwyzzyxwy{vvwwz}€€€€‚€€€€~€€€€€~}|||}}||||||||{zzzzyyyz{}~€~~~~~~~~~~~~|}}~€€z{~€ƒ…††…„…„…„…„ƒ„…„ƒ‚‚ƒ‚€~}~€€~|{}~€€~}|}}~~}}}~~|{|‚ƒ‚ƒƒ‚‚‚ƒ„…†††…ƒ‚ƒ„………„ƒ‚‚‚„„„„ƒƒƒƒ†……„„„„ƒ€€€€€€€€€€€€€€€€ƒ†…}}|{|||{||}}|~ƒ‰‰‡†‰‡‡‡†‡‡ˆˆˆƒ‚„ˆˆ‡ˆ‡ˆ‰‡…„…†ƒ„…‡‰ˆ†„…††…ƒ‚‚‚‚‚‚ƒƒ„„„ƒƒƒƒƒƒƒƒƒƒ„„„………‰Š‹‰‡„„„€‚ƒƒ„…ˆ‹“––”’“”‘‘’““’‘‘‘‘’Š„€~}}ƒ‹Š‰‰‰‹Šˆ‡†ˆŠŠˆ††‡ˆˆ‰‰ˆ‡…„†ƒ‚‚‚€‚…ƒ‚‚ƒ†‡‡†ƒƒ‚‚‚€‚‚€…………††‡‡††‡‡ˆˆˆˆ‰Š‹ŒŽŽŽŠ‡†‡‡‡…ƒ‚‚€~€€‚ƒ€‚ˆ‡†ƒƒ‚‚‚ƒƒ‚‚‚‚‚‚‚‚ƒƒ‚‚ƒƒƒƒƒ„……„„…†„„„………†††………………†…‡‰‡…‚‚ƒ‚‚‚€€‚‚ƒƒ‚‚€€€‚€€€‚€‚‚‚€€€€€€€€€€€€€€€€|{|€…‰ŠŠŠŠ‹Š‰‰‰Š‹ˆƒ~zzzz|}|{zzzyx{{yx{}}zzy{~€€‚€€€€‚„……†‡‡ˆƒƒ‚~€€€€}}}~~~~yxwwxz|~€€}{yxvuuttttsvvvvuvuu
vvvvwwwwstw|€€}zxxxwuvxzxutx€……‚ƒ}wvz}}{wyz|||{zy~€{uuwwz}€€€€€€€€€€€€€~~}}}}}}||||{{zzzzyyyyz{}~€~~~~}}}}}}}}{||~€y{}€‚„…†„…„…„…„…ƒ„……ƒ‚‚ƒ‚€‚‚‚€~€€~}|}~€~}}~~}|}~|{|‚ƒ‚ƒ‚‚‚‚ƒ„„†††…ƒ‚ƒ„„„ƒƒ‚‚‚‚……………„„„‰‰ˆˆ‡‡‡‡€€€€€€€€€€€€€€€€ƒ†…}}|{|||||}}}|~ƒ‰‰‡†‰‡‡†‡†‡ˆˆˆ„…ˆˆ‡ˆ†‡ˆ†ƒ‚‚„‚„†‡‡…„………„‚‚‚ƒ‚ƒƒ„……ƒƒƒƒƒƒƒƒƒ„„„„……†‰Š‹Š‡…„„ƒ„…„†‰ŒŽ‘•—–”””’‘’”““’‘‘‘‘’‹…€~„‹ŠŠ‰ŠŠŠˆ‡‡‰Š‰†„ƒ„†‡ˆˆˆ‡†…†ƒ€€„ƒ‚‚ƒ†‡‡†ƒƒ‚‚€‚€€€„„„……††††††‡ˆ‰‰‰ŠŠŒŽŽŽŠ‡†‡ˆ†………„ƒ€€~~~€€~~}~~€‚ˆ‡…„‚‚‚‚ƒƒ‚‚‚‚‚‚ƒ‚‚ƒ‚ƒƒ„……†…„ƒ„„„„„„…………†………………†…‡ˆˆ…ƒ‚ƒ‚‚‚€€‚‚ƒƒ‚‚€€~€‚‚‚‚‚€€€€€€€€€€€€€€€€}}~‚‡ŠŒ‹‹‹Œ‹ŠŠŠ‹‹ˆƒ~{yzz{{{{{zzz{}}yxyzy|{{}ƒ„ƒ…ƒ€~~~€ƒƒƒ…††……€‚~}~€€||||}}~~wvvvx{~}}~}}{zyvvvutssswvvvvuuuvvvvwwwwsux}€~zwwwvuuwyussx†…‚…ƒ|z{~€yz}~~}|x~€zuuwvz}€€€€€€€€€€€€€€~~}}|}}}||{{{zzzyyxxxz{}~€~~~~}}}}}}}}{|}}€xy|~‚„„…„…„…„…„ƒ„†…„‚ƒƒ‚ƒ‚€~|}~€€€~~~}€~}~~|{|‚ƒ‚‚‚‚‚ƒ„†††…ƒ‚ƒ„ƒ‚‚‚‚ƒƒ……………………†††……„„„€€€€€€€€€€€€€€€€ƒ†…}}|{|||{||}}|~ƒ‰‰‡†‰‡‡‡†‡‡ˆˆˆƒ‚„ˆˆ‡ˆ‡‡ˆ…ƒ€‚ƒ†ˆ‰ˆ‡‡„„„‚€‚‚ƒƒ„……„„„„„„„„„„„„……††Š‹‹Šˆ……„‚ƒ………‡ŠŒŽ”––•””’‘“””“’‘‘‘‘’ˆ‚€€€„‰Š‰‰‰‹Šˆ‡‡‰Šˆ…ƒ‚‚……‡‡ˆ‡‡††‚~€„ƒ‚‚ƒ†‡‡†ƒ‚ƒ‚‚€‚€€€ƒƒ„„…„……††‡‡‰‰ŠŠŠ‹ŒŽŽŽŠ‡†‡‡‡…ˆˆ‡†„ƒ‚‚~}|||}~~~~~€‚ƒˆ‡†ƒƒ‚‚‚ƒ‚ƒ‚‚‚‚‚‚‚‚‚ƒƒƒƒ†…„„……„ƒ„„„„„………†………………†…‡‰‡…‚‚ƒ‚‚‚€€‚‚ƒƒ‚‚‚‚‚‚‚‚‚‚€€€‚ƒƒƒƒ€€€€€€€€€€€€€€€€~~ƒ‡‹ŒŒ‹ŒŒ‹ŠŠŠ‹‹ˆƒ~zzzz{zz|{zyz|~}zwxyx|{|ƒ†‡‡ˆƒ~}~€€ƒ††…„€€~}~~~~~~~~{{z{zzzzvuuvx{~|}}}||zzuvwvtsttwwvvuuuuvvvvwwwwtux}~{suwwvuuuutuz„ƒ„…„€|{}z{}~~~|{x~zttvvz}€€€€€€€€€€€€€~€~}|~~}}{{zzzzyyxxxxz{}~€~~~~~~~~~~~~|}}~~€€wy{~€‚ƒƒ„…„…„…„…„…††„ƒ‚‚‚€‚‚‚€~|}~€€€~~~~€~~~~|{|‚ƒ‚‚‚‚ƒ„†††…ƒ‚ƒ„‚‚‚ƒ„„„„………††‡‡††…………€€€€€€€€€€€€€€€€ƒ†…}}|{||||||}}|~ƒ‰‰‡†‰‡‡†‡†‡ˆˆˆ„…ˆˆ‡ˆ‡ˆˆ†‚€€‚…ˆŠ‹ŠŠŠ‡‡…„‚‚ƒ„‚‚‚ƒƒ„„„„„„„„„„„„…………††‡Š‹‹‹ˆ†……ƒ„…††ˆŠŽ’”••””’‘‘’“““’‘‘‘‘’“‹ƒ€€ƒ‡ŠŠ‰ŠŠŠˆ‡…‡‰‡…‚‚‚ƒ„…‡‡ˆ‡‡…~~}}ƒƒ‚‚ƒ†‡‡†ƒƒ‚‚€‚€‚‚ƒƒƒƒ„„„…†‡ˆ‰Š‹‹‹ŒŽŽŽŽŠ‡†‡ˆ†…ˆˆˆ‡†……„€~}|}}~‚„…ˆ‡…„‚‚‚‚ƒƒ‚‚‚‚€‚‚‚‚ƒƒ„„ƒƒ„…††…„„„„„„„„†………………†…‡ˆˆ…ƒ‚ƒ‚‚‚€€‚‚ƒƒ‚‚‚‚‚ƒ‚ƒ‚‚‚€€€‚‚‚„„„ƒ‚€€€€€€€€€€€€€€€€}|~†Š‹‹Š‹‹‹Š‰ŠŠ‹ˆƒ~{yzz|{{||yxyvyzxx{}}zz{~ƒ‡ˆˆˆƒ~~}~€~}€„†……‡ƒ€€~‚}}}}}}}}}||{zyyxwvvvxz|~}}~}|zyxuvwvutuvwwwvvuutvvvvwwwwtvy}~{rtvvutttvxz~‚‚‚‚‚€}|||||~}|zw}yttvvz}€€€€€€€€€€~€€€}|~
~}||{zzyyyyxxxxz{}~€~~~~~~~~~~~~~~~~~~~xy|‚ƒ„„…„…„…„…„…†‡†„‚‚‚‚ƒ‚€~~~}|~~~~~~~~~|{|‚ƒ‚‚‚ƒƒ†††…ƒ‚ƒ„„ƒ‚‚‚ƒ„…ƒƒƒ„……††‰‰‰ˆˆ‡‡‡€€€€€€€€€€€€€€€€ƒ†…}}|{||||||}}|~ƒ‰‰‡†‰‡‡‡†‡‡ˆˆˆƒ‚„ˆˆ‡ˆ‰‰‰†‚€‚…ˆŠ‹ŠŠ‹‰‰ˆ…„ƒ„…ƒƒƒƒƒƒƒƒ………………………………††‡‡Š‹ŒŠˆ†……ƒ…††‡ˆ‹Ž‘’“””‘‘‘‘‘“”’‘‘‘‘’“‰€‚…Š‰‰‰‹Šˆ‡„†‡‡„‚‚ƒ‚ƒ…†ˆˆˆ‡…€}|||ƒƒ‚‚ƒ†‡‡†ƒƒ‚‚‚€‚‚‚‚‚‚‚‚‚ƒƒ„„…†‡ˆ‰Š‹ŒŒŒŽŽŽŠ‡†‡‡‡…†††††††…„ƒ‚€€€ƒ„†‡ˆ‡…ƒƒ‚‚‚ƒƒ‚‚‚ƒ‚€€‚‚‚ƒƒƒƒƒƒƒ„…†††„„„„„„„„†………………†…‡‰‡…‚‚ƒ‚‚‚€€‚‚ƒƒ‚‚‚‚ƒ‚ƒ‚€‚ƒ‚‚ƒƒƒ‚€€€€€€€€€€€€€€€€€{{|€…ˆŠ‰ŠŠ‹Š‰‰‰Š‹ˆƒ~{yzz}{{}{xww{}}ywxyxyxz}‚†‡‡‡‚~}~€ƒ††„„‡…‚€„}}}}}}||€€~|{zzxwvvwy{|}{ywvtvxwutvxwwwvuuttvvvvwwwwtvy~‚{tttsrrtvx{‚€~~}zx}~€~|{w}~yttuvz}€€€€€€€€€€€€~€€}|~~}}{{zzyyyyxxxxz{}~€~~~~~~~~}y{}€ƒ…††„…„…„…„…†‡‡†„‚‚‚‚€~€}}}}~}~€€~~}|}~~|~|{|‚ƒ‚‚‚ƒƒ†††…ƒ‚ƒ„†…„ƒƒƒ„…‚‚ƒƒ„……†ˆ‡‡‡††††€€€€€€€€€€€€€€€€ƒ†…}}|{||||||}}|~ƒ‰‰‡†‰‡‡††‡‡ˆˆˆƒ…ˆˆ‡ˆ‰Š‰†‚€€ƒ‡‰‰ˆ‰Šˆ‡†ƒ‚ƒƒƒƒƒƒƒ‚……………………………††‡‡‡Š‹Œ‹ˆ†……ƒ…†‡‡ˆ‹Ž‘‘’““‘’‘’”’‘‘‘‘’‡~}~…Š‰‰ŠŠŠˆ‡‚„††„‚‚ƒ‚„†‡ˆˆˆ„€||{|ƒƒ‚‚ƒ†‡‡†ƒƒ‚‚€‚‚‚‚‚‚‚‚‚ƒƒƒƒ…†‡ˆ‰‹ŒŒŒŽŽŽŠ‡†‡‡†…„„„„…………‡†…ƒƒƒƒ„‚‚‚ƒ„†ˆ‰ˆ‡…„‚‚‚‚ƒƒ‚‚ƒ‚€€€‚‚‚ƒƒƒ„„…†…„ƒ„…„„„„„„„„†………………†…‡ˆˆ…ƒ‚ƒ‚‚‚€€‚‚ƒƒ‚‚€‚‚€„ƒ‚‚ƒ„‚‚‚‚‚€~€€€€€€€€€€€€€€€€€‚‡‹‹ŠŠ‰‰ˆˆ‡ˆ‡‚|yz{y{{{zyxxx|}}{yxy{|yxy„ˆŠ…„‚€€€‚ƒ…†‡‡ˆ‡~~‚€€|}~~}|€~}|}}}|||{{{{{{~}|{yxwvuuuuvvwxwvvuuuvvvvvvvvvvttv{‚}yuvvussux~„„~zz{{{{||{}~~|zxz{|zwuvxy~‚€€€€€€€ƒ€~€€~€€€€}{}{zyz{||{zxxwww{|}€€€€€‚‚‚€€~€{{|~ƒ…††………………………………„ƒ‚€€‚}~~~~~~~~|}~~}|~}}}~~}|}|}€ƒ„ƒ‚‚ƒ‚‚ƒ…‡‡†„‚‚ƒ‚‚‚‚‚‚‚‚€‚„…†††ˆ‡‡††……„€€€€€€€€€€€€€€€€‚ƒ‚€|z{}||||||||}„‰‰……‡‡‡‡‡ˆˆ‰‰ˆ†……‡‰‰ˆˆ‰ˆ„€ƒ„†ˆŠ‰‰ˆ†…„‚€€€‚‚ƒƒƒ„„„„„„„„„„„……„„„…††„…‡ˆˆ†„ƒ………‡‰‹‘‘‘‘‘‡}~ƒ‡‹Šˆ‰‹ŒŠ‰…„ƒ‚‚ƒ…†‡‡‡‡‡‡‡‡„‚~||~€ƒ…ƒ‚ƒ†‡†„„ƒ‚‚‚‚‚‚‚€€€€‚‚€€‚ƒ‚ƒ…„…†‡‰Š‹ŒŠ‹ŒŠ‰ˆ‡†††††…„„ƒƒ„„†…………„„„‚‚ƒ„…†ˆ‡…„‚‚‚‚„„ƒƒƒ‚‚‚‚‚‚‚‚ƒ‚ƒ………………„„„…„ƒ„………„…………††††„†ˆ‡„‚‚‚€‚‚‚‚€‚‚ƒƒ‚‚€€€‚~€‚„„„ƒ‚€€€€€€€€€€€€€€€€†††ˆŠ‹‹‹‹‹ŠŠ‰ˆˆˆ‰‰„}z||{z{{{zzzz{||{xxy{{yx{€…ˆ‰ˆ†ƒ~~~ƒ„…†††ƒ|~‚€€€€}}}}|{zz{{|||}|{zywvuuuuuuvvwxwvvuuuuvvvvvvvvvttv{‚}yuvvussux€‚‚€~zzzzyyyy{}~~}zxz{|zwuvxy~€€€€€‚€~€~€€€€}{~}{zyz{|{{yxxwwx{|}€€€€€‚‚‚‚€€€~€€{z{~‚………………………………………„ƒ‚ƒƒ‚ƒ„„|||}~€~~}}}}~~~~~}|}~}|}€ƒ„ƒ‚‚ƒ‚‚ƒ…‡‡†…‚‚ƒ„„„ƒƒƒ‚‚€ƒ„†††
…ˆ‡‡††………€€€€€€€€€€€€€€€€€~|{{|||||||||}„‰‰†…‡‡‡‡‡‡ˆ‰‰ˆ†„…‡‰‰ˆˆ‰‡…€ƒ…‡ˆˆ‡†ˆ‡…ƒ‚‚ƒƒƒƒ„„„„„„„„„„„……„„„…††‡††…„………‡‡‡‰Š‘‘‘‘‘‘‘‘‘Žˆ‚}~‚‡‹Šˆ‰‹ŒŠ‰„„‚‚ƒ„†‡‰‰‰‰‰‰‰‰„‚||~€‚…ƒ‚ƒ†‡†„„ƒ‚€‚‚‚‚‚‚€€€‚‚‚‚€‚ƒ‚ƒ………†‡‰Š‹‹Š‹ŒŒŠ‰ˆ‡†††††…„„ƒ„„„„…„……………‚‚‚„…‡ˆ‡…„‚‚‚‚„„ƒƒ‚‚‚‚‚‚‚‚‚ƒ‚ƒ……………„„„„…„ƒ„………„……………†††„†ˆ‡„‚‚‚€‚‚‚‚€‚‚ƒƒ‚‚‚‚€€€~€‚ƒ„ƒ‚€€€€€€€€€€€€€€€€ˆ‰ŒŒŒ‹‹‹‹ŠŠ‰‰ˆˆˆ‹Š…~{}}|zzzzzzzzy{|zywyzyyz}‚†ˆ‡ˆ†ƒ~~~€‚„…………‚~{|~}~€‚€~~‚~~~}{yxyz{}~€{zyxwutsuuuuvvwwwvvuuuvvvvvvvvvvttv{‚~yuvvussvx€‚~}}zzzyxwww{|~}{yz{|zwuvxy~€€€€€€€€~€}{}|{zzz{{{zyxxxxx||~€€€€€€€‚‚‚€€€€zyz}„…„………………………………„ƒ‚„‚‚‚„ƒ}}~}~~~~}}~~~~}}}}~~}}€}|}€ƒ„ƒ‚‚ƒ‚ƒ…‡‡‡„ƒ‚ƒ……„„ƒƒƒ‚‚„…†††…‡‡‡††………€€€€€€€€€€€€€€€€}|}||{||||||||}„‰‰†…ˆ‡‡‡‡‡ˆ‰‰ˆ†„„‡ˆ‰ˆˆ‰ˆ„€€ƒ…†††…‡†…ƒ€€ƒƒƒƒ„„„„„„„„„„„„…„„„„…††‰‡…‚‚ƒ…†ˆˆ‰ŠŒŽ‘’‘‘‘‘‘‘‘‘‘“Š‚~~‚†‹Šˆ‰‹ŒŠ‰ƒ‚ƒƒ„†‡ˆŠŠŠŠŠŠŠŠ…ƒ}|~€‚…ƒ‚ƒ†‡†„„ƒ‚‚‚‚‚‚€‚‚‚‚‚ƒ‚ƒ………‡‡‰‰‹‹‹‹ŒŒŠ‰ˆ††…†††……ƒƒƒ„„ƒƒ„„…†††„ƒƒ†ˆˆ‡†ƒƒ‚‚‚ƒƒƒ‚‚‚‚‚‚‚‚‚ƒ‚ƒ…………„„„„ƒ…„ƒ„………„„„…„…………„†ˆ‡„‚‚‚€‚‚‚‚€‚‚ƒƒ‚‚„ƒ‚€€€€€‚ƒ‚‚€€€€€€€€€€€€€€€€…‰ŒŽŒ‹‹ŒŠŠŠŠ‰‰‰‰Š‰„~{|}{{zyyyyxwxz{{yxxzxx{„‡ˆ†…„‚€€€‚ƒ……†††|{~}{|~~}~€~zxwxy|~‚ƒ||zyxwuuuuuuuvvwwwvvuuuvvvvvvvvvttv{‚}yuvvussux€‚€}z{||{zzxxvvz|}~}{yz{|zwuvxx~€€€€~€€€~~€€€€}{||{{{{{{{zyxxxxx|}~€€€€€€€€‚‚‚‚€€€~€€yyy}€„„„………………………………„ƒ‚€‚‚}~~~~}~}}|}~~}||}}€~}}|}€ƒ„ƒ‚‚ƒ‚‚ƒ…‡‡†…‚‚‚ƒ„„„„ƒƒƒƒƒ„…††……„†††……………€€€€€€€€€€€€€€€€€}{{|}|z|||||||||~ƒ‰‰††ˆˆ‡‡‡‡ˆ‰‰‡…ƒ„†ˆˆ‡ˆ‰‡…€€ƒ…†††…†…ƒ€ƒƒƒ„„„„…„„„„„„„„„„„„„……†‡†„ƒ‚‚‚ƒˆˆˆ‰‹Ž‘‘‘‘‘‘‘‘‘Ž”‘Šƒ~~‚†‹Šˆ‰‹ŒŠ‰‚‚ƒ„†ˆ‰ŠŠŠŠŠŠŠŠŠ†„€}|~€‚…ƒ‚ƒ†‡†„„ƒ‚€‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚€ƒ…†††ˆˆŠŠŠ‹ŒŒŒŒ‰‰‡†…†…††…„„ƒ„„„‚‚ƒ„…‡‡ˆ†„€‚†ˆˆ‡…„‚‚‚‚ƒƒ‚‚‚‚‚‚‚‚‚‚€ƒ………„„„„ƒƒ…„ƒ„………„„„„…„………„‡ˆ‡„‚‚‚€‚‚‚‚€‚‚ƒƒ‚‚…„‚€€€€‚‚‚‚‚‚€€€€€€€€€€€€€€€€…ˆŽ‹‹ŒŠŠŠŠŠ‰Š‰ˆˆƒ|y{{z{zyyzyxwwy||{yyzyy{€…ˆˆ‡…„‚€€€€ƒ„……†…|{}~|{{}~~~}~~}{xvxz|‚„†~|{yyxvuuuuuvvwvvuuuvvvvvvvvvvttv{‚~xuvwussvx€~{xy{}}|{zyxxy{}~}|zz{|zwuvxx}€€~€€~~~~~€€}{{{{{|{{{zzyxxxyy|}~€€€€€€€‚‚‚€€€€yyz|ƒ„„………………………………„ƒ‚‚€€€‚}||}}~€~~}}}}~~}}~€€}}|}€ƒ„ƒ‚‚ƒ‚ƒ…‡‡‡„ƒ‚ƒ‚‚‚‚ƒƒƒƒ„„………„ƒ‚„„„„„„„„€€€€€€€€€€€€€€€€€}{z|}|z|||||||||~ƒ‰‰††‰ˆˆ‡‡‡ˆˆ‰‡…ƒƒ†‡ˆ‡ˆ‰ˆ„€‚…‡ˆ‡‡††…ƒ€ƒƒ„„…„……„„„„„„„„„„„ƒ„„……„„…„„‚€………†ˆ‹
‘‘‘‘‘‘‘‘ŽŽ“‰‚}~‚†‹Šˆ‰‹ŒŠ‰‚ƒ„…ˆˆ‰Š‰‰‰ˆ‰ˆ‰‰‡…~}~€‚…ƒ‚ƒ†‡†„„ƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒ‚ƒ…††‡‡‰‰ŠŠŒŒŒŒ‹‰ˆ‡†…………†……ƒƒƒ„„‚‚ƒ„††‡ˆ†„‚…ˆˆ‡†ƒƒ‚‚‚ƒ‚‚‚€‚‚‚‚‚ƒ‚ƒ…………„„„„ƒ…„ƒ„………„„„„„…………„†ˆ‡„‚‚‚€‚‚‚‚€‚‚ƒƒ‚‚…„ƒ€ƒƒƒ‚‚‚‚ƒ€€€€€€€€€€€€€€€€ˆŠŒŒ‹‹‹ŠŠŠŠŠŠŠŠˆ‡‚{yz{y{zyz{}{zvz}~|{z{zz{„ˆ‰‰ˆ†ƒ~~~~€‚ƒƒƒƒ‚}z{{{{}}~€€~}~~}~~~|zwx{}€ƒ…†‚‚€}|{{vuuuuuuvwvvvuuuvvvvvvvvvttv{‚}yuvvussux€~|yxyz}}||{zyyy{}~|{z{|zwuvxw}€~€€~€~~~~~}~€€€€}{yz{|||{{zyyxxxyy}~~€€€€€€€‚‚‚‚€€€~€€zzz}„…„………………………………„ƒ‚ƒƒ‚ƒ„„}}}~~~~}}}~~~}}~}|}~€}|}€ƒ„ƒ‚‚ƒ‚‚ƒ…‡‡†…‚‚‚ƒ‚‚ƒƒƒ„„„……„‚€‚‚‚ƒƒ€€€€€€€€€€€€€€€€€~||{|{z||||||||{}ƒˆ‰‡‡Šˆˆ‡‡‡ˆˆˆ‡…‚ƒ…‡‡†ˆ‰‡…€‚ƒ…‡‰ˆˆ‡ˆ‡†„‚‚„„„„„………„„„„„„„„„„ƒƒƒ„……ƒ„……ƒ~|ƒƒƒ„†‰‹Œ‘‘‘‘‘‘’’‘‘ŽŽŽ‡}}‚‡‹Šˆ‰‹ŒŠ‰ƒ„†ˆˆ‰‰‰ˆ‰ˆ‰ˆ‰ˆˆ‰†‚~}~€‚…ƒ‚ƒ†‡†„„ƒ‚€‚€‚‚‚‚‚‚‚‚‚‚‚€ƒ…‡‡‡ˆˆ‰‰‰ŒŒ‹‹‰ˆ††…………†…„„ƒ„„„ƒƒ„……††‡†„€€‚„†ˆ‡…„‚‚‚‚‚‚‚‚€€‚‚‚‚‚‚‚€ƒ…††…………„„…„ƒ„………„„„„……………„†ˆ‡„‚‚‚€‚‚‚‚€‚‚ƒƒ‚‚ƒƒ‚‚ƒ„†…„ƒƒƒ„„€€€€€€€€€€€€€€€€†††ˆŠ‹Œ‹ŠŠŠŠŠŠ‹Š‰ˆƒ}z{|z|zyz|}|{wz~€}||}{{}‚‡Š‹ˆ†ƒ~~~~~€‚ƒƒƒ‚~{{|z{}~~~~~~~~~}}}~~}|yz|~ƒ…†ƒ‚€}|{vuutuuuuwvvuuuvvvvvvvvvvttv{‚~yuvvussux€}{yyzyyzzzzzzzzxz}~~}{z{|zwuvxw|€~~€~~}~~}~~}}€€}{yy{|}||{yyxxxxyz}~€€€€€‚‚‚€€~€{z{~‚…†…………………………………„ƒ‚„‚‚‚„ƒ~~~~}}}|}~~}|€€~|z{~€}|}€ƒ„ƒ‚‚ƒ‚ƒ…‡‡‡„‚‚ƒ‚‚‚ƒƒƒ„„ƒ„„ƒ‚~~€€€€€€€€€€€€€€€€€€€€}{zz{||||||||{}‚ˆ‰‡‡Šˆˆˆ‡‡‡ˆˆ†„‚ƒ…‡‡†ˆ‰ˆ„€‚ƒ…‡ˆˆ‡‡‰ˆ†…ƒƒ‚‚„„„„……††„„„„„„„„„ƒƒƒƒ„………„„‚~}‚‚ƒ„†ˆŠŒ‘‘‘‘‘’‘‘‘ŽŽŽŽŠ…|}ƒˆ‹Šˆ‰‹ŒŠ‰…†‡‰‰‰ˆ‡ŠŠŠŠŠŠŠŠ‰‡‚}~€‚…ƒ‚ƒ†‡†„„ƒ‚‚€€‚‚‚‚‚‚‚€€‚‚‚ƒ‚ƒ…‡‡ˆˆˆˆ‰‰ŒŒ‹Šˆ‡‡……„……†…„ƒƒƒ„„„„…………………„ƒ‚‚‚ƒ„ˆ‡…ƒƒ‚‚‚‚‚‚€€€‚‚‚‚‚ƒ‚ƒ…†††††…………„ƒ„………„………………††„†ˆ‡„‚‚‚€‚‚‚‚€‚‚ƒƒ‚‚‚‚ƒ…†‰ˆ†„„„…†€€€€€€€€€€€€€€€€€‚‡ŒŠŠŠŠŠ‹‹‹ŠŠ…~{}}|}{yz{|zxw{€~}}~|z|‡‹…„‚€€€€ƒ„„„„€}|~~|zz}|{zz{|}~}||}~~~z{|ƒ…†‚‚~}|{vuuutuuuwvvuuuuvvvvvvvvvttv{‚}yuvvussux€|yxy{zyxxxxyyyyxz|~~}{z{|zwuvxw|€~}€~~}}~}~~}}€€€€}{xy{}}}|{yyxxxyyz}~€~€€€€‚‚‚€€~€€{{{~‚…††………………………………„ƒ‚€€‚}||}}~~~}}}}~~}{|}}}}|}€ƒ„ƒ‚‚ƒ‚‚ƒ…‡‡†…‚‚ƒ„„„„„„ƒƒƒƒƒƒ~|}}~~€€€€€€€€€€€€€€€€€€€€~zyz{||||||||{}‚ˆ‰‡‡Šˆˆˆ‡‡‡ˆˆ†„‚ƒ…‡‡†ˆ‰ˆ„€‚…‡ˆˆ‡†ˆ‡†„‚‚„„„………††„„„„„„„„ƒƒƒƒƒ„„…‡…‚€~~~ƒƒƒ„†‰‹Œ‘‘‘‘‘‘‘‘ŽŽŽŠˆƒ~{}ƒˆ‹Šˆ‰‹ŒŠ‰†‡ˆŠŠ‰‡†‹‹‹Œ‹‹‹‹Š‡ƒ}~€‚…ƒ‚ƒ†‡†„„ƒ‚‚€€‚‚‚‚‚‚€€€€‚‚ƒ‚ƒ…‡‡ˆˆˆˆ‰‰Œ‹Šˆ‡†
…„„„…†…„„ƒƒ„„†…………„„„„ƒƒƒƒƒƒƒˆ‡…„‚‚‚‚‚‚€€€€‚‚‚‚‚ƒ‚ƒ…‡‡††††………„ƒ„………„…………††††„†ˆ‡„‚‚‚€‚‚‚‚€‚‚ƒƒ‚‚€€€‚…‡ˆ‹‰‡…„…†‡€€€€€€€€€€€€€€€€‚„‡ŠŒŒ‹ŠŠ‹ŒŠ‰‡†„|xyyyzzzzzvux~€}}~{{}€…ˆˆ‡‡†„‚€€€€‚ƒ„…zwxzz|{{{{{|}}{||{~€}z{|}~€€xwutttvwxwvutuuvyxxwvvuuttw{€|vutsstuvzzzzzxwvttuuwwxxzyy|€€|xy~~yvwxvx{€~~}}~~}}}}~~€€€€‚ƒ}yz{}~~}{zyyxxyz{|€€‚‚‚‚‚‚‚‚‚‚‚€{z{~‚…†……„„„„…††‡…………‚‚ƒƒ‚‚‚€|}~~~~€~~€€~}~€~|{|‚ƒ‚‚‚‚€„†ˆ‡…„ƒ‚‚‚‚ƒ„„ƒƒƒ…‚ƒ„…„{www{}~€€€€€€€€€€€€€€€€‚~~}|z{{{{{{{{}}ƒ…†‡‡‡‡‡ˆˆˆˆ„„‚€ƒ‰‹ˆ‡Šˆƒ€‚ƒ…‡‡‡†…‡†…ƒ‚‚‚‚„„…………„„……………………ƒƒƒƒ„…†‡………„ƒƒ‚‚ƒƒƒ„…††‡‹’““““‘‘ŽŽŽŽŽ‡†ƒ|}‚†‰Š‹ŒŒ‹Š‰‹‰‡††ˆ‹‘’““‘‹†}|…„ƒ‚ƒ†‡†„„ƒ‚€€‚‚€€‚ƒƒ„‰ˆ‡††ˆŠ‹‹ŒŽŽ‹‡…………………………„„„„„„ƒƒƒ„…………„„…†…„ƒ„„†‡†„€ƒ‚‚‚€€ƒ‚‚‚‚‚ƒ‚„ƒ‚ƒ„………„ƒ‚ƒƒƒ„„………„„„……††††…‡‰†„‚‚‚‚‚€€‚ƒƒ‚‚‚‚‚„ˆŒŠŒ‹†„†ˆˆ€€€€€€€€€€€€€€€€‚‚‚„‡Š‹ŒŒŒŠŠŠŠŒŒ‹‰ˆ‡†ƒ|zzzzzzzzwvy~|{}{{|…ˆˆ‡‡†…ƒ‚€€€€‚ƒ„……€{xyyz{~||||||{{z{{{}€|{{|}~€€zywuttuuwwvvuuuuxxwwvuuuttw{€|vvtsstvvzzzzyxwvsstuvwwxzyz}|yy~~yvwxvx{€~~}~~~~}}}~~€€€€€‚‚}yz{}~~}{zzyyyyz|}€€‚‚‚‚‚‚‚‚‚‚€{z{~‚…………„„„„…††‡…„…„‚‚‚‚‚‚€}~€€~~~}}}~~€~~€~|{|‚ƒ‚‚ƒ‚€ƒ†ˆ‡…„ƒƒ‚‚‚ƒ„„ƒƒ„„‚ƒ„…„|xyy|‚€€€€€€€€€€€€€€€€€€‚€~~~}|z{{{{{{{{|}~€ƒ„…†††‡ˆˆ‰‰††ƒ€ƒ‰Š‡‡Šˆƒ€‚ƒ…‡ˆ‡†…‡†…„‚‚‚‚„„…………„„……………………„„„„„……†………„ƒƒƒ‚ƒƒ„„…††‡‹ŒŽ‘’“““ŽŽŽŽ‡†ƒ|}†‰Š‹ŒŒ‹Š‰‰ˆ‡†‡‰ŒŽ‘’“”“’ŽŒ†}|…„ƒ‚ƒ†‡†„ƒƒ‚€‚‚€€‚‚ƒ„„‰ˆ‡†‡ˆŠ‹‹ŒŽ‹‡…………………………„„„„„„ƒƒƒ„…………„„…†…„ƒ„„†‡†„€ƒ‚‚‚€€ƒ‚‚‚‚‚ƒƒ„ƒ‚ƒ……„„ƒƒƒƒƒƒƒ„„………„„„……††††…‡‰†„‚‚‚ƒ‚€€‚ƒƒ‚‚‚‚‚„ˆ‹Œ‹†ƒ†ˆˆ€€€€€€€€€€€€€€€€ƒƒ„…‡‰‹ŒŒ‹ŠŠŠŠ‹Œ‹Šˆˆˆ‡„}}||{zyyxwy~{z{{{}€…ˆˆ‡‡†…ƒ€€€€‚ƒ„„…|yyzyz|}}}}|{zy{||{}~||}}~€€|{ywutttvvvvvuuuvvvvuuttttw{€|wvuttuvwzz{zzxwvrrstuuvvyy{}~|yy}yvwxvx{€~~~~~}}~~€€€€€‚€}z{|}}~}|{{zzyz{|}€€‚‚‚‚€{z{~‚…†……„„„„…†††„„„„‚ƒ‚‚€~€€€~~~~}}}}}}€€€~|{|‚ƒ‚‚‚‚€„†ˆ‡†„ƒ‚‚‚‚ƒƒƒƒƒ„„‚ƒƒ„„|xzz}‚ƒ€€€€€€€€€€€€€€€€€~}~}|{|||||||||}}~€€„„…†ˆ‰ŠŠˆˆ…ƒˆ‰†‡Šˆƒ€‚„†‡†……†…„‚‚„„…………„„……………………†††…………………„„„ƒƒƒƒ„„……††‡Š‹‘’““‘‘‘ŽŒ‹Š‰ˆ‡„||…ˆ‰Š‹‹Š‰ˆ‡‡††ˆ‹Ž‘’”••“‘Œ†~|€…„ƒ‚ƒ†‡†„„ƒ‚€‚€€‚ƒƒ„„ˆ‡††‡ˆ‰ŠŠ‹ŒŒŠ‡†………………………„„„„„„ƒƒƒ„…………„„…†…„ƒ„„†‡†„€‚‚‚‚€€ƒ‚‚‚‚‚„ƒ‚‚ƒ…„ƒƒƒƒ„…ƒƒ„„„………„„…………†††…‡‰†„‚‚‚‚‚€‚ƒƒ‚‚‚‚‚„ˆ‹‹„‚„‡ˆ€€€€€€€€€€€€€€€€„„…††ˆŠŒ‹‹ŠŠŠŠ‹‹ŒŠ‰‰ŠŠˆ†„ƒ~|zxwwvy~|{}{{|…ˆˆ‡†
…ƒ‚€€‚ƒƒ€|yz{z{}||}~}{zy{{z{{}}}}}}~€€~}{yvussuvvwwvutuuuuuuuuttw{€|xwvuuvwx{{{{yxvurrrssttuxy{~~~{zy~~yvxxvx{€~~€~~~~€~€€€}{|}}}}}||||{{z{|}€€€‚‚‚€{{{~‚…………„„„„…†††„ƒ„ƒ€‚‚‚‚‚€€€€~~~~~~~}}~€€~~~~|{|‚ƒ‚‚ƒ‚€ƒ†ˆ‡…„ƒƒ‚‚ƒƒƒƒƒƒ„„‚‚ƒ„ƒ}yzz}‚ƒ€€€€€€€€€€€€€€€€€€~}}}~|{|||||||||||}}}}}‚ƒ…†‰ŠŠŠ‰†‚ƒˆˆ…‡Šˆƒ€€ƒ…†…„„……ƒ‚€€€€„„…………„„……………………††††††……„„„„„„ƒƒ„„……††††‰Š‹Ž‘’“’‘Ž“’Ž‹‰‡†‰ˆ„€||€„ˆ‰Š‹‹Š‰ˆ…††ˆ‰‘’“•––”’‘†~~€„„ƒ‚ƒ†‡†„ƒƒ€€‚€€‚ƒ„„…†††‡‡ˆˆ‰‰Š‹‹Š‰‡†………………………„„„„„„ƒƒƒ„…………„„…†…„ƒ„„†‡†„€ƒ‚‚‚€€‚‚‚‚‚ƒƒƒ‚ƒ„„ƒƒ„„…†„„„„…………„…„……†…††…‡‰†„‚‚ƒ‚‚€€‚ƒƒ‚‚‚‚‚‚‚ƒˆ‹‘‹„€ƒ†ˆ€€€€€€€€€€€€€€€€…†‡‡†‡‰‹‹‹‹‹‹‹‹‹Œ‹‰‰Š‹ŠˆŠˆ…‚~{xwwvy}}{{}€…ˆˆ‡„ƒ‚€~~~~~€€{yz||}~z{}}}|{zyyyyyy{|~~~€~}{yvtstuvwwvvuuuuuvvvvttw{€|yxwvvwxy{{|zzwvurrsrssttwy|~~}|{y}yvwxvx{€~~€€€~~~~€~~~€~|~}}}}}~~~}|{{|}}€€‚‚‚‚€{z{~‚…†……„„„„…†††„ƒ„ƒ€ƒ‚‚‚€€~}}}~~~~~~~}}}~~~}}}}}~|{|‚ƒ‚‚‚‚€„†ˆ‡†„ƒ‚‚‚ƒƒƒƒƒ„„„ƒ‚‚ƒƒ}zyy|‚€€€€€€€€€€€€€€€€€€~}||~~}|||||||||||||||||€‚ƒ…†ˆˆ‰‰…ƒ‡ˆ…‡Šˆƒ€ƒ…†‡‡†…‡…„ƒ‚„„…………„„……………………„…††‡††…ƒƒ„„„„„„………†††††ˆˆŠ‹Ž’““‘‘‘‘’‘Š‡…„‰ˆ…|{€ƒ‡ˆ‰ŠŠ‰ˆ‡……‡ˆ‹‘’“•––•“’Ž…~~€ƒ„ƒ‚ƒ†‡†„„ƒ‚€€€€€€€€‚‚„„………††‡‡ˆˆˆˆ‰‰‰‰ˆ‡‡………………………„„„„„„ƒƒƒ„…………„„…†…„ƒ„„†‡†„€‚‚‚‚€€‚€‚‚‚„ƒ‚‚ƒƒƒ„……………„„„…………………………………†…‡‰†„‚‚ƒ‚‚€‚ƒƒ‚‚‚‚‚‚‚‚ƒ‡Š’‘Œƒ€‚†‡€€€€€€€€€€€€€€€€†ˆ‰ˆ†…ˆ‹‹‹‹‹‹‹‹‹ŒŠˆˆ‰ŠŠ‰Œ‹ˆ…‚|{zy|}{{|…ˆˆ‡„ƒ€~~}}~~€€€~zy|}}}z{||}}|||{{|{z|~}zxutuuuvvvvvvvvwwxxxttw{€|zyxwwxyz||{{ywutsssssssswz}}|{{y~~yvxxvx{€~~~€€~~~~~~|}~~}~}}|}~}}|}}~€€€‚‚€{{{~‚…………„„„„…†††„ƒ…ƒ‚‚‚‚‚€€~}}}~~}}}}}}||}}}}}}||~|{|‚ƒ‚‚ƒ‚€ƒ†ˆ‡…„ƒƒ‚‚ƒƒ‚ƒ„„„ƒƒ‚‚ƒƒ~{yy|‚€€€€€€€€€€€€€€€€€€}|{|}~}|||||||||}}}}}~~~~‚ƒ„…‡‡ƒ€‚‡ˆ…‡Šˆƒ€ƒ…‡ˆ‰‰ˆ‡ˆˆ†…ƒƒƒƒ„„…………„„……………………ƒ„…‡††……ƒƒƒ„„„……††††††††‡‡ˆŠŒ‘“““’‘‘‘‘’Ž‹‰‡…„Š‰…€{{ƒ‡ˆ‰ŠŠ‰ˆ‡…†‡ŠŒŽ‘’”––•”’Ž…}~€€‚„ƒ‚ƒ†‡†„ƒƒ€€€€€€€€€€€€‚‚ƒ„………„…†‡ˆ‡‡†ˆˆ‡ˆ‡‡‡‡………………………„„„„„„ƒƒƒ„…………„„…†…„ƒ„„†‡†„€ƒ‚‚‚€€‚€‚ƒƒƒ‚ƒƒƒ…††……„…………………………………………†…‡‰†„‚‚‚ƒ‚‚€€‚ƒƒ‚‚‚‚‚‚‚‚ƒ‡Š‹…ƒ†‡€€€€€€€€€€€€€€€€‡‰Šˆ……‡ŠŒŒŒŒŒŒŒŒŒŠ‡†‡ˆˆ‡ŠŠˆ‡†„ƒƒ€~€ƒƒ}{|{{}€…ˆˆ‡…„ƒ€~€€€€{z|~||}{{{|||}}~}}}|{}€€€€~|ywuuuuuvvwwwwxxyyzzttw{€|{zyxxyz{|||{ywuttttstsssvz~}{{|y~~yvwxvx{€~~~~~€~~~~}||~~}€}||}€€~}}}}~€€€€€€€€€€‚‚‚€{z{~‚…†……„„
„„…††‡…„…„‚‚ƒ‚‚‚€}~~~~}}~~~}||}}}}}}}|~|{|‚ƒ‚‚‚‚€„†ˆ‡…„ƒ‚‚‚ƒƒ‚ƒ„„„ƒƒ‚‚‚‚~|zz}‚ƒ€€€€€€€€€€€€€€€€€|{{|}~~}{{{{{{{{}}~~€€€~~~€€„„~‡ˆ…‡Šˆƒ€‚„†‡ˆˆ‡†ˆ‡…„ƒ‚‚‚„„…………„„……………………„„†††…„‚‚ƒƒƒ„………††††††††††‡ˆ‹Ž‘““““““’’’ŽŒŠˆ‡……‹‰…€{{‚†‡ˆ‰‰ˆ‡††‡‰ŠŒŽ‘“•–•“’…}}€€€‚„ƒ‚ƒ†‡†„„ƒ‚€€€€€€€€€€€€€€€‚‚ƒƒ„……†ƒ„†‡ˆ‡†…‡‡††††‡‡………………………„„„„„„ƒƒƒ„…………„„…†…„ƒ„„†‡†„€‚‚‚‚€€‚€€‚‚„ƒ‚‚ƒ„„…………………………………………………………†…‡‰†„‚‚‚‚‚€€‚ƒƒ‚‚‚‚‚ƒƒ‚ƒ†‰ŽŒ†ƒ„††€€€€€€€€€€€€€€€€‡Š‹‰…„‡ŠŒŒŒŒŒŒŒŠ†…†‡‡†‡‡ˆˆˆ‰‰‰…ƒƒ…ƒ|yy{{|…ˆˆ‡‡†„‚€€€‚‚‚‚‚‚€}{}}|{|{{{{{|}}|zz{yxz€€€€€~~~}zxvvuutuvwxxxyyz{{|ttw{€|{{yyyy{{}||{ywusttttttttvz~}{{|y~~yvwxvx{€~~~~~~~€~~~~}{|}~~€}||}€€~}}}~€€€€€€€€€€‚‚‚€{z{~‚…†……„„„„…††‡………„‚‚ƒƒ‚‚‚€|}€‚}~~~}}~~~~~}}}~|{|‚ƒ‚‚‚‚€ƒ†ˆ‡…„ƒ‚‚‚„ƒ‚ƒ„„„ƒƒ‚‚‚‚~|{{~ƒ„‚‚€€€€€€€€€€€€€€€€|{z|}~~}{{{{{{{{}~~€‚‚}}}~~~~~‚‚€}€†ˆ†‡Šˆƒ€€ƒ…†…„ƒ…„ƒ€€€€„„…………„„…………………………†‡†„‚‚‚ƒƒ„………‡‡‡††††††…†‡Š‘““””••”’‘Œ‹Šˆ‡†…‹‰…€{{~‚†‡ˆ‰‰ˆ‡††‡‰‹ŽŽŽ‘“••”“’…}}€€€„ƒ‚ƒ†‡†„ƒƒ‚€€€€€€€€€€€€€€€‚‚ƒƒ„…††ƒ„†ˆˆ‡†…‡††……†‡ˆ………………………„„„„„„ƒƒƒ„…………„„…†…„ƒ„„†‡†„€ƒ‚‚‚€€€€€ƒ„ƒ‚ƒ†…„„„„…†…………………………………………†…‡‰†„‚‚‚ƒ‚€€‚ƒƒ‚‚‚‚‚ƒƒ‚ƒ†‰ŒŽŒ‡„…††€€€€€€€€€€€€€€€€†‡‰‰ˆˆ‰ŠŒ‹‹‹‹‹Š‹‰†…„„…ƒ…†…‡Šˆ…ƒ…†…ƒ€vvx~†ŠŠ‡‡‡…ƒ€€ƒ‚‚‚‚ƒ„……€|}~}}~~|z{}{}€€~~~€~}{yxwqsuvuvxz|{xwvwyzyxy|}|zwuuw{}~}zxvvvwwvusssstwy{}}|{yy}~zxyzyy|~~~€~~}~€~~~~~~~~~~~}}|~}}}}€‚€~|||}~€€ƒ‚‚€€‚‚‚‚‚€{z{~‚…†……………ƒƒ…†††……„‚€‚€€€y{~€€~|~|z|€|||}~~}||~|{|‚ƒ‚‚ƒƒƒ‚‚„††††„‚‚ƒ„ƒƒ‚ƒƒ„„ƒƒƒƒƒ‚€‚€„…‚‚ƒ€€€€€€€€€€€€€€€€~z{|{~}{{||||{{~~€€€€}}~~}}~‚‰ŠŠ†€‚€‚…‡†ƒ€‚‚€~€…‡…„…†…†…„ƒ„„‚‚‚‚‚††††††††††‡‡‡‡††„„…†Š‘”•””••”‘ŽŒ‹‰‡††††ˆ‡„{{ƒ†‡‰ŠŠˆ†„…‡‹ŽŽŽŽ’””’Ž†~~€ƒ„‚‚ƒ…†…„ƒ‚€€€€€€€€€€€€€‚‚€€€€‚„…†‡‡‡‡‡††††„„……††‡‡‡††……„„ƒ„ƒƒ‚‚ƒ…………„„„…††……………………‡†…ƒ‚‚‚‚‚€€€€€€‚‚ƒƒƒ„„„ƒƒƒƒ‚„„„„„„„„†……„„………†ˆ‰ˆ…‚‚‚‚ƒƒ‚‚‚‚ƒ‚‚‚‚‚ƒƒ…„„ƒƒ„……€€€€€€€€€€€€€€€€‰Š‹Šˆ‡‡ˆŒ‹‹‹‹ŠŠŒ‹‰‡†…„„„††…†‡…y|~~}|~€{{}‚…ˆ‡†„„„ƒ‚‚ƒƒ……„„„…†‡„{}}||~}{|~|~‚€€€€~~€~}|{zzvvvtssvy}|{yyyyywvw{}|zwuuxz}|{ywvvvwwvuttttuxz|~~}|zx|}ywyzxy|~~~~€~~~€€€~~~~~~~~~~}}}}~}}}}~€€~}|}}~~€ƒ„‚‚ƒ‚€€€€‚‚‚‚€{z{~‚…………………ƒƒ…†ƒ„„„„„ƒ‚€€€|}~~~~}}|||}~~}|~~|{|‚ƒ‚‚ƒƒƒ‚‚„††‡†„‚‚ƒƒƒƒƒ„„ƒƒƒƒƒƒ‚€‚€„„‚€€€€€€€€€€€€€€€€~z{|{~}||{{{{|
|}}€€€€€~}}}}~€†††ƒ}~€€ƒ……‚€€€€€€…‡…„…†…†…„ƒƒ‚€‚‚‚‚‚……………………†††‡‡‡†††††‡ˆ‹Ž‘‘‘“••“‘Œ‹‰‡††††ˆ‡„{|ƒ‡ˆ‰‹Š‰‡††ˆ‹ŽŽŽ‘””’Ž†~~~€ƒ„‚ƒ…†…„ƒƒ€€€€€€€€€€€€‚‚€€€ƒ„…†‡‡‡†‡††††„„……††‡‡††……„„ƒƒƒƒƒƒƒ„„…„„„„„……†……………………‡†„ƒ‚‚‚‚€€‚€€‚‚‚ƒƒƒ„„„„ƒƒƒƒ„„„…„„„„†……„„……††ˆ‰ˆ…‚‚‚‚ƒƒ‚‚€‚ƒ‚‚‚‚ƒƒ‚‚ƒ„…€€€€€€€€€€€€€€€€Š‹‹Šˆˆˆ‰Œ‹‹Š‹‹Š‰ŠŠŠ‰ˆ†…„…††„…„}uwxuqpuz|„†‡‡‡ˆ„„ƒƒƒ„„„……„„……†‡ƒ~{{|{{}€~||€~€‚€€€€~€~~~}}|||{zxursux~~~}|zyxttvz€€~|zxvvwz{yxwvvvvwxwvuuuvvyz}~|{x|}ywxyxy{~~~€€~€€€~~~~}}}~}}|}~€€~}}}~~€‚ƒ„€ƒƒƒ€~€€€‚‚‚€€{z{~‚…†……………ƒƒ…†………„„‚€‚€€€€~}||}~~|}~~~||}~~|}}~|{|‚ƒ‚‚ƒƒ‚‚„…†††„‚‚ƒƒƒƒƒ„„ƒƒƒƒƒƒ€ƒ‚„„€‚€€€€€€€€€€€€€€€€~{{|{}|}|{zz{|}||~€€€€~}{|€‚€}|~€€ƒƒ‚~~~€…‡…„…†…††…ƒ~~€€‚‚ƒ„„……………………††‡‡‡††…ˆ‡‡†‡ˆŠŠŒ“”“‘‹Š‰‡††‡‡ˆ‡„{{€ƒˆˆŠ‹‹ŠŠ‰ˆŠŽŽŽ‘““‘Ž…~~~€ƒ„‚‚‚…†…„ƒ‚‚€€€€€€€€€€€€€€€€‚‚„…††††††‡†‡‡…………††††………„„ƒƒƒ‚ƒƒ„„„„„„„ƒƒ„„…………………………‡†…ƒ‚‚‚‚‚€€‚‚‚‚‚‚‚‚ƒƒ………„„„„„……………………†……„………††ˆ‰ˆ…‚‚‚‚ƒƒ‚‚€€€‚‚‚‚‚‚ƒƒ‚„…€€€€€€€€€€€€€€€€‡ˆŠŠŠŠ‹‹ŠŠ‹‹Œ‹Š‰ŠŠ‹‰ˆ†…„††„ƒƒzwywogejpz€‡ŠˆˆŠŒ‡…„ƒ‚‚‚ƒƒ‚‚‚ƒƒ„‚}y{{zz|~|{}€ƒ„ƒ€‚€€€€~€€€~}}|||||ywvxz~€€|ywssuy~€~|{xwvxyzwvvvuvvvxxwvvwwxxy|~~}{zw||yvxyxy|~~~~€€€‚€€€€€~~~~}~}}~}}||}~~~}~~~‚‚€‚ƒ‚€€€‚‚‚‚€{{{~‚…………………ƒƒ…†††……ƒ‚€€€€~}||}~€€€~}}~~}}|}~~~|{|‚ƒ‚‚ƒ‚‚ƒ…†‡†„‚‚ƒ„ƒ‚ƒ„„ƒ‚ƒƒ„„„ƒƒ…ƒƒ†…‚€‚€€€€€€€€€€€€€€€€{{|{}~|}|{{z{|}{{}~€€€€€€€~~z|€ƒƒ‚€~~~}}}€€ƒƒƒ€€€€…‡…„…†…†‡…ƒ}|}€‚ƒ„…†………………………††‡††……‡‡†‡†‡‡ˆŠ‰‰‹ŽŽ‹Šˆ‡‡‡‡ˆˆ‡„{|ƒˆ‰‰‹‹ŒŒŒŠŒ‘’‘Ž‹Œ„~}~~€„„ƒƒ…†…„ƒƒ€€€€€€€€‚ƒƒ……††††‡†‡‡‡…………††††„„„„ƒƒ‚‚‚ƒ………ƒƒƒƒƒƒƒ„„………………………‡†„ƒ‚‚‚‚€€‚‚‚‚ƒ‚‚ƒƒƒ††…………„„††††††††……„„„…†††ˆ‰ˆ…‚‚‚‚ƒƒ‚‚€€€‚‚‚‚ƒƒ€„…‡€€€€€€€€€€€€€€€€†ˆ‰ŠŠŠŒ‰‰Š‹ŒŒ‹‰‰‹‹‹‰‡†„††„„ƒy{zwmc`fmz‰‹ŠˆŠŒˆ†ƒ‚‚€~ƒ‚‚‚‚|yz{yz{{yy|…††‚‚€€€€€€€‚€~}{zyz|~}}~€€}{yutvz€~|{zxxwxxuuvvvuuuxwwvwwxywx{|}|zyx|}ywxyxy{~~}€€€‚€€€€€€€€€}}~}~~~~~~||||}~}}~~~~~€€€€€€€€€‚‚‚‚{z{~‚…†……………ƒƒ…†„„„„„ƒƒ‚‚€€€||}}~~€€~}~~€~~|{|‚ƒ‚‚‚‚ƒ…†††„‚‚ƒ„ƒ‚‚‚ƒ‚‚‚„…††††‡„…††€€€€€€€€€€€€€€€€€{||{}~{{{|{|{|{z{|~~~~€€€~}y|„…ƒ€~~~}}~€€‚‚‚‚‚„…ƒ‚‚€€€…‡…„…†……†‡ƒ||}€€‚„…††…………………………††††……„„††ˆˆˆˆ‰‡†‡ˆ‰‰‡Š‰ˆ‡‡‡ˆˆˆ‡„{{€ƒ‡ˆˆ‰‹ŒŽŽ‹ŒŽ‘‘”“’’’Œˆ‹ƒ}}~~€„„‚‚‚…†…„ƒ‚‚€€€€€‚‚ƒ„„…………††‡‡ˆˆ††††…………ƒƒƒƒƒ‚ƒ‚‚‚„„…„„ƒ„ƒƒƒƒ„…
………………………‡†…ƒ‚‚‚‚‚€€ƒ‚‚‚‚‚‚‚‚ƒƒ†††……………††††††††…„„„……†††ˆ‰ˆ…‚‚‚‚ƒƒ‚‚€€€‚‚‚‚ƒƒ‚‚„†ˆ‰€€€€€€€€€€€€€€€€ˆ‰ŠŠ‰ˆ‰Š‰‰ŠŒŽŒ‹‰ŠŠ‹ŠŠ‰‰†ˆˆ‡…„~yyxtledjp|ƒŠŒ‰‡‡ˆ‡…‚‚ƒƒ€…„ƒ‚‚‚ƒƒ‚}yz{zz|zxx{€„††ƒ‚€€€€€€€‚‚€~|zyxx|€‚‚€€~€~}|xxx|}{{zyxwwvvwwwvvtswwvvvwxywy{}}|{yy}~zxzzyy|~~~~€€€€‚€€€€€€€}}}~~~~~~|{{{|||}~~~~~€‚‚ƒƒ‚€€€€€€€€€‚‚‚{{{~‚…………………ƒƒ…†…………„ƒ‚€€z{}~~}}}~~~|||}~~}|~|{|‚ƒ‚‚‚ƒ„†‡†„‚‚ƒ„„ƒ‚‚ƒ‚ƒ…†‡‡‡†„„…ƒ€~€€€€€€€€€€€€€€€€€||}{}~{{{||||{{{||}}}}|}~~~~}}z}„„ƒ€~}|}~€€€‚ƒƒ‚ƒ„…„„ƒ‚€€…‡…„…†…„‡‡…}~€‚‚ƒ„……………………………………†††……‚ƒ…‡ˆ‰‰‰Šˆ……††…„‰‰ˆ‡‡‡ˆ‰ˆ‡„{|ƒ†††‡‰ŒŽ‹Œ’“”–•””“Œ‰Š‚||}~€„„ƒƒ…†…„ƒƒ€€€€€€‚‚‚ƒƒ„„…„……†‡ˆˆ‰††††…………ƒƒƒƒƒƒƒƒ‚ƒƒ„„„„„„„ƒƒƒ„…………………………‡†„ƒ‚‚‚‚€€‚‚‚‚‚‚ƒƒƒ††…………„„††††††††„„„„„††‡†ˆ‰ˆ…‚‚‚‚ƒƒ‚‚€‚‚‚‚‚ƒƒ„„„…†ˆ‰Š€€€€€€€€€€€€€€€€‡ˆ‰‰ˆˆ‰Š‹‹ŒŽŒŠ‹ŠŠ‰ŠŠ‹‹‰‹‹‰‡„~wwuqlhinsz€‡‹Š‡‡‡ˆ…ƒƒ„…„‚††…„„„„„ƒ}z{|zz||zy{~‚‚ƒ€~}~€€€€€€€€€~}|zyyx{€~~}}}~€|{|~|{{{zywvuxxyxwusrvvvuvwxyz{}€}|z~{y{|zy|~~~€€€}}}}~~~|{{{{|{|~€~}~‚‚‚‚€€€€€€‚‚‚‚{z{~‚…†……………ƒƒ…††††…„‚€‚€€€{|~~}{|~~}|}}{{|||||{~|{|‚ƒ‚‚€‚„†††„‚‚ƒƒ„„ƒ€‚‚ƒ„„…„„ƒ„‚€{zz€€€€€€€€€€€€€€€€€|||{|~{||{{{{||||}}}|{z||}}~}}||~ƒ„ƒ€}{{}}€ƒƒ‚‚ƒ„‚‚‚€€€€…‡…„…†…ƒ†ˆ…~€ƒƒƒ„„„„„„…………………………†††……„ƒ„†ˆŠŠ‰‰ŒŠ‡…†‡†…‰ˆ‡‡‡ˆ‰‰ˆ‡„{{ƒ„„„…ˆ‹Ž‘Š‹ŒŽ‘“•—””””•“Œ‰{|}}€„„‚‚‚…†…„ƒ‚‚€€€‚€€€€‚‚‚ƒƒ„„„„„„…†‡ˆ‰‰‡‡††……„„ƒƒƒƒ„ƒ„„ƒƒƒƒƒ„„…„„„„„……†……………………‡†…ƒ‚‚‚‚‚€€‚€€‚‚‚‚‚ƒƒ†…………„„„†…†…†…††„„„„……‡‡†ˆ‰ˆ…‚‚‚‚ƒƒ‚‚‚‚‚‚‚‚‚ƒƒ††……†‡ˆ‰€€€€€€€€€€€€€€€€„…‡ˆˆ‰‹ŒŒŒŽŽ‹‰‹‹‰‰‰ŠŒŒŽ‹‡„}vxupljkorv|ƒ‰ŠŠ‰‰Š‡„„††…‚‡†…„„„„„ƒ~z{|{{|~|z{}~ƒ}|}~€€€€€€€}}}|{{zzz|}~}~~}{{|~ƒ~~~|{{{zywutyyzyxusqvvuuvwxy|~€‚‚€~{€|z|}{y|~~~€€€€|}}~~~|{zz{{z|~€€€~~€€€‚‚€~€€€‚‚‚‚‚{z{~‚…†……………ƒƒ…†„„„„„ƒ‚‚‚€€€}}~~~~}|y}€}|~€~}||||}~~|{|‚ƒ‚€‚€€‚„†‡†„‚‚ƒƒ…„‚€€‚ƒƒƒƒƒ‚€~}xvw€€€€€€€€€€€€€€€€€||}{|}z}|{zz{|}}}}}|{zy{{|}}||{}~€‚ƒƒ‚|yz|~}{~€‚ƒ‚ƒ€€€€€€€€…‡…„…†…‚†ˆ†………„„„ƒƒƒ††††††††„……††……„„…‡‰ŠŠˆˆŽ‹ˆ‡ˆ‰‰ˆ‰ˆ‡‡‡ˆ‰Šˆ‡„{{ƒƒƒƒ„†ŠŽ‘ŠŠŒŽ‘”—™“’“”–•’ˆ{{}}€„„‚ƒ…†…„ƒƒ€€‚€€€€‚ƒƒƒƒƒ„„„„„…†‡ˆ‰‰‡‡††……„„ƒƒ„„„„„„„ƒƒ‚‚ƒ…………„„„…††……………………‡†…ƒ‚‚‚‚‚€€€€€€‚‚ƒƒƒ…………„„„„……………………„„„„…†‡‡†ˆ‰ˆ…‚‚‚‚ƒƒ‚‚‚‚ƒ‚‚‚‚‚ƒƒ‡†………††‡€€€€€€€€€€€€€€€€…‡ˆ‡……ˆ‹ŒŒ‹‹ŠŠ‹‹Œ‹‰ŠŒŒ‰ŒŒ‰ˆ†€ywtoljjkkn}‡‰ŠŠ‰‰‰ˆ
‡…„„„ƒ†‡‡†„ƒ„†„€|z|}|zyyz{}~~~€€€€€€€€‚€~~~~~~}}}{|}~}~~}{yz|}}}}}‚~|{{|}|xvyyywtrrsuuvtsrtvw{~~}yz~zxyyxw}€€€~€€€€€€€~~€€~~~}}|||{{~~~€‚‚‚€€‚‚ƒƒƒ‚€€€€€€€‚‚ƒƒ}||~‚†‡‡……„„„„……„ƒƒ„ƒ€€‚€‚‚€~~~}}}}~~}}}~€|||||||||{{}€‚‚ƒ‚€‚„…‡†…„ƒƒƒƒ„ƒƒƒƒƒƒ‚„ƒ„„ƒ€|yz{|}}||{€€€€€€€€€€€€€€€€~{|}}~||{{zz{{|{~~zy{|{}||||{{{}~ƒ„……~||}}}||ƒƒ‚‚‚‚€~€~€ƒ…††…„‚‡Š‡€}„„„„…………………………………„„„……………‡‡‡ˆˆˆ‰‰Š‰ˆ‡††‡‡‰‰‰‰‰ˆˆˆˆ†…„~y}††…„„‡Œ‘•‹Œ“–—”•”’“•”†‚~~~}€„†‚…‡†…†„‚€€€€€€ƒ„ƒ‚‚ƒ„„„„……††††……„„ƒƒ„„„„ƒƒƒƒƒƒƒƒ„„„„„„…………„„„„„„„ƒ‚…††„‚€€‚‚‚‚‚€€‚‚‚‚€‚ƒƒ‚ƒ„……„„„„ƒƒƒƒƒ„……†††…†…†…†…†ˆ‰ˆ…‚‚‚‚‚€€‚ƒƒ‚‚‚‚‚‚‚„†‡…††„‚‚„†€€€€€€€€€€€€€€€€…‡‰ˆ†…ˆ‹ŒŒ‹‹Š‹‹‹‹ŠŠŠ‹‹‹‹‰‹Œ‰ˆ†€yxvrnjgggn|†ˆŠŠ‰‰ˆˆ†…„ƒƒƒ‡‡ˆ‡‡††‡€}zy{}}{yzz|}~€~€€€€€€€€€€€~~~~}}|{{}€}}}|{z{}}~}}~|{z{|{xuyzzxusssvwwvttvxy|~}}~{z~~zwyyxx}€€€€~€€€€€€~€€~~~}}|||||~~€€‚‚€‚‚ƒƒ‚‚€€€€€€‚ƒƒ~}}‚…††……„„„„………„„…„€‚€‚‚€~~~~~}}}}~~~~~~|||||||||{{}€‚‚ƒ‚€€‚„…‡†…„ƒƒƒƒ„ƒƒƒƒƒƒ‚‚ƒƒ„ƒ~|z{|}}}|{€€€€€€€€€€€€€€€€}z{}|~||{{zz{{|z|}}|}{z}||||{{{|}€‚ƒ…„„~||}}}|}‚‚‚‚ƒ€€€}}}~‚ƒ…†††„„ƒ‡‰†€}€„„„„…………………………………†††††‡‡‡‡‡‡‡‡ˆˆˆ‰‰ˆ‡†‡‡‡‰‰‰‰ˆˆˆˆ‰ˆ‡…y|ƒ†…„„†‹’ŽŒŠ‹“•–“”“’“–”†‚~~}}€„†‚…‡…„†„‚€€€€€€€ƒ„‚‚‚„„„„„……††††……„„ƒƒ„„„„ƒƒƒƒƒƒƒƒƒ„„„„„„………„„„„……„ƒ‚‚…††…€€‚‚‚ƒ‚€€‚‚‚‚€‚ƒƒ‚‚ƒ„……„„„„ƒƒƒƒƒ„……††…†…†…†…††ˆ‰ˆ…‚‚‚‚‚€€‚ƒƒ‚‚‚‚‚‚‚„†‡…††„‚‚ƒ†€€€€€€€€€€€€€€€€‡‰Š‰††‰ŒŒŒ‹Š‹Š‹‹‰Š‹‹ŠŠ‰Š‰‹‹‰ˆ…zyyuogccdn|†ˆ‰Š‰‰ˆ‡†„ƒ‚‚‚……†‡ˆˆ‡…}{yy{}}|zz{|}~€€€€€€€~€~~~|zy|€‚€}|||{{|~€}~~}~€~|{zz|zxv{{{yvttuvwxvuuwy{}}||~~|z~zxyyxx}€€€~~€€€€€€~~€€~~~~}}||||~~€€‚€‚‚ƒ‚‚€€€€€€€€‚‚ƒƒ~‚„…………„„„„……†„„†…‚‚‚‚‚€~}}}}}~~~~~€€~}||||||||||{{}‚‚ƒ‚€‚„…‡†…„ƒƒƒƒ„ƒƒƒƒƒƒ‚€‚ƒ„ƒ€~}yz{|}}}|€€€€€€€€€€€€€€€€}y{||}~{|{{{{{{||z{|~|{{}||||{{{z}ƒ……„ƒ~||}}}|~ƒ„€€€}}~††‡‡‡†…„ƒ†ˆ†~€„„„…„………………†…†………†††‡‡‡‡ˆ††††‡‡‡‡‰ˆˆ‡‡‡‡ˆ‰ˆ‰ˆˆ‡ˆ‡‰‡ˆ‡‚|}‚†…„ƒ…ˆŒŒŠ‰ŠŽ’””’““’”–•…~}}}€„†„‡…„…„‚€€€€€„„ƒ‚ƒƒ„„„„……†††………„„„ƒ„„„„ƒƒƒƒƒƒƒƒ„„„„„„………„„ƒ„…………„ƒƒ…††„‚€€‚‚‚‚‚€‚‚‚‚€‚ƒƒ‚ƒ„………„„„„„ƒƒ„„……†††…†…†…†…†ˆ‰ˆ…‚‚‚‚‚€€‚ƒƒ‚‚‚‚‚‚‚„†‡…††„‚ƒ…€€€€€€€€€€€€€€€€ˆŠ‹Š‡‡‰ŒŒŒ‹‹Š‹‹‹‰‹Œ‹Š‰‰Š‰ŠŠŠˆ„zzzumd`bfqˆ‰ŠŠ‰‰‡†…„‚‚‚‚‚„‡‡„‚|{zz{{{{{||}}~€€€€€€~}~€€€€}|{}€}|}}|||~~~}~€€~}{|||zx{||zwuuvuvwvtuwy{~~}}~|z~~zwyyxx
~€€€ƒ‚‚€€€€€€€~€€}~~}}||}~€€€€€‚‚‚€€‚‚‚€€€€€€‚ƒƒƒ€‚„„ƒ……„„„„……†„„……‚‚‚€‚‚€~|||}}~~~}}}~}}|{|||||||||{{}€ƒ‚ƒ‚€€‚„…‡†…„ƒƒƒƒ„ƒƒƒƒƒƒ‚‚ƒƒ|zyxyz}}~~~€€€€€€€€€€€€€€€€|yz|{}~{|{{{{{{|{xz{{{~}||||{{{y|…††ƒ‚~||}}}|€€€€ƒ„ƒƒƒ€€†††‡‡‡††ƒ…‡…€„„„„……………††…†…†…†……††††‡‡††††††††ˆˆ‡‡†‡‡ˆˆˆˆˆ‡‡‡‡…†‡ˆ…€‚†…ƒƒ„†‰‹‹‰ˆ‹Ž‘““‘“““”—”…}}}}ƒ…€„†…„…„‚€€€€€€€ƒ„‚‚‚„„„„„……††…………„„„„„„„„ƒƒƒƒƒƒƒ„ƒ„„„ƒ„„…„„ƒƒ„……††…„„…††…€€‚‚ƒ‚‚€€‚‚‚‚€‚ƒƒ‚ƒ„…………„„„„„„„……†…†…†…†…†…††ˆ‰ˆ…‚‚‚‚‚€€‚ƒƒ‚‚‚‚‚‚‚„†‡„……„‚…€€€€€€€€€€€€€€€€‰ŠŒŠ‡‡‰ŒŒŒ‹Š‹Š‹‹ŠŠŠ‰‰‰‰ŠŠ‰‰Šˆƒ~z{ytjb`eju‚ŠŠŠŠ‰‰‡†…ƒ‚€ƒ……ƒ~~|{zz}}}}~~€€€€€~~~€€€}~~€€€€~€~|{|~~~~||}~~}}€}|~}|{|}}zxuuvstutssvxz}€}yz~zxxyxy~‚€€€€‚‚ƒ€€€‚€~€€}~~}}}~~€€€‚‚‚€€‚‚‚€€€€€€€€‚‚ƒƒ„‚€€ƒ„„ƒ……„„„„………„„…„€‚ƒ‚€}|||}}}~~{{{z{{|}|||||||||{{}‚‚ƒ‚€‚„…‡†…„ƒƒƒƒ„ƒƒƒƒƒƒ‚€‚„|vtsvwz|~~€€€€€€€€€€€€€€€€|yz|{|~{{||||||{{{|}zz}}||||{{{y|„‡…ƒ‚~||}}}|€€€€ƒ„„„„ƒ‚ƒƒƒ„„††ˆˆƒ„…„‚‚ƒ„„…„…………††††††††……††††‡‡†††††…†…ˆ‡‡†‡‡ˆˆˆ‡ˆ‡‡†‡†……‡ˆˆ„‚…„ƒƒ„†ˆ‰Šˆ‰ŠŽ‘’’’”•”•–“Ž…€~}}|ƒ…€€ƒ†„ƒ„„‚€€€€€„„ƒ‚ƒƒ„„„„……††………„…„„„„„„„ƒƒƒƒƒƒƒƒ„„„„ƒƒ„„„„ƒƒ„„…††……„…††„‚€€‚‚ƒ‚‚€‚‚‚‚€‚ƒƒ‚ƒ„†…………„„„………………†††…†…†…†…†ˆ‰ˆ…‚‚‚‚‚€€‚ƒƒ‚‚‚‚‚‚‚„†‡„……ƒ€‚„€€€€€€€€€€€€€€€€‰‹ŒŠ‡†‰‹ŒŒ‹‹Š‹‹‹Š‡„„…ˆŠŠŠˆˆŠˆƒ}z{xrjcbfku‚ŠŠŠŠ‰‰‡†…„‚‚€€ƒ„„ƒ‚€‚ƒ‚|||~~~~~~€€€€€~}~~€€€€~€€€€‚{y{~~€}|}~~€}|~€€‚€}}}~}||}|{wuuvrsttstvxz~€€€}xz~~zwyyxy~‚€‚€€€€€~€€€€‚€~|€€~~}}€€‚‚€€€€€€€€‚ƒƒƒ‚€ƒ……„……„„„„………„ƒ…„€‚€‚‚€~|||}}}}}{zyyy{|}|||||||||{{}€ƒ‚ƒ‚€€‚„…‡†…„ƒƒƒƒ„ƒƒƒƒƒƒ‚€ƒ…ztqquwy|~€€€€€€€€€€€€€€€€€€€}zz}{}~{{||||||{|‚…‚}zz}||||{{{z}€„……„ƒ~||}}}|~ƒ„ƒ„ƒ‚€‚‚…†ˆƒ„„ƒƒƒƒƒ„„„……………†††††††††††‡‡‡‡ˆ††††††……ˆ‡‡‡‡ˆˆ‰‡‡‡‡††††‡‡‡ˆ‡…ƒƒƒƒ„†ˆ‰‰‰‰Œ’’‘“•–••–‘‹„€}}||ƒ„€ƒ…„ƒ„„‚€€€€€€€€€€€€€€€ƒ„‚‚‚„„„„„……††……„…„…„„„„„„ƒƒƒƒƒƒƒ„ƒ„„„ƒƒƒ„„„ƒƒƒƒ„…………„…††…€€‚‚‚ƒ‚‚€€‚‚‚‚€‚ƒƒ‚ƒ„††…………„„………†…†…†…†…†…†…††ˆ‰ˆ…‚‚‚‚‚€€‚ƒƒ‚‚‚‚‚‚‚„†‡„„„ƒ€€‚„€€€€€€€€€€€€€€€€‰‹‹Š‡†ˆ‹ŒŒ‹Š‹Š‹‹‰ƒ}|†‰ˆŠˆˆŠˆ‚|z{xsmhfffs€ˆˆ‰ŠŠŠ‡†…„ƒ‚‚‚‚ƒƒƒƒ„„ƒ††ƒ€€~~€€€~~}}~~€€€€€€€€€€€€~{z|~~€~~~~€€}{}€€~{z{|{z{||zwttustuttuwz|€}zz~zxyyxy‚‚‚€€€€€€ƒ€~|€€€~~~€€‚‚€€€€€€€€€€‚‚ƒƒƒ€„…††……„„„„……†„„……‚‚‚€‚‚€~}}}||||||||{|||||||||||||{{}€‚‚ƒ‚€‚„…‡†…„ƒƒƒƒ„ƒƒƒƒƒƒ‚€„‡ƒ{usttvy{~
€€€€€€€€€€€€€€€€€}z{}|~|{||}}||{~…Œ‹†€|z}||||{{{|}€‚„„…„~||}}}|}‚‚‚‚ƒ‚ƒƒ€€€~ƒ„ƒƒƒƒ„„„ƒ„„„„…………†††††††††††††‡‡‡‡‡‡††††…‡‡‡†‡ˆ‰‰‡‡‡††††…‡††‡‡†‚~‚‚ƒ„…‡‰‰‰‰ŠŒ’’‘•—˜––”ˆ„€}|||‚„€ƒ…ƒ‚„„‚€€€€€€€€€€€€€€„„ƒ‚ƒƒ„„„„……††„„…„…„……„„„„ƒƒƒƒƒƒƒƒ„„„„ƒƒ„„„ƒƒ‚‚‚ƒ„……„„…††„‚€€‚‚‚‚‚€€‚‚‚‚€‚ƒƒ‚ƒ„†††……………†††…†…†…†…†…†…†…†ˆ‰ˆ…‚‚‚‚‚€€‚ƒƒ‚‚‚‚‚‚‚„†‡ƒ„„‚€ƒ€€€€€€€€€€€€€€€€‰Š‹‰†…‡ŠŒŒ‹‹Š‹‹‹ˆ€xw~…‡‡Š‡‡Š‰‚|zzxtqmhdbp}†‡ˆ‰Š‹ˆ‡…„ƒ‚‚‚‚ƒƒƒƒ…€ƒ‡ˆ…ƒ„…~~€€€€~~}}}~€€€€‚€~€€~~€~|||}~|~€€€}{}€}yxyzzy{|{yvttutuvvuvy{~€€~}~|z~zwyyxz‚‚‚€€€€€€€ƒ}|€€€~~~‚‚~€€‚‚‚€€€€€€€€‚‚ƒƒ‚€„†‡‡……„„„„……‡……††ƒ‚ƒ‚€‚‚€~}}}}||{{}}~~~}{{|||||||||{{}€‚‚ƒ‚€‚„…‡†…„ƒƒƒƒ„ƒƒƒƒƒƒ‚€…ˆ…}wvxtux|~€‚€€€€€€€€€€€€€€€€~{|~}~|{||}}||{‹‘Ž†‚€~}||||{{{}~‚„……~||}}}||ƒƒ‚‚‚ƒƒƒ€€€~}|}€„ƒ‚ƒ„…„ƒ„„„……………††††††††„„„……………‡‡‡‡††††‡‡††‡ˆ‰Š‡††††…………„„…‡ˆ„€‚ƒ„†ˆ‰Š‰‰Š‘“’‘–˜˜—•”†„€}|||~‚„€ƒ…ƒ‚„„‚€€€€€€€€€€€€€€ƒ„ƒ‚‚ƒ„„„„……††„„„„„………„„„„ƒƒƒƒƒƒƒƒ„„„„‚ƒƒ„„ƒƒ‚‚ƒ„„„„„…††„‚€€‚‚‚ƒ‚€€‚‚‚‚€‚ƒƒ‚ƒ„††††…………††††††………†…†…†…††ˆ‰ˆ…‚‚‚‚‚€€‚ƒƒ‚‚‚‚‚‚‚„†‡ƒ„„‚€ƒ€€€€€€€€€€€€€€€€‰ˆ‡…ƒ„‡Š‰ˆ†…‡ŠŠ‰Š‚xsv~†‹‰†„…ˆ…{syvtsmedfr}†‡‡†‡ŠŠ‡„ƒ„†…„ƒ‚‚ƒ„…„……†ˆ‰‡†‡‡„€~~€€€€€€€~}|~~~~~‚€€€€~€€€~€€€€~||€€€€~~~}}}~~}{yxxw{{zywvutwvutuwz|z}~~~{z€|zzzx{~‚€€€€€€€~~~€~~~}€€€€€€€€€€€‚ƒ„ƒ‚€~€}~‚‚‚ƒƒ‚‚‚ƒ„…‡ˆ‡†……„„……„………„„ƒ‚‚‚ƒ…ƒ}}}}}}}}~~}|{{{{{|||}}~~zzz|€‚‚‚‚‚‚‚ƒ„…†…‚ƒƒ‚‚‚ƒƒƒ„‚}yvuuwxz~}}€€€€€€€€€€€€€€€€||||{}€|{|ƒ‡‹Š†„‚z}|yy{€‚„…„‚€€~~}||}~€‚€ƒƒ‚‚‚~}}}~}€ƒƒ‚‚„„„„…………………††‡‡††„…††……†‡††††††††……………†‡ˆ†††……†††ˆ‡‡‡ˆˆ‡…}~„ŠŒŒŠŽŽ“•“–•˜š–‘‹…ƒ~{}~ƒ€„†…„‚€€€€€‚‚‚‚‚ƒƒ„„ƒƒ„„„………ƒƒƒƒƒƒƒƒ„„„„„„„„ƒƒƒƒƒ„……„„„„„„„„„„„„…………ƒƒƒƒƒ€€€‚‚‚‚€ƒƒ‚‚‚‚€ƒ………………………‡††……„……………†††‡‡†ˆ‰ˆ…‚‚‚ƒƒ‚‚‚€€‚‚‚ƒ„……ƒƒ‚€„€€€€€€€€€€€€€€€€ƒ„ƒ‚‚ƒ‡Š‰…‚‚„†ˆ‰ˆtoqzƒˆ‹Šˆ†ƒ€|zzwtqkeeis~†ˆ‡†‡ŠŒ‰…„„……„„ƒ‚‚ƒ„„ƒ„ƒ„‡‡†††‡ƒ~~€€€€€€€~}}~~~~~€€€€€€€~€€€€~}|€€€€~~}~~ƒƒ‚}{zzzyxwvvuvuttuxz|z}~}}~|yz€|zzzx{~‚€€€€€€€€€€~~~}€€€€€€€€€€€€‚ƒƒ€€~~€‚‚‚„„ƒƒƒ„……‡†…„„„„…„………„ƒ‚‚€€„‚~}}}}}}}}|}}~}|zy{{{||}}}zzz|€‚‚‚‚‚‚ƒ„…†…‚ƒƒ‚„ƒ‚‚‚‚‚ƒ„‚}yvuuwxz~}}€€€€€€€€€€€€€€€€~~~€‚‚}~‚…„‚€‡‰‘‘Œ„~{|}€ƒƒ}}|}€‚„‚€€~}}||}~€‚‚‚‚€€€€~~~~€}€ƒƒ‚ƒ„„„„…………………†‡‡‡††…†††………†††††††††…………†‡ˆˆ†††……†††‡‡†‡‡‡†…€~|~ƒ‰ŒŒ
ŒŽ‘““”“–˜–“„ƒ~|}~€ƒ€„†…„‚‚€€€€€€‚‚‚‚ƒƒƒ„„ƒƒ„„„………ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„„…„„„„„„„„„„„„…………ƒƒƒ„ƒ‚€€€‚‚‚‚€ƒƒ‚‚‚‚€ƒ………………………‡‡†…………†………†††‡‡†ˆ‰ˆ…‚‚‚ƒƒ‚‚‚‚€‚‚ƒ„……ƒ‚‚€„€€€€€€€€€€€€€€€€„„…„ƒ…ˆ‹Š}‚„Š‡}rknw€†‹ŒŠƒ|x|€{wsojeglt†ˆˆ‡‡Š‹‰…ƒ‚‚‚ƒ‚€‚‚‚ƒ†‡……†…‚~€€€€€€~~~~~~~€€‚€€€~€€€€~}|€€€~~~…†‡‡„€|zxxxxwwwwttttvx{|{~}}}{xz€|zzzx{~‚€€€€~€€€€€€€~~~~}€€€€€€€€€€€€‚‚‚€€‚€€€€€„„ƒƒƒƒ„…†…„ƒƒƒ„„………„„ƒ‚€€€‚€|}}}}}}}}||~~~|zy{{{{||||zzz|€‚‚‚‚‚‚‚‚ƒ„…†…‚ƒƒ‚„„ƒ‚‚‚ƒƒƒ„‚~xvuuwxz}}€€€€€€€€€€€€€€€€~}~€€}{‡‰…ƒ…„‰‹‘—˜‘„y|{|ƒ‹Œ†||}ƒ„ƒ‚€~~}}|}~ƒ…„„ƒƒ‚‚€€€€~~~~€~€‚ƒƒ‚ƒ„„„…„………………††‡†††††‡†…„……††††††††††††‡‡ˆ‰†††……†††‡†††‡‡†„~|{}‚‡‹ŽŒŒŽ“——™˜–—’‡ƒ|}~€ƒ‚€€„……ƒ‚€€€€‚‚‚‚‚ƒƒ„„ƒƒ„„„………ƒƒ„ƒ„ƒ„ƒ„„„„„„„„„ƒƒƒƒƒ„…„„„„„„„„„„„„…………ƒ„„ƒƒ‚€€‚‚‚‚€ƒ‚‚‚‚ƒƒ………………………‡‡††††††………†††‡‡†ˆ‰ˆ…‚‚‚ƒƒ‚‚‚ƒ‚‚‚‚‚ƒ„…„ƒ‚‚…€€€€€€€€€€€€€€€€†‡‡†……ˆŠ‹y~~‚‹‰tmox†Œ‹†~uu{‚|wrnifiow€‡ˆˆˆˆŠ‰‡ƒ€~}}}~}}~€€€ƒ†††…†„~€€€€€€~}~~~~€€€€€€€~~€€€€~}}€~€€ƒ…‡ˆ†€{wwwwwwwwwtttuvy{|}€~~~|xz€|zzzx{~‚€€€€€€€€€€~~}}~~~~~~~~€€€€€€€€€€€€€€€€€€‚ƒ…†‡……„ƒƒƒƒ„…………ƒƒ€ƒ|}}}}}}}}}}||{|{{{{{{{{{{zzz|€‚‚‚‚‚ƒƒ„…†…‚ƒƒ‚‚‚‚ƒ‚ƒƒ„ƒ„‚}yvuvxx{}~€€€€€€€€€€€€€€€€€~~~|€‰Œ…‚†…~…Œ”š˜Ž€w||‡ŽŽ‡€ƒ…‡†„~}}}}}„†‡††††……ƒƒ‚‚‚~}}}~€‚ƒƒƒ„„„„„…………………†‡‡‡††…†††………†††††††††††††‡ˆˆ‰†††……†††††…†††…„}zy|„‰Ž’Ž‹ŒŒ‹’™š›–“–•‹ƒ|}~~€‚‚€„…„ƒ‚‚€€€€€€‚‚‚‚ƒƒ„ƒ„ƒƒ„„„………„„„„„„„„……………………„„ƒƒƒ„„„„„„„„„„„„„„„…………„„„„ƒ‚€€‚‚‚‚€‚‚‚‚‚‚€ƒ………………………‡‡††††‡‡………†††‡‡†ˆ‰ˆ…‚‚‚ƒƒ‚‚ƒƒƒ‚‚‚ƒ„…„‚‚€€…€€€€€€€€€€€€€€€€…†‡‡…†ˆŠ‰}x{~|€‰‹‚xrt{ƒ‡Ž‰‚zvw|€{vrojgjqx‡ˆ‰ˆˆŠˆ†ƒ€|{z{|{{|~€‚‚„†‡†…†‚€~~€€€~€€€€~~~~~~}~~~€€€~€~~}~€~€€€‚…ˆ‰†{wvvvwwwwxttuvxy{|~€}yz€|zzzx{~‚€€€€€€€€€~~~}~~~~~~~~€€€€€€€€€€€€€~€€€ƒ„†‡……„ƒƒƒƒ„………„„‚€‚ƒƒ‚„‚}}}}}}}}}}||||{||{{{{{{{{zzz|€‚‚‚‚‚‚‚‚ƒ„…†…‚ƒƒ‚‚‚‚‚‚‚ƒ„‚~xvuvxyz~~€€€€€€€€€€€€€€€€‚€~€€~ƒŒŽˆ…ˆ‡€Š–™’†}y{…‰‹‰…€ƒ†‡†ƒ~~}}}}‚…‡ˆˆˆˆˆˆ††…„„‚~}}}~€€ƒ„…„ƒ„„…„………………††‡†††„…††……†‡††††††††††††‡‡‰‰†††……††††………††…„~yxz}†’ŽŒ‹Œ”–˜“–™“„|}~~‚ƒ€„…„‚‚€€€€‚‚‚‚‚ƒƒ„„ƒƒ„„„………„„„„„„„„„„„„„„„„„„„ƒƒƒ„„„„„„„„„„„„„„…………„„…„„‚€‚‚‚‚€‚‚‚‚ƒƒ………………………‡‡††††‡‡………†††‡‡†ˆ‰ˆ…‚‚‚ƒƒ‚‚‚ƒ‚‚‚‚‚‚‚‚ƒ„…ƒ‚…€€€€€€€€€€€€€€€€‡ˆŠŠ‰‰ŠŒƒ{vy{{}ƒˆzvx|€‚‹…~yy{}~yurplikqz‚‡
ˆ‰ˆˆŠ‰‡…}{{|||{}€‚ƒƒƒƒ„‡‡†…†€~€~€€€€~~~~~}}|}~~€€€~~€~}~~~€€€~ƒ†‰Š‰„{vvvvwwwwvwwxyz{{}€}zz€|zzzx{~‚€€‚€€€€€€€€~~}}~~~~~~~~~~~~~~€€€€€€€‚€€€€€ƒƒ‚‚‚ƒƒ„†…„ƒƒƒ„„……„„ƒ‚€€ƒ„„‚ƒ„‚}}}}}}}}}z|}~}}{z||||{{{{zzz|€‚‚‚‚‚ƒƒ„…†…‚ƒƒ‚‚‚‚‚‚‚€ƒ„‚}yvuvxy{€~~€€€€€€€€€€€€€€€€~|{{}~ˆ‘‰‹‰„~‰””Š€~|‚‰‹ˆ„ƒ„‚€‚ƒ„~~~~}}~€ƒ†‡ˆˆˆˆˆ‡‡…„ƒƒ€€~~~~€ƒ…†…ƒ„„„…………………†‡‡‡††„……†……†‡†††††††††††††‡ˆ‰†††……†††††…†††…„zwzz{‚ŒŽŽŽŽ‘”•– ›„€|}~~„‚‚ƒ„ƒ‚‚€€€€€€‚‚‚‚ƒƒƒ„„ƒƒ„„„……………„…„…„…‚‚‚‚‚‚‚‚…„ƒƒƒƒƒ„„„„„„„„„„„„„…………„………„ƒ‚€‚‚‚‚€‚‚‚ƒ‚‚‚€ƒ………………………††††††‡‡………†††‡‡†ˆ‰ˆ…‚‚‚ƒƒ‚‚‚ƒ‚‚‚‚ƒ„…‚€€€…€€€€€€€€€€€€€€€€‰‹‹‰†ƒƒƒ{wttwxz{{vuvxyy€|ywxyyxvtsrojlp{‚‡ˆˆˆˆ‰‡‡…€|z{|}||~€‚„„„„…†‡…„…~~~€€}~€~~~~~~~~~€€€~~}~~~~€€€~}~~„†‰Š‰†‚€wwvvvvvvxyyzzz{zz}~~~}zz€|zzzx{~‚€€€€€€€€€€€~~~}}}}}}}}}~~~~~~~~~~€€‚‚‚~~€€€€ƒ‚‚‚‚ƒ„‡†…„„„„…………„ƒ€ƒ„ƒƒ€|}}}}}}}}z{}}~}{z}}}||{{{zzz|€‚‚‚‚‚‚‚‚ƒ„…†…‚ƒƒ‚‚‚ƒƒƒ‚ƒ„‚}xvuvxy{€€~~€€€€€€€€€€€€€€€€}{z{}€‹‘Œ‰‡†ƒ‰†‚……‹‹‡„†ˆ…ƒ‚ƒ„‚€}~~~~~~}ƒ†ˆˆˆ‡ˆˆ‡†…„ƒ‚€€~~~~€‚ƒ†‡…ƒ„„„„………………††‡‡††……††……†††††††††††………†‡ˆˆ†††……†††††††‡†…„€zxyyw‰ŽŽŒ‹˜ž¢Ÿ—…€}}~~„‚‚ƒ„ƒ‚€€€€‚‚‚‚‚ƒƒ„„ƒƒ„„„……………………………ƒƒƒƒƒƒƒƒ…„„ƒƒƒƒƒ„„„„„„„„„„„„……………………„ƒ‚€‚‚‚‚€‚ƒƒ‚‚ƒ………………………†…………†‡‡………†††‡‡†ˆ‰ˆ…‚‚‚ƒƒ‚‚‚‚€‚‚‚ƒ„…‚€€€‚…€€€€€€€€€€€€€€€€ˆˆ‡‚|vsrutrqswwuxurrstsqttttttsrusstpllp{‚‡‡ˆˆ‡‰……ƒ{yz|}}|}‚‚„„„††„ƒƒ~}€}~~~~~~~~~}€€€€~~€~~~~~~€€~}}}ƒ…‡‰ˆ†ƒwwwvvvuuzz{{{{zzx{}||}|yz€|zzzx{~‚€€€€€€€~~~€~~~}}}}}}}}}~~~~~~~~~~~€€‚ƒƒ~}€~€€€€€€‚„…†‡†……„„…………„„ƒ€‚€z}}}}}}}}|||||||}~~}}|||{zzz|€‚‚‚‚‚‚‚ƒ„…†…‚ƒƒ‚‚ƒ„„„ƒƒƒ„‚}yvuvyy{€€~~€€€€€€€€€€€€€€€€ƒ€„‡‰‰ŠŒŽŽ‹†„…ˆŠŠˆ…„†ˆ‚†Š‹ˆ‡‰‹‰‡†……„~~~~~~~~}€„†‡‡‡†‰ˆ‡†…„ƒƒ‚~}}}~‚ƒ†‡…ƒ„„„…………………†‡‡‡††††‡†…„……††††††††……………†‡ˆ†††……†††‡†††‡‡…„{xywu}ˆŒ’‘Œ‰„‰•žŸ–‰…€}}~~„ƒ‚ƒ„‚‚‚€€€€€€‚‚‚‚ƒƒƒ„„ƒƒ„„„………………………………………………………„ƒƒƒƒƒ„„„„„„„„„„„„……………………„ƒ‚€‚‚‚‚€‚ƒƒ‚‚‚€ƒ……………………………„……††‡………†††‡‡†ˆ‰ˆ…‚‚‚ƒƒ‚‚‚‚€€€‚‚‚ƒ„…‚€€€‚…€€€€€€€€€€€€€€€€ˆ~rnqtsomortutsrqqrrqpontrpnmnpqopqrsuuv~‚…ˆŠ‹ŒŠˆƒ|zzz|}}~‚ƒ€ƒ†ˆ†ƒ€~~€€€€€€€€€€€~~}}}}~€€€~~}||}~~€€€€~~}…ˆˆˆˆ†‚}wsuwwwx€„…€|{yvy}~}|~|x|€€|y{{z|‚€€}‚‚~~}||~€~~~~~~~~~~~~€€€‚‚€€€€€€€€€€‚‚ƒƒ€€‚ƒƒ„…„…„…„…„ƒ„…„‚€‚ƒ‚€~}}}}}}}}{|}}}}|{}
||||{{{|{z{‚ƒƒ‚‚‚‚ƒ„‡†…ƒ‚„ƒƒ‚‚ƒƒ„‚ƒƒ‚{wuvxy{€~€€€€€€€€€€€€€€€€……………………ˆ‡…„ƒƒ„„ƒƒ„„…††‡…„„…†‰‹Œ‰‰‰ˆ‡††…‚‚€~~~~}~‚…‡ˆˆˆ‡ˆˆˆ†„„„€}}}‚‚ƒ„………………………………††††††††‡‡‡‡‡†††„…………†††ƒ†‰ˆ†…†ˆ‡‡‡‡‡‡‡‡†…………†‡‡‚|xwxxy|ŠŽ‘‘‘Ž‹…|™œ–Œƒ}€‚€€€ƒ…„ƒ‚‚€€‚‚‚‚‚‚ƒƒƒ‚ƒ‚ƒ‚ƒ‚„„„„„„„„‚ƒ„……………ƒƒƒƒƒ„……„„ƒƒƒƒ„„……„„„ƒƒƒƒ„„„………†…………„„„„ƒ‚‚‚‚‚ƒ‚‚ƒ„†…†…†…†…††††††††……†††‡‡‡†ˆ‰‡„‚€€€€€‚ƒƒ‚‚‚€‚‚‚‚‚„„„‚€„€€€€€€€€€€€€€€€€‚ypnrwwuoprssrqptttttsrqrqnmlmnommnpqrstyz}€ƒ†ˆˆ‡†ƒ€~}}~}~~€‚ƒ„‚‚‚„††„‚~~€€€€€€€€€€€~~}}}}~~€€€€~~~~||~~€€~~~}…ˆˆˆˆ†‚}wstvvvw€…‡„|xty~~{{zxz~{xyzx{}€~€~‚‚~~}|}~€~~~~~~~~~~~~€€€€€‚‚€€€€€€€€€€€‚‚ƒ‚‚‚‚‚‚„…„…„…„…ƒ„…„‚€‚€‚‚‚€~||||||||{|}}}}|{}||||{{{|{z|‚ƒƒ‚‚‚ƒ„‡†…ƒ‚‚‚ƒƒ‚‚‚‚ƒƒ€€~zvtwyy{€~€€€€€€€€€€€€€€€€ƒƒƒƒƒƒƒƒ„ƒ‚€€€€‚ƒƒ„„„‚‚‚‚„†‡‡‡‡‡††††„ƒ‚€~~~€‚……†…„„…†…ƒ‚‚‚€~}}‚‚ƒ„………………………………††††††††‡‡‡‡††††……………………ƒ†ˆˆ†…†ˆ††††††‡‡†…………†‡‡‚}xxxxy|ˆ‘‘‘‘Œ„zz„‘ˆ„|~€€‚ƒ‚ƒ…‡†…ƒƒƒƒƒƒƒƒ„„ƒ‚‚ƒ„„ƒƒƒƒ‚‚‚‚‚ƒ‚ƒ‚ƒ‚ƒ„„„„„„„„‚ƒ„…………„ƒ‚‚‚‚ƒ„„„„ƒƒƒƒ„„……„„„ƒƒƒƒ„„„………†………„„„ƒƒƒ‚‚‚‚‚ƒ‚‚ƒ„…†…†…†…†††††††††……†††‡‡‡†ˆ‰‡„‚€€€‚ƒƒ‚‚‚€‚‚‚‚‚‚ƒ„ƒ‚€„€€€€€€€€€€€€€€€€„}vtx{|zvwxxxwvuwvvuuttttsqpopqqlmnopqrsrsvy}€ƒ„…„}||}zz{{}€‚‚ƒ…‡†„‚€€€€€€€€€€€~~}}}}~~~~~~~~}|~€~~}…‡‡ˆˆ‡ƒxstuuuv†Š‡ƒztz‚{yyyz}~zwxywz}€€}~€‚ƒƒ‚€~~~~}~~€€~~~~~~~~~~~~~€€€€‚€€€€€€€€ƒƒƒ‚‚‚‚‚…„…„…„…„„……„ƒ‚ƒ‚€~||||||||{|}}}}|{}||||{{{|zz{‚ƒƒ‚‚‚‚‚ƒ„‡†…ƒ‚‚‚‚ƒƒ‚‚‚‚ƒƒ‚‚|xvxzz|~~€€€€€€€€€€€€€€€€‚€€€€‚‚‚‚ƒ‚‚‚ƒ„„„………†††††„ƒ€~~ƒ…‡††…‡‡ˆ‡†„ƒ„„|}~‚‚ƒ„………………………………††††††††‡‡‡†‡†††††††………„„…‡‡††‡ˆ†††††††††…………†‡‡ƒ}yxxwy{†Œ‘‘‘‘„zw|…‡…„€||}~ƒ…ƒƒ„‡ˆˆ‡……………………†……„„„…†…„„ƒƒ‚‚ƒ‚ƒ‚ƒ‚ƒ‚„„„„„„„„ƒƒ„…………„ƒƒ‚‚ƒƒƒ„„„ƒƒƒƒ„„……„„„ƒƒƒƒƒ„„……………„„„ƒƒƒƒƒ‚‚‚‚‚€‚ƒ„†…†…†…†…††††††††……†††‡‡‡†ˆ‰‡„‚€€€€€‚‚ƒ‚‚‚‚€‚‚‚‚€‚„„ƒ‚€€‚…€€€€€€€€€€€€€€€€‡‚zwxyxwttuuuuuusrqpooppssrqpppptsrqpnmmnoqtx|„‚€}zyxxwwxyz|}~„ˆ‰‡ƒ€€€€€€€€~}}}}}~~~~~~€}}€~~~€~}€„†‡‡‰ˆ…ztsttst†Šˆ…‚~z|€~|zzz{{xyzx|‚€€€ƒ„„‚€~~~~~€€~~~~~~~~~~~~~€€€€€€‚€€€€€€€€€€€€€‚‚‚‚‚‚ƒƒ„…„…„…„…„………ƒ‚€‚‚‚€~{{{{{{{{{|}}}}|{}||||{{{|{z|ƒƒƒ‚‚ƒƒ„††„ƒ‚‚‚‚ƒƒƒƒƒƒƒƒ„………ƒ|zy{{|}~€€€€€€€€€€€€€€€€‚ƒƒ„„ƒ‚‚‚‚‚‚‚‚‚ƒ‡†„ƒ‚ƒ„…‚‚ƒ„„…††ˆ‡‡…ƒ~„‡‰‹ŠŠ‰Š‹Šˆ†††…ƒ€~}€‚‚ƒ„………………………………††††††††‡‡†‡†††††††††…
……„…………†‡ˆ††††††……††………††‡„~yyxwx{‚‰‘‘’‘†|y{{~‚‚‚}|}~ƒ„ƒ‚„†ˆ‡†…………………………„„ƒ„……††…„‚‚€‚ƒ‚ƒ‚ƒ‚ƒ„„„„„„„„ƒ„„………„„„„„„„„„…„„ƒƒƒƒ„„……„„„ƒƒƒƒ„„…„……†„„„„ƒƒƒƒ‚‚‚‚‚‚‚ƒ„…†…†…†…†††††††††……†††‡‡‡†ˆ‰‡„‚‚‚€€€‚ƒ‚‚‚€‚‚‚‚‚‚ƒ„ƒ‚€€€‚…€€€€€€€€€€€€€€€€ƒzwwwwvxxxxyz{{{zxvvvwxrrrqqpooqqpppooomnoquy}‚}zyxwzzz{}~€€ƒ…‡†ƒ€~}|}}}~~~€€€€€€€~}€€~~~}€ƒ†…‡‰‰†„|uttsrs€…ˆ†………ƒ}|}~~|z}‚}{{|{„„ƒ‚ƒ„‚ƒ„…„ƒ€~~€€€€~~~~~~~~~~~~~€€€€€€€‚€€~~~€€€‚‚ƒ„„…„…„…„…„„…†…ƒ‚‚ƒ‚€~{{{{{{{{{|}}}}|{}||||{{{|zz{‚„ƒ‚‚‚‚‚ƒ„†…„ƒƒ‚ƒƒ„„„„„„„„„………ƒ€}{z{{|}}€€€€€€€€€€€€€€€€ƒƒƒƒƒƒƒƒ‚„…†„ƒ‚………„„„„„‡†„ƒƒƒ„„‚‚‚ƒ„……†‡‡ˆ‡…ƒ€„‡Š‹‹‹ˆ‰Šˆ‡„„„„ƒ‚‚ƒ„………………………………††††††††‡†‡†††††……††††††…„ƒƒ…†ˆ‰†††††………†…………†‡‡…{yxwxz~‡‘‘’’‘‡~z{zz}}~~~~€‚€…†…„‚‚‚‚‚‚‚‚ƒƒ‚‚‚ƒƒ†……ƒ‚€€ƒ‚ƒ‚ƒ‚ƒ‚„„„„„„„„„„………„„ƒ……„„„„„…„„ƒƒƒƒ„„……„„„ƒƒƒƒƒ„„………………„„„ƒƒƒ‚€‚‚‚€‚ƒ„†…†…†…†…††††††††……†††‡‡‡†ˆ‰‡„‚‚‚‚€€‚‚ƒ‚‚€‚‚‚‚€‚„„ƒ€€€‚…€€€€€€€€€€€€€€€€‚‚ƒ‚€€‚‚}zz{}zz{{{yxwrrqqppoonnnprwz}~~}|||{||}~€‚ƒ‚€€ƒƒƒ€€€€~}||}}~~~€€€€€€€€€~}~€~~}€ƒ……†ŠŠˆ‡~wttrrs„‡†…ˆ‰ˆ€|z||{~‚‚~{}}|€ƒ…†„„„…ƒ„…†…ƒ}~€€~~~~~~~~~~~~}€€€€‚€}}~~€€~~~~€‚ƒƒ„„…„…„…„…„…†…ƒ‚‚‚€‚‚‚€~||||||||{|}}}}|{}||||{{{|{z|‚ƒƒ‚‚ƒƒ„……„ƒ‚ƒƒƒƒ„„„„„„ƒ‚‚ƒ~{yy{{|}}€€€€€€€€€€€€€€€€ƒƒƒƒƒƒƒƒ~€‚……„‚€†††………„„ƒƒ‚ƒ„ƒƒƒ„„„………†‡ˆ‡„‚€€‚„†‡ˆ‡†ˆ‰‰ˆ…„ƒƒ‚‚‚‚‚‚‚ƒ„………………………………††††††††††††††……………††‡‡‡…ƒ‚„‡ˆ‰‡‡‡†††……††………††‡‡{zxwwz{…Ž’’“““†}yzxxyx{~~~€€€~€‚„ƒ‚€€€€€€€€‚€€€‚„ƒƒ‚€€‚ƒ‚ƒ‚ƒ‚ƒ„„„„„„„„„…………„ƒƒ„„„ƒƒƒƒ„„„ƒƒƒƒ„„……„„„ƒƒƒƒ„„„„……††………„„„„‚€‚‚‚ƒ„…†…†…†…†††††††††……†††‡‡‡†ˆ‰‡„‚‚‚‚‚‚‚‚ƒ‚‚€‚€‚‚‚‚‚‚ƒ„‚€€€‚…€€€€€€€€€€€€€€€€………„„…‡ˆƒ~zxxxxywsqprtv€ƒ„ƒ~€|xtpmlonmmpswy~~~~~}}|z{{|}€„~|~ƒ„€€€~~€~}||||~~~~€€€€€€€€€~~~~€€€~~}‚„„†ŠŠ‰‰xutrrs~„ˆˆˆ‰‰†‚}{}}{|}‚}{||{€‚……„ƒ„…„…††…ƒ}~€‚€~~~~~~~~~~~~}‚‚€€|}}~€€~~}}|€€‚…„…„…„…„……†…„‚‚‚‚ƒ‚€~||||||||{|}}}}|{}||||{{{|zz{‚ƒƒ‚‚‚‚‚ƒ„…„„ƒƒƒƒ„‚ƒƒ„„ƒƒ‚€‚‚€~{yxzz|}~€€€€€€€€€€€€€€€€~„‡ˆ†„‚………„ƒƒ‚‚ƒ‚‚ƒ„†‡…………„„„„‚„†‡‡†ƒ‚€„†‡‡††‰ŠŠ‰‡„„ƒƒ„…ƒ‚€‚ƒ„………………………………†††††††††††††………††††††††…‚€€„‡‰‰ˆ‡‡‡††……†…………†‡‡ˆ|zywwyxƒŽ’’“”•‰}xxwwxv{|{~‚€~€ƒ„„‚€€€€€€€€‚€€€‚€€ƒ‚ƒ‚ƒ‚ƒ‚„„„„„„„„„…………„ƒ‚…„„ƒƒƒƒ„„„ƒƒƒƒ„„……„„„ƒƒƒƒ„„„…………††††…………‚€€‚‚€‚ƒ„†…†…†…†…††††††††……†††‡‡
‡†ˆ‰‡„‚ƒ‚‚‚‚‚‚ƒƒ‚€‚€‚‚‚‚€‚„„‚€€€€ƒ†€€€€€€€€€€€€€€€€‚ƒ‚€}||}†ƒ~yusrr{xurqsvx}~€€~|z€€}|zyyonllmptv~~|{yx{{{|~‚‚€}}~€‚‚€€€~~€€~}||||}~~€€€€€€€€€~~}~~€€€€~~}‚„„†Š‹‰‹‚yvtrrs}„ŠŠŠ‰‡ƒ‚}~zz||€€|yz{y~ƒ„ƒ‚‚ƒ„…†‡†„}~€‚‚€~~~~~~~~~~~~}€€‚‚€||}~€€~~}}||€€€€€€€„…„…„…„……††…„‚‚‚‚‚‚€~}}}}}}}}{|}}}}|{}||||{{{|{z|‚ƒƒ‚‚‚‚ƒ„…„ƒƒƒƒƒ„‚‚ƒƒ‚‚‚ƒ„‚€}{xyz{}~€€€€€€€€€€€€€€€€~~~~~~~~‚„ˆ‹ŒŠ‡…„ƒƒ‚€€…„„…†‰‹Œ‡‡†……„ƒƒ€‚…‡ˆ†„‚~€ƒ†‰ŠŠŠ‰‰Šˆ†„ƒƒ}€ƒ†‡…‚€‚ƒ„………………………………†††††††††††††………‡‡‡††††…†‚€„‡‰‰ˆˆ‡‡†††…†…………†‡‡ˆ‚|zxwwyw‚’’“••€xxwyzx}zy~‚„€ƒ…„ƒƒ‚‚ƒ€€€‚ƒ‚ƒ‚ƒ‚ƒ„„„„„„„„……………„ƒ‚††…„„„„…„„ƒƒƒƒ„„……„„„ƒƒƒƒ„„„………†‡‡‡†††……€€€‚‚ƒ„…†…†…†…†††††††††……†††‡‡‡†ˆ‰‡„‚ƒƒ‚‚‚‚‚ƒƒ‚€€‚€‚‚‚‚‚ƒ„‚€€€€‚†€€€€€€€€€€€€€€€€„ƒ€„†„{uqrvyuxyvrrx~~€ƒƒ}tqstvy}€€~|yrnoqppq‚€~}|||{|}€‚ƒƒƒ}~~~~||}~€€€€€€€~~}}|{{{|~€€€~}}~€€}€}{yyyz~€€€€€€}}€…‡†ˆ‹ˆ„|tqrsryˆŠ‰Š‰ˆƒ€~~€}yz~€|yzzw~„†…„„„††…„ƒ‚‚€€€€€€€€€~~~€€€‚‚‚}~~~€€€€€€€€€€€€€€}}}}~€~~~€€~{~}}}~~€€‚„……„„„„……„ƒ‚‚‚€‚‚€~}||{{||}{{{z{||}{{|||}}}}||~ƒƒ‚‚ƒƒ‚‚„†…„ƒ‚‚‚‚„„ƒƒƒƒ„„‚ƒ}yxxuwz}~~}€€€€€€€€€€€€€€€€~ƒƒ€~~‚…ŠŒ‹Š‰‰…€‡„€‰“•’‘Œ‡„ƒ…†„…„…Šˆ~€ƒ…†††…‰‡„ƒ‚‚‚‡ˆ‡…ƒƒƒ…††…„……„„„………††……†††‡‡‡†††††………‡‡†………†‡„‚€€ƒ‡‰‹Œ‹‰†ƒƒ„††††††††‰ˆ„}yyxvy~‰’““••xyyvvy|}}|{|‚~‚†ˆ‡†…ƒ€‚‚‚‚‚ƒƒƒ‚€€‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒ„………ƒƒƒƒ„„……„„ƒƒƒƒ„„…„ƒƒ‚ƒƒƒ„„„„„„„„„„ƒƒƒ„„…‡ˆˆˆˆ‡†…‚‚‚‚€€ƒƒƒ„†…†…†…†…†††††††††††††††††‡‰‡„€‚‚‚‚€‚ƒƒƒƒ‚€‚‚‚‚‚‚‚‚ƒ‚‚ƒ„„‚‚‚…€€€€€€€€€€€€€€€€„‚€„†„{uqruxwyyuqry€ƒƒ}tqstux|€€{vsnknu„ƒ€~~}}{{|}~€‚ƒzy|~~}|}}~€€€€€€€€~~}}||{{|~€€€~~€}~~|zxxyxxz}€€€€€~}€…‡‡ˆ‹‰†~vrrsrx€ˆ‰‰Š‰ˆ€}{|~|xz€|yzzx~„†…„„„„„ƒ‚€€€€€€€€€€€~~~€€€}~~€€€€€€€€€€€€€}}}}~€€~€€~|}}}}}~‚ƒ……„„„………„ƒ‚‚‚€‚‚€~{{{{{|}~{{{{{|}}{{|||}}}}|{}ƒ‚‚ƒƒ‚‚„†…„ƒ‚‚‚‚„„ƒƒƒƒ„„ƒ„„|xvvvxz}~~}€€€€€€€€€€€€€€€€~ƒƒ€~~‚†ŠŒ‹‰‰‰…€‡„€€ˆ’•“’‰…‚ƒ„„…„…‰Œ‡}„………„‡†…„‚‚†‹‹‡…„„…††…„ƒ„„…………†††……†††‡‡‡††††…………‡‡†……††‡…„ƒ„†‰ŒŽŽŽ‹ˆ……†††††††††‡†‚|yzywx|†”•––Ž‚xxywvy|}}}|}€‚„†…„ƒ€€€‚ƒ‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒ„………ƒƒƒ„„„……„„ƒƒƒƒ„„…„ƒƒ‚ƒƒƒ„„„„„„„„„ƒƒƒƒ„……ˆˆ‰‰ˆ‡††‚‚‚‚‚€€ƒƒƒ„…†…†…†…††††††††††††††††††‡‰‡„€‚‚‚‚€‚ƒƒƒ‚‚€‚‚‚‚‚‚‚‚ƒ‚‚ƒ„…†ƒ‚‚€‚†€€€€€€€€€€€€€€€€ƒ€~„†…zurrtuxyxtprz‚€„ƒ}tqsuuwz‚ƒƒ‚€}xpls~†…ƒ€{{{{}~€‚‚{tty~~}||}~€€
€€€€€~~~}||{{|~€€€€€}~}{xwuutxwxz}€€~~}€„‡†ˆŠŠ‡xsssrx€‡‰‰Š‰ˆ|z{~}yz|zzzx}€„……ƒƒ„€~~~€€€€€€€€€~~~~~~€€€~~~~€€€€€€€€~}}}~€€€€€€}}||{|}~~ƒ„…„„„………„ƒƒ‚‚‚‚€~{z{{|}~|{{{{|}}|||||}}}|{z|€‚€‚ƒƒ‚ƒ„†…„ƒ‚‚‚‚„ƒƒƒƒƒƒ„…†…‚}xvuwx{}~~~}€€€€€€€€€€€€€€€€‚„ƒ~~‚†Š‹Š‰‰Š…€‚‡„ˆ’–“‘‘‹†‚€„……„ˆŠ†~|~€‚„„„ƒ‡‰Šˆ†„„……Š’Š†„…†‡†„‚‚‚……†…††††……†††‡‡‡†††††………‡††…††‡ˆ‡‡‰‹‘’Š‡‡‡††††††††††ƒ~|||zyzƒŽ”•–•Žƒyxywvx|}~}}~ƒ€~‚ƒ‚€€~||}~}|~€ƒ‚‚‚€€€‚‚‚‚‚‚‚‚ƒƒƒ„„„……ƒƒƒƒ„„……„„ƒƒƒƒ„„…„„ƒƒ‚ƒƒ„„„„„„„„„ƒƒƒƒ„…†‰‰ŠŠŠ‰ˆ‡ƒƒ‚‚€ƒ‚‚ƒ„†…†…†…†…†††††††††††††††††‡‰‡„€‚‚‚‚€‚‚ƒƒƒ‚€‚‚‚ƒ……‡„‚€‚‡€€€€€€€€€€€€€€€€‚€~~~„†„€ytssssyyxtps|„‚ƒƒ{tqsxwwz}‚„…~~~}xv}‡‡†„‚€€||{||~€‚ypow}~||}}~~€€€€€€€€~}}||{}~€€€€€€€€}|zwutttuwwwz}€~~~}„†‡‡ˆŠˆƒztrrrw‡‰‰Š‰ˆ~{||{}z{zx}€„…„ƒƒ„€€€€~~€€€€€€€€€~~~~}}}~~~~€€€€€€~}}}}~€€€€~||{{{||}€„„„„…†……„„ƒ‚‚€‚‚€~||{{{|}~||{{{|}~||||||||{zz|€‚‚ƒ‚ƒ…†…„ƒ‚‚‚‚ƒƒƒƒƒƒƒƒ„……‚}ywwyz{}}~}}€€€€€€€€€€€€€€€€€ƒ…„~ƒ†‰ŠŠ‰Š‹…€‡ƒˆ“–”ˆƒ€…†…„…‡„~|~€ƒ„„„ƒ†‰‹Š‡……†‡Œ‘”‹…ƒ„…††„ƒ‚ƒ††††††††……†††‡‡‡††††…†……‡‡†††‡ˆ‰‰‹Ž’““’‘’‘ŽŠ‡‡‡†††††††††‡…€|zŒ“””’„zwxwwx{}~ƒ}}€~~{yyz{zy|}~‚ƒƒƒ€€€€‚‚‚‚‚‚‚‚ƒƒ„„„„„„ƒƒƒ„„…„…„„„„„„„„…„ƒƒ‚ƒƒƒ„„„„„„„„„ƒƒƒ„…†‡‰ŠŠ‹‹Š‰‰„„ƒ‚€‚ƒƒ‚„…†…†…†…††††††††††††††††††‡‰‡„€‚‚‚‚€‚‚ƒƒ‚‚‚‚‚ƒ„…†‰†ƒ‚€ƒˆ€€€€€€€€€€€€€€€€€~}~„‡ƒ~wttttswyyurt}„ƒzrpsyxxy}€ƒ„|{|€‚„ˆ‡†„‚€~~~~}~‚xoov}~|||}~€€€€€€€~~}|||}}~€€€€~{yxwuuuwxwvvy}€~~~}~ƒ‡††‡ˆˆ„{tqqrv~†ˆ‰ŠŠˆ‚~{z~~|{€}{{{y}€ƒ…„ƒƒƒ€€€€€€€€€€€€€€~~~||}}~~~~~€€€€~}}|}}€€}|{z{{||~€ƒ„„„…††……„ƒ‚‚‚ƒ‚€}~}|{{{|||||{|}}~||||||||{zz|€‚‚‚ƒ…†…„ƒ‚‚‚‚ƒƒƒƒƒƒƒƒ‚„‚{{{{{||}}}}€€€€€€€€€€€€€€€€‚„†…‚~~„†‰Š‰‰Š‹…€‚…ƒ€Š”—”Ž‰„€…‡†‚ƒ„‚}}ƒ………„‚…ˆ‡„‚ƒ„†Š‘Žˆ„‚ƒ„†……„„„††††††††……†††‡‡‡†††††………‡†††‡‡‰‰ŒŽ’“’‘‘‰†……†††††††††‡†ƒ‚ƒƒ}y~Š’””’†{wwxxx{}€€€€‚„‚||}~~}|zxwxyyxz{~‚ƒ‚€€€€‚‚‚‚‚‚‚‚„„„„„„ƒƒƒƒƒƒ„„……„„„„„„„„…„„‚ƒ‚ƒƒ„„„„„„„„„ƒƒƒ„…‡‡ŠŠ‹‹Œ‹‹Š……ƒ‚‚€ƒ‚‚ƒ„†…†…†…†…†††††††††††††††††‡‰‡„€‚‚‚‚€‚ƒƒ‚‚‚‚‚‚ƒ…‡ˆŠ†„ƒ€ƒ‡€€€€€€€€€€€€€€€€~||~„‡€{usuvusuxyxuv}ƒxpnqxxwy|€‚ƒ{{‚‡‡……ˆ‡…ƒ€~~€€€€zsrw|}{|}}~~€€€€€€€€~}}||~}}€€€~|zxwwwwxxyvuuy|€€€~€}}ƒ†‡††‡ˆ…}tqqru~…ˆˆ‰Šˆ‚}yxz|{y{€‚~{|{y|ƒ„ƒ‚‚ƒ€€€€€€€€€~~~|}}}~~€€~~~~~~€€€€€‚~~}}|}~~€~}}|{z{{|}‚ƒ„„†‡††…„ƒƒ‚‚‚€‚‚€~~}
|{{z{}||||}~~}}}||||||{z|€‚€‚‚‚„…†…„ƒ‚‚‚‚‚‚ƒƒƒƒ‚‚€‚ƒ‚}|}|}|}}}}}€€€€€€€€€€€€€€€€ƒ…‡†‚~…‡‰‰ˆˆŠŒ…€„ƒ‚…Ž—˜“ŽŒˆ„€…‡†‚€‚€}}„………„‚„……ƒ‚ƒ„…ˆ‹Š†„ƒƒ…††………†††††…†…………†††‡‡‡††††…†……‡‡††‡ˆ‰ŠŽ‘‘ŽŒˆ…„„††††††††„†…ƒ‚ƒƒ{v{ˆ‘”””‘‡|vvxxxz}€‚‚ƒ…‚{{|~~~|zwwxyxwz{}€€€€‚‚‚‚‚‚‚‚……„„„ƒƒƒƒƒƒ„„„……„„„……„„„…„ƒƒ‚ƒƒƒ„„„„„„„„ƒƒƒ„…†‡ˆ‰Š‹‹ŒŒ‹‹‡…ƒ‚‚€ƒƒ‚„…†…†…†…††††††††††††††††††‡‰‡„€‚‚‚‚€€‚ƒƒƒ‚‚‚‚‚‚„†ˆ‰‰‡……ƒ‚†€€€€€€€€€€€€€€€€~}||~…‡}xssvywtqv{{yx|€€}vnmpuuvy}ƒƒ‚|z€‡ˆ††Š‰‡„‚€~€€}xwz||{||}~€€€€€€€~~}||~}}€~}}~}{xwwxxxwwvutux|€€€€}}‚‡‡……†‰‡vrrtu}…ˆˆ‰Š‰„ywyzyw|€‚~|||z|‚„ƒ‚‚‚€€€€€€€€€€€€~~~}}}~~€€€~~}~~€€~€€€‚‚‚ƒ~}}|||}~~}~~~}|{{{|||~ƒ„„†‡††…„„ƒ‚‚‚€‚‚€~~}|{{{{{}}|||}~}}}|||{{}|{}ƒ‚€‚‚‚„††…„ƒ‚‚‚‚‚‚ƒƒƒƒ‚‚‚ƒ„‚|{|~}}}}}}}€€€€€€€€€€€€€€€€„†ˆ†‚~~…‡ˆˆ‡ˆŠ…€‚‚ƒ„‰’š˜’‘‘Œ‡ƒ€€†ˆ†||~€‚„„„ƒ„„„„„„………‡‰‰‡…†‡…†‡‡†………†††…………„……†††‡‡‡†††††………‡†††‡‰Š‹Ž‘‘‰…„„††††††††„††ƒƒ„ƒ€{vzˆ’”•‘ˆ|uvxyxz}ƒƒƒ„…~{z|€€|zxxyyyxz{}~€€€€‚‚‚‚‚‚‚‚‚‚………„ƒƒƒ‚ƒƒƒƒ„„……„„…………„„…„ƒƒƒ‚ƒƒ„„„„„„„„ƒƒƒ„…†ˆ‰‰‰Š‹ŒŒ‹‹ˆ†„‚€‚€ƒ‚‚ƒ„†…†…†…†…†††††††††††††††††‡‰‡„€‚‚‚‚€€‚‚ƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚ƒ„†ˆŠˆ‡‡ˆ†‚‚„€€€€€€€€€€€€€€€€~}{|}…‡{vqrwzxuou|}{y|€|umlorsuy~‚ƒƒ€{y~„‡‰ŒŒ‹ˆ†„‚}~€‚‚€~}|{{|{{|}}~€€€€€€€€€~~}}|~}}‚€~}}}}zxwxyyxvtrttux|€€€€€€}}‚†‡…„†‰ˆ€wssuu}…‡ˆ‰Š‰‡‚|yz{zx|‚|||z|‚„ƒ‚‚‚€€€€€€€€€€€€€~~~}~~~€€€€€~~~}~~€€~€€‚ƒƒƒ~~}|||}}}€~}}~~~}|{{|||~ƒ„…†‡††……„ƒ‚‚‚€‚‚€~}||{{||}}}|||}~}}}|||{{}||~ƒƒ‚€‚‚‚„††…„ƒ‚‚‚‚‚‚ƒƒƒƒ‚‚„……ƒ{zz~~}}|}}}€€€€€€€€€€€€€€€€„‡ˆ†‚~~†‡ˆˆ‡ˆ‹…€ƒ†Œ•›™’“’‹††ˆ†}~~|{}ƒƒ‚‚„‚‚ƒ„„…‡ˆ‡…†‡Š‡ˆˆˆ†…„…††………„„„……†††‡‡‡†††††………‡††‡‡‰‹ŒŒŽ‘’“’“‘Ž‰†……†††††††††ˆˆ…„…„‚|w{ˆ‘’“‘‰}uuxyxz}ƒƒ„„…~zz|‚}{xxyzzx{|}~~‚‚‚‚‚‚‚‚‚‚‚‚………„ƒƒ‚‚ƒƒƒ„„„……„„…………„„…„ƒƒ‚ƒƒƒ„„„„„„„„ƒƒƒ„…‡ˆ‰ˆ‰Š‹‹‹‹‹ˆ†„€‚‚€ƒƒƒ„…†…†…†…††††††††††††††††††‡‰‡„€‚‚‚‚€€‚ƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚ƒ„‡‰Š‡‡ˆ‰‡ƒƒ€€€€€€€€€€€€€€€€ƒ€}{|ƒ…ywuttuwysuwwvwz}}|zwtqontux{‚„†{yx|‚†…ƒŠ†ƒ€|{{…†€{ƒ~{yyz{}}~~~~}}~~~€€€~~}}}}~~~~€€€€€€~|{zwyzywvvwuuvwz|~€}~~~~}€~~ƒ‡‡…Š‰‡…‚|{|ƒ‡‰Š‰ˆ‚|x{}|{z{}€|yww|…ƒ‚‚‚€€€€€€€€€€€€€~}~€}~~~~~~~~~~~~~~~~‚€~~€~‚€€€}}|||}}~€~~~}}}|{{{{|}~€‚„…………………„ƒ‚ƒ‚€~~~}}}|||~~~~~~~~€}|||~~{zz}ƒƒ‚‚‚‚ƒ„„†…ƒƒƒƒƒ‚‚ƒ‚„…ƒ|{{zz{}|z{~€€€€€€€€€€€€€€€€€
ƒ…„‚…ŠŠŠŠŠŠŠŠ‰‡„€~~€ˆ‰‹Ž‘”–—”’‹‡ƒ~‰ˆ‡…ƒ~}}~€‚‚‚‚ƒƒƒ„…††‡‡ˆˆˆ†††††††††…†…†…†………†‡ˆ‡‡‡‡‡‡‡†††…†††‡ˆŠ‹Œ‘ŽŒŒŽ‘‘‘’’‘Ž‹‡…†††††††††ˆ‡…„…„‚zzz|€„‰ŒŽˆ~wuvy{|~€‚~{{~‚‚zwxzyy{y|‚ƒ‚€€€‚‚ƒ„„ƒ‚‚„„„ƒ„ƒ„„ƒƒ„„„„……‚ƒ„„……„„„„ƒƒƒƒ„„„………††††„ƒƒ„‡ˆˆ‡ˆ‰Š‹‹ŠŠŠ‰ˆ†„‚€‚‚‚‚‚‚‚‚‚‚‚€ƒ………………………††††††††…………†††††‡‰‡„‚‚ƒƒ‚‚€‚ƒƒ‚‚‚‚„†ˆ‰ˆ‡ˆˆ…‚„ˆ€€€€€€€€€€€€€€€€~}{{{~‚yxvuuvxy{ytpot}„~~|zwvtsz{}€‚…‡ˆ‚€„†…‚ŽŒ‹‰ˆ‡‡‡‚}~€}y~|zyz{|}}~~~}}~~~€€€~~}}}}~~~~€€€€€~|{yywyzywvvwuvvxy|~€~~~~~~~~€~ƒˆˆ†‰Š‰‰ˆ‡††‚ƒ†ˆ‰‰‰ˆƒ}y{}|zy{~€€|yww|„ƒ‚ƒ‚€€€€€€€€€€€€~}~€}~~~~~~~~~~~~~~~~}}~~}‚€€€€€~}}|||}}~~~}}}}}|{{{{|}~€‚ƒ………„„„ƒƒ€€‚€‚‚‚€~~~}}}|||~~~~~~~~~~}|||||{zz}ƒƒ‚‚‚‚ƒ„„†…ƒƒƒƒƒƒ‚‚‚ƒ‚„„ƒ|{{zz{}|z{~€€€€€€€€€€€€€€€€ƒ…ƒ€~€‚‰‰‰‰‰‰‰‰‡…ƒ~~~‚ƒ†ˆŠ‹ŒŒ‹‰†‚|{„„ƒƒ€{{{||}}}‚‚‚ƒƒ„„„…††‡‡ˆˆˆ††††††††…†…†…†…†……†‡‡‡‡‡‡‡††††………††ˆ‰‹ŒŽŒ‘‘‘‘‘‘ŽŠ†„††††††††…††„ƒ„ƒwvvvwz|}€}ywwyyz|~€ƒ~{{~‚‚zwyyyy{z{}€€€€‚‚ƒ„„ƒ‚‚ƒƒƒ„ƒƒƒƒƒƒƒ„„„„…„„………„„ƒ„„ƒƒƒ„„„„…………†††…„ƒ…‡‰ˆ‡ˆ‰ŠŠ‹ŠŠŠ‰ˆ†„‚€‚‚‚‚‚‚‚‚‚€ƒ………………………††††††††…………†††††‡‰‡„‚‚ƒƒ‚‚€‚ƒƒ‚‚‚ƒ†‡‰ˆˆˆˆ…‚„ˆ€€€€€€€€€€€€€€€€‚‚‚‚ƒƒ††„„„„††}xrqw‰ŠŠŠˆˆ††…‚ƒ„†‡ˆ‰‰‡…„……ƒ‹‹‹‹ŒŒˆzvxyzxyyxxyz|}}~~~~}~~€€~~}}}}~~~~€~|{yxwwxyywvvvuuvwz|~€~}}~~~ƒˆŠˆˆˆˆˆˆ‡††ˆˆ‰‰‰‰‰‰„~z{}{yx|~€€|ywv|„ƒ‚ƒƒ€€€€€€€€€€€€€~~€€~~~~~~~~~~~€~}}}~}}€€€~}}|||}~~~}}|||}}|{{{{||}ƒ„……„„„ƒƒ‚€€‚ƒ‚€~~~}}}|||}}}}}}}}|||}|{{z{zz}ƒƒ‚‚‚‚ƒ„„†…ƒƒƒ„„ƒ‚‚ƒƒ‚„…ƒ|{{zz{}|z{~€€€€€€€€€€€€€€€€ƒƒ‚€~€€€€€€€„„„ƒ‚}|}~‚‚„ƒ‚€~|{z~}}||||{ƒƒƒ„„„…………††‡‡ˆˆ†††††††††…†…†…†………‡‡ˆ‡‡‡†††…†…………†‡‰ŠŒŽŽŽŽ‰†ƒ†††††††††‡†ƒ‚}yxxwwwwwvvwy{{yw|~€‚€}{{~ƒƒ€zwxzxy{{{||~~€€€‚‚ƒ„„ƒ‚‚ƒƒƒƒƒƒƒƒƒƒ„„„„…………††…„„ƒ„„„ƒƒƒ„„………………††…„ƒ„‡ˆˆ†ˆˆ‰Š‹ŠŠŠŠˆ‡„‚€€‚‚‚ƒƒ………………………††††††††…………†††††‡‰‡„‚‚ƒƒ‚‚‚‚‚‚‚‚„…ˆ‰ˆ‡ˆˆ…„ˆ€€€€€€€€€€€€€€€€…†††„ƒ€‚‚‚‚‚}„ˆƒƒƒ„ƒ„„„………††‡‡‡ˆ‡†…„ƒ€€€‚ƒƒ„{vpnptvvttuwy{}}~~~~~~€€~~}}}}~~~~~€|{xwvuxxxxwwuuuvvxy|~€€~}}~€~‚ˆ‹ŠŠŠŠ‰ˆ‡……‹Š‰ˆ‡ˆ‰Š…z|}zxw|€|xvv{„ƒ‚ƒƒ‚‚€€€€€€€€€€€€~€~~~~€~}}~~~€‚‚€€~}||||~~}}||{|}}|{{{{||}ƒ„„……………„ƒ‚‚€‚‚‚€~~~}}}|||}}}}}}}}{|}~~|{z{zz}ƒƒ‚‚‚‚ƒ„„†…ƒƒƒƒƒƒ‚‚ƒ‚„„ƒ|{{zz{}|z{~€€€€€€€€€€€€€€€€€€€‚‚‚‚„…†‡…ƒ€}€€~~‚‚‚‚‚‚‚‚„„ƒƒ€€ƒ„„……†††„……††‡‡‡††††††††…†…†…†…†…††‡‡ˆ‡‡………………………†ˆŠŒŽŽŽ‘Œ‰…ƒ†††
†††††‡ˆˆ†‚}xuxxxyxxxxuwy|}|xv|~€ƒ}z{~‚ƒƒ€{wyyyxz{{zz{}€€€‚‚ƒ„ƒƒ‚‚ƒƒƒƒ‚ƒƒƒƒ„ƒ„„…„………†††……„„„ƒƒƒ„„„……………………†„ƒ…‡ˆ‡†‡ˆ‰ŠŠŠŠŠŠ‰†…‚€€‚‚‚€ƒ………………………††††††††…………†††††‡‰‡„‚‚ƒƒ‚‚‚‚‚‚ƒ†‡‰ˆˆˆˆ…‚„ˆ€€€€€€€€€€€€€€€€ƒƒ‚€}yvtuuuvvuuuoruutrstssttvvwwtttttsssrssrqppqqqqqqqqqpnllnprrpqtwz}~~~€~~~~}}}}~~~~~~~}{xwvvuxxxwxvvuuuvwz|~€€~}}~€~‚‡‹‹ŠŠŠŠŠ‰‰ˆŠ‰‡††‡‰Š…z|}zxw}€|xvu{€„ƒ‚ƒ„‚‚‚‚€€€€€€€€€€€~€€~~~€€~~€‚‚€€~}}|||~}}|}||{}}|{{{{||}‚„„„„„…„„ƒ‚‚‚ƒ‚€~~~}}}|||}}}}}}}}|}~|{{zz}ƒƒ‚‚‚‚ƒ„„†…ƒƒ‚ƒƒ‚‚‚‚„…‚|{{zz{}|z{~€€€€€€€€€€€€€€€€~~~€‚„……ƒ‚ƒ‚ƒ‚ƒ‚†‡‰‰ˆ†ƒ†……„„ƒ‚‚‚‚ƒƒ„ƒ„„‡‡†……ƒƒ‚ˆ‡‡……„ƒƒƒƒ„„……††„„……††‡‡†††††††††…†…†…†………‡‡ˆ‡‡†……………………†‡‰ŒŒ‰…ƒ†††††††††‡ˆˆ„}uquvwxyxxxxxyz|{yw|~€‚|z{~‚„„{xxzxxz{zyyz|~€€€‚‚ƒƒ„ƒ‚‚‚‚‚‚ƒ‚‚‚ƒƒ„„„„……ƒ„……††††„„„ƒƒƒ„„………„…„…„†…„…‡‡‡…†‡ˆ‰ŠŠŠŠŠ‰‡„ƒ€€‚ƒƒ………………………††††††††…………†††††‡‰‡„‚‚ƒƒ‚‚‚€‚‚‚„…ˆ‰ˆ‡ˆˆ…„ˆ€€€€€€€€€€€€€€€€‚~{wurqqrsttsrqopqpnnpspppqrsssrrrqqqqqnoqponpqppoonmllqqpqppoonqtx{~~€€~~~}}}}~~~~~~~~}|zxvvvwxwwwwwutuvvxy|~€€~~~~€€~†Š‹ˆˆˆ‰‰‰ˆˆ‰ˆ‡††‡ˆ‰„~z{}{yx~€€|xuuz€ƒƒ‚„„‚‚‚‚‚€€€€€€€€€‚€€€€€€€€€€~~€€€~~€€€€€€€~}}}}}~~}}}}||}}|{{{{|{|~€‚ƒ„„ƒƒƒƒƒ‚‚€‚‚‚€~~~}}}|||}}}}}}}}~~~~}|{zz}ƒƒ‚‚‚‚ƒ„„†…ƒƒƒƒƒ‚‚ƒ‚„„ƒ|{{zz{}|z{~€€€€€€€€€€€€€€€€€„††…‡‡ˆˆ‡†…„……„ƒ‚‚„„„……††‡‰‰ˆ‡……„ƒ„„„„ƒƒ‚‚ƒƒƒ„„………ƒ„„……†††††††††††…†…†…†…†…††‡‡‡‡‡………†…†††ˆ‰‹ŽŽŽ‘‰†ƒ††††††††ƒ„‡‰†~wsvwwxxyxxxwvwxzzy|~€ƒ~|z{~‚„…|xyyxwyzzyyz|~€€‚‚ƒ„„ƒ‚‚‚‚‚‚‚‚‚‚ƒ„ƒ„„…„…‚‚„…††‡‡„„ƒƒƒ„„„……„…„„„„‡…„…†‡†……†‡‰ŠŠŠŠ‹Š‡…ƒ‚€€€€‚‚‚€ƒ………………………††††††††…………†††††‡‰‡„‚‚ƒƒ‚‚€‚‚€‚ƒ†‡‰ˆˆˆˆ…‚„ˆ€€€€€€€€€€€€€€€€|yusrstnoqrrqonpppmklorpppqqqrrppppqqqqmprqnmoqtsssrrqqtuusrpppoqvz}~€€€~~~}}}}~~~~~}}}|{ywvvxxxwvvxwusuuvwz|~€€€~€„‰Š‰‰ŠŠŠ‰ˆˆ‰ˆˆˆ‡ˆˆˆƒ}y{}|zy~€‚€|wutzƒ‚‚„„ƒƒ‚‚‚‚€€€€€€€€‚€‚€€€€€€€€€~~~€€}}~€~€€~}}~~~~~}}}}}|{{{{|{|~€‚ƒ„„‚ƒƒƒƒ‚‚‚ƒ‚€~~~}}}|||||||||||~}||||}{zz}ƒƒ‚‚‚‚ƒ„„†…ƒƒ„„„ƒ‚‚ƒ„‚„…ƒ|{{zz{}|z{~€€€€€€€€€€€€€€€€‡„‚ƒ…†„……………………††„ƒƒƒ„„‚‚‚€€€‚‚‚ƒƒƒ„„……………………€‚‚‚‚‚‚‚ƒƒ„„„ƒƒ„„……†††††††††††…†…†…†………†‡ˆ‡‡‡……††††‡‡‰‹ŽŽŽŽŽŽ“‘‘‘ŽŠ†„††††††††„„‡‰†ywwwwvwwwwywuuwxyy|~€‚~{z{‚……‚|yyywwyyyzz|}~~€€‚‚ƒ„„ƒ‚‚‚‚‚‚‚‚ƒƒ„„„„……‚‚„……………„„„ƒƒƒ„„………„„„„ƒ‡†……‡‡†„…†‡‰ŠŠŠŠ‹Šˆ…ƒ‚
€€€€€€‚‚ƒ………………………††††††††…………†††††‡‰‡„‚‚ƒƒ‚‚€€‚‚€‚‚‚„…ˆ‰ˆ‡ˆˆ…‚„ˆ€€€€€€€€€€€€€€€€usolkmprpqsuusqpqrsqonpsuuuuuuuuppqqrrsspsusomorpppqqqqqtttsqqrsprw{~€€€€~~}}}}~~~~~}}}|zxvvwyzxwvvwwusuvvxy|~€~€€~€€~ƒˆŠˆˆˆ‰‰‰‰ˆŠŠŠ‰‰ˆ‡‡‚|x{}|{z~€‚€|wutzƒƒ‚„„ƒƒƒ‚‚‚€€€€€€€€‚€€‚€€€€€€€€€€~~~~||}~~}~€~~~}~~~~~}}}|{{{{|{|~€‚ƒƒ„ƒ„„„„„ƒƒ‚‚‚€~~~}}}|||||||||||~}|zz{|}{zz}ƒƒ‚‚‚‚ƒ„„†…ƒƒ………„ƒƒ„…‚„„ƒ|{{zz{}|z{~€€€€€€€€€€€€€€€€Œ‰…„†…‚~‚‚‚‚‚‚‚‚…ƒ€‚ƒ€€€€€€‚ƒ„…††€€‚ƒ„„‚‚ƒƒƒ„ƒƒƒ„„………††††††††…†…†…†…†……†‡‡‡‡‡…†††‡‡‡‡‹ŒŽŒŽŽŒŽ‘”‘’’‘Ž‹‡…††††††††ˆ‡‡ˆ…~zyxxwwwxxy|zwwwxxw|~€ƒ}{y{~ƒ……‚|yyywwyyyz{|}~~€€‚‚ƒ„„ƒ‚‚ƒƒ„„„„……ƒƒ„„…„„„„„ƒƒƒƒ„„………„„„ƒƒ‡†……†‡†„„…‡ˆŠŠŠŠ‹Šˆ…ƒ‚€€€€€€€€‚‚‚€ƒ………………………††††††††…………†††††‡‰‡„‚‚ƒƒ‚‚€€‚‚‚„†ˆ‰ˆˆˆˆ…‚„ˆ€€€€€€€€€€€€€€€€zwsonoqrurppsutrsrppqrrqrtspnopqqrstuuuuvurqrrrrqqqrrsstssssssrrrvz||||}~~~~~~~}~€€}}}}}}}}~~}{ywxxxxxxxxxyyxxwvuwvuuw{~€~~~~}ƒ†ŠŠˆ‡†††††‡ˆ‰ŠŠŠ‰ˆ€|z|}{{|€{wuuy~‚‚‚„„ƒƒƒƒƒƒƒƒƒ~€€~€‚‚‚‚€€€€€€€€~~~~~~~~}}}€€€}}}}}}}}}}~~~€~|{zzz}{{}„……„„„„ƒ‚€€‚€€‚‚€~}}}||{{}}}}}}}}~~~~}}}}{zz}ƒƒ‚ƒƒ‚‚ƒƒ……ƒ‚ƒ„…„ƒƒ„…€‚ƒ~{zz~|xxzz{€€€€€€€€€€€€€€€€ˆ†ƒ€‚ƒ‚‚‚‚‚‚‚‚ƒƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒ‚‚‚ƒƒ‚‚‚‚‚ƒƒ‚‚‚„…†‡††…………††††††‡‡‡‡††ˆ‡‡‡‡‡‡‡‰ˆ‡†‡‰‹ŽŽŒŒŽŽŽ‘Œ‰‡††††††††‡‡‡†„€{wwxy{||zxzyxwwxyy~~~€‚€~{{~‚ƒ€{xyzyy{xwwwy{~€€€€ƒ„„„ƒ‚€ƒ‚‚‚‚ƒ„„„„„„„„ƒ„„„„„„…†…„‚‚ƒƒ„…„…„…„…„ˆ‡………†……~‚‡Š‹ŠŠŠŠŠŠ‡„‚‚‚‚‚‚€ƒ…„„„………††††††††††……„„„…†‡…‡ˆ‡„€‚‚ƒƒ‚‚€€€€‚„‡‰Šˆ‰Š‰„€„‹€€€€€€€€€€€€€€€€ywtrpppqsqnortrqqqpppppqqqqpnnpsuvwxxwvvtsqqrtuussssrrqqqrrrqqppruz}||}~~~~~~~}}}~€}}}}}}}}|}}|{yxvwwwwwwwwxxxxwwvuwvuvwz~€~~~€~}‚†‰ŠŠˆ‡………††‡ˆ‰‰‰ˆ‡€|z||{{|{wvvy‚‚ƒƒ‚‚‚‚‚‚‚‚‚‚€€€€€€€€€€~~~~~~~~~€€€€€€~~~~~~~~}~~~~€}|{zzz}|{}€„……„„„„ƒ‚€€‚€‚‚€}}}}||||}}}}}}}}~~~~}}}}{zz}ƒƒ‚ƒƒ‚‚ƒƒ…„ƒ‚ƒƒƒƒƒƒƒƒƒ„‚~{z{||yz|{z€€€€€€€€€€€€€€€€‰‡„‚€‚‚‚‚‚‚‚‚‚ƒƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒ‚‚‚‚ƒƒ‚‚‚„…†‡††………††††‡††‡ˆˆ‡††‡‡‡‡‡‡‡‡†††‡‰ŒŽŽŽŽŒŽŽ‘‘Œˆ†††††††††‡‡‡†…|xvwy{|}}|zzxwwxxy~~€‚€}{{~‚‚€{wxxwwywwwxy|~€€€€‚ƒƒ„ƒƒƒ‚‚‚‚ƒ„„„„„„„„ƒ„„„„„„…†…„ƒ‚ƒƒ„„…„…„…„…‡†……††…„‚‡Š‹ŠŠŠ‰ŠŠ‡„‚‚€‚‚‚‚€ƒ…„„„………††††††††††……„„„…†‡…‡ˆ‡„€‚‚ƒƒ‚‚€€‚„‡‰Š‡ˆŠ‰„€„Š€€€€€€€€€€€€€€€€xwusqppoqomnpqqonoqppopponooonrv}|{y
xvtsrqpprsttsrqppqrsqrsrqoopquz|}|}~~~~~}~}}}}~}}}}}}}}{{zyxwwvwwwwwwwwwwxxwvvuwvvvxz}~~~~€€„‰‹Š‰†„„…†…†‡ˆˆ‡‡†€|z|}{{|‚{xvv{€ƒ‚‚‚€‚‚€‚€€€€€€€€~~~~~~~~€€~~~€}|{z{{}{{}ƒ……„„„ƒƒ€‚€€‚‚€}}}}}|||}}}}}}}}~~~~}}}}{zz}ƒƒ‚ƒƒ‚‚ƒƒ…„ƒ‚‚‚ƒ‚‚ƒ‚‚„…‚{{{z{y{}{y~€€€€€€€€€€€€€€€€Šˆ†ƒ‚€€€ƒƒƒƒƒƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒ‚‚‚‚‚‚ƒƒ‚‚‚‚„…†‡††……††††‡‡†‡ˆˆˆˆ‡†‡‡‡‡‡‡‡‡††ˆŠŒŽŽŽŽŽŽŽŽ‘‘‘Ž‹ˆ………………………‡‡‡‡…‚}zwxzz{|}~{zyxwwxx€~~€‚€}z{}€‚‚zwwywwyxxyz{}€€€‚ƒ„„„ƒƒƒ‚‚‚‚ƒ„„„„„„„„ƒ„„„„„„…†…„ƒƒƒ„„…„…„…„…„………†‡†…ƒ€ƒ‡ŠŠŠŠŠŠŠŠ‡„‚€‚‚‚ƒƒ…„„„……………………………………„„„……†‡…‡ˆ‡„€‚‚ƒƒ‚‚‚‚‚„‡‰Š‡ˆ‰‰„„‰€€€€€€€€€€€€€€€€zwsommnopnmnoqonnopqppopnnnooqty}{xutsssqpoooppprqommnopprsronnppuy||}}~~}~}}}}|||}~|||}|}||zzyxwwwwwwwwwwwwwwwxwwvuwwvwxz|~~~~~~€~~‚‡ŠŠ‡…ƒ…††…†‡ˆˆˆ‡†€|z||{{|€{ywx{€ƒ‚€€~€€€‚€€€€€~~€€}~}~€‚€€€€€€€€~~}|{{{{}|{}€„……„„„„‚‚€€‚€‚ƒ€}}}}}}}}}}}}}}}}~~~~}}}}{zz}ƒƒ‚ƒƒ‚‚ƒƒ„„ƒ‚‚‚‚‚‚‚‚‚‚ƒ……ƒ|zzzzxy|zy~€€€€€€€€€€€€€€€€Š‰‡…‚€ƒƒƒƒƒƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒƒ‚‚‚‚‚ƒƒ‚‚‚‚„…†‡††…††††‡‡‡‡ˆˆ‰‰ˆˆ‡‡‡‡ˆˆˆˆˆ‰Š‹ŽŽŽŽŽŽŽŽ‘‘ŒŠ‡………………………‡‡‡‡†„€}zzz{zyz}{zyxwwxx€€~~}zz}€‚~yvyzzz}}}}~~€€€€€€‚„„…„„ƒ‚‚‚‚ƒ„„„„„„„„ƒ„„„„„„………„„„„„„„…„…„…„…„„…‡ˆ‡„‚„ˆŠŠŠŠŠ‰ŠŠˆƒ‚‚‚€‚‚‚‚€ƒ…………………………………………………„„„…†‡…‡ˆ‡„€‚‚ƒƒ‚‚‚‚‚‚‚„‡‰Š‡ˆ‰‰…‚„ˆ€€€€€€€€€€€€€€€€|xqljkmponnnpppnooppqqponopoqtxyzwtqqqqqqponoonmmnnoonnmnoqpnlmnpty|}}~~~~~~}}}|||{||~~{{{{|{{{zyxxwwxxwwwwwwwwwwxwxwvvxwxxz{|}~~~~~€~}€…‰ˆ…‚ƒ†ˆ†††ˆˆ‰ˆ‡‡€|z|}{{|‚€}yxy|„‚€€€€€€€‚‚ƒ‚‚€€€€€€}}}}~‚~~~~~}{{{{|}{{}ƒ……„„„ƒƒ€‚€€‚‚€}}}}~}~~}}}}}}}}~~~~}}}}{zz}ƒƒ‚ƒƒ‚‚ƒƒ„ƒ‚‚‚‚‚ƒ‚ƒƒƒ‚‚ƒƒƒ……‚zyy{zvvxwx~€€€€€€€€€€€€€€€€‰‰ˆ†…‚‚‚‚‚‚‚‚‚ƒƒƒƒƒ‚‚‚ƒ‚ƒ‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒ‚ƒƒƒ„ƒƒ‚ƒƒƒ‚‚ƒƒƒ‚„…†‡††††††‡‡ˆ‡ˆˆ‰‰‰‰ˆˆˆˆˆˆ‰‰ŠŠŒŽŽŒŽŽŽŽŽŽŽŽŽŽ‹ˆ†………………………‡‡‡‡‡…‚€{yyzzxy{{zyxwwxx€€~~|zz|}xwy|}‚‚‚‚€€€€€‚ƒ„………„ƒ‚‚‚‚ƒ„„„„„„„„ƒ„„„„„„………………„„ƒ…„…„…„…„ƒ„…†‡‡„‚ƒ…ˆŠŠŠŠ‹ŠŠŠ‡„‚€‚‚‚ƒƒ………………………………………………„„„……†‡…‡ˆ‡„€‚‚ƒƒ‚‚‚‚‚‚‚„‡‰Šˆˆ‰Š‡ƒ„ˆ€€€€€€€€€€€€€€€€}ztommnponnopqonoonpqrqpoppqtxyywtrqqpompooooponllnoopppmoppnmmnosx|}~€~~~~}}|||{{{{|}}zzzzzzzzxxxxxxxxwwwwwwwwwxxxxxwwyyz{{}}}~~~~~~|~‚†‡„€…‡††‡ˆ‰‰‰ˆ‡€|z||{{|‚€}zyz|„ƒ‚ƒ‚‚‚€€€€€‚‚‚€‚‚€€€€€~}|||~
€~~~~~~}}~~||{{||}|{}€„……„„„„ƒ‚€€‚€‚ƒ€}}}~~~~~}}}}}}}}~~~~}}}}{zz}ƒƒ‚ƒƒ‚‚ƒƒƒƒ‚‚‚‚ƒƒ‚ƒ„„ƒ‚ƒ„ƒ„„‚}ywwzxtsvuw}€€€€€€€€€€€€€€€€‡‡‡‡†„‚‚‚‚‚‚‚‚‚ƒƒƒƒƒ‚‚‚ƒƒ‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒ‚ƒƒ„ƒƒƒƒƒƒƒ‚‚ƒƒƒ‚„…†‡†††††‡‡ˆˆˆˆ‰ŠŠŠŠ‰ˆ‰‰‰ŠŠ‹‹ŒŒŽŽŽŽŽŽŽŽŽŽŽŽŽŒ‹‰‡††……………………‡‡‡ˆˆ‡…ƒzwvz|{z|zyxwwxyy€€€}~€~|yy|€}xvy|}‚ƒƒƒ‚€€€€‚ƒ„………„„ƒ‚‚‚‚ƒ„„„„„„„„ƒ„„„„„„……†††……„ƒ„…„…„…„…„ƒ„…††…ƒ„†ˆŠ‰‰Š‹‰ŠŠˆƒ‚‚€‚‚‚‚€ƒ………………„„„…………………………„„„…†‡…‡ˆ‡„€‚‚ƒƒ‚‚‚‚‚„‡‰Š‰‰Š‹‰……ˆ€€€€€€€€€€€€€€€€}}{zwsponnoopponnnnnopppqporvyywrpoopqpnonnopponponmmmnonnooooooosx|}~€~~~}}|||{{{z{{|}xxyxyxyxvwxyyxwvwwwwwwwwxxyyyyxxzz||}~~~~~~~~~~|}ƒ‡†€}†‡††ˆˆ‰ˆ‡‡€|z|}{{|€‚€}{zz{„„ƒ……ƒ‚‚‚‚‚‚‚‚€€~€€~‚‚‚€~~|{{}~€~~~}}}}|~}|{{{||}{{}„……„„„ƒƒ€‚€€‚‚€}}~~~~}}}}}}}}~~~~}}}}{zz}ƒƒ‚ƒƒ‚‚ƒƒƒƒ‚‚‚‚ƒƒƒƒƒƒƒƒƒƒ‚ƒƒ€|xvvwvssvtuz€€€€€€€€€€€€€€€€„…‡ˆˆ†…ƒƒƒƒƒƒƒƒƒƒƒƒƒƒ‚‚‚ƒƒƒ‚‚‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒƒƒƒ„„ƒƒƒ„ƒƒ‚‚ƒƒ„‚„…†‡††††‡‡ˆˆˆˆ‰‰Š‹‹Š‰‰ŠŠ‹‹ŒŒŽŽŽŽŽŽŒŒ‹Š‰ˆ‡†††††††††††‡‡‡ˆ‰ˆ‡…|ww|€}{{yyxwwxyz€~}€~{yy|€€}yvxzz{}}~€‚€€€€„…………„ƒƒƒ‚‚‚‚ƒ„„„„„„„„ƒ„„„„„„………†††…„ƒ…„…„…„…„„ƒƒ„…†…„…‡‰‰‰‰Š‹ŠŠŠ‡„‚€‚‚‚‚ƒ…††………„„„„„„„„„„„……„„„…†‡…‡ˆ‡„€‚‚ƒƒ‚‚€€‚„‡‰ŠŠŠ‹‹‡†ˆ€€€€€€€€€€€€€€€€|‚ƒ€yrmnnnopponmnnnmnoqronsxywuspmmnoomnmmnnonmnnmlllmmlmmnoonnnsx|}~~~}}}|||{{zzz{|}xxxxxxxxtuwxyxvuxxxxxxxxxxyyzyyx{|}~~~~~~~€~}~~}|~‚ˆ‰{~„‡…†‡ˆˆˆ‡†€|z|}{{|€‚~{z{{„„„††…ƒƒƒƒƒƒƒƒ‚€€‚‚€||}~~}||‚‚‚‚€~~~€~|{{|}~}}}||||~}||{||}}{{}„……„„„„ƒ‚€€‚€€‚‚€}}}~~}}}}}}}}~~~~}}}}{zz}ƒƒ‚ƒƒ‚‚ƒƒƒƒ‚‚‚‚ƒ„ƒ‚‚‚ƒƒƒ‚ƒƒ€{wuuttstwtsw€€€€€€€€€€€€€€€€‚ƒ†ˆ‰ˆ†…„„„„„„„„ƒƒƒƒƒ‚‚‚ƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒ„„ƒƒƒ„ƒƒ‚‚ƒƒ„‚„…†‡†††‡‡‡‡ˆˆˆ‰ŠŠ‹‹ŠŠ‰Š‹‹ŒŽŽŽŽŽŽŽŽŽŽŽŽŽŽŒŒŒŒŒŒŒŒŒŒ‰ˆ‡††††‡††††††††‡‡‡ˆ‰‰ˆ†€zx~‚zyyxwwxyz{€~}~€~{yy{~€€~yvwwvvxxy|€€€€€€€…†††…„ƒ‚ƒ‚‚‚‚ƒ„„„„„„„„ƒ„„„„„„……††‡†…„ƒ„…„…„…„……„ƒƒ„…†……‡‰‰‰‰ŠŒ‰ŠŠ‡„‚‚‚‚‚‚€ƒ…††………„„„„„„„„„„„……„„„…†‡…‡ˆ‡„€‚‚ƒƒ‚‚€€€€‚„‡‰Š‹Š‹Œˆ‡‰€€€€€€€€€€€€€€€€‚†…}uroooooooonmmmlllkhkpw||yvssrqppoonnnmllkkppoonmmlmmmnnnnnkqy~}|~~~}|{{{zzyyxwwwwwwwxxxxwwxxxxwwvvvwwwwwwxyyxy|~€~||~~}~~~~~~~~}}~~}}‚‡†}ƒ†…†‰ˆ„…ˆ‚|{}|{|{|€}zxx}‚…„„‡‰ˆ…„„ƒ‚€€€€€€{|||}}~~€€~~~}|||||€€€€€~}||||}||{{{{{z{}~‚„……„„ƒ‚€€‚€‚‚€~~€~}}~€~}|~}}}}|z|{{}€‚ƒ‚„ƒƒ‚‚‚ƒƒƒ„ƒ€ƒ„ƒƒƒƒƒƒƒ
ƒ‚ƒ„|xvvwvvxxvx|€€€€€€€€€€€€€€€€‡…{{†Š…ƒƒƒƒƒƒƒƒ„„„„„„„„†„‚‚‚‚‚‚‚‚‚„„„„„„„„„„„„„„„„‚‚‚‚‚‚‚‚ƒƒ„„…†‡‡…††‡ˆˆ‰‰ŽŽŽŽŽŽŽŽŒ‹Œ‹ŠŠŠ‰…‚„ˆ‰‡………‡††††††…‡‡‡‡‡‡‡‡†€{z‚{xxyyyz{|~~~}~|yy{~}xuvxwxz}~‚„ƒƒ‚‚‚‚‚‚‚‚‚ˆ‡‡‡††……ƒ‚ƒ‚ƒ‚ƒ‚ƒ„„„„„„ƒ„„„……„„„„„…†‡‡‡‡……………………„„………………ˆ‰‰‰‰‰‰Š‹Š‰…‚€€€€‚‚‚ƒ†……„„„„„„„„„……………………††††…‡ˆ‡„‚‚€€€‚‚ƒ…„‚€‚ƒƒˆŠ‰‰‰‰‰‰‰‰‰€€€€€€€€€€€€€€€€|~†ˆxtppppppppttssrrrrpquz|{vrssrrrqqqsssssrrrtttsrrqquuuttssspv|€€~~~|{zxxwwwwwwwwwwwwxxxxwwxxxxwwvwwwwwwxxyzzyz}€~||~~~~~~~~}}~~}~€…ˆ„}}‚††‡ˆ‡…†ˆ‚|{~|{|{~ƒ‚|zy|„ƒƒ…†…ƒƒ‚€~€€€€}~~~~~~~}|||}{{|~€€~}||||||{{{{{{{|}ƒ„………„ƒ‚€‚€‚‚€~~}~€€~}||}~~}}}}|{|{{}€ƒ‚‚ƒƒ‚‚‚‚ƒ„…„€‚„„ƒƒƒƒƒƒƒƒƒ„„}xwvxwwxwvw{€€€€€€€€€€€€€€€€ˆ‡}zƒ‰‡…………………………………………‡…ƒ€ƒ„„„„„„„„„ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ……„…„…„…‚‚ƒ„…†‡‡…†‡ˆŠ‹ŒŽŽŽŽŽŽŽŽŽ‹ŠŒŠ‰‰‰ˆ„‚„‡‰‡†……††††…†……‡‡‡‡‡‡‡‡…ƒ}z}‚|zxvwz|{y~~~}~|yy{~}xuxyyz|}€‚ƒƒ‚‚‚‚‚‚‚‚‚‚……„„„„ƒƒ‚ƒ‚ƒ‚ƒ‚ƒƒ„„„„„„ƒƒƒ„„„„ƒƒƒ„…††‡††……………………„„„……†††ˆ‰‰‰‰‰‰Š‹Š‰†‚€€€€€‚‚‚ƒ†…„„ƒ„„„„„„„„……………………†††…‡ˆ‡„‚‚€‚€€‚‚ƒ„„€‚ƒƒˆŠ‰‰‰‰‰‰‰‰‰€€€€€€€€€€€€€€€€{x{…‹…zsrrrrrrrrsssrrrqqsux|~|wszzzz{{{{zzzzz{{{}||{{{{zyyyxwvvvw{€~€~}{ywvuwwxwxxxxwwwwxxxxwwxxxxwwwwwwxxxxy{|{{{~€~||~~~~~}|}~~}~‚ˆ‡|…‡‰ˆ‡‡‡†‚||~|{|{‚„ƒ€}{z{€ƒ‚ƒ„ƒ€~~}€€€€€€€€€€€€€€€€~~~~~€~}}}}zz{|~~€€~}||||}||{{{{{{|~‚ƒ…………„„ƒ‚‚€€‚‚‚€€~}~~€~|zz|~}}}}|{|{{}‚ƒ‚‚‚‚‚‚…†…‚ƒ„„‚‚‚‚ƒ‚‚‚ƒ„…‚~zxxyxxyxuwz€€€€€€€€€€€€€€€€~‡‰€{ˆ‰††††††††††††††††ˆ†„‚‚ƒ„†„„…„…„…„…„…„…„…………………………„„„„„„„„‚ƒ…†‡‡‡ˆ‰‹ŽŽŽŽŽŽŽŽŽŽŽŽŽŽŽŽŽŽŽŒ‹Š‰Š‰‡‡ˆ†ƒ€€„‡‰‡†…††††……………‡‡‡‡‡‡‡‡……€yzƒ€{xvvy{zx}~~~}}|yy{~}xvx{z|~~‚ƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚€ƒ‚ƒ‚ƒ‚ƒ‚ƒ„„„„„„ƒƒ„„„„„„ƒƒƒ„…†…†………………………„„……††††ˆ‰‰‰‰‰‰ŠŠŠ‰…‚€€€€€€‚‚‚ƒ†…„ƒ„ƒ„„„„„„…………„„…………††…‡ˆ‡„‚‚€‚€€€‚‚„ƒ‚€‚ƒƒˆ‹‰ŠŠŠŠŠŠŠŠ€€€€€€€€€€€€€€€€{wz…Š„xpmmmmmmmmoooonnmmprw}}zyzz{{|}}{{zzyyyx{|{{{{{{{{{{{{{{z|~~|{}|{zywvuuyyxyxxwwwwwwxxxxwxxyxxwwwwwxxyyz{|}}|}€ƒ€~||}~€€€~}|}~~}|~~‚‡ˆƒ~|ƒˆŠˆ‡ˆˆ†‚||~||||~ƒƒ|zzz‚‚„†…€€~~€€€€€€€€€€€€€€€€~~~~~~~€~~~~}~}}}}}}€~}||||||{{{{{|{|}€„…†………„ƒƒ‚‚‚€‚‚€}~€~~~~~~~~~}}~~}||{{}€ƒ‚‚‚‚ƒ„†…ƒ‚ƒƒ‚‚‚‚‚‚‚‚ƒ……ƒ~{yyywxyxvx|€€€€€€€€€€€€€€€€€†‡„ˆ‡††††††††…†…†…†…††…ƒ‚„†ƒƒ‚ƒ‚ƒ‚ƒƒƒƒ„ƒ„ƒƒƒƒƒƒƒƒƒƒ€€€€‚ƒ…†‡ˆ‹‹Œ‘‘ŽŽŽŽŽŽŽŽŽŽ
ŽŽŽŒŒŒ‹Š‰ˆˆ‰ˆ††‡…‚€ƒ‡ˆ‡†††…†…………„…‡‡‡‡‡‡‡‡†…{y~‚ƒ{zyxvwxy}~~~}}€|yy{~}yvxzzz}€€€‚‚‚‚‚€€€€€€€€‚ƒ‚ƒ‚ƒ‚ƒƒ„„„„„„ƒ„………………„‚ƒ„………„„……………………„„……†‡‡‡ˆ‰‰‰‰‰‰ŠŠŠˆ†‚€€€€€‚‚‚ƒ……„„ƒƒƒ„„„„…„………„„„…………†…‡ˆ‡„‚‚‚€€€‚ƒƒ€‚‚ƒˆŠ‰ŠŠŠŠŠŠŠŠ€€€€€€€€€€€€€€€€}y{ƒ†}rknnnnnnnnpooonnnnmpv|‚ƒ~yyzz{||}~~}|{yyx{{{{{{||{{|}~~xz{{xwy{xwwwwvvvwwwvwvvvwwwwxxxxxxyyyxxwxxxyzz{{|}~~}~„€~||~~€€€€~}||}~}||€„†‡‡{†‰ˆ‡‰‰†ƒ||~}|}|}‚~zyxz‚‚ƒ†ˆˆ€€€€€€€€€€€€€€€€€€€€€€~~~~~~~€€~~~~~~~~€~}||||}||{{{{{{|}ƒ……………„„ƒ‚‚‚€€‚‚‚€~~€~}€€€€}|~}}~~}||{{}‚ƒ‚€‚ƒƒ……‚‚‚‚‚‚‚‚ƒ„…ƒ~{zzwvwyyxz~€€€€€€€€€€€€€€€€…„‚„ˆˆ„……………………„„„„„„„„„ƒƒ…ƒƒƒƒƒƒƒƒ€€€€€€€€€€€ƒ„†ˆ‰ŠŽŽŽŽŽŽŽŽŽŽŽŒ‹‹Š‰ˆˆ‡‡‰‡†††…‚‚†ˆˆ‡†‡†………………„ˆˆˆˆˆˆˆˆ†‚~|zz…}|zwvvxy}}~}|}~€|yy{~~yvwyxy{€‚‚€€€€ƒ‚ƒ‚ƒ‚ƒ‚ƒ„„„„„„ƒ„„…………„„ƒƒ„„…„„ƒ……………………„„……‡‡ˆˆˆ‰‰‰‰‰‰Š‰‰ˆ…‚€€€€‚‚‚ƒ…„„ƒƒƒƒ„„„„„…………„„„„……………‡ˆ‡„‚‚‚‚€€‚ƒ‚€‚ƒƒˆ‹‰ŠŠŠŠŠŠŠŠ€€€€€€€€€€€€€€€€~{|€~ulilllllllloonnmmmmnqv|~zxxxyyzzzzzyxxwvvuuuvvwwwstttuuvvsvyywvvwvvvvvwwwuvvwwxxxwwwwxxxxxxxyyyxxxxyzz{{||~~}~„€~||~~€€€€}|{|}~}|}ƒ†…†ˆ„}~‚†ˆˆ‰‰ˆƒ}|}|}||€}zxw{€ƒƒƒ†ˆ‡€€€€€€€~~€€€€€€€€~~~~~€~~~~~€€‚€~}||||||{{{{{{z{|~€‚„………„„ƒƒƒ‚‚€‚‚€~~}~€€€~~~~}}~~}|{{}€ƒ‚‚‚ƒ„ƒ……‚€€‚‚‚‚‚‚‚‚ƒ„‚~{zzvuvyyy{€€€€€€€€€€€€€€€€€‡ƒ€‚ˆ‹ˆ‚„„„„„„„„ƒƒƒ„ƒ„ƒƒƒ€ƒ…††††††††……………………††††††††……………………‚ƒ„†ˆŠŒŒŽŽŽŽŽŽŽŽŽŽŽŽŽŒŒ‹‹‹Š‰ˆ‡‡‡‡‰‡†††…‚~‚†ˆˆ‡‡ˆ††…†…………ˆˆˆˆˆˆˆˆ…~{|zx{ƒ‚~xwwyxv|}}}||~€|yy{~}yvxzz{}‚‚€€‚ƒ‚‚‚‚ƒ‚ƒ‚ƒ‚ƒƒ„„„„„„ƒƒƒƒ„„ƒƒƒƒ„„…„„„ƒ……………………„„…†‡ˆˆ‰ˆ‰‰‰‰‰‰Š‰‰ˆ…‚‚€€€‚‚‚ƒ…„ƒƒ‚ƒƒƒ„„„…„………„„„„„…………‡ˆ‡„‚‚‚‚€€€‚€‚€‚‚ƒˆŠ‰ŠŠŠŠŠŠŠŠ€€€€€€€€€€€€€€€€}{||vmiknnnnnnnnmlllkkkjnpu{{wqppppooolmmnnoppmnnoppqqrrqonmlknsy{zwvvwwwvvvvvvvwwxxyywwwwxxxxxxyyyyxxxyyz{||}|}~~}~ƒ€~||~~€€€}|{|}}|{„‡††‡…|~ƒˆ‰ˆˆŠƒ}}}|~|}€‚~{yy}„‚ƒƒ‚€€€€€€€€€€€~~~~€~~}~~€€€~}||||}||{{{{{yz|}€ƒ„…„„„ƒƒƒ‚‚€‚‚‚€€~}~~~~}|}~€~}~~}|{{}€‚ƒ‚‚ƒ„„ƒ…†ƒ‚€‚‚‚‚‚‚‚‚€‚‚}zyywvwyyy{€€€€€€€€€€€€€€€€†ƒ‚„ˆ‰‡„„„„„„„„„„„…„…„…„ƒ‚€€‚…†††††††††‰‰‰‰‰‰‰‰ŠŠŠŠŠŠŠŠ‡‡‡‡‡‡‡‡ƒ„†ˆŠŒŽŽŽŽŽŽŽŽŽŽŽŽŽŽŒŒ‹‹Š‹Šˆ‡††††‰ˆ‡‡‡†‚€~‚†ˆˆ‡‡ˆ†††††………ˆˆˆˆˆˆˆˆ|yzyxy}„zwxyxw|}}}||~€|yy{~|xvy}~ƒƒ‚€€€‚ƒ€‚‚‚‚ƒ‚ƒ‚ƒ‚ƒ‚ƒ„„„„„„ƒ‚‚ƒƒƒƒ‚‚„„………„„ƒ……………………„„…†‡ˆ‰‰ˆ‰‰‰‰‰‰Š‰‰ˆ…‚‚€€€‚‚‚ƒ…„ƒƒ‚‚ƒƒ
„„„„…………ƒƒ„„„„………‡ˆ‡„‚‚‚‚‚‚€€‚€€‚€‚ƒƒˆ‹‰‰‰‰‰‰‰‰‰€€€€€€€€€€€€€€€€{z{zqhhnllllllllnmmmlllkkmsy~{xppponmmllmnpsuvwxxyzz{||{zxuromklry}|ywvyxxwvuttvvvvvvvvwwwwxxxxxxyyyyxxxyyz{|}}|}~~}~€ƒ€~||~~~€}|{|}}|{„‡‡††……z|ˆŠˆˆ‹ƒ}}}}~}„ƒ€|{z‚„}}}}~~~~~€€€€€€€€€~~€€~}}}~€€~~€~}|||||||{{{{{yy{}‚ƒ„„„ƒƒƒ‚‚‚€‚‚€}~€~~zz{{|~€~}~~|{{}€ƒƒ‚‚‚‚‚ƒ„…„†‡„‚‚‚‚‚‚‚‚‚‚‚‚€|zyyywxzyxz~€€€€€€€€€€€€€€€€„ƒ„‡ˆ†††……………………††††††††…„‚‚„†ˆ„„„„„„„„„„„„„„„„…………………………………………„…‡‰ŒŽ‘‹ŒŒŽŽŽŽŽŽŽŽŽŽŒŒ‹ŠŠŠ‹Šˆ‡††††Šˆ‡‡‡†ƒ€~†ˆˆ‡ˆˆ†††††††…ˆˆˆˆˆˆˆˆ~{xxxyyyƒ}ywwxy|}}|||~|yy{~{ww{€ƒ†‰ƒ‚€€‚ƒ€€€€€€€€€€€‚‚‚‚ƒ‚ƒ‚ƒ‚ƒƒ„„„„„„ƒƒƒƒ„„ƒƒƒ„„…………„„……………………„„…†‡ˆ‰Šˆ‰‰‰‰‰‰Šˆˆ‡…‚‚€€€‚‚‚ƒ„„ƒƒ‚‚ƒƒ„„„„…………ƒƒ„„„„………‡ˆ‡„‚‚‚‚ƒ‚€€‚€€‚ƒƒˆŠ‰‰‰‰‰‰‰‰‰€€€€€€€€€€€€€€€€}xtqonkhnnnmmkkjlmonmllmmqsuz}upppoonnmlnrv{‚ƒ€~|}€€~|~€|upmhmuyyyxyxxyyxxwwvvwwwwwwxxxwwxxxxxxyyyyyzzzz{{{{~~~€€€~~~~{|~~}€}}}zwwy€‡Š†…ˆ‡‚}z|…‹ˆ€zxz|}~}|{‚‚€~}|||†…zyz{~‚€~€‚~~~~~~~~~}~€€€€~~~~~~~}}}}}~}~~~~}|{{{||||{{{{{{yz|||}€‚……„„ƒ‚‚‚‚‚‚‚~~~~~~~~~~~~}~|{{}|{}€‚€‚‚€€‚ƒ……†…ƒ‚ƒƒƒƒƒ‚‚ƒ„„ƒƒ}{yyvtvwwz~€€€€€€€€€€€€€€€€†„ˆŒ‹……‹‡‡††††‡‡†††……„„„†…ƒ‚ƒ„ƒ„†††„€‚ƒƒƒƒ‚€‡†„‚‚‚‚‚}ˆˆƒ„‚„ˆŒŽŒŒŽŽŽŽŽŽŽŽŽŽŽŽŽŽŽŒŒŒ‹‹‹ŠŠ††††††††††††…„ƒ‚‚„…‡‡‡‡††………††‡†‡‡†‰ŽŽ…|x|}yvxƒˆ‡}uw‚‹‡„}||~xyz|}€€}wtw€Š‘‡ƒ€€‚ƒ‚€€€€€€€€‚ƒƒ€‚ƒ‡†„‚‚ƒ„…„„„„„„„„ƒƒƒ„„„„„†…………„„„†††……„„„„……†‡ˆˆ‰ˆˆ‰‰ŠŠ‰‰‰ˆ‡…ƒ‚€€€€€‚‚‚‚ƒ…„„„„„„„„…„…„…„…„ƒƒ„„……††…†ˆ†„€‚‚€€€‚€€‚‚‚‚‚„†‰‹ŒŒŒ‹‹ŠŠŠ€€€€€€€€€€€€€€€€|xronnmkhjklkkkkijlmlkjjloqsx~ynnmmnoppmpsx}€ƒ„ƒ}|~}vplhmtyyyxyxxxxxxwwvwwwwwwxxxxwwxxxxxxxyyyyzzz{{{||~~~€€€~}~~|}~€~~}{{yvvx|„ˆ†…ˆ‡‚{wx}‚€|yyz|}~}|{‚€~}||{†…~yy{{~~€€€€€€€€€~€€€€~~~~~~~}}}~€}~~~~~~||{{||||{{z{{{y{|||}€‚……„„ƒ‚‚€€€€~~~~€€€€|~€€~}}~}|{}€‚€€‚„„………ƒ‚‚ƒƒƒƒ‚‚‚ƒ„ƒ‚€}{xwyvuvwwz~€€€€€€€€€€€€€€€€ƒ„‡Šˆ……‰‡‡††††‡‡ˆ‡†…„„……†…ƒ‚‚ƒ„†……„ƒƒƒƒƒƒƒƒ‚€€ƒ‚€€‚‚‚|€‡ˆ„…ƒ‚„ˆŒŒŒŒŽŽŽŽŽŽŽŽŽŽŽŽŒŒŒ‹‹Š‰‰††††††††††††…„ƒ‚‚ƒ…†‡‡‡††………………†ˆ‰‰‹Žˆyzxuw}Š‰‚ƒŠŠ‡ƒ€}}}~yzz{|}~~|vruˆ‹‹†ƒ€€‚ƒ‚€€€€€€€€€€‚ƒƒ€ƒ‡†„ƒ‚ƒ„…„„„„„„„„ƒƒƒ„„„„„…………„„„„†††……„„„„……†‡ˆˆ‰ˆˆ‰‰‰Š‰‰ˆˆ†…ƒ‚€€€€€‚‚‚‚€ƒ…„„„„„„„„„…„…„…„…ƒƒ„„……††…‡‡‡ƒ€‚€€€‚‚€€€‚„…ˆŠŒŒŒŒ‹‹‹ŠŠ€€€€€€€€€€€€€€€€|wpllmmlhijjiikmiikmnmkjklnov}€~sqol
llmnoqv{‚„„ƒ€~~€€~||~~xpkhmuyzxyywwxxxwwvwwwwwwxxxxxwwxxxwwwxyzzzz{{{{|||~~~€€€~~~~}}~€€~{yyxuuw{‚‡…„…ƒ~yutwyxxzyz||}}||€€~~||{{€„‚}yz}|~€€‚€€~~~~€€~~~~~~~}~~€}}~~~~~}}{{{||||{{{{{{z{}||}€‚…„„ƒƒ‚€€€€€€€€~~~~€€€€€~|}}}||}‚‚€€ƒ…„……„ƒ‚‚ƒ‚ƒƒ‚‚‚ƒ„„ƒƒ€~{xwzwvwxxz~€€€€€€€€€€€€€€€€€…ˆ‡††‡‡‡‡‡†††‡‡‰ˆ†…„…†‡†…„‚‚‚ƒƒˆ†„‚‚ƒ…†„ƒƒ‚‚‚€€ƒ„‚‚|†ˆ…†„ƒ„‰ŒŒŒŒŽŽŽŽŽŒŒŒŒŽŒ‹‹Š‰ˆˆ††††††††††††…„ƒƒƒ…†‡‡‡‡†……„„„„†‰ŒŒŽŽ‰zxvv~‡Ž‘’ŒŽŽŒŠ†‚€€€zvy€†ˆ‡…ƒ€‚‚‚€‚ƒ‚€ƒ‡†…ƒƒƒ„„„„„„„„„„ƒƒ„„„„„„…„…„„„„„††…………„„„……†‡‡ˆ‰ˆˆ‰‰Š‰‰‰ˆ‡†„ƒ€€€€‚€‚€€ƒ…„„„„„„„„…„…„…„…„ƒƒ„„……††…†ˆ†„€‚€€‚ƒ€€€€€€€€‚ƒ…‡Š‹Œ‹‹‹‹ŠŠŠ€€€€€€€€€€€€€€€€}wojijiiklkiggjmiiikmnljijjmrywuqmkkklptx~„„„~}{}€~}~€‚€zqjhntyyyxywwwxwwwvwwwxwxxxxxxwwxxxvwwxyz{{{{{|||}}~~~€€€~}~~}~€€€€}yvwvutv|‚†„‚€|wxutuuuw{z{{||}|}~~}||{{z{}~}}~}~€€€€€€€€€€€‚€€}~~~~~~~~€~~~~~~}}}~~~~~~||{{||||{{z{{|{|}}|}€‚„„„ƒ‚‚‚‚‚ƒƒƒ‚‚~~~~~~~~~~~~}}}}}||~€ƒ‚€€‚„…„……„ƒ‚‚ƒ‚ƒƒƒ‚‚‚ƒ‡†…„~{z{xxyzy{~€€€€€€€€€€€€€€€€‡Š‡…‰Š‡ˆˆ‡‡†‡‡‡ˆˆ‡†††‡‡……„„ƒƒƒƒ††„ƒƒ„…†…„‚‚‡†„ƒ‚ƒƒ„‚ƒ€|~„ˆ‡‡…ƒ…‰ŒŒŒŒŒ‹‹‹‹ŒŒ‹Šˆ‡††††††††††…††††…„ƒ€ƒ…†‡††‡††……„……†‹‘‘’‘Žˆ{y{ˆ‘‘‘“ŠŒŠˆ…ƒƒƒƒ‡‡‡††………†…ƒƒ…†††„ƒ‚‚‚ƒƒƒƒƒƒƒƒ€‚ƒƒ€ƒ††……„„„„„„„…„…„„„„„„„…„…„„„„„„„„……………………„……†‡ˆˆ‰ˆ‰‰Š‰‰‰‰‡†…„ƒ‚€€€€€‚€€‚„„„„„„„„„„…„…„…„…„„„…„…………‡‡‡ƒ€‚€€ƒƒ€‚€€€€ƒ„†ˆŠŠŠŠŠŠŠ‰Š€€€€€€€€€€€€€€€€~xqkihgejkjihgikigggjjjiihhjnszzxtpnlllqtzƒ„„ƒ|{|„‡ˆ‡}~€ƒ‚|qhhmuyzxyyvwwwxwwwwwwwxxxxxxxwwxxxwxxyz{||||||}}}}~~~€€€~~~~~~€€€€}xtuvttv{‚~|xuwuuvutvz{{{{||}}}||||{{{ywvy~‚‚€€€~~}~}~}~}~~€€}~~~~~~~~~~}~~~~~}~~~||}}~~~~~}}{{{|||{{z{{|||}~}}}€‚„„ƒƒ‚‚ƒ„„ƒ‚~~~~~~~~~~~~~~}€€~}}~ƒƒ‚€‚„…„„…„‚‚‚‚ƒƒ‚‚‚ƒ†††„‚}{}{z|}{|€€€€€€€€€€€€€€€€€ˆŒˆ‡‹‰‰‰ˆ‡‡‡‡‡ˆˆ‰‰‰ˆ‡‡†††††…„„ƒ„†‡‡†…„†„‚€€€‚ƒ‹‰†ƒ~~‚„{|ƒˆ‰ˆ†……ŠŒŒŒŒŒŒŒŒŒŒŒŒŒŒ‹‹‹‹ŒŒ‹‹Š‰‡†………………………………†††…„„€ƒ„††††‡††††††‡‡’’““’Œ…ƒˆŽ’““’’•“Ž‰ˆˆ‡‡‡ˆˆŠŠŠ‰‰‰ˆˆŒŽˆ„„…‚‚‚ƒ„„„„„„„„„ƒ„ƒ‚‚„†††††…„„…„…„…„……„„„„…„……„„„„…„…„……………………„……†‡‡ˆ‰‰‰‰‰Š‰‰ˆ†……ƒƒ€€€€‚€€€‚„ƒƒƒƒƒƒƒƒ…„…„…„…„„„„„……………†ˆ†„€‚€‚€ƒƒ€‚‚‚‚€€ƒ…‡ˆˆˆˆˆˆˆ‰ˆ€€€€€€€€€€€€€€€€}ytqomjggghiiihgjigghjjkhggjkmry|xrnjhptz€ƒ„‚€~}~ƒ‰Ž~~€„„~qghntyyyxyvwwxxxwwwwwxxxxxxxxwwxxxyyz{{||}||}}}~~~~~~€€€~}~~~~€€€}wstuutvwz|}{yxwvt
tvustw|{z{{|}~{{{{z{zzxusv|‚„ƒƒ‚‚ƒƒ~{{{{{{{{}|{}~€}}~}~}~}}~}}}}~~~~~}}}}}~|||}}~~~~~||{{||{{{{{{||}~~~}}‚„ƒƒ‚‚€€‚ƒƒ‚€~~~~€€€€~€~~}}ƒƒ‚€€‚ƒ…ƒ„„„‚‚‚ƒƒ‚‚‚ƒƒƒƒ}|}}€€~~€€€€€€€€€€€€€€€€€€†‰‰ˆŠ‹‹‹Š‰‰ˆˆˆˆ‡ˆŠ‹Š‰ˆ‡‡ˆˆ‰ˆ‡†…‚„†ˆˆ‡…„‡…‚€‚„‰‡ƒ{yww‚„‚{zˆ‹Š‡…‡ŠŒŒŒŒŒŒŒŒŒŒŒ‹‹‹‹‹‹‹‹‹‹‹‹‰‰ˆ‡†……„……………………„……††…„„€‚„…†††‡‡†††‡‡ˆˆŽ“”’“’‘‹††ŠŽŽ“”‘‘’’‡ˆˆ‰‰ŠŠ‹ˆˆˆ‰‰‰‰‰‘“‘‹…‚‚‚‚‚€ƒ………………………ƒ„……ƒƒƒ…†††‡††„„……………………„„„…„………„…„……………„„…………††„……†‡ˆˆ‰‰‰‰Š‰‰ˆˆ……„ƒ‚‚€€€€‚€€„ƒƒƒƒƒƒƒƒ„…„…„…„…„„„…„…„……‡‡‡ƒ€‚€€€ƒ‚€‚‚‚‚€€€‚„†‡………††‡‡‡€€€€€€€€€€€€€€€€|zyyzwsokiggiihfjjjiiijkighjifjq}zupmosz€ƒƒ€~}}~‚ˆŽ~~€„†rfhmuyzxxywwxxxxxxwwxxxxxxxxxwwxxx{{{||}}}}}}~~~~~~~€€€~~~~~~~~€€}wrruuuvvwyyxwwxxustuttw|{zzz|~zzzzzzzzwvvvy}‚„…„ƒ„……‚{{{{{{{{zyy{~~}}}}}}}}|||||}~~~~~~}|||}}||||}}~~~}}{{{||{{{z{{||~~~}}€‚ƒƒƒ‚€€€ƒ„„‚€~~~~€€€€€€€~{~}}‚ƒƒ‚€‚‚„ƒ„„ƒ‚‚€‚ƒƒ‚‚‚‚‚‚€‚€€ƒ‚€€€€€€€€€€€€€€€€€~€„‡‡…‡ŠŒ‹‹Š‰‰‰‰ˆ‰Š‹‹Š‰‰‰Š‹‹‹‰ˆ†„……†‡‡‡‡‡…~~‚„†„€|ywvv‚…‚{y€ˆŒŠˆ†‡ŠŒŒŒŒŒŒ‹Œ‹‹‹‹‹‹ŠŠ‹ŒŒ‹Šˆ‡‡††……………………………„……††……„€‚ƒ…………‡‡††††††‰”“‘’“’‘Œ‹ŒŽŽŽ‘’‘‘ˆˆ‰ŠŠŠŠŠ‡ˆˆ‰Š‹ŒŒ‘’Š„€‚ƒ‚€€ƒ†††††††††„…‡†„„…†…†‡‡‡†„ƒ……………………„„„„……………………††††„„„……†††„……†‡ˆˆ‰‰‰Š‰‰‰ˆˆ„„„ƒ‚€€€€‚€€ƒƒƒƒƒƒƒƒƒ…„…„…„…„………„…„„„…†ˆ†„€‚€€‚‚€€‚‚€€€€‚ƒ…†ƒƒƒ„„………€€€€€€€€€€€€€€€€{{|€€{wtngegiiggijjhghiighjgcelpty~€~}nsz€ƒ‚}{z{~„‡ˆ‡~~€…‡€rfhmuyyyxywwxxyyxxwxxxxxxyxxxwwxxx||}}}}}}}}}~~~~~~€€€~~~~~~~~€€~wqruuuvxxxwustv{vstuuvy}|zzz|~yyyyzzzzwyyxvx…‡…ƒ„††ƒ{|{|{|{|xwwz}~}}}}}}}}|{{{{|}~~~~~}|{|||||||}}}}~~||{{||{{{{{{||~}}‚ƒƒƒ‚€€‚…††…‚~~~~~~~~~~~~}}~€‚‚€}~}‚„ƒ‚‚‚€‚ƒƒ„„ƒ‚‚€ƒƒ‚‚ƒ„„…„„ƒƒ‚„„€‚€€€€€€€€€€€€€€€€|{†…‚‰Œ‹ŠŠŠŠŠŠŠŠŠŠŠŠŠŠ‹ŒŒ‹‰‡‡†„„„†‰Šˆ…~}‚……ƒ€~{zzz‚…‚{y€ˆ‹ˆ†‡ŠŒŒŒŒŒ‹‹‹‹‹‹‹ŠŠŠŠ‹ŒŒ‹Š‡‡†††……………………………„„…†††…„€‚ƒ„………‡‡†……………Š”’‘““ŽŽŽŒŒŽ’’‘–˜”‰ŠŠ‹Š‰ˆˆ‰Š‹ŒŽ‘ŽŽ’”‘ˆ‚ƒ‚€€ƒ‡‡‡‡‡‡‡‡‡…†‡‡……†‡…†‡ˆˆ†„ƒ……………………„„„…………………†††‡‡‡„„„……†††„……†‡ˆˆ‰‰‰ŠŠ‰‰ˆˆ„„ƒƒ‚€€€‚€€€~ƒƒƒƒƒƒƒƒƒ„…„…„…„……………„„„„…‡‡†„€‚‚€€€‚€€€€€€€€€€ƒ„…‚‚ƒƒ„„„€€€€€€€€€€€€€€€€}{zz{|{zyxtnhegiggggggggcddefgghbgov}€€z|}~‚†„~~|{}€ƒ„„€~}‚ƒ€}}~~|zxxzxxxxxxxxxxxxxxxxwwxxyzzz{{{{||}~~~~€€€€€~}~~~~~|yvuuuuuwxxxxwvuuuuttuvvzzzyz{|}{{zyxwvvw}xvz€‚„†‡†‚~{{zyz{{{z~xv|
€|z~~}}}~€|{}~|{||||||||||||{|}€€€€~~}}~~}}||{{{|}|z{}€€~~€‚…„ƒ‚€~}}„…„ƒ‚€}}~„†€€€~~}‚‚ƒƒƒ‚‚‚‚€€‚ƒƒ„…„ƒ‚ƒƒ„ƒƒ‚ƒ„…†………„‚€~}||||}‚€€€€€€€€€€€€€€€€~~~€‚…‡„†‰ŒŒ‹ŠŠŠŠŠŠŠŠ‰‰ˆ‡†…………„„„~ˆ‚€€€€~}€~{|€ƒ‚‚ƒ‡‹ŽŒŒŒŒŒ‹‹‹‹‹‹‹‹‹‹‹‹Œ‹Š‰ˆˆ‡††††………………†……„„„„„„…………€€‚†‡†…‡‡††††‡‡ŠŒ‘’‘’’ŽŽ‘‘Ž’’‹‰‰‰ŠŠ‡……†‰‹‘Œ‡‰’Žˆƒ‚‚‚‚ƒ„…†‡‰‰‡††‡‡ˆˆ†„ƒ…‡ˆˆ‡††…„„‡††††………††††††††……††††……„„„„„„„„„…†ˆ‰‰‰‰ˆˆ‰ŠŒ‹ˆ…„„ƒ‚‚€€‚‚€~}}~~~~~~‚‚‚ƒƒ„„„……………………„„„„„„„„ƒ„…„‚‚ƒ‚‚€€€€€‚‚‚€€‚„…†‹‡„„‡‰ˆ‡€€€€€€€€€€€€€€€€~~~~|wojptwwsnkijjjjjjjjmnnoppqqrtwyzywv{}}|€…„€zyx{~‚ƒƒ~~€ƒ„‚~}zwwyxxxxxxxxxxxxxxxxwwxxyzzz{{{{|}}~~~~€€€€~}~~~~~~|yvuuvuuwwxxwwvuvvuttuuv{{{{|~€~}|zywvvvz}|wux}ƒ…†…‚~{xwvvxxwv{vu|~}~}}}}~€€~zz|}|{||}|}|}|}}}||}~€€€€~~}}~~}}||{{{|||z{}€€~~€‚‚‚€~}}~€ƒ†‡ˆƒ‚€€‚~~}}}|}~€‚ƒ„‚‚‚‚€€‚ƒƒ„…„ƒ‚ƒƒ„„ƒƒƒ„……ƒ„„„ƒ‚€}zxy}…€€€€€€€€€€€€€€€€€€€ƒ‡ŠŒ††‡ˆ‰Š‹Œ‹‹‹‹‹‹‹‹‰‰‰‰‰‰‰‰†††††‡‡‡…„…†ƒ€…††…ƒ~~~{{~€€‚†‹ŽŒŒŒŒŒŒ‹‹‹‹‹‹‹‹‹‹‹ŒŒ‹Š‰ˆˆ††††††……………†……„„„„„„…………€€€‚…‡†…‡††……††‡ŠŒ’‘‘’’ŽŒŽŽ‘’‘Ž‹Š‰‰Š‰†„ƒ…‡Š‘‘ŠŒ’Šƒƒ‚‚‚„„…‡ˆŠŠˆˆ‡ˆˆ‰‰†ƒ‚„†ˆˆ‡‡……„„‡††††………††††††††……††††……„„„„„„„„„…†ˆ‰‰‰‰‰ˆ‰ŠŒ‹ˆ…‚‚‚‚ƒ‚‚€~~}||||||||€‚‚ƒƒ„„„„„„„„………………………††…‚€€‚‚€€€€€‚‚‚€‚ƒ…†„…‡ˆˆ†……€€€€€€€€€€€€€€€€{|}~~xqkpv}€}yusyyyyyyyyyzzz{{||~{yvtr~~|z}ƒ…‚xwvx}€‚‚~~~„…„‚€€~zwvwxxxxxxxxwwwwxwwwwxxyyzz{|{|{|}~~~~~~€€~}~~~~~|zwuuvvuwwwxwwvuvvuuuuvv}}}}~‚€~|zywwvxyxvuwy‚„„ƒ€}zvuttuvutuqrz€€€~}}|}~€}zy{}{z{}}}}}}}}~~}~~€€€€€~}}~~}}||{{{{|{{{}€€~€‚€€€~~}€~€ƒ†ˆ…………ƒ‚€€€€€€€€€~~}}|{{}~€ƒƒ‚‚‚‚€€‚ƒƒ„…„ƒ‚ƒƒƒ„……………„‚‚ƒ„„ƒ‚€~{y{}‚„€€€€€€€€€€€€€€€€€€€„‡‹‡…„ƒ„†ŠŒˆˆˆˆˆˆˆˆ„„„„„„„„ƒƒ„…†‡ˆˆ„„†ˆ†€~€‰‹‹‡‚}~‚‚€}zz|~}}€„ŠŽŒŒŒŒŒŒ‹‹‹Œ‹Œ‹Œ‹ŒŒ‹Š‰ˆˆ‡†††††………………………„„„„„„…………€€…‡‡†††…………††‰Œ‘’‘’’ŽŒ‹‹ŽŽ’’‘‘’‹ˆ‡††‡†„‚ƒ†‰ŒŽ‘Ž‘„ƒƒ‚ƒ„…†ˆ‰‹‹Š‰‰Š‹Œ‹ˆ„‚ƒ…ˆˆˆ††„„„‡††††……………………………„………†……„„„„„„„„„„…†ˆ‰‰‰‰‰‰‰Š‹Š‡„~~€ƒ„……‚€€~~||||||||~€€ƒƒƒƒƒƒƒƒ…„…„…„……†‡‡„€‚‚€€€€€‚‚‚€€€ƒ„…€„ˆˆ…‚ƒ…€€€€€€€€€€€€€€€€}||}~~|z|~~€ƒƒƒƒƒƒƒƒ€€€€€€€€‚~|zzzz€€}yz……{zxz|€€}~‚„………‚‚~zwvvxxwxwxxxwwwwwwwwxxxyzz{{|||||~~~~~~~€€~}~~~~~}zwuvvvvvwwxxwvvuuuuvwxy~~}~€€~|{zyxxwxxxxx€ƒƒ‚|zvuttuutsnlnw}}}|||}€}zy|}|{|~~}~}~~~€€€€€€~~}}~~}}||{{z{||z{~€‚‚~€‚€€€€€€„
ƒ€€„…‡‡†„‚‚‚‚‚‚‚‚€€}~~€€‚‚ƒ‚‚‚‚€€‚ƒƒ„…„ƒ‚ƒƒƒ„…†††„„€‚„„…„„€‚‚‚€€€€€€€€€€€€€€€€~~~€ƒ†ˆ„ƒ‚‚‚„†‡ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„„……††ƒƒ†Šˆ}}‡‹Œˆ€}~„‚|yyz{z{~ƒ‰ŒŒŒŒŒŒ‹Œ‹Œ‹Œ‹ŒŒ‹‹Š‰ˆ‡†††††…………………………„„„„„„…………‚€„‡‡††………„……†‰Œ‘‘‘‘’Ž‹‹ŒŽŽ‘““‘‘Œ‡ƒ‚‚ƒ……ƒƒ…ˆŠ‹Œ„„ƒƒƒ„…†ˆŠŒŒ‹‹‹ŒŽŠ†„…†ˆˆ‡‡……„„‡††††……………………………„„…………„„„„„„„„„„„…†ˆ‰‰‰‰Š‰‰Š‹‰…‚}~‚„„…‚‚€€~~~}}|}}~~‚ƒ‚ƒ‚ƒ‚‚„……„€‚‚€€€€€‚‚‚€€€‚ƒ„ƒ„„‚€ƒ†€€€€€€€€€€€€€€€€ƒ€}|~€‚‚||‚~~}~~yy„†‚}|~}|~€‚„„…†‚ƒ‚zwvwxxxxxxxxxwxwxwxwxxyyz{{{}|}|}~€~~~€€~}~~~~~}zwvvvvvvwxxxxwwttuvxz{}€~||{||€€~~}}{zyz{|{z‚‚€~|zvuttttsrjhltz|}€}||{|}~~{{}~}~~~~~~~€€€€€€€€~}}~~}}||{{y{|{{{~€‚ƒƒ~€‚‚‚‚‚‚‚ƒ„…„„ƒ€‚„†ˆ‡†ƒ‚……………………„„„ƒƒ‚‚‚‚‚‚‚‚‚‚€€‚ƒƒ„…„ƒ‚ƒƒ„„†††…„ƒ€ƒ„…………‚ƒ„………„„€€€€€€€€€€€€€€€€~}|}‚€‚ƒ„ƒ‚ƒƒƒƒƒƒƒƒ……„„ƒƒ‚‚ƒ‚…ŠŠƒ}|‚…ˆ…€}€ƒ„‚~{zzz{zz}‚‰ŒŒŒŒŒŒŒŒ‹‹‹‹‹‹‹‹‹‹‹‹Š‰ˆ‡†††††…………„„…………„„ƒ„„„„…………ƒ€€„†‡‡†……„………†‰‹‘‘‘Ž‹ŠŒŽŽ‘“’Žˆ‚€ƒ‡‡‡…‚ƒ…†ˆˆˆˆŒŽŒŽƒƒƒ‚ƒƒ……ˆŠŒŒ‹‹‹Œ‘‘‰‡‡‰ˆˆˆ††„„„‡††††……………………………ƒ„„„…„„ƒ„„„„„„„„„…†ˆ‰‰‰‰‹Š‰ŠŠˆ„€€‚‚‚€€€€~~}}}{|||}~~~€€€€€€€‚ƒƒ€‚ƒ‚‚€€€€€‚‚‚€€‚ƒ…‚€ƒƒ‚€€€€€€€€€€€€€€€€€~|}~~}}€~{xyz||||||||~~~}}|||~€€€‚zy~ƒ…‡„€~||~‚‚‚ƒ„‚‚{xxyxxxyxxxxxxxxxxxxxxyzz{{|}}}}}€€€~}~~~~~}{xvvwwvwxyyzyyxvvwxy|}~~|{z{{{€€€~}}}~~~}‚‚‚€~|{vussttsqkjnuyz{||{{{}~~{{~€€€€€€€€€€~~}}~~}}||{{yz{{{|~ƒ„ƒ‚€‚„„ƒƒ‚‚€‚…ˆ‰Šˆ‡ƒ„…††…„ƒ„„„„„„„„……„„ƒƒ‚‚„„ƒƒ‚€‚‚‚‚€€‚ƒƒ„…„ƒ‚ƒƒ„……………„ƒ‚ƒ……†……‡†„„ƒ…†‡€€€€€€€€€€€€€€€€‚}}~€~€‚…„ƒ€ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ……„ƒ‚€€‚ƒ‰Š„~~ƒƒ€„…}{{|||{|~ƒˆŒŒ‹ŒŒŒ‹‹‹‹‹‹‹Š‹Š‹‹‹ŠŠ‰ˆ‡††……†…………„„„„………„„ƒ„„„„…………ƒ~€‚†‡ˆ††…………††ˆ‹Ž‘‘‹ŠŠ‹ŒŒŽŒŠŠ†ƒƒƒ†‰Š‰ˆ‚ƒ„…††……‰‰ŠŠ‰‰‰Š‚‚‚‚ƒ„…†ˆŠ‹ŠŠŠ‹Œ‰‡ˆ‰ˆˆ‡‡……„„‡††††……………………………ƒƒ„„„„ƒƒ„„„„„„„„„…†ˆ‰‰‰‰‹ŠŠŠ‰‡ƒ€€€€€€~~}|{{zz{{||}}~~~€‚‚ƒ…‚‚€€€€€‚‚‚€€€‚‚€€‚……~€€€€€€€€€€€€€€€€|{{|~~|z}€€}zxx€€€€€€€€~~}|||}ƒƒ‚€~~|z~‚„†ƒ€~}|‚‚€‚€|yz{yyyyyyyyyyyyyyyyxyyz{{||~}}}~€€€€€€€€€€€~}~~~~~}{xvvwwwxyzz{{zzyyyz{|}~|{{{{|~~€€~~ƒ‚‚€~}|xwvuvvtsppsy{z{|{{{{|}~}{z}~€€€€€€€€€~}}~~}}||{{yz{{{|„„„‚€~€…„„‚€€€ƒ…‡ˆŠŠ‡†„ƒ‚‚‚‚€€€€€€€€‚‚‚€€€‚‚‚€‚‚‚‚€€‚ƒƒ„…„ƒ‚ƒƒ……„ƒƒƒ„„ƒƒ……†…„„†…„ƒƒƒ……€€€€€€€€€€€€€€€€‚~~‚‚‚‚‚ƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚†‰†‚€‚‚ƒ„„}{}~~~€„‰‹ŒŠ‹‹‹‹‹‹ŠŠŠŠŠŠŠŠŠŠŠ‰‰ˆ‡†…………………„„„„„……„„ƒƒ„„„„…………
„‚†ˆˆ‡††……††‡ˆ‹Ž‘‘ŽŠ‡‡ˆˆˆŠ‹‰‡‡ˆ†ƒ…„„…‡ˆ†„€ƒ„………………………………€‚ƒƒ…†‰‰‰ˆ‰Š‹Œ‹‰†…†ˆˆˆ‡††„„„‡††††………„„„„„„„„‚ƒƒ„„ƒƒ‚„„„„„„„„„…†ˆ‰‰‰‰Œ‹ŠŠ‰†‚~€€€€€€€€‚‚‚~}|{zzy|||}}~~~‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚€€ƒ‚‚€€€€€‚‚‚‚€‚ƒ„ƒ€€€€€€€€€€€€€€€€€}{{}~}|}€‚‚€}}~‚€~~}z}„…ƒ€~}‚}{~‚ƒ„~€}|‚‚~€|z{|yyyyyyyyyyyyyyyyyyyz{{||~~~~~€€€€€€€€€€€~}~~~~~~{xvwwwwyyz{||{{}||{{|}}yzz{}‚‚‚€~}}~€€~€ƒƒ‚€~}{zyxyxwvutw|}{||{{z{|}~|zy|~~}€€€€€€€€€€~€€€~~}}~~}}||{{xz{{{|‚„…„‚€„„ƒ€~}}€€‚…†‹‰…€||||||||€~~}}~~€€‚‚‚‚€€‚ƒƒ„…„ƒ‚ƒƒ†…„ƒ‚ƒƒ„„„…†……„ƒ€‚„……ƒ€€€€€€€€€€€€€€€€~}}}‚ƒ…ƒ‚„€€€€€€€€€€€€€€€€€‚‚ƒƒ‚~…‰‡„ƒ‚‚ƒ„„ƒ„€||~…‰‹‹Š‹‹‹‹ŠŠŠŠ‰‰‰‰‰‰‰‰Š‰ˆˆ††…„…………„„„„„„„…„„ƒƒ„„„„…………„~~‚†ˆ‰‡‡††††‡‡ˆ‹Ž‘‘ˆ…„…†††‡…ƒ„ˆˆ…„ƒ‚ƒ„ƒ€~€ƒ„………ƒ‚‚ƒ„‚€€€€‚ƒƒ…‡ˆˆˆˆ‰‡ˆˆ†ƒƒ„‡ˆˆ‡‡……„„‡††††………„„„„„„„„‚ƒƒ„ƒƒƒ‚„„„„„„„„„…†ˆ‰‰‰‰Œ‹Š‰‰†~€€€‚‚‚‚‚€~||{||}}~~ƒƒƒƒƒƒƒƒ„„„„„„„„‚ƒ‚€~‚‚€€€€€‚‚‚‚€€‚ƒ„‚ƒ†€€€€€€€€€€€€€€€€|}}~€„‚€~}~€ƒ}}}~~}{xvwz~€€‚ƒ€~~}}|€€€€€€€€‚‚‚‚ƒ…‡ˆ„ƒ‚~}|yz|}~~}}|{zyyyyyzzzzz{|}~}}}}~€€€€€~}}}}~‚€~|||}~|yvuvwxxxz}~~|||yzz{||}}~~~~~~~~€€€~€‚„„ƒ‚€~{{{zzyyyuwy{|}}}}}}}|||}}}}~€€€€~€~~}||||{{{{{{|||||€‚‚€€‚ƒ„„„„‚€€€€€‚‚†‡‰Šˆ…~€€}}~~€€‚‚‚‚€€‚ƒ„…†…ƒƒƒ„‚‚‚ƒ„„†††……………………‚‚…€€€€€€€€€€€€€€€€€€~}ƒƒ„……„„ƒ~~~}}}}}ƒ€}}}z€‚ƒ……ƒ‚€ƒƒ„„…†††€€‚‚‚|~€~|{|~~~„ˆ‹ŠŠ‰ˆ‡†……„†ˆ‰‰ˆˆˆ‰‰ˆ‡†……„„„„„„„„„††……„„ƒƒ„„ƒƒ„„……„‚€€ƒ†‰ˆ‡††…†††ŠŒŽŽŽŒˆ……††‡‡‡‡††………ƒƒƒƒƒƒƒƒ‚‚‚‚ƒ„††ˆ…ƒ„††ƒ~€‚‚‚„„……†‡‡ˆˆ‡†………†††‡ˆ†„‚ƒ„†††………„„ƒƒƒƒƒƒƒƒ„„„„„„„„„„„„„„„„…†‡ˆ‰‰‰‰‡ŠŒ‹ˆƒ}{}€ƒ„ƒ€€€€‚€€€€‚}|{{{|}~‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒ„„ƒƒ‚‚‚‚€€„†…€€€€€€€€€€€€€€€€}~~~€‚~}~€~}{zz{||||}}|zxwx{~€€‚€~}}}€€€€€€€€€„…‡„ƒ‚€}}{|}~~~}||zzyyyy{{{{|}~~~~}~~~€€€€~}}}~~‚€~||}}~{yvuvwxxxz|~}{zzyyzz{||}}}}}}}}}€€~€€‚ƒƒ‚€~~|||{{zzzz{}~}}}}|||}}}}~€€€€€€€€€€€€€~}}||||{{{{{{|||}}€‚‚€€‚ƒƒ„„ƒ‚€€€€‚‚ƒ†††…„‚‚‚ƒƒ„„„‚‚‚‚‚‚‚‚€€€‚‚‚‚€€‚ƒ„………ƒƒƒ„ƒ‚‚‚€‚‚ƒƒƒ‚€ƒ„‚€€€€€€€€€€€€€€€€€~~‚€€‚‚€€€€~|{€ƒ„„ƒ€‚ƒƒ„„………‚‚‚‚‚‚‚‚~€€}|}€€€ƒ‡‰‹‰‰ˆ‡†…„„„†ˆ‰ˆˆ‡ˆˆˆ‡‡†…„„„„„„„„„„†………„„ƒƒ„„ƒƒƒ„……„ƒ€€ƒ†‰ˆ‡††…†††‹ŽŽŽŒˆ……††‡ˆˆˆˆ‡‡††‚‚‚‚ƒ„…†‡…ƒ„††‚~~€€‚ƒ„„„„………†…„ƒƒ„„…†‡ˆ†„‚ƒ„†††………„„ƒƒƒƒƒƒƒƒ„„„„„„„„„„„„„„„„…†‡ˆ‰‰‰‰ˆŠŒ‹†‚~}|~€ƒƒƒ€€€€‚€€€€‚~|{zz{{€€€€€‚‚‚€€€€€‚„€€€€€€€€€€€€€€€€~~}}}€€~}}}~||{{{|}
~z{{{zywvy|~}~~~~}}|||€€€€€€€€~~€„…ƒ‚‚€~}~€€€}|{zzyzz|||||}~~~~~€€€€€€~~~~~~‚€~||}~~{ywvwxxxxz}}|zyyzzz{||}}~}~}~}~}€€€€€‚‚€~~}}}}|||||€€‚‚‚€}}}}|||}}}}~€€€€€€€€€€€€€€~}}}||||{{{{{||}}}}€ƒ‚€‚‚ƒƒƒƒ‚€€€€€‚‚}~ƒ„……‚‚ƒƒ„„„„………………………„„ƒ‚‚‚‚‚€€‚ƒ„……„ƒ‚ƒ„„ƒ‚€~~€‚ƒ„‚‚ƒ…„€|€€€€€€€€€€€€€€€€~‚‚€€~~‚‚‚€€€~€~}~~‚ƒƒƒƒ‚ƒƒ„„„„……„„ƒƒ‚‚„‚}€‚‚„†‡‡††…„ƒƒ‚ƒ…ˆˆˆ‡‡ˆ‡‡‡†…„„ƒ„„„„„„„„………„„„ƒƒ„„ƒƒ„„……„ƒ€ƒ†ˆˆ‡‡††…††‹‘ŽŽŒˆ……††‡‰‰‰ˆˆˆˆˆ€€€€€ƒ‚‚‚ƒ„………ƒ‚ƒ…„‚€‚ƒƒƒ‚‚‚‚‚ƒ‚‚‚ƒ„†‡ˆ†„‚ƒ„†††………„„„„„„„„„„„„„„„„„„„„„„„„„„…†‡ˆ‰‰‰‰Š‹Œ‰…€}|}~€‚ƒ‚€€€€‚€€€‚ƒ‚€~}|||€€€€€€€€~~€‚‚‚‚‚€€}}€ƒ†‡€€€€€€€€€€€€€€€€€€~|{zz~}}}|}}}zz{{|}}~{{zyxvuty{}}{yyz{{z{zzzz~~~~~}}|}}€ƒ‚‚‚€€€‚€€}}{{zzzz{{{{{|}~~€€€€~~‚€~||}~~~{ywwxxxxxz}~}|{{||}}~~~~€€€€€€€€€€€€€€€€€~~}}~~~~~~~~ƒƒ„„ƒ‚€€}}}}|||}}}}~€€€€€€€€€~~||}||||{{{{{{|}~~~€‚‚€€‚‚‚ƒƒ‚€€€‚‚~€€€€€€€€€ƒƒƒƒƒƒƒƒ„„ƒƒ‚‚‚‚‚‚€€‚ƒƒ„…„‚‚‚ƒ„„‚‚‚‚ƒ‚‚ƒƒ………ƒ…‡†‚€€€€€€€€€€€€€€€€€~~~†…ƒ€~}}}~~~~~~~}z|€…„„…………„„…†…†††††…„„ƒ€€ƒ……„€‚„ƒ€‚ƒ„„ƒƒ‚‚‚„†ˆ‡‡‡‡††……„„ƒƒ„„„„„„„„……„„„„„ƒ„„ƒƒƒ„………ƒ€€ƒ…ˆˆ‡††…†††‹Ž‘ŽŒ‡…„…††ˆˆˆˆ‡ˆ‡ˆ€€€€„ƒƒ‚ƒƒ„…ƒƒƒƒƒ€€€‚‚‚„ƒƒ‚€€€ƒ„…†‡ˆ†„‚ƒ„†††………„„„„„„„„„„„„„„„„„„„„„„„„„„…†‡ˆ‰‰‰‰‹‹Šˆ‚||~€‚‚‚€€€‚‚€€€‚‡†„‚‚‚ƒ‚ƒ‚ƒ‚‚€€€€€€€€€€€€‚‚‚‚€€‚€~€„‡‡…€€€€€€€€€€€€€€€€€}|zyx{{{||||{vwwxyyzz}|zxutrrwy{zyvwwxxxxxxyx{|||}}}~||{{}~‚‚‚‚ƒ‚‚ƒ‚‚€~}|{{z{{|{{{|}~~€€€€€€€€€~‚€~|}}~~~~~{yxxyyxwxz~€€€€€€ƒ‚ƒ‚ƒ‚ƒ‚€€‚‚€€€€€€€~~}}‚‚ƒ‚‚€}}}}}|||}}}}~€€€€€€€€€~~~~}||}||||{{{{{||~~€ƒ‚€‚€‚‚ƒ‚‚€€€€‚‚ƒ‚~}}}}~~~~€€€€€€€€€€€‚‚‚‚‚‚€€‚ƒƒ„…„ƒ‚ƒ„ƒƒ‚ƒ„…………………………‚ƒ€~ƒ€€€€€€€€€€€€€€€€}~}}€ƒ‰‡„~}}xyzz|}~~~|xw{„Šˆ‡‡‡‡‡‡ˆˆˆˆˆˆˆˆˆ‡†…„ƒ‚€…†‡…‚€‚„ƒ‚€€€‚‚€€€ƒ†‡‡‡‡‡†……„„„„ƒ„„„„„„„„„„„„„„„ƒ„„„ƒ„„……†„€‚…‡ˆ‡‡…†…††‹ŽŽŽŽ‹‡„„„……††††††††ƒƒƒƒƒƒƒƒ…„ƒƒ‚ƒƒ„‚‚‚‚ƒ‚€‚‚ƒ‚ƒƒƒƒ„ƒƒ‚€ƒ„†‡†‡ˆ†„‚ƒ„†††………„„„„„„„„„„ƒƒƒƒƒƒƒƒ„„„„„„„„…†‡ˆ‰‰‰‰‹Š‰…}}}‚€€€‚€€€‚……„ƒƒ„……„„„„„„„„‚‚‚‚‚‚‚‚ƒƒƒ‚‚‚‚‚€€„ƒ„†Š‰…€€€€€€€€€€€€€€€€€~}zyxwxyz{|{{zwxyzzzyy|{yvutsswy{{ywwxwxxxxyyyz{{||}}}||{{|~€‚€‚ƒ„„„‚ƒƒƒ‚€~~||{{{{}}}}~€€€€~€€€€€~€~‚€~|}~~~~~{zxyyzxwx{‚‚‚‚‚‚‚‚‚‚‚‚ƒ„ƒ„ƒ„ƒƒ€€‚‚‚€€€€~~~~€€€€€}|}}}}|||}}}}~€€€€€€€€€€€€€~}||}||||{{{z{|}~€€€‚‚€€‚€‚‚‚€€€‚‚€€€€€€€€€€€€~~~~~~~~~~€€‚‚‚‚‚€€‚ƒƒ„„„
‚‚ƒƒƒ‚ƒƒ„…†„„„„ƒƒƒƒ~{yz~‚„…€€€€€€€€€€€€€€€€}~||…Š‡ƒ|{zzyzz||~~}yww}…Œ‰‰ˆ‡‡ˆ‰‰‰‰‰‰ˆˆˆˆˆˆ‡†…„ƒ‚†‡ˆ†‚€€ƒ‚€~~€€€€€€€€€ƒ…‡‡‡‡‡…………„„„„„„„„„„„„„„„„„„„„„„ƒƒƒ„……†„€€‚„‡ˆ‡††…†††ŠŒŽŽŠ…ƒ‚ƒ„„„„„„„…………†…†…†…………„ƒ‚‚‚ƒ‚‚ƒƒ‚ƒƒƒƒƒƒƒƒƒ‚‚€€~€€‚…‡ˆ†‡ˆ†„‚ƒ„†††………„„„„„„„„„„ƒƒƒƒƒƒƒƒ„„„„„„„„…†‡ˆ‰‰‰‰Š‰†ƒ~}~€€€€€€€€‚€€€‚€€€ƒƒ‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒ…„„ƒ‚‚‚‚‚€€„†ˆ‹Šˆ‚~€€€€€€€€€€€€€€€€~}|zyxwvwy{{{zyz{|}}{zyyxwuuvwxy{}}|{{|zz{{||}}{{|}}~||||}‚€‚ƒ„††‚‚ƒƒ‚€~~}||{||~~}~~€€€~~~€€€~~~~~‚€~|}~~~~}{zyyzzywy|€‚ƒ‚‚ƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚‚‚€€‚‚‚‚‚‚€€€€~~~~€€€€~€€€~}}}}}|||}}}}~€€€€€€€€~}|}||||{{{z{|}~€€‚‚€‚‚‚‚€€€€‚‚~€‚ƒ„ƒ‚„ƒƒ‚‚‚€€€€€€€€‚‚‚‚€€‚ƒƒƒ„ƒ‚‚‚ƒ‚‚‚ƒƒ‚‚‚‚ƒƒƒƒ}}~‚……ƒ€€€€€€€€€€€€€€€€|€~{|†Š…€|zz{}}}|}||||zxx}†ˆˆ‡†……†‡ˆˆˆˆ‡††……††……„ƒ‚‚†‡ˆ…€€~~~€€€€€€€€€€‚…†‡†‡‡†…………„…„„„„„„„„„ƒƒƒ„„„„„„„ƒƒ„„……†„‚€€„†ˆ‡†††…††‰‹ŽŽŒˆ„‚‚ƒƒƒƒ„„„„……†††††††††…„ƒ‚‚‚‚‚ƒƒƒ‚‚‚‚‚‚‚‚‚‚‚€€€~~}}}}~ƒ†‡†‡ˆ†„‚ƒ„†††………„„ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„„„„„„„„…†‡ˆ‰‰‰‰ˆ‡…€}}‚€€€€€€€‚€€€€‚‚€€€€€€€€€€€‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚€€‚…ˆˆ†ƒ€€€€€€€€€€€€€€€€€~}|{zyxxuvxz{zyxyz{{zywuvuuuvx{|{}€€~€}}}~€€||}~€€}|||}‚€‚„…†‡‚‚‚‚‚~~}|||||}}|}}~€€€~~~~€€€~~~}}~~‚€~|~~~~}}{zyyzzywz}€‚‚‚‚ƒ‚‚‚‚‚‚‚€€€€€€€€€€‚ƒƒ‚‚‚€€€€€~~€€€€~€€€}}}}|||}}}}~€€€€~~}|}||||{{{z{|}~€€‚‚€€‚~€‚‚€€€‚‚€‚„‡†„„„ƒƒ‚‚„„„„„„„„ƒ‚‚€€‚‚‚‚€€‚ƒ‚ƒ„ƒ‚‚ƒ‚€€€€€‚ƒ„……„‡‰‰†ƒ‚ƒ€€€€€€€€€€€€€€€€|~€}z{‡‘ˆƒ}}}}|{zyxyww{……„‡†„ƒƒ„†‡††……„„ƒƒƒƒ‚€€€†‡‡…~~€€~}}~€€€€€€‚„††††‡††………………„„„„„„„„ƒƒƒƒ„„„„„„ƒƒƒ„……‡…‚€€„†ˆ‡††…†††‰‹ŽŒŒŒ‹ˆ„‚‚ƒƒ„„………†……………………††„ƒ‚‚‚‚‚ƒ„ƒƒ‚ƒ€€€€~~}}}}||zz{|‚„††‡ˆ†„‚ƒ„†††………„„ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„„„„„„„„…†‡ˆ‰‰‰‰‡†ƒ}}€ƒ‚€€€€€€€€‚€€€€‚…„‚€€€€€€€€€€€€€€€€~€€‚‚‚‚€€€ƒ†…€€€€€€€€€€€€€€€€€~ysrw|tvy{}|{zzzyyxxwwttuvwxxyz{|}}}}}}}|||}~~xyz{}€€|}}{z|€ƒ€€€~~~~~}||}~~~~~~~€~€~~€€€€€~€~}€~|}~}ywvvvy|€‚‚‚‚€€€‚ƒƒ€€€€€€€~€€€€€€€€€~~~€€‚€~}}}||~}||}}~~€€€€€€€~~~~~}}}}~}|{zz{{|}‚ƒƒƒ€€~~€€ƒ€€~|‚‚‚‚‚‚‚‚„„„„„„„„ƒƒƒƒƒƒƒƒ‚‚ƒ„…ƒ„ƒ‚€€ƒ„……„ƒ‚ƒ„€€€|}~‚ƒ„ƒ†‡„€€€€€€€€€€€€€€€€€}|yw{€’•‹~{{|€~}}|{{vx{‚„†‡†††……„„„ƒƒ„………„„…†††…„‚‚„ƒ‚ƒ…„‚€‚‚‚‚€€‚„„‚€€€‚…†…ƒ…„…„…„…„††……„„ƒƒ„„„……„„„ƒ„„„„„„…‡†„~~‚…‰‡……‡ˆ‡†ˆ‹ŽŒ‡†„‚€‚………††‡ˆ‰‰‡‡‡††………‚‚€€~~~~~~~~}}}}}}}}|||{{{zz|zxy~ƒ‡ˆˆ‡…„„„
…………„„„…††„ƒ„ƒ„ƒ„ƒƒƒ„„„„ƒƒ„………………†‡‡ˆ‰ŠŠ‰‰ˆ„€€}~~€‚€‚‚‚‚‚‚‚‚‚‚‚‚€€€€€€€€€‚ƒƒ€‚„€~~~€€€‚‚‚€€€€€€€€€€€€€€€€~~|wssvytuxz{{zzzzyyxxwwtuuvwxyyxyz||}||~}}|||||{|||}}~~|}}{z{‚€€~~}}~~}}}}}~~~~~~~€~~€€€€€~€~}€€~}}~|yvvvvx|€€€€€€€€‚€€€€€€€€€€€€€€€€€€€€€€‚€~}}}||~}||}}~~€€€€€€€€~~~~}}}}}}|{z{{{|}‚ƒ‚‚€€€~‚€€€€€€€€‚‚‚‚‚‚‚‚€€€‚ƒ„ƒƒƒ‚€€‚ƒ„…„ƒ‚‚„‚‚‚‚‚ƒƒƒ„„„„ƒ…†„€|{|€€€€€€€€€€€€€€€€€~|{xvx}Œˆ|z{|€||{{zyyy€‚…†‡ˆˆ………„„ƒƒƒ„„„………„„††……„ƒ‚„ƒ‚ƒ…„‚‚ƒ„„„„ƒ‚‚ƒ…††…ƒ‚€€‚„†…ƒ„…„…„…„…………„„„ƒƒ„„„……„„„ƒ„„„„„„…‡†„~‚…‰‡……‡ˆ‡†‰ŒŽ‘Œˆ‚‚‚ƒƒ€€ƒƒƒƒƒƒƒƒ€€~~~~~~~~~~~~~}}}}}}}}|||{{{zz|zxz~„‡ˆ‡‡…„„„„…†……„„„……ƒ„ƒ„ƒ„ƒ„ƒƒ„„„„ƒƒ„„…………††‡ˆˆ‰ŠŠ‰‰ˆ…€€€~~~€€€~€‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚€‚ƒƒ‚„‚‚€€€‚~‚„„…€€€€€€€€€€€€€€€€~|yvttuvstvxyyyxzyyyxwwwtuuvwxyyyy{{||||}}||{{{z~~~}}||}}}{zz~€€€€€€€€~~~}}}|}}}}}}}}~~~~~~~€~€€€€~€~|}~~~{xvuvux{~~~~~€€€€€€€€€€€€€€€€€€€€€€€€‚€~}}}||~}||}}~~€€€€€€€€€€~~~~}}}}}||{{{{||}€‚‚‚‚€€€~€ƒ„ƒ‚~~~~~~~~€€€€€€€€€€€€‚ƒ‚‚‚€‚ƒ„…„ƒ‚‚ƒƒƒƒ‚‚‚‚ƒƒ‚‚‚}ywyz€€€€€€€€€€€€€€€€}{zwtuy…Š„{z|}}}}}||{{†‡ˆˆ‰ˆ‡‡„„„„„ƒƒƒ……………………††…„ƒ‚„ƒ‚ƒ…„‚‚‚ƒ„„ƒ‚‚ƒ„…††…„ƒ‚€€„……ƒ…„…„…„…„……„„„ƒƒƒ„„„……„„„ƒ„„„„„„…††…‚„ˆ‡††‡ˆˆ‡Š‘‘Ž‰ƒ€€€~}}~~~}}}}|}}}}||||}}}}~}}}}}}}}}}}|||{{{zz|zyz€„‡ˆ‡†…„ƒ„„…††…„„„„„„ƒ„ƒ„ƒ„ƒƒƒ„„„„ƒƒ„„„……†††‡ˆ‰‰Š‰‰‰ˆ…€€€€€€~~~~}~~~€€€‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚ƒ„ƒ‚€‚ƒƒƒ‚‚‚ƒ„€€‚ƒ†‡‰Š€€€€€€€€€€€€€€€€|xwvwutsstuvvvvxxxwwvvvtuuvwxyyz{{|{{zzyzz|||||~~~~~~~~}{yz|€€€€€€€€~~}}|||||}}~~~}}~~~~~~~€€€€~€~{|~~~~~ywuuuuwz~~~~~~€€€€€€€€€€€€€€€€€€€€€€€‚€€~}}}||~}||}}~~€€€€€€€€€€‚~~~~}}}}||{{z{{||}~€€€€‚€ƒ„ƒ‚~~~~~~~~~~~€€€‚ƒ„„„‚‚‚ƒƒƒ‚‚‚‚‚€€€{|{ywx|€€€€€€€€€€€€€€€€}{{yutv€‡„}‚…„„„„„„„ƒ‡‡ˆˆ‡‡…„……………………‡†……„………††„ƒ€€€„ƒ‚ƒ…„‚€€‚‚‚‚‚‚‚‚ƒ€ƒ…„ƒ„…„…„…„…„„„„ƒƒƒƒ„„„……„„„ƒ„„„„„„……†…ƒ€ƒˆ‡††‡ˆˆˆŒŽ‘’’ŽŠ…‚~~}~~~||{{~~}}}}||}}}}}}}}}}}}}}}}}}}}}}}}|||{{{zz{zy|€…‡‡‡†„ƒƒƒ„„‡†…„ƒƒƒƒƒ„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„ƒƒƒ„„……†††ˆˆ‰Š‰‰ˆˆˆ†‚€€€€€€€€€€~}||}}}}}~€‚‚‚‚‚‚‚‚„„„„„„„„„„„„„„„„‚„…„‚ƒƒ‚‚„„……†‡ˆŠ‹‹€€€€€€€€€€€€€€€€€}zyzyvsrrrrssttvvvuuttssttuvwxxzzzyywvuuvy{~~||}€‚ƒ~zxx{~~~~}}|||||~~}|}}~}~~~~~~~€€€€~€~{|}~~~xvuuvuwz~~€€€€€€€€€€€€€€€€€€€€€€€€€€€‚~}}}||~}||}}~~€€€€€€€€€€‚~~~~}}}}|{{z{{
|||}€€€€€„ƒƒƒ€€€€€€€€~~~~~~~~~~}~€€€‚ƒƒ„ƒ‚‚ƒ‚€€€€ƒƒƒƒ„„„„}{|„€€€€€€€€€€€€€€€€€}|}{wuvˆ‡‚…‡‡Š‰‰‰‰‰ŠŠŠ††‡‡ˆ††…††‡‡‡‡‡‡‡†…ƒƒƒ„…‡…ƒ~„ƒ‚ƒ…„‚~€€~„‚€€‚„„„…„…„…„…„„„„„ƒƒƒƒ„„„……„„„ƒ„„„„„„…………ƒ‚‡‡‡‡‡ˆ‰Š‘‘’’‹ƒ€~~~~~~~~~}~}}|||||||}}}}}}}}}}}|}|}}}}}}}}}}|||{{{zz{zz}‚…‡‡†…„ƒ‚ƒƒ„‡†…„ƒƒƒƒ„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„„ƒƒ„……†‡‡ˆˆ‰‰Š‰ˆˆˆ†„€~}€€€€€€€€‚~}||~}|||}€‚‚‚‚‚‚‚‚„„„„„„„„„„„„„„„„ƒ…†„‚‚ƒ€€€€ƒ„‡‡‡†††††€€€€€€€€€€€€€€€€}{{{zvsrqpooprssssrrqqqrrstuuvwwwwwutrrrux|yz|„†‡}{wwy|~~}}||{|~€~|{|||}}~}~~~~}~~~€€€€€~€~|||}~}|xutvvvwz}~€€€€€€€€€€€€€€€€€€€€€€€~€€‚€€~}}}||~}||}}~~€€€€€€€€€~~~~}}}}|{{{{||}|}~€€€€€~~~€…ƒ‚‚€~‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚~}~~€€€€‚ƒ‚ƒ„ƒ‚€€€€€€‚‚‚†‡‡„€‚„€€€€€€€€€€€€€€€€€}|~~zwv‰Š‡ŠŒ‹‰‰‰‰ŠŠ‹‹‡ˆ‰ŠŠŠ‰ˆ‡‡‡ˆˆˆˆˆˆ†„‚‚ƒ„‡†‚}}~~„ƒ‚ƒ…„‚€‚ƒƒ‚€ƒ‚‚ƒ†ƒ€ƒ„„„…„…„…„…„„„„„„„„„„„……„„„ƒ„„„„„„…„…†„€€‚†‡ˆ‡‡ˆ‰‹‘‘’Œƒ€~€~}|~~~~}}}}|}|}||||}}}}}}}}}}}}}}}}}}}}|||{{{zz{z{ƒ‡‡‡†…„‚‚‚ƒƒ†…„ƒƒƒƒƒƒ„ƒ„ƒ„ƒ„„„ƒƒƒƒ„„ƒƒ„„…†‡‡‰‰‰Š‰‰ˆ‡ˆ‡„}}~€€€€‚‚€~}}}|{|}€‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒ„„„„„„„„„††…‚ƒ€€‚ƒ„…„„ƒ‚€€€€€€€€€€€€€€€€€€|||{zxurrpommnpqqppoonnnppqrstuuuuvvvutssuy}€‚yz|‚…‡‰€€~zwvx{~~~~~~~~€~~~}z|€€~|z{{||||}}~~}~~~~€€~€€€€~€~~}|}~~|zwuuvwvxz~~€€€€€€€€€€€€€€€~~~~~€€€€‚€~}}}||~}||}}~~€€€€€€€~~~~}}}}{{{z{|}}}}~€~~~}}}~‚€‚€‚‚‚‚€€€€€€€€~~€‚€‚€ƒ„‚ƒ„ƒ€‚‚‚‚€€~~~}}}}„†…‚€€€€€€€€€€€€€€€€€€~{|{wv~‰Œ‰‹Œˆˆˆ‰ŠŠ‹‹‰Š‹‹‹Š‰ˆ†‡‡‡ˆˆˆˆˆ†ƒ€€‚ƒˆ…‚~||}~„ƒ‚ƒ…„‚€‚‚‚‚€‚~~‚‡„€€ƒ„„…„…„…„…„„„„„„„„„„„„……„„„ƒ„„„„„„…ƒ…†…‚€€†‡ˆˆ‡ˆŠŒ‘’Œ…€}}~~}}}}~~~~~~~~~~~~~~||||}}}}}}}}}}}}}}}}}}}}|||{{{zzzz|„‡‡†…„ƒ‚‚‚‚ƒ…„„ƒƒƒ„„„ƒ„ƒ„ƒ„ƒ„„ƒƒƒƒ„„‚ƒƒ„†‡‡ˆ‰‰ŠŠ‰ˆˆ‡‡‡†}|~€‚‚‚€€€~|{|~€‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒ…†‡…ƒ‚ƒ€‚ƒ…†„„„ƒƒƒ‚‚€€€€€€€€€€€€€€€€z{{{yvsqqpnllmopoonnmmlloopqrsstuuvxxxxwtw{€z{}€ƒ…‡ˆ€€~zvvxz~~~~~~~~€€€~~~z|€€|z{{{{||||~~~~}~~~~€€~€€€€~€~}|}~~{ywuuwwwx{~~~~~~~~~€€€€€~€€€€€~~~~~~~~~~€€‚€~}}}||~}||}}~~€€€€€~~~~}}}}{{zz{|}~}}~~~~~}||}~~~}~‚€€€€€€€€€€€€€€€€€€€‚‚‚‚ƒ…‚ƒƒƒ€‚ƒƒƒ‚‚‚ƒƒ‚€~}|uy~€}}~€€€€€€€€€€€€€€€€|zz~{wu}‰ŒŠŠ‹ˆ‰‰Š‹‹ŒŒŠŠ‹‹Š‰‡†…†††‡‡ˆˆ‡…‚€‚ˆ…}{{|}„ƒ‚ƒ…„‚}~€€~}~|{{|~‡„€~€‚„„„…„…„…„…„„„„…………„„„……„„„ƒ„„„„„„…ƒ…†…‚€€†‡‰ˆ‡ˆŠ‘’Œ…€}}~}}|||}}}}}|}}}}}}}}}}}}~~~}}}}}}}}}}}}}}}}|||{{{zzzz|
€„‡‡†…„ƒ‚‚‚ƒ„ƒƒƒƒ„„…ƒ„ƒ„ƒ„ƒ„„„ƒƒƒƒ„„‚‚ƒ„†‡‡ˆ‰‰ŠŠ‰ˆ‡‡‡ˆ†‚}{}€‚‚‚‚}{|~ƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚†‡‡†ƒƒƒ‚‚ƒ„…‡ˆ…††‡‡ˆ‰‰€€€€€€€€€€€€€€€€|zwwvtpmlmpqqoljkoollnolllmnopqquuttssrrrtw{}~~~yyz|€~}}|wtx}~€€}||}}~~}}|~~}}||}}}}}}}}~~~~~~~~}}}}~‚|||{zxwvvvvuvwxx€€~~~€€€€€€€€€€€~~~~~~€~~~€€‚~}}}}~~‚‚€~}}}~~~~~~€~~€€€€€€€~}|{{zzzzz{}~|}~€€}}~~~~€€~|z}~€‚‚}€€€‚‚‚‚‚€€‚……„ƒ€€€~€€‚ƒ„ƒ„…ƒ‚€‚…ƒ‚‚ƒƒ‚„ƒƒ€~}~~€€€€€€€€€€€€€€€€€|ww{{wvz„ˆ„|€Œ…‹‰ˆŠŒ‹Š‰Œ‹Š‰‰‰‰‰ˆ‡‡††‡‡‡†…„ƒƒƒƒˆ‡…~}~€„‚€‚…†ƒ||||||||yz{{z{}€‚‚‚ƒƒƒ…………„„„„ƒƒƒ„„………††††††††„„………††††††…„‚€„‡‰ˆŠŽŽŒŽŽŽŽ‹ƒ~|~}}~~~}}~}}}}}|||}}}}}~~~~~~~~~~~~}}}}~}}|||{{{xz~‚„……„…ƒ€‚‚€„„ƒƒƒƒ„„„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ‚‚ƒ…†‡ˆ‰ŠŠ‹Š‰ˆ†…ˆ†ƒ€€€€€€€€~€ƒ„ƒƒ‚‚‚‚ƒƒ‚ƒ„ƒ‚‚ƒ„ƒƒ‚…††…ƒ‚ƒƒ€ƒ………††††‡‡‡‡€€€€€€€€€€€€€€€€zxwwyyvtqrtutrommopmmoomoopqrsttttssrrqqqsvyz{zzxwwy{|zy}}|{wux}~€€}||}}~~~}}~~~}}}|}}}}}}}}}}}}}}}}}}}}~‚|||{zxwvvvvvvwwx~€~~~~€€€€€€€€€€€~~~~~~~~€€€‚~}}}}~~€‚€~}}}~~~~~~~~€€€€€€~}||{z{zzz{}~€|}~€€€}}~~~~€€~|{|}€€‚‚ƒ‚‚€€€€€‚ƒ„ƒ‚€~€€€€€ƒƒƒ„„„‚‚‚‚‚ƒƒ‚€€€€€€€€€€€€€€€€€€€€€}urwzywxƒz}‡‹ˆŒŠˆ‰‹Œ‹ŠŒ‹Š‰ˆˆˆˆŠŠ‰‰ˆˆ‰‰‡†…„ƒƒƒ„‰ˆ‡ƒ€ƒ„‚‚„…‚||||||||y{{{z{}€‚‚‚ƒƒƒ…………„„„„ƒƒƒ„„……………………………„„„……………†††…„‚€ƒ‡‰‰‹ŽŒŽŽŽ‘ŽŠƒ~€€~}~~}}}~~~~~~}}}}}}}}~~~~~~~~~~~~~}}}}~~}}||{{{{x{~‚„……„„‚€€ƒ‚‚„„ƒƒƒƒ„„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„‚ƒ„…†ˆˆ‰ŠŠŠŠ‰ˆ†…‡…‚~~€€€€€‚‚‚€‚ƒ‚‚‚‚ƒ‚ƒ„ƒ‚‚ƒ„ƒƒ‚‚‚„††…‚‚ƒƒ€ƒ…………††††‡‡‡€€€€€€€€€€€€€€€€|yxyxuswxyyxvsqnoonnoonppqrstuurrstuvwwrsvwyyxw|{{|~~|z|{{zxux}€€€~~}}}~~~}}}~~~~}}}}}}}}}}}}||||||||}}}}~‚}||zyxwvvvvuvvxx|}~~~~~€€€€€€€€€€€~~~~~~~~€€‚~}}}}~~‚‚€~}}~~~~~~~~~~~€€€€€~~}}|{{{{{z|}€|}~€}}~~~~€€€~}~}}~‚ƒ‚‚ƒ‚‚~€€‚€€‚‚€€€€€€‚ƒƒ„…ƒ‚€‚€€‚€€€€€€€€€€€€€€€€€€vqsyzxw~}zxy~…ŠŒŠ‰Š‹Œ‹‹Š‰ˆ‡‡‡‡‰‰ˆ‡‡‡ˆˆˆ‡†…„„„„‡‡†ƒƒ…„‚‚„„~||||||||z{|{{{~€‚‚ƒƒƒƒ………„„„„ƒƒƒ„„„„……„„…„…„…„ƒƒ„„„„……†††…„‚€ƒ†‰‰‹‹Ž‘‘Ž‰‚~€~~~}}}}~~~~~}}}}}}}}}~~~~~~~~~~~~}}}}~~}}|||{{{y{‚…………‚€‚ƒ„ƒ„„ƒƒƒƒ„„„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„ƒƒƒ„…‡‡ˆ‰‰ŠŠŠ‰‡†…†„}}~€€€€‚ƒƒ„„ƒ‚€€€‚‚‚‚‚ƒƒƒ‚‚‚ƒ„ƒƒ‚‚‚‚ƒ„…†„‚‚ƒ€€ƒ……„……†…††††€€€€€€€€€€€€€€€€|yvuvwut{{|{zwutnnmmnonmnooqqsstqqrrrrssstvwxyyy{zz|~~}{zyy{ywy}€€~~}}}~~~~}}~~~~}}}}~~~~~~~~}}}}}}}}}}}}~‚~}{zxwwwvvuvvwwx|}~~~€€€€~~~~~~
~€€€€‚~}}}}~~€‚€~~~~~~~~~~~~€€€€~~~}}||{{{{{{|~€|}~€€€}}~~~€€€€€‚~~€‚ƒ€€~€‚‚‚‚ƒ€‚‚€€ƒ€€€€€€‚ƒƒ„„„‚‚‚€€ƒ€€€‚ƒƒƒ€€€€€€€€€€€€€€€€€€„~yxyzyy~zwwuv~ˆ‹‰‰Œ‹Š‰ˆ‡‡ˆˆ‰ˆ‡‡†‡‡‡ˆ‡†…„„…………„€‚…„ƒ‚ƒƒƒ€~||||||||z{||{|~‚‚ƒƒƒ……„„„„ƒƒ„„„„„„„„„„ƒ„ƒ„ƒ„ƒƒƒƒƒ„„„††††„‚€†‰ŠŒŠŽ’““Ž‰€~~~}}}~}||}}}}}}|||}}}}}}}}}}}}}}}}~}}}}}}~}}||{{{{y{ƒ…††…~}„…„„„ƒƒƒƒ„„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„„„…†‡ˆˆ‰‰‰‰Šˆ‡…„†„~|}~~€€€€‚‚€€‚ƒ‚€‚‚ƒƒƒ‚‚‚ƒ„ƒƒƒ‚ƒƒƒ„……„‚‚‚€‚……„……………†††€€€€€€€€€€€€€€€€nllnswzz{{{zyxvunlkmnnmlnnooqqrsrrqqqpppqrtvwyyzxxx{~~|zxyz{y{~€€~~~~~}}~~~~}}}}}~~~~~~~~~~~~}}}}~‚€~|yxvwwvvvuvvxx|}€€€€€~~~~~~~€€€€‚~}}}}~~‚‚€~~~~~~~~~€€€~~~}}}}||||||{}~€|}~€}}~~~€‚‚‡…‚€€€€€€‚‚ƒƒ„ƒ‚€‚ƒƒ€‚ƒ‚€€€‚‚ƒ„…ƒ‚€‚‚ƒ„ƒ€‚€€ƒƒ„„‚ƒƒƒƒ‚€€€€€€€€€€€€€€€€ˆ‰‡|z|~€zwwtrxƒ‹‰‰ŒŒŒŒŠ‰‰‰‰‰‹ŠŠ‰‰‰‰‰‰ˆ‡†………………„‚€€‚…„„ƒ‚‚}||||||||z{}|||‚‚ƒƒƒƒ„„„ƒ„ƒƒƒ„„„„„„„„ƒƒƒƒƒƒƒƒƒƒƒƒ„„„„†††…„‚€€…‰Š‹Ž’’”“‰~~~}}}~}||}}}}}}|||}}}}}}}}}}}}}}}}}}}}}}}}}|}||{{{y|€ƒ††††}}~„…„„„ƒƒƒƒ„„„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ……††‡ˆˆˆ‰‰‰‰ˆ†…„†„~|}~~€€€€‚‚~}}}}}}€„…‚€‚‚ƒƒ‚‚ƒƒƒƒ‚ƒƒƒ„ƒ„…ƒ‚€‚‚€‚„„„„„…„…………€€€€€€€€€€€€€€€€mjhjnsuvzzyyxxxxplkmoomloopqrsttsrrrrrqqpqrtuwxyvvwy}~}|yxx||||€~}~~}~~~~}}}}~~~~~~~~~~~~€€€€€€€€}}}}~‚|ywvvwvvuvvwwx}~€€€€€€€€€€€€€€€€~~~€€€‚~}}}}~~€‚€~~~~~€~€€~~}}}}|}|}||||}€|}~€€€}}~~~€€‚„…Šˆ†„‚€€‚ƒƒƒ‚‚‚ƒƒ„ƒƒ€€ƒƒ‚€€‚„ƒ‚€€€‚ƒ„„„‚„„„„‚‚‚‚‚‚ƒ„„ƒ„„……………€€€€€€€€€€€€€€€€ˆ‘Š€‚|yzwtw~ˆŽŠ‰ŒŠŒ‹Š‰‰‰‰ŠŠ‰‰ˆ‰‰‰Š‰ˆ†††††‰ˆ‡„‚‚„†„„„ƒ€}|||||||||{|}}|}‚‚ƒƒƒ„„„„ƒƒƒƒ……„„„„ƒƒƒ„ƒ„ƒ„ƒƒƒƒƒ„„………††††„‚€„ˆ‹‘‘’“”‹ƒ}~~~~~~}}}}~~~~~}}}~~}}}}}}}}}}}}}}|}}}}}}|}}||{{{{z|€„†‡‡†}|~€ƒ„„„„ƒƒƒƒ„„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„†††‡‡ˆˆˆˆˆˆ‰‡†„ƒ†„}}~€€€‚‚‚‚‚€}|{{zzz|~ƒ…‚‚‚‚‚ƒ‚‚ƒƒ‚‚‚‚ƒƒ„ƒ„…„€€€~„„ƒƒ„„„„………€€€€€€€€€€€€€€€€sojikmooyyxxyzz{rmkorpnnppqrstuuutsrponmrrstuvwxsssuxyxvywx|~}~€€}}}~~~~~|}}}~~~~~~~~~~~}}}}~‚‚|xvvvwvvvuvwxx}~€€€€€€€€€€€€€~~~€€‚~}}}}~~‚‚€~~~~~€€€€~~}}}}}}}}}|||}‚|}~€€}}~~~€ƒ…‡‰‰ˆ‡†ƒ€€€‚ƒƒƒƒ‚ƒƒƒƒ‚‚‚‚ƒƒ‚€ƒ„„ƒ‚€€€€ƒ„…ƒ‚€‚„ƒƒƒƒƒƒ‚‚‚‚‚ƒƒƒƒ‚ƒ„„…„„„€€€€€€€€€€€€€€€€…‘Œˆ‡…‚}{||{z{…Œ‹‰ŒŒˆŒ‹Š‰ˆˆˆ‰‰‰ˆ‡‡‡ˆˆŠ‰ˆ‡†††‡ŠŠˆ…‚ƒ………„ƒ~}|||||||||{|}}|}‚‚‚ƒƒƒƒ„„„ƒƒƒƒ‚………„„ƒƒƒ„„„„„„„„„„„„…………†††…„‚€~ƒ‰‹Ž‘‘’‘’“‘„|}}~~}~~}}}~~~~~~}}}~~~}}}}}}}}}}
}}}||}~~}||}}|||{{{z}€„†‡‡†~}}€‚ƒ‚„„ƒƒƒƒ„„„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ‡‡‡‡ˆˆˆˆˆˆˆˆ‡…„ƒ‡…‚~~€€€‚‚‚‚‚}}|yyzz|~‚ƒ‚‚‚‚ƒ‚ƒ‚‚‚‚‚ƒƒƒ„…ƒ€~ƒ„ƒƒƒ„„„„„„€€€€€€€€€€€€€€€€rnihknpqyxxyz{}~uomqtrppppqrsttuuuttsssrvvuvvwwxyxxy{{ywywx}‚€}}}€~~~~||}}~~~~~~~~~~}}}}~‚‚€|xvuvwvvvvvwxx|~~~~~€€€€€€€€€€€€€€€~~~€€€‚~}}}}~~€‚€~~~~€€€€~~|||}}}}}}}|}}‚|}~€€}}~~~€€€„†ˆ‡ˆ‰Š‰†ƒ‚~€‚‚‚‚‚‚‚‚‚€ƒƒ‚ƒ„ƒ€ƒ…„ƒ‚€€€ƒ„„ƒ‚€‚ƒ‚‚„…„‚ƒƒƒƒƒƒ‚‚€‚‚‚‚‚€€€€€€€€€€€€€€€€‰ŽŒŒ‰‚€}|}€€~{ƒ‹ŒŠŒ‹†‹Š‰ˆ‡‡‡‡‹‹Š‰‰‰‰ŠŠŠˆ‡‡†‡‡Š‰‡ƒ€€‚„……ƒ€~||||||||||{|}}|}‚‚‚‚ƒƒƒ„„ƒƒƒƒ‚‚………„„ƒƒƒ„„„„„„„„„„………††††††…„‚€}ƒ‰Œ‘‘‘‘“’Ž„~z{|}~}}~~~}}~}}}}}|||~~~}}}}}}}}}}}}}||}~~}||}}||{{{{z}„‡‡‡‡€~}~€‚‚„„ƒƒƒƒ„„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„‡‡‡‡ˆˆˆˆˆˆˆˆ‡…„ƒˆ†ƒ€€€‚‚‚€€€€zzzz{|~ƒƒ‚‚‚‚ƒƒ‚‚‚‚‚ƒƒƒ„„ƒ€€~ƒƒƒƒƒƒ„„„„„€€€€€€€€€€€€€€€€wrljmrvx}}}|{zzzutsrrstuqqrsuuvwxwuttuvwvvwxyz{{{{{||}}}xyz|€‚ƒ~~~~~€~~~}~~}}}~~~~~~~~~~~}}}}}}}}}}}}}}}}}}}}||{|}~€€}xuux{{wuvxxxy~~~~~€€€~€€€€€€€€€}~~}~~€€€€€€ƒ‚€~}||||}~€€~~}}}~€€€~€€€€~~~}|||||||}}€€~€€~~~~€€€€€ƒ„†ˆ‰‰ˆˆƒ€‚‚ƒƒƒƒ„ƒ‚‚‚€€‚ƒ‚€‚€€‚‚ƒ„ƒ‚€€€‚ƒƒ‚ƒ„„…„ƒ‚‚€‚ƒ‚‚‚ƒ€€€€€€€€€€€€€€€€‰‹‹‡‚{zzz|}|{ƒˆŠ‡†‰‹‹‹‹‹Š‰ˆˆˆˆˆˆ‡‡†††‡‡‡††………††……„„„ƒ…„‚€~|zy||||||||||||~‚€‚ƒ„ƒƒ„„„„„„„„„„„ƒ„ƒ„ƒ„ƒ‚‚‚ƒƒ„„„„………†††ˆ‡†…†„~ƒ‰‘‘‘…|||}~~}~}~}~}}}}}}}}}||}}}~~~}}}}}}}}||||||||{{{zyyyxw|‚†‡‡‡ˆ…‚€€‚€„„ƒ‚‚ƒ„„ƒƒƒƒƒƒƒƒ„ƒƒƒƒƒ„…„…‡ˆ‰ˆ‡‡ˆˆ‡††…„„‡†ƒ||‚‚€€}~€‚€~~}|{zyxƒ„ƒ‚‚‚‚ƒ‚‚‚‚ƒƒƒ„…ƒ€€€€‚‚‚‚‚‚€€€€ƒƒ„…………„„„€€€€€€€€€€€€€€€€~yutw{}~}||{{zzyxxxxwwvvwwxyyzz{yxwvvvxyzzzz{{{{yzzz{{{|yyz|}€€~~~~~~~~}~~}~~~€€€~~}}}}~~~~~~~~}}}}}}}}}||}}€€}yuux{{wuvwwwx}~~~€€€~€€€€€€€€€~~€€‚~|||||}~€€€~~~~~~~~~~€€~~~~}|||||||}~~€€~€~~~~€€€€ƒƒ‚‚ƒ…†ˆ‡„€€€€‚ƒ‚€€‚ƒ‚ƒƒ‚‚‚‚€‚ƒ€‚€€‚‚ƒ„„‚€ƒƒ‚‚ƒƒƒ‚‚ƒ„…„„ƒƒ‚‚ƒ‚‚€€€€€€€€€€€€€€€€ƒƒƒƒƒ€|x|{zzzzyy€„‡„„‡‰‰††††…………‡‡‡††………††………„„„†………„„ƒƒ„„‚€~|{z|||||||||||}}‚€ƒƒƒƒƒ„„„„„„„„„ƒ„ƒ„ƒ„ƒ„„„„„„„ƒƒƒƒ„„„………ˆ‡…………~‚‡ŒŒ‰‰‘‘“’†|||}~}}~}~}~}~}}}}}}}}}}}|||||}~}~}~}~}}}}}}}}||{{zzyyx}‚†‡‡‡ˆ…ƒ€€‚€„ƒƒ‚‚ƒƒ„ƒ„ƒ„ƒ„ƒ„„„ƒƒƒ„„……†‡ˆ‰ˆˆ‡ˆˆ‡‡……„„††„€}|‚€€€€€~~}|||ƒƒƒ‚ƒ‚‚ƒ‚‚‚‚ƒƒ„…„€€€€‚‚‚‚‚€€€€€€‚‚ƒƒƒ„……………€€€€€€€€€€€€€€€€~{xx{}~}~}}}|||{}~}{zyyyyyyyyyyxxxxyz|{{{{zzzyyyzz{{{||||}}~~~~~~€~~~~}€~~~~~€€€€~~}}
}~~~~~~~~~}}}~€}yvvx{{xvvwvvx}~€€€€€€€€€€€€€€€~~~€€~~~}€€€‚}|||||}€€~~~~}}~~~~€~~~~}}||{||||}}€€€~~€€~~~~~€€€€€€€ƒ„‡„‚€ƒƒ‚€ƒƒƒƒ‚‚‚‚€€‚‚‚€ƒ‚€€‚ƒƒ„…„ƒ‚„„ƒƒƒƒ‚‚ƒ„………„„„ƒ€€€€€€€€€€€€€€€€€€€ƒ€}}~{w}|{{{z€ƒ…………††††‡‡†††………„„……„„„ƒƒƒ……„„ƒƒƒ‚ƒƒ‚€~|{{||||||||||||~‚‚ƒ‚‚ƒƒ„„„ƒ„ƒ„„„ƒ„ƒ„ƒ„ƒ†††…„„ƒƒ‚‚ƒƒƒ„„„ˆ†……†„‚~€…‹Žˆ„‡’’“’’†||}~~|~}~}~}~}}}}}}}}}~~~}}||}}}}}}}}~~~~~~~~|||||{{{z~ƒ‡ˆ‡‡‡…ƒ€‚‚‚„ƒƒƒƒƒƒ„„„„„„„„„„ƒƒƒ„„………†‡ˆ‰ˆˆ‡ˆˆˆ††„„„††„~}‚€€€€€€€‚‚ƒƒ‚€€€|~‚‚‚‚‚€‚‚‚‚‚‚ƒƒƒ„…ƒ€€€€€‚‚€€ƒƒ‚‚ƒ„…†††€€€€€€€€€€€€€€€€}{yz|}|z~~~~~~}}ƒ…„‚~|xxxxwwwwyyyyyzz{zzzzzzzz{{{||}}~€€€€~~~~~~~}}‚€~~~~€€€€~~}}~~~~~~~~~~~€‚€~zwwx{{xvvvuvx}~€€€€€€€€€€€€€€€€€€~}}~~~~~€€€~}|||||}~€€€€~}}~€€€}}~~€€~~~~}}||{||||||~~€€€€~}~€}}~}~~~€€€}~€€‚‚‚ƒƒƒ‚‚‚‚‚‚‚ƒƒ‚‚‚‚‚‚€‚ƒ€ƒ‚‚ƒƒ„…„ƒ‚‚‚„„ƒƒ‚‚€‚„…†……„„ƒ€‚€€€€€€€€€€€€€€€€}xx{}zw|}~}{{{|x}}}ƒ‚……††‡ˆˆˆ†††………„„„„„„ƒƒ‚‚ƒƒƒ‚‚€~}|{|||||||||||}}€‚€‚‚‚‚‚ƒƒ„ƒ„ƒ„ƒƒƒ„ƒ„ƒ„ƒ„……„„„„ƒƒƒƒƒƒ„„„…‡†……†…‚€€€ƒŒ‘Ž‡Š““‘‘Ž‡||}~~{}~}~}~}~}}}}}}}}~~~~~}}}|}|}|}||~~~~~~~~}}|}|}||{€„ˆˆ‡‡‡…ƒ‚ƒ‚ƒƒƒƒƒƒƒƒ„„„„„„„„„ƒƒƒƒ……††‡ˆˆ‰ˆˆˆˆˆ‡‡……„„††…‚~€€€~€€~~~‚ƒƒƒ‚‚‚‚‚€‚€€€€€‚‚‚‚‚ƒ‚ƒƒ„…„€€€€€‚€€€€€€‚‚ƒ‚ƒ„…†††€€€€€€€€€€€€€€€€|{{}~}{zzzzzzzzz||zzyzyyxxxyyz{{|||{{|}}~€€‚‚†……ƒƒ€~~€~~~}‚‚~~~~~~€~~~~~~~~~~~~€€€|xwyzzwvvvtvx}~€€€€€€€€€€€€€€€€~~~~~~~~€}|}~~~~€€€€~||{|||}€€~}~€€~~~€€€€~~~~~}||{|{||||}}€€~}}~}}}~}~~€€€€€‚ƒ‚€‚ƒƒƒ‚ƒƒ‚‚ƒ‚‚‚‚‚‚ƒƒ‚€€‚‚‚€‚‚‚‚‚ƒ…„ƒ‚‚ƒƒƒƒ‚€€€€‚„……†…ƒƒ‚€ƒ„€€€€€€€€€€€€€€€€|xuuxyxvxz|{yxz}}‚„„†…‚‚ƒƒ„……††………„„„„ƒƒƒƒ‚‚‚€€~~~}}||{||||||||||||~‚€‚‚‚‚‚ƒƒƒƒƒƒƒƒƒ„ƒ„ƒ„ƒ„ƒ‚‚ƒƒƒƒ„„„„„………††‡………†…ƒ€ƒ€ƒŒ’Ž‡„“‘Ž†|}}~~{~}~}~}~}}}}}}}}}|||}}}}~}}}}}}}}~~~~~~~~}}}}}}}}}†ˆ‰‡‡‡†„‚ƒƒƒ‚ƒƒƒƒƒƒ‚„ƒ„ƒ„ƒ„„ƒƒƒƒ„…†‡ˆˆˆˆ‰‰ˆˆˆˆˆ††„„„†††ƒ€€€€€~~~}|}~€€‚‚€‚€€€‚‚‚‚‚‚ƒƒƒ„…ƒ€€€€€€€‚‚€€ƒƒƒƒ„„…„„„€€€€€€€€€€€€€€€€|xutvxwvsssttuuutuwzzzyxzzzzzzzz{|}€€€‚„……†ƒƒƒ„„………ˆˆ‡†…„ƒƒ€€~~~}}€}~~~~€€~~~~~~~~~~~~~€€€}yxyzxvvvutvy}~€€~€€€€€€€€€€€~~~~~~~~~~~~~~€€€€€~}|{||||}~€€€€€€€€~€€€€~~~~}}||{{{||||||~~€€€}|}~~|}}}~}~~~€€€€„ƒ‚‚ƒ„„„ƒ€ƒ‚„„‚€‚‚‚ƒƒ‚€‚ƒ€‚ƒ„„ƒ‚‚ƒ‚‚‚€€€‚ƒ„……„„‚ƒƒ‚‚‚„…€€€€€€€€€€€€€€€€}}~~~~~ƒ…ƒ~}‚…‰Š‡…‡‰‡ƒƒƒƒ„„„„ƒƒƒ‚‚€€€~~~}}}|||{{{{{{zz||||||
|||||}}€‚‚ƒ‚‚ƒƒƒƒƒƒƒƒƒƒƒ„ƒ„ƒ„ƒ„‚ƒƒ„„„…………††††…„…††ƒƒ‚‹Œˆˆ‘Žƒ~}~}}~}}~}~}~}~}}}}}}}}~~~€€~~~~~~~~|||||||||||}}}}~ƒ‡Š‰ˆ‡‡†„‚‚‚„ƒ‚‚‚ƒƒƒƒ‚‚ƒƒƒƒƒƒƒƒƒƒƒ„„††‡ˆˆ‰‰‰‰‰‰ˆˆ‡‡……„„…†‡…€€€€€~~~~}~~€~~€€€€ƒ‚‚‚‚ƒ‚‚‚ƒ‚ƒƒ„…„€€€€€€‚€€€‚‚ƒ„„„„„„ƒƒ€€€€€€€€€€€€€€€€yuomoqssqrrssttustuwy{|}||}}~~€‚„……„„…†††‡ˆˆˆ„„……††‡‡‡‡‡†…„„ƒ€€€€~~~}€€€~~~€€€€~~~~~~~~~~~}}}~€€€}zxyzvuuvutw{}~~~~~~€€€€€€€€€€€€€€~}}{{{|||}€€€€€€€€€~~€~~~~~}}}||{{{{||||}}€€}||~~|||}}~~~~€‚‚‚„ƒ‚‚ƒ„ƒƒƒƒ‚‚ƒ„‚‚‚ƒƒ‚€€‚ƒ‚€€€‚ƒƒ‚‚‚‚‚‚‚€‚‚ƒ„„…„ƒƒ‚ƒ„„„„…†€€€€€€€€€€€€€€€€€…‹Œˆ†‡Š‰ŒŽ‰‚~ƒ‡‹‹‡„…†„ƒƒƒ‚‚‚‚~~~}}}||{{{zzz{{{zzyyyyxyyyyyy||||||||||||~‚€ƒƒƒƒƒ„ƒƒƒƒƒƒƒƒ„ƒ„ƒ„ƒ„ƒƒƒƒƒƒƒ„„„„„„…………†„„…††„}€‰ŒˆˆŒ‘‘‘’’Š~~}}~~}~}~}~}}}}}}}}}}}}}}}}}{{{{{{{{{{||}}}}„‰ŠŠˆ‡†‡„‚‚ƒ„ƒ‚‚‚ƒƒƒƒ‚‚ƒƒƒ‚ƒ‚ƒƒƒƒƒ„…†‡ˆ‰‰‰‰‰‰‰‰ˆˆ‡††„„„…†‡…‚€€€€€€~}~€€€€}}~€‚€‚‚ƒ‚‚‚‚‚‚ƒƒ‚‚‚‚ƒƒƒ„…ƒ€€€€ƒƒ„„„„„ƒƒ‚€€€€€€€€€€€€€€€€~xqnpsvwtuuvwwxxvwwy|‚ƒ‚‚ƒ„…†‡ˆƒ„†ˆ‰‰ˆ‡ˆˆ‡‡‡‡‡‡„„……†††‡††……„„ƒƒ€€€€~~~}~~€€€€€€€€€€€~}}}}}}}}~}||}}~€€€~{yyzutuvutw|~~~~~~}}~~~~~~~~~~€€‚ƒƒ‚‚€€€€€€~}|{{{|}|}~€€€€€~}~~}~~~~~}}}||{{{{||||}~~€€~}{|~}|||}}}~~~‚‚‚ƒƒ‚ƒ„„ƒ~€‚‚‚ƒƒƒƒƒ‚‚ƒ„‚€€‚ƒ€€€€€€ƒƒ‚‚‚‚ƒƒ‚‚ƒƒ„„…„ƒ‚‚‚ƒ………„„…€€€€€€€€€€€€€€€€}†ŽŠ†‡‹ˆŒŒ†|vvz„ˆˆƒ€€~~}}||{{{{zzzyyxxxwwwvvzzyyxxwwwwwxxxxy||||||||||||~‚€‚ƒ„ƒƒ„„ƒƒƒƒƒƒƒƒƒ„ƒ„ƒ„ƒ„†………„„ƒƒ‚ƒƒƒ„„„„…„„…††„‚~{~†ˆ…‡Ž““’’“’‹„€}~}|€}~}~}~}~}}}}}}}}||{{zzzzzzzzzzzzzzzzzzzzz{{||}}}…‰‹Šˆ††‡…‚‚ƒ„ƒ‚‚ƒƒƒƒ‚‚‚‚‚‚‚‚‚ƒƒƒ„…†‡ˆŠ‰‰‰‰‰‰‰ˆˆ‡‡……„„…†‡†ƒ€€€€€€~}€‚ƒƒƒ}}~€‚‚ƒƒƒ‚€€‚ƒƒ‚‚‚ƒƒ„‚‚‚‚ƒƒƒ„…ƒ€€€€€€€‚ƒƒ„„„„„ƒƒ€€€€€€€€€€€€€€€€}{yy{||{€{wuwzwy{|~€ƒ……„ƒ…ˆ‰‡„‡‡‡‡‡‡‡‡††††††††ƒƒƒƒƒƒƒƒ……††…ƒ‚‚}}~€€~~~~~€€€€€€€~~}~}}~~}||||}}~~~~~€~}€~{yyzxwvuuwz}~€€€€€~~€€€€€‚ƒƒ‚‚‚‚„‚€€€}‚ƒ}€~~€}|{{|||{}}~~€€€€€€€~~~~~€~~~}||{{{{{{{{{{{{|}~€€~||~~|||}~~‚‚ƒƒ€~€‚ƒ„ƒƒ‚ƒƒƒƒƒƒƒƒƒ‚‚‚‚‚‚ƒƒƒ‚‚‚ƒ„„ƒ‚€€‚‚€€€‚ƒƒƒ‚ƒ‚€‚‚‚ƒƒƒ„„………†…„„„…‡ˆ€€€€€€€€€€€€€€€€~~€‚€}„€~{ywvvyy{~zv{zyxxy{|yyyyyyyyyyyyyyyy{{{zzzyyyyyxwvvv{||}}||{{|zy|‚‚€‚ƒƒƒƒ„„„ƒƒ‚‚‚‚ƒƒƒƒƒƒ„ƒƒƒƒƒƒƒƒ„„„„…………††………„„„€„„„‰‘’‘‰€~~~~~~~~~~}zzzyzz{{yyyyzyxwxyyzzzyyyyzzz{{{z{|}}}||€…‰‰‰Šˆ…„‚ƒƒƒƒ„ƒƒƒƒ‚‚‚„„„ƒƒƒƒ‚ƒ„……‡‡ˆ‰‰‰‰‰‰‰‰‰Š‰ˆ†…………ƒ„††…ƒ€~€€€€€€€€ƒƒ‚‚€‚‚‚‚‚‚‚‚‚ƒ„ƒ€‚€€€€‚
€‚‚€~€‚ƒ„…„„„„„„„„€€€€€€€€€€€€€€€€|{{}€€}€€}yvwxxz{||~€‚…„„………ƒƒƒƒƒƒƒƒƒ„„„„„„„„‡‡‡‡‡‡‡‡‹‹Š‰‡…ƒ€~~~€~~~~€€€€€€€~~}}~~}~}}}}}~~~}~€~~€€|z{|yxvuux|€€€€€€€€€‚ƒƒ‚‚„‚€€€€€€€€€~~€}|z{|}||}}}~€€€€€€€€€€€~~~}||{{{{{{{{{{}}~€€€~||~~||}~~€‚‚ƒ‚‚€€€‚ƒƒƒƒƒƒƒƒƒƒƒƒƒ‚‚‚‚‚‚ƒ‚‚‚ƒ„ƒ‚€‚€€€‚ƒ„ƒ‚‚ƒƒ‚‚‚‚‚ƒƒƒ‚ƒƒ„…†‡‡……†‡‡‡‡‡€€€€€€€€€€€€€€€€{{}}z|~|ywvutwwx{}~}||{zyxyz{||||||||zzzzzzzzxwwwvvuuuuuvvwww{||}|||{{|{z}‚ƒ€€‚ƒƒƒƒ„„ƒƒƒƒ‚‚‚‚‚ƒƒ„„„ƒƒƒƒƒƒƒƒƒƒƒƒ„„„„††……„„„„‚„„„‡‘’ˆ}||||{{{€€€€~}zyyyyzz{yxxyyzyxyyyzzzzz{{{{{{{|{{|}}}}|‚‡ŠŠ‰‰ˆ‡„ƒ‚„ƒƒƒƒƒƒƒƒ‚‚ƒƒƒƒƒƒƒƒƒ„……‡‡ˆ‰‰‰‰‰‰‰‰‰Š‰‡†…………ƒ„††…ƒ€~€€€€€€€€€€€‚ƒ„ƒ€‚€€€€‚€‚‚€€‚‚ƒ€€€€€€€€€€€€€€€€€€€€€€€€yz{‚‚€}‚ƒ|xvuyz{{{z|}‚„…†…„„„‡‡‡‡‡‡‡‡ˆˆˆˆˆˆˆˆ‰‰‰‰‰‰‰‰‹‹‹Šˆ†„‚€~€~~~~€€€€€€~~}}}~€€~}~~~~~~}}}~~€~}€~|}yxuuvz‚€€‚‚‚‚€€€€€‚€€‚‚ƒƒ‚„‚€€€€€~~€|{zz|}}}||}}~€€€€€€€€~~~}}|{{{{{{{{{{~~€‚‚ƒ€~||~~}}~~€€€‚‚ƒƒƒ€€€‚‚„„ƒƒƒƒƒƒƒƒƒ‚‚‚‚‚‚ƒ‚‚‚‚‚ƒƒ€€‚‚€€€‚ƒ„„‚‚ƒƒ‚‚‚‚‚‚‚ƒƒ‚‚ƒ„†‡‰‰†‡‰Š‹Š‰ˆ€€€€€€€€€€€€€€€€zy{}|y{}|{ywutswwwwxy||{zxxxxyxxxxxxxxwvwvwvwwvvvuutttuuvwwxxy{|||}||{|}|{~ƒ„€‚ƒƒƒƒ„ƒƒƒƒƒƒ‚‚‚ƒƒ„„…„ƒ„ƒ„ƒ„ƒ‚‚‚ƒƒƒƒ„††………„„„‚……ƒ„‡ŠŽ‘‘‡€€€€€€€€€~}|yyyxyyzzyxxxzzzyyyyzz{{{||||||||{|}~~~}}„ˆŒŠˆ‡ˆˆ…ƒ‚ƒƒƒƒ‚‚ƒ‚ƒƒƒƒ‚‚ƒƒ„„„„ƒ„……‡‡ˆ‰‰‰‰‰‰‰‰‰Š‰‡……„……ƒ„††…ƒ€~€€€€€€€€€€€€€€€€‚ƒ„ƒ€‚€€€€‚€‚€€€€€€€€€€€€€€€€€€€€€€€€xwx{~€€~‚ƒ„ƒzurwyzzyyyz~‚…‡†‡‰‹ŠŠŠŠŠŠŠŠ‰‰‰‰‰‰‰‰††††††††„……†…„ƒƒ~€~~€~~~€€€~~}}€€~€€€~~}}}~€~~~€€€€zxuux}„‚‚ƒƒƒ‚‚€€€‚‚‚€‚‚ƒ‚€€€ƒƒ‚‚€€‚€€‚€~~€|{z{|}}}{||}~~~€€€€€~€€~~~}||{{{{{{{{{{}~~€‚‚€~||}~}~~€‚‚‚‚‚€€€‚„…ƒƒƒƒƒƒƒƒƒ‚‚‚‚‚‚ƒ‚‚‚‚‚‚‚ƒƒ‚€‚€€€‚ƒ……ƒ‚ƒƒ‚‚ƒ…‡‰Š‹‰ŠŠŒŒŒ‹Š€€€€€€€€€€€€€€€€|z{~~{|||{zxvtsxyxwuux{zzyxwwvvttttttttvvvvvvvvyyyxxwwwzzzzyyyy{||}|||{}~}{~„…€‚ƒƒƒƒƒƒƒƒƒƒƒƒ‚ƒƒ„……„„„„„„„„‚‚ƒƒƒ„„„††……„„„„……‚ƒ‰Ž’‘‰„€}~~€‚ƒƒ€}}{zyxxxxyyzzyxyz{zzyyzz{{||}}||{{{{||}~~~~…‰Œ‹†…†ˆ†„‚‚ƒ„ƒƒ‚‚‚ƒƒƒƒ„ƒƒƒƒƒ„„„ƒ„……‡‡ˆ‰‰‰‰‰‰‰‰‰‰ˆ†…„„„„ƒ„††…ƒ€~€€€€€€€€€€€€€€‚‚ƒ„ƒ€‚€€€€€€~~€€€€€€€€€€€€€€€€|ywx{‚‚‚ƒ„„|uqtvxyyxyyz~‚„…†ˆ‹„„„„„„„„ƒƒƒƒƒƒƒƒ„„„„„„„„‚ƒƒƒ‚~~€~€~~~€€€~~~~€€€€~€€€~~~~~€~}~€€‚zxuvyƒ…‚‚ƒƒ„ƒ‚‚‚€‚‚‚‚€‚‚‚€~~‚€€€€€€~~€|{zz|}}}{||}~~~€€€€€~€€~~}}|{{{{{{{{{{{||}~€€€~||~~}~~€‚‚‚€€€‚€‚„…ƒƒƒƒƒƒƒƒƒ‚‚‚‚‚‚ƒƒƒ„ƒ„ƒƒƒƒ€€‚
‚€€‚‚…†ƒ‚ƒƒƒ‚ƒ…‡‰Š‹Š‰‰ˆ‰‰‹‹€€€€€€€€€€€€€€€€}yy}~||~yzzywusruwxxvuwxzzzzzxxwyyyyyyyy{{{{{{{{{{zzzyyyzzzzyyyy{|||}||{}~}{~„…€‚ƒƒƒƒƒƒƒƒƒƒƒƒ‚‚ƒƒ„„…„„„„„„„„ƒ„„„…………†…†……„„„€€„…‚Š”Š…||}~‚~~~~||{xxxxxxyy|{zy{{{{zzzzz{|}|{{{{zzz||}~~~~„‡‹‰†‚„‡‡…ƒ‚ƒ„ƒ‚‚‚‚‚ƒƒ„„„„„ƒƒƒƒƒƒ„……‡‡ˆ‰‰‰‰‰‰‰‰‰‰ˆ†„„ƒ„„ƒ„††…ƒ€~€€€€€‚‚€€€€€‚‚ƒƒ‚ƒ„ƒ€‚€€€€€€‚€~~}~~~~~~~~€€€€€€€€€€€€€€€€‚~zy|€„†‚‚‚‚}vrrtwyxxxyxz}€‚ƒ€€€€‚‚‚‚‚‚‚‚„„„…„…„„………„ƒ€~€€~~~€€€€€~‚€~€~€€€€€}~€~~~€€zwuw{€ƒ„ƒƒƒ„ƒƒ‚‚‚€€€‚‚€€€}||€‚€€‚‚€€€~~€}|z{{|||||}}~€€€€€€€~~~~~}||{{{{{{{{{{{{|}~€€~||~~}}~~€€€€€€ƒƒ„ƒƒƒƒƒƒƒƒƒ‚‚‚‚‚‚ƒ„„„…„„ƒƒƒ‚€‚€€€‚‚…†ƒ‚ƒƒƒƒ‚‚‚‚‚‚‚ƒ…†ˆ‰‰‡‡………†ˆ‰€€€€€€€€€€€€€€€€{wwz|{z{wwxxwtrppsvxxxxx{||}}}{{||{|{|{|z{z{z{zzyyyxxwwwstuvxyzz{||}|||{|}|{~ƒ„€‚ƒƒƒƒ‚ƒƒƒƒƒƒ„‚‚ƒƒƒƒ„„ƒ„ƒ„ƒ„ƒ„„„„………††††……„„„„‚ƒ…ƒ‚ƒ’”ˆƒ€~~}|xxxxxyyz}|z{{}}}{{zzz{||zzzzzzzz{|}~~~}}„†‰Š†ƒƒ†‡†ƒƒƒ„ƒ‚‚ƒ‚ƒƒƒƒƒ„„„„ƒƒ‚‚ƒ„……‡‡ˆ‰‰‰‰‰‰‰‰‰ˆ‡†„ƒƒƒ„ƒ„††…ƒ€~€€‚‚€‚‚‚€€€‚‚‚‚‚‚ƒ„ƒ€‚€€€€€€€€‚‚€€€€€€€€€€€€€€€€€‚|{}€‚ƒ€€€€}xsqtwyyxxxuuwz~€€€‚‚ƒ‚ƒ‚ƒ‚„„„„„„„„„„„„„„„„…………„‚€~~~€€~~~€€€€€~€€€€€€€€€€€~€€€€€€~~€~}€€~}€ywvx}‚‚ƒƒƒƒƒ‚‚€€€€€€€€}|{{}~€‚€€‚‚‚€€~~€~}{{{|{z}}}~€€€€€€€€€€~~~~~~}}|{{{{{{{{{{||}~€€€~||~~||}~~€€~}~~€‚ƒƒƒƒƒƒƒƒƒƒƒƒƒ‚‚‚‚‚‚ƒƒƒ„„„ƒƒ‚ƒ‚€€‚‚€€€‚„†ƒ€‚ƒƒƒƒ‚‚‚‚‚‚‚ƒ„…†‡‡…………††‡‡€€€€€€€€€€€€€€€€|xv{}|{{yz{{zxusrstwyyxwwxz|}||{yyyyyyyywwwwwwwwxxxwwvvvrstuwxyz{|||}||{{|{z}‚ƒ€€‚ƒƒƒƒ‚‚ƒƒƒƒ„„„ƒƒƒƒƒ‚‚ƒƒƒƒƒƒƒƒƒƒƒ„„„……††………„„„ƒ}„„„…’“†‚€€€€€€€€~}|{yxxxxyyz|{z{}€€|{zyyz{{zzzzzzzz{{|}}}}|„…ˆŠ‰…„…ˆ†„ƒ„„ƒ‚ƒƒƒƒƒ‚‚‚ƒƒƒƒƒƒƒƒƒ„……‡‡ˆ‰‰‰‰‰‰‰‰‰ˆ‡…„ƒ‚ƒƒƒ„††…ƒ€~€€‚‚ƒ€€‚‚€€€‚€‚ƒ„ƒ€‚€€€€€€€€€ƒƒƒƒƒƒƒƒ€€€€€€€€€€€€€€€€}{{}~}~~}yuruxyxwwwrrtyƒ„ƒ‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒ€‚ƒƒƒƒ‚€~}}‚€~~~€€€€€~~€€€€€€€~}~€€‚‚}~€~~€€~}}ywvy~€ƒƒƒƒƒ‚€€€€€€€€€€€}{zz{}€€€‚€‚‚€€~~€~|{{{zz}}~~€€€€€€€~€€~~~}||{{{{{{{{{{~~€‚‚€~||~~|||}~~€~}|}~~~€ƒ„ƒ‚‚ƒƒƒƒƒƒƒƒƒ‚‚‚‚‚‚ƒ‚ƒƒ„ƒƒ‚ƒ‚€€‚‚€€€‚€„†ƒ€€‚ƒ‚‚‚ƒƒƒ„„…………†‡‰‰ˆ‡‡€€€€€€€€€€€€€€€€zy}€}~}~€€}zxvuuvxyvtrtvyzzyyyyyyyyyyxxxxxxxxyyxxwwwvwwwwxxxx{||}}||{{|zy|‚‚€‚ƒƒƒƒ‚‚‚ƒƒ„„„„„„ƒƒ‚‚‚ƒƒƒƒƒƒƒƒ‚‚‚ƒƒƒ„„††……„„„„„}€„…†‡’‘‹…ƒ‚‚€€€€€€€~|{yxyyxxyyzz{zz{~€‚ƒ}|zyyyz{yzzzz{{{z{|}}}||„…ˆŒ‹‡……ˆ†„ƒ„„ƒ‚„„ƒƒƒ‚‚‚‚ƒƒƒƒ„„„ƒ„
……‡‡ˆ‰‰‰‰‰‰‰‰‰ˆ‡…„ƒ‚ƒƒƒ„††…ƒ€~€€‚ƒƒ~€‚‚ƒ€€€€€‚‚‚‚‚‚‚‚‚ƒ„ƒ€‚€€€€€€€‚‚‚‚€€€€€€€€€€€€€€€€{z{|~}€€€€{||{zyxwutuvy}„‚‚‚ƒ„‚ƒƒƒƒƒ‚ƒƒƒƒƒ‚‚‚ƒƒ„„„ƒ‚‚€„ƒ„……„‚}€€€~~~€€€~~€€}}}}~~€€€€~~~~€‚‚€}{{zvwƒ‚€€€‚ƒƒƒƒ‚€€€€€€€~‚}zyz~€€‚‚€€€~~~}}}|||||z{|}~€€€€€€}~~~}}}}}}}}{{{{{{{{|}~€€€€~{z|~~~~}||~~|~~~~~~~~‚ƒ„…‚ƒƒƒƒ‚‚ƒ„„‚‚ƒƒ‚€‚ƒƒƒ‚‚‚‚‚€€€€€‚„„‚‚ƒ‚ƒ‚‚‚ƒ‚‚ƒ†‰ŠŠˆ††ˆ‰ˆŠ‰…€€€€€€€€€€€€€€€€{ywwy{||{zyxxyyy{zzzzyyyxvtuwyyxwxyyyyxwwwwwwwwwwwwwwwwwwvvvwxyy{}~|{{ywwuttvz~€‚ƒƒƒ‚‚‚‚ƒƒ„„„††…„„ƒ‚‚ƒƒƒ„„„„„‚‚‚‚ƒƒ„…………„„ƒƒƒ‚€~‚…‡‡”’‰‚€€€~~}~~~~}|yyxxwwwxz{{{ƒ……}zzzzyy{{{zzzzzz{||}}||€…‹Œ‰†…Š‰‡„ƒƒƒƒƒƒ‚‚‚„„…„ƒ‚‚ƒ††††††††ŠŠŠŠŠŠŠŠˆ‡†„ƒƒƒ„‚…ˆ‡ƒ|{~~ƒƒ~€€€€€€€‚‚‚‚‚‚‚‚ƒƒ‚€‚‚‚‚‚‚ƒ‚€‚‚‚€€~€€€€€€€€€€€€€€€€€€€€€{zyz{}}}€€€€€€€€~~~~|{zyvvvwz}€‚‚ƒ‚‚‚ƒ‚ƒƒƒƒƒ‚ƒƒƒƒƒƒ‚‚ƒƒ„„ƒƒ‚‚€€„„„……„‚}€€€~~~€€~€€‚€~€€€€€~~~~~€‚‚€}||{wx„‚‚‚‚ƒƒƒƒƒ‚‚‚‚‚‚€|zz{~€‚‚€€‚‚€€€~~}}}}||||z{|}~€€€€€€~~~~~}}}}}}}}{{{{{{{{|}~€€€€€~{{|~~~}|}~}|~~~~~~~~‚ƒ„…ƒƒ„„„ƒƒ‚ƒ…„ƒ‚„ƒ‚‚‚ƒƒƒƒ‚‚‚‚‚€€€‚‚‚„…ƒ‚‚‚‚‚‚‚‚‚‚‚‚ƒ†‰ŠŠˆ‹‰‰ˆ‡ˆˆ…€€€€€€€€€€€€€€€€}{xxy{||zyxxwxxxzzzzzzzzyxvwyzzyvwwxxxwvwwwwwwwwwwwwwwwwvvvvwxyy{}~|{{ywwvttvz~€‚ƒƒƒ‚ƒƒƒƒƒƒƒƒ………„„ƒƒƒ‚‚‚‚ƒƒƒƒ‚‚‚‚ƒƒ„……………„„„„ƒ‚…‡‡”‘ˆ‚€€ƒ‚€~~}€€€€~}zyxwwxyyz{{|~ƒƒ‚|{yxyyyx{{{{{{{{z{{}}}||~ƒˆ‹Šˆ…„Š‰‡…„ƒ‚ƒƒƒƒƒƒ‚‚‚ƒ„„„ƒ‚ƒƒ††††††††‰‰‰‰‰‰‰‰ˆ‡…„ƒƒƒ„ƒ…‡‡ƒ||~}~€}€€€€€€€€‚‚‚‚‚‚€‚ƒƒ‚€‚€‚€‚‚ƒ‚€€‚‚€€€€~€€€€€€€€€€€€€€€€€€€€€€€€|yxz{}}€€€€€€€€€€€}|{xxxxz|€ƒƒƒ‚€‚‚‚ƒƒƒƒ‚ƒƒƒƒƒƒƒƒƒƒ„„„ƒ‚‚€~„„…††„‚~€€€~~~~€€€€€€‚‚‚‚€€€€€€€€~~~~~}}}}€‚‚€}||{xyƒ‚ƒƒ„ƒ„ƒ‚‚‚‚‚€€€€€|z{}€‚‚‚€‚‚‚€€€~~~~}}||{z{|}~€€€€€€€€€~~~}}}}}}}}||||||||||~€€€€€}{{}~~}~}}}~~~}‚ƒ„…„„„„„„ƒƒ„……ƒƒ„„ƒƒ‚‚‚‚ƒƒ„‚‚‚‚€‚‚ƒ……‚€‚‚‚‚ƒ‚‚‚‚‚ƒ…‰ŠŠˆ‹ŠŠ‰ˆ‹‹ˆ€€€€€€€€€€€€€€€€~|yxzz{{zzyxwwxxyyzz{{||{yxxz{{zwwxxxwwwwwwwwwwwwwwwwwwwwvvvwxyy{}~|{{ywwuttvz~€‚ƒƒƒ‚ƒƒƒƒƒƒƒƒ„„„„„„„„‚‚‚‚ƒ‚‚‚ƒƒ„„……………„„„ƒ‚…†‡Œ‘‡‚€€€€~|{z|zyxxy{|y{{|~~{zyyyzyyzz{{{{||z{||}||||†‰ˆ†„„‰ˆ‡…„„„„ƒƒƒƒƒƒ‚‚‚ƒ„ƒƒƒ„„††††††††ˆˆˆˆˆˆˆˆˆ‡†„„ƒƒ„ƒ…‡†‚}|€€€~|~~~~~~~~€€€€€€€€€€‚‚‚‚€€‚ƒƒ‚€‚€€€€‚ƒ‚€€‚€€€~€€~~€€€€€€€€€€€€€€€€ƒ{yz}€~~}}||}~~~~}|{zzz{|~~‚ƒƒƒ‚‚‚‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„ƒƒ‚‚€€„„…††…‚€~€€€~~~~€€€€€‚‚‚€€€€€~~}}}}||}‚‚€}|||zz~€€€‚ƒƒƒ‚‚€€€€€€€|{}€
€‚‚€€€€~~||{{z{|}~€€€€€€€€€€~~}~~}}}}}}}}|||||||||}~€€€€}{|}~||}|~~€€€€€€€€‚ƒ„…„„„„„„ƒƒ„……„ƒ……„„ƒ‚‚‚ƒ„…‚‚‚‚€‚‚ƒƒ……ƒ€€€‚ƒƒƒ‚‚‚‚ƒ†‰‹Šˆ‰‰‹ŒŒŽŽŠ€€€€€€€€€€€€€€€€~|yxxyyy{{zxxxxxxxyz{|}~{zyyyz{{xxxyxyxxwwwwwwwwwwwwwwwvvvvwwxyy{}~|{{ywwvttvz~€‚ƒƒƒ‚„„„ƒƒ‚‚‚‚ƒƒƒ„„„„‚‚‚‚‚ƒƒ‚‚ƒƒ„„………………„…„‚…††ŠŽ‹…‚€‚~€‚ƒ~~}{yxzywvvxz{yz{|}~{{zzz{{{{yzzzz{{{z{{}|}|||€…ˆ‡†……‡†…„ƒ„„„ƒƒƒƒƒƒƒƒ‚ƒ„ƒƒ„…††††††††‡‡‡ˆ‡ˆ‡‡ˆ‡…„ƒƒƒ„„†‡…‚~}}€~}}}}}}}}}€€€€€€€€€€‚€€‚ƒƒ‚€‚€€€‚ƒ‚€€‚€€€€~€~~~~~~}}€€€€€€€€€€€€€€€€„€{xz|~}}{zyxxzz||~}}}|||{||}}ƒ‚‚‚ƒ‚‚‚‚‚ƒƒƒƒƒƒƒ„ƒ„ƒƒƒ„„„ƒ‚‚€~„……††…ƒ€~€€€~~~~~~€€€€€€€€€~€€}}}}}||||‚€~}}|{{}~~~€€‚‚‚‚‚‚€€€€€~~€||~€ƒ‚‚€‚‚‚€€€~}||{z{|}~€€€€€€€~~~~}}}}}}}}||||||||||~€€€€~}|}~{{{}}€‚€€€€€€€€‚ƒ„…ƒƒ„„„ƒƒ‚„……„„……„„ƒ‚‚‚ƒ„…‚‚‚‚€‚‚ƒ……‚€€€‚‚ƒƒ‚‚‚‚ƒ…‰ŠŠˆŠŠ‹Œ‹Œˆ€€€€€€€€€€€€€€€€}zxvwxyx{zywwvvwwxxz{|}~zzzyxyzzxxxxxxxxwwwwwwwwwwwwvvvvwvvvwxyy{}~|{{ywwuttvz~€‚ƒƒƒ‚„„„ƒƒ‚‚‚‚‚‚ƒƒ„„„‚‚ƒƒƒƒ„„ƒƒƒ‚ƒƒ„„„„…„…„……„‚‚„……†ˆ†‚ƒ‚‚†„‚~~€~|{zyxwwxz{yz{{}~}zyyzzzzzzyyyyzzzzz{||}||||€…‡ˆ‡ˆˆ…„„‚‚‚‚ƒƒƒƒƒƒƒƒƒ‚ƒƒƒƒ……††††††††‡‡‡‡‡‡‡‡ˆ‡†„„ƒ„„…†‡…~}~‚‚€~~}}}}}}}}€€€€€€€€€€€€€€€€€‚ƒƒ‚€‚€€€‚ƒ‚€‚‚€€€~€€~~~~~}}}€€€€€€€€€€€€€€€€~yxy|€~~}|{zyyz{|}~~~~}}}}}}}}€‚ƒ‚‚‚ƒ‚‚‚‚‚ƒƒ„ƒƒƒ„ƒ„„„ƒƒƒ„ƒƒ‚‚€€„…†‡‡†ƒ€€€~~~€€€€€€€€€€~~~€}~}}|}|||~‚€~}~}|}}~~~|}~€‚‚‚ƒƒƒ‚‚‚‚‚€€€‚€~}„ƒ‚ƒƒ‚€€€}|{{z{|}~€€€€€€€~~~~}}}}}}}}|||||||||}~€€€€€}}|~}{z|}~~€ƒ€€€€€€€€‚ƒ„…ƒƒ„„ƒƒ‚‚ƒ……„„……„ƒ‚‚‚‚ƒƒ„‚‚‚‚€€€€€‚‚„„‚€€€‚‚ƒƒƒ‚‚ƒ†‰‹Šˆ‰ˆˆˆˆ‹Œ‰€€€€€€€€€€€€€€€€|zwwwyyyzywvuuuuwwxyz{|}z{{zxxy{xxwwwwwxwwwwwwwwwwwwvvvvvvvwwxyy{}~|{{ywwvttvz~€‚ƒƒƒ‚ƒƒƒƒƒƒƒƒ‚‚‚ƒƒƒ„„ƒƒ„„„„……ƒƒƒ‚ƒƒƒ„ƒ„ƒ„„„„„…‚€€‚„……ƒ„ƒƒ‚ƒ…„‚€€€~}|zzyyxxyyyzzz{}|zwxyyyxxyzzyzyzyzz{{}}}||{„‡‡ˆˆ‰†…„ƒ‚‚‚ƒƒƒƒƒƒ‚ƒ„ƒƒ„…††††††††‡‡‡‡‡‡‡‡ˆ‡…„ƒƒƒ„†††„€~~~€€€~}}~~~~~~~~~€€€€€€€€€€€€€€€€‚‚ƒƒ‚€‚€€€€‚ƒ‚€‚‚‚€€€€~€€~~~~~€€€€€€€€€€€€€€€€}zy{}€€€€€€€}}}~~}}}€‚ƒƒƒ‚‚‚ƒ‚‚‚‚‚ƒ„ƒƒƒƒ„„„„ƒƒ„„„ƒ‚‚€~„…†‡‡†ƒ€€€~~€€€€€€€€€€€€€€~~~€€~~~}}}}|{~‚€~~~~~€}}€‚‚€€€€€€€€~€‚‚~€€ƒ‚‚‚€€€€€~|{{z{|}~€€€€~}}}}}}}}}}}}}}}}}}||~€€€€}|}~€}z|}~~€‚€€€€€€€€‚ƒ„…„„„„„„ƒƒƒ„…ƒ„……„‚‚ƒƒƒƒ‚‚‚‚‚€€ƒ„€€‚‚ƒƒ‚‚ƒ…‰ŠŠˆ‰ˆˆˆˆŠ‹ˆ€€€€€€€€€€€€€€€€|zxwy{||{{yxwvvvvwwxyz{{||}{yyz|xwwvwwxxw
wwwwwwwwwwwvvuuwvvvwxyy{}~|{{ywwuttvz~€‚ƒƒƒ‚ƒƒƒƒƒƒƒƒ‚‚‚ƒƒƒƒƒƒƒ„„„„……„ƒƒƒ‚ƒƒƒ‚‚ƒƒƒƒ„„…ƒ€€‚„……ƒ‚ƒƒ‚~}|zz{zzxwvzzzyz}}{z|}}|{{{|{{{zzzzz{||}|||z}„…†ˆ‰Šˆ‡…ƒ‚‚‚‚‚‚ƒƒƒƒƒ‚ƒ„ƒƒƒ„…††††††††ˆˆˆ‡ˆ‡ˆˆˆ‡†„ƒƒƒ„†‡†ƒ€}~€~}~€€€€€€€€€€€€€€‚‚‚‚‚ƒƒ‚€‚€‚€‚‚ƒ‚€€‚‚€€€€€~€€€~€€€€€€€€€€€€€€€€}{{}€‚‚‚‚ƒ„„……………„ƒ‚€}}}~~~~}‚ƒ„‚ƒ‚‚‚‚ƒ„ƒƒƒ„„„„„ƒƒ„„ƒƒ‚‚€€„††‡‡†„€€€~~€€‚€€€€€€€~~~~€€~~~~}}}}{~‚€~~~€€€€‚~~~}}}||{{|||}}}}€ƒƒ€€}|{|~€€€‚‚ƒƒ€€€€‚€~|{{z{|}~€€€€€~}|}}}}}}}}}}}}}}}}|}~€€€€}||~€€}z}€€~}€‚ƒ„…………………„„ƒ„„ƒƒ……„€‚ƒƒƒ‚‚‚‚‚~~€€€‚ƒ€‚‚‚ƒ„‚‚ƒ†‰ŠŠˆ‹‹Œ‹ŠŠ‰…€€€€€€€€€€€€€€€€}{yy{}~~~|{zyyywwwxxyyz}~~}zz{}yyxwwxyywwwwwwwwwwwvvvuuvvvvwxyy{}~|{{ywwuttvz~€‚ƒƒƒ‚‚‚‚ƒƒ„„„ƒƒƒƒƒ‚‚‚ƒƒƒƒ„„„„„ƒƒƒ‚ƒƒƒ‚‚‚‚ƒƒƒ„…ƒ€€‚„„„ƒ‚~ƒƒ€‚‚‚‚‚€€‚‚€€‚‚ƒƒ‚€}|zzyxz}~}‚‚€}}||{{zzz{||}}||x{‚ƒ„†ˆŒ‰‡…„ƒƒ‚‚‚ƒƒƒƒƒ‚ƒ„„ƒƒƒ„††††††††ˆˆˆˆˆˆˆˆˆ‡…„ƒƒƒ„†‡†ƒ}~€‚~€€€€€€€€€€€€€ƒƒƒ‚‚€€‚ƒƒ‚€‚‚‚‚‚‚ƒ‚€€‚€€€~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~~~}}|}}~€‚‚‚‚ƒ„‚‚‚ƒƒƒ„„„ƒƒƒƒƒ„…„ƒƒƒƒƒƒ‚€~€€€‚†††‡‡†„‚‚€~~~~€€€€€€€€€€~~~~}}~~~~}~~€€€€€€€~~~~~~~xxy|~}zwxyz||}}}~€‚|{{}}}}}}}|||}~€‚‚‚‚‚‚€€€€~~}||}}}~~~€€€€€€€€~}}}||||||||{|||}}}~€€€€€|{~€~}~~~~~~~~~‚€€ƒ„‚€ƒ…†„„………………ƒƒƒ„„„„„ƒƒ‚‚‚‚ƒƒ‚‚€€‚‚€€€‚ƒ„…ƒ‚ƒƒ‚‚‚‚‚‚„†ˆ‰‰‰ŠŒŽŽ‹‰‰Š€€€€€€€€€€€€€€€€~~~~}}}}||}|}||||||||||||||}|{{zzxxxyyxwxxxwwwwwwvvvwwwvvvvvwwxxz|}|}}zvutstvy}€‚ƒƒƒƒ‚‚‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„„ƒ‚…€€‚‚‚„…†‡‡‡†…††…„‚€}{z{~€‚‚…„‚€€€~}}||{z{{||{zz{{|~€ƒ…‡††…„„ƒ‚‚ƒ‚ƒ‚ƒ‚ƒ‚‚‚‚ƒƒ„„„……††‡‡‡‡…‡‰ˆ…„†ˆ††………„„„„††~€€~~}|}}~‚‚€~}€€€€€€€€€‚‚‚€‚„…‚€€‚‚‚€€€~}~~€€€€€€€€€€€€€€€€€€€€€€€€€€€~~~}}}}~~‚ƒ‚‚‚ƒ‚‚‚ƒƒƒƒƒƒƒ‚‚‚ƒ„„„ƒ‚‚ƒƒ‚‚€€€€‚††‡‡‡†„‚‚€~~~~€€€€€€€€€€~~~~~~~~}~€€€€€€~~€€~~~~{zz|}}zxyz{|}}||}~~~{zyz}~~€‚‚‚‚€€€€~~€€€€}}}~~~~~€€€€€~~}||||||||{|||}}}~€€€€€|{~€~}~~~~~~~~~€€‚ƒ‚ƒ…†„„………………ƒƒƒ„„„„„ƒƒ‚‚‚‚ƒƒ‚‚€€‚‚€€€€‚‚„…ƒ‚‚‚‚‚‚‚‚„…ˆ‰ŠŠ‹‹ŠŠ‹ŒŒŠˆ€€€€€€€€€€€€€€€€~~~~}}}}|||||||||||||||||||||{zzyyxxxyxwxxxwwwwwwvvvwwwvvvvvvwxxz||||}zvutstvy}€‚ƒƒƒƒ‚‚‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„„ƒ‚„€€‚‚‚ƒ„…†††…………„ƒ‚€~}}‚………€€€‚ƒ€€€€~~}|{{{}~ƒ„……„„ƒƒ‚‚‚ƒ‚ƒ‚ƒ‚ƒ‚‚‚ƒƒ„„„………††‡‡‡…‡ˆˆ……†ˆ††……„„„„„††~€€€€€€€€€€~}|||}}~‚€~~€€€€€€€€€€€€€€€€€€‚„
…‚€€‚‚‚€€€€€~~€~~~~~€€€€€€€€€€€€€€€€€€€€€€€€€€€~~~~}}}}~€‚‚ƒ‚‚‚ƒƒƒƒƒ‚ƒ‚ƒƒƒ‚ƒƒ„„„ƒ‚‚ƒƒ‚€€€‚…††‡‡†„‚‚€~~~€€€€€€€€€~~~}~~~~~~€€€€€€€€€€€~~~~}||}}{z{||}}}||z{{{zz|~€€‚‚ƒƒ„ƒƒ‚€€~~~~}€€€€€€}}~~~~~€€€~~||}|}|}|{{||}}}}€€€€€|{~€~}~~~~~~~~~€‚‚‚ƒ…†„„„……………ƒƒ„„„„„„ƒƒƒ‚‚‚ƒƒ‚‚€€‚‚€€€€€‚ƒ„ƒ‚‚‚‚‚ƒ…‡‰‰ŠŠ‹‰‰‹Ž‹ˆ€€€€€€€€€€€€€€€€~~~}}}|{{{{{{{{||||||||{||||{zzyxxxxxxwxxxwwwwwwvvvwwwvvvvvwwxxz|}|}}zvutttvz}€€‚ƒƒƒƒ‚‚ƒ‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„„ƒ‚„‚‚‚ƒ„…………ƒƒƒƒ‚‚‚‚„…„ƒ€€€€€€€€€€€‚‚‚€}|zzz{|~€ƒƒƒƒƒƒƒƒƒ‚ƒ‚ƒ‚ƒ‚‚‚ƒƒƒƒ„„…………††‡‡…‡ˆ‡†…†ˆ………„„„„„„††~€€€€€€€€€€}}|{{{|}€€~€€€€€€€€€€€€€‚€€€‚„…€€‚‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~~~~}}}}~€€‚‚‚‚‚‚‚‚„„ƒƒ‚‚‚‚ƒƒƒ„ƒ„„„„„‚ƒƒ„ƒ‚€€…†‡‡ˆ†„‚€~~€€€€€€€€€€}~~~~~€€€€€€€€€€€€€~~€~}}}||}}~~~}||yz{||‚…€€€€€€€€€€€€€€€€€‚‚~~~€~€€€€~~}}}}}}}}{||}|}}~€€€€€|{~€~}~~~~~~~~~}‚ƒ‚€‚‚„„…ƒ„„…………„„„„„„„„„ƒƒ‚ƒ‚ƒƒƒ‚‚€€‚‚€€€€€ƒ„‚€‚ƒ‚‚€‚„†‡‡‡‡ˆ‰ŠŠ‰‰ˆ€€€€€€€€€€€€€€€€~~}}||z{z{z{z{||||||||{{|||{zyxxxxxxwwxxxwwwwwwvvvwwvvvvvvvwxxz||||}zvuuttvz~€€‚ƒƒƒƒ‚‚‚ƒƒƒƒƒ„„ƒ„ƒƒƒƒ„„„„„„„„ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ……„ƒ€€‚ƒ„……‚‚‚‚‚‚‚‚„ƒ‚ƒƒ~{€€€€€€€€‚‚‚}{zyyyyz|}~‚‚‚ƒ‚ƒƒƒ‚ƒ‚ƒ‚ƒ‚ƒ‚‚‚ƒƒ„„„„…………††‡†‡‡‡†††‡……„…„„ƒƒ„††~€€}}{{z{{|~€€€€€€€€€€€€€€€€€€‚€€€‚‚……‚€€€‚‚€€‚€€€€€€€€ƒƒ‚€€€€€€€€€€€€€€€€€€€€€€€€~~}}}}}}}~€‚‚ƒƒƒ„ƒƒƒƒ‚‚‚„„„„……………„„ƒ„„„ƒ€€€…††‡‡†„‚€~€€€€€€€€€}}~~~~~€€€€€€€€€€€€€€€€‚€}}}~~~~~}|}~}~ƒ…€€€€€€€€€€€€‚€€€€€€~~~€€€€€€€~~~~}}}}}}}}{{||}}}}€€€€€|{~€~}~}~€‚‚€€€‚‚ƒ„„„ƒƒ„………„„„„„„„„„„„ƒƒ‚ƒƒƒ„‚‚€€‚‚€€€€€€ƒ„‚€‚‚ƒ‚€‚„†‡‡‡„†ˆˆ†……†€€€€€€€€€€€€€€€€€~}||{{z{z{z{z||||||||{{{|{zyyxxxxxwwwxxxwwwwwwvvvwwwvvvvvwwxxz|}|}}zvvutuwz~€€‚ƒƒƒƒ‚‚ƒ‚ƒƒƒƒ„„„ƒƒƒƒ‚„„„„„„„„ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ……„~ƒ~€‚ƒ„„‚‚‚‚ƒƒ‚‚‚‚|z{|}~€‚ƒƒƒ„‚‚‚}{zyyyyz{}~‚‚‚‚ƒƒƒƒƒ‚ƒ‚ƒ‚ƒ‚‚‚ƒƒƒƒ„„„„…………††‡†††‡‡‡†…„„„„ƒƒƒ„††~€€~}|{{z{{|}€€€€€€€€€€€€€‚€€€‚‚„…€€€‚€‚€€€€€€€€€€€ƒ‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~~}}}}}}}}~€‚ƒƒƒƒƒƒƒƒ‚ƒ‚‚ƒ„„…………„…„ƒ„„„ƒƒ€€„†‡‡ˆ†„‚€€~~€~€€€€€€€€~~~|}~~€€€€€€€€€€€€~~~€€€€€€||}~€‚ƒƒ……„ƒ‚€€€€€}~~~~€€€€€~~~~~~~}~}~}~{||||}}~€€€€€|{~€~}~~~€€€€‚ƒ„„„„ƒƒ„„…„„„„„„„„„ƒƒ„„ƒƒƒƒƒ„‚‚€€‚‚€€€€ƒ„ƒ‚‚‚‚‚ƒ„‡ˆŠŠŠ‡ˆ‰‰‰ˆ‡†€€€€€€€€€€€€€€€€€€~}|{{{{{{{{{{|||||
|||z{{{{zyywxxxwwwwxxxwwwwwwvvvwwwvvvvvvwxxz||||}zvvuuuw{~€‚ƒƒƒƒ‚‚‚ƒƒƒƒƒ„„„„ƒƒ‚‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ††„~€‚‚‚‚€€‚ƒ„‚‚‚‚€€‚‚||}~~~~~{|}€‚ƒ„‚‚‚€}|yyyz{}€ƒƒƒƒƒƒƒƒ‚ƒ‚ƒ‚ƒ‚ƒ‚‚‚ƒƒ„„„„„„…………†ˆ†…†‡ˆ‡…„„„„ƒƒƒƒ„††~€€€€€€€€€€~}|{{{{{|}€€€€€€€€€€€€€€‚‚……‚€€€‚€‚€€€€€€€€€€€‚€€€€€€€€€€€€€€€€€€€€€€€€~~~~}}}}}}}~€‚ƒ‚ƒƒƒ‚‚‚ƒƒƒƒƒƒ‚ƒ„„„„„ƒ„ƒƒ‚ƒƒƒ‚€€€€~„††‡‡†„‚€~~€€~~€€€€€€€€~~~~|}~~~~€€€€€€€€‚‚€€€€}‚‚€~~~~€€}{{{}~~€‚„„ƒƒƒ‚‚‚‚‚€€€‚‚€~}|}}}}~~~~€€€~~~~~~~~~~{|||}}}}€€€€€|{~€~}~~~€€‚ƒ…„„ƒƒƒ„„„„„„„„„„„ƒƒƒ„„ƒƒƒƒ„„‚‚€€‚‚€„…ƒ‚‚‚‚‚‚‚‚ƒ†‡‰Š‹‹‰‰Š‹‹‹Š‰€€€€€€€€€€€€€€€€€~}{{z||||||||||||||||zz{{{zyxwxyxwvwwxxxwwwwwwvvvwwwvvvvvwwxxz|}|}}zvvvuuw{~€‚ƒƒƒƒ‚‚‚‚ƒƒƒƒ…„„ƒƒ‚‚‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ††„~~€‚‚‚‚€€€€€‚ƒ‚‚€~ƒƒƒ‚€}|z{{{||}}~~}|yyz{}ƒ……„„ƒƒ‚‚ƒ‚ƒ‚ƒ‚ƒ‚‚‚ƒƒƒƒ„„ƒƒ„„…………ˆ†……ˆˆ‡…„„„ƒƒƒƒ‚„††~€€€€€€€€€€€~||{{|z{}€€€€€€€€€€€€€€€€€‚„…‚€€€‚ƒ€€€€€€€€€~~€€~€€€€€€€€€€€€€€€€€€€€€€€€€~~~~}}}}}}}~€‚ƒƒ‚‚‚ƒ‚‚‚‚ƒƒƒƒ„‚‚ƒƒƒ‚‚ƒ‚‚‚‚‚‚€€€„††‡‡†„‚€~~€€~~~€€€€€€}}~~~~|}~~~€€€€€€‚‚‚€€€{~‚ƒ~}~‚‚‚‚|~€~}}}}}}}}||~~€‚ƒ„„…„„„ƒƒƒ‚ƒ‚€~|{z||}}}~~~~~€€€€€€~~~~~~~~{|||}}}~€€€€€|{~€~}~€~}}‚€‚„…„ƒƒƒƒ„„„„„„„„„„„ƒƒƒ„„ƒƒƒƒ„„‚‚€€‚‚€„…ƒ‚ƒƒ‚‚‚‚‚‚„†ˆ‰‰‰†‡ˆŠŠŠŠ‰€€€€€€€€€€€€€€€€€~}|zz|||}|}||||||||||zz{{{zyxwxxxwvwxxxxwwwwwwvvvwwwvvvvvvwxxz|}|}}zvwvuux{€‚ƒƒƒƒ‚‚‚ƒƒƒƒƒ……„„ƒ‚‚‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ††„~~€‚‚‚‚€€‚€€‚‚‚‚‚€€}}‚‚‚ƒ„……„ƒ‚‚€~|zxxz{{||{zzyyz|~ƒ…††……„ƒ‚‚‚ƒ‚ƒ‚ƒ‚ƒ‚‚‚ƒƒ„„„ƒƒ„„„………ˆ†„…ˆ‰‡…„„„ƒƒƒ‚‚„††~€€€~}||||yz|~€€€€€€€€€€€€€€€€€€‚‚‚€‚„…‚€€€‚ƒ€€€€€€€€~~}~‚€€€€€€€€€€€€€€€€€€€€€€€€€~~}}}~€€€}{{z{{{}€‚‚‚ƒƒƒ‚‚‚ƒƒ‚ƒƒƒ„ƒƒ‚‚„…†…„ƒƒ„„ƒƒ„„ƒ‚‚€€€€ƒ‰ˆ‡…ƒ€~~~€€€€~~~~~‚€~}}~~~~~~~~~~~~€€€€€‚‚‚‚€}}~~€~~~€€~}|||~~~|||~~}~~€‚‚€€€~|zy{|}~€€€€€}~€€€€~~~~}|}}}}~~~~~~€€€~}}€}~~~€€€ƒ~~ƒ†ƒ‚„ƒƒƒ„„……†…„„„„„„„ƒƒ„…†………ƒ€€€€„…ƒ‚€ƒƒƒƒ‚‚‚‚€ƒ„„„ƒ†„ƒƒ„……„€€€€€€€€€€€€€€€€€€~}}|}}}}}}}}}}{{|{|{|{yyxxxxxxyxxwwwwwxxwwwwwwvwwxxwvuwvuvwwxzz}~}}|zxuttvy}€‚‚‚‚ƒ„„ƒ‚‚‚„„„„„„„„‚‚ƒƒ„„ƒ„ƒ‚‚„ƒƒ„……„ƒƒƒ……„€ƒƒƒ‚‚€€€€€€€€‚ƒ……ƒ€€€€€€‚‚‚ƒƒ„„ƒƒ‚}~~~~}|{zzz{~…‡†††…„„ƒƒƒƒƒƒƒƒƒƒ„ƒƒ‚ƒƒ„…„„„„„„……††††††††„ƒ„ƒ„ƒ„ƒ‡†„€||‚€~~€€~}|{~}||}ƒ€€€€€€~~€€‚€€€‚ƒ€‚ƒ‚€‚‚€‚€€€€€€€‚‚‚‚ƒƒƒƒƒƒƒƒ€€€€€€€€€€€€€€€€€€€
~~}~~~~~}|{{{|}~€ƒƒ‚‚‚ƒ‚ƒ‚ƒ‚‚‚ƒƒƒƒƒ‚‚‚„……„ƒƒƒ„„ƒƒ„„ƒ‚‚€€€€ƒˆ‡†„‚€~~~~~~€€~~~~~~~~~~~€€€€€€‚€~~~~~~~~€€€€€€€€€€}~~~}|}}|}|}}}}~~€‚€€€~|zz{|}€€€~€€€€€€~~~~}}}}}}~~~~~€€€€~}}€}~~~€€€~~€ƒ~ƒ†…ƒ„„„„ƒƒƒ„„…„„„„„……ƒ‚‚ƒ„„„„…ƒ€€€€„…ƒ‚€‚ƒ„ƒ‚‚ƒ€‚ƒ„„„„…„„„……„„€€€€€€€€€€€€€€€€€~}|||}|}|}|}|}{{{{{{{{yyxxxxxxxxwwwwwwxxwwvwwwvwwxwwvuwvuwwwxzz}~}}}{xvvuwz~€€‚‚ƒ‚‚ƒ„„„‚‚‚„„„„„„„„‚‚‚ƒƒƒƒƒƒ„ƒ‚‚ƒƒƒ„……„ƒƒƒ……„€ƒƒƒ‚‚€€€€€€€€ƒ……ƒ‚‚ƒƒƒ……„ƒƒƒƒƒ€~}|zzz{~…‡………„ƒƒƒ‚ƒƒƒƒƒƒƒƒƒƒ‚‚‚ƒƒ„„„ƒƒ„„……††…†…†…†ƒ„ƒ„ƒ„ƒ„††„€}}‚€~€€€€~}||||{{|~€€€€€€€€€‚€€€ƒƒ€‚‚‚€‚‚€‚€€€€€€€€‚‚‚ƒƒƒƒƒƒƒƒ€€€€€€€€€€€€€€€€€€€~~~~}}}~}}|||}}€‚ƒƒƒƒ‚ƒƒƒƒ‚‚‚‚ƒƒƒ‚‚‚ƒƒ„ƒ‚‚ƒ„„„ƒƒ„„ƒ‚‚€€€€ƒ††„ƒ€~~~~~~~~€€€€€~~~~€€€€€€€€€€~~~~~~~~~~ƒ‚‚‚€€‚€~~}{~}|{{{}}|}}~€€€€€~|{z||~~€~~}€€€€€~~~}}~}}}}~~€€€€€~}}€}~~~€€~}}~~~‚†…†‡……„ƒƒƒƒƒ„„ƒ„„……†‚‚‚ƒ„„ƒ„ƒ€€€€€„…ƒ‚€‚ƒƒ‚‚„€‚ƒ…„…„…„……‡†…„€€€€€€€€€€€€€€€€€~|||||||||||||{{{{{{{{yyxxxxxxyxxwwwwwxxwwwwwwwwwwwwvvwvvwwwwyz|~}}|{xxwwy|€€€‚‚ƒƒƒƒƒ„„ƒ‚‚‚‚„„„„„„„„„ƒƒƒƒƒƒ‚‚ƒ‚‚‚ƒƒƒ„……„ƒƒƒ……„€ƒƒ‚‚‚‚€€‚„…„ƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒ„„………„„„„„„ƒ}|zzz{}„†„„„ƒƒ‚‚‚ƒƒƒƒƒƒƒƒ‚‚‚‚‚ƒƒ„ƒƒƒƒ„„…………„…„……„ƒ„ƒ„ƒ„ƒ†…„€~|€~~}}}|{{{|}€€€€€€€€€‚€€€€ƒ„€‚€‚‚€‚€€€‚€€€€€€€‚‚ƒƒƒƒƒƒƒƒ€€€€€€€€€€€€€€€€€€}}||}}}}}~~€€‚ƒƒƒƒ€‚„ƒƒ‚‚‚ƒ‚‚‚‚ƒƒƒ‚‚ƒ…„„ƒƒ„„ƒ‚‚€€€€ƒ„„ƒ‚€~~~€}}~~€€~~~€€€€€€€€~~~€€€€€€€€‚€€~~}}~~~~~~~~ƒ‚‚€€€}~~~|{~}||{||}{||}~€€€€~}{z|}}€€€€}|~~~~~€}}}}}}~~€€€€€~}}€}~~~€€~}|||~~~€‚…†‡„„ƒƒƒƒƒƒ„„„„„„……ƒ‚‚ƒ„……„„‚€€€€€€€„…ƒ‚€‚‚‚ƒƒƒ‚ƒ……†……„……‡‡‡…„€€€€€€€€€€€€€€€€~}|{{{{{{{{{{{{zzzzzzzzyyxxwxxxxxwwwwwxxxwwvwwxwwwwwwvvwvvwxwwxz|}}|}{yzyy{~€€€‚ƒƒ„„„ƒƒƒƒ‚‚‚‚ƒƒƒƒƒƒƒƒ…„„„ƒƒ‚‚‚‚‚‚‚‚‚‚ƒƒ„……„ƒƒƒ……„€‚‚‚‚‚‚‚‚€€‚ƒ„„„‚‚‚‚‚‚‚‚„„„ƒƒƒƒƒ‚‚‚ƒƒƒƒƒ‚ƒ„†……ƒ‚……„ƒ|{zzzz}€ƒ…ƒƒ‚ƒ‚‚ƒƒƒƒƒƒƒƒ‚‚‚ƒ‚ƒƒƒƒƒƒƒƒƒ„„„„„„„„„„ƒ„ƒ„ƒ„ƒ„……„‚~}~€€€~~~~~}}}|}|}}~€€€€€€‚€€€€ƒ„€‚€‚‚€‚€€‚€€€€€€€€‚‚ƒƒƒƒƒ‚‚‚‚€€€€€€€€€€€€€€€€€€~~}}}~€€€€€‚‚‚‚‚€‚ƒ„ƒ‚‚‚‚‚ƒ‚‚ƒƒ‚ƒ…„„ƒƒ„„ƒ‚‚€€€€ƒƒƒ‚€~~~}|€~~€€€~~€€€€€€€€€€€€€€€€€‚‚‚‚‚€€~~}}}~~~~~}~~€€€~}|{{}}}{}}~~~}}|{{||~~€}|{|}~~€€~}~~~~~~~€~}~~~~~€€€€€~}}€}~~~€€~}||||}~~€‚…‡„ƒƒƒƒ„„………„„„„„„ƒ‚‚ƒ„……„„‚€€‚€€„…ƒ‚€‚‚‚ƒƒ‚‚„…††††……†‡‰ˆ‡…€€€€€€€€€€€€€€€€~}|{zz{{zz{z{zzzyyzyzyzyyyyxxxx
xyxxwwwwwxwwvwwxxwwwwvvvvwvvxxwwx{|}||||z{{{}€€€ƒƒ„„„„ƒƒƒ‚ƒƒƒ‚ƒƒƒƒƒƒƒƒ……„„ƒƒƒ‚ƒ‚‚‚ƒ‚‚‚ƒƒ„……„ƒƒƒ……„€‚‚‚‚‚‚‚‚€€‚‚ƒ„‚‚‚‚‚‚‚‚„„„„ƒƒ‚‚‚‚ƒ‚ƒƒ„„ƒƒƒ‚ƒ‚‚‚ƒƒƒ€}{yzzzz|‚„‚‚‚‚ƒƒƒƒƒƒƒƒƒƒ„„„„„„ƒƒƒƒƒƒ„„„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„ƒ„……‚}~€€€‚}~~~}}}}}}~~€€€€€€€€€€€‚€€€„„€€€‚‚€‚€€€‚‚‚€€€€€€ƒƒƒ‚‚‚‚‚€€€€€€€€€€€€€€€€€€€€€€€€‚€ƒƒƒ‚‚‚‚‚ƒƒƒ‚‚‚‚‚‚„…„„ƒƒ„„ƒ‚‚€€€€ƒ‚‚‚€€€~~||€€€~}}}~€€€€€€€€€€€€€€€€€€€‚‚‚‚€~~~~~~~~~}}}}~~~~~~~~|{z{|}|{|}~~}{||}}~|{}}~~€€~~~~~€€~~~~~~~€€€€€€~}}€}~~~€€~}}~~|}}…†„ƒƒƒ„„…††……„„ƒ„„‚‚‚ƒ„„ƒƒ‚€€‚‚€€€€„…ƒ‚€€‚ƒ‚‚ƒ‚ƒ„†‡‡‡‡†††ˆˆ‰‡†€€€€€€€€€€€€€€€€~}|{zzzzzzyzyzyzyyyyyyyyyyxxwxxxxxwwwwwwwwwwwwxxxwwvvvvwwvwxxwvw{|}||}|z|||~€€‚‚ƒ„„„„ƒƒ‚ƒƒƒƒ‚ƒƒƒƒƒƒƒƒ…„„„„„„ƒ„ƒ‚ƒƒ„ƒ‚ƒƒ„……„ƒƒƒ……„€‚‚‚‚ƒƒ€€‚„‚‚‚‚‚‚‚‚„„ƒƒ‚‚ƒƒƒƒ„„„…ƒ~‚€~}zy{zzz|~ƒ‚‚‚‚ƒƒƒƒƒƒƒƒƒ„„………„„„ƒƒƒƒ„„…„„ƒ„ƒ„„„ƒ„ƒ„ƒ„ƒ„„……ƒ~~€€€‚}~~~}}|||}~~~}~€€‚€€€„„‚€€‚‚€‚€€€‚‚‚€€€€‚ƒ„‚‚€€€€€€€€€€€€€€€€€€€~~€€€‚‚‚‚ƒƒ„ƒ‚‚‚‚€€‚‚ƒƒ‚‚ƒƒƒƒƒ‚‚‚‚‚‚‚„…„„ƒƒ„„ƒ‚‚€€€€ƒ‚‚‚‚€~}}|}}~~€€~}}}~€‚€€€€€€€€€€€€€€€€€€~~~~~~~~~~~}}}||}}}~€€~|{{}}|{{{|}~~||}}~€€‚‚‚€~|{}}~~€€€€~~~~€~~€€€€€€€~}}€}~~~€€€~~€ƒƒ~‚†‡……„„„……………„„„„„…ƒ‚‚ƒ„„„„ƒ€€‚‚€€„…ƒ‚€‚ƒ‚€‚ƒ„…†ˆ‡‡‡‡††‡ˆ‰‰ˆ€€€€€€€€€€€€€€€€}|{zyyyzyyyyyyyyxxxxxxxxyyxxxxxxyxxwwwwwwwwvwwxxxwwvvvwwwvwxxwvw{|}{|||{}|}~‚ƒ„ƒ„ƒƒ‚‚‚ƒƒƒ‚ƒƒƒƒƒƒƒƒ„„„„„…………ƒ‚ƒ„…„ƒƒƒ„……„ƒƒƒ……„€‚‚ƒƒƒ‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒƒƒ‚‚€€ƒƒƒƒ„„„„…„‚€€€€€}|{{zyz{~ƒ‚‚‚‚‚ƒƒƒƒƒƒƒƒƒƒ„„…„„„„„ƒƒ„„……„„„„„„„„„ƒ„ƒ„ƒ„ƒƒ„…ƒ€~~~€€€‚~|~}|{{z{{}}~|{z‚~€€€€€‚€€€‚„…€€€‚‚€‚€‚ƒ‚€€€€‚~€ƒ…€€€€€€€€€€€€€€€€€€~~€€€‚ƒ„„„‚ƒ„……„ƒ‚ƒ‚‚‚‚‚‚ƒƒ‚ƒƒ„ƒƒƒ‚‚‚ƒƒ‚ƒ„…„„ƒƒ„„ƒ‚‚€€€€ƒ‚‚‚‚‚‚‚‚€€~}||}}}}€€€~€€€€€€€€~~~~~}|}}}~~€~~~}}||||}}~€€}}}}{z{zz{|}€||}~€€‚‚‚€~|{}~~~~~~~~}}}}~€~~€€~~~€€€€€€€€~}}€}~~~€€€‚††ƒˆ‰††……„„„…„„„„„„…†„ƒƒ„…†……ƒ€€‚‚€€„…ƒ‚€‚‚‚‚ƒ„†‡ˆˆˆ‡ˆ‡†‡ˆ‰‰‰€€€€€€€€€€€€€€€€}|{zyyyyyyyyyyyyxxxxxxxxyyxxxxxxxxxwwwwwwwwwwwxxxxwvvvwwwvwyxwvw{||{{}|{}}}€}€‚ƒƒƒƒƒƒ‚‚‚ƒ„ƒ‚‚‚‚‚‚‚‚‚ƒƒ„„……††…„ƒƒ…†„ƒƒƒ„……„ƒƒƒ……„€‚‚ƒƒƒ‚‚‚‚‚‚‚‚‚ƒƒ~~€‚ƒƒƒƒƒƒƒƒ‚€~~ƒƒƒƒ„„„„‚ƒ„„ƒ€}|€€€~}{zyz{~‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒ‚‚ƒ„„„ƒƒ„„„„„„……„„„„„„„„ƒ„ƒ„ƒ„ƒ„ƒ„…„€~}~€€€‚‚~{€~}|{zzxy{|{zyx‚~€‚‚€€€‚„…‚€€€‚‚€‚€‚ƒ‚€€€€ƒ~~€ƒ…€€~~€€€€€€€€€€€€€€€€~€€€€‚ƒ„„„ƒƒƒ‚‚‚‚‚‚‚‚ƒƒ„„‚‚
‚‚ƒƒƒƒƒƒƒƒƒƒ…„‚ƒ„„ƒ~~†‡‰Š†€}€ƒ~~}}}~~~}}~~€€€€€€€‚‚‚€€€€€‚}||||{{|}}}}|zz{}€ƒƒƒ}||}~|{|}~~~}}}}~~~~}}}}|{zzyy||||}~€€€€~~€€~€}}~~}~~~~~~€€€€}}€€~€€€~~~€‚‚‚‚‚‚‚‚…………„„„ƒ„„„„„„„„…†‡‡†…„ƒ|}€ƒƒ‚‚€~€‚ƒ††„‚‚€€€‚ƒ‚‚…ˆŠ‰ˆ‹Š‰‰‰ŠŠŠ€€€€€€€€€€€€€€€€{{{zzzyyzzzyyyyyyyyyyyyywwxxxxyyxxxxxxxxxxxxwwwwwwwvvvuuwuvxyxxz{|}}|{{{vwy|€€~‚ƒƒƒƒ„„„„ƒƒƒƒƒ„ƒƒ‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ……„„„„ƒƒ„„„„ƒ‚€€ƒ…†††††…ƒ‚ƒ„†ˆ‚ƒƒ~||}ƒ‚‚ƒ…†…ƒ‚‚‚ƒƒƒƒƒ†…„‚‚‚‚‚‚„…„„„„‚„„ƒ€}}}{ywx{}~~|~€‚ƒƒ‚‚ƒ‚‚‚ƒƒ„„„ƒƒƒƒ„„„„„„„„„„ƒ„„………„„ƒ„„ƒƒ‚‚ƒ„†„~~~€€€€~}}~}|{zz{{|}~~}}~~€€‚‚‚‚‚‚‚‚„ƒ‚€~†„€€€‚‚ƒ‚€~~€€€~€€€€€€€€€€€€€€€€~~€€‚‚‚‚‚‚‚‚ƒƒƒƒƒ‚‚ƒ‚‚‚‚‚‚‚‚‚‚‚ƒƒƒ‚‚‚‚‚ƒƒƒƒƒƒƒƒƒƒƒ„„ƒƒƒƒƒ‚€€~…†ˆ‰…}‚~~}}}~~~|}~~€€~€€€€€€€€€€€~|zyyxxzz{||||{{{{}€‚‚‚€€~|{{{|}}~}}|{~~}}{{{{~~}||{{{||||}~€€€€~€~~~~~~}}}~~~~~~~€€€€€€}}~€~€€€€~~~€‚‚‚‚‚‚‚‚……„ƒ€ƒƒ‚ƒ‚ƒ‚ƒ„„……„ƒ‚~€‚‚‚‚€~€‚‚……ƒ€‚€‚‚‚‚„‡‰‰ˆ‹ŠŠŠ‹‹‹Š€€€€€€€€€€€€€€€€{{{zzzyyzzzyyyyyyyyyyyyyxxxxxxxxxxxxxxxxwwwxwxxxwwwwvvuuwuvxxxxzz|}}||||xxz~‚€~‚ƒƒƒƒ„„„ƒƒƒƒƒƒƒƒ‚‚ƒƒ„„ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ……„„„„ƒƒ„„„„ƒ‚€€„‡ŠŒŽŽŒŠ‡…ƒƒƒ„‡ˆ‡„~}~ƒ……„‚ƒƒƒƒƒƒƒƒ…………ƒƒ€€„…†††‚„„ƒ€}|}{yxx{}~~|~€‚ƒƒ‚‚ƒ‚‚‚‚ƒƒ„„„ƒƒƒƒ„„„„„„„„„„ƒ„„………„„„„„„ƒƒ‚ƒ„†„~~~€€€€~}}~}||{{||}~~€€€€‚‚‚€€€€€€€€€€€†„€€€€€‚ƒ‚€~€€€€€~€€€€€€€€€€€€€€€€~~~~€€€€‚‚ƒƒƒƒƒƒƒƒƒƒƒ‚‚ƒ‚‚‚‚‚‚‚‚‚‚ƒ‚ƒ‚‚‚ƒ‚ƒƒƒƒƒƒƒƒƒƒƒƒƒ„„ƒƒ‚ƒƒ€€€~€ƒ„††ƒ}~~~}~~~~}}~€€€~~~€€€€€€~~~€€~}{yxwxxyyz{|{{z|||}€€€~|z{|}}}}}{{z~}}|||||~~}}||||||}~€€€~€~}}~~~~~~~~€€}}€€~€€€€~~~€„ƒ‚€}|{€€€€€€€€‚‚‚~€€€‚‚‚€~€‚„…‚€€‚‚‚‚ƒ‡‰‰ˆŠ‰ŠŠŒ‹Šˆ€€€€€€€€€€€€€€€€{{{zzzyyzzzyyyyyyyyyyyyyxxxxxxxxxxxxxxxxwwwwxxxxwwwwvvvvwuvwxwxyz{}}}|}~{{}‚‚€}‚ƒƒƒƒ„„ƒƒ‚‚‚ƒƒ‚‚‚‚ƒƒ„…ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„„„„„ƒ„ƒ„„„„„‚€€…ˆ‹ŽŒ‰†„‚‚‚ƒ„„‚€~€ƒ„ƒ‚„„„„„„„„„„……„‚€~€€„†††ƒ„…ƒ€}||{yxy{}}}|~€‚ƒƒ‚‚ƒ‚‚‚‚ƒƒƒ„ƒƒƒƒƒƒ„ƒƒƒƒƒƒƒƒƒƒ„„……„„„„„„„ƒƒ‚ƒ„†„~~~€~}}~~}}|||~~~€€‚‚‚ƒƒ„„„~€ƒ…††„ƒ€€€€€€€‚€€€€€€€€€€€€€€€€€€€€€€~~}~€€‚ƒ‚ƒƒƒƒƒƒƒƒƒ‚‚‚‚ƒ‚‚‚‚‚‚‚ƒ‚‚‚‚‚‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ‚„…„‚‚ƒ„‚€~ƒƒƒ~}~€~~}~~~~~€‚‚€€€€€~€~~~€€€€~}{{z{yz{|||{{}||}€€~}}||}}}}|{{}~‚~~}}|||||}~€€€€€€€€€~~~€€€€€€€€~~~~}}~€~€€€~~~€€€€€€€€€‚€€~~||~~~~~~~€€~}|€€‚‚€~€‚„„ƒ€‚‚€‚€ƒ…ˆ‰ˆ‡‡ˆ‰Š‰‡†€€€€€€€€€€€€€€€€{{{zzzyyzzzyyyyyyyyyyyyyyyxxxx
wwxxxxxxxxwwwxwxxxwwwwvvvvwvuxxwwyy{|~}}~~~~ƒ‚€}‚ƒƒƒƒ„„ƒƒ‚‚‚‚‚ƒƒ‚ƒ‚ƒ„„ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„„ƒ„ƒ„ƒƒ„„„„ƒ‚€€‚„†‡ˆˆˆ‰ˆ†…„„„„€~~~~}~‚ƒ„„„„ƒƒƒƒƒƒ‚‚€}}€}~€‚ƒ„ƒ……ƒ}||{zyz{}|||~€‚ƒƒ‚‚ƒ‚‚‚‚ƒƒƒƒƒƒƒƒƒƒƒƒƒ‚ƒ‚ƒ‚ƒƒƒ„„„„„„„„„…„„ƒƒƒ„†„~~~~}}~~~~~~}}€€€ƒƒƒƒƒƒƒƒ„„„„„„„„‚ƒ…‡ˆŠŠƒ‚€€€€€€€€€€€ƒ‚€€€‚€€€€€€€€€€€€€€€€~}~~€€€‚‚‚‚‚‚ƒƒ‚‚‚‚‚‚ƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ‚ƒ…„ƒ‚ƒ…ƒ€€€€~~€~}€€~~~~~}~~~€€€€€€€€€€€€€~~~€€€€€€€ƒ‚€~}|}}z{||}}}|}||}~~‚‚ƒƒ„}z{|}~}€„†ˆ‡…ƒ~~~~~}||||||}~€€€€€€€€€€€~~~€€€€€€€€~~‚‚}|€€€€€~~~~€€€€€€€€€€€€€€~~~~~~~~~~~}|{€‚€~€‚‚„…ƒ‚‚‚‚‚‚€ƒ€…‡‰‰††‡ˆ‰ˆ†…€€€€€€€€€€€€€€€€{{{zzzyyzzzyyyyyyyyyyyyyyyyxxwwwxxxxxxxxxxxwxwwwwwwwwwwvwuvwxvvxyz}}}|}~€€‚„ƒ€}‚ƒƒƒƒ„„ƒƒ‚‚‚‚‚„„ƒ‚ƒ‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„ƒ„ƒ„ƒ„„…„„‚€‚‚ƒ„„ƒ‚‚„„ƒƒ„„……………‚€}}}}|{|~„ƒƒƒ‚‚‚€~{{z||~~~}~‚„„…†ƒ€|{{{zzz}}|z|~€‚ƒƒ‚‚ƒƒƒƒ‚‚‚ƒƒƒƒƒƒƒƒ‚‚ƒ‚ƒ‚ƒ‚ƒƒ„„„„„„„„„„„„ƒƒƒ„†„~~~~~~~}~~~€€‚‚‚„„„„„„„„„„……†‡ˆˆ‚€€€€‚‚‚€€€€€€€ƒ‚€€€‚€€€€€€€€€€€€€€€€~~}€€‚‚‚‚‚‚‚ƒƒƒ‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒ‚ƒ‚‚ƒƒƒƒƒƒƒƒ‚ƒƒ„ƒƒƒ„‚€€~€~€€€~~~~~}}||}~~€€€€€€€€€€€€€~~~€€€€€€€€€€€‚~||{|{||}}~}}}|{|~~€€ƒ…„~|}~€‚‚‚ƒ…‡‰‰‡„‚}~~~}||||||}~€€€~€~~~~~~€€}}~€€~~~~~€€€€€€€€€€€‚ƒƒ~~}|€€€€€~€‚‚……„‚ƒƒ‚‚€ƒ€ƒ‡ˆ‰ˆˆˆ‰‰‰ˆ‡€€€€€€€€€€€€€€€€{{{zzzyyzzzyyyyyyyyyyyyyxxxxxxxxxxxxxxxxxxxxwwwwwwwwwwwwwvuwwvvwy{|}|||}€‚„ƒ~‚ƒƒƒƒ„„ƒƒ‚‚‚ƒƒ…„ƒƒ‚‚‚‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„ƒ„„„„„ƒ‚€€€‚ƒ„„ƒƒƒƒƒƒƒƒƒƒ……„‚}|}~~|zz|‚€~~€~zxwy{}}~~~~€ƒ…„††„|{{{zz{}}{y|~€‚ƒƒ‚‚‚ƒƒƒƒ‚‚‚‚‚ƒƒƒƒ‚‚‚ƒ‚ƒ‚ƒ‚ƒ‚ƒƒ„„„„ƒƒƒ„„„„ƒƒƒ„†„~~~}~~~~}}~~~€€€€€‚‚‚‚‚‚‚‚€€€€‚‚‚‚‚‚€€€€€€€€€€~€€€€€€€€€€€€€€€€€€€~~€€‚‚‚‚‚‚‚‚ƒƒ‚‚‚‚‚‚‚‚‚‚ƒƒƒ‚‚‚‚‚ƒƒƒƒƒƒƒƒƒ‚‚ƒ„„„„‚~€‚€€~€€€€~~~~~~}}||}~~€€€€€€€€€€~~}€€€€€€€€€€€~|{{{{z{||}||||{{|~~||~€ƒ„……€‚ƒ„„„„†††……„ƒ‚}~~}|||||}~€€€~€~€€€~~~~~€€€€€€€€}}€€~~}~~~€€€€€€€€€€€€€€€€€€€€~}~€€€€€~€‚ƒ…ƒ‚‚‚€‚‚‚ƒƒ†‰‰Š‰‰‰ŠŠŠ‰€€€€€€€€€€€€€€€€{{{zzzyyzzzyyyyyyyyyyyyyxxxxxxxxxxxxxxxxxxxwxwwwwwwwwwwwwuvwwuuwy{||{z{{~~‚„„‚ƒƒƒƒ„„„ƒƒƒƒƒƒ„„ƒƒ‚‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ‚‚ƒƒƒƒ„„„„„„ƒ‚€€ƒƒƒƒƒƒƒ„„„„„„„„„‚}}~}}~|{{}~~}||{}{yxxy{|~~~~€‚ƒ…††ƒ€|zz{zz|}}{x|~€‚ƒƒ‚‚‚ƒƒƒƒ‚‚‚‚ƒƒƒƒ‚‚ƒƒƒƒƒƒƒƒ‚ƒƒ„„„„ƒ‚ƒƒƒ„ƒƒƒƒ„†„~~~}}~~~~~}~~€‚‚‚€€€€€€€€€€€€€€€€€€€‚‚€€‚‚‚‚‚‚‚‚€€€€€€€€€€‚€}€€€€~€€
€€€€€€€€€€€€€€~€€‚‚‚‚‚‚ƒƒƒƒ‚‚ƒ‚‚‚‚‚‚€€‚‚ƒƒƒƒ‚‚‚‚ƒƒƒƒƒƒƒƒƒ‚‚„…„ƒ~€‚~~€€€~~~~€~~~~~~~€~€€€€€€~~}€€€€€€€€€‚}|{{{yzz{|{{{{zz|~|}~~~„‡‚ƒ„………„„…ƒ€€ƒ…~~€€~}||||}~€€€~€€~~€€€€~~€€€€€~~€€€€}}~€~}}~~~€€€~~‚‚‚‚‚‚‚‚‚‚€~|~€€~€‚€ƒ„‚€€‚‚‚‚ƒ~‚†ˆ‰‹Š‰‰‰ŠŠŠ€€€€€€€€€€€€€€€€{{{zzzyyzzzyyyyyyyyyyyyywwwxxyyyxxxxxxxxwwwwxxxxwwwwwwwwwvuwwuuwy{||zzzz}}~„„‚‚ƒƒƒƒ„„„„ƒƒƒƒƒƒƒƒƒ‚ƒƒ„ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ‚‚ƒƒƒƒ„„„„„„ƒ‚€€€€‚ƒ…†‡‡‡ŒŒŠ‡‚~}|}~|{{}~}}|{{zzzzzzz{{|€~}}~~…††„|zz{z{|}}zx|~€‚ƒƒ‚‚‚ƒƒ„ƒ‚‚‚ƒƒƒƒ‚‚ƒƒƒƒƒƒƒƒ‚ƒƒ„„„ƒƒ‚‚ƒƒƒƒƒ‚ƒ„†„~~~}}~~~~}}~~€€‚‚€€€ƒƒ‚€€‚‚‚‚‚‚‚‚€€€‚‚€ƒ‚€€‚‚‚‚‚‚‚‚€€€€€€€€€‚‚€~}€€~€€€€€€€€€€€€€€€€~~€€ƒƒ‚‚‚ƒƒƒ‚‚‚‚‚€€€‚‚‚‚ƒ„„ƒƒ‚‚ƒƒƒƒƒƒ„‚‚ƒ„„ƒ‚‚€€€€ƒ~~‚ƒ€~~}~~~€€~~}}€€€€€~}€€€€€€€~|zyyyxxz{{zzyyyz|~€€€~~~€„†‡‡…ƒ‚€€€€{||}~€€}|{{|}~~€€~}}~~€€~~~}€€€€€€€€€€€€€€€€€~}}~~~~~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ƒ„ƒ‚‚‚‚€€€ƒ†‡‡‰‰ŠŠ‰‰€€€€€€€€€€€€€€€€|{zzyzz{yyyyyyyyxxxxyyyywxxxxxxxzyxwwwxxyyxwwwwxwwwwvuttvvvvwwwwz{||{{|}~€ƒƒ‚€€€‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„ƒ„ƒ„ƒ„ƒƒ‚‚‚‚ƒ„ƒ„ƒ„ƒ„ƒ„ƒ‚ƒ„„‚€|~„†‡‡‡ˆˆ‰ŠŒŽŽˆ‚}|}~~€€~~|zxxy{|}}}}}}|||~|{{{}~ƒ‡Šˆ{yzzyz{~‚…ˆ…„ƒ‚‚‚‚ƒƒƒƒƒƒƒƒƒ„„„„„„„ƒƒƒƒƒƒƒƒƒƒƒƒƒ„„„ƒƒƒƒƒ‚„…„€}}}||}}}~~~~~~~€‚€€€€€€€€€€‚‚€€€‚‚‚€€€€€ƒ€€€~€€€€€€€€€€€€€€€€~~€€‚ƒ‚‚‚€‚‚‚‚‚‚‚‚€‚€€‚‚‚ƒ‚‚ƒ„„ƒƒ‚ƒƒƒƒƒƒ„ƒƒ„…„„ƒƒ€€€€‚€~~‚‚€~~~}~€€€€~~€€€€~~€€€€€€€€€~|{zyywxyzzzyx{{|~€€€€€€~~€‚ƒ„‚€€€€~‚€€€€{||}~€~|{{|}~~€€~}~~~~~~~~~~~}~~~~€€€€€€€€~}}~~~~~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ƒ„ƒ‚‚€‚ƒƒ‚„‰‰‰‰ˆ‡†…€€€€€€€€€€€€€€€€||zzyyyzyzyzyzyzyyyyyyyyyxxwwxxyyyxwwwxxxxwwwxxyvwwwvvuuvvvvvwwwz{||{{|}~€ƒƒ‚€€€€‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„ƒ„ƒ„ƒ„ƒ‚‚‚‚‚ƒƒ„ƒ„ƒ„ƒ„„ƒƒƒ„„‚€„ˆŠŒŒŒŽŽŽŽŽŽŒ‰„~zz|~~~~~~~~}{z{}~~~~}}}}}}{{{{{||}€„ˆ†|{|zzz{}€„†……ƒ‚‚‚‚‚ƒƒƒƒƒƒƒƒƒ„„„„„„„ƒƒƒƒƒƒƒƒƒƒƒƒƒ„„„ƒƒƒƒƒ‚„…„€~}}}}}~~~~~~€‚€€‚‚‚‚€€€€€‚€€€€€€€€€€€€€€€€€€€€€‚‚‚‚‚€€€€€‚‚ƒƒ‚‚‚‚€€€‚ƒƒƒ‚‚ƒƒ„ƒƒƒ‚ƒƒƒƒƒƒ„„„………„„„€€€€‚€~~‚‚€~~}~}~€€€€€€€€€€€€€€€~~€€€€€€€€€€€€€}|{{{z{{|||||~~~€€€~}~€~~~~~~~}}|‚€€€~||}}€€€~|{{|}~€~~~~~}}~~~~}~~€€€€€€€€€}}}~~~~~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ƒ„‚‚‚‚‚‚‚‚‚ƒ‚€‚ƒ……†‡‡†††€€€€€€€€€€€€€€€€}|{yyxxxzzzzzzzz{{{zyxxxzyxwwwyyyyxxwxxxwwwwxxyywwxwxwwvvvvvwwwwz
{||{{|}~€‚ƒ‚€€€€‚‚ƒƒƒƒ„„ƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„ƒ„ƒ„ƒ„ƒƒƒ‚‚‚ƒƒ„ƒ„ƒ„ƒ„ƒ„ƒƒƒ„„‚€€‚†‰ŽŽŒŒŠˆ‡†…………‚~|||~}||}~€~|||}~~~~~~~}}}yz{{|||{}„ƒ€}}~{zzz|‚„†…„ƒ‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒƒ„„„„„„„ƒƒƒƒƒƒƒƒƒƒƒƒƒ„„„ƒƒƒƒƒ‚„…„~~~~~€€~~~~€‚‚‚‚‚‚‚‚‚€‚‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚‚‚€€~~€‚ƒ„‚‚‚‚‚€‚‚ƒƒ„„„ƒ‚‚‚ƒƒƒƒƒƒ„„„„„„„„„€€€€~~€}~~~}~~~€€€€€€€€€€€€€€€€€€€€€€€~|}|}}~~~€€€€€€€€~~€€~~~}}}~}}}|}}}~}||}}~~€€€‚‚‚€~|{{|}€€€~~~~~~€€€€€€€€€€€€€€€€€€€~}~~~~~~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ƒƒ‚‚‚ƒƒƒ‚€‚‚ƒ„……†……€€€€€€€€€€€€€€€€~}{zxxxx{{{{{{{{}}|{zyxxzyxxwxxyxxxxxxxxwwwxwxxywxxyyyxxvvvwvwwwz{||{{|}€‚‚€€€‚ƒƒ„ƒƒ„„„„„„ƒƒƒƒƒƒƒƒƒƒƒ„ƒ„ƒ„ƒ„ƒƒ‚‚‚ƒƒƒƒ„ƒ„ƒ„ƒ„„„ƒ„…„‚ƒ‡ˆŠ‰‰ˆˆ‡†„ƒ‚‚‚~|{{}}||{|}}}{zz|}}}~~}}||}}}}}}}€‚}}~{{zz{~€‚††„„‚ƒƒƒ‚‚‚ƒƒƒƒƒƒƒƒƒ„„„„„„„ƒƒƒƒƒƒƒƒƒƒƒƒƒ„„„ƒƒƒƒƒ‚„……~€€€€~~~~€‚€‚‚‚‚‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~€‚‚€€€~}}~‚ƒ‚‚‚‚‚‚‚‚‚‚‚‚‚…„„ƒ‚‚‚‚ƒƒƒƒƒƒ„ƒƒƒƒƒƒƒƒ€€€€€~~€€~}}~~~|}~~~}~~~~~€€€€€€€€€€€€€€€€€€€€€€~~~~~~~~€€€€€~~‚‚}}}||}~~~~{{|~~|yv}}~~€‚‚‚~}|||~€€€~~}€~~€€€€€€€€€€€€€}}}~~~~~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ƒ„‚‚‚‚ƒ‚€€€€€€‚ƒ„„„„„‚€€€€€€€€€€€€€€€€€~|zzxyx|||{|{||~}}{{yyxyyyyyxxxxxxxxxxxxxxxxwwwwwxxxxxxvvvvwwwwz{||{{|}‚‚€€‚ƒ„ƒƒƒ„„„„„ƒƒƒƒƒƒƒƒƒƒƒ„ƒ„ƒ„ƒ„ƒ„ƒƒ‚‚‚ƒƒ„ƒ„ƒ„ƒ„ƒ…„ƒ„……ƒ~‚ƒƒ‚‚€‚‚‚~}~~~}|{|}{{||}||{zyyz}~€~~}}}~~~‚„€€~|{{{{zz{~€‚†……ƒƒ‚ƒƒ‚‚‚ƒƒƒƒƒƒƒƒƒ„„„„„„„ƒƒƒƒƒƒƒƒƒƒƒƒƒ„„„ƒƒƒƒƒ‚„†„‚€€€€€€‚‚~~~~€‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~~~€€€~~~~}~~€€‚‚‚‚ƒƒƒ‚‚‚ƒƒ‚‚‚ƒƒ„‚ƒƒƒƒƒƒ„ƒƒ‚‚‚ƒƒƒ€€€€~~~€€~}}~~~|}~~~}~~~~~~~~~~€€€~~€€€€€€€€€€€€€€€€€‚‚ƒƒƒ‚‚ƒ‚€€~~~€}{z{|}~€€€}}}~~{xu}}~€‚ƒ‚‚€}||}~€€€€~}}~~~~}}~~~€€~~~~€€€€€€€~}}~~~~~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ƒƒ‚€€‚€€€‚„ƒƒƒ‚‚‚ƒƒƒ‚€€€€€€€€€€€€€€€€~}|zzzz|}|}|}||~~}|{{zzyzyzyyxwwwxxyxxxxxxyxxwvvvvwwwvvvvvwvwwwz{||{{|}€‚€€‚‚‚ƒƒ„ƒƒ„„„„ƒƒƒƒƒƒƒƒƒƒƒƒƒ„ƒ„ƒ„ƒ„„„ƒƒ‚ƒƒ„ƒ„ƒ„ƒ„ƒ„…„„„……ƒ€€€~|z{|||zywvxyz{||}}z{|}}|{zzz{}‚‚€€~}}}~~~~€ƒ†ˆ„ƒ‚€}{zy{zzz|‚„†…„ƒ‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒƒ„„„„„„„ƒƒƒƒƒƒƒƒƒƒƒƒƒ„„„ƒƒƒƒƒ‚„……‚€€€‚‚‚~~~~€‚‚‚‚‚€€€€€‚‚€€€€€€€€€€€€€€€€€€€€€€€€€}}~~€€~~}}}~~~~‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒ„…‚ƒƒƒƒƒƒ„„„ƒ‚‚ƒ„„€€€€~~€€~}}~~~~}~~€€~~~€€€~~€€€€€€€€€€€€€‚„„ƒƒƒ„…†„ƒ‚ƒƒƒ€€€€€€~|zz{}}~€€€}|
}~~€€ƒƒƒ‚~||}~€€€€~}}~~~~~~~~~}}~€~~~~€€€€€€€€}}}~~~~~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚ƒ‚€€‚€€€‚‚ƒ„…„‚€‚ƒƒƒƒ€€€€€€€€€€€€€€€€~}||||}}}}}}}}}}}}|}|||zzzyyxxxwwxyyyxxwxxxyxwwwwwwwvvuvvvvwwwwz{||{{|}€‚€€‚‚‚ƒƒ„ƒƒƒ„„„ƒƒ‚‚‚ƒƒƒƒƒƒƒƒ„ƒ„ƒ„ƒ„ƒ„„ƒƒƒƒ„„„ƒ„ƒ„ƒ„ƒ…„„………ƒ‚€€€|yxz{||{ywvwxz|}|{y||||||||||}ƒƒƒ€~~}}|~}~~ƒ„…„~|{zzzz{}€„†…„ƒ‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒƒ„„„„„„„ƒƒƒƒƒƒƒƒƒƒƒƒƒ„„„ƒƒƒƒƒ‚„†…ƒ‚€€€‚‚~~~~€‚€€‚‚‚‚‚€€€€€€‚€€€€€€€€€€€€€€€€€€€||}}~€~~}}}}~~~}}€€€‚‚‚‚‚ƒƒƒƒƒ‚‚‚ƒƒ„‚ƒƒƒƒƒƒ„……„ƒƒ„……€€€€~~~€€~~}}~~~~~€€€€€€€~~€€€~~€‚€€€€€€€€€‚ƒ„ƒƒ‚‚ƒ……‚‚‚‚‚€€~}~|{||~~~~‚€ƒ……}~~€‚ƒƒƒ‚~}}}~€€€€€€~}}~~~€€~~€€~€€€€~}}~~~~~€€€€€€€€€€€€€€€€€€€€€€€€€€€‚‚‚‚€€€€€€€‚ƒ‚€‚ƒƒ‚ƒƒƒƒƒ‚‚‚‚ƒƒ‚‚€€€€€€€€€€€€€€€€€~~}}}~~}}}}}}}}|||}}}}~{{yxwxxyvwxyyyxxvwxyyyxxyyyyxxwvvvvvwwwwz{||{{|}€‚€‚‚‚ƒƒ„„ƒƒ„„ƒƒ‚‚‚‚ƒƒƒƒƒƒƒƒƒ„ƒ„ƒ„ƒ„„„ƒƒƒƒ„„ƒ„ƒ„ƒ„ƒ„……„…†…ƒ‚€€~{xvyz{}}|zyxxy{|~~~}}{{{||}}}~‚ƒƒ‚€~}}||~~~~~}}|…ƒ~}|{zyz{~‚…ˆ…„ƒ‚‚‚‚ƒƒƒƒƒƒƒƒƒ„„„„„„„ƒƒƒƒƒƒƒƒƒƒƒƒƒ„„„ƒƒƒƒƒ„†…ƒ‚‚ƒ€€€‚~~~~€‚‚‚‚‚‚‚‚‚€€‚‚€€€‚‚‚€€€€€€€€‚€€€€€€€€€€€€€€€€€~€~~~€€€€€~€€€€€‚‚‚ƒ„„†…„ƒ‚‚ƒƒƒ‚ƒ‚ƒ‚ƒ‚ƒ„……„„‚€€~}~€€}~~~~~~}€€€~€€€~~~~~~€}~€€€€€€€€‚‚‚‚‚ƒ„…†„ƒƒƒ‚‚~~€€€~€|z{|€€€€€€~~~€ƒƒ„‚}}~~~~€€~~~~~~~}~€€€~~~‚€~}|}~€~~~}}}}}€€€€~€€€€€€€€€€€€€€€€€€€€ƒ„‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€~~}}|}}|||}}}}|zzzzzzzz{||{yxxywwxxxyyywwwwwwwwwwxxxxwvxwwwvwwwxz||{zz{ƒƒ€‚‚ƒƒƒƒƒ„‚‚‚ƒƒ‚‚ƒƒƒƒ„„ƒƒ„„„………ƒƒƒ‚‚ƒƒƒƒ„„„ƒ‚ƒƒ‚ƒƒƒ‚‚ƒƒ€~~€€}zyz{{zyxxywxyz{|||}}}~~}}}~~€€€€€~~}|{{{}~|zz~‚ƒƒƒƒ‚}|zzz{~‚†ˆ†…„ƒ‚‚‚‚‚‚‚ƒƒƒƒ„„ƒ„ƒ„ƒ„ƒ„ƒƒƒƒƒƒ„„ƒ„ƒ„ƒ„ƒƒƒƒƒƒƒƒƒ€ƒ„ƒ‚€~€‚‚‚~€‚‚‚‚‚€€‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚‚‚ƒƒ„ƒƒƒ‚‚‚‚ƒ‚ƒ‚ƒ‚ƒ‚ƒƒƒƒƒƒƒ„„‚€~}~€€€~~}}}~~~€€€€€€~~~~€€~~€€€€€€€€€€€€€‚‚‚ƒƒ‚‚‚‚ƒ„ƒ‚‚‚€€~~€}{{|~€€€~|zy~~~€‚ƒƒ‚}}~~~~€€~~~~~~~~~}~~~€€€€€€‚€~}|}~€€€€€€€€€€€€€€€€€€€€€€€€€€~ƒ„‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€~~}~~~|}}}}}||{{z{{{{{zz{zxwwxxxxxxyxyyxxxxwwwwxxxxxwwxwwwvwwwxz|}||||ƒƒ€€€‚ƒƒƒƒƒ„ƒƒƒƒƒƒƒƒ‚‚ƒƒƒƒ„„„„„„„„„„ƒƒƒ‚‚ƒƒƒƒ„„„ƒ‚‚ƒ‚ƒƒƒ‚‚ƒƒ€~~€€}zyyzzzyyzzyyz{|}}}}~~~~~~}}~~€€€€~~}|{||||}€‚„ƒƒƒ‚}|||{|~„††…„ƒ‚‚‚‚‚‚‚ƒƒƒƒ„ƒ„ƒ„ƒ„ƒ„„ƒƒƒƒƒƒ„ƒ„ƒ„ƒ„ƒ„ƒƒƒƒƒƒƒƒ‚ƒ„„‚€~€€‚‚‚~~€‚‚‚‚€€€€€€€€
€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚‚‚‚‚ƒ„‚‚‚‚‚‚‚ƒ‚ƒ‚ƒ‚ƒ‚…„ƒ‚‚‚ƒ„‚€~~~~€€€~}|}}~~~~~~~~~~~€€€€€~€€€€€€€€€€‚‚€€‚ƒ‚„ƒ‚€€€~~~~~€€€~~|||~~~~~€~|{~~~€‚ƒƒ}~~~~~€€~~~~}}}~~~€~~€€€€‚‚€~}|}~€€€€€€€€€€€€~‚€€€€€€€€€€€€€€€€€€€€€€€€~~€ƒ„‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€~€€~~~~~}}|{{{{{{{{yyzyxxxyyxyxxxxxzzyyxxwwxxxwwwwwwwwvwwwxyz|}}}}}ƒƒ€€‚ƒƒƒƒƒƒ……„„„ƒƒƒƒƒƒƒƒƒƒƒ„„„„„„ƒƒƒƒƒ‚‚ƒƒƒ„„…„ƒ‚‚‚‚ƒ„ƒƒ‚ƒ„€€}zzyz{{{{}~{||}~~~~€€}~€€€€€€~}|{~{{|€ƒƒ‚‚‚‚€~|{~}||}‚„†…„ƒ‚‚‚‚‚‚ƒ‚ƒƒƒƒ„ƒ„ƒ„ƒ„ƒ„ƒƒƒƒƒƒ„„ƒ„ƒ„ƒ„ƒƒƒƒƒƒƒƒƒ‚ƒ„„‚€‚€€‚‚~~~€‚‚‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~€~€€~~€€€‚‚‚‚‚€€‚‚‚‚‚ƒƒƒ‚ƒƒƒƒ‚‚‚ƒ‚ƒ‚ƒ‚ƒƒ„ƒ„ƒƒƒƒ€€€~€~~€€~}}}~~~~~~~~~~~~€€€€€€€€€€€€€€€€€‚‚€ƒƒƒƒƒ‚€€~~~~~}}~€€}}}}||}}}~~~€€‚‚€~~~€ƒƒ‚~~~~~€€~~~}}}~~€€€€~€€€€€€‚€~}|}~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~~€ƒ„‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒ€€€€€€€€€€€€€€€€~€€~}||{{{{z{zzyyzzzzzzyyyyxxwwyyyxxxwwyxwwvwwxwwwvwwxxyz{}}}||}‚€€€‚‚ƒƒƒƒƒƒ„„„„„„ƒƒƒƒƒƒƒƒƒƒ„„„„ƒƒƒƒƒƒƒ‚‚ƒƒƒ„……„‚‚‚‚ƒƒ„„ƒ‚ƒ„‚€€}zz{|}~~}~€€€€€€€€€€€}||~}{~€ƒ‚‚€€~}{{}|{{}‚„†…„ƒ‚‚‚‚‚‚‚ƒƒƒƒƒƒ„ƒ„ƒ„ƒ„„ƒƒƒƒƒƒ„ƒ„ƒ„ƒ„ƒ„ƒƒƒƒƒƒƒƒ‚„…„ƒ‚€€‚‚~}~€‚‚€€‚‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~~€~€€€~~~€€‚‚‚ƒ‚‚€‚‚‚‚ƒ‚ƒ‚ƒ„„„„ƒ‚ƒ‚ƒ‚ƒ‚ƒ‚‚ƒƒƒ„ƒ„ƒ€€€~€~~~~~~~~~}}}~~~~€~~~€€€€€€€€€€€~€~‚‚€ƒ„‚€€~~~~~~~~~~}}~€~}{{}}}||}}}~~€}}|}}~~~€‚ƒ~€~~~€€~~~~~€€€€~~~~~}~€€€€‚€~}|}~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~~~€ƒ„‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒ€€€€€€€€€€€€€€€€~~~}~~€}}|{z{zzyzyyyyyzzzzyyyxxxxwwwwwwwwwyxxvwwxxwwvvwwxxzz{||{zy{€€€€‚‚ƒƒƒ‚ƒƒƒƒƒƒƒƒ„„ƒƒƒƒƒƒƒƒ„„„„ƒƒƒ‚ƒƒƒ‚‚ƒƒƒ„„…„ƒ‚‚ƒƒƒ„„ƒƒƒ„ƒ€€€|{z}}€€€€€€€€€€‚‚ƒ‚‚€€}||}}€‚‚‚‚€~~||{{zz{}€ƒ…†…„ƒ‚‚‚‚ƒ‚ƒ‚ƒƒƒƒ„ƒ„ƒ„ƒ„ƒ„ƒƒƒƒƒƒ„„ƒ„ƒ„ƒ„ƒƒƒƒƒƒƒƒƒ‚ƒ„……ƒ€‚€€~}~€‚€€‚‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚‚ƒ‚€€‚‚‚‚ƒƒƒ‚‚……„„ƒƒ‚‚‚ƒ‚ƒ‚ƒ‚ƒƒƒƒƒƒ„„„€€€~}‚€~~}~~~~}}}}~~~€~~}~€€€€€€€€€€~€~‚€€ƒƒ‚€€€€€~~~~~~~~~~~~~~}{{|}}}|}}}~}{zz{|~~~€€‚‚~€~~~€€~~€€€~~}~~~}}~}}}~€€€‚€~}|}~€~~€€€~€€€€€€€€€€€€€€€€€€€€€€€€€€~~~€ƒ„‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒ€€€€€€€€€€€€€€€€~~}}}~~~~~}}{{z{zzyyxxwxyyyxyyxyxxxxwwwwwwwwxxwwwwwxwvvvwwxx{{{|{{yxz€‚‚ƒƒƒƒ‚ƒƒ‚‚‚‚ƒƒƒƒƒƒƒƒƒƒƒƒ„„ƒƒƒƒƒƒƒƒƒ‚‚ƒƒƒƒ„„„ƒ‚ƒƒƒ„„„ƒƒƒ„„€}{{}~~~~€€€€€€€€ƒƒƒƒ‚‚€€}|{z}
„………†ƒ‚€}|||zzzz|ƒ…†…„ƒ‚‚‚‚ƒƒƒƒƒƒƒƒƒ„ƒ„ƒ„ƒ„„ƒƒƒƒƒƒ„ƒ„ƒ„ƒ„ƒ„ƒƒƒƒƒƒƒƒ‚ƒ………ƒ€€~€~}}~‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚€€€‚ƒ‚‚ƒƒƒ‚‚„„ƒƒ‚‚‚ƒƒ‚ƒ‚ƒ‚ƒ‚ƒƒ„„„ƒƒ‚€€€~}‚€~~~~~~}~~~~~~~~€€~~~}~€€€€€€€€~€€€€€€€€‚ƒ‚€€€~~~~~~€}||}}}|}}~~€~}{{{|}~~~€€‚~~~~€€~€€~~~}}~~~~~~€€‚€~}|}~€~~€€€€€€€€€€€€€€€€€€€€€~~~€€ƒ„‚‚‚‚‚‚‚‚‚‚‚ƒ‚‚ƒ€€€€€€€€€€€€€€€€~~}}}~~}~~~~~}}|||{{zzzyxwxxyxwxxxxxxyyxxxxxwwwxxxxxwwwvvvvwwxy{{{|}|{yz€‚ƒƒƒƒ‚‚ƒ‚‚ƒƒƒƒƒƒ„„ƒƒƒƒ‚‚ƒƒƒƒƒƒƒƒƒƒƒ‚‚ƒƒƒ‚ƒ„ƒƒƒ„„ƒ„„„ƒƒ„„…‚€|{{|}~~~~€€€€€€€€‚‚‚€~}|{z}‚…††††ƒ‚€~}||}|{zz|~ƒ†…„ƒ‚‚‚‚ƒƒƒƒƒƒƒƒ„ƒ„ƒ„ƒ„ƒ„ƒƒƒƒƒƒ„„ƒ„ƒ„ƒ„ƒƒƒƒƒƒƒƒƒ‚ƒ…†…„‚€~~~~~~}}}‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~€~~~~€€€€€€€€€€€‚ƒ‚‚ƒƒƒƒ‚ƒ‚‚‚ƒƒ‚ƒ‚ƒ‚ƒ‚ƒ‚ƒ…††„€€€~}‚‚€~~}~}}}}~~~~€€€~~}~€}~€€€€€€~€€€‚€€~~~€€€ƒ~}}}}|}}~€€||||{{zz~~~€€‚~~~€€~~~~}~€€€~~€€€€‚€~}|}~€€‚‚€€€‚‚‚€€€€€€€€€€€€€€€~}~€€ƒ„‚‚‚€€€€€€€€‚‚‚‚‚‚‚‚ƒ‚‚‚€€€€€€€€€€€€€€€€€~~~~~}}}~~~~}~}}}||{{zyxyyzywwwxxxyyyzyyyxxwwwwxxxxwvvvvvwwxy{{{|}~|{{€‚‚‚€‚ƒƒƒƒ‚‚ƒ„„„ƒƒƒ‚‚„„ƒƒƒƒ‚‚‚‚‚ƒƒƒ„„ƒƒƒ‚‚ƒƒƒ‚‚ƒƒƒƒ„…ƒ„„„ƒƒ„„…‚€€|{{}~€€€~€€€€~€‚‚€~}{z{~‚…†…„‚„ƒ}}}}~}{{{}€†…„ƒ‚‚‚‚ƒƒƒƒƒƒƒƒƒ„ƒ„ƒ„ƒ„„ƒƒƒƒƒƒ„ƒ„ƒ„ƒ„ƒ„ƒƒƒƒƒƒƒƒ‚„…†…„‚€€}}~~~}~}|}‚‚€€‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~€€€€‚‚‚‚‚‚‚‚‚‚ƒƒ‚‚‚ƒƒƒƒƒƒƒƒƒƒ„„„„ƒ‚€~}€€€€~}}~~}}}~~~~~€€€~~~€~€€‚€€‚€€€€€€€€€€€‚ƒ‚€€~€€€€~~~~~~~~}}}~~~€€}|{{{|~~~€€€€~~~€€~~~~~~~~~€€€~~~~~€‚€€€€||€~€€€€€€€€€€€€€€€€€€€€€~~€ƒƒ‚‚ƒƒƒ€€€~‚‚‚‚‚‚€€€€€€€€€€€€€€€€~~~~~~~~}}}}~~~~|||{{zzzzyxyyyyxyxyxyxyxxxxxxxxxwwwwwwwwwwwwwwxx{{|||{{{~€€€‚‚‚‚ƒƒƒƒƒƒƒ‚‚ƒƒƒ„„„ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚„„„„„„„„ƒ€}|{|€€€€€€€€€€€€€€€~~€€}|{|}~}„……„„‚ƒƒ‚}||||{{}€ƒ…†…„ƒ‚‚‚‚‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„„„„ƒƒƒƒ†„‚‚ƒƒƒƒ‚„‡†‚€~~}~~}~€‚‚‚‚€€‚‚‚‚€€‚‚‚€‚‚‚‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚‚‚‚‚ƒƒƒ‚‚ƒƒ‚‚‚ƒƒƒƒƒƒƒƒƒƒƒ„„„ƒ‚‚€€€€~~}}~~~}}}}~~~~~€€€~~€~€€~~~€€€€€€€€€€€€€€€‚ƒ‚€€~€€~~~~~~~~€€€~~}}}€€€€€~}|}}}€€€€€~~~€€€€€~~~~~~~~~~~€€€‚€}|€€€~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~}~€€€ƒƒ‚‚‚‚‚‚‚‚~€‚‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€~~~~~~~~}}}}}|||{{zzzyxyyyyxxyxyxyxyxxxxxxxxwwwwwwwwwwwwwwxxz{||{|||€€€€‚‚‚‚ƒƒƒƒƒƒƒ‚‚ƒƒƒ„„„ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ‚ƒ‚ƒ‚ƒ„„„„„„„„ƒ‚€€}|{|~€€€
€€€€€€€€€€€€€€€~}|{{||}|}€‚ƒƒ‚‚}||||{{}€ƒ…††„ƒ‚‚‚‚ƒ‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„„„„ƒƒƒƒ…„‚‚‚ƒƒƒƒ‚„‡†‚~~~~~~}}}~€€‚‚‚€€‚‚‚‚‚‚€‚‚‚‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚‚ƒƒ„„‚‚ƒ‚‚‚‚ƒƒƒƒ‚ƒƒƒƒƒƒ„„„ƒ‚ƒ‚€~~}}}}|}}~~~~~~€€€€€€~€~~~~€€€‚‚€€€€€€€€€‚ƒ‚€€€€€€~~~~~~~~~€~~~~~€€€€€€€€€€~€€€€€€€~~~€€€€~~~~~~~~~~~~}}€€€€€€€€||€~~€€€€€€€€€€€€€€€€€€€€€€€}}~€€ƒƒ‚‚‚‚€‚‚‚‚‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€€€~~}~~~~}}}}}|||{{{zzyxyyyyxyxyxyxyxyyxxxxwwwwwwwwwwwwwvwwxxyz||||}~€€€€‚‚‚‚ƒƒƒƒ„ƒƒƒƒƒƒ„„„ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„„„„„„„„ƒ‚€€~|{|~~~~€€€€€€€€€€€€€€€€€€€€{{{{||||{|~€~}€€~}||||{{}€ƒ…‡†„ƒ‚‚‚‚‚ƒƒƒƒ„„„„„„„„„„„„„„„„„„„ƒ„ƒƒƒ…„‚‚ƒƒƒƒ‚„††‚}}~~~}}~~€‚‚‚€‚‚‚‚‚€‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚‚€€€€€€€‚‚‚‚‚‚‚‚‚‚‚ƒƒ‚ƒ‚ƒ‚ƒ‚ƒƒ„ƒƒ‚‚‚€€€€€€€€~}}}}}}~~~~~~~€€€€€€€€€€€€€€€€€€€€€€€€€€‚‚‚€€€€€€€~~~~~~€~~~~~€€€€€€€€€€€€€€€€€€€~~~€€€€€€~~~~~~~~€~~~}€€€€€‚€}|€~€€€€€€€€€€€€€€€€€€€€~}}~ƒƒ€€€€€€€€€‚€‚‚‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€€€€€~~~}~~}}||||{{zzzzzyxyyyyxxyxyxyxyyyyxxwwwwwwwwwwwwwwwwxxxyz{|{|~€€€€€€€€‚‚‚‚‚ƒƒƒ„„ƒƒƒƒ„„ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„„„„„„„„ƒ‚€€€€~}|}}~}~€~€€€€€€€€€€~~z{{}}}}}|}~~~}|}€€~}|}||{{}€ƒ…‡†…ƒ‚‚‚‚‚‚‚ƒ‚ƒƒƒƒƒƒƒƒƒƒƒ„„„„„„„„„„ƒ„ƒ„ƒƒ…„‚‚‚ƒƒƒƒ‚„‡†‚~~~}~~~€‚‚‚€‚‚‚‚‚€‚‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚ƒ‚ƒ‚‚€€€‚‚‚‚‚‚‚‚‚‚ƒ‚ƒ‚ƒ‚‚‚ƒƒƒƒ‚‚€€€€~€€€€€€~}~}}}~~~~~~~~~€€€€€€€€€€€€€€‚€€€‚€€€€€€€€‚‚‚€€~~~~~~~~~~~~~~€€€€€€€€€€€€€€€€€€€€~~~€€~~€‚‚~~~€€€||€~~~~~~€€€€€€€€€€€€€€€€~}~ƒƒ€€€€€€€€€‚‚‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€€€~~~~~}}||{{{zzyyyzyxyyyyxyxyxyxyxyyyxxwwwwwwwwwwwwwwvwwxxyz||{|~€€€€€€€‚‚‚ƒƒƒ„ƒƒƒƒƒƒ„ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„ƒ„ƒ„ƒƒƒƒƒƒƒƒƒ„‚€€}|}~}~~€€€€€€€€€~~||}~~~~~~}}€€~}|}||{{}€ƒ…‡†…„ƒ‚‚‚‚‚‚‚ƒ‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„ƒ„ƒ„„„ƒƒ‚ƒƒƒ‚ƒ‚„††‚~~~~~~}}~~€‚‚‚€€‚‚‚‚‚€‚‚‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~~€‚‚ƒƒƒ‚€€€€€‚‚‚‚‚ƒ‚‚‚‚ƒ‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒ‚€€€~€€€}~}~}}~~~~~~~€€€€€€€€€€€€€€€€€€€€€‚‚€‚‚‚~~~}}}}~~~~~€~~~~~€€€€€€~~~~€€€€€~~~€€€€€€€€€€€~~~~€€‚‚‚€}|€~~~~€€€€€€€€€€€€€€€€~~€€€€€~~~€‚ƒ€€€‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€€€€€~~~~~~}}}}{{{{zzyyzyxyyyyxxyxyxyxyyyxxxxwwwwwwwwwwwwwwwwxxy{||z{~€€€€€‚‚ƒƒƒƒƒ‚‚‚‚ƒƒƒƒƒƒƒƒ„„ƒ
ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„ƒƒƒƒƒƒƒƒƒƒƒƒ„ƒ€}|}~~~€‚‚€€€€€€€~~€€€€€€€€€€~}||||{{}€ƒ…ˆ‡…„ƒ‚‚‚‚‚‚ƒ‚ƒƒƒ‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒƒƒƒ„ƒ„„„„ƒ‚ƒƒ„ƒ‚ƒ‚„‡†‚€~}}}~~}}}}~€€‚‚‚‚‚€‚‚‚‚‚€€‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~~~~~~€‚‚‚‚€‚‚‚‚ƒ‚‚‚ƒƒ‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒ‚€€€€~~€€~~~~}}~~~~~~~~~~}€€€€€€€€€€€€€€€€€€€‚€‚‚‚‚€~~~~~~~~~~~~~~~}~~~€€~~}|{{||}~€€€€~~~€€€€€€€~~~€~€‚‚‚€||€}}~~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~~€€€€‚ƒ€‚‚‚‚‚€€‚‚‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€€~~~~~~~}}}}|||{{zzzzyxyyyyxyxyxyxyxxxxxxxxxwwwwwwwwwwwwwwxxz|}|z{~‚€€€€€‚‚ƒƒƒƒ‚‚‚‚‚‚ƒƒƒƒƒƒ„„„ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚„ƒ‚€}|}‚€€€€€€€€‚‚€€€~~~€€€€‚‚}||||{{}€ƒ…ˆ‡†„ƒƒƒƒ‚‚‚‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ„„„„„ƒƒƒ„ƒƒ‚ƒ‚„‡†‚ƒ‚€~~~}}|}~€€‚‚€‚‚€‚‚‚‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€}~~~~~~~}~€€€‚‚‚€€€‚‚ƒ‚‚‚ƒƒ‚‚‚‚‚‚‚‚‚‚‚ƒƒ‚‚‚€€€€~€€€~}}}~~~~~~~}}€€€€€€€€€‚‚€€€€‚‚€€€€€€€‚€‚ƒ‚‚‚€€€€~~~~~€€~~~~~|}}~~€€~~~~|zyzz{|~€€€€~~~€€€€€€~~~~~~~~~~~~~€€€€~~€‚‚‚€||€{|}~€~~~‚‚€€€~~€€€€€€€€€€€€€€€€€€€€€€‚‚‚‚€€€‚€€‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€}}}}~~~~}}}||{{{zyxyyyyxxyxyxyxyxxxxxxxxwwwwwwwwwwwwwwxx{|}|zz~‚€€€€‚‚ƒƒƒ‚‚‚‚‚‚ƒƒƒƒƒ„„„ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚…ƒ~|~€€€€~~~}€€€€‚‚‚€‚}}}€€€€€€‚‚‚ƒƒ‚}||||{{}€ƒ…ˆ‡†„ƒƒƒƒ‚‚‚‚ƒƒƒƒƒƒƒƒƒƒƒƒ„„„„„„„„ƒƒƒƒ„„„„„ƒƒƒ„„‚ƒ‚„‡†‚………„ƒ~~}|||~€€‚‚€‚‚‚€‚‚‚‚‚€€‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~}}~€€€€€€€€€‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€~~~~~~~~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~~}}~~||||||||~~~}}}}xxwxz|€~~|}}~€€€€€€€€€€€€€€€€€€€€€€€€€€~~~~~~~~€€€€€€€€‚€~~}}}}}}}}~~~~~~~~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚‚‚‚‚‚‚‚€€€€€€€€‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€€~~}}}}}}}}}}}}}}||{{{yyyyyyyyyyyyyyyyxxxxxxxxvvvvvvvvxxxxxxxxz{{|}~~~€€€€€€€€‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒƒ‚‚€~~~~~~~~~~~~~~~~€€€€€€€€‚‚‚‚‚‚‚‚ƒƒƒ‚‚‚€‚‚ƒ†††…‚|z||{|}‚„‡‡†…„ƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚„„„„„„„„‚‚‚‚‚‚‚‚‚„…†„‚ƒ„…††…„ƒƒ}}ƒ‚‚‚‚‚‚‚‚€€€€€€€€‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~~€€€€€€€€€€‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€~~~~~~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~}}||}}~}}}}}}}}~~}}}|||{{
z{|~€‚€€~~}}~~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚€~~||||||||€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚‚‚‚‚€€€€€€€€‚‚‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€€€€~~~}}}}}}}}|||{{{zzyyyyyyyyyyyyyyyywxwxwxwxxxxxxxxxwwwwwwww{||}~~€€€€€€€€‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒ‚€€€€€€€€€ƒ‚‚‚€€‚‚‚………„‚|z}}|}}€‚ƒ…„„ƒ‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚‚ƒƒƒƒƒƒƒƒ‚‚‚‚‚‚‚‚‚ƒ…„ƒ‚„…†‡‡†…„‚~~‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~€€~~~~~~~~€€€€€~~}€‚ƒ~~~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€}}}~€€€€€€€€€€€€‚‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚‚€‚‚€~~‚€€€€€€€‚‚‚‚‚‚‚‚€‚ƒƒ‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚€€€~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€~‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€
€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚‚‚‚‚‚‚‚€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚‚‚‚€€€€€€€€~~€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€‚€€€€€€‚ƒ€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€ \ No newline at end of file diff --git a/venv/lib/python3.7/site-packages/pygame/examples/dropevent.py 
b/venv/lib/python3.7/site-packages/pygame/examples/dropevent.py deleted file mode 100644 index 060693c..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/dropevent.py +++ /dev/null @@ -1,62 +0,0 @@ -import pygame as pg - -if pg.get_sdl_version() < (2,0,0): - raise Exception("This example requires SDL2.") - -pg.init() - -def main(): - - Running = True - surf = pg.display.set_mode((640,480)) - font = pg.font.SysFont("Arial", 24) - clock = pg.time.Clock() - - spr_file_text = font.render("Feed me some file or image!", 1, (255,255,255)) - spr_file_text_rect = spr_file_text.get_rect() - spr_file_text_rect.center = surf.get_rect().center - - spr_file_image = None - spr_file_image_rect = None - - while Running: - for ev in pg.event.get(): - if ev.type == pg.QUIT: - Running = False - elif ev.type == pg.DROPBEGIN: - print(ev) - print("File drop begin!") - elif ev.type == pg.DROPCOMPLETE: - print(ev) - print("File drop complete!") - elif ev.type == pg.DROPTEXT: - print(ev) - spr_file_text = font.render(ev.text, 1, (255,255,255)) - spr_file_text_rect = spr_file_text.get_rect() - spr_file_text_rect.center = surf.get_rect().center - elif ev.type == pg.DROPFILE: - print(ev) - spr_file_text = font.render(ev.file, 1, (255,255,255)) - spr_file_text_rect = spr_file_text.get_rect() - spr_file_text_rect.center = surf.get_rect().center - - #Try to open the file if it's an image - filetype = ev.file[-3:] - if (filetype in ["png", "bmp", "jpg"]): - spr_file_image = pg.image.load(ev.file).convert() - spr_file_image.set_alpha(127) - spr_file_image_rect = spr_file_image.get_rect() - spr_file_image_rect.center = surf.get_rect().center - - surf.fill((0,0,0)) - surf.blit(spr_file_text, spr_file_text_rect) - if (spr_file_image): - surf.blit(spr_file_image, spr_file_image_rect) - - pg.display.flip() - clock.tick(30) - - pg.quit() - -if __name__ == '__main__': - main() diff --git a/venv/lib/python3.7/site-packages/pygame/examples/eventlist.py 
b/venv/lib/python3.7/site-packages/pygame/examples/eventlist.py deleted file mode 100644 index 7c31836..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/eventlist.py +++ /dev/null @@ -1,123 +0,0 @@ -#!/usr/bin/env python - -"""Eventlist is a sloppy style of pygame, but is a handy -tool for learning about pygame events and input. At the -top of the screen are the state of several device values, -and a scrolling list of events are displayed on the bottom. - -This is not quality 'ui' code at all, but you can see how -to implement very non-interactive status displays, or even -a crude text output control. -""" - -from pygame import * - -ImgOnOff = [] -Font = None -LastKey = None - -def showtext(win, pos, text, color, bgcolor): - textimg = Font.render(text, 1, color, bgcolor) - win.blit(textimg, pos) - return pos[0] + textimg.get_width() + 5, pos[1] - - -def drawstatus(win): - bgcolor = 50, 50, 50 - win.fill(bgcolor, (0, 0, 640, 120)) - win.blit(Font.render('Status Area', 1, (155, 155, 155), bgcolor), (2, 2)) - - pos = showtext(win, (10, 30), 'Mouse Focus', (255, 255, 255), bgcolor) - win.blit(ImgOnOff[mouse.get_focused()], pos) - - pos = showtext(win, (330, 30), 'Keyboard Focus', (255, 255, 255), bgcolor) - win.blit(ImgOnOff[key.get_focused()], pos) - - pos = showtext(win, (10, 60), 'Mouse Position', (255, 255, 255), bgcolor) - p = '%s, %s' % mouse.get_pos() - pos = showtext(win, pos, p, bgcolor, (255, 255, 55)) - - pos = showtext(win, (330, 60), 'Last Keypress', (255, 255, 255), bgcolor) - if LastKey: - p = '%d, %s' % (LastKey, key.name(LastKey)) - else: - p = 'None' - pos = showtext(win, pos, p, bgcolor, (255, 255, 55)) - - pos = showtext(win, (10, 90), 'Input Grabbed', (255, 255, 255), bgcolor) - win.blit(ImgOnOff[event.get_grab()], pos) - - -def drawhistory(win, history): - win.blit(Font.render('Event History Area', 1, (155, 155, 155), (0,0,0)), (2, 132)) - ypos = 450 - h = list(history) - h.reverse() - for line in h: - r = win.blit(line, (10, ypos)) 
- win.fill(0, (r.right, r.top, 620, r.height)) - ypos -= Font.get_height() - - -def main(): - init() - - win = display.set_mode((640, 480), RESIZABLE) - display.set_caption("Mouse Focus Workout") - - global Font - Font = font.Font(None, 26) - - global ImgOnOff - ImgOnOff.append(Font.render("Off", 1, (0, 0, 0), (255, 50, 50))) - ImgOnOff.append(Font.render("On", 1, (0, 0, 0), (50, 255, 50))) - - history = [] - - #let's turn on the joysticks just so we can play with em - for x in range(joystick.get_count()): - j = joystick.Joystick(x) - j.init() - txt = 'Enabled joystick: ' + j.get_name() - img = Font.render(txt, 1, (50, 200, 50), (0, 0, 0)) - history.append(img) - if not joystick.get_count(): - img = Font.render('No Joysticks to Initialize', 1, (50, 200, 50), (0, 0, 0)) - history.append(img) - - going = True - while going: - for e in event.get(): - if e.type == QUIT: - going = False - if e.type == KEYDOWN: - if e.key == K_ESCAPE: - going = False - else: - global LastKey - LastKey = e.key - if e.type == MOUSEBUTTONDOWN: - event.set_grab(1) - elif e.type == MOUSEBUTTONUP: - event.set_grab(0) - if e.type == VIDEORESIZE: - win = display.set_mode(e.size, RESIZABLE) - - if e.type != MOUSEMOTION: - txt = '%s: %s' % (event.event_name(e.type), e.dict) - img = Font.render(txt, 1, (50, 200, 50), (0, 0, 0)) - history.append(img) - history = history[-13:] - - - drawstatus(win) - drawhistory(win, history) - - display.flip() - time.wait(10) - - quit() - - -if __name__ == '__main__': - main() diff --git a/venv/lib/python3.7/site-packages/pygame/examples/fastevents.py b/venv/lib/python3.7/site-packages/pygame/examples/fastevents.py deleted file mode 100644 index 07ff793..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/fastevents.py +++ /dev/null @@ -1,130 +0,0 @@ -#!/usr/bin/env python -""" This is a stress test for the fastevents module. - -*Fast events does not appear faster!* - -So far it looks like normal pygame.event is faster by up to two times. 
-So maybe fastevent isn't fast at all. - -Tested on windowsXP sp2 athlon, and freebsd. - -However... on my debian duron 850 machine fastevents is faster. -""" - -import pygame -from pygame import * - -# the config to try different settings out with the event queues. - -# use the fastevent module or not. -use_fast_events = 1 - -# use pygame.display.flip(). -# otherwise we test raw event processing throughput. -with_display = 1 - -# limit the game loop to 40 fps. -slow_tick = 0 - -NUM_EVENTS_TO_POST = 200000 - - - -if use_fast_events: - event_module = fastevent -else: - event_module = event - - - - -from threading import Thread - -class post_them(Thread): - def __init__(self): - Thread.__init__(self) - self.done = [] - self.stop = [] - - def run(self): - self.done = [] - self.stop = [] - for x in range(NUM_EVENTS_TO_POST): - ee = event.Event(USEREVENT) - try_post = 1 - - # the pygame.event.post raises an exception if the event - # queue is full. so wait a little bit, and try again. - while try_post: - try: - event_module.post(ee) - try_post = 0 - except: - pytime.sleep(0.001) - try_post = 1 - - if self.stop: - return - self.done.append(1) - - - -import time as pytime - -def main(): - init() - - if use_fast_events: - fastevent.init() - - c = time.Clock() - - win = display.set_mode((640, 480), RESIZABLE) - display.set_caption("fastevent Workout") - - poster = post_them() - - t1 = pytime.time() - poster.start() - - going = True - while going: -# for e in event.get(): - #for x in range(200): - # ee = event.Event(USEREVENT) - # r = event_module.post(ee) - # print (r) - - #for e in event_module.get(): - event_list = [] - event_list = event_module.get() - - for e in event_list: - if e.type == QUIT: - print (c.get_fps()) - poster.stop.append(1) - going = False - if e.type == KEYDOWN: - if e.key == K_ESCAPE: - print (c.get_fps()) - poster.stop.append(1) - going = False - if poster.done: - print (c.get_fps()) - print (c) - t2 = pytime.time() - print ("total time:%s" % (t2 - 
t1)) - print ("events/second:%s" % (NUM_EVENTS_TO_POST / (t2 - t1))) - going = False - if with_display: - display.flip() - if slow_tick: - c.tick(40) - - - pygame.quit() - - - -if __name__ == '__main__': - main() diff --git a/venv/lib/python3.7/site-packages/pygame/examples/fonty.py b/venv/lib/python3.7/site-packages/pygame/examples/fonty.py deleted file mode 100644 index bdd77f1..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/fonty.py +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/env python - -"""Here we load a .TTF font file, and display it in -a basic pygame window. It demonstrates several of the -Font object attributes. Nothing exciting in here, but -it makes a great example for basic window, event, and -font management.""" - - -import pygame -from pygame.locals import * -from pygame.compat import unichr_, unicode_ -import sys -import locale - - -if sys.version_info >= (3,): - def print_unicode(s): - e = locale.getpreferredencoding() - print (s.encode(e, 'backslashreplace').decode()) -else: - def print_unicode(s): - e = locale.getpreferredencoding() - print (s.encode(e, 'backslashreplace')) - -def main(): - #initialize - pygame.init() - resolution = 400, 200 - screen = pygame.display.set_mode(resolution) - -## pygame.mouse.set_cursor(*pygame.cursors.diamond) - - fg = 250, 240, 230 - bg = 5, 5, 5 - wincolor = 40, 40, 90 - - #fill background - screen.fill(wincolor) - - #load font, prepare values - font = pygame.font.Font(None, 80) - text = 'Fonty' - size = font.size(text) - - #no AA, no transparancy, normal - ren = font.render(text, 0, fg, bg) - screen.blit(ren, (10, 10)) - - #no AA, transparancy, underline - font.set_underline(1) - ren = font.render(text, 0, fg) - screen.blit(ren, (10, 40 + size[1])) - font.set_underline(0) - - - a_sys_font = pygame.font.SysFont("Arial", 60) - - - #AA, no transparancy, bold - a_sys_font.set_bold(1) - ren = a_sys_font.render(text, 1, fg, bg) - screen.blit(ren, (30 + size[0], 10)) - a_sys_font.set_bold(0) - - #AA, 
transparancy, italic - a_sys_font.set_italic(1) - ren = a_sys_font.render(text, 1, fg) - screen.blit(ren, (30 + size[0], 40 + size[1])) - a_sys_font.set_italic(0) - - - # Get some metrics. - print ("Font metrics for 'Fonty': %s" % a_sys_font.metrics (text)) - ch = unicode_("%c") % 0x3060 - msg = (unicode_("Font metrics for '%s': %s") % - (ch, a_sys_font.metrics (ch))) - print_unicode(msg) - - ## #some_japanese_unicode = u"\u304b\u3070\u306b" - ##some_japanese_unicode = unicode_('%c%c%c') % (0x304b, 0x3070, 0x306b) - - #AA, transparancy, italic - ##ren = a_sys_font.render(some_japanese_unicode, 1, fg) - ##screen.blit(ren, (30 + size[0], 40 + size[1])) - - - - - - #show the surface and await user quit - pygame.display.flip() - while 1: - #use event.wait to keep from polling 100% cpu - if pygame.event.wait().type in (QUIT, KEYDOWN, MOUSEBUTTONDOWN): - break - - - -if __name__ == '__main__': main() - diff --git a/venv/lib/python3.7/site-packages/pygame/examples/freetype_misc.py b/venv/lib/python3.7/site-packages/pygame/examples/freetype_misc.py deleted file mode 100644 index 21c5f70..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/freetype_misc.py +++ /dev/null @@ -1,103 +0,0 @@ -import sys, os -import pygame -from pygame.locals import * - -try: - import pygame.freetype as freetype -except ImportError: - print ("No FreeType support compiled") - sys.exit () - -colors = { - "grey_light" : pygame.Color(200, 200, 200), - "grey_dark" : pygame.Color(100, 100, 100), - "green" : pygame.Color(50, 255, 63), - "red" : pygame.Color(220, 30, 30), - "blue" : pygame.Color(50, 75, 245) -} - -def run(): - pygame.init() - - fontdir = os.path.dirname(os.path.abspath (__file__)) - font = freetype.Font(os.path.join (fontdir, "data", "sans.ttf")) - - screen = pygame.display.set_mode((800, 600)) - screen.fill (colors["grey_light"]) - - font.underline_adjustment = 0.5 - font.pad = True - font.render_to(screen, (32, 32), "Hello World", colors["red"], - colors['grey_dark'], 
size=64, - style=freetype.STYLE_UNDERLINE|freetype.STYLE_OBLIQUE) - font.pad = False - - font.render_to(screen, (32, 128), "abcdefghijklm", colors["grey_dark"], - colors["green"], size=64) - - font.vertical = True - font.render_to(screen, (32, 200), "Vertical?", colors["blue"], - None, size=32) - font.vertical = False - - font.render_to(screen, (64, 190), "Let's spin!", colors["red"], - None, size=48, rotation=55) - - font.render_to(screen, (160, 290), "All around!", colors["green"], - None, size=48, rotation=-55) - - font.render_to(screen, (250, 220), "and BLEND", - pygame.Color(255, 0, 0, 128), None, size=64) - - font.render_to(screen, (265, 237), "or BLAND!", - pygame.Color(0, 0xCC, 28, 128), None, size=64) - - # Some pinwheels - font.origin = True - for angle in range(0, 360, 45): - font.render_to(screen, (150, 420), ")", pygame.Color('black'), - size=48, rotation=angle) - font.vertical = True - for angle in range(15, 375, 30): - font.render_to(screen, (600, 400), "|^*", pygame.Color('orange'), - size=48, rotation=angle) - font.vertical = False - font.origin = False - - utext = pygame.compat.as_unicode(r"I \u2665 Unicode") - font.render_to(screen, (298, 320), utext, pygame.Color(0, 0xCC, 0xDD), - None, size=64) - - utext = pygame.compat.as_unicode(r"\u2665") - font.render_to(screen, (480, 32), utext, colors["grey_light"], - colors["red"], size=148) - - font.render_to(screen, (380, 380), "...yes, this is an SDL surface", - pygame.Color(0, 0, 0), - None, size=24, style=freetype.STYLE_STRONG) - - font.origin = True - r = font.render_to(screen, (100, 530), "stretch", - pygame.Color('red'), - None, size=(24, 24), style=freetype.STYLE_NORMAL) - font.render_to(screen, (100 + r.width, 530), " VERTICAL", - pygame.Color('red'), - None, size=(24, 48), style=freetype.STYLE_NORMAL) - - r = font.render_to(screen, (100, 580), "stretch", - pygame.Color('blue'), - None, size=(24, 24), style=freetype.STYLE_NORMAL) - font.render_to(screen, (100 + r.width, 580), " HORIZONTAL", - 
pygame.Color('blue'), - None, size=(48, 24), style=freetype.STYLE_NORMAL) - - pygame.display.flip() - - while 1: - if pygame.event.wait().type in (QUIT, KEYDOWN, MOUSEBUTTONDOWN): - break - - pygame.quit() - -if __name__ == "__main__": - run () diff --git a/venv/lib/python3.7/site-packages/pygame/examples/glcube.py b/venv/lib/python3.7/site-packages/pygame/examples/glcube.py deleted file mode 100644 index be2d7f9..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/glcube.py +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/env python - -"""Draw a cube on the screen. every frame we orbit -the camera around by a small amount and it appears -the object is spinning. note i've setup some simple -data structures here to represent a multicolored cube, -we then go through a semi-unoptimized loop to draw -the cube points onto the screen. opengl does all the -hard work for us. :] -""" - -import pygame -from pygame.locals import * - -try: - from OpenGL.GL import * - from OpenGL.GLU import * -except ImportError: - print ('The GLCUBE example requires PyOpenGL') - raise SystemExit - - - -#some simple data for a colored cube -#here we have the 3D point position and color -#for each corner. 
then we have a list of indices -#that describe each face, and a list of indieces -#that describes each edge - - -CUBE_POINTS = ( - (0.5, -0.5, -0.5), (0.5, 0.5, -0.5), - (-0.5, 0.5, -0.5), (-0.5, -0.5, -0.5), - (0.5, -0.5, 0.5), (0.5, 0.5, 0.5), - (-0.5, -0.5, 0.5), (-0.5, 0.5, 0.5) -) - -#colors are 0-1 floating values -CUBE_COLORS = ( - (1, 0, 0), (1, 1, 0), (0, 1, 0), (0, 0, 0), - (1, 0, 1), (1, 1, 1), (0, 0, 1), (0, 1, 1) -) - -CUBE_QUAD_VERTS = ( - (0, 1, 2, 3), (3, 2, 7, 6), (6, 7, 5, 4), - (4, 5, 1, 0), (1, 5, 7, 2), (4, 0, 3, 6) -) - -CUBE_EDGES = ( - (0,1), (0,3), (0,4), (2,1), (2,3), (2,7), - (6,3), (6,4), (6,7), (5,1), (5,4), (5,7), -) - - - -def drawcube(): - "draw the cube" - allpoints = list(zip(CUBE_POINTS, CUBE_COLORS)) - - glBegin(GL_QUADS) - for face in CUBE_QUAD_VERTS: - for vert in face: - pos, color = allpoints[vert] - glColor3fv(color) - glVertex3fv(pos) - glEnd() - - glColor3f(1.0, 1.0, 1.0) - glBegin(GL_LINES) - for line in CUBE_EDGES: - for vert in line: - pos, color = allpoints[vert] - glVertex3fv(pos) - - glEnd() - -def init_gl_stuff(): - - glEnable(GL_DEPTH_TEST) #use our zbuffer - - #setup the camera - glMatrixMode(GL_PROJECTION) - glLoadIdentity() - gluPerspective(45.0,640/480.0,0.1,100.0) #setup lens - glTranslatef(0.0, 0.0, -3.0) #move back - glRotatef(25, 1, 0, 0) #orbit higher - -def main(): - "run the demo" - #initialize pygame and setup an opengl display - pygame.init() - - fullscreen = True - pygame.display.set_mode((640,480), OPENGL|DOUBLEBUF|FULLSCREEN) - - init_gl_stuff() - - going = True - while going: - #check for quit'n events - events = pygame.event.get() - for event in events: - if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE): - going = False - - elif event.type == KEYDOWN: - if event.key == pygame.K_f: - if not fullscreen: - print("Changing to FULLSCREEN") - pygame.display.set_mode((640, 480), OPENGL | DOUBLEBUF | FULLSCREEN) - else: - print("Changing to windowed mode") - 
pygame.display.set_mode((640, 480), OPENGL | DOUBLEBUF) - fullscreen = not fullscreen - init_gl_stuff() - - - #clear screen and move camera - glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT) - - #orbit camera around by 1 degree - glRotatef(1, 0, 1, 0) - - drawcube() - pygame.display.flip() - pygame.time.wait(10) - - -if __name__ == '__main__': main() diff --git a/venv/lib/python3.7/site-packages/pygame/examples/headless_no_windows_needed.py b/venv/lib/python3.7/site-packages/pygame/examples/headless_no_windows_needed.py deleted file mode 100644 index ac39662..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/headless_no_windows_needed.py +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env python -"""How to use pygame with no windowing system, like on headless servers. - -Thumbnail generation with scaling is an example of what you can do with pygame. -NOTE: the pygame scale function uses mmx/sse if available, and can be run - in multiple threads. - -""" -useage = """-scale inputimage outputimage new_width new_height -eg. -scale in.png out.png 50 50 - -""" - -import os, sys - -# set SDL to use the dummy NULL video driver, -# so it doesn't need a windowing system. -os.environ["SDL_VIDEODRIVER"] = "dummy" - - -import pygame.transform - - -if 1: - #some platforms need to init the display for some parts of pygame. 
- import pygame.display - pygame.display.init() - screen = pygame.display.set_mode((1,1)) - - - -def scaleit(fin, fout, w, h): - i = pygame.image.load(fin) - - if hasattr(pygame.transform, "smoothscale"): - scaled_image = pygame.transform.smoothscale(i, (w,h)) - else: - scaled_image = pygame.transform.scale(i, (w,h)) - pygame.image.save(scaled_image, fout) - -def main(fin, fout, w, h): - """smoothscale image file named fin as fout with new size (w,h)""" - scaleit(fin, fout, w, h) - -if __name__ == "__main__": - if "-scale" in sys.argv: - fin, fout, w, h = sys.argv[2:] - w, h = map(int, [w,h]) - main(fin, fout, w,h) - else: - print (useage) - - - - diff --git a/venv/lib/python3.7/site-packages/pygame/examples/liquid.py b/venv/lib/python3.7/site-packages/pygame/examples/liquid.py deleted file mode 100644 index 6bfad0d..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/liquid.py +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env python - -"""This examples demonstrates a simplish water effect of an -image. It attempts to create a hardware display surface that -can use pageflipping for faster updates. Note that the colormap -from the loaded GIF image is copied to the colormap for the -display surface. - -This is based on the demo named F2KWarp by Brad Graham of Freedom2000 -done in BlitzBasic. I was just translating the BlitzBasic code to -pygame to compare the results. 
I didn't bother porting the text and -sound stuff, that's an easy enough challenge for the reader :]""" - -import pygame, os -from pygame.locals import * -from math import sin -import time - -main_dir = os.path.split(os.path.abspath(__file__))[0] - -def main(): - #initialize and setup screen - pygame.init() - screen = pygame.display.set_mode((640, 480), HWSURFACE|DOUBLEBUF) - - #load image and quadruple - imagename = os.path.join(main_dir, 'data', 'liquid.bmp') - bitmap = pygame.image.load(imagename) - bitmap = pygame.transform.scale2x(bitmap) - bitmap = pygame.transform.scale2x(bitmap) - - #get the image and screen in the same format - if screen.get_bitsize() == 8: - screen.set_palette(bitmap.get_palette()) - else: - bitmap = bitmap.convert() - - #prep some variables - anim = 0.0 - - #mainloop - xblocks = range(0, 640, 20) - yblocks = range(0, 480, 20) - stopevents = QUIT, KEYDOWN, MOUSEBUTTONDOWN - while 1: - for e in pygame.event.get(): - if e.type in stopevents: - return - - anim = anim + 0.02 - for x in xblocks: - xpos = (x + (sin(anim + x * .01) * 15)) + 20 - for y in yblocks: - ypos = (y + (sin(anim + y * .01) * 15)) + 20 - screen.blit(bitmap, (x, y), (xpos, ypos, 20, 20)) - - pygame.display.flip() - time.sleep(0.01) - - -if __name__ == '__main__': main() - - - -"""BTW, here is the code from the BlitzBasic example this was derived -from. i've snipped the sound and text stuff out. 
------------------------------------------------------------------ -; Brad@freedom2000.com - -; Load a bmp pic (800x600) and slice it into 1600 squares -Graphics 640,480 -SetBuffer BackBuffer() -bitmap$="f2kwarp.bmp" -pic=LoadAnimImage(bitmap$,20,15,0,1600) - -; use SIN to move all 1600 squares around to give liquid effect -Repeat -f=0:w=w+10:If w=360 Then w=0 -For y=0 To 599 Step 15 -For x = 0 To 799 Step 20 -f=f+1:If f=1600 Then f=0 -DrawBlock pic,(x+(Sin(w+x)*40))/1.7+80,(y+(Sin(w+y)*40))/1.7+60,f -Next:Next:Flip:Cls -Until KeyDown(1) -""" diff --git a/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/English.lproj/MainMenu.nib/JavaCompiling.plist b/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/English.lproj/MainMenu.nib/JavaCompiling.plist deleted file mode 100644 index 6e7346a..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/English.lproj/MainMenu.nib/JavaCompiling.plist +++ /dev/null @@ -1,8 +0,0 @@ - - - - - JavaSourceSubpath - _MainMenu_EOArchive_English.java - - diff --git a/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/English.lproj/MainMenu.nib/_MainMenu_EOArchive_English.java b/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/English.lproj/MainMenu.nib/_MainMenu_EOArchive_English.java deleted file mode 100644 index 558bc78..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/English.lproj/MainMenu.nib/_MainMenu_EOArchive_English.java +++ /dev/null @@ -1,68 +0,0 @@ -// _MainMenu_EOArchive_English.java -// Generated by EnterpriseObjects palette at Tuesday, March 2, 2004 8:45:51 PM America/New_York - -import com.webobjects.eoapplication.*; -import com.webobjects.eocontrol.*; -import com.webobjects.eointerface.*; -import com.webobjects.eointerface.swing.*; -import com.webobjects.foundation.*; -import javax.swing.*; - -public class _MainMenu_EOArchive_English extends 
com.webobjects.eoapplication.EOArchive { - PygameAppDelegate _pygameAppDelegate0; - - public _MainMenu_EOArchive_English(Object owner, NSDisposableRegistry registry) { - super(owner, registry); - } - - protected void _construct() { - Object owner = _owner(); - EOArchive._ObjectInstantiationDelegate delegate = (owner instanceof EOArchive._ObjectInstantiationDelegate) ? (EOArchive._ObjectInstantiationDelegate)owner : null; - Object replacement; - - super._construct(); - - - if ((delegate != null) && ((replacement = delegate.objectForOutletPath(this, "delegate")) != null)) { - _pygameAppDelegate0 = (replacement == EOArchive._ObjectInstantiationDelegate.NullObject) ? null : (PygameAppDelegate)replacement; - _replacedObjects.setObjectForKey(replacement, "_pygameAppDelegate0"); - } else { - _pygameAppDelegate0 = (PygameAppDelegate)_registered(new PygameAppDelegate(), "PygameAppDelegate"); - } - } - - protected void _awaken() { - super._awaken(); - .addActionListener((com.webobjects.eointerface.swing.EOControlActionAdapter)_registered(new com.webobjects.eointerface.swing.EOControlActionAdapter(_owner(), "unhideAllApplications", ), "")); - .addActionListener((com.webobjects.eointerface.swing.EOControlActionAdapter)_registered(new com.webobjects.eointerface.swing.EOControlActionAdapter(_owner(), "hide", ), "")); - .addActionListener((com.webobjects.eointerface.swing.EOControlActionAdapter)_registered(new com.webobjects.eointerface.swing.EOControlActionAdapter(_owner(), "hideOtherApplications", ), "")); - .addActionListener((com.webobjects.eointerface.swing.EOControlActionAdapter)_registered(new com.webobjects.eointerface.swing.EOControlActionAdapter(_owner(), "orderFrontStandardAboutPanel", ), "")); - - if (_replacedObjects.objectForKey("_pygameAppDelegate0") == null) { - _connect(_owner(), _pygameAppDelegate0, "delegate"); - } - - .addActionListener((com.webobjects.eointerface.swing.EOControlActionAdapter)_registered(new 
com.webobjects.eointerface.swing.EOControlActionAdapter(_owner(), "terminate", ), "")); - } - - protected void _init() { - super._init(); - .addActionListener((com.webobjects.eointerface.swing.EOControlActionAdapter)_registered(new com.webobjects.eointerface.swing.EOControlActionAdapter(null, "undo", ), "")); - .addActionListener((com.webobjects.eointerface.swing.EOControlActionAdapter)_registered(new com.webobjects.eointerface.swing.EOControlActionAdapter(null, "paste", ), "")); - .addActionListener((com.webobjects.eointerface.swing.EOControlActionAdapter)_registered(new com.webobjects.eointerface.swing.EOControlActionAdapter(null, "arrangeInFront", ), "")); - .addActionListener((com.webobjects.eointerface.swing.EOControlActionAdapter)_registered(new com.webobjects.eointerface.swing.EOControlActionAdapter(null, "copy", ), "")); - .addActionListener((com.webobjects.eointerface.swing.EOControlActionAdapter)_registered(new com.webobjects.eointerface.swing.EOControlActionAdapter(null, "showHelp", ), "")); - .addActionListener((com.webobjects.eointerface.swing.EOControlActionAdapter)_registered(new com.webobjects.eointerface.swing.EOControlActionAdapter(null, "clearRecentDocuments", ), "")); - .addActionListener((com.webobjects.eointerface.swing.EOControlActionAdapter)_registered(new com.webobjects.eointerface.swing.EOControlActionAdapter(null, "selectAll", ), "")); - .addActionListener((com.webobjects.eointerface.swing.EOControlActionAdapter)_registered(new com.webobjects.eointerface.swing.EOControlActionAdapter(null, "performMiniaturize", ), "")); - .addActionListener((com.webobjects.eointerface.swing.EOControlActionAdapter)_registered(new com.webobjects.eointerface.swing.EOControlActionAdapter(null, "toggleContinuousSpellChecking", ), "")); - .addActionListener((com.webobjects.eointerface.swing.EOControlActionAdapter)_registered(new com.webobjects.eointerface.swing.EOControlActionAdapter(null, "print", ), "")); - 
.addActionListener((com.webobjects.eointerface.swing.EOControlActionAdapter)_registered(new com.webobjects.eointerface.swing.EOControlActionAdapter(null, "cut", ), "")); - .addActionListener((com.webobjects.eointerface.swing.EOControlActionAdapter)_registered(new com.webobjects.eointerface.swing.EOControlActionAdapter(null, "runPageLayout", ), "")); - .addActionListener((com.webobjects.eointerface.swing.EOControlActionAdapter)_registered(new com.webobjects.eointerface.swing.EOControlActionAdapter(null, "checkSpelling", ), "")); - .addActionListener((com.webobjects.eointerface.swing.EOControlActionAdapter)_registered(new com.webobjects.eointerface.swing.EOControlActionAdapter(null, "delete", ), "")); - .addActionListener((com.webobjects.eointerface.swing.EOControlActionAdapter)_registered(new com.webobjects.eointerface.swing.EOControlActionAdapter(null, "redo", ), "")); - .addActionListener((com.webobjects.eointerface.swing.EOControlActionAdapter)_registered(new com.webobjects.eointerface.swing.EOControlActionAdapter(null, "performClose", ), "")); - .addActionListener((com.webobjects.eointerface.swing.EOControlActionAdapter)_registered(new com.webobjects.eointerface.swing.EOControlActionAdapter(null, "showGuessPanel", ), "")); - } -} diff --git a/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/English.lproj/MainMenu.nib/classes.nib b/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/English.lproj/MainMenu.nib/classes.nib deleted file mode 100644 index 1c6603e..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/English.lproj/MainMenu.nib/classes.nib +++ /dev/null @@ -1,13 +0,0 @@ -{ - IBClasses = ( - {CLASS = FirstResponder; LANGUAGE = ObjC; SUPERCLASS = NSObject; }, - { - ACTIONS = {}; - CLASS = PygameAppDelegate; - LANGUAGE = ObjC; - OUTLETS = {}; - SUPERCLASS = NSObject; - } - ); - IBVersion = 1; -} \ No newline at end of file diff --git 
a/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/English.lproj/MainMenu.nib/info.nib b/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/English.lproj/MainMenu.nib/info.nib deleted file mode 100644 index 7d93905..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/English.lproj/MainMenu.nib/info.nib +++ /dev/null @@ -1,21 +0,0 @@ - - - - - IBDocumentLocation - 269 494 356 240 0 0 1600 1002 - IBEditorPositions - - 29 - 125 344 278 44 0 0 1600 1002 - - IBFramework Version - 349.0 - IBOpenObjects - - 29 - - IBSystem Version - 7D24 - - diff --git a/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/English.lproj/MainMenu.nib/keyedobjects.nib b/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/English.lproj/MainMenu.nib/keyedobjects.nib deleted file mode 100644 index 8ef64c0..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/English.lproj/MainMenu.nib/keyedobjects.nib and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/English.lproj/aliens.icns b/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/English.lproj/aliens.icns deleted file mode 100644 index 6dbe102..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/English.lproj/aliens.icns and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/README.txt b/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/README.txt deleted file mode 100644 index 29573e6..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/README.txt +++ /dev/null @@ -1,17 +0,0 @@ -********************************************************************* - THESE INSTRUCTIONS ARE ONLY FOR MAC OS X 10.3, AND WILL ONLY CREATE - STANDALONE 
BUNDLES FOR MAC OS X 10.3. THERE IS NO SUPPORT FOR - MAC OS X 10.2. - -Also works on 10.4 and 10.5 -********************************************************************* - - -Install py2app and its dependencies. - -easy_install py2app - - - -To create the bundle: - python setup.py py2app diff --git a/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/aliens.py b/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/aliens.py deleted file mode 100644 index 5a23e41..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/aliens.py +++ /dev/null @@ -1,325 +0,0 @@ -#! /usr/bin/env python - -import random, os.path - -#import basic pygame modules -import pygame -from pygame.locals import * - -#see if we can load more than standard BMP -if not pygame.image.get_extended(): - raise SystemExit("Sorry, extended image module required") - - -#game constants -MAX_SHOTS = 2 #most player bullets onscreen -ALIEN_ODDS = 22 #chances a new alien appears -BOMB_ODDS = 60 #chances a new bomb will drop -ALIEN_RELOAD = 12 #frames between new aliens -SCREENRECT = Rect(0, 0, 640, 480) -SCORE = 0 - - -def load_image(file): - "loads an image, prepares it for play" - file = os.path.join('data', file) - try: - surface = pygame.image.load(file) - except pygame.error: - raise SystemExit('Could not load image "%s" %s'%(file, pygame.get_error())) - return surface.convert() - -def load_images(*files): - imgs = [] - for file in files: - imgs.append(load_image(file)) - return imgs - - -class dummysound: - def play(self): pass - -def load_sound(file): - if not pygame.mixer: return dummysound() - file = os.path.join('data', file) - try: - sound = pygame.mixer.Sound(file) - return sound - except pygame.error: - print ('Warning, unable to load,', file) - return dummysound() - - - -# each type of game object gets an init and an -# update function. 
the update function is called -# once per frame, and it is when each object should -# change it's current position and state. the Player -# object actually gets a "move" function instead of -# update, since it is passed extra information about -# the keyboard - - -class Player(pygame.sprite.Sprite): - speed = 10 - bounce = 24 - gun_offset = -11 - images = [] - def __init__(self): - pygame.sprite.Sprite.__init__(self, self.containers) - self.image = self.images[0] - self.rect = self.image.get_rect() - self.reloading = 0 - self.rect.centerx = SCREENRECT.centerx - self.rect.bottom = SCREENRECT.bottom - 1 - self.origtop = self.rect.top - self.facing = -1 - - def move(self, direction): - if direction: self.facing = direction - self.rect.move_ip(direction*self.speed, 0) - self.rect = self.rect.clamp(SCREENRECT) - if direction < 0: - self.image = self.images[0] - elif direction > 0: - self.image = self.images[1] - self.rect.top = self.origtop - (self.rect.left/self.bounce%2) - - def gunpos(self): - pos = self.facing*self.gun_offset + self.rect.centerx - return pos, self.rect.top - - -class Alien(pygame.sprite.Sprite): - speed = 13 - animcycle = 12 - images = [] - def __init__(self): - pygame.sprite.Sprite.__init__(self, self.containers) - self.image = self.images[0] - self.rect = self.image.get_rect() - self.facing = random.choice((-1,1)) * Alien.speed - self.frame = 0 - if self.facing < 0: - self.rect.right = SCREENRECT.right - - def update(self): - self.rect.move_ip(self.facing, 0) - if not SCREENRECT.contains(self.rect): - self.facing = -self.facing; - self.rect.top = self.rect.bottom + 1 - self.rect = self.rect.clamp(SCREENRECT) - self.frame = self.frame + 1 - self.image = self.images[self.frame/self.animcycle%3] - - -class Explosion(pygame.sprite.Sprite): - defaultlife = 12 - animcycle = 3 - images = [] - def __init__(self, actor): - pygame.sprite.Sprite.__init__(self, self.containers) - self.image = self.images[0] - self.rect = self.image.get_rect() - self.life = 
self.defaultlife - self.rect.center = actor.rect.center - - def update(self): - self.life = self.life - 1 - self.image = self.images[self.life/self.animcycle%2] - if self.life <= 0: self.kill() - - -class Shot(pygame.sprite.Sprite): - speed = -11 - images = [] - def __init__(self, pos): - pygame.sprite.Sprite.__init__(self, self.containers) - self.image = self.images[0] - self.rect = self.image.get_rect() - self.rect.midbottom = pos - - def update(self): - self.rect.move_ip(0, self.speed) - if self.rect.top <= 0: - self.kill() - - -class Bomb(pygame.sprite.Sprite): - speed = 9 - images = [] - def __init__(self, alien): - pygame.sprite.Sprite.__init__(self, self.containers) - self.image = self.images[0] - self.rect = self.image.get_rect() - self.rect.centerx = alien.rect.centerx - self.rect.bottom = alien.rect.bottom + 5 - - def update(self): - self.rect.move_ip(0, self.speed) - if self.rect.bottom >= 470: - Explosion(self) - self.kill() - - -class Score(pygame.sprite.Sprite): - def __init__(self): - pygame.sprite.Sprite.__init__(self) - self.font = pygame.font.Font(None, 20) - self.font.set_italic(1) - self.color = Color('white') - self.lastscore = -1 - self.update() - self.rect = self.image.get_rect().move(10, 450) - - def update(self): - if SCORE != self.lastscore: - self.lastscore = SCORE - msg = "Score: %d" % SCORE - self.image = self.font.render(msg, 0, self.color) - - - -def main(winstyle = 0): - # Initialize pygame - pygame.init() - if pygame.mixer and not pygame.mixer.get_init(): - print ('Warning, no sound') - pygame.mixer = None - - # Set the display mode - winstyle = 0 # |FULLSCREEN - bestdepth = pygame.display.mode_ok(SCREENRECT.size, winstyle, 32) - screen = pygame.display.set_mode(SCREENRECT.size, winstyle, bestdepth) - - #Load images, assign to sprite classes - #(do this before the classes are used, after screen setup) - img = load_image('player1.gif') - Player.images = [img, pygame.transform.flip(img, 1, 0)] - img = load_image('explosion1.gif') - 
Explosion.images = [img, pygame.transform.flip(img, 1, 1)] - Alien.images = load_images('alien1.gif', 'alien2.gif', 'alien3.gif') - Bomb.images = [load_image('bomb.gif')] - Shot.images = [load_image('shot.gif')] - - #decorate the game window - icon = pygame.transform.scale(Alien.images[0], (32, 32)) - pygame.display.set_icon(icon) - pygame.display.set_caption('Pygame Aliens') - pygame.mouse.set_visible(0) - - #create the background, tile the bgd image - bgdtile = load_image('background.gif') - background = pygame.Surface(SCREENRECT.size) - for x in range(0, SCREENRECT.width, bgdtile.get_width()): - background.blit(bgdtile, (x, 0)) - screen.blit(background, (0,0)) - pygame.display.flip() - - #load the sound effects - boom_sound = load_sound('boom.wav') - shoot_sound = load_sound('car_door.wav') - if pygame.mixer and pygame.mixer.music: - music = os.path.join('data', 'house_lo.wav') - pygame.mixer.music.load(music) - pygame.mixer.music.play(-1) - - # Initialize Game Groups - aliens = pygame.sprite.Group() - shots = pygame.sprite.Group() - bombs = pygame.sprite.Group() - all = pygame.sprite.RenderUpdates() - lastalien = pygame.sprite.GroupSingle() - - #assign default groups to each sprite class - Player.containers = all - Alien.containers = aliens, all, lastalien - Shot.containers = shots, all - Bomb.containers = bombs, all - Explosion.containers = all - Score.containers = all - - #Create Some Starting Values - global score - alienreload = ALIEN_RELOAD - kills = 0 - clock = pygame.time.Clock() - - #initialize our starting sprites - global SCORE - player = Player() - Alien() #note, this 'lives' because it goes into a sprite group - if pygame.font: - all.add(Score()) - - - while player.alive(): - - #get input - for event in pygame.event.get(): - if event.type == QUIT or \ - (event.type == KEYDOWN and event.key == K_ESCAPE): - return - keystate = pygame.key.get_pressed() - - # clear/erase the last drawn sprites - all.clear(screen, background) - - #update all the sprites 
- all.update() - - #handle player input - direction = keystate[K_RIGHT] - keystate[K_LEFT] - player.move(direction) - firing = keystate[K_SPACE] - if not player.reloading and firing and len(shots) < MAX_SHOTS: - Shot(player.gunpos()) - shoot_sound.play() - player.reloading = firing - - # Create new alien - if alienreload: - alienreload = alienreload - 1 - elif not int(random.random() * ALIEN_ODDS): - Alien() - alienreload = ALIEN_RELOAD - - # Drop bombs - if lastalien and not int(random.random() * BOMB_ODDS): - Bomb(lastalien.sprite) - - # Detect collisions - for alien in pygame.sprite.spritecollide(player, aliens, 1): - boom_sound.play() - Explosion(alien) - Explosion(player) - SCORE = SCORE + 1 - player.kill() - - for alien in pygame.sprite.groupcollide(shots, aliens, 1, 1).keys(): - boom_sound.play() - Explosion(alien) - SCORE = SCORE + 1 - - for bomb in pygame.sprite.spritecollide(player, bombs, 1): - boom_sound.play() - Explosion(player) - Explosion(bomb) - player.kill() - - #draw the scene - dirty = all.draw(screen) - pygame.display.update(dirty) - - #cap the framerate - clock.tick(40) - - if pygame.mixer and pygame.mixer.music: - pygame.mixer.music.fadeout(1000) - pygame.time.wait(1000) - - - -#call the "main" function if running this script -if __name__ == '__main__': main() - diff --git a/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/setup.py b/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/setup.py deleted file mode 100644 index 4c45283..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/macosx/aliens_app_example/setup.py +++ /dev/null @@ -1,28 +0,0 @@ -""" -Script for building the example. 
- -Usage: - python setup.py py2app -""" -from distutils.core import setup -import py2app - -NAME = 'aliens' -VERSION = '0.1' - -plist = dict( - CFBundleIconFile=NAME, - CFBundleName=NAME, - CFBundleShortVersionString=VERSION, - CFBundleGetInfoString=' '.join([NAME, VERSION]), - CFBundleExecutable=NAME, - CFBundleIdentifier='org.pygame.examples.aliens', -) - -setup( - data_files=['English.lproj', '../../data'], - app=[ - #dict(script="aliens_bootstrap.py", plist=plist), - dict(script="aliens.py", plist=plist), - ], -) diff --git a/venv/lib/python3.7/site-packages/pygame/examples/macosx/macfont.py b/venv/lib/python3.7/site-packages/pygame/examples/macosx/macfont.py deleted file mode 100644 index cce23ce..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/macosx/macfont.py +++ /dev/null @@ -1,136 +0,0 @@ -""" -EXPERIMENTAL CODE! - -Here we load a .TTF font file, and display it in -a basic pygame window. It demonstrates several of the -Font object attributes. Nothing exciting in here, but -it makes a great example for basic window, event, and -font management. 
-""" - - -import pygame -import math -from pygame.locals import * -from pygame import Surface -from pygame.surfarray import blit_array, make_surface, pixels3d, pixels2d -import Numeric - -from Foundation import * -from AppKit import * - -def _getColor(color=None): - if color is None: - return NSColor.clearColor() - div255 = (0.00390625).__mul__ - if len(color) == 3: - color = tuple(color) + (255.0,) - return NSColor.colorWithDeviceRed_green_blue_alpha_(*map(div255, color)) - -def _getBitmapImageRep(size, hasAlpha=True): - width, height = map(int, size) - return NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(None, width, height, 8, 4, hasAlpha, False, NSDeviceRGBColorSpace, width*4, 32) - -class SysFont(object): - def __init__(self, name, size): - self._font = NSFont.fontWithName_size_(name, size) - self._isBold = False - self._isOblique = False - self._isUnderline = False - self._family = name - self._size = size - self._setupFont() - - def _setupFont(self): - name = self._family - if self._isBold or self._isOblique: - name = '%s-%s%s' % ( - name, - self._isBold and 'Bold' or '', - self._isOblique and 'Oblique' or '') - self._font = NSFont.fontWithName_size_(name, self._size) - print (name, self._font) - if self._font is None: - if self._isBold: - self._font = NSFont.boldSystemFontOfSize(self._size) - else: - self._font = NSFont.systemFontOfSize_(self._size) - - def get_ascent(self): - return self._font.ascender() - - def get_descent(self): - return -self._font.descender() - - def get_bold(self): - return self._isBold - - def get_height(self): - return self._font.defaultLineHeightForFont() - - def get_italic(self): - return self._isOblique - - def get_linesize(self): - pass - - def get_underline(self): - return self._isUnderline - - def set_bold(self, isBold): - if isBold != self._isBold: - self._isBold = isBold - self._setupFont() - - def 
set_italic(self, isOblique): - if isOblique != self._isOblique: - self._isOblique = isOblique - self._setupFont() - - def set_underline(self, isUnderline): - self._isUnderline = isUnderline - - def size(self, text): - return tuple(map(int,map(math.ceil, NSString.sizeWithAttributes_(text, { - NSFontAttributeName: self._font, - NSUnderlineStyleAttributeName: self._isUnderline and 1.0 or None, - })))) - - def render(self, text, antialias, forecolor, backcolor=(0,0,0,255)): - size = self.size(text) - img = NSImage.alloc().initWithSize_(size) - img.lockFocus() - - NSString.drawAtPoint_withAttributes_(text, (0.0, 0.0), { - NSFontAttributeName: self._font, - NSUnderlineStyleAttributeName: self._isUnderline and 1.0 or None, - NSBackgroundColorAttributeName: backcolor and _getColor(backcolor) or None, - NSForegroundColorAttributeName: _getColor(forecolor), - }) - - rep = NSBitmapImageRep.alloc().initWithFocusedViewRect_(((0.0, 0.0), size)) - img.unlockFocus() - if rep.samplesPerPixel() == 4: - s = Surface(size, SRCALPHA|SWSURFACE, 32, [-1<<24,0xff<<16,0xff<<8,0xff]) - - a = Numeric.reshape(Numeric.fromstring(rep.bitmapData(), typecode=Numeric.Int32), (size[1], size[0])) - blit_array(s, Numeric.swapaxes(a,0,1)) - return s.convert_alpha() - -if __name__=='__main__': - pygame.init() - screen = pygame.display.set_mode((600, 600)) - s = SysFont('Gill Sans', 36) - s.set_italic(1) - s.set_underline(1) - done = False - surf = s.render('OS X Fonts!', True, (255,0,0,255), (0,0,0,0)) - screen.blit(surf, (0,0)) - screen.blit(surf, (2, 2)) - pygame.display.update() - while not done: - - for e in pygame.event.get(): - if e.type == QUIT or (e.type == KEYUP and e.key == K_ESCAPE): - done = True - break diff --git a/venv/lib/python3.7/site-packages/pygame/examples/mask.py b/venv/lib/python3.7/site-packages/pygame/examples/mask.py deleted file mode 100644 index c7067ac..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/mask.py +++ /dev/null @@ -1,176 +0,0 @@ -#!/usr/bin/env 
python -"""A pgyame.mask collition detection example - -exports main() - -This module can also be run as a stand-alone program, excepting -one or more image file names as command line arguments. - -""" - -import sys, random -import pygame, pygame.image, pygame.surface, pygame.time, pygame.display - -def maskFromSurface(surface, threshold = 127): - #return pygame.mask.from_surface(surface, threshold) - - mask = pygame.mask.Mask(surface.get_size()) - key = surface.get_colorkey() - if key: - for y in range(surface.get_height()): - for x in range(surface.get_width()): - if surface.get_at((x,y)) != key: - mask.set_at((x,y),1) - else: - for y in range(surface.get_height()): - for x in range (surface.get_width()): - if surface.get_at((x,y))[3] > threshold: - mask.set_at((x,y),1) - return mask - -def vadd(x,y): - return [x[0]+y[0],x[1]+y[1]] - -def vsub(x,y): - return [x[0]-y[0],x[1]-y[1]] - -def vdot(x,y): - return x[0]*y[0]+x[1]*y[1] - -class Sprite: - def __init__(self, surface, mask = None): - self.surface = surface - if mask: - self.mask = mask - else: - self.mask = maskFromSurface(self.surface) - self.setPos([0,0]) - self.setVelocity([0,0]) - - def setPos(self,pos): - self.pos = [pos[0],pos[1]] - def setVelocity(self,vel): - self.vel = [vel[0],vel[1]] - def move(self,dr): - self.pos = vadd(self.pos,dr) - def kick(self,impulse): - self.vel[0] += impulse[0] - self.vel[1] += impulse[1] - - def collide(self,s): - """Test if the sprites are colliding and - resolve the collision in this case.""" - offset = [int(x) for x in vsub(s.pos,self.pos)] - overlap = self.mask.overlap_area(s.mask,offset) - if overlap == 0: - return - """Calculate collision normal""" - nx = (self.mask.overlap_area(s.mask,(offset[0]+1,offset[1])) - - self.mask.overlap_area(s.mask,(offset[0]-1,offset[1]))) - ny = (self.mask.overlap_area(s.mask,(offset[0],offset[1]+1)) - - self.mask.overlap_area(s.mask,(offset[0],offset[1]-1))) - if nx == 0 and ny == 0: - """One sprite is inside another""" - return - n = 
[nx,ny] - dv = vsub(s.vel,self.vel) - J = vdot(dv,n)/(2*vdot(n,n)) - if J > 0: - """Can scale up to 2*J here to get bouncy collisions""" - J *= 1.9 - self.kick([nx*J,ny*J]) - s.kick([-J*nx,-J*ny]) - return - """Separate the sprites""" - c1 = -overlap/vdot(n,n) - c2 = -c1/2 - self.move([c2*nx,c2*ny]) - s.move([(c1+c2)*nx,(c1+c2)*ny]) - - def update(self,dt): - self.pos[0] += dt*self.vel[0] - self.pos[1] += dt*self.vel[1] - - -def main(*args): - """Display multiple images bounce off each other using collition detection - - Positional arguments: - one or more image file names. - - This pygame.masks demo will display multiple moving sprites bouncing - off each other. More than one sprite image can be provided. - - """ - - if len(args) == 0: - raise ValueError("Require at least one image file name: non given") - print ('Press any key to quit') - screen = pygame.display.set_mode((640,480)) - images = [] - masks = [] - for impath in args: - images.append(pygame.image.load(impath).convert_alpha()) - masks.append(maskFromSurface(images[-1])) - - numtimes = 10 - import time - t1 = time.time() - for x in range(numtimes): - m = maskFromSurface(images[-1]) - t2 = time.time() - - print ("python maskFromSurface :%s" % (t2-t1)) - - t1 = time.time() - for x in range(numtimes): - m = pygame.mask.from_surface(images[-1]) - t2 = time.time() - - print ("C pygame.mask.from_surface :%s" % (t2-t1)) - - sprites = [] - for i in range(20): - j = i % len(images) - s = Sprite(images[j],masks[j]) - s.setPos((random.uniform(0,screen.get_width()), - random.uniform(0,screen.get_height()))) - s.setVelocity((random.uniform(-5,5),random.uniform(-5,5))) - sprites.append(s) - pygame.time.set_timer(pygame.USEREVENT,33) - while 1: - event = pygame.event.wait() - if event.type == pygame.QUIT: - return - elif event.type == pygame.USEREVENT: - """Do both mechanics and screen update""" - screen.fill((240,220,100)) - for i in range(len(sprites)): - for j in range(i+1,len(sprites)): - 
sprites[i].collide(sprites[j]) - for s in sprites: - s.update(1) - if s.pos[0] < -s.surface.get_width()-3: - s.pos[0] = screen.get_width() - elif s.pos[0] > screen.get_width()+3: - s.pos[0] = -s.surface.get_width() - if s.pos[1] < -s.surface.get_height()-3: - s.pos[1] = screen.get_height() - elif s.pos[1] > screen.get_height()+3: - s.pos[1] = -s.surface.get_height() - screen.blit(s.surface,s.pos) - pygame.display.update() - elif event.type == pygame.KEYDOWN: - return - -if __name__ == '__main__': - if len(sys.argv) < 2: - print ('Usage: mask.py [ ...]') - print ('Let many copies of IMAGE(s) bounce against each other') - print ('Press any key to quit') - else: - main(*sys.argv[1:]) - - - - diff --git a/venv/lib/python3.7/site-packages/pygame/examples/midi.py b/venv/lib/python3.7/site-packages/pygame/examples/midi.py deleted file mode 100644 index 41d8e5b..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/midi.py +++ /dev/null @@ -1,822 +0,0 @@ -#!/usr/bin/env python - -"""Contains an example of midi input, and a separate example of midi output. - -By default it runs the output example. 
-python midi.py --output -python midi.py --input - -""" - -import sys -import os - -import pygame -import pygame.midi -from pygame.locals import * - -try: # Ensure set available for output example - set -except NameError: - from sets import Set as set - - -def print_device_info(): - pygame.midi.init() - _print_device_info() - pygame.midi.quit() - -def _print_device_info(): - for i in range( pygame.midi.get_count() ): - r = pygame.midi.get_device_info(i) - (interf, name, input, output, opened) = r - - in_out = "" - if input: - in_out = "(input)" - if output: - in_out = "(output)" - - print ("%2i: interface :%s:, name :%s:, opened :%s: %s" % - (i, interf, name, opened, in_out)) - - - - -def input_main(device_id = None): - pygame.init() - pygame.fastevent.init() - event_get = pygame.fastevent.get - event_post = pygame.fastevent.post - - pygame.midi.init() - - _print_device_info() - - - if device_id is None: - input_id = pygame.midi.get_default_input_id() - else: - input_id = device_id - - print ("using input_id :%s:" % input_id) - i = pygame.midi.Input( input_id ) - - pygame.display.set_mode((1,1)) - - - - going = True - while going: - events = event_get() - for e in events: - if e.type in [QUIT]: - going = False - if e.type in [KEYDOWN]: - going = False - if e.type in [pygame.midi.MIDIIN]: - print (e) - - if i.poll(): - midi_events = i.read(10) - # convert them into pygame events. - midi_evs = pygame.midi.midis2events(midi_events, i.device_id) - - for m_e in midi_evs: - event_post( m_e ) - - del i - pygame.midi.quit() - - - -def output_main(device_id = None): - """Execute a musical keyboard example for the Church Organ instrument - - This is a piano keyboard example, with a two octave keyboard, starting at - note F3. Left mouse down over a key starts a note, left up stops it. 
The - notes are also mapped to the computer keyboard keys, assuming an American - English PC keyboard (sorry everyone else, but I don't know if I can map to - absolute key position instead of value.) The white keys are on the second - row, TAB to BACKSLASH, starting with note F3. The black keys map to the top - row, '1' to BACKSPACE, starting with F#3. 'r' is middle C. Close the - window or press ESCAPE to quit the program. Key velocity (note - amplitude) varies vertically on the keyboard image, with minimum velocity - at the top of a key and maximum velocity at bottom. - - Default Midi output, no device_id given, is to the default output device - for the computer. - - """ - - # A note to new pygamers: - # - # All the midi module stuff is in this function. It is unnecessary to - # understand how the keyboard display works to appreciate how midi - # messages are sent. - - # The keyboard is drawn by a Keyboard instance. This instance maps Midi - # notes to musical keyboard keys. A regions surface maps window position - # to (Midi note, velocity) pairs. A key_mapping dictionary does the same - # for computer keyboard keys. Midi sound is controlled with direct method - # calls to a pygame.midi.Output instance. - # - # Things to consider when using pygame.midi: - # - # 1) Initialize the midi module with a to pygame.midi.init(). - # 2) Create a midi.Output instance for the desired output device port. - # 3) Select instruments with set_instrument() method calls. - # 4) Play notes with note_on() and note_off() method calls. - # 5) Call pygame.midi.Quit() when finished. Though the midi module tries - # to ensure that midi is properly shut down, it is best to do it - # explicitly. A try/finally statement is the safest way to do this. 
- # - GRAND_PIANO = 0 - CHURCH_ORGAN = 19 - - instrument = CHURCH_ORGAN - #instrument = GRAND_PIANO - start_note = 53 # F3 (white key note), start_note != 0 - n_notes = 24 # Two octaves (14 white keys) - - bg_color = Color('slategray') - - key_mapping = make_key_mapping([K_TAB, K_1, K_q, K_2, K_w, K_3, K_e, K_r, - K_5, K_t, K_6, K_y, K_u, K_8, K_i, K_9, - K_o, K_0, K_p, K_LEFTBRACKET, K_EQUALS, - K_RIGHTBRACKET, K_BACKSPACE, K_BACKSLASH], - start_note) - - - - pygame.init() - pygame.midi.init() - - _print_device_info() - - if device_id is None: - port = pygame.midi.get_default_output_id() - else: - port = device_id - - print ("using output_id :%s:" % port) - - - - midi_out = pygame.midi.Output(port, 0) - try: - midi_out.set_instrument(instrument) - keyboard = Keyboard(start_note, n_notes) - - screen = pygame.display.set_mode(keyboard.rect.size) - screen.fill(bg_color) - pygame.display.flip() - - background = pygame.Surface(screen.get_size()) - background.fill(bg_color) - dirty_rects = [] - keyboard.draw(screen, background, dirty_rects) - pygame.display.update(dirty_rects) - - regions = pygame.Surface(screen.get_size()) # initial color (0,0,0) - keyboard.map_regions(regions) - - pygame.event.set_blocked(MOUSEMOTION) - repeat = 1 - mouse_note = 0 - on_notes = set() - while 1: - update_rects = None - e = pygame.event.wait() - if e.type == pygame.MOUSEBUTTONDOWN: - mouse_note, velocity, __, __ = regions.get_at(e.pos) - if mouse_note and mouse_note not in on_notes: - keyboard.key_down(mouse_note) - midi_out.note_on(mouse_note, velocity) - on_notes.add(mouse_note) - else: - mouse_note = 0 - elif e.type == pygame.MOUSEBUTTONUP: - if mouse_note: - midi_out.note_off(mouse_note) - keyboard.key_up(mouse_note) - on_notes.remove(mouse_note) - mouse_note = 0 - elif e.type == pygame.QUIT: - break - elif e.type == pygame.KEYDOWN: - if e.key == pygame.K_ESCAPE: - break - try: - note, velocity = key_mapping[e.key] - except KeyError: - pass - else: - if note not in on_notes: - 
keyboard.key_down(note) - midi_out.note_on(note, velocity) - on_notes.add(note) - elif e.type == pygame.KEYUP: - try: - note, __ = key_mapping[e.key] - except KeyError: - pass - else: - if note in on_notes and note != mouse_note: - keyboard.key_up(note) - midi_out.note_off(note, 0) - on_notes.remove(note) - - dirty_rects = [] - keyboard.draw(screen, background, dirty_rects) - pygame.display.update(dirty_rects) - finally: - del midi_out - pygame.midi.quit() - -def make_key_mapping(key_list, start_note): - """Return a dictionary of (note, velocity) by computer keyboard key code""" - - mapping = {} - for i in range(len(key_list)): - mapping[key_list[i]] = (start_note + i, 127) - return mapping - -class NullKey(object): - """A dummy key that ignores events passed to it by other keys - - A NullKey instance is the left key instance used by default - for the left most keyboard key. - - """ - - def _right_white_down(self): - pass - - def _right_white_up(self): - pass - - def _right_black_down(self): - pass - - def _right_black_up(self): - pass - -null_key = NullKey() - -def key_class(updates, image_strip, image_rects, is_white_key=True): - """Return a keyboard key widget class - - Arguments: - updates - a set into which a key instance adds itself if it needs - redrawing. - image_strip - The surface containing the images of all key states. - image_rects - A list of Rects giving the regions within image_strip that - are relevant to this key class. - is_white_key (default True) - Set false if this is a black key. - - This function automates the creation of a key widget class for the - three basic key types. A key has two basic states, up or down ( - depressed). Corresponding up and down images are drawn for each - of these two states. But to give the illusion of depth, a key - may have shadows cast upon it by the adjacent keys to its right. - These shadows change depending on the up/down state of the key and - its neighbors. 
So a key may support multiple images and states - depending on the shadows. A key type is determined by the length - of image_rects and the value of is_white. - - """ - - # Naming convention: Variables used by the Key class as part of a - # closure start with 'c_'. - - # State logic and shadows: - # - # A key may cast a shadow upon the key to its left. A black key casts a - # shadow on an adjacent white key. The shadow changes depending of whether - # the black or white key is depressed. A white key casts a shadow on the - # white key to its left if it is up and the left key is down. Therefore - # a keys state, and image it will draw, is determined entirely by its - # itself and the key immediately adjacent to it on the right. A white key - # is always assumed to have an adjacent white key. - # - # There can be up to eight key states, representing all permutations - # of the three fundamental states of self up/down, adjacent white - # right up/down, adjacent black up/down. - # - down_state_none = 0 - down_state_self = 1 - down_state_white = down_state_self << 1 - down_state_self_white = down_state_self | down_state_white - down_state_black = down_state_white << 1 - down_state_self_black = down_state_self | down_state_black - down_state_white_black = down_state_white | down_state_black - down_state_all = down_state_self | down_state_white_black - - # Some values used in the class. - # - c_down_state_initial = down_state_none - c_down_state_rect_initial = image_rects[0] - c_down_state_self = down_state_self - c_updates = updates - c_image_strip = image_strip - c_width, c_height = image_rects[0].size - - # A key propagates its up/down state change to the adjacent white key on - # the left by calling the adjacent key's _right_black_down or - # _right_white_down method. 
- # - if is_white_key: - key_color = 'white' - else: - key_color = 'black' - c_notify_down_method = "_right_%s_down" % key_color - c_notify_up_method = "_right_%s_up" % key_color - - # Images: - # - # A black key only needs two images, for the up and down states. Its - # appearance is unaffected by the adjacent keys to its right, which cast no - # shadows upon it. - # - # A white key with a no adjacent black to its right only needs three - # images, for self up, self down, and both self and adjacent white down. - # - # A white key with both a black and white key to its right needs six - # images: self up, self up and adjacent black down, self down, self and - # adjacent white down, self and adjacent black down, and all three down. - # - # Each 'c_event' dictionary maps the current key state to a new key state, - # along with corresponding image, for the related event. If no redrawing - # is required for the state change then the image rect is simply None. - # - c_event_down = {down_state_none: (down_state_self, image_rects[1])} - c_event_up = {down_state_self: (down_state_none, image_rects[0])} - c_event_right_white_down = { - down_state_none: (down_state_none, None), - down_state_self: (down_state_self, None)} - c_event_right_white_up = c_event_right_white_down.copy() - c_event_right_black_down = c_event_right_white_down.copy() - c_event_right_black_up = c_event_right_white_down.copy() - if len(image_rects) > 2: - c_event_down[down_state_white] = ( - down_state_self_white, image_rects[2]) - c_event_up[down_state_self_white] = (down_state_white, image_rects[0]) - c_event_right_white_down[down_state_none] = (down_state_white, None) - c_event_right_white_down[down_state_self] = ( - down_state_self_white, image_rects[2]) - c_event_right_white_up[down_state_white] = (down_state_none, None) - c_event_right_white_up[down_state_self_white] = ( - down_state_self, image_rects[1]) - c_event_right_black_down[down_state_white] = ( - down_state_white, None) - 
c_event_right_black_down[down_state_self_white] = ( - down_state_self_white, None) - c_event_right_black_up[down_state_white] = ( - down_state_white, None) - c_event_right_black_up[down_state_self_white] = ( - down_state_self_white, None) - if len(image_rects) > 3: - c_event_down[down_state_black] = ( - down_state_self_black, image_rects[4]) - c_event_down[down_state_white_black] = (down_state_all, image_rects[5]) - c_event_up[down_state_self_black] = (down_state_black, image_rects[3]) - c_event_up[down_state_all] = (down_state_white_black, image_rects[3]) - c_event_right_white_down[down_state_black] = ( - down_state_white_black, None) - c_event_right_white_down[down_state_self_black] = ( - down_state_all, image_rects[5]) - c_event_right_white_up[down_state_white_black] = ( - down_state_black, None) - c_event_right_white_up[down_state_all] = ( - down_state_self_black, image_rects[4]) - c_event_right_black_down[down_state_none] = ( - down_state_black, image_rects[3]) - c_event_right_black_down[down_state_self] = ( - down_state_self_black, image_rects[4]) - c_event_right_black_down[down_state_white] = ( - down_state_white_black, image_rects[3]) - c_event_right_black_down[down_state_self_white] = ( - down_state_all, image_rects[5]) - c_event_right_black_up[down_state_black] = ( - down_state_none, image_rects[0]) - c_event_right_black_up[down_state_self_black] = ( - down_state_self, image_rects[1]) - c_event_right_black_up[down_state_white_black] = ( - down_state_white, image_rects[0]) - c_event_right_black_up[down_state_all] = ( - down_state_self_white, image_rects[2]) - - - class Key(object): - """A key widget, maintains key state and draws the key's image - - Constructor arguments: - ident - A unique key identifier. Any immutable type suitable as a key. - posn - The location of the key on the display surface. - key_left - Optional, the adjacent white key to the left. Changes in - up and down state are propagated to that key. 
- - A key has an associated position and state. Related to state is the - image drawn. State changes are managed with method calls, one method - per event type. The up and down event methods are public. Other - internal methods are for passing on state changes to the key_left - key instance. - - """ - - is_white = is_white_key - - def __init__(self, ident, posn, key_left = None): - """Return a new Key instance - - The initial state is up, with all adjacent keys to the right also - up. - - """ - if key_left is None: - key_left = null_key - rect = Rect(posn[0], posn[1], c_width, c_height) - self.rect = rect - self._state = c_down_state_initial - self._source_rect = c_down_state_rect_initial - self._ident = ident - self._hash = hash(ident) - self._notify_down = getattr(key_left, c_notify_down_method) - self._notify_up = getattr(key_left, c_notify_up_method) - self._key_left = key_left - self._background_rect = Rect(rect.left, rect.bottom - 10, - c_width, 10) - c_updates.add(self) - - def down(self): - """Signal that this key has been depressed (is down)""" - - self._state, source_rect = c_event_down[self._state] - if source_rect is not None: - self._source_rect = source_rect - c_updates.add(self) - self._notify_down() - - def up(self): - """Signal that this key has been released (is up)""" - - self._state, source_rect = c_event_up[self._state] - if source_rect is not None: - self._source_rect = source_rect - c_updates.add(self) - self._notify_up() - - def _right_white_down(self): - """Signal that the adjacent white key has been depressed - - This method is for internal propagation of events between - key instances. - - """ - - self._state, source_rect = c_event_right_white_down[self._state] - if source_rect is not None: - self._source_rect = source_rect - c_updates.add(self) - - def _right_white_up(self): - """Signal that the adjacent white key has been released - - This method is for internal propagation of events between - key instances. 
- - """ - - self._state, source_rect = c_event_right_white_up[self._state] - if source_rect is not None: - self._source_rect = source_rect - c_updates.add(self) - - def _right_black_down(self): - """Signal that the adjacent black key has been depressed - - This method is for internal propagation of events between - key instances. - - """ - - self._state, source_rect = c_event_right_black_down[self._state] - if source_rect is not None: - self._source_rect = source_rect - c_updates.add(self) - - def _right_black_up(self): - """Signal that the adjacent black key has been released - - This method is for internal propagation of events between - key instances. - - """ - - self._state, source_rect = c_event_right_black_up[self._state] - if source_rect is not None: - self._source_rect = source_rect - c_updates.add(self) - - def __eq__(self, other): - """True if same identifiers""" - - return self._ident == other._ident - - def __hash__(self): - """Return the immutable hash value""" - - return self._hash - - def __str__(self): - """Return the key's identifier and position as a string""" - - return ("" % - (self._ident, self.rect.top, self.rect.left)) - - def draw(self, surf, background, dirty_rects): - """Redraw the key on the surface surf - - The background is redrawn. The altered region is added to the - dirty_rects list. - - """ - - surf.blit(background, self._background_rect, self._background_rect) - surf.blit(c_image_strip, self.rect, self._source_rect) - dirty_rects.append(self.rect) - - return Key - -def key_images(): - """Return a keyboard keys image strip and a mapping of image locations - - The return tuple is a surface and a dictionary of rects mapped to key - type. - - This function encapsulates the constants relevant to the keyboard image - file. There are five key types. One is the black key. The other four - white keys are determined by the proximity of the black keys. The plain - white key has no black key adjacent to it. 
A white-left and white-right - key has a black key to the left or right of it respectively. A white-center - key has a black key on both sides. A key may have up to six related - images depending on the state of adjacent keys to its right. - - """ - - my_dir = os.path.split(os.path.abspath(__file__))[0] - strip_file = os.path.join(my_dir, 'data', 'midikeys.png') - white_key_width = 42 - white_key_height = 160 - black_key_width = 22 - black_key_height = 94 - strip = pygame.image.load(strip_file) - names = [ - 'black none', 'black self', - 'white none', 'white self', 'white self-white', - 'white-left none', 'white-left self', 'white-left black', - 'white-left self-black', 'white-left self-white', 'white-left all', - 'white-center none', 'white-center self', - 'white-center black', 'white-center self-black', - 'white-center self-white', 'white-center all', - 'white-right none', 'white-right self', 'white-right self-white'] - rects = {} - for i in range(2): - rects[names[i]] = Rect(i * white_key_width, 0, - black_key_width, black_key_height) - for i in range(2, len(names)): - rects[names[i]] = Rect(i * white_key_width, 0, - white_key_width, white_key_height) - return strip, rects - -class Keyboard(object): - """Musical keyboard widget - - Constructor arguments: - start_note: midi note value of the starting note on the keyboard. - n_notes: number of notes (keys) on the keyboard. - - A Keyboard instance draws the musical keyboard and maintains the state of - all the keyboard keys. Individual keys can be in a down (depressed) or - up (released) state. 
- - """ - - _image_strip, _rects = key_images() - - white_key_width, white_key_height = _rects['white none'].size - black_key_width, black_key_height = _rects['black none'].size - - _updates = set() - - # There are five key classes, representing key shape: - # black key (BlackKey), plain white key (WhiteKey), white key to the left - # of a black key (WhiteKeyLeft), white key between two black keys - # (WhiteKeyCenter), and white key to the right of a black key - # (WhiteKeyRight). - BlackKey = key_class(_updates, - _image_strip, - [_rects['black none'], _rects['black self']], - False) - WhiteKey = key_class(_updates, - _image_strip, - [_rects['white none'], - _rects['white self'], - _rects['white self-white']]) - WhiteKeyLeft = key_class(_updates, - _image_strip, - [_rects['white-left none'], - _rects['white-left self'], - _rects['white-left self-white'], - _rects['white-left black'], - _rects['white-left self-black'], - _rects['white-left all']]) - WhiteKeyCenter = key_class(_updates, - _image_strip, - [_rects['white-center none'], - _rects['white-center self'], - _rects['white-center self-white'], - _rects['white-center black'], - _rects['white-center self-black'], - _rects['white-center all']]) - WhiteKeyRight = key_class(_updates, - _image_strip, - [_rects['white-right none'], - _rects['white-right self'], - _rects['white-right self-white']]) - - def __init__(self, start_note, n_notes): - """Return a new Keyboard instance with n_note keys""" - - self._start_note = start_note - self._end_note = start_note + n_notes - 1 - self._add_keys() - - def _add_keys(self): - """Populate the keyboard with key instances - - Set the _keys and rect attributes. - - """ - - # Keys are entered in a list, where index is Midi note. Since there are - # only 128 possible Midi notes the list length is managable. Unassigned - # note positions should never be accessed, so are set None to ensure - # the bug is quickly detected. 
- # - key_map = [None] * 128 - - start_note = self._start_note - end_note = self._end_note - black_offset = self.black_key_width // 2 - prev_white_key = None - x = y = 0 - if is_white_key(start_note): - is_prev_white = True - else: - x += black_offset - is_prev_white = False - for note in range(start_note, end_note + 1): - ident = note # For now notes uniquely identify keyboard keys. - if is_white_key(note): - if is_prev_white: - if note == end_note or is_white_key(note + 1): - key = self.WhiteKey(ident, (x, y), prev_white_key) - else: - key = self.WhiteKeyLeft(ident, (x, y), prev_white_key) - else: - if note == end_note or is_white_key(note + 1): - key = self.WhiteKeyRight(ident, (x, y), prev_white_key) - else: - key = self.WhiteKeyCenter(ident, - (x, y), - prev_white_key) - is_prev_white = True - x += self.white_key_width - prev_white_key = key - else: - key = self.BlackKey(ident, - (x - black_offset, y), - prev_white_key) - is_prev_white = False - key_map[note] = key - self._keys = key_map - - kb_width = key_map[self._end_note].rect.right - kb_height = self.white_key_height - self.rect = Rect(0, 0, kb_width, kb_height) - - def map_regions(self, regions): - """Draw the key regions onto surface regions. - - Regions must have at least 3 byte pixels. Each pixel of the keyboard - rectangle is set to the color (note, velocity, 0). The regions surface - must be at least as large as (0, 0, self.rect.left, self.rect.bottom) - - """ - - # First draw the white key regions. Then add the overlapping - # black key regions. 
- # - cutoff = self.black_key_height - black_keys = [] - for note in range(self._start_note, self._end_note + 1): - key = self._keys[note] - if key.is_white: - fill_region(regions, note, key.rect, cutoff) - else: - black_keys.append((note, key)) - for note, key in black_keys: - fill_region(regions, note, key.rect, cutoff) - - def draw(self, surf, background, dirty_rects): - """Redraw all altered keyboard keys""" - - changed_keys = self._updates - while changed_keys: - changed_keys.pop().draw(surf, background, dirty_rects) - - def key_down(self, note): - """Signal a key down event for note""" - - self._keys[note].down() - - def key_up(self, note): - """Signal a key up event for note""" - - self._keys[note].up() - -def fill_region(regions, note, rect, cutoff): - """Fill the region defined by rect with a (note, velocity, 0) color - - The velocity varies from a small value at the top of the region to - 127 at the bottom. The vertical region 0 to cutoff is split into - three parts, with velocities 42, 84 and 127. Everything below cutoff - has velocity 127. 
- - """ - - x, y, width, height = rect - if cutoff is None: - cutoff = height - delta_height = cutoff // 3 - regions.fill((note, 42, 0), - (x, y, width, delta_height)) - regions.fill((note, 84, 0), - (x, y + delta_height, width, delta_height)) - regions.fill((note, 127, 0), - (x, y + 2 * delta_height, width, height - 2 * delta_height)) - -def is_white_key(note): - """True if note is represented by a white key""" - - key_pattern = [True, False, True, True, False, True, - False, True, True, False, True, False] - return key_pattern[(note - 21) % len(key_pattern)] - - -def usage(): - print ("--input [device_id] : Midi message logger") - print ("--output [device_id] : Midi piano keyboard") - print ("--list : list available midi devices") - -def main(mode='output', device_id=None): - """Run a Midi example - - Arguments: - mode - if 'output' run a midi keyboard output example - 'input' run a midi event logger input example - 'list' list available midi devices - (default 'output') - device_id - midi device number; if None then use the default midi input or - output device for the system - - """ - - if mode == 'input': - input_main(device_id) - elif mode == 'output': - output_main(device_id) - elif mode == 'list': - print_device_info() - else: - raise ValueError("Unknown mode option '%s'" % mode) - -if __name__ == '__main__': - - try: - device_id = int( sys.argv[-1] ) - except: - device_id = None - - if "--input" in sys.argv or "-i" in sys.argv: - - input_main(device_id) - - elif "--output" in sys.argv or "-o" in sys.argv: - output_main(device_id) - elif "--list" in sys.argv or "-l" in sys.argv: - print_device_info() - else: - usage() diff --git a/venv/lib/python3.7/site-packages/pygame/examples/moveit.py b/venv/lib/python3.7/site-packages/pygame/examples/moveit.py deleted file mode 100644 index 194cd12..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/moveit.py +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env python - -""" -This is the full and final example 
from the Pygame Tutorial, -"How Do I Make It Move". It creates 10 objects and animates -them on the screen. - -Note it's a bit scant on error checking, but it's easy to read. :] -Fortunately, this is python, and we needn't wrestle with a pile of -error codes. -""" - - -#import everything -import os, pygame -from pygame.locals import * - -main_dir = os.path.split(os.path.abspath(__file__))[0] - -#our game object class -class GameObject: - def __init__(self, image, height, speed): - self.speed = speed - self.image = image - self.pos = image.get_rect().move(0, height) - def move(self): - self.pos = self.pos.move(self.speed, 0) - if self.pos.right > 600: - self.pos.left = 0 - - -#quick function to load an image -def load_image(name): - path = os.path.join(main_dir, 'data', name) - return pygame.image.load(path).convert() - - -#here's the full code -def main(): - pygame.init() - screen = pygame.display.set_mode((640, 480)) - - player = load_image('player1.gif') - background = load_image('liquid.bmp') - - # scale the background image so that it fills the window and - # successfully overwrites the old sprite position. 
- background = pygame.transform.scale2x(background) - background = pygame.transform.scale2x(background) - - screen.blit(background, (0, 0)) - - objects = [] - for x in range(10): - o = GameObject(player, x*40, x) - objects.append(o) - - while 1: - for event in pygame.event.get(): - if event.type in (QUIT, KEYDOWN): - return - - for o in objects: - screen.blit(background, o.pos, o.pos) - for o in objects: - o.move() - screen.blit(o.image, o.pos) - - pygame.display.update() - - - -if __name__ == '__main__': main() diff --git a/venv/lib/python3.7/site-packages/pygame/examples/oldalien.py b/venv/lib/python3.7/site-packages/pygame/examples/oldalien.py deleted file mode 100644 index d1f6d9f..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/oldalien.py +++ /dev/null @@ -1,237 +0,0 @@ -#!/usr/bin/env python - -"""This is a much simpler version of the aliens.py -example. It makes a good place for beginners to get -used to the way pygame works. Gameplay is pretty similar, -but there are a lot less object types to worry about, -and it makes no attempt at using the optional pygame -modules. -It does provide a good method for using the updaterects -to only update the changed parts of the screen, instead of -the entire screen surface. 
This has large speed benefits -and should be used whenever the fullscreen isn't being changed.""" - - -#import -import random, os.path, sys -import pygame -from pygame.locals import * - -if not pygame.image.get_extended(): - raise SystemExit("Requires the extended image loading from SDL_image") - - -#constants -FRAMES_PER_SEC = 40 -PLAYER_SPEED = 12 -MAX_SHOTS = 2 -SHOT_SPEED = 10 -ALIEN_SPEED = 12 -ALIEN_ODDS = 45 -EXPLODE_TIME = 6 -SCREENRECT = Rect(0, 0, 640, 480) - - -#some globals for friendly access -dirtyrects = [] # list of update_rects -next_tick = 0 # used for timing -class Img: pass # container for images -main_dir = os.path.split(os.path.abspath(__file__))[0] # Program's diretory - - -#first, we define some utility functions - -def load_image(file, transparent): - "loads an image, prepares it for play" - file = os.path.join(main_dir, 'data', file) - try: - surface = pygame.image.load(file) - except pygame.error: - raise SystemExit('Could not load image "%s" %s' % - (file, pygame.get_error())) - if transparent: - corner = surface.get_at((0, 0)) - surface.set_colorkey(corner, RLEACCEL) - return surface.convert() - - - -# The logic for all the different sprite types - -class Actor: - "An enhanced sort of sprite class" - def __init__(self, image): - self.image = image - self.rect = image.get_rect() - - def update(self): - "update the sprite state for this frame" - pass - - def draw(self, screen): - "draws the sprite into the screen" - r = screen.blit(self.image, self.rect) - dirtyrects.append(r) - - def erase(self, screen, background): - "gets the sprite off of the screen" - r = screen.blit(background, self.rect, self.rect) - dirtyrects.append(r) - - -class Player(Actor): - "Cheer for our hero" - def __init__(self): - Actor.__init__(self, Img.player) - self.alive = 1 - self.reloading = 0 - self.rect.centerx = SCREENRECT.centerx - self.rect.bottom = SCREENRECT.bottom - 10 - - def move(self, direction): - self.rect = self.rect.move(direction*PLAYER_SPEED, 
0).clamp(SCREENRECT) - - -class Alien(Actor): - "Destroy him or suffer" - def __init__(self): - Actor.__init__(self, Img.alien) - self.facing = random.choice((-1,1)) * ALIEN_SPEED - if self.facing < 0: - self.rect.right = SCREENRECT.right - - def update(self): - global SCREENRECT - self.rect[0] = self.rect[0] + self.facing - if not SCREENRECT.contains(self.rect): - self.facing = -self.facing; - self.rect.top = self.rect.bottom + 3 - self.rect = self.rect.clamp(SCREENRECT) - - -class Explosion(Actor): - "Beware the fury" - def __init__(self, actor): - Actor.__init__(self, Img.explosion) - self.life = EXPLODE_TIME - self.rect.center = actor.rect.center - - def update(self): - self.life = self.life - 1 - - -class Shot(Actor): - "The big payload" - def __init__(self, player): - Actor.__init__(self, Img.shot) - self.rect.centerx = player.rect.centerx - self.rect.top = player.rect.top - 10 - - def update(self): - self.rect.top = self.rect.top - SHOT_SPEED - - - - -def main(): - "Run me for adrenaline" - global dirtyrects - - # Initialize SDL components - pygame.init() - screen = pygame.display.set_mode(SCREENRECT.size, 0) - clock = pygame.time.Clock() - - # Load the Resources - Img.background = load_image('background.gif', 0) - Img.shot = load_image('shot.gif', 1) - Img.bomb = load_image('bomb.gif', 1) - Img.danger = load_image('danger.gif', 1) - Img.alien = load_image('alien1.gif', 1) - Img.player = load_image('oldplayer.gif', 1) - Img.explosion = load_image('explosion1.gif', 1) - - # Create the background - background = pygame.Surface(SCREENRECT.size) - for x in range(0, SCREENRECT.width, Img.background.get_width()): - background.blit(Img.background, (x, 0)) - screen.blit(background, (0,0)) - pygame.display.flip() - - # Initialize Game Actors - player = Player() - aliens = [Alien()] - shots = [] - explosions = [] - - # Main loop - while player.alive or explosions: - clock.tick(FRAMES_PER_SEC) - - # Gather Events - pygame.event.pump() - keystate = 
pygame.key.get_pressed() - if keystate[K_ESCAPE] or pygame.event.peek(QUIT): - break - - # Clear screen and update actors - for actor in [player] + aliens + shots + explosions: - actor.erase(screen, background) - actor.update() - - # Clean Dead Explosions and Bullets - for e in explosions: - if e.life <= 0: - explosions.remove(e) - for s in shots: - if s.rect.top <= 0: - shots.remove(s) - - # Move the player - direction = keystate[K_RIGHT] - keystate[K_LEFT] - player.move(direction) - - # Create new shots - if not player.reloading and keystate[K_SPACE] and len(shots) < MAX_SHOTS: - shots.append(Shot(player)) - player.reloading = keystate[K_SPACE] - - # Create new alien - if not int(random.random() * ALIEN_ODDS): - aliens.append(Alien()) - - # Detect collisions - alienrects = [] - for a in aliens: - alienrects.append(a.rect) - - hit = player.rect.collidelist(alienrects) - if hit != -1: - alien = aliens[hit] - explosions.append(Explosion(alien)) - explosions.append(Explosion(player)) - aliens.remove(alien) - player.alive = 0 - for shot in shots: - hit = shot.rect.collidelist(alienrects) - if hit != -1: - alien = aliens[hit] - explosions.append(Explosion(alien)) - shots.remove(shot) - aliens.remove(alien) - break - - # Draw everybody - for actor in [player] + aliens + shots + explosions: - actor.draw(screen) - - pygame.display.update(dirtyrects) - dirtyrects = [] - - pygame.time.wait(50) - - -#if python says run, let's run! 
-if __name__ == '__main__': - main() - diff --git a/venv/lib/python3.7/site-packages/pygame/examples/overlay.py b/venv/lib/python3.7/site-packages/pygame/examples/overlay.py deleted file mode 100644 index 8329071..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/overlay.py +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env python - -import sys -import pygame -from pygame.compat import xrange_ - -SR= (800,600) -ovl= None - -######################################################################## -# Simple video player -def vPlayer( fName ): - global ovl - f= open( fName, 'rb' ) - fmt= f.readline().strip() - res= f.readline().strip() - col= f.readline().strip() - if fmt!= "P5": - print ('Unknown format( len %d ). Exiting...' % len( fmt )) - return - - w,h= [ int(x) for x in res.split( ' ' ) ] - h= ( h* 2 )/ 3 - # Read into strings - y= f.read( w*h ) - u= [] - v= [] - for i in xrange_( 0, h/2 ): - u.append( f.read( w/2 )) - v.append( f.read( w/2 )) - - u= ''.join(u) - v= ''.join(v) - - # Open overlay with the resolution specified - ovl= pygame.Overlay(pygame.YV12_OVERLAY, (w,h)) - ovl.set_location(0, 0, w, h) - - ovl.display((y,u,v)) - while 1: - pygame.time.wait(10) - for ev in pygame.event.get(): - if ev.type in (pygame.KEYDOWN, pygame.QUIT): - return - - -def main(fname): - """play video file fname""" - pygame.init() - try: - pygame.display.set_mode(SR) - vPlayer(fname) - finally: - pygame.quit() - -# Test all modules -if __name__== '__main__': - if len( sys.argv )!= 2: - print ("Usage: play_file ") - else: - main(sys.argv[1]) - diff --git a/venv/lib/python3.7/site-packages/pygame/examples/pixelarray.py b/venv/lib/python3.7/site-packages/pygame/examples/pixelarray.py deleted file mode 100644 index 3092b18..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/pixelarray.py +++ /dev/null @@ -1,123 +0,0 @@ -#!/usr/bin/env python -import os, pygame -from pygame.compat import xrange_ - -main_dir = os.path.split(os.path.abspath(__file__))[0] -data_dir 
= os.path.join(main_dir, 'data') - -def show (image): - screen = pygame.display.get_surface() - screen.fill ((255, 255, 255)) - screen.blit (image, (0, 0)) - pygame.display.flip () - while 1: - event = pygame.event.wait () - if event.type == pygame.QUIT: - raise SystemExit - if event.type == pygame.MOUSEBUTTONDOWN: - break - -def main(): - pygame.init () - - pygame.display.set_mode ((255, 255)) - surface = pygame.Surface ((255, 255)) - - pygame.display.flip () - - # Create the PixelArray. - ar = pygame.PixelArray (surface) - r, g, b = 0, 0, 0 - # Do some easy gradient effect. - for y in xrange_ (255): - r, g, b = y, y, y - ar[:,y] = (r, g, b) - del ar - show (surface) - - # We have made some gradient effect, now flip it. - ar = pygame.PixelArray (surface) - ar[:] = ar[:,::-1] - del ar - show (surface) - - # Every second column will be made blue - ar = pygame.PixelArray (surface) - ar[::2] = (0, 0, 255) - del ar - show (surface) - - # Every second row will be made green - ar = pygame.PixelArray (surface) - ar[:,::2] = (0, 255, 0) - del ar - show (surface) - - # Manipulate the image. Flip it around the y axis. - surface = pygame.image.load (os.path.join (data_dir, 'arraydemo.bmp')) - ar = pygame.PixelArray (surface) - ar[:] = ar[:,::-1] - del ar - show (surface) - - # Flip the image around the x axis. - ar = pygame.PixelArray (surface) - ar[:] = ar[::-1,:] - del ar - show (surface) - - # Every second column will be made white. - ar = pygame.PixelArray (surface) - ar[::2] = (255, 255, 255) - del ar - show (surface) - - # Flip the image around both axes, restoring it's original layout. - ar = pygame.PixelArray (surface) - ar[:] = ar[::-1,::-1] - del ar - show (surface) - - # Rotate 90 degrees clockwise. - w, h = surface.get_size () - surface2 = pygame.Surface ((h, w), surface.get_flags (), surface) - ar = pygame.PixelArray (surface) - ar2 = pygame.PixelArray (surface2) - ar2[...] 
= ar.transpose ()[::-1,:] - del ar, ar2 - show (surface2) - - # Scale it by throwing each second pixel away. - surface = pygame.image.load (os.path.join (data_dir, 'arraydemo.bmp')) - ar = pygame.PixelArray (surface) - sf2 = ar[::2,::2].make_surface () - del ar - show (sf2) - - # Replace anything looking like the blue color from the text. - ar = pygame.PixelArray (surface) - ar.replace ((60, 60, 255), (0, 255, 0), 0.06) - del ar - show (surface) - - # Extract anything which might be somewhat black. - surface = pygame.image.load (os.path.join (data_dir, 'arraydemo.bmp')) - ar = pygame.PixelArray (surface) - ar2 = ar.extract ((0, 0, 0), 0.07) - sf2 = ar2.surface - del ar, ar2 - show (sf2) - - # Compare two images. - surface = pygame.image.load (os.path.join (data_dir, 'alien1.gif')) - surface2 = pygame.image.load (os.path.join (data_dir, 'alien2.gif')) - ar1 = pygame.PixelArray (surface) - ar2 = pygame.PixelArray (surface2) - ar3 = ar1.compare (ar2, 0.07) - sf3 = ar3.surface - del ar1, ar2, ar3 - show (sf3) - -if __name__ == '__main__': - main() - diff --git a/venv/lib/python3.7/site-packages/pygame/examples/playmus.py b/venv/lib/python3.7/site-packages/pygame/examples/playmus.py deleted file mode 100644 index 4b4e46d..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/playmus.py +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/env python - -"""A simple music player. - - Use pygame.mixer.music to play an audio file. A window is - created to handle keyboard events for playback commands. - -""" - -from __future__ import print_function -import pygame -import pygame.freetype -from pygame.locals import * -import sys -import os - -class Window(object): - """The application's Pygame window - - A Window instance manages the creation of and drawing to a - window. It is a singleton class. Only one instance can exist. 
- - """ - - instance = None - - def __new__(cls, *args, **kwds): - """Return an open Pygame window""" - - if Window.instance is not None: - return Window.instance - self = object.__new__(cls) - pygame.display.init() - self.screen = pygame.display.set_mode((600, 400)) - Window.instance = self - return self - - def __init__(self, title): - pygame.display.set_caption(title) - self.screen.fill(Color('white')) - pygame.display.flip() - - pygame.freetype.init() - self.font = pygame.freetype.Font(None, 20) - self.font.origin = True - self.ascender = int(self.font.get_sized_ascender() * 1.5) - self.descender = int(self.font.get_sized_descender() * 1.5) - self.line_height = self.ascender - self.descender - - self.write_lines("'q', ESCAPE or close this window to quit\n" - "SPACE to play/pause\n" - "'r' to rewind\n" - "'f' to faid out over 5 seconds\n", 0) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.close() - return False - - def close(self): - pygame.display.quit() - Window.instance = None - - def write_lines(self, text, line=0): - w, h = self.screen.get_size() - line_height = self.line_height - nlines = h // line_height - if line < 0: - line = nlines + line - for i, text_line in enumerate(text.split('\n'), line): - y = i * line_height + self.ascender - # Clear the line first. - self.screen.fill(Color('white'), - (0, i * line_height, w, line_height)) - - # Write new text. - self.font.render_to(self.screen, (15, y), text_line, Color('blue')) - pygame.display.flip() - - -def show_usage_message(): - print("Usage: python playmus.py ") - print(" python -m pygame.examples.playmus ") - -def main(file_path): - """Play an audio file with pygame.mixer.music""" - - with Window(file_path) as win: - win.write_lines('Loading ...', -1) - pygame.mixer.init(frequency=44100) - try: - paused = False - pygame.mixer.music.load(file_path) - - # Make sure the event loop ticks over at least every 0.5 seconds. 
- pygame.time.set_timer(USEREVENT, 500) - - pygame.mixer.music.play() - win.write_lines("Playing ...\n", -1) - - while pygame.mixer.music.get_busy(): - e = pygame.event.wait() - if e.type == pygame.KEYDOWN: - key = e.key - if key == K_SPACE: - if paused: - pygame.mixer.music.unpause() - paused = False - win.write_lines("Playing ...\n", -1) - else: - pygame.mixer.music.pause() - paused = True - win.write_lines("Paused ...\n", -1) - elif key == K_r: - pygame.mixer.music.rewind() - if paused: - win.write_lines("Rewound.", -1) - elif key == K_f: - win.write_lines("Faiding out ...\n", -1) - pygame.mixer.music.fadeout(5000) - # when finished get_busy() will return 0. - elif key in [K_q, K_ESCAPE]: - pygame.mixer.music.stop() - # get_busy() will now return 0. - elif e.type == QUIT: - pygame.mixer.music.stop() - # get_busy() will now return 0. - pygame.time.set_timer(USEREVENT, 0) - finally: - pygame.mixer.quit() - -if __name__ == '__main__': -# Check the only command line argument, a file path - if len(sys.argv) != 2: - show_usage_message() - else: - main(sys.argv[1]) - diff --git a/venv/lib/python3.7/site-packages/pygame/examples/prevent_display_stretching.py b/venv/lib/python3.7/site-packages/pygame/examples/prevent_display_stretching.py deleted file mode 100644 index 9e728a6..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/prevent_display_stretching.py +++ /dev/null @@ -1,89 +0,0 @@ -# coding: ascii -"""Prevent display stretching - -On some computers, the display environment can be configured to stretch -all windows so that they will not appear too small on the screen for -the user. This configuration is especially common on high-DPI displays. -pygame graphics appear distorted when automatically stretched by the -display environment. This script demonstrates a technique for preventing -this stretching and distortion. - -Limitations: -This script makes an API call that is only available on Windows (versions -Vista and newer). ctypes must be installed. 
- -""" - -# Ensure that the computer is running Windows Vista or newer -import os, sys -if os.name != "nt" or sys.getwindowsversion()[0] < 6: - raise NotImplementedError('this script requires Windows Vista or newer') - -# Ensure that ctypes is installed. It is included with Python 2.5 and newer, -# but Python 2.4 users must install ctypes manually. -try: - import ctypes -except ImportError: - print('install ctypes from http://sourceforge.net/projects/ctypes/files/ctypes') - raise - -import pygame - -# Determine whether or not the user would like to prevent stretching -if os.path.basename(sys.executable) == 'pythonw.exe': - selection = 'y' -else: - from pygame.compat import raw_input_ - selection = None - while selection not in ('y', 'n'): - selection = raw_input_('Prevent stretching? (y/n): ').strip().lower() - -if selection == 'y': - msg = 'Stretching is prevented.' -else: - msg = 'Stretching is not prevented.' - -# Prevent stretching -if selection == 'y': - user32 = ctypes.windll.user32 - user32.SetProcessDPIAware() - -# Show screen -pygame.display.init() -RESOLUTION = (350, 350) -screen = pygame.display.set_mode(RESOLUTION) - -# Render message onto a surface -pygame.font.init() -font = pygame.font.Font(None, 36) -msg_surf = font.render(msg, 1, pygame.Color('green')) -res_surf = font.render('Intended resolution: %ix%i' % RESOLUTION, 1, pygame.Color('green')) - -# Control loop -running = True -clock = pygame.time.Clock() -counter = 0 -while running: - - for event in pygame.event.get(): - if event.type == pygame.QUIT: - running = False - - screen.fill(pygame.Color('black')) - - # Draw lines which will be blurry if the window is stretched - # or clear if the window is not stretched. 
- pygame.draw.line(screen, pygame.Color('white'), (0, counter), (RESOLUTION[0] - 1, counter)) - pygame.draw.line(screen, pygame.Color('white'), (counter, 0), (counter, RESOLUTION[1] - 1)) - - # Blit message onto screen surface - msg_blit_rect = screen.blit(msg_surf, (0, 0)) - screen.blit(res_surf, (0, msg_blit_rect.bottom)) - - clock.tick(10) - - pygame.display.flip() - - counter += 1 - if counter == RESOLUTION[0]: - counter = 0 diff --git a/venv/lib/python3.7/site-packages/pygame/examples/scaletest.py b/venv/lib/python3.7/site-packages/pygame/examples/scaletest.py deleted file mode 100644 index a38ad81..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/scaletest.py +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/env python - -import sys, time -import pygame - -def main(imagefile, convert_alpha=False, run_speed_test=False): - """show an interactive image scaler - - Args: - imagefile - name of source image (required) - convert_alpha - use convert_alpha() on the surf (default False) - run_speed_test - (default False) - - """ - - # initialize display - pygame.display.init() - # load background image - background = pygame.image.load(imagefile) - - if run_speed_test: - if convert_alpha: - # convert_alpha() requires the display mode to be set - pygame.display.set_mode((1, 1)) - background = background.convert_alpha() - - SpeedTest(background) - return - - # start fullscreen mode - screen = pygame.display.set_mode((1024, 768), pygame.FULLSCREEN) - if convert_alpha: - background = background.convert_alpha() - - # turn off the mouse pointer - pygame.mouse.set_visible(0) - # main loop - bRunning = True - bUp = False - bDown = False - bLeft = False - bRight = False - cursize = [background.get_width(), background.get_height()] - while(bRunning): - image = pygame.transform.smoothscale(background, cursize) - imgpos = image.get_rect(centerx=512, centery=384) - screen.fill((255,255,255)) - screen.blit(image, imgpos) - pygame.display.flip() - for event in 
pygame.event.get(): - if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE): - bRunning = False - if event.type == pygame.KEYDOWN: - if event.key == pygame.K_UP: bUp = True - if event.key == pygame.K_DOWN: bDown = True - if event.key == pygame.K_LEFT: bLeft = True - if event.key == pygame.K_RIGHT: bRight = True - if event.type == pygame.KEYUP: - if event.key == pygame.K_UP: bUp = False - if event.key == pygame.K_DOWN: bDown = False - if event.key == pygame.K_LEFT: bLeft = False - if event.key == pygame.K_RIGHT: bRight = False - if bUp: - cursize[1] -= 2 - if cursize[1] < 1: cursize[1] = 1 - if bDown: - cursize[1] += 2 - if bLeft: - cursize[0] -= 2 - if cursize[0] < 1: cursize[0] = 1 - if bRight: - cursize[0] += 2 - - -def SpeedTest(image): - print("\nImage Scaling Speed Test - Image Size %s\n" % str( - image.get_size())) - - imgsize = [image.get_width(), image.get_height()] - duration = 0.0 - for i in range(128): - shrinkx = (imgsize[0] * i) // 128 - shrinky = (imgsize[1] * i) // 128 - start = time.time() - tempimg = pygame.transform.smoothscale(image, (shrinkx, shrinky)) - duration += (time.time() - start) - del tempimg - - print("Average transform.smoothscale shrink time: %.4f ms." % ( - duration / 128 * 1000)) - - duration = 0 - for i in range(128): - expandx = (imgsize[0] * (i + 129)) // 128 - expandy = (imgsize[1] * (i + 129)) // 128 - start = time.time() - tempimg = pygame.transform.smoothscale(image, (expandx, expandy)) - duration += (time.time() - start) - del tempimg - - print("Average transform.smoothscale expand time: %.4f ms." % ( - duration / 128 * 1000)) - - duration = 0.0 - for i in range(128): - shrinkx = (imgsize[0] * i) // 128 - shrinky = (imgsize[1] * i) // 128 - start = time.time() - tempimg = pygame.transform.scale(image, (shrinkx, shrinky)) - duration += (time.time() - start) - del tempimg - - print("Average transform.scale shrink time: %.4f ms." 
% ( - duration / 128 * 1000)) - - duration = 0 - for i in range(128): - expandx = (imgsize[0] * (i + 129)) // 128 - expandy = (imgsize[1] * (i + 129)) // 128 - start = time.time() - tempimg = pygame.transform.scale(image, (expandx, expandy)) - duration += (time.time() - start) - del tempimg - - print("Average transform.scale expand time: %.4f ms." % ( - duration / 128 * 1000)) - - -if __name__ == '__main__': - # check input parameters - if len(sys.argv) < 2: - print("\nUsage: %s imagefile [-t] [-convert_alpha]" % sys.argv[0]) - print(" imagefile image filename (required)") - print(" -t run speed test") - print(" -convert_alpha use convert_alpha() on the image's " - "surface\n") - else: - main(sys.argv[1], - convert_alpha = '-convert_alpha' in sys.argv, - run_speed_test = '-t' in sys.argv) diff --git a/venv/lib/python3.7/site-packages/pygame/examples/scrap_clipboard.py b/venv/lib/python3.7/site-packages/pygame/examples/scrap_clipboard.py deleted file mode 100644 index 0fc6be9..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/scrap_clipboard.py +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env python -""" -Demonstrates the clipboard capabilities of pygame. -""" -import os - -import pygame -from pygame.locals import * -import pygame.scrap as scrap -from pygame.compat import as_bytes -BytesIO = pygame.compat.get_BytesIO() - -def usage (): - print ("Press the 'g' key to get all of the current clipboard data") - print ("Press the 'p' key to put a string into the clipboard") - print ("Press the 'a' key to get a list of the currently available types") - print ("Press the 'i' key to put an image into the clipboard") - -main_dir = os.path.split(os.path.abspath(__file__))[0] - -pygame.init () -screen = pygame.display.set_mode ((200, 200)) -c = pygame.time.Clock () -going = True - -# Initialize the scrap module and use the clipboard mode. 
-scrap.init () -scrap.set_mode (SCRAP_CLIPBOARD) - -usage () - -while going: - for e in pygame.event.get (): - if e.type == QUIT or (e.type == KEYDOWN and e.key == K_ESCAPE): - going = False - - elif e.type == KEYDOWN and e.key == K_g: - # This means to look for data. - print ("Getting the different clipboard data..") - for t in scrap.get_types (): - r = scrap.get (t) - if r and len (r) > 500: - print ("Type %s : (large %i byte buffer)" % (t, len(r))) - elif r is None: - print ("Type %s : None" % (t,)) - else: - print ("Type %s : '%s'" % (t, r.decode('ascii', 'ignore'))) - if "image" in t: - namehint = t.split("/")[1] - if namehint in ['bmp', 'png', 'jpg']: - f = BytesIO(r) - loaded_surf = pygame.image.load(f, "." + namehint) - screen.blit(loaded_surf, (0,0)) - - - elif e.type == KEYDOWN and e.key == K_p: - # Place some text into the selection. - print ("Placing clipboard text.") - scrap.put (SCRAP_TEXT, - as_bytes("Hello. This is a message from scrap.")) - - elif e.type == KEYDOWN and e.key == K_a: - # Get all available types. 
- print ("Getting the available types from the clipboard.") - types = scrap.get_types () - print (types) - if len (types) > 0: - print ("Contains %s: %s" % - (types[0], scrap.contains (types[0]))) - print ("Contains _INVALID_: ", scrap.contains ("_INVALID_")) - - elif e.type == KEYDOWN and e.key == K_i: - print ("Putting image into the clipboard.") - scrap.set_mode (SCRAP_CLIPBOARD) - fp = open (os.path.join(main_dir, 'data', 'liquid.bmp'), 'rb') - buf = fp.read () - scrap.put ("image/bmp", buf) - fp.close () - - elif e.type in (KEYDOWN, MOUSEBUTTONDOWN): - usage () - pygame.display.flip() - c.tick(40) - - - - diff --git a/venv/lib/python3.7/site-packages/pygame/examples/scroll.py b/venv/lib/python3.7/site-packages/pygame/examples/scroll.py deleted file mode 100644 index 1d4a9d6..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/scroll.py +++ /dev/null @@ -1,195 +0,0 @@ -#!/usr/bin/env python - -"""An zoomed image viewer that demonstrates Surface.scroll - -This example shows a scrollable image that has a zoom factor of eight. -It uses the Surface.scroll function to shift the image on the display -surface. A clip rectangle protects a margin area. If called as a function, -the example accepts an optional image file path. If run as a program -it takes an optional file path command line argument. If no file -is provided a default image file is used. - -When running click on a black triangle to move one pixel in the direction -the triangle points. Or use the arrow keys. Close the window or press ESC -to quit. 
- -""" - -import sys -import os - -import pygame -from pygame.transform import scale -from pygame.locals import * - -main_dir = os.path.dirname(os.path.abspath(__file__)) - -DIR_UP = 1 -DIR_DOWN = 2 -DIR_LEFT = 3 -DIR_RIGHT = 4 - -zoom_factor = 8 - -def draw_arrow(surf, color, posn, direction): - x, y = posn - if direction == DIR_UP: - pointlist = ((x - 29, y + 30), (x + 30, y + 30), - (x + 1, y - 29), (x, y - 29)) - elif direction == DIR_DOWN: - pointlist = ((x - 29, y - 29), (x + 30, y - 29), - (x + 1, y + 30), (x, y + 30)) - elif direction == DIR_LEFT: - pointlist = ((x + 30, y - 29), (x + 30, y + 30), - (x - 29, y + 1), (x - 29, y)) - else: - pointlist = ((x - 29, y - 29), (x - 29, y + 30), - (x + 30, y + 1), (x + 30, y)) - pygame.draw.polygon(surf, color, pointlist) - -def add_arrow_button(screen, regions, posn, direction): - draw_arrow(screen, Color('black'), posn, direction) - draw_arrow(regions, (direction, 0, 0), posn, direction) - -def scroll_view(screen, image, direction, view_rect): - dx = dy = 0 - src_rect = None - zoom_view_rect = screen.get_clip() - image_w, image_h = image.get_size() - if direction == DIR_UP: - if view_rect.top > 0: - screen.scroll(dy=zoom_factor) - view_rect.move_ip(0, -1) - src_rect = view_rect.copy() - src_rect.h = 1 - dst_rect = zoom_view_rect.copy() - dst_rect.h = zoom_factor - elif direction == DIR_DOWN: - if view_rect.bottom < image_h: - screen.scroll(dy=-zoom_factor) - view_rect.move_ip(0, 1) - src_rect = view_rect.copy() - src_rect.h = 1 - src_rect.bottom = view_rect.bottom - dst_rect = zoom_view_rect.copy() - dst_rect.h = zoom_factor - dst_rect.bottom = zoom_view_rect.bottom - elif direction == DIR_LEFT: - if view_rect.left > 0: - screen.scroll(dx=zoom_factor) - view_rect.move_ip(-1, 0) - src_rect = view_rect.copy() - src_rect.w = 1 - dst_rect = zoom_view_rect.copy() - dst_rect.w = zoom_factor - elif direction == DIR_RIGHT: - if view_rect.right < image_w: - screen.scroll(dx=-zoom_factor) - view_rect.move_ip(1, 0) - 
src_rect = view_rect.copy() - src_rect.w = 1 - src_rect.right = view_rect.right - dst_rect = zoom_view_rect.copy() - dst_rect.w = zoom_factor - dst_rect.right = zoom_view_rect.right - if src_rect is not None: - scale(image.subsurface(src_rect), - dst_rect.size, - screen.subsurface(dst_rect)) - pygame.display.update(zoom_view_rect) - -def main(image_file=None): - if image_file is None: - image_file = os.path.join(main_dir, 'data', 'arraydemo.bmp') - margin = 80 - view_size = (30, 20) - zoom_view_size = (view_size[0] * zoom_factor, - view_size[1] * zoom_factor) - win_size = (zoom_view_size[0] + 2 * margin, - zoom_view_size[1] + 2 * margin) - background_color = Color('beige') - - pygame.init() - - # set up key repeating so we can hold down the key to scroll. - old_k_delay, old_k_interval = pygame.key.get_repeat () - pygame.key.set_repeat (500, 30) - - try: - screen = pygame.display.set_mode(win_size) - screen.fill(background_color) - pygame.display.flip() - - image = pygame.image.load(image_file).convert() - image_w, image_h = image.get_size() - - if image_w < view_size[0] or image_h < view_size[1]: - print ("The source image is too small for this example.") - print ("A %i by %i or larger image is required." % zoom_view_size) - return - - regions = pygame.Surface(win_size, 0, 24) - add_arrow_button(screen, regions, - (40, win_size[1] // 2), DIR_LEFT) - add_arrow_button(screen, regions, - (win_size[0] - 40, win_size[1] // 2), DIR_RIGHT) - add_arrow_button(screen, regions, - (win_size[0] // 2, 40), DIR_UP) - add_arrow_button(screen, regions, - (win_size[0] // 2, win_size[1] - 40), DIR_DOWN) - pygame.display.flip() - - screen.set_clip((margin, margin, zoom_view_size[0], zoom_view_size[1])) - - view_rect = Rect(0, 0, view_size[0], view_size[1]) - - scale(image.subsurface(view_rect), zoom_view_size, - screen.subsurface(screen.get_clip())) - pygame.display.flip() - - - # the direction we will scroll in. 
- direction = None - - clock = pygame.time.Clock() - clock.tick() - - going = True - while going: - # wait for events before doing anything. - #events = [pygame.event.wait()] + pygame.event.get() - events = pygame.event.get() - - for e in events: - if e.type == KEYDOWN: - if e.key == K_ESCAPE: - going = False - elif e.key == K_DOWN: - scroll_view(screen, image, DIR_DOWN, view_rect) - elif e.key == K_UP: - scroll_view(screen, image, DIR_UP, view_rect) - elif e.key == K_LEFT: - scroll_view(screen, image, DIR_LEFT, view_rect) - elif e.key == K_RIGHT: - scroll_view(screen, image, DIR_RIGHT, view_rect) - elif e.type == QUIT: - going = False - elif e.type == MOUSEBUTTONDOWN: - direction = regions.get_at(e.pos)[0] - elif e.type == MOUSEBUTTONUP: - direction = None - - if direction: - scroll_view(screen, image, direction, view_rect) - clock.tick(30) - - finally: - pygame.key.set_repeat (old_k_delay, old_k_interval) - pygame.quit() - -if __name__ == '__main__': - if len(sys.argv) > 1: - image_file = sys.argv[1] - else: - image_file = None - main(image_file) diff --git a/venv/lib/python3.7/site-packages/pygame/examples/sound.py b/venv/lib/python3.7/site-packages/pygame/examples/sound.py deleted file mode 100644 index 6fa32f9..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/sound.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python - -"""extremely simple demonstration playing a soundfile -and waiting for it to finish. you'll need the pygame.mixer -module for this to work. Note how in this simple example we -don't even bother loading all of the pygame package. Just -pick the mixer for sound and time for the delay function. - -Optional command line argument: - the name of an audio file. 
- - -""" - -import os.path, sys -import pygame.mixer, pygame.time -mixer = pygame.mixer -time = pygame.time - -main_dir = os.path.split(os.path.abspath(__file__))[0] - -def main(file_path=None): - """Play an audio file as a buffered sound sample - - Option argument: - the name of an audio file (default data/secosmic_low.wav - - """ - if file_path is None: - file_path = os.path.join(main_dir, - 'data', - 'secosmic_lo.wav') - - #choose a desired audio format - mixer.init(11025) #raises exception on fail - - - #load the sound - sound = mixer.Sound(file_path) - - - #start playing - print ('Playing Sound...') - channel = sound.play() - - - #poll until finished - while channel.get_busy(): #still playing - print (' ...still going...') - time.wait(1000) - print ('...Finished') - -if __name__ == '__main__': - if len(sys.argv) > 1: - main(sys.argv[1]) - else: - main() diff --git a/venv/lib/python3.7/site-packages/pygame/examples/sound_array_demos.py b/venv/lib/python3.7/site-packages/pygame/examples/sound_array_demos.py deleted file mode 100644 index 142ed7b..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/sound_array_demos.py +++ /dev/null @@ -1,262 +0,0 @@ -#!/usr/bin/env python -""" -Creates an echo effect an any Sound object. - -Uses sndarray and MumPy ( or Numeric) to create offset faded copies of the -original sound. Currently it just uses hardcoded values for the -number of echos and the delay. Easy for you to recreate as -needed. The array packaged used can be specified by an optional ---numpy or --numeric command line option. - -version 2. changes: -- Should work with different sample rates now. -- put into a function. -- Uses NumPy by default, but falls back on Numeric. 
- -""" - -__author__ = "Pete 'ShredWheat' Shinners, Rene Dudfield" -__copyright__ = "Copyright (C) 2004 Pete Shinners, Copyright (C) 2005 Rene Dudfield" -__license__ = "Public Domain" -__version__ = "2.0" - - -import os.path -import pygame.mixer, pygame.time, pygame.sndarray, pygame -import pygame.surfarray, pygame.transform -from pygame import sndarray, mixer - -from numpy import zeros, int32, int16 - -import time - - -#mixer.init(44100, -16, 0) -mixer.init() -#mixer.init(11025, -16, 0) -#mixer.init(11025) - - - -def make_echo(sound, samples_per_second, mydebug = True): - """ returns a sound which is echoed of the last one. - """ - - echo_length = 3.5 - - a1 = sndarray.array(sound) - if mydebug: - print ('SHAPE1: %s' % (a1.shape,)) - - length = a1.shape[0] - - #myarr = zeros(length+12000) - myarr = zeros(a1.shape, int32) - - if len(a1.shape) > 1: - mult = a1.shape[1] - size = (a1.shape[0] + int(echo_length * a1.shape[0]), a1.shape[1]) - #size = (a1.shape[0] + int(a1.shape[0] + (echo_length * 3000)), a1.shape[1]) - else: - mult = 1 - size = (a1.shape[0] + int(echo_length * a1.shape[0]),) - #size = (a1.shape[0] + int(a1.shape[0] + (echo_length * 3000)),) - - if mydebug: - print (int(echo_length * a1.shape[0])) - myarr = zeros(size, int32) - - - - if mydebug: - print ("size %s" % (size,)) - print (myarr.shape) - myarr[:length] = a1 - #print (myarr[3000:length+3000]) - #print (a1 >> 1) - #print ("a1.shape %s" % (a1.shape,)) - #c = myarr[3000:length+(3000*mult)] - #print ("c.shape %s" % (c.shape,)) - - incr = int(samples_per_second / echo_length) - gap = length - - - myarr[incr:gap+incr] += a1>>1 - myarr[incr*2:gap+(incr*2)] += a1>>2 - myarr[incr*3:gap+(incr*3)] += a1>>3 - myarr[incr*4:gap+(incr*4)] += a1>>4 - - if mydebug: - print ('SHAPE2: %s' % (myarr.shape,)) - - - sound2 = sndarray.make_sound(myarr.astype(int16)) - - return sound2 - - -def slow_down_sound(sound, rate): - """ returns a sound which is a slowed down version of the original. 
- rate - at which the sound should be slowed down. eg. 0.5 would be half speed. - """ - - raise NotImplementedError() - grow_rate = 1 / rate - - # make it 1/rate times longer. - - a1 = sndarray.array(sound) - - surf = pygame.surfarray.make_surface(a1) - print (a1.shape[0] * grow_rate) - scaled_surf = pygame.transform.scale(surf, (int(a1.shape[0] * grow_rate), a1.shape[1])) - print (scaled_surf) - print (surf) - - a2 = a1 * rate - print (a1.shape) - print (a2.shape) - print (a2) - sound2 = sndarray.make_sound(a2.astype(int16)) - return sound2 - - - - -def sound_from_pos(sound, start_pos, samples_per_second = None, inplace = 1): - """ returns a sound which begins at the start_pos. - start_pos - in seconds from the begining. - samples_per_second - - """ - - # see if we want to reuse the sound data or not. - if inplace: - a1 = pygame.sndarray.samples(sound) - else: - a1 = pygame.sndarray.array(sound) - - # see if samples per second has been given. If not, query the mixer. - # eg. it might be set to 22050 - if samples_per_second is None: - samples_per_second = pygame.mixer.get_init()[0] - - # figure out the start position in terms of samples. - start_pos_in_samples = int(start_pos * samples_per_second) - - # cut the begining off the sound at the start position. - a2 = a1[start_pos_in_samples:] - - # make the Sound instance from the array. - sound2 = pygame.sndarray.make_sound(a2) - - return sound2 - - - - -def main(arraytype=None): - """play various sndarray effects - - If arraytype is provided then use that array package. Valid - values are 'numeric' or 'numpy'. Otherwise default to NumPy, - or fall back on Numeric if NumPy is not installed. 
- - """ - - main_dir = os.path.split(os.path.abspath(__file__))[0] - - if arraytype not in ('numpy', None): - raise ValueError('Array type not supported: %r' % arraytype) - - print ("Using %s array package" % sndarray.get_arraytype()) - print ("mixer.get_init %s" % (mixer.get_init(),)) - inited = mixer.get_init() - - samples_per_second = pygame.mixer.get_init()[0] - - - - print (("-" * 30) + "\n") - print ("loading sound") - sound = mixer.Sound(os.path.join(main_dir, 'data', 'car_door.wav')) - - - - print ("-" * 30) - print ("start positions") - print ("-" * 30) - - start_pos = 0.1 - sound2 = sound_from_pos(sound, start_pos, samples_per_second) - - print ("sound.get_length %s" % (sound.get_length(),)) - print ("sound2.get_length %s" % (sound2.get_length(),)) - sound2.play() - while mixer.get_busy(): - pygame.time.wait(200) - - print ("waiting 2 seconds") - pygame.time.wait(2000) - print ("playing original sound") - - sound.play() - while mixer.get_busy(): - pygame.time.wait(200) - - print ("waiting 2 seconds") - pygame.time.wait(2000) - - - - if 0: - #TODO: this is broken. 
- print (("-" * 30) + "\n") - print ("Slow down the original sound.") - rate = 0.2 - slowed_sound = slow_down_sound(sound, rate) - - slowed_sound.play() - while mixer.get_busy(): - pygame.time.wait(200) - - - print ("-" * 30) - print ("echoing") - print ("-" * 30) - - t1 = time.time() - sound2 = make_echo(sound, samples_per_second) - print ("time to make echo %i" % (time.time() - t1,)) - - - print ("original sound") - sound.play() - while mixer.get_busy(): - pygame.time.wait(200) - - print ("echoed sound") - sound2.play() - while mixer.get_busy(): - pygame.time.wait(200) - - - sound = mixer.Sound(os.path.join(main_dir, 'data', 'secosmic_lo.wav')) - - t1 = time.time() - sound3 = make_echo(sound, samples_per_second) - print ("time to make echo %i" % (time.time() - t1,)) - - print ("original sound") - sound.play() - while mixer.get_busy(): - pygame.time.wait(200) - - - print ("echoed sound") - sound3.play() - while mixer.get_busy(): - pygame.time.wait(200) - -if __name__ == '__main__': - main() diff --git a/venv/lib/python3.7/site-packages/pygame/examples/stars.py b/venv/lib/python3.7/site-packages/pygame/examples/stars.py deleted file mode 100644 index 22e5ef6..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/stars.py +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/env python - -"""A simple starfield example. Note you can move the 'center' of -the starfield by leftclicking in the window. 
This example show -the basics of creating a window, simple pixel plotting, and input -event management""" - - -import random, math, pygame -from pygame.locals import * - -#constants -WINSIZE = [640, 480] -WINCENTER = [320, 240] -NUMSTARS = 150 - - -def init_star(): - "creates new star values" - dir = random.randrange(100000) - velmult = random.random()*.6+.4 - vel = [math.sin(dir) * velmult, math.cos(dir) * velmult] - return vel, WINCENTER[:] - - -def initialize_stars(): - "creates a new starfield" - stars = [] - for x in range(NUMSTARS): - star = init_star() - vel, pos = star - steps = random.randint(0, WINCENTER[0]) - pos[0] = pos[0] + (vel[0] * steps) - pos[1] = pos[1] + (vel[1] * steps) - vel[0] = vel[0] * (steps * .09) - vel[1] = vel[1] * (steps * .09) - stars.append(star) - move_stars(stars) - return stars - - -def draw_stars(surface, stars, color): - "used to draw (and clear) the stars" - for vel, pos in stars: - pos = (int(pos[0]), int(pos[1])) - surface.set_at(pos, color) - - -def move_stars(stars): - "animate the star values" - for vel, pos in stars: - pos[0] = pos[0] + vel[0] - pos[1] = pos[1] + vel[1] - if not 0 <= pos[0] <= WINSIZE[0] or not 0 <= pos[1] <= WINSIZE[1]: - vel[:], pos[:] = init_star() - else: - vel[0] = vel[0] * 1.05 - vel[1] = vel[1] * 1.05 - - -def main(): - "This is the starfield code" - #create our starfield - random.seed() - stars = initialize_stars() - clock = pygame.time.Clock() - #initialize and prepare screen - pygame.init() - screen = pygame.display.set_mode(WINSIZE) - pygame.display.set_caption('pygame Stars Example') - white = 255, 240, 200 - black = 20, 20, 40 - screen.fill(black) - - #main game loop - done = 0 - while not done: - draw_stars(screen, stars, black) - move_stars(stars) - draw_stars(screen, stars, white) - pygame.display.update() - for e in pygame.event.get(): - if e.type == QUIT or (e.type == KEYUP and e.key == K_ESCAPE): - done = 1 - break - elif e.type == MOUSEBUTTONDOWN and e.button == 1: - WINCENTER[:] = 
list(e.pos) - clock.tick(50) - - -# if python says run, then we should run -if __name__ == '__main__': - main() - - diff --git a/venv/lib/python3.7/site-packages/pygame/examples/testsprite.py b/venv/lib/python3.7/site-packages/pygame/examples/testsprite.py deleted file mode 100644 index 123c0c1..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/testsprite.py +++ /dev/null @@ -1,267 +0,0 @@ -#!/usr/bin/env python -# like the testsprite.c that comes with sdl, this pygame version shows -# lots of sprites moving around. - - -import pygame, sys, os -from pygame.locals import * -from random import randint -from time import time -import pygame.joystick -from pygame.compat import xrange_ - -##import FastRenderGroup as FRG -import pygame.sprite as FRG - -if "-psyco" in sys.argv: - try: - import psyco - psyco.full() - except Exception: - print ("No psyco for you! psyco failed to import and run.") - -main_dir = os.path.split(os.path.abspath(__file__))[0] -data_dir = os.path.join(main_dir, 'data') - - - - - - -# use this to use update rects or not. -# If the screen is mostly full, then update rects are not useful. 
-update_rects = True -if "-update_rects" in sys.argv: - update_rects = True -if "-noupdate_rects" in sys.argv: - update_rects = False - -use_static = False -if "-static" in sys.argv: - use_static = True - - -use_FastRenderGroup = False -if "-FastRenderGroup" in sys.argv: - update_rects = True - use_FastRenderGroup = True - - -flags = 0 -if "-flip" in sys.argv: - flags ^= DOUBLEBUF - -if "-fullscreen" in sys.argv: - flags ^= FULLSCREEN - -if "-sw" in sys.argv: - flags ^= SWSURFACE - -use_rle = True - -if "-hw" in sys.argv: - flags ^= HWSURFACE - use_rle = False - - -screen_dims = [640, 480] - -if "-height" in sys.argv: - i = sys.argv.index("-height") - screen_dims[1] = int(sys.argv[i+1]) - -if "-width" in sys.argv: - i = sys.argv.index("-width") - screen_dims[0] = int(sys.argv[i+1]) - -if "-alpha" in sys.argv: - use_alpha = True -else: - use_alpha = False - -print (screen_dims) - - -##class Thingy(pygame.sprite.Sprite): -## images = None -## def __init__(self): -## pygame.sprite.Sprite.__init__(self) -## self.image = Thingy.images[0] -## self.rect = self.image.get_rect() -## self.rect.x = randint(0, screen_dims[0]) -## self.rect.y = randint(0, screen_dims[1]) -## #self.vel = [randint(-10, 10), randint(-10, 10)] -## self.vel = [randint(-1, 1), randint(-1, 1)] -## -## def move(self): -## for i in [0, 1]: -## nv = self.rect[i] + self.vel[i] -## if nv >= screen_dims[i] or nv < 0: -## self.vel[i] = -self.vel[i] -## nv = self.rect[i] + self.vel[i] -## self.rect[i] = nv - -class Thingy(FRG.DirtySprite): - images = None - def __init__(self): -## pygame.sprite.Sprite.__init__(self) - FRG.DirtySprite.__init__(self) - self.image = Thingy.images[0] - self.rect = self.image.get_rect() - self.rect.x = randint(0, screen_dims[0]) - self.rect.y = randint(0, screen_dims[1]) - #self.vel = [randint(-10, 10), randint(-10, 10)] - self.vel = [randint(-1, 1), randint(-1, 1)] - self.dirty = 2 - - def update(self): - for i in [0, 1]: - nv = self.rect[i] + self.vel[i] - if nv >= 
screen_dims[i] or nv < 0: - self.vel[i] = -self.vel[i] - nv = self.rect[i] + self.vel[i] - self.rect[i] = nv - -class Static(FRG.DirtySprite): - images = None - def __init__(self): - FRG.DirtySprite.__init__(self) - self.image = Static.images[0] - self.rect = self.image.get_rect() - self.rect.x = randint(0, 3*screen_dims[0]/4) - self.rect.y = randint(0, 3*screen_dims[1]/4) - - - -def main(update_rects = True, - use_static = False, - use_FastRenderGroup = False, - screen_dims = [640, 480], - use_alpha = False, - flags = 0, - ): - """Show lots of sprites moving around - - Optional keyword arguments: - update_rects - use the RenderUpdate sprite group class (default True) - use_static - include non-moving images (default False) - use_FastRenderGroup - Use the FastRenderGroup sprite group (default False) - screen_dims - Pygame window dimensions (default [640, 480]) - use_alpha - use alpha blending (default False) - flags - additional display mode flags (default no addiontal flags) - - """ - - if use_FastRenderGroup: - update_rects = True - - - #pygame.init() - pygame.display.init() - - - - #if "-fast" in sys.argv: - - screen = pygame.display.set_mode(screen_dims, flags) - - - # this is mainly for GP2X, so it can quit. 
- pygame.joystick.init() - num_joysticks = pygame.joystick.get_count() - if num_joysticks > 0: - stick = pygame.joystick.Joystick(0) - stick.init() # now we will receive events for the joystick - - - screen.fill([0,0,0]) - pygame.display.flip() - sprite_surface = pygame.image.load(os.path.join(data_dir, "asprite.bmp")) - sprite_surface2 = pygame.image.load(os.path.join(data_dir, "static.png")) - - if use_rle: - sprite_surface.set_colorkey([0xFF, 0xFF, 0xFF], SRCCOLORKEY|RLEACCEL) - sprite_surface2.set_colorkey([0xFF, 0xFF, 0xFF], SRCCOLORKEY|RLEACCEL) - else: - sprite_surface.set_colorkey([0xFF, 0xFF, 0xFF], SRCCOLORKEY) - sprite_surface2.set_colorkey([0xFF, 0xFF, 0xFF], SRCCOLORKEY) - - if use_alpha: - sprite_surface = sprite_surface.convert_alpha() - sprite_surface2 = sprite_surface2.convert_alpha() - else: - sprite_surface = sprite_surface.convert() - sprite_surface2 = sprite_surface2.convert() - - Thingy.images = [sprite_surface] - if use_static: - Static.images = [sprite_surface2] - - if len(sys.argv) > 1: - try: - numsprites = int(sys.argv[-1]) - except Exception: - numsprites = 100 - else: - numsprites = 100 - sprites = None - if use_FastRenderGroup: -## sprites = FRG.FastRenderGroup() - sprites = FRG.LayeredDirty() - else: - if update_rects: - sprites = pygame.sprite.RenderUpdates() - else: - sprites = pygame.sprite.Group() - - for i in xrange_(0, numsprites): - if use_static and i%2==0: - sprites.add(Static()) - sprites.add(Thingy()) - - done = False - frames = 0 - start = time() - - background = pygame.Surface(screen.get_size()) - background = background.convert() - background.fill([0,0,0]) - - - while not done: - if not update_rects: - screen.fill([0,0,0]) - -## for sprite in sprites: -## sprite.move() - - if update_rects: - sprites.clear(screen, background) - sprites.update() - - rects = sprites.draw(screen) - if update_rects: - pygame.display.update(rects) - else: - pygame.display.flip() - - - for event in pygame.event.get(): - if event.type in 
[KEYDOWN, QUIT, JOYBUTTONDOWN]: - done = True - - - frames += 1 - end = time() - print ("FPS: %f" % (frames / ((end - start)))) - pygame.quit() - - - -if __name__ == "__main__": - main( update_rects, - use_static, - use_FastRenderGroup, - screen_dims, - use_alpha, - flags ) diff --git a/venv/lib/python3.7/site-packages/pygame/examples/textinput.py b/venv/lib/python3.7/site-packages/pygame/examples/textinput.py deleted file mode 100644 index 6bb6124..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/textinput.py +++ /dev/null @@ -1,151 +0,0 @@ -import pygame, sys -import pygame.freetype - -###CONSTS -# Set to true or add 'showevent' in argv to see IME and KEYDOWN events -PRINT_EVENT = False -# frames per second, the general speed of the program -FPS = 50 -# size of window -WINDOWWIDTH, WINDOWHEIGHT = 640, 480 -BGCOLOR = (0, 0, 0) - -# position of chatlist and chatbox -CHATLIST_POS = pygame.Rect(0, 20, WINDOWWIDTH, 400) -CHATBOX_POS = pygame.Rect(0, 440, WINDOWWIDTH, 40) -CHATLIST_MAXSIZE = 20 - -TEXTCOLOR = (0,255,0) - -#Add fontname for each language, otherwise some text can't be correctly displayed. -FONTNAMES = ["notosanscjktcregular", "notosansmonocjktcregular" , - "notosansregular,", - "microsoftjhengheimicrosoftjhengheiuilight", - "microsoftyaheimicrosoftyaheiuilight", - "msgothicmsuigothicmspgothic", - "msmincho", - "Arial"] - -#Version check -if (pygame.get_sdl_version() < (2,0,0)): - raise Exception("This example requires SDL2.") - -#Initalize -pygame.init() -Screen = pygame.display.set_mode((WINDOWWIDTH,WINDOWHEIGHT)) -pygame.display.set_caption("TextInput example") -FPSClock = pygame.time.Clock() - -#Freetype -#"The font name can be a comma separated list of font names to search for." 
-FONTNAMES = ",".join(str(x) for x in FONTNAMES) -Font = pygame.freetype.SysFont(FONTNAMES, 24) -FontSmall = pygame.freetype.SysFont(FONTNAMES, 16) -print("Using font: " + Font.name) - -#Main loop process -def main(): - global BGCOLOR, PRINT_EVENT, CHATBOX_POS, CHATLIST_POS, CHATLIST_MAXSIZE - global FPSClock , Font, Screen - - """ - https://wiki.libsdl.org/SDL_HINT_IME_INTERNAL_EDITING - https://wiki.libsdl.org/Tutorials/TextInput - Candidate list not showing due to SDL2 problem ;w; - """ - pygame.key.start_text_input() - input_rect = pygame.Rect(80,80,320,40) - pygame.key.set_text_input_rect(input_rect) - - _IMEEditing = False - _IMEText = "" - _IMETextPos = 0 - _IMEEditingText = "" - _IMEEditingPos = 0 - ChatList = [] - - while True: - for event in pygame.event.get(): - if event.type == pygame.QUIT: - pygame.quit() - return - - elif event.type == pygame.KEYDOWN: - if (PRINT_EVENT): - print(event) - - if _IMEEditing: - if (len(_IMEEditingText) == 0): - _IMEEditing = False - continue - - if event.key == pygame.K_BACKSPACE: - if (len(_IMEText) > 0 and _IMETextPos > 0): - _IMEText = _IMEText[0:_IMETextPos-1] + _IMEText[_IMETextPos:] - _IMETextPos = max(0,_IMETextPos-1) - - elif event.key == pygame.K_DELETE: - _IMEText = _IMEText[0:_IMETextPos] + _IMEText[_IMETextPos+1:] - elif event.key == pygame.K_LEFT: - _IMETextPos = max(0,_IMETextPos-1) - elif event.key == pygame.K_RIGHT: - _IMETextPos = min(len(_IMEText),_IMETextPos+1) - - elif event.key in [pygame.K_RETURN, pygame.K_KP_ENTER] and len(event.unicode) == 0: - #Block if we have no text to append - if len(_IMEText) == 0: - continue - - #Append chat list - ChatList.append(_IMEText) - if (len(ChatList) > CHATLIST_MAXSIZE): - ChatList.pop(0) - _IMEText = "" - _IMETextPos = 0 - - elif event.type == pygame.TEXTEDITING: - if (PRINT_EVENT): - print(event) - _IMEEditing = True - _IMEEditingText = event.text - _IMEEditingPos = event.start - - elif event.type == pygame.TEXTINPUT: - if (PRINT_EVENT): - print(event) - 
_IMEEditing = False - _IMEEditingText = "" - _IMEText = _IMEText[0:_IMETextPos] + event.text + _IMEText[_IMETextPos:] - _IMETextPos += len(event.text) - - #Screen updates - Screen.fill(BGCOLOR) - - #Chat List updates - chat_height = CHATLIST_POS.height / CHATLIST_MAXSIZE - for i in range(len(ChatList)): - FontSmall.render_to(Screen, (CHATLIST_POS.x, CHATLIST_POS.y + i*chat_height), ChatList[i], TEXTCOLOR) - - #Chat box updates - start_pos = CHATBOX_POS.copy() - ime_textL = ">" + _IMEText[0:_IMETextPos] - ime_textM = _IMEEditingText[0:_IMEEditingPos] + "|" + _IMEEditingText[_IMEEditingPos:] - ime_textR = _IMEText[_IMETextPos:] - - rect_textL = Font.render_to(Screen, start_pos, ime_textL, TEXTCOLOR) - start_pos.x += rect_textL.width - - #Editing texts should be underlined - rect_textM = Font.render_to(Screen, start_pos, ime_textM, TEXTCOLOR, None, pygame.freetype.STYLE_UNDERLINE) - start_pos.x += rect_textM.width - rect_textr = Font.render_to(Screen, start_pos, ime_textR, TEXTCOLOR) - - pygame.display.update() - - FPSClock.tick(FPS) - -if __name__ == '__main__': - if 'showevent' in sys.argv: - PRINT_EVENT = True - - main() diff --git a/venv/lib/python3.7/site-packages/pygame/examples/vgrade.py b/venv/lib/python3.7/site-packages/pygame/examples/vgrade.py deleted file mode 100644 index ff0a6b3..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/vgrade.py +++ /dev/null @@ -1,103 +0,0 @@ -#!/usr/bin/env python - -"""This example demonstrates creating an image with numpy -python, and displaying that through SDL. You can look at the -method of importing numpy and pygame.surfarray. This method -will fail 'gracefully' if it is not available. -I've tried mixing in a lot of comments where the code might -not be self explanatory, nonetheless it may still seem a bit -strange. Learning to use numpy for images like this takes a -bit of learning, but the payoff is extremely fast image -manipulation in python. 
- -For Pygame 1.9.2 and up, this example also showcases a new feature -of surfarray.blit_surface: array broadcasting. If a source array -has either a width or height of 1, the array is repeatedly blitted -to the surface along that dimension to fill the surface. In fact, -a (1, 1) or (1, 1, 3) array results in a simple surface color fill. - -Just so you know how this breaks down. For each sampling of -time, 30% goes to each creating the gradient and blitting the -array. The final 40% goes to flipping/updating the display surface - -If using an SDL version at least 1.1.8 the window will have -no border decorations. - -The code also demonstrates use of the timer events.""" - - -import os, pygame -from pygame.locals import * - -try: - from numpy import * - from numpy.random import * -except ImportError: - raise SystemExit('This example requires numpy and the pygame surfarray module') - -pygame.surfarray.use_arraytype('numpy') - -timer = 0 -def stopwatch(message = None): - "simple routine to time python code" - global timer - if not message: - timer = pygame.time.get_ticks() - return - now = pygame.time.get_ticks() - runtime = (now - timer)/1000.0 + .001 - print ("%s %s %s" % - (message, runtime, ('seconds\t(%.2ffps)'%(1.0/runtime)))) - timer = now - - - -def VertGradientColumn(surf, topcolor, bottomcolor): - "creates a new 3d vertical gradient array" - topcolor = array(topcolor, copy=0) - bottomcolor = array(bottomcolor, copy=0) - diff = bottomcolor - topcolor - width, height = surf.get_size() - # create array from 0.0 to 1.0 triplets - column = arange(height, dtype='float')/height - column = repeat(column[:, newaxis], [3], 1) - # create a single column of gradient - column = topcolor + (diff * column).astype('int') - # make the column a 3d image column by adding X - column = column.astype('uint8')[newaxis,:,:] - #3d array into 2d array - return pygame.surfarray.map_array(surf, column) - - - -def DisplayGradient(surf): - "choose random colors and show them" - 
stopwatch() - colors = randint(0, 255, (2, 3)) - column = VertGradientColumn(surf, colors[0], colors[1]) - pygame.surfarray.blit_array(surf, column) - pygame.display.flip() - stopwatch('Gradient:') - - - -def main(): - pygame.init() - pygame.mixer.quit() # remove ALSA underflow messages for Debian squeeze - size = 600, 400 - os.environ['SDL_VIDEO_CENTERED'] = '1' - screen = pygame.display.set_mode(size, NOFRAME, 0) - - pygame.event.set_blocked(MOUSEMOTION) #keep our queue cleaner - pygame.time.set_timer(USEREVENT, 500) - - while 1: - event = pygame.event.wait() - if event.type in (QUIT, KEYDOWN, MOUSEBUTTONDOWN): - break - elif event.type == USEREVENT: - DisplayGradient(screen) - - - -if __name__ == '__main__': main() diff --git a/venv/lib/python3.7/site-packages/pygame/examples/video.py b/venv/lib/python3.7/site-packages/pygame/examples/video.py deleted file mode 100644 index 21bf4e1..0000000 --- a/venv/lib/python3.7/site-packages/pygame/examples/video.py +++ /dev/null @@ -1,100 +0,0 @@ -import pygame - - -if pygame.get_sdl_version()[0] < 2: - raise SystemExit('This example requires pygame 2 and SDL2.') - -import os -data_dir = os.path.join(os.path.split(os.path.abspath(__file__))[0], - 'data') - -from pygame._sdl2 import ( - Window, - Texture, - Renderer, - get_drivers, - messagebox, -) - -def load_img(file): - return pygame.image.load(os.path.join(data_dir, file)) - -pygame.display.init() -pygame.key.set_repeat(1000, 10) - -for driver in get_drivers(): - print(driver) - -import random -answer = messagebox("I will open two windows! 
Continue?", "Hello!", info=True, - buttons=('Yes', 'No', 'Chance'), - return_button=0, escape_button=1) -if answer == 1 or (answer == 2 and random.random() < .5): - import sys - sys.exit(0) - -win = Window('asdf', resizable=True) -renderer = Renderer(win) -tex = Texture.from_surface(renderer, load_img('alien1.gif')) - -running = True - -x, y = 250, 50 -clock = pygame.time.Clock() - -backgrounds = [(255,0,0,255), (0,255,0,255), (0,0,255,255)] -bg_index = 0 - -renderer.draw_color = backgrounds[bg_index] - -win2 = Window('2nd window', size=(256, 256), always_on_top=True) -win2.opacity = 0.5 -win2.set_icon(load_img('bomb.gif')) -renderer2 = Renderer(win2) -tex2 = Texture.from_surface(renderer2, load_img('asprite.bmp')) -renderer2.clear() -renderer2.copy(tex2) -renderer2.present() -del tex2 - -full = 0 - -srcrect = (0, 0, tex.width, tex.height) - -while running: - for event in pygame.event.get(): - if event.type == pygame.QUIT: - running = False - elif getattr(event, 'window', None) == win2: - if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE or\ - event.type == pygame.WINDOWEVENT and event.event == pygame.WINDOWEVENT_CLOSE: - win2.destroy() - elif event.type == pygame.KEYDOWN: - if event.key == pygame.K_ESCAPE: - running = False - elif event.key == pygame.K_LEFT: - x -= 5 - elif event.key == pygame.K_RIGHT: - x += 5 - elif event.key == pygame.K_DOWN: - y += 5 - elif event.key == pygame.K_UP: - y -= 5 - elif event.key == pygame.K_f: - if full == 0: - win.set_fullscreen(True) - full = 1 - else: - win.set_windowed() - full = 0 - elif event.key == pygame.K_SPACE: - bg_index = (bg_index + 1) % len(backgrounds) - renderer.draw_color = backgrounds[bg_index] - - dstrect = (x, y, tex.width, tex.height) - renderer.clear() - renderer.copy(tex, srcrect, dstrect) - renderer.present() - - clock.tick(60) - win.title = str('FPS: {}'.format(clock.get_fps())) diff --git a/venv/lib/python3.7/site-packages/pygame/fastevent.cpython-37m-x86_64-linux-gnu.so 
b/venv/lib/python3.7/site-packages/pygame/fastevent.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 4b7190b..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/fastevent.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/font.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/font.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 06060c0..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/font.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/freesansbold.ttf b/venv/lib/python3.7/site-packages/pygame/freesansbold.ttf deleted file mode 100644 index a98562f..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/freesansbold.ttf and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/freetype.py b/venv/lib/python3.7/site-packages/pygame/freetype.py deleted file mode 100644 index 1a90753..0000000 --- a/venv/lib/python3.7/site-packages/pygame/freetype.py +++ /dev/null @@ -1,45 +0,0 @@ -"""Enhanced Pygame module for loading and rendering computer fonts""" - -import sys -from pygame._freetype import ( - Font, - STYLE_NORMAL, STYLE_OBLIQUE, STYLE_STRONG, STYLE_UNDERLINE, STYLE_WIDE, - STYLE_DEFAULT, - init, quit, get_init, - was_init, get_cache_size, get_default_font, get_default_resolution, - get_error, get_version, set_default_resolution, - _PYGAME_C_API, __PYGAMEinit__, - ) -from pygame.sysfont import match_font, get_fonts, SysFont as _SysFont -from pygame import compat - -def SysFont(name, size, bold=0, italic=0, constructor=None): - """pygame.ftfont.SysFont(name, size, bold=False, italic=False, constructor=None) -> Font - create a pygame Font from system font resources - - This will search the system fonts for the given font - name. You can also enable bold or italic styles, and - the appropriate system font will be selected if available. 
- - This will always return a valid Font object, and will - fallback on the builtin pygame font if the given font - is not found. - - Name can also be a comma separated list of names, in - which case set of names will be searched in order. Pygame - uses a small set of common font aliases, if the specific - font you ask for is not available, a reasonable alternative - may be used. - - if optional contructor is provided, it must be a function with - signature constructor(fontpath, size, bold, italic) which returns - a Font instance. If None, a pygame.freetype.Font object is created. - """ - if constructor is None: - def constructor(fontpath, size, bold, italic): - font = Font(fontpath, size) - font.strong = bold - font.oblique = italic - return font - - return _SysFont(name, size, bold, italic, constructor) diff --git a/venv/lib/python3.7/site-packages/pygame/ftfont.py b/venv/lib/python3.7/site-packages/pygame/ftfont.py deleted file mode 100644 index 78c9357..0000000 --- a/venv/lib/python3.7/site-packages/pygame/ftfont.py +++ /dev/null @@ -1,187 +0,0 @@ -"""pygame module for loading and rendering fonts (freetype alternative)""" - -__all__ = ['Font', 'init', 'quit', 'get_default_font', 'get_init', 'SysFont'] - -from pygame._freetype import init, Font as _Font, get_default_resolution -from pygame._freetype import quit, get_default_font, get_init as _get_init -from pygame._freetype import __PYGAMEinit__ -from pygame.sysfont import match_font, get_fonts, SysFont as _SysFont -from pygame import encode_file_path -from pygame.compat import bytes_, unicode_, as_unicode, as_bytes -from pygame import Surface as _Surface, Color as _Color, SRCALPHA as _SRCALPHA - -class Font(_Font): - """Font(filename, size) -> Font - Font(object, size) -> Font - create a new Font object from a file (freetype alternative) - - This Font type differs from font.Font in that it can render glyphs - for Unicode code points in the supplementary planes (> 0xFFFF). 
- """ - - __encode_file_path = staticmethod(encode_file_path) - __get_default_resolution = staticmethod(get_default_resolution) - __default_font = encode_file_path(get_default_font()) - - __unull = as_unicode(r"\x00") - __bnull = as_bytes("\x00") - - def __init__(self, file, size=-1): - if size <= 1: - size = 1 - if isinstance(file, unicode_): - try: - bfile = self.__encode_file_path(file, ValueError) - except ValueError: - bfile = '' - else: - bfile = file - if isinstance(bfile, bytes_) and bfile == self.__default_font: - file = None - if file is None: - resolution = int(self.__get_default_resolution() * 0.6875) - if resolution == 0: - kwds['resolution'] = 1 - else: - resolution = 0 - super(Font, self).__init__(file, size=size, resolution=resolution) - self.strength = 1.0 / 12.0 - self.kerning = False - self.origin = True - self.pad = True - self.ucs4 = True - self.underline_adjustment = 1.0 - - def render(self, text, antialias, color, background=None): - """render(text, antialias, color, background=None) -> Surface - draw text on a new Surface""" - - if text is None: - text = "" - if (isinstance(text, unicode_) and # conditional and - self.__unull in text): - raise ValueError("A null character was found in the text") - if (isinstance(text, bytes_) and # conditional and - self.__bnull in text): - raise ValueError("A null character was found in the text") - save_antialiased = self.antialiased - self.antialiased = bool(antialias) - try: - s, r = super(Font, self).render(text, color, background) - return s - finally: - self.antialiased = save_antialiased - - def set_bold(self, value): - """set_bold(bool) -> None - enable fake rendering of bold text""" - - self.wide = bool(value) - - def get_bold(self): - """get_bold() -> bool - check if text will be rendered bold""" - - return self.wide - - def set_italic(self, value): - """set_italic(bool) -> None - enable fake rendering of italic text""" - - self.oblique = bool(value) - - def get_italic(self): - """get_italic() -> 
bool - check if the text will be rendered italic""" - - return self.oblique - - def set_underline(self, value): - """set_underline(bool) -> None - control if text is rendered with an underline""" - - self.underline = bool(value) - - def get_underline(self): - """set_bold(bool) -> None - enable fake rendering of bold text""" - - return self.underline - - def metrics(self, text): - """metrics(text) -> list - Gets the metrics for each character in the pased string.""" - - return self.get_metrics(text) - - def get_ascent(self): - """get_ascent() -> int - get the ascent of the font""" - - return self.get_sized_ascender() - - def get_descent(self): - """get_descent() -> int - get the descent of the font""" - - return self.get_sized_descender() - - def get_height(self): - """get_height() -> int - get the height of the font""" - - return self.get_sized_ascender() - self.get_sized_descender() + 1 - - def get_linesize(self): - """get_linesize() -> int - get the line space of the font text""" - - return self.get_sized_height(); - - def size(self, text): - """size(text) -> (width, height) - determine the amount of space needed to render text""" - - return self.get_rect(text).size - -FontType = Font - -def get_init(): - """get_init() -> bool - true if the font module is initialized""" - - return _get_init() - -def SysFont(name, size, bold=0, italic=0, constructor=None): - """pygame.ftfont.SysFont(name, size, bold=False, italic=False, constructor=None) -> Font - create a pygame Font from system font resources (freetype alternative) - - This will search the system fonts for the given font - name. You can also enable bold or italic styles, and - the appropriate system font will be selected if available. - - This will always return a valid Font object, and will - fallback on the builtin pygame font if the given font - is not found. - - Name can also be a comma separated list of names, in - which case set of names will be searched in order. 
Pygame - uses a small set of common font aliases, if the specific - font you ask for is not available, a reasonable alternative - may be used. - - if optional contructor is provided, it must be a function with - signature constructor(fontpath, size, bold, italic) which returns - a Font instance. If None, a pygame.ftfont.Font object is created. - """ - if constructor is None: - def constructor(fontpath, size, bold, italic): - font = Font(fontpath, size) - font.set_bold(bold) - font.set_italic(italic) - return font - - return _SysFont(name, size, bold, italic, constructor) - -del _Font, get_default_resolution, encode_file_path, as_unicode, as_bytes - diff --git a/venv/lib/python3.7/site-packages/pygame/gfxdraw.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/gfxdraw.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index fc6f1cb..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/gfxdraw.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/image.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/image.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 94f95ff..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/image.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/imageext.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/imageext.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index fb40f7e..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/imageext.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/joystick.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/joystick.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index e3d2182..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/joystick.cpython-37m-x86_64-linux-gnu.so and 
/dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/key.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/key.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 38cc66b..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/key.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/locals.py b/venv/lib/python3.7/site-packages/pygame/locals.py deleted file mode 100644 index 9b1f2fb..0000000 --- a/venv/lib/python3.7/site-packages/pygame/locals.py +++ /dev/null @@ -1,30 +0,0 @@ -## pygame - Python Game Library -## Copyright (C) 2000-2003 Pete Shinners -## -## This library is free software; you can redistribute it and/or -## modify it under the terms of the GNU Library General Public -## License as published by the Free Software Foundation; either -## version 2 of the License, or (at your option) any later version. -## -## This library is distributed in the hope that it will be useful, -## but WITHOUT ANY WARRANTY; without even the implied warranty of -## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -## Library General Public License for more details. 
-## -## You should have received a copy of the GNU Library General Public -## License along with this library; if not, write to the Free -## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -## -## Pete Shinners -## pete@shinners.org - - - -"""Set of functions from PyGame that are handy to have in -the local namespace for your module""" - -from pygame.constants import * -from pygame.rect import Rect -import pygame.color as color -Color = color.Color - diff --git a/venv/lib/python3.7/site-packages/pygame/macosx.py b/venv/lib/python3.7/site-packages/pygame/macosx.py deleted file mode 100644 index dfa35fb..0000000 --- a/venv/lib/python3.7/site-packages/pygame/macosx.py +++ /dev/null @@ -1,22 +0,0 @@ -import platform -import os -import sys -from pygame.pkgdata import getResource -from pygame import sdlmain_osx - -__all__ = ['Video_AutoInit'] - -def Video_AutoInit(): - """Called from the base.c just before display module is initialized.""" - if 'Darwin' in platform.platform(): - if not sdlmain_osx.RunningFromBundleWithNSApplication(): - try: - default_icon_data = getResource('pygame_icon.tiff').read() - except IOError: - default_icon_data = None - except NotImplementedError: - default_icon_data = None - sdlmain_osx.InstallNSApplication(default_icon_data) - if (os.getcwd() == '/') and len(sys.argv) > 1: - os.chdir(os.path.dirname(sys.argv[0])) - return True diff --git a/venv/lib/python3.7/site-packages/pygame/mask.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/mask.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 8360ebe..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/mask.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/math.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/math.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 7e77a3f..0000000 Binary files 
a/venv/lib/python3.7/site-packages/pygame/math.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/midi.py b/venv/lib/python3.7/site-packages/pygame/midi.py deleted file mode 100644 index 1d0ab2e..0000000 --- a/venv/lib/python3.7/site-packages/pygame/midi.py +++ /dev/null @@ -1,728 +0,0 @@ -"""pygame.midi -pygame module for interacting with midi input and output. - -The midi module can send output to midi devices, and get input -from midi devices. It can also list midi devices on the system. - -Including real midi devices, and virtual ones. - -It uses the portmidi library. Is portable to which ever platforms -portmidi supports (currently windows, OSX, and linux). - -This uses pyportmidi for now, but may use its own bindings at some -point in the future. The pyportmidi bindings are included with pygame. - -New in pygame 1.9.0. -""" - - -#TODO: -# - finish writing tests. -# - likely as interactive tests... so you'd need to plug in a midi device. -# - create a background thread version for input threads. -# - that can automatically inject input into the event queue -# once the input object is running. Like joysticks. - - - -import atexit -import math - -import pygame -import pygame.locals - - - -# -MIDIIN = pygame.locals.USEREVENT + 10 -MIDIOUT = pygame.locals.USEREVENT + 11 - -_init = False -_pypm = None - - -__all__ = [ - "Input", - "MIDIIN", - "MIDIOUT", - "MidiException", - "Output", - "get_count", - "get_default_input_id", - "get_default_output_id", - "get_device_info", - "init", - "midis2events", - "quit", - "get_init", - "time", - "frequency_to_midi", - "midi_to_frequency", - "midi_to_ansi_note", -] - -__theclasses__ = ["Input", "Output"] - - -def init(): - """initialize the midi module - pygame.midi.init(): return None - - Call the initialisation function before using the midi module. - - It is safe to call this more than once. 
- """ - global _init, _pypm - if not _init: - import pygame.pypm - _pypm = pygame.pypm - - _pypm.Initialize() - _init = True - atexit.register(quit) - - -def quit(): - """uninitialize the midi module - pygame.midi.quit(): return None - - - Called automatically atexit if you don't call it. - - It is safe to call this function more than once. - """ - global _init, _pypm - if _init: - # TODO: find all Input and Output classes and close them first? - _pypm.Terminate() - _init = False - del _pypm - #del pygame._pypm - - -def get_init(): - """returns True if the midi module is currently initialized - pygame.midi.get_init(): return bool - - Returns True if the pygame.midi module is currently initialized. - - New in pygame 1.9.5. - """ - return _init - - -def _check_init(): - if not _init: - raise RuntimeError("pygame.midi not initialised.") - -def get_count(): - """gets the number of devices. - pygame.midi.get_count(): return num_devices - - - Device ids range from 0 to get_count() -1 - """ - _check_init() - return _pypm.CountDevices() - - - - -def get_default_input_id(): - """gets default input device number - pygame.midi.get_default_input_id(): return default_id - - - Return the default device ID or -1 if there are no devices. - The result can be passed to the Input()/Ouput() class. - - On the PC, the user can specify a default device by - setting an environment variable. For example, to use device #1. - - set PM_RECOMMENDED_INPUT_DEVICE=1 - - The user should first determine the available device ID by using - the supplied application "testin" or "testout". - - In general, the registry is a better place for this kind of info, - and with USB devices that can come and go, using integers is not - very reliable for device identification. 
Under Windows, if - PM_RECOMMENDED_OUTPUT_DEVICE (or PM_RECOMMENDED_INPUT_DEVICE) is - *NOT* found in the environment, then the default device is obtained - by looking for a string in the registry under: - HKEY_LOCAL_MACHINE/SOFTWARE/PortMidi/Recommended_Input_Device - and HKEY_LOCAL_MACHINE/SOFTWARE/PortMidi/Recommended_Output_Device - for a string. The number of the first device with a substring that - matches the string exactly is returned. For example, if the string - in the registry is "USB", and device 1 is named - "In USB MidiSport 1x1", then that will be the default - input because it contains the string "USB". - - In addition to the name, get_device_info() returns "interf", which - is the interface name. (The "interface" is the underlying software - system or API used by PortMidi to access devices. Examples are - MMSystem, DirectX (not implemented), ALSA, OSS (not implemented), etc.) - At present, the only Win32 interface is "MMSystem", the only Linux - interface is "ALSA", and the only Max OS X interface is "CoreMIDI". - To specify both the interface and the device name in the registry, - separate the two with a comma and a space, e.g.: - MMSystem, In USB MidiSport 1x1 - In this case, the string before the comma must be a substring of - the "interf" string, and the string after the space must be a - substring of the "name" name string in order to match the device. - - Note: in the current release, the default is simply the first device - (the input or output device with the lowest PmDeviceID). - """ - return _pypm.GetDefaultInputDeviceID() - - - - -def get_default_output_id(): - """gets default output device number - pygame.midi.get_default_output_id(): return default_id - - - Return the default device ID or -1 if there are no devices. - The result can be passed to the Input()/Ouput() class. - - On the PC, the user can specify a default device by - setting an environment variable. For example, to use device #1. 
- - set PM_RECOMMENDED_OUTPUT_DEVICE=1 - - The user should first determine the available device ID by using - the supplied application "testin" or "testout". - - In general, the registry is a better place for this kind of info, - and with USB devices that can come and go, using integers is not - very reliable for device identification. Under Windows, if - PM_RECOMMENDED_OUTPUT_DEVICE (or PM_RECOMMENDED_INPUT_DEVICE) is - *NOT* found in the environment, then the default device is obtained - by looking for a string in the registry under: - HKEY_LOCAL_MACHINE/SOFTWARE/PortMidi/Recommended_Input_Device - and HKEY_LOCAL_MACHINE/SOFTWARE/PortMidi/Recommended_Output_Device - for a string. The number of the first device with a substring that - matches the string exactly is returned. For example, if the string - in the registry is "USB", and device 1 is named - "In USB MidiSport 1x1", then that will be the default - input because it contains the string "USB". - - In addition to the name, get_device_info() returns "interf", which - is the interface name. (The "interface" is the underlying software - system or API used by PortMidi to access devices. Examples are - MMSystem, DirectX (not implemented), ALSA, OSS (not implemented), etc.) - At present, the only Win32 interface is "MMSystem", the only Linux - interface is "ALSA", and the only Max OS X interface is "CoreMIDI". - To specify both the interface and the device name in the registry, - separate the two with a comma and a space, e.g.: - MMSystem, In USB MidiSport 1x1 - In this case, the string before the comma must be a substring of - the "interf" string, and the string after the space must be a - substring of the "name" name string in order to match the device. - - Note: in the current release, the default is simply the first device - (the input or output device with the lowest PmDeviceID). 
- """ - _check_init() - return _pypm.GetDefaultOutputDeviceID() - - -def get_device_info(an_id): - """ returns information about a midi device - pygame.midi.get_device_info(an_id): return (interf, name, input, output, opened) - - interf - a text string describing the device interface, eg 'ALSA'. - name - a text string for the name of the device, eg 'Midi Through Port-0' - input - 0, or 1 if the device is an input device. - output - 0, or 1 if the device is an output device. - opened - 0, or 1 if the device is opened. - - If the id is out of range, the function returns None. - """ - _check_init() - return _pypm.GetDeviceInfo(an_id) - - -class Input(object): - """Input is used to get midi input from midi devices. - Input(device_id) - Input(device_id, buffer_size) - - buffer_size - the number of input events to be buffered waiting to - be read using Input.read() - """ - - def __init__(self, device_id, buffer_size=4096): - """ - The buffer_size specifies the number of input events to be buffered - waiting to be read using Input.read(). - """ - _check_init() - - if device_id == -1: - raise MidiException("Device id is -1, not a valid output id. -1 usually means there were no default Output devices.") - - try: - r = get_device_info(device_id) - except TypeError: - raise TypeError("an integer is required") - except OverflowError: - raise OverflowError("long int too large to convert to int") - - # and now some nasty looking error checking, to provide nice error - # messages to the kind, lovely, midi using people of whereever. 
- if r: - interf, name, input, output, opened = r - if input: - try: - self._input = _pypm.Input(device_id, buffer_size) - except TypeError: - raise TypeError("an integer is required") - self.device_id = device_id - - elif output: - raise MidiException("Device id given is not a valid input id, it is an output id.") - else: - raise MidiException("Device id given is not a valid input id.") - else: - raise MidiException("Device id invalid, out of range.") - - - - - def _check_open(self): - if self._input is None: - raise MidiException("midi not open.") - - - - def close(self): - """ closes a midi stream, flushing any pending buffers. - Input.close(): return None - - PortMidi attempts to close open streams when the application - exits -- this is particularly difficult under Windows. - """ - _check_init() - if not (self._input is None): - self._input.Close() - self._input = None - - - - def read(self, num_events): - """reads num_events midi events from the buffer. - Input.read(num_events): return midi_event_list - - Reads from the Input buffer and gives back midi events. - [[[status,data1,data2,data3],timestamp], - [[status,data1,data2,data3],timestamp],...] - """ - _check_init() - self._check_open() - return self._input.Read(num_events) - - - def poll(self): - """returns true if there's data, or false if not. - Input.poll(): return Bool - - raises a MidiException on error. - """ - _check_init() - self._check_open() - - r = self._input.Poll() - if r == _pypm.TRUE: - return True - elif r == _pypm.FALSE: - return False - else: - err_text = GetErrorText(r) - raise MidiException( (r, err_text) ) - - - - -class Output(object): - """Output is used to send midi to an output device - Output(device_id) - Output(device_id, latency = 0) - Output(device_id, buffer_size = 4096) - Output(device_id, latency, buffer_size) - - The buffer_size specifies the number of output events to be - buffered waiting for output. 
(In some cases -- see below -- - PortMidi does not buffer output at all and merely passes data - to a lower-level API, in which case buffersize is ignored.) - - latency is the delay in milliseconds applied to timestamps to determine - when the output should actually occur. (If latency is < 0, 0 is - assumed.) - - If latency is zero, timestamps are ignored and all output is delivered - immediately. If latency is greater than zero, output is delayed until - the message timestamp plus the latency. (NOTE: time is measured - relative to the time source indicated by time_proc. Timestamps are - absolute, not relative delays or offsets.) In some cases, PortMidi - can obtain better timing than your application by passing timestamps - along to the device driver or hardware. Latency may also help you - to synchronize midi data to audio data by matching midi latency to - the audio buffer latency. - - """ - - def __init__(self, device_id, latency = 0, buffer_size = 4096): - """Output(device_id) - Output(device_id, latency = 0) - Output(device_id, buffer_size = 4096) - Output(device_id, latency, buffer_size) - - The buffer_size specifies the number of output events to be - buffered waiting for output. (In some cases -- see below -- - PortMidi does not buffer output at all and merely passes data - to a lower-level API, in which case buffersize is ignored.) - - latency is the delay in milliseconds applied to timestamps to determine - when the output should actually occur. (If latency is < 0, 0 is - assumed.) - - If latency is zero, timestamps are ignored and all output is delivered - immediately. If latency is greater than zero, output is delayed until - the message timestamp plus the latency. (NOTE: time is measured - relative to the time source indicated by time_proc. Timestamps are - absolute, not relative delays or offsets.) In some cases, PortMidi - can obtain better timing than your application by passing timestamps - along to the device driver or hardware. 
Latency may also help you - to synchronize midi data to audio data by matching midi latency to - the audio buffer latency. - """ - - _check_init() - self._aborted = 0 - - if device_id == -1: - raise MidiException("Device id is -1, not a valid output id. -1 usually means there were no default Output devices.") - - try: - r = get_device_info(device_id) - except TypeError: - raise TypeError("an integer is required") - except OverflowError: - raise OverflowError("long int too large to convert to int") - - # and now some nasty looking error checking, to provide nice error - # messages to the kind, lovely, midi using people of whereever. - if r: - interf, name, input, output, opened = r - if output: - try: - self._output = _pypm.Output(device_id, latency) - except TypeError: - raise TypeError("an integer is required") - self.device_id = device_id - - elif input: - raise MidiException("Device id given is not a valid output id, it is an input id.") - else: - raise MidiException("Device id given is not a valid output id.") - else: - raise MidiException("Device id invalid, out of range.") - - def _check_open(self): - if self._output is None: - raise MidiException("midi not open.") - - if self._aborted: - raise MidiException("midi aborted.") - - - def close(self): - """ closes a midi stream, flushing any pending buffers. - Output.close(): return None - - PortMidi attempts to close open streams when the application - exits -- this is particularly difficult under Windows. - """ - _check_init() - if not (self._output is None): - self._output.Close() - self._output = None - - def abort(self): - """terminates outgoing messages immediately - Output.abort(): return None - - The caller should immediately close the output port; - this call may result in transmission of a partial midi message. - There is no abort for Midi input because the user can simply - ignore messages in the buffer and close an input device at - any time. 
- """ - - _check_init() - if self._output: - self._output.Abort() - self._aborted = 1 - - - - - - def write(self, data): - """writes a list of midi data to the Output - Output.write(data) - - writes series of MIDI information in the form of a list: - write([[[status <,data1><,data2><,data3>],timestamp], - [[status <,data1><,data2><,data3>],timestamp],...]) - fields are optional - example: choose program change 1 at time 20000 and - send note 65 with velocity 100 500 ms later. - write([[[0xc0,0,0],20000],[[0x90,60,100],20500]]) - notes: - 1. timestamps will be ignored if latency = 0. - 2. To get a note to play immediately, send MIDI info with - timestamp read from function Time. - 3. understanding optional data fields: - write([[[0xc0,0,0],20000]]) is equivalent to - write([[[0xc0],20000]]) - - Can send up to 1024 elements in your data list, otherwise an - IndexError exception is raised. - """ - _check_init() - self._check_open() - - self._output.Write(data) - - def write_short(self, status, data1=0, data2=0): - """write_short(status <, data1><, data2>) - Output.write_short(status) - Output.write_short(status, data1 = 0, data2 = 0) - - output MIDI information of 3 bytes or less. - data fields are optional - status byte could be: - 0xc0 = program change - 0x90 = note on - etc. - data bytes are optional and assumed 0 if omitted - example: note 65 on with velocity 100 - write_short(0x90,65,100) - """ - _check_init() - self._check_open() - self._output.WriteShort(status, data1, data2) - - def write_sys_ex(self, when, msg): - """writes a timestamped system-exclusive midi message. 
- Output.write_sys_ex(when, msg) - - msg - can be a *list* or a *string* - when - a timestamp in miliseconds - example: - (assuming o is an onput MIDI stream) - o.write_sys_ex(0,'\\xF0\\x7D\\x10\\x11\\x12\\x13\\xF7') - is equivalent to - o.write_sys_ex(pygame.midi.time(), - [0xF0,0x7D,0x10,0x11,0x12,0x13,0xF7]) - """ - _check_init() - self._check_open() - self._output.WriteSysEx(when, msg) - - def note_on(self, note, velocity, channel=0): - """turns a midi note on. Note must be off. - Output.note_on(note, velocity, channel=0) - - note is an integer from 0 to 127 - velocity is an integer from 0 to 127 - channel is an integer from 0 to 15 - - Turn a note on in the output stream. The note must already - be off for this to work correctly. - """ - if not (0 <= channel <= 15): - raise ValueError("Channel not between 0 and 15.") - - self.write_short(0x90 + channel, note, velocity) - - def note_off(self, note, velocity=0, channel=0): - """turns a midi note off. Note must be on. - Output.note_off(note, velocity=0, channel=0) - - note is an integer from 0 to 127 - velocity is an integer from 0 to 127 (release velocity) - channel is an integer from 0 to 15 - - Turn a note off in the output stream. The note must already - be on for this to work correctly. - """ - if not (0 <= channel <= 15): - raise ValueError("Channel not between 0 and 15.") - - self.write_short(0x80 + channel, note, velocity) - - - def set_instrument(self, instrument_id, channel=0): - """select an instrument for a channel, with a value between 0 and 127 - Output.set_instrument(instrument_id, channel=0) - - Also called "patch change" or "program change". - """ - if not (0 <= instrument_id <= 127): - raise ValueError("Undefined instrument id: %d" % instrument_id) - - if not (0 <= channel <= 15): - raise ValueError("Channel not between 0 and 15.") - - self.write_short(0xc0 + channel, instrument_id) - - def pitch_bend(self, value=0, channel=0): - """modify the pitch of a channel. 
- Output.pitch_bend(value=0, channel=0) - - Adjust the pitch of a channel. The value is a signed integer - from -8192 to +8191. For example, 0 means "no change", +4096 is - typically a semitone higher, and -8192 is 1 whole tone lower (though - the musical range corresponding to the pitch bend range can also be - changed in some synthesizers). - - If no value is given, the pitch bend is returned to "no change". - """ - if not (0 <= channel <= 15): - raise ValueError("Channel not between 0 and 15.") - - if not (-8192 <= value <= 8191): - raise ValueError("Pitch bend value must be between " - "-8192 and +8191, not %d." % value) - - # "The 14 bit value of the pitch bend is defined so that a value of - # 0x2000 is the center corresponding to the normal pitch of the note - # (no pitch change)." so value=0 should send 0x2000 - value = value + 0x2000 - LSB = value & 0x7f # keep least 7 bits - MSB = value >> 7 - self.write_short(0xe0 + channel, LSB, MSB) - - - -""" -MIDI commands - - 0x80 Note Off (note_off) - 0x90 Note On (note_on) - 0xA0 Aftertouch - 0xB0 Continuous controller - 0xC0 Patch change (set_instrument?) - 0xD0 Channel Pressure - 0xE0 Pitch bend - 0xF0 (non-musical commands) -""" - - - -def time(): - """returns the current time in ms of the PortMidi timer - pygame.midi.time(): return time - - The time is reset to 0, when the module is inited. - """ - return _pypm.Time() - - - -def midis2events(midis, device_id): - """converts midi events to pygame events - pygame.midi.midis2events(midis, device_id): return [Event, ...] - - Takes a sequence of midi events and returns list of pygame events. 
- """ - evs = [] - for midi in midis: - - ((status,data1,data2,data3),timestamp) = midi - - e = pygame.event.Event(MIDIIN, - status=status, - data1=data1, - data2=data2, - data3=data3, - timestamp=timestamp, - vice_id = device_id) - evs.append( e ) - - - return evs - - - - - -class MidiException(Exception): - """exception that pygame.midi functions and classes can raise - MidiException(errno) - """ - def __init__(self, value): - self.parameter = value - def __str__(self): - return repr(self.parameter) - - - -def frequency_to_midi(freqency): - """ converts a frequency into a MIDI note. - - Rounds to the closest midi note. - - ::Examples:: - - >>> frequency_to_midi(27.5) - 21 - >>> frequency_to_midi(36.7) - 26 - >>> frequency_to_midi(4186.0) - 108 - """ - return int( - round( - 69 + ( - 12 * math.log(freqency / 440.0) - ) / math.log(2) - ) - ) - -def midi_to_frequency(midi_note): - """ Converts a midi note to a frequency. - - ::Examples:: - - >>> midi_to_frequency(21) - 27.5 - >>> midi_to_frequency(26) - 36.7 - >>> midi_to_frequency(108) - 4186.0 - """ - return round(440.0 * 2 ** ((midi_note - 69) * (1./12.)), 1) - -def midi_to_ansi_note(midi_note): - """ returns the Ansi Note name for a midi number. 
- - ::Examples:: - - >>> midi_to_ansi_note(21) - 'A0' - >>> midi_to_ansi_note(102) - 'F#7' - >>> midi_to_ansi_note(108) - 'C8' - """ - notes = ['A', 'A#', 'B', 'C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#'] - num_notes = 12 - note_name = notes[int(((midi_note - 21) % num_notes))] - note_number = int(round(((midi_note - 21) / 11.0))) - return '%s%s' % (note_name, note_number) diff --git a/venv/lib/python3.7/site-packages/pygame/mixer.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/mixer.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index e9b4b07..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/mixer.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/mixer_music.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/mixer_music.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index b042665..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/mixer_music.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/mouse.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/mouse.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 4b00016..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/mouse.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/newbuffer.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/newbuffer.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index f033d72..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/newbuffer.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/overlay.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/overlay.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index f4073cf..0000000 Binary files 
a/venv/lib/python3.7/site-packages/pygame/overlay.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/pixelarray.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/pixelarray.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 2e082f2..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/pixelarray.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/pixelcopy.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/pixelcopy.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 6b007d0..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/pixelcopy.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/pkgdata.py b/venv/lib/python3.7/site-packages/pygame/pkgdata.py deleted file mode 100644 index 25ad64a..0000000 --- a/venv/lib/python3.7/site-packages/pygame/pkgdata.py +++ /dev/null @@ -1,67 +0,0 @@ -""" -pkgdata is a simple, extensible way for a package to acquire data file -resources. - -The getResource function is equivalent to the standard idioms, such as -the following minimal implementation: - - import sys, os - - def getResource(identifier, pkgname=__name__): - pkgpath = os.path.dirname(sys.modules[pkgname].__file__) - path = os.path.join(pkgpath, identifier) - return file(os.path.normpath(path), mode='rb') - -When a __loader__ is present on the module given by __name__, it will defer -getResource to its get_data implementation and return it as a file-like -object (such as StringIO). 
-""" - -__all__ = ['getResource'] -import sys -import os -from pygame.compat import get_BytesIO -BytesIO = get_BytesIO() - -try: - from pkg_resources import resource_stream, resource_exists -except ImportError: - def resource_exists(package_or_requirement, resource_name): - return False - def resource_stream(package_of_requirement, resource_name): - raise NotImplementedError - -def getResource(identifier, pkgname=__name__): - """ - Acquire a readable object for a given package name and identifier. - An IOError will be raised if the resource can not be found. - - For example: - mydata = getResource('mypkgdata.jpg').read() - - Note that the package name must be fully qualified, if given, such - that it would be found in sys.modules. - - In some cases, getResource will return a real file object. In that - case, it may be useful to use its name attribute to get the path - rather than use it as a file-like object. For example, you may - be handing data off to a C API. - """ - if resource_exists(pkgname, identifier): - return resource_stream(pkgname, identifier) - - mod = sys.modules[pkgname] - fn = getattr(mod, '__file__', None) - if fn is None: - raise IOError("%s has no __file__!" 
% repr(mod)) - path = os.path.join(os.path.dirname(fn), identifier) - if sys.version_info < (3, 3): - loader = getattr(mod, '__loader__', None) - if loader is not None: - try: - data = loader.get_data(path) - except IOError: - pass - else: - return BytesIO(data) - return open(os.path.normpath(path), 'rb') diff --git a/venv/lib/python3.7/site-packages/pygame/pygame.ico b/venv/lib/python3.7/site-packages/pygame/pygame.ico deleted file mode 100644 index 06f699e..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/pygame.ico and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/pygame_icon.bmp b/venv/lib/python3.7/site-packages/pygame/pygame_icon.bmp deleted file mode 100644 index 74aea77..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/pygame_icon.bmp and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/pygame_icon.icns b/venv/lib/python3.7/site-packages/pygame/pygame_icon.icns deleted file mode 100644 index 2610a8d..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/pygame_icon.icns and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/pygame_icon.svg b/venv/lib/python3.7/site-packages/pygame/pygame_icon.svg deleted file mode 100644 index bbee79d..0000000 --- a/venv/lib/python3.7/site-packages/pygame/pygame_icon.svg +++ /dev/null @@ -1,259 +0,0 @@ - - -image/svg+xml - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/venv/lib/python3.7/site-packages/pygame/pygame_icon.tiff b/venv/lib/python3.7/site-packages/pygame/pygame_icon.tiff deleted file mode 100644 index e779143..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/pygame_icon.tiff and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/pypm.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/pypm.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 4a8c89f..0000000 Binary files 
a/venv/lib/python3.7/site-packages/pygame/pypm.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/rect.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/rect.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 961a878..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/rect.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/rwobject.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/rwobject.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 71e2f7d..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/rwobject.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/scrap.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/scrap.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index c2e59c8..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/scrap.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/sndarray.py b/venv/lib/python3.7/site-packages/pygame/sndarray.py deleted file mode 100644 index d45517a..0000000 --- a/venv/lib/python3.7/site-packages/pygame/sndarray.py +++ /dev/null @@ -1,103 +0,0 @@ -## pygame - Python Game Library -## Copyright (C) 2008 Marcus von Appen -## -## This library is free software; you can redistribute it and/or -## modify it under the terms of the GNU Library General Public -## License as published by the Free Software Foundation; either -## version 2 of the License, or (at your option) any later version. -## -## This library is distributed in the hope that it will be useful, -## but WITHOUT ANY WARRANTY; without even the implied warranty of -## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -## Library General Public License for more details. 
-## -## You should have received a copy of the GNU Library General Public -## License along with this library; if not, write to the Free -## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -## -## Marcus von Appen -## mva@sysfault.org - -"""pygame module for accessing sound sample data - -Functions to convert between numpy arrays and Sound -objects. This module will only be available when pygame can use the -external numpy package. - -Sound data is made of thousands of samples per second, and each sample -is the amplitude of the wave at a particular moment in time. For -example, in 22-kHz format, element number 5 of the array is the -amplitude of the wave after 5/22000 seconds. - -Each sample is an 8-bit or 16-bit integer, depending on the data format. -A stereo sound file has two values per sample, while a mono sound file -only has one. - -Supported array systems are - - numpy - -The array type to use can be changed at runtime using the use_arraytype() -method, which requires one of the above types as string. - -Sounds with 16-bit data will be treated as unsigned integers, -if the sound sample type requests this. -""" - -import pygame._numpysndarray as numpysnd - -def array (sound): - """pygame.sndarray.array(Sound): return array - - Copy Sound samples into an array. - - Creates a new array for the sound data and copies the samples. The - array will always be in the format returned from - pygame.mixer.get_init(). - """ - return numpysnd.array (sound) - -def samples (sound): - """pygame.sndarray.samples(Sound): return array - - Reference Sound samples into an array. - - Creates a new array that directly references the samples in a Sound - object. Modifying the array will change the Sound. The array will - always be in the format returned from pygame.mixer.get_init(). - """ - return numpysnd.samples (sound) - -def make_sound (array): - """pygame.sndarray.make_sound(array): return Sound - - Convert an array into a Sound object. 
- - Create a new playable Sound object from an array. The mixer module - must be initialized and the array format must be similar to the mixer - audio format. - """ - return numpysnd.make_sound (array) - -def use_arraytype (arraytype): - """pygame.sndarray.use_arraytype (arraytype): return None - - DEPRECATED - only numpy arrays are now supported. - """ - arraytype = arraytype.lower () - if arraytype != 'numpy': - raise ValueError("invalid array type") - -def get_arraytype (): - """pygame.sndarray.get_arraytype (): return str - - DEPRECATED - only numpy arrays are now supported. - """ - return 'numpy' - -def get_arraytypes (): - """pygame.sndarray.get_arraytypes (): return tuple - - DEPRECATED - only numpy arrays are now supported. - """ - return ('numpy',) diff --git a/venv/lib/python3.7/site-packages/pygame/sprite.py b/venv/lib/python3.7/site-packages/pygame/sprite.py deleted file mode 100644 index 6eea6ba..0000000 --- a/venv/lib/python3.7/site-packages/pygame/sprite.py +++ /dev/null @@ -1,1601 +0,0 @@ -## pygame - Python Game Library -## Copyright (C) 2000-2003, 2007 Pete Shinners -## (C) 2004 Joe Wreschnig -## This library is free software; you can redistribute it and/or -## modify it under the terms of the GNU Library General Public -## License as published by the Free Software Foundation; either -## version 2 of the License, or (at your option) any later version. -## -## This library is distributed in the hope that it will be useful, -## but WITHOUT ANY WARRANTY; without even the implied warranty of -## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -## Library General Public License for more details. 
-## -## You should have received a copy of the GNU Library General Public -## License along with this library; if not, write to the Free -## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -## -## Pete Shinners -## pete@shinners.org - -"""pygame module with basic game object classes - -This module contains several simple classes to be used within games. There -are the main Sprite class and several Group classes that contain Sprites. -The use of these classes is entirely optional when using Pygame. The classes -are fairly lightweight and only provide a starting place for the code -that is common to most games. - -The Sprite class is intended to be used as a base class for the different -types of objects in the game. There is also a base Group class that simply -stores sprites. A game could create new types of Group classes that operate -on specially customized Sprite instances they contain. - -The basic Sprite class can draw the Sprites it contains to a Surface. The -Group.draw() method requires that each Sprite have a Surface.image attribute -and a Surface.rect. The Group.clear() method requires these same attributes -and can be used to erase all the Sprites with background. There are also -more advanced Groups: pygame.sprite.RenderUpdates() and -pygame.sprite.OrderedUpdates(). - -Lastly, this module contains several collision functions. These help find -sprites inside multiple groups that have intersecting bounding rectangles. -To find the collisions, the Sprites are required to have a Surface.rect -attribute assigned. - -The groups are designed for high efficiency in removing and adding Sprites -to them. They also allow cheap testing to see if a Sprite already exists in -a Group. A given Sprite can exist in any number of groups. A game could use -some groups to control object rendering, and a completely separate set of -groups to control interaction or player movement. 
Instead of adding type -attributes or bools to a derived Sprite class, consider keeping the -Sprites inside organized Groups. This will allow for easier lookup later -in the game. - -Sprites and Groups manage their relationships with the add() and remove() -methods. These methods can accept a single or multiple group arguments for -membership. The default initializers for these classes also take a -single group or list of groups as argments for initial membership. It is safe -to repeatedly add and remove the same Sprite from a Group. - -While it is possible to design sprite and group classes that don't derive -from the Sprite and AbstractGroup classes below, it is strongly recommended -that you extend those when you create a new Sprite or Group class. - -Sprites are not thread safe, so lock them yourself if using threads. - -""" - -##todo -## a group that holds only the 'n' most recent elements. -## sort of like the GroupSingle class, but holding more -## than one sprite -## -## drawing groups that can 'automatically' store the area -## underneath so they can "clear" without needing a background -## function. obviously a little slower than normal, but nice -## to use in many situations. (also remember it must "clear" -## in the reverse order that it draws :]) -## -## the drawing groups should also be able to take a background -## function, instead of just a background surface. the function -## would take a surface and a rectangle on that surface to erase. -## -## perhaps more types of collision functions? the current two -## should handle just about every need, but perhaps more optimized -## specific ones that aren't quite so general but fit into common -## specialized cases. - -import pygame -from pygame import Rect -from pygame.time import get_ticks -from operator import truth - -# Python 3 does not have the callable function, but an equivalent can be made -# with the hasattr function. 
-if 'callable' not in dir(__builtins__): - callable = lambda obj: hasattr(obj, '__call__') - -# Don't depend on pygame.mask if it's not there... -try: - from pygame.mask import from_surface -except: - pass - - -class Sprite(object): - """simple base class for visible game objects - - pygame.sprite.Sprite(*groups): return Sprite - - The base class for visible game objects. Derived classes will want to - override the Sprite.update() method and assign Sprite.image and Sprite.rect - attributes. The initializer can accept any number of Group instances that - the Sprite will become a member of. - - When subclassing the Sprite class, be sure to call the base initializer - before adding the Sprite to Groups. - - """ - - def __init__(self, *groups): - self.__g = {} # The groups the sprite is in - if groups: - self.add(*groups) - - def add(self, *groups): - """add the sprite to groups - - Sprite.add(*groups): return None - - Any number of Group instances can be passed as arguments. The - Sprite will be added to the Groups it is not already a member of. - - """ - has = self.__g.__contains__ - for group in groups: - if hasattr(group, '_spritegroup'): - if not has(group): - group.add_internal(self) - self.add_internal(group) - else: - self.add(*group) - - def remove(self, *groups): - """remove the sprite from groups - - Sprite.remove(*groups): return None - - Any number of Group instances can be passed as arguments. The Sprite - will be removed from the Groups it is currently a member of. 
- - """ - has = self.__g.__contains__ - for group in groups: - if hasattr(group, '_spritegroup'): - if has(group): - group.remove_internal(self) - self.remove_internal(group) - else: - self.remove(*group) - - def add_internal(self, group): - self.__g[group] = 0 - - def remove_internal(self, group): - del self.__g[group] - - def update(self, *args): - """method to control sprite behavior - - Sprite.update(*args): - - The default implementation of this method does nothing; it's just a - convenient "hook" that you can override. This method is called by - Group.update() with whatever arguments you give it. - - There is no need to use this method if not using the convenience - method by the same name in the Group class. - - """ - pass - - def kill(self): - """remove the Sprite from all Groups - - Sprite.kill(): return None - - The Sprite is removed from all the Groups that contain it. This won't - change anything about the state of the Sprite. It is possible to - continue to use the Sprite after this method has been called, including - adding it to Groups. - - """ - for c in self.__g: - c.remove_internal(self) - self.__g.clear() - - def groups(self): - """list of Groups that contain this Sprite - - Sprite.groups(): return group_list - - Returns a list of all the Groups that contain this Sprite. - - """ - return list(self.__g) - - def alive(self): - """does the sprite belong to any groups - - Sprite.alive(): return bool - - Returns True when the Sprite belongs to one or more Groups. - """ - return truth(self.__g) - - def __repr__(self): - return "<%s sprite(in %d groups)>" % (self.__class__.__name__, len(self.__g)) - - -class DirtySprite(Sprite): - """a more featureful subclass of Sprite with more attributes - - pygame.sprite.DirtySprite(*groups): return DirtySprite - - Extra DirtySprite attributes with their default values: - - dirty = 1 - If set to 1, it is repainted and then set to 0 again. - If set to 2, it is always dirty (repainted each frame; - flag is not reset). 
- If set to 0, it is not dirty and therefore not repainted again. - - blendmode = 0 - It's the special_flags argument of Surface.blit; see the blendmodes in - the Surface.blit documentation - - source_rect = None - This is the source rect to use. Remember that it is relative to the top - left corner (0, 0) of self.image. - - visible = 1 - Normally this is 1. If set to 0, it will not be repainted. (If you - change visible to 1, you must set dirty to 1 for it to be erased from - the screen.) - - _layer = 0 - 0 is the default value but this is able to be set differently - when subclassing. - - """ - - def __init__(self, *groups): - - self.dirty = 1 - self.blendmode = 0 # pygame 1.8, referred to as special_flags in - # the documentation of Surface.blit - self._visible = 1 - self._layer = getattr(self, '_layer', 0) # Default 0 unless - # initialized differently. - self.source_rect = None - Sprite.__init__(self, *groups) - - def _set_visible(self, val): - """set the visible value (0 or 1) and makes the sprite dirty""" - self._visible = val - if self.dirty < 2: - self.dirty = 1 - - def _get_visible(self): - """return the visible value of that sprite""" - return self._visible - - visible = property(lambda self: self._get_visible(), - lambda self, value: self._set_visible(value), - doc="you can make this sprite disappear without " - "removing it from the group,\n" - "assign 0 for invisible and 1 for visible") - - def __repr__(self): - return "<%s DirtySprite(in %d groups)>" % \ - (self.__class__.__name__, len(self.groups())) - - -class AbstractGroup(object): - """base class for containers of sprites - - AbstractGroup does everything needed to behave as a normal group. You can - easily subclass a new group class from this or the other groups below if - you want to add more features. - - Any AbstractGroup-derived sprite groups act like sequences and support - iteration, len, and so on. 
- - """ - - # dummy val to identify sprite groups, and avoid infinite recursion - _spritegroup = True - - def __init__(self): - self.spritedict = {} - self.lostsprites = [] - - def sprites(self): - """get a list of sprites in the group - - Group.sprite(): return list - - Returns an object that can be looped over with a 'for' loop. (For now, - it is always a list, but this could change in a future version of - pygame.) Alternatively, you can get the same information by iterating - directly over the sprite group, e.g. 'for sprite in group'. - - """ - return list(self.spritedict) - - def add_internal(self, sprite): - self.spritedict[sprite] = 0 - - def remove_internal(self, sprite): - r = self.spritedict[sprite] - if r: - self.lostsprites.append(r) - del self.spritedict[sprite] - - def has_internal(self, sprite): - return sprite in self.spritedict - - def copy(self): - """copy a group with all the same sprites - - Group.copy(): return Group - - Returns a copy of the group that is an instance of the same class - and has the same sprites in it. - - """ - return self.__class__(self.sprites()) - - def __iter__(self): - return iter(self.sprites()) - - def __contains__(self, sprite): - return self.has(sprite) - - def add(self, *sprites): - """add sprite(s) to group - - Group.add(sprite, list, group, ...): return None - - Adds a sprite or sequence of sprites to a group. - - """ - for sprite in sprites: - # It's possible that some sprite is also an iterator. - # If this is the case, we should add the sprite itself, - # and not the iterator object. - if isinstance(sprite, Sprite): - if not self.has_internal(sprite): - self.add_internal(sprite) - sprite.add_internal(self) - else: - try: - # See if sprite is an iterator, like a list or sprite - # group. - self.add(*sprite) - except (TypeError, AttributeError): - # Not iterable. This is probably a sprite that is not an - # instance of the Sprite class or is not an instance of a - # subclass of the Sprite class. 
Alternately, it could be an - # old-style sprite group. - if hasattr(sprite, '_spritegroup'): - for spr in sprite.sprites(): - if not self.has_internal(spr): - self.add_internal(spr) - spr.add_internal(self) - elif not self.has_internal(sprite): - self.add_internal(sprite) - sprite.add_internal(self) - - def remove(self, *sprites): - """remove sprite(s) from group - - Group.remove(sprite, list, or group, ...): return None - - Removes a sprite or sequence of sprites from a group. - - """ - # This function behaves essentially the same as Group.add. It first - # tries to handle each argument as an instance of the Sprite class. If - # that failes, then it tries to handle the argument as an iterable - # object. If that failes, then it tries to handle the argument as an - # old-style sprite group. Lastly, if that fails, it assumes that the - # normal Sprite methods should be used. - for sprite in sprites: - if isinstance(sprite, Sprite): - if self.has_internal(sprite): - self.remove_internal(sprite) - sprite.remove_internal(self) - else: - try: - self.remove(*sprite) - except (TypeError, AttributeError): - if hasattr(sprite, '_spritegroup'): - for spr in sprite.sprites(): - if self.has_internal(spr): - self.remove_internal(spr) - spr.remove_internal(self) - elif self.has_internal(sprite): - self.remove_internal(sprite) - sprite.remove_internal(self) - - def has(self, *sprites): - """ask if group has a sprite or sprites - - Group.has(sprite or group, ...): return bool - - Returns True if the given sprite or sprites are contained in the - group. Alternatively, you can get the same information using the - 'in' operator, e.g. 'sprite in group', 'subgroup in group'. 
- - """ - return_value = False - - for sprite in sprites: - if isinstance(sprite, Sprite): - # Check for Sprite instance's membership in this group - if self.has_internal(sprite): - return_value = True - else: - return False - else: - try: - if self.has(*sprite): - return_value = True - else: - return False - except (TypeError, AttributeError): - if hasattr(sprite, '_spritegroup'): - for spr in sprite.sprites(): - if self.has_internal(spr): - return_value = True - else: - return False - else: - if self.has_internal(sprite): - return_value = True - else: - return False - - return return_value - - def update(self, *args): - """call the update method of every member sprite - - Group.update(*args): return None - - Calls the update method of every member sprite. All arguments that - were passed to this method are passed to the Sprite update function. - - """ - for s in self.sprites(): - s.update(*args) - - def draw(self, surface): - """draw all sprites onto the surface - - Group.draw(surface): return None - - Draws all of the member sprites onto the given surface. - - """ - sprites = self.sprites() - surface_blit = surface.blit - for spr in sprites: - self.spritedict[spr] = surface_blit(spr.image, spr.rect) - self.lostsprites = [] - - def clear(self, surface, bgd): - """erase the previous position of all sprites - - Group.clear(surface, bgd): return None - - Clears the area under every drawn sprite in the group. The bgd - argument should be Surface which is the same dimensions as the - screen surface. The bgd could also be a function which accepts - the given surface and the area to be cleared as arguments. 
- - """ - if callable(bgd): - for r in self.lostsprites: - bgd(surface, r) - for r in self.spritedict.values(): - if r: - bgd(surface, r) - else: - surface_blit = surface.blit - for r in self.lostsprites: - surface_blit(bgd, r, r) - for r in self.spritedict.values(): - if r: - surface_blit(bgd, r, r) - - def empty(self): - """remove all sprites - - Group.empty(): return None - - Removes all the sprites from the group. - - """ - for s in self.sprites(): - self.remove_internal(s) - s.remove_internal(self) - - def __nonzero__(self): - return truth(self.sprites()) - - def __len__(self): - """return number of sprites in group - - Group.len(group): return int - - Returns the number of sprites contained in the group. - - """ - return len(self.sprites()) - - def __repr__(self): - return "<%s(%d sprites)>" % (self.__class__.__name__, len(self)) - -class Group(AbstractGroup): - """container class for many Sprites - - pygame.sprite.Group(*sprites): return Group - - A simple container for Sprite objects. This class can be subclassed to - create containers with more specific behaviors. The constructor takes any - number of Sprite arguments to add to the Group. The group supports the - following standard Python operations: - - in test if a Sprite is contained - len the number of Sprites contained - bool test if any Sprites are contained - iter iterate through all the Sprites - - The Sprites in the Group are not ordered, so the Sprites are drawn and - iterated over in no particular order. - - """ - def __init__(self, *sprites): - AbstractGroup.__init__(self) - self.add(*sprites) - -RenderPlain = Group -RenderClear = Group - -class RenderUpdates(Group): - """Group class that tracks dirty updates - - pygame.sprite.RenderUpdates(*sprites): return RenderUpdates - - This class is derived from pygame.sprite.Group(). It has an enhanced draw - method that tracks the changed areas of the screen. 
- - """ - def draw(self, surface): - spritedict = self.spritedict - surface_blit = surface.blit - dirty = self.lostsprites - self.lostsprites = [] - dirty_append = dirty.append - for s in self.sprites(): - r = spritedict[s] - newrect = surface_blit(s.image, s.rect) - if r: - if newrect.colliderect(r): - dirty_append(newrect.union(r)) - else: - dirty_append(newrect) - dirty_append(r) - else: - dirty_append(newrect) - spritedict[s] = newrect - return dirty - -class OrderedUpdates(RenderUpdates): - """RenderUpdates class that draws Sprites in order of addition - - pygame.sprite.OrderedUpdates(*spites): return OrderedUpdates - - This class derives from pygame.sprite.RenderUpdates(). It maintains - the order in which the Sprites were added to the Group for rendering. - This makes adding and removing Sprites from the Group a little - slower than regular Groups. - - """ - def __init__(self, *sprites): - self._spritelist = [] - RenderUpdates.__init__(self, *sprites) - - def sprites(self): - return list(self._spritelist) - - def add_internal(self, sprite): - RenderUpdates.add_internal(self, sprite) - self._spritelist.append(sprite) - - def remove_internal(self, sprite): - RenderUpdates.remove_internal(self, sprite) - self._spritelist.remove(sprite) - - -class LayeredUpdates(AbstractGroup): - """LayeredUpdates Group handles layers, which are drawn like OrderedUpdates - - pygame.sprite.LayeredUpdates(*spites, **kwargs): return LayeredUpdates - - This group is fully compatible with pygame.sprite.Sprite. - New in pygame 1.8.0 - - """ - - _init_rect = Rect(0, 0, 0, 0) - - def __init__(self, *sprites, **kwargs): - """initialize an instance of LayeredUpdates with the given attributes - - You can set the default layer through kwargs using 'default_layer' - and an integer for the layer. The default layer is 0. - - If the sprite you add has an attribute _layer, then that layer will be - used. 
If **kwarg contains 'layer', then the passed sprites will be - added to that layer (overriding the sprite._layer attribute). If - neither the sprite nor **kwarg has a 'layer', then the default layer is - used to add the sprites. - - """ - self._spritelayers = {} - self._spritelist = [] - AbstractGroup.__init__(self) - self._default_layer = kwargs.get('default_layer', 0) - - self.add(*sprites, **kwargs) - - def add_internal(self, sprite, layer=None): - """Do not use this method directly. - - It is used by the group to add a sprite internally. - - """ - self.spritedict[sprite] = self._init_rect - - if layer is None: - try: - layer = sprite._layer - except AttributeError: - layer = sprite._layer = self._default_layer - elif hasattr(sprite, '_layer'): - sprite._layer = layer - - sprites = self._spritelist # speedup - sprites_layers = self._spritelayers - sprites_layers[sprite] = layer - - # add the sprite at the right position - # bisect algorithmus - leng = len(sprites) - low = mid = 0 - high = leng - 1 - while low <= high: - mid = low + (high - low) // 2 - if sprites_layers[sprites[mid]] <= layer: - low = mid + 1 - else: - high = mid - 1 - # linear search to find final position - while mid < leng and sprites_layers[sprites[mid]] <= layer: - mid += 1 - sprites.insert(mid, sprite) - - def add(self, *sprites, **kwargs): - """add a sprite or sequence of sprites to a group - - LayeredUpdates.add(*sprites, **kwargs): return None - - If the sprite you add has an attribute _layer, then that layer will be - used. If **kwarg contains 'layer', then the passed sprites will be - added to that layer (overriding the sprite._layer attribute). If - neither the sprite nor **kwarg has a 'layer', then the default layer is - used to add the sprites. - - """ - - if not sprites: - return - if 'layer' in kwargs: - layer = kwargs['layer'] - else: - layer = None - for sprite in sprites: - # It's possible that some sprite is also an iterator. 
- # If this is the case, we should add the sprite itself, - # and not the iterator object. - if isinstance(sprite, Sprite): - if not self.has_internal(sprite): - self.add_internal(sprite, layer) - sprite.add_internal(self) - else: - try: - # See if sprite is an iterator, like a list or sprite - # group. - self.add(*sprite, **kwargs) - except (TypeError, AttributeError): - # Not iterable. This is probably a sprite that is not an - # instance of the Sprite class or is not an instance of a - # subclass of the Sprite class. Alternately, it could be an - # old-style sprite group. - if hasattr(sprite, '_spritegroup'): - for spr in sprite.sprites(): - if not self.has_internal(spr): - self.add_internal(spr, layer) - spr.add_internal(self) - elif not self.has_internal(sprite): - self.add_internal(sprite, layer) - sprite.add_internal(self) - - def remove_internal(self, sprite): - """Do not use this method directly. - - The group uses it to add a sprite. - - """ - self._spritelist.remove(sprite) - # these dirty rects are suboptimal for one frame - r = self.spritedict[sprite] - if r is not self._init_rect: - self.lostsprites.append(r) # dirty rect - if hasattr(sprite, 'rect'): - self.lostsprites.append(sprite.rect) # dirty rect - - del self.spritedict[sprite] - del self._spritelayers[sprite] - - def sprites(self): - """return a ordered list of sprites (first back, last top). 
- - LayeredUpdates.sprites(): return sprites - - """ - return list(self._spritelist) - - def draw(self, surface): - """draw all sprites in the right order onto the passed surface - - LayeredUpdates.draw(surface): return Rect_list - - """ - spritedict = self.spritedict - surface_blit = surface.blit - dirty = self.lostsprites - self.lostsprites = [] - dirty_append = dirty.append - init_rect = self._init_rect - for spr in self.sprites(): - rec = spritedict[spr] - newrect = surface_blit(spr.image, spr.rect) - if rec is init_rect: - dirty_append(newrect) - else: - if newrect.colliderect(rec): - dirty_append(newrect.union(rec)) - else: - dirty_append(newrect) - dirty_append(rec) - spritedict[spr] = newrect - return dirty - - def get_sprites_at(self, pos): - """return a list with all sprites at that position - - LayeredUpdates.get_sprites_at(pos): return colliding_sprites - - Bottom sprites are listed first; the top ones are listed last. - - """ - _sprites = self._spritelist - rect = Rect(pos, (0, 0)) - colliding_idx = rect.collidelistall(_sprites) - colliding = [_sprites[i] for i in colliding_idx] - return colliding - - def get_sprite(self, idx): - """return the sprite at the index idx from the groups sprites - - LayeredUpdates.get_sprite(idx): return sprite - - Raises IndexOutOfBounds if the idx is not within range. - - """ - return self._spritelist[idx] - - def remove_sprites_of_layer(self, layer_nr): - """remove all sprites from a layer and return them as a list - - LayeredUpdates.remove_sprites_of_layer(layer_nr): return sprites - - """ - sprites = self.get_sprites_from_layer(layer_nr) - self.remove(*sprites) - return sprites - - #---# layer methods - def layers(self): - """return a list of unique defined layers defined. 
- - LayeredUpdates.layers(): return layers - - """ - return sorted(set(self._spritelayers.values())) - - def change_layer(self, sprite, new_layer): - """change the layer of the sprite - - LayeredUpdates.change_layer(sprite, new_layer): return None - - The sprite must have been added to the renderer already. This is not - checked. - - """ - sprites = self._spritelist # speedup - sprites_layers = self._spritelayers # speedup - - sprites.remove(sprite) - sprites_layers.pop(sprite) - - # add the sprite at the right position - # bisect algorithmus - leng = len(sprites) - low = mid = 0 - high = leng - 1 - while low <= high: - mid = low + (high - low) // 2 - if sprites_layers[sprites[mid]] <= new_layer: - low = mid + 1 - else: - high = mid - 1 - # linear search to find final position - while mid < leng and sprites_layers[sprites[mid]] <= new_layer: - mid += 1 - sprites.insert(mid, sprite) - if hasattr(sprite, 'layer'): - sprite.layer = new_layer - - # add layer info - sprites_layers[sprite] = new_layer - - def get_layer_of_sprite(self, sprite): - """return the layer that sprite is currently in - - If the sprite is not found, then it will return the default layer. - - """ - return self._spritelayers.get(sprite, self._default_layer) - - def get_top_layer(self): - """return the top layer - - LayeredUpdates.get_top_layer(): return layer - - """ - return self._spritelayers[self._spritelist[-1]] - - def get_bottom_layer(self): - """return the bottom layer - - LayeredUpdates.get_bottom_layer(): return layer - - """ - return self._spritelayers[self._spritelist[0]] - - def move_to_front(self, sprite): - """bring the sprite to front layer - - LayeredUpdates.move_to_front(sprite): return None - - Brings the sprite to front by changing the sprite layer to the top-most - layer. The sprite is added at the end of the list of sprites in that - top-most layer. 
- - """ - self.change_layer(sprite, self.get_top_layer()) - - def move_to_back(self, sprite): - """move the sprite to the bottom layer - - LayeredUpdates.move_to_back(sprite): return None - - Moves the sprite to the bottom layer by moving it to a new layer below - the current bottom layer. - - """ - self.change_layer(sprite, self.get_bottom_layer() - 1) - - def get_top_sprite(self): - """return the topmost sprite - - LayeredUpdates.get_top_sprite(): return Sprite - - """ - return self._spritelist[-1] - - def get_sprites_from_layer(self, layer): - """return all sprites from a layer ordered as they where added - - LayeredUpdates.get_sprites_from_layer(layer): return sprites - - Returns all sprites from a layer. The sprites are ordered in the - sequence that they where added. (The sprites are not removed from the - layer. - - """ - sprites = [] - sprites_append = sprites.append - sprite_layers = self._spritelayers - for spr in self._spritelist: - if sprite_layers[spr] == layer: - sprites_append(spr) - elif sprite_layers[spr] > layer:# break after because no other will - # follow with same layer - break - return sprites - - def switch_layer(self, layer1_nr, layer2_nr): - """switch the sprites from layer1_nr to layer2_nr - - LayeredUpdates.switch_layer(layer1_nr, layer2_nr): return None - - The layers number must exist. This method does not check for the - existence of the given layers. - - """ - sprites1 = self.remove_sprites_of_layer(layer1_nr) - for spr in self.get_sprites_from_layer(layer2_nr): - self.change_layer(spr, layer1_nr) - self.add(layer=layer2_nr, *sprites1) - - -class LayeredDirty(LayeredUpdates): - """LayeredDirty Group is for DirtySprites; subclasses LayeredUpdates - - pygame.sprite.LayeredDirty(*spites, **kwargs): return LayeredDirty - - This group requires pygame.sprite.DirtySprite or any sprite that - has the following attributes: - image, rect, dirty, visible, blendmode (see doc of DirtySprite). 
- - It uses the dirty flag technique and is therefore faster than - pygame.sprite.RenderUpdates if you have many static sprites. It - also switches automatically between dirty rect updating and full - screen drawing, so you do no have to worry which would be faster. - - As with the pygame.sprite.Group, you can specify some additional attributes - through kwargs: - _use_update: True/False (default is False) - _default_layer: default layer where the sprites without a layer are - added - _time_threshold: treshold time for switching between dirty rect mode - and fullscreen mode; defaults to updating at 80 frames per second, - which is equal to 1000.0 / 80.0 - - New in pygame 1.8.0 - - """ - - def __init__(self, *sprites, **kwargs): - """initialize group. - - pygame.sprite.LayeredDirty(*spites, **kwargs): return LayeredDirty - - You can specify some additional attributes through kwargs: - _use_update: True/False (default is False) - _default_layer: default layer where the sprites without a layer are - added - _time_threshold: treshold time for switching between dirty rect - mode and fullscreen mode; defaults to updating at 80 frames per - second, which is equal to 1000.0 / 80.0 - - """ - LayeredUpdates.__init__(self, *sprites, **kwargs) - self._clip = None - - self._use_update = False - - self._time_threshold = 1000.0 / 80.0 # 1000.0 / fps - - self._bgd = None - for key, val in kwargs.items(): - if key in ['_use_update', '_time_threshold', '_default_layer']: - if hasattr(self, key): - setattr(self, key, val) - - def add_internal(self, sprite, layer=None): - """Do not use this method directly. - - It is used by the group to add a sprite internally. 
- - """ - # check if all needed attributes are set - if not hasattr(sprite, 'dirty'): - raise AttributeError() - if not hasattr(sprite, 'visible'): - raise AttributeError() - if not hasattr(sprite, 'blendmode'): - raise AttributeError() - - if not isinstance(sprite, DirtySprite): - raise TypeError() - - if sprite.dirty == 0: # set it dirty if it is not - sprite.dirty = 1 - - LayeredUpdates.add_internal(self, sprite, layer) - - def draw(self, surface, bgd=None): - """draw all sprites in the right order onto the given surface - - LayeredDirty.draw(surface, bgd=None): return Rect_list - - You can pass the background too. If a self.bgd is already set to some - value that is not None, then the bgd argument has no effect. - - """ - # speedups - _orig_clip = surface.get_clip() - _clip = self._clip - if _clip is None: - _clip = _orig_clip - - _surf = surface - _sprites = self._spritelist - _old_rect = self.spritedict - _update = self.lostsprites - _update_append = _update.append - _ret = None - _surf_blit = _surf.blit - _rect = Rect - if bgd is not None: - self._bgd = bgd - _bgd = self._bgd - init_rect = self._init_rect - - _surf.set_clip(_clip) - # ------- - # 0. decide whether to render with update or flip - start_time = get_ticks() - if self._use_update: # dirty rects mode - # 1. 
find dirty area on screen and put the rects into _update - # still not happy with that part - for spr in _sprites: - if 0 < spr.dirty: - # chose the right rect - if spr.source_rect: - _union_rect = _rect(spr.rect.topleft, - spr.source_rect.size) - else: - _union_rect = _rect(spr.rect) - - _union_rect_collidelist = _union_rect.collidelist - _union_rect_union_ip = _union_rect.union_ip - i = _union_rect_collidelist(_update) - while -1 < i: - _union_rect_union_ip(_update[i]) - del _update[i] - i = _union_rect_collidelist(_update) - _update_append(_union_rect.clip(_clip)) - - if _old_rect[spr] is not init_rect: - _union_rect = _rect(_old_rect[spr]) - _union_rect_collidelist = _union_rect.collidelist - _union_rect_union_ip = _union_rect.union_ip - i = _union_rect_collidelist(_update) - while -1 < i: - _union_rect_union_ip(_update[i]) - del _update[i] - i = _union_rect_collidelist(_update) - _update_append(_union_rect.clip(_clip)) - # can it be done better? because that is an O(n**2) algorithm in - # worst case - - # clear using background - if _bgd is not None: - for rec in _update: - _surf_blit(_bgd, rec, rec) - - # 2. draw - for spr in _sprites: - if 1 > spr.dirty: - if spr._visible: - # sprite not dirty; blit only the intersecting part - if spr.source_rect is not None: - # For possible future speed up, source_rect's data - # can be prefetched outside of this loop. 
- _spr_rect = _rect(spr.rect.topleft, - spr.source_rect.size) - rect_offset_x = spr.source_rect[0] - _spr_rect[0] - rect_offset_y = spr.source_rect[1] - _spr_rect[1] - else: - _spr_rect = spr.rect - rect_offset_x = -_spr_rect[0] - rect_offset_y = -_spr_rect[1] - - _spr_rect_clip = _spr_rect.clip - - for idx in _spr_rect.collidelistall(_update): - # clip - clip = _spr_rect_clip(_update[idx]) - _surf_blit(spr.image, - clip, - (clip[0] + rect_offset_x, - clip[1] + rect_offset_y, - clip[2], - clip[3]), - spr.blendmode) - else: # dirty sprite - if spr._visible: - _old_rect[spr] = _surf_blit(spr.image, - spr.rect, - spr.source_rect, - spr.blendmode) - if spr.dirty == 1: - spr.dirty = 0 - _ret = list(_update) - else: # flip, full screen mode - if _bgd is not None: - _surf_blit(_bgd, (0, 0)) - for spr in _sprites: - if spr._visible: - _old_rect[spr] = _surf_blit(spr.image, - spr.rect, - spr.source_rect, - spr.blendmode) - _ret = [_rect(_clip)] # return only the part of the screen changed - - - # timing for switching modes - # How may a good threshold be found? It depends on the hardware. - end_time = get_ticks() - if end_time-start_time > self._time_threshold: - self._use_update = False - else: - self._use_update = True - -## # debug -## print " check: using dirty rects:", self._use_update - - # emtpy dirty rects list - _update[:] = [] - - # ------- - # restore original clip - _surf.set_clip(_orig_clip) - return _ret - - def clear(self, surface, bgd): - """use to set background - - Group.clear(surface, bgd): return None - - """ - self._bgd = bgd - - def repaint_rect(self, screen_rect): - """repaint the given area - - LayeredDirty.repaint_rect(screen_rect): return None - - screen_rect is in screen coordinates. 
- - """ - if self._clip: - self.lostsprites.append(screen_rect.clip(self._clip)) - else: - self.lostsprites.append(Rect(screen_rect)) - - def set_clip(self, screen_rect=None): - """clip the area where to draw; pass None (default) to reset the clip - - LayeredDirty.set_clip(screen_rect=None): return None - - """ - if screen_rect is None: - self._clip = pygame.display.get_surface().get_rect() - else: - self._clip = screen_rect - self._use_update = False - - def get_clip(self): - """get the area where drawing will occur - - LayeredDirty.get_clip(): return Rect - - """ - return self._clip - - def change_layer(self, sprite, new_layer): - """change the layer of the sprite - - LayeredUpdates.change_layer(sprite, new_layer): return None - - The sprite must have been added to the renderer already. This is not - checked. - - """ - LayeredUpdates.change_layer(self, sprite, new_layer) - if sprite.dirty == 0: - sprite.dirty = 1 - - def set_timing_treshold(self, time_ms): - """set the treshold in milliseconds - - set_timing_treshold(time_ms): return None - - Defaults to 1000.0 / 80.0. This means that the screen will be painted - using the flip method rather than the update method if the update - method is taking so long to update the screen that the frame rate falls - below 80 frames per second. - - """ - self._time_threshold = time_ms - - -class GroupSingle(AbstractGroup): - """A group container that holds a single most recent item. - - This class works just like a regular group, but it only keeps a single - sprite in the group. Whatever sprite has been added to the group last will - be the only sprite in the group. - - You can access its one sprite as the .sprite attribute. Assigning to this - attribute will properly remove the old sprite and then add the new one. 
- - """ - - def __init__(self, sprite=None): - AbstractGroup.__init__(self) - self.__sprite = None - if sprite is not None: - self.add(sprite) - - def copy(self): - return GroupSingle(self.__sprite) - - def sprites(self): - if self.__sprite is not None: - return [self.__sprite] - else: - return [] - - def add_internal(self, sprite): - if self.__sprite is not None: - self.__sprite.remove_internal(self) - self.remove_internal(self.__sprite) - self.__sprite = sprite - - def __nonzero__(self): - return self.__sprite is not None - - def _get_sprite(self): - return self.__sprite - - def _set_sprite(self, sprite): - self.add_internal(sprite) - sprite.add_internal(self) - return sprite - - sprite = property(_get_sprite, - _set_sprite, - None, - "The sprite contained in this group") - - def remove_internal(self, sprite): - if sprite is self.__sprite: - self.__sprite = None - if sprite in self.spritedict: - AbstractGroup.remove_internal(self, sprite) - - def has_internal(self, sprite): - return self.__sprite is sprite - - # Optimizations... - def __contains__(self, sprite): - return self.__sprite is sprite - - -# Some different collision detection functions that could be used. -def collide_rect(left, right): - """collision detection between two sprites, using rects. - - pygame.sprite.collide_rect(left, right): return bool - - Tests for collision between two sprites. Uses the pygame.Rect colliderect - function to calculate the collision. It is intended to be passed as a - collided callback function to the *collide functions. Sprites must have - "rect" attributes. - - New in pygame 1.8.0 - - """ - return left.rect.colliderect(right.rect) - -class collide_rect_ratio: - """A callable class that checks for collisions using scaled rects - - The class checks for collisions between two sprites using a scaled version - of the sprites' rects. Is created with a ratio; the instance is then - intended to be passed as a collided callback function to the *collide - functions. 
- - New in pygame 1.8.1 - - """ - - def __init__(self, ratio): - """create a new collide_rect_ratio callable - - Ratio is expected to be a floating point value used to scale - the underlying sprite rect before checking for collisions. - - """ - self.ratio = ratio - - def __call__(self, left, right): - """detect collision between two sprites using scaled rects - - pygame.sprite.collide_rect_ratio(ratio)(left, right): return bool - - Tests for collision between two sprites. Uses the pygame.Rect - colliderect function to calculate the collision after scaling the rects - by the stored ratio. Sprites must have "rect" attributes. - - """ - - ratio = self.ratio - - leftrect = left.rect - width = leftrect.width - height = leftrect.height - leftrect = leftrect.inflate(width * ratio - width, - height * ratio - height) - - rightrect = right.rect - width = rightrect.width - height = rightrect.height - rightrect = rightrect.inflate(width * ratio - width, - height * ratio - height) - - return leftrect.colliderect(rightrect) - -def collide_circle(left, right): - """detect collision between two sprites using circles - - pygame.sprite.collide_circle(left, right): return bool - - Tests for collision between two sprites by testing whether two circles - centered on the sprites overlap. If the sprites have a "radius" attribute, - then that radius is used to create the circle; otherwise, a circle is - created that is big enough to completely enclose the sprite's rect as - given by the "rect" attribute. This function is intended to be passed as - a collided callback function to the *collide functions. Sprites must have a - "rect" and an optional "radius" attribute. 
- - New in pygame 1.8.0 - - """ - - xdistance = left.rect.centerx - right.rect.centerx - ydistance = left.rect.centery - right.rect.centery - distancesquared = xdistance ** 2 + ydistance ** 2 - - if hasattr(left, 'radius'): - leftradius = left.radius - else: - leftrect = left.rect - # approximating the radius of a square by using half of the diagonal, - # might give false positives (especially if its a long small rect) - leftradius = 0.5 * ((leftrect.width ** 2 + leftrect.height ** 2) ** 0.5) - # store the radius on the sprite for next time - setattr(left, 'radius', leftradius) - - if hasattr(right, 'radius'): - rightradius = right.radius - else: - rightrect = right.rect - # approximating the radius of a square by using half of the diagonal - # might give false positives (especially if its a long small rect) - rightradius = 0.5 * ((rightrect.width ** 2 + rightrect.height ** 2) ** 0.5) - # store the radius on the sprite for next time - setattr(right, 'radius', rightradius) - return distancesquared <= (leftradius + rightradius) ** 2 - -class collide_circle_ratio(object): - """detect collision between two sprites using scaled circles - - This callable class checks for collisions between two sprites using a - scaled version of a sprite's radius. It is created with a ratio as the - argument to the constructor. The instance is then intended to be passed as - a collided callback function to the *collide functions. - - New in pygame 1.8.1 - - """ - - def __init__(self, ratio): - """creates a new collide_circle_ratio callable instance - - The given ratio is expected to be a floating point value used to scale - the underlying sprite radius before checking for collisions. - - When the ratio is ratio=1.0, then it behaves exactly like the - collide_circle method. 
- - """ - self.ratio = ratio - - - def __call__(self, left, right): - """detect collision between two sprites using scaled circles - - pygame.sprite.collide_circle_radio(ratio)(left, right): return bool - - Tests for collision between two sprites by testing whether two circles - centered on the sprites overlap after scaling the circle's radius by - the stored ratio. If the sprites have a "radius" attribute, that is - used to create the circle; otherwise, a circle is created that is big - enough to completely enclose the sprite's rect as given by the "rect" - attribute. Intended to be passed as a collided callback function to the - *collide functions. Sprites must have a "rect" and an optional "radius" - attribute. - - """ - - ratio = self.ratio - xdistance = left.rect.centerx - right.rect.centerx - ydistance = left.rect.centery - right.rect.centery - distancesquared = xdistance ** 2 + ydistance ** 2 - - if hasattr(left, "radius"): - leftradius = left.radius * ratio - else: - leftrect = left.rect - leftradius = ratio * 0.5 * ((leftrect.width ** 2 + leftrect.height ** 2) ** 0.5) - # store the radius on the sprite for next time - setattr(left, 'radius', leftradius) - - if hasattr(right, "radius"): - rightradius = right.radius * ratio - else: - rightrect = right.rect - rightradius = ratio * 0.5 * ((rightrect.width ** 2 + rightrect.height ** 2) ** 0.5) - # store the radius on the sprite for next time - setattr(right, 'radius', rightradius) - - return distancesquared <= (leftradius + rightradius) ** 2 - -def collide_mask(left, right): - """collision detection between two sprites, using masks. - - pygame.sprite.collide_mask(SpriteLeft, SpriteRight): bool - - Tests for collision between two sprites by testing if their bitmasks - overlap. If the sprites have a "mask" attribute, that is used as the mask; - otherwise, a mask is created from the sprite image. Intended to be passed - as a collided callback function to the *collide functions. 
Sprites must - have a "rect" and an optional "mask" attribute. - - New in pygame 1.8.0 - - """ - xoffset = right.rect[0] - left.rect[0] - yoffset = right.rect[1] - left.rect[1] - try: - leftmask = left.mask - except AttributeError: - leftmask = from_surface(left.image) - try: - rightmask = right.mask - except AttributeError: - rightmask = from_surface(right.image) - return leftmask.overlap(rightmask, (xoffset, yoffset)) - -def spritecollide(sprite, group, dokill, collided=None): - """find Sprites in a Group that intersect another Sprite - - pygame.sprite.spritecollide(sprite, group, dokill, collided=None): - return Sprite_list - - Return a list containing all Sprites in a Group that intersect with another - Sprite. Intersection is determined by comparing the Sprite.rect attribute - of each Sprite. - - The dokill argument is a bool. If set to True, all Sprites that collide - will be removed from the Group. - - The collided argument is a callback function used to calculate if two - sprites are colliding. it should take two sprites as values, and return a - bool value indicating if they are colliding. If collided is not passed, all - sprites must have a "rect" value, which is a rectangle of the sprite area, - which will be used to calculate the collision. 
- - """ - if dokill: - - crashed = [] - append = crashed.append - - if collided: - for s in group.sprites(): - if collided(sprite, s): - s.kill() - append(s) - else: - spritecollide = sprite.rect.colliderect - for s in group.sprites(): - if spritecollide(s.rect): - s.kill() - append(s) - - return crashed - - elif collided: - return [s for s in group if collided(sprite, s)] - else: - spritecollide = sprite.rect.colliderect - return [s for s in group if spritecollide(s.rect)] - - -def groupcollide(groupa, groupb, dokilla, dokillb, collided=None): - """detect collision between a group and another group - - pygame.sprite.groupcollide(groupa, groupb, dokilla, dokillb): - return dict - - Given two groups, this will find the intersections between all sprites in - each group. It returns a dictionary of all sprites in the first group that - collide. The value for each item in the dictionary is a list of the sprites - in the second group it collides with. The two dokill arguments control if - the sprites from either group will be automatically removed from all - groups. Collided is a callback function used to calculate if two sprites - are colliding. it should take two sprites as values, and return a bool - value indicating if they are colliding. If collided is not passed, all - sprites must have a "rect" value, which is a rectangle of the sprite area - that will be used to calculate the collision. - - """ - crashed = {} - SC = spritecollide - if dokilla: - for s in groupa.sprites(): - c = SC(s, groupb, dokillb, collided) - if c: - crashed[s] = c - s.kill() - else: - for s in groupa: - c = SC(s, groupb, dokillb, collided) - if c: - crashed[s] = c - return crashed - -def spritecollideany(sprite, group, collided=None): - """finds any sprites in a group that collide with the given sprite - - pygame.sprite.spritecollideany(sprite, group): return sprite - - Given a sprite and a group of sprites, this will return return any single - sprite that collides with with the given sprite. 
If there are no - collisions, then this returns None. - - If you don't need all the features of the spritecollide function, this - function will be a bit quicker. - - Collided is a callback function used to calculate if two sprites are - colliding. It should take two sprites as values and return a bool value - indicating if they are colliding. If collided is not passed, then all - sprites must have a "rect" value, which is a rectangle of the sprite area, - which will be used to calculate the collision. - - - """ - if collided: - for s in group: - if collided(sprite, s): - return s - else: - # Special case old behaviour for speed. - spritecollide = sprite.rect.colliderect - for s in group: - if spritecollide(s.rect): - return s - return None diff --git a/venv/lib/python3.7/site-packages/pygame/surface.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/surface.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 1b8dccc..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/surface.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/surfarray.py b/venv/lib/python3.7/site-packages/pygame/surfarray.py deleted file mode 100644 index 91446d7..0000000 --- a/venv/lib/python3.7/site-packages/pygame/surfarray.py +++ /dev/null @@ -1,290 +0,0 @@ -## pygame - Python Game Library -## Copyright (C) 2007 Marcus von Appen -## -## This library is free software; you can redistribute it and/or -## modify it under the terms of the GNU Library General Public -## License as published by the Free Software Foundation; either -## version 2 of the License, or (at your option) any later version. -## -## This library is distributed in the hope that it will be useful, -## but WITHOUT ANY WARRANTY; without even the implied warranty of -## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -## Library General Public License for more details. 
-## -## You should have received a copy of the GNU Library General Public -## License along with this library; if not, write to the Free -## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -## -## Marcus von Appen -## mva@sysfault.org - -"""pygame module for accessing surface pixel data using array interfaces - -Functions to convert pixel data between pygame Surfaces and arrays. This -module will only be functional when pygame can use the external Numpy or -Numeric packages. - -Every pixel is stored as a single integer value to represent the red, -green, and blue colors. The 8bit images use a value that looks into a -colormap. Pixels with higher depth use a bit packing process to place -three or four values into a single number. - -The arrays are indexed by the X axis first, followed by the Y -axis. Arrays that treat the pixels as a single integer are referred to -as 2D arrays. This module can also separate the red, green, and blue -color values into separate indices. These types of arrays are referred -to as 3D arrays, and the last index is 0 for red, 1 for green, and 2 for -blue. - -Supported array types are - - numpy - numeric (deprecated; will be removed in Pygame 1.9.3.) - -The default will be numpy, if installed. Otherwise, Numeric will be set -as default if installed, and a deprecation warning will be issued. If -neither numpy nor Numeric are installed, the module will raise an -ImportError. - -The array type to use can be changed at runtime using the use_arraytype() -method, which requires one of the above types as string. - -Note: numpy and Numeric are not completely compatible. Certain array -manipulations, which work for one type, might behave differently or even -completely break for the other. - -Additionally, in contrast to Numeric, numpy does use unsigned 16-bit -integers. Images with 16-bit data will be treated as unsigned -integers. 
Numeric instead uses signed integers for the representation, -which is important to keep in mind, if you use the module's functions -and wonder about the values. -""" - -# Try to import the necessary modules. -import pygame._numpysurfarray as numpysf - -from pygame.pixelcopy import array_to_surface, make_surface as pc_make_surface - -def blit_array (surface, array): - """pygame.surfarray.blit_array(Surface, array): return None - - Blit directly from a array values. - - Directly copy values from an array into a Surface. This is faster than - converting the array into a Surface and blitting. The array must be the - same dimensions as the Surface and will completely replace all pixel - values. Only integer, ascii character and record arrays are accepted. - - This function will temporarily lock the Surface as the new values are - copied. - """ - return numpysf.blit_array (surface, array) - -def array2d (surface): - """pygame.surfarray.array2d (Surface): return array - - Copy pixels into a 2d array. - - Copy the pixels from a Surface into a 2D array. The bit depth of the - surface will control the size of the integer values, and will work - for any type of pixel format. - - This function will temporarily lock the Surface as pixels are copied - (see the Surface.lock - lock the Surface memory for pixel access - method). - """ - return numpysf.array2d (surface) - -def pixels2d (surface): - """pygame.surfarray.pixels2d (Surface): return array - - Reference pixels into a 2d array. - - Create a new 2D array that directly references the pixel values in a - Surface. Any changes to the array will affect the pixels in the - Surface. This is a fast operation since no data is copied. - - Pixels from a 24-bit Surface cannot be referenced, but all other - Surface bit depths can. - - The Surface this references will remain locked for the lifetime of - the array (see the Surface.lock - lock the Surface memory for pixel - access method). 
- """ - return numpysf.pixels2d (surface) - -def array3d (surface): - """pygame.surfarray.array3d (Surface): return array - - Copy pixels into a 3d array. - - Copy the pixels from a Surface into a 3D array. The bit depth of the - surface will control the size of the integer values, and will work - for any type of pixel format. - - This function will temporarily lock the Surface as pixels are copied - (see the Surface.lock - lock the Surface memory for pixel access - method). - """ - return numpysf.array3d (surface) - -def pixels3d (surface): - """pygame.surfarray.pixels3d (Surface): return array - - Reference pixels into a 3d array. - - Create a new 3D array that directly references the pixel values in a - Surface. Any changes to the array will affect the pixels in the - Surface. This is a fast operation since no data is copied. - - This will only work on Surfaces that have 24-bit or 32-bit - formats. Lower pixel formats cannot be referenced. - - The Surface this references will remain locked for the lifetime of - the array (see the Surface.lock - lock the Surface memory for pixel - access method). - """ - return numpysf.pixels3d (surface) - -def array_alpha (surface): - """pygame.surfarray.array_alpha (Surface): return array - - Copy pixel alphas into a 2d array. - - Copy the pixel alpha values (degree of transparency) from a Surface - into a 2D array. This will work for any type of Surface - format. Surfaces without a pixel alpha will return an array with all - opaque values. - - This function will temporarily lock the Surface as pixels are copied - (see the Surface.lock - lock the Surface memory for pixel access - method). - """ - return numpysf.array_alpha (surface) - -def pixels_alpha (surface): - """pygame.surfarray.pixels_alpha (Surface): return array - - Reference pixel alphas into a 2d array. - - Create a new 2D array that directly references the alpha values - (degree of transparency) in a Surface. 
Any changes to the array will - affect the pixels in the Surface. This is a fast operation since no - data is copied. - - This can only work on 32-bit Surfaces with a per-pixel alpha value. - - The Surface this array references will remain locked for the - lifetime of the array. - """ - return numpysf.pixels_alpha (surface) - -def pixels_red (surface): - """pygame.surfarray.pixels_red (Surface): return array - - Reference pixel red into a 2d array. - - Create a new 2D array that directly references the red values - in a Surface. Any changes to the array will affect the pixels - in the Surface. This is a fast operation since no data is copied. - - This can only work on 24-bit or 32-bit Surfaces. - - The Surface this array references will remain locked for the - lifetime of the array. - """ - return numpysf.pixels_red (surface) - -def pixels_green (surface): - """pygame.surfarray.pixels_green (Surface): return array - - Reference pixel green into a 2d array. - - Create a new 2D array that directly references the green values - in a Surface. Any changes to the array will affect the pixels - in the Surface. This is a fast operation since no data is copied. - - This can only work on 24-bit or 32-bit Surfaces. - - The Surface this array references will remain locked for the - lifetime of the array. - """ - return numpysf.pixels_green (surface) - -def pixels_blue (surface): - """pygame.surfarray.pixels_blue (Surface): return array - - Reference pixel blue into a 2d array. - - Create a new 2D array that directly references the blue values - in a Surface. Any changes to the array will affect the pixels - in the Surface. This is a fast operation since no data is copied. - - This can only work on 24-bit or 32-bit Surfaces. - - The Surface this array references will remain locked for the - lifetime of the array. 
- """ - return numpysf.pixels_blue (surface) - -def array_colorkey (surface): - """pygame.surfarray.array_colorkey (Surface): return array - - Copy the colorkey values into a 2d array. - - Create a new array with the colorkey transparency value from each - pixel. If the pixel matches the colorkey it will be fully - tranparent; otherwise it will be fully opaque. - - This will work on any type of Surface format. If the image has no - colorkey a solid opaque array will be returned. - - This function will temporarily lock the Surface as pixels are - copied. - """ - return numpysf.array_colorkey (surface) - -def make_surface(array): - """pygame.surfarray.make_surface (array): return Surface - - Copy an array to a new surface. - - Create a new Surface that best resembles the data and format on the - array. The array can be 2D or 3D with any sized integer values. - """ - return numpysf.make_surface (array) - -def map_array (surface, array): - """pygame.surfarray.map_array (Surface, array3d): return array2d - - Map a 3D array into a 2D array. - - Convert a 3D array into a 2D array. This will use the given Surface - format to control the conversion. Palette surface formats are not - supported. - """ - return numpysf.map_array (surface, array) - -def use_arraytype (arraytype): - """pygame.surfarray.use_arraytype (arraytype): return None - - DEPRECATED - only numpy arrays are now supported. - """ - arraytype = arraytype.lower () - if arraytype != "numpy": - raise ValueError("invalid array type") - -def get_arraytype (): - """pygame.surfarray.get_arraytype (): return str - - DEPRECATED - only numpy arrays are now supported. - """ - return "numpy" - -def get_arraytypes (): - """pygame.surfarray.get_arraytypes (): return tuple - - DEPRECATED - only numpy arrays are now supported. 
- """ - return ("numpy",) - diff --git a/venv/lib/python3.7/site-packages/pygame/surflock.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/surflock.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index e9f07ef..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/surflock.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/sysfont.py b/venv/lib/python3.7/site-packages/pygame/sysfont.py deleted file mode 100644 index b3c7443..0000000 --- a/venv/lib/python3.7/site-packages/pygame/sysfont.py +++ /dev/null @@ -1,411 +0,0 @@ -# coding: ascii -# pygame - Python Game Library -# Copyright (C) 2000-2003 Pete Shinners -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Library General Public -# License as published by the Free Software Foundation; either -# version 2 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Library General Public License for more details. 
-# -# You should have received a copy of the GNU Library General Public -# License along with this library; if not, write to the Free -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -# -# Pete Shinners -# pete@shinners.org -"""sysfont, used in the font module to find system fonts""" - -import os -import sys -from pygame.compat import xrange_, PY_MAJOR_VERSION -from os.path import basename, dirname, exists, join, splitext -import xml.etree.ElementTree as ET - - -OpenType_extensions = frozenset(('.ttf', '.ttc', '.otf')) -Sysfonts = {} -Sysalias = {} - -# Python 3 compatibility -if PY_MAJOR_VERSION >= 3: - def toascii(raw): - """convert bytes to ASCII-only string""" - return raw.decode('ascii', 'ignore') - if os.name == 'nt': - import winreg as _winreg - else: - import subprocess -else: - def toascii(raw): - """return ASCII characters of a given unicode or 8-bit string""" - return raw.decode('ascii', 'ignore') - if os.name == 'nt': - import _winreg - else: - import subprocess - - -def _simplename(name): - """create simple version of the font name""" - # return alphanumeric characters of a string (converted to lowercase) - return ''.join(c.lower() for c in name if c.isalnum()) - - -def _addfont(name, bold, italic, font, fontdict): - """insert a font and style into the font dictionary""" - if name not in fontdict: - fontdict[name] = {} - fontdict[name][bold, italic] = font - - -def initsysfonts_win32(): - """initialize fonts dictionary on Windows""" - - fontdir = join(os.environ.get('WINDIR', 'C:\\Windows'), 'Fonts') - - TrueType_suffix = '(TrueType)' - mods = ('demibold', 'narrow', 'light', 'unicode', 'bt', 'mt') - - fonts = {} - - # add fonts entered in the registry - - # find valid registry keys containing font information. 
- # http://docs.python.org/lib/module-sys.html - # 0 (VER_PLATFORM_WIN32s) Win32s on Windows 3.1 - # 1 (VER_PLATFORM_WIN32_WINDOWS) Windows 95/98/ME - # 2 (VER_PLATFORM_WIN32_NT) Windows NT/2000/XP - # 3 (VER_PLATFORM_WIN32_CE) Windows CE - if sys.getwindowsversion()[0] == 1: - key_name = "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Fonts" - else: - key_name = "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Fonts" - key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, key_name) - - for i in xrange_(_winreg.QueryInfoKey(key)[1]): - try: - # name is the font's name e.g. Times New Roman (TrueType) - # font is the font's filename e.g. times.ttf - name, font = _winreg.EnumValue(key, i)[0:2] - except EnvironmentError: - break - - # try to handle windows unicode strings for file names with - # international characters - if PY_MAJOR_VERSION < 3: - # here are two documents with some information about it: - # http://www.python.org/peps/pep-0277.html - # https://www.microsoft.com/technet/archive/interopmigration/linux/mvc/lintowin.mspx#ECAA - try: - font = str(font) - except UnicodeEncodeError: - # MBCS is the windows encoding for unicode file names. - try: - font = font.encode('MBCS') - except: - # no success with str or MBCS encoding... skip this font. 
- continue - - if splitext(font)[1].lower() not in OpenType_extensions: - continue - if not dirname(font): - font = join(fontdir, font) - - if name.endswith(TrueType_suffix): - name = name.rstrip(TrueType_suffix).rstrip() - name = name.lower().split() - - bold = italic = 0 - for m in mods: - if m in name: - name.remove(m) - if 'bold' in name: - name.remove('bold') - bold = 1 - if 'italic' in name: - name.remove('italic') - italic = 1 - name = ''.join(name) - - name = _simplename(name) - - _addfont(name, bold, italic, font, fonts) - - return fonts - - -def _add_font_paths(sub_elements, fonts): - """ Gets each element, checks its tag content, - if wanted fetches the next value in the iterable - """ - font_name = font_path = None - for tag in sub_elements: - if tag.text == "_name": - font_name = next(sub_elements).text - if splitext(font_name)[1] not in OpenType_extensions: - break - bold = "bold" in font_name - italic = "italic" in font_name - if tag.text == "path" and font_name is not None: - font_path = next(sub_elements).text - _addfont(_simplename(font_name),bold,italic,font_path,fonts) - break - - -def _system_profiler_darwin(): - fonts = {} - flout, flerr = subprocess.Popen( - ' '.join(['system_profiler', '-xml','SPFontsDataType']), - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - close_fds=True - ).communicate() - - for font_node in ET.fromstring(flout).iterfind('./array/dict/array/dict'): - _add_font_paths(font_node.iter("*"), fonts) - - return fonts - - - -def initsysfonts_darwin(): - """ Read the fonts on MacOS, and OS X. - """ - # if the X11 binary exists... try and use that. - # Not likely to be there on pre 10.4.x ... 
or MacOS 10.10+ - if exists('/usr/X11/bin/fc-list'): - fonts = initsysfonts_unix('/usr/X11/bin/fc-list') - # This fc-list path will work with the X11 from the OS X 10.3 installation - # disc - elif exists('/usr/X11R6/bin/fc-list'): - fonts = initsysfonts_unix('/usr/X11R6/bin/fc-list') - elif exists('/usr/sbin/system_profiler'): - try: - fonts = _system_profiler_darwin() - except: - fonts = {} - else: - fonts = {} - - return fonts - - -# read the fonts on unix -def initsysfonts_unix(path="fc-list"): - """use the fc-list from fontconfig to get a list of fonts""" - fonts = {} - - try: - # note, we capture stderr so if fc-list isn't there to stop stderr - # printing. - flout, flerr = subprocess.Popen('%s : file family style' % path, shell=True, - stdout=subprocess.PIPE, stderr=subprocess.PIPE, - close_fds=True).communicate() - except Exception: - return fonts - - entries = toascii(flout) - try: - for line in entries.split('\n'): - - try: - filename, family, style = line.split(':', 2) - if splitext(filename)[1].lower() in OpenType_extensions: - bold = 'Bold' in style - italic = 'Italic' in style - oblique = 'Oblique' in style - for name in family.strip().split(','): - if name: - break - else: - name = splitext(basename(filename))[0] - - _addfont( - _simplename(name), bold, italic or oblique, filename, fonts) - - except Exception: - # try the next one. 
- pass - - except Exception: - pass - - return fonts - - -def create_aliases(): - """map common fonts that are absent from the system to similar fonts that are installed in the system""" - alias_groups = ( - ('monospace', 'misc-fixed', 'courier', 'couriernew', 'console', - 'fixed', 'mono', 'freemono', 'bitstreamverasansmono', - 'verasansmono', 'monotype', 'lucidaconsole'), - ('sans', 'arial', 'helvetica', 'swiss', 'freesans', - 'bitstreamverasans', 'verasans', 'verdana', 'tahoma'), - ('serif', 'times', 'freeserif', 'bitstreamveraserif', 'roman', - 'timesroman', 'timesnewroman', 'dutch', 'veraserif', - 'georgia'), - ('wingdings', 'wingbats'), - ) - for alias_set in alias_groups: - for name in alias_set: - if name in Sysfonts: - found = Sysfonts[name] - break - else: - continue - for name in alias_set: - if name not in Sysfonts: - Sysalias[name] = found - - -# initialize it all, called once -def initsysfonts(): - if sys.platform == 'win32': - fonts = initsysfonts_win32() - elif sys.platform == 'darwin': - fonts = initsysfonts_darwin() - else: - fonts = initsysfonts_unix() - Sysfonts.update(fonts) - create_aliases() - if not Sysfonts: # dummy so we don't try to reinit - Sysfonts[None] = None - - -# pygame.font specific declarations -def font_constructor(fontpath, size, bold, italic): - import pygame.font - - font = pygame.font.Font(fontpath, size) - if bold: - font.set_bold(1) - if italic: - font.set_italic(1) - - return font - - -# the exported functions - -def SysFont(name, size, bold=False, italic=False, constructor=None): - """pygame.font.SysFont(name, size, bold=False, italic=False, constructor=None) -> Font - create a pygame Font from system font resources - - This will search the system fonts for the given font - name. You can also enable bold or italic styles, and - the appropriate system font will be selected if available. - - This will always return a valid Font object, and will - fallback on the builtin pygame font if the given font - is not found. 
- - Name can also be a comma separated list of names, in - which case set of names will be searched in order. Pygame - uses a small set of common font aliases, if the specific - font you ask for is not available, a reasonable alternative - may be used. - - if optional contructor is provided, it must be a function with - signature constructor(fontpath, size, bold, italic) which returns - a Font instance. If None, a pygame.font.Font object is created. - """ - if constructor is None: - constructor = font_constructor - - if not Sysfonts: - initsysfonts() - - gotbold = gotitalic = False - fontname = None - if name: - allnames = name - for name in allnames.split(','): - name = _simplename(name) - styles = Sysfonts.get(name) - if not styles: - styles = Sysalias.get(name) - if styles: - plainname = styles.get((False, False)) - fontname = styles.get((bold, italic)) - if not fontname and not plainname: - # Neither requested style, nor plain font exists, so - # return a font with the name requested, but an - # arbitrary style. - (style, fontname) = list(styles.items())[0] - # Attempt to style it as requested. This can't - # unbold or unitalicize anything, but it can - # fake bold and/or fake italicize. - if bold and style[0]: - gotbold = True - if italic and style[1]: - gotitalic = True - elif not fontname: - fontname = plainname - elif plainname != fontname: - gotbold = bold - gotitalic = italic - if fontname: - break - - set_bold = set_italic = False - if bold and not gotbold: - set_bold = True - if italic and not gotitalic: - set_italic = True - - return constructor(fontname, size, set_bold, set_italic) - - -def get_fonts(): - """pygame.font.get_fonts() -> list - get a list of system font names - - Returns the list of all found system fonts. Note that - the names of the fonts will be all lowercase with spaces - removed. This is how pygame internally stores the font - names for matching. 
- """ - if not Sysfonts: - initsysfonts() - return list(Sysfonts) - - -def match_font(name, bold=0, italic=0): - """pygame.font.match_font(name, bold=0, italic=0) -> name - find the filename for the named system font - - This performs the same font search as the SysFont() - function, only it returns the path to the TTF file - that would be loaded. The font name can be a comma - separated list of font names to try. - - If no match is found, None is returned. - """ - if not Sysfonts: - initsysfonts() - - fontname = None - allnames = name - for name in allnames.split(','): - name = _simplename(name) - styles = Sysfonts.get(name) - if not styles: - styles = Sysalias.get(name) - if styles: - while not fontname: - fontname = styles.get((bold, italic)) - if italic: - italic = 0 - elif bold: - bold = 0 - elif not fontname: - fontname = list(styles.values())[0] - if fontname: - break - return fontname diff --git a/venv/lib/python3.7/site-packages/pygame/tests/__init__.py b/venv/lib/python3.7/site-packages/pygame/tests/__init__.py deleted file mode 100644 index 48cfdce..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -"""Pygame unit test suite package - -Exports function run() - -A quick way to run the test suite package from the command line -is by importing the go submodule: - -python -m "import pygame.tests" [] - -Command line option --help displays a usage message. Available options -correspond to the pygame.tests.run arguments. - -The xxxx_test submodules of the tests package are unit test suites for -individual parts of Pygame. Each can also be run as a main program. This is -useful if the test, such as cdrom_test, is interactive. - -For Pygame development the test suite can be run from a Pygame distribution -root directory using run_tests.py. Alternately, test/__main__.py can be run -directly. 
- -""" - -if __name__ == 'pygame.tests': - from pygame.tests.test_utils.run_tests import run -elif __name__ == '__main__': - import os - import sys - pkg_dir = os.path.split(os.path.abspath(__file__))[0] - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) - - if is_pygame_pkg: - import pygame.tests.__main__ - else: - import test.__main__ -else: - from test.test_utils.run_tests import run diff --git a/venv/lib/python3.7/site-packages/pygame/tests/__main__.py b/venv/lib/python3.7/site-packages/pygame/tests/__main__.py deleted file mode 100644 index 51b2e13..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/__main__.py +++ /dev/null @@ -1,133 +0,0 @@ -"""Load and run the Pygame test suite - -python -c "import pygame.tests.go" [] - -or - -python test/go.py [] - -Command line option --help displays a command line usage message. - -run_tests.py in the main distribution directory is an alternative to test.go - -""" - -import sys - -if __name__ == '__main__': - import os - pkg_dir = os.path.split(os.path.abspath(__file__))[0] - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -if is_pygame_pkg: - from pygame.tests.test_utils.run_tests import run_and_exit - from pygame.tests.test_utils.test_runner import opt_parser -else: - from test.test_utils.run_tests import run_and_exit - from test.test_utils.test_runner import opt_parser - -if is_pygame_pkg: - test_pkg_name = "pygame.tests" -else: - test_pkg_name = "test" -program_name = sys.argv[0] -if program_name == '-c': - program_name = 'python -c "import %s.go"' % test_pkg_name - -########################################################################### -# Set additional command 
line options -# -# Defined in test_runner.py as it shares options, added to here - -opt_parser.set_usage(""" - -Runs all or some of the %(pkg)s.xxxx_test tests. - -$ %(exec)s sprite threads -sd - -Runs the sprite and threads module tests isolated in subprocesses, dumping -all failing tests info in the form of a dict. - -""" % {'pkg': test_pkg_name, 'exec': program_name}) - -opt_parser.add_option ( - "-d", "--dump", action = 'store_true', - help = "dump results as dict ready to eval" ) - -opt_parser.add_option ( - "-F", "--file", - help = "dump results to a file" ) - -opt_parser.add_option ( - "-m", "--multi_thread", metavar = 'THREADS', type = 'int', - help = "run subprocessed tests in x THREADS" ) - -opt_parser.add_option ( - "-t", "--time_out", metavar = 'SECONDS', type = 'int', - help = "kill stalled subprocessed tests after SECONDS" ) - -opt_parser.add_option ( - "-f", "--fake", metavar = "DIR", - help = "run fake tests in run_tests__tests/$DIR" ) - -opt_parser.add_option ( - "-p", "--python", metavar = "PYTHON", - help = "path to python excutable to run subproccesed tests\n" - "default (sys.executable): %s" % sys.executable) - -opt_parser.add_option ( - "-I", "--interactive", action = 'store_true', - help = "include tests requiring user input") - -opt_parser.add_option( - "-S", "--seed", type = 'int', - help = "Randomisation seed" -) - -########################################################################### -# Set run() keyword arguements according to command line arguemnts. -# args will be the test module list, passed as positional argumemts. 
- -options, args = opt_parser.parse_args() - -kwds = {} -if options.incomplete: - kwds['incomplete'] = True -if options.usesubprocess: - kwds['usesubprocess'] = True -else: - kwds['usesubprocess'] = False -if options.dump: - kwds['dump'] = True -if options.file: - kwds['file'] = options.file -if options.exclude: - kwds['exclude'] = options.exclude -if options.unbuffered: - kwds['unbuffered'] = True -if options.randomize: - kwds['randomize'] = True -if options.seed is not None: - kwds['seed'] = options.seed -if options.multi_thread is not None: - kwds['multi_thread'] = options.multi_thread -if options.time_out is not None: - kwds['time_out'] = options.time_out -if options.fake: - kwds['fake'] = options.fake -if options.python: - kwds['python'] = options.python -if options.interactive: - kwds['interactive'] = True - -########################################################################### -# Run the test suite. -run_and_exit(*args, **kwds) - - diff --git a/venv/lib/python3.7/site-packages/pygame/tests/base_test.py b/venv/lib/python3.7/site-packages/pygame/tests/base_test.py deleted file mode 100644 index 86c9ccb..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/base_test.py +++ /dev/null @@ -1,638 +0,0 @@ -# -*- coding: utf8 -*- - -import sys -import unittest - -import platform -IS_PYPY = 'PyPy' == platform.python_implementation() - -try: - from pygame.tests.test_utils import arrinter -except NameError: - pass -import pygame - - -init_called = quit_called = 0 -def __PYGAMEinit__(): #called automatically by pygame.init() - global init_called - init_called = init_called + 1 - pygame.register_quit(pygame_quit) -def pygame_quit(): - global quit_called - quit_called = quit_called + 1 - - -quit_hook_ran = 0 -def quit_hook(): - global quit_hook_ran - quit_hook_ran = 1 - - -class BaseModuleTest(unittest.TestCase): - - def tearDown(self): - # Clean up after each test method. 
- pygame.quit() - - def testAutoInit(self): - pygame.init() - pygame.quit() - self.assertEqual(init_called, 1) - self.assertEqual(quit_called, 1) - - def test_get_sdl_byteorder(self): - """Ensure the SDL byte order is valid""" - byte_order = pygame.get_sdl_byteorder() - expected_options = (pygame.LIL_ENDIAN, pygame.BIG_ENDIAN) - - self.assertIn(byte_order, expected_options) - - def test_get_sdl_version(self): - """Ensure the SDL version is valid""" - self.assertEqual(len(pygame.get_sdl_version()), 3) - - class ExporterBase(object): - def __init__(self, shape, typechar, itemsize): - import ctypes - - ndim = len(shape) - self.ndim = ndim - self.shape = tuple(shape) - array_len = 1 - for d in shape: - array_len *= d - self.size = itemsize * array_len - self.parent = ctypes.create_string_buffer(self.size) - self.itemsize = itemsize - strides = [itemsize] * ndim - for i in range(ndim - 1, 0, -1): - strides[i - 1] = strides[i] * shape[i] - self.strides = tuple(strides) - self.data = ctypes.addressof(self.parent), False - if self.itemsize == 1: - byteorder = '|' - elif sys.byteorder == 'big': - byteorder = '>' - else: - byteorder = '<' - self.typestr = byteorder + typechar + str(self.itemsize) - - def assertSame(self, proxy, obj): - self.assertEqual(proxy.length, obj.size) - iface = proxy.__array_interface__ - self.assertEqual(iface['typestr'], obj.typestr) - self.assertEqual(iface['shape'], obj.shape) - self.assertEqual(iface['strides'], obj.strides) - self.assertEqual(iface['data'], obj.data) - - def test_PgObject_GetBuffer_array_interface(self): - from pygame.bufferproxy import BufferProxy - - class Exporter(self.ExporterBase): - def get__array_interface__(self): - return {'version': 3, - 'typestr': self.typestr, - 'shape': self.shape, - 'strides': self.strides, - 'data': self.data} - __array_interface__ = property(get__array_interface__) - # Should be ignored by PgObject_GetBuffer - __array_struct__ = property(lambda self: None) - - _shape = [2, 3, 5, 7, 11] # Some 
prime numbers - for ndim in range(1, len(_shape)): - o = Exporter(_shape[0:ndim], 'i', 2) - v = BufferProxy(o) - self.assertSame(v, o) - ndim = 2 - shape = _shape[0:ndim] - for typechar in ('i', 'u'): - for itemsize in (1, 2, 4, 8): - o = Exporter(shape, typechar, itemsize) - v = BufferProxy(o) - self.assertSame(v, o) - for itemsize in (4, 8): - o = Exporter(shape, 'f', itemsize) - v = BufferProxy(o) - self.assertSame(v, o) - - # Is the dict received from an exporting object properly released? - # The dict should be freed before PgObject_GetBuffer returns. - # When the BufferProxy v's length property is referenced, v calls - # PgObject_GetBuffer, which in turn references Exporter2 o's - # __array_interface__ property. The Exporter2 instance o returns a - # dict subclass for which it keeps both a regular reference and a - # weak reference. The regular reference should be the only - # remaining reference when PgObject_GetBuffer returns. This is - # verified by first checking the weak reference both before and - # after the regular reference held by o is removed. 
- - import weakref, gc - - class NoDictError(RuntimeError): - pass - - class WRDict(dict): - """Weak referenceable dict""" - pass - - class Exporter2(Exporter): - def get__array_interface__2(self): - self.d = WRDict(Exporter.get__array_interface__(self)) - self.dict_ref = weakref.ref(self.d) - return self.d - __array_interface__ = property(get__array_interface__2) - def free_dict(self): - self.d = None - def is_dict_alive(self): - try: - return self.dict_ref() is not None - except AttributeError: - raise NoDictError("__array_interface__ is unread") - - o = Exporter2((2, 4), 'u', 4) - v = BufferProxy(o) - self.assertRaises(NoDictError, o.is_dict_alive) - length = v.length - self.assertTrue(o.is_dict_alive()) - o.free_dict() - gc.collect() - self.assertFalse(o.is_dict_alive()) - - def test_GetView_array_struct(self): - from pygame.bufferproxy import BufferProxy - - class Exporter(self.ExporterBase): - def __init__(self, shape, typechar, itemsize): - super(Exporter, self).__init__(shape, typechar, itemsize) - self.view = BufferProxy(self.__dict__) - - def get__array_struct__(self): - return self.view.__array_struct__ - __array_struct__ = property(get__array_struct__) - # Should not cause PgObject_GetBuffer to fail - __array_interface__ = property(lambda self: None) - - _shape = [2, 3, 5, 7, 11] # Some prime numbers - for ndim in range(1, len(_shape)): - o = Exporter(_shape[0:ndim], 'i', 2) - v = BufferProxy(o) - self.assertSame(v, o) - ndim = 2 - shape = _shape[0:ndim] - for typechar in ('i', 'u'): - for itemsize in (1, 2, 4, 8): - o = Exporter(shape, typechar, itemsize) - v = BufferProxy(o) - self.assertSame(v, o) - for itemsize in (4, 8): - o = Exporter(shape, 'f', itemsize) - v = BufferProxy(o) - self.assertSame(v, o) - - # Check returned cobject/capsule reference count - try: - from sys import getrefcount - except ImportError: - # PyPy: no reference counting - pass - else: - o = Exporter(shape, typechar, itemsize) - 
self.assertEqual(getrefcount(o.__array_struct__), 1) - - if pygame.HAVE_NEWBUF: - from pygame.tests.test_utils import buftools - - def NEWBUF_assertSame(self, proxy, exp): - buftools = self.buftools - Importer = buftools.Importer - self.assertEqual(proxy.length, exp.len) - imp = Importer(proxy, buftools.PyBUF_RECORDS_RO) - self.assertEqual(imp.readonly, exp.readonly) - self.assertEqual(imp.format, exp.format) - self.assertEqual(imp.itemsize, exp.itemsize) - self.assertEqual(imp.ndim, exp.ndim) - self.assertEqual(imp.shape, exp.shape) - self.assertEqual(imp.strides, exp.strides) - self.assertTrue(imp.suboffsets is None) - - @unittest.skipIf(not pygame.HAVE_NEWBUF, 'newbuf not implemented') - def test_newbuf(self): - from pygame.bufferproxy import BufferProxy - - Exporter = self.buftools.Exporter - _shape = [2, 3, 5, 7, 11] # Some prime numbers - for ndim in range(1, len(_shape)): - o = Exporter(_shape[0:ndim], '=h') - v = BufferProxy(o) - self.NEWBUF_assertSame(v, o) - ndim = 2 - shape = _shape[0:ndim] - for format in ['b', 'B', '=h', '=H', '=i', '=I', '=q', '=Q', 'f', 'd', - '1h', '=1h', 'x', '1x', '2x', '3x', '4x', '5x', '6x', - '7x', '8x', '9x']: - o = Exporter(shape, format) - v = BufferProxy(o) - self.NEWBUF_assertSame(v, o) - - @unittest.skipIf(not pygame.HAVE_NEWBUF, 'newbuf not implemented') - def test_bad_format(self): - from pygame.bufferproxy import BufferProxy - from pygame.newbuffer import BufferMixin - from ctypes import create_string_buffer, addressof - - buftools = self.buftools - Exporter = buftools.Exporter - Importer = buftools.Importer - PyBUF_FORMAT = buftools.PyBUF_FORMAT - - for format in ['', '=', '1', ' ', '2h', '=2h', - '0x', '11x', '=!', 'h ', ' h', 'hh', '?']: - exp = Exporter((1,), format, itemsize=2) - b = BufferProxy(exp) - self.assertRaises(ValueError, Importer, b, PyBUF_FORMAT) - - @unittest.skipIf(not pygame.HAVE_NEWBUF, 'newbuf not implemented') - def test_PgDict_AsBuffer_PyBUF_flags(self): - from pygame.bufferproxy import 
BufferProxy - - is_lil_endian = pygame.get_sdl_byteorder() == pygame.LIL_ENDIAN - fsys, frev = ('<', '>') if is_lil_endian else ('>', '<') - buftools = self.buftools - Importer = buftools.Importer - a = BufferProxy({'typestr': '|u4', - 'shape': (10, 2), - 'data': (9, False)}) # 9? No data accesses. - b = Importer(a, buftools.PyBUF_SIMPLE) - self.assertEqual(b.ndim, 0) - self.assertTrue(b.format is None) - self.assertEqual(b.len, a.length) - self.assertEqual(b.itemsize, 4) - self.assertTrue(b.shape is None) - self.assertTrue(b.strides is None) - self.assertTrue(b.suboffsets is None) - self.assertFalse(b.readonly) - self.assertEqual(b.buf, 9) - b = Importer(a, buftools.PyBUF_WRITABLE) - self.assertEqual(b.ndim, 0) - self.assertTrue(b.format is None) - self.assertEqual(b.len, a.length) - self.assertEqual(b.itemsize, 4) - self.assertTrue(b.shape is None) - self.assertTrue(b.strides is None) - self.assertTrue(b.suboffsets is None) - self.assertFalse(b.readonly) - self.assertEqual(b.buf, 9) - b = Importer(a, buftools.PyBUF_ND) - self.assertEqual(b.ndim, 2) - self.assertTrue(b.format is None) - self.assertEqual(b.len, a.length) - self.assertEqual(b.itemsize, 4) - self.assertEqual(b.shape, (10, 2)) - self.assertTrue(b.strides is None) - self.assertTrue(b.suboffsets is None) - self.assertFalse(b.readonly) - self.assertEqual(b.buf, 9) - a = BufferProxy({'typestr': fsys + 'i2', - 'shape': (5, 10), - 'strides': (24, 2), - 'data': (42, False)}) # 42? No data accesses. 
- b = Importer(a, buftools.PyBUF_STRIDES) - self.assertEqual(b.ndim, 2) - self.assertTrue(b.format is None) - self.assertEqual(b.len, 100) - self.assertEqual(b.itemsize, 2) - self.assertEqual(b.shape, (5, 10)) - self.assertEqual(b.strides, (24, 2)) - self.assertTrue(b.suboffsets is None) - self.assertFalse(b.readonly) - self.assertEqual(b.buf, 42) - b = Importer(a, buftools.PyBUF_FULL_RO) - self.assertEqual(b.ndim, 2) - self.assertEqual(b.format, '=h') - self.assertEqual(b.len, 100) - self.assertEqual(b.itemsize, 2) - self.assertEqual(b.shape, (5, 10)) - self.assertEqual(b.strides, (24, 2)) - self.assertTrue(b.suboffsets is None) - self.assertFalse(b.readonly) - self.assertEqual(b.buf, 42) - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_SIMPLE) - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ND) - self.assertRaises(BufferError, Importer, a, - buftools.PyBUF_C_CONTIGUOUS) - self.assertRaises(BufferError, Importer, a, - buftools.PyBUF_F_CONTIGUOUS) - self.assertRaises(BufferError, Importer, a, - buftools.PyBUF_ANY_CONTIGUOUS) - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_CONTIG) - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_SIMPLE) - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ND) - self.assertRaises(BufferError, Importer, a, - buftools.PyBUF_C_CONTIGUOUS) - self.assertRaises(BufferError, Importer, a, - buftools.PyBUF_F_CONTIGUOUS) - self.assertRaises(BufferError, Importer, a, - buftools.PyBUF_ANY_CONTIGUOUS) - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_CONTIG) - a = BufferProxy({'typestr': frev + 'i2', - 'shape': (3, 5, 10), - 'strides': (120, 24, 2), - 'data': (1000000, True)}) # 1000000? No data accesses. 
- b = Importer(a, buftools.PyBUF_FULL_RO) - self.assertEqual(b.ndim, 3) - self.assertEqual(b.format, frev + 'h') - self.assertEqual(b.len, 300) - self.assertEqual(b.itemsize, 2) - self.assertEqual(b.shape, (3, 5, 10)) - self.assertEqual(b.strides, (120, 24, 2)) - self.assertTrue(b.suboffsets is None) - self.assertTrue(b.readonly) - self.assertEqual(b.buf, 1000000) - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_FULL) - - @unittest.skipIf(IS_PYPY or (not pygame.HAVE_NEWBUF), 'newbuf with ctypes') - def test_PgObject_AsBuffer_PyBUF_flags(self): - from pygame.bufferproxy import BufferProxy - import ctypes - - is_lil_endian = pygame.get_sdl_byteorder() == pygame.LIL_ENDIAN - fsys, frev = ('<', '>') if is_lil_endian else ('>', '<') - buftools = self.buftools - Importer = buftools.Importer - e = arrinter.Exporter((10, 2), typekind='f', - itemsize=ctypes.sizeof(ctypes.c_double)) - a = BufferProxy(e) - b = Importer(a, buftools.PyBUF_SIMPLE) - self.assertEqual(b.ndim, 0) - self.assertTrue(b.format is None) - self.assertEqual(b.len, e.len) - self.assertEqual(b.itemsize, e.itemsize) - self.assertTrue(b.shape is None) - self.assertTrue(b.strides is None) - self.assertTrue(b.suboffsets is None) - self.assertFalse(b.readonly) - self.assertEqual(b.buf, e.data) - b = Importer(a, buftools.PyBUF_WRITABLE) - self.assertEqual(b.ndim, 0) - self.assertTrue(b.format is None) - self.assertEqual(b.len, e.len) - self.assertEqual(b.itemsize, e.itemsize) - self.assertTrue(b.shape is None) - self.assertTrue(b.strides is None) - self.assertTrue(b.suboffsets is None) - self.assertFalse(b.readonly) - self.assertEqual(b.buf, e.data) - b = Importer(a, buftools.PyBUF_ND) - self.assertEqual(b.ndim, e.nd) - self.assertTrue(b.format is None) - self.assertEqual(b.len, a.length) - self.assertEqual(b.itemsize, e.itemsize) - self.assertEqual(b.shape, e.shape) - self.assertTrue(b.strides is None) - self.assertTrue(b.suboffsets is None) - self.assertFalse(b.readonly) - self.assertEqual(b.buf, 
e.data) - e = arrinter.Exporter((5, 10), typekind='i', itemsize=2, - strides=(24, 2)) - a = BufferProxy(e) - b = Importer(a, buftools.PyBUF_STRIDES) - self.assertEqual(b.ndim, e.nd) - self.assertTrue(b.format is None) - self.assertEqual(b.len, e.len) - self.assertEqual(b.itemsize, e.itemsize) - self.assertEqual(b.shape, e.shape) - self.assertEqual(b.strides, e.strides) - self.assertTrue(b.suboffsets is None) - self.assertFalse(b.readonly) - self.assertEqual(b.buf, e.data) - b = Importer(a, buftools.PyBUF_FULL_RO) - self.assertEqual(b.ndim, e.nd) - self.assertEqual(b.format, '=h') - self.assertEqual(b.len, e.len) - self.assertEqual(b.itemsize, e.itemsize) - self.assertEqual(b.shape, e.shape) - self.assertEqual(b.strides, e.strides) - self.assertTrue(b.suboffsets is None) - self.assertFalse(b.readonly) - self.assertEqual(b.buf, e.data) - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_SIMPLE) - self.assertRaises(BufferError, Importer, a, - buftools.PyBUF_WRITABLE) - self.assertRaises(BufferError, Importer, a, - buftools.PyBUF_WRITABLE) - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ND) - self.assertRaises(BufferError, Importer, a, - buftools.PyBUF_C_CONTIGUOUS) - self.assertRaises(BufferError, Importer, a, - buftools.PyBUF_F_CONTIGUOUS) - self.assertRaises(BufferError, Importer, a, - buftools.PyBUF_ANY_CONTIGUOUS) - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_CONTIG) - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_SIMPLE) - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ND) - self.assertRaises(BufferError, Importer, a, - buftools.PyBUF_C_CONTIGUOUS) - self.assertRaises(BufferError, Importer, a, - buftools.PyBUF_F_CONTIGUOUS) - self.assertRaises(BufferError, Importer, a, - buftools.PyBUF_ANY_CONTIGUOUS) - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_CONTIG) - e = arrinter.Exporter((3, 5, 10), typekind='i', itemsize=2, - strides=(120, 24, 2), - flags=arrinter.PAI_ALIGNED) - a = 
BufferProxy(e) - b = Importer(a, buftools.PyBUF_FULL_RO) - self.assertEqual(b.ndim, e.nd) - self.assertEqual(b.format, frev + 'h') - self.assertEqual(b.len, e.len) - self.assertEqual(b.itemsize, e.itemsize) - self.assertEqual(b.shape, e.shape) - self.assertEqual(b.strides, e.strides) - self.assertTrue(b.suboffsets is None) - self.assertTrue(b.readonly) - self.assertEqual(b.buf, e.data) - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_FULL) - - def test_PgObject_GetBuffer_exception(self): - # For consistency with surfarray - from pygame.bufferproxy import BufferProxy - - bp = BufferProxy(1) - self.assertRaises(ValueError, getattr, bp, 'length') - - def not_init_assertions(self): - self.assertFalse(pygame.get_init(), "pygame shouldn't be initialized") - self.assertFalse(pygame.display.get_init(), - "display shouldn't be initialized") - - if 'pygame.mixer' in sys.modules: - self.assertFalse(pygame.mixer.get_init(), - "mixer shouldn't be initialized") - - if 'pygame.font' in sys.modules: - self.assertFalse(pygame.font.get_init(), - "init shouldn't be initialized") - - ## !!! TODO : Remove when scrap works for OS X - import platform - if platform.system().startswith('Darwin'): - return - - try: - self.assertRaises(pygame.error, pygame.scrap.get) - except NotImplementedError: - # Scrap is optional. 
- pass - - # pygame.cdrom - # pygame.joystick - - def init_assertions(self): - self.assertTrue(pygame.get_init()) - self.assertTrue(pygame.display.get_init()) - - if 'pygame.mixer' in sys.modules: - self.assertTrue(pygame.mixer.get_init()) - - if 'pygame.font' in sys.modules: - self.assertTrue(pygame.font.get_init()) - - def test_quit__and_init(self): - # __doc__ (as of 2008-06-25) for pygame.base.quit: - - # pygame.quit(): return None - # uninitialize all pygame modules - - # Make sure everything is not init - self.not_init_assertions() - - # Initiate it - pygame.init() - - # Check - self.init_assertions() - - # Quit - pygame.quit() - - # All modules have quit - self.not_init_assertions() - - def test_register_quit(self): - """Ensure that a registered function is called on quit()""" - self.assertFalse(quit_hook_ran) - - pygame.init() - pygame.register_quit(quit_hook) - pygame.quit() - - self.assertTrue(quit_hook_ran) - - def test_get_error(self): - - # __doc__ (as of 2008-08-02) for pygame.base.get_error: - - # pygame.get_error(): return errorstr - # get the current error message - # - # SDL maintains an internal error message. This message will usually - # be given to you when pygame.error is raised. You will rarely need to - # call this function. - # - - # The first error could be all sorts of nonsense or empty. - e = pygame.get_error() - pygame.set_error("hi") - self.assertEqual(pygame.get_error(), "hi") - pygame.set_error("") - self.assertEqual(pygame.get_error(), "") - - - - def test_set_error(self): - - # The first error could be all sorts of nonsense or empty. 
- e = pygame.get_error() - pygame.set_error("hi") - self.assertEqual(pygame.get_error(), "hi") - pygame.set_error("") - self.assertEqual(pygame.get_error(), "") - - def test_unicode_error(self): - if sys.version_info.major > 2: - pygame.set_error(u'你好') - self.assertEqual(u'你好', pygame.get_error()) - else: - # no unicode objects for now - pygame.set_error(u'你好') - encstr = u'你好'.encode('utf8') - self.assertEqual(encstr, pygame.get_error()) - - def test_init(self): - - # __doc__ (as of 2008-08-02) for pygame.base.init: - - # pygame.init(): return (numpass, numfail) - # initialize all imported pygame modules - # - # Initialize all imported Pygame modules. No exceptions will be raised - # if a module fails, but the total number if successful and failed - # inits will be returned as a tuple. You can always initialize - # individual modules manually, but pygame.init is a convenient way to - # get everything started. The init() functions for individual modules - # will raise exceptions when they fail. - # - # You may want to initalise the different modules seperately to speed - # up your program or to not use things your game does not. - # - # It is safe to call this init() more than once: repeated calls will - # have no effect. This is true even if you have pygame.quit() all the - # modules. - # - - - - # Make sure everything is not init - self.not_init_assertions() - - # Initiate it - pygame.init() - - # Check - self.init_assertions() - - # Quit - pygame.quit() - - # All modules have quit - self.not_init_assertions() - - def test_get_init(self): - # Test if get_init() gets the init state. - self.assertFalse(pygame.get_init()) - - def test_get_init__after_init(self): - # Test if get_init() gets the init state after pygame.init() called. - pygame.init() - - self.assertTrue(pygame.get_init()) - - def test_get_init__after_quit(self): - # Test if get_init() gets the init state after pygame.quit() called. 
- pygame.init() - pygame.quit() - - self.assertFalse(pygame.get_init()) - - def todo_test_segfault(self): - - # __doc__ (as of 2008-08-02) for pygame.base.segfault: - - # crash - - self.fail() - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/blit_test.py b/venv/lib/python3.7/site-packages/pygame/tests/blit_test.py deleted file mode 100644 index f551a68..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/blit_test.py +++ /dev/null @@ -1,159 +0,0 @@ -import unittest - -import pygame -from pygame.locals import * - -class BlitTest( unittest.TestCase ): - def test_SRCALPHA( self ): - """ SRCALPHA tests. - """ - #blend(s, 0, d) = d - s = pygame.Surface((1,1), SRCALPHA, 32) - s.fill((255, 255,255, 0)) - - d = pygame.Surface((1,1), SRCALPHA, 32) - d.fill((0, 0,255, 255)) - - s.blit(d, (0,0)) - self.assertEqual(s.get_at((0,0)), d.get_at((0,0)) ) - - #blend(s, 255, d) = s - s = pygame.Surface((1,1), SRCALPHA, 32) - s.fill((123, 0, 0, 255)) - s1 = pygame.Surface((1,1), SRCALPHA, 32) - s1.fill((123, 0, 0, 255)) - d = pygame.Surface((1,1), SRCALPHA, 32) - d.fill((10, 0,0, 0)) - s.blit(d, (0,0)) - self.assertEqual(s.get_at((0,0)), s1.get_at((0,0)) ) - - #TODO: these should be true too. - #blend(0, sA, 0) = 0 - #blend(255, sA, 255) = 255 - #blend(s, sA, d) <= 255 - - def test_BLEND( self ): - """ BLEND_ tests. - """ - - #test that it doesn't overflow, and that it is saturated. - s = pygame.Surface((1,1), SRCALPHA, 32) - s.fill((255, 255,255, 0)) - - d = pygame.Surface((1,1), SRCALPHA, 32) - d.fill((0, 0,255, 255)) - - s.blit(d, (0,0), None, BLEND_ADD) - - #print "d %s" % (d.get_at((0,0)),) - #print s.get_at((0,0)) - #self.assertEqual(s.get_at((0,0))[2], 255 ) - #self.assertEqual(s.get_at((0,0))[3], 0 ) - - - - s.blit(d, (0,0), None, BLEND_RGBA_ADD) - #print s.get_at((0,0)) - self.assertEqual(s.get_at((0,0))[3], 255 ) - - - # test adding works. 
- s.fill((20, 255,255, 0)) - d.fill((10, 0,255, 255)) - s.blit(d, (0,0), None, BLEND_ADD) - self.assertEqual(s.get_at((0,0))[2], 255 ) - - # test subbing works. - s.fill((20, 255,255, 0)) - d.fill((10, 0,255, 255)) - s.blit(d, (0,0), None, BLEND_SUB) - self.assertEqual(s.get_at((0,0))[0], 10 ) - - # no overflow in sub blend. - s.fill((20, 255,255, 0)) - d.fill((30, 0,255, 255)) - s.blit(d, (0,0), None, BLEND_SUB) - self.assertEqual(s.get_at((0,0))[0], 0 ) - - - def make_blit_list(self, num_surfs): - - blit_list = [] - for i in range(num_surfs): - dest = (i * 10, 0) - surf = pygame.Surface((10, 10), SRCALPHA, 32) - color = (i * 1, i * 1, i * 1) - surf.fill(color) - blit_list.append((surf, dest)) - return blit_list - - def test_blits(self): - - NUM_SURFS = 255 - PRINT_TIMING = 0 - dst = pygame.Surface((NUM_SURFS * 10, 10), SRCALPHA, 32) - dst.fill((230, 230, 230)) - blit_list = self.make_blit_list(NUM_SURFS) - - def blits(blit_list): - for surface, dest in blit_list: - dst.blit(surface, dest) - - from time import time - t0 = time() - results = blits(blit_list) - t1 = time() - if PRINT_TIMING: - print("python blits: %s" % (t1-t0)) - - dst.fill((230, 230, 230)) - t0 = time() - results = dst.blits(blit_list) - t1 = time() - if PRINT_TIMING: - print("Surface.blits :%s" % (t1-t0)) - - - # check if we blit all the different colors in the correct spots. 
- for i in range(NUM_SURFS): - color = (i * 1, i * 1, i * 1) - self.assertEqual(dst.get_at((i * 10, 0)), color) - self.assertEqual(dst.get_at(((i * 10) + 5, 5)), color) - - self.assertEqual(len(results), NUM_SURFS) - - t0 = time() - results = dst.blits(blit_list, doreturn = 0) - t1 = time() - if PRINT_TIMING: - print("Surface.blits doreturn=0: %s" % (t1-t0)) - self.assertEqual(results, None) - - - t0 = time() - results = dst.blits(((surf, dest) for surf, dest in blit_list)) - t1 = time() - if PRINT_TIMING: - print("Surface.blits generator: %s" % (t1-t0)) - - - def test_blits_not_sequence(self): - dst = pygame.Surface((100, 10), SRCALPHA, 32) - self.assertRaises(ValueError, dst.blits, None) - - def test_blits_wrong_length(self): - dst = pygame.Surface((100, 10), SRCALPHA, 32) - self.assertRaises(ValueError, dst.blits, [pygame.Surface((10, 10), SRCALPHA, 32)]) - - def test_blits_bad_surf_args(self): - dst = pygame.Surface((100, 10), SRCALPHA, 32) - self.assertRaises(TypeError, dst.blits, [(None, None)]) - - def test_blits_bad_dest(self): - dst = pygame.Surface((100, 10), SRCALPHA, 32) - self.assertRaises(TypeError, dst.blits, [(pygame.Surface((10, 10), SRCALPHA, 32), None)]) - - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/bufferproxy_test.py b/venv/lib/python3.7/site-packages/pygame/tests/bufferproxy_test.py deleted file mode 100644 index 7e1bcca..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/bufferproxy_test.py +++ /dev/null @@ -1,483 +0,0 @@ -import sys -import re -import weakref -import gc -import ctypes -import unittest - - -import pygame -from pygame.bufferproxy import BufferProxy -from pygame.compat import as_bytes - -try: - BufferError -except NameError: - from pygame import BufferError - - -class BufferProxyTest(unittest.TestCase): - view_keywords = {'shape': (5, 4, 3), - 'typestr': '|u1', - 'data': (0, True), - 'strides': (4, 20, 1)} - - def test_module_name(self): - 
self.assertEqual(pygame.bufferproxy.__name__, - "pygame.bufferproxy") - - def test_class_name(self): - self.assertEqual(BufferProxy.__name__, "BufferProxy") - - def test___array_struct___property(self): - kwds = self.view_keywords - v = BufferProxy(kwds) - d = pygame.get_array_interface(v) - self.assertEqual(len(d), 5) - self.assertEqual(d['version'], 3) - self.assertEqual(d['shape'], kwds['shape']) - self.assertEqual(d['typestr'], kwds['typestr']) - self.assertEqual(d['data'], kwds['data']) - self.assertEqual(d['strides'], kwds['strides']) - - def test___array_interface___property(self): - kwds = self.view_keywords - v = BufferProxy(kwds) - d = v.__array_interface__ - self.assertEqual(len(d), 5) - self.assertEqual(d['version'], 3) - self.assertEqual(d['shape'], kwds['shape']) - self.assertEqual(d['typestr'], kwds['typestr']) - self.assertEqual(d['data'], kwds['data']) - self.assertEqual(d['strides'], kwds['strides']) - - def test_parent_property(self): - kwds = dict(self.view_keywords) - p = [] - kwds['parent'] = p - v = BufferProxy(kwds) - - self.assertIs(v.parent, p) - - def test_before(self): - def callback(parent): - success.append(parent is p) - - class MyException(Exception): - pass - - def raise_exception(parent): - raise MyException("Just a test.") - - kwds = dict(self.view_keywords) - p = [] - kwds['parent'] = p - - # For array interface - success = [] - kwds['before'] = callback - v = BufferProxy(kwds) - self.assertEqual(len(success), 0) - d = v.__array_interface__ - self.assertEqual(len(success), 1) - self.assertTrue(success[0]) - d = v.__array_interface__ - self.assertEqual(len(success), 1) - d = v = None - gc.collect() - self.assertEqual(len(success), 1) - - # For array struct - success = [] - kwds['before'] = callback - v = BufferProxy(kwds) - self.assertEqual(len(success), 0) - c = v.__array_struct__ - self.assertEqual(len(success), 1) - self.assertTrue(success[0]) - c = v.__array_struct__ - self.assertEqual(len(success), 1) - c = v = None - 
gc.collect() - self.assertEqual(len(success), 1) - - # Callback raises an exception - kwds['before'] = raise_exception - v = BufferProxy(kwds) - self.assertRaises(MyException, lambda : v.__array_struct__) - - def test_after(self): - def callback(parent): - success.append(parent is p) - - kwds = dict(self.view_keywords) - p = [] - kwds['parent'] = p - - # For array interface - success = [] - kwds['after'] = callback - v = BufferProxy(kwds) - self.assertEqual(len(success), 0) - d = v.__array_interface__ - self.assertEqual(len(success), 0) - d = v.__array_interface__ - self.assertEqual(len(success), 0) - d = v = None - gc.collect() - self.assertEqual(len(success), 1) - self.assertTrue(success[0]) - - # For array struct - success = [] - kwds['after'] = callback - v = BufferProxy(kwds) - self.assertEqual(len(success), 0) - c = v.__array_struct__ - self.assertEqual(len(success), 0) - c = v.__array_struct__ - self.assertEqual(len(success), 0) - c = v = None - gc.collect() - self.assertEqual(len(success), 1) - self.assertTrue(success[0]) - - def test_attribute(self): - v = BufferProxy(self.view_keywords) - self.assertRaises(AttributeError, getattr, v, 'undefined') - v.undefined = 12; - self.assertEqual(v.undefined, 12) - del v.undefined - self.assertRaises(AttributeError, getattr, v, 'undefined') - - def test_weakref(self): - v = BufferProxy(self.view_keywords) - weak_v = weakref.ref(v) - - self.assertIs(weak_v(), v) - - v = None - gc.collect() - - self.assertIsNone(weak_v()) - - def test_gc(self): - """refcount agnostic check that contained objects are freed""" - def before_callback(parent): - return r[0] - def after_callback(parent): - return r[1] - class Obj(object): - pass - p = Obj() - a = Obj() - r = [Obj(), Obj()] - weak_p = weakref.ref(p) - weak_a = weakref.ref(a) - weak_r0 = weakref.ref(r[0]) - weak_r1 = weakref.ref(r[1]) - weak_before = weakref.ref(before_callback) - weak_after = weakref.ref(after_callback) - kwds = dict(self.view_keywords) - kwds['parent'] = p - 
kwds['before'] = before_callback - kwds['after'] = after_callback - v = BufferProxy(kwds) - v.some_attribute = a - weak_v = weakref.ref(v) - kwds = p = a = before_callback = after_callback = None - gc.collect() - self.assertTrue(weak_p() is not None) - self.assertTrue(weak_a() is not None) - self.assertTrue(weak_before() is not None) - self.assertTrue(weak_after() is not None) - v = None - [gc.collect() for x in range(4)] - self.assertTrue(weak_v() is None) - self.assertTrue(weak_p() is None) - self.assertTrue(weak_a() is None) - self.assertTrue(weak_before() is None) - self.assertTrue(weak_after() is None) - self.assertTrue(weak_r0() is not None) - self.assertTrue(weak_r1() is not None) - r = None - gc.collect() - self.assertTrue(weak_r0() is None) - self.assertTrue(weak_r1() is None) - - # Cycle removal - kwds = dict(self.view_keywords) - kwds['parent'] = [] - v = BufferProxy(kwds) - v.some_attribute = v - tracked = True - for o in gc.get_objects(): - if o is v: - break - else: - tracked = False - self.assertTrue(tracked) - kwds['parent'].append(v) - kwds = None - gc.collect() - n1 = len(gc.garbage) - v = None - gc.collect() - n2 = len(gc.garbage) - self.assertEqual(n2, n1) - - def test_c_api(self): - api = pygame.bufferproxy._PYGAME_C_API - api_type = type(pygame.base._PYGAME_C_API) - - self.assertIsInstance(api, api_type) - - def test_repr(self): - v = BufferProxy(self.view_keywords) - cname = BufferProxy.__name__ - oname, ovalue = re.findall(r"<([^)]+)\(([^)]+)\)>", repr(v))[0] - self.assertEqual(oname, cname) - self.assertEqual(v.length, int(ovalue)) - - def test_subclassing(self): - class MyBufferProxy(BufferProxy): - def __repr__(self): - return "*%s*" % (BufferProxy.__repr__(self),) - kwds = dict(self.view_keywords) - kwds['parent'] = 0 - v = MyBufferProxy(kwds) - self.assertEqual(v.parent, 0) - r = repr(v) - self.assertEqual(r[:2], '*<') - self.assertEqual(r[-2:], '>*') - - @unittest.skipIf(not pygame.HAVE_NEWBUF, 'newbuf not implemented') - def 
NEWBUF_test_newbuf(self): - from ctypes import string_at - - from pygame.tests.test_utils import buftools - Exporter = buftools.Exporter - Importer = buftools.Importer - exp = Exporter((10,), 'B', readonly=True) - b = BufferProxy(exp) - self.assertEqual(b.length, exp.len) - self.assertEqual(b.raw, string_at(exp.buf, exp.len)) - d = b.__array_interface__ - try: - self.assertEqual(d['typestr'], '|u1') - self.assertEqual(d['shape'], exp.shape) - self.assertEqual(d['strides'], exp.strides) - self.assertEqual(d['data'], (exp.buf, True)) - finally: - d = None - exp = Exporter((3,), '=h') - b = BufferProxy(exp) - self.assertEqual(b.length, exp.len) - self.assertEqual(b.raw, string_at(exp.buf, exp.len)) - d = b.__array_interface__ - try: - lil_endian = pygame.get_sdl_byteorder() == pygame.LIL_ENDIAN - f = '{}i{}'.format('<' if lil_endian else '>', exp.itemsize) - self.assertEqual(d['typestr'], f) - self.assertEqual(d['shape'], exp.shape) - self.assertEqual(d['strides'], exp.strides) - self.assertEqual(d['data'], (exp.buf, False)) - finally: - d = None - - exp = Exporter((10, 2), '=i') - b = BufferProxy(exp) - imp = Importer(b, buftools.PyBUF_RECORDS) - self.assertTrue(imp.obj is b) - self.assertEqual(imp.buf, exp.buf) - self.assertEqual(imp.ndim, exp.ndim) - self.assertEqual(imp.format, exp.format) - self.assertEqual(imp.readonly, exp.readonly) - self.assertEqual(imp.itemsize, exp.itemsize) - self.assertEqual(imp.len, exp.len) - self.assertEqual(imp.shape, exp.shape) - self.assertEqual(imp.strides, exp.strides) - self.assertTrue(imp.suboffsets is None) - - d = {'typestr': '|u1', - 'shape': (10,), - 'strides': (1,), - 'data': (9, True)} # 9? Will not reading the data anyway. 
- b = BufferProxy(d) - imp = Importer(b, buftools.PyBUF_SIMPLE) - self.assertTrue(imp.obj is b) - self.assertEqual(imp.buf, 9) - self.assertEqual(imp.len, 10) - self.assertEqual(imp.format, None) - self.assertEqual(imp.itemsize, 1) - self.assertEqual(imp.ndim, 0) - self.assertTrue(imp.readonly) - self.assertTrue(imp.shape is None) - self.assertTrue(imp.strides is None) - self.assertTrue(imp.suboffsets is None) - - try: - pygame.bufferproxy.get_segcount - except AttributeError: - pass - else: - def test_oldbuf_arg(self): - self.OLDBUF_test_oldbuf_arg() - - def OLDBUF_test_oldbuf_arg(self): - from pygame.bufferproxy import (get_segcount, get_read_buffer, - get_write_buffer) - - content = as_bytes('\x01\x00\x00\x02') * 12 - memory = ctypes.create_string_buffer(content) - memaddr = ctypes.addressof(memory) - def raise_exception(o): - raise ValueError("An exception") - - bf = BufferProxy({'shape': (len(content),), - 'typestr': '|u1', - 'data': (memaddr, False), - 'strides': (1,)}) - seglen, segaddr = get_read_buffer(bf, 0) - self.assertEqual(segaddr, 0) - self.assertEqual(seglen, 0) - seglen, segaddr = get_write_buffer(bf, 0) - self.assertEqual(segaddr, 0) - self.assertEqual(seglen, 0) - segcount, buflen = get_segcount(bf) - self.assertEqual(segcount, 1) - self.assertEqual(buflen, len(content)) - seglen, segaddr = get_read_buffer(bf, 0) - self.assertEqual(segaddr, memaddr) - self.assertEqual(seglen, len(content)) - seglen, segaddr = get_write_buffer(bf, 0) - self.assertEqual(segaddr, memaddr) - self.assertEqual(seglen, len(content)) - - bf = BufferProxy({'shape': (len(content),), - 'typestr': '|u1', - 'data': (memaddr, True), - 'strides': (1,)}) - segcount, buflen = get_segcount(bf) - self.assertEqual(segcount, 1) - self.assertEqual(buflen, len(content)) - seglen, segaddr = get_read_buffer(bf, 0) - self.assertEqual(segaddr, memaddr) - self.assertEqual(seglen, len(content)) - self.assertRaises(ValueError, get_write_buffer, bf, 0) - - bf = BufferProxy({'shape': 
(len(content),), - 'typestr': '|u1', - 'data': (memaddr, True), - 'strides': (1,), - 'before': raise_exception}) - segcount, buflen = get_segcount(bf) - self.assertEqual(segcount, 0) - self.assertEqual(buflen, 0) - - bf = BufferProxy({'shape': (3, 4), - 'typestr': '|u4', - 'data': (memaddr, True), - 'strides': (12, 4)}) - segcount, buflen = get_segcount(bf) - self.assertEqual(segcount, 3 * 4) - self.assertEqual(buflen, 3 * 4 * 4) - for i in range(0, 4): - seglen, segaddr = get_read_buffer(bf, i) - self.assertEqual(segaddr, memaddr + i * 4) - self.assertEqual(seglen, 4) - - -class BufferProxyLegacyTest(unittest.TestCase): - content = as_bytes('\x01\x00\x00\x02') * 12 - buffer = ctypes.create_string_buffer(content) - data = (ctypes.addressof(buffer), True) - - def test_length(self): - - # __doc__ (as of 2008-08-02) for pygame.bufferproxy.BufferProxy.length: - - # The size of the buffer data in bytes. - bf = BufferProxy({'shape': (3, 4), - 'typestr': '|u4', - 'data': self.data, - 'strides': (12, 4)}) - self.assertEqual(bf.length, len(self.content)) - bf = BufferProxy({'shape': (3, 3), - 'typestr': '|u4', - 'data': self.data, - 'strides': (12, 4)}) - self.assertEqual(bf.length, 3*3*4) - - def test_raw(self): - - # __doc__ (as of 2008-08-02) for pygame.bufferproxy.BufferProxy.raw: - - # The raw buffer data as string. The string may contain NUL bytes. - - bf = BufferProxy({'shape': (len(self.content),), - 'typestr': '|u1', - 'data': self.data}) - self.assertEqual(bf.raw, self.content) - bf = BufferProxy({'shape': (3, 4), - 'typestr': '|u4', - 'data': self.data, - 'strides': (4, 12)}) - self.assertEqual(bf.raw, self.content) - bf = BufferProxy({'shape': (3, 4), - 'typestr': '|u1', - 'data': self.data, - 'strides': (16, 4)}) - self.assertRaises(ValueError, getattr, bf, 'raw') - - def test_write(self): - - # __doc__ (as of 2008-08-02) for pygame.bufferproxy.BufferProxy.write: - - # B.write (bufferproxy, buffer, offset) -> None - # - # Writes raw data to the bufferproxy. 
- # - # Writes the raw data from buffer to the BufferProxy object, starting - # at the specified offset within the BufferProxy. - # If the length of the passed buffer exceeds the length of the - # BufferProxy (reduced by the offset), an IndexError will be raised. - from ctypes import c_byte, sizeof, addressof, string_at, memset - - nullbyte = '\x00'.encode('latin_1') - Buf = c_byte * 10 - data_buf = Buf(*range(1, 3 * sizeof(Buf) + 1, 3)) - data = string_at(data_buf, sizeof(data_buf)) - buf = Buf() - bp = BufferProxy({'typestr': '|u1', - 'shape': (sizeof(buf),), - 'data': (addressof(buf), False)}) - try: - self.assertEqual(bp.raw, nullbyte * sizeof(Buf)) - bp.write(data) - self.assertEqual(bp.raw, data) - memset(buf, 0, sizeof(buf)) - bp.write(data[:3], 2) - raw = bp.raw - self.assertEqual(raw[:2], nullbyte * 2) - self.assertEqual(raw[2:5], data[:3]) - self.assertEqual(raw[5:], nullbyte * (sizeof(Buf) - 5)) - bp.write(data[:3], bp.length - 3) - raw = bp.raw - self.assertEqual(raw[-3:], data[:3]) - self.assertRaises(IndexError, bp.write, data, 1) - self.assertRaises(IndexError, bp.write, data[:5], -1) - self.assertRaises(IndexError, bp.write, data[:5], bp.length) - self.assertRaises(TypeError, bp.write, 12) - bp = BufferProxy({'typestr': '|u1', - 'shape': (sizeof(buf),), - 'data': (addressof(buf), True)}) - self.assertRaises(pygame.BufferError, bp.write, '123'.encode('latin_1')) - finally: - # Make sure bp is garbage collected before buf - bp = None - gc.collect() - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/camera_test.py b/venv/lib/python3.7/site-packages/pygame/tests/camera_test.py deleted file mode 100644 index 8dfb45a..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/camera_test.py +++ /dev/null @@ -1,9 +0,0 @@ -import unittest -import math - -import pygame -from pygame.compat import long_ - - -class CameraModuleTest(unittest.TestCase): - pass diff --git 
a/venv/lib/python3.7/site-packages/pygame/tests/cdrom_tags.py b/venv/lib/python3.7/site-packages/pygame/tests/cdrom_tags.py deleted file mode 100644 index 6ec1b19..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/cdrom_tags.py +++ /dev/null @@ -1 +0,0 @@ -__tags__ = ['interactive', 'SDL2_ignore'] diff --git a/venv/lib/python3.7/site-packages/pygame/tests/cdrom_test.py b/venv/lib/python3.7/site-packages/pygame/tests/cdrom_test.py deleted file mode 100644 index af0426b..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/cdrom_test.py +++ /dev/null @@ -1,318 +0,0 @@ -import unittest -from pygame.tests.test_utils import question, prompt - -import pygame - - -pygame.cdrom.init() -# The number of CD drives available for testing. -CD_DRIVE_COUNT = pygame.cdrom.get_count() -pygame.cdrom.quit() - - -class CDROMModuleTest(unittest.TestCase): - def setUp(self): - pygame.cdrom.init() - - def tearDown(self): - pygame.cdrom.quit() - - def todo_test_CD(self): - - # __doc__ (as of 2008-08-02) for pygame.cdrom.CD: - - # pygame.cdrom.CD(id): return CD - # class to manage a cdrom drive - # - # You can create a CD object for each cdrom on the system. Use - # pygame.cdrom.get_count() to determine how many drives actually - # exist. The id argument is an integer of the drive, starting at zero. - # - # The CD object is not initialized, you can only call CD.get_id() and - # CD.get_name() on an uninitialized drive. - # - # It is safe to create multiple CD objects for the same drive, they - # will all cooperate normally. 
- # - - self.fail() - - def test_get_count(self): - """Ensure the correct number of CD drives can be detected.""" - count = pygame.cdrom.get_count() - response = question('Is the correct number of CD drives on this ' - 'system [{}]?'.format(count)) - - self.assertTrue(response) - - def test_get_init(self): - """Ensure the initialization state can be retrieved.""" - self.assertTrue(pygame.cdrom.get_init()) - - def test_init(self): - """Ensure module still initialized after multiple init() calls.""" - pygame.cdrom.init() - pygame.cdrom.init() - - self.assertTrue(pygame.cdrom.get_init()) - - def test_quit(self): - """Ensure module not initialized after quit() called.""" - pygame.cdrom.quit() - - self.assertFalse(pygame.cdrom.get_init()) - - def test_quit__multiple(self): - """Ensure module still not initialized after multiple quit() calls.""" - pygame.cdrom.quit() - pygame.cdrom.quit() - - self.assertFalse(pygame.cdrom.get_init()) - - -@unittest.skipIf(0 == CD_DRIVE_COUNT, "No CD drives detected") -class CDTypeTest(unittest.TestCase): - @classmethod - def setUpClass(cls): - pygame.cdrom.init() - - cls._cd_id = 0 # Only testing drive 0 for now. Expand in the future. - cls._cd = pygame.cdrom.CD(cls._cd_id) - - @classmethod - def tearDownClass(cls): - pygame.cdrom.quit() - - def setUp(self): - self._cd.init() - - def tearDown(self): - self._cd.quit() - - def test_eject(self): - """Ensure CD drive opens/ejects.""" - self._cd.eject() - response = question('Did the CD eject?') - - self.assertTrue(response) - - prompt("Please close the CD drive") - - def test_get_name(self): - """Ensure correct name for CD drive.""" - cd_name = self._cd.get_name() - response = question('Is the correct name for the CD drive [{}]?' - ''.format(cd_name)) - - self.assertTrue(response) - - def todo_test_get_all(self): - - # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_all: - - # CD.get_all(): return [(audio, start, end, lenth), ...] 
- # get all track information - # - # Return a list with information for every track on the cdrom. The - # information consists of a tuple with four values. The audio value is - # True if the track contains audio data. The start, end, and length - # values are floating point numbers in seconds. Start and end - # represent absolute times on the entire disc. - # - - self.fail() - - def todo_test_get_busy(self): - - # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_busy: - - # CD.get_busy(): return bool - # true if the drive is playing audio - # - # Returns True if the drive busy playing back audio. - - self.fail() - - def todo_test_get_current(self): - - # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_current: - - # CD.get_current(): return track, seconds - # the current audio playback position - # - # Returns both the current track and time of that track. This method - # works when the drive is either playing or paused. - # - # Note, track 0 is the first track on the CD. Track numbers start at zero. - - self.fail() - - def test_get_empty(self): - """Ensure correct name for CD drive.""" - prompt("Please ensure the CD drive is closed") - is_empty = self._cd.get_empty() - response = question('Is the CD drive empty?') - - self.assertEqual(is_empty, response) - - def test_get_id(self): - """Ensure the drive id/index is correct.""" - cd_id = self._cd.get_id() - - self.assertEqual(self._cd_id, cd_id) - - def test_get_init(self): - """Ensure the initialization state can be retrieved.""" - self.assertTrue(self._cd.get_init()) - - def todo_test_get_numtracks(self): - - # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_numtracks: - - # CD.get_numtracks(): return count - # the number of tracks on the cdrom - # - # Return the number of tracks on the cdrom in the drive. This will - # return zero of the drive is empty or has no tracks. 
- # - - self.fail() - - def todo_test_get_paused(self): - - # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_paused: - - # CD.get_paused(): return bool - # true if the drive is paused - # - # Returns True if the drive is currently paused. - - self.fail() - - def todo_test_get_track_audio(self): - - # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_track_audio: - - # CD.get_track_audio(track): return bool - # true if the cdrom track has audio data - # - # Determine if a track on a cdrom contains audio data. You can also - # call CD.num_tracks() and CD.get_all() to determine more information - # about the cdrom. - # - # Note, track 0 is the first track on the CD. Track numbers start at zero. - - self.fail() - - def todo_test_get_track_length(self): - - # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_track_length: - - # CD.get_track_length(track): return seconds - # length of a cdrom track - # - # Return a floating point value in seconds of the length of the cdrom track. - # Note, track 0 is the first track on the CD. Track numbers start at zero. - - self.fail() - - def todo_test_get_track_start(self): - - # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.get_track_start: - - # CD.get_track_start(track): return seconds - # start time of a cdrom track - # - # Return the absolute time in seconds where at start of the cdrom track. - # Note, track 0 is the first track on the CD. Track numbers start at zero. - - self.fail() - - def test_init(self): - """Ensure CD drive still initialized after multiple init() calls.""" - self._cd.init() - self._cd.init() - - self.assertTrue(self._cd.get_init()) - - def todo_test_pause(self): - - # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.pause: - - # CD.pause(): return None - # temporarily stop audio playback - # - # Temporarily stop audio playback on the CD. The playback can be - # resumed at the same point with the CD.resume() method. If the CD is - # not playing this method does nothing. 
- # - # Note, track 0 is the first track on the CD. Track numbers start at zero. - - self.fail() - - def todo_test_play(self): - - # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.play: - - # CD.init(): return None - # initialize a cdrom drive for use - # - # Playback audio from an audio cdrom in the drive. Besides the track - # number argument, you can also pass a starting and ending time for - # playback. The start and end time are in seconds, and can limit the - # section of an audio track played. - # - # If you pass a start time but no end, the audio will play to the end - # of the track. If you pass a start time and 'None' for the end time, - # the audio will play to the end of the entire disc. - # - # See the CD.get_numtracks() and CD.get_track_audio() to find tracks to playback. - # Note, track 0 is the first track on the CD. Track numbers start at zero. - - self.fail() - - def test_quit(self): - """Ensure CD drive not initialized after quit() called.""" - self._cd.quit() - - self.assertFalse(self._cd.get_init()) - - def test_quit__multiple(self): - """Ensure CD drive still not initialized after multiple quit() calls. - """ - self._cd.quit() - self._cd.quit() - - self.assertFalse(self._cd.get_init()) - - def todo_test_resume(self): - - # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.resume: - - # CD.resume(): return None - # unpause audio playback - # - # Unpause a paused CD. If the CD is not paused or already playing, - # this method does nothing. - # - - self.fail() - - def todo_test_stop(self): - - # __doc__ (as of 2008-08-02) for pygame.cdrom.CD.stop: - - # CD.stop(): return None - # stop audio playback - # - # Stops playback of audio from the cdrom. This will also lose the - # current playback position. This method does nothing if the drive - # isn't already playing audio. 
- # - - self.fail() - -################################################################################ - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/color_test.py b/venv/lib/python3.7/site-packages/pygame/tests/color_test.py deleted file mode 100644 index 992d9d4..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/color_test.py +++ /dev/null @@ -1,1027 +0,0 @@ -import unittest -import math -import operator -import platform - -import pygame -from pygame.compat import long_ - - -IS_PYPY = 'PyPy' == platform.python_implementation() -################################### CONSTANTS ################################## - -rgba_vals = [0, 1, 62, 63, 126, 127, 255] - -rgba_combinations = [ - (r,g,b,a) for r in rgba_vals - for g in rgba_vals - for b in rgba_vals - for a in rgba_vals - ] - -################################################################################ - - -def rgba_combos_Color_generator(): - for rgba in rgba_combinations: - yield pygame.Color(*rgba) - - -# Python gamma correct -def gamma_correct(rgba_0_255, gamma): - corrected = round(255.0 * math.pow(rgba_0_255 / 255.0, gamma)) - return max(min(int(corrected), 255), 0) - - -################################################################################ - -# TODO: add tests for -# correct_gamma() -- test against statically defined verified correct values -# coerce () -- ?? 
- -def _assignr(x, y): - x.r = y - - -def _assigng(x, y): - x.g = y - - -def _assignb(x, y): - x.b = y - - -def _assigna(x, y): - x.a = y - - -def _assign_item(x, p, y): - x[p] = y - - -class ColorTypeTest (unittest.TestCase): - def test_new(self): - c = pygame.Color.__new__(pygame.Color) - self.assertEqual(c, pygame.Color(0, 0, 0, 255)) - self.assertEqual(len(c), 4) - - def test_init(self): - c = pygame.Color(10, 20, 30, 200) - self.assertEqual(c, (10, 20, 30, 200)) - c.set_length(3) - self.assertEqual(len(c), 3) - c.__init__(100, 110, 120, 128) - self.assertEqual(len(c), 4) - self.assertEqual(c, (100, 110, 120, 128)) - - def test_invalid_html_hex_codes(self): - # This was a problem with the way 2 digit hex numbers were - # calculated. The test_hex_digits test is related to the fix. - Color = pygame.color.Color - self.assertRaises(ValueError, lambda: Color('# f000000')) - self.assertRaises(ValueError, lambda: Color('#f 000000')) - self.assertRaises(ValueError, lambda: Color('#-f000000')) - - def test_hex_digits(self): - # This is an implementation specific test. - # Two digit hex numbers are calculated using table lookups - # for the upper and lower digits. 
- Color = pygame.color.Color - self.assertEqual(Color('#00000000').r, 0x00) - self.assertEqual(Color('#10000000').r, 0x10) - self.assertEqual(Color('#20000000').r, 0x20) - self.assertEqual(Color('#30000000').r, 0x30) - self.assertEqual(Color('#40000000').r, 0x40) - self.assertEqual(Color('#50000000').r, 0x50) - self.assertEqual(Color('#60000000').r, 0x60) - self.assertEqual(Color('#70000000').r, 0x70) - self.assertEqual(Color('#80000000').r, 0x80) - self.assertEqual(Color('#90000000').r, 0x90) - self.assertEqual(Color('#A0000000').r, 0xA0) - self.assertEqual(Color('#B0000000').r, 0xB0) - self.assertEqual(Color('#C0000000').r, 0xC0) - self.assertEqual(Color('#D0000000').r, 0xD0) - self.assertEqual(Color('#E0000000').r, 0xE0) - self.assertEqual(Color('#F0000000').r, 0xF0) - self.assertEqual(Color('#01000000').r, 0x01) - self.assertEqual(Color('#02000000').r, 0x02) - self.assertEqual(Color('#03000000').r, 0x03) - self.assertEqual(Color('#04000000').r, 0x04) - self.assertEqual(Color('#05000000').r, 0x05) - self.assertEqual(Color('#06000000').r, 0x06) - self.assertEqual(Color('#07000000').r, 0x07) - self.assertEqual(Color('#08000000').r, 0x08) - self.assertEqual(Color('#09000000').r, 0x09) - self.assertEqual(Color('#0A000000').r, 0x0A) - self.assertEqual(Color('#0B000000').r, 0x0B) - self.assertEqual(Color('#0C000000').r, 0x0C) - self.assertEqual(Color('#0D000000').r, 0x0D) - self.assertEqual(Color('#0E000000').r, 0x0E) - self.assertEqual(Color('#0F000000').r, 0x0F) - - def test_comparison(self): - Color = pygame.color.Color - - # Check valid comparisons - self.assertTrue(Color(255, 0, 0, 0) == Color(255, 0, 0, 0)) - self.assertTrue(Color(0, 255, 0, 0) == Color(0, 255, 0, 0)) - self.assertTrue(Color(0, 0, 255, 0) == Color(0, 0, 255, 0)) - self.assertTrue(Color(0, 0, 0, 255) == Color(0, 0, 0, 255)) - self.assertFalse(Color(0, 0, 0, 0) == Color(255, 0, 0, 0)) - self.assertFalse(Color(0, 0, 0, 0) == Color(0, 255, 0, 0)) - self.assertFalse(Color(0, 0, 0, 0) == Color(0, 0, 
255, 0)) - self.assertFalse(Color(0, 0, 0, 0) == Color(0, 0, 0, 255)) - self.assertTrue(Color(0, 0, 0, 0) != Color(255, 0, 0, 0)) - self.assertTrue(Color(0, 0, 0, 0) != Color(0, 255, 0, 0)) - self.assertTrue(Color(0, 0, 0, 0) != Color(0, 0, 255, 0)) - self.assertTrue(Color(0, 0, 0, 0) != Color(0, 0, 0, 255)) - self.assertFalse(Color(255, 0, 0, 0) != Color(255, 0, 0, 0)) - self.assertFalse(Color(0, 255, 0, 0) != Color(0, 255, 0, 0)) - self.assertFalse(Color(0, 0, 255, 0) != Color(0, 0, 255, 0)) - self.assertFalse(Color(0, 0, 0, 255) != Color(0, 0, 0, 255)) - - self.assertTrue(Color(255, 0, 0, 0) == (255, 0, 0, 0)) - self.assertTrue(Color(0, 255, 0, 0) == (0, 255, 0, 0)) - self.assertTrue(Color(0, 0, 255, 0) == (0, 0, 255, 0)) - self.assertTrue(Color(0, 0, 0, 255) == (0, 0, 0, 255)) - self.assertFalse(Color(0, 0, 0, 0) == (255, 0, 0, 0)) - self.assertFalse(Color(0, 0, 0, 0) == (0, 255, 0, 0)) - self.assertFalse(Color(0, 0, 0, 0) == (0, 0, 255, 0)) - self.assertFalse(Color(0, 0, 0, 0) == (0, 0, 0, 255)) - self.assertTrue(Color(0, 0, 0, 0) != (255, 0, 0, 0)) - self.assertTrue(Color(0, 0, 0, 0) != (0, 255, 0, 0)) - self.assertTrue(Color(0, 0, 0, 0) != (0, 0, 255, 0)) - self.assertTrue(Color(0, 0, 0, 0) != (0, 0, 0, 255)) - self.assertFalse(Color(255, 0, 0, 0) != (255, 0, 0, 0)) - self.assertFalse(Color(0, 255, 0, 0) != (0, 255, 0, 0)) - self.assertFalse(Color(0, 0, 255, 0) != (0, 0, 255, 0)) - self.assertFalse(Color(0, 0, 0, 255) != (0, 0, 0, 255)) - - self.assertTrue((255, 0, 0, 0) == Color(255, 0, 0, 0)) - self.assertTrue((0, 255, 0, 0) == Color(0, 255, 0, 0)) - self.assertTrue((0, 0, 255, 0) == Color(0, 0, 255, 0)) - self.assertTrue((0, 0, 0, 255) == Color(0, 0, 0, 255)) - self.assertFalse((0, 0, 0, 0) == Color(255, 0, 0, 0)) - self.assertFalse((0, 0, 0, 0) == Color(0, 255, 0, 0)) - self.assertFalse((0, 0, 0, 0) == Color(0, 0, 255, 0)) - self.assertFalse((0, 0, 0, 0) == Color(0, 0, 0, 255)) - self.assertTrue((0, 0, 0, 0) != Color(255, 0, 0, 0)) - self.assertTrue((0, 
0, 0, 0) != Color(0, 255, 0, 0)) - self.assertTrue((0, 0, 0, 0) != Color(0, 0, 255, 0)) - self.assertTrue((0, 0, 0, 0) != Color(0, 0, 0, 255)) - self.assertFalse((255, 0, 0, 0) != Color(255, 0, 0, 0)) - self.assertFalse((0, 255, 0, 0) != Color(0, 255, 0, 0)) - self.assertFalse((0, 0, 255, 0) != Color(0, 0, 255, 0)) - self.assertFalse((0, 0, 0, 255) != Color(0, 0, 0, 255)) - - class TupleSubclass(tuple): - pass - self.assertTrue(Color(255, 0, 0, 0) == TupleSubclass((255, 0, 0, 0))) - self.assertTrue(TupleSubclass((255, 0, 0, 0)) == Color(255, 0, 0, 0)) - self.assertFalse(Color(255, 0, 0, 0) != TupleSubclass((255, 0, 0, 0))) - self.assertFalse(TupleSubclass((255, 0, 0, 0)) != Color(255, 0, 0, 0)) - - # These are not supported so will be unequal. - self.assertFalse(Color(255, 0, 0, 0) == "#ff000000") - self.assertTrue(Color(255, 0, 0, 0) != "#ff000000") - - self.assertFalse("#ff000000" == Color(255, 0, 0, 0)) - self.assertTrue("#ff000000" != Color(255, 0, 0, 0)) - - self.assertFalse(Color(255, 0, 0, 0) == 0xff000000) - self.assertTrue(Color(255, 0, 0, 0) != 0xff000000) - - self.assertFalse(0xff000000 == Color(255, 0, 0, 0)) - self.assertTrue(0xff000000 != Color(255, 0, 0, 0)) - - self.assertFalse(Color(255, 0, 0, 0) == [255, 0, 0, 0]) - self.assertTrue(Color(255, 0, 0, 0) != [255, 0, 0, 0]) - - self.assertFalse([255, 0, 0, 0] == Color(255, 0, 0 ,0)) - self.assertTrue([255, 0, 0, 0] != Color(255, 0, 0, 0)) - - # Comparison is not implemented for invalid color values. 
- class Test(object): - def __eq__(self, other): - return -1 - - def __ne__(self, other): - return -2 - - class TestTuple(tuple): - def __eq__(self, other): - return -1 - - def __ne__(self, other): - return -2 - - t = Test() - t_tuple = TestTuple(('a', 0, 0, 0)) - black = Color('black') - self.assertEqual(black == t, -1) - self.assertEqual(t == black, -1) - self.assertEqual(black != t, -2) - self.assertEqual(t != black, -2) - self.assertEqual(black == t_tuple, -1) - self.assertEqual(black != t_tuple, -2) - self.assertEqual(t_tuple == black, -1) - self.assertEqual(t_tuple != black, -2) - - def test_ignore_whitespace(self): - self.assertEqual(pygame.color.Color('red'), pygame.color.Color(' r e d ')) - - def test_slice(self): - #"""|tags: python3_ignore|""" - - # slicing a color gives you back a tuple. - # do all sorts of slice combinations. - c = pygame.Color(1,2,3,4) - - self.assertEqual((1,2,3,4), c[:]) - self.assertEqual((1,2,3), c[:-1]) - - self.assertEqual((), c[:-5]) - - self.assertEqual((1,2,3,4), c[:4]) - self.assertEqual((1,2,3,4), c[:5]) - self.assertEqual((1,2), c[:2]) - self.assertEqual((1,), c[:1]) - self.assertEqual((), c[:0]) - - - self.assertEqual((2,), c[1:-2]) - self.assertEqual((3, 4), c[-2:]) - self.assertEqual((4,), c[-1:]) - - - # NOTE: assigning to a slice is currently unsupported. - - - def test_unpack(self): - # should be able to unpack to r,g,b,a and r,g,b - c = pygame.Color(1,2,3,4) - r,g,b,a = c - self.assertEqual((1,2,3,4), (r,g,b,a)) - self.assertEqual(c, (r,g,b,a)) - - c.set_length(3) - r,g,b = c - self.assertEqual((1,2,3), (r,g,b)) - - def test_length(self): - # should be able to unpack to r,g,b,a and r,g,b - c = pygame.Color(1,2,3,4) - self.assertEqual(len(c), 4) - - c.set_length(3) - self.assertEqual(len(c), 3) - - # it keeps the old alpha anyway... 
- self.assertEqual(c.a, 4) - - # however you can't get the alpha in this way: - self.assertRaises(IndexError, lambda x:c[x], 4) - - c.set_length(4) - self.assertEqual(len(c), 4) - self.assertEqual(len(c), 4) - - self.assertRaises(ValueError, c.set_length, 5) - self.assertRaises(ValueError, c.set_length, -1) - self.assertRaises(ValueError, c.set_length, 0) - self.assertRaises(ValueError, c.set_length, pow(2, long_(33))) - - def test_case_insensitivity_of_string_args(self): - self.assertEqual(pygame.color.Color('red'), pygame.color.Color('Red')) - - def test_color(self): - c = pygame.Color(10, 20, 30, 40) - self.assertEqual(c.r, 10) - self.assertEqual(c.g, 20) - self.assertEqual(c.b, 30) - self.assertEqual(c.a, 40) - - c = pygame.Color("indianred3") - self.assertEqual(c.r, 205) - self.assertEqual(c.g, 85) - self.assertEqual(c.b, 85) - self.assertEqual(c.a, 255) - - c = pygame.Color(0xAABBCCDD) - self.assertEqual(c.r, 0xAA) - self.assertEqual(c.g, 0xBB) - self.assertEqual(c.b, 0xCC) - self.assertEqual(c.a, 0xDD) - - self.assertRaises(ValueError, pygame.Color, 257, 10, 105, 44) - self.assertRaises(ValueError, pygame.Color, 10, 257, 105, 44) - self.assertRaises(ValueError, pygame.Color, 10, 105, 257, 44) - self.assertRaises(ValueError, pygame.Color, 10, 105, 44, 257) - - def test_rgba(self): - c = pygame.Color(0) - self.assertEqual(c.r, 0) - self.assertEqual(c.g, 0) - self.assertEqual(c.b, 0) - self.assertEqual(c.a, 0) - - # Test simple assignments - c.r = 123 - self.assertEqual(c.r, 123) - self.assertRaises(ValueError, _assignr, c, 537) - self.assertEqual(c.r, 123) - self.assertRaises(ValueError, _assignr, c, -3) - self.assertEqual(c.r, 123) - - c.g = 55 - self.assertEqual(c.g, 55) - self.assertRaises(ValueError, _assigng, c, 348) - self.assertEqual(c.g, 55) - self.assertRaises(ValueError, _assigng, c, -44) - self.assertEqual(c.g, 55) - - c.b = 77 - self.assertEqual(c.b, 77) - self.assertRaises(ValueError, _assignb, c, 256) - self.assertEqual(c.b, 77) - 
self.assertRaises(ValueError, _assignb, c, -12) - self.assertEqual(c.b, 77) - - c.a = 255 - self.assertEqual(c.a, 255) - self.assertRaises(ValueError, _assigna, c, 312) - self.assertEqual(c.a, 255) - self.assertRaises(ValueError, _assigna, c, -10) - self.assertEqual(c.a, 255) - - def test_repr(self): - c = pygame.Color(68, 38, 26, 69) - t = "(68, 38, 26, 69)" - self.assertEqual(repr(c), t) - - def test_add(self): - c1 = pygame.Color(0) - self.assertEqual(c1.r, 0) - self.assertEqual(c1.g, 0) - self.assertEqual(c1.b, 0) - self.assertEqual(c1.a, 0) - - c2 = pygame.Color(20, 33, 82, 193) - self.assertEqual(c2.r, 20) - self.assertEqual(c2.g, 33) - self.assertEqual(c2.b, 82) - self.assertEqual(c2.a, 193) - - c3 = c1 + c2 - self.assertEqual(c3.r, 20) - self.assertEqual(c3.g, 33) - self.assertEqual(c3.b, 82) - self.assertEqual(c3.a, 193) - - c3 = c3 + c2 - self.assertEqual(c3.r, 40) - self.assertEqual(c3.g, 66) - self.assertEqual(c3.b, 164) - self.assertEqual(c3.a, 255) - - # Issue #286: Is type checking done for Python 3.x? - self.assertRaises(TypeError, operator.add, c1, None) - self.assertRaises(TypeError, operator.add, None, c1) - - def test_sub(self): - c1 = pygame.Color(0xFFFFFFFF) - self.assertEqual(c1.r, 255) - self.assertEqual(c1.g, 255) - self.assertEqual(c1.b, 255) - self.assertEqual(c1.a, 255) - - c2 = pygame.Color(20, 33, 82, 193) - self.assertEqual(c2.r, 20) - self.assertEqual(c2.g, 33) - self.assertEqual(c2.b, 82) - self.assertEqual(c2.a, 193) - - c3 = c1 - c2 - self.assertEqual(c3.r, 235) - self.assertEqual(c3.g, 222) - self.assertEqual(c3.b, 173) - self.assertEqual(c3.a, 62) - - c3 = c3 - c2 - self.assertEqual(c3.r, 215) - self.assertEqual(c3.g, 189) - self.assertEqual(c3.b, 91) - self.assertEqual(c3.a, 0) - - # Issue #286: Is type checking done for Python 3.x? 
- self.assertRaises(TypeError, operator.sub, c1, None) - self.assertRaises(TypeError, operator.sub, None, c1) - - def test_mul(self): - c1 = pygame.Color(0x01010101) - self.assertEqual(c1.r, 1) - self.assertEqual(c1.g, 1) - self.assertEqual(c1.b, 1) - self.assertEqual(c1.a, 1) - - c2 = pygame.Color(2, 5, 3, 22) - self.assertEqual(c2.r, 2) - self.assertEqual(c2.g, 5) - self.assertEqual(c2.b, 3) - self.assertEqual(c2.a, 22) - - c3 = c1 * c2 - self.assertEqual(c3.r, 2) - self.assertEqual(c3.g, 5) - self.assertEqual(c3.b, 3) - self.assertEqual(c3.a, 22) - - c3 = c3 * c2 - self.assertEqual(c3.r, 4) - self.assertEqual(c3.g, 25) - self.assertEqual(c3.b, 9) - self.assertEqual(c3.a, 255) - - # Issue #286: Is type checking done for Python 3.x? - self.assertRaises(TypeError, operator.mul, c1, None) - self.assertRaises(TypeError, operator.mul, None, c1) - - def test_div(self): - c1 = pygame.Color(0x80808080) - self.assertEqual(c1.r, 128) - self.assertEqual(c1.g, 128) - self.assertEqual(c1.b, 128) - self.assertEqual(c1.a, 128) - - c2 = pygame.Color(2, 4, 8, 16) - self.assertEqual(c2.r, 2) - self.assertEqual(c2.g, 4) - self.assertEqual(c2.b, 8) - self.assertEqual(c2.a, 16) - - c3 = c1 // c2 - self.assertEqual(c3.r, 64) - self.assertEqual(c3.g, 32) - self.assertEqual(c3.b, 16) - self.assertEqual(c3.a, 8) - - c3 = c3 // c2 - self.assertEqual(c3.r, 32) - self.assertEqual(c3.g, 8) - self.assertEqual(c3.b, 2) - self.assertEqual(c3.a, 0) - - # Issue #286: Is type checking done for Python 3.x? 
- self.assertRaises(TypeError, operator.floordiv, c1, None) - self.assertRaises(TypeError, operator.floordiv, None, c1) - - # Division by zero check - dividend = pygame.Color(255, 255, 255, 255) - for i in range(4): - divisor = pygame.Color(64, 64, 64, 64) - divisor[i] = 0 - quotient = pygame.Color(3, 3, 3, 3) - quotient[i] = 0 - self.assertEqual(dividend // divisor, quotient) - - def test_mod(self): - c1 = pygame.Color(0xFFFFFFFF) - self.assertEqual(c1.r, 255) - self.assertEqual(c1.g, 255) - self.assertEqual(c1.b, 255) - self.assertEqual(c1.a, 255) - - c2 = pygame.Color(2, 4, 8, 16) - self.assertEqual(c2.r, 2) - self.assertEqual(c2.g, 4) - self.assertEqual(c2.b, 8) - self.assertEqual(c2.a, 16) - - c3 = c1 % c2 - self.assertEqual(c3.r, 1) - self.assertEqual(c3.g, 3) - self.assertEqual(c3.b, 7) - self.assertEqual(c3.a, 15) - - # Issue #286: Is type checking done for Python 3.x? - self.assertRaises(TypeError, operator.mod, c1, None) - self.assertRaises(TypeError, operator.mod, None, c1) - - # Division by zero check - dividend = pygame.Color(255, 255, 255, 255) - for i in range(4): - divisor = pygame.Color(64, 64, 64, 64) - divisor[i] = 0 - quotient = pygame.Color(63, 63, 63, 63) - quotient[i] = 0 - self.assertEqual(dividend % divisor, quotient) - - def test_float(self): - c = pygame.Color(0xCC00CC00) - self.assertEqual(c.r, 204) - self.assertEqual(c.g, 0) - self.assertEqual(c.b, 204) - self.assertEqual(c.a, 0) - self.assertEqual(float(c), float(0xCC00CC00)) - - c = pygame.Color(0x33727592) - self.assertEqual(c.r, 51) - self.assertEqual(c.g, 114) - self.assertEqual(c.b, 117) - self.assertEqual(c.a, 146) - self.assertEqual(float(c), float(0x33727592)) - - def test_oct(self): - c = pygame.Color(0xCC00CC00) - self.assertEqual(c.r, 204) - self.assertEqual(c.g, 0) - self.assertEqual(c.b, 204) - self.assertEqual(c.a, 0) - self.assertEqual(oct(c), oct(0xCC00CC00)) - - c = pygame.Color(0x33727592) - self.assertEqual(c.r, 51) - self.assertEqual(c.g, 114) - 
self.assertEqual(c.b, 117) - self.assertEqual(c.a, 146) - self.assertEqual(oct(c), oct(0x33727592)) - - def test_hex(self): - c = pygame.Color(0xCC00CC00) - self.assertEqual(c.r, 204) - self.assertEqual(c.g, 0) - self.assertEqual(c.b, 204) - self.assertEqual(c.a, 0) - self.assertEqual(hex(c), hex(0xCC00CC00)) - - c = pygame.Color(0x33727592) - self.assertEqual(c.r, 51) - self.assertEqual(c.g, 114) - self.assertEqual(c.b, 117) - self.assertEqual(c.a, 146) - self.assertEqual(hex(c), hex(0x33727592)) - - - def test_webstyle(self): - c = pygame.Color("#CC00CC11") - self.assertEqual(c.r, 204) - self.assertEqual(c.g, 0) - self.assertEqual(c.b, 204) - self.assertEqual(c.a, 17) - self.assertEqual(hex(c), hex(0xCC00CC11)) - - c = pygame.Color("#CC00CC") - self.assertEqual(c.r, 204) - self.assertEqual(c.g, 0) - self.assertEqual(c.b, 204) - self.assertEqual(c.a, 255) - self.assertEqual(hex(c), hex(0xCC00CCFF)) - - c = pygame.Color("0xCC00CC11") - self.assertEqual(c.r, 204) - self.assertEqual(c.g, 0) - self.assertEqual(c.b, 204) - self.assertEqual(c.a, 17) - self.assertEqual(hex(c), hex(0xCC00CC11)) - - c = pygame.Color("0xCC00CC") - self.assertEqual(c.r, 204) - self.assertEqual(c.g, 0) - self.assertEqual(c.b, 204) - self.assertEqual(c.a, 255) - self.assertEqual(hex(c), hex(0xCC00CCFF)) - - self.assertRaises(ValueError, pygame.Color, "#cc00qq") - self.assertRaises(ValueError, pygame.Color, "0xcc00qq") - self.assertRaises(ValueError, pygame.Color, "09abcdef") - self.assertRaises(ValueError, pygame.Color, "09abcde") - self.assertRaises(ValueError, pygame.Color, "quarky") - - def test_int(self): - # This will be a long - c = pygame.Color(0xCC00CC00) - self.assertEqual(c.r, 204) - self.assertEqual(c.g, 0) - self.assertEqual(c.b, 204) - self.assertEqual(c.a, 0) - self.assertEqual(int(c), int(0xCC00CC00)) - - # This will be an int - c = pygame.Color(0x33727592) - self.assertEqual(c.r, 51) - self.assertEqual(c.g, 114) - self.assertEqual(c.b, 117) - self.assertEqual(c.a, 146) - 
self.assertEqual(int(c), int(0x33727592)) - - def test_long(self): - # This will be a long - c = pygame.Color(0xCC00CC00) - self.assertEqual(c.r, 204) - self.assertEqual(c.g, 0) - self.assertEqual(c.b, 204) - self.assertEqual(c.a, 0) - self.assertEqual(long_ (c), long_ (0xCC00CC00)) - - # This will be an int - c = pygame.Color(0x33727592) - self.assertEqual(c.r, 51) - self.assertEqual(c.g, 114) - self.assertEqual(c.b, 117) - self.assertEqual(c.a, 146) - self.assertEqual(long_ (c), long_ (0x33727592)) - - def test_normalize(self): - c = pygame.Color(204, 38, 194, 55) - self.assertEqual(c.r, 204) - self.assertEqual(c.g, 38) - self.assertEqual(c.b, 194) - self.assertEqual(c.a, 55) - - t = c.normalize() - - self.assertAlmostEqual(t[0], 0.800000, 5) - self.assertAlmostEqual(t[1], 0.149016, 5) - self.assertAlmostEqual(t[2], 0.760784, 5) - self.assertAlmostEqual(t[3], 0.215686, 5) - - def test_len(self): - c = pygame.Color(204, 38, 194, 55) - self.assertEqual(len(c), 4) - - def test_get_item(self): - c = pygame.Color(204, 38, 194, 55) - self.assertEqual(c[0], 204) - self.assertEqual(c[1], 38) - self.assertEqual(c[2], 194) - self.assertEqual(c[3], 55) - - def test_set_item(self): - c = pygame.Color(204, 38, 194, 55) - self.assertEqual(c[0], 204) - self.assertEqual(c[1], 38) - self.assertEqual(c[2], 194) - self.assertEqual(c[3], 55) - - c[0] = 33 - self.assertEqual(c[0], 33) - c[1] = 48 - self.assertEqual(c[1], 48) - c[2] = 173 - self.assertEqual(c[2], 173) - c[3] = 213 - self.assertEqual(c[3], 213) - - # Now try some 'invalid' ones - self.assertRaises(ValueError, _assign_item, c, 0, 95.485) - self.assertEqual(c[0], 33) - self.assertRaises(ValueError, _assign_item, c, 1, -83) - self.assertEqual(c[1], 48) - self.assertRaises(ValueError, _assign_item, c, 2, "Hello") - self.assertEqual(c[2], 173) - - def test_Color_type_works_for_Surface_get_and_set_colorkey(self): - s = pygame.Surface((32, 32)) - - c = pygame.Color(33, 22, 11, 255) - s.set_colorkey(c) - - get_r, get_g, get_b, 
get_a = s.get_colorkey() - - self.assertTrue(get_r == c.r) - self.assertTrue(get_g == c.g) - self.assertTrue(get_b == c.b) - self.assertTrue(get_a == c.a) - -########## HSLA, HSVA, CMY, I1I2I3 ALL ELEMENTS WITHIN SPECIFIED RANGE ######### - - def test_hsla__all_elements_within_limits(self): - for c in rgba_combos_Color_generator(): - h, s, l, a = c.hsla - self.assertTrue(0 <= h <= 360) - self.assertTrue(0 <= s <= 100) - self.assertTrue(0 <= l <= 100) - self.assertTrue(0 <= a <= 100) - - def test_hsva__all_elements_within_limits(self): - for c in rgba_combos_Color_generator(): - h, s, v, a = c.hsva - self.assertTrue(0 <= h <= 360) - self.assertTrue(0 <= s <= 100) - self.assertTrue(0 <= v <= 100) - self.assertTrue(0 <= a <= 100) - - def test_cmy__all_elements_within_limits(self): - for c in rgba_combos_Color_generator(): - c, m, y = c.cmy - self.assertTrue(0 <= c <= 1) - self.assertTrue(0 <= m <= 1) - self.assertTrue(0 <= y <= 1) - - def test_i1i2i3__all_elements_within_limits(self): - for c in rgba_combos_Color_generator(): - i1, i2, i3 = c.i1i2i3 - self.assertTrue( 0 <= i1 <= 1) - self.assertTrue(-0.5 <= i2 <= 0.5) - self.assertTrue(-0.5 <= i3 <= 0.5) - - def test_issue_269(self): - """PyColor OverflowError on HSVA with hue value of 360 - - >>> c = pygame.Color(0) - >>> c.hsva = (360,0,0,0) - Traceback (most recent call last): - File "", line 1, in - OverflowError: this is not allowed to happen ever - >>> pygame.ver - '1.9.1release' - >>> - - """ - - c = pygame.Color(0) - c.hsva = 360, 0, 0, 0 - self.assertEqual(c.hsva, (0, 0, 0, 0)) - c.hsva = 360, 100, 100, 100 - self.assertEqual(c.hsva, (0, 100, 100, 100)) - self.assertEqual(c, (255, 0, 0, 255)) - -####################### COLORSPACE PROPERTY SANITY TESTS ####################### - - def colorspaces_converted_should_not_raise(self, prop): - fails = 0 - - x = 0 - for c in rgba_combos_Color_generator(): - x += 1 - - other = pygame.Color(0) - - try: - setattr(other, prop, getattr(c, prop)) - #eg other.hsla = c.hsla - 
- except ValueError: - fails += 1 - - self.assertTrue(x > 0, "x is combination counter, 0 means no tests!") - self.assertTrue((fails, x) == (0, x)) - - def test_hsla__sanity_testing_converted_should_not_raise(self): - self.colorspaces_converted_should_not_raise('hsla') - - def test_hsva__sanity_testing_converted_should_not_raise(self): - self.colorspaces_converted_should_not_raise('hsva') - - def test_cmy__sanity_testing_converted_should_not_raise(self): - self.colorspaces_converted_should_not_raise('cmy') - - def test_i1i2i3__sanity_testing_converted_should_not_raise(self): - self.colorspaces_converted_should_not_raise('i1i2i3') - -################################################################################ - - def colorspaces_converted_should_equate_bar_rounding(self, prop): - for c in rgba_combos_Color_generator(): - other = pygame.Color(0) - - try: - setattr(other, prop, getattr(c, prop)) - #eg other.hsla = c.hsla - - self.assertTrue(abs(other.r - c.r) <= 1) - self.assertTrue(abs(other.b - c.b) <= 1) - self.assertTrue(abs(other.g - c.g) <= 1) - # CMY and I1I2I3 do not care about the alpha - if not prop in ("cmy", "i1i2i3"): - self.assertTrue(abs(other.a - c.a) <= 1) - - except ValueError: - pass # other tests will notify, this tests equation - - def test_hsla__sanity_testing_converted_should_equate_bar_rounding(self): - self.colorspaces_converted_should_equate_bar_rounding('hsla') - - def test_hsva__sanity_testing_converted_should_equate_bar_rounding(self): - self.colorspaces_converted_should_equate_bar_rounding('hsva') - - def test_cmy__sanity_testing_converted_should_equate_bar_rounding(self): - self.colorspaces_converted_should_equate_bar_rounding('cmy') - - def test_i1i2i3__sanity_testing_converted_should_equate_bar_rounding(self): - self.colorspaces_converted_should_equate_bar_rounding('i1i2i3') - -################################################################################ - - def test_correct_gamma__verified_against_python_implementation(self): - 
"|tags:slow|" - # gamma_correct defined at top of page - - gammas = [i / 10.0 for i in range(1, 31)] # [0.1 ... 3.0] - gammas_len = len(gammas) - - for i, c in enumerate(rgba_combos_Color_generator()): - gamma = gammas[i % gammas_len] - - corrected = pygame.Color(*[gamma_correct(x, gamma) - for x in tuple(c)]) - lib_corrected = c.correct_gamma(gamma) - - self.assertTrue(corrected.r == lib_corrected.r) - self.assertTrue(corrected.g == lib_corrected.g) - self.assertTrue(corrected.b == lib_corrected.b) - self.assertTrue(corrected.a == lib_corrected.a) - - # TODO: test against statically defined verified _correct_ values - # assert corrected.r == 125 etc. - - def test_pickle(self): - import pickle - c1 = pygame.Color(1,2,3,4) - #c2 = pygame.Color(255,254,253,252) - pickle_string = pickle.dumps(c1) - c1_frompickle = pickle.loads(pickle_string) - self.assertEqual(c1,c1_frompickle) - -################################################################################ -# only available if ctypes module is also available - - @unittest.skipIf(IS_PYPY, 'PyPy has no ctypes') - def test_arraystruct(self): - - import pygame.tests.test_utils.arrinter as ai - import ctypes as ct - - c_byte_p = ct.POINTER(ct.c_byte) - c = pygame.Color(5, 7, 13, 23) - flags = (ai.PAI_CONTIGUOUS | ai.PAI_FORTRAN | - ai.PAI_ALIGNED | ai.PAI_NOTSWAPPED) - for i in range(1, 5): - c.set_length(i) - inter = ai.ArrayInterface(c) - self.assertEqual(inter.two, 2) - self.assertEqual(inter.nd, 1) - self.assertEqual(inter.typekind, 'u') - self.assertEqual(inter.itemsize, 1) - self.assertEqual(inter.flags, flags) - self.assertEqual(inter.shape[0], i) - self.assertEqual(inter.strides[0], 1) - data = ct.cast(inter.data, c_byte_p) - for j in range(i): - self.assertEqual(data[j], c[j]) - - @unittest.skipIf(not pygame.HAVE_NEWBUF, 'newbuf not implemented') - def test_newbuf(self): - from pygame.tests.test_utils import buftools - from ctypes import cast, POINTER, c_uint8 - - class ColorImporter(buftools.Importer): - def 
__init__(self, color, flags): - super(ColorImporter, self).__init__(color, flags) - self.items = cast(self.buf, POINTER(c_uint8)) - - def __getitem__(self, index): - if 0 <= index < 4: - return self.items[index] - raise IndexError("valid index values are between 0 and 3: " - "got {}".format(index)) - def __setitem__(self, index, value): - if 0 <= index < 4: - self.items[index] = value - else: - raise IndexError("valid index values are between 0 and 3: " - "got {}".format(index)) - - c = pygame.Color(50, 100, 150, 200) - imp = ColorImporter(c, buftools.PyBUF_SIMPLE) - self.assertTrue(imp.obj is c) - self.assertEqual(imp.ndim, 0) - self.assertEqual(imp.itemsize, 1) - self.assertEqual(imp.len, 4) - self.assertTrue(imp.readonly) - self.assertTrue(imp.format is None) - self.assertTrue(imp.shape is None) - self.assertTrue(imp.strides is None) - self.assertTrue(imp.suboffsets is None) - for i in range(4): - self.assertEqual(c[i], imp[i]) - imp[0] = 60 - self.assertEqual(c.r, 60) - imp[1] = 110 - self.assertEqual(c.g, 110) - imp[2] = 160 - self.assertEqual(c.b, 160) - imp[3] = 210 - self.assertEqual(c.a, 210) - imp = ColorImporter(c, buftools.PyBUF_FORMAT) - self.assertEqual(imp.ndim, 0) - self.assertEqual(imp.itemsize, 1) - self.assertEqual(imp.len, 4) - self.assertEqual(imp.format, 'B') - self.assertEqual(imp.ndim, 0) - self.assertEqual(imp.itemsize, 1) - self.assertEqual(imp.len, 4) - imp = ColorImporter(c, buftools.PyBUF_ND) - self.assertEqual(imp.ndim, 1) - self.assertEqual(imp.itemsize, 1) - self.assertEqual(imp.len, 4) - self.assertTrue(imp.format is None) - self.assertEqual(imp.shape, (4,)) - self.assertEqual(imp.strides, None) - imp = ColorImporter(c, buftools.PyBUF_STRIDES) - self.assertEqual(imp.ndim, 1) - self.assertTrue(imp.format is None) - self.assertEqual(imp.shape, (4,)) - self.assertEqual(imp.strides, (1,)) - imp = ColorImporter(c, buftools.PyBUF_C_CONTIGUOUS) - self.assertEqual(imp.ndim, 1) - imp = ColorImporter(c, buftools.PyBUF_F_CONTIGUOUS) - 
self.assertEqual(imp.ndim, 1) - imp = ColorImporter(c, buftools.PyBUF_ANY_CONTIGUOUS) - self.assertEqual(imp.ndim, 1) - for i in range(1, 5): - c.set_length(i) - imp = ColorImporter(c, buftools.PyBUF_ND) - self.assertEqual(imp.ndim, 1) - self.assertEqual(imp.len, i) - self.assertEqual(imp.shape, (i,)) - self.assertRaises(BufferError, ColorImporter, - c, buftools.PyBUF_WRITABLE) - - -class SubclassTest(unittest.TestCase): - - class MyColor(pygame.Color): - def __init__ (self, *args, **kwds): - super(SubclassTest.MyColor, self).__init__ (*args, **kwds) - self.an_attribute = True - - def test_add(self): - mc1 = self.MyColor(128, 128, 128, 255) - self.assertTrue(mc1.an_attribute) - c2 = pygame.Color(64, 64, 64, 255) - mc2 = mc1 + c2 - self.assertTrue(isinstance(mc2, self.MyColor)) - self.assertRaises(AttributeError, getattr, mc2, 'an_attribute') - c3 = c2 + mc1 - self.assertTrue(type(c3) is pygame.Color) - - def test_sub(self): - mc1 = self.MyColor(128, 128, 128, 255) - self.assertTrue(mc1.an_attribute) - c2 = pygame.Color(64, 64, 64, 255) - mc2 = mc1 - c2 - self.assertTrue(isinstance(mc2, self.MyColor)) - self.assertRaises(AttributeError, getattr, mc2, 'an_attribute') - c3 = c2 - mc1 - self.assertTrue(type(c3) is pygame.Color) - - def test_mul(self): - mc1 = self.MyColor(128, 128, 128, 255) - self.assertTrue(mc1.an_attribute) - c2 = pygame.Color(64, 64, 64, 255) - mc2 = mc1 * c2 - self.assertTrue(isinstance(mc2, self.MyColor)) - self.assertRaises(AttributeError, getattr, mc2, 'an_attribute') - c3 = c2 * mc1 - self.assertTrue(type(c3) is pygame.Color) - - def test_div(self): - mc1 = self.MyColor(128, 128, 128, 255) - self.assertTrue(mc1.an_attribute) - c2 = pygame.Color(64, 64, 64, 255) - mc2 = mc1 // c2 - self.assertTrue(isinstance(mc2, self.MyColor)) - self.assertRaises(AttributeError, getattr, mc2, 'an_attribute') - c3 = c2 // mc1 - self.assertTrue(type(c3) is pygame.Color) - - def test_mod(self): - mc1 = self.MyColor(128, 128, 128, 255) - 
self.assertTrue(mc1.an_attribute) - c2 = pygame.Color(64, 64, 64, 255) - mc2 = mc1 % c2 - self.assertTrue(isinstance(mc2, self.MyColor)) - self.assertRaises(AttributeError, getattr, mc2, 'an_attribute') - c3 = c2 % mc1 - self.assertTrue(type(c3) is pygame.Color) - - def test_inv(self): - mc1 = self.MyColor(64, 64, 64, 64) - self.assertTrue(mc1.an_attribute) - mc2 = ~mc1 - self.assertTrue(isinstance(mc2, self.MyColor)) - self.assertRaises(AttributeError, getattr, mc2, 'an_attribute') - - def test_correct_gamma(self): - mc1 = self.MyColor(64, 70, 75, 255) - self.assertTrue(mc1.an_attribute) - mc2 = mc1.correct_gamma(.03) - self.assertTrue(isinstance(mc2, self.MyColor)) - self.assertRaises(AttributeError, getattr, mc2, 'an_attribute') - - -################################################################################ - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/compat_test.py b/venv/lib/python3.7/site-packages/pygame/tests/compat_test.py deleted file mode 100644 index d16f3f0..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/compat_test.py +++ /dev/null @@ -1,87 +0,0 @@ -import sys - -import unittest -from pygame import compat -encode_file_path = sys.modules['pygame.rwobject'].encode_file_path - -class CompatModuleTest(unittest.TestCase): - def test_as_unicode(self): - r = r'Bo\u00F6tes' - ords = [ord('B'), ord('o'), 0xF6, ord('t'), ord('e'), ord('s')] - self.assertEqual(len(r), 11) - u = compat.as_unicode(r) - self.assertIsInstance(u, compat.unicode_) - self.assertEqual([ord(c) for c in u], ords) - - def test_as_bytes(self): - ords = [0, 1, 0x7F, 0x80, 0xC3, 0x20, 0xC3, 0xB6, 0xFF] - s = ''.join([chr(i) for i in ords]) - self.assertEqual(len(s), len(ords)) - b = compat.as_bytes(s) - self.assertIsInstance(b, compat.bytes_) - self.assertEqual([compat.ord_(i) for i in b], ords) - - def test_ord_(self): - self.assertIsInstance(compat.ord_(compat.bytes_(1)[0]), int) - - def test_bytes_(self): - 
self.assertFalse(compat.bytes_ is compat.unicode_) - self.assertTrue(hasattr(compat.bytes_, 'capitalize')) - self.assertFalse(hasattr(compat.bytes_, 'isdecimal')) - - def test_unicode_(self): - self.assertTrue(hasattr(compat.unicode_(), 'isdecimal')) - - def test_long_(self): - self.assertIsInstance(int('99999999999999999999'), compat.long_) - - def test_geterror(self): - msg = 'Success' - try: - raise TypeError(msg) - except TypeError: - e = compat.geterror() - self.assertIsInstance(e, TypeError) - self.assertEqual(str(e), msg) - - def test_xrange_(self): - self.assertFalse(isinstance(compat.xrange_(2), list)) - - def test_unichr_(self): - ordval = 86 - c = compat.unichr_(ordval) - self.assertIsInstance(c, compat.unicode_) - self.assertEqual(ord(c), ordval) - - def test_get_BytesIO(self): - BytesIO = compat.get_BytesIO() - b1 = compat.as_bytes("\x00\xffabc") - b2 = BytesIO(b1).read() - self.assertIsInstance(b2, compat.bytes_) - self.assertEqual(b2, b1) - - def test_get_StringIO(self): - StringIO = compat.get_StringIO() - b1 = "abcde" - b2 = StringIO(b1).read() - self.assertIsInstance(b2, str) - self.assertEqual(b2, b1) - - def test_raw_input_(self): - StringIO = compat.get_StringIO() - msg = 'success' - tmp = sys.stdin - sys.stdin = StringIO(msg + '\n') - try: - s = compat.raw_input_() - self.assertEqual(s, msg) - finally: - sys.stdin = tmp - - def test_filesystem_encode(self): - upath = compat.as_unicode(r"ab\u212Acd") - self.assertEqual(compat.filesystem_encode(upath), - encode_file_path(upath)) - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/constants_test.py b/venv/lib/python3.7/site-packages/pygame/tests/constants_test.py deleted file mode 100644 index 6ac5748..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/constants_test.py +++ /dev/null @@ -1,51 +0,0 @@ -import unittest -import pygame.constants - - -class KmodTests(unittest.TestCase): - @classmethod - def setUpClass(cls): - 
cls.constants = [ - 'KMOD_NONE', - 'KMOD_LSHIFT', - 'KMOD_RSHIFT', - 'KMOD_LCTRL', - 'KMOD_RCTRL', - 'KMOD_LALT', - 'KMOD_RALT', - 'KMOD_LMETA', - 'KMOD_RMETA', - 'KMOD_NUM', - 'KMOD_CAPS', - 'KMOD_MODE', - 'KMOD_CTRL', - 'KMOD_SHIFT', - 'KMOD_ALT', - 'KMOD_META', - ] - if pygame.get_sdl_version()[0] >= 2: - cls.constants.extend([ - 'KMOD_LGUI', - 'KMOD_RGUI', - 'KMOD_GUI', - ]) - - def test_kmod_existence(self): - for k in self.constants: - self.assertTrue(hasattr(pygame.constants, k), 'missing constant {}'.format(k)) - - def test_kmod_types(self): - for k in self.constants: - self.assertEqual(type(getattr(pygame.constants, k)), int) - -class KeyConstantTests(unittest.TestCase): - def test_letters(self): - for c in range(ord('a'), ord('z') + 1): - c = chr(c) - self.assertTrue(hasattr(pygame.constants, 'K_%s' % c), - 'missing constant: K_%s' % c) - -################################################################################ - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/cursors_test.py b/venv/lib/python3.7/site-packages/pygame/tests/cursors_test.py deleted file mode 100644 index 1e465e8..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/cursors_test.py +++ /dev/null @@ -1,63 +0,0 @@ -import unittest -from pygame.tests.test_utils import fixture_path -import pygame - - -class CursorsModuleTest(unittest.TestCase): - def todo_test_compile(self): - - # __doc__ (as of 2008-06-25) for pygame.cursors.compile: - - # pygame.cursors.compile(strings, black, white,xor) -> data, mask - # compile cursor strings into cursor data - # - # This takes a set of strings with equal length and computes - # the binary data for that cursor. The string widths must be - # divisible by 8. - # - # The black and white arguments are single letter strings that - # tells which characters will represent black pixels, and which - # characters represent white pixels. All other characters are - # considered clear. 
- # - # This returns a tuple containing the cursor data and cursor mask - # data. Both these arguments are used when setting a cursor with - # pygame.mouse.set_cursor(). - - self.fail() - - def test_load_xbm(self): - # __doc__ (as of 2008-06-25) for pygame.cursors.load_xbm: - - # pygame.cursors.load_xbm(cursorfile, maskfile) -> cursor_args - # reads a pair of XBM files into set_cursor arguments - # - # Arguments can either be filenames or filelike objects - # with the readlines method. Not largely tested, but - # should work with typical XBM files. - - # Test that load_xbm will take filenames as arguments - cursorfile = fixture_path(r"xbm_cursors/white_sizing.xbm") - maskfile = fixture_path(r"xbm_cursors/white_sizing_mask.xbm") - cursor = pygame.cursors.load_xbm(cursorfile, maskfile) - - # Test that load_xbm will take file objects as arguments - with open(cursorfile) as cursor_f, open(maskfile) as mask_f: - cursor = pygame.cursors.load_xbm(cursor_f, mask_f) - - # Is it in a format that mouse.set_cursor won't blow up on? 
- pygame.display.init() - try: - pygame.mouse.set_cursor(*cursor) - except pygame.error as e: - if 'not currently supported' in str(e): - unittest.skip('skipping test as set_cursor() is not supported') - finally: - pygame.display.quit() - -################################################################################ - -if __name__ == '__main__': - unittest.main() - -################################################################################ diff --git a/venv/lib/python3.7/site-packages/pygame/tests/display_test.py b/venv/lib/python3.7/site-packages/pygame/tests/display_test.py deleted file mode 100644 index 7ec8bba..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/display_test.py +++ /dev/null @@ -1,440 +0,0 @@ -# -*- coding: utf-8 -*- - -import unittest -import pygame, pygame.transform -from pygame.compat import unicode_ - -from pygame import display - -class DisplayModuleTest(unittest.TestCase): - default_caption = "pygame window" - - def setUp(self): - display.init() - def tearDown(self): - display.quit() - - def test_update(self): - """ see if pygame.display.update takes rects with negative values. - "|Tags:display|" - """ - - #pygame.init() - screen = pygame.display.set_mode((100, 100)) - screen.fill((55, 55, 55)) - - r1 = pygame.Rect(0, 0, 100, 100) - pygame.display.update(r1) - - r2 = pygame.Rect(-10, 0, 100, 100) - pygame.display.update(r2) - - r3 = pygame.Rect(-10, 0, -100, -100) - pygame.display.update(r3) - - # NOTE: if I don't call pygame.quit there is a segfault. hrmm. - #pygame.quit() - # I think it's because unittest runs stuff in threads - # here's a stack trace... 
- - # NOTE to author of above: - # unittest doesn't run tests in threads - # segfault was probably caused by another tests need - # for a "clean slate" - - """ - #0 0x08103b7c in PyFrame_New () - #1 0x080bd666 in PyEval_EvalCodeEx () - #2 0x08105202 in PyFunction_SetClosure () - #3 0x080595ae in PyObject_Call () - #4 0x080b649f in PyEval_CallObjectWithKeywords () - #5 0x08059585 in PyObject_CallObject () - #6 0xb7f7aa2d in initbase () from /usr/lib/python2.4/site-packages/pygame/base.so - #7 0x080e09bd in Py_Finalize () - #8 0x08055597 in Py_Main () - #9 0xb7e04eb0 in __libc_start_main () from /lib/tls/libc.so.6 - #10 0x08054e31 in _start () - - """ - - def test_Info(self): - inf = pygame.display.Info() - self.assertNotEqual(inf.current_h, -1) - self.assertNotEqual(inf.current_w, -1) - #probably have an older SDL than 1.2.10 if -1. - - screen = pygame.display.set_mode((128,128)) - inf = pygame.display.Info() - self.assertEqual(inf.current_h, 128) - self.assertEqual(inf.current_w, 128) - - def todo_test_flip(self): - - # __doc__ (as of 2008-08-02) for pygame.display.flip: - - # pygame.display.flip(): return None - # update the full display Surface to the screen - # - # This will update the contents of the entire display. If your display - # mode is using the flags pygame.HWSURFACE and pygame.DOUBLEBUF, this - # will wait for a vertical retrace and swap the surfaces. If you are - # using a different type of display mode, it will simply update the - # entire contents of the surface. - # - # When using an pygame.OPENGL display mode this will perform a gl buffer swap. - - self.fail() - - def todo_test_get_active(self): - - # __doc__ (as of 2008-08-02) for pygame.display.get_active: - - # pygame.display.get_active(): return bool - # true when the display is active on the display - # - # After pygame.display.set_mode() is called the display Surface will - # be visible on the screen. Most windowed displays can be hidden by - # the user. 
If the display Surface is hidden or iconified this will - # return False. - # - - self.fail() - - def test_get_caption(self): - - # __doc__ (as of 2008-08-02) for pygame.display.get_caption: - - # pygame.display.get_caption(): return (title, icontitle) - # get the current window caption - # - # Returns the title and icontitle for the display Surface. These will - # often be the same value. - # - - screen = display.set_mode((100, 100)) - self.assertEqual(display.get_caption()[0], self.default_caption) - - def test_set_caption(self): - - # __doc__ (as of 2008-08-02) for pygame.display.set_caption: - - # pygame.display.set_caption(title, icontitle=None): return None - # set the current window caption - # - # If the display has a window title, this function will change the - # name on the window. Some systems support an alternate shorter title - # to be used for minimized displays. - # - - TEST_CAPTION = "test" - screen = display.set_mode((100, 100)) - self.assertIsNone(display.set_caption(TEST_CAPTION)) - self.assertEqual(display.get_caption()[0], TEST_CAPTION) - self.assertEqual(display.get_caption()[1], TEST_CAPTION) - - def test_caption_unicode(self): - TEST_CAPTION = u'å°' - display.set_caption(TEST_CAPTION) - import sys - if sys.version_info.major >= 3: - self.assertEqual(display.get_caption()[0], TEST_CAPTION) - else: - self.assertEqual(unicode_(display.get_caption()[0], 'utf8'), TEST_CAPTION) - - def todo_test_get_driver(self): - - # __doc__ (as of 2008-08-02) for pygame.display.get_driver: - - # pygame.display.get_driver(): return name - # get the name of the pygame display backend - # - # Pygame chooses one of many available display backends when it is - # initialized. This returns the internal name used for the display - # backend. This can be used to provide limited information about what - # display capabilities might be accelerated. See the SDL_VIDEODRIVER - # flags in pygame.display.set_mode() to see some of the common - # options. 
- # - - self.fail() - - def todo_test_get_init(self): - - # __doc__ (as of 2008-08-02) for pygame.display.get_init: - - # pygame.display.get_init(): return bool - # true if the display module is initialized - # - # Returns True if the pygame.display module is currently initialized. - - self.fail() - - def todo_test_get_surface(self): - - # __doc__ (as of 2008-08-02) for pygame.display.get_surface: - - # pygame.display.get_surface(): return Surface - # get a reference to the currently set display surface - # - # Return a reference to the currently set display Surface. If no - # display mode has been set this will return None. - # - - self.fail() - - def todo_test_get_wm_info(self): - - # __doc__ (as of 2008-08-02) for pygame.display.get_wm_info: - - # pygame.display.get_wm_info(): return dict - # Get information about the current windowing system - # - # Creates a dictionary filled with string keys. The strings and values - # are arbitrarily created by the system. Some systems may have no - # information and an empty dictionary will be returned. Most platforms - # will return a "window" key with the value set to the system id for - # the current display. - # - # New with pygame 1.7.1 - - self.fail() - - def todo_test_gl_get_attribute(self): - - # __doc__ (as of 2008-08-02) for pygame.display.gl_get_attribute: - - # pygame.display.gl_get_attribute(flag): return value - # get the value for an opengl flag for the current display - # - # After calling pygame.display.set_mode() with the pygame.OPENGL flag, - # it is a good idea to check the value of any requested OpenGL - # attributes. See pygame.display.gl_set_attribute() for a list of - # valid flags. 
- # - - self.fail() - - def todo_test_gl_set_attribute(self): - - # __doc__ (as of 2008-08-02) for pygame.display.gl_set_attribute: - - # pygame.display.gl_set_attribute(flag, value): return None - # request an opengl display attribute for the display mode - # - # When calling pygame.display.set_mode() with the pygame.OPENGL flag, - # Pygame automatically handles setting the OpenGL attributes like - # color and doublebuffering. OpenGL offers several other attributes - # you may want control over. Pass one of these attributes as the flag, - # and its appropriate value. This must be called before - # pygame.display.set_mode() - # - # The OPENGL flags are; - # GL_ALPHA_SIZE, GL_DEPTH_SIZE, GL_STENCIL_SIZE, GL_ACCUM_RED_SIZE, - # GL_ACCUM_GREEN_SIZE, GL_ACCUM_BLUE_SIZE, GL_ACCUM_ALPHA_SIZE, - # GL_MULTISAMPLEBUFFERS, GL_MULTISAMPLESAMPLES, GL_STEREO - - self.fail() - - def todo_test_iconify(self): - - # __doc__ (as of 2008-08-02) for pygame.display.iconify: - - # pygame.display.iconify(): return bool - # iconify the display surface - # - # Request the window for the display surface be iconified or hidden. - # Not all systems and displays support an iconified display. The - # function will return True if successfull. - # - # When the display is iconified pygame.display.get_active() will - # return False. The event queue should receive a ACTIVEEVENT event - # when the window has been iconified. - # - - self.fail() - - def todo_test_init(self): - - # __doc__ (as of 2008-08-02) for pygame.display.init: - - # pygame.display.init(): return None - # initialize the display module - # - # Initializes the pygame display module. The display module cannot do - # anything until it is initialized. This is usually handled for you - # automatically when you call the higher level pygame.init(). - # - # Pygame will select from one of several internal display backends - # when it is initialized. 
The display mode will be chosen depending on - # the platform and permissions of current user. Before the display - # module is initialized the environment variable SDL_VIDEODRIVER can - # be set to control which backend is used. The systems with multiple - # choices are listed here. - # - # Windows : windib, directx - # Unix : x11, dga, fbcon, directfb, ggi, vgl, svgalib, aalib - # On some platforms it is possible to embed the pygame display into an - # already existing window. To do this, the environment variable - # SDL_WINDOWID must be set to a string containing the window id or - # handle. The environment variable is checked when the pygame display - # is initialized. Be aware that there can be many strange side effects - # when running in an embedded display. - # - # It is harmless to call this more than once, repeated calls have no effect. - - self.fail() - - def test_list_modes(self): - modes = pygame.display.list_modes( - depth=0, flags=pygame.FULLSCREEN, display=0 - ) - # modes == -1 means any mode is supported. 
- if modes != -1: - self.assertEqual(len(modes[0]), 2) - self.assertEqual(type(modes[0][0]), int) - - modes = pygame.display.list_modes() - if modes != -1: - self.assertEqual(len(modes[0]), 2) - self.assertEqual(type(modes[0][0]), int) - - modes = pygame.display.list_modes( - depth=0, flags=0, display=0 - ) - if modes != -1: - self.assertEqual(len(modes[0]), 2) - self.assertEqual(type(modes[0][0]), int) - - def test_mode_ok(self): - pygame.display.mode_ok((128, 128)) - modes = pygame.display.list_modes() - if modes != -1: - size = modes[0] - self.assertNotEqual(pygame.display.mode_ok(size), 0) - - pygame.display.mode_ok((128, 128), 0, 32) - pygame.display.mode_ok((128, 128), flags=0, depth=32, display=0) - - - def test_mode_ok_fullscreen(self): - modes = pygame.display.list_modes() - if modes != -1: - size = modes[0] - self.assertNotEqual(pygame.display.mode_ok( - size, - flags=pygame.FULLSCREEN), 0) - - def test_get_num_displays(self): - self.assertGreater(pygame.display.get_num_displays(), 0) - - def todo_test_quit(self): - - # __doc__ (as of 2008-08-02) for pygame.display.quit: - - # pygame.display.quit(): return None - # uninitialize the display module - # - # This will shut down the entire display module. This means any active - # displays will be closed. This will also be handled automatically - # when the program exits. - # - # It is harmless to call this more than once, repeated calls have no effect. - - self.fail() - - def todo_test_set_gamma(self): - - # __doc__ (as of 2008-08-02) for pygame.display.set_gamma: - - # pygame.display.set_gamma(red, green=None, blue=None): return bool - # change the hardware gamma ramps - # - # Set the red, green, and blue gamma values on the display hardware. - # If the green and blue arguments are not passed, they will both be - # the same as red. Not all systems and hardware support gamma ramps, - # if the function succeeds it will return True. - # - # A gamma value of 1.0 creates a linear color table. 
Lower values will - # darken the display and higher values will brighten. - # - - self.fail() - - def todo_test_set_gamma_ramp(self): - - # __doc__ (as of 2008-08-02) for pygame.display.set_gamma_ramp: - - # change the hardware gamma ramps with a custom lookup - # pygame.display.set_gamma_ramp(red, green, blue): return bool - # set_gamma_ramp(red, green, blue): return bool - # - # Set the red, green, and blue gamma ramps with an explicit lookup - # table. Each argument should be sequence of 256 integers. The - # integers should range between 0 and 0xffff. Not all systems and - # hardware support gamma ramps, if the function succeeds it will - # return True. - # - - self.fail() - - def todo_test_set_icon(self): - - # __doc__ (as of 2008-08-02) for pygame.display.set_icon: - - # pygame.display.set_icon(Surface): return None - # change the system image for the display window - # - # Sets the runtime icon the system will use to represent the display - # window. All windows default to a simple pygame logo for the window - # icon. - # - # You can pass any surface, but most systems want a smaller image - # around 32x32. The image can have colorkey transparency which will be - # passed to the system. - # - # Some systems do not allow the window icon to change after it has - # been shown. This function can be called before - # pygame.display.set_mode() to create the icon before the display mode - # is set. - # - - self.fail() - - def test_set_mode_kwargs(self): - - pygame.display.set_mode(size=(1, 1), flags=0, depth=0, display=0) - - - def todo_test_set_palette(self): - - # __doc__ (as of 2008-08-02) for pygame.display.set_palette: - - # pygame.display.set_palette(palette=None): return None - # set the display color palette for indexed displays - # - # This will change the video display color palette for 8bit displays. - # This does not change the palette for the actual display Surface, - # only the palette that is used to display the Surface. 
If no palette - # argument is passed, the system default palette will be restored. The - # palette is a sequence of RGB triplets. - # - - self.fail() - - def todo_test_toggle_fullscreen(self): - - # __doc__ (as of 2008-08-02) for pygame.display.toggle_fullscreen: - - # pygame.display.toggle_fullscreen(): return bool - # switch between fullscreen and windowed displays - # - # Switches the display window between windowed and fullscreen modes. - # This function only works under the unix x11 video driver. For most - # situations it is better to call pygame.display.set_mode() with new - # display flags. - # - - self.fail() - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/draw_test.py b/venv/lib/python3.7/site-packages/pygame/tests/draw_test.py deleted file mode 100644 index 42387db..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/draw_test.py +++ /dev/null @@ -1,1294 +0,0 @@ -import unittest -import sys - -import pygame -from pygame import draw -from pygame import draw_py -from pygame.locals import SRCALPHA -from pygame.tests import test_utils - -PY3 = sys.version_info >= (3, 0, 0) - -RED = BG_RED = pygame.Color('red') -GREEN = FG_GREEN = pygame.Color('green') - - -def get_border_values(surface, width, height): - """Returns a list containing lists with the values of the surface's - borders. - """ - border_top = [surface.get_at((x, 0)) for x in range(width)] - border_left = [surface.get_at((0, y)) for y in range(height)] - border_right = [ - surface.get_at((width - 1, y)) for y in range(height)] - border_bottom = [ - surface.get_at((x, height - 1)) for x in range(width)] - - return [border_top, border_left, border_right, border_bottom] - - -def corners(surface): - """Returns a tuple with the corner positions of the given surface. - - Clockwise from the top left corner. 
- """ - width, height = surface.get_size() - return ((0, 0), (width - 1, 0), (width - 1, height - 1), (0, height - 1)) - - -def border_pos_and_color(surface): - """Yields each border position and its color for a given surface. - - Clockwise from the top left corner. - """ - width, height = surface.get_size() - right, bottom = width - 1, height - 1 - - # Top edge. - for x in range(width): - pos = (x, 0) - yield pos, surface.get_at(pos) - - # Right edge. - # Top right done in top edge loop. - for y in range(1, height): - pos = (right, y) - yield pos, surface.get_at(pos) - - # Bottom edge. - # Bottom right done in right edge loop. - for x in range(right - 1, -1, -1): - pos = (x, bottom) - yield pos, surface.get_at(pos) - - # Left edge. - # Bottom left done in bottom edge loop. Top left done in top edge loop. - for y in range(bottom - 1, 0, -1): - pos = (0, y) - yield pos, surface.get_at(pos) - - -class DrawTestCase(unittest.TestCase): - """Base class to test draw module functions.""" - draw_rect = staticmethod(draw.rect) - draw_polygon = staticmethod(draw.polygon) - draw_circle = staticmethod(draw.circle) - draw_ellipse = staticmethod(draw.ellipse) - draw_arc = staticmethod(draw.arc) - draw_line = staticmethod(draw.line) - draw_lines = staticmethod(draw.lines) - draw_aaline = staticmethod(draw.aaline) - draw_aalines = staticmethod(draw.aalines) - - -class PythonDrawTestCase(unittest.TestCase): - """Base class to test draw_py module functions.""" - # draw_py is currently missing some functions. 
- #draw_rect = staticmethod(draw_py.draw_rect) - draw_polygon = staticmethod(draw_py.draw_polygon) - #draw_circle = staticmethod(draw_py.draw_circle) - #draw_ellipse = staticmethod(draw_py.draw_ellipse) - #draw_arc = staticmethod(draw_py.draw_arc) - draw_line = staticmethod(draw_py.draw_line) - draw_lines = staticmethod(draw_py.draw_lines) - draw_aaline = staticmethod(draw_py.draw_aaline) - draw_aalines = staticmethod(draw_py.draw_aalines) - - -### Ellipse Testing ########################################################### - -class DrawEllipseMixin(object): - """Mixin tests for drawing ellipses. - - This class contains all the general ellipse drawing tests. - """ - - def test_ellipse(self): - """Tests ellipses of differing sizes on surfaces of differing sizes. - - Checks if the number of sides touching the border of the surface is - correct. - """ - left_top = [(0, 0), (1, 0), (0, 1), (1, 1)] - sizes = [(4, 4), (5, 4), (4, 5), (5, 5)] - color = (1, 13, 24, 255) - - def same_size(width, height, border_width): - """Test for ellipses with the same size as the surface.""" - surface = pygame.Surface((width, height)) - - self.draw_ellipse(surface, color, (0, 0, width, height), - border_width) - - # For each of the four borders check if it contains the color - borders = get_border_values(surface, width, height) - for border in borders: - self.assertTrue(color in border) - - def not_same_size(width, height, border_width, left, top): - """Test for ellipses that aren't the same size as the surface.""" - surface = pygame.Surface((width, height)) - - self.draw_ellipse(surface, color, - (left, top, width - 1, height - 1), border_width) - - borders = get_border_values(surface, width, height) - - # Check if two sides of the ellipse are touching the border - sides_touching = [ - color in border for border in borders].count(True) - self.assertEqual(sides_touching, 2) - - for width, height in sizes: - for border_width in (0, 1): - same_size(width, height, border_width) - for left, 
top in left_top: - not_same_size(width, height, border_width, left, top) - - def _check_1_pixel_sized_ellipse(self, surface, collide_rect, - surface_color, ellipse_color): - # Helper method to check the surface for 1 pixel wide and/or high - # ellipses. - surf_w, surf_h = surface.get_size() - - surface.lock() # For possible speed up. - - for pos in ((x, y) for y in range(surf_h) for x in range(surf_w)): - # Since the ellipse is just a line we can use a rect to help find - # where it is expected to be drawn. - if collide_rect.collidepoint(pos): - expected_color = ellipse_color - else: - expected_color = surface_color - - self.assertEqual(surface.get_at(pos), expected_color, - 'collide_rect={}, pos={}'.format(collide_rect, pos)) - - surface.unlock() - - def test_ellipse__1_pixel_width(self): - """Ensures an ellipse with a width of 1 is drawn correctly. - - An ellipse with a width of 1 pixel is a vertical line. - """ - ellipse_color = pygame.Color('red') - surface_color = pygame.Color('black') - surf_w, surf_h = 10, 20 - - surface = pygame.Surface((surf_w, surf_h)) - rect = pygame.Rect((0, 0), (1, 0)) - collide_rect = rect.copy() - - # Calculate some positions. - off_left = -1 - off_right = surf_w - off_bottom = surf_h - center_x = surf_w // 2 - center_y = surf_h // 2 - - # Test some even and odd heights. - for ellipse_h in range(6, 10): - # The ellipse is drawn on the edge of the rect so collide_rect - # needs +1 height to track where it's drawn. - collide_rect.h = ellipse_h + 1 - rect.h = ellipse_h - - # Calculate some variable positions. - off_top = -(ellipse_h + 1) - half_off_top = -(ellipse_h // 2) - half_off_bottom = surf_h - (ellipse_h // 2) - - # Draw the ellipse in different positions: fully on-surface, - # partially off-surface, and fully off-surface. 
- positions = ((off_left, off_top), - (off_left, half_off_top), - (off_left, center_y), - (off_left, half_off_bottom), - (off_left, off_bottom), - - (center_x, off_top), - (center_x, half_off_top), - (center_x, center_y), - (center_x, half_off_bottom), - (center_x, off_bottom), - - (off_right, off_top), - (off_right, half_off_top), - (off_right, center_y), - (off_right, half_off_bottom), - (off_right, off_bottom)) - - for rect_pos in positions: - surface.fill(surface_color) # Clear before each draw. - rect.topleft = rect_pos - collide_rect.topleft = rect_pos - - self.draw_ellipse(surface, ellipse_color, rect) - - self._check_1_pixel_sized_ellipse(surface, collide_rect, - surface_color, ellipse_color) - - def test_ellipse__1_pixel_width_spanning_surface(self): - """Ensures an ellipse with a width of 1 is drawn correctly - when spanning the height of the surface. - - An ellipse with a width of 1 pixel is a vertical line. - """ - ellipse_color = pygame.Color('red') - surface_color = pygame.Color('black') - surf_w, surf_h = 10, 20 - - surface = pygame.Surface((surf_w, surf_h)) - rect = pygame.Rect((0, 0), (1, surf_h + 2)) # Longer than the surface. - - # Draw the ellipse in different positions: on-surface and off-surface. - positions = ((-1, -1), # (off_left, off_top) - (0, -1), # (left_edge, off_top) - (surf_w // 2, -1), # (center_x, off_top) - (surf_w - 1, -1), # (right_edge, off_top) - (surf_w, -1)) # (off_right, off_top) - - for rect_pos in positions: - surface.fill(surface_color) # Clear before each draw. - rect.topleft = rect_pos - - self.draw_ellipse(surface, ellipse_color, rect) - - self._check_1_pixel_sized_ellipse(surface, rect, surface_color, - ellipse_color) - - def test_ellipse__1_pixel_height(self): - """Ensures an ellipse with a height of 1 is drawn correctly. - - An ellipse with a height of 1 pixel is a horizontal line. 
- """ - ellipse_color = pygame.Color('red') - surface_color = pygame.Color('black') - surf_w, surf_h = 20, 10 - - surface = pygame.Surface((surf_w, surf_h)) - rect = pygame.Rect((0, 0), (0, 1)) - collide_rect = rect.copy() - - # Calculate some positions. - off_right = surf_w - off_top = -1 - off_bottom = surf_h - center_x = surf_w // 2 - center_y = surf_h // 2 - - # Test some even and odd widths. - for ellipse_w in range(6, 10): - # The ellipse is drawn on the edge of the rect so collide_rect - # needs +1 width to track where it's drawn. - collide_rect.w = ellipse_w + 1 - rect.w = ellipse_w - - # Calculate some variable positions. - off_left = -(ellipse_w + 1) - half_off_left = -(ellipse_w // 2) - half_off_right = surf_w - (ellipse_w // 2) - - # Draw the ellipse in different positions: fully on-surface, - # partially off-surface, and fully off-surface. - positions = ((off_left, off_top), - (half_off_left, off_top), - (center_x, off_top), - (half_off_right, off_top), - (off_right, off_top), - - (off_left, center_y), - (half_off_left, center_y), - (center_x, center_y), - (half_off_right, center_y), - (off_right, center_y), - - (off_left, off_bottom), - (half_off_left, off_bottom), - (center_x, off_bottom), - (half_off_right, off_bottom), - (off_right, off_bottom)) - - for rect_pos in positions: - surface.fill(surface_color) # Clear before each draw. - rect.topleft = rect_pos - collide_rect.topleft = rect_pos - - self.draw_ellipse(surface, ellipse_color, rect) - - self._check_1_pixel_sized_ellipse(surface, collide_rect, - surface_color, ellipse_color) - - def test_ellipse__1_pixel_height_spanning_surface(self): - """Ensures an ellipse with a height of 1 is drawn correctly - when spanning the width of the surface. - - An ellipse with a height of 1 pixel is a horizontal line. 
- """ - ellipse_color = pygame.Color('red') - surface_color = pygame.Color('black') - surf_w, surf_h = 20, 10 - - surface = pygame.Surface((surf_w, surf_h)) - rect = pygame.Rect((0, 0), (surf_w + 2, 1)) # Wider than the surface. - - # Draw the ellipse in different positions: on-surface and off-surface. - positions = ((-1, -1), # (off_left, off_top) - (-1, 0), # (off_left, top_edge) - (-1, surf_h // 2), # (off_left, center_y) - (-1, surf_h - 1), # (off_left, bottom_edge) - (-1, surf_h)) # (off_left, off_bottom) - - for rect_pos in positions: - surface.fill(surface_color) # Clear before each draw. - rect.topleft = rect_pos - - self.draw_ellipse(surface, ellipse_color, rect) - - self._check_1_pixel_sized_ellipse(surface, rect, surface_color, - ellipse_color) - - def test_ellipse__1_pixel_width_and_height(self): - """Ensures an ellipse with a width and height of 1 is drawn correctly. - - An ellipse with a width and height of 1 pixel is a single pixel. - """ - ellipse_color = pygame.Color('red') - surface_color = pygame.Color('black') - surf_w, surf_h = 10, 10 - - surface = pygame.Surface((surf_w, surf_h)) - rect = pygame.Rect((0, 0), (1, 1)) - - # Calculate some positions. - off_left = -1 - off_right = surf_w - off_top = -1 - off_bottom = surf_h - left_edge = 0 - right_edge = surf_w - 1 - top_edge = 0 - bottom_edge = surf_h - 1 - center_x = surf_w // 2 - center_y = surf_h // 2 - - # Draw the ellipse in different positions: center surface, - # top/bottom/left/right edges, and off-surface. 
- positions = ((off_left, off_top), - (off_left, top_edge), - (off_left, center_y), - (off_left, bottom_edge), - (off_left, off_bottom), - - (left_edge, off_top), - (left_edge, top_edge), - (left_edge, center_y), - (left_edge, bottom_edge), - (left_edge, off_bottom), - - (center_x, off_top), - (center_x, top_edge), - (center_x, center_y), - (center_x, bottom_edge), - (center_x, off_bottom), - - (right_edge, off_top), - (right_edge, top_edge), - (right_edge, center_y), - (right_edge, bottom_edge), - (right_edge, off_bottom), - - (off_right, off_top), - (off_right, top_edge), - (off_right, center_y), - (off_right, bottom_edge), - (off_right, off_bottom)) - - for rect_pos in positions: - surface.fill(surface_color) # Clear before each draw. - rect.topleft = rect_pos - - self.draw_ellipse(surface, ellipse_color, rect) - - self._check_1_pixel_sized_ellipse(surface, rect, surface_color, - ellipse_color) - - -class DrawEllipseTest(DrawEllipseMixin, DrawTestCase): - """Test draw module function ellipse. - - This class inherits the general tests from DrawEllipseMixin. It is also - the class to add any draw.ellipse specific tests to. - """ - - -@unittest.skip('draw_py.draw_ellipse not supported yet') -class PythonDrawEllipseTest(DrawEllipseMixin, PythonDrawTestCase): - """Test draw_py module function draw_ellipse. - - This class inherits the general tests from DrawEllipseMixin. It is also - the class to add any draw_py.draw_ellipse specific tests to. - """ - - -### Line Testing ############################################################## - -class LineMixin(object): - """Mixin test for drawing lines and aalines. - - This class contains all the general line/lines/aaline/aalines drawing - tests. - """ - - def setUp(self): - self._colors = ((0, 0, 0), (255, 0, 0), (0, 255, 0), (0, 0, 255), - (255, 255, 0), (255, 0, 255), (0, 255, 255), - (255, 255, 255)) - - # Create some surfaces with different sizes, depths, and flags. 
- self._surfaces = [] - for size in ((49, 49), (50, 50)): - for depth in (8, 16, 24, 32): - for flags in (0, SRCALPHA): - surface = pygame.display.set_mode(size, flags, depth) - self._surfaces.append(surface) - self._surfaces.append(surface.convert_alpha()) - - def test_line__color(self): - """Tests if the line drawn is the correct color.""" - pos = (0, 0) - for surface in self._surfaces: - for expected_color in self._colors: - self.draw_line(surface, expected_color, pos, (1, 0)) - - self.assertEqual(surface.get_at(pos), expected_color, - 'pos={}'.format(pos)) - - def test_aaline__color(self): - """Tests if the aaline drawn is the correct color.""" - pos = (0, 0) - for surface in self._surfaces: - for expected_color in self._colors: - self.draw_aaline(surface, expected_color, pos, (1, 0)) - - self.assertEqual(surface.get_at(pos), expected_color, - 'pos={}'.format(pos)) - - def test_line__gaps(self): - """Tests if the line drawn contains any gaps.""" - expected_color = (255, 255, 255) - for surface in self._surfaces: - width = surface.get_width() - self.draw_line(surface, expected_color, (0, 0), (width - 1, 0)) - - for x in range(width): - pos = (x, 0) - self.assertEqual(surface.get_at(pos), expected_color, - 'pos={}'.format(pos)) - - def test_aaline__gaps(self): - """Tests if the aaline drawn contains any gaps. - - See: #512 - """ - expected_color = (255, 255, 255) - for surface in self._surfaces: - width = surface.get_width() - self.draw_aaline(surface, expected_color, (0, 0), (width - 1, 0)) - - for x in range(width): - pos = (x, 0) - self.assertEqual(surface.get_at(pos), expected_color, - 'pos={}'.format(pos)) - - def test_lines__color(self): - """Tests if the lines drawn are the correct color. - - Draws lines around the border of the given surface and checks if all - borders of the surface only contain the given color. 
- """ - for surface in self._surfaces: - for expected_color in self._colors: - self.draw_lines(surface, expected_color, True, - corners(surface)) - - for pos, color in border_pos_and_color(surface): - self.assertEqual(color, expected_color, - 'pos={}'.format(pos)) - - def test_aalines__color(self): - """Tests if the aalines drawn are the correct color. - - Draws aalines around the border of the given surface and checks if all - borders of the surface only contain the given color. - """ - for surface in self._surfaces: - for expected_color in self._colors: - self.draw_aalines(surface, expected_color, True, - corners(surface)) - - for pos, color in border_pos_and_color(surface): - self.assertEqual(color, expected_color, - 'pos={}'.format(pos)) - - def test_lines__gaps(self): - """Tests if the lines drawn contain any gaps. - - Draws lines around the border of the given surface and checks if - all borders of the surface contain any gaps. - """ - expected_color = (255, 255, 255) - for surface in self._surfaces: - self.draw_lines(surface, expected_color, True, corners(surface)) - - for pos, color in border_pos_and_color(surface): - self.assertEqual(color, expected_color, 'pos={}'.format(pos)) - - def test_aalines__gaps(self): - """Tests if the aalines drawn contain any gaps. - - Draws aalines around the border of the given surface and checks if - all borders of the surface contain any gaps. - - See: #512 - """ - expected_color = (255, 255, 255) - for surface in self._surfaces: - self.draw_aalines(surface, expected_color, True, corners(surface)) - - for pos, color in border_pos_and_color(surface): - self.assertEqual(color, expected_color, 'pos={}'.format(pos)) - - -class PythonDrawLineTest(LineMixin, DrawTestCase): - """Test draw_py module functions: line, lines, aaline, and aalines. - - This class inherits the general tests from LineMixin. It is also the class - to add any draw_py.draw_line/lines/aaline/aalines specific tests to. 
- """ - - -class DrawLineTest(LineMixin, PythonDrawTestCase): - """Test draw module functions: line, lines, aaline, and aalines. - - This class inherits the general tests from LineMixin. It is also the class - to add any draw.line/lines/aaline/aalines specific tests to. - """ - - def test_path_data_validation(self): - """Test validation of multi-point drawing methods. - - See bug #521 - """ - surf = pygame.Surface((5, 5)) - rect = pygame.Rect(0, 0, 5, 5) - bad_values = ('text', b'bytes', 1 + 1j, # string, bytes, complex, - object(), (lambda x: x)) # object, function - bad_points = list(bad_values) + [(1,) , (1, 2, 3)] # wrong tuple length - bad_points.extend((1, v) for v in bad_values) # one wrong value - good_path = [(1, 1), (1, 3), (3, 3), (3, 1)] - # A) draw.lines - check_pts = [(x, y) for x in range(5) for y in range(5)] - for method, is_polgon in ((draw.lines, 0), (draw.aalines, 0), - (draw.polygon, 1)): - for val in bad_values: - # 1. at the beginning - draw.rect(surf, RED, rect, 0) - with self.assertRaises(TypeError): - if is_polgon: - method(surf, GREEN, [val] + good_path, 0) - else: - method(surf, GREEN, True, [val] + good_path) - # make sure, nothing was drawn : - self.assertTrue(all(surf.get_at(pt) == RED for pt in check_pts)) - # 2. 
not at the beginning (was not checked) - draw.rect(surf, RED, rect, 0) - with self.assertRaises(TypeError): - path = good_path[:2] + [val] + good_path[2:] - if is_polgon: - method(surf, GREEN, path, 0) - else: - method(surf, GREEN, True, path) - # make sure, nothing was drawn : - self.assertTrue(all(surf.get_at(pt) == RED for pt in check_pts)) - - def _test_endianness(self, draw_func): - """ test color component order - """ - depths = 24, 32 - for depth in depths: - surface = pygame.Surface((5, 3), 0, depth) - surface.fill(pygame.Color(0,0,0)) - draw_func(surface, pygame.Color(255, 0, 0), (0, 1), (2, 1), 1) - self.assertGreater(surface.get_at((1, 1)).r, 0, 'there should be red here') - surface.fill(pygame.Color(0,0,0)) - draw_func(surface, pygame.Color(0, 0, 255), (0, 1), (2, 1), 1) - self.assertGreater(surface.get_at((1, 1)).b, 0, 'there should be blue here') - - def test_line_endianness(self): - """ test color component order - """ - self._test_endianness(draw.line) - - def test_aaline_endianness(self): - """ test color component order - """ - self._test_endianness(draw.aaline) - - def test_color_validation(self): - surf = pygame.Surface((10, 10)) - colors = 123456, (1, 10, 100), RED # but not '#ab12df' or 'red' ... - points = ((0, 0), (1, 1), (1, 0)) - # 1. valid colors - for col in colors: - draw.line(surf, col, (0, 0), (1, 1)) - draw.aaline(surf, col, (0, 0), (1, 1)) - draw.aalines(surf, col, True, points) - draw.lines(surf, col, True, points) - draw.arc(surf, col, pygame.Rect(0, 0, 3, 3), 15, 150) - draw.ellipse(surf, col, pygame.Rect(0, 0, 3, 6), 1) - draw.circle(surf, col, (7, 3), 2) - draw.polygon(surf, col, points, 0) - # 2. 
invalid colors - for col in ('invalid', 1.256, object(), None, '#ab12df', 'red'): - with self.assertRaises(TypeError): - draw.line(surf, col, (0, 0), (1, 1)) - with self.assertRaises(TypeError): - draw.aaline(surf, col, (0, 0), (1, 1)) - with self.assertRaises(TypeError): - draw.aalines(surf, col, True, points) - with self.assertRaises(TypeError): - draw.lines(surf, col, True, points) - with self.assertRaises(TypeError): - draw.arc(surf, col, pygame.Rect(0, 0, 3, 3), 15, 150) - with self.assertRaises(TypeError): - draw.ellipse(surf, col, pygame.Rect(0, 0, 3, 6), 1) - with self.assertRaises(TypeError): - draw.circle(surf, col, (7, 3), 2) - with self.assertRaises(TypeError): - draw.polygon(surf, col, points, 0) - - -# Using a separate class to test line anti-aliasing. -class AntiAliasedLineMixin(object): - """Mixin tests for line anti-aliasing. - - This class contains all the general anti-aliasing line drawing tests. - """ - - def setUp(self): - self.surface = pygame.Surface((10, 10)) - draw.rect(self.surface, BG_RED, (0, 0, 10, 10), 0) - - def _check_antialiasing(self, from_point, to_point, should, check_points, - set_endpoints=True): - """Draw a line between two points and check colors of check_points.""" - if set_endpoints: - should[from_point] = should[to_point] = FG_GREEN - - def check_one_direction(from_point, to_point, should): - self.draw_aaline(self.surface, FG_GREEN, from_point, to_point, - True) - - for pt in check_points: - color = should.get(pt, BG_RED) - if PY3: # "subTest" is sooo helpful, but does not exist in PY2 - with self.subTest(from_pt=from_point, pt=pt, to=to_point): - self.assertEqual(self.surface.get_at(pt), color) - else: - self.assertEqual(self.surface.get_at(pt), color) - # reset - draw.rect(self.surface, BG_RED, (0, 0, 10, 10), 0) - - # it is important to test also opposite direction, the algorithm - # is (#512) or was not symmetric - check_one_direction(from_point, to_point, should) - if from_point != to_point: - 
check_one_direction(to_point, from_point, should) - - def test_short_non_antialiased_lines(self): - """test very short not anti aliased lines in all directions.""" - # Horizontal, vertical and diagonal lines should not be anti-aliased, - # even with draw.aaline ... - check_points = [(i, j) for i in range(3, 8) for j in range(3, 8)] - - def check_both_directions(from_pt, to_pt, other_points): - should = {pt: FG_GREEN for pt in other_points} - self._check_antialiasing(from_pt, to_pt, should, check_points) - - # 0. one point - check_both_directions((5, 5), (5, 5), []) - # 1. horizontal - check_both_directions((4, 7), (5, 7), []) - check_both_directions((5, 4), (7, 4), [(6, 4)]) - - # 2. vertical - check_both_directions((5, 5), (5, 6), []) - check_both_directions((6, 4), (6, 6), [(6, 5)]) - # 3. diagonals - check_both_directions((5, 5), (6, 6), []) - check_both_directions((5, 5), (7, 7), [(6, 6)]) - check_both_directions((5, 6), (6, 5), []) - check_both_directions((6, 4), (4, 6), [(5, 5)]) - - def test_short_line_anti_aliasing(self): - check_points = [(i, j) for i in range(3, 8) for j in range(3, 8)] - - def check_both_directions(from_pt, to_pt, should): - self._check_antialiasing(from_pt, to_pt, should, check_points) - - # lets say dx = abs(x0 - x1) ; dy = abs(y0 - y1) - brown = (127, 127, 0) - # dy / dx = 0.5 - check_both_directions((4, 4), (6, 5), {(5, 4): brown, (5, 5): brown}) - check_both_directions((4, 5), (6, 4), {(5, 4): brown, (5, 5): brown}) - # dy / dx = 2 - check_both_directions((4, 4), (5, 6), {(4, 5): brown, (5, 5): brown}) - check_both_directions((5, 4), (4, 6), {(4, 5): brown, (5, 5): brown}) - - # some little longer lines; so we need to check more points: - check_points = [(i, j) for i in range(2, 9) for j in range(2, 9)] - # dy / dx = 0.25 - reddish = (191, 63, 0) - greenish = (63, 191, 0) - should = {(4, 3): greenish, (5, 3): brown, (6, 3): reddish, - (4, 4): reddish, (5, 4): brown, (6, 4): greenish} - check_both_directions((3, 3), (7, 4), should) - 
should = {(4, 3): reddish, (5, 3): brown, (6, 3): greenish, - (4, 4): greenish, (5, 4): brown, (6, 4): reddish} - check_both_directions((3, 4), (7, 3), should) - # dy / dx = 4 - should = {(4, 4): greenish, (4, 5): brown, (4, 6): reddish, - (5, 4): reddish, (5, 5): brown, (5, 6): greenish, - } - check_both_directions((4, 3), (5, 7), should) - should = {(4, 4): reddish, (4, 5): brown, (4, 6): greenish, - (5, 4): greenish, (5, 5): brown, (5, 6): reddish} - check_both_directions((5, 3), (4, 7), should) - - def test_anti_aliasing_float_coordinates(self): - """Float coordinates should be blended smoothly.""" - check_points = [(i, j) for i in range(5) for j in range(5)] - brown = (127, 127, 0) - - # 0. identical point : current implementation does no smoothing... - expected = {(1, 2): FG_GREEN} - self._check_antialiasing((1.5, 2), (1.5, 2), expected, - check_points, set_endpoints=False) - expected = {(2, 2): FG_GREEN} - self._check_antialiasing((2.5, 2.7), (2.5, 2.7), expected, - check_points, set_endpoints=False) - - # 1. horizontal lines - # a) blend endpoints - expected = {(1, 2): brown, (2, 2): FG_GREEN} - self._check_antialiasing((1.5, 2), (2, 2), expected, - check_points, set_endpoints=False) - expected = {(1, 2): brown, (2, 2): FG_GREEN, (3, 2): brown} - self._check_antialiasing((1.5, 2), (2.5, 2), expected, - check_points, set_endpoints=False) - expected = {(2, 2): brown, (1, 2): FG_GREEN, } - self._check_antialiasing((1, 2), (1.5, 2), expected, - check_points, set_endpoints=False) - expected = {(1, 2): brown, (2, 2): (63, 191, 0)} - self._check_antialiasing((1.5, 2), (1.75, 2), expected, - check_points, set_endpoints=False) - - # b) blend y-coordinate - expected = {(x, y): brown for x in range(2, 5) for y in (1, 2)} - self._check_antialiasing((2, 1.5), (4, 1.5), expected, - check_points, set_endpoints=False) - - # 2. 
vertical lines - # a) blend endpoints - expected = {(2, 1): brown, (2, 2): FG_GREEN, (2, 3): brown} - self._check_antialiasing((2, 1.5), (2, 2.5), expected, - check_points, set_endpoints=False) - expected = {(2, 1): brown, (2, 2): (63, 191, 0)} - self._check_antialiasing((2, 1.5), (2, 1.75), expected, - check_points, set_endpoints=False) - # b) blend x-coordinate - expected = {(x, y): brown for x in (1, 2) for y in range(2, 5)} - self._check_antialiasing((1.5, 2), (1.5, 4), expected, - check_points, set_endpoints=False) - # 3. diagonal lines - # a) blend endpoints - expected = {(1, 1): brown, (2, 2): FG_GREEN, (3, 3): brown} - self._check_antialiasing((1.5, 1.5), (2.5, 2.5), expected, - check_points, set_endpoints=False) - expected = {(3, 1): brown, (2, 2): FG_GREEN, (1, 3): brown} - self._check_antialiasing((2.5, 1.5), (1.5, 2.5), expected, - check_points, set_endpoints=False) - # b) blend sidewards - expected = {(2, 1): brown, (2, 2): brown, (3, 2): brown, (3, 3): brown} - self._check_antialiasing((2, 1.5), (3, 2.5), expected, - check_points, set_endpoints=False) - - reddish = (191, 63, 0) - greenish = (63, 191, 0) - expected = {(2, 1): greenish, (2, 2): reddish, - (3, 2): greenish, (3, 3): reddish, - (4, 3): greenish, (4, 4): reddish} - self._check_antialiasing((2, 1.25), (4, 3.25), expected, - check_points, set_endpoints=False) - - def test_anti_aliasing_at_and_outside_the_border(self): - check_points = [(i, j) for i in range(10) for j in range(10)] - - reddish = (191, 63, 0) - brown = (127, 127, 0) - greenish = (63, 191, 0) - from_point, to_point = (3, 3), (7, 4) - should = {(4, 3): greenish, (5, 3): brown, (6, 3): reddish, - (4, 4): reddish, (5, 4): brown, (6, 4): greenish} - - for dx, dy in ((-4, 0), (4, 0), # moved to left and right borders - (0, -5), (0, -4), (0, -3), # upper border - (0, 5), (0, 6), (0, 7), # lower border - (-4, -4), (-4, -3), (-3, -4)): # upper left corner - first = from_point[0] + dx, from_point[1] + dy - second = to_point[0] + dx, 
to_point[1] + dy - expected = {(x + dx, y + dy): color - for (x, y), color in should.items()} - self._check_antialiasing(first, second, expected, check_points) - - -@unittest.expectedFailure -class AntiAliasingLineTest(AntiAliasedLineMixin, DrawTestCase): - """Test anti-aliasing for draw. - - This class inherits the general tests from AntiAliasedLineMixin. It is - also the class to add any anti-aliasing draw specific tests to. - """ - -class PythonAntiAliasingLineTest(AntiAliasedLineMixin, PythonDrawTestCase): - """Test anti-aliasing for draw_py. - - This class inherits the general tests from AntiAliasedLineMixin. It is - also the class to add any anti-aliasing draw_py specific tests to. - """ - - -### Draw Module Testing ####################################################### - -# These tests should eventually be moved to their appropriate mixin/class. -class DrawModuleTest(unittest.TestCase): - - def setUp(self): - (self.surf_w, self.surf_h) = self.surf_size = (320, 200) - self.surf = pygame.Surface(self.surf_size, pygame.SRCALPHA) - self.color = (1, 13, 24, 205) - - def test_rect__fill(self): - # __doc__ (as of 2008-06-25) for pygame.draw.rect: - - # pygame.draw.rect(Surface, color, Rect, width=0): return Rect - # draw a rectangle shape - - rect = pygame.Rect(10, 10, 25, 20) - drawn = draw.rect(self.surf, self.color, rect, 0) - - self.assertEqual(drawn, rect) - - # Should be colored where it's supposed to be - for pt in test_utils.rect_area_pts(rect): - color_at_pt = self.surf.get_at(pt) - self.assertEqual(color_at_pt, self.color) - - # And not where it shouldn't - for pt in test_utils.rect_outer_bounds(rect): - color_at_pt = self.surf.get_at(pt) - self.assertNotEqual(color_at_pt, self.color) - - # Issue #310: Cannot draw rectangles that are 1 pixel high - bgcolor = pygame.Color('black') - self.surf.fill(bgcolor) - hrect = pygame.Rect(1, 1, self.surf_w - 2, 1) - vrect = pygame.Rect(1, 3, 1, self.surf_h - 4) - drawn = draw.rect(self.surf, self.color, hrect, 0) - 
self.assertEqual(drawn, hrect) - x, y = hrect.topleft - w, h = hrect.size - self.assertEqual(self.surf.get_at((x - 1, y)), bgcolor) - self.assertEqual(self.surf.get_at((x + w, y)), bgcolor) - for i in range(x, x + w): - self.assertEqual(self.surf.get_at((i, y)), self.color) - drawn = draw.rect(self.surf, self.color, vrect, 0) - self.assertEqual(drawn, vrect) - x, y = vrect.topleft - w, h = vrect.size - self.assertEqual(self.surf.get_at((x, y - 1)), bgcolor) - self.assertEqual(self.surf.get_at((x, y + h)), bgcolor) - for i in range(y, y + h): - self.assertEqual(self.surf.get_at((x, i)), self.color) - - def test_rect__one_pixel_lines(self): - rect = pygame.Rect(10, 10, 56, 20) - - drawn = draw.rect(self.surf, self.color, rect, 1) - self.assertEqual(drawn, rect) - - # Should be colored where it's supposed to be - for pt in test_utils.rect_perimeter_pts(drawn): - color_at_pt = self.surf.get_at(pt) - self.assertEqual(color_at_pt, self.color) - - # And not where it shouldn't - for pt in test_utils.rect_outer_bounds(drawn): - color_at_pt = self.surf.get_at(pt) - self.assertNotEqual(color_at_pt, self.color) - - # See DrawLineTest class for additional draw.line() and draw.aaline() - # tests. 
- def test_line(self): - # (l, t), (l, t) - drawn = draw.line(self.surf, self.color, (1, 0), (200, 0)) - self.assertEqual(drawn.right, 201, - "end point arg should be (or at least was) inclusive") - - # Should be colored where it's supposed to be - for pt in test_utils.rect_area_pts(drawn): - self.assertEqual(self.surf.get_at(pt), self.color) - - # And not where it shouldn't - for pt in test_utils.rect_outer_bounds(drawn): - self.assertNotEqual(self.surf.get_at(pt), self.color) - - # Line width greater that 1 - line_width = 2 - offset = 5 - a = (offset, offset) - b = (self.surf_size[0] - offset, a[1]) - c = (a[0], self.surf_size[1] - offset) - d = (b[0], c[1]) - e = (a[0] + offset, c[1]) - f = (b[0], c[0] + 5) - lines = [(a, d), (b, c), (c, b), (d, a), - (a, b), (b, a), (a, c), (c, a), - (a, e), (e, a), (a, f), (f, a), - (a, a),] - for p1, p2 in lines: - msg = "%s - %s" % (p1, p2) - if p1[0] <= p2[0]: - plow = p1 - phigh = p2 - else: - plow = p2 - phigh = p1 - self.surf.fill((0, 0, 0)) - rec = draw.line(self.surf, (255, 255, 255), p1, p2, line_width) - xinc = yinc = 0 - if abs(p1[0] - p2[0]) > abs(p1[1] - p2[1]): - yinc = 1 - else: - xinc = 1 - for i in range(line_width): - p = (p1[0] + xinc * i, p1[1] + yinc * i) - self.assertEqual(self.surf.get_at(p), (255, 255, 255), msg) - p = (p2[0] + xinc * i, p2[1] + yinc * i) - self.assertEqual(self.surf.get_at(p), (255, 255, 255), msg) - p = (plow[0] - 1, plow[1]) - self.assertEqual(self.surf.get_at(p), (0, 0, 0), msg) - p = (plow[0] + xinc * line_width, plow[1] + yinc * line_width) - self.assertEqual(self.surf.get_at(p), (0, 0, 0), msg) - p = (phigh[0] + xinc * line_width, phigh[1] + yinc * line_width) - self.assertEqual(self.surf.get_at(p), (0, 0, 0), msg) - if p1[0] < p2[0]: - rx = p1[0] - else: - rx = p2[0] - if p1[1] < p2[1]: - ry = p1[1] - else: - ry = p2[1] - w = abs(p2[0] - p1[0]) + 1 + xinc * (line_width - 1) - h = abs(p2[1] - p1[1]) + 1 + yinc * (line_width - 1) - msg += ", %s" % (rec,) - self.assertEqual(rec, 
(rx, ry, w, h), msg) - - @unittest.expectedFailure - def test_line_for_gaps(self): - """ |tags: ignore| - """ - # __doc__ (as of 2008-06-25) for pygame.draw.line: - - # pygame.draw.line(Surface, color, start_pos, end_pos, width=1): return Rect - # draw a straight line segment - - # This checks bug Thick Line Bug #448 - - width = 200 - height = 200 - surf = pygame.Surface((width, height), pygame.SRCALPHA) - - def white_surrounded_pixels(x, y): - offsets = [(1, 0), (0, 1), (-1, 0), (0, -1)] - WHITE = (255, 255, 255, 255) - return len([1 for dx, dy in offsets - if surf.get_at((x+dx, y+dy)) == WHITE]) - - def check_white_line(start, end): - surf.fill((0, 0, 0)) - pygame.draw.line(surf, (255, 255, 255), start, end, 30) - - BLACK = (0, 0, 0, 255) - for x in range(1, width-1): - for y in range(1, height-1): - if surf.get_at((x, y)) == BLACK: - self.assertTrue(white_surrounded_pixels(x, y) < 3) - - check_white_line((50, 50), (140, 0)) - check_white_line((50, 50), (0, 120)) - check_white_line((50, 50), (199, 198)) - - -### Polygon Testing ########################################################### - -SQUARE = ([0, 0], [3, 0], [3, 3], [0, 3]) -DIAMOND = [(1, 3), (3, 5), (5, 3), (3, 1)] -CROSS = ([2, 0], [4, 0], [4, 2], [6, 2], - [6, 4], [4, 4], [4, 6], [2, 6], - [2, 4], [0, 4], [0, 2], [2, 2]) - - -class DrawPolygonMixin(object): - """Mixin tests for drawing polygons. - - This class contains all the general polygon drawing tests. - """ - - def setUp(self): - self.surface = pygame.Surface((20, 20)) - - def test_draw_square(self): - self.draw_polygon(self.surface, RED, SQUARE, 0) - # note : there is a discussion (#234) if draw.polygon should include or - # not the right or lower border; here we stick with current behavior, - # eg include those borders ... 
- for x in range(4): - for y in range(4): - self.assertEqual(self.surface.get_at((x, y)), RED) - - def test_draw_diamond(self): - pygame.draw.rect(self.surface, RED, (0, 0, 10, 10), 0) - self.draw_polygon(self.surface, GREEN, DIAMOND, 0) - # this diamond shape is equivalent to its four corners, plus inner square - for x, y in DIAMOND: - self.assertEqual(self.surface.get_at((x, y)), GREEN, msg=str((x, y))) - for x in range(2, 5): - for y in range(2, 5): - self.assertEqual(self.surface.get_at((x, y)), GREEN) - - def test_1_pixel_high_or_wide_shapes(self): - # 1. one-pixel-high, filled - pygame.draw.rect(self.surface, RED, (0, 0, 10, 10), 0) - self.draw_polygon(self.surface, GREEN, [(x, 2) for x, _y in CROSS], 0) - cross_size = 6 # the maximum x or y coordinate of the cross - for x in range(cross_size + 1): - self.assertEqual(self.surface.get_at((x, 1)), RED) - self.assertEqual(self.surface.get_at((x, 2)), GREEN) - self.assertEqual(self.surface.get_at((x, 3)), RED) - pygame.draw.rect(self.surface, RED, (0, 0, 10, 10), 0) - # 2. one-pixel-high, not filled - self.draw_polygon(self.surface, GREEN, [(x, 5) for x, _y in CROSS], 1) - for x in range(cross_size + 1): - self.assertEqual(self.surface.get_at((x, 4)), RED) - self.assertEqual(self.surface.get_at((x, 5)), GREEN) - self.assertEqual(self.surface.get_at((x, 6)), RED) - pygame.draw.rect(self.surface, RED, (0, 0, 10, 10), 0) - # 3. one-pixel-wide, filled - self.draw_polygon(self.surface, GREEN, [(3, y) for _x, y in CROSS], 0) - for y in range(cross_size + 1): - self.assertEqual(self.surface.get_at((2, y)), RED) - self.assertEqual(self.surface.get_at((3, y)), GREEN) - self.assertEqual(self.surface.get_at((4, y)), RED) - pygame.draw.rect(self.surface, RED, (0, 0, 10, 10), 0) - # 4. 
one-pixel-wide, not filled - self.draw_polygon(self.surface, GREEN, [(4, y) for _x, y in CROSS], 1) - for y in range(cross_size + 1): - self.assertEqual(self.surface.get_at((3, y)), RED) - self.assertEqual(self.surface.get_at((4, y)), GREEN) - self.assertEqual(self.surface.get_at((5, y)), RED) - - def test_draw_symetric_cross(self): - """non-regression on issue #234 : x and y where handled inconsistently. - - Also, the result is/was different whether we fill or not the polygon. - """ - # 1. case width = 1 (not filled: `polygon` calls internally the `lines` function) - pygame.draw.rect(self.surface, RED, (0, 0, 10, 10), 0) - self.draw_polygon(self.surface, GREEN, CROSS, 1) - inside = [(x, 3) for x in range(1, 6)] + [(3, y) for y in range(1, 6)] - for x in range(10): - for y in range(10): - if (x, y) in inside: - self.assertEqual(self.surface.get_at((x, y)), RED) - elif (x in range(2, 5) and y <7) or (y in range(2, 5) and x < 7): - # we are on the border of the cross: - self.assertEqual(self.surface.get_at((x, y)), GREEN) - else: - # we are outside - self.assertEqual(self.surface.get_at((x, y)), RED) - - # 2. 
case width = 0 (filled; this is the example from #234) - pygame.draw.rect(self.surface, RED, (0, 0, 10, 10), 0) - self.draw_polygon(self.surface, GREEN, CROSS, 0) - inside = [(x, 3) for x in range(1, 6)] + [(3, y) for y in range(1, 6)] - for x in range(10): - for y in range(10): - if (x in range(2, 5) and y <7) or (y in range(2, 5) and x < 7): - # we are on the border of the cross: - self.assertEqual(self.surface.get_at((x, y)), GREEN, msg=str((x, y))) - else: - # we are outside - self.assertEqual(self.surface.get_at((x, y)), RED) - - def test_illumine_shape(self): - """non-regression on issue #313""" - rect = pygame.Rect((0, 0, 20, 20)) - path_data = [(0, 0), (rect.width-1, 0), # upper border - (rect.width-5, 5-1), (5-1, 5-1), # upper inner - (5- 1, rect.height-5), (0, rect.height-1)] # lower diagonal - # The shape looks like this (the numbers are the indices of path_data) - - # 0**********************1 <-- upper border - # *********************** - # ********************** - # ********************* - # ****3**************2 <-- upper inner border - # ***** - # ***** (more lines here) - # ***** - # ****4 - # **** - # *** - # ** - # 5 - # - - # the current bug is that the "upper inner" line is not drawn, but only - # if 4 or some lower corner exists - pygame.draw.rect(self.surface, RED, (0, 0, 20, 20), 0) - - # 1. First without the corners 4 & 5 - self.draw_polygon(self.surface, GREEN, path_data[:4], 0) - for x in range(20): - self.assertEqual(self.surface.get_at((x, 0)), GREEN) # upper border - for x in range(4, rect.width-5 +1): - self.assertEqual(self.surface.get_at((x, 4)), GREEN) # upper inner - - # 2. 
with the corners 4 & 5 - pygame.draw.rect(self.surface, RED, (0, 0, 20, 20), 0) - self.draw_polygon(self.surface, GREEN, path_data, 0) - for x in range(4, rect.width-5 +1): - self.assertEqual(self.surface.get_at((x, 4)), GREEN) # upper inner - - def test_invalid_points(self): - self.assertRaises(TypeError, lambda: self.draw_polygon(self.surface, - RED, ((0, 0), (0, 20), (20, 20), 20), 0)) - - -class DrawPolygonTest(DrawPolygonMixin, DrawTestCase): - """Test draw module function polygon. - - This class inherits the general tests from DrawPolygonMixin. It is also - the class to add any draw.polygon specific tests to. - """ - - -class PythonDrawPolygonTest(DrawPolygonMixin, PythonDrawTestCase): - """Test draw_py module function draw_polygon. - - This class inherits the general tests from DrawPolygonMixin. It is also - the class to add any draw_py.draw_polygon specific tests to. - """ - - -### Rect Testing ############################################################## - -class DrawRectMixin(object): - """Mixin tests for drawing rects. - - This class contains all the general rect drawing tests. - """ - - def todo_test_circle(self): - self.fail() - - -class DrawRectTest(DrawRectMixin, DrawTestCase): - """Test draw module function rect. - - This class inherits the general tests from DrawRectMixin. It is also the - class to add any draw.rect specific tests to. - """ - - -class PythonDrawRectTest(DrawRectMixin, PythonDrawTestCase): - """Test draw_py module function draw_rect. - - This class inherits the general tests from DrawRectMixin. It is also the - class to add any draw_py.draw_rect specific tests to. - """ - - -### Circle Testing ############################################################ - -class DrawCircleMixin(object): - """Mixin tests for drawing circles. - - This class contains all the general circle drawing tests. - """ - - def todo_test_circle(self): - self.fail() - -class DrawCircleTest(DrawCircleMixin, DrawTestCase): - """Test draw module function circle. 
- - This class inherits the general tests from DrawCircleMixin. It is also - the class to add any draw.circle specific tests to. - """ - - -class PythonDrawCircleTest(DrawCircleMixin, PythonDrawTestCase): - """Test draw_py module function draw_circle." - - This class inherits the general tests from DrawCircleMixin. It is also - the class to add any draw_py.draw_circle specific tests to. - """ - - -### Arc Testing ############################################################### - -class DrawArcMixin(object): - """Mixin tests for drawing arcs. - - This class contains all the general arc drawing tests. - """ - - def todo_test_arc(self): - self.fail() - - -class DrawArcTest(DrawArcMixin, DrawTestCase): - """Test draw module function arc. - - This class inherits the general tests from DrawArcMixin. It is also the - class to add any draw.arc specific tests to. - """ - - -class PythonDrawArcTest(DrawArcMixin, PythonDrawTestCase): - """Test draw_py module function draw_arc. - - This class inherits the general tests from DrawArcMixin. It is also the - class to add any draw_py.draw_arc specific tests to. 
- """ - - -############################################################################### - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/event_test.py b/venv/lib/python3.7/site-packages/pygame/tests/event_test.py deleted file mode 100644 index fccb4cf..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/event_test.py +++ /dev/null @@ -1,340 +0,0 @@ -import os -import unittest - -import pygame -from pygame.compat import as_unicode - -################################################################################ - -events = ( -# pygame.NOEVENT, -# pygame.ACTIVEEVENT, - pygame.KEYDOWN, - pygame.KEYUP, - pygame.MOUSEMOTION, - pygame.MOUSEBUTTONDOWN, - pygame.MOUSEBUTTONUP, - pygame.JOYAXISMOTION, - pygame.JOYBALLMOTION, - pygame.JOYHATMOTION, - pygame.JOYBUTTONDOWN, - pygame.JOYBUTTONUP, - pygame.VIDEORESIZE, - pygame.VIDEOEXPOSE, - pygame.QUIT, - pygame.SYSWMEVENT, - pygame.USEREVENT, -# pygame.NUMEVENTS, -) - - -class EventTypeTest(unittest.TestCase): - def test_Event(self): - """Ensure an Event object can be created.""" - e = pygame.event.Event(pygame.USEREVENT, some_attr=1, other_attr='1') - - self.assertEqual(e.some_attr, 1) - self.assertEqual(e.other_attr, "1") - - # Event now uses tp_dictoffset and tp_members: request 62 - # on Motherhamster Bugzilla. - self.assertEqual(e.type, pygame.USEREVENT) - self.assertIs(e.dict, e.__dict__) - - e.some_attr = 12 - - self.assertEqual(e.some_attr, 12) - - e.new_attr = 15 - - self.assertEqual(e.new_attr, 15) - - # For Python 2.x a TypeError is raised for a readonly member; - # for Python 3.x it is an AttributeError. - self.assertRaises((TypeError, AttributeError), setattr, e, 'type', 0) - self.assertRaises((TypeError, AttributeError), setattr, e, 'dict', None) - - # Ensure attributes are visible to dir(), part of the original - # posted request. 
- d = dir(e) - attrs = ('type', 'dict', '__dict__', 'some_attr', 'other_attr', - 'new_attr') - - for attr in attrs: - self.assertIn(attr, d) - - def test_as_str(self): - # Bug reported on Pygame mailing list July 24, 2011: - # For Python 3.x str(event) to raises an UnicodeEncodeError when - # an event attribute is a string with a non-ascii character. - try: - str(pygame.event.Event(events[0], a=as_unicode(r"\xed"))) - except UnicodeEncodeError: - self.fail("Event object raised exception for non-ascii character") - # Passed. - - -race_condition_notification = """ -This test is dependent on timing. The event queue is cleared in preparation for -tests. There is a small window where outside events from the OS may have effected -results. Try running the test again. -""" - -class EventModuleArgsTest(unittest.TestCase): - def setUp(self): - pygame.display.init() - pygame.event.clear() - - def tearDown(self): - pygame.display.quit() - - def test_get(self): - pygame.event.get() - pygame.event.get(None) - pygame.event.get(None, True) - - pygame.event.get(pump=False) - pygame.event.get(pump=True) - pygame.event.get(eventtype=None) - pygame.event.get(eventtype=pygame.USEREVENT, - pump=False) - - def test_clear(self): - pygame.event.clear() - pygame.event.clear(None) - pygame.event.clear(None, True) - - pygame.event.clear(pump=False) - pygame.event.clear(pump=True) - pygame.event.clear(eventtype=None) - pygame.event.clear(eventtype=pygame.USEREVENT, - pump=False) - - def test_peek(self): - pygame.event.peek() - pygame.event.peek(None) - pygame.event.peek(None, True) - - pygame.event.peek(pump=False) - pygame.event.peek(pump=True) - pygame.event.peek(eventtype=None) - pygame.event.peek(eventtype=pygame.USEREVENT, - pump=False) - - -class EventModuleTest(unittest.TestCase): - def setUp(self): - pygame.display.init() - pygame.event.clear() # flush events - - def tearDown(self): - pygame.event.clear() # flush events - pygame.display.quit() - - def test_event_attribute(self): - e1 = 
pygame.event.Event(pygame.USEREVENT, attr1='attr1') - self.assertEqual(e1.attr1, 'attr1') - - def test_set_blocked(self): - """Ensure events can be blocked from the queue.""" - event = events[0] - pygame.event.set_blocked(event) - - self.assertTrue(pygame.event.get_blocked(event)) - - pygame.event.post(pygame.event.Event(event)) - ret = pygame.event.get() - should_be_blocked = [e for e in ret if e.type == event] - - self.assertEqual(should_be_blocked, []) - - def test_set_blocked_all(self): - """Ensure all events can be unblocked at once.""" - pygame.event.set_blocked(None) - - for e in events: - self.assertTrue(pygame.event.get_blocked(e)) - - def test_post__and_poll(self): - """Ensure events can be posted to the queue.""" - e1 = pygame.event.Event(pygame.USEREVENT, attr1='attr1') - pygame.event.post(e1) - posted_event = pygame.event.poll() - - self.assertEqual(e1.attr1, posted_event.attr1, - race_condition_notification) - - # fuzzing event types - for i in range(1, 11): - pygame.event.post(pygame.event.Event(events[i])) - - self.assertEqual(pygame.event.poll().type, events[i], - race_condition_notification) - - def test_post_large_user_event(self): - pygame.event.post(pygame.event.Event(pygame.USEREVENT, {'a': "a" * 1024})) - e = pygame.event.poll() - - self.assertEqual(e.type, pygame.USEREVENT) - self.assertEqual(e.a, "a" * 1024) - - def test_get(self): - """Ensure get() retrieves all the events on the queue.""" - event_cnt = 10 - for _ in range(event_cnt): - pygame.event.post(pygame.event.Event(pygame.USEREVENT)) - - queue = pygame.event.get() - - self.assertEqual(len(queue), event_cnt) - self.assertTrue(all(e.type == pygame.USEREVENT for e in queue)) - - def test_get_type(self): - ev = pygame.event.Event(pygame.USEREVENT) - pygame.event.post(ev) - queue = pygame.event.get(pygame.USEREVENT) - self.assertEqual(len(queue), 1) - self.assertEqual(queue[0].type, pygame.USEREVENT) - - def test_clear(self): - """Ensure clear() removes all the events on the queue.""" - 
for e in events: - pygame.event.post(pygame.event.Event(e)) - - poll_event = pygame.event.poll() - - self.assertNotEqual(poll_event.type, pygame.NOEVENT) - - pygame.event.clear() - poll_event = pygame.event.poll() - - self.assertEqual(poll_event.type, pygame.NOEVENT, - race_condition_notification) - - def test_event_name(self): - """Ensure event_name() returns the correct event name.""" - self.assertEqual(pygame.event.event_name(pygame.KEYDOWN), "KeyDown") - self.assertEqual(pygame.event.event_name(pygame.USEREVENT), - "UserEvent") - - def test_wait(self): - """Ensure wait() waits for an event on the queue.""" - event = pygame.event.Event(events[0]) - pygame.event.post(event) - wait_event = pygame.event.wait() - - self.assertEqual(wait_event.type, event.type) - - def test_peek(self): - """Ensure queued events can be peeked at.""" - event_types = [pygame.KEYDOWN, pygame.KEYUP, pygame.MOUSEMOTION] - - for event_type in event_types: - pygame.event.post(pygame.event.Event(event_type)) - - for event_type in event_types: - self.assertTrue(pygame.event.peek(event_type)) - - self.assertTrue(pygame.event.peek(event_types)) - - def test_peek_empty(self): - pygame.event.clear() - self.assertFalse(pygame.event.peek()) - - def test_set_allowed(self): - """Ensure a blocked event type can be unblocked/allowed.""" - event = events[0] - pygame.event.set_blocked(event) - - self.assertTrue(pygame.event.get_blocked(event)) - - pygame.event.set_allowed(event) - - self.assertFalse(pygame.event.get_blocked(event)) - - def test_set_allowed_all(self): - """Ensure all events can be unblocked/allowed at once.""" - pygame.event.set_blocked(None) - - for e in events: - self.assertTrue(pygame.event.get_blocked(e)) - - pygame.event.set_allowed(None) - - for e in events: - self.assertFalse(pygame.event.get_blocked(e)) - - def test_pump(self): - """Ensure pump() functions properly.""" - pygame.event.pump() - - @unittest.skipIf(os.environ.get('SDL_VIDEODRIVER') == 'dummy', - 'requires the 
SDL_VIDEODRIVER to be a non "dummy" value') - def test_set_grab__and_get_symmetric(self): - """Ensure event grabbing can be enabled and disabled.""" - surf = pygame.display.set_mode((10,10)) - pygame.event.set_grab(True) - - self.assertTrue(pygame.event.get_grab()) - - pygame.event.set_grab(False) - - self.assertFalse(pygame.event.get_grab()) - - def test_event_equality(self): - a = pygame.event.Event(events[0], a=1) - b = pygame.event.Event(events[0], a=1) - c = pygame.event.Event(events[1], a=1) - d = pygame.event.Event(events[0], a=2) - - self.assertTrue(a == a) - self.assertFalse(a != a) - self.assertTrue(a == b) - self.assertFalse(a != b) - self.assertTrue(a != c) - self.assertFalse(a == c) - self.assertTrue(a != d) - self.assertFalse(a == d) - - def todo_test_get_blocked(self): - - # __doc__ (as of 2008-08-02) for pygame.event.get_blocked: - - # pygame.event.get_blocked(type): return bool - # test if a type of event is blocked from the queue - # - # Returns true if the given event type is blocked from the queue. - - self.fail() - - def todo_test_get_grab(self): - - # __doc__ (as of 2008-08-02) for pygame.event.get_grab: - - # pygame.event.get_grab(): return bool - # test if the program is sharing input devices - # - # Returns true when the input events are grabbed for this application. - # Use pygame.event.set_grab() to control this state. - # - - self.fail() - - def todo_test_poll(self): - - # __doc__ (as of 2008-08-02) for pygame.event.poll: - - # pygame.event.poll(): return Event - # get a single event from the queue - # - # Returns a single event from the queue. If the event queue is empty - # an event of type pygame.NOEVENT will be returned immediately. The - # returned event is removed from the queue. 
- # - - self.fail() - -################################################################################ - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/fastevent_tags.py b/venv/lib/python3.7/site-packages/pygame/tests/fastevent_tags.py deleted file mode 100644 index c660bef..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/fastevent_tags.py +++ /dev/null @@ -1 +0,0 @@ -__tags__ = [] diff --git a/venv/lib/python3.7/site-packages/pygame/tests/fastevent_test.py b/venv/lib/python3.7/site-packages/pygame/tests/fastevent_test.py deleted file mode 100644 index 34723a5..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/fastevent_test.py +++ /dev/null @@ -1,150 +0,0 @@ -import unittest -from pygame.tests.event_test import race_condition_notification -import pygame -from pygame import event, fastevent -from pygame.compat import geterror - -################################################################################ - -class FasteventModuleTest(unittest.TestCase): - - def setUp(self): - pygame.display.init() - fastevent.init() - event.clear() - - def tearDown(self): - # fastevent.quit() # Does not exist! - pygame.display.quit() - - def test_init(self): - # Test if module initialized after multiple init() calls. - fastevent.init() - fastevent.init() - - self.assertTrue(fastevent.get_init()) - - def test_auto_quit(self): - # Test if module uninitialized after calling pygame.quit(). - pygame.quit() - - self.assertFalse(fastevent.get_init()) - - def test_get_init(self): - # Test if get_init() gets the init state. 
- self.assertTrue(fastevent.get_init()) - - def test_get(self): - # __doc__ (as of 2008-08-02) for pygame.fastevent.get: - - # pygame.fastevent.get() -> list of Events - # get all events from the queue - - for _ in range(1, 11): - event.post(event.Event(pygame.USEREVENT)) - - self.assertListEqual([e.type for e in fastevent.get()], - [pygame.USEREVENT] * 10, - race_condition_notification) - - def test_poll(self): - - # __doc__ (as of 2008-08-02) for pygame.fastevent.poll: - - # pygame.fastevent.poll() -> Event - # get an available event - # - # Returns next event on queue. If there is no event waiting on the - # queue, this will return an event with type NOEVENT. - - self.assertEqual(fastevent.poll().type, pygame.NOEVENT, - race_condition_notification) - - def test_post(self): - - # __doc__ (as of 2008-08-02) for pygame.fastevent.post: - - # pygame.fastevent.post(Event) -> None - # place an event on the queue - # - # This will post your own event objects onto the event queue. - # You can past any event type you want, but some care must be - # taken. For example, if you post a MOUSEBUTTONDOWN event to the - # queue, it is likely any code receiving the event will expect - # the standard MOUSEBUTTONDOWN attributes to be available, like - # 'pos' and 'button'. - # - # Because pygame.fastevent.post() may have to wait for the queue - # to empty, you can get into a dead lock if you try to append an - # event on to a full queue from the thread that processes events. - # For that reason I do not recommend using this function in the - # main thread of an SDL program. - - for _ in range(1, 11): - fastevent.post(event.Event(pygame.USEREVENT)) - - self.assertListEqual([e.type for e in event.get()], - [pygame.USEREVENT] * 10, - race_condition_notification) - - try: - # Special case for post: METH_O. 
- fastevent.post(1) - except TypeError: - e = geterror() - msg = ("argument 1 must be %s, not %s" % - (fastevent.Event.__name__, type(1).__name__)) - self.assertEqual(str(e), msg) - else: - self.fail() - - def test_post__clear(self): - """Ensure posted events can be cleared.""" - for _ in range(10): - fastevent.post(event.Event(pygame.USEREVENT)) - - event.clear() - - self.assertListEqual(fastevent.get(), []) - self.assertListEqual(event.get(), []) - - def todo_test_pump(self): - - # __doc__ (as of 2008-08-02) for pygame.fastevent.pump: - - # pygame.fastevent.pump() -> None - # update the internal messages - # - # For each frame of your game, you will need to make some sort - # of call to the event queue. This ensures your program can internally - # interact with the rest of the operating system. If you are not using - # other event functions in your game, you should call pump() to allow - # pygame to handle internal actions. - # - # There are important things that must be dealt with internally in the - # event queue. The main window may need to be repainted. Certain joysticks - # must be polled for their values. If you fail to make a call to the event - # queue for too long, the system may decide your program has locked up. - - self.fail() - - def test_wait(self): - - # __doc__ (as of 2008-08-02) for pygame.fastevent.wait: - - # pygame.fastevent.wait() -> Event - # wait for an event - # - # Returns the current event on the queue. If there are no messages - # waiting on the queue, this will not return until one is - # available. Sometimes it is important to use this wait to get - # events from the queue, it will allow your application to idle - # when the user isn't doing anything with it. 
- - event.post(pygame.event.Event(1)) - self.assertEqual(fastevent.wait().type, 1, race_condition_notification) - -################################################################################ - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/fixtures/fonts/A_PyGameMono-8.png b/venv/lib/python3.7/site-packages/pygame/tests/fixtures/fonts/A_PyGameMono-8.png deleted file mode 100644 index b15961f..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/tests/fixtures/fonts/A_PyGameMono-8.png and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/tests/fixtures/fonts/PyGameMono-18-100dpi.bdf b/venv/lib/python3.7/site-packages/pygame/tests/fixtures/fonts/PyGameMono-18-100dpi.bdf deleted file mode 100644 index a88f083..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/fixtures/fonts/PyGameMono-18-100dpi.bdf +++ /dev/null @@ -1,165 +0,0 @@ -STARTFONT 2.1 -FONT -FontForge-PyGameMono-Medium-R-Normal--25-180-100-100-M-250-ISO10646-1 -SIZE 18 100 100 -FONTBOUNDINGBOX 21 22 0 0 -COMMENT "Generated by fontforge, http://fontforge.sourceforge.net" -COMMENT "Created by Lenard Lindstrom,,, with FontForge 2.0 (http://fontforge.sf.net)" -STARTPROPERTIES 29 -FOUNDRY "FontForge" -FAMILY_NAME "PyGameMono" -WEIGHT_NAME "Medium" -SLANT "R" -SETWIDTH_NAME "Normal" -ADD_STYLE_NAME "" -PIXEL_SIZE 25 -POINT_SIZE 180 -RESOLUTION_X 100 -RESOLUTION_Y 100 -SPACING "M" -AVERAGE_WIDTH 250 -CHARSET_REGISTRY "ISO10646" -CHARSET_ENCODING "1" -FONTNAME_REGISTRY "" -CHARSET_COLLECTIONS "ISO10646-1" -FONT_NAME "PyGameMono" -FACE_NAME "PyGame Mono" -FONT_VERSION "001.000" -FONT_ASCENT 20 -FONT_DESCENT 5 -UNDERLINE_POSITION -2 -UNDERLINE_THICKNESS 2 -RAW_ASCENT 800 -RAW_DESCENT 200 -RELATIVE_WEIGHT 50 -RELATIVE_SETWIDTH 50 -FIGURE_WIDTH -1 -AVG_UPPERCASE_WIDTH 250 -ENDPROPERTIES -CHARS 5 -STARTCHAR .notdef -ENCODING 0 -SWIDTH 1000 0 -DWIDTH 25 0 -BBX 20 20 0 0 -BITMAP -FFFFF0 -FFFFF0 -FE07F0 -F801F0 -F000F0 
-E00070 -E00070 -C00030 -C00030 -C00030 -C00030 -C00030 -C00030 -E00070 -E00070 -F000F0 -F801F0 -FE07F0 -FFFFF0 -FFFFF0 -ENDCHAR -STARTCHAR A -ENCODING 65 -SWIDTH 1000 0 -DWIDTH 25 0 -BBX 20 21 0 1 -BITMAP -03FC00 -1FFF80 -3FFFC0 -7C03E0 -F000F0 -E00070 -E00070 -F000F0 -FC03F0 -FFFFF0 -FFFFF0 -FFFFF0 -FF0FF0 -7C03F0 -7801E0 -7800E0 -7000E0 -700060 -600060 -200040 -200040 -ENDCHAR -STARTCHAR B -ENCODING 66 -SWIDTH 1000 0 -DWIDTH 25 0 -BBX 18 20 1 0 -BITMAP -FFFE00 -FFFF80 -7E0780 -7801C0 -7000C0 -3000C0 -3000C0 -3801C0 -3E0780 -3FFF00 -3FFF00 -3E0780 -380180 -3000C0 -3000C0 -3000C0 -7801C0 -7E07C0 -FFFF80 -FFFE00 -ENDCHAR -STARTCHAR C -ENCODING 67 -SWIDTH 1000 0 -DWIDTH 25 0 -BBX 20 20 0 0 -BITMAP -00FC00 -03FF00 -0FFF80 -1F03E0 -3E0070 -7C0010 -780000 -F80000 -F00000 -F00000 -F00000 -F00000 -F80000 -780000 -7C0010 -3E0070 -1F01E0 -0FFFC0 -03FF80 -00FE00 -ENDCHAR -STARTCHAR u13079 -ENCODING 77945 -SWIDTH 1000 0 -DWIDTH 25 0 -BBX 21 10 0 5 -BITMAP -03FC00 -0FFF80 -1E73C0 -78F8F0 -F0F878 -70F870 -3870E0 -1E03C0 -0FFF80 -03FC00 -ENDCHAR -ENDFONT diff --git a/venv/lib/python3.7/site-packages/pygame/tests/fixtures/fonts/PyGameMono-18-75dpi.bdf b/venv/lib/python3.7/site-packages/pygame/tests/fixtures/fonts/PyGameMono-18-75dpi.bdf deleted file mode 100644 index 127f704..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/fixtures/fonts/PyGameMono-18-75dpi.bdf +++ /dev/null @@ -1,143 +0,0 @@ -STARTFONT 2.1 -FONT -FontForge-PyGameMono-Medium-R-Normal--19-180-75-75-M-190-ISO10646-1 -SIZE 18 75 75 -FONTBOUNDINGBOX 15 17 0 0 -COMMENT "Generated by fontforge, http://fontforge.sourceforge.net" -COMMENT "Created by Lenard Lindstrom,,, with FontForge 2.0 (http://fontforge.sf.net)" -STARTPROPERTIES 29 -FOUNDRY "FontForge" -FAMILY_NAME "PyGameMono" -WEIGHT_NAME "Medium" -SLANT "R" -SETWIDTH_NAME "Normal" -ADD_STYLE_NAME "" -PIXEL_SIZE 19 -POINT_SIZE 180 -RESOLUTION_X 75 -RESOLUTION_Y 75 -SPACING "M" -AVERAGE_WIDTH 190 -CHARSET_REGISTRY "ISO10646" -CHARSET_ENCODING "1" 
-FONTNAME_REGISTRY "" -CHARSET_COLLECTIONS "ISO10646-1" -FONT_NAME "PyGameMono" -FACE_NAME "PyGame Mono" -FONT_VERSION "001.000" -FONT_ASCENT 15 -FONT_DESCENT 4 -UNDERLINE_POSITION -2 -UNDERLINE_THICKNESS 1 -RAW_ASCENT 800 -RAW_DESCENT 200 -RELATIVE_WEIGHT 50 -RELATIVE_SETWIDTH 50 -FIGURE_WIDTH -1 -AVG_UPPERCASE_WIDTH 190 -ENDPROPERTIES -CHARS 5 -STARTCHAR .notdef -ENCODING 0 -SWIDTH 1000 0 -DWIDTH 19 0 -BBX 15 15 0 0 -BITMAP -FFFE -FFFE -FC7E -F01E -E00E -C006 -C006 -C006 -C006 -C006 -E00E -F01E -FC7E -FFFE -FFFE -ENDCHAR -STARTCHAR A -ENCODING 65 -SWIDTH 1000 0 -DWIDTH 19 0 -BBX 15 17 0 0 -BITMAP -0FE0 -3FF8 -783C -F01E -E00E -E00E -F01E -F83E -FFFE -FFFE -FC7E -701C -701C -600C -600C -4004 -4004 -ENDCHAR -STARTCHAR B -ENCODING 66 -SWIDTH 1000 0 -DWIDTH 19 0 -BBX 15 15 0 0 -BITMAP -FFF8 -7FFC -780E -3006 -3006 -380E -3FF8 -3FF8 -3FF8 -380E -3006 -3006 -7C1E -7FFC -FFF8 -ENDCHAR -STARTCHAR C -ENCODING 67 -SWIDTH 1000 0 -DWIDTH 19 0 -BBX 15 15 0 0 -BITMAP -03E0 -0FF8 -3C1C -7806 -7000 -E000 -E000 -E000 -E000 -E000 -7000 -7806 -3C1C -0FF8 -03E0 -ENDCHAR -STARTCHAR u13079 -ENCODING 77945 -SWIDTH 1000 0 -DWIDTH 19 0 -BBX 15 7 0 4 -BITMAP -0FE0 -3838 -638C -E38E -638C -3838 -0FE0 -ENDCHAR -ENDFONT diff --git a/venv/lib/python3.7/site-packages/pygame/tests/fixtures/fonts/PyGameMono-8.bdf b/venv/lib/python3.7/site-packages/pygame/tests/fixtures/fonts/PyGameMono-8.bdf deleted file mode 100644 index 17bef06..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/fixtures/fonts/PyGameMono-8.bdf +++ /dev/null @@ -1,103 +0,0 @@ -STARTFONT 2.1 -FONT -FontForge-PyGameMono-Medium-R-Normal--8-80-75-75-C-80-ISO10646-1 -SIZE 8 75 75 -FONTBOUNDINGBOX 6 7 0 0 -COMMENT "Generated by fontforge, http://fontforge.sourceforge.net" -COMMENT "Created by Lenard Lindstrom,,, with FontForge 2.0 (http://fontforge.sf.net)" -STARTPROPERTIES 29 -FOUNDRY "FontForge" -FAMILY_NAME "PyGameMono" -WEIGHT_NAME "Medium" -SLANT "R" -SETWIDTH_NAME "Normal" -ADD_STYLE_NAME "" -PIXEL_SIZE 8 -POINT_SIZE 
80 -RESOLUTION_X 75 -RESOLUTION_Y 75 -SPACING "C" -AVERAGE_WIDTH 80 -CHARSET_REGISTRY "ISO10646" -CHARSET_ENCODING "1" -FONTNAME_REGISTRY "" -CHARSET_COLLECTIONS "ISO10646-1" -FONT_NAME "PyGameMono" -FACE_NAME "PyGame Mono" -FONT_VERSION "001.000" -FONT_ASCENT 6 -FONT_DESCENT 2 -UNDERLINE_POSITION -1 -UNDERLINE_THICKNESS 1 -RAW_ASCENT 800 -RAW_DESCENT 200 -RELATIVE_WEIGHT 50 -RELATIVE_SETWIDTH 50 -FIGURE_WIDTH -1 -AVG_UPPERCASE_WIDTH 80 -ENDPROPERTIES -CHARS 5 -STARTCHAR .notdef -ENCODING 0 -SWIDTH 1000 0 -DWIDTH 8 0 -BBX 6 6 0 0 -BITMAP -FC -84 -84 -84 -84 -FC -ENDCHAR -STARTCHAR A -ENCODING 65 -SWIDTH 1000 0 -DWIDTH 8 0 -BBX 6 7 0 0 -BITMAP -78 -84 -84 -FC -84 -84 -84 -ENDCHAR -STARTCHAR B -ENCODING 66 -SWIDTH 1000 0 -DWIDTH 8 0 -BBX 6 6 0 0 -BITMAP -FC -44 -78 -4C -44 -FC -ENDCHAR -STARTCHAR C -ENCODING 67 -SWIDTH 1000 0 -DWIDTH 8 0 -BBX 6 6 0 0 -BITMAP -78 -C4 -C0 -C0 -C4 -78 -ENDCHAR -STARTCHAR u13079 -ENCODING 77945 -SWIDTH 1000 0 -DWIDTH 8 0 -BBX 6 4 0 1 -BITMAP -78 -B4 -B4 -78 -ENDCHAR -ENDFONT diff --git a/venv/lib/python3.7/site-packages/pygame/tests/fixtures/fonts/PyGameMono.otf b/venv/lib/python3.7/site-packages/pygame/tests/fixtures/fonts/PyGameMono.otf deleted file mode 100644 index 5e9b66c..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/tests/fixtures/fonts/PyGameMono.otf and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/tests/fixtures/fonts/test_fixed.otf b/venv/lib/python3.7/site-packages/pygame/tests/fixtures/fonts/test_fixed.otf deleted file mode 100644 index 3488898..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/tests/fixtures/fonts/test_fixed.otf and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/tests/fixtures/fonts/test_sans.ttf b/venv/lib/python3.7/site-packages/pygame/tests/fixtures/fonts/test_sans.ttf deleted file mode 100644 index 09fac2f..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/tests/fixtures/fonts/test_sans.ttf and /dev/null differ 
diff --git a/venv/lib/python3.7/site-packages/pygame/tests/fixtures/fonts/u13079_PyGameMono-8.png b/venv/lib/python3.7/site-packages/pygame/tests/fixtures/fonts/u13079_PyGameMono-8.png deleted file mode 100644 index 911da8a..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/tests/fixtures/fonts/u13079_PyGameMono-8.png and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/tests/fixtures/xbm_cursors/white_sizing.xbm b/venv/lib/python3.7/site-packages/pygame/tests/fixtures/xbm_cursors/white_sizing.xbm deleted file mode 100644 index d334d8d..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/fixtures/xbm_cursors/white_sizing.xbm +++ /dev/null @@ -1,8 +0,0 @@ -#define resize_white_width 16 -#define resize_white_height 16 -#define resize_white_x_hot 7 -#define resize_white_y_hot 7 -static unsigned char resize_white_bits[] = { - 0xff, 0x03, 0x01, 0x02, 0xfd, 0x03, 0x05, 0x00, 0xf5, 0x0f, 0x15, 0x08, - 0xd5, 0xeb, 0x55, 0xaa, 0x55, 0xaa, 0xd7, 0xab, 0x10, 0xa8, 0xf0, 0xb7, - 0x00, 0xa8, 0xc0, 0x9f, 0x40, 0x80, 0xc0, 0xff}; diff --git a/venv/lib/python3.7/site-packages/pygame/tests/fixtures/xbm_cursors/white_sizing_mask.xbm b/venv/lib/python3.7/site-packages/pygame/tests/fixtures/xbm_cursors/white_sizing_mask.xbm deleted file mode 100644 index f00bc46..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/fixtures/xbm_cursors/white_sizing_mask.xbm +++ /dev/null @@ -1,8 +0,0 @@ -#define resize_white_mask_width 16 -#define resize_white_mask_height 16 -#define resize_white_mask_x_hot 7 -#define resize_white_mask_y_hot 7 -static unsigned char resize_white_mask_bits[] = { - 0xff, 0x03, 0xff, 0x03, 0xff, 0x03, 0x07, 0x00, 0xf7, 0x0f, 0xf7, 0x0f, - 0xf7, 0xef, 0x77, 0xee, 0x77, 0xee, 0xf7, 0xef, 0xf0, 0xef, 0xf0, 0xff, - 0x00, 0xf8, 0xc0, 0xff, 0xc0, 0xff, 0xc0, 0xff}; diff --git a/venv/lib/python3.7/site-packages/pygame/tests/font_tags.py b/venv/lib/python3.7/site-packages/pygame/tests/font_tags.py deleted file mode 100644 index 
c660bef..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/font_tags.py +++ /dev/null @@ -1 +0,0 @@ -__tags__ = [] diff --git a/venv/lib/python3.7/site-packages/pygame/tests/font_test.py b/venv/lib/python3.7/site-packages/pygame/tests/font_test.py deleted file mode 100644 index 9f59807..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/font_test.py +++ /dev/null @@ -1,546 +0,0 @@ -# -*- coding: utf8 -*- - -import sys -import os -import unittest -import platform - -import pygame -from pygame import font as pygame_font # So font can be replaced with ftfont -from pygame.compat import as_unicode, unicode_, as_bytes, xrange_, filesystem_errors -from pygame.compat import PY_MAJOR_VERSION - -FONTDIR = os.path.join(os.path.dirname (os.path.abspath (__file__)), - 'fixtures', 'fonts') - -UCS_4 = sys.maxunicode > 0xFFFF - -def equal_images(s1, s2): - size = s1.get_size() - if s2.get_size() != size: - return False - w, h = size - for x in xrange_(w): - for y in xrange_(h): - if s1.get_at((x, y)) != s2.get_at((x, y)): - return False - return True - - -IS_PYPY = 'PyPy' == platform.python_implementation() - - -@unittest.skipIf(IS_PYPY, 'pypy skip known failure') # TODO -class FontModuleTest( unittest.TestCase ): - - def setUp(self): - pygame_font.init() - - def tearDown(self): - pygame_font.quit() - - def test_SysFont(self): - # Can only check that a font object is returned. - fonts = pygame_font.get_fonts() - if 'arial' in fonts: - # Try to use arial font if it is there, rather than a random font - # which can be different depending on installed fonts on the system. 
- font_name = 'arial' - else: - font_name = sorted(fonts)[0] - o = pygame_font.SysFont(font_name, 20) - self.assertTrue(isinstance(o, pygame_font.FontType)) - o = pygame_font.SysFont(font_name, 20, italic=True) - self.assertTrue(isinstance(o, pygame_font.FontType)) - o = pygame_font.SysFont(font_name, 20, bold=True) - self.assertTrue(isinstance(o, pygame_font.FontType)) - o = pygame_font.SysFont('thisisnotafont', 20) - self.assertTrue(isinstance(o, pygame_font.FontType)) - - def test_get_default_font(self): - self.assertEqual(pygame_font.get_default_font(), 'freesansbold.ttf') - - def test_get_fonts_returns_something(self): - fnts = pygame_font.get_fonts() - self.assertTrue(fnts) - - # to test if some files exist... - #def XXtest_has_file_osx_10_5_sdk(self): - # import os - # f = "/Developer/SDKs/MacOSX10.5.sdk/usr/X11/include/ft2build.h" - # self.assertEqual(os.path.exists(f), True) - - #def XXtest_has_file_osx_10_4_sdk(self): - # import os - # f = "/Developer/SDKs/MacOSX10.4u.sdk/usr/X11R6/include/ft2build.h" - # self.assertEqual(os.path.exists(f), True) - - def test_get_fonts(self): - fnts = pygame_font.get_fonts() - - self.assertTrue(fnts, msg=repr(fnts)) - - if (PY_MAJOR_VERSION >= 3): - # For Python 3.x, names will always be unicode strings. - name_types = (str,) - else: - # For Python 2.x, names may be either unicode or ascii strings. - name_types = (str, unicode) - - for name in fnts: - # note, on ubuntu 2.6 they are all unicode strings. - - self.assertTrue(isinstance(name, name_types), name) - # Font names can be comprised of only numeric characters, so - # just checking name.islower() will not work as expected here. 
- self.assertFalse(any(c.isupper() for c in name)) - self.assertTrue(name.isalnum(), name) - - def test_get_init(self): - self.assertTrue(pygame_font.get_init()) - pygame_font.quit() - self.assertFalse(pygame_font.get_init()) - - def test_init(self): - pygame_font.init() - - def test_match_font_all_exist(self): - fonts = pygame_font.get_fonts() - - # Ensure all listed fonts are in fact available, and the returned file - # name is a full path. - for font in fonts: - path = pygame_font.match_font(font) - self.assertFalse(path is None) - self.assertTrue(os.path.isabs(path)) - - def test_match_font_bold(self): - fonts = pygame_font.get_fonts() - - # Look for a bold font. - self.assertTrue(any(pygame_font.match_font(font, bold=True) - for font in fonts)) - - - def test_match_font_italic(self): - fonts = pygame_font.get_fonts() - - # Look for an italic font. - self.assertTrue(any(pygame_font.match_font(font, italic=True) - for font in fonts)) - - def test_match_font_comma_separated(self): - fonts = pygame_font.get_fonts() - - # Check for not found. - self.assertTrue(pygame_font.match_font('thisisnotafont') is None) - - # Check comma separated list. 
- names = ','.join(['thisisnotafont', fonts[-1], 'anothernonfont']) - self.assertFalse(pygame_font.match_font(names) is None) - names = ','.join(['thisisnotafont1', 'thisisnotafont2', 'thisisnotafont3']) - self.assertTrue(pygame_font.match_font(names) is None) - - def test_quit(self): - pygame_font.quit() - - -@unittest.skipIf(IS_PYPY, 'pypy skip known failure') # TODO -class FontTest(unittest.TestCase): - - def setUp(self): - pygame_font.init() - - def tearDown(self): - pygame_font.quit() - - def test_render_args(self): - screen = pygame.display.set_mode((600, 400)) - rect = screen.get_rect() - f = pygame_font.Font(None, 20) - screen.fill((10, 10, 10)) - font_surface = f.render(" bar", True, (0, 0, 0), (255, 255, 255)) - font_rect = font_surface.get_rect() - font_rect.topleft = rect.topleft - self.assertTrue(font_surface) - screen.blit(font_surface, font_rect, font_rect) - pygame.display.update() - self.assertEqual(tuple(screen.get_at((0,0)))[:3], (255, 255, 255)) - self.assertEqual(tuple(screen.get_at(font_rect.topleft))[:3], (255, 255, 255)) - - # If we don't have a real display, don't do this test. - # Transparent background doesn't seem to work without a read video card. 
- if os.environ.get('SDL_VIDEODRIVER') != 'dummy': - screen.fill((10, 10, 10)) - font_surface = f.render(" bar", True, (0, 0, 0), None) - font_rect = font_surface.get_rect() - font_rect.topleft = rect.topleft - self.assertTrue(font_surface) - screen.blit(font_surface, font_rect, font_rect) - pygame.display.update() - self.assertEqual(tuple(screen.get_at((0,0)))[:3], (10, 10, 10)) - self.assertEqual(tuple(screen.get_at(font_rect.topleft))[:3], (10, 10, 10)) - - screen.fill((10, 10, 10)) - font_surface = f.render(" bar", True, (0, 0, 0)) - font_rect = font_surface.get_rect() - font_rect.topleft = rect.topleft - self.assertTrue(font_surface) - screen.blit(font_surface, font_rect, font_rect) - pygame.display.update(rect) - self.assertEqual(tuple(screen.get_at((0,0)))[:3], (10, 10, 10)) - self.assertEqual(tuple(screen.get_at(font_rect.topleft))[:3], (10, 10, 10)) - - - -@unittest.skipIf(IS_PYPY, 'pypy skip known failure') # TODO -class FontTypeTest( unittest.TestCase ): - - def setUp(self): - pygame_font.init() - - def tearDown(self): - pygame_font.quit() - - def test_get_ascent(self): - # Ckecking ascent would need a custom test font to do properly. - f = pygame_font.Font(None, 20) - ascent = f.get_ascent() - self.assertTrue(isinstance(ascent, int)) - self.assertTrue(ascent > 0) - s = f.render("X", False, (255, 255, 255)) - self.assertTrue(s.get_size()[1] > ascent) - - def test_get_descent(self): - # Ckecking descent would need a custom test font to do properly. - f = pygame_font.Font(None, 20) - descent = f.get_descent() - self.assertTrue(isinstance(descent, int)) - self.assertTrue(descent < 0) - - def test_get_height(self): - # Ckecking height would need a custom test font to do properly. 
- f = pygame_font.Font(None, 20) - height = f.get_height() - self.assertTrue(isinstance(height, int)) - self.assertTrue(height > 0) - s = f.render("X", False, (255, 255, 255)) - self.assertTrue(s.get_size()[1] == height) - - def test_get_linesize(self): - # Ckecking linesize would need a custom test font to do properly. - # Questions: How do linesize, height and descent relate? - f = pygame_font.Font(None, 20) - linesize = f.get_linesize() - self.assertTrue(isinstance(linesize, int)) - self.assertTrue(linesize > 0) - - def test_metrics(self): - # Ensure bytes decoding works correctly. Can only compare results - # with unicode for now. - f = pygame_font.Font(None, 20) - um = f.metrics(as_unicode(".")) - bm = f.metrics(as_bytes(".")) - - self.assertEqual(len(um), 1) - self.assertEqual(len(bm), 1) - self.assertIsNotNone(um[0]) - self.assertEqual(um, bm) - - u = u"\u212A" - b = u.encode("UTF-16")[2:] # Keep byte order consistent. [2:] skips BOM - bm = f.metrics(b) - - self.assertEqual(len(bm), 2) - - try: # FIXME why do we do this try/except ? - um = f.metrics(u) - except pygame.error: - pass - else: - self.assertEqual(len(um), 1) - self.assertNotEqual(bm[0], um[0]) - self.assertNotEqual(bm[1], um[0]) - - if UCS_4: - u = u"\U00013000" - bm = f.metrics(u) - - self.assertEqual(len(bm), 1) - self.assertIsNone(bm[0]) - - return # unfinished - # The documentation is useless here. How large a list? - # How do list positions relate to character codes? - # What about unicode characters? - - # __doc__ (as of 2008-08-02) for pygame_font.Font.metrics: - - # Font.metrics(text): return list - # Gets the metrics for each character in the pased string. - # - # The list contains tuples for each character, which contain the - # minimum X offset, the maximum X offset, the minimum Y offset, the - # maximum Y offset and the advance offset (bearing plus width) of the - # character. [(minx, maxx, miny, maxy, advance), (minx, maxx, miny, - # maxy, advance), ...] 
- - self.fail() - - def test_render(self): - f = pygame_font.Font(None, 20) - s = f.render("foo", True, [0, 0, 0], [255, 255, 255]) - s = f.render("xxx", True, [0, 0, 0], [255, 255, 255]) - s = f.render("", True, [0, 0, 0], [255, 255, 255]) - s = f.render("foo", False, [0, 0, 0], [255, 255, 255]) - s = f.render("xxx", False, [0, 0, 0], [255, 255, 255]) - s = f.render("xxx", False, [0, 0, 0]) - s = f.render(" ", False, [0, 0, 0]) - s = f.render(" ", False, [0, 0, 0], [255, 255, 255]) - # null text should be 1 pixel wide. - s = f.render("", False, [0, 0, 0], [255, 255, 255]) - self.assertEqual(s.get_size()[0], 1) - # None text should be 1 pixel wide. - s = f.render(None, False, [0, 0, 0], [255, 255, 255]) - self.assertEqual(s.get_size()[0], 1) - # Non-text should raise a TypeError. - self.assertRaises(TypeError, f.render, - [], False, [0, 0, 0], [255, 255, 255]) - self.assertRaises(TypeError, f.render, - 1, False, [0, 0, 0], [255, 255, 255]) - # is background transparent for antialiasing? - s = f.render(".", True, [255, 255, 255]) - self.assertEqual(s.get_at((0, 0))[3], 0) - # is Unicode and bytes encoding correct? - # Cannot really test if the correct characters are rendered, but - # at least can assert the encodings differ. - su = f.render(as_unicode("."), False, [0, 0, 0], [255, 255, 255]) - sb = f.render(as_bytes("."), False, [0, 0, 0], [255, 255, 255]) - self.assertTrue(equal_images(su, sb)) - u = as_unicode(r"\u212A") - b = u.encode("UTF-16")[2:] # Keep byte order consistent. [2:] skips BOM - sb = f.render(b, False, [0, 0, 0], [255, 255, 255]) - try: # FIXME why do we do this try/except ? - su = f.render(u, False, [0, 0, 0], [255, 255, 255]) - except pygame.error: - pass - else: - self.assertFalse(equal_images(su, sb)) - - # If the font module is SDL_ttf based, then it can only supports UCS-2; - # it will raise an exception for an out-of-range UCS-4 code point. 
- if UCS_4 and not hasattr(f, 'ucs4'): - ucs_2 = as_unicode(r"\uFFEE") - s = f.render(ucs_2, False, [0, 0, 0], [255, 255, 255]) - ucs_4 = as_unicode(r"\U00010000") - self.assertRaises(UnicodeError, f.render, - ucs_4, False, [0, 0, 0], [255, 255, 255]) - - b = as_bytes("ab\x00cd") - self.assertRaises(ValueError, f.render, b, 0, [0, 0, 0]) - u = as_unicode("ab\x00cd") - self.assertRaises(ValueError, f.render, b, 0, [0, 0, 0]) - - def test_set_bold(self): - f = pygame_font.Font(None, 20) - self.assertFalse(f.get_bold()) - f.set_bold(True) - self.assertTrue(f.get_bold()) - f.set_bold(False) - self.assertFalse(f.get_bold()) - - def test_set_italic(self): - f = pygame_font.Font(None, 20) - self.assertFalse(f.get_italic()) - f.set_italic(True) - self.assertTrue(f.get_italic()) - f.set_italic(False) - self.assertFalse(f.get_italic()) - - def test_set_underline(self): - f = pygame_font.Font(None, 20) - self.assertFalse(f.get_underline()) - f.set_underline(True) - self.assertTrue(f.get_underline()) - f.set_underline(False) - self.assertFalse(f.get_underline()) - - def test_size(self): - f = pygame_font.Font(None, 20) - text = as_unicode("Xg") - size = f.size(text) - w, h = size - s = f.render(text, False, (255, 255, 255)) - btext = text.encode("ascii") - - self.assertIsInstance(w, int) - self.assertIsInstance(h, int) - self.assertEqual(s.get_size(), size) - self.assertEqual(f.size(btext), size) - - text = as_unicode(r"\u212A") - btext = text.encode("UTF-16")[2:] # Keep the byte order consistent. - bsize = f.size(btext) - try: # FIXME why do we do this try/except ? 
- size = f.size(text) - except pygame.error: - pass - else: - self.assertNotEqual(size, bsize) - - def test_font_file_not_found(self): - # A per BUG reported by Bo Jangeborg on pygame-user mailing list, - # http://www.mail-archive.com/pygame-users@seul.org/msg11675.html - - pygame_font.init() - self.assertRaises(IOError, - pygame_font.Font, - unicode_('some-fictional-font.ttf'), 20) - - def test_load_from_file(self): - font_name = pygame_font.get_default_font() - font_path = os.path.join(os.path.split(pygame.__file__)[0], - pygame_font.get_default_font()) - f = pygame_font.Font(font_path, 20) - - def test_load_from_file_obj(self): - font_name = pygame_font.get_default_font() - font_path = os.path.join(os.path.split(pygame.__file__)[0], - pygame_font.get_default_font()) - with open(font_path, "rb") as f: - font = pygame_font.Font(f, 20) - - def test_load_default_font_filename(self): - # In font_init, a special case is when the filename argument is - # identical to the default font file name. - f = pygame_font.Font(pygame_font.get_default_font(), 20) - - def _load_unicode(self, path): - import shutil - fdir = unicode_(FONTDIR) - temp = os.path.join(fdir, path) - pgfont = os.path.join(fdir, u'test_sans.ttf') - shutil.copy(pgfont, temp) - try: - with open(temp, 'rb') as f: - pass - except IOError: - raise unittest.SkipTest('the path cannot be opened') - try: - pygame_font.Font(temp, 20) - finally: - os.remove(temp) - - def test_load_from_file_unicode_0(self): - """ASCII string as a unicode object""" - self._load_unicode(u'temp_file.ttf') - - def test_load_from_file_unicode_1(self): - self._load_unicode(u'你好.ttf') - - def test_load_from_file_bytes(self): - font_path = os.path.join(os.path.split(pygame.__file__)[0], - pygame_font.get_default_font()) - filesystem_encoding = sys.getfilesystemencoding() - try: # FIXME why do we do this try/except ? 
- font_path = font_path.decode(filesystem_encoding, - filesystem_errors) - except AttributeError: - pass - bfont_path = font_path.encode(filesystem_encoding, - filesystem_errors) - f = pygame_font.Font(bfont_path, 20) - - -@unittest.skipIf(IS_PYPY, 'pypy skip known failure') # TODO -class VisualTests( unittest.TestCase ): - - __tags__ = ['interactive'] - - screen = None - aborted = False - - def setUp(self): - if self.screen is None: - pygame.init() - self.screen = pygame.display.set_mode((600, 200)) - self.screen.fill((255, 255, 255)) - pygame.display.flip() - self.f = pygame_font.Font(None, 32) - - def abort(self): - if self.screen is not None: - pygame.quit() - self.aborted = True - - def query(self, - bold=False, italic=False, underline=False, antialiase=False): - if self.aborted: - return False - spacing = 10 - offset = 20 - y = spacing - f = self.f - screen = self.screen - screen.fill((255, 255, 255)) - pygame.display.flip() - if not (bold or italic or underline or antialiase): - text = "normal" - else: - modes = [] - if bold: - modes.append("bold") - if italic: - modes.append("italic") - if underline: - modes.append("underlined") - if antialiase: - modes.append("antialiased") - text = "%s (y/n):" % ('-'.join(modes),) - f.set_bold(bold) - f.set_italic(italic) - f.set_underline(underline) - s = f.render(text, antialiase, (0, 0, 0)) - screen.blit(s, (offset, y)) - y += s.get_size()[1] + spacing - f.set_bold(False) - f.set_italic(False) - f.set_underline(False) - s = f.render("(some comparison text)", False, (0, 0, 0)) - screen.blit(s, (offset, y)) - pygame.display.flip() - while 1: - for evt in pygame.event.get(): - if evt.type == pygame.KEYDOWN: - if evt.key == pygame.K_ESCAPE: - self.abort() - return False - if evt.key == pygame.K_y: - return True - if evt.key == pygame.K_n: - return False - if evt.type == pygame.QUIT: - self.abort() - return False - - def test_bold(self): - self.assertTrue(self.query(bold=True)) - - def test_italic(self): - 
self.assertTrue(self.query(italic=True)) - - def test_underline(self): - self.assertTrue(self.query(underline=True)) - - def test_antialiase(self): - self.assertTrue(self.query(antialiase=True)) - - def test_bold_antialiase(self): - self.assertTrue(self.query(bold=True, antialiase=True)) - - def test_italic_underline(self): - self.assertTrue(self.query(italic=True, underline=True)) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/freetype_tags.py b/venv/lib/python3.7/site-packages/pygame/tests/freetype_tags.py deleted file mode 100644 index 5c56fc3..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/freetype_tags.py +++ /dev/null @@ -1,12 +0,0 @@ -__tags__ = ['development'] - -exclude = False - -try: - import pygame.freetype -except ImportError: - exclude = True - -if exclude: - __tags__.extend(['ignore', 'subprocess_ignore']) - diff --git a/venv/lib/python3.7/site-packages/pygame/tests/freetype_test.py b/venv/lib/python3.7/site-packages/pygame/tests/freetype_test.py deleted file mode 100644 index 05603ef..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/freetype_test.py +++ /dev/null @@ -1,1554 +0,0 @@ -import os -if os.environ.get('SDL_VIDEODRIVER') == 'dummy': - __tags__ = ('ignore', 'subprocess_ignore') - -import unittest -import sys -import ctypes -import weakref -import gc -import platform - -IS_PYPY = 'PyPy' == platform.python_implementation() - - -try: - from pygame.tests.test_utils import arrinter -except NameError: - pass - -import pygame -try: - import pygame.freetype as ft -except ImportError: - ft = None -from pygame.compat import as_unicode, bytes_, unichr_, unicode_ - - -FONTDIR = os.path.join(os.path.dirname (os.path.abspath (__file__)), - 'fixtures', 'fonts') - -def nullfont(): - """return an uninitialized font instance""" - return ft.Font.__new__(ft.Font) - -max_point_size_FX6 = 0x7FFFFFFF -max_point_size = max_point_size_FX6 >> 6 -max_point_size_f = max_point_size_FX6 
* 0.015625 - -def surf_same_image(a, b): - """Return True if a's pixel buffer is identical to b's""" - - a_sz = a.get_height() * a.get_pitch() - b_sz = b.get_height() * b.get_pitch() - if a_sz != b_sz: - return False - a_bytes = ctypes.string_at(a._pixels_address, a_sz) - b_bytes = ctypes.string_at(b._pixels_address, b_sz) - return a_bytes == b_bytes - - -class FreeTypeFontTest(unittest.TestCase): - - _fixed_path = os.path.join(FONTDIR, 'test_fixed.otf') - _sans_path = os.path.join(FONTDIR, 'test_sans.ttf') - _mono_path = os.path.join(FONTDIR, 'PyGameMono.otf') - _bmp_8_75dpi_path = os.path.join(FONTDIR, 'PyGameMono-8.bdf') - _bmp_18_75dpi_path = os.path.join(FONTDIR, 'PyGameMono-18-75dpi.bdf') - _bmp_18_100dpi_path = os.path.join(FONTDIR, 'PyGameMono-18-100dpi.bdf') - _TEST_FONTS = {} - - @classmethod - def setUpClass(cls): - ft.init() - - # Setup the test fonts. - - # Inconsolata is an open-source font designed by Raph Levien. - # Licensed under the Open Font License. - # http://www.levien.com/type/myfonts/inconsolata.html - cls._TEST_FONTS['fixed'] = ft.Font(cls._fixed_path) - - # Liberation Sans is an open-source font designed by Steve Matteson. - # Licensed under the GNU GPL. - # https://fedorahosted.org/liberation-fonts/ - cls._TEST_FONTS['sans'] = ft.Font(cls._sans_path) - - # A scalable mono test font made for pygame. It contains only - # a few glyphs: '\0', 'A', 'B', 'C', and U+13079. - # It also contains two bitmap sizes: 8.0 X 8.0 and 19.0 X 19.0. - cls._TEST_FONTS['mono'] = ft.Font(cls._mono_path) - - # A fixed size bitmap mono test font made for pygame. - # It contains only a few glyphs: '\0', 'A', 'B', 'C', and U+13079. - # The size is 8.0 X 8.0. - cls._TEST_FONTS['bmp-8-75dpi'] = ft.Font(cls._bmp_8_75dpi_path) - - # A fixed size bitmap mono test font made for pygame. - # It contains only a few glyphs: '\0', 'A', 'B', 'C', and U+13079. - # The size is 8.0 X 8.0. 
- cls._TEST_FONTS['bmp-18-75dpi'] = ft.Font(cls._bmp_18_75dpi_path) - - # A fixed size bitmap mono test font made for pygame. - # It contains only a few glyphs: '\0', 'A', 'B', 'C', and U+13079. - # The size is 8.0 X 8.0. - cls._TEST_FONTS['bmp-18-100dpi'] = ft.Font(cls._bmp_18_100dpi_path) - - @classmethod - def tearDownClass(cls): - ft.quit() - - def test_freetype_defaultfont(self): - font = ft.Font(None) - self.assertEqual(font.name, "FreeSans") - - def test_freetype_Font_init(self): - - self.assertRaises(IOError, ft.Font, os.path.join (FONTDIR, 'nonexistant.ttf')) - - f = self._TEST_FONTS['sans'] - self.assertIsInstance(f, ft.Font) - - f = self._TEST_FONTS['fixed'] - self.assertIsInstance(f, ft.Font) - - # Test keyword arguments - f = ft.Font(size=22, file=None) - self.assertEqual(f.size, 22) - f = ft.Font(font_index=0, file=None) - self.assertNotEqual(ft.get_default_resolution(), 100) - f = ft.Font(resolution=100, file=None) - self.assertEqual(f.resolution, 100) - f = ft.Font(ucs4=True, file=None) - self.assertTrue(f.ucs4) - self.assertRaises(OverflowError, ft.Font, file=None, - size=(max_point_size + 1)) - self.assertRaises(OverflowError, ft.Font, file=None, size=-1) - - f = ft.Font(None, size=24) - self.assertTrue(f.height > 0) - self.assertRaises(IOError, f.__init__, - os.path.join(FONTDIR, 'nonexistant.ttf')) - - # Test attribute preservation during reinitalization - f = ft.Font(self._sans_path, size=24, ucs4=True) - self.assertEqual(f.name, 'Liberation Sans') - self.assertTrue(f.scalable) - self.assertFalse(f.fixed_width) - self.assertTrue(f.antialiased) - self.assertFalse(f.oblique) - self.assertTrue(f.ucs4) - f.antialiased = False - f.oblique = True - f.__init__(self._mono_path) - self.assertEqual(f.name, 'PyGameMono') - self.assertTrue(f.scalable) - self.assertTrue(f.fixed_width) - self.assertFalse(f.antialiased) - self.assertTrue(f.oblique) - self.assertTrue(f.ucs4) - - # For a bitmap font, the size is automatically set to the first - # size in the 
available sizes list. - f = ft.Font(self._bmp_8_75dpi_path) - sizes = f.get_sizes() - self.assertEqual(len(sizes), 1) - size_pt, width_px, height_px, x_ppem, y_ppem = sizes[0] - self.assertEqual(f.size, (x_ppem, y_ppem)) - f.__init__(self._bmp_8_75dpi_path, size=12) - self.assertEqual(f.size, 12.0) - - @unittest.skipIf(IS_PYPY, "PyPy doesn't use refcounting") - def test_freetype_Font_dealloc(self): - import sys - handle = open(self._sans_path, 'rb') - - def load_font(): - tempFont = ft.Font(handle) - - try: - load_font() - - self.assertEqual(sys.getrefcount(handle), 2) - finally: - # Ensures file is closed even if test fails. - handle.close() - - def test_freetype_Font_scalable(self): - - f = self._TEST_FONTS['sans'] - self.assertTrue(f.scalable) - - self.assertRaises(RuntimeError, lambda : nullfont().scalable) - - def test_freetype_Font_fixed_width(self): - - f = self._TEST_FONTS['sans'] - self.assertFalse(f.fixed_width) - - f = self._TEST_FONTS['mono'] - self.assertTrue(f.fixed_width) - - self.assertRaises(RuntimeError, lambda : nullfont().fixed_width) - - def test_freetype_Font_fixed_sizes(self): - - f = self._TEST_FONTS['sans'] - self.assertEqual(f.fixed_sizes, 0) - f = self._TEST_FONTS['bmp-8-75dpi'] - self.assertEqual(f.fixed_sizes, 1) - f = self._TEST_FONTS['mono'] - self.assertEqual(f.fixed_sizes, 2) - - def test_freetype_Font_get_sizes(self): - f = self._TEST_FONTS['sans'] - szlist = f.get_sizes() - self.assertIsInstance(szlist, list) - self.assertEqual(len(szlist), 0) - - f = self._TEST_FONTS['bmp-8-75dpi'] - szlist = f.get_sizes() - self.assertIsInstance(szlist, list) - self.assertEqual(len(szlist), 1) - - size8 = szlist[0] - self.assertIsInstance(size8[0], int) - self.assertEqual(size8[0], 8) - self.assertIsInstance(size8[1], int) - self.assertIsInstance(size8[2], int) - self.assertIsInstance(size8[3], float) - self.assertEqual(int(size8[3] * 64.0 + 0.5), 8 * 64) - self.assertIsInstance(size8[4], float) - self.assertEqual(int(size8[4] * 64.0 + 0.5), 8 * 
64) - - f = self._TEST_FONTS['mono'] - szlist = f.get_sizes() - self.assertIsInstance(szlist, list) - self.assertEqual(len(szlist), 2) - - size8 = szlist[0] - self.assertEqual(size8[3], 8) - self.assertEqual(int(size8[3] * 64.0 + 0.5), 8 * 64) - self.assertEqual(int(size8[4] * 64.0 + 0.5), 8 * 64) - - size19 = szlist[1] - self.assertEqual(size19[3], 19) - self.assertEqual(int(size19[3] * 64.0 + 0.5), 19 * 64) - self.assertEqual(int(size19[4] * 64.0 + 0.5), 19 * 64) - - def test_freetype_Font_use_bitmap_strikes(self): - f = self._TEST_FONTS['mono'] - try: - # use_bitmap_strikes == True - # - self.assertTrue(f.use_bitmap_strikes) - - # bitmap compatible properties - s_strike, sz = f.render_raw('A', size=19) - try: - f.vertical = True - s_strike_vert, sz = f.render_raw('A', size=19) - finally: - f.vertical = False - try: - f.wide = True - s_strike_wide, sz = f.render_raw('A', size=19) - finally: - f.wide = False - try: - f.underline = True - s_strike_underline, sz = f.render_raw('A', size=19) - finally: - f.underline = False - - # bitmap incompatible properties - s_strike_rot45, sz = f.render_raw('A', size=19, rotation=45) - try: - f.strong = True - s_strike_strong, sz = f.render_raw('A', size=19) - finally: - f.strong = False - try: - f.oblique = True - s_strike_oblique, sz = f.render_raw('A', size=19) - finally: - f.oblique = False - - # compare with use_bitmap_strikes == False - # - f.use_bitmap_strikes = False - self.assertFalse(f.use_bitmap_strikes) - - # bitmap compatible properties - s_outline, sz = f.render_raw('A', size=19) - self.assertNotEqual(s_outline, s_strike) - try: - f.vertical = True - s_outline, sz = f.render_raw('A', size=19) - self.assertNotEqual(s_outline, s_strike_vert) - finally: - f.vertical = False - try: - f.wide = True - s_outline, sz = f.render_raw('A', size=19) - self.assertNotEqual(s_outline, s_strike_wide) - finally: - f.wide = False - try: - f.underline = True - s_outline, sz = f.render_raw('A', size=19) - 
self.assertNotEqual(s_outline, s_strike_underline) - finally: - f.underline = False - - # bitmap incompatible properties - s_outline, sz = f.render_raw('A', size=19, rotation=45) - self.assertEqual(s_outline, s_strike_rot45) - try: - f.strong = True - s_outline, sz = f.render_raw('A', size=19) - self.assertEqual(s_outline, s_strike_strong) - finally: - f.strong = False - try: - f.oblique = True - s_outline, sz = f.render_raw('A', size=19) - self.assertEqual(s_outline, s_strike_oblique) - finally: - f.oblique = False - finally: - f.use_bitmap_strikes = True - - def test_freetype_Font_bitmap_files(self): - """Ensure bitmap file restrictions are caught""" - f = self._TEST_FONTS['bmp-8-75dpi'] - f_null = nullfont() - s = pygame.Surface((10, 10), 0, 32) - a = s.get_view('3') - - exception = AttributeError - self.assertRaises(exception, setattr, f, 'strong', True) - self.assertRaises(exception, setattr, f, 'oblique', True) - self.assertRaises(exception, setattr, f, 'style', ft.STYLE_STRONG) - self.assertRaises(exception, setattr, f, 'style', ft.STYLE_OBLIQUE) - exception = RuntimeError - self.assertRaises(exception, setattr, f_null, 'strong', True) - self.assertRaises(exception, setattr, f_null, 'oblique', True) - self.assertRaises(exception, setattr, f_null, 'style', ft.STYLE_STRONG) - self.assertRaises(exception, setattr, f_null, 'style', ft.STYLE_OBLIQUE) - exception = ValueError - self.assertRaises(exception, f.render, - 'A', (0, 0, 0), size=8, rotation=1) - self.assertRaises(exception, f.render, - 'A', (0, 0, 0), size=8, style=ft.STYLE_OBLIQUE) - self.assertRaises(exception, f.render, - 'A', (0, 0, 0), size=8, style=ft.STYLE_STRONG) - self.assertRaises(exception, f.render_raw, 'A', size=8, rotation=1) - self.assertRaises(exception, f.render_raw, - 'A', size=8, style=ft.STYLE_OBLIQUE) - self.assertRaises(exception, f.render_raw, - 'A', size=8, style=ft.STYLE_STRONG) - self.assertRaises(exception, f.render_to, - s, (0, 0), 'A', (0, 0, 0), size=8, rotation=1) - 
self.assertRaises(exception, f.render_to, - s, (0, 0), 'A', (0, 0, 0), size=8, - style=ft.STYLE_OBLIQUE) - self.assertRaises(exception, f.render_to, - s, (0, 0), 'A', (0, 0, 0), size=8, - style=ft.STYLE_STRONG) - self.assertRaises(exception, f.render_raw_to, - a, 'A', size=8, rotation=1) - self.assertRaises(exception, f.render_raw_to, - a, 'A', size=8, style=ft.STYLE_OBLIQUE) - self.assertRaises(exception, f.render_raw_to, - a, 'A', size=8, style=ft.STYLE_STRONG) - self.assertRaises(exception, f.get_rect, 'A', size=8, rotation=1) - self.assertRaises(exception, f.get_rect, - 'A', size=8, style=ft.STYLE_OBLIQUE) - self.assertRaises(exception, f.get_rect, - 'A', size=8, style=ft.STYLE_STRONG) - - # Unsupported point size - exception = pygame.error - self.assertRaises(exception, f.get_rect, 'A', size=42) - self.assertRaises(exception, f.get_metrics, 'A', size=42) - self.assertRaises(exception, f.get_sized_ascender, 42) - self.assertRaises(exception, f.get_sized_descender, 42) - self.assertRaises(exception, f.get_sized_height, 42) - self.assertRaises(exception, f.get_sized_glyph_height, 42) - - def test_freetype_Font_get_metrics(self): - - font = self._TEST_FONTS['sans'] - - metrics = font.get_metrics('ABCD', size=24) - self.assertEqual(len(metrics), len('ABCD')) - self.assertIsInstance(metrics, list) - - for metrics_tuple in metrics: - self.assertIsInstance(metrics_tuple, tuple, metrics_tuple) - self.assertEqual(len(metrics_tuple), 6) - - for m in metrics_tuple[:4]: - self.assertIsInstance(m, int) - - for m in metrics_tuple[4:]: - self.assertIsInstance(m, float) - - # test for empty string - metrics = font.get_metrics('', size=24) - self.assertEqual(metrics, []) - - # test for invalid string - self.assertRaises(TypeError, font.get_metrics, 24, 24) - - # raises exception when uninitalized - self.assertRaises(RuntimeError, nullfont().get_metrics, - 'a', size=24) - - def test_freetype_Font_get_rect(self): - - font = self._TEST_FONTS['sans'] - - def test_rect(r): - 
self.assertIsInstance(r, pygame.Rect) - - rect_default = font.get_rect("ABCDabcd", size=24) - test_rect(rect_default) - self.assertTrue(rect_default.size > (0, 0)) - self.assertTrue(rect_default.width > rect_default.height) - - rect_bigger = font.get_rect("ABCDabcd", size=32) - test_rect(rect_bigger) - self.assertTrue(rect_bigger.size > rect_default.size) - - rect_strong = font.get_rect("ABCDabcd", size=24, style=ft.STYLE_STRONG) - test_rect(rect_strong) - self.assertTrue(rect_strong.size > rect_default.size) - - font.vertical = True - rect_vert = font.get_rect("ABCDabcd", size=24) - test_rect(rect_vert) - self.assertTrue(rect_vert.width < rect_vert.height) - font.vertical = False - - rect_oblique = font.get_rect("ABCDabcd", size=24, style=ft.STYLE_OBLIQUE) - test_rect(rect_oblique) - self.assertTrue(rect_oblique.width > rect_default.width) - self.assertTrue(rect_oblique.height == rect_default.height) - - rect_under = font.get_rect("ABCDabcd", size=24, style=ft.STYLE_UNDERLINE) - test_rect(rect_under) - self.assertTrue(rect_under.width == rect_default.width) - self.assertTrue(rect_under.height > rect_default.height) - - # Rect size should change if UTF surrogate pairs are treated as - # one code point or two. 
- ufont = self._TEST_FONTS['mono'] - rect_utf32 = ufont.get_rect(as_unicode(r'\U00013079'), size=24) - rect_utf16 = ufont.get_rect(as_unicode(r'\uD80C\uDC79'), size=24) - self.assertEqual(rect_utf16, rect_utf32); - ufont.ucs4 = True - try: - rect_utf16 = ufont.get_rect(as_unicode(r'\uD80C\uDC79'), size=24) - finally: - ufont.ucs4 = False - self.assertNotEqual(rect_utf16, rect_utf32); - - self.assertRaises(RuntimeError, - nullfont().get_rect, 'a', size=24) - - # text stretching - rect12 = font.get_rect('A', size=12.0) - rect24 = font.get_rect('A', size=24.0) - rect_x = font.get_rect('A', size=(24.0, 12.0)) - self.assertEqual(rect_x.width, rect24.width) - self.assertEqual(rect_x.height, rect12.height) - rect_y = font.get_rect('A', size=(12.0, 24.0)) - self.assertEqual(rect_y.width, rect12.width) - self.assertEqual(rect_y.height, rect24.height) - - def test_freetype_Font_height(self): - - f = self._TEST_FONTS['sans'] - self.assertEqual(f.height, 2355) - - f = self._TEST_FONTS['fixed'] - self.assertEqual(f.height, 1100) - - self.assertRaises(RuntimeError, lambda : nullfont().height) - - - def test_freetype_Font_name(self): - - f = self._TEST_FONTS['sans'] - self.assertEqual(f.name, 'Liberation Sans') - - f = self._TEST_FONTS['fixed'] - self.assertEqual(f.name, 'Inconsolata') - - nf = nullfont() - self.assertEqual(nf.name, repr(nf)) - - def test_freetype_Font_size(self): - - f = ft.Font(None, size=12) - self.assertEqual(f.size, 12) - f.size = 22 - self.assertEqual(f.size, 22) - f.size = 0 - self.assertEqual(f.size, 0) - f.size = max_point_size - self.assertEqual(f.size, max_point_size) - f.size = 6.5 - self.assertEqual(f.size, 6.5) - f.size = max_point_size_f - self.assertEqual(f.size, max_point_size_f) - self.assertRaises(OverflowError, setattr, f, 'size', -1) - self.assertRaises(OverflowError, setattr, f, 'size', - (max_point_size + 1)) - - f.size = 24.0, 0 - size = f.size - self.assertIsInstance(size, float) - self.assertEqual(size, 24.0) - - f.size = 16, 16 - size = 
f.size - self.assertIsInstance(size, tuple) - self.assertEqual(len(size), 2) - - x, y = size - self.assertIsInstance(x, float) - self.assertEqual(x, 16.0) - self.assertIsInstance(y, float) - self.assertEqual(y, 16.0) - - f.size = 20.5, 22.25 - x, y = f.size - self.assertEqual(x, 20.5) - self.assertEqual(y, 22.25) - - f.size = 0, 0 - size = f.size - self.assertIsInstance(size, float) - self.assertEqual(size, 0.0) - self.assertRaises(ValueError, setattr, f, 'size', (0, 24.0)) - self.assertRaises(TypeError, setattr, f, 'size', (24.0,)) - self.assertRaises(TypeError, setattr, f, 'size', (24.0, 0, 0)) - self.assertRaises(TypeError, setattr, f, 'size', (24.0j, 24.0)) - self.assertRaises(TypeError, setattr, f, 'size', (24.0, 24.0j)) - self.assertRaises(OverflowError, setattr, f, 'size', (-1, 16)) - self.assertRaises(OverflowError, setattr, f, 'size', - (max_point_size + 1, 16)) - self.assertRaises(OverflowError, setattr, f, 'size', (16, -1)) - self.assertRaises(OverflowError, setattr, f, 'size', - (16, max_point_size + 1)) - - # bitmap files with identical point size but differing ppems. 
- f75 = self._TEST_FONTS['bmp-18-75dpi'] - sizes = f75.get_sizes() - self.assertEqual(len(sizes), 1) - size_pt, width_px, height_px, x_ppem, y_ppem = sizes[0] - self.assertEqual(size_pt, 18) - self.assertEqual(x_ppem, 19.0) - self.assertEqual(y_ppem, 19.0) - rect = f75.get_rect('A', size=18) - rect = f75.get_rect('A', size=19) - rect = f75.get_rect('A', size=(19.0, 19.0)) - self.assertRaises(pygame.error, f75.get_rect, 'A', size=17) - f100 = self._TEST_FONTS['bmp-18-100dpi'] - sizes = f100.get_sizes() - self.assertEqual(len(sizes), 1) - size_pt, width_px, height_px, x_ppem, y_ppem = sizes[0] - self.assertEqual(size_pt, 18) - self.assertEqual(x_ppem, 25.0) - self.assertEqual(y_ppem, 25.0) - rect = f100.get_rect('A', size=18) - rect = f100.get_rect('A', size=25) - rect = f100.get_rect('A', size=(25.0, 25.0)) - self.assertRaises(pygame.error, f100.get_rect, 'A', size=17) - - def test_freetype_Font_rotation(self): - - test_angles = [(30, 30), - (360, 0), (390, 30), - (720, 0), (764, 44), - (-30, 330), - (-360, 0), (-390, 330), - (-720, 0), (-764, 316)] - - f = ft.Font(None) - self.assertEqual(f.rotation, 0) - for r, r_reduced in test_angles: - f.rotation = r - self.assertEqual(f.rotation, r_reduced, - "for angle %d: %d != %d" % - (r, f.rotation, r_reduced)) - self.assertRaises(TypeError, setattr, f, 'rotation', '12') - - def test_freetype_Font_render_to(self): - # Rendering to an existing target surface is equivalent to - # blitting a surface returned by Font.render with the target. 
- font = self._TEST_FONTS['sans'] - - surf = pygame.Surface((800, 600)) - color = pygame.Color(0, 0, 0) - - rrect = font.render_to(surf, (32, 32), - 'FoobarBaz', color, None, size=24) - self.assertIsInstance(rrect, pygame.Rect) - self.assertEqual(rrect.top, rrect.height) - ## self.assertEqual(rrect.left, something or other) - - rcopy = rrect.copy() - rcopy.topleft = (32, 32) - self.assertTrue(surf.get_rect().contains(rcopy)) - - rect = pygame.Rect(20, 20, 2, 2) - rrect = font.render_to(surf, rect, 'FoobarBax', color, None, size=24) - self.assertEqual(rrect.top, rrect.height) - self.assertNotEqual(rrect.size, rect.size) - rrect = font.render_to(surf, (20.1, 18.9), 'FoobarBax', - color, None, size=24) - ## self.assertEqual(tuple(rend[1].topleft), (20, 18)) - - rrect = font.render_to(surf, rect, '', color, None, size=24) - self.assertFalse(rrect) - self.assertEqual(rrect.height, font.get_sized_height(24)) - - # invalid surf test - self.assertRaises(TypeError, font.render_to, - "not a surface", "text", color) - self.assertRaises(TypeError, font.render_to, - pygame.Surface, "text", color) - - # invalid dest test - for dest in [None, 0, 'a', 'ab', - (), (1,), ('a', 2), (1, 'a'), (1+2j, 2), (1, 1+2j), - (1, int), (int, 1)]: - self.assertRaises(TypeError, font.render, - surf, dest, 'foobar', color, size=24) - - # misc parameter test - self.assertRaises(ValueError, font.render_to, surf, (0, 0), - 'foobar', color) - self.assertRaises(TypeError, font.render_to, surf, (0, 0), - 'foobar', color, "", size=24) - self.assertRaises(ValueError, font.render_to, surf, (0, 0), - 'foobar', color, None, style=42, size=24) - self.assertRaises(TypeError, font.render_to, surf, (0, 0), - 'foobar', color, None, style=None, size=24) - self.assertRaises(ValueError, font.render_to, surf, (0, 0), - 'foobar', color, None, style=97, size=24) - - def test_freetype_Font_render(self): - - font = self._TEST_FONTS['sans'] - - surf = pygame.Surface((800, 600)) - color = pygame.Color(0, 0, 0) - - rend = 
font.render('FoobarBaz', pygame.Color(0, 0, 0), None, size=24) - self.assertIsInstance(rend, tuple) - self.assertEqual(len(rend), 2) - self.assertIsInstance(rend[0], pygame.Surface) - self.assertIsInstance(rend[1], pygame.Rect) - self.assertEqual(rend[0].get_rect().size, rend[1].size) - - s, r = font.render('', pygame.Color(0, 0, 0), None, size=24) - self.assertEqual(r.width, 1) - self.assertEqual(r.height, font.get_sized_height(24)) - self.assertEqual(s.get_size(), r.size) - self.assertEqual(s.get_bitsize(), 32) - - # misc parameter test - self.assertRaises(ValueError, font.render, 'foobar', color) - self.assertRaises(TypeError, font.render, 'foobar', color, "", - size=24) - self.assertRaises(ValueError, font.render, 'foobar', color, None, - style=42, size=24) - self.assertRaises(TypeError, font.render, 'foobar', color, None, - style=None, size=24) - self.assertRaises(ValueError, font.render, 'foobar', color, None, - style=97, size=24) - - # valid surrogate pairs - font2 = self._TEST_FONTS['mono'] - ucs4 = font2.ucs4 - try: - font2.ucs4 = False - rend1 = font2.render(as_unicode(r'\uD80C\uDC79'), color, size=24) - rend2 = font2.render(as_unicode(r'\U00013079'), color, size=24) - self.assertEqual(rend1[1], rend2[1]) - font2.ucs4 = True - rend1 = font2.render(as_unicode(r'\uD80C\uDC79'), color, size=24) - self.assertNotEqual(rend1[1], rend2[1]) - finally: - font2.ucs4 = ucs4 - - # malformed surrogate pairs - self.assertRaises(UnicodeEncodeError, font.render, - as_unicode(r'\uD80C'), color, size=24) - self.assertRaises(UnicodeEncodeError, font.render, - as_unicode(r'\uDCA7'), color, size=24) - self.assertRaises(UnicodeEncodeError, font.render, - as_unicode(r'\uD7FF\uDCA7'), color, size=24) - self.assertRaises(UnicodeEncodeError, font.render, - as_unicode(r'\uDC00\uDCA7'), color, size=24) - self.assertRaises(UnicodeEncodeError, font.render, - as_unicode(r'\uD80C\uDBFF'), color, size=24) - self.assertRaises(UnicodeEncodeError, font.render, - as_unicode(r'\uD80C\uE000'), 
color, size=24) - - # raises exception when uninitalized - self.assertRaises(RuntimeError, nullfont().render, - 'a', (0, 0, 0), size=24) - - # Confirm the correct glpyhs are returned for a couple of - # unicode code points, 'A' and '\U00023079'. For each code point - # the rendered glyph is compared with an image of glyph bitmap - # as exported by FontForge. - path = os.path.join(FONTDIR, 'A_PyGameMono-8.png') - A = pygame.image.load(path) - path = os.path.join(FONTDIR, 'u13079_PyGameMono-8.png') - u13079 = pygame.image.load(path) - - font = self._TEST_FONTS['mono'] - font.ucs4 = False - A_rendered, r = font.render('A', bgcolor=pygame.Color('white'), size=8) - u13079_rendered, r = font.render(as_unicode(r'\U00013079'), - bgcolor=pygame.Color('white'), size=8) - - ## before comparing the surfaces, make sure they are the same - ## pixel format. Use 32-bit SRCALPHA to avoid row padding and - ## undefined bytes (the alpha byte will be set to 255.) - bitmap = pygame.Surface(A.get_size(), pygame.SRCALPHA, 32) - bitmap.blit(A, (0, 0)) - rendering = pygame.Surface(A_rendered.get_size(), pygame.SRCALPHA, 32) - rendering.blit(A_rendered, (0, 0)) - self.assertTrue(surf_same_image(rendering, bitmap)) - bitmap = pygame.Surface(u13079.get_size(), pygame.SRCALPHA, 32) - bitmap.blit(u13079, (0, 0)) - rendering = pygame.Surface(u13079_rendered.get_size(), - pygame.SRCALPHA, 32) - rendering.blit(u13079_rendered, (0, 0)) - self.assertTrue(surf_same_image(rendering, bitmap)) - - def test_freetype_Font_render_mono(self): - font = self._TEST_FONTS['sans'] - color = pygame.Color('black') - colorkey = pygame.Color('white') - text = "." 
- - save_antialiased = font.antialiased - font.antialiased = False - try: - surf, r = font.render(text, color, size=24) - self.assertEqual(surf.get_bitsize(), 8) - flags = surf.get_flags() - self.assertTrue(flags & pygame.SRCCOLORKEY) - self.assertFalse(flags & (pygame.SRCALPHA | pygame.HWSURFACE)) - self.assertEqual(surf.get_colorkey(), colorkey) - self.assertIsNone(surf.get_alpha()) - - translucent_color = pygame.Color(*color) - translucent_color.a = 55 - surf, r = font.render(text, translucent_color, size=24) - self.assertEqual(surf.get_bitsize(), 8) - flags = surf.get_flags() - self.assertTrue(flags & (pygame.SRCCOLORKEY | pygame.SRCALPHA)) - self.assertFalse(flags & pygame.HWSURFACE) - self.assertEqual(surf.get_colorkey(), colorkey) - self.assertEqual(surf.get_alpha(), translucent_color.a) - - surf, r = font.render(text, color, colorkey, size=24) - self.assertEqual(surf.get_bitsize(), 32) - finally: - font.antialiased = save_antialiased - - @unittest.skipIf(pygame.get_sdl_version()[0] == 2, "skipping due to blending issue (#864)") - def test_freetype_Font_render_to_mono(self): - # Blitting is done in two stages. First the target is alpha filled - # with the background color, if any. Second, the foreground - # color is alpha blitted to the background. - font = self._TEST_FONTS['sans'] - text = " ." 
- rect = font.get_rect(text, size=24) - size = rect.size - fg = pygame.Surface((1, 1), pygame.SRCALPHA, 32) - bg = pygame.Surface((1, 1), pygame.SRCALPHA, 32) - surrogate = pygame.Surface((1, 1), pygame.SRCALPHA, 32) - surfaces = [pygame.Surface(size, 0, 8), - pygame.Surface(size, 0, 16), - pygame.Surface(size, pygame.SRCALPHA, 16), - pygame.Surface(size, 0, 24), - pygame.Surface(size, 0, 32), - pygame.Surface(size, pygame.SRCALPHA, 32)] - fg_colors = [ - surfaces[0].get_palette_at(2), - surfaces[1].unmap_rgb(surfaces[1].map_rgb((128, 64, 200))), - surfaces[2].unmap_rgb(surfaces[2].map_rgb((99, 0, 100, 64))), - (128, 97, 213), - (128, 97, 213), - (128, 97, 213, 60)] - fg_colors = [pygame.Color(*c) for c in fg_colors] - self.assertEqual(len(surfaces), len(fg_colors)) # integrity check - bg_colors = [ - surfaces[0].get_palette_at(4), - surfaces[1].unmap_rgb(surfaces[1].map_rgb((220, 20, 99))), - surfaces[2].unmap_rgb(surfaces[2].map_rgb((55, 200, 0, 86))), - (255, 120, 13), - (255, 120, 13), - (255, 120, 13, 180)] - bg_colors = [pygame.Color(*c) for c in bg_colors] - self.assertEqual(len(surfaces), len(bg_colors)) # integrity check - - save_antialiased = font.antialiased - font.antialiased = False - try: - fill_color = pygame.Color('black') - for i, surf in enumerate(surfaces): - surf.fill(fill_color) - fg_color = fg_colors[i] - fg.set_at((0, 0), fg_color) - surf.blit(fg, (0, 0)) - r_fg_color = surf.get_at((0, 0)) - surf.set_at((0, 0), fill_color) - rrect = font.render_to(surf, (0, 0), text, fg_color, - size=24) - bottomleft = 0, rrect.height - 1 - self.assertEqual(surf.get_at(bottomleft), fill_color, - "Position: {}. Depth: {}." - " fg_color: {}.".format(bottomleft, - surf.get_bitsize(), fg_color)) - bottomright = rrect.width - 1, rrect.height - 1 - self.assertEqual(surf.get_at(bottomright), r_fg_color, - "Position: {}. Depth: {}." 
- " fg_color: {}.".format(bottomright, - surf.get_bitsize(), fg_color)) - for i, surf in enumerate(surfaces): - surf.fill(fill_color) - fg_color = fg_colors[i] - bg_color = bg_colors[i] - bg.set_at((0, 0), bg_color) - fg.set_at((0, 0), fg_color) - if surf.get_bitsize() == 24: - # For a 24 bit target surface test against Pygame's alpha - # blit as there appears to be a problem with SDL's alpha - # blit: - # - # self.assertEqual(surf.get_at(bottomright), r_fg_color) - # - # raises - # - # AssertionError: (128, 97, 213, 255) != (129, 98, 213, 255) - # - surrogate.set_at((0, 0), fill_color) - surrogate.blit(bg, (0, 0)) - r_bg_color = surrogate.get_at((0, 0)) - surrogate.blit(fg, (0, 0)) - r_fg_color = surrogate.get_at((0, 0)) - else: - # Surface blit values for comparison. - surf.blit(bg, (0, 0)) - r_bg_color = surf.get_at((0, 0)) - surf.blit(fg, (0, 0)) - r_fg_color = surf.get_at((0, 0)) - surf.set_at((0, 0), fill_color) - rrect = font.render_to(surf, (0, 0), text, fg_color, - bg_color, size=24) - bottomleft = 0, rrect.height - 1 - self.assertEqual(surf.get_at(bottomleft), r_bg_color) - bottomright = rrect.width - 1, rrect.height - 1 - self.assertEqual(surf.get_at(bottomright), r_fg_color) - finally: - font.antialiased = save_antialiased - - def test_freetype_Font_render_raw(self): - - font = self._TEST_FONTS['sans'] - - text = "abc" - size = font.get_rect(text, size=24).size - rend = font.render_raw(text, size=24) - self.assertIsInstance(rend, tuple) - self.assertEqual(len(rend), 2) - - r, s = rend - self.assertIsInstance(r, bytes_) - self.assertIsInstance(s, tuple) - self.assertTrue(len(s), 2) - - w, h = s - self.assertIsInstance(w, int) - self.assertIsInstance(h, int) - self.assertEqual(s, size) - self.assertEqual(len(r), w * h) - - r, (w, h) = font.render_raw('', size=24) - self.assertEqual(w, 0) - self.assertEqual(h, font.height) - self.assertEqual(len(r), 0) - - # bug with decenders: this would crash - rend = font.render_raw('render_raw', size=24) - - # bug with 
non-printable characters: this would cause a crash - # because the text length was not adjusted for skipped characters. - text = unicode_("").join([unichr_(i) for i in range(31, 64)]) - rend = font.render_raw(text, size=10) - - def test_freetype_Font_render_raw_to(self): - - # This only checks that blits do not crash. It needs to check: - # - int values - # - invert option - # - - font = self._TEST_FONTS['sans'] - text = "abc" - - # No frills antialiased render to int1 (__render_glyph_INT) - srect = font.get_rect(text, size=24) - surf = pygame.Surface(srect.size, 0, 8) - rrect = font.render_raw_to(surf.get_view('2'), text, size=24) - self.assertEqual(rrect, srect) - - for bpp in [24, 32]: - surf = pygame.Surface(srect.size, 0, bpp) - rrect = font.render_raw_to(surf.get_view('r'), text, size=24) - self.assertEqual(rrect, srect) - - # Underlining to int1 (__fill_glyph_INT) - srect = font.get_rect(text, size=24, style=ft.STYLE_UNDERLINE) - surf = pygame.Surface(srect.size, 0, 8) - rrect = font.render_raw_to(surf.get_view('2'), text, size=24, - style=ft.STYLE_UNDERLINE) - self.assertEqual(rrect, srect) - - for bpp in [24, 32]: - surf = pygame.Surface(srect.size, 0, bpp) - rrect = font.render_raw_to(surf.get_view('r'), text, size=24, - style=ft.STYLE_UNDERLINE) - self.assertEqual(rrect, srect) - - # Unaliased (mono) rendering to int1 (__render_glyph_MONO_as_INT) - font.antialiased = False - try: - srect = font.get_rect(text, size=24) - surf = pygame.Surface(srect.size, 0, 8) - rrect = font.render_raw_to(surf.get_view('2'), text, size=24) - self.assertEqual(rrect, srect) - - for bpp in [24, 32]: - surf = pygame.Surface(srect.size, 0, bpp) - rrect = font.render_raw_to(surf.get_view('r'), text, size=24) - self.assertEqual(rrect, srect) - finally: - font.antialiased = True - - # Antialiased render to ints sized greater than 1 byte - # (__render_glyph_INT) - srect = font.get_rect(text, size=24) - - for bpp in [16, 24, 32]: - surf = pygame.Surface(srect.size, 0, bpp) - rrect 
= font.render_raw_to(surf.get_view('2'), text, size=24) - self.assertEqual(rrect, srect) - - # Underline render to ints sized greater than 1 byte - # (__fill_glyph_INT) - srect = font.get_rect(text, size=24, style=ft.STYLE_UNDERLINE) - - for bpp in [16, 24, 32]: - surf = pygame.Surface(srect.size, 0, bpp) - rrect = font.render_raw_to(surf.get_view('2'), text, size=24, - style=ft.STYLE_UNDERLINE) - self.assertEqual(rrect, srect) - - # Unaliased (mono) rendering to ints greater than 1 byte - # (__render_glyph_MONO_as_INT) - font.antialiased = False - try: - srect = font.get_rect(text, size=24) - - for bpp in [16, 24, 32]: - surf = pygame.Surface(srect.size, 0, bpp) - rrect = font.render_raw_to(surf.get_view('2'), - text, size=24) - self.assertEqual(rrect, srect) - finally: - font.antialiased = True - - def test_freetype_Font_text_is_None(self): - f = ft.Font(self._sans_path, 36) - f.style = ft.STYLE_NORMAL - f.rotation = 0 - text = 'ABCD' - - # reference values - get_rect = f.get_rect(text) - f.vertical = True - get_rect_vert = f.get_rect(text) - - self.assertTrue(get_rect_vert.width < get_rect.width) - self.assertTrue(get_rect_vert.height > get_rect.height) - f.vertical = False - render_to_surf = pygame.Surface(get_rect.size, pygame.SRCALPHA, 32) - - if IS_PYPY: - return - - arr = arrinter.Array(get_rect.size, 'u', 1) - render = f.render(text, (0, 0, 0)) - render_to = f.render_to(render_to_surf, (0, 0), text, (0, 0, 0)) - render_raw = f.render_raw(text) - render_raw_to = f.render_raw_to(arr, text) - - # comparisons - surf = pygame.Surface(get_rect.size, pygame.SRCALPHA, 32) - self.assertEqual(f.get_rect(None), get_rect) - s, r = f.render(None, (0, 0, 0)) - self.assertEqual(r, render[1]) - self.assertTrue(surf_same_image(s, render[0])) - r = f.render_to(surf, (0, 0), None, (0, 0, 0)) - self.assertEqual(r, render_to) - self.assertTrue(surf_same_image(surf, render_to_surf)) - px, sz = f.render_raw(None) - self.assertEqual(sz, render_raw[1]) - self.assertEqual(px, 
render_raw[0]) - sz = f.render_raw_to(arr, None) - self.assertEqual(sz, render_raw_to) - - def test_freetype_Font_text_is_None(self): - f = ft.Font(self._sans_path, 36) - f.style = ft.STYLE_NORMAL - f.rotation = 0 - text = 'ABCD' - - # reference values - get_rect = f.get_rect(text) - f.vertical = True - get_rect_vert = f.get_rect(text) - - # vertical: trigger glyph positioning. - f.vertical = True - r = f.get_rect(None) - self.assertEqual(r, get_rect_vert) - f.vertical = False - - # wide style: trigger glyph reload - r = f.get_rect(None, style=ft.STYLE_WIDE) - self.assertEqual(r.height, get_rect.height) - self.assertTrue(r.width > get_rect.width) - r = f.get_rect(None) - self.assertEqual(r, get_rect) - - # rotated: trigger glyph reload - r = f.get_rect(None, rotation=90) - self.assertEqual(r.width, get_rect.height) - self.assertEqual(r.height, get_rect.width) - - # this method will not support None text - self.assertRaises(TypeError, f.get_metrics, None) - - def test_freetype_Font_fgcolor(self): - f = ft.Font(self._bmp_8_75dpi_path) - notdef = '\0' # the PyGameMono .notdef glyph has a pixel at (0, 0) - f.origin = False - f.pad = False - black = pygame.Color('black') # initial color - green = pygame.Color('green') - alpha128 = pygame.Color(10, 20, 30, 128) - - c = f.fgcolor - self.assertIsInstance(c, pygame.Color) - self.assertEqual(c, black) - - s, r = f.render(notdef) - self.assertEqual(s.get_at((0, 0)), black) - - f.fgcolor = green - self.assertEqual(f.fgcolor, green) - - s, r = f.render(notdef) - self.assertEqual(s.get_at((0, 0)), green) - - f.fgcolor = alpha128 - s, r = f.render(notdef) - self.assertEqual(s.get_at((0, 0)), alpha128) - - surf = pygame.Surface(f.get_rect(notdef).size, pygame.SRCALPHA, 32) - f.render_to(surf, (0, 0), None) - self.assertEqual(surf.get_at((0, 0)), alpha128) - - self.assertRaises(AttributeError, setattr, f, 'fgcolor', None) - - @unittest.skipIf(not pygame.HAVE_NEWBUF, 'newbuf not implemented') - def test_newbuf(self): - from 
pygame.tests.test_utils import buftools - Exporter = buftools.Exporter - font = self._TEST_FONTS['sans'] - srect = font.get_rect("Hi", size=12) - for format in ['b', 'B', 'h', 'H', 'i', 'I', 'l', 'L', 'q', 'Q', - 'x', '1x', '2x', '3x', '4x', '5x', '6x', '7x', - '8x', '9x', 'h', '=h', '@h', '!h', '1h', '=1h']: - newbuf = Exporter(srect.size, format=format) - rrect = font.render_raw_to(newbuf, "Hi", size=12) - self.assertEqual(rrect, srect) - # Some unsupported formats - for format in ['f', 'd', '2h', '?', 'hh']: - newbuf = Exporter(srect.size, format=format, itemsize=4) - self.assertRaises(ValueError, font.render_raw_to, - newbuf, "Hi", size=12) - - def test_freetype_Font_style(self): - - font = self._TEST_FONTS['sans'] - - # make sure STYLE_NORMAL is the default value - self.assertEqual(ft.STYLE_NORMAL, font.style) - - # make sure we check for style type - with self.assertRaises(TypeError): - font.style = "None" - with self.assertRaises(TypeError): - font.style = None - - # make sure we only accept valid constants - with self.assertRaises(ValueError): - font.style = 112 - - # make assure no assignements happened - self.assertEqual(ft.STYLE_NORMAL, font.style) - - # test assignement - font.style = ft.STYLE_UNDERLINE - self.assertEqual(ft.STYLE_UNDERLINE, font.style) - - # test complex styles - st = ( ft.STYLE_STRONG | ft.STYLE_UNDERLINE | - ft.STYLE_OBLIQUE ) - - font.style = st - self.assertEqual(st, font.style) - - # and that STYLE_DEFAULT has no effect (continued from above) - self.assertNotEqual(st, ft.STYLE_DEFAULT) - font.style = ft.STYLE_DEFAULT - self.assertEqual(st, font.style) - - # revert changes - font.style = ft.STYLE_NORMAL - self.assertEqual(ft.STYLE_NORMAL, font.style) - - def test_freetype_Font_resolution(self): - text = "|" # Differs in width and height - resolution = ft.get_default_resolution() - new_font = ft.Font(self._sans_path, resolution=2 * resolution) - self.assertEqual(new_font.resolution, 2 * resolution) - size_normal = 
self._TEST_FONTS['sans'].get_rect(text, size=24).size - size_scaled = new_font.get_rect(text, size=24).size - size_by_2 = size_normal[0] * 2 - self.assertTrue(size_by_2 + 2 >= size_scaled[0] >= size_by_2 - 2, - "%i not equal %i" % (size_scaled[1], size_by_2)) - size_by_2 = size_normal[1] * 2 - self.assertTrue(size_by_2 + 2 >= size_scaled[1] >= size_by_2 - 2, - "%i not equal %i" % (size_scaled[1], size_by_2)) - new_resolution = resolution + 10 - ft.set_default_resolution(new_resolution) - try: - new_font = ft.Font(self._sans_path, resolution=0) - self.assertEqual(new_font.resolution, new_resolution) - finally: - ft.set_default_resolution() - - def test_freetype_Font_path(self): - self.assertEqual(self._TEST_FONTS['sans'].path, self._sans_path) - self.assertRaises(AttributeError, getattr, nullfont(), 'path') - - # This Font cache test is conditional on freetype being built by a debug - # version of Python or with the C macro PGFT_DEBUG_CACHE defined. - def test_freetype_Font_cache(self): - glyphs = "abcde" - glen = len(glyphs) - other_glyphs = "123" - oglen = len(other_glyphs) - uempty = unicode_("") -## many_glyphs = (uempty.join([unichr_(i) for i in range(32,127)] + -## [unichr_(i) for i in range(161,172)] + -## [unichr_(i) for i in range(174,239)])) - many_glyphs = uempty.join([unichr_(i) for i in range(32,127)]) - mglen = len(many_glyphs) - - count = 0 - access = 0 - hit = 0 - miss = 0 - - f = ft.Font(None, size=24, font_index=0, resolution=72, ucs4=False) - f.style = ft.STYLE_NORMAL - f.antialiased = True - - # Ensure debug counters are zero - self.assertEqual(f._debug_cache_stats, (0, 0, 0, 0, 0)) - # Load some basic glyphs - count = access = miss = glen - f.render_raw(glyphs) - self.assertEqual(f._debug_cache_stats, (count, 0, access, hit, miss)) - # Vertical should not affect the cache - access += glen - hit += glen - f.vertical = True - f.render_raw(glyphs) - f.vertical = False - self.assertEqual(f._debug_cache_stats, (count, 0, access, hit, miss)) - # New 
glyphs will - count += oglen - access += oglen - miss += oglen - f.render_raw(other_glyphs) - self.assertEqual(f._debug_cache_stats, (count, 0, access, hit, miss)) - # Point size does - count += glen - access += glen - miss += glen - f.render_raw(glyphs, size=12) - self.assertEqual(f._debug_cache_stats, (count, 0, access, hit, miss)) - # Underline style does not - access += oglen - hit += oglen - f.underline = True - f.render_raw(other_glyphs) - f.underline = False - self.assertEqual(f._debug_cache_stats, (count, 0, access, hit, miss)) - # Oblique style does - count += glen - access += glen - miss += glen - f.oblique = True - f.render_raw(glyphs) - f.oblique = False - self.assertEqual(f._debug_cache_stats, (count, 0, access, hit, miss)) - # Strong style does; by this point cache clears can happen - count += glen - access += glen - miss += glen - f.strong = True - f.render_raw(glyphs) - f.strong = False - ccount, cdelete_count, caccess, chit, cmiss = f._debug_cache_stats - self.assertEqual((ccount + cdelete_count, caccess, chit, cmiss), - (count, access, hit, miss)) - # Rotation does - count += glen - access += glen - miss += glen - f.render_raw(glyphs, rotation=10) - ccount, cdelete_count, caccess, chit, cmiss = f._debug_cache_stats - self.assertEqual((ccount + cdelete_count, caccess, chit, cmiss), - (count, access, hit, miss)) - # aliased (mono) glyphs do - count += oglen - access += oglen - miss += oglen - f.antialiased = False - f.render_raw(other_glyphs) - f.antialiased = True - ccount, cdelete_count, caccess, chit, cmiss = f._debug_cache_stats - self.assertEqual((ccount + cdelete_count, caccess, chit, cmiss), - (count, access, hit, miss)) - # Trigger a cleanup for sure. 
- count += 2 * mglen - access += 2 * mglen - miss += 2 * mglen - f.get_metrics(many_glyphs, size=8) - f.get_metrics(many_glyphs, size=10) - ccount, cdelete_count, caccess, chit, cmiss = f._debug_cache_stats - self.assertTrue(ccount < count) - self.assertEqual((ccount + cdelete_count, caccess, chit, cmiss), - (count, access, hit, miss)) - - try: - ft.Font._debug_cache_stats - except AttributeError: - del test_freetype_Font_cache - - def test_undefined_character_code(self): - # To be consistent with pygame.font.Font, undefined codes - # are rendered as the undefined character, and has metrics - # of None. - font = self._TEST_FONTS['sans'] - - img, size1 = font.render(unichr_(1), (0, 0, 0), size=24) - img, size0 = font.render("", (0, 0, 0), size=24) - self.assertTrue(size1.width > size0.width ) - - metrics = font.get_metrics(unichr_(1) + unichr_(48), size=24) - self.assertEqual(len(metrics), 2) - self.assertIsNone(metrics[0]) - self.assertIsInstance(metrics[1], tuple) - - @unittest.skipIf(pygame.get_sdl_version()[0] == 2, "SDL2 surfaces are only limited by memory") - def test_issue_144(self): - """Issue #144: unable to render text""" - - # The bug came in two parts. The first was a convertion bug from - # FT_Fixed to integer in for an Intel x86_64 Pygame build. The second - # was to have the raised exception disappear before Font.render - # returned to Python level. - # - font = ft.Font(None, size=64) - s = 'M' * 100000 # Way too long for an SDL surface - self.assertRaises(pygame.error, font.render, s, (0, 0, 0)) - - def test_issue_242(self): - """Issue #242: get_rect() uses 0 as default style""" - - # Issue #242: freetype.Font.get_rect() ignores style defaults when - # the style argument is not given - # - # The text boundary rectangle returned by freetype.Font.get_rect() - # should match the boundary of the same text rendered directly to a - # surface. This permits accurate text positioning. 
To work properly, - # get_rect() should calculate the text boundary to reflect text style, - # such as underline. Instead, it ignores the style settings for the - # Font object when the style argument is omitted. - # - # When the style argument is not given, freetype.get_rect() uses - # unstyled text when calculating the boundary rectangle. This is - # because _ftfont_getrect(), in _freetype.c, set the default - # style to 0 rather than FT_STYLE_DEFAULT. - # - font = self._TEST_FONTS['sans'] - - # Try wide style on a wide character. - prev_style = font.wide - font.wide = True - try: - rect = font.get_rect('M', size=64) - surf, rrect = font.render(None, size=64) - self.assertEqual(rect, rrect) - finally: - font.wide = prev_style - - # Try strong style on several wide characters. - prev_style = font.strong - font.strong = True - try: - rect = font.get_rect('Mm_', size=64) - surf, rrect = font.render(None, size=64) - self.assertEqual(rect, rrect) - finally: - font.strong = prev_style - - # Try oblique style on a tall, narrow character. - prev_style = font.oblique - font.oblique = True - try: - rect = font.get_rect('|', size=64) - surf, rrect = font.render(None, size=64) - self.assertEqual(rect, rrect) - finally: - font.oblique = prev_style - - # Try underline style on a glyphless character. - prev_style = font.underline - font.underline = True - try: - rect = font.get_rect(' ', size=64) - surf, rrect = font.render(None, size=64) - self.assertEqual(rect, rrect) - finally: - font.underline = prev_style - - def test_issue_237(self): - """Issue #237: Memory overrun when rendered with underlining""" - - # Issue #237: Memory overrun when text without descenders is rendered - # with underlining - # - # The bug crashes the Python interpreter. The bug is caught with C - # assertions in ft_render_cb.c when the Pygame module is compiled - # for debugging. So far it is only known to affect Times New Roman. 
- # - name = "Times New Roman" - font = ft.SysFont(name, 19) - if font.name != name: - # The font is unavailable, so skip the test. - return - font.underline = True - s, r = font.render("Amazon", size=19) - - # Some other checks to make sure nothing else broke. - for adj in [-2, -1.9, -1, 0, 1.9, 2]: - font.underline_adjustment = adj - s, r = font.render("Amazon", size=19) - - def test_issue_243(self): - """Issue Y: trailing space ignored in boundary calculation""" - - # Issue #243: For a string with trailing spaces, freetype ignores the - # last space in boundary calculations - # - font = self._TEST_FONTS['fixed'] - r1 = font.get_rect(" ", size=64) - self.assertTrue(r1.width > 1) - r2 = font.get_rect(" ", size=64) - self.assertEqual(r2.width, 2 * r1.width) - - def test_garbage_collection(self): - """Check reference counting on returned new references""" - def ref_items(seq): - return [weakref.ref(o) for o in seq] - - font = self._TEST_FONTS['bmp-8-75dpi'] - font.size = font.get_sizes()[0][0] - text = 'A' - rect = font.get_rect(text) - surf = pygame.Surface(rect.size, pygame.SRCALPHA, 32) - refs = [] - refs.extend(ref_items(font.render(text, (0, 0, 0)))) - refs.append(weakref.ref(font.render_to(surf, (0, 0), text, (0, 0, 0)))) - refs.append(weakref.ref(font.get_rect(text))) - - n = len(refs) - self.assertTrue(n > 0) - - # for pypy we garbage collection twice. 
- for i in range(2): - gc.collect() - - for i in range(n): - self.assertIsNone(refs[i](), "ref %d not collected" % i) - - try: - from sys import getrefcount - except ImportError: - pass - else: - array = arrinter.Array(rect.size, 'u', 1) - o = font.render_raw(text) - self.assertEqual(getrefcount(o), 2) - self.assertEqual(getrefcount(o[0]), 2) - self.assertEqual(getrefcount(o[1]), 2) - self.assertEqual(getrefcount(font.render_raw_to(array, text)), 1) - o = font.get_metrics('AB') - self.assertEqual(getrefcount(o), 2) - for i in range(len(o)): - self.assertEqual(getrefcount(o[i]), 2, - "refcount fail for item %d" % i) - o = font.get_sizes() - self.assertEqual(getrefcount(o), 2) - for i in range(len(o)): - self.assertEqual(getrefcount(o[i]), 2, - "refcount fail for item %d" % i) - - def test_display_surface_quit(self): - """Font.render_to() on a closed display surface""" - - # The Font.render_to() method checks that PySurfaceObject.surf is NULL - # and raise a exception if it is. This fixes a bug in Pygame revision - # 0600ea4f1cfb and earlier where Pygame segfaults instead. 
- null_surface = pygame.Surface.__new__(pygame.Surface) - f = self._TEST_FONTS['sans'] - self.assertRaises(pygame.error, f.render_to, - null_surface, (0, 0), "Crash!", size=12) - - def test_issue_565(self): - """get_metrics supporting rotation/styles/size""" - - tests = [ - {'method': 'size', 'value': 36, 'msg': 'metrics same for size'}, - {'method': 'rotation', 'value': 90, 'msg': 'metrics same for rotation'}, - {'method': 'oblique', 'value': True, 'msg': 'metrics same for oblique'} - ] - text = "|" - - def run_test(method, value, msg): - font = ft.Font(self._sans_path, size=24) - before = font.get_metrics(text) - font.__setattr__(method, value) - after = font.get_metrics(text) - self.assertNotEqual(before, after, msg) - - for test in tests: - run_test(test['method'], test['value'], test['msg']) - - -class FreeTypeTest(unittest.TestCase): - def setUp(self): - ft.init() - - def tearDown(self): - ft.quit() - - def test_resolution(self): - try: - ft.set_default_resolution() - resolution = ft.get_default_resolution() - self.assertEqual(resolution, 72) - new_resolution = resolution + 10 - ft.set_default_resolution(new_resolution) - self.assertEqual(ft.get_default_resolution(), new_resolution) - ft.init(resolution=resolution+20) - self.assertEqual(ft.get_default_resolution(), new_resolution) - finally: - ft.set_default_resolution() - - def test_autoinit_and_autoquit(self): - pygame.init() - self.assertTrue(ft.get_init()) - pygame.quit() - self.assertFalse(ft.get_init()) - - # Ensure autoquit is replaced at init time - pygame.init() - self.assertTrue(ft.get_init()) - pygame.quit() - self.assertFalse(ft.get_init()) - - def test_init(self): - # Test if module initialized after calling init(). - ft.quit() - ft.init() - - self.assertTrue(ft.get_init()) - - def test_init__multiple(self): - # Test if module initialized after multiple init() calls. 
- ft.init() - ft.init() - - self.assertTrue(ft.get_init()) - - def test_quit(self): - # Test if module uninitialized after calling quit(). - ft.quit() - - self.assertFalse(ft.get_init()) - - def test_quit__multiple(self): - # Test if module initialized after multiple quit() calls. - ft.quit() - ft.quit() - - self.assertFalse(ft.get_init()) - - def test_get_init(self): - # Test if get_init() gets the init state. - self.assertTrue(ft.get_init()) - - def test_cache_size(self): - DEFAULT_CACHE_SIZE = 64 - self.assertEqual(ft.get_cache_size(), DEFAULT_CACHE_SIZE) - ft.quit() - self.assertEqual(ft.get_cache_size(), 0) - new_cache_size = DEFAULT_CACHE_SIZE * 2 - ft.init(cache_size=new_cache_size) - self.assertEqual(ft.get_cache_size(), new_cache_size) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/ftfont_tags.py b/venv/lib/python3.7/site-packages/pygame/tests/ftfont_tags.py deleted file mode 100644 index 5e17673..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/ftfont_tags.py +++ /dev/null @@ -1,12 +0,0 @@ -__tags__ = ['development'] - -exclude = False - -try: - import pygame.ftfont -except ImportError: - exclude = True - -if exclude: - __tags__.extend(['ignore', 'subprocess_ignore']) - diff --git a/venv/lib/python3.7/site-packages/pygame/tests/ftfont_test.py b/venv/lib/python3.7/site-packages/pygame/tests/ftfont_test.py deleted file mode 100644 index 0acd0ef..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/ftfont_test.py +++ /dev/null @@ -1,20 +0,0 @@ -import sys -import os -import unittest -from pygame.tests import font_test - -import pygame.ftfont - -font_test.pygame_font = pygame.ftfont -# Disable UCS-4 specific tests as this "Font" type does accept UCS-4 codes. 
-font_test.UCS_4 = False - -for name in dir(font_test): - obj = getattr(font_test, name) - if (isinstance(obj, type) and # conditional and - issubclass(obj, unittest.TestCase)): - new_name = 'Ft%s' % name - globals()[new_name] = type(new_name, (obj, ), {}) - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/gfxdraw_test.py b/venv/lib/python3.7/site-packages/pygame/tests/gfxdraw_test.py deleted file mode 100644 index a6e3390..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/gfxdraw_test.py +++ /dev/null @@ -1,817 +0,0 @@ -import unittest -import pygame -import pygame.gfxdraw -from pygame.locals import * -from pygame.tests.test_utils import SurfaceSubclass - -def intensity(c, i): - """Return color c changed by intensity i - - For 0 <= i <= 127 the color is a shade, with 0 being black, 127 being the - unaltered color. - - For 128 <= i <= 255 the color is a tint, with 255 being white, 128 the - unaltered color. - - """ - r, g, b = c[0:3] - if 0 <= i <= 127: - # Darken - return ((r * i) // 127, (g * i) // 127, (b * i) // 127) - # Lighten - return (r + ((255 - r) * (255 - i)) // 127, - g + ((255 - g) * (255 - i)) // 127, - b + ((255 - b) * (255 - i)) // 127) - - -class GfxdrawDefaultTest( unittest.TestCase ): - - is_started = False - - foreground_color = (128, 64, 8) - background_color = (255, 255, 255) - - def make_palette(base_color): - """Return color palette that is various intensities of base_color""" - # Need this function for Python 3.x so the base_color - # is within the scope of the list comprehension. 
- return [intensity(base_color, i) for i in range(0, 256)] - - default_palette = make_palette(foreground_color) - - default_size = (100, 100) - - def check_at(self, surf, posn, color): - sc = surf.get_at(posn) - fail_msg = ("%s != %s at %s, bitsize: %i, flags: %i, masks: %s" % - (sc, color, posn, surf.get_bitsize(), surf.get_flags(), - surf.get_masks())) - self.assertEqual(sc, color, fail_msg) - - def check_not_at(self, surf, posn, color): - sc = surf.get_at(posn) - fail_msg = ("%s != %s at %s, bitsize: %i, flags: %i, masks: %s" % - (sc, color, posn, surf.get_bitsize(), surf.get_flags(), - surf.get_masks())) - self.assertNotEqual(sc, color, fail_msg) - - @classmethod - def setUpClass(cls): - # Necessary for Surface.set_palette. - pygame.init() - pygame.display.set_mode((1, 1)) - - @classmethod - def tearDownClass(cls): - pygame.quit() - - def setUp(self): - # This makes sure pygame is always initialized before each test (in - # case a test calls pygame.quit()). - if not pygame.get_init(): - pygame.init() - - Surface = pygame.Surface - size = self.default_size - palette = self.default_palette - if not self.is_started: - # Create test surfaces - self.surfaces = [Surface(size, 0, 8), - Surface(size, SRCALPHA, 16), - Surface(size, SRCALPHA, 32)] - self.surfaces[0].set_palette(palette) - nonpalette_fmts = ( - #(8, (0xe0, 0x1c, 0x3, 0x0)), - (12, (0xf00, 0xf0, 0xf, 0x0)), - (15, (0x7c00, 0x3e0, 0x1f, 0x0)), - (15, (0x1f, 0x3e0, 0x7c00, 0x0)), - (16, (0xf00, 0xf0, 0xf, 0xf000)), - (16, (0xf000, 0xf00, 0xf0, 0xf)), - (16, (0xf, 0xf0, 0xf00, 0xf000)), - (16, (0xf0, 0xf00, 0xf000, 0xf)), - (16, (0x7c00, 0x3e0, 0x1f, 0x8000)), - (16, (0xf800, 0x7c0, 0x3e, 0x1)), - (16, (0x1f, 0x3e0, 0x7c00, 0x8000)), - (16, (0x3e, 0x7c0, 0xf800, 0x1)), - (16, (0xf800, 0x7e0, 0x1f, 0x0)), - (16, (0x1f, 0x7e0, 0xf800, 0x0)), - (24, (0xff, 0xff00, 0xff0000, 0x0)), - (24, (0xff0000, 0xff00, 0xff, 0x0)), - (32, (0xff0000, 0xff00, 0xff, 0x0)), - (32, (0xff000000, 0xff0000, 0xff00, 0x0)), - (32, 
(0xff, 0xff00, 0xff0000, 0x0)), - (32, (0xff00, 0xff0000, 0xff000000, 0x0)), - (32, (0xff0000, 0xff00, 0xff, 0xff000000)), - (32, (0xff000000, 0xff0000, 0xff00, 0xff)), - (32, (0xff, 0xff00, 0xff0000, 0xff000000)), - (32, (0xff00, 0xff0000, 0xff000000, 0xff)) - ) - for bitsize, masks in nonpalette_fmts: - self.surfaces.append(Surface(size, 0, bitsize, masks)) - for surf in self.surfaces: - surf.fill(self.background_color) - - def test_gfxdraw__subclassed_surface(self): - """Ensure pygame.gfxdraw works on subclassed surfaces.""" - surface = SurfaceSubclass((11, 13), SRCALPHA, 32) - surface.fill(pygame.Color('blue')) - expected_color = pygame.Color('red') - x, y = 1, 2 - - pygame.gfxdraw.pixel(surface, x, y, expected_color) - - self.assertEqual(surface.get_at((x, y)), expected_color) - - def test_pixel(self): - """pixel(surface, x, y, color): return None""" - fg = self.foreground_color - bg = self.background_color - for surf in self.surfaces: - fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg)) - bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg)) - pygame.gfxdraw.pixel(surf, 2, 2, fg) - for x in range(1, 4): - for y in range(1, 4): - if x == 2 and y == 2: - self.check_at(surf, (x, y), fg_adjusted) - else: - self.check_at(surf, (x, y), bg_adjusted) - - def test_hline(self): - """hline(surface, x1, x2, y, color): return None""" - fg = self.foreground_color - bg = self.background_color - startx = 10 - stopx = 80 - y = 50 - fg_test_points = [(startx, y), (stopx, y), ((stopx - startx) // 2, y)] - bg_test_points = [(startx - 1, y), (stopx + 1, y), - (startx, y - 1), (startx, y + 1), - (stopx, y - 1), (stopx, y + 1)] - for surf in self.surfaces: - fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg)) - bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg)) - pygame.gfxdraw.hline(surf, startx, stopx, y, fg) - for posn in fg_test_points: - self.check_at(surf, posn, fg_adjusted) - for posn in bg_test_points: - self.check_at(surf, posn, bg_adjusted) - - def test_vline(self): - """vline(surface, x, 
y1, y2, color): return None""" - fg = self.foreground_color - bg = self.background_color - x = 50 - starty = 10 - stopy = 80 - fg_test_points = [(x, starty), (x, stopy), (x, (stopy - starty) // 2)] - bg_test_points = [(x, starty - 1), (x, stopy + 1), - (x - 1, starty), (x + 1, starty), - (x - 1, stopy), (x + 1, stopy)] - for surf in self.surfaces: - fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg)) - bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg)) - pygame.gfxdraw.vline(surf, x, starty, stopy, fg) - for posn in fg_test_points: - self.check_at(surf, posn, fg_adjusted) - for posn in bg_test_points: - self.check_at(surf, posn, bg_adjusted) - - def test_rectangle(self): - """rectangle(surface, rect, color): return None""" - fg = self.foreground_color - bg = self.background_color - rect = pygame.Rect(10, 15, 55, 62) - rect_tuple = tuple(rect) - fg_test_points = [rect.topleft, - (rect.right - 1, rect.top), - (rect.left, rect.bottom - 1), - (rect.right - 1, rect.bottom - 1)] - bg_test_points = [(rect.left - 1, rect.top - 1), - (rect.left + 1, rect.top + 1), - (rect.right, rect.top - 1), - (rect.right - 2, rect.top + 1), - (rect.left - 1, rect.bottom), - (rect.left + 1, rect.bottom - 2), - (rect.right, rect.bottom), - (rect.right - 2, rect.bottom - 2)] - for surf in self.surfaces: - fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg)) - bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg)) - pygame.gfxdraw.rectangle(surf, rect, fg) - for posn in fg_test_points: - self.check_at(surf, posn, fg_adjusted) - for posn in bg_test_points: - self.check_at(surf, posn, bg_adjusted) - surf.fill(bg) - pygame.gfxdraw.rectangle(surf, rect_tuple, fg) - for posn in fg_test_points: - self.check_at(surf, posn, fg_adjusted) - for posn in bg_test_points: - self.check_at(surf, posn, bg_adjusted) - - def test_box(self): - """box(surface, rect, color): return None""" - fg = self.foreground_color - bg = self.background_color - rect = pygame.Rect(10, 15, 55, 62) - rect_tuple = tuple(rect) - fg_test_points = 
[rect.topleft, - (rect.left + 1, rect.top + 1), - (rect.right - 1, rect.top), - (rect.right - 2, rect.top + 1), - (rect.left, rect.bottom - 1), - (rect.left + 1, rect.bottom - 2), - (rect.right - 1, rect.bottom - 1), - (rect.right - 2, rect.bottom - 2)] - bg_test_points = [(rect.left - 1, rect.top - 1), - (rect.right, rect.top - 1), - (rect.left - 1, rect.bottom), - (rect.right, rect.bottom)] - for surf in self.surfaces: - fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg)) - bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg)) - pygame.gfxdraw.box(surf, rect, fg) - for posn in fg_test_points: - self.check_at(surf, posn, fg_adjusted) - for posn in bg_test_points: - self.check_at(surf, posn, bg_adjusted) - surf.fill(bg) - pygame.gfxdraw.box(surf, rect_tuple, fg) - for posn in fg_test_points: - self.check_at(surf, posn, fg_adjusted) - for posn in bg_test_points: - self.check_at(surf, posn, bg_adjusted) - - def test_line(self): - """line(surface, x1, y1, x2, y2, color): return None""" - fg = self.foreground_color - bg = self.background_color - x1 = 10 - y1 = 15 - x2 = 92 - y2 = 77 - fg_test_points = [(x1, y1), (x2, y2)] - bg_test_points = [(x1 - 1, y1), (x1, y1 - 1), (x1 - 1, y1 - 1), - (x2 + 1, y2), (x2, y2 + 1), (x2 + 1, y2 + 1)] - for surf in self.surfaces: - fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg)) - bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg)) - pygame.gfxdraw.line(surf, x1, y1, x2, y2, fg) - for posn in fg_test_points: - self.check_at(surf, posn, fg_adjusted) - for posn in bg_test_points: - self.check_at(surf, posn, bg_adjusted) - - def test_circle(self): - """circle(surface, x, y, r, color): return None""" - fg = self.foreground_color - bg = self.background_color - x = 45 - y = 40 - r = 30 - fg_test_points = [(x, y - r), - (x, y + r), - (x - r, y), - (x + r, y)] - bg_test_points = [(x, y), - (x, y - r + 1), - (x, y - r - 1), - (x, y + r + 1), - (x, y + r - 1), - (x - r - 1, y), - (x - r + 1, y), - (x + r + 1, y), - (x + r - 1, y)] - for surf in self.surfaces: - 
fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg)) - bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg)) - pygame.gfxdraw.circle(surf, x, y, r, fg) - for posn in fg_test_points: - self.check_at(surf, posn, fg_adjusted) - for posn in bg_test_points: - self.check_at(surf, posn, bg_adjusted) - - def test_arc(self): - """arc(surface, x, y, r, start, end, color): return None""" - fg = self.foreground_color - bg = self.background_color - x = 45 - y = 40 - r = 30 - start = 0 # +x direction, but not (x + r, y) (?) - end = 90 # -y direction, including (x, y + r) - fg_test_points = [(x, y + r), (x + r, y + 1)] - bg_test_points = [(x, y), - (x, y - r), - (x - r, y), - (x, y + r + 1), - (x, y + r - 1), - (x - 1, y + r), - (x + r + 1, y), - (x + r - 1, y), - (x + r, y)] - for surf in self.surfaces: - fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg)) - bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg)) - pygame.gfxdraw.arc(surf, x, y, r, start, end, fg) - for posn in fg_test_points: - self.check_at(surf, posn, fg_adjusted) - for posn in bg_test_points: - self.check_at(surf, posn, bg_adjusted) - - def test_aacircle(self): - """aacircle(surface, x, y, r, color): return None""" - fg = self.foreground_color - bg = self.background_color - x = 45 - y = 40 - r = 30 - fg_test_points = [(x, y - r), - (x, y + r), - (x - r, y), - (x + r, y)] - bg_test_points = [(x, y), - (x, y - r + 1), - (x, y - r - 1), - (x, y + r + 1), - (x, y + r - 1), - (x - r - 1, y), - (x - r + 1, y), - (x + r + 1, y), - (x + r - 1, y)] - for surf in self.surfaces: - fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg)) - bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg)) - pygame.gfxdraw.aacircle(surf, x, y, r, fg) - for posn in fg_test_points: - self.check_not_at(surf, posn, bg_adjusted) - for posn in bg_test_points: - self.check_at(surf, posn, bg_adjusted) - - def test_filled_circle(self): - """filled_circle(surface, x, y, r, color): return None""" - fg = self.foreground_color - bg = self.background_color - x = 45 - y = 40 - r = 30 - 
fg_test_points = [(x, y - r), - (x, y - r + 1), - (x, y + r), - (x, y + r - 1), - (x - r, y), - (x - r + 1, y), - (x + r, y), - (x + r - 1, y), - (x, y)] - bg_test_points = [(x, y - r - 1), - (x, y + r + 1), - (x - r - 1, y), - (x + r + 1, y)] - for surf in self.surfaces: - fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg)) - bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg)) - pygame.gfxdraw.filled_circle(surf, x, y, r, fg) - for posn in fg_test_points: - self.check_at(surf, posn, fg_adjusted) - for posn in bg_test_points: - self.check_at(surf, posn, bg_adjusted) - - def test_ellipse(self): - """ellipse(surface, x, y, rx, ry, color): return None""" - fg = self.foreground_color - bg = self.background_color - x = 45 - y = 40 - rx = 30 - ry = 35 - fg_test_points = [(x, y - ry), - (x, y + ry), - (x - rx, y), - (x + rx, y)] - bg_test_points = [(x, y), - (x, y - ry + 1), - (x, y - ry - 1), - (x, y + ry + 1), - (x, y + ry - 1), - (x - rx - 1, y), - (x - rx + 1, y), - (x + rx + 1, y), - (x + rx - 1, y)] - for surf in self.surfaces: - fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg)) - bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg)) - pygame.gfxdraw.ellipse(surf, x, y, rx, ry, fg) - for posn in fg_test_points: - self.check_at(surf, posn, fg_adjusted) - for posn in bg_test_points: - self.check_at(surf, posn, bg_adjusted) - - def test_aaellipse(self): - """aaellipse(surface, x, y, rx, ry, color): return None""" - fg = self.foreground_color - bg = self.background_color - x = 45 - y = 40 - rx = 30 - ry = 35 - fg_test_points = [(x, y - ry), - (x, y + ry), - (x - rx, y), - (x + rx, y)] - bg_test_points = [(x, y), - (x, y - ry + 1), - (x, y - ry - 1), - (x, y + ry + 1), - (x, y + ry - 1), - (x - rx - 1, y), - (x - rx + 1, y), - (x + rx + 1, y), - (x + rx - 1, y)] - for surf in self.surfaces: - fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg)) - bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg)) - pygame.gfxdraw.aaellipse(surf, x, y, rx, ry, fg) - for posn in fg_test_points: - 
self.check_not_at(surf, posn, bg_adjusted) - for posn in bg_test_points: - self.check_at(surf, posn, bg_adjusted) - - def test_filled_ellipse(self): - """filled_ellipse(surface, x, y, rx, ry, color): return None""" - fg = self.foreground_color - bg = self.background_color - x = 45 - y = 40 - rx = 30 - ry = 35 - fg_test_points = [(x, y - ry), - (x, y - ry + 1), - (x, y + ry), - (x, y + ry - 1), - (x - rx, y), - (x - rx + 1, y), - (x + rx, y), - (x + rx - 1, y), - (x, y)] - bg_test_points = [(x, y - ry - 1), - (x, y + ry + 1), - (x - rx - 1, y), - (x + rx + 1, y)] - for surf in self.surfaces: - fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg)) - bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg)) - pygame.gfxdraw.filled_ellipse(surf, x, y, rx, ry, fg) - for posn in fg_test_points: - self.check_at(surf, posn, fg_adjusted) - for posn in bg_test_points: - self.check_at(surf, posn, bg_adjusted) - - def test_pie(self): - """pie(surface, x, y, r, start, end, color): return None""" - fg = self.foreground_color - bg = self.background_color - x = 45 - y = 40 - r = 30 - start = 0 # +x direction, including (x + r, y) - end = 90 # -y direction, but not (x, y + r) (?) 
- fg_test_points = [(x, y), - (x + 1, y), - (x, y + 1), - (x + r, y)] - bg_test_points = [(x - 1, y), - (x, y - 1), - (x - 1, y - 1), - (x + 1, y + 1), - (x + r + 1, y), - (x + r, y - 1), - (x, y + r + 1)] - for surf in self.surfaces: - fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg)) - bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg)) - pygame.gfxdraw.pie(surf, x, y, r, start, end, fg) - for posn in fg_test_points: - self.check_at(surf, posn, fg_adjusted) - for posn in bg_test_points: - self.check_at(surf, posn, bg_adjusted) - - def test_trigon(self): - """trigon(surface, x1, y1, x2, y2, x3, y3, color): return None""" - fg = self.foreground_color - bg = self.background_color - x1 = 10 - y1 = 15 - x2 = 92 - y2 = 77 - x3 = 20 - y3 = 60 - fg_test_points = [(x1, y1), (x2, y2), (x3, y3)] - bg_test_points = [(x1 - 1, y1 - 1), - (x2 + 1, y2 + 1), - (x3 - 1, y3 + 1), - (x1 + 10, y1 + 30)] - for surf in self.surfaces: - fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg)) - bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg)) - pygame.gfxdraw.trigon(surf, x1, y1, x2, y2, x3, y3, fg) - for posn in fg_test_points: - self.check_at(surf, posn, fg_adjusted) - for posn in bg_test_points: - self.check_at(surf, posn, bg_adjusted) - - def test_aatrigon(self): - """aatrigon(surface, x1, y1, x2, y2, x3, y3, color): return None""" - fg = self.foreground_color - bg = self.background_color - x1 = 10 - y1 = 15 - x2 = 92 - y2 = 77 - x3 = 20 - y3 = 60 - fg_test_points = [(x1, y1), (x2, y2), (x3, y3)] - bg_test_points = [(x1 - 1, y1 - 1), - (x2 + 1, y2 + 1), - (x3 - 1, y3 + 1), - (x1 + 10, y1 + 30)] - for surf in self.surfaces: - fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg)) - bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg)) - pygame.gfxdraw.aatrigon(surf, x1, y1, x2, y2, x3, y3, fg) - for posn in fg_test_points: - self.check_not_at(surf, posn, bg_adjusted) - for posn in bg_test_points: - self.check_at(surf, posn, bg_adjusted) - - @unittest.expectedFailure - def test_aatrigon__with_horizontal_edge(self): - 
"""Ensure aatrigon draws horizontal edges correctly. - - This test creates 2 surfaces and draws an aatrigon on each. The pixels - on each surface are compared to ensure they are the same. The only - difference between the 2 aatrigons is the order the points are drawn. - The order of the points should have no impact on the final drawing. - - Related to issue #622. - """ - bg_color = pygame.Color('white') - line_color = pygame.Color('black') - width, height = 11, 10 - expected_surface = pygame.Surface((width, height), 0, 32) - expected_surface.fill(bg_color) - surface = pygame.Surface((width, height), 0, 32) - surface.fill(bg_color) - - x1, y1 = width - 1, 0 - x2, y2 = (width - 1) // 2, height - 1 - x3, y3 = 0, 0 - - # The points in this order draw as expected. - pygame.gfxdraw.aatrigon(expected_surface, x1, y1, x2, y2, x3, y3, - line_color) - - # The points in reverse order fail to draw the horizontal edge along - # the top. - pygame.gfxdraw.aatrigon(surface, x3, y3, x2, y2, x1, y1, line_color) - - # The surfaces are locked for a possible speed up of pixel access. 
- expected_surface.lock() - surface.lock() - for x in range(width): - for y in range(height): - self.assertEqual(expected_surface.get_at((x, y)), - surface.get_at((x, y)), - 'pos=({}, {})'.format(x, y)) - - surface.unlock() - expected_surface.unlock() - - def test_filled_trigon(self): - """filled_trigon(surface, x1, y1, x2, y2, x3, y3, color): return None""" - fg = self.foreground_color - bg = self.background_color - x1 = 10 - y1 = 15 - x2 = 92 - y2 = 77 - x3 = 20 - y3 = 60 - fg_test_points = [(x1, y1), (x2, y2), (x3, y3), - (x1 + 10, y1 + 30)] - bg_test_points = [(x1 - 1, y1 - 1), - (x2 + 1, y2 + 1), - (x3 - 1, y3 + 1)] - for surf in self.surfaces: - fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg)) - bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg)) - pygame.gfxdraw.filled_trigon(surf, x1, y1, x2, y2, x3, y3, fg) - for posn in fg_test_points: - self.check_at(surf, posn, fg_adjusted) - for posn in bg_test_points: - self.check_at(surf, posn, bg_adjusted) - - def test_polygon(self): - """polygon(surface, points, color): return None""" - fg = self.foreground_color - bg = self.background_color - points = [(10, 80), (10, 15), (92, 25), (92, 80)] - fg_test_points = (points + - [(points[0][0], points[0][1] - 1), - (points[0][0] + 1, points[0][1]), - (points[3][0] - 1, points[3][1]), - (points[3][0], points[3][1] - 1), - (points[2][0], points[2][1] + 1)]) - bg_test_points = [(points[0][0] - 1, points[0][1]), - (points[0][0], points[0][1] + 1), - (points[0][0] - 1, points[0][1] + 1), - (points[0][0] + 1, points[0][1] - 1), - (points[3][0] + 1, points[3][1]), - (points[3][0], points[3][1] + 1), - (points[3][0] + 1, points[3][1] + 1), - (points[3][0] - 1, points[3][1] - 1), - (points[2][0] + 1, points[2][1]), - (points[2][0] - 1, points[2][1] + 1), - (points[1][0] - 1, points[1][1]), - (points[1][0], points[1][1] - 1), - (points[1][0] - 1, points[1][1] - 1)] - for surf in self.surfaces: - fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg)) - bg_adjusted = 
surf.unmap_rgb(surf.map_rgb(bg)) - pygame.gfxdraw.polygon(surf, points, fg) - for posn in fg_test_points: - self.check_at(surf, posn, fg_adjusted) - for posn in bg_test_points: - self.check_at(surf, posn, bg_adjusted) - - def test_aapolygon(self): - """aapolygon(surface, points, color): return None""" - fg = self.foreground_color - bg = self.background_color - points = [(10, 80), (10, 15), (92, 25), (92, 80)] - fg_test_points = points - bg_test_points = [(points[0][0] - 1, points[0][1]), - (points[0][0], points[0][1] + 1), - (points[0][0] - 1, points[0][1] + 1), - (points[0][0] + 1, points[0][1] - 1), - (points[3][0] + 1, points[3][1]), - (points[3][0], points[3][1] + 1), - (points[3][0] + 1, points[3][1] + 1), - (points[3][0] - 1, points[3][1] - 1), - (points[2][0] + 1, points[2][1]), - (points[2][0] - 1, points[2][1] + 1), - (points[1][0] - 1, points[1][1]), - (points[1][0], points[1][1] - 1), - (points[1][0] - 1, points[1][1] - 1)] - for surf in self.surfaces: - fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg)) - bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg)) - pygame.gfxdraw.aapolygon(surf, points, fg) - for posn in fg_test_points: - self.check_at(surf, posn, fg_adjusted) - for posn in bg_test_points: - self.check_not_at(surf, posn, fg_adjusted) - for posn in bg_test_points: - self.check_at(surf, posn, bg_adjusted) - - @unittest.expectedFailure - def test_aapolygon__with_horizontal_edge(self): - """Ensure aapolygon draws horizontal edges correctly. - - This test creates 2 surfaces and draws a polygon on each. The pixels - on each surface are compared to ensure they are the same. The only - difference between the 2 polygons is that one is drawn using - aapolygon() and the other using multiple line() calls. They should - produce the same final drawing. - - Related to issue #622. 
- """ - bg_color = pygame.Color('white') - line_color = pygame.Color('black') - width, height = 11, 10 - expected_surface = pygame.Surface((width, height), 0, 32) - expected_surface.fill(bg_color) - surface = pygame.Surface((width, height), 0, 32) - surface.fill(bg_color) - - points = ((0, 0), (0, height - 1), (width - 1, height - 1), - (width - 1, 0)) - - # The points are used to draw the expected aapolygon using the line() - # function. - for (x1, y1), (x2, y2) in zip(points, points[1:] + points[:1]): - pygame.gfxdraw.line(expected_surface, x1, y1, x2, y2, line_color) - - # The points in this order fail to draw the horizontal edge along - # the top. - pygame.gfxdraw.aapolygon(surface, points, line_color) - - # The surfaces are locked for a possible speed up of pixel access. - expected_surface.lock() - surface.lock() - for x in range(width): - for y in range(height): - self.assertEqual(expected_surface.get_at((x, y)), - surface.get_at((x, y)), - 'pos=({}, {})'.format(x, y)) - - surface.unlock() - expected_surface.unlock() - - def test_filled_polygon(self): - """filled_polygon(surface, points, color): return None""" - fg = self.foreground_color - bg = self.background_color - points = [(10, 80), (10, 15), (92, 25), (92, 80)] - fg_test_points = (points + - [(points[0][0], points[0][1] - 1), - (points[0][0] + 1, points[0][1]), - (points[0][0] + 1, points[0][1] - 1), - (points[3][0] - 1, points[3][1]), - (points[3][0], points[3][1] - 1), - (points[3][0] - 1, points[3][1] - 1), - (points[2][0], points[2][1] + 1), - (points[2][0] - 1, points[2][1] + 1)]) - bg_test_points = [(points[0][0] - 1, points[0][1]), - (points[0][0], points[0][1] + 1), - (points[0][0] - 1, points[0][1] + 1), - (points[3][0] + 1, points[3][1]), - (points[3][0], points[3][1] + 1), - (points[3][0] + 1, points[3][1] + 1), - (points[2][0] + 1, points[2][1]), - (points[1][0] - 1, points[1][1]), - (points[1][0], points[1][1] - 1), - (points[1][0] - 1, points[1][1] - 1)] - for surf in self.surfaces: - 
fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg)) - bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg)) - pygame.gfxdraw.filled_polygon(surf, points, fg) - for posn in fg_test_points: - self.check_at(surf, posn, fg_adjusted) - for posn in bg_test_points: - self.check_at(surf, posn, bg_adjusted) - - def test_textured_polygon(self): - """textured_polygon(surface, points, texture, tx, ty): return None""" - w, h = self.default_size - fg = self.foreground_color - bg = self.background_color - tx = 0 - ty = 0 - texture = pygame.Surface((w + tx, h + ty), 0, 24) - texture.fill(fg, (0, 0, w, h)) - points = [(10, 80), (10, 15), (92, 25), (92, 80)] - # Don't know how to really check this as boarder points may - # or may not be included in the textured polygon. - fg_test_points = [(points[1][0] + 30, points[1][1] + 40)] - bg_test_points = [(points[0][0] - 1, points[0][1]), - (points[0][0], points[0][1] + 1), - (points[0][0] - 1, points[0][1] + 1), - (points[3][0] + 1, points[3][1]), - (points[3][0], points[3][1] + 1), - (points[3][0] + 1, points[3][1] + 1), - (points[2][0] + 1, points[2][1]), - (points[1][0] - 1, points[1][1]), - (points[1][0], points[1][1] - 1), - (points[1][0] - 1, points[1][1] - 1)] - for surf in self.surfaces[1:]: - fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg)) - bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg)) - pygame.gfxdraw.textured_polygon(surf, points, texture, -tx, -ty) - for posn in fg_test_points: - self.check_at(surf, posn, fg_adjusted) - for posn in bg_test_points: - self.check_at(surf, posn, bg_adjusted) - - # Alpha blit to 8 bits-per-pixel surface forbidden. 
- texture = pygame.Surface(self.default_size, SRCALPHA, 32) - self.assertRaises(ValueError, - pygame.gfxdraw.textured_polygon, - self.surfaces[0], - points, - texture, 0, 0) - - def test_bezier(self): - """bezier(surface, points, steps, color): return None""" - fg = self.foreground_color - bg = self.background_color - points = [(10, 50), (25, 15), (60, 80), (92, 30)] - fg_test_points = [points[0], points[3]] - bg_test_points = [(points[0][0] - 1, points[0][1]), - (points[3][0] + 1, points[3][1]), - (points[1][0], points[1][1] + 3), - (points[2][0], points[2][1] - 3)] - for surf in self.surfaces: - fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg)) - bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg)) - pygame.gfxdraw.bezier(surf, points, 30, fg) - for posn in fg_test_points: - self.check_at(surf, posn, fg_adjusted) - for posn in bg_test_points: - self.check_at(surf, posn, bg_adjusted) - - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/image__save_gl_surface_test.py b/venv/lib/python3.7/site-packages/pygame/tests/image__save_gl_surface_test.py deleted file mode 100644 index be4ee95..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/image__save_gl_surface_test.py +++ /dev/null @@ -1,44 +0,0 @@ -import os -import unittest - -from pygame.tests import test_utils -import pygame -from pygame.locals import * - - -@unittest.skipIf(os.environ.get('SDL_VIDEODRIVER') == 'dummy', - 'OpenGL requires a non-"dummy" SDL_VIDEODRIVER') -class GL_ImageSave(unittest.TestCase): - def test_image_save_works_with_opengl_surfaces(self): - """ - |tags:display,slow,opengl| - """ - - pygame.display.init() - screen = pygame.display.set_mode((640,480), OPENGL|DOUBLEBUF) - pygame.display.flip() - - tmp_dir = test_utils.get_tmp_dir() - # Try the imageext module. 
- tmp_file = os.path.join(tmp_dir, "opengl_save_surface_test.png") - pygame.image.save(screen, tmp_file) - - self.assertTrue(os.path.exists(tmp_file)) - - os.remove(tmp_file) - - # Only test the image module. - tmp_file = os.path.join(tmp_dir, "opengl_save_surface_test.bmp") - pygame.image.save(screen, tmp_file) - - self.assertTrue(os.path.exists(tmp_file)) - - os.remove(tmp_file) - - # stops tonnes of tmp dirs building up in trunk dir - os.rmdir(tmp_dir) - pygame.display.quit() - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/image_tags.py b/venv/lib/python3.7/site-packages/pygame/tests/image_tags.py deleted file mode 100644 index 3f6c181..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/image_tags.py +++ /dev/null @@ -1,7 +0,0 @@ -__tags__ = [] - -import pygame -import sys -if 'pygame.image' not in sys.modules: - __tags__.extend(('ignore', 'subprocess_ignore')) - diff --git a/venv/lib/python3.7/site-packages/pygame/tests/image_test.py b/venv/lib/python3.7/site-packages/pygame/tests/image_test.py deleted file mode 100644 index 9ec64a2..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/image_test.py +++ /dev/null @@ -1,527 +0,0 @@ -# -*- coding: utf-8 -*- - -import array -import os -import tempfile -import unittest -import glob - -from pygame.tests.test_utils import example_path, png -import pygame, pygame.image, pygame.pkgdata -from pygame.compat import xrange_, ord_, unicode_ - - -def test_magic(f, magic_hex): - """ tests a given file to see if the magic hex matches. - """ - data = f.read(len(magic_hex)) - - if len(data) != len(magic_hex): - return 0 - - for i in range(len(magic_hex)): - if magic_hex[i] != ord_(data[i]): - return 0 - - return 1 - - -class ImageModuleTest( unittest.TestCase ): - def testLoadIcon(self): - """ see if we can load the pygame icon. 
- """ - f = pygame.pkgdata.getResource("pygame_icon.bmp") - self.assertEqual(f.mode, "rb") - - surf = pygame.image.load_basic(f) - - self.assertEqual(surf.get_at((0,0)),(5, 4, 5, 255)) - self.assertEqual(surf.get_height(),32) - self.assertEqual(surf.get_width(),32) - - def testLoadPNG(self): - """ see if we can load a png with color values in the proper channels. - """ - # Create a PNG file with known colors - reddish_pixel = (210, 0, 0, 255) - greenish_pixel = (0, 220, 0, 255) - bluish_pixel = (0, 0, 230, 255) - greyish_pixel = (110, 120, 130, 140) - pixel_array = [reddish_pixel + greenish_pixel, - bluish_pixel + greyish_pixel] - - f_descriptor, f_path = tempfile.mkstemp(suffix='.png') - - with os.fdopen(f_descriptor, 'wb') as f: - w = png.Writer(2, 2, alpha=True) - w.write(f, pixel_array) - - # Read the PNG file and verify that pygame interprets it correctly - surf = pygame.image.load(f_path) - - self.assertEqual(surf.get_at((0, 0)), reddish_pixel) - self.assertEqual(surf.get_at((1, 0)), greenish_pixel) - self.assertEqual(surf.get_at((0, 1)), bluish_pixel) - self.assertEqual(surf.get_at((1, 1)), greyish_pixel) - - # Read the PNG file obj. and verify that pygame interprets it correctly - with open(f_path, 'rb') as f: - surf = pygame.image.load(f) - - self.assertEqual(surf.get_at((0, 0)), reddish_pixel) - self.assertEqual(surf.get_at((1, 0)), greenish_pixel) - self.assertEqual(surf.get_at((0, 1)), bluish_pixel) - self.assertEqual(surf.get_at((1, 1)), greyish_pixel) - - os.remove(f_path) - - def testLoadJPG(self): - """ see if we can load a jpg. 
- """ - - f = example_path('data/alien1.jpg') # normalized - # f = os.path.join("examples", "data", "alien1.jpg") - surf = pygame.image.load(f) - - with open(f, "rb") as f: - surf = pygame.image.load(f) - - # with open(os.path.join("examples", "data", "alien1.jpg"), "rb") as f: - # surf = pygame.image.load(open(os.path.join("examples", "data", - # "alien1.jpg"), "rb")) - - def testSaveJPG(self): - """ JPG equivalent to issue #211 - color channel swapping - - Make sure the SDL surface color masks represent the rgb memory format - required by the JPG library. The masks are machine endian dependent - """ - - from pygame import Color, Rect - - # The source image is a 2 by 2 square of four colors. Since JPEG is - # lossy, there can be color bleed. Make each color square 16 by 16, - # to avoid the significantly color value distorts found at color - # boundaries due to the compression value set by Pygame. - square_len = 16 - sz = 2 * square_len, 2 * square_len - - # +---------------------------------+ - # | red | green | - # |----------------+----------------| - # | blue | (255, 128, 64) | - # +---------------------------------+ - # - # as (rect, color) pairs. - def as_rect(square_x, square_y): - return Rect(square_x * square_len, square_y * square_len, - square_len, square_len) - squares = [(as_rect(0, 0), Color("red")), - (as_rect(1, 0), Color("green")), - (as_rect(0, 1), Color("blue")), - (as_rect(1, 1), Color(255, 128, 64))] - - # A surface format which is not directly usable with libjpeg. - surf = pygame.Surface(sz, 0, 32) - for rect, color in squares: - surf.fill(color, rect) - - # Assume pygame.image.Load works correctly as it is handled by the - # third party SDL_image library. - f_path = tempfile.mktemp(suffix='.jpg') - pygame.image.save(surf, f_path) - jpg_surf = pygame.image.load(f_path) - - # Allow for small differences in the restored colors. 
- def approx(c): - mask = 0xFC - return pygame.Color(c.r & mask, c.g & mask, c.b & mask) - offset = square_len // 2 - for rect, color in squares: - posn = rect.move((offset, offset)).topleft - self.assertEqual(approx(jpg_surf.get_at(posn)), approx(color)) - - def testSavePNG32(self): - """ see if we can save a png with color values in the proper channels. - """ - # Create a PNG file with known colors - reddish_pixel = (215, 0, 0, 255) - greenish_pixel = (0, 225, 0, 255) - bluish_pixel = (0, 0, 235, 255) - greyish_pixel = (115, 125, 135, 145) - - surf = pygame.Surface((1, 4), pygame.SRCALPHA, 32) - surf.set_at((0, 0), reddish_pixel) - surf.set_at((0, 1), greenish_pixel) - surf.set_at((0, 2), bluish_pixel) - surf.set_at((0, 3), greyish_pixel) - - f_path = tempfile.mktemp(suffix='.png') - pygame.image.save(surf, f_path) - - try: - # Read the PNG file and verify that pygame saved it correctly - reader = png.Reader(filename=f_path) - width, height, pixels, metadata = reader.asRGBA8() - - # pixels is a generator - self.assertEqual(tuple(next(pixels)), reddish_pixel) - self.assertEqual(tuple(next(pixels)), greenish_pixel) - self.assertEqual(tuple(next(pixels)), bluish_pixel) - self.assertEqual(tuple(next(pixels)), greyish_pixel) - - finally: - # Ensures proper clean up. - if not reader.file.closed: - reader.file.close() - del reader - os.remove(f_path) - - def testSavePNG24(self): - """ see if we can save a png with color values in the proper channels. 
- """ - # Create a PNG file with known colors - reddish_pixel = (215, 0, 0) - greenish_pixel = (0, 225, 0) - bluish_pixel = (0, 0, 235) - greyish_pixel = (115, 125, 135) - - surf = pygame.Surface((1, 4), 0, 24) - surf.set_at((0, 0), reddish_pixel) - surf.set_at((0, 1), greenish_pixel) - surf.set_at((0, 2), bluish_pixel) - surf.set_at((0, 3), greyish_pixel) - - f_path = tempfile.mktemp(suffix='.png') - pygame.image.save(surf, f_path) - - try: - # Read the PNG file and verify that pygame saved it correctly - reader = png.Reader(filename=f_path) - width, height, pixels, metadata = reader.asRGB8() - - # pixels is a generator - self.assertEqual(tuple(next(pixels)), reddish_pixel) - self.assertEqual(tuple(next(pixels)), greenish_pixel) - self.assertEqual(tuple(next(pixels)), bluish_pixel) - self.assertEqual(tuple(next(pixels)), greyish_pixel) - - finally: - # Ensures proper clean up. - if not reader.file.closed: - reader.file.close() - del reader - os.remove(f_path) - - def test_save(self): - - s = pygame.Surface((10,10)) - s.fill((23,23,23)) - magic_hex = {} - magic_hex['jpg'] = [0xff, 0xd8, 0xff, 0xe0] - magic_hex['png'] = [0x89 ,0x50 ,0x4e ,0x47] - # magic_hex['tga'] = [0x0, 0x0, 0xa] - magic_hex['bmp'] = [0x42, 0x4d] - - - formats = ["jpg", "png", "bmp"] - # uppercase too... JPG - formats = formats + [x.upper() for x in formats] - - for fmt in formats: - try: - temp_filename = "%s.%s" % ("tmpimg", fmt) - pygame.image.save(s, temp_filename) - - # Using 'with' ensures the file is closed even if test fails. - with open(temp_filename, "rb") as handle: - # Test the magic numbers at the start of the file to ensure - # they are saved as the correct file type. - self.assertEqual((1, fmt), (test_magic(handle, - magic_hex[fmt.lower()]), fmt)) - - # load the file to make sure it was saved correctly. - # Note load can load a jpg saved with a .png file name. - s2 = pygame.image.load(temp_filename) - #compare contents, might only work reliably for png... 
- # but because it's all one color it seems to work with jpg. - self.assertEqual(s2.get_at((0,0)), s.get_at((0,0))) - finally: - #clean up the temp file, comment out to leave tmp file after run. - os.remove(temp_filename) - - def test_save_colorkey(self): - """ make sure the color key is not changed when saving. - """ - s = pygame.Surface((10,10), pygame.SRCALPHA, 32) - s.fill((23,23,23)) - s.set_colorkey((0,0,0)) - colorkey1 = s.get_colorkey() - p1 = s.get_at((0,0)) - - temp_filename = "tmpimg.png" - try: - pygame.image.save(s, temp_filename) - s2 = pygame.image.load(temp_filename) - finally: - os.remove(temp_filename) - - colorkey2 = s.get_colorkey() - # check that the pixel and the colorkey is correct. - self.assertEqual(colorkey1, colorkey2) - self.assertEqual(p1, s2.get_at((0,0))) - - def test_load_unicode_path(self): - import shutil - orig = unicode_(example_path("data/asprite.bmp")) - temp = os.path.join(unicode_(example_path('data')), u'你好.bmp') - shutil.copy(orig, temp) - try: - im = pygame.image.load(temp) - finally: - os.remove(temp) - - def _unicode_save(self, temp_file): - im = pygame.Surface((10, 10), 0, 32) - try: - with open(temp_file, 'w') as f: - pass - os.remove(temp_file) - except IOError: - raise unittest.SkipTest('the path cannot be opened') - - self.assertFalse(os.path.exists(temp_file)) - - try: - pygame.image.save(im, temp_file) - - self.assertGreater(os.path.getsize(temp_file), 10) - finally: - try: - os.remove(temp_file) - except EnvironmentError: - pass - - def test_save_unicode_path(self): - """save unicode object with non-ASCII chars""" - self._unicode_save(u"你好.bmp") - - def assertPremultipliedAreEqual(self, string1, string2, source_string): - self.assertEqual(len(string1), len(string2)) - block_size = 20 - if string1 != string2: - for block_start in xrange_(0, len(string1), block_size): - block_end = min(block_start + block_size, len(string1)) - block1 = string1[block_start:block_end] - block2 = string2[block_start:block_end] - if 
block1 != block2: - source_block = source_string[block_start:block_end] - msg = "string difference in %d to %d of %d:\n%s\n%s\nsource:\n%s" % (block_start, block_end, len(string1), block1.encode("hex"), block2.encode("hex"), source_block.encode("hex")) - self.fail(msg) - - def test_to_string__premultiplied(self): - """ test to make sure we can export a surface to a premultiplied alpha string - """ - - def convertRGBAtoPremultiplied(surface_to_modify): - for x in xrange_(surface_to_modify.get_width()): - for y in xrange_(surface_to_modify.get_height()): - color = surface_to_modify.get_at((x, y)) - premult_color = (color[0]*color[3]/255, - color[1]*color[3]/255, - color[2]*color[3]/255, - color[3]) - surface_to_modify.set_at((x, y), premult_color) - - test_surface = pygame.Surface((256, 256), pygame.SRCALPHA, 32) - for x in xrange_(test_surface.get_width()): - for y in xrange_(test_surface.get_height()): - i = x + y*test_surface.get_width() - test_surface.set_at((x,y), ((i*7) % 256, (i*13) % 256, (i*27) % 256, y)) - premultiplied_copy = test_surface.copy() - convertRGBAtoPremultiplied(premultiplied_copy) - self.assertPremultipliedAreEqual(pygame.image.tostring(test_surface, "RGBA_PREMULT"), - pygame.image.tostring(premultiplied_copy, "RGBA"), - pygame.image.tostring(test_surface, "RGBA")) - self.assertPremultipliedAreEqual(pygame.image.tostring(test_surface, "ARGB_PREMULT"), - pygame.image.tostring(premultiplied_copy, "ARGB"), - pygame.image.tostring(test_surface, "ARGB")) - - no_alpha_surface = pygame.Surface((256, 256), 0, 24) - self.assertRaises(ValueError, pygame.image.tostring, no_alpha_surface, "RGBA_PREMULT") - - # Custom assert method to check for identical surfaces. - def _assertSurfaceEqual(self, surf_a, surf_b, msg=None): - a_width, a_height = surf_a.get_width(), surf_a.get_height() - - # Check a few things to see if the surfaces are equal. 
- self.assertEqual(a_width, surf_b.get_width(), msg) - self.assertEqual(a_height, surf_b.get_height(), msg) - self.assertEqual(surf_a.get_size(), surf_b.get_size(), msg) - self.assertEqual(surf_a.get_rect(), surf_b.get_rect(), msg) - self.assertEqual(surf_a.get_colorkey(), surf_b.get_colorkey(), msg) - self.assertEqual(surf_a.get_alpha(), surf_b.get_alpha(), msg) - self.assertEqual(surf_a.get_flags(), surf_b.get_flags(), msg) - self.assertEqual(surf_a.get_bitsize(), surf_b.get_bitsize(), msg) - self.assertEqual(surf_a.get_bytesize(), surf_b.get_bytesize(), msg) - # Anything else? - - # Making the method lookups local for a possible speed up. - surf_a_get_at = surf_a.get_at - surf_b_get_at = surf_b.get_at - for y in xrange_(a_height): - for x in xrange_(a_width): - self.assertEqual(surf_a_get_at((x, y)), surf_b_get_at((x, y)), - msg) - - def test_fromstring__and_tostring(self): - """Ensure methods tostring() and fromstring() are symmetric.""" - - #################################################################### - def RotateRGBAtoARGB(str_buf): - byte_buf = array.array("B", str_buf) - num_quads = len(byte_buf)//4 - for i in xrange_(num_quads): - alpha = byte_buf[i*4 + 3] - byte_buf[i*4 + 3] = byte_buf[i*4 + 2] - byte_buf[i*4 + 2] = byte_buf[i*4 + 1] - byte_buf[i*4 + 1] = byte_buf[i*4 + 0] - byte_buf[i*4 + 0] = alpha - return byte_buf.tostring() - - #################################################################### - def RotateARGBtoRGBA(str_buf): - byte_buf = array.array("B", str_buf) - num_quads = len(byte_buf)//4 - for i in xrange_(num_quads): - alpha = byte_buf[i*4 + 0] - byte_buf[i*4 + 0] = byte_buf[i*4 + 1] - byte_buf[i*4 + 1] = byte_buf[i*4 + 2] - byte_buf[i*4 + 2] = byte_buf[i*4 + 3] - byte_buf[i*4 + 3] = alpha - return byte_buf.tostring() - - #################################################################### - test_surface = pygame.Surface((64, 256), flags=pygame.SRCALPHA, - depth=32) - for i in xrange_(256): - for j in xrange_(16): - intensity = j*16 
+ 15 - test_surface.set_at((j + 0, i), (intensity, i, i, i)) - test_surface.set_at((j + 16, i), (i, intensity, i, i)) - test_surface.set_at((j + 32, i), (i, i, intensity, i)) - test_surface.set_at((j + 32, i), (i, i, i, intensity)) - - self._assertSurfaceEqual(test_surface, test_surface, - 'failing with identical surfaces') - - rgba_buf = pygame.image.tostring(test_surface, "RGBA") - rgba_buf = RotateARGBtoRGBA(RotateRGBAtoARGB(rgba_buf)) - test_rotate_functions = pygame.image.fromstring( - rgba_buf, test_surface.get_size(), "RGBA") - - self._assertSurfaceEqual(test_surface, test_rotate_functions, - 'rotate functions are not symmetric') - - rgba_buf = pygame.image.tostring(test_surface, "RGBA") - argb_buf = RotateRGBAtoARGB(rgba_buf) - test_from_argb_string = pygame.image.fromstring( - argb_buf, test_surface.get_size(), "ARGB") - - self._assertSurfaceEqual(test_surface, test_from_argb_string, - '"RGBA" rotated to "ARGB" failed') - - argb_buf = pygame.image.tostring(test_surface, "ARGB") - rgba_buf = RotateARGBtoRGBA(argb_buf) - test_to_argb_string = pygame.image.fromstring( - rgba_buf, test_surface.get_size(), "RGBA") - - self._assertSurfaceEqual(test_surface, test_to_argb_string, - '"ARGB" rotated to "RGBA" failed') - - for fmt in ('ARGB', 'RGBA'): - fmt_buf = pygame.image.tostring(test_surface, fmt) - test_to_from_fmt_string = pygame.image.fromstring( - fmt_buf, test_surface.get_size(), fmt) - - self._assertSurfaceEqual(test_surface, test_to_from_fmt_string, - 'tostring/fromstring functions are not ' - 'symmetric with "{}" format'.format(fmt)) - - def todo_test_frombuffer(self): - - # __doc__ (as of 2008-08-02) for pygame.image.frombuffer: - - # pygame.image.frombuffer(string, size, format): return Surface - # create a new Surface that shares data inside a string buffer - # - # Create a new Surface that shares pixel data directly from the string - # buffer. 
This method takes the same arguments as - # pygame.image.fromstring(), but is unable to vertically flip the - # source data. - # - # This will run much faster than pygame.image.fromstring, since no - # pixel data must be allocated and copied. - - self.fail() - - def todo_test_get_extended(self): - - # __doc__ (as of 2008-08-02) for pygame.image.get_extended: - - # pygame.image.get_extended(): return bool - # test if extended image formats can be loaded - # - # If pygame is built with extended image formats this function will - # return True. It is still not possible to determine which formats - # will be available, but generally you will be able to load them all. - - self.fail() - - def todo_test_load_basic(self): - - # __doc__ (as of 2008-08-02) for pygame.image.load_basic: - - # pygame.image.load(filename): return Surface - # pygame.image.load(fileobj, namehint=): return Surface - # load new image from a file - - self.fail() - - def todo_test_load_extended(self): - - # __doc__ (as of 2008-08-02) for pygame.image.load_extended: - - # pygame module for image transfer - - self.fail() - - def todo_test_save_extended(self): - - # __doc__ (as of 2008-08-02) for pygame.image.save_extended: - - # pygame module for image transfer - - self.fail() - - def threads_load(self, images): - import pygame.threads - for i in range(10): - surfs = pygame.threads.tmap(pygame.image.load, images) - for s in surfs: - self.assertIsInstance(s, pygame.Surface) - - def test_load_png_threads(self): - self.threads_load(glob.glob(example_path("data/*.png"))) - - def test_load_jpg_threads(self): - self.threads_load(glob.glob(example_path("data/*.jpg"))) - - def test_load_bmp_threads(self): - self.threads_load(glob.glob(example_path("data/*.bmp"))) - - def test_load_gif_threads(self): - self.threads_load(glob.glob(example_path("data/*.gif"))) - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/imageext_tags.py 
b/venv/lib/python3.7/site-packages/pygame/tests/imageext_tags.py deleted file mode 100644 index 60df1da..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/imageext_tags.py +++ /dev/null @@ -1,8 +0,0 @@ -__tags__ = [] - -import pygame -import sys -if 'pygame.imageext' not in sys.modules: - __tags__.extend(('ignore', 'subprocess_ignore')) - - diff --git a/venv/lib/python3.7/site-packages/pygame/tests/imageext_test.py b/venv/lib/python3.7/site-packages/pygame/tests/imageext_test.py deleted file mode 100644 index 530dc12..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/imageext_test.py +++ /dev/null @@ -1,90 +0,0 @@ -# -*- coding: utf8 -*- -import os -import os.path -import sys -import unittest - -from pygame.tests.test_utils import example_path -import pygame, pygame.image, pygame.pkgdata -from pygame.compat import as_unicode, unicode_ -imageext = sys.modules['pygame.imageext'] - -class ImageextModuleTest( unittest.TestCase ): - # Most of the testing is done indirectly through image_test.py - # This just confirms file path encoding and error handling. 
- def test_save_non_string_file(self): - im = pygame.Surface((10, 10), 0, 32) - self.assertRaises(TypeError, imageext.save_extended, im, []) - - def test_load_non_string_file(self): - self.assertRaises(pygame.error, imageext.load_extended, []) - - @unittest.skip("SDL silently removes invalid characters") - def test_save_bad_filename(self): - im = pygame.Surface((10, 10), 0, 32) - u = u"a\x00b\x00c.png" - self.assertRaises(pygame.error, imageext.save_extended, im, u) - - @unittest.skip("SDL silently removes invalid characters") - def test_load_bad_filename(self): - u = u"a\x00b\x00c.png" - self.assertRaises(pygame.error, imageext.load_extended, u) - - def test_save_unknown_extension(self): - im = pygame.Surface((10, 10), 0, 32) - s = "foo.bar" - self.assertRaises(pygame.error, imageext.save_extended, im, s) - - def test_load_unknown_extension(self): - s = "foo.bar" - self.assertRaises(pygame.error, imageext.load_extended, s) - - def test_load_unknown_file(self): - s = "nonexistent.png" - self.assertRaises(pygame.error, imageext.load_extended, s) - - def test_load_unicode_path_0(self): - u = unicode_(example_path("data/alien1.png")) - im = imageext.load_extended(u) - - def test_load_unicode_path_1(self): - """non-ASCII unicode""" - import shutil - orig = unicode_(example_path("data/alien1.png")) - temp = os.path.join(unicode_(example_path('data')), u'你好.png') - shutil.copy(orig, temp) - try: - im = imageext.load_extended(temp) - finally: - os.remove(temp) - - def _unicode_save(self, temp_file): - im = pygame.Surface((10, 10), 0, 32) - try: - with open(temp_file, 'w') as f: - pass - os.remove(temp_file) - except IOError: - raise unittest.SkipTest('the path cannot be opened') - - self.assertFalse(os.path.exists(temp_file)) - - try: - imageext.save_extended(im, temp_file) - - self.assertGreater(os.path.getsize(temp_file), 10) - finally: - try: - os.remove(temp_file) - except EnvironmentError: - pass - - def test_save_unicode_path_0(self): - """unicode object with ASCII 
chars""" - self._unicode_save(u"temp_file.png") - - def test_save_unicode_path_1(self): - self._unicode_save(u"你好.png") - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/joystick_test.py b/venv/lib/python3.7/site-packages/pygame/tests/joystick_test.py deleted file mode 100644 index 7d5b328..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/joystick_test.py +++ /dev/null @@ -1,91 +0,0 @@ -import unittest - -class JoystickTypeTest(unittest.TestCase): - def todo_test_Joystick(self): - - # __doc__ (as of 2008-08-02) for pygame.joystick.Joystick: - - # pygame.joystick.Joystick(id): return Joystick - # create a new Joystick object - # - # Create a new joystick to access a physical device. The id argument - # must be a value from 0 to pygame.joystick.get_count()-1. - # - # To access most of the Joystick methods, you'll need to init() the - # Joystick. This is separate from making sure the joystick module is - # initialized. When multiple Joysticks objects are created for the - # same physical joystick device (i.e., they have the same ID number), - # the state and values for those Joystick objects will be shared. - # - # The Joystick object allows you to get information about the types of - # controls on a joystick device. Once the device is initialized the - # Pygame event queue will start receiving events about its input. - # - # You can call the Joystick.get_name() and Joystick.get_id() functions - # without initializing the Joystick object. - # - - self.fail() - -class JoytickModuleTest(unittest.TestCase): - def todo_test_get_count(self): - - # __doc__ (as of 2008-08-02) for pygame.joystick.get_count: - - # pygame.joystick.get_count(): return count - # number of joysticks on the system - # - # Return the number of joystick devices on the system. The count will - # be 0 if there are no joysticks on the system. 
- # - # When you create Joystick objects using Joystick(id), you pass an - # integer that must be lower than this count. - # - - self.fail() - - def todo_test_get_init(self): - - # __doc__ (as of 2008-08-02) for pygame.joystick.get_init: - - # pygame.joystick.get_init(): return bool - # true if the joystick module is initialized - # - # Test if the pygame.joystick.init() function has been called. - - self.fail() - - def todo_test_init(self): - - # __doc__ (as of 2008-08-02) for pygame.joystick.init: - - # pygame.joystick.init(): return None - # initialize the joystick module - # - # This function is called automatically by pygame.init(). - # It initializes the joystick module. This will scan the system for - # all joystick devices. The module must be initialized before any - # other functions will work. - # - # It is safe to call this function more than once. - - self.fail() - - def todo_test_quit(self): - - # __doc__ (as of 2008-08-02) for pygame.joystick.quit: - - # pygame.joystick.quit(): return None - # uninitialize the joystick module - # - # Uninitialize the joystick module. After you call this any existing - # joystick objects will no longer work. - # - # It is safe to call this function more than once. - - self.fail() - -################################################################################ - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/key_test.py b/venv/lib/python3.7/site-packages/pygame/tests/key_test.py deleted file mode 100644 index 0163c7d..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/key_test.py +++ /dev/null @@ -1,68 +0,0 @@ -import unittest -import pygame -import pygame.key - - -class KeyModuleTest(unittest.TestCase): - @classmethod - def setUpClass(cls): - pygame.init() - - @classmethod - def tearDownClass(cls): - pygame.quit() - - def setUp(cls): - # This makes sure pygame is always initialized before each test (in - # case a test calls pygame.quit()). 
- if not pygame.get_init(): - pygame.init() - - def test_import(self): - 'does it import' - import pygame.key - - def todo_test_get_focused(self): - - # __doc__ (as of 2008-08-02) for pygame.key.get_focused: - - # pygame.key.get_focused(): return bool - # true if the display is receiving keyboard input from the system - # - # This is true when the display window has keyboard focus from the - # system. If the display needs to ensure it does not lose keyboard - # focus, it can use pygame.event.set_grab() to grab all input. - # - - self.fail() - - def test_get_pressed(self): - states = pygame.key.get_pressed() - self.assertEqual(states[pygame.K_RIGHT], 0) - - def test_name(self): - self.assertEqual(pygame.key.name(pygame.K_RETURN), "return") - self.assertEqual(pygame.key.name(pygame.K_0), "0") - self.assertEqual(pygame.key.name(pygame.K_SPACE), "space") - - def test_set_and_get_mods(self): - pygame.key.set_mods(pygame.KMOD_CTRL) - self.assertEqual(pygame.key.get_mods(), pygame.KMOD_CTRL) - - pygame.key.set_mods(pygame.KMOD_ALT) - self.assertEqual(pygame.key.get_mods(), pygame.KMOD_ALT) - pygame.key.set_mods(pygame.KMOD_CTRL | pygame.KMOD_ALT) - self.assertEqual(pygame.key.get_mods(), pygame.KMOD_CTRL | pygame.KMOD_ALT) - - def test_set_and_get_repeat(self): - self.assertEqual(pygame.key.get_repeat(), (0, 0)) - - pygame.key.set_repeat(10, 15) - self.assertEqual(pygame.key.get_repeat(), (10, 15)) - - pygame.key.set_repeat() - self.assertEqual(pygame.key.get_repeat(), (0, 0)) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/mask_test.py b/venv/lib/python3.7/site-packages/pygame/tests/mask_test.py deleted file mode 100644 index f7a508c..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/mask_test.py +++ /dev/null @@ -1,2050 +0,0 @@ -from collections import OrderedDict -import random -import unittest - -import pygame -import pygame.mask -from pygame.locals import * - - -def random_mask(size = (100,100)): 
- """random_mask(size=(100,100)): return Mask - Create a mask of the given size, with roughly half the bits set at random.""" - m = pygame.Mask(size) - for i in range(size[0] * size[1] // 2): - x, y = random.randint(0,size[0] - 1), random.randint(0, size[1] - 1) - m.set_at((x,y)) - return m - -def maskFromSurface(surface, threshold = 127): - mask = pygame.Mask(surface.get_size()) - key = surface.get_colorkey() - if key: - for y in range(surface.get_height()): - for x in range(surface.get_width()): - if surface.get_at((x+0.1,y+0.1)) != key: - mask.set_at((x,y),1) - else: - for y in range(surface.get_height()): - for x in range (surface.get_width()): - if surface.get_at((x,y))[3] > threshold: - mask.set_at((x,y),1) - return mask - - -class MaskTypeTest(unittest.TestCase): - ORIGIN_OFFSETS = ((0, 0), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1), - (-1, -1), (-1, 0), (-1, 1)) - - def _assertMaskEqual(self, m1, m2, msg=None): - # Checks to see if the 2 given masks are equal. - m1_count = m1.count() - - self.assertEqual(m1.get_size(), m2.get_size(), msg=msg) - self.assertEqual(m1_count, m2.count(), msg=msg) - self.assertEqual(m1_count, m1.overlap_area(m2, (0, 0)), msg=msg) - - # This can be used to help debug exact locations. 
- ##for i in range(m1.get_size()[0]): - ## for j in range(m1.get_size()[1]): - ## self.assertEqual(m1.get_at((i, j)), m2.get_at((i, j))) - - def test_mask(self): - """Ensure masks are created correctly without fill parameter.""" - expected_count = 0 - expected_size = (11, 23) - mask1 = pygame.mask.Mask(expected_size) - mask2 = pygame.mask.Mask(size=expected_size) - - self.assertEqual(mask1.count(), expected_count) - self.assertEqual(mask1.get_size(), expected_size) - - self.assertEqual(mask2.count(), expected_count) - self.assertEqual(mask2.get_size(), expected_size) - - def test_mask__negative_size(self): - """Ensure the mask constructor handles negative sizes correctly.""" - for size in ((1, -1), (-1, 1), (-1, -1)): - with self.assertRaises(ValueError): - mask = pygame.Mask(size) - - def test_mask__fill_kwarg(self): - """Ensure masks are created correctly using the fill keyword.""" - width, height = 37, 47 - expected_size = (width, height) - fill_counts = {True : width * height, False : 0 } - - for fill, expected_count in fill_counts.items(): - msg = 'fill={}'.format(fill) - - mask = pygame.mask.Mask(expected_size, fill=fill) - - self.assertEqual(mask.count(), expected_count, msg) - self.assertEqual(mask.get_size(), expected_size, msg) - - def test_mask__fill_arg(self): - """Ensure masks are created correctly using a fill arg.""" - width, height = 59, 71 - expected_size = (width, height) - fill_counts = {True : width * height, False : 0 } - - for fill, expected_count in fill_counts.items(): - msg = 'fill={}'.format(fill) - - mask = pygame.mask.Mask(expected_size, fill) - - self.assertEqual(mask.count(), expected_count, msg) - self.assertEqual(mask.get_size(), expected_size, msg) - - def test_mask__size_kwarg(self): - """Ensure masks are created correctly using the size keyword.""" - width, height = 73, 83 - expected_size = (width, height) - fill_counts = {True : width * height, False : 0 } - - for fill, expected_count in fill_counts.items(): - msg = 
'fill={}'.format(fill) - - mask1 = pygame.mask.Mask(fill=fill, size=expected_size) - mask2 = pygame.mask.Mask(size=expected_size, fill=fill) - - self.assertEqual(mask1.count(), expected_count, msg) - self.assertEqual(mask2.count(), expected_count, msg) - self.assertEqual(mask1.get_size(), expected_size, msg) - self.assertEqual(mask2.get_size(), expected_size, msg) - - def test_get_size(self): - """Ensure a mask's size is correctly retrieved.""" - expected_size = (93, 101) - mask = pygame.mask.Mask(expected_size) - - self.assertEqual(mask.get_size(), expected_size) - - def test_get_at(self): - """Ensure individual mask bits are correctly retrieved.""" - width, height = 5, 7 - mask0 = pygame.mask.Mask((width, height)) - mask1 = pygame.mask.Mask((width, height), fill=True) - mask0_expected_bit = 0 - mask1_expected_bit = 1 - pos = (width - 1, height - 1) - - # Check twice to make sure bits aren't toggled. - self.assertEqual(mask0.get_at(pos), mask0_expected_bit) - self.assertEqual(mask0.get_at(pos), mask0_expected_bit) - self.assertEqual(mask1.get_at(pos), mask1_expected_bit) - self.assertEqual(mask1.get_at(pos), mask1_expected_bit) - - def test_get_at__out_of_bounds(self): - """Ensure get_at() checks bounds.""" - width, height = 11, 3 - mask = pygame.mask.Mask((width, height)) - - with self.assertRaises(IndexError): - mask.get_at((width, 0)) - - with self.assertRaises(IndexError): - mask.get_at((0, height)) - - with self.assertRaises(IndexError): - mask.get_at((-1, 0)) - - with self.assertRaises(IndexError): - mask.get_at((0, -1)) - - def test_set_at(self): - """Ensure individual mask bits are set to 1.""" - width, height = 13, 17 - mask0 = pygame.mask.Mask((width, height)) - mask1 = pygame.mask.Mask((width, height), fill=True) - mask0_expected_count = 1 - mask1_expected_count = mask1.count() - expected_bit = 1 - pos = (width - 1, height - 1) - - mask0.set_at(pos, expected_bit) # set 0 to 1 - mask1.set_at(pos, expected_bit) # set 1 to 1 - - 
self.assertEqual(mask0.get_at(pos), expected_bit) - self.assertEqual(mask0.count(), mask0_expected_count) - self.assertEqual(mask1.get_at(pos), expected_bit) - self.assertEqual(mask1.count(), mask1_expected_count) - - def test_set_at__to_0(self): - """Ensure individual mask bits are set to 0.""" - width, height = 11, 7 - mask0 = pygame.mask.Mask((width, height)) - mask1 = pygame.mask.Mask((width, height), fill=True) - mask0_expected_count = 0 - mask1_expected_count = mask1.count() - 1 - expected_bit = 0 - pos = (width - 1, height - 1) - - mask0.set_at(pos, expected_bit) # set 0 to 0 - mask1.set_at(pos, expected_bit) # set 1 to 0 - - self.assertEqual(mask0.get_at(pos), expected_bit) - self.assertEqual(mask0.count(), mask0_expected_count) - self.assertEqual(mask1.get_at(pos), expected_bit) - self.assertEqual(mask1.count(), mask1_expected_count) - - def test_set_at__default_value(self): - """Ensure individual mask bits are set using the default value.""" - width, height = 3, 21 - mask0 = pygame.mask.Mask((width, height)) - mask1 = pygame.mask.Mask((width, height), fill=True) - mask0_expected_count = 1 - mask1_expected_count = mask1.count() - expected_bit = 1 - pos = (width - 1, height - 1) - - mask0.set_at(pos) # set 0 to 1 - mask1.set_at(pos) # set 1 to 1 - - self.assertEqual(mask0.get_at(pos), expected_bit) - self.assertEqual(mask0.count(), mask0_expected_count) - self.assertEqual(mask1.get_at(pos), expected_bit) - self.assertEqual(mask1.count(), mask1_expected_count) - - def test_set_at__out_of_bounds(self): - """Ensure set_at() checks bounds.""" - width, height = 11, 3 - mask = pygame.mask.Mask((width, height)) - - with self.assertRaises(IndexError): - mask.set_at((width, 0)) - - with self.assertRaises(IndexError): - mask.set_at((0, height)) - - with self.assertRaises(IndexError): - mask.set_at((-1, 0)) - - with self.assertRaises(IndexError): - mask.set_at((0, -1)) - - def test_overlap(self): - """Ensure the overlap intersection is correctly calculated. 
- - Testing the different combinations of full/empty masks: - (mask1-filled) 1 overlap 1 (mask2-filled) - (mask1-empty) 0 overlap 1 (mask2-filled) - (mask1-filled) 1 overlap 0 (mask2-empty) - (mask1-empty) 0 overlap 0 (mask2-empty) - """ - expected_size = (4, 4) - offset = (0, 0) - expected_default = None - expected_overlaps = {(True, True) : offset} - - for fill2 in (True, False): - mask2 = pygame.mask.Mask(expected_size, fill=fill2) - mask2_count = mask2.count() - - for fill1 in (True, False): - key = (fill1, fill2) - msg = 'key={}'.format(key) - mask1 = pygame.mask.Mask(expected_size, fill=fill1) - mask1_count = mask1.count() - expected_pos = expected_overlaps.get(key, expected_default) - - overlap_pos = mask1.overlap(mask2, offset) - - self.assertEqual(overlap_pos, expected_pos, msg) - - # Ensure mask1/mask2 unchanged. - self.assertEqual(mask1.count(), mask1_count, msg) - self.assertEqual(mask2.count(), mask2_count, msg) - self.assertEqual(mask1.get_size(), expected_size, msg) - self.assertEqual(mask2.get_size(), expected_size, msg) - - def test_overlap__offset(self): - """Ensure an offset overlap intersection is correctly calculated.""" - mask1 = pygame.mask.Mask((65, 3), fill=True) - mask2 = pygame.mask.Mask((66, 4), fill=True) - mask1_count = mask1.count() - mask2_count = mask2.count() - mask1_size = mask1.get_size() - mask2_size = mask2.get_size() - - for offset in self.ORIGIN_OFFSETS: - msg = 'offset={}'.format(offset) - expected_pos = (max(offset[0], 0), max(offset[1], 0)) - - overlap_pos = mask1.overlap(mask2, offset) - - self.assertEqual(overlap_pos, expected_pos, msg) - - # Ensure mask1/mask2 unchanged. 
- self.assertEqual(mask1.count(), mask1_count, msg) - self.assertEqual(mask2.count(), mask2_count, msg) - self.assertEqual(mask1.get_size(), mask1_size, msg) - self.assertEqual(mask2.get_size(), mask2_size, msg) - - def test_overlap__offset_with_unset_bits(self): - """Ensure an offset overlap intersection is correctly calculated - when (0, 0) bits not set.""" - mask1 = pygame.mask.Mask((65, 3), fill=True) - mask2 = pygame.mask.Mask((66, 4), fill=True) - unset_pos = (0, 0) - mask1.set_at(unset_pos, 0) - mask2.set_at(unset_pos, 0) - mask1_count = mask1.count() - mask2_count = mask2.count() - mask1_size = mask1.get_size() - mask2_size = mask2.get_size() - - for offset in self.ORIGIN_OFFSETS: - msg = 'offset={}'.format(offset) - x, y = offset - expected_y = max(y, 0) - if 0 == y: - expected_x = max(x + 1, 1) - elif 0 < y: - expected_x = max(x + 1, 0) - else: - expected_x = max(x, 1) - - overlap_pos = mask1.overlap(mask2, offset) - - self.assertEqual(overlap_pos, (expected_x, expected_y), msg) - - # Ensure mask1/mask2 unchanged. - self.assertEqual(mask1.count(), mask1_count, msg) - self.assertEqual(mask2.count(), mask2_count, msg) - self.assertEqual(mask1.get_size(), mask1_size, msg) - self.assertEqual(mask2.get_size(), mask2_size, msg) - self.assertEqual(mask1.get_at(unset_pos), 0, msg) - self.assertEqual(mask2.get_at(unset_pos), 0, msg) - - def test_overlap__no_overlap(self): - """Ensure an offset overlap intersection is correctly calculated - when there is no overlap.""" - mask1 = pygame.mask.Mask((65, 3), fill=True) - mask1_count = mask1.count() - mask1_size = mask1.get_size() - - mask2_w, mask2_h = 67, 5 - mask2_size = (mask2_w, mask2_h) - mask2 = pygame.mask.Mask(mask2_size) - set_pos = (mask2_w - 1, mask2_h - 1) - mask2.set_at(set_pos) - mask2_count = 1 - - for offset in self.ORIGIN_OFFSETS: - msg = 'offset={}'.format(offset) - - overlap_pos = mask1.overlap(mask2, offset) - - self.assertIsNone(overlap_pos, msg) - - # Ensure mask1/mask2 unchanged. 
- self.assertEqual(mask1.count(), mask1_count, msg) - self.assertEqual(mask2.count(), mask2_count, msg) - self.assertEqual(mask1.get_size(), mask1_size, msg) - self.assertEqual(mask2.get_size(), mask2_size, msg) - self.assertEqual(mask2.get_at(set_pos), 1, msg) - - def test_overlap__offset_boundary(self): - """Ensures overlap handles offsets and boundaries correctly.""" - mask1 = pygame.mask.Mask((13, 3), fill=True) - mask2 = pygame.mask.Mask((7, 5), fill=True) - mask1_count = mask1.count() - mask2_count = mask2.count() - mask1_size = mask1.get_size() - mask2_size = mask2.get_size() - - # Check the 4 boundaries. - offsets = ((mask1_size[0], 0), # off right - (0, mask1_size[1]), # off bottom - (-mask2_size[0], 0), # off left - (0, -mask2_size[1])) # off top - - for offset in offsets: - msg = 'offset={}'.format(offset) - - overlap_pos = mask1.overlap(mask2, offset) - - self.assertIsNone(overlap_pos, msg) - - # Ensure mask1/mask2 unchanged. - self.assertEqual(mask1.count(), mask1_count, msg) - self.assertEqual(mask2.count(), mask2_count, msg) - self.assertEqual(mask1.get_size(), mask1_size, msg) - self.assertEqual(mask2.get_size(), mask2_size, msg) - - def test_overlap__invalid_mask_arg(self): - """Ensure overlap handles invalid mask arguments correctly.""" - size = (5, 3) - offset = (0, 0) - mask = pygame.mask.Mask(size) - invalid_mask = pygame.Surface(size) - - with self.assertRaises(TypeError): - overlap_pos = mask.overlap(invalid_mask, offset) - - def test_overlap__invalid_offset_arg(self): - """Ensure overlap handles invalid offset arguments correctly.""" - size = (2, 7) - offset = '(0, 0)' - mask1 = pygame.mask.Mask(size) - mask2 = pygame.mask.Mask(size) - - with self.assertRaises(TypeError): - overlap_pos = mask1.overlap(mask2, offset) - - def test_overlap_area(self): - """Ensure the overlap_area is correctly calculated. 
- - Testing the different combinations of full/empty masks: - (mask1-filled) 1 overlap_area 1 (mask2-filled) - (mask1-empty) 0 overlap_area 1 (mask2-filled) - (mask1-filled) 1 overlap_area 0 (mask2-empty) - (mask1-empty) 0 overlap_area 0 (mask2-empty) - """ - expected_size = width, height = (4, 4) - offset = (0, 0) - expected_default = 0 - expected_counts = {(True, True) : width * height} - - for fill2 in (True, False): - mask2 = pygame.mask.Mask(expected_size, fill=fill2) - mask2_count = mask2.count() - - for fill1 in (True, False): - key = (fill1, fill2) - msg = 'key={}'.format(key) - mask1 = pygame.mask.Mask(expected_size, fill=fill1) - mask1_count = mask1.count() - expected_count = expected_counts.get(key, expected_default) - - overlap_count = mask1.overlap_area(mask2, offset) - - self.assertEqual(overlap_count, expected_count, msg) - - # Ensure mask1/mask2 unchanged. - self.assertEqual(mask1.count(), mask1_count, msg) - self.assertEqual(mask2.count(), mask2_count, msg) - self.assertEqual(mask1.get_size(), expected_size, msg) - self.assertEqual(mask2.get_size(), expected_size, msg) - - def test_overlap_area__offset(self): - """Ensure an offset overlap_area is correctly calculated.""" - mask1 = pygame.mask.Mask((65, 3), fill=True) - mask2 = pygame.mask.Mask((66, 4), fill=True) - mask1_count = mask1.count() - mask2_count = mask2.count() - mask1_size = mask1.get_size() - mask2_size = mask2.get_size() - - # Using rects to help determine the overlapping area. - rect1 = pygame.Rect((0, 0), mask1_size) - rect2 = pygame.Rect((0, 0), mask2_size) - - for offset in self.ORIGIN_OFFSETS: - msg = 'offset={}'.format(offset) - rect2.topleft = offset - overlap_rect = rect1.clip(rect2) - expected_count = overlap_rect.w * overlap_rect.h - - overlap_count = mask1.overlap_area(mask2, offset) - - self.assertEqual(overlap_count, expected_count, msg) - - # Ensure mask1/mask2 unchanged. 
- self.assertEqual(mask1.count(), mask1_count, msg) - self.assertEqual(mask2.count(), mask2_count, msg) - self.assertEqual(mask1.get_size(), mask1_size, msg) - self.assertEqual(mask2.get_size(), mask2_size, msg) - - def test_overlap_area__offset_boundary(self): - """Ensures overlap_area handles offsets and boundaries correctly.""" - mask1 = pygame.mask.Mask((11, 3), fill=True) - mask2 = pygame.mask.Mask((5, 7), fill=True) - mask1_count = mask1.count() - mask2_count = mask2.count() - mask1_size = mask1.get_size() - mask2_size = mask2.get_size() - expected_count = 0 - - # Check the 4 boundaries. - offsets = ((mask1_size[0], 0), # off right - (0, mask1_size[1]), # off bottom - (-mask2_size[0], 0), # off left - (0, -mask2_size[1])) # off top - - for offset in offsets: - msg = 'offset={}'.format(offset) - - overlap_count = mask1.overlap_area(mask2, offset) - - self.assertEqual(overlap_count, expected_count, msg) - - # Ensure mask1/mask2 unchanged. - self.assertEqual(mask1.count(), mask1_count, msg) - self.assertEqual(mask2.count(), mask2_count, msg) - self.assertEqual(mask1.get_size(), mask1_size, msg) - self.assertEqual(mask2.get_size(), mask2_size, msg) - - def test_overlap_area__invalid_mask_arg(self): - """Ensure overlap_area handles invalid mask arguments correctly.""" - size = (3, 5) - offset = (0, 0) - mask = pygame.mask.Mask(size) - invalid_mask = pygame.Surface(size) - - with self.assertRaises(TypeError): - overlap_count = mask.overlap_area(invalid_mask, offset) - - def test_overlap_area__invalid_offset_arg(self): - """Ensure overlap_area handles invalid offset arguments correctly.""" - size = (7, 2) - offset = '(0, 0)' - mask1 = pygame.mask.Mask(size) - mask2 = pygame.mask.Mask(size) - - with self.assertRaises(TypeError): - overlap_count = mask1.overlap_area(mask2, offset) - - def test_overlap_mask(self): - """Ensure overlap_mask's mask has correct bits set. 
- - Testing the different combinations of full/empty masks: - (mask1-filled) 1 overlap_mask 1 (mask2-filled) - (mask1-empty) 0 overlap_mask 1 (mask2-filled) - (mask1-filled) 1 overlap_mask 0 (mask2-empty) - (mask1-empty) 0 overlap_mask 0 (mask2-empty) - """ - expected_size = (4, 4) - offset = (0, 0) - expected_default = pygame.mask.Mask(expected_size) - expected_masks = { - (True, True) : pygame.mask.Mask(expected_size, fill=True)} - - for fill2 in (True, False): - mask2 = pygame.mask.Mask(expected_size, fill=fill2) - mask2_count = mask2.count() - - for fill1 in (True, False): - key = (fill1, fill2) - msg = 'key={}'.format(key) - mask1 = pygame.mask.Mask(expected_size, fill=fill1) - mask1_count = mask1.count() - expected_mask = expected_masks.get(key, expected_default) - - overlap_mask = mask1.overlap_mask(mask2, offset) - - self._assertMaskEqual(overlap_mask, expected_mask, msg) - - # Ensure mask1/mask2 unchanged. - self.assertEqual(mask1.count(), mask1_count, msg) - self.assertEqual(mask2.count(), mask2_count, msg) - self.assertEqual(mask1.get_size(), expected_size, msg) - self.assertEqual(mask2.get_size(), expected_size, msg) - - def test_overlap_mask__bits_set(self): - """Ensure overlap_mask's mask has correct bits set.""" - mask1 = pygame.mask.Mask((50, 50), fill=True) - mask2 = pygame.mask.Mask((300, 10), fill=True) - mask1_count = mask1.count() - mask2_count = mask2.count() - mask1_size = mask1.get_size() - mask2_size = mask2.get_size() - - mask3 = mask1.overlap_mask(mask2, (-1, 0)) - - for i in range(50): - for j in range(10): - self.assertEqual(mask3.get_at((i, j)), 1, - '({}, {})'.format(i, j)) - - for i in range(50): - for j in range(11, 50): - self.assertEqual(mask3.get_at((i, j)), 0, - '({}, {})'.format(i, j)) - - # Ensure mask1/mask2 unchanged. 
- self.assertEqual(mask1.count(), mask1_count) - self.assertEqual(mask2.count(), mask2_count) - self.assertEqual(mask1.get_size(), mask1_size) - self.assertEqual(mask2.get_size(), mask2_size) - - def test_overlap_mask__offset(self): - """Ensure an offset overlap_mask's mask is correctly calculated.""" - mask1 = pygame.mask.Mask((65, 3), fill=True) - mask2 = pygame.mask.Mask((66, 4), fill=True) - mask1_count = mask1.count() - mask2_count = mask2.count() - expected_size = mask1.get_size() - mask2_size = mask2.get_size() - - # Using rects to help determine the overlapping area. - rect1 = pygame.Rect((0, 0), expected_size) - rect2 = pygame.Rect((0, 0), mask2_size) - - for offset in self.ORIGIN_OFFSETS: - msg = 'offset={}'.format(offset) - rect2.topleft = offset - overlap_rect = rect1.clip(rect2) - expected_count = overlap_rect.w * overlap_rect.h - - overlap_mask = mask1.overlap_mask(mask2, offset) - - self.assertEqual(overlap_mask.count(), expected_count, msg) - self.assertEqual(overlap_mask.get_size(), expected_size, msg) - - # Ensure mask1/mask2 unchanged. - self.assertEqual(mask1.count(), mask1_count, msg) - self.assertEqual(mask2.count(), mask2_count, msg) - self.assertEqual(mask1.get_size(), expected_size, msg) - self.assertEqual(mask2.get_size(), mask2_size, msg) - - def test_overlap_mask__offset_boundary(self): - """Ensures overlap_mask handles offsets and boundaries correctly.""" - mask1 = pygame.mask.Mask((9, 3), fill=True) - mask2 = pygame.mask.Mask((11, 5), fill=True) - mask1_count = mask1.count() - mask2_count = mask2.count() - mask1_size = mask1.get_size() - mask2_size = mask2.get_size() - expected_count = 0 - expected_size = mask1_size - - # Check the 4 boundaries. 
- offsets = ((mask1_size[0], 0), # off right - (0, mask1_size[1]), # off bottom - (-mask2_size[0], 0), # off left - (0, -mask2_size[1])) # off top - - for offset in offsets: - msg = 'offset={}'.format(offset) - - overlap_mask = mask1.overlap_mask(mask2, offset) - - self.assertEqual(overlap_mask.count(), expected_count, msg) - self.assertEqual(overlap_mask.get_size(), expected_size, msg) - - # Ensure mask1/mask2 unchanged. - self.assertEqual(mask1.count(), mask1_count, msg) - self.assertEqual(mask2.count(), mask2_count, msg) - self.assertEqual(mask1.get_size(), mask1_size, msg) - self.assertEqual(mask2.get_size(), mask2_size, msg) - - def test_overlap_mask__invalid_mask_arg(self): - """Ensure overlap_mask handles invalid mask arguments correctly.""" - size = (3, 2) - offset = (0, 0) - mask = pygame.mask.Mask(size) - invalid_mask = pygame.Surface(size) - - with self.assertRaises(TypeError): - overlap_mask = mask.overlap_mask(invalid_mask, offset) - - def test_overlap_mask__invalid_offset_arg(self): - """Ensure overlap_mask handles invalid offset arguments correctly.""" - size = (5, 2) - offset = '(0, 0)' - mask1 = pygame.mask.Mask(size) - mask2 = pygame.mask.Mask(size) - - with self.assertRaises(TypeError): - overlap_mask = mask1.overlap_mask(mask2, offset) - - def test_mask_access( self ): - """ do the set_at, and get_at parts work correctly? 
- """ - m = pygame.Mask((10,10)) - m.set_at((0,0), 1) - self.assertEqual(m.get_at((0,0)), 1) - m.set_at((9,0), 1) - self.assertEqual(m.get_at((9,0)), 1) - - #s = pygame.Surface((10,10)) - #s.set_at((1,0), (0, 0, 1, 255)) - #self.assertEqual(s.get_at((1,0)), (0, 0, 1, 255)) - #s.set_at((-1,0), (0, 0, 1, 255)) - - # out of bounds, should get IndexError - self.assertRaises(IndexError, lambda : m.get_at((-1,0)) ) - self.assertRaises(IndexError, lambda : m.set_at((-1,0), 1) ) - self.assertRaises(IndexError, lambda : m.set_at((10,0), 1) ) - self.assertRaises(IndexError, lambda : m.set_at((0,10), 1) ) - - def test_fill(self): - """Ensure a mask can be filled.""" - width, height = 11, 23 - expected_count = width * height - expected_size = (width, height) - mask = pygame.mask.Mask(expected_size) - - mask.fill() - - self.assertEqual(mask.count(), expected_count) - self.assertEqual(mask.get_size(), expected_size) - - def test_clear(self): - """Ensure a mask can be cleared.""" - expected_count = 0 - expected_size = (13, 27) - mask = pygame.mask.Mask(expected_size, fill=True) - - mask.clear() - - self.assertEqual(mask.count(), expected_count) - self.assertEqual(mask.get_size(), expected_size) - - def test_invert(self): - """Ensure a mask can be inverted.""" - side = 73 - expected_size = (side, side) - mask1 = pygame.mask.Mask(expected_size) - mask2 = pygame.mask.Mask(expected_size, fill=True) - expected_count1 = side * side - expected_count2 = 0 - - for i in range(side): - expected_count1 -= 1 - expected_count2 += 1 - pos = (i, i) - mask1.set_at(pos) - mask2.set_at(pos, 0) - - mask1.invert() - mask2.invert() - - self.assertEqual(mask1.count(), expected_count1) - self.assertEqual(mask2.count(), expected_count2) - self.assertEqual(mask1.get_size(), expected_size) - self.assertEqual(mask2.get_size(), expected_size) - - for i in range(side): - pos = (i, i) - msg = 'pos={}'.format(pos) - - self.assertEqual(mask1.get_at(pos), 0, msg) - self.assertEqual(mask2.get_at(pos), 1, msg) - - 
def test_invert__full(self): - """Ensure a full mask can be inverted.""" - expected_count = 0 - expected_size = (43, 97) - mask = pygame.mask.Mask(expected_size, fill=True) - - mask.invert() - - self.assertEqual(mask.count(), expected_count) - self.assertEqual(mask.get_size(), expected_size) - - def test_invert__empty(self): - """Ensure an empty mask can be inverted.""" - width, height = 43, 97 - expected_size = (width, height) - expected_count = width * height - mask = pygame.mask.Mask(expected_size) - - mask.invert() - - self.assertEqual(mask.count(), expected_count) - self.assertEqual(mask.get_size(), expected_size) - - def test_scale(self): - """Ensure a mask can be scaled.""" - width, height = 43, 61 - original_size = (width, height) - - for fill in (True, False): - original_mask = pygame.mask.Mask(original_size, fill=fill) - original_count = width * height if fill else 0 - - # Test a range of sizes. Also tests scaling to 'same' - # size when new_w, new_h = width, height - for new_w in range(width - 10, width + 10): - for new_h in range(height - 10, height + 10): - expected_size = (new_w, new_h) - expected_count = new_w * new_h if fill else 0 - msg = 'size={}'.format(expected_size) - - mask = original_mask.scale(expected_size) - - self.assertEqual(mask.count(), expected_count, msg) - self.assertEqual(mask.get_size(), expected_size) - - # Ensure the original mask is unchanged. - self.assertEqual(original_mask.count(), original_count, - msg) - self.assertEqual(original_mask.get_size(), original_size, - msg) - - def test_scale__negative_size(self): - """Ensure scale handles negative sizes correctly.""" - mask = pygame.Mask((100, 100)) - - with self.assertRaises(ValueError): - mask.scale((-1, -1)) - - with self.assertRaises(ValueError): - mask.scale((-1, 10)) - - with self.assertRaises(ValueError): - mask.scale((10, -1)) - - def test_draw(self): - """Ensure a mask can be drawn onto another mask. 
- - Testing the different combinations of full/empty masks: - (mask1-filled) 1 draw 1 (mask2-filled) - (mask1-empty) 0 draw 1 (mask2-filled) - (mask1-filled) 1 draw 0 (mask2-empty) - (mask1-empty) 0 draw 0 (mask2-empty) - """ - expected_size = (4, 4) - offset = (0, 0) - expected_default = pygame.mask.Mask(expected_size, fill=True) - expected_masks = {(False, False) : pygame.mask.Mask(expected_size)} - - for fill2 in (True, False): - mask2 = pygame.mask.Mask(expected_size, fill=fill2) - mask2_count = mask2.count() - - for fill1 in (True, False): - key = (fill1, fill2) - msg = 'key={}'.format(key) - mask1 = pygame.mask.Mask(expected_size, fill=fill1) - expected_mask = expected_masks.get(key, expected_default) - - mask1.draw(mask2, offset) - - self._assertMaskEqual(mask1, expected_mask, msg) - - # Ensure mask2 unchanged. - self.assertEqual(mask2.count(), mask2_count, msg) - self.assertEqual(mask2.get_size(), expected_size, msg) - - def test_draw__offset(self): - """Ensure an offset mask can be drawn onto another mask.""" - mask1 = pygame.mask.Mask((65, 3)) - mask2 = pygame.mask.Mask((66, 4), fill=True) - mask2_count = mask2.count() - mask1_size = mask1.get_size() - mask2_size = mask2.get_size() - - # Using rects to help determine the overlapping area. - rect1 = pygame.Rect((0, 0), mask1_size) - rect2 = pygame.Rect((0, 0), mask2_size) - - for offset in self.ORIGIN_OFFSETS: - msg = 'offset={}'.format(offset) - rect2.topleft = offset - overlap_rect = rect1.clip(rect2) - expected_count = overlap_rect.w * overlap_rect.h - mask1.clear() # Ensure it's empty for testing each offset. - - mask1.draw(mask2, offset) - - self.assertEqual(mask1.count(), expected_count, msg) - self.assertEqual(mask1.get_size(), mask1_size, msg) - - # Ensure mask2 unchanged. 
- self.assertEqual(mask2.count(), mask2_count, msg) - self.assertEqual(mask2.get_size(), mask2_size, msg) - - def test_draw__offset_boundary(self): - """Ensures draw handles offsets and boundaries correctly.""" - mask1 = pygame.mask.Mask((13, 5)) - mask2 = pygame.mask.Mask((7, 3), fill=True) - mask1_count = mask1.count() - mask2_count = mask2.count() - mask1_size = mask1.get_size() - mask2_size = mask2.get_size() - - # Check the 4 boundaries. - offsets = ((mask1_size[0], 0), # off right - (0, mask1_size[1]), # off bottom - (-mask2_size[0], 0), # off left - (0, -mask2_size[1])) # off top - - for offset in offsets: - msg = 'offset={}'.format(offset) - - mask1.draw(mask2, offset) - - # Ensure mask1/mask2 unchanged. - self.assertEqual(mask1.count(), mask1_count, msg) - self.assertEqual(mask2.count(), mask2_count, msg) - self.assertEqual(mask1.get_size(), mask1_size, msg) - self.assertEqual(mask2.get_size(), mask2_size, msg) - - def test_draw__invalid_mask_arg(self): - """Ensure draw handles invalid mask arguments correctly.""" - size = (7, 3) - offset = (0, 0) - mask = pygame.mask.Mask(size) - invalid_mask = pygame.Surface(size) - - with self.assertRaises(TypeError): - mask.draw(invalid_mask, offset) - - def test_draw__invalid_offset_arg(self): - """Ensure draw handles invalid offset arguments correctly.""" - size = (5, 7) - offset = '(0, 0)' - mask1 = pygame.mask.Mask(size) - mask2 = pygame.mask.Mask(size) - - with self.assertRaises(TypeError): - mask1.draw(mask2, offset) - - def test_erase(self): - """Ensure a mask can erase another mask. 
- - Testing the different combinations of full/empty masks: - (mask1-filled) 1 erase 1 (mask2-filled) - (mask1-empty) 0 erase 1 (mask2-filled) - (mask1-filled) 1 erase 0 (mask2-empty) - (mask1-empty) 0 erase 0 (mask2-empty) - """ - expected_size = (4, 4) - offset = (0, 0) - expected_default = pygame.mask.Mask(expected_size) - expected_masks = { - (True, False) : pygame.mask.Mask(expected_size, fill=True)} - - for fill2 in (True, False): - mask2 = pygame.mask.Mask(expected_size, fill=fill2) - mask2_count = mask2.count() - - for fill1 in (True, False): - key = (fill1, fill2) - msg = 'key={}'.format(key) - mask1 = pygame.mask.Mask(expected_size, fill=fill1) - expected_mask = expected_masks.get(key, expected_default) - - mask1.erase(mask2, offset) - - self._assertMaskEqual(mask1, expected_mask, msg) - - # Ensure mask2 unchanged. - self.assertEqual(mask2.count(), mask2_count, msg) - self.assertEqual(mask2.get_size(), expected_size, msg) - - def test_erase__offset(self): - """Ensure an offset mask can erase another mask.""" - mask1 = pygame.mask.Mask((65, 3)) - mask2 = pygame.mask.Mask((66, 4), fill=True) - mask2_count = mask2.count() - mask1_size = mask1.get_size() - mask2_size = mask2.get_size() - - # Using rects to help determine the overlapping area. - rect1 = pygame.Rect((0, 0), mask1_size) - rect2 = pygame.Rect((0, 0), mask2_size) - rect1_area = rect1.w * rect1.h - - for offset in self.ORIGIN_OFFSETS: - msg = 'offset={}'.format(offset) - rect2.topleft = offset - overlap_rect = rect1.clip(rect2) - expected_count = rect1_area - (overlap_rect.w * overlap_rect.h) - mask1.fill() # Ensure it's filled for testing each offset. - - mask1.erase(mask2, offset) - - self.assertEqual(mask1.count(), expected_count, msg) - self.assertEqual(mask1.get_size(), mask1_size, msg) - - # Ensure mask2 unchanged. 
- self.assertEqual(mask2.count(), mask2_count, msg) - self.assertEqual(mask2.get_size(), mask2_size, msg) - - def test_erase__offset_boundary(self): - """Ensures erase handles offsets and boundaries correctly.""" - mask1 = pygame.mask.Mask((7, 11), fill=True) - mask2 = pygame.mask.Mask((3, 13), fill=True) - mask1_count = mask1.count() - mask2_count = mask2.count() - mask1_size = mask1.get_size() - mask2_size = mask2.get_size() - - # Check the 4 boundaries. - offsets = ((mask1_size[0], 0), # off right - (0, mask1_size[1]), # off bottom - (-mask2_size[0], 0), # off left - (0, -mask2_size[1])) # off top - - for offset in offsets: - msg = 'offset={}'.format(offset) - - mask1.erase(mask2, offset) - - # Ensure mask1/mask2 unchanged. - self.assertEqual(mask1.count(), mask1_count, msg) - self.assertEqual(mask2.count(), mask2_count, msg) - self.assertEqual(mask1.get_size(), mask1_size, msg) - self.assertEqual(mask2.get_size(), mask2_size, msg) - - def test_erase__invalid_mask_arg(self): - """Ensure erase handles invalid mask arguments correctly.""" - size = (3, 7) - offset = (0, 0) - mask = pygame.mask.Mask(size) - invalid_mask = pygame.Surface(size) - - with self.assertRaises(TypeError): - mask.erase(invalid_mask, offset) - - def test_erase__invalid_offset_arg(self): - """Ensure erase handles invalid offset arguments correctly.""" - size = (7, 5) - offset = '(0, 0)' - mask1 = pygame.mask.Mask(size) - mask2 = pygame.mask.Mask(size) - - with self.assertRaises(TypeError): - mask1.erase(mask2, offset) - - def test_count(self): - """Ensure a mask's set bits are correctly counted.""" - side = 67 - expected_size = (side, side) - expected_count = 0 - mask = pygame.mask.Mask(expected_size) - - for i in range(side): - expected_count += 1 - mask.set_at((i, i)) - - count = mask.count() - - self.assertEqual(count, expected_count) - self.assertEqual(mask.get_size(), expected_size) - - def test_count__full_mask(self): - """Ensure a full mask's set bits are correctly counted.""" - width, 
height = 17, 97 - expected_size = (width, height) - expected_count = width * height - mask = pygame.mask.Mask(expected_size, fill=True) - - count = mask.count() - - self.assertEqual(count, expected_count) - self.assertEqual(mask.get_size(), expected_size) - - def test_count__empty_mask(self): - """Ensure an empty mask's set bits are correctly counted.""" - expected_count = 0 - expected_size = (13, 27) - mask = pygame.mask.Mask(expected_size) - - count = mask.count() - - self.assertEqual(count, expected_count) - self.assertEqual(mask.get_size(), expected_size) - - def todo_test_centroid(self): - """Ensure a mask's centroid is correctly calculated.""" - self.fail() - - def test_centroid__empty_mask(self): - """Ensure an empty mask's centroid is correctly calculated.""" - expected_centroid = (0, 0) - expected_size = (101, 103) - mask = pygame.mask.Mask(expected_size) - - centroid = mask.centroid() - - self.assertEqual(centroid, expected_centroid) - self.assertEqual(mask.get_size(), expected_size) - - def todo_test_angle(self): - """Ensure a mask's orientation angle is correctly calculated.""" - self.fail() - - def test_angle__empty_mask(self): - """Ensure an empty mask's angle is correctly calculated.""" - expected_angle = 0.0 - expected_size = (107, 43) - mask = pygame.mask.Mask(expected_size) - - angle = mask.angle() - - self.assertIsInstance(angle, float) - self.assertAlmostEqual(angle, expected_angle) - self.assertEqual(mask.get_size(), expected_size) - - def test_drawing(self): - """ Test fill, clear, invert, draw, erase - """ - m = pygame.Mask((100,100)) - self.assertEqual(m.count(), 0) - - m.fill() - self.assertEqual(m.count(), 10000) - - m2 = pygame.Mask((10, 10), fill=True) - m.erase(m2, (50,50)) - self.assertEqual(m.count(), 9900) - - m.invert() - self.assertEqual(m.count(), 100) - - m.draw(m2, (0,0)) - self.assertEqual(m.count(), 200) - - m.clear() - self.assertEqual(m.count(), 0) - - def test_outline(self): - """ - """ - - m = pygame.Mask((20,20)) - 
self.assertEqual(m.outline(), []) - - m.set_at((10,10), 1) - self.assertEqual(m.outline(), [(10,10)]) - - m.set_at((10,12), 1) - self.assertEqual(m.outline(10), [(10,10)]) - - m.set_at((11,11), 1) - self.assertEqual(m.outline(), [(10,10), (11,11), (10,12), (11,11), (10,10)]) - self.assertEqual(m.outline(2), [(10,10), (10,12), (10,10)]) - - #TODO: Test more corner case outlines. - - def test_convolve__size(self): - sizes = [(1,1), (31,31), (32,32), (100,100)] - for s1 in sizes: - m1 = pygame.Mask(s1) - for s2 in sizes: - m2 = pygame.Mask(s2) - o = m1.convolve(m2) - for i in (0,1): - self.assertEqual(o.get_size()[i], - m1.get_size()[i] + m2.get_size()[i] - 1) - - def test_convolve__point_identities(self): - """Convolving with a single point is the identity, while convolving a point with something flips it.""" - m = random_mask((100,100)) - k = pygame.Mask((1,1)) - k.set_at((0,0)) - - self._assertMaskEqual(m, m.convolve(k)) - self._assertMaskEqual(m, k.convolve(k.convolve(m))) - - def test_convolve__with_output(self): - """checks that convolution modifies only the correct portion of the output""" - - m = random_mask((10,10)) - k = pygame.Mask((2,2)) - k.set_at((0,0)) - - o = pygame.Mask((50,50)) - test = pygame.Mask((50,50)) - - m.convolve(k,o) - test.draw(m,(1,1)) - self._assertMaskEqual(o, test) - - o.clear() - test.clear() - - m.convolve(k,o, (10,10)) - test.draw(m,(11,11)) - self._assertMaskEqual(o, test) - - def test_convolve__out_of_range(self): - full = pygame.Mask((2, 2), fill=True) - - self.assertEqual(full.convolve(full, None, ( 0, 3)).count(), 0) - self.assertEqual(full.convolve(full, None, ( 0, 2)).count(), 3) - self.assertEqual(full.convolve(full, None, (-2, -2)).count(), 1) - self.assertEqual(full.convolve(full, None, (-3, -3)).count(), 0) - - def test_convolve(self): - """Tests the definition of convolution""" - m1 = random_mask((100,100)) - m2 = random_mask((100,100)) - conv = m1.convolve(m2) - - for i in range(conv.get_size()[0]): - for j in 
range(conv.get_size()[1]): - self.assertEqual(conv.get_at((i,j)) == 0, - m1.overlap(m2, (i - 99, j - 99)) is None) - - def _draw_component_pattern_box(self, mask, size, pos, inverse=False): - # Helper method to create/draw a 'box' pattern for testing. - # - # 111 - # 101 3x3 example pattern - # 111 - pattern = pygame.mask.Mask((size, size), fill=True) - pattern.set_at((size // 2, size // 2), 0) - - if inverse: - mask.erase(pattern, pos) - pattern.invert() - else: - mask.draw(pattern, pos) - - return pattern - - def _draw_component_pattern_x(self, mask, size, pos, inverse=False): - # Helper method to create/draw an 'X' pattern for testing. - # - # 101 - # 010 3x3 example pattern - # 101 - pattern = pygame.mask.Mask((size, size)) - - ymax = size - 1 - for y in range(size): - for x in range(size): - if x == y or x == ymax - y: - pattern.set_at((x, y)) - - if inverse: - mask.erase(pattern, pos) - pattern.invert() - else: - mask.draw(pattern, pos) - - return pattern - - def _draw_component_pattern_plus(self, mask, size, pos, inverse=False): - # Helper method to create/draw a '+' pattern for testing. - # - # 010 - # 111 3x3 example pattern - # 010 - pattern = pygame.mask.Mask((size, size)) - - xmid = ymid = size // 2 - for y in range(size): - for x in range(size): - if x == xmid or y == ymid: - pattern.set_at((x, y)) - - if inverse: - mask.erase(pattern, pos) - pattern.invert() - else: - mask.draw(pattern, pos) - - return pattern - - def test_connected_component(self): - """Ensure a mask's connected component is correctly calculated.""" - width, height = 41, 27 - expected_size = (width, height) - original_mask = pygame.mask.Mask(expected_size) - patterns = [] # Patterns and offsets. - - # Draw some connected patterns on the original mask. 
- offset = (0, 0) - pattern = self._draw_component_pattern_x(original_mask, 3, offset) - patterns.append((pattern, offset)) - - size = 4 - offset = (width - size, 0) - pattern = self._draw_component_pattern_plus(original_mask, size, - offset) - patterns.append((pattern, offset)) - - # Make this one the largest connected component. - offset = (width // 2, height // 2) - pattern = self._draw_component_pattern_box(original_mask, 7, offset) - patterns.append((pattern, offset)) - - expected_pattern, expected_offset = patterns[-1] - expected_count = expected_pattern.count() - original_count = sum(p.count() for p, _ in patterns) - - mask = original_mask.connected_component() - - self.assertEqual(mask.count(), expected_count) - self.assertEqual(mask.get_size(), expected_size) - self.assertEqual(mask.overlap_area(expected_pattern, expected_offset), - expected_count) - - # Ensure the original mask is unchanged. - self.assertEqual(original_mask.count(), original_count) - self.assertEqual(original_mask.get_size(), expected_size) - - for pattern, offset in patterns: - self.assertEqual(original_mask.overlap_area(pattern, offset), - pattern.count()) - - def test_connected_component__full_mask(self): - """Ensure a mask's connected component is correctly calculated - when the mask is full.""" - expected_size = (23, 31) - original_mask = pygame.mask.Mask(expected_size, fill=True) - expected_count = original_mask.count() - - mask = original_mask.connected_component() - - self.assertEqual(mask.count(), expected_count) - self.assertEqual(mask.get_size(), expected_size) - - # Ensure the original mask is unchanged. 
- self.assertEqual(original_mask.count(), expected_count) - self.assertEqual(original_mask.get_size(), expected_size) - - def test_connected_component__empty_mask(self): - """Ensure a mask's connected component is correctly calculated - when the mask is empty.""" - expected_size = (37, 43) - original_mask = pygame.mask.Mask(expected_size) - original_count = original_mask.count() - expected_count = 0 - - mask = original_mask.connected_component() - - self.assertEqual(mask.count(), expected_count) - self.assertEqual(mask.get_size(), expected_size) - - # Ensure the original mask is unchanged. - self.assertEqual(original_mask.count(), original_count) - self.assertEqual(original_mask.get_size(), expected_size) - - def test_connected_component__one_set_bit(self): - """Ensure a mask's connected component is correctly calculated - when the coordinate's bit is set with a connected component of 1 bit. - """ - width, height = 71, 67 - expected_size = (width, height) - original_mask = pygame.mask.Mask(expected_size, fill=True) - xset, yset = width // 2, height // 2 - set_pos = (xset, yset) - expected_offset = (xset - 1, yset - 1) - - # This isolates the bit at set_pos from all the other bits. - expected_pattern = self._draw_component_pattern_box(original_mask, 3, - expected_offset, inverse=True) - expected_count = 1 - original_count = original_mask.count() - - mask = original_mask.connected_component(set_pos) - - self.assertEqual(mask.count(), expected_count) - self.assertEqual(mask.get_size(), expected_size) - self.assertEqual(mask.overlap_area(expected_pattern, expected_offset), - expected_count) - - # Ensure the original mask is unchanged. 
- self.assertEqual(original_mask.count(), original_count) - self.assertEqual(original_mask.get_size(), expected_size) - self.assertEqual(original_mask.overlap_area( - expected_pattern, expected_offset), expected_count) - - def test_connected_component__multi_set_bits(self): - """Ensure a mask's connected component is correctly calculated - when the coordinate's bit is set with a connected component of > 1 bit. - """ - expected_size = (113, 67) - original_mask = pygame.mask.Mask(expected_size) - p_width, p_height = 11, 13 - set_pos = xset, yset = 11, 21 - expected_offset = (xset - 1, yset - 1) - expected_pattern = pygame.mask.Mask((p_width, p_height), fill=True) - - # Make an unsymmetrical pattern. All the set bits need to be connected - # in the resulting pattern for this to work properly. - for y in range(3, p_height): - for x in range(1, p_width): - if x == y or x == y - 3 or x == p_width - 4: - expected_pattern.set_at((x, y), 0) - - expected_count = expected_pattern.count() - original_mask.draw(expected_pattern, expected_offset) - - mask = original_mask.connected_component(set_pos) - - self.assertEqual(mask.count(), expected_count) - self.assertEqual(mask.get_size(), expected_size) - self.assertEqual(mask.overlap_area(expected_pattern, expected_offset), - expected_count) - - # Ensure the original mask is unchanged. - self.assertEqual(original_mask.count(), expected_count) - self.assertEqual(original_mask.get_size(), expected_size) - self.assertEqual(original_mask.overlap_area( - expected_pattern, expected_offset), expected_count) - - def test_connected_component__unset_bit(self): - """Ensure a mask's connected component is correctly calculated - when the coordinate's bit is unset. 
- """ - width, height = 109, 101 - expected_size = (width, height) - original_mask = pygame.mask.Mask(expected_size, fill=True) - unset_pos = (width // 2, height // 2) - original_mask.set_at(unset_pos, 0) - original_count = original_mask.count() - expected_count = 0 - - mask = original_mask.connected_component(unset_pos) - - self.assertEqual(mask.count(), expected_count) - self.assertEqual(mask.get_size(), expected_size) - - # Ensure the original mask is unchanged. - self.assertEqual(original_mask.count(), original_count) - self.assertEqual(original_mask.get_size(), expected_size) - self.assertEqual(original_mask.get_at(unset_pos), 0) - - def test_connected_component__out_of_bounds(self): - """Ensure connected_component() checks bounds.""" - width, height = 19, 11 - original_size = (width, height) - original_mask = pygame.mask.Mask(original_size, fill=True) - original_count = original_mask.count() - - for pos in ((0, -1), (-1, 0), (0, height + 1), (width + 1, 0)): - with self.assertRaises(IndexError): - mask = original_mask.connected_component(pos) - - # Ensure the original mask is unchanged. 
- self.assertEqual(original_mask.count(), original_count) - self.assertEqual(original_mask.get_size(), original_size) - - def test_connected_components(self): - """ - """ - - m = pygame.Mask((10,10)) - self.assertEqual(repr(m.connected_components()), "[]") - - comp = m.connected_component() - self.assertEqual(m.count(), comp.count()) - - m.set_at((0,0), 1) - m.set_at((1,1), 1) - comp = m.connected_component() - comps = m.connected_components() - comps1 = m.connected_components(1) - comps2 = m.connected_components(2) - comps3 = m.connected_components(3) - self.assertEqual(comp.count(), comps[0].count()) - self.assertEqual(comps1[0].count(), 2) - self.assertEqual(comps2[0].count(), 2) - self.assertEqual(repr(comps3), "[]") - - m.set_at((9, 9), 1) - comp = m.connected_component() - comp1 = m.connected_component((1, 1)) - comp2 = m.connected_component((2, 2)) - comps = m.connected_components() - comps1 = m.connected_components(1) - comps2 = m.connected_components(2) - comps3 = m.connected_components(3) - self.assertEqual(comp.count(), 2) - self.assertEqual(comp1.count(), 2) - self.assertEqual(comp2.count(), 0) - self.assertEqual(len(comps), 2) - self.assertEqual(len(comps1), 2) - self.assertEqual(len(comps2), 1) - self.assertEqual(len(comps3), 0) - - def test_get_bounding_rects(self): - """ - """ - - m = pygame.Mask((10,10)) - m.set_at((0,0), 1) - m.set_at((1,0), 1) - - m.set_at((0,1), 1) - - m.set_at((0,3), 1) - m.set_at((3,3), 1) - - r = m.get_bounding_rects() - - self.assertEqual( - repr(r), - "[, , ]") - - #1100 - #1111 - m = pygame.Mask((4,2)) - m.set_at((0,0), 1) - m.set_at((1,0), 1) - m.set_at((2,0), 0) - m.set_at((3,0), 0) - - m.set_at((0,1), 1) - m.set_at((1,1), 1) - m.set_at((2,1), 1) - m.set_at((3,1), 1) - - r = m.get_bounding_rects() - self.assertEqual(repr(r), "[]") - - #00100 - #01110 - #00100 - m = pygame.Mask((5,3)) - m.set_at((0,0), 0) - m.set_at((1,0), 0) - m.set_at((2,0), 1) - m.set_at((3,0), 0) - m.set_at((4,0), 0) - - m.set_at((0,1), 0) - 
m.set_at((1,1), 1) - m.set_at((2,1), 1) - m.set_at((3,1), 1) - m.set_at((4,1), 0) - - m.set_at((0,2), 0) - m.set_at((1,2), 0) - m.set_at((2,2), 1) - m.set_at((3,2), 0) - m.set_at((4,2), 0) - - r = m.get_bounding_rects() - self.assertEqual(repr(r), "[]") - - #00010 - #00100 - #01000 - m = pygame.Mask((5,3)) - m.set_at((0,0), 0) - m.set_at((1,0), 0) - m.set_at((2,0), 0) - m.set_at((3,0), 1) - m.set_at((4,0), 0) - - m.set_at((0,1), 0) - m.set_at((1,1), 0) - m.set_at((2,1), 1) - m.set_at((3,1), 0) - m.set_at((4,1), 0) - - m.set_at((0,2), 0) - m.set_at((1,2), 1) - m.set_at((2,2), 0) - m.set_at((3,2), 0) - m.set_at((4,2), 0) - - r = m.get_bounding_rects() - self.assertEqual(repr(r), "[]") - - #00011 - #11111 - m = pygame.Mask((5,2)) - m.set_at((0,0), 0) - m.set_at((1,0), 0) - m.set_at((2,0), 0) - m.set_at((3,0), 1) - m.set_at((4,0), 1) - - m.set_at((0,1), 1) - m.set_at((1,1), 1) - m.set_at((2,1), 1) - m.set_at((3,1), 1) - m.set_at((3,1), 1) - - r = m.get_bounding_rects() - #TODO: this should really make one bounding rect. 
- #self.assertEqual(repr(r), "[]") - - def test_zero_mask(self): - mask = pygame.mask.Mask((0, 0)) - self.assertEqual(mask.get_size(), (0, 0)) - - mask = pygame.mask.Mask((100, 0)) - self.assertEqual(mask.get_size(), (100, 0)) - - mask = pygame.mask.Mask((0, 100)) - self.assertEqual(mask.get_size(), (0, 100)) - - def test_zero_mask_get_size(self): - """Ensures get_size correctly handles zero sized masks.""" - for expected_size in ((41, 0), (0, 40), (0, 0)): - mask = pygame.mask.Mask(expected_size) - - size = mask.get_size() - - self.assertEqual(size, expected_size) - - def test_zero_mask_get_at(self): - """Ensures get_at correctly handles zero sized masks.""" - for size in ((51, 0), (0, 50), (0, 0)): - mask = pygame.mask.Mask(size) - - with self.assertRaises(IndexError): - value = mask.get_at((0, 0)) - - def test_zero_mask_set_at(self): - """Ensures set_at correctly handles zero sized masks.""" - for size in ((31, 0), (0, 30), (0, 0)): - mask = pygame.mask.Mask(size) - - with self.assertRaises(IndexError): - mask.set_at((0, 0)) - - def test_zero_mask_overlap(self): - sizes = ((100, 0), (0, 100), (0, 0)) - - for size in sizes: - mask = pygame.mask.Mask(size) - mask2 = pygame.mask.Mask((100, 100)) - self.assertEqual(mask.overlap(mask2, (0, 0)), None) - self.assertEqual(mask2.overlap(mask, (0, 0)), None) - - def test_zero_mask_overlap_area(self): - sizes = ((100, 0), (0, 100), (0, 0)) - - for size in sizes: - mask = pygame.mask.Mask(size) - mask2 = pygame.mask.Mask((100, 100)) - self.assertEqual(mask.overlap_area(mask2, (0, 0)), 0) - self.assertEqual(mask2.overlap_area(mask, (0, 0)), 0) - - def test_zero_mask_overlap_mask(self): - sizes = ((100, 0), (0, 100), (0, 0)) - - for size in sizes: - mask = pygame.mask.Mask(size) - mask2 = pygame.mask.Mask((100, 100)) - - overlap_mask = mask.overlap_mask(mask2, (0, 0)) - overlap_mask2 = mask2.overlap_mask(mask, (0, 0)) - - self.assertEqual(mask.get_size(), overlap_mask.get_size()) - self.assertEqual(mask2.get_size(), 
overlap_mask2.get_size()) - - def test_zero_mask_fill(self): - sizes = ((100, 0), (0, 100), (0, 0)) - - for size in sizes: - mask = pygame.mask.Mask(size, fill=True) - self.assertEqual(mask.count(), 0) - - def test_zero_mask_clear(self): - sizes = ((100, 0), (0, 100), (0, 0)) - - for size in sizes: - mask = pygame.mask.Mask(size) - mask.clear() - self.assertEqual(mask.count(), 0) - - def test_zero_mask_flip(self): - sizes = ((100, 0), (0, 100), (0, 0)) - - for size in sizes: - mask = pygame.mask.Mask(size) - mask.invert() - self.assertEqual(mask.count(), 0) - - def test_zero_mask_scale(self): - sizes = ((100, 0), (0, 100), (0, 0)) - - for size in sizes: - mask = pygame.mask.Mask(size) - mask2 = mask.scale((2, 3)) - self.assertEqual(mask2.get_size(), (2, 3)) - - def test_zero_mask_draw(self): - sizes = ((100, 0), (0, 100), (0, 0)) - - for size in sizes: - mask = pygame.mask.Mask(size) - mask2 = pygame.mask.Mask((100, 100), fill=True) - before = [mask2.get_at((x, y)) for x in range(100) for y in range(100)] - mask.draw(mask2, (0, 0)) - after = [mask2.get_at((x, y)) for x in range(100) for y in range(100)] - self.assertEqual(before, after) - - def test_zero_mask_erase(self): - sizes = ((100, 0), (0, 100), (0, 0)) - - for size in sizes: - mask = pygame.mask.Mask(size) - mask2 = pygame.mask.Mask((100, 100), fill=True) - before = [mask2.get_at((x, y)) for x in range(100) for y in range(100)] - mask.erase(mask2, (0, 0)) - after = [mask2.get_at((x, y)) for x in range(100) for y in range(100)] - self.assertEqual(before, after) - - def test_zero_mask_count(self): - sizes = ((100, 0), (0, 100), (0, 0)) - - for size in sizes: - mask = pygame.mask.Mask(size, fill=True) - self.assertEqual(mask.count(), 0) - - def test_zero_mask_centroid(self): - sizes = ((100, 0), (0, 100), (0, 0)) - - for size in sizes: - mask = pygame.mask.Mask(size) - self.assertEqual(mask.centroid(), (0, 0)) - - def test_zero_mask_angle(self): - sizes = ((100, 0), (0, 100), (0, 0)) - - for size in sizes: - 
mask = pygame.mask.Mask(size) - self.assertEqual(mask.angle(), 0.0) - - def test_zero_mask_outline(self): - """Ensures outline correctly handles zero sized masks.""" - expected_points = [] - - for size in ((61, 0), (0, 60), (0, 0)): - mask = pygame.mask.Mask(size) - - points = mask.outline() - - self.assertListEqual(points, expected_points, - 'size={}'.format(size)) - - def test_zero_mask_outline__with_arg(self): - """Ensures outline correctly handles zero sized masks - when using the skip pixels argument.""" - expected_points = [] - - for size in ((66, 0), (0, 65), (0, 0)): - mask = pygame.mask.Mask(size) - - points = mask.outline(10) - - self.assertListEqual(points, expected_points, - 'size={}'.format(size)) - - def test_zero_mask_convolve(self): - """Ensures convolve correctly handles zero sized masks. - - Tests the different combinations of sized and zero sized masks. - """ - for size1 in ((17, 13), (71, 0), (0, 70), (0, 0)): - mask1 = pygame.mask.Mask(size1, fill=True) - - for size2 in ((11, 7), (81, 0), (0, 60), (0, 0)): - msg = 'sizes={}, {}'.format(size1, size2) - mask2 = pygame.mask.Mask(size2, fill=True) - expected_size = (max(0, size1[0] + size2[0] - 1), - max(0, size1[1] + size2[1] - 1)) - - mask = mask1.convolve(mask2) - - self.assertIsNot(mask, mask2, msg) - self.assertEqual(mask.get_size(), expected_size, msg) - - def test_zero_mask_convolve__with_output_mask(self): - """Ensures convolve correctly handles zero sized masks - when using an output mask argument. - - Tests the different combinations of sized and zero sized masks. 
- """ - for size1 in ((11, 17), (91, 0), (0, 90), (0, 0)): - mask1 = pygame.mask.Mask(size1, fill=True) - - for size2 in ((13, 11), (83, 0), (0, 62), (0, 0)): - mask2 = pygame.mask.Mask(size2, fill=True) - - for output_size in ((7, 5), (71, 0), (0, 70), (0, 0)): - msg = 'sizes={}, {}, {}'.format(size1, size2, output_size) - output_mask = pygame.mask.Mask(output_size) - - mask = mask1.convolve(mask2, output_mask) - - self.assertIs(mask, output_mask, msg) - self.assertEqual(mask.get_size(), output_size, msg) - - def test_zero_mask_connected_component(self): - """Ensures connected_component correctly handles zero sized masks.""" - expected_count = 0 - - for size in ((81, 0), (0, 80), (0, 0)): - mask = pygame.mask.Mask(size) - - cc_mask = mask.connected_component() - - self.assertEqual(cc_mask.get_size(), size) - self.assertEqual(cc_mask.count(), expected_count, - 'size={}'.format(size)) - - def test_zero_mask_connected_component__indexed(self): - """Ensures connected_component correctly handles zero sized masks - when using an index argument.""" - for size in ((91, 0), (0, 90), (0, 0)): - mask = pygame.mask.Mask(size) - - with self.assertRaises(IndexError): - cc_mask = mask.connected_component((0, 0)) - - def test_zero_mask_connected_components(self): - """Ensures connected_components correctly handles zero sized masks.""" - expected_cc_masks = [] - - for size in ((11, 0), (0, 10), (0, 0)): - mask = pygame.mask.Mask(size) - - cc_masks = mask.connected_components() - - self.assertListEqual(cc_masks, expected_cc_masks, - 'size={}'.format(size)) - - def test_zero_mask_get_bounding_rects(self): - """Ensures get_bounding_rects correctly handles zero sized masks.""" - expected_bounding_rects = [] - - for size in ((21, 0), (0, 20), (0, 0)): - mask = pygame.mask.Mask(size) - - bounding_rects = mask.get_bounding_rects() - - self.assertListEqual(bounding_rects, expected_bounding_rects, - 'size={}'.format(size)) - - -class MaskModuleTest(unittest.TestCase): - # The 
@unittest.expectedFailure decorator can be removed when issue #897 - # is fixed. - @unittest.expectedFailure - def test_from_surface(self): - """Ensures from_surface creates a mask with the correct bits set. - - This test checks the masks created by the from_surface function using - 16 and 32 bit surfaces. Each alpha value (0-255) is tested against - several different threshold values. - Note: On 16 bit surface the requested alpha value can differ from what - is actually set. This test uses the value read from the surface. - """ - threshold_count = 256 - surface_color = [55, 155, 255, 0] - expected_size = (11, 9) - all_set_count = expected_size[0] * expected_size[1] - none_set_count = 0 - - for depth in (16, 32): - surface = pygame.Surface(expected_size, SRCALPHA, depth) - - for alpha in range(threshold_count): - surface_color[3] = alpha - surface.fill(surface_color) - - if depth < 32: - # On surfaces with depths < 32 the requested alpha can be - # different than what gets set. Use the value read from the - # surface. - alpha = surface.get_at((0, 0))[3] - - # Test the mask created at threshold values low, high and - # around alpha. - threshold_test_values = set( - [-1, 0, alpha - 1, alpha, alpha + 1, 255, 256]) - - for threshold in threshold_test_values: - msg = 'depth={}, alpha={}, threshold={}'.format( - depth, alpha, threshold) - - if alpha > threshold: - expected_count = all_set_count - else: - expected_count = none_set_count - - mask = pygame.mask.from_surface(surface, threshold) - - self.assertEqual(mask.get_size(), expected_size, msg) - self.assertEqual(mask.count(), expected_count, msg) - - def test_from_surface__different_alphas_32bit(self): - """Ensures from_surface creates a mask with the correct bits set - when pixels have different alpha values (32 bits surfaces). - - This test checks the masks created by the from_surface function using - a 32 bit surface. The surface is created with each pixel having a - different alpha value (0-255). 
This surface is tested over a range - of threshold values (0-255). - """ - offset = (0, 0) - threshold_count = 256 - surface_color = [10, 20, 30, 0] - expected_size = (threshold_count, 1) - expected_mask = pygame.Mask(expected_size, fill=True) - surface = pygame.Surface(expected_size, SRCALPHA, 32) - - # Give each pixel a different alpha. - surface.lock() # Lock for possible speed up. - for a in range(threshold_count): - surface_color[3] = a - surface.set_at((a, 0), surface_color) - surface.unlock() - - # Test the mask created for each different alpha threshold. - for threshold in range(threshold_count): - msg = 'threshold={}'.format(threshold) - expected_mask.set_at((threshold, 0), 0) - expected_count = expected_mask.count() - - mask = pygame.mask.from_surface(surface, threshold) - - self.assertEqual(mask.get_size(), expected_size, msg) - self.assertEqual(mask.count(), expected_count, msg) - self.assertEqual(mask.overlap_area(expected_mask, offset), - expected_count, msg) - - # The @unittest.expectedFailure decorator can be removed when issue #897 - # is fixed. - @unittest.expectedFailure - def test_from_surface__different_alphas_16bit(self): - """Ensures from_surface creates a mask with the correct bits set - when pixels have different alpha values (16 bit surfaces). - - This test checks the masks created by the from_surface function using - a 16 bit surface. Each pixel of the surface is set with a different - alpha value (0-255), but since this is a 16 bit surface the requested - alpha value can differ from what is actually set. The resulting surface - will have groups of alpha values which complicates the test as the - alpha groups will all be set/unset at a given threshold. The setup - calculates these groups and an expected mask for each. This test data - is then used to test each alpha grouping over a range of threshold - values. 
- """ - threshold_count = 256 - surface_color = [110, 120, 130, 0] - expected_size = (threshold_count, 1) - surface = pygame.Surface(expected_size, SRCALPHA, 16) - - # Give each pixel a different alpha. - surface.lock() # Lock for possible speed up. - for a in range(threshold_count): - surface_color[3] = a - surface.set_at((a, 0), surface_color) - surface.unlock() - - alpha_thresholds = OrderedDict() - special_thresholds = set() - - # Create the threshold ranges and identify any thresholds that need - # special handling. - for threshold in range(threshold_count): - # On surfaces with depths < 32 the requested alpha can be different - # than what gets set. Use the value read from the surface. - alpha = surface.get_at((threshold, 0))[3] - - if alpha not in alpha_thresholds: - alpha_thresholds[alpha] = [threshold] - else: - alpha_thresholds[alpha].append(threshold) - - if threshold < alpha: - special_thresholds.add(threshold) - - # Use each threshold group to create an expected mask. - test_data = [] # [(from_threshold, to_threshold, expected_mask), ...] - offset = (0, 0) - erase_mask = pygame.Mask(expected_size) - exp_mask = pygame.Mask(expected_size, fill=True) - - for thresholds in alpha_thresholds.values(): - for threshold in thresholds: - if threshold in special_thresholds: - # Any special thresholds just reuse previous exp_mask. - test_data.append((threshold, threshold + 1, exp_mask)) - else: - to_threshold = thresholds[-1] + 1 - - # Make the expected mask by erasing the unset bits. - for thres in range(to_threshold): - erase_mask.set_at((thres, 0), 1) - - exp_mask = pygame.Mask(expected_size, fill=True) - exp_mask.erase(erase_mask, offset) - test_data.append((threshold, to_threshold, exp_mask)) - break - - # All the setup is done. Now test the masks created over the threshold - # ranges. 
- for from_threshold, to_threshold, expected_mask in test_data: - expected_count = expected_mask.count() - - for threshold in range(from_threshold, to_threshold): - msg = 'threshold={}'.format(threshold) - - mask = pygame.mask.from_surface(surface, threshold) - - self.assertEqual(mask.get_size(), expected_size, msg) - self.assertEqual(mask.count(), expected_count, msg) - self.assertEqual(mask.overlap_area(expected_mask, offset), - expected_count, msg) - - def todo_test_from_surface__with_colorkey(self): - """Ensures from_surface creates a mask with the correct bits set - when the surface uses a colorkey. - """ - self.fail() - - def test_from_threshold(self): - """ Does mask.from_threshold() work correctly? - """ - - a = [16, 24, 32] - - for i in a: - surf = pygame.surface.Surface((70,70), 0, i) - surf.fill((100,50,200),(20,20,20,20)) - mask = pygame.mask.from_threshold(surf,(100,50,200,255),(10,10,10,255)) - - rects = mask.get_bounding_rects() - - self.assertEqual(mask.count(), 400) - self.assertEqual(mask.get_bounding_rects(), [pygame.Rect((20,20,20,20))]) - - for i in a: - surf = pygame.surface.Surface((70,70), 0, i) - surf2 = pygame.surface.Surface((70,70), 0, i) - surf.fill((100,100,100)) - surf2.fill((150,150,150)) - surf2.fill((100,100,100), (40,40,10,10)) - mask = pygame.mask.from_threshold(surf, (0,0,0,0), (10,10,10,255), surf2) - - self.assertEqual(mask.count(), 100) - self.assertEqual(mask.get_bounding_rects(), [pygame.Rect((40,40,10,10))]) - - def test_zero_size_from_surface(self): - """Ensures from_surface can create masks from zero sized surfaces.""" - for size in ((100, 0), (0, 100), (0, 0)): - mask = pygame.mask.from_surface(pygame.Surface(size)) - - self.assertIsInstance(mask, pygame.mask.MaskType, - 'size={}'.format(size)) - self.assertEqual(mask.get_size(), size) - - def test_zero_size_from_threshold(self): - a = [16, 24, 32] - sizes = ((100, 0), (0, 100), (0, 0)) - - for size in sizes: - for i in a: - surf = pygame.surface.Surface(size, 0, i) - 
surf.fill((100, 50, 200), (20, 20, 20, 20)) - mask = pygame.mask.from_threshold(surf, (100, 50, 200, 255), (10, 10, 10, 255)) - - self.assertEqual(mask.count(), 0) - - rects = mask.get_bounding_rects() - self.assertEqual(rects, []) - - for i in a: - surf = pygame.surface.Surface(size, 0, i) - surf2 = pygame.surface.Surface(size, 0, i) - surf.fill((100, 100, 100)) - surf2.fill((150, 150, 150)) - surf2.fill((100, 100, 100), (40, 40, 10, 10)) - mask = pygame.mask.from_threshold(surf, (0, 0, 0, 0), (10, 10, 10, 255), surf2) - - self.assertEqual(mask.count(), 0) - - rects = mask.get_bounding_rects() - self.assertEqual(rects, []) - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/math_test.py b/venv/lib/python3.7/site-packages/pygame/tests/math_test.py deleted file mode 100644 index 07a1dee..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/math_test.py +++ /dev/null @@ -1,1612 +0,0 @@ -# -*- coding: utf-8 -*- -import sys -import unittest -import math -from time import clock -import platform - -import pygame.math -from pygame.math import Vector2, Vector3 - -IS_PYPY = 'PyPy' == platform.python_implementation() -PY3 = sys.version_info.major == 3 - - -class Vector2TypeTest(unittest.TestCase): - - def setUp(self): - pygame.math.enable_swizzling() - self.zeroVec = Vector2() - self.e1 = Vector2(1, 0) - self.e2 = Vector2(0, 1) - self.t1 = (1.2, 3.4) - self.l1 = list(self.t1) - self.v1 = Vector2(self.t1) - self.t2 = (5.6, 7.8) - self.l2 = list(self.t2) - self.v2 = Vector2(self.t2) - self.s1 = 5.6 - self.s2 = 7.8 - - def tearDown(self): - pygame.math.enable_swizzling() - - def testConstructionDefault(self): - v = Vector2() - self.assertEqual(v.x, 0.) - self.assertEqual(v.y, 0.) - - def testConstructionScalar(self): - v = Vector2(1) - self.assertEqual(v.x, 1.) - self.assertEqual(v.y, 1.) - - def testConstructionScalarKeywords(self): - v = Vector2(x=1) - self.assertEqual(v.x, 1.) - self.assertEqual(v.y, 1.) 
- - def testConstructionKeywords(self): - v = Vector2(x=1, y=2) - self.assertEqual(v.x, 1.) - self.assertEqual(v.y, 2.) - - def testConstructionXY(self): - v = Vector2(1.2, 3.4) - self.assertEqual(v.x, 1.2) - self.assertEqual(v.y, 3.4) - - def testConstructionTuple(self): - v = Vector2((1.2, 3.4)) - self.assertEqual(v.x, 1.2) - self.assertEqual(v.y, 3.4) - - def testConstructionList(self): - v = Vector2([1.2, 3.4]) - self.assertEqual(v.x, 1.2) - self.assertEqual(v.y, 3.4) - - def testConstructionVector2(self): - v = Vector2(Vector2(1.2, 3.4)) - self.assertEqual(v.x, 1.2) - self.assertEqual(v.y, 3.4) - - def testAttributAccess(self): - tmp = self.v1.x - self.assertEqual(tmp, self.v1.x) - self.assertEqual(tmp, self.v1[0]) - tmp = self.v1.y - self.assertEqual(tmp, self.v1.y) - self.assertEqual(tmp, self.v1[1]) - self.v1.x = 3.141 - self.assertEqual(self.v1.x, 3.141) - self.v1.y = 3.141 - self.assertEqual(self.v1.y, 3.141) - def assign_nonfloat(): - v = Vector2() - v.x = "spam" - self.assertRaises(TypeError, assign_nonfloat) - - def testSequence(self): - v = Vector2(1.2, 3.4) - Vector2()[:] - self.assertEqual(len(v), 2) - self.assertEqual(v[0], 1.2) - self.assertEqual(v[1], 3.4) - self.assertRaises(IndexError, lambda : v[2]) - self.assertEqual(v[-1], 3.4) - self.assertEqual(v[-2], 1.2) - self.assertRaises(IndexError, lambda : v[-3]) - self.assertEqual(v[:], [1.2, 3.4]) - self.assertEqual(v[1:], [3.4]) - self.assertEqual(v[:1], [1.2]) - self.assertEqual(list(v), [1.2, 3.4]) - self.assertEqual(tuple(v), (1.2, 3.4)) - v[0] = 5.6 - v[1] = 7.8 - self.assertEqual(v.x, 5.6) - self.assertEqual(v.y, 7.8) - v[:] = [9.1, 11.12] - self.assertEqual(v.x, 9.1) - self.assertEqual(v.y, 11.12) - def overpopulate(): - v = Vector2() - v[:] = [1, 2, 3] - self.assertRaises(ValueError, overpopulate) - def underpopulate(): - v = Vector2() - v[:] = [1] - self.assertRaises(ValueError, underpopulate) - def assign_nonfloat(): - v = Vector2() - v[0] = "spam" - self.assertRaises(TypeError, 
assign_nonfloat) - - def testExtendedSlicing(self): - # deletion - def delSlice(vec, start=None, stop=None, step=None): - if start is not None and stop is not None and step is not None: - del vec[start:stop:step] - elif start is not None and stop is None and step is not None: - del vec[start::step] - elif start is None and stop is None and step is not None: - del vec[::step] - v = Vector2(self.v1) - self.assertRaises(TypeError, delSlice, v, None, None, 2) - self.assertRaises(TypeError, delSlice, v, 1, None, 2) - self.assertRaises(TypeError, delSlice, v, 1, 2, 1) - - # assignment - v = Vector2(self.v1) - v[::2] = [-1] - self.assertEqual(v, [-1, self.v1.y]) - v = Vector2(self.v1) - v[::-2] = [10] - self.assertEqual(v, [self.v1.x, 10]) - v = Vector2(self.v1) - v[::-1] = v - self.assertEqual(v, [self.v1.y, self.v1.x]) - a = Vector2(self.v1) - b = Vector2(self.v1) - c = Vector2(self.v1) - a[1:2] = [2.2] - b[slice(1,2)] = [2.2] - c[1:2:] = (2.2,) - self.assertEqual(a, b) - self.assertEqual(a, c) - self.assertEqual(type(a), type(self.v1)) - self.assertEqual(type(b), type(self.v1)) - self.assertEqual(type(c), type(self.v1)) - - def testAdd(self): - v3 = self.v1 + self.v2 - self.assertTrue(isinstance(v3, type(self.v1))) - self.assertEqual(v3.x, self.v1.x + self.v2.x) - self.assertEqual(v3.y, self.v1.y + self.v2.y) - v3 = self.v1 + self.t2 - self.assertTrue(isinstance(v3, type(self.v1))) - self.assertEqual(v3.x, self.v1.x + self.t2[0]) - self.assertEqual(v3.y, self.v1.y + self.t2[1]) - v3 = self.v1 + self.l2 - self.assertTrue(isinstance(v3, type(self.v1))) - self.assertEqual(v3.x, self.v1.x + self.l2[0]) - self.assertEqual(v3.y, self.v1.y + self.l2[1]) - v3 = self.t1 + self.v2 - self.assertTrue(isinstance(v3, type(self.v1))) - self.assertEqual(v3.x, self.t1[0] + self.v2.x) - self.assertEqual(v3.y, self.t1[1] + self.v2.y) - v3 = self.l1 + self.v2 - self.assertTrue(isinstance(v3, type(self.v1))) - self.assertEqual(v3.x, self.l1[0] + self.v2.x) - self.assertEqual(v3.y, 
self.l1[1] + self.v2.y) - - def testSub(self): - v3 = self.v1 - self.v2 - self.assertTrue(isinstance(v3, type(self.v1))) - self.assertEqual(v3.x, self.v1.x - self.v2.x) - self.assertEqual(v3.y, self.v1.y - self.v2.y) - v3 = self.v1 - self.t2 - self.assertTrue(isinstance(v3, type(self.v1))) - self.assertEqual(v3.x, self.v1.x - self.t2[0]) - self.assertEqual(v3.y, self.v1.y - self.t2[1]) - v3 = self.v1 - self.l2 - self.assertTrue(isinstance(v3, type(self.v1))) - self.assertEqual(v3.x, self.v1.x - self.l2[0]) - self.assertEqual(v3.y, self.v1.y - self.l2[1]) - v3 = self.t1 - self.v2 - self.assertTrue(isinstance(v3, type(self.v1))) - self.assertEqual(v3.x, self.t1[0] - self.v2.x) - self.assertEqual(v3.y, self.t1[1] - self.v2.y) - v3 = self.l1 - self.v2 - self.assertTrue(isinstance(v3, type(self.v1))) - self.assertEqual(v3.x, self.l1[0] - self.v2.x) - self.assertEqual(v3.y, self.l1[1] - self.v2.y) - - def testScalarMultiplication(self): - v = self.s1 * self.v1 - self.assertTrue(isinstance(v, type(self.v1))) - self.assertEqual(v.x, self.s1 * self.v1.x) - self.assertEqual(v.y, self.s1 * self.v1.y) - v = self.v1 * self.s2 - self.assertEqual(v.x, self.v1.x * self.s2) - self.assertEqual(v.y, self.v1.y * self.s2) - - def testScalarDivision(self): - v = self.v1 / self.s1 - self.assertTrue(isinstance(v, type(self.v1))) - self.assertAlmostEqual(v.x, self.v1.x / self.s1) - self.assertAlmostEqual(v.y, self.v1.y / self.s1) - v = self.v1 // self.s2 - self.assertTrue(isinstance(v, type(self.v1))) - self.assertEqual(v.x, self.v1.x // self.s2) - self.assertEqual(v.y, self.v1.y // self.s2) - - def testBool(self): - self.assertEqual(bool(self.zeroVec), False) - self.assertEqual(bool(self.v1), True) - self.assertTrue(not self.zeroVec) - self.assertTrue(self.v1) - - def testUnary(self): - v = +self.v1 - self.assertTrue(isinstance(v, type(self.v1))) - self.assertEqual(v.x, self.v1.x) - self.assertEqual(v.y, self.v1.y) - self.assertNotEqual(id(v), id(self.v1)) - v = -self.v1 - 
self.assertTrue(isinstance(v, type(self.v1))) - self.assertEqual(v.x, -self.v1.x) - self.assertEqual(v.y, -self.v1.y) - self.assertNotEqual(id(v), id(self.v1)) - - def testCompare(self): - int_vec = Vector2(3, -2) - flt_vec = Vector2(3.0, -2.0) - zero_vec = Vector2(0, 0) - self.assertEqual(int_vec == flt_vec, True) - self.assertEqual(int_vec != flt_vec, False) - self.assertEqual(int_vec != zero_vec, True) - self.assertEqual(flt_vec == zero_vec, False) - self.assertEqual(int_vec == (3, -2), True) - self.assertEqual(int_vec != (3, -2), False) - self.assertEqual(int_vec != [0, 0], True) - self.assertEqual(int_vec == [0, 0], False) - self.assertEqual(int_vec != 5, True) - self.assertEqual(int_vec == 5, False) - self.assertEqual(int_vec != [3, -2, 0], True) - self.assertEqual(int_vec == [3, -2, 0], False) - - def testStr(self): - v = Vector2(1.2, 3.4) - self.assertEqual(str(v), "[1.2, 3.4]") - - def testRepr(self): - v = Vector2(1.2, 3.4) - self.assertEqual(v.__repr__(), "") - self.assertEqual(v, Vector2(v.__repr__())) - - def testIter(self): - it = self.v1.__iter__() - if PY3: - next_ = it.__next__ - else: - next_ = it.next - self.assertEqual(next_(), self.v1[0]) - self.assertEqual(next_(), self.v1[1]) - self.assertRaises(StopIteration, lambda : next_()) - it1 = self.v1.__iter__() - it2 = self.v1.__iter__() - self.assertNotEqual(id(it1), id(it2)) - self.assertEqual(id(it1), id(it1.__iter__())) - self.assertEqual(list(it1), list(it2)); - self.assertEqual(list(self.v1.__iter__()), self.l1) - idx = 0 - for val in self.v1: - self.assertEqual(val, self.v1[idx]) - idx += 1 - - def test_rotate(self): - v1 = Vector2(1, 0) - v2 = v1.rotate(90) - v3 = v1.rotate(90 + 360) - self.assertEqual(v1.x, 1) - self.assertEqual(v1.y, 0) - self.assertEqual(v2.x, 0) - self.assertEqual(v2.y, 1) - self.assertEqual(v3.x, v2.x) - self.assertEqual(v3.y, v2.y) - v1 = Vector2(-1, -1) - v2 = v1.rotate(-90) - self.assertEqual(v2.x, -1) - self.assertEqual(v2.y, 1) - v2 = v1.rotate(360) - 
self.assertEqual(v1.x, v2.x) - self.assertEqual(v1.y, v2.y) - v2 = v1.rotate(0) - self.assertEqual(v1.x, v2.x) - self.assertEqual(v1.y, v2.y) - # issue 214 - self.assertEqual(Vector2(0, 1).rotate(359.99999999), Vector2(0, 1)) - - def test_rotate_ip(self): - v = Vector2(1, 0) - self.assertEqual(v.rotate_ip(90), None) - self.assertEqual(v.x, 0) - self.assertEqual(v.y, 1) - v = Vector2(-1, -1) - v.rotate_ip(-90) - self.assertEqual(v.x, -1) - self.assertEqual(v.y, 1) - - def test_normalize(self): - v = self.v1.normalize() - # length is 1 - self.assertAlmostEqual(v.x * v.x + v.y * v.y, 1.) - # v1 is unchanged - self.assertEqual(self.v1.x, self.l1[0]) - self.assertEqual(self.v1.y, self.l1[1]) - # v2 is paralell to v1 - self.assertAlmostEqual(self.v1.x * v.y - self.v1.y * v.x, 0.) - self.assertRaises(ValueError, lambda : self.zeroVec.normalize()) - - def test_normalize_ip(self): - v = +self.v1 - # v has length != 1 before normalizing - self.assertNotEqual(v.x * v.x + v.y * v.y, 1.) - # inplace operations should return None - self.assertEqual(v.normalize_ip(), None) - # length is 1 - self.assertAlmostEqual(v.x * v.x + v.y * v.y, 1.) - # v2 is paralell to v1 - self.assertAlmostEqual(self.v1.x * v.y - self.v1.y * v.x, 0.) 
- self.assertRaises(ValueError, lambda : self.zeroVec.normalize_ip()) - - def test_is_normalized(self): - self.assertEqual(self.v1.is_normalized(), False) - v = self.v1.normalize() - self.assertEqual(v.is_normalized(), True) - self.assertEqual(self.e2.is_normalized(), True) - self.assertEqual(self.zeroVec.is_normalized(), False) - - def test_cross(self): - self.assertEqual(self.v1.cross(self.v2), - self.v1.x * self.v2.y - self.v1.y * self.v2.x) - self.assertEqual(self.v1.cross(self.l2), - self.v1.x * self.l2[1] - self.v1.y * self.l2[0]) - self.assertEqual(self.v1.cross(self.t2), - self.v1.x * self.t2[1] - self.v1.y * self.t2[0]) - self.assertEqual(self.v1.cross(self.v2), -self.v2.cross(self.v1)) - self.assertEqual(self.v1.cross(self.v1), 0) - - def test_dot(self): - self.assertAlmostEqual(self.v1.dot(self.v2), - self.v1.x * self.v2.x + self.v1.y * self.v2.y) - self.assertAlmostEqual(self.v1.dot(self.l2), - self.v1.x * self.l2[0] + self.v1.y * self.l2[1]) - self.assertAlmostEqual(self.v1.dot(self.t2), - self.v1.x * self.t2[0] + self.v1.y * self.t2[1]) - self.assertEqual(self.v1.dot(self.v2), self.v2.dot(self.v1)) - self.assertEqual(self.v1.dot(self.v2), self.v1 * self.v2) - - def test_angle_to(self): - self.assertEqual(self.v1.rotate(self.v1.angle_to(self.v2)).normalize(), - self.v2.normalize()) - self.assertEqual(Vector2(1, 1).angle_to((-1, 1)), 90) - self.assertEqual(Vector2(1, 0).angle_to((0, -1)), -90) - self.assertEqual(Vector2(1, 0).angle_to((-1, 1)), 135) - self.assertEqual(abs(Vector2(1, 0).angle_to((-1, 0))), 180) - - def test_scale_to_length(self): - v = Vector2(1, 1) - v.scale_to_length(2.5) - self.assertEqual(v, Vector2(2.5, 2.5) / math.sqrt(2)) - self.assertRaises(ValueError, lambda : self.zeroVec.scale_to_length(1)) - self.assertEqual(v.scale_to_length(0), None) - self.assertEqual(v, self.zeroVec) - - def test_length(self): - self.assertEqual(Vector2(3, 4).length(), 5) - self.assertEqual(Vector2(-3, 4).length(), 5) - 
self.assertEqual(self.zeroVec.length(), 0) - - def test_length_squared(self): - self.assertEqual(Vector2(3, 4).length_squared(), 25) - self.assertEqual(Vector2(-3, 4).length_squared(), 25) - self.assertEqual(self.zeroVec.length_squared(), 0) - - def test_reflect(self): - v = Vector2(1, -1) - n = Vector2(0, 1) - self.assertEqual(v.reflect(n), Vector2(1, 1)) - self.assertEqual(v.reflect(3*n), v.reflect(n)) - self.assertEqual(v.reflect(-v), -v) - self.assertRaises(ValueError, lambda : v.reflect(self.zeroVec)) - - def test_reflect_ip(self): - v1 = Vector2(1, -1) - v2 = Vector2(v1) - n = Vector2(0, 1) - self.assertEqual(v2.reflect_ip(n), None) - self.assertEqual(v2, Vector2(1, 1)) - v2 = Vector2(v1) - v2.reflect_ip(3*n) - self.assertEqual(v2, v1.reflect(n)) - v2 = Vector2(v1) - v2.reflect_ip(-v1) - self.assertEqual(v2, -v1) - self.assertRaises(ValueError, lambda : v2.reflect_ip(Vector2())) - - def test_distance_to(self): - diff = self.v1 - self.v2 - self.assertEqual(self.e1.distance_to(self.e2), math.sqrt(2)) - self.assertAlmostEqual(self.v1.distance_to(self.v2), - math.sqrt(diff.x * diff.x + diff.y * diff.y)) - self.assertEqual(self.v1.distance_to(self.v1), 0) - self.assertEqual(self.v1.distance_to(self.v2), - self.v2.distance_to(self.v1)) - - def test_distance_squared_to(self): - diff = self.v1 - self.v2 - self.assertEqual(self.e1.distance_squared_to(self.e2), 2) - self.assertAlmostEqual(self.v1.distance_squared_to(self.v2), - diff.x * diff.x + diff.y * diff.y) - self.assertEqual(self.v1.distance_squared_to(self.v1), 0) - self.assertEqual(self.v1.distance_squared_to(self.v2), - self.v2.distance_squared_to(self.v1)) - - def test_update(self): - v = Vector2(3, 4) - v.update(0) - self.assertEqual(v, Vector2((0, 0))) - v.update(5, 1) - self.assertEqual(v, Vector2(5, 1)) - v.update((4, 1)) - self.assertNotEqual(v, Vector2((5, 1))) - - def test_swizzle(self): - self.assertTrue(hasattr(pygame.math, "enable_swizzling")) - self.assertTrue(hasattr(pygame.math, 
"disable_swizzling")) - # swizzling not disabled by default - pygame.math.disable_swizzling() - self.assertRaises(AttributeError, lambda : self.v1.yx) - pygame.math.enable_swizzling() - - self.assertEqual(self.v1.yx, (self.v1.y, self.v1.x)) - self.assertEqual(self.v1.xxyyxy, (self.v1.x, self.v1.x, self.v1.y, - self.v1.y, self.v1.x, self.v1.y)) - self.v1.xy = self.t2 - self.assertEqual(self.v1, self.t2) - self.v1.yx = self.t2 - self.assertEqual(self.v1, (self.t2[1], self.t2[0])) - self.assertEqual(type(self.v1), Vector2) - def invalidSwizzleX(): - Vector2().xx = (1, 2) - def invalidSwizzleY(): - Vector2().yy = (1, 2) - self.assertRaises(AttributeError, invalidSwizzleX) - self.assertRaises(AttributeError, invalidSwizzleY) - def invalidAssignment(): - Vector2().xy = 3 - self.assertRaises(TypeError, invalidAssignment) - def unicodeAttribute(): - getattr(Vector2(), "ä") - self.assertRaises(AttributeError, unicodeAttribute) - - def test_swizzle_return_types(self): - self.assertEqual(type(self.v1.x), float) - self.assertEqual(type(self.v1.xy), Vector2) - self.assertEqual(type(self.v1.xyx), Vector3) - # but we don't have vector4 or above... so tuple. 
- self.assertEqual(type(self.v1.xyxy), tuple) - self.assertEqual(type(self.v1.xyxyx), tuple) - - def test_elementwise(self): - # behaviour for "elementwise op scalar" - self.assertEqual(self.v1.elementwise() + self.s1, - (self.v1.x + self.s1, self.v1.y + self.s1)) - self.assertEqual(self.v1.elementwise() - self.s1, - (self.v1.x - self.s1, self.v1.y - self.s1)) - self.assertEqual(self.v1.elementwise() * self.s2, - (self.v1.x * self.s2, self.v1.y * self.s2)) - self.assertEqual(self.v1.elementwise() / self.s2, - (self.v1.x / self.s2, self.v1.y / self.s2)) - self.assertEqual(self.v1.elementwise() // self.s1, - (self.v1.x // self.s1, self.v1.y // self.s1)) - self.assertEqual(self.v1.elementwise() ** self.s1, - (self.v1.x ** self.s1, self.v1.y ** self.s1)) - self.assertEqual(self.v1.elementwise() % self.s1, - (self.v1.x % self.s1, self.v1.y % self.s1)) - self.assertEqual(self.v1.elementwise() > self.s1, - self.v1.x > self.s1 and self.v1.y > self.s1) - self.assertEqual(self.v1.elementwise() < self.s1, - self.v1.x < self.s1 and self.v1.y < self.s1) - self.assertEqual(self.v1.elementwise() == self.s1, - self.v1.x == self.s1 and self.v1.y == self.s1) - self.assertEqual(self.v1.elementwise() != self.s1, - self.v1.x != self.s1 and self.v1.y != self.s1) - self.assertEqual(self.v1.elementwise() >= self.s1, - self.v1.x >= self.s1 and self.v1.y >= self.s1) - self.assertEqual(self.v1.elementwise() <= self.s1, - self.v1.x <= self.s1 and self.v1.y <= self.s1) - self.assertEqual(self.v1.elementwise() != self.s1, - self.v1.x != self.s1 and self.v1.y != self.s1) - # behaviour for "scalar op elementwise" - self.assertEqual(5 + self.v1.elementwise(), Vector2(5, 5) + self.v1) - self.assertEqual(3.5 - self.v1.elementwise(), Vector2(3.5, 3.5) - self.v1) - self.assertEqual(7.5 * self.v1.elementwise() , 7.5 * self.v1) - self.assertEqual(-3.5 / self.v1.elementwise(), (-3.5 / self.v1.x, -3.5 / self.v1.y)) - self.assertEqual(-3.5 // self.v1.elementwise(), (-3.5 // self.v1.x, -3.5 // self.v1.y)) - 
self.assertEqual(-3.5 ** self.v1.elementwise(), (-3.5 ** self.v1.x, -3.5 ** self.v1.y)) - self.assertEqual(3 % self.v1.elementwise(), (3 % self.v1.x, 3 % self.v1.y)) - self.assertEqual(2 < self.v1.elementwise(), 2 < self.v1.x and 2 < self.v1.y) - self.assertEqual(2 > self.v1.elementwise(), 2 > self.v1.x and 2 > self.v1.y) - self.assertEqual(1 == self.v1.elementwise(), 1 == self.v1.x and 1 == self.v1.y) - self.assertEqual(1 != self.v1.elementwise(), 1 != self.v1.x and 1 != self.v1.y) - self.assertEqual(2 <= self.v1.elementwise(), 2 <= self.v1.x and 2 <= self.v1.y) - self.assertEqual(-7 >= self.v1.elementwise(), -7 >= self.v1.x and -7 >= self.v1.y) - self.assertEqual(-7 != self.v1.elementwise(), -7 != self.v1.x and -7 != self.v1.y) - - # behaviour for "elementwise op vector" - self.assertEqual(type(self.v1.elementwise() * self.v2), type(self.v1)) - self.assertEqual(self.v1.elementwise() + self.v2, self.v1 + self.v2) - self.assertEqual(self.v1.elementwise() + self.v2, self.v1 + self.v2) - self.assertEqual(self.v1.elementwise() - self.v2, self.v1 - self.v2) - self.assertEqual(self.v1.elementwise() * self.v2, (self.v1.x * self.v2.x, self.v1.y * self.v2.y)) - self.assertEqual(self.v1.elementwise() / self.v2, (self.v1.x / self.v2.x, self.v1.y / self.v2.y)) - self.assertEqual(self.v1.elementwise() // self.v2, (self.v1.x // self.v2.x, self.v1.y // self.v2.y)) - self.assertEqual(self.v1.elementwise() ** self.v2, (self.v1.x ** self.v2.x, self.v1.y ** self.v2.y)) - self.assertEqual(self.v1.elementwise() % self.v2, (self.v1.x % self.v2.x, self.v1.y % self.v2.y)) - self.assertEqual(self.v1.elementwise() > self.v2, self.v1.x > self.v2.x and self.v1.y > self.v2.y) - self.assertEqual(self.v1.elementwise() < self.v2, self.v1.x < self.v2.x and self.v1.y < self.v2.y) - self.assertEqual(self.v1.elementwise() >= self.v2, self.v1.x >= self.v2.x and self.v1.y >= self.v2.y) - self.assertEqual(self.v1.elementwise() <= self.v2, self.v1.x <= self.v2.x and self.v1.y <= self.v2.y) - 
self.assertEqual(self.v1.elementwise() == self.v2, self.v1.x == self.v2.x and self.v1.y == self.v2.y) - self.assertEqual(self.v1.elementwise() != self.v2, self.v1.x != self.v2.x and self.v1.y != self.v2.y) - # behaviour for "vector op elementwise" - self.assertEqual(self.v2 + self.v1.elementwise(), self.v2 + self.v1) - self.assertEqual(self.v2 - self.v1.elementwise(), self.v2 - self.v1) - self.assertEqual(self.v2 * self.v1.elementwise(), (self.v2.x * self.v1.x, self.v2.y * self.v1.y)) - self.assertEqual(self.v2 / self.v1.elementwise(), (self.v2.x / self.v1.x, self.v2.y / self.v1.y)) - self.assertEqual(self.v2 // self.v1.elementwise(), (self.v2.x // self.v1.x, self.v2.y // self.v1.y)) - self.assertEqual(self.v2 ** self.v1.elementwise(), (self.v2.x ** self.v1.x, self.v2.y ** self.v1.y)) - self.assertEqual(self.v2 % self.v1.elementwise(), (self.v2.x % self.v1.x, self.v2.y % self.v1.y)) - self.assertEqual(self.v2 < self.v1.elementwise(), self.v2.x < self.v1.x and self.v2.y < self.v1.y) - self.assertEqual(self.v2 > self.v1.elementwise(), self.v2.x > self.v1.x and self.v2.y > self.v1.y) - self.assertEqual(self.v2 <= self.v1.elementwise(), self.v2.x <= self.v1.x and self.v2.y <= self.v1.y) - self.assertEqual(self.v2 >= self.v1.elementwise(), self.v2.x >= self.v1.x and self.v2.y >= self.v1.y) - self.assertEqual(self.v2 == self.v1.elementwise(), self.v2.x == self.v1.x and self.v2.y == self.v1.y) - self.assertEqual(self.v2 != self.v1.elementwise(), self.v2.x != self.v1.x and self.v2.y != self.v1.y) - - # behaviour for "elementwise op elementwise" - self.assertEqual(self.v2.elementwise() + self.v1.elementwise(), self.v2 + self.v1) - self.assertEqual(self.v2.elementwise() - self.v1.elementwise(), self.v2 - self.v1) - self.assertEqual(self.v2.elementwise() * self.v1.elementwise(), (self.v2.x * self.v1.x, self.v2.y * self.v1.y)) - self.assertEqual(self.v2.elementwise() / self.v1.elementwise(), (self.v2.x / self.v1.x, self.v2.y / self.v1.y)) - 
self.assertEqual(self.v2.elementwise() // self.v1.elementwise(), (self.v2.x // self.v1.x, self.v2.y // self.v1.y)) - self.assertEqual(self.v2.elementwise() ** self.v1.elementwise(), (self.v2.x ** self.v1.x, self.v2.y ** self.v1.y)) - self.assertEqual(self.v2.elementwise() % self.v1.elementwise(), (self.v2.x % self.v1.x, self.v2.y % self.v1.y)) - self.assertEqual(self.v2.elementwise() < self.v1.elementwise(), self.v2.x < self.v1.x and self.v2.y < self.v1.y) - self.assertEqual(self.v2.elementwise() > self.v1.elementwise(), self.v2.x > self.v1.x and self.v2.y > self.v1.y) - self.assertEqual(self.v2.elementwise() <= self.v1.elementwise(), self.v2.x <= self.v1.x and self.v2.y <= self.v1.y) - self.assertEqual(self.v2.elementwise() >= self.v1.elementwise(), self.v2.x >= self.v1.x and self.v2.y >= self.v1.y) - self.assertEqual(self.v2.elementwise() == self.v1.elementwise(), self.v2.x == self.v1.x and self.v2.y == self.v1.y) - self.assertEqual(self.v2.elementwise() != self.v1.elementwise(), self.v2.x != self.v1.x and self.v2.y != self.v1.y) - - # other behaviour - self.assertEqual(abs(self.v1.elementwise()), (abs(self.v1.x), abs(self.v1.y))) - self.assertEqual(-self.v1.elementwise(), -self.v1) - self.assertEqual(+self.v1.elementwise(), +self.v1) - self.assertEqual(bool(self.v1.elementwise()), bool(self.v1)) - self.assertEqual(bool(Vector2().elementwise()), bool(Vector2())) - self.assertEqual(self.zeroVec.elementwise() ** 0, (1, 1)) - self.assertRaises(ValueError, lambda : pow(Vector2(-1, 0).elementwise(), 1.2)) - self.assertRaises(ZeroDivisionError, lambda : self.zeroVec.elementwise() ** -1) - - def test_elementwise(self): - v1 = self.v1 - v2 = self.v2 - s1 = self.s1 - s2 = self.s2 - # behaviour for "elementwise op scalar" - self.assertEqual(v1.elementwise() + s1, (v1.x + s1, v1.y + s1)) - self.assertEqual(v1.elementwise() - s1, (v1.x - s1, v1.y - s1)) - self.assertEqual(v1.elementwise() * s2, (v1.x * s2, v1.y * s2)) - self.assertEqual(v1.elementwise() / s2, (v1.x / s2, 
v1.y / s2)) - self.assertEqual(v1.elementwise() // s1, (v1.x // s1, v1.y // s1)) - self.assertEqual(v1.elementwise() ** s1, (v1.x ** s1, v1.y ** s1)) - self.assertEqual(v1.elementwise() % s1, (v1.x % s1, v1.y % s1)) - self.assertEqual(v1.elementwise() > s1, v1.x > s1 and v1.y > s1) - self.assertEqual(v1.elementwise() < s1, v1.x < s1 and v1.y < s1) - self.assertEqual(v1.elementwise() == s1, v1.x == s1 and v1.y == s1) - self.assertEqual(v1.elementwise() != s1, v1.x != s1 and v1.y != s1) - self.assertEqual(v1.elementwise() >= s1, v1.x >= s1 and v1.y >= s1) - self.assertEqual(v1.elementwise() <= s1, v1.x <= s1 and v1.y <= s1) - self.assertEqual(v1.elementwise() != s1, v1.x != s1 and v1.y != s1) - # behaviour for "scalar op elementwise" - self.assertEqual(s1 + v1.elementwise(), (s1 + v1.x, s1 + v1.y)) - self.assertEqual(s1 - v1.elementwise(), (s1 - v1.x, s1 - v1.y)) - self.assertEqual(s1 * v1.elementwise(), (s1 * v1.x, s1 * v1.y)) - self.assertEqual(s1 / v1.elementwise(), (s1 / v1.x, s1 / v1.y)) - self.assertEqual(s1 // v1.elementwise(), (s1 // v1.x, s1 // v1.y)) - self.assertEqual(s1 ** v1.elementwise(), (s1 ** v1.x, s1 ** v1.y)) - self.assertEqual(s1 % v1.elementwise(), (s1 % v1.x, s1 % v1.y)) - self.assertEqual(s1 < v1.elementwise(), s1 < v1.x and s1 < v1.y) - self.assertEqual(s1 > v1.elementwise(), s1 > v1.x and s1 > v1.y) - self.assertEqual(s1 == v1.elementwise(), s1 == v1.x and s1 == v1.y) - self.assertEqual(s1 != v1.elementwise(), s1 != v1.x and s1 != v1.y) - self.assertEqual(s1 <= v1.elementwise(), s1 <= v1.x and s1 <= v1.y) - self.assertEqual(s1 >= v1.elementwise(), s1 >= v1.x and s1 >= v1.y) - self.assertEqual(s1 != v1.elementwise(), s1 != v1.x and s1 != v1.y) - - # behaviour for "elementwise op vector" - self.assertEqual(type(v1.elementwise() * v2), type(v1)) - self.assertEqual(v1.elementwise() + v2, v1 + v2) - self.assertEqual(v1.elementwise() - v2, v1 - v2) - self.assertEqual(v1.elementwise() * v2, (v1.x * v2.x, v1.y * v2.y)) - 
self.assertEqual(v1.elementwise() / v2, (v1.x / v2.x, v1.y / v2.y)) - self.assertEqual(v1.elementwise() // v2, (v1.x // v2.x, v1.y // v2.y)) - self.assertEqual(v1.elementwise() ** v2, (v1.x ** v2.x, v1.y ** v2.y)) - self.assertEqual(v1.elementwise() % v2, (v1.x % v2.x, v1.y % v2.y)) - self.assertEqual(v1.elementwise() > v2, v1.x > v2.x and v1.y > v2.y) - self.assertEqual(v1.elementwise() < v2, v1.x < v2.x and v1.y < v2.y) - self.assertEqual(v1.elementwise() >= v2, v1.x >= v2.x and v1.y >= v2.y) - self.assertEqual(v1.elementwise() <= v2, v1.x <= v2.x and v1.y <= v2.y) - self.assertEqual(v1.elementwise() == v2, v1.x == v2.x and v1.y == v2.y) - self.assertEqual(v1.elementwise() != v2, v1.x != v2.x and v1.y != v2.y) - # behaviour for "vector op elementwise" - self.assertEqual(v2 + v1.elementwise(), v2 + v1) - self.assertEqual(v2 - v1.elementwise(), v2 - v1) - self.assertEqual(v2 * v1.elementwise(), (v2.x * v1.x, v2.y * v1.y)) - self.assertEqual(v2 / v1.elementwise(), (v2.x / v1.x, v2.y / v1.y)) - self.assertEqual(v2 // v1.elementwise(), (v2.x // v1.x, v2.y // v1.y)) - self.assertEqual(v2 ** v1.elementwise(), (v2.x ** v1.x, v2.y ** v1.y)) - self.assertEqual(v2 % v1.elementwise(), (v2.x % v1.x, v2.y % v1.y)) - self.assertEqual(v2 < v1.elementwise(), v2.x < v1.x and v2.y < v1.y) - self.assertEqual(v2 > v1.elementwise(), v2.x > v1.x and v2.y > v1.y) - self.assertEqual(v2 <= v1.elementwise(), v2.x <= v1.x and v2.y <= v1.y) - self.assertEqual(v2 >= v1.elementwise(), v2.x >= v1.x and v2.y >= v1.y) - self.assertEqual(v2 == v1.elementwise(), v2.x == v1.x and v2.y == v1.y) - self.assertEqual(v2 != v1.elementwise(), v2.x != v1.x and v2.y != v1.y) - - # behaviour for "elementwise op elementwise" - self.assertEqual(v2.elementwise() + v1.elementwise(), v2 + v1) - self.assertEqual(v2.elementwise() - v1.elementwise(), v2 - v1) - self.assertEqual(v2.elementwise() * v1.elementwise(), (v2.x * v1.x, v2.y * v1.y)) - self.assertEqual(v2.elementwise() / v1.elementwise(), (v2.x / v1.x, v2.y / 
v1.y)) - self.assertEqual(v2.elementwise() // v1.elementwise(), (v2.x // v1.x, v2.y // v1.y)) - self.assertEqual(v2.elementwise() ** v1.elementwise(), (v2.x ** v1.x, v2.y ** v1.y)) - self.assertEqual(v2.elementwise() % v1.elementwise(), (v2.x % v1.x, v2.y % v1.y)) - self.assertEqual(v2.elementwise() < v1.elementwise(), v2.x < v1.x and v2.y < v1.y) - self.assertEqual(v2.elementwise() > v1.elementwise(), v2.x > v1.x and v2.y > v1.y) - self.assertEqual(v2.elementwise() <= v1.elementwise(), v2.x <= v1.x and v2.y <= v1.y) - self.assertEqual(v2.elementwise() >= v1.elementwise(), v2.x >= v1.x and v2.y >= v1.y) - self.assertEqual(v2.elementwise() == v1.elementwise(), v2.x == v1.x and v2.y == v1.y) - self.assertEqual(v2.elementwise() != v1.elementwise(), v2.x != v1.x and v2.y != v1.y) - - # other behaviour - self.assertEqual(abs(v1.elementwise()), (abs(v1.x), abs(v1.y))) - self.assertEqual(-v1.elementwise(), -v1) - self.assertEqual(+v1.elementwise(), +v1) - self.assertEqual(bool(v1.elementwise()), bool(v1)) - self.assertEqual(bool(Vector2().elementwise()), bool(Vector2())) - self.assertEqual(self.zeroVec.elementwise() ** 0, (1, 1)) - self.assertRaises(ValueError, lambda : pow(Vector2(-1, 0).elementwise(), 1.2)) - self.assertRaises(ZeroDivisionError, lambda : self.zeroVec.elementwise() ** -1) - self.assertRaises(ZeroDivisionError, lambda : self.zeroVec.elementwise() ** -1) - self.assertRaises(ZeroDivisionError, lambda : Vector2(1,1).elementwise() / 0) - self.assertRaises(ZeroDivisionError, lambda : Vector2(1,1).elementwise() // 0) - self.assertRaises(ZeroDivisionError, lambda : Vector2(1,1).elementwise() % 0) - self.assertRaises(ZeroDivisionError, lambda : Vector2(1,1).elementwise() / self.zeroVec) - self.assertRaises(ZeroDivisionError, lambda : Vector2(1,1).elementwise() // self.zeroVec) - self.assertRaises(ZeroDivisionError, lambda : Vector2(1,1).elementwise() % self.zeroVec) - self.assertRaises(ZeroDivisionError, lambda : 2 / self.zeroVec.elementwise()) - 
self.assertRaises(ZeroDivisionError, lambda : 2 // self.zeroVec.elementwise()) - self.assertRaises(ZeroDivisionError, lambda : 2 % self.zeroVec.elementwise()) - - def test_slerp(self): - self.assertRaises(ValueError, lambda : self.zeroVec.slerp(self.v1, .5)) - self.assertRaises(ValueError, lambda : self.v1.slerp(self.zeroVec, .5)) - self.assertRaises(ValueError, - lambda : self.zeroVec.slerp(self.zeroVec, .5)) - v1 = Vector2(1, 0) - v2 = Vector2(0, 1) - steps = 10 - angle_step = v1.angle_to(v2) / steps - for i, u in ((i, v1.slerp(v2, i/float(steps))) for i in range(steps+1)): - self.assertAlmostEqual(u.length(), 1) - self.assertAlmostEqual(v1.angle_to(u), i * angle_step) - self.assertEqual(u, v2) - - v1 = Vector2(100, 0) - v2 = Vector2(0, 10) - radial_factor = v2.length() / v1.length() - for i, u in ((i, v1.slerp(v2, -i/float(steps))) for i in range(steps+1)): - self.assertAlmostEqual(u.length(), (v2.length() - v1.length()) * (float(i)/steps) + v1.length()) - self.assertEqual(u, v2) - self.assertEqual(v1.slerp(v1, .5), v1) - self.assertEqual(v2.slerp(v2, .5), v2) - self.assertRaises(ValueError, lambda : v1.slerp(-v1, 0.5)) - - def test_lerp(self): - v1 = Vector2(0, 0) - v2 = Vector2(10, 10) - self.assertEqual(v1.lerp(v2, 0.5), (5, 5)) - self.assertRaises(ValueError, lambda : v1.lerp(v2, 2.5)) - - v1 = Vector2(-10, -5) - v2 = Vector2(10, 10) - self.assertEqual(v1.lerp(v2, 0.5), (0, 2.5)) - - def test_polar(self): - v = Vector2() - v.from_polar(self.v1.as_polar()) - self.assertEqual(self.v1, v) - self.assertEqual(self.e1.as_polar(), (1, 0)) - self.assertEqual(self.e2.as_polar(), (1, 90)) - self.assertEqual((2 * self.e2).as_polar(), (2, 90)) - self.assertRaises(TypeError, lambda : v.from_polar((None, None))) - self.assertRaises(TypeError, lambda : v.from_polar("ab")) - self.assertRaises(TypeError, lambda : v.from_polar((None, 1))) - self.assertRaises(TypeError, lambda : v.from_polar((1, 2, 3))) - self.assertRaises(TypeError, lambda : v.from_polar((1,))) - 
self.assertRaises(TypeError, lambda : v.from_polar(1, 2)) - v.from_polar((.5, 90)) - self.assertEqual(v, .5 * self.e2) - v.from_polar((1, 0)) - self.assertEqual(v, self.e1) - - def test_subclass_operation(self): - class Vector(pygame.math.Vector2): - pass - - vec = Vector() - - vec_a = Vector(2, 0) - vec_b = Vector(0, 1) - - vec_a + vec_b - vec_a *= 2 - - - -class Vector3TypeTest(unittest.TestCase): - - def setUp(self): - self.zeroVec = Vector3() - self.e1 = Vector3(1, 0, 0) - self.e2 = Vector3(0, 1, 0) - self.e3 = Vector3(0, 0, 1) - self.t1 = (1.2, 3.4, 9.6) - self.l1 = list(self.t1) - self.v1 = Vector3(self.t1) - self.t2 = (5.6, 7.8, 2.1) - self.l2 = list(self.t2) - self.v2 = Vector3(self.t2) - self.s1 = 5.6 - self.s2 = 7.8 - - def testConstructionDefault(self): - v = Vector3() - self.assertEqual(v.x, 0.) - self.assertEqual(v.y, 0.) - self.assertEqual(v.z, 0.) - - def testConstructionXYZ(self): - v = Vector3(1.2, 3.4, 9.6) - self.assertEqual(v.x, 1.2) - self.assertEqual(v.y, 3.4) - self.assertEqual(v.z, 9.6) - - def testConstructionTuple(self): - v = Vector3((1.2, 3.4, 9.6)) - self.assertEqual(v.x, 1.2) - self.assertEqual(v.y, 3.4) - self.assertEqual(v.z, 9.6) - - def testConstructionList(self): - v = Vector3([1.2, 3.4, -9.6]) - self.assertEqual(v.x, 1.2) - self.assertEqual(v.y, 3.4) - self.assertEqual(v.z, -9.6) - - def testConstructionVector3(self): - v = Vector3(Vector3(1.2, 3.4, -9.6)) - self.assertEqual(v.x, 1.2) - self.assertEqual(v.y, 3.4) - self.assertEqual(v.z, -9.6) - - def testConstructionScalar(self): - v = Vector3(1) - self.assertEqual(v.x, 1.) - self.assertEqual(v.y, 1.) - self.assertEqual(v.z, 1.) - - def testConstructionScalarKeywords(self): - v = Vector3(x=1) - self.assertEqual(v.x, 1.) - self.assertEqual(v.y, 1.) - self.assertEqual(v.z, 1.) - - def testConstructionKeywords(self): - v = Vector3(x=1, y=2, z=3) - self.assertEqual(v.x, 1.) - self.assertEqual(v.y, 2.) - self.assertEqual(v.z, 3.) 
- - def testConstructionMissing(self): - def assign_missing_value(): - v = Vector3(1, 2) - self.assertRaises(ValueError, assign_missing_value) - - def assign_missing_value(): - v = Vector3(x=1, y=2) - self.assertRaises(ValueError, assign_missing_value) - - def testAttributAccess(self): - tmp = self.v1.x - self.assertEqual(tmp, self.v1.x) - self.assertEqual(tmp, self.v1[0]) - tmp = self.v1.y - self.assertEqual(tmp, self.v1.y) - self.assertEqual(tmp, self.v1[1]) - tmp = self.v1.z - self.assertEqual(tmp, self.v1.z) - self.assertEqual(tmp, self.v1[2]) - self.v1.x = 3.141 - self.assertEqual(self.v1.x, 3.141) - self.v1.y = 3.141 - self.assertEqual(self.v1.y, 3.141) - self.v1.z = 3.141 - self.assertEqual(self.v1.z, 3.141) - def assign_nonfloat(): - v = Vector2() - v.x = "spam" - self.assertRaises(TypeError, assign_nonfloat) - - def testSequence(self): - v = Vector3(1.2, 3.4, -9.6) - self.assertEqual(len(v), 3) - self.assertEqual(v[0], 1.2) - self.assertEqual(v[1], 3.4) - self.assertEqual(v[2], -9.6) - self.assertRaises(IndexError, lambda : v[3]) - self.assertEqual(v[-1], -9.6) - self.assertEqual(v[-2], 3.4) - self.assertEqual(v[-3], 1.2) - self.assertRaises(IndexError, lambda : v[-4]) - self.assertEqual(v[:], [1.2, 3.4, -9.6]) - self.assertEqual(v[1:], [3.4, -9.6]) - self.assertEqual(v[:1], [1.2]) - self.assertEqual(v[:-1], [1.2, 3.4]) - self.assertEqual(v[1:2], [3.4]) - self.assertEqual(list(v), [1.2, 3.4, -9.6]) - self.assertEqual(tuple(v), (1.2, 3.4, -9.6)) - v[0] = 5.6 - v[1] = 7.8 - v[2] = -2.1 - self.assertEqual(v.x, 5.6) - self.assertEqual(v.y, 7.8) - self.assertEqual(v.z, -2.1) - v[:] = [9.1, 11.12, -13.41] - self.assertEqual(v.x, 9.1) - self.assertEqual(v.y, 11.12) - self.assertEqual(v.z, -13.41) - def overpopulate(): - v = Vector3() - v[:] = [1, 2, 3, 4] - self.assertRaises(ValueError, overpopulate) - def underpopulate(): - v = Vector3() - v[:] = [1] - self.assertRaises(ValueError, underpopulate) - def assign_nonfloat(): - v = Vector2() - v[0] = "spam" - 
self.assertRaises(TypeError, assign_nonfloat) - - def testExtendedSlicing(self): - # deletion - def delSlice(vec, start=None, stop=None, step=None): - if start is not None and stop is not None and step is not None: - del vec[start:stop:step] - elif start is not None and stop is None and step is not None: - del vec[start::step] - elif start is None and stop is None and step is not None: - del vec[::step] - v = Vector3(self.v1) - self.assertRaises(TypeError, delSlice, v, None, None, 2) - self.assertRaises(TypeError, delSlice, v, 1, None, 2) - self.assertRaises(TypeError, delSlice, v, 1, 2, 1) - - # assignment - v = Vector3(self.v1) - v[::2] = [-1.1, -2.2] - self.assertEqual(v, [-1.1, self.v1.y, -2.2]) - v = Vector3(self.v1) - v[::-2] = [10, 20] - self.assertEqual(v, [20, self.v1.y, 10]) - v = Vector3(self.v1) - v[::-1] = v - self.assertEqual(v, [self.v1.z, self.v1.y, self.v1.x]) - a = Vector3(self.v1) - b = Vector3(self.v1) - c = Vector3(self.v1) - a[1:2] = [2.2] - b[slice(1,2)] = [2.2] - c[1:2:] = (2.2,) - self.assertEqual(a, b) - self.assertEqual(a, c) - self.assertEqual(type(a), type(self.v1)) - self.assertEqual(type(b), type(self.v1)) - self.assertEqual(type(c), type(self.v1)) - - def testAdd(self): - v3 = self.v1 + self.v2 - self.assertTrue(isinstance(v3, type(self.v1))) - self.assertEqual(v3.x, self.v1.x + self.v2.x) - self.assertEqual(v3.y, self.v1.y + self.v2.y) - self.assertEqual(v3.z, self.v1.z + self.v2.z) - v3 = self.v1 + self.t2 - self.assertTrue(isinstance(v3, type(self.v1))) - self.assertEqual(v3.x, self.v1.x + self.t2[0]) - self.assertEqual(v3.y, self.v1.y + self.t2[1]) - self.assertEqual(v3.z, self.v1.z + self.t2[2]) - v3 = self.v1 + self.l2 - self.assertTrue(isinstance(v3, type(self.v1))) - self.assertEqual(v3.x, self.v1.x + self.l2[0]) - self.assertEqual(v3.y, self.v1.y + self.l2[1]) - self.assertEqual(v3.z, self.v1.z + self.l2[2]) - v3 = self.t1 + self.v2 - self.assertTrue(isinstance(v3, type(self.v1))) - self.assertEqual(v3.x, self.t1[0] + 
self.v2.x) - self.assertEqual(v3.y, self.t1[1] + self.v2.y) - self.assertEqual(v3.z, self.t1[2] + self.v2.z) - v3 = self.l1 + self.v2 - self.assertTrue(isinstance(v3, type(self.v1))) - self.assertEqual(v3.x, self.l1[0] + self.v2.x) - self.assertEqual(v3.y, self.l1[1] + self.v2.y) - self.assertEqual(v3.z, self.l1[2] + self.v2.z) - - def testSub(self): - v3 = self.v1 - self.v2 - self.assertTrue(isinstance(v3, type(self.v1))) - self.assertEqual(v3.x, self.v1.x - self.v2.x) - self.assertEqual(v3.y, self.v1.y - self.v2.y) - self.assertEqual(v3.z, self.v1.z - self.v2.z) - v3 = self.v1 - self.t2 - self.assertTrue(isinstance(v3, type(self.v1))) - self.assertEqual(v3.x, self.v1.x - self.t2[0]) - self.assertEqual(v3.y, self.v1.y - self.t2[1]) - self.assertEqual(v3.z, self.v1.z - self.t2[2]) - v3 = self.v1 - self.l2 - self.assertTrue(isinstance(v3, type(self.v1))) - self.assertEqual(v3.x, self.v1.x - self.l2[0]) - self.assertEqual(v3.y, self.v1.y - self.l2[1]) - self.assertEqual(v3.z, self.v1.z - self.l2[2]) - v3 = self.t1 - self.v2 - self.assertTrue(isinstance(v3, type(self.v1))) - self.assertEqual(v3.x, self.t1[0] - self.v2.x) - self.assertEqual(v3.y, self.t1[1] - self.v2.y) - self.assertEqual(v3.z, self.t1[2] - self.v2.z) - v3 = self.l1 - self.v2 - self.assertTrue(isinstance(v3, type(self.v1))) - self.assertEqual(v3.x, self.l1[0] - self.v2.x) - self.assertEqual(v3.y, self.l1[1] - self.v2.y) - self.assertEqual(v3.z, self.l1[2] - self.v2.z) - - def testScalarMultiplication(self): - v = self.s1 * self.v1 - self.assertTrue(isinstance(v, type(self.v1))) - self.assertEqual(v.x, self.s1 * self.v1.x) - self.assertEqual(v.y, self.s1 * self.v1.y) - self.assertEqual(v.z, self.s1 * self.v1.z) - v = self.v1 * self.s2 - self.assertEqual(v.x, self.v1.x * self.s2) - self.assertEqual(v.y, self.v1.y * self.s2) - self.assertEqual(v.z, self.v1.z * self.s2) - - def testScalarDivision(self): - v = self.v1 / self.s1 - self.assertTrue(isinstance(v, type(self.v1))) - self.assertAlmostEqual(v.x, 
self.v1.x / self.s1) - self.assertAlmostEqual(v.y, self.v1.y / self.s1) - self.assertAlmostEqual(v.z, self.v1.z / self.s1) - v = self.v1 // self.s2 - self.assertTrue(isinstance(v, type(self.v1))) - self.assertEqual(v.x, self.v1.x // self.s2) - self.assertEqual(v.y, self.v1.y // self.s2) - self.assertEqual(v.z, self.v1.z // self.s2) - - def testBool(self): - self.assertEqual(bool(self.zeroVec), False) - self.assertEqual(bool(self.v1), True) - self.assertTrue(not self.zeroVec) - self.assertTrue(self.v1) - - def testUnary(self): - v = +self.v1 - self.assertTrue(isinstance(v, type(self.v1))) - self.assertEqual(v.x, self.v1.x) - self.assertEqual(v.y, self.v1.y) - self.assertEqual(v.z, self.v1.z) - self.assertNotEqual(id(v), id(self.v1)) - v = -self.v1 - self.assertTrue(isinstance(v, type(self.v1))) - self.assertEqual(v.x, -self.v1.x) - self.assertEqual(v.y, -self.v1.y) - self.assertEqual(v.z, -self.v1.z) - self.assertNotEqual(id(v), id(self.v1)) - - def testCompare(self): - int_vec = Vector3(3, -2, 13) - flt_vec = Vector3(3.0, -2.0, 13.) 
- zero_vec = Vector3(0, 0, 0) - self.assertEqual(int_vec == flt_vec, True) - self.assertEqual(int_vec != flt_vec, False) - self.assertEqual(int_vec != zero_vec, True) - self.assertEqual(flt_vec == zero_vec, False) - self.assertEqual(int_vec == (3, -2, 13), True) - self.assertEqual(int_vec != (3, -2, 13), False) - self.assertEqual(int_vec != [0, 0], True) - self.assertEqual(int_vec == [0, 0], False) - self.assertEqual(int_vec != 5, True) - self.assertEqual(int_vec == 5, False) - self.assertEqual(int_vec != [3, -2, 0, 1], True) - self.assertEqual(int_vec == [3, -2, 0, 1], False) - - def testStr(self): - v = Vector3(1.2, 3.4, 5.6) - self.assertEqual(str(v), "[1.2, 3.4, 5.6]") - - def testRepr(self): - v = Vector3(1.2, 3.4, -9.6) - self.assertEqual(v.__repr__(), "") - self.assertEqual(v, Vector3(v.__repr__())) - - def testIter(self): - it = self.v1.__iter__() - if PY3: - next_ = it.__next__ - else: - next_ = it.next - self.assertEqual(next_(), self.v1[0]) - self.assertEqual(next_(), self.v1[1]) - self.assertEqual(next_(), self.v1[2]) - self.assertRaises(StopIteration, lambda : next_()) - it1 = self.v1.__iter__() - it2 = self.v1.__iter__() - self.assertNotEqual(id(it1), id(it2)) - self.assertEqual(id(it1), id(it1.__iter__())) - self.assertEqual(list(it1), list(it2)); - self.assertEqual(list(self.v1.__iter__()), self.l1) - idx = 0 - for val in self.v1: - self.assertEqual(val, self.v1[idx]) - idx += 1 - - def test_rotate(self): - v1 = Vector3(1, 0, 0) - axis = Vector3(0, 1, 0) - v2 = v1.rotate(90, axis) - v3 = v1.rotate(90 + 360, axis) - self.assertEqual(v1.x, 1) - self.assertEqual(v1.y, 0) - self.assertEqual(v1.z, 0) - self.assertEqual(v2.x, 0) - self.assertEqual(v2.y, 0) - self.assertEqual(v2.z, -1) - self.assertEqual(v3.x, v2.x) - self.assertEqual(v3.y, v2.y) - self.assertEqual(v3.z, v2.z) - v1 = Vector3(-1, -1, -1) - v2 = v1.rotate(-90, axis) - self.assertEqual(v2.x, 1) - self.assertEqual(v2.y, -1) - self.assertEqual(v2.z, -1) - v2 = v1.rotate(360, axis) - 
self.assertEqual(v1.x, v2.x) - self.assertEqual(v1.y, v2.y) - self.assertEqual(v1.z, v2.z) - v2 = v1.rotate(0, axis) - self.assertEqual(v1.x, v2.x) - self.assertEqual(v1.y, v2.y) - self.assertEqual(v1.z, v2.z) - # issue 214 - self.assertEqual(Vector3(0, 1, 0).rotate(359.9999999, Vector3(0, 0, 1)), - Vector3(0, 1, 0)) - - def test_rotate_ip(self): - v = Vector3(1, 0, 0) - axis = Vector3(0, 1, 0) - self.assertEqual(v.rotate_ip(90, axis), None) - self.assertEqual(v.x, 0) - self.assertEqual(v.y, 0) - self.assertEqual(v.z, -1) - v = Vector3(-1, -1, 1) - v.rotate_ip(-90, axis) - self.assertEqual(v.x, -1) - self.assertEqual(v.y, -1) - self.assertEqual(v.z, -1) - - def test_rotate_x(self): - v1 = Vector3(1, 0, 0) - v2 = v1.rotate_x(90) - v3 = v1.rotate_x(90 + 360) - self.assertEqual(v1.x, 1) - self.assertEqual(v1.y, 0) - self.assertEqual(v1.z, 0) - self.assertEqual(v2.x, 1) - self.assertEqual(v2.y, 0) - self.assertEqual(v2.z, 0) - self.assertEqual(v3.x, v2.x) - self.assertEqual(v3.y, v2.y) - self.assertEqual(v3.z, v2.z) - v1 = Vector3(-1, -1, -1) - v2 = v1.rotate_x(-90) - self.assertEqual(v2.x, -1) - self.assertAlmostEqual(v2.y, -1) - self.assertAlmostEqual(v2.z, 1) - v2 = v1.rotate_x(360) - self.assertAlmostEqual(v1.x, v2.x) - self.assertAlmostEqual(v1.y, v2.y) - self.assertAlmostEqual(v1.z, v2.z) - v2 = v1.rotate_x(0) - self.assertEqual(v1.x, v2.x) - self.assertAlmostEqual(v1.y, v2.y) - self.assertAlmostEqual(v1.z, v2.z) - - def test_rotate_x_ip(self): - v = Vector3(1, 0, 0) - self.assertEqual(v.rotate_x_ip(90), None) - self.assertEqual(v.x, 1) - self.assertEqual(v.y, 0) - self.assertEqual(v.z, 0) - v = Vector3(-1, -1, 1) - v.rotate_x_ip(-90) - self.assertEqual(v.x, -1) - self.assertAlmostEqual(v.y, 1) - self.assertAlmostEqual(v.z, 1) - - def test_rotate_y(self): - v1 = Vector3(1, 0, 0) - v2 = v1.rotate_y(90) - v3 = v1.rotate_y(90 + 360) - self.assertEqual(v1.x, 1) - self.assertEqual(v1.y, 0) - self.assertEqual(v1.z, 0) - self.assertAlmostEqual(v2.x, 0) - 
self.assertEqual(v2.y, 0) - self.assertAlmostEqual(v2.z, -1) - self.assertAlmostEqual(v3.x, v2.x) - self.assertEqual(v3.y, v2.y) - self.assertAlmostEqual(v3.z, v2.z) - v1 = Vector3(-1, -1, -1) - v2 = v1.rotate_y(-90) - self.assertAlmostEqual(v2.x, 1) - self.assertEqual(v2.y, -1) - self.assertAlmostEqual(v2.z, -1) - v2 = v1.rotate_y(360) - self.assertAlmostEqual(v1.x, v2.x) - self.assertEqual(v1.y, v2.y) - self.assertAlmostEqual(v1.z, v2.z) - v2 = v1.rotate_y(0) - self.assertEqual(v1.x, v2.x) - self.assertEqual(v1.y, v2.y) - self.assertEqual(v1.z, v2.z) - - def test_rotate_y_ip(self): - v = Vector3(1, 0, 0) - self.assertEqual(v.rotate_y_ip(90), None) - self.assertAlmostEqual(v.x, 0) - self.assertEqual(v.y, 0) - self.assertAlmostEqual(v.z, -1) - v = Vector3(-1, -1, 1) - v.rotate_y_ip(-90) - self.assertAlmostEqual(v.x, -1) - self.assertEqual(v.y, -1) - self.assertAlmostEqual(v.z, -1) - - def test_rotate_z(self): - v1 = Vector3(1, 0, 0) - v2 = v1.rotate_z(90) - v3 = v1.rotate_z(90 + 360) - self.assertEqual(v1.x, 1) - self.assertEqual(v1.y, 0) - self.assertEqual(v1.z, 0) - self.assertAlmostEqual(v2.x, 0) - self.assertAlmostEqual(v2.y, 1) - self.assertEqual(v2.z, 0) - self.assertAlmostEqual(v3.x, v2.x) - self.assertAlmostEqual(v3.y, v2.y) - self.assertEqual(v3.z, v2.z) - v1 = Vector3(-1, -1, -1) - v2 = v1.rotate_z(-90) - self.assertAlmostEqual(v2.x, -1) - self.assertAlmostEqual(v2.y, 1) - self.assertEqual(v2.z, -1) - v2 = v1.rotate_z(360) - self.assertAlmostEqual(v1.x, v2.x) - self.assertAlmostEqual(v1.y, v2.y) - self.assertEqual(v1.z, v2.z) - v2 = v1.rotate_z(0) - self.assertAlmostEqual(v1.x, v2.x) - self.assertAlmostEqual(v1.y, v2.y) - self.assertEqual(v1.z, v2.z) - - def test_rotate_z_ip(self): - v = Vector3(1, 0, 0) - self.assertEqual(v.rotate_z_ip(90), None) - self.assertAlmostEqual(v.x, 0) - self.assertAlmostEqual(v.y, 1) - self.assertEqual(v.z, 0) - v = Vector3(-1, -1, 1) - v.rotate_z_ip(-90) - self.assertAlmostEqual(v.x, -1) - self.assertAlmostEqual(v.y, 1) - 
self.assertEqual(v.z, 1) - - def test_normalize(self): - v = self.v1.normalize() - # length is 1 - self.assertAlmostEqual(v.x * v.x + v.y * v.y + v.z * v.z, 1.) - # v1 is unchanged - self.assertEqual(self.v1.x, self.l1[0]) - self.assertEqual(self.v1.y, self.l1[1]) - self.assertEqual(self.v1.z, self.l1[2]) - # v2 is paralell to v1 (tested via cross product) - cross = ((self.v1.y * v.z - self.v1.z * v.y) ** 2 + - (self.v1.z * v.x - self.v1.x * v.z) ** 2 + - (self.v1.x * v.y - self.v1.y * v.x) ** 2) - self.assertAlmostEqual(cross, 0.) - self.assertRaises(ValueError, lambda : self.zeroVec.normalize()) - - def test_normalize_ip(self): - v = +self.v1 - # v has length != 1 before normalizing - self.assertNotEqual(v.x * v.x + v.y * v.y + v.z * v.z, 1.) - # inplace operations should return None - self.assertEqual(v.normalize_ip(), None) - # length is 1 - self.assertAlmostEqual(v.x * v.x + v.y * v.y + v.z * v.z, 1.) - # v2 is paralell to v1 (tested via cross product) - cross = ((self.v1.y * v.z - self.v1.z * v.y) ** 2 + - (self.v1.z * v.x - self.v1.x * v.z) ** 2 + - (self.v1.x * v.y - self.v1.y * v.x) ** 2) - self.assertAlmostEqual(cross, 0.) 
- self.assertRaises(ValueError, lambda : self.zeroVec.normalize_ip()) - - def test_is_normalized(self): - self.assertEqual(self.v1.is_normalized(), False) - v = self.v1.normalize() - self.assertEqual(v.is_normalized(), True) - self.assertEqual(self.e2.is_normalized(), True) - self.assertEqual(self.zeroVec.is_normalized(), False) - - def test_cross(self): - def cross(a, b): - return Vector3(a[1] * b[2] - a[2] * b[1], - a[2] * b[0] - a[0] * b[2], - a[0] * b[1] - a[1] * b[0]) - self.assertEqual(self.v1.cross(self.v2), cross(self.v1, self.v2)) - self.assertEqual(self.v1.cross(self.l2), cross(self.v1, self.l2)) - self.assertEqual(self.v1.cross(self.t2), cross(self.v1, self.t2)) - self.assertEqual(self.v1.cross(self.v2), -self.v2.cross(self.v1)) - self.assertEqual(self.v1.cross(self.v1), self.zeroVec) - - def test_dot(self): - self.assertAlmostEqual(self.v1.dot(self.v2), - self.v1.x * self.v2.x + self.v1.y * self.v2.y + self.v1.z * self.v2.z) - self.assertAlmostEqual(self.v1.dot(self.l2), - self.v1.x * self.l2[0] + self.v1.y * self.l2[1] + self.v1.z * self.l2[2]) - self.assertAlmostEqual(self.v1.dot(self.t2), - self.v1.x * self.t2[0] + self.v1.y * self.t2[1] + self.v1.z * self.t2[2]) - self.assertAlmostEqual(self.v1.dot(self.v2), self.v2.dot(self.v1)) - self.assertAlmostEqual(self.v1.dot(self.v2), self.v1 * self.v2) - - def test_angle_to(self): - self.assertEqual(Vector3(1, 1, 0).angle_to((-1, 1, 0)), 90) - self.assertEqual(Vector3(1, 0, 0).angle_to((0, 0, -1)), 90) - self.assertEqual(Vector3(1, 0, 0).angle_to((-1, 0, 1)), 135) - self.assertEqual(abs(Vector3(1, 0, 1).angle_to((-1, 0, -1))), 180) - # if we rotate v1 by the angle_to v2 around their cross product - # we should look in the same direction - self.assertEqual(self.v1.rotate(self.v1.angle_to(self.v2), self.v1.cross(self.v2)).normalize(), - self.v2.normalize()) - - def test_scale_to_length(self): - v = Vector3(1, 1, 1) - v.scale_to_length(2.5) - self.assertEqual(v, Vector3(2.5, 2.5, 2.5) / math.sqrt(3)) - 
self.assertRaises(ValueError, lambda : self.zeroVec.scale_to_length(1)) - self.assertEqual(v.scale_to_length(0), None) - self.assertEqual(v, self.zeroVec) - - def test_length(self): - self.assertEqual(Vector3(3, 4, 5).length(), math.sqrt(3 * 3 + 4 * 4 + 5 * 5)) - self.assertEqual(Vector3(-3, 4, 5).length(), math.sqrt(-3 * -3 + 4 * 4 + 5 * 5)) - self.assertEqual(self.zeroVec.length(), 0) - - def test_length_squared(self): - self.assertEqual(Vector3(3, 4, 5).length_squared(), 3 * 3 + 4 * 4 + 5 * 5) - self.assertEqual(Vector3(-3, 4, 5).length_squared(), -3 * -3 + 4 * 4 + 5 * 5) - self.assertEqual(self.zeroVec.length_squared(), 0) - - def test_reflect(self): - v = Vector3(1, -1, 1) - n = Vector3(0, 1, 0) - self.assertEqual(v.reflect(n), Vector3(1, 1, 1)) - self.assertEqual(v.reflect(3*n), v.reflect(n)) - self.assertEqual(v.reflect(-v), -v) - self.assertRaises(ValueError, lambda : v.reflect(self.zeroVec)) - - def test_reflect_ip(self): - v1 = Vector3(1, -1, 1) - v2 = Vector3(v1) - n = Vector3(0, 1, 0) - self.assertEqual(v2.reflect_ip(n), None) - self.assertEqual(v2, Vector3(1, 1, 1)) - v2 = Vector3(v1) - v2.reflect_ip(3*n) - self.assertEqual(v2, v1.reflect(n)) - v2 = Vector3(v1) - v2.reflect_ip(-v1) - self.assertEqual(v2, -v1) - self.assertRaises(ValueError, lambda : v2.reflect_ip(self.zeroVec)) - - def test_distance_to(self): - diff = self.v1 - self.v2 - self.assertEqual(self.e1.distance_to(self.e2), math.sqrt(2)) - self.assertEqual(self.v1.distance_to(self.v2), - math.sqrt(diff.x * diff.x + diff.y * diff.y + diff.z * diff.z)) - self.assertEqual(self.v1.distance_to(self.v1), 0) - self.assertEqual(self.v1.distance_to(self.v2), - self.v2.distance_to(self.v1)) - - def test_distance_squared_to(self): - diff = self.v1 - self.v2 - self.assertEqual(self.e1.distance_squared_to(self.e2), 2) - self.assertAlmostEqual(self.v1.distance_squared_to(self.v2), - diff.x * diff.x + diff.y * diff.y + diff.z * diff.z) - self.assertEqual(self.v1.distance_squared_to(self.v1), 0) - 
self.assertEqual(self.v1.distance_squared_to(self.v2), - self.v2.distance_squared_to(self.v1)) - - def test_swizzle(self): - self.assertTrue(hasattr(pygame.math, "enable_swizzling")) - self.assertTrue(hasattr(pygame.math, "disable_swizzling")) - # swizzling enabled by default - pygame.math.disable_swizzling() - self.assertRaises(AttributeError, lambda : self.v1.yx) - pygame.math.enable_swizzling() - - self.assertEqual(self.v1.yxz, (self.v1.y, self.v1.x, self.v1.z)) - self.assertEqual(self.v1.xxyyzzxyz, (self.v1.x, self.v1.x, self.v1.y, - self.v1.y, self.v1.z, self.v1.z, - self.v1.x, self.v1.y, self.v1.z)) - self.v1.xyz = self.t2 - self.assertEqual(self.v1, self.t2) - self.v1.zxy = self.t2 - self.assertEqual(self.v1, (self.t2[1], self.t2[2], self.t2[0])) - self.v1.yz = self.t2[:2] - self.assertEqual(self.v1, (self.t2[1], self.t2[0], self.t2[1])) - self.assertEqual(type(self.v1), Vector3) - - @unittest.skipIf(IS_PYPY, "known pypy failure") - def test_invalid_swizzle(self): - def invalidSwizzleX(): - Vector3().xx = (1, 2) - def invalidSwizzleY(): - Vector3().yy = (1, 2) - def invalidSwizzleZ(): - Vector3().zz = (1, 2) - def invalidSwizzleW(): - Vector3().ww = (1, 2) - self.assertRaises(AttributeError, invalidSwizzleX) - self.assertRaises(AttributeError, invalidSwizzleY) - self.assertRaises(AttributeError, invalidSwizzleZ) - self.assertRaises(AttributeError, invalidSwizzleW) - def invalidAssignment(): - Vector3().xy = 3 - self.assertRaises(TypeError, invalidAssignment) - - def test_swizzle_return_types(self): - self.assertEqual(type(self.v1.x), float) - self.assertEqual(type(self.v1.xy), Vector2) - self.assertEqual(type(self.v1.xyz), Vector3) - # but we don't have vector4 or above... so tuple. - self.assertEqual(type(self.v1.xyxy), tuple) - self.assertEqual(type(self.v1.xyxyx), tuple) - - def test_dir_works(self): - # not every single one of the attributes... 
- attributes = set(['lerp', 'normalize', 'normalize_ip', 'reflect', 'slerp', 'x', 'y']) - # check if this selection of attributes are all there. - self.assertTrue(attributes.issubset(set(dir(self.v1)))) - - def test_elementwise(self): - # behaviour for "elementwise op scalar" - self.assertEqual(self.v1.elementwise() + self.s1, - (self.v1.x + self.s1, self.v1.y + self.s1, self.v1.z + self.s1)) - self.assertEqual(self.v1.elementwise() - self.s1, - (self.v1.x - self.s1, self.v1.y - self.s1, self.v1.z - self.s1)) - self.assertEqual(self.v1.elementwise() * self.s2, - (self.v1.x * self.s2, self.v1.y * self.s2, self.v1.z * self.s2)) - self.assertEqual(self.v1.elementwise() / self.s2, - (self.v1.x / self.s2, self.v1.y / self.s2, self.v1.z / self.s2)) - self.assertEqual(self.v1.elementwise() // self.s1, - (self.v1.x // self.s1, self.v1.y // self.s1, self.v1.z // self.s1)) - self.assertEqual(self.v1.elementwise() ** self.s1, - (self.v1.x ** self.s1, self.v1.y ** self.s1, self.v1.z ** self.s1)) - self.assertEqual(self.v1.elementwise() % self.s1, - (self.v1.x % self.s1, self.v1.y % self.s1, self.v1.z % self.s1)) - self.assertEqual(self.v1.elementwise() > self.s1, - self.v1.x > self.s1 and self.v1.y > self.s1 and self.v1.z > self.s1) - self.assertEqual(self.v1.elementwise() < self.s1, - self.v1.x < self.s1 and self.v1.y < self.s1 and self.v1.z < self.s1) - self.assertEqual(self.v1.elementwise() == self.s1, - self.v1.x == self.s1 and self.v1.y == self.s1 and self.v1.z == self.s1) - self.assertEqual(self.v1.elementwise() != self.s1, - self.v1.x != self.s1 and self.v1.y != self.s1 and self.v1.z != self.s1) - self.assertEqual(self.v1.elementwise() >= self.s1, - self.v1.x >= self.s1 and self.v1.y >= self.s1 and self.v1.z >= self.s1) - self.assertEqual(self.v1.elementwise() <= self.s1, - self.v1.x <= self.s1 and self.v1.y <= self.s1 and self.v1.z <= self.s1) - # behaviour for "scalar op elementwise" - self.assertEqual(5 + self.v1.elementwise(), Vector3(5, 5, 5) + self.v1) - 
self.assertEqual(3.5 - self.v1.elementwise(), Vector3(3.5, 3.5, 3.5) - self.v1) - self.assertEqual(7.5 * self.v1.elementwise() , 7.5 * self.v1) - self.assertEqual(-3.5 / self.v1.elementwise(), (-3.5 / self.v1.x, -3.5 / self.v1.y, -3.5 / self.v1.z)) - self.assertEqual(-3.5 // self.v1.elementwise(), (-3.5 // self.v1.x, -3.5 // self.v1.y, -3.5 // self.v1.z)) - self.assertEqual(-3.5 ** self.v1.elementwise(), (-3.5 ** self.v1.x, -3.5 ** self.v1.y, -3.5 ** self.v1.z)) - self.assertEqual(3 % self.v1.elementwise(), (3 % self.v1.x, 3 % self.v1.y, 3 % self.v1.z)) - self.assertEqual(2 < self.v1.elementwise(), 2 < self.v1.x and 2 < self.v1.y and 2 < self.v1.z) - self.assertEqual(2 > self.v1.elementwise(), 2 > self.v1.x and 2 > self.v1.y and 2 > self.v1.z) - self.assertEqual(1 == self.v1.elementwise(), 1 == self.v1.x and 1 == self.v1.y and 1 == self.v1.z) - self.assertEqual(1 != self.v1.elementwise(), 1 != self.v1.x and 1 != self.v1.y and 1 != self.v1.z) - self.assertEqual(2 <= self.v1.elementwise(), 2 <= self.v1.x and 2 <= self.v1.y and 2 <= self.v1.z) - self.assertEqual(-7 >= self.v1.elementwise(), -7 >= self.v1.x and -7 >= self.v1.y and -7 >= self.v1.z) - self.assertEqual(-7 != self.v1.elementwise(), -7 != self.v1.x and -7 != self.v1.y and -7 != self.v1.z) - - # behaviour for "elementwise op vector" - self.assertEqual(type(self.v1.elementwise() * self.v2), type(self.v1)) - self.assertEqual(self.v1.elementwise() + self.v2, self.v1 + self.v2) - self.assertEqual(self.v1.elementwise() + self.v2, self.v1 + self.v2) - self.assertEqual(self.v1.elementwise() - self.v2, self.v1 - self.v2) - self.assertEqual(self.v1.elementwise() * self.v2, (self.v1.x * self.v2.x, self.v1.y * self.v2.y, self.v1.z * self.v2.z)) - self.assertEqual(self.v1.elementwise() / self.v2, (self.v1.x / self.v2.x, self.v1.y / self.v2.y, self.v1.z / self.v2.z)) - self.assertEqual(self.v1.elementwise() // self.v2, (self.v1.x // self.v2.x, self.v1.y // self.v2.y, self.v1.z // self.v2.z)) - 
self.assertEqual(self.v1.elementwise() ** self.v2, (self.v1.x ** self.v2.x, self.v1.y ** self.v2.y, self.v1.z ** self.v2.z)) - self.assertEqual(self.v1.elementwise() % self.v2, (self.v1.x % self.v2.x, self.v1.y % self.v2.y, self.v1.z % self.v2.z)) - self.assertEqual(self.v1.elementwise() > self.v2, self.v1.x > self.v2.x and self.v1.y > self.v2.y and self.v1.z > self.v2.z) - self.assertEqual(self.v1.elementwise() < self.v2, self.v1.x < self.v2.x and self.v1.y < self.v2.y and self.v1.z < self.v2.z) - self.assertEqual(self.v1.elementwise() >= self.v2, self.v1.x >= self.v2.x and self.v1.y >= self.v2.y and self.v1.z >= self.v2.z) - self.assertEqual(self.v1.elementwise() <= self.v2, self.v1.x <= self.v2.x and self.v1.y <= self.v2.y and self.v1.z <= self.v2.z) - self.assertEqual(self.v1.elementwise() == self.v2, self.v1.x == self.v2.x and self.v1.y == self.v2.y and self.v1.z == self.v2.z) - self.assertEqual(self.v1.elementwise() != self.v2, self.v1.x != self.v2.x and self.v1.y != self.v2.y and self.v1.z != self.v2.z) - # behaviour for "vector op elementwise" - self.assertEqual(self.v2 + self.v1.elementwise(), self.v2 + self.v1) - self.assertEqual(self.v2 - self.v1.elementwise(), self.v2 - self.v1) - self.assertEqual(self.v2 * self.v1.elementwise(), (self.v2.x * self.v1.x, self.v2.y * self.v1.y, self.v2.z * self.v1.z)) - self.assertEqual(self.v2 / self.v1.elementwise(), (self.v2.x / self.v1.x, self.v2.y / self.v1.y, self.v2.z / self.v1.z)) - self.assertEqual(self.v2 // self.v1.elementwise(), (self.v2.x // self.v1.x, self.v2.y // self.v1.y, self.v2.z // self.v1.z)) - self.assertEqual(self.v2 ** self.v1.elementwise(), (self.v2.x ** self.v1.x, self.v2.y ** self.v1.y, self.v2.z ** self.v1.z)) - self.assertEqual(self.v2 % self.v1.elementwise(), (self.v2.x % self.v1.x, self.v2.y % self.v1.y, self.v2.z % self.v1.z)) - self.assertEqual(self.v2 < self.v1.elementwise(), self.v2.x < self.v1.x and self.v2.y < self.v1.y and self.v2.z < self.v1.z) - self.assertEqual(self.v2 > 
self.v1.elementwise(), self.v2.x > self.v1.x and self.v2.y > self.v1.y and self.v2.z > self.v1.z) - self.assertEqual(self.v2 <= self.v1.elementwise(), self.v2.x <= self.v1.x and self.v2.y <= self.v1.y and self.v2.z <= self.v1.z) - self.assertEqual(self.v2 >= self.v1.elementwise(), self.v2.x >= self.v1.x and self.v2.y >= self.v1.y and self.v2.z >= self.v1.z) - self.assertEqual(self.v2 == self.v1.elementwise(), self.v2.x == self.v1.x and self.v2.y == self.v1.y and self.v2.z == self.v1.z) - self.assertEqual(self.v2 != self.v1.elementwise(), self.v2.x != self.v1.x and self.v2.y != self.v1.y and self.v2.z != self.v1.z) - - # behaviour for "elementwise op elementwise" - self.assertEqual(self.v2.elementwise() + self.v1.elementwise(), self.v2 + self.v1) - self.assertEqual(self.v2.elementwise() - self.v1.elementwise(), self.v2 - self.v1) - self.assertEqual(self.v2.elementwise() * self.v1.elementwise(), - (self.v2.x * self.v1.x, self.v2.y * self.v1.y, self.v2.z * self.v1.z)) - self.assertEqual(self.v2.elementwise() / self.v1.elementwise(), - (self.v2.x / self.v1.x, self.v2.y / self.v1.y, self.v2.z / self.v1.z)) - self.assertEqual(self.v2.elementwise() // self.v1.elementwise(), - (self.v2.x // self.v1.x, self.v2.y // self.v1.y, self.v2.z // self.v1.z)) - self.assertEqual(self.v2.elementwise() ** self.v1.elementwise(), - (self.v2.x ** self.v1.x, self.v2.y ** self.v1.y, self.v2.z ** self.v1.z)) - self.assertEqual(self.v2.elementwise() % self.v1.elementwise(), - (self.v2.x % self.v1.x, self.v2.y % self.v1.y, self.v2.z % self.v1.z)) - self.assertEqual(self.v2.elementwise() < self.v1.elementwise(), - self.v2.x < self.v1.x and self.v2.y < self.v1.y and self.v2.z < self.v1.z) - self.assertEqual(self.v2.elementwise() > self.v1.elementwise(), - self.v2.x > self.v1.x and self.v2.y > self.v1.y and self.v2.z > self.v1.z) - self.assertEqual(self.v2.elementwise() <= self.v1.elementwise(), - self.v2.x <= self.v1.x and self.v2.y <= self.v1.y and self.v2.z <= self.v1.z) - 
self.assertEqual(self.v2.elementwise() >= self.v1.elementwise(), - self.v2.x >= self.v1.x and self.v2.y >= self.v1.y and self.v2.z >= self.v1.z) - self.assertEqual(self.v2.elementwise() == self.v1.elementwise(), - self.v2.x == self.v1.x and self.v2.y == self.v1.y and self.v2.z == self.v1.z) - self.assertEqual(self.v2.elementwise() != self.v1.elementwise(), - self.v2.x != self.v1.x and self.v2.y != self.v1.y and self.v2.z != self.v1.z) - - # other behaviour - self.assertEqual(abs(self.v1.elementwise()), (abs(self.v1.x), abs(self.v1.y), abs(self.v1.z))) - self.assertEqual(-self.v1.elementwise(), -self.v1) - self.assertEqual(+self.v1.elementwise(), +self.v1) - self.assertEqual(bool(self.v1.elementwise()), bool(self.v1)) - self.assertEqual(bool(Vector3().elementwise()), bool(Vector3())) - self.assertEqual(self.zeroVec.elementwise() ** 0, (1, 1, 1)) - self.assertRaises(ValueError, lambda : pow(Vector3(-1, 0, 0).elementwise(), 1.2)) - self.assertRaises(ZeroDivisionError, lambda : self.zeroVec.elementwise() ** -1) - self.assertRaises(ZeroDivisionError, lambda : Vector3(1,1,1).elementwise() / 0) - self.assertRaises(ZeroDivisionError, lambda : Vector3(1,1,1).elementwise() // 0) - self.assertRaises(ZeroDivisionError, lambda : Vector3(1,1,1).elementwise() % 0) - self.assertRaises(ZeroDivisionError, lambda : Vector3(1,1,1).elementwise() / self.zeroVec) - self.assertRaises(ZeroDivisionError, lambda : Vector3(1,1,1).elementwise() // self.zeroVec) - self.assertRaises(ZeroDivisionError, lambda : Vector3(1,1,1).elementwise() % self.zeroVec) - self.assertRaises(ZeroDivisionError, lambda : 2 / self.zeroVec.elementwise()) - self.assertRaises(ZeroDivisionError, lambda : 2 // self.zeroVec.elementwise()) - self.assertRaises(ZeroDivisionError, lambda : 2 % self.zeroVec.elementwise()) - - - def test_slerp(self): - self.assertRaises(ValueError, lambda : self.zeroVec.slerp(self.v1, .5)) - self.assertRaises(ValueError, lambda : self.v1.slerp(self.zeroVec, .5)) - self.assertRaises(ValueError, 
- lambda : self.zeroVec.slerp(self.zeroVec, .5)) - steps = 10 - angle_step = self.e1.angle_to(self.e2) / steps - for i, u in ((i, self.e1.slerp(self.e2, i/float(steps))) for i in range(steps+1)): - self.assertAlmostEqual(u.length(), 1) - self.assertAlmostEqual(self.e1.angle_to(u), i * angle_step) - self.assertEqual(u, self.e2) - - v1 = Vector3(100, 0, 0) - v2 = Vector3(0, 10, 7) - radial_factor = v2.length() / v1.length() - for i, u in ((i, v1.slerp(v2, -i/float(steps))) for i in range(steps+1)): - self.assertAlmostEqual(u.length(), (v2.length() - v1.length()) * (float(i)/steps) + v1.length()) - self.assertEqual(u, v2) - self.assertEqual(v1.slerp(v1, .5), v1) - self.assertEqual(v2.slerp(v2, .5), v2) - self.assertRaises(ValueError, lambda : v1.slerp(-v1, 0.5)) - - def test_lerp(self): - v1 = Vector3(0, 0, 0) - v2 = Vector3(10, 10, 10) - self.assertEqual(v1.lerp(v2, 0.5), (5, 5, 5)) - self.assertRaises(ValueError, lambda : v1.lerp(v2, 2.5)) - - v1 = Vector3(-10, -5, -20) - v2 = Vector3(10, 10, -20) - self.assertEqual(v1.lerp(v2, 0.5), (0, 2.5, -20)) - - def test_spherical(self): - v = Vector3() - v.from_spherical(self.v1.as_spherical()) - self.assertEqual(self.v1, v) - self.assertEqual(self.e1.as_spherical(), (1, 90, 0)) - self.assertEqual(self.e2.as_spherical(), (1, 90, 90)) - self.assertEqual(self.e3.as_spherical(), (1, 0, 0)) - self.assertEqual((2 * self.e2).as_spherical(), (2, 90, 90)) - self.assertRaises(TypeError, lambda : v.from_spherical((None, None, None))) - self.assertRaises(TypeError, lambda : v.from_spherical("abc")) - self.assertRaises(TypeError, lambda : v.from_spherical((None, 1, 2))) - self.assertRaises(TypeError, lambda : v.from_spherical((1, 2, 3, 4))) - self.assertRaises(TypeError, lambda : v.from_spherical((1, 2))) - self.assertRaises(TypeError, lambda : v.from_spherical(1, 2, 3)) - v.from_spherical((.5, 90, 90)) - self.assertEqual(v, .5 * self.e2) - - def test_inplace_operators(self): - - v = Vector3(1,1,1) - v *= 2 - self.assertEqual(v, 
(2.0,2.0,2.0)) - - v = Vector3(4,4,4) - v /= 2 - self.assertEqual(v, (2.0,2.0,2.0)) - - - v = Vector3(3.0,3.0,3.0) - v -= (1,1,1) - self.assertEqual(v, (2.0,2.0,2.0)) - - v = Vector3(3.0,3.0,3.0) - v += (1,1,1) - self.assertEqual(v, (4.0,4.0,4.0)) - - def test_pickle(self): - import pickle - v2 = Vector2(1, 2) - v3 = Vector3(1, 2, 3) - self.assertEqual(pickle.loads(pickle.dumps(v2)), v2) - self.assertEqual(pickle.loads(pickle.dumps(v3)), v3) - - - def test_subclass_operation(self): - class Vector(pygame.math.Vector3): - pass - v = Vector(2.0, 2.0, 2.0) - v *= 2 - self.assertEqual(v, (4.0, 4.0, 4.0)) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/midi_tags.py b/venv/lib/python3.7/site-packages/pygame/tests/midi_tags.py deleted file mode 100644 index c6c9454..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/midi_tags.py +++ /dev/null @@ -1 +0,0 @@ -__tags__ = ['interactive'] diff --git a/venv/lib/python3.7/site-packages/pygame/tests/midi_test.py b/venv/lib/python3.7/site-packages/pygame/tests/midi_test.py deleted file mode 100644 index b8d1b6a..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/midi_test.py +++ /dev/null @@ -1,385 +0,0 @@ -import unittest -import os -import sys -import time - -import pygame -import pygame.midi -import pygame.compat -from pygame.locals import * - - -class MidiInputTest(unittest.TestCase): - - def setUp(self): - pygame.midi.init() - in_id = pygame.midi.get_default_input_id() - if in_id != -1: - self.midi_input = pygame.midi.Input(in_id) - else: - self.midi_input = None - - def tearDown(self): - if self.midi_input: - self.midi_input.close() - pygame.midi.quit() - - def test_Input(self): - """|tags: interactive| - """ - - i = pygame.midi.get_default_input_id() - if self.midi_input: - self.assertEqual(self.midi_input.device_id, i) - - # try feeding it an input id. - i = pygame.midi.get_default_output_id() - - # can handle some invalid input too. 
- self.assertRaises(pygame.midi.MidiException, pygame.midi.Input, i) - self.assertRaises(pygame.midi.MidiException, pygame.midi.Input, 9009) - self.assertRaises(pygame.midi.MidiException, pygame.midi.Input, -1) - self.assertRaises(TypeError, pygame.midi.Input, "1234") - self.assertRaises(OverflowError, pygame.midi.Input, pow(2, 99)) - - def test_poll(self): - - if not self.midi_input: - self.skipTest('No midi Input device') - - self.assertFalse(self.midi_input.poll()) - # TODO fake some incoming data - - pygame.midi.quit() - self.assertRaises(RuntimeError, self.midi_input.poll) - # set midi_input to None to avoid error in tearDown - self.midi_input = None - - def test_read(self): - - if not self.midi_input: - self.skipTest('No midi Input device') - - read = self.midi_input.read(5) - self.assertEqual(read, []) - # TODO fake some incoming data - - pygame.midi.quit() - self.assertRaises(RuntimeError, self.midi_input.read, 52) - # set midi_input to None to avoid error in tearDown - self.midi_input = None - - def test_close(self): - if not self.midi_input: - self.skipTest('No midi Input device') - - self.assertIsNotNone(self.midi_input._input) - self.midi_input.close() - self.assertIsNone(self.midi_input._input) - - -class MidiOutputTest(unittest.TestCase): - - def setUp(self): - pygame.midi.init() - m_out_id = pygame.midi.get_default_output_id() - if m_out_id != -1: - self.midi_output = pygame.midi.Output(m_out_id) - else: - self.midi_output = None - - def tearDown(self): - if self.midi_output: - self.midi_output.close() - pygame.midi.quit() - - def test_Output(self): - """|tags: interactive| - """ - i = pygame.midi.get_default_output_id() - if self.midi_output: - self.assertEqual(self.midi_output.device_id, i) - - # try feeding it an input id. - i = pygame.midi.get_default_input_id() - - # can handle some invalid input too. 
- self.assertRaises(pygame.midi.MidiException, pygame.midi.Output, i) - self.assertRaises(pygame.midi.MidiException, pygame.midi.Output, 9009) - self.assertRaises(pygame.midi.MidiException, pygame.midi.Output, -1) - self.assertRaises(TypeError, pygame.midi.Output,"1234") - self.assertRaises(OverflowError, pygame.midi.Output, pow(2,99)) - - def test_note_off(self): - """|tags: interactive| - """ - - if self.midi_output: - out = self.midi_output - out.note_on(5, 30, 0) - out.note_off(5, 30, 0) - with self.assertRaises(ValueError) as cm: - out.note_off(5, 30, 25) - self.assertEqual(str(cm.exception), "Channel not between 0 and 15.") - with self.assertRaises(ValueError) as cm: - out.note_off(5, 30, -1) - self.assertEqual(str(cm.exception), "Channel not between 0 and 15.") - - def test_note_on(self): - """|tags: interactive| - """ - - if self.midi_output: - out = self.midi_output - out.note_on(5, 30, 0) - out.note_on(5, 42, 10) - with self.assertRaises(ValueError) as cm: - out.note_on(5, 30, 25) - self.assertEqual(str(cm.exception), "Channel not between 0 and 15.") - with self.assertRaises(ValueError) as cm: - out.note_on(5, 30, -1) - self.assertEqual(str(cm.exception), "Channel not between 0 and 15.") - - def test_set_instrument(self): - - if not self.midi_output: - self.skipTest('No midi device') - out = self.midi_output - out.set_instrument(5) - out.set_instrument(42, channel=2) - with self.assertRaises(ValueError) as cm: - out.set_instrument(-6) - self.assertEqual(str(cm.exception), "Undefined instrument id: -6") - with self.assertRaises(ValueError) as cm: - out.set_instrument(156) - self.assertEqual(str(cm.exception), "Undefined instrument id: 156") - with self.assertRaises(ValueError) as cm: - out.set_instrument(5, -1) - self.assertEqual(str(cm.exception), "Channel not between 0 and 15.") - with self.assertRaises(ValueError) as cm: - out.set_instrument(5, 16) - self.assertEqual(str(cm.exception), "Channel not between 0 and 15.") - - def test_write(self): - if not 
self.midi_output: - self.skipTest('No midi device') - - out = self.midi_output - out.write([[[0xc0, 0, 0], 20000]]) - # is equivalent to - out.write([[[0xc0], 20000]]) - # example from the docstring : - # 1. choose program change 1 at time 20000 and - # 2. send note 65 with velocity 100 500 ms later - out.write([ - [[0xc0, 0, 0], 20000], - [[0x90, 60, 100], 20500] - ]) - - out.write([]) - verrry_long = [[[0x90, 60, i % 100], 20000 + 100 * i] for i in range(1024)] - out.write(verrry_long) - - too_long = [[[0x90, 60, i % 100], 20000 + 100 * i] for i in range(1025)] - self.assertRaises(IndexError, out.write, too_long) - # test wrong data - with self.assertRaises(TypeError) as cm: - out.write('Non sens ?') - error_msg = "unsupported operand type(s) for &: 'str' and 'int'" - self.assertEqual(str(cm.exception), error_msg) - - with self.assertRaises(TypeError) as cm: - out.write(["Hey what's that?"]) - self.assertEqual(str(cm.exception), error_msg) - - def test_write_short(self): - """|tags: interactive| - """ - if not self.midi_output: - self.skipTest('No midi device') - - out = self.midi_output - # program change - out.write_short(0xc0) - # put a note on, then off. 
- out.write_short(0x90, 65, 100) - out.write_short(0x80, 65, 100) - out.write_short(0x90) - - def test_write_sys_ex(self): - if not self.midi_output: - self.skipTest('No midi device') - - out = self.midi_output - out.write_sys_ex(pygame.midi.time(), - [0xF0, 0x7D, 0x10, 0x11, 0x12, 0x13, 0xF7]) - - def test_pitch_bend(self): - # FIXME : pitch_bend in the code, but not in documentation - if not self.midi_output: - self.skipTest('No midi device') - - out = self.midi_output - with self.assertRaises(ValueError) as cm: - out.pitch_bend(5, channel=-1) - self.assertEqual(str(cm.exception), "Channel not between 0 and 15.") - with self.assertRaises(ValueError) as cm: - out.pitch_bend(5, channel=16) - with self.assertRaises(ValueError) as cm: - out.pitch_bend(-10001, 1) - self.assertEqual(str(cm.exception), "Pitch bend value must be between " - "-8192 and +8191, not -10001.") - with self.assertRaises(ValueError) as cm: - out.pitch_bend(10665, 2) - - def test_close(self): - if not self.midi_output: - self.skipTest('No midi device') - self.assertIsNotNone(self.midi_output._output) - self.midi_output.close() - self.assertIsNone(self.midi_output._output) - - def test_abort(self): - if not self.midi_output: - self.skipTest('No midi device') - self.assertEqual(self.midi_output._aborted, 0) - self.midi_output.abort() - self.assertEqual(self.midi_output._aborted, 1) - - -class MidiModuleTest(unittest.TestCase): - - def setUp(self): - pygame.midi.init() - - def tearDown(self): - pygame.midi.quit() - - def test_MidiException(self): - - def raiseit(): - raise pygame.midi.MidiException('Hello Midi param') - - with self.assertRaises(pygame.midi.MidiException) as cm: - raiseit() - self.assertEqual(cm.exception.parameter, 'Hello Midi param') - - def test_get_count(self): - c = pygame.midi.get_count() - self.assertIsInstance(c, int) - self.assertTrue(c >= 0) - - def test_get_default_input_id(self): - - midin_id = pygame.midi.get_default_input_id() - # if there is a not None return make sure 
it is an int. - self.assertIsInstance(midin_id, int) - self.assertTrue(midin_id >= -1) - pygame.midi.quit() - self.assertRaises(RuntimeError, pygame.midi.get_default_output_id) - - def test_get_default_output_id(self): - - c = pygame.midi.get_default_output_id() - self.assertIsInstance(c, int) - self.assertTrue(c >= -1) - pygame.midi.quit() - self.assertRaises(RuntimeError, pygame.midi.get_default_output_id) - - def test_get_device_info(self): - - an_id = pygame.midi.get_default_output_id() - if an_id != -1: - interf, name, input, output, opened = pygame.midi.get_device_info(an_id) - self.assertEqual(output, 1) - self.assertEqual(input, 0) - self.assertEqual(opened, 0) - - an_in_id = pygame.midi.get_default_input_id() - if an_in_id != -1: - r = pygame.midi.get_device_info(an_in_id) - # if r is None, it means that the id is out of range. - interf, name, input, output, opened = r - - self.assertEqual(output, 0) - self.assertEqual(input, 1) - self.assertEqual(opened, 0) - out_of_range = pygame.midi.get_count() - for num in range(out_of_range): - self.assertIsNotNone(pygame.midi.get_device_info(num)) - info = pygame.midi.get_device_info(out_of_range) - self.assertIsNone(info) - - def test_init(self): - - pygame.midi.quit() - self.assertRaises(RuntimeError, pygame.midi.get_count) - # initialising many times should be fine. - pygame.midi.init() - pygame.midi.init() - pygame.midi.init() - pygame.midi.init() - - self.assertTrue(pygame.midi.get_init()) - - def test_midis2events(self): - - midi_data = ([[0xc0, 0, 1, 2], 20000], - [[0x90, 60, 100, 'blablabla'], 20000] - ) - events = pygame.midi.midis2events(midi_data, 2) - self.assertEqual(len(events), 2) - - for eve in events: - # pygame.event.Event is a function, but ... 
- self.assertEqual(eve.__class__.__name__, 'Event') - self.assertEqual(eve.vice_id, 2) - # FIXME I don't know what we want for the Event.timestamp - # For now it accepts it accepts int as is: - self.assertIsInstance(eve.timestamp, int) - self.assertEqual(eve.timestamp, 20000) - self.assertEqual(events[1].data3, 'blablabla') - - def test_quit(self): - - # It is safe to call this more than once. - pygame.midi.quit() - pygame.midi.init() - pygame.midi.quit() - pygame.midi.quit() - pygame.midi.init() - pygame.midi.init() - pygame.midi.quit() - - self.assertFalse(pygame.midi.get_init()) - - def test_get_init(self): - # Already initialized as pygame.midi.init() was called in setUp(). - self.assertTrue(pygame.midi.get_init()) - - def test_time(self): - - mtime = pygame.midi.time() - self.assertIsInstance(mtime, int) - # should be close to 2-3... since the timer is just init'd. - self.assertTrue(0 <= mtime < 100) - - - def test_conversions(self): - """ of frequencies to midi note numbers and ansi note names. 
- """ - from pygame.midi import ( - frequency_to_midi, midi_to_frequency, midi_to_ansi_note - ) - self.assertEqual(frequency_to_midi(27.5), 21) - self.assertEqual(frequency_to_midi(36.7), 26) - self.assertEqual(frequency_to_midi(4186.0), 108) - self.assertEqual(midi_to_frequency(21), 27.5) - self.assertEqual(midi_to_frequency(26), 36.7) - self.assertEqual(midi_to_frequency(108), 4186.0) - self.assertEqual(midi_to_ansi_note(21), 'A0') - self.assertEqual(midi_to_ansi_note(102), 'F#7') - self.assertEqual(midi_to_ansi_note(108), 'C8') - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/mixer_music_tags.py b/venv/lib/python3.7/site-packages/pygame/tests/mixer_music_tags.py deleted file mode 100644 index a131d09..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/mixer_music_tags.py +++ /dev/null @@ -1,7 +0,0 @@ -__tags__ = [] - -import pygame -import sys -if 'pygame.mixer_music' not in sys.modules: - __tags__.extend(('ignore', 'subprocess_ignore')) - diff --git a/venv/lib/python3.7/site-packages/pygame/tests/mixer_music_test.py b/venv/lib/python3.7/site-packages/pygame/tests/mixer_music_test.py deleted file mode 100644 index e7ee608..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/mixer_music_test.py +++ /dev/null @@ -1,259 +0,0 @@ -# -*- coding: utf-8 -*- - -import os -import sys -import unittest - -from pygame.tests.test_utils import example_path -import pygame -from pygame.compat import as_unicode, unicode_, filesystem_encode - - -class MixerMusicModuleTest(unittest.TestCase): - @classmethod - def setUpClass(cls): - # Initializing the mixer is slow, so minimize the times it is called. - pygame.mixer.init() - - @classmethod - def tearDownClass(cls): - pygame.mixer.quit() - - def setUp(cls): - # This makes sure the mixer is always initialized before each test (in - # case a test calls pygame.mixer.quit()). 
- if pygame.mixer.get_init() is None: - pygame.mixer.init() - - def test_load(self): - "|tags:music|" - # __doc__ (as of 2008-07-13) for pygame.mixer_music.load: - - # pygame.mixer.music.load(filename): return None - # Load a music file for playback - - data_fname = example_path('data') - formats = ['mp3', 'ogg', 'wav'] - - for f in formats: - path = os.path.join(data_fname, 'house_lo.%s' % f) - if os.sep == '\\': - path = path.replace('\\', '\\\\') - umusfn = as_unicode(path) - bmusfn = filesystem_encode(umusfn) - - pygame.mixer.music.load(umusfn) - pygame.mixer.music.load(bmusfn) - - def test_load_object(self): - """test loading music from file-like objects.""" - formats = ['ogg', 'wav'] - data_fname = example_path('data') - for f in formats: - path = os.path.join(data_fname, 'house_lo.%s' % f) - if os.sep == '\\': - path = path.replace('\\', '\\\\') - bmusfn = filesystem_encode(path) - - with open(bmusfn, 'rb') as musf: - pygame.mixer.music.load(musf) - - def test_load_unicode(self): - """test non-ASCII unicode path""" - import shutil - ep = unicode_(example_path('data')) - temp_file = os.path.join(ep, u'你好.wav') - org_file = os.path.join(ep, u'house_lo.wav') - try: - with open(temp_file, 'w') as f: - pass - os.remove(temp_file) - except IOError: - raise unittest.SkipTest('the path cannot be opened') - shutil.copy(org_file, temp_file) - try: - pygame.mixer.music.load(temp_file) - pygame.mixer.music.load(org_file) # unload - finally: - os.remove(temp_file) - - def todo_test_queue(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer_music.queue: - - # This will load a music file and queue it. A queued music file will - # begin as soon as the current music naturally ends. If the current - # music is ever stopped or changed, the queued song will be lost. - # - # The following example will play music by Bach six times, then play - # music by Mozart once: - # - # pygame.mixer.music.load('bach.ogg') - # pygame.mixer.music.play(5) # Plays six times, not five! 
- # pygame.mixer.music.queue('mozart.ogg') - - self.fail() - - def todo_test_stop(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer_music.stop: - - # Stops the music playback if it is currently playing. - - self.fail() - - def todo_test_rewind(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer_music.rewind: - - # Resets playback of the current music to the beginning. - - self.fail() - - def todo_test_get_pos(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer_music.get_pos: - - # This gets the number of milliseconds that the music has been playing - # for. The returned time only represents how long the music has been - # playing; it does not take into account any starting position - # offsets. - # - - self.fail() - - def todo_test_fadeout(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer_music.fadeout: - - # This will stop the music playback after it has been faded out over - # the specified time (measured in milliseconds). - # - # Note, that this function blocks until the music has faded out. - - self.fail() - - def todo_test_play(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer_music.play: - - # This will play the loaded music stream. If the music is already - # playing it will be restarted. - # - # The loops argument controls the number of repeats a music will play. - # play(5) will cause the music to played once, then repeated five - # times, for a total of six. If the loops is -1 then the music will - # repeat indefinitely. - # - # The starting position argument controls where in the music the song - # starts playing. The starting position is dependent on the format of - # music playing. MP3 and OGG use the position as time (in seconds). - # MOD music it is the pattern order number. 
Passing a startpos will - # raise a NotImplementedError if it cannot set the start position - # - - self.fail() - - def todo_test_load(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer_music.load: - - # This will load a music file and prepare it for playback. If a music - # stream is already playing it will be stopped. This does not start - # the music playing. - # - # Music can only be loaded from filenames, not python file objects - # like the other pygame loading functions. - # - - self.fail() - - def todo_test_get_volume(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer_music.get_volume: - - # Returns the current volume for the mixer. The value will be between - # 0.0 and 1.0. - # - - self.fail() - - def todo_test_set_endevent(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer_music.set_endevent: - - # This causes Pygame to signal (by means of the event queue) when the - # music is done playing. The argument determines the type of event - # that will be queued. - # - # The event will be queued every time the music finishes, not just the - # first time. To stop the event from being queued, call this method - # with no argument. - # - - self.fail() - - def todo_test_pause(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer_music.pause: - - # Temporarily stop playback of the music stream. It can be resumed - # with the pygame.mixer.music.unpause() function. - # - - self.fail() - - def todo_test_get_busy(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer_music.get_busy: - - # Returns True when the music stream is actively playing. When the - # music is idle this returns False. - # - - self.fail() - - def todo_test_get_endevent(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer_music.get_endevent: - - # Returns the event type to be sent every time the music finishes - # playback. If there is no endevent the function returns - # pygame.NOEVENT. 
- # - - self.fail() - - def todo_test_unpause(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer_music.unpause: - - # This will resume the playback of a music stream after it has been paused. - - self.fail() - - def todo_test_set_volume(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer_music.set_volume: - - # Set the volume of the music playback. The value argument is between - # 0.0 and 1.0. When new music is loaded the volume is reset. - # - - self.fail() - - def todo_test_set_pos(self): - - # __doc__ (as of 2010-24-05) for pygame.mixer_music.set_pos: - - #This sets the position in the music file where playback will start. The - # meaning of "pos", a float (or a number that can be converted to a float), - # depends on the music format. Newer versions of SDL_mixer have better - # positioning support than earlier. An SDLError is raised if a particular - # format does not support positioning. - # - - self.fail() - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/mixer_tags.py b/venv/lib/python3.7/site-packages/pygame/tests/mixer_tags.py deleted file mode 100644 index 7cba721..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/mixer_tags.py +++ /dev/null @@ -1,7 +0,0 @@ -__tags__ = [] - -import pygame -import sys -if 'pygame.mixer' not in sys.modules: - __tags__.extend(('ignore', 'subprocess_ignore')) - diff --git a/venv/lib/python3.7/site-packages/pygame/tests/mixer_test.py b/venv/lib/python3.7/site-packages/pygame/tests/mixer_test.py deleted file mode 100644 index e00926c..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/mixer_test.py +++ /dev/null @@ -1,1029 +0,0 @@ -# -*- coding: utf8 -*- - -import sys -import os -import unittest -import platform - -from pygame.tests.test_utils import example_path, AssertRaisesRegexMixin - -import pygame -from pygame import mixer -from pygame.compat import unicode_, as_bytes, bytes_ - - -IS_PYPY = 'PyPy' == platform.python_implementation() - 
-################################### CONSTANTS ################################## - -FREQUENCIES = [11025, 22050, 44100, 48000] -SIZES = [-16, -8, 8, 16] -if pygame.get_sdl_version()[0] >= 2: - SIZES.append(32) - -CHANNELS = [1, 2] -BUFFERS = [3024] - -CONFIGS = [{'frequency' : f, 'size' : s, 'channels': c} - for f in FREQUENCIES - for s in SIZES - for c in CHANNELS] -# Using all CONFIGS fails on a Mac; probably older SDL_mixer; we could do: -# if platform.system() == 'Darwin': -# But using all CONFIGS is very slow (> 10 sec for example) -# And probably, we don't need to be so exhaustive, hence: - -CONFIG = {'frequency' : 22050, 'size' : -16, 'channels' : 2} # base config -if pygame.get_sdl_version()[0] >= 2: - CONFIG = {'frequency' : 44100, 'size' : 32, 'channels' : 2} # base config - -############################## MODULE LEVEL TESTS ############################## - -class MixerModuleTest(unittest.TestCase): - - def tearDown(self): - mixer.quit() - mixer.pre_init(0, 0, 0, 0) - - def test_init__keyword_args(self): - # note: this test used to loop over all CONFIGS, but it's very slow.. - mixer.init(**CONFIG) - mixer_conf = mixer.get_init() - - self.assertEqual(mixer_conf[0], CONFIG['frequency']) - # Not all "sizes" are supported on all systems, hence "abs". - self.assertEqual(abs(mixer_conf[1]), abs(CONFIG['size'])) - self.assertEqual(mixer_conf[2], CONFIG['channels']) - - def test_pre_init__keyword_args(self): - # note: this test used to loop over all CONFIGS, but it's very slow.. - mixer.pre_init(**CONFIG) - mixer.init() - - mixer_conf = mixer.get_init() - - self.assertEqual(mixer_conf[0], CONFIG['frequency']) - # Not all "sizes" are supported on all systems, hence "abs". - self.assertEqual(abs(mixer_conf[1]), abs(CONFIG['size'])) - self.assertEqual(mixer_conf[2], CONFIG['channels']) - - def test_pre_init__zero_values(self): - # Ensure that argument values of 0 are replaced with - # default values. No way to check buffer size though. 
- mixer.pre_init(44100, -8, 1) # Non default values - mixer.pre_init(0, 0, 0) # Should reset to default values - mixer.init() - self.assertEqual(mixer.get_init(), (22050, -16, 2)) - - def test_init__zero_values(self): - # Ensure that argument values of 0 are replaced with - # preset values. No way to check buffer size though. - mixer.pre_init(44100, 8, 1, allowedchanges=0) # None default values - mixer.init(0, 0, 0) - self.assertEqual(mixer.get_init(), (44100, 8, 1)) - - @unittest.skip('SDL_mixer bug') - def test_get_init__returns_exact_values_used_for_init(self): - # fix in 1.9 - I think it's a SDL_mixer bug. - - # TODO: When this bug is fixed, testing through every combination - # will be too slow so adjust as necessary, at the moment it - # breaks the loop after first failure - - for init_conf in CONFIGS: - frequency, size, channels - if (frequency, size) == (22050, 16): - continue - mixer.init(frequency, size, channels) - - mixer_conf = mixer.get_init() - - self.assertEqual(init_conf, mixer_conf) - mixer.quit() - - def test_get_init__returns_None_if_mixer_not_initialized(self): - self.assertIsNone(mixer.get_init()) - - def test_get_num_channels__defaults_eight_after_init(self): - mixer.init() - self.assertEqual(mixer.get_num_channels(), 8) - - def test_set_num_channels(self): - mixer.init() - - default_num_channels = mixer.get_num_channels() - for i in range(1, default_num_channels + 1): - mixer.set_num_channels(i) - self.assertEqual(mixer.get_num_channels(), i) - - def test_quit(self): - """ get_num_channels() Should throw pygame.error if uninitialized - after mixer.quit() """ - mixer.init() - mixer.quit() - self.assertRaises(pygame.error, mixer.get_num_channels) - - def test_sound_args(self): - def get_bytes(snd): - return snd.get_raw() - mixer.init() - - sample = as_bytes('\x00\xff') * 24 - wave_path = example_path(os.path.join('data', 'house_lo.wav')) - uwave_path = unicode_(wave_path) - bwave_path = uwave_path.encode(sys.getfilesystemencoding()) - snd = 
mixer.Sound(file=wave_path) - self.assertTrue(snd.get_length() > 0.5) - snd_bytes = get_bytes(snd) - self.assertTrue(len(snd_bytes) > 1000) - self.assertEqual(get_bytes(mixer.Sound(wave_path)), snd_bytes) - self.assertEqual(get_bytes(mixer.Sound(file=uwave_path)), snd_bytes) - self.assertEqual(get_bytes(mixer.Sound(uwave_path)), snd_bytes) - arg_emsg = 'Sound takes either 1 positional or 1 keyword argument' - - with self.assertRaises(TypeError) as cm: - mixer.Sound() - self.assertEqual(str(cm.exception), arg_emsg) - with self.assertRaises(TypeError) as cm: - mixer.Sound(wave_path, buffer=sample) - self.assertEqual(str(cm.exception), arg_emsg) - with self.assertRaises(TypeError) as cm: - mixer.Sound(sample, file=wave_path) - self.assertEqual(str(cm.exception), arg_emsg) - with self.assertRaises(TypeError) as cm: - mixer.Sound(buffer=sample, file=wave_path) - self.assertEqual(str(cm.exception), arg_emsg) - - with self.assertRaises(TypeError) as cm: - mixer.Sound(foobar=sample) - self.assertEqual(str(cm.exception), - "Unrecognized keyword argument 'foobar'") - - snd = mixer.Sound(wave_path, **{}) - self.assertEqual(get_bytes(snd), snd_bytes) - snd = mixer.Sound(*[], **{'file': wave_path}) - - with self.assertRaises(TypeError) as cm: - mixer.Sound([]) - self.assertEqual(str(cm.exception), - 'Unrecognized argument (type list)') - - with self.assertRaises(TypeError) as cm: - snd = mixer.Sound(buffer=[]) - emsg = 'Expected object with buffer interface: got a list' - self.assertEqual(str(cm.exception), emsg) - - ufake_path = unicode_('12345678') - self.assertRaises(IOError, mixer.Sound, ufake_path) - self.assertRaises(IOError, mixer.Sound, '12345678') - - with self.assertRaises(TypeError) as cm: - mixer.Sound(buffer=unicode_('something')) - emsg = 'Unicode object not allowed as buffer object' - self.assertEqual(str(cm.exception), emsg) - self.assertEqual(get_bytes(mixer.Sound(buffer=sample)), sample) - if type(sample) != str: - somebytes = get_bytes(mixer.Sound(sample)) - 
# on python 2 we do not allow using string except as file name. - self.assertEqual(somebytes, sample) - self.assertEqual(get_bytes(mixer.Sound(file=bwave_path)), snd_bytes) - self.assertEqual(get_bytes(mixer.Sound(bwave_path)), snd_bytes) - - snd = mixer.Sound(wave_path) - with self.assertRaises(TypeError) as cm: - mixer.Sound(wave_path, array=snd) - self.assertEqual(str(cm.exception), arg_emsg) - with self.assertRaises(TypeError) as cm: - mixer.Sound(buffer=sample, array=snd) - self.assertEqual(str(cm.exception), arg_emsg) - snd2 = mixer.Sound(array=snd) - self.assertEqual(snd.get_raw(), snd2.get_raw()) - - def test_sound_unicode(self): - """test non-ASCII unicode path""" - mixer.init() - import shutil - ep = unicode_(example_path('data')) - temp_file = os.path.join(ep, u'你好.wav') - org_file = os.path.join(ep, u'house_lo.wav') - shutil.copy(org_file, temp_file) - try: - with open(temp_file, 'rb') as f: - pass - except IOError: - raise unittest.SkipTest('the path cannot be opened') - - try: - sound = mixer.Sound(temp_file) - del sound - finally: - os.remove(temp_file) - - @unittest.skipIf(os.environ.get('SDL_AUDIODRIVER') == 'disk', - 'this test fails without real sound card') - def test_array_keyword(self): - try: - from numpy import (array, arange, zeros, - int8, uint8, - int16, uint16, - int32, uint32) - except ImportError: - self.skipTest('requires numpy') - - freq = 22050 - format_list = [-8, 8, -16, 16] - channels_list = [1, 2] - - a_lists = dict((f, []) for f in format_list) - a32u_mono = arange(0, 256, 1, uint32) - a16u_mono = a32u_mono.astype(uint16) - a8u_mono = a32u_mono.astype(uint8) - au_list_mono = [(1, a) for a in [a8u_mono, a16u_mono, a32u_mono]] - for format in format_list: - if format > 0: - a_lists[format].extend(au_list_mono) - a32s_mono = arange(-128, 128, 1, int32) - a16s_mono = a32s_mono.astype(int16) - a8s_mono = a32s_mono.astype(int8) - as_list_mono = [(1, a) for a in [a8s_mono, a16s_mono, a32s_mono]] - for format in format_list: - if 
format < 0: - a_lists[format].extend(as_list_mono) - a32u_stereo = zeros([a32u_mono.shape[0], 2], uint32) - a32u_stereo[:,0] = a32u_mono - a32u_stereo[:,1] = 255 - a32u_mono - a16u_stereo = a32u_stereo.astype(uint16) - a8u_stereo = a32u_stereo.astype(uint8) - au_list_stereo = [(2, a) - for a in [a8u_stereo, a16u_stereo, a32u_stereo]] - for format in format_list: - if format > 0: - a_lists[format].extend(au_list_stereo) - a32s_stereo = zeros([a32s_mono.shape[0], 2], int32) - a32s_stereo[:,0] = a32s_mono - a32s_stereo[:,1] = -1 - a32s_mono - a16s_stereo = a32s_stereo.astype(int16) - a8s_stereo = a32s_stereo.astype(int8) - as_list_stereo = [(2, a) - for a in [a8s_stereo, a16s_stereo, a32s_stereo]] - for format in format_list: - if format < 0: - a_lists[format].extend(as_list_stereo) - - for format in format_list: - for channels in channels_list: - try: - mixer.init(freq, format, channels) - except pygame.error: - # Some formats (e.g. 16) may not be supported. - continue - try: - __, f, c = mixer.get_init() - if f != format or c != channels: - # Some formats (e.g. -8) may not be supported. - continue - for c, a in a_lists[format]: - self._test_array_argument(format, a, c == channels) - finally: - mixer.quit() - - def _test_array_argument(self, format, a, test_pass): - from numpy import array, all as all_ - - try: - snd = mixer.Sound(array=a) - except ValueError: - if not test_pass: - return - self.fail("Raised ValueError: Format %i, dtype %s" % - (format, a.dtype)) - if not test_pass: - self.fail("Did not raise ValueError: Format %i, dtype %s" % - (format, a.dtype)) - a2 = array(snd) - a3 = a.astype(a2.dtype) - lshift = abs(format) - 8 * a.itemsize - if lshift >= 0: - # This is asymmetric with respect to downcasting. 
- a3 <<= lshift - self.assertTrue(all_(a2 == a3), - "Format %i, dtype %s" % (format, a.dtype)) - - def _test_array_interface_fail(self, a): - self.assertRaises(ValueError, mixer.Sound, array=a) - - def test_array_interface(self): - mixer.init(22050, -16, 1, allowedchanges=0) - snd = mixer.Sound(buffer=as_bytes('\x00\x7f') * 20) - d = snd.__array_interface__ - self.assertTrue(isinstance(d, dict)) - if pygame.get_sdl_byteorder() == pygame.LIL_ENDIAN: - typestr = '') if is_lil_endian else ('>', '<') - shape = (10, channels)[:ndim] - strides = (channels * itemsize, itemsize)[2 - ndim:] - exp = Exporter(shape, format=frev + 'i') - snd = mixer.Sound(array=exp) - buflen = len(exp) * itemsize * channels - imp = Importer(snd, buftools.PyBUF_SIMPLE) - self.assertEqual(imp.ndim, 0) - self.assertTrue(imp.format is None) - self.assertEqual(imp.len, buflen) - self.assertEqual(imp.itemsize, itemsize) - self.assertTrue(imp.shape is None) - self.assertTrue(imp.strides is None) - self.assertTrue(imp.suboffsets is None) - self.assertFalse(imp.readonly) - self.assertEqual(imp.buf, snd._samples_address) - imp = Importer(snd, buftools.PyBUF_WRITABLE) - self.assertEqual(imp.ndim, 0) - self.assertTrue(imp.format is None) - self.assertEqual(imp.len, buflen) - self.assertEqual(imp.itemsize, itemsize) - self.assertTrue(imp.shape is None) - self.assertTrue(imp.strides is None) - self.assertTrue(imp.suboffsets is None) - self.assertFalse(imp.readonly) - self.assertEqual(imp.buf, snd._samples_address) - imp = Importer(snd, buftools.PyBUF_FORMAT) - self.assertEqual(imp.ndim, 0) - self.assertEqual(imp.format, format) - self.assertEqual(imp.len, buflen) - self.assertEqual(imp.itemsize, itemsize) - self.assertTrue(imp.shape is None) - self.assertTrue(imp.strides is None) - self.assertTrue(imp.suboffsets is None) - self.assertFalse(imp.readonly) - self.assertEqual(imp.buf, snd._samples_address) - imp = Importer(snd, buftools.PyBUF_ND) - self.assertEqual(imp.ndim, ndim) - self.assertTrue(imp.format 
is None) - self.assertEqual(imp.len, buflen) - self.assertEqual(imp.itemsize, itemsize) - self.assertEqual(imp.shape, shape) - self.assertTrue(imp.strides is None) - self.assertTrue(imp.suboffsets is None) - self.assertFalse(imp.readonly) - self.assertEqual(imp.buf, snd._samples_address) - imp = Importer(snd, buftools.PyBUF_STRIDES) - self.assertEqual(imp.ndim, ndim) - self.assertTrue(imp.format is None) - self.assertEqual(imp.len, buflen) - self.assertEqual(imp.itemsize, itemsize) - self.assertEqual(imp.shape, shape) - self.assertEqual(imp.strides, strides) - self.assertTrue(imp.suboffsets is None) - self.assertFalse(imp.readonly) - self.assertEqual(imp.buf, snd._samples_address) - imp = Importer(snd, buftools.PyBUF_FULL_RO) - self.assertEqual(imp.ndim, ndim) - self.assertEqual(imp.format, format) - self.assertEqual(imp.len, buflen) - self.assertEqual(imp.itemsize, 2) - self.assertEqual(imp.shape, shape) - self.assertEqual(imp.strides, strides) - self.assertTrue(imp.suboffsets is None) - self.assertFalse(imp.readonly) - self.assertEqual(imp.buf, snd._samples_address) - imp = Importer(snd, buftools.PyBUF_FULL_RO) - self.assertEqual(imp.ndim, ndim) - self.assertEqual(imp.format, format) - self.assertEqual(imp.len, buflen) - self.assertEqual(imp.itemsize, itemsize) - self.assertEqual(imp.shape, exp.shape) - self.assertEqual(imp.strides, strides) - self.assertTrue(imp.suboffsets is None) - self.assertFalse(imp.readonly) - self.assertEqual(imp.buf, snd._samples_address) - imp = Importer(snd, buftools.PyBUF_C_CONTIGUOUS) - self.assertEqual(imp.ndim, ndim) - self.assertTrue(imp.format is None) - self.assertEqual(imp.strides, strides) - imp = Importer(snd, buftools.PyBUF_ANY_CONTIGUOUS) - self.assertEqual(imp.ndim, ndim) - self.assertTrue(imp.format is None) - self.assertEqual(imp.strides, strides) - if ndim == 1: - imp = Importer(snd, buftools.PyBUF_F_CONTIGUOUS) - self.assertEqual(imp.ndim, 1) - self.assertTrue(imp.format is None) - self.assertEqual(imp.strides, 
strides) - else: - self.assertRaises(BufferError, Importer, snd, - buftools.PyBUF_F_CONTIGUOUS) - - def todo_test_fadeout(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer.fadeout: - - # pygame.mixer.fadeout(time): return None - # fade out the volume on all sounds before stopping - # - # This will fade out the volume on all active channels over the time - # argument in milliseconds. After the sound is muted the playback will - # stop. - # - - self.fail() - - def todo_test_find_channel(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer.find_channel: - - # pygame.mixer.find_channel(force=False): return Channel - # find an unused channel - # - # This will find and return an inactive Channel object. If there are - # no inactive Channels this function will return None. If there are no - # inactive channels and the force argument is True, this will find the - # Channel with the longest running Sound and return it. - # - # If the mixer has reserved channels from pygame.mixer.set_reserved() - # then those channels will not be returned here. - # - - self.fail() - - def todo_test_get_busy(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer.get_busy: - - # pygame.mixer.get_busy(): return bool - # test if any sound is being mixed - # - # Returns True if the mixer is busy mixing any channels. If the mixer - # is idle then this return False. - # - - self.fail() - - def todo_test_pause(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer.pause: - - # pygame.mixer.pause(): return None - # temporarily stop playback of all sound channels - # - # This will temporarily stop all playback on the active mixer - # channels. 
The playback can later be resumed with - # pygame.mixer.unpause() - # - - self.fail() - - def todo_test_set_reserved(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer.set_reserved: - - # pygame.mixer.set_reserved(count): return None - # reserve channels from being automatically used - # - # The mixer can reserve any number of channels that will not be - # automatically selected for playback by Sounds. If sounds are - # currently playing on the reserved channels they will not be stopped. - # - # This allows the application to reserve a specific number of channels - # for important sounds that must not be dropped or have a guaranteed - # channel to play on. - # - - self.fail() - - def todo_test_stop(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer.stop: - - # pygame.mixer.stop(): return None - # stop playback of all sound channels - # - # This will stop all playback of all active mixer channels. - - self.fail() - - def todo_test_unpause(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer.unpause: - - # pygame.mixer.unpause(): return None - # resume paused playback of sound channels - # - # This will resume all active sound channels after they have been paused. - - self.fail() - -############################## CHANNEL CLASS TESTS ############################# - -class ChannelTypeTest(AssertRaisesRegexMixin, unittest.TestCase): - @classmethod - def setUpClass(cls): - # Initializing the mixer is slow, so minimize the times it is called. - mixer.init() - - @classmethod - def tearDownClass(cls): - mixer.quit() - - def setUp(cls): - # This makes sure the mixer is always initialized before each test (in - # case a test calls pygame.mixer.quit()). 
- if mixer.get_init() is None: - mixer.init() - - def test_channel(self): - """Ensure Channel() creation works.""" - channel = mixer.Channel(0) - - self.assertIsInstance(channel, mixer.ChannelType) - self.assertEqual(channel.__class__.__name__, 'Channel') - - def test_channel__without_arg(self): - """Ensure exception for Channel() creation with no argument.""" - with self.assertRaises(TypeError): - mixer.Channel() - - def test_channel__invalid_id(self): - """Ensure exception for Channel() creation with an invalid id.""" - with self.assertRaises(IndexError): - mixer.Channel(-1) - - def test_channel__before_init(self): - """Ensure exception for Channel() creation with non-init mixer.""" - mixer.quit() - - with self.assertRaisesRegex(pygame.error, 'mixer not initialized'): - mixer.Channel(0) - - def todo_test_fadeout(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer.Channel.fadeout: - - # Channel.fadeout(time): return None - # stop playback after fading channel out - # - # Stop playback of a channel after fading out the sound over the given - # time argument in milliseconds. - # - - self.fail() - - def test_get_busy(self): - """Ensure an idle channel's busy state is correct.""" - expected_busy = False - channel = mixer.Channel(0) - - busy = channel.get_busy() - - self.assertEqual(busy, expected_busy) - - def todo_test_get_busy__active(self): - """Ensure an active channel's busy state is correct.""" - self.fail() - - def todo_test_get_endevent(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer.Channel.get_endevent: - - # Channel.get_endevent(): return type - # get the event a channel sends when playback stops - # - # Returns the event type to be sent every time the Channel finishes - # playback of a Sound. If there is no endevent the function returns - # pygame.NOEVENT. 
- # - - self.fail() - - def todo_test_get_queue(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer.Channel.get_queue: - - # Channel.get_queue(): return Sound - # return any Sound that is queued - # - # If a Sound is already queued on this channel it will be returned. - # Once the queued sound begins playback it will no longer be on the - # queue. - # - - self.fail() - - def todo_test_get_sound(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer.Channel.get_sound: - - # Channel.get_sound(): return Sound - # get the currently playing Sound - # - # Return the actual Sound object currently playing on this channel. If - # the channel is idle None is returned. - # - - self.fail() - - def test_get_volume(self): - """Ensure a channel's volume can be retrieved.""" - expected_volume = 1.0 # default - channel = mixer.Channel(0) - - volume = channel.get_volume() - - self.assertAlmostEqual(volume, expected_volume) - - def todo_test_get_volume__while_playing(self): - """Ensure a channel's volume can be retrieved while playing.""" - self.fail() - - def todo_test_pause(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer.Channel.pause: - - # Channel.pause(): return None - # temporarily stop playback of a channel - # - # Temporarily stop the playback of sound on a channel. It can be - # resumed at a later time with Channel.unpause() - # - - self.fail() - - def todo_test_play(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer.Channel.play: - - # Channel.play(Sound, loops=0, maxtime=0, fade_ms=0): return None - # play a Sound on a specific Channel - # - # This will begin playback of a Sound on a specific Channel. If the - # Channel is currently playing any other Sound it will be stopped. - # - # The loops argument has the same meaning as in Sound.play(): it is - # the number of times to repeat the sound after the first time. If it - # is 3, the sound will be played 4 times (the first time, then three - # more). 
If loops is -1 then the playback will repeat indefinitely. - # - # As in Sound.play(), the maxtime argument can be used to stop - # playback of the Sound after a given number of milliseconds. - # - # As in Sound.play(), the fade_ms argument can be used fade in the sound. - - self.fail() - - def todo_test_queue(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer.Channel.queue: - - # Channel.queue(Sound): return None - # queue a Sound object to follow the current - # - # When a Sound is queued on a Channel, it will begin playing - # immediately after the current Sound is finished. Each channel can - # only have a single Sound queued at a time. The queued Sound will - # only play if the current playback finished automatically. It is - # cleared on any other call to Channel.stop() or Channel.play(). - # - # If there is no sound actively playing on the Channel then the Sound - # will begin playing immediately. - # - - self.fail() - - def todo_test_set_endevent(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer.Channel.set_endevent: - - # Channel.set_endevent(): return None - # Channel.set_endevent(type): return None - # have the channel send an event when playback stops - # - # When an endevent is set for a channel, it will send an event to the - # pygame queue every time a sound finishes playing on that channel - # (not just the first time). Use pygame.event.get() to retrieve the - # endevent once it's sent. - # - # Note that if you called Sound.play(n) or Channel.play(sound,n), the - # end event is sent only once: after the sound has been played "n+1" - # times (see the documentation of Sound.play). - # - # If Channel.stop() or Channel.play() is called while the sound was - # still playing, the event will be posted immediately. - # - # The type argument will be the event id sent to the queue. This can - # be any valid event type, but a good choice would be a value between - # pygame.locals.USEREVENT and pygame.locals.NUMEVENTS. 
If no type - # argument is given then the Channel will stop sending endevents. - # - - self.fail() - - def todo_test_set_volume(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer.Channel.set_volume: - - # Channel.set_volume(value): return None - # Channel.set_volume(left, right): return None - # set the volume of a playing channel - # - # Set the volume (loudness) of a playing sound. When a channel starts - # to play its volume value is reset. This only affects the current - # sound. The value argument is between 0.0 and 1.0. - # - # If one argument is passed, it will be the volume of both speakers. - # If two arguments are passed and the mixer is in stereo mode, the - # first argument will be the volume of the left speaker and the second - # will be the volume of the right speaker. (If the second argument is - # None, the first argument will be the volume of both speakers.) - # - # If the channel is playing a Sound on which set_volume() has also - # been called, both calls are taken into account. For example: - # - # sound = pygame.mixer.Sound("s.wav") - # channel = s.play() # Sound plays at full volume by default - # sound.set_volume(0.9) # Now plays at 90% of full volume. - # sound.set_volume(0.6) # Now plays at 60% (previous value replaced). - # channel.set_volume(0.5) # Now plays at 30% (0.6 * 0.5). - - self.fail() - - def todo_test_stop(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer.Channel.stop: - - # Channel.stop(): return None - # stop playback on a Channel - # - # Stop sound playback on a channel. After playback is stopped the - # channel becomes available for new Sounds to play on it. - # - - self.fail() - - def todo_test_unpause(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer.Channel.unpause: - - # Channel.unpause(): return None - # resume pause playback of a channel - # - # Resume the playback on a paused channel. 
- - self.fail() - -############################### SOUND CLASS TESTS ############################## - -class SoundTypeTest(AssertRaisesRegexMixin, unittest.TestCase): - @classmethod - def setUpClass(cls): - # Initializing the mixer is slow, so minimize the times it is called. - mixer.init() - - @classmethod - def tearDownClass(cls): - mixer.quit() - - def setUp(cls): - # This makes sure the mixer is always initialized before each test (in - # case a test calls pygame.mixer.quit()). - if mixer.get_init() is None: - mixer.init() - - # See MixerModuleTest's methods test_sound_args(), test_sound_unicode(), - # and test_array_keyword() for additional testing of Sound() creation. - def test_sound(self): - """Ensure Sound() creation with a filename works.""" - filename = example_path(os.path.join('data', 'house_lo.wav')) - sound1 = mixer.Sound(filename) - sound2 = mixer.Sound(file=filename) - - self.assertIsInstance(sound1, mixer.Sound) - self.assertIsInstance(sound2, mixer.Sound) - - def test_sound__from_file_object(self): - """Ensure Sound() creation with a file object works.""" - filename = example_path(os.path.join('data', 'house_lo.wav')) - - # Using 'with' ensures the file is closed even if test fails. 
- with open(filename, "rb") as file_obj: - sound = mixer.Sound(file_obj) - - self.assertIsInstance(sound, mixer.Sound) - - def test_sound__from_sound_object(self): - """Ensure Sound() creation with a Sound() object works.""" - filename = example_path(os.path.join('data', 'house_lo.wav')) - sound_obj = mixer.Sound(file=filename) - - sound = mixer.Sound(sound_obj) - - self.assertIsInstance(sound, mixer.Sound) - - def todo_test_sound__from_buffer(self): - """Ensure Sound() creation with a buffer works.""" - self.fail() - - def todo_test_sound__from_array(self): - """Ensure Sound() creation with an array works.""" - self.fail() - - def test_sound__without_arg(self): - """Ensure exception raised for Sound() creation with no argument.""" - with self.assertRaises(TypeError): - mixer.Sound() - - def test_sound__before_init(self): - """Ensure exception raised for Sound() creation with non-init mixer.""" - mixer.quit() - filename = example_path(os.path.join('data', 'house_lo.wav')) - - with self.assertRaisesRegex(pygame.error, 'mixer not initialized'): - mixer.Sound(file=filename) - - @unittest.skipIf(IS_PYPY, 'pypy skip') - def test_samples_address(self): - """Test the _samples_address getter.""" - from ctypes import pythonapi, c_void_p, py_object - - try: - Bytes_FromString = pythonapi.PyBytes_FromString # python 3 - except: - Bytes_FromString = pythonapi.PyString_FromString # python 2 - - Bytes_FromString.restype = c_void_p - Bytes_FromString.argtypes = [py_object] - samples = as_bytes('abcdefgh') # keep byte size a multiple of 4 - sample_bytes = Bytes_FromString(samples) - - snd = mixer.Sound(buffer=samples) - - self.assertNotEqual(snd._samples_address, sample_bytes) - - def todo_test_fadeout(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer.Sound.fadeout: - - # Sound.fadeout(time): return None - # stop sound playback after fading out - # - # This will stop playback of the sound after fading it out over the - # time argument in milliseconds. 
The Sound will fade and stop on all - # actively playing channels. - # - - self.fail() - - def todo_test_get_length(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer.Sound.get_length: - - # Sound.get_length(): return seconds - # get the length of the Sound - # - # Return the length of this Sound in seconds. - - self.fail() - - def test_get_num_channels(self): - """Ensure correct number of channels.""" - expected_channels = 0 - filename = example_path(os.path.join('data', 'house_lo.wav')) - sound = mixer.Sound(file=filename) - - num_channels = sound.get_num_channels() - - self.assertEqual(num_channels, expected_channels) - - def todo_test_get_num_channels__while_playing(self): - """Ensure correct number of channels while playing.""" - self.fail() - - def test_get_volume(self): - """Ensure a sound's volume can be retrieved.""" - expected_volume = 1.0 # default - filename = example_path(os.path.join('data', 'house_lo.wav')) - sound = mixer.Sound(file=filename) - - volume = sound.get_volume() - - self.assertAlmostEqual(volume, expected_volume) - - def todo_test_get_volume__while_playing(self): - """Ensure a sound's volume can be retrieved while playing.""" - self.fail() - - def todo_test_play(self): - - # __doc__ (as of 2008-08-02) for pygame.mixer.Sound.play: - - # Sound.play(loops=0, maxtime=0, fade_ms=0): return Channel - # begin sound playback - # - # Begin playback of the Sound (i.e., on the computer's speakers) on an - # available Channel. This will forcibly select a Channel, so playback - # may cut off a currently playing sound if necessary. - # - # The loops argument controls how many times the sample will be - # repeated after being played the first time. A value of 5 means that - # the sound will be played once, then repeated five times, and so is - # played a total of six times. The default value (zero) means the - # Sound is not repeated, and so is only played once. 
If loops is set - # to -1 the Sound will loop indefinitely (though you can still call - # stop() to stop it). - # - # The maxtime argument can be used to stop playback after a given - # number of milliseconds. - # - # The fade_ms argument will make the sound start playing at 0 volume - # and fade up to full volume over the time given. The sample may end - # before the fade-in is complete. - # - # This returns the Channel object for the channel that was selected. - - self.fail() - - def test_set_volume(self): - """Ensure a sound's volume can be set.""" - float_delta = 1.0 / 128 # SDL volume range is 0 to 128 - filename = example_path(os.path.join('data', 'house_lo.wav')) - sound = mixer.Sound(file=filename) - current_volume = sound.get_volume() - - # (volume_set_value : expected_volume) - volumes = ((-1, current_volume), # value < 0 won't change volume - (0, 0.0), - (0.01, 0.01), - (0.1, 0.1), - (0.5, 0.5), - (0.9, 0.9), - (0.99, 0.99), - (1, 1.0), - (1.1, 1.0), - (2.0, 1.0)) - - for volume_set_value, expected_volume in volumes: - sound.set_volume(volume_set_value) - - self.assertAlmostEqual(sound.get_volume(), expected_volume, - delta=float_delta) - - def todo_test_set_volume__while_playing(self): - """Ensure a sound's volume can be set while playing.""" - self.fail() - - def test_stop(self): - """Ensure stop can be called while not playing a sound.""" - expected_channels = 0 - filename = example_path(os.path.join('data', 'house_lo.wav')) - sound = mixer.Sound(file=filename) - - sound.stop() - - self.assertEqual(sound.get_num_channels(), expected_channels) - - def todo_test_stop__while_playing(self): - """Ensure stop stops a playing sound.""" - self.fail() - - def test_get_raw(self): - """Ensure get_raw returns the correct bytestring.""" - samples = as_bytes('abcdefgh') # keep byte size a multiple of 4 - snd = mixer.Sound(buffer=samples) - - raw = snd.get_raw() - - self.assertIsInstance(raw, bytes_) - self.assertEqual(raw, samples) - - 
-##################################### MAIN ##################################### - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/mouse_test.py b/venv/lib/python3.7/site-packages/pygame/tests/mouse_test.py deleted file mode 100644 index 8215cc8..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/mouse_test.py +++ /dev/null @@ -1,150 +0,0 @@ -import unittest - - -class MouseModuleTest(unittest.TestCase): - def todo_test_get_cursor(self): - - # __doc__ (as of 2008-08-02) for pygame.mouse.get_cursor: - - # pygame.mouse.get_cursor(): return (size, hotspot, xormasks, andmasks) - # get the image for the system mouse cursor - # - # Get the information about the mouse system cursor. The return value - # is the same data as the arguments passed into - # pygame.mouse.set_cursor(). - # - - self.fail() - - def todo_test_get_focused(self): - - # __doc__ (as of 2008-08-02) for pygame.mouse.get_focused: - - # pygame.mouse.get_focused(): return bool - # check if the display is receiving mouse input - # - # Returns true when pygame is receiving mouse input events (or, in - # windowing terminology, is "active" or has the "focus"). - # - # This method is most useful when working in a window. By contrast, in - # full-screen mode, this method always returns true. - # - # Note: under MS Windows, the window that has the mouse focus also has - # the keyboard focus. But under X-Windows, one window can receive - # mouse events and another receive keyboard events. - # pygame.mouse.get_focused() indicates whether the pygame window - # receives mouse events. - # - - self.fail() - - def todo_test_get_pos(self): - - # __doc__ (as of 2008-08-02) for pygame.mouse.get_pos: - - # pygame.mouse.get_pos(): return (x, y) - # get the mouse cursor position - # - # Returns the X and Y position of the mouse cursor. The position is - # relative the the top-left corner of the display. 
The cursor position - # can be located outside of the display window, but is always - # constrained to the screen. - # - - self.fail() - - def todo_test_get_pressed(self): - - # __doc__ (as of 2008-08-02) for pygame.mouse.get_pressed: - - # pygame.moouse.get_pressed(): return (button1, button2, button3) - # get the state of the mouse buttons - # - # Returns a sequence of booleans representing the state of all the - # mouse buttons. A true value means the mouse is currently being - # pressed at the time of the call. - # - # Note, to get all of the mouse events it is better to use either - # pygame.event.wait() or pygame.event.get() and check all of those events - # to see if they are MOUSEBUTTONDOWN, MOUSEBUTTONUP, or MOUSEMOTION. - # Note, that on X11 some XServers use middle button emulation. When - # you click both buttons 1 and 3 at the same time a 2 button event can - # be emitted. - # - # Note, remember to call pygame.event.get() before this function. - # Otherwise it will not work. - # - - self.fail() - - def todo_test_get_rel(self): - - # __doc__ (as of 2008-08-02) for pygame.mouse.get_rel: - - # pygame.mouse.get_rel(): return (x, y) - # get the amount of mouse movement - # - # Returns the amount of movement in X and Y since the previous call to - # this function. The relative movement of the mouse cursor is - # constrained to the edges of the screen, but see the virtual input - # mouse mode for a way around this. Virtual input mode is described - # at the top of the page. - # - - self.fail() - - def todo_test_set_cursor(self): - - # __doc__ (as of 2008-08-02) for pygame.mouse.set_cursor: - - # pygame.mouse.set_cursor(size, hotspot, xormasks, andmasks): return None - # set the image for the system mouse cursor - # - # When the mouse cursor is visible, it will be displayed as a black - # and white bitmap using the given bitmask arrays. The size is a - # sequence containing the cursor width and height. 
Hotspot is a - # sequence containing the cursor hotspot position. xormasks is a - # sequence of bytes containing the cursor xor data masks. Lastly is - # andmasks, a sequence of bytes containting the cursor bitmask data. - # - # Width must be a multiple of 8, and the mask arrays must be the - # correct size for the given width and height. Otherwise an exception - # is raised. - # - # See the pygame.cursor module for help creating default and custom - # masks for the system cursor. - # - - self.fail() - - def todo_test_set_pos(self): - - # __doc__ (as of 2008-08-02) for pygame.mouse.set_pos: - - # pygame.mouse.set_pos([x, y]): return None - # set the mouse cursor position - # - # Set the current mouse position to arguments given. If the mouse - # cursor is visible it will jump to the new coordinates. Moving the - # mouse will generate a new pygaqme.MOUSEMOTION event. - # - - self.fail() - - def todo_test_set_visible(self): - - # __doc__ (as of 2008-08-02) for pygame.mouse.set_visible: - - # pygame.mouse.set_visible(bool): return bool - # hide or show the mouse cursor - # - # If the bool argument is true, the mouse cursor will be visible. This - # will return the previous visible state of the cursor. 
- # - - self.fail() - -################################################################################ - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/overlay_tags.py b/venv/lib/python3.7/site-packages/pygame/tests/overlay_tags.py deleted file mode 100644 index a92aa6a..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/overlay_tags.py +++ /dev/null @@ -1,2 +0,0 @@ -# Overlay support was removed in SDL 2 -__tags__ = ['SDL2_ignore'] diff --git a/venv/lib/python3.7/site-packages/pygame/tests/overlay_test.py b/venv/lib/python3.7/site-packages/pygame/tests/overlay_test.py deleted file mode 100644 index d5c1799..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/overlay_test.py +++ /dev/null @@ -1,36 +0,0 @@ -import unittest - - -class OverlayTypeTest(unittest.TestCase): - def todo_test_display(self): - - # __doc__ (as of 2008-08-02) for pygame.overlay.overlay.display: - - # Overlay.display((y, u, v)): return None - # Overlay.display(): return None - # set the overlay pixel data - - self.fail() - - def todo_test_get_hardware(self): - - # __doc__ (as of 2008-08-02) for pygame.overlay.overlay.get_hardware: - - # Overlay.get_hardware(rect): return int - # test if the Overlay is hardware accelerated - - self.fail() - - def todo_test_set_location(self): - - # __doc__ (as of 2008-08-02) for pygame.overlay.overlay.set_location: - - # Overlay.set_location(rect): return None - # control where the overlay is displayed - - self.fail() - -################################################################################ - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/pixelarray_test.py b/venv/lib/python3.7/site-packages/pygame/tests/pixelarray_test.py deleted file mode 100644 index f877ed2..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/pixelarray_test.py +++ /dev/null @@ -1,1420 +0,0 @@ -import sys -import platform -try: - reduce 
-except NameError: - from functools import reduce -import operator -import weakref -import gc -import unittest - -from pygame.tests.test_utils import SurfaceSubclass - -try: - from pygame.tests.test_utils import arrinter -except NameError: - pass - -import pygame -from pygame.compat import xrange_ - -PY3 = sys.version_info >= (3, 0, 0) -IS_PYPY = 'PyPy' == platform.python_implementation() - - -class TestMixin (object): - def assert_surfaces_equal (self, s1, s2): - # Assumes the surfaces are the same size. - w, h = s1.get_size () - for x in range (w): - for y in range (h): - self.assertEqual (s1.get_at ((x, y)), s2.get_at ((x, y)), - "size: (%i, %i), position: (%i, %i)" % - (w, h, x, y)) - -class PixelArrayTypeTest (unittest.TestCase, TestMixin): - def test_compare(self): - # __doc__ (as of 2008-06-25) for pygame.pixelarray.PixelArray.compare: - - # PixelArray.compare (array, distance=0, weights=(0.299, 0.587, 0.114)): Return PixelArray - # Compares the PixelArray with another one. - - w = 10 - h = 20 - size = w, h - sf = pygame.Surface (size, 0, 32) - ar = pygame.PixelArray (sf) - sf2 = pygame.Surface (size, 0, 32) - self.assertRaises (TypeError, ar.compare, sf2) - ar2 = pygame.PixelArray (sf2) - ar3 = ar.compare (ar2) - self.assertTrue(isinstance (ar3, pygame.PixelArray)) - self.assertEqual (ar3.shape, size) - sf2.fill (pygame.Color ('white')) - self.assert_surfaces_equal (sf2, ar3.surface) - del ar3 - r = pygame.Rect (2, 5, 6, 13) - sf.fill (pygame.Color ('blue'), r) - sf2.fill (pygame.Color ('red')) - sf2.fill (pygame.Color ('blue'), r) - ar3 = ar.compare (ar2) - sf.fill (pygame.Color ('white'), r) - self.assert_surfaces_equal (sf, ar3.surface) - - # FINISH ME! - # Test other bit depths, slices, and distance != 0. - - def test_close(self): - """ does not crash when it is deleted. - """ - s = pygame.Surface((10,10)) - a = pygame.PixelArray(s) - a.close() - del a - - def test_close_raises(self): - """ when you try to do an operation after it is closed. 
- """ - s = pygame.Surface((10,10)) - a = pygame.PixelArray(s) - a.close() - def do_operation(): - a[:] - self.assertRaises (ValueError, do_operation) - - def do_operation2(): - a[:] = 1 - self.assertRaises (ValueError, do_operation2) - - def do_operation3(): - a.make_surface() - self.assertRaises (ValueError, do_operation3) - - def do_operation4(): - for x in a: - pass - self.assertRaises (ValueError, do_operation4) - - def test_context_manager(self): - """ closes properly. - """ - s = pygame.Surface((10,10)) - with pygame.PixelArray(s) as a: - a[:] - - def test_pixel_array (self): - for bpp in (8, 16, 24, 32): - sf = pygame.Surface ((10, 20), 0, bpp) - sf.fill ((0, 0, 0)) - ar = pygame.PixelArray (sf) - - self.assertEqual (ar._pixels_address, sf._pixels_address) - - if sf.mustlock (): - self.assertTrue (sf.get_locked ()) - - self.assertEqual (len (ar), 10) - - del ar - if sf.mustlock (): - self.assertFalse (sf.get_locked ()) - - def test_as_class (self): - # Check general new-style class freatures. 
- sf = pygame.Surface ((2, 3), 0, 32) - ar = pygame.PixelArray (sf) - self.assertRaises (AttributeError, getattr, ar, 'nonnative') - ar.nonnative = 'value' - self.assertEqual (ar.nonnative, 'value') - r = weakref.ref (ar) - self.assertTrue (r() is ar) - del ar - gc.collect () - self.assertTrue (r() is None) - - class C (pygame.PixelArray): - def __str__ (self): - return "string (%i, %i)" % self.shape - - ar = C (sf) - self.assertEqual (str (ar), "string (2, 3)") - r = weakref.ref (ar) - self.assertTrue (r() is ar) - del ar - gc.collect () - self.assertTrue (r() is None) - - def test_pixelarray__subclassed_surface(self): - """Ensure the PixelArray constructor accepts subclassed surfaces.""" - surface = SurfaceSubclass((3, 5), 0, 32) - pixelarray = pygame.PixelArray(surface) - - self.assertIsInstance(pixelarray, pygame.PixelArray) - - # Sequence interfaces - def test_get_column (self): - for bpp in (8, 16, 24, 32): - sf = pygame.Surface ((6, 8), 0, bpp) - sf.fill ((0, 0, 255)) - val = sf.map_rgb ((0, 0, 255)) - ar = pygame.PixelArray (sf) - - ar2 = ar.__getitem__ (1) - self.assertEqual (len(ar2), 8) - self.assertEqual (ar2.__getitem__ (0), val) - self.assertEqual (ar2.__getitem__ (1), val) - self.assertEqual (ar2.__getitem__ (2), val) - - ar2 = ar.__getitem__ (-1) - self.assertEqual (len(ar2), 8) - self.assertEqual (ar2.__getitem__ (0), val) - self.assertEqual (ar2.__getitem__ (1), val) - self.assertEqual (ar2.__getitem__ (2), val) - - def test_get_pixel (self): - w = 10 - h = 20 - size = w, h - bg_color = (0, 0, 255) - fg_color_y = (0, 0, 128) - fg_color_x = (0, 0, 11) - for bpp in (8, 16, 24, 32): - sf = pygame.Surface (size, 0, bpp) - mapped_bg_color = sf.map_rgb (bg_color) - mapped_fg_color_y = sf.map_rgb (fg_color_y) - mapped_fg_color_x = sf.map_rgb (fg_color_x) - self.assertNotEqual (mapped_fg_color_y, mapped_bg_color, - "Unusable test colors for bpp %i" % (bpp,)) - self.assertNotEqual (mapped_fg_color_x, mapped_bg_color, - "Unusable test colors for bpp %i" % 
(bpp,)) - self.assertNotEqual (mapped_fg_color_y, mapped_fg_color_x, - "Unusable test colors for bpp %i" % (bpp,)) - sf.fill (bg_color) - - ar = pygame.PixelArray (sf) - - ar_y = ar.__getitem__ (1) - for y in xrange_ (h): - ar2 = ar_y.__getitem__ (y) - self.assertEqual (ar2, mapped_bg_color, - "ar[1][%i] == %i, mapped_bg_color == %i" % - (y, ar2, mapped_bg_color)) - - sf.set_at ((1, y), fg_color_y) - ar2 = ar_y.__getitem__ (y) - self.assertEqual (ar2, mapped_fg_color_y, - "ar[1][%i] == %i, mapped_fg_color_y == %i" % - (y, ar2, mapped_fg_color_y)) - - sf.set_at ((1, 1), bg_color) - for x in xrange_ (w): - ar2 = ar.__getitem__ (x).__getitem__ (1) - self.assertEqual (ar2, mapped_bg_color, - "ar[%i][1] = %i, mapped_bg_color = %i" % - (x, ar2, mapped_bg_color)) - sf.set_at ((x, 1), fg_color_x) - ar2 = ar.__getitem__ (x).__getitem__ (1) - self.assertEqual (ar2, mapped_fg_color_x, - "ar[%i][1] = %i, mapped_fg_color_x = %i" % - (x, ar2, mapped_fg_color_x)) - - ar2 = ar.__getitem__ (0).__getitem__ (0) - self.assertEqual (ar2, mapped_bg_color, "bpp = %i" % (bpp,)) - - ar2 = ar.__getitem__ (1).__getitem__ (0) - self.assertEqual (ar2, mapped_fg_color_y, "bpp = %i" % (bpp,)) - - ar2 = ar.__getitem__ (-4).__getitem__ (1) - self.assertEqual (ar2, mapped_fg_color_x, "bpp = %i" % (bpp,)) - - ar2 = ar.__getitem__ (-4).__getitem__ (5) - self.assertEqual (ar2, mapped_bg_color, "bpp = %i" % (bpp,)) - - ar2 = ar.__getitem__ (-4).__getitem__ (0) - self.assertEqual (ar2, mapped_bg_color, "bpp = %i" % (bpp,)) - - ar2 = ar.__getitem__ (-w + 1).__getitem__ (0) - self.assertEqual (ar2, mapped_fg_color_y, "bpp = %i" % (bpp,)) - - ar2 = ar.__getitem__ (-w).__getitem__ (0) - self.assertEqual (ar2, mapped_bg_color, "bpp = %i" % (bpp,)) - - ar2 = ar.__getitem__ (5).__getitem__ (-4) - self.assertEqual (ar2, mapped_bg_color, "bpp = %i" % (bpp,)) - - ar2 = ar.__getitem__ (5).__getitem__ (-h + 1) - self.assertEqual (ar2, mapped_fg_color_x, "bpp = %i" % (bpp,)) - - ar2 = ar.__getitem__ (5).__getitem__ 
(-h) - self.assertEqual (ar2, mapped_bg_color, "bpp = %i" % (bpp,)) - - ar2 = ar.__getitem__ (0).__getitem__ (-h + 1) - self.assertEqual (ar2, mapped_fg_color_x, "bpp = %i" % (bpp,)) - - ar2 = ar.__getitem__ (0).__getitem__ (-h) - self.assertEqual (ar2, mapped_bg_color, "bpp = %i" % (bpp,)) - - def test_set_pixel (self): - for bpp in (8, 16, 24, 32): - sf = pygame.Surface ((10, 20), 0, bpp) - sf.fill ((0, 0, 0)) - ar = pygame.PixelArray (sf) - - ar.__getitem__ (0).__setitem__ (0, (0, 255, 0)) - self.assertEqual (ar[0][0], sf.map_rgb ((0, 255, 0))) - - ar.__getitem__ (1).__setitem__ (1, (128, 128, 128)) - self.assertEqual (ar[1][1], sf.map_rgb ((128, 128, 128))) - - ar.__getitem__(-1).__setitem__ (-1, (128, 128, 128)) - self.assertEqual (ar[9][19], sf.map_rgb ((128, 128, 128))) - - ar.__getitem__ (-2).__setitem__ (-2, (128, 128, 128)) - self.assertEqual (ar[8][-2], sf.map_rgb ((128, 128, 128))) - - def test_set_column (self): - for bpp in (8, 16, 24, 32): - sf = pygame.Surface ((6, 8), 0, bpp) - sf.fill ((0, 0, 0)) - ar = pygame.PixelArray (sf) - - sf2 = pygame.Surface ((6, 8), 0, bpp) - sf2.fill ((0, 255, 255)) - ar2 = pygame.PixelArray (sf2) - - # Test single value assignment - ar.__setitem__ (2, (128, 128, 128)) - self.assertEqual (ar[2][0], sf.map_rgb ((128, 128, 128))) - self.assertEqual (ar[2][1], sf.map_rgb ((128, 128, 128))) - - ar.__setitem__ (-1, (0, 255, 255)) - self.assertEqual (ar[5][0], sf.map_rgb ((0, 255, 255))) - self.assertEqual (ar[-1][1], sf.map_rgb ((0, 255, 255))) - - ar.__setitem__ (-2, (255, 255, 0)) - self.assertEqual (ar[4][0], sf.map_rgb ((255, 255, 0))) - self.assertEqual (ar[-2][1], sf.map_rgb ((255, 255, 0))) - - # Test list assignment. - ar.__setitem__ (0, [(255, 255, 255)] * 8) - self.assertEqual (ar[0][0], sf.map_rgb ((255, 255, 255))) - self.assertEqual (ar[0][1], sf.map_rgb ((255, 255, 255))) - - # Test tuple assignment. - # Changed in Pygame 1.9.2 - Raises an exception. 
- self.assertRaises (ValueError, ar.__setitem__, 1, - ((204, 0, 204), (17, 17, 17), (204, 0, 204), - (17, 17, 17), (204, 0, 204), (17, 17, 17), - (204, 0, 204), (17, 17, 17))) - - # Test pixel array assignment. - ar.__setitem__ (1, ar2.__getitem__ (3)) - self.assertEqual (ar[1][0], sf.map_rgb ((0, 255, 255))) - self.assertEqual (ar[1][1], sf.map_rgb ((0, 255, 255))) - - def test_get_slice (self): - for bpp in (8, 16, 24, 32): - sf = pygame.Surface ((10, 20), 0, bpp) - sf.fill ((0, 0, 0)) - ar = pygame.PixelArray (sf) - - self.assertEqual (len (ar[0:2]), 2) - self.assertEqual (len (ar[3:7][3]), 20) - - self.assertEqual (ar[0:0], None) - self.assertEqual (ar[5:5], None) - self.assertEqual (ar[9:9], None) - - # Has to resolve to ar[7:8] - self.assertEqual (len (ar[-3:-2]), 1) # 2D - self.assertEqual (len (ar[-3:-2][0]), 20) # 1D - - # Try assignments. - - # 2D assignment. - ar[2:5] = (255, 255, 255) - - # 1D assignment - ar[3][3:7] = (10, 10, 10) - self.assertEqual (ar[3][5], sf.map_rgb ((10, 10, 10))) - self.assertEqual (ar[3][6], sf.map_rgb ((10, 10, 10))) - - @unittest.skipIf(IS_PYPY, 'skipping for PyPy (segfaults on mac pypy3 6.0.0)') - def test_contains (self): - for bpp in (8, 16, 24, 32): - sf = pygame.Surface ((10, 20), 0, bpp) - sf.fill ((0, 0, 0)) - sf.set_at ((8, 8), (255, 255, 255)) - - ar = pygame.PixelArray (sf) - self.assertTrue ((0, 0, 0) in ar) - self.assertTrue ((255, 255, 255) in ar) - self.assertFalse ((255, 255, 0) in ar) - self.assertFalse (0x0000ff in ar) - - # Test sliced array - self.assertTrue ((0, 0, 0) in ar[8]) - self.assertTrue ((255, 255, 255) in ar[8]) - self.assertFalse ((255, 255, 0) in ar[8]) - self.assertFalse (0x0000ff in ar[8]) - - def test_get_surface (self): - for bpp in (8, 16, 24, 32): - sf = pygame.Surface((10, 20), 0, bpp) - sf.fill((0, 0, 0)) - ar = pygame.PixelArray(sf) - self.assertTrue(ar.surface is sf) - - def test_get_surface__subclassed_surface(self): - """Ensure the surface attribute can handle subclassed 
surfaces.""" - expected_surface = SurfaceSubclass((5, 3), 0, 32) - pixelarray = pygame.PixelArray(expected_surface) - - surface = pixelarray.surface - - self.assertIs(surface, expected_surface) - self.assertIsInstance(surface, pygame.Surface) - self.assertIsInstance(surface, SurfaceSubclass) - - def test_set_slice (self): - for bpp in (8, 16, 24, 32): - sf = pygame.Surface ((6, 8), 0, bpp) - sf.fill ((0, 0, 0)) - ar = pygame.PixelArray (sf) - - # Test single value assignment - val = sf.map_rgb ((128, 128, 128)) - ar[0:2] = val - self.assertEqual (ar[0][0], val) - self.assertEqual (ar[0][1], val) - self.assertEqual (ar[1][0], val) - self.assertEqual (ar[1][1], val) - - val = sf.map_rgb ((0, 255, 255)) - ar[-3:-1] = val - self.assertEqual (ar[3][0], val) - self.assertEqual (ar[-2][1], val) - - val = sf.map_rgb ((255, 255, 255)) - ar[-3:] = (255, 255, 255) - self.assertEqual (ar[4][0], val) - self.assertEqual (ar[-1][1], val) - - # Test array size mismatch. - # Changed in ver. 1.9.2 - # (was "Test list assignment, this is a vertical assignment.") - val = sf.map_rgb ((0, 255, 0)) - self.assertRaises (ValueError, ar.__setitem__, slice (2, 4), - [val] * 8) - - # And the horizontal assignment. - val = sf.map_rgb ((255, 0, 0)) - val2 = sf.map_rgb ((128, 0, 255)) - ar[0:2] = [val, val2] - self.assertEqual (ar[0][0], val) - self.assertEqual (ar[1][0], val2) - self.assertEqual (ar[0][1], val) - self.assertEqual (ar[1][1], val2) - self.assertEqual (ar[0][4], val) - self.assertEqual (ar[1][4], val2) - self.assertEqual (ar[0][5], val) - self.assertEqual (ar[1][5], val2) - - # Test pixelarray assignment. - ar[:] = (0, 0, 0) - sf2 = pygame.Surface ((6, 8), 0, bpp) - sf2.fill ((255, 0, 255)) - - val = sf.map_rgb ((255, 0, 255)) - ar2 = pygame.PixelArray (sf2) - - ar[:] = ar2[:] - self.assertEqual (ar[0][0], val) - self.assertEqual (ar[5][7], val) - - # Ensure p1 ... pn are freed for array[...] = [p1, ..., pn] - # Bug fix: reference counting. 
- if hasattr(sys, 'getrefcount'): - class Int(int): - """Unique int instances""" - pass - - sf = pygame.Surface ((5, 2), 0, 32) - ar = pygame.PixelArray (sf) - pixel_list = [Int(i) for i in range(ar.shape[0])] - refcnts_before = [sys.getrefcount (i) for i in pixel_list] - ar[...] = pixel_list - refcnts_after = [sys.getrefcount (i) for i in pixel_list] - gc.collect () - self.assertEqual (refcnts_after, refcnts_before) - - def test_subscript (self): - # By default we do not need to work with any special __***__ - # methods as map subscripts are the first looked up by the - # object system. - for bpp in (8, 16, 24, 32): - sf = pygame.Surface ((6, 8), 0, bpp) - sf.set_at ((1, 3), (0, 255, 0)) - sf.set_at ((0, 0), (0, 255, 0)) - sf.set_at ((4, 4), (0, 255, 0)) - val = sf.map_rgb ((0, 255, 0)) - - ar = pygame.PixelArray (sf) - - # Test single value requests. - self.assertEqual (ar[1,3], val) - self.assertEqual (ar[0,0], val) - self.assertEqual (ar[4,4], val) - self.assertEqual (ar[1][3], val) - self.assertEqual (ar[0][0], val) - self.assertEqual (ar[4][4], val) - - # Test ellipse working. 
- self.assertEqual (len (ar[...,...]), 6) - self.assertEqual (len (ar[1,...]), 8) - self.assertEqual (len (ar[...,3]), 6) - - # Test simple slicing - self.assertEqual (len (ar[:,:]), 6) - self.assertEqual (len (ar[:,]), 6) - self.assertEqual (len (ar[1,:]), 8) - self.assertEqual (len (ar[:,2]), 6) - # Empty slices - self.assertEqual (ar[4:4,], None) - self.assertEqual (ar[4:4,...], None) - self.assertEqual (ar[4:4,2:2], None) - self.assertEqual (ar[4:4,1:4], None) - self.assertEqual (ar[4:4:2,], None) - self.assertEqual (ar[4:4:-2,], None) - self.assertEqual (ar[4:4:1,...], None) - self.assertEqual (ar[4:4:-1,...], None) - self.assertEqual (ar[4:4:1,2:2], None) - self.assertEqual (ar[4:4:-1,1:4], None) - self.assertEqual (ar[...,4:4], None) - self.assertEqual (ar[1:4,4:4], None) - self.assertEqual (ar[...,4:4:1], None) - self.assertEqual (ar[...,4:4:-1], None) - self.assertEqual (ar[2:2,4:4:1], None) - self.assertEqual (ar[1:4,4:4:-1], None) - - # Test advanced slicing - ar[0] = 0 - ar[1] = 1 - ar[2] = 2 - ar[3] = 3 - ar[4] = 4 - ar[5] = 5 - - # We should receive something like [0,2,4] - self.assertEqual (ar[::2,1][0], 0) - self.assertEqual (ar[::2,1][1], 2) - self.assertEqual (ar[::2,1][2], 4) - # We should receive something like [2,2,2] - self.assertEqual (ar[2,::2][0], 2) - self.assertEqual (ar[2,::2][1], 2) - self.assertEqual (ar[2,::2][2], 2) - - # Should create a 3x3 array of [0,2,4] - ar2 = ar[::2,::2] - self.assertEqual (len (ar2), 3) - self.assertEqual (ar2[0][0], 0) - self.assertEqual (ar2[0][1], 0) - self.assertEqual (ar2[0][2], 0) - self.assertEqual (ar2[2][0], 4) - self.assertEqual (ar2[2][1], 4) - self.assertEqual (ar2[2][2], 4) - self.assertEqual (ar2[1][0], 2) - self.assertEqual (ar2[2][0], 4) - self.assertEqual (ar2[1][1], 2) - - # Should create a reversed 3x8 array over X of [1,2,3] -> [3,2,1] - ar2 = ar[3:0:-1] - self.assertEqual (len (ar2), 3) - self.assertEqual (ar2[0][0], 3) - self.assertEqual (ar2[0][1], 3) - self.assertEqual (ar2[0][2], 3) - 
self.assertEqual (ar2[0][7], 3) - self.assertEqual (ar2[2][0], 1) - self.assertEqual (ar2[2][1], 1) - self.assertEqual (ar2[2][2], 1) - self.assertEqual (ar2[2][7], 1) - self.assertEqual (ar2[1][0], 2) - self.assertEqual (ar2[1][1], 2) - # Should completely reverse the array over X -> [5,4,3,2,1,0] - ar2 = ar[::-1] - self.assertEqual (len (ar2), 6) - self.assertEqual (ar2[0][0], 5) - self.assertEqual (ar2[0][1], 5) - self.assertEqual (ar2[0][3], 5) - self.assertEqual (ar2[0][-1], 5) - self.assertEqual (ar2[1][0], 4) - self.assertEqual (ar2[1][1], 4) - self.assertEqual (ar2[1][3], 4) - self.assertEqual (ar2[1][-1], 4) - self.assertEqual (ar2[-1][-1], 0) - self.assertEqual (ar2[-2][-2], 1) - self.assertEqual (ar2[-3][-1], 2) - - # Test advanced slicing - ar[:] = 0 - ar2 = ar[:,1] - ar2[:] = [99] * len(ar2) - self.assertEqual (ar2[0], 99) - self.assertEqual (ar2[-1], 99) - self.assertEqual (ar2[-2], 99) - self.assertEqual (ar2[2], 99) - self.assertEqual (ar[0,1], 99) - self.assertEqual (ar[1,1], 99) - self.assertEqual (ar[2,1], 99) - self.assertEqual (ar[-1,1], 99) - self.assertEqual (ar[-2,1], 99) - - # Cases where a 2d array should have a dimension of length 1. - ar2 = ar[1:2,:] - self.assertEqual (ar2.shape, (1, ar.shape[1])) - ar2 = ar[:,1:2] - self.assertEqual (ar2.shape, (ar.shape[0], 1)) - sf2 = pygame.Surface ((1, 5), 0, 32) - ar2 = pygame.PixelArray (sf2) - self.assertEqual (ar2.shape, sf2.get_size ()) - sf2 = pygame.Surface ((7, 1), 0, 32) - ar2 = pygame.PixelArray (sf2) - self.assertEqual (ar2.shape, sf2.get_size ()) - - # Array has a single ellipsis subscript: the identity operator - ar2 = ar[...] 
- self.assertTrue(ar2 is ar) - - # Ensure x and y are freed for p = array[x, y] - # Bug fix: reference counting - if hasattr(sys, 'getrefcount'): - class Int(int): - """Unique int instances""" - pass - - sf = pygame.Surface ((2, 2), 0, 32) - ar = pygame.PixelArray (sf) - x, y = Int(0), Int(1) - rx_before, ry_before = sys.getrefcount (x), sys.getrefcount (y) - p = ar[x, y] - rx_after, ry_after = sys.getrefcount (x), sys.getrefcount (y) - self.assertEqual (rx_after, rx_before) - self.assertEqual (ry_after, ry_before) - - def test_ass_subscript (self): - for bpp in (8, 16, 24, 32): - sf = pygame.Surface ((6, 8), 0, bpp) - sf.fill ((255, 255, 255)) - ar = pygame.PixelArray (sf) - - # Test ellipse working - ar[...,...] = (0, 0, 0) - self.assertEqual (ar[0,0], 0) - self.assertEqual (ar[1,0], 0) - self.assertEqual (ar[-1,-1], 0) - ar[...,] = (0, 0, 255) - self.assertEqual (ar[0,0], sf.map_rgb ((0, 0, 255))) - self.assertEqual (ar[1,0], sf.map_rgb ((0, 0, 255))) - self.assertEqual (ar[-1,-1], sf.map_rgb ((0, 0, 255))) - ar[:,...] = (255, 0, 0) - self.assertEqual (ar[0,0], sf.map_rgb ((255, 0, 0))) - self.assertEqual (ar[1,0], sf.map_rgb ((255, 0, 0))) - self.assertEqual (ar[-1,-1], sf.map_rgb ((255, 0, 0))) - ar[...] 
= (0, 255, 0) - self.assertEqual (ar[0,0], sf.map_rgb ((0, 255, 0))) - self.assertEqual (ar[1,0], sf.map_rgb ((0, 255, 0))) - self.assertEqual (ar[-1,-1], sf.map_rgb ((0, 255, 0))) - - # Ensure x and y are freed for array[x, y] = p - # Bug fix: reference counting - if hasattr(sys, 'getrefcount'): - class Int(int): - """Unique int instances""" - pass - - sf = pygame.Surface ((2, 2), 0, 32) - ar = pygame.PixelArray (sf) - x, y = Int(0), Int(1) - rx_before, ry_before = sys.getrefcount (x), sys.getrefcount (y) - ar[x, y] = 0 - rx_after, ry_after = sys.getrefcount (x), sys.getrefcount (y) - self.assertEqual (rx_after, rx_before) - self.assertEqual (ry_after, ry_before) - - def test_pixels_field(self): - for bpp in [1, 2, 3, 4]: - sf = pygame.Surface ((11, 7), 0, bpp * 8) - ar = pygame.PixelArray (sf) - ar2 = ar[1:,:] - self.assertEqual (ar2._pixels_address - ar._pixels_address, - ar.itemsize) - ar2 = ar[:,1:] - self.assertEqual (ar2._pixels_address - ar._pixels_address, - ar.strides[1]) - ar2 = ar[::-1,:] - self.assertEqual (ar2._pixels_address - ar._pixels_address, - (ar.shape[0] - 1) * ar.itemsize) - ar2 = ar[::-2,:] - self.assertEqual (ar2._pixels_address - ar._pixels_address, - (ar.shape[0] - 1) * ar.itemsize) - ar2 = ar[:,::-1] - self.assertEqual (ar2._pixels_address - ar._pixels_address, - (ar.shape[1] - 1) * ar.strides[1]) - ar3 = ar2[::-1,:] - self.assertEqual (ar3._pixels_address - ar._pixels_address, - (ar.shape[0] - 1) * ar.strides[0] + - (ar.shape[1] - 1) * ar.strides[1]) - ar2 = ar[:,::-2] - self.assertEqual (ar2._pixels_address - ar._pixels_address, - (ar.shape[1] - 1) * ar.strides[1]) - ar2 = ar[2::,3::] - self.assertEqual (ar2._pixels_address - ar._pixels_address, - ar.strides[0] * 2 + ar.strides[1] * 3) - ar2 = ar[2::2,3::4] - self.assertEqual (ar2._pixels_address - ar._pixels_address, - ar.strides[0] * 2 + ar.strides[1] * 3) - ar2 = ar[9:2:-1,:] - self.assertEqual (ar2._pixels_address - ar._pixels_address, - ar.strides[0] * 9) - ar2 = ar[:,5:2:-1] - 
self.assertEqual (ar2._pixels_address - ar._pixels_address, - ar.strides[1] * 5) - ##? ar2 = ar[:,9:2:-1] - - def test_make_surface (self): - bg_color = pygame.Color (255, 255, 255) - fg_color = pygame.Color (128, 100, 0) - for bpp in (8, 16, 24, 32): - sf = pygame.Surface ((10, 20), 0, bpp) - bg_color_adj = sf.unmap_rgb (sf.map_rgb (bg_color)) - fg_color_adj = sf.unmap_rgb (sf.map_rgb (fg_color)) - sf.fill (bg_color_adj) - sf.fill (fg_color_adj, (2, 5, 4, 11)) - ar = pygame.PixelArray (sf) - newsf = ar[::2,::2].make_surface () - rect = newsf.get_rect () - self.assertEqual (rect.width, 5) - self.assertEqual (rect.height, 10) - for p in [(0, 2), (0, 3), (1, 2), - (2, 2), (3, 2), (3, 3), - (0, 7), (0, 8), (1, 8), - (2, 8), (3, 8), (3, 7)]: - self.assertEqual (newsf.get_at (p), bg_color_adj) - for p in [(1, 3), (2, 3), (1, 5), (2, 5), (1, 7), (2, 7)]: - self.assertEqual (newsf.get_at (p), fg_color_adj) - - # Bug when array width is not a multiple of the slice step. - w = 17 - lst = list(range(w)) - w_slice = len(lst[::2]) - h = 3 - sf = pygame.Surface ((w, h), 0, 32) - ar = pygame.PixelArray (sf) - ar2 = ar[::2,:] - sf2 = ar2.make_surface () - w2, h2 = sf2.get_size () - self.assertEqual (w2, w_slice) - self.assertEqual (h2, h) - - # Bug when array height is not a multiple of the slice step. - # This can hang the Python interpreter. - h = 17 - lst = list(range(h)) - h_slice = len(lst[::2]) - w = 3 - sf = pygame.Surface ((w, h), 0, 32) - ar = pygame.PixelArray (sf) - ar2 = ar[:,::2] - sf2 = ar2.make_surface () # Hangs here. 
- w2, h2 = sf2.get_size () - self.assertEqual (w2, w) - self.assertEqual (h2, h_slice) - - def test_make_surface__subclassed_surface(self): - """Ensure make_surface can handle subclassed surfaces.""" - expected_size = (3, 5) - expected_flags = 0 - expected_depth = 32 - original_surface = SurfaceSubclass(expected_size, expected_flags, - expected_depth) - pixelarray = pygame.PixelArray(original_surface) - - surface = pixelarray.make_surface() - - self.assertIsNot(surface, original_surface) - self.assertIsInstance(surface, pygame.Surface) - self.assertNotIsInstance(surface, SurfaceSubclass) - self.assertEqual(surface.get_size(), expected_size) - self.assertEqual(surface.get_flags(), expected_flags) - self.assertEqual(surface.get_bitsize(), expected_depth) - - def test_iter (self): - for bpp in (8, 16, 24, 32): - sf = pygame.Surface ((5, 10), 0, bpp) - ar = pygame.PixelArray (sf) - iterations = 0 - for col in ar: - self.assertEqual (len (col), 10) - iterations += 1 - self.assertEqual (iterations, 5) - - def test_replace (self): - #print "replace start" - for bpp in (8, 16, 24, 32): - sf = pygame.Surface ((10, 10), 0, bpp) - sf.fill ((255, 0, 0)) - rval = sf.map_rgb ((0, 0, 255)) - oval = sf.map_rgb ((255, 0, 0)) - ar = pygame.PixelArray (sf) - ar[::2].replace ((255, 0, 0), (0, 0, 255)) - self.assertEqual (ar[0][0], rval) - self.assertEqual (ar[1][0], oval) - self.assertEqual (ar[2][3], rval) - self.assertEqual (ar[3][6], oval) - self.assertEqual (ar[8][9], rval) - self.assertEqual (ar[9][9], oval) - - ar[::2].replace ((0, 0, 255), (255, 0, 0), weights=(10, 20, 50)) - self.assertEqual (ar[0][0], oval) - self.assertEqual (ar[2][3], oval) - self.assertEqual (ar[3][6], oval) - self.assertEqual (ar[8][9], oval) - self.assertEqual (ar[9][9], oval) - #print "replace end" - - def test_extract (self): - #print "extract start" - for bpp in (8, 16, 24, 32): - sf = pygame.Surface ((10, 10), 0, bpp) - sf.fill ((0, 0, 255)) - sf.fill ((255, 0, 0), (2, 2, 6, 6)) - - white = 
sf.map_rgb ((255, 255, 255)) - black = sf.map_rgb ((0, 0, 0)) - - ar = pygame.PixelArray (sf) - newar = ar.extract ((255, 0, 0)) - - self.assertEqual (newar[0][0], black) - self.assertEqual (newar[1][0], black) - self.assertEqual (newar[2][3], white) - self.assertEqual (newar[3][6], white) - self.assertEqual (newar[8][9], black) - self.assertEqual (newar[9][9], black) - - newar = ar.extract ((255, 0, 0), weights=(10, 0.1, 50)) - self.assertEqual (newar[0][0], black) - self.assertEqual (newar[1][0], black) - self.assertEqual (newar[2][3], white) - self.assertEqual (newar[3][6], white) - self.assertEqual (newar[8][9], black) - self.assertEqual (newar[9][9], black) - #print "extract end" - - def test_2dslice_assignment (self): - w = 2 * 5 * 8 - h = 3 * 5 * 9 - sf = pygame.Surface ((w, h), 0, 32) - ar = pygame.PixelArray (sf) - size = (w, h) - strides = (1, w) - offset = 0 - self._test_assignment (sf, ar, size, strides, offset) - xslice = slice (None, None, 2) - yslice = slice (None, None, 3) - ar, size, strides, offset = self._array_slice ( - ar, size, (xslice, yslice), strides, offset) - self._test_assignment (sf, ar, size, strides, offset) - xslice = slice (5, None, 5) - yslice = slice (5, None, 5) - ar, size, strides, offset = self._array_slice ( - ar, size, (xslice, yslice), strides, offset) - self._test_assignment (sf, ar, size, strides, offset) - - def _test_assignment (self, sf, ar, ar_size, ar_strides, ar_offset): - self.assertEqual (ar.shape, ar_size) - ar_w, ar_h = ar_size - ar_xstride, ar_ystride = ar_strides - sf_w, sf_h = sf.get_size () - black = pygame.Color ('black') - color = pygame.Color (0, 0, 12) - pxcolor = sf.map_rgb (color) - sf.fill (black) - for ar_x, ar_y in [(0, 0), - (0, ar_h - 4), - (ar_w - 3, 0), - (0, ar_h - 1), - (ar_w - 1, 0), - (ar_w - 1, ar_h - 1)]: - sf_offset = ar_offset + ar_x * ar_xstride + ar_y * ar_ystride - sf_y = sf_offset // sf_w - sf_x = sf_offset - sf_y * sf_w - sf_posn = (sf_x, sf_y) - sf_pix = sf.get_at (sf_posn) - 
self.assertEqual (sf_pix, black, - "at pixarr posn (%i, %i) (surf posn (%i, %i)): " - "%s != %s" % - (ar_x, ar_y, sf_x, sf_y, sf_pix, black)) - ar[ar_x, ar_y] = pxcolor - sf_pix = sf.get_at (sf_posn) - self.assertEqual (sf_pix, color, - "at pixarr posn (%i, %i) (surf posn (%i, %i)): " - "%s != %s" % - (ar_x, ar_y, sf_x, sf_y, sf_pix, color)) - - def _array_slice (self, ar, size, slices, strides, offset): - ar = ar[slices] - xslice, yslice = slices - w, h = size - xstart, xstop, xstep = xslice.indices(w) - ystart, ystop, ystep = yslice.indices(h) - w = (xstop - xstart + xstep - 1) // xstep - h = (ystop - ystart + ystep - 1) // ystep - xstride, ystride = strides - offset += xstart * xstride + ystart * ystride - xstride *= xstep - ystride *= ystep - return ar, (w, h), (xstride, ystride), offset - - def test_array_properties(self): - # itemsize, ndim, shape, and strides. - for bpp in [1, 2, 3, 4]: - sf = pygame.Surface ((2, 2), 0, bpp * 8) - ar = pygame.PixelArray (sf) - self.assertEqual (ar.itemsize, bpp) - - for shape in [(4, 16), (5, 13)]: - w, h = shape - sf = pygame.Surface (shape, 0, 32) - bpp = sf.get_bytesize () - pitch = sf.get_pitch () - ar = pygame.PixelArray (sf) - self.assertEqual (ar.ndim, 2) - self.assertEqual (ar.shape, shape) - self.assertEqual (ar.strides, (bpp, pitch)) - ar2 = ar[::2,:] - w2 = len(([0] * w)[::2]) - self.assertEqual (ar2.ndim, 2) - self.assertEqual (ar2.shape, (w2, h)) - self.assertEqual (ar2.strides, (2 * bpp, pitch)) - ar2 = ar[:,::2] - h2 = len(([0] * h)[::2]) - self.assertEqual (ar2.ndim, 2) - self.assertEqual (ar2.shape, (w, h2)) - self.assertEqual (ar2.strides, (bpp, 2 * pitch)) - ar2 = ar[1] - self.assertEqual (ar2.ndim, 1) - self.assertEqual (ar2.shape, (h,)) - self.assertEqual (ar2.strides, (pitch,)) - ar2 = ar[:,1] - self.assertEqual (ar2.ndim, 1) - self.assertEqual (ar2.shape, (w,)) - self.assertEqual (ar2.strides, (bpp,)) - - def test_self_assign(self): - # This differs from NumPy arrays. 
- w = 10 - max_x = w - 1 - h = 20 - max_y = h - 1 - for bpp in [1, 2, 3, 4]: - sf = pygame.Surface ((w, h), 0, bpp * 8) - ar = pygame.PixelArray (sf) - for i in range (w * h): - ar[i % w, i // w] = i - ar[:,:] = ar[::-1,:] - for i in range (w * h): - self.assertEqual (ar[max_x - i % w, i // w], i) - ar = pygame.PixelArray (sf) - for i in range (w * h): - ar[i % w, i // w] = i - ar[:,:] = ar[:,::-1] - for i in range (w * h): - self.assertEqual (ar[i % w, max_y - i // w ], i) - ar = pygame.PixelArray (sf) - for i in range(w * h): - ar[i % w, i // w] = i - ar[:,:] = ar[::-1,::-1] - for i in range (w * h): - self.assertEqual (ar[max_x - i % w, max_y - i // w], i) - - def test_color_value (self): - # Confirm that a PixelArray slice assignment distinguishes between - # pygame.Color and tuple objects as single (r, g, b[, a]) colors - # and other sequences as sequences of colors to be treated as - # slices. - sf = pygame.Surface ((5, 5), 0, 32) - ar = pygame.PixelArray (sf) - index = slice(None, None, 1) - ar.__setitem__ (index, (1, 2, 3)) - self.assertEqual (ar[0, 0], sf.map_rgb ((1, 2, 3))) - ar.__setitem__ (index, pygame.Color (10, 11, 12)) - self.assertEqual (ar[0, 0], sf.map_rgb ((10, 11, 12))) - self.assertRaises (ValueError, ar.__setitem__, index, (1, 2, 3, 4, 5)) - self.assertRaises (ValueError, ar.__setitem__, (index, index), - (1, 2, 3, 4, 5)) - self.assertRaises (ValueError, ar.__setitem__, index, [1, 2, 3]) - self.assertRaises (ValueError, ar.__setitem__, (index, index), - [1, 2, 3]) - sf = pygame.Surface ((3, 3), 0, 32) - ar = pygame.PixelArray (sf) - ar[:] = (20, 30, 40) - self.assertEqual (ar[0, 0], sf.map_rgb ((20, 30, 40))) - ar[:] = [20, 30, 40] - self.assertEqual (ar[0, 0], 20) - self.assertEqual (ar[1, 0], 30) - self.assertEqual (ar[2, 0], 40) - - def test_transpose (self): - # PixelArray.transpose(): swap axis on a 2D array, add a length - # 1 x axis to a 1D array. 
- sf = pygame.Surface ((3, 7), 0, 32) - ar = pygame.PixelArray (sf) - w, h = ar.shape - dx, dy = ar.strides - for i in range (w * h): - x = i % w - y = i // w - ar[x, y] = i - ar_t = ar.transpose() - self.assertEqual (ar_t.shape, (h, w)) - self.assertEqual (ar_t.strides, (dy, dx)) - for i in range (w * h): - x = i % w - y = i // w - self.assertEqual (ar_t[y, x], ar[x, y]) - ar1D = ar[0] - ar2D = ar1D.transpose() - self.assertEqual (ar2D.shape, (1, h)) - for y in range (h): - self.assertEqual (ar1D[y], ar2D[0, y]) - ar1D = ar[:,0] - ar2D = ar1D.transpose() - self.assertEqual (ar2D.shape, (1, w)) - for x in range (2): - self.assertEqual (ar1D[x], ar2D[0, x]) - - def test_length_1_dimension_broadcast (self): - w = 5 - sf = pygame.Surface ((w, w), 0, 32) - ar = pygame.PixelArray (sf) - # y-axis broadcast. - sf_x = pygame.Surface ((w, 1), 0, 32) - ar_x = pygame.PixelArray (sf_x) - for i in range (w): - ar_x[i, 0] = (w + 1) * 10 - ar[...] = ar_x - for y in range (w): - for x in range (w): - self.assertEqual (ar[x, y], ar_x[x, 0]) - # x-axis broadcast. - ar[...] = 0 - sf_y = pygame.Surface ((1, w), 0, 32) - ar_y = pygame.PixelArray (sf_y) - for i in range (w): - ar_y[0, i] = (w + 1) * 10 - ar[...] = ar_y - for x in range (w): - for y in range (w): - self.assertEqual (ar[x, y], ar_y[0, y]) - # (1, 1) array broadcast. - ar[...] = 0 - sf_1px = pygame.Surface ((1, 1), 0, 32) - ar_1px = pygame.PixelArray (sf_1px) - ar_1px[0, 0] = 42 # Well it had to show up somewhere. - ar[...] = ar_1px - for y in range (w): - for x in range (w): - self.assertEqual (ar[x, y], 42) - - def test_assign_size_mismatch (self): - sf = pygame.Surface ((7, 11), 0, 32) - ar = pygame.PixelArray (sf) - self.assertRaises (ValueError, ar.__setitem__, Ellipsis, ar[:, 0:2]) - self.assertRaises (ValueError, ar.__setitem__, Ellipsis, ar[0:2, :]) - - def test_repr (self): - # Python 3.x bug: the tp_repr slot function returned NULL instead - # of a Unicode string, triggering an exception. 
- sf = pygame.Surface ((3, 1), pygame.SRCALPHA, 16) - ar = pygame.PixelArray(sf) - ar[...] = 42 - pixel = sf.get_at_mapped ((0, 0)) - self.assertEqual(repr (ar), - type (ar).__name__ + "([\n [42, 42, 42]]\n)") - - -class PixelArrayArrayInterfaceTest(unittest.TestCase, TestMixin): - - @unittest.skipIf(IS_PYPY, 'skipping for PyPy (why?)') - def test_basic (self): - # Check unchanging fields. - sf = pygame.Surface ((2, 2), 0, 32) - ar = pygame.PixelArray (sf) - - ai = arrinter.ArrayInterface (ar) - self.assertEqual (ai.two, 2) - self.assertEqual (ai.typekind, 'u') - self.assertEqual (ai.nd, 2) - self.assertEqual (ai.data, ar._pixels_address) - - @unittest.skipIf(IS_PYPY, 'skipping for PyPy (why?)') - def test_shape(self): - - for shape in [[4, 16], [5, 13]]: - w, h = shape - sf = pygame.Surface (shape, 0, 32) - ar = pygame.PixelArray (sf) - ai = arrinter.ArrayInterface (ar) - ai_shape = [ai.shape[i] for i in range(ai.nd)] - self.assertEqual (ai_shape, shape) - ar2 = ar[::2,:] - ai2 = arrinter.ArrayInterface (ar2) - w2 = len(([0] * w)[::2]) - ai_shape = [ai2.shape[i] for i in range(ai2.nd)] - self.assertEqual (ai_shape, [w2, h]) - ar2 = ar[:,::2] - ai2 = arrinter.ArrayInterface (ar2) - h2 = len(([0] * h)[::2]) - ai_shape = [ai2.shape[i] for i in range(ai2.nd)] - self.assertEqual (ai_shape, [w, h2]) - - @unittest.skipIf(IS_PYPY, 'skipping for PyPy (why?)') - def test_itemsize (self): - for bytes_per_pixel in range(1, 5): - bits_per_pixel = 8 * bytes_per_pixel - sf = pygame.Surface ((2, 2), 0, bits_per_pixel) - ar = pygame.PixelArray (sf) - ai = arrinter.ArrayInterface (ar) - self.assertEqual (ai.itemsize, bytes_per_pixel) - - @unittest.skipIf(IS_PYPY, 'skipping for PyPy (why?)') - def test_flags (self): - aim = arrinter - common_flags = (aim.PAI_NOTSWAPPED | aim.PAI_WRITEABLE | - aim.PAI_ALIGNED) - s = pygame.Surface ((10, 2), 0, 32) - ar = pygame.PixelArray (s) - ai = aim.ArrayInterface (ar) - self.assertEqual (ai.flags, common_flags | aim.PAI_FORTRAN) - - ar2 = 
ar[::2,:] - ai = aim.ArrayInterface (ar2) - self.assertEqual (ai.flags, common_flags) - - s = pygame.Surface ((8, 2), 0, 24) - ar = pygame.PixelArray (s) - ai = aim.ArrayInterface (ar) - self.assertEqual (ai.flags, common_flags | aim.PAI_FORTRAN) - - s = pygame.Surface ((7, 2), 0, 24) - ar = pygame.PixelArray (s) - ai = aim.ArrayInterface (ar) - self.assertEqual (ai.flags, common_flags) - - def test_slicing (self): - # This will implicitly test data and strides fields. - # - # Need an 8 bit test surfaces because pixelcopy.make_surface - # returns an 8 bit surface for a 2d array. - - factors = [7, 3, 11] - - w = reduce (operator.mul, factors, 1) - h = 13 - sf = pygame.Surface ((w, h), 0, 8) - color = sf.map_rgb ((1, 17, 128)) - ar = pygame.PixelArray (sf) - for f in factors[:-1]: - w = w // f - sf.fill ((0, 0, 0)) - ar = ar[f:f + w,:] - ar[0][0] = color - ar[-1][-2] = color - ar[0][-3] = color - sf2 = ar.make_surface () - sf3 = pygame.pixelcopy.make_surface (ar) - self.assert_surfaces_equal (sf3, sf2) - - h = reduce (operator.mul, factors, 1) - w = 13 - sf = pygame.Surface ((w, h), 0, 8) - color = sf.map_rgb ((1, 17, 128)) - ar = pygame.PixelArray (sf) - for f in factors[:-1]: - h = h // f - sf.fill ((0, 0, 0)) - ar = ar[:,f:f + h] - ar[0][0] = color - ar[-1][-2] = color - ar[0][-3] = color - sf2 = ar.make_surface () - sf3 = pygame.pixelcopy.make_surface (ar) - self.assert_surfaces_equal (sf3, sf2) - - w = 20 - h = 10 - sf = pygame.Surface ((w, h), 0, 8) - color = sf.map_rgb ((1, 17, 128)) - ar = pygame.PixelArray (sf) - for slices in [(slice (w), slice (h)), - (slice (0, w, 2), slice (h)), - (slice (0, w, 3), slice (h)), - (slice (w), slice (0, h, 2)), - (slice (w), slice (0, h, 3)), - (slice (0, w, 2), slice (0, h, 2)), - (slice (0, w, 3), slice (0, h, 3)), - ]: - sf.fill ((0, 0, 0)) - ar2 = ar[slices] - ar2[0][0] = color - ar2[-1][-2] = color - ar2[0][-3] = color - sf2 = ar2.make_surface () - sf3 = pygame.pixelcopy.make_surface (ar2) - self.assert_surfaces_equal 
(sf3, sf2) - - -@unittest.skipIf(not pygame.HAVE_NEWBUF, 'newbuf not implemented') -class PixelArrayNewBufferTest(unittest.TestCase, TestMixin): - - if pygame.HAVE_NEWBUF: - from pygame.tests.test_utils import buftools - - bitsize_to_format = {8: 'B', 16: '=H', 24: '3x', 32: '=I'} - - def test_newbuf_2D (self): - buftools = self.buftools - Importer = buftools.Importer - - for bit_size in [8, 16, 24, 32]: - s = pygame.Surface ((10, 2), 0, bit_size) - ar = pygame.PixelArray (s) - format = self.bitsize_to_format[bit_size] - itemsize = ar.itemsize - shape = ar.shape - w, h = shape - strides = ar.strides - length = w * h * itemsize - imp = Importer (ar, buftools.PyBUF_FULL) - self.assertTrue (imp.obj, ar) - self.assertEqual (imp.len, length) - self.assertEqual (imp.ndim, 2) - self.assertEqual (imp.itemsize, itemsize) - self.assertEqual (imp.format, format) - self.assertFalse (imp.readonly) - self.assertEqual (imp.shape, shape) - self.assertEqual (imp.strides, strides) - self.assertTrue (imp.suboffsets is None) - self.assertEqual (imp.buf, s._pixels_address) - - s = pygame.Surface ((8, 16), 0, 32) - ar = pygame.PixelArray (s) - format = self.bitsize_to_format[s.get_bitsize ()] - itemsize = ar.itemsize - shape = ar.shape - w, h = shape - strides = ar.strides - length = w * h * itemsize - imp = Importer (ar, buftools.PyBUF_SIMPLE) - self.assertTrue (imp.obj, ar) - self.assertEqual (imp.len, length) - self.assertEqual (imp.ndim, 0) - self.assertEqual (imp.itemsize, itemsize) - self.assertTrue (imp.format is None) - self.assertFalse (imp.readonly) - self.assertTrue (imp.shape is None) - self.assertTrue (imp.strides is None) - self.assertTrue (imp.suboffsets is None) - self.assertEqual (imp.buf, s._pixels_address) - imp = Importer (ar, buftools.PyBUF_FORMAT) - self.assertEqual (imp.ndim, 0) - self.assertEqual (imp.format, format) - imp = Importer (ar, buftools.PyBUF_WRITABLE) - self.assertEqual (imp.ndim, 0) - self.assertTrue (imp.format is None) - imp = Importer (ar, 
buftools.PyBUF_F_CONTIGUOUS) - self.assertEqual (imp.ndim, 2) - self.assertTrue (imp.format is None) - self.assertEqual (imp.shape, shape) - self.assertEqual (imp.strides, strides) - imp = Importer (ar, buftools.PyBUF_ANY_CONTIGUOUS) - self.assertEqual (imp.ndim, 2) - self.assertTrue (imp.format is None) - self.assertEqual (imp.shape, shape) - self.assertEqual (imp.strides, strides) - self.assertRaises (BufferError, Importer, ar, - buftools.PyBUF_C_CONTIGUOUS) - self.assertRaises (BufferError, Importer, ar, buftools.PyBUF_ND) - - ar_sliced = ar[:,::2] - format = self.bitsize_to_format[s.get_bitsize ()] - itemsize = ar_sliced.itemsize - shape = ar_sliced.shape - w, h = shape - strides = ar_sliced.strides - length = w * h * itemsize - imp = Importer (ar_sliced, buftools.PyBUF_STRIDED) - self.assertEqual (imp.len, length) - self.assertEqual (imp.ndim, 2) - self.assertEqual (imp.itemsize, itemsize) - self.assertTrue (imp.format is None) - self.assertFalse (imp.readonly) - self.assertEqual (imp.shape, shape) - self.assertEqual (imp.strides, strides) - self.assertEqual (imp.buf, s._pixels_address) - self.assertRaises (BufferError, Importer, ar_sliced, - buftools.PyBUF_SIMPLE) - self.assertRaises (BufferError, Importer, ar_sliced, - buftools.PyBUF_ND) - self.assertRaises (BufferError, Importer, ar_sliced, - buftools.PyBUF_C_CONTIGUOUS) - self.assertRaises (BufferError, Importer, ar_sliced, - buftools.PyBUF_F_CONTIGUOUS) - self.assertRaises (BufferError, Importer, ar_sliced, - buftools.PyBUF_ANY_CONTIGUOUS) - - ar_sliced = ar[::2,:] - format = self.bitsize_to_format[s.get_bitsize ()] - itemsize = ar_sliced.itemsize - shape = ar_sliced.shape - w, h = shape - strides = ar_sliced.strides - length = w * h * itemsize - imp = Importer (ar_sliced, buftools.PyBUF_STRIDED) - self.assertEqual (imp.len, length) - self.assertEqual (imp.ndim, 2) - self.assertEqual (imp.itemsize, itemsize) - self.assertTrue (imp.format is None) - self.assertFalse (imp.readonly) - self.assertEqual 
(imp.shape, shape) - self.assertEqual (imp.strides, strides) - self.assertEqual (imp.buf, s._pixels_address) - self.assertRaises (BufferError, Importer, ar_sliced, - buftools.PyBUF_SIMPLE) - self.assertRaises (BufferError, Importer, ar_sliced, - buftools.PyBUF_ND) - self.assertRaises (BufferError, Importer, ar_sliced, - buftools.PyBUF_C_CONTIGUOUS) - self.assertRaises (BufferError, Importer, ar_sliced, - buftools.PyBUF_F_CONTIGUOUS) - self.assertRaises (BufferError, Importer, ar_sliced, - buftools.PyBUF_ANY_CONTIGUOUS) - - s2 = s.subsurface ((2, 3, 5, 7)) - ar = pygame.PixelArray (s2) - format = self.bitsize_to_format[s.get_bitsize ()] - itemsize = ar.itemsize - shape = ar.shape - w, h = shape - strides = ar.strides - length = w * h * itemsize - imp = Importer (ar, buftools.PyBUF_STRIDES) - self.assertTrue (imp.obj, ar) - self.assertEqual (imp.len, length) - self.assertEqual (imp.ndim, 2) - self.assertEqual (imp.itemsize, itemsize) - self.assertTrue (imp.format is None) - self.assertFalse (imp.readonly) - self.assertEqual (imp.shape, shape) - self.assertEqual (imp.strides, strides) - self.assertTrue (imp.suboffsets is None) - self.assertEqual (imp.buf, s2._pixels_address) - self.assertRaises (BufferError, Importer, ar, buftools.PyBUF_SIMPLE) - self.assertRaises (BufferError, Importer, ar, buftools.PyBUF_FORMAT) - self.assertRaises (BufferError, Importer, ar, buftools.PyBUF_WRITABLE) - self.assertRaises (BufferError, Importer, ar, buftools.PyBUF_ND) - self.assertRaises (BufferError, Importer, ar, - buftools.PyBUF_C_CONTIGUOUS) - self.assertRaises (BufferError, Importer, ar, - buftools.PyBUF_F_CONTIGUOUS) - self.assertRaises (BufferError, Importer, ar, - buftools.PyBUF_ANY_CONTIGUOUS) - - def test_newbuf_1D(self): - buftools = self.buftools - Importer = buftools.Importer - - s = pygame.Surface ((2, 16), 0, 32) - ar_2D = pygame.PixelArray (s) - x = 0 - ar = ar_2D[x] - format = self.bitsize_to_format[s.get_bitsize ()] - itemsize = ar.itemsize - shape = ar.shape - h = 
shape[0] - strides = ar.strides - length = h * itemsize - buf = s._pixels_address + x * itemsize - imp = Importer (ar, buftools.PyBUF_STRIDES) - self.assertTrue (imp.obj, ar) - self.assertEqual (imp.len, length) - self.assertEqual (imp.ndim, 1) - self.assertEqual (imp.itemsize, itemsize) - self.assertTrue (imp.format is None) - self.assertFalse (imp.readonly) - self.assertEqual (imp.shape, shape) - self.assertEqual (imp.strides, strides) - self.assertTrue (imp.suboffsets is None) - self.assertEqual (imp.buf, buf) - imp = Importer (ar, buftools.PyBUF_FULL) - self.assertEqual (imp.ndim, 1) - self.assertEqual (imp.format, format) - self.assertRaises (BufferError, Importer, ar, - buftools.PyBUF_SIMPLE) - self.assertRaises (BufferError, Importer, ar, - buftools.PyBUF_FORMAT) - self.assertRaises (BufferError, Importer, ar, - buftools.PyBUF_WRITABLE) - self.assertRaises (BufferError, Importer, ar, - buftools.PyBUF_ND) - self.assertRaises (BufferError, Importer, ar, - buftools.PyBUF_C_CONTIGUOUS) - self.assertRaises (BufferError, Importer, ar, - buftools.PyBUF_F_CONTIGUOUS) - self.assertRaises (BufferError, Importer, ar, - buftools.PyBUF_ANY_CONTIGUOUS) - y = 10 - ar = ar_2D[:,y] - shape = ar.shape - w = shape[0] - strides = ar.strides - length = w * itemsize - buf = s._pixels_address + y * s.get_pitch() - imp = Importer (ar, buftools.PyBUF_FULL) - self.assertEqual (imp.len, length) - self.assertEqual (imp.ndim, 1) - self.assertEqual (imp.itemsize, itemsize) - self.assertEqual (imp.format, format) - self.assertFalse (imp.readonly) - self.assertEqual (imp.shape, shape) - self.assertEqual (imp.strides, strides) - self.assertEqual (imp.buf, buf) - self.assertTrue (imp.suboffsets is None) - imp = Importer (ar, buftools.PyBUF_SIMPLE) - self.assertEqual (imp.len, length) - self.assertEqual (imp.ndim, 0) - self.assertEqual (imp.itemsize, itemsize) - self.assertTrue (imp.format is None) - self.assertFalse (imp.readonly) - self.assertTrue (imp.shape is None) - self.assertTrue 
(imp.strides is None) - imp = Importer (ar, buftools.PyBUF_ND) - self.assertEqual (imp.len, length) - self.assertEqual (imp.ndim, 1) - self.assertEqual (imp.itemsize, itemsize) - self.assertTrue (imp.format is None) - self.assertFalse (imp.readonly) - self.assertEqual (imp.shape, shape) - self.assertTrue (imp.strides is None) - imp = Importer (ar, buftools.PyBUF_C_CONTIGUOUS) - self.assertEqual (imp.ndim, 1) - imp = Importer (ar, buftools.PyBUF_F_CONTIGUOUS) - self.assertEqual (imp.ndim, 1) - imp = Importer (ar, buftools.PyBUF_ANY_CONTIGUOUS) - self.assertEqual (imp.ndim, 1) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/pixelcopy_test.py b/venv/lib/python3.7/site-packages/pygame/tests/pixelcopy_test.py deleted file mode 100644 index 8cf89eb..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/pixelcopy_test.py +++ /dev/null @@ -1,653 +0,0 @@ -import ctypes -import platform -import unittest - -try: - from pygame.tests.test_utils import arrinter -except NameError: - pass -import pygame -from pygame.locals import * -from pygame.pixelcopy import ( - surface_to_array, map_array, array_to_surface, - make_surface -) - -IS_PYPY = 'PyPy' == platform.python_implementation() - - -def unsigned32(i): - """cast signed 32 bit integer to an unsigned integer""" - return i & 0xFFFFFFFF - -class PixelcopyModuleTest (unittest.TestCase): - - bitsizes = [8, 16, 32] - - test_palette = [(0, 0, 0, 255), - (10, 30, 60, 255), - (25, 75, 100, 255), - (100, 150, 200, 255), - (0, 100, 200, 255)] - - surf_size = (10, 12) - test_points = [((0, 0), 1), ((4, 5), 1), ((9, 0), 2), - ((5, 5), 2), ((0, 11), 3), ((4, 6), 3), - ((9, 11), 4), ((5, 6), 4)] - - def __init__(self, *args, **kwds): - pygame.display.init() - try: - unittest.TestCase.__init__(self, *args, **kwds) - self.sources = [self._make_src_surface(8), - self._make_src_surface(16), - self._make_src_surface(16, srcalpha=True), - self._make_src_surface(24), - 
self._make_src_surface(32), - self._make_src_surface(32, srcalpha=True)] - finally: - pygame.display.quit() - - def _make_surface(self, bitsize, srcalpha=False, palette=None): - if palette is None: - palette = self.test_palette - flags = 0 - if srcalpha: - flags |= SRCALPHA - surf = pygame.Surface(self.surf_size, flags, bitsize) - if bitsize == 8: - surf.set_palette([c[:3] for c in palette]) - return surf - - def _fill_surface(self, surf, palette=None): - if palette is None: - palette = self.test_palette - surf.fill(palette[1], (0, 0, 5, 6)) - surf.fill(palette[2], (5, 0, 5, 6)) - surf.fill(palette[3], (0, 6, 5, 6)) - surf.fill(palette[4], (5, 6, 5, 6)) - - def _make_src_surface(self, bitsize, srcalpha=False, palette=None): - surf = self._make_surface(bitsize, srcalpha, palette) - self._fill_surface(surf, palette) - return surf - - def setUp(self): - pygame.display.init() - - def tearDown(self): - pygame.display.quit() - - def test_surface_to_array_2d(self): - alpha_color = (0, 0, 0, 128) - - for surf in self.sources: - src_bitsize = surf.get_bitsize() - for dst_bitsize in self.bitsizes: - # dst in a surface standing in for a 2 dimensional array - # of unsigned integers. The byte order is system dependent. 
- dst = pygame.Surface(surf.get_size(), 0, dst_bitsize) - dst.fill((0, 0, 0, 0)) - view = dst.get_view('2') - self.assertFalse(surf.get_locked()) - if dst_bitsize < src_bitsize: - self.assertRaises(ValueError, surface_to_array, view, surf) - self.assertFalse(surf.get_locked()) - continue - surface_to_array(view, surf) - self.assertFalse(surf.get_locked()) - for posn, i in self.test_points: - sp = surf.get_at_mapped(posn) - dp = dst.get_at_mapped(posn) - self.assertEqual(dp, sp, - "%s != %s: flags: %i" - ", bpp: %i, posn: %s" % - (dp, sp, - surf.get_flags(), surf.get_bitsize(), - posn)) - del view - - if surf.get_masks()[3]: - dst.fill((0, 0, 0, 0)) - view = dst.get_view('2') - posn = (2, 1) - surf.set_at(posn, alpha_color) - self.assertFalse(surf.get_locked()) - surface_to_array(view, surf) - self.assertFalse(surf.get_locked()) - sp = surf.get_at_mapped(posn) - dp = dst.get_at_mapped(posn) - self.assertEqual(dp, sp, - "%s != %s: bpp: %i" % - (dp, sp, surf.get_bitsize())) - - if IS_PYPY: - return - # Swapped endian destination array - pai_flags = arrinter.PAI_ALIGNED | arrinter.PAI_WRITEABLE - for surf in self.sources: - for itemsize in [1, 2, 4, 8]: - if itemsize < surf.get_bytesize(): - continue - a = arrinter.Array(surf.get_size(), 'u', itemsize, - flags=pai_flags) - surface_to_array(a, surf) - for posn, i in self.test_points: - sp = unsigned32(surf.get_at_mapped(posn)) - dp = a[posn] - self.assertEqual(dp, sp, - "%s != %s: itemsize: %i, flags: %i" - ", bpp: %i, posn: %s" % - (dp, sp, itemsize, - surf.get_flags(), surf.get_bitsize(), - posn)) - - def test_surface_to_array_3d(self): - self.iter_surface_to_array_3d((0xff, 0xff00, 0xff0000, 0)) - self.iter_surface_to_array_3d((0xff0000, 0xff00, 0xff, 0)) - - def iter_surface_to_array_3d(self, rgba_masks): - dst = pygame.Surface(self.surf_size, 0, 24, masks=rgba_masks) - - for surf in self.sources: - dst.fill((0, 0, 0, 0)) - src_bitsize = surf.get_bitsize() - view = dst.get_view('3') - 
self.assertFalse(surf.get_locked()) - surface_to_array(view, surf) - self.assertFalse(surf.get_locked()) - for posn, i in self.test_points: - sc = surf.get_at(posn)[0:3] - dc = dst.get_at(posn)[0:3] - self.assertEqual(dc, sc, - "%s != %s: flags: %i" - ", bpp: %i, posn: %s" % - (dc, sc, - surf.get_flags(), surf.get_bitsize(), - posn)) - view = None - - def test_map_array(self): - targets = [self._make_surface(8), - self._make_surface(16), - self._make_surface(16, srcalpha=True), - self._make_surface(24), - self._make_surface(32), - self._make_surface(32, srcalpha=True), - ] - source = pygame.Surface(self.surf_size, 0, 24, - masks=[0xff, 0xff00, 0xff0000, 0]) - self._fill_surface(source) - source_view = source.get_view('3') # (w, h, 3) - for t in targets: - map_array(t.get_view('2'), source_view, t) - for posn, i in self.test_points: - sc = t.map_rgb(source.get_at(posn)) - dc = t.get_at_mapped(posn) - self.assertEqual(dc, sc, - "%s != %s: flags: %i" - ", bpp: %i, posn: %s" % - (dc, sc, - t.get_flags(), t.get_bitsize(), - posn)) - - color = pygame.Color("salmon") - color.set_length(3) - for t in targets: - map_array(t.get_view('2'), color, t) - sc = t.map_rgb(color) - for posn, i in self.test_points: - dc = t.get_at_mapped(posn) - self.assertEqual(dc, sc, - "%s != %s: flags: %i" - ", bpp: %i, posn: %s" % - (dc, sc, - t.get_flags(), t.get_bitsize(), - posn)) - - # mismatched shapes - w, h = source.get_size() - target = pygame.Surface((w, h + 1), 0, 32) - self.assertRaises(ValueError, map_array, target, source, target) - target = pygame.Surface((w - 1, h), 0, 32) - self.assertRaises(ValueError, map_array, target, source, target) - - def test_array_to_surface_broadcasting(self): - # target surfaces - targets = [self._make_surface(8), - self._make_surface(16), - self._make_surface(16, srcalpha=True), - self._make_surface(24), - self._make_surface(32), - self._make_surface(32, srcalpha=True), - ] - - w, h = self.surf_size - - # broadcast column - column = 
pygame.Surface((1, h), 0, 32) - for target in targets: - source = pygame.Surface((1, h), 0, target) - for y in range(h): - source.set_at((0, y), - pygame.Color(y + 1, y + h + 1, y + 2 * h + 1)) - pygame.pixelcopy.surface_to_array(column.get_view('2'), source) - pygame.pixelcopy.array_to_surface(target, column.get_view('2')) - for x in range(w): - for y in range(h): - self.assertEqual(target.get_at_mapped((x, y)), - column.get_at_mapped((0, y))) - - # broadcast row - row = pygame.Surface((w, 1), 0, 32) - for target in targets: - source = pygame.Surface((w, 1), 0, target) - for x in range(w): - source.set_at((x, 0), - pygame.Color(x + 1, x + w + 1, x + 2 * w + 1)) - pygame.pixelcopy.surface_to_array(row.get_view('2'), source) - pygame.pixelcopy.array_to_surface(target, row.get_view('2')) - for x in range(w): - for y in range(h): - self.assertEqual(target.get_at_mapped((x, y)), - row.get_at_mapped((x, 0))) - - # broadcast pixel - pixel = pygame.Surface((1, 1), 0, 32) - for target in targets: - source = pygame.Surface((1, 1), 0, target) - source.set_at((0, 0), pygame.Color(13, 47, 101)) - pygame.pixelcopy.surface_to_array(pixel.get_view('2'), source) - pygame.pixelcopy.array_to_surface(target, pixel.get_view('2')) - p = pixel.get_at_mapped((0, 0)) - for x in range(w): - for y in range(h): - self.assertEqual(target.get_at_mapped((x, y)), p) - - -class PixelCopyTestWithArray(unittest.TestCase): - try: - import numpy - except ImportError: - __tags__ = ['ignore', 'subprocess_ignore'] - else: - pygame.surfarray.use_arraytype('numpy') - - bitsizes = [8, 16, 32] - - test_palette = [(0, 0, 0, 255), - (10, 30, 60, 255), - (25, 75, 100, 255), - (100, 150, 200, 255), - (0, 100, 200, 255)] - - surf_size = (10, 12) - test_points = [((0, 0), 1), ((4, 5), 1), ((9, 0), 2), - ((5, 5), 2), ((0, 11), 3), ((4, 6), 3), - ((9, 11), 4), ((5, 6), 4)] - - pixels2d = set([8, 16, 32]) - pixels3d = set([24, 32]) - array2d = set([8, 16, 24, 32]) - array3d = set([24, 32]) - - def __init__(self, 
*args, **kwds): - import numpy - - self.dst_types = [numpy.uint8, numpy.uint16, numpy.uint32] - try: - self.dst_types.append(numpy.uint64) - except AttributeError: - pass - pygame.display.init() - try: - unittest.TestCase.__init__(self, *args, **kwds) - self.sources = [self._make_src_surface(8), - self._make_src_surface(16), - self._make_src_surface(16, srcalpha=True), - self._make_src_surface(24), - self._make_src_surface(32), - self._make_src_surface(32, srcalpha=True)] - finally: - pygame.display.quit() - - def _make_surface(self, bitsize, srcalpha=False, palette=None): - if palette is None: - palette = self.test_palette - flags = 0 - if srcalpha: - flags |= SRCALPHA - surf = pygame.Surface(self.surf_size, flags, bitsize) - if bitsize == 8: - surf.set_palette([c[:3] for c in palette]) - return surf - - def _fill_surface(self, surf, palette=None): - if palette is None: - palette = self.test_palette - surf.fill(palette[1], (0, 0, 5, 6)) - surf.fill(palette[2], (5, 0, 5, 6)) - surf.fill(palette[3], (0, 6, 5, 6)) - surf.fill(palette[4], (5, 6, 5, 6)) - - def _make_src_surface(self, bitsize, srcalpha=False, palette=None): - surf = self._make_surface(bitsize, srcalpha, palette) - self._fill_surface(surf, palette) - return surf - - def setUp(self): - pygame.display.init() - - def tearDown(self): - pygame.display.quit() - - def test_surface_to_array_2d(self): - try: - from numpy import empty, dtype - except ImportError: - return - - palette = self.test_palette - alpha_color = (0, 0, 0, 128) - - dst_dims = self.surf_size - destinations = [empty(dst_dims, t) for t in self.dst_types] - if (pygame.get_sdl_byteorder() == pygame.LIL_ENDIAN): - swapped_dst = empty(dst_dims, dtype('>u4')) - else: - swapped_dst = empty(dst_dims, dtype('u4')) - else: - swapped_dst = empty(dst_dims, dtype('i', - '!i', '1i', '=1i', '@q', 'q', '4x', '8x']: - surface.fill((255, 254, 253)) - exp = Exporter(shape, format=format) - exp._buf[:] = [42] * exp.buflen - array_to_surface(surface, exp) - for x 
in range(w): - for y in range(h): - self.assertEqual(surface.get_at((x, y)), (42, 42, 42, 255)) - # Some unsupported formats for array_to_surface and a 32 bit surface - for format in ['f', 'd', '?', 'x', - '1x', '2x', '3x', '5x', '6x', '7x', '9x']: - exp = Exporter(shape, format=format) - self.assertRaises(ValueError, array_to_surface, surface, exp) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/rect_test.py b/venv/lib/python3.7/site-packages/pygame/tests/rect_test.py deleted file mode 100644 index d034667..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/rect_test.py +++ /dev/null @@ -1,784 +0,0 @@ -import unittest -from pygame import Rect - -class RectTypeTest(unittest.TestCase): - def testConstructionXYWidthHeight(self): - r = Rect(1, 2, 3, 4) - self.assertEqual(1, r.left) - self.assertEqual(2, r.top) - self.assertEqual(3, r.width) - self.assertEqual(4, r.height) - - def testConstructionTopLeftSize(self): - r = Rect((1, 2), (3, 4)) - self.assertEqual(1, r.left) - self.assertEqual(2, r.top) - self.assertEqual(3, r.width) - self.assertEqual(4, r.height) - - def testCalculatedAttributes(self): - r = Rect(1, 2, 3, 4) - - self.assertEqual(r.left + r.width, r.right) - self.assertEqual(r.top + r.height, r.bottom) - self.assertEqual((r.width, r.height), r.size) - self.assertEqual((r.left, r.top), r.topleft) - self.assertEqual((r.right, r.top), r.topright) - self.assertEqual((r.left, r.bottom), r.bottomleft) - self.assertEqual((r.right, r.bottom), r.bottomright) - - midx = r.left + r.width // 2 - midy = r.top + r.height // 2 - - self.assertEqual(midx, r.centerx) - self.assertEqual(midy, r.centery) - self.assertEqual((r.centerx, r.centery), r.center) - self.assertEqual((r.centerx, r.top), r.midtop) - self.assertEqual((r.centerx, r.bottom), r.midbottom) - self.assertEqual((r.left, r.centery), r.midleft) - self.assertEqual((r.right, r.centery), r.midright) - - def test_normalize(self): - r = Rect(1, 2, 
-3, -6) - r2 = Rect(r) - r2.normalize() - self.assertTrue(r2.width >= 0) - self.assertTrue(r2.height >= 0) - self.assertEqual((abs(r.width), abs(r.height)), r2.size) - self.assertEqual((-2, -4), r2.topleft) - - def test_left(self): - """Changing the left attribute moves the rect and does not change - the rect's width - """ - r = Rect(1, 2, 3, 4) - new_left = 10 - - r.left = new_left - self.assertEqual(new_left, r.left) - self.assertEqual(Rect(new_left, 2, 3, 4), r) - - def test_right(self): - """Changing the right attribute moves the rect and does not change - the rect's width - """ - r = Rect(1, 2, 3, 4) - new_right = r.right + 20 - expected_left = r.left + 20 - old_width = r.width - - r.right = new_right - self.assertEqual(new_right, r.right) - self.assertEqual(expected_left, r.left) - self.assertEqual(old_width, r.width) - - def test_top(self): - """Changing the top attribute moves the rect and does not change - the rect's width - """ - r = Rect(1, 2, 3, 4) - new_top = 10 - - r.top = new_top - self.assertEqual(Rect(1, new_top, 3, 4), r) - self.assertEqual(new_top, r.top) - - def test_bottom(self): - """Changing the bottom attribute moves the rect and does not change - the rect's height - """ - r = Rect(1, 2, 3, 4) - new_bottom = r.bottom + 20 - expected_top = r.top + 20 - old_height = r.height - - r.bottom = new_bottom - self.assertEqual(new_bottom, r.bottom) - self.assertEqual(expected_top, r.top) - self.assertEqual(old_height, r.height) - - def test_centerx(self): - """Changing the centerx attribute moves the rect and does not change - the rect's width - """ - r = Rect(1, 2, 3, 4) - new_centerx = r.centerx + 20 - expected_left = r.left + 20 - old_width = r.width - - r.centerx = new_centerx - self.assertEqual(new_centerx, r.centerx) - self.assertEqual(expected_left, r.left) - self.assertEqual(old_width, r.width) - - def test_centery(self): - """Changing the centerx attribute moves the rect and does not change - the rect's width - """ - r = Rect(1, 2, 3, 4) - 
new_centery = r.centery + 20 - expected_top = r.top + 20 - old_height = r.height - - r.centery = new_centery - self.assertEqual(new_centery, r.centery) - self.assertEqual(expected_top, r.top) - self.assertEqual(old_height, r.height) - - def test_topleft(self): - """Changing the topleft attribute moves the rect and does not change - the rect's size - """ - r = Rect(1, 2, 3, 4) - new_topleft = (r.left + 20, r.top + 30) - old_size = r.size - - r.topleft = new_topleft - self.assertEqual(new_topleft, r.topleft) - self.assertEqual(old_size, r.size) - - def test_bottomleft(self): - """Changing the bottomleft attribute moves the rect and does not change - the rect's size - """ - r = Rect(1, 2, 3, 4) - new_bottomleft = (r.left + 20, r.bottom + 30) - expected_topleft = (r.left + 20, r.top + 30) - old_size = r.size - - r.bottomleft = new_bottomleft - self.assertEqual(new_bottomleft, r.bottomleft) - self.assertEqual(expected_topleft, r.topleft) - self.assertEqual(old_size, r.size) - - def test_topright(self): - """Changing the bottomleft attribute moves the rect and does not change - the rect's size - """ - r = Rect(1, 2, 3, 4) - new_topright = (r.right + 20, r.top + 30) - expected_topleft = (r.left + 20, r.top + 30) - old_size = r.size - - r.topright = new_topright - self.assertEqual(new_topright, r.topright) - self.assertEqual(expected_topleft, r.topleft) - self.assertEqual(old_size, r.size) - - def test_bottomright(self): - """Changing the bottomright attribute moves the rect and does not change - the rect's size - """ - r = Rect(1, 2, 3, 4) - new_bottomright = (r.right + 20, r.bottom + 30) - expected_topleft = (r.left + 20, r.top + 30) - old_size = r.size - - r.bottomright = new_bottomright - self.assertEqual(new_bottomright, r.bottomright) - self.assertEqual(expected_topleft, r.topleft) - self.assertEqual(old_size, r.size) - - def test_center(self): - """Changing the center attribute moves the rect and does not change - the rect's size - """ - r = Rect(1, 2, 3, 4) - 
new_center = (r.centerx + 20, r.centery + 30) - expected_topleft = (r.left + 20, r.top + 30) - old_size = r.size - - r.center = new_center - self.assertEqual(new_center, r.center) - self.assertEqual(expected_topleft, r.topleft) - self.assertEqual(old_size, r.size) - - def test_midleft(self): - """Changing the midleft attribute moves the rect and does not change - the rect's size - """ - r = Rect(1, 2, 3, 4) - new_midleft = (r.left + 20, r.centery + 30) - expected_topleft = (r.left + 20, r.top + 30) - old_size = r.size - - r.midleft = new_midleft - self.assertEqual(new_midleft, r.midleft) - self.assertEqual(expected_topleft, r.topleft) - self.assertEqual(old_size, r.size) - - def test_midright(self): - """Changing the midright attribute moves the rect and does not change - the rect's size - """ - r = Rect(1, 2, 3, 4) - new_midright= (r.right + 20, r.centery + 30) - expected_topleft = (r.left + 20, r.top + 30) - old_size = r.size - - r.midright = new_midright - self.assertEqual(new_midright, r.midright) - self.assertEqual(expected_topleft, r.topleft) - self.assertEqual(old_size, r.size) - - def test_midtop(self): - """Changing the midtop attribute moves the rect and does not change - the rect's size - """ - r = Rect(1, 2, 3, 4) - new_midtop= (r.centerx + 20, r.top + 30) - expected_topleft = (r.left + 20, r.top + 30) - old_size = r.size - - r.midtop = new_midtop - self.assertEqual(new_midtop, r.midtop) - self.assertEqual(expected_topleft, r.topleft) - self.assertEqual(old_size, r.size) - - def test_midbottom(self): - """Changing the midbottom attribute moves the rect and does not change - the rect's size - """ - r = Rect(1, 2, 3, 4) - new_midbottom = (r.centerx + 20, r.bottom + 30) - expected_topleft = (r.left + 20, r.top + 30) - old_size = r.size - - r.midbottom = new_midbottom - self.assertEqual(new_midbottom, r.midbottom) - self.assertEqual(expected_topleft, r.topleft) - self.assertEqual(old_size, r.size) - - def test_width(self): - """Changing the width resizes 
the rect from the top-left corner - """ - r = Rect(1, 2, 3, 4) - new_width = 10 - old_topleft = r.topleft - old_height = r.height - - r.width = new_width - self.assertEqual(new_width, r.width) - self.assertEqual(old_height, r.height) - self.assertEqual(old_topleft, r.topleft) - - def test_height(self): - """Changing the height resizes the rect from the top-left corner - """ - r = Rect(1, 2, 3, 4) - new_height = 10 - old_topleft = r.topleft - old_width = r.width - - r.height = new_height - self.assertEqual(new_height, r.height) - self.assertEqual(old_width, r.width) - self.assertEqual(old_topleft, r.topleft) - - def test_size(self): - """Changing the size resizes the rect from the top-left corner - """ - r = Rect(1, 2, 3, 4) - new_size = (10, 20) - old_topleft = r.topleft - - r.size = new_size - self.assertEqual(new_size, r.size) - self.assertEqual(old_topleft, r.topleft) - - def test_contains(self): - r = Rect(1, 2, 3, 4) - - self.assertTrue(r.contains(Rect(2, 3, 1, 1)), - "r does not contain Rect(2, 3, 1, 1)") - self.assertTrue(r.contains(Rect(r)), - "r does not contain the same rect as itself") - self.assertTrue(r.contains(Rect(2, 3, 0, 0)), - "r does not contain an empty rect within its bounds") - self.assertFalse(r.contains(Rect(0, 0, 1, 2)), - "r contains Rect(0, 0, 1, 2)") - self.assertFalse(r.contains(Rect(4, 6, 1, 1)), - "r contains Rect(4, 6, 1, 1)") - self.assertFalse(r.contains(Rect(4, 6, 0, 0)), - "r contains Rect(4, 6, 0, 0)") - - def test_collidepoint(self): - r = Rect(1, 2, 3, 4) - - self.assertTrue(r.collidepoint(r.left, r.top), - "r does not collide with point (left, top)") - self.assertFalse(r.collidepoint(r.left - 1, r.top), - "r collides with point (left - 1, top)") - self.assertFalse(r.collidepoint(r.left, r.top - 1), - "r collides with point (left, top - 1)") - self.assertFalse(r.collidepoint(r.left - 1, r.top - 1), - "r collides with point (left - 1, top - 1)") - - self.assertTrue(r.collidepoint(r.right - 1, r.bottom - 1), - "r does not 
collide with point (right - 1, bottom - 1)") - self.assertFalse(r.collidepoint(r.right, r.bottom), - "r collides with point (right, bottom)") - self.assertFalse(r.collidepoint(r.right - 1, r.bottom), - "r collides with point (right - 1, bottom)") - self.assertFalse(r.collidepoint(r.right, r.bottom - 1), - "r collides with point (right, bottom - 1)") - - def test_inflate__larger(self): - """The inflate method inflates around the center of the rectangle - """ - r = Rect(2, 4, 6, 8) - r2 = r.inflate(4, 6) - - self.assertEqual(r.center, r2.center) - self.assertEqual(r.left - 2, r2.left) - self.assertEqual(r.top - 3, r2.top) - self.assertEqual(r.right + 2, r2.right) - self.assertEqual(r.bottom + 3, r2.bottom) - self.assertEqual(r.width + 4, r2.width) - self.assertEqual(r.height + 6, r2.height) - - def test_inflate__smaller(self): - """The inflate method inflates around the center of the rectangle - """ - r = Rect(2, 4, 6, 8) - r2 = r.inflate(-4, -6) - - self.assertEqual(r.center, r2.center) - self.assertEqual(r.left + 2, r2.left) - self.assertEqual(r.top + 3, r2.top) - self.assertEqual(r.right - 2, r2.right) - self.assertEqual(r.bottom - 3, r2.bottom) - self.assertEqual(r.width - 4, r2.width) - self.assertEqual(r.height - 6, r2.height) - - def test_inflate_ip__larger(self): - """The inflate_ip method inflates around the center of the rectangle - """ - r = Rect(2, 4, 6, 8) - r2 = Rect(r) - r2.inflate_ip(-4, -6) - - self.assertEqual(r.center, r2.center) - self.assertEqual(r.left + 2, r2.left) - self.assertEqual(r.top + 3, r2.top) - self.assertEqual(r.right - 2, r2.right) - self.assertEqual(r.bottom - 3, r2.bottom) - self.assertEqual(r.width - 4, r2.width) - self.assertEqual(r.height - 6, r2.height) - - def test_inflate_ip__smaller(self): - """The inflate method inflates around the center of the rectangle - """ - r = Rect(2, 4, 6, 8) - r2 = Rect(r) - r2.inflate_ip(-4, -6) - - self.assertEqual(r.center, r2.center) - self.assertEqual(r.left + 2, r2.left) - 
self.assertEqual(r.top + 3, r2.top) - self.assertEqual(r.right - 2, r2.right) - self.assertEqual(r.bottom - 3, r2.bottom) - self.assertEqual(r.width - 4, r2.width) - self.assertEqual(r.height - 6, r2.height) - - def test_clamp(self): - r = Rect(10, 10, 10, 10) - c = Rect(19, 12, 5, 5).clamp(r) - self.assertEqual(c.right, r.right) - self.assertEqual(c.top, 12) - c = Rect(1, 2, 3, 4).clamp(r) - self.assertEqual(c.topleft, r.topleft) - c = Rect(5, 500, 22, 33).clamp(r) - self.assertEqual(c.center, r.center) - - def test_clamp_ip(self): - r = Rect(10, 10, 10, 10) - c = Rect(19, 12, 5, 5) - c.clamp_ip(r) - self.assertEqual(c.right, r.right) - self.assertEqual(c.top, 12) - c = Rect(1, 2, 3, 4) - c.clamp_ip(r) - self.assertEqual(c.topleft, r.topleft) - c = Rect(5, 500, 22, 33) - c.clamp_ip(r) - self.assertEqual(c.center, r.center) - - def test_clip(self): - r1 = Rect(1, 2, 3, 4) - self.assertEqual(Rect(1, 2, 2, 2), r1.clip( Rect(0, 0, 3, 4))) - self.assertEqual(Rect(2, 2, 2, 4), r1.clip( Rect(2, 2, 10, 20))) - self.assertEqual(Rect(2, 3, 1, 2), r1.clip(Rect(2, 3, 1, 2))) - self.assertEqual((0, 0), r1.clip(20, 30, 5, 6).size) - self.assertEqual(r1, r1.clip(Rect(r1)), - "r1 does not clip an identical rect to itself") - - def test_move(self): - r = Rect(1, 2, 3, 4) - move_x = 10 - move_y = 20 - r2 = r.move(move_x, move_y) - expected_r2 = Rect(r.left + move_x, r.top + move_y, r.width, r.height) - self.assertEqual(expected_r2, r2) - - def test_move_ip(self): - r = Rect(1, 2, 3, 4) - r2 = Rect(r) - move_x = 10 - move_y = 20 - r2.move_ip(move_x, move_y) - expected_r2 = Rect(r.left + move_x, r.top + move_y, r.width, r.height) - self.assertEqual(expected_r2, r2) - - def test_union(self): - r1 = Rect(1, 1, 1, 2) - r2 = Rect(-2, -2, 1, 2) - self.assertEqual(Rect(-2, -2, 4, 5), r1.union(r2)) - - def test_union__with_identical_Rect(self): - r1 = Rect(1, 2, 3, 4) - self.assertEqual(r1, r1.union(Rect(r1))) - - def test_union_ip(self): - r1 = Rect(1, 1, 1, 2) - r2 = Rect(-2, -2, 1, 2) - 
r1.union_ip(r2) - self.assertEqual(Rect(-2, -2, 4, 5), r1) - - def test_unionall(self): - r1 = Rect(0, 0, 1, 1) - r2 = Rect(-2, -2, 1, 1) - r3 = Rect(2, 2, 1, 1) - - r4 = r1.unionall([r2, r3]) - self.assertEqual(Rect(-2, -2, 5, 5), r4) - - def test_unionall_ip(self): - r1 = Rect(0, 0, 1, 1) - r2 = Rect(-2, -2, 1, 1) - r3 = Rect(2, 2, 1, 1) - - r1.unionall_ip([r2, r3]) - self.assertEqual(Rect(-2, -2, 5, 5), r1) - - # Bug for an empty list. Would return a Rect instead of None. - self.assertTrue(r1.unionall_ip([]) is None) - - def test_colliderect(self): - r1 = Rect(1, 2, 3, 4) - self.assertTrue(r1.colliderect(Rect(0, 0, 2, 3)), - "r1 does not collide with Rect(0, 0, 2, 3)") - self.assertFalse(r1.colliderect(Rect(0, 0, 1, 2)), - "r1 collides with Rect(0, 0, 1, 2)") - self.assertFalse(r1.colliderect(Rect(r1.right, r1.bottom, 2, 2)), - "r1 collides with Rect(r1.right, r1.bottom, 2, 2)") - self.assertTrue(r1.colliderect(Rect(r1.left + 1, r1.top + 1, - r1.width - 2, r1.height - 2)), - "r1 does not collide with Rect(r1.left + 1, r1.top + 1, "+ - "r1.width - 2, r1.height - 2)") - self.assertTrue(r1.colliderect(Rect(r1.left - 1, r1.top - 1, - r1.width + 2, r1.height + 2)), - "r1 does not collide with Rect(r1.left - 1, r1.top - 1, "+ - "r1.width + 2, r1.height + 2)") - self.assertTrue(r1.colliderect(Rect(r1)), - "r1 does not collide with an identical rect") - self.assertFalse(r1.colliderect(Rect(r1.right, r1.bottom, 0, 0)), - "r1 collides with Rect(r1.right, r1.bottom, 0, 0)") - self.assertFalse(r1.colliderect(Rect(r1.right, r1.bottom, 1, 1)), - "r1 collides with Rect(r1.right, r1.bottom, 1, 1)") - - def testEquals(self): - """ check to see how the rect uses __eq__ - """ - r1 = Rect(1, 2, 3, 4) - r2 = Rect(10, 20, 30, 40) - r3 = (10, 20, 30, 40) - r4 = Rect(10, 20, 30, 40) - - class foo (Rect): - def __eq__(self, other): - return id(self) == id(other) - def __ne__(self, other): - return id(self) != id(other) - - class foo2 (Rect): - pass - - r5 = foo(10, 20, 30, 40) - r6 = 
foo2(10, 20, 30, 40) - - self.assertNotEqual(r5, r2) - - # because we define equality differently for this subclass. - self.assertEqual(r6, r2) - - - rect_list = [r1, r2, r3, r4, r6] - - # see if we can remove 4 of these. - rect_list.remove(r2) - rect_list.remove(r2) - rect_list.remove(r2) - rect_list.remove(r2) - self.assertRaises(ValueError, rect_list.remove, r2) - - def test_collidedict(self): - - # __doc__ (as of 2008-08-02) for pygame.rect.Rect.collidedict: - - # Rect.collidedict(dict): return (key, value) - # test if one rectangle in a dictionary intersects - # - # Returns the key and value of the first dictionary value that - # collides with the Rect. If no collisions are found, None is - # returned. - # - # Rect objects are not hashable and cannot be used as keys in a - # dictionary, only as values. - - r = Rect(1, 1, 10, 10) - r1 = Rect(1, 1, 10, 10) - r2 = Rect(50, 50, 10, 10) - r3 = Rect(70, 70, 10, 10) - r4 = Rect(61, 61, 10, 10) - - d = {1: r1, 2: r2, 3: r3} - - rects_values = 1 - val = r.collidedict(d, rects_values) - self.assertTrue(val) - self.assertEqual(len(val), 2) - self.assertEqual(val[0], 1) - self.assertEqual(val[1], r1) - - none_d = {2: r2, 3: r3} - none_val = r.collidedict(none_d, rects_values) - self.assertFalse(none_val) - - barely_d = {1: r1, 2: r2, 3: r3} - k3, v3 = r4.collidedict(barely_d, rects_values) - self.assertEqual(k3, 3) - self.assertEqual(v3, r3) - - - def test_collidedictall(self): - - # __doc__ (as of 2008-08-02) for pygame.rect.Rect.collidedictall: - - # Rect.collidedictall(dict): return [(key, value), ...] - # test if all rectangles in a dictionary intersect - # - # Returns a list of all the key and value pairs that intersect with - # the Rect. If no collisions are found an empty dictionary is - # returned. - # - # Rect objects are not hashable and cannot be used as keys in a - # dictionary, only as values. 
- - r = Rect(1, 1, 10, 10) - - r2 = Rect(1, 1, 10, 10) - r3 = Rect(5, 5, 10, 10) - r4 = Rect(10, 10, 10, 10) - r5 = Rect(50, 50, 10, 10) - - rects_values = 1 - d = {2: r2} - l = r.collidedictall(d, rects_values) - self.assertEqual(l, [(2, r2)]) - - d2 = {2: r2, 3: r3, 4: r4, 5: r5} - l2 = r.collidedictall(d2, rects_values) - self.assertEqual(l2, [(2, r2), (3, r3), (4, r4)]) - - def test_collidelist(self): - - # __doc__ (as of 2008-08-02) for pygame.rect.Rect.collidelist: - - # Rect.collidelist(list): return index - # test if one rectangle in a list intersects - # - # Test whether the rectangle collides with any in a sequence of - # rectangles. The index of the first collision found is returned. If - # no collisions are found an index of -1 is returned. - - r = Rect(1, 1, 10, 10) - l = [Rect(50, 50, 1, 1), Rect(5, 5, 10, 10), Rect(15, 15, 1, 1)] - - self.assertEqual(r.collidelist(l), 1) - - f = [Rect(50, 50, 1, 1), (100, 100, 4, 4)] - self.assertEqual(r.collidelist(f), -1) - - - def test_collidelistall(self): - - # __doc__ (as of 2008-08-02) for pygame.rect.Rect.collidelistall: - - # Rect.collidelistall(list): return indices - # test if all rectangles in a list intersect - # - # Returns a list of all the indices that contain rectangles that - # collide with the Rect. If no intersecting rectangles are found, an - # empty list is returned. - - r = Rect(1, 1, 10, 10) - - l = [ - Rect(1, 1, 10, 10), - Rect(5, 5, 10, 10), - Rect(15, 15, 1, 1), - Rect(2, 2, 1, 1), - ] - self.assertEqual(r.collidelistall(l), [0, 1, 3]) - - f = [Rect(50, 50, 1, 1), Rect(20, 20, 5, 5)] - self.assertFalse(r.collidelistall(f)) - - - def test_fit(self): - - # __doc__ (as of 2008-08-02) for pygame.rect.Rect.fit: - - # Rect.fit(Rect): return Rect - # resize and move a rectangle with aspect ratio - # - # Returns a new rectangle that is moved and resized to fit another. 
- # The aspect ratio of the original Rect is preserved, so the new - # rectangle may be smaller than the target in either width or height. - - r = Rect(10, 10, 30, 30) - - r2 = Rect(30, 30, 15, 10) - - f = r.fit(r2) - self.assertTrue(r2.contains(f)) - - f2 = r2.fit(r) - self.assertTrue(r.contains(f2)) - - - - def test_copy(self): - r = Rect(1, 2, 10, 20) - c = r.copy() - self.assertEqual(c, r) - - - def test_subscript(self): - r = Rect(1, 2, 3, 4) - self.assertEqual(r[0], 1) - self.assertEqual(r[1], 2) - self.assertEqual(r[2], 3) - self.assertEqual(r[3], 4) - self.assertEqual(r[-1], 4) - self.assertEqual(r[-2], 3) - self.assertEqual(r[-4], 1) - self.assertRaises(IndexError, r.__getitem__, 5) - self.assertRaises(IndexError, r.__getitem__, -5) - self.assertEqual(r[0:2], [1, 2]) - self.assertEqual(r[0:4], [1, 2, 3, 4]) - self.assertEqual(r[0:-1], [1, 2, 3]) - self.assertEqual(r[:], [1, 2, 3, 4]) - self.assertEqual(r[...], [1, 2, 3, 4]) - self.assertEqual(r[0:4:2], [1, 3]) - self.assertEqual(r[0:4:3], [1, 4]) - self.assertEqual(r[3::-1], [4, 3, 2, 1]) - self.assertRaises(TypeError, r.__getitem__, None) - - def test_ass_subscript(self): - r = Rect(0, 0, 0, 0) - r[...] = 1, 2, 3, 4 - self.assertEqual(r, [1, 2, 3, 4]) - self.assertRaises(TypeError, r.__setitem__, None, 0) - self.assertEqual(r, [1, 2, 3, 4]) - self.assertRaises(TypeError, r.__setitem__, 0, '') - self.assertEqual(r, [1, 2, 3, 4]) - self.assertRaises(IndexError, r.__setitem__, 4, 0) - self.assertEqual(r, [1, 2, 3, 4]) - self.assertRaises(IndexError, r.__setitem__, -5, 0) - self.assertEqual(r, [1, 2, 3, 4]) - r[0] = 10 - self.assertEqual(r, [10, 2, 3, 4]) - r[3] = 40 - self.assertEqual(r, [10, 2, 3, 40]) - r[-1] = 400 - self.assertEqual(r, [10, 2, 3, 400]) - r[-4] = 100 - self.assertEqual(r, [100, 2, 3, 400]) - r[1:3] = 0 - self.assertEqual(r, [100, 0, 0, 400]) - r[...] 
= 0 - self.assertEqual(r, [0, 0, 0, 0]) - r[:] = 9 - self.assertEqual(r, [9, 9, 9, 9]) - r[:] = 11, 12, 13, 14 - self.assertEqual(r, [11, 12, 13, 14]) - r[::-1] = r - self.assertEqual(r, [14, 13, 12, 11]) - - -class SubclassTest(unittest.TestCase): - class MyRect(Rect): - def __init__(self, *args, **kwds): - super(SubclassTest.MyRect, self).__init__(*args, **kwds) - self.an_attribute = True - - def test_copy(self): - mr1 = self.MyRect(1, 2, 10, 20) - self.assertTrue(mr1.an_attribute) - mr2 = mr1.copy() - self.assertTrue(isinstance(mr2, self.MyRect)) - self.assertRaises(AttributeError, getattr, mr2, "an_attribute") - - def test_move(self): - mr1 = self.MyRect(1, 2, 10, 20) - self.assertTrue(mr1.an_attribute) - mr2 = mr1.move(1, 2) - self.assertTrue(isinstance(mr2, self.MyRect)) - self.assertRaises(AttributeError, getattr, mr2, "an_attribute") - - def test_inflate(self): - mr1 = self.MyRect(1, 2, 10, 20) - self.assertTrue(mr1.an_attribute) - mr2 = mr1.inflate(2, 4) - self.assertTrue(isinstance(mr2, self.MyRect)) - self.assertRaises(AttributeError, getattr, mr2, "an_attribute") - - def test_clamp(self): - mr1 = self.MyRect(19, 12, 5, 5) - self.assertTrue(mr1.an_attribute) - mr2 = mr1.clamp(Rect(10, 10, 10, 10)) - self.assertTrue(isinstance(mr2, self.MyRect)) - self.assertRaises(AttributeError, getattr, mr2, "an_attribute") - - def test_clip(self): - mr1 = self.MyRect(1, 2, 3, 4) - self.assertTrue(mr1.an_attribute) - mr2 = mr1.clip(Rect(0, 0, 3, 4)) - self.assertTrue(isinstance(mr2, self.MyRect)) - self.assertRaises(AttributeError, getattr, mr2, "an_attribute") - - def test_union(self): - mr1 = self.MyRect(1, 1, 1, 2) - self.assertTrue(mr1.an_attribute) - mr2 = mr1.union(Rect(-2, -2, 1, 2)) - self.assertTrue(isinstance(mr2, self.MyRect)) - self.assertRaises(AttributeError, getattr, mr2, "an_attribute") - - def test_unionall(self): - mr1 = self.MyRect(0, 0, 1, 1) - self.assertTrue(mr1.an_attribute) - mr2 = mr1.unionall([Rect(-2, -2, 1, 1), Rect(2, 2, 1, 1)]) - 
self.assertTrue(isinstance(mr2, self.MyRect)) - self.assertRaises(AttributeError, getattr, mr2, "an_attribute") - - def test_fit(self): - mr1 = self.MyRect(10, 10, 30, 30) - self.assertTrue(mr1.an_attribute) - mr2 = mr1.fit(Rect(30, 30, 15, 10)) - self.assertTrue(isinstance(mr2, self.MyRect)) - self.assertRaises(AttributeError, getattr, mr2, "an_attribute") - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/__init__.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/__init__.py deleted file mode 100644 index 1bb8bf6..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# empty diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/all_ok/__init__.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/all_ok/__init__.py deleted file mode 100644 index 1bb8bf6..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/all_ok/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# empty diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/all_ok/fake_2_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/all_ok/fake_2_test.py deleted file mode 100644 index 398aef5..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/all_ok/fake_2_test.py +++ /dev/null @@ -1,39 +0,0 @@ -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def 
test_get_mods(self): - self.assertTrue(True) - - def test_get_pressed(self): - self.assertTrue(True) - - def test_name(self): - self.assertTrue(True) - - def test_set_mods(self): - self.assertTrue(True) - - def test_set_repeat(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/all_ok/fake_3_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/all_ok/fake_3_test.py deleted file mode 100644 index 398aef5..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/all_ok/fake_3_test.py +++ /dev/null @@ -1,39 +0,0 @@ -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - self.assertTrue(True) - - def test_get_pressed(self): - self.assertTrue(True) - - def test_name(self): - self.assertTrue(True) - - def test_set_mods(self): - self.assertTrue(True) - - def test_set_repeat(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/all_ok/fake_4_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/all_ok/fake_4_test.py deleted file mode 100644 index 398aef5..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/all_ok/fake_4_test.py +++ /dev/null @@ -1,39 +0,0 @@ -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - 
os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - self.assertTrue(True) - - def test_get_pressed(self): - self.assertTrue(True) - - def test_name(self): - self.assertTrue(True) - - def test_set_mods(self): - self.assertTrue(True) - - def test_set_repeat(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/all_ok/fake_5_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/all_ok/fake_5_test.py deleted file mode 100644 index 398aef5..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/all_ok/fake_5_test.py +++ /dev/null @@ -1,39 +0,0 @@ -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - self.assertTrue(True) - - def test_get_pressed(self): - self.assertTrue(True) - - def test_name(self): - self.assertTrue(True) - - def test_set_mods(self): - self.assertTrue(True) - - def test_set_repeat(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git 
a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/all_ok/fake_6_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/all_ok/fake_6_test.py deleted file mode 100644 index 398aef5..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/all_ok/fake_6_test.py +++ /dev/null @@ -1,39 +0,0 @@ -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - self.assertTrue(True) - - def test_get_pressed(self): - self.assertTrue(True) - - def test_name(self): - self.assertTrue(True) - - def test_set_mods(self): - self.assertTrue(True) - - def test_set_repeat(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/all_ok/no_assertions__ret_code_of_1__test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/all_ok/no_assertions__ret_code_of_1__test.py deleted file mode 100644 index d3545e3..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/all_ok/no_assertions__ret_code_of_1__test.py +++ /dev/null @@ -1,38 +0,0 @@ -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = 
__name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - pass - - def test_get_mods(self): - pass - - def test_get_pressed(self): - pass - - def test_name(self): - pass - - def test_set_mods(self): - pass - - def test_set_repeat(self): - pass - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/all_ok/zero_tests_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/all_ok/zero_tests_test.py deleted file mode 100644 index c4ded95..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/all_ok/zero_tests_test.py +++ /dev/null @@ -1,22 +0,0 @@ -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - pass - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/everything/__init__.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/everything/__init__.py deleted file mode 100644 index 1bb8bf6..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/everything/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# empty diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/everything/fake_2_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/everything/fake_2_test.py deleted file mode 100644 index 398aef5..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/everything/fake_2_test.py +++ /dev/null @@ -1,39 +0,0 @@ -if 
__name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - self.assertTrue(True) - - def test_get_pressed(self): - self.assertTrue(True) - - def test_name(self): - self.assertTrue(True) - - def test_set_mods(self): - self.assertTrue(True) - - def test_set_repeat(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/everything/incomplete_todo_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/everything/incomplete_todo_test.py deleted file mode 100644 index 1b76918..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/everything/incomplete_todo_test.py +++ /dev/null @@ -1,39 +0,0 @@ -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - self.assertTrue(True) - - def todo_test_get_pressed(self): - self.assertTrue(True) - - def test_name(self): - self.assertTrue(True) - - def todo_test_set_mods(self): - self.assertTrue(True) - - def 
test_set_repeat(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/everything/magic_tag_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/everything/magic_tag_test.py deleted file mode 100644 index 34d32c6..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/everything/magic_tag_test.py +++ /dev/null @@ -1,38 +0,0 @@ -__tags__ = ['magic'] - -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - self.assertTrue(True) - - def test_get_pressed(self): - self.assertTrue(True) - - def test_name(self): - self.assertTrue(True) - - def test_set_mods(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/everything/sleep_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/everything/sleep_test.py deleted file mode 100644 index cc830bb..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/everything/sleep_test.py +++ /dev/null @@ -1,29 +0,0 @@ -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) 
-else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -import time - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - stop_time = time.time() + 10.0 - while time.time() < stop_time: - time.sleep(1) - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/exclude/__init__.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/exclude/__init__.py deleted file mode 100644 index 1bb8bf6..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/exclude/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# empty diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/exclude/fake_2_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/exclude/fake_2_test.py deleted file mode 100644 index 398aef5..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/exclude/fake_2_test.py +++ /dev/null @@ -1,39 +0,0 @@ -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - self.assertTrue(True) - - def test_get_pressed(self): - self.assertTrue(True) - - def test_name(self): - self.assertTrue(True) - - def test_set_mods(self): - self.assertTrue(True) - - def test_set_repeat(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/exclude/invisible_tag_test.py 
b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/exclude/invisible_tag_test.py deleted file mode 100644 index 063cbc4..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/exclude/invisible_tag_test.py +++ /dev/null @@ -1,41 +0,0 @@ -__tags__ = ['invisible'] - -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - self.assertTrue(True) - - def test_get_pressed(self): - self.assertTrue(True) - - def test_name(self): - self.assertTrue(True) - - def test_set_mods(self): - self.assertTrue(True) - - def test_set_repeat(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/exclude/magic_tag_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/exclude/magic_tag_test.py deleted file mode 100644 index 34d32c6..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/exclude/magic_tag_test.py +++ /dev/null @@ -1,38 +0,0 @@ -__tags__ = ['magic'] - -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class 
KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - self.assertTrue(True) - - def test_get_pressed(self): - self.assertTrue(True) - - def test_name(self): - self.assertTrue(True) - - def test_set_mods(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/failures1/__init__.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/failures1/__init__.py deleted file mode 100644 index 1bb8bf6..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/failures1/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# empty diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/failures1/fake_2_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/failures1/fake_2_test.py deleted file mode 100644 index 398aef5..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/failures1/fake_2_test.py +++ /dev/null @@ -1,39 +0,0 @@ -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - self.assertTrue(True) - - def test_get_pressed(self): - self.assertTrue(True) - - def test_name(self): - self.assertTrue(True) - - def test_set_mods(self): - self.assertTrue(True) - - def test_set_repeat(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git 
a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/failures1/fake_3_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/failures1/fake_3_test.py deleted file mode 100644 index 398aef5..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/failures1/fake_3_test.py +++ /dev/null @@ -1,39 +0,0 @@ -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - self.assertTrue(True) - - def test_get_pressed(self): - self.assertTrue(True) - - def test_name(self): - self.assertTrue(True) - - def test_set_mods(self): - self.assertTrue(True) - - def test_set_repeat(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/failures1/fake_4_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/failures1/fake_4_test.py deleted file mode 100644 index b540a3c..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/failures1/fake_4_test.py +++ /dev/null @@ -1,41 +0,0 @@ -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class 
KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - self.assertTrue(True) - - def test_get_pressed(self): - self.assertTrue(False, "Some Jibberish") - - def test_name(self): - self.assertTrue(True) - - def test_set_mods(self): - if 1: - if 1: - assert False - - def test_set_repeat(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/incomplete/__init__.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/incomplete/__init__.py deleted file mode 100644 index 1bb8bf6..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/incomplete/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# empty diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/incomplete/fake_2_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/incomplete/fake_2_test.py deleted file mode 100644 index 1c68eaa..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/incomplete/fake_2_test.py +++ /dev/null @@ -1,39 +0,0 @@ -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - self.assertTrue(True) - - def todo_test_get_pressed(self): - self.fail() - - def test_name(self): - self.assertTrue(True) - - def todo_test_set_mods(self): - self.fail() - - def test_set_repeat(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff 
--git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/incomplete/fake_3_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/incomplete/fake_3_test.py deleted file mode 100644 index 398aef5..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/incomplete/fake_3_test.py +++ /dev/null @@ -1,39 +0,0 @@ -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - self.assertTrue(True) - - def test_get_pressed(self): - self.assertTrue(True) - - def test_name(self): - self.assertTrue(True) - - def test_set_mods(self): - self.assertTrue(True) - - def test_set_repeat(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/incomplete_todo/__init__.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/incomplete_todo/__init__.py deleted file mode 100644 index 1bb8bf6..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/incomplete_todo/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# empty diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/incomplete_todo/fake_2_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/incomplete_todo/fake_2_test.py deleted file mode 100644 index 1b76918..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/incomplete_todo/fake_2_test.py +++ /dev/null @@ -1,39 +0,0 @@ -if __name__ == '__main__': - 
import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - self.assertTrue(True) - - def todo_test_get_pressed(self): - self.assertTrue(True) - - def test_name(self): - self.assertTrue(True) - - def todo_test_set_mods(self): - self.assertTrue(True) - - def test_set_repeat(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/incomplete_todo/fake_3_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/incomplete_todo/fake_3_test.py deleted file mode 100644 index 398aef5..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/incomplete_todo/fake_3_test.py +++ /dev/null @@ -1,39 +0,0 @@ -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - self.assertTrue(True) - - def test_get_pressed(self): - self.assertTrue(True) - - def test_name(self): - self.assertTrue(True) - - def test_set_mods(self): - self.assertTrue(True) - - def test_set_repeat(self): - self.assertTrue(True) - - 
-if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/infinite_loop/__init__.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/infinite_loop/__init__.py deleted file mode 100644 index 1bb8bf6..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/infinite_loop/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# empty diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/infinite_loop/fake_1_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/infinite_loop/fake_1_test.py deleted file mode 100644 index a4262fa..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/infinite_loop/fake_1_test.py +++ /dev/null @@ -1,40 +0,0 @@ -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - while True: - pass - - def test_get_pressed(self): - self.assertTrue(True) - - def test_name(self): - self.assertTrue(True) - - def test_set_mods(self): - self.assertTrue(True) - - def test_set_repeat(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/infinite_loop/fake_2_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/infinite_loop/fake_2_test.py deleted file mode 100644 index 398aef5..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/infinite_loop/fake_2_test.py +++ /dev/null @@ 
-1,39 +0,0 @@ -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - self.assertTrue(True) - - def test_get_pressed(self): - self.assertTrue(True) - - def test_name(self): - self.assertTrue(True) - - def test_set_mods(self): - self.assertTrue(True) - - def test_set_repeat(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/print_stderr/__init__.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/print_stderr/__init__.py deleted file mode 100644 index 1bb8bf6..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/print_stderr/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# empty diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/print_stderr/fake_2_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/print_stderr/fake_2_test.py deleted file mode 100644 index 398aef5..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/print_stderr/fake_2_test.py +++ /dev/null @@ -1,39 +0,0 @@ -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = 
__name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - self.assertTrue(True) - - def test_get_pressed(self): - self.assertTrue(True) - - def test_name(self): - self.assertTrue(True) - - def test_set_mods(self): - self.assertTrue(True) - - def test_set_repeat(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/print_stderr/fake_3_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/print_stderr/fake_3_test.py deleted file mode 100644 index 7362aa4..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/print_stderr/fake_3_test.py +++ /dev/null @@ -1,40 +0,0 @@ -import sys -if __name__ == '__main__': - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - self.assertTrue(True) - - def test_get_pressed(self): - self.assertTrue(True) - - def test_name(self): - sys.stderr.write("jibberish messes things up\n") - self.assertTrue(False) - - def test_set_mods(self): - self.assertTrue(True) - - def test_set_repeat(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/print_stderr/fake_4_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/print_stderr/fake_4_test.py deleted file mode 100644 index b540a3c..0000000 --- 
a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/print_stderr/fake_4_test.py +++ /dev/null @@ -1,41 +0,0 @@ -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - self.assertTrue(True) - - def test_get_pressed(self): - self.assertTrue(False, "Some Jibberish") - - def test_name(self): - self.assertTrue(True) - - def test_set_mods(self): - if 1: - if 1: - assert False - - def test_set_repeat(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/print_stdout/__init__.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/print_stdout/__init__.py deleted file mode 100644 index 1bb8bf6..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/print_stdout/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# empty diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/print_stdout/fake_2_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/print_stdout/fake_2_test.py deleted file mode 100644 index 398aef5..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/print_stdout/fake_2_test.py +++ /dev/null @@ -1,39 +0,0 @@ -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - 
os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - self.assertTrue(True) - - def test_get_pressed(self): - self.assertTrue(True) - - def test_name(self): - self.assertTrue(True) - - def test_set_mods(self): - self.assertTrue(True) - - def test_set_repeat(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/print_stdout/fake_3_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/print_stdout/fake_3_test.py deleted file mode 100644 index 6b4af95..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/print_stdout/fake_3_test.py +++ /dev/null @@ -1,41 +0,0 @@ -import sys -if __name__ == '__main__': - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - self.assertTrue(True) - - def test_get_pressed(self): - sys.stdout.write("jibberish ruins everything\n") - self.assertTrue(False) - - def test_name(self): - sys.stdout.write("forgot to remove debug crap\n") - self.assertTrue(True) - - def test_set_mods(self): - self.assertTrue(True) - - def test_set_repeat(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git 
a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/print_stdout/fake_4_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/print_stdout/fake_4_test.py deleted file mode 100644 index b540a3c..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/print_stdout/fake_4_test.py +++ /dev/null @@ -1,41 +0,0 @@ -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - self.assertTrue(True) - - def test_get_pressed(self): - self.assertTrue(False, "Some Jibberish") - - def test_name(self): - self.assertTrue(True) - - def test_set_mods(self): - if 1: - if 1: - assert False - - def test_set_repeat(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/run_tests__test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/run_tests__test.py deleted file mode 100644 index db6736a..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/run_tests__test.py +++ /dev/null @@ -1,131 +0,0 @@ -################################################################################ - -import subprocess, os, sys, re, difflib - -################################################################################ - -IGNORE = ( - '.svn', - 'infinite_loop', -) -NORMALIZERS = ( - (r"Ran (\d+) tests in (\d+\.\d+)s", "Ran \\1 tests in X.XXXs" ), - (r'File ".*?([^/\\.]+\.py)"', 'File "\\1"'), -) - 
-################################################################################ - -def norm_result(result): - "normalize differences, such as timing between output" - for normalizer, replacement in NORMALIZERS: - if hasattr(normalizer, '__call__'): - result = normalizer(result) - else: - result = re.sub(normalizer, replacement, result) - - return result - -def call_proc(cmd, cd=None): - proc = subprocess.Popen ( - cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd = cd, - universal_newlines = True, - ) - if proc.wait(): - print ("%s %s" % (cmd, proc.wait())) - raise Exception(proc.stdout.read()) - - return proc.stdout.read() - -################################################################################ - -unnormed_diff = '-u' in sys.argv -verbose = '-v' in sys.argv or unnormed_diff -if '-h' in sys.argv or '--help' in sys.argv: sys.exit ( - "\nCOMPARES OUTPUT OF SINGLE VS SUBPROCESS MODE OF RUN_TESTS.PY\n\n" - '-v, to output diffs even on success\n' - '-u, to output diffs of unnormalized tests\n\n' - "Each line of a Differ delta begins with a two-letter code:\n\n" - " '- ' line unique to sequence 1\n" - " '+ ' line unique to sequence 2\n" - " ' ' line common to both sequences\n" - " '? ' line not present in either input sequence\n" -) - -main_dir = os.path.split(os.path.abspath(sys.argv[0]))[0] -trunk_dir = os.path.normpath(os.path.join(main_dir, '../../')) - -test_suite_dirs = [x for x in os.listdir(main_dir) - if os.path.isdir(os.path.join(main_dir, x)) - and x not in IGNORE ] - - -################################################################################ - -def assert_on_results(suite, single, sub): - test = globals().get('%s_test' % suite) - if hasattr(test, '__call_'): - test(suite, single, sub) - print ("assertions on %s OK" % (suite,)) - - -# Don't modify tests in suites below. 
These assertions are in place to make sure -# that tests are actually being ran - -def all_ok_test(uite, *args): - for results in args: - assert "Ran 36 tests" in results # some tests are runing - assert "OK" in results # OK - -def failures1_test(suite, *args): - for results in args: - assert "FAILED (failures=2)" in results - assert "Ran 18 tests" in results - -################################################################################ -# Test that output is the same in single process and subprocess modes -# - -base_cmd = [sys.executable, 'run_tests.py', '-i'] - -cmd = base_cmd + ['-n', '-f'] -sub_cmd = base_cmd + ['-f'] -time_out_cmd = base_cmd + ['-t', '4', '-f', 'infinite_loop' ] - -passes = 0 -failed = False - -for suite in test_suite_dirs: - single = call_proc(cmd + [suite], trunk_dir) - subs = call_proc(sub_cmd + [suite], trunk_dir) - - normed_single, normed_subs = map(norm_result,(single, subs)) - - failed = normed_single != normed_subs - if failed: - print ('%s suite comparison FAILED\n' % (suite,)) - else: - passes += 1 - print ('%s suite comparison OK' % (suite,)) - - assert_on_results(suite, single, subs) - - if verbose or failed: - print ("difflib.Differ().compare(single, suprocessed):\n") - print (''.join ( list( - difflib.Differ().compare ( - (unnormed_diff and single or normed_single).splitlines(1), - (unnormed_diff and subs or normed_subs).splitlines(1) - )) - )) - -sys.stdout.write("infinite_loop suite (subprocess mode timeout) ") -loop_test = call_proc(time_out_cmd, trunk_dir) -assert "successfully terminated" in loop_test -passes += 1 -print ("OK") - -print ("\n%s/%s suites pass" % (passes, len(test_suite_dirs) + 1)) - -print ("\n-h for help") - -################################################################################ diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/timeout/__init__.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/timeout/__init__.py deleted file mode 100644 index 
1bb8bf6..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/timeout/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# empty diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/timeout/fake_2_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/timeout/fake_2_test.py deleted file mode 100644 index 398aef5..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/timeout/fake_2_test.py +++ /dev/null @@ -1,39 +0,0 @@ -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - self.assertTrue(True) - - def test_get_mods(self): - self.assertTrue(True) - - def test_get_pressed(self): - self.assertTrue(True) - - def test_name(self): - self.assertTrue(True) - - def test_set_mods(self): - self.assertTrue(True) - - def test_set_repeat(self): - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/timeout/sleep_test.py b/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/timeout/sleep_test.py deleted file mode 100644 index e1c9857..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/run_tests__tests/timeout/sleep_test.py +++ /dev/null @@ -1,30 +0,0 @@ -if __name__ == '__main__': - import sys - import os - pkg_dir = (os.path.split( - os.path.split( - os.path.split( - os.path.abspath(__file__))[0])[0])[0]) - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not 
is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest - -import time - -class KeyModuleTest(unittest.TestCase): - def test_get_focused(self): - stop_time = time.time() + 10.0 - while time.time() < stop_time: - time.sleep(1) - - self.assertTrue(True) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/rwobject_test.py b/venv/lib/python3.7/site-packages/pygame/tests/rwobject_test.py deleted file mode 100644 index ad5ad33..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/rwobject_test.py +++ /dev/null @@ -1,117 +0,0 @@ -import sys -import unittest - -from pygame import encode_string, encode_file_path -from pygame.compat import bytes_, as_bytes, as_unicode - - -class RWopsEncodeStringTest(unittest.TestCase): - global getrefcount - - def test_obj_None(self): - encoded_string = encode_string(None) - - self.assertIsNone(encoded_string) - - def test_returns_bytes(self): - u = as_unicode(r"Hello") - encoded_string = encode_string(u) - - self.assertIsInstance(encoded_string, bytes_) - - def test_obj_bytes(self): - b = as_bytes("encyclop\xE6dia") - encoded_string = encode_string(b, 'ascii', 'strict') - - self.assertIs(encoded_string, b) - - def test_encode_unicode(self): - u = as_unicode(r"\u00DEe Olde Komp\u00FCter Shoppe") - b = u.encode('utf-8') - self.assertEqual(encode_string(u, 'utf-8'), b) - - def test_error_fowarding(self): - self.assertRaises(SyntaxError, encode_string) - - def test_errors(self): - s = r"abc\u0109defg\u011Dh\u0125ij\u0135klmnoprs\u015Dtu\u016Dvz" - u = as_unicode(s) - b = u.encode('ascii', 'ignore') - self.assertEqual(encode_string(u, 'ascii', 'ignore'), b) - - def test_encoding_error(self): - u = as_unicode(r"a\x80b") - encoded_string = encode_string(u, 'ascii', 'strict') - - self.assertIsNone(encoded_string) - - def test_check_defaults(self): - u = as_unicode(r"a\u01F7b") - b = u.encode("unicode_escape", 
"backslashreplace") - encoded_string = encode_string(u) - - self.assertEqual(encoded_string, b) - - def test_etype(self): - u = as_unicode(r"a\x80b") - self.assertRaises(SyntaxError, encode_string, - u, 'ascii', 'strict', SyntaxError) - - def test_string_with_null_bytes(self): - b = as_bytes("a\x00b\x00c") - encoded_string = encode_string(b, etype=SyntaxError) - encoded_decode_string = encode_string(b.decode(), 'ascii', 'strict') - - self.assertIs(encoded_string, b) - self.assertEqual(encoded_decode_string, b) - - try: - from sys import getrefcount as _g - getrefcount = _g # This nonsense is for Python 3.x - except ImportError: - pass - else: - def test_refcount(self): - bpath = as_bytes(" This is a string that is not cached.")[1:] - upath = bpath.decode('ascii') - before = getrefcount(bpath) - bpath = encode_string(bpath) - self.assertEqual(getrefcount(bpath), before) - bpath = encode_string(upath) - self.assertEqual(getrefcount(bpath), before) - - def test_smp(self): - utf_8 = as_bytes("a\xF0\x93\x82\xA7b") - u = as_unicode(r"a\U000130A7b") - b = encode_string(u, 'utf-8', 'strict', AssertionError) - self.assertEqual(b, utf_8) - # For Python 3.1, surrogate pair handling depends on whether the - # interpreter was built with UCS-2 or USC-4 unicode strings. 
- ##u = as_unicode(r"a\uD80C\uDCA7b") - ##b = encode_string(u, 'utf-8', 'strict', AssertionError) - ##self.assertEqual(b, utf_8) - -class RWopsEncodeFilePathTest(unittest.TestCase): - # Most tests can be skipped since RWopsEncodeFilePath wraps - # RWopsEncodeString - def test_encoding(self): - u = as_unicode(r"Hello") - encoded_file_path = encode_file_path(u) - - self.assertIsInstance(encoded_file_path, bytes_) - - def test_error_fowarding(self): - self.assertRaises(SyntaxError, encode_file_path) - - def test_path_with_null_bytes(self): - b = as_bytes("a\x00b\x00c") - encoded_file_path = encode_file_path(b) - - self.assertIsNone(encoded_file_path) - - def test_etype(self): - b = as_bytes("a\x00b\x00c") - self.assertRaises(TypeError, encode_file_path, b, TypeError) - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/scrap_tags.py b/venv/lib/python3.7/site-packages/pygame/tests/scrap_tags.py deleted file mode 100644 index 7601521..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/scrap_tags.py +++ /dev/null @@ -1,24 +0,0 @@ -# For now the scrap module has not been updated for SDL 2 -__tags__ = ['SDL2_ignore'] - -import sys - -exclude = False - -if sys.platform == 'win32' or sys.platform.startswith('linux'): - try: - import pygame - pygame.scrap._NOT_IMPLEMENTED_ - except AttributeError: - pass - else: - exclude = True -else: - exclude = True - -if exclude: - __tags__.extend(['ignore', 'subprocess_ignore']) - - - - diff --git a/venv/lib/python3.7/site-packages/pygame/tests/scrap_test.py b/venv/lib/python3.7/site-packages/pygame/tests/scrap_test.py deleted file mode 100644 index 60ff248..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/scrap_test.py +++ /dev/null @@ -1,290 +0,0 @@ -import os -import sys -if os.environ.get('SDL_VIDEODRIVER') == 'dummy': - __tags__ = ('ignore', 'subprocess_ignore') -import unittest -from pygame.tests.test_utils import trunk_relative_path - -import pygame -from 
pygame import scrap -from pygame.compat import as_bytes - -class ScrapModuleTest(unittest.TestCase): - - @classmethod - def setUpClass(cls): - pygame.display.init() - pygame.display.set_mode((1, 1)) - scrap.init() - - @classmethod - def tearDownClass(cls): - # scrap.quit() # Does not exist! - pygame.display.quit() - - def test_init(self): - """Ensures scrap module still initialized after multiple init calls.""" - scrap.init() - scrap.init() - - self.assertTrue(scrap.get_init()) - - def test_get_init(self): - """Ensures get_init gets the init state.""" - self.assertTrue(scrap.get_init()) - - def todo_test_contains(self): - """Ensures contains works as expected.""" - self.fail() - - def todo_test_get(self): - """Ensures get works as expected.""" - self.fail() - - def test_get__owned_empty_type(self): - """Ensures get works when there is no data of the requested type - in the clipboard and the clipboard is owned by the pygame application. - """ - # Use a unique data type identifier to ensure there is no preexisting - # data. - DATA_TYPE = 'test_get__owned_empty_type' - - if scrap.lost(): - # Try to acquire the clipboard. 
- scrap.put(pygame.SCRAP_TEXT, b'text to clipboard') - - if scrap.lost(): - self.skipTest('requires the pygame application to own the ' - 'clipboard') - - data = scrap.get(DATA_TYPE) - - self.assertIsNone(data) - - def todo_test_get_types(self): - """Ensures get_types works as expected.""" - self.fail() - - def todo_test_lost(self): - """Ensures lost works as expected.""" - self.fail() - - def test_set_mode(self): - """Ensures set_mode works as expected.""" - scrap.set_mode(pygame.SCRAP_SELECTION) - scrap.set_mode(pygame.SCRAP_CLIPBOARD) - - self.assertRaises(ValueError, scrap.set_mode, 1099) - - def test_put__text(self): - """Ensures put can place text into the clipboard.""" - scrap.put(pygame.SCRAP_TEXT, as_bytes("Hello world")) - - self.assertEqual(scrap.get(pygame.SCRAP_TEXT), as_bytes("Hello world")) - - scrap.put(pygame.SCRAP_TEXT, as_bytes("Another String")) - - self.assertEqual(scrap.get(pygame.SCRAP_TEXT), - as_bytes("Another String")) - - @unittest.skipIf('pygame.image' not in sys.modules, - 'requires pygame.image module') - def test_put__bmp_image(self): - """Ensures put can place a BMP image into the clipboard.""" - sf = pygame.image.load(trunk_relative_path( - "examples/data/asprite.bmp")) - expected_string = pygame.image.tostring(sf, "RGBA") - scrap.put(pygame.SCRAP_BMP, expected_string) - - self.assertEqual(scrap.get(pygame.SCRAP_BMP), expected_string) - - def test_put(self): - """Ensures put can place data into the clipboard - when using a user defined type identifier. - """ - DATA_TYPE = 'arbitrary buffer' - - scrap.put(DATA_TYPE, as_bytes('buf')) - r = scrap.get(DATA_TYPE) - - self.assertEqual(r, as_bytes('buf')) - - -class ScrapModuleClipboardNotOwnedTest(unittest.TestCase): - """Test the scrap module's functionality when the pygame application is - not the current owner of the clipboard. - - A separate class is used to prevent tests that acquire the clipboard from - interfering with these tests. 
- """ - @classmethod - def setUpClass(cls): - pygame.display.init() - pygame.display.set_mode((1, 1)) - scrap.init() - - @classmethod - def tearDownClass(cls): - # scrap.quit() # Does not exist! - pygame.quit() - pygame.display.quit() - - def _skip_if_clipboard_owned(self): - # Skip test if the pygame application owns the clipboard. Currently, - # there is no way to give up ownership. - if not scrap.lost(): - self.skipTest('requires the pygame application to not own the ' - 'clipboard') - - def test_get__not_owned(self): - """Ensures get works when there is no data of the requested type - in the clipboard and the clipboard is not owned by the pygame - application. - """ - self._skip_if_clipboard_owned() - - # Use a unique data type identifier to ensure there is no preexisting - # data. - DATA_TYPE = 'test_get__not_owned' - - data = scrap.get(DATA_TYPE) - - self.assertIsNone(data) - - def test_get_types__not_owned(self): - """Ensures get_types works when the clipboard is not owned - by the pygame application. - """ - self._skip_if_clipboard_owned() - - data_types = scrap.get_types() - - self.assertIsInstance(data_types, list) - - def test_contains__not_owned(self): - """Ensures contains works when the clipboard is not owned - by the pygame application. - """ - self._skip_if_clipboard_owned() - - # Use a unique data type identifier to ensure there is no preexisting - # data. - DATA_TYPE = 'test_contains__not_owned' - - contains = scrap.contains(DATA_TYPE) - - self.assertFalse(contains) - - def test_lost__not_owned(self): - """Ensures lost works when the clipboard is not owned - by the pygame application. 
- """ - self._skip_if_clipboard_owned() - - lost = scrap.lost() - - self.assertTrue(lost) - - -class X11InteractiveTest(unittest.TestCase): - __tags__ = ['ignore', 'subprocess_ignore'] - try: - pygame.display.init() - except Exception: - pass - else: - if pygame.display.get_driver() == 'x11': - __tags__ = ['interactive'] - pygame.display.quit() - - def test_issue_208(self): - """PATCH: pygame.scrap on X11, fix copying into PRIMARY selection - - Copying into theX11 PRIMARY selection (mouse copy/paste) would not - work due to a confusion between content type and clipboard type. - - """ - - from pygame import display, event, freetype - from pygame.locals import SCRAP_SELECTION, SCRAP_TEXT - from pygame.locals import KEYDOWN, K_y, QUIT - - success = False - freetype.init() - font = freetype.Font(None, 24) - display.init() - display.set_caption("Interactive X11 Paste Test") - screen = display.set_mode((600, 200)) - screen.fill(pygame.Color('white')) - text = "Scrap put() succeeded." - msg = ('Some text has been placed into the X11 clipboard.' - ' Please click the center mouse button in an open' - ' text window to retrieve it.' - '\n\nDid you get "{}"? 
(y/n)').format(text) - word_wrap(screen, msg, font, 6) - display.flip() - event.pump() - scrap.init() - scrap.set_mode(SCRAP_SELECTION) - scrap.put(SCRAP_TEXT, text.encode('UTF-8')) - while True: - e = event.wait() - if e.type == QUIT: - break - if e.type == KEYDOWN: - success = (e.key == K_y) - break - pygame.display.quit() - self.assertTrue(success) - -def word_wrap(surf, text, font, margin=0, color=(0, 0, 0)): - font.origin = True - surf_width, surf_height = surf.get_size() - width = surf_width - 2 * margin - height = surf_height - 2 * margin - line_spacing = int(1.25 * font.get_sized_height()) - x, y = margin, margin + line_spacing - space = font.get_rect(' ') - for word in iwords(text): - if word == '\n': - x, y = margin, y + line_spacing - else: - bounds = font.get_rect(word) - if x + bounds.width + bounds.x >= width: - x, y = margin, y + line_spacing - if x + bounds.width + bounds.x >= width: - raise ValueError("word too wide for the surface") - if y + bounds.height - bounds.y >= height: - raise ValueError("text to long for the surface") - font.render_to(surf, (x, y), None, color) - x += bounds.width + space.width - return x, y - -def iwords(text): - # r"\n|[^ ]+" - # - head = 0 - tail = head - end = len(text) - while head < end: - if text[head] == ' ': - head += 1 - tail = head + 1 - elif text[head] == '\n': - head += 1 - yield '\n' - tail = head + 1 - elif tail == end: - yield text[head:] - head = end - elif text[tail] == '\n': - yield text[head:tail] - head = tail - elif text[tail] == ' ': - yield text[head:tail] - head = tail - else: - tail += 1 - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/sndarray_tags.py b/venv/lib/python3.7/site-packages/pygame/tests/sndarray_tags.py deleted file mode 100644 index 6493eb2..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/sndarray_tags.py +++ /dev/null @@ -1,12 +0,0 @@ -__tags__ = ['array'] - -exclude = False - -try: - import pygame.mixer - import 
numpy -except ImportError: - exclude = True - -if exclude: - __tags__.extend(('ignore', 'subprocess_ignore')) diff --git a/venv/lib/python3.7/site-packages/pygame/tests/sndarray_test.py b/venv/lib/python3.7/site-packages/pygame/tests/sndarray_test.py deleted file mode 100644 index 67b110b..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/sndarray_test.py +++ /dev/null @@ -1,174 +0,0 @@ -import unittest - -from numpy import int8, int16, uint8, uint16, float32, array, alltrue - -import pygame -from pygame.compat import as_bytes -import pygame.sndarray - - -SDL2 = pygame.get_sdl_version()[0] >= 2 - - -class SndarrayTest (unittest.TestCase): - array_dtypes = {8: uint8, -8: int8, 16: uint16, -16: int16, 32: float32} - - def _assert_compatible(self, arr, size): - dtype = self.array_dtypes[size] - self.assertEqual(arr.dtype, dtype) - - def test_array(self): - - def check_array(size, channels, test_data): - try: - pygame.mixer.init(22050, size, channels, allowedchanges=0) - except pygame.error: - # Not all sizes are supported on all systems. 
- return - try: - __, sz, __ = pygame.mixer.get_init() - if sz == size: - srcarr = array(test_data, self.array_dtypes[size]) - snd = pygame.sndarray.make_sound(srcarr) - arr = pygame.sndarray.array(snd) - self._assert_compatible(arr, size) - self.assertTrue(alltrue(arr == srcarr), - "size: %i\n%s\n%s" % ( - size, arr, test_data)) - finally: - pygame.mixer.quit() - - check_array(8, 1, [0, 0x0f, 0xf0, 0xff]) - check_array(8, 2, - [[0, 0x80], [0x2D, 0x41], [0x64, 0xA1], [0xff, 0x40]]) - check_array(16, 1, [0, 0x00ff, 0xff00, 0xffff]) - check_array(16, 2, [[0, 0xffff], [0xffff, 0], - [0x00ff, 0xff00], [0x0f0f, 0xf0f0]]) - check_array(-8, 1, [0, -0x80, 0x7f, 0x64]) - check_array(-8, 2, - [[0, -0x80], [-0x64, 0x64], [0x25, -0x50], [0xff, 0]]) - check_array(-16, 1, [0, 0x7fff, -0x7fff, -1]) - check_array(-16, 2, [[0, -0x7fff], [-0x7fff, 0], - [0x7fff, 0], [0, 0x7fff]]) - - def test_get_arraytype(self): - array_type = pygame.sndarray.get_arraytype() - - self.assertEqual(array_type, 'numpy', - "unknown array type %s" % array_type) - - def test_get_arraytypes(self): - arraytypes = pygame.sndarray.get_arraytypes() - self.assertIn('numpy', arraytypes) - - for atype in arraytypes: - self.assertEqual(atype, 'numpy', "unknown array type %s" % atype) - - def test_make_sound(self): - - def check_sound(size, channels, test_data): - try: - pygame.mixer.init(22050, size, channels, allowedchanges=0) - except pygame.error: - # Not all sizes are supported on all systems. 
- return - try: - __, sz, __ = pygame.mixer.get_init() - if sz == size: - srcarr = array(test_data, self.array_dtypes[size]) - snd = pygame.sndarray.make_sound(srcarr) - arr = pygame.sndarray.samples(snd) - self.assertTrue(alltrue(arr == srcarr), - "size: %i\n%s\n%s" % ( - size, arr, test_data)) - finally: - pygame.mixer.quit() - - check_sound(8, 1, [0, 0x0f, 0xf0, 0xff]) - check_sound(8, 2, - [[0, 0x80], [0x2D, 0x41], [0x64, 0xA1], [0xff, 0x40]]) - check_sound(16, 1, [0, 0x00ff, 0xff00, 0xffff]) - check_sound(16, 2, [[0, 0xffff], [0xffff, 0], - [0x00ff, 0xff00], [0x0f0f, 0xf0f0]]) - check_sound(-8, 1, [0, -0x80, 0x7f, 0x64]) - check_sound(-8, 2, - [[0, -0x80], [-0x64, 0x64], [0x25, -0x50], [0xff, 0]]) - check_sound(-16, 1, [0, 0x7fff, -0x7fff, -1]) - check_sound(-16, 2, [[0, -0x7fff], [-0x7fff, 0], - [0x7fff, 0], [0, 0x7fff]]) - - if SDL2: - check_sound(32, 2, [[0.0, -1.0], [-1.0, 0], [1.0, 0], [0, 1.0]]) - - def test_samples(self): - - null_byte = as_bytes('\x00') - def check_sample(size, channels, test_data): - try: - pygame.mixer.init(22050, size, channels, allowedchanges=0) - except pygame.error: - # Not all sizes are supported on all systems. - return - try: - __, sz, __ = pygame.mixer.get_init() - if sz == size: - zeroed = null_byte * ((abs(size) // 8) * - len(test_data) * - channels) - snd = pygame.mixer.Sound(buffer=zeroed) - samples = pygame.sndarray.samples(snd) - self._assert_compatible(samples, size) - ##print ('X %s' % (samples.shape,)) - ##print ('Y %s' % (test_data,)) - samples[...] 
= test_data - arr = pygame.sndarray.array(snd) - self.assertTrue(alltrue(samples == arr), - "size: %i\n%s\n%s" % ( - size, arr, test_data)) - finally: - pygame.mixer.quit() - - check_sample(8, 1, [0, 0x0f, 0xf0, 0xff]) - check_sample(8, 2, - [[0, 0x80], [0x2D, 0x41], [0x64, 0xA1], [0xff, 0x40]]) - check_sample(16, 1, [0, 0x00ff, 0xff00, 0xffff]) - check_sample(16, 2, [[0, 0xffff], [0xffff, 0], - [0x00ff, 0xff00], [0x0f0f, 0xf0f0]]) - check_sample(-8, 1, [0, -0x80, 0x7f, 0x64]) - check_sample(-8, 2, - [[0, -0x80], [-0x64, 0x64], [0x25, -0x50], [0xff, 0]]) - check_sample(-16, 1, [0, 0x7fff, -0x7fff, -1]) - check_sample(-16, 2, [[0, -0x7fff], [-0x7fff, 0], - [0x7fff, 0], [0, 0x7fff]]) - - if SDL2: - check_sample(32, 2, [[0.0, -1.0], [-1.0, 0], [1.0, 0], [0, 1.0]]) - - def test_use_arraytype(self): - - def do_use_arraytype(atype): - pygame.sndarray.use_arraytype(atype) - - pygame.sndarray.use_arraytype('numpy') - self.assertEqual(pygame.sndarray.get_arraytype(), 'numpy') - - self.assertRaises(ValueError, do_use_arraytype, 'not an option') - - - @unittest.skipIf(not SDL2, 'requires SDL2') - def test_float32(self): - """ sized arrays work with Sounds and 32bit float arrays. - """ - try: - pygame.mixer.init(22050, 32, 2, allowedchanges=0) - except pygame.error: - # Not all sizes are supported on all systems. 
- self.skipTest("unsupported mixer configuration") - - arr = array([[0.0, -1.0], [-1.0, 0], [1.0, 0], [0, 1.0]], float32) - newsound = pygame.mixer.Sound(array=arr) - pygame.mixer.quit() - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/sprite_test.py b/venv/lib/python3.7/site-packages/pygame/tests/sprite_test.py deleted file mode 100644 index 86a7a1d..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/sprite_test.py +++ /dev/null @@ -1,1248 +0,0 @@ -#################################### IMPORTS ################################### -# -*- encoding: utf-8 -*- - - -import unittest - -import pygame -from pygame import sprite - - -################################# MODULE LEVEL ################################# - -class SpriteModuleTest( unittest.TestCase ): - pass - -######################### SPRITECOLLIDE FUNCTIONS TEST ######################### - -class SpriteCollideTest( unittest.TestCase ): - - def setUp(self): - self.ag = sprite.AbstractGroup() - self.ag2 = sprite.AbstractGroup() - self.s1 = sprite.Sprite(self.ag) - self.s2 = sprite.Sprite(self.ag2) - self.s3 = sprite.Sprite(self.ag2) - - self.s1.image = pygame.Surface((50,10), pygame.SRCALPHA, 32) - self.s2.image = pygame.Surface((10,10), pygame.SRCALPHA, 32) - self.s3.image = pygame.Surface((10,10), pygame.SRCALPHA, 32) - - self.s1.rect = self.s1.image.get_rect() - self.s2.rect = self.s2.image.get_rect() - self.s3.rect = self.s3.image.get_rect() - self.s2.rect.move_ip(40, 0) - self.s3.rect.move_ip(100, 100) - - def test_spritecollide__works_if_collided_cb_is_None(self): - # Test that sprites collide without collided function. - self.assertEqual ( - sprite.spritecollide ( - self.s1, self.ag2, dokill = False, collided = None - ), - [self.s2] - ) - - def test_spritecollide__works_if_collided_cb_not_passed(self): - # Should also work when collided function isn't passed at all. 
- self.assertEqual(sprite.spritecollide ( - self.s1, self.ag2, dokill = False), - [self.s2] - ) - - def test_spritecollide__collided_must_be_a_callable(self): - # Need to pass a callable. - self.assertRaises ( - TypeError, - sprite.spritecollide, self.s1, self.ag2, dokill = False, collided = 1 - ) - - def test_spritecollide__collided_defaults_to_collide_rect(self): - # collide_rect should behave the same as default. - self.assertEqual ( - sprite.spritecollide ( - self.s1, self.ag2, dokill = False, collided = sprite.collide_rect - ), - [self.s2] - ) - - def test_collide_rect_ratio__ratio_of_one_like_default(self): - # collide_rect_ratio should behave the same as default at a 1.0 ratio. - self.assertEqual ( - sprite.spritecollide ( - self.s1, self.ag2, dokill = False, - collided = sprite.collide_rect_ratio(1.0) - ), - [self.s2] - ) - - def test_collide_rect_ratio__collides_all_at_ratio_of_twenty(self): - # collide_rect_ratio should collide all at a 20.0 ratio. - collided_func = sprite.collide_rect_ratio(20.0) - expected_sprites = sorted(self.ag2.sprites(), key=id) - - collided_sprites = sorted(sprite.spritecollide( - self.s1, self.ag2, dokill=False, collided=collided_func), key=id) - - self.assertListEqual(collided_sprites, expected_sprites) - - def test_collide_circle__no_radius_set(self): - # collide_circle with no radius set. - self.assertEqual ( - sprite.spritecollide ( - self.s1, self.ag2, dokill = False, collided = sprite.collide_circle - ), - [self.s2] - ) - - def test_collide_circle_ratio__no_radius_and_ratio_of_one(self): - # collide_circle_ratio with no radius set, at a 1.0 ratio. - self.assertEqual ( - sprite.spritecollide ( - self.s1, self.ag2, dokill = False, - collided = sprite.collide_circle_ratio(1.0) - ), - [self.s2] - ) - - def test_collide_circle_ratio__no_radius_and_ratio_of_twenty(self): - # collide_circle_ratio with no radius set, at a 20.0 ratio. 
- collided_func = sprite.collide_circle_ratio(20.0) - expected_sprites = sorted(self.ag2.sprites(), key=id) - - collided_sprites = sorted(sprite.spritecollide( - self.s1, self.ag2, dokill=False, collided=collided_func), key=id) - - self.assertListEqual(expected_sprites, collided_sprites) - - def test_collide_circle__with_radii_set(self): - # collide_circle with a radius set. - self.s1.radius = 50 - self.s2.radius = 10 - self.s3.radius = 400 - collided_func = sprite.collide_circle - expected_sprites = sorted(self.ag2.sprites(), key=id) - - collided_sprites = sorted( - sprite.spritecollide(self.s1, self.ag2, dokill=False, - collided=collided_func), key=id) - - self.assertListEqual(expected_sprites, collided_sprites) - - def test_collide_circle_ratio__with_radii_set(self): - # collide_circle_ratio with a radius set. - self.s1.radius = 50 - self.s2.radius = 10 - self.s3.radius = 400 - collided_func = sprite.collide_circle_ratio(0.5) - expected_sprites = sorted(self.ag2.sprites(), key=id) - - collided_sprites = sorted(sprite.spritecollide( - self.s1, self.ag2, dokill=False, collided=collided_func), key=id) - - self.assertListEqual(expected_sprites, collided_sprites) - - def test_collide_mask__opaque(self): - # make some fully opaque sprites that will collide with masks. - self.s1.image.fill((255,255,255,255)) - self.s2.image.fill((255,255,255,255)) - self.s3.image.fill((255,255,255,255)) - - # masks should be autogenerated from image if they don't exist. - self.assertEqual ( - sprite.spritecollide ( - self.s1, self.ag2, dokill = False, - collided = sprite.collide_mask - ), - [self.s2] - ) - - self.s1.mask = pygame.mask.from_surface(self.s1.image) - self.s2.mask = pygame.mask.from_surface(self.s2.image) - self.s3.mask = pygame.mask.from_surface(self.s3.image) - - # with set masks. 
- self.assertEqual ( - sprite.spritecollide ( - self.s1, self.ag2, dokill = False, - collided = sprite.collide_mask - ), - [self.s2] - ) - - def test_collide_mask__transparent(self): - # make some sprites that are fully transparent, so they won't collide. - self.s1.image.fill((255,255,255,0)) - self.s2.image.fill((255,255,255,0)) - self.s3.image.fill((255,255,255,0)) - - self.s1.mask = pygame.mask.from_surface(self.s1.image, 255) - self.s2.mask = pygame.mask.from_surface(self.s2.image, 255) - self.s3.mask = pygame.mask.from_surface(self.s3.image, 255) - - self.assertFalse ( - sprite.spritecollide ( - self.s1, self.ag2, dokill = False, collided = sprite.collide_mask - ) - ) - - def test_spritecollideany__without_collided_callback(self): - - # pygame.sprite.spritecollideany(sprite, group) -> sprite - # finds any sprites that collide - - # if collided is not passed, all - # sprites must have a "rect" value, which is a - # rectangle of the sprite area, which will be used - # to calculate the collision. 
- - # s2 in, s3 out - expected_sprite = self.s2 - collided_sprite = sprite.spritecollideany(self.s1, self.ag2) - - self.assertEqual(collided_sprite, expected_sprite) - - # s2 and s3 out - self.s2.rect.move_ip(0, 10) - collided_sprite = sprite.spritecollideany(self.s1, self.ag2) - - self.assertIsNone(collided_sprite) - - # s2 out, s3 in - self.s3.rect.move_ip(-105, -105) - expected_sprite = self.s3 - collided_sprite = sprite.spritecollideany(self.s1, self.ag2) - - self.assertEqual(collided_sprite, expected_sprite) - - # s2 and s3 in - self.s2.rect.move_ip(0, -10) - expected_sprite_choices = self.ag2.sprites() - collided_sprite = sprite.spritecollideany(self.s1, self.ag2) - - self.assertIn(collided_sprite, expected_sprite_choices) - - def test_spritecollideany__with_collided_callback(self): - - # pygame.sprite.spritecollideany(sprite, group) -> sprite - # finds any sprites that collide - - # collided is a callback function used to calculate if - # two sprites are colliding. it should take two sprites - # as values, and return a bool value indicating if - # they are colliding. - - # This collision test can be faster than pygame.sprite.spritecollide() - # since it has less work to do. - - arg_dict_a = {} - arg_dict_b = {} - return_container = [True] - - # This function is configurable using the mutable default arguments! - def collided_callback(spr_a, spr_b, - arg_dict_a=arg_dict_a, arg_dict_b=arg_dict_b, - return_container=return_container): - - count = arg_dict_a.get(spr_a, 0) - arg_dict_a[spr_a] = 1 + count - - count = arg_dict_b.get(spr_b, 0) - arg_dict_b[spr_b] = 1 + count - - return return_container[0] - - # This should return a sprite from self.ag2 because the callback - # function (collided_callback()) currently returns True. 
- expected_sprite_choices = self.ag2.sprites() - collided_sprite = sprite.spritecollideany(self.s1, self.ag2, - collided_callback) - - self.assertIn(collided_sprite, expected_sprite_choices) - - # The callback function should have been called only once, so self.s1 - # should have only been passed as an argument once - self.assertEqual(len(arg_dict_a), 1) - self.assertEqual(arg_dict_a[self.s1], 1) - - # The callback function should have been called only once, so self.s2 - # exclusive-or self.s3 should have only been passed as an argument - # once - self.assertEqual(len(arg_dict_b), 1) - self.assertEqual(list(arg_dict_b.values())[0], 1) - self.assertTrue(self.s2 in arg_dict_b or self.s3 in arg_dict_b) - - arg_dict_a.clear() - arg_dict_b.clear() - return_container[0] = False - - # This should return None because the callback function - # (collided_callback()) currently returns False. - collided_sprite = sprite.spritecollideany(self.s1, self.ag2, - collided_callback) - - self.assertIsNone(collided_sprite) - - # The callback function should have been called as many times as - # there are sprites in self.ag2 - self.assertEqual(len(arg_dict_a), 1) - self.assertEqual(arg_dict_a[self.s1], len(self.ag2)) - self.assertEqual(len(arg_dict_b), len(self.ag2)) - - # Each sprite in self.ag2 should be called once. - for s in self.ag2: - self.assertEqual(arg_dict_b[s], 1) - - def test_groupcollide__without_collided_callback(self): - - # pygame.sprite.groupcollide(groupa, groupb, dokilla, dokillb) -> dict - # collision detection between group and group - - # test no kill - expected_dict = {self.s1: [self.s2]} - crashed = pygame.sprite.groupcollide(self.ag, self.ag2, False, False) - - self.assertDictEqual(expected_dict, crashed) - - crashed = pygame.sprite.groupcollide(self.ag, self.ag2, False, False) - - self.assertDictEqual(expected_dict, crashed) - - # Test dokill2=True (kill colliding sprites in second group). 
- crashed = pygame.sprite.groupcollide(self.ag, self.ag2, False, True) - - self.assertDictEqual(expected_dict, crashed) - - expected_dict = {} - crashed = pygame.sprite.groupcollide(self.ag, self.ag2, False, False) - - self.assertDictEqual(expected_dict, crashed) - - # Test dokill1=True (kill colliding sprites in first group). - self.s3.rect.move_ip(-100, -100) - expected_dict = {self.s1: [self.s3]} - crashed = pygame.sprite.groupcollide(self.ag, self.ag2, True, False) - - self.assertDictEqual(expected_dict, crashed) - - expected_dict = {} - crashed = pygame.sprite.groupcollide(self.ag, self.ag2, False, False) - - self.assertDictEqual(expected_dict, crashed) - - def test_groupcollide__with_collided_callback(self): - - collided_callback_true = lambda spr_a, spr_b: True - collided_callback_false = lambda spr_a, spr_b: False - - # test no kill - expected_dict = {} - crashed = pygame.sprite.groupcollide(self.ag, self.ag2, False, False, - collided_callback_false) - - self.assertDictEqual(expected_dict, crashed) - - expected_dict = {self.s1: sorted(self.ag2.sprites(), key=id)} - crashed = pygame.sprite.groupcollide(self.ag, self.ag2, False, False, - collided_callback_true) - for value in crashed.values(): - value.sort(key=id) - - self.assertDictEqual(expected_dict, crashed) - - # expected_dict is the same again for this collide - crashed = pygame.sprite.groupcollide(self.ag, self.ag2, False, False, - collided_callback_true) - for value in crashed.values(): - value.sort(key=id) - - self.assertDictEqual(expected_dict, crashed) - - # Test dokill2=True (kill colliding sprites in second group). 
- expected_dict = {} - crashed = pygame.sprite.groupcollide(self.ag, self.ag2, False, True, - collided_callback_false) - - self.assertDictEqual(expected_dict, crashed) - - expected_dict = {self.s1: sorted(self.ag2.sprites(), key=id)} - crashed = pygame.sprite.groupcollide(self.ag, self.ag2, False, True, - collided_callback_true) - for value in crashed.values(): - value.sort(key=id) - - self.assertDictEqual(expected_dict, crashed) - - expected_dict = {} - crashed = pygame.sprite.groupcollide(self.ag, self.ag2, False, True, - collided_callback_true) - - self.assertDictEqual(expected_dict, crashed) - - # Test dokill1=True (kill colliding sprites in first group). - self.ag.add(self.s2) - self.ag2.add(self.s3) - expected_dict = {} - crashed = pygame.sprite.groupcollide(self.ag, self.ag2, True, False, - collided_callback_false) - - self.assertDictEqual(expected_dict, crashed) - - expected_dict = {self.s1: [self.s3], self.s2: [self.s3]} - crashed = pygame.sprite.groupcollide(self.ag, self.ag2, True, False, - collided_callback_true) - - self.assertDictEqual(expected_dict, crashed) - - expected_dict = {} - crashed = pygame.sprite.groupcollide(self.ag, self.ag2, True, False, - collided_callback_true) - - self.assertDictEqual(expected_dict, crashed) - - def test_collide_rect(self): - # Test colliding - some edges touching - self.assertTrue(pygame.sprite.collide_rect(self.s1, self.s2)) - self.assertTrue(pygame.sprite.collide_rect(self.s2, self.s1)) - - # Test colliding - all edges touching - self.s2.rect.center = self.s3.rect.center - - self.assertTrue(pygame.sprite.collide_rect(self.s2, self.s3)) - self.assertTrue(pygame.sprite.collide_rect(self.s3, self.s2)) - - # Test colliding - no edges touching - self.s2.rect.inflate_ip(10, 10) - - self.assertTrue(pygame.sprite.collide_rect(self.s2, self.s3)) - self.assertTrue(pygame.sprite.collide_rect(self.s3, self.s2)) - - # Test colliding - some edges intersecting - self.s2.rect.center = (self.s1.rect.right, self.s1.rect.bottom) - - 
self.assertTrue(pygame.sprite.collide_rect(self.s1, self.s2)) - self.assertTrue(pygame.sprite.collide_rect(self.s2, self.s1)) - - # Test not colliding - self.assertFalse(pygame.sprite.collide_rect(self.s1, self.s3)) - self.assertFalse(pygame.sprite.collide_rect(self.s3, self.s1)) - - -################################################################################ - -class AbstractGroupTypeTest( unittest.TestCase ): - def setUp(self): - self.ag = sprite.AbstractGroup() - self.ag2 = sprite.AbstractGroup() - self.s1 = sprite.Sprite(self.ag) - self.s2 = sprite.Sprite(self.ag) - self.s3 = sprite.Sprite(self.ag2) - self.s4 = sprite.Sprite(self.ag2) - - self.s1.image = pygame.Surface((10, 10)) - self.s1.image.fill(pygame.Color('red')) - self.s1.rect = self.s1.image.get_rect() - - self.s2.image = pygame.Surface((10, 10)) - self.s2.image.fill(pygame.Color('green')) - self.s2.rect = self.s2.image.get_rect() - self.s2.rect.left = 10 - - self.s3.image = pygame.Surface((10, 10)) - self.s3.image.fill(pygame.Color('blue')) - self.s3.rect = self.s3.image.get_rect() - self.s3.rect.top = 10 - - self.s4.image = pygame.Surface((10, 10)) - self.s4.image.fill(pygame.Color('white')) - self.s4.rect = self.s4.image.get_rect() - self.s4.rect.left = 10 - self.s4.rect.top = 10 - - self.bg = pygame.Surface((20, 20)) - self.scr = pygame.Surface((20, 20)) - self.scr.fill(pygame.Color('grey')) - - def test_has( self ): - " See if AbstractGroup.has() works as expected. " - - self.assertEqual(True, self.s1 in self.ag) - - self.assertEqual(True, self.ag.has(self.s1)) - - self.assertEqual(True, self.ag.has([self.s1, self.s2])) - - # see if one of them not being in there. 
- self.assertNotEqual(True, self.ag.has([self.s1, self.s2, self.s3])) - self.assertNotEqual(True, self.ag.has(self.s1, self.s2, self.s3)) - self.assertNotEqual(True, self.ag.has(self.s1, - sprite.Group(self.s2, self.s3))) - self.assertNotEqual(True, self.ag.has(self.s1, [self.s2, self.s3])) - - # test empty list processing - self.assertFalse(self.ag.has(*[])) - self.assertFalse(self.ag.has([])) - self.assertFalse(self.ag.has([[]])) - - # see if a second AbstractGroup works. - self.assertEqual(True, self.ag2.has(self.s3)) - - def test_add(self): - ag3 = sprite.AbstractGroup() - sprites = (self.s1, self.s2, self.s3, self.s4) - - for s in sprites: - self.assertNotIn(s, ag3) - - ag3.add(self.s1, [self.s2], self.ag2) - - for s in sprites: - self.assertIn(s, ag3) - - def test_add_internal(self): - self.assertNotIn(self.s1, self.ag2) - - self.ag2.add_internal(self.s1) - - self.assertIn(self.s1, self.ag2) - - def test_clear(self): - - self.ag.draw(self.scr) - self.ag.clear(self.scr, self.bg) - self.assertEqual((0, 0, 0, 255), - self.scr.get_at((5, 5))) - self.assertEqual((0, 0, 0, 255), - self.scr.get_at((15, 5))) - - def test_draw(self): - - self.ag.draw(self.scr) - self.assertEqual((255, 0, 0, 255), - self.scr.get_at((5, 5))) - self.assertEqual((0, 255, 0, 255), - self.scr.get_at((15, 5))) - - def test_empty(self): - - self.ag.empty() - self.assertFalse(self.s1 in self.ag) - self.assertFalse(self.s2 in self.ag) - - def test_has_internal(self): - self.assertTrue(self.ag.has_internal(self.s1)) - self.assertFalse(self.ag.has_internal(self.s3)) - - def test_remove(self): - - # Test removal of 1 sprite - self.ag.remove(self.s1) - self.assertFalse(self.ag in self.s1.groups()) - self.assertFalse(self.ag.has(self.s1)) - - # Test removal of 2 sprites as 2 arguments - self.ag2.remove(self.s3, self.s4) - self.assertFalse(self.ag2 in self.s3.groups()) - self.assertFalse(self.ag2 in self.s4.groups()) - self.assertFalse(self.ag2.has(self.s3, self.s4)) - - # Test removal of 4 sprites 
as a list containing a sprite and a group - # containing a sprite and another group containing 2 sprites. - self.ag.add(self.s1, self.s3, self.s4) - self.ag2.add(self.s3, self.s4) - g = sprite.Group(self.s2) - self.ag.remove([self.s1, g], self.ag2) - self.assertFalse(self.ag in self.s1.groups()) - self.assertFalse(self.ag in self.s2.groups()) - self.assertFalse(self.ag in self.s3.groups()) - self.assertFalse(self.ag in self.s4.groups()) - self.assertFalse(self.ag.has(self.s1, self.s2, self.s3, self.s4)) - - def test_remove_internal(self): - - self.ag.remove_internal(self.s1) - self.assertFalse(self.ag.has_internal(self.s1)) - - def test_sprites(self): - expected_sprites = sorted((self.s1, self.s2), key=id) - sprite_list = sorted(self.ag.sprites(), key=id) - - self.assertListEqual(sprite_list, expected_sprites) - - def test_update(self): - - class test_sprite(pygame.sprite.Sprite): - sink = [] - def __init__(self, *groups): - pygame.sprite.Sprite.__init__(self, *groups) - def update(self, *args): - self.sink += args - - s = test_sprite(self.ag) - self.ag.update(1, 2, 3) - - self.assertEqual(test_sprite.sink, [1, 2, 3]) - - -################################################################################ - -# A base class to share tests between similar classes - -class LayeredGroupBase: - def test_get_layer_of_sprite(self): - expected_layer = 666 - spr = self.sprite() - self.LG.add(spr, layer=expected_layer) - layer = self.LG.get_layer_of_sprite(spr) - - self.assertEqual(len(self.LG._spritelist), 1) - self.assertEqual(layer, self.LG.get_layer_of_sprite(spr)) - self.assertEqual(layer, expected_layer) - self.assertEqual(layer, self.LG._spritelayers[spr]) - - def test_add(self): - expected_layer = self.LG._default_layer - spr = self.sprite() - self.LG.add(spr) - layer = self.LG.get_layer_of_sprite(spr) - - self.assertEqual(len(self.LG._spritelist), 1) - self.assertEqual(layer, expected_layer) - - def test_add__sprite_with_layer_attribute(self): - expected_layer = 100 - 
spr = self.sprite() - spr._layer = expected_layer - self.LG.add(spr) - layer = self.LG.get_layer_of_sprite(spr) - - self.assertEqual(len(self.LG._spritelist), 1) - self.assertEqual(layer, expected_layer) - - def test_add__passing_layer_keyword(self): - expected_layer = 100 - spr = self.sprite() - self.LG.add(spr, layer=expected_layer) - layer = self.LG.get_layer_of_sprite(spr) - - self.assertEqual(len(self.LG._spritelist), 1) - self.assertEqual(layer, expected_layer) - - def test_add__overriding_sprite_layer_attr(self): - expected_layer = 200 - spr = self.sprite() - spr._layer = 100 - self.LG.add(spr, layer=expected_layer) - layer = self.LG.get_layer_of_sprite(spr) - - self.assertEqual(len(self.LG._spritelist), 1) - self.assertEqual(layer, expected_layer) - - def test_add__adding_sprite_on_init(self): - spr = self.sprite() - lrg2 = sprite.LayeredUpdates(spr) - expected_layer = lrg2._default_layer - layer = lrg2._spritelayers[spr] - - self.assertEqual(len(lrg2._spritelist), 1) - self.assertEqual(layer, expected_layer) - - def test_add__sprite_init_layer_attr(self): - expected_layer = 20 - spr = self.sprite() - spr._layer = expected_layer - lrg2 = sprite.LayeredUpdates(spr) - layer = lrg2._spritelayers[spr] - - self.assertEqual(len(lrg2._spritelist), 1) - self.assertEqual(layer, expected_layer) - - def test_add__sprite_init_passing_layer(self): - expected_layer = 33 - spr = self.sprite() - lrg2 = sprite.LayeredUpdates(spr, layer=expected_layer) - layer = lrg2._spritelayers[spr] - - self.assertEqual(len(lrg2._spritelist), 1) - self.assertEqual(layer, expected_layer) - - def test_add__sprite_init_overiding_layer(self): - expected_layer = 33 - spr = self.sprite() - spr._layer = 55 - lrg2 = sprite.LayeredUpdates(spr, layer=expected_layer) - layer = lrg2._spritelayers[spr] - - self.assertEqual(len(lrg2._spritelist), 1) - self.assertEqual(layer, expected_layer) - - def test_add__spritelist(self): - expected_layer = self.LG._default_layer - sprite_count = 10 - sprites = 
[self.sprite() for _ in range(sprite_count)] - - self.LG.add(sprites) - - self.assertEqual(len(self.LG._spritelist), sprite_count) - - for i in range(sprite_count): - layer = self.LG.get_layer_of_sprite(sprites[i]) - - self.assertEqual(layer, expected_layer) - - def test_add__spritelist_with_layer_attr(self): - sprites = [] - sprite_and_layer_count = 10 - for i in range(sprite_and_layer_count): - sprites.append(self.sprite()) - sprites[-1]._layer = i - - self.LG.add(sprites) - - self.assertEqual(len(self.LG._spritelist), sprite_and_layer_count) - - for i in range(sprite_and_layer_count): - layer = self.LG.get_layer_of_sprite(sprites[i]) - - self.assertEqual(layer, i) - - def test_add__spritelist_passing_layer(self): - expected_layer = 33 - sprite_count = 10 - sprites = [self.sprite() for _ in range(sprite_count)] - - self.LG.add(sprites, layer=expected_layer) - - self.assertEqual(len(self.LG._spritelist), sprite_count) - - for i in range(sprite_count): - layer = self.LG.get_layer_of_sprite(sprites[i]) - - self.assertEqual(layer, expected_layer) - - def test_add__spritelist_overriding_layer(self): - expected_layer = 33 - sprites = [] - sprite_and_layer_count = 10 - for i in range(sprite_and_layer_count): - sprites.append(self.sprite()) - sprites[-1].layer = i - - self.LG.add(sprites, layer=expected_layer) - - self.assertEqual(len(self.LG._spritelist), sprite_and_layer_count) - - for i in range(sprite_and_layer_count): - layer = self.LG.get_layer_of_sprite(sprites[i]) - - self.assertEqual(layer, expected_layer) - - def test_add__spritelist_init(self): - sprite_count = 10 - sprites = [self.sprite() for _ in range(sprite_count)] - - lrg2 = sprite.LayeredUpdates(sprites) - expected_layer = lrg2._default_layer - - self.assertEqual(len(lrg2._spritelist), sprite_count) - - for i in range(sprite_count): - layer = lrg2.get_layer_of_sprite(sprites[i]) - - self.assertEqual(layer, expected_layer) - - def test_remove__sprite(self): - sprites = [] - sprite_count = 10 - for i in 
range(sprite_count): - sprites.append(self.sprite()) - sprites[-1].rect = 0 - - self.LG.add(sprites) - - self.assertEqual(len(self.LG._spritelist), sprite_count) - - for i in range(sprite_count): - self.LG.remove(sprites[i]) - - self.assertEqual(len(self.LG._spritelist), 0) - - def test_sprites(self): - sprites = [] - sprite_and_layer_count = 10 - for i in range(sprite_and_layer_count, 0, -1): - sprites.append(self.sprite()) - sprites[-1]._layer = i - - self.LG.add(sprites) - - self.assertEqual(len(self.LG._spritelist), sprite_and_layer_count) - - # Sprites should be ordered based on their layer (bottom to top), - # which is the reverse order of the sprites list. - expected_sprites = list(reversed(sprites)) - actual_sprites = self.LG.sprites() - - self.assertListEqual(actual_sprites, expected_sprites) - - def test_layers(self): - sprites = [] - expected_layers = [] - layer_count = 10 - for i in range(layer_count): - expected_layers.append(i) - for j in range(5): - sprites.append(self.sprite()) - sprites[-1]._layer = i - self.LG.add(sprites) - - layers = self.LG.layers() - - self.assertListEqual(layers, expected_layers) - - def test_add__layers_are_correct(self): - layers = [1, 4, 6, 8, 3, 6, 2, 6, 4, 5, 6, 1, 0, 9, 7, 6, 54, 8, 2, - 43, 6, 1] - for lay in layers: - self.LG.add(self.sprite(), layer=lay) - layers.sort() - - for idx, spr in enumerate(self.LG.sprites()): - layer = self.LG.get_layer_of_sprite(spr) - - self.assertEqual(layer, layers[idx]) - - def test_change_layer(self): - expected_layer = 99 - spr = self.sprite() - self.LG.add(spr, layer=expected_layer) - - self.assertEqual(self.LG._spritelayers[spr], expected_layer) - - expected_layer = 44 - self.LG.change_layer(spr, expected_layer) - - self.assertEqual(self.LG._spritelayers[spr], expected_layer) - - expected_layer = 77 - spr2 = self.sprite() - spr2.layer = 55 - self.LG.add(spr2) - self.LG.change_layer(spr2, expected_layer) - - self.assertEqual(spr2.layer, expected_layer) - - def 
test_get_top_layer(self): - layers = [1, 5, 2, 8, 4, 5, 3, 88, 23, 0] - for i in layers: - self.LG.add(self.sprite(), layer=i) - top_layer = self.LG.get_top_layer() - - self.assertEqual(top_layer, self.LG.get_top_layer()) - self.assertEqual(top_layer, max(layers)) - self.assertEqual(top_layer, max(self.LG._spritelayers.values())) - self.assertEqual(top_layer, - self.LG._spritelayers[self.LG._spritelist[-1]]) - - def test_get_bottom_layer(self): - layers = [1, 5, 2, 8, 4, 5, 3, 88, 23, 0] - for i in layers: - self.LG.add(self.sprite(), layer=i) - bottom_layer = self.LG.get_bottom_layer() - - self.assertEqual(bottom_layer, self.LG.get_bottom_layer()) - self.assertEqual(bottom_layer, min(layers)) - self.assertEqual(bottom_layer, min(self.LG._spritelayers.values())) - self.assertEqual(bottom_layer, - self.LG._spritelayers[self.LG._spritelist[0]]) - - def test_move_to_front(self): - layers = [1, 5, 2, 8, 4, 5, 3, 88, 23, 0] - for i in layers: - self.LG.add(self.sprite(), layer=i) - spr = self.sprite() - self.LG.add(spr, layer=3) - - self.assertNotEqual(spr, self.LG._spritelist[-1]) - - self.LG.move_to_front(spr) - - self.assertEqual(spr, self.LG._spritelist[-1]) - - def test_move_to_back(self): - layers = [1, 5, 2, 8, 4, 5, 3, 88, 23, 0] - for i in layers: - self.LG.add(self.sprite(), layer=i) - spr = self.sprite() - self.LG.add(spr, layer=55) - - self.assertNotEqual(spr, self.LG._spritelist[0]) - - self.LG.move_to_back(spr) - - self.assertEqual(spr, self.LG._spritelist[0]) - - def test_get_top_sprite(self): - layers = [1, 5, 2, 8, 4, 5, 3, 88, 23, 0] - for i in layers: - self.LG.add(self.sprite(), layer=i) - expected_layer = self.LG.get_top_layer() - layer = self.LG.get_layer_of_sprite(self.LG.get_top_sprite()) - - self.assertEqual(layer, expected_layer) - - def test_get_sprites_from_layer(self): - sprites = {} - layers = [1, 4, 5, 6, 3, 7, 8, 2, 1, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, - 5, 6, 7, 8, 9, 0, 1, 6, 5, 4, 3, 2] - for lay in layers: - spr = self.sprite() - 
spr._layer = lay - self.LG.add(spr) - if lay not in sprites: - sprites[lay] = [] - sprites[lay].append(spr) - - for lay in self.LG.layers(): - for spr in self.LG.get_sprites_from_layer(lay): - self.assertIn(spr, sprites[lay]) - - sprites[lay].remove(spr) - if len(sprites[lay]) == 0: - del sprites[lay] - - self.assertEqual(len(sprites.values()), 0) - - def test_switch_layer(self): - sprites1 = [] - sprites2 = [] - layers = [3, 2, 3, 2, 3, 3, 2, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 3, 2, 2, - 3, 2, 3] - for lay in layers: - spr = self.sprite() - spr._layer = lay - self.LG.add(spr) - if lay==2: - sprites1.append(spr) - else: - sprites2.append(spr) - - sprites1.sort(key=id) - sprites2.sort(key=id) - layer2_sprites = sorted(self.LG.get_sprites_from_layer(2), key=id) - layer3_sprites = sorted(self.LG.get_sprites_from_layer(3), key=id) - - self.assertListEqual(sprites1, layer2_sprites) - self.assertListEqual(sprites2, layer3_sprites) - self.assertEqual(len(self.LG), len(sprites1) + len(sprites2)) - - self.LG.switch_layer(2, 3) - layer2_sprites = sorted(self.LG.get_sprites_from_layer(2), key=id) - layer3_sprites = sorted(self.LG.get_sprites_from_layer(3), key=id) - - self.assertListEqual(sprites1, layer3_sprites) - self.assertListEqual(sprites2, layer2_sprites) - self.assertEqual(len(self.LG), len(sprites1) + len(sprites2)) - - def test_copy(self): - self.LG.add(self.sprite()) - spr = self.LG.sprites()[0] - lg_copy = self.LG.copy() - - self.assertIsInstance(lg_copy, type(self.LG)) - self.assertIn(spr, lg_copy) - self.assertIn(lg_copy, spr.groups()) - - -########################## LAYERED RENDER GROUP TESTS ########################## - -class LayeredUpdatesTypeTest__SpriteTest(LayeredGroupBase, unittest.TestCase): - sprite = sprite.Sprite - - def setUp(self): - self.LG = sprite.LayeredUpdates() - -class LayeredUpdatesTypeTest__DirtySprite(LayeredGroupBase, unittest.TestCase): - sprite = sprite.DirtySprite - - def setUp(self): - self.LG = sprite.LayeredUpdates() - -class 
LayeredDirtyTypeTest__DirtySprite(LayeredGroupBase, unittest.TestCase): - sprite = sprite.DirtySprite - - def setUp(self): - self.LG = sprite.LayeredDirty() - - def test_repaint_rect(self): - group = self.LG - surface = pygame.Surface((100, 100)) - - group.repaint_rect(pygame.Rect(0, 0, 100, 100)) - group.draw(surface) - - def test_repaint_rect_with_clip(self): - group = self.LG - surface = pygame.Surface((100, 100)) - - group.set_clip(pygame.Rect(0, 0, 100, 100)) - group.repaint_rect(pygame.Rect(0, 0, 100, 100)) - group.draw(surface) - - def _nondirty_intersections_redrawn(self, use_source_rect=False): - # Helper method to ensure non-dirty sprites are redrawn correctly. - # - # Parameters: - # use_source_rect - allows non-dirty sprites to be tested - # with (True) and without (False) a source_rect - # - # This test was written to reproduce the behavior seen in issue #898. - # A non-dirty sprite (using source_rect) was being redrawn incorrectly - # after a dirty sprite intersected with it. - # - # This test does the following. - # 1. Creates a surface filled with white. Also creates an image_source - # with a default fill color of yellow and adds 2 images to it - # (red and blue rectangles). - # 2. Creates 2 DirtySprites (red_sprite and blue_sprite) using the - # image_source and adds them to a LayeredDirty group. - # 3. Moves the red_sprite and calls LayeredDirty.draw(surface) a few - # times. - # 4. Checks to make sure the sprites were redrawn correctly. - RED = pygame.Color('red') - BLUE = pygame.Color('blue') - WHITE = pygame.Color('white') - YELLOW = pygame.Color('yellow') - - surface = pygame.Surface((60, 80)) - surface.fill(WHITE) - start_pos = (10, 10) - - # These rects define each sprite's image area in the image_source. - red_sprite_source = pygame.Rect((45, 0), (5, 4)) - blue_sprite_source = pygame.Rect((0, 40), (20, 10)) - - # Create a source image/surface. 
- image_source = pygame.Surface((50, 50)) - image_source.fill(YELLOW) - image_source.fill(RED, red_sprite_source) - image_source.fill(BLUE, blue_sprite_source) - - # The blue_sprite is stationary and will not reset its dirty flag. It - # will be the non-dirty sprite in this test. Its values are dependent - # on the use_source_rect flag. - blue_sprite = pygame.sprite.DirtySprite(self.LG) - - if use_source_rect: - blue_sprite.image = image_source - # The rect is a bit smaller than the source_rect to make sure - # LayeredDirty.draw() is using the correct dimensions. - blue_sprite.rect = pygame.Rect(start_pos, - (blue_sprite_source.w - 7, blue_sprite_source.h - 7)) - blue_sprite.source_rect = blue_sprite_source - start_x, start_y = blue_sprite.rect.topleft - end_x = start_x + blue_sprite.source_rect.w - end_y = start_y + blue_sprite.source_rect.h - else: - blue_sprite.image = image_source.subsurface(blue_sprite_source) - blue_sprite.rect = pygame.Rect(start_pos, blue_sprite_source.size) - start_x, start_y = blue_sprite.rect.topleft - end_x, end_y = blue_sprite.rect.bottomright - - # The red_sprite is moving and will always be dirty. - red_sprite = pygame.sprite.DirtySprite(self.LG) - red_sprite.image = image_source - red_sprite.rect = pygame.Rect(start_pos, red_sprite_source.size) - red_sprite.source_rect = red_sprite_source - red_sprite.dirty = 2 - - # Draw the red_sprite as it moves a few steps. - for _ in range(4): - red_sprite.rect.move_ip(2, 1) - - # This is the method being tested. - self.LG.draw(surface) - - # Check colors where the blue_sprite is drawn. We expect red where the - # red_sprite is drawn over the blue_sprite, but the rest should be - # blue. - surface.lock() # Lock surface for possible speed up. 
- try: - for y in range(start_y, end_y): - for x in range(start_x, end_x): - if red_sprite.rect.collidepoint(x, y): - expected_color = RED - else: - expected_color = BLUE - - color = surface.get_at((x, y)) - - self.assertEqual(color, expected_color, - 'pos=({}, {})'.format(x, y)) - finally: - surface.unlock() - - def test_nondirty_intersections_redrawn(self): - """Ensure non-dirty sprites are correctly redrawn - when dirty sprites intersect with them. - """ - self._nondirty_intersections_redrawn() - - def test_nondirty_intersections_redrawn__with_source_rect(self): - """Ensure non-dirty sprites using source_rects are correctly redrawn - when dirty sprites intersect with them. - - Related to issue #898. - """ - self._nondirty_intersections_redrawn(True) - - -############################### SPRITE BASE CLASS ############################## -# -# tests common between sprite classes - -class SpriteBase: - def setUp(self): - self.groups = [] - for Group in self.Groups: - self.groups.append(Group()) - - self.sprite = self.Sprite() - - def test_add_internal(self): - - for g in self.groups: - self.sprite.add_internal(g) - - for g in self.groups: - self.assertIn(g, self.sprite.groups()) - - def test_remove_internal(self): - - for g in self.groups: - self.sprite.add_internal(g) - - for g in self.groups: - self.sprite.remove_internal(g) - - for g in self.groups: - self.assertFalse(g in self.sprite.groups()) - - def test_update(self): - - class test_sprite(pygame.sprite.Sprite): - sink = [] - def __init__(self, *groups): - pygame.sprite.Sprite.__init__(self, *groups) - def update(self, *args): - self.sink += args - - s = test_sprite() - s.update(1, 2, 3) - - self.assertEqual(test_sprite.sink, [1, 2, 3]) - - def test___init____added_to_groups_passed(self): - expected_groups = sorted(self.groups, key=id) - sprite = self.Sprite(self.groups) - groups = sorted(sprite.groups(), key=id) - - self.assertListEqual(groups, expected_groups) - - def test_add(self): - expected_groups = 
sorted(self.groups, key=id) - self.sprite.add(self.groups) - groups = sorted(self.sprite.groups(), key=id) - - self.assertListEqual(groups, expected_groups) - - def test_alive(self): - self.assertFalse(self.sprite.alive(), - "Sprite should not be alive if in no groups") - - self.sprite.add(self.groups) - - self.assertTrue(self.sprite.alive()) - - def test_groups(self): - for i, g in enumerate(self.groups): - expected_groups = sorted(self.groups[:i+1], key=id) - self.sprite.add(g) - groups = sorted(self.sprite.groups(), key=id) - - self.assertListEqual(groups, expected_groups) - - def test_kill(self): - self.sprite.add(self.groups) - - self.assertTrue(self.sprite.alive()) - - self.sprite.kill() - - self.assertListEqual(self.sprite.groups(), []) - self.assertFalse(self.sprite.alive()) - - def test_remove(self): - self.sprite.add(self.groups) - self.sprite.remove(self.groups) - - self.assertListEqual(self.sprite.groups(), []) - - -############################## SPRITE CLASS TESTS ############################## - -class SpriteTypeTest(SpriteBase, unittest.TestCase): - Sprite = sprite.Sprite - - Groups = [ sprite.Group, - sprite.LayeredUpdates, - sprite.RenderUpdates, - sprite.OrderedUpdates, ] - -class DirtySpriteTypeTest(SpriteBase, unittest.TestCase): - Sprite = sprite.DirtySprite - - Groups = [ sprite.Group, - sprite.LayeredUpdates, - sprite.RenderUpdates, - sprite.OrderedUpdates, - sprite.LayeredDirty, ] - -############################## BUG TESTS ####################################### - -class SingleGroupBugsTest(unittest.TestCase): - def test_memoryleak_bug(self): - # For memoryleak bug posted to mailing list by Tobias Steinrücken on 16/11/10. - # Fixed in revision 2953. 
- - import weakref - import gc - - class MySprite(sprite.Sprite): - def __init__(self, *args, **kwargs): - sprite.Sprite.__init__(self, *args, **kwargs) - self.image = pygame.Surface( (2, 4), 0, 24 ) - self.rect = self.image.get_rect() - - g = sprite.GroupSingle() - screen = pygame.Surface((4, 8), 0, 24) - s = MySprite() - r = weakref.ref(s) - g.sprite = s - del s - gc.collect() - - self.assertIsNotNone(r()) - - g.update() - g.draw(screen) - g.sprite = MySprite() - gc.collect() - - self.assertIsNone(r()) - - -################################################################################ - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/surface_test.py b/venv/lib/python3.7/site-packages/pygame/tests/surface_test.py deleted file mode 100644 index 6d5a917..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/surface_test.py +++ /dev/null @@ -1,2538 +0,0 @@ -import os - -import unittest -from pygame.tests import test_utils -from pygame.tests.test_utils import ( - example_path, AssertRaisesRegexMixin, SurfaceSubclass) -try: - from pygame.tests.test_utils.arrinter import * -except (ImportError, NameError): - pass - -import pygame -from pygame.locals import * -from pygame.compat import xrange_, as_bytes, as_unicode -from pygame.bufferproxy import BufferProxy - -import platform -import gc -import weakref -import ctypes - -IS_PYPY = 'PyPy' == platform.python_implementation() - -def intify(i): - """If i is a long, cast to an int while preserving the bits""" - if 0x80000000 & i: - return int((0xFFFFFFFF & i)) - return i - -def longify(i): - """If i is an int, cast to a long while preserving the bits""" - if i < 0: - return 0xFFFFFFFF & i - return long(i) - - -class SurfaceTypeTest(AssertRaisesRegexMixin, unittest.TestCase): - def test_surface__pixel_format_as_surface_subclass(self): - """Ensure a subclassed surface can be used for pixel format - when creating a new surface.""" - expected_depth = 16 - 
expected_flags = SRCALPHA - expected_size = (13, 37) - depth_surface = SurfaceSubclass((11, 21), expected_flags, - expected_depth) - - surface = pygame.Surface(expected_size, 0, depth_surface) - - self.assertIsNot(surface, depth_surface) - self.assertIsInstance(surface, pygame.Surface) - self.assertNotIsInstance(surface, SurfaceSubclass) - self.assertEqual(surface.get_size(), expected_size) - self.assertEqual(surface.get_flags(), expected_flags) - self.assertEqual(surface.get_bitsize(), expected_depth) - - def test_set_clip( self ): - """ see if surface.set_clip(None) works correctly. - """ - s = pygame.Surface((800, 600)) - r = pygame.Rect(10, 10, 10, 10) - s.set_clip(r) - r.move_ip(10, 0) - s.set_clip(None) - res = s.get_clip() - # this was garbled before. - self.assertEqual(res[0], 0) - self.assertEqual(res[2], 800) - - def test_print(self): - surf = pygame.Surface((70,70), 0, 32) - self.assertEqual(repr(surf), '') - - def test_keyword_arguments(self): - surf = pygame.Surface((70,70), flags=SRCALPHA, depth=32) - self.assertEqual(surf.get_flags() & SRCALPHA, SRCALPHA) - self.assertEqual(surf.get_bitsize(), 32) - - # sanity check to make sure the check below is valid - surf_16 = pygame.Surface((70,70), 0, 16) - self.assertEqual(surf_16.get_bytesize(), 2) - - # try again with an argument list - surf_16 = pygame.Surface((70,70), depth=16) - self.assertEqual(surf_16.get_bytesize(), 2) - - def test_set_at(self): - - #24bit surfaces - s = pygame.Surface( (100, 100), 0, 24) - s.fill((0,0,0)) - - # set it with a tuple. - s.set_at((0,0), (10,10,10, 255)) - r = s.get_at((0,0)) - self.assertIsInstance(r, pygame.Color) - self.assertEqual(r, (10,10,10, 255)) - - # try setting a color with a single integer. - s.fill((0,0,0,255)) - s.set_at ((10, 1), 0x0000FF) - r = s.get_at((10,1)) - self.assertEqual(r, (0,0,255, 255)) - - - def test_SRCALPHA(self): - # has the flag been passed in ok? 
- surf = pygame.Surface((70,70), SRCALPHA, 32) - self.assertEqual(surf.get_flags() & SRCALPHA, SRCALPHA) - - #24bit surfaces can not have SRCALPHA. - self.assertRaises(ValueError, pygame.Surface, (100, 100), pygame.SRCALPHA, 24) - - # if we have a 32 bit surface, the SRCALPHA should have worked too. - surf2 = pygame.Surface((70,70), SRCALPHA) - if surf2.get_bitsize() == 32: - self.assertEqual(surf2.get_flags() & SRCALPHA, SRCALPHA) - - def test_masks(self): - def make_surf(bpp, flags, masks): - pygame.Surface((10, 10), flags, bpp, masks) - # With some masks SDL_CreateRGBSurface does not work properly. - masks = (0xFF000000, 0xFF0000, 0xFF00, 0) - self.assertEqual(make_surf(32, 0, masks), None) - # For 24 and 32 bit surfaces Pygame assumes no losses. - masks = (0x7F0000, 0xFF00, 0xFF, 0) - self.assertRaises(ValueError, make_surf, 24, 0, masks) - self.assertRaises(ValueError, make_surf, 32, 0, masks) - # What contiguous bits in a mask. - masks = (0x6F0000, 0xFF00, 0xFF, 0) - self.assertRaises(ValueError, make_surf, 32, 0, masks) - - def test_get_bounding_rect (self): - surf = pygame.Surface ((70, 70), SRCALPHA, 32) - surf.fill((0,0,0,0)) - bound_rect = surf.get_bounding_rect() - self.assertEqual(bound_rect.width, 0) - self.assertEqual(bound_rect.height, 0) - surf.set_at((30,30),(255,255,255,1)) - bound_rect = surf.get_bounding_rect() - self.assertEqual(bound_rect.left, 30) - self.assertEqual(bound_rect.top, 30) - self.assertEqual(bound_rect.width, 1) - self.assertEqual(bound_rect.height, 1) - surf.set_at((29,29),(255,255,255,1)) - bound_rect = surf.get_bounding_rect() - self.assertEqual(bound_rect.left, 29) - self.assertEqual(bound_rect.top, 29) - self.assertEqual(bound_rect.width, 2) - self.assertEqual(bound_rect.height, 2) - - surf = pygame.Surface ((70, 70), 0, 24) - surf.fill((0,0,0)) - bound_rect = surf.get_bounding_rect() - self.assertEqual(bound_rect.width, surf.get_width()) - self.assertEqual(bound_rect.height, surf.get_height()) - - 
surf.set_colorkey((0,0,0)) - bound_rect = surf.get_bounding_rect() - self.assertEqual(bound_rect.width, 0) - self.assertEqual(bound_rect.height, 0) - surf.set_at((30,30),(255,255,255)) - bound_rect = surf.get_bounding_rect() - self.assertEqual(bound_rect.left, 30) - self.assertEqual(bound_rect.top, 30) - self.assertEqual(bound_rect.width, 1) - self.assertEqual(bound_rect.height, 1) - surf.set_at((60,60),(255,255,255)) - bound_rect = surf.get_bounding_rect() - self.assertEqual(bound_rect.left, 30) - self.assertEqual(bound_rect.top, 30) - self.assertEqual(bound_rect.width, 31) - self.assertEqual(bound_rect.height, 31) - - # Issue #180 - pygame.display.init() - try: - surf = pygame.Surface((4, 1), 0, 8) - surf.fill((255, 255, 255)) - surf.get_bounding_rect() # Segfault. - finally: - pygame.display.quit() - - def test_copy(self): - """Ensure a surface can be copied.""" - color = (25, 25, 25, 25) - s1 = pygame.Surface((32,32), pygame.SRCALPHA, 32) - s1.fill(color) - - s2 = s1.copy() - - s1rect = s1.get_rect() - s2rect = s2.get_rect() - - self.assertEqual(s1rect.size, s2rect.size) - self.assertEqual(s2.get_at((10,10)), color) - - def test_fill(self): - """Ensure a surface can be filled.""" - color = (25, 25, 25, 25) - fill_rect = pygame.Rect(0, 0, 16, 16) - s1 = pygame.Surface((32,32), pygame.SRCALPHA, 32) - s1.fill(color, fill_rect) - - for pt in test_utils.rect_area_pts(fill_rect): - self.assertEqual(s1.get_at(pt), color) - - for pt in test_utils.rect_outer_bounds(fill_rect): - self.assertNotEqual(s1.get_at(pt), color) - - def test_fill_negative_coordinates(self): - - # negative coordinates should be clipped by fill, and not draw outside the surface. - color = (25, 25, 25, 25) - color2 = (20, 20, 20, 25) - fill_rect = pygame.Rect(-10, -10, 16, 16) - - s1 = pygame.Surface((32,32), pygame.SRCALPHA, 32) - r1 = s1.fill(color, fill_rect) - c = s1.get_at((0,0)) - self.assertEqual(c, color) - - # make subsurface in the middle to test it doesn't over write. 
- s2 = s1.subsurface((5, 5, 5, 5)) - r2 = s2.fill(color2, (-3, -3, 5, 5)) - c2 = s1.get_at((4,4)) - self.assertEqual(c, color) - - # rect returns the area we actually fill. - r3 = s2.fill(color2, (-30, -30, 5, 5)) - # since we are using negative coords, it should be an zero sized rect. - self.assertEqual(tuple(r3), (0, 0, 0, 0)) - - def test_fill_keyword_args(self): - """Ensure fill() accepts keyword arguments.""" - color = (1, 2, 3, 255) - area = (1, 1, 2, 2) - s1 = pygame.Surface((4, 4), 0, 32) - s1.fill(special_flags=pygame.BLEND_ADD, color=color, rect=area) - - self.assertEqual(s1.get_at((0, 0)), (0, 0, 0, 255)) - self.assertEqual(s1.get_at((1, 1)), color) - - ######################################################################## - - def test_get_alpha(self): - """Ensure a surface's alpha value can be retrieved.""" - s1 = pygame.Surface((32,32), pygame.SRCALPHA, 32) - - self.assertEqual(s1.get_alpha(), 255) - - for alpha in (0, 32, 127, 255): - s1.set_alpha(alpha) - for t in range(4): - s1.set_alpha(s1.get_alpha()) - - self.assertEqual(s1.get_alpha(), alpha) - - ######################################################################## - - def test_get_bytesize(self): - """Ensure a surface's bit and byte sizes can be retrieved.""" - depth = 32 - depth_bytes = 4 - s1 = pygame.Surface((32, 32), pygame.SRCALPHA, depth) - - self.assertEqual(s1.get_bytesize(), depth_bytes) - self.assertEqual(s1.get_bitsize(), depth) - - ######################################################################## - - def test_get_flags(self): - """Ensure a surface's flags can be retrieved.""" - s1 = pygame.Surface((32,32), pygame.SRCALPHA, 32) - - self.assertEqual(s1.get_flags(), pygame.SRCALPHA) - - ######################################################################## - - def test_get_parent(self): - """Ensure a surface's parent can be retrieved.""" - parent = pygame.Surface((16, 16)) - child = parent.subsurface((0,0,5,5)) - - self.assertIs(child.get_parent(), parent) - - 
######################################################################## - - def test_get_rect(self): - """Ensure a surface's rect can be retrieved.""" - size = (16, 16) - surf = pygame.Surface(size) - rect = surf.get_rect() - - self.assertEqual(rect.size, size) - - ######################################################################## - - def test_get_width__size_and_height(self): - """Ensure a surface's size, width and height can be retrieved.""" - for w in xrange_(0, 255, 32): - for h in xrange_(0, 127, 15): - s = pygame.Surface((w, h)) - self.assertEqual(s.get_width(), w) - self.assertEqual(s.get_height(), h) - self.assertEqual(s.get_size(), (w, h)) - - def test_get_view(self): - """Ensure a buffer view of the surface's pixels can be retrieved.""" - # Check that BufferProxys are returned when array depth is supported, - # ValueErrors returned otherwise. - Error = ValueError - s = pygame.Surface((5, 7), 0, 8) - v2 = s.get_view('2') - - self.assertRaises(Error, s.get_view, '0') - self.assertRaises(Error, s.get_view, '1') - self.assertIsInstance(v2, BufferProxy) - self.assertRaises(Error, s.get_view, '3') - - s = pygame.Surface((8, 7), 0, 8) - length = s.get_bytesize() * s.get_width() * s.get_height() - v0 = s.get_view('0') - v1 = s.get_view('1') - - self.assertIsInstance(v0, BufferProxy) - self.assertEqual(v0.length, length) - self.assertIsInstance(v1, BufferProxy) - self.assertEqual(v1.length, length) - - s = pygame.Surface((5, 7), 0, 16) - v2 = s.get_view('2') - - self.assertRaises(Error, s.get_view, '0') - self.assertRaises(Error, s.get_view, '1') - self.assertIsInstance(v2, BufferProxy) - self.assertRaises(Error, s.get_view, '3') - - s = pygame.Surface((8, 7), 0, 16) - length = s.get_bytesize() * s.get_width() * s.get_height() - v0 = s.get_view('0') - v1 = s.get_view('1') - - self.assertIsInstance(v0, BufferProxy) - self.assertEqual(v0.length, length) - self.assertIsInstance(v1, BufferProxy) - self.assertEqual(v1.length, length) - - s = pygame.Surface((5, 
7), pygame.SRCALPHA, 16) - v2 = s.get_view('2') - - self.assertIsInstance(v2, BufferProxy) - self.assertRaises(Error, s.get_view, '3') - - s = pygame.Surface((5, 7), 0, 24) - v2 = s.get_view('2') - v3 = s.get_view('3') - - self.assertRaises(Error, s.get_view, '0') - self.assertRaises(Error, s.get_view, '1') - self.assertIsInstance(v2, BufferProxy) - self.assertIsInstance(v3, BufferProxy) - - s = pygame.Surface((8, 7), 0, 24) - length = s.get_bytesize() * s.get_width() * s.get_height() - v0 = s.get_view('0') - v1 = s.get_view('1') - - self.assertIsInstance(v0, BufferProxy) - self.assertEqual(v0.length, length) - self.assertIsInstance(v1, BufferProxy) - self.assertEqual(v1.length, length) - - s = pygame.Surface((5, 7), 0, 32) - length = s.get_bytesize() * s.get_width() * s.get_height() - v0 = s.get_view('0') - v1 = s.get_view('1') - v2 = s.get_view('2') - v3 = s.get_view('3') - - self.assertIsInstance(v0, BufferProxy) - self.assertEqual(v0.length, length) - self.assertIsInstance(v1, BufferProxy) - self.assertEqual(v1.length, length) - self.assertIsInstance(v2, BufferProxy) - self.assertIsInstance(v3, BufferProxy) - - s2 = s.subsurface((0, 0, 4, 7)) - - self.assertRaises(Error, s2.get_view, '0') - self.assertRaises(Error, s2.get_view, '1') - - s2 = None - s = pygame.Surface((5, 7), pygame.SRCALPHA, 32) - - for kind in ('2', '3', 'a', 'A', 'r', 'R', 'g', 'G', 'b', 'B'): - self.assertIsInstance(s.get_view(kind), BufferProxy) - - # Check default argument value: '2' - s = pygame.Surface((2, 4), 0, 32) - v = s.get_view() - if not IS_PYPY: - ai = ArrayInterface(v) - self.assertEqual(ai.nd, 2) - - # Check locking. - s = pygame.Surface((2, 4), 0, 32) - - self.assertFalse(s.get_locked()) - - v = s.get_view('2') - - self.assertFalse(s.get_locked()) - - c = v.__array_interface__ - - self.assertTrue(s.get_locked()) - - c = None - gc.collect() - - self.assertTrue(s.get_locked()) - - v = None - gc.collect() - - self.assertFalse(s.get_locked()) - - # Check invalid view kind values. 
- s = pygame.Surface((2, 4), pygame.SRCALPHA, 32) - self.assertRaises(TypeError, s.get_view, '') - self.assertRaises(TypeError, s.get_view, '9') - self.assertRaises(TypeError, s.get_view, 'RGBA') - self.assertRaises(TypeError, s.get_view, 2) - - # Both unicode and bytes strings are allowed for kind. - s = pygame.Surface((2, 4), 0, 32) - s.get_view(as_unicode('2')) - s.get_view(as_bytes('2')) - - # Garbage collection - s = pygame.Surface((2, 4), 0, 32) - weak_s = weakref.ref(s) - v = s.get_view('3') - weak_v = weakref.ref(v) - gc.collect() - self.assertTrue(weak_s() is s) - self.assertTrue(weak_v() is v) - del v - gc.collect() - self.assertTrue(weak_s() is s) - self.assertTrue(weak_v() is None) - del s - gc.collect() - self.assertTrue(weak_s() is None) - - def test_get_buffer(self): - # Check that get_buffer works for all pixel sizes and for a subsurface. - - # Check for all pixel sizes - for bitsize in [8, 16, 24, 32]: - s = pygame.Surface((5, 7), 0, bitsize) - length = s.get_pitch() * s.get_height() - v = s.get_buffer() - - self.assertIsInstance(v, BufferProxy) - self.assertEqual(v.length, length) - self.assertEqual(repr(v), "") - - # Check for a subsurface (not contiguous) - s = pygame.Surface((7, 10), 0, 32) - s2 = s.subsurface((1, 2, 5, 7)) - length = s2.get_pitch() * s2.get_height() - v = s2.get_buffer() - - self.assertIsInstance(v, BufferProxy) - self.assertEqual(v.length, length) - - # Check locking. 
- s = pygame.Surface((2, 4), 0, 32) - v = s.get_buffer() - self.assertTrue(s.get_locked()) - v = None - gc.collect() - self.assertFalse(s.get_locked()) - - OLDBUF = hasattr(pygame.bufferproxy, 'get_segcount') - - @unittest.skipIf(not OLDBUF, 'old buffer not available') - def test_get_buffer_oldbuf(self): - from pygame.bufferproxy import get_segcount, get_write_buffer - - s = pygame.Surface((2, 4), pygame.SRCALPHA, 32) - v = s.get_buffer() - segcount, buflen = get_segcount(v) - self.assertEqual(segcount, 1) - self.assertEqual(buflen, s.get_pitch() * s.get_height()) - seglen, segaddr = get_write_buffer(v, 0) - self.assertEqual(segaddr, s._pixels_address) - self.assertEqual(seglen, buflen) - - @unittest.skipIf(not OLDBUF, 'old buffer not available') - def test_get_view_oldbuf(self): - from pygame.bufferproxy import get_segcount, get_write_buffer - - s = pygame.Surface((2, 4), pygame.SRCALPHA, 32) - v = s.get_view('1') - segcount, buflen = get_segcount(v) - self.assertEqual(segcount, 8) - self.assertEqual(buflen, s.get_pitch() * s.get_height()) - seglen, segaddr = get_write_buffer(v, 7) - self.assertEqual(segaddr, s._pixels_address + s.get_bytesize() * 7) - self.assertEqual(seglen, s.get_bytesize()) - - def test_set_colorkey(self): - - # __doc__ (as of 2008-06-25) for pygame.surface.Surface.set_colorkey: - - # Surface.set_colorkey(Color, flags=0): return None - # Surface.set_colorkey(None): return None - # Set the transparent colorkey - - s = pygame.Surface((16,16), pygame.SRCALPHA, 32) - - colorkeys = ((20,189,20, 255),(128,50,50,255), (23, 21, 255,255)) - - for colorkey in colorkeys: - s.set_colorkey(colorkey) - - for t in range(4): - s.set_colorkey(s.get_colorkey()) - - self.assertEqual(s.get_colorkey(), colorkey) - - def test_set_masks(self): - s = pygame.Surface((32,32)) - r,g,b,a = s.get_masks() - s.set_masks((b,g,r,a)) - r2,g2,b2,a2 = s.get_masks() - self.assertEqual((r,g,b,a), (b2,g2,r2,a2)) - - - def test_set_shifts(self): - s = pygame.Surface((32,32)) - 
r,g,b,a = s.get_shifts() - s.set_shifts((b,g,r,a)) - r2,g2,b2,a2 = s.get_shifts() - self.assertEqual((r,g,b,a), (b2,g2,r2,a2)) - - def test_blit_keyword_args(self): - color = (1, 2, 3, 255) - s1 = pygame.Surface((4, 4), 0, 32) - s2 = pygame.Surface((2, 2), 0, 32) - s2.fill((1, 2, 3)) - s1.blit(special_flags=BLEND_ADD, source=s2, - dest=(1, 1), area=s2.get_rect()) - self.assertEqual(s1.get_at((0, 0)), (0, 0, 0, 255)) - self.assertEqual(s1.get_at((1, 1)), color) - - def todo_test_blit(self): - # __doc__ (as of 2008-08-02) for pygame.surface.Surface.blit: - - # Surface.blit(source, dest, area=None, special_flags = 0): return Rect - # draw one image onto another - # - # Draws a source Surface onto this Surface. The draw can be positioned - # with the dest argument. Dest can either be pair of coordinates - # representing the upper left corner of the source. A Rect can also be - # passed as the destination and the topleft corner of the rectangle - # will be used as the position for the blit. The size of the - # destination rectangle does not effect the blit. - # - # An optional area rectangle can be passed as well. This represents a - # smaller portion of the source Surface to draw. - # - # An optional special flags is for passing in new in 1.8.0: BLEND_ADD, - # BLEND_SUB, BLEND_MULT, BLEND_MIN, BLEND_MAX new in 1.8.1: - # BLEND_RGBA_ADD, BLEND_RGBA_SUB, BLEND_RGBA_MULT, BLEND_RGBA_MIN, - # BLEND_RGBA_MAX BLEND_RGB_ADD, BLEND_RGB_SUB, BLEND_RGB_MULT, - # BLEND_RGB_MIN, BLEND_RGB_MAX With other special blitting flags - # perhaps added in the future. - # - # The return rectangle is the area of the affected pixels, excluding - # any pixels outside the destination Surface, or outside the clipping - # area. - # - # Pixel alphas will be ignored when blitting to an 8 bit Surface. - # special_flags new in pygame 1.8. 
- - self.fail() - - def test_blit__SRCALPHA_opaque_source(self): - src = pygame.Surface( (256,256), SRCALPHA ,32) - dst = src.copy() - - for i, j in test_utils.rect_area_pts(src.get_rect()): - dst.set_at( (i,j), (i,0,0,j) ) - src.set_at( (i,j), (0,i,0,255) ) - - dst.blit(src, (0,0)) - - for pt in test_utils.rect_area_pts(src.get_rect()): - self.assertEqual(dst.get_at(pt)[1], src.get_at(pt)[1]) - - def todo_test_blit__blit_to_self(self): #TODO - src = pygame.Surface( (256,256), SRCALPHA, 32) - rect = src.get_rect() - - for pt, color in test_utils.gradient(rect.width, rect.height): - src.set_at(pt, color) - - src.blit(src, (0, 0)) - - def todo_test_blit__SRCALPHA_to_SRCALPHA_non_zero(self): #TODO - # " There is no unit test for blitting a SRCALPHA source with non-zero - # alpha to a SRCALPHA destination with non-zero alpha " LL - - w,h = size = 32,32 - - s = pygame.Surface(size, pygame.SRCALPHA, 32) - s2 = s.copy() - - s.fill((32,32,32,111)) - s2.fill((32,32,32,31)) - - s.blit(s2, (0,0)) - - # TODO: - # what is the correct behaviour ?? should it blend? what algorithm? - - self.assertEqual(s.get_at((0,0)), (32,32,32,31)) - - def test_blit__SRCALPHA32_to_8(self): - # Bug: fatal - # SDL_DisplayConvert segfaults when video is uninitialized. - target = pygame.Surface((11, 8), 0, 8) - color = target.get_palette_at(2) - source = pygame.Surface((1, 1), pygame.SRCALPHA, 32) - source.set_at((0, 0), color) - target.blit(source, (0, 0)) - - @unittest.skipIf(os.environ.get('SDL_VIDEODRIVER') == 'dummy', - 'requires a non-"dummy" SDL_VIDEODRIVER') - def test_image_convert_bug_131(self): - # Bitbucket bug #131: Unable to Surface.convert(32) some 1-bit images. 
- # https://bitbucket.org/pygame/pygame/issue/131/unable-to-surfaceconvert-32-some-1-bit - - pygame.display.init() - try: - pygame.display.set_mode((640,480)) - - im = pygame.image.load(example_path( - os.path.join("data", "city.png"))) - im2 = pygame.image.load(example_path( - os.path.join("data", "brick.png"))) - - self.assertEqual(im.get_palette(), - ((0, 0, 0, 255), (255, 255, 255, 255))) - self.assertEqual(im2.get_palette(), - ((0, 0, 0, 255), (0, 0, 0, 255))) - - self.assertEqual(repr(im.convert(32)), '') - self.assertEqual(repr(im2.convert(32)), '') - - # Ensure a palette format to palette format works. - im3 = im.convert(8) - self.assertEqual(repr(im3), '') - self.assertEqual(im3.get_palette(), im.get_palette()) - - finally: - pygame.display.quit() - - def test_convert_init(self): - """ Ensure initialization exceptions are raised - for surf.convert().""" - pygame.display.quit() - surf = pygame.Surface((1, 1)) - - self.assertRaisesRegex(pygame.error, 'display initialized', - surf.convert) - - pygame.display.init() - try: - if os.environ.get('SDL_VIDEODRIVER') != 'dummy': - try: - surf.convert(32) - surf.convert(pygame.Surface((1, 1))) - except pygame.error: - self.fail("convert() should not raise an exception here.") - - self.assertRaisesRegex(pygame.error, 'No video mode', - surf.convert) - - pygame.display.set_mode((640,480)) - try: - surf.convert() - except pygame.error: - self.fail("convert() should not raise an exception here.") - finally: - pygame.display.quit() - - def test_convert_alpha_init(self): - """ Ensure initialization exceptions are raised - for surf.convert_alpha().""" - pygame.display.quit() - surf = pygame.Surface((1, 1)) - - self.assertRaisesRegex(pygame.error, 'display initialized', - surf.convert_alpha) - - pygame.display.init() - try: - self.assertRaisesRegex(pygame.error, 'No video mode', - surf.convert_alpha) - - pygame.display.set_mode((640,480)) - try: - surf.convert_alpha() - except pygame.error: - self.fail("convert_alpha() 
should not raise an exception here.") - finally: - pygame.display.quit() - - def todo_test_convert(self): - - # __doc__ (as of 2008-08-02) for pygame.surface.Surface.convert: - - # Surface.convert(Surface): return Surface - # Surface.convert(depth, flags=0): return Surface - # Surface.convert(masks, flags=0): return Surface - # Surface.convert(): return Surface - # change the pixel format of an image - # - # Creates a new copy of the Surface with the pixel format changed. The - # new pixel format can be determined from another existing Surface. - # Otherwise depth, flags, and masks arguments can be used, similar to - # the pygame.Surface() call. - # - # If no arguments are passed the new Surface will have the same pixel - # format as the display Surface. This is always the fastest format for - # blitting. It is a good idea to convert all Surfaces before they are - # blitted many times. - # - # The converted Surface will have no pixel alphas. They will be - # stripped if the original had them. See Surface.convert_alpha() for - # preserving or creating per-pixel alphas. 
- # - - self.fail() - - def test_convert__pixel_format_as_surface_subclass(self): - """Ensure convert accepts a Surface subclass argument.""" - expected_size = (23, 17) - convert_surface = SurfaceSubclass(expected_size, 0, 32) - depth_surface = SurfaceSubclass((31, 61), 0, 32) - - pygame.display.init() - try: - surface = convert_surface.convert(depth_surface) - - self.assertIsNot(surface, depth_surface) - self.assertIsNot(surface, convert_surface) - self.assertIsInstance(surface, pygame.Surface) - self.assertIsInstance(surface, SurfaceSubclass) - self.assertEqual(surface.get_size(), expected_size) - finally: - pygame.display.quit() - - def todo_test_convert_alpha(self): - - # __doc__ (as of 2008-08-02) for pygame.surface.Surface.convert_alpha: - - # Surface.convert_alpha(Surface): return Surface - # Surface.convert_alpha(): return Surface - # change the pixel format of an image including per pixel alphas - # - # Creates a new copy of the surface with the desired pixel format. The - # new surface will be in a format suited for quick blitting to the - # given format with per pixel alpha. If no surface is given, the new - # surface will be optimized for blitting to the current display. - # - # Unlike the Surface.convert() method, the pixel format for the new - # image will not be exactly the same as the requested source, but it - # will be optimized for fast alpha blitting to the destination. - # - - self.fail() - - def test_convert_alpha__pixel_format_as_surface_subclass(self): - """Ensure convert_alpha accepts a Surface subclass argument.""" - expected_size = (23, 17) - convert_surface = SurfaceSubclass(expected_size, SRCALPHA, 32) - depth_surface = SurfaceSubclass((31, 57), SRCALPHA, 32) - - pygame.display.init() - try: - pygame.display.set_mode((60, 60)) - - # This is accepted as an argument, but its values are ignored. - # See issue #599. 
- surface = convert_surface.convert_alpha(depth_surface) - - self.assertIsNot(surface, depth_surface) - self.assertIsNot(surface, convert_surface) - self.assertIsInstance(surface, pygame.Surface) - self.assertIsInstance(surface, SurfaceSubclass) - self.assertEqual(surface.get_size(), expected_size) - finally: - pygame.display.quit() - - def todo_test_get_abs_offset(self): - - # __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_abs_offset: - - # Surface.get_abs_offset(): return (x, y) - # find the absolute position of a child subsurface inside its top level parent - # - # Get the offset position of a child subsurface inside of its top - # level parent Surface. If the Surface is not a subsurface this will - # return (0, 0). - # - - self.fail() - - def todo_test_get_abs_parent(self): - - # __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_abs_parent: - - # Surface.get_abs_parent(): return Surface - # find the top level parent of a subsurface - # - # Returns the parent Surface of a subsurface. If this is not a - # subsurface then this surface will be returned. 
- # - - self.fail() - - def test_get_at(self): - surf = pygame.Surface((2, 2), 0, 24) - c00 = pygame.Color(1, 2, 3) - c01 = pygame.Color(5, 10, 15) - c10 = pygame.Color(100, 50, 0) - c11 = pygame.Color(4, 5, 6) - surf.set_at((0, 0), c00) - surf.set_at((0, 1), c01) - surf.set_at((1, 0), c10) - surf.set_at((1, 1), c11) - c = surf.get_at((0, 0)) - self.assertIsInstance(c, pygame.Color) - self.assertEqual(c, c00) - self.assertEqual(surf.get_at((0, 1)), c01) - self.assertEqual(surf.get_at((1, 0)), c10) - self.assertEqual(surf.get_at((1, 1)), c11) - for p in [(-1, 0), (0, -1), (2, 0), (0, 2)]: - self.assertRaises(IndexError, surf.get_at, p) - - def test_get_at_mapped(self): - color = pygame.Color(10, 20, 30) - for bitsize in [8, 16, 24, 32]: - surf = pygame.Surface((2, 2), 0, bitsize) - surf.fill(color) - pixel = surf.get_at_mapped((0, 0)) - self.assertEqual(pixel, surf.map_rgb(color), - "%i != %i, bitsize: %i" % - (pixel, surf.map_rgb(color), bitsize)) - - def todo_test_get_bitsize(self): - - # __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_bitsize: - - # Surface.get_bitsize(): return int - # get the bit depth of the Surface pixel format - # - # Returns the number of bits used to represent each pixel. This value - # may not exactly fill the number of bytes used per pixel. For example - # a 15 bit Surface still requires a full 2 bytes. - # - - self.fail() - - def todo_test_get_clip(self): - - # __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_clip: - - # Surface.get_clip(): return Rect - # get the current clipping area of the Surface - # - # Return a rectangle of the current clipping area. The Surface will - # always return a valid rectangle that will never be outside the - # bounds of the image. If the Surface has had None set for the - # clipping area, the Surface will return a rectangle with the full - # area of the Surface. 
- # - - self.fail() - - def todo_test_get_colorkey(self): - surf = pygame.surface((2, 2), 0, 24) - self.assertIsNone(surf.get_colorykey()) - colorkey = pygame.Color(20, 40, 60) - surf.set_colorkey(colorkey) - ck = surf.get_colorkey() - self.assertIsInstance(ck, pygame.Color) - self.assertEqual(ck, colorkey) - - def todo_test_get_height(self): - - # __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_height: - - # Surface.get_height(): return height - # get the height of the Surface - # - # Return the height of the Surface in pixels. - - self.fail() - - def todo_test_get_locked(self): - - # __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_locked: - - # Surface.get_locked(): return bool - # test if the Surface is current locked - # - # Returns True when the Surface is locked. It doesn't matter how many - # times the Surface is locked. - # - - self.fail() - - def todo_test_get_locks(self): - - # __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_locks: - - # Surface.get_locks(): return tuple - # Gets the locks for the Surface - # - # Returns the currently existing locks for the Surface. - - self.fail() - - def todo_test_get_losses(self): - - # __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_losses: - - # Surface.get_losses(): return (R, G, B, A) - # the significant bits used to convert between a color and a mapped integer - # - # Return the least significant number of bits stripped from each color - # in a mapped integer. - # - # This value is not needed for normal Pygame usage. - - self.fail() - - def todo_test_get_masks(self): - - # __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_masks: - - # Surface.get_masks(): return (R, G, B, A) - # the bitmasks needed to convert between a color and a mapped integer - # - # Returns the bitmasks used to isolate each color in a mapped integer. - # This value is not needed for normal Pygame usage. 
- - self.fail() - - def todo_test_get_offset(self): - - # __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_offset: - - # Surface.get_offset(): return (x, y) - # find the position of a child subsurface inside a parent - # - # Get the offset position of a child subsurface inside of a parent. If - # the Surface is not a subsurface this will return (0, 0). - # - - self.fail() - - def test_get_palette(self): - pygame.display.init() - try: - palette = [Color(i, i, i) for i in range(256)] - pygame.display.set_mode((100, 50)) - surf = pygame.Surface((2, 2), 0, 8) - surf.set_palette(palette) - palette2 = surf.get_palette() - r,g,b = palette2[0] - - self.assertEqual(len(palette2), len(palette)) - for c2, c in zip(palette2, palette): - self.assertEqual(c2, c) - for c in palette2: - self.assertIsInstance(c, pygame.Color) - finally: - pygame.display.quit() - - def test_get_palette_at(self): - # See also test_get_palette - pygame.display.init() - try: - pygame.display.set_mode((100, 50)) - surf = pygame.Surface((2, 2), 0, 8) - color = pygame.Color(1, 2, 3, 255) - surf.set_palette_at(0, color) - color2 = surf.get_palette_at(0) - self.assertIsInstance(color2, pygame.Color) - self.assertEqual(color2, color) - self.assertRaises(IndexError, surf.get_palette_at, -1) - self.assertRaises(IndexError, surf.get_palette_at, 256) - finally: - pygame.display.quit() - - def todo_test_get_pitch(self): - - # __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_pitch: - - # Surface.get_pitch(): return int - # get the number of bytes used per Surface row - # - # Return the number of bytes separating each row in the Surface. - # Surfaces in video memory are not always linearly packed. Subsurfaces - # will also have a larger pitch than their real width. - # - # This value is not needed for normal Pygame usage. 
- - self.fail() - - def todo_test_get_shifts(self): - - # __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_shifts: - - # Surface.get_shifts(): return (R, G, B, A) - # the bit shifts needed to convert between a color and a mapped integer - # - # Returns the pixel shifts need to convert between each color and a - # mapped integer. - # - # This value is not needed for normal Pygame usage. - - self.fail() - - def todo_test_get_size(self): - - # __doc__ (as of 2008-08-02) for pygame.surface.Surface.get_size: - - # Surface.get_size(): return (width, height) - # get the dimensions of the Surface - # - # Return the width and height of the Surface in pixels. - - self.fail() - - def todo_test_lock(self): - - # __doc__ (as of 2008-08-02) for pygame.surface.Surface.lock: - - # Surface.lock(): return None - # lock the Surface memory for pixel access - # - # Lock the pixel data of a Surface for access. On accelerated - # Surfaces, the pixel data may be stored in volatile video memory or - # nonlinear compressed forms. When a Surface is locked the pixel - # memory becomes available to access by regular software. Code that - # reads or writes pixel values will need the Surface to be locked. - # - # Surfaces should not remain locked for more than necessary. A locked - # Surface can often not be displayed or managed by Pygame. - # - # Not all Surfaces require locking. The Surface.mustlock() method can - # determine if it is actually required. There is no performance - # penalty for locking and unlocking a Surface that does not need it. - # - # All pygame functions will automatically lock and unlock the Surface - # data as needed. If a section of code is going to make calls that - # will repeatedly lock and unlock the Surface many times, it can be - # helpful to wrap the block inside a lock and unlock pair. - # - # It is safe to nest locking and unlocking calls. The surface will - # only be unlocked after the final lock is released. 
- # - - self.fail() - - def test_map_rgb(self): - color = Color(0, 128, 255, 64) - surf = pygame.Surface((5, 5), SRCALPHA, 32) - c = surf.map_rgb(color) - self.assertEqual(surf.unmap_rgb(c), color) - - self.assertEqual(surf.get_at((0, 0)), (0, 0, 0, 0)) - surf.fill(c) - self.assertEqual(surf.get_at((0, 0)), color) - - surf.fill((0, 0, 0, 0)) - self.assertEqual(surf.get_at((0, 0)), (0, 0, 0, 0)) - surf.set_at((0, 0), c) - self.assertEqual(surf.get_at((0, 0)), color) - - def todo_test_mustlock(self): - - # __doc__ (as of 2008-08-02) for pygame.surface.Surface.mustlock: - - # Surface.mustlock(): return bool - # test if the Surface requires locking - # - # Returns True if the Surface is required to be locked to access pixel - # data. Usually pure software Surfaces do not require locking. This - # method is rarely needed, since it is safe and quickest to just lock - # all Surfaces as needed. - # - # All pygame functions will automatically lock and unlock the Surface - # data as needed. If a section of code is going to make calls that - # will repeatedly lock and unlock the Surface many times, it can be - # helpful to wrap the block inside a lock and unlock pair. 
- # - - self.fail() - - def test_set_alpha_none(self): - """surf.set_alpha(None) disables blending""" - s = pygame.Surface((1,1), SRCALPHA, 32) - s.fill((0, 255, 0, 128)) - s.set_alpha(None) - self.assertEqual(None, s.get_alpha()) - - s2 = pygame.Surface((1,1), SRCALPHA, 32) - s2.fill((255, 0, 0, 255)) - s2.blit(s, (0, 0)) - self.assertEqual(s2.get_at((0, 0))[0], 0, "the red component should be 0") - - def test_set_alpha_value(self): - """surf.set_alpha(x), where x != None, enables blending""" - s = pygame.Surface((1,1), SRCALPHA, 32) - s.fill((0, 255, 0, 128)) - s.set_alpha(255) - - s2 = pygame.Surface((1,1), SRCALPHA, 32) - s2.fill((255, 0, 0, 255)) - s2.blit(s, (0, 0)) - self.assertGreater(s2.get_at((0, 0))[0], 0, "the red component should be above 0") - - def test_palette_colorkey(self): - """ test bug discovered by robertpfeiffer - https://github.com/pygame/pygame/issues/721 - """ - surf = pygame.image.load(example_path(os.path.join("data", "alien2.png"))) - key = surf.get_colorkey() - self.assertEqual(surf.get_palette()[surf.map_rgb(key)], key) - - def test_palette_colorkey_set_px(self): - surf = pygame.image.load(example_path(os.path.join("data", "alien2.png"))) - key = surf.get_colorkey() - surf.set_at((0, 0), key) - self.assertEqual(surf.get_at((0, 0)), key) - - def test_palette_colorkey_fill(self): - surf = pygame.image.load(example_path(os.path.join("data", "alien2.png"))) - key = surf.get_colorkey() - surf.fill(key) - self.assertEqual(surf.get_at((0, 0)), key) - - def test_set_palette(self): - palette = [pygame.Color(i, i, i) for i in range(256)] - palette[10] = tuple(palette[10]) # 4 element tuple - palette[11] = tuple(palette[11])[0:3] # 3 element tuple - - surf = pygame.Surface((2, 2), 0, 8) - pygame.display.init() - try: - pygame.display.set_mode((100, 50)) - surf.set_palette(palette) - for i in range(256): - self.assertEqual(surf.map_rgb(palette[i]), i, - "palette color %i" % (i,)) - c = palette[i] - surf.fill(c) - self.assertEqual(surf.get_at((0, 
0)), c, - "palette color %i" % (i,)) - for i in range(10): - palette[i] = pygame.Color(255 - i, 0, 0) - surf.set_palette(palette[0:10]) - for i in range(256): - self.assertEqual(surf.map_rgb(palette[i]), i, - "palette color %i" % (i,)) - c = palette[i] - surf.fill(c) - self.assertEqual(surf.get_at((0, 0)), c, - "palette color %i" % (i,)) - self.assertRaises(ValueError, surf.set_palette, - [Color(1, 2, 3, 254)]) - self.assertRaises(ValueError, surf.set_palette, - (1, 2, 3, 254)) - finally: - pygame.display.quit() - - def test_set_palette__fail(self): - pygame.init() - palette = 256 * [(10, 20, 30)] - surf = pygame.Surface((2, 2), 0, 32) - self.assertRaises(pygame.error, surf.set_palette, palette) - pygame.quit() - - def test_set_palette_at(self): - pygame.display.init() - try: - pygame.display.set_mode((100, 50)) - surf = pygame.Surface((2, 2), 0, 8) - original = surf.get_palette_at(10) - replacement = Color(1, 1, 1, 255) - if replacement == original: - replacement = Color(2, 2, 2, 255) - surf.set_palette_at(10, replacement) - self.assertEqual(surf.get_palette_at(10), replacement) - next = tuple(original) - surf.set_palette_at(10, next) - self.assertEqual(surf.get_palette_at(10), next) - next = tuple(original)[0:3] - surf.set_palette_at(10, next) - self.assertEqual(surf.get_palette_at(10), next) - self.assertRaises(IndexError, - surf.set_palette_at, - 256, replacement) - self.assertRaises(IndexError, - surf.set_palette_at, - -1, replacement) - finally: - pygame.display.quit() - - def test_subsurface(self): - - # __doc__ (as of 2008-08-02) for pygame.surface.Surface.subsurface: - - # Surface.subsurface(Rect): return Surface - # create a new surface that references its parent - # - # Returns a new Surface that shares its pixels with its new parent. - # The new Surface is considered a child of the original. Modifications - # to either Surface pixels will effect each other. Surface information - # like clipping area and color keys are unique to each Surface. 
- # - # The new Surface will inherit the palette, color key, and alpha - # settings from its parent. - # - # It is possible to have any number of subsurfaces and subsubsurfaces - # on the parent. It is also possible to subsurface the display Surface - # if the display mode is not hardware accelerated. - # - # See the Surface.get_offset(), Surface.get_parent() to learn more - # about the state of a subsurface. - # - - surf = pygame.Surface((16, 16)) - s = surf.subsurface(0,0,1,1) - s = surf.subsurface((0,0,1,1)) - - #s = surf.subsurface((0,0,1,1), 1) - # This form is not acceptable. - #s = surf.subsurface(0,0,10,10, 1) - - self.assertRaises(ValueError, surf.subsurface, (0,0,1,1,666)) - - - self.assertEqual(s.get_shifts(), surf.get_shifts()) - self.assertEqual(s.get_masks(), surf.get_masks()) - self.assertEqual(s.get_losses(), surf.get_losses()) - - # Issue 2 at Bitbucket.org/pygame/pygame - surf = pygame.Surface.__new__(pygame.Surface) - self.assertRaises(pygame.error, surf.subsurface, (0, 0, 0, 0)) - - def todo_test_unlock(self): - - # __doc__ (as of 2008-08-02) for pygame.surface.Surface.unlock: - - # Surface.unlock(): return None - # unlock the Surface memory from pixel access - # - # Unlock the Surface pixel data after it has been locked. The unlocked - # Surface can once again be drawn and managed by Pygame. See the - # Surface.lock() documentation for more details. - # - # All pygame functions will automatically lock and unlock the Surface - # data as needed. If a section of code is going to make calls that - # will repeatedly lock and unlock the Surface many times, it can be - # helpful to wrap the block inside a lock and unlock pair. - # - # It is safe to nest locking and unlocking calls. The surface will - # only be unlocked after the final lock is released. - # - - self.fail() - - def test_unmap_rgb(self): - # Special case, 8 bit-per-pixel surface (has a palette). - surf = pygame.Surface((2, 2), 0, 8) - c = (1, 1, 1) # Unlikely to be in a default palette. 
- i = 67 - pygame.display.init() - try: - pygame.display.set_mode((100, 50)) - surf.set_palette_at(i, c) - unmapped_c = surf.unmap_rgb(i) - self.assertEqual(unmapped_c, c) - # Confirm it is a Color instance - self.assertIsInstance(unmapped_c, pygame.Color) - finally: - pygame.display.quit() - - # Remaining, non-pallete, cases. - c = (128, 64, 12, 255) - formats = [(0, 16), (0, 24), (0, 32), - (SRCALPHA, 16), (SRCALPHA, 32)] - for flags, bitsize in formats: - surf = pygame.Surface((2, 2), flags, bitsize) - unmapped_c = surf.unmap_rgb(surf.map_rgb(c)) - surf.fill(c) - comparison_c = surf.get_at((0, 0)) - self.assertEqual(unmapped_c, comparison_c, - "%s != %s, flags: %i, bitsize: %i" % - (unmapped_c, comparison_c, flags, bitsize)) - # Confirm it is a Color instance - self.assertIsInstance(unmapped_c, pygame.Color) - - def test_scroll(self): - scrolls = [(8, 2, 3), - (16, 2, 3), - (24, 2, 3), - (32, 2, 3), - (32, -1, -3), - (32, 0, 0), - (32, 11, 0), - (32, 0, 11), - (32, -11, 0), - (32, 0, -11), - (32, -11, 2), - (32, 2, -11)] - for bitsize, dx, dy in scrolls: - surf = pygame.Surface((10, 10), 0, bitsize) - surf.fill((255, 0, 0)) - surf.fill((0, 255, 0), (2, 2, 2, 2,)) - comp = surf.copy() - comp.blit(surf, (dx, dy)) - surf.scroll(dx, dy) - w, h = surf.get_size() - for x in range(w): - for y in range(h): - self.assertEqual(surf.get_at((x, y)), - comp.get_at((x, y)), - "%s != %s, bpp:, %i, x: %i, y: %i" % - (surf.get_at((x, y)), - comp.get_at((x, y)), - bitsize, dx, dy)) - # Confirm clip rect containment - surf = pygame.Surface((20, 13), 0, 32) - surf.fill((255, 0, 0)) - surf.fill((0, 255, 0), (7, 1, 6, 6)) - comp = surf.copy() - clip = Rect(3, 1, 8, 14) - surf.set_clip(clip) - comp.set_clip(clip) - comp.blit(surf, (clip.x + 2, clip.y + 3), surf.get_clip()) - surf.scroll(2, 3) - w, h = surf.get_size() - for x in range(w): - for y in range(h): - self.assertEqual(surf.get_at((x, y)), - comp.get_at((x, y))) - # Confirm keyword arguments and per-pixel alpha - spot_color = 
(0, 255, 0, 128) - surf = pygame.Surface((4, 4), pygame.SRCALPHA, 32) - surf.fill((255, 0, 0, 255)) - surf.set_at((1, 1), spot_color) - surf.scroll(dx=1) - self.assertEqual(surf.get_at((2, 1)), spot_color) - surf.scroll(dy=1) - self.assertEqual(surf.get_at((2, 2)), spot_color) - surf.scroll(dy=1, dx=1) - self.assertEqual(surf.get_at((3, 3)), spot_color) - surf.scroll(dx=-3, dy=-3) - self.assertEqual(surf.get_at((0, 0)), spot_color) - - -class SurfaceSubtypeTest(unittest.TestCase): - """Issue #280: Methods that return a new Surface preserve subclasses""" - - def setUp(self): - pygame.display.init() - - def tearDown(self): - pygame.display.quit() - - def test_copy(self): - """Ensure method copy() preserves the surface's class - - When Surface is subclassed, the inherited copy() method will return - instances of the subclass. Non Surface fields are uncopied, however. - This includes instance attributes. - """ - expected_size = (32, 32) - ms1 = SurfaceSubclass(expected_size, SRCALPHA, 32) - ms2 = ms1.copy() - - self.assertIsNot(ms1, ms2) - self.assertIsInstance(ms1, pygame.Surface) - self.assertIsInstance(ms2, pygame.Surface) - self.assertIsInstance(ms1, SurfaceSubclass) - self.assertIsInstance(ms2, SurfaceSubclass) - self.assertTrue(ms1.test_attribute) - self.assertRaises(AttributeError, getattr, ms2, "test_attribute") - self.assertEqual(ms2.get_size(), expected_size) - - def test_convert(self): - """Ensure method convert() preserves the surface's class - - When Surface is subclassed, the inherited convert() method will return - instances of the subclass. Non Surface fields are omitted, however. - This includes instance attributes. 
- """ - expected_size = (32, 32) - ms1 = SurfaceSubclass(expected_size, 0, 24) - ms2 = ms1.convert(24) - - self.assertIsNot(ms1, ms2) - self.assertIsInstance(ms1, pygame.Surface) - self.assertIsInstance(ms2, pygame.Surface) - self.assertIsInstance(ms1, SurfaceSubclass) - self.assertIsInstance(ms2, SurfaceSubclass) - self.assertTrue(ms1.test_attribute) - self.assertRaises(AttributeError, getattr, ms2, "test_attribute") - self.assertEqual(ms2.get_size(), expected_size) - - def test_convert_alpha(self): - """Ensure method convert_alpha() preserves the surface's class - - When Surface is subclassed, the inherited convert_alpha() method will - return instances of the subclass. Non Surface fields are omitted, - however. This includes instance attributes. - """ - pygame.display.set_mode((40, 40)) - expected_size = (32, 32) - s = pygame.Surface(expected_size, SRCALPHA, 16) - ms1 = SurfaceSubclass(expected_size, SRCALPHA, 32) - ms2 = ms1.convert_alpha(s) - - self.assertIsNot(ms1, ms2) - self.assertIsInstance(ms1, pygame.Surface) - self.assertIsInstance(ms2, pygame.Surface) - self.assertIsInstance(ms1, SurfaceSubclass) - self.assertIsInstance(ms2, SurfaceSubclass) - self.assertTrue(ms1.test_attribute) - self.assertRaises(AttributeError, getattr, ms2, "test_attribute") - self.assertEqual(ms2.get_size(), expected_size) - - def test_subsurface(self): - """Ensure method subsurface() preserves the surface's class - - When Surface is subclassed, the inherited subsurface() method will - return instances of the subclass. Non Surface fields are uncopied, - however. This includes instance attributes. 
- """ - expected_size = (10, 12) - ms1 = SurfaceSubclass((32, 32), SRCALPHA, 32) - ms2 = ms1.subsurface((4, 5), expected_size) - - self.assertIsNot(ms1, ms2) - self.assertIsInstance(ms1, pygame.Surface) - self.assertIsInstance(ms2, pygame.Surface) - self.assertIsInstance(ms1, SurfaceSubclass) - self.assertIsInstance(ms2, SurfaceSubclass) - self.assertTrue(ms1.test_attribute) - self.assertRaises(AttributeError, getattr, ms2, "test_attribute") - self.assertEqual(ms2.get_size(), expected_size) - - -class SurfaceGetBufferTest(unittest.TestCase): - - # These tests requires ctypes. They are disabled if ctypes - # is not installed. - # - try: - ArrayInterface - except NameError: - __tags__ = ('ignore', 'subprocess_ignore') - - lilendian = pygame.get_sdl_byteorder() == pygame.LIL_ENDIAN - - def _check_interface_2D(self, s): - s_w, s_h = s.get_size() - s_bytesize = s.get_bytesize(); - s_pitch = s.get_pitch() - s_pixels = s._pixels_address - - # check the array interface structure fields. - v = s.get_view('2') - if not IS_PYPY: - flags = PAI_ALIGNED | PAI_NOTSWAPPED | PAI_WRITEABLE - if (s.get_pitch() == s_w * s_bytesize): - flags |= PAI_FORTRAN - - inter = ArrayInterface(v) - - self.assertEqual(inter.two, 2) - self.assertEqual(inter.nd, 2) - self.assertEqual(inter.typekind, 'u') - self.assertEqual(inter.itemsize, s_bytesize) - self.assertEqual(inter.shape[0], s_w) - self.assertEqual(inter.shape[1], s_h) - self.assertEqual(inter.strides[0], s_bytesize) - self.assertEqual(inter.strides[1], s_pitch) - self.assertEqual(inter.flags, flags) - self.assertEqual(inter.data, s_pixels); - - def _check_interface_3D(self, s): - s_w, s_h = s.get_size() - s_bytesize = s.get_bytesize(); - s_pitch = s.get_pitch() - s_pixels = s._pixels_address - s_shifts = list(s.get_shifts()) - - # Check for RGB or BGR surface. 
- if s_shifts[0:3] == [0, 8, 16]: - if self.lilendian: - # RGB - offset = 0 - step = 1 - else: - # BGR - offset = s_bytesize - 1 - step = -1 - elif s_shifts[0:3] == [8, 16, 24]: - if self.lilendian: - # xRGB - offset = 1 - step = 1 - else: - # BGRx - offset = s_bytesize - 2 - step = -1 - elif s_shifts[0:3] == [16, 8, 0]: - if self.lilendian: - # BGR - offset = 2 - step = -1 - else: - # RGB - offset = s_bytesize - 3 - step = 1 - elif s_shifts[0:3] == [24, 16, 8]: - if self.lilendian: - # BGRx - offset = 2 - step = -1 - else: - # RGBx - offset = s_bytesize - 4 - step = -1 - else: - return - - # check the array interface structure fields. - v = s.get_view('3') - if not IS_PYPY: - inter = ArrayInterface(v) - flags = PAI_ALIGNED | PAI_NOTSWAPPED | PAI_WRITEABLE - self.assertEqual(inter.two, 2) - self.assertEqual(inter.nd, 3) - self.assertEqual(inter.typekind, 'u') - self.assertEqual(inter.itemsize, 1) - self.assertEqual(inter.shape[0], s_w) - self.assertEqual(inter.shape[1], s_h) - self.assertEqual(inter.shape[2], 3) - self.assertEqual(inter.strides[0], s_bytesize) - self.assertEqual(inter.strides[1], s_pitch) - self.assertEqual(inter.strides[2], step) - self.assertEqual(inter.flags, flags) - self.assertEqual(inter.data, s_pixels + offset); - - def _check_interface_rgba(self, s, plane): - s_w, s_h = s.get_size() - s_bytesize = s.get_bytesize(); - s_pitch = s.get_pitch() - s_pixels = s._pixels_address - s_shifts = s.get_shifts() - s_masks = s.get_masks() - - # Find the color plane position within the pixel. - if not s_masks[plane]: - return - alpha_shift = s_shifts[plane] - offset = alpha_shift // 8 - if not self.lilendian: - offset = s_bytesize - offset - 1 - - # check the array interface structure fields. 
- v = s.get_view('rgba'[plane]) - if not IS_PYPY: - inter = ArrayInterface(v) - flags = PAI_ALIGNED | PAI_NOTSWAPPED | PAI_WRITEABLE - self.assertEqual(inter.two, 2) - self.assertEqual(inter.nd, 2) - self.assertEqual(inter.typekind, 'u') - self.assertEqual(inter.itemsize, 1) - self.assertEqual(inter.shape[0], s_w) - self.assertEqual(inter.shape[1], s_h) - self.assertEqual(inter.strides[0], s_bytesize) - self.assertEqual(inter.strides[1], s_pitch) - self.assertEqual(inter.flags, flags) - self.assertEqual(inter.data, s_pixels + offset); - - def test_array_interface(self): - self._check_interface_2D(pygame.Surface((5, 7), 0, 8)) - self._check_interface_2D(pygame.Surface((5, 7), 0, 16)) - self._check_interface_2D(pygame.Surface((5, 7), pygame.SRCALPHA, 16)) - self._check_interface_3D(pygame.Surface((5, 7), 0, 24)) - self._check_interface_3D(pygame.Surface((8, 4), 0, 24)) # No gaps - self._check_interface_2D(pygame.Surface((5, 7), 0, 32)) - self._check_interface_3D(pygame.Surface((5, 7), 0, 32)) - self._check_interface_2D(pygame.Surface((5, 7), pygame.SRCALPHA, 32)) - self._check_interface_3D(pygame.Surface((5, 7), pygame.SRCALPHA, 32)) - - def test_array_interface_masks(self): - """Test non-default color byte orders on 3D views""" - - sz = (5, 7) - # Reversed RGB byte order - s = pygame.Surface(sz, 0, 32) - s_masks = list(s.get_masks()) - masks = [0xff, 0xff00, 0xff0000] - if s_masks[0:3] == masks or s_masks[0:3] == masks[::-1]: - masks = s_masks[2::-1] + s_masks[3:4] - self._check_interface_3D(pygame.Surface(sz, 0, 32, masks)) - s = pygame.Surface(sz, 0, 24) - s_masks = list(s.get_masks()) - masks = [0xff, 0xff00, 0xff0000] - if s_masks[0:3] == masks or s_masks[0:3] == masks[::-1]: - masks = s_masks[2::-1] + s_masks[3:4] - self._check_interface_3D(pygame.Surface(sz, 0, 24, masks)) - - masks = [0xff00, 0xff0000, 0xff000000, 0] - self._check_interface_3D(pygame.Surface(sz, 0, 32, masks)) - - # Unsupported RGB byte orders - if pygame.get_sdl_version()[0] == 1: - # 
Invalid mask values with SDL2 - masks = [0xff00, 0xff, 0xff0000, 0] - self.assertRaises(ValueError, - pygame.Surface(sz, 0, 24, masks).get_view, '3') - - def test_array_interface_alpha(self): - for shifts in [[0, 8, 16, 24], [8, 16, 24, 0], - [24, 16, 8, 0], [16, 8, 0, 24]]: - masks = [0xff << s for s in shifts] - s = pygame.Surface((4, 2), pygame.SRCALPHA, 32, masks) - self._check_interface_rgba(s, 3) - - def test_array_interface_rgb(self): - for shifts in [[0, 8, 16, 24], [8, 16, 24, 0], - [24, 16, 8, 0], [16, 8, 0, 24]]: - masks = [0xff << s for s in shifts] - masks[3] = 0 - for plane in range(3): - s = pygame.Surface((4, 2), 0, 24) - self._check_interface_rgba(s, plane) - s = pygame.Surface((4, 2), 0, 32) - self._check_interface_rgba(s, plane) - - @unittest.skipIf(not pygame.HAVE_NEWBUF, 'newbuf not implemented') - def test_newbuf_PyBUF_flags_bytes(self): - from pygame.tests.test_utils import buftools - Importer = buftools.Importer - s = pygame.Surface((10, 6), 0, 32) - a = s.get_buffer() - b = Importer(a, buftools.PyBUF_SIMPLE) - self.assertEqual(b.ndim, 0) - self.assertTrue(b.format is None) - self.assertEqual(b.len, a.length) - self.assertEqual(b.itemsize, 1) - self.assertTrue(b.shape is None) - self.assertTrue(b.strides is None) - self.assertTrue(b.suboffsets is None) - self.assertFalse(b.readonly) - self.assertEqual(b.buf, s._pixels_address) - b = Importer(a, buftools.PyBUF_WRITABLE) - self.assertEqual(b.ndim, 0) - self.assertTrue(b.format is None) - self.assertFalse(b.readonly) - b = Importer(a, buftools.PyBUF_FORMAT) - self.assertEqual(b.ndim, 0) - self.assertEqual(b.format, 'B') - b = Importer(a, buftools.PyBUF_ND) - self.assertEqual(b.ndim, 1) - self.assertTrue(b.format is None) - self.assertEqual(b.len, a.length) - self.assertEqual(b.itemsize, 1) - self.assertEqual(b.shape, (a.length,)) - self.assertTrue(b.strides is None) - self.assertTrue(b.suboffsets is None) - self.assertFalse(b.readonly) - self.assertEqual(b.buf, s._pixels_address) - b = 
Importer(a, buftools.PyBUF_STRIDES) - self.assertEqual(b.ndim, 1) - self.assertTrue(b.format is None) - self.assertEqual(b.strides, (1,)) - s2 = s.subsurface((1, 1, 7, 4)) # Not contiguous - a = s2.get_buffer() - b = Importer(a, buftools.PyBUF_SIMPLE) - self.assertEqual(b.ndim, 0) - self.assertTrue(b.format is None) - self.assertEqual(b.len, a.length) - self.assertEqual(b.itemsize, 1) - self.assertTrue(b.shape is None) - self.assertTrue(b.strides is None) - self.assertTrue(b.suboffsets is None) - self.assertFalse(b.readonly) - self.assertEqual(b.buf, s2._pixels_address) - b = Importer(a, buftools.PyBUF_C_CONTIGUOUS) - self.assertEqual(b.ndim, 1) - self.assertEqual(b.strides, (1,)) - b = Importer(a, buftools.PyBUF_F_CONTIGUOUS) - self.assertEqual(b.ndim, 1) - self.assertEqual(b.strides, (1,)) - b = Importer(a, buftools.PyBUF_ANY_CONTIGUOUS) - self.assertEqual(b.ndim, 1) - self.assertEqual(b.strides, (1,)) - - @unittest.skipIf(not pygame.HAVE_NEWBUF, 'newbuf not implemented') - def test_newbuf_PyBUF_flags_0D(self): - # This is the same handler as used by get_buffer(), so just - # confirm that it succeeds for one case. 
- from pygame.tests.test_utils import buftools - Importer = buftools.Importer - s = pygame.Surface((10, 6), 0, 32) - a = s.get_view('0') - b = Importer(a, buftools.PyBUF_SIMPLE) - self.assertEqual(b.ndim, 0) - self.assertTrue(b.format is None) - self.assertEqual(b.len, a.length) - self.assertEqual(b.itemsize, 1) - self.assertTrue(b.shape is None) - self.assertTrue(b.strides is None) - self.assertTrue(b.suboffsets is None) - self.assertFalse(b.readonly) - self.assertEqual(b.buf, s._pixels_address) - - @unittest.skipIf(not pygame.HAVE_NEWBUF, 'newbuf not implemented') - def test_newbuf_PyBUF_flags_1D(self): - from pygame.tests.test_utils import buftools - Importer = buftools.Importer - s = pygame.Surface((10, 6), 0, 32) - a = s.get_view('1') - b = Importer(a, buftools.PyBUF_SIMPLE) - self.assertEqual(b.ndim, 0) - self.assertTrue(b.format is None) - self.assertEqual(b.len, a.length) - self.assertEqual(b.itemsize, s.get_bytesize()) - self.assertTrue(b.shape is None) - self.assertTrue(b.strides is None) - self.assertTrue(b.suboffsets is None) - self.assertFalse(b.readonly) - self.assertEqual(b.buf, s._pixels_address) - b = Importer(a, buftools.PyBUF_WRITABLE) - self.assertEqual(b.ndim, 0) - self.assertTrue(b.format is None) - self.assertFalse(b.readonly) - b = Importer(a, buftools.PyBUF_FORMAT) - self.assertEqual(b.ndim, 0) - self.assertEqual(b.format, '=I') - b = Importer(a, buftools.PyBUF_ND) - self.assertEqual(b.ndim, 1) - self.assertTrue(b.format is None) - self.assertEqual(b.len, a.length) - self.assertEqual(b.itemsize, s.get_bytesize()) - self.assertEqual(b.shape, (s.get_width() * s.get_height(),)) - self.assertTrue(b.strides is None) - self.assertTrue(b.suboffsets is None) - self.assertFalse(b.readonly) - self.assertEqual(b.buf, s._pixels_address) - b = Importer(a, buftools.PyBUF_STRIDES) - self.assertEqual(b.ndim, 1) - self.assertTrue(b.format is None) - self.assertEqual(b.strides, (s.get_bytesize(),)) - - @unittest.skipIf(not pygame.HAVE_NEWBUF, 'newbuf not 
implemented') - def test_newbuf_PyBUF_flags_2D(self): - from pygame.tests.test_utils import buftools - Importer = buftools.Importer - s = pygame.Surface((10, 6), 0, 32) - a = s.get_view('2') - # Non dimensional requests, no PyDEF_ND, are handled by the - # 1D surface buffer code, so only need to confirm a success. - b = Importer(a, buftools.PyBUF_SIMPLE) - self.assertEqual(b.ndim, 0) - self.assertTrue(b.format is None) - self.assertEqual(b.len, a.length) - self.assertEqual(b.itemsize, s.get_bytesize()) - self.assertTrue(b.shape is None) - self.assertTrue(b.strides is None) - self.assertTrue(b.suboffsets is None) - self.assertFalse(b.readonly) - self.assertEqual(b.buf, s._pixels_address) - # Uniquely 2D - b = Importer(a, buftools.PyBUF_STRIDES) - self.assertEqual(b.ndim, 2) - self.assertTrue(b.format is None) - self.assertEqual(b.len, a.length) - self.assertEqual(b.itemsize, s.get_bytesize()) - self.assertEqual(b.shape, s.get_size()) - self.assertEqual(b.strides, (s.get_bytesize(), s.get_pitch())) - self.assertTrue(b.suboffsets is None) - self.assertFalse(b.readonly) - self.assertEqual(b.buf, s._pixels_address) - b = Importer(a, buftools.PyBUF_RECORDS_RO) - self.assertEqual(b.ndim, 2) - self.assertEqual(b.format, '=I') - self.assertEqual(b.strides, (s.get_bytesize(), s.get_pitch())) - b = Importer(a, buftools.PyBUF_RECORDS) - self.assertEqual(b.ndim, 2) - self.assertEqual(b.format, '=I') - self.assertEqual(b.strides, (s.get_bytesize(), s.get_pitch())) - b = Importer(a, buftools.PyBUF_F_CONTIGUOUS) - self.assertEqual(b.ndim, 2) - self.assertEqual(b.format, None) - self.assertEqual(b.strides, (s.get_bytesize(), s.get_pitch())) - b = Importer(a, buftools.PyBUF_ANY_CONTIGUOUS) - self.assertEqual(b.ndim, 2) - self.assertEqual(b.format, None) - self.assertEqual(b.strides, (s.get_bytesize(), s.get_pitch())) - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ND) - self.assertRaises(BufferError, Importer, a, - buftools.PyBUF_C_CONTIGUOUS) - s2 = s.subsurface((1, 1, 
7, 4)) # Not contiguous - a = s2.get_view('2') - b = Importer(a, buftools.PyBUF_STRIDES) - self.assertEqual(b.ndim, 2) - self.assertTrue(b.format is None) - self.assertEqual(b.len, a.length) - self.assertEqual(b.itemsize, s2.get_bytesize()) - self.assertEqual(b.shape, s2.get_size()) - self.assertEqual(b.strides, (s2.get_bytesize(), s.get_pitch())) - self.assertTrue(b.suboffsets is None) - self.assertFalse(b.readonly) - self.assertEqual(b.buf, s2._pixels_address) - b = Importer(a, buftools.PyBUF_RECORDS) - self.assertEqual(b.ndim, 2) - self.assertEqual(b.format, '=I') - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_SIMPLE) - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_FORMAT) - self.assertRaises(BufferError, Importer, a, - buftools.PyBUF_WRITABLE) - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ND) - self.assertRaises(BufferError, Importer, a, - buftools.PyBUF_C_CONTIGUOUS) - self.assertRaises(BufferError, Importer, a, - buftools.PyBUF_F_CONTIGUOUS) - self.assertRaises(BufferError, Importer, a, - buftools.PyBUF_ANY_CONTIGUOUS) - - @unittest.skipIf(not pygame.HAVE_NEWBUF, 'newbuf not implemented') - def test_newbuf_PyBUF_flags_3D(self): - from pygame.tests.test_utils import buftools - Importer = buftools.Importer - s = pygame.Surface((12, 6), 0, 24) - rmask, gmask, bmask, amask = s.get_masks() - if self.lilendian: - if rmask == 0x0000ff: - color_step = 1 - addr_offset = 0 - else: - color_step = -1 - addr_offset = 2 - else: - if (rmask == 0xff0000): - color_step = 1 - addr_offset = 0 - else: - color_step = -1 - addr_offset = 2 - a = s.get_view('3') - b = Importer(a, buftools.PyBUF_STRIDES) - w, h = s.get_size() - shape = w, h, 3 - strides = 3, s.get_pitch(), color_step - self.assertEqual(b.ndim, 3) - self.assertTrue(b.format is None) - self.assertEqual(b.len, a.length) - self.assertEqual(b.itemsize, 1) - self.assertEqual(b.shape, shape) - self.assertEqual(b.strides, strides) - self.assertTrue(b.suboffsets is None) - 
self.assertFalse(b.readonly) - self.assertEqual(b.buf, s._pixels_address + addr_offset) - b = Importer(a, buftools.PyBUF_RECORDS_RO) - self.assertEqual(b.ndim, 3) - self.assertEqual(b.format, 'B') - self.assertEqual(b.strides, strides) - b = Importer(a, buftools.PyBUF_RECORDS) - self.assertEqual(b.ndim, 3) - self.assertEqual(b.format, 'B') - self.assertEqual(b.strides, strides) - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_SIMPLE) - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_FORMAT) - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_WRITABLE) - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ND) - self.assertRaises(BufferError, Importer, a, - buftools.PyBUF_C_CONTIGUOUS) - self.assertRaises(BufferError, Importer, a, - buftools.PyBUF_F_CONTIGUOUS) - self.assertRaises(BufferError, Importer, a, - buftools.PyBUF_ANY_CONTIGUOUS) - - @unittest.skipIf(not pygame.HAVE_NEWBUF, 'newbuf not implemented') - def test_newbuf_PyBUF_flags_rgba(self): - # All color plane views are handled by the same routine, - # so only one plane need be checked. 
- from pygame.tests.test_utils import buftools - Importer = buftools.Importer - s = pygame.Surface((12, 6), 0, 24) - rmask, gmask, bmask, amask = s.get_masks() - if self.lilendian: - if rmask == 0x0000ff: - addr_offset = 0 - else: - addr_offset = 2 - else: - if rmask == 0xff0000: - addr_offset = 0 - else: - addr_offset = 2 - a = s.get_view('R') - b = Importer(a, buftools.PyBUF_STRIDES) - w, h = s.get_size() - shape = w, h - strides = s.get_bytesize(), s.get_pitch() - self.assertEqual(b.ndim, 2) - self.assertTrue(b.format is None) - self.assertEqual(b.len, a.length) - self.assertEqual(b.itemsize, 1) - self.assertEqual(b.shape, shape) - self.assertEqual(b.strides, strides) - self.assertTrue(b.suboffsets is None) - self.assertFalse(b.readonly) - self.assertEqual(b.buf, s._pixels_address + addr_offset) - b = Importer(a, buftools.PyBUF_RECORDS_RO) - self.assertEqual(b.ndim, 2) - self.assertEqual(b.format, 'B') - self.assertEqual(b.strides, strides) - b = Importer(a, buftools.PyBUF_RECORDS) - self.assertEqual(b.ndim, 2) - self.assertEqual(b.format, 'B') - self.assertEqual(b.strides, strides) - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_SIMPLE) - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_FORMAT) - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_WRITABLE) - self.assertRaises(BufferError, Importer, a, buftools.PyBUF_ND) - self.assertRaises(BufferError, Importer, a, - buftools.PyBUF_C_CONTIGUOUS) - self.assertRaises(BufferError, Importer, a, - buftools.PyBUF_F_CONTIGUOUS) - self.assertRaises(BufferError, Importer, a, - buftools.PyBUF_ANY_CONTIGUOUS) - - -class SurfaceBlendTest(unittest.TestCase): - - def setUp(self): - # Needed for 8 bits-per-pixel color palette surface tests. 
- pygame.display.init() - - def tearDown(self): - pygame.display.quit() - - _test_palette = [(0, 0, 0, 255), - (10, 30, 60, 0), - (25, 75, 100, 128), - (200, 150, 100, 200), - (0, 100, 200, 255)] - surf_size = (10, 12) - _test_points = [((0, 0), 1), ((4, 5), 1), ((9, 0), 2), - ((5, 5), 2), ((0, 11), 3), ((4, 6), 3), - ((9, 11), 4), ((5, 6), 4)] - - def _make_surface(self, bitsize, srcalpha=False, palette=None): - if palette is None: - palette = self._test_palette - flags = 0 - if srcalpha: - flags |= SRCALPHA - surf = pygame.Surface(self.surf_size, flags, bitsize) - if bitsize == 8: - surf.set_palette([c[:3] for c in palette]) - return surf - - def _fill_surface(self, surf, palette=None): - if palette is None: - palette = self._test_palette - surf.fill(palette[1], (0, 0, 5, 6)) - surf.fill(palette[2], (5, 0, 5, 6)) - surf.fill(palette[3], (0, 6, 5, 6)) - surf.fill(palette[4], (5, 6, 5, 6)) - - def _make_src_surface(self, bitsize, srcalpha=False, palette=None): - surf = self._make_surface(bitsize, srcalpha, palette) - self._fill_surface(surf, palette) - return surf - - def _assert_surface(self, surf, palette=None, msg=""): - if palette is None: - palette = self._test_palette - if surf.get_bitsize() == 16: - palette = [surf.unmap_rgb(surf.map_rgb(c)) for c in palette] - for posn, i in self._test_points: - self.assertEqual(surf.get_at(posn), palette[i], - "%s != %s: flags: %i, bpp: %i, posn: %s%s" % - (surf.get_at(posn), - palette[i], surf.get_flags(), - surf.get_bitsize(), posn, msg)) - - def test_blit_blend(self): - sources = [self._make_src_surface(8), - self._make_src_surface(16), - self._make_src_surface(16, srcalpha=True), - self._make_src_surface(24), - self._make_src_surface(32), - self._make_src_surface(32, srcalpha=True)] - destinations = [self._make_surface(8), - self._make_surface(16), - self._make_surface(16, srcalpha=True), - self._make_surface(24), - self._make_surface(32), - self._make_surface(32, srcalpha=True)] - blend = [('BLEND_ADD', (0, 25, 100, 
255), - lambda a, b: min(a + b, 255)), - ('BLEND_SUB', (100, 25, 0, 100), - lambda a, b: max(a - b, 0)), - ('BLEND_MULT', (100, 200, 0, 0), - lambda a, b: (a * b) // 256), - ('BLEND_MIN', (255, 0, 0, 255), min), - ('BLEND_MAX', (0, 255, 0, 255), max)] - - for src in sources: - src_palette = [src.unmap_rgb(src.map_rgb(c)) - for c in self._test_palette] - for dst in destinations: - for blend_name, dst_color, op in blend: - dc = dst.unmap_rgb(dst.map_rgb(dst_color)) - p = [] - for sc in src_palette: - c = [op(dc[i], sc[i]) for i in range(3)] - if dst.get_masks()[3]: - c.append(dc[3]) - else: - c.append(255) - c = dst.unmap_rgb(dst.map_rgb(c)) - p.append(c) - dst.fill(dst_color) - dst.blit(src, - (0, 0), - special_flags=getattr(pygame, blend_name)) - self._assert_surface(dst, p, - (", op: %s, src bpp: %i" - ", src flags: %i" % - (blend_name, - src.get_bitsize(), - src.get_flags()))) - - src = self._make_src_surface(32) - masks = src.get_masks() - dst = pygame.Surface(src.get_size(), 0, 32, - [masks[2], masks[1], masks[0], masks[3]]) - for blend_name, dst_color, op in blend: - p = [] - for src_color in self._test_palette: - c = [op(dst_color[i], src_color[i]) for i in range(3)] - c.append(255) - p.append(tuple(c)) - dst.fill(dst_color) - dst.blit(src, - (0, 0), - special_flags=getattr(pygame, blend_name)) - self._assert_surface(dst, p, ", %s" % blend_name) - - # Blend blits are special cased for 32 to 32 bit surfaces. - # - # Confirm that it works when the rgb bytes are not the - # least significant bytes. 
- pat = self._make_src_surface(32) - masks = pat.get_masks() - if min(masks) == intify(0xFF000000): - masks = [longify(m) >> 8 for m in masks] - else: - masks = [intify(m << 8) for m in masks] - src = pygame.Surface(pat.get_size(), 0, 32, masks) - self._fill_surface(src) - dst = pygame.Surface(src.get_size(), 0, 32, masks) - for blend_name, dst_color, op in blend: - p = [] - for src_color in self._test_palette: - c = [op(dst_color[i], src_color[i]) for i in range(3)] - c.append(255) - p.append(tuple(c)) - dst.fill(dst_color) - dst.blit(src, - (0, 0), - special_flags=getattr(pygame, blend_name)) - self._assert_surface(dst, p, ", %s" % blend_name) - - def test_blit_blend_rgba(self): - sources = [self._make_src_surface(8), - self._make_src_surface(16), - self._make_src_surface(16, srcalpha=True), - self._make_src_surface(24), - self._make_src_surface(32), - self._make_src_surface(32, srcalpha=True)] - destinations = [self._make_surface(8), - self._make_surface(16), - self._make_surface(16, srcalpha=True), - self._make_surface(24), - self._make_surface(32), - self._make_surface(32, srcalpha=True)] - blend = [('BLEND_RGBA_ADD', (0, 25, 100, 255), - lambda a, b: min(a + b, 255)), - ('BLEND_RGBA_SUB', (0, 25, 100, 255), - lambda a, b: max(a - b, 0)), - ('BLEND_RGBA_MULT', (0, 7, 100, 255), - lambda a, b: (a * b) // 256), - ('BLEND_RGBA_MIN', (0, 255, 0, 255), min), - ('BLEND_RGBA_MAX', (0, 255, 0, 255), max)] - - for src in sources: - src_palette = [src.unmap_rgb(src.map_rgb(c)) - for c in self._test_palette] - for dst in destinations: - for blend_name, dst_color, op in blend: - dc = dst.unmap_rgb(dst.map_rgb(dst_color)) - p = [] - for sc in src_palette: - c = [op(dc[i], sc[i]) for i in range(4)] - if not dst.get_masks()[3]: - c[3] = 255 - c = dst.unmap_rgb(dst.map_rgb(c)) - p.append(c) - dst.fill(dst_color) - dst.blit(src, - (0, 0), - special_flags=getattr(pygame, blend_name)) - self._assert_surface(dst, p, - (", op: %s, src bpp: %i" - ", src flags: %i" % - (blend_name, 
- src.get_bitsize(), - src.get_flags()))) - - # Blend blits are special cased for 32 to 32 bit surfaces - # with per-pixel alpha. - # - # Confirm the general case is used instead when the formats differ. - src = self._make_src_surface(32, srcalpha=True) - masks = src.get_masks() - dst = pygame.Surface(src.get_size(), SRCALPHA, 32, - (masks[2], masks[1], masks[0], masks[3])) - for blend_name, dst_color, op in blend: - p = [tuple([op(dst_color[i], src_color[i]) for i in range(4)]) - for src_color in self._test_palette] - dst.fill(dst_color) - dst.blit(src, - (0, 0), - special_flags=getattr(pygame, blend_name)) - self._assert_surface(dst, p, ", %s" % blend_name) - - # Confirm this special case handles subsurfaces. - src = pygame.Surface((8, 10), SRCALPHA, 32) - dst = pygame.Surface((8, 10), SRCALPHA, 32) - tst = pygame.Surface((8, 10), SRCALPHA, 32) - src.fill((1, 2, 3, 4)) - dst.fill((40, 30, 20, 10)) - subsrc = src.subsurface((2, 3, 4, 4)) - subdst = dst.subsurface((2, 3, 4, 4)) - subdst.blit(subsrc, (0, 0), special_flags=BLEND_RGBA_ADD) - tst.fill((40, 30, 20, 10)) - tst.fill((41, 32, 23, 14), (2, 3, 4, 4)) - for x in range(8): - for y in range(10): - self.assertEqual(dst.get_at((x, y)), tst.get_at((x, y)), - "%s != %s at (%i, %i)" % - (dst.get_at((x, y)), tst.get_at((x, y)), - x, y)) - - def test_blit_blend_big_rect(self): - """ test that an oversized rect works ok. 
- """ - color = (1, 2, 3, 255) - area = (1, 1, 30, 30) - s1 = pygame.Surface((4, 4), 0, 32) - r = s1.fill(special_flags=pygame.BLEND_ADD, color=color, rect=area) - - self.assertEqual(pygame.Rect((1, 1, 3, 3)), r) - self.assertEqual(s1.get_at((0, 0)), (0, 0, 0, 255)) - self.assertEqual(s1.get_at((1, 1)), color) - - black = pygame.Color("black") - red = pygame.Color("red") - self.assertNotEqual(black, red) - - surf = pygame.Surface((10, 10), 0, 32) - surf.fill(black) - subsurf = surf.subsurface(pygame.Rect(0, 1, 10, 8)) - self.assertEqual(surf.get_at((0, 0)), black) - self.assertEqual(surf.get_at((0, 9)), black) - - subsurf.fill(red, (0, -1, 10, 1), pygame.BLEND_RGB_ADD) - self.assertEqual(surf.get_at((0, 0)), black) - self.assertEqual(surf.get_at((0, 9)), black) - - subsurf.fill(red, (0, 8, 10, 1), pygame.BLEND_RGB_ADD) - self.assertEqual(surf.get_at((0, 0)), black) - self.assertEqual(surf.get_at((0, 9)), black) - - def test_GET_PIXELVALS(self): - # surface.h GET_PIXELVALS bug regarding whether of not - # a surface has per-pixel alpha. Looking at the Amask - # is not enough. The surface's SRCALPHA flag must also - # be considered. Fix rev. 1923. - src = self._make_surface(32, srcalpha=True) - src.fill((0, 0, 0, 128)) - src.set_alpha(None) # Clear SRCALPHA flag. 
- dst = self._make_surface(32, srcalpha=True) - dst.blit(src, (0, 0), special_flags=BLEND_RGBA_ADD) - self.assertEqual(dst.get_at((0, 0)), (0, 0, 0, 255)) - - def test_fill_blend(self): - destinations = [self._make_surface(8), - self._make_surface(16), - self._make_surface(16, srcalpha=True), - self._make_surface(24), - self._make_surface(32), - self._make_surface(32, srcalpha=True)] - blend = [('BLEND_ADD', (0, 25, 100, 255), - lambda a, b: min(a + b, 255)), - ('BLEND_SUB', (0, 25, 100, 255), - lambda a, b: max(a - b, 0)), - ('BLEND_MULT', (0, 7, 100, 255), - lambda a, b: (a * b) // 256), - ('BLEND_MIN', (0, 255, 0, 255), min), - ('BLEND_MAX', (0, 255, 0, 255), max)] - - for dst in destinations: - dst_palette = [dst.unmap_rgb(dst.map_rgb(c)) - for c in self._test_palette] - for blend_name, fill_color, op in blend: - fc = dst.unmap_rgb(dst.map_rgb(fill_color)) - self._fill_surface(dst) - p = [] - for dc in dst_palette: - c = [op(dc[i], fc[i]) for i in range(3)] - if dst.get_masks()[3]: - c.append(dc[3]) - else: - c.append(255) - c = dst.unmap_rgb(dst.map_rgb(c)) - p.append(c) - dst.fill(fill_color, special_flags=getattr(pygame, blend_name)) - self._assert_surface(dst, p, ", %s" % blend_name) - - def test_fill_blend_rgba(self): - destinations = [self._make_surface(8), - self._make_surface(16), - self._make_surface(16, srcalpha=True), - self._make_surface(24), - self._make_surface(32), - self._make_surface(32, srcalpha=True)] - blend = [('BLEND_RGBA_ADD', (0, 25, 100, 255), - lambda a, b: min(a + b, 255)), - ('BLEND_RGBA_SUB', (0, 25, 100, 255), - lambda a, b: max(a - b, 0)), - ('BLEND_RGBA_MULT', (0, 7, 100, 255), - lambda a, b: (a * b) // 256), - ('BLEND_RGBA_MIN', (0, 255, 0, 255), min), - ('BLEND_RGBA_MAX', (0, 255, 0, 255), max)] - - for dst in destinations: - dst_palette = [dst.unmap_rgb(dst.map_rgb(c)) - for c in self._test_palette] - for blend_name, fill_color, op in blend: - fc = dst.unmap_rgb(dst.map_rgb(fill_color)) - self._fill_surface(dst) - p = [] - for 
dc in dst_palette: - c = [op(dc[i], fc[i]) for i in range(4)] - if not dst.get_masks()[3]: - c[3] = 255 - c = dst.unmap_rgb(dst.map_rgb(c)) - p.append(c) - dst.fill(fill_color, special_flags=getattr(pygame, blend_name)) - self._assert_surface(dst, p, ", %s" % blend_name) - - -class SurfaceSelfBlitTest(unittest.TestCase): - """Blit to self tests. - - This test case is in response to MotherHamster Bugzilla Bug 19. - """ - - def setUp(self): - # Needed for 8 bits-per-pixel color palette surface tests. - pygame.display.init() - - def tearDown(self): - pygame.display.quit() - - _test_palette = [(0, 0, 0, 255), - (255, 0, 0, 0), - (0, 255, 0, 255)] - surf_size = (9, 6) - - def _fill_surface(self, surf, palette=None): - if palette is None: - palette = self._test_palette - surf.fill(palette[1]) - surf.fill(palette[2], (1, 2, 1, 2)) - - def _make_surface(self, bitsize, srcalpha=False, palette=None): - if palette is None: - palette = self._test_palette - flags = 0 - if srcalpha: - flags |= SRCALPHA - surf = pygame.Surface(self.surf_size, flags, bitsize) - if bitsize == 8: - surf.set_palette([c[:3] for c in palette]) - self._fill_surface(surf, palette) - return surf - - def _assert_same(self, a, b): - w, h = a.get_size() - for x in range(w): - for y in range(h): - self.assertEqual(a.get_at((x, y)), b.get_at((x, y)), - ("%s != %s, bpp: %i" % - (a.get_at((x, y)), b.get_at((x, y)), - a.get_bitsize()))) - - def test_overlap_check(self): - # Ensure overlapping blits are properly detected. There are two - # places where this is done, within SoftBlitPyGame() in alphablit.c - # and PySurface_Blit() in surface.c. SoftBlitPyGame should catch the - # per-pixel alpha surface, PySurface_Blit the colorkey and blanket - # alpha surface. per-pixel alpha and blanket alpha self blits are - # not properly handled by SDL 1.2.13, so Pygame does them. 
- bgc = (0, 0, 0, 255) - rectc_left = (128, 64, 32, 255) - rectc_right = (255, 255, 255, 255) - colors = [(255, 255, 255, 255), (128, 64, 32, 255)] - overlaps = [(0, 0, 1, 0, (50, 0)), - (0, 0, 49, 1, (98, 2)), - (0, 0, 49, 49, (98, 98)), - (49, 0, 0, 1, (0, 2)), - (49, 0, 0, 49, (0, 98))] - surfs = [pygame.Surface((100, 100), SRCALPHA, 32)] - surf = pygame.Surface((100, 100), 0, 32) - surf.set_alpha(255) - surfs.append(surf) - surf = pygame.Surface((100, 100), 0, 32) - surf.set_colorkey((0, 1, 0)) - surfs.append(surf) - for surf in surfs: - for s_x, s_y, d_x, d_y, test_posn in overlaps: - surf.fill(bgc) - surf.fill(rectc_right, (25, 0, 25, 50)) - surf.fill(rectc_left, (0, 0, 25, 50)) - surf.blit(surf, (d_x, d_y), (s_x, s_y, 50, 50)) - self.assertEqual(surf.get_at(test_posn), rectc_right) - - # https://github.com/pygame/pygame/issues/370#issuecomment-364625291 - @unittest.skipIf('ppc64le' in platform.uname(), 'known ppc64le issue') - def test_colorkey(self): - # Check a workaround for an SDL 1.2.13 surface self-blit problem - # (MotherHamster Bugzilla bug 19). - pygame.display.set_mode((100, 50)) # Needed for 8bit surface - bitsizes = [8, 16, 24, 32] - for bitsize in bitsizes: - surf = self._make_surface(bitsize) - surf.set_colorkey(self._test_palette[1]) - surf.blit(surf, (3, 0)) - p = [] - for c in self._test_palette: - c = surf.unmap_rgb(surf.map_rgb(c)) - p.append(c) - p[1] = (p[1][0], p[1][1], p[1][2], 0) - tmp = self._make_surface(32, srcalpha=True, palette=p) - tmp.blit(tmp, (3, 0)) - tmp.set_alpha(None) - comp = self._make_surface(bitsize) - comp.blit(tmp, (0, 0)) - self._assert_same(surf, comp) - - # https://github.com/pygame/pygame/issues/370#issuecomment-364625291 - @unittest.skipIf('ppc64le' in platform.uname(), 'known ppc64le issue') - def test_blanket_alpha(self): - # Check a workaround for an SDL 1.2.13 surface self-blit problem - # (MotherHamster Bugzilla bug 19). 
- pygame.display.set_mode((100, 50)) # Needed for 8bit surface - bitsizes = [8, 16, 24, 32] - for bitsize in bitsizes: - surf = self._make_surface(bitsize) - surf.set_alpha(128) - surf.blit(surf, (3, 0)) - p = [] - for c in self._test_palette: - c = surf.unmap_rgb(surf.map_rgb(c)) - p.append((c[0], c[1], c[2], 128)) - tmp = self._make_surface(32, srcalpha=True, palette=p) - tmp.blit(tmp, (3, 0)) - tmp.set_alpha(None) - comp = self._make_surface(bitsize) - comp.blit(tmp, (0, 0)) - self._assert_same(surf, comp) - - def test_pixel_alpha(self): - bitsizes = [16, 32] - for bitsize in bitsizes: - surf = self._make_surface(bitsize, srcalpha=True) - comp = self._make_surface(bitsize, srcalpha=True) - comp.blit(surf, (3, 0)) - surf.blit(surf, (3, 0)) - self._assert_same(surf, comp) - - def test_blend(self): - bitsizes = [8, 16, 24, 32] - blends = ['BLEND_ADD', - 'BLEND_SUB', - 'BLEND_MULT', - 'BLEND_MIN', - 'BLEND_MAX'] - for bitsize in bitsizes: - surf = self._make_surface(bitsize) - comp = self._make_surface(bitsize) - for blend in blends: - self._fill_surface(surf) - self._fill_surface(comp) - comp.blit(surf, (3, 0), - special_flags=getattr(pygame, blend)) - surf.blit(surf, (3, 0), - special_flags=getattr(pygame, blend)) - self._assert_same(surf, comp) - - def test_blend_rgba(self): - bitsizes = [16, 32] - blends = ['BLEND_RGBA_ADD', - 'BLEND_RGBA_SUB', - 'BLEND_RGBA_MULT', - 'BLEND_RGBA_MIN', - 'BLEND_RGBA_MAX'] - for bitsize in bitsizes: - surf = self._make_surface(bitsize, srcalpha=True) - comp = self._make_surface(bitsize, srcalpha=True) - for blend in blends: - self._fill_surface(surf) - self._fill_surface(comp) - comp.blit(surf, (3, 0), - special_flags=getattr(pygame, blend)) - surf.blit(surf, (3, 0), - special_flags=getattr(pygame, blend)) - self._assert_same(surf, comp) - - def test_subsurface(self): - # Blitting a surface to its subsurface is allowed. 
- surf = self._make_surface(32, srcalpha=True) - comp = surf.copy() - comp.blit(surf, (3, 0)) - sub = surf.subsurface((3, 0, 6, 6)) - sub.blit(surf, (0, 0)) - del sub - self._assert_same(surf, comp) - # Blitting a subsurface to its owner is forbidden because of - # lock conficts. This limitation allows the overlap check - # in PySurface_Blit of alphablit.c to be simplified. - def do_blit(d, s): - d.blit(s, (0, 0)) - sub = surf.subsurface((1, 1, 2, 2)) - self.assertRaises(pygame.error, do_blit, surf, sub) - - def test_copy_alpha(self): - """issue 581: alpha of surface copy with SRCALPHA is set to 0.""" - surf = pygame.Surface((16, 16), pygame.SRCALPHA, 32) - self.assertEqual(surf.get_alpha(), 255) - surf2 = surf.copy() - self.assertEqual(surf2.get_alpha(), 255) - - -class SurfaceFillTest(unittest.TestCase): - - def setUp(self): - pygame.display.init() - - def tearDown(self): - pygame.display.quit() - - def test_fill(self): - screen = pygame.display.set_mode((640, 480)) - - # Green and blue test pattern - screen.fill((0, 255, 0), (0, 0, 320, 240)) - screen.fill((0, 255, 0), (320, 240, 320, 240)) - screen.fill((0, 0, 255), (320, 0, 320, 240)) - screen.fill((0, 0, 255), (0, 240, 320, 240)) - - # Now apply a clip rect, such that only the left side of the - # screen should be effected by blit opperations. - screen.set_clip((0, 0, 320, 480)) - - # Test fills with each special flag, and additionaly without any. 
- screen.fill((255, 0, 0, 127), (160, 0, 320, 30), 0) - screen.fill((255, 0, 0, 127), (160, 30, 320, 30), pygame.BLEND_ADD) - screen.fill((0, 127, 127, 127), (160, 60, 320, 30), pygame.BLEND_SUB) - screen.fill((0, 63, 63, 127), (160, 90, 320, 30), pygame.BLEND_MULT) - screen.fill((0, 127, 127, 127), (160, 120, 320, 30), pygame.BLEND_MIN) - screen.fill((127, 0, 0, 127), (160, 150, 320, 30), pygame.BLEND_MAX) - screen.fill((255, 0, 0, 127), (160, 180, 320, 30), pygame.BLEND_RGBA_ADD) - screen.fill((0, 127, 127, 127), (160, 210, 320, 30), pygame.BLEND_RGBA_SUB) - screen.fill((0, 63, 63, 127), (160, 240, 320, 30), pygame.BLEND_RGBA_MULT) - screen.fill((0, 127, 127, 127), (160, 270, 320, 30), pygame.BLEND_RGBA_MIN) - screen.fill((127, 0, 0, 127), (160, 300, 320, 30), pygame.BLEND_RGBA_MAX) - screen.fill((255, 0, 0, 127), (160, 330, 320, 30), pygame.BLEND_RGB_ADD) - screen.fill((0, 127, 127, 127), (160, 360, 320, 30), pygame.BLEND_RGB_SUB) - screen.fill((0, 63, 63, 127), (160, 390, 320, 30), pygame.BLEND_RGB_MULT) - screen.fill((0, 127, 127, 127), (160, 420, 320, 30), pygame.BLEND_RGB_MIN) - screen.fill((255, 0, 0, 127), (160, 450, 320, 30), pygame.BLEND_RGB_MAX) - - # Update the display so we can see the results - pygame.display.flip() - - # Compare colors on both sides of window - for y in range(5, 480, 10): - self.assertEqual(screen.get_at((10, y)), screen.get_at((330, 480 - y))) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/surfarray_tags.py b/venv/lib/python3.7/site-packages/pygame/tests/surfarray_tags.py deleted file mode 100644 index 132d559..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/surfarray_tags.py +++ /dev/null @@ -1,16 +0,0 @@ -__tags__ = ['array'] - -exclude = False - -try: - import numpy -except ImportError: - exclude = True -else: - try: - import pygame.pixelcopy - except ImportError: - exclude = True - -if exclude: - __tags__.extend(('ignore', 'subprocess_ignore')) diff --git 
a/venv/lib/python3.7/site-packages/pygame/tests/surfarray_test.py b/venv/lib/python3.7/site-packages/pygame/tests/surfarray_test.py deleted file mode 100644 index 4f43fce..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/surfarray_test.py +++ /dev/null @@ -1,667 +0,0 @@ - -import unittest -import platform - -from numpy import \ - uint8, uint16, uint32, uint64, zeros, \ - float32, float64, alltrue, rint, arange - -import pygame -from pygame.locals import * - -import pygame.surfarray -arraytype = 'numpy' - - -IS_PYPY = 'PyPy' == platform.python_implementation() - - -@unittest.skipIf(IS_PYPY, 'pypy skip known failure') # TODO -class SurfarrayModuleTest (unittest.TestCase): - pixels2d = {8: True, 16: True, 24: False, 32: True} - pixels3d = {8: False, 16: False, 24: True, 32: True} - array2d = {8: True, 16: True, 24: True, 32: True} - array3d = {8: False, 16: False, 24: True, 32: True} - - test_palette = [(0, 0, 0, 255), - (10, 30, 60, 255), - (25, 75, 100, 255), - (100, 150, 200, 255), - (0, 100, 200, 255)] - surf_size = (10, 12) - test_points = [((0, 0), 1), ((4, 5), 1), ((9, 0), 2), - ((5, 5), 2), ((0, 11), 3), ((4, 6), 3), - ((9, 11), 4), ((5, 6), 4)] - - @classmethod - def setUpClass(cls): - # Needed for 8 bits-per-pixel color palette surface tests. - pygame.init() - - @classmethod - def tearDownClass(cls): - pygame.quit() - - def setUp(cls): - # This makes sure pygame is always initialized before each test (in - # case a test calls pygame.quit()). - if not pygame.get_init(): - pygame.init() - - # Makes sure the same array package is used each time. 
- pygame.surfarray.use_arraytype(arraytype) - - def _make_surface(self, bitsize, srcalpha=False, palette=None): - if palette is None: - palette = self.test_palette - flags = 0 - if srcalpha: - flags |= SRCALPHA - surf = pygame.Surface(self.surf_size, flags, bitsize) - if bitsize == 8: - surf.set_palette([c[:3] for c in palette]) - return surf - - def _fill_surface(self, surf, palette=None): - if palette is None: - palette = self.test_palette - surf.fill(palette[1], (0, 0, 5, 6)) - surf.fill(palette[2], (5, 0, 5, 6)) - surf.fill(palette[3], (0, 6, 5, 6)) - surf.fill(palette[4], (5, 6, 5, 6)) - - def _make_src_surface(self, bitsize, srcalpha=False, palette=None): - surf = self._make_surface(bitsize, srcalpha, palette) - self._fill_surface(surf, palette) - return surf - - def _assert_surface(self, surf, palette=None, msg=""): - if palette is None: - palette = self.test_palette - if surf.get_bitsize() == 16: - palette = [surf.unmap_rgb(surf.map_rgb(c)) for c in palette] - for posn, i in self.test_points: - self.assertEqual(surf.get_at(posn), palette[i], - "%s != %s: flags: %i, bpp: %i, posn: %s%s" % - (surf.get_at(posn), - palette[i], surf.get_flags(), - surf.get_bitsize(), posn, msg)) - - def _make_array3d(self, dtype): - return zeros((self.surf_size[0], self.surf_size[1], 3), dtype) - - def _fill_array2d(self, arr, surf): - palette = self.test_palette - arr[:5,:6] = surf.map_rgb(palette[1]) - arr[5:,:6] = surf.map_rgb(palette[2]) - arr[:5,6:] = surf.map_rgb(palette[3]) - arr[5:,6:] = surf.map_rgb(palette[4]) - - def _fill_array3d(self, arr): - palette = self.test_palette - arr[:5,:6] = palette[1][:3] - arr[5:,:6] = palette[2][:3] - arr[:5,6:] = palette[3][:3] - arr[5:,6:] = palette[4][:3] - - def _make_src_array3d(self, dtype): - arr = self._make_array3d(dtype) - self._fill_array3d(arr) - return arr - - def _make_array2d(self, dtype): - return zeros(self.surf_size, dtype) - - def test_array2d(self): - - sources = [self._make_src_surface(8), - 
self._make_src_surface(16), - self._make_src_surface(16, srcalpha=True), - self._make_src_surface(24), - self._make_src_surface(32), - self._make_src_surface(32, srcalpha=True)] - palette = self.test_palette - alpha_color = (0, 0, 0, 128) - - for surf in sources: - arr = pygame.surfarray.array2d(surf) - for posn, i in self.test_points: - self.assertEqual(arr[posn], surf.get_at_mapped(posn), - "%s != %s: flags: %i, bpp: %i, posn: %s" % - (arr[posn], - surf.get_at_mapped(posn), - surf.get_flags(), surf.get_bitsize(), - posn)) - - if surf.get_masks()[3]: - surf.fill(alpha_color) - arr = pygame.surfarray.array2d(surf) - posn = (0, 0) - self.assertEqual(arr[posn], surf.get_at_mapped(posn), - "%s != %s: bpp: %i" % - (arr[posn], - surf.get_at_mapped(posn), - surf.get_bitsize())) - - def test_array3d(self): - - sources = [self._make_src_surface(16), - self._make_src_surface(16, srcalpha=True), - self._make_src_surface(24), - self._make_src_surface(32), - self._make_src_surface(32, srcalpha=True)] - palette = self.test_palette - - for surf in sources: - arr = pygame.surfarray.array3d(surf) - def same_color(ac, sc): - return (ac[0] == sc[0] and - ac[1] == sc[1] and - ac[2] == sc[2]) - for posn, i in self.test_points: - self.assertTrue(same_color(arr[posn], surf.get_at(posn)), - "%s != %s: flags: %i, bpp: %i, posn: %s" % ( - tuple(arr[posn]), surf.get_at(posn), - surf.get_flags(), surf.get_bitsize(), - posn)) - - def test_array_alpha(self): - - palette = [(0, 0, 0, 0), - (10, 50, 100, 255), - (60, 120, 240, 130), - (64, 128, 255, 0), - (255, 128, 0, 65)] - targets = [self._make_src_surface(8, palette=palette), - self._make_src_surface(16, palette=palette), - self._make_src_surface(16, palette=palette, srcalpha=True), - self._make_src_surface(24, palette=palette), - self._make_src_surface(32, palette=palette), - self._make_src_surface(32, palette=palette, srcalpha=True)] - - for surf in targets: - p = palette - if surf.get_bitsize() == 16: - p = 
[surf.unmap_rgb(surf.map_rgb(c)) for c in p] - arr = pygame.surfarray.array_alpha(surf) - if surf.get_masks()[3]: - for (x, y), i in self.test_points: - self.assertEqual(arr[x, y], p[i][3], - ("%i != %i, posn: (%i, %i), " - "bitsize: %i" % - (arr[x, y], p[i][3], - x, y, - surf.get_bitsize()))) - else: - self.assertTrue(alltrue(arr == 255)) - - # No per-pixel alpha when blanket alpha is None. - for surf in targets: - blanket_alpha = surf.get_alpha() - surf.set_alpha(None) - arr = pygame.surfarray.array_alpha(surf) - self.assertTrue(alltrue(arr == 255), - "All alpha values should be 255 when" - " surf.set_alpha(None) has been set." - " bitsize: %i, flags: %i" % ( - surf.get_bitsize(), surf.get_flags())) - surf.set_alpha(blanket_alpha) - - # Bug for per-pixel alpha surface when blanket alpha 0. - for surf in targets: - blanket_alpha = surf.get_alpha() - surf.set_alpha(0) - arr = pygame.surfarray.array_alpha(surf) - if surf.get_masks()[3]: - self.assertFalse(alltrue(arr == 255), - "bitsize: %i, flags: %i" % - (surf.get_bitsize(), surf.get_flags())) - else: - self.assertTrue(alltrue(arr == 255), - "bitsize: %i, flags: %i" % ( - surf.get_bitsize(), surf.get_flags())) - surf.set_alpha(blanket_alpha) - - def test_array_colorkey(self): - - palette = [(0, 0, 0, 0), - (10, 50, 100, 255), - (60, 120, 240, 130), - (64, 128, 255, 0), - (255, 128, 0, 65)] - targets = [self._make_src_surface(8, palette=palette), - self._make_src_surface(16, palette=palette), - self._make_src_surface(16, palette=palette, srcalpha=True), - self._make_src_surface(24, palette=palette), - self._make_src_surface(32, palette=palette), - self._make_src_surface(32, palette=palette, srcalpha=True)] - - for surf in targets: - p = palette - if surf.get_bitsize() == 16: - p = [surf.unmap_rgb(surf.map_rgb(c)) for c in p] - surf.set_colorkey(None) - arr = pygame.surfarray.array_colorkey(surf) - self.assertTrue(alltrue(arr == 255)) - - for i in range(1, len(palette)): - surf.set_colorkey(p[i]) - alphas = [255] * 
len(p) - alphas[i] = 0 - arr = pygame.surfarray.array_colorkey(surf) - for (x, y), j in self.test_points: - self.assertEqual(arr[x, y], alphas[j], - ("%i != %i, posn: (%i, %i), " - "bitsize: %i" % - (arr[x, y], alphas[j], - x, y, - surf.get_bitsize()))) - - def test_blit_array(self): - - # bug 24 at http://pygame.motherhamster.org/bugzilla/ - if 'numpy' in pygame.surfarray.get_arraytypes(): - prev = pygame.surfarray.get_arraytype() - # This would raise exception: - # File "[...]\pygame\_numpysurfarray.py", line 381, in blit_array - # (array[:,:,1::3] >> losses[1] << shifts[1]) | \ - # TypeError: unsupported operand type(s) for >>: 'float' and 'int' - pygame.surfarray.use_arraytype('numpy') - s = pygame.Surface((10,10), 0, 24) - a = pygame.surfarray.array3d(s) - pygame.surfarray.blit_array(s, a) - prev = pygame.surfarray.use_arraytype(prev) - - # target surfaces - targets = [self._make_surface(8), - self._make_surface(16), - self._make_surface(16, srcalpha=True), - self._make_surface(24), - self._make_surface(32), - self._make_surface(32, srcalpha=True), - ] - - # source arrays - arrays3d = [] - dtypes = [(8, uint8), (16, uint16), (32, uint32)] - try: - dtypes.append((64, uint64)) - except NameError: - pass - arrays3d = [(self._make_src_array3d(dtype), None) - for __, dtype in dtypes] - for bitsize in [8, 16, 24, 32]: - palette = None - if bitsize == 16: - s = pygame.Surface((1,1), 0, 16) - palette = [s.unmap_rgb(s.map_rgb(c)) - for c in self.test_palette] - if self.pixels3d[bitsize]: - surf = self._make_src_surface(bitsize) - arr = pygame.surfarray.pixels3d(surf) - arrays3d.append((arr, palette)) - if self.array3d[bitsize]: - surf = self._make_src_surface(bitsize) - arr = pygame.surfarray.array3d(surf) - arrays3d.append((arr, palette)) - for sz, dtype in dtypes: - arrays3d.append((arr.astype(dtype), palette)) - - # tests on arrays - def do_blit(surf, arr): - pygame.surfarray.blit_array(surf, arr) - - for surf in targets: - bitsize = surf.get_bitsize() - for arr, 
palette in arrays3d: - surf.fill((0, 0, 0, 0)) - if bitsize == 8: - self.assertRaises(ValueError, do_blit, surf, arr) - else: - pygame.surfarray.blit_array(surf, arr) - self._assert_surface(surf, palette) - - if self.pixels2d[bitsize]: - surf.fill((0, 0, 0, 0)) - s = self._make_src_surface(bitsize, surf.get_flags() & SRCALPHA) - arr = pygame.surfarray.pixels2d(s) - pygame.surfarray.blit_array(surf, arr) - self._assert_surface(surf) - - if self.array2d[bitsize]: - s = self._make_src_surface(bitsize, surf.get_flags() & SRCALPHA) - arr = pygame.surfarray.array2d(s) - for sz, dtype in dtypes: - surf.fill((0, 0, 0, 0)) - if sz >= bitsize: - pygame.surfarray.blit_array(surf, arr.astype(dtype)) - self._assert_surface(surf) - else: - self.assertRaises(ValueError, do_blit, - surf, self._make_array2d(dtype)) - - # Check alpha for 2D arrays - surf = self._make_surface(16, srcalpha=True) - arr = zeros(surf.get_size(), uint16) - arr[...] = surf.map_rgb((0, 128, 255, 64)) - color = surf.unmap_rgb(arr[0, 0]) - pygame.surfarray.blit_array(surf, arr) - self.assertEqual(surf.get_at((5, 5)), color) - - surf = self._make_surface(32, srcalpha=True) - arr = zeros(surf.get_size(), uint32) - color = (0, 111, 255, 63) - arr[...] 
= surf.map_rgb(color) - pygame.surfarray.blit_array(surf, arr) - self.assertEqual(surf.get_at((5, 5)), color) - - # Check shifts - arr3d = self._make_src_array3d(uint8) - - shift_tests = [(16, - [12, 0, 8, 4], - [0xf000, 0xf, 0xf00, 0xf0]), - (24, - [16, 0, 8, 0], - [0xff0000, 0xff, 0xff00, 0]), - (32, - [0, 16, 24, 8], - [0xff, 0xff0000, 0xff000000, 0xff00])] - - for bitsize, shifts, masks in shift_tests: - surf = self._make_surface(bitsize, srcalpha=(shifts[3] != 0)) - palette = None - if bitsize == 16: - palette = [surf.unmap_rgb(surf.map_rgb(c)) - for c in self.test_palette] - surf.set_shifts(shifts) - surf.set_masks(masks) - pygame.surfarray.blit_array(surf, arr3d) - self._assert_surface(surf, palette) - - # Invalid arrays - surf = pygame.Surface((1,1), 0, 32) - t = 'abcd' - self.assertRaises(ValueError, do_blit, surf, t) - - surf_size = self.surf_size - surf = pygame.Surface(surf_size, 0, 32) - arr = zeros([surf_size[0], surf_size[1] + 1, 3], uint32) - self.assertRaises(ValueError, do_blit, surf, arr) - arr = zeros([surf_size[0] + 1, surf_size[1], 3], uint32) - self.assertRaises(ValueError, do_blit, surf, arr) - - surf = pygame.Surface((1, 4), 0, 32) - arr = zeros((4,), uint32) - self.assertRaises(ValueError, do_blit, surf, arr) - arr.shape = (1, 1, 1, 4) - self.assertRaises(ValueError, do_blit, surf, arr) - - # Issue #81: round from float to int - try: - rint - except NameError: - pass - else: - surf = pygame.Surface((10, 10), pygame.SRCALPHA, 32) - w, h = surf.get_size() - length = w * h - for dtype in [float32, float64]: - surf.fill((255, 255, 255, 0)) - farr = arange(0, length, dtype=dtype) - farr.shape = w, h - pygame.surfarray.blit_array(surf, farr) - for x in range(w): - for y in range(h): - self.assertEqual(surf.get_at_mapped((x, y)), - int(rint(farr[x, y]))) - - def test_get_arraytype(self): - array_type = pygame.surfarray.get_arraytype() - - self.assertEqual(array_type, 'numpy', - "unknown array type %s" % array_type) - - def 
test_get_arraytypes(self): - - arraytypes = pygame.surfarray.get_arraytypes() - self.assertIn('numpy', arraytypes) - - for atype in arraytypes: - self.assertEqual(atype, 'numpy', "unknown array type %s" % atype) - - def test_make_surface(self): - - # How does one properly test this with 2d arrays. It makes no sense - # since the pixel format is not entirely dependent on element size. - # Just make sure the surface pixel size is at least as large as the - # array element size I guess. - # - for bitsize, dtype in [(8, uint8), (16, uint16), (24, uint32)]: -## Even this simple assertion fails for 2d arrays. Where's the problem? -## surf = pygame.surfarray.make_surface(self._make_array2d(dtype)) -## self.assertGreaterEqual(surf.get_bitsize(), bitsize, -## "not %i >= %i)" % (surf.get_bitsize(), bitsize)) -## - surf = pygame.surfarray.make_surface(self._make_src_array3d(dtype)) - self._assert_surface(surf) - - # Issue #81: round from float to int - try: - rint - except NameError: - pass - else: - w = 9 - h = 11 - length = w * h - for dtype in [float32, float64]: - farr = arange(0, length, dtype=dtype) - farr.shape = w, h - surf = pygame.surfarray.make_surface(farr) - for x in range(w): - for y in range(h): - self.assertEqual(surf.get_at_mapped((x, y)), - int(rint(farr[x, y]))) - - def test_map_array(self): - - arr3d = self._make_src_array3d(uint8) - targets = [self._make_surface(8), - self._make_surface(16), - self._make_surface(16, srcalpha=True), - self._make_surface(24), - self._make_surface(32), - self._make_surface(32, srcalpha=True)] - palette = self.test_palette - - for surf in targets: - arr2d = pygame.surfarray.map_array(surf, arr3d) - for posn, i in self.test_points: - self.assertEqual(arr2d[posn], surf.map_rgb(palette[i]), - "%i != %i, bitsize: %i, flags: %i" % - (arr2d[posn], surf.map_rgb(palette[i]), - surf.get_bitsize(), surf.get_flags())) - - # Exception checks - self.assertRaises(ValueError, pygame.surfarray.map_array, - self._make_surface(32), - 
self._make_array2d(uint8)) - - def test_pixels2d(self): - - sources = [self._make_surface(8), - self._make_surface(16, srcalpha=True), - self._make_surface(32, srcalpha=True)] - - for surf in sources: - self.assertFalse(surf.get_locked()) - arr = pygame.surfarray.pixels2d(surf) - self.assertTrue(surf.get_locked()) - self._fill_array2d(arr, surf) - surf.unlock() - self.assertTrue(surf.get_locked()) - del arr - self.assertFalse(surf.get_locked()) - self.assertEqual(surf.get_locks(), ()) - self._assert_surface(surf) - - # Error checks - self.assertRaises(ValueError, - pygame.surfarray.pixels2d, - self._make_surface(24)) - - def test_pixels3d(self): - - sources = [self._make_surface(24), - self._make_surface(32)] - - for surf in sources: - self.assertFalse(surf.get_locked()) - arr = pygame.surfarray.pixels3d(surf) - self.assertTrue(surf.get_locked()) - self._fill_array3d(arr) - surf.unlock() - self.assertTrue(surf.get_locked()) - del arr - self.assertFalse(surf.get_locked()) - self.assertEqual(surf.get_locks(), ()) - self._assert_surface(surf) - - # Alpha check - color = (1, 2, 3, 0) - surf = self._make_surface(32, srcalpha=True) - arr = pygame.surfarray.pixels3d(surf) - arr[0,0] = color[:3] - self.assertEqual(surf.get_at((0, 0)), color) - - # Error checks - def do_pixels3d(surf): - pygame.surfarray.pixels3d(surf) - - self.assertRaises(ValueError, - do_pixels3d, - self._make_surface(8)) - self.assertRaises(ValueError, - do_pixels3d, - self._make_surface(16)) - - def test_pixels_alpha(self): - - palette = [(0, 0, 0, 0), - (127, 127, 127, 0), - (127, 127, 127, 85), - (127, 127, 127, 170), - (127, 127, 127, 255)] - alphas = [0, 45, 86, 99, 180] - - surf = self._make_src_surface(32, srcalpha=True, palette=palette) - - self.assertFalse(surf.get_locked()) - arr = pygame.surfarray.pixels_alpha(surf) - self.assertTrue(surf.get_locked()) - surf.unlock() - self.assertTrue(surf.get_locked()) - - for (x, y), i in self.test_points: - self.assertEqual(arr[x, y], palette[i][3]) - - 
for (x, y), i in self.test_points: - alpha = alphas[i] - arr[x, y] = alpha - color = (127, 127, 127, alpha) - self.assertEqual(surf.get_at((x, y)), color, - "posn: (%i, %i)" % (x, y)) - - del arr - self.assertFalse(surf.get_locked()) - self.assertEqual(surf.get_locks(), ()) - - # Check exceptions. - def do_pixels_alpha(surf): - pygame.surfarray.pixels_alpha(surf) - - targets = [(8, False), - (16, False), - (16, True), - (24, False), - (32, False)] - - for bitsize, srcalpha in targets: - self.assertRaises(ValueError, do_pixels_alpha, - self._make_surface(bitsize, srcalpha)) - - def test_pixels_red(self): - self._test_pixels_rgb('red', 0) - - def test_pixels_green(self): - self._test_pixels_rgb('green', 1) - - def test_pixels_blue(self): - self._test_pixels_rgb('blue', 2) - - def _test_pixels_rgb(self, operation, mask_posn): - method_name = "pixels_" + operation - - pixels_rgb = getattr(pygame.surfarray, method_name) - palette = [(0, 0, 0, 255), - (5, 13, 23, 255), - (29, 31, 37, 255), - (131, 157, 167, 255), - (179, 191, 251, 255)] - plane = [c[mask_posn] for c in palette] - - surf24 = self._make_src_surface(24, srcalpha=False, palette=palette) - surf32 = self._make_src_surface(32, srcalpha=False, palette=palette) - surf32a = self._make_src_surface(32, srcalpha=True, palette=palette) - - for surf in [surf24, surf32, surf32a]: - self.assertFalse(surf.get_locked()) - arr = pixels_rgb(surf) - self.assertTrue(surf.get_locked()) - surf.unlock() - self.assertTrue(surf.get_locked()) - - for (x, y), i in self.test_points: - self.assertEqual(arr[x, y], plane[i]) - - del arr - self.assertFalse(surf.get_locked()) - self.assertEqual(surf.get_locks(), ()) - - # Check exceptions. 
- targets = [(8, False), - (16, False), - (16, True)] - - for bitsize, srcalpha in targets: - self.assertRaises(ValueError, pixels_rgb, - self._make_surface(bitsize, srcalpha)) - - def test_use_arraytype(self): - - def do_use_arraytype(atype): - pygame.surfarray.use_arraytype(atype) - - pygame.surfarray.use_arraytype('numpy') - self.assertEqual(pygame.surfarray.get_arraytype(), 'numpy') - self.assertRaises(ValueError, do_use_arraytype, 'not an option') - - def test_surf_lock (self): - sf = pygame.Surface ((5, 5), 0, 32) - for atype in pygame.surfarray.get_arraytypes (): - pygame.surfarray.use_arraytype (atype) - - ar = pygame.surfarray.pixels2d (sf) - self.assertTrue(sf.get_locked()) - - sf.unlock () - self.assertTrue(sf.get_locked()) - - del ar - self.assertFalse(sf.get_locked()) - self.assertEqual(sf.get_locks(), ()) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/surflock_test.py b/venv/lib/python3.7/site-packages/pygame/tests/surflock_test.py deleted file mode 100644 index 931e965..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/surflock_test.py +++ /dev/null @@ -1,143 +0,0 @@ -import unittest -import sys -import platform - -import pygame - -IS_PYPY = 'PyPy' == platform.python_implementation() - -@unittest.skipIf(IS_PYPY, 'pypy skip known failure') # TODO -class SurfaceLockTest(unittest.TestCase): - - def test_lock(self): - sf = pygame.Surface((5, 5)) - - sf.lock() - self.assertEqual(sf.get_locked(), True) - self.assertEqual(sf.get_locks(), (sf,)) - - sf.lock() - self.assertEqual(sf.get_locked(), True) - self.assertEqual(sf.get_locks(), (sf, sf)) - - sf.unlock() - self.assertEqual(sf.get_locked(), True) - self.assertEqual(sf.get_locks(), (sf,)) - - sf.unlock() - self.assertEqual(sf.get_locked(), False) - self.assertEqual(sf.get_locks(), ()) - - def test_subsurface_lock(self): - sf = pygame.Surface((5, 5)) - subsf = sf.subsurface((1, 1, 2, 2)) - sf2 = pygame.Surface((5, 5)) - - # Simple 
blits, nothing should happen here. - sf2.blit(subsf, (0, 0)) - sf2.blit(sf, (0, 0)) - - # Test blitting on self: - self.assertRaises(pygame.error, sf.blit, subsf, (0, 0)) - #self.assertRaises(pygame.error, subsf.blit, sf, (0, 0)) - # ^ Fails although it should not in my opinion. If I cannot - # blit the subsurface to the surface, it should not be allowed - # the other way around as well. - - # Test additional locks. - sf.lock() - sf2.blit(subsf, (0, 0)) - self.assertRaises(pygame.error, sf2.blit, sf, (0, 0)) - - subsf.lock() - self.assertRaises(pygame.error, sf2.blit, subsf, (0, 0)) - self.assertRaises(pygame.error, sf2.blit, sf, (0, 0)) - - # sf and subsf are now explicitly locked. Unlock sf, so we can - # (assume) to blit it. - # It will fail though as the subsurface still has a lock around, - # which is okay and correct behaviour. - sf.unlock() - self.assertRaises(pygame.error, sf2.blit, subsf, (0, 0)) - self.assertRaises(pygame.error, sf2.blit, sf, (0, 0)) - - # Run a second unlock on the surface. This should ideally have - # no effect as the subsurface is the locking reason! 
- sf.unlock() - self.assertRaises(pygame.error, sf2.blit, sf, (0, 0)) - self.assertRaises(pygame.error, sf2.blit, subsf, (0, 0)) - subsf.unlock() - - sf.lock() - self.assertEqual(sf.get_locked(), True) - self.assertEqual(sf.get_locks(), (sf,)) - self.assertEqual(subsf.get_locked(), False) - self.assertEqual(subsf.get_locks(), ()) - - subsf.lock() - self.assertEqual(sf.get_locked(), True) - self.assertEqual(sf.get_locks(), (sf, subsf)) - self.assertEqual(subsf.get_locked(), True) - self.assertEqual(subsf.get_locks(), (subsf,)) - - sf.unlock() - self.assertEqual(sf.get_locked(), True) - self.assertEqual(sf.get_locks(), (subsf,)) - self.assertEqual(subsf.get_locked(), True) - self.assertEqual(subsf.get_locks(), (subsf,)) - - subsf.unlock() - self.assertEqual(sf.get_locked(), False) - self.assertEqual(sf.get_locks(), ()) - self.assertEqual(subsf.get_locked(), False) - self.assertEqual(subsf.get_locks(), ()) - - subsf.lock() - self.assertEqual(sf.get_locked(), True) - self.assertEqual(sf.get_locks(), (subsf,)) - self.assertEqual(subsf.get_locked(), True) - self.assertEqual(subsf.get_locks(), (subsf,)) - - subsf.lock() - self.assertEqual(sf.get_locked(), True) - self.assertEqual(sf.get_locks(), (subsf, subsf)) - self.assertEqual(subsf.get_locked(), True) - self.assertEqual(subsf.get_locks(), (subsf, subsf)) - - def test_pxarray_ref(self): - sf = pygame.Surface((5, 5)) - ar = pygame.PixelArray(sf) - ar2 = pygame.PixelArray(sf) - - self.assertEqual(sf.get_locked(), True) - self.assertEqual(sf.get_locks(), (ar, ar2)) - - del ar - self.assertEqual(sf.get_locked(), True) - self.assertEqual(sf.get_locks(), (ar2,)) - - ar = ar2[:] - self.assertEqual(sf.get_locked(), True) - self.assertEqual(sf.get_locks(), (ar2,)) - - del ar - self.assertEqual(sf.get_locked(), True) - self.assertEqual(len(sf.get_locks()), 1) - - def test_buffer(self): - sf = pygame.Surface((5, 5)) - buf = sf.get_buffer() - - self.assertEqual(sf.get_locked(), True) - self.assertEqual(sf.get_locks(), (buf,)) - - 
sf.unlock() - self.assertEqual(sf.get_locked(), True) - self.assertEqual(sf.get_locks(), (buf,)) - - del buf - self.assertEqual(sf.get_locked(), False) - self.assertEqual(sf.get_locks(), ()) - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/sysfont_test.py b/venv/lib/python3.7/site-packages/pygame/tests/sysfont_test.py deleted file mode 100644 index 9bfe623..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/sysfont_test.py +++ /dev/null @@ -1,30 +0,0 @@ -import unittest -import platform - -class SysfontModuleTest(unittest.TestCase): - def todo_test_create_aliases(self): - self.fail() - - def todo_test_initsysfonts(self): - self.fail() - - @unittest.skipIf('Darwin' not in platform.platform(), 'Not mac we skip.') - def test_initsysfonts_darwin(self): - import pygame.sysfont - self.assertTrue(len(pygame.sysfont.get_fonts()) > 10) - - def test_sysfont(self): - import pygame.font - pygame.font.init() - arial = pygame.font.SysFont('Arial', 40) - - def todo_test_initsysfonts_unix(self): - self.fail() - - def todo_test_initsysfonts_win32(self): - self.fail() - -################################################################################ - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/test_test_.py b/venv/lib/python3.7/site-packages/pygame/tests/test_test_.py deleted file mode 100644 index 5708909..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/test_test_.py +++ /dev/null @@ -1,3 +0,0 @@ -while True: - pass - \ No newline at end of file diff --git a/venv/lib/python3.7/site-packages/pygame/tests/test_utils/__init__.py b/venv/lib/python3.7/site-packages/pygame/tests/test_utils/__init__.py deleted file mode 100644 index 17d0cba..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/test_utils/__init__.py +++ /dev/null @@ -1,209 +0,0 @@ -#################################### IMPORTS ################################### - 
-is_pygame_pkg = __name__.startswith('pygame.tests.') - -import tempfile, sys, pygame, time, os - -################################################################################ -# Python 3.x compatibility -try: - xrange_ = xrange -except NameError: - xrange_ = range - -try: - raw_input_ = raw_input -except NameError: - raw_input_ = input - -def geterror(): - return sys.exc_info()[1] - - -class AssertRaisesRegexMixin(object): - """Provides a way to prevent DeprecationWarnings in python >= 3.2. - - For this mixin to override correctly it needs to be before the - unittest.TestCase in the multiple inheritance hierarchy. - e.g. class TestClass(AssertRaisesRegexMixin, unittest.TestCase) - - This class/mixin and its usage can be removed when pygame no longer - supports python < 3.2. - """ - def assertRaisesRegex(self, *args, **kwargs): - try: - return super(AssertRaisesRegexMixin, self).assertRaisesRegex( - *args, **kwargs) - except AttributeError: - try: - return super(AssertRaisesRegexMixin, self).assertRaisesRegexp( - *args, **kwargs) - except AttributeError: - self.skipTest( - 'No assertRaisesRegex/assertRaisesRegexp method') - - -################################################################################ - -this_dir = os.path.dirname(os.path.abspath(__file__)) -trunk_dir = os.path.split(os.path.split(this_dir)[0])[0] -if is_pygame_pkg: - test_module = 'tests' -else: - test_module = 'test' - -def trunk_relative_path(relative): - return os.path.normpath(os.path.join(trunk_dir, relative)) - -def fixture_path(path): - return trunk_relative_path(os.path.join(test_module, 'fixtures', path)) - -def example_path(path): - return trunk_relative_path(os.path.join('examples', path)) - -sys.path.insert(0, trunk_relative_path('.')) - - -################################## TEMP FILES ################################## - -def get_tmp_dir(): - return tempfile.mkdtemp() - -################################################################################ - -def question(q): - 
return raw_input_('\n%s (y/n): ' % q.rstrip(' ')).lower().strip() == 'y' - -def prompt(p): - return raw_input_('\n%s (press enter to continue): ' % p.rstrip(' ')) - -#################################### HELPERS ################################### - -def rgba_between(value, minimum=0, maximum=255): - if value < minimum: return minimum - elif value > maximum: return maximum - else: return value - -def combinations(seqs): - """ - - Recipe 496807 from ActiveState Python CookBook - - Non recursive technique for getting all possible combinations of a sequence - of sequences. - - """ - - r=[[]] - for x in seqs: - r = [ i + [y] for y in x for i in r ] - return r - -def gradient(width, height): - """ - - Yields a pt and corresponding RGBA tuple, for every (width, height) combo. - Useful for generating gradients. - - Actual gradient may be changed, no tests rely on specific values. - - Used in transform.rotate lossless tests to generate a fixture. - - """ - - for l in xrange_(width): - for t in xrange_(height): - yield (l,t), tuple(map(rgba_between, (l, t, l, l+t))) - - -def rect_area_pts(rect): - for l in xrange_(rect.left, rect.right): - for t in xrange_(rect.top, rect.bottom): - yield l, t - -def rect_perimeter_pts(rect): - """ - - Returns pts ((L, T) tuples) encompassing the perimeter of a rect. 
- - The order is clockwise: - - topleft to topright - topright to bottomright - bottomright to bottomleft - bottomleft to topleft - - Duplicate pts are not returned - - """ - clock_wise_from_top_left = ( - [(l, rect.top) for l in xrange_(rect.left, rect.right) ], - [(rect.right -1, t) for t in xrange_(rect.top + 1, rect.bottom) ], - [(l, rect.bottom -1) for l in xrange_(rect.right -2, rect.left -1, -1)], - [(rect.left, t) for t in xrange_(rect.bottom -2, rect.top, -1)] - ) - - for line in clock_wise_from_top_left: - for pt in line: yield pt - -def rect_outer_bounds(rect): - """ - - Returns topleft outerbound if possible and then the other pts, that are - "exclusive" bounds of the rect - - ?------O - |RECT| ?|0)uterbound - |----| - O O - - """ - return ( - (rect.left is not 0 and [(rect.left-1, rect.top)] or []) + - [ rect.topright, - rect.bottomleft, - rect.bottomright] - ) - -def import_submodule(module): - m = __import__(module) - for n in module.split('.')[1:]: - m = getattr(m, n) - return m - - -class SurfaceSubclass(pygame.Surface): - """A subclassed Surface to test inheritance.""" - def __init__(self, *args, **kwargs): - super(SurfaceSubclass, self).__init__(*args, **kwargs) - self.test_attribute = True - - -def test(): - """ - - Lightweight test for helpers - - """ - - r = pygame.Rect(0, 0, 10, 10) - assert ( - rect_outer_bounds ( r ) == [(10, 0), # tr - ( 0, 10), # bl - (10, 10)] # br - ) - - assert len(list(rect_area_pts(r))) == 100 - - - r = pygame.Rect(0, 0, 3, 3) - assert list(rect_perimeter_pts(r)) == [ - (0, 0), (1, 0), (2, 0), # tl -> tr - (2, 1), (2, 2), # tr -> br - (1, 2), (0, 2), # br -> bl - (0, 1) # bl -> tl - ] - - print ('Tests: OK') - -################################################################################ diff --git a/venv/lib/python3.7/site-packages/pygame/tests/test_utils/arrinter.py b/venv/lib/python3.7/site-packages/pygame/tests/test_utils/arrinter.py deleted file mode 100644 index b5808f9..0000000 --- 
a/venv/lib/python3.7/site-packages/pygame/tests/test_utils/arrinter.py +++ /dev/null @@ -1,398 +0,0 @@ -import sys -import ctypes -from ctypes import * -import unittest - -__all__ = ['PAI_CONTIGUOUS', 'PAI_FORTRAN', 'PAI_ALIGNED', - 'PAI_NOTSWAPPED', 'PAI_WRITEABLE', 'PAI_ARR_HAS_DESCR', - 'ArrayInterface',] - -try: - c_ssize_t # Undefined in early Python versions -except NameError: - if sizeof(c_uint) == sizeof(c_void_p): - c_size_t = c_uint - c_ssize_t = c_int - elif sizeof(c_ulong) == sizeof(c_void_p): - c_size_t = c_ulong - c_ssize_t = c_long - elif sizeof(c_ulonglong) == sizeof(c_void_p): - c_size_t = c_ulonglong - c_ssize_t = c_longlong - - -SIZEOF_VOID_P = sizeof(c_void_p) -if SIZEOF_VOID_P <= sizeof(c_int): - Py_intptr_t = c_int -elif SIZEOF_VOID_P <= sizeof(c_long): - Py_intptr_t = c_long -elif 'c_longlong' in globals() and SIZEOF_VOID_P <= sizeof(c_longlong): - Py_intptr_t = c_longlong -else: - raise RuntimeError("Unrecognized pointer size %i" % (pointer_size,)) - -class PyArrayInterface(Structure): - _fields_ = [('two', c_int), ('nd', c_int), ('typekind', c_char), - ('itemsize', c_int), ('flags', c_int), - ('shape', POINTER(Py_intptr_t)), - ('strides', POINTER(Py_intptr_t)), - ('data', c_void_p), ('descr', py_object)] - -PAI_Ptr = POINTER(PyArrayInterface) -try: - PyCObject_AsVoidPtr = pythonapi.PyCObject_AsVoidPtr -except AttributeError: - def PyCObject_AsVoidPtr(o): - raise TypeError("Not available") -else: - PyCObject_AsVoidPtr.restype = c_void_p - PyCObject_AsVoidPtr.argtypes = [py_object] - PyCObject_GetDesc = pythonapi.PyCObject_GetDesc - PyCObject_GetDesc.restype = c_void_p - PyCObject_GetDesc.argtypes = [py_object] -try: - PyCapsule_IsValid = pythonapi.PyCapsule_IsValid -except AttributeError: - def PyCapsule_IsValid(capsule, name): - return 0 -else: - PyCapsule_IsValid.restype = c_int - PyCapsule_IsValid.argtypes = [py_object, c_char_p] - PyCapsule_GetPointer = pythonapi.PyCapsule_GetPointer - PyCapsule_GetPointer.restype = c_void_p - 
PyCapsule_GetPointer.argtypes = [py_object, c_char_p] - PyCapsule_GetContext = pythonapi.PyCapsule_GetContext - PyCapsule_GetContext.restype = c_void_p - PyCapsule_GetContext.argtypes = [py_object] - -if sys.version_info >= (3,): # Python3 - PyCapsule_Destructor = CFUNCTYPE(None, py_object) - PyCapsule_New = pythonapi.PyCapsule_New - PyCapsule_New.restype = py_object - PyCapsule_New.argtypes = [c_void_p, c_char_p, POINTER(PyCapsule_Destructor)] - def capsule_new(p): - return PyCapsule_New(addressof(p), None, None) -else: - PyCObject_Destructor = CFUNCTYPE(None, c_void_p) - PyCObject_FromVoidPtr = pythonapi.PyCObject_FromVoidPtr - PyCObject_FromVoidPtr.restype = py_object - PyCObject_FromVoidPtr.argtypes = [c_void_p, POINTER(PyCObject_Destructor)] - def capsule_new(p): - return PyCObject_FromVoidPtr(addressof(p), None) - -PAI_CONTIGUOUS = 0x01 -PAI_FORTRAN = 0x02 -PAI_ALIGNED = 0x100 -PAI_NOTSWAPPED = 0x200 -PAI_WRITEABLE = 0x400 -PAI_ARR_HAS_DESCR = 0x800 - -class ArrayInterface(object): - def __init__(self, arr): - try: - self._cobj = arr.__array_struct__ - except AttributeError: - raise TypeError("The array object lacks an array structure") - if not self._cobj: - raise TypeError("The array object has a NULL array structure value") - try: - vp = PyCObject_AsVoidPtr(self._cobj) - except TypeError: - if PyCapsule_IsValid(self._cobj, None): - vp = PyCapsule_GetPointer(self._cobj, None) - else: - raise TypeError("The array object has an invalid array structure") - self.desc = PyCapsule_GetContext(self._cobj) - else: - self.desc = PyCObject_GetDesc(self._cobj) - self._inter = cast(vp, PAI_Ptr)[0] - - def __getattr__(self, name): - if (name == 'typekind'): - return self._inter.typekind.decode('latin-1') - return getattr(self._inter, name) - - def __str__(self): - if isinstance(self.desc, tuple): - ver = self.desc[0] - else: - ver = "N/A" - return ("nd: %i\n" - "typekind: %s\n" - "itemsize: %i\n" - "flags: %s\n" - "shape: %s\n" - "strides: %s\n" - "ver: %s\n" % - 
(self.nd, self.typekind, self.itemsize, - format_flags(self.flags), - format_shape(self.nd, self.shape), - format_strides(self.nd, self.strides), ver)) - -def format_flags(flags): - names = [] - for flag, name in [(PAI_CONTIGUOUS, 'CONTIGUOUS'), - (PAI_FORTRAN, 'FORTRAN'), - (PAI_ALIGNED, 'ALIGNED'), - (PAI_NOTSWAPPED, 'NOTSWAPPED'), - (PAI_WRITEABLE, 'WRITEABLE'), - (PAI_ARR_HAS_DESCR, 'ARR_HAS_DESCR')]: - if flag & flags: - names.append(name) - return ', '.join(names) - -def format_shape(nd, shape): - return ', '.join([str(shape[i]) for i in range(nd)]) - -def format_strides(nd, strides): - return ', '.join([str(strides[i]) for i in range(nd)]) - -class Exporter(object): - def __init__(self, shape, - typekind=None, itemsize=None, strides=None, - descr=None, flags=None): - if typekind is None: - typekind = 'u' - if itemsize is None: - itemsize = 1 - if flags is None: - flags = PAI_WRITEABLE | PAI_ALIGNED | PAI_NOTSWAPPED - if descr is not None: - flags |= PAI_ARR_HAS_DESCR - if len(typekind) != 1: - raise ValueError("Argument 'typekind' must be length 1 string") - nd = len(shape) - self.typekind = typekind - self.itemsize = itemsize - self.nd = nd - self.shape = tuple(shape) - self._shape = (c_ssize_t * self.nd)(*self.shape) - if strides is None: - self._strides = (c_ssize_t * self.nd)() - self._strides[self.nd - 1] = self.itemsize - for i in range(self.nd - 1, 0, -1): - self._strides[i - 1] = self.shape[i] * self._strides[i] - strides = tuple(self._strides) - self.strides = strides - elif len(strides) == nd: - self.strides = tuple(strides) - self._strides = (c_ssize_t * self.nd)(*self.strides) - else: - raise ValueError("Mismatch in length of strides and shape") - self.descr = descr - if self.is_contiguous('C'): - flags |= PAI_CONTIGUOUS - if self.is_contiguous('F'): - flags |= PAI_FORTRAN - self.flags = flags - sz = max(shape[i] * strides[i] for i in range(nd)) - self._data = (c_ubyte * sz)() - self.data = addressof(self._data) - self._inter = 
PyArrayInterface(2, nd, typekind.encode('latin_1'), - itemsize, flags, self._shape, - self._strides, self.data, descr) - self.len = itemsize - for i in range(nd): - self.len *= self.shape[i] - - __array_struct__ = property(lambda self: capsule_new(self._inter)) - - def is_contiguous(self, fortran): - if fortran in "CA": - if self.strides[-1] == self.itemsize: - for i in range(self.nd - 1, 0, -1): - if self.strides[i - 1] != self.shape[i] * self.strides[i]: - break - else: - return True - if fortran in "FA": - if self.strides[0] == self.itemsize: - for i in range(0, self.nd - 1): - if self.strides[i + 1] != self.shape[i] * self.strides[i]: - break - else: - return True - return False - -class Array(Exporter): - _ctypes = {('u', 1): c_uint8, - ('u', 2): c_uint16, - ('u', 4): c_uint32, - ('u', 8): c_uint64, - ('i', 1): c_int8, - ('i', 2): c_int16, - ('i', 4): c_int32, - ('i', 8): c_int64} - - def __init__(self, *args, **kwds): - super(Array, self).__init__(*args, **kwds) - try: - if self.flags & PAI_NOTSWAPPED: - ct = self._ctypes[self.typekind, self.itemsize] - elif c_int.__ctype_le__ is c_int: - ct = self._ctypes[self.typekind, self.itemsize].__ctype_be__ - else: - ct = self._ctypes[self.typekind, self.itemsize].__ctype_le__ - except KeyError: - ct = c_uint8 * self.itemsize - self._ctype = ct - self._ctype_p = POINTER(ct) - def __getitem__(self, key): - return cast(self._addr_at(key), self._ctype_p)[0] - def __setitem__(self, key, value): - cast(self._addr_at(key), self._ctype_p)[0] = value - def _addr_at(self, key): - if not isinstance(key, tuple): - key = key, - if len(key) != self.nd: - raise ValueError("wrong number of indexes") - for i in range(self.nd): - if not (0 <= key[i] < self.shape[i]): - raise IndexError("index {} out of range".format(i)) - return self.data + sum(i * s for i, s in zip(key, self.strides)) - -class ExporterTest(unittest.TestCase): - def test_strides(self): - self.check_args(0, (10,), 'u', (2,), 20, 20, 2) - self.check_args(0, (5, 3), 'u', 
(6, 2), 30, 30, 2) - self.check_args(0, (7, 3, 5), 'u', (30, 10, 2), 210, 210, 2) - self.check_args(0, (13, 5, 11, 3), 'u', (330, 66, 6, 2), 4290, 4290, 2) - self.check_args(3, (7, 3, 5), 'i', (2, 14, 42), 210, 210, 2) - self.check_args(3, (7, 3, 5), 'x', (2, 16, 48), 210, 240, 2) - self.check_args(3, (13, 5, 11, 3), '%', (440, 88, 8, 2), 4290, 5720, 2) - self.check_args(3, (7, 5), '-', (15, 3), 105, 105, 3) - self.check_args(3, (7, 5), '*', (3, 21), 105, 105, 3) - self.check_args(3, (7, 5), ' ', (3, 24), 105, 120, 3) - - def test_is_contiguous(self): - a = Exporter((10,), itemsize=2) - self.assertTrue(a.is_contiguous('C')) - self.assertTrue(a.is_contiguous('F')) - self.assertTrue(a.is_contiguous('A')) - a = Exporter((10, 4), itemsize=2) - self.assertTrue(a.is_contiguous('C')) - self.assertTrue(a.is_contiguous('A')) - self.assertFalse(a.is_contiguous('F')) - a = Exporter((13, 5, 11, 3), itemsize=2, strides=(330, 66, 6, 2)) - self.assertTrue(a.is_contiguous('C')) - self.assertTrue(a.is_contiguous('A')) - self.assertFalse(a.is_contiguous('F')) - a = Exporter((10, 4), itemsize=2, strides=(2, 20)) - self.assertTrue(a.is_contiguous('F')) - self.assertTrue(a.is_contiguous('A')) - self.assertFalse(a.is_contiguous('C')) - a = Exporter((13, 5, 11, 3), itemsize=2, strides=(2, 26, 130, 1430)) - self.assertTrue(a.is_contiguous('F')) - self.assertTrue(a.is_contiguous('A')) - self.assertFalse(a.is_contiguous('C')) - a = Exporter((2, 11, 6, 4), itemsize=2, strides=(576, 48, 8, 2)) - self.assertFalse(a.is_contiguous('A')) - a = Exporter((2, 11, 6, 4), itemsize=2, strides=(2, 4, 48, 288)) - self.assertFalse(a.is_contiguous('A')) - a = Exporter((3, 2, 2), itemsize=2, strides=(16, 8, 4)) - self.assertFalse(a.is_contiguous('A')) - a = Exporter((3, 2, 2), itemsize=2, strides=(4, 12, 24)) - self.assertFalse(a.is_contiguous('A')) - - def check_args(self, call_flags, - shape, typekind, strides, length, bufsize, itemsize, - offset=0): - if call_flags & 1: - typekind_arg = typekind - else: 
- typekind_arg = None - if call_flags & 2: - strides_arg = strides - else: - strides_arg = None - a = Exporter(shape, itemsize=itemsize, strides=strides_arg) - self.assertEqual(sizeof(a._data), bufsize) - self.assertEqual(a.data, ctypes.addressof(a._data) + offset) - m = ArrayInterface(a) - self.assertEqual(m.data, a.data) - self.assertEqual(m.itemsize, itemsize) - self.assertEqual(tuple(m.shape[0:m.nd]), shape) - self.assertEqual(tuple(m.strides[0:m.nd]), strides) - -class ArrayTest(unittest.TestCase): - - def __init__(self, *args, **kwds): - unittest.TestCase.__init__(self, *args, **kwds) - self.a = Array((20, 15), 'i', 4) - - def setUp(self): - # Every test starts with a zeroed array. - memset(self.a.data, 0, sizeof(self.a._data)) - - def test__addr_at(self): - a = self.a - self.assertEqual(a._addr_at((0, 0)), a.data) - self.assertEqual(a._addr_at((0, 1)), a.data + 4) - self.assertEqual(a._addr_at((1, 0)), a.data + 60) - self.assertEqual(a._addr_at((1, 1)), a.data + 64) - - def test_indices(self): - a = self.a - self.assertEqual(a[0, 0], 0) - self.assertEqual(a[19, 0], 0) - self.assertEqual(a[0, 14], 0) - self.assertEqual(a[19, 14], 0) - self.assertEqual(a[5, 8], 0) - a[0, 0] = 12 - a[5, 8] = 99 - self.assertEqual(a[0, 0], 12) - self.assertEqual(a[5, 8], 99) - self.assertRaises(IndexError, a.__getitem__, (-1, 0)) - self.assertRaises(IndexError, a.__getitem__, (0, -1)) - self.assertRaises(IndexError, a.__getitem__, (20, 0)) - self.assertRaises(IndexError, a.__getitem__, (0, 15)) - self.assertRaises(ValueError, a.__getitem__, 0) - self.assertRaises(ValueError, a.__getitem__, (0, 0, 0)) - a = Array((3,), 'i', 4) - a[1] = 333 - self.assertEqual(a[1], 333) - - def test_typekind(self): - a = Array((1,), 'i', 4) - self.assertTrue(a._ctype is c_int32) - self.assertTrue(a._ctype_p is POINTER(c_int32)) - a = Array((1,), 'u', 4) - self.assertTrue(a._ctype is c_uint32) - self.assertTrue(a._ctype_p is POINTER(c_uint32)) - a = Array((1,), 'f', 4) # float types unsupported: 
size system dependent - ct = a._ctype - self.assertTrue(issubclass(ct, ctypes.Array)) - self.assertEqual(sizeof(ct), 4) - - def test_itemsize(self): - for size in [1, 2, 4, 8]: - a = Array((1,), 'i', size) - ct = a._ctype - self.assertTrue(issubclass(ct, ctypes._SimpleCData)) - self.assertEqual(sizeof(ct), size) - - def test_oddball_itemsize(self): - for size in [3, 5, 6, 7, 9]: - a = Array((1,), 'i', size) - ct = a._ctype - self.assertTrue(issubclass(ct, ctypes.Array)) - self.assertEqual(sizeof(ct), size) - - def test_byteswapped(self): - a = Array((1,), 'u', 4, flags=(PAI_ALIGNED | PAI_WRITEABLE)) - ct = a._ctype - self.assertTrue(ct is not c_uint32) - if sys.byteorder == 'little': - self.assertTrue(ct is c_uint32.__ctype_be__) - else: - self.assertTrue(ct is c_uint32.__ctype_le__) - i = 0xa0b0c0d - n = c_uint32(i) - a[0] = i - self.assertEqual(a[0], i) - self.assertEqual(a._data[0:4], - cast(addressof(n), POINTER(c_uint8))[3:-1:-1]) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/test_utils/async_sub.py b/venv/lib/python3.7/site-packages/pygame/tests/test_utils/async_sub.py deleted file mode 100644 index 53d6483..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/test_utils/async_sub.py +++ /dev/null @@ -1,272 +0,0 @@ -################################################################################ -""" - -Modification of http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/440554 - -""" - -#################################### IMPORTS ################################### - -import os -import platform -import subprocess -import errno -import time -import sys -import unittest -import tempfile - -def geterror (): - return sys.exc_info()[1] - -if sys.version_info >= (3,): - null_byte = '\x00'.encode('ascii') -else: - null_byte = '\x00' - -if platform.system() == 'Windows': - if sys.version_info >= (3,): - # Test date should be in ascii. 
- def encode(s): - return s.encode('ascii') - - def decode(b): - return b.decode('ascii') - else: - # Strings only; do nothing - def encode(s): - return s - - def decode(b): - return b - - try: - import ctypes - from ctypes.wintypes import DWORD - kernel32 = ctypes.windll.kernel32 - TerminateProcess = ctypes.windll.kernel32.TerminateProcess - def WriteFile(handle, data, ol = None): - c_written = DWORD() - success = ctypes.windll.kernel32.WriteFile(handle, ctypes.create_string_buffer(encode(data)), len(data), ctypes.byref(c_written), ol) - return ctypes.windll.kernel32.GetLastError(), c_written.value - def ReadFile(handle, desired_bytes, ol = None): - c_read = DWORD() - buffer = ctypes.create_string_buffer(desired_bytes+1) - success = ctypes.windll.kernel32.ReadFile(handle, buffer, desired_bytes, ctypes.byref(c_read), ol) - buffer[c_read.value] = null_byte - return ctypes.windll.kernel32.GetLastError(), decode(buffer.value) - def PeekNamedPipe(handle, desired_bytes): - c_avail = DWORD() - c_message = DWORD() - if desired_bytes > 0: - c_read = DWORD() - buffer = ctypes.create_string_buffer(desired_bytes+1) - success = ctypes.windll.kernel32.PeekNamedPipe(handle, buffer, desired_bytes, ctypes.byref(c_read), ctypes.byref(c_avail), ctypes.byref(c_message)) - buffer[c_read.value] = null_byte - return decode(buffer.value), c_avail.value, c_message.value - else: - success = ctypes.windll.kernel32.PeekNamedPipe(handle, None, desired_bytes, None, ctypes.byref(c_avail), ctypes.byref(c_message)) - return "", c_avail.value, c_message.value - - except ImportError: - from win32file import ReadFile, WriteFile - from win32pipe import PeekNamedPipe - from win32api import TerminateProcess - import msvcrt - -else: - from signal import SIGINT, SIGTERM, SIGKILL - import select - import fcntl - -################################### CONSTANTS ################################## - -PIPE = subprocess.PIPE - -################################################################################ - 
-class Popen(subprocess.Popen): - def recv(self, maxsize=None): - return self._recv('stdout', maxsize) - - def recv_err(self, maxsize=None): - return self._recv('stderr', maxsize) - - def send_recv(self, input='', maxsize=None): - return self.send(input), self.recv(maxsize), self.recv_err(maxsize) - - def read_async(self, wait=.1, e=1, tr=5, stderr=0): - if tr < 1: - tr = 1 - x = time.time()+ wait - y = [] - r = '' - pr = self.recv - if stderr: - pr = self.recv_err - while time.time() < x or r: - r = pr() - if r is None: - if e: - raise Exception("Other end disconnected!") - else: - break - elif r: - y.append(r) - else: - time.sleep(max((x-time.time())/tr, 0)) - return ''.join(y) - - def send_all(self, data): - while len(data): - sent = self.send(data) - if sent is None: - raise Exception("Other end disconnected!") - data = buffer(data, sent) - - def get_conn_maxsize(self, which, maxsize): - if maxsize is None: - maxsize = 1024 - elif maxsize < 1: - maxsize = 1 - return getattr(self, which), maxsize - - def _close(self, which): - getattr(self, which).close() - setattr(self, which, None) - - if platform.system() == 'Windows': - def kill(self): - # Recipes - #http://me.in-berlin.de/doc/python/faq/windows.html#how-do-i-emulate-os-kill-in-windows - #http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/347462 - - """kill function for Win32""" - TerminateProcess(int(self._handle), 0) # returns None - - def send(self, input): - if not self.stdin: - return None - - try: - x = msvcrt.get_osfhandle(self.stdin.fileno()) - (errCode, written) = WriteFile(x, input) - except ValueError: - return self._close('stdin') - except (subprocess.pywintypes.error, Exception): - if geterror()[0] in (109, errno.ESHUTDOWN): - return self._close('stdin') - raise - - return written - - def _recv(self, which, maxsize): - conn, maxsize = self.get_conn_maxsize(which, maxsize) - if conn is None: - return None - - try: - x = msvcrt.get_osfhandle(conn.fileno()) - (read, nAvail, nMessage) = 
PeekNamedPipe(x, 0) - if maxsize < nAvail: - nAvail = maxsize - if nAvail > 0: - (errCode, read) = ReadFile(x, nAvail, None) - except ValueError: - return self._close(which) - except (subprocess.pywintypes.error, Exception): - if geterror()[0] in (109, errno.ESHUTDOWN): - return self._close(which) - raise - - if self.universal_newlines: - # Translate newlines. For Python 3.x assume read is text. - # If bytes then another solution is needed. - read = read.replace("\r\n", "\n").replace("\r", "\n") - return read - - else: - def kill(self): - for i, sig in enumerate([SIGTERM, SIGKILL] * 2): - if i % 2 == 0: os.kill(self.pid, sig) - time.sleep((i * (i % 2) / 5.0) + 0.01) - - killed_pid, stat = os.waitpid(self.pid, os.WNOHANG) - if killed_pid != 0: return - - def send(self, input): - if not self.stdin: - return None - - if not select.select([], [self.stdin], [], 0)[1]: - return 0 - - try: - written = os.write(self.stdin.fileno(), input) - except OSError: - if geterror()[0] == errno.EPIPE: #broken pipe - return self._close('stdin') - raise - - return written - - def _recv(self, which, maxsize): - conn, maxsize = self.get_conn_maxsize(which, maxsize) - if conn is None: - return None - - if not select.select([conn], [], [], 0)[0]: - return '' - - r = conn.read(maxsize) - if not r: - return self._close(which) - - if self.universal_newlines: - r = r.replace("\r\n", "\n").replace("\r", "\n") - return r - - -################################################################################ - -def proc_in_time_or_kill(cmd, time_out, wd = None, env = None): - proc = Popen ( - cmd, cwd = wd, env = env, - stdin = subprocess.PIPE, stdout = subprocess.PIPE, - stderr = subprocess.STDOUT, universal_newlines = 1 - ) - - ret_code = None - response = [] - - t = time.time() - while ret_code is None and ((time.time() -t) < time_out): - ret_code = proc.poll() - response += [proc.read_async(wait=0.1, e=0)] - - if ret_code is None: - ret_code = '"Process timed out (time_out = %s secs) ' % 
time_out - try: - proc.kill() - ret_code += 'and was successfully terminated"' - except Exception: - ret_code += ('and termination failed (exception: %s)"' % - (geterror(),)) - - return ret_code, ''.join(response) - -################################################################################ - -class AsyncTest(unittest.TestCase): - def test_proc_in_time_or_kill(self): - ret_code, response = proc_in_time_or_kill( - [sys.executable, '-c', 'while 1: pass'], time_out = 1 - ) - - self.assertIn('rocess timed out', ret_code) - self.assertIn('successfully terminated', ret_code) - -################################################################################ - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/test_utils/buftools.py b/venv/lib/python3.7/site-packages/pygame/tests/test_utils/buftools.py deleted file mode 100644 index 1e2ab93..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/test_utils/buftools.py +++ /dev/null @@ -1,555 +0,0 @@ -"""Module pygame.tests.test_utils.array - -Export the Exporter and Importer classes. - -Class Exporter has configurable shape and strides. Exporter objects -provide a convient target for unit tests on Pygame objects and functions that -import a new buffer interface. - -Class Importer imports a buffer interface with the given PyBUF_* flags. -It returns NULL Py_buffer fields as None. The shape, strides, and suboffsets -arrays are returned as tuples of ints. All Py_buffer field properties are -read-only. This class is useful in comparing exported buffer interfaces -with the actual request. The simular Python builtin memoryview currently -does not support configurable PyBUF_* flags. - -This module contains its own unit tests. 
When Pygame is installed, these tests -can be run with the following command line statement: - -python -m pygame.tests.test_utils.array - -""" -import pygame -if not pygame.HAVE_NEWBUF: - emsg = "This Pygame build does not support the new buffer protocol" - raise ImportError(emsg) -import pygame.newbuffer -from pygame.newbuffer import (PyBUF_SIMPLE, PyBUF_FORMAT, PyBUF_ND, - PyBUF_WRITABLE, PyBUF_STRIDES, PyBUF_C_CONTIGUOUS, - PyBUF_F_CONTIGUOUS, PyBUF_ANY_CONTIGUOUS, - PyBUF_INDIRECT, PyBUF_STRIDED, PyBUF_STRIDED_RO, - PyBUF_RECORDS, PyBUF_RECORDS_RO, PyBUF_FULL, - PyBUF_FULL_RO, PyBUF_CONTIG, PyBUF_CONTIG_RO) - -import unittest -import sys -import ctypes -import operator -try: - reduce -except NameError: - from functools import reduce - -__all__ = ["Exporter", "Importer"] - -try: - ctypes.c_ssize_t -except AttributeError: - void_p_sz = ctypes.sizeof(ctypes.c_void_p) - if ctypes.sizeof(ctypes.c_short) == void_p_sz: - ctypes.c_ssize_t = ctypes.c_short - elif ctypes.sizeof(ctypes.c_int) == void_p_sz: - ctypes.c_ssize_t = ctypes.c_int - elif ctypes.sizeof(ctypes.c_long) == void_p_sz: - ctypes.c_ssize_t = ctypes.c_long - elif ctypes.sizeof(ctypes.c_longlong) == void_p_sz: - ctypes.c_ssize_t = ctypes.c_longlong - else: - raise RuntimeError("Cannot set c_ssize_t: sizeof(void *) is %i" % - void_p_sz) - -def _prop_get(fn): - return property(fn) - -class Exporter(pygame.newbuffer.BufferMixin): - """An object that exports a multi-dimension new buffer interface - - The only array operation this type supports is to export a buffer. 
- """ - prefixes = {'@': '', '=': '=', '<': '=', '>': '=', '!': '=', - '2': '2', '3': '3', '4': '4', '5': '5', - '6': '6', '7': '7', '8': '8', '9': '9'} - types = {'c': ctypes.c_char, 'b': ctypes.c_byte, 'B': ctypes.c_ubyte, - '=c': ctypes.c_int8, '=b': ctypes.c_int8, '=B': ctypes.c_uint8, - '?': ctypes.c_bool, '=?': ctypes.c_int8, - 'h': ctypes.c_short, 'H': ctypes.c_ushort, - '=h': ctypes.c_int16, '=H': ctypes.c_uint16, - 'i': ctypes.c_int, 'I': ctypes.c_uint, - '=i': ctypes.c_int32, '=I': ctypes.c_uint32, - 'l': ctypes.c_long, 'L': ctypes.c_ulong, - '=l': ctypes.c_int32, '=L': ctypes.c_uint32, - 'q': ctypes.c_longlong, 'Q': ctypes.c_ulonglong, - '=q': ctypes.c_int64, '=Q': ctypes.c_uint64, - 'f': ctypes.c_float, 'd': ctypes.c_double, - 'P': ctypes.c_void_p, - 'x': ctypes.c_ubyte * 1, - '2x': ctypes.c_ubyte * 2, - '3x': ctypes.c_ubyte * 3, - '4x': ctypes.c_ubyte * 4, - '5x': ctypes.c_ubyte * 5, - '6x': ctypes.c_ubyte * 6, - '7x': ctypes.c_ubyte * 7, - '8x': ctypes.c_ubyte * 8, - '9x': ctypes.c_ubyte * 9} - - def __init__(self, - shape, - format=None, - strides=None, - readonly=None, - itemsize=None): - if format is None: - format = 'B' - if readonly is None: - readonly = False - prefix = '' - typecode = '' - i = 0 - if i < len(format): - try: - prefix = self.prefixes[format[i]] - i += 1 - except LookupError: - pass - if i < len(format) and format[i] == '1': - i += 1 - if i == len(format) - 1: - typecode = format[i] - if itemsize is None: - try: - itemsize = ctypes.sizeof(self.types[prefix + typecode]) - except KeyError: - raise ValueError("Unknown item format '" + format + "'") - self.readonly = bool(readonly) - self.format = format - self._format = ctypes.create_string_buffer(format.encode('latin_1')) - self.ndim = len(shape) - self.itemsize = itemsize - self.len = reduce(operator.mul, shape, 1) * self.itemsize - self.shape = tuple(shape) - self._shape = (ctypes.c_ssize_t * self.ndim)(*self.shape) - if strides is None: - self._strides = (ctypes.c_ssize_t * 
self.ndim)() - self._strides[self.ndim - 1] = itemsize - for i in range(self.ndim - 1, 0, -1): - self._strides[i - 1] = self.shape[i] * self._strides[i] - self.strides = tuple(self._strides) - elif len(strides) == self.ndim: - self.strides = tuple(strides) - self._strides = (ctypes.c_ssize_t * self.ndim)(*self.strides) - else: - raise ValueError("Mismatch in length of strides and shape") - buflen = max(d * abs(s) for d, s in zip(self.shape, self.strides)) - self.buflen = buflen - self._buf = (ctypes.c_ubyte * buflen)() - offset = sum((d - 1) * abs(s) - for d, s in zip(self.shape, self.strides) if s < 0) - self.buf = ctypes.addressof(self._buf) + offset - - def buffer_info(self): - return (addressof(self.buffer), self.shape[0]) - - def tobytes(self): - return cast(self.buffer, POINTER(c_char))[0:self._len] - - def __len__(self): - return self.shape[0] - - def _get_buffer(self, view, flags): - from ctypes import addressof - if (flags & PyBUF_WRITABLE) == PyBUF_WRITABLE and self.readonly: - raise BufferError("buffer is read-only") - if ((flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS and - not self.is_contiguous('C')): - raise BufferError("data is not C contiguous") - if ((flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS and - not self.is_contiguous('F')): - raise BufferError("data is not F contiguous") - if ((flags & PyBUF_ANY_CONTIGUOUS) == PyBUF_ANY_CONTIGUOUS and - not self.is_contiguous('A')): - raise BufferError("data is not contiguous") - view.buf = self.buf - view.readonly = self.readonly - view.len = self.len - if flags | PyBUF_WRITABLE == PyBUF_WRITABLE: - view.ndim = 0 - else: - view.ndim = self.ndim - view.itemsize = self.itemsize - if (flags & PyBUF_FORMAT) == PyBUF_FORMAT: - view.format = addressof(self._format) - else: - view.format = None - if (flags & PyBUF_ND) == PyBUF_ND: - view.shape = addressof(self._shape) - elif self.is_contiguous('C'): - view.shape = None - else: - raise BufferError( - "shape required for {} dimensional 
data".format(self.ndim)) - if (flags & PyBUF_STRIDES) == PyBUF_STRIDES: - view.strides = ctypes.addressof(self._strides) - elif view.shape is None or self.is_contiguous('C'): - view.strides = None - else: - raise BufferError("strides required for none C contiguous data") - view.suboffsets = None - view.internal = None - view.obj = self - - def is_contiguous(self, fortran): - if fortran in "CA": - if self.strides[-1] == self.itemsize: - for i in range(self.ndim - 1, 0, -1): - if self.strides[i - 1] != self.shape[i] * self.strides[i]: - break - else: - return True - if fortran in "FA": - if self.strides[0] == self.itemsize: - for i in range(0, self.ndim - 1): - if self.strides[i + 1] != self.shape[i] * self.strides[i]: - break - else: - return True - return False - - -class Importer(object): - """An object that imports a new buffer interface - - The fields of the Py_buffer C struct are exposed by identically - named Importer read-only properties. - """ - def __init__(self, obj, flags): - self._view = pygame.newbuffer.Py_buffer() - self._view.get_buffer(obj, flags) - @property - def obj(self): - """return object or None for NULL field""" - return self._view.obj - @property - def buf(self): - """return int or None for NULL field""" - return self._view.buf - @property - def len(self): - """return int""" - return self._view.len - @property - def readonly(self): - """return bool""" - return self._view.readonly - @property - def format(self): - """return bytes or None for NULL field""" - format_addr = self._view.format - if format_addr is None: - return None - return ctypes.cast(format_addr, ctypes.c_char_p).value.decode('ascii') - @property - def itemsize(self): - """return int""" - return self._view.itemsize - @property - def ndim(self): - """return int""" - return self._view.ndim - @property - def shape(self): - """return int tuple or None for NULL field""" - return self._to_ssize_tuple(self._view.shape) - @property - def strides(self): - """return int tuple or None for 
NULL field""" - return self._to_ssize_tuple(self._view.strides) - @property - def suboffsets(self): - """return int tuple or None for NULL field""" - return self._to_ssize_tuple(self._view.suboffsets) - @property - def internal(self): - """return int or None for NULL field""" - return self._view.internal - - def _to_ssize_tuple(self, addr): - from ctypes import cast, POINTER, c_ssize_t - - if addr is None: - return None - return tuple(cast(addr, POINTER(c_ssize_t))[0:self._view.ndim]) - - -class ExporterTest(unittest.TestCase): - """Class Exporter unit tests""" - def test_formats(self): - char_sz = ctypes.sizeof(ctypes.c_char) - short_sz = ctypes.sizeof(ctypes.c_short) - int_sz = ctypes.sizeof(ctypes.c_int) - long_sz = ctypes.sizeof(ctypes.c_long) - longlong_sz = ctypes.sizeof(ctypes.c_longlong) - float_sz = ctypes.sizeof(ctypes.c_float) - double_sz = ctypes.sizeof(ctypes.c_double) - voidp_sz = ctypes.sizeof(ctypes.c_void_p) - bool_sz = ctypes.sizeof(ctypes.c_bool) - - self.check_args(0, (1,), 'B', (1,), 1, 1, 1) - self.check_args(1, (1,), 'b', (1,), 1, 1, 1) - self.check_args(1, (1,), 'B', (1,), 1, 1, 1) - self.check_args(1, (1,), 'c', (char_sz,), char_sz, char_sz, char_sz) - self.check_args(1, (1,), 'h', (short_sz,), short_sz, short_sz, short_sz) - self.check_args(1, (1,), 'H', (short_sz,), short_sz, short_sz, short_sz) - self.check_args(1, (1,), 'i', (int_sz,), int_sz, int_sz, int_sz) - self.check_args(1, (1,), 'I', (int_sz,), int_sz, int_sz, int_sz) - self.check_args(1, (1,), 'l', (long_sz,), long_sz, long_sz, long_sz) - self.check_args(1, (1,), 'L', (long_sz,), long_sz, long_sz, long_sz) - self.check_args(1, (1,), 'q', (longlong_sz,), - longlong_sz, longlong_sz, longlong_sz) - self.check_args(1, (1,), 'Q', (longlong_sz,), - longlong_sz, longlong_sz, longlong_sz) - self.check_args(1, (1,), 'f', (float_sz,), float_sz, float_sz, float_sz) - self.check_args(1, (1,), 'd', (double_sz,), - double_sz, double_sz, double_sz) - self.check_args(1, (1,), 'x', (1,), 1, 1, 
1) - self.check_args(1, (1,), 'P', (voidp_sz,), voidp_sz, voidp_sz, voidp_sz) - self.check_args(1, (1,), '?', (bool_sz,), bool_sz, bool_sz, bool_sz) - self.check_args(1, (1,), '@b', (1,), 1, 1, 1) - self.check_args(1, (1,), '@B', (1,), 1, 1, 1) - self.check_args(1, (1,), '@c', (char_sz,), char_sz, char_sz, char_sz) - self.check_args(1, (1,), '@h', (short_sz,), - short_sz, short_sz, short_sz) - self.check_args(1, (1,), '@H', (short_sz,), - short_sz, short_sz, short_sz) - self.check_args(1, (1,), '@i', (int_sz,), int_sz, int_sz, int_sz) - self.check_args(1, (1,), '@I', (int_sz,), int_sz, int_sz, int_sz) - self.check_args(1, (1,), '@l', (long_sz,), long_sz, long_sz, long_sz) - self.check_args(1, (1,), '@L', (long_sz,), long_sz, long_sz, long_sz) - self.check_args(1, (1,), '@q', - (longlong_sz,), longlong_sz, longlong_sz, longlong_sz) - self.check_args(1, (1,), '@Q', (longlong_sz,), - longlong_sz, longlong_sz, longlong_sz) - self.check_args(1, (1,), '@f', (float_sz,), - float_sz, float_sz, float_sz) - self.check_args(1, (1,), '@d', (double_sz,), - double_sz, double_sz, double_sz) - self.check_args(1, (1,), '@?', (bool_sz,), bool_sz, bool_sz, bool_sz) - self.check_args(1, (1,), '=b', (1,), 1, 1, 1) - self.check_args(1, (1,), '=B', (1,), 1, 1, 1) - self.check_args(1, (1,), '=c', (1,), 1, 1, 1) - self.check_args(1, (1,), '=h', (2,), 2, 2, 2) - self.check_args(1, (1,), '=H', (2,), 2, 2, 2) - self.check_args(1, (1,), '=i', (4,), 4, 4, 4) - self.check_args(1, (1,), '=I', (4,), 4, 4, 4) - self.check_args(1, (1,), '=l', (4,), 4, 4, 4) - self.check_args(1, (1,), '=L', (4,), 4, 4, 4) - self.check_args(1, (1,), '=q', (8,), 8, 8, 8) - self.check_args(1, (1,), '=Q', (8,), 8, 8, 8) - self.check_args(1, (1,), '=?', (1,), 1, 1, 1) - self.check_args(1, (1,), 'h', (2,), 2, 2, 2) - self.check_args(1, (1,), '!h', (2,), 2, 2, 2) - self.check_args(1, (1,), 'q', (8,), 8, 8, 8) - self.check_args(1, (1,), '!q', (8,), 8, 8, 8) - self.check_args(1, (1,), '1x', (1,), 1, 1, 1) - self.check_args(1, 
(1,), '2x', (2,), 2, 2, 2) - self.check_args(1, (1,), '3x', (3,), 3, 3, 3) - self.check_args(1, (1,), '4x', (4,), 4, 4, 4) - self.check_args(1, (1,), '5x', (5,), 5, 5, 5) - self.check_args(1, (1,), '6x', (6,), 6, 6, 6) - self.check_args(1, (1,), '7x', (7,), 7, 7, 7) - self.check_args(1, (1,), '8x', (8,), 8, 8, 8) - self.check_args(1, (1,), '9x', (9,), 9, 9, 9) - self.check_args(1, (1,), '1h', (2,), 2, 2, 2) - self.check_args(1, (1,), '=1h', (2,), 2, 2, 2) - self.assertRaises(ValueError, Exporter, (2, 1), '') - self.assertRaises(ValueError, Exporter, (2, 1), 'W') - self.assertRaises(ValueError, Exporter, (2, 1), '^Q') - self.assertRaises(ValueError, Exporter, (2, 1), '=W') - self.assertRaises(ValueError, Exporter, (2, 1), '=f') - self.assertRaises(ValueError, Exporter, (2, 1), '=d') - self.assertRaises(ValueError, Exporter, (2, 1), 'f') - self.assertRaises(ValueError, Exporter, (2, 1), '>d') - self.assertRaises(ValueError, Exporter, (2, 1), '!f') - self.assertRaises(ValueError, Exporter, (2, 1), '!d') - self.assertRaises(ValueError, Exporter, (2, 1), '0x') - self.assertRaises(ValueError, Exporter, (2, 1), '11x') - self.assertRaises(ValueError, Exporter, (2, 1), 'BB') - - def test_strides(self): - self.check_args(1, (10,), '=h', (2,), 20, 20, 2) - self.check_args(1, (5, 3), '=h', (6, 2), 30, 30, 2) - self.check_args(1, (7, 3, 5), '=h', (30, 10, 2), 210, 210, 2) - self.check_args(1, (13, 5, 11, 3), '=h', (330, 66, 6, 2), 4290, 4290, 2) - self.check_args(3, (7, 3, 5), '=h', (2, 14, 42), 210, 210, 2) - self.check_args(3, (7, 3, 5), '=h', (2, 16, 48), 210, 240, 2) - self.check_args(3, (13, 5, 11, 3), '=h', (440, 88, 8, 2), 4290, 5720, 2) - self.check_args(3, (7, 5), '3x', (15, 3), 105, 105, 3) - self.check_args(3, (7, 5), '3x', (3, 21), 105, 105, 3) - self.check_args(3, (7, 5), '3x', (3, 24), 105, 120, 3) - - def test_readonly(self): - a = Exporter((2,), 'h', readonly=True) - self.assertTrue(a.readonly) - b = Importer(a, PyBUF_STRIDED_RO) - self.assertRaises(BufferError, 
Importer, a, PyBUF_STRIDED) - b = Importer(a, PyBUF_STRIDED_RO) - - def test_is_contiguous(self): - a = Exporter((10,), '=h') - self.assertTrue(a.is_contiguous('C')) - self.assertTrue(a.is_contiguous('F')) - self.assertTrue(a.is_contiguous('A')) - a = Exporter((10, 4), '=h') - self.assertTrue(a.is_contiguous('C')) - self.assertTrue(a.is_contiguous('A')) - self.assertFalse(a.is_contiguous('F')) - a = Exporter((13, 5, 11, 3), '=h', (330, 66, 6, 2)) - self.assertTrue(a.is_contiguous('C')) - self.assertTrue(a.is_contiguous('A')) - self.assertFalse(a.is_contiguous('F')) - a = Exporter((10, 4), '=h', (2, 20)) - self.assertTrue(a.is_contiguous('F')) - self.assertTrue(a.is_contiguous('A')) - self.assertFalse(a.is_contiguous('C')) - a = Exporter((13, 5, 11, 3), '=h', (2, 26, 130, 1430)) - self.assertTrue(a.is_contiguous('F')) - self.assertTrue(a.is_contiguous('A')) - self.assertFalse(a.is_contiguous('C')) - a = Exporter((2, 11, 6, 4), '=h', (576, 48, 8, 2)) - self.assertFalse(a.is_contiguous('A')) - a = Exporter((2, 11, 6, 4), '=h', (2, 4, 48, 288)) - self.assertFalse(a.is_contiguous('A')) - a = Exporter((3, 2, 2), '=h', (16, 8, 4)) - self.assertFalse(a.is_contiguous('A')) - a = Exporter((3, 2, 2), '=h', (4, 12, 24)) - self.assertFalse(a.is_contiguous('A')) - - def test_PyBUF_flags(self): - a = Exporter((10, 2), 'd') - b = Importer(a, PyBUF_SIMPLE) - self.assertTrue(b.obj is a) - self.assertTrue(b.format is None) - self.assertEqual(b.len, a.len) - self.assertEqual(b.itemsize, a.itemsize) - self.assertTrue(b.shape is None) - self.assertTrue(b.strides is None) - self.assertTrue(b.suboffsets is None) - self.assertTrue(b.internal is None) - self.assertFalse(b.readonly) - b = Importer(a, PyBUF_WRITABLE) - self.assertTrue(b.obj is a) - self.assertTrue(b.format is None) - self.assertEqual(b.len, a.len) - self.assertEqual(b.itemsize, a.itemsize) - self.assertTrue(b.shape is None) - self.assertTrue(b.strides is None) - self.assertTrue(b.suboffsets is None) - 
self.assertTrue(b.internal is None) - self.assertFalse(b.readonly) - b = Importer(a, PyBUF_ND) - self.assertTrue(b.obj is a) - self.assertTrue(b.format is None) - self.assertEqual(b.len, a.len) - self.assertEqual(b.itemsize, a.itemsize) - self.assertEqual(b.shape, a.shape) - self.assertTrue(b.strides is None) - self.assertTrue(b.suboffsets is None) - self.assertTrue(b.internal is None) - self.assertFalse(b.readonly) - a = Exporter((5, 10), '=h', (24, 2)) - b = Importer(a, PyBUF_STRIDES) - self.assertTrue(b.obj is a) - self.assertTrue(b.format is None) - self.assertEqual(b.len, a.len) - self.assertEqual(b.itemsize, a.itemsize) - self.assertEqual(b.shape, a.shape) - self.assertEqual(b.strides, a.strides) - self.assertTrue(b.suboffsets is None) - self.assertTrue(b.internal is None) - self.assertFalse(b.readonly) - b = Importer(a, PyBUF_FULL) - self.assertTrue(b.obj is a) - self.assertEqual(b.format, '=h') - self.assertEqual(b.len, a.len) - self.assertEqual(b.itemsize, a.itemsize) - self.assertEqual(b.shape, a.shape) - self.assertEqual(b.strides, a.strides) - self.assertTrue(b.suboffsets is None) - self.assertTrue(b.internal is None) - self.assertFalse(b.readonly) - self.assertRaises(BufferError, Importer, a, PyBUF_SIMPLE) - self.assertRaises(BufferError, Importer, a, PyBUF_WRITABLE) - self.assertRaises(BufferError, Importer, a, PyBUF_ND) - self.assertRaises(BufferError, Importer, a, PyBUF_C_CONTIGUOUS) - self.assertRaises(BufferError, Importer, a, PyBUF_F_CONTIGUOUS) - self.assertRaises(BufferError, Importer, a, PyBUF_ANY_CONTIGUOUS) - self.assertRaises(BufferError, Importer, a, PyBUF_CONTIG) - - def test_negative_strides(self): - self.check_args(3, (3, 5, 4), 'B', (20, 4, -1), 60, 60, 1, 3) - self.check_args(3, (3, 5, 3), 'B', (20, 4, -1), 45, 60, 1, 2) - self.check_args(3, (3, 5, 4), 'B', (20, -4, 1), 60, 60, 1, 16) - self.check_args(3, (3, 5, 4), 'B', (-20, -4, -1), 60, 60, 1, 59) - self.check_args(3, (3, 5, 3), 'B', (-20, -4, -1), 45, 60, 1, 58) - - def 
test_attributes(self): - a = Exporter((13, 5, 11, 3), '=h', (440, 88, 8, 2)) - self.assertEqual(a.ndim, 4) - self.assertEqual(a.itemsize, 2) - self.assertFalse(a.readonly) - self.assertEqual(a.shape, (13, 5, 11, 3)) - self.assertEqual(a.format, '=h') - self.assertEqual(a.strides, (440, 88, 8, 2)) - self.assertEqual(a.len, 4290) - self.assertEqual(a.buflen, 5720) - self.assertEqual(a.buf, ctypes.addressof(a._buf)) - a = Exporter((8,)) - self.assertEqual(a.ndim, 1) - self.assertEqual(a.itemsize, 1) - self.assertFalse(a.readonly) - self.assertEqual(a.shape, (8,)) - self.assertEqual(a.format, 'B') - self.assertTrue(isinstance(a.strides, tuple)) - self.assertEqual(a.strides, (1,)) - self.assertEqual(a.len, 8) - self.assertEqual(a.buflen, 8) - a = Exporter([13, 5, 11, 3], '=h', [440, 88, 8, 2]) - self.assertTrue(isinstance(a.shape, tuple)) - self.assertTrue(isinstance(a.strides, tuple)) - self.assertEqual(a.shape, (13, 5, 11, 3)) - self.assertEqual(a.strides, (440, 88, 8, 2)) - - def test_itemsize(self): - exp = Exporter((4, 5), format='B', itemsize=8) - imp = Importer(exp, PyBUF_RECORDS) - self.assertEqual(imp.itemsize, 8) - self.assertEqual(imp.format, 'B') - self.assertEqual(imp.strides, (40, 8)) - exp = Exporter((4, 5), format='weird', itemsize=5) - imp = Importer(exp, PyBUF_RECORDS) - self.assertEqual(imp.itemsize, 5) - self.assertEqual(imp.format, 'weird') - self.assertEqual(imp.strides, (25, 5)) - - def check_args(self, call_flags, - shape, format, strides, length, bufsize, itemsize, - offset=0): - format_arg = format if call_flags & 1 else None - strides_arg = strides if call_flags & 2 else None - a = Exporter(shape, format_arg, strides_arg) - self.assertEqual(a.buflen, bufsize) - self.assertEqual(a.buf, ctypes.addressof(a._buf) + offset) - m = Importer(a, PyBUF_RECORDS_RO) - self.assertEqual(m.buf, a.buf) - self.assertEqual(m.len, length) - self.assertEqual(m.format, format) - self.assertEqual(m.itemsize, itemsize) - self.assertEqual(m.shape, shape) - 
self.assertEqual(m.strides, strides) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/test_utils/endian.py b/venv/lib/python3.7/site-packages/pygame/tests/test_utils/endian.py deleted file mode 100644 index ae8fc19..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/test_utils/endian.py +++ /dev/null @@ -1,18 +0,0 @@ -# Module pygame.tests.test_utils.endian -# -# Machine independent conversion to little-endian and big-endian Python -# integer values. - -import struct - -def little_endian_uint32(i): - """Return the 32 bit unsigned integer little-endian representation of i""" - - s = struct.pack('I', i) - return struct.unpack('=I', s)[0] diff --git a/venv/lib/python3.7/site-packages/pygame/tests/test_utils/png.py b/venv/lib/python3.7/site-packages/pygame/tests/test_utils/png.py deleted file mode 100644 index 9ca9539..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/test_utils/png.py +++ /dev/null @@ -1,3671 +0,0 @@ -#!/usr/bin/env python - -# $URL: http://pypng.googlecode.com/svn/trunk/code/png.py $ -# $Rev: 228 $ - -# png.py - PNG encoder/decoder in pure Python -# -# Modified for Pygame in Oct., 2012 to work with Python 3.x. -# -# Copyright (C) 2006 Johann C. Rocholl -# Portions Copyright (C) 2009 David Jones -# And probably portions Copyright (C) 2006 Nicko van Someren -# -# Original concept by Johann C. Rocholl. 
-# -# LICENSE (The MIT License) -# -# Permission is hereby granted, free of charge, to any person -# obtaining a copy of this software and associated documentation files -# (the "Software"), to deal in the Software without restriction, -# including without limitation the rights to use, copy, modify, merge, -# publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, -# subject to the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -# -# Changelog (recent first): -# 2009-03-11 David: interlaced bit depth < 8 (writing). -# 2009-03-10 David: interlaced bit depth < 8 (reading). -# 2009-03-04 David: Flat and Boxed pixel formats. -# 2009-02-26 David: Palette support (writing). -# 2009-02-23 David: Bit-depths < 8; better PNM support. -# 2006-06-17 Nicko: Reworked into a class, faster interlacing. -# 2006-06-17 Johann: Very simple prototype PNG decoder. -# 2006-06-17 Nicko: Test suite with various image generators. -# 2006-06-17 Nicko: Alpha-channel, grey-scale, 16-bit/plane support. -# 2006-06-15 Johann: Scanline iterator interface for large input files. -# 2006-06-09 Johann: Very simple prototype PNG encoder. 
- -# Incorporated into Bangai-O Development Tools by drj on 2009-02-11 from -# http://trac.browsershots.org/browser/trunk/pypng/lib/png.py?rev=2885 - -# Incorporated into pypng by drj on 2009-03-12 from -# //depot/prj/bangaio/master/code/png.py#67 - - -""" -Pure Python PNG Reader/Writer - -This Python module implements support for PNG images (see PNG -specification at http://www.w3.org/TR/2003/REC-PNG-20031110/ ). It reads -and writes PNG files with all allowable bit depths (1/2/4/8/16/24/32/48/64 -bits per pixel) and colour combinations: greyscale (1/2/4/8/16 bit); RGB, -RGBA, LA (greyscale with alpha) with 8/16 bits per channel; colour mapped -images (1/2/4/8 bit). Adam7 interlacing is supported for reading and -writing. A number of optional chunks can be specified (when writing) -and understood (when reading): ``tRNS``, ``bKGD``, ``gAMA``. - -For help, type ``import png; help(png)`` in your python interpreter. - -A good place to start is the :class:`Reader` and :class:`Writer` classes. - -This file can also be used as a command-line utility to convert -`Netpbm `_ PNM files to PNG, and the reverse conversion from PNG to -PNM. The interface is similar to that of the ``pnmtopng`` program from -Netpbm. Type ``python png.py --help`` at the shell prompt -for usage and a list of options. - -A note on spelling and terminology ----------------------------------- - -Generally British English spelling is used in the documentation. So -that's "greyscale" and "colour". This not only matches the author's -native language, it's also used by the PNG specification. - -The major colour models supported by PNG (and hence by PyPNG) are: -greyscale, RGB, greyscale--alpha, RGB--alpha. These are sometimes -referred to using the abbreviations: L, RGB, LA, RGBA. 
In this case -each letter abbreviates a single channel: *L* is for Luminance or Luma or -Lightness which is the channel used in greyscale images; *R*, *G*, *B* stand -for Red, Green, Blue, the components of a colour image; *A* stands for -Alpha, the opacity channel (used for transparency effects, but higher -values are more opaque, so it makes sense to call it opacity). - -A note on formats ------------------ - -When getting pixel data out of this module (reading) and presenting -data to this module (writing) there are a number of ways the data could -be represented as a Python value. Generally this module uses one of -three formats called "flat row flat pixel", "boxed row flat pixel", and -"boxed row boxed pixel". Basically the concern is whether each pixel -and each row comes in its own little tuple (box), or not. - -Consider an image that is 3 pixels wide by 2 pixels high, and each pixel -has RGB components: - -Boxed row flat pixel:: - - list([R,G,B, R,G,B, R,G,B], - [R,G,B, R,G,B, R,G,B]) - -Each row appears as its own list, but the pixels are flattened so that -three values for one pixel simply follow the three values for the previous -pixel. This is the most common format used, because it provides a good -compromise between space and convenience. PyPNG regards itself as -at liberty to replace any sequence type with any sufficiently compatible -other sequence type; in practice each row is an array (from the array -module), and the outer list is sometimes an iterator rather than an -explicit list (so that streaming is possible). - -Flat row flat pixel:: - - [R,G,B, R,G,B, R,G,B, - R,G,B, R,G,B, R,G,B] - -The entire image is one single giant sequence of colour values. -Generally an array will be used (to save space), not a list. - -Boxed row boxed pixel:: - - list([ (R,G,B), (R,G,B), (R,G,B) ], - [ (R,G,B), (R,G,B), (R,G,B) ]) - -Each row appears in its own list, but each pixel also appears in its own -tuple. A serious memory burn in Python. 
- -In all cases the top row comes first, and for each row the pixels are -ordered from left-to-right. Within a pixel the values appear in the -order, R-G-B-A (or L-A for greyscale--alpha). - -There is a fourth format, mentioned because it is used internally, -is close to what lies inside a PNG file itself, and has some support -from the public API. This format is called packed. When packed, -each row is a sequence of bytes (integers from 0 to 255), just as -it is before PNG scanline filtering is applied. When the bit depth -is 8 this is essentially the same as boxed row flat pixel; when the -bit depth is less than 8, several pixels are packed into each byte; -when the bit depth is 16 (the only value more than 8 that is supported -by the PNG image format) each pixel value is decomposed into 2 bytes -(and `packed` is a misnomer). This format is used by the -:meth:`Writer.write_packed` method. It isn't usually a convenient -format, but may be just right if the source data for the PNG image -comes from something that uses a similar format (for example, 1-bit -BMPs, or another PNG file). - -And now, my famous members --------------------------- -""" - -__version__ = "$URL: http://pypng.googlecode.com/svn/trunk/code/png.py $ $Rev: 228 $" - -from pygame.compat import geterror, imap_ -from array import array -import itertools -import math -import operator -import struct -import sys -import zlib -import warnings - - -__all__ = ['Image', 'Reader', 'Writer', 'write_chunks', 'from_array'] - - -# The PNG signature. -# http://www.w3.org/TR/PNG/#5PNG-file-signature -_signature = struct.pack('8B', 137, 80, 78, 71, 13, 10, 26, 10) - -_adam7 = ((0, 0, 8, 8), - (4, 0, 8, 8), - (0, 4, 4, 8), - (2, 0, 4, 4), - (0, 2, 2, 4), - (1, 0, 2, 2), - (0, 1, 1, 2)) - -def group(s, n): - # See - # http://www.python.org/doc/2.6/library/functions.html#zip - return zip(*[iter(s)]*n) - -def isarray(x): - """Same as ``isinstance(x, array)``. 
- """ - return isinstance(x, array) - - -def tostring(row): - """Convert row of bytes to string. Expects `row` to be an - ``array``. - """ - return row.tostring() - -# Conditionally convert to bytes. Works on Python 2 and Python 3. -try: - bytes('', 'ascii') - def strtobytes(x): return bytes(x, 'iso8859-1') - def bytestostr(x): return str(x, 'iso8859-1') -except: - strtobytes = str - bytestostr = str - -def interleave_planes(ipixels, apixels, ipsize, apsize): - """ - Interleave (colour) planes, e.g. RGB + A = RGBA. - - Return an array of pixels consisting of the `ipsize` elements of data - from each pixel in `ipixels` followed by the `apsize` elements of data - from each pixel in `apixels`. Conventionally `ipixels` and - `apixels` are byte arrays so the sizes are bytes, but it actually - works with any arrays of the same type. The returned array is the - same type as the input arrays which should be the same type as each other. - """ - - itotal = len(ipixels) - atotal = len(apixels) - newtotal = itotal + atotal - newpsize = ipsize + apsize - # Set up the output buffer - # See http://www.python.org/doc/2.4.4/lib/module-array.html#l2h-1356 - out = array(ipixels.typecode) - # It's annoying that there is no cheap way to set the array size :-( - out.extend(ipixels) - out.extend(apixels) - # Interleave in the pixel data - for i in range(ipsize): - out[i:newtotal:newpsize] = ipixels[i:itotal:ipsize] - for i in range(apsize): - out[i+ipsize:newtotal:newpsize] = apixels[i:atotal:apsize] - return out - -def check_palette(palette): - """Check a palette argument (to the :class:`Writer` class) for validity. - Returns the palette as a list if okay; raises an exception otherwise. - """ - - # None is the default and is allowed. 
- if palette is None: - return None - - p = list(palette) - if not (0 < len(p) <= 256): - raise ValueError("a palette must have between 1 and 256 entries") - seen_triple = False - for i,t in enumerate(p): - if len(t) not in (3,4): - raise ValueError( - "palette entry %d: entries must be 3- or 4-tuples." % i) - if len(t) == 3: - seen_triple = True - if seen_triple and len(t) == 4: - raise ValueError( - "palette entry %d: all 4-tuples must precede all 3-tuples" % i) - for x in t: - if int(x) != x or not(0 <= x <= 255): - raise ValueError( - "palette entry %d: values must be integer: 0 <= x <= 255" % i) - return p - -class Error(Exception): - prefix = 'Error' - def __str__(self): - return self.prefix + ': ' + ' '.join(self.args) - -class FormatError(Error): - """Problem with input file format. In other words, PNG file does - not conform to the specification in some way and is invalid. - """ - - prefix = 'FormatError' - -class ChunkError(FormatError): - prefix = 'ChunkError' - - -class Writer: - """ - PNG encoder in pure Python. - """ - - def __init__(self, width=None, height=None, - size=None, - greyscale=False, - alpha=False, - bitdepth=8, - palette=None, - transparent=None, - background=None, - gamma=None, - compression=None, - interlace=False, - bytes_per_sample=None, # deprecated - planes=None, - colormap=None, - maxval=None, - chunk_limit=2**20): - """ - Create a PNG encoder object. - - Arguments: - - width, height - Image size in pixels, as two separate arguments. - size - Image size (w,h) in pixels, as single argument. - greyscale - Input data is greyscale, not RGB. - alpha - Input data has alpha channel (RGBA or LA). - bitdepth - Bit depth: from 1 to 16. - palette - Create a palette for a colour mapped image (colour type 3). - transparent - Specify a transparent colour (create a ``tRNS`` chunk). - background - Specify a default background colour (create a ``bKGD`` chunk). - gamma - Specify a gamma value (create a ``gAMA`` chunk). 
- compression - zlib compression level (1-9). - interlace - Create an interlaced image. - chunk_limit - Write multiple ``IDAT`` chunks to save memory. - - The image size (in pixels) can be specified either by using the - `width` and `height` arguments, or with the single `size` - argument. If `size` is used it should be a pair (*width*, - *height*). - - `greyscale` and `alpha` are booleans that specify whether - an image is greyscale (or colour), and whether it has an - alpha channel (or not). - - `bitdepth` specifies the bit depth of the source pixel values. - Each source pixel value must be an integer between 0 and - ``2**bitdepth-1``. For example, 8-bit images have values - between 0 and 255. PNG only stores images with bit depths of - 1,2,4,8, or 16. When `bitdepth` is not one of these values, - the next highest valid bit depth is selected, and an ``sBIT`` - (significant bits) chunk is generated that specifies the original - precision of the source image. In this case the supplied pixel - values will be rescaled to fit the range of the selected bit depth. - - The details of which bit depth / colour model combinations the - PNG file format supports directly, are somewhat arcane - (refer to the PNG specification for full details). Briefly: - "small" bit depths (1,2,4) are only allowed with greyscale and - colour mapped images; colour mapped images cannot have bit depth - 16. - - For colour mapped images (in other words, when the `palette` - argument is specified) the `bitdepth` argument must match one of - the valid PNG bit depths: 1, 2, 4, or 8. (It is valid to have a - PNG image with a palette and an ``sBIT`` chunk, but the meaning - is slightly different; it would be awkward to press the - `bitdepth` argument into service for this.) 
- - The `palette` option, when specified, causes a colour mapped image - to be created: the PNG colour type is set to 3; greyscale - must not be set; alpha must not be set; transparent must - not be set; the bit depth must be 1,2,4, or 8. When a colour - mapped image is created, the pixel values are palette indexes - and the `bitdepth` argument specifies the size of these indexes - (not the size of the colour values in the palette). - - The palette argument value should be a sequence of 3- or - 4-tuples. 3-tuples specify RGB palette entries; 4-tuples - specify RGBA palette entries. If both 4-tuples and 3-tuples - appear in the sequence then all the 4-tuples must come - before all the 3-tuples. A ``PLTE`` chunk is created; if there - are 4-tuples then a ``tRNS`` chunk is created as well. The - ``PLTE`` chunk will contain all the RGB triples in the same - sequence; the ``tRNS`` chunk will contain the alpha channel for - all the 4-tuples, in the same sequence. Palette entries - are always 8-bit. - - If specified, the `transparent` and `background` parameters must - be a tuple with three integer values for red, green, blue, or - a simple integer (or singleton tuple) for a greyscale image. - - If specified, the `gamma` parameter must be a positive number - (generally, a float). A ``gAMA`` chunk will be created. Note that - this will not change the values of the pixels as they appear in - the PNG file, they are assumed to have already been converted - appropriately for the gamma specified. - - The `compression` argument specifies the compression level - to be used by the ``zlib`` module. Higher values are likely - to compress better, but will be slower to compress. The - default for this argument is ``None``; this does not mean - no compression, rather it means that the default from the - ``zlib`` module is used (which is generally acceptable). - - If `interlace` is true then an interlaced image is created - (using PNG's so far only interace method, *Adam7*). 
This does not - affect how the pixels should be presented to the encoder, rather - it changes how they are arranged into the PNG file. On slow - connexions interlaced images can be partially decoded by the - browser to give a rough view of the image that is successively - refined as more image data appears. - - .. note :: - - Enabling the `interlace` option requires the entire image - to be processed in working memory. - - `chunk_limit` is used to limit the amount of memory used whilst - compressing the image. In order to avoid using large amounts of - memory, multiple ``IDAT`` chunks may be created. - """ - - # At the moment the `planes` argument is ignored; - # its purpose is to act as a dummy so that - # ``Writer(x, y, **info)`` works, where `info` is a dictionary - # returned by Reader.read and friends. - # Ditto for `colormap`. - - # A couple of helper functions come first. Best skipped if you - # are reading through. - - def isinteger(x): - try: - return int(x) == x - except: - return False - - def check_color(c, which): - """Checks that a colour argument for transparent or - background options is the right form. Also "corrects" bare - integers to 1-tuples. - """ - - if c is None: - return c - if greyscale: - try: - l = len(c) - except TypeError: - c = (c,) - if len(c) != 1: - raise ValueError("%s for greyscale must be 1-tuple" % - which) - if not isinteger(c[0]): - raise ValueError( - "%s colour for greyscale must be integer" % - which) - else: - if not (len(c) == 3 and - isinteger(c[0]) and - isinteger(c[1]) and - isinteger(c[2])): - raise ValueError( - "%s colour must be a triple of integers" % - which) - return c - - if size: - if len(size) != 2: - raise ValueError( - "size argument should be a pair (width, height)") - if width is not None and width != size[0]: - raise ValueError( - "size[0] (%r) and width (%r) should match when both are used." 
- % (size[0], width)) - if height is not None and height != size[1]: - raise ValueError( - "size[1] (%r) and height (%r) should match when both are used." - % (size[1], height)) - width,height = size - del size - - if width <= 0 or height <= 0: - raise ValueError("width and height must be greater than zero") - if not isinteger(width) or not isinteger(height): - raise ValueError("width and height must be integers") - # http://www.w3.org/TR/PNG/#7Integers-and-byte-order - if width > 2**32-1 or height > 2**32-1: - raise ValueError("width and height cannot exceed 2**32-1") - - if alpha and transparent is not None: - raise ValueError( - "transparent colour not allowed with alpha channel") - - if bytes_per_sample is not None: - warnings.warn('please use bitdepth instead of bytes_per_sample', - DeprecationWarning) - if bytes_per_sample not in (0.125, 0.25, 0.5, 1, 2): - raise ValueError( - "bytes per sample must be .125, .25, .5, 1, or 2") - bitdepth = int(8*bytes_per_sample) - del bytes_per_sample - if not isinteger(bitdepth) or bitdepth < 1 or 16 < bitdepth: - raise ValueError("bitdepth (%r) must be a postive integer <= 16" % - bitdepth) - - self.rescale = None - if palette: - if bitdepth not in (1,2,4,8): - raise ValueError("with palette, bitdepth must be 1, 2, 4, or 8") - if transparent is not None: - raise ValueError("transparent and palette not compatible") - if alpha: - raise ValueError("alpha and palette not compatible") - if greyscale: - raise ValueError("greyscale and palette not compatible") - else: - # No palette, check for sBIT chunk generation. 
- if alpha or not greyscale: - if bitdepth not in (8,16): - targetbitdepth = (8,16)[bitdepth > 8] - self.rescale = (bitdepth, targetbitdepth) - bitdepth = targetbitdepth - del targetbitdepth - else: - assert greyscale - assert not alpha - if bitdepth not in (1,2,4,8,16): - if bitdepth > 8: - targetbitdepth = 16 - elif bitdepth == 3: - targetbitdepth = 4 - else: - assert bitdepth in (5,6,7) - targetbitdepth = 8 - self.rescale = (bitdepth, targetbitdepth) - bitdepth = targetbitdepth - del targetbitdepth - - if bitdepth < 8 and (alpha or not greyscale and not palette): - raise ValueError( - "bitdepth < 8 only permitted with greyscale or palette") - if bitdepth > 8 and palette: - raise ValueError( - "bit depth must be 8 or less for images with palette") - - transparent = check_color(transparent, 'transparent') - background = check_color(background, 'background') - - # It's important that the true boolean values (greyscale, alpha, - # colormap, interlace) are converted to bool because Iverson's - # convention is relied upon later on. - self.width = width - self.height = height - self.transparent = transparent - self.background = background - self.gamma = gamma - self.greyscale = bool(greyscale) - self.alpha = bool(alpha) - self.colormap = bool(palette) - self.bitdepth = int(bitdepth) - self.compression = compression - self.chunk_limit = chunk_limit - self.interlace = bool(interlace) - self.palette = check_palette(palette) - - self.color_type = 4*self.alpha + 2*(not greyscale) + 1*self.colormap - assert self.color_type in (0,2,3,4,6) - - self.color_planes = (3,1)[self.greyscale or self.colormap] - self.planes = self.color_planes + self.alpha - # :todo: fix for bitdepth < 8 - self.psize = (self.bitdepth/8) * self.planes - - def make_palette(self): - """Create the byte sequences for a ``PLTE`` and if necessary a - ``tRNS`` chunk. Returned as a pair (*p*, *t*). *t* will be - ``None`` if no ``tRNS`` chunk is necessary. 
- """ - - p = array('B') - t = array('B') - - for x in self.palette: - p.extend(x[0:3]) - if len(x) > 3: - t.append(x[3]) - p = tostring(p) - t = tostring(t) - if t: - return p,t - return p,None - - def write(self, outfile, rows): - """Write a PNG image to the output file. `rows` should be - an iterable that yields each row in boxed row flat pixel format. - The rows should be the rows of the original image, so there - should be ``self.height`` rows of ``self.width * self.planes`` values. - If `interlace` is specified (when creating the instance), then - an interlaced PNG file will be written. Supply the rows in the - normal image order; the interlacing is carried out internally. - - .. note :: - - Interlacing will require the entire image to be in working memory. - """ - - if self.interlace: - fmt = 'BH'[self.bitdepth > 8] - a = array(fmt, itertools.chain(*rows)) - return self.write_array(outfile, a) - else: - nrows = self.write_passes(outfile, rows) - if nrows != self.height: - raise ValueError( - "rows supplied (%d) does not match height (%d)" % - (nrows, self.height)) - - def write_passes(self, outfile, rows, packed=False): - """ - Write a PNG image to the output file. - - Most users are expected to find the :meth:`write` or - :meth:`write_array` method more convenient. - - The rows should be given to this method in the order that - they appear in the output file. For straightlaced images, - this is the usual top to bottom ordering, but for interlaced - images the rows should have already been interlaced before - passing them to this function. - - `rows` should be an iterable that yields each row. When - `packed` is ``False`` the rows should be in boxed row flat pixel - format; when `packed` is ``True`` each row should be a packed - sequence of bytes. 
- - """ - - # http://www.w3.org/TR/PNG/#5PNG-file-signature - outfile.write(_signature) - - # http://www.w3.org/TR/PNG/#11IHDR - write_chunk(outfile, 'IHDR', - struct.pack("!2I5B", self.width, self.height, - self.bitdepth, self.color_type, - 0, 0, self.interlace)) - - # See :chunk:order - # http://www.w3.org/TR/PNG/#11gAMA - if self.gamma is not None: - write_chunk(outfile, 'gAMA', - struct.pack("!L", int(round(self.gamma*1e5)))) - - # See :chunk:order - # http://www.w3.org/TR/PNG/#11sBIT - if self.rescale: - write_chunk(outfile, 'sBIT', - struct.pack('%dB' % self.planes, - *[self.rescale[0]]*self.planes)) - - # :chunk:order: Without a palette (PLTE chunk), ordering is - # relatively relaxed. With one, gAMA chunk must precede PLTE - # chunk which must precede tRNS and bKGD. - # See http://www.w3.org/TR/PNG/#5ChunkOrdering - if self.palette: - p,t = self.make_palette() - write_chunk(outfile, 'PLTE', p) - if t: - # tRNS chunk is optional. Only needed if palette entries - # have alpha. - write_chunk(outfile, 'tRNS', t) - - # http://www.w3.org/TR/PNG/#11tRNS - if self.transparent is not None: - if self.greyscale: - write_chunk(outfile, 'tRNS', - struct.pack("!1H", *self.transparent)) - else: - write_chunk(outfile, 'tRNS', - struct.pack("!3H", *self.transparent)) - - # http://www.w3.org/TR/PNG/#11bKGD - if self.background is not None: - if self.greyscale: - write_chunk(outfile, 'bKGD', - struct.pack("!1H", *self.background)) - else: - write_chunk(outfile, 'bKGD', - struct.pack("!3H", *self.background)) - - # http://www.w3.org/TR/PNG/#11IDAT - if self.compression is not None: - compressor = zlib.compressobj(self.compression) - else: - compressor = zlib.compressobj() - - # Choose an extend function based on the bitdepth. The extend - # function packs/decomposes the pixel values into bytes and - # stuffs them onto the data array. 
- data = array('B') - if self.bitdepth == 8 or packed: - extend = data.extend - elif self.bitdepth == 16: - # Decompose into bytes - def extend(sl): - fmt = '!%dH' % len(sl) - data.extend(array('B', struct.pack(fmt, *sl))) - else: - # Pack into bytes - assert self.bitdepth < 8 - # samples per byte - spb = int(8/self.bitdepth) - def extend(sl): - a = array('B', sl) - # Adding padding bytes so we can group into a whole - # number of spb-tuples. - l = float(len(a)) - extra = math.ceil(l / float(spb))*spb - l - a.extend([0]*int(extra)) - # Pack into bytes - l = group(a, spb) - l = map(lambda e: reduce(lambda x,y: - (x << self.bitdepth) + y, e), l) - data.extend(l) - if self.rescale: - oldextend = extend - factor = \ - float(2**self.rescale[1]-1) / float(2**self.rescale[0]-1) - def extend(sl): - oldextend(map(lambda x: int(round(factor*x)), sl)) - - # Build the first row, testing mostly to see if we need to - # changed the extend function to cope with NumPy integer types - # (they cause our ordinary definition of extend to fail, so we - # wrap it). See - # http://code.google.com/p/pypng/issues/detail?id=44 - enumrows = enumerate(rows) - del rows - - # First row's filter type. - data.append(0) - # :todo: Certain exceptions in the call to ``.next()`` or the - # following try would indicate no row data supplied. - # Should catch. - i, row = next(enumrows) - try: - # If this fails... - extend(row) - except: - # ... try a version that converts the values to int first. - # Not only does this work for the (slightly broken) NumPy - # types, there are probably lots of other, unknown, "nearly" - # int types it works for. - def wrapmapint(f): - return lambda sl: f(map(int, sl)) - extend = wrapmapint(extend) - del wrapmapint - extend(row) - - for i,row in enumrows: - # Add "None" filter type. 
Currently, it's essential that - # this filter type be used for every scanline as we do not - # mark the first row of a reduced pass image; that means we - # could accidentally compute the wrong filtered scanline if - # we used "up", "average", or "paeth" on such a line. - data.append(0) - extend(row) - if len(data) > self.chunk_limit: - compressed = compressor.compress(tostring(data)) - if len(compressed): - # print >> sys.stderr, len(data), len(compressed) - write_chunk(outfile, 'IDAT', compressed) - # Because of our very witty definition of ``extend``, - # above, we must re-use the same ``data`` object. Hence - # we use ``del`` to empty this one, rather than create a - # fresh one (which would be my natural FP instinct). - del data[:] - if len(data): - compressed = compressor.compress(tostring(data)) - else: - compressed = '' - flushed = compressor.flush() - if len(compressed) or len(flushed): - # print >> sys.stderr, len(data), len(compressed), len(flushed) - write_chunk(outfile, 'IDAT', compressed + flushed) - # http://www.w3.org/TR/PNG/#11IEND - write_chunk(outfile, 'IEND') - return i+1 - - def write_array(self, outfile, pixels): - """ - Write an array in flat row flat pixel format as a PNG file on - the output file. See also :meth:`write` method. - """ - - if self.interlace: - self.write_passes(outfile, self.array_scanlines_interlace(pixels)) - else: - self.write_passes(outfile, self.array_scanlines(pixels)) - - def write_packed(self, outfile, rows): - """ - Write PNG file to `outfile`. The pixel data comes from `rows` - which should be in boxed row packed format. Each row should be - a sequence of packed bytes. - - Technically, this method does work for interlaced images but it - is best avoided. For interlaced images, the rows should be - presented in the order that they appear in the file. - - This method should not be used when the source image bit depth - is not one naturally supported by PNG; the bit depth should be - 1, 2, 4, 8, or 16. 
- """ - - if self.rescale: - raise Error("write_packed method not suitable for bit depth %d" % - self.rescale[0]) - return self.write_passes(outfile, rows, packed=True) - - def convert_pnm(self, infile, outfile): - """ - Convert a PNM file containing raw pixel data into a PNG file - with the parameters set in the writer object. Works for - (binary) PGM, PPM, and PAM formats. - """ - - if self.interlace: - pixels = array('B') - pixels.fromfile(infile, - (self.bitdepth/8) * self.color_planes * - self.width * self.height) - self.write_passes(outfile, self.array_scanlines_interlace(pixels)) - else: - self.write_passes(outfile, self.file_scanlines(infile)) - - def convert_ppm_and_pgm(self, ppmfile, pgmfile, outfile): - """ - Convert a PPM and PGM file containing raw pixel data into a - PNG outfile with the parameters set in the writer object. - """ - pixels = array('B') - pixels.fromfile(ppmfile, - (self.bitdepth/8) * self.color_planes * - self.width * self.height) - apixels = array('B') - apixels.fromfile(pgmfile, - (self.bitdepth/8) * - self.width * self.height) - pixels = interleave_planes(pixels, apixels, - (self.bitdepth/8) * self.color_planes, - (self.bitdepth/8)) - if self.interlace: - self.write_passes(outfile, self.array_scanlines_interlace(pixels)) - else: - self.write_passes(outfile, self.array_scanlines(pixels)) - - def file_scanlines(self, infile): - """ - Generates boxed rows in flat pixel format, from the input file - `infile`. It assumes that the input file is in a "Netpbm-like" - binary format, and is positioned at the beginning of the first - pixel. The number of pixels to read is taken from the image - dimensions (`width`, `height`, `planes`) and the number of bytes - per value is implied by the image `bitdepth`. 
- """ - - # Values per row - vpr = self.width * self.planes - row_bytes = vpr - if self.bitdepth > 8: - assert self.bitdepth == 16 - row_bytes *= 2 - fmt = '>%dH' % vpr - def line(): - return array('H', struct.unpack(fmt, infile.read(row_bytes))) - else: - def line(): - scanline = array('B', infile.read(row_bytes)) - return scanline - for y in range(self.height): - yield line() - - def array_scanlines(self, pixels): - """ - Generates boxed rows (flat pixels) from flat rows (flat pixels) - in an array. - """ - - # Values per row - vpr = self.width * self.planes - stop = 0 - for y in range(self.height): - start = stop - stop = start + vpr - yield pixels[start:stop] - - def array_scanlines_interlace(self, pixels): - """ - Generator for interlaced scanlines from an array. `pixels` is - the full source image in flat row flat pixel format. The - generator yields each scanline of the reduced passes in turn, in - boxed row flat pixel format. - """ - - # http://www.w3.org/TR/PNG/#8InterlaceMethods - # Array type. - fmt = 'BH'[self.bitdepth > 8] - # Value per row - vpr = self.width * self.planes - for xstart, ystart, xstep, ystep in _adam7: - if xstart >= self.width: - continue - # Pixels per row (of reduced image) - ppr = int(math.ceil((self.width-xstart)/float(xstep))) - # number of values in reduced image row. - row_len = ppr*self.planes - for y in range(ystart, self.height, ystep): - if xstep == 1: - offset = y * vpr - yield pixels[offset:offset+vpr] - else: - row = array(fmt) - # There's no easier way to set the length of an array - row.extend(pixels[0:row_len]) - offset = y * vpr + xstart * self.planes - end_offset = (y+1) * vpr - skip = self.planes * xstep - for i in range(self.planes): - row[i::self.planes] = \ - pixels[offset+i:end_offset:skip] - yield row - -def write_chunk(outfile, tag, data=strtobytes('')): - """ - Write a PNG chunk to the output file, including length and - checksum. 
- """ - - # http://www.w3.org/TR/PNG/#5Chunk-layout - outfile.write(struct.pack("!I", len(data))) - tag = strtobytes(tag) - outfile.write(tag) - outfile.write(data) - checksum = zlib.crc32(tag) - checksum = zlib.crc32(data, checksum) - checksum &= 2**32-1 - outfile.write(struct.pack("!I", checksum)) - -def write_chunks(out, chunks): - """Create a PNG file by writing out the chunks.""" - - out.write(_signature) - for chunk in chunks: - write_chunk(out, *chunk) - -def filter_scanline(type, line, fo, prev=None): - """Apply a scanline filter to a scanline. `type` specifies the - filter type (0 to 4); `line` specifies the current (unfiltered) - scanline as a sequence of bytes; `prev` specifies the previous - (unfiltered) scanline as a sequence of bytes. `fo` specifies the - filter offset; normally this is size of a pixel in bytes (the number - of bytes per sample times the number of channels), but when this is - < 1 (for bit depths < 8) then the filter offset is 1. - """ - - assert 0 <= type < 5 - - # The output array. Which, pathetically, we extend one-byte at a - # time (fortunately this is linear). - out = array('B', [type]) - - def sub(): - ai = -fo - for x in line: - if ai >= 0: - x = (x - line[ai]) & 0xff - out.append(x) - ai += 1 - def up(): - for i,x in enumerate(line): - x = (x - prev[i]) & 0xff - out.append(x) - def average(): - ai = -fo - for i,x in enumerate(line): - if ai >= 0: - x = (x - ((line[ai] + prev[i]) >> 1)) & 0xff - else: - x = (x - (prev[i] >> 1)) & 0xff - out.append(x) - ai += 1 - def paeth(): - # http://www.w3.org/TR/PNG/#9Filter-type-4-Paeth - ai = -fo # also used for ci - for i,x in enumerate(line): - a = 0 - b = prev[i] - c = 0 - - if ai >= 0: - a = line[ai] - c = prev[ai] - p = a + b - c - pa = abs(p - a) - pb = abs(p - b) - pc = abs(p - c) - if pa <= pb and pa <= pc: Pr = a - elif pb <= pc: Pr = b - else: Pr = c - - x = (x - Pr) & 0xff - out.append(x) - ai += 1 - - if not prev: - # We're on the first line. 
Some of the filters can be reduced - # to simpler cases which makes handling the line "off the top" - # of the image simpler. "up" becomes "none"; "paeth" becomes - # "left" (non-trivial, but true). "average" needs to be handled - # specially. - if type == 2: # "up" - return line # type = 0 - elif type == 3: - prev = [0]*len(line) - elif type == 4: # "paeth" - type = 1 - if type == 0: - out.extend(line) - elif type == 1: - sub() - elif type == 2: - up() - elif type == 3: - average() - else: # type == 4 - paeth() - return out - - -def from_array(a, mode=None, info={}): - """Create a PNG :class:`Image` object from a 2- or 3-dimensional array. - One application of this function is easy PIL-style saving: - ``png.from_array(pixels, 'L').save('foo.png')``. - - .. note : - - The use of the term *3-dimensional* is for marketing purposes - only. It doesn't actually work. Please bear with us. Meanwhile - enjoy the complimentary snacks (on request) and please use a - 2-dimensional array. - - Unless they are specified using the *info* parameter, the PNG's - height and width are taken from the array size. For a 3 dimensional - array the first axis is the height; the second axis is the width; - and the third axis is the channel number. Thus an RGB image that is - 16 pixels high and 8 wide will use an array that is 16x8x3. For 2 - dimensional arrays the first axis is the height, but the second axis - is ``width*channels``, so an RGB image that is 16 pixels high and 8 - wide will use a 2-dimensional array that is 16x24 (each row will be - 8*3==24 sample values). - - *mode* is a string that specifies the image colour format in a - PIL-style mode. It can be: - - ``'L'`` - greyscale (1 channel) - ``'LA'`` - greyscale with alpha (2 channel) - ``'RGB'`` - colour image (3 channel) - ``'RGBA'`` - colour image with alpha (4 channel) - - The mode string can also specify the bit depth (overriding how this - function normally derives the bit depth, see below). 
Appending - ``';16'`` to the mode will cause the PNG to be 16 bits per channel; - any decimal from 1 to 16 can be used to specify the bit depth. - - When a 2-dimensional array is used *mode* determines how many - channels the image has, and so allows the width to be derived from - the second array dimension. - - The array is expected to be a ``numpy`` array, but it can be any - suitable Python sequence. For example, a list of lists can be used: - ``png.from_array([[0, 255, 0], [255, 0, 255]], 'L')``. The exact - rules are: ``len(a)`` gives the first dimension, height; - ``len(a[0])`` gives the second dimension; ``len(a[0][0])`` gives the - third dimension, unless an exception is raised in which case a - 2-dimensional array is assumed. It's slightly more complicated than - that because an iterator of rows can be used, and it all still - works. Using an iterator allows data to be streamed efficiently. - - The bit depth of the PNG is normally taken from the array element's - datatype (but if *mode* specifies a bitdepth then that is used - instead). The array element's datatype is determined in a way which - is supposed to work both for ``numpy`` arrays and for Python - ``array.array`` objects. A 1 byte datatype will give a bit depth of - 8, a 2 byte datatype will give a bit depth of 16. If the datatype - does not have an implicit size, for example it is a plain Python - list of lists, as above, then a default of 8 is used. - - The *info* parameter is a dictionary that can be used to specify - metadata (in the same style as the arguments to the - :class:``png.Writer`` class). For this function the keys that are - useful are: - - height - overrides the height derived from the array dimensions and allows - *a* to be an iterable. - width - overrides the width derived from the array dimensions. - bitdepth - overrides the bit depth derived from the element datatype (but - must match *mode* if that also specifies a bit depth). 
- - Generally anything specified in the - *info* dictionary will override any implicit choices that this - function would otherwise make, but must match any explicit ones. - For example, if the *info* dictionary has a ``greyscale`` key then - this must be true when mode is ``'L'`` or ``'LA'`` and false when - mode is ``'RGB'`` or ``'RGBA'``. - """ - - # We abuse the *info* parameter by modifying it. Take a copy here. - # (Also typechecks *info* to some extent). - info = dict(info) - - # Syntax check mode string. - bitdepth = None - try: - mode = mode.split(';') - if len(mode) not in (1,2): - raise Error() - if mode[0] not in ('L', 'LA', 'RGB', 'RGBA'): - raise Error() - if len(mode) == 2: - try: - bitdepth = int(mode[1]) - except: - raise Error() - except Error: - raise Error("mode string should be 'RGB' or 'L;16' or similar.") - mode = mode[0] - - # Get bitdepth from *mode* if possible. - if bitdepth: - if info.get('bitdepth') and bitdepth != info['bitdepth']: - raise Error("mode bitdepth (%d) should match info bitdepth (%d)." % - (bitdepth, info['bitdepth'])) - info['bitdepth'] = bitdepth - - # Fill in and/or check entries in *info*. - # Dimensions. - if 'size' in info: - # Check width, height, size all match where used. - for dimension,axis in [('width', 0), ('height', 1)]: - if dimension in info: - if info[dimension] != info['size'][axis]: - raise Error( - "info[%r] shhould match info['size'][%r]." % - (dimension, axis)) - info['width'],info['height'] = info['size'] - if 'height' not in info: - try: - l = len(a) - except: - raise Error( - "len(a) does not work, supply info['height'] instead.") - info['height'] = l - # Colour format. 
- if 'greyscale' in info: - if bool(info['greyscale']) != ('L' in mode): - raise Error("info['greyscale'] should match mode.") - info['greyscale'] = 'L' in mode - if 'alpha' in info: - if bool(info['alpha']) != ('A' in mode): - raise Error("info['alpha'] should match mode.") - info['alpha'] = 'A' in mode - - planes = len(mode) - if 'planes' in info: - if info['planes'] != planes: - raise Error("info['planes'] should match mode.") - - # In order to work out whether we the array is 2D or 3D we need its - # first row, which requires that we take a copy of its iterator. - # We may also need the first row to derive width and bitdepth. - a,t = itertools.tee(a) - row = next(t) - del t - try: - row[0][0] - threed = True - testelement = row[0] - except: - threed = False - testelement = row - if 'width' not in info: - if threed: - width = len(row) - else: - width = len(row) // planes - info['width'] = width - - # Not implemented yet - assert not threed - - if 'bitdepth' not in info: - try: - dtype = testelement.dtype - # goto the "else:" clause. Sorry. - except: - try: - # Try a Python array.array. - bitdepth = 8 * testelement.itemsize - except: - # We can't determine it from the array element's - # datatype, use a default of 8. - bitdepth = 8 - else: - # If we got here without exception, we now assume that - # the array is a numpy array. - if dtype.kind == 'b': - bitdepth = 1 - else: - bitdepth = 8 * dtype.itemsize - info['bitdepth'] = bitdepth - - for thing in 'width height bitdepth greyscale alpha'.split(): - assert thing in info - return Image(a, info) - -# So that refugee's from PIL feel more at home. Not documented. -fromarray = from_array - -class Image: - """A PNG image. - You can create an :class:`Image` object from an array of pixels by calling - :meth:`png.from_array`. It can be saved to disk with the - :meth:`save` method.""" - def __init__(self, rows, info): - """ - .. note :: - - The constructor is not public. Please do not call it. 
- """ - - self.rows = rows - self.info = info - - def save(self, file): - """Save the image to *file*. If *file* looks like an open file - descriptor then it is used, otherwise it is treated as a - filename and a fresh file is opened. - - In general, you can only call this method once; after it has - been called the first time and the PNG image has been saved, the - source data will have been streamed, and cannot be streamed - again. - """ - - w = Writer(**self.info) - - try: - file.write - def close(): pass - except: - file = open(file, 'wb') - def close(): file.close() - - try: - w.write(file, self.rows) - finally: - close() - -class _readable: - """ - A simple file-like interface for strings and arrays. - """ - - def __init__(self, buf): - self.buf = buf - self.offset = 0 - - def read(self, n): - r = self.buf[self.offset:self.offset+n] - if isarray(r): - r = r.tostring() - self.offset += n - return r - - -class Reader: - """ - PNG decoder in pure Python. - """ - - def __init__(self, _guess=None, **kw): - """ - Create a PNG decoder object. - - The constructor expects exactly one keyword argument. If you - supply a positional argument instead, it will guess the input - type. You can choose among the following keyword arguments: - - filename - Name of input file (a PNG file). - file - A file-like object (object with a read() method). - bytes - ``array`` or ``string`` with PNG data. - - """ - if ((_guess is not None and len(kw) != 0) or - (_guess is None and len(kw) != 1)): - raise TypeError("Reader() takes exactly 1 argument") - - # Will be the first 8 bytes, later on. See validate_signature. - self.signature = None - self.transparent = None - # A pair of (len,type) if a chunk has been read but its data and - # checksum have not (in other words the file position is just - # past the 4 bytes that specify the chunk type). See preamble - # method for how this is used. 
- self.atchunk = None - - if _guess is not None: - if isarray(_guess): - kw["bytes"] = _guess - elif isinstance(_guess, str): - kw["filename"] = _guess - elif isinstance(_guess, file): - kw["file"] = _guess - - if "filename" in kw: - self.file = open(kw["filename"], "rb") - elif "file" in kw: - self.file = kw["file"] - elif "bytes" in kw: - self.file = _readable(kw["bytes"]) - else: - raise TypeError("expecting filename, file or bytes array") - - def chunk(self, seek=None): - """ - Read the next PNG chunk from the input file; returns a - (*type*,*data*) tuple. *type* is the chunk's type as a string - (all PNG chunk types are 4 characters long). *data* is the - chunk's data content, as a string. - - If the optional `seek` argument is - specified then it will keep reading chunks until it either runs - out of file or finds the type specified by the argument. Note - that in general the order of chunks in PNGs is unspecified, so - using `seek` can cause you to miss chunks. - """ - - self.validate_signature() - - while True: - # http://www.w3.org/TR/PNG/#5Chunk-layout - if not self.atchunk: - self.atchunk = self.chunklentype() - length,type = self.atchunk - self.atchunk = None - data = self.file.read(length) - if len(data) != length: - raise ChunkError('Chunk %s too short for required %i octets.' - % (type, length)) - checksum = self.file.read(4) - if len(checksum) != 4: - raise ValueError('Chunk %s too short for checksum.', tag) - if seek and type != seek: - continue - verify = zlib.crc32(strtobytes(type)) - verify = zlib.crc32(data, verify) - # Whether the output from zlib.crc32 is signed or not varies - # according to hideous implementation details, see - # http://bugs.python.org/issue1202 . - # We coerce it to be positive here (in a way which works on - # Python 2.3 and older). 
- verify &= 2**32 - 1 - verify = struct.pack('!I', verify) - if checksum != verify: - # print repr(checksum) - (a, ) = struct.unpack('!I', checksum) - (b, ) = struct.unpack('!I', verify) - raise ChunkError( - "Checksum error in %s chunk: 0x%08X != 0x%08X." % - (type, a, b)) - return type, data - - def chunks(self): - """Return an iterator that will yield each chunk as a - (*chunktype*, *content*) pair. - """ - - while True: - t,v = self.chunk() - yield t,v - if t == 'IEND': - break - - def undo_filter(self, filter_type, scanline, previous): - """Undo the filter for a scanline. `scanline` is a sequence of - bytes that does not include the initial filter type byte. - `previous` is decoded previous scanline (for straightlaced - images this is the previous pixel row, but for interlaced - images, it is the previous scanline in the reduced image, which - in general is not the previous pixel row in the final image). - When there is no previous scanline (the first row of a - straightlaced image, or the first row in one of the passes in an - interlaced image), then this argument should be ``None``. - - The scanline will have the effects of filtering removed, and the - result will be returned as a fresh sequence of bytes. - """ - - # :todo: Would it be better to update scanline in place? - - # Create the result byte array. It seems that the best way to - # create the array to be the right size is to copy from an - # existing sequence. *sigh* - # If we fill the result with scanline, then this allows a - # micro-optimisation in the "null" and "sub" cases. - result = array('B', scanline) - - if filter_type == 0: - # And here, we _rely_ on filling the result with scanline, - # above. - return result - - if filter_type not in (1,2,3,4): - raise FormatError('Invalid PNG Filter Type.' - ' See http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters .') - - # Filter unit. The stride from one pixel to the corresponding - # byte from the previous previous. 
Normally this is the pixel - # size in bytes, but when this is smaller than 1, the previous - # byte is used instead. - fu = max(1, self.psize) - - # For the first line of a pass, synthesize a dummy previous - # line. An alternative approach would be to observe that on the - # first line 'up' is the same as 'null', 'paeth' is the same - # as 'sub', with only 'average' requiring any special case. - if not previous: - previous = array('B', [0]*len(scanline)) - - def sub(): - """Undo sub filter.""" - - ai = 0 - # Loops starts at index fu. Observe that the initial part - # of the result is already filled in correctly with - # scanline. - for i in range(fu, len(result)): - x = scanline[i] - a = result[ai] - result[i] = (x + a) & 0xff - ai += 1 - - def up(): - """Undo up filter.""" - - for i in range(len(result)): - x = scanline[i] - b = previous[i] - result[i] = (x + b) & 0xff - - def average(): - """Undo average filter.""" - - ai = -fu - for i in range(len(result)): - x = scanline[i] - if ai < 0: - a = 0 - else: - a = result[ai] - b = previous[i] - result[i] = (x + ((a + b) >> 1)) & 0xff - ai += 1 - - def paeth(): - """Undo Paeth filter.""" - - # Also used for ci. - ai = -fu - for i in range(len(result)): - x = scanline[i] - if ai < 0: - a = c = 0 - else: - a = result[ai] - c = previous[ai] - b = previous[i] - p = a + b - c - pa = abs(p - a) - pb = abs(p - b) - pc = abs(p - c) - if pa <= pb and pa <= pc: - pr = a - elif pb <= pc: - pr = b - else: - pr = c - result[i] = (x + pr) & 0xff - ai += 1 - - # Call appropriate filter algorithm. Note that 0 has already - # been dealt with. - (None, sub, up, average, paeth)[filter_type]() - return result - - def deinterlace(self, raw): - """ - Read raw pixel data, undo filters, deinterlace, and flatten. - Return in flat row flat pixel format. 
- """ - - # print >> sys.stderr, ("Reading interlaced, w=%s, r=%s, planes=%s," + - # " bpp=%s") % (self.width, self.height, self.planes, self.bps) - # Values per row (of the target image) - vpr = self.width * self.planes - - # Make a result array, and make it big enough. Interleaving - # writes to the output array randomly (well, not quite), so the - # entire output array must be in memory. - fmt = 'BH'[self.bitdepth > 8] - a = array(fmt, [0]*vpr*self.height) - source_offset = 0 - - for xstart, ystart, xstep, ystep in _adam7: - # print >> sys.stderr, "Adam7: start=%s,%s step=%s,%s" % ( - # xstart, ystart, xstep, ystep) - if xstart >= self.width: - continue - # The previous (reconstructed) scanline. None at the - # beginning of a pass to indicate that there is no previous - # line. - recon = None - # Pixels per row (reduced pass image) - ppr = int(math.ceil((self.width-xstart)/float(xstep))) - # Row size in bytes for this pass. - row_size = int(math.ceil(self.psize * ppr)) - for y in range(ystart, self.height, ystep): - filter_type = raw[source_offset] - source_offset += 1 - scanline = raw[source_offset:source_offset+row_size] - source_offset += row_size - recon = self.undo_filter(filter_type, scanline, recon) - # Convert so that there is one element per pixel value - flat = self.serialtoflat(recon, ppr) - if xstep == 1: - assert xstart == 0 - offset = y * vpr - a[offset:offset+vpr] = flat - else: - offset = y * vpr + xstart * self.planes - end_offset = (y+1) * vpr - skip = self.planes * xstep - for i in range(self.planes): - a[offset+i:end_offset:skip] = \ - flat[i::self.planes] - return a - - def iterboxed(self, rows): - """Iterator that yields each scanline in boxed row flat pixel - format. `rows` should be an iterator that yields the bytes of - each row in turn. - """ - - def asvalues(raw): - """Convert a row of raw bytes into a flat row. 
Result may - or may not share with argument""" - - if self.bitdepth == 8: - return raw - if self.bitdepth == 16: - raw = tostring(raw) - return array('H', struct.unpack('!%dH' % (len(raw)//2), raw)) - assert self.bitdepth < 8 - width = self.width - # Samples per byte - spb = 8//self.bitdepth - out = array('B') - mask = 2**self.bitdepth - 1 - shifts = map(self.bitdepth.__mul__, reversed(range(spb))) - for o in raw: - out.extend(map(lambda i: mask&(o>>i), shifts)) - return out[:width] - - return imap_(asvalues, rows) - - def serialtoflat(self, bytes, width=None): - """Convert serial format (byte stream) pixel data to flat row - flat pixel. - """ - - if self.bitdepth == 8: - return bytes - if self.bitdepth == 16: - bytes = tostring(bytes) - return array('H', - struct.unpack('!%dH' % (len(bytes)//2), bytes)) - assert self.bitdepth < 8 - if width is None: - width = self.width - # Samples per byte - spb = 8//self.bitdepth - out = array('B') - mask = 2**self.bitdepth - 1 - shifts = map(self.bitdepth.__mul__, reversed(range(spb))) - l = width - for o in bytes: - out.extend([(mask&(o>>s)) for s in shifts][:l]) - l -= spb - if l <= 0: - l = width - return out - - def iterstraight(self, raw): - """Iterator that undoes the effect of filtering, and yields each - row in serialised format (as a sequence of bytes). Assumes input - is straightlaced. `raw` should be an iterable that yields the - raw bytes in chunks of arbitrary size.""" - - # length of row, in bytes - rb = self.row_bytes - a = array('B') - # The previous (reconstructed) scanline. None indicates first - # line of image. - recon = None - for some in raw: - a.extend(some) - while len(a) >= rb + 1: - filter_type = a[0] - scanline = a[1:rb+1] - del a[:rb+1] - recon = self.undo_filter(filter_type, scanline, recon) - yield recon - if len(a) != 0: - # :file:format We get here with a file format error: when the - # available bytes (after decompressing) do not pack into exact - # rows. 
- raise FormatError( - 'Wrong size for decompressed IDAT chunk.') - assert len(a) == 0 - - def validate_signature(self): - """If signature (header) has not been read then read and - validate it; otherwise do nothing. - """ - - if self.signature: - return - self.signature = self.file.read(8) - if self.signature != _signature: - raise FormatError("PNG file has invalid signature.") - - def preamble(self): - """ - Extract the image metadata by reading the initial part of the PNG - file up to the start of the ``IDAT`` chunk. All the chunks that - precede the ``IDAT`` chunk are read and either processed for - metadata or discarded. - """ - - self.validate_signature() - - while True: - if not self.atchunk: - self.atchunk = self.chunklentype() - if self.atchunk is None: - raise FormatError( - 'This PNG file has no IDAT chunks.') - if self.atchunk[1] == 'IDAT': - return - self.process_chunk() - - def chunklentype(self): - """Reads just enough of the input to determine the next - chunk's length and type, returned as a (*length*, *type*) pair - where *type* is a string. If there are no more chunks, ``None`` - is returned. - """ - - x = self.file.read(8) - if not x: - return None - if len(x) != 8: - raise FormatError( - 'End of file whilst reading chunk length and type.') - length,type = struct.unpack('!I4s', x) - type = bytestostr(type) - if length > 2**31-1: - raise FormatError('Chunk %s is too large: %d.' % (type,length)) - return length,type - - def process_chunk(self): - """Process the next chunk and its data. This only processes the - following chunk types, all others are ignored: ``IHDR``, - ``PLTE``, ``bKGD``, ``tRNS``, ``gAMA``, ``sBIT``. 
- """ - - type, data = self.chunk() - if type == 'IHDR': - # http://www.w3.org/TR/PNG/#11IHDR - if len(data) != 13: - raise FormatError('IHDR chunk has incorrect length.') - (self.width, self.height, self.bitdepth, self.color_type, - self.compression, self.filter, - self.interlace) = struct.unpack("!2I5B", data) - - # Check that the header specifies only valid combinations. - if self.bitdepth not in (1,2,4,8,16): - raise Error("invalid bit depth %d" % self.bitdepth) - if self.color_type not in (0,2,3,4,6): - raise Error("invalid colour type %d" % self.color_type) - # Check indexed (palettized) images have 8 or fewer bits - # per pixel; check only indexed or greyscale images have - # fewer than 8 bits per pixel. - if ((self.color_type & 1 and self.bitdepth > 8) or - (self.bitdepth < 8 and self.color_type not in (0,3))): - raise FormatError("Illegal combination of bit depth (%d)" - " and colour type (%d)." - " See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ." - % (self.bitdepth, self.color_type)) - if self.compression != 0: - raise Error("unknown compression method %d" % self.compression) - if self.filter != 0: - raise FormatError("Unknown filter method %d," - " see http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters ." - % self.filter) - if self.interlace not in (0,1): - raise FormatError("Unknown interlace method %d," - " see http://www.w3.org/TR/2003/REC-PNG-20031110/#8InterlaceMethods ." 
- % self.interlace) - - # Derived values - # http://www.w3.org/TR/PNG/#6Colour-values - colormap = bool(self.color_type & 1) - greyscale = not (self.color_type & 2) - alpha = bool(self.color_type & 4) - color_planes = (3,1)[greyscale or colormap] - planes = color_planes + alpha - - self.colormap = colormap - self.greyscale = greyscale - self.alpha = alpha - self.color_planes = color_planes - self.planes = planes - self.psize = float(self.bitdepth)/float(8) * planes - if int(self.psize) == self.psize: - self.psize = int(self.psize) - self.row_bytes = int(math.ceil(self.width * self.psize)) - # Stores PLTE chunk if present, and is used to check - # chunk ordering constraints. - self.plte = None - # Stores tRNS chunk if present, and is used to check chunk - # ordering constraints. - self.trns = None - # Stores sbit chunk if present. - self.sbit = None - elif type == 'PLTE': - # http://www.w3.org/TR/PNG/#11PLTE - if self.plte: - warnings.warn("Multiple PLTE chunks present.") - self.plte = data - if len(data) % 3 != 0: - raise FormatError( - "PLTE chunk's length should be a multiple of 3.") - if len(data) > (2**self.bitdepth)*3: - raise FormatError("PLTE chunk is too long.") - if len(data) == 0: - raise FormatError("Empty PLTE is not allowed.") - elif type == 'bKGD': - try: - if self.colormap: - if not self.plte: - warnings.warn( - "PLTE chunk is required before bKGD chunk.") - self.background = struct.unpack('B', data) - else: - self.background = struct.unpack("!%dH" % self.color_planes, - data) - except struct.error: - raise FormatError("bKGD chunk has incorrect length.") - elif type == 'tRNS': - # http://www.w3.org/TR/PNG/#11tRNS - self.trns = data - if self.colormap: - if not self.plte: - warnings.warn("PLTE chunk is required before tRNS chunk.") - else: - if len(data) > len(self.plte)/3: - # Was warning, but promoted to Error as it - # would otherwise cause pain later on. 
- raise FormatError("tRNS chunk is too long.") - else: - if self.alpha: - raise FormatError( - "tRNS chunk is not valid with colour type %d." % - self.color_type) - try: - self.transparent = \ - struct.unpack("!%dH" % self.color_planes, data) - except struct.error: - raise FormatError("tRNS chunk has incorrect length.") - elif type == 'gAMA': - try: - self.gamma = struct.unpack("!L", data)[0] / 100000.0 - except struct.error: - raise FormatError("gAMA chunk has incorrect length.") - elif type == 'sBIT': - self.sbit = data - if (self.colormap and len(data) != 3 or - not self.colormap and len(data) != self.planes): - raise FormatError("sBIT chunk has incorrect length.") - - def read(self): - """ - Read the PNG file and decode it. Returns (`width`, `height`, - `pixels`, `metadata`). - - May use excessive memory. - - `pixels` are returned in boxed row flat pixel format. - """ - - def iteridat(): - """Iterator that yields all the ``IDAT`` chunks as strings.""" - while True: - try: - type, data = self.chunk() - except ValueError: - e = geterror() - raise ChunkError(e.args[0]) - if type == 'IEND': - # http://www.w3.org/TR/PNG/#11IEND - break - if type != 'IDAT': - continue - # type == 'IDAT' - # http://www.w3.org/TR/PNG/#11IDAT - if self.colormap and not self.plte: - warnings.warn("PLTE chunk is required before IDAT chunk") - yield data - - def iterdecomp(idat): - """Iterator that yields decompressed strings. `idat` should - be an iterator that yields the ``IDAT`` chunk data. - """ - - # Currently, with no max_length paramter to decompress, this - # routine will do one yield per IDAT chunk. So not very - # incremental. - d = zlib.decompressobj() - # Each IDAT chunk is passed to the decompressor, then any - # remaining state is decompressed out. - for data in idat: - # :todo: add a max_length argument here to limit output - # size. 
- yield array('B', d.decompress(data)) - yield array('B', d.flush()) - - self.preamble() - raw = iterdecomp(iteridat()) - - if self.interlace: - raw = array('B', itertools.chain(*raw)) - arraycode = 'BH'[self.bitdepth>8] - # Like :meth:`group` but producing an array.array object for - # each row. - pixels = imap_(lambda *row: array(arraycode, row), - *[iter(self.deinterlace(raw))]*self.width*self.planes) - else: - pixels = self.iterboxed(self.iterstraight(raw)) - meta = dict() - for attr in 'greyscale alpha planes bitdepth interlace'.split(): - meta[attr] = getattr(self, attr) - meta['size'] = (self.width, self.height) - for attr in 'gamma transparent background'.split(): - a = getattr(self, attr, None) - if a is not None: - meta[attr] = a - return self.width, self.height, pixels, meta - - - def read_flat(self): - """ - Read a PNG file and decode it into flat row flat pixel format. - Returns (*width*, *height*, *pixels*, *metadata*). - - May use excessive memory. - - `pixels` are returned in flat row flat pixel format. - - See also the :meth:`read` method which returns pixels in the - more stream-friendly boxed row flat pixel format. - """ - - x, y, pixel, meta = self.read() - arraycode = 'BH'[meta['bitdepth']>8] - pixel = array(arraycode, itertools.chain(*pixel)) - return x, y, pixel, meta - - def palette(self, alpha='natural'): - """Returns a palette that is a sequence of 3-tuples or 4-tuples, - synthesizing it from the ``PLTE`` and ``tRNS`` chunks. These - chunks should have already been processed (for example, by - calling the :meth:`preamble` method). All the tuples are the - same size: 3-tuples if there is no ``tRNS`` chunk, 4-tuples when - there is a ``tRNS`` chunk. Assumes that the image is colour type - 3 and therefore a ``PLTE`` chunk is required. - - If the `alpha` argument is ``'force'`` then an alpha channel is - always added, forcing the result to be a sequence of 4-tuples. 
- """ - - if not self.plte: - raise FormatError( - "Required PLTE chunk is missing in colour type 3 image.") - plte = group(array('B', self.plte), 3) - if self.trns or alpha == 'force': - trns = array('B', self.trns or '') - trns.extend([255]*(len(plte)-len(trns))) - plte = map(operator.add, plte, group(trns, 1)) - return plte - - def asDirect(self): - """Returns the image data as a direct representation of an - ``x * y * planes`` array. This method is intended to remove the - need for callers to deal with palettes and transparency - themselves. Images with a palette (colour type 3) - are converted to RGB or RGBA; images with transparency (a - ``tRNS`` chunk) are converted to LA or RGBA as appropriate. - When returned in this format the pixel values represent the - colour value directly without needing to refer to palettes or - transparency information. - - Like the :meth:`read` method this method returns a 4-tuple: - - (*width*, *height*, *pixels*, *meta*) - - This method normally returns pixel values with the bit depth - they have in the source image, but when the source PNG has an - ``sBIT`` chunk it is inspected and can reduce the bit depth of - the result pixels; pixel values will be reduced according to - the bit depth specified in the ``sBIT`` chunk (PNG nerds should - note a single result bit depth is used for all channels; the - maximum of the ones specified in the ``sBIT`` chunk. An RGB565 - image will be rescaled to 6-bit RGB666). - - The *meta* dictionary that is returned reflects the `direct` - format and not the original source image. For example, an RGB - source image with a ``tRNS`` chunk to represent a transparent - colour, will have ``planes=3`` and ``alpha=False`` for the - source image, but the *meta* dictionary returned by this method - will have ``planes=4`` and ``alpha=True`` because an alpha - channel is synthesized and added. - - *pixels* is the pixel data in boxed row flat pixel format (just - like the :meth:`read` method). 
- - All the other aspects of the image data are not changed. - """ - - self.preamble() - - # Simple case, no conversion necessary. - if not self.colormap and not self.trns and not self.sbit: - return self.read() - - x,y,pixels,meta = self.read() - - if self.colormap: - meta['colormap'] = False - meta['alpha'] = bool(self.trns) - meta['bitdepth'] = 8 - meta['planes'] = 3 + bool(self.trns) - plte = self.palette() - def iterpal(pixels): - for row in pixels: - row = map(plte.__getitem__, row) - yield array('B', itertools.chain(*row)) - pixels = iterpal(pixels) - elif self.trns: - # It would be nice if there was some reasonable way of doing - # this without generating a whole load of intermediate tuples. - # But tuples does seem like the easiest way, with no other way - # clearly much simpler or much faster. (Actually, the L to LA - # conversion could perhaps go faster (all those 1-tuples!), but - # I still wonder whether the code proliferation is worth it) - it = self.transparent - maxval = 2**meta['bitdepth']-1 - planes = meta['planes'] - meta['alpha'] = True - meta['planes'] += 1 - typecode = 'BH'[meta['bitdepth']>8] - def itertrns(pixels): - for row in pixels: - # For each row we group it into pixels, then form a - # characterisation vector that says whether each pixel - # is opaque or not. Then we convert True/False to - # 0/maxval (by multiplication), and add it as the extra - # channel. 
- row = group(row, planes) - opa = map(it.__ne__, row) - opa = map(maxval.__mul__, opa) - opa = zip(opa) # convert to 1-tuples - yield array(typecode, - itertools.chain(*map(operator.add, row, opa))) - pixels = itertrns(pixels) - targetbitdepth = None - if self.sbit: - sbit = struct.unpack('%dB' % len(self.sbit), self.sbit) - targetbitdepth = max(sbit) - if targetbitdepth > meta['bitdepth']: - raise Error('sBIT chunk %r exceeds bitdepth %d' % - (sbit,self.bitdepth)) - if min(sbit) <= 0: - raise Error('sBIT chunk %r has a 0-entry' % sbit) - if targetbitdepth == meta['bitdepth']: - targetbitdepth = None - if targetbitdepth: - shift = meta['bitdepth'] - targetbitdepth - meta['bitdepth'] = targetbitdepth - def itershift(pixels): - for row in pixels: - yield map(shift.__rrshift__, row) - pixels = itershift(pixels) - return x,y,pixels,meta - - def asFloat(self, maxval=1.0): - """Return image pixels as per :meth:`asDirect` method, but scale - all pixel values to be floating point values between 0.0 and - *maxval*. - """ - - x,y,pixels,info = self.asDirect() - sourcemaxval = 2**info['bitdepth']-1 - del info['bitdepth'] - info['maxval'] = float(maxval) - factor = float(maxval)/float(sourcemaxval) - def iterfloat(): - for row in pixels: - yield map(factor.__mul__, row) - return x,y,iterfloat(),info - - def _as_rescale(self, get, targetbitdepth): - """Helper used by :meth:`asRGB8` and :meth:`asRGBA8`.""" - - width,height,pixels,meta = get() - maxval = 2**meta['bitdepth'] - 1 - targetmaxval = 2**targetbitdepth - 1 - factor = float(targetmaxval) / float(maxval) - meta['bitdepth'] = targetbitdepth - def iterscale(): - for row in pixels: - yield map(lambda x: int(round(x*factor)), row) - return width, height, iterscale(), meta - - def asRGB8(self): - """Return the image data as an RGB pixels with 8-bits per - sample. This is like the :meth:`asRGB` method except that - this method additionally rescales the values so that they - are all between 0 and 255 (8-bit). 
In the case where the - source image has a bit depth < 8 the transformation preserves - all the information; where the source image has bit depth - > 8, then rescaling to 8-bit values loses precision. No - dithering is performed. Like :meth:`asRGB`, an alpha channel - in the source image will raise an exception. - - This function returns a 4-tuple: - (*width*, *height*, *pixels*, *metadata*). - *width*, *height*, *metadata* are as per the :meth:`read` method. - - *pixels* is the pixel data in boxed row flat pixel format. - """ - - return self._as_rescale(self.asRGB, 8) - - def asRGBA8(self): - """Return the image data as RGBA pixels with 8-bits per - sample. This method is similar to :meth:`asRGB8` and - :meth:`asRGBA`: The result pixels have an alpha channel, *and* - values are rescaled to the range 0 to 255. The alpha channel is - synthesized if necessary (with a small speed penalty). - """ - - return self._as_rescale(self.asRGBA, 8) - - def asRGB(self): - """Return image as RGB pixels. RGB colour images are passed - through unchanged; greyscales are expanded into RGB - triplets (there is a small speed overhead for doing this). - - An alpha channel in the source image will raise an - exception. - - The return values are as for the :meth:`read` method - except that the *metadata* reflect the returned pixels, not the - source image. In particular, for this method - ``metadata['greyscale']`` will be ``False``. - """ - - width,height,pixels,meta = self.asDirect() - if meta['alpha']: - raise Error("will not convert image with alpha channel to RGB") - if not meta['greyscale']: - return width,height,pixels,meta - meta['greyscale'] = False - typecode = 'BH'[meta['bitdepth'] > 8] - def iterrgb(): - for row in pixels: - a = array(typecode, [0]) * 3 * width - for i in range(3): - a[i::3] = row - yield a - return width,height,iterrgb(),meta - - def asRGBA(self): - """Return image as RGBA pixels. 
Greyscales are expanded into - RGB triplets; an alpha channel is synthesized if necessary. - The return values are as for the :meth:`read` method - except that the *metadata* reflect the returned pixels, not the - source image. In particular, for this method - ``metadata['greyscale']`` will be ``False``, and - ``metadata['alpha']`` will be ``True``. - """ - - width,height,pixels,meta = self.asDirect() - if meta['alpha'] and not meta['greyscale']: - return width,height,pixels,meta - typecode = 'BH'[meta['bitdepth'] > 8] - maxval = 2**meta['bitdepth'] - 1 - def newarray(): - return array(typecode, [0]) * 4 * width - if meta['alpha'] and meta['greyscale']: - # LA to RGBA - def convert(): - for row in pixels: - # Create a fresh target row, then copy L channel - # into first three target channels, and A channel - # into fourth channel. - a = newarray() - for i in range(3): - a[i::4] = row[0::2] - a[3::4] = row[1::2] - yield a - elif meta['greyscale']: - # L to RGBA - def convert(): - for row in pixels: - a = newarray() - for i in range(3): - a[i::4] = row - a[3::4] = array(typecode, [maxval]) * width - yield a - else: - assert not meta['alpha'] and not meta['greyscale'] - # RGB to RGBA - def convert(): - for row in pixels: - a = newarray() - for i in range(3): - a[i::4] = row[i::3] - a[3::4] = array(typecode, [maxval]) * width - yield a - meta['alpha'] = True - meta['greyscale'] = False - return width,height,convert(),meta - - -# === Internal Test Support === - -# This section comprises the tests that are internally validated (as -# opposed to tests which produce output files that are externally -# validated). Primarily they are unittests. - -# Note that it is difficult to internally validate the results of -# writing a PNG file. The only thing we can do is read it back in -# again, which merely checks consistency, not that the PNG file we -# produce is valid. 
- -# Run the tests from the command line: -# python -c 'import png;png.test()' - -# (For an in-memory binary file IO object) We use BytesIO where -# available, otherwise we use StringIO, but name it BytesIO. -try: - from io import BytesIO -except: - from StringIO import StringIO as BytesIO -import tempfile -import unittest - - -def test(): - unittest.main(__name__) - -def topngbytes(name, rows, x, y, **k): - """Convenience function for creating a PNG file "in memory" as a - string. Creates a :class:`Writer` instance using the keyword arguments, - then passes `rows` to its :meth:`Writer.write` method. The resulting - PNG file is returned as a string. `name` is used to identify the file for - debugging. - """ - - import os - - print (name) - f = BytesIO() - w = Writer(x, y, **k) - w.write(f, rows) - if os.environ.get('PYPNG_TEST_TMP'): - w = open(name, 'wb') - w.write(f.getvalue()) - w.close() - return f.getvalue() - -def testWithIO(inp, out, f): - """Calls the function `f` with ``sys.stdin`` changed to `inp` - and ``sys.stdout`` changed to `out`. They are restored when `f` - returns. This function returns whatever `f` returns. - """ - - import os - - try: - oldin,sys.stdin = sys.stdin,inp - oldout,sys.stdout = sys.stdout,out - x = f() - finally: - sys.stdin = oldin - sys.stdout = oldout - if os.environ.get('PYPNG_TEST_TMP') and hasattr(out,'getvalue'): - name = mycallersname() - if name: - w = open(name+'.png', 'wb') - w.write(out.getvalue()) - w.close() - return x - -def mycallersname(): - """Returns the name of the caller of the caller of this function - (hence the name of the caller of the function in which - "mycallersname()" textually appears). 
Returns None if this cannot - be determined.""" - - # http://docs.python.org/library/inspect.html#the-interpreter-stack - import inspect - - frame = inspect.currentframe() - if not frame: - return None - frame_,filename_,lineno_,funname,linelist_,listi_ = ( - inspect.getouterframes(frame)[2]) - return funname - -def seqtobytes(s): - """Convert a sequence of integers to a *bytes* instance. Good for - plastering over Python 2 / Python 3 cracks. - """ - - return strtobytes(''.join(chr(x) for x in s)) - -class Test(unittest.TestCase): - # This member is used by the superclass. If we don't define a new - # class here then when we use self.assertRaises() and the PyPNG code - # raises an assertion then we get no proper traceback. I can't work - # out why, but defining a new class here means we get a proper - # traceback. - class failureException(Exception): - pass - - def helperLN(self, n): - mask = (1 << n) - 1 - # Use small chunk_limit so that multiple chunk writing is - # tested. Making it a test for Issue 20. - w = Writer(15, 17, greyscale=True, bitdepth=n, chunk_limit=99) - f = BytesIO() - w.write_array(f, array('B', map(mask.__and__, range(1, 256)))) - r = Reader(bytes=f.getvalue()) - x,y,pixels,meta = r.read() - self.assertEqual(x, 15) - self.assertEqual(y, 17) - self.assertEqual(list(itertools.chain(*pixels)), - map(mask.__and__, range(1,256))) - def testL8(self): - return self.helperLN(8) - def testL4(self): - return self.helperLN(4) - def testL2(self): - "Also tests asRGB8." - w = Writer(1, 4, greyscale=True, bitdepth=2) - f = BytesIO() - w.write_array(f, array('B', range(4))) - r = Reader(bytes=f.getvalue()) - x,y,pixels,meta = r.asRGB8() - self.assertEqual(x, 1) - self.assertEqual(y, 4) - for i,row in enumerate(pixels): - self.assertEqual(len(row), 3) - self.assertEqual(list(row), [0x55*i]*3) - def testP2(self): - "2-bit palette." 
- a = (255,255,255) - b = (200,120,120) - c = (50,99,50) - w = Writer(1, 4, bitdepth=2, palette=[a,b,c]) - f = BytesIO() - w.write_array(f, array('B', (0,1,1,2))) - r = Reader(bytes=f.getvalue()) - x,y,pixels,meta = r.asRGB8() - self.assertEqual(x, 1) - self.assertEqual(y, 4) - self.assertEqual(list(pixels), map(list, [a, b, b, c])) - def testPtrns(self): - "Test colour type 3 and tRNS chunk (and 4-bit palette)." - a = (50,99,50,50) - b = (200,120,120,80) - c = (255,255,255) - d = (200,120,120) - e = (50,99,50) - w = Writer(3, 3, bitdepth=4, palette=[a,b,c,d,e]) - f = BytesIO() - w.write_array(f, array('B', (4, 3, 2, 3, 2, 0, 2, 0, 1))) - r = Reader(bytes=f.getvalue()) - x,y,pixels,meta = r.asRGBA8() - self.assertEqual(x, 3) - self.assertEqual(y, 3) - c = c+(255,) - d = d+(255,) - e = e+(255,) - boxed = [(e,d,c),(d,c,a),(c,a,b)] - flat = map(lambda row: itertools.chain(*row), boxed) - self.assertEqual(map(list, pixels), map(list, flat)) - def testRGBtoRGBA(self): - "asRGBA8() on colour type 2 source.""" - # Test for Issue 26 - r = Reader(bytes=_pngsuite['basn2c08']) - x,y,pixels,meta = r.asRGBA8() - # Test the pixels at row 9 columns 0 and 1. - row9 = list(pixels)[9] - self.assertEqual(row9[0:8], - [0xff, 0xdf, 0xff, 0xff, 0xff, 0xde, 0xff, 0xff]) - def testLtoRGBA(self): - "asRGBA() on grey source.""" - # Test for Issue 60 - r = Reader(bytes=_pngsuite['basi0g08']) - x,y,pixels,meta = r.asRGBA() - row9 = list(list(pixels)[9]) - self.assertEqual(row9[0:8], - [222, 222, 222, 255, 221, 221, 221, 255]) - def testCtrns(self): - "Test colour type 2 and tRNS chunk." - # Test for Issue 25 - r = Reader(bytes=_pngsuite['tbrn2c08']) - x,y,pixels,meta = r.asRGBA8() - # I just happen to know that the first pixel is transparent. - # In particular it should be #7f7f7f00 - row0 = list(pixels)[0] - self.assertEqual(tuple(row0[0:4]), (0x7f, 0x7f, 0x7f, 0x00)) - def testAdam7read(self): - """Adam7 interlace reading. 
- Specifically, test that for images in the PngSuite that - have both an interlaced and straightlaced pair that both - images from the pair produce the same array of pixels.""" - for candidate in _pngsuite: - if not candidate.startswith('basn'): - continue - candi = candidate.replace('n', 'i') - if candi not in _pngsuite: - continue - print ('adam7 read %s' % (candidate,)) - straight = Reader(bytes=_pngsuite[candidate]) - adam7 = Reader(bytes=_pngsuite[candi]) - # Just compare the pixels. Ignore x,y (because they're - # likely to be correct?); metadata is ignored because the - # "interlace" member differs. Lame. - straight = straight.read()[2] - adam7 = adam7.read()[2] - self.assertEqual(map(list, straight), map(list, adam7)) - def testAdam7write(self): - """Adam7 interlace writing. - For each test image in the PngSuite, write an interlaced - and a straightlaced version. Decode both, and compare results. - """ - # Not such a great test, because the only way we can check what - # we have written is to read it back again. - - for name,bytes in _pngsuite.items(): - # Only certain colour types supported for this test. 
- if name[3:5] not in ['n0', 'n2', 'n4', 'n6']: - continue - it = Reader(bytes=bytes) - x,y,pixels,meta = it.read() - pngi = topngbytes('adam7wn'+name+'.png', pixels, - x=x, y=y, bitdepth=it.bitdepth, - greyscale=it.greyscale, alpha=it.alpha, - transparent=it.transparent, - interlace=False) - x,y,ps,meta = Reader(bytes=pngi).read() - it = Reader(bytes=bytes) - x,y,pixels,meta = it.read() - pngs = topngbytes('adam7wi'+name+'.png', pixels, - x=x, y=y, bitdepth=it.bitdepth, - greyscale=it.greyscale, alpha=it.alpha, - transparent=it.transparent, - interlace=True) - x,y,pi,meta = Reader(bytes=pngs).read() - self.assertEqual(map(list, ps), map(list, pi)) - def testPGMin(self): - """Test that the command line tool can read PGM files.""" - def do(): - return _main(['testPGMin']) - s = BytesIO() - s.write(strtobytes('P5 2 2 3\n')) - s.write(strtobytes('\x00\x01\x02\x03')) - s.flush() - s.seek(0) - o = BytesIO() - testWithIO(s, o, do) - r = Reader(bytes=o.getvalue()) - x,y,pixels,meta = r.read() - self.assertTrue(r.greyscale) - self.assertEqual(r.bitdepth, 2) - def testPAMin(self): - """Test that the command line tool can read PAM file.""" - def do(): - return _main(['testPAMin']) - s = BytesIO() - s.write(strtobytes('P7\nWIDTH 3\nHEIGHT 1\nDEPTH 4\nMAXVAL 255\n' - 'TUPLTYPE RGB_ALPHA\nENDHDR\n')) - # The pixels in flat row flat pixel format - flat = [255,0,0,255, 0,255,0,120, 0,0,255,30] - asbytes = seqtobytes(flat) - s.write(asbytes) - s.flush() - s.seek(0) - o = BytesIO() - testWithIO(s, o, do) - r = Reader(bytes=o.getvalue()) - x,y,pixels,meta = r.read() - self.assertTrue(r.alpha) - self.assertTrue(not r.greyscale) - self.assertEqual(list(itertools.chain(*pixels)), flat) - def testLA4(self): - """Create an LA image with bitdepth 4.""" - bytes = topngbytes('la4.png', [[5, 12]], 1, 1, - greyscale=True, alpha=True, bitdepth=4) - sbit = Reader(bytes=bytes).chunk('sBIT')[1] - self.assertEqual(sbit, strtobytes('\x04\x04')) - def testPNMsbit(self): - """Test that PNM files can 
generates sBIT chunk.""" - def do(): - return _main(['testPNMsbit']) - s = BytesIO() - s.write(strtobytes('P6 8 1 1\n')) - for pixel in range(8): - s.write(struct.pack(' 255: - a = array('H') - else: - a = array('B') - fw = float(width) - fh = float(height) - pfun = test_patterns[pattern] - for y in range(height): - fy = float(y)/fh - for x in range(width): - a.append(int(round(pfun(float(x)/fw, fy) * maxval))) - return a - - def test_rgba(size=256, bitdepth=8, - red="GTB", green="GLR", blue="RTL", alpha=None): - """ - Create a test image. Each channel is generated from the - specified pattern; any channel apart from red can be set to - None, which will cause it not to be in the image. It - is possible to create all PNG channel types (L, RGB, LA, RGBA), - as well as non PNG channel types (RGA, and so on). - """ - - i = test_pattern(size, size, bitdepth, red) - psize = 1 - for channel in (green, blue, alpha): - if channel: - c = test_pattern(size, size, bitdepth, channel) - i = interleave_planes(i, c, psize, 1) - psize += 1 - return i - - def pngsuite_image(name): - """ - Create a test image by reading an internal copy of the files - from the PngSuite. Returned in flat row flat pixel format. - """ - - if name not in _pngsuite: - raise NotImplementedError("cannot find PngSuite file %s (use -L for a list)" % name) - r = Reader(bytes=_pngsuite[name]) - w,h,pixels,meta = r.asDirect() - assert w == h - # LAn for n < 8 is a special case for which we need to rescale - # the data. 
- if meta['greyscale'] and meta['alpha'] and meta['bitdepth'] < 8: - factor = 255 // (2**meta['bitdepth']-1) - def rescale(data): - for row in data: - yield map(factor.__mul__, row) - pixels = rescale(pixels) - meta['bitdepth'] = 8 - arraycode = 'BH'[meta['bitdepth']>8] - return w, array(arraycode, itertools.chain(*pixels)), meta - - # The body of test_suite() - size = 256 - if options.test_size: - size = options.test_size - options.bitdepth = options.test_depth - options.greyscale=bool(options.test_black) - - kwargs = {} - if options.test_red: - kwargs["red"] = options.test_red - if options.test_green: - kwargs["green"] = options.test_green - if options.test_blue: - kwargs["blue"] = options.test_blue - if options.test_alpha: - kwargs["alpha"] = options.test_alpha - if options.greyscale: - if options.test_red or options.test_green or options.test_blue: - raise ValueError("cannot specify colours (R, G, B) when greyscale image (black channel, K) is specified") - kwargs["red"] = options.test_black - kwargs["green"] = None - kwargs["blue"] = None - options.alpha = bool(options.test_alpha) - if not args: - pixels = test_rgba(size, options.bitdepth, **kwargs) - else: - size,pixels,meta = pngsuite_image(args[0]) - for k in ['bitdepth', 'alpha', 'greyscale']: - setattr(options, k, meta[k]) - - writer = Writer(size, size, - bitdepth=options.bitdepth, - transparent=options.transparent, - background=options.background, - gamma=options.gamma, - greyscale=options.greyscale, - alpha=options.alpha, - compression=options.compression, - interlace=options.interlace) - writer.write_array(sys.stdout, pixels) - -def read_pam_header(infile): - """ - Read (the rest of a) PAM header. `infile` should be positioned - immediately after the initial 'P7' line (at the beginning of the - second line). Returns are as for `read_pnm_header`. - """ - - # Unlike PBM, PGM, and PPM, we can read the header a line at a time. 
- header = dict() - while True: - l = infile.readline().strip() - if l == strtobytes('ENDHDR'): - break - if not l: - raise EOFError('PAM ended prematurely') - if l[0] == strtobytes('#'): - continue - l = l.split(None, 1) - if l[0] not in header: - header[l[0]] = l[1] - else: - header[l[0]] += strtobytes(' ') + l[1] - - required = ['WIDTH', 'HEIGHT', 'DEPTH', 'MAXVAL'] - required = [strtobytes(x) for x in required] - WIDTH,HEIGHT,DEPTH,MAXVAL = required - present = [x for x in required if x in header] - if len(present) != len(required): - raise Error('PAM file must specify WIDTH, HEIGHT, DEPTH, and MAXVAL') - width = int(header[WIDTH]) - height = int(header[HEIGHT]) - depth = int(header[DEPTH]) - maxval = int(header[MAXVAL]) - if (width <= 0 or - height <= 0 or - depth <= 0 or - maxval <= 0): - raise Error( - 'WIDTH, HEIGHT, DEPTH, MAXVAL must all be positive integers') - return 'P7', width, height, depth, maxval - -def read_pnm_header(infile, supported=('P5','P6')): - """ - Read a PNM header, returning (format,width,height,depth,maxval). - `width` and `height` are in pixels. `depth` is the number of - channels in the image; for PBM and PGM it is synthesized as 1, for - PPM as 3; for PAM images it is read from the header. `maxval` is - synthesized (as 1) for PBM images. - """ - - # Generally, see http://netpbm.sourceforge.net/doc/ppm.html - # and http://netpbm.sourceforge.net/doc/pam.html - - supported = [strtobytes(x) for x in supported] - - # Technically 'P7' must be followed by a newline, so by using - # rstrip() we are being liberal in what we accept. I think this - # is acceptable. - type = infile.read(3).rstrip() - if type not in supported: - raise NotImplementedError('file format %s not supported' % type) - if type == strtobytes('P7'): - # PAM header parsing is completely different. 
- return read_pam_header(infile) - # Expected number of tokens in header (3 for P4, 4 for P6) - expected = 4 - pbm = ('P1', 'P4') - if type in pbm: - expected = 3 - header = [type] - - # We have to read the rest of the header byte by byte because the - # final whitespace character (immediately following the MAXVAL in - # the case of P6) may not be a newline. Of course all PNM files in - # the wild use a newline at this point, so it's tempting to use - # readline; but it would be wrong. - def getc(): - c = infile.read(1) - if not c: - raise Error('premature EOF reading PNM header') - return c - - c = getc() - while True: - # Skip whitespace that precedes a token. - while c.isspace(): - c = getc() - # Skip comments. - while c == '#': - while c not in '\n\r': - c = getc() - if not c.isdigit(): - raise Error('unexpected character %s found in header' % c) - # According to the specification it is legal to have comments - # that appear in the middle of a token. - # This is bonkers; I've never seen it; and it's a bit awkward to - # code good lexers in Python (no goto). So we break on such - # cases. - token = strtobytes('') - while c.isdigit(): - token += c - c = getc() - # Slight hack. All "tokens" are decimal integers, so convert - # them here. - header.append(int(token)) - if len(header) == expected: - break - # Skip comments (again) - while c == '#': - while c not in '\n\r': - c = getc() - if not c.isspace(): - raise Error('expected header to end with whitespace, not %s' % c) - - if type in pbm: - # synthesize a MAXVAL - header.append(1) - depth = (1,3)[type == strtobytes('P6')] - return header[0], header[1], header[2], depth, header[3] - -def write_pnm(file, width, height, pixels, meta): - """Write a Netpbm PNM/PAM file.""" - - bitdepth = meta['bitdepth'] - maxval = 2**bitdepth - 1 - # Rudely, the number of image planes can be used to determine - # whether we are L (PGM), LA (PAM), RGB (PPM), or RGBA (PAM). 
- planes = meta['planes'] - # Can be an assert as long as we assume that pixels and meta came - # from a PNG file. - assert planes in (1,2,3,4) - if planes in (1,3): - if 1 == planes: - # PGM - # Could generate PBM if maxval is 1, but we don't (for one - # thing, we'd have to convert the data, not just blat it - # out). - fmt = 'P5' - else: - # PPM - fmt = 'P6' - file.write('%s %d %d %d\n' % (fmt, width, height, maxval)) - if planes in (2,4): - # PAM - # See http://netpbm.sourceforge.net/doc/pam.html - if 2 == planes: - tupltype = 'GRAYSCALE_ALPHA' - else: - tupltype = 'RGB_ALPHA' - file.write('P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\n' - 'TUPLTYPE %s\nENDHDR\n' % - (width, height, planes, maxval, tupltype)) - # Values per row - vpr = planes * width - # struct format - fmt = '>%d' % vpr - if maxval > 0xff: - fmt = fmt + 'H' - else: - fmt = fmt + 'B' - for row in pixels: - file.write(struct.pack(fmt, *row)) - file.flush() - -def color_triple(color): - """ - Convert a command line colour value to a RGB triple of integers. - FIXME: Somewhere we need support for greyscale backgrounds etc. - """ - if color.startswith('#') and len(color) == 4: - return (int(color[1], 16), - int(color[2], 16), - int(color[3], 16)) - if color.startswith('#') and len(color) == 7: - return (int(color[1:3], 16), - int(color[3:5], 16), - int(color[5:7], 16)) - elif color.startswith('#') and len(color) == 13: - return (int(color[1:5], 16), - int(color[5:9], 16), - int(color[9:13], 16)) - - -def _main(argv): - """ - Run the PNG encoder with options from the command line. 
- """ - - # Parse command line arguments - from optparse import OptionParser - import re - version = '%prog ' + re.sub(r'( ?\$|URL: |Rev:)', '', __version__) - parser = OptionParser(version=version) - parser.set_usage("%prog [options] [imagefile]") - parser.add_option('-r', '--read-png', default=False, - action='store_true', - help='Read PNG, write PNM') - parser.add_option("-i", "--interlace", - default=False, action="store_true", - help="create an interlaced PNG file (Adam7)") - parser.add_option("-t", "--transparent", - action="store", type="string", metavar="color", - help="mark the specified colour (#RRGGBB) as transparent") - parser.add_option("-b", "--background", - action="store", type="string", metavar="color", - help="save the specified background colour") - parser.add_option("-a", "--alpha", - action="store", type="string", metavar="pgmfile", - help="alpha channel transparency (RGBA)") - parser.add_option("-g", "--gamma", - action="store", type="float", metavar="value", - help="save the specified gamma value") - parser.add_option("-c", "--compression", - action="store", type="int", metavar="level", - help="zlib compression level (0-9)") - parser.add_option("-T", "--test", - default=False, action="store_true", - help="create a test image (a named PngSuite image if an argument is supplied)") - parser.add_option('-L', '--list', - default=False, action='store_true', - help="print list of named test images") - parser.add_option("-R", "--test-red", - action="store", type="string", metavar="pattern", - help="test pattern for the red image layer") - parser.add_option("-G", "--test-green", - action="store", type="string", metavar="pattern", - help="test pattern for the green image layer") - parser.add_option("-B", "--test-blue", - action="store", type="string", metavar="pattern", - help="test pattern for the blue image layer") - parser.add_option("-A", "--test-alpha", - action="store", type="string", metavar="pattern", - help="test pattern for the alpha image 
layer") - parser.add_option("-K", "--test-black", - action="store", type="string", metavar="pattern", - help="test pattern for greyscale image") - parser.add_option("-d", "--test-depth", - default=8, action="store", type="int", - metavar='NBITS', - help="create test PNGs that are NBITS bits per channel") - parser.add_option("-S", "--test-size", - action="store", type="int", metavar="size", - help="width and height of the test image") - (options, args) = parser.parse_args(args=argv[1:]) - - # Convert options - if options.transparent is not None: - options.transparent = color_triple(options.transparent) - if options.background is not None: - options.background = color_triple(options.background) - - if options.list: - names = list(_pngsuite) - names.sort() - for name in names: - print (name) - return - - # Run regression tests - if options.test: - return test_suite(options, args) - - # Prepare input and output files - if len(args) == 0: - infilename = '-' - infile = sys.stdin - elif len(args) == 1: - infilename = args[0] - infile = open(infilename, 'rb') - else: - parser.error("more than one input file") - outfile = sys.stdout - - if options.read_png: - # Encode PNG to PPM - png = Reader(file=infile) - width,height,pixels,meta = png.asDirect() - write_pnm(outfile, width, height, pixels, meta) - else: - # Encode PNM to PNG - format, width, height, depth, maxval = \ - read_pnm_header(infile, ('P5','P6','P7')) - # When it comes to the variety of input formats, we do something - # rather rude. Observe that L, LA, RGB, RGBA are the 4 colour - # types supported by PNG and that they correspond to 1, 2, 3, 4 - # channels respectively. So we use the number of channels in - # the source image to determine which one we have. We do not - # care about TUPLTYPE. 
- greyscale = depth <= 2 - pamalpha = depth in (2,4) - supported = map(lambda x: 2**x-1, range(1,17)) - try: - mi = supported.index(maxval) - except ValueError: - raise NotImplementedError( - 'your maxval (%s) not in supported list %s' % - (maxval, str(supported))) - bitdepth = mi+1 - writer = Writer(width, height, - greyscale=greyscale, - bitdepth=bitdepth, - interlace=options.interlace, - transparent=options.transparent, - background=options.background, - alpha=bool(pamalpha or options.alpha), - gamma=options.gamma, - compression=options.compression) - if options.alpha: - pgmfile = open(options.alpha, 'rb') - format, awidth, aheight, adepth, amaxval = \ - read_pnm_header(pgmfile, 'P5') - if amaxval != '255': - raise NotImplementedError( - 'maxval %s not supported for alpha channel' % amaxval) - if (awidth, aheight) != (width, height): - raise ValueError("alpha channel image size mismatch" - " (%s has %sx%s but %s has %sx%s)" - % (infilename, width, height, - options.alpha, awidth, aheight)) - writer.convert_ppm_and_pgm(infile, pgmfile, outfile) - else: - writer.convert_pnm(infile, outfile) - - -if __name__ == '__main__': - try: - _main(sys.argv) - except Error: - e = geterror() - sys.stderr.write("%s\n" % (e,)) diff --git a/venv/lib/python3.7/site-packages/pygame/tests/test_utils/run_tests.py b/venv/lib/python3.7/site-packages/pygame/tests/test_utils/run_tests.py deleted file mode 100644 index a1261cf..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/test_utils/run_tests.py +++ /dev/null @@ -1,344 +0,0 @@ -import sys - -if __name__ == '__main__': - sys.exit("This module is for import only") - -test_pkg_name = '.'.join(__name__.split('.')[0:-2]) -is_pygame_pkg = test_pkg_name == 'pygame.tests' -test_runner_mod = test_pkg_name + '.test_utils.test_runner' - -if is_pygame_pkg: - from pygame.tests.test_utils import import_submodule - from pygame.tests.test_utils.test_runner \ - import prepare_test_env, run_test, combine_results, \ - get_test_results, 
TEST_RESULTS_START -else: - from test.test_utils import import_submodule - from test.test_utils.test_runner \ - import prepare_test_env, run_test, combine_results, \ - get_test_results, TEST_RESULTS_START -import pygame -import pygame.threads - -import os -import re -import shutil -import tempfile -import time -import random -from pprint import pformat - -was_run = False - -def run(*args, **kwds): - """Run the Pygame unit test suite and return (total tests run, fails dict) - - Positional arguments (optional): - The names of tests to include. If omitted then all tests are run. Test - names need not include the trailing '_test'. - - Keyword arguments: - incomplete - fail incomplete tests (default False) - usesubprocess - run all test suites in the current process - (default False, use separate subprocesses) - dump - dump failures/errors as dict ready to eval (default False) - file - if provided, the name of a file into which to dump failures/errors - timings - if provided, the number of times to run each individual test to - get an average run time (default is run each test once) - exclude - A list of TAG names to exclude from the run. The items may be - comma or space separated. - show_output - show silenced stderr/stdout on errors (default False) - all - dump all results, not just errors (default False) - randomize - randomize order of tests (default False) - seed - if provided, a seed randomizer integer - multi_thread - if provided, the number of THREADS in which to run - subprocessed tests - time_out - if subprocess is True then the time limit in seconds before - killing a test (default 30) - fake - if provided, the name of the fake tests package in the - run_tests__tests subpackage to run instead of the normal - Pygame tests - python - the path to a python executable to run subprocessed tests - (default sys.executable) - interative - allow tests tagged 'interative'. - - Return value: - A tuple of total number of tests run, dictionary of error information. 
The - dictionary is empty if no errors were recorded. - - By default individual test modules are run in separate subprocesses. This - recreates normal Pygame usage where pygame.init() and pygame.quit() are - called only once per program execution, and avoids unfortunate - interactions between test modules. Also, a time limit is placed on test - execution, so frozen tests are killed when there time allotment expired. - Use the single process option if threading is not working properly or if - tests are taking too long. It is not guaranteed that all tests will pass - in single process mode. - - Tests are run in a randomized order if the randomize argument is True or a - seed argument is provided. If no seed integer is provided then the system - time is used. - - Individual test modules may have a corresponding *_tags.py module, - defining a __tags__ attribute, a list of tag strings used to selectively - omit modules from a run. By default only the 'interactive', 'ignore', and - 'subprocess_ignore' tags are ignored. 'interactive' is for modules that - take user input, like cdrom_test.py. 'ignore' and 'subprocess_ignore' for - for disabling modules for foreground and subprocess modes respectively. - These are for disabling tests on optional modules or for experimental - modules with known problems. These modules can be run from the console as - a Python program. - - This function can only be called once per Python session. It is not - reentrant. 
- - """ - - global was_run - - if was_run: - raise RuntimeError("run() was already called this session") - was_run = True - - options = kwds.copy() - option_usesubprocess = options.get('usesubprocess', False) - option_dump = options.pop('dump', False) - option_file = options.pop('file', None) - option_randomize = options.get('randomize', False) - option_seed = options.get('seed', None) - option_multi_thread = options.pop('multi_thread', 1) - option_time_out = options.pop('time_out', 120) - option_fake = options.pop('fake', None) - option_python = options.pop('python', sys.executable) - option_exclude = options.pop('exclude', ()) - option_interactive = options.pop('interactive', False) - - if not option_interactive and 'interactive' not in option_exclude: - option_exclude += ('interactive',) - if option_usesubprocess and 'subprocess_ignore' not in option_exclude: - option_exclude += ('subprocess_ignore',) - elif 'ignore' not in option_exclude: - option_exclude += ('ignore',) - if sys.version_info < (3, 0, 0): - option_exclude += ('python2_ignore',) - else: - option_exclude += ('python3_ignore',) - - if pygame.get_sdl_version() < (2, 0, 0): - option_exclude += ('SDL1_ignore',) - else: - option_exclude += ('SDL2_ignore',) - main_dir, test_subdir, fake_test_subdir = prepare_test_env() - - ########################################################################### - # Compile a list of test modules. If fake, then compile list of fake - # xxxx_test.py from run_tests__tests - - TEST_MODULE_RE = re.compile('^(.+_test)\.py$') - - test_mods_pkg_name = test_pkg_name - - working_dir_temp = tempfile.mkdtemp() - - if option_fake is not None: - test_mods_pkg_name = '.'.join([test_mods_pkg_name, - 'run_tests__tests', - option_fake]) - test_subdir = os.path.join(fake_test_subdir, option_fake) - working_dir = test_subdir - else: - working_dir = working_dir_temp - - - # Added in because some machines will need os.environ else there will be - # false failures in subprocess mode. 
Same issue as python2.6. Needs some - # env vars. - - test_env = os.environ - - fmt1 = '%s.%%s' % test_mods_pkg_name - fmt2 = '%s.%%s_test' % test_mods_pkg_name - if args: - test_modules = [ - m.endswith('_test') and (fmt1 % m) or (fmt2 % m) for m in args - ] - else: - test_modules = [] - for f in sorted(os.listdir(test_subdir)): - for match in TEST_MODULE_RE.findall(f): - test_modules.append(fmt1 % (match,)) - - ########################################################################### - # Remove modules to be excluded. - - tmp = test_modules - test_modules = [] - for name in tmp: - tag_module_name = "%s_tags" % (name[0:-5],) - try: - tag_module = import_submodule(tag_module_name) - except ImportError: - test_modules.append(name) - else: - try: - tags = tag_module.__tags__ - except AttributeError: - print ("%s has no tags: ignoring" % (tag_module_name,)) - test_modules.append(name) - else: - for tag in tags: - if tag in option_exclude: - print ("skipping %s (tag '%s')" % (name, tag)) - break - else: - test_modules.append(name) - del tmp, tag_module_name, name - - ########################################################################### - # Meta results - - results = {} - meta_results = {'__meta__' : {}} - meta = meta_results['__meta__'] - - ########################################################################### - # Randomization - - if option_randomize or option_seed is not None: - if option_seed is None: - option_seed = time.time() - meta['random_seed'] = option_seed - print ("\nRANDOM SEED USED: %s\n" % option_seed) - random.seed(option_seed) - random.shuffle(test_modules) - - ########################################################################### - # Single process mode - - if not option_usesubprocess: - options['exclude'] = option_exclude - t = time.time() - for module in test_modules: - results.update(run_test(module, **options)) - t = time.time() - t - - ########################################################################### - # Subprocess 
mode - # - - else: - if is_pygame_pkg: - from pygame.tests.test_utils.async_sub import proc_in_time_or_kill - else: - from test.test_utils.async_sub import proc_in_time_or_kill - - pass_on_args = ['--exclude', ','.join(option_exclude)] - for field in ['randomize', 'incomplete', 'unbuffered']: - if kwds.get(field, False): - pass_on_args.append('--'+field) - - def sub_test(module): - print ('loading %s' % module) - - cmd = [option_python, '-m', test_runner_mod, - module] + pass_on_args - - return (module, - (cmd, test_env, working_dir), - proc_in_time_or_kill(cmd, option_time_out, env=test_env, - wd=working_dir)) - - if option_multi_thread > 1: - def tmap(f, args): - return pygame.threads.tmap ( - f, args, stop_on_error = False, - num_workers = option_multi_thread - ) - else: - tmap = map - - t = time.time() - - for module, cmd, (return_code, raw_return) in tmap(sub_test, - test_modules): - test_file = '%s.py' % os.path.join(test_subdir, module) - cmd, test_env, working_dir = cmd - - test_results = get_test_results(raw_return) - if test_results: - results.update(test_results) - else: - results[module] = {} - - results[module].update(dict( - return_code=return_code, - raw_return=raw_return, - cmd=cmd, - test_file=test_file, - test_env=test_env, - working_dir=working_dir, - module=module, - )) - - t = time.time() - t - - ########################################################################### - # Output Results - # - - untrusty_total, combined = combine_results(results, t) - total, n_errors, n_failures = count_results(results) - - meta['total_tests'] = total - meta['combined'] = combined - meta['total_errors'] = n_errors - meta['total_failures'] = n_failures - results.update(meta_results) - - if not option_usesubprocess and total != untrusty_total: - raise AssertionError('Something went wrong in the Test Machinery:\n' - 'total: %d != untrusty_total: %d' % (total, untrusty_total)) - - if not option_dump: - print (combined) - else: - print (TEST_RESULTS_START) - print 
(pformat(results)) - - if option_file is not None: - results_file = open(option_file, 'w') - try: - results_file.write(pformat(results)) - finally: - results_file.close() - - shutil.rmtree(working_dir_temp) - - return total, n_errors + n_failures - - -def count_results(results): - total = errors = failures = 0 - for result in results.values(): - if result.get('return_code', 0): - total += 1 - errors += 1 - else: - total += result['num_tests'] - errors += result['num_errors'] - failures += result['num_failures'] - - return total, errors, failures - - -def run_and_exit(*args, **kwargs): - """Run the tests, and if there are failures, exit with a return code of 1. - - This is needed for various buildbots to recognise that the tests have - failed. - """ - total, fails = run(*args, **kwargs) - if fails: - sys.exit(1) - sys.exit(0) - diff --git a/venv/lib/python3.7/site-packages/pygame/tests/test_utils/test_machinery.py b/venv/lib/python3.7/site-packages/pygame/tests/test_utils/test_machinery.py deleted file mode 100644 index 2222b49..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/test_utils/test_machinery.py +++ /dev/null @@ -1,80 +0,0 @@ -import inspect -import random -import re -import unittest - -try: - from StringIO import StringIO -except ImportError: - from io import StringIO - -from . 
import import_submodule - -class PygameTestLoader(unittest.TestLoader): - def __init__(self, randomize_tests=False, include_incomplete=False, - exclude=('interactive',)): - super(PygameTestLoader, self).__init__() - self.randomize_tests = randomize_tests - - if exclude is None: - self.exclude = set() - else: - self.exclude = set(exclude) - - if include_incomplete: - self.testMethodPrefix = ('test', 'todo_') - - def getTestCaseNames(self, testCaseClass): - res = [] - for name in super(PygameTestLoader, self).getTestCaseNames(testCaseClass): - tags = get_tags(testCaseClass, getattr(testCaseClass, name)) - if self.exclude.isdisjoint(tags): - res.append(name) - - if self.randomize_tests: - random.shuffle(res) - - return res - - -# Exclude by tags: - -TAGS_RE = re.compile(r"\|[tT]ags:(-?[ a-zA-Z,0-9_\n]+)\|", re.M) - -class TestTags: - def __init__(self): - self.memoized = {} - self.parent_modules = {} - - def get_parent_module(self, class_): - if class_ not in self.parent_modules: - self.parent_modules[class_] = import_submodule(class_.__module__) - return self.parent_modules[class_] - - def __call__(self, parent_class, meth): - key = (parent_class, meth.__name__) - if key not in self.memoized: - parent_module = self.get_parent_module(parent_class) - - module_tags = getattr(parent_module, '__tags__', []) - class_tags = getattr(parent_class, '__tags__', []) - - tags = TAGS_RE.search(inspect.getdoc(meth) or '') - if tags: test_tags = [t.strip() for t in tags.group(1).split(',')] - else: test_tags = [] - - combined = set() - for tags in (module_tags, class_tags, test_tags): - if not tags: continue - - add = set([t for t in tags if not t.startswith('-')]) - remove = set([t[1:] for t in tags if t not in add]) - - if add: combined.update(add) - if remove: combined.difference_update(remove) - - self.memoized[key] = combined - - return self.memoized[key] - -get_tags = TestTags() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/test_utils/test_runner.py 
b/venv/lib/python3.7/site-packages/pygame/tests/test_utils/test_runner.py deleted file mode 100644 index 8ee322f..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/test_utils/test_runner.py +++ /dev/null @@ -1,234 +0,0 @@ -import sys -import os - -if __name__ == '__main__': - pkg_dir = os.path.split(os.path.split(os.path.abspath(__file__))[0])[0] - parent_dir, pkg_name = os.path.split(pkg_dir) - is_pygame_pkg = (pkg_name == 'tests' and - os.path.split(parent_dir)[1] == 'pygame') - if not is_pygame_pkg: - sys.path.insert(0, parent_dir) -else: - is_pygame_pkg = __name__.startswith('pygame.tests.') - -import unittest -from .test_machinery import PygameTestLoader - -import re -try: - import StringIO -except ImportError: - import io as StringIO - -import optparse -from pprint import pformat - - -def prepare_test_env(): - test_subdir = os.path.split(os.path.split(os.path.abspath(__file__))[0])[0] - main_dir = os.path.split(test_subdir)[0] - sys.path.insert(0, test_subdir) - fake_test_subdir = os.path.join(test_subdir, 'run_tests__tests') - return main_dir, test_subdir, fake_test_subdir - -main_dir, test_subdir, fake_test_subdir = prepare_test_env() - -################################################################################ -# Set the command line options -# -# options are shared with run_tests.py so make sure not to conflict -# in time more will be added here - -TAG_PAT = r'-?[a-zA-Z0-9_]+' -TAG_RE = re.compile(TAG_PAT) -EXCLUDE_RE = re.compile("(%s,?\s*)+$" % (TAG_PAT,)) - -def exclude_callback(option, opt, value, parser): - if EXCLUDE_RE.match(value) is None: - raise optparse.OptionValueError("%s argument has invalid value" % - (opt,)) - parser.values.exclude = TAG_RE.findall(value) - -opt_parser = optparse.OptionParser() - -opt_parser.add_option ( - "-i", "--incomplete", action = 'store_true', - help = "fail incomplete tests" ) - -opt_parser.add_option ( - "-s", "--usesubprocess", action = "store_true", - help = "run everything in a single process " 
- " (default: use no subprocesses)" ) - -opt_parser.add_option ( - "-e", "--exclude", - action = 'callback', - type = 'string', - help = "exclude tests containing any of TAGS", - callback = exclude_callback) - -opt_parser.add_option ( - "-v", "--unbuffered", action = 'store_true', - help = "Show stdout/stderr as tests run, rather than storing it and showing on failures" ) - -opt_parser.add_option ( - "-r", "--randomize", action = 'store_true', - help = "randomize order of tests" ) - -################################################################################ -# If an xxxx_test.py takes longer than TIME_OUT seconds it will be killed -# This is only the default, can be over-ridden on command line - -TIME_OUT = 30 - -# DEFAULTS - -################################################################################ -# Human readable output -# - -COMPLETE_FAILURE_TEMPLATE = """ -====================================================================== -ERROR: all_tests_for (%(module)s.AllTestCases) ----------------------------------------------------------------------- -Traceback (most recent call last): - File "test/%(module)s.py", line 1, in all_tests_for -subprocess completely failed with return code of %(return_code)s -cmd: %(cmd)s -test_env: %(test_env)s -working_dir: %(working_dir)s -return (first 10 and last 10 lines): -%(raw_return)s - -""" # Leave that last empty line else build page regex won't match - # Text also needs to be vertically compressed - - -RAN_TESTS_DIV = (70 * "-") + "\nRan" - -DOTS = re.compile("^([FE.sux]*)$", re.MULTILINE) - -def combine_results(all_results, t): - """ - - Return pieced together results in a form fit for human consumption. Don't - rely on results if piecing together subprocessed results (single process - mode is fine). Was originally meant for that purpose but was found to be - unreliable. See the dump option for reliable results. 
- - """ - - all_dots = '' - failures = [] - - for module, results in sorted(all_results.items()): - output, return_code, raw_return = map ( - results.get, ('output','return_code', 'raw_return') - ) - - if not output or (return_code and RAN_TESTS_DIV not in output): - # would this effect the original dict? TODO - output_lines = raw_return.splitlines() - if len(output_lines) > 20: - results['raw_return'] = '\n'.join(output_lines[:10] + - ['...'] + - output_lines[-10:] - ) - failures.append( COMPLETE_FAILURE_TEMPLATE % results ) - all_dots += 'E' - continue - - dots = DOTS.search(output).group(1) - all_dots += dots - - if 'E' in dots or 'F' in dots: - failures.append( output[len(dots)+1:].split(RAN_TESTS_DIV)[0] ) - - total_fails, total_errors = map(all_dots.count, 'FE') - total_tests = len(all_dots) - - combined = [all_dots] - if failures: - combined += [''.join(failures).lstrip('\n')[:-1]] - combined += ["%s %s tests in %.3fs\n" % (RAN_TESTS_DIV, total_tests, t)] - - if failures: - infos = ((["failures=%s" % total_fails] if total_fails else []) + - (["errors=%s" % total_errors] if total_errors else [])) - combined += ['FAILED (%s)\n' % ', '.join(infos)] - else: - combined += ['OK\n'] - - return total_tests, '\n'.join(combined) - -################################################################################ - -TEST_RESULTS_START = "<--!! TEST RESULTS START HERE !!-->" -TEST_RESULTS_END = "<--!! 
TEST RESULTS END HERE !!-->" -_test_re_str = '%s\n(.*)%s' % (TEST_RESULTS_START, TEST_RESULTS_END) -TEST_RESULTS_RE = re.compile(_test_re_str, re.DOTALL | re.M) - -def get_test_results(raw_return): - test_results = TEST_RESULTS_RE.search(raw_return) - if test_results: - try: - return eval(test_results.group(1)) - except: - print ("BUGGY TEST RESULTS EVAL:\n %s" % test_results.group(1)) - raise - - -################################################################################ - -def run_test(module, incomplete=False, usesubprocess=True, randomize=False, - exclude=('interactive',), buffer=True): - """Run a unit test module - """ - suite = unittest.TestSuite() - - print ('loading %s' % module) - - loader = PygameTestLoader(randomize_tests=randomize, - include_incomplete=incomplete, - exclude=exclude) - suite.addTest(loader.loadTestsFromName(module)) - - output = StringIO.StringIO() - runner = unittest.TextTestRunner(stream=output, buffer=buffer) - results = runner.run(suite) - - results = {module: { - 'output': output.getvalue(), - 'num_tests': results.testsRun, - 'num_errors': len(results.errors), - 'num_failures': len(results.failures), - }} - - if usesubprocess: - print (TEST_RESULTS_START) - print (pformat(results)) - print (TEST_RESULTS_END) - else: - return results - -################################################################################ - -if __name__ == '__main__': - options, args = opt_parser.parse_args() - if not args: - - if is_pygame_pkg: - run_from = 'pygame.tests.go' - else: - run_from = os.path.join(main_dir, 'run_tests.py') - sys.exit('No test module provided; consider using %s instead' % run_from) - run_test(args[0], - incomplete=options.incomplete, - usesubprocess=options.usesubprocess, - randomize=options.randomize, - exclude=options.exclude, - buffer=(not options.unbuffered), - ) - -################################################################################ - diff --git 
a/venv/lib/python3.7/site-packages/pygame/tests/threads_test.py b/venv/lib/python3.7/site-packages/pygame/tests/threads_test.py deleted file mode 100644 index ce492bc..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/threads_test.py +++ /dev/null @@ -1,176 +0,0 @@ -import unittest -from pygame.threads import FuncResult, tmap, WorkerQueue, Empty, STOP -from pygame import threads -from pygame.compat import xrange_ - -import time - - -class WorkerQueueTypeTest(unittest.TestCase): - def test_usage_with_different_functions(self): - def f(x): - return x+1 - - def f2(x): - return x+2 - - wq = WorkerQueue() - fr = FuncResult(f) - fr2 = FuncResult(f2) - wq.do(fr, 1) - wq.do(fr2, 1) - wq.wait() - wq.stop() - - self.assertEqual(fr.result, 2) - self.assertEqual(fr2.result, 3) - - def todo_test_do(self): - - # __doc__ (as of 2008-06-28) for pygame.threads.WorkerQueue.do: - - # puts a function on a queue for running later. - # - - self.fail() - - def test_stop(self): - """Ensure stop() stops the worker queue""" - wq = WorkerQueue() - - self.assertGreater(len(wq.pool), 0) - - for t in wq.pool: - self.assertTrue(t.isAlive()) - - for i in xrange_(200): - wq.do(lambda x: x+1, i) - - wq.stop() - - for t in wq.pool: - self.assertFalse(t.isAlive()) - - self.assertIs(wq.queue.get(), STOP) - - def todo_test_threadloop(self): - - # __doc__ (as of 2008-06-28) for pygame.threads.WorkerQueue.threadloop: - - # Loops until all of the tasks are finished. - - self.fail() - - def test_wait(self): - - # __doc__ (as of 2008-06-28) for pygame.threads.WorkerQueue.wait: - - # waits until all tasks are complete. 
- - wq = WorkerQueue() - - for i in xrange_(2000): wq.do(lambda x: x+1, i) - wq.wait() - - self.assertRaises(Empty, wq.queue.get_nowait) - - wq.stop() - - -class ThreadsModuleTest(unittest.TestCase): - def todo_test_benchmark_workers(self): - "tags:long_running" - - # __doc__ (as of 2008-06-28) for pygame.threads.benchmark_workers: - - # does a little test to see if workers are at all faster. - # Returns the number of workers which works best. - # Takes a little bit of time to run, so you should only really call - # it once. - # You can pass in benchmark data, and functions if you want. - # a_bench_func - f(data) - # the_data - data to work on. - - self.fail() - - def test_init(self): - """Ensure init() sets up the worker queue""" - threads.init(8) - - self.assertIsInstance(threads._wq, WorkerQueue) - - threads.quit() - - def test_quit(self): - """Ensure quit() cleans up the worker queue""" - threads.init(8) - threads.quit() - - self.assertIsNone(threads._wq) - - def test_tmap(self): - # __doc__ (as of 2008-06-28) for pygame.threads.tmap: - - # like map, but uses a thread pool to execute. - # num_workers - the number of worker threads that will be used. If pool - # is passed in, then the num_workers arg is ignored. - # worker_queue - you can optionally pass in an existing WorkerQueue. - # wait - True means that the results are returned when everything is finished. - # False means that we return the [worker_queue, results] right away instead. - # results, is returned as a list of FuncResult instances. 
- # stop_on_error - - - func, data = lambda x:x+1, xrange_(100) - - tmapped = list(tmap(func, data)) - mapped = list(map(func, data)) - - self.assertEqual(tmapped, mapped) - - def todo_test_tmap__None_func_and_multiple_sequences(self): - """Using a None as func and multiple sequences""" - self.fail() - - res = tmap(None, [1,2,3,4]) - res2 = tmap(None, [1,2,3,4], [22, 33, 44, 55]) - res3 = tmap(None, [1,2,3,4], [22, 33, 44, 55, 66]) - res4 = tmap(None, [1,2,3,4,5], [22, 33, 44, 55]) - - self.assertEqual([1, 2, 3, 4], res) - self.assertEqual([(1, 22), (2, 33), (3, 44), (4, 55)], res2) - self.assertEqual([(1, 22), (2, 33), (3, 44), (4, 55), (None, 66)], res3) - self.assertEqual([(1, 22), (2, 33), (3, 44), (4, 55), (5,None)], res4) - - def test_tmap__wait(self): - r = range(1000) - wq, results = tmap(lambda x:x, r, num_workers = 5, wait=False) - wq.wait() - r2 = map(lambda x:x.result, results) - self.assertEqual(list(r), list(r2)) - - def test_FuncResult(self): - """Ensure FuncResult sets its result and exception attributes""" - # Results are stored in result attribute - fr = FuncResult(lambda x:x+1) - fr(2) - - self.assertEqual(fr.result, 3) - - # Exceptions are store in exception attribute - self.assertIsNone(fr.exception, "no exception should be raised") - - exception = ValueError('rast') - - def x(sdf): - raise exception - - fr = FuncResult(x) - fr(None) - - self.assertIs(fr.exception, exception) - - -################################################################################ - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/time_test.py b/venv/lib/python3.7/site-packages/pygame/tests/time_test.py deleted file mode 100644 index 7a84458..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/time_test.py +++ /dev/null @@ -1,204 +0,0 @@ -import unittest -import pygame - -Clock = pygame.time.Clock - - -class ClockTypeTest(unittest.TestCase): - def test_construction(self): - """Ensure a Clock object can 
be created""" - c = Clock() - - self.assertTrue(c, "Clock cannot be constructed") - - def todo_test_get_fps(self): - - # __doc__ (as of 2008-08-02) for pygame.time.Clock.get_fps: - - # Clock.get_fps(): return float - # compute the clock framerate - # - # Compute your game's framerate (in frames per second). It is computed - # by averaging the last few calls to Clock.tick(). - # - - self.fail() - - # delay_per_frame = 1 / 100.0 - # - # c = Clock() - # - # for f in range(100): - # c.tick() - # time.sleep(delay_per_frame) - # - # self.assertTrue(99.0 < c.get_fps() < 101.0) - - def todo_test_get_rawtime(self): - - # __doc__ (as of 2008-08-02) for pygame.time.Clock.get_rawtime: - - # Clock.get_rawtime(): return milliseconds - # actual time used in the previous tick - # - # Similar to Clock.get_time(), but this does not include any time used - # while Clock.tick() was delaying to limit the framerate. - # - - self.fail() - - def todo_test_get_time(self): - - # __doc__ (as of 2008-08-02) for pygame.time.Clock.get_time: - - # Clock.get_time(): return milliseconds - # time used in the previous tick - # - # Returns the parameter passed to the last call to Clock.tick(). It is - # the number of milliseconds passed between the previous two calls to - # Pygame.tick(). - # - - self.fail() - - # c = Clock() - # c.tick() #between here - # time.sleep(0.02) - # #get_time() - # c.tick() # here - # - # time.sleep(0.02) - # - # self.assertTrue(20 <= c.get_time() <= 30) - - - def todo_test_tick(self): - - # __doc__ (as of 2008-08-02) for pygame.time.Clock.tick: - - # Clock.tick(framerate=0): return milliseconds - # control timer events - # update the clock - # - # This method should be called once per frame. It will compute how - # many milliseconds have passed since the previous call. - # - # If you pass the optional framerate argument the function will delay - # to keep the game running slower than the given ticks per second. 
- # This can be used to help limit the runtime speed of a game. By - # calling Clock.tick(40) once per frame, the program will never run at - # more than 40 frames per second. - # - # Note that this function uses SDL_Delay function which is not - # accurate on every platform, but does not use much cpu. Use - # tick_busy_loop if you want an accurate timer, and don't mind chewing - # cpu. - # - - self.fail() - - # collection = [] - # c = Clock() - # - # c.tick() - # for i in range(100): - # time.sleep(0.005) - # collection.append(c.tick()) - # - # for outlier in [min(collection), max(collection)]: - # if outlier != 5: collection.remove(outlier) - # - # self.assertEqual(sum(collection) / len(collection), 5) - - def todo_test_tick_busy_loop(self): - - # __doc__ (as of 2008-08-02) for pygame.time.Clock.tick_busy_loop: - - # Clock.tick_busy_loop(framerate=0): return milliseconds - # control timer events - # update the clock - # - # This method should be called once per frame. It will compute how - # many milliseconds have passed since the previous call. - # - # If you pass the optional framerate argument the function will delay - # to keep the game running slower than the given ticks per second. - # This can be used to help limit the runtime speed of a game. By - # calling Clock.tick(40) once per frame, the program will never run at - # more than 40 frames per second. - # - # Note that this function uses pygame.time.delay, which uses lots of - # cpu in a busy loop to make sure that timing is more acurate. - # - # New in pygame 1.8.0. - - self.fail() - -class TimeModuleTest(unittest.TestCase): - def todo_test_delay(self): - - # __doc__ (as of 2008-08-02) for pygame.time.delay: - - # pygame.time.delay(milliseconds): return time - # pause the program for an amount of time - # - # Will pause for a given number of milliseconds. This function will - # use the processor (rather than sleeping) in order to make the delay - # more accurate than pygame.time.wait(). 
- # - # This returns the actual number of milliseconds used. - - self.fail() - - def todo_test_get_ticks(self): - - # __doc__ (as of 2008-08-02) for pygame.time.get_ticks: - - # pygame.time.get_ticks(): return milliseconds - # get the time in milliseconds - # - # Return the number of millisconds since pygame.init() was called. - # Before pygame is initialized this will always be 0. - # - - self.fail() - - def todo_test_set_timer(self): - - # __doc__ (as of 2008-08-02) for pygame.time.set_timer: - - # pygame.time.set_timer(eventid, milliseconds): return None - # repeatedly create an event on the event queue - # - # Set an event type to appear on the event queue every given number of - # milliseconds. The first event will not appear until the amount of - # time has passed. - # - # Every event type can have a separate timer attached to it. It is - # best to use the value between pygame.USEREVENT and pygame.NUMEVENTS. - # - # To disable the timer for an event, set the milliseconds argument to 0. - - self.fail() - - def todo_test_wait(self): - - # __doc__ (as of 2008-08-02) for pygame.time.wait: - - # pygame.time.wait(milliseconds): return time - # pause the program for an amount of time - # - # Will pause for a given number of milliseconds. This function sleeps - # the process to share the processor with other programs. A program - # that waits for even a few milliseconds will consume very little - # processor time. It is slightly less accurate than the - # pygame.time.delay() function. - # - # This returns the actual number of milliseconds used. 
- - self.fail() - -################################################################################ - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/touch_tags.py b/venv/lib/python3.7/site-packages/pygame/tests/touch_tags.py deleted file mode 100644 index 9f64857..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/touch_tags.py +++ /dev/null @@ -1,2 +0,0 @@ -__tags__ = ['SDL1_ignore'] - diff --git a/venv/lib/python3.7/site-packages/pygame/tests/touch_test.py b/venv/lib/python3.7/site-packages/pygame/tests/touch_test.py deleted file mode 100644 index 7cb4652..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/touch_test.py +++ /dev/null @@ -1,44 +0,0 @@ -import unittest -import pygame -from pygame._sdl2 import touch - - -has_touchdevice = touch.get_num_devices() > 0 - - -class TouchTest(unittest.TestCase): - - @classmethod - def setUpClass(cls): - pygame.display.init() - - @classmethod - def tearDownClass(cls): - pygame.display.quit() - - def test_num_devices(self): - touch.get_num_devices() - - @unittest.skipIf(not has_touchdevice, 'no touch devices found') - def test_get_device(self): - touch.get_device(0) - - def test_num_fingers__invalid(self): - self.assertRaises(pygame.error, touch.get_device, -1234) - self.assertRaises(TypeError, touch.get_device, 'test') - - @unittest.skipIf(not has_touchdevice, 'no touch devices found') - def test_num_fingers(self): - touch.get_num_fingers(touch.get_device(0)) - - def test_num_fingers__invalid(self): - self.assertRaises(TypeError, touch.get_num_fingers, 'test') - self.assertRaises(pygame.error, touch.get_num_fingers, -1234) - - @unittest.skipIf(not has_touchdevice, 'no touch devices found') - def todo_test_get_finger(self): - """ask for touch input and check the dict""" - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/transform_test.py 
b/venv/lib/python3.7/site-packages/pygame/tests/transform_test.py deleted file mode 100644 index 5fa116c..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/transform_test.py +++ /dev/null @@ -1,1096 +0,0 @@ -import unittest -import platform - -from pygame.tests import test_utils -import pygame -import pygame.transform -from pygame.locals import * - - -def show_image(s, images = []): - #pygame.display.init() - size = s.get_rect()[2:] - screen = pygame.display.set_mode(size) - screen.blit(s, (0,0)) - pygame.display.flip() - pygame.event.pump() - going = True - idx = 0 - while going: - events = pygame.event.get() - for e in events: - if e.type == QUIT: - going = False - if e.type == KEYDOWN: - if e.key in [K_s, K_a]: - if e.key == K_s: idx += 1 - if e.key == K_a: idx -= 1 - s = images[idx] - screen.blit(s, (0,0)) - pygame.display.flip() - pygame.event.pump() - elif e.key in [K_ESCAPE]: - going = False - pygame.display.quit() - pygame.display.init() - -def threshold(return_surf, surf, color, threshold = (0,0,0), diff_color = (0,0,0), change_return = True ): - """ given the color it makes return_surf only have areas with the given colour. - """ - - width, height =surf.get_width(), surf.get_height() - - if change_return: - return_surf.fill(diff_color) - - try: - r, g, b = color - except ValueError: - r, g, b, a = color - - - try: - tr, tg, tb = color - except ValueError: - tr, tg, tb, ta = color - - - - similar = 0 - for y in xrange(height): - for x in xrange(width): - c1 = surf.get_at((x,y)) - - if ( (abs(c1[0] - r) < tr) & - (abs(c1[1] - g) < tg) & - (abs(c1[2] - b) < tb) ): - # this pixel is within the threshold. - if change_return: - return_surf.set_at((x,y), c1) - similar += 1 - #else: - # print c1, c2 - - - return similar - - -class TransformModuleTest( unittest.TestCase ): - - def test_scale__alpha( self ): - """ see if set_alpha information is kept. 
- """ - - s = pygame.Surface((32,32)) - s.set_alpha(55) - self.assertEqual(s.get_alpha(),55) - - s = pygame.Surface((32,32)) - s.set_alpha(55) - s2 = pygame.transform.scale(s, (64,64)) - s3 = s.copy() - self.assertEqual(s.get_alpha(),s3.get_alpha()) - self.assertEqual(s.get_alpha(),s2.get_alpha()) - - def test_scale__destination( self ): - """ see if the destination surface can be passed in to use. - """ - - s = pygame.Surface((32,32)) - s2 = pygame.transform.scale(s, (64,64)) - s3 = s2.copy() - - s3 = pygame.transform.scale(s, (64,64), s3) - pygame.transform.scale(s, (64,64), s2) - - # the wrong size surface is past in. Should raise an error. - self.assertRaises(ValueError, pygame.transform.scale, s, (33,64), s3) - - s = pygame.Surface((32,32)) - s2 = pygame.transform.smoothscale(s, (64,64)) - s3 = s2.copy() - - s3 = pygame.transform.smoothscale(s, (64,64), s3) - pygame.transform.smoothscale(s, (64,64), s2) - - # the wrong size surface is past in. Should raise an error. - self.assertRaises(ValueError, pygame.transform.smoothscale, s, (33,64), s3) - - def test_scale__zero_surface_transform(self): - tmp_surface = pygame.transform.scale(pygame.Surface((128, 128)), (0, 0)) - self.assertEqual(tmp_surface.get_size(), (0, 0)) - tmp_surface = pygame.transform.scale(tmp_surface, (128, 128)) - self.assertEqual(tmp_surface.get_size(), (128, 128)) - - def test_threshold__honors_third_surface(self): - # __doc__ for threshold as of Tue 07/15/2008 - - # pygame.transform.threshold(DestSurface, Surface, color, threshold = - # (0,0,0,0), diff_color = (0,0,0,0), change_return = True, Surface = - # None): return num_threshold_pixels - - # When given the optional third - # surface, it would use the colors in that rather than the "color" - # specified in the function to check against. 
- - # New in pygame 1.8 - - ################################################################ - # Sizes - (w, h) = size = (32, 32) - - # the original_color is within the threshold of the threshold_color - threshold = (20, 20, 20, 20) - - original_color = (25,25,25,25) - threshold_color = (10, 10, 10, 10) - - # Surfaces - original_surface = pygame.Surface(size, pygame.SRCALPHA, 32) - dest_surface = pygame.Surface(size, pygame.SRCALPHA, 32) - - # Third surface is used in lieu of 3rd position arg color - third_surface = pygame.Surface(size, pygame.SRCALPHA, 32) - - # Color filling - original_surface.fill(original_color) - third_surface.fill(threshold_color) - - ################################################################ - # All pixels for color should be within threshold - # - pixels_within_threshold = pygame.transform.threshold ( - dest_surf=None, - surf=original_surface, - search_color=threshold_color, - threshold=threshold, - set_color=None, - set_behavior=0 - ) - - self.assertEqual(w*h, pixels_within_threshold) - - ################################################################ - # This should respect third_surface colors in place of 3rd arg - # color Should be the same as: surface.fill(threshold_color) - # all within threshold - - pixels_within_threshold = pygame.transform.threshold ( - dest_surf=None, - surf=original_surface, - search_color=None, - threshold=threshold, - set_color=None, - set_behavior=0, - search_surf=third_surface, - ) - self.assertEqual(w*h, pixels_within_threshold) - - def test_threshold_dest_surf_not_change(self): - """ the pixels within the threshold. - - All pixels not within threshold are changed to set_color. - So there should be none changed in this test. 
- """ - (w, h) = size = (32, 32) - threshold = (20, 20, 20, 20) - original_color = (25, 25, 25, 25) - original_dest_color = (65, 65, 65, 55) - threshold_color = (10, 10, 10, 10) - set_color = (255, 10, 10, 10) - - surf = pygame.Surface(size, pygame.SRCALPHA, 32) - dest_surf = pygame.Surface(size, pygame.SRCALPHA, 32) - search_surf = pygame.Surface(size, pygame.SRCALPHA, 32) - - surf.fill(original_color) - search_surf.fill(threshold_color) - dest_surf.fill(original_dest_color) - - # set_behavior=1, set dest_surface from set_color. - # all within threshold of third_surface, so no color is set. - - THRESHOLD_BEHAVIOR_FROM_SEARCH_COLOR = 1 - pixels_within_threshold = pygame.transform.threshold( - dest_surf=dest_surf, - surf=surf, - search_color=None, - threshold=threshold, - set_color=set_color, - set_behavior=THRESHOLD_BEHAVIOR_FROM_SEARCH_COLOR, - search_surf=search_surf, - ) - - # # Return, of pixels within threshold is correct - self.assertEqual(w*h, pixels_within_threshold) - - # # Size of dest surface is correct - dest_rect = dest_surf.get_rect() - dest_size = dest_rect.size - self.assertEqual(size, dest_size) - - # The color is not the change_color specified for every pixel As all - # pixels are within threshold - - for pt in test_utils.rect_area_pts(dest_rect): - self.assertNotEqual(dest_surf.get_at(pt), set_color) - self.assertEqual(dest_surf.get_at(pt), original_dest_color) - - def test_threshold_dest_surf_all_changed(self): - """ Lowering the threshold, expecting changed surface - """ - - (w, h) = size = (32, 32) - threshold = (20, 20, 20, 20) - original_color = (25, 25, 25, 25) - original_dest_color = (65, 65, 65, 55) - threshold_color = (10, 10, 10, 10) - set_color = (255, 10, 10, 10) - - surf = pygame.Surface(size, pygame.SRCALPHA, 32) - dest_surf = pygame.Surface(size, pygame.SRCALPHA, 32) - search_surf = pygame.Surface(size, pygame.SRCALPHA, 32) - - surf.fill(original_color) - search_surf.fill(threshold_color) - dest_surf.fill(original_dest_color) - - 
THRESHOLD_BEHAVIOR_FROM_SEARCH_COLOR = 1 - pixels_within_threshold = pygame.transform.threshold ( - dest_surf, - surf, - search_color=None, - set_color=set_color, - set_behavior=THRESHOLD_BEHAVIOR_FROM_SEARCH_COLOR, - search_surf=search_surf, - ) - - self.assertEqual(0, pixels_within_threshold) - - dest_rect = dest_surf.get_rect() - dest_size = dest_rect.size - self.assertEqual(size, dest_size) - - # The color is the set_color specified for every pixel As all - # pixels are not within threshold - for pt in test_utils.rect_area_pts(dest_rect): - self.assertEqual(dest_surf.get_at(pt), set_color) - - def test_threshold_count(self): - """ counts the colors, and not changes them. - """ - surf_size = (32, 32) - surf = pygame.Surface(surf_size, pygame.SRCALPHA, 32) - search_surf = pygame.Surface(surf_size, pygame.SRCALPHA, 32) - search_color = (55, 55, 55, 255) - original_color = (10, 10, 10, 255) - - surf.fill(original_color) - # set 2 pixels to the color we are searching for. - surf.set_at((0, 0), search_color) - surf.set_at((12, 5), search_color) - - # There is no destination surface, but we ask to change it. - # This should be an error. 
- self.assertRaises(TypeError, pygame.transform.threshold, - None, - surf, - search_color) - # from pygame.transform import THRESHOLD_BEHAVIOR_COUNT - THRESHOLD_BEHAVIOR_FROM_SEARCH_SURF = 2 - self.assertRaises(TypeError, pygame.transform.threshold, - None, - surf, - search_color, - set_behavior=THRESHOLD_BEHAVIOR_FROM_SEARCH_SURF) - - THRESHOLD_BEHAVIOR_COUNT = 0 - num_threshold_pixels = pygame.transform.threshold( - dest_surf=None, - surf=surf, - search_color=search_color, - set_behavior=THRESHOLD_BEHAVIOR_COUNT) - self.assertEqual(num_threshold_pixels, 2) - - def test_threshold_search_surf(self): - surf_size = (32, 32) - surf = pygame.Surface(surf_size, pygame.SRCALPHA, 32) - search_surf = pygame.Surface(surf_size, pygame.SRCALPHA, 32) - dest_surf = pygame.Surface(surf_size, pygame.SRCALPHA, 32) - - original_color = (10, 10, 10, 255) - search_color = (55, 55, 55, 255) - - surf.fill(original_color) - dest_surf.fill(original_color) - # set 2 pixels to the color we are searching for. - surf.set_at((0, 0), search_color) - surf.set_at((12, 5), search_color) - - search_surf.fill(search_color) - - # We look in the other surface for matching colors. - # Change it in dest_surf - THRESHOLD_BEHAVIOR_FROM_SEARCH_SURF = 2 - - # TypeError: if search_surf is used, search_color should be None - self.assertRaises(TypeError, pygame.transform.threshold, - dest_surf, - surf, - search_color, - set_behavior=THRESHOLD_BEHAVIOR_FROM_SEARCH_SURF, - search_surf=search_surf) - - # surf, dest_surf, and search_surf should all be the same size. - # Check surface sizes are the same size. 
- different_sized_surf = pygame.Surface((22, 33), pygame.SRCALPHA, 32) - self.assertRaises(TypeError, pygame.transform.threshold, - different_sized_surf, - surf, - search_color=None, - set_color=None, - set_behavior=THRESHOLD_BEHAVIOR_FROM_SEARCH_SURF, - search_surf=search_surf) - - self.assertRaises(TypeError, pygame.transform.threshold, - dest_surf, - surf, - search_color=None, - set_color=None, - set_behavior=THRESHOLD_BEHAVIOR_FROM_SEARCH_SURF, - search_surf=different_sized_surf) - - # We look to see if colors in search_surf are in surf. - num_threshold_pixels = pygame.transform.threshold( - dest_surf=dest_surf, - surf=surf, - search_color=None, - set_color=None, - set_behavior=THRESHOLD_BEHAVIOR_FROM_SEARCH_SURF, - search_surf=search_surf) - - num_pixels_within = 2 - self.assertEqual(num_threshold_pixels, num_pixels_within) - - dest_surf.fill(original_color) - num_threshold_pixels = pygame.transform.threshold( - dest_surf, - surf, - search_color=None, - set_color=None, - set_behavior=THRESHOLD_BEHAVIOR_FROM_SEARCH_SURF, - search_surf=search_surf, - inverse_set=True) - - self.assertEqual(num_threshold_pixels, 2) - - def test_threshold_inverse_set(self): - """ changes the pixels within the threshold, and not outside. - """ - surf_size = (32, 32) - _dest_surf = pygame.Surface(surf_size, pygame.SRCALPHA, 32) - _surf = pygame.Surface(surf_size, pygame.SRCALPHA, 32) - - dest_surf = _dest_surf # surface we are changing. - surf = _surf # surface we are looking at - search_color = (55, 55, 55, 255) # color we are searching for. - threshold = (0, 0, 0, 0) # within this distance from search_color. - set_color = (245, 245, 245, 255) # color we set. - inverse_set = 1 # pixels within threshold are changed to 'set_color' - - - original_color = (10, 10, 10, 255) - surf.fill(original_color) - # set 2 pixels to the color we are searching for. 
- surf.set_at((0, 0), search_color) - surf.set_at((12, 5), search_color) - - dest_surf.fill(original_color) - # set 2 pixels to the color we are searching for. - dest_surf.set_at((0, 0), search_color) - dest_surf.set_at((12, 5), search_color) - - - THRESHOLD_BEHAVIOR_FROM_SEARCH_COLOR = 1 - num_threshold_pixels = pygame.transform.threshold( - dest_surf, - surf, - search_color=search_color, - threshold=threshold, - set_color=set_color, - set_behavior=THRESHOLD_BEHAVIOR_FROM_SEARCH_COLOR, - inverse_set=1) - - self.assertEqual(num_threshold_pixels, 2) - # only two pixels changed to diff_color. - self.assertEqual(dest_surf.get_at((0, 0)), set_color) - self.assertEqual(dest_surf.get_at((12, 5)), set_color) - - - # other pixels should be the same as they were before. - # We just check one other pixel, not all of them. - self.assertEqual(dest_surf.get_at((2, 2)), original_color) - -#XXX - def test_threshold_non_src_alpha(self): - - result = pygame.Surface((10,10)) - s1 = pygame.Surface((10,10)) - s2 = pygame.Surface((10,10)) - s3 = pygame.Surface((10,10)) - s4 = pygame.Surface((10,10)) - - x = s1.fill((0, 0, 0)) - s1.set_at((0,0), (32, 20, 0 )) - - x = s2.fill((0,20,0)) - x = s3.fill((0,0,0)) - x = s4.fill((0,0,0)) - s2.set_at((0,0), (33, 21, 0 )) - s2.set_at((3,0), (63, 61, 0 )) - s3.set_at((0,0), (112, 31, 0 )) - s4.set_at((0,0), (11, 31, 0 )) - s4.set_at((1,1), (12, 31, 0 )) - - self.assertEqual(s1.get_at((0,0)), (32, 20, 0, 255)) - self.assertEqual(s2.get_at((0,0)), (33, 21, 0, 255)) - self.assertEqual((0, 0), (s1.get_flags(), s2.get_flags())) - - - - similar_color = (255, 255, 255, 255) - diff_color = (222, 0, 0, 255) - threshold_color = (20, 20, 20, 255) - - THRESHOLD_BEHAVIOR_FROM_SEARCH_COLOR = 1 - num_threshold_pixels = pygame.transform.threshold( - dest_surf=result, - surf=s1, - search_color=similar_color, - threshold=threshold_color, - set_color=diff_color, - set_behavior=THRESHOLD_BEHAVIOR_FROM_SEARCH_COLOR) - self.assertEqual(num_threshold_pixels, 0) - - 
num_threshold_pixels = pygame.transform.threshold( - dest_surf=result, - surf=s1, - search_color=(40, 40, 0), - threshold=threshold_color, - set_color=diff_color, - set_behavior=THRESHOLD_BEHAVIOR_FROM_SEARCH_COLOR) - self.assertEqual(num_threshold_pixels, 1) - - - self.assertEqual(result.get_at((0,0)), diff_color) - - - def test_threshold__uneven_colors(self): - (w,h) = size = (16, 16) - - original_surface = pygame.Surface(size, pygame.SRCALPHA, 32) - dest_surface = pygame.Surface(size, pygame.SRCALPHA, 32) - - original_surface.fill(0) - - threshold_color_template = [5, 5, 5, 5] - threshold_template = [6, 6, 6, 6] - - ################################################################ - - for pos in range(len('rgb')): - threshold_color = threshold_color_template[:] - threshold = threshold_template[:] - - threshold_color[pos] = 45 - threshold[pos] = 50 - - pixels_within_threshold = pygame.transform.threshold ( - None, - original_surface, - threshold_color, - threshold, - set_color=None, - set_behavior=0 - ) - - self.assertEqual(w*h, pixels_within_threshold) - - ################################################################ - - def test_threshold_set_behavior2(self): - """ raises an error when set_behavior=2 and set_color is not None. - """ - from pygame.transform import threshold - s1 = pygame.Surface((32,32), SRCALPHA, 32) - s2 = pygame.Surface((32,32), SRCALPHA, 32) - THRESHOLD_BEHAVIOR_FROM_SEARCH_SURF = 2 - self.assertRaises(TypeError, threshold, - dest_surf=s2, - surf=s1, - search_color=(30, 30, 30), - threshold=(11, 11, 11), - set_color=(255, 0, 0), - set_behavior=THRESHOLD_BEHAVIOR_FROM_SEARCH_SURF) - - def test_threshold_set_behavior0(self): - """ raises an error when set_behavior=1 - and set_color is not None, - and dest_surf is not None. 
- """ - from pygame.transform import threshold - s1 = pygame.Surface((32,32), SRCALPHA, 32) - s2 = pygame.Surface((32,32), SRCALPHA, 32) - THRESHOLD_BEHAVIOR_COUNT = 0 - - self.assertRaises(TypeError, threshold, - dest_surf=None, - surf=s2, - search_color=(30, 30, 30), - threshold=(11, 11, 11), - set_color=(0, 0, 0), - set_behavior=THRESHOLD_BEHAVIOR_COUNT) - - self.assertRaises(TypeError, threshold, - dest_surf=s1, - surf=s2, - search_color=(30, 30, 30), - threshold=(11, 11, 11), - set_color=None, - set_behavior=THRESHOLD_BEHAVIOR_COUNT) - - threshold( - dest_surf=None, - surf=s2, - search_color=(30, 30, 30), - threshold=(11, 11, 11), - set_color=None, - set_behavior=THRESHOLD_BEHAVIOR_COUNT) - - - def test_threshold_from_surface(self): - """ Set similar pixels in 'dest_surf' to color in the 'surf'. - """ - from pygame.transform import threshold - - surf = pygame.Surface((32,32), SRCALPHA, 32) - dest_surf = pygame.Surface((32,32), SRCALPHA, 32) - surf_color = (40, 40, 40, 255) - dest_color = (255, 255, 255) - surf.fill(surf_color) - dest_surf.fill(dest_color) - THRESHOLD_BEHAVIOR_FROM_SEARCH_SURF = 2 - - num_threshold_pixels = threshold( - dest_surf=dest_surf, - surf=surf, - search_color=(30, 30, 30), - threshold=(11, 11, 11), - set_color=None, - set_behavior=THRESHOLD_BEHAVIOR_FROM_SEARCH_SURF, - inverse_set=1) - - self.assertEqual(num_threshold_pixels, dest_surf.get_height() * dest_surf.get_width()) - self.assertEqual(dest_surf.get_at((0, 0)), surf_color) - - - def test_threshold__surface(self): - """ - """ - from pygame.transform import threshold - - s1 = pygame.Surface((32,32), SRCALPHA, 32) - s2 = pygame.Surface((32,32), SRCALPHA, 32) - s3 = pygame.Surface((1,1), SRCALPHA, 32) - THRESHOLD_BEHAVIOR_FROM_SEARCH_SURF = 2 - - # # only one pixel should not be changed. - # s1.fill((40,40,40)) - # s2.fill((255,255,255)) - # s1.set_at( (0,0), (170, 170, 170) ) - # # set the similar pixels in destination surface to the color - # # in the first surface. 
- # num_threshold_pixels = threshold( - # dest_surf=s2, - # surf=s1, - # search_color=(30,30,30), - # threshold=(11,11,11), - # set_color=None, - # set_behavior=THRESHOLD_BEHAVIOR_FROM_SEARCH_SURF) - - # #num_threshold_pixels = threshold(s2, s1, (30,30,30)) - # self.assertEqual(num_threshold_pixels, (s1.get_height() * s1.get_width()) -1) - # self.assertEqual(s2.get_at((0,0)), (0,0,0, 255)) - # self.assertEqual(s2.get_at((0,1)), (40, 40, 40, 255)) - # self.assertEqual(s2.get_at((17,1)), (40, 40, 40, 255)) - - - # # abs(40 - 255) < 100 - # #(abs(c1[0] - r) < tr) - - # s1.fill((160,160,160)) - # s2.fill((255,255,255)) - # num_threshold_pixels = threshold(s2, s1, (255,255,255), (100,100,100), (0,0,0), True) - - # self.assertEqual(num_threshold_pixels, (s1.get_height() * s1.get_width())) - - - # only one pixel should not be changed. - s1.fill((40, 40, 40)) - s1.set_at((0, 0), (170, 170, 170)) - THRESHOLD_BEHAVIOR_COUNT = 0 - - num_threshold_pixels = threshold( - dest_surf=None, - surf=s1, - search_color=(30, 30, 30), - threshold=(11, 11, 11), - set_color=None, - set_behavior=THRESHOLD_BEHAVIOR_COUNT) - - #num_threshold_pixels = threshold(s2, s1, (30,30,30)) - self.assertEqual(num_threshold_pixels, (s1.get_height() * s1.get_width()) -1) - - - # test end markers. 0, and 255 - - # the pixels are different by 1. - s1.fill((254,254,254)) - s2.fill((255,255,255)) - s3.fill((255,255,255)) - s1.set_at( (0,0), (170, 170, 170) ) - num_threshold_pixels = threshold(None, s1, (254,254,254), (1,1,1), - None, THRESHOLD_BEHAVIOR_COUNT) - self.assertEqual(num_threshold_pixels, (s1.get_height() * s1.get_width()) -1) - - - # compare the two surfaces. Should be all but one matching. - num_threshold_pixels = threshold(None, s1, None, (1,1,1), - None, THRESHOLD_BEHAVIOR_COUNT, s2) - self.assertEqual(num_threshold_pixels, (s1.get_height() * s1.get_width()) -1) - - - # within (0,0,0) threshold? Should match no pixels. 
- num_threshold_pixels = threshold(None, s1, (253,253,253), (0,0,0), - None, THRESHOLD_BEHAVIOR_COUNT) - self.assertEqual(num_threshold_pixels, 0) - - - # other surface within (0,0,0) threshold? Should match no pixels. - num_threshold_pixels = threshold(None, s1, None, (0,0,0), - None, THRESHOLD_BEHAVIOR_COUNT, s2) - self.assertEqual(num_threshold_pixels, 0) - - def test_threshold__subclassed_surface(self): - """Ensure threshold accepts subclassed surfaces.""" - expected_size = (13, 11) - expected_flags = 0 - expected_depth = 32 - expected_color = (90, 80, 70, 255) - expected_count = 0 - surface = test_utils.SurfaceSubclass(expected_size, expected_flags, - expected_depth) - dest_surface = test_utils.SurfaceSubclass(expected_size, - expected_flags, - expected_depth) - search_surface = test_utils.SurfaceSubclass(expected_size, - expected_flags, - expected_depth) - surface.fill((10, 10, 10)) - dest_surface.fill((255, 255, 255)) - search_surface.fill((20, 20, 20)) - - count = pygame.transform.threshold( - dest_surf=dest_surface, surf=surface, threshold=(1, 1, 1), - set_color=expected_color, search_color=None, - search_surf=search_surface) - - self.assertIsInstance(dest_surface, pygame.Surface) - self.assertIsInstance(dest_surface, test_utils.SurfaceSubclass) - self.assertEqual(count, expected_count) - self.assertEqual(dest_surface.get_at((0,0)), expected_color) - self.assertEqual(dest_surface.get_bitsize(), expected_depth) - self.assertEqual(dest_surface.get_size(), expected_size) - self.assertEqual(dest_surface.get_flags(), expected_flags) - - def test_laplacian(self): - """ - """ - - SIZE = 32 - s1 = pygame.Surface((SIZE, SIZE)) - s2 = pygame.Surface((SIZE, SIZE)) - s1.fill((10,10,70)) - pygame.draw.line(s1, (255,0,0), (3,10), (20,20)) - - # a line at the last row of the image. 
- pygame.draw.line(s1, (255,0,0), (0,31), (31,31)) - - - pygame.transform.laplacian(s1,s2) - - #show_image(s1) - #show_image(s2) - - self.assertEqual(s2.get_at((0,0)), (0, 0, 0, 255)) - self.assertEqual(s2.get_at((3,10)), (255,0,0,255)) - self.assertEqual(s2.get_at((0,31)), (255,0,0,255)) - self.assertEqual(s2.get_at((31,31)), (255,0,0,255)) - - - # here we create the return surface. - s2 = pygame.transform.laplacian(s1) - - self.assertEqual(s2.get_at((0,0)), (0, 0, 0, 255)) - self.assertEqual(s2.get_at((3,10)), (255,0,0,255)) - self.assertEqual(s2.get_at((0,31)), (255,0,0,255)) - self.assertEqual(s2.get_at((31,31)), (255,0,0,255)) - - def test_average_surfaces(self): - """ - """ - - SIZE = 32 - s1 = pygame.Surface((SIZE, SIZE)) - s2 = pygame.Surface((SIZE, SIZE)) - s3 = pygame.Surface((SIZE, SIZE)) - s1.fill((10,10,70)) - s2.fill((10,20,70)) - s3.fill((10,130,10)) - - surfaces = [s1, s2, s3] - surfaces = [s1, s2] - sr = pygame.transform.average_surfaces(surfaces) - - self.assertEqual(sr.get_at((0,0)), (10,15,70,255)) - - - self.assertRaises(TypeError, pygame.transform.average_surfaces, 1) - self.assertRaises(TypeError, pygame.transform.average_surfaces, []) - - self.assertRaises(TypeError, pygame.transform.average_surfaces, [1]) - self.assertRaises(TypeError, pygame.transform.average_surfaces, [s1, 1]) - self.assertRaises(TypeError, pygame.transform.average_surfaces, [1, s1]) - self.assertRaises(TypeError, pygame.transform.average_surfaces, [s1, s2, 1]) - - self.assertRaises(TypeError, pygame.transform.average_surfaces, (s for s in [s1, s2,s3] )) - - - - def test_average_surfaces__24(self): - - SIZE = 32 - depth = 24 - s1 = pygame.Surface((SIZE, SIZE), 0, depth) - s2 = pygame.Surface((SIZE, SIZE), 0, depth) - s3 = pygame.Surface((SIZE, SIZE), 0, depth) - s1.fill((10,10,70, 255)) - s2.fill((10,20,70, 255)) - s3.fill((10,130,10, 255)) - - surfaces = [s1, s2, s3] - sr = pygame.transform.average_surfaces(surfaces) - self.assertEqual( sr.get_masks(), s1.get_masks() ) - 
self.assertEqual( sr.get_flags(), s1.get_flags() ) - self.assertEqual( sr.get_losses(), s1.get_losses() ) - - if 0: - print ( sr, s1 ) - print ( sr.get_masks(), s1.get_masks() ) - print ( sr.get_flags(), s1.get_flags() ) - print ( sr.get_losses(), s1.get_losses() ) - print ( sr.get_shifts(), s1.get_shifts() ) - - self.assertEqual(sr.get_at((0,0)), (10,53,50,255)) - - def test_average_surfaces__subclassed_surfaces(self): - """Ensure average_surfaces accepts subclassed surfaces.""" - expected_size = (23, 17) - expected_flags = 0 - expected_depth = 32 - expected_color = (50, 50, 50, 255) - surfaces = [] - - for color in ((40, 60, 40), (60, 40, 60)): - s = test_utils.SurfaceSubclass(expected_size, expected_flags, - expected_depth) - s.fill(color) - surfaces.append(s) - - surface = pygame.transform.average_surfaces(surfaces) - - self.assertIsInstance(surface, pygame.Surface) - self.assertNotIsInstance(surface, test_utils.SurfaceSubclass) - self.assertEqual(surface.get_at((0,0)), expected_color) - self.assertEqual(surface.get_bitsize(), expected_depth) - self.assertEqual(surface.get_size(), expected_size) - self.assertEqual(surface.get_flags(), expected_flags) - - def test_average_surfaces__subclassed_destination_surface(self): - """Ensure average_surfaces accepts a destination subclassed surface.""" - expected_size = (13, 27) - expected_flags = 0 - expected_depth = 32 - expected_color = (15, 15, 15, 255) - surfaces = [] - - for color in ((10, 10, 20), (20, 20, 10), (30, 30, 30)): - s = test_utils.SurfaceSubclass(expected_size, expected_flags, - expected_depth) - s.fill(color) - surfaces.append(s) - expected_dest_surface = surfaces.pop() - - dest_surface = pygame.transform.average_surfaces(surfaces, - expected_dest_surface) - - self.assertIsInstance(dest_surface, pygame.Surface) - self.assertIsInstance(dest_surface, test_utils.SurfaceSubclass) - self.assertIs(dest_surface, expected_dest_surface) - self.assertEqual(dest_surface.get_at((0,0)), expected_color) - 
self.assertEqual(dest_surface.get_bitsize(), expected_depth) - self.assertEqual(dest_surface.get_size(), expected_size) - self.assertEqual(dest_surface.get_flags(), expected_flags) - - def test_average_color(self): - """ - """ - - a = [24, 32] - for i in a: - s = pygame.Surface((32,32), 0, i) - s.fill((0,100,200)) - s.fill((10,50,100), (0,0,16,32)) - - self.assertEqual(pygame.transform.average_color(s),(5,75,150,0)) - self.assertEqual(pygame.transform.average_color(s, (16,0,16,32)), (0,100,200,0)) - - def todo_test_rotate(self): - - # __doc__ (as of 2008-06-25) for pygame.transform.rotate: - - # pygame.transform.rotate(Surface, angle): return Surface - # rotate an image - - # color = (128, 128, 128, 255) - # s = pygame.Surface((3, 3)) - - # s.set_at((2, 0), color) - - # self.assertNotEqual(s.get_at((0, 0)), color) - # s = pygame.transform.rotate(s, 90) - # self.assertEqual(s.get_at((0, 0)), color) - - self.fail() - - def test_rotate__lossless_at_90_degrees(self): - w, h = 32, 32 - s = pygame.Surface((w, h), pygame.SRCALPHA) - - gradient = list(test_utils.gradient(w, h)) - - for pt, color in gradient: s.set_at(pt, color) - - for rotation in (90, -90): - s = pygame.transform.rotate(s,rotation) - - for pt, color in gradient: - self.assertTrue(s.get_at(pt) == color) - - def test_scale2x(self): - - # __doc__ (as of 2008-06-25) for pygame.transform.scale2x: - - # pygame.transform.scale2x(Surface, DestSurface = None): Surface - # specialized image doubler - - w, h = 32, 32 - s = pygame.Surface((w, h), pygame.SRCALPHA, 32) - - # s.set_at((0,0), (20, 20, 20, 255)) - - s2 = pygame.transform.scale2x(s) - self.assertEqual(s2.get_rect().size, (64, 64)) - - def test_get_smoothscale_backend(self): - filter_type = pygame.transform.get_smoothscale_backend() - self.assertTrue(filter_type in ['GENERIC', 'MMX', 'SSE']) - # It would be nice to test if a non-generic type corresponds to an x86 - # processor. But there is no simple test for this. 
platform.machine() - # returns process version specific information, like 'i686'. - - def test_set_smoothscale_backend(self): - # All machines should allow 'GENERIC'. - original_type = pygame.transform.get_smoothscale_backend() - pygame.transform.set_smoothscale_backend('GENERIC') - filter_type = pygame.transform.get_smoothscale_backend() - self.assertEqual(filter_type, 'GENERIC') - # All machines should allow returning to original value. - # Also check that keyword argument works. - pygame.transform.set_smoothscale_backend(type=original_type) - # Something invalid. - def change(): - pygame.transform.set_smoothscale_backend('mmx') - self.assertRaises(ValueError, change) - # Invalid argument keyword. - def change(): - pygame.transform.set_smoothscale_backend(t='GENERIC') - self.assertRaises(TypeError, change) - # Invalid argument type. - def change(): - pygame.transform.set_smoothscale_backend(1) - self.assertRaises(TypeError, change) - # Unsupported type, if possible. - if original_type != 'SSE': - def change(): - pygame.transform.set_smoothscale_backend('SSE') - self.assertRaises(ValueError, change) - # Should be back where we started. - filter_type = pygame.transform.get_smoothscale_backend() - self.assertEqual(filter_type, original_type) - - def todo_test_chop(self): - - # __doc__ (as of 2008-08-02) for pygame.transform.chop: - - # pygame.transform.chop(Surface, rect): return Surface - # gets a copy of an image with an interior area removed - # - # Extracts a portion of an image. All vertical and horizontal pixels - # surrounding the given rectangle area are removed. The corner areas - # (diagonal to the rect) are then brought together. (The original - # image is not altered by this operation.) - # - # NOTE: If you want a "crop" that returns the part of an image within - # a rect, you can blit with a rect to a new surface or copy a - # subsurface. 
- - self.fail() - - def todo_test_rotozoom(self): - - # __doc__ (as of 2008-08-02) for pygame.transform.rotozoom: - - # pygame.transform.rotozoom(Surface, angle, scale): return Surface - # filtered scale and rotation - # - # This is a combined scale and rotation transform. The resulting - # Surface will be a filtered 32-bit Surface. The scale argument is a - # floating point value that will be multiplied by the current - # resolution. The angle argument is a floating point value that - # represents the counterclockwise degrees to rotate. A negative - # rotation angle will rotate clockwise. - - self.fail() - - def todo_test_smoothscale(self): - # __doc__ (as of 2008-08-02) for pygame.transform.smoothscale: - - # pygame.transform.smoothscale(Surface, (width, height), DestSurface = - # None): return Surface - # - # scale a surface to an arbitrary size smoothly - # - # Uses one of two different algorithms for scaling each dimension of - # the input surface as required. For shrinkage, the output pixels are - # area averages of the colors they cover. For expansion, a bilinear - # filter is used. For the amd64 and i686 architectures, optimized MMX - # routines are included and will run much faster than other machine - # types. The size is a 2 number sequence for (width, height). This - # function only works for 24-bit or 32-bit surfaces. An exception - # will be thrown if the input surface bit depth is less than 24. - # - # New in pygame 1.8 - - self.fail() - - -class TransformDisplayModuleTest(unittest.TestCase): - - def setUp(self): - pygame.display.init() - - def tearDown(self): - pygame.display.quit() - - def test_flip(self): - """ honors the set_color key on the returned surface from flip. 
- """ - from pygame.tests.test_utils import example_path - - pygame.display.set_mode((320, 200)) - - fullname = example_path('data/chimp.bmp') - image_loaded = pygame.image.load(fullname) - - image = pygame.Surface(image_loaded.get_size(), 0, 32) - image.blit(image_loaded, (0, 0)) - - image_converted = image_loaded.convert() - - self.assertFalse(image.get_flags() & pygame.SRCALPHA) - self.assertFalse(image_converted.get_flags() & pygame.SRCALPHA) - - surf = pygame.Surface(image.get_size(), 0, 32) - surf2 = pygame.Surface(image.get_size(), 0, 32) - - surf.fill((255, 255, 255)) - surf2.fill((255, 255, 255)) - - colorkey = image.get_at((0,0)) - image.set_colorkey(colorkey, RLEACCEL) - timage = pygame.transform.flip(image, 1, 0) - - colorkey = image_converted.get_at((0,0)) - image_converted.set_colorkey(colorkey, RLEACCEL) - timage_converted = pygame.transform.flip(image_converted, 1, 0) - - # blit the flipped surface, and non flipped surface. - surf.blit(timage, (0, 0)) - surf2.blit(image, (0, 0)) - - # the results should be the same. - self.assertEqual(surf.get_at((0, 0)), surf2.get_at((0, 0))) - self.assertEqual(surf2.get_at((0, 0)), (255, 255, 255, 255)) - - # now we test the convert() ed image also works. - surf.fill((255, 255, 255)) - surf2.fill((255, 255, 255)) - surf.blit(timage_converted, (0, 0)) - surf2.blit(image_converted, (0, 0)) - self.assertEqual(surf.get_at((0, 0)), surf2.get_at((0, 0))) - - def test_flip_alpha(self): - """ returns a surface with the same properties as the input. 
- """ - from pygame.tests.test_utils import example_path - - pygame.display.set_mode((320, 200)) - - fullname = example_path('data/chimp.bmp') - image_loaded = pygame.image.load(fullname) - - image_alpha = pygame.Surface(image_loaded.get_size(), pygame.SRCALPHA, 32) - image_alpha.blit(image_loaded, (0, 0)) - - surf = pygame.Surface(image_loaded.get_size(), 0, 32) - surf2 = pygame.Surface(image_loaded.get_size(), 0, 32) - - colorkey = image_alpha.get_at((0,0)) - image_alpha.set_colorkey(colorkey, RLEACCEL) - timage_alpha = pygame.transform.flip(image_alpha, 1, 0) - - self.assertTrue(image_alpha.get_flags() & pygame.SRCALPHA) - self.assertTrue(timage_alpha.get_flags() & pygame.SRCALPHA) - - # now we test the alpha image works. - surf.fill((255, 255, 255)) - surf2.fill((255, 255, 255)) - surf.blit(timage_alpha, (0, 0)) - surf2.blit(image_alpha, (0, 0)) - self.assertEqual(surf.get_at((0, 0)), surf2.get_at((0, 0))) - self.assertEqual(surf2.get_at((0, 0)), (255, 0, 0, 255)) - - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/tests/version_test.py b/venv/lib/python3.7/site-packages/pygame/tests/version_test.py deleted file mode 100644 index 93051c7..0000000 --- a/venv/lib/python3.7/site-packages/pygame/tests/version_test.py +++ /dev/null @@ -1,36 +0,0 @@ -import os -import unittest - - -pg_header = os.path.join('src_c', '_pygame.h') - - -class VersionTest(unittest.TestCase): - @unittest.skipIf(not os.path.isfile(pg_header), - "Skipping because we cannot find _pygame.h") - def test_pg_version_consistency(self): - from pygame import version - pgh_major = -1 - pgh_minor = -1 - pgh_patch = -1 - import re - major_exp_search = re.compile('define\s+PG_MAJOR_VERSION\s+([0-9]+)').search - minor_exp_search = re.compile('define\s+PG_MINOR_VERSION\s+([0-9]+)').search - patch_exp_search = re.compile('define\s+PG_PATCH_VERSION\s+([0-9]+)').search - with open(pg_header) as f: - for line in f: - if pgh_major == -1: - m = 
major_exp_search(line) - if m: pgh_major = int(m.group(1)) - if pgh_minor == -1: - m = minor_exp_search(line) - if m: pgh_minor = int(m.group(1)) - if pgh_patch == -1: - m = patch_exp_search(line) - if m: pgh_patch = int(m.group(1)) - self.assertEqual(pgh_major, version.vernum[0]) - self.assertEqual(pgh_minor, version.vernum[1]) - self.assertEqual(pgh_patch, version.vernum[2]) - -if __name__ == '__main__': - unittest.main() diff --git a/venv/lib/python3.7/site-packages/pygame/threads/Py25Queue.py b/venv/lib/python3.7/site-packages/pygame/threads/Py25Queue.py deleted file mode 100644 index 603c1bd..0000000 --- a/venv/lib/python3.7/site-packages/pygame/threads/Py25Queue.py +++ /dev/null @@ -1,216 +0,0 @@ -"""A multi-producer, multi-consumer queue.""" - -from time import time as _time - -from collections import deque - -__all__ = ['Empty', 'Full', 'Queue'] - -class Empty(Exception): - "Exception raised by Queue.get(block=0)/get_nowait()." - pass - -class Full(Exception): - "Exception raised by Queue.put(block=0)/put_nowait()." - pass - -class Queue: - """Create a queue object with a given maximum size. - - If maxsize is <= 0, the queue size is infinite. - """ - def __init__(self, maxsize=0): - try: - import threading - except ImportError: - import dummy_threading as threading - self._init(maxsize) - # mutex must be held whenever the queue is mutating. All methods - # that acquire mutex must release it before returning. mutex - # is shared between the three conditions, so acquiring and - # releasing the conditions also acquires and releases mutex. - self.mutex = threading.Lock() - # Notify not_empty whenever an item is added to the queue; a - # thread waiting to get is notified then. - self.not_empty = threading.Condition(self.mutex) - # Notify not_full whenever an item is removed from the queue; - # a thread waiting to put is notified then. 
- self.not_full = threading.Condition(self.mutex) - # Notify all_tasks_done whenever the number of unfinished tasks - # drops to zero; thread waiting to join() is notified to resume - self.all_tasks_done = threading.Condition(self.mutex) - self.unfinished_tasks = 0 - - def task_done(self): - """Indicate that a formerly enqueued task is complete. - - Used by Queue consumer threads. For each get() used to fetch a task, - a subsequent call to task_done() tells the queue that the processing - on the task is complete. - - If a join() is currently blocking, it will resume when all items - have been processed (meaning that a task_done() call was received - for every item that had been put() into the queue). - - Raises a ValueError if called more times than there were items - placed in the queue. - """ - self.all_tasks_done.acquire() - try: - unfinished = self.unfinished_tasks - 1 - if unfinished <= 0: - if unfinished < 0: - raise ValueError('task_done() called too many times') - self.all_tasks_done.notifyAll() - self.unfinished_tasks = unfinished - finally: - self.all_tasks_done.release() - - def join(self): - """Blocks until all items in the Queue have been gotten and processed. - - The count of unfinished tasks goes up whenever an item is added to the - queue. The count goes down whenever a consumer thread calls task_done() - to indicate the item was retrieved and all work on it is complete. - - When the count of unfinished tasks drops to zero, join() unblocks. 
- """ - self.all_tasks_done.acquire() - try: - while self.unfinished_tasks: - self.all_tasks_done.wait() - finally: - self.all_tasks_done.release() - - def qsize(self): - """Return the approximate size of the queue (not reliable!).""" - self.mutex.acquire() - n = self._qsize() - self.mutex.release() - return n - - def empty(self): - """Return True if the queue is empty, False otherwise (not reliable!).""" - self.mutex.acquire() - n = self._empty() - self.mutex.release() - return n - - def full(self): - """Return True if the queue is full, False otherwise (not reliable!).""" - self.mutex.acquire() - n = self._full() - self.mutex.release() - return n - - def put(self, item, block=True, timeout=None): - """Put an item into the queue. - - If optional args 'block' is true and 'timeout' is None (the default), - block if necessary until a free slot is available. If 'timeout' is - a positive number, it blocks at most 'timeout' seconds and raises - the Full exception if no free slot was available within that time. - Otherwise ('block' is false), put an item on the queue if a free slot - is immediately available, else raise the Full exception ('timeout' - is ignored in that case). - """ - self.not_full.acquire() - try: - if not block: - if self._full(): - raise Full - elif timeout is None: - while self._full(): - self.not_full.wait() - else: - if timeout < 0: - raise ValueError("'timeout' must be a positive number") - endtime = _time() + timeout - while self._full(): - remaining = endtime - _time() - if remaining <= 0.0: - raise Full - self.not_full.wait(remaining) - self._put(item) - self.unfinished_tasks += 1 - self.not_empty.notify() - finally: - self.not_full.release() - - def put_nowait(self, item): - """Put an item into the queue without blocking. - - Only enqueue the item if a free slot is immediately available. - Otherwise raise the Full exception. 
- """ - return self.put(item, False) - - def get(self, block=True, timeout=None): - """Remove and return an item from the queue. - - If optional args 'block' is true and 'timeout' is None (the default), - block if necessary until an item is available. If 'timeout' is - a positive number, it blocks at most 'timeout' seconds and raises - the Empty exception if no item was available within that time. - Otherwise ('block' is false), return an item if one is immediately - available, else raise the Empty exception ('timeout' is ignored - in that case). - """ - self.not_empty.acquire() - try: - if not block: - if self._empty(): - raise Empty - elif timeout is None: - while self._empty(): - self.not_empty.wait() - else: - if timeout < 0: - raise ValueError("'timeout' must be a positive number") - endtime = _time() + timeout - while self._empty(): - remaining = endtime - _time() - if remaining <= 0.0: - raise Empty - self.not_empty.wait(remaining) - item = self._get() - self.not_full.notify() - return item - finally: - self.not_empty.release() - - def get_nowait(self): - """Remove and return an item from the queue without blocking. - - Only get an item if one is immediately available. Otherwise - raise the Empty exception. - """ - return self.get(False) - - # Override these methods to implement other queue organizations - # (e.g. stack or priority queue). 
- # These will only be called with appropriate locks held - - # Initialize the queue representation - def _init(self, maxsize): - self.maxsize = maxsize - self.queue = deque() - - def _qsize(self): - return len(self.queue) - - # Check whether the queue is empty - def _empty(self): - return not self.queue - - # Check whether the queue is full - def _full(self): - return self.maxsize > 0 and len(self.queue) == self.maxsize - - # Put a new item in the queue - def _put(self, item): - self.queue.append(item) - - # Get an item from the queue - def _get(self): - return self.queue.popleft() diff --git a/venv/lib/python3.7/site-packages/pygame/threads/__init__.py b/venv/lib/python3.7/site-packages/pygame/threads/__init__.py deleted file mode 100644 index cc4f9cf..0000000 --- a/venv/lib/python3.7/site-packages/pygame/threads/__init__.py +++ /dev/null @@ -1,310 +0,0 @@ -""" -* Experimental * - -Like the map function, but can use a pool of threads. - -Really easy to use threads. eg. tmap(f, alist) - -If you know how to use the map function, you can use threads. -""" - -__author__ = "Rene Dudfield" -__version__ = "0.3.0" -__license__ = 'Python license' - -import traceback, sys - -from pygame.compat import geterror - -if sys.version_info[0] == 3: - from queue import Queue - from queue import Empty -elif (sys.version_info[0] == 2 and sys.version_info[1] < 5): - from Py25Queue import Queue - from Py25Queue import Empty -else: - # use up to date version - from Queue import Queue - from Queue import Empty - -import threading -Thread = threading.Thread - -STOP = object() -FINISH = object() - -# DONE_ONE = object() -# DONE_TWO = object() - -# a default worker queue. -_wq = None - -# if we are using threads or not. This is the number of workers. -_use_workers = 0 - -# Set this to the maximum for the amount of Cores/CPUs -# Note, that the tests early out. 
-# So it should only test the best number of workers +2 -MAX_WORKERS_TO_TEST = 64 - - - -def init(number_of_workers = 0): - """ Does a little test to see if threading is worth it. - Sets up a global worker queue if it's worth it. - - Calling init() is not required, but is generally better to do. - """ - global _wq, _use_workers - - if number_of_workers: - _use_workers = number_of_workers - else: - _use_workers = benchmark_workers() - - # if it is best to use zero workers, then use that. - _wq = WorkerQueue(_use_workers) - - - - -def quit(): - """ cleans up everything. - """ - global _wq, _use_workers - _wq.stop() - _wq = None - _use_workers = False - - -def benchmark_workers(a_bench_func = None, the_data = None): - """ does a little test to see if workers are at all faster. - Returns the number of workers which works best. - Takes a little bit of time to run, so you should only really call - it once. - You can pass in benchmark data, and functions if you want. - a_bench_func - f(data) - the_data - data to work on. - """ - global _use_workers - - #TODO: try and make this scale better with slower/faster cpus. - # first find some variables so that using 0 workers takes about 1.0 seconds. - # then go from there. 
- - - # note, this will only work with pygame 1.8rc3+ - # replace the doit() and the_data with something that releases the GIL - - - import pygame - import pygame.transform - import time - - if not a_bench_func: - def doit(x): - return pygame.transform.scale(x, (544, 576)) - else: - doit = a_bench_func - - if not the_data: - thedata = [] - for x in range(10): - thedata.append(pygame.Surface((155,155), 0, 32)) - else: - thedata = the_data - - best = time.time() + 100000000 - best_number = 0 - last_best = -1 - - for num_workers in range(0, MAX_WORKERS_TO_TEST): - - wq = WorkerQueue(num_workers) - t1 = time.time() - for xx in range(20): - print ("active count:%s" % threading.activeCount()) - results = tmap(doit, thedata, worker_queue = wq) - t2 = time.time() - - wq.stop() - - - total_time = t2 - t1 - print ("total time num_workers:%s: time:%s:" % (num_workers, total_time)) - - if total_time < best: - last_best = best_number - best_number =num_workers - best = total_time - - if num_workers - best_number > 1: - # We tried to add more, but it didn't like it. - # so we stop with testing at this number. - break - - - return best_number - - - - -class WorkerQueue(object): - - def __init__(self, num_workers = 20): - self.queue = Queue() - self.pool = [] - self._setup_workers(num_workers) - - def _setup_workers(self, num_workers): - """ Sets up the worker threads - NOTE: undefined behaviour if you call this again. - """ - self.pool = [] - - for _ in range(num_workers): - self.pool.append(Thread(target=self.threadloop)) - - for a_thread in self.pool: - a_thread.setDaemon(True) - a_thread.start() - - - def do(self, f, *args, **kwArgs): - """ puts a function on a queue for running later. - """ - self.queue.put((f, args, kwArgs)) - - - def stop(self): - """ Stops the WorkerQueue, waits for all of the threads to finish up. 
- """ - self.queue.put(STOP) - for thread in self.pool: - thread.join() - - - def threadloop(self): #, finish = False): - """ Loops until all of the tasks are finished. - """ - while True: - args = self.queue.get() - if args is STOP: - self.queue.put(STOP) - self.queue.task_done() - break - else: - try: - args[0](*args[1], **args[2]) - finally: - # clean up the queue, raise the exception. - self.queue.task_done() - #raise - - - def wait(self): - """ waits until all tasks are complete. - """ - self.queue.join() - -class FuncResult: - """ Used for wrapping up a function call so that the results are stored - inside the instances result attribute. - """ - def __init__(self, f, callback = None, errback = None): - """ f - is the function we that we call - callback(result) - this is called when the function(f) returns - errback(exception) - this is called when the function(f) raises - an exception. - """ - self.f = f - self.exception = None - self.callback = callback - self.errback = errback - - def __call__(self, *args, **kwargs): - - #we try to call the function here. If it fails we store the exception. - try: - self.result = self.f(*args, **kwargs) - if self.callback: - self.callback(self.result) - except Exception: - self.exception = geterror() - if self.errback: - self.errback(self.exception) - - -def tmap(f, seq_args, num_workers = 20, worker_queue = None, wait = True, stop_on_error = True): - """ like map, but uses a thread pool to execute. - num_workers - the number of worker threads that will be used. If pool - is passed in, then the num_workers arg is ignored. - worker_queue - you can optionally pass in an existing WorkerQueue. - wait - True means that the results are returned when everything is finished. - False means that we return the [worker_queue, results] right away instead. - results, is returned as a list of FuncResult instances. - stop_on_error - - """ - - if worker_queue: - wq = worker_queue - else: - # see if we have a global queue to work with. 
- if _wq: - wq = _wq - else: - if num_workers == 0: - return map(f, seq_args) - - wq = WorkerQueue(num_workers) - - # we short cut it here if the number of workers is 0. - # normal map should be faster in this case. - if len(wq.pool) == 0: - return map(f, seq_args) - - #print ("queue size:%s" % wq.queue.qsize()) - - - #TODO: divide the data (seq_args) into even chunks and - # then pass each thread a map(f, equal_part(seq_args)) - # That way there should be less locking, and overhead. - - - - results = [] - for sa in seq_args: - results.append(FuncResult(f)) - wq.do(results[-1], sa) - - - #wq.stop() - - if wait: - #print ("wait") - wq.wait() - #print ("after wait") - #print ("queue size:%s" % wq.queue.qsize()) - if wq.queue.qsize(): - raise Exception("buggy threadmap") - # if we created a worker queue, we need to stop it. - if not worker_queue and not _wq: - #print ("stoping") - wq.stop() - if wq.queue.qsize(): - um = wq.queue.get() - if not um is STOP: - raise Exception("buggy threadmap") - - - # see if there were any errors. If so raise the first one. This matches map behaviour. - # TODO: the traceback doesn't show up nicely. - # NOTE: TODO: we might want to return the results anyway? This should be an option. 
- if stop_on_error: - error_ones = list(filter(lambda x:x.exception, results)) - if error_ones: - raise error_ones[0].exception - - return map(lambda x:x.result, results) - else: - return [wq, results] diff --git a/venv/lib/python3.7/site-packages/pygame/time.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/time.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 2ac33e8..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/time.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/transform.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/pygame/transform.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index df78bf9..0000000 Binary files a/venv/lib/python3.7/site-packages/pygame/transform.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/pygame/version.py b/venv/lib/python3.7/site-packages/pygame/version.py deleted file mode 100644 index 7b1862d..0000000 --- a/venv/lib/python3.7/site-packages/pygame/version.py +++ /dev/null @@ -1,45 +0,0 @@ -## pygame - Python Game Library -## Copyright (C) 2000-2003 Pete Shinners -## -## This library is free software; you can redistribute it and/or -## modify it under the terms of the GNU Library General Public -## License as published by the Free Software Foundation; either -## version 2 of the License, or (at your option) any later version. -## -## This library is distributed in the hope that it will be useful, -## but WITHOUT ANY WARRANTY; without even the implied warranty of -## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -## Library General Public License for more details. 
-## -## You should have received a copy of the GNU Library General Public -## License along with this library; if not, write to the Free -## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -## -## Pete Shinners -## pete@shinners.org - -"""Simply the current installed pygame version. The version information is -stored in the regular pygame module as 'pygame.ver'. Keeping the version -information also available in a separate module allows you to test the -pygame version without importing the main pygame module. - -The python version information should always compare greater than any previous -releases. (hmm, until we get to versions > 10) -""" - -class PygameVersion(tuple): - __slots__ = () - fields = 'major', 'minor', 'patch' - def __new__(cls, major, minor, patch): - return tuple.__new__(cls, (major, minor, patch)) - def __repr__(self): - fields = ('{}={}'.format(fld, val) for fld, val in zip(self.fields, self)) - return '{}({})'.format(str(self.__class__.__name__), ', '.join(fields)) - def __str__(self): - return '{}.{}.{}'.format(*self) - major = property(lambda self: self[0]) - minor = property(lambda self: self[1]) - patch = property(lambda self: self[2]) -ver = "1.9.6" -vernum = PygameVersion(1, 9, 6) -rev = "" diff --git a/venv/lib/python3.7/site-packages/setuptools-40.8.0-py3.7.egg b/venv/lib/python3.7/site-packages/setuptools-40.8.0-py3.7.egg deleted file mode 100644 index 9c35dc8..0000000 Binary files a/venv/lib/python3.7/site-packages/setuptools-40.8.0-py3.7.egg and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/setuptools.pth b/venv/lib/python3.7/site-packages/setuptools.pth deleted file mode 100644 index ca49991..0000000 --- a/venv/lib/python3.7/site-packages/setuptools.pth +++ /dev/null @@ -1 +0,0 @@ -./setuptools-40.8.0-py3.7.egg diff --git a/venv/lib64 b/venv/lib64 deleted file mode 120000 index 7951405..0000000 --- a/venv/lib64 +++ /dev/null @@ -1 +0,0 @@ -lib \ No newline at end of file diff --git 
a/venv/pyvenv.cfg b/venv/pyvenv.cfg deleted file mode 100644 index f967d3d..0000000 --- a/venv/pyvenv.cfg +++ /dev/null @@ -1,3 +0,0 @@ -home = /usr/bin -include-system-site-packages = false -version = 3.7.3